Russell King's ARM Linux kernel tree
path: root/tools
Diffstat (limited to 'tools')
-rw-r--r--tools/Makefile13
-rw-r--r--tools/accounting/Makefile2
-rw-r--r--tools/accounting/delaytop.c1145
-rw-r--r--tools/accounting/getdelays.c167
-rw-r--r--tools/arch/arm/include/uapi/asm/kvm.h315
-rw-r--r--tools/arch/arm64/include/asm/cputype.h36
-rw-r--r--tools/arch/arm64/include/asm/esr.h6
-rw-r--r--tools/arch/arm64/include/asm/gpr-num.h6
-rw-r--r--tools/arch/arm64/include/asm/sysreg.h15
-rw-r--r--tools/arch/arm64/include/uapi/asm/kvm.h11
-rw-r--r--tools/arch/loongarch/include/asm/inst.h12
-rw-r--r--tools/arch/loongarch/include/asm/orc_types.h4
-rw-r--r--tools/arch/powerpc/include/uapi/asm/kvm.h13
-rw-r--r--tools/arch/riscv/include/asm/csr.h11
-rw-r--r--tools/arch/riscv/include/asm/vdso/processor.h4
-rw-r--r--tools/arch/s390/include/uapi/asm/bitsperlong.h4
-rw-r--r--tools/arch/s390/include/uapi/asm/kvm_perf.h22
-rw-r--r--tools/arch/x86/include/asm/amd/ibs.h5
-rw-r--r--tools/arch/x86/include/asm/asm.h12
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h33
-rw-r--r--tools/arch/x86/include/asm/inat.h15
-rw-r--r--tools/arch/x86/include/asm/insn.h56
-rw-r--r--tools/arch/x86/include/asm/io.h101
-rw-r--r--tools/arch/x86/include/asm/msr-index.h66
-rw-r--r--tools/arch/x86/include/asm/special_insns.h27
-rw-r--r--tools/arch/x86/include/uapi/asm/kvm.h111
-rw-r--r--tools/arch/x86/include/uapi/asm/kvm_perf.h17
-rw-r--r--tools/arch/x86/include/uapi/asm/svm.h6
-rw-r--r--tools/arch/x86/include/uapi/asm/vmx.h10
-rw-r--r--tools/arch/x86/lib/inat.c13
-rw-r--r--tools/arch/x86/lib/insn.c35
-rw-r--r--tools/arch/x86/lib/memcpy_64.S1
-rw-r--r--tools/arch/x86/lib/memset_64.S1
-rw-r--r--tools/arch/x86/lib/x86-opcode-map.txt111
-rw-r--r--tools/arch/x86/tools/gen-cpu-feature-names-x86.awk34
-rw-r--r--tools/arch/x86/tools/gen-insn-attr-x86.awk44
-rw-r--r--tools/bootconfig/main.c45
-rw-r--r--tools/bootconfig/scripts/ftrace.sh1
-rwxr-xr-xtools/bootconfig/test-bootconfig.sh37
-rw-r--r--tools/bpf/Makefile13
-rw-r--r--tools/bpf/bpf_jit_disasm.c2
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-gen.rst13
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-map.rst3
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-prog.rst21
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-token.rst64
-rw-r--r--tools/bpf/bpftool/Makefile6
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool53
-rw-r--r--tools/bpf/bpftool/btf.c8
-rw-r--r--tools/bpf/bpftool/btf_dumper.c4
-rw-r--r--tools/bpf/bpftool/cgroup.c4
-rw-r--r--tools/bpf/bpftool/common.c152
-rw-r--r--tools/bpf/bpftool/feature.c86
-rw-r--r--tools/bpf/bpftool/gen.c68
-rw-r--r--tools/bpf/bpftool/iter.c2
-rw-r--r--tools/bpf/bpftool/link.c62
-rw-r--r--tools/bpf/bpftool/main.c35
-rw-r--r--tools/bpf/bpftool/main.h34
-rw-r--r--tools/bpf/bpftool/map.c59
-rw-r--r--tools/bpf/bpftool/map_perf_ring.c3
-rw-r--r--tools/bpf/bpftool/net.c15
-rw-r--r--tools/bpf/bpftool/prog.c86
-rw-r--r--tools/bpf/bpftool/sign.c217
-rw-r--r--tools/bpf/bpftool/token.c210
-rw-r--r--tools/bpf/bpftool/tracelog.c11
-rw-r--r--tools/bpf/resolve_btfids/Makefile2
-rw-r--r--tools/bpf/runqslower/Makefile91
-rw-r--r--tools/bpf/runqslower/runqslower.bpf.c106
-rw-r--r--tools/bpf/runqslower/runqslower.c171
-rw-r--r--tools/bpf/runqslower/runqslower.h13
-rw-r--r--tools/build/Build2
-rw-r--r--tools/build/Makefile21
-rw-r--r--tools/build/Makefile.feature23
-rw-r--r--tools/build/feature/Makefile47
-rw-r--r--tools/build/feature/test-all.c48
-rw-r--r--tools/build/feature/test-get_cpuid.c8
-rw-r--r--tools/build/feature/test-get_current_dir_name.c11
-rw-r--r--tools/build/feature/test-libcrypto.c25
-rw-r--r--tools/build/feature/test-libslang-include-subdir.c7
-rw-r--r--tools/cgroup/memcg_slabinfo.py4
-rw-r--r--tools/dma/.gitignore3
-rw-r--r--tools/dma/Makefile55
-rw-r--r--tools/dma/config (renamed from tools/testing/selftests/dma/config)0
-rw-r--r--tools/dma/dma_map_benchmark.c (renamed from tools/testing/selftests/dma/dma_map_benchmark.c)3
-rwxr-xr-xtools/docs/check-variable-fonts.py37
-rwxr-xr-xtools/docs/checktransupdate.py307
-rwxr-xr-xtools/docs/documentation-file-ref-check245
-rwxr-xr-xtools/docs/features-refresh.sh98
-rwxr-xr-xtools/docs/find-unused-docs.sh62
-rwxr-xr-xtools/docs/gen-redirects.py54
-rwxr-xr-xtools/docs/gen-renames.py130
-rwxr-xr-xtools/docs/get_abi.py214
-rwxr-xr-xtools/docs/get_feat.py225
-rwxr-xr-xtools/docs/list-arch.sh11
-rwxr-xr-xtools/docs/parse-headers.py60
-rwxr-xr-xtools/docs/sphinx-build-wrapper864
-rwxr-xr-xtools/docs/sphinx-pre-install1543
-rwxr-xr-xtools/docs/test_doc_build.py513
-rw-r--r--tools/gpio/Makefile2
-rw-r--r--tools/hv/hv_fcopy_uio_daemon.c128
-rw-r--r--tools/iio/iio_event_monitor.c10
-rw-r--r--tools/iio/iio_generic_buffer.c2
-rw-r--r--tools/include/asm-generic/bitops/__fls.h2
-rw-r--r--tools/include/asm-generic/bitops/fls.h2
-rw-r--r--tools/include/asm-generic/bitops/fls64.h4
-rw-r--r--tools/include/asm-generic/io.h482
-rw-r--r--tools/include/asm/io.h11
-rw-r--r--tools/include/linux/args.h28
-rw-r--r--tools/include/linux/atomic.h22
-rw-r--r--tools/include/linux/bitmap.h1
-rw-r--r--tools/include/linux/bits.h85
-rw-r--r--tools/include/linux/build_bug.h10
-rw-r--r--tools/include/linux/cfi_types.h29
-rw-r--r--tools/include/linux/compiler.h34
-rw-r--r--tools/include/linux/gfp_types.h393
-rw-r--r--tools/include/linux/interval_tree_generic.h10
-rw-r--r--tools/include/linux/io.h4
-rw-r--r--tools/include/linux/kallsyms.h4
-rw-r--r--tools/include/linux/livepatch_external.h76
-rw-r--r--tools/include/linux/objtool_types.h3
l---------tools/include/linux/pci_ids.h1
-rw-r--r--tools/include/linux/slab.h165
-rw-r--r--tools/include/linux/static_call_types.h4
-rw-r--r--tools/include/linux/string.h14
-rw-r--r--tools/include/nolibc/Makefile34
-rw-r--r--tools/include/nolibc/arch-arm.h2
-rw-r--r--tools/include/nolibc/arch-arm64.h (renamed from tools/include/nolibc/arch-aarch64.h)12
-rw-r--r--tools/include/nolibc/arch-i386.h178
-rw-r--r--tools/include/nolibc/arch-loongarch.h2
-rw-r--r--tools/include/nolibc/arch-m68k.h2
-rw-r--r--tools/include/nolibc/arch-mips.h119
-rw-r--r--tools/include/nolibc/arch-powerpc.h2
-rw-r--r--tools/include/nolibc/arch-riscv.h2
-rw-r--r--tools/include/nolibc/arch-s390.h7
-rw-r--r--tools/include/nolibc/arch-sh.h164
-rw-r--r--tools/include/nolibc/arch-sparc.h18
-rw-r--r--tools/include/nolibc/arch-x86.h (renamed from tools/include/nolibc/arch-x86_64.h)190
-rw-r--r--tools/include/nolibc/arch.h21
-rw-r--r--tools/include/nolibc/compiler.h4
-rw-r--r--tools/include/nolibc/crt.h3
-rw-r--r--tools/include/nolibc/dirent.h6
-rw-r--r--tools/include/nolibc/getopt.h2
-rw-r--r--tools/include/nolibc/inttypes.h3
-rw-r--r--tools/include/nolibc/nolibc.h3
-rw-r--r--tools/include/nolibc/poll.h4
-rw-r--r--tools/include/nolibc/stackprotector.h2
-rw-r--r--tools/include/nolibc/std.h8
-rw-r--r--tools/include/nolibc/stdio.h14
-rw-r--r--tools/include/nolibc/stdlib.h2
-rw-r--r--tools/include/nolibc/string.h15
-rw-r--r--tools/include/nolibc/sys.h214
-rw-r--r--tools/include/nolibc/sys/auxv.h3
-rw-r--r--tools/include/nolibc/sys/mman.h5
-rw-r--r--tools/include/nolibc/sys/random.h4
-rw-r--r--tools/include/nolibc/sys/reboot.h2
-rw-r--r--tools/include/nolibc/sys/select.h103
-rw-r--r--tools/include/nolibc/sys/timerfd.h8
-rw-r--r--tools/include/nolibc/sys/uio.h49
-rw-r--r--tools/include/nolibc/sys/wait.h37
-rw-r--r--tools/include/nolibc/time.h53
-rw-r--r--tools/include/nolibc/types.h47
-rw-r--r--tools/include/nolibc/unistd.h8
-rw-r--r--tools/include/uapi/asm-generic/unistd.h8
-rw-r--r--tools/include/uapi/drm/drm.h67
-rw-r--r--tools/include/uapi/linux/bits.h8
-rw-r--r--tools/include/uapi/linux/bpf.h105
-rw-r--r--tools/include/uapi/linux/coredump.h104
-rw-r--r--tools/include/uapi/linux/fscrypt.h6
-rw-r--r--tools/include/uapi/linux/genetlink.h103
-rw-r--r--tools/include/uapi/linux/if_addr.h79
-rw-r--r--tools/include/uapi/linux/if_xdp.h1
-rw-r--r--tools/include/uapi/linux/kvm.h35
-rw-r--r--tools/include/uapi/linux/neighbour.h229
-rw-r--r--tools/include/uapi/linux/netdev.h8
-rw-r--r--tools/include/uapi/linux/netfilter.h80
-rw-r--r--tools/include/uapi/linux/netfilter_arp.h23
-rw-r--r--tools/include/uapi/linux/nsfs.h87
-rw-r--r--tools/include/uapi/linux/perf_event.h23
-rw-r--r--tools/include/uapi/linux/prctl.h9
-rw-r--r--tools/include/uapi/linux/rtnetlink.h848
-rw-r--r--tools/include/uapi/linux/stat.h8
-rw-r--r--tools/include/vdso/unaligned.h12
-rw-r--r--tools/lib/bpf/Build2
-rw-r--r--tools/lib/bpf/bpf.c72
-rw-r--r--tools/lib/bpf/bpf.h31
-rw-r--r--tools/lib/bpf/bpf_gen_internal.h2
-rw-r--r--tools/lib/bpf/bpf_helpers.h17
-rw-r--r--tools/lib/bpf/bpf_tracing.h2
-rw-r--r--tools/lib/bpf/btf.c82
-rw-r--r--tools/lib/bpf/btf.h11
-rw-r--r--tools/lib/bpf/btf_dump.c59
-rw-r--r--tools/lib/bpf/elf.c1
-rw-r--r--tools/lib/bpf/features.c1
-rw-r--r--tools/lib/bpf/gen_loader.c50
-rw-r--r--tools/lib/bpf/libbpf.c498
-rw-r--r--tools/lib/bpf/libbpf.h98
-rw-r--r--tools/lib/bpf/libbpf.map8
-rw-r--r--tools/lib/bpf/libbpf_errno.c75
-rw-r--r--tools/lib/bpf/libbpf_internal.h21
-rw-r--r--tools/lib/bpf/libbpf_probes.c4
-rw-r--r--tools/lib/bpf/libbpf_utils.c256
-rw-r--r--tools/lib/bpf/libbpf_version.h2
-rw-r--r--tools/lib/bpf/linker.c4
-rw-r--r--tools/lib/bpf/relo_core.c1
-rw-r--r--tools/lib/bpf/ringbuf.c1
-rw-r--r--tools/lib/bpf/skel_internal.h76
-rw-r--r--tools/lib/bpf/str_error.c104
-rw-r--r--tools/lib/bpf/str_error.h19
-rw-r--r--tools/lib/bpf/usdt.bpf.h44
-rw-r--r--tools/lib/bpf/usdt.c85
-rw-r--r--tools/lib/perf/Documentation/libperf.txt1
-rw-r--r--tools/lib/perf/cpumap.c49
-rw-r--r--tools/lib/perf/evlist.c119
-rw-r--r--tools/lib/perf/evsel.c11
-rw-r--r--tools/lib/perf/include/internal/evsel.h3
-rw-r--r--tools/lib/perf/include/perf/core.h2
-rw-r--r--tools/lib/perf/include/perf/cpumap.h2
-rw-r--r--tools/lib/perf/include/perf/event.h44
-rw-r--r--tools/lib/perf/include/perf/threadmap.h1
-rw-r--r--tools/lib/perf/mmap.c2
-rw-r--r--tools/lib/perf/threadmap.c17
-rw-r--r--tools/lib/python/__init__.py0
-rw-r--r--tools/lib/python/abi/__init__.py0
-rw-r--r--tools/lib/python/abi/abi_parser.py628
-rw-r--r--tools/lib/python/abi/abi_regex.py234
-rw-r--r--tools/lib/python/abi/helpers.py38
-rw-r--r--tools/lib/python/abi/system_symbols.py378
-rwxr-xr-xtools/lib/python/feat/parse_features.py494
-rwxr-xr-xtools/lib/python/jobserver.py149
-rw-r--r--tools/lib/python/kdoc/__init__.py0
-rw-r--r--tools/lib/python/kdoc/enrich_formatter.py70
-rw-r--r--tools/lib/python/kdoc/kdoc_files.py294
-rw-r--r--tools/lib/python/kdoc/kdoc_item.py43
-rw-r--r--tools/lib/python/kdoc/kdoc_output.py824
-rw-r--r--tools/lib/python/kdoc/kdoc_parser.py1670
-rw-r--r--tools/lib/python/kdoc/kdoc_re.py270
-rwxr-xr-xtools/lib/python/kdoc/latex_fonts.py167
-rwxr-xr-xtools/lib/python/kdoc/parse_data_structs.py482
-rw-r--r--tools/lib/python/kdoc/python_version.py178
-rw-r--r--tools/lib/subcmd/help.c15
-rw-r--r--tools/lib/subcmd/run-command.c15
-rw-r--r--tools/lib/thermal/Makefile9
-rw-r--r--tools/lib/thermal/libthermal.map5
-rw-r--r--tools/mm/page_owner_sort.c14
-rw-r--r--tools/mm/show_page_info.py169
-rw-r--r--tools/mm/slabinfo.c7
-rw-r--r--tools/net/sunrpc/xdrgen/generators/__init__.py11
-rw-r--r--tools/net/sunrpc/xdrgen/generators/union.py34
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/close.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/close.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/decoder/close.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/decoder/variable_length_opaque.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/encoder/close.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/basic.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/fixed_length_array.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/fixed_length_opaque.j24
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/string.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/variable_length_array.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/variable_length_opaque.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/basic.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/fixed_length_array.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/fixed_length_opaque.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/string.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/variable_length_array.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/variable_length_opaque.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/declaration/close.j24
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/decoder/close.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/encoder/close.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/encoder/string.j26
-rwxr-xr-xtools/net/sunrpc/xdrgen/xdrgen5
-rw-r--r--tools/net/ynl/Makefile29
-rw-r--r--tools/net/ynl/Makefile.deps1
-rw-r--r--tools/net/ynl/lib/ynl-priv.h14
-rw-r--r--tools/net/ynl/lib/ynl.c6
-rwxr-xr-xtools/net/ynl/pyynl/cli.py98
-rwxr-xr-xtools/net/ynl/pyynl/ethtool.py17
-rw-r--r--tools/net/ynl/pyynl/lib/__init__.py4
-rw-r--r--tools/net/ynl/pyynl/lib/doc_generator.py402
-rw-r--r--tools/net/ynl/pyynl/lib/nlspec.py2
-rw-r--r--tools/net/ynl/pyynl/lib/ynl.py142
-rwxr-xr-xtools/net/ynl/pyynl/ynl_gen_c.py224
-rwxr-xr-xtools/net/ynl/pyynl/ynl_gen_rst.py384
-rw-r--r--tools/net/ynl/samples/.gitignore1
-rw-r--r--tools/net/ynl/samples/Makefile1
-rw-r--r--tools/net/ynl/samples/page-pool.c149
-rw-r--r--tools/net/ynl/samples/tc-filter-add.c335
-rw-r--r--tools/net/ynl/tests/Makefile32
-rw-r--r--tools/net/ynl/tests/config6
-rwxr-xr-xtools/net/ynl/tests/test_ynl_cli.sh327
-rwxr-xr-xtools/net/ynl/tests/test_ynl_ethtool.sh222
-rw-r--r--tools/net/ynl/ynltool/.gitignore2
-rw-r--r--tools/net/ynl/ynltool/Makefile55
-rw-r--r--tools/net/ynl/ynltool/json_writer.c288
-rw-r--r--tools/net/ynl/ynltool/json_writer.h75
-rw-r--r--tools/net/ynl/ynltool/main.c242
-rw-r--r--tools/net/ynl/ynltool/main.h66
-rw-r--r--tools/net/ynl/ynltool/page-pool.c461
-rw-r--r--tools/net/ynl/ynltool/qstats.c621
-rw-r--r--tools/objtool/.gitignore3
-rw-r--r--tools/objtool/Build8
-rw-r--r--tools/objtool/Makefile70
-rw-r--r--tools/objtool/arch/loongarch/decode.c62
-rw-r--r--tools/objtool/arch/loongarch/orc.c1
-rw-r--r--tools/objtool/arch/loongarch/special.c28
-rw-r--r--tools/objtool/arch/powerpc/decode.c31
-rw-r--r--tools/objtool/arch/powerpc/special.c5
-rw-r--r--tools/objtool/arch/x86/Build13
-rw-r--r--tools/objtool/arch/x86/decode.c123
-rw-r--r--tools/objtool/arch/x86/orc.c1
-rw-r--r--tools/objtool/arch/x86/special.c12
-rw-r--r--tools/objtool/builtin-check.c102
-rw-r--r--tools/objtool/builtin-klp.c53
-rw-r--r--tools/objtool/check.c1578
-rw-r--r--tools/objtool/disas.c1248
-rw-r--r--tools/objtool/elf.c822
-rw-r--r--tools/objtool/include/objtool/arch.h17
-rw-r--r--tools/objtool/include/objtool/builtin.h13
-rw-r--r--tools/objtool/include/objtool/check.h39
-rw-r--r--tools/objtool/include/objtool/checksum.h43
-rw-r--r--tools/objtool/include/objtool/checksum_types.h25
-rw-r--r--tools/objtool/include/objtool/disas.h81
-rw-r--r--tools/objtool/include/objtool/elf.h199
-rw-r--r--tools/objtool/include/objtool/endianness.h9
-rw-r--r--tools/objtool/include/objtool/klp.h35
-rw-r--r--tools/objtool/include/objtool/objtool.h6
-rw-r--r--tools/objtool/include/objtool/special.h4
-rw-r--r--tools/objtool/include/objtool/trace.h141
-rw-r--r--tools/objtool/include/objtool/util.h19
-rw-r--r--tools/objtool/include/objtool/warn.h66
-rw-r--r--tools/objtool/klp-diff.c1723
-rw-r--r--tools/objtool/klp-post-link.c168
-rw-r--r--tools/objtool/noreturns.h3
-rw-r--r--tools/objtool/objtool.c44
-rw-r--r--tools/objtool/orc_dump.c1
-rw-r--r--tools/objtool/orc_gen.c9
-rw-r--r--tools/objtool/signal.c135
-rw-r--r--tools/objtool/special.c16
-rwxr-xr-xtools/objtool/sync-check.sh2
-rw-r--r--tools/objtool/trace.c203
-rw-r--r--tools/objtool/weak.c7
-rw-r--r--tools/perf/.gitignore2
-rw-r--r--tools/perf/Build2
-rw-r--r--tools/perf/Documentation/Build.txt15
-rw-r--r--tools/perf/Documentation/android.txt80
-rw-r--r--tools/perf/Documentation/intel-acr.txt53
-rw-r--r--tools/perf/Documentation/perf-amd-ibs.txt68
-rw-r--r--tools/perf/Documentation/perf-annotate.txt1
-rw-r--r--tools/perf/Documentation/perf-arm-spe.txt118
-rw-r--r--tools/perf/Documentation/perf-bench.txt58
-rw-r--r--tools/perf/Documentation/perf-c2c.txt18
-rw-r--r--tools/perf/Documentation/perf-check.txt4
-rw-r--r--tools/perf/Documentation/perf-config.txt7
-rw-r--r--tools/perf/Documentation/perf-diff.txt2
-rw-r--r--tools/perf/Documentation/perf-ftrace.txt10
-rw-r--r--tools/perf/Documentation/perf-list.txt25
-rw-r--r--tools/perf/Documentation/perf-lock.txt15
-rw-r--r--tools/perf/Documentation/perf-mem.txt82
-rw-r--r--tools/perf/Documentation/perf-record.txt24
-rw-r--r--tools/perf/Documentation/perf-report.txt1
-rw-r--r--tools/perf/Documentation/perf-script.txt5
-rw-r--r--tools/perf/Documentation/perf-stat.txt13
-rw-r--r--tools/perf/Documentation/perf-timechart.txt3
-rw-r--r--tools/perf/Documentation/perf-trace.txt21
-rw-r--r--tools/perf/Documentation/perf.data-file-format.txt34
-rw-r--r--tools/perf/MANIFEST6
-rw-r--r--tools/perf/Makefile.config140
-rw-r--r--tools/perf/Makefile.perf54
-rw-r--r--tools/perf/arch/arm/annotate/instructions.c1
-rw-r--r--tools/perf/arch/arm/entry/syscalls/syscall.tbl2
-rw-r--r--tools/perf/arch/arm/util/Build2
-rw-r--r--tools/perf/arch/arm/util/auxtrace.c1
-rw-r--r--tools/perf/arch/arm/util/pmu.c2
-rw-r--r--tools/perf/arch/arm64/annotate/instructions.c1
-rw-r--r--tools/perf/arch/arm64/util/Build19
-rw-r--r--tools/perf/arch/arm64/util/arm-spe.c6
-rw-r--r--tools/perf/arch/arm64/util/arm64_exception_types.h15
-rw-r--r--tools/perf/arch/arm64/util/hisi-ptt.c1
-rw-r--r--tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl2
-rw-r--r--tools/perf/arch/powerpc/entry/syscalls/syscall.tbl2
-rw-r--r--tools/perf/arch/powerpc/util/Build2
-rw-r--r--tools/perf/arch/powerpc/util/auxtrace.c103
-rw-r--r--tools/perf/arch/powerpc/util/event.c60
-rw-r--r--tools/perf/arch/riscv/util/kvm-stat.c6
-rw-r--r--tools/perf/arch/riscv/util/riscv_exception_types.h35
-rw-r--r--tools/perf/arch/riscv/util/riscv_trap_types.h57
-rw-r--r--tools/perf/arch/s390/entry/syscalls/syscall.tbl2
-rw-r--r--tools/perf/arch/s390/util/Build2
-rw-r--r--tools/perf/arch/s390/util/auxtrace.c1
-rw-r--r--tools/perf/arch/sh/entry/syscalls/syscall.tbl2
-rw-r--r--tools/perf/arch/sparc/entry/syscalls/syscall.tbl2
-rw-r--r--tools/perf/arch/x86/Build2
-rw-r--r--tools/perf/arch/x86/annotate/instructions.c187
-rw-r--r--tools/perf/arch/x86/entry/syscalls/syscall_32.tbl2
-rw-r--r--tools/perf/arch/x86/entry/syscalls/syscall_64.tbl3
-rw-r--r--tools/perf/arch/x86/include/arch-tests.h6
-rw-r--r--tools/perf/arch/x86/tests/Build9
-rw-r--r--tools/perf/arch/x86/tests/amd-ibs-period.c1032
-rw-r--r--tools/perf/arch/x86/tests/arch-tests.c9
-rw-r--r--tools/perf/arch/x86/tests/intel-pt-test.c6
-rw-r--r--tools/perf/arch/x86/tests/sample-parsing.c125
-rw-r--r--tools/perf/arch/x86/tests/topdown.c78
-rw-r--r--tools/perf/arch/x86/util/Build6
-rw-r--r--tools/perf/arch/x86/util/event.c46
-rw-r--r--tools/perf/arch/x86/util/evlist.c24
-rw-r--r--tools/perf/arch/x86/util/evsel.c160
-rw-r--r--tools/perf/arch/x86/util/intel-pt.c22
-rw-r--r--tools/perf/arch/x86/util/kvm-stat.c51
-rw-r--r--tools/perf/arch/x86/util/mem-events.c6
-rw-r--r--tools/perf/arch/x86/util/mem-events.h1
-rw-r--r--tools/perf/arch/x86/util/pmu.c290
-rw-r--r--tools/perf/arch/x86/util/topdown.c60
-rw-r--r--tools/perf/arch/x86/util/topdown.h6
-rw-r--r--tools/perf/arch/xtensa/entry/syscalls/syscall.tbl2
-rw-r--r--tools/perf/bench/bench.h1
-rw-r--r--tools/perf/bench/evlist-open-close.c77
-rw-r--r--tools/perf/bench/find-bit-bench.c2
-rw-r--r--tools/perf/bench/futex-hash.c2
-rw-r--r--tools/perf/bench/futex-lock-pi.c1
-rw-r--r--tools/perf/bench/futex-requeue.c1
-rw-r--r--tools/perf/bench/futex-wake-parallel.c1
-rw-r--r--tools/perf/bench/futex-wake.c1
-rw-r--r--tools/perf/bench/futex.c27
-rw-r--r--tools/perf/bench/futex.h2
-rw-r--r--tools/perf/bench/inject-buildid.c2
-rw-r--r--tools/perf/bench/mem-functions.c390
-rw-r--r--tools/perf/bench/mem-memcpy-arch.h2
-rw-r--r--tools/perf/bench/mem-memcpy-x86-64-asm-def.h4
-rw-r--r--tools/perf/bench/mem-memset-arch.h2
-rw-r--r--tools/perf/bench/mem-memset-x86-64-asm-def.h4
-rw-r--r--tools/perf/bench/pmu-scan.c1
-rw-r--r--tools/perf/bench/synthesize.c28
-rw-r--r--tools/perf/builtin-annotate.c16
-rw-r--r--tools/perf/builtin-bench.c1
-rw-r--r--tools/perf/builtin-buildid-cache.c22
-rw-r--r--tools/perf/builtin-buildid-list.c11
-rw-r--r--tools/perf/builtin-c2c.c267
-rw-r--r--tools/perf/builtin-check.c45
-rw-r--r--tools/perf/builtin-diff.c2
-rw-r--r--tools/perf/builtin-evlist.c3
-rw-r--r--tools/perf/builtin-ftrace.c212
-rw-r--r--tools/perf/builtin-inject.c90
-rw-r--r--tools/perf/builtin-kallsyms.c21
-rw-r--r--tools/perf/builtin-kmem.c2
-rw-r--r--tools/perf/builtin-kvm.c138
-rw-r--r--tools/perf/builtin-kwork.c29
-rw-r--r--tools/perf/builtin-list.c208
-rw-r--r--tools/perf/builtin-lock.c92
-rw-r--r--tools/perf/builtin-mem.c3
-rw-r--r--tools/perf/builtin-record.c322
-rw-r--r--tools/perf/builtin-report.c54
-rw-r--r--tools/perf/builtin-sched.c179
-rw-r--r--tools/perf/builtin-script.c446
-rw-r--r--tools/perf/builtin-stat.c564
-rw-r--r--tools/perf/builtin-timechart.c17
-rw-r--r--tools/perf/builtin-top.c105
-rw-r--r--tools/perf/builtin-trace.c381
-rw-r--r--tools/perf/builtin-version.c30
-rw-r--r--tools/perf/builtin.h9
-rwxr-xr-xtools/perf/check-headers.sh23
-rw-r--r--tools/perf/include/perf/perf_dlfilter.h2
-rw-r--r--tools/perf/jvmti/libjvmti.c4
-rwxr-xr-xtools/perf/perf-archive.sh35
-rw-r--r--tools/perf/perf.c3
-rw-r--r--tools/perf/perf.h2
-rw-r--r--tools/perf/pmu-events/Build27
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json8
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json26
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json2
-rw-r--r--tools/perf/pmu-events/arch/arm64/common-and-microarch.json70
-rw-r--r--tools/perf/pmu-events/arch/arm64/freescale/imx94/sys/ddrc.json9
-rw-r--r--tools/perf/pmu-events/arch/arm64/freescale/imx94/sys/metrics.json450
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/core-imp-def.json2
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/cycle_accounting.json4
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/exception.json2
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/fp_operation.json98
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l1d_cache.json10
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l1i_cache.json8
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l2_cache.json28
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l3_cache.json63
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/ll_cache.json2
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/pipeline.json6
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/spec_operation.json12
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/stall.json4
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/sve.json44
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/monaka/tlb.json56
-rw-r--r--tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json8
-rw-r--r--tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-hha.json26
-rw-r--r--tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-l3c.json13
-rw-r--r--tools/perf/pmu-events/arch/common/common/legacy-hardware.json72
-rw-r--r--tools/perf/pmu-events/arch/common/common/metrics.json151
-rw-r--r--tools/perf/pmu-events/arch/common/common/software.json94
-rw-r--r--tools/perf/pmu-events/arch/common/common/tool.json12
-rw-r--r--tools/perf/pmu-events/arch/riscv/mapfile.csv1
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z16/pai_crypto.json14
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z16/transaction.json8
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z17/basic.json58
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z17/crypto6.json142
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z17/extended.json541
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z17/pai_crypto.json1213
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z17/pai_ext.json261
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z17/transaction.json72
-rw-r--r--tools/perf/pmu-events/arch/s390/mapfile.csv1
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json565
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/cache.json189
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/frontend.json36
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/memory.json70
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/other.json200
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/pipeline.json115
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/uncore-interconnect.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json74
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/cache.json71
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/memory.json50
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/other.json101
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json19
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/uncore-interconnect.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/arrowlake/arl-metrics.json694
-rw-r--r--tools/perf/pmu-events/arch/x86/arrowlake/cache.json622
-rw-r--r--tools/perf/pmu-events/arch/x86/arrowlake/floating-point.json73
-rw-r--r--tools/perf/pmu-events/arch/x86/arrowlake/frontend.json246
-rw-r--r--tools/perf/pmu-events/arch/x86/arrowlake/memory.json84
-rw-r--r--tools/perf/pmu-events/arch/x86/arrowlake/other.json224
-rw-r--r--tools/perf/pmu-events/arch/x86/arrowlake/pipeline.json600
-rw-r--r--tools/perf/pmu-events/arch/x86/arrowlake/virtual-memory.json113
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/other.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/pipeline.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json278
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json210
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json293
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/cache.json404
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json510
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json6
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/other.json404
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/uncore-cache.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json12
-rw-r--r--tools/perf/pmu-events/arch/x86/clearwaterforest/cache.json35
-rw-r--r--tools/perf/pmu-events/arch/x86/clearwaterforest/memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/clearwaterforest/other.json22
-rw-r--r--tools/perf/pmu-events/arch/x86/clearwaterforest/pipeline.json6
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/cache.json296
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/floating-point.json1
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/memory.json261
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/other.json404
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/pipeline.json31
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/virtual-memory.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/cache.json247
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/emr-metrics.json584
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/frontend.json36
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/memory.json201
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/other.json334
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json98
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cache.json11
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json12
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-memory.json124
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-power.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/cache.json175
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/counter.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/frontend.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/grr-metrics.json250
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/other.json29
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/pipeline.json52
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/uncore-cache.json45
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/uncore-interconnect.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/uncore-io.json12
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/uncore-memory.json338
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/cache.json327
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/counter.json5
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/frontend.json63
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/gnr-metrics.json616
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/memory.json173
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/other.json243
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/pipeline.json100
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/uncore-cache.json51
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/uncore-interconnect.json99
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/uncore-io.json26
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/uncore-memory.json343
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json228
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json247
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/cache.json60
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json465
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/memory.json160
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/other.json220
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/cache.json273
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json530
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/memory.json190
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/other.json463
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/uncore-cache.json6
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json102
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/metricgroups.json5
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json109
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/metricgroups.json5
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/frontend.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json58
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/metricgroups.json5
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/other.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/cache.json305
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/frontend.json39
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/lnl-metrics.json728
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/memory.json59
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/other.json361
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/pipeline.json308
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/uncore-interconnect.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/uncore-memory.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/lunarlake/virtual-memory.json18
-rw-r--r--tools/perf/pmu-events/arch/x86/mapfile.csv29
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/cache.json279
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/frontend.json112
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/memory.json75
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/mtl-metrics.json622
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/other.json144
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json101
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/uncore-memory.json18
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/cache.json32
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/other.json40
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/virtual-memory.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/cache.json32
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/other.json40
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/virtual-memory.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/pantherlake/cache.json1413
-rw-r--r--tools/perf/pmu-events/arch/x86/pantherlake/counter.json17
-rw-r--r--tools/perf/pmu-events/arch/x86/pantherlake/floating-point.json359
-rw-r--r--tools/perf/pmu-events/arch/x86/pantherlake/frontend.json565
-rw-r--r--tools/perf/pmu-events/arch/x86/pantherlake/memory.json302
-rw-r--r--tools/perf/pmu-events/arch/x86/pantherlake/other.json44
-rw-r--r--tools/perf/pmu-events/arch/x86/pantherlake/pipeline.json2194
-rw-r--r--tools/perf/pmu-events/arch/x86/pantherlake/uncore-memory.json26
-rw-r--r--tools/perf/pmu-events/arch/x86/pantherlake/virtual-memory.json310
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/cache.json60
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/memory.json160
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/other.json220
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json466
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/frontend.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/metricgroups.json5
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/other.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json53
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json306
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json36
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/memory.json201
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/other.json384
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json98
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json596
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json11
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json12
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-memory.json124
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-power.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/cache.json66
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/frontend.json64
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/memory.json24
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/other.json49
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json17
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/srf-metrics.json268
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/uncore-cache.json41
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/uncore-interconnect.json63
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/uncore-io.json26
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/uncore-memory.json303
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json458
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/cache.json74
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/other.json74
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json476
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/cache.json296
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/floating-point.json1
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/memory.json261
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/other.json404
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/pipeline.json31
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/virtual-memory.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json466
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/cache.json32
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/other.json40
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/virtual-memory.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/cache.json32
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/other.json40
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/virtual-memory.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/cache.json32
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/other.json40
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/virtual-memory.json8
-rw-r--r--tools/perf/pmu-events/empty-pmu-events.c2914
-rwxr-xr-xtools/perf/pmu-events/jevents.py164
-rwxr-xr-xtools/perf/pmu-events/make_legacy_cache.py129
-rw-r--r--tools/perf/pmu-events/metric.py85
-rwxr-xr-xtools/perf/pmu-events/metric_test.py4
-rw-r--r--tools/perf/pmu-events/pmu-events.h43
-rwxr-xr-xtools/perf/python/counting.py36
-rwxr-xr-xtools/perf/python/ilist.py515
-rw-r--r--tools/perf/scripts/perl/Perf-Trace-Util/Build2
-rwxr-xr-xtools/perf/scripts/python/exported-sql-viewer.py5
-rwxr-xr-xtools/perf/scripts/python/flamegraph.py82
-rw-r--r--tools/perf/tests/Build7
-rw-r--r--tools/perf/tests/backward-ring-buffer.c1
-rw-r--r--tools/perf/tests/bp_account.c1
-rw-r--r--tools/perf/tests/builtin-test.c98
-rw-r--r--tools/perf/tests/code-reading.c139
-rw-r--r--tools/perf/tests/demangle-java-test.c22
-rw-r--r--tools/perf/tests/demangle-ocaml-test.c7
-rw-r--r--tools/perf/tests/demangle-rust-v0-test.c74
-rw-r--r--tools/perf/tests/dlfilter-test.c51
-rw-r--r--tools/perf/tests/dwarf-unwind.c41
-rw-r--r--tools/perf/tests/event-times.c8
-rw-r--r--tools/perf/tests/event_update.c4
-rw-r--r--tools/perf/tests/expand-cgroup.c24
-rw-r--r--tools/perf/tests/hists_cumulate.c8
-rw-r--r--tools/perf/tests/hists_filter.c8
-rw-r--r--tools/perf/tests/hists_link.c8
-rw-r--r--tools/perf/tests/hists_output.c10
-rw-r--r--tools/perf/tests/hwmon_pmu.c12
-rw-r--r--tools/perf/tests/kallsyms-split.c156
-rw-r--r--tools/perf/tests/keep-tracking.c4
-rw-r--r--tools/perf/tests/make20
-rw-r--r--tools/perf/tests/maps.c82
-rw-r--r--tools/perf/tests/mmap-basic.c291
-rw-r--r--tools/perf/tests/mmap-thread-lookup.c6
-rw-r--r--tools/perf/tests/openat-syscall-all-cpus.c2
-rw-r--r--tools/perf/tests/openat-syscall-tp-fields.c1
-rw-r--r--tools/perf/tests/openat-syscall.c2
-rw-r--r--tools/perf/tests/parse-events.c2084
-rw-r--r--tools/perf/tests/parse-metric.c19
-rw-r--r--tools/perf/tests/pe-file-parsing.c2
-rw-r--r--tools/perf/tests/perf-record.c41
-rwxr-xr-xtools/perf/tests/perf-targz-src-pkg2
-rw-r--r--tools/perf/tests/perf-time-to-tsc.c6
-rw-r--r--tools/perf/tests/pfm.c1
-rw-r--r--tools/perf/tests/pmu-events.c185
-rw-r--r--tools/perf/tests/pmu.c3
-rw-r--r--tools/perf/tests/python-use.c27
-rw-r--r--tools/perf/tests/sample-parsing.c14
-rw-r--r--tools/perf/tests/sdt.c4
-rwxr-xr-xtools/perf/tests/shell/amd-ibs-swfilt.sh92
-rwxr-xr-xtools/perf/tests/shell/annotate.sh15
-rw-r--r--tools/perf/tests/shell/attr/test-stat-default7
-rw-r--r--tools/perf/tests/shell/attr/test-stat-detailed-17
-rw-r--r--tools/perf/tests/shell/attr/test-stat-detailed-27
-rw-r--r--tools/perf/tests/shell/attr/test-stat-detailed-37
-rwxr-xr-xtools/perf/tests/shell/base_probe/test_adding_blacklisted.sh20
-rwxr-xr-xtools/perf/tests/shell/base_probe/test_adding_kernel.sh97
-rwxr-xr-xtools/perf/tests/shell/base_probe/test_basic.sh31
-rwxr-xr-xtools/perf/tests/shell/base_probe/test_invalid_options.sh14
-rwxr-xr-xtools/perf/tests/shell/base_probe/test_line_semantics.sh7
-rwxr-xr-xtools/perf/tests/shell/base_report/setup.sh10
-rwxr-xr-xtools/perf/tests/shell/base_report/test_basic.sh103
-rwxr-xr-xtools/perf/tests/shell/buildid.sh205
-rwxr-xr-xtools/perf/tests/shell/c2c.sh62
-rw-r--r--tools/perf/tests/shell/common/init.sh4
-rwxr-xr-xtools/perf/tests/shell/coresight/asm_pure_loop.sh2
-rw-r--r--tools/perf/tests/shell/coresight/memcpy_thread/memcpy_thread.c2
-rwxr-xr-xtools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh2
-rw-r--r--tools/perf/tests/shell/coresight/thread_loop/thread_loop.c4
-rwxr-xr-xtools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh2
-rwxr-xr-xtools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh2
-rw-r--r--tools/perf/tests/shell/coresight/unroll_loop_thread/unroll_loop_thread.c4
-rwxr-xr-xtools/perf/tests/shell/coresight/unroll_loop_thread_10.sh2
-rwxr-xr-xtools/perf/tests/shell/diff.sh2
-rwxr-xr-xtools/perf/tests/shell/drm_pmu.sh78
-rwxr-xr-xtools/perf/tests/shell/evlist.sh79
-rwxr-xr-xtools/perf/tests/shell/ftrace.sh2
-rwxr-xr-xtools/perf/tests/shell/header.sh74
-rwxr-xr-xtools/perf/tests/shell/jitdump-python.sh81
-rwxr-xr-xtools/perf/tests/shell/kallsyms.sh56
-rwxr-xr-xtools/perf/tests/shell/kvm.sh154
-rw-r--r--tools/perf/tests/shell/lib/perf_has_symbol.sh2
-rw-r--r--tools/perf/tests/shell/lib/perf_json_output_lint.py13
-rw-r--r--tools/perf/tests/shell/lib/perf_metric_validation.py12
-rw-r--r--tools/perf/tests/shell/lib/probe_vfs_getname.sh22
-rw-r--r--tools/perf/tests/shell/lib/setup_python.sh2
-rw-r--r--tools/perf/tests/shell/lib/stat_output.sh7
-rw-r--r--tools/perf/tests/shell/lib/waiting.sh2
-rwxr-xr-xtools/perf/tests/shell/list.sh2
-rwxr-xr-xtools/perf/tests/shell/lock_contention.sh41
-rwxr-xr-xtools/perf/tests/shell/perf-report-hierarchy.sh43
-rwxr-xr-xtools/perf/tests/shell/probe_vfs_getname.sh10
-rwxr-xr-xtools/perf/tests/shell/python-use.sh36
-rwxr-xr-xtools/perf/tests/shell/record+probe_libc_inet_pton.sh7
-rwxr-xr-xtools/perf/tests/shell/record+script_probe_vfs_getname.sh10
-rwxr-xr-xtools/perf/tests/shell/record+zstd_comp_decomp.sh2
-rwxr-xr-xtools/perf/tests/shell/record.sh137
-rwxr-xr-xtools/perf/tests/shell/record_bpf_filter.sh2
-rwxr-xr-xtools/perf/tests/shell/record_lbr.sh31
-rwxr-xr-xtools/perf/tests/shell/record_offcpu.sh73
-rwxr-xr-xtools/perf/tests/shell/record_sideband.sh2
-rwxr-xr-xtools/perf/tests/shell/record_weak_term.sh37
-rwxr-xr-xtools/perf/tests/shell/sched.sh116
-rwxr-xr-xtools/perf/tests/shell/script.sh2
-rwxr-xr-xtools/perf/tests/shell/script_dlfilter.sh107
-rwxr-xr-xtools/perf/tests/shell/stat+csv_output.sh2
-rwxr-xr-xtools/perf/tests/shell/stat+csv_summary.sh2
-rwxr-xr-xtools/perf/tests/shell/stat+event_uniquifying.sh66
-rwxr-xr-xtools/perf/tests/shell/stat+json_output.sh7
-rwxr-xr-xtools/perf/tests/shell/stat+shadow_stat.sh6
-rwxr-xr-xtools/perf/tests/shell/stat+std_output.sh10
-rwxr-xr-xtools/perf/tests/shell/stat.sh45
-rwxr-xr-xtools/perf/tests/shell/stat_all_metricgroups.sh3
-rwxr-xr-xtools/perf/tests/shell/stat_all_metrics.sh120
-rwxr-xr-xtools/perf/tests/shell/stat_all_pfm.sh2
-rwxr-xr-xtools/perf/tests/shell/stat_bpf_counters.sh2
-rwxr-xr-xtools/perf/tests/shell/stat_bpf_counters_cgrp.sh2
-rwxr-xr-xtools/perf/tests/shell/stat_metrics_values.sh17
-rwxr-xr-xtools/perf/tests/shell/test_arm_callgraph_fp.sh2
-rwxr-xr-xtools/perf/tests/shell/test_arm_coresight.sh2
-rwxr-xr-xtools/perf/tests/shell/test_arm_coresight_disasm.sh2
-rwxr-xr-xtools/perf/tests/shell/test_arm_spe.sh2
-rwxr-xr-xtools/perf/tests/shell/test_arm_spe_fork.sh2
-rwxr-xr-xtools/perf/tests/shell/test_bpf_metadata.sh76
-rwxr-xr-xtools/perf/tests/shell/test_brstack.sh168
-rwxr-xr-xtools/perf/tests/shell/test_data_symbol.sh29
-rwxr-xr-xtools/perf/tests/shell/test_event_open_fallback.sh71
-rwxr-xr-xtools/perf/tests/shell/test_intel_pt.sh7
-rwxr-xr-xtools/perf/tests/shell/timechart.sh67
-rwxr-xr-xtools/perf/tests/shell/top.sh74
-rwxr-xr-xtools/perf/tests/shell/trace+probe_vfs_getname.sh11
-rwxr-xr-xtools/perf/tests/shell/trace_btf_enum.sh30
-rwxr-xr-xtools/perf/tests/shell/trace_btf_general.sh19
-rwxr-xr-xtools/perf/tests/shell/trace_exit_race.sh2
-rwxr-xr-xtools/perf/tests/shell/trace_record_replay.sh2
-rwxr-xr-xtools/perf/tests/shell/trace_summary.sh77
-rw-r--r--tools/perf/tests/subcmd-help.c108
-rw-r--r--tools/perf/tests/switch-tracking.c6
-rw-r--r--tools/perf/tests/symbols.c12
-rw-r--r--tools/perf/tests/task-exit.c1
-rw-r--r--tools/perf/tests/tests-scripts.c3
-rw-r--r--tools/perf/tests/tests.h17
-rw-r--r--tools/perf/tests/thread-map.c2
-rw-r--r--tools/perf/tests/topology.c39
-rw-r--r--tools/perf/tests/util.c45
-rw-r--r--tools/perf/tests/workloads/Build2
-rw-r--r--tools/perf/tests/workloads/noploop.c2
-rw-r--r--tools/perf/tests/workloads/thloop.c45
-rw-r--r--tools/perf/tests/workloads/traploop.c31
-rw-r--r--tools/perf/trace/beauty/Build2
-rw-r--r--tools/perf/trace/beauty/include/linux/socket.h7
-rw-r--r--tools/perf/trace/beauty/include/uapi/linux/fcntl.h19
-rw-r--r--tools/perf/trace/beauty/include/uapi/linux/fs.h94
-rw-r--r--tools/perf/trace/beauty/include/uapi/linux/prctl.h24
-rw-r--r--tools/perf/trace/beauty/include/uapi/linux/stat.h8
-rw-r--r--tools/perf/trace/beauty/include/uapi/linux/vhost.h39
-rw-r--r--tools/perf/ui/Build1
-rw-r--r--tools/perf/ui/browser.c10
-rw-r--r--tools/perf/ui/browser.h5
-rw-r--r--tools/perf/ui/browsers/annotate-data.c1
-rw-r--r--tools/perf/ui/browsers/annotate.c345
-rw-r--r--tools/perf/ui/browsers/header.c5
-rw-r--r--tools/perf/ui/browsers/hists.c127
-rw-r--r--tools/perf/ui/browsers/map.c4
-rw-r--r--tools/perf/ui/browsers/scripts.c2
-rw-r--r--tools/perf/ui/hist.c308
-rw-r--r--tools/perf/ui/keysyms.c44
-rw-r--r--tools/perf/ui/keysyms.h2
-rw-r--r--tools/perf/ui/libslang.h4
-rw-r--r--tools/perf/ui/stdio/hist.c57
-rw-r--r--tools/perf/ui/tui/setup.c2
-rw-r--r--tools/perf/util/Build46
-rw-r--r--tools/perf/util/addr2line.c439
-rw-r--r--tools/perf/util/addr2line.h20
-rw-r--r--tools/perf/util/affinity.c18
-rw-r--r--tools/perf/util/affinity.h2
-rw-r--r--tools/perf/util/amd-sample-raw.c79
-rw-r--r--tools/perf/util/annotate-data.c97
-rw-r--r--tools/perf/util/annotate-data.h27
-rw-r--r--tools/perf/util/annotate.c208
-rw-r--r--tools/perf/util/annotate.h33
-rw-r--r--tools/perf/util/arm-spe-decoder/Build2
-rw-r--r--tools/perf/util/arm-spe-decoder/arm-spe-decoder.c93
-rw-r--r--tools/perf/util/arm-spe-decoder/arm-spe-decoder.h111
-rw-r--r--tools/perf/util/arm-spe-decoder/arm-spe-pkt-decoder.c67
-rw-r--r--tools/perf/util/arm-spe-decoder/arm-spe-pkt-decoder.h47
-rw-r--r--tools/perf/util/arm-spe.c391
-rw-r--r--tools/perf/util/arm-spe.h2
-rw-r--r--tools/perf/util/auxtrace.c47
-rw-r--r--tools/perf/util/auxtrace.h234
-rw-r--r--tools/perf/util/bpf-event.c419
-rw-r--r--tools/perf/util/bpf-event.h13
-rw-r--r--tools/perf/util/bpf-filter.c40
-rw-r--r--tools/perf/util/bpf-filter.h5
-rw-r--r--tools/perf/util/bpf-trace-summary.c465
-rw-r--r--tools/perf/util/bpf-utils.c61
-rw-r--r--tools/perf/util/bpf-utils.h10
-rw-r--r--tools/perf/util/bpf_counter.c93
-rw-r--r--tools/perf/util/bpf_counter.h74
-rw-r--r--tools/perf/util/bpf_counter_cgroup.c84
-rw-r--r--tools/perf/util/bpf_ftrace.c79
-rw-r--r--tools/perf/util/bpf_lock_contention.c122
-rw-r--r--tools/perf/util/bpf_map.c1
-rw-r--r--tools/perf/util/bpf_off_cpu.c120
-rw-r--r--tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c7
-rw-r--r--tools/perf/util/bpf_skel/bperf_cgroup.bpf.c18
-rw-r--r--tools/perf/util/bpf_skel/bperf_cgroup.h15
-rw-r--r--tools/perf/util/bpf_skel/func_latency.bpf.c148
-rw-r--r--tools/perf/util/bpf_skel/kwork_top.bpf.c2
-rw-r--r--tools/perf/util/bpf_skel/lock_contention.bpf.c107
-rw-r--r--tools/perf/util/bpf_skel/lock_data.h1
-rw-r--r--tools/perf/util/bpf_skel/off_cpu.bpf.c98
-rw-r--r--tools/perf/util/bpf_skel/perf_version.h17
-rw-r--r--tools/perf/util/bpf_skel/sample_filter.bpf.c2
-rw-r--r--tools/perf/util/bpf_skel/syscall_summary.bpf.c153
-rw-r--r--tools/perf/util/bpf_skel/syscall_summary.h27
-rw-r--r--tools/perf/util/bpf_skel/vmlinux/vmlinux.h9
-rw-r--r--tools/perf/util/bpf_trace_augment.c143
-rw-r--r--tools/perf/util/branch.c2
-rw-r--r--tools/perf/util/build-id.c70
-rw-r--r--tools/perf/util/build-id.h8
-rw-r--r--tools/perf/util/callchain.c51
-rw-r--r--tools/perf/util/callchain.h4
-rw-r--r--tools/perf/util/cap.c1
-rw-r--r--tools/perf/util/cap.h5
-rw-r--r--tools/perf/util/capstone.c471
-rw-r--r--tools/perf/util/capstone.h24
-rw-r--r--tools/perf/util/cgroup.c24
-rw-r--r--tools/perf/util/cgroup.h3
-rw-r--r--tools/perf/util/comm.c2
-rw-r--r--tools/perf/util/config.c5
-rw-r--r--tools/perf/util/cpumap.c9
-rw-r--r--tools/perf/util/cs-etm-decoder/Build2
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.c44
-rw-r--r--tools/perf/util/cs-etm.c7
-rw-r--r--tools/perf/util/data-convert-bt.c16
-rw-r--r--tools/perf/util/data-convert-json.c36
-rw-r--r--tools/perf/util/db-export.c11
-rw-r--r--tools/perf/util/debug.c75
-rw-r--r--tools/perf/util/debug.h1
-rw-r--r--tools/perf/util/debuginfo.c10
-rw-r--r--tools/perf/util/demangle-cxx.h2
-rw-r--r--tools/perf/util/demangle-rust-v0.c2042
-rw-r--r--tools/perf/util/demangle-rust-v0.h88
-rw-r--r--tools/perf/util/demangle-rust.c269
-rw-r--r--tools/perf/util/demangle-rust.h8
-rw-r--r--tools/perf/util/disasm.c661
-rw-r--r--tools/perf/util/disasm.h6
-rw-r--r--tools/perf/util/disasm_bpf.c195
-rw-r--r--tools/perf/util/disasm_bpf.h12
-rw-r--r--tools/perf/util/dlfilter.c2
-rw-r--r--tools/perf/util/drm_pmu.c689
-rw-r--r--tools/perf/util/drm_pmu.h39
-rw-r--r--tools/perf/util/dso.c272
-rw-r--r--tools/perf/util/dso.h100
-rw-r--r--tools/perf/util/dsos.c23
-rw-r--r--tools/perf/util/dwarf-aux.c69
-rw-r--r--tools/perf/util/dwarf-aux.h2
-rw-r--r--tools/perf/util/env.c154
-rw-r--r--tools/perf/util/env.h11
-rw-r--r--tools/perf/util/event.c30
-rw-r--r--tools/perf/util/event.h26
-rw-r--r--tools/perf/util/evlist.c139
-rw-r--r--tools/perf/util/evlist.h17
-rw-r--r--tools/perf/util/evsel.c527
-rw-r--r--tools/perf/util/evsel.h35
-rw-r--r--tools/perf/util/evsel_config.h2
-rw-r--r--tools/perf/util/evsel_fprintf.c5
-rw-r--r--tools/perf/util/evswitch.c1
-rw-r--r--tools/perf/util/expr.c14
-rw-r--r--tools/perf/util/fncache.c69
-rw-r--r--tools/perf/util/fncache.h1
-rw-r--r--tools/perf/util/ftrace.h5
-rw-r--r--tools/perf/util/genelf.c117
-rw-r--r--tools/perf/util/get_current_dir_name.c18
-rw-r--r--tools/perf/util/get_current_dir_name.h8
-rw-r--r--tools/perf/util/header.c263
-rw-r--r--tools/perf/util/header.h7
-rw-r--r--tools/perf/util/hisi-ptt-decoder/Build2
-rw-r--r--tools/perf/util/hist.c88
-rw-r--r--tools/perf/util/hist.h50
-rw-r--r--tools/perf/util/hwmon_pmu.c86
-rw-r--r--tools/perf/util/hwmon_pmu.h6
-rw-r--r--tools/perf/util/include/linux/linkage.h6
-rw-r--r--tools/perf/util/intel-bts.c4
-rw-r--r--tools/perf/util/intel-pt-decoder/Build8
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c2
-rw-r--r--tools/perf/util/intel-pt.c209
-rw-r--r--tools/perf/util/intel-tpebs.c735
-rw-r--r--tools/perf/util/intel-tpebs.h40
-rw-r--r--tools/perf/util/jitdump.c26
-rw-r--r--tools/perf/util/kvm-stat.h11
-rw-r--r--tools/perf/util/libbfd.c643
-rw-r--r--tools/perf/util/libbfd.h82
-rw-r--r--tools/perf/util/llvm.c273
-rw-r--r--tools/perf/util/llvm.h21
-rw-r--r--tools/perf/util/lock-contention.h9
-rw-r--r--tools/perf/util/lzma.c2
-rw-r--r--tools/perf/util/machine.c92
-rw-r--r--tools/perf/util/machine.h5
-rw-r--r--tools/perf/util/map.c34
-rw-r--r--tools/perf/util/map.h11
-rw-r--r--tools/perf/util/maps.c40
-rw-r--r--tools/perf/util/mem-events.c183
-rw-r--r--tools/perf/util/mem-events.h57
-rw-r--r--tools/perf/util/metricgroup.c457
-rw-r--r--tools/perf/util/metricgroup.h14
-rw-r--r--tools/perf/util/mmap.c1
-rw-r--r--tools/perf/util/mutex.c14
-rw-r--r--tools/perf/util/mutex.h13
-rw-r--r--tools/perf/util/namespaces.c7
-rw-r--r--tools/perf/util/off_cpu.h3
-rw-r--r--tools/perf/util/parse-events.c954
-rw-r--r--tools/perf/util/parse-events.h31
-rw-r--r--tools/perf/util/parse-events.l91
-rw-r--r--tools/perf/util/parse-events.y121
-rw-r--r--tools/perf/util/perf_api_probe.c27
-rw-r--r--tools/perf/util/perf_event_attr_fprintf.c2
-rw-r--r--tools/perf/util/pfm.c7
-rw-r--r--tools/perf/util/pmu.c607
-rw-r--r--tools/perf/util/pmu.h47
-rw-r--r--tools/perf/util/pmus.c145
-rw-r--r--tools/perf/util/pmus.h7
-rw-r--r--tools/perf/util/powerpc-vpadtl.c733
-rw-r--r--tools/perf/util/powerpc-vpadtl.h23
-rw-r--r--tools/perf/util/print-events.c310
-rw-r--r--tools/perf/util/print-events.h11
-rw-r--r--tools/perf/util/print_insn.c117
-rw-r--r--tools/perf/util/probe-event.c24
-rw-r--r--tools/perf/util/probe-file.c4
-rw-r--r--tools/perf/util/probe-finder.c5
-rw-r--r--tools/perf/util/python.c875
-rw-r--r--tools/perf/util/record.h2
-rw-r--r--tools/perf/util/rwsem.c4
-rw-r--r--tools/perf/util/rwsem.h10
-rw-r--r--tools/perf/util/s390-cpumsf.c2
-rw-r--r--tools/perf/util/s390-sample-raw.c55
-rw-r--r--tools/perf/util/sample-raw.c7
-rw-r--r--tools/perf/util/sample-raw.h2
-rw-r--r--tools/perf/util/sample.h8
-rw-r--r--tools/perf/util/scripting-engines/Build2
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c9
-rw-r--r--tools/perf/util/session.c223
-rw-r--r--tools/perf/util/session.h10
-rw-r--r--tools/perf/util/setup.py14
-rw-r--r--tools/perf/util/sha1.c97
-rw-r--r--tools/perf/util/sha1.h6
-rw-r--r--tools/perf/util/sort.c211
-rw-r--r--tools/perf/util/sort.h8
-rw-r--r--tools/perf/util/spark.c8
-rw-r--r--tools/perf/util/spark.h1
-rw-r--r--tools/perf/util/srccode.c4
-rw-r--r--tools/perf/util/srcline.c772
-rw-r--r--tools/perf/util/srcline.h9
-rw-r--r--tools/perf/util/stat-display.c344
-rw-r--r--tools/perf/util/stat-shadow.c559
-rw-r--r--tools/perf/util/stat.c103
-rw-r--r--tools/perf/util/stat.h45
-rw-r--r--tools/perf/util/symbol-elf.c180
-rw-r--r--tools/perf/util/symbol-minimal.c197
-rw-r--r--tools/perf/util/symbol.c262
-rw-r--r--tools/perf/util/symbol_conf.h2
-rw-r--r--tools/perf/util/synthetic-events.c58
-rw-r--r--tools/perf/util/synthetic-events.h17
-rw-r--r--tools/perf/util/target.c54
-rw-r--r--tools/perf/util/target.h15
-rw-r--r--tools/perf/util/thread.c35
-rw-r--r--tools/perf/util/thread.h11
-rw-r--r--tools/perf/util/thread_map.c32
-rw-r--r--tools/perf/util/thread_map.h6
-rw-r--r--tools/perf/util/tool.c247
-rw-r--r--tools/perf/util/tool.h26
-rw-r--r--tools/perf/util/tool_pmu.c168
-rw-r--r--tools/perf/util/tool_pmu.h10
-rw-r--r--tools/perf/util/top.c4
-rw-r--r--tools/perf/util/top.h1
-rw-r--r--tools/perf/util/tp_pmu.c208
-rw-r--r--tools/perf/util/tp_pmu.h19
-rw-r--r--tools/perf/util/trace.h38
-rw-r--r--tools/perf/util/trace_augment.h62
-rw-r--r--tools/perf/util/unwind-libdw.c7
-rw-r--r--tools/perf/util/zlib.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/oslinuxtbl.c4
-rw-r--r--tools/power/acpi/tools/acpidump/apdump.c3
-rw-r--r--tools/power/acpi/tools/acpidump/apfiles.c2
-rw-r--r--tools/power/acpi/tools/pfrut/pfrut.c7
-rw-r--r--tools/power/cpupower/.gitignore3
-rw-r--r--tools/power/cpupower/Makefile41
-rw-r--r--tools/power/cpupower/bindings/python/Makefile12
-rw-r--r--tools/power/cpupower/lib/cpuidle.c5
-rw-r--r--tools/power/cpupower/lib/cpupower.c2
-rw-r--r--tools/power/cpupower/man/cpupower-set.17
-rw-r--r--tools/power/cpupower/utils/cpufreq-info.c16
-rw-r--r--tools/power/cpupower/utils/cpupower-set.c5
-rw-r--r--tools/power/cpupower/utils/helpers/helpers.h14
-rw-r--r--tools/power/cpupower/utils/helpers/misc.c76
-rw-r--r--tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c4
-rw-r--r--tools/power/cpupower/utils/idle_monitor/mperf_monitor.c4
-rwxr-xr-xtools/power/x86/amd_pstate_tracer/amd_pstate_trace.py2
-rw-r--r--tools/power/x86/intel-speed-select/isst-config.c2
-rw-r--r--tools/power/x86/intel-speed-select/isst-core-tpmi.c46
-rw-r--r--tools/power/x86/turbostat/turbostat.842
-rw-r--r--tools/power/x86/turbostat/turbostat.c2289
-rw-r--r--tools/power/x86/x86_energy_perf_policy/Makefile29
-rw-r--r--tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.815
-rw-r--r--tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c178
-rw-r--r--  tools/sched/dl_bw_dump.py  57
-rw-r--r--  tools/sched/root_domains_dump.py  68
-rw-r--r--  tools/sched_ext/Makefile  4
-rw-r--r--  tools/sched_ext/include/scx/bpf_arena_common.bpf.h  175
-rw-r--r--  tools/sched_ext/include/scx/bpf_arena_common.h  33
-rw-r--r--  tools/sched_ext/include/scx/common.bpf.h  119
-rw-r--r--  tools/sched_ext/include/scx/common.h  5
-rw-r--r--  tools/sched_ext/include/scx/compat.bpf.h  330
-rw-r--r--  tools/sched_ext/include/scx/compat.h  14
-rw-r--r--  tools/sched_ext/include/scx/user_exit_info.bpf.h  40
-rw-r--r--  tools/sched_ext/include/scx/user_exit_info.h  49
-rw-r--r--  tools/sched_ext/include/scx/user_exit_info_common.h  30
-rw-r--r--  tools/sched_ext/scx_central.bpf.c  2
-rw-r--r--  tools/sched_ext/scx_central.c  1
-rw-r--r--  tools/sched_ext/scx_cpu0.bpf.c  88
-rw-r--r--  tools/sched_ext/scx_cpu0.c  106
-rw-r--r--  tools/sched_ext/scx_flatcg.bpf.c  12
-rw-r--r--  tools/sched_ext/scx_flatcg.c  2
-rw-r--r--  tools/sched_ext/scx_qmap.bpf.c  161
-rw-r--r--  tools/sched_ext/scx_qmap.c  12
-rw-r--r--  tools/sched_ext/scx_simple.c  2
-rw-r--r--  tools/scripts/Makefile.include  4
-rw-r--r--  tools/scripts/syscall.tbl  2
-rw-r--r--  tools/testing/cxl/Kbuild  12
-rw-r--r--  tools/testing/cxl/config_check.c  1
-rw-r--r--  tools/testing/cxl/cxl_core_exports.c  22
-rw-r--r--  tools/testing/cxl/exports.h  13
-rw-r--r--  tools/testing/cxl/test/Kbuild  1
-rw-r--r--  tools/testing/cxl/test/cxl.c  177
-rw-r--r--  tools/testing/cxl/test/cxl_translate.c  445
-rw-r--r--  tools/testing/cxl/test/mem.c  33
-rw-r--r--  tools/testing/cxl/test/mock.c  149
-rw-r--r--  tools/testing/cxl/test/mock.h  13
-rwxr-xr-x  tools/testing/ktest/config-bisect.pl  4
-rwxr-xr-x  tools/testing/ktest/ktest.pl  116
-rw-r--r--  tools/testing/ktest/sample.conf  2
-rw-r--r--  tools/testing/kunit/configs/all_tests.config  1
-rw-r--r--  tools/testing/kunit/configs/arch_uml.config  5
-rwxr-xr-x  tools/testing/kunit/kunit.py  4
-rw-r--r--  tools/testing/kunit/kunit_parser.py  8
-rw-r--r--  tools/testing/kunit/qemu_configs/mips.py  18
-rw-r--r--  tools/testing/kunit/qemu_configs/mips64.py  19
-rw-r--r--  tools/testing/kunit/qemu_configs/mips64el.py  19
-rw-r--r--  tools/testing/kunit/qemu_configs/mipsel.py  18
-rw-r--r--  tools/testing/kunit/test_data/test_is_test_passed-kselftest.log  3
-rw-r--r--  tools/testing/nvdimm/pmem-dax.c  6
-rw-r--r--  tools/testing/nvdimm/test/iomap.c  11
-rw-r--r--  tools/testing/nvdimm/test/ndtest.c  13
-rw-r--r--  tools/testing/nvdimm/test/nfit.c  7
-rw-r--r--  tools/testing/nvdimm/test/nfit_test.h  1
-rw-r--r--  tools/testing/radix-tree/idr-test.c  16
-rw-r--r--  tools/testing/radix-tree/maple.c  543
-rw-r--r--  tools/testing/scatterlist/linux/mm.h  1
-rw-r--r--  tools/testing/selftests/Makefile  16
-rw-r--r--  tools/testing/selftests/acct/acct_syscall.c  2
-rw-r--r--  tools/testing/selftests/alsa/conf.c  4
-rw-r--r--  tools/testing/selftests/alsa/mixer-test.c  10
-rw-r--r--  tools/testing/selftests/alsa/pcm-test.c  10
-rw-r--r--  tools/testing/selftests/alsa/test-pcmtest-driver.c  2
-rw-r--r--  tools/testing/selftests/alsa/utimer-test.c  3
-rw-r--r--  tools/testing/selftests/arm64/abi/Makefile  2
-rw-r--r--  tools/testing/selftests/arm64/abi/hwcap.c  40
-rw-r--r--  tools/testing/selftests/arm64/abi/ptrace.c  2
-rw-r--r--  tools/testing/selftests/arm64/abi/syscall-abi.c  2
-rw-r--r--  tools/testing/selftests/arm64/abi/tpidr2.c  142
-rw-r--r--  tools/testing/selftests/arm64/bti/assembler.h  1
-rw-r--r--  tools/testing/selftests/arm64/fp/fp-ptrace.c  78
-rw-r--r--  tools/testing/selftests/arm64/fp/fp-stress.c  8
-rw-r--r--  tools/testing/selftests/arm64/fp/kernel-test.c  4
-rw-r--r--  tools/testing/selftests/arm64/fp/sve-probe-vls.c  2
-rw-r--r--  tools/testing/selftests/arm64/fp/sve-ptrace.c  179
-rw-r--r--  tools/testing/selftests/arm64/fp/vec-syscfg.c  3
-rw-r--r--  tools/testing/selftests/arm64/fp/za-ptrace.c  2
-rw-r--r--  tools/testing/selftests/arm64/fp/zt-ptrace.c  3
-rw-r--r--  tools/testing/selftests/arm64/fp/zt-test.S  2
-rw-r--r--  tools/testing/selftests/arm64/gcs/Makefile  6
-rw-r--r--  tools/testing/selftests/arm64/gcs/basic-gcs.c  75
-rw-r--r--  tools/testing/selftests/arm64/gcs/gcs-locking.c  1
-rw-r--r--  tools/testing/selftests/arm64/gcs/gcs-stress.c  4
-rw-r--r--  tools/testing/selftests/arm64/mte/check_buffer_fill.c  12
-rw-r--r--  tools/testing/selftests/arm64/mte/check_child_memory.c  8
-rw-r--r--  tools/testing/selftests/arm64/mte/check_hugetlb_options.c  10
-rw-r--r--  tools/testing/selftests/arm64/mte/check_ksm_options.c  6
-rw-r--r--  tools/testing/selftests/arm64/mte/check_mmap_options.c  896
-rw-r--r--  tools/testing/selftests/arm64/mte/check_prctl.c  29
-rw-r--r--  tools/testing/selftests/arm64/mte/check_tags_inclusion.c  10
-rw-r--r--  tools/testing/selftests/arm64/mte/check_user_mem.c  4
-rw-r--r--  tools/testing/selftests/arm64/mte/mte_common_util.c  84
-rw-r--r--  tools/testing/selftests/arm64/mte/mte_common_util.h  9
-rw-r--r--  tools/testing/selftests/arm64/mte/mte_def.h  8
-rw-r--r--  tools/testing/selftests/arm64/pauth/exec_target.c  7
-rw-r--r--  tools/testing/selftests/arm64/pauth/pac.c  2
-rw-r--r--  tools/testing/selftests/arm64/tags/tags_test.c  2
-rw-r--r--  tools/testing/selftests/bpf/.gitignore  4
-rw-r--r--  tools/testing/selftests/bpf/DENYLIST  1
-rw-r--r--  tools/testing/selftests/bpf/DENYLIST.aarch64  1
-rw-r--r--  tools/testing/selftests/bpf/DENYLIST.s390x  1
-rw-r--r--  tools/testing/selftests/bpf/Makefile  96
-rw-r--r--  tools/testing/selftests/bpf/bench.c  22
-rw-r--r--  tools/testing/selftests/bpf/bench.h  1
-rw-r--r--  tools/testing/selftests/bpf/benchs/bench_lpm_trie_map.c  555
-rw-r--r--  tools/testing/selftests/bpf/benchs/bench_ringbufs.c  65
-rw-r--r--  tools/testing/selftests/bpf/benchs/bench_sockmap.c  5
-rw-r--r--  tools/testing/selftests/bpf/benchs/bench_trigger.c  65
-rwxr-xr-x  tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh  4
-rwxr-xr-x  tools/testing/selftests/bpf/benchs/run_bench_trigger.sh  4
-rw-r--r--  tools/testing/selftests/bpf/bpf_arena_common.h  3
-rw-r--r--  tools/testing/selftests/bpf/bpf_arena_list.h  6
-rw-r--r--  tools/testing/selftests/bpf/bpf_arena_strsearch.h  128
-rw-r--r--  tools/testing/selftests/bpf/bpf_atomic.h  2
-rw-r--r--  tools/testing/selftests/bpf/bpf_experimental.h  59
-rw-r--r--  tools/testing/selftests/bpf/bpf_kfuncs.h  17
-rw-r--r--  tools/testing/selftests/bpf/bpf_util.h  3
-rw-r--r--  tools/testing/selftests/bpf/cgroup_helpers.c  41
-rw-r--r--  tools/testing/selftests/bpf/cgroup_helpers.h  5
-rw-r--r--  tools/testing/selftests/bpf/config  13
-rw-r--r--  tools/testing/selftests/bpf/config.aarch64  12
-rw-r--r--  tools/testing/selftests/bpf/config.ppc64el  92
-rw-r--r--  tools/testing/selftests/bpf/config.riscv64  1
-rw-r--r--  tools/testing/selftests/bpf/config.s390x  11
-rw-r--r--  tools/testing/selftests/bpf/config.x86_64  5
-rw-r--r--  tools/testing/selftests/bpf/network_helpers.c  54
-rw-r--r--  tools/testing/selftests/bpf/network_helpers.h  16
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/align.c  178
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c  13
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/arena_strsearch.c  30
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/arg_parsing.c  12
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/atomics.c  10
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/attach_probe.c  28
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c  2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_cookie.c  53
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_gotox.c  292
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_insn_array.c  504
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_iter.c  2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c  2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/btf.c  65
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/btf_dump.c  120
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/btf_split.c  87
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cgroup_mprog_opts.c  617
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cgroup_mprog_ordering.c  77
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cgroup_xattr.c  72
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c  71
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/check_mtu.c  23
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cls_redirect.c  122
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/dynptr.c  20
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/fd_array.c  4
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/fentry_fexit.c  15
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/fentry_test.c  9
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/fexit_noreturns.c  9
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/fexit_test.c  9
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/file_reader.c  117
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/free_timer.c  4
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/htab_update.c  37
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/kernel_flag.c  2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c  3
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c  247
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/linked_list.c  2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/livepatch_trampoline.c  107
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/log_buf.c  4
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/map_excl.c  54
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/mem_rdonly_untrusted.c  9
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/module_attach.c  2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/mptcp.c  140
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/perf_branches.c  22
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/pinning_devmap_reuse.c  50
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/pinning_htab.c  36
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c  125
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c  4
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/recursive_attach.c  67
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c  56
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/reg_bounds.c  18
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/res_spin_lock.c  16
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/ringbuf.c  69
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/select_reuseport.c  67
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/send_signal.c  5
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/sha256.c  52
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/sk_bypass_prot_mem.c  292
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/snprintf.c  2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c  458
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/socket_helpers.h  9
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c  91
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/sockmap_listen.c  2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/spin_lock.c  12
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c  2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c  2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c  150
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/stacktrace_map.c  71
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c  4
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c  2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/stream.c  108
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/string_kfuncs.c  68
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/tailcalls.c  2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/task_local_data.h  386
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/task_work_stress.c  130
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/tc_helpers.h  28
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/test_bpf_smc.c  390
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/test_lsm.c  2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/test_struct_ops_id_ops_mapping.c  74
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/test_sysctl.c (renamed from tools/testing/selftests/bpf/test_sysctl.c)  37
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/test_task_local_data.c  297
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/test_task_work.c  157
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/test_tc_edt.c  145
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/test_tc_tunnel.c  714
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/test_tunnel.c  107
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/test_veristat.c  157
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/test_xsk.c  2596
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/test_xsk.h  298
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/timer.c  38
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/timer_crash.c  4
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/timer_lockup.c  4
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/timer_mim.c  4
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/token.c  85
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/tracing_failure.c  52
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/tracing_struct.c  29
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/uprobe.c  156
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c  486
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/usdt.c  135
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/user_ringbuf.c  10
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/verifier.c  10
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c  2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/wq.c  56
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c  114
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c  265
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c  31
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c  13
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/xdp_pull_data.c  179
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/xsk.c  151
-rw-r--r--  tools/testing/selftests/bpf/progs/arena_atomics.c  9
-rw-r--r--  tools/testing/selftests/bpf/progs/arena_spin_lock.c  5
-rw-r--r--  tools/testing/selftests/bpf/progs/arena_strsearch.c  146
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h  4
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_cc_cubic.c  11
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_cubic.c  7
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_dctcp.c  8
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_gotox.c  448
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_iter_map_elem.c  22
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c  17
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c  8
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c  8
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_iter_udp4.c  3
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_iter_udp6.c  4
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_misc.h  53
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_smc.c  117
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_test_utils.h  18
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_tracing_net.h  14
-rw-r--r--  tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c  6
-rw-r--r--  tools/testing/selftests/bpf/progs/cgroup_mprog.c  30
-rw-r--r--  tools/testing/selftests/bpf/progs/cgroup_read_xattr.c  158
-rw-r--r--  tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c  12
-rw-r--r--  tools/testing/selftests/bpf/progs/compute_live_registers.c  16
-rw-r--r--  tools/testing/selftests/bpf/progs/connect4_prog.c  21
-rw-r--r--  tools/testing/selftests/bpf/progs/crypto_sanity.c  46
-rw-r--r--  tools/testing/selftests/bpf/progs/dynptr_fail.c  258
-rw-r--r--  tools/testing/selftests/bpf/progs/dynptr_success.c  241
-rw-r--r--  tools/testing/selftests/bpf/progs/exceptions_assert.c  34
-rw-r--r--  tools/testing/selftests/bpf/progs/fexit_noreturns.c  15
-rw-r--r--  tools/testing/selftests/bpf/progs/file_reader.c  145
-rw-r--r--  tools/testing/selftests/bpf/progs/file_reader_fail.c  52
-rw-r--r--  tools/testing/selftests/bpf/progs/freplace_connect_v4_prog.c  2
-rw-r--r--  tools/testing/selftests/bpf/progs/htab_update.c  19
-rw-r--r--  tools/testing/selftests/bpf/progs/ip_check_defrag.c  5
-rw-r--r--  tools/testing/selftests/bpf/progs/iters.c  277
-rw-r--r--  tools/testing/selftests/bpf/progs/iters_looping.c  53
-rw-r--r--  tools/testing/selftests/bpf/progs/iters_state_safety.c  6
-rw-r--r--  tools/testing/selftests/bpf/progs/iters_task_failure.c  4
-rw-r--r--  tools/testing/selftests/bpf/progs/iters_testmod.c  46
-rw-r--r--  tools/testing/selftests/bpf/progs/iters_testmod_seq.c  6
-rw-r--r--  tools/testing/selftests/bpf/progs/kprobe_write_ctx.c  22
-rw-r--r--  tools/testing/selftests/bpf/progs/linked_list_fail.c  5
-rw-r--r--  tools/testing/selftests/bpf/progs/livepatch_trampoline.c  30
-rw-r--r--  tools/testing/selftests/bpf/progs/loop1.c  7
-rw-r--r--  tools/testing/selftests/bpf/progs/loop2.c  7
-rw-r--r--  tools/testing/selftests/bpf/progs/loop3.c  7
-rw-r--r--  tools/testing/selftests/bpf/progs/loop6.c  21
-rw-r--r--  tools/testing/selftests/bpf/progs/lpm_trie.h  30
-rw-r--r--  tools/testing/selftests/bpf/progs/lpm_trie_bench.c  230
-rw-r--r--  tools/testing/selftests/bpf/progs/lpm_trie_map.c  19
-rw-r--r--  tools/testing/selftests/bpf/progs/lsm.c  8
-rw-r--r--  tools/testing/selftests/bpf/progs/lsm_tailcall.c  8
-rw-r--r--  tools/testing/selftests/bpf/progs/map_excl.c  34
-rw-r--r--  tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c  229
-rw-r--r--  tools/testing/selftests/bpf/progs/mptcp_sockmap.c  43
-rw-r--r--  tools/testing/selftests/bpf/progs/mptcp_subflow.c  2
-rw-r--r--  tools/testing/selftests/bpf/progs/rbtree.c  14
-rw-r--r--  tools/testing/selftests/bpf/progs/rbtree_search.c  2
-rw-r--r--  tools/testing/selftests/bpf/progs/rcu_read_lock.c  45
-rw-r--r--  tools/testing/selftests/bpf/progs/read_cgroupfs_xattr.c  60
-rw-r--r--  tools/testing/selftests/bpf/progs/refcounted_kptr.c  60
-rw-r--r--  tools/testing/selftests/bpf/progs/ringbuf_bench.c  11
-rw-r--r--  tools/testing/selftests/bpf/progs/security_bpf_map.c  69
-rw-r--r--  tools/testing/selftests/bpf/progs/set_global_vars.c  56
-rw-r--r--  tools/testing/selftests/bpf/progs/sk_bypass_prot_mem.c  104
-rw-r--r--  tools/testing/selftests/bpf/progs/sock_iter_batch.c  36
-rw-r--r--  tools/testing/selftests/bpf/progs/stacktrace_ips.c  49
-rw-r--r--  tools/testing/selftests/bpf/progs/stacktrace_map.c (renamed from tools/testing/selftests/bpf/progs/test_stacktrace_map.c)  2
-rw-r--r--  tools/testing/selftests/bpf/progs/stream.c  237
-rw-r--r--  tools/testing/selftests/bpf/progs/stream_fail.c  33
-rw-r--r--  tools/testing/selftests/bpf/progs/string_kfuncs_failure1.c  105
-rw-r--r--  tools/testing/selftests/bpf/progs/string_kfuncs_failure2.c  26
-rw-r--r--  tools/testing/selftests/bpf/progs/string_kfuncs_success.c  56
-rw-r--r--  tools/testing/selftests/bpf/progs/strobemeta.h  6
-rw-r--r--  tools/testing/selftests/bpf/progs/struct_ops_id_ops_mapping1.c  59
-rw-r--r--  tools/testing/selftests/bpf/progs/struct_ops_id_ops_mapping2.c  59
-rw-r--r--  tools/testing/selftests/bpf/progs/struct_ops_kptr_return.c  2
-rw-r--r--  tools/testing/selftests/bpf/progs/struct_ops_private_stack.c  2
-rw-r--r--  tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c  2
-rw-r--r--  tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c  2
-rw-r--r--  tools/testing/selftests/bpf/progs/struct_ops_refcounted.c  2
-rw-r--r--  tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy1.c  3
-rw-r--r--  tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy2.c  3
-rw-r--r--  tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy3.c  3
-rw-r--r--  tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy_fentry.c  3
-rw-r--r--  tools/testing/selftests/bpf/progs/task_local_data.bpf.h  237
-rw-r--r--  tools/testing/selftests/bpf/progs/task_work.c  107
-rw-r--r--  tools/testing/selftests/bpf/progs/task_work_fail.c  96
-rw-r--r--  tools/testing/selftests/bpf/progs/task_work_stress.c  73
-rw-r--r--  tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c  2
-rw-r--r--  tools/testing/selftests/bpf/progs/test_check_mtu.c  12
-rw-r--r--  tools/testing/selftests/bpf/progs/test_cls_redirect.c  6
-rw-r--r--  tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c  2
-rw-r--r--  tools/testing/selftests/bpf/progs/test_global_map_resize.c  16
-rw-r--r--  tools/testing/selftests/bpf/progs/test_lookup_key.c  4
-rw-r--r--  tools/testing/selftests/bpf/progs/test_overhead.c  5
-rw-r--r--  tools/testing/selftests/bpf/progs/test_perf_branches.c  3
-rw-r--r--  tools/testing/selftests/bpf/progs/test_pinning_devmap.c  20
-rw-r--r--  tools/testing/selftests/bpf/progs/test_pinning_htab.c  25
-rw-r--r--  tools/testing/selftests/bpf/progs/test_ringbuf_overwrite.c  98
-rw-r--r--  tools/testing/selftests/bpf/progs/test_ringbuf_write.c  4
-rw-r--r--  tools/testing/selftests/bpf/progs/test_sig_in_xattr.c  2
-rw-r--r--  tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c  9
-rw-r--r--  tools/testing/selftests/bpf/progs/test_sockmap_ktls.c  4
-rw-r--r--  tools/testing/selftests/bpf/progs/test_task_local_data.c  65
-rw-r--r--  tools/testing/selftests/bpf/progs/test_tc_change_tail.c  14
-rw-r--r--  tools/testing/selftests/bpf/progs/test_tc_edt.c  11
-rw-r--r--  tools/testing/selftests/bpf/progs/test_tc_tunnel.c  95
-rw-r--r--  tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c  5
-rw-r--r--  tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c  1
-rw-r--r--  tools/testing/selftests/bpf/progs/test_uprobe.c  38
-rw-r--r--  tools/testing/selftests/bpf/progs/test_usdt.c  31
-rw-r--r--  tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c  2
-rw-r--r--  tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c  8
-rw-r--r--  tools/testing/selftests/bpf/progs/test_xdp_devmap_tailcall.c  29
-rw-r--r--  tools/testing/selftests/bpf/progs/test_xdp_meta.c  637
-rw-r--r--  tools/testing/selftests/bpf/progs/test_xdp_pull_data.c  48
-rw-r--r--  tools/testing/selftests/bpf/progs/timer_interrupt.c  48
-rw-r--r--  tools/testing/selftests/bpf/progs/tracing_failure.c  12
-rw-r--r--  tools/testing/selftests/bpf/progs/tracing_struct.c  33
-rw-r--r--  tools/testing/selftests/bpf/progs/trigger_bench.c  18
-rw-r--r--  tools/testing/selftests/bpf/progs/uprobe_syscall.c  4
-rw-r--r--  tools/testing/selftests/bpf/progs/uprobe_syscall_executed.c  60
-rw-r--r--  tools/testing/selftests/bpf/progs/uretprobe_stack.c  4
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_and.c  8
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_arena.c  106
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_arena_large.c  99
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_async_cb_context.c  181
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_bounds.c  573
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c  11
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c  27
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_ctx.c  76
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c  59
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_div_overflow.c  4
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c  128
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_gotox.c  389
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_ldsx.c  178
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_live_stack.c  344
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_loops1.c  21
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_lsm.c  4
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_map_in_map.c  118
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_map_ptr.c  7
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_may_goto_1.c  38
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_movsx.c  16
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_mul.c  38
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c  5
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_precision.c  78
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_private_stack.c  89
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_ref_tracking.c  2
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_scalar_ids.c  12
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_sock.c  87
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_spill_fill.c  40
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_subprog_precision.c  59
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_tailcall.c  31
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_unpriv.c  233
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c  47
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c  38
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_var_off.c  6
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_vfs_accept.c  20
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_vfs_reject.c  15
-rw-r--r--  tools/testing/selftests/bpf/progs/wq.c  17
-rw-r--r--  tools/testing/selftests/bpf/progs/wq_failures.c  23
-rwxr-xr-x  tools/testing/selftests/bpf/test_bpftool_build.sh  4
-rwxr-xr-x  tools/testing/selftests/bpf/test_bpftool_map.sh  398
-rw-r--r--  tools/testing/selftests/bpf/test_kmods/Makefile  2
-rw-r--r--  tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c  393
-rw-r--r--  tools/testing/selftests/bpf/test_kmods/bpf_testmod.c  202
-rw-r--r--  tools/testing/selftests/bpf/test_kmods/bpf_testmod.h  6
-rw-r--r--  tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h  4
-rw-r--r--  tools/testing/selftests/bpf/test_lirc_mode2_user.c  2
-rw-r--r--  tools/testing/selftests/bpf/test_loader.c  357
-rw-r--r--  tools/testing/selftests/bpf/test_lru_map.c  105
-rw-r--r--  tools/testing/selftests/bpf/test_maps.c  7
-rw-r--r--  tools/testing/selftests/bpf/test_progs.c  13
-rw-r--r--  tools/testing/selftests/bpf/test_progs.h  45
-rw-r--r--  tools/testing/selftests/bpf/test_sockmap.c  2
-rw-r--r--  tools/testing/selftests/bpf/test_tag.c  2
-rwxr-xr-x  tools/testing/selftests/bpf/test_tc_edt.sh  100
-rwxr-xr-x  tools/testing/selftests/bpf/test_tc_tunnel.sh  320
-rw-r--r--  tools/testing/selftests/bpf/test_tcpnotify_user.c  20
-rwxr-xr-x  tools/testing/selftests/bpf/test_xsk.sh  2
-rw-r--r--  tools/testing/selftests/bpf/testing_helpers.c  14
-rw-r--r--  tools/testing/selftests/bpf/testing_helpers.h  1
-rw-r--r--  tools/testing/selftests/bpf/trace_helpers.c  234
-rw-r--r--  tools/testing/selftests/bpf/trace_helpers.h  3
-rw-r--r--  tools/testing/selftests/bpf/unpriv_helpers.c  94
-rw-r--r--  tools/testing/selftests/bpf/usdt.h  545
-rw-r--r--  tools/testing/selftests/bpf/verifier/bpf_st_mem.c  4
-rw-r--r--  tools/testing/selftests/bpf/verifier/calls.c  32
-rw-r--r--  tools/testing/selftests/bpf/verifier/dead_code.c  3
-rw-r--r--  tools/testing/selftests/bpf/verifier/jmp32.c  33
-rw-r--r--  tools/testing/selftests/bpf/verifier/jset.c  10
-rwxr-xr-x  tools/testing/selftests/bpf/verify_sig_setup.sh  11
-rw-r--r--  tools/testing/selftests/bpf/veristat.c  666
-rwxr-xr-x  tools/testing/selftests/bpf/vmtest.sh  9
-rw-r--r--  tools/testing/selftests/bpf/xdping.c  2
-rw-r--r--  tools/testing/selftests/bpf/xsk.h  4
-rw-r--r--  tools/testing/selftests/bpf/xskxceiver.c  2472
-rw-r--r--  tools/testing/selftests/bpf/xskxceiver.h  155
-rw-r--r--  tools/testing/selftests/breakpoints/breakpoint_test.c  2
-rw-r--r--  tools/testing/selftests/breakpoints/breakpoint_test_arm64.c  2
-rw-r--r--  tools/testing/selftests/breakpoints/step_after_suspend_test.c  43
-rw-r--r--  tools/testing/selftests/cachestat/.gitignore  1
-rw-r--r--  tools/testing/selftests/cachestat/test_cachestat.c  66
-rw-r--r--  tools/testing/selftests/capabilities/test_execve.c  2
-rw-r--r--  tools/testing/selftests/capabilities/validate_cap.c  2
-rw-r--r--  tools/testing/selftests/cgroup/lib/cgroup_util.c  12
-rw-r--r--  tools/testing/selftests/cgroup/lib/include/cgroup_util.h  21
-rw-r--r--  tools/testing/selftests/cgroup/test_core.c  9
-rw-r--r--  tools/testing/selftests/cgroup/test_cpu.c  86
-rw-r--r--  tools/testing/selftests/cgroup/test_cpuset.c  9
-rw-r--r--  tools/testing/selftests/cgroup/test_freezer.c  672
-rw-r--r--  tools/testing/selftests/cgroup/test_hugetlb_memcg.c  2
-rw-r--r--  tools/testing/selftests/cgroup/test_kill.c  9
-rw-r--r--  tools/testing/selftests/cgroup/test_kmem.c  9
-rw-r--r--  tools/testing/selftests/cgroup/test_memcontrol.c  9
-rw-r--r--  tools/testing/selftests/cgroup/test_pids.c  5
-rw-r--r--  tools/testing/selftests/cgroup/test_zswap.c  9
-rw-r--r--  tools/testing/selftests/clone3/clone3.c  2
-rw-r--r--  tools/testing/selftests/clone3/clone3_cap_checkpoint_restore.c  2
-rw-r--r--  tools/testing/selftests/clone3/clone3_clear_sighand.c  2
-rw-r--r--  tools/testing/selftests/clone3/clone3_selftests.h  2
-rw-r--r--  tools/testing/selftests/clone3/clone3_set_tid.c  2
-rw-r--r--  tools/testing/selftests/connector/proc_filter.c  2
-rw-r--r--  tools/testing/selftests/core/close_range_test.c  2
-rw-r--r--  tools/testing/selftests/core/unshare_test.c  2
-rw-r--r--  tools/testing/selftests/coredump/.gitignore  4
-rw-r--r--  tools/testing/selftests/coredump/Makefile  10
-rw-r--r--  tools/testing/selftests/coredump/config  3
-rw-r--r--  tools/testing/selftests/coredump/coredump_socket_protocol_test.c  1568
-rw-r--r--  tools/testing/selftests/coredump/coredump_socket_test.c  742
-rw-r--r--  tools/testing/selftests/coredump/coredump_test.h  59
-rw-r--r--  tools/testing/selftests/coredump/coredump_test_helpers.c  383
-rw-r--r--  tools/testing/selftests/coredump/stackdump_test.c  505
-rwxr-xr-x  tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh  4
-rw-r--r--  tools/testing/selftests/damon/Makefile  5
-rw-r--r--  tools/testing/selftests/damon/_common.sh  11
-rw-r--r--  tools/testing/selftests/damon/_damon_sysfs.py  315
-rw-r--r--  tools/testing/selftests/damon/access_memory_even.c  1
-rwxr-xr-x  tools/testing/selftests/damon/drgn_dump_damon_status.py  223
-rwxr-xr-x  tools/testing/selftests/damon/lru_sort.sh  8
-rwxr-xr-x  tools/testing/selftests/damon/reclaim.sh  8
-rwxr-xr-x  tools/testing/selftests/damon/sysfs.py  303
-rwxr-xr-x  tools/testing/selftests/damon/sysfs.sh  11
-rwxr-xr-x  tools/testing/selftests/damon/sysfs_memcg_path_leak.sh  43
-rwxr-xr-x  tools/testing/selftests/damon/sysfs_no_op_commit_break.py  72
-rwxr-xr-x  tools/testing/selftests/damon/sysfs_update_removed_scheme_dir.sh  8
-rw-r--r--  tools/testing/selftests/dma/Makefile  7
-rw-r--r--  tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c  2
-rw-r--r--  tools/testing/selftests/drivers/dma-buf/udmabuf.c  22
-rw-r--r--  tools/testing/selftests/drivers/net/.gitignore  2
-rw-r--r--  tools/testing/selftests/drivers/net/Makefile  22
-rw-r--r--  tools/testing/selftests/drivers/net/bonding/Makefile  20
-rwxr-xr-x  tools/testing/selftests/drivers/net/bonding/bond_ipsec_offload.sh  156
-rwxr-xr-x  tools/testing/selftests/drivers/net/bonding/bond_lacp_prio.sh  108
-rwxr-xr-x  tools/testing/selftests/drivers/net/bonding/bond_macvlan_ipvlan.sh  1
-rwxr-xr-x  tools/testing/selftests/drivers/net/bonding/bond_options.sh  197
-rwxr-xr-x  tools/testing/selftests/drivers/net/bonding/bond_passive_lacp.sh  105
-rw-r--r--  tools/testing/selftests/drivers/net/bonding/bond_topo_2d1c.sh  3
-rw-r--r--  tools/testing/selftests/drivers/net/bonding/bond_topo_3d1c.sh  2
-rw-r--r--  tools/testing/selftests/drivers/net/bonding/config  12
-rwxr-xr-x  tools/testing/selftests/drivers/net/bonding/netcons_over_bonding.sh  361
-rw-r--r--  tools/testing/selftests/drivers/net/config  7
-rw-r--r--  tools/testing/selftests/drivers/net/dsa/Makefile  12
-rw-r--r--  tools/testing/selftests/drivers/net/gro.c (renamed from tools/testing/selftests/net/gro.c)  75
-rwxr-xr-x  tools/testing/selftests/drivers/net/gro.py  164
-rwxr-xr-x  tools/testing/selftests/drivers/net/hds.py  42
-rw-r--r--  tools/testing/selftests/drivers/net/hw/.gitignore  1
-rw-r--r--  tools/testing/selftests/drivers/net/hw/Makefile  37
-rw-r--r--  tools/testing/selftests/drivers/net/hw/config  11
-rwxr-xr-x  tools/testing/selftests/drivers/net/hw/csum.py  4
-rwxr-xr-x  tools/testing/selftests/drivers/net/hw/devlink_rate_tc_bw.py  439
-rwxr-xr-x  tools/testing/selftests/drivers/net/hw/devmem.py  17
-rwxr-xr-x  tools/testing/selftests/drivers/net/hw/iou-zcrx.py  98
-rw-r--r--  tools/testing/selftests/drivers/net/hw/lib/py/__init__.py  46
-rw-r--r--  tools/testing/selftests/drivers/net/hw/ncdevmem.c  865
-rwxr-xr-x  tools/testing/selftests/drivers/net/hw/nic_timestamp.py  113
-rwxr-xr-x  tools/testing/selftests/drivers/net/hw/pp_alloc_fail.py  36
-rwxr-xr-x  tools/testing/selftests/drivers/net/hw/rss_api.py  476
-rwxr-xr-x  tools/testing/selftests/drivers/net/hw/rss_ctx.py  77
-rwxr-xr-x  tools/testing/selftests/drivers/net/hw/rss_flow_label.py  167
-rwxr-xr-x  tools/testing/selftests/drivers/net/hw/rss_input_xfrm.py  8
-rw-r--r--  tools/testing/selftests/drivers/net/hw/toeplitz.c (renamed from tools/testing/selftests/net/toeplitz.c)  72
-rwxr-xr-x  tools/testing/selftests/drivers/net/hw/toeplitz.py  211
-rwxr-xr-x  tools/testing/selftests/drivers/net/hw/tso.py  114
-rw-r--r--  tools/testing/selftests/drivers/net/lib/py/__init__.py  52
-rw-r--r--  tools/testing/selftests/drivers/net/lib/py/env.py  49
-rw-r--r--  tools/testing/selftests/drivers/net/lib/py/load.py  99
-rw-r--r--  tools/testing/selftests/drivers/net/lib/sh/lib_netcons.sh  239
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/devlink_trap_policer.sh  9
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh  12
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/qos_max_descriptors.sh  9
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh  12
-rw-r--r--  tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh  6
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh  2
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh  2
-rwxr-xr-x  tools/testing/selftests/drivers/net/napi_id.py  4
-rw-r--r--  tools/testing/selftests/drivers/net/napi_id_helper.c  35
-rwxr-xr-x  tools/testing/selftests/drivers/net/napi_threaded.py  143
-rwxr-xr-x  tools/testing/selftests/drivers/net/netcons_basic.sh  62
-rwxr-xr-x  tools/testing/selftests/drivers/net/netcons_cmdline.sh  65
-rwxr-xr-x  tools/testing/selftests/drivers/net/netcons_overflow.sh  2
-rwxr-xr-x  tools/testing/selftests/drivers/net/netcons_sysdata.sh  30
-rwxr-xr-x  tools/testing/selftests/drivers/net/netcons_torture.sh  130
-rw-r--r--  tools/testing/selftests/drivers/net/netdevsim/Makefile  9
-rwxr-xr-x  tools/testing/selftests/drivers/net/netdevsim/devlink.sh  171
-rwxr-xr-x  tools/testing/selftests/drivers/net/netdevsim/ethtool-ring.sh  85
-rwxr-xr-x  tools/testing/selftests/drivers/net/netdevsim/nexthop.sh  2
-rwxr-xr-x  tools/testing/selftests/drivers/net/netdevsim/peer.sh  3
-rwxr-xr-x  tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh  23
-rwxr-xr-x  tools/testing/selftests/drivers/net/netpoll_basic.py  396
-rwxr-xr-x  tools/testing/selftests/drivers/net/ping.py  2
-rwxr-xr-x  tools/testing/selftests/drivers/net/psp.py  640
-rw-r--r--  tools/testing/selftests/drivers/net/psp_responder.c  483
-rwxr-xr-x  tools/testing/selftests/drivers/net/ring_reconfig.py  167
-rwxr-xr-x  tools/testing/selftests/drivers/net/stats.py  85
-rw-r--r--  tools/testing/selftests/drivers/net/team/Makefile  11
-rw-r--r--  tools/testing/selftests/drivers/net/team/config  1
-rwxr-xr-x  tools/testing/selftests/drivers/net/team/options.sh  188
-rw-r--r--  tools/testing/selftests/drivers/net/virtio_net/Makefile  13
-rwxr-xr-x  tools/testing/selftests/drivers/net/xdp.py  779
-rw-r--r--  tools/testing/selftests/drivers/ntsync/ntsync.c  2
-rw-r--r--  tools/testing/selftests/drivers/s390x/uvdevice/test_uvdevice.c  2
-rw-r--r--  tools/testing/selftests/exec/check-exec.c  2
-rw-r--r--  tools/testing/selftests/exec/execveat.c  2
-rw-r--r--  tools/testing/selftests/exec/load_address.c  2
-rw-r--r--  tools/testing/selftests/exec/non-regular.c  2
-rw-r--r--  tools/testing/selftests/exec/null-argv.c  2
-rw-r--r--  tools/testing/selftests/exec/recursion-depth.c  2
-rw-r--r--  tools/testing/selftests/fchmodat2/fchmodat2_test.c  2
-rw-r--r--  tools/testing/selftests/filelock/ofdlocks.c  2
-rw-r--r--  tools/testing/selftests/filesystems/.gitignore  2
-rw-r--r--  tools/testing/selftests/filesystems/Makefile  2
-rw-r--r--  tools/testing/selftests/filesystems/anon_inode_test.c  2
-rw-r--r--  tools/testing/selftests/filesystems/binderfs/binderfs_test.c  3
-rw-r--r--  tools/testing/selftests/filesystems/devpts_pts.c  2
-rw-r--r--  tools/testing/selftests/filesystems/epoll/epoll_wakeup_test.c  2
-rw-r--r--  tools/testing/selftests/filesystems/eventfd/eventfd_test.c  2
-rw-r--r--  tools/testing/selftests/filesystems/fclog.c  130
-rw-r--r--  tools/testing/selftests/filesystems/file_stressor.c  2
-rw-r--r--  tools/testing/selftests/filesystems/fuse/.gitignore  3
-rw-r--r--  tools/testing/selftests/filesystems/fuse/Makefile  21
-rw-r--r--  tools/testing/selftests/filesystems/fuse/fuse_mnt.c  146
-rw-r--r--  tools/testing/selftests/filesystems/fuse/fusectl_test.c  140
-rw-r--r--  tools/testing/selftests/filesystems/kernfs_test.c  38
-rw-r--r--  tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c  19
-rw-r--r--  tools/testing/selftests/filesystems/mount-notify/mount-notify_test_ns.c  20
-rw-r--r--  tools/testing/selftests/filesystems/nsfs/iterate_mntns.c  2
-rw-r--r--  tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c  2
-rw-r--r--  tools/testing/selftests/filesystems/overlayfs/set_layers_via_fds.c  2
-rw-r--r--  tools/testing/selftests/filesystems/statmount/listmount_test.c  2
-rw-r--r--  tools/testing/selftests/filesystems/statmount/statmount_test.c  2
-rw-r--r--  tools/testing/selftests/filesystems/statmount/statmount_test_ns.c  2
-rw-r--r--  tools/testing/selftests/filesystems/utils.c  4
-rwxr-xr-x  tools/testing/selftests/ftrace/ftracetest  34
-rw-r--r--  tools/testing/selftests/ftrace/test.d/00basic/mount_options.tc  2
-rw-r--r--  tools/testing/selftests/ftrace/test.d/00basic/trace_marker_raw.tc  107
-rw-r--r--  tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc  46
-rw-r--r--  tools/testing/selftests/ftrace/test.d/dynevent/enable_disable_tprobe.tc  40
-rw-r--r--  tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc  28
-rw-r--r--  tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc  4
-rw-r--r--  tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc  2
-rw-r--r--  tools/testing/selftests/ftrace/test.d/functions  6
-rw-r--r--  tools/testing/selftests/futex/functional/.gitignore  1
-rw-r--r--  tools/testing/selftests/futex/functional/Makefile  8
-rw-r--r--  tools/testing/selftests/futex/functional/futex_numa.c  3
-rw-r--r--  tools/testing/selftests/futex/functional/futex_numa_mpol.c  108
-rw-r--r--  tools/testing/selftests/futex/functional/futex_priv_hash.c  170
-rw-r--r--  tools/testing/selftests/futex/functional/futex_requeue.c  76
-rw-r--r--  tools/testing/selftests/futex/functional/futex_requeue_pi.c  266
-rw-r--r--  tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c  86
-rw-r--r--  tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c  129
-rw-r--r--  tools/testing/selftests/futex/functional/futex_wait.c  103
-rw-r--r--  tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c  83
-rw-r--r--  tools/testing/selftests/futex/functional/futex_wait_timeout.c  139
-rw-r--r--  tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c  76
-rw-r--r--  tools/testing/selftests/futex/functional/futex_wait_wouldblock.c  76
-rw-r--r--  tools/testing/selftests/futex/functional/futex_waitv.c  99
-rwxr-xr-x  tools/testing/selftests/futex/functional/run.sh  62
-rw-r--r--  tools/testing/selftests/futex/include/futex2test.h  8
-rw-r--r--  tools/testing/selftests/futex/include/futextest.h  22
-rw-r--r--  tools/testing/selftests/futex/include/logging.h  148
-rw-r--r--  tools/testing/selftests/hid/config.common  1
-rw-r--r--  tools/testing/selftests/hid/hid_common.h  8
-rw-r--r--  tools/testing/selftests/hid/hidraw.c  473
-rw-r--r--  tools/testing/selftests/hid/tests/base.py  46
-rw-r--r--  tools/testing/selftests/hid/tests/base_device.py  49
-rw-r--r--  tools/testing/selftests/hid/tests/test_apple_keyboard.py  3
-rw-r--r--  tools/testing/selftests/hid/tests/test_gamepad.py  3
-rw-r--r--  tools/testing/selftests/hid/tests/test_ite_keyboard.py  3
-rw-r--r--  tools/testing/selftests/hid/tests/test_mouse.py  70
-rw-r--r--  tools/testing/selftests/hid/tests/test_multitouch.py  57
-rw-r--r--  tools/testing/selftests/hid/tests/test_sony.py  7
-rw-r--r--  tools/testing/selftests/hid/tests/test_tablet.py  82
-rw-r--r--  tools/testing/selftests/hid/tests/test_wacom_generic.py  445
-rwxr-xr-x  tools/testing/selftests/hid/vmtest.sh  668
-rw-r--r--  tools/testing/selftests/intel_pstate/aperf.c  2
-rw-r--r--  tools/testing/selftests/iommu/iommufd.c  686
-rw-r--r--  tools/testing/selftests/iommu/iommufd_fail_nth.c  17
-rw-r--r--  tools/testing/selftests/iommu/iommufd_utils.h  164
-rw-r--r--  tools/testing/selftests/ipc/msgque.c  49
-rw-r--r--  tools/testing/selftests/ir/ir_loopback.c  2
-rw-r--r--  tools/testing/selftests/kcmp/kcmp_test.c  2
-rw-r--r--  tools/testing/selftests/kexec/.gitignore  2
-rw-r--r--  tools/testing/selftests/kexec/Makefile  2
-rw-r--r--  tools/testing/selftests/kho/arm64.conf  9
-rw-r--r--  tools/testing/selftests/kho/init.c  95
-rwxr-xr-x  tools/testing/selftests/kho/vmtest.sh  186
-rw-r--r--  tools/testing/selftests/kho/x86.conf  7
-rw-r--r--  tools/testing/selftests/kselftest.h  22
-rw-r--r--  tools/testing/selftests/kselftest/runner.sh  14
-rw-r--r--  tools/testing/selftests/kselftest_harness.h  19
-rw-r--r--  tools/testing/selftests/kselftest_harness/Makefile  1
-rw-r--r--  tools/testing/selftests/kselftest_harness/harness-selftest.c  2
-rw-r--r--  tools/testing/selftests/kvm/Makefile  2
-rw-r--r--  tools/testing/selftests/kvm/Makefile.kvm  26
-rw-r--r--  tools/testing/selftests/kvm/access_tracking_perf_test.c  8
-rw-r--r--  tools/testing/selftests/kvm/arch_timer.c  7
-rw-r--r--  tools/testing/selftests/kvm/arm64/aarch32_id_regs.c  2
-rw-r--r--  tools/testing/selftests/kvm/arm64/arch_timer.c  13
-rw-r--r--  tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c  81
-rw-r--r--  tools/testing/selftests/kvm/arm64/at.c  166
-rw-r--r--  tools/testing/selftests/kvm/arm64/debug-exceptions.c  16
-rw-r--r--  tools/testing/selftests/kvm/arm64/external_aborts.c  415
-rw-r--r--  tools/testing/selftests/kvm/arm64/get-reg-list.c  305
-rw-r--r--  tools/testing/selftests/kvm/arm64/hello_el2.c  71
-rw-r--r--  tools/testing/selftests/kvm/arm64/hypercalls.c  2
-rw-r--r--  tools/testing/selftests/kvm/arm64/kvm-uuid.c  70
-rw-r--r--  tools/testing/selftests/kvm/arm64/mmio_abort.c  159
-rw-r--r--  tools/testing/selftests/kvm/arm64/no-vgic-v3.c  6
-rw-r--r--  tools/testing/selftests/kvm/arm64/page_fault_test.c  6
-rw-r--r--  tools/testing/selftests/kvm/arm64/psci_test.c  13
-rw-r--r--  tools/testing/selftests/kvm/arm64/sea_to_user.c  331
-rw-r--r--  tools/testing/selftests/kvm/arm64/set_id_regs.c  74
-rw-r--r--  tools/testing/selftests/kvm/arm64/smccc_filter.c  17
-rw-r--r--  tools/testing/selftests/kvm/arm64/vgic_init.c  261
-rw-r--r--  tools/testing/selftests/kvm/arm64/vgic_irq.c  303
-rw-r--r--  tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c  15
-rw-r--r--  tools/testing/selftests/kvm/arm64/vpmu_counter_access.c  77
-rw-r--r--  tools/testing/selftests/kvm/config  1
-rw-r--r--  tools/testing/selftests/kvm/dirty_log_perf_test.c  35
-rw-r--r--  tools/testing/selftests/kvm/dirty_log_test.c  1
-rw-r--r--  tools/testing/selftests/kvm/get-reg-list.c  9
-rw-r--r--  tools/testing/selftests/kvm/guest_memfd_test.c  367
-rw-r--r--  tools/testing/selftests/kvm/include/arm64/arch_timer.h  24
-rw-r--r--  tools/testing/selftests/kvm/include/arm64/gic.h  1
-rw-r--r--  tools/testing/selftests/kvm/include/arm64/gic_v3_its.h  1
-rw-r--r--  tools/testing/selftests/kvm/include/arm64/kvm_util_arch.h  5
-rw-r--r--  tools/testing/selftests/kvm/include/arm64/processor.h  94
-rw-r--r--  tools/testing/selftests/kvm/include/arm64/vgic.h  3
-rw-r--r--  tools/testing/selftests/kvm/include/kvm_syscalls.h  81
-rw-r--r--  tools/testing/selftests/kvm/include/kvm_util.h  118
-rw-r--r--  tools/testing/selftests/kvm/include/loongarch/arch_timer.h  85
-rw-r--r--  tools/testing/selftests/kvm/include/loongarch/processor.h  81
-rw-r--r--  tools/testing/selftests/kvm/include/numaif.h  110
-rw-r--r--  tools/testing/selftests/kvm/include/riscv/processor.h  1
-rw-r--r--  tools/testing/selftests/kvm/include/test_util.h  19
-rw-r--r--  tools/testing/selftests/kvm/include/x86/pmu.h  26
-rw-r--r--  tools/testing/selftests/kvm/include/x86/processor.h  48
-rw-r--r--  tools/testing/selftests/kvm/include/x86/vmx.h  3
-rw-r--r--  tools/testing/selftests/kvm/irqfd_test.c  143
-rw-r--r--  tools/testing/selftests/kvm/kvm_binary_stats_test.c  4
-rw-r--r--  tools/testing/selftests/kvm/lib/arm64/gic.c  6
-rw-r--r--  tools/testing/selftests/kvm/lib/arm64/gic_private.h  1
-rw-r--r--  tools/testing/selftests/kvm/lib/arm64/gic_v3.c  22
-rw-r--r--  tools/testing/selftests/kvm/lib/arm64/gic_v3_its.c  19
-rw-r--r--  tools/testing/selftests/kvm/lib/arm64/processor.c  117
-rw-r--r--  tools/testing/selftests/kvm/lib/arm64/vgic.c  64
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util.c  302
-rw-r--r--  tools/testing/selftests/kvm/lib/loongarch/exception.S  6
-rw-r--r--  tools/testing/selftests/kvm/lib/loongarch/processor.c  47
-rw-r--r--  tools/testing/selftests/kvm/lib/memstress.c  2
-rw-r--r--  tools/testing/selftests/kvm/lib/s390/processor.c  5
-rw-r--r--  tools/testing/selftests/kvm/lib/sparsebit.c  4
-rw-r--r--  tools/testing/selftests/kvm/lib/test_util.c  7
-rw-r--r--  tools/testing/selftests/kvm/lib/x86/memstress.c  2
-rw-r--r--  tools/testing/selftests/kvm/lib/x86/pmu.c  49
-rw-r--r--  tools/testing/selftests/kvm/lib/x86/processor.c  138
-rw-r--r--  tools/testing/selftests/kvm/lib/x86/vmx.c  9
-rw-r--r--  tools/testing/selftests/kvm/loongarch/arch_timer.c  200
-rw-r--r--  tools/testing/selftests/kvm/memslot_modification_stress_test.c  1
-rw-r--r--  tools/testing/selftests/kvm/memslot_perf_test.c  1
-rw-r--r--  tools/testing/selftests/kvm/mmu_stress_test.c  15
-rw-r--r--  tools/testing/selftests/kvm/pre_fault_memory_test.c  157
-rw-r--r--  tools/testing/selftests/kvm/riscv/get-reg-list.c  64
-rw-r--r--  tools/testing/selftests/kvm/s390/cmma_test.c  2
-rw-r--r--  tools/testing/selftests/kvm/s390/cpumodel_subfuncs_test.c  2
-rw-r--r--  tools/testing/selftests/kvm/s390/ucontrol_test.c  16
-rw-r--r--  tools/testing/selftests/kvm/s390/user_operexec.c  140
-rw-r--r--  tools/testing/selftests/kvm/set_memory_region_test.c  17
-rw-r--r--  tools/testing/selftests/kvm/steal_time.c  2
-rw-r--r--  tools/testing/selftests/kvm/x86/aperfmperf_test.c  213
-rw-r--r--  tools/testing/selftests/kvm/x86/fastops_test.c  82
-rw-r--r--  tools/testing/selftests/kvm/x86/hyperv_cpuid.c  2
-rw-r--r--  tools/testing/selftests/kvm/x86/hyperv_features.c  18
-rw-r--r--  tools/testing/selftests/kvm/x86/hyperv_ipi.c  18
-rw-r--r--  tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c  2
-rw-r--r--  tools/testing/selftests/kvm/x86/monitor_mwait_test.c  9
-rw-r--r--  tools/testing/selftests/kvm/x86/msrs_test.c  489
-rw-r--r--  tools/testing/selftests/kvm/x86/nested_close_kvm_test.c (renamed from tools/testing/selftests/kvm/x86/vmx_close_while_nested_test.c)  42
-rw-r--r--  tools/testing/selftests/kvm/x86/nested_invalid_cr3_test.c  116
-rw-r--r--  tools/testing/selftests/kvm/x86/nested_tsc_adjust_test.c (renamed from tools/testing/selftests/kvm/x86/vmx_tsc_adjust_test.c)  79
-rw-r--r--  tools/testing/selftests/kvm/x86/nested_tsc_scaling_test.c (renamed from tools/testing/selftests/kvm/x86/vmx_nested_tsc_scaling_test.c)  48
-rw-r--r--  tools/testing/selftests/kvm/x86/pmu_counters_test.c  75
-rw-r--r--  tools/testing/selftests/kvm/x86/pmu_event_filter_test.c  4
-rw-r--r--  tools/testing/selftests/kvm/x86/private_mem_conversions_test.c  9
-rw-r--r--  tools/testing/selftests/kvm/x86/sev_smoke_test.c  2
-rw-r--r--  tools/testing/selftests/kvm/x86/state_test.c  2
-rw-r--r--  tools/testing/selftests/kvm/x86/userspace_io_test.c  2
-rw-r--r--  tools/testing/selftests/kvm/x86/userspace_msr_exit_test.c  8
-rw-r--r--  tools/testing/selftests/kvm/x86/vmx_dirty_log_test.c  12
-rw-r--r--  tools/testing/selftests/kvm/x86/vmx_exception_with_invalid_guest_state.c  2
-rw-r--r--  tools/testing/selftests/kvm/x86/vmx_nested_la57_state_test.c  132
-rw-r--r--  tools/testing/selftests/kvm/x86/vmx_pmu_caps_test.c  7
-rw-r--r--  tools/testing/selftests/kvm/x86/xapic_ipi_test.c  5
-rw-r--r--  tools/testing/selftests/kvm/x86/xapic_state_test.c  4
-rw-r--r--  tools/testing/selftests/kvm/x86/xcr0_cpuid_test.c  12
-rw-r--r--  tools/testing/selftests/kvm/x86/xen_shinfo_test.c  21
-rw-r--r--  tools/testing/selftests/landlock/Makefile  2
-rw-r--r--  tools/testing/selftests/landlock/audit.h  13
-rw-r--r--  tools/testing/selftests/landlock/audit_test.c  1
-rw-r--r--  tools/testing/selftests/landlock/common.h  6
-rw-r--r--  tools/testing/selftests/landlock/fs_test.c  1514
-rw-r--r--  tools/testing/selftests/lib.mk  8
-rw-r--r--  tools/testing/selftests/livepatch/functions.sh  6
-rw-r--r--  tools/testing/selftests/liveupdate/.gitignore  9
-rw-r--r--  tools/testing/selftests/liveupdate/Makefile  34
-rw-r--r--  tools/testing/selftests/liveupdate/config  11
-rwxr-xr-x  tools/testing/selftests/liveupdate/do_kexec.sh  16
-rw-r--r--  tools/testing/selftests/liveupdate/liveupdate.c  348
-rw-r--r--  tools/testing/selftests/liveupdate/luo_kexec_simple.c  89
-rw-r--r--  tools/testing/selftests/liveupdate/luo_multi_session.c  162
-rw-r--r--  tools/testing/selftests/liveupdate/luo_test_utils.c  266
-rw-r--r--  tools/testing/selftests/liveupdate/luo_test_utils.h  44
-rw-r--r--  tools/testing/selftests/lkdtm/config  2
-rw-r--r--  tools/testing/selftests/lsm/lsm_get_self_attr_test.c  2
-rw-r--r--  tools/testing/selftests/lsm/lsm_list_modules_test.c  2
-rw-r--r--  tools/testing/selftests/lsm/lsm_set_self_attr_test.c  2
-rw-r--r--  tools/testing/selftests/media_tests/media_device_open.c  2
-rw-r--r--  tools/testing/selftests/media_tests/media_device_test.c  2
-rw-r--r--  tools/testing/selftests/membarrier/membarrier_test_impl.h  2
-rw-r--r--  tools/testing/selftests/mincore/mincore_selftest.c  4
-rw-r--r--  tools/testing/selftests/mm/.gitignore  6
-rw-r--r--  tools/testing/selftests/mm/Makefile  6
-rw-r--r--  tools/testing/selftests/mm/compaction_test.c  2
-rw-r--r--  tools/testing/selftests/mm/config  3
-rw-r--r--  tools/testing/selftests/mm/cow.c  116
-rw-r--r--  tools/testing/selftests/mm/droppable.c  2
-rw-r--r--  tools/testing/selftests/mm/guard-regions.c  198
-rw-r--r--  tools/testing/selftests/mm/gup_longterm.c  19
-rw-r--r--  tools/testing/selftests/mm/gup_test.c  28
-rw-r--r--  tools/testing/selftests/mm/hmm-tests.c  926
-rw-r--r--  tools/testing/selftests/mm/hugepage-mmap.c  2
-rw-r--r--  tools/testing/selftests/mm/hugepage-mremap.c  18
-rw-r--r--  tools/testing/selftests/mm/hugetlb-madvise.c  9
-rw-r--r--  tools/testing/selftests/mm/hugetlb-read-hwpoison.c  2
-rw-r--r--  tools/testing/selftests/mm/hugetlb-soft-offline.c  2
-rw-r--r--  tools/testing/selftests/mm/hugetlb_dio.c  2
-rw-r--r--  tools/testing/selftests/mm/hugetlb_fault_after_madv.c  2
-rw-r--r--  tools/testing/selftests/mm/hugetlb_madv_vs_map.c  2
-rw-r--r--  tools/testing/selftests/mm/khugepaged.c  9
-rw-r--r--  tools/testing/selftests/mm/ksm_functional_tests.c  286
-rw-r--r--  tools/testing/selftests/mm/ksm_tests.c  40
-rw-r--r--  tools/testing/selftests/mm/madv_populate.c  23
-rw-r--r--  tools/testing/selftests/mm/map_fixed_noreplace.c  2
-rw-r--r--  tools/testing/selftests/mm/map_hugetlb.c  2
-rw-r--r--  tools/testing/selftests/mm/map_populate.c  2
-rw-r--r--  tools/testing/selftests/mm/mdwe_test.c  2
-rw-r--r--  tools/testing/selftests/mm/memfd_secret.c  2
-rw-r--r--  tools/testing/selftests/mm/merge.c  725
-rw-r--r--  tools/testing/selftests/mm/migration.c  23
-rw-r--r--  tools/testing/selftests/mm/mkdirty.c  2
-rw-r--r--  tools/testing/selftests/mm/mlock-random-test.c  2
-rw-r--r--  tools/testing/selftests/mm/mlock2-tests.c  2
-rw-r--r--  tools/testing/selftests/mm/mrelease_test.c  2
-rw-r--r--  tools/testing/selftests/mm/mremap_dontunmap.c  2
-rw-r--r--  tools/testing/selftests/mm/mremap_test.c  630
-rw-r--r--  tools/testing/selftests/mm/mseal_test.c  2
-rw-r--r--  tools/testing/selftests/mm/on-fault-limit.c  2
-rw-r--r--  tools/testing/selftests/mm/pagemap_ioctl.c  96
-rw-r--r--  tools/testing/selftests/mm/pfnmap.c  50
-rw-r--r--  tools/testing/selftests/mm/pkey-helpers.h  5
-rw-r--r--  tools/testing/selftests/mm/pkey_sighandler_tests.c  2
-rw-r--r--  tools/testing/selftests/mm/prctl_thp_disable.c  291
-rw-r--r--  tools/testing/selftests/mm/process_madv.c  344
-rw-r--r--  tools/testing/selftests/mm/protection_keys.c  6
-rw-r--r--  tools/testing/selftests/mm/rmap.c  433
-rwxr-xr-x  tools/testing/selftests/mm/run_vmtests.sh  44
-rw-r--r--  tools/testing/selftests/mm/settings  2
-rw-r--r--  tools/testing/selftests/mm/soft-dirty.c  141
-rw-r--r--  tools/testing/selftests/mm/split_huge_page_test.c  481
-rwxr-xr-x  tools/testing/selftests/mm/test_vmalloc.sh  6
-rw-r--r--  tools/testing/selftests/mm/thp_settings.c  18
-rw-r--r--  tools/testing/selftests/mm/thp_settings.h  3
-rw-r--r--  tools/testing/selftests/mm/thuge-gen.c  49
-rw-r--r--  tools/testing/selftests/mm/transhuge-stress.c  2
-rw-r--r--  tools/testing/selftests/mm/uffd-common.c  293
-rw-r--r--  tools/testing/selftests/mm/uffd-common.h  80
-rw-r--r--  tools/testing/selftests/mm/uffd-stress.c  245
-rw-r--r--  tools/testing/selftests/mm/uffd-unit-tests.c  590
-rw-r--r--  tools/testing/selftests/mm/uffd-wp-mremap.c  31
-rw-r--r--  tools/testing/selftests/mm/va_high_addr_switch.c  6
-rwxr-xr-x  tools/testing/selftests/mm/va_high_addr_switch.sh  37
-rw-r--r--  tools/testing/selftests/mm/virtual_address_range.c  22
-rw-r--r--  tools/testing/selftests/mm/vm_util.c  243
-rw-r--r--  tools/testing/selftests/mm/vm_util.h  35
-rw-r--r--  tools/testing/selftests/mount_setattr/mount_setattr_test.c  96
-rw-r--r--  tools/testing/selftests/move_mount_set_group/move_mount_set_group_test.c  2
-rw-r--r--  tools/testing/selftests/mqueue/mq_open_tests.c  2
-rw-r--r--  tools/testing/selftests/mqueue/mq_perf_tests.c  2
-rw-r--r--  tools/testing/selftests/mseal_system_mappings/sysmap_is_sealed.c  4
-rw-r--r--  tools/testing/selftests/namespaces/.gitignore  12
-rw-r--r--  tools/testing/selftests/namespaces/Makefile  29
-rw-r--r--  tools/testing/selftests/namespaces/config  7
-rw-r--r--  tools/testing/selftests/namespaces/cred_change_test.c  814
-rw-r--r--  tools/testing/selftests/namespaces/file_handle_test.c  1429
-rw-r--r--  tools/testing/selftests/namespaces/init_ino_test.c  61
-rw-r--r--  tools/testing/selftests/namespaces/listns_efault_test.c  530
-rw-r--r--  tools/testing/selftests/namespaces/listns_pagination_bug.c  138
-rw-r--r--  tools/testing/selftests/namespaces/listns_permissions_test.c  759
-rw-r--r--  tools/testing/selftests/namespaces/listns_test.c  679
-rw-r--r--  tools/testing/selftests/namespaces/ns_active_ref_test.c  2672
-rw-r--r--  tools/testing/selftests/namespaces/nsid_test.c  981
-rw-r--r--  tools/testing/selftests/namespaces/regression_pidfd_setns_test.c  113
-rw-r--r--  tools/testing/selftests/namespaces/siocgskns_test.c  1824
-rw-r--r--  tools/testing/selftests/namespaces/stress_test.c  626
-rw-r--r--  tools/testing/selftests/namespaces/wrappers.h  35
-rw-r--r--  tools/testing/selftests/nci/nci_dev.c  2
-rw-r--r--  tools/testing/selftests/net/.gitignore  10
-rw-r--r--  tools/testing/selftests/net/Makefile  295
-rw-r--r--  tools/testing/selftests/net/af_unix/.gitignore  8
-rw-r--r--  tools/testing/selftests/net/af_unix/Makefile  14
-rw-r--r--  tools/testing/selftests/net/af_unix/config  2
-rw-r--r--  tools/testing/selftests/net/af_unix/diag_uid.c  2
-rw-r--r--  tools/testing/selftests/net/af_unix/msg_oob.c  144
-rw-r--r--  tools/testing/selftests/net/af_unix/scm_inq.c  123
-rw-r--r--  tools/testing/selftests/net/af_unix/scm_pidfd.c  217
-rw-r--r--  tools/testing/selftests/net/af_unix/scm_rights.c  30
-rw-r--r--  tools/testing/selftests/net/af_unix/so_peek_off.c  162
-rw-r--r--  tools/testing/selftests/net/af_unix/unix_connect.c  2
-rw-r--r--  tools/testing/selftests/net/af_unix/unix_connreset.c  180
-rwxr-xr-x  tools/testing/selftests/net/arp_ndisc_evict_nocarrier.sh  2
-rwxr-xr-x  tools/testing/selftests/net/bareudp.sh  2
-rw-r--r--  tools/testing/selftests/net/bench/Makefile  7
-rw-r--r--  tools/testing/selftests/net/bench/page_pool/Makefile  17
-rw-r--r--  tools/testing/selftests/net/bench/page_pool/bench_page_pool_simple.c  267
-rw-r--r--  tools/testing/selftests/net/bench/page_pool/time_bench.c  394
-rw-r--r--  tools/testing/selftests/net/bench/page_pool/time_bench.h  238
-rwxr-xr-x  tools/testing/selftests/net/bench/test_bench_page_pool.sh  32
-rw-r--r--  tools/testing/selftests/net/bind_bhash.c  4
-rw-r--r--  tools/testing/selftests/net/bind_timewait.c  2
-rw-r--r--  tools/testing/selftests/net/bind_wildcard.c  2
-rwxr-xr-x  tools/testing/selftests/net/bpf_offload.py  4
-rwxr-xr-x  tools/testing/selftests/net/broadcast_ether_dst.sh  83
-rwxr-xr-x  tools/testing/selftests/net/broadcast_pmtu.sh  47
-rwxr-xr-x  tools/testing/selftests/net/busy_poll_test.sh  24
-rw-r--r--  tools/testing/selftests/net/busy_poller.c  16
-rw-r--r--  tools/testing/selftests/net/can/config  3
-rw-r--r--  tools/testing/selftests/net/can/test_raw_filter.c  2
-rw-r--r--  tools/testing/selftests/net/cmsg_sender.c  12
-rw-r--r--  tools/testing/selftests/net/config  136
-rw-r--r--  tools/testing/selftests/net/epoll_busy_poll.c  2
-rwxr-xr-x  tools/testing/selftests/net/fcnal-ipv4.sh  2
-rwxr-xr-x  tools/testing/selftests/net/fcnal-ipv6.sh  2
-rwxr-xr-x  tools/testing/selftests/net/fcnal-other.sh  2
-rwxr-xr-x  tools/testing/selftests/net/fcnal-test.sh  435
-rwxr-xr-x  tools/testing/selftests/net/fdb_notify.sh  26
-rwxr-xr-x  tools/testing/selftests/net/fib_nexthops.sh  52
-rwxr-xr-x  tools/testing/selftests/net/fib_tests.sh  66
-rw-r--r--  tools/testing/selftests/net/forwarding/Makefile  58
-rw-r--r--  tools/testing/selftests/net/forwarding/README  15
-rwxr-xr-x  tools/testing/selftests/net/forwarding/bridge_activity_notify.sh  170
-rwxr-xr-x  tools/testing/selftests/net/forwarding/bridge_fdb_local_vlan_0.sh  387
-rwxr-xr-x  tools/testing/selftests/net/forwarding/bridge_mdb.sh  100
-rw-r--r--  tools/testing/selftests/net/forwarding/config  30
-rwxr-xr-x  tools/testing/selftests/net/forwarding/custom_multipath_hash.sh  2
-rwxr-xr-x  tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh  2
-rwxr-xr-x  tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh  6
-rwxr-xr-x  tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh  2
-rw-r--r--  tools/testing/selftests/net/forwarding/lib.sh  119
-rwxr-xr-x  tools/testing/selftests/net/forwarding/lib_sh_test.sh  7
-rwxr-xr-x  tools/testing/selftests/net/forwarding/local_termination.sh  2
-rwxr-xr-x  tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh  2
-rwxr-xr-x  tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh  4
-rwxr-xr-x  tools/testing/selftests/net/forwarding/router.sh  29
-rwxr-xr-x  tools/testing/selftests/net/forwarding/router_multicast.sh  35
-rwxr-xr-x  tools/testing/selftests/net/forwarding/sch_ets.sh  1
-rw-r--r--  tools/testing/selftests/net/forwarding/sch_ets_core.sh  9
-rw-r--r--  tools/testing/selftests/net/forwarding/sch_ets_tests.sh  8
-rwxr-xr-x  tools/testing/selftests/net/forwarding/sch_red.sh  12
-rw-r--r--  tools/testing/selftests/net/forwarding/sch_tbf_core.sh  6
-rwxr-xr-x  tools/testing/selftests/net/forwarding/tc_flower.sh  52
-rwxr-xr-x  tools/testing/selftests/net/forwarding/vxlan_bridge_1q_mc_ul.sh  766
-rwxr-xr-x  tools/testing/selftests/net/forwarding/vxlan_reserved.sh  33
-rwxr-xr-x  tools/testing/selftests/net/gre_ipv6_lladdr.sh  27
-rwxr-xr-x  tools/testing/selftests/net/gro.sh  105
-rw-r--r--  tools/testing/selftests/net/hsr/Makefile  6
-rw-r--r--  tools/testing/selftests/net/hsr/config  4
-rw-r--r--  tools/testing/selftests/net/io_uring_zerocopy_tx.c  24
-rw-r--r--  tools/testing/selftests/net/ip_local_port_range.c  2
-rw-r--r--  tools/testing/selftests/net/ipsec.c  2
-rwxr-xr-x  tools/testing/selftests/net/ipv6_force_forwarding.sh  105
-rw-r--r--  tools/testing/selftests/net/ipv6_fragmentation.c  114
-rw-r--r--  tools/testing/selftests/net/lib.sh  111
-rw-r--r--  tools/testing/selftests/net/lib/Makefile  15
-rwxr-xr-x  tools/testing/selftests/net/lib/ksft_setup_loopback.sh  111
-rw-r--r--  tools/testing/selftests/net/lib/py/__init__.py  32
-rw-r--r--  tools/testing/selftests/net/lib/py/ksft.py  120
-rw-r--r--  tools/testing/selftests/net/lib/py/nsim.py  2
-rw-r--r--  tools/testing/selftests/net/lib/py/utils.py  104
-rw-r--r--  tools/testing/selftests/net/lib/py/ynl.py  10
-rw-r--r--  tools/testing/selftests/net/lib/sh/defer.sh  20
-rw-r--r--  tools/testing/selftests/net/lib/xdp_native.bpf.c  680
-rw-r--r--  tools/testing/selftests/net/mptcp/Makefile  31
-rw-r--r--  tools/testing/selftests/net/mptcp/config  44
-rwxr-xr-x  tools/testing/selftests/net/mptcp/diag.sh  2
-rw-r--r--  tools/testing/selftests/net/mptcp/mptcp_connect.c  44
-rwxr-xr-x  tools/testing/selftests/net/mptcp/mptcp_connect.sh  154
-rwxr-xr-x  tools/testing/selftests/net/mptcp/mptcp_connect_checksum.sh  5
-rwxr-xr-x  tools/testing/selftests/net/mptcp/mptcp_connect_mmap.sh  5
-rwxr-xr-x  tools/testing/selftests/net/mptcp/mptcp_connect_sendfile.sh  5
-rw-r--r--  tools/testing/selftests/net/mptcp/mptcp_inq.c  14
-rwxr-xr-x  tools/testing/selftests/net/mptcp/mptcp_join.sh  582
-rw-r--r--  tools/testing/selftests/net/mptcp/mptcp_lib.sh  81
-rw-r--r--  tools/testing/selftests/net/mptcp/mptcp_sockopt.c  30
-rwxr-xr-x  tools/testing/selftests/net/mptcp/mptcp_sockopt.sh  45
-rwxr-xr-x  tools/testing/selftests/net/mptcp/pm_netlink.sh  6
-rw-r--r--  tools/testing/selftests/net/mptcp/pm_nl_ctl.c  25
-rwxr-xr-x  tools/testing/selftests/net/mptcp/simult_flows.sh  46
-rwxr-xr-x  tools/testing/selftests/net/mptcp/userspace_pm.sh  19
-rw-r--r--  tools/testing/selftests/net/msg_zerocopy.c  24
-rwxr-xr-x  tools/testing/selftests/net/msg_zerocopy.sh  84
-rwxr-xr-x  tools/testing/selftests/net/nat6to4.sh  15
-rwxr-xr-x  tools/testing/selftests/net/netdev-l2addr.sh  59
-rw-r--r--  tools/testing/selftests/net/netfilter/.gitignore  1
-rw-r--r--  tools/testing/selftests/net/netfilter/Makefile  88
-rw-r--r--  tools/testing/selftests/net/netfilter/config  50
-rwxr-xr-x  tools/testing/selftests/net/netfilter/conntrack_clash.sh  174
-rw-r--r--  tools/testing/selftests/net/netfilter/conntrack_dump_flush.c  2
-rwxr-xr-x  tools/testing/selftests/net/netfilter/conntrack_resize.sh  100
-rwxr-xr-x  tools/testing/selftests/net/netfilter/ipvs.sh  4
-rwxr-xr-x  tools/testing/selftests/net/netfilter/nf_nat_edemux.sh  58
-rwxr-xr-x  tools/testing/selftests/net/netfilter/nft_concat_range.sh  161
-rwxr-xr-x  tools/testing/selftests/net/netfilter/nft_fib.sh  13
-rwxr-xr-x  tools/testing/selftests/net/netfilter/nft_flowtable.sh  213
-rwxr-xr-x  tools/testing/selftests/net/netfilter/nft_interface_stress.sh  5
-rwxr-xr-x  tools/testing/selftests/net/netfilter/nft_nat.sh  85
-rw-r--r--  tools/testing/selftests/net/netfilter/sctp_collision.c  3
-rw-r--r--  tools/testing/selftests/net/netfilter/udpclash.c  158
-rw-r--r--  tools/testing/selftests/net/netlink-dumps.c  46
-rw-r--r--  tools/testing/selftests/net/nettest.c  12
-rwxr-xr-x  tools/testing/selftests/net/nl_netdev.py  127
-rwxr-xr-x  tools/testing/selftests/net/openvswitch/openvswitch.sh  88
-rw-r--r--  tools/testing/selftests/net/openvswitch/ovs-dpctl.py  2
-rw-r--r--  tools/testing/selftests/net/ovpn/Makefile  12
-rw-r--r--  tools/testing/selftests/net/ovpn/config  12
-rw-r--r--  tools/testing/selftests/net/ovpn/ovpn-cli.c  6
-rwxr-xr-x  tools/testing/selftests/net/ovpn/test-large-mtu.sh  9
-rw-r--r--  tools/testing/selftests/net/packetdrill/Makefile  10
-rw-r--r--  tools/testing/selftests/net/packetdrill/config  4
-rwxr-xr-x  tools/testing/selftests/net/packetdrill/defaults.sh  3
-rwxr-xr-x  tools/testing/selftests/net/packetdrill/ksft_runner.sh  66
-rw-r--r--  tools/testing/selftests/net/packetdrill/tcp_blocking_blocking-read.pkt  2
-rw-r--r--  tools/testing/selftests/net/packetdrill/tcp_close_no_rst.pkt  32
-rw-r--r--  tools/testing/selftests/net/packetdrill/tcp_dsack_mult.pkt  45
-rw-r--r--  tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-cookie-not-reqd.pkt  32
-rw-r--r--  tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-no-setsockopt.pkt  21
-rw-r--r--  tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-non-tfo-listener.pkt  26
-rw-r--r--  tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-pure-syn-data.pkt  50
-rw-r--r--  tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-rw.pkt  23
-rw-r--r--  tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-zero-payload.pkt  26
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_fastopen_server_client-ack-dropped-then-recovery-ms-timestamps.pkt46
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_fastopen_server_experimental_option.pkt37
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_fastopen_server_fin-close-socket.pkt30
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_fastopen_server_icmp-before-accept.pkt49
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_fastopen_server_reset-after-accept.pkt37
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_fastopen_server_reset-before-accept.pkt32
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_fastopen_server_reset-close-with-unread-data.pkt32
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_fastopen_server_reset-non-tfo-socket.pkt37
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_fastopen_server_sockopt-fastopen-key.pkt74
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_fastopen_server_trigger-rst-listener-closed.pkt21
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_fastopen_server_trigger-rst-reconnect.pkt30
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_fastopen_server_trigger-rst-unread-data-closed.pkt23
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_inq_client.pkt3
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_inq_server.pkt3
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_ooo-before-and-after-accept.pkt53
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_ooo_rcv_mss.pkt27
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_rcv_big_endseq.pkt44
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_rcv_toobig.pkt33
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_rto_synack_rto_max.pkt54
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_syscall_bad_arg_sendmsg-empty-iov.pkt4
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_user_timeout_user-timeout-probe.pkt6
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_basic.pkt2
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_batch.pkt2
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_client.pkt2
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_closed.pkt2
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_edge.pkt3
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_exclusive.pkt3
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_oneshot.pkt3
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-client.pkt2
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-server.pkt2
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_maxfrags.pkt2
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_small.pkt2
-rwxr-xr-xtools/testing/selftests/net/pmtu.sh9
-rw-r--r--tools/testing/selftests/net/proc_net_pktgen.c2
-rw-r--r--tools/testing/selftests/net/psock_fanout.c2
-rw-r--r--tools/testing/selftests/net/psock_lib.h4
-rw-r--r--tools/testing/selftests/net/psock_tpacket.c6
-rw-r--r--tools/testing/selftests/net/rds/Makefile10
-rw-r--r--tools/testing/selftests/net/reuseaddr_ports_exhausted.c2
-rw-r--r--tools/testing/selftests/net/reuseport_bpf.c2
-rw-r--r--tools/testing/selftests/net/reuseport_bpf_numa.c2
-rwxr-xr-xtools/testing/selftests/net/route_hint.sh79
-rwxr-xr-xtools/testing/selftests/net/rps_default_mask.sh12
-rwxr-xr-xtools/testing/selftests/net/rtnetlink.sh129
-rwxr-xr-xtools/testing/selftests/net/rtnetlink_notification.sh112
-rw-r--r--tools/testing/selftests/net/rxtimestamp.c2
-rw-r--r--tools/testing/selftests/net/sctp_hello.c17
-rwxr-xr-xtools/testing/selftests/net/sctp_vrf.sh73
-rw-r--r--tools/testing/selftests/net/setup_loopback.sh120
-rw-r--r--tools/testing/selftests/net/setup_veth.sh45
-rw-r--r--tools/testing/selftests/net/sk_so_peek_off.c2
-rw-r--r--tools/testing/selftests/net/so_incoming_cpu.c2
-rw-r--r--tools/testing/selftests/net/so_txtime.c2
-rw-r--r--tools/testing/selftests/net/socket.c13
-rwxr-xr-xtools/testing/selftests/net/srv6_end_next_csid_l3vpn_test.sh2
-rwxr-xr-xtools/testing/selftests/net/srv6_end_x_next_csid_l3vpn_test.sh50
-rwxr-xr-xtools/testing/selftests/net/srv6_hencap_red_l3vpn_test.sh2
-rwxr-xr-xtools/testing/selftests/net/srv6_hl2encap_red_l2vpn_test.sh2
-rw-r--r--tools/testing/selftests/net/tap.c2
-rw-r--r--tools/testing/selftests/net/tcp_ao/config2
-rw-r--r--tools/testing/selftests/net/tcp_ao/lib/setup.c2
-rw-r--r--tools/testing/selftests/net/tcp_ao/seq-ext.c2
-rw-r--r--tools/testing/selftests/net/tcp_fastopen_backup_key.c2
-rw-r--r--tools/testing/selftests/net/tcp_port_share.c258
-rwxr-xr-xtools/testing/selftests/net/test_bridge_backup_port.sh31
-rwxr-xr-xtools/testing/selftests/net/test_neigh.sh366
-rwxr-xr-xtools/testing/selftests/net/test_vxlan_fdb_changelink.sh8
-rwxr-xr-xtools/testing/selftests/net/test_vxlan_nh.sh223
-rwxr-xr-xtools/testing/selftests/net/test_vxlan_vnifiltering.sh9
-rw-r--r--tools/testing/selftests/net/tfo.c171
-rwxr-xr-xtools/testing/selftests/net/tfo_passive.sh112
-rw-r--r--tools/testing/selftests/net/tls.c590
-rwxr-xr-xtools/testing/selftests/net/toeplitz.sh199
-rwxr-xr-xtools/testing/selftests/net/toeplitz_client.sh28
-rwxr-xr-xtools/testing/selftests/net/traceroute.sh561
-rw-r--r--tools/testing/selftests/net/tun.c2
-rw-r--r--tools/testing/selftests/net/txtimestamp.c2
-rwxr-xr-xtools/testing/selftests/net/udpgro.sh8
-rw-r--r--tools/testing/selftests/net/udpgso_bench_tx.c2
-rwxr-xr-xtools/testing/selftests/net/vlan_bridge_binding.sh46
-rwxr-xr-xtools/testing/selftests/net/vlan_hw_filter.sh98
-rwxr-xr-xtools/testing/selftests/net/vrf_route_leaking.sh4
-rw-r--r--tools/testing/selftests/net/ynl.mk5
-rw-r--r--tools/testing/selftests/nolibc/Makefile343
-rw-r--r--tools/testing/selftests/nolibc/Makefile.include10
-rw-r--r--tools/testing/selftests/nolibc/Makefile.nolibc382
-rw-r--r--tools/testing/selftests/nolibc/nolibc-test.c72
-rwxr-xr-xtools/testing/selftests/nolibc/run-tests.sh16
-rw-r--r--tools/testing/selftests/openat2/helpers.h2
-rw-r--r--tools/testing/selftests/openat2/openat2_test.c2
-rw-r--r--tools/testing/selftests/openat2/rename_attack_test.c2
-rw-r--r--tools/testing/selftests/openat2/resolve_test.c2
-rw-r--r--tools/testing/selftests/pci_endpoint/pci_endpoint_test.c34
-rw-r--r--tools/testing/selftests/perf_events/.gitignore1
-rw-r--r--tools/testing/selftests/perf_events/Makefile2
-rw-r--r--tools/testing/selftests/perf_events/mmap.c236
-rw-r--r--tools/testing/selftests/perf_events/remove_on_exec.c2
-rw-r--r--tools/testing/selftests/perf_events/sigtrap_threads.c2
-rw-r--r--tools/testing/selftests/perf_events/watermark_signal.c4
-rw-r--r--tools/testing/selftests/pid_namespace/pid_max.c2
-rw-r--r--tools/testing/selftests/pid_namespace/regression_enomem.c2
-rw-r--r--tools/testing/selftests/pidfd/.gitignore2
-rw-r--r--tools/testing/selftests/pidfd/Makefile5
-rw-r--r--tools/testing/selftests/pidfd/config1
-rw-r--r--tools/testing/selftests/pidfd/pidfd.h32
-rw-r--r--tools/testing/selftests/pidfd/pidfd_bind_mount.c2
-rw-r--r--tools/testing/selftests/pidfd/pidfd_fdinfo_test.c2
-rw-r--r--tools/testing/selftests/pidfd/pidfd_file_handle_test.c62
-rw-r--r--tools/testing/selftests/pidfd/pidfd_getfd_test.c2
-rw-r--r--tools/testing/selftests/pidfd/pidfd_info_test.c75
-rw-r--r--tools/testing/selftests/pidfd/pidfd_open_test.c2
-rw-r--r--tools/testing/selftests/pidfd/pidfd_poll_test.c2
-rw-r--r--tools/testing/selftests/pidfd/pidfd_setattr_test.c69
-rw-r--r--tools/testing/selftests/pidfd/pidfd_setns_test.c2
-rw-r--r--tools/testing/selftests/pidfd/pidfd_test.c2
-rw-r--r--tools/testing/selftests/pidfd/pidfd_wait.c2
-rw-r--r--tools/testing/selftests/pidfd/pidfd_xattr_test.c132
-rw-r--r--tools/testing/selftests/powerpc/include/instructions.h2
-rw-r--r--tools/testing/selftests/prctl/set-anon-vma-name-test.c2
-rw-r--r--tools/testing/selftests/prctl/set-process-name.c2
-rw-r--r--tools/testing/selftests/proc/.gitignore3
-rw-r--r--tools/testing/selftests/proc/Makefile3
-rw-r--r--tools/testing/selftests/proc/proc-maps-race.c806
-rw-r--r--tools/testing/selftests/proc/proc-net-dev-lseek.c68
-rw-r--r--tools/testing/selftests/proc/proc-pid-vm.c14
-rw-r--r--tools/testing/selftests/proc/proc-pidns.c211
-rw-r--r--tools/testing/selftests/ptp/testptp.c11
-rw-r--r--tools/testing/selftests/ptrace/.gitignore1
-rw-r--r--tools/testing/selftests/ptrace/get_set_sud.c2
-rw-r--r--tools/testing/selftests/ptrace/get_syscall_info.c2
-rw-r--r--tools/testing/selftests/ptrace/peeksiginfo.c2
-rw-r--r--tools/testing/selftests/ptrace/set_syscall_info.c2
-rw-r--r--tools/testing/selftests/ptrace/vmaccess.c2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/jitter.sh27
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-again.sh56
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-build.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-series.sh116
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm.sh15
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/mktestid.sh29
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/torture.sh79
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/BUSTED3
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/CFLIST1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/SRCU-L10
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/SRCU-L.boot3
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE041
-rw-r--r--tools/testing/selftests/resctrl/resctrl.h2
-rw-r--r--tools/testing/selftests/ring-buffer/map_test.c2
-rw-r--r--tools/testing/selftests/riscv/README24
-rw-r--r--tools/testing/selftests/riscv/abi/pointer_masking.c2
-rw-r--r--tools/testing/selftests/riscv/hwprobe/cbo.c167
-rw-r--r--tools/testing/selftests/riscv/hwprobe/hwprobe.c2
-rw-r--r--tools/testing/selftests/riscv/hwprobe/which-cpus.c2
-rw-r--r--tools/testing/selftests/riscv/mm/mmap_bottomup.c2
-rw-r--r--tools/testing/selftests/riscv/mm/mmap_default.c2
-rw-r--r--tools/testing/selftests/riscv/mm/mmap_test.h2
-rw-r--r--tools/testing/selftests/riscv/sigreturn/sigreturn.c2
-rw-r--r--tools/testing/selftests/riscv/vector/Makefile5
-rw-r--r--tools/testing/selftests/riscv/vector/v_initval.c2
-rw-r--r--tools/testing/selftests/riscv/vector/vstate_prctl.c2
-rw-r--r--tools/testing/selftests/riscv/vector/vstate_ptrace.c134
-rw-r--r--tools/testing/selftests/rseq/basic_percpu_ops_test.c2
-rw-r--r--tools/testing/selftests/rseq/rseq-riscv.h3
-rw-r--r--tools/testing/selftests/rseq/rseq-s390.h39
-rw-r--r--tools/testing/selftests/rseq/rseq.c10
-rw-r--r--tools/testing/selftests/rtc/rtctest.c2
-rwxr-xr-xtools/testing/selftests/run_kselftest.sh14
-rw-r--r--tools/testing/selftests/sched_ext/Makefile1
-rw-r--r--tools/testing/selftests/sched_ext/exit.c8
-rw-r--r--tools/testing/selftests/sched_ext/hotplug.c1
-rw-r--r--tools/testing/selftests/sched_ext/maximal.bpf.c5
-rw-r--r--tools/testing/selftests/sched_ext/peek_dsq.bpf.c251
-rw-r--r--tools/testing/selftests/sched_ext/peek_dsq.c224
-rw-r--r--tools/testing/selftests/seccomp/seccomp_benchmark.c2
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c240
-rw-r--r--tools/testing/selftests/sgx/main.c2
-rw-r--r--tools/testing/selftests/signal/mangle_uc_sigmask.c2
-rw-r--r--tools/testing/selftests/signal/sas.c2
-rw-r--r--tools/testing/selftests/sparc64/drivers/adi-test.c2
-rw-r--r--tools/testing/selftests/sync/sync_test.c2
-rw-r--r--tools/testing/selftests/syscall_user_dispatch/sud_test.c142
-rwxr-xr-xtools/testing/selftests/sysctl/sysctl.sh2
-rw-r--r--tools/testing/selftests/tc-testing/config2
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/police.json2
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json404
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/dualpi2.json254
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/netem.json81
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfq.json36
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc.sh6
-rw-r--r--tools/testing/selftests/tdx/tdx_guest_test.c2
-rw-r--r--tools/testing/selftests/thermal/intel/workload_hint/workload_hint_test.c18
-rw-r--r--tools/testing/selftests/timens/timens.h2
-rw-r--r--tools/testing/selftests/timers/adjtick.c2
-rw-r--r--tools/testing/selftests/timers/alarmtimer-suspend.c2
-rw-r--r--tools/testing/selftests/timers/change_skew.c2
-rw-r--r--tools/testing/selftests/timers/clocksource-switch.c2
-rw-r--r--tools/testing/selftests/timers/freq-step.c2
-rw-r--r--tools/testing/selftests/timers/inconsistency-check.c2
-rw-r--r--tools/testing/selftests/timers/leap-a-day.c2
-rw-r--r--tools/testing/selftests/timers/leapcrash.c2
-rw-r--r--tools/testing/selftests/timers/mqueue-lat.c2
-rw-r--r--tools/testing/selftests/timers/nanosleep.c57
-rw-r--r--tools/testing/selftests/timers/nsleep-lat.c2
-rw-r--r--tools/testing/selftests/timers/posix_timers.c34
-rw-r--r--tools/testing/selftests/timers/raw_skew.c2
-rw-r--r--tools/testing/selftests/timers/rtcpie.c2
-rw-r--r--tools/testing/selftests/timers/set-2038.c2
-rw-r--r--tools/testing/selftests/timers/set-tai.c2
-rw-r--r--tools/testing/selftests/timers/set-timer-lat.c2
-rw-r--r--tools/testing/selftests/timers/set-tz.c2
-rw-r--r--tools/testing/selftests/timers/skew_consistency.c2
-rw-r--r--tools/testing/selftests/timers/threadtest.c2
-rw-r--r--tools/testing/selftests/timers/valid-adjtimex.c2
-rw-r--r--tools/testing/selftests/tmpfs/bug-link-o-tmpfile.c2
-rw-r--r--tools/testing/selftests/tpm2/tpm2.py4
-rw-r--r--tools/testing/selftests/tty/.gitignore1
-rw-r--r--tools/testing/selftests/tty/Makefile6
-rw-r--r--tools/testing/selftests/tty/config1
-rw-r--r--tools/testing/selftests/tty/tty_tiocsti_test.c650
-rw-r--r--tools/testing/selftests/tty/tty_tstamp_update.c2
-rw-r--r--tools/testing/selftests/ublk/Makefile2
-rw-r--r--tools/testing/selftests/ublk/fault_inject.c17
-rw-r--r--tools/testing/selftests/ublk/file_backed.c52
-rw-r--r--tools/testing/selftests/ublk/kublk.c568
-rw-r--r--tools/testing/selftests/ublk/kublk.h222
-rw-r--r--tools/testing/selftests/ublk/null.c50
-rw-r--r--tools/testing/selftests/ublk/stripe.c46
-rwxr-xr-xtools/testing/selftests/ublk/test_common.sh5
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_01.sh4
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_02.sh4
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_12.sh59
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_13.sh20
-rwxr-xr-xtools/testing/selftests/ublk/test_null_01.sh4
-rwxr-xr-xtools/testing/selftests/ublk/test_null_02.sh4
-rwxr-xr-xtools/testing/selftests/ublk/test_stress_03.sh11
-rwxr-xr-xtools/testing/selftests/ublk/test_stress_04.sh13
-rwxr-xr-xtools/testing/selftests/ublk/test_stress_05.sh11
-rw-r--r--tools/testing/selftests/ublk/trace/count_ios_per_tid.bt11
-rw-r--r--tools/testing/selftests/ublk/utils.h68
-rw-r--r--tools/testing/selftests/uevent/uevent_filtering.c2
-rw-r--r--tools/testing/selftests/user_events/abi_test.c2
-rw-r--r--tools/testing/selftests/user_events/dyn_test.c2
-rw-r--r--tools/testing/selftests/user_events/ftrace_test.c2
-rw-r--r--tools/testing/selftests/user_events/perf_test.c4
-rw-r--r--tools/testing/selftests/user_events/user_events_selftests.h2
-rw-r--r--tools/testing/selftests/vDSO/.gitignore1
-rw-r--r--tools/testing/selftests/vDSO/Makefile4
-rw-r--r--tools/testing/selftests/vDSO/vdso_call.h7
-rw-r--r--tools/testing/selftests/vDSO/vdso_config.h6
l---------[-rw-r--r--]tools/testing/selftests/vDSO/vdso_standalone_test_x86.c59
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_abi.c103
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_chacha.c5
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_clock_getres.c124
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_correctness.c4
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_getcpu.c2
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_getrandom.c12
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_gettimeofday.c2
-rw-r--r--tools/testing/selftests/vDSO/vgetrandom-chacha.S2
-rw-r--r--tools/testing/selftests/verification/.gitignore (renamed from tools/bpf/runqslower/.gitignore)2
-rw-r--r--tools/testing/selftests/verification/Makefile8
-rw-r--r--tools/testing/selftests/verification/config1
-rw-r--r--tools/testing/selftests/verification/settings1
-rw-r--r--tools/testing/selftests/verification/test.d/functions39
-rw-r--r--tools/testing/selftests/verification/test.d/rv_monitor_enable_disable.tc75
-rw-r--r--tools/testing/selftests/verification/test.d/rv_monitor_reactor.tc68
-rw-r--r--tools/testing/selftests/verification/test.d/rv_monitors_available.tc18
-rw-r--r--tools/testing/selftests/verification/test.d/rv_wwnr_printk.tc30
-rwxr-xr-xtools/testing/selftests/verification/verificationtest-ktap8
-rw-r--r--tools/testing/selftests/vfio/.gitignore10
-rw-r--r--tools/testing/selftests/vfio/Makefile29
-rw-r--r--tools/testing/selftests/vfio/lib/drivers/dsa/dsa.c416
l---------tools/testing/selftests/vfio/lib/drivers/dsa/registers.h1
l---------tools/testing/selftests/vfio/lib/drivers/ioat/hw.h1
-rw-r--r--tools/testing/selftests/vfio/lib/drivers/ioat/ioat.c235
l---------tools/testing/selftests/vfio/lib/drivers/ioat/registers.h1
-rw-r--r--tools/testing/selftests/vfio/lib/include/libvfio.h26
-rw-r--r--tools/testing/selftests/vfio/lib/include/libvfio/assert.h54
-rw-r--r--tools/testing/selftests/vfio/lib/include/libvfio/iommu.h76
-rw-r--r--tools/testing/selftests/vfio/lib/include/libvfio/iova_allocator.h23
-rw-r--r--tools/testing/selftests/vfio/lib/include/libvfio/vfio_pci_device.h125
-rw-r--r--tools/testing/selftests/vfio/lib/include/libvfio/vfio_pci_driver.h97
-rw-r--r--tools/testing/selftests/vfio/lib/iommu.c465
-rw-r--r--tools/testing/selftests/vfio/lib/iova_allocator.c94
-rw-r--r--tools/testing/selftests/vfio/lib/libvfio.c78
-rw-r--r--tools/testing/selftests/vfio/lib/libvfio.mk29
-rw-r--r--tools/testing/selftests/vfio/lib/vfio_pci_device.c378
-rw-r--r--tools/testing/selftests/vfio/lib/vfio_pci_driver.c112
-rwxr-xr-xtools/testing/selftests/vfio/scripts/cleanup.sh41
-rwxr-xr-xtools/testing/selftests/vfio/scripts/lib.sh42
-rwxr-xr-xtools/testing/selftests/vfio/scripts/run.sh16
-rwxr-xr-xtools/testing/selftests/vfio/scripts/setup.sh48
-rw-r--r--tools/testing/selftests/vfio/vfio_dma_mapping_test.c312
-rw-r--r--tools/testing/selftests/vfio/vfio_iommufd_setup_test.c127
-rw-r--r--tools/testing/selftests/vfio/vfio_pci_device_init_perf_test.c168
-rw-r--r--tools/testing/selftests/vfio/vfio_pci_device_test.c182
-rw-r--r--tools/testing/selftests/vfio/vfio_pci_driver_test.c263
-rw-r--r--tools/testing/selftests/vsock/.gitignore2
-rw-r--r--tools/testing/selftests/vsock/Makefile17
-rw-r--r--tools/testing/selftests/vsock/config111
-rw-r--r--tools/testing/selftests/vsock/settings1
-rwxr-xr-xtools/testing/selftests/vsock/vmtest.sh607
-rw-r--r--tools/testing/selftests/watchdog/watchdog-test.c6
-rw-r--r--tools/testing/selftests/wireguard/qemu/kernel.config8
-rw-r--r--tools/testing/selftests/x86/Makefile2
-rw-r--r--tools/testing/selftests/x86/corrupt_xstate_header.c2
-rw-r--r--tools/testing/selftests/x86/helpers.h2
-rw-r--r--tools/testing/selftests/x86/lam.c2
-rw-r--r--tools/testing/selftests/x86/sigtrap_loop.c101
-rw-r--r--tools/testing/selftests/x86/syscall_numbering.c2
-rw-r--r--tools/testing/selftests/x86/test_mremap_vdso.c2
-rw-r--r--tools/testing/selftests/x86/test_vsyscall.c23
-rw-r--r--tools/testing/selftests/x86/xstate.h2
-rw-r--r--tools/testing/selftests/zram/README1
-rw-r--r--tools/testing/shared/linux.c120
-rw-r--r--tools/testing/shared/linux/idr.h4
-rw-r--r--tools/testing/shared/linux/maple_tree.h6
-rw-r--r--tools/testing/shared/maple-shared.h11
-rw-r--r--tools/testing/shared/maple-shim.c7
-rw-r--r--tools/testing/shared/shared.mk6
-rw-r--r--tools/testing/vma/linux/atomic.h17
-rw-r--r--tools/testing/vma/vma.c362
-rw-r--r--tools/testing/vma/vma_internal.h980
-rw-r--r--tools/testing/vsock/Makefile1
-rw-r--r--tools/testing/vsock/util.c113
-rw-r--r--tools/testing/vsock/util.h35
-rw-r--r--tools/testing/vsock/vsock_test.c358
-rw-r--r--tools/thermal/thermal-engine/thermal-engine.c2
-rw-r--r--tools/tracing/latency/Makefile.config8
-rw-r--r--tools/tracing/latency/latency-collector.c2
-rw-r--r--tools/tracing/rtla/Makefile.config8
-rw-r--r--tools/tracing/rtla/Makefile.rtla2
-rw-r--r--tools/tracing/rtla/src/Build2
-rw-r--r--tools/tracing/rtla/src/actions.c260
-rw-r--r--tools/tracing/rtla/src/actions.h52
-rw-r--r--tools/tracing/rtla/src/common.c350
-rw-r--r--tools/tracing/rtla/src/common.h158
-rw-r--r--tools/tracing/rtla/src/osnoise.c101
-rw-r--r--tools/tracing/rtla/src/osnoise.h114
-rw-r--r--tools/tracing/rtla/src/osnoise_hist.c463
-rw-r--r--tools/tracing/rtla/src/osnoise_top.c379
-rw-r--r--tools/tracing/rtla/src/timerlat.bpf.c16
-rw-r--r--tools/tracing/rtla/src/timerlat.c226
-rw-r--r--tools/tracing/rtla/src/timerlat.h69
-rw-r--r--tools/tracing/rtla/src/timerlat_bpf.c35
-rw-r--r--tools/tracing/rtla/src/timerlat_bpf.h3
-rw-r--r--tools/tracing/rtla/src/timerlat_hist.c740
-rw-r--r--tools/tracing/rtla/src/timerlat_top.c685
-rw-r--r--tools/tracing/rtla/src/timerlat_u.c12
-rw-r--r--tools/tracing/rtla/src/trace.h3
-rw-r--r--tools/tracing/rtla/src/utils.c41
-rw-r--r--tools/tracing/rtla/src/utils.h2
-rw-r--r--tools/tracing/rtla/tests/engine.sh33
-rw-r--r--tools/tracing/rtla/tests/hwnoise.t13
-rw-r--r--tools/tracing/rtla/tests/osnoise.t35
-rwxr-xr-xtools/tracing/rtla/tests/scripts/check-priority.sh8
-rw-r--r--tools/tracing/rtla/tests/timerlat.t45
-rw-r--r--tools/usb/usbip/src/usbipd.c4
-rw-r--r--tools/verification/dot2/Makefile26
-rw-r--r--tools/verification/dot2/dot2k53
-rw-r--r--tools/verification/models/rtapp/pagefault.ltl1
-rw-r--r--tools/verification/models/rtapp/sleep.ltl22
-rw-r--r--tools/verification/models/sched/nrp.dot29
-rw-r--r--tools/verification/models/sched/opid.dot35
-rw-r--r--tools/verification/models/sched/sncid.dot18
-rw-r--r--tools/verification/models/sched/sssw.dot30
-rw-r--r--tools/verification/models/sched/sts.dot38
-rw-r--r--tools/verification/models/sched/tss.dot18
-rw-r--r--tools/verification/rv/src/in_kernel.c4
-rw-r--r--tools/verification/rv/src/rv.c1
-rw-r--r--tools/verification/rvgen/.gitignore3
-rw-r--r--tools/verification/rvgen/Makefile27
-rw-r--r--tools/verification/rvgen/__main__.py67
-rw-r--r--tools/verification/rvgen/dot2c (renamed from tools/verification/dot2/dot2c)2
-rw-r--r--tools/verification/rvgen/rvgen/automata.py (renamed from tools/verification/dot2/automata.py)0
-rw-r--r--tools/verification/rvgen/rvgen/container.py32
-rw-r--r--tools/verification/rvgen/rvgen/dot2c.py (renamed from tools/verification/dot2/dot2c.py)22
-rw-r--r--tools/verification/rvgen/rvgen/dot2k.py129
-rw-r--r--tools/verification/rvgen/rvgen/generator.py (renamed from tools/verification/dot2/dot2k.py)265
-rw-r--r--tools/verification/rvgen/rvgen/ltl2ba.py566
-rw-r--r--tools/verification/rvgen/rvgen/ltl2k.py271
-rw-r--r--tools/verification/rvgen/rvgen/templates/Kconfig (renamed from tools/verification/dot2/dot2k_templates/Kconfig)0
-rw-r--r--tools/verification/rvgen/rvgen/templates/container/Kconfig5
-rw-r--r--tools/verification/rvgen/rvgen/templates/container/main.c (renamed from tools/verification/dot2/dot2k_templates/main_container.c)3
-rw-r--r--tools/verification/rvgen/rvgen/templates/container/main.h (renamed from tools/verification/dot2/dot2k_templates/main_container.h)0
-rw-r--r--tools/verification/rvgen/rvgen/templates/dot2k/main.c (renamed from tools/verification/dot2/dot2k_templates/main.c)3
-rw-r--r--tools/verification/rvgen/rvgen/templates/dot2k/trace.h (renamed from tools/verification/dot2/dot2k_templates/trace.h)0
-rw-r--r--tools/verification/rvgen/rvgen/templates/ltl2k/main.c102
-rw-r--r--tools/verification/rvgen/rvgen/templates/ltl2k/trace.h14
-rw-r--r--tools/virtio/linux/compiler.h2
-rw-r--r--tools/virtio/linux/kmsan.h2
2472 files changed, 180961 insertions, 43293 deletions
diff --git a/tools/Makefile b/tools/Makefile
index c31cbbd12c45..cb40961a740f 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -14,6 +14,7 @@ help:
@echo ' counter - counter tools'
@echo ' cpupower - a tool for all things x86 CPU power'
@echo ' debugging - tools for debugging'
+ @echo ' dma - tools for DMA mapping'
@echo ' firewire - the userspace part of nosy, an IEEE-1394 traffic sniffer'
@echo ' firmware - Firmware tools'
@echo ' freefall - laptop accelerometer program for disk protection'
@@ -69,7 +70,7 @@ acpi: FORCE
cpupower: FORCE
$(call descend,power/$@)
-counter firewire hv guest bootconfig spi usb virtio mm bpf iio gpio objtool leds wmi firmware debugging tracing: FORCE
+counter dma firewire hv guest bootconfig spi usb virtio mm bpf iio gpio objtool leds wmi firmware debugging tracing: FORCE
$(call descend,$@)
bpf/%: FORCE
@@ -122,7 +123,7 @@ kvm_stat: FORCE
ynl: FORCE
$(call descend,net/ynl)
-all: acpi counter cpupower gpio hv firewire \
+all: acpi counter cpupower dma gpio hv firewire \
perf selftests bootconfig spi turbostat usb \
virtio mm bpf x86_energy_perf_policy \
tmon freefall iio objtool kvm_stat wmi \
@@ -134,7 +135,7 @@ acpi_install:
cpupower_install:
$(call descend,power/$(@:_install=),install)
-counter_install firewire_install gpio_install hv_install iio_install perf_install bootconfig_install spi_install usb_install virtio_install mm_install bpf_install objtool_install wmi_install debugging_install tracing_install:
+counter_install dma_install firewire_install gpio_install hv_install iio_install perf_install bootconfig_install spi_install usb_install virtio_install mm_install bpf_install objtool_install wmi_install debugging_install tracing_install:
$(call descend,$(@:_install=),install)
selftests_install:
@@ -164,7 +165,7 @@ kvm_stat_install:
ynl_install:
$(call descend,net/$(@:_install=),install)
-install: acpi_install counter_install cpupower_install gpio_install \
+install: acpi_install counter_install cpupower_install dma_install gpio_install \
hv_install firewire_install iio_install \
perf_install selftests_install turbostat_install usb_install \
virtio_install mm_install bpf_install x86_energy_perf_policy_install \
@@ -178,7 +179,7 @@ acpi_clean:
cpupower_clean:
$(call descend,power/cpupower,clean)
-counter_clean hv_clean firewire_clean bootconfig_clean spi_clean usb_clean virtio_clean mm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean firmware_clean debugging_clean tracing_clean:
+counter_clean dma_clean hv_clean firewire_clean bootconfig_clean spi_clean usb_clean virtio_clean mm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean firmware_clean debugging_clean tracing_clean:
$(call descend,$(@:_clean=),clean)
libapi_clean:
@@ -224,7 +225,7 @@ build_clean:
ynl_clean:
$(call descend,net/$(@:_clean=),clean)
-clean: acpi_clean counter_clean cpupower_clean hv_clean firewire_clean \
+clean: acpi_clean counter_clean cpupower_clean dma_clean hv_clean firewire_clean \
perf_clean selftests_clean turbostat_clean bootconfig_clean spi_clean usb_clean virtio_clean \
mm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
freefall_clean build_clean libbpf_clean libsubcmd_clean \
diff --git a/tools/accounting/Makefile b/tools/accounting/Makefile
index 11def1ad046c..20bbd461515e 100644
--- a/tools/accounting/Makefile
+++ b/tools/accounting/Makefile
@@ -2,7 +2,7 @@
CC := $(CROSS_COMPILE)gcc
CFLAGS := -I../../usr/include
-PROGS := getdelays procacct
+PROGS := getdelays procacct delaytop
all: $(PROGS)
diff --git a/tools/accounting/delaytop.c b/tools/accounting/delaytop.c
new file mode 100644
index 000000000000..72cc500b44b1
--- /dev/null
+++ b/tools/accounting/delaytop.c
@@ -0,0 +1,1145 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * delaytop.c - system-wide delay monitoring tool.
+ *
+ * This tool provides real-time monitoring and statistics of
+ * system, container, and task-level delays, including CPU,
+ * memory, IO, and IRQ. It offers an interactive (top-like)
+ * view and can report delay information for the whole system,
+ * specific containers (cgroups), or individual tasks (PIDs).
+ *
+ * Key features:
+ * - Collects per-task delay accounting statistics via taskstats.
+ * - Collects system-wide PSI information.
+ * - Supports sorting and filtering.
+ * - Supports interactive (screen-refresh) and one-shot (-o) output.
+ *
+ * Copyright (C) Fan Yu, ZTE Corp. 2025
+ * Copyright (C) Wang Yaxin, ZTE Corp. 2025
+ *
+ * Compile with
+ * gcc -I/usr/src/linux/include delaytop.c -o delaytop
+ */
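+
+/*
+ * Example invocation (flags as parsed in parse_args() below):
+ *
+ *   ./delaytop -d 1 -n 5 -P 10 -s mem
+ *
+ * refreshes every second for five iterations, listing the top 10
+ * processes sorted by average memory delay.
+ */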
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <signal.h>
+#include <time.h>
+#include <dirent.h>
+#include <ctype.h>
+#include <stdbool.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/socket.h>
+#include <sys/select.h>
+#include <termios.h>
+#include <limits.h>
+#include <linux/genetlink.h>
+#include <linux/taskstats.h>
+#include <linux/cgroupstats.h>
+#include <stddef.h>
+
+#define PSI_PATH "/proc/pressure"
+#define PSI_CPU_PATH "/proc/pressure/cpu"
+#define PSI_MEMORY_PATH "/proc/pressure/memory"
+#define PSI_IO_PATH "/proc/pressure/io"
+#define PSI_IRQ_PATH "/proc/pressure/irq"
+
+#define NLA_NEXT(na) ((struct nlattr *)((char *)(na) + NLA_ALIGN((na)->nla_len)))
+#define NLA_DATA(na) ((void *)((char *)(na) + NLA_HDRLEN))
+#define NLA_PAYLOAD(len) ((len) - NLA_HDRLEN)
+
+#define GENLMSG_DATA(glh) ((void *)(NLMSG_DATA(glh) + GENL_HDRLEN))
+#define GENLMSG_PAYLOAD(glh) (NLMSG_PAYLOAD(glh, 0) - GENL_HDRLEN)
+
+#define TASK_COMM_LEN 16
+#define MAX_MSG_SIZE 1024
+#define MAX_TASKS 1000
+#define MAX_BUF_LEN 256
+#define SET_TASK_STAT(task_count, field) tasks[task_count].field = stats.field
+#define BOOL_FPRINT(stream, fmt, ...) \
+({ \
+ int ret = fprintf(stream, fmt, ##__VA_ARGS__); \
+ ret >= 0; \
+})
+#define TASK_AVG(task, field) average_ms((task).field##_delay_total, (task).field##_count)
+#define PSI_LINE_FORMAT "%-12s %6.1f%%/%6.1f%%/%6.1f%%/%8llu(ms)\n"
+#define DELAY_FMT_DEFAULT "%8.2f %8.2f %8.2f %8.2f\n"
+#define DELAY_FMT_MEMVERBOSE "%8.2f %8.2f %8.2f %8.2f %8.2f %8.2f\n"
+#define SORT_FIELD(name, cmd, modes) \
+ {#name, #cmd, \
+ offsetof(struct task_info, name##_delay_total), \
+ offsetof(struct task_info, name##_count), \
+ modes}
+#define END_FIELD {NULL, 0, 0}
+
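+/*
+ * For illustration, SORT_FIELD(cpu, c, MODE_DEFAULT) expands to
+ *   {"cpu", "c", offsetof(struct task_info, cpu_delay_total),
+ *    offsetof(struct task_info, cpu_count), MODE_DEFAULT},
+ * so a new sort key only needs a matching pair of task_info members.
+ */
+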
+/* Display mode types */
+#define MODE_TYPE_ALL (0xFFFFFFFF)
+#define MODE_DEFAULT (1 << 0)
+#define MODE_MEMVERBOSE (1 << 1)
+
+/* PSI statistics structure */
+struct psi_stats {
+ double cpu_some_avg10, cpu_some_avg60, cpu_some_avg300;
+ unsigned long long cpu_some_total;
+ double cpu_full_avg10, cpu_full_avg60, cpu_full_avg300;
+ unsigned long long cpu_full_total;
+ double memory_some_avg10, memory_some_avg60, memory_some_avg300;
+ unsigned long long memory_some_total;
+ double memory_full_avg10, memory_full_avg60, memory_full_avg300;
+ unsigned long long memory_full_total;
+ double io_some_avg10, io_some_avg60, io_some_avg300;
+ unsigned long long io_some_total;
+ double io_full_avg10, io_full_avg60, io_full_avg300;
+ unsigned long long io_full_total;
+ double irq_full_avg10, irq_full_avg60, irq_full_avg300;
+ unsigned long long irq_full_total;
+};
+
+/* Task delay information structure */
+struct task_info {
+ int pid;
+ int tgid;
+ char command[TASK_COMM_LEN];
+ unsigned long long cpu_count;
+ unsigned long long cpu_delay_total;
+ unsigned long long blkio_count;
+ unsigned long long blkio_delay_total;
+ unsigned long long swapin_count;
+ unsigned long long swapin_delay_total;
+ unsigned long long freepages_count;
+ unsigned long long freepages_delay_total;
+ unsigned long long thrashing_count;
+ unsigned long long thrashing_delay_total;
+ unsigned long long compact_count;
+ unsigned long long compact_delay_total;
+ unsigned long long wpcopy_count;
+ unsigned long long wpcopy_delay_total;
+ unsigned long long irq_count;
+ unsigned long long irq_delay_total;
+ unsigned long long mem_count;
+ unsigned long long mem_delay_total;
+};
+
+/* Container statistics structure */
+struct container_stats {
+ int nr_sleeping; /* Number of sleeping processes */
+ int nr_running; /* Number of running processes */
+ int nr_stopped; /* Number of stopped processes */
+ int nr_uninterruptible; /* Number of uninterruptible processes */
+ int nr_io_wait; /* Number of processes in IO wait */
+};
+
+/* Delay field structure */
+struct field_desc {
+ const char *name; /* Field name for cmdline argument */
+ const char *cmd_char; /* Interactive command */
+ unsigned long total_offset; /* Offset of total delay in task_info */
+ unsigned long count_offset; /* Offset of count in task_info */
+ size_t supported_modes; /* Supported display modes */
+};
+
+/* Program settings structure */
+struct config {
+ int delay; /* Update interval in seconds */
+ int iterations; /* Number of iterations, 0 == infinite */
+ int max_processes; /* Maximum number of processes to show */
+ int output_one_time; /* Output once and exit */
+ int monitor_pid; /* Monitor specific PID */
+ char *container_path; /* Path to container cgroup */
+ const struct field_desc *sort_field; /* Current sort field */
+ size_t display_mode; /* Current display mode */
+};
+
+/* Global variables */
+static struct config cfg;
+static struct psi_stats psi;
+static struct task_info tasks[MAX_TASKS];
+static int task_count;
+static int running = 1;
+static struct container_stats container_stats;
+static const struct field_desc sort_fields[] = {
+ SORT_FIELD(cpu, c, MODE_DEFAULT),
+ SORT_FIELD(blkio, i, MODE_DEFAULT),
+ SORT_FIELD(irq, q, MODE_DEFAULT),
+ SORT_FIELD(mem, m, MODE_DEFAULT | MODE_MEMVERBOSE),
+ SORT_FIELD(swapin, s, MODE_MEMVERBOSE),
+ SORT_FIELD(freepages, r, MODE_MEMVERBOSE),
+ SORT_FIELD(thrashing, t, MODE_MEMVERBOSE),
+ SORT_FIELD(compact, p, MODE_MEMVERBOSE),
+ SORT_FIELD(wpcopy, w, MODE_MEMVERBOSE),
+ END_FIELD
+};
+static int sort_selected;
+
+/* Netlink socket variables */
+static int nl_sd = -1;
+static int family_id;
+
+/* Set terminal to non-canonical mode for q-to-quit */
+static struct termios orig_termios;
+static void enable_raw_mode(void)
+{
+ struct termios raw;
+
+ tcgetattr(STDIN_FILENO, &orig_termios);
+ raw = orig_termios;
+ raw.c_lflag &= ~(ICANON | ECHO);
+ tcsetattr(STDIN_FILENO, TCSAFLUSH, &raw);
+}
+static void disable_raw_mode(void)
+{
+ tcsetattr(STDIN_FILENO, TCSAFLUSH, &orig_termios);
+}
+
+/* Find field descriptor by command line */
+static const struct field_desc *get_field_by_cmd_char(char ch)
+{
+ const struct field_desc *field;
+
+ for (field = sort_fields; field->name != NULL; field++) {
+ if (field->cmd_char[0] == ch)
+ return field;
+ }
+
+ return NULL;
+}
+
+/* Find field descriptor by name */
+static const struct field_desc *get_field_by_name(const char *name)
+{
+	const struct field_desc *field;
+
+	for (field = sort_fields; field->name != NULL; field++) {
+		if (strcmp(field->name, name) == 0)
+			return field;
+	}
+
+	return NULL;
+}
+
+/* Find display name for a field descriptor */
+static const char *get_name_by_field(const struct field_desc *field)
+{
+ return field ? field->name : "UNKNOWN";
+}
+
+/* Generate string of available field names */
+static void display_available_fields(size_t mode)
+{
+ const struct field_desc *field;
+ char buf[MAX_BUF_LEN];
+
+ buf[0] = '\0';
+
+	for (field = sort_fields; field->name != NULL; field++) {
+		if (!(field->supported_modes & mode))
+			continue;
+		/* Separator only between names; strncat always NUL-terminates */
+		if (buf[0] != '\0')
+			strncat(buf, "|", MAX_BUF_LEN - strlen(buf) - 1);
+		strncat(buf, field->name, MAX_BUF_LEN - strlen(buf) - 1);
+	}
+
+ fprintf(stderr, "Available fields: %s\n", buf);
+}
+
+/* Display usage information and command line options */
+static void usage(void)
+{
+ printf("Usage: delaytop [Options]\n"
+ "Options:\n"
+ " -h, --help Show this help message and exit\n"
+ " -d, --delay=SECONDS Set refresh interval (default: 2 seconds, min: 1)\n"
+ " -n, --iterations=COUNT Set number of updates (default: 0 = infinite)\n"
+ " -P, --processes=NUMBER Set maximum number of processes to show (default: 20, max: 1000)\n"
+ " -o, --once Display once and exit\n"
+ " -p, --pid=PID Monitor only the specified PID\n"
+ " -C, --container=PATH Monitor the container at specified cgroup path\n"
+ " -s, --sort=FIELD Sort by delay field (default: cpu)\n"
+ " -M, --memverbose Display memory detailed information\n");
+ exit(0);
+}
+
+/* Parse command line arguments and set configuration */
+static void parse_args(int argc, char **argv)
+{
+ int c;
+ const struct field_desc *field;
+ struct option long_options[] = {
+ {"help", no_argument, 0, 'h'},
+ {"delay", required_argument, 0, 'd'},
+ {"iterations", required_argument, 0, 'n'},
+ {"pid", required_argument, 0, 'p'},
+ {"once", no_argument, 0, 'o'},
+ {"processes", required_argument, 0, 'P'},
+ {"sort", required_argument, 0, 's'},
+ {"container", required_argument, 0, 'C'},
+ {"memverbose", no_argument, 0, 'M'},
+ {0, 0, 0, 0}
+ };
+
+ /* Set defaults */
+ cfg.delay = 2;
+ cfg.iterations = 0;
+ cfg.max_processes = 20;
+ cfg.sort_field = &sort_fields[0]; /* Default sorted by CPU delay */
+ cfg.output_one_time = 0;
+ cfg.monitor_pid = 0; /* 0 means monitor all PIDs */
+ cfg.container_path = NULL;
+ cfg.display_mode = MODE_DEFAULT;
+
+ while (1) {
+ int option_index = 0;
+
+ c = getopt_long(argc, argv, "hd:n:p:oP:C:s:M", long_options, &option_index);
+ if (c == -1)
+ break;
+
+ switch (c) {
+ case 'h':
+ usage();
+ break;
+ case 'd':
+ cfg.delay = atoi(optarg);
+ if (cfg.delay < 1) {
+ fprintf(stderr, "Error: delay must be >= 1.\n");
+ exit(1);
+ }
+ break;
+ case 'n':
+ cfg.iterations = atoi(optarg);
+ if (cfg.iterations < 0) {
+ fprintf(stderr, "Error: iterations must be >= 0.\n");
+ exit(1);
+ }
+ break;
+ case 'p':
+ cfg.monitor_pid = atoi(optarg);
+ if (cfg.monitor_pid < 1) {
+ fprintf(stderr, "Error: pid must be >= 1.\n");
+ exit(1);
+ }
+ break;
+ case 'o':
+ cfg.output_one_time = 1;
+ break;
+ case 'P':
+ cfg.max_processes = atoi(optarg);
+ if (cfg.max_processes < 1) {
+ fprintf(stderr, "Error: processes must be >= 1.\n");
+ exit(1);
+ }
+ if (cfg.max_processes > MAX_TASKS) {
+ fprintf(stderr, "Warning: processes capped to %d.\n",
+ MAX_TASKS);
+ cfg.max_processes = MAX_TASKS;
+ }
+ break;
+ case 'C':
+ cfg.container_path = strdup(optarg);
+ break;
+ case 's':
+ if (strlen(optarg) == 0) {
+ fprintf(stderr, "Error: empty sort field\n");
+ exit(1);
+ }
+
+ field = get_field_by_name(optarg);
+ /* Show available fields if invalid option provided */
+ if (!field) {
+ fprintf(stderr, "Error: invalid sort field '%s'\n", optarg);
+ display_available_fields(MODE_TYPE_ALL);
+ exit(1);
+ }
+
+ cfg.sort_field = field;
+ break;
+ case 'M':
+ cfg.display_mode = MODE_MEMVERBOSE;
+ cfg.sort_field = get_field_by_name("mem");
+ break;
+ default:
+ fprintf(stderr, "Try 'delaytop --help' for more information.\n");
+ exit(1);
+ }
+ }
+}
+
+/* Calculate average delay in milliseconds for overall memory */
+static void set_mem_delay_total(struct task_info *t)
+{
+ t->mem_delay_total = t->swapin_delay_total +
+ t->freepages_delay_total +
+ t->thrashing_delay_total +
+ t->compact_delay_total +
+ t->wpcopy_delay_total;
+}
+
+static void set_mem_count(struct task_info *t)
+{
+ t->mem_count = t->swapin_count +
+ t->freepages_count +
+ t->thrashing_count +
+ t->compact_count +
+ t->wpcopy_count;
+}
+
+/* Create a raw netlink socket and bind */
+static int create_nl_socket(void)
+{
+ int fd;
+ struct sockaddr_nl local;
+
+ fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);
+ if (fd < 0)
+ return -1;
+
+ memset(&local, 0, sizeof(local));
+ local.nl_family = AF_NETLINK;
+
+ if (bind(fd, (struct sockaddr *) &local, sizeof(local)) < 0) {
+ fprintf(stderr, "Failed to bind socket when create nl_socket\n");
+ close(fd);
+ return -1;
+ }
+
+ return fd;
+}
+
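+/*
+ * Each request built by send_cmd() is framed as
+ *   struct nlmsghdr | struct genlmsghdr | struct nlattr | payload
+ * with the attribute length rounded up via NLA_ALIGN(); replies are
+ * parsed by walking the same framing in the opposite direction.
+ */
+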
+/* Send a command via netlink */
+static int send_cmd(int sd, __u16 nlmsg_type, __u32 nlmsg_pid,
+ __u8 genl_cmd, __u16 nla_type,
+ void *nla_data, int nla_len)
+{
+ struct sockaddr_nl nladdr;
+ struct nlattr *na;
+ int r, buflen;
+ char *buf;
+
+ struct {
+ struct nlmsghdr n;
+ struct genlmsghdr g;
+ char buf[MAX_MSG_SIZE];
+ } msg;
+
+ msg.n.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN);
+ msg.n.nlmsg_type = nlmsg_type;
+ msg.n.nlmsg_flags = NLM_F_REQUEST;
+ msg.n.nlmsg_seq = 0;
+ msg.n.nlmsg_pid = nlmsg_pid;
+ msg.g.cmd = genl_cmd;
+ msg.g.version = 0x1;
+ na = (struct nlattr *) GENLMSG_DATA(&msg);
+ na->nla_type = nla_type;
+ na->nla_len = nla_len + NLA_HDRLEN;
+ memcpy(NLA_DATA(na), nla_data, nla_len);
+ msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len);
+
+ buf = (char *) &msg;
+ buflen = msg.n.nlmsg_len;
+ memset(&nladdr, 0, sizeof(nladdr));
+ nladdr.nl_family = AF_NETLINK;
+ while ((r = sendto(sd, buf, buflen, 0, (struct sockaddr *) &nladdr,
+ sizeof(nladdr))) < buflen) {
+ if (r > 0) {
+ buf += r;
+ buflen -= r;
+ } else if (errno != EAGAIN)
+ return -1;
+ }
+ return 0;
+}
+
+/* Get family ID for taskstats via netlink */
+static int get_family_id(int sd)
+{
+ struct {
+ struct nlmsghdr n;
+ struct genlmsghdr g;
+ char buf[256];
+ } ans;
+
+ int id = 0, rc;
+ struct nlattr *na;
+ int rep_len;
+ char name[100];
+
+ strncpy(name, TASKSTATS_GENL_NAME, sizeof(name) - 1);
+ name[sizeof(name) - 1] = '\0';
+ rc = send_cmd(sd, GENL_ID_CTRL, getpid(), CTRL_CMD_GETFAMILY,
+ CTRL_ATTR_FAMILY_NAME, (void *)name,
+		      strlen(TASKSTATS_GENL_NAME) + 1);
+ if (rc < 0) {
+ fprintf(stderr, "Failed to send cmd for family id\n");
+ return 0;
+ }
+
+ rep_len = recv(sd, &ans, sizeof(ans), 0);
+	if (rep_len < 0 || ans.n.nlmsg_type == NLMSG_ERROR ||
+	    !NLMSG_OK((&ans.n), rep_len)) {
+ fprintf(stderr, "Failed to receive response for family id\n");
+ return 0;
+ }
+
+ na = (struct nlattr *) GENLMSG_DATA(&ans);
+	na = NLA_NEXT(na);
+ if (na->nla_type == CTRL_ATTR_FAMILY_ID)
+ id = *(__u16 *) NLA_DATA(na);
+ return id;
+}
+
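+/*
+ * Each /proc/pressure file contains lines of the form parsed below,
+ * for example (values illustrative):
+ *
+ *   some avg10=0.12 avg60=0.08 avg300=0.02 total=123456
+ *   full avg10=0.00 avg60=0.00 avg300=0.00 total=7890
+ *
+ * where "total" is cumulative stall time in microseconds.
+ */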
+static int read_psi_stats(void)
+{
+ FILE *fp;
+ char line[256];
+ int ret = 0;
+ int error_count = 0;
+
+ /* Check if PSI path exists */
+ if (access(PSI_PATH, F_OK) != 0) {
+ fprintf(stderr, "Error: PSI interface not found at %s\n", PSI_PATH);
+ fprintf(stderr, "Please ensure your kernel supports PSI (Pressure Stall Information)\n");
+ return -1;
+ }
+
+ /* Zero all fields */
+ memset(&psi, 0, sizeof(psi));
+
+ /* CPU pressure */
+ fp = fopen(PSI_CPU_PATH, "r");
+ if (fp) {
+ while (fgets(line, sizeof(line), fp)) {
+ if (strncmp(line, "some", 4) == 0) {
+ ret = sscanf(line, "some avg10=%lf avg60=%lf avg300=%lf total=%llu",
+ &psi.cpu_some_avg10, &psi.cpu_some_avg60,
+ &psi.cpu_some_avg300, &psi.cpu_some_total);
+ if (ret != 4) {
+ fprintf(stderr, "Failed to parse CPU some PSI data\n");
+ error_count++;
+ }
+ } else if (strncmp(line, "full", 4) == 0) {
+ ret = sscanf(line, "full avg10=%lf avg60=%lf avg300=%lf total=%llu",
+ &psi.cpu_full_avg10, &psi.cpu_full_avg60,
+ &psi.cpu_full_avg300, &psi.cpu_full_total);
+ if (ret != 4) {
+ fprintf(stderr, "Failed to parse CPU full PSI data\n");
+ error_count++;
+ }
+ }
+ }
+ fclose(fp);
+ } else {
+ fprintf(stderr, "Warning: Failed to open %s\n", PSI_CPU_PATH);
+ error_count++;
+ }
+
+ /* Memory pressure */
+ fp = fopen(PSI_MEMORY_PATH, "r");
+ if (fp) {
+ while (fgets(line, sizeof(line), fp)) {
+ if (strncmp(line, "some", 4) == 0) {
+ ret = sscanf(line, "some avg10=%lf avg60=%lf avg300=%lf total=%llu",
+ &psi.memory_some_avg10, &psi.memory_some_avg60,
+ &psi.memory_some_avg300, &psi.memory_some_total);
+ if (ret != 4) {
+ fprintf(stderr, "Failed to parse Memory some PSI data\n");
+ error_count++;
+ }
+ } else if (strncmp(line, "full", 4) == 0) {
+ ret = sscanf(line, "full avg10=%lf avg60=%lf avg300=%lf total=%llu",
+ &psi.memory_full_avg10, &psi.memory_full_avg60,
+ &psi.memory_full_avg300, &psi.memory_full_total);
+ if (ret != 4) {
+ fprintf(stderr, "Failed to parse Memory full PSI data\n");
+ error_count++;
+ }
+ }
+ }
+ fclose(fp);
+ } else {
+ fprintf(stderr, "Warning: Failed to open %s\n", PSI_MEMORY_PATH);
+ error_count++;
+ }
+
+ /* IO pressure */
+ fp = fopen(PSI_IO_PATH, "r");
+ if (fp) {
+ while (fgets(line, sizeof(line), fp)) {
+ if (strncmp(line, "some", 4) == 0) {
+ ret = sscanf(line, "some avg10=%lf avg60=%lf avg300=%lf total=%llu",
+ &psi.io_some_avg10, &psi.io_some_avg60,
+ &psi.io_some_avg300, &psi.io_some_total);
+ if (ret != 4) {
+ fprintf(stderr, "Failed to parse IO some PSI data\n");
+ error_count++;
+ }
+ } else if (strncmp(line, "full", 4) == 0) {
+ ret = sscanf(line, "full avg10=%lf avg60=%lf avg300=%lf total=%llu",
+ &psi.io_full_avg10, &psi.io_full_avg60,
+ &psi.io_full_avg300, &psi.io_full_total);
+ if (ret != 4) {
+ fprintf(stderr, "Failed to parse IO full PSI data\n");
+ error_count++;
+ }
+ }
+ }
+ fclose(fp);
+ } else {
+ fprintf(stderr, "Warning: Failed to open %s\n", PSI_IO_PATH);
+ error_count++;
+ }
+
+ /* IRQ pressure (only full) */
+ fp = fopen(PSI_IRQ_PATH, "r");
+ if (fp) {
+ while (fgets(line, sizeof(line), fp)) {
+ if (strncmp(line, "full", 4) == 0) {
+ ret = sscanf(line, "full avg10=%lf avg60=%lf avg300=%lf total=%llu",
+ &psi.irq_full_avg10, &psi.irq_full_avg60,
+ &psi.irq_full_avg300, &psi.irq_full_total);
+ if (ret != 4) {
+ fprintf(stderr, "Failed to parse IRQ full PSI data\n");
+ error_count++;
+ }
+ }
+ }
+ fclose(fp);
+ } else {
+ fprintf(stderr, "Warning: Failed to open %s\n", PSI_IRQ_PATH);
+ error_count++;
+ }
+
+ /* Return error count: 0 means success, >0 means warnings, -1 means fatal error */
+ if (error_count > 0) {
+ fprintf(stderr, "PSI stats reading completed with %d warnings\n", error_count);
+ return error_count;
+ }
+
+ return 0;
+}
+
+static int read_comm(int pid, char *comm_buf, size_t buf_size)
+{
+ char path[64];
+ int ret = -1;
+ size_t len;
+ FILE *fp;
+
+ snprintf(path, sizeof(path), "/proc/%d/comm", pid);
+ fp = fopen(path, "r");
+ if (!fp) {
+ fprintf(stderr, "Failed to open comm file /proc/%d/comm\n", pid);
+ return ret;
+ }
+
+ if (fgets(comm_buf, buf_size, fp)) {
+ len = strlen(comm_buf);
+ if (len > 0 && comm_buf[len - 1] == '\n')
+ comm_buf[len - 1] = '\0';
+ ret = 0;
+ }
+
+ fclose(fp);
+
+ return ret;
+}
+
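+/* Query taskstats for one PID via netlink and record its delays in tasks[] */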
+static void fetch_and_fill_task_info(int pid, const char *comm)
+{
+ struct {
+ struct nlmsghdr n;
+ struct genlmsghdr g;
+ char buf[MAX_MSG_SIZE];
+ } resp;
+ struct taskstats stats;
+ struct nlattr *nested;
+ struct nlattr *na;
+ int nested_len;
+ int nl_len;
+ int rc;
+
+ /* Send request for task stats */
+ if (send_cmd(nl_sd, family_id, getpid(), TASKSTATS_CMD_GET,
+ TASKSTATS_CMD_ATTR_PID, &pid, sizeof(pid)) < 0) {
+ fprintf(stderr, "Failed to send request for task stats\n");
+ return;
+ }
+
+ /* Receive response */
+ rc = recv(nl_sd, &resp, sizeof(resp), 0);
+ if (rc < 0 || resp.n.nlmsg_type == NLMSG_ERROR) {
+ fprintf(stderr, "Failed to receive response for task stats\n");
+ return;
+ }
+
+ /* Parse response */
+ nl_len = GENLMSG_PAYLOAD(&resp.n);
+ na = (struct nlattr *) GENLMSG_DATA(&resp);
+ while (nl_len > 0) {
+ if (na->nla_type == TASKSTATS_TYPE_AGGR_PID) {
+ nested = (struct nlattr *) NLA_DATA(na);
+ nested_len = NLA_PAYLOAD(na->nla_len);
+ while (nested_len > 0) {
+ if (nested->nla_type == TASKSTATS_TYPE_STATS) {
+ memcpy(&stats, NLA_DATA(nested), sizeof(stats));
+ if (task_count < MAX_TASKS) {
+ tasks[task_count].pid = pid;
+ tasks[task_count].tgid = pid;
+ strncpy(tasks[task_count].command, comm,
+ TASK_COMM_LEN - 1);
+ tasks[task_count].command[TASK_COMM_LEN - 1] = '\0';
+ SET_TASK_STAT(task_count, cpu_count);
+ SET_TASK_STAT(task_count, cpu_delay_total);
+ SET_TASK_STAT(task_count, blkio_count);
+ SET_TASK_STAT(task_count, blkio_delay_total);
+ SET_TASK_STAT(task_count, swapin_count);
+ SET_TASK_STAT(task_count, swapin_delay_total);
+ SET_TASK_STAT(task_count, freepages_count);
+ SET_TASK_STAT(task_count, freepages_delay_total);
+ SET_TASK_STAT(task_count, thrashing_count);
+ SET_TASK_STAT(task_count, thrashing_delay_total);
+ SET_TASK_STAT(task_count, compact_count);
+ SET_TASK_STAT(task_count, compact_delay_total);
+ SET_TASK_STAT(task_count, wpcopy_count);
+ SET_TASK_STAT(task_count, wpcopy_delay_total);
+ SET_TASK_STAT(task_count, irq_count);
+ SET_TASK_STAT(task_count, irq_delay_total);
+ set_mem_count(&tasks[task_count]);
+ set_mem_delay_total(&tasks[task_count]);
+ task_count++;
+ }
+ break;
+ }
+ nested_len -= NLA_ALIGN(nested->nla_len);
+ nested = NLA_NEXT(nested);
+ }
+ }
+ nl_len -= NLA_ALIGN(na->nla_len);
+ na = NLA_NEXT(na);
+ }
+}
+
+static void get_task_delays(void)
+{
+ char comm[TASK_COMM_LEN];
+ struct dirent *entry;
+ DIR *dir;
+ int pid;
+
+ task_count = 0;
+ if (cfg.monitor_pid > 0) {
+ if (read_comm(cfg.monitor_pid, comm, sizeof(comm)) == 0)
+ fetch_and_fill_task_info(cfg.monitor_pid, comm);
+ return;
+ }
+
+ dir = opendir("/proc");
+ if (!dir) {
+ fprintf(stderr, "Error opening /proc directory\n");
+ return;
+ }
+
+ while ((entry = readdir(dir)) != NULL && task_count < MAX_TASKS) {
+ if (!isdigit(entry->d_name[0]))
+ continue;
+ pid = atoi(entry->d_name);
+ if (pid == 0)
+ continue;
+ if (read_comm(pid, comm, sizeof(comm)) != 0)
+ continue;
+ fetch_and_fill_task_info(pid, comm);
+ }
+ closedir(dir);
+}
+
+/* Calculate average delay in milliseconds */
+static double average_ms(unsigned long long total, unsigned long long count)
+{
+ if (count == 0)
+ return 0;
+ return (double)total / 1000000.0 / count;
+}
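+
+/*
+ * Example: taskstats reports delay totals in nanoseconds, so a task
+ * with cpu_delay_total = 3000000 and cpu_count = 2 averages
+ * 3000000 / 1000000.0 / 2 = 1.5 ms per wait (illustrative numbers).
+ */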
+
+/* Comparison function for sorting tasks */
+static int compare_tasks(const void *a, const void *b)
+{
+ const struct task_info *t1 = (const struct task_info *)a;
+ const struct task_info *t2 = (const struct task_info *)b;
+ unsigned long long total1;
+ unsigned long long total2;
+	unsigned long long count1;
+	unsigned long long count2;
+ double avg1, avg2;
+
+ total1 = *(unsigned long long *)((char *)t1 + cfg.sort_field->total_offset);
+ total2 = *(unsigned long long *)((char *)t2 + cfg.sort_field->total_offset);
+	count1 = *(unsigned long long *)((char *)t1 + cfg.sort_field->count_offset);
+	count2 = *(unsigned long long *)((char *)t2 + cfg.sort_field->count_offset);
+
+ avg1 = average_ms(total1, count1);
+ avg2 = average_ms(total2, count2);
+ if (avg1 != avg2)
+ return avg2 > avg1 ? 1 : -1;
+
+ return 0;
+}
+
+/* Sort tasks by selected field */
+static void sort_tasks(void)
+{
+ if (task_count > 0)
+ qsort(tasks, task_count, sizeof(struct task_info), compare_tasks);
+}
+
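+/*
+ * Note: CGROUPSTATS_CMD_GET takes an open file descriptor for a cgroup
+ * directory (e.g. a path under /sys/fs/cgroup) as its attribute; that
+ * is the path supplied with -C/--container.
+ */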
+/* Get container statistics via cgroupstats */
+static void get_container_stats(void)
+{
+ int rc, cfd;
+ struct {
+ struct nlmsghdr n;
+ struct genlmsghdr g;
+ char buf[MAX_MSG_SIZE];
+ } req, resp;
+ struct nlattr *na;
+ int nl_len;
+ struct cgroupstats stats;
+
+ /* Check if container path is set */
+ if (!cfg.container_path)
+ return;
+
+ /* Open container cgroup */
+ cfd = open(cfg.container_path, O_RDONLY);
+ if (cfd < 0) {
+ fprintf(stderr, "Error opening container path: %s\n", cfg.container_path);
+ return;
+ }
+
+ /* Send request for container stats */
+ if (send_cmd(nl_sd, family_id, getpid(), CGROUPSTATS_CMD_GET,
+ CGROUPSTATS_CMD_ATTR_FD, &cfd, sizeof(__u32)) < 0) {
+ fprintf(stderr, "Failed to send request for container stats\n");
+ close(cfd);
+ return;
+ }
+
+ /* Receive response */
+ rc = recv(nl_sd, &resp, sizeof(resp), 0);
+ if (rc < 0 || resp.n.nlmsg_type == NLMSG_ERROR) {
+ fprintf(stderr, "Failed to receive response for container stats\n");
+ close(cfd);
+ return;
+ }
+
+ /* Parse response */
+ nl_len = GENLMSG_PAYLOAD(&resp.n);
+ na = (struct nlattr *) GENLMSG_DATA(&resp);
+ while (nl_len > 0) {
+ if (na->nla_type == CGROUPSTATS_TYPE_CGROUP_STATS) {
+ /* Get the cgroupstats structure */
+ memcpy(&stats, NLA_DATA(na), sizeof(stats));
+
+ /* Fill container stats */
+ container_stats.nr_sleeping = stats.nr_sleeping;
+ container_stats.nr_running = stats.nr_running;
+ container_stats.nr_stopped = stats.nr_stopped;
+ container_stats.nr_uninterruptible = stats.nr_uninterruptible;
+ container_stats.nr_io_wait = stats.nr_io_wait;
+ break;
+ }
+ nl_len -= NLA_ALIGN(na->nla_len);
+		na = NLA_NEXT(na);
+ }
+
+ close(cfd);
+}
+
+/* Display results to stdout or log file */
+static void display_results(int psi_ret)
+{
+ time_t now = time(NULL);
+ struct tm *tm_now = localtime(&now);
+ FILE *out = stdout;
+ char timestamp[32];
+ bool suc = true;
+ int i, count;
+
+ /* Clear terminal screen */
+ suc &= BOOL_FPRINT(out, "\033[H\033[J");
+
+	/* PSI output (one line per resource) */
+	suc &= BOOL_FPRINT(out, "System Pressure Information: (avg10/avg60/avg300/total)\n");
+	if (psi_ret) {
+		suc &= BOOL_FPRINT(out, " PSI not found: check if psi=1 is enabled on the kernel cmdline\n");
+ } else {
+ suc &= BOOL_FPRINT(out, PSI_LINE_FORMAT,
+ "CPU some:",
+ psi.cpu_some_avg10,
+ psi.cpu_some_avg60,
+ psi.cpu_some_avg300,
+ psi.cpu_some_total / 1000);
+ suc &= BOOL_FPRINT(out, PSI_LINE_FORMAT,
+ "CPU full:",
+ psi.cpu_full_avg10,
+ psi.cpu_full_avg60,
+ psi.cpu_full_avg300,
+ psi.cpu_full_total / 1000);
+ suc &= BOOL_FPRINT(out, PSI_LINE_FORMAT,
+ "Memory full:",
+ psi.memory_full_avg10,
+ psi.memory_full_avg60,
+ psi.memory_full_avg300,
+ psi.memory_full_total / 1000);
+ suc &= BOOL_FPRINT(out, PSI_LINE_FORMAT,
+ "Memory some:",
+ psi.memory_some_avg10,
+ psi.memory_some_avg60,
+ psi.memory_some_avg300,
+ psi.memory_some_total / 1000);
+ suc &= BOOL_FPRINT(out, PSI_LINE_FORMAT,
+ "IO full:",
+ psi.io_full_avg10,
+ psi.io_full_avg60,
+ psi.io_full_avg300,
+ psi.io_full_total / 1000);
+ suc &= BOOL_FPRINT(out, PSI_LINE_FORMAT,
+ "IO some:",
+ psi.io_some_avg10,
+ psi.io_some_avg60,
+ psi.io_some_avg300,
+ psi.io_some_total / 1000);
+ suc &= BOOL_FPRINT(out, PSI_LINE_FORMAT,
+ "IRQ full:",
+ psi.irq_full_avg10,
+ psi.irq_full_avg60,
+ psi.irq_full_avg300,
+ psi.irq_full_total / 1000);
+ }
+
+ if (cfg.container_path) {
+ suc &= BOOL_FPRINT(out, "Container Information (%s):\n", cfg.container_path);
+ suc &= BOOL_FPRINT(out, "Processes: running=%d, sleeping=%d, ",
+ container_stats.nr_running, container_stats.nr_sleeping);
+ suc &= BOOL_FPRINT(out, "stopped=%d, uninterruptible=%d, io_wait=%d\n\n",
+ container_stats.nr_stopped, container_stats.nr_uninterruptible,
+ container_stats.nr_io_wait);
+ }
+
+ /* Interactive commands */
+ suc &= BOOL_FPRINT(out, "[o]sort [M]memverbose [q]quit\n");
+ if (sort_selected) {
+ if (cfg.display_mode == MODE_MEMVERBOSE)
+ suc &= BOOL_FPRINT(out,
+ "sort selection: [m]MEM [r]RCL [t]THR [p]CMP [w]WP\n");
+ else
+ suc &= BOOL_FPRINT(out,
+ "sort selection: [c]CPU [i]IO [m]MEM [q]IRQ\n");
+ }
+
+ /* Task delay output */
+ suc &= BOOL_FPRINT(out, "Top %d processes (sorted by %s delay):\n",
+ cfg.max_processes, get_name_by_field(cfg.sort_field));
+
+ suc &= BOOL_FPRINT(out, "%8s %8s %-17s", "PID", "TGID", "COMMAND");
+ if (cfg.display_mode == MODE_MEMVERBOSE) {
+ suc &= BOOL_FPRINT(out, "%8s %8s %8s %8s %8s %8s\n",
+ "MEM(ms)", "SWAP(ms)", "RCL(ms)",
+ "THR(ms)", "CMP(ms)", "WP(ms)");
+ suc &= BOOL_FPRINT(out, "-----------------------");
+ suc &= BOOL_FPRINT(out, "-----------------------");
+ suc &= BOOL_FPRINT(out, "-----------------------");
+ suc &= BOOL_FPRINT(out, "---------------------\n");
+ } else {
+ suc &= BOOL_FPRINT(out, "%8s %8s %8s %8s\n",
+ "CPU(ms)", "IO(ms)", "IRQ(ms)", "MEM(ms)");
+ suc &= BOOL_FPRINT(out, "-----------------------");
+ suc &= BOOL_FPRINT(out, "-----------------------");
+ suc &= BOOL_FPRINT(out, "--------------------------\n");
+ }
+
+ count = task_count < cfg.max_processes ? task_count : cfg.max_processes;
+
+ for (i = 0; i < count; i++) {
+ suc &= BOOL_FPRINT(out, "%8d %8d %-15s",
+ tasks[i].pid, tasks[i].tgid, tasks[i].command);
+ if (cfg.display_mode == MODE_MEMVERBOSE) {
+ suc &= BOOL_FPRINT(out, DELAY_FMT_MEMVERBOSE,
+ TASK_AVG(tasks[i], mem),
+ TASK_AVG(tasks[i], swapin),
+ TASK_AVG(tasks[i], freepages),
+ TASK_AVG(tasks[i], thrashing),
+ TASK_AVG(tasks[i], compact),
+ TASK_AVG(tasks[i], wpcopy));
+ } else {
+ suc &= BOOL_FPRINT(out, DELAY_FMT_DEFAULT,
+ TASK_AVG(tasks[i], cpu),
+ TASK_AVG(tasks[i], blkio),
+ TASK_AVG(tasks[i], irq),
+ TASK_AVG(tasks[i], mem));
+ }
+ }
+
+ suc &= BOOL_FPRINT(out, "\n");
+
+ if (!suc)
+ perror("Error writing to output");
+}
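
BOOL_FPRINT is defined earlier in the patch; a plausible definition consistent with the `suc &=` accumulation above (a sketch, not necessarily the file's exact macro):

    /* Fold fprintf()'s status into a bool: true on success, false on error. */
    #define BOOL_FPRINT(fp, fmt, ...) (fprintf((fp), fmt, ##__VA_ARGS__) >= 0)
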
+
+/* Check for keyboard input with timeout based on cfg.delay */
+static char check_for_keypress(void)
+{
+ struct timeval tv = {cfg.delay, 0};
+ fd_set readfds;
+ char ch = 0;
+
+ FD_ZERO(&readfds);
+ FD_SET(STDIN_FILENO, &readfds);
+ int r = select(STDIN_FILENO + 1, &readfds, NULL, NULL, &tv);
+
+ if (r > 0 && FD_ISSET(STDIN_FILENO, &readfds)) {
+ read(STDIN_FILENO, &ch, 1);
+ return ch;
+ }
+
+ return 0;
+}
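
Note that this select() doubles as the refresh timer: if no key arrives within cfg.delay seconds it returns 0 and the main loop simply redraws with fresh statistics.
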
+
+#define MAX_MODE_SIZE 2
+static void toggle_display_mode(void)
+{
+ static const size_t modes[MAX_MODE_SIZE] = {MODE_DEFAULT, MODE_MEMVERBOSE};
+ static size_t cur_index;
+
+ cur_index = (cur_index + 1) % MAX_MODE_SIZE;
+ cfg.display_mode = modes[cur_index];
+}
+
+/* Handle keyboard input: sorting selection, mode toggle, or quit */
+static void handle_keypress(char ch, int *running)
+{
+ const struct field_desc *field;
+
+ /* Change sort field */
+ if (sort_selected) {
+ field = get_field_by_cmd_char(ch);
+ if (field && (field->supported_modes & cfg.display_mode))
+ cfg.sort_field = field;
+
+ sort_selected = 0;
+ /* Handle mode changes or quit */
+ } else {
+ switch (ch) {
+ case 'o':
+ sort_selected = 1;
+ break;
+ case 'M':
+ toggle_display_mode();
+ for (field = sort_fields; field->name != NULL; field++) {
+ if (field->supported_modes & cfg.display_mode) {
+ cfg.sort_field = field;
+ break;
+ }
+ }
+ break;
+ case 'q':
+ case 'Q':
+ *running = 0;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+/* Main function */
+int main(int argc, char **argv)
+{
+ const struct field_desc *field;
+ int iterations = 0;
+ int psi_ret = 0;
+ char keypress;
+
+ /* Parse command line arguments */
+ parse_args(argc, argv);
+
+ /* Setup netlink socket */
+ nl_sd = create_nl_socket();
+ if (nl_sd < 0) {
+ fprintf(stderr, "Error creating netlink socket\n");
+ exit(1);
+ }
+
+ /* Get family ID for taskstats via netlink */
+ family_id = get_family_id(nl_sd);
+ if (!family_id) {
+ fprintf(stderr, "Error getting taskstats family ID\n");
+ close(nl_sd);
+ exit(1);
+ }
+
+ /* Set terminal to non-canonical mode for interaction */
+ enable_raw_mode();
+
+ /* Main loop */
+ while (running) {
+ /* Auto-switch sort field when not matching display mode */
+ if (!(cfg.sort_field->supported_modes & cfg.display_mode)) {
+ for (field = sort_fields; field->name != NULL; field++) {
+ if (field->supported_modes & cfg.display_mode) {
+ cfg.sort_field = field;
+ printf("Auto-switched sort field to: %s\n", field->name);
+ break;
+ }
+ }
+ }
+
+ /* Read PSI statistics */
+ psi_ret = read_psi_stats();
+
+ /* Get container stats if container path provided */
+ if (cfg.container_path)
+ get_container_stats();
+
+ /* Get task delays */
+ get_task_delays();
+
+ /* Sort tasks */
+ sort_tasks();
+
+ /* Display results to stdout or log file */
+ display_results(psi_ret);
+
+ /* Check for iterations */
+ if (cfg.iterations > 0 && ++iterations >= cfg.iterations)
+ break;
+
+ /* Exit if output_one_time is set */
+ if (cfg.output_one_time)
+ break;
+
+ /* Keypress for interactive usage */
+ keypress = check_for_keypress();
+ if (keypress)
+ handle_keypress(keypress, &running);
+ }
+
+ /* Restore terminal mode */
+ disable_raw_mode();
+
+ /* Cleanup */
+ close(nl_sd);
+ if (cfg.container_path)
+ free(cfg.container_path);
+
+ return 0;
+}
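
enable_raw_mode() and disable_raw_mode() are defined earlier in delaytop.c; a typical non-canonical-mode toggle looks like the following sketch (the saved_termios global is an assumption for illustration):

    #include <termios.h>
    #include <unistd.h>

    static struct termios saved_termios;

    static void enable_raw_mode(void)
    {
        struct termios t;

        tcgetattr(STDIN_FILENO, &saved_termios);
        t = saved_termios;
        t.c_lflag &= ~(ICANON | ECHO);    /* byte-at-a-time input, no echo */
        tcsetattr(STDIN_FILENO, TCSANOW, &t);
    }

    static void disable_raw_mode(void)
    {
        tcsetattr(STDIN_FILENO, TCSANOW, &saved_termios);
    }
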
diff --git a/tools/accounting/getdelays.c b/tools/accounting/getdelays.c
index 3feac0482fe9..21cb3c3d1331 100644
--- a/tools/accounting/getdelays.c
+++ b/tools/accounting/getdelays.c
@@ -194,75 +194,108 @@ static int get_family_id(int sd)
#define average_ms(t, c) (t / 1000000ULL / (c ? c : 1))
#define delay_ms(t) (t / 1000000ULL)
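
As a quick check of the units: with a delay total of 5,000,000,000 ns over 4 events, average_ms() yields 1250 (ms per event), and delay_ms(5000000) yields 5; the (c ? c : 1) guard avoids dividing by zero when no events were recorded.
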
+/*
+ * Version compatibility note:
+ * Field availability depends on the taskstats version (t->version),
+ * which corresponds to TASKSTATS_VERSION in the kernel headers;
+ * see include/uapi/linux/taskstats.h.
+ *
+ * Version feature mapping:
+ * version >= 11 - supports COMPACT statistics
+ * version >= 13 - supports WPCOPY statistics
+ * version >= 14 - supports IRQ statistics
+ * version >= 16 - supports *_max and *_min delay statistics
+ *
+ * Always verify version before accessing version-dependent fields
+ * to maintain backward compatibility.
+ */
+#define PRINT_CPU_DELAY(version, t) \
+ do { \
+ if (version >= 16) { \
+ printf("%-10s%15s%15s%15s%15s%15s%15s%15s\n", \
+ "CPU", "count", "real total", "virtual total", \
+ "delay total", "delay average", "delay max", "delay min"); \
+ printf(" %15llu%15llu%15llu%15llu%15.3fms%13.6fms%13.6fms\n", \
+ (unsigned long long)(t)->cpu_count, \
+ (unsigned long long)(t)->cpu_run_real_total, \
+ (unsigned long long)(t)->cpu_run_virtual_total, \
+ (unsigned long long)(t)->cpu_delay_total, \
+ average_ms((double)(t)->cpu_delay_total, (t)->cpu_count), \
+ delay_ms((double)(t)->cpu_delay_max), \
+ delay_ms((double)(t)->cpu_delay_min)); \
+ } else { \
+ printf("%-10s%15s%15s%15s%15s%15s\n", \
+ "CPU", "count", "real total", "virtual total", \
+ "delay total", "delay average"); \
+ printf(" %15llu%15llu%15llu%15llu%15.3fms\n", \
+ (unsigned long long)(t)->cpu_count, \
+ (unsigned long long)(t)->cpu_run_real_total, \
+ (unsigned long long)(t)->cpu_run_virtual_total, \
+ (unsigned long long)(t)->cpu_delay_total, \
+ average_ms((double)(t)->cpu_delay_total, (t)->cpu_count)); \
+ } \
+ } while (0)
+#define PRINT_FIELD_DELAY(name, version, t, count, total, max, min) \
+ do { \
+ if (version >= 16) { \
+ printf("%-10s%15s%15s%15s%15s%15s\n", \
+ name, "count", "delay total", "delay average", \
+ "delay max", "delay min"); \
+ printf(" %15llu%15llu%15.3fms%13.6fms%13.6fms\n", \
+ (unsigned long long)(t)->count, \
+ (unsigned long long)(t)->total, \
+ average_ms((double)(t)->total, (t)->count), \
+ delay_ms((double)(t)->max), \
+ delay_ms((double)(t)->min)); \
+ } else { \
+ printf("%-10s%15s%15s%15s\n", \
+ name, "count", "delay total", "delay average"); \
+ printf(" %15llu%15llu%15.3fms\n", \
+ (unsigned long long)(t)->count, \
+ (unsigned long long)(t)->total, \
+ average_ms((double)(t)->total, (t)->count)); \
+ } \
+ } while (0)
+
static void print_delayacct(struct taskstats *t)
{
- printf("\n\nCPU %15s%15s%15s%15s%15s%15s%15s\n"
- " %15llu%15llu%15llu%15llu%15.3fms%13.6fms%13.6fms\n"
- "IO %15s%15s%15s%15s%15s\n"
- " %15llu%15llu%15.3fms%13.6fms%13.6fms\n"
- "SWAP %15s%15s%15s%15s%15s\n"
- " %15llu%15llu%15.3fms%13.6fms%13.6fms\n"
- "RECLAIM %12s%15s%15s%15s%15s\n"
- " %15llu%15llu%15.3fms%13.6fms%13.6fms\n"
- "THRASHING%12s%15s%15s%15s%15s\n"
- " %15llu%15llu%15.3fms%13.6fms%13.6fms\n"
- "COMPACT %12s%15s%15s%15s%15s\n"
- " %15llu%15llu%15.3fms%13.6fms%13.6fms\n"
- "WPCOPY %12s%15s%15s%15s%15s\n"
- " %15llu%15llu%15.3fms%13.6fms%13.6fms\n"
- "IRQ %15s%15s%15s%15s%15s\n"
- " %15llu%15llu%15.3fms%13.6fms%13.6fms\n",
- "count", "real total", "virtual total",
- "delay total", "delay average", "delay max", "delay min",
- (unsigned long long)t->cpu_count,
- (unsigned long long)t->cpu_run_real_total,
- (unsigned long long)t->cpu_run_virtual_total,
- (unsigned long long)t->cpu_delay_total,
- average_ms((double)t->cpu_delay_total, t->cpu_count),
- delay_ms((double)t->cpu_delay_max),
- delay_ms((double)t->cpu_delay_min),
- "count", "delay total", "delay average", "delay max", "delay min",
- (unsigned long long)t->blkio_count,
- (unsigned long long)t->blkio_delay_total,
- average_ms((double)t->blkio_delay_total, t->blkio_count),
- delay_ms((double)t->blkio_delay_max),
- delay_ms((double)t->blkio_delay_min),
- "count", "delay total", "delay average", "delay max", "delay min",
- (unsigned long long)t->swapin_count,
- (unsigned long long)t->swapin_delay_total,
- average_ms((double)t->swapin_delay_total, t->swapin_count),
- delay_ms((double)t->swapin_delay_max),
- delay_ms((double)t->swapin_delay_min),
- "count", "delay total", "delay average", "delay max", "delay min",
- (unsigned long long)t->freepages_count,
- (unsigned long long)t->freepages_delay_total,
- average_ms((double)t->freepages_delay_total, t->freepages_count),
- delay_ms((double)t->freepages_delay_max),
- delay_ms((double)t->freepages_delay_min),
- "count", "delay total", "delay average", "delay max", "delay min",
- (unsigned long long)t->thrashing_count,
- (unsigned long long)t->thrashing_delay_total,
- average_ms((double)t->thrashing_delay_total, t->thrashing_count),
- delay_ms((double)t->thrashing_delay_max),
- delay_ms((double)t->thrashing_delay_min),
- "count", "delay total", "delay average", "delay max", "delay min",
- (unsigned long long)t->compact_count,
- (unsigned long long)t->compact_delay_total,
- average_ms((double)t->compact_delay_total, t->compact_count),
- delay_ms((double)t->compact_delay_max),
- delay_ms((double)t->compact_delay_min),
- "count", "delay total", "delay average", "delay max", "delay min",
- (unsigned long long)t->wpcopy_count,
- (unsigned long long)t->wpcopy_delay_total,
- average_ms((double)t->wpcopy_delay_total, t->wpcopy_count),
- delay_ms((double)t->wpcopy_delay_max),
- delay_ms((double)t->wpcopy_delay_min),
- "count", "delay total", "delay average", "delay max", "delay min",
- (unsigned long long)t->irq_count,
- (unsigned long long)t->irq_delay_total,
- average_ms((double)t->irq_delay_total, t->irq_count),
- delay_ms((double)t->irq_delay_max),
- delay_ms((double)t->irq_delay_min));
+ printf("\n\n");
+
+ PRINT_CPU_DELAY(t->version, t);
+
+ PRINT_FILED_DELAY("IO", t->version, t,
+ blkio_count, blkio_delay_total,
+ blkio_delay_max, blkio_delay_min);
+
+ PRINT_FILED_DELAY("SWAP", t->version, t,
+ swapin_count, swapin_delay_total,
+ swapin_delay_max, swapin_delay_min);
+
+ PRINT_FILED_DELAY("RECLAIM", t->version, t,
+ freepages_count, freepages_delay_total,
+ freepages_delay_max, freepages_delay_min);
+
+ PRINT_FILED_DELAY("THRASHING", t->version, t,
+ thrashing_count, thrashing_delay_total,
+ thrashing_delay_max, thrashing_delay_min);
+
+ if (t->version >= 11) {
+ PRINT_FILED_DELAY("COMPACT", t->version, t,
+ compact_count, compact_delay_total,
+ compact_delay_max, compact_delay_min);
+ }
+
+ if (t->version >= 13) {
+ PRINT_FILED_DELAY("WPCOPY", t->version, t,
+ wpcopy_count, wpcopy_delay_total,
+ wpcopy_delay_max, wpcopy_delay_min);
+ }
+
+ if (t->version >= 14) {
+ PRINT_FILED_DELAY("IRQ", t->version, t,
+ irq_count, irq_delay_total,
+ irq_delay_max, irq_delay_min);
+ }
}
static void task_context_switch_counts(struct taskstats *t)
diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h
deleted file mode 100644
index d5dd96902817..000000000000
--- a/tools/arch/arm/include/uapi/asm/kvm.h
+++ /dev/null
@@ -1,315 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __ARM_KVM_H__
-#define __ARM_KVM_H__
-
-#include <linux/types.h>
-#include <linux/psci.h>
-#include <asm/ptrace.h>
-
-#define __KVM_HAVE_GUEST_DEBUG
-#define __KVM_HAVE_IRQ_LINE
-#define __KVM_HAVE_READONLY_MEM
-#define __KVM_HAVE_VCPU_EVENTS
-
-#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
-
-#define KVM_REG_SIZE(id) \
- (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
-
-/* Valid for svc_regs, abt_regs, und_regs, irq_regs in struct kvm_regs */
-#define KVM_ARM_SVC_sp svc_regs[0]
-#define KVM_ARM_SVC_lr svc_regs[1]
-#define KVM_ARM_SVC_spsr svc_regs[2]
-#define KVM_ARM_ABT_sp abt_regs[0]
-#define KVM_ARM_ABT_lr abt_regs[1]
-#define KVM_ARM_ABT_spsr abt_regs[2]
-#define KVM_ARM_UND_sp und_regs[0]
-#define KVM_ARM_UND_lr und_regs[1]
-#define KVM_ARM_UND_spsr und_regs[2]
-#define KVM_ARM_IRQ_sp irq_regs[0]
-#define KVM_ARM_IRQ_lr irq_regs[1]
-#define KVM_ARM_IRQ_spsr irq_regs[2]
-
-/* Valid only for fiq_regs in struct kvm_regs */
-#define KVM_ARM_FIQ_r8 fiq_regs[0]
-#define KVM_ARM_FIQ_r9 fiq_regs[1]
-#define KVM_ARM_FIQ_r10 fiq_regs[2]
-#define KVM_ARM_FIQ_fp fiq_regs[3]
-#define KVM_ARM_FIQ_ip fiq_regs[4]
-#define KVM_ARM_FIQ_sp fiq_regs[5]
-#define KVM_ARM_FIQ_lr fiq_regs[6]
-#define KVM_ARM_FIQ_spsr fiq_regs[7]
-
-struct kvm_regs {
- struct pt_regs usr_regs; /* R0_usr - R14_usr, PC, CPSR */
- unsigned long svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */
- unsigned long abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */
- unsigned long und_regs[3]; /* SP_und, LR_und, SPSR_und */
- unsigned long irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */
- unsigned long fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */
-};
-
-/* Supported Processor Types */
-#define KVM_ARM_TARGET_CORTEX_A15 0
-#define KVM_ARM_TARGET_CORTEX_A7 1
-#define KVM_ARM_NUM_TARGETS 2
-
-/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
-#define KVM_ARM_DEVICE_TYPE_SHIFT 0
-#define KVM_ARM_DEVICE_TYPE_MASK (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT)
-#define KVM_ARM_DEVICE_ID_SHIFT 16
-#define KVM_ARM_DEVICE_ID_MASK (0xffff << KVM_ARM_DEVICE_ID_SHIFT)
-
-/* Supported device IDs */
-#define KVM_ARM_DEVICE_VGIC_V2 0
-
-/* Supported VGIC address types */
-#define KVM_VGIC_V2_ADDR_TYPE_DIST 0
-#define KVM_VGIC_V2_ADDR_TYPE_CPU 1
-
-#define KVM_VGIC_V2_DIST_SIZE 0x1000
-#define KVM_VGIC_V2_CPU_SIZE 0x2000
-
-/* Supported VGICv3 address types */
-#define KVM_VGIC_V3_ADDR_TYPE_DIST 2
-#define KVM_VGIC_V3_ADDR_TYPE_REDIST 3
-#define KVM_VGIC_ITS_ADDR_TYPE 4
-#define KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION 5
-
-#define KVM_VGIC_V3_DIST_SIZE SZ_64K
-#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K)
-#define KVM_VGIC_V3_ITS_SIZE (2 * SZ_64K)
-
-#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
-#define KVM_ARM_VCPU_PSCI_0_2 1 /* CPU uses PSCI v0.2 */
-
-struct kvm_vcpu_init {
- __u32 target;
- __u32 features[7];
-};
-
-struct kvm_sregs {
-};
-
-struct kvm_fpu {
-};
-
-struct kvm_guest_debug_arch {
-};
-
-struct kvm_debug_exit_arch {
-};
-
-struct kvm_sync_regs {
- /* Used with KVM_CAP_ARM_USER_IRQ */
- __u64 device_irq_level;
-};
-
-struct kvm_arch_memory_slot {
-};
-
-/* for KVM_GET/SET_VCPU_EVENTS */
-struct kvm_vcpu_events {
- struct {
- __u8 serror_pending;
- __u8 serror_has_esr;
- __u8 ext_dabt_pending;
- /* Align it to 8 bytes */
- __u8 pad[5];
- __u64 serror_esr;
- } exception;
- __u32 reserved[12];
-};
-
-/* If you need to interpret the index values, here is the key: */
-#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
-#define KVM_REG_ARM_COPROC_SHIFT 16
-#define KVM_REG_ARM_32_OPC2_MASK 0x0000000000000007
-#define KVM_REG_ARM_32_OPC2_SHIFT 0
-#define KVM_REG_ARM_OPC1_MASK 0x0000000000000078
-#define KVM_REG_ARM_OPC1_SHIFT 3
-#define KVM_REG_ARM_CRM_MASK 0x0000000000000780
-#define KVM_REG_ARM_CRM_SHIFT 7
-#define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800
-#define KVM_REG_ARM_32_CRN_SHIFT 11
-/*
- * For KVM currently all guest registers are nonsecure, but we reserve a bit
- * in the encoding to distinguish secure from nonsecure for AArch32 system
- * registers that are banked by security. This is 1 for the secure banked
- * register, and 0 for the nonsecure banked register or if the register is
- * not banked by security.
- */
-#define KVM_REG_ARM_SECURE_MASK 0x0000000010000000
-#define KVM_REG_ARM_SECURE_SHIFT 28
-
-#define ARM_CP15_REG_SHIFT_MASK(x,n) \
- (((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK)
-
-#define __ARM_CP15_REG(op1,crn,crm,op2) \
- (KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT) | \
- ARM_CP15_REG_SHIFT_MASK(op1, OPC1) | \
- ARM_CP15_REG_SHIFT_MASK(crn, 32_CRN) | \
- ARM_CP15_REG_SHIFT_MASK(crm, CRM) | \
- ARM_CP15_REG_SHIFT_MASK(op2, 32_OPC2))
-
-#define ARM_CP15_REG32(...) (__ARM_CP15_REG(__VA_ARGS__) | KVM_REG_SIZE_U32)
-
-#define __ARM_CP15_REG64(op1,crm) \
- (__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64)
-#define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__)
-
-/* PL1 Physical Timer Registers */
-#define KVM_REG_ARM_PTIMER_CTL ARM_CP15_REG32(0, 14, 2, 1)
-#define KVM_REG_ARM_PTIMER_CNT ARM_CP15_REG64(0, 14)
-#define KVM_REG_ARM_PTIMER_CVAL ARM_CP15_REG64(2, 14)
-
-/* Virtual Timer Registers */
-#define KVM_REG_ARM_TIMER_CTL ARM_CP15_REG32(0, 14, 3, 1)
-#define KVM_REG_ARM_TIMER_CNT ARM_CP15_REG64(1, 14)
-#define KVM_REG_ARM_TIMER_CVAL ARM_CP15_REG64(3, 14)
-
-/* Normal registers are mapped as coprocessor 16. */
-#define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT)
-#define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / 4)
-
-/* Some registers need more space to represent values. */
-#define KVM_REG_ARM_DEMUX (0x0011 << KVM_REG_ARM_COPROC_SHIFT)
-#define KVM_REG_ARM_DEMUX_ID_MASK 0x000000000000FF00
-#define KVM_REG_ARM_DEMUX_ID_SHIFT 8
-#define KVM_REG_ARM_DEMUX_ID_CCSIDR (0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT)
-#define KVM_REG_ARM_DEMUX_VAL_MASK 0x00000000000000FF
-#define KVM_REG_ARM_DEMUX_VAL_SHIFT 0
-
-/* VFP registers: we could overload CP10 like ARM does, but that's ugly. */
-#define KVM_REG_ARM_VFP (0x0012 << KVM_REG_ARM_COPROC_SHIFT)
-#define KVM_REG_ARM_VFP_MASK 0x000000000000FFFF
-#define KVM_REG_ARM_VFP_BASE_REG 0x0
-#define KVM_REG_ARM_VFP_FPSID 0x1000
-#define KVM_REG_ARM_VFP_FPSCR 0x1001
-#define KVM_REG_ARM_VFP_MVFR1 0x1006
-#define KVM_REG_ARM_VFP_MVFR0 0x1007
-#define KVM_REG_ARM_VFP_FPEXC 0x1008
-#define KVM_REG_ARM_VFP_FPINST 0x1009
-#define KVM_REG_ARM_VFP_FPINST2 0x100A
-
-/* KVM-as-firmware specific pseudo-registers */
-#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
-#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \
- KVM_REG_ARM_FW | ((r) & 0xffff))
-#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
-#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 KVM_REG_ARM_FW_REG(1)
- /* Higher values mean better protection. */
-#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL 0
-#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL 1
-#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED 2
-#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 KVM_REG_ARM_FW_REG(2)
- /* Higher values mean better protection. */
-#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL 0
-#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN 1
-#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL 2
-#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED 3
-#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED (1U << 4)
-
-/* Device Control API: ARM VGIC */
-#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
-#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
-#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2
-#define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32
-#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
-#define KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT 32
-#define KVM_DEV_ARM_VGIC_V3_MPIDR_MASK \
- (0xffffffffULL << KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT)
-#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0
-#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
-#define KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK (0xffff)
-#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3
-#define KVM_DEV_ARM_VGIC_GRP_CTRL 4
-#define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5
-#define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6
-#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7
-#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8
-#define KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ 9
-#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10
-#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \
- (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT)
-#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff
-#define VGIC_LEVEL_INFO_LINE_LEVEL 0
-
-/* Device Control API on vcpu fd */
-#define KVM_ARM_VCPU_PMU_V3_CTRL 0
-#define KVM_ARM_VCPU_PMU_V3_IRQ 0
-#define KVM_ARM_VCPU_PMU_V3_INIT 1
-#define KVM_ARM_VCPU_TIMER_CTRL 1
-#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0
-#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1
-
-#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
-#define KVM_DEV_ARM_ITS_SAVE_TABLES 1
-#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
-#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
-#define KVM_DEV_ARM_ITS_CTRL_RESET 4
-
-/* KVM_IRQ_LINE irq field index values */
-#define KVM_ARM_IRQ_VCPU2_SHIFT 28
-#define KVM_ARM_IRQ_VCPU2_MASK 0xf
-#define KVM_ARM_IRQ_TYPE_SHIFT 24
-#define KVM_ARM_IRQ_TYPE_MASK 0xf
-#define KVM_ARM_IRQ_VCPU_SHIFT 16
-#define KVM_ARM_IRQ_VCPU_MASK 0xff
-#define KVM_ARM_IRQ_NUM_SHIFT 0
-#define KVM_ARM_IRQ_NUM_MASK 0xffff
-
-/* irq_type field */
-#define KVM_ARM_IRQ_TYPE_CPU 0
-#define KVM_ARM_IRQ_TYPE_SPI 1
-#define KVM_ARM_IRQ_TYPE_PPI 2
-
-/* out-of-kernel GIC cpu interrupt injection irq_number field */
-#define KVM_ARM_IRQ_CPU_IRQ 0
-#define KVM_ARM_IRQ_CPU_FIQ 1
-
-/*
- * This used to hold the highest supported SPI, but it is now obsolete
- * and only here to provide source code level compatibility with older
- * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS.
- */
-#ifndef __KERNEL__
-#define KVM_ARM_IRQ_GIC_MAX 127
-#endif
-
-/* One single KVM irqchip, ie. the VGIC */
-#define KVM_NR_IRQCHIPS 1
-
-/* PSCI interface */
-#define KVM_PSCI_FN_BASE 0x95c1ba5e
-#define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n))
-
-#define KVM_PSCI_FN_CPU_SUSPEND KVM_PSCI_FN(0)
-#define KVM_PSCI_FN_CPU_OFF KVM_PSCI_FN(1)
-#define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2)
-#define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3)
-
-#define KVM_PSCI_RET_SUCCESS PSCI_RET_SUCCESS
-#define KVM_PSCI_RET_NI PSCI_RET_NOT_SUPPORTED
-#define KVM_PSCI_RET_INVAL PSCI_RET_INVALID_PARAMS
-#define KVM_PSCI_RET_DENIED PSCI_RET_DENIED
-
-#endif /* __ARM_KVM_H__ */
diff --git a/tools/arch/arm64/include/asm/cputype.h b/tools/arch/arm64/include/asm/cputype.h
index 488f8e751349..f898c47e551f 100644
--- a/tools/arch/arm64/include/asm/cputype.h
+++ b/tools/arch/arm64/include/asm/cputype.h
@@ -75,11 +75,13 @@
#define ARM_CPU_PART_CORTEX_A76 0xD0B
#define ARM_CPU_PART_NEOVERSE_N1 0xD0C
#define ARM_CPU_PART_CORTEX_A77 0xD0D
+#define ARM_CPU_PART_CORTEX_A76AE 0xD0E
#define ARM_CPU_PART_NEOVERSE_V1 0xD40
#define ARM_CPU_PART_CORTEX_A78 0xD41
#define ARM_CPU_PART_CORTEX_A78AE 0xD42
#define ARM_CPU_PART_CORTEX_X1 0xD44
#define ARM_CPU_PART_CORTEX_A510 0xD46
+#define ARM_CPU_PART_CORTEX_X1C 0xD4C
#define ARM_CPU_PART_CORTEX_A520 0xD80
#define ARM_CPU_PART_CORTEX_A710 0xD47
#define ARM_CPU_PART_CORTEX_A715 0xD4D
@@ -94,6 +96,7 @@
#define ARM_CPU_PART_NEOVERSE_V3 0xD84
#define ARM_CPU_PART_CORTEX_X925 0xD85
#define ARM_CPU_PART_CORTEX_A725 0xD87
+#define ARM_CPU_PART_CORTEX_A720AE 0xD89
#define ARM_CPU_PART_NEOVERSE_N3 0xD8E
#define APM_CPU_PART_XGENE 0x000
@@ -119,9 +122,11 @@
#define QCOM_CPU_PART_KRYO 0x200
#define QCOM_CPU_PART_KRYO_2XX_GOLD 0x800
#define QCOM_CPU_PART_KRYO_2XX_SILVER 0x801
+#define QCOM_CPU_PART_KRYO_3XX_GOLD 0x802
#define QCOM_CPU_PART_KRYO_3XX_SILVER 0x803
#define QCOM_CPU_PART_KRYO_4XX_GOLD 0x804
#define QCOM_CPU_PART_KRYO_4XX_SILVER 0x805
+#define QCOM_CPU_PART_ORYON_X1 0x001
#define NVIDIA_CPU_PART_DENVER 0x003
#define NVIDIA_CPU_PART_CARMEL 0x004
@@ -129,6 +134,8 @@
#define FUJITSU_CPU_PART_A64FX 0x001
#define HISI_CPU_PART_TSV110 0xD01
+#define HISI_CPU_PART_HIP09 0xD02
+#define HISI_CPU_PART_HIP12 0xD06
#define APPLE_CPU_PART_M1_ICESTORM 0x022
#define APPLE_CPU_PART_M1_FIRESTORM 0x023
@@ -158,11 +165,13 @@
#define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
#define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
#define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
+#define MIDR_CORTEX_A76AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76AE)
#define MIDR_NEOVERSE_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
#define MIDR_CORTEX_A78 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
#define MIDR_CORTEX_A78AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
#define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
#define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
+#define MIDR_CORTEX_X1C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1C)
#define MIDR_CORTEX_A520 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A520)
#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
#define MIDR_CORTEX_A715 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A715)
@@ -177,6 +186,7 @@
#define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
#define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925)
#define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725)
+#define MIDR_CORTEX_A720AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720AE)
#define MIDR_NEOVERSE_N3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N3)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
@@ -195,13 +205,27 @@
#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
#define MIDR_QCOM_KRYO_2XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_GOLD)
#define MIDR_QCOM_KRYO_2XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_SILVER)
+#define MIDR_QCOM_KRYO_3XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_GOLD)
#define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER)
#define MIDR_QCOM_KRYO_4XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_GOLD)
#define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER)
+#define MIDR_QCOM_ORYON_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_ORYON_X1)
+
+/*
+ * NOTES:
+ * - Qualcomm Kryo 5XX Prime / Gold ID themselves as MIDR_CORTEX_A77
+ * - Qualcomm Kryo 5XX Silver IDs itself as MIDR_QCOM_KRYO_4XX_SILVER
+ * - Qualcomm Kryo 6XX Prime IDs itself as MIDR_CORTEX_X1
+ * - Qualcomm Kryo 6XX Gold IDs itself as ARM_CPU_PART_CORTEX_A78
+ * - Qualcomm Kryo 6XX Silver IDs itself as MIDR_CORTEX_A55
+ */
+
#define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
#define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
#define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110)
+#define MIDR_HISI_HIP09 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_HIP09)
+#define MIDR_HISI_HIP12 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_HIP12)
#define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM)
#define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM)
#define MIDR_APPLE_M1_ICESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_PRO)
@@ -223,7 +247,7 @@
#define MIDR_FUJITSU_ERRATUM_010001_MASK (~MIDR_CPU_VAR_REV(1, 0))
#define TCR_CLEAR_FUJITSU_ERRATUM_010001 (TCR_NFD1 | TCR_NFD0)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/sysreg.h>
@@ -289,6 +313,14 @@ static inline u32 __attribute_const__ read_cpuid_id(void)
return read_cpuid(MIDR_EL1);
}
+struct target_impl_cpu {
+ u64 midr;
+ u64 revidr;
+ u64 aidr;
+};
+
+bool cpu_errata_set_target_impl(u64 num, void *impl_cpus);
+
static inline u64 __attribute_const__ read_cpuid_mpidr(void)
{
return read_cpuid(MPIDR_EL1);
@@ -308,6 +340,6 @@ static inline u32 __attribute_const__ read_cpuid_cachetype(void)
{
return read_cpuid(CTR_EL0);
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
diff --git a/tools/arch/arm64/include/asm/esr.h b/tools/arch/arm64/include/asm/esr.h
index bd592ca81571..f3c6403e5ef2 100644
--- a/tools/arch/arm64/include/asm/esr.h
+++ b/tools/arch/arm64/include/asm/esr.h
@@ -141,6 +141,8 @@
#define ESR_ELx_SF (UL(1) << ESR_ELx_SF_SHIFT)
#define ESR_ELx_AR_SHIFT (14)
#define ESR_ELx_AR (UL(1) << ESR_ELx_AR_SHIFT)
+#define ESR_ELx_VNCR_SHIFT (13)
+#define ESR_ELx_VNCR (UL(1) << ESR_ELx_VNCR_SHIFT)
#define ESR_ELx_CM_SHIFT (8)
#define ESR_ELx_CM (UL(1) << ESR_ELx_CM_SHIFT)
@@ -385,7 +387,7 @@
#define ESR_ELx_MOPS_ISS_SRCREG(esr) (((esr) & (UL(0x1f) << 5)) >> 5)
#define ESR_ELx_MOPS_ISS_SIZEREG(esr) (((esr) & (UL(0x1f) << 0)) >> 0)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/types.h>
static inline unsigned long esr_brk_comment(unsigned long esr)
@@ -450,6 +452,6 @@ static inline bool esr_iss_is_eretab(unsigned long esr)
}
const char *esr_get_class_string(unsigned long esr);
-#endif /* __ASSEMBLY */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_ESR_H */
diff --git a/tools/arch/arm64/include/asm/gpr-num.h b/tools/arch/arm64/include/asm/gpr-num.h
index 05da4a7c5788..a114e4f8209b 100644
--- a/tools/arch/arm64/include/asm/gpr-num.h
+++ b/tools/arch/arm64/include/asm/gpr-num.h
@@ -2,7 +2,7 @@
#ifndef __ASM_GPR_NUM_H
#define __ASM_GPR_NUM_H
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
.irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
.equ .L__gpr_num_x\num, \num
@@ -11,7 +11,7 @@
.equ .L__gpr_num_xzr, 31
.equ .L__gpr_num_wzr, 31
-#else /* __ASSEMBLY__ */
+#else /* __ASSEMBLER__ */
#define __DEFINE_ASM_GPR_NUMS \
" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" \
@@ -21,6 +21,6 @@
" .equ .L__gpr_num_xzr, 31\n" \
" .equ .L__gpr_num_wzr, 31\n"
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_GPR_NUM_H */
diff --git a/tools/arch/arm64/include/asm/sysreg.h b/tools/arch/arm64/include/asm/sysreg.h
index 690b6ebd118f..178b7322bf04 100644
--- a/tools/arch/arm64/include/asm/sysreg.h
+++ b/tools/arch/arm64/include/asm/sysreg.h
@@ -51,7 +51,7 @@
#ifndef CONFIG_BROKEN_GAS_INST
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
// The space separator is omitted so that __emit_inst(x) can be parsed as
// either an assembler directive or an assembler macro argument.
#define __emit_inst(x) .inst(x)
@@ -70,11 +70,11 @@
(((x) >> 24) & 0x000000ff))
#endif /* CONFIG_CPU_BIG_ENDIAN */
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define __emit_inst(x) .long __INSTR_BSWAP(x)
-#else /* __ASSEMBLY__ */
+#else /* __ASSEMBLER__ */
#define __emit_inst(x) ".long " __stringify(__INSTR_BSWAP(x)) "\n\t"
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* CONFIG_BROKEN_GAS_INST */
@@ -1078,12 +1078,7 @@
#define GCS_CAP(x) ((((unsigned long)x) & GCS_CAP_ADDR_MASK) | \
GCS_CAP_VALID_TOKEN)
-#define ARM64_FEATURE_FIELD_BITS 4
-
-/* Defined for compatibility only, do not add new users. */
-#define ARM64_FEATURE_MASK(x) (x##_MASK)
-
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
.macro mrs_s, rt, sreg
__emit_inst(0xd5200000|(\sreg)|(.L__gpr_num_\rt))
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index af9d9acaf997..a792a599b9d6 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -31,7 +31,7 @@
#define KVM_SPSR_FIQ 4
#define KVM_NR_SPSR 5
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/psci.h>
#include <linux/types.h>
#include <asm/ptrace.h>
@@ -431,10 +431,11 @@ enum {
/* Device Control API on vcpu fd */
#define KVM_ARM_VCPU_PMU_V3_CTRL 0
-#define KVM_ARM_VCPU_PMU_V3_IRQ 0
-#define KVM_ARM_VCPU_PMU_V3_INIT 1
-#define KVM_ARM_VCPU_PMU_V3_FILTER 2
-#define KVM_ARM_VCPU_PMU_V3_SET_PMU 3
+#define KVM_ARM_VCPU_PMU_V3_IRQ 0
+#define KVM_ARM_VCPU_PMU_V3_INIT 1
+#define KVM_ARM_VCPU_PMU_V3_FILTER 2
+#define KVM_ARM_VCPU_PMU_V3_SET_PMU 3
+#define KVM_ARM_VCPU_PMU_V3_SET_NR_COUNTERS 4
#define KVM_ARM_VCPU_TIMER_CTRL 1
#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0
#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1
diff --git a/tools/arch/loongarch/include/asm/inst.h b/tools/arch/loongarch/include/asm/inst.h
index c25b5853181d..d68fad63c8b7 100644
--- a/tools/arch/loongarch/include/asm/inst.h
+++ b/tools/arch/loongarch/include/asm/inst.h
@@ -51,6 +51,10 @@ enum reg2i16_op {
bgeu_op = 0x1b,
};
+enum reg3_op {
+ amswapw_op = 0x70c0,
+};
+
struct reg0i15_format {
unsigned int immediate : 15;
unsigned int opcode : 17;
@@ -96,6 +100,13 @@ struct reg2i16_format {
unsigned int opcode : 6;
};
+struct reg3_format {
+ unsigned int rd : 5;
+ unsigned int rj : 5;
+ unsigned int rk : 5;
+ unsigned int opcode : 17;
+};
+
union loongarch_instruction {
unsigned int word;
struct reg0i15_format reg0i15_format;
@@ -105,6 +116,7 @@ union loongarch_instruction {
struct reg2i12_format reg2i12_format;
struct reg2i14_format reg2i14_format;
struct reg2i16_format reg2i16_format;
+ struct reg3_format reg3_format;
};
#define LOONGARCH_INSN_SIZE sizeof(union loongarch_instruction)
diff --git a/tools/arch/loongarch/include/asm/orc_types.h b/tools/arch/loongarch/include/asm/orc_types.h
index caf1f71a1057..d5fa98d1d177 100644
--- a/tools/arch/loongarch/include/asm/orc_types.h
+++ b/tools/arch/loongarch/include/asm/orc_types.h
@@ -34,7 +34,7 @@
#define ORC_TYPE_REGS 3
#define ORC_TYPE_REGS_PARTIAL 4
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* This struct is more or less a vastly simplified version of the DWARF Call
* Frame Information standard. It contains only the necessary parts of DWARF
@@ -53,6 +53,6 @@ struct orc_entry {
unsigned int type:3;
unsigned int signal:1;
};
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ORC_TYPES_H */
diff --git a/tools/arch/powerpc/include/uapi/asm/kvm.h b/tools/arch/powerpc/include/uapi/asm/kvm.h
index eaeda001784e..077c5437f521 100644
--- a/tools/arch/powerpc/include/uapi/asm/kvm.h
+++ b/tools/arch/powerpc/include/uapi/asm/kvm.h
@@ -1,18 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- *
* Copyright IBM Corp. 2007
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
diff --git a/tools/arch/riscv/include/asm/csr.h b/tools/arch/riscv/include/asm/csr.h
index 0dfc09254f99..21d8cee04638 100644
--- a/tools/arch/riscv/include/asm/csr.h
+++ b/tools/arch/riscv/include/asm/csr.h
@@ -167,7 +167,8 @@
#define VSIP_TO_HVIP_SHIFT (IRQ_VS_SOFT - IRQ_S_SOFT)
#define VSIP_VALID_MASK ((_AC(1, UL) << IRQ_S_SOFT) | \
(_AC(1, UL) << IRQ_S_TIMER) | \
- (_AC(1, UL) << IRQ_S_EXT))
+ (_AC(1, UL) << IRQ_S_EXT) | \
+ (_AC(1, UL) << IRQ_PMU_OVF))
/* AIA CSR bits */
#define TOPI_IID_SHIFT 16
@@ -280,7 +281,7 @@
#define CSR_HPMCOUNTER30H 0xc9e
#define CSR_HPMCOUNTER31H 0xc9f
-#define CSR_SSCOUNTOVF 0xda0
+#define CSR_SCOUNTOVF 0xda0
#define CSR_SSTATUS 0x100
#define CSR_SIE 0x104
@@ -468,13 +469,13 @@
#define IE_TIE (_AC(0x1, UL) << RV_IRQ_TIMER)
#define IE_EIE (_AC(0x1, UL) << RV_IRQ_EXT)
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define __ASM_STR(x) x
#else
#define __ASM_STR(x) #x
#endif
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#define csr_swap(csr, val) \
({ \
@@ -536,6 +537,6 @@
: "memory"); \
})
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_RISCV_CSR_H */
diff --git a/tools/arch/riscv/include/asm/vdso/processor.h b/tools/arch/riscv/include/asm/vdso/processor.h
index 662aca039848..0665b117f30f 100644
--- a/tools/arch/riscv/include/asm/vdso/processor.h
+++ b/tools/arch/riscv/include/asm/vdso/processor.h
@@ -2,7 +2,7 @@
#ifndef __ASM_VDSO_PROCESSOR_H
#define __ASM_VDSO_PROCESSOR_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm-generic/barrier.h>
@@ -27,6 +27,6 @@ static inline void cpu_relax(void)
barrier();
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_VDSO_PROCESSOR_H */
diff --git a/tools/arch/s390/include/uapi/asm/bitsperlong.h b/tools/arch/s390/include/uapi/asm/bitsperlong.h
index d2bb620119bf..a226a1686a53 100644
--- a/tools/arch/s390/include/uapi/asm/bitsperlong.h
+++ b/tools/arch/s390/include/uapi/asm/bitsperlong.h
@@ -2,11 +2,7 @@
#ifndef __ASM_S390_BITSPERLONG_H
#define __ASM_S390_BITSPERLONG_H
-#ifndef __s390x__
-#define __BITS_PER_LONG 32
-#else
#define __BITS_PER_LONG 64
-#endif
#include <asm-generic/bitsperlong.h>
diff --git a/tools/arch/s390/include/uapi/asm/kvm_perf.h b/tools/arch/s390/include/uapi/asm/kvm_perf.h
deleted file mode 100644
index 84606b8cc49e..000000000000
--- a/tools/arch/s390/include/uapi/asm/kvm_perf.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Definitions for perf-kvm on s390
- *
- * Copyright 2014 IBM Corp.
- * Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
- */
-
-#ifndef __LINUX_KVM_PERF_S390_H
-#define __LINUX_KVM_PERF_S390_H
-
-#include <asm/sie.h>
-
-#define DECODE_STR_LEN 40
-
-#define VCPU_ID "id"
-
-#define KVM_ENTRY_TRACE "kvm:kvm_s390_sie_enter"
-#define KVM_EXIT_TRACE "kvm:kvm_s390_sie_exit"
-#define KVM_EXIT_REASON "icptcode"
-
-#endif
diff --git a/tools/arch/x86/include/asm/amd/ibs.h b/tools/arch/x86/include/asm/amd/ibs.h
index 300b6e0765b2..cbce54fec7b9 100644
--- a/tools/arch/x86/include/asm/amd/ibs.h
+++ b/tools/arch/x86/include/asm/amd/ibs.h
@@ -1,4 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_AMD_IBS_H
+#define _ASM_X86_AMD_IBS_H
+
/*
* From PPR Vol 1 for AMD Family 19h Model 01h B1
* 55898 Rev 0.35 - Feb 5, 2021
@@ -151,3 +154,5 @@ struct perf_ibs_data {
};
u64 regs[MSR_AMD64_IBS_REG_COUNT_MAX];
};
+
+#endif /* _ASM_X86_AMD_IBS_H */
diff --git a/tools/arch/x86/include/asm/asm.h b/tools/arch/x86/include/asm/asm.h
index dbe39b44256b..6e1b357c374b 100644
--- a/tools/arch/x86/include/asm/asm.h
+++ b/tools/arch/x86/include/asm/asm.h
@@ -108,18 +108,6 @@
#endif
-/*
- * Macros to generate condition code outputs from inline assembly,
- * The output operand must be type "bool".
- */
-#ifdef __GCC_ASM_FLAG_OUTPUTS__
-# define CC_SET(c) "\n\t/* output condition code " #c "*/\n"
-# define CC_OUT(c) "=@cc" #c
-#else
-# define CC_SET(c) "\n\tset" #c " %[_cc_" #c "]\n"
-# define CC_OUT(c) [_cc_ ## c] "=qm"
-#endif
-
#ifdef __KERNEL__
/* Exception table entry */
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 98c8931db4a5..ccc01ad6ff7c 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -75,7 +75,7 @@
#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* "centaur_mcr" Centaur MCRs (= MTRRs) */
#define X86_FEATURE_K8 ( 3*32+ 4) /* Opteron, Athlon64 */
#define X86_FEATURE_ZEN5 ( 3*32+ 5) /* CPU based on Zen5 microarchitecture */
-/* Free ( 3*32+ 6) */
+#define X86_FEATURE_ZEN6 ( 3*32+ 6) /* CPU based on Zen6 microarchitecture */
/* Free ( 3*32+ 7) */
#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* "constant_tsc" TSC ticks at a constant rate */
#define X86_FEATURE_UP ( 3*32+ 9) /* "up" SMP kernel running on UP */
@@ -218,6 +218,7 @@
#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 1) /* "flexpriority" Intel FlexPriority */
#define X86_FEATURE_EPT ( 8*32+ 2) /* "ept" Intel Extended Page Table */
#define X86_FEATURE_VPID ( 8*32+ 3) /* "vpid" Intel Virtual Processor ID */
+#define X86_FEATURE_COHERENCY_SFW_NO ( 8*32+ 4) /* SNP cache coherency software work around not needed */
#define X86_FEATURE_VMMCALL ( 8*32+15) /* "vmmcall" Prefer VMMCALL to VMCALL */
#define X86_FEATURE_XENPV ( 8*32+16) /* Xen paravirtual guest */
@@ -319,7 +320,7 @@
#define X86_FEATURE_FSRS (12*32+11) /* Fast short REP STOSB */
#define X86_FEATURE_FSRC (12*32+12) /* Fast short REP {CMPSB,SCASB} */
#define X86_FEATURE_FRED (12*32+17) /* "fred" Flexible Return and Event Delivery */
-#define X86_FEATURE_LKGS (12*32+18) /* Load "kernel" (userspace) GS */
+#define X86_FEATURE_LKGS (12*32+18) /* Like MOV_GS except MSR_KERNEL_GS_BASE = GS.base */
#define X86_FEATURE_WRMSRNS (12*32+19) /* Non-serializing WRMSR */
#define X86_FEATURE_AMX_FP16 (12*32+21) /* AMX fp16 Support */
#define X86_FEATURE_AVX_IFMA (12*32+23) /* Support for VPMADD52[H,L]UQ */
@@ -336,7 +337,7 @@
#define X86_FEATURE_AMD_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */
#define X86_FEATURE_AMD_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
#define X86_FEATURE_AMD_STIBP_ALWAYS_ON (13*32+17) /* Single Thread Indirect Branch Predictors always-on preferred */
-#define X86_FEATURE_AMD_IBRS_SAME_MODE (13*32+19) /* Indirect Branch Restricted Speculation same mode protection*/
+#define X86_FEATURE_AMD_IBRS_SAME_MODE (13*32+19) /* Indirect Branch Restricted Speculation same mode protection*/
#define X86_FEATURE_AMD_PPIN (13*32+23) /* "amd_ppin" Protected Processor Inventory Number */
#define X86_FEATURE_AMD_SSBD (13*32+24) /* Speculative Store Bypass Disable */
#define X86_FEATURE_VIRT_SSBD (13*32+25) /* "virt_ssbd" Virtualized Speculative Store Bypass Disable */
@@ -379,6 +380,7 @@
#define X86_FEATURE_V_SPEC_CTRL (15*32+20) /* "v_spec_ctrl" Virtual SPEC_CTRL */
#define X86_FEATURE_VNMI (15*32+25) /* "vnmi" Virtual NMI */
#define X86_FEATURE_SVME_ADDR_CHK (15*32+28) /* SVME addr check */
+#define X86_FEATURE_BUS_LOCK_THRESHOLD (15*32+29) /* Bus lock threshold */
#define X86_FEATURE_IDLE_HLT (15*32+30) /* IDLE HLT intercept */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */
@@ -405,9 +407,12 @@
#define X86_FEATURE_ENQCMD (16*32+29) /* "enqcmd" ENQCMD and ENQCMDS instructions */
#define X86_FEATURE_SGX_LC (16*32+30) /* "sgx_lc" Software Guard Extensions Launch Control */
-/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
+/*
+ * Linux-defined word for use with scattered/synthetic bits.
+ */
#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* "overflow_recov" MCA overflow recovery support */
#define X86_FEATURE_SUCCOR (17*32+ 1) /* "succor" Uncorrectable error containment and recovery */
+
#define X86_FEATURE_SMCA (17*32+ 3) /* "smca" Scalable MCA */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
@@ -442,11 +447,13 @@
#define X86_FEATURE_VM_PAGE_FLUSH (19*32+ 2) /* VM Page Flush MSR is supported */
#define X86_FEATURE_SEV_ES (19*32+ 3) /* "sev_es" Secure Encrypted Virtualization - Encrypted State */
#define X86_FEATURE_SEV_SNP (19*32+ 4) /* "sev_snp" Secure Encrypted Virtualization - Secure Nested Paging */
+#define X86_FEATURE_SNP_SECURE_TSC (19*32+ 8) /* SEV-SNP Secure TSC */
#define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* Virtual TSC_AUX */
#define X86_FEATURE_SME_COHERENT (19*32+10) /* hardware-enforced cache coherency */
#define X86_FEATURE_DEBUG_SWAP (19*32+14) /* "debug_swap" SEV-ES full debug state swap support */
#define X86_FEATURE_RMPREAD (19*32+21) /* RMPREAD instruction */
#define X86_FEATURE_SEGMENTED_RMP (19*32+23) /* Segmented RMP support */
+#define X86_FEATURE_ALLOWED_SEV_FEATURES (19*32+27) /* Allowed SEV Features */
#define X86_FEATURE_SVSM (19*32+28) /* "svsm" SVSM present */
#define X86_FEATURE_HV_INUSE_WR_ALLOWED (19*32+30) /* Allow Write to in-use hypervisor-owned pages */
@@ -454,10 +461,15 @@
#define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* No Nested Data Breakpoints */
#define X86_FEATURE_WRMSR_XX_BASE_NS (20*32+ 1) /* WRMSR to {FS,GS,KERNEL_GS}_BASE is non-serializing */
#define X86_FEATURE_LFENCE_RDTSC (20*32+ 2) /* LFENCE always serializing / synchronizes RDTSC */
+#define X86_FEATURE_VERW_CLEAR (20*32+ 5) /* The memory form of VERW mitigates TSA */
#define X86_FEATURE_NULL_SEL_CLR_BASE (20*32+ 6) /* Null Selector Clears Base */
+
#define X86_FEATURE_AUTOIBRS (20*32+ 8) /* Automatic IBRS */
#define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* SMM_CTL MSR is not present */
+#define X86_FEATURE_GP_ON_USER_CPUID (20*32+17) /* User CPUID faulting */
+
+#define X86_FEATURE_PREFETCHI (20*32+20) /* Prefetch Data/Instruction to Cache Level */
#define X86_FEATURE_SBPB (20*32+27) /* Selective Branch Prediction Barrier */
#define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* MSR_PRED_CMD[IBPB] flushes all branch type predictions */
#define X86_FEATURE_SRSO_NO (20*32+29) /* CPU is not affected by SRSO */
@@ -482,6 +494,14 @@
#define X86_FEATURE_AMD_HTR_CORES (21*32+ 6) /* Heterogeneous Core Topology */
#define X86_FEATURE_AMD_WORKLOAD_CLASS (21*32+ 7) /* Workload Classification */
#define X86_FEATURE_PREFER_YMM (21*32+ 8) /* Avoid ZMM registers due to downclocking */
+#define X86_FEATURE_APX (21*32+ 9) /* Advanced Performance Extensions */
+#define X86_FEATURE_INDIRECT_THUNK_ITS (21*32+10) /* Use thunk for indirect branches in lower half of cacheline */
+#define X86_FEATURE_TSA_SQ_NO (21*32+11) /* AMD CPU not vulnerable to TSA-SQ */
+#define X86_FEATURE_TSA_L1_NO (21*32+12) /* AMD CPU not vulnerable to TSA-L1 */
+#define X86_FEATURE_CLEAR_CPU_BUF_VM (21*32+13) /* Clear CPU buffers using VERW before VMRUN */
+#define X86_FEATURE_IBPB_EXIT_TO_USER (21*32+14) /* Use IBPB on exit-to-userspace, see VMSCAPE bug */
+#define X86_FEATURE_ABMC (21*32+15) /* Assignable Bandwidth Monitoring Counters */
+#define X86_FEATURE_MSR_IMM (21*32+16) /* MSR immediate form instructions */
/*
* BUG word(s)
@@ -534,4 +554,9 @@
#define X86_BUG_BHI X86_BUG( 1*32+ 3) /* "bhi" CPU is affected by Branch History Injection */
#define X86_BUG_IBPB_NO_RET X86_BUG( 1*32+ 4) /* "ibpb_no_ret" IBPB omits return target predictions */
#define X86_BUG_SPECTRE_V2_USER X86_BUG( 1*32+ 5) /* "spectre_v2_user" CPU is affected by Spectre variant 2 attack between user processes */
+#define X86_BUG_OLD_MICROCODE X86_BUG( 1*32+ 6) /* "old_microcode" CPU has old microcode, it is surely vulnerable to something */
+#define X86_BUG_ITS X86_BUG( 1*32+ 7) /* "its" CPU is affected by Indirect Target Selection */
+#define X86_BUG_ITS_NATIVE_ONLY X86_BUG( 1*32+ 8) /* "its_native_only" CPU is affected by ITS, VMX is not affected */
+#define X86_BUG_TSA X86_BUG( 1*32+ 9) /* "tsa" CPU is affected by Transient Scheduler Attacks */
+#define X86_BUG_VMSCAPE X86_BUG( 1*32+10) /* "vmscape" CPU is affected by VMSCAPE attacks from guests */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/tools/arch/x86/include/asm/inat.h b/tools/arch/x86/include/asm/inat.h
index 183aa662b165..099e926595bd 100644
--- a/tools/arch/x86/include/asm/inat.h
+++ b/tools/arch/x86/include/asm/inat.h
@@ -37,6 +37,8 @@
#define INAT_PFX_EVEX 15 /* EVEX prefix */
/* x86-64 REX2 prefix */
#define INAT_PFX_REX2 16 /* 0xD5 */
+/* AMD XOP prefix */
+#define INAT_PFX_XOP 17 /* 0x8F */
#define INAT_LSTPFX_MAX 3
#define INAT_LGCPFX_MAX 11
@@ -77,6 +79,7 @@
#define INAT_MOFFSET (1 << (INAT_FLAG_OFFS + 3))
#define INAT_VARIANT (1 << (INAT_FLAG_OFFS + 4))
#define INAT_VEXOK (1 << (INAT_FLAG_OFFS + 5))
+#define INAT_XOPOK INAT_VEXOK
#define INAT_VEXONLY (1 << (INAT_FLAG_OFFS + 6))
#define INAT_EVEXONLY (1 << (INAT_FLAG_OFFS + 7))
#define INAT_NO_REX2 (1 << (INAT_FLAG_OFFS + 8))
@@ -111,6 +114,8 @@ extern insn_attr_t inat_get_group_attribute(insn_byte_t modrm,
extern insn_attr_t inat_get_avx_attribute(insn_byte_t opcode,
insn_byte_t vex_m,
insn_byte_t vex_pp);
+extern insn_attr_t inat_get_xop_attribute(insn_byte_t opcode,
+ insn_byte_t map_select);
/* Attribute checking functions */
static inline int inat_is_legacy_prefix(insn_attr_t attr)
@@ -164,6 +169,11 @@ static inline int inat_is_vex3_prefix(insn_attr_t attr)
return (attr & INAT_PFX_MASK) == INAT_PFX_VEX3;
}
+static inline int inat_is_xop_prefix(insn_attr_t attr)
+{
+ return (attr & INAT_PFX_MASK) == INAT_PFX_XOP;
+}
+
static inline int inat_is_escape(insn_attr_t attr)
{
return attr & INAT_ESC_MASK;
@@ -229,6 +239,11 @@ static inline int inat_accept_vex(insn_attr_t attr)
return attr & INAT_VEXOK;
}
+static inline int inat_accept_xop(insn_attr_t attr)
+{
+ return attr & INAT_XOPOK;
+}
+
static inline int inat_must_vex(insn_attr_t attr)
{
return attr & (INAT_VEXONLY | INAT_EVEXONLY);
diff --git a/tools/arch/x86/include/asm/insn.h b/tools/arch/x86/include/asm/insn.h
index 0e5abd896ad4..8f10f2943370 100644
--- a/tools/arch/x86/include/asm/insn.h
+++ b/tools/arch/x86/include/asm/insn.h
@@ -71,7 +71,10 @@ struct insn {
* prefixes.bytes[3]: last prefix
*/
struct insn_field rex_prefix; /* REX prefix */
- struct insn_field vex_prefix; /* VEX prefix */
+ union {
+ struct insn_field vex_prefix; /* VEX prefix */
+ struct insn_field xop_prefix; /* XOP prefix */
+ };
struct insn_field opcode; /*
* opcode.bytes[0]: opcode1
* opcode.bytes[1]: opcode2
@@ -135,6 +138,17 @@ struct insn {
#define X86_VEX_V(vex) (((vex) & 0x78) >> 3) /* VEX3 Byte2, VEX2 Byte1 */
#define X86_VEX_P(vex) ((vex) & 0x03) /* VEX3 Byte2, VEX2 Byte1 */
#define X86_VEX_M_MAX 0x1f /* VEX3.M Maximum value */
+/* XOP bit fields */
+#define X86_XOP_R(xop) ((xop) & 0x80) /* XOP Byte2 */
+#define X86_XOP_X(xop) ((xop) & 0x40) /* XOP Byte2 */
+#define X86_XOP_B(xop) ((xop) & 0x20) /* XOP Byte2 */
+#define X86_XOP_M(xop) ((xop) & 0x1f) /* XOP Byte2 */
+#define X86_XOP_W(xop) ((xop) & 0x80) /* XOP Byte3 */
+#define X86_XOP_V(xop) ((xop) & 0x78) /* XOP Byte3 */
+#define X86_XOP_L(xop) ((xop) & 0x04) /* XOP Byte3 */
+#define X86_XOP_P(xop) ((xop) & 0x03) /* XOP Byte3 */
+#define X86_XOP_M_MIN 0x08 /* Min of XOP.M */
+#define X86_XOP_M_MAX 0x1f /* Max of XOP.M */
extern void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64);
extern int insn_get_prefixes(struct insn *insn);
@@ -178,7 +192,7 @@ static inline insn_byte_t insn_rex2_m_bit(struct insn *insn)
return X86_REX2_M(insn->rex_prefix.bytes[1]);
}
-static inline int insn_is_avx(struct insn *insn)
+static inline int insn_is_avx_or_xop(struct insn *insn)
{
if (!insn->prefixes.got)
insn_get_prefixes(insn);
@@ -192,6 +206,22 @@ static inline int insn_is_evex(struct insn *insn)
return (insn->vex_prefix.nbytes == 4);
}
+/* If we already know this is AVX/XOP encoded */
+static inline int avx_insn_is_xop(struct insn *insn)
+{
+ insn_attr_t attr = inat_get_opcode_attribute(insn->vex_prefix.bytes[0]);
+
+ return inat_is_xop_prefix(attr);
+}
+
+static inline int insn_is_xop(struct insn *insn)
+{
+ if (!insn_is_avx_or_xop(insn))
+ return 0;
+
+ return avx_insn_is_xop(insn);
+}
+
static inline int insn_has_emulate_prefix(struct insn *insn)
{
return !!insn->emulate_prefix_size;
@@ -222,11 +252,26 @@ static inline insn_byte_t insn_vex_w_bit(struct insn *insn)
return X86_VEX_W(insn->vex_prefix.bytes[2]);
}
+static inline insn_byte_t insn_xop_map_bits(struct insn *insn)
+{
+ if (insn->xop_prefix.nbytes < 3) /* XOP is 3 bytes */
+ return 0;
+ return X86_XOP_M(insn->xop_prefix.bytes[1]);
+}
+
+static inline insn_byte_t insn_xop_p_bits(struct insn *insn)
+{
+ return X86_XOP_P(insn->vex_prefix.bytes[2]);
+}
+
/* Get the last prefix id from last prefix or VEX prefix */
static inline int insn_last_prefix_id(struct insn *insn)
{
- if (insn_is_avx(insn))
+ if (insn_is_avx_or_xop(insn)) {
+ if (avx_insn_is_xop(insn))
+ return insn_xop_p_bits(insn);
return insn_vex_p_bits(insn); /* VEX_p is a SIMD prefix id */
+ }
if (insn->prefixes.bytes[3])
return inat_get_last_prefix_id(insn->prefixes.bytes[3]);
@@ -267,7 +312,6 @@ static inline int insn_offset_immediate(struct insn *insn)
/**
* for_each_insn_prefix() -- Iterate prefixes in the instruction
* @insn: Pointer to struct insn.
- * @idx: Index storage.
* @prefix: Prefix byte.
*
* Iterate prefix bytes of given @insn. Each prefix byte is stored in @prefix
@@ -276,8 +320,8 @@ static inline int insn_offset_immediate(struct insn *insn)
* Since prefixes.nbytes can be bigger than 4 if some prefixes
* are repeated, it cannot be used for looping over the prefixes.
*/
-#define for_each_insn_prefix(insn, idx, prefix) \
- for (idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++)
+#define for_each_insn_prefix(insn, prefix) \
+ for (int idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++)
#define POP_SS_OPCODE 0x1f
#define MOV_SREG_OPCODE 0x8e
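
A minimal consumer of the new XOP-aware helpers might look like the following sketch (the include path, instruction buffer, and printing are illustrative):

    #include <stdio.h>
    #include <asm/insn.h>    /* the tools/arch/x86 decoder copy */

    static void classify(const unsigned char *buf, int len)
    {
        struct insn insn;

        insn_init(&insn, buf, len, /* x86_64 = */ 1);
        insn_get_length(&insn);    /* decodes prefixes, opcode and operands */

        if (insn_is_xop(&insn))
            printf("XOP, map %u\n", (unsigned)insn_xop_map_bits(&insn));
        else if (insn_is_avx_or_xop(&insn))
            printf("VEX/EVEX encoded\n");
        else
            printf("legacy encoding\n");
    }
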
diff --git a/tools/arch/x86/include/asm/io.h b/tools/arch/x86/include/asm/io.h
new file mode 100644
index 000000000000..ecad61a3ea52
--- /dev/null
+++ b/tools/arch/x86/include/asm/io.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TOOLS_ASM_X86_IO_H
+#define _TOOLS_ASM_X86_IO_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include "special_insns.h"
+
+#define build_mmio_read(name, size, type, reg, barrier) \
+static inline type name(const volatile void __iomem *addr) \
+{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \
+:"m" (*(volatile type __force *)addr) barrier); return ret; }
+
+#define build_mmio_write(name, size, type, reg, barrier) \
+static inline void name(type val, volatile void __iomem *addr) \
+{ asm volatile("mov" size " %0,%1": :reg (val), \
+"m" (*(volatile type __force *)addr) barrier); }
+
+build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
+build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
+build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
+
+build_mmio_read(__readb, "b", unsigned char, "=q", )
+build_mmio_read(__readw, "w", unsigned short, "=r", )
+build_mmio_read(__readl, "l", unsigned int, "=r", )
+
+build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
+build_mmio_write(writew, "w", unsigned short, "r", :"memory")
+build_mmio_write(writel, "l", unsigned int, "r", :"memory")
+
+build_mmio_write(__writeb, "b", unsigned char, "q", )
+build_mmio_write(__writew, "w", unsigned short, "r", )
+build_mmio_write(__writel, "l", unsigned int, "r", )
+
+#define readb readb
+#define readw readw
+#define readl readl
+#define readb_relaxed(a) __readb(a)
+#define readw_relaxed(a) __readw(a)
+#define readl_relaxed(a) __readl(a)
+#define __raw_readb __readb
+#define __raw_readw __readw
+#define __raw_readl __readl
+
+#define writeb writeb
+#define writew writew
+#define writel writel
+#define writeb_relaxed(v, a) __writeb(v, a)
+#define writew_relaxed(v, a) __writew(v, a)
+#define writel_relaxed(v, a) __writel(v, a)
+#define __raw_writeb __writeb
+#define __raw_writew __writew
+#define __raw_writel __writel
+
+#ifdef __x86_64__
+
+build_mmio_read(readq, "q", u64, "=r", :"memory")
+build_mmio_read(__readq, "q", u64, "=r", )
+build_mmio_write(writeq, "q", u64, "r", :"memory")
+build_mmio_write(__writeq, "q", u64, "r", )
+
+#define readq_relaxed(a) __readq(a)
+#define writeq_relaxed(v, a) __writeq(v, a)
+
+#define __raw_readq __readq
+#define __raw_writeq __writeq
+
+/* Let people know that we have them */
+#define readq readq
+#define writeq writeq
+
+#endif /* __x86_64__ */
+
+#include <asm-generic/io.h>
+
+/**
+ * iosubmit_cmds512 - copy data to single MMIO location, in 512-bit units
+ * @dst: destination, in MMIO space (must be 512-bit aligned)
+ * @src: source
+ * @count: number of 512 bits quantities to submit
+ *
+ * Submit data from kernel space to MMIO space, in units of 512 bits at a
+ * time. Order of access is not guaranteed, nor is a memory barrier
+ * performed afterwards.
+ *
+ * Warning: Do not use this helper unless your driver has checked that the CPU
+ * instruction is supported on the platform.
+ */
+static inline void iosubmit_cmds512(void __iomem *dst, const void *src,
+ size_t count)
+{
+ const u8 *from = src;
+ const u8 *end = from + count * 64;
+
+ while (from < end) {
+ movdir64b(dst, from);
+ from += 64;
+ }
+}
+
+#endif /* _TOOLS_ASM_X86_IO_H */
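To make the generator macros above concrete, this is roughly what one instantiation expands to, written out by hand (so approximate):

    /* Approximate expansion of:
     * build_mmio_read(readl, "l", unsigned int, "=r", :"memory") */
    static inline unsigned int readl(const volatile void __iomem *addr)
    {
    	unsigned int ret;

    	/* The "memory" clobber makes this an ordering point against other
    	 * accesses; the __readl()/readl_relaxed() variants omit it. */
    	asm volatile("movl %1,%0"
    		     : "=r" (ret)
    		     : "m" (*(volatile unsigned int __force *)addr)
    		     : "memory");
    	return ret;
    }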
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index e6134ef2263d..9e1720d73244 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -211,6 +211,14 @@
* VERW clears CPU Register
* File.
*/
+#define ARCH_CAP_ITS_NO BIT_ULL(62) /*
+ * Not susceptible to
+ * Indirect Target Selection.
+ * This bit is not set by
+ * HW, but is synthesized by
+ * VMMs for guests to know
+ * their affected status.
+ */
#define MSR_IA32_FLUSH_CMD 0x0000010b
#define L1D_FLUSH BIT(0) /*
@@ -307,12 +315,17 @@
#define PERF_CAP_PT_IDX 16
#define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6
-#define PERF_CAP_PEBS_TRAP BIT_ULL(6)
-#define PERF_CAP_ARCH_REG BIT_ULL(7)
-#define PERF_CAP_PEBS_FORMAT 0xf00
-#define PERF_CAP_PEBS_BASELINE BIT_ULL(14)
-#define PERF_CAP_PEBS_MASK (PERF_CAP_PEBS_TRAP | PERF_CAP_ARCH_REG | \
- PERF_CAP_PEBS_FORMAT | PERF_CAP_PEBS_BASELINE)
+
+#define PERF_CAP_LBR_FMT 0x3f
+#define PERF_CAP_PEBS_TRAP BIT_ULL(6)
+#define PERF_CAP_ARCH_REG BIT_ULL(7)
+#define PERF_CAP_PEBS_FORMAT 0xf00
+#define PERF_CAP_FW_WRITES BIT_ULL(13)
+#define PERF_CAP_PEBS_BASELINE BIT_ULL(14)
+#define PERF_CAP_PEBS_TIMING_INFO BIT_ULL(17)
+#define PERF_CAP_PEBS_MASK (PERF_CAP_PEBS_TRAP | PERF_CAP_ARCH_REG | \
+ PERF_CAP_PEBS_FORMAT | PERF_CAP_PEBS_BASELINE | \
+ PERF_CAP_PEBS_TIMING_INFO)
#define MSR_IA32_RTIT_CTL 0x00000570
#define RTIT_CTL_TRACEEN BIT(0)
@@ -411,6 +424,7 @@
#define DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI (1UL << 12)
#define DEBUGCTLMSR_FREEZE_IN_SMM_BIT 14
#define DEBUGCTLMSR_FREEZE_IN_SMM (1UL << DEBUGCTLMSR_FREEZE_IN_SMM_BIT)
+#define DEBUGCTLMSR_RTM_DEBUG BIT(15)
#define MSR_PEBS_FRONTEND 0x000003f7
@@ -525,7 +539,7 @@
#define MSR_HWP_CAPABILITIES 0x00000771
#define MSR_HWP_REQUEST_PKG 0x00000772
#define MSR_HWP_INTERRUPT 0x00000773
-#define MSR_HWP_REQUEST 0x00000774
+#define MSR_HWP_REQUEST 0x00000774
#define MSR_HWP_STATUS 0x00000777
/* CPUID.6.EAX */
@@ -542,16 +556,16 @@
#define HWP_LOWEST_PERF(x) (((x) >> 24) & 0xff)
/* IA32_HWP_REQUEST */
-#define HWP_MIN_PERF(x) (x & 0xff)
-#define HWP_MAX_PERF(x) ((x & 0xff) << 8)
+#define HWP_MIN_PERF(x) (x & 0xff)
+#define HWP_MAX_PERF(x) ((x & 0xff) << 8)
#define HWP_DESIRED_PERF(x) ((x & 0xff) << 16)
-#define HWP_ENERGY_PERF_PREFERENCE(x) (((unsigned long long) x & 0xff) << 24)
+#define HWP_ENERGY_PERF_PREFERENCE(x) (((u64)x & 0xff) << 24)
#define HWP_EPP_PERFORMANCE 0x00
#define HWP_EPP_BALANCE_PERFORMANCE 0x80
#define HWP_EPP_BALANCE_POWERSAVE 0xC0
#define HWP_EPP_POWERSAVE 0xFF
-#define HWP_ACTIVITY_WINDOW(x) ((unsigned long long)(x & 0xff3) << 32)
-#define HWP_PACKAGE_CONTROL(x) ((unsigned long long)(x & 0x1) << 42)
+#define HWP_ACTIVITY_WINDOW(x) ((u64)(x & 0xff3) << 32)
+#define HWP_PACKAGE_CONTROL(x) ((u64)(x & 0x1) << 42)
/* IA32_HWP_STATUS */
#define HWP_GUARANTEED_CHANGE(x) (x & 0x1)
@@ -594,7 +608,11 @@
/* V6 PMON MSR range */
#define MSR_IA32_PMC_V6_GP0_CTR 0x1900
#define MSR_IA32_PMC_V6_GP0_CFG_A 0x1901
+#define MSR_IA32_PMC_V6_GP0_CFG_B 0x1902
+#define MSR_IA32_PMC_V6_GP0_CFG_C 0x1903
#define MSR_IA32_PMC_V6_FX0_CTR 0x1980
+#define MSR_IA32_PMC_V6_FX0_CFG_B 0x1982
+#define MSR_IA32_PMC_V6_FX0_CFG_C 0x1983
#define MSR_IA32_PMC_V6_STEP 4
/* KeyID partitioning between MKTME and TDX */
@@ -616,7 +634,13 @@
#define MSR_AMD64_OSVW_STATUS 0xc0010141
#define MSR_AMD_PPIN_CTL 0xc00102f0
#define MSR_AMD_PPIN 0xc00102f1
+#define MSR_AMD64_CPUID_FN_7 0xc0011002
#define MSR_AMD64_CPUID_FN_1 0xc0011004
+
+#define MSR_AMD64_CPUID_EXT_FEAT 0xc0011005
+#define MSR_AMD64_CPUID_EXT_FEAT_TOPOEXT_BIT 54
+#define MSR_AMD64_CPUID_EXT_FEAT_TOPOEXT BIT_ULL(MSR_AMD64_CPUID_EXT_FEAT_TOPOEXT_BIT)
+
#define MSR_AMD64_LS_CFG 0xc0011020
#define MSR_AMD64_DC_CFG 0xc0011022
#define MSR_AMD64_TW_CFG 0xc0011023
@@ -685,8 +709,15 @@
#define MSR_AMD64_SNP_VMSA_REG_PROT BIT_ULL(MSR_AMD64_SNP_VMSA_REG_PROT_BIT)
#define MSR_AMD64_SNP_SMT_PROT_BIT 17
#define MSR_AMD64_SNP_SMT_PROT BIT_ULL(MSR_AMD64_SNP_SMT_PROT_BIT)
-#define MSR_AMD64_SNP_RESV_BIT 18
+#define MSR_AMD64_SNP_SECURE_AVIC_BIT 18
+#define MSR_AMD64_SNP_SECURE_AVIC BIT_ULL(MSR_AMD64_SNP_SECURE_AVIC_BIT)
+#define MSR_AMD64_SNP_RESV_BIT 19
#define MSR_AMD64_SNP_RESERVED_MASK GENMASK_ULL(63, MSR_AMD64_SNP_RESV_BIT)
+#define MSR_AMD64_SAVIC_CONTROL 0xc0010138
+#define MSR_AMD64_SAVIC_EN_BIT 0
+#define MSR_AMD64_SAVIC_EN BIT_ULL(MSR_AMD64_SAVIC_EN_BIT)
+#define MSR_AMD64_SAVIC_ALLOWEDNMI_BIT 1
+#define MSR_AMD64_SAVIC_ALLOWEDNMI BIT_ULL(MSR_AMD64_SAVIC_ALLOWEDNMI_BIT)
#define MSR_AMD64_RMP_BASE 0xc0010132
#define MSR_AMD64_RMP_END 0xc0010133
#define MSR_AMD64_RMP_CFG 0xc0010136
@@ -719,6 +750,12 @@
#define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS 0xc0000300
#define MSR_AMD64_PERF_CNTR_GLOBAL_CTL 0xc0000301
#define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR 0xc0000302
+#define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET 0xc0000303
+
+/* AMD Hardware Feedback Support MSRs */
+#define MSR_AMD_WORKLOAD_CLASS_CONFIG 0xc0000500
+#define MSR_AMD_WORKLOAD_CLASS_ID 0xc0000501
+#define MSR_AMD_WORKLOAD_HRST 0xc0000502
/* AMD Last Branch Record MSRs */
#define MSR_AMD64_LBR_SELECT 0xc000010e
@@ -818,6 +855,7 @@
#define MSR_K7_HWCR_SMMLOCK BIT_ULL(MSR_K7_HWCR_SMMLOCK_BIT)
#define MSR_K7_HWCR_IRPERF_EN_BIT 30
#define MSR_K7_HWCR_IRPERF_EN BIT_ULL(MSR_K7_HWCR_IRPERF_EN_BIT)
+#define MSR_K7_HWCR_CPUID_USER_DIS_BIT 35
#define MSR_K7_FID_VID_CTL 0xc0010041
#define MSR_K7_FID_VID_STATUS 0xc0010042
#define MSR_K7_HWCR_CPB_DIS_BIT 25
@@ -1203,6 +1241,8 @@
/* - AMD: */
#define MSR_IA32_MBA_BW_BASE 0xc0000200
#define MSR_IA32_SMBA_BW_BASE 0xc0000280
+#define MSR_IA32_L3_QOS_ABMC_CFG 0xc00003fd
+#define MSR_IA32_L3_QOS_EXT_CFG 0xc00003ff
#define MSR_IA32_EVT_CFG_BASE 0xc0000400
/* AMD-V MSRs */
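As a worked example of the HWP request macros touched above (field values are made up, for illustration only):

    /* Sketch: compose an IA32_HWP_REQUEST value. */
    u64 hwp_req = HWP_MIN_PERF(4) |			/* bits  7:0  */
    	      HWP_MAX_PERF(28) |		/* bits 15:8  */
    	      HWP_DESIRED_PERF(0) |		/* bits 23:16, 0 = HW picks */
    	      HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_BALANCE_PERFORMANCE) |
    	      HWP_ACTIVITY_WINDOW(0);		/* bits 41:32 */
    /* The (u64) casts (formerly spelled 'unsigned long long') are what keep
     * HWP_ACTIVITY_WINDOW()/HWP_PACKAGE_CONTROL() from shifting a 32-bit
     * int past bit 31. */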
diff --git a/tools/arch/x86/include/asm/special_insns.h b/tools/arch/x86/include/asm/special_insns.h
new file mode 100644
index 000000000000..04af42a99c38
--- /dev/null
+++ b/tools/arch/x86/include/asm/special_insns.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TOOLS_ASM_X86_SPECIAL_INSNS_H
+#define _TOOLS_ASM_X86_SPECIAL_INSNS_H
+
+/* The dst parameter must be 64-bytes aligned */
+static inline void movdir64b(void *dst, const void *src)
+{
+ const struct { char _[64]; } *__src = src;
+ struct { char _[64]; } *__dst = dst;
+
+ /*
+ * MOVDIR64B %(rdx), rax.
+ *
+ * Both __src and __dst must be memory constraints in order to tell the
+ * compiler that no other memory accesses should be reordered around
+ * this one.
+ *
+ * Also, both must be supplied as lvalues because this tells
+ * the compiler what the object is (its size) the instruction accesses.
+ * I.e., not the pointers but what they point to, thus the deref'ing '*'.
+ */
+ asm volatile(".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
+ : "+m" (*__dst)
+ : "m" (*__src), "a" (__dst), "d" (__src));
+}
+
+#endif /* _TOOLS_ASM_X86_SPECIAL_INSNS_H */
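A hedged usage sketch for movdir64b() together with iosubmit_cmds512() from io.h above; the portal and descriptor type are hypothetical:

    /* Hypothetical 64-byte descriptor. MOVDIR64B requires the destination
     * (the MMIO portal), not the source, to be 64-byte aligned, and the
     * caller must have checked CPUID for MOVDIR64B support beforehand. */
    struct fake_desc { u8 bytes[64]; };

    static void submit_two(void __iomem *portal, const struct fake_desc *descs)
    {
    	iosubmit_cmds512(portal, descs, 2);	/* 2 x 512-bit writes */
    }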
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index b663d916f162..d420c9c066d4 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -35,6 +35,11 @@
#define MC_VECTOR 18
#define XM_VECTOR 19
#define VE_VECTOR 20
+#define CP_VECTOR 21
+
+#define HV_VECTOR 28
+#define VC_VECTOR 29
+#define SX_VECTOR 30
/* Select x86 specific features in <linux/kvm.h> */
#define __KVM_HAVE_PIT
@@ -411,6 +416,35 @@ struct kvm_xcrs {
__u64 padding[16];
};
+#define KVM_X86_REG_TYPE_MSR 2
+#define KVM_X86_REG_TYPE_KVM 3
+
+#define KVM_X86_KVM_REG_SIZE(reg) \
+({ \
+ reg == KVM_REG_GUEST_SSP ? KVM_REG_SIZE_U64 : 0; \
+})
+
+#define KVM_X86_REG_TYPE_SIZE(type, reg) \
+({ \
+ __u64 type_size = (__u64)type << 32; \
+ \
+ type_size |= type == KVM_X86_REG_TYPE_MSR ? KVM_REG_SIZE_U64 : \
+ type == KVM_X86_REG_TYPE_KVM ? KVM_X86_KVM_REG_SIZE(reg) : \
+ 0; \
+ type_size; \
+})
+
+#define KVM_X86_REG_ID(type, index) \
+ (KVM_REG_X86 | KVM_X86_REG_TYPE_SIZE(type, index) | index)
+
+#define KVM_X86_REG_MSR(index) \
+ KVM_X86_REG_ID(KVM_X86_REG_TYPE_MSR, index)
+#define KVM_X86_REG_KVM(index) \
+ KVM_X86_REG_ID(KVM_X86_REG_TYPE_KVM, index)
+
+/* KVM-defined registers starting from 0 */
+#define KVM_REG_GUEST_SSP 0
+
#define KVM_SYNC_X86_REGS (1UL << 0)
#define KVM_SYNC_X86_SREGS (1UL << 1)
#define KVM_SYNC_X86_EVENTS (1UL << 2)
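To make the new register-ID encoding concrete, one case worked out by hand from the macros above (the MSR index is just an example):

    /* KVM_X86_REG_MSR(0x174) builds, per the macros above:
     *   KVM_REG_X86				arch field
     * | ((__u64)KVM_X86_REG_TYPE_MSR << 32)	type field
     * | KVM_REG_SIZE_U64			size for MSR-type registers
     * | 0x174					the MSR index itself
     * KVM_X86_REG_KVM(KVM_REG_GUEST_SSP) is analogous with type 3, taking
     * its size from KVM_X86_KVM_REG_SIZE() instead.
     */
    __u64 id = KVM_X86_REG_MSR(0x174);	/* e.g. IA32_SYSENTER_CS */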
@@ -441,6 +475,7 @@ struct kvm_sync_regs {
#define KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS (1 << 6)
#define KVM_X86_QUIRK_SLOT_ZAP_ALL (1 << 7)
#define KVM_X86_QUIRK_STUFF_FEATURE_MSRS (1 << 8)
+#define KVM_X86_QUIRK_IGNORE_GUEST_PAT (1 << 9)
#define KVM_STATE_NESTED_FORMAT_VMX 0
#define KVM_STATE_NESTED_FORMAT_SVM 1
@@ -931,4 +966,80 @@ struct kvm_hyperv_eventfd {
#define KVM_X86_SNP_VM 4
#define KVM_X86_TDX_VM 5
+/* Trust Domain eXtension sub-ioctl() commands. */
+enum kvm_tdx_cmd_id {
+ KVM_TDX_CAPABILITIES = 0,
+ KVM_TDX_INIT_VM,
+ KVM_TDX_INIT_VCPU,
+ KVM_TDX_INIT_MEM_REGION,
+ KVM_TDX_FINALIZE_VM,
+ KVM_TDX_GET_CPUID,
+
+ KVM_TDX_CMD_NR_MAX,
+};
+
+struct kvm_tdx_cmd {
+ /* enum kvm_tdx_cmd_id */
+ __u32 id;
+ /* flags for sub-command. If sub-command doesn't use this, set zero. */
+ __u32 flags;
+ /*
+ * data for each sub-command. An immediate or a pointer to the actual
+ * data in process virtual address. If sub-command doesn't use it,
+ * set zero.
+ */
+ __u64 data;
+ /*
+ * Auxiliary error code. The sub-command may return TDX SEAMCALL
+ * status code in addition to -Exxx.
+ */
+ __u64 hw_error;
+};
+
+struct kvm_tdx_capabilities {
+ __u64 supported_attrs;
+ __u64 supported_xfam;
+
+ __u64 kernel_tdvmcallinfo_1_r11;
+ __u64 user_tdvmcallinfo_1_r11;
+ __u64 kernel_tdvmcallinfo_1_r12;
+ __u64 user_tdvmcallinfo_1_r12;
+
+ __u64 reserved[250];
+
+ /* Configurable CPUID bits for userspace */
+ struct kvm_cpuid2 cpuid;
+};
+
+struct kvm_tdx_init_vm {
+ __u64 attributes;
+ __u64 xfam;
+ __u64 mrconfigid[6]; /* sha384 digest */
+ __u64 mrowner[6]; /* sha384 digest */
+ __u64 mrownerconfig[6]; /* sha384 digest */
+
+ /* The total space for TD_PARAMS before the CPUIDs is 256 bytes */
+ __u64 reserved[12];
+
+ /*
+ * Call KVM_TDX_INIT_VM before vcpu creation, thus before
+ * KVM_SET_CPUID2.
+ * This configuration supersedes KVM_SET_CPUID2 for the vCPUs because the
+ * TDX module directly virtualizes those CPUIDs without the VMM. The user
+ * space VMM, e.g. qemu, should make KVM_SET_CPUID2 consistent with
+ * these values. If it doesn't, KVM may have a wrong idea of the guest's
+ * vCPU CPUIDs, and may wrongly emulate CPUIDs or MSRs that the TDX
+ * module doesn't virtualize.
+ */
+ struct kvm_cpuid2 cpuid;
+};
+
+#define KVM_TDX_MEASURE_MEMORY_REGION _BITULL(0)
+
+struct kvm_tdx_init_mem_region {
+ __u64 source_addr;
+ __u64 gpa;
+ __u64 nr_pages;
+};
+
#endif /* _ASM_X86_KVM_H */
diff --git a/tools/arch/x86/include/uapi/asm/kvm_perf.h b/tools/arch/x86/include/uapi/asm/kvm_perf.h
deleted file mode 100644
index 125cf5cdf6c5..000000000000
--- a/tools/arch/x86/include/uapi/asm/kvm_perf.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _ASM_X86_KVM_PERF_H
-#define _ASM_X86_KVM_PERF_H
-
-#include <asm/svm.h>
-#include <asm/vmx.h>
-#include <asm/kvm.h>
-
-#define DECODE_STR_LEN 20
-
-#define VCPU_ID "vcpu_id"
-
-#define KVM_ENTRY_TRACE "kvm:kvm_entry"
-#define KVM_EXIT_TRACE "kvm:kvm_exit"
-#define KVM_EXIT_REASON "exit_reason"
-
-#endif /* _ASM_X86_KVM_PERF_H */
diff --git a/tools/arch/x86/include/uapi/asm/svm.h b/tools/arch/x86/include/uapi/asm/svm.h
index ec1321248dac..650e3256ea7d 100644
--- a/tools/arch/x86/include/uapi/asm/svm.h
+++ b/tools/arch/x86/include/uapi/asm/svm.h
@@ -95,6 +95,7 @@
#define SVM_EXIT_CR14_WRITE_TRAP 0x09e
#define SVM_EXIT_CR15_WRITE_TRAP 0x09f
#define SVM_EXIT_INVPCID 0x0a2
+#define SVM_EXIT_BUS_LOCK 0x0a5
#define SVM_EXIT_IDLE_HLT 0x0a6
#define SVM_EXIT_NPF 0x400
#define SVM_EXIT_AVIC_INCOMPLETE_IPI 0x401
@@ -117,6 +118,10 @@
#define SVM_VMGEXIT_AP_CREATE 1
#define SVM_VMGEXIT_AP_DESTROY 2
#define SVM_VMGEXIT_SNP_RUN_VMPL 0x80000018
+#define SVM_VMGEXIT_SAVIC 0x8000001a
+#define SVM_VMGEXIT_SAVIC_REGISTER_GPA 0
+#define SVM_VMGEXIT_SAVIC_UNREGISTER_GPA 1
+#define SVM_VMGEXIT_SAVIC_SELF_GPA ~0ULL
#define SVM_VMGEXIT_HV_FEATURES 0x8000fffd
#define SVM_VMGEXIT_TERM_REQUEST 0x8000fffe
#define SVM_VMGEXIT_TERM_REASON(reason_set, reason_code) \
@@ -225,6 +230,7 @@
{ SVM_EXIT_CR4_WRITE_TRAP, "write_cr4_trap" }, \
{ SVM_EXIT_CR8_WRITE_TRAP, "write_cr8_trap" }, \
{ SVM_EXIT_INVPCID, "invpcid" }, \
+ { SVM_EXIT_BUS_LOCK, "buslock" }, \
{ SVM_EXIT_IDLE_HLT, "idle-halt" }, \
{ SVM_EXIT_NPF, "npf" }, \
{ SVM_EXIT_AVIC_INCOMPLETE_IPI, "avic_incomplete_ipi" }, \
diff --git a/tools/arch/x86/include/uapi/asm/vmx.h b/tools/arch/x86/include/uapi/asm/vmx.h
index a5faf6d88f1b..1baa86dfe029 100644
--- a/tools/arch/x86/include/uapi/asm/vmx.h
+++ b/tools/arch/x86/include/uapi/asm/vmx.h
@@ -34,6 +34,7 @@
#define EXIT_REASON_TRIPLE_FAULT 2
#define EXIT_REASON_INIT_SIGNAL 3
#define EXIT_REASON_SIPI_SIGNAL 4
+#define EXIT_REASON_OTHER_SMI 6
#define EXIT_REASON_INTERRUPT_WINDOW 7
#define EXIT_REASON_NMI_WINDOW 8
@@ -92,6 +93,10 @@
#define EXIT_REASON_TPAUSE 68
#define EXIT_REASON_BUS_LOCK 74
#define EXIT_REASON_NOTIFY 75
+#define EXIT_REASON_SEAMCALL 76
+#define EXIT_REASON_TDCALL 77
+#define EXIT_REASON_MSR_READ_IMM 84
+#define EXIT_REASON_MSR_WRITE_IMM 85
#define VMX_EXIT_REASONS \
{ EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \
@@ -155,7 +160,10 @@
{ EXIT_REASON_UMWAIT, "UMWAIT" }, \
{ EXIT_REASON_TPAUSE, "TPAUSE" }, \
{ EXIT_REASON_BUS_LOCK, "BUS_LOCK" }, \
- { EXIT_REASON_NOTIFY, "NOTIFY" }
+ { EXIT_REASON_NOTIFY, "NOTIFY" }, \
+ { EXIT_REASON_TDCALL, "TDCALL" }, \
+ { EXIT_REASON_MSR_READ_IMM, "MSR_READ_IMM" }, \
+ { EXIT_REASON_MSR_WRITE_IMM, "MSR_WRITE_IMM" }
#define VMX_EXIT_REASON_FLAGS \
{ VMX_EXIT_REASONS_FAILED_VMENTRY, "FAILED_VMENTRY" }
diff --git a/tools/arch/x86/lib/inat.c b/tools/arch/x86/lib/inat.c
index dfbcc6405941..ffcb0e27453b 100644
--- a/tools/arch/x86/lib/inat.c
+++ b/tools/arch/x86/lib/inat.c
@@ -81,3 +81,16 @@ insn_attr_t inat_get_avx_attribute(insn_byte_t opcode, insn_byte_t vex_m,
return table[opcode];
}
+insn_attr_t inat_get_xop_attribute(insn_byte_t opcode, insn_byte_t map_select)
+{
+ const insn_attr_t *table;
+
+ if (map_select < X86_XOP_M_MIN || map_select > X86_XOP_M_MAX)
+ return 0;
+ map_select -= X86_XOP_M_MIN;
+ /* At first, this checks the master table */
+ table = inat_xop_tables[map_select];
+ if (!table)
+ return 0;
+ return table[opcode];
+}
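The XOP opcode maps added further below are 8h, 9h and Ah, so X86_XOP_M_MIN is presumably 8 and the subtraction above yields a zero-based table index. A one-line usage sketch with illustrative values:

    /* map_select 9 selects inat_xop_tables[9 - X86_XOP_M_MIN], XOP map 9h;
     * opcode 0x01 there is GrpXOP1 (BLCFILL etc., see the opcode map). */
    insn_attr_t attr = inat_get_xop_attribute(0x01, 9);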
diff --git a/tools/arch/x86/lib/insn.c b/tools/arch/x86/lib/insn.c
index bce69c6bfa69..1d1c57c74d1f 100644
--- a/tools/arch/x86/lib/insn.c
+++ b/tools/arch/x86/lib/insn.c
@@ -200,12 +200,15 @@ found:
}
insn->rex_prefix.got = 1;
- /* Decode VEX prefix */
+ /* Decode VEX/XOP prefix */
b = peek_next(insn_byte_t, insn);
attr = inat_get_opcode_attribute(b);
- if (inat_is_vex_prefix(attr)) {
+ if (inat_is_vex_prefix(attr) || inat_is_xop_prefix(attr)) {
insn_byte_t b2 = peek_nbyte_next(insn_byte_t, insn, 1);
- if (!insn->x86_64) {
+
+ if (inat_is_xop_prefix(attr) && X86_MODRM_REG(b2) == 0) {
+ /* Grp1A.0 is always POP Ev */
+ goto vex_end;
+ } else if (!insn->x86_64) {
/*
* In 32-bits mode, if the [7:6] bits (mod bits of
* ModRM) on the second byte are not 11b, it is
@@ -226,13 +229,13 @@ found:
if (insn->x86_64 && X86_VEX_W(b2))
/* VEX.W overrides opnd_size */
insn->opnd_bytes = 8;
- } else if (inat_is_vex3_prefix(attr)) {
+ } else if (inat_is_vex3_prefix(attr) || inat_is_xop_prefix(attr)) {
b2 = peek_nbyte_next(insn_byte_t, insn, 2);
insn_set_byte(&insn->vex_prefix, 2, b2);
insn->vex_prefix.nbytes = 3;
insn->next_byte += 3;
if (insn->x86_64 && X86_VEX_W(b2))
- /* VEX.W overrides opnd_size */
+ /* VEX.W/XOP.W overrides opnd_size */
insn->opnd_bytes = 8;
} else {
/*
@@ -288,9 +291,22 @@ int insn_get_opcode(struct insn *insn)
insn_set_byte(opcode, 0, op);
opcode->nbytes = 1;
- /* Check if there is VEX prefix or not */
- if (insn_is_avx(insn)) {
+ /* Check if there is VEX/XOP prefix or not */
+ if (insn_is_avx_or_xop(insn)) {
insn_byte_t m, p;
+
+ /* XOP prefix has different encoding */
+ if (unlikely(avx_insn_is_xop(insn))) {
+ m = insn_xop_map_bits(insn);
+ insn->attr = inat_get_xop_attribute(op, m);
+ if (!inat_accept_xop(insn->attr)) {
+ insn->attr = 0;
+ return -EINVAL;
+ }
+ /* XOP has only 1 byte for opcode */
+ goto end;
+ }
+
m = insn_vex_m_bits(insn);
p = insn_vex_p_bits(insn);
insn->attr = inat_get_avx_attribute(op, m, p);
@@ -383,7 +399,8 @@ int insn_get_modrm(struct insn *insn)
pfx_id = insn_last_prefix_id(insn);
insn->attr = inat_get_group_attribute(mod, pfx_id,
insn->attr);
- if (insn_is_avx(insn) && !inat_accept_vex(insn->attr)) {
+ if (insn_is_avx_or_xop(insn) && !inat_accept_vex(insn->attr) &&
+ !inat_accept_xop(insn->attr)) {
/* Bad insn */
insn->attr = 0;
return -EINVAL;
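A minimal sketch of driving the decoder over an XOP instruction, assuming the tools copy exports insn_decode() as the kernel's decoder does; the payload bytes are placeholders, not a verified encoding:

    #include <asm/insn.h>	/* the tools/arch/x86 copy */

    /* Placeholder bytes: 0x8f escape, two XOP payload bytes, opcode, ModRM. */
    static void decode_xop_example(void)
    {
    	const unsigned char buf[] = { 0x8f, 0xe9, 0x78, 0x01, 0xcb };
    	struct insn insn;

    	if (insn_decode(&insn, buf, sizeof(buf), INSN_MODE_64) == 0) {
    		/* insn.vex_prefix holds the three XOP bytes; XOP opcodes
    		 * are a single byte, hence the "goto end" above. */
    	}
    }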
diff --git a/tools/arch/x86/lib/memcpy_64.S b/tools/arch/x86/lib/memcpy_64.S
index 59cf6f9065aa..ccc3d923fc1e 100644
--- a/tools/arch/x86/lib/memcpy_64.S
+++ b/tools/arch/x86/lib/memcpy_64.S
@@ -40,6 +40,7 @@ SYM_FUNC_END(__memcpy)
EXPORT_SYMBOL(__memcpy)
SYM_FUNC_ALIAS_MEMFUNC(memcpy, __memcpy)
+SYM_PIC_ALIAS(memcpy)
EXPORT_SYMBOL(memcpy)
SYM_FUNC_START_LOCAL(memcpy_orig)
diff --git a/tools/arch/x86/lib/memset_64.S b/tools/arch/x86/lib/memset_64.S
index d66b710d628f..fb5a03cf5ab7 100644
--- a/tools/arch/x86/lib/memset_64.S
+++ b/tools/arch/x86/lib/memset_64.S
@@ -42,6 +42,7 @@ SYM_FUNC_END(__memset)
EXPORT_SYMBOL(__memset)
SYM_FUNC_ALIAS_MEMFUNC(memset, __memset)
+SYM_PIC_ALIAS(memset)
EXPORT_SYMBOL(memset)
SYM_FUNC_START_LOCAL(memset_orig)
diff --git a/tools/arch/x86/lib/x86-opcode-map.txt b/tools/arch/x86/lib/x86-opcode-map.txt
index 262f7ca1fb95..2a4e69ecc2de 100644
--- a/tools/arch/x86/lib/x86-opcode-map.txt
+++ b/tools/arch/x86/lib/x86-opcode-map.txt
@@ -27,6 +27,11 @@
# (evo): this opcode is changed by EVEX prefix (EVEX opcode)
# (v): this opcode requires VEX prefix.
# (v1): this opcode only supports 128bit VEX.
+# (xop): this opcode accepts XOP prefix.
+#
+# XOP Superscripts
+# (W=0): this opcode requires XOP.W == 0
+# (W=1): this opcode requires XOP.W == 1
#
# Last Prefix Superscripts
# - (66): the last prefix is 0x66
@@ -194,7 +199,7 @@ AVXcode:
8c: MOV Ev,Sw
8d: LEA Gv,M
8e: MOV Sw,Ew
-8f: Grp1A (1A) | POP Ev (d64)
+8f: Grp1A (1A) | POP Ev (d64) | XOP (Prefix)
# 0x90 - 0x9f
90: NOP | PAUSE (F3) | XCHG r8,rAX
91: XCHG rCX/r9,rAX
@@ -1106,6 +1111,84 @@ AVXcode: 7
f8: URDMSR Rq,Id (F2),(v1),(11B) | UWRMSR Id,Rq (F3),(v1),(11B)
EndTable
+# From AMD64 Architecture Programmer's Manual Vol3, Appendix A.1.5
+Table: XOP map 8h
+Referrer:
+XOPcode: 0
+85: VPMACSSWW Vo,Ho,Wo,Lo
+86: VPMACSSWD Vo,Ho,Wo,Lo
+87: VPMACSSDQL Vo,Ho,Wo,Lo
+8e: VPMACSSDD Vo,Ho,Wo,Lo
+8f: VPMACSSDQH Vo,Ho,Wo,Lo
+95: VPMACSWW Vo,Ho,Wo,Lo
+96: VPMACSWD Vo,Ho,Wo,Lo
+97: VPMACSDQL Vo,Ho,Wo,Lo
+9e: VPMACSDD Vo,Ho,Wo,Lo
+9f: VPMACSDQH Vo,Ho,Wo,Lo
+a2: VPCMOV Vx,Hx,Wx,Lx (W=0) | VPCMOV Vx,Hx,Lx,Wx (W=1)
+a3: VPPERM Vo,Ho,Wo,Lo (W=0) | VPPERM Vo,Ho,Lo,Wo (W=1)
+a6: VPMADCSSWD Vo,Ho,Wo,Lo
+b6: VPMADCSWD Vo,Ho,Wo,Lo
+c0: VPROTB Vo,Wo,Ib
+c1: VPROTW Vo,Wo,Ib
+c2: VPROTD Vo,Wo,Ib
+c3: VPROTQ Vo,Wo,Ib
+cc: VPCOMccB Vo,Ho,Wo,Ib
+cd: VPCOMccW Vo,Ho,Wo,Ib
+ce: VPCOMccD Vo,Ho,Wo,Ib
+cf: VPCOMccQ Vo,Ho,Wo,Ib
+ec: VPCOMccUB Vo,Ho,Wo,Ib
+ed: VPCOMccUW Vo,Ho,Wo,Ib
+ee: VPCOMccUD Vo,Ho,Wo,Ib
+ef: VPCOMccUQ Vo,Ho,Wo,Ib
+EndTable
+
+Table: XOP map 9h
+Referrer:
+XOPcode: 1
+01: GrpXOP1
+02: GrpXOP2
+12: GrpXOP3
+80: VFRCZPS Vx,Wx
+81: VFRCZPD Vx,Wx
+82: VFRCZSS Vq,Wss
+83: VFRCZSD Vq,Wsd
+90: VPROTB Vo,Wo,Ho (W=0) | VPROTB Vo,Ho,Wo (W=1)
+91: VPROTW Vo,Wo,Ho (W=0) | VPROTW Vo,Ho,Wo (W=1)
+92: VPROTD Vo,Wo,Ho (W=0) | VPROTD Vo,Ho,Wo (W=1)
+93: VPROTQ Vo,Wo,Ho (W=0) | VPROTQ Vo,Ho,Wo (W=1)
+94: VPSHLB Vo,Wo,Ho (W=0) | VPSHLB Vo,Ho,Wo (W=1)
+95: VPSHLW Vo,Wo,Ho (W=0) | VPSHLW Vo,Ho,Wo (W=1)
+96: VPSHLD Vo,Wo,Ho (W=0) | VPSHLD Vo,Ho,Wo (W=1)
+97: VPSHLQ Vo,Wo,Ho (W=0) | VPSHLQ Vo,Ho,Wo (W=1)
+98: VPSHAB Vo,Wo,Ho (W=0) | VPSHAB Vo,Ho,Wo (W=1)
+99: VPSHAW Vo,Wo,Ho (W=0) | VPSHAW Vo,Ho,Wo (W=1)
+9a: VPSHAD Vo,Wo,Ho (W=0) | VPSHAD Vo,Ho,Wo (W=1)
+9b: VPSHAQ Vo,Wo,Ho (W=0) | VPSHAQ Vo,Ho,Wo (W=1)
+c1: VPHADDBW Vo,Wo
+c2: VPHADDBD Vo,Wo
+c3: VPHADDBQ Vo,Wo
+c6: VPHADDWD Vo,Wo
+c7: VPHADDWQ Vo,Wo
+cb: VPHADDDQ Vo,Wo
+d1: VPHADDUBWD Vo,Wo
+d2: VPHADDUBD Vo,Wo
+d3: VPHADDUBQ Vo,Wo
+d6: VPHADDUWD Vo,Wo
+d7: VPHADDUWQ Vo,Wo
+db: VPHADDUDQ Vo,Wo
+e1: VPHSUBBW Vo,Wo
+e2: VPHSUBWD Vo,Wo
+e3: VPHSUBDQ Vo,Wo
+EndTable
+
+Table: XOP map Ah
+Referrer:
+XOPcode: 2
+10: BEXTR Gy,Ey,Id
+12: GrpXOP4
+EndTable
+
GrpTable: Grp1
0: ADD
1: OR
@@ -1320,3 +1403,29 @@ GrpTable: GrpRNG
4: xcrypt-cfb
5: xcrypt-ofb
EndTable
+
+# GrpXOP1-4 is shown in AMD APM Vol.3 Appendix A as XOP group #1-4
+GrpTable: GrpXOP1
+1: BLCFILL By,Ey (xop)
+2: BLSFILL By,Ey (xop)
+3: BLCS By,Ey (xop)
+4: TZMSK By,Ey (xop)
+5: BLCIC By,Ey (xop)
+6: BLSIC By,Ey (xop)
+7: T1MSKC By,Ey (xop)
+EndTable
+
+GrpTable: GrpXOP2
+1: BLCMSK By,Ey (xop)
+6: BLCI By,Ey (xop)
+EndTable
+
+GrpTable: GrpXOP3
+0: LLWPCB Ry (xop)
+1: SLWPCB Ry (xop)
+EndTable
+
+GrpTable: GrpXOP4
+0: LWPINS By,Ed,Id (xop)
+1: LWPVAL By,Ed,Id (xop)
+EndTable
diff --git a/tools/arch/x86/tools/gen-cpu-feature-names-x86.awk b/tools/arch/x86/tools/gen-cpu-feature-names-x86.awk
new file mode 100644
index 000000000000..cc4c7a3e6c2e
--- /dev/null
+++ b/tools/arch/x86/tools/gen-cpu-feature-names-x86.awk
@@ -0,0 +1,34 @@
+#!/bin/awk -f
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2025, Oracle and/or its affiliates.
+#
+# Usage: awk -f gen-cpu-feature-names-x86.awk cpufeatures.h > cpu-feature-names.c
+#
+
+BEGIN {
+ print "/* cpu feature name array generated from cpufeatures.h */"
+ print "/* Do not change this code. */"
+ print
+ print "static const char *cpu_feature_names[(NCAPINTS+NBUGINTS)*32] = {"
+
+ value_expr = "\\([0-9*+ ]+\\)"
+}
+
+/^#define X86_FEATURE_/ {
+ if (match($0, value_expr)) {
+ value = substr($0, RSTART + 1, RLENGTH - 2)
+ print "\t[" value "] = \"" $2 "\","
+ }
+}
+
+/^#define X86_BUG_/ {
+ if (match($0, value_expr)) {
+ value = substr($0, RSTART + 1, RLENGTH - 2)
+ print "\t[NCAPINTS*32+(" value ")] = \"" $2 "\","
+ }
+}
+
+END {
+ print "};"
+}
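Given a cpufeatures.h line such as #define X86_FEATURE_XMM2 (0*32+26), the script would emit roughly the following (hand-derived, not from an actual run):

    /* cpu feature name array generated from cpufeatures.h */
    /* Do not change this code. */

    static const char *cpu_feature_names[(NCAPINTS+NBUGINTS)*32] = {
    	[0*32+26] = "X86_FEATURE_XMM2",
    	[NCAPINTS*32+(0)] = "X86_BUG_F00F",	/* bugs sit past the caps */
    };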
diff --git a/tools/arch/x86/tools/gen-insn-attr-x86.awk b/tools/arch/x86/tools/gen-insn-attr-x86.awk
index 2c19d7fc8a85..7ea1b75e59b7 100644
--- a/tools/arch/x86/tools/gen-insn-attr-x86.awk
+++ b/tools/arch/x86/tools/gen-insn-attr-x86.awk
@@ -21,6 +21,7 @@ function clear_vars() {
eid = -1 # escape id
gid = -1 # group id
aid = -1 # AVX id
+ xopid = -1 # XOP id
tname = ""
}
@@ -39,9 +40,11 @@ BEGIN {
ggid = 1
geid = 1
gaid = 0
+ gxopid = 0
delete etable
delete gtable
delete atable
+ delete xoptable
opnd_expr = "^[A-Za-z/]"
ext_expr = "^\\("
@@ -61,6 +64,7 @@ BEGIN {
imm_flag["Ob"] = "INAT_MOFFSET"
imm_flag["Ov"] = "INAT_MOFFSET"
imm_flag["Lx"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
+ imm_flag["Lo"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
modrm_expr = "^([CDEGMNPQRSUVW/][a-z]+|NTA|T[012])"
force64_expr = "\\([df]64\\)"
@@ -87,6 +91,8 @@ BEGIN {
evexonly_expr = "\\(ev\\)"
# (es) is the same as (ev) but also "SCALABLE" i.e. W and pp determine operand size
evex_scalable_expr = "\\(es\\)"
+ # All opcodes in XOP table or with (xop) superscript accept XOP prefix
+ xopok_expr = "\\(xop\\)"
prefix_expr = "\\(Prefix\\)"
prefix_num["Operand-Size"] = "INAT_PFX_OPNDSZ"
@@ -106,6 +112,7 @@ BEGIN {
prefix_num["VEX+2byte"] = "INAT_PFX_VEX3"
prefix_num["EVEX"] = "INAT_PFX_EVEX"
prefix_num["REX2"] = "INAT_PFX_REX2"
+ prefix_num["XOP"] = "INAT_PFX_XOP"
clear_vars()
}
@@ -147,6 +154,7 @@ function array_size(arr, i,c) {
if (NF != 1) {
# AVX/escape opcode table
aid = $2
+ xopid = -1
if (gaid <= aid)
gaid = aid + 1
if (tname == "") # AVX only opcode table
@@ -156,6 +164,20 @@ function array_size(arr, i,c) {
tname = "inat_primary_table"
}
+/^XOPcode:/ {
+ if (NF != 1) {
+ # XOP opcode table
+ xopid = $2
+ aid = -1
+ if (gxopid <= xopid)
+ gxopid = xopid + 1
+ if (tname == "") # XOP only opcode table
+ tname = sprintf("inat_xop_table_%d", $2)
+ }
+ if (xopid == -1 && eid == -1) # primary opcode table
+ tname = "inat_primary_table"
+}
+
/^GrpTable:/ {
print "/* " $0 " */"
if (!($2 in group))
@@ -206,6 +228,8 @@ function print_table(tbl,name,fmt,n)
etable[eid,0] = tname
if (aid >= 0)
atable[aid,0] = tname
+ else if (xopid >= 0)
+ xoptable[xopid] = tname
}
if (array_size(lptable1) != 0) {
print_table(lptable1,tname "_1[INAT_OPCODE_TABLE_SIZE]",
@@ -347,6 +371,8 @@ function convert_operands(count,opnd, i,j,imm,mod)
flags = add_flags(flags, "INAT_VEXOK | INAT_VEXONLY")
else if (match(ext, vexok_expr) || match(opcode, vexok_opcode_expr))
flags = add_flags(flags, "INAT_VEXOK")
+ else if (match(ext, xopok_expr) || xopid >= 0)
+ flags = add_flags(flags, "INAT_XOPOK")
# check prefixes
if (match(ext, prefix_expr)) {
@@ -413,6 +439,14 @@ END {
print " ["i"]["j"] = "atable[i,j]","
print "};\n"
+ print "/* XOP opcode map array */"
+ print "const insn_attr_t * const inat_xop_tables[X86_XOP_M_MAX - X86_XOP_M_MIN + 1]" \
+ " = {"
+ for (i = 0; i < gxopid; i++)
+ if (xoptable[i])
+ print " ["i"] = "xoptable[i]","
+ print "};"
+
print "#else /* !__BOOT_COMPRESSED */\n"
print "/* Escape opcode map array */"
@@ -430,6 +464,10 @@ END {
"[INAT_LSTPFX_MAX + 1];"
print ""
+ print "/* XOP opcode map array */"
+ print "static const insn_attr_t *inat_xop_tables[X86_XOP_M_MAX - X86_XOP_M_MIN + 1];"
+ print ""
+
print "static void inat_init_tables(void)"
print "{"
@@ -455,6 +493,12 @@ END {
if (atable[i,j])
print "\tinat_avx_tables["i"]["j"] = "atable[i,j]";"
+ print ""
+ print "\t/* Print XOP opcode map array */"
+ for (i = 0; i < gxopid; i++)
+ if (xoptable[i])
+ print "\tinat_xop_tables["i"] = "xoptable[i]";"
+
print "}"
print "#endif"
}
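Pieced together from the generator changes above, the emitted XOP tables would look roughly like this; the exact attribute flags per entry are approximate:

    /* Approximate shape of the generated tables: */
    const insn_attr_t inat_xop_table_0[INAT_OPCODE_TABLE_SIZE] = {
    	[0x85] = INAT_MODRM | INAT_MAKE_IMM(INAT_IMM_BYTE) | INAT_XOPOK,
    	/* ... VPMACSSWW and the rest of map 8h ... */
    };

    const insn_attr_t * const inat_xop_tables[X86_XOP_M_MAX - X86_XOP_M_MIN + 1] = {
    	[0] = inat_xop_table_0,		/* XOP map 8h */
    	/* [1], [2]: maps 9h and Ah, generated the same way */
    };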
diff --git a/tools/bootconfig/main.c b/tools/bootconfig/main.c
index 8a48cc2536f5..55d59ed507d5 100644
--- a/tools/bootconfig/main.c
+++ b/tools/bootconfig/main.c
@@ -11,11 +11,16 @@
#include <string.h>
#include <errno.h>
#include <endian.h>
+#include <assert.h>
#include <linux/bootconfig.h>
#define pr_err(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)
+/* Bootconfig footer is [size][csum][BOOTCONFIG_MAGIC]. */
+#define BOOTCONFIG_FOOTER_SIZE \
+ (sizeof(uint32_t) * 2 + BOOTCONFIG_MAGIC_LEN)
+
static int xbc_show_value(struct xbc_node *node, bool semicolon)
{
const char *val, *eol;
@@ -185,10 +190,10 @@ static int load_xbc_from_initrd(int fd, char **buf)
if (ret < 0)
return -errno;
- if (stat.st_size < 8 + BOOTCONFIG_MAGIC_LEN)
+ if (stat.st_size < BOOTCONFIG_FOOTER_SIZE)
return 0;
- if (lseek(fd, -BOOTCONFIG_MAGIC_LEN, SEEK_END) < 0)
+ if (lseek(fd, -(off_t)BOOTCONFIG_MAGIC_LEN, SEEK_END) < 0)
return pr_errno("Failed to lseek for magic", -errno);
if (read(fd, magic, BOOTCONFIG_MAGIC_LEN) < 0)
@@ -198,7 +203,7 @@ static int load_xbc_from_initrd(int fd, char **buf)
if (memcmp(magic, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN) != 0)
return 0;
- if (lseek(fd, -(8 + BOOTCONFIG_MAGIC_LEN), SEEK_END) < 0)
+ if (lseek(fd, -(off_t)BOOTCONFIG_FOOTER_SIZE, SEEK_END) < 0)
return pr_errno("Failed to lseek for size", -errno);
if (read(fd, &size, sizeof(uint32_t)) < 0)
@@ -210,12 +215,12 @@ static int load_xbc_from_initrd(int fd, char **buf)
csum = le32toh(csum);
/* Wrong size error */
- if (stat.st_size < size + 8 + BOOTCONFIG_MAGIC_LEN) {
+ if (stat.st_size < size + BOOTCONFIG_FOOTER_SIZE) {
pr_err("bootconfig size is too big\n");
return -E2BIG;
}
- if (lseek(fd, stat.st_size - (size + 8 + BOOTCONFIG_MAGIC_LEN),
+ if (lseek(fd, stat.st_size - (size + BOOTCONFIG_FOOTER_SIZE),
SEEK_SET) < 0)
return pr_errno("Failed to lseek", -errno);
@@ -346,7 +351,7 @@ static int delete_xbc(const char *path)
ret = fstat(fd, &stat);
if (!ret)
ret = ftruncate(fd, stat.st_size
- - size - 8 - BOOTCONFIG_MAGIC_LEN);
+ - size - BOOTCONFIG_FOOTER_SIZE);
if (ret)
ret = -errno;
} /* Ignore if there is no boot config in initrd */
@@ -359,7 +364,12 @@ static int delete_xbc(const char *path)
static int apply_xbc(const char *path, const char *xbc_path)
{
- char *buf, *data, *p;
+ struct {
+ uint32_t size;
+ uint32_t csum;
+ char magic[BOOTCONFIG_MAGIC_LEN];
+ } footer;
+ char *buf, *data;
size_t total_size;
struct stat stat;
const char *msg;
@@ -376,8 +386,7 @@ static int apply_xbc(const char *path, const char *xbc_path)
csum = xbc_calc_checksum(buf, size);
/* Backup the bootconfig data */
- data = calloc(size + BOOTCONFIG_ALIGN +
- sizeof(uint32_t) + sizeof(uint32_t) + BOOTCONFIG_MAGIC_LEN, 1);
+ data = calloc(size + BOOTCONFIG_ALIGN + BOOTCONFIG_FOOTER_SIZE, 1);
if (!data)
return -ENOMEM;
memcpy(data, buf, size);
@@ -425,22 +434,18 @@ static int apply_xbc(const char *path, const char *xbc_path)
}
/* To align up the total size to BOOTCONFIG_ALIGN, get padding size */
- total_size = stat.st_size + size + sizeof(uint32_t) * 2 + BOOTCONFIG_MAGIC_LEN;
+ total_size = stat.st_size + size + BOOTCONFIG_FOOTER_SIZE;
pad = ((total_size + BOOTCONFIG_ALIGN - 1) & (~BOOTCONFIG_ALIGN_MASK)) - total_size;
size += pad;
/* Add a footer */
- p = data + size;
- *(uint32_t *)p = htole32(size);
- p += sizeof(uint32_t);
-
- *(uint32_t *)p = htole32(csum);
- p += sizeof(uint32_t);
-
- memcpy(p, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN);
- p += BOOTCONFIG_MAGIC_LEN;
+ footer.size = htole32(size);
+ footer.csum = htole32(csum);
+ memcpy(footer.magic, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN);
+ static_assert(sizeof(footer) == BOOTCONFIG_FOOTER_SIZE);
+ memcpy(data + size, &footer, BOOTCONFIG_FOOTER_SIZE);
- total_size = p - data;
+ total_size = size + BOOTCONFIG_FOOTER_SIZE;
ret = write(fd, data, total_size);
if (ret < total_size) {
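With the footer now described by one constant and one struct, the tail layout that load_xbc_from_initrd() and apply_xbc() agree on can be sketched as:

    /* Tail of the initrd after apply_xbc():
     *
     *   [ bootconfig data | padding ][ size ][ csum ][ BOOTCONFIG_MAGIC ]
     *
     * 'size' counts data plus padding, so a reader seeks back
     * BOOTCONFIG_FOOTER_SIZE from EOF for size/csum/magic, then to
     * st_size - (size + BOOTCONFIG_FOOTER_SIZE) for the data itself. */
    struct bootconfig_footer {	/* named sketch of the anonymous struct */
    	uint32_t size;		/* little-endian: data + padding */
    	uint32_t csum;		/* little-endian checksum of the data */
    	char magic[BOOTCONFIG_MAGIC_LEN];
    };
    /* The static_assert() in apply_xbc() keeps this struct and
     * BOOTCONFIG_FOOTER_SIZE in sync. */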
diff --git a/tools/bootconfig/scripts/ftrace.sh b/tools/bootconfig/scripts/ftrace.sh
index 186eed923041..cc5250c64699 100644
--- a/tools/bootconfig/scripts/ftrace.sh
+++ b/tools/bootconfig/scripts/ftrace.sh
@@ -1,3 +1,4 @@
+#!/bin/sh
# SPDX-License-Identifier: GPL-2.0-only
clear_trace() { # reset trace output
diff --git a/tools/bootconfig/test-bootconfig.sh b/tools/bootconfig/test-bootconfig.sh
index a2c484c243f5..7594659af1e1 100755
--- a/tools/bootconfig/test-bootconfig.sh
+++ b/tools/bootconfig/test-bootconfig.sh
@@ -27,16 +27,16 @@ NO=1
xpass() { # pass test command
echo "test case $NO ($*)... "
- if ! ($@ && echo "\t\t[OK]"); then
- echo "\t\t[NG]"; NG=$((NG + 1))
+ if ! ($@ && printf "\t\t[OK]\n"); then
+ printf "\t\t[NG]\n"; NG=$((NG + 1))
fi
NO=$((NO + 1))
}
xfail() { # fail test command
echo "test case $NO ($*)... "
- if ! (! $@ && echo "\t\t[OK]"); then
- echo "\t\t[NG]"; NG=$((NG + 1))
+ if ! (! $@ && printf "\t\t[OK]\n"); then
+ printf "\t\t[NG]\n"; NG=$((NG + 1))
fi
NO=$((NO + 1))
}
@@ -48,13 +48,13 @@ echo "Delete command should succeed without bootconfig"
xpass $BOOTCONF -d $INITRD
dd if=/dev/zero of=$INITRD bs=4096 count=1
-echo "key = value;" > $TEMPCONF
-bconf_size=$(stat -c %s $TEMPCONF)
-initrd_size=$(stat -c %s $INITRD)
+printf "key = value;" > $TEMPCONF
+bconf_size=$(wc -c < $TEMPCONF)
+initrd_size=$(wc -c < $INITRD)
echo "Apply command test"
xpass $BOOTCONF -a $TEMPCONF $INITRD
-new_size=$(stat -c %s $INITRD)
+new_size=$(wc -c < $INITRD)
echo "Show command test"
xpass $BOOTCONF $INITRD
@@ -69,13 +69,13 @@ echo "Apply command repeat test"
xpass $BOOTCONF -a $TEMPCONF $INITRD
echo "File size check"
-xpass test $new_size -eq $(stat -c %s $INITRD)
+xpass test $new_size -eq $(wc -c < $INITRD)
echo "Delete command check"
xpass $BOOTCONF -d $INITRD
echo "File size check"
-new_size=$(stat -c %s $INITRD)
+new_size=$(wc -c < $INITRD)
xpass test $new_size -eq $initrd_size
echo "No error messge while applying"
@@ -97,19 +97,20 @@ BEGIN {
' > $TEMPCONF
xpass $BOOTCONF -a $TEMPCONF $INITRD
-echo "badnode" >> $TEMPCONF
+printf "badnode\n" >> $TEMPCONF
xfail $BOOTCONF -a $TEMPCONF $INITRD
echo "Max filesize check"
# Max size is 32767 (including terminal byte)
-echo -n "data = \"" > $TEMPCONF
+printf "data = \"" > $TEMPCONF
dd if=/dev/urandom bs=768 count=32 | base64 -w0 >> $TEMPCONF
-echo "\"" >> $TEMPCONF
+printf "\"\n" >> $TEMPCONF
xfail $BOOTCONF -a $TEMPCONF $INITRD
-truncate -s 32764 $TEMPCONF
-echo "\"" >> $TEMPCONF # add 2 bytes + terminal ('\"\n\0')
+dd if=$TEMPCONF of=$OUTFILE bs=1 count=32764
+cp $OUTFILE $TEMPCONF
+printf "\"\n" >> $TEMPCONF # add 2 bytes + terminal ('\"\n\0')
xpass $BOOTCONF -a $TEMPCONF $INITRD
echo "Adding same-key values"
@@ -139,7 +140,7 @@ xfail grep -q "baz" $OUTFILE
xpass grep -q "qux" $OUTFILE
echo "Double/single quotes test"
-echo "key = '\"string\"';" > $TEMPCONF
+printf "key = '\"string\"';" > $TEMPCONF
$BOOTCONF -a $TEMPCONF $INITRD
$BOOTCONF $INITRD > $TEMPCONF
cat $TEMPCONF
@@ -167,8 +168,8 @@ echo > $INITRD
xpass $BOOTCONF -a $TEMPCONF $INITRD
$BOOTCONF $INITRD > $OUTFILE
-xfail grep -q val[[:space:]] $OUTFILE
-xpass grep -q val2[[:space:]] $OUTFILE
+xfail grep -q 'val[[:space:]]' $OUTFILE
+xpass grep -q 'val2[[:space:]]' $OUTFILE
echo "=== expected failure cases ==="
for i in samples/bad-* ; do
diff --git a/tools/bpf/Makefile b/tools/bpf/Makefile
index 062bbd6cd048..fd2585af1252 100644
--- a/tools/bpf/Makefile
+++ b/tools/bpf/Makefile
@@ -32,7 +32,7 @@ FEATURE_TESTS = libbfd disassembler-four-args disassembler-init-styled
FEATURE_DISPLAY = libbfd
check_feat := 1
-NON_CHECK_FEAT_TARGETS := clean bpftool_clean runqslower_clean resolve_btfids_clean
+NON_CHECK_FEAT_TARGETS := clean bpftool_clean resolve_btfids_clean
ifdef MAKECMDGOALS
ifeq ($(filter-out $(NON_CHECK_FEAT_TARGETS),$(MAKECMDGOALS)),)
check_feat := 0
@@ -70,7 +70,7 @@ $(OUTPUT)%.lex.o: $(OUTPUT)%.lex.c
PROGS = $(OUTPUT)bpf_jit_disasm $(OUTPUT)bpf_dbg $(OUTPUT)bpf_asm
-all: $(PROGS) bpftool runqslower
+all: $(PROGS) bpftool
$(OUTPUT)bpf_jit_disasm: CFLAGS += -DPACKAGE='bpf_jit_disasm'
$(OUTPUT)bpf_jit_disasm: $(OUTPUT)bpf_jit_disasm.o
@@ -86,7 +86,7 @@ $(OUTPUT)bpf_exp.lex.c: $(OUTPUT)bpf_exp.yacc.c
$(OUTPUT)bpf_exp.yacc.o: $(OUTPUT)bpf_exp.yacc.c
$(OUTPUT)bpf_exp.lex.o: $(OUTPUT)bpf_exp.lex.c
-clean: bpftool_clean runqslower_clean resolve_btfids_clean
+clean: bpftool_clean resolve_btfids_clean
$(call QUIET_CLEAN, bpf-progs)
$(Q)$(RM) -r -- $(OUTPUT)*.o $(OUTPUT)bpf_jit_disasm $(OUTPUT)bpf_dbg \
$(OUTPUT)bpf_asm $(OUTPUT)bpf_exp.yacc.* $(OUTPUT)bpf_exp.lex.*
@@ -112,12 +112,6 @@ bpftool_install:
bpftool_clean:
$(call descend,bpftool,clean)
-runqslower:
- $(call descend,runqslower)
-
-runqslower_clean:
- $(call descend,runqslower,clean)
-
resolve_btfids:
$(call descend,resolve_btfids)
@@ -125,5 +119,4 @@ resolve_btfids_clean:
$(call descend,resolve_btfids,clean)
.PHONY: all install clean bpftool bpftool_install bpftool_clean \
- runqslower runqslower_clean \
resolve_btfids resolve_btfids_clean
diff --git a/tools/bpf/bpf_jit_disasm.c b/tools/bpf/bpf_jit_disasm.c
index 1baee9e2aba9..5ab8f80e2834 100644
--- a/tools/bpf/bpf_jit_disasm.c
+++ b/tools/bpf/bpf_jit_disasm.c
@@ -45,6 +45,8 @@ static void get_exec_path(char *tpath, size_t size)
assert(path);
len = readlink(path, tpath, size);
+ if (len < 0)
+ len = 0;
tpath[len] = 0;
free(path);
diff --git a/tools/bpf/bpftool/Documentation/bpftool-gen.rst b/tools/bpf/bpftool/Documentation/bpftool-gen.rst
index ca860fd97d8d..d0a36f442db7 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-gen.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-gen.rst
@@ -16,7 +16,7 @@ SYNOPSIS
**bpftool** [*OPTIONS*] **gen** *COMMAND*
-*OPTIONS* := { |COMMON_OPTIONS| | { **-L** | **--use-loader** } }
+*OPTIONS* := { |COMMON_OPTIONS| | { **-L** | **--use-loader** } | [ { **-S** | **--sign** } **-k** <private_key.pem> **-i** <certificate.x509> ] }
*COMMAND* := { **object** | **skeleton** | **help** }
@@ -186,6 +186,17 @@ OPTIONS
skeleton). A light skeleton contains a loader eBPF program. It does not use
the majority of the libbpf infrastructure, and does not need libelf.
+-S, --sign
+ For skeletons, generate a signed skeleton. This option must be used with
+ **-k** and **-i**. Using this flag implicitly enables **--use-loader**.
+
+-k <private_key.pem>
+ Path to the private key file in PEM format, required for signing.
+
+-i <certificate.x509>
+ Path to the X.509 certificate file in PEM or DER format, required for
+ signing.
+
EXAMPLES
========
**$ cat example1.bpf.c**
diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst
index 252e4c538edb..1af3305ea2b2 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-map.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst
@@ -55,7 +55,8 @@ MAP COMMANDS
| | **devmap** | **devmap_hash** | **sockmap** | **cpumap** | **xskmap** | **sockhash**
| | **cgroup_storage** | **reuseport_sockarray** | **percpu_cgroup_storage**
| | **queue** | **stack** | **sk_storage** | **struct_ops** | **ringbuf** | **inode_storage**
-| | **task_storage** | **bloom_filter** | **user_ringbuf** | **cgrp_storage** | **arena** }
+| | **task_storage** | **bloom_filter** | **user_ringbuf** | **cgrp_storage** | **arena**
+| | **insn_array** }
DESCRIPTION
===========
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index da3152c16228..35aeeaf5f711 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -18,7 +18,7 @@ SYNOPSIS
*OPTIONS* := { |COMMON_OPTIONS| |
{ **-f** | **--bpffs** } | { **-m** | **--mapcompat** } | { **-n** | **--nomount** } |
-{ **-L** | **--use-loader** } }
+{ **-L** | **--use-loader** } | [ { **-S** | **--sign** } **-k** <private_key.pem> **-i** <certificate.x509> ] }
*COMMANDS* :=
{ **show** | **list** | **dump xlated** | **dump jited** | **pin** | **load** |
@@ -35,6 +35,7 @@ PROG COMMANDS
| **bpftool** **prog attach** *PROG* *ATTACH_TYPE* [*MAP*]
| **bpftool** **prog detach** *PROG* *ATTACH_TYPE* [*MAP*]
| **bpftool** **prog tracelog**
+| **bpftool** **prog tracelog** [ { **stdout** | **stderr** } *PROG* ]
| **bpftool** **prog run** *PROG* **data_in** *FILE* [**data_out** *FILE* [**data_size_out** *L*]] [**ctx_in** *FILE* [**ctx_out** *FILE* [**ctx_size_out** *M*]]] [**repeat** *N*]
| **bpftool** **prog profile** *PROG* [**duration** *DURATION*] *METRICs*
| **bpftool** **prog help**
@@ -179,6 +180,12 @@ bpftool prog tracelog
purposes. For streaming data from BPF programs to user space, one can use
perf events (see also **bpftool-map**\ (8)).
+bpftool prog tracelog { stdout | stderr } *PROG*
+ Dump the BPF stream of the program. BPF programs can write to these streams
+ at runtime with the **bpf_stream_vprintk_impl**\ () kfunc. The kernel may write
+ error messages to the standard error stream. This facility should be used
+ only for debugging purposes.
+
bpftool prog run *PROG* data_in *FILE* [data_out *FILE* [data_size_out *L*]] [ctx_in *FILE* [ctx_out *FILE* [ctx_size_out *M*]]] [repeat *N*]
Run BPF program *PROG* in the kernel testing infrastructure for BPF,
meaning that the program works on the data and context provided by the
@@ -241,6 +248,18 @@ OPTIONS
creating the maps, and loading the programs (see **bpftool prog tracelog**
as a way to dump those messages).
+-S, --sign
+ Enable signing of the BPF program before loading. This option must be
+ used with **-k** and **-i**. Using this flag implicitly enables
+ **--use-loader**.
+
+-k <private_key.pem>
+ Path to the private key file in PEM format, required when signing.
+
+-i <certificate.x509>
+ Path to the X.509 certificate file in PEM or DER format, required when
+ signing.
+
EXAMPLES
========
**# bpftool prog show**
diff --git a/tools/bpf/bpftool/Documentation/bpftool-token.rst b/tools/bpf/bpftool/Documentation/bpftool-token.rst
new file mode 100644
index 000000000000..d082c499cfe3
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-token.rst
@@ -0,0 +1,64 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+================
+bpftool-token
+================
+-------------------------------------------------------------------------------
+tool for inspection and simple manipulation of eBPF tokens
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+.. include:: substitutions.rst
+
+SYNOPSIS
+========
+
+**bpftool** [*OPTIONS*] **token** *COMMAND*
+
+*OPTIONS* := { |COMMON_OPTIONS| }
+
+*COMMANDS* := { **show** | **list** | **help** }
+
+TOKEN COMMANDS
+===============
+
+| **bpftool** **token** { **show** | **list** }
+| **bpftool** **token help**
+|
+
+DESCRIPTION
+===========
+bpftool token { show | list }
+ List BPF token information for each *bpffs* mount point containing token
+ information on the system. Information include mount point path, allowed
+ **bpf**\ () system call commands, maps, programs, and attach types for the
+ token.
+
+bpftool token help
+ Print short help message.
+
+OPTIONS
+========
+.. include:: common_options.rst
+
+EXAMPLES
+========
+|
+| **# mkdir -p /sys/fs/bpf/token**
+| **# mount -t bpf bpffs /sys/fs/bpf/token** \
+| **-o delegate_cmds=prog_load:map_create** \
+| **-o delegate_progs=kprobe** \
+| **-o delegate_attachs=xdp**
+| **# bpftool token list**
+
+::
+
+ token_info /sys/fs/bpf/token
+ allowed_cmds:
+ map_create prog_load
+ allowed_maps:
+ allowed_progs:
+ kprobe
+ allowed_attachs:
+ xdp
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index 9e9a5f006cd2..586d1b2595d1 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -130,8 +130,8 @@ include $(FEATURES_DUMP)
endif
endif
-LIBS = $(LIBBPF) -lelf -lz
-LIBS_BOOTSTRAP = $(LIBBPF_BOOTSTRAP) -lelf -lz
+LIBS = $(LIBBPF) -lelf -lz -lcrypto
+LIBS_BOOTSTRAP = $(LIBBPF_BOOTSTRAP) -lelf -lz -lcrypto
ifeq ($(feature-libelf-zstd),1)
LIBS += -lzstd
@@ -194,7 +194,7 @@ endif
BPFTOOL_BOOTSTRAP := $(BOOTSTRAP_OUTPUT)bpftool
-BOOTSTRAP_OBJS = $(addprefix $(BOOTSTRAP_OUTPUT),main.o common.o json_writer.o gen.o btf.o)
+BOOTSTRAP_OBJS = $(addprefix $(BOOTSTRAP_OUTPUT),main.o common.o json_writer.o gen.o btf.o sign.o)
$(BOOTSTRAP_OBJS): $(LIBBPF_BOOTSTRAP)
OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index 27512feb5c70..53bcfeb1a76e 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -262,7 +262,7 @@ _bpftool()
# Deal with options
if [[ ${words[cword]} == -* ]]; then
local c='--version --json --pretty --bpffs --mapcompat --debug \
- --use-loader --base-btf'
+ --use-loader --base-btf --sign -i -k'
COMPREPLY=( $( compgen -W "$c" -- "$cur" ) )
return 0
fi
@@ -283,7 +283,7 @@ _bpftool()
_sysfs_get_netdevs
return 0
;;
- file|pinned|-B|--base-btf)
+ file|pinned|-B|--base-btf|-i|-k)
_filedir
return 0
;;
@@ -296,13 +296,21 @@ _bpftool()
# Remove all options so completions don't have to deal with them.
local i pprev
for (( i=1; i < ${#words[@]}; )); do
- if [[ ${words[i]::1} == - ]] &&
- [[ ${words[i]} != "-B" ]] && [[ ${words[i]} != "--base-btf" ]]; then
- words=( "${words[@]:0:i}" "${words[@]:i+1}" )
- [[ $i -le $cword ]] && cword=$(( cword - 1 ))
- else
- i=$(( ++i ))
- fi
+ case ${words[i]} in
+ # Remove option and its argument
+ -B|--base-btf|-i|-k)
+ words=( "${words[@]:0:i}" "${words[@]:i+2}" )
+ [[ $i -le $(($cword + 1)) ]] && cword=$(( cword - 2 ))
+ ;;
+ # No argument, remove option only
+ -*)
+ words=( "${words[@]:0:i}" "${words[@]:i+1}" )
+ [[ $i -le $cword ]] && cword=$(( cword - 1 ))
+ ;;
+ *)
+ i=$(( ++i ))
+ ;;
+ esac
done
cur=${words[cword]}
prev=${words[cword - 1]}
@@ -518,7 +526,21 @@ _bpftool()
esac
;;
tracelog)
- return 0
+ case $prev in
+ $command)
+ COMPREPLY+=( $( compgen -W "stdout stderr" -- \
+ "$cur" ) )
+ return 0
+ ;;
+ stdout|stderr)
+ COMPREPLY=( $( compgen -W "$PROG_TYPE" -- \
+ "$cur" ) )
+ return 0
+ ;;
+ *)
+ return 0
+ ;;
+ esac
;;
profile)
case $cword in
@@ -1201,6 +1223,17 @@ _bpftool()
;;
esac
;;
+ token)
+ case $command in
+ show|list)
+ return 0
+ ;;
+ *)
+ [[ $prev == $object ]] && \
+ COMPREPLY=( $( compgen -W 'help show list' -- "$cur" ) )
+ ;;
+ esac
+ ;;
esac
} &&
complete -F _bpftool bpftool
diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
index 6b14cbfa58aa..946612029dee 100644
--- a/tools/bpf/bpftool/btf.c
+++ b/tools/bpf/bpftool/btf.c
@@ -905,7 +905,8 @@ static int do_dump(int argc, char **argv)
return -1;
}
- fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
+ fd = map_parse_fd_and_info(&argc, &argv, &info, &len,
+ BPF_F_RDONLY);
if (fd < 0)
return -1;
@@ -1118,10 +1119,13 @@ build_btf_type_table(struct hashmap *tab, enum bpf_obj_type type,
[BPF_OBJ_PROG] = "prog",
[BPF_OBJ_MAP] = "map",
};
+ LIBBPF_OPTS(bpf_get_fd_by_id_opts, opts_ro);
__u32 btf_id, id = 0;
int err;
int fd;
+ opts_ro.open_flags = BPF_F_RDONLY;
+
while (true) {
switch (type) {
case BPF_OBJ_PROG:
@@ -1151,7 +1155,7 @@ build_btf_type_table(struct hashmap *tab, enum bpf_obj_type type,
fd = bpf_prog_get_fd_by_id(id);
break;
case BPF_OBJ_MAP:
- fd = bpf_map_get_fd_by_id(id);
+ fd = bpf_map_get_fd_by_id_opts(id, &opts_ro);
break;
default:
err = -1;
diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c
index 4e896d8a2416..def297e879f4 100644
--- a/tools/bpf/bpftool/btf_dumper.c
+++ b/tools/bpf/bpftool/btf_dumper.c
@@ -38,7 +38,7 @@ static int dump_prog_id_as_func_ptr(const struct btf_dumper *d,
__u32 info_len = sizeof(info);
const char *prog_name = NULL;
struct btf *prog_btf = NULL;
- struct bpf_func_info finfo;
+ struct bpf_func_info finfo = {};
__u32 finfo_rec_size;
char prog_str[1024];
int err;
@@ -590,7 +590,7 @@ static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id,
case BTF_KIND_DATASEC:
return btf_dumper_datasec(d, type_id, data);
default:
- jsonw_printf(d->jw, "(unsupported-kind");
+ jsonw_printf(d->jw, "(unsupported-kind)");
return -EINVAL;
}
}
diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c
index 944ebe21a216..ec356deb27c9 100644
--- a/tools/bpf/bpftool/cgroup.c
+++ b/tools/bpf/bpftool/cgroup.c
@@ -2,6 +2,10 @@
// Copyright (C) 2017 Facebook
// Author: Roman Gushchin <guro@fb.com>
+#undef GCC_VERSION
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
#define _XOPEN_SOURCE 500
#include <errno.h>
#include <fcntl.h>
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index ecfa790adc13..e8daf963ecef 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -4,6 +4,7 @@
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
+#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
@@ -20,6 +21,7 @@
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/vfs.h>
+#include <sys/utsname.h>
#include <linux/filter.h>
#include <linux/limits.h>
@@ -30,6 +32,7 @@
#include <bpf/hashmap.h>
#include <bpf/libbpf.h> /* libbpf_num_possible_cpus */
#include <bpf/btf.h>
+#include <zlib.h>
#include "main.h"
@@ -193,7 +196,8 @@ int mount_tracefs(const char *target)
return err;
}
-int open_obj_pinned(const char *path, bool quiet)
+int open_obj_pinned(const char *path, bool quiet,
+ const struct bpf_obj_get_opts *opts)
{
char *pname;
int fd = -1;
@@ -205,7 +209,7 @@ int open_obj_pinned(const char *path, bool quiet)
goto out_ret;
}
- fd = bpf_obj_get(pname);
+ fd = bpf_obj_get_opts(pname, opts);
if (fd < 0) {
if (!quiet)
p_err("bpf obj get (%s): %s", pname,
@@ -221,12 +225,13 @@ out_ret:
return fd;
}
-int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type)
+int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type,
+ const struct bpf_obj_get_opts *opts)
{
enum bpf_obj_type type;
int fd;
- fd = open_obj_pinned(path, false);
+ fd = open_obj_pinned(path, false, opts);
if (fd < 0)
return -1;
@@ -555,7 +560,7 @@ static int do_build_table_cb(const char *fpath, const struct stat *sb,
if (typeflag != FTW_F)
goto out_ret;
- fd = open_obj_pinned(fpath, true);
+ fd = open_obj_pinned(fpath, true, NULL);
if (fd < 0)
goto out_ret;
@@ -928,7 +933,7 @@ int prog_parse_fds(int *argc, char ***argv, int **fds)
path = **argv;
NEXT_ARGP();
- (*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_PROG);
+ (*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_PROG, NULL);
if ((*fds)[0] < 0)
return -1;
return 1;
@@ -965,7 +970,8 @@ exit_free:
return fd;
}
-static int map_fd_by_name(char *name, int **fds)
+static int map_fd_by_name(char *name, int **fds,
+ const struct bpf_get_fd_by_id_opts *opts)
{
unsigned int id = 0;
int fd, nb_fds = 0;
@@ -973,6 +979,7 @@ static int map_fd_by_name(char *name, int **fds)
int err;
while (true) {
+ LIBBPF_OPTS(bpf_get_fd_by_id_opts, opts_ro);
struct bpf_map_info info = {};
__u32 len = sizeof(info);
@@ -985,7 +992,9 @@ static int map_fd_by_name(char *name, int **fds)
return nb_fds;
}
- fd = bpf_map_get_fd_by_id(id);
+ /* Request a read-only fd to query the map info */
+ opts_ro.open_flags = BPF_F_RDONLY;
+ fd = bpf_map_get_fd_by_id_opts(id, &opts_ro);
if (fd < 0) {
p_err("can't get map by id (%u): %s",
id, strerror(errno));
@@ -1004,6 +1013,19 @@ static int map_fd_by_name(char *name, int **fds)
continue;
}
+ /* Get an fd with the requested options, if they differ
+ * from the read-only options used to get the fd above.
+ */
+ if (memcmp(opts, &opts_ro, sizeof(opts_ro))) {
+ close(fd);
+ fd = bpf_map_get_fd_by_id_opts(id, opts);
+ if (fd < 0) {
+ p_err("can't get map by id (%u): %s", id,
+ strerror(errno));
+ goto err_close_fds;
+ }
+ }
+
if (nb_fds > 0) {
tmp = realloc(*fds, (nb_fds + 1) * sizeof(int));
if (!tmp) {
@@ -1023,8 +1045,13 @@ err_close_fds:
return -1;
}
-int map_parse_fds(int *argc, char ***argv, int **fds)
+int map_parse_fds(int *argc, char ***argv, int **fds, __u32 open_flags)
{
+ LIBBPF_OPTS(bpf_get_fd_by_id_opts, opts);
+
+ assert((open_flags & ~BPF_F_RDONLY) == 0);
+ opts.open_flags = open_flags;
+
if (is_prefix(**argv, "id")) {
unsigned int id;
char *endptr;
@@ -1038,7 +1065,7 @@ int map_parse_fds(int *argc, char ***argv, int **fds)
}
NEXT_ARGP();
- (*fds)[0] = bpf_map_get_fd_by_id(id);
+ (*fds)[0] = bpf_map_get_fd_by_id_opts(id, &opts);
if ((*fds)[0] < 0) {
p_err("get map by id (%u): %s", id, strerror(errno));
return -1;
@@ -1056,16 +1083,18 @@ int map_parse_fds(int *argc, char ***argv, int **fds)
}
NEXT_ARGP();
- return map_fd_by_name(name, fds);
+ return map_fd_by_name(name, fds, &opts);
} else if (is_prefix(**argv, "pinned")) {
char *path;
+ LIBBPF_OPTS(bpf_obj_get_opts, get_opts);
+ get_opts.file_flags = open_flags;
NEXT_ARGP();
path = **argv;
NEXT_ARGP();
- (*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_MAP);
+ (*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_MAP, &get_opts);
if ((*fds)[0] < 0)
return -1;
return 1;
@@ -1075,7 +1104,7 @@ int map_parse_fds(int *argc, char ***argv, int **fds)
return -1;
}
-int map_parse_fd(int *argc, char ***argv)
+int map_parse_fd(int *argc, char ***argv, __u32 open_flags)
{
int *fds = NULL;
int nb_fds, fd;
@@ -1085,7 +1114,7 @@ int map_parse_fd(int *argc, char ***argv)
p_err("mem alloc failed");
return -1;
}
- nb_fds = map_parse_fds(argc, argv, &fds);
+ nb_fds = map_parse_fds(argc, argv, &fds, open_flags);
if (nb_fds != 1) {
if (nb_fds > 1) {
p_err("several maps match this handle");
@@ -1103,12 +1132,12 @@ exit_free:
}
int map_parse_fd_and_info(int *argc, char ***argv, struct bpf_map_info *info,
- __u32 *info_len)
+ __u32 *info_len, __u32 open_flags)
{
int err;
int fd;
- fd = map_parse_fd(argc, argv);
+ fd = map_parse_fd(argc, argv, open_flags);
if (fd < 0)
return -1;
@@ -1181,3 +1210,94 @@ int pathname_concat(char *buf, int buf_sz, const char *path,
return 0;
}
+
+static bool read_next_kernel_config_option(gzFile file, char *buf, size_t n,
+ char **value)
+{
+ char *sep;
+
+ while (gzgets(file, buf, n)) {
+ if (strncmp(buf, "CONFIG_", 7))
+ continue;
+
+ sep = strchr(buf, '=');
+ if (!sep)
+ continue;
+
+ /* Trim ending '\n' */
+ buf[strlen(buf) - 1] = '\0';
+
+ /* Split on '=' and ensure that a value is present. */
+ *sep = '\0';
+ if (!sep[1])
+ continue;
+
+ *value = sep + 1;
+ return true;
+ }
+
+ return false;
+}
+
+int read_kernel_config(const struct kernel_config_option *requested_options,
+ size_t num_options, char **out_values,
+ const char *define_prefix)
+{
+ struct utsname utsn;
+ char path[PATH_MAX];
+ gzFile file = NULL;
+ char buf[4096];
+ char *value;
+ size_t i;
+ int ret = 0;
+
+ if (!requested_options || !out_values || num_options == 0)
+ return -1;
+
+ if (!uname(&utsn)) {
+ snprintf(path, sizeof(path), "/boot/config-%s", utsn.release);
+
+ /* gzopen also accepts uncompressed files. */
+ file = gzopen(path, "r");
+ }
+
+ if (!file) {
+ /* Some distributions build with CONFIG_IKCONFIG=y and put the
+ * config file at /proc/config.gz.
+ */
+ file = gzopen("/proc/config.gz", "r");
+ }
+
+ if (!file) {
+ p_info("skipping kernel config, can't open file: %s",
+ strerror(errno));
+ return -1;
+ }
+
+ if (!gzgets(file, buf, sizeof(buf)) || !gzgets(file, buf, sizeof(buf))) {
+ p_info("skipping kernel config, can't read from file: %s",
+ strerror(errno));
+ ret = -1;
+ goto end_parse;
+ }
+
+ if (strcmp(buf, "# Automatically generated file; DO NOT EDIT.\n")) {
+ p_info("skipping kernel config, can't find correct file");
+ ret = -1;
+ goto end_parse;
+ }
+
+ while (read_next_kernel_config_option(file, buf, sizeof(buf), &value)) {
+ for (i = 0; i < num_options; i++) {
+ if ((define_prefix && !requested_options[i].macro_dump) ||
+ out_values[i] || strcmp(buf, requested_options[i].name))
+ continue;
+
+ out_values[i] = strdup(value);
+ }
+ }
+
+end_parse:
+ gzclose(file);
+ return ret;
+}
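A sketch of how another site could reuse the now-shared helper; the kernel_config_option layout (name, macro_dump) is inferred from the feature.c conversion below:

    /* Sketch: query a couple of config options outside feature.c. */
    static void show_preempt_config(void)
    {
    	struct kernel_config_option options[] = {
    		{ "CONFIG_PREEMPT", },
    		{ "CONFIG_HZ", true, },	/* macro_dump set, as in feature.c */
    	};
    	char *values[ARRAY_SIZE(options)] = { };
    	size_t i;

    	if (read_kernel_config(options, ARRAY_SIZE(options), values, NULL))
    		return;	/* config not found or unreadable; already reported */

    	for (i = 0; i < ARRAY_SIZE(options); i++) {
    		if (values[i])
    			printf("%s=%s\n", options[i].name, values[i]);
    		free(values[i]);
    	}
    }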
diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
index 24fecdf8e430..0f6070a0c8e7 100644
--- a/tools/bpf/bpftool/feature.c
+++ b/tools/bpf/bpftool/feature.c
@@ -10,7 +10,6 @@
#ifdef USE_LIBCAP
#include <sys/capability.h>
#endif
-#include <sys/utsname.h>
#include <sys/vfs.h>
#include <linux/filter.h>
@@ -18,7 +17,6 @@
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
-#include <zlib.h>
#include "main.h"
@@ -327,40 +325,9 @@ static void probe_jit_limit(void)
}
}
-static bool read_next_kernel_config_option(gzFile file, char *buf, size_t n,
- char **value)
-{
- char *sep;
-
- while (gzgets(file, buf, n)) {
- if (strncmp(buf, "CONFIG_", 7))
- continue;
-
- sep = strchr(buf, '=');
- if (!sep)
- continue;
-
- /* Trim ending '\n' */
- buf[strlen(buf) - 1] = '\0';
-
- /* Split on '=' and ensure that a value is present. */
- *sep = '\0';
- if (!sep[1])
- continue;
-
- *value = sep + 1;
- return true;
- }
-
- return false;
-}
-
static void probe_kernel_image_config(const char *define_prefix)
{
- static const struct {
- const char * const name;
- bool macro_dump;
- } options[] = {
+ struct kernel_config_option options[] = {
/* Enable BPF */
{ "CONFIG_BPF", },
/* Enable bpf() syscall */
@@ -435,52 +402,11 @@ static void probe_kernel_image_config(const char *define_prefix)
{ "CONFIG_HZ", true, }
};
char *values[ARRAY_SIZE(options)] = { };
- struct utsname utsn;
- char path[PATH_MAX];
- gzFile file = NULL;
- char buf[4096];
- char *value;
size_t i;
- if (!uname(&utsn)) {
- snprintf(path, sizeof(path), "/boot/config-%s", utsn.release);
-
- /* gzopen also accepts uncompressed files. */
- file = gzopen(path, "r");
- }
-
- if (!file) {
- /* Some distributions build with CONFIG_IKCONFIG=y and put the
- * config file at /proc/config.gz.
- */
- file = gzopen("/proc/config.gz", "r");
- }
- if (!file) {
- p_info("skipping kernel config, can't open file: %s",
- strerror(errno));
- goto end_parse;
- }
- /* Sanity checks */
- if (!gzgets(file, buf, sizeof(buf)) ||
- !gzgets(file, buf, sizeof(buf))) {
- p_info("skipping kernel config, can't read from file: %s",
- strerror(errno));
- goto end_parse;
- }
- if (strcmp(buf, "# Automatically generated file; DO NOT EDIT.\n")) {
- p_info("skipping kernel config, can't find correct file");
- goto end_parse;
- }
-
- while (read_next_kernel_config_option(file, buf, sizeof(buf), &value)) {
- for (i = 0; i < ARRAY_SIZE(options); i++) {
- if ((define_prefix && !options[i].macro_dump) ||
- values[i] || strcmp(buf, options[i].name))
- continue;
-
- values[i] = strdup(value);
- }
- }
+ if (read_kernel_config(options, ARRAY_SIZE(options), values,
+ define_prefix))
+ return;
for (i = 0; i < ARRAY_SIZE(options); i++) {
if (define_prefix && !options[i].macro_dump)
@@ -488,10 +414,6 @@ static void probe_kernel_image_config(const char *define_prefix)
print_kernel_option(options[i].name, values[i], define_prefix);
free(values[i]);
}
-
-end_parse:
- if (file)
- gzclose(file);
}
static bool probe_bpf_syscall(const char *define_prefix)
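
The refactor above is a behavioral no-op for bpftool feature probe: the kernel config section still prints one line per probed option, along the lines of (values illustrative):

    CONFIG_BPF is set to y
    CONFIG_BPF_SYSCALL is set to y
    CONFIG_HZ is set to 250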
diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
index 67a60114368f..993c7d9484a4 100644
--- a/tools/bpf/bpftool/gen.c
+++ b/tools/bpf/bpftool/gen.c
@@ -688,10 +688,17 @@ static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *header_guard)
{
DECLARE_LIBBPF_OPTS(gen_loader_opts, opts);
+ struct bpf_load_and_run_opts sopts = {};
+ char sig_buf[MAX_SIG_SIZE];
+ __u8 prog_sha[SHA256_DIGEST_LENGTH];
struct bpf_map *map;
+
char ident[256];
int err = 0;
+ if (sign_progs)
+ opts.gen_hash = true;
+
err = bpf_object__gen_loader(obj, &opts);
if (err)
return err;
@@ -701,6 +708,7 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h
p_err("failed to load object file");
goto out;
}
+
/* If there was no error during load then gen_loader_opts
* are populated with the loader program.
*/
@@ -780,8 +788,52 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h
print_hex(opts.insns, opts.insns_sz);
codegen("\
\n\
- \"; \n\
- \n\
+ \";\n");
+
+ if (sign_progs) {
+ sopts.insns = opts.insns;
+ sopts.insns_sz = opts.insns_sz;
+ sopts.excl_prog_hash = prog_sha;
+ sopts.excl_prog_hash_sz = sizeof(prog_sha);
+ sopts.signature = sig_buf;
+ sopts.signature_sz = MAX_SIG_SIZE;
+
+ err = bpftool_prog_sign(&sopts);
+ if (err < 0) {
+ p_err("failed to sign program");
+ goto out;
+ }
+
+ codegen("\
+ \n\
+ static const char opts_sig[] __attribute__((__aligned__(8))) = \"\\\n\
+ ");
+ print_hex((const void *)sig_buf, sopts.signature_sz);
+ codegen("\
+ \n\
+ \";\n");
+
+ codegen("\
+ \n\
+ static const char opts_excl_hash[] __attribute__((__aligned__(8))) = \"\\\n\
+ ");
+ print_hex((const void *)prog_sha, sizeof(prog_sha));
+ codegen("\
+ \n\
+ \";\n");
+
+ codegen("\
+ \n\
+ opts.signature = (void *)opts_sig; \n\
+ opts.signature_sz = sizeof(opts_sig) - 1; \n\
+ opts.excl_prog_hash = (void *)opts_excl_hash; \n\
+ opts.excl_prog_hash_sz = sizeof(opts_excl_hash) - 1; \n\
+ opts.keyring_id = skel->keyring_id; \n\
+ ");
+ }
+
+ codegen("\
+ \n\
opts.ctx = (struct bpf_loader_ctx *)skel; \n\
opts.data_sz = sizeof(opts_data) - 1; \n\
opts.data = (void *)opts_data; \n\
@@ -1240,7 +1292,7 @@ static int do_skeleton(int argc, char **argv)
err = -errno;
libbpf_strerror(err, err_buf, sizeof(err_buf));
p_err("failed to open BPF object file: %s", err_buf);
- goto out;
+ goto out_obj;
}
bpf_object__for_each_map(map, obj) {
@@ -1355,6 +1407,13 @@ static int do_skeleton(int argc, char **argv)
printf("\t} links;\n");
}
+ if (sign_progs) {
+ codegen("\
+ \n\
+ __s32 keyring_id; \n\
+ ");
+ }
+
if (btf) {
err = codegen_datasecs(obj, obj_name);
if (err)
@@ -1552,6 +1611,7 @@ static int do_skeleton(int argc, char **argv)
err = 0;
out:
bpf_object__close(obj);
+out_obj:
if (obj_data)
munmap(obj_data, mmap_sz);
close(fd);
@@ -1930,7 +1990,7 @@ static int do_help(int argc, char **argv)
" %1$s %2$s help\n"
"\n"
" " HELP_SPEC_OPTIONS " |\n"
- " {-L|--use-loader} }\n"
+ " {-L|--use-loader} | [ {-S|--sign } {-k} <private_key.pem> {-i} <certificate.x509> ]}\n"
"",
bin_name, "gen");
diff --git a/tools/bpf/bpftool/iter.c b/tools/bpf/bpftool/iter.c
index 5c39c2ed36a2..df5f0d1e07e8 100644
--- a/tools/bpf/bpftool/iter.c
+++ b/tools/bpf/bpftool/iter.c
@@ -37,7 +37,7 @@ static int do_pin(int argc, char **argv)
return -1;
}
- map_fd = map_parse_fd(&argc, &argv);
+ map_fd = map_parse_fd(&argc, &argv, BPF_F_RDONLY);
if (map_fd < 0)
return -1;
diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c
index 3535afc80a49..bdcd717b0348 100644
--- a/tools/bpf/bpftool/link.c
+++ b/tools/bpf/bpftool/link.c
@@ -117,7 +117,7 @@ static int link_parse_fd(int *argc, char ***argv)
path = **argv;
NEXT_ARGP();
- return open_obj_pinned_any(path, BPF_OBJ_LINK);
+ return open_obj_pinned_any(path, BPF_OBJ_LINK, NULL);
}
p_err("expected 'id' or 'pinned', got: '%s'?", **argv);
@@ -282,11 +282,52 @@ get_addr_cookie_array(__u64 *addrs, __u64 *cookies, __u32 count)
return data;
}
+static bool is_x86_ibt_enabled(void)
+{
+#if defined(__x86_64__)
+ struct kernel_config_option options[] = {
+ { "CONFIG_X86_KERNEL_IBT", },
+ };
+ char *values[ARRAY_SIZE(options)] = { };
+ bool ret;
+
+ if (read_kernel_config(options, ARRAY_SIZE(options), values, NULL))
+ return false;
+
+ ret = !!values[0];
+ free(values[0]);
+ return ret;
+#else
+ return false;
+#endif
+}
+
+static bool
+symbol_matches_target(__u64 sym_addr, __u64 target_addr, bool is_ibt_enabled)
+{
+ if (sym_addr == target_addr)
+ return true;
+
+ /*
+ * On x86_64 architectures with CET (Control-flow Enforcement Technology),
+ * function entry points have a 4-byte 'endbr' instruction prefix.
+ * This causes kprobe hooks to target the address *after* 'endbr'
+ * (symbol address + 4), preserving the CET instruction.
+ * Here we check if the symbol address matches the hook target address
+ * minus 4, indicating a CET-enabled function entry point.
+ */
+ if (is_ibt_enabled && sym_addr == target_addr - 4)
+ return true;
+
+ return false;
+}
+
static void
show_kprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
{
struct addr_cookie *data;
__u32 i, j = 0;
+ bool is_ibt_enabled;
jsonw_bool_field(json_wtr, "retprobe",
info->kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN);
@@ -306,11 +347,13 @@ show_kprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
if (!dd.sym_count)
goto error;
+ is_ibt_enabled = is_x86_ibt_enabled();
for (i = 0; i < dd.sym_count; i++) {
- if (dd.sym_mapping[i].address != data[j].addr)
+ if (!symbol_matches_target(dd.sym_mapping[i].address,
+ data[j].addr, is_ibt_enabled))
continue;
jsonw_start_object(json_wtr);
- jsonw_uint_field(json_wtr, "addr", dd.sym_mapping[i].address);
+ jsonw_uint_field(json_wtr, "addr", (unsigned long)data[j].addr);
jsonw_string_field(json_wtr, "func", dd.sym_mapping[i].name);
/* Print null if it is vmlinux */
if (dd.sym_mapping[i].module[0] == '\0') {
@@ -485,6 +528,7 @@ static int show_link_close_json(int fd, struct bpf_link_info *info)
case BPF_LINK_TYPE_RAW_TRACEPOINT:
jsonw_string_field(json_wtr, "tp_name",
u64_to_ptr(info->raw_tracepoint.tp_name));
+ jsonw_uint_field(json_wtr, "cookie", info->raw_tracepoint.cookie);
break;
case BPF_LINK_TYPE_TRACING:
err = get_prog_info(info->prog_id, &prog_info);
@@ -502,6 +546,7 @@ static int show_link_close_json(int fd, struct bpf_link_info *info)
json_wtr);
jsonw_uint_field(json_wtr, "target_obj_id", info->tracing.target_obj_id);
jsonw_uint_field(json_wtr, "target_btf_id", info->tracing.target_btf_id);
+ jsonw_uint_field(json_wtr, "cookie", info->tracing.cookie);
break;
case BPF_LINK_TYPE_CGROUP:
jsonw_lluint_field(json_wtr, "cgroup_id",
@@ -717,6 +762,7 @@ static void show_kprobe_multi_plain(struct bpf_link_info *info)
{
struct addr_cookie *data;
__u32 i, j = 0;
+ bool is_ibt_enabled;
if (!info->kprobe_multi.count)
return;
@@ -740,12 +786,14 @@ static void show_kprobe_multi_plain(struct bpf_link_info *info)
if (!dd.sym_count)
goto error;
+ is_ibt_enabled = is_x86_ibt_enabled();
printf("\n\t%-16s %-16s %s", "addr", "cookie", "func [module]");
for (i = 0; i < dd.sym_count; i++) {
- if (dd.sym_mapping[i].address != data[j].addr)
+ if (!symbol_matches_target(dd.sym_mapping[i].address,
+ data[j].addr, is_ibt_enabled))
continue;
printf("\n\t%016lx %-16llx %s",
- dd.sym_mapping[i].address, data[j].cookie, dd.sym_mapping[i].name);
+ (unsigned long)data[j].addr, data[j].cookie, dd.sym_mapping[i].name);
if (dd.sym_mapping[i].module[0] != '\0')
printf(" [%s] ", dd.sym_mapping[i].module);
else
@@ -879,6 +927,8 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info)
case BPF_LINK_TYPE_RAW_TRACEPOINT:
printf("\n\ttp '%s' ",
(const char *)u64_to_ptr(info->raw_tracepoint.tp_name));
+ if (info->raw_tracepoint.cookie)
+ printf("cookie %llu ", info->raw_tracepoint.cookie);
break;
case BPF_LINK_TYPE_TRACING:
err = get_prog_info(info->prog_id, &prog_info);
@@ -897,6 +947,8 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info)
printf("\n\ttarget_obj_id %u target_btf_id %u ",
info->tracing.target_obj_id,
info->tracing.target_btf_id);
+ if (info->tracing.cookie)
+ printf("\n\tcookie %llu ", info->tracing.cookie);
break;
case BPF_LINK_TYPE_CGROUP:
printf("\n\tcgroup_id %zu ", (size_t)info->cgroup.cgroup_id);
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index cd5963cb6058..a829a6a49037 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -33,6 +33,9 @@ bool relaxed_maps;
bool use_loader;
struct btf *base_btf;
struct hashmap *refs_table;
+bool sign_progs;
+const char *private_key_path;
+const char *cert_path;
static void __noreturn clean_and_exit(int i)
{
@@ -61,7 +64,7 @@ static int do_help(int argc, char **argv)
" %s batch file FILE\n"
" %s version\n"
"\n"
- " OBJECT := { prog | map | link | cgroup | perf | net | feature | btf | gen | struct_ops | iter }\n"
+ " OBJECT := { prog | map | link | cgroup | perf | net | feature | btf | gen | struct_ops | iter | token }\n"
" " HELP_SPEC_OPTIONS " |\n"
" {-V|--version} }\n"
"",
@@ -87,6 +90,7 @@ static const struct cmd commands[] = {
{ "gen", do_gen },
{ "struct_ops", do_struct_ops },
{ "iter", do_iter },
+ { "token", do_token },
{ "version", do_version },
{ 0 }
};
@@ -447,6 +451,7 @@ int main(int argc, char **argv)
{ "nomount", no_argument, NULL, 'n' },
{ "debug", no_argument, NULL, 'd' },
{ "use-loader", no_argument, NULL, 'L' },
+ { "sign", no_argument, NULL, 'S' },
{ "base-btf", required_argument, NULL, 'B' },
{ 0 }
};
@@ -473,7 +478,7 @@ int main(int argc, char **argv)
bin_name = "bpftool";
opterr = 0;
- while ((opt = getopt_long(argc, argv, "VhpjfLmndB:l",
+ while ((opt = getopt_long(argc, argv, "VhpjfLmndSi:k:B:l",
options, NULL)) >= 0) {
switch (opt) {
case 'V':
@@ -519,6 +524,16 @@ int main(int argc, char **argv)
case 'L':
use_loader = true;
break;
+ case 'S':
+ sign_progs = true;
+ use_loader = true;
+ break;
+ case 'k':
+ private_key_path = optarg;
+ break;
+ case 'i':
+ cert_path = optarg;
+ break;
default:
p_err("unrecognized option '%s'", argv[optind - 1]);
if (json_output)
@@ -533,10 +548,20 @@ int main(int argc, char **argv)
if (argc < 0)
usage();
- if (version_requested)
- return do_version(argc, argv);
+ if (sign_progs && (private_key_path == NULL || cert_path == NULL)) {
+ p_err("-i <identity_x509_cert> and -k <private_key> must be supplied with -S for signing");
+ return -EINVAL;
+ }
+
+ if (!sign_progs && (private_key_path != NULL || cert_path != NULL)) {
+ p_err("--sign (or -S) must be explicitly passed with -i <identity_x509_cert> and -k <private_key> to sign the programs");
+ return -EINVAL;
+ }
- ret = cmd_select(commands, argc, argv, do_help);
+ if (version_requested)
+ ret = do_version(argc, argv);
+ else
+ ret = cmd_select(commands, argc, argv, do_help);
if (json_output)
jsonw_destroy(&json_wtr);
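
The resulting contract for the new flags: -S implies -L and requires both -k and -i, while -k or -i without -S is rejected. For example, bpftool prog load -S -k private_key.pem -i certificate.x509 prog.bpf.o /sys/fs/bpf/p signs and loads, whereas bpftool prog load -k private_key.pem prog.bpf.o /sys/fs/bpf/p now fails with -EINVAL.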
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 9eb764fe4cc8..1130299cede0 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -6,15 +6,21 @@
/* BFD and kernel.h both define GCC_VERSION, differently */
#undef GCC_VERSION
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
#include <stdbool.h>
#include <stdio.h>
+#include <errno.h>
#include <stdlib.h>
+#include <bpf/skel_internal.h>
#include <linux/bpf.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <bpf/hashmap.h>
#include <bpf/libbpf.h>
+#include <bpf/bpf.h>
#include "json_writer.h"
@@ -51,6 +57,7 @@ static inline void *u64_to_ptr(__u64 ptr)
})
#define ERR_MAX_LEN 1024
+#define MAX_SIG_SIZE 4096
#define BPF_TAG_FMT "%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx"
@@ -84,6 +91,9 @@ extern bool relaxed_maps;
extern bool use_loader;
extern struct btf *base_btf;
extern struct hashmap *refs_table;
+extern bool sign_progs;
+extern const char *private_key_path;
+extern const char *cert_path;
void __printf(1, 2) p_err(const char *fmt, ...);
void __printf(1, 2) p_info(const char *fmt, ...);
@@ -140,8 +150,10 @@ void get_prog_full_name(const struct bpf_prog_info *prog_info, int prog_fd,
int get_fd_type(int fd);
const char *get_fd_type_name(enum bpf_obj_type type);
char *get_fdinfo(int fd, const char *key);
-int open_obj_pinned(const char *path, bool quiet);
-int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type);
+int open_obj_pinned(const char *path, bool quiet,
+ const struct bpf_obj_get_opts *opts);
+int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type,
+ const struct bpf_obj_get_opts *opts);
int mount_bpffs_for_file(const char *file_name);
int create_and_mount_bpffs_dir(const char *dir_name);
int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(int *, char ***));
@@ -163,14 +175,15 @@ int do_tracelog(int argc, char **arg) __weak;
int do_feature(int argc, char **argv) __weak;
int do_struct_ops(int argc, char **argv) __weak;
int do_iter(int argc, char **argv) __weak;
+int do_token(int argc, char **argv) __weak;
int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what);
int prog_parse_fd(int *argc, char ***argv);
int prog_parse_fds(int *argc, char ***argv, int **fds);
-int map_parse_fd(int *argc, char ***argv);
-int map_parse_fds(int *argc, char ***argv, int **fds);
+int map_parse_fd(int *argc, char ***argv, __u32 open_flags);
+int map_parse_fds(int *argc, char ***argv, int **fds, __u32 open_flags);
int map_parse_fd_and_info(int *argc, char ***argv, struct bpf_map_info *info,
- __u32 *info_len);
+ __u32 *info_len, __u32 open_flags);
struct bpf_prog_linfo;
#if defined(HAVE_LLVM_SUPPORT) || defined(HAVE_LIBBFD_SUPPORT)
@@ -271,4 +284,15 @@ int pathname_concat(char *buf, int buf_sz, const char *path,
/* print netfilter bpf_link info */
void netfilter_dump_plain(const struct bpf_link_info *info);
void netfilter_dump_json(const struct bpf_link_info *info, json_writer_t *wtr);
+
+struct kernel_config_option {
+ const char *name;
+ bool macro_dump;
+};
+
+int read_kernel_config(const struct kernel_config_option *requested_options,
+ size_t num_options, char **out_values,
+ const char *define_prefix);
+int bpftool_prog_sign(struct bpf_load_and_run_opts *opts);
+__u32 register_session_key(const char *key_der_path);
#endif
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index 81cc668b4b05..7ebf7dbcfba4 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -337,9 +337,9 @@ static void fill_per_cpu_value(struct bpf_map_info *info, void *value)
memcpy(value + i * step, value, info->value_size);
}
-static int parse_elem(char **argv, struct bpf_map_info *info,
- void *key, void *value, __u32 key_size, __u32 value_size,
- __u32 *flags, __u32 **value_fd)
+static int parse_elem(char **argv, struct bpf_map_info *info, void *key,
+ void *value, __u32 key_size, __u32 value_size,
+ __u32 *flags, __u32 **value_fd, __u32 open_flags)
{
if (!*argv) {
if (!key && !value)
@@ -362,7 +362,7 @@ static int parse_elem(char **argv, struct bpf_map_info *info,
return -1;
return parse_elem(argv, info, NULL, value, key_size, value_size,
- flags, value_fd);
+ flags, value_fd, open_flags);
} else if (is_prefix(*argv, "value")) {
int fd;
@@ -388,7 +388,7 @@ static int parse_elem(char **argv, struct bpf_map_info *info,
return -1;
}
- fd = map_parse_fd(&argc, &argv);
+ fd = map_parse_fd(&argc, &argv, open_flags);
if (fd < 0)
return -1;
@@ -424,7 +424,7 @@ static int parse_elem(char **argv, struct bpf_map_info *info,
}
return parse_elem(argv, info, key, NULL, key_size, value_size,
- flags, NULL);
+ flags, NULL, open_flags);
} else if (is_prefix(*argv, "any") || is_prefix(*argv, "noexist") ||
is_prefix(*argv, "exist")) {
if (!flags) {
@@ -440,7 +440,7 @@ static int parse_elem(char **argv, struct bpf_map_info *info,
*flags = BPF_EXIST;
return parse_elem(argv + 1, info, key, value, key_size,
- value_size, NULL, value_fd);
+ value_size, NULL, value_fd, open_flags);
}
p_err("expected key or value, got: %s", *argv);
@@ -639,7 +639,7 @@ static int do_show_subset(int argc, char **argv)
p_err("mem alloc failed");
return -1;
}
- nb_fds = map_parse_fds(&argc, &argv, &fds);
+ nb_fds = map_parse_fds(&argc, &argv, &fds, BPF_F_RDONLY);
if (nb_fds < 1)
goto exit_free;
@@ -672,12 +672,15 @@ exit_free:
static int do_show(int argc, char **argv)
{
+ LIBBPF_OPTS(bpf_get_fd_by_id_opts, opts);
struct bpf_map_info info = {};
__u32 len = sizeof(info);
__u32 id = 0;
int err;
int fd;
+ opts.open_flags = BPF_F_RDONLY;
+
if (show_pinned) {
map_table = hashmap__new(hash_fn_for_key_as_id,
equal_fn_for_key_as_id, NULL);
@@ -707,7 +710,7 @@ static int do_show(int argc, char **argv)
break;
}
- fd = bpf_map_get_fd_by_id(id);
+ fd = bpf_map_get_fd_by_id_opts(id, &opts);
if (fd < 0) {
if (errno == ENOENT)
continue;
@@ -909,7 +912,7 @@ static int do_dump(int argc, char **argv)
p_err("mem alloc failed");
return -1;
}
- nb_fds = map_parse_fds(&argc, &argv, &fds);
+ nb_fds = map_parse_fds(&argc, &argv, &fds, BPF_F_RDONLY);
if (nb_fds < 1)
goto exit_free;
@@ -997,7 +1000,7 @@ static int do_update(int argc, char **argv)
if (argc < 2)
usage();
- fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
+ fd = map_parse_fd_and_info(&argc, &argv, &info, &len, 0);
if (fd < 0)
return -1;
@@ -1006,7 +1009,7 @@ static int do_update(int argc, char **argv)
goto exit_free;
err = parse_elem(argv, &info, key, value, info.key_size,
- info.value_size, &flags, &value_fd);
+ info.value_size, &flags, &value_fd, 0);
if (err)
goto exit_free;
@@ -1076,7 +1079,7 @@ static int do_lookup(int argc, char **argv)
if (argc < 2)
usage();
- fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
+ fd = map_parse_fd_and_info(&argc, &argv, &info, &len, BPF_F_RDONLY);
if (fd < 0)
return -1;
@@ -1084,7 +1087,8 @@ static int do_lookup(int argc, char **argv)
if (err)
goto exit_free;
- err = parse_elem(argv, &info, key, NULL, info.key_size, 0, NULL, NULL);
+ err = parse_elem(argv, &info, key, NULL, info.key_size, 0, NULL, NULL,
+ BPF_F_RDONLY);
if (err)
goto exit_free;
@@ -1127,7 +1131,7 @@ static int do_getnext(int argc, char **argv)
if (argc < 2)
usage();
- fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
+ fd = map_parse_fd_and_info(&argc, &argv, &info, &len, BPF_F_RDONLY);
if (fd < 0)
return -1;
@@ -1140,8 +1144,8 @@ static int do_getnext(int argc, char **argv)
}
if (argc) {
- err = parse_elem(argv, &info, key, NULL, info.key_size, 0,
- NULL, NULL);
+ err = parse_elem(argv, &info, key, NULL, info.key_size, 0, NULL,
+ NULL, BPF_F_RDONLY);
if (err)
goto exit_free;
} else {
@@ -1198,7 +1202,7 @@ static int do_delete(int argc, char **argv)
if (argc < 2)
usage();
- fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
+ fd = map_parse_fd_and_info(&argc, &argv, &info, &len, 0);
if (fd < 0)
return -1;
@@ -1209,7 +1213,8 @@ static int do_delete(int argc, char **argv)
goto exit_free;
}
- err = parse_elem(argv, &info, key, NULL, info.key_size, 0, NULL, NULL);
+ err = parse_elem(argv, &info, key, NULL, info.key_size, 0, NULL, NULL,
+ 0);
if (err)
goto exit_free;
@@ -1226,11 +1231,16 @@ exit_free:
return err;
}
+static int map_parse_read_only_fd(int *argc, char ***argv)
+{
+ return map_parse_fd(argc, argv, BPF_F_RDONLY);
+}
+
static int do_pin(int argc, char **argv)
{
int err;
- err = do_pin_any(argc, argv, map_parse_fd);
+ err = do_pin_any(argc, argv, map_parse_read_only_fd);
if (!err && json_output)
jsonw_null(json_wtr);
return err;
@@ -1319,7 +1329,7 @@ offload_dev:
if (!REQ_ARGS(2))
usage();
inner_map_fd = map_parse_fd_and_info(&argc, &argv,
- &info, &len);
+ &info, &len, BPF_F_RDONLY);
if (inner_map_fd < 0)
return -1;
attr.inner_map_fd = inner_map_fd;
@@ -1368,7 +1378,7 @@ static int do_pop_dequeue(int argc, char **argv)
if (argc < 2)
usage();
- fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
+ fd = map_parse_fd_and_info(&argc, &argv, &info, &len, 0);
if (fd < 0)
return -1;
@@ -1407,7 +1417,7 @@ static int do_freeze(int argc, char **argv)
if (!REQ_ARGS(2))
return -1;
- fd = map_parse_fd(&argc, &argv);
+ fd = map_parse_fd(&argc, &argv, 0);
if (fd < 0)
return -1;
@@ -1467,7 +1477,8 @@ static int do_help(int argc, char **argv)
" devmap | devmap_hash | sockmap | cpumap | xskmap | sockhash |\n"
" cgroup_storage | reuseport_sockarray | percpu_cgroup_storage |\n"
" queue | stack | sk_storage | struct_ops | ringbuf | inode_storage |\n"
- " task_storage | bloom_filter | user_ringbuf | cgrp_storage | arena }\n"
+ " task_storage | bloom_filter | user_ringbuf | cgrp_storage | arena |\n"
+ " insn_array }\n"
" " HELP_SPEC_OPTIONS " |\n"
" {-f|--bpffs} | {-n|--nomount} }\n"
"",
diff --git a/tools/bpf/bpftool/map_perf_ring.c b/tools/bpf/bpftool/map_perf_ring.c
index 552b4ca40c27..bcb767e2d673 100644
--- a/tools/bpf/bpftool/map_perf_ring.c
+++ b/tools/bpf/bpftool/map_perf_ring.c
@@ -128,7 +128,8 @@ int do_event_pipe(int argc, char **argv)
int err, map_fd;
map_info_len = sizeof(map_info);
- map_fd = map_parse_fd_and_info(&argc, &argv, &map_info, &map_info_len);
+ map_fd = map_parse_fd_and_info(&argc, &argv, &map_info, &map_info_len,
+ 0);
if (map_fd < 0)
return -1;
diff --git a/tools/bpf/bpftool/net.c b/tools/bpf/bpftool/net.c
index 64f958f437b0..cfc6f944f7c3 100644
--- a/tools/bpf/bpftool/net.c
+++ b/tools/bpf/bpftool/net.c
@@ -366,17 +366,18 @@ static int dump_link_nlmsg(void *cookie, void *msg, struct nlattr **tb)
{
struct bpf_netdev_t *netinfo = cookie;
struct ifinfomsg *ifinfo = msg;
+ struct ip_devname_ifindex *tmp;
if (netinfo->filter_idx > 0 && netinfo->filter_idx != ifinfo->ifi_index)
return 0;
if (netinfo->used_len == netinfo->array_len) {
- netinfo->devices = realloc(netinfo->devices,
- (netinfo->array_len + 16) *
- sizeof(struct ip_devname_ifindex));
- if (!netinfo->devices)
+ tmp = realloc(netinfo->devices,
+ (netinfo->array_len + 16) * sizeof(struct ip_devname_ifindex));
+ if (!tmp)
return -ENOMEM;
+ netinfo->devices = tmp;
netinfo->array_len += 16;
}
netinfo->devices[netinfo->used_len].ifindex = ifinfo->ifi_index;
@@ -395,6 +396,7 @@ static int dump_class_qdisc_nlmsg(void *cookie, void *msg, struct nlattr **tb)
{
struct bpf_tcinfo_t *tcinfo = cookie;
struct tcmsg *info = msg;
+ struct tc_kind_handle *tmp;
if (tcinfo->is_qdisc) {
/* skip clsact qdisc */
@@ -406,11 +408,12 @@ static int dump_class_qdisc_nlmsg(void *cookie, void *msg, struct nlattr **tb)
}
if (tcinfo->used_len == tcinfo->array_len) {
- tcinfo->handle_array = realloc(tcinfo->handle_array,
+ tmp = realloc(tcinfo->handle_array,
(tcinfo->array_len + 16) * sizeof(struct tc_kind_handle));
- if (!tcinfo->handle_array)
+ if (!tmp)
return -ENOMEM;
+ tcinfo->handle_array = tmp;
tcinfo->array_len += 16;
}
tcinfo->handle_array[tcinfo->used_len].handle = info->tcm_handle;
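
Both hunks fix the same classic leak: p = realloc(p, n) loses the original allocation if realloc fails. The safe idiom, extracted into a standalone sketch (names illustrative):

    #include <errno.h>
    #include <stdlib.h>

    /* Grow *arr by 16 elements; on failure the caller still owns *arr. */
    static int grow_by_16(int **arr, int *cap)
    {
            int *tmp = realloc(*arr, (*cap + 16) * sizeof(**arr));

            if (!tmp)
                    return -ENOMEM; /* *arr untouched: nothing leaked */
            *arr = tmp;
            *cap += 16;
            return 0;
    }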
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 96eea8a67225..6daf19809ca4 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -23,6 +23,7 @@
#include <linux/err.h>
#include <linux/perf_event.h>
#include <linux/sizes.h>
+#include <linux/keyctl.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
@@ -714,7 +715,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
if (mode == DUMP_JITED) {
if (info->jited_prog_len == 0 || !info->jited_prog_insns) {
- p_info("no instructions returned");
+ p_err("error retrieving jit dump: no instructions returned or kernel.kptr_restrict set?");
return -1;
}
buf = u64_to_ptr(info->jited_prog_insns);
@@ -1062,7 +1063,7 @@ static int parse_attach_detach_args(int argc, char **argv, int *progfd,
if (!REQ_ARGS(2))
return -EINVAL;
- *mapfd = map_parse_fd(&argc, &argv);
+ *mapfd = map_parse_fd(&argc, &argv, 0);
if (*mapfd < 0)
return *mapfd;
@@ -1113,6 +1114,52 @@ static int do_detach(int argc, char **argv)
return 0;
}
+enum prog_tracelog_mode {
+ TRACE_STDOUT,
+ TRACE_STDERR,
+};
+
+static int
+prog_tracelog_stream(int prog_fd, enum prog_tracelog_mode mode)
+{
+ FILE *file = mode == TRACE_STDOUT ? stdout : stderr;
+ int stream_id = mode == TRACE_STDOUT ? 1 : 2;
+ char buf[512];
+ int ret;
+
+ ret = 0;
+ do {
+ ret = bpf_prog_stream_read(prog_fd, stream_id, buf, sizeof(buf), NULL);
+ if (ret > 0)
+ fwrite(buf, sizeof(buf[0]), ret, file);
+ } while (ret > 0);
+
+ fflush(file);
+ return ret ? -1 : 0;
+}
+
+static int do_tracelog_any(int argc, char **argv)
+{
+ enum prog_tracelog_mode mode;
+ int fd;
+
+ if (argc == 0)
+ return do_tracelog(argc, argv);
+ if (!is_prefix(*argv, "stdout") && !is_prefix(*argv, "stderr"))
+ usage();
+ mode = is_prefix(*argv, "stdout") ? TRACE_STDOUT : TRACE_STDERR;
+ NEXT_ARG();
+
+ if (!REQ_ARGS(2))
+ return -1;
+
+ fd = prog_parse_fd(&argc, &argv);
+ if (fd < 0)
+ return -1;
+
+ return prog_tracelog_stream(fd, mode);
+}
+
static int check_single_stdin(char *file_data_in, char *file_ctx_in)
{
if (file_data_in && file_ctx_in &&
@@ -1608,7 +1655,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
}
NEXT_ARG();
- fd = map_parse_fd(&argc, &argv);
+ fd = map_parse_fd(&argc, &argv, 0);
if (fd < 0)
goto err_free_reuse_maps;
@@ -1884,6 +1931,8 @@ static int try_loader(struct gen_loader_opts *gen)
{
struct bpf_load_and_run_opts opts = {};
struct bpf_loader_ctx *ctx;
+ char sig_buf[MAX_SIG_SIZE];
+ __u8 prog_sha[SHA256_DIGEST_LENGTH];
int ctx_sz = sizeof(*ctx) + 64 * max(sizeof(struct bpf_map_desc),
sizeof(struct bpf_prog_desc));
int log_buf_sz = (1u << 24) - 1;
@@ -1907,6 +1956,26 @@ static int try_loader(struct gen_loader_opts *gen)
opts.insns = gen->insns;
opts.insns_sz = gen->insns_sz;
fds_before = count_open_fds();
+
+ if (sign_progs) {
+ opts.excl_prog_hash = prog_sha;
+ opts.excl_prog_hash_sz = sizeof(prog_sha);
+ opts.signature = sig_buf;
+ opts.signature_sz = MAX_SIG_SIZE;
+ opts.keyring_id = KEY_SPEC_SESSION_KEYRING;
+
+ err = bpftool_prog_sign(&opts);
+ if (err < 0) {
+ p_err("failed to sign program");
+ goto out;
+ }
+
+ err = register_session_key(cert_path);
+ if (err < 0) {
+ p_err("failed to add session key");
+ goto out;
+ }
+ }
err = bpf_load_and_run(&opts);
fd_delta = count_open_fds() - fds_before;
if (err < 0 || verifier_logs) {
@@ -1915,6 +1984,7 @@ static int try_loader(struct gen_loader_opts *gen)
fprintf(stderr, "loader prog leaked %d FDs\n",
fd_delta);
}
+out:
free(log_buf);
return err;
}
@@ -1942,6 +2012,9 @@ static int do_loader(int argc, char **argv)
goto err_close_obj;
}
+ if (sign_progs)
+ gen.gen_hash = true;
+
err = bpf_object__gen_loader(obj, &gen);
if (err)
goto err_close_obj;
@@ -2216,7 +2289,7 @@ static void profile_print_readings(void)
static char *profile_target_name(int tgt_fd)
{
- struct bpf_func_info func_info;
+ struct bpf_func_info func_info = {};
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
const struct btf_type *t;
@@ -2493,6 +2566,7 @@ static int do_help(int argc, char **argv)
" [repeat N]\n"
" %1$s %2$s profile PROG [duration DURATION] METRICs\n"
" %1$s %2$s tracelog\n"
+ " %1$s %2$s tracelog { stdout | stderr } PROG\n"
" %1$s %2$s help\n"
"\n"
" " HELP_SPEC_MAP "\n"
@@ -2515,7 +2589,7 @@ static int do_help(int argc, char **argv)
" METRIC := { cycles | instructions | l1d_loads | llc_misses | itlb_misses | dtlb_misses }\n"
" " HELP_SPEC_OPTIONS " |\n"
" {-f|--bpffs} | {-m|--mapcompat} | {-n|--nomount} |\n"
- " {-L|--use-loader} }\n"
+ " {-L|--use-loader} | [ {-S|--sign } {-k} <private_key.pem> {-i} <certificate.x509> ] \n"
"",
bin_name, argv[-2]);
@@ -2532,7 +2606,7 @@ static const struct cmd cmds[] = {
{ "loadall", do_loadall },
{ "attach", do_attach },
{ "detach", do_detach },
- { "tracelog", do_tracelog },
+ { "tracelog", do_tracelog_any },
{ "run", do_run },
{ "profile", do_profile },
{ 0 }
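
With this, bpftool prog tracelog without arguments keeps its old trace_pipe behavior, while, for example, bpftool prog tracelog stdout id 42 drains the stdout stream of program 42 through bpf_prog_stream_read() until no more data is returned.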
diff --git a/tools/bpf/bpftool/sign.c b/tools/bpf/bpftool/sign.c
new file mode 100644
index 000000000000..f9b742f4bb10
--- /dev/null
+++ b/tools/bpf/bpftool/sign.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Copyright (C) 2025 Google LLC.
+ */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <string.h>
+#include <getopt.h>
+#include <err.h>
+#include <openssl/opensslv.h>
+#include <openssl/bio.h>
+#include <openssl/evp.h>
+#include <openssl/pem.h>
+#include <openssl/err.h>
+#include <openssl/cms.h>
+#include <linux/keyctl.h>
+#include <errno.h>
+
+#include <bpf/skel_internal.h>
+
+#include "main.h"
+
+#define OPEN_SSL_ERR_BUF_LEN 256
+
+/* OpenSSL < 3.0 lacks ERR_get_error_all(); fall back to ERR_get_error_line_data(), deprecated in 3.0 */
+#if !defined(OPENSSL_VERSION_MAJOR) || (OPENSSL_VERSION_MAJOR < 3)
+#define ERR_get_error_all(file, line, func, data, flags) \
+ ERR_get_error_line_data(file, line, data, flags)
+#endif
+
+static void display_openssl_errors(int l)
+{
+ char buf[OPEN_SSL_ERR_BUF_LEN];
+ const char *file;
+ const char *data;
+ unsigned long e;
+ int flags;
+ int line;
+
+ while ((e = ERR_get_error_all(&file, &line, NULL, &data, &flags))) {
+ ERR_error_string_n(e, buf, sizeof(buf));
+ if (data && (flags & ERR_TXT_STRING)) {
+ p_err("OpenSSL %s: %s:%d: %s", buf, file, line, data);
+ } else {
+ p_err("OpenSSL %s: %s:%d", buf, file, line);
+ }
+ }
+}
+
+#define DISPLAY_OSSL_ERR(cond) \
+ do { \
+ bool __cond = (cond); \
+ if (__cond && ERR_peek_error()) \
+ display_openssl_errors(__LINE__);\
+ } while (0)
+
+static EVP_PKEY *read_private_key(const char *pkey_path)
+{
+ EVP_PKEY *private_key = NULL;
+ BIO *b;
+
+ b = BIO_new_file(pkey_path, "rb");
+ private_key = PEM_read_bio_PrivateKey(b, NULL, NULL, NULL);
+ BIO_free(b);
+ DISPLAY_OSSL_ERR(!private_key);
+ return private_key;
+}
+
+static X509 *read_x509(const char *x509_name)
+{
+ unsigned char buf[2];
+ X509 *x509 = NULL;
+ BIO *b;
+ int n;
+
+ b = BIO_new_file(x509_name, "rb");
+ if (!b)
+ goto cleanup;
+
+ /* Look at the first two bytes of the file to determine the encoding */
+ n = BIO_read(b, buf, 2);
+ if (n != 2)
+ goto cleanup;
+
+ if (BIO_reset(b) != 0)
+ goto cleanup;
+
+ if (buf[0] == 0x30 && buf[1] >= 0x81 && buf[1] <= 0x84)
+ /* Assume raw DER encoded X.509 */
+ x509 = d2i_X509_bio(b, NULL);
+ else
+ /* Assume PEM encoded X.509 */
+ x509 = PEM_read_bio_X509(b, NULL, NULL, NULL);
+
+cleanup:
+ BIO_free(b);
+ DISPLAY_OSSL_ERR(!x509);
+ return x509;
+}
+
+__u32 register_session_key(const char *key_der_path)
+{
+ unsigned char *der_buf = NULL;
+ X509 *x509 = NULL;
+ int key_id = -1;
+ int der_len;
+
+ if (!key_der_path)
+ return key_id;
+ x509 = read_x509(key_der_path);
+ if (!x509)
+ goto cleanup;
+ der_len = i2d_X509(x509, &der_buf);
+ if (der_len < 0)
+ goto cleanup;
+ key_id = syscall(__NR_add_key, "asymmetric", key_der_path, der_buf,
+ (size_t)der_len, KEY_SPEC_SESSION_KEYRING);
+cleanup:
+ X509_free(x509);
+ OPENSSL_free(der_buf);
+ DISPLAY_OSSL_ERR(key_id == -1);
+ return key_id;
+}
+
+int bpftool_prog_sign(struct bpf_load_and_run_opts *opts)
+{
+ BIO *bd_in = NULL, *bd_out = NULL;
+ EVP_PKEY *private_key = NULL;
+ CMS_ContentInfo *cms = NULL;
+ long actual_sig_len = 0;
+ X509 *x509 = NULL;
+ int err = 0;
+
+ bd_in = BIO_new_mem_buf(opts->insns, opts->insns_sz);
+ if (!bd_in) {
+ err = -ENOMEM;
+ goto cleanup;
+ }
+
+ private_key = read_private_key(private_key_path);
+ if (!private_key) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ x509 = read_x509(cert_path);
+ if (!x509) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ cms = CMS_sign(NULL, NULL, NULL, NULL,
+ CMS_NOCERTS | CMS_PARTIAL | CMS_BINARY | CMS_DETACHED |
+ CMS_STREAM);
+ if (!cms) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ if (!CMS_add1_signer(cms, x509, private_key, EVP_sha256(),
+ CMS_NOCERTS | CMS_BINARY | CMS_NOSMIMECAP |
+ CMS_USE_KEYID | CMS_NOATTR)) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ if (CMS_final(cms, bd_in, NULL, CMS_NOCERTS | CMS_BINARY) != 1) {
+ err = -EIO;
+ goto cleanup;
+ }
+
+ EVP_Digest(opts->insns, opts->insns_sz, opts->excl_prog_hash,
+ &opts->excl_prog_hash_sz, EVP_sha256(), NULL);
+
+ bd_out = BIO_new(BIO_s_mem());
+ if (!bd_out) {
+ err = -ENOMEM;
+ goto cleanup;
+ }
+
+ if (!i2d_CMS_bio_stream(bd_out, cms, NULL, 0)) {
+ err = -EIO;
+ goto cleanup;
+ }
+
+ actual_sig_len = BIO_get_mem_data(bd_out, NULL);
+ if (actual_sig_len <= 0) {
+ err = -EIO;
+ goto cleanup;
+ }
+
+ if ((size_t)actual_sig_len > opts->signature_sz) {
+ err = -ENOSPC;
+ goto cleanup;
+ }
+
+ if (BIO_read(bd_out, opts->signature, actual_sig_len) != actual_sig_len) {
+ err = -EIO;
+ goto cleanup;
+ }
+
+ opts->signature_sz = actual_sig_len;
+cleanup:
+ BIO_free(bd_out);
+ CMS_ContentInfo_free(cms);
+ X509_free(x509);
+ EVP_PKEY_free(private_key);
+ BIO_free(bd_in);
+ DISPLAY_OSSL_ERR(err < 0);
+ return err;
+}
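
For local testing, a throwaway key pair in the formats accepted by read_private_key() and read_x509() can be generated with OpenSSL (illustrative parameters, not a production recommendation):

    openssl req -new -x509 -newkey rsa:2048 -nodes -days 30 \
            -subj "/CN=bpftool-test" \
            -keyout private_key.pem -out certificate.x509

Note that bpftool_prog_sign() emits a detached CMS signature over the loader instructions and, via EVP_Digest(), also fills in the SHA-256 exclusion hash that accompanies the signature.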
diff --git a/tools/bpf/bpftool/token.c b/tools/bpf/bpftool/token.c
new file mode 100644
index 000000000000..c08f34b9d51b
--- /dev/null
+++ b/tools/bpf/bpftool/token.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2025 Didi Technology Co., Tao Chen */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <errno.h>
+#include <fcntl.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <mntent.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include "json_writer.h"
+#include "main.h"
+
+#define MOUNTS_FILE "/proc/mounts"
+
+static struct {
+ const char *header;
+ const char *key;
+} sets[] = {
+ {"allowed_cmds", "delegate_cmds"},
+ {"allowed_maps", "delegate_maps"},
+ {"allowed_progs", "delegate_progs"},
+ {"allowed_attachs", "delegate_attachs"},
+};
+
+static bool has_delegate_options(const char *mnt_ops)
+{
+ return strstr(mnt_ops, "delegate_cmds") ||
+ strstr(mnt_ops, "delegate_maps") ||
+ strstr(mnt_ops, "delegate_progs") ||
+ strstr(mnt_ops, "delegate_attachs");
+}
+
+static char *get_delegate_value(char *opts, const char *key)
+{
+ char *token, *rest, *ret = NULL;
+
+ if (!opts)
+ return NULL;
+
+ for (token = strtok_r(opts, ",", &rest); token;
+ token = strtok_r(NULL, ",", &rest)) {
+ if (strncmp(token, key, strlen(key)) == 0 &&
+ token[strlen(key)] == '=') {
+ ret = token + strlen(key) + 1;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void print_items_per_line(char *input, int items_per_line)
+{
+ char *str, *rest;
+ int cnt = 0;
+
+ if (!input)
+ return;
+
+ for (str = strtok_r(input, ":", &rest); str;
+ str = strtok_r(NULL, ":", &rest)) {
+ if (cnt % items_per_line == 0)
+ printf("\n\t ");
+
+ printf("%-20s", str);
+ cnt++;
+ }
+}
+
+#define ITEMS_PER_LINE 4
+static void show_token_info_plain(struct mntent *mntent)
+{
+ size_t i;
+
+ printf("token_info %s", mntent->mnt_dir);
+
+ for (i = 0; i < ARRAY_SIZE(sets); i++) {
+ char *opts, *value;
+
+ printf("\n\t%s:", sets[i].header);
+ opts = strdup(mntent->mnt_opts);
+ value = get_delegate_value(opts, sets[i].key);
+ print_items_per_line(value, ITEMS_PER_LINE);
+ free(opts);
+ }
+
+ printf("\n");
+}
+
+static void split_json_array_str(char *input)
+{
+ char *str, *rest;
+
+ if (!input) {
+ jsonw_start_array(json_wtr);
+ jsonw_end_array(json_wtr);
+ return;
+ }
+
+ jsonw_start_array(json_wtr);
+ for (str = strtok_r(input, ":", &rest); str;
+ str = strtok_r(NULL, ":", &rest)) {
+ jsonw_string(json_wtr, str);
+ }
+ jsonw_end_array(json_wtr);
+}
+
+static void show_token_info_json(struct mntent *mntent)
+{
+ size_t i;
+
+ jsonw_start_object(json_wtr);
+ jsonw_string_field(json_wtr, "token_info", mntent->mnt_dir);
+
+ for (i = 0; i < ARRAY_SIZE(sets); i++) {
+ char *opts, *value;
+
+ jsonw_name(json_wtr, sets[i].header);
+ opts = strdup(mntent->mnt_opts);
+ value = get_delegate_value(opts, sets[i].key);
+ split_json_array_str(value);
+ free(opts);
+ }
+
+ jsonw_end_object(json_wtr);
+}
+
+static int __show_token_info(struct mntent *mntent)
+{
+ if (json_output)
+ show_token_info_json(mntent);
+ else
+ show_token_info_plain(mntent);
+
+ return 0;
+}
+
+static int show_token_info(void)
+{
+ FILE *fp;
+ struct mntent *ent;
+
+ fp = setmntent(MOUNTS_FILE, "r");
+ if (!fp) {
+ p_err("Failed to open: %s", MOUNTS_FILE);
+ return -1;
+ }
+
+ if (json_output)
+ jsonw_start_array(json_wtr);
+
+ while ((ent = getmntent(fp)) != NULL) {
+ if (strncmp(ent->mnt_type, "bpf", 3) == 0) {
+ if (has_delegate_options(ent->mnt_opts))
+ __show_token_info(ent);
+ }
+ }
+
+ if (json_output)
+ jsonw_end_array(json_wtr);
+
+ endmntent(fp);
+
+ return 0;
+}
+
+static int do_show(int argc, char **argv)
+{
+ if (argc)
+ return BAD_ARG();
+
+ return show_token_info();
+}
+
+static int do_help(int argc, char **argv)
+{
+ if (json_output) {
+ jsonw_null(json_wtr);
+ return 0;
+ }
+
+ fprintf(stderr,
+ "Usage: %1$s %2$s { show | list }\n"
+ " %1$s %2$s help\n"
+ " " HELP_SPEC_OPTIONS " }\n"
+ "\n"
+ "",
+ bin_name, argv[-2]);
+ return 0;
+}
+
+static const struct cmd cmds[] = {
+ { "show", do_show },
+ { "list", do_show },
+ { "help", do_help },
+ { 0 }
+};
+
+int do_token(int argc, char **argv)
+{
+ return cmd_select(cmds, argc, argv, do_help);
+}
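
Put together: for a bpffs instance mounted with delegation options, e.g. mount -t bpf -o delegate_cmds=prog_load:map_create bpffs /sys/fs/bpf/token, the plain output would look roughly like (values illustrative):

    token_info /sys/fs/bpf/token
            allowed_cmds:
              prog_load           map_create
            allowed_maps:
            allowed_progs:
            allowed_attachs: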
diff --git a/tools/bpf/bpftool/tracelog.c b/tools/bpf/bpftool/tracelog.c
index 31d806e3bdaa..573a8d99f009 100644
--- a/tools/bpf/bpftool/tracelog.c
+++ b/tools/bpf/bpftool/tracelog.c
@@ -57,10 +57,8 @@ find_tracefs_mnt_single(unsigned long magic, char *mnt, const char *mntpt)
static bool get_tracefs_pipe(char *mnt)
{
static const char * const known_mnts[] = {
- "/sys/kernel/debug/tracing",
"/sys/kernel/tracing",
- "/tracing",
- "/trace",
+ "/sys/kernel/debug/tracing",
};
const char *pipe_name = "/trace_pipe";
const char *fstype = "tracefs";
@@ -95,12 +93,7 @@ static bool get_tracefs_pipe(char *mnt)
return false;
p_info("could not find tracefs, attempting to mount it now");
- /* Most of the time, tracefs is automatically mounted by debugfs at
- * /sys/kernel/debug/tracing when we try to access it. If we could not
- * find it, it is likely that debugfs is not mounted. Let's give one
- * attempt at mounting just tracefs at /sys/kernel/tracing.
- */
- strcpy(mnt, known_mnts[1]);
+ strcpy(mnt, known_mnts[0]);
if (mount_tracefs(mnt))
return false;
diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile
index afbddea3a39c..ce1b556dfa90 100644
--- a/tools/bpf/resolve_btfids/Makefile
+++ b/tools/bpf/resolve_btfids/Makefile
@@ -17,7 +17,7 @@ endif
# Overrides for the prepare step libraries.
HOST_OVERRIDES := AR="$(HOSTAR)" CC="$(HOSTCC)" LD="$(HOSTLD)" ARCH="$(HOSTARCH)" \
- CROSS_COMPILE="" EXTRA_CFLAGS="$(HOSTCFLAGS)"
+ CROSS_COMPILE="" CLANG_CROSS_FLAGS="" EXTRA_CFLAGS="$(HOSTCFLAGS)"
RM ?= rm
HOSTCC ?= gcc
diff --git a/tools/bpf/runqslower/Makefile b/tools/bpf/runqslower/Makefile
deleted file mode 100644
index 78a436c4072e..000000000000
--- a/tools/bpf/runqslower/Makefile
+++ /dev/null
@@ -1,91 +0,0 @@
-# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
-include ../../scripts/Makefile.include
-
-OUTPUT ?= $(abspath .output)/
-
-BPFTOOL_OUTPUT := $(OUTPUT)bpftool/
-DEFAULT_BPFTOOL := $(BPFTOOL_OUTPUT)bootstrap/bpftool
-BPFTOOL ?= $(DEFAULT_BPFTOOL)
-BPF_TARGET_ENDIAN ?= --target=bpf
-LIBBPF_SRC := $(abspath ../../lib/bpf)
-BPFOBJ_OUTPUT := $(OUTPUT)libbpf/
-BPFOBJ := $(BPFOBJ_OUTPUT)libbpf.a
-BPF_DESTDIR := $(BPFOBJ_OUTPUT)
-BPF_INCLUDE := $(BPF_DESTDIR)/include
-INCLUDES := -I$(OUTPUT) -I$(BPF_INCLUDE) -I$(abspath ../../include/uapi)
-CFLAGS := -g -Wall $(CLANG_CROSS_FLAGS)
-CFLAGS += $(EXTRA_CFLAGS)
-LDFLAGS += $(EXTRA_LDFLAGS)
-LDLIBS += -lelf -lz
-
-# Try to detect best kernel BTF source
-KERNEL_REL := $(shell uname -r)
-VMLINUX_BTF_PATHS := $(if $(O),$(O)/vmlinux) \
- $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \
- ../../../vmlinux /sys/kernel/btf/vmlinux \
- /boot/vmlinux-$(KERNEL_REL)
-VMLINUX_BTF_PATH := $(or $(VMLINUX_BTF),$(firstword \
- $(wildcard $(VMLINUX_BTF_PATHS))))
-
-ifneq ($(V),1)
-MAKEFLAGS += --no-print-directory
-submake_extras := feature_display=0
-endif
-
-.DELETE_ON_ERROR:
-
-.PHONY: all clean runqslower libbpf_hdrs
-all: runqslower
-
-runqslower: $(OUTPUT)/runqslower
-
-clean:
- $(call QUIET_CLEAN, runqslower)
- $(Q)$(RM) -r $(BPFOBJ_OUTPUT) $(BPFTOOL_OUTPUT)
- $(Q)$(RM) $(OUTPUT)*.o $(OUTPUT)*.d
- $(Q)$(RM) $(OUTPUT)*.skel.h $(OUTPUT)vmlinux.h
- $(Q)$(RM) $(OUTPUT)runqslower
- $(Q)$(RM) -r .output
-
-libbpf_hdrs: $(BPFOBJ)
-
-$(OUTPUT)/runqslower: $(OUTPUT)/runqslower.o $(BPFOBJ)
- $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $^ $(LDLIBS) -o $@
-
-$(OUTPUT)/runqslower.o: runqslower.h $(OUTPUT)/runqslower.skel.h \
- $(OUTPUT)/runqslower.bpf.o | libbpf_hdrs
-
-$(OUTPUT)/runqslower.bpf.o: $(OUTPUT)/vmlinux.h runqslower.h | libbpf_hdrs
-
-$(OUTPUT)/%.skel.h: $(OUTPUT)/%.bpf.o | $(BPFTOOL)
- $(QUIET_GEN)$(BPFTOOL) gen skeleton $< > $@
-
-$(OUTPUT)/%.bpf.o: %.bpf.c $(BPFOBJ) | $(OUTPUT)
- $(QUIET_GEN)$(CLANG) -g -O2 $(BPF_TARGET_ENDIAN) $(INCLUDES) \
- -c $(filter %.c,$^) -o $@ && \
- $(LLVM_STRIP) -g $@
-
-$(OUTPUT)/%.o: %.c | $(OUTPUT)
- $(QUIET_CC)$(CC) $(CFLAGS) $(INCLUDES) -c $(filter %.c,$^) -o $@
-
-$(OUTPUT) $(BPFOBJ_OUTPUT) $(BPFTOOL_OUTPUT):
- $(QUIET_MKDIR)mkdir -p $@
-
-$(OUTPUT)/vmlinux.h: $(VMLINUX_BTF_PATH) | $(OUTPUT) $(BPFTOOL)
-ifeq ($(VMLINUX_H),)
- $(Q)if [ ! -e "$(VMLINUX_BTF_PATH)" ] ; then \
- echo "Couldn't find kernel BTF; set VMLINUX_BTF to" \
- "specify its location." >&2; \
- exit 1;\
- fi
- $(QUIET_GEN)$(BPFTOOL) btf dump file $(VMLINUX_BTF_PATH) format c > $@
-else
- $(Q)cp "$(VMLINUX_H)" $@
-endif
-
-$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(BPFOBJ_OUTPUT)
- $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(BPFOBJ_OUTPUT) \
- DESTDIR=$(BPFOBJ_OUTPUT) prefix= $(abspath $@) install_headers
-
-$(DEFAULT_BPFTOOL): | $(BPFTOOL_OUTPUT)
- $(Q)$(MAKE) $(submake_extras) -C ../bpftool OUTPUT=$(BPFTOOL_OUTPUT) bootstrap
diff --git a/tools/bpf/runqslower/runqslower.bpf.c b/tools/bpf/runqslower/runqslower.bpf.c
deleted file mode 100644
index fced54a3adf6..000000000000
--- a/tools/bpf/runqslower/runqslower.bpf.c
+++ /dev/null
@@ -1,106 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2019 Facebook
-#include "vmlinux.h"
-#include <bpf/bpf_helpers.h>
-#include "runqslower.h"
-
-#define TASK_RUNNING 0
-#define BPF_F_CURRENT_CPU 0xffffffffULL
-
-const volatile __u64 min_us = 0;
-const volatile pid_t targ_pid = 0;
-
-struct {
- __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
- __uint(map_flags, BPF_F_NO_PREALLOC);
- __type(key, int);
- __type(value, u64);
-} start SEC(".maps");
-
-struct {
- __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
- __uint(key_size, sizeof(u32));
- __uint(value_size, sizeof(u32));
-} events SEC(".maps");
-
-/* record enqueue timestamp */
-__always_inline
-static int trace_enqueue(struct task_struct *t)
-{
- u32 pid = t->pid;
- u64 *ptr;
-
- if (!pid || (targ_pid && targ_pid != pid))
- return 0;
-
- ptr = bpf_task_storage_get(&start, t, 0,
- BPF_LOCAL_STORAGE_GET_F_CREATE);
- if (!ptr)
- return 0;
-
- *ptr = bpf_ktime_get_ns();
- return 0;
-}
-
-SEC("tp_btf/sched_wakeup")
-int handle__sched_wakeup(u64 *ctx)
-{
- /* TP_PROTO(struct task_struct *p) */
- struct task_struct *p = (void *)ctx[0];
-
- return trace_enqueue(p);
-}
-
-SEC("tp_btf/sched_wakeup_new")
-int handle__sched_wakeup_new(u64 *ctx)
-{
- /* TP_PROTO(struct task_struct *p) */
- struct task_struct *p = (void *)ctx[0];
-
- return trace_enqueue(p);
-}
-
-SEC("tp_btf/sched_switch")
-int handle__sched_switch(u64 *ctx)
-{
- /* TP_PROTO(bool preempt, struct task_struct *prev,
- * struct task_struct *next)
- */
- struct task_struct *prev = (struct task_struct *)ctx[1];
- struct task_struct *next = (struct task_struct *)ctx[2];
- struct runq_event event = {};
- u64 *tsp, delta_us;
- u32 pid;
-
- /* ivcsw: treat like an enqueue event and store timestamp */
- if (prev->__state == TASK_RUNNING)
- trace_enqueue(prev);
-
- pid = next->pid;
-
- /* For pid mismatch, save a bpf_task_storage_get */
- if (!pid || (targ_pid && targ_pid != pid))
- return 0;
-
- /* fetch timestamp and calculate delta */
- tsp = bpf_task_storage_get(&start, next, 0, 0);
- if (!tsp)
- return 0; /* missed enqueue */
-
- delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;
- if (min_us && delta_us <= min_us)
- return 0;
-
- event.pid = pid;
- event.delta_us = delta_us;
- bpf_get_current_comm(&event.task, sizeof(event.task));
-
- /* output */
- bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
- &event, sizeof(event));
-
- bpf_task_storage_delete(&start, next);
- return 0;
-}
-
-char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/bpf/runqslower/runqslower.c b/tools/bpf/runqslower/runqslower.c
deleted file mode 100644
index 83c5993a139a..000000000000
--- a/tools/bpf/runqslower/runqslower.c
+++ /dev/null
@@ -1,171 +0,0 @@
-// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
-// Copyright (c) 2019 Facebook
-#include <argp.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <time.h>
-#include <bpf/libbpf.h>
-#include <bpf/bpf.h>
-#include "runqslower.h"
-#include "runqslower.skel.h"
-
-struct env {
- pid_t pid;
- __u64 min_us;
- bool verbose;
-} env = {
- .min_us = 10000,
-};
-
-const char *argp_program_version = "runqslower 0.1";
-const char *argp_program_bug_address = "<bpf@vger.kernel.org>";
-const char argp_program_doc[] =
-"runqslower Trace long process scheduling delays.\n"
-" For Linux, uses eBPF, BPF CO-RE, libbpf, BTF.\n"
-"\n"
-"This script traces high scheduling delays between tasks being\n"
-"ready to run and them running on CPU after that.\n"
-"\n"
-"USAGE: runqslower [-p PID] [min_us]\n"
-"\n"
-"EXAMPLES:\n"
-" runqslower # trace run queue latency higher than 10000 us (default)\n"
-" runqslower 1000 # trace run queue latency higher than 1000 us\n"
-" runqslower -p 123 # trace pid 123 only\n";
-
-static const struct argp_option opts[] = {
- { "pid", 'p', "PID", 0, "Process PID to trace"},
- { "verbose", 'v', NULL, 0, "Verbose debug output" },
- {},
-};
-
-static error_t parse_arg(int key, char *arg, struct argp_state *state)
-{
- static int pos_args;
- int pid;
- long long min_us;
-
- switch (key) {
- case 'v':
- env.verbose = true;
- break;
- case 'p':
- errno = 0;
- pid = strtol(arg, NULL, 10);
- if (errno || pid <= 0) {
- fprintf(stderr, "Invalid PID: %s\n", arg);
- argp_usage(state);
- }
- env.pid = pid;
- break;
- case ARGP_KEY_ARG:
- if (pos_args++) {
- fprintf(stderr,
- "Unrecognized positional argument: %s\n", arg);
- argp_usage(state);
- }
- errno = 0;
- min_us = strtoll(arg, NULL, 10);
- if (errno || min_us <= 0) {
- fprintf(stderr, "Invalid delay (in us): %s\n", arg);
- argp_usage(state);
- }
- env.min_us = min_us;
- break;
- default:
- return ARGP_ERR_UNKNOWN;
- }
- return 0;
-}
-
-int libbpf_print_fn(enum libbpf_print_level level,
- const char *format, va_list args)
-{
- if (level == LIBBPF_DEBUG && !env.verbose)
- return 0;
- return vfprintf(stderr, format, args);
-}
-
-void handle_event(void *ctx, int cpu, void *data, __u32 data_sz)
-{
- const struct runq_event *e = data;
- struct tm *tm;
- char ts[32];
- time_t t;
-
- time(&t);
- tm = localtime(&t);
- strftime(ts, sizeof(ts), "%H:%M:%S", tm);
- printf("%-8s %-16s %-6d %14llu\n", ts, e->task, e->pid, e->delta_us);
-}
-
-void handle_lost_events(void *ctx, int cpu, __u64 lost_cnt)
-{
- printf("Lost %llu events on CPU #%d!\n", lost_cnt, cpu);
-}
-
-int main(int argc, char **argv)
-{
- static const struct argp argp = {
- .options = opts,
- .parser = parse_arg,
- .doc = argp_program_doc,
- };
- struct perf_buffer *pb = NULL;
- struct runqslower_bpf *obj;
- int err;
-
- err = argp_parse(&argp, argc, argv, 0, NULL, NULL);
- if (err)
- return err;
-
- libbpf_set_print(libbpf_print_fn);
-
- /* Use libbpf 1.0 API mode */
- libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
-
- obj = runqslower_bpf__open();
- if (!obj) {
- fprintf(stderr, "failed to open and/or load BPF object\n");
- return 1;
- }
-
- /* initialize global data (filtering options) */
- obj->rodata->targ_pid = env.pid;
- obj->rodata->min_us = env.min_us;
-
- err = runqslower_bpf__load(obj);
- if (err) {
- fprintf(stderr, "failed to load BPF object: %d\n", err);
- goto cleanup;
- }
-
- err = runqslower_bpf__attach(obj);
- if (err) {
- fprintf(stderr, "failed to attach BPF programs\n");
- goto cleanup;
- }
-
- printf("Tracing run queue latency higher than %llu us\n", env.min_us);
- printf("%-8s %-16s %-6s %14s\n", "TIME", "COMM", "PID", "LAT(us)");
-
- pb = perf_buffer__new(bpf_map__fd(obj->maps.events), 64,
- handle_event, handle_lost_events, NULL, NULL);
- err = libbpf_get_error(pb);
- if (err) {
- pb = NULL;
- fprintf(stderr, "failed to open perf buffer: %d\n", err);
- goto cleanup;
- }
-
- while ((err = perf_buffer__poll(pb, 100)) >= 0)
- ;
- printf("Error polling perf buffer: %d\n", err);
-
-cleanup:
- perf_buffer__free(pb);
- runqslower_bpf__destroy(obj);
-
- return err != 0;
-}
diff --git a/tools/bpf/runqslower/runqslower.h b/tools/bpf/runqslower/runqslower.h
deleted file mode 100644
index 4f70f07200c2..000000000000
--- a/tools/bpf/runqslower/runqslower.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
-#ifndef __RUNQSLOWER_H
-#define __RUNQSLOWER_H
-
-#define TASK_COMM_LEN 16
-
-struct runq_event {
- char task[TASK_COMM_LEN];
- __u64 delta_us;
- pid_t pid;
-};
-
-#endif /* __RUNQSLOWER_H */
diff --git a/tools/build/Build b/tools/build/Build
new file mode 100644
index 000000000000..1c7e598e9f59
--- /dev/null
+++ b/tools/build/Build
@@ -0,0 +1,2 @@
+hostprogs := fixdep
+fixdep-y := fixdep.o
diff --git a/tools/build/Makefile b/tools/build/Makefile
index 63ef21878761..3a5a3808ab2a 100644
--- a/tools/build/Makefile
+++ b/tools/build/Makefile
@@ -37,5 +37,22 @@ ifneq ($(wildcard $(TMP_O)),)
$(Q)$(MAKE) -C feature OUTPUT=$(TMP_O) clean >/dev/null
endif
-$(OUTPUT)fixdep: $(srctree)/tools/build/fixdep.c
- $(QUIET_CC)$(HOSTCC) $(KBUILD_HOSTCFLAGS) $(KBUILD_HOSTLDFLAGS) -o $@ $<
+FIXDEP := $(OUTPUT)fixdep
+FIXDEP_IN := $(OUTPUT)fixdep-in.o
+
+# To track fixdep's dependencies properly, fixdep needs to run on itself.
+# Build it twice the first time.
+$(FIXDEP_IN): FORCE
+ $(Q)if [ ! -f $(FIXDEP) ]; then \
+ $(MAKE) $(build)=fixdep HOSTCFLAGS="$(KBUILD_HOSTCFLAGS)"; \
+ rm -f $(FIXDEP).o; \
+ fi
+ $(Q)$(MAKE) $(build)=fixdep HOSTCFLAGS="$(KBUILD_HOSTCFLAGS)"
+
+
+$(FIXDEP): $(FIXDEP_IN)
+ $(QUIET_LINK)$(HOSTCC) $(FIXDEP_IN) $(KBUILD_HOSTLDFLAGS) -o $@
+
+FORCE:
+
+.PHONY: FORCE
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index 1f44ca677ad3..a7f030fc5e83 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -68,7 +68,6 @@ FEATURE_TESTS_BASIC := \
libdw \
eventfd \
fortify-source \
- get_current_dir_name \
gettid \
glibc \
libbfd \
@@ -80,14 +79,10 @@ FEATURE_TESTS_BASIC := \
libelf-zstd \
libnuma \
numa_num_possible_cpus \
- libperl \
libpython \
libslang \
libtraceevent \
- libtracefs \
libcpupower \
- libcrypto \
- libunwind \
pthread-attr-setaffinity-np \
pthread-barrier \
reallocarray \
@@ -95,7 +90,6 @@ FEATURE_TESTS_BASIC := \
timerfd \
zlib \
lzma \
- get_cpuid \
bpf \
scandirat \
sched_getcpu \
@@ -123,16 +117,11 @@ FEATURE_TESTS_EXTRA := \
libbfd-liberty \
libbfd-liberty-z \
libopencsd \
+ libperl \
cxx \
llvm \
clang \
libbpf \
- libbpf-btf__load_from_kernel_by_id \
- libbpf-bpf_prog_load \
- libbpf-bpf_object__next_program \
- libbpf-bpf_object__next_map \
- libbpf-bpf_program__set_insns \
- libbpf-bpf_create_map \
libpfm4 \
libdebuginfod \
clang-bpf-co-re \
@@ -148,20 +137,14 @@ endif
FEATURE_DISPLAY ?= \
libdw \
glibc \
- libbfd \
- libbfd-buildid \
libelf \
libnuma \
numa_num_possible_cpus \
- libperl \
libpython \
- libcrypto \
- libunwind \
libcapstone \
llvm-perf \
zlib \
lzma \
- get_cpuid \
bpf \
libaio \
libzstd
@@ -330,5 +313,7 @@ endef
ifeq ($(FEATURE_DISPLAY_DEFERRED),)
$(call feature_display_entries)
- $(info )
+ ifeq ($(feature_display),1)
+ $(info )
+ endif
endif
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index b8b5fb183dd4..87a5a908d6fa 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -8,7 +8,6 @@ FILES= \
test-libdw.bin \
test-eventfd.bin \
test-fortify-source.bin \
- test-get_current_dir_name.bin \
test-glibc.bin \
test-gtk2.bin \
test-gtk2-infobar.bin \
@@ -34,11 +33,9 @@ FILES= \
test-libperl.bin \
test-libpython.bin \
test-libslang.bin \
- test-libslang-include-subdir.bin \
test-libtraceevent.bin \
test-libcpupower.bin \
test-libtracefs.bin \
- test-libcrypto.bin \
test-libunwind.bin \
test-libunwind-debug-frame.bin \
test-libunwind-x86.bin \
@@ -59,7 +56,6 @@ FILES= \
test-lzma.bin \
test-bpf.bin \
test-libbpf.bin \
- test-get_cpuid.bin \
test-sdt.bin \
test-cxx.bin \
test-gettid.bin \
@@ -94,7 +90,7 @@ else
# paths are used instead.
ifdef CROSS_COMPILE
ifeq ($(PKG_CONFIG_LIBDIR)$(PKG_CONFIG_PATH)$(PKG_CONFIG_SYSROOT_DIR),)
- CROSS_ARCH = $(shell $(CC) -dumpmachine)
+ CROSS_ARCH = $(notdir $(CROSS_COMPILE:%-=%))
PKG_CONFIG_LIBDIR := /usr/local/$(CROSS_ARCH)/lib/pkgconfig/
PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):/usr/local/lib/$(CROSS_ARCH)/pkgconfig/
PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):/usr/lib/$(CROSS_ARCH)/pkgconfig/
@@ -110,7 +106,7 @@ all: $(FILES)
__BUILD = $(CC) $(CFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.c,$(@F)) $(LDFLAGS)
BUILD = $(__BUILD) > $(@:.bin=.make.output) 2>&1
BUILD_BFD = $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl
- BUILD_ALL = $(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -lslang $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma -lzstd
+ BUILD_ALL = $(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -lslang $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -ldl -lz -llzma -lzstd
__BUILDXX = $(CXX) $(CXXFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.cpp,$(@F)) $(LDFLAGS)
BUILDXX = $(__BUILDXX) > $(@:.bin=.make.output) 2>&1
@@ -118,7 +114,7 @@ __BUILDXX = $(CXX) $(CXXFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.cpp,$(
###############################
$(OUTPUT)test-all.bin:
- $(BUILD_ALL) || $(BUILD_ALL) -lopcodes -liberty
+ $(BUILD_ALL)
$(OUTPUT)test-hello.bin:
$(BUILD)
@@ -147,9 +143,6 @@ $(OUTPUT)test-libelf.bin:
$(OUTPUT)test-eventfd.bin:
$(BUILD)
-$(OUTPUT)test-get_current_dir_name.bin:
- $(BUILD)
-
$(OUTPUT)test-glibc.bin:
$(BUILD)
@@ -234,9 +227,6 @@ $(OUTPUT)test-libunwind-debug-frame-aarch64.bin:
$(OUTPUT)test-libslang.bin:
$(BUILD) -lslang
-$(OUTPUT)test-libslang-include-subdir.bin:
- $(BUILD) -lslang
-
$(OUTPUT)test-libtraceevent.bin:
$(BUILD) -ltraceevent
@@ -246,9 +236,6 @@ $(OUTPUT)test-libcpupower.bin:
$(OUTPUT)test-libtracefs.bin:
$(BUILD) $(shell $(PKG_CONFIG) --cflags libtracefs 2>/dev/null) -ltracefs
-$(OUTPUT)test-libcrypto.bin:
- $(BUILD) -lcrypto
-
$(OUTPUT)test-gtk2.bin:
$(BUILD) $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null) -Wno-deprecated-declarations
@@ -319,10 +306,10 @@ $(OUTPUT)test-libcapstone.bin:
$(BUILD) # -lcapstone provided by $(FEATURE_CHECK_LDFLAGS-libcapstone)
$(OUTPUT)test-compile-32.bin:
- $(CC) -m32 -o $@ test-compile.c
+ $(CC) -m32 -Wall -Werror -o $@ test-compile.c
$(OUTPUT)test-compile-x32.bin:
- $(CC) -mx32 -o $@ test-compile.c
+ $(CC) -mx32 -Wall -Werror -o $@ test-compile.c
$(OUTPUT)test-zlib.bin:
$(BUILD) -lz
@@ -330,36 +317,12 @@ $(OUTPUT)test-zlib.bin:
$(OUTPUT)test-lzma.bin:
$(BUILD) -llzma
-$(OUTPUT)test-get_cpuid.bin:
- $(BUILD)
-
$(OUTPUT)test-bpf.bin:
$(BUILD)
$(OUTPUT)test-libbpf.bin:
$(BUILD) -lbpf
-$(OUTPUT)test-libbpf-btf__load_from_kernel_by_id.bin:
- $(BUILD) -lbpf
-
-$(OUTPUT)test-libbpf-bpf_prog_load.bin:
- $(BUILD) -lbpf
-
-$(OUTPUT)test-libbpf-bpf_map_create.bin:
- $(BUILD) -lbpf
-
-$(OUTPUT)test-libbpf-bpf_object__next_program.bin:
- $(BUILD) -lbpf
-
-$(OUTPUT)test-libbpf-bpf_object__next_map.bin:
- $(BUILD) -lbpf
-
-$(OUTPUT)test-libbpf-bpf_program__set_insns.bin:
- $(BUILD) -lbpf
-
-$(OUTPUT)test-libbpf-btf__raw_data.bin:
- $(BUILD) -lbpf
-
$(OUTPUT)test-sdt.bin:
$(BUILD)
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
index 03ddaac6f4c4..eb346160d0ba 100644
--- a/tools/build/feature/test-all.c
+++ b/tools/build/feature/test-all.c
@@ -7,17 +7,13 @@
*/
/*
- * Quirk: Python and Perl headers cannot be in arbitrary places, so keep
- * these 3 testcases at the top:
+ * Quirk: Python headers cannot be in arbitrary places, so keep this testcase at
+ * the top:
*/
#define main main_test_libpython
# include "test-libpython.c"
#undef main
-#define main main_test_libperl
-# include "test-libperl.c"
-#undef main
-
#define main main_test_hello
# include "test-hello.c"
#undef main
@@ -26,10 +22,6 @@
# include "test-libelf.c"
#undef main
-#define main main_test_get_current_dir_name
-# include "test-get_current_dir_name.c"
-#undef main
-
#define main main_test_gettid
# include "test-gettid.c"
#undef main
@@ -66,14 +58,6 @@
# include "test-libslang.c"
#undef main
-#define main main_test_libbfd
-# include "test-libbfd.c"
-#undef main
-
-#define main main_test_libbfd_buildid
-# include "test-libbfd-buildid.c"
-#undef main
-
#define main main_test_backtrace
# include "test-backtrace.c"
#undef main
@@ -130,18 +114,10 @@
# include "test-lzma.c"
#undef main
-#define main main_test_get_cpuid
-# include "test-get_cpuid.c"
-#undef main
-
#define main main_test_bpf
# include "test-bpf.c"
#undef main
-#define main main_test_libcrypto
-# include "test-libcrypto.c"
-#undef main
-
#define main main_test_sdt
# include "test-sdt.c"
#undef main
@@ -158,14 +134,6 @@
# include "test-reallocarray.c"
#undef main
-#define main main_test_disassembler_four_args
-# include "test-disassembler-four-args.c"
-#undef main
-
-#define main main_test_disassembler_init_styled
-# include "test-disassembler-init-styled.c"
-#undef main
-
#define main main_test_libzstd
# include "test-libzstd.c"
#undef main
@@ -174,17 +142,11 @@
# include "test-libtraceevent.c"
#undef main
-#define main main_test_libtracefs
-# include "test-libtracefs.c"
-#undef main
-
int main(int argc, char *argv[])
{
main_test_libpython();
- main_test_libperl();
main_test_hello();
main_test_libelf();
- main_test_get_current_dir_name();
main_test_gettid();
main_test_glibc();
main_test_libdw();
@@ -193,8 +155,6 @@ int main(int argc, char *argv[])
main_test_libelf_gelf_getnote();
main_test_libelf_getshdrstrndx();
main_test_libslang();
- main_test_libbfd();
- main_test_libbfd_buildid();
main_test_backtrace();
main_test_libnuma();
main_test_numa_num_possible_cpus();
@@ -204,19 +164,15 @@ int main(int argc, char *argv[])
main_test_pthread_attr_setaffinity_np();
main_test_pthread_barrier();
main_test_lzma();
- main_test_get_cpuid();
main_test_bpf();
- main_test_libcrypto();
main_test_scandirat();
main_test_sched_getcpu();
main_test_sdt();
main_test_setns();
main_test_libaio();
main_test_reallocarray();
- main_test_disassembler_four_args();
main_test_libzstd();
main_test_libtraceevent();
- main_test_libtracefs();
return 0;
}
diff --git a/tools/build/feature/test-get_cpuid.c b/tools/build/feature/test-get_cpuid.c
deleted file mode 100644
index bb4f065f28a6..000000000000
--- a/tools/build/feature/test-get_cpuid.c
+++ /dev/null
@@ -1,8 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <cpuid.h>
-
-int main(void)
-{
- unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
- return __get_cpuid(0x15, &eax, &ebx, &ecx, &edx);
-}
diff --git a/tools/build/feature/test-get_current_dir_name.c b/tools/build/feature/test-get_current_dir_name.c
deleted file mode 100644
index c3c201691b4f..000000000000
--- a/tools/build/feature/test-get_current_dir_name.c
+++ /dev/null
@@ -1,11 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#define _GNU_SOURCE
-#include <unistd.h>
-#include <stdlib.h>
-
-int main(void)
-{
- free(get_current_dir_name());
- return 0;
-}
-#undef _GNU_SOURCE
diff --git a/tools/build/feature/test-libcrypto.c b/tools/build/feature/test-libcrypto.c
deleted file mode 100644
index bc34a5bbb504..000000000000
--- a/tools/build/feature/test-libcrypto.c
+++ /dev/null
@@ -1,25 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <openssl/evp.h>
-#include <openssl/sha.h>
-#include <openssl/md5.h>
-
-int main(void)
-{
- EVP_MD_CTX *mdctx;
- unsigned char md[MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH];
- unsigned char dat[] = "12345";
- unsigned int digest_len;
-
- mdctx = EVP_MD_CTX_new();
- if (!mdctx)
- return 0;
-
- EVP_DigestInit_ex(mdctx, EVP_md5(), NULL);
- EVP_DigestUpdate(mdctx, &dat[0], sizeof(dat));
- EVP_DigestFinal_ex(mdctx, &md[0], &digest_len);
- EVP_MD_CTX_free(mdctx);
-
- SHA1(&dat[0], sizeof(dat), &md[0]);
-
- return 0;
-}
diff --git a/tools/build/feature/test-libslang-include-subdir.c b/tools/build/feature/test-libslang-include-subdir.c
deleted file mode 100644
index 3ea47ec7590e..000000000000
--- a/tools/build/feature/test-libslang-include-subdir.c
+++ /dev/null
@@ -1,7 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <slang/slang.h>
-
-int main(void)
-{
- return SLsmg_init_smg();
-}
diff --git a/tools/cgroup/memcg_slabinfo.py b/tools/cgroup/memcg_slabinfo.py
index 270c28a0d098..6bf4bde77903 100644
--- a/tools/cgroup/memcg_slabinfo.py
+++ b/tools/cgroup/memcg_slabinfo.py
@@ -146,11 +146,11 @@ def detect_kernel_config():
def for_each_slab(prog):
- PGSlab = ~prog.constant('PG_slab')
+ slabtype = prog.constant('PGTY_slab')
for page in for_each_page(prog):
try:
- if page.page_type.value_() == PGSlab:
+ if (page.page_type.value_() >> 24) == slabtype:
yield cast('struct slab *', page)
except FaultError:
pass
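For context on the hunk above: slab pages are no longer marked with a dedicated PG_slab page flag; the type tag now lives in the top byte of page_type, which is what the shifted comparison reads. A minimal sketch of that extraction (the tag value is illustrative; the script obtains the real PGTY_slab constant from the running kernel via drgn):

    PGTY_slab = 0xf5                             # hypothetical tag value
    page_type = (PGTY_slab << 24) | 0x00ABCDEF   # tag occupies bits 31..24
    assert (page_type >> 24) == PGTY_slab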
diff --git a/tools/dma/.gitignore b/tools/dma/.gitignore
new file mode 100644
index 000000000000..94b68cf4147b
--- /dev/null
+++ b/tools/dma/.gitignore
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+dma_map_benchmark
+include/linux/map_benchmark.h
diff --git a/tools/dma/Makefile b/tools/dma/Makefile
new file mode 100644
index 000000000000..e4abf37bf020
--- /dev/null
+++ b/tools/dma/Makefile
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: GPL-2.0
+include ../scripts/Makefile.include
+
+bindir ?= /usr/bin
+
+# This will work when dma is built in the tools env, where srctree
+# isn't set, and when invoked from the selftests build, where srctree
+# is set to ".". building_out_of_srctree is undefined for in-srctree
+# builds.
+ifndef building_out_of_srctree
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+endif
+
+# Do not use make's built-in rules
+# (this improves performance and avoids hard-to-debug behaviour);
+MAKEFLAGS += -r
+
+override CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include
+
+ALL_TARGETS := dma_map_benchmark
+ALL_PROGRAMS := $(patsubst %,$(OUTPUT)%,$(ALL_TARGETS))
+
+all: $(ALL_PROGRAMS)
+
+export srctree OUTPUT CC LD CFLAGS
+include $(srctree)/tools/build/Makefile.include
+
+#
+# We need the following to be outside of kernel tree
+#
+$(OUTPUT)include/linux/map_benchmark.h: ../../include/uapi/linux/map_benchmark.h
+ mkdir -p $(OUTPUT)include/linux 2>&1 || true
+ ln -sf $(CURDIR)/../../include/uapi/linux/map_benchmark.h $@
+
+prepare: $(OUTPUT)include/linux/map_benchmark.h
+
+FORCE:
+
+DMA_MAP_BENCHMARK = dma_map_benchmark
+$(DMA_MAP_BENCHMARK): prepare FORCE
+ $(CC) $(CFLAGS) $(DMA_MAP_BENCHMARK).c -o $(DMA_MAP_BENCHMARK)
+
+clean:
+ rm -f $(ALL_PROGRAMS)
+ rm -rf $(OUTPUT)include
+ find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete -o -name '\.*.cmd' -delete
+
+install: $(ALL_PROGRAMS)
+ install -d -m 755 $(DESTDIR)$(bindir); \
+ for program in $(ALL_PROGRAMS); do \
+ install $$program $(DESTDIR)$(bindir); \
+ done
+
+.PHONY: all install clean prepare FORCE
diff --git a/tools/testing/selftests/dma/config b/tools/dma/config
index 6102ee3c43cd..6102ee3c43cd 100644
--- a/tools/testing/selftests/dma/config
+++ b/tools/dma/config
diff --git a/tools/testing/selftests/dma/dma_map_benchmark.c b/tools/dma/dma_map_benchmark.c
index b12f1f9babf8..dd0ed528e6df 100644
--- a/tools/testing/selftests/dma/dma_map_benchmark.c
+++ b/tools/dma/dma_map_benchmark.c
@@ -10,7 +10,6 @@
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
-#include <linux/types.h>
#include <linux/map_benchmark.h>
#define NSEC_PER_MSEC 1000000L
@@ -118,7 +117,7 @@ int main(int argc, char **argv)
}
printf("dma mapping benchmark: threads:%d seconds:%d node:%d dir:%s granule: %d\n",
- threads, seconds, node, dir[directions], granule);
+ threads, seconds, node, directions[dir], granule);
printf("average map latency(us):%.1f standard deviation:%.1f\n",
map.avg_map_100ns/10.0, map.map_stddev/10.0);
printf("average unmap latency(us):%.1f standard deviation:%.1f\n",
diff --git a/tools/docs/check-variable-fonts.py b/tools/docs/check-variable-fonts.py
new file mode 100755
index 000000000000..958d5a745724
--- /dev/null
+++ b/tools/docs/check-variable-fonts.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) Akira Yokosawa, 2024
+#
+# Ported to Python by (c) Mauro Carvalho Chehab, 2025
+#
+# pylint: disable=C0103
+
+"""
+Detect problematic Noto CJK variable fonts.
+
+For more details, see .../tools/lib/python/kdoc/latex_fonts.py.
+"""
+
+import argparse
+import sys
+import os.path
+
+src_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.insert(0, os.path.join(src_dir, '../lib/python'))
+
+from kdoc.latex_fonts import LatexFontChecker
+
+checker = LatexFontChecker()
+
+parser = argparse.ArgumentParser(description=checker.description(),
+                                 formatter_class=argparse.RawTextHelpFormatter)
+parser.add_argument("--deny-vf",
+                    help="XDG_CONFIG_HOME dir containing fontconfig/fonts.conf file")
+
+args = parser.parse_args()
+
+msg = LatexFontChecker(args.deny_vf).check()
+if msg:
+    print(msg)
+    sys.exit(1)
diff --git a/tools/docs/checktransupdate.py b/tools/docs/checktransupdate.py
new file mode 100755
index 000000000000..e894652369a5
--- /dev/null
+++ b/tools/docs/checktransupdate.py
@@ -0,0 +1,307 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+This script helps track the translation status of the documentation
+in different locales, e.g., zh_CN. More specifically, it uses `git log`
+to find the latest English commit reachable from the translation commit
+(ordered by author date) and the latest English commits from HEAD. If
+they differ, it reports the file and commits that need to be updated.
+
+The usage is as follows:
+- tools/docs/checktransupdate.py -l zh_CN
+This will print all the files that need to be updated or translated in the zh_CN locale.
+- tools/docs/checktransupdate.py Documentation/translations/zh_CN/dev-tools/testing-overview.rst
+This will only print the status of the specified file.
+
+The output is something like:
+Documentation/dev-tools/kfence.rst
+No translation in the locale of zh_CN
+
+Documentation/translations/zh_CN/dev-tools/testing-overview.rst
+commit 42fb9cfd5b18 ("Documentation: dev-tools: Add link to RV docs")
+1 commit(s) need resolving in total
+"""
+
+import os
+import re
+import time
+import logging
+from argparse import ArgumentParser, ArgumentTypeError, BooleanOptionalAction
+from datetime import datetime
+
+
+def get_origin_path(file_path):
+ """Get the origin path from the translation path"""
+ paths = file_path.split("/")
+ tidx = paths.index("translations")
+ opaths = paths[:tidx]
+ opaths += paths[tidx + 2 :]
+ return "/".join(opaths)
+
+
+def get_latest_commit_from(file_path, commit):
+ """Get the latest commit from the specified commit for the specified file"""
+ command = f"git log --pretty=format:%H%n%aD%n%cD%n%n%B {commit} -1 -- {file_path}"
+ logging.debug(command)
+ pipe = os.popen(command)
+ result = pipe.read()
+ result = result.split("\n")
+ if len(result) <= 1:
+ return None
+
+ logging.debug("Result: %s", result[0])
+
+ return {
+ "hash": result[0],
+ "author_date": datetime.strptime(result[1], "%a, %d %b %Y %H:%M:%S %z"),
+ "commit_date": datetime.strptime(result[2], "%a, %d %b %Y %H:%M:%S %z"),
+ "message": result[4:],
+ }
+
+
+def get_origin_from_trans(origin_path, t_from_head):
+ """Get the latest origin commit from the translation commit"""
+ o_from_t = get_latest_commit_from(origin_path, t_from_head["hash"])
+ while o_from_t is not None and o_from_t["author_date"] > t_from_head["author_date"]:
+ o_from_t = get_latest_commit_from(origin_path, o_from_t["hash"] + "^")
+ if o_from_t is not None:
+ logging.debug("tracked origin commit id: %s", o_from_t["hash"])
+ return o_from_t
+
+
+def get_origin_from_trans_smartly(origin_path, t_from_head):
+ """Get the latest origin commit from the formatted translation commit:
+ (1) update to commit HASH (TITLE)
+ (2) Update the translation through commit HASH (TITLE)
+ """
+ # catch flag for 12-bit commit hash
+ HASH = r'([0-9a-f]{12})'
+ # pattern 1: contains "update to commit HASH"
+ pat_update_to = re.compile(rf'update to commit {HASH}')
+ # pattern 2: contains "Update the translation through commit HASH"
+ pat_update_translation = re.compile(rf'Update the translation through commit {HASH}')
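+    # e.g. (hash hypothetical): 'update to commit 0123456789ab ("docs: x")'
+    # or:  'Update the translation through commit 0123456789ab ("docs: x")'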
+
+ origin_commit_hash = None
+ for line in t_from_head["message"]:
+ # check if the line matches the first pattern
+ match = pat_update_to.search(line)
+ if match:
+ origin_commit_hash = match.group(1)
+ break
+ # check if the line matches the second pattern
+ match = pat_update_translation.search(line)
+ if match:
+ origin_commit_hash = match.group(1)
+ break
+ if origin_commit_hash is None:
+ return None
+ o_from_t = get_latest_commit_from(origin_path, origin_commit_hash)
+ if o_from_t is not None:
+ logging.debug("tracked origin commit id: %s", o_from_t["hash"])
+ return o_from_t
+
+
+def get_commits_count_between(opath, commit1, commit2):
+ """Get the commits count between two commits for the specified file"""
+ command = f"git log --pretty=format:%H {commit1}...{commit2} -- {opath}"
+ logging.debug(command)
+ pipe = os.popen(command)
+ result = pipe.read().split("\n")
+ # filter out empty lines
+ result = list(filter(lambda x: x != "", result))
+ return result
+
+
+def pretty_output(commit):
+ """Pretty print the commit message"""
+ command = f"git log --pretty='format:%h (\"%s\")' -1 {commit}"
+ logging.debug(command)
+ pipe = os.popen(command)
+ return pipe.read()
+
+
+def valid_commit(commit):
+ """Check if the commit is valid or not"""
+ msg = pretty_output(commit)
+ return "Merge tag" not in msg
+
+def check_per_file(file_path):
+ """Check the translation status for the specified file"""
+ opath = get_origin_path(file_path)
+
+ if not os.path.isfile(opath):
+ logging.error("Cannot find the origin path for {file_path}")
+ return
+
+ o_from_head = get_latest_commit_from(opath, "HEAD")
+ t_from_head = get_latest_commit_from(file_path, "HEAD")
+
+ if o_from_head is None or t_from_head is None:
+ logging.error("Cannot find the latest commit for %s", file_path)
+ return
+
+ o_from_t = get_origin_from_trans_smartly(opath, t_from_head)
+ # notice, o_from_t from get_*_smartly() is always more accurate than from get_*()
+ if o_from_t is None:
+ o_from_t = get_origin_from_trans(opath, t_from_head)
+
+ if o_from_t is None:
+ logging.error("Error: Cannot find the latest origin commit for %s", file_path)
+ return
+
+ if o_from_head["hash"] == o_from_t["hash"]:
+ logging.debug("No update needed for %s", file_path)
+ else:
+ logging.info(file_path)
+ commits = get_commits_count_between(
+ opath, o_from_t["hash"], o_from_head["hash"]
+ )
+ count = 0
+ for commit in commits:
+ if valid_commit(commit):
+ logging.info("commit %s", pretty_output(commit))
+ count += 1
+ logging.info("%d commits needs resolving in total\n", count)
+
+
+def valid_locales(locale):
+ """Check if the locale is valid or not"""
+ script_path = os.path.dirname(os.path.abspath(__file__))
+ linux_path = os.path.join(script_path, "../..")
+ if not os.path.isdir(f"{linux_path}/Documentation/translations/{locale}"):
+        raise ArgumentTypeError(f"Invalid locale: {locale}")
+ return locale
+
+
+def list_files_with_excluding_folders(folder, exclude_folders, include_suffix):
+ """List all files with the specified suffix in the folder and its subfolders"""
+ files = []
+ stack = [folder]
+
+ while stack:
+ pwd = stack.pop()
+ # filter out the exclude folders
+ if os.path.basename(pwd) in exclude_folders:
+ continue
+ # list all files and folders
+ for item in os.listdir(pwd):
+ ab_item = os.path.join(pwd, item)
+ if os.path.isdir(ab_item):
+ stack.append(ab_item)
+ else:
+ if ab_item.endswith(include_suffix):
+ files.append(ab_item)
+
+ return files
+
+
+class DmesgFormatter(logging.Formatter):
+ """Custom dmesg logging formatter"""
+ def format(self, record):
+ timestamp = time.time()
+ formatted_time = f"[{timestamp:>10.6f}]"
+ log_message = f"{formatted_time} {record.getMessage()}"
+ return log_message
+
+
+def config_logging(log_level, log_file="checktransupdate.log"):
+ """configure logging based on the log level"""
+ # set up the root logger
+ logger = logging.getLogger()
+ logger.setLevel(log_level)
+
+ # Create console handler
+ console_handler = logging.StreamHandler()
+ console_handler.setLevel(log_level)
+
+ # Create file handler
+ file_handler = logging.FileHandler(log_file)
+ file_handler.setLevel(log_level)
+
+ # Create formatter and add it to the handlers
+ formatter = DmesgFormatter()
+ console_handler.setFormatter(formatter)
+ file_handler.setFormatter(formatter)
+
+ # Add the handler to the logger
+ logger.addHandler(console_handler)
+ logger.addHandler(file_handler)
+
+
+def main():
+ """Main function of the script"""
+ script_path = os.path.dirname(os.path.abspath(__file__))
+ linux_path = os.path.join(script_path, "../..")
+
+ parser = ArgumentParser(description="Check the translation update")
+ parser.add_argument(
+ "-l",
+ "--locale",
+ default="zh_CN",
+ type=valid_locales,
+ help="Locale to check when files are not specified",
+ )
+
+ parser.add_argument(
+ "--print-missing-translations",
+ action=BooleanOptionalAction,
+ default=True,
+ help="Print files that do not have translations",
+ )
+
+ parser.add_argument(
+ '--log',
+ default='INFO',
+ choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
+ help='Set the logging level')
+
+ parser.add_argument(
+ '--logfile',
+ default='checktransupdate.log',
+ help='Set the logging file (default: checktransupdate.log)')
+
+ parser.add_argument(
+ "files", nargs="*", help="Files to check, if not specified, check all files"
+ )
+ args = parser.parse_args()
+
+ # Configure logging based on the --log argument
+ log_level = getattr(logging, args.log.upper(), logging.INFO)
+    config_logging(log_level, args.logfile)
+
+ # Get files related to linux path
+ files = args.files
+ if len(files) == 0:
+        official_files = list_files_with_excluding_folders(
+ os.path.join(linux_path, "Documentation"), ["translations", "output"], "rst"
+ )
+
+        for file in official_files:
+ # split the path into parts
+ path_parts = file.split(os.sep)
+ # find the index of the "Documentation" directory
+ kindex = path_parts.index("Documentation")
+ # insert the translations and locale after the Documentation directory
+ new_path_parts = path_parts[:kindex + 1] + ["translations", args.locale] \
+ + path_parts[kindex + 1 :]
+ # join the path parts back together
+ new_file = os.sep.join(new_path_parts)
+ if os.path.isfile(new_file):
+ files.append(new_file)
+ else:
+ if args.print_missing_translations:
+ logging.info(os.path.relpath(os.path.abspath(file), linux_path))
+ logging.info("No translation in the locale of %s\n", args.locale)
+
+ files = list(map(lambda x: os.path.relpath(os.path.abspath(x), linux_path), files))
+
+ # cd to linux root directory
+ os.chdir(linux_path)
+
+ for file in files:
+ check_per_file(file)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/docs/documentation-file-ref-check b/tools/docs/documentation-file-ref-check
new file mode 100755
index 000000000000..0cad42f6943b
--- /dev/null
+++ b/tools/docs/documentation-file-ref-check
@@ -0,0 +1,245 @@
+#!/usr/bin/env perl
+# SPDX-License-Identifier: GPL-2.0
+#
+# Treewide grep for references to files under Documentation, and report
+# non-existing files in stderr.
+
+use warnings;
+use strict;
+use Getopt::Long qw(:config no_auto_abbrev);
+
+# NOTE: only add things here when the file was gone, but the text wants
+# to mention a past documentation file, for example, to give credits for
+# the original work.
+my %false_positives = (
+ "Documentation/scsi/scsi_mid_low_api.rst" => "Documentation/Configure.help",
+ "drivers/vhost/vhost.c" => "Documentation/virtual/lguest/lguest.c",
+);
+
+my $scriptname = $0;
+$scriptname =~ s,tools/docs/([^/]+/),$1,;
+
+# Parse arguments
+my $help = 0;
+my $fix = 0;
+my $warn = 0;
+
+if (! -e ".git") {
+ printf "Warning: can't check if file exists, as this is not a git tree\n";
+ exit 0;
+}
+
+GetOptions(
+ 'fix' => \$fix,
+ 'warn' => \$warn,
+ 'h|help|usage' => \$help,
+);
+
+if ($help != 0) {
+ print "$scriptname [--help] [--fix]\n";
+ exit -1;
+}
+
+# Step 1: find broken references
+print "Finding broken references. This may take a while... " if ($fix);
+
+my %broken_ref;
+
+my $doc_fix = 0;
+
+open IN, "git grep ':doc:\`' Documentation/|"
+ or die "Failed to run git grep";
+while (<IN>) {
+ next if (!m,^([^:]+):.*\:doc\:\`([^\`]+)\`,);
+ next if (m,sphinx/,);
+
+ my $file = $1;
+ my $d = $1;
+ my $doc_ref = $2;
+
+ my $f = $doc_ref;
+
+ $d =~ s,(.*/).*,$1,;
+ $f =~ s,.*\<([^\>]+)\>,$1,;
+
+ if ($f =~ m,^/,) {
+ $f = "$f.rst";
+ $f =~ s,^/,Documentation/,;
+ } else {
+ $f = "$d$f.rst";
+ }
+
+ next if (grep -e, glob("$f"));
+
+ if ($fix && !$doc_fix) {
+ print STDERR "\nWARNING: Currently, can't fix broken :doc:`` fields\n";
+ }
+ $doc_fix++;
+
+ print STDERR "$file: :doc:`$doc_ref`\n";
+}
+close IN;
+
+open IN, "git grep 'Documentation/'|"
+ or die "Failed to run git grep";
+while (<IN>) {
+ next if (!m/^([^:]+):(.*)/);
+
+ my $f = $1;
+ my $ln = $2;
+
+ # On linux-next, discard the Next/ directory
+ next if ($f =~ m,^Next/,);
+
+ # Makefiles and scripts contain nasty expressions to parse docs
+ next if ($f =~ m/Makefile/ || $f =~ m/\.(sh|py|pl|~|rej|org|orig)$/);
+
+ # It doesn't make sense to parse hidden files
+ next if ($f =~ m#/\.#);
+
+ # Skip this script
+ next if ($f eq $scriptname);
+
+ # Ignore the dir where documentation will be built
+ next if ($ln =~ m,\b(\S*)Documentation/output,);
+
+ if ($ln =~ m,\b(\S*)(Documentation/[A-Za-z0-9\_\.\,\~/\*\[\]\?+-]*)(.*),) {
+ my $prefix = $1;
+ my $ref = $2;
+ my $base = $2;
+ my $extra = $3;
+
+ # some file references are like:
+ # /usr/src/linux/Documentation/DMA-{API,mapping}.txt
+ # For now, ignore them
+ next if ($extra =~ m/^{/);
+
+ # Remove footnotes at the end like:
+ # Documentation/devicetree/dt-object-internal.txt[1]
+ $ref =~ s/(txt|rst)\[\d+]$/$1/;
+
+ # Remove ending ']' without any '['
+ $ref =~ s/\].*// if (!($ref =~ m/\[/));
+
+			# Remove punctuation marks at the end
+ $ref =~ s/[\,\.]+$//;
+
+ my $fulref = "$prefix$ref";
+
+ $fulref =~ s/^(\<file|ref)://;
+ $fulref =~ s/^[\'\`]+//;
+ $fulref =~ s,^\$\(.*\)/,,;
+ $base =~ s,.*/,,;
+
+ # Remove URL false-positives
+ next if ($fulref =~ m/^http/);
+
+ # Remove sched-pelt false-positive
+ next if ($fulref =~ m,^Documentation/scheduler/sched-pelt$,);
+
+ # Discard some build examples from Documentation/target/tcm_mod_builder.rst
+ next if ($fulref =~ m,mnt/sdb/lio-core-2.6.git/Documentation/target,);
+
+ # Check if exists, evaluating wildcards
+ next if (grep -e, glob("$ref $fulref"));
+
+ # Accept relative Documentation patches for tools/
+ if ($f =~ m/tools/) {
+ my $path = $f;
+ $path =~ s,(.*)/.*,$1,;
+ $path =~ s,testing/selftests/bpf,bpf/bpftool,;
+ next if (grep -e, glob("$path/$ref $path/../$ref $path/$fulref"));
+ }
+
+ # Discard known false-positives
+ if (defined($false_positives{$f})) {
+ next if ($false_positives{$f} eq $fulref);
+ }
+
+ if ($fix) {
+ if (!($ref =~ m/(scripts|Kconfig|Kbuild)/)) {
+ $broken_ref{$ref}++;
+ }
+ } elsif ($warn) {
+ print STDERR "Warning: $f references a file that doesn't exist: $fulref\n";
+ } else {
+ print STDERR "$f: $fulref\n";
+ }
+ }
+}
+close IN;
+
+exit 0 if (!$fix);
+
+# Step 2: Seek for file name alternatives
+print "Auto-fixing broken references. Please double-check the results\n";
+
+foreach my $ref (keys %broken_ref) {
+	my $new = $ref;
+
+ my $basedir = ".";
+ # On translations, only seek inside the translations directory
+ $basedir = $1 if ($ref =~ m,(Documentation/translations/[^/]+),);
+
+ # get just the basename
+ $new =~ s,.*/,,;
+
+ my $f="";
+
+ # usual reason for breakage: DT file moved around
+ if ($ref =~ /devicetree/) {
+ # usual reason for breakage: DT file renamed to .yaml
+ if (!$f) {
+ my $new_ref = $ref;
+ $new_ref =~ s/\.txt$/.yaml/;
+ $f=$new_ref if (-f $new_ref);
+ }
+
+ if (!$f) {
+ my $search = $new;
+ $search =~ s,^.*/,,;
+ $f = qx(find Documentation/devicetree/ -iname "*$search*") if ($search);
+ if (!$f) {
+ # Manufacturer name may have changed
+ $search =~ s/^.*,//;
+ $f = qx(find Documentation/devicetree/ -iname "*$search*") if ($search);
+ }
+ }
+ }
+
+ # usual reason for breakage: file renamed to .rst
+ if (!$f) {
+ $new =~ s/\.txt$/.rst/;
+ $f=qx(find $basedir -iname $new) if ($new);
+ }
+
+ # usual reason for breakage: use dash or underline
+ if (!$f) {
+ $new =~ s/[-_]/[-_]/g;
+ $f=qx(find $basedir -iname $new) if ($new);
+ }
+
+ # Wild guess: seek for the same name on another place
+ if (!$f) {
+ $f = qx(find $basedir -iname $new) if ($new);
+ }
+
+ my @find = split /\s+/, $f;
+
+ if (!$f) {
+ print STDERR "ERROR: Didn't find a replacement for $ref\n";
+ } elsif (scalar(@find) > 1) {
+ print STDERR "WARNING: Won't auto-replace, as found multiple files close to $ref:\n";
+ foreach my $j (@find) {
+ $j =~ s,^./,,;
+ print STDERR " $j\n";
+ }
+ } else {
+ $f = $find[0];
+ $f =~ s,^./,,;
+		print "INFO: Replacing $ref with $f\n";
+ foreach my $j (qx(git grep -l $ref)) {
+ qx(sed "s\@$ref\@$f\@g" -i $j);
+ }
+ }
+}
diff --git a/tools/docs/features-refresh.sh b/tools/docs/features-refresh.sh
new file mode 100755
index 000000000000..c2288124e94a
--- /dev/null
+++ b/tools/docs/features-refresh.sh
@@ -0,0 +1,98 @@
+#
+# Small script that refreshes the kernel feature support status in place.
+#
+
+for F_FILE in Documentation/features/*/*/arch-support.txt; do
+ F=$(grep "^# Kconfig:" "$F_FILE" | cut -c26-)
+
+ #
+ # Each feature F is identified by a pair (O, K), where 'O' can
+ # be either the empty string (for 'nop') or "not" (the logical
+ # negation operator '!'); other operators are not supported.
+ #
+ O=""
+ K=$F
+ if [[ "$F" == !* ]]; then
+ O="not"
+ K=$(echo $F | sed -e 's/^!//g')
+ fi
+
+ #
+ # F := (O, K) is 'valid' iff there is a Kconfig file (for some
+ # arch) which contains K.
+ #
+ # Notice that this definition entails an 'asymmetry' between
+ # the case 'O = ""' and the case 'O = "not"'. E.g., F may be
+ # _invalid_ if:
+ #
+ # [case 'O = ""']
+ # 1) no arch provides support for F,
+ # 2) K does not exist (e.g., it was renamed/mis-typed);
+ #
+ # [case 'O = "not"']
+ # 3) all archs provide support for F,
+ # 4) as in (2).
+ #
+ # The rationale for adopting this definition (and, thus, for
+ # keeping the asymmetry) is:
+ #
+ # We want to be able to 'detect' (2) (or (4)).
+ #
+ # (1) and (3) may further warn the developers about the fact
+ # that K can be removed.
+ #
+ F_VALID="false"
+ for ARCH_DIR in arch/*/; do
+ K_FILES=$(find $ARCH_DIR -name "Kconfig*")
+ K_GREP=$(grep "$K" $K_FILES)
+ if [ ! -z "$K_GREP" ]; then
+ F_VALID="true"
+ break
+ fi
+ done
+ if [ "$F_VALID" = "false" ]; then
+ printf "WARNING: '%s' is not a valid Kconfig\n" "$F"
+ fi
+
+ T_FILE="$F_FILE.tmp"
+ grep "^#" $F_FILE > $T_FILE
+ echo " -----------------------" >> $T_FILE
+ echo " | arch |status|" >> $T_FILE
+ echo " -----------------------" >> $T_FILE
+ for ARCH_DIR in arch/*/; do
+ ARCH=$(echo $ARCH_DIR | sed -e 's/^arch//g' | sed -e 's/\///g')
+ K_FILES=$(find $ARCH_DIR -name "Kconfig*")
+ K_GREP=$(grep "$K" $K_FILES)
+ #
+ # Arch support status values for (O, K) are updated according
+ # to the following rules.
+ #
+ # - ("", K) is 'supported by a given arch', if there is a
+ # Kconfig file for that arch which contains K;
+ #
+ # - ("not", K) is 'supported by a given arch', if there is
+ # no Kconfig file for that arch which contains K;
+ #
+ # - otherwise: preserve the previous status value (if any),
+ # default to 'not yet supported'.
+ #
+	# Notice that, according to these rules, invalid features may be
+ # updated/modified.
+ #
+ if [ "$O" = "" ] && [ ! -z "$K_GREP" ]; then
+ printf " |%12s: | ok |\n" "$ARCH" >> $T_FILE
+ elif [ "$O" = "not" ] && [ -z "$K_GREP" ]; then
+ printf " |%12s: | ok |\n" "$ARCH" >> $T_FILE
+ else
+ S=$(grep -v "^#" "$F_FILE" | grep " $ARCH:")
+ if [ ! -z "$S" ]; then
+ echo "$S" >> $T_FILE
+ else
+ printf " |%12s: | TODO |\n" "$ARCH" \
+ >> $T_FILE
+ fi
+ fi
+ done
+ echo " -----------------------" >> $T_FILE
+ mv $T_FILE $F_FILE
+done
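For reference, each refreshed arch-support.txt status block emitted by the loop above takes this shape (arch names and statuses illustrative):

     -----------------------
     | arch |status|
     -----------------------
     |         arm: | ok |
     |        s390: | TODO |
     -----------------------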
diff --git a/tools/docs/find-unused-docs.sh b/tools/docs/find-unused-docs.sh
new file mode 100755
index 000000000000..05552dbda5bc
--- /dev/null
+++ b/tools/docs/find-unused-docs.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+# (c) 2017, Jonathan Corbet <corbet@lwn.net>
+# sayli karnik <karniksayli1995@gmail.com>
+#
+# This script detects files with kernel-doc comments for exported functions
+# that are not included in documentation.
+#
+# usage: Run 'tools/docs/find-unused-docs.sh directory' from top level of kernel
+# tree.
+#
+# example: $tools/docs/find-unused-docs.sh drivers/scsi
+#
+# Licensed under the terms of the GNU GPL License
+
+if ! [ -d "Documentation" ]; then
+ echo "Run from top level of kernel tree"
+ exit 1
+fi
+
+if [ "$#" -ne 1 ]; then
+ echo "Usage: tools/docs/find-unused-docs.sh directory"
+ exit 1
+fi
+
+if ! [ -d "$1" ]; then
+ echo "Directory $1 doesn't exist"
+ exit 1
+fi
+
+cd "$( dirname "${BASH_SOURCE[0]}" )"
+cd ../..
+
+cd Documentation/
+
+echo "The following files contain kerneldoc comments for exported functions \
+that are not used in the formatted documentation"
+
+# FILES INCLUDED
+
+files_included=($(grep -rHR ".. kernel-doc" --include \*.rst | cut -d " " -f 3))
+
+declare -A FILES_INCLUDED
+
+for each in "${files_included[@]}"; do
+ FILES_INCLUDED[$each]="$each"
+ done
+
+cd ..
+
+# FILES NOT INCLUDED
+
+for file in `find $1 -name '*.c'`; do
+
+ if [[ ${FILES_INCLUDED[$file]+_} ]]; then
+ continue;
+ fi
+ str=$(PYTHONDONTWRITEBYTECODE=1 scripts/kernel-doc -export "$file" 2>/dev/null)
+ if [[ -n "$str" ]]; then
+ echo "$file"
+ fi
+ done
+
diff --git a/tools/docs/gen-redirects.py b/tools/docs/gen-redirects.py
new file mode 100755
index 000000000000..6a6ebf6f42dc
--- /dev/null
+++ b/tools/docs/gen-redirects.py
@@ -0,0 +1,54 @@
+#! /usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright © 2025, Oracle and/or its affiliates.
+# Author: Vegard Nossum <vegard.nossum@oracle.com>
+
+"""Generate HTML redirects for renamed Documentation/**.rst files using
+the output of tools/docs/gen-renames.py.
+
+Example:
+
+ tools/docs/gen-redirects.py --output Documentation/output/ < Documentation/.renames.txt
+"""
+
+import argparse
+import os
+import sys
+
+parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
+parser.add_argument('-o', '--output', help='output directory')
+
+args = parser.parse_args()
+
+for line in sys.stdin:
+ line = line.rstrip('\n')
+
+    old_name, new_name = line.split(' ', 1)
+
+ old_html_path = os.path.join(args.output, old_name + '.html')
+ new_html_path = os.path.join(args.output, new_name + '.html')
+
+ if not os.path.exists(new_html_path):
+ print(f"warning: target does not exist: {new_html_path} (redirect from {old_html_path})")
+ continue
+
+ old_html_dir = os.path.dirname(old_html_path)
+ if not os.path.exists(old_html_dir):
+ os.makedirs(old_html_dir)
+
+ relpath = os.path.relpath(new_name, os.path.dirname(old_name)) + '.html'
+
+ with open(old_html_path, 'w') as f:
+ print(f"""\
+<!DOCTYPE html>
+
+<html lang="en">
+<head>
+ <title>This page has moved</title>
+ <meta http-equiv="refresh" content="0; url={relpath}">
+</head>
+<body>
+<p>This page has moved to <a href="{relpath}">{new_name}</a>.</p>
+</body>
+</html>""", file=f)
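To make the relative-link computation above concrete, a minimal sketch with a hypothetical rename entry:

    import os.path

    old_name = "admin-guide/old-page"    # hypothetical old page (no suffix)
    new_name = "process/new-page"        # hypothetical new page

    # relative link from the old page's directory to the new page
    relpath = os.path.relpath(new_name, os.path.dirname(old_name)) + ".html"
    print(relpath)                       # ../process/new-page.html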
diff --git a/tools/docs/gen-renames.py b/tools/docs/gen-renames.py
new file mode 100755
index 000000000000..8cb3b2157d83
--- /dev/null
+++ b/tools/docs/gen-renames.py
@@ -0,0 +1,130 @@
+#! /usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright © 2025, Oracle and/or its affiliates.
+# Author: Vegard Nossum <vegard.nossum@oracle.com>
+
+"""Trawl repository history for renames of Documentation/**.rst files.
+
+Example:
+
+ tools/docs/gen-renames.py --rev HEAD > Documentation/.renames.txt
+"""
+
+import argparse
+import itertools
+import os
+import subprocess
+import sys
+
+parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
+parser.add_argument('--rev', default='HEAD', help='generate renames up to this revision')
+
+args = parser.parse_args()
+
+def normalize(path):
+ prefix = 'Documentation/'
+ suffix = '.rst'
+
+ assert path.startswith(prefix)
+ assert path.endswith(suffix)
+
+ return path[len(prefix):-len(suffix)]
+
+class Name(object):
+ def __init__(self, name):
+ self.names = [name]
+
+ def rename(self, new_name):
+ self.names.append(new_name)
+
+names = {
+}
+
+for line in subprocess.check_output([
+ 'git', 'log',
+ '--reverse',
+ '--oneline',
+ '--find-renames',
+ '--diff-filter=RD',
+ '--name-status',
+ '--format=commit %H',
+ # ~v4.8-ish is when Sphinx/.rst was added in the first place
+ f'v4.8..{args.rev}',
+ '--',
+ 'Documentation/'
+], text=True).splitlines():
+ # rename
+ if line.startswith('R'):
+ _, old, new = line[1:].split('\t', 2)
+
+ if old.endswith('.rst') and new.endswith('.rst'):
+ old = normalize(old)
+ new = normalize(new)
+
+ name = names.get(old)
+ if name is None:
+ name = Name(old)
+ else:
+ del names[old]
+
+ name.rename(new)
+ names[new] = name
+
+ continue
+
+ # delete
+ if line.startswith('D'):
+ _, old = line.split('\t', 1)
+
+ if old.endswith('.rst'):
+ old = normalize(old)
+
+ # TODO: we could save added/modified files as well and propose
+ # them as alternatives
+ name = names.get(old)
+ if name is None:
+ pass
+ else:
+ del names[old]
+
+ continue
+
+#
+# Get the set of current files so we can sanity check that we aren't
+# redirecting any of those
+#
+
+current_files = set()
+for line in subprocess.check_output([
+ 'git', 'ls-tree',
+ '-r',
+ '--name-only',
+ args.rev,
+ 'Documentation/',
+], text=True).splitlines():
+ if line.endswith('.rst'):
+ current_files.add(normalize(line))
+
+#
+# Format/group/output result
+#
+
+result = []
+for _, v in names.items():
+ old_names = v.names[:-1]
+ new_name = v.names[-1]
+
+ for old_name in old_names:
+ if old_name == new_name:
+ # A file was renamed to its new name twice; don't redirect that
+ continue
+
+ if old_name in current_files:
+ # A file was recreated with a former name; don't redirect those
+ continue
+
+ result.append((old_name, new_name))
+
+for old_name, new_name in sorted(result):
+ print(f"{old_name} {new_name}")
diff --git a/tools/docs/get_abi.py b/tools/docs/get_abi.py
new file mode 100755
index 000000000000..2f0b99401f26
--- /dev/null
+++ b/tools/docs/get_abi.py
@@ -0,0 +1,214 @@
+#!/usr/bin/env python3
+# pylint: disable=R0903
+# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+Parse ABI documentation and produce results from it.
+"""
+
+import argparse
+import logging
+import os
+import sys
+
+# Import Python modules
+
+LIB_DIR = "../lib/python"
+SRC_DIR = os.path.dirname(os.path.realpath(__file__))
+
+sys.path.insert(0, os.path.join(SRC_DIR, LIB_DIR))
+
+from abi.abi_parser import AbiParser # pylint: disable=C0413
+from abi.abi_regex import AbiRegex # pylint: disable=C0413
+from abi.helpers import ABI_DIR, DEBUG_HELP # pylint: disable=C0413
+from abi.system_symbols import SystemSymbols # pylint: disable=C0413
+
+# Command line classes
+
+
+REST_DESC = """
+Produce output in ReST format.
+
+The output is done on two sections:
+
+- Symbols: show all parsed symbols in alphabetic order;
+- Files: cross reference the content of each file with the symbols on it.
+"""
+
+class AbiRest:
+ """Initialize an argparse subparser for rest output"""
+
+ def __init__(self, subparsers):
+ """Initialize argparse subparsers"""
+
+ parser = subparsers.add_parser("rest",
+ formatter_class=argparse.RawTextHelpFormatter,
+ description=REST_DESC)
+
+ parser.add_argument("--enable-lineno", action="store_true",
+ help="enable lineno")
+ parser.add_argument("--raw", action="store_true",
+ help="output text as contained in the ABI files. "
+ "It not used, output will contain dynamically"
+ " generated cross references when possible.")
+ parser.add_argument("--no-file", action="store_true",
+ help="Don't the files section")
+ parser.add_argument("--show-hints", help="Show-hints")
+
+ parser.set_defaults(func=self.run)
+
+ def run(self, args):
+ """Run subparser"""
+
+ parser = AbiParser(args.dir, debug=args.debug)
+ parser.parse_abi()
+ parser.check_issues()
+
+ for t in parser.doc(args.raw, not args.no_file):
+ if args.enable_lineno:
+ print (f".. LINENO {t[1]}#{t[2]}\n\n")
+
+ print(t[0])
+
+class AbiValidate:
+ """Initialize an argparse subparser for ABI validation"""
+
+ def __init__(self, subparsers):
+ """Initialize argparse subparsers"""
+
+ parser = subparsers.add_parser("validate",
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+ description="list events")
+
+ parser.set_defaults(func=self.run)
+
+ def run(self, args):
+ """Run subparser"""
+
+ parser = AbiParser(args.dir, debug=args.debug)
+ parser.parse_abi()
+ parser.check_issues()
+
+
+class AbiSearch:
+ """Initialize an argparse subparser for ABI search"""
+
+ def __init__(self, subparsers):
+ """Initialize argparse subparsers"""
+
+ parser = subparsers.add_parser("search",
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+ description="Search ABI using a regular expression")
+
+ parser.add_argument("expression",
+ help="Case-insensitive search pattern for the ABI symbol")
+
+ parser.set_defaults(func=self.run)
+
+ def run(self, args):
+ """Run subparser"""
+
+ parser = AbiParser(args.dir, debug=args.debug)
+ parser.parse_abi()
+ parser.search_symbols(args.expression)
+
+UNDEFINED_DESC="""
+Check undefined ABIs on local machine.
+
+Read sysfs devnodes and check if the devnodes there are defined inside
+ABI documentation.
+
+The search logic tries to minimize the number of regular expressions to
+search per symbol.
+
+By default, it runs on a single CPU, as Python support for CPU threads
+is still experimental, and multi-process runs in Python are very slow.
+
+In experimental tests, if the number of ABI symbols to search per devnode
+stays within a limit of ~150 regular expressions, using a single CPU
+is a lot faster than using multiple processes. However, if the number of
+regular expressions to check is on the order of ~30000, using multiple
+CPUs speeds up the check.
+"""
+
+class AbiUndefined:
+ """
+ Initialize an argparse subparser for logic to check undefined ABI at
+ the current machine's sysfs
+ """
+
+ def __init__(self, subparsers):
+ """Initialize argparse subparsers"""
+
+ parser = subparsers.add_parser("undefined",
+ formatter_class=argparse.RawTextHelpFormatter,
+ description=UNDEFINED_DESC)
+
+ parser.add_argument("-S", "--sysfs-dir", default="/sys",
+ help="directory where sysfs is mounted")
+ parser.add_argument("-s", "--search-string",
+ help="search string regular expression to limit symbol search")
+ parser.add_argument("-H", "--show-hints", action="store_true",
+ help="Hints about definitions for missing ABI symbols.")
+ parser.add_argument("-j", "--jobs", "--max-workers", type=int, default=1,
+ help="If bigger than one, enables multiprocessing.")
+ parser.add_argument("-c", "--max-chunk-size", type=int, default=50,
+ help="Maximum number of chunk size")
+ parser.add_argument("-f", "--found", action="store_true",
+ help="Also show found items. "
+ "Helpful to debug the parser."),
+ parser.add_argument("-d", "--dry-run", action="store_true",
+ help="Don't actually search for undefined. "
+ "Helpful to debug the parser."),
+
+ parser.set_defaults(func=self.run)
+
+ def run(self, args):
+ """Run subparser"""
+
+ abi = AbiRegex(args.dir, debug=args.debug,
+ search_string=args.search_string)
+
+ abi_symbols = SystemSymbols(abi=abi, hints=args.show_hints,
+ sysfs=args.sysfs_dir)
+
+ abi_symbols.check_undefined_symbols(dry_run=args.dry_run,
+ found=args.found,
+ max_workers=args.jobs,
+ chunk_size=args.max_chunk_size)
+
+
+def main():
+ """Main program"""
+
+ parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
+
+ parser.add_argument("-d", "--debug", type=int, default=0, help="debug level")
+ parser.add_argument("-D", "--dir", default=ABI_DIR, help=DEBUG_HELP)
+
+ subparsers = parser.add_subparsers()
+
+ AbiRest(subparsers)
+ AbiValidate(subparsers)
+ AbiSearch(subparsers)
+ AbiUndefined(subparsers)
+
+ args = parser.parse_args()
+
+ if args.debug:
+ level = logging.DEBUG
+ else:
+ level = logging.INFO
+
+ logging.basicConfig(level=level, format="[%(levelname)s] %(message)s")
+
+ if "func" in args:
+ args.func(args)
+ else:
+ sys.exit(f"Please specify a valid command for {sys.argv[0]}")
+
+
+# Call main method
+if __name__ == "__main__":
+ main()
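The "undefined" subcommand description above motivates chunking regular expressions across worker processes. A minimal sketch of that strategy, assuming nothing about the real SystemSymbols internals (all names hypothetical):

    import re
    from concurrent.futures import ProcessPoolExecutor

    def unmatched(patterns, lines):
        # lines in this chunk that no pattern matches
        compiled = [re.compile(p) for p in patterns]
        return [l for l in lines if not any(c.search(l) for c in compiled)]

    def undefined_symbols(patterns, lines, jobs=1, chunk_size=50):
        if jobs <= 1:                   # single process: one linear scan
            return unmatched(patterns, lines)
        chunks = [patterns[i:i + chunk_size]
                  for i in range(0, len(patterns), chunk_size)]
        result = set(lines)
        with ProcessPoolExecutor(max_workers=jobs) as ex:
            for part in ex.map(unmatched, chunks, [lines] * len(chunks)):
                result &= set(part)     # undefined = unmatched by every chunk
        return sorted(result)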
diff --git a/tools/docs/get_feat.py b/tools/docs/get_feat.py
new file mode 100755
index 000000000000..2b5155a1f134
--- /dev/null
+++ b/tools/docs/get_feat.py
@@ -0,0 +1,225 @@
+#!/usr/bin/env python3
+# pylint: disable=R0902,R0911,R0912,R0914,R0915
+# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
+# SPDX-License-Identifier: GPL-2.0
+
+
+"""
+Parse the Linux Feature files and produce a ReST book.
+"""
+
+import argparse
+import os
+import subprocess
+import sys
+
+from pprint import pprint
+
+LIB_DIR = "../../tools/lib/python"
+SRC_DIR = os.path.dirname(os.path.realpath(__file__))
+
+sys.path.insert(0, os.path.join(SRC_DIR, LIB_DIR))
+
+from feat.parse_features import ParseFeature # pylint: disable=C0413
+
+SRCTREE = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../..")
+DEFAULT_DIR = "Documentation/features"
+
+
+class GetFeature:
+ """Helper class to parse feature parsing parameters"""
+
+ @staticmethod
+ def get_current_arch():
+ """Detects the current architecture"""
+
+ proc = subprocess.run(["uname", "-m"], check=True,
+ capture_output=True, text=True)
+
+ arch = proc.stdout.strip()
+ if arch in ["x86_64", "i386"]:
+ arch = "x86"
+ elif arch == "s390x":
+ arch = "s390"
+
+ return arch
+
+ def run_parser(self, args):
+ """Execute the feature parser"""
+
+ feat = ParseFeature(args.directory, args.debug, args.enable_fname)
+ data = feat.parse()
+
+ if args.debug > 2:
+ pprint(data)
+
+ return feat
+
+ def run_rest(self, args):
+ """
+ Generate tables in ReST format. Three types of tables are
+ supported, depending on the calling arguments:
+
+ - neither feature nor arch is passed: generates a full matrix;
+        - arch provided: generates a table of supported features for the
+          given architecture, optionally filtered by feature;
+        - only feature provided: generates a table with feature details,
+          showing on which architectures it is implemented.
+ """
+
+ feat = self.run_parser(args)
+
+ if args.arch:
+ rst = feat.output_arch_table(args.arch, args.feat)
+ elif args.feat:
+ rst = feat.output_feature(args.feat)
+ else:
+ rst = feat.output_matrix()
+
+ print(rst)
+
+ def run_current(self, args):
+ """
+ Instead of using a --arch parameter, get feature for the current
+ architecture.
+ """
+
+ args.arch = self.get_current_arch()
+
+ self.run_rest(args)
+
+ def run_list(self, args):
+ """
+ Generate a list of features for a given architecture, in a format
+ parseable by other scripts. The output format is not ReST.
+ """
+
+ if not args.arch:
+ args.arch = self.get_current_arch()
+
+ feat = self.run_parser(args)
+ msg = feat.list_arch_features(args.arch, args.feat)
+
+ print(msg)
+
+ def parse_arch(self, parser):
+ """Add a --arch parsing argument"""
+
+ parser.add_argument("--arch",
+ help="Output features for an specific"
+ " architecture, optionally filtering for a "
+ "single specific feature.")
+
+ def parse_feat(self, parser):
+ """Add a --feat parsing argument"""
+
+ parser.add_argument("--feat", "--feature",
+ help="Output features for a single specific "
+ "feature.")
+
+
+ def current_args(self, subparsers):
+ """Implementscurrent argparse subparser"""
+
+ parser = subparsers.add_parser("current",
+ formatter_class=argparse.RawTextHelpFormatter,
+ description="Output table in ReST "
+ "compatible ASCII format "
+ "with features for this "
+ "machine's architecture")
+
+ self.parse_feat(parser)
+ parser.set_defaults(func=self.run_current)
+
+ def rest_args(self, subparsers):
+ """Implement rest argparse subparser"""
+
+ parser = subparsers.add_parser("rest",
+ formatter_class=argparse.RawTextHelpFormatter,
+ description="Output table(s) in ReST "
+ "compatible ASCII format "
+ "with features in ReST "
+ "markup language. The "
+ "output is affected by "
+ "--arch or --feat/--feature"
+ " flags.")
+
+ self.parse_arch(parser)
+ self.parse_feat(parser)
+ parser.set_defaults(func=self.run_rest)
+
+ def list_args(self, subparsers):
+ """Implement list argparse subparser"""
+
+ parser = subparsers.add_parser("list",
+ formatter_class=argparse.RawTextHelpFormatter,
+ description="List features for this "
+ "machine's architecture, "
+ "using an easier to parse "
+ "format. The output is "
+ "affected by --arch flag.")
+
+ self.parse_arch(parser)
+ self.parse_feat(parser)
+ parser.set_defaults(func=self.run_list)
+
+ def validate_args(self, subparsers):
+ """Implement validate argparse subparser"""
+
+ parser = subparsers.add_parser("validate",
+ formatter_class=argparse.RawTextHelpFormatter,
+ description="Validate the contents of "
+ "the files under "
+ f"{DEFAULT_DIR}.")
+
+ parser.set_defaults(func=self.run_parser)
+
+ def parser(self):
+ """
+        Create an argparse parser with common options and several subparsers
+ """
+ parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
+
+ parser.add_argument("-d", "--debug", action="count", default=0,
+ help="Put the script in verbose mode, useful for "
+ "debugging. Can be called multiple times, to "
+ "increase verbosity.")
+
+ parser.add_argument("--directory", "--dir", default=DEFAULT_DIR,
+ help="Changes the location of the Feature files. "
+ f"By default, it uses the {DEFAULT_DIR} "
+ "directory.")
+
+ parser.add_argument("--enable-fname", action="store_true",
+ help="Prints the file name of the feature files. "
+ "This can be used in order to track "
+ "dependencies during documentation build.")
+
+ subparsers = parser.add_subparsers()
+
+ self.current_args(subparsers)
+ self.rest_args(subparsers)
+ self.list_args(subparsers)
+ self.validate_args(subparsers)
+
+ args = parser.parse_args()
+
+ return args
+
+
+def main():
+ """Main program"""
+
+ feat = GetFeature()
+
+ args = feat.parser()
+
+ if "func" in args:
+ args.func(args)
+ else:
+ sys.exit(f"Please specify a valid command for {sys.argv[0]}")
+
+
+# Call main method
+if __name__ == "__main__":
+ main()
diff --git a/tools/docs/list-arch.sh b/tools/docs/list-arch.sh
new file mode 100755
index 000000000000..96fe83b7058b
--- /dev/null
+++ b/tools/docs/list-arch.sh
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Small script that visualizes the kernel feature support status
+# of an architecture.
+#
+# (If no arguments are given then it will print the host architecture's status.)
+#
+
+ARCH=${1:-$(uname -m | sed 's/x86_64/x86/' | sed 's/i386/x86/' | sed 's/s390x/s390/')}
+
+$(dirname $0)/get_feat.py list --arch $ARCH
diff --git a/tools/docs/parse-headers.py b/tools/docs/parse-headers.py
new file mode 100755
index 000000000000..436acea4c6ca
--- /dev/null
+++ b/tools/docs/parse-headers.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2016, 2025 by Mauro Carvalho Chehab <mchehab@kernel.org>.
+# pylint: disable=C0103
+
+"""
+Convert a C header or source file ``FILE_IN`` into a reStructuredText
+file included via a ``.. parsed-literal`` block, with cross-references to
+the documentation files that describe the API. It accepts an optional
+``FILE_RULES`` file that describes which elements will be either ignored
+or pointed to a non-default reference type/name.
+
+The output is written to ``FILE_OUT``.
+
+It is capable of identifying defines, functions, structs, typedefs,
+enums and enum symbols, and creates cross-references for all of them.
+It is also capable of distinguishing a #define used for specifying a
+Linux ioctl.
+
+The optional ``FILE_RULES`` contains a set of rules like:
+
+ ignore ioctl VIDIOC_ENUM_FMT
+ replace ioctl VIDIOC_DQBUF vidioc_qbuf
+ replace define V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ :c:type:`v4l2_event_motion_det`
+"""
+
+import argparse
+import os.path
+import sys
+
+src_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.insert(0, os.path.join(src_dir, '../lib/python'))
+from kdoc.parse_data_structs import ParseDataStructs
+from kdoc.enrich_formatter import EnrichFormatter
+
+def main():
+ """Main function"""
+ parser = argparse.ArgumentParser(description=__doc__,
+ formatter_class=EnrichFormatter)
+
+ parser.add_argument("-d", "--debug", action="count", default=0,
+ help="Increase debug level. Can be used multiple times")
+ parser.add_argument("-t", "--toc", action="store_true",
+ help="instead of a literal block, outputs a TOC table at the RST file")
+
+ parser.add_argument("file_in", help="Input C file")
+ parser.add_argument("file_out", help="Output RST file")
+ parser.add_argument("file_rules", nargs="?",
+ help="Exceptions file (optional)")
+
+ args = parser.parse_args()
+
+ parser = ParseDataStructs(debug=args.debug)
+ parser.parse_file(args.file_in, args.file_rules)
+
+ parser.debug_print()
+ parser.write_output(args.file_in, args.file_out, args.toc)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/docs/sphinx-build-wrapper b/tools/docs/sphinx-build-wrapper
new file mode 100755
index 000000000000..7a5fcef25429
--- /dev/null
+++ b/tools/docs/sphinx-build-wrapper
@@ -0,0 +1,864 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2025 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+#
+# pylint: disable=R0902, R0912, R0913, R0914, R0915, R0917, C0103
+#
+# Converted from docs Makefile and parallel-wrapper.sh, both under
+# GPLv2, copyrighted since 2008 by the following authors:
+#
+# Akira Yokosawa <akiyks@gmail.com>
+# Arnd Bergmann <arnd@arndb.de>
+# Breno Leitao <leitao@debian.org>
+# Carlos Bilbao <carlos.bilbao@amd.com>
+# Dave Young <dyoung@redhat.com>
+# Donald Hunter <donald.hunter@gmail.com>
+# Geert Uytterhoeven <geert+renesas@glider.be>
+# Jani Nikula <jani.nikula@intel.com>
+# Jan Stancek <jstancek@redhat.com>
+# Jonathan Corbet <corbet@lwn.net>
+# Joshua Clayton <stillcompiling@gmail.com>
+# Kees Cook <keescook@chromium.org>
+# Linus Torvalds <torvalds@linux-foundation.org>
+# Magnus Damm <damm+renesas@opensource.se>
+# Masahiro Yamada <masahiroy@kernel.org>
+# Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+# Maxim Cournoyer <maxim.cournoyer@gmail.com>
+# Peter Foley <pefoley2@pefoley.com>
+# Randy Dunlap <rdunlap@infradead.org>
+# Rob Herring <robh@kernel.org>
+# Shuah Khan <shuahkh@osg.samsung.com>
+# Thorsten Blum <thorsten.blum@toblux.com>
+# Tomas Winkler <tomas.winkler@intel.com>
+
+
+"""
+Sphinx build wrapper that handles Kernel-specific business rules:
+
+- it gets the Kernel build environment vars;
+- it determines the best parallelism;
+- it handles SPHINXDIRS.
+
+This tool ensures that MIN_PYTHON_VERSION is satisfied. If the running
+version is below that, it looks for a newer Python version and, if found,
+re-runs using the newer one.
+"""
+
+import argparse
+import locale
+import os
+import re
+import shlex
+import shutil
+import subprocess
+import sys
+
+from concurrent import futures
+from glob import glob
+
+
+LIB_DIR = "../lib/python"
+SRC_DIR = os.path.dirname(os.path.realpath(__file__))
+
+sys.path.insert(0, os.path.join(SRC_DIR, LIB_DIR))
+
+from kdoc.python_version import PythonVersion
+from kdoc.latex_fonts import LatexFontChecker
+from jobserver import JobserverExec # pylint: disable=C0413,C0411,E0401
+
+#
+# Some constants
+#
+VENV_DEFAULT = "sphinx_latest"
+MIN_PYTHON_VERSION = PythonVersion("3.7").version
+PAPER = ["", "a4", "letter"]
+
+TARGETS = {
+ "cleandocs": { "builder": "clean" },
+ "linkcheckdocs": { "builder": "linkcheck" },
+ "htmldocs": { "builder": "html" },
+ "epubdocs": { "builder": "epub", "out_dir": "epub" },
+ "texinfodocs": { "builder": "texinfo", "out_dir": "texinfo" },
+ "infodocs": { "builder": "texinfo", "out_dir": "texinfo" },
+ "mandocs": { "builder": "man", "out_dir": "man" },
+ "latexdocs": { "builder": "latex", "out_dir": "latex" },
+ "pdfdocs": { "builder": "latex", "out_dir": "latex" },
+ "xmldocs": { "builder": "xml", "out_dir": "xml" },
+}
+
+
+#
+# SphinxBuilder class
+#
+
+class SphinxBuilder:
+ """
+ Handles a sphinx-build target, adding needed arguments to build
+ with the Kernel.
+ """
+
+ def get_path(self, path, use_cwd=False, abs_path=False):
+ """
+        Ancillary routine to handle paths the right way, as a shell does.
+
+        It first expands "~" and "~user". Then, if the path is not absolute,
+        it joins self.srctree. Finally, if requested, it converts to abspath.
+ """
+
+ path = os.path.expanduser(path)
+ if not path.startswith("/"):
+ if use_cwd:
+ base = os.getcwd()
+ else:
+ base = self.srctree
+
+ path = os.path.join(base, path)
+
+ if abs_path:
+ return os.path.abspath(path)
+
+ return path
+
+ def check_rust(self):
+ """
+ Checks if Rust is enabled
+ """
+ self.rustdoc = False
+
+ config = os.path.join(self.srctree, ".config")
+
+ if not os.path.isfile(config):
+ return
+
+ re_rust = re.compile(r"CONFIG_RUST=(m|y)")
+
+ try:
+ with open(config, "r", encoding="utf-8") as fp:
+ for line in fp:
+ if re_rust.match(line):
+ self.rustdoc = True
+ return
+
+        except OSError as e:
+            print(f"Failed to open {config}: {e}", file=sys.stderr)
+
+ def get_sphinx_extra_opts(self, n_jobs):
+ """
+ Get the number of jobs to be used for docs build passed via command
+ line and desired sphinx verbosity.
+
+ The number of jobs can be on different places:
+
+ 1) It can be passed via "-j" argument;
+ 2) The SPHINXOPTS="-j8" env var may have "-j";
+ 3) if called via GNU make, -j specifies the desired number of jobs.
+ with GNU makefile, this number is available via POSIX jobserver;
+ 4) if none of the above is available, it should default to "-jauto",
+ and let sphinx decide the best value.
+ """
+
+ #
+ # SPHINXOPTS env var, if used, contains extra arguments to be used
+ # by sphinx-build time. Among them, it may contain sphinx verbosity
+ # and desired number of parallel jobs.
+ #
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-j', '--jobs', type=int)
+ parser.add_argument('-q', '--quiet', action='store_true')
+
+ #
+ # Other sphinx-build arguments go as-is, so place them
+ # at self.sphinxopts, using shell parser
+ #
+ sphinxopts = shlex.split(os.environ.get("SPHINXOPTS", ""))
+
+ #
+ # Build a list of sphinx args, honoring verbosity here if specified
+ #
+
+ verbose = self.verbose
+ sphinx_args, self.sphinxopts = parser.parse_known_args(sphinxopts)
+ if sphinx_args.quiet is True:
+ verbose = False
+
+ #
+ # If the user explicitly sets "-j" at command line, use it.
+ # Otherwise, pick it from SPHINXOPTS args
+ #
+ if n_jobs:
+ self.n_jobs = n_jobs
+ elif sphinx_args.jobs:
+ self.n_jobs = sphinx_args.jobs
+ else:
+ self.n_jobs = None
+
+ if not verbose:
+ self.sphinxopts += ["-q"]
+
+ def __init__(self, builddir, venv=None, verbose=False, n_jobs=None,
+ interactive=None):
+ """Initialize internal variables"""
+ self.venv = venv
+ self.verbose = None
+
+ #
+ # Normal variables passed from Kernel's makefile
+ #
+ self.kernelversion = os.environ.get("KERNELVERSION", "unknown")
+ self.kernelrelease = os.environ.get("KERNELRELEASE", "unknown")
+ self.pdflatex = os.environ.get("PDFLATEX", "xelatex")
+
+ #
+ # Kernel main Makefile defines a PYTHON3 variable whose default is
+ # "python3". When set to a different value, it allows running a
+        # different version than the default official python3 package.
+ # Several distros package python3xx-sphinx packages with newer
+ # versions of Python and sphinx-build.
+ #
+ # Honor such variable different than default
+ #
+ self.python = os.environ.get("PYTHON3")
+ if self.python == "python3":
+ self.python = None
+
+ if not interactive:
+ self.latexopts = os.environ.get("LATEXOPTS", "-interaction=batchmode -no-shell-escape")
+ else:
+ self.latexopts = os.environ.get("LATEXOPTS", "")
+
+ if not verbose:
+ verbose = bool(os.environ.get("KBUILD_VERBOSE", "") != "")
+
+ if verbose is not None:
+ self.verbose = verbose
+
+ #
+ # Source tree directory. This needs to be at os.environ, as
+ # Sphinx extensions use it
+ #
+ self.srctree = os.environ.get("srctree")
+ if not self.srctree:
+ self.srctree = "."
+ os.environ["srctree"] = self.srctree
+
+ #
+ # Now that we can expand srctree, get other directories as well
+ #
+ self.sphinxbuild = os.environ.get("SPHINXBUILD", "sphinx-build")
+ self.kerneldoc = self.get_path(os.environ.get("KERNELDOC",
+ "scripts/kernel-doc.py"))
+ self.builddir = self.get_path(builddir, use_cwd=True, abs_path=True)
+
+ #
+ # Get directory locations for LaTeX build toolchain
+ #
+ self.pdflatex_cmd = shutil.which(self.pdflatex)
+ self.latexmk_cmd = shutil.which("latexmk")
+
+ self.env = os.environ.copy()
+
+ self.get_sphinx_extra_opts(n_jobs)
+
+ self.check_rust()
+
+ #
+ # If venv command line argument is specified, run Sphinx from venv
+ #
+ if venv:
+ bin_dir = os.path.join(venv, "bin")
+ if not os.path.isfile(os.path.join(bin_dir, "activate")):
+ sys.exit(f"Venv {venv} not found.")
+
+ # "activate" virtual env
+ self.env["PATH"] = bin_dir + ":" + self.env["PATH"]
+ self.env["VIRTUAL_ENV"] = venv
+ if "PYTHONHOME" in self.env:
+ del self.env["PYTHONHOME"]
+ print(f"Setting venv to {venv}")
+
+ def run_sphinx(self, sphinx_build, build_args, *args, **pwargs):
+ """
+ Executes sphinx-build using the current python3 command.
+
+ When called via GNU make, the POSIX jobserver is used to tell how
+ many jobs are still available from the job pool. Claim all remaining
+ jobs, as we don't want sphinx-build to run in parallel with other
+ jobs.
+
+ Despite that, the user may force a different value than the number
+ of available jobs via the command line.
+
+ The "with" logic here is used to ensure that the claimed jobs will
+ be freed once the subprocess finishes.
+ """
+
+ with JobserverExec() as jobserver:
+ if jobserver.claim:
+ #
+ # when GNU make is used, claim available jobs from jobserver
+ #
+ n_jobs = str(jobserver.claim)
+ else:
+ #
+ # Otherwise, let sphinx decide by default
+ #
+ n_jobs = "auto"
+
+ #
+ # If explicitly requested via command line, override default
+ #
+ if self.n_jobs:
+ n_jobs = str(self.n_jobs)
+
+ #
+ # We can't simply call python3 sphinx-build, as OpenSUSE
+ # Tumbleweed uses an ELF binary file (/usr/bin/alts) to switch
+ # between different versions of sphinx-build. So, only call it
+ # prepending "python3.xx" when PYTHON3 variable is not default.
+ #
+ if self.python:
+ cmd = [self.python]
+ else:
+ cmd = []
+
+ cmd += [sphinx_build]
+ cmd += [f"-j{n_jobs}"]
+ cmd += build_args
+ cmd += self.sphinxopts
+
+ if self.verbose:
+ print(" ".join(cmd))
+
+ return subprocess.call(cmd, *args, **pwargs)
+
+ def handle_html(self, css, output_dir):
+ """
+ Extra steps for HTML and epub output.
+
+ For such targets, we need to ensure that CSS will be properly
+ copied to the output _static directory
+ """
+
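+ # For instance, a relative --css argument like "my-style.css"
+ # (hypothetical name) is resolved against srctree below, while an
+ # absolute path is used as-is before being copied into _static.
+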
+ if css:
+ css = os.path.expanduser(css)
+ if not css.startswith("/"):
+ css = os.path.join(self.srctree, css)
+
+ static_dir = os.path.join(output_dir, "_static")
+ os.makedirs(static_dir, exist_ok=True)
+
+ try:
+ shutil.copy2(css, static_dir)
+ except (OSError, IOError) as e:
+ print(f"Warning: Failed to copy CSS: {e}", file=sys.stderr)
+
+ if self.rustdoc:
+ print("Building rust docs")
+ if "MAKE" in self.env:
+ cmd = [self.env["MAKE"]]
+ else:
+ cmd = ["make", "LLVM=1"]
+
+ cmd += [ "rustdoc"]
+ if self.verbose:
+ print(" ".join(cmd))
+
+ try:
+ subprocess.run(cmd, check=True)
+ except subprocess.CalledProcessError as e:
+ print(f"Ignored errors when building rustdoc: {e}. Is RUST enabled?",
+ file=sys.stderr)
+
+ def build_pdf_file(self, latex_cmd, from_dir, path):
+ """Builds a single pdf file using latex_cmd"""
+ try:
+ subprocess.run(latex_cmd + [path],
+ cwd=from_dir, check=True, env=self.env)
+
+ return True
+ except subprocess.CalledProcessError:
+ return False
+
+ def pdf_parallel_build(self, tex_suffix, latex_cmd, tex_files, n_jobs):
+ """Build PDF files in parallel if possible"""
+ builds = {}
+ build_failed = False
+ max_len = 0
+ has_tex = False
+
+ #
+ # LaTeX PDF error code is almost useless for us:
+ # any warning makes it non-zero. For kernel doc builds it always returns
+ # non-zero even when the build succeeds. So, let's do the next best
+ # thing: ignore build errors. At the end, check whether all PDF files
+ # were built, printing a summary with the built ones and returning 0
+ # if all of them were actually built.
+ #
+ with futures.ThreadPoolExecutor(max_workers=n_jobs) as executor:
+ jobs = {}
+
+ for from_dir, pdf_dir, entry in tex_files:
+ name = entry.name
+
+ if not name.endswith(tex_suffix):
+ continue
+
+ name = name[:-len(tex_suffix)]
+ has_tex = True
+
+ future = executor.submit(self.build_pdf_file, latex_cmd,
+ from_dir, entry.path)
+ jobs[future] = (from_dir, pdf_dir, name)
+
+ for future in futures.as_completed(jobs):
+ from_dir, pdf_dir, name = jobs[future]
+
+ pdf_name = name + ".pdf"
+ pdf_from = os.path.join(from_dir, pdf_name)
+ pdf_to = os.path.join(pdf_dir, pdf_name)
+ out_name = os.path.relpath(pdf_to, self.builddir)
+ max_len = max(max_len, len(out_name))
+
+ try:
+ success = future.result()
+
+ if success and os.path.exists(pdf_from):
+ os.rename(pdf_from, pdf_to)
+
+ #
+ # if verbose, get the name of built PDF file
+ #
+ if self.verbose:
+ builds[out_name] = "SUCCESS"
+ else:
+ builds[out_name] = "FAILED"
+ build_failed = True
+ except Exception as e: # pylint: disable=broad-except
+ builds[out_name] = f"FAILED ({repr(e)})"
+ build_failed = True
+
+ #
+ # Handle case where no .tex files were found
+ #
+ if not has_tex:
+ out_name = "LaTeX files"
+ max_len = max(max_len, len(out_name))
+ builds[out_name] = "FAILED: no .tex files were generated"
+ build_failed = True
+
+ return builds, build_failed, max_len
+
+ def handle_pdf(self, output_dirs, deny_vf):
+ """
+ Extra steps for PDF output.
+
+ As PDF is handled via a LaTeX output, after building the .tex file,
+ a new build is needed to create the PDF output from the latex
+ directory.
+ """
+ builds = {}
+ max_len = 0
+ tex_suffix = ".tex"
+ tex_files = []
+
+ #
+ # Since early 2024, Fedora and openSUSE tumbleweed have started
+ # deploying variable-font format of "Noto CJK", causing LaTeX
+ # to break with CJK. Work around it, by denying the variable font
+ # usage during xelatex build by passing the location of a config
+ # file with a deny list.
+ #
+ # See tools/docs/lib/latex_fonts.py for more details.
+ #
+ if deny_vf:
+ deny_vf = os.path.expanduser(deny_vf)
+ if os.path.isdir(deny_vf):
+ self.env["XDG_CONFIG_HOME"] = deny_vf
+
+ for from_dir in output_dirs:
+ pdf_dir = os.path.join(from_dir, "../pdf")
+ os.makedirs(pdf_dir, exist_ok=True)
+
+ if self.latexmk_cmd:
+ latex_cmd = [self.latexmk_cmd, f"-{self.pdflatex}"]
+ else:
+ latex_cmd = [self.pdflatex]
+
+ latex_cmd.extend(shlex.split(self.latexopts))
+
+ # Get a list of tex files to process
+ with os.scandir(from_dir) as it:
+ for entry in it:
+ if entry.name.endswith(tex_suffix):
+ tex_files.append((from_dir, pdf_dir, entry))
+
+ #
+ # When using make, the number of jobs comes from the POSIX jobserver.
+ # For command-line builds, serialize by default, except if the user
+ # explicitly sets the number of jobs below.
+ #
+
+ #
+ # When using make, jobserver.claim is the number of jobs that were
+ # used with "-j" and that aren't used by other make targets
+ #
+ with JobserverExec() as jobserver:
+ n_jobs = 1
+
+ #
+ # Handle the case where a parameter is passed via the command line,
+ # using it as the default if the jobserver doesn't claim anything
+ #
+ if self.n_jobs:
+ try:
+ n_jobs = int(self.n_jobs)
+ except ValueError:
+ pass
+
+ if jobserver.claim:
+ n_jobs = jobserver.claim
+
+ builds, build_failed, max_len = self.pdf_parallel_build(tex_suffix,
+ latex_cmd,
+ tex_files,
+ n_jobs)
+
+ #
+ # In verbose mode, print a summary with the build results per file.
+ # Otherwise, print a single line with all failures, if any.
+ # In both cases, a non-zero exit code indicates build failures.
+ #
+ if self.verbose:
+ msg = "Summary"
+ msg += "\n" + "=" * len(msg)
+ print()
+ print(msg)
+
+ for pdf_name, pdf_file in builds.items():
+ print(f"{pdf_name:<{max_len}}: {pdf_file}")
+
+ print()
+ if build_failed:
+ msg = LatexFontChecker().check()
+ if msg:
+ print(msg)
+
+ sys.exit("Error: not all PDF files were created.")
+
+ elif build_failed:
+ n_failures = len(builds)
+ failures = ", ".join(builds.keys())
+
+ msg = LatexFontChecker().check()
+ if msg:
+ print(msg)
+
+ sys.exit(f"Error: Can't build {n_failures} PDF file(s): {failures}")
+
+ def handle_info(self, output_dirs):
+ """
+ Extra steps for Info output.
+
+ For texinfo generation, an additional make is needed from the
+ texinfo directory.
+ """
+
+ for output_dir in output_dirs:
+ try:
+ subprocess.run(["make", "info"], cwd=output_dir, check=True)
+ except subprocess.CalledProcessError as e:
+ sys.exit(f"Error generating info docs: {e}")
+
+ def handle_man(self, kerneldoc, docs_dir, src_dir, output_dir):
+ """
+ Create man pages from kernel-doc output
+ """
+
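+ # Illustrative example (hypothetical names): a kernel-doc output line
+ # '.TH "MY_FUNC" 9 "my_func"' is split by re_man below into section
+ # "9" and name "my_func", producing <output_dir>/my_func.9.
+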
+ re_kernel_doc = re.compile(r"^\.\.\s+kernel-doc::\s*(\S+)")
+ re_man = re.compile(r'^\.TH "[^"]*" (\d+) "([^"]*)"')
+
+ if docs_dir == src_dir:
+ #
+ # Pick up kernel-doc markups from the entire source tree
+ #
+ kdoc_files = set([self.srctree])
+ else:
+ kdoc_files = set()
+
+ for fname in glob(os.path.join(src_dir, "**"), recursive=True):
+ if os.path.isfile(fname) and fname.endswith(".rst"):
+ with open(fname, "r", encoding="utf-8") as in_fp:
+ data = in_fp.read()
+
+ for line in data.split("\n"):
+ match = re_kernel_doc.match(line)
+ if match:
+ if os.path.isfile(match.group(1)):
+ kdoc_files.add(match.group(1))
+
+ if not kdoc_files:
+ sys.exit(f"Directory {src_dir} doesn't contain kernel-doc tags")
+
+ cmd = [kerneldoc, "-m"] + sorted(kdoc_files)
+ try:
+ if self.verbose:
+ print(" ".join(cmd))
+
+ result = subprocess.run(cmd, stdout=subprocess.PIPE, text=True)
+
+ if result.returncode:
+ print(f"Warning: kernel-doc returned {result.returncode} warnings")
+
+ except (OSError, ValueError, subprocess.SubprocessError) as e:
+ sys.exit(f"Failed to create man pages for {src_dir}: {repr(e)}")
+
+ fp = None
+ try:
+ for line in result.stdout.split("\n"):
+ match = re_man.match(line)
+ if not match:
+ if fp:
+ fp.write(line + '\n')
+ continue
+
+ if fp:
+ fp.close()
+
+ fname = f"{output_dir}/{match.group(2)}.{match.group(1)}"
+
+ if self.verbose:
+ print(f"Creating {fname}")
+ fp = open(fname, "w", encoding="utf-8")
+ fp.write(line + '\n')
+ finally:
+ if fp:
+ fp.close()
+
+ def cleandocs(self, builder): # pylint: disable=W0613
+ """Remove documentation output directory"""
+ shutil.rmtree(self.builddir, ignore_errors=True)
+
+ def build(self, target, sphinxdirs=None,
+ theme=None, css=None, paper=None, deny_vf=None,
+ skip_sphinx=False):
+ """
+ Build documentation using Sphinx. This is the core function of this
+ module. It prepares all arguments required by sphinx-build.
+ """
+
+ builder = TARGETS[target]["builder"]
+ out_dir = TARGETS[target].get("out_dir", "")
+
+ #
+ # Cleandocs doesn't require sphinx-build
+ #
+ if target == "cleandocs":
+ self.cleandocs(builder)
+ return
+
+ if theme:
+ os.environ["DOCS_THEME"] = theme
+
+ #
+ # Other targets require sphinx-build, so check if it exists
+ #
+ if not skip_sphinx:
+ sphinxbuild = shutil.which(self.sphinxbuild, path=self.env["PATH"])
+ if not sphinxbuild and target != "mandocs":
+ sys.exit(f"Error: {self.sphinxbuild} not found in PATH.\n")
+
+ if target == "pdfdocs":
+ if not self.pdflatex_cmd and not self.latexmk_cmd:
+ sys.exit("Error: pdflatex or latexmk required for PDF generation")
+
+ docs_dir = os.path.abspath(os.path.join(self.srctree, "Documentation"))
+
+ #
+ # Fill in base arguments for Sphinx build
+ #
+ kerneldoc = self.kerneldoc
+ if kerneldoc.startswith(self.srctree):
+ kerneldoc = os.path.relpath(kerneldoc, self.srctree)
+
+ args = [ "-b", builder, "-c", docs_dir ]
+
+ if builder == "latex":
+ if not paper:
+ paper = PAPER[1]
+
+ args.extend(["-D", f"latex_elements.papersize={paper}paper"])
+
+ if self.rustdoc:
+ args.extend(["-t", "rustdoc"])
+
+ if not sphinxdirs:
+ sphinxdirs = os.environ.get("SPHINXDIRS", ".")
+
+ #
+ # The sphinx-build tool has a bug: internally, it tries to set
+ # locale with locale.setlocale(locale.LC_ALL, ''). This causes a
+ # crash if language is not set. Detect and fix it.
+ #
+ try:
+ locale.setlocale(locale.LC_ALL, '')
+ except locale.Error:
+ self.env["LC_ALL"] = "C"
+
+ #
+ # sphinxdirs can be a list or a whitespace-separated string
+ #
+ sphinxdirs_list = []
+ for sphinxdir in sphinxdirs:
+ if isinstance(sphinxdir, list):
+ sphinxdirs_list += sphinxdir
+ else:
+ sphinxdirs_list += sphinxdir.split()
+
+ #
+ # Step 1: Build each directory separately.
+ #
+ # This is not the best way of handling it, as cross-references
+ # between directories will be broken, but this is what we've been
+ # doing since the beginning.
+ #
+ output_dirs = []
+ for sphinxdir in sphinxdirs_list:
+ src_dir = os.path.join(docs_dir, sphinxdir)
+ doctree_dir = os.path.join(self.builddir, ".doctrees")
+ output_dir = os.path.join(self.builddir, sphinxdir, out_dir)
+
+ #
+ # Make directory names canonical
+ #
+ src_dir = os.path.normpath(src_dir)
+ doctree_dir = os.path.normpath(doctree_dir)
+ output_dir = os.path.normpath(output_dir)
+
+ os.makedirs(doctree_dir, exist_ok=True)
+ os.makedirs(output_dir, exist_ok=True)
+
+ output_dirs.append(output_dir)
+
+ build_args = args + [
+ "-d", doctree_dir,
+ "-D", f"kerneldoc_bin={kerneldoc}",
+ "-D", f"version={self.kernelversion}",
+ "-D", f"release={self.kernelrelease}",
+ "-D", f"kerneldoc_srctree={self.srctree}",
+ src_dir,
+ output_dir,
+ ]
+
+ if target == "mandocs":
+ self.handle_man(kerneldoc, docs_dir, src_dir, output_dir)
+ elif not skip_sphinx:
+ try:
+ result = self.run_sphinx(sphinxbuild, build_args,
+ env=self.env)
+
+ if result:
+ sys.exit(f"Build failed: return code: {result}")
+
+ except (OSError, ValueError, subprocess.SubprocessError) as e:
+ sys.exit(f"Build failed: {repr(e)}")
+
+ #
+ # Ensure that each html/epub output will have needed static files
+ #
+ if target in ["htmldocs", "epubdocs"]:
+ self.handle_html(css, output_dir)
+
+ #
+ # Step 2: Some targets (PDF and info) require an extra step once
+ # sphinx-build finishes
+ #
+ if target == "pdfdocs":
+ self.handle_pdf(output_dirs, deny_vf)
+ elif target == "infodocs":
+ self.handle_info(output_dirs)
+
+def jobs_type(value):
+ """
+ Handle valid values for -j. Accepts Sphinx's "-jauto", plus any
+ integer greater than or equal to one.
+ """
+ if value is None:
+ return None
+
+ if value.lower() == 'auto':
+ return value.lower()
+
+ try:
+ if int(value) >= 1:
+ return value
+
+ raise argparse.ArgumentTypeError(f"Minimum jobs is 1, got {value}")
+ except ValueError:
+ raise argparse.ArgumentTypeError(f"Must be 'auto' or positive integer, got {value}") # pylint: disable=W0707
+
+def main():
+ """
+ Main function. The only mandatory argument is the target. The other
+ arguments use default values, possibly taken from os.environ, when
+ not specified.
+ """
+ parser = argparse.ArgumentParser(description="Kernel documentation builder")
+
+ parser.add_argument("target", choices=list(TARGETS.keys()),
+ help="Documentation target to build")
+ parser.add_argument("--sphinxdirs", nargs="+",
+ help="Specific directories to build")
+ parser.add_argument("--builddir", default="output",
+ help="Sphinx configuration file")
+
+ parser.add_argument("--theme", help="Sphinx theme to use")
+
+ parser.add_argument("--css", help="Custom CSS file for HTML/EPUB")
+
+ parser.add_argument("--paper", choices=PAPER, default=PAPER[0],
+ help="Paper size for LaTeX/PDF output")
+
+ parser.add_argument('--deny-vf',
+ help="Configuration to deny variable fonts on pdf builds")
+
+ parser.add_argument("-v", "--verbose", action='store_true',
+ help="place build in verbose mode")
+
+ parser.add_argument('-j', '--jobs', type=jobs_type,
+ help="Set the number of jobs to use with sphinx-build")
+
+ parser.add_argument('-i', '--interactive', action='store_true',
+ help="Change latex default to run in interactive mode")
+
+ parser.add_argument('-s', '--skip-sphinx-build', action='store_true',
+ help="Skip sphinx-build step")
+
+ parser.add_argument("-V", "--venv", nargs='?', const=f'{VENV_DEFAULT}',
+ default=None,
+ help=f'If used, run Sphinx from a venv dir (default dir: {VENV_DEFAULT})')
+
+ args = parser.parse_args()
+
+ PythonVersion.check_python(MIN_PYTHON_VERSION, show_alternatives=True,
+ bail_out=True)
+
+ builder = SphinxBuilder(builddir=args.builddir, venv=args.venv,
+ verbose=args.verbose, n_jobs=args.jobs,
+ interactive=args.interactive)
+
+ builder.build(args.target, sphinxdirs=args.sphinxdirs,
+ theme=args.theme, css=args.css, paper=args.paper,
+ deny_vf=args.deny_vf,
+ skip_sphinx=args.skip_sphinx_build)
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/docs/sphinx-pre-install b/tools/docs/sphinx-pre-install
new file mode 100755
index 000000000000..965c9b093a41
--- /dev/null
+++ b/tools/docs/sphinx-pre-install
@@ -0,0 +1,1543 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Copyright (c) 2017-2025 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+#
+# pylint: disable=C0103,C0114,C0115,C0116,C0301,C0302
+# pylint: disable=R0902,R0904,R0911,R0912,R0914,R0915,R1705,R1710,E1121
+
+# Note: this script requires at least Python 3.6 to run.
+# Don't add changes not compatible with it, it is meant to report
+# incompatible python versions.
+
+"""
+Dependency checker for the Sphinx kernel documentation build.
+
+This module provides tools to check for all required dependencies needed to
+build documentation using Sphinx, including system packages, Python modules
+and LaTeX packages for PDF generation.
+
+It detects packages for a subset of Linux distributions used by kernel
+maintainers, showing hints and missing dependencies.
+
+The main class SphinxDependencyChecker handles the dependency checking logic
+and provides recommendations for installing missing packages. It supports both
+system package installations and Python virtual environments. By default,
+a system package install is recommended.
+"""
+
+import argparse
+import locale
+import os
+import re
+import subprocess
+import sys
+from glob import glob
+import os.path
+
+src_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.insert(0, os.path.join(src_dir, '../lib/python'))
+from kdoc.python_version import PythonVersion
+
+RECOMMENDED_VERSION = PythonVersion("3.4.3").version
+MIN_PYTHON_VERSION = PythonVersion("3.7").version
+
+
+class DepManager:
+ """
+ Manage package dependencies. There are three types of dependencies:
+
+ - System: dependencies required for docs build;
+ - Python: python dependencies for a native distro Sphinx install;
+ - PDF: dependencies needed by PDF builds.
+
+ Each dependency can be mandatory or optional. Not installing an optional
+ dependency won't break the build, but will cause degradation at the
+ docs output.
+ """
+
+ # Internal types of dependencies. Don't use them outside DepManager class.
+ _SYS_TYPE = 0
+ _PHY_TYPE = 1
+ _PDF_TYPE = 2
+
+ # Dependencies visible outside the class.
+ # The keys are tuple with: (type, is_mandatory flag).
+ #
+ # Currently we're not using all optional dep types. Yet, we'll keep
+ # all possible combinations here. There aren't many, and that makes
+ # things easier if they're needed later, and for the name() method
+ # below
+
+ SYSTEM_MANDATORY = (_SYS_TYPE, True)
+ PYTHON_MANDATORY = (_PHY_TYPE, True)
+ PDF_MANDATORY = (_PDF_TYPE, True)
+
+ SYSTEM_OPTIONAL = (_SYS_TYPE, False)
+ PYTHON_OPTIONAL = (_PHY_TYPE, False)
+ PDF_OPTIONAL = (_PDF_TYPE, False)
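+
+ # For instance, PDF_OPTIONAL unpacks to (_PDF_TYPE, False): a PDF
+ # dependency whose absence degrades the output without breaking the
+ # build.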
+
+ def __init__(self, pdf):
+ """
+ Initialize internal vars:
+
+ - missing: missing dependencies list, containing a distro-independent
+ name for a missing dependency and its type.
+ - missing_pkg: ancillary dict containing missing dependencies in
+ distro namespace, organized by type.
+ - need: total number of needed dependencies. Never cleaned.
+ - optional: total number of optional dependencies. Never cleaned.
+ - pdf: Is PDF support enabled?
+ """
+ self.missing = {}
+ self.missing_pkg = {}
+ self.need = 0
+ self.optional = 0
+ self.pdf = pdf
+
+ @staticmethod
+ def name(dtype):
+ """
+ Ancillary routine to output a warn/error message reporting
+ missing dependencies.
+ """
+ if dtype[0] == DepManager._SYS_TYPE:
+ msg = "build"
+ elif dtype[0] == DepManager._PHY_TYPE:
+ msg = "Python"
+ else:
+ msg = "PDF"
+
+ if dtype[1]:
+ return f"ERROR: {msg} mandatory deps missing"
+ else:
+ return f"Warning: {msg} optional deps missing"
+
+ @staticmethod
+ def is_optional(dtype):
+ """Ancillary routine to report if a dependency is optional"""
+ return not dtype[1]
+
+ @staticmethod
+ def is_pdf(dtype):
+ """Ancillary routine to report if a dependency is for PDF generation"""
+ if dtype[0] == DepManager._PDF_TYPE:
+ return True
+
+ return False
+
+ def add_package(self, package, dtype):
+ """
+ Add a package to the self.missing dictionary.
+ Doesn't update missing_pkg.
+ """
+ is_optional = DepManager.is_optional(dtype)
+ self.missing[package] = dtype
+ if is_optional:
+ self.optional += 1
+ else:
+ self.need += 1
+
+ def del_package(self, package):
+ """
+ Remove a package from the self.missing dictionary.
+ Doesn't update missing_pkg.
+ """
+ if package in self.missing:
+ del self.missing[package]
+
+ def clear_deps(self):
+ """
+ Clear dependencies without changing needed/optional.
+
+ This is an awkward way to have a separate section recommending
+ a package after the main system dependencies.
+
+ TODO: rework the logic to prevent needing it.
+ """
+
+ self.missing = {}
+ self.missing_pkg = {}
+
+ def check_missing(self, progs):
+ """
+ Update self.missing_pkg, using progs dict to convert from the
+ agnostic package name to distro-specific one.
+
+ Returns a string with the packages to be installed, sorted and
+ with any duplicates removed.
+ """
+
+ self.missing_pkg = {}
+
+ for prog, dtype in sorted(self.missing.items()):
+ # At least on some LTS distros like CentOS 7, texlive doesn't
+ # provide all packages we need. When such distros are
+ # detected, we have to disable PDF output.
+ #
+ # So, we need to ignore the packages that distros would
+ # need for LaTeX to work
+ if DepManager.is_pdf(dtype) and not self.pdf:
+ self.optional -= 1
+ continue
+
+ if dtype not in self.missing_pkg:
+ self.missing_pkg[dtype] = []
+
+ self.missing_pkg[dtype].append(progs.get(prog, prog))
+
+ install = []
+ for dtype, pkgs in self.missing_pkg.items():
+ install += pkgs
+
+ return " ".join(sorted(set(install)))
+
+ def warn_install(self):
+ """
+ Emit warnings/errors related to missing packages.
+ """
+
+ output_msg = ""
+
+ for dtype in sorted(self.missing_pkg.keys()):
+ progs = " ".join(sorted(set(self.missing_pkg[dtype])))
+
+ try:
+ name = DepManager.name(dtype)
+ output_msg += f'{name}:\t{progs}\n'
+ except KeyError:
+ raise KeyError(f"ERROR!!!: invalid dtype for {progs}: {dtype}")
+
+ if output_msg:
+ print(f"\n{output_msg}")
+
+class AncillaryMethods:
+ """
+ Ancillary methods that check for missing dependencies of different
+ kinds, like binaries, Python modules, RPM deps, etc.
+ """
+
+ @staticmethod
+ def which(prog):
+ """
+ Our own implementation of which(). We could instead use
+ shutil.which(), but this function is simple enough.
+ Probably faster to use this implementation than to import shutil.
+ """
+ for path in os.environ.get("PATH", "").split(":"):
+ full_path = os.path.join(path, prog)
+ if os.access(full_path, os.X_OK):
+ return full_path
+
+ return None
+
+ @staticmethod
+ def run(*args, **kwargs):
+ """
+ Execute a command, hiding its output by default.
+ Preserve compatibility with older Python versions.
+ """
+
+ capture_output = kwargs.pop('capture_output', False)
+
+ if capture_output:
+ if 'stdout' not in kwargs:
+ kwargs['stdout'] = subprocess.PIPE
+ if 'stderr' not in kwargs:
+ kwargs['stderr'] = subprocess.PIPE
+ else:
+ if 'stdout' not in kwargs:
+ kwargs['stdout'] = subprocess.DEVNULL
+ if 'stderr' not in kwargs:
+ kwargs['stderr'] = subprocess.DEVNULL
+
+ # Don't break with older Python versions
+ if 'text' in kwargs and sys.version_info < (3, 7):
+ kwargs['universal_newlines'] = kwargs.pop('text')
+
+ return subprocess.run(*args, **kwargs)
+
+class MissingCheckers(AncillaryMethods):
+ """
+ Contains some ancillary checkers for different types of binaries and
+ package managers.
+ """
+
+ def __init__(self, args, texlive):
+ """
+ Initialize its internal variables
+ """
+ self.pdf = args.pdf
+ self.virtualenv = args.virtualenv
+ self.version_check = args.version_check
+ self.texlive = texlive
+
+ self.min_version = (0, 0, 0)
+ self.cur_version = (0, 0, 0)
+
+ self.deps = DepManager(self.pdf)
+
+ self.need_symlink = 0
+ self.need_sphinx = 0
+
+ self.verbose_warn_install = 1
+
+ self.virtenv_dir = ""
+ self.install = ""
+ self.python_cmd = ""
+
+ self.virtenv_prefix = ["sphinx_", "Sphinx_"]
+
+ def check_missing_file(self, files, package, dtype):
+ """
+ Does at least one of the files exist? If none do, add the
+ package to the missing dependencies.
+ """
+ for f in files:
+ if os.path.exists(f):
+ return
+ self.deps.add_package(package, dtype)
+
+ def check_program(self, prog, dtype):
+ """
+ Does the program exist and is it in the PATH?
+ If not, add it to the missing dependencies.
+ """
+ found = self.which(prog)
+ if found:
+ return found
+
+ self.deps.add_package(prog, dtype)
+
+ return None
+
+ def check_perl_module(self, prog, dtype):
+ """
+ Is a Perl module available? If not, add it to the missing
+ dependencies.
+
+ Right now, we still need Perl for the doc build, as it is required
+ by some tools called at docs or kernel build time, like:
+
+ tools/docs/documentation-file-ref-check
+
+ Also, checkpatch is written in Perl.
+ """
+
+ # While testing with the lxc download template, one of the
+ # distros (Oracle) didn't have perl - nor even an option to install
+ # it before installing the oraclelinux-release-el9 package.
+ #
+ # Check for it before reporting an error. If perl is not there,
+ # add it as a mandatory package, as some parts of the doc builder
+ # need it.
+ if not self.which("perl"):
+ self.deps.add_package("perl", DepManager.SYSTEM_MANDATORY)
+ self.deps.add_package(prog, dtype)
+ return
+
+ try:
+ self.run(["perl", f"-M{prog}", "-e", "1"], check=True)
+ except subprocess.CalledProcessError:
+ self.deps.add_package(prog, dtype)
+
+ def check_python_module(self, module, is_optional=False):
+ """
+ Does a Python module exist outside the venv? If not, add it to the
+ missing dependencies.
+ """
+ if is_optional:
+ dtype = DepManager.PYTHON_OPTIONAL
+ else:
+ dtype = DepManager.PYTHON_MANDATORY
+
+ try:
+ self.run([self.python_cmd, "-c", f"import {module}"], check=True)
+ except subprocess.CalledProcessError:
+ self.deps.add_package(module, dtype)
+
+ def check_rpm_missing(self, pkgs, dtype):
+ """
+ Does an RPM package exist? If not, add it to the missing dependencies.
+ """
+ for prog in pkgs:
+ try:
+ self.run(["rpm", "-q", prog], check=True)
+ except subprocess.CalledProcessError:
+ self.deps.add_package(prog, dtype)
+
+ def check_pacman_missing(self, pkgs, dtype):
+ """
+ Does a pacman package exist? If not, add it to the missing dependencies.
+ """
+ for prog in pkgs:
+ try:
+ self.run(["pacman", "-Q", prog], check=True)
+ except subprocess.CalledProcessError:
+ self.deps.add_package(prog, dtype)
+
+ def check_missing_tex(self, is_optional=False):
+ """
+ Does a LaTeX package exist? If not, add it to the missing dependencies.
+ """
+ if is_optional:
+ dtype = DepManager.PDF_OPTIONAL
+ else:
+ dtype = DepManager.PDF_MANDATORY
+
+ kpsewhich = self.which("kpsewhich")
+ for prog, package in self.texlive.items():
+
+ # If kpsewhich is not there, just add it to deps
+ if not kpsewhich:
+ self.deps.add_package(package, dtype)
+ continue
+
+ # Check if the package is needed
+ try:
+ result = self.run(
+ [kpsewhich, prog], stdout=subprocess.PIPE, text=True, check=True
+ )
+
+ # Didn't find. Add it
+ if not result.stdout.strip():
+ self.deps.add_package(package, dtype)
+
+ except subprocess.CalledProcessError:
+ # kpsewhich returned an error. Add it, just in case
+ self.deps.add_package(package, dtype)
+
+ def get_sphinx_fname(self):
+ """
+ Gets the binary filename for sphinx-build.
+ """
+ if "SPHINXBUILD" in os.environ:
+ return os.environ["SPHINXBUILD"]
+
+ fname = "sphinx-build"
+ if self.which(fname):
+ return fname
+
+ fname = "sphinx-build-3"
+ if self.which(fname):
+ self.need_symlink = 1
+ return fname
+
+ return ""
+
+ def get_sphinx_version(self, cmd):
+ """
+ Gets sphinx-build version.
+ """
+ env = os.environ.copy()
+
+ # The sphinx-build tool has a bug: internally, it tries to set
+ # locale with locale.setlocale(locale.LC_ALL, ''). This causes a
+ # crash if language is not set. Detect and fix it.
+ try:
+ locale.setlocale(locale.LC_ALL, '')
+ except Exception:
+ env["LC_ALL"] = "C"
+ env["LANG"] = "C"
+
+ try:
+ result = self.run([cmd, "--version"], env=env,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ text=True, check=True)
+ except (subprocess.CalledProcessError, FileNotFoundError):
+ return None
+
+ for line in result.stdout.split("\n"):
+ match = re.match(r"^sphinx-build\s+([\d\.]+)(?:\+(?:/[\da-f]+)|b\d+)?\s*$", line)
+ if match:
+ return PythonVersion.parse_version(match.group(1))
+
+ match = re.match(r"^Sphinx.*\s+([\d\.]+)\s*$", line)
+ if match:
+ return PythonVersion.parse_version(match.group(1))
+
+ def check_sphinx(self, conf):
+ """
+ Checks Sphinx minimal requirements
+ """
+ try:
+ with open(conf, "r", encoding="utf-8") as f:
+ for line in f:
+ match = re.match(r"^\s*needs_sphinx\s*=\s*[\'\"]([\d\.]+)[\'\"]", line)
+ if match:
+ self.min_version = PythonVersion.parse_version(match.group(1))
+ break
+ except IOError:
+ sys.exit(f"Can't open {conf}")
+
+ if self.min_version == (0, 0, 0):
+ sys.exit(f"Can't get needs_sphinx version from {conf}")
+
+ self.virtenv_dir = self.virtenv_prefix[0] + "latest"
+
+ sphinx = self.get_sphinx_fname()
+ if not sphinx:
+ self.need_sphinx = 1
+ return
+
+ self.cur_version = self.get_sphinx_version(sphinx)
+ if not self.cur_version:
+ sys.exit(f"{sphinx} didn't return its version")
+
+ if self.cur_version < self.min_version:
+ curver = PythonVersion.ver_str(self.cur_version)
+ minver = PythonVersion.ver_str(self.min_version)
+
+ print(f"ERROR: Sphinx version is {curver}. It should be >= {minver}")
+ self.need_sphinx = 1
+ return
+
+ # In version check mode, just assume Sphinx has all mandatory deps
+ if self.version_check and self.cur_version >= RECOMMENDED_VERSION:
+ sys.exit(0)
+
+ def catcheck(self, filename):
+ """
+ Reads a file if it exists, returning its contents as a stripped string.
+ If not found, returns an empty string.
+ """
+ if os.path.exists(filename):
+ with open(filename, "r", encoding="utf-8") as f:
+ return f.read().strip()
+ return ""
+
+ def get_system_release(self):
+ """
+ Determine the system type. There's no unique way that would work
+ with all distros with a minimal package install. So, several
+ methods are used here.
+
+ By default, it will use the lsb_release command. If not available,
+ it will fall back to reading the known different places where the
+ distro name is stored.
+
+ Several modern distros now have /etc/os-release, which usually has
+ decent coverage.
+ """
+
+ system_release = ""
+
+ if self.which("lsb_release"):
+ result = self.run(["lsb_release", "-d"], capture_output=True, text=True)
+ system_release = result.stdout.replace("Description:", "").strip()
+
+ release_files = [
+ "/etc/system-release",
+ "/etc/redhat-release",
+ "/etc/lsb-release",
+ "/etc/gentoo-release",
+ ]
+
+ if not system_release:
+ for f in release_files:
+ system_release = self.catcheck(f)
+ if system_release:
+ break
+
+ # This seems more common than LSB these days
+ if not system_release:
+ os_var = {}
+ try:
+ with open("/etc/os-release", "r", encoding="utf-8") as f:
+ for line in f:
+ match = re.match(r"^([\w\d\_]+)=\"?([^\"]*)\"?\n", line)
+ if match:
+ os_var[match.group(1)] = match.group(2)
+
+ system_release = os_var.get("NAME", "")
+ if "VERSION_ID" in os_var:
+ system_release += " " + os_var["VERSION_ID"]
+ elif "VERSION" in os_var:
+ system_release += " " + os_var["VERSION"]
+ except IOError:
+ pass
+
+ if not system_release:
+ system_release = self.catcheck("/etc/issue")
+
+ system_release = system_release.strip()
+
+ return system_release
+
+class SphinxDependencyChecker(MissingCheckers):
+ """
+ Main class for checking Sphinx documentation build dependencies.
+
+ - Check for missing system packages;
+ - Check for missing Python modules;
+ - Check for missing LaTeX packages needed by PDF generation;
+ - Propose Sphinx install via Python Virtual environment;
+ - Propose Sphinx install via distro-specific package install.
+ """
+ def __init__(self, args):
+ """Initialize checker variables"""
+
+ # List of required texlive packages on Fedora and OpenSuse
+ texlive = {
+ "amsfonts.sty": "texlive-amsfonts",
+ "amsmath.sty": "texlive-amsmath",
+ "amssymb.sty": "texlive-amsfonts",
+ "amsthm.sty": "texlive-amscls",
+ "anyfontsize.sty": "texlive-anyfontsize",
+ "atbegshi.sty": "texlive-oberdiek",
+ "bm.sty": "texlive-tools",
+ "capt-of.sty": "texlive-capt-of",
+ "cmap.sty": "texlive-cmap",
+ "ctexhook.sty": "texlive-ctex",
+ "ecrm1000.tfm": "texlive-ec",
+ "eqparbox.sty": "texlive-eqparbox",
+ "eu1enc.def": "texlive-euenc",
+ "fancybox.sty": "texlive-fancybox",
+ "fancyvrb.sty": "texlive-fancyvrb",
+ "float.sty": "texlive-float",
+ "fncychap.sty": "texlive-fncychap",
+ "footnote.sty": "texlive-mdwtools",
+ "framed.sty": "texlive-framed",
+ "luatex85.sty": "texlive-luatex85",
+ "multirow.sty": "texlive-multirow",
+ "needspace.sty": "texlive-needspace",
+ "palatino.sty": "texlive-psnfss",
+ "parskip.sty": "texlive-parskip",
+ "polyglossia.sty": "texlive-polyglossia",
+ "tabulary.sty": "texlive-tabulary",
+ "threeparttable.sty": "texlive-threeparttable",
+ "titlesec.sty": "texlive-titlesec",
+ "ucs.sty": "texlive-ucs",
+ "upquote.sty": "texlive-upquote",
+ "wrapfig.sty": "texlive-wrapfig",
+ }
+
+ super().__init__(args, texlive)
+
+ self.need_pip = False
+ self.rec_sphinx_upgrade = 0
+
+ self.system_release = self.get_system_release()
+ self.activate_cmd = ""
+
+ # Some distros may not ship a Sphinx package compatible with
+ # our minimal requirements
+ self.package_supported = True
+
+ # Recommend a new python version
+ self.recommend_python = None
+
+ # Certain hints are meant to be shown only once
+ self.distro_msg = None
+
+ self.latest_avail_ver = (0, 0, 0)
+ self.venv_ver = (0, 0, 0)
+
+ prefix = os.environ.get("srctree", ".") + "/"
+
+ self.conf = prefix + "Documentation/conf.py"
+ self.requirement_file = prefix + "Documentation/sphinx/requirements.txt"
+
+ def get_install_progs(self, progs, cmd, extra=None):
+ """
+ Check for missing dependencies using the provided program mapping.
+
+ The actual distro-specific programs are mapped via progs argument.
+ """
+ install = self.deps.check_missing(progs)
+
+ if self.verbose_warn_install:
+ self.deps.warn_install()
+
+ if not install:
+ return
+
+ if cmd:
+ if self.verbose_warn_install:
+ msg = "You should run:"
+ else:
+ msg = ""
+
+ if extra:
+ msg += "\n\t" + extra.replace("\n", "\n\t")
+
+ return msg + "\n\tsudo " + cmd + " " + install
+
+ return None
+
+ #
+ # Distro-specific hints methods
+ #
+
+ def give_debian_hints(self):
+ """
+ Provide package installation hints for Debian-based distros.
+ """
+ progs = {
+ "Pod::Usage": "perl-modules",
+ "convert": "imagemagick",
+ "dot": "graphviz",
+ "ensurepip": "python3-venv",
+ "python-sphinx": "python3-sphinx",
+ "rsvg-convert": "librsvg2-bin",
+ "virtualenv": "virtualenv",
+ "xelatex": "texlive-xetex",
+ "yaml": "python3-yaml",
+ }
+
+ if self.pdf:
+ pdf_pkgs = {
+ "fonts-dejavu": [
+ "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf",
+ ],
+ "fonts-noto-cjk": [
+ "/usr/share/fonts/noto-cjk/NotoSansCJK-Regular.ttc",
+ "/usr/share/fonts/opentype/noto/NotoSansCJK-Regular.ttc",
+ "/usr/share/fonts/opentype/noto/NotoSerifCJK-Regular.ttc",
+ ],
+ "tex-gyre": [
+ "/usr/share/texmf/tex/latex/tex-gyre/tgtermes.sty"
+ ],
+ "texlive-fonts-recommended": [
+ "/usr/share/texlive/texmf-dist/fonts/tfm/adobe/zapfding/pzdr.tfm",
+ ],
+ "texlive-lang-chinese": [
+ "/usr/share/texlive/texmf-dist/tex/latex/ctex/ctexhook.sty",
+ ],
+ }
+
+ for package, files in pdf_pkgs.items():
+ self.check_missing_file(files, package, DepManager.PDF_MANDATORY)
+
+ self.check_program("dvipng", DepManager.PDF_MANDATORY)
+
+ if not self.distro_msg:
+ self.distro_msg = \
+ "Note: ImageMagick is broken on some distros, affecting PDF output. For more details:\n" \
+ "\thttps://askubuntu.com/questions/1158894/imagemagick-still-broken-using-with-usr-bin-convert"
+
+ return self.get_install_progs(progs, "apt-get install")
+
+ def give_redhat_hints(self):
+ """
+ Provide package installation hints for RedHat-based distros
+ (Fedora, RHEL and RHEL-based variants).
+ """
+ progs = {
+ "Pod::Usage": "perl-Pod-Usage",
+ "convert": "ImageMagick",
+ "dot": "graphviz",
+ "python-sphinx": "python3-sphinx",
+ "rsvg-convert": "librsvg2-tools",
+ "virtualenv": "python3-virtualenv",
+ "xelatex": "texlive-xetex-bin",
+ "yaml": "python3-pyyaml",
+ }
+
+ fedora_tex_pkgs = [
+ "dejavu-sans-fonts",
+ "dejavu-sans-mono-fonts",
+ "dejavu-serif-fonts",
+ "texlive-collection-fontsrecommended",
+ "texlive-collection-latex",
+ "texlive-xecjk",
+ ]
+
+ fedora = False
+ rel = None
+
+ match = re.search(r"(release|Linux)\s+(\d+)", self.system_release)
+ if match:
+ rel = int(match.group(2))
+
+ if not rel:
+ print("Couldn't identify release number")
+ noto_sans_redhat = None
+ self.pdf = False
+ elif re.search("Fedora", self.system_release):
+ # Fedora 38 and later use this CJK font
+
+ noto_sans_redhat = "google-noto-sans-cjk-fonts"
+ fedora = True
+ else:
+ # Almalinux, CentOS, RHEL, ...
+
+ # at least up to version 9 (and Fedora < 38), that's the CJK font
+ noto_sans_redhat = "google-noto-sans-cjk-ttc-fonts"
+
+ progs["virtualenv"] = "python-virtualenv"
+
+ if not rel or rel < 8:
+ print("ERROR: Distro not supported. Too old?")
+ return
+
+ # RHEL 8 uses Python 3.6, which is not compatible with
+ # the build system anymore. Suggest Python 3.9
+ if rel == 8:
+ self.check_program("python3.9", DepManager.SYSTEM_MANDATORY)
+ progs["python3.9"] = "python39"
+ progs["yaml"] = "python39-pyyaml"
+
+ self.recommend_python = True
+
+ # There's no python39-sphinx package. Only pip is supported
+ self.package_supported = False
+
+ if not self.distro_msg:
+ self.distro_msg = \
+ "Note: RHEL-based distros typically require extra repositories.\n" \
+ "For most, enabling epel and crb are enough:\n" \
+ "\tsudo dnf install -y epel-release\n" \
+ "\tsudo dnf config-manager --set-enabled crb\n" \
+ "Yet, some may have other required repositories. Those commands could be useful:\n" \
+ "\tsudo dnf repolist all\n" \
+ "\tsudo dnf repoquery --available --info <pkgs>\n" \
+ "\tsudo dnf config-manager --set-enabled '*' # enable all - probably not what you want"
+
+ if self.pdf:
+ pdf_pkgs = [
+ "/usr/share/fonts/google-noto-cjk/NotoSansCJK-Regular.ttc",
+ "/usr/share/fonts/google-noto-sans-cjk-fonts/NotoSansCJK-Regular.ttc",
+ ]
+
+ self.check_missing_file(pdf_pkgs, noto_sans_redhat, DepManager.PDF_MANDATORY)
+
+ self.check_rpm_missing(fedora_tex_pkgs, DepManager.PDF_MANDATORY)
+
+ self.check_missing_tex()
+
+ # There's no texlive-ctex on RHEL 8 repositories. This will
+ # likely affect CJK pdf build only.
+ if not fedora and rel == 8:
+ self.deps.del_package("texlive-ctex")
+
+ return self.get_install_progs(progs, "dnf install")
+
+ def give_opensuse_hints(self):
+ """
+ Provide package installation hints for openSUSE-based distros
+ (Leap and Tumbleweed).
+ """
+ progs = {
+ "Pod::Usage": "perl-Pod-Usage",
+ "convert": "ImageMagick",
+ "dot": "graphviz",
+ "python-sphinx": "python3-sphinx",
+ "virtualenv": "python3-virtualenv",
+ "xelatex": "texlive-xetex-bin texlive-dejavu",
+ "yaml": "python3-pyyaml",
+ }
+
+ suse_tex_pkgs = [
+ "texlive-babel-english",
+ "texlive-caption",
+ "texlive-colortbl",
+ "texlive-courier",
+ "texlive-dvips",
+ "texlive-helvetic",
+ "texlive-makeindex",
+ "texlive-metafont",
+ "texlive-metapost",
+ "texlive-palatino",
+ "texlive-preview",
+ "texlive-times",
+ "texlive-zapfchan",
+ "texlive-zapfding",
+ ]
+
+ progs["latexmk"] = "texlive-latexmk-bin"
+
+ match = re.search(r"(Leap)\s+(\d+).(\d)", self.system_release)
+ if match:
+ rel = int(match.group(2))
+
+ # Leap 15.x uses Python 3.6, which is not compatible with
+ # the build system anymore. Suggest Python 3.11
+ if rel == 15:
+ if not self.which(self.python_cmd):
+ self.check_program("python3.11", DepManager.SYSTEM_MANDATORY)
+ progs["python3.11"] = "python311"
+ self.recommend_python = True
+
+ progs.update({
+ "python-sphinx": "python311-Sphinx python311-Sphinx-latex",
+ "virtualenv": "python311-virtualenv",
+ "yaml": "python311-PyYAML",
+ })
+ else:
+ # Tumbleweed currently ships Sphinx packages built for Python 3.13
+
+ progs.update({
+ "python-sphinx": "python313-Sphinx python313-Sphinx-latex",
+ "virtualenv": "python313-virtualenv",
+ "yaml": "python313-PyYAML",
+ })
+
+ # FIXME: add support for installing CJK fonts
+ #
+ # I tried hard, but was unable to find a way to install
+ # "Noto Sans CJK SC" on openSUSE
+
+ if self.pdf:
+ self.check_rpm_missing(suse_tex_pkgs, DepManager.PDF_MANDATORY)
+ self.check_missing_tex()
+
+ return self.get_install_progs(progs, "zypper install --no-recommends")
+
+ def give_mageia_hints(self):
+ """
+ Provide package installation hints for Mageia and OpenMandriva.
+ """
+ progs = {
+ "Pod::Usage": "perl-Pod-Usage",
+ "convert": "ImageMagick",
+ "dot": "graphviz",
+ "python-sphinx": "python3-sphinx",
+ "rsvg-convert": "librsvg2",
+ "virtualenv": "python3-virtualenv",
+ "xelatex": "texlive",
+ "yaml": "python3-yaml",
+ }
+
+ tex_pkgs = [
+ "texlive-fontsextra",
+ "texlive-fonts-asian",
+ "fonts-ttf-dejavu",
+ ]
+
+ if re.search(r"OpenMandriva", self.system_release):
+ packager_cmd = "dnf install"
+ noto_sans = "noto-sans-cjk-fonts"
+ tex_pkgs = [
+ "texlive-collection-basic",
+ "texlive-collection-langcjk",
+ "texlive-collection-fontsextra",
+ "texlive-collection-fontsrecommended"
+ ]
+
+ # Tested on OpenMandriva Lx 4.3
+ progs["convert"] = "imagemagick"
+ progs["yaml"] = "python-pyyaml"
+ progs["python-virtualenv"] = "python-virtualenv"
+ progs["python-sphinx"] = "python-sphinx"
+ progs["xelatex"] = "texlive"
+
+ self.check_program("python-virtualenv", DepManager.PYTHON_MANDATORY)
+
+ # On my tests with openMandriva LX 4.0 docker image, upgraded
+ # to 4.3, python-virtualenv package is broken: it is missing
+ # ensurepip. Without it, the alternative would be to run:
+ # python3 -m venv --without-pip ~/sphinx_latest, but running
+ # pip there won't install sphinx into the venv.
+ #
+ # Add a note about that.
+
+ if not self.distro_msg:
+ self.distro_msg = \
+ "Notes:\n"\
+ "1. for venv, ensurepip could be broken, preventing its install method.\n" \
+ "2. at least on OpenMandriva LX 4.3, texlive packages seem broken"
+
+ else:
+ packager_cmd = "urpmi"
+ noto_sans = "google-noto-sans-cjk-ttc-fonts"
+
+ progs["latexmk"] = "texlive-collection-basic"
+
+ if self.pdf:
+ pdf_pkgs = [
+ "/usr/share/fonts/google-noto-cjk/NotoSansCJK-Regular.ttc",
+ "/usr/share/fonts/TTF/NotoSans-Regular.ttf",
+ ]
+
+ self.check_missing_file(pdf_pkgs, noto_sans, DepManager.PDF_MANDATORY)
+ self.check_rpm_missing(tex_pkgs, DepManager.PDF_MANDATORY)
+
+ return self.get_install_progs(progs, packager_cmd)
+
+ def give_arch_linux_hints(self):
+ """
+ Provide package installation hints for ArchLinux.
+ """
+ progs = {
+ "convert": "imagemagick",
+ "dot": "graphviz",
+ "latexmk": "texlive-core",
+ "rsvg-convert": "extra/librsvg",
+ "virtualenv": "python-virtualenv",
+ "xelatex": "texlive-xetex",
+ "yaml": "python-yaml",
+ }
+
+ archlinux_tex_pkgs = [
+ "texlive-basic",
+ "texlive-binextra",
+ "texlive-core",
+ "texlive-fontsrecommended",
+ "texlive-langchinese",
+ "texlive-langcjk",
+ "texlive-latexextra",
+ "ttf-dejavu",
+ ]
+
+ if self.pdf:
+ self.check_pacman_missing(archlinux_tex_pkgs,
+ DepManager.PDF_MANDATORY)
+
+ self.check_missing_file(["/usr/share/fonts/noto-cjk/NotoSansCJK-Regular.ttc"],
+ "noto-fonts-cjk",
+ DepManager.PDF_MANDATORY)
+
+
+ return self.get_install_progs(progs, "pacman -S")
+
+ def give_gentoo_hints(self):
+ """
+ Provide package installation hints for Gentoo.
+ """
+ texlive_deps = [
+ "dev-texlive/texlive-fontsrecommended",
+ "dev-texlive/texlive-latexextra",
+ "dev-texlive/texlive-xetex",
+ "media-fonts/dejavu",
+ ]
+
+ progs = {
+ "convert": "media-gfx/imagemagick",
+ "dot": "media-gfx/graphviz",
+ "rsvg-convert": "gnome-base/librsvg",
+ "virtualenv": "dev-python/virtualenv",
+ "xelatex": " ".join(texlive_deps),
+ "yaml": "dev-python/pyyaml",
+ "python-sphinx": "dev-python/sphinx",
+ }
+
+ if self.pdf:
+ pdf_pkgs = {
+ "media-fonts/dejavu": [
+ "/usr/share/fonts/dejavu/DejaVuSans.ttf",
+ ],
+ "media-fonts/noto-cjk": [
+ "/usr/share/fonts/noto-cjk/NotoSansCJKsc-Regular.otf",
+ "/usr/share/fonts/noto-cjk/NotoSerifCJK-Regular.ttc",
+ ],
+ }
+ for package, files in pdf_pkgs.items():
+ self.check_missing_file(files, package, DepManager.PDF_MANDATORY)
+
+ # Handling dependencies is a nightmare, as Gentoo refuses to emerge
+ # some packages if there's no package.use file describing them.
+ # To make it worse, compilation flags must also be present there
+ # for some packages. If USE is not right, error/warning messages
+ # like these are shown:
+ #
+ # !!! The following binary packages have been ignored due to non matching USE:
+ #
+ # =media-gfx/graphviz-12.2.1-r1 X pdf -python_single_target_python3_13 qt6 svg
+ # =media-gfx/graphviz-12.2.1-r1 X pdf python_single_target_python3_12 -python_single_target_python3_13 qt6 svg
+ # =media-gfx/graphviz-12.2.1-r1 X pdf qt6 svg
+ # =media-gfx/graphviz-12.2.1-r1 X pdf -python_single_target_python3_10 qt6 svg
+ # =media-gfx/graphviz-12.2.1-r1 X pdf -python_single_target_python3_10 python_single_target_python3_12 -python_single_target_python3_13 qt6 svg
+ # =media-fonts/noto-cjk-20190416 X
+ # =app-text/texlive-core-2024-r1 X cjk -xetex
+ # =app-text/texlive-core-2024-r1 X -xetex
+ # =app-text/texlive-core-2024-r1 -xetex
+ # =dev-libs/zziplib-0.13.79-r1 sdl
+ #
+ # And will ignore such packages, installing the remaining ones. That
+ # affects mostly the image extension and PDF generation.
+
+ # Package dependencies and the minimal needed args:
+ portages = {
+ "graphviz": "media-gfx/graphviz",
+ "imagemagick": "media-gfx/imagemagick",
+ "media-libs": "media-libs/harfbuzz icu",
+ "media-fonts": "media-fonts/noto-cjk",
+ "texlive": "app-text/texlive-core xetex",
+ "zziblib": "dev-libs/zziplib sdl",
+ }
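+
+ # Illustrative command emitted by the loop below for a missing entry,
+ # using the "texlive" mapping above:
+ # sudo su -c 'echo "app-text/texlive-core xetex" > /etc/portage/package.use/texlive'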
+
+ extra_cmds = ""
+ if not self.distro_msg:
+ self.distro_msg = "Note: Gentoo requires package.use to be adjusted before emerging packages"
+
+ use_base = "/etc/portage/package.use"
+ files = glob(f"{use_base}/*")
+
+ for fname, portage in portages.items():
+ install = False
+
+ while install is False:
+ if not files:
+ # No files under package.use. Install all
+ install = True
+ break
+
+ args = portage.split(" ")
+
+ name = args.pop(0)
+
+ cmd = ["grep", "-l", "-E", rf"^{name}\b" ] + files
+ result = self.run(cmd, stdout=subprocess.PIPE, text=True)
+ if result.returncode or not result.stdout.strip():
+ # File containing portage name not found
+ install = True
+ break
+
+ # Ensure that needed USE flags are present
+ if args:
+ match_fname = result.stdout.strip()
+ with open(match_fname, 'r', encoding='utf8',
+ errors='backslashreplace') as fp:
+ for line in fp:
+ for arg in args:
+ if arg.startswith("-"):
+ continue
+
+ if not re.search(rf"\s*{arg}\b", line):
+ # Needed file argument not found
+ install = True
+ break
+
+ # Everything looks ok, don't install
+ break
+
+ # emit a code to setup missing USE
+ if install:
+ extra_cmds += (f"sudo su -c 'echo \"{portage}\" > {use_base}/{fname}'\n")
+
+ # Now, we can use emerge and let it respect USE
+ return self.get_install_progs(progs,
+ "emerge --ask --changed-use --binpkg-respect-use=y",
+ extra_cmds)
+
+ def get_install(self):
+ """
+ OS-specific hints logic. Looks for a distro-specific hinter. If
+ found, uses it to provide package-manager specific install commands.
+
+ Otherwise, outputs install instructions for the meta-packages.
+
+ Returns a string with the command to be executed to install the
+ needed packages if the distro is detected. Otherwise, returns just
+ a list of packages that require installation.
+ """
+ os_hints = {
+ re.compile("Red Hat Enterprise Linux"): self.give_redhat_hints,
+ re.compile("Fedora"): self.give_redhat_hints,
+ re.compile("AlmaLinux"): self.give_redhat_hints,
+ re.compile("Amazon Linux"): self.give_redhat_hints,
+ re.compile("CentOS"): self.give_redhat_hints,
+ re.compile("openEuler"): self.give_redhat_hints,
+ re.compile("Oracle Linux Server"): self.give_redhat_hints,
+ re.compile("Rocky Linux"): self.give_redhat_hints,
+ re.compile("Springdale Open Enterprise"): self.give_redhat_hints,
+
+ re.compile("Ubuntu"): self.give_debian_hints,
+ re.compile("Debian"): self.give_debian_hints,
+ re.compile("Devuan"): self.give_debian_hints,
+ re.compile("Kali"): self.give_debian_hints,
+ re.compile("Mint"): self.give_debian_hints,
+
+ re.compile("openSUSE"): self.give_opensuse_hints,
+
+ re.compile("Mageia"): self.give_mageia_hints,
+ re.compile("OpenMandriva"): self.give_mageia_hints,
+
+ re.compile("Arch Linux"): self.give_arch_linux_hints,
+ re.compile("Gentoo"): self.give_gentoo_hints,
+ }
+
+ # If the OS is detected, use per-OS hint logic
+ for regex, os_hint in os_hints.items():
+ if regex.search(self.system_release):
+ return os_hint()
+
+ #
+ # Fall-back to generic hint code for other distros
+ # That's far from ideal, especially for LaTeX dependencies.
+ #
+ progs = {"sphinx-build": "sphinx"}
+ if self.pdf:
+ self.check_missing_tex()
+
+ self.distro_msg = \
+ f"I don't know distro {self.system_release}.\n" \
+ "So, I can't provide you a hint with the install procedure.\n" \
+ "There are likely missing dependencies."
+
+ return self.get_install_progs(progs, None)
+
+ #
+ # Common dependencies
+ #
+ def deactivate_help(self):
+ """
+ Print a helper message to disable a virtual environment.
+ """
+
+ print("\n If you want to exit the virtualenv, you can use:")
+ print("\tdeactivate")
+
+ def get_virtenv(self):
+ """
+ Give a hint about how to activate an already-existing virtual
+ environment containing sphinx-build.
+
+ Returns a tuple with (activate_cmd_path, sphinx_version) for
+ the newest available virtual env.
+ """
+
+ cwd = os.getcwd()
+
+ activates = []
+
+ # Add all sphinx prefixes with possible version numbers
+ for p in self.virtenv_prefix:
+ activates += glob(f"{cwd}/{p}[0-9]*/bin/activate")
+
+ activates.sort(reverse=True, key=str.lower)
+
+ # Place sphinx_latest first, if it exists
+ for p in self.virtenv_prefix:
+ activates = glob(f"{cwd}/{p}*latest/bin/activate") + activates
+
+ ver = (0, 0, 0)
+ for f in activates:
+ # Discard too old Sphinx virtual environments
+ match = re.search(r"(\d+)\.(\d+)\.(\d+)", f)
+ if match:
+ ver = (int(match.group(1)), int(match.group(2)), int(match.group(3)))
+
+ if ver < self.min_version:
+ continue
+
+ sphinx_cmd = f.replace("activate", "sphinx-build")
+ if not os.path.isfile(sphinx_cmd):
+ continue
+
+ ver = self.get_sphinx_version(sphinx_cmd)
+
+ if not ver:
+ venv_dir = f.replace("/bin/activate", "")
+ print(f"Warning: virtual environment {venv_dir} is not working.\n" \
+ "Python version upgrade? Remove it with:\n\n" \
+ "\trm -rf {venv_dir}\n\n")
+ else:
+ if self.need_sphinx and ver >= self.min_version:
+ return (f, ver)
+ elif ver > self.cur_version:
+ return (f, ver)
+
+ return ("", ver)
+
+ def recommend_sphinx_upgrade(self):
+ """
+ Check if Sphinx needs to be upgraded.
+
+ Returns a tuple with the highest available Sphinx version if found.
+ Otherwise, returns None to indicate either that no upgrade is needed
+ or no venv was found.
+ """
+
+ # Avoid running sphinx-builds from venv if cur_version is good
+ if self.cur_version and self.cur_version >= RECOMMENDED_VERSION:
+ self.latest_avail_ver = self.cur_version
+ return None
+
+ # Get the highest version from sphinx_*/bin/sphinx-build and the
+ # corresponding command to activate the venv/virtenv
+ self.activate_cmd, self.venv_ver = self.get_virtenv()
+
+ # Store the highest version from Sphinx existing virtualenvs
+ if self.activate_cmd and self.venv_ver > self.cur_version:
+ self.latest_avail_ver = self.venv_ver
+ else:
+ if self.cur_version:
+ self.latest_avail_ver = self.cur_version
+ else:
+ self.latest_avail_ver = (0, 0, 0)
+
+ # As we don't know the package version of Sphinx, and there are no
+ # virtual environments, don't check if upgrades are needed
+ if not self.virtualenv:
+ if not self.latest_avail_ver:
+ return None
+
+ return self.latest_avail_ver
+
+ # Either there is already a virtual env, or a new one should be created
+ self.need_pip = True
+
+ if not self.latest_avail_ver:
+ return None
+
+ # Return if the reason is due to an upgrade or not
+ if self.latest_avail_ver != (0, 0, 0):
+ if self.latest_avail_ver < RECOMMENDED_VERSION:
+ self.rec_sphinx_upgrade = 1
+
+ return self.latest_avail_ver
+
+ def recommend_package(self):
+ """
+ Recommend installing Sphinx as a distro-specific package.
+ """
+
+ print("\n2) As a package with:")
+
+ old_need = self.deps.need
+ old_optional = self.deps.optional
+
+ self.pdf = False
+ self.deps.optional = 0
+ old_verbose = self.verbose_warn_install
+ self.verbose_warn_install = 0
+
+ self.deps.clear_deps()
+
+ self.deps.add_package("python-sphinx", DepManager.PYTHON_MANDATORY)
+
+ cmd = self.get_install()
+ if cmd:
+ print(cmd)
+
+ self.deps.need = old_need
+ self.deps.optional = old_optional
+ self.verbose_warn_install = old_verbose
+
+ def recommend_sphinx_version(self, virtualenv_cmd):
+ """
+ Provide recommendations for installing or upgrading Sphinx based
+ on current version.
+
+ The logic here is complex, as it has to deal with different versions:
+
+ - minimal supported version;
+ - minimal PDF version;
+ - recommended version.
+
+ It also needs to work fine with both the distro's package and
+ venv/virtualenv installs.
+ """
+
+ if self.recommend_python:
+ cur_ver = sys.version_info[:3]
+ if cur_ver < MIN_PYTHON_VERSION:
+ print(f"\nPython version {cur_ver} is incompatible with doc build.\n" \
+ "Please upgrade it and re-run.\n")
+ return
+
+ # Version is OK. Nothing to do.
+ if self.cur_version != (0, 0, 0) and self.cur_version >= RECOMMENDED_VERSION:
+ return
+
+ if self.latest_avail_ver:
+ latest_avail_ver = PythonVersion.ver_str(self.latest_avail_ver)
+
+ if not self.need_sphinx:
+ # sphinx-build is present and its version is >= $min_version
+
+ # only recommend enabling a newer virtenv version if it makes sense.
+ if self.latest_avail_ver and self.latest_avail_ver > self.cur_version:
+ print(f"\nYou may also use the newer Sphinx version {latest_avail_ver} with:")
+ if f"{self.virtenv_prefix}" in os.getcwd():
+ print("\tdeactivate")
+ print(f"\t. {self.activate_cmd}")
+ self.deactivate_help()
+ return
+
+ if self.latest_avail_ver and self.latest_avail_ver >= RECOMMENDED_VERSION:
+ return
+
+ if not self.virtualenv:
+ # No sphinx either via package or via virtenv. As we can't
+ # compare the versions here, just return, recommending that
+ # the user install it from the distro package.
+ if not self.latest_avail_ver or self.latest_avail_ver == (0, 0, 0):
+ return
+
+ # The user doesn't want a virtenv recommendation, but a newer
+ # version is already installed via virtenv.
+ # So, print commands to enable it
+ if self.latest_avail_ver > self.cur_version:
+ print(f"\nYou may also use the Sphinx virtualenv version {latest_avail_ver} with:")
+ if f"{self.virtenv_prefix}" in os.getcwd():
+ print("\tdeactivate")
+ print(f"\t. {self.activate_cmd}")
+ self.deactivate_help()
+ return
+ print("\n")
+ else:
+ if self.need_sphinx:
+ self.deps.need += 1
+
+ # Suggest newer versions if current ones are too old
+ if self.latest_avail_ver and self.latest_avail_ver >= self.min_version:
+ if self.latest_avail_ver >= RECOMMENDED_VERSION:
+ print(f"\nNeed to activate Sphinx (version {latest_avail_ver}) on virtualenv with:")
+ print(f"\t. {self.activate_cmd}")
+ self.deactivate_help()
+ return
+
+ # Version is above the minimal required one, but may be
+ # below the recommended one. So, print warnings/notes
+ if self.latest_avail_ver < RECOMMENDED_VERSION:
+ print(f"Warning: It is recommended at least Sphinx version {RECOMMENDED_VERSION}.")
+
+ # At this point, either it needs Sphinx or upgrade is recommended,
+ # both via pip
+
+ if self.rec_sphinx_upgrade:
+ if not self.virtualenv:
+ print("Instead of install/upgrade Python Sphinx pkg, you could use pip/pypi with:\n\n")
+ else:
+ print("To upgrade Sphinx, use:\n\n")
+ else:
+ print("\nSphinx needs to be installed either:\n1) via pip/pypi with:\n")
+
+ if not virtualenv_cmd:
+ print(" Currently not possible.\n")
+ print(" Please upgrade Python to a newer version and run this script again")
+ else:
+ print(f"\t{virtualenv_cmd} {self.virtenv_dir}")
+ print(f"\t. {self.virtenv_dir}/bin/activate")
+ print(f"\tpip install -r {self.requirement_file}")
+ self.deactivate_help()
+
+ if self.package_supported:
+ self.recommend_package()
+
+ print("\n" \
+ " Please note that Sphinx currentlys produce false-positive\n" \
+ " warnings when the same name is used for more than one type (functions,\n" \
+ " structs, enums,...). This is known Sphinx bug. For more details, see:\n" \
+ "\thttps://github.com/sphinx-doc/sphinx/pull/8313")
+
+ def check_needs(self):
+ """
+ Main method that checks needed dependencies and provides
+ recommendations.
+ """
+ self.python_cmd = sys.executable
+
+ # Check if Sphinx is already accessible from current environment
+ self.check_sphinx(self.conf)
+
+ if self.system_release:
+ print(f"Detected OS: {self.system_release}.")
+ else:
+ print("Unknown OS")
+ if self.cur_version != (0, 0, 0):
+ ver = PythonVersion.ver_str(self.cur_version)
+ print(f"Sphinx version: {ver}\n")
+
+ # Check the type of virtual env, depending on Python version
+ virtualenv_cmd = None
+
+ if sys.version_info < MIN_PYTHON_VERSION:
+            min_ver = PythonVersion.ver_str(MIN_PYTHON_VERSION)
+ print(f"ERROR: at least python {min_ver} is required to build the kernel docs")
+ self.need_sphinx = 1
+
+ self.venv_ver = self.recommend_sphinx_upgrade()
+
+ if self.need_pip:
+ if sys.version_info < MIN_PYTHON_VERSION:
+ self.need_pip = False
+ print("Warning: python version is not supported.")
+ else:
+ virtualenv_cmd = f"{self.python_cmd} -m venv"
+ self.check_python_module("ensurepip")
+
+ # Check for needed programs/tools
+ self.check_perl_module("Pod::Usage", DepManager.SYSTEM_MANDATORY)
+
+ self.check_program("make", DepManager.SYSTEM_MANDATORY)
+ self.check_program("which", DepManager.SYSTEM_MANDATORY)
+
+ self.check_program("dot", DepManager.SYSTEM_OPTIONAL)
+ self.check_program("convert", DepManager.SYSTEM_OPTIONAL)
+
+ self.check_python_module("yaml")
+
+ if self.pdf:
+ self.check_program("xelatex", DepManager.PDF_MANDATORY)
+ self.check_program("rsvg-convert", DepManager.PDF_MANDATORY)
+ self.check_program("latexmk", DepManager.PDF_MANDATORY)
+
+ # Do distro-specific checks and output distro-install commands
+ cmd = self.get_install()
+ if cmd:
+ print(cmd)
+
+ # If distro requires some special instructions, print here.
+ # Please notice that get_install() needs to be called first.
+ if self.distro_msg:
+ print("\n" + self.distro_msg)
+
+ if not self.python_cmd:
+            if self.deps.need == 1:
+                sys.exit("Can't build as 1 mandatory dependency is missing")
+            elif self.deps.need:
+                sys.exit(f"Can't build as {self.deps.need} mandatory dependencies are missing")
+
+ # Check if sphinx-build is called sphinx-build-3
+ if self.need_symlink:
+ sphinx_path = self.which("sphinx-build-3")
+ if sphinx_path:
+ print(f"\tsudo ln -sf {sphinx_path} /usr/bin/sphinx-build\n")
+
+ self.recommend_sphinx_version(virtualenv_cmd)
+ print("")
+
+ if not self.deps.optional:
+ print("All optional dependencies are met.")
+
+ if self.deps.need == 1:
+ sys.exit("Can't build as 1 mandatory dependency is missing")
+ elif self.deps.need:
+ sys.exit(f"Can't build as {self.deps.need} mandatory dependencies are missing")
+
+ print("Needed package dependencies are met.")
+
+DESCRIPTION = """
+Process some flags related to Sphinx installation and documentation build.
+"""
+
+
+def main():
+ """Main function"""
+ parser = argparse.ArgumentParser(description=DESCRIPTION)
+
+ parser.add_argument(
+ "--no-virtualenv",
+ action="store_false",
+ dest="virtualenv",
+ help="Recommend installing Sphinx instead of using a virtualenv",
+ )
+
+ parser.add_argument(
+ "--no-pdf",
+ action="store_false",
+ dest="pdf",
+ help="Don't check for dependencies required to build PDF docs",
+ )
+
+ parser.add_argument(
+ "--version-check",
+ action="store_true",
+ dest="version_check",
+ help="If version is compatible, don't check for missing dependencies",
+ )
+
+ args = parser.parse_args()
+
+ checker = SphinxDependencyChecker(args)
+
+ PythonVersion.check_python(MIN_PYTHON_VERSION,
+ bail_out=True, success_on_error=True)
+ checker.check_needs()
+
+# Call main if not used as module
+if __name__ == "__main__":
+ main()
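A convention worth calling out in the checker above: versions are handled as plain integer tuples throughout, so checks like "cur_ver < MIN_PYTHON_VERSION" are lexicographic tuple comparisons. A minimal sketch of the idiom (the floor value here is illustrative only; the script defines its own constants):

    import sys

    # Hypothetical floor, for illustration; the script has the real value.
    MIN_PYTHON_VERSION = (3, 9)

    def python_ok(minimum=MIN_PYTHON_VERSION):
        # Tuples compare element by element, so (3, 10, 1) >= (3, 9)
        # behaves exactly like the version checks in the script above.
        return sys.version_info[:3] >= minimum

    print(python_ok())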
diff --git a/tools/docs/test_doc_build.py b/tools/docs/test_doc_build.py
new file mode 100755
index 000000000000..47b4606569f9
--- /dev/null
+++ b/tools/docs/test_doc_build.py
@@ -0,0 +1,513 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+#
+# pylint: disable=R0903,R0912,R0913,R0914,R0917,C0301
+
+"""
+Install minimal supported requirements for different Sphinx versions
+and optionally test the build.
+"""
+
+import argparse
+import asyncio
+import os.path
+import shutil
+import sys
+import time
+import subprocess
+
+# Python executables: the interpreter running this script, plus the
+# oldest supported python3.x found, used for older Sphinx versions.
+
+PYTHON = os.path.basename(sys.executable)
+
+min_python_bin = None
+
+for i in range(9, 13):
+ p = f"python3.{i}"
+ if shutil.which(p):
+ min_python_bin = p
+ break
+
+if not min_python_bin:
+ min_python_bin = PYTHON
+
+# Starting from Sphinx 8.0, Python 3.9 is not supported anymore.
+PYTHON_VER_CHANGES = {(8, 0, 0): PYTHON}
+
+DEFAULT_VERSIONS_TO_TEST = [
+ (3, 4, 3), # Minimal supported version
+ (5, 3, 0), # CentOS Stream 9 / AlmaLinux 9
+ (6, 1, 1), # Debian 12
+ (7, 2, 1), # openSUSE Leap 15.6
+ (7, 2, 6), # Ubuntu 24.04 LTS
+ (7, 4, 7), # Ubuntu 24.10
+ (7, 3, 0), # openSUSE Tumbleweed
+ (8, 1, 3), # Fedora 42
+ (8, 2, 3) # Latest version - covers rolling distros
+]
+
+# Sphinx versions to be installed and their incremental requirements
+SPHINX_REQUIREMENTS = {
+ # Oldest versions we support for each package required by Sphinx 3.4.3
+    (3, 4, 3): {
+        "alabaster": "0.7.12",
+        "babel": "2.8.0",
+        "certifi": "2020.6.20",
+        "docutils": "0.16",
+ "idna": "2.10",
+ "imagesize": "1.2.0",
+ "Jinja2": "2.11.2",
+ "MarkupSafe": "1.1.1",
+ "packaging": "20.4",
+ "Pygments": "2.6.1",
+ "PyYAML": "5.1",
+ "requests": "2.24.0",
+ "snowballstemmer": "2.0.0",
+ "sphinxcontrib-applehelp": "1.0.2",
+ "sphinxcontrib-devhelp": "1.0.2",
+ "sphinxcontrib-htmlhelp": "1.0.3",
+ "sphinxcontrib-jsmath": "1.0.1",
+ "sphinxcontrib-qthelp": "1.0.3",
+ "sphinxcontrib-serializinghtml": "1.1.4",
+ "urllib3": "1.25.9",
+ },
+
+    # Update package dependencies to a more modern base. The goal here
+    # is to avoid too many incremental changes for the next entries.
+ (3, 5, 0): {
+ "alabaster": "0.7.13",
+ "babel": "2.17.0",
+ "certifi": "2025.6.15",
+ "idna": "3.10",
+ "imagesize": "1.4.1",
+ "packaging": "25.0",
+ "Pygments": "2.8.1",
+ "requests": "2.32.4",
+ "snowballstemmer": "3.0.1",
+ "sphinxcontrib-applehelp": "1.0.4",
+ "sphinxcontrib-htmlhelp": "2.0.1",
+ "sphinxcontrib-serializinghtml": "1.1.5",
+ "urllib3": "2.0.0",
+ },
+
+ # Starting from here, ensure all docutils versions are covered with
+ # supported Sphinx versions. Other packages are upgraded only when
+ # required by pip
+ (4, 0, 0): {
+ "PyYAML": "5.1",
+ },
+ (4, 1, 0): {
+ "docutils": "0.17",
+ "Pygments": "2.19.1",
+ "Jinja2": "3.0.3",
+ "MarkupSafe": "2.0",
+ },
+ (4, 3, 0): {},
+ (4, 4, 0): {},
+ (4, 5, 0): {
+ "docutils": "0.17.1",
+ },
+ (5, 0, 0): {},
+ (5, 1, 0): {},
+ (5, 2, 0): {
+ "docutils": "0.18",
+ "Jinja2": "3.1.2",
+ "MarkupSafe": "2.0",
+ "PyYAML": "5.3.1",
+ },
+ (5, 3, 0): {
+ "docutils": "0.18.1",
+ },
+ (6, 0, 0): {},
+ (6, 1, 0): {},
+ (6, 2, 0): {
+ "PyYAML": "5.4.1",
+ },
+ (7, 0, 0): {},
+ (7, 1, 0): {},
+ (7, 2, 0): {
+ "docutils": "0.19",
+ "PyYAML": "6.0.1",
+ "sphinxcontrib-serializinghtml": "1.1.9",
+ },
+ (7, 2, 6): {
+ "docutils": "0.20",
+ },
+ (7, 3, 0): {
+ "alabaster": "0.7.14",
+ "PyYAML": "6.0.1",
+ "tomli": "2.0.1",
+ },
+ (7, 4, 0): {
+ "docutils": "0.20.1",
+ "PyYAML": "6.0.1",
+ },
+ (8, 0, 0): {
+ "docutils": "0.21",
+ },
+ (8, 1, 0): {
+ "docutils": "0.21.1",
+ "PyYAML": "6.0.1",
+ "sphinxcontrib-applehelp": "1.0.7",
+ "sphinxcontrib-devhelp": "1.0.6",
+ "sphinxcontrib-htmlhelp": "2.0.6",
+ "sphinxcontrib-qthelp": "1.0.6",
+ },
+ (8, 2, 0): {
+ "docutils": "0.21.2",
+ "PyYAML": "6.0.1",
+ "sphinxcontrib-serializinghtml": "1.1.9",
+ },
+}
+
+
+class AsyncCommands:
+    """Execute commands asynchronously, capturing and logging their output"""
+
+ def __init__(self, fp=None):
+
+ self.stdout = None
+ self.stderr = None
+ self.output = None
+ self.fp = fp
+
+ def log(self, out, verbose, is_info=True):
+ out = out.removesuffix('\n')
+
+ if verbose:
+ if is_info:
+ print(out)
+ else:
+ print(out, file=sys.stderr)
+
+ if self.fp:
+ self.fp.write(out + "\n")
+
+    async def _read(self, stream, verbose, is_info):
+        """Ancillary routine to capture a stream's output while displaying it"""
+
+ while stream is not None:
+ line = await stream.readline()
+ if line:
+ out = line.decode("utf-8", errors="backslashreplace")
+ self.log(out, verbose, is_info)
+ if is_info:
+ self.stdout += out
+ else:
+ self.stderr += out
+ else:
+ break
+
+ async def run(self, cmd, capture_output=False, check=False,
+ env=None, verbose=True):
+
+ """
+ Execute an arbitrary command, handling errors.
+
+        Note that this class is not thread safe.
+ """
+
+ self.stdout = ""
+ self.stderr = ""
+
+ self.log("$ " + " ".join(cmd), verbose)
+
+ proc = await asyncio.create_subprocess_exec(cmd[0],
+ *cmd[1:],
+ env=env,
+ stdout=asyncio.subprocess.PIPE,
+ stderr=asyncio.subprocess.PIPE)
+
+ # Handle input and output in realtime
+ await asyncio.gather(
+ self._read(proc.stdout, verbose, True),
+ self._read(proc.stderr, verbose, False),
+ )
+
+ await proc.wait()
+
+ if check and proc.returncode > 0:
+ raise subprocess.CalledProcessError(returncode=proc.returncode,
+ cmd=" ".join(cmd),
+ output=self.stdout,
+ stderr=self.stderr)
+
+        if capture_output:
+            if proc.returncode > 0:
+                self.log(f"Error {proc.returncode}", verbose=True, is_info=False)
+                return ""
+
+            # self.output is never assigned; return the captured stdout
+            return self.stdout
+
+ ret = subprocess.CompletedProcess(args=cmd,
+ returncode=proc.returncode,
+ stdout=self.stdout,
+ stderr=self.stderr)
+
+ return ret
+
+
+class SphinxVenv:
+ """
+ Installs Sphinx on one virtual env per Sphinx version with a minimal
+ set of dependencies, adjusting them to each specific version.
+ """
+
+ def __init__(self):
+ """Initialize instance variables"""
+
+ self.built_time = {}
+ self.first_run = True
+
+ async def _handle_version(self, args, fp,
+ cur_ver, cur_requirements, python_bin):
+ """Handle a single Sphinx version"""
+
+ cmd = AsyncCommands(fp)
+
+ ver = ".".join(map(str, cur_ver))
+
+ if not self.first_run and args.wait_input and args.build:
+ ret = input("Press Enter to continue or 'a' to abort: ").strip().lower()
+ if ret == "a":
+ print("Aborted.")
+ sys.exit()
+ else:
+ self.first_run = False
+
+ venv_dir = f"Sphinx_{ver}"
+ req_file = f"requirements_{ver}.txt"
+
+ cmd.log(f"\nSphinx {ver} with {python_bin}", verbose=True)
+
+ # Create venv
+ await cmd.run([python_bin, "-m", "venv", venv_dir],
+ verbose=args.verbose, check=True)
+ pip = os.path.join(venv_dir, "bin/pip")
+
+ # Create install list
+ reqs = []
+ for pkg, verstr in cur_requirements.items():
+ reqs.append(f"{pkg}=={verstr}")
+
+ reqs.append(f"Sphinx=={ver}")
+
+ await cmd.run([pip, "install"] + reqs, check=True, verbose=args.verbose)
+
+ # Freeze environment
+ result = await cmd.run([pip, "freeze"], verbose=False, check=True)
+
+ # Pip install succeeded. Write requirements file
+ if args.req_file:
+            # Use a distinct name so the log file handle "fp" isn't shadowed
+            with open(req_file, "w", encoding="utf-8") as req_fp:
+                req_fp.write(result.stdout)
+
+ if args.build:
+ start_time = time.time()
+
+ # Prepare a venv environment
+ env = os.environ.copy()
+ bin_dir = os.path.join(venv_dir, "bin")
+ env["PATH"] = bin_dir + ":" + env["PATH"]
+ env["VIRTUAL_ENV"] = venv_dir
+ if "PYTHONHOME" in env:
+ del env["PYTHONHOME"]
+
+ # Test doc build
+ await cmd.run(["make", "cleandocs"], env=env, check=True)
+ make = ["make"]
+
+ if args.output:
+ sphinx_build = os.path.realpath(f"{bin_dir}/sphinx-build")
+ make += [f"O={args.output}", f"SPHINXBUILD={sphinx_build}"]
+
+ if args.make_args:
+ make += args.make_args
+
+ make += args.targets
+
+ if args.verbose:
+ cmd.log(f". {bin_dir}/activate", verbose=True)
+ await cmd.run(make, env=env, check=True, verbose=True)
+ if args.verbose:
+ cmd.log("deactivate", verbose=True)
+
+ end_time = time.time()
+ elapsed_time = end_time - start_time
+ hours, minutes = divmod(elapsed_time, 3600)
+ minutes, seconds = divmod(minutes, 60)
+
+ hours = int(hours)
+ minutes = int(minutes)
+ seconds = int(seconds)
+
+ self.built_time[ver] = f"{hours:02d}:{minutes:02d}:{seconds:02d}"
+
+ cmd.log(f"Finished doc build for Sphinx {ver}. Elapsed time: {self.built_time[ver]}", verbose=True)
+
+ async def run(self, args):
+ """
+        Navigate through multiple Sphinx versions, handling each of them
+        in a loop.
+ """
+
+        if args.log:
+            fp = open(args.log, "w", encoding="utf-8")
+        else:
+            fp = None
+            # Default to verbose output when not logging to a file
+            if not args.verbose:
+                args.verbose = True
+
+ cur_requirements = {}
+ python_bin = min_python_bin
+
+ vers = set(SPHINX_REQUIREMENTS.keys()) | set(args.versions)
+
+ for cur_ver in sorted(vers):
+ if cur_ver in SPHINX_REQUIREMENTS:
+ new_reqs = SPHINX_REQUIREMENTS[cur_ver]
+ cur_requirements.update(new_reqs)
+
+ if cur_ver in PYTHON_VER_CHANGES: # pylint: disable=R1715
+ python_bin = PYTHON_VER_CHANGES[cur_ver]
+
+ if cur_ver not in args.versions:
+ continue
+
+ if args.min_version:
+ if cur_ver < args.min_version:
+ continue
+
+ if args.max_version:
+ if cur_ver > args.max_version:
+ break
+
+ await self._handle_version(args, fp, cur_ver, cur_requirements,
+ python_bin)
+
+ if args.build:
+ cmd = AsyncCommands(fp)
+ cmd.log("\nSummary:", verbose=True)
+ for ver, elapsed_time in sorted(self.built_time.items()):
+ cmd.log(f"\tSphinx {ver} elapsed time: {elapsed_time}",
+ verbose=True)
+
+ if fp:
+ fp.close()
+
+def parse_version(ver_str):
+ """Convert a version string into a tuple."""
+
+ return tuple(map(int, ver_str.split(".")))
+
+
+DEFAULT_VERS = " - "
+DEFAULT_VERS += "\n - ".join(map(lambda v: f"{v[0]}.{v[1]}.{v[2]}",
+ DEFAULT_VERSIONS_TO_TEST))
+
+SCRIPT = os.path.relpath(__file__)
+
+DESCRIPTION = f"""
+This tool allows creating Python virtual environments for different
+Sphinx versions that are supported by the Linux Kernel build system.
+
+Besides creating the virtual environment, it can also test building
+the documentation using "make htmldocs" (and/or other doc targets).
+
+If called without "--versions" argument, it covers the versions shipped
+on major distros, plus the lowest supported version:
+
+{DEFAULT_VERS}
+
+A typical usage is to run:
+
+ {SCRIPT} -m -l sphinx_builds.log
+
+This will create one virtual env per version in the default set and run
+"make htmldocs" for each version, creating a log file with the
+executed commands in it.
+
+NOTE: The build time can be very long, especially with old versions. Also,
+there is a known bug with Sphinx version 6.0.x: each subprocess uses a lot of
+memory. That, together with "-jauto", may trigger the OOM killer and cause
+failures during doc generation. To minimize the risk, you may use the
+"-a" command line parameter to constrain the built directories and/or
+reduce the number of threads from "-jauto" to, for instance, "-j4":
+
+ {SCRIPT} -m -V 6.0.1 -a "SPHINXDIRS=process" "SPHINXOPTS='-j4'"
+
+"""
+
+MAKE_TARGETS = [
+ "htmldocs",
+ "texinfodocs",
+ "infodocs",
+ "latexdocs",
+ "pdfdocs",
+ "epubdocs",
+ "xmldocs",
+]
+
+async def main():
+ """Main program"""
+
+ parser = argparse.ArgumentParser(description=DESCRIPTION,
+ formatter_class=argparse.RawDescriptionHelpFormatter)
+
+ ver_group = parser.add_argument_group("Version range options")
+
+ ver_group.add_argument('-V', '--versions', nargs="*",
+                           default=DEFAULT_VERSIONS_TO_TEST, type=parse_version,
+ help='Sphinx versions to test')
+ ver_group.add_argument('--min-version', "--min", type=parse_version,
+ help='Sphinx minimal version')
+ ver_group.add_argument('--max-version', "--max", type=parse_version,
+ help='Sphinx maximum version')
+ ver_group.add_argument('-f', '--full', action='store_true',
+ help='Add all Sphinx (major,minor) supported versions to the version range')
+
+ build_group = parser.add_argument_group("Build options")
+
+ build_group.add_argument('-b', '--build', action='store_true',
+ help='Build documentation')
+ build_group.add_argument('-a', '--make-args', nargs="*",
+ help='extra arguments for make, like SPHINXDIRS=netlink/specs',
+ )
+ build_group.add_argument('-t', '--targets', nargs="+", choices=MAKE_TARGETS,
+ default=[MAKE_TARGETS[0]],
+ help="make build targets. Default: htmldocs.")
+ build_group.add_argument("-o", '--output',
+ help="output directory for the make O=OUTPUT")
+
+ other_group = parser.add_argument_group("Other options")
+
+ other_group.add_argument('-r', '--req-file', action='store_true',
+ help='write a requirements.txt file')
+ other_group.add_argument('-l', '--log',
+                             help='Log command output to a file')
+ other_group.add_argument('-v', '--verbose', action='store_true',
+ help='Verbose all commands')
+ other_group.add_argument('-i', '--wait-input', action='store_true',
+ help='Wait for an enter before going to the next version')
+
+ args = parser.parse_args()
+
+ if not args.make_args:
+ args.make_args = []
+
+    sphinx_versions = sorted(SPHINX_REQUIREMENTS.keys())
+
+    if args.full:
+        args.versions += sphinx_versions
+
+ venv = SphinxVenv()
+ await venv.run(args)
+
+
+# Call main method
+if __name__ == "__main__":
+ asyncio.run(main())
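The SPHINX_REQUIREMENTS table above is deliberately incremental: each entry lists only the pins that change, and the run() loop folds them together with cur_requirements.update(). A condensed sketch of that resolution logic, using a made-up two-entry table in place of the real one:

    def resolve_requirements(table, target):
        """Fold every entry at or below target; later entries override pins."""
        reqs = {}
        for ver in sorted(table):
            if ver > target:
                break
            reqs.update(table[ver])
        return reqs

    # Miniature stand-in mirroring the structure of SPHINX_REQUIREMENTS.
    table = {
        (3, 4, 3): {"docutils": "0.16", "Jinja2": "2.11.2"},
        (4, 1, 0): {"docutils": "0.17", "Jinja2": "3.0.3"},
    }
    # For Sphinx 4.5.0 the 4.1.0 pins win: docutils 0.17, Jinja2 3.0.3.
    print(resolve_requirements(table, (4, 5, 0)))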
diff --git a/tools/gpio/Makefile b/tools/gpio/Makefile
index ed565eb52275..342e056c8c66 100644
--- a/tools/gpio/Makefile
+++ b/tools/gpio/Makefile
@@ -77,7 +77,7 @@ $(OUTPUT)gpio-watch: $(GPIO_WATCH_IN)
clean:
rm -f $(ALL_PROGRAMS)
- rm -f $(OUTPUT)include/linux/gpio.h
+ rm -rf $(OUTPUT)include
find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete -o -name '\.*.cmd' -delete
install: $(ALL_PROGRAMS)
diff --git a/tools/hv/hv_fcopy_uio_daemon.c b/tools/hv/hv_fcopy_uio_daemon.c
index 0198321d14a2..92e8307b2a46 100644
--- a/tools/hv/hv_fcopy_uio_daemon.c
+++ b/tools/hv/hv_fcopy_uio_daemon.c
@@ -35,7 +35,10 @@
#define WIN8_SRV_MINOR 1
#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
-#define FCOPY_UIO "/sys/bus/vmbus/devices/eb765408-105f-49b6-b4aa-c123b64d17d4/uio"
+#define FCOPY_DEVICE_PATH(subdir) \
+ "/sys/bus/vmbus/devices/eb765408-105f-49b6-b4aa-c123b64d17d4/" #subdir
+#define FCOPY_UIO_PATH FCOPY_DEVICE_PATH(uio)
+#define FCOPY_CHANNELS_PATH FCOPY_DEVICE_PATH(channels)
#define FCOPY_VER_COUNT 1
static const int fcopy_versions[] = {
@@ -47,9 +50,62 @@ static const int fw_versions[] = {
UTIL_FW_VERSION
};
-#define HV_RING_SIZE 0x4000 /* 16KB ring buffer size */
+static uint32_t get_ring_buffer_size(void)
+{
+ char ring_path[PATH_MAX];
+ DIR *dir;
+ struct dirent *entry;
+ struct stat st;
+ uint32_t ring_size = 0;
+ int retry_count = 0;
+
+ /* Find the channel directory */
+ dir = opendir(FCOPY_CHANNELS_PATH);
+ if (!dir) {
+ usleep(100 * 1000); /* Avoid race with kernel, wait 100ms and retry once */
+ dir = opendir(FCOPY_CHANNELS_PATH);
+ if (!dir) {
+ syslog(LOG_ERR, "Failed to open channels directory: %s", strerror(errno));
+ return 0;
+ }
+ }
+
+retry_once:
+ while ((entry = readdir(dir)) != NULL) {
+ if (entry->d_type == DT_DIR && strcmp(entry->d_name, ".") != 0 &&
+ strcmp(entry->d_name, "..") != 0) {
+ snprintf(ring_path, sizeof(ring_path), "%s/%s/ring",
+ FCOPY_CHANNELS_PATH, entry->d_name);
+
+ if (stat(ring_path, &st) == 0) {
+ /*
+ * stat returns size of Tx, Rx rings combined,
+ * so take half of it for individual ring size.
+ */
+ ring_size = (uint32_t)st.st_size / 2;
+ syslog(LOG_INFO, "Ring buffer size from %s: %u bytes",
+ ring_path, ring_size);
+ break;
+ }
+ }
+ }
+
+ if (!ring_size && retry_count == 0) {
+ retry_count = 1;
+ rewinddir(dir);
+ usleep(100 * 1000); /* Wait 100ms and retry once */
+ goto retry_once;
+ }
+
+ closedir(dir);
-static unsigned char desc[HV_RING_SIZE];
+ if (!ring_size)
+ syslog(LOG_ERR, "Could not determine ring size");
+
+ return ring_size;
+}
+
+static unsigned char *desc;
static int target_fd;
static char target_fname[PATH_MAX];
@@ -62,8 +118,11 @@ static int hv_fcopy_create_file(char *file_name, char *path_name, __u32 flags)
filesize = 0;
p = path_name;
- snprintf(target_fname, sizeof(target_fname), "%s/%s",
- path_name, file_name);
+ if (snprintf(target_fname, sizeof(target_fname), "%s/%s",
+ path_name, file_name) >= sizeof(target_fname)) {
+ syslog(LOG_ERR, "target file name is too long: %s/%s", path_name, file_name);
+ goto done;
+ }
/*
* Check to see if the path is already in place; if not,
@@ -270,7 +329,7 @@ static void wcstoutf8(char *dest, const __u16 *src, size_t dest_size)
{
size_t len = 0;
- while (len < dest_size) {
+ while (len < dest_size && *src) {
if (src[len] < 0x80)
dest[len++] = (char)(*src++);
else
@@ -282,27 +341,15 @@ static void wcstoutf8(char *dest, const __u16 *src, size_t dest_size)
static int hv_fcopy_start(struct hv_start_fcopy *smsg_in)
{
- setlocale(LC_ALL, "en_US.utf8");
- size_t file_size, path_size;
- char *file_name, *path_name;
- char *in_file_name = (char *)smsg_in->file_name;
- char *in_path_name = (char *)smsg_in->path_name;
-
- file_size = wcstombs(NULL, (const wchar_t *restrict)in_file_name, 0) + 1;
- path_size = wcstombs(NULL, (const wchar_t *restrict)in_path_name, 0) + 1;
-
- file_name = (char *)malloc(file_size * sizeof(char));
- path_name = (char *)malloc(path_size * sizeof(char));
-
- if (!file_name || !path_name) {
- free(file_name);
- free(path_name);
- syslog(LOG_ERR, "Can't allocate memory for file name and/or path name");
- return HV_E_FAIL;
- }
+ /*
+	 * file_name and path_name must be the same size as the
+	 * corresponding members of struct hv_start_fcopy.
+ */
+ char file_name[W_MAX_PATH], path_name[W_MAX_PATH];
- wcstoutf8(file_name, (__u16 *)in_file_name, file_size);
- wcstoutf8(path_name, (__u16 *)in_path_name, path_size);
+ setlocale(LC_ALL, "en_US.utf8");
+ wcstoutf8(file_name, smsg_in->file_name, W_MAX_PATH - 1);
+ wcstoutf8(path_name, smsg_in->path_name, W_MAX_PATH - 1);
return hv_fcopy_create_file(file_name, path_name, smsg_in->copy_flags);
}
@@ -406,7 +453,7 @@ int main(int argc, char *argv[])
int daemonize = 1, long_index = 0, opt, ret = -EINVAL;
struct vmbus_br txbr, rxbr;
void *ring;
- uint32_t len = HV_RING_SIZE;
+ uint32_t ring_size, len;
char uio_name[NAME_MAX] = {0};
char uio_dev_path[PATH_MAX] = {0};
@@ -437,7 +484,20 @@ int main(int argc, char *argv[])
openlog("HV_UIO_FCOPY", 0, LOG_USER);
syslog(LOG_INFO, "starting; pid is:%d", getpid());
- fcopy_get_first_folder(FCOPY_UIO, uio_name);
+ ring_size = get_ring_buffer_size();
+ if (!ring_size) {
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ desc = malloc(ring_size * sizeof(unsigned char));
+ if (!desc) {
+ syslog(LOG_ERR, "malloc failed for desc buffer");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ fcopy_get_first_folder(FCOPY_UIO_PATH, uio_name);
snprintf(uio_dev_path, sizeof(uio_dev_path), "/dev/%s", uio_name);
fcopy_fd = open(uio_dev_path, O_RDWR);
@@ -445,17 +505,17 @@ int main(int argc, char *argv[])
syslog(LOG_ERR, "open %s failed; error: %d %s",
uio_dev_path, errno, strerror(errno));
ret = fcopy_fd;
- goto exit;
+ goto free_desc;
}
- ring = vmbus_uio_map(&fcopy_fd, HV_RING_SIZE);
+ ring = vmbus_uio_map(&fcopy_fd, ring_size);
if (!ring) {
ret = errno;
syslog(LOG_ERR, "mmap ringbuffer failed; error: %d %s", ret, strerror(ret));
goto close;
}
- vmbus_br_setup(&txbr, ring, HV_RING_SIZE);
- vmbus_br_setup(&rxbr, (char *)ring + HV_RING_SIZE, HV_RING_SIZE);
+ vmbus_br_setup(&txbr, ring, ring_size);
+ vmbus_br_setup(&rxbr, (char *)ring + ring_size, ring_size);
rxbr.vbr->imask = 0;
@@ -472,7 +532,7 @@ int main(int argc, char *argv[])
goto close;
}
- len = HV_RING_SIZE;
+ len = ring_size;
ret = rte_vmbus_chan_recv_raw(&rxbr, desc, &len);
if (unlikely(ret <= 0)) {
/* This indicates a failure to communicate (or worse) */
@@ -492,6 +552,8 @@ int main(int argc, char *argv[])
}
close:
close(fcopy_fd);
+free_desc:
+ free(desc);
exit:
return ret;
}
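The key arithmetic in get_ring_buffer_size() above is that the sysfs "ring" file maps the Tx and Rx rings back to back, so one ring is half of the size stat() reports. The same computation, mirrored in Python as a quick sketch (the path below is illustrative; the daemon discovers the real channel directory at runtime):

    import os

    def ring_size_from_sysfs(ring_path):
        # stat() reports the Tx and Rx rings combined, so one ring is half.
        total = os.stat(ring_path).st_size
        return total // 2

    # Illustrative path only:
    # ring_size_from_sysfs("/sys/bus/vmbus/devices/<uuid>/channels/<n>/ring")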
diff --git a/tools/iio/iio_event_monitor.c b/tools/iio/iio_event_monitor.c
index eab7b082f19d..03ca33869ce8 100644
--- a/tools/iio/iio_event_monitor.c
+++ b/tools/iio/iio_event_monitor.c
@@ -64,6 +64,7 @@ static const char * const iio_chan_type_name_spec[] = {
[IIO_COLORTEMP] = "colortemp",
[IIO_CHROMATICITY] = "chromaticity",
[IIO_ATTENTION] = "attention",
+ [IIO_ALTCURRENT] = "altcurrent",
};
static const char * const iio_ev_type_text[] = {
@@ -140,6 +141,10 @@ static const char * const iio_modifier_names[] = {
[IIO_MOD_PITCH] = "pitch",
[IIO_MOD_YAW] = "yaw",
[IIO_MOD_ROLL] = "roll",
+ [IIO_MOD_RMS] = "rms",
+ [IIO_MOD_ACTIVE] = "active",
+ [IIO_MOD_REACTIVE] = "reactive",
+ [IIO_MOD_APPARENT] = "apparent",
};
static bool event_is_known(struct iio_event_data *event)
@@ -187,6 +192,7 @@ static bool event_is_known(struct iio_event_data *event)
case IIO_COLORTEMP:
case IIO_CHROMATICITY:
case IIO_ATTENTION:
+ case IIO_ALTCURRENT:
break;
default:
return false;
@@ -238,6 +244,10 @@ static bool event_is_known(struct iio_event_data *event)
case IIO_MOD_PM4:
case IIO_MOD_PM10:
case IIO_MOD_O2:
+ case IIO_MOD_RMS:
+ case IIO_MOD_ACTIVE:
+ case IIO_MOD_REACTIVE:
+ case IIO_MOD_APPARENT:
break;
default:
return false;
diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
index 9ef5ee087eda..bc82bb6a7a2a 100644
--- a/tools/iio/iio_generic_buffer.c
+++ b/tools/iio/iio_generic_buffer.c
@@ -335,7 +335,7 @@ static const struct option longopts[] = {
{ "device-num", 1, 0, 'N' },
{ "trigger-name", 1, 0, 't' },
{ "trigger-num", 1, 0, 'T' },
- { },
+ { }
};
int main(int argc, char **argv)
diff --git a/tools/include/asm-generic/bitops/__fls.h b/tools/include/asm-generic/bitops/__fls.h
index e974ec932ec1..35f33780ca6c 100644
--- a/tools/include/asm-generic/bitops/__fls.h
+++ b/tools/include/asm-generic/bitops/__fls.h
@@ -10,7 +10,7 @@
*
* Undefined if no set bit exists, so code should check against 0 first.
*/
-static __always_inline unsigned int generic___fls(unsigned long word)
+static __always_inline __attribute_const__ unsigned int generic___fls(unsigned long word)
{
unsigned int num = BITS_PER_LONG - 1;
diff --git a/tools/include/asm-generic/bitops/fls.h b/tools/include/asm-generic/bitops/fls.h
index 26f3ce1dd6e4..8eed3437edb9 100644
--- a/tools/include/asm-generic/bitops/fls.h
+++ b/tools/include/asm-generic/bitops/fls.h
@@ -10,7 +10,7 @@
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
-static __always_inline int generic_fls(unsigned int x)
+static __always_inline __attribute_const__ int generic_fls(unsigned int x)
{
int r = 32;
diff --git a/tools/include/asm-generic/bitops/fls64.h b/tools/include/asm-generic/bitops/fls64.h
index 866f2b2304ff..b5f58dd261a3 100644
--- a/tools/include/asm-generic/bitops/fls64.h
+++ b/tools/include/asm-generic/bitops/fls64.h
@@ -16,7 +16,7 @@
* at position 64.
*/
#if BITS_PER_LONG == 32
-static __always_inline int fls64(__u64 x)
+static __always_inline __attribute_const__ int fls64(__u64 x)
{
__u32 h = x >> 32;
if (h)
@@ -24,7 +24,7 @@ static __always_inline int fls64(__u64 x)
return fls(x);
}
#elif BITS_PER_LONG == 64
-static __always_inline int fls64(__u64 x)
+static __always_inline __attribute_const__ int fls64(__u64 x)
{
if (x == 0)
return 0;
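The contract documented for these helpers, fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32, is the 1-based index of the most significant set bit. Python's int.bit_length() matches that contract exactly, which makes for a compact cross-check of the semantics:

    def fls(x: int) -> int:
        # 1-based index of the top set bit; 0 when no bit is set.
        return x.bit_length()

    assert fls(0) == 0
    assert fls(1) == 1
    assert fls(0x80000000) == 32
    assert fls(1 << 63) == 64   # the fls64() case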
diff --git a/tools/include/asm-generic/io.h b/tools/include/asm-generic/io.h
new file mode 100644
index 000000000000..e5a0b07ad452
--- /dev/null
+++ b/tools/include/asm-generic/io.h
@@ -0,0 +1,482 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TOOLS_ASM_GENERIC_IO_H
+#define _TOOLS_ASM_GENERIC_IO_H
+
+#include <asm/barrier.h>
+#include <asm/byteorder.h>
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#ifndef mmiowb_set_pending
+#define mmiowb_set_pending() do { } while (0)
+#endif
+
+#ifndef __io_br
+#define __io_br() barrier()
+#endif
+
+/* prevent prefetching of coherent DMA data ahead of a dma-complete */
+#ifndef __io_ar
+#ifdef rmb
+#define __io_ar(v) rmb()
+#else
+#define __io_ar(v) barrier()
+#endif
+#endif
+
+/* flush writes to coherent DMA data before possibly triggering a DMA read */
+#ifndef __io_bw
+#ifdef wmb
+#define __io_bw() wmb()
+#else
+#define __io_bw() barrier()
+#endif
+#endif
+
+/* serialize device access against a spin_unlock, usually handled there. */
+#ifndef __io_aw
+#define __io_aw() mmiowb_set_pending()
+#endif
+
+#ifndef __io_pbw
+#define __io_pbw() __io_bw()
+#endif
+
+#ifndef __io_paw
+#define __io_paw() __io_aw()
+#endif
+
+#ifndef __io_pbr
+#define __io_pbr() __io_br()
+#endif
+
+#ifndef __io_par
+#define __io_par(v) __io_ar(v)
+#endif
+
+#ifndef _THIS_IP_
+#define _THIS_IP_ 0
+#endif
+
+static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0) {}
+static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0) {}
+static inline void log_read_mmio(u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0) {}
+static inline void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0) {}
+
+/*
+ * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
+ *
+ * On some architectures memory mapped IO needs to be accessed differently.
+ * On the simple architectures, we just read/write the memory location
+ * directly.
+ */
+
+#ifndef __raw_readb
+#define __raw_readb __raw_readb
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+ return *(const volatile u8 __force *)addr;
+}
+#endif
+
+#ifndef __raw_readw
+#define __raw_readw __raw_readw
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+ return *(const volatile u16 __force *)addr;
+}
+#endif
+
+#ifndef __raw_readl
+#define __raw_readl __raw_readl
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+ return *(const volatile u32 __force *)addr;
+}
+#endif
+
+#ifndef __raw_readq
+#define __raw_readq __raw_readq
+static inline u64 __raw_readq(const volatile void __iomem *addr)
+{
+ return *(const volatile u64 __force *)addr;
+}
+#endif
+
+#ifndef __raw_writeb
+#define __raw_writeb __raw_writeb
+static inline void __raw_writeb(u8 value, volatile void __iomem *addr)
+{
+ *(volatile u8 __force *)addr = value;
+}
+#endif
+
+#ifndef __raw_writew
+#define __raw_writew __raw_writew
+static inline void __raw_writew(u16 value, volatile void __iomem *addr)
+{
+ *(volatile u16 __force *)addr = value;
+}
+#endif
+
+#ifndef __raw_writel
+#define __raw_writel __raw_writel
+static inline void __raw_writel(u32 value, volatile void __iomem *addr)
+{
+ *(volatile u32 __force *)addr = value;
+}
+#endif
+
+#ifndef __raw_writeq
+#define __raw_writeq __raw_writeq
+static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
+{
+ *(volatile u64 __force *)addr = value;
+}
+#endif
+
+/*
+ * {read,write}{b,w,l,q}() access little endian memory and return result in
+ * native endianness.
+ */
+
+#ifndef readb
+#define readb readb
+static inline u8 readb(const volatile void __iomem *addr)
+{
+ u8 val;
+
+ log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
+ __io_br();
+ val = __raw_readb(addr);
+ __io_ar(val);
+ log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
+ return val;
+}
+#endif
+
+#ifndef readw
+#define readw readw
+static inline u16 readw(const volatile void __iomem *addr)
+{
+ u16 val;
+
+ log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
+ __io_br();
+ val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
+ __io_ar(val);
+ log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
+ return val;
+}
+#endif
+
+#ifndef readl
+#define readl readl
+static inline u32 readl(const volatile void __iomem *addr)
+{
+ u32 val;
+
+ log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
+ __io_br();
+ val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
+ __io_ar(val);
+ log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
+ return val;
+}
+#endif
+
+#ifndef readq
+#define readq readq
+static inline u64 readq(const volatile void __iomem *addr)
+{
+ u64 val;
+
+ log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
+ __io_br();
+ val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
+ __io_ar(val);
+ log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
+ return val;
+}
+#endif
+
+#ifndef writeb
+#define writeb writeb
+static inline void writeb(u8 value, volatile void __iomem *addr)
+{
+ log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+ __io_bw();
+ __raw_writeb(value, addr);
+ __io_aw();
+ log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+}
+#endif
+
+#ifndef writew
+#define writew writew
+static inline void writew(u16 value, volatile void __iomem *addr)
+{
+ log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ __io_bw();
+ __raw_writew((u16 __force)cpu_to_le16(value), addr);
+ __io_aw();
+ log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+}
+#endif
+
+#ifndef writel
+#define writel writel
+static inline void writel(u32 value, volatile void __iomem *addr)
+{
+ log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ __io_bw();
+ __raw_writel((u32 __force)__cpu_to_le32(value), addr);
+ __io_aw();
+ log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+}
+#endif
+
+#ifndef writeq
+#define writeq writeq
+static inline void writeq(u64 value, volatile void __iomem *addr)
+{
+ log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ __io_bw();
+ __raw_writeq((u64 __force)__cpu_to_le64(value), addr);
+ __io_aw();
+ log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+}
+#endif
+
+/*
+ * {read,write}{b,w,l,q}_relaxed() are like the regular version, but
+ * are not guaranteed to provide ordering against spinlocks or memory
+ * accesses.
+ */
+#ifndef readb_relaxed
+#define readb_relaxed readb_relaxed
+static inline u8 readb_relaxed(const volatile void __iomem *addr)
+{
+ u8 val;
+
+ log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
+ val = __raw_readb(addr);
+ log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
+ return val;
+}
+#endif
+
+#ifndef readw_relaxed
+#define readw_relaxed readw_relaxed
+static inline u16 readw_relaxed(const volatile void __iomem *addr)
+{
+ u16 val;
+
+ log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
+ val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
+ log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
+ return val;
+}
+#endif
+
+#ifndef readl_relaxed
+#define readl_relaxed readl_relaxed
+static inline u32 readl_relaxed(const volatile void __iomem *addr)
+{
+ u32 val;
+
+ log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
+ val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
+ log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
+ return val;
+}
+#endif
+
+#if defined(readq) && !defined(readq_relaxed)
+#define readq_relaxed readq_relaxed
+static inline u64 readq_relaxed(const volatile void __iomem *addr)
+{
+ u64 val;
+
+ log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
+ val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
+ log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
+ return val;
+}
+#endif
+
+#ifndef writeb_relaxed
+#define writeb_relaxed writeb_relaxed
+static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
+{
+ log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+ __raw_writeb(value, addr);
+ log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+}
+#endif
+
+#ifndef writew_relaxed
+#define writew_relaxed writew_relaxed
+static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
+{
+ log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ __raw_writew((u16 __force)cpu_to_le16(value), addr);
+ log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+}
+#endif
+
+#ifndef writel_relaxed
+#define writel_relaxed writel_relaxed
+static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
+{
+ log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ __raw_writel((u32 __force)__cpu_to_le32(value), addr);
+ log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+}
+#endif
+
+#if defined(writeq) && !defined(writeq_relaxed)
+#define writeq_relaxed writeq_relaxed
+static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
+{
+ log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ __raw_writeq((u64 __force)__cpu_to_le64(value), addr);
+ log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+}
+#endif
+
+/*
+ * {read,write}s{b,w,l,q}() repeatedly access the same memory address in
+ * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times).
+ */
+#ifndef readsb
+#define readsb readsb
+static inline void readsb(const volatile void __iomem *addr, void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ u8 *buf = buffer;
+
+ do {
+ u8 x = __raw_readb(addr);
+ *buf++ = x;
+ } while (--count);
+ }
+}
+#endif
+
+#ifndef readsw
+#define readsw readsw
+static inline void readsw(const volatile void __iomem *addr, void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ u16 *buf = buffer;
+
+ do {
+ u16 x = __raw_readw(addr);
+ *buf++ = x;
+ } while (--count);
+ }
+}
+#endif
+
+#ifndef readsl
+#define readsl readsl
+static inline void readsl(const volatile void __iomem *addr, void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ u32 *buf = buffer;
+
+ do {
+ u32 x = __raw_readl(addr);
+ *buf++ = x;
+ } while (--count);
+ }
+}
+#endif
+
+#ifndef readsq
+#define readsq readsq
+static inline void readsq(const volatile void __iomem *addr, void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ u64 *buf = buffer;
+
+ do {
+ u64 x = __raw_readq(addr);
+ *buf++ = x;
+ } while (--count);
+ }
+}
+#endif
+
+#ifndef writesb
+#define writesb writesb
+static inline void writesb(volatile void __iomem *addr, const void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ const u8 *buf = buffer;
+
+ do {
+ __raw_writeb(*buf++, addr);
+ } while (--count);
+ }
+}
+#endif
+
+#ifndef writesw
+#define writesw writesw
+static inline void writesw(volatile void __iomem *addr, const void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ const u16 *buf = buffer;
+
+ do {
+ __raw_writew(*buf++, addr);
+ } while (--count);
+ }
+}
+#endif
+
+#ifndef writesl
+#define writesl writesl
+static inline void writesl(volatile void __iomem *addr, const void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ const u32 *buf = buffer;
+
+ do {
+ __raw_writel(*buf++, addr);
+ } while (--count);
+ }
+}
+#endif
+
+#ifndef writesq
+#define writesq writesq
+static inline void writesq(volatile void __iomem *addr, const void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ const u64 *buf = buffer;
+
+ do {
+ __raw_writeq(*buf++, addr);
+ } while (--count);
+ }
+}
+#endif
+
+#endif /* _TOOLS_ASM_GENERIC_IO_H */
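One point worth underlining in the header above: __raw_read*() access memory in native endianness, while read{b,w,l,q}() are defined to read little-endian memory and return values in native order. That byte-order step can be mirrored in Python with an explicit little-endian unpack (the bytes object stands in for MMIO space, purely for illustration):

    import struct

    def readl_like(mmio: bytes, offset: int = 0) -> int:
        # "<I" forces the little-endian interpretation that readl() documents.
        return struct.unpack_from("<I", mmio, offset)[0]

    # 0x12345678 stored little-endian: low byte first.
    assert readl_like(bytes([0x78, 0x56, 0x34, 0x12])) == 0x12345678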
diff --git a/tools/include/asm/io.h b/tools/include/asm/io.h
new file mode 100644
index 000000000000..eed5066f25c4
--- /dev/null
+++ b/tools/include/asm/io.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TOOLS_ASM_IO_H
+#define _TOOLS_ASM_IO_H
+
+#if defined(__i386__) || defined(__x86_64__)
+#include "../../arch/x86/include/asm/io.h"
+#else
+#include <asm-generic/io.h>
+#endif
+
+#endif /* _TOOLS_ASM_IO_H */
diff --git a/tools/include/linux/args.h b/tools/include/linux/args.h
new file mode 100644
index 000000000000..2e8e65d975c7
--- /dev/null
+++ b/tools/include/linux/args.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_ARGS_H
+#define _LINUX_ARGS_H
+
+/*
+ * How do these macros work?
+ *
+ * In __COUNT_ARGS() _0 to _15 are just placeholders from the start
+ * in order to make sure _n is positioned over the correct number
+ * from 15 to 0 (depending on X, which is a variadic argument list).
+ * They serve no purpose other than occupying a position. Since each
+ * macro parameter must have a distinct identifier, those identifiers
+ * are as good as any.
+ *
+ * In COUNT_ARGS() we use actual integers, so __COUNT_ARGS() returns
+ * that as _n.
+ */
+
+/* This counts to 15. Any more, and it will return the 16th argument. */
+#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _n, X...) _n
+#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+
+/* Concatenate two parameters, but allow them to be expanded beforehand. */
+#define __CONCAT(a, b) a ## b
+#define CONCATENATE(a, b) __CONCAT(a, b)
+
+#endif /* _LINUX_ARGS_H */
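The positional trick behind __COUNT_ARGS() translates to any language: the arguments push a fixed descending run to the right, so one fixed slot always holds the argument count. A Python mirror of the same mechanics, following the 16-placeholder layout above (a sketch of the technique, not kernel code):

    def count_args(*args):
        # [None] models the leading empty argument produced by ", ##X";
        # each real argument shifts the descending 15..0 run right by one,
        # so the fixed "_n" slot (index 16) ends up holding len(args).
        slots = [None] + list(args) + list(range(15, -1, -1))
        return slots[16]

    assert count_args() == 0
    assert count_args("a", "b", "c") == 3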
diff --git a/tools/include/linux/atomic.h b/tools/include/linux/atomic.h
index 01907b33537e..50c66ba9ada5 100644
--- a/tools/include/linux/atomic.h
+++ b/tools/include/linux/atomic.h
@@ -12,4 +12,26 @@ void atomic_long_set(atomic_long_t *v, long i);
#define atomic_cmpxchg_release atomic_cmpxchg
#endif /* atomic_cmpxchg_relaxed */
+static inline bool atomic_try_cmpxchg(atomic_t *ptr, int *oldp, int new)
+{
+ int ret, old = *oldp;
+
+ ret = atomic_cmpxchg(ptr, old, new);
+ if (ret != old)
+ *oldp = ret;
+ return ret == old;
+}
+
+static inline bool atomic_inc_unless_negative(atomic_t *v)
+{
+ int c = atomic_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!atomic_try_cmpxchg(v, &c, c + 1));
+
+ return true;
+}
+
#endif /* __TOOLS_LINUX_ATOMIC_H */
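atomic_inc_unless_negative() above is the classic compare-and-swap retry loop: read the value, bail out on the guard condition, and retry with the refreshed value whenever the CAS loses a race. The shape of that loop, sketched in plain (non-atomic) Python with a one-element list standing in for the atomic cell:

    def try_cmpxchg(cell, expected, new):
        # Mirrors atomic_try_cmpxchg(): on failure, hand back the fresh value.
        if cell[0] == expected:
            cell[0] = new
            return True, expected
        return False, cell[0]

    def inc_unless_negative(cell):
        c = cell[0]
        while True:
            if c < 0:
                return False      # guard: never resurrect a negative count
            ok, c = try_cmpxchg(cell, c, c + 1)
            if ok:
                return True

    cell = [0]
    assert inc_unless_negative(cell) and cell == [1]
    assert not inc_unless_negative([-1])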
diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
index d4d300040d01..0d992245c600 100644
--- a/tools/include/linux/bitmap.h
+++ b/tools/include/linux/bitmap.h
@@ -3,6 +3,7 @@
#define _TOOLS_LINUX_BITMAP_H
#include <string.h>
+#include <asm-generic/bitsperlong.h>
#include <linux/align.h>
#include <linux/bitops.h>
#include <linux/find.h>
diff --git a/tools/include/linux/bits.h b/tools/include/linux/bits.h
index 8de2914e6510..a40cc861b3a7 100644
--- a/tools/include/linux/bits.h
+++ b/tools/include/linux/bits.h
@@ -2,16 +2,15 @@
#ifndef __LINUX_BITS_H
#define __LINUX_BITS_H
-#include <linux/const.h>
#include <vdso/bits.h>
#include <uapi/linux/bits.h>
-#include <asm/bitsperlong.h>
#define BIT_MASK(nr) (UL(1) << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
#define BIT_ULL_MASK(nr) (ULL(1) << ((nr) % BITS_PER_LONG_LONG))
#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
#define BITS_PER_BYTE 8
+#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
/*
* Create a contiguous bitmask starting at bit position @l and ending at
@@ -19,36 +18,72 @@
* GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
*/
#if !defined(__ASSEMBLY__)
+
+/*
+ * Missing asm support
+ *
+ * GENMASK_U*() and BIT_U*() depend on BITS_PER_TYPE() which relies on sizeof(),
+ * something not available in asm. Nevertheless, fixed width integers are a C
+ * concept. Assembly code can rely on the long and long long versions instead.
+ */
+
#include <linux/build_bug.h>
-#define GENMASK_INPUT_CHECK(h, l) \
- (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
- __is_constexpr((l) > (h)), (l) > (h), 0)))
-#else
+#include <linux/compiler.h>
+#include <linux/overflow.h>
+
+#define GENMASK_INPUT_CHECK(h, l) BUILD_BUG_ON_ZERO(const_true((l) > (h)))
+
/*
- * BUILD_BUG_ON_ZERO is not available in h files included from asm files,
- * disable the input check if that is the case.
+ * Generate a mask for the specified type @t. Additional checks are made to
+ * guarantee the value returned fits in that type, relying on
+ * -Wshift-count-overflow compiler check to detect incompatible arguments.
+ * For example, all these create build errors or warnings:
+ *
+ * - GENMASK(15, 20): wrong argument order
+ * - GENMASK(72, 15): doesn't fit unsigned long
+ * - GENMASK_U32(33, 15): doesn't fit in a u32
*/
-#define GENMASK_INPUT_CHECK(h, l) 0
-#endif
+#define GENMASK_TYPE(t, h, l) \
+ ((t)(GENMASK_INPUT_CHECK(h, l) + \
+ (type_max(t) << (l) & \
+ type_max(t) >> (BITS_PER_TYPE(t) - 1 - (h)))))
-#define GENMASK(h, l) \
- (GENMASK_INPUT_CHECK(h, l) + __GENMASK(h, l))
-#define GENMASK_ULL(h, l) \
- (GENMASK_INPUT_CHECK(h, l) + __GENMASK_ULL(h, l))
+#define GENMASK(h, l) GENMASK_TYPE(unsigned long, h, l)
+#define GENMASK_ULL(h, l) GENMASK_TYPE(unsigned long long, h, l)
+
+#define GENMASK_U8(h, l) GENMASK_TYPE(u8, h, l)
+#define GENMASK_U16(h, l) GENMASK_TYPE(u16, h, l)
+#define GENMASK_U32(h, l) GENMASK_TYPE(u32, h, l)
+#define GENMASK_U64(h, l) GENMASK_TYPE(u64, h, l)
+#define GENMASK_U128(h, l) GENMASK_TYPE(u128, h, l)
-#if !defined(__ASSEMBLY__)
/*
- * Missing asm support
+ * Fixed-type variants of BIT(), with additional checks like GENMASK_TYPE(). The
+ * following examples generate compiler warnings due to -Wshift-count-overflow:
*
- * __GENMASK_U128() depends on _BIT128() which would not work
- * in the asm code, as it shifts an 'unsigned __int128' data
- * type instead of direct representation of 128 bit constants
- * such as long and unsigned long. The fundamental problem is
- * that a 128 bit constant will get silently truncated by the
- * gcc compiler.
+ * - BIT_U8(8)
+ * - BIT_U32(-1)
+ * - BIT_U32(40)
*/
-#define GENMASK_U128(h, l) \
- (GENMASK_INPUT_CHECK(h, l) + __GENMASK_U128(h, l))
-#endif
+#define BIT_INPUT_CHECK(type, nr) \
+ BUILD_BUG_ON_ZERO(const_true((nr) >= BITS_PER_TYPE(type)))
+
+#define BIT_TYPE(type, nr) ((type)(BIT_INPUT_CHECK(type, nr) + BIT_ULL(nr)))
+
+#define BIT_U8(nr) BIT_TYPE(u8, nr)
+#define BIT_U16(nr) BIT_TYPE(u16, nr)
+#define BIT_U32(nr) BIT_TYPE(u32, nr)
+#define BIT_U64(nr) BIT_TYPE(u64, nr)
+
+#else /* defined(__ASSEMBLY__) */
+
+/*
+ * BUILD_BUG_ON_ZERO is not available in h files included from asm files,
+ * disable the input check if that is the case.
+ */
+#define GENMASK(h, l) __GENMASK(h, l)
+#define GENMASK_ULL(h, l) __GENMASK_ULL(h, l)
+
+#endif /* !defined(__ASSEMBLY__) */
#endif /* __LINUX_BITS_H */
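The GENMASK_TYPE() arithmetic above is compact enough to verify by hand: shift the type's all-ones value left by l, shift it right so only bits up to h survive, and intersect. The same computation in Python, checked against the header's own GENMASK_ULL(39, 21) example (type_max here plays the role of type_max(t)):

    def genmask(h: int, l: int, bits: int = 64) -> int:
        # All-ones value of the type, as in GENMASK_TYPE().
        type_max = (1 << bits) - 1
        return (type_max << l) & (type_max >> (bits - 1 - h)) & type_max

    assert genmask(39, 21) == 0x000000ffffe00000   # the header's example
    assert genmask(7, 4, bits=8) == 0xf0           # a GENMASK_U8() analogue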
diff --git a/tools/include/linux/build_bug.h b/tools/include/linux/build_bug.h
index b4898ff085de..ab2aa97bd8ce 100644
--- a/tools/include/linux/build_bug.h
+++ b/tools/include/linux/build_bug.h
@@ -4,17 +4,17 @@
#include <linux/compiler.h>
-#ifdef __CHECKER__
-#define BUILD_BUG_ON_ZERO(e) (0)
-#else /* __CHECKER__ */
/*
* Force a compilation error if condition is true, but also produce a
* result (of value 0 and type int), so the expression can be used
* e.g. in a structure initializer (or where-ever else comma expressions
* aren't permitted).
+ *
+ * Take an error message as an optional second argument. If omitted,
+ * default to the stringification of the tested expression.
*/
-#define BUILD_BUG_ON_ZERO(e) ((int)(sizeof(struct { int:(-!!(e)); })))
-#endif /* __CHECKER__ */
+#define BUILD_BUG_ON_ZERO(e, ...) \
+ __BUILD_BUG_ON_ZERO_MSG(e, ##__VA_ARGS__, #e " is true")
/* Force a compilation error if a constant expression is not a power of 2 */
#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) \
diff --git a/tools/include/linux/cfi_types.h b/tools/include/linux/cfi_types.h
index 6b8713675765..a86af9bc8bdc 100644
--- a/tools/include/linux/cfi_types.h
+++ b/tools/include/linux/cfi_types.h
@@ -8,7 +8,7 @@
#ifdef __ASSEMBLY__
#include <linux/linkage.h>
-#ifdef CONFIG_CFI_CLANG
+#ifdef CONFIG_CFI
/*
* Use the __kcfi_typeid_<function> type identifier symbol to
* annotate indirectly called assembly functions. The compiler emits
@@ -29,17 +29,40 @@
#define SYM_TYPED_START(name, linkage, align...) \
SYM_TYPED_ENTRY(name, linkage, align)
-#else /* CONFIG_CFI_CLANG */
+#else /* CONFIG_CFI */
#define SYM_TYPED_START(name, linkage, align...) \
SYM_START(name, linkage, align)
-#endif /* CONFIG_CFI_CLANG */
+#endif /* CONFIG_CFI */
#ifndef SYM_TYPED_FUNC_START
#define SYM_TYPED_FUNC_START(name) \
SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
#endif
+#else /* __ASSEMBLY__ */
+
+#ifdef CONFIG_CFI
+#define DEFINE_CFI_TYPE(name, func) \
+ /* \
+ * Force a reference to the function so the compiler generates \
+ * __kcfi_typeid_<func>. \
+ */ \
+ __ADDRESSABLE(func); \
+ /* u32 name __ro_after_init = __kcfi_typeid_<func> */ \
+ extern u32 name; \
+ asm ( \
+ " .pushsection .data..ro_after_init,\"aw\",\%progbits \n" \
+ " .type " #name ",\%object \n" \
+ " .globl " #name " \n" \
+ " .p2align 2, 0x0 \n" \
+ #name ": \n" \
+ " .4byte __kcfi_typeid_" #func " \n" \
+ " .size " #name ", 4 \n" \
+ " .popsection \n" \
+ );
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_CFI_TYPES_H */
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index 9c05a59f0184..f40bd2b04c29 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -81,6 +81,28 @@
#define __is_constexpr(x) \
(sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
+/*
+ * Similar to statically_true() but produces a constant expression
+ *
+ * To be used in conjunction with macros, such as BUILD_BUG_ON_ZERO(),
+ * which require their input to be a constant expression and for which
+ * statically_true() would otherwise fail.
+ *
+ * This is a trade-off: const_true() requires all its operands to be
+ * compile time constants. Else, it would always returns false even on
+ * the most trivial cases like:
+ *
+ * true || non_const_var
+ *
+ * By contrast, statically_true() is able to fold more complex
+ * tautologies and will return true on expressions such as:
+ *
+ * !(non_const_var * 8 % 4)
+ *
+ * For the general case, statically_true() is better.
+ */
+#define const_true(x) __builtin_choose_expr(__is_constexpr(x), x, false)
+
#ifdef __ANDROID__
/*
* FIXME: Big hammer to get rid of tons of:
@@ -116,6 +138,10 @@
# define __force
#endif
+#ifndef __iomem
+# define __iomem
+#endif
+
#ifndef __weak
# define __weak __attribute__((weak))
#endif
@@ -222,6 +248,14 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
__asm__ ("" : "=r" (var) : "0" (var))
#endif
+#ifndef __BUILD_BUG_ON_ZERO_MSG
+#if defined(__clang__)
+#define __BUILD_BUG_ON_ZERO_MSG(e, msg, ...) ((int)(sizeof(struct { int:(-!!(e)); })))
+#else
+#define __BUILD_BUG_ON_ZERO_MSG(e, msg, ...) ((int)sizeof(struct {_Static_assert(!(e), msg);}))
+#endif
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* _TOOLS_LINUX_COMPILER_H */
diff --git a/tools/include/linux/gfp_types.h b/tools/include/linux/gfp_types.h
index 5f9f1ed190a0..65db9349f905 100644
--- a/tools/include/linux/gfp_types.h
+++ b/tools/include/linux/gfp_types.h
@@ -1 +1,392 @@
-#include "../../../include/linux/gfp_types.h"
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_GFP_TYPES_H
+#define __LINUX_GFP_TYPES_H
+
+#include <linux/bits.h>
+
+/* The typedef is in types.h but we want the documentation here */
+#if 0
+/**
+ * typedef gfp_t - Memory allocation flags.
+ *
+ * GFP flags are commonly used throughout Linux to indicate how memory
+ * should be allocated. The GFP acronym stands for get_free_pages(),
+ * the underlying memory allocation function. Not every GFP flag is
+ * supported by every function which may allocate memory. Most users
+ * will want to use a plain ``GFP_KERNEL``.
+ */
+typedef unsigned int __bitwise gfp_t;
+#endif
+
+/*
+ * In case of changes, please don't forget to update
+ * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
+ */
+
+enum {
+ ___GFP_DMA_BIT,
+ ___GFP_HIGHMEM_BIT,
+ ___GFP_DMA32_BIT,
+ ___GFP_MOVABLE_BIT,
+ ___GFP_RECLAIMABLE_BIT,
+ ___GFP_HIGH_BIT,
+ ___GFP_IO_BIT,
+ ___GFP_FS_BIT,
+ ___GFP_ZERO_BIT,
+ ___GFP_UNUSED_BIT, /* 0x200u unused */
+ ___GFP_DIRECT_RECLAIM_BIT,
+ ___GFP_KSWAPD_RECLAIM_BIT,
+ ___GFP_WRITE_BIT,
+ ___GFP_NOWARN_BIT,
+ ___GFP_RETRY_MAYFAIL_BIT,
+ ___GFP_NOFAIL_BIT,
+ ___GFP_NORETRY_BIT,
+ ___GFP_MEMALLOC_BIT,
+ ___GFP_COMP_BIT,
+ ___GFP_NOMEMALLOC_BIT,
+ ___GFP_HARDWALL_BIT,
+ ___GFP_THISNODE_BIT,
+ ___GFP_ACCOUNT_BIT,
+ ___GFP_ZEROTAGS_BIT,
+#ifdef CONFIG_KASAN_HW_TAGS
+ ___GFP_SKIP_ZERO_BIT,
+ ___GFP_SKIP_KASAN_BIT,
+#endif
+#ifdef CONFIG_LOCKDEP
+ ___GFP_NOLOCKDEP_BIT,
+#endif
+#ifdef CONFIG_SLAB_OBJ_EXT
+ ___GFP_NO_OBJ_EXT_BIT,
+#endif
+ ___GFP_LAST_BIT
+};
+
+/* Plain integer GFP bitmasks. Do not use this directly. */
+#define ___GFP_DMA BIT(___GFP_DMA_BIT)
+#define ___GFP_HIGHMEM BIT(___GFP_HIGHMEM_BIT)
+#define ___GFP_DMA32 BIT(___GFP_DMA32_BIT)
+#define ___GFP_MOVABLE BIT(___GFP_MOVABLE_BIT)
+#define ___GFP_RECLAIMABLE BIT(___GFP_RECLAIMABLE_BIT)
+#define ___GFP_HIGH BIT(___GFP_HIGH_BIT)
+#define ___GFP_IO BIT(___GFP_IO_BIT)
+#define ___GFP_FS BIT(___GFP_FS_BIT)
+#define ___GFP_ZERO BIT(___GFP_ZERO_BIT)
+/* 0x200u unused */
+#define ___GFP_DIRECT_RECLAIM BIT(___GFP_DIRECT_RECLAIM_BIT)
+#define ___GFP_KSWAPD_RECLAIM BIT(___GFP_KSWAPD_RECLAIM_BIT)
+#define ___GFP_WRITE BIT(___GFP_WRITE_BIT)
+#define ___GFP_NOWARN BIT(___GFP_NOWARN_BIT)
+#define ___GFP_RETRY_MAYFAIL BIT(___GFP_RETRY_MAYFAIL_BIT)
+#define ___GFP_NOFAIL BIT(___GFP_NOFAIL_BIT)
+#define ___GFP_NORETRY BIT(___GFP_NORETRY_BIT)
+#define ___GFP_MEMALLOC BIT(___GFP_MEMALLOC_BIT)
+#define ___GFP_COMP BIT(___GFP_COMP_BIT)
+#define ___GFP_NOMEMALLOC BIT(___GFP_NOMEMALLOC_BIT)
+#define ___GFP_HARDWALL BIT(___GFP_HARDWALL_BIT)
+#define ___GFP_THISNODE BIT(___GFP_THISNODE_BIT)
+#define ___GFP_ACCOUNT BIT(___GFP_ACCOUNT_BIT)
+#define ___GFP_ZEROTAGS BIT(___GFP_ZEROTAGS_BIT)
+#ifdef CONFIG_KASAN_HW_TAGS
+#define ___GFP_SKIP_ZERO BIT(___GFP_SKIP_ZERO_BIT)
+#define ___GFP_SKIP_KASAN BIT(___GFP_SKIP_KASAN_BIT)
+#else
+#define ___GFP_SKIP_ZERO 0
+#define ___GFP_SKIP_KASAN 0
+#endif
+#ifdef CONFIG_LOCKDEP
+#define ___GFP_NOLOCKDEP BIT(___GFP_NOLOCKDEP_BIT)
+#else
+#define ___GFP_NOLOCKDEP 0
+#endif
+#ifdef CONFIG_SLAB_OBJ_EXT
+#define ___GFP_NO_OBJ_EXT BIT(___GFP_NO_OBJ_EXT_BIT)
+#else
+#define ___GFP_NO_OBJ_EXT 0
+#endif
+
+/*
+ * Physical address zone modifiers (see linux/mmzone.h - low four bits)
+ *
+ * Do not put any conditional on these. If necessary modify the definitions
+ * without the underscores and use them consistently. The definitions here may
+ * be used in bit comparisons.
+ */
+#define __GFP_DMA ((__force gfp_t)___GFP_DMA)
+#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
+#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
+#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
+#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
+
+/**
+ * DOC: Page mobility and placement hints
+ *
+ * Page mobility and placement hints
+ * ---------------------------------
+ *
+ * These flags provide hints about how mobile the page is. Pages with similar
+ * mobility are placed within the same pageblocks to minimise problems due
+ * to external fragmentation.
+ *
+ * %__GFP_MOVABLE (also a zone modifier) indicates that the page can be
+ * moved by page migration during memory compaction or can be reclaimed.
+ *
+ * %__GFP_RECLAIMABLE is used for slab allocations that specify
+ * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
+ *
+ * %__GFP_WRITE indicates the caller intends to dirty the page. Where possible,
+ * these pages will be spread between local zones to avoid all the dirty
+ * pages being in one zone (fair zone allocation policy).
+ *
+ * %__GFP_HARDWALL enforces the cpuset memory allocation policy.
+ *
+ * %__GFP_THISNODE forces the allocation to be satisfied from the requested
+ * node with no fallbacks or placement policy enforcements.
+ *
+ * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
+ *
+ * %__GFP_NO_OBJ_EXT causes slab allocation to have no object extension.
+ */
+#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
+#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
+#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL)
+#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
+#define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT)
+#define __GFP_NO_OBJ_EXT ((__force gfp_t)___GFP_NO_OBJ_EXT)
+
+/**
+ * DOC: Watermark modifiers
+ *
+ * Watermark modifiers -- controls access to emergency reserves
+ * ------------------------------------------------------------
+ *
+ * %__GFP_HIGH indicates that the caller is high-priority and that granting
+ * the request is necessary before the system can make forward progress.
+ * For example creating an IO context to clean pages and requests
+ * from atomic context.
+ *
+ * %__GFP_MEMALLOC allows access to all memory. This should only be used when
+ * the caller guarantees the allocation will allow more memory to be freed
+ * very shortly, e.g. a process exiting or swapping. Users should either
+ * be the MM or co-ordinate closely with the VM (e.g. swap over NFS).
+ * Users of this flag have to be extremely careful to not deplete the reserve
+ * completely and implement a throttling mechanism which controls the
+ * consumption of the reserve based on the amount of freed memory.
+ * Usage of a pre-allocated pool (e.g. mempool) should be always considered
+ * before using this flag.
+ *
+ * %__GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
+ * This takes precedence over the %__GFP_MEMALLOC flag if both are set.
+ */
+#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
+#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)
+#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
+
+/**
+ * DOC: Reclaim modifiers
+ *
+ * Reclaim modifiers
+ * -----------------
+ * Please note that all the following flags are only applicable to sleepable
+ * allocations (e.g. %GFP_NOWAIT and %GFP_ATOMIC will ignore them).
+ *
+ * %__GFP_IO can start physical IO.
+ *
+ * %__GFP_FS can call down to the low-level FS. Clearing the flag avoids the
+ * allocator recursing into the filesystem which might already be holding
+ * locks.
+ *
+ * %__GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
+ * This flag can be cleared to avoid unnecessary delays when a fallback
+ * option is available.
+ *
+ * %__GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
+ * the low watermark is reached and have it reclaim pages until the high
+ * watermark is reached. A caller may wish to clear this flag when fallback
+ * options are available and the reclaim is likely to disrupt the system. The
+ * canonical example is THP allocation where a fallback is cheap but
+ * reclaim/compaction may cause indirect stalls.
+ *
+ * %__GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
+ *
+ * The default allocator behavior depends on the request size. We have a concept
+ * of so-called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER).
+ * !costly allocations are too essential to fail, so they are implicitly
+ * non-failing by default (with some exceptions, e.g. OOM victims might fail,
+ * so the caller still has to check for failures), while costly requests try
+ * not to be disruptive and back off even without invoking the OOM killer.
+ * The following three modifiers might be used to override some of these
+ * implicit rules. Please note that all of them must be used along with the
+ * %__GFP_DIRECT_RECLAIM flag.
+ *
+ * %__GFP_NORETRY: The VM implementation will try only very lightweight
+ * memory direct reclaim to get some memory under memory pressure (thus
+ * it can sleep). It will avoid disruptive actions like OOM killer. The
+ * caller must handle the failure which is quite likely to happen under
+ * heavy memory pressure. The flag is suitable when failure can easily be
+ * handled at small cost, such as reduced throughput.
+ *
+ * %__GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
+ * procedures that have previously failed if there is some indication
+ * that progress has been made elsewhere. It can wait for other
+ * tasks to attempt high-level approaches to freeing memory such as
+ * compaction (which removes fragmentation) and page-out.
+ * There is still a definite limit to the number of retries, but it is
+ * a larger limit than with %__GFP_NORETRY.
+ * Allocations with this flag may fail, but only when there is
+ * genuinely little unused memory. While these allocations do not
+ * directly trigger the OOM killer, their failure indicates that
+ * the system is likely to need to use the OOM killer soon. The
+ * caller must handle failure, but can reasonably do so by failing
+ * a higher-level request, or completing it only in a much less
+ * efficient manner.
+ * If the allocation does fail, and the caller is in a position to
+ * free some non-essential memory, doing so could benefit the system
+ * as a whole.
+ *
+ * %__GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
+ * cannot handle allocation failures. The allocation could block
+ * indefinitely but will never return with failure. Testing for
+ * failure is pointless.
+ * It _must_ be blockable and used together with __GFP_DIRECT_RECLAIM.
+ * It should _never_ be used in non-sleepable contexts.
+ * New users should be evaluated carefully (and the flag should be
+ * used only when there is no reasonable failure policy), but it is
+ * definitely preferable to use the flag rather than open-coding an
+ * endless loop around the allocator.
+ * Allocating pages from the buddy with __GFP_NOFAIL and order > 1 is
+ * not supported. Please consider using kvmalloc() instead.
+ */
+#define __GFP_IO ((__force gfp_t)___GFP_IO)
+#define __GFP_FS ((__force gfp_t)___GFP_FS)
+#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
+#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
+#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
+#define __GFP_RETRY_MAYFAIL ((__force gfp_t)___GFP_RETRY_MAYFAIL)
+#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL)
+#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY)
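
As an illustrative aside (not part of this patch; alloc_with_policy() is a made-up helper), the three retry policies above differ as follows when layered on %GFP_KERNEL:

/* Hedged sketch: contrasting the retry-policy modifiers. */
#include <linux/slab.h>

static void *alloc_with_policy(size_t sz)
{
	void *p;

	/* A cheap fallback exists: fail fast, never invoke the OOM killer. */
	p = kmalloc(sz, GFP_KERNEL | __GFP_NORETRY);
	if (p)
		return p;

	/* No cheap fallback: retry harder, but failure is still possible. */
	p = kmalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (p)
		return p;

	/*
	 * Truly cannot fail: never returns NULL. Only valid for small
	 * requests (order <= 1 from the buddy allocator).
	 */
	return kmalloc(sz, GFP_KERNEL | __GFP_NOFAIL);
}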
+
+/**
+ * DOC: Action modifiers
+ *
+ * Action modifiers
+ * ----------------
+ *
+ * %__GFP_NOWARN suppresses allocation failure reports.
+ *
+ * %__GFP_COMP allocates a compound page and initialises the compound
+ * page metadata.
+ *
+ * %__GFP_ZERO returns a zeroed page on success.
+ *
+ * %__GFP_ZEROTAGS zeroes memory tags at allocation time if the memory itself
+ * is being zeroed (either via __GFP_ZERO or via init_on_alloc, provided that
+ * __GFP_SKIP_ZERO is not set). This flag is intended for optimization: setting
+ * memory tags at the same time as zeroing memory has minimal additional
+ * performance impact.
+ *
+ * %__GFP_SKIP_KASAN makes KASAN skip unpoisoning on page allocation.
+ * Used for userspace and vmalloc pages; the latter are unpoisoned by
+ * kasan_unpoison_vmalloc instead. For userspace pages, results in
+ * poisoning being skipped as well, see should_skip_kasan_poison for
+ * details. Only effective in HW_TAGS mode.
+ */
+#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
+#define __GFP_COMP ((__force gfp_t)___GFP_COMP)
+#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
+#define __GFP_ZEROTAGS ((__force gfp_t)___GFP_ZEROTAGS)
+#define __GFP_SKIP_ZERO ((__force gfp_t)___GFP_SKIP_ZERO)
+#define __GFP_SKIP_KASAN ((__force gfp_t)___GFP_SKIP_KASAN)
+
+/* Disable lockdep for GFP context tracking */
+#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
+
+/* Room for N __GFP_FOO bits */
+#define __GFP_BITS_SHIFT ___GFP_LAST_BIT
+#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
+
+/**
+ * DOC: Useful GFP flag combinations
+ *
+ * Useful GFP flag combinations
+ * ----------------------------
+ *
+ * These GFP flag combinations are commonly used. It is recommended
+ * that subsystems start with one of these combinations and then set/clear
+ * %__GFP_FOO flags as necessary.
+ *
+ * %GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower
+ * watermark is applied to allow access to "atomic reserves".
+ * The current implementation doesn't support NMI and a few other strict
+ * non-preemptive contexts (e.g. raw_spin_lock). The same applies to %GFP_NOWAIT.
+ *
+ * %GFP_KERNEL is typical for kernel-internal allocations. The caller requires
+ * %ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
+ *
+ * %GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is
+ * accounted to kmemcg.
+ *
+ * %GFP_NOWAIT is for kernel allocations that should not stall for direct
+ * reclaim, start physical IO or use any filesystem callback. It is very
+ * likely to fail to allocate memory, even for very small allocations.
+ *
+ * %GFP_NOIO will use direct reclaim to discard clean pages or slab pages
+ * that do not require the starting of any physical IO.
+ * Please try to avoid using this flag directly and instead use
+ * memalloc_noio_{save,restore} to mark the whole scope which cannot
+ * perform any IO with a short explanation why. All allocation requests
+ * will inherit GFP_NOIO implicitly.
+ *
+ * %GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
+ * Please try to avoid using this flag directly and instead use
+ * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't
+ * recurse into the FS layer with a short explanation why. All allocation
+ * requests will inherit GFP_NOFS implicitly.
+ *
+ * %GFP_USER is for userspace allocations that also need to be directly
+ * accessible by the kernel or hardware. It is typically used for hardware
+ * buffers that are mapped to userspace (e.g. graphics) and that the
+ * hardware still must DMA to. cpuset limits are enforced for these allocations.
+ *
+ * %GFP_DMA exists for historical reasons and should be avoided where possible.
+ * The flag indicates that the caller requires that the lowest zone be
+ * used (%ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
+ * it would require careful auditing as some users really require it and
+ * others use the flag to avoid lowmem reserves in %ZONE_DMA and treat the
+ * lowest zone as a type of emergency reserve.
+ *
+ * %GFP_DMA32 is similar to %GFP_DMA except that the caller requires a 32-bit
+ * address. Note that kmalloc(..., GFP_DMA32) does not return DMA32 memory
+ * because the DMA32 kmalloc cache array is not implemented.
+ * (Reason: there is no such user in the kernel.)
+ *
+ * %GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
+ * do not need to be directly accessible by the kernel but that cannot
+ * move once in use. An example may be a hardware allocation that maps
+ * data directly into userspace but has no addressing limitations.
+ *
+ * %GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
+ * need direct access to but can use kmap() when access is required. They
+ * are expected to be movable via page reclaim or page migration. Typically,
+ * pages on the LRU would also be allocated with %GFP_HIGHUSER_MOVABLE.
+ *
+ * %GFP_TRANSHUGE and %GFP_TRANSHUGE_LIGHT are used for THP allocations. They
+ * are compound allocations that will generally fail quickly if memory is not
+ * available and will not wake kswapd/kcompactd on failure. The _LIGHT
+ * version does not attempt reclaim/compaction at all and is used by default
+ * in the page fault path, while the non-light version is used by khugepaged.
+ */
+#define GFP_ATOMIC (__GFP_HIGH|__GFP_KSWAPD_RECLAIM)
+#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
+#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
+#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM | __GFP_NOWARN)
+#define GFP_NOIO (__GFP_RECLAIM)
+#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO)
+#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
+#define GFP_DMA __GFP_DMA
+#define GFP_DMA32 __GFP_DMA32
+#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
+#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE | __GFP_SKIP_KASAN)
+#define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
+ __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
+#define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
+
+#endif /* __LINUX_GFP_TYPES_H */
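
To make the combinations above concrete, here is an illustrative, hedged sketch (editorial, not part of the patch): choose the base combination from the calling context, and prefer the scope API over raw %GFP_NOFS/%GFP_NOIO at individual call sites.

/* Hedged sketch: base GFP choice by context, plus the scope API. */
#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *ctx_alloc(size_t sz, bool atomic_ctx)
{
	if (atomic_ctx)
		/* cannot sleep; dips into atomic reserves, may still fail */
		return kmalloc(sz, GFP_ATOMIC);

	/* normal kernel-internal allocation; may sleep and direct-reclaim */
	return kmalloc(sz, GFP_KERNEL);
}

static void fs_locked_section(void)
{
	unsigned int flags;

	/* all allocations in this scope implicitly behave as GFP_NOFS */
	flags = memalloc_nofs_save();
	/* ... take FS locks, allocate with plain GFP_KERNEL ... */
	memalloc_nofs_restore(flags);
}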
diff --git a/tools/include/linux/interval_tree_generic.h b/tools/include/linux/interval_tree_generic.h
index aaa8a0767aa3..c5a2fed49eb0 100644
--- a/tools/include/linux/interval_tree_generic.h
+++ b/tools/include/linux/interval_tree_generic.h
@@ -77,7 +77,7 @@ ITSTATIC void ITPREFIX ## _remove(ITSTRUCT *node, \
* Cond2: start <= ITLAST(node) \
*/ \
\
-static ITSTRUCT * \
+ITSTATIC ITSTRUCT * \
ITPREFIX ## _subtree_search(ITSTRUCT *node, ITTYPE start, ITTYPE last) \
{ \
while (true) { \
@@ -104,12 +104,8 @@ ITPREFIX ## _subtree_search(ITSTRUCT *node, ITTYPE start, ITTYPE last) \
if (ITSTART(node) <= last) { /* Cond1 */ \
if (start <= ITLAST(node)) /* Cond2 */ \
return node; /* node is leftmost match */ \
- if (node->ITRB.rb_right) { \
- node = rb_entry(node->ITRB.rb_right, \
- ITSTRUCT, ITRB); \
- if (start <= node->ITSUBTREE) \
- continue; \
- } \
+ node = rb_entry(node->ITRB.rb_right, ITSTRUCT, ITRB); \
+ continue; \
} \
return NULL; /* No match */ \
} \
diff --git a/tools/include/linux/io.h b/tools/include/linux/io.h
index e129871fe661..4b94b84160b8 100644
--- a/tools/include/linux/io.h
+++ b/tools/include/linux/io.h
@@ -2,4 +2,6 @@
#ifndef _TOOLS_IO_H
#define _TOOLS_IO_H
-#endif
+#include <asm/io.h>
+
+#endif /* _TOOLS_IO_H */
diff --git a/tools/include/linux/kallsyms.h b/tools/include/linux/kallsyms.h
index 5a37ccbec54f..f61a01dd7eb7 100644
--- a/tools/include/linux/kallsyms.h
+++ b/tools/include/linux/kallsyms.h
@@ -18,6 +18,7 @@ static inline const char *kallsyms_lookup(unsigned long addr,
return NULL;
}
+#ifdef HAVE_BACKTRACE_SUPPORT
#include <execinfo.h>
#include <stdlib.h>
static inline void print_ip_sym(const char *loglvl, unsigned long ip)
@@ -30,5 +31,8 @@ static inline void print_ip_sym(const char *loglvl, unsigned long ip)
free(name);
}
+#else
+static inline void print_ip_sym(const char *loglvl, unsigned long ip) {}
+#endif
#endif
diff --git a/tools/include/linux/livepatch_external.h b/tools/include/linux/livepatch_external.h
new file mode 100644
index 000000000000..138af19b0f5c
--- /dev/null
+++ b/tools/include/linux/livepatch_external.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * External livepatch interfaces for patch creation tooling
+ */
+
+#ifndef _LINUX_LIVEPATCH_EXTERNAL_H_
+#define _LINUX_LIVEPATCH_EXTERNAL_H_
+
+#include <linux/types.h>
+
+#define KLP_RELOC_SEC_PREFIX ".klp.rela."
+#define KLP_SYM_PREFIX ".klp.sym."
+
+#define __KLP_PRE_PATCH_PREFIX __klp_pre_patch_callback_
+#define __KLP_POST_PATCH_PREFIX __klp_post_patch_callback_
+#define __KLP_PRE_UNPATCH_PREFIX __klp_pre_unpatch_callback_
+#define __KLP_POST_UNPATCH_PREFIX __klp_post_unpatch_callback_
+
+#define KLP_PRE_PATCH_PREFIX __stringify(__KLP_PRE_PATCH_PREFIX)
+#define KLP_POST_PATCH_PREFIX __stringify(__KLP_POST_PATCH_PREFIX)
+#define KLP_PRE_UNPATCH_PREFIX __stringify(__KLP_PRE_UNPATCH_PREFIX)
+#define KLP_POST_UNPATCH_PREFIX __stringify(__KLP_POST_UNPATCH_PREFIX)
+
+struct klp_object;
+
+typedef int (*klp_pre_patch_t)(struct klp_object *obj);
+typedef void (*klp_post_patch_t)(struct klp_object *obj);
+typedef void (*klp_pre_unpatch_t)(struct klp_object *obj);
+typedef void (*klp_post_unpatch_t)(struct klp_object *obj);
+
+/**
+ * struct klp_callbacks - pre/post live-(un)patch callback structure
+ * @pre_patch: executed before code patching
+ * @post_patch: executed after code patching
+ * @pre_unpatch: executed before code unpatching
+ * @post_unpatch: executed after code unpatching
+ * @post_unpatch_enabled: flag indicating if post-unpatch callback
+ * should run
+ *
+ * All callbacks are optional. Only the pre-patch callback, if provided,
+ * will be unconditionally executed. If the parent klp_object fails to
+ * patch for any reason, including a non-zero error status returned from
+ * the pre-patch callback, no further callbacks will be executed.
+ */
+struct klp_callbacks {
+ klp_pre_patch_t pre_patch;
+ klp_post_patch_t post_patch;
+ klp_pre_unpatch_t pre_unpatch;
+ klp_post_unpatch_t post_unpatch;
+ bool post_unpatch_enabled;
+};
+
+/*
+ * 'struct klp_{func,object}_ext' are compact "external" representations of
+ * 'struct klp_{func,object}'. They are used by objtool for livepatch
+ * generation. The structs are then read by the livepatch module and converted
+ * to the real structs before calling klp_enable_patch().
+ *
+ * TODO make these the official API for klp_enable_patch(). That should
+ * simplify livepatch's interface as well as its data structure lifetime
+ * management.
+ */
+struct klp_func_ext {
+ const char *old_name;
+ void *new_func;
+ unsigned long sympos;
+};
+
+struct klp_object_ext {
+ const char *name;
+ struct klp_func_ext *funcs;
+ struct klp_callbacks callbacks;
+ unsigned int nr_funcs;
+};
+
+#endif /* _LINUX_LIVEPATCH_EXTERNAL_H_ */
diff --git a/tools/include/linux/objtool_types.h b/tools/include/linux/objtool_types.h
index df5d9fa84dba..c6def4049b1a 100644
--- a/tools/include/linux/objtool_types.h
+++ b/tools/include/linux/objtool_types.h
@@ -65,5 +65,8 @@ struct unwind_hint {
#define ANNOTYPE_IGNORE_ALTS 6
#define ANNOTYPE_INTRA_FUNCTION_CALL 7
#define ANNOTYPE_REACHABLE 8
+#define ANNOTYPE_NOCFI 9
+
+#define ANNOTYPE_DATA_SPECIAL 1
#endif /* _LINUX_OBJTOOL_TYPES_H */
diff --git a/tools/include/linux/pci_ids.h b/tools/include/linux/pci_ids.h
new file mode 120000
index 000000000000..1c9e88f41261
--- /dev/null
+++ b/tools/include/linux/pci_ids.h
@@ -0,0 +1 @@
+../../../include/linux/pci_ids.h \ No newline at end of file
diff --git a/tools/include/linux/slab.h b/tools/include/linux/slab.h
index c87051e2b26f..94937a699402 100644
--- a/tools/include/linux/slab.h
+++ b/tools/include/linux/slab.h
@@ -4,11 +4,31 @@
#include <linux/types.h>
#include <linux/gfp.h>
+#include <pthread.h>
-#define SLAB_PANIC 2
#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
#define kzalloc_node(size, flags, node) kmalloc(size, flags)
+enum _slab_flag_bits {
+ _SLAB_KMALLOC,
+ _SLAB_HWCACHE_ALIGN,
+ _SLAB_PANIC,
+ _SLAB_TYPESAFE_BY_RCU,
+ _SLAB_ACCOUNT,
+ _SLAB_FLAGS_LAST_BIT
+};
+
+#define __SLAB_FLAG_BIT(nr) ((unsigned int __force)(1U << (nr)))
+#define __SLAB_FLAG_UNUSED ((unsigned int __force)(0U))
+
+#define SLAB_HWCACHE_ALIGN __SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN)
+#define SLAB_PANIC __SLAB_FLAG_BIT(_SLAB_PANIC)
+#define SLAB_TYPESAFE_BY_RCU __SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU)
+#ifdef CONFIG_MEMCG
+# define SLAB_ACCOUNT __SLAB_FLAG_BIT(_SLAB_ACCOUNT)
+#else
+# define SLAB_ACCOUNT __SLAB_FLAG_UNUSED
+#endif
void *kmalloc(size_t size, gfp_t gfp);
void kfree(void *p);
@@ -23,6 +43,98 @@ enum slab_state {
FULL
};
+struct kmem_cache {
+ pthread_mutex_t lock;
+ unsigned int size;
+ unsigned int align;
+ unsigned int sheaf_capacity;
+ int nr_objs;
+ void *objs;
+ void (*ctor)(void *);
+ bool non_kernel_enabled;
+ unsigned int non_kernel;
+ unsigned long nr_allocated;
+ unsigned long nr_tallocated;
+ bool exec_callback;
+ void (*callback)(void *);
+ void *private;
+};
+
+struct kmem_cache_args {
+ /**
+ * @align: The required alignment for the objects.
+ *
+ * %0 means no specific alignment is requested.
+ */
+ unsigned int align;
+ /**
+ * @sheaf_capacity: The maximum size of the sheaf.
+ */
+ unsigned int sheaf_capacity;
+ /**
+ * @useroffset: Usercopy region offset.
+ *
+ * %0 is a valid offset, when @usersize is non-%0
+ */
+ unsigned int useroffset;
+ /**
+ * @usersize: Usercopy region size.
+ *
+ * %0 means no usercopy region is specified.
+ */
+ unsigned int usersize;
+ /**
+ * @freeptr_offset: Custom offset for the free pointer
+ * in &SLAB_TYPESAFE_BY_RCU caches
+ *
+ * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer
+ * outside of the object. This might cause the object to grow in size.
+ * Cache creators that have a reason to avoid this can specify a custom
+ * free pointer offset in their struct where the free pointer will be
+ * placed.
+ *
+ * Note that placing the free pointer inside the object requires the
+ * caller to ensure that no fields are invalidated that are required to
+ * guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for
+ * details).
+ *
+ * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
+ * is specified, %use_freeptr_offset must be set %true.
+ *
+ * Note that @ctor currently isn't supported with custom free pointers
+ * as a @ctor requires an external free pointer.
+ */
+ unsigned int freeptr_offset;
+ /**
+ * @use_freeptr_offset: Whether a @freeptr_offset is used.
+ */
+ bool use_freeptr_offset;
+ /**
+ * @ctor: A constructor for the objects.
+ *
+ * The constructor is invoked for each object in a newly allocated slab
+ * page. It is the cache user's responsibility to free the object in the
+ * same state as after calling the constructor, or to deal appropriately
+ * with any differences between a freshly constructed and a reallocated
+ * object.
+ *
+ * %NULL means no constructor.
+ */
+ void (*ctor)(void *);
+};
+
+struct slab_sheaf {
+ union {
+ struct list_head barn_list;
+ /* only used for prefilled sheafs */
+ unsigned int capacity;
+ };
+ struct kmem_cache *cache;
+ unsigned int size;
+ int node; /* only used for rcu_sheaf */
+ void *objects[];
+};
+
static inline void *kzalloc(size_t size, gfp_t gfp)
{
return kmalloc(size, gfp | __GFP_ZERO);
@@ -37,12 +149,57 @@ static inline void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
}
void kmem_cache_free(struct kmem_cache *cachep, void *objp);
-struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
- unsigned int align, unsigned int flags,
- void (*ctor)(void *));
+
+struct kmem_cache *
+__kmem_cache_create_args(const char *name, unsigned int size,
+ struct kmem_cache_args *args, unsigned int flags);
+
+/* If NULL is passed for @args, use this variant with default arguments. */
+static inline struct kmem_cache *
+__kmem_cache_default_args(const char *name, unsigned int size,
+ struct kmem_cache_args *args, unsigned int flags)
+{
+ struct kmem_cache_args kmem_default_args = {};
+
+ return __kmem_cache_create_args(name, size, &kmem_default_args, flags);
+}
+
+static inline struct kmem_cache *
+__kmem_cache_create(const char *name, unsigned int size, unsigned int align,
+ unsigned int flags, void (*ctor)(void *))
+{
+ struct kmem_cache_args kmem_args = {
+ .align = align,
+ .ctor = ctor,
+ };
+
+ return __kmem_cache_create_args(name, size, &kmem_args, flags);
+}
+
+#define kmem_cache_create(__name, __object_size, __args, ...) \
+ _Generic((__args), \
+ struct kmem_cache_args *: __kmem_cache_create_args, \
+ void *: __kmem_cache_default_args, \
+ default: __kmem_cache_create)(__name, __object_size, __args, __VA_ARGS__)
void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list);
int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
void **list);
+struct slab_sheaf *
+kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size);
+
+void *
+kmem_cache_alloc_from_sheaf(struct kmem_cache *s, gfp_t gfp,
+ struct slab_sheaf *sheaf);
+
+void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
+ struct slab_sheaf *sheaf);
+int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
+ struct slab_sheaf **sheafp, unsigned int size);
+
+static inline unsigned int kmem_cache_sheaf_size(struct slab_sheaf *sheaf)
+{
+ return sheaf->size;
+}
#endif /* _TOOLS_SLAB_H */
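
The _Generic() dispatch above accepts three call shapes. An illustrative, hedged usage sketch follows (editorial; struct foo and foo_ctor() are made up):

/* Hedged sketch: the three forms accepted by kmem_cache_create(). */
struct foo { int a; };

static void foo_ctor(void *obj)
{
	/* runs once per object in each newly allocated slab page */
}

static void example(void)
{
	struct kmem_cache_args args = {
		.align = 64,
		.ctor  = foo_ctor,
	};

	/* struct kmem_cache_args * selects __kmem_cache_create_args() */
	kmem_cache_create("foo1", sizeof(struct foo), &args, SLAB_PANIC);

	/* NULL (a void *) selects __kmem_cache_default_args() */
	kmem_cache_create("foo2", sizeof(struct foo), NULL, SLAB_PANIC);

	/* an integer align selects the legacy __kmem_cache_create() */
	kmem_cache_create("foo3", sizeof(struct foo), 64, SLAB_PANIC, foo_ctor);
}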
diff --git a/tools/include/linux/static_call_types.h b/tools/include/linux/static_call_types.h
index 5a00b8b2cf9f..cfb6ddeb292b 100644
--- a/tools/include/linux/static_call_types.h
+++ b/tools/include/linux/static_call_types.h
@@ -25,6 +25,8 @@
#define STATIC_CALL_SITE_INIT 2UL /* init section */
#define STATIC_CALL_SITE_FLAGS 3UL
+#ifndef __ASSEMBLY__
+
/*
* The static call site table needs to be created by external tooling (objtool
* or a compiler plugin).
@@ -100,4 +102,6 @@ struct static_call_key {
#endif /* CONFIG_HAVE_STATIC_CALL */
+#endif /* __ASSEMBLY__ */
+
#endif /* _STATIC_CALL_TYPES_H */
diff --git a/tools/include/linux/string.h b/tools/include/linux/string.h
index 8499f509f03e..51ad3cf4fa82 100644
--- a/tools/include/linux/string.h
+++ b/tools/include/linux/string.h
@@ -44,6 +44,20 @@ static inline bool strstarts(const char *str, const char *prefix)
return strncmp(str, prefix, strlen(prefix)) == 0;
}
+/*
+ * Checks if a string ends with another.
+ */
+static inline bool str_ends_with(const char *str, const char *substr)
+{
+ size_t len = strlen(str);
+ size_t sublen = strlen(substr);
+
+ if (sublen > len)
+ return false;
+
+ return !strcmp(str + len - sublen, substr);
+}
+
extern char * __must_check skip_spaces(const char *);
extern char *strim(char *);
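
A brief, hedged usage note for the new helper (the suffixes and function name below are illustrative only):

/* Hedged sketch: suffix matching with str_ends_with(). */
static bool is_object_file(const char *name)
{
	/* "vmlinux.o" -> true; "o" -> false (suffix longer than string) */
	return str_ends_with(name, ".o") || str_ends_with(name, ".ko");
}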
diff --git a/tools/include/nolibc/Makefile b/tools/include/nolibc/Makefile
index c335ce0bd195..8118e22844f1 100644
--- a/tools/include/nolibc/Makefile
+++ b/tools/include/nolibc/Makefile
@@ -23,8 +23,7 @@ else
Q=@
endif
-nolibc_arch := $(patsubst arm64,aarch64,$(ARCH))
-arch_file := arch-$(nolibc_arch).h
+arch_files := arch.h $(wildcard arch-*.h)
all_files := \
compiler.h \
crt.h \
@@ -34,6 +33,7 @@ all_files := \
errno.h \
fcntl.h \
getopt.h \
+ inttypes.h \
limits.h \
math.h \
nolibc.h \
@@ -57,12 +57,14 @@ all_files := \
sys/random.h \
sys/reboot.h \
sys/resource.h \
+ sys/select.h \
sys/stat.h \
sys/syscall.h \
sys/sysmacros.h \
sys/time.h \
sys/timerfd.h \
sys/types.h \
+ sys/uio.h \
sys/utsname.h \
sys/wait.h \
time.h \
@@ -80,7 +82,7 @@ help:
@echo "Supported targets under nolibc:"
@echo " all call \"headers\""
@echo " clean clean the sysroot"
- @echo " headers prepare a sysroot in tools/include/nolibc/sysroot"
+ @echo " headers prepare a multi-arch sysroot in \$${OUTPUT}sysroot"
@echo " headers_standalone like \"headers\", and also install kernel headers"
@echo " help this help"
@echo ""
@@ -91,34 +93,18 @@ help:
@echo " OUTPUT = $(OUTPUT)"
@echo ""
-# Note: when ARCH is "x86" we concatenate both x86_64 and i386
+# Installs headers for all archs at once.
headers:
- $(Q)mkdir -p $(OUTPUT)sysroot
- $(Q)mkdir -p $(OUTPUT)sysroot/include
- $(Q)cp --parents $(all_files) $(OUTPUT)sysroot/include/
- $(Q)if [ "$(ARCH)" = "x86" ]; then \
- sed -e \
- 's,^#ifndef _NOLIBC_ARCH_X86_64_H,#if !defined(_NOLIBC_ARCH_X86_64_H) \&\& defined(__x86_64__),' \
- arch-x86_64.h; \
- sed -e \
- 's,^#ifndef _NOLIBC_ARCH_I386_H,#if !defined(_NOLIBC_ARCH_I386_H) \&\& !defined(__x86_64__),' \
- arch-i386.h; \
- elif [ -e "$(arch_file)" ]; then \
- cat $(arch_file); \
- else \
- echo "Fatal: architecture $(ARCH) not yet supported by nolibc." >&2; \
- exit 1; \
- fi > $(OUTPUT)sysroot/include/arch.h
+ $(Q)mkdir -p "$(OUTPUT)sysroot"
+ $(Q)mkdir -p "$(OUTPUT)sysroot/include"
+ $(Q)cp --parents $(arch_files) $(all_files) "$(OUTPUT)sysroot/include/"
headers_standalone: headers
$(Q)$(MAKE) -C $(srctree) headers
$(Q)$(MAKE) -C $(srctree) headers_install INSTALL_HDR_PATH=$(OUTPUT)sysroot
-# GCC uses "s390", clang "systemz"
-CLANG_CROSS_FLAGS := $(subst --target=s390-linux,--target=systemz-linux,$(CLANG_CROSS_FLAGS))
-
headers_check: headers_standalone
- for header in $(filter-out crt.h std.h,$(all_files)); do \
+ $(Q)for header in $(filter-out crt.h std.h,$(all_files)); do \
$(CC) $(CLANG_CROSS_FLAGS) -Wall -Werror -nostdinc -fsyntax-only -x c /dev/null \
-I$(or $(objtree),$(srctree))/usr/include -include $$header -include $$header || exit 1; \
done
diff --git a/tools/include/nolibc/arch-arm.h b/tools/include/nolibc/arch-arm.h
index 1f66e7e5a444..251c42579028 100644
--- a/tools/include/nolibc/arch-arm.h
+++ b/tools/include/nolibc/arch-arm.h
@@ -184,6 +184,7 @@
_arg1; \
})
+#ifndef NOLIBC_NO_RUNTIME
/* startup code */
void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _start(void)
{
@@ -193,5 +194,6 @@ void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _s
);
__nolibc_entrypoint_epilogue();
}
+#endif /* NOLIBC_NO_RUNTIME */
#endif /* _NOLIBC_ARCH_ARM_H */
diff --git a/tools/include/nolibc/arch-aarch64.h b/tools/include/nolibc/arch-arm64.h
index 937a348da42e..080a55a7144e 100644
--- a/tools/include/nolibc/arch-aarch64.h
+++ b/tools/include/nolibc/arch-arm64.h
@@ -1,16 +1,16 @@
/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
/*
- * AARCH64 specific definitions for NOLIBC
+ * ARM64 specific definitions for NOLIBC
* Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
*/
-#ifndef _NOLIBC_ARCH_AARCH64_H
-#define _NOLIBC_ARCH_AARCH64_H
+#ifndef _NOLIBC_ARCH_ARM64_H
+#define _NOLIBC_ARCH_ARM64_H
#include "compiler.h"
#include "crt.h"
-/* Syscalls for AARCH64 :
+/* Syscalls for ARM64 :
* - registers are 64-bit
* - stack is 16-byte aligned
* - syscall number is passed in x8
@@ -141,6 +141,7 @@
_arg1; \
})
+#ifndef NOLIBC_NO_RUNTIME
/* startup code */
void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _start(void)
{
@@ -150,4 +151,5 @@ void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _s
);
__nolibc_entrypoint_epilogue();
}
-#endif /* _NOLIBC_ARCH_AARCH64_H */
+#endif /* NOLIBC_NO_RUNTIME */
+#endif /* _NOLIBC_ARCH_ARM64_H */
diff --git a/tools/include/nolibc/arch-i386.h b/tools/include/nolibc/arch-i386.h
deleted file mode 100644
index 7c9b38e96418..000000000000
--- a/tools/include/nolibc/arch-i386.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
-/*
- * i386 specific definitions for NOLIBC
- * Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
- */
-
-#ifndef _NOLIBC_ARCH_I386_H
-#define _NOLIBC_ARCH_I386_H
-
-#include "compiler.h"
-#include "crt.h"
-
-/* Syscalls for i386 :
- * - mostly similar to x86_64
- * - registers are 32-bit
- * - syscall number is passed in eax
- * - arguments are in ebx, ecx, edx, esi, edi, ebp respectively
- * - all registers are preserved (except eax of course)
- * - the system call is performed by calling int $0x80
- * - syscall return comes in eax
- * - the arguments are cast to long and assigned into the target registers
- * which are then simply passed as registers to the asm code, so that we
- * don't have to experience issues with register constraints.
- * - the syscall number is always specified last in order to allow to force
- * some registers before (gcc refuses a %-register at the last position).
- *
- * Also, i386 supports the old_select syscall if newselect is not available
- */
-#define __ARCH_WANT_SYS_OLD_SELECT
-
-#define my_syscall0(num) \
-({ \
- long _ret; \
- register long _num __asm__ ("eax") = (num); \
- \
- __asm__ volatile ( \
- "int $0x80\n" \
- : "=a" (_ret) \
- : "0"(_num) \
- : "memory", "cc" \
- ); \
- _ret; \
-})
-
-#define my_syscall1(num, arg1) \
-({ \
- long _ret; \
- register long _num __asm__ ("eax") = (num); \
- register long _arg1 __asm__ ("ebx") = (long)(arg1); \
- \
- __asm__ volatile ( \
- "int $0x80\n" \
- : "=a" (_ret) \
- : "r"(_arg1), \
- "0"(_num) \
- : "memory", "cc" \
- ); \
- _ret; \
-})
-
-#define my_syscall2(num, arg1, arg2) \
-({ \
- long _ret; \
- register long _num __asm__ ("eax") = (num); \
- register long _arg1 __asm__ ("ebx") = (long)(arg1); \
- register long _arg2 __asm__ ("ecx") = (long)(arg2); \
- \
- __asm__ volatile ( \
- "int $0x80\n" \
- : "=a" (_ret) \
- : "r"(_arg1), "r"(_arg2), \
- "0"(_num) \
- : "memory", "cc" \
- ); \
- _ret; \
-})
-
-#define my_syscall3(num, arg1, arg2, arg3) \
-({ \
- long _ret; \
- register long _num __asm__ ("eax") = (num); \
- register long _arg1 __asm__ ("ebx") = (long)(arg1); \
- register long _arg2 __asm__ ("ecx") = (long)(arg2); \
- register long _arg3 __asm__ ("edx") = (long)(arg3); \
- \
- __asm__ volatile ( \
- "int $0x80\n" \
- : "=a" (_ret) \
- : "r"(_arg1), "r"(_arg2), "r"(_arg3), \
- "0"(_num) \
- : "memory", "cc" \
- ); \
- _ret; \
-})
-
-#define my_syscall4(num, arg1, arg2, arg3, arg4) \
-({ \
- long _ret; \
- register long _num __asm__ ("eax") = (num); \
- register long _arg1 __asm__ ("ebx") = (long)(arg1); \
- register long _arg2 __asm__ ("ecx") = (long)(arg2); \
- register long _arg3 __asm__ ("edx") = (long)(arg3); \
- register long _arg4 __asm__ ("esi") = (long)(arg4); \
- \
- __asm__ volatile ( \
- "int $0x80\n" \
- : "=a" (_ret) \
- : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
- "0"(_num) \
- : "memory", "cc" \
- ); \
- _ret; \
-})
-
-#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
-({ \
- long _ret; \
- register long _num __asm__ ("eax") = (num); \
- register long _arg1 __asm__ ("ebx") = (long)(arg1); \
- register long _arg2 __asm__ ("ecx") = (long)(arg2); \
- register long _arg3 __asm__ ("edx") = (long)(arg3); \
- register long _arg4 __asm__ ("esi") = (long)(arg4); \
- register long _arg5 __asm__ ("edi") = (long)(arg5); \
- \
- __asm__ volatile ( \
- "int $0x80\n" \
- : "=a" (_ret) \
- : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
- "0"(_num) \
- : "memory", "cc" \
- ); \
- _ret; \
-})
-
-#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
-({ \
- long _eax = (long)(num); \
- long _arg6 = (long)(arg6); /* Always in memory */ \
- __asm__ volatile ( \
- "pushl %[_arg6]\n\t" \
- "pushl %%ebp\n\t" \
- "movl 4(%%esp),%%ebp\n\t" \
- "int $0x80\n\t" \
- "popl %%ebp\n\t" \
- "addl $4,%%esp\n\t" \
- : "+a"(_eax) /* %eax */ \
- : "b"(arg1), /* %ebx */ \
- "c"(arg2), /* %ecx */ \
- "d"(arg3), /* %edx */ \
- "S"(arg4), /* %esi */ \
- "D"(arg5), /* %edi */ \
- [_arg6]"m"(_arg6) /* memory */ \
- : "memory", "cc" \
- ); \
- _eax; \
-})
-
-/* startup code */
-/*
- * i386 System V ABI mandates:
- * 1) last pushed argument must be 16-byte aligned.
- * 2) The deepest stack frame should be set to zero
- *
- */
-void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _start(void)
-{
- __asm__ volatile (
- "xor %ebp, %ebp\n" /* zero the stack frame */
- "mov %esp, %eax\n" /* save stack pointer to %eax, as arg1 of _start_c */
- "sub $12, %esp\n" /* sub 12 to keep it aligned after the push %eax */
- "push %eax\n" /* push arg1 on stack to support plain stack modes too */
- "call _start_c\n" /* transfer to c runtime */
- "hlt\n" /* ensure it does not return */
- );
- __nolibc_entrypoint_epilogue();
-}
-
-#endif /* _NOLIBC_ARCH_I386_H */
diff --git a/tools/include/nolibc/arch-loongarch.h b/tools/include/nolibc/arch-loongarch.h
index 5511705303ea..c894176c3f89 100644
--- a/tools/include/nolibc/arch-loongarch.h
+++ b/tools/include/nolibc/arch-loongarch.h
@@ -142,6 +142,7 @@
_arg1; \
})
+#ifndef NOLIBC_NO_RUNTIME
/* startup code */
void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _start(void)
{
@@ -151,5 +152,6 @@ void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _s
);
__nolibc_entrypoint_epilogue();
}
+#endif /* NOLIBC_NO_RUNTIME */
#endif /* _NOLIBC_ARCH_LOONGARCH_H */
diff --git a/tools/include/nolibc/arch-m68k.h b/tools/include/nolibc/arch-m68k.h
index 6dac1845f298..2a4fbada5e79 100644
--- a/tools/include/nolibc/arch-m68k.h
+++ b/tools/include/nolibc/arch-m68k.h
@@ -128,6 +128,7 @@
_num; \
})
+#ifndef NOLIBC_NO_RUNTIME
void _start(void);
void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _start(void)
{
@@ -137,5 +138,6 @@ void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _s
);
__nolibc_entrypoint_epilogue();
}
+#endif /* NOLIBC_NO_RUNTIME */
#endif /* _NOLIBC_ARCH_M68K_H */
diff --git a/tools/include/nolibc/arch-mips.h b/tools/include/nolibc/arch-mips.h
index 753a8ed2cf69..a72506ceec6b 100644
--- a/tools/include/nolibc/arch-mips.h
+++ b/tools/include/nolibc/arch-mips.h
@@ -10,7 +10,7 @@
#include "compiler.h"
#include "crt.h"
-#if !defined(_ABIO32)
+#if !defined(_ABIO32) && !defined(_ABIN32) && !defined(_ABI64)
#error Unsupported MIPS ABI
#endif
@@ -32,11 +32,32 @@
* - the arguments are cast to long and assigned into the target registers
* which are then simply passed as registers to the asm code, so that we
* don't have to experience issues with register constraints.
+ *
+ * Syscalls for MIPS ABI N32, same as ABI O32 with the following differences :
+ * - arguments are in a0, a1, a2, a3, t0, t1, t2, t3.
+ * t0..t3 are also known as a4..a7.
+ * - stack is 16-byte aligned
*/
+#if defined(_ABIO32)
+
#define _NOLIBC_SYSCALL_CLOBBERLIST \
"memory", "cc", "at", "v1", "hi", "lo", \
"t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9"
+#define _NOLIBC_SYSCALL_STACK_RESERVE "addiu $sp, $sp, -32\n"
+#define _NOLIBC_SYSCALL_STACK_UNRESERVE "addiu $sp, $sp, 32\n"
+
+#else /* _ABIN32 || _ABI64 */
+
+/* binutils, GCC and clang disagree about register aliases; use numbers instead. */
+#define _NOLIBC_SYSCALL_CLOBBERLIST \
+ "memory", "cc", "at", "v1", \
+ "10", "11", "12", "13", "14", "15", "24", "25"
+
+#define _NOLIBC_SYSCALL_STACK_RESERVE
+#define _NOLIBC_SYSCALL_STACK_UNRESERVE
+
+#endif /* _ABIO32 */
#define my_syscall0(num) \
({ \
@@ -44,9 +65,9 @@
register long _arg4 __asm__ ("a3"); \
\
__asm__ volatile ( \
- "addiu $sp, $sp, -32\n" \
+ _NOLIBC_SYSCALL_STACK_RESERVE \
"syscall\n" \
- "addiu $sp, $sp, 32\n" \
+ _NOLIBC_SYSCALL_STACK_UNRESERVE \
: "=r"(_num), "=r"(_arg4) \
: "r"(_num) \
: _NOLIBC_SYSCALL_CLOBBERLIST \
@@ -61,9 +82,9 @@
register long _arg4 __asm__ ("a3"); \
\
__asm__ volatile ( \
- "addiu $sp, $sp, -32\n" \
+ _NOLIBC_SYSCALL_STACK_RESERVE \
"syscall\n" \
- "addiu $sp, $sp, 32\n" \
+ _NOLIBC_SYSCALL_STACK_UNRESERVE \
: "=r"(_num), "=r"(_arg4) \
: "0"(_num), \
"r"(_arg1) \
@@ -80,9 +101,9 @@
register long _arg4 __asm__ ("a3"); \
\
__asm__ volatile ( \
- "addiu $sp, $sp, -32\n" \
+ _NOLIBC_SYSCALL_STACK_RESERVE \
"syscall\n" \
- "addiu $sp, $sp, 32\n" \
+ _NOLIBC_SYSCALL_STACK_UNRESERVE \
: "=r"(_num), "=r"(_arg4) \
: "0"(_num), \
"r"(_arg1), "r"(_arg2) \
@@ -100,9 +121,9 @@
register long _arg4 __asm__ ("a3"); \
\
__asm__ volatile ( \
- "addiu $sp, $sp, -32\n" \
+ _NOLIBC_SYSCALL_STACK_RESERVE \
"syscall\n" \
- "addiu $sp, $sp, 32\n" \
+ _NOLIBC_SYSCALL_STACK_UNRESERVE \
: "=r"(_num), "=r"(_arg4) \
: "0"(_num), \
"r"(_arg1), "r"(_arg2), "r"(_arg3) \
@@ -120,9 +141,9 @@
register long _arg4 __asm__ ("a3") = (long)(arg4); \
\
__asm__ volatile ( \
- "addiu $sp, $sp, -32\n" \
+ _NOLIBC_SYSCALL_STACK_RESERVE \
"syscall\n" \
- "addiu $sp, $sp, 32\n" \
+ _NOLIBC_SYSCALL_STACK_UNRESERVE \
: "=r" (_num), "=r"(_arg4) \
: "0"(_num), \
"r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4) \
@@ -131,6 +152,8 @@
_arg4 ? -_num : _num; \
})
+#if defined(_ABIO32)
+
#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
({ \
register long _num __asm__ ("v0") = (num); \
@@ -141,10 +164,10 @@
register long _arg5 = (long)(arg5); \
\
__asm__ volatile ( \
- "addiu $sp, $sp, -32\n" \
+ _NOLIBC_SYSCALL_STACK_RESERVE \
"sw %7, 16($sp)\n" \
"syscall\n" \
- "addiu $sp, $sp, 32\n" \
+ _NOLIBC_SYSCALL_STACK_UNRESERVE \
: "=r" (_num), "=r"(_arg4) \
: "0"(_num), \
"r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5) \
@@ -164,11 +187,53 @@
register long _arg6 = (long)(arg6); \
\
__asm__ volatile ( \
- "addiu $sp, $sp, -32\n" \
+ _NOLIBC_SYSCALL_STACK_RESERVE \
"sw %7, 16($sp)\n" \
"sw %8, 20($sp)\n" \
"syscall\n" \
- "addiu $sp, $sp, 32\n" \
+ _NOLIBC_SYSCALL_STACK_UNRESERVE \
+ : "=r" (_num), "=r"(_arg4) \
+ : "0"(_num), \
+ "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
+ "r"(_arg6) \
+ : _NOLIBC_SYSCALL_CLOBBERLIST \
+ ); \
+ _arg4 ? -_num : _num; \
+})
+
+#else /* _ABIN32 || _ABI64 */
+
+#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
+({ \
+ register long _num __asm__ ("v0") = (num); \
+ register long _arg1 __asm__ ("$4") = (long)(arg1); \
+ register long _arg2 __asm__ ("$5") = (long)(arg2); \
+ register long _arg3 __asm__ ("$6") = (long)(arg3); \
+ register long _arg4 __asm__ ("$7") = (long)(arg4); \
+ register long _arg5 __asm__ ("$8") = (long)(arg5); \
+ \
+ __asm__ volatile ( \
+ "syscall\n" \
+ : "=r" (_num), "=r"(_arg4) \
+ : "0"(_num), \
+ "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5) \
+ : _NOLIBC_SYSCALL_CLOBBERLIST \
+ ); \
+ _arg4 ? -_num : _num; \
+})
+
+#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
+({ \
+ register long _num __asm__ ("v0") = (num); \
+ register long _arg1 __asm__ ("$4") = (long)(arg1); \
+ register long _arg2 __asm__ ("$5") = (long)(arg2); \
+ register long _arg3 __asm__ ("$6") = (long)(arg3); \
+ register long _arg4 __asm__ ("$7") = (long)(arg4); \
+ register long _arg5 __asm__ ("$8") = (long)(arg5); \
+ register long _arg6 __asm__ ("$9") = (long)(arg6); \
+ \
+ __asm__ volatile ( \
+ "syscall\n" \
: "=r" (_num), "=r"(_arg4) \
: "0"(_num), \
"r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
@@ -178,30 +243,30 @@
_arg4 ? -_num : _num; \
})
+#endif /* _ABIO32 */
+
+#ifndef NOLIBC_NO_RUNTIME
/* startup code, note that it's called __start on MIPS */
void __start(void);
void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector __start(void)
{
__asm__ volatile (
- ".set push\n"
- ".set noreorder\n"
- "bal 1f\n" /* prime $ra for .cpload */
- "nop\n"
- "1:\n"
- ".cpload $ra\n"
"move $a0, $sp\n" /* save stack pointer to $a0, as arg1 of _start_c */
- "addiu $sp, $sp, -4\n" /* space for .cprestore to store $gp */
- ".cprestore 0\n"
- "li $t0, -8\n"
- "and $sp, $sp, $t0\n" /* $sp must be 8-byte aligned */
+#if defined(_ABIO32)
"addiu $sp, $sp, -16\n" /* the callee expects to save a0..a3 there */
+#endif /* _ABIO32 */
"lui $t9, %hi(_start_c)\n" /* ABI requires current function address in $t9 */
"ori $t9, %lo(_start_c)\n"
+#if defined(_ABI64)
+ "lui $t0, %highest(_start_c)\n"
+ "ori $t0, %higher(_start_c)\n"
+ "dsll $t0, 0x20\n"
+ "or $t9, $t0\n"
+#endif /* _ABI64 */
"jalr $t9\n" /* transfer to c runtime */
- " nop\n" /* delayed slot */
- ".set pop\n"
);
__nolibc_entrypoint_epilogue();
}
+#endif /* NOLIBC_NO_RUNTIME */
#endif /* _NOLIBC_ARCH_MIPS_H */
diff --git a/tools/include/nolibc/arch-powerpc.h b/tools/include/nolibc/arch-powerpc.h
index 204564bbcd32..e0c7e0b81f7c 100644
--- a/tools/include/nolibc/arch-powerpc.h
+++ b/tools/include/nolibc/arch-powerpc.h
@@ -183,6 +183,7 @@
#endif
#endif /* !__powerpc64__ */
+#ifndef NOLIBC_NO_RUNTIME
/* startup code */
void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _start(void)
{
@@ -215,5 +216,6 @@ void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _s
#endif
__nolibc_entrypoint_epilogue();
}
+#endif /* NOLIBC_NO_RUNTIME */
#endif /* _NOLIBC_ARCH_POWERPC_H */
diff --git a/tools/include/nolibc/arch-riscv.h b/tools/include/nolibc/arch-riscv.h
index 885383a86c38..1c00cacf57e1 100644
--- a/tools/include/nolibc/arch-riscv.h
+++ b/tools/include/nolibc/arch-riscv.h
@@ -139,6 +139,7 @@
_arg1; \
})
+#ifndef NOLIBC_NO_RUNTIME
/* startup code */
void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _start(void)
{
@@ -152,5 +153,6 @@ void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _s
);
__nolibc_entrypoint_epilogue();
}
+#endif /* NOLIBC_NO_RUNTIME */
#endif /* _NOLIBC_ARCH_RISCV_H */
diff --git a/tools/include/nolibc/arch-s390.h b/tools/include/nolibc/arch-s390.h
index df4c3cc713ac..74125a254ce3 100644
--- a/tools/include/nolibc/arch-s390.h
+++ b/tools/include/nolibc/arch-s390.h
@@ -139,22 +139,19 @@
_arg1; \
})
+#ifndef NOLIBC_NO_RUNTIME
/* startup code */
void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _start(void)
{
__asm__ volatile (
-#ifdef __s390x__
"lgr %r2, %r15\n" /* save stack pointer to %r2, as arg1 of _start_c */
"aghi %r15, -160\n" /* allocate new stackframe */
-#else
- "lr %r2, %r15\n"
- "ahi %r15, -96\n"
-#endif
"xc 0(8,%r15), 0(%r15)\n" /* clear backchain */
"brasl %r14, _start_c\n" /* transfer to c runtime */
);
__nolibc_entrypoint_epilogue();
}
+#endif /* NOLIBC_NO_RUNTIME */
struct s390_mmap_arg_struct {
unsigned long addr;
diff --git a/tools/include/nolibc/arch-sh.h b/tools/include/nolibc/arch-sh.h
new file mode 100644
index 000000000000..7a421197d104
--- /dev/null
+++ b/tools/include/nolibc/arch-sh.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * SuperH specific definitions for NOLIBC
+ * Copyright (C) 2025 Thomas Weißschuh <linux@weissschuh.net>
+ */
+
+#ifndef _NOLIBC_ARCH_SH_H
+#define _NOLIBC_ARCH_SH_H
+
+#include "compiler.h"
+#include "crt.h"
+
+/*
+ * Syscalls for SuperH:
+ * - registers are 32bit wide
+ * - syscall number is passed in r3
+ * - arguments are in r4, r5, r6, r7, r0, r1, r2
+ * - the system call is performed by calling trapa #31
+ * - syscall return value is in r0
+ */
+
+#define my_syscall0(num) \
+({ \
+ register long _num __asm__ ("r3") = (num); \
+ register long _ret __asm__ ("r0"); \
+ \
+ __asm__ volatile ( \
+ "trapa #31" \
+ : "=r"(_ret) \
+ : "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall1(num, arg1) \
+({ \
+ register long _num __asm__ ("r3") = (num); \
+ register long _ret __asm__ ("r0"); \
+ register long _arg1 __asm__ ("r4") = (long)(arg1); \
+ \
+ __asm__ volatile ( \
+ "trapa #31" \
+ : "=r"(_ret) \
+ : "r"(_num), "r"(_arg1) \
+ : "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall2(num, arg1, arg2) \
+({ \
+ register long _num __asm__ ("r3") = (num); \
+ register long _ret __asm__ ("r0"); \
+ register long _arg1 __asm__ ("r4") = (long)(arg1); \
+ register long _arg2 __asm__ ("r5") = (long)(arg2); \
+ \
+ __asm__ volatile ( \
+ "trapa #31" \
+ : "=r"(_ret) \
+ : "r"(_num), "r"(_arg1), "r"(_arg2) \
+ : "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall3(num, arg1, arg2, arg3) \
+({ \
+ register long _num __asm__ ("r3") = (num); \
+ register long _ret __asm__ ("r0"); \
+ register long _arg1 __asm__ ("r4") = (long)(arg1); \
+ register long _arg2 __asm__ ("r5") = (long)(arg2); \
+ register long _arg3 __asm__ ("r6") = (long)(arg3); \
+ \
+ __asm__ volatile ( \
+ "trapa #31" \
+ : "=r"(_ret) \
+ : "r"(_num), "r"(_arg1), "r"(_arg2), "r"(_arg3) \
+ : "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall4(num, arg1, arg2, arg3, arg4) \
+({ \
+ register long _num __asm__ ("r3") = (num); \
+ register long _ret __asm__ ("r0"); \
+ register long _arg1 __asm__ ("r4") = (long)(arg1); \
+ register long _arg2 __asm__ ("r5") = (long)(arg2); \
+ register long _arg3 __asm__ ("r6") = (long)(arg3); \
+ register long _arg4 __asm__ ("r7") = (long)(arg4); \
+ \
+ __asm__ volatile ( \
+ "trapa #31" \
+ : "=r"(_ret) \
+ : "r"(_num), "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4) \
+ : "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
+({ \
+ register long _num __asm__ ("r3") = (num); \
+ register long _ret __asm__ ("r0"); \
+ register long _arg1 __asm__ ("r4") = (long)(arg1); \
+ register long _arg2 __asm__ ("r5") = (long)(arg2); \
+ register long _arg3 __asm__ ("r6") = (long)(arg3); \
+ register long _arg4 __asm__ ("r7") = (long)(arg4); \
+ register long _arg5 __asm__ ("r0") = (long)(arg5); \
+ \
+ __asm__ volatile ( \
+ "trapa #31" \
+ : "=r"(_ret) \
+ : "r"(_num), "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
+ "r"(_arg5) \
+ : "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
+({ \
+ register long _num __asm__ ("r3") = (num); \
+ register long _ret __asm__ ("r0"); \
+ register long _arg1 __asm__ ("r4") = (long)(arg1); \
+ register long _arg2 __asm__ ("r5") = (long)(arg2); \
+ register long _arg3 __asm__ ("r6") = (long)(arg3); \
+ register long _arg4 __asm__ ("r7") = (long)(arg4); \
+ register long _arg5 __asm__ ("r0") = (long)(arg5); \
+ register long _arg6 __asm__ ("r1") = (long)(arg6); \
+ \
+ __asm__ volatile ( \
+ "trapa #31" \
+ : "=r"(_ret) \
+ : "r"(_num), "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
+ "r"(_arg5), "r"(_arg6) \
+ : "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#ifndef NOLIBC_NO_RUNTIME
+/* startup code */
+void _start_wrapper(void);
+void __attribute__((weak,noreturn)) __nolibc_entrypoint __no_stack_protector _start_wrapper(void)
+{
+ __asm__ volatile (
+ ".global _start\n" /* The C function will have a prologue, */
+ ".type _start, @function\n" /* corrupting "sp" */
+ ".weak _start\n"
+ "_start:\n"
+
+ "mov sp, r4\n" /* save argc pointer to r4, as arg1 of _start_c */
+ "bsr _start_c\n" /* transfer to c runtime */
+ "nop\n" /* delay slot */
+
+ ".size _start, .-_start\n"
+ );
+ __nolibc_entrypoint_epilogue();
+}
+#endif /* NOLIBC_NO_RUNTIME */
+
+#endif /* _NOLIBC_ARCH_SH_H */
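
As a hedged illustration of how the macros above are consumed (editorial; nolibc normally wraps these in its sys_*() helpers rather than calling them directly):

/* Hedged sketch: a raw write(2) through the SuperH syscall macros. */
static long raw_write(int fd, const void *buf, unsigned long count)
{
	/* r3 = __NR_write, args in r4..r6, trapa #31, result in r0 */
	return my_syscall3(__NR_write, fd, buf, count);
}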
diff --git a/tools/include/nolibc/arch-sparc.h b/tools/include/nolibc/arch-sparc.h
index 1435172f3dfe..2ebb5686e105 100644
--- a/tools/include/nolibc/arch-sparc.h
+++ b/tools/include/nolibc/arch-sparc.h
@@ -152,6 +152,7 @@
_arg1; \
})
+#ifndef NOLIBC_NO_RUNTIME
/* startup code */
void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _start(void)
{
@@ -169,6 +170,7 @@ void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _s
);
__nolibc_entrypoint_epilogue();
}
+#endif /* NOLIBC_NO_RUNTIME */
static pid_t getpid(void);
@@ -188,4 +190,20 @@ pid_t sys_fork(void)
}
#define sys_fork sys_fork
+static __attribute__((unused))
+pid_t sys_vfork(void)
+{
+ pid_t parent, ret;
+
+ parent = getpid();
+ ret = my_syscall0(__NR_vfork);
+
+ /* The syscall returns the parent pid in the child instead of 0 */
+ if (ret == parent)
+ return 0;
+ else
+ return ret;
+}
+#define sys_vfork sys_vfork
+
#endif /* _NOLIBC_ARCH_SPARC_H */
diff --git a/tools/include/nolibc/arch-x86_64.h b/tools/include/nolibc/arch-x86.h
index 67305e24dbef..f6c43ac5377b 100644
--- a/tools/include/nolibc/arch-x86_64.h
+++ b/tools/include/nolibc/arch-x86.h
@@ -1,15 +1,186 @@
/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
/*
- * x86_64 specific definitions for NOLIBC
- * Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
+ * x86 specific definitions for NOLIBC (both 32- and 64-bit)
+ * Copyright (C) 2017-2025 Willy Tarreau <w@1wt.eu>
*/
-#ifndef _NOLIBC_ARCH_X86_64_H
-#define _NOLIBC_ARCH_X86_64_H
+#ifndef _NOLIBC_ARCH_X86_H
+#define _NOLIBC_ARCH_X86_H
#include "compiler.h"
#include "crt.h"
+#if !defined(__x86_64__)
+
+/* Syscalls for i386 :
+ * - mostly similar to x86_64
+ * - registers are 32-bit
+ * - syscall number is passed in eax
+ * - arguments are in ebx, ecx, edx, esi, edi, ebp respectively
+ * - all registers are preserved (except eax of course)
+ * - the system call is performed by calling int $0x80
+ * - syscall return comes in eax
+ * - the arguments are cast to long and assigned into the target registers
+ * which are then simply passed as registers to the asm code, so that we
+ * don't have to experience issues with register constraints.
+ * - the syscall number is always specified last in order to allow to force
+ * some registers before (gcc refuses a %-register at the last position).
+ *
+ * Also, i386 supports the old_select syscall if newselect is not available
+ */
+#define __ARCH_WANT_SYS_OLD_SELECT
+
+#define my_syscall0(num) \
+({ \
+ long _ret; \
+ register long _num __asm__ ("eax") = (num); \
+ \
+ __asm__ volatile ( \
+ "int $0x80\n" \
+ : "=a" (_ret) \
+ : "0"(_num) \
+ : "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall1(num, arg1) \
+({ \
+ long _ret; \
+ register long _num __asm__ ("eax") = (num); \
+ register long _arg1 __asm__ ("ebx") = (long)(arg1); \
+ \
+ __asm__ volatile ( \
+ "int $0x80\n" \
+ : "=a" (_ret) \
+ : "r"(_arg1), \
+ "0"(_num) \
+ : "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall2(num, arg1, arg2) \
+({ \
+ long _ret; \
+ register long _num __asm__ ("eax") = (num); \
+ register long _arg1 __asm__ ("ebx") = (long)(arg1); \
+ register long _arg2 __asm__ ("ecx") = (long)(arg2); \
+ \
+ __asm__ volatile ( \
+ "int $0x80\n" \
+ : "=a" (_ret) \
+ : "r"(_arg1), "r"(_arg2), \
+ "0"(_num) \
+ : "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall3(num, arg1, arg2, arg3) \
+({ \
+ long _ret; \
+ register long _num __asm__ ("eax") = (num); \
+ register long _arg1 __asm__ ("ebx") = (long)(arg1); \
+ register long _arg2 __asm__ ("ecx") = (long)(arg2); \
+ register long _arg3 __asm__ ("edx") = (long)(arg3); \
+ \
+ __asm__ volatile ( \
+ "int $0x80\n" \
+ : "=a" (_ret) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), \
+ "0"(_num) \
+ : "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall4(num, arg1, arg2, arg3, arg4) \
+({ \
+ long _ret; \
+ register long _num __asm__ ("eax") = (num); \
+ register long _arg1 __asm__ ("ebx") = (long)(arg1); \
+ register long _arg2 __asm__ ("ecx") = (long)(arg2); \
+ register long _arg3 __asm__ ("edx") = (long)(arg3); \
+ register long _arg4 __asm__ ("esi") = (long)(arg4); \
+ \
+ __asm__ volatile ( \
+ "int $0x80\n" \
+ : "=a" (_ret) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
+ "0"(_num) \
+ : "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
+({ \
+ long _ret; \
+ register long _num __asm__ ("eax") = (num); \
+ register long _arg1 __asm__ ("ebx") = (long)(arg1); \
+ register long _arg2 __asm__ ("ecx") = (long)(arg2); \
+ register long _arg3 __asm__ ("edx") = (long)(arg3); \
+ register long _arg4 __asm__ ("esi") = (long)(arg4); \
+ register long _arg5 __asm__ ("edi") = (long)(arg5); \
+ \
+ __asm__ volatile ( \
+ "int $0x80\n" \
+ : "=a" (_ret) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
+ "0"(_num) \
+ : "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
+({ \
+ long _eax = (long)(num); \
+ long _arg6 = (long)(arg6); /* Always in memory */ \
+ __asm__ volatile ( \
+ "pushl %[_arg6]\n\t" \
+ "pushl %%ebp\n\t" \
+ "movl 4(%%esp),%%ebp\n\t" \
+ "int $0x80\n\t" \
+ "popl %%ebp\n\t" \
+ "addl $4,%%esp\n\t" \
+ : "+a"(_eax) /* %eax */ \
+ : "b"(arg1), /* %ebx */ \
+ "c"(arg2), /* %ecx */ \
+ "d"(arg3), /* %edx */ \
+ "S"(arg4), /* %esi */ \
+ "D"(arg5), /* %edi */ \
+ [_arg6]"m"(_arg6) /* memory */ \
+ : "memory", "cc" \
+ ); \
+ _eax; \
+})
+
+#ifndef NOLIBC_NO_RUNTIME
+/* startup code */
+/*
+ * i386 System V ABI mandates:
+ * 1) last pushed argument must be 16-byte aligned.
+ * 2) The deepest stack frame should be set to zero
+ *
+ */
+void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _start(void)
+{
+ __asm__ volatile (
+ "xor %ebp, %ebp\n" /* zero the stack frame */
+ "mov %esp, %eax\n" /* save stack pointer to %eax, as arg1 of _start_c */
+ "sub $12, %esp\n" /* sub 12 to keep it aligned after the push %eax */
+ "push %eax\n" /* push arg1 on stack to support plain stack modes too */
+ "call _start_c\n" /* transfer to c runtime */
+ "hlt\n" /* ensure it does not return */
+ );
+ __nolibc_entrypoint_epilogue();
+}
+#endif /* NOLIBC_NO_RUNTIME */
+
+#else /* !defined(__x86_64__) */
+
/* Syscalls for x86_64 :
* - registers are 64-bit
* - syscall number is passed in rax
@@ -154,6 +325,7 @@
_ret; \
})
+#ifndef NOLIBC_NO_RUNTIME
/* startup code */
/*
* x86-64 System V ABI mandates:
@@ -171,6 +343,7 @@ void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _s
);
__nolibc_entrypoint_epilogue();
}
+#endif /* NOLIBC_NO_RUNTIME */
#define NOLIBC_ARCH_HAS_MEMMOVE
void *memmove(void *dst, const void *src, size_t len);
@@ -182,7 +355,7 @@ void *memcpy(void *dst, const void *src, size_t len);
void *memset(void *dst, int c, size_t len);
__asm__ (
-".section .text.nolibc_memmove_memcpy\n"
+".pushsection .text.nolibc_memmove_memcpy\n"
".weak memmove\n"
".weak memcpy\n"
"memmove:\n"
@@ -202,8 +375,9 @@ __asm__ (
"rep movsb\n\t"
"cld\n\t"
"retq\n"
+".popsection\n"
-".section .text.nolibc_memset\n"
+".pushsection .text.nolibc_memset\n"
".weak memset\n"
"memset:\n"
"xchgl %eax, %esi\n\t"
@@ -212,6 +386,8 @@ __asm__ (
"rep stosb\n\t"
"popq %rax\n\t"
"retq\n"
+".popsection\n"
);
-#endif /* _NOLIBC_ARCH_X86_64_H */
+#endif /* !defined(__x86_64__) */
+#endif /* _NOLIBC_ARCH_X86_H */
diff --git a/tools/include/nolibc/arch.h b/tools/include/nolibc/arch.h
index d20b2304aac2..a3adaf433f2c 100644
--- a/tools/include/nolibc/arch.h
+++ b/tools/include/nolibc/arch.h
@@ -3,33 +3,22 @@
* Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
*/
-/* Below comes the architecture-specific code. For each architecture, we have
- * the syscall declarations and the _start code definition. This is the only
- * global part. On all architectures the kernel puts everything in the stack
- * before jumping to _start just above us, without any return address (_start
- * is not a function but an entry point). So at the stack pointer we find argc.
- * Then argv[] begins, and ends at the first NULL. Then we have envp which
- * starts and ends with a NULL as well. So envp=argv+argc+1.
- */
-
#ifndef _NOLIBC_ARCH_H
#define _NOLIBC_ARCH_H
-#if defined(__x86_64__)
-#include "arch-x86_64.h"
-#elif defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__)
-#include "arch-i386.h"
+#if defined(__x86_64__) || defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__)
+#include "arch-x86.h"
#elif defined(__ARM_EABI__)
#include "arch-arm.h"
#elif defined(__aarch64__)
-#include "arch-aarch64.h"
+#include "arch-arm64.h"
#elif defined(__mips__)
#include "arch-mips.h"
#elif defined(__powerpc__)
#include "arch-powerpc.h"
#elif defined(__riscv)
#include "arch-riscv.h"
-#elif defined(__s390x__) || defined(__s390__)
+#elif defined(__s390x__)
#include "arch-s390.h"
#elif defined(__loongarch__)
#include "arch-loongarch.h"
@@ -37,6 +26,8 @@
#include "arch-sparc.h"
#elif defined(__m68k__)
#include "arch-m68k.h"
+#elif defined(__sh__)
+#include "arch-sh.h"
#else
#error Unsupported Architecture
#endif
diff --git a/tools/include/nolibc/compiler.h b/tools/include/nolibc/compiler.h
index 369cfb5a0e78..87090bbc53e0 100644
--- a/tools/include/nolibc/compiler.h
+++ b/tools/include/nolibc/compiler.h
@@ -41,8 +41,8 @@
# define __no_stack_protector __attribute__((__optimize__("-fno-stack-protector")))
#endif /* __nolibc_has_attribute(no_stack_protector) */
-#if __nolibc_has_attribute(fallthrough)
-# define __nolibc_fallthrough do { } while (0); __attribute__((fallthrough))
+#if __nolibc_has_attribute(__fallthrough__)
+# define __nolibc_fallthrough do { } while (0); __attribute__((__fallthrough__))
#else
# define __nolibc_fallthrough do { } while (0)
#endif /* __nolibc_has_attribute(fallthrough) */
diff --git a/tools/include/nolibc/crt.h b/tools/include/nolibc/crt.h
index 961cfe777c35..d9262998dae9 100644
--- a/tools/include/nolibc/crt.h
+++ b/tools/include/nolibc/crt.h
@@ -7,6 +7,8 @@
#ifndef _NOLIBC_CRT_H
#define _NOLIBC_CRT_H
+#ifndef NOLIBC_NO_RUNTIME
+
#include "compiler.h"
char **environ __attribute__((weak));
@@ -88,4 +90,5 @@ void _start_c(long *sp)
exit(exitcode);
}
+#endif /* NOLIBC_NO_RUNTIME */
#endif /* _NOLIBC_CRT_H */
diff --git a/tools/include/nolibc/dirent.h b/tools/include/nolibc/dirent.h
index 758b95c48e7a..61a122a60327 100644
--- a/tools/include/nolibc/dirent.h
+++ b/tools/include/nolibc/dirent.h
@@ -86,9 +86,9 @@ int readdir_r(DIR *dirp, struct dirent *entry, struct dirent **result)
* readdir() can only return one entry at a time.
* Make sure the non-returned ones are not skipped.
*/
- ret = lseek(fd, ldir->d_off, SEEK_SET);
- if (ret == -1)
- return errno;
+ ret = sys_lseek(fd, ldir->d_off, SEEK_SET);
+ if (ret < 0)
+ return -ret;
entry->d_ino = ldir->d_ino;
/* the destination should always be big enough */
diff --git a/tools/include/nolibc/getopt.h b/tools/include/nolibc/getopt.h
index 217abb95264b..87565e3b6a33 100644
--- a/tools/include/nolibc/getopt.h
+++ b/tools/include/nolibc/getopt.h
@@ -78,7 +78,7 @@ int getopt(int argc, char * const argv[], const char *optstring)
return '?';
}
if (optstring[i] == ':') {
- optarg = 0;
+ optarg = NULL;
if (optstring[i + 1] != ':' || __optpos) {
optarg = argv[optind++];
if (__optpos)
diff --git a/tools/include/nolibc/inttypes.h b/tools/include/nolibc/inttypes.h
new file mode 100644
index 000000000000..1977bd74bfeb
--- /dev/null
+++ b/tools/include/nolibc/inttypes.h
@@ -0,0 +1,3 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+
+#include "nolibc.h"
diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h
index c199ade200c2..272dfc961158 100644
--- a/tools/include/nolibc/nolibc.h
+++ b/tools/include/nolibc/nolibc.h
@@ -104,11 +104,13 @@
#include "sys/random.h"
#include "sys/reboot.h"
#include "sys/resource.h"
+#include "sys/select.h"
#include "sys/stat.h"
#include "sys/syscall.h"
#include "sys/sysmacros.h"
#include "sys/time.h"
#include "sys/timerfd.h"
+#include "sys/uio.h"
#include "sys/utsname.h"
#include "sys/wait.h"
#include "ctype.h"
@@ -116,6 +118,7 @@
#include "sched.h"
#include "signal.h"
#include "unistd.h"
+#include "stdbool.h"
#include "stdio.h"
#include "stdlib.h"
#include "string.h"
diff --git a/tools/include/nolibc/poll.h b/tools/include/nolibc/poll.h
index 1765acb17ea0..0d053f93ea99 100644
--- a/tools/include/nolibc/poll.h
+++ b/tools/include/nolibc/poll.h
@@ -39,10 +39,8 @@ int sys_poll(struct pollfd *fds, int nfds, int timeout)
t.tv_nsec = (timeout % 1000) * 1000000;
}
return my_syscall5(__NR_ppoll_time64, fds, nfds, (timeout >= 0) ? &t : NULL, NULL, 0);
-#elif defined(__NR_poll)
- return my_syscall3(__NR_poll, fds, nfds, timeout);
#else
- return __nolibc_enosys(__func__, fds, nfds, timeout);
+ return my_syscall3(__NR_poll, fds, nfds, timeout);
#endif
}
diff --git a/tools/include/nolibc/stackprotector.h b/tools/include/nolibc/stackprotector.h
index c71a2c257177..7123aa056cb0 100644
--- a/tools/include/nolibc/stackprotector.h
+++ b/tools/include/nolibc/stackprotector.h
@@ -9,6 +9,7 @@
#include "compiler.h"
+#ifndef NOLIBC_NO_RUNTIME
#if defined(_NOLIBC_STACKPROTECTOR)
#include "sys.h"
@@ -49,5 +50,6 @@ static __no_stack_protector void __stack_chk_init(void)
#else /* !defined(_NOLIBC_STACKPROTECTOR) */
static void __stack_chk_init(void) {}
#endif /* defined(_NOLIBC_STACKPROTECTOR) */
+#endif /* NOLIBC_NO_RUNTIME */
#endif /* _NOLIBC_STACKPROTECTOR_H */
diff --git a/tools/include/nolibc/std.h b/tools/include/nolibc/std.h
index adda7333d12e..392f4dd94158 100644
--- a/tools/include/nolibc/std.h
+++ b/tools/include/nolibc/std.h
@@ -16,17 +16,19 @@
#include "stdint.h"
#include "stddef.h"
+#include <linux/types.h>
+
/* those are commonly provided by sys/types.h */
typedef unsigned int dev_t;
-typedef unsigned long ino_t;
+typedef uint64_t ino_t;
typedef unsigned int mode_t;
typedef signed int pid_t;
typedef unsigned int uid_t;
typedef unsigned int gid_t;
typedef unsigned long nlink_t;
-typedef signed long off_t;
+typedef int64_t off_t;
typedef signed long blksize_t;
typedef signed long blkcnt_t;
-typedef signed long time_t;
+typedef __kernel_time_t time_t;
#endif /* _NOLIBC_STD_H */
diff --git a/tools/include/nolibc/stdio.h b/tools/include/nolibc/stdio.h
index c470d334ef3f..1f16dab2ac88 100644
--- a/tools/include/nolibc/stdio.h
+++ b/tools/include/nolibc/stdio.h
@@ -321,11 +321,13 @@ int __nolibc_printf(__nolibc_printf_cb cb, intptr_t state, size_t n, const char
if (!outstr)
outstr="(null)";
}
-#ifndef NOLIBC_IGNORE_ERRNO
else if (c == 'm') {
+#ifdef NOLIBC_IGNORE_ERRNO
+ outstr = "unknown error";
+#else
outstr = strerror(errno);
- }
#endif /* NOLIBC_IGNORE_ERRNO */
+ }
else if (c == '%') {
/* queue it verbatim */
continue;
@@ -358,11 +360,11 @@ int __nolibc_printf(__nolibc_printf_cb cb, intptr_t state, size_t n, const char
n -= w;
while (width-- > w) {
if (cb(state, " ", 1) != 0)
- break;
+ return -1;
written += 1;
}
if (cb(state, outstr, w) != 0)
- break;
+ return -1;
}
written += len;
@@ -600,7 +602,11 @@ int sscanf(const char *str, const char *format, ...)
static __attribute__((unused))
void perror(const char *msg)
{
+#ifdef NOLIBC_IGNORE_ERRNO
+ fprintf(stderr, "%s%sunknown error\n", (msg && *msg) ? msg : "", (msg && *msg) ? ": " : "");
+#else
fprintf(stderr, "%s%serrno=%d\n", (msg && *msg) ? msg : "", (msg && *msg) ? ": " : "", errno);
+#endif
}
static __attribute__((unused))
diff --git a/tools/include/nolibc/stdlib.h b/tools/include/nolibc/stdlib.h
index 5fd99a480f82..f184e108ed0a 100644
--- a/tools/include/nolibc/stdlib.h
+++ b/tools/include/nolibc/stdlib.h
@@ -100,6 +100,7 @@ void free(void *ptr)
munmap(heap, heap->len);
}
+#ifndef NOLIBC_NO_RUNTIME
/* getenv() tries to find the environment variable named <name> in the
* environment array pointed to by global variable "environ" which must be
* declared as a char **, and must be terminated by a NULL (it is recommended
@@ -122,6 +123,7 @@ char *getenv(const char *name)
}
return NULL;
}
+#endif /* NOLIBC_NO_RUNTIME */
static __attribute__((unused))
void *malloc(size_t len)
diff --git a/tools/include/nolibc/string.h b/tools/include/nolibc/string.h
index 163a17e7dd38..4000926f44ac 100644
--- a/tools/include/nolibc/string.h
+++ b/tools/include/nolibc/string.h
@@ -93,6 +93,21 @@ void *memset(void *dst, int b, size_t len)
}
#endif /* #ifndef NOLIBC_ARCH_HAS_MEMSET */
+#ifndef NOLIBC_ARCH_HAS_MEMCHR
+static __attribute__((unused))
+void *memchr(const void *s, int c, size_t len)
+{
+ char *p = (char *)s;
+
+ while (len--) {
+ if (*p == (char)c)
+ return p;
+ p++;
+ }
+ return NULL;
+}
+#endif /* #ifndef NOLIBC_ARCH_HAS_MEMCHR */
+
static __attribute__((unused))
char *strchr(const char *s, int c)
{
diff --git a/tools/include/nolibc/sys.h b/tools/include/nolibc/sys.h
index 9556c69a6ae1..847af1ccbdc9 100644
--- a/tools/include/nolibc/sys.h
+++ b/tools/include/nolibc/sys.h
@@ -22,6 +22,7 @@
#include <linux/time.h>
#include <linux/auxvec.h>
#include <linux/fcntl.h> /* for O_* and AT_* */
+#include <linux/sched.h> /* for clone_args */
#include <linux/stat.h> /* for statx() */
#include "errno.h"
@@ -105,7 +106,7 @@ static __attribute__((unused))
void *sbrk(intptr_t inc)
{
/* first call to find current end */
- void *ret = sys_brk(0);
+ void *ret = sys_brk(NULL);
if (ret && sys_brk(ret + inc) == ret + inc)
return ret + inc;
@@ -117,6 +118,7 @@ void *sbrk(intptr_t inc)
/*
* int chdir(const char *path);
+ * int fchdir(int fildes);
*/
static __attribute__((unused))
@@ -131,6 +133,18 @@ int chdir(const char *path)
return __sysret(sys_chdir(path));
}
+static __attribute__((unused))
+int sys_fchdir(int fildes)
+{
+ return my_syscall1(__NR_fchdir, fildes);
+}
+
+static __attribute__((unused))
+int fchdir(int fildes)
+{
+ return __sysret(sys_fchdir(fildes));
+}
+
/*
* int chmod(const char *path, mode_t mode);
@@ -139,12 +153,10 @@ int chdir(const char *path)
static __attribute__((unused))
int sys_chmod(const char *path, mode_t mode)
{
-#ifdef __NR_fchmodat
+#if defined(__NR_fchmodat)
return my_syscall4(__NR_fchmodat, AT_FDCWD, path, mode, 0);
-#elif defined(__NR_chmod)
- return my_syscall2(__NR_chmod, path, mode);
#else
- return __nolibc_enosys(__func__, path, mode);
+ return my_syscall2(__NR_chmod, path, mode);
#endif
}
@@ -162,12 +174,10 @@ int chmod(const char *path, mode_t mode)
static __attribute__((unused))
int sys_chown(const char *path, uid_t owner, gid_t group)
{
-#ifdef __NR_fchownat
+#if defined(__NR_fchownat)
return my_syscall5(__NR_fchownat, AT_FDCWD, path, owner, group, 0);
-#elif defined(__NR_chown)
- return my_syscall3(__NR_chown, path, owner, group);
#else
- return __nolibc_enosys(__func__, path, owner, group);
+ return my_syscall3(__NR_chown, path, owner, group);
#endif
}
@@ -236,12 +246,23 @@ int dup(int fd)
static __attribute__((unused))
int sys_dup2(int old, int new)
{
-#ifdef __NR_dup3
+#if defined(__NR_dup3)
+ int ret, nr_fcntl;
+
+#ifdef __NR_fcntl64
+ nr_fcntl = __NR_fcntl64;
+#else
+ nr_fcntl = __NR_fcntl;
+#endif
+
+ if (old == new) {
+ ret = my_syscall2(nr_fcntl, old, F_GETFD);
+ return ret < 0 ? ret : old;
+ }
+
return my_syscall3(__NR_dup3, old, new, 0);
-#elif defined(__NR_dup2)
- return my_syscall2(__NR_dup2, old, new);
#else
- return __nolibc_enosys(__func__, old, new);
+ return my_syscall2(__NR_dup2, old, new);
#endif
}
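The extra branch exists because dup3() rejects old == new with -EINVAL, whereas POSIX dup2() treats it as a no-op that must still fail with -EBADF when the descriptor is closed; the fcntl(F_GETFD) probe (via __NR_fcntl64 where that is the syscall a 32-bit ABI provides) delivers exactly that validity check. The behaviour being preserved, as a usage sketch assuming <assert.h>, <fcntl.h> and <unistd.h>:

    int fd = open("/dev/null", O_RDONLY);

    assert(dup2(fd, fd) == fd);     /* no-op, but validates fd */
    close(fd);
    assert(dup2(fd, fd) == -1);     /* now fails with EBADF */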
@@ -256,7 +277,7 @@ int dup2(int old, int new)
* int dup3(int old, int new, int flags);
*/
-#ifdef __NR_dup3
+#if defined(__NR_dup3)
static __attribute__((unused))
int sys_dup3(int old, int new, int flags)
{
@@ -320,16 +341,14 @@ void exit(int status)
static __attribute__((unused))
pid_t sys_fork(void)
{
-#ifdef __NR_clone
+#if defined(__NR_clone)
/* note: some archs only have clone() and not fork(). Different archs
* have a different API, but most archs have the flags on first arg and
* will not use the rest with no other flag.
*/
return my_syscall5(__NR_clone, SIGCHLD, 0, 0, 0, 0);
-#elif defined(__NR_fork)
- return my_syscall0(__NR_fork);
#else
- return __nolibc_enosys(__func__);
+ return my_syscall0(__NR_fork);
#endif
}
#endif
@@ -340,6 +359,32 @@ pid_t fork(void)
return __sysret(sys_fork());
}
+#ifndef sys_vfork
+static __attribute__((unused))
+pid_t sys_vfork(void)
+{
+#if defined(__NR_vfork)
+ return my_syscall0(__NR_vfork);
+#else
+ /*
+ * clone() could be used but has different argument orders per
+ * architecture.
+ */
+ struct clone_args args = {
+ .flags = CLONE_VM | CLONE_VFORK,
+ .exit_signal = SIGCHLD,
+ };
+
+ return my_syscall2(__NR_clone3, &args, sizeof(args));
+#endif
+}
+#endif
+
+static __attribute__((unused))
+pid_t vfork(void)
+{
+ return __sysret(sys_vfork());
+}
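When __NR_vfork is unavailable, clone3() with CLONE_VM | CLONE_VFORK reproduces its semantics: the child borrows the parent's address space and the parent stays suspended until the child execs or exits, which is why clone3()'s fixed struct-based ABI is preferred over clone()'s per-architecture argument order. The typical (and only safe) usage pattern, sketched:

    pid_t pid = vfork();

    if (pid == 0) {
            /* child: shares the parent's memory; may only exec or _exit */
            char *const argv[] = { "/bin/true", NULL };

            execve("/bin/true", argv, NULL);
            _exit(127);     /* reached only if execve() failed */
    }
    /* parent resumes here once the child has exec'd or exited */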
/*
* int fsync(int fd);
@@ -382,7 +427,7 @@ int getdents64(int fd, struct linux_dirent64 *dirp, int count)
static __attribute__((unused))
uid_t sys_geteuid(void)
{
-#ifdef __NR_geteuid32
+#if defined(__NR_geteuid32)
return my_syscall0(__NR_geteuid32);
#else
return my_syscall0(__NR_geteuid);
@@ -480,6 +525,7 @@ pid_t gettid(void)
return sys_gettid();
}
+#ifndef NOLIBC_NO_RUNTIME
static unsigned long getauxval(unsigned long key);
/*
@@ -491,7 +537,7 @@ int getpagesize(void)
{
return __sysret((int)getauxval(AT_PAGESZ) ?: -ENOENT);
}
-
+#endif /* NOLIBC_NO_RUNTIME */
/*
* uid_t getuid(void);
@@ -500,7 +546,7 @@ int getpagesize(void)
static __attribute__((unused))
uid_t sys_getuid(void)
{
-#ifdef __NR_getuid32
+#if defined(__NR_getuid32)
return my_syscall0(__NR_getuid32);
#else
return my_syscall0(__NR_getuid);
@@ -538,12 +584,10 @@ int kill(pid_t pid, int signal)
static __attribute__((unused))
int sys_link(const char *old, const char *new)
{
-#ifdef __NR_linkat
+#if defined(__NR_linkat)
return my_syscall5(__NR_linkat, AT_FDCWD, old, AT_FDCWD, new, 0);
-#elif defined(__NR_link)
- return my_syscall2(__NR_link, old, new);
#else
- return __nolibc_enosys(__func__, old, new);
+ return my_syscall2(__NR_link, old, new);
#endif
}
@@ -561,44 +605,27 @@ int link(const char *old, const char *new)
static __attribute__((unused))
off_t sys_lseek(int fd, off_t offset, int whence)
{
-#ifdef __NR_lseek
- return my_syscall3(__NR_lseek, fd, offset, whence);
-#else
- return __nolibc_enosys(__func__, fd, offset, whence);
-#endif
-}
+#if defined(__NR_llseek)
+ __kernel_loff_t loff = 0;
+ off_t result;
+ int ret;
-static __attribute__((unused))
-int sys_llseek(int fd, unsigned long offset_high, unsigned long offset_low,
- __kernel_loff_t *result, int whence)
-{
-#ifdef __NR_llseek
- return my_syscall5(__NR_llseek, fd, offset_high, offset_low, result, whence);
+ ret = my_syscall5(__NR_llseek, fd, offset >> 32, (uint32_t)offset, &loff, whence);
+ if (ret < 0)
+ result = ret;
+ else
+ result = loff;
+
+ return result;
#else
- return __nolibc_enosys(__func__, fd, offset_high, offset_low, result, whence);
+ return my_syscall3(__NR_lseek, fd, offset, whence);
#endif
}
static __attribute__((unused))
off_t lseek(int fd, off_t offset, int whence)
{
- __kernel_loff_t loff = 0;
- off_t result;
- int ret;
-
- result = sys_lseek(fd, offset, whence);
- if (result == -ENOSYS) {
- /* Only exists on 32bit where nolibc off_t is also 32bit */
- ret = sys_llseek(fd, 0, offset, &loff, whence);
- if (ret < 0)
- result = ret;
- else if (loff != (off_t)loff)
- result = -EOVERFLOW;
- else
- result = loff;
- }
-
- return __sysret(result);
+ return __sysret(sys_lseek(fd, offset, whence));
}
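Folding the llseek path into sys_lseek() keeps the 64-bit off_t from std.h usable on 32-bit targets: the offset travels to the kernel as two 32-bit halves and the result comes back through the __kernel_loff_t out-parameter. The split, worked through for an offset just past 4 GiB:

    uint64_t offset = 0x123456789ULL;
    uint32_t hi = offset >> 32;         /* 0x00000001 */
    uint32_t lo = (uint32_t)offset;     /* 0x23456789 */
    /* the kernel recombines ((uint64_t)hi << 32) | lo and writes the
     * resulting position into the __kernel_loff_t argument */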
@@ -609,12 +636,10 @@ off_t lseek(int fd, off_t offset, int whence)
static __attribute__((unused))
int sys_mkdir(const char *path, mode_t mode)
{
-#ifdef __NR_mkdirat
+#if defined(__NR_mkdirat)
return my_syscall3(__NR_mkdirat, AT_FDCWD, path, mode);
-#elif defined(__NR_mkdir)
- return my_syscall2(__NR_mkdir, path, mode);
#else
- return __nolibc_enosys(__func__, path, mode);
+ return my_syscall2(__NR_mkdir, path, mode);
#endif
}
@@ -631,12 +656,10 @@ int mkdir(const char *path, mode_t mode)
static __attribute__((unused))
int sys_rmdir(const char *path)
{
-#ifdef __NR_rmdir
+#if defined(__NR_rmdir)
return my_syscall1(__NR_rmdir, path);
-#elif defined(__NR_unlinkat)
- return my_syscall3(__NR_unlinkat, AT_FDCWD, path, AT_REMOVEDIR);
#else
- return __nolibc_enosys(__func__, path);
+ return my_syscall3(__NR_unlinkat, AT_FDCWD, path, AT_REMOVEDIR);
#endif
}
@@ -654,12 +677,10 @@ int rmdir(const char *path)
static __attribute__((unused))
long sys_mknod(const char *path, mode_t mode, dev_t dev)
{
-#ifdef __NR_mknodat
+#if defined(__NR_mknodat)
return my_syscall4(__NR_mknodat, AT_FDCWD, path, mode, dev);
-#elif defined(__NR_mknod)
- return my_syscall3(__NR_mknod, path, mode, dev);
#else
- return __nolibc_enosys(__func__, path, mode, dev);
+ return my_syscall3(__NR_mknod, path, mode, dev);
#endif
}
@@ -746,53 +767,6 @@ int sched_yield(void)
/*
- * int select(int nfds, fd_set *read_fds, fd_set *write_fds,
- * fd_set *except_fds, struct timeval *timeout);
- */
-
-static __attribute__((unused))
-int sys_select(int nfds, fd_set *rfds, fd_set *wfds, fd_set *efds, struct timeval *timeout)
-{
-#if defined(__ARCH_WANT_SYS_OLD_SELECT) && !defined(__NR__newselect)
- struct sel_arg_struct {
- unsigned long n;
- fd_set *r, *w, *e;
- struct timeval *t;
- } arg = { .n = nfds, .r = rfds, .w = wfds, .e = efds, .t = timeout };
- return my_syscall1(__NR_select, &arg);
-#elif defined(__NR__newselect)
- return my_syscall5(__NR__newselect, nfds, rfds, wfds, efds, timeout);
-#elif defined(__NR_select)
- return my_syscall5(__NR_select, nfds, rfds, wfds, efds, timeout);
-#elif defined(__NR_pselect6)
- struct timespec t;
-
- if (timeout) {
- t.tv_sec = timeout->tv_sec;
- t.tv_nsec = timeout->tv_usec * 1000;
- }
- return my_syscall6(__NR_pselect6, nfds, rfds, wfds, efds, timeout ? &t : NULL, NULL);
-#elif defined(__NR_pselect6_time64)
- struct __kernel_timespec t;
-
- if (timeout) {
- t.tv_sec = timeout->tv_sec;
- t.tv_nsec = timeout->tv_usec * 1000;
- }
- return my_syscall6(__NR_pselect6_time64, nfds, rfds, wfds, efds, timeout ? &t : NULL, NULL);
-#else
- return __nolibc_enosys(__func__, nfds, rfds, wfds, efds, timeout);
-#endif
-}
-
-static __attribute__((unused))
-int select(int nfds, fd_set *rfds, fd_set *wfds, fd_set *efds, struct timeval *timeout)
-{
- return __sysret(sys_select(nfds, rfds, wfds, efds, timeout));
-}
-
-
-/*
* int setpgid(pid_t pid, pid_t pgid);
*/
@@ -843,12 +817,10 @@ pid_t setsid(void)
static __attribute__((unused))
int sys_symlink(const char *old, const char *new)
{
-#ifdef __NR_symlinkat
+#if defined(__NR_symlinkat)
return my_syscall3(__NR_symlinkat, old, AT_FDCWD, new);
-#elif defined(__NR_symlink)
- return my_syscall2(__NR_symlink, old, new);
#else
- return __nolibc_enosys(__func__, old, new);
+ return my_syscall2(__NR_symlink, old, new);
#endif
}
@@ -900,12 +872,10 @@ int umount2(const char *path, int flags)
static __attribute__((unused))
int sys_unlink(const char *path)
{
-#ifdef __NR_unlinkat
+#if defined(__NR_unlinkat)
return my_syscall3(__NR_unlinkat, AT_FDCWD, path, 0);
-#elif defined(__NR_unlink)
- return my_syscall1(__NR_unlink, path);
#else
- return __nolibc_enosys(__func__, path);
+ return my_syscall1(__NR_unlink, path);
#endif
}
diff --git a/tools/include/nolibc/sys/auxv.h b/tools/include/nolibc/sys/auxv.h
index c52463d6c18d..0e98325e7347 100644
--- a/tools/include/nolibc/sys/auxv.h
+++ b/tools/include/nolibc/sys/auxv.h
@@ -10,6 +10,8 @@
#ifndef _NOLIBC_SYS_AUXV_H
#define _NOLIBC_SYS_AUXV_H
+#ifndef NOLIBC_NO_RUNTIME
+
#include "../crt.h"
static __attribute__((unused))
@@ -38,4 +40,5 @@ unsigned long getauxval(unsigned long type)
return ret;
}
+#endif /* NOLIBC_NO_RUNTIME */
#endif /* _NOLIBC_SYS_AUXV_H */
diff --git a/tools/include/nolibc/sys/mman.h b/tools/include/nolibc/sys/mman.h
index 5228751b458c..77084ac3405a 100644
--- a/tools/include/nolibc/sys/mman.h
+++ b/tools/include/nolibc/sys/mman.h
@@ -31,11 +31,6 @@ void *sys_mmap(void *addr, size_t length, int prot, int flags, int fd,
}
#endif
-/* Note that on Linux, MAP_FAILED is -1 so we can use the generic __sysret()
- * which returns -1 upon error and still satisfy user land that checks for
- * MAP_FAILED.
- */
-
static __attribute__((unused))
void *mmap(void *addr, size_t length, int prot, int flags, int fd, off_t offset)
{
diff --git a/tools/include/nolibc/sys/random.h b/tools/include/nolibc/sys/random.h
index 8d9749f1c845..cd5d25c571a8 100644
--- a/tools/include/nolibc/sys/random.h
+++ b/tools/include/nolibc/sys/random.h
@@ -22,13 +22,13 @@
static __attribute__((unused))
ssize_t sys_getrandom(void *buf, size_t buflen, unsigned int flags)
{
- return my_syscall3(__NR_getrandom, buf, buflen, flags);
+ return my_syscall3(__NR_getrandom, buf, buflen, flags);
}
static __attribute__((unused))
ssize_t getrandom(void *buf, size_t buflen, unsigned int flags)
{
- return __sysret(sys_getrandom(buf, buflen, flags));
+ return __sysret(sys_getrandom(buf, buflen, flags));
}
#endif /* _NOLIBC_SYS_RANDOM_H */
diff --git a/tools/include/nolibc/sys/reboot.h b/tools/include/nolibc/sys/reboot.h
index 4a1e435be669..38274c64a722 100644
--- a/tools/include/nolibc/sys/reboot.h
+++ b/tools/include/nolibc/sys/reboot.h
@@ -28,7 +28,7 @@ ssize_t sys_reboot(int magic1, int magic2, int cmd, void *arg)
static __attribute__((unused))
int reboot(int cmd)
{
- return __sysret(sys_reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, cmd, 0));
+ return __sysret(sys_reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, cmd, NULL));
}
#endif /* _NOLIBC_SYS_REBOOT_H */
diff --git a/tools/include/nolibc/sys/select.h b/tools/include/nolibc/sys/select.h
new file mode 100644
index 000000000000..2a5619c01277
--- /dev/null
+++ b/tools/include/nolibc/sys/select.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+
+#include "../nolibc.h"
+
+#ifndef _NOLIBC_SYS_SELECT_H
+#define _NOLIBC_SYS_SELECT_H
+
+#include <linux/time.h>
+#include <linux/unistd.h>
+
+/* commonly an fd_set represents 256 FDs */
+#ifndef FD_SETSIZE
+#define FD_SETSIZE 256
+#endif
+
+#define FD_SETIDXMASK (8 * sizeof(unsigned long))
+#define FD_SETBITMASK (8 * sizeof(unsigned long)-1)
+
+/* for select() */
+typedef struct {
+ unsigned long fds[(FD_SETSIZE + FD_SETBITMASK) / FD_SETIDXMASK];
+} fd_set;
+
+#define FD_CLR(fd, set) do { \
+ fd_set *__set = (set); \
+ int __fd = (fd); \
+ if (__fd >= 0) \
+ __set->fds[__fd / FD_SETIDXMASK] &= \
+ ~(1U << (__fd & FD_SETBITMASK)); \
+ } while (0)
+
+#define FD_SET(fd, set) do { \
+ fd_set *__set = (set); \
+ int __fd = (fd); \
+ if (__fd >= 0) \
+ __set->fds[__fd / FD_SETIDXMASK] |= \
+ 1 << (__fd & FD_SETBITMASK); \
+ } while (0)
+
+#define FD_ISSET(fd, set) ({ \
+ fd_set *__set = (set); \
+ int __fd = (fd); \
+ int __r = 0; \
+ if (__fd >= 0) \
+ __r = !!(__set->fds[__fd / FD_SETIDXMASK] & \
+1U << (__fd & FD_SETBITMASK)); \
+ __r; \
+ })
+
+#define FD_ZERO(set) do { \
+ fd_set *__set = (set); \
+ int __idx; \
+ int __size = (FD_SETSIZE+FD_SETBITMASK) / FD_SETIDXMASK;\
+ for (__idx = 0; __idx < __size; __idx++) \
+ __set->fds[__idx] = 0; \
+ } while (0)
+
+/*
+ * int select(int nfds, fd_set *read_fds, fd_set *write_fds,
+ * fd_set *except_fds, struct timeval *timeout);
+ */
+
+static __attribute__((unused))
+int sys_select(int nfds, fd_set *rfds, fd_set *wfds, fd_set *efds, struct timeval *timeout)
+{
+#if defined(__ARCH_WANT_SYS_OLD_SELECT) && !defined(__NR__newselect)
+ struct sel_arg_struct {
+ unsigned long n;
+ fd_set *r, *w, *e;
+ struct timeval *t;
+ } arg = { .n = nfds, .r = rfds, .w = wfds, .e = efds, .t = timeout };
+ return my_syscall1(__NR_select, &arg);
+#elif defined(__NR__newselect)
+ return my_syscall5(__NR__newselect, nfds, rfds, wfds, efds, timeout);
+#elif defined(__NR_select)
+ return my_syscall5(__NR_select, nfds, rfds, wfds, efds, timeout);
+#elif defined(__NR_pselect6)
+ struct timespec t;
+
+ if (timeout) {
+ t.tv_sec = timeout->tv_sec;
+ t.tv_nsec = timeout->tv_usec * 1000;
+ }
+ return my_syscall6(__NR_pselect6, nfds, rfds, wfds, efds, timeout ? &t : NULL, NULL);
+#else
+ struct __kernel_timespec t;
+
+ if (timeout) {
+ t.tv_sec = timeout->tv_sec;
+ t.tv_nsec = timeout->tv_usec * 1000;
+ }
+ return my_syscall6(__NR_pselect6_time64, nfds, rfds, wfds, efds, timeout ? &t : NULL, NULL);
+#endif
+}
+
+static __attribute__((unused))
+int select(int nfds, fd_set *rfds, fd_set *wfds, fd_set *efds, struct timeval *timeout)
+{
+ return __sysret(sys_select(nfds, rfds, wfds, efds, timeout));
+}
+
+
+#endif /* _NOLIBC_SYS_SELECT_H */
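With the fd_set machinery and select() now co-located, the header is self-contained for callers. A minimal usage sketch that waits up to two seconds for stdin to become readable:

    fd_set rfds;
    struct timeval tv = { .tv_sec = 2, .tv_usec = 0 };

    FD_ZERO(&rfds);
    FD_SET(0, &rfds);                   /* watch stdin */

    if (select(1, &rfds, NULL, NULL, &tv) > 0 && FD_ISSET(0, &rfds)) {
            /* stdin has data to read */
    }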
diff --git a/tools/include/nolibc/sys/timerfd.h b/tools/include/nolibc/sys/timerfd.h
index 4375d546ba58..5dd61030c991 100644
--- a/tools/include/nolibc/sys/timerfd.h
+++ b/tools/include/nolibc/sys/timerfd.h
@@ -34,7 +34,7 @@ int sys_timerfd_gettime(int fd, struct itimerspec *curr_value)
{
#if defined(__NR_timerfd_gettime)
return my_syscall2(__NR_timerfd_gettime, fd, curr_value);
-#elif defined(__NR_timerfd_gettime64)
+#else
struct __kernel_itimerspec kcurr_value;
int ret;
@@ -42,8 +42,6 @@ int sys_timerfd_gettime(int fd, struct itimerspec *curr_value)
__nolibc_timespec_kernel_to_user(&kcurr_value.it_interval, &curr_value->it_interval);
__nolibc_timespec_kernel_to_user(&kcurr_value.it_value, &curr_value->it_value);
return ret;
-#else
- return __nolibc_enosys(__func__, fd, curr_value);
#endif
}
@@ -60,7 +58,7 @@ int sys_timerfd_settime(int fd, int flags,
{
#if defined(__NR_timerfd_settime)
return my_syscall4(__NR_timerfd_settime, fd, flags, new_value, old_value);
-#elif defined(__NR_timerfd_settime64)
+#else
struct __kernel_itimerspec knew_value, kold_value;
int ret;
@@ -72,8 +70,6 @@ int sys_timerfd_settime(int fd, int flags,
__nolibc_timespec_kernel_to_user(&kold_value.it_value, &old_value->it_value);
}
return ret;
-#else
- return __nolibc_enosys(__func__, fd, flags, new_value, old_value);
#endif
}
diff --git a/tools/include/nolibc/sys/uio.h b/tools/include/nolibc/sys/uio.h
new file mode 100644
index 000000000000..7ad42b927d2f
--- /dev/null
+++ b/tools/include/nolibc/sys/uio.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * uio for NOLIBC
+ * Copyright (C) 2017-2021 Willy Tarreau <w@1wt.eu>
+ * Copyright (C) 2025 Intel Corporation
+ */
+
+/* make sure to include all global symbols */
+#include "../nolibc.h"
+
+#ifndef _NOLIBC_SYS_UIO_H
+#define _NOLIBC_SYS_UIO_H
+
+#include "../sys.h"
+#include <linux/uio.h>
+
+
+/*
+ * ssize_t readv(int fd, const struct iovec *iovec, int count);
+ */
+static __attribute__((unused))
+ssize_t sys_readv(int fd, const struct iovec *iovec, int count)
+{
+ return my_syscall3(__NR_readv, fd, iovec, count);
+}
+
+static __attribute__((unused))
+ssize_t readv(int fd, const struct iovec *iovec, int count)
+{
+ return __sysret(sys_readv(fd, iovec, count));
+}
+
+/*
+ * ssize_t writev(int fd, const struct iovec *iovec, int count);
+ */
+static __attribute__((unused))
+ssize_t sys_writev(int fd, const struct iovec *iovec, int count)
+{
+ return my_syscall3(__NR_writev, fd, iovec, count);
+}
+
+static __attribute__((unused))
+ssize_t writev(int fd, const struct iovec *iovec, int count)
+{
+ return __sysret(sys_writev(fd, iovec, count));
+}
+
+
+#endif /* _NOLIBC_SYS_UIO_H */
diff --git a/tools/include/nolibc/sys/wait.h b/tools/include/nolibc/sys/wait.h
index 4d44e3da0ba8..9d9319ba92cb 100644
--- a/tools/include/nolibc/sys/wait.h
+++ b/tools/include/nolibc/sys/wait.h
@@ -16,28 +16,11 @@
/*
* pid_t wait(int *status);
- * pid_t wait4(pid_t pid, int *status, int options, struct rusage *rusage);
* pid_t waitpid(pid_t pid, int *status, int options);
* int waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options);
*/
static __attribute__((unused))
-pid_t sys_wait4(pid_t pid, int *status, int options, struct rusage *rusage)
-{
-#ifdef __NR_wait4
- return my_syscall4(__NR_wait4, pid, status, options, rusage);
-#else
- return __nolibc_enosys(__func__, pid, status, options, rusage);
-#endif
-}
-
-static __attribute__((unused))
-pid_t wait4(pid_t pid, int *status, int options, struct rusage *rusage)
-{
- return __sysret(sys_wait4(pid, status, options, rusage));
-}
-
-static __attribute__((unused))
int sys_waitid(int which, pid_t pid, siginfo_t *infop, int options, struct rusage *rusage)
{
return my_syscall5(__NR_waitid, which, pid, infop, options, rusage);
@@ -78,27 +61,33 @@ pid_t waitpid(pid_t pid, int *status, int options)
ret = waitid(idtype, id, &info, options);
if (ret)
- return ret;
+ return -1;
switch (info.si_code) {
case 0:
- *status = 0;
+ if (status)
+ *status = 0;
break;
case CLD_EXITED:
- *status = (info.si_status & 0xff) << 8;
+ if (status)
+ *status = (info.si_status & 0xff) << 8;
break;
case CLD_KILLED:
- *status = info.si_status & 0x7f;
+ if (status)
+ *status = info.si_status & 0x7f;
break;
case CLD_DUMPED:
- *status = (info.si_status & 0x7f) | 0x80;
+ if (status)
+ *status = (info.si_status & 0x7f) | 0x80;
break;
case CLD_STOPPED:
case CLD_TRAPPED:
- *status = (info.si_status << 8) + 0x7f;
+ if (status)
+ *status = (info.si_status << 8) + 0x7f;
break;
case CLD_CONTINUED:
- *status = 0xffff;
+ if (status)
+ *status = 0xffff;
break;
default:
return -1;
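The switch re-encodes the siginfo result into the traditional wait status word, so the usual W* macros keep working on the value waitpid() stores. Worked encodings for reference (the DEMO_* macros are illustrative re-statements of the classic definitions):

    /* exited with code 3:       (3 & 0xff) << 8      == 0x0300
     * killed by SIGKILL (9):    9 & 0x7f             == 0x0009
     * dumped core on SIGSEGV:   (11 & 0x7f) | 0x80   == 0x008b */
    #define DEMO_WIFEXITED(s)       (((s) & 0x7f) == 0)
    #define DEMO_WEXITSTATUS(s)     (((s) >> 8) & 0xff)
    /* DEMO_WIFEXITED(0x0300) is true; DEMO_WEXITSTATUS(0x0300) == 3 */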
diff --git a/tools/include/nolibc/time.h b/tools/include/nolibc/time.h
index fc387940d51f..48e78f8becf9 100644
--- a/tools/include/nolibc/time.h
+++ b/tools/include/nolibc/time.h
@@ -36,6 +36,8 @@ void __nolibc_timespec_kernel_to_user(const struct __kernel_timespec *kts, struc
* int clock_getres(clockid_t clockid, struct timespec *res);
* int clock_gettime(clockid_t clockid, struct timespec *tp);
* int clock_settime(clockid_t clockid, const struct timespec *tp);
+ * int clock_nanosleep(clockid_t clockid, int flags, const struct timespec *rqtp,
+ * struct timespec *rmtp);
*/
static __attribute__((unused))
@@ -43,7 +45,7 @@ int sys_clock_getres(clockid_t clockid, struct timespec *res)
{
#if defined(__NR_clock_getres)
return my_syscall2(__NR_clock_getres, clockid, res);
-#elif defined(__NR_clock_getres_time64)
+#else
struct __kernel_timespec kres;
int ret;
@@ -51,8 +53,6 @@ int sys_clock_getres(clockid_t clockid, struct timespec *res)
if (res)
__nolibc_timespec_kernel_to_user(&kres, res);
return ret;
-#else
- return __nolibc_enosys(__func__, clockid, res);
#endif
}
@@ -67,7 +67,7 @@ int sys_clock_gettime(clockid_t clockid, struct timespec *tp)
{
#if defined(__NR_clock_gettime)
return my_syscall2(__NR_clock_gettime, clockid, tp);
-#elif defined(__NR_clock_gettime64)
+#else
struct __kernel_timespec ktp;
int ret;
@@ -75,8 +75,6 @@ int sys_clock_gettime(clockid_t clockid, struct timespec *tp)
if (tp)
__nolibc_timespec_kernel_to_user(&ktp, tp);
return ret;
-#else
- return __nolibc_enosys(__func__, clockid, tp);
#endif
}
@@ -91,13 +89,11 @@ int sys_clock_settime(clockid_t clockid, struct timespec *tp)
{
#if defined(__NR_clock_settime)
return my_syscall2(__NR_clock_settime, clockid, tp);
-#elif defined(__NR_clock_settime64)
+#else
struct __kernel_timespec ktp;
__nolibc_timespec_user_to_kernel(tp, &ktp);
return my_syscall2(__NR_clock_settime64, clockid, &ktp);
-#else
- return __nolibc_enosys(__func__, clockid, tp);
#endif
}
@@ -107,6 +103,31 @@ int clock_settime(clockid_t clockid, struct timespec *tp)
return __sysret(sys_clock_settime(clockid, tp));
}
+static __attribute__((unused))
+int sys_clock_nanosleep(clockid_t clockid, int flags, const struct timespec *rqtp,
+ struct timespec *rmtp)
+{
+#if defined(__NR_clock_nanosleep)
+ return my_syscall4(__NR_clock_nanosleep, clockid, flags, rqtp, rmtp);
+#else
+ struct __kernel_timespec krqtp, krmtp;
+ int ret;
+
+ __nolibc_timespec_user_to_kernel(rqtp, &krqtp);
+ ret = my_syscall4(__NR_clock_nanosleep_time64, clockid, flags, &krqtp, &krmtp);
+ if (rmtp)
+ __nolibc_timespec_kernel_to_user(&krmtp, rmtp);
+ return ret;
+#endif
+}
+
+static __attribute__((unused))
+int clock_nanosleep(clockid_t clockid, int flags, const struct timespec *rqtp,
+ struct timespec *rmtp)
+{
+ /* Directly return a positive error number */
+ return -sys_clock_nanosleep(clockid, flags, rqtp, rmtp);
+}
static __inline__
double difftime(time_t time1, time_t time2)
@@ -114,6 +135,12 @@ double difftime(time_t time1, time_t time2)
return time1 - time2;
}
+static __inline__
+int nanosleep(const struct timespec *rqtp, struct timespec *rmtp)
+{
+ return __sysret(sys_clock_nanosleep(CLOCK_REALTIME, 0, rqtp, rmtp));
+}
+
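nanosleep() thus reduces to sys_clock_nanosleep() on CLOCK_REALTIME with no flags, while clock_nanosleep() follows POSIX and hands back a positive error number instead of going through errno. Usage sketch:

    struct timespec req = { .tv_sec = 1, .tv_nsec = 500000000 };    /* 1.5 s */
    int err;

    nanosleep(&req, NULL);                  /* -1 with errno set on failure */
    err = clock_nanosleep(CLOCK_MONOTONIC, 0, &req, NULL);
    /* err is 0 on success, or a positive error number such as EINTR */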
static __attribute__((unused))
time_t time(time_t *tptr)
@@ -164,7 +191,7 @@ int sys_timer_gettime(timer_t timerid, struct itimerspec *curr_value)
{
#if defined(__NR_timer_gettime)
return my_syscall2(__NR_timer_gettime, timerid, curr_value);
-#elif defined(__NR_timer_gettime64)
+#else
struct __kernel_itimerspec kcurr_value;
int ret;
@@ -172,8 +199,6 @@ int sys_timer_gettime(timer_t timerid, struct itimerspec *curr_value)
__nolibc_timespec_kernel_to_user(&kcurr_value.it_interval, &curr_value->it_interval);
__nolibc_timespec_kernel_to_user(&kcurr_value.it_value, &curr_value->it_value);
return ret;
-#else
- return __nolibc_enosys(__func__, timerid, curr_value);
#endif
}
@@ -189,7 +214,7 @@ int sys_timer_settime(timer_t timerid, int flags,
{
#if defined(__NR_timer_settime)
return my_syscall4(__NR_timer_settime, timerid, flags, new_value, old_value);
-#elif defined(__NR_timer_settime64)
+#else
struct __kernel_itimerspec knew_value, kold_value;
int ret;
@@ -201,8 +226,6 @@ int sys_timer_settime(timer_t timerid, int flags,
__nolibc_timespec_kernel_to_user(&kold_value.it_value, &old_value->it_value);
}
return ret;
-#else
- return __nolibc_enosys(__func__, timerid, flags, new_value, old_value);
#endif
}
diff --git a/tools/include/nolibc/types.h b/tools/include/nolibc/types.h
index 30904be544ed..470a5f77bc0f 100644
--- a/tools/include/nolibc/types.h
+++ b/tools/include/nolibc/types.h
@@ -70,11 +70,6 @@
#define DT_LNK 0xa
#define DT_SOCK 0xc
-/* commonly an fd_set represents 256 FDs */
-#ifndef FD_SETSIZE
-#define FD_SETSIZE 256
-#endif
-
/* PATH_MAX and MAXPATHLEN are often used and found with plenty of different
* values.
*/
@@ -115,48 +110,6 @@
#define EXIT_SUCCESS 0
#define EXIT_FAILURE 1
-#define FD_SETIDXMASK (8 * sizeof(unsigned long))
-#define FD_SETBITMASK (8 * sizeof(unsigned long)-1)
-
-/* for select() */
-typedef struct {
- unsigned long fds[(FD_SETSIZE + FD_SETBITMASK) / FD_SETIDXMASK];
-} fd_set;
-
-#define FD_CLR(fd, set) do { \
- fd_set *__set = (set); \
- int __fd = (fd); \
- if (__fd >= 0) \
- __set->fds[__fd / FD_SETIDXMASK] &= \
- ~(1U << (__fd & FD_SETBITMASK)); \
- } while (0)
-
-#define FD_SET(fd, set) do { \
- fd_set *__set = (set); \
- int __fd = (fd); \
- if (__fd >= 0) \
- __set->fds[__fd / FD_SETIDXMASK] |= \
- 1 << (__fd & FD_SETBITMASK); \
- } while (0)
-
-#define FD_ISSET(fd, set) ({ \
- fd_set *__set = (set); \
- int __fd = (fd); \
- int __r = 0; \
- if (__fd >= 0) \
- __r = !!(__set->fds[__fd / FD_SETIDXMASK] & \
-1U << (__fd & FD_SETBITMASK)); \
- __r; \
- })
-
-#define FD_ZERO(set) do { \
- fd_set *__set = (set); \
- int __idx; \
- int __size = (FD_SETSIZE+FD_SETBITMASK) / FD_SETIDXMASK;\
- for (__idx = 0; __idx < __size; __idx++) \
- __set->fds[__idx] = 0; \
- } while (0)
-
/* for getdents64() */
struct linux_dirent64 {
uint64_t d_ino;
diff --git a/tools/include/nolibc/unistd.h b/tools/include/nolibc/unistd.h
index 25bfc7732ec7..bb5e80f3f05d 100644
--- a/tools/include/nolibc/unistd.h
+++ b/tools/include/nolibc/unistd.h
@@ -33,7 +33,7 @@
static __attribute__((unused))
int sys_faccessat(int fd, const char *path, int amode, int flag)
{
- return my_syscall4(__NR_faccessat, fd, path, amode, flag);
+ return my_syscall4(__NR_faccessat, fd, path, amode, flag);
}
static __attribute__((unused))
@@ -54,7 +54,7 @@ int msleep(unsigned int msecs)
{
struct timeval my_timeval = { msecs / 1000, (msecs % 1000) * 1000 };
- if (sys_select(0, 0, 0, 0, &my_timeval) < 0)
+ if (sys_select(0, NULL, NULL, NULL, &my_timeval) < 0)
return (my_timeval.tv_sec * 1000) +
(my_timeval.tv_usec / 1000) +
!!(my_timeval.tv_usec % 1000);
@@ -67,7 +67,7 @@ unsigned int sleep(unsigned int seconds)
{
struct timeval my_timeval = { seconds, 0 };
- if (sys_select(0, 0, 0, 0, &my_timeval) < 0)
+ if (sys_select(0, NULL, NULL, NULL, &my_timeval) < 0)
return my_timeval.tv_sec + !!my_timeval.tv_usec;
else
return 0;
@@ -78,7 +78,7 @@ int usleep(unsigned int usecs)
{
struct timeval my_timeval = { usecs / 1000000, usecs % 1000000 };
- return sys_select(0, 0, 0, 0, &my_timeval);
+ return sys_select(0, NULL, NULL, NULL, &my_timeval);
}
static __attribute__((unused))
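All three sleep helpers are thin layers over sys_select() with empty fd sets and only a timeout; since Linux select() rewrites the timeout with the time not slept, msleep() can return the milliseconds remaining after an interruption. Usage sketch:

    unsigned int left = msleep(250);    /* 0 normally; remaining ms if interrupted */
    usleep(1500000);                    /* 1.5 s through the same sys_select() path */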
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index 2892a45023af..04e0077fb4c9 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -852,8 +852,14 @@ __SYSCALL(__NR_removexattrat, sys_removexattrat)
#define __NR_open_tree_attr 467
__SYSCALL(__NR_open_tree_attr, sys_open_tree_attr)
+/* fs/inode.c */
+#define __NR_file_getattr 468
+__SYSCALL(__NR_file_getattr, sys_file_getattr)
+#define __NR_file_setattr 469
+__SYSCALL(__NR_file_setattr, sys_file_setattr)
+
#undef __NR_syscalls
-#define __NR_syscalls 468
+#define __NR_syscalls 470
/*
* 32 bit systems traditionally used different
diff --git a/tools/include/uapi/drm/drm.h b/tools/include/uapi/drm/drm.h
index 7fba37b94401..3cd5cf15e3c9 100644
--- a/tools/include/uapi/drm/drm.h
+++ b/tools/include/uapi/drm/drm.h
@@ -597,35 +597,66 @@ struct drm_set_version {
int drm_dd_minor;
};
-/* DRM_IOCTL_GEM_CLOSE ioctl argument type */
+/**
+ * struct drm_gem_close - Argument for &DRM_IOCTL_GEM_CLOSE ioctl.
+ * @handle: Handle of the object to be closed.
+ * @pad: Padding.
+ *
+ * Releases the handle to an mm object.
+ */
struct drm_gem_close {
- /** Handle of the object to be closed. */
__u32 handle;
__u32 pad;
};
-/* DRM_IOCTL_GEM_FLINK ioctl argument type */
+/**
+ * struct drm_gem_flink - Argument for &DRM_IOCTL_GEM_FLINK ioctl.
+ * @handle: Handle for the object being named.
+ * @name: Returned global name.
+ *
+ * Create a global name for an object, returning the name.
+ *
+ * Note that the name does not hold a reference; when the object
+ * is freed, the name goes away.
+ */
struct drm_gem_flink {
- /** Handle for the object being named */
__u32 handle;
-
- /** Returned global name */
__u32 name;
};
-/* DRM_IOCTL_GEM_OPEN ioctl argument type */
+/**
+ * struct drm_gem_open - Argument for &DRM_IOCTL_GEM_OPEN ioctl.
+ * @name: Name of object being opened.
+ * @handle: Returned handle for the object.
+ * @size: Returned size of the object
+ *
+ * Open an object using the global name, returning a handle and the size.
+ *
+ * This handle (of course) holds a reference to the object, so the object
+ * will not go away until the handle is deleted.
+ */
struct drm_gem_open {
- /** Name of object being opened */
__u32 name;
-
- /** Returned handle for the object */
__u32 handle;
-
- /** Returned size of the object */
__u64 size;
};
/**
+ * struct drm_gem_change_handle - Argument for &DRM_IOCTL_GEM_CHANGE_HANDLE ioctl.
+ * @handle: The handle of a gem object.
+ * @new_handle: An available gem handle.
+ *
+ * This ioctl changes the handle of a GEM object to the specified one.
+ * The new handle must be unused. On success the old handle is closed
+ * and all further ioctls should refer to the new handle only.
+ * Calls to DRM_IOCTL_PRIME_FD_TO_HANDLE will return the new handle.
+ */
+struct drm_gem_change_handle {
+ __u32 handle;
+ __u32 new_handle;
+};
+
+/**
* DRM_CAP_DUMB_BUFFER
*
* If set to 1, the driver supports creating dumb buffers via the
@@ -905,13 +936,17 @@ struct drm_syncobj_destroy {
};
#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE (1 << 0)
+#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE (1 << 1)
#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE (1 << 0)
+#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE (1 << 1)
struct drm_syncobj_handle {
__u32 handle;
__u32 flags;
__s32 fd;
__u32 pad;
+
+ __u64 point;
};
struct drm_syncobj_transfer {
@@ -1305,6 +1340,14 @@ extern "C" {
*/
#define DRM_IOCTL_SET_CLIENT_NAME DRM_IOWR(0xD1, struct drm_set_client_name)
+/**
+ * DRM_IOCTL_GEM_CHANGE_HANDLE - Move an object to a different handle
+ *
+ * Some applications (notably CRIU) need objects to have specific gem handles.
+ * This ioctl changes the object at one gem handle to use a new gem handle.
+ */
+#define DRM_IOCTL_GEM_CHANGE_HANDLE DRM_IOWR(0xD2, struct drm_gem_change_handle)
+
/*
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
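With the new ioctl, moving a GEM object to a caller-chosen handle (the CRIU restore case) is one call; on success the old handle is closed by the kernel. A usage sketch with illustrative handle values:

    struct drm_gem_change_handle args = {
            .handle = old_handle,           /* currently valid GEM handle */
            .new_handle = wanted_handle,    /* must be unused */
    };

    if (ioctl(drm_fd, DRM_IOCTL_GEM_CHANGE_HANDLE, &args) == 0) {
            /* old_handle is gone; refer to wanted_handle from now on */
    }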
diff --git a/tools/include/uapi/linux/bits.h b/tools/include/uapi/linux/bits.h
index 5ee30f882736..a04afef9efca 100644
--- a/tools/include/uapi/linux/bits.h
+++ b/tools/include/uapi/linux/bits.h
@@ -4,13 +4,9 @@
#ifndef _UAPI_LINUX_BITS_H
#define _UAPI_LINUX_BITS_H
-#define __GENMASK(h, l) \
- (((~_UL(0)) - (_UL(1) << (l)) + 1) & \
- (~_UL(0) >> (__BITS_PER_LONG - 1 - (h))))
+#define __GENMASK(h, l) (((~_UL(0)) << (l)) & (~_UL(0) >> (__BITS_PER_LONG - 1 - (h))))
-#define __GENMASK_ULL(h, l) \
- (((~_ULL(0)) - (_ULL(1) << (l)) + 1) & \
- (~_ULL(0) >> (__BITS_PER_LONG_LONG - 1 - (h))))
+#define __GENMASK_ULL(h, l) (((~_ULL(0)) << (l)) & (~_ULL(0) >> (__BITS_PER_LONG_LONG - 1 - (h))))
#define __GENMASK_U128(h, l) \
((_BIT128((h)) << 1) - (_BIT128(l)))
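The shorter form works because (~0UL << l) already clears the low l bits, which is exactly what the removed (~0UL - (1UL << l) + 1) expression computed; the right-shift term then trims everything above bit h. Worked through for h=7, l=4 on a 64-bit long:

    /* (~0UL << 4)            == 0xfffffffffffffff0
     * (~0UL >> (64 - 1 - 7)) == 0x00000000000000ff
     * __GENMASK(7, 4)        == 0x00000000000000f0   (bits 7..4 set) */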
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 85180e4aaa5a..be7d8e060e10 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -450,6 +450,7 @@ union bpf_iter_link_info {
* * **struct bpf_map_info**
* * **struct bpf_btf_info**
* * **struct bpf_link_info**
+ * * **struct bpf_token_info**
*
* Return
* Returns zero on success. On error, -1 is returned and *errno*
@@ -906,6 +907,17 @@ union bpf_iter_link_info {
* A new file descriptor (a nonnegative integer), or -1 if an
* error occurred (in which case, *errno* is set appropriately).
*
+ * BPF_PROG_STREAM_READ_BY_FD
+ * Description
+ * Read data of a program's BPF stream. The program is identified
+ * by *prog_fd*, and the stream is identified by the *stream_id*.
+ * The data is copied to a buffer pointed to by *stream_buf*, and
+ * at most *stream_buf_len* bytes are filled.
+ *
+ * Return
+ * Number of bytes read from the stream on success, or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
* NOTES
* eBPF objects (maps and programs) can be shared between processes.
*
@@ -961,6 +973,7 @@ enum bpf_cmd {
BPF_LINK_DETACH,
BPF_PROG_BIND_MAP,
BPF_TOKEN_CREATE,
+ BPF_PROG_STREAM_READ_BY_FD,
__MAX_BPF_CMD,
};
@@ -1013,6 +1026,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_USER_RINGBUF,
BPF_MAP_TYPE_CGRP_STORAGE,
BPF_MAP_TYPE_ARENA,
+ BPF_MAP_TYPE_INSN_ARRAY,
__MAX_BPF_MAP_TYPE
};
@@ -1417,6 +1431,9 @@ enum {
/* Do not translate kernel bpf_arena pointers to user pointers */
BPF_F_NO_USER_CONV = (1U << 18),
+
+/* Enable BPF ringbuf overwrite mode */
+ BPF_F_RB_OVERWRITE = (1U << 19),
};
/* Flags for BPF_PROG_QUERY. */
@@ -1463,6 +1480,11 @@ struct bpf_stack_build_id {
#define BPF_OBJ_NAME_LEN 16U
+enum {
+ BPF_STREAM_STDOUT = 1,
+ BPF_STREAM_STDERR = 2,
+};
+
union bpf_attr {
struct { /* anonymous struct used by BPF_MAP_CREATE command */
__u32 map_type; /* one of enum bpf_map_type */
@@ -1504,6 +1526,12 @@ union bpf_attr {
* If provided, map_flags should have BPF_F_TOKEN_FD flag set.
*/
__s32 map_token_fd;
+
+ /* Hash of the program that has exclusive access to the map.
+ */
+ __aligned_u64 excl_prog_hash;
+ /* Size of the passed excl_prog_hash. */
+ __u32 excl_prog_hash_size;
};
struct { /* anonymous struct used by BPF_MAP_*_ELEM and BPF_MAP_FREEZE commands */
@@ -1587,6 +1615,16 @@ union bpf_attr {
* continuous.
*/
__u32 fd_array_cnt;
+ /* Pointer to a buffer containing the signature of the BPF
+ * program.
+ */
+ __aligned_u64 signature;
+ /* Size of the signature buffer in bytes. */
+ __u32 signature_size;
+ /* ID of the kernel keyring to be used for signature
+ * verification.
+ */
+ __s32 keyring_id;
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -1794,6 +1832,13 @@ union bpf_attr {
};
__u64 expected_revision;
} netkit;
+ struct {
+ union {
+ __u32 relative_fd;
+ __u32 relative_id;
+ };
+ __u64 expected_revision;
+ } cgroup;
};
} link_create;
@@ -1842,6 +1887,13 @@ union bpf_attr {
__u32 bpffs_fd;
} token_create;
+ struct {
+ __aligned_u64 stream_buf;
+ __u32 stream_buf_len;
+ __u32 stream_id;
+ __u32 prog_fd;
+ } prog_stream_read;
+
} __attribute__((aligned(8)));
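Given the prog_stream_read fields above and the BPF_STREAM_* constants, draining a program's stdout stream reduces to one bpf(2) call. A hedged sketch using the raw syscall (field and constant names as in this diff; prog_fd is assumed to be the fd of a loaded program; assumes <unistd.h> and <sys/syscall.h>):

    union bpf_attr attr = {0};
    char buf[4096];
    int n;

    attr.prog_stream_read.prog_fd = prog_fd;
    attr.prog_stream_read.stream_id = BPF_STREAM_STDOUT;
    attr.prog_stream_read.stream_buf = (__u64)(unsigned long)buf;
    attr.prog_stream_read.stream_buf_len = sizeof(buf);

    n = syscall(__NR_bpf, BPF_PROG_STREAM_READ_BY_FD, &attr, sizeof(attr));
    /* n: bytes read on success; -1 with errno set on error */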
/* The description below is an attempt at providing documentation to eBPF
@@ -2056,6 +2108,7 @@ union bpf_attr {
* for updates resulting in a null checksum the value is set to
* **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates
* that the modified header field is part of the pseudo-header.
+ * Flag **BPF_F_IPV6** should be set for IPv6 packets.
*
* This helper works in combination with **bpf_csum_diff**\ (),
* which does not update the checksum in-place, but offers more
@@ -2402,7 +2455,7 @@ union bpf_attr {
* into it. An example is available in file
* *samples/bpf/trace_output_user.c* in the Linux kernel source
* tree (the eBPF program counterpart is in
- * *samples/bpf/trace_output_kern.c*).
+ * *samples/bpf/trace_output.bpf.c*).
*
* **bpf_perf_event_output**\ () achieves better performance
* than **bpf_trace_printk**\ () for sharing data with user
@@ -4842,7 +4895,7 @@ union bpf_attr {
*
* **-ENOENT** if the bpf_local_storage cannot be found.
*
- * long bpf_d_path(struct path *path, char *buf, u32 sz)
+ * long bpf_d_path(const struct path *path, char *buf, u32 sz)
* Description
* Return full path for given **struct path** object, which
* needs to be the kernel BTF *path* object. The path is
@@ -5569,7 +5622,7 @@ union bpf_attr {
* Return
* *sk* if casting is valid, or **NULL** otherwise.
*
- * long bpf_dynptr_from_mem(void *data, u32 size, u64 flags, struct bpf_dynptr *ptr)
+ * long bpf_dynptr_from_mem(void *data, u64 size, u64 flags, struct bpf_dynptr *ptr)
* Description
* Get a dynptr to local memory *data*.
*
@@ -5612,7 +5665,7 @@ union bpf_attr {
* Return
* Nothing. Always succeeds.
*
- * long bpf_dynptr_read(void *dst, u32 len, const struct bpf_dynptr *src, u32 offset, u64 flags)
+ * long bpf_dynptr_read(void *dst, u64 len, const struct bpf_dynptr *src, u64 offset, u64 flags)
* Description
* Read *len* bytes from *src* into *dst*, starting from *offset*
* into *src*.
@@ -5622,7 +5675,7 @@ union bpf_attr {
* of *src*'s data, -EINVAL if *src* is an invalid dynptr or if
* *flags* is not 0.
*
- * long bpf_dynptr_write(const struct bpf_dynptr *dst, u32 offset, void *src, u32 len, u64 flags)
+ * long bpf_dynptr_write(const struct bpf_dynptr *dst, u64 offset, void *src, u64 len, u64 flags)
* Description
* Write *len* bytes from *src* into *dst*, starting from *offset*
* into *dst*.
@@ -5643,7 +5696,7 @@ union bpf_attr {
* is a read-only dynptr or if *flags* is not correct. For skb-type dynptrs,
* other errors correspond to errors returned by **bpf_skb_store_bytes**\ ().
*
- * void *bpf_dynptr_data(const struct bpf_dynptr *ptr, u32 offset, u32 len)
+ * void *bpf_dynptr_data(const struct bpf_dynptr *ptr, u64 offset, u64 len)
* Description
* Get a pointer to the underlying dynptr data.
*
@@ -6072,6 +6125,7 @@ enum {
BPF_F_PSEUDO_HDR = (1ULL << 4),
BPF_F_MARK_MANGLED_0 = (1ULL << 5),
BPF_F_MARK_ENFORCE = (1ULL << 6),
+ BPF_F_IPV6 = (1ULL << 7),
};
/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
@@ -6181,6 +6235,7 @@ enum {
BPF_RB_RING_SIZE = 1,
BPF_RB_CONS_POS = 2,
BPF_RB_PROD_POS = 3,
+ BPF_RB_OVERWRITE_POS = 4,
};
/* BPF ring buffer constants */
@@ -6632,6 +6687,8 @@ struct bpf_map_info {
__u32 btf_value_type_id;
__u32 btf_vmlinux_id;
__u64 map_extra;
+ __aligned_u64 hash;
+ __u32 hash_size;
} __attribute__((aligned(8)));
struct bpf_btf_info {
@@ -6651,11 +6708,15 @@ struct bpf_link_info {
struct {
__aligned_u64 tp_name; /* in/out: tp_name buffer ptr */
__u32 tp_name_len; /* in/out: tp_name buffer len */
+ __u32 :32;
+ __u64 cookie;
} raw_tracepoint;
struct {
__u32 attach_type;
__u32 target_obj_id; /* prog_id for PROG_EXT, otherwise btf object id */
__u32 target_btf_id; /* BTF type id inside the object */
+ __u32 :32;
+ __u64 cookie;
} tracing;
struct {
__u64 cgroup_id;
@@ -6766,6 +6827,13 @@ struct bpf_link_info {
};
} __attribute__((aligned(8)));
+struct bpf_token_info {
+ __u64 allowed_cmds;
+ __u64 allowed_maps;
+ __u64 allowed_progs;
+ __u64 allowed_attachs;
+} __attribute__((aligned(8)));
+
/* User bpf_sock_addr struct to access socket fields and sockaddr struct passed
* by user and intended to be used by socket (e.g. to bind to, depends on
* attach type).
@@ -7137,6 +7205,7 @@ enum {
TCP_BPF_SYN_MAC = 1007, /* Copy the MAC, IP[46], and TCP header */
TCP_BPF_SOCK_OPS_CB_FLAGS = 1008, /* Get or Set TCP sock ops flags */
SK_BPF_CB_FLAGS = 1009, /* Get or set sock ops flags in socket */
+ SK_BPF_BYPASS_PROT_MEM = 1010, /* Get or Set sk->sk_bypass_prot_mem */
};
enum {
@@ -7373,6 +7442,10 @@ struct bpf_timer {
__u64 __opaque[2];
} __attribute__((aligned(8)));
+struct bpf_task_work {
+ __u64 __opaque;
+} __attribute__((aligned(8)));
+
struct bpf_wq {
__u64 __opaque[2];
} __attribute__((aligned(8)));
@@ -7578,4 +7651,24 @@ enum bpf_kfunc_flags {
BPF_F_PAD_ZEROS = (1ULL << 0),
};
+/*
+ * Values of a BPF_MAP_TYPE_INSN_ARRAY entry must be of this type.
+ *
+ * Before the map is used the orig_off field should point to an
+ * instruction inside the program being loaded. The other fields
+ * must be set to 0.
+ *
+ * After the program is loaded, the xlated_off will be adjusted
+ * by the verifier to point to the index of the original instruction
+ * in the xlated program. If the instruction is deleted, it will
+ * be set to (u32)-1. The jitted_off will be set to the corresponding
+ * offset in the jitted image of the program.
+ */
+struct bpf_insn_array_value {
+ __u32 orig_off;
+ __u32 xlated_off;
+ __u32 jitted_off;
+ __u32 :32;
+};
+
#endif /* _UAPI__LINUX_BPF_H__ */
diff --git a/tools/include/uapi/linux/coredump.h b/tools/include/uapi/linux/coredump.h
new file mode 100644
index 000000000000..dc3789b78af0
--- /dev/null
+++ b/tools/include/uapi/linux/coredump.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef _UAPI_LINUX_COREDUMP_H
+#define _UAPI_LINUX_COREDUMP_H
+
+#include <linux/types.h>
+
+/**
+ * coredump_{req,ack} flags
+ * @COREDUMP_KERNEL: kernel writes coredump
+ * @COREDUMP_USERSPACE: userspace writes coredump
+ * @COREDUMP_REJECT: don't generate coredump
+ * @COREDUMP_WAIT: wait for coredump server
+ */
+enum {
+ COREDUMP_KERNEL = (1ULL << 0),
+ COREDUMP_USERSPACE = (1ULL << 1),
+ COREDUMP_REJECT = (1ULL << 2),
+ COREDUMP_WAIT = (1ULL << 3),
+};
+
+/**
+ * struct coredump_req - message kernel sends to userspace
+ * @size: size of struct coredump_req
+ * @size_ack: known size of struct coredump_ack on this kernel
+ * @mask: supported features
+ *
+ * When a coredump happens the kernel will connect to the coredump
+ * socket and send a coredump request to the coredump server. The @size
+ * member is set to the size of struct coredump_req and provides a hint
+ * to userspace how much data can be read. Userspace may use MSG_PEEK to
+ * peek the size of struct coredump_req and then choose to consume it in
+ * one go. Userspace may also simply read a COREDUMP_REQ_SIZE_VER0
+ * request. If the size the kernel sends is larger userspace simply
+ * discards any remaining data.
+ *
+ * The coredump_req->mask member is set to the currently known features.
+ * Userspace may only set coredump_ack->mask to the bits raised by the
+ * kernel in coredump_req->mask.
+ *
+ * The coredump_req->size_ack member is set by the kernel to the size of
+ * struct coredump_ack the kernel knows. Userspace may only send up to
+ * coredump_req->size_ack bytes to the kernel and must set
+ * coredump_ack->size accordingly.
+ */
+struct coredump_req {
+ __u32 size;
+ __u32 size_ack;
+ __u64 mask;
+};
+
+enum {
+ COREDUMP_REQ_SIZE_VER0 = 16U, /* size of first published struct */
+};
+
+/**
+ * struct coredump_ack - message userspace sends to kernel
+ * @size: size of the struct
+ * @spare: unused
+ * @mask: features kernel is supposed to use
+ *
+ * The @size member must be set to the size of struct coredump_ack. It
+ * may never exceed what the kernel returned in coredump_req->size_ack
+ * but it may of course be smaller (>= COREDUMP_ACK_SIZE_VER0 and <=
+ * coredump_req->size_ack).
+ *
+ * The @mask member must be set to the features the coredump server
+ * wants the kernel to use. Only bits the kernel returned in
+ * coredump_req->mask may be set.
+ */
+struct coredump_ack {
+ __u32 size;
+ __u32 spare;
+ __u64 mask;
+};
+
+enum {
+ COREDUMP_ACK_SIZE_VER0 = 16U, /* size of first published struct */
+};
+
+/**
+ * enum coredump_mark - Markers for the coredump socket
+ *
+ * The kernel will place a single byte on the coredump socket. The
+ * markers notify userspace whether the coredump ack succeeded or
+ * failed.
+ *
+ * @COREDUMP_MARK_MINSIZE: the provided coredump_ack size was too small
+ * @COREDUMP_MARK_MAXSIZE: the provided coredump_ack size was too big
+ * @COREDUMP_MARK_UNSUPPORTED: the provided coredump_ack mask was invalid
+ * @COREDUMP_MARK_CONFLICTING: the provided coredump_ack mask has conflicting options
+ * @COREDUMP_MARK_REQACK: the coredump request and ack was successful
+ * @__COREDUMP_MARK_MAX: the maximum coredump mark value
+ */
+enum coredump_mark {
+ COREDUMP_MARK_REQACK = 0U,
+ COREDUMP_MARK_MINSIZE = 1U,
+ COREDUMP_MARK_MAXSIZE = 2U,
+ COREDUMP_MARK_UNSUPPORTED = 3U,
+ COREDUMP_MARK_CONFLICTING = 4U,
+ __COREDUMP_MARK_MAX = (1U << 31),
+};
+
+#endif /* _UAPI_LINUX_COREDUMP_H */
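Taken together, the request/ack structures and the mark byte define a small handshake over the coredump socket. A hedged sketch of the server side (socket setup and error handling elided; sock is assumed connected, <sys/socket.h> included):

    struct coredump_req req = {0};
    struct coredump_ack ack = {0};
    __u8 mark = 0;

    /* read the request; per the rules documented above, any bytes
     * beyond the structure we know are discarded */
    recv(sock, &req, sizeof(req), 0);

    ack.size = sizeof(ack);                 /* == COREDUMP_ACK_SIZE_VER0 */
    ack.mask = req.mask & COREDUMP_KERNEL;  /* only bits the kernel offered */
    send(sock, &ack, ack.size, 0);

    recv(sock, &mark, 1, 0);
    /* mark == COREDUMP_MARK_REQACK means the handshake succeeded */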
diff --git a/tools/include/uapi/linux/fscrypt.h b/tools/include/uapi/linux/fscrypt.h
index 7a8f4c290187..3aff99f2696a 100644
--- a/tools/include/uapi/linux/fscrypt.h
+++ b/tools/include/uapi/linux/fscrypt.h
@@ -119,7 +119,7 @@ struct fscrypt_key_specifier {
*/
struct fscrypt_provisioning_key_payload {
__u32 type;
- __u32 __reserved;
+ __u32 flags;
__u8 raw[];
};
@@ -128,7 +128,9 @@ struct fscrypt_add_key_arg {
struct fscrypt_key_specifier key_spec;
__u32 raw_size;
__u32 key_id;
- __u32 __reserved[8];
+#define FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED 0x00000001
+ __u32 flags;
+ __u32 __reserved[7];
__u8 raw[];
};
diff --git a/tools/include/uapi/linux/genetlink.h b/tools/include/uapi/linux/genetlink.h
new file mode 100644
index 000000000000..ddba3ca01e39
--- /dev/null
+++ b/tools/include/uapi/linux/genetlink.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI__LINUX_GENERIC_NETLINK_H
+#define _UAPI__LINUX_GENERIC_NETLINK_H
+
+#include <linux/types.h>
+#include <linux/netlink.h>
+
+#define GENL_NAMSIZ 16 /* length of family name */
+
+#define GENL_MIN_ID NLMSG_MIN_TYPE
+#define GENL_MAX_ID 1023
+
+struct genlmsghdr {
+ __u8 cmd;
+ __u8 version;
+ __u16 reserved;
+};
+
+#define GENL_HDRLEN NLMSG_ALIGN(sizeof(struct genlmsghdr))
+
+#define GENL_ADMIN_PERM 0x01
+#define GENL_CMD_CAP_DO 0x02
+#define GENL_CMD_CAP_DUMP 0x04
+#define GENL_CMD_CAP_HASPOL 0x08
+#define GENL_UNS_ADMIN_PERM 0x10
+
+/*
+ * List of reserved static generic netlink identifiers:
+ */
+#define GENL_ID_CTRL NLMSG_MIN_TYPE
+#define GENL_ID_VFS_DQUOT (NLMSG_MIN_TYPE + 1)
+#define GENL_ID_PMCRAID (NLMSG_MIN_TYPE + 2)
+/* must be last reserved + 1 */
+#define GENL_START_ALLOC (NLMSG_MIN_TYPE + 3)
+
+/**************************************************************************
+ * Controller
+ **************************************************************************/
+
+enum {
+ CTRL_CMD_UNSPEC,
+ CTRL_CMD_NEWFAMILY,
+ CTRL_CMD_DELFAMILY,
+ CTRL_CMD_GETFAMILY,
+ CTRL_CMD_NEWOPS,
+ CTRL_CMD_DELOPS,
+ CTRL_CMD_GETOPS,
+ CTRL_CMD_NEWMCAST_GRP,
+ CTRL_CMD_DELMCAST_GRP,
+ CTRL_CMD_GETMCAST_GRP, /* unused */
+ CTRL_CMD_GETPOLICY,
+ __CTRL_CMD_MAX,
+};
+
+#define CTRL_CMD_MAX (__CTRL_CMD_MAX - 1)
+
+enum {
+ CTRL_ATTR_UNSPEC,
+ CTRL_ATTR_FAMILY_ID,
+ CTRL_ATTR_FAMILY_NAME,
+ CTRL_ATTR_VERSION,
+ CTRL_ATTR_HDRSIZE,
+ CTRL_ATTR_MAXATTR,
+ CTRL_ATTR_OPS,
+ CTRL_ATTR_MCAST_GROUPS,
+ CTRL_ATTR_POLICY,
+ CTRL_ATTR_OP_POLICY,
+ CTRL_ATTR_OP,
+ __CTRL_ATTR_MAX,
+};
+
+#define CTRL_ATTR_MAX (__CTRL_ATTR_MAX - 1)
+
+enum {
+ CTRL_ATTR_OP_UNSPEC,
+ CTRL_ATTR_OP_ID,
+ CTRL_ATTR_OP_FLAGS,
+ __CTRL_ATTR_OP_MAX,
+};
+
+#define CTRL_ATTR_OP_MAX (__CTRL_ATTR_OP_MAX - 1)
+
+enum {
+ CTRL_ATTR_MCAST_GRP_UNSPEC,
+ CTRL_ATTR_MCAST_GRP_NAME,
+ CTRL_ATTR_MCAST_GRP_ID,
+ __CTRL_ATTR_MCAST_GRP_MAX,
+};
+
+#define CTRL_ATTR_MCAST_GRP_MAX (__CTRL_ATTR_MCAST_GRP_MAX - 1)
+
+enum {
+ CTRL_ATTR_POLICY_UNSPEC,
+ CTRL_ATTR_POLICY_DO,
+ CTRL_ATTR_POLICY_DUMP,
+
+ __CTRL_ATTR_POLICY_DUMP_MAX,
+ CTRL_ATTR_POLICY_DUMP_MAX = __CTRL_ATTR_POLICY_DUMP_MAX - 1
+};
+
+#define CTRL_ATTR_POLICY_MAX (__CTRL_ATTR_POLICY_DUMP_MAX - 1)
+
+#endif /* _UAPI__LINUX_GENERIC_NETLINK_H */
diff --git a/tools/include/uapi/linux/if_addr.h b/tools/include/uapi/linux/if_addr.h
new file mode 100644
index 000000000000..aa7958b4e41d
--- /dev/null
+++ b/tools/include/uapi/linux/if_addr.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI__LINUX_IF_ADDR_H
+#define _UAPI__LINUX_IF_ADDR_H
+
+#include <linux/types.h>
+#include <linux/netlink.h>
+
+struct ifaddrmsg {
+ __u8 ifa_family;
+ __u8 ifa_prefixlen; /* The prefix length */
+ __u8 ifa_flags; /* Flags */
+ __u8 ifa_scope; /* Address scope */
+ __u32 ifa_index; /* Link index */
+};
+
+/*
+ * Important comment:
+ * IFA_ADDRESS is prefix address, rather than local interface address.
+ * It makes no difference for normally configured broadcast interfaces,
+ * but for point-to-point IFA_ADDRESS is DESTINATION address,
+ * local address is supplied in IFA_LOCAL attribute.
+ *
+ * IFA_FLAGS is a u32 attribute that extends the u8 field ifa_flags.
+ * If present, the value from struct ifaddrmsg will be ignored.
+ */
+enum {
+ IFA_UNSPEC,
+ IFA_ADDRESS,
+ IFA_LOCAL,
+ IFA_LABEL,
+ IFA_BROADCAST,
+ IFA_ANYCAST,
+ IFA_CACHEINFO,
+ IFA_MULTICAST,
+ IFA_FLAGS,
+ IFA_RT_PRIORITY, /* u32, priority/metric for prefix route */
+ IFA_TARGET_NETNSID,
+ IFA_PROTO, /* u8, address protocol */
+ __IFA_MAX,
+};
+
+#define IFA_MAX (__IFA_MAX - 1)
+
+/* ifa_flags */
+#define IFA_F_SECONDARY 0x01
+#define IFA_F_TEMPORARY IFA_F_SECONDARY
+
+#define IFA_F_NODAD 0x02
+#define IFA_F_OPTIMISTIC 0x04
+#define IFA_F_DADFAILED 0x08
+#define IFA_F_HOMEADDRESS 0x10
+#define IFA_F_DEPRECATED 0x20
+#define IFA_F_TENTATIVE 0x40
+#define IFA_F_PERMANENT 0x80
+#define IFA_F_MANAGETEMPADDR 0x100
+#define IFA_F_NOPREFIXROUTE 0x200
+#define IFA_F_MCAUTOJOIN 0x400
+#define IFA_F_STABLE_PRIVACY 0x800
+
+struct ifa_cacheinfo {
+ __u32 ifa_prefered;
+ __u32 ifa_valid;
+ __u32 cstamp; /* created timestamp, hundredths of seconds */
+ __u32 tstamp; /* updated timestamp, hundredths of seconds */
+};
+
+/* backwards compatibility for userspace */
+#ifndef __KERNEL__
+#define IFA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifaddrmsg))))
+#define IFA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifaddrmsg))
+#endif
+
+/* ifa_proto */
+#define IFAPROT_UNSPEC 0
+#define IFAPROT_KERNEL_LO 1 /* loopback */
+#define IFAPROT_KERNEL_RA 2 /* set by kernel from router announcement */
+#define IFAPROT_KERNEL_LL 3 /* link-local set by kernel */
+
+#endif
diff --git a/tools/include/uapi/linux/if_xdp.h b/tools/include/uapi/linux/if_xdp.h
index 44f2bb93e7e6..23a062781468 100644
--- a/tools/include/uapi/linux/if_xdp.h
+++ b/tools/include/uapi/linux/if_xdp.h
@@ -79,6 +79,7 @@ struct xdp_mmap_offsets {
#define XDP_UMEM_COMPLETION_RING 6
#define XDP_STATISTICS 7
#define XDP_OPTIONS 8
+#define XDP_MAX_TX_SKB_BUDGET 9
struct xdp_umem_reg {
__u64 addr; /* Start of packet data area */
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index b6ae8ad8934b..52f6000ab020 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -178,6 +178,7 @@ struct kvm_xen_exit {
#define KVM_EXIT_NOTIFY 37
#define KVM_EXIT_LOONGARCH_IOCSR 38
#define KVM_EXIT_MEMORY_FAULT 39
+#define KVM_EXIT_TDX 40
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -375,6 +376,7 @@ struct kvm_run {
#define KVM_SYSTEM_EVENT_WAKEUP 4
#define KVM_SYSTEM_EVENT_SUSPEND 5
#define KVM_SYSTEM_EVENT_SEV_TERM 6
+#define KVM_SYSTEM_EVENT_TDX_FATAL 7
__u32 type;
__u32 ndata;
union {
@@ -446,6 +448,31 @@ struct kvm_run {
__u64 gpa;
__u64 size;
} memory_fault;
+ /* KVM_EXIT_TDX */
+ struct {
+ __u64 flags;
+ __u64 nr;
+ union {
+ struct {
+ __u64 ret;
+ __u64 data[5];
+ } unknown;
+ struct {
+ __u64 ret;
+ __u64 gpa;
+ __u64 size;
+ } get_quote;
+ struct {
+ __u64 ret;
+ __u64 leaf;
+ __u64 r11, r12, r13, r14;
+ } get_tdvmcall_info;
+ struct {
+ __u64 ret;
+ __u64 vector;
+ } setup_event_notify;
+ };
+ } tdx;
/* Fix the size of the union. */
char padding[256];
};
@@ -617,6 +644,7 @@ struct kvm_ioeventfd {
#define KVM_X86_DISABLE_EXITS_HLT (1 << 1)
#define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2)
#define KVM_X86_DISABLE_EXITS_CSTATE (1 << 3)
+#define KVM_X86_DISABLE_EXITS_APERFMPERF (1 << 4)
/* for KVM_ENABLE_CAP */
struct kvm_enable_cap {
@@ -930,6 +958,11 @@ struct kvm_enable_cap {
#define KVM_CAP_X86_APIC_BUS_CYCLES_NS 237
#define KVM_CAP_X86_GUEST_MODE 238
#define KVM_CAP_ARM_WRITABLE_IMP_ID_REGS 239
+#define KVM_CAP_ARM_EL2 240
+#define KVM_CAP_ARM_EL2_E2H0 241
+#define KVM_CAP_RISCV_MP_STATE_RESET 242
+#define KVM_CAP_ARM_CACHEABLE_PFNMAP_SUPPORTED 243
+#define KVM_CAP_GUEST_MEMFD_FLAGS 244
struct kvm_irq_routing_irqchip {
__u32 irqchip;
@@ -1566,6 +1599,8 @@ struct kvm_memory_attributes {
#define KVM_MEMORY_ATTRIBUTE_PRIVATE (1ULL << 3)
#define KVM_CREATE_GUEST_MEMFD _IOWR(KVMIO, 0xd4, struct kvm_create_guest_memfd)
+#define GUEST_MEMFD_FLAG_MMAP (1ULL << 0)
+#define GUEST_MEMFD_FLAG_INIT_SHARED (1ULL << 1)
struct kvm_create_guest_memfd {
__u64 size;
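
To sketch how userspace might consume the new KVM_EXIT_TDX exit reason (a hypothetical illustration, not taken from any existing VMM; the error value written back to ret is an assumption, and the TDVMCALL leaf numbers come from the TDX spec, not this header):

#include <linux/kvm.h>
#include <stdio.h>

/* Hypothetical sketch: handle KVM_EXIT_TDX after ioctl(vcpu, KVM_RUN).
 * run->tdx.nr selects which union arm is valid. */
static void handle_tdx_exit(struct kvm_run *run)
{
	fprintf(stderr, "TDX exit: flags=0x%llx nr=%llu\n",
		(unsigned long long)run->tdx.flags,
		(unsigned long long)run->tdx.nr);
	/* Report failure for unhandled leaves before resuming the vcpu;
	 * (__u64)-1 as "unsupported" is an assumption here. */
	run->tdx.unknown.ret = (__u64)-1;
}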
diff --git a/tools/include/uapi/linux/neighbour.h b/tools/include/uapi/linux/neighbour.h
new file mode 100644
index 000000000000..c34a81245f87
--- /dev/null
+++ b/tools/include/uapi/linux/neighbour.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI__LINUX_NEIGHBOUR_H
+#define _UAPI__LINUX_NEIGHBOUR_H
+
+#include <linux/types.h>
+#include <linux/netlink.h>
+
+struct ndmsg {
+ __u8 ndm_family;
+ __u8 ndm_pad1;
+ __u16 ndm_pad2;
+ __s32 ndm_ifindex;
+ __u16 ndm_state;
+ __u8 ndm_flags;
+ __u8 ndm_type;
+};
+
+enum {
+ NDA_UNSPEC,
+ NDA_DST,
+ NDA_LLADDR,
+ NDA_CACHEINFO,
+ NDA_PROBES,
+ NDA_VLAN,
+ NDA_PORT,
+ NDA_VNI,
+ NDA_IFINDEX,
+ NDA_MASTER,
+ NDA_LINK_NETNSID,
+ NDA_SRC_VNI,
+ NDA_PROTOCOL, /* Originator of entry */
+ NDA_NH_ID,
+ NDA_FDB_EXT_ATTRS,
+ NDA_FLAGS_EXT,
+ NDA_NDM_STATE_MASK,
+ NDA_NDM_FLAGS_MASK,
+ __NDA_MAX
+};
+
+#define NDA_MAX (__NDA_MAX - 1)
+
+/*
+ * Neighbor Cache Entry Flags
+ */
+
+#define NTF_USE (1 << 0)
+#define NTF_SELF (1 << 1)
+#define NTF_MASTER (1 << 2)
+#define NTF_PROXY (1 << 3) /* == ATF_PUBL */
+#define NTF_EXT_LEARNED (1 << 4)
+#define NTF_OFFLOADED (1 << 5)
+#define NTF_STICKY (1 << 6)
+#define NTF_ROUTER (1 << 7)
+/* Extended flags under NDA_FLAGS_EXT: */
+#define NTF_EXT_MANAGED (1 << 0)
+#define NTF_EXT_LOCKED (1 << 1)
+#define NTF_EXT_EXT_VALIDATED (1 << 2)
+
+/*
+ * Neighbor Cache Entry States.
+ */
+
+#define NUD_INCOMPLETE 0x01
+#define NUD_REACHABLE 0x02
+#define NUD_STALE 0x04
+#define NUD_DELAY 0x08
+#define NUD_PROBE 0x10
+#define NUD_FAILED 0x20
+
+/* Dummy states */
+#define NUD_NOARP 0x40
+#define NUD_PERMANENT 0x80
+#define NUD_NONE 0x00
+
+/* NUD_NOARP & NUD_PERMANENT are pseudostates: they never change, and no
+ * address resolution or NUD is performed for them.
+ *
+ * NUD_PERMANENT also cannot be deleted by garbage collectors. This holds true
+ * for dynamic entries with NTF_EXT_LEARNED flag as well. However, upon carrier
+ * down event, NUD_PERMANENT entries are not flushed whereas NTF_EXT_LEARNED
+ * flagged entries explicitly are (which is also consistent with the routing
+ * subsystem).
+ *
+ * When NTF_EXT_LEARNED is set for a bridge fdb entry the different cache entry
+ * states don't make sense and thus are ignored. Such entries don't age and
+ * can roam.
+ *
+ * NTF_EXT_MANAGED flagged neighbor entries are managed by the kernel on behalf
+ * of a user space control plane, and automatically refreshed so that (if
+ * possible) they remain in NUD_REACHABLE state.
+ *
+ * NTF_EXT_LOCKED flagged bridge FDB entries are entries generated by the
+ * bridge in response to a host trying to communicate via a locked bridge port
+ * with MAB enabled. Their purpose is to notify user space that a host requires
+ * authentication.
+ *
+ * NTF_EXT_EXT_VALIDATED flagged neighbor entries were externally validated by
+ * a user space control plane. The kernel will not remove or invalidate them,
+ * but it can probe them and notify user space when they become reachable.
+ */
+
+struct nda_cacheinfo {
+ __u32 ndm_confirmed;
+ __u32 ndm_used;
+ __u32 ndm_updated;
+ __u32 ndm_refcnt;
+};
+
+/*****************************************************************
+ * Neighbour tables specific messages.
+ *
+ * To retrieve the neighbour tables send RTM_GETNEIGHTBL with the
+ * NLM_F_DUMP flag set. Every neighbour table configuration is
+ * spread over multiple messages to avoid running into message
+ * size limits on systems with many interfaces. The first message
+ * in the sequence transports all non-device-specific data such as
+ * statistics, configuration, and the default parameter set.
+ * This message is followed by 0..n messages carrying device
+ * specific parameter sets.
+ * Although the ordering should be sufficient, NDTA_NAME can be
+ * used to identify sequences. The initial message can be identified
+ * by checking for NDTA_CONFIG. The device specific messages do
+ * not contain this TLV but have NDTPA_IFINDEX set to the
+ * corresponding interface index.
+ *
+ * To change neighbour table attributes, send RTM_SETNEIGHTBL
+ * with NDTA_NAME set. Changeable attributes include NDTA_THRESH[1-3],
+ * NDTA_GC_INTERVAL, and all TLVs in NDTA_PARMS unless marked
+ * otherwise. Device specific parameter sets can be changed by
+ * setting NDTPA_IFINDEX to the interface index of the corresponding
+ * device.
+ ****/
+
+struct ndt_stats {
+ __u64 ndts_allocs;
+ __u64 ndts_destroys;
+ __u64 ndts_hash_grows;
+ __u64 ndts_res_failed;
+ __u64 ndts_lookups;
+ __u64 ndts_hits;
+ __u64 ndts_rcv_probes_mcast;
+ __u64 ndts_rcv_probes_ucast;
+ __u64 ndts_periodic_gc_runs;
+ __u64 ndts_forced_gc_runs;
+ __u64 ndts_table_fulls;
+};
+
+enum {
+ NDTPA_UNSPEC,
+ NDTPA_IFINDEX, /* u32, unchangeable */
+ NDTPA_REFCNT, /* u32, read-only */
+ NDTPA_REACHABLE_TIME, /* u64, read-only, msecs */
+ NDTPA_BASE_REACHABLE_TIME, /* u64, msecs */
+ NDTPA_RETRANS_TIME, /* u64, msecs */
+ NDTPA_GC_STALETIME, /* u64, msecs */
+ NDTPA_DELAY_PROBE_TIME, /* u64, msecs */
+ NDTPA_QUEUE_LEN, /* u32 */
+ NDTPA_APP_PROBES, /* u32 */
+ NDTPA_UCAST_PROBES, /* u32 */
+ NDTPA_MCAST_PROBES, /* u32 */
+ NDTPA_ANYCAST_DELAY, /* u64, msecs */
+ NDTPA_PROXY_DELAY, /* u64, msecs */
+ NDTPA_PROXY_QLEN, /* u32 */
+ NDTPA_LOCKTIME, /* u64, msecs */
+ NDTPA_QUEUE_LENBYTES, /* u32 */
+ NDTPA_MCAST_REPROBES, /* u32 */
+ NDTPA_PAD,
+ NDTPA_INTERVAL_PROBE_TIME_MS, /* u64, msecs */
+ __NDTPA_MAX
+};
+#define NDTPA_MAX (__NDTPA_MAX - 1)
+
+struct ndtmsg {
+ __u8 ndtm_family;
+ __u8 ndtm_pad1;
+ __u16 ndtm_pad2;
+};
+
+struct ndt_config {
+ __u16 ndtc_key_len;
+ __u16 ndtc_entry_size;
+ __u32 ndtc_entries;
+ __u32 ndtc_last_flush; /* delta to now in msecs */
+ __u32 ndtc_last_rand; /* delta to now in msecs */
+ __u32 ndtc_hash_rnd;
+ __u32 ndtc_hash_mask;
+ __u32 ndtc_hash_chain_gc;
+ __u32 ndtc_proxy_qlen;
+};
+
+enum {
+ NDTA_UNSPEC,
+ NDTA_NAME, /* char *, unchangeable */
+ NDTA_THRESH1, /* u32 */
+ NDTA_THRESH2, /* u32 */
+ NDTA_THRESH3, /* u32 */
+ NDTA_CONFIG, /* struct ndt_config, read-only */
+ NDTA_PARMS, /* nested TLV NDTPA_* */
+ NDTA_STATS, /* struct ndt_stats, read-only */
+ NDTA_GC_INTERVAL, /* u64, msecs */
+ NDTA_PAD,
+ __NDTA_MAX
+};
+#define NDTA_MAX (__NDTA_MAX - 1)
+
+ /* FDB activity notification bits used in NFEA_ACTIVITY_NOTIFY:
+ * - FDB_NOTIFY_BIT - notify on activity/expire for any entry
+ * - FDB_NOTIFY_INACTIVE_BIT - mark as inactive to avoid multiple notifications
+ */
+enum {
+ FDB_NOTIFY_BIT = (1 << 0),
+ FDB_NOTIFY_INACTIVE_BIT = (1 << 1)
+};
+
+/* embedded into NDA_FDB_EXT_ATTRS:
+ * [NDA_FDB_EXT_ATTRS] = {
+ * [NFEA_ACTIVITY_NOTIFY]
+ * ...
+ * }
+ */
+enum {
+ NFEA_UNSPEC,
+ NFEA_ACTIVITY_NOTIFY,
+ NFEA_DONT_REFRESH,
+ __NFEA_MAX
+};
+#define NFEA_MAX (__NFEA_MAX - 1)
+
+#endif
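
For orientation, a minimal sketch of requesting a neighbour-table dump against this header (assumes fd is a bound NETLINK_ROUTE socket; replies arrive as RTM_NEWNEIGH messages whose NDA_* attributes are walked with the rtattr macros from rtnetlink.h, analogously to the address example above):

#include <linux/neighbour.h>
#include <linux/rtnetlink.h>
#include <string.h>
#include <sys/socket.h>

/* Sketch: kick off an RTM_GETNEIGH dump of all neighbour entries. */
static int request_neigh_dump(int fd)
{
	struct {
		struct nlmsghdr nlh;
		struct ndmsg ndm;
	} req;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg));
	req.nlh.nlmsg_type = RTM_GETNEIGH;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.ndm.ndm_family = AF_UNSPEC;

	return (int)send(fd, &req, req.nlh.nlmsg_len, 0);
}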
diff --git a/tools/include/uapi/linux/netdev.h b/tools/include/uapi/linux/netdev.h
index 7eb9571786b8..e0b579a1df4f 100644
--- a/tools/include/uapi/linux/netdev.h
+++ b/tools/include/uapi/linux/netdev.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/netdev.yaml */
/* YNL-GEN uapi header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _UAPI_LINUX_NETDEV_H
#define _UAPI_LINUX_NETDEV_H
@@ -77,6 +78,12 @@ enum netdev_qstats_scope {
NETDEV_QSTATS_SCOPE_QUEUE = 1,
};
+enum netdev_napi_threaded {
+ NETDEV_NAPI_THREADED_DISABLED,
+ NETDEV_NAPI_THREADED_ENABLED,
+ NETDEV_NAPI_THREADED_BUSY_POLL,
+};
+
enum {
NETDEV_A_DEV_IFINDEX = 1,
NETDEV_A_DEV_PAD,
@@ -134,6 +141,7 @@ enum {
NETDEV_A_NAPI_DEFER_HARD_IRQS,
NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT,
NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT,
+ NETDEV_A_NAPI_THREADED,
__NETDEV_A_NAPI_MAX,
NETDEV_A_NAPI_MAX = (__NETDEV_A_NAPI_MAX - 1)
diff --git a/tools/include/uapi/linux/netfilter.h b/tools/include/uapi/linux/netfilter.h
new file mode 100644
index 000000000000..5a79ccb76701
--- /dev/null
+++ b/tools/include/uapi/linux/netfilter.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI__LINUX_NETFILTER_H
+#define _UAPI__LINUX_NETFILTER_H
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+
+/* Responses from hook functions. */
+#define NF_DROP 0
+#define NF_ACCEPT 1
+#define NF_STOLEN 2
+#define NF_QUEUE 3
+#define NF_REPEAT 4
+#define NF_STOP 5 /* Deprecated, for userspace nf_queue compatibility. */
+#define NF_MAX_VERDICT NF_STOP
+
+/* We overload the higher bits for encoding auxiliary data such as the queue
+ * number or errno values. Not nice, but better than additional function
+ * arguments. */
+#define NF_VERDICT_MASK 0x000000ff
+
+/* extra verdict flags have mask 0x0000ff00 */
+#define NF_VERDICT_FLAG_QUEUE_BYPASS 0x00008000
+
+/* queue number (NF_QUEUE) or errno (NF_DROP) */
+#define NF_VERDICT_QMASK 0xffff0000
+#define NF_VERDICT_QBITS 16
+
+#define NF_QUEUE_NR(x) ((((x) << 16) & NF_VERDICT_QMASK) | NF_QUEUE)
+
+#define NF_DROP_ERR(x) (((-x) << 16) | NF_DROP)
+
+/* only for userspace compatibility */
+#ifndef __KERNEL__
+
+/* NF_VERDICT_BITS should be 8 now, but userspace might break if this changes */
+#define NF_VERDICT_BITS 16
+#endif
+
+enum nf_inet_hooks {
+ NF_INET_PRE_ROUTING,
+ NF_INET_LOCAL_IN,
+ NF_INET_FORWARD,
+ NF_INET_LOCAL_OUT,
+ NF_INET_POST_ROUTING,
+ NF_INET_NUMHOOKS,
+ NF_INET_INGRESS = NF_INET_NUMHOOKS,
+};
+
+enum nf_dev_hooks {
+ NF_NETDEV_INGRESS,
+ NF_NETDEV_EGRESS,
+ NF_NETDEV_NUMHOOKS
+};
+
+enum {
+ NFPROTO_UNSPEC = 0,
+ NFPROTO_INET = 1,
+ NFPROTO_IPV4 = 2,
+ NFPROTO_ARP = 3,
+ NFPROTO_NETDEV = 5,
+ NFPROTO_BRIDGE = 7,
+ NFPROTO_IPV6 = 10,
+#ifndef __KERNEL__ /* no longer supported by kernel */
+ NFPROTO_DECNET = 12,
+#endif
+ NFPROTO_NUMPROTO,
+};
+
+union nf_inet_addr {
+ __u32 all[4];
+ __be32 ip;
+ __be32 ip6[4];
+ struct in_addr in;
+ struct in6_addr in6;
+};
+
+#endif /* _UAPI__LINUX_NETFILTER_H */
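
The verdict macros above pack the base verdict into the low byte and auxiliary data into the top 16 bits. A small self-contained example of decoding what NF_QUEUE_NR() encodes:

#include <linux/netfilter.h>
#include <stdio.h>

int main(void)
{
	unsigned int v = NF_QUEUE_NR(5);

	/* low byte: base verdict (NF_QUEUE == 3);
	 * top 16 bits: queue number (5). */
	printf("verdict=%u queue=%u\n",
	       v & NF_VERDICT_MASK, v >> NF_VERDICT_QBITS);
	return 0;
}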
diff --git a/tools/include/uapi/linux/netfilter_arp.h b/tools/include/uapi/linux/netfilter_arp.h
new file mode 100644
index 000000000000..791dfc5ae907
--- /dev/null
+++ b/tools/include/uapi/linux/netfilter_arp.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-1.0+ WITH Linux-syscall-note */
+#ifndef __LINUX_ARP_NETFILTER_H
+#define __LINUX_ARP_NETFILTER_H
+
+/* ARP-specific defines for netfilter.
+ * (C)2002 Rusty Russell IBM -- This code is GPL.
+ */
+
+#include <linux/netfilter.h>
+
+/* There is no PF_ARP. */
+#define NF_ARP 0
+
+/* ARP Hooks */
+#define NF_ARP_IN 0
+#define NF_ARP_OUT 1
+#define NF_ARP_FORWARD 2
+
+#ifndef __KERNEL__
+#define NF_ARP_NUMHOOKS 3
+#endif
+
+#endif /* __LINUX_ARP_NETFILTER_H */
diff --git a/tools/include/uapi/linux/nsfs.h b/tools/include/uapi/linux/nsfs.h
index 34127653fd00..a25e38d1c874 100644
--- a/tools/include/uapi/linux/nsfs.h
+++ b/tools/include/uapi/linux/nsfs.h
@@ -16,8 +16,6 @@
#define NS_GET_NSTYPE _IO(NSIO, 0x3)
/* Get owner UID (in the caller's user namespace) for a user namespace */
#define NS_GET_OWNER_UID _IO(NSIO, 0x4)
-/* Get the id for a mount namespace */
-#define NS_GET_MNTNS_ID _IOR(NSIO, 0x5, __u64)
/* Translate pid from target pid namespace into the caller's pid namespace. */
#define NS_GET_PID_FROM_PIDNS _IOR(NSIO, 0x6, int)
/* Return thread-group leader id of pid in the callers pid namespace. */
@@ -42,4 +40,89 @@ struct mnt_ns_info {
/* Get previous namespace. */
#define NS_MNT_GET_PREV _IOR(NSIO, 12, struct mnt_ns_info)
+/* Retrieve namespace identifiers. */
+#define NS_GET_MNTNS_ID _IOR(NSIO, 5, __u64)
+#define NS_GET_ID _IOR(NSIO, 13, __u64)
+
+enum init_ns_ino {
+ IPC_NS_INIT_INO = 0xEFFFFFFFU,
+ UTS_NS_INIT_INO = 0xEFFFFFFEU,
+ USER_NS_INIT_INO = 0xEFFFFFFDU,
+ PID_NS_INIT_INO = 0xEFFFFFFCU,
+ CGROUP_NS_INIT_INO = 0xEFFFFFFBU,
+ TIME_NS_INIT_INO = 0xEFFFFFFAU,
+ NET_NS_INIT_INO = 0xEFFFFFF9U,
+ MNT_NS_INIT_INO = 0xEFFFFFF8U,
+#ifdef __KERNEL__
+ MNT_NS_ANON_INO = 0xEFFFFFF7U,
+#endif
+};
+
+struct nsfs_file_handle {
+ __u64 ns_id;
+ __u32 ns_type;
+ __u32 ns_inum;
+};
+
+#define NSFS_FILE_HANDLE_SIZE_VER0 16 /* sizeof first published struct */
+#define NSFS_FILE_HANDLE_SIZE_LATEST sizeof(struct nsfs_file_handle) /* sizeof latest published struct */
+
+enum init_ns_id {
+ IPC_NS_INIT_ID = 1ULL,
+ UTS_NS_INIT_ID = 2ULL,
+ USER_NS_INIT_ID = 3ULL,
+ PID_NS_INIT_ID = 4ULL,
+ CGROUP_NS_INIT_ID = 5ULL,
+ TIME_NS_INIT_ID = 6ULL,
+ NET_NS_INIT_ID = 7ULL,
+ MNT_NS_INIT_ID = 8ULL,
+#ifdef __KERNEL__
+ NS_LAST_INIT_ID = MNT_NS_INIT_ID,
+#endif
+};
+
+enum ns_type {
+ TIME_NS = (1ULL << 7), /* CLONE_NEWTIME */
+ MNT_NS = (1ULL << 17), /* CLONE_NEWNS */
+ CGROUP_NS = (1ULL << 25), /* CLONE_NEWCGROUP */
+ UTS_NS = (1ULL << 26), /* CLONE_NEWUTS */
+ IPC_NS = (1ULL << 27), /* CLONE_NEWIPC */
+ USER_NS = (1ULL << 28), /* CLONE_NEWUSER */
+ PID_NS = (1ULL << 29), /* CLONE_NEWPID */
+ NET_NS = (1ULL << 30), /* CLONE_NEWNET */
+};
+
+/**
+ * struct ns_id_req - namespace ID request structure
+ * @size: size of this structure
+ * @spare: reserved for future use
+ * @ns_id: last namespace id
+ * @ns_type: namespace type filter mask
+ * @spare2: reserved for future use
+ * @user_ns_id: owning user namespace ID
+ *
+ * Structure for passing namespace ID and miscellaneous parameters to
+ * statns(2) and listns(2).
+ *
+ * For statns(2) @ns_id identifies the namespace to query.
+ * For listns(2) @ns_id is the last listed namespace id (or zero).
+ */
+struct ns_id_req {
+ __u32 size;
+ __u32 spare;
+ __u64 ns_id;
+ struct /* listns */ {
+ __u32 ns_type;
+ __u32 spare2;
+ __u64 user_ns_id;
+ };
+};
+
+/*
+ * Special @user_ns_id value that can be passed to listns()
+ */
+#define LISTNS_CURRENT_USER 0xffffffffffffffff /* Caller's userns */
+
+/* List of all ns_id_req versions. */
+#define NS_ID_REQ_SIZE_VER0 32 /* sizeof first published struct */
+
#endif /* __LINUX_NSFS_H */
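
A minimal sketch of the new NS_GET_ID ioctl against a namespace file descriptor (assumes this header is installed and the running kernel implements the ioctl; error handling kept to a minimum):

#include <fcntl.h>
#include <linux/nsfs.h>
#include <linux/types.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	__u64 id;
	int fd = open("/proc/self/ns/net", O_RDONLY);

	if (fd < 0 || ioctl(fd, NS_GET_ID, &id) < 0) {
		perror("NS_GET_ID");
		return 1;
	}
	printf("net namespace id: %llu\n", (unsigned long long)id);
	close(fd);
	return 0;
}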
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index 78a362b80027..c44a8fb3e418 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -382,6 +382,7 @@ enum perf_event_read_format {
#define PERF_ATTR_SIZE_VER6 120 /* Add: aux_sample_size */
#define PERF_ATTR_SIZE_VER7 128 /* Add: sig_data */
#define PERF_ATTR_SIZE_VER8 136 /* Add: config3 */
+#define PERF_ATTR_SIZE_VER9 144 /* Add: config4 */
/*
* 'struct perf_event_attr' contains various attributes that define
@@ -463,7 +464,9 @@ struct perf_event_attr {
inherit_thread : 1, /* children only inherit if cloned with CLONE_THREAD */
remove_on_exec : 1, /* event is removed from task on exec */
sigtrap : 1, /* send synchronous SIGTRAP on event */
- __reserved_1 : 26;
+ defer_callchain: 1, /* request PERF_RECORD_CALLCHAIN_DEFERRED records */
+ defer_output : 1, /* output PERF_RECORD_CALLCHAIN_DEFERRED records */
+ __reserved_1 : 24;
union {
__u32 wakeup_events; /* wake up every n events */
@@ -543,6 +546,7 @@ struct perf_event_attr {
__u64 sig_data;
__u64 config3; /* extension of config2 */
+ __u64 config4; /* extension of config3 */
};
/*
@@ -1239,6 +1243,22 @@ enum perf_event_type {
*/
PERF_RECORD_AUX_OUTPUT_HW_ID = 21,
+ /*
+ * This user callchain capture was deferred until shortly before
+ * returning to user space. Previous samples would have kernel
+ * callchains only and they need to be stitched with this to make full
+ * callchains.
+ *
+ * struct {
+ * struct perf_event_header header;
+ * u64 cookie;
+ * u64 nr;
+ * u64 ips[nr];
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_CALLCHAIN_DEFERRED = 22,
+
PERF_RECORD_MAX, /* non-ABI */
};
@@ -1269,6 +1289,7 @@ enum perf_callchain_context {
PERF_CONTEXT_HV = (__u64)-32,
PERF_CONTEXT_KERNEL = (__u64)-128,
PERF_CONTEXT_USER = (__u64)-512,
+ PERF_CONTEXT_USER_DEFERRED = (__u64)-640,
PERF_CONTEXT_GUEST = (__u64)-2048,
PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
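
A hedged sketch of how a profiler might opt in to the new deferred user callchains (assumes a kernel that accepts PERF_ATTR_SIZE_VER9; the tool would later stitch each PERF_RECORD_CALLCHAIN_DEFERRED record onto earlier samples that ended with PERF_CONTEXT_USER_DEFERRED, matching on the cookie):

#include <linux/perf_event.h>
#include <string.h>

/* Sketch only: a cycles event whose user callchains are deferred. */
static void init_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_HARDWARE;
	attr->config = PERF_COUNT_HW_CPU_CYCLES;
	attr->sample_type = PERF_SAMPLE_CALLCHAIN;
	attr->sample_period = 100000;
	attr->defer_callchain = 1;	/* request deferred records */
	attr->defer_output = 1;		/* emit them into this buffer */
}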
diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
index 43dec6eed559..475fc8ca4403 100644
--- a/tools/include/uapi/linux/prctl.h
+++ b/tools/include/uapi/linux/prctl.h
@@ -255,7 +255,12 @@ struct prctl_mm_map {
/* Dispatch syscalls to a userspace handler */
#define PR_SET_SYSCALL_USER_DISPATCH 59
# define PR_SYS_DISPATCH_OFF 0
-# define PR_SYS_DISPATCH_ON 1
+/* Enable dispatch except for the specified range */
+# define PR_SYS_DISPATCH_EXCLUSIVE_ON 1
+/* Enable dispatch for the specified range */
+# define PR_SYS_DISPATCH_INCLUSIVE_ON 2
+/* Legacy name for backwards compatibility */
+# define PR_SYS_DISPATCH_ON PR_SYS_DISPATCH_EXCLUSIVE_ON
/* The control values for the user space selector when dispatch is enabled */
# define SYSCALL_DISPATCH_FILTER_ALLOW 0
# define SYSCALL_DISPATCH_FILTER_BLOCK 1
@@ -367,8 +372,6 @@ struct prctl_mm_map {
/* FUTEX hash management */
#define PR_FUTEX_HASH 78
# define PR_FUTEX_HASH_SET_SLOTS 1
-# define FH_FLAG_IMMUTABLE (1ULL << 0)
# define PR_FUTEX_HASH_GET_SLOTS 2
-# define PR_FUTEX_HASH_GET_IMMUTABLE 3
#endif /* _LINUX_PRCTL_H */
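
To illustrate the difference: the legacy PR_SYS_DISPATCH_EXCLUSIVE_ON dispatches every syscall except those issued from the given [start, start+len) range, whereas the new PR_SYS_DISPATCH_INCLUSIVE_ON dispatches only syscalls issued from inside it. A minimal sketch (hypothetical code region; a SIGSYS handler must already be installed, and older libc headers may lack the new constant, hence the fallback define):

#include <stddef.h>
#include <stdint.h>
#include <sys/prctl.h>

#ifndef PR_SYS_DISPATCH_INCLUSIVE_ON
#define PR_SYS_DISPATCH_INCLUSIVE_ON 2	/* from the UAPI header above */
#endif

/* The selector lets the SIGSYS handler toggle filtering at runtime
 * (SYSCALL_DISPATCH_FILTER_ALLOW / SYSCALL_DISPATCH_FILTER_BLOCK). */
static volatile char selector;

/* Sketch: trap only syscalls issued from [start, start + len),
 * e.g. a sandboxed JIT region. */
static int trap_region(uintptr_t start, size_t len)
{
	return prctl(PR_SET_SYSCALL_USER_DISPATCH,
		     PR_SYS_DISPATCH_INCLUSIVE_ON, start, len, &selector);
}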
diff --git a/tools/include/uapi/linux/rtnetlink.h b/tools/include/uapi/linux/rtnetlink.h
new file mode 100644
index 000000000000..dab9493c791b
--- /dev/null
+++ b/tools/include/uapi/linux/rtnetlink.h
@@ -0,0 +1,848 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI__LINUX_RTNETLINK_H
+#define _UAPI__LINUX_RTNETLINK_H
+
+#include <linux/types.h>
+#include <linux/netlink.h>
+#include <linux/if_link.h>
+#include <linux/if_addr.h>
+#include <linux/neighbour.h>
+
+/* rtnetlink families. Values up to 127 are reserved for real address
+ * families, values from 128 onwards may be used arbitrarily.
+ */
+#define RTNL_FAMILY_IPMR 128
+#define RTNL_FAMILY_IP6MR 129
+#define RTNL_FAMILY_MAX 129
+
+/****
+ * Routing/neighbour discovery messages.
+ ****/
+
+/* Types of messages */
+
+enum {
+ RTM_BASE = 16,
+#define RTM_BASE RTM_BASE
+
+ RTM_NEWLINK = 16,
+#define RTM_NEWLINK RTM_NEWLINK
+ RTM_DELLINK,
+#define RTM_DELLINK RTM_DELLINK
+ RTM_GETLINK,
+#define RTM_GETLINK RTM_GETLINK
+ RTM_SETLINK,
+#define RTM_SETLINK RTM_SETLINK
+
+ RTM_NEWADDR = 20,
+#define RTM_NEWADDR RTM_NEWADDR
+ RTM_DELADDR,
+#define RTM_DELADDR RTM_DELADDR
+ RTM_GETADDR,
+#define RTM_GETADDR RTM_GETADDR
+
+ RTM_NEWROUTE = 24,
+#define RTM_NEWROUTE RTM_NEWROUTE
+ RTM_DELROUTE,
+#define RTM_DELROUTE RTM_DELROUTE
+ RTM_GETROUTE,
+#define RTM_GETROUTE RTM_GETROUTE
+
+ RTM_NEWNEIGH = 28,
+#define RTM_NEWNEIGH RTM_NEWNEIGH
+ RTM_DELNEIGH,
+#define RTM_DELNEIGH RTM_DELNEIGH
+ RTM_GETNEIGH,
+#define RTM_GETNEIGH RTM_GETNEIGH
+
+ RTM_NEWRULE = 32,
+#define RTM_NEWRULE RTM_NEWRULE
+ RTM_DELRULE,
+#define RTM_DELRULE RTM_DELRULE
+ RTM_GETRULE,
+#define RTM_GETRULE RTM_GETRULE
+
+ RTM_NEWQDISC = 36,
+#define RTM_NEWQDISC RTM_NEWQDISC
+ RTM_DELQDISC,
+#define RTM_DELQDISC RTM_DELQDISC
+ RTM_GETQDISC,
+#define RTM_GETQDISC RTM_GETQDISC
+
+ RTM_NEWTCLASS = 40,
+#define RTM_NEWTCLASS RTM_NEWTCLASS
+ RTM_DELTCLASS,
+#define RTM_DELTCLASS RTM_DELTCLASS
+ RTM_GETTCLASS,
+#define RTM_GETTCLASS RTM_GETTCLASS
+
+ RTM_NEWTFILTER = 44,
+#define RTM_NEWTFILTER RTM_NEWTFILTER
+ RTM_DELTFILTER,
+#define RTM_DELTFILTER RTM_DELTFILTER
+ RTM_GETTFILTER,
+#define RTM_GETTFILTER RTM_GETTFILTER
+
+ RTM_NEWACTION = 48,
+#define RTM_NEWACTION RTM_NEWACTION
+ RTM_DELACTION,
+#define RTM_DELACTION RTM_DELACTION
+ RTM_GETACTION,
+#define RTM_GETACTION RTM_GETACTION
+
+ RTM_NEWPREFIX = 52,
+#define RTM_NEWPREFIX RTM_NEWPREFIX
+
+ RTM_NEWMULTICAST = 56,
+#define RTM_NEWMULTICAST RTM_NEWMULTICAST
+ RTM_DELMULTICAST,
+#define RTM_DELMULTICAST RTM_DELMULTICAST
+ RTM_GETMULTICAST,
+#define RTM_GETMULTICAST RTM_GETMULTICAST
+
+ RTM_NEWANYCAST = 60,
+#define RTM_NEWANYCAST RTM_NEWANYCAST
+ RTM_DELANYCAST,
+#define RTM_DELANYCAST RTM_DELANYCAST
+ RTM_GETANYCAST,
+#define RTM_GETANYCAST RTM_GETANYCAST
+
+ RTM_NEWNEIGHTBL = 64,
+#define RTM_NEWNEIGHTBL RTM_NEWNEIGHTBL
+ RTM_GETNEIGHTBL = 66,
+#define RTM_GETNEIGHTBL RTM_GETNEIGHTBL
+ RTM_SETNEIGHTBL,
+#define RTM_SETNEIGHTBL RTM_SETNEIGHTBL
+
+ RTM_NEWNDUSEROPT = 68,
+#define RTM_NEWNDUSEROPT RTM_NEWNDUSEROPT
+
+ RTM_NEWADDRLABEL = 72,
+#define RTM_NEWADDRLABEL RTM_NEWADDRLABEL
+ RTM_DELADDRLABEL,
+#define RTM_DELADDRLABEL RTM_DELADDRLABEL
+ RTM_GETADDRLABEL,
+#define RTM_GETADDRLABEL RTM_GETADDRLABEL
+
+ RTM_GETDCB = 78,
+#define RTM_GETDCB RTM_GETDCB
+ RTM_SETDCB,
+#define RTM_SETDCB RTM_SETDCB
+
+ RTM_NEWNETCONF = 80,
+#define RTM_NEWNETCONF RTM_NEWNETCONF
+ RTM_DELNETCONF,
+#define RTM_DELNETCONF RTM_DELNETCONF
+ RTM_GETNETCONF = 82,
+#define RTM_GETNETCONF RTM_GETNETCONF
+
+ RTM_NEWMDB = 84,
+#define RTM_NEWMDB RTM_NEWMDB
+ RTM_DELMDB = 85,
+#define RTM_DELMDB RTM_DELMDB
+ RTM_GETMDB = 86,
+#define RTM_GETMDB RTM_GETMDB
+
+ RTM_NEWNSID = 88,
+#define RTM_NEWNSID RTM_NEWNSID
+ RTM_DELNSID = 89,
+#define RTM_DELNSID RTM_DELNSID
+ RTM_GETNSID = 90,
+#define RTM_GETNSID RTM_GETNSID
+
+ RTM_NEWSTATS = 92,
+#define RTM_NEWSTATS RTM_NEWSTATS
+ RTM_GETSTATS = 94,
+#define RTM_GETSTATS RTM_GETSTATS
+ RTM_SETSTATS,
+#define RTM_SETSTATS RTM_SETSTATS
+
+ RTM_NEWCACHEREPORT = 96,
+#define RTM_NEWCACHEREPORT RTM_NEWCACHEREPORT
+
+ RTM_NEWCHAIN = 100,
+#define RTM_NEWCHAIN RTM_NEWCHAIN
+ RTM_DELCHAIN,
+#define RTM_DELCHAIN RTM_DELCHAIN
+ RTM_GETCHAIN,
+#define RTM_GETCHAIN RTM_GETCHAIN
+
+ RTM_NEWNEXTHOP = 104,
+#define RTM_NEWNEXTHOP RTM_NEWNEXTHOP
+ RTM_DELNEXTHOP,
+#define RTM_DELNEXTHOP RTM_DELNEXTHOP
+ RTM_GETNEXTHOP,
+#define RTM_GETNEXTHOP RTM_GETNEXTHOP
+
+ RTM_NEWLINKPROP = 108,
+#define RTM_NEWLINKPROP RTM_NEWLINKPROP
+ RTM_DELLINKPROP,
+#define RTM_DELLINKPROP RTM_DELLINKPROP
+ RTM_GETLINKPROP,
+#define RTM_GETLINKPROP RTM_GETLINKPROP
+
+ RTM_NEWVLAN = 112,
+#define RTM_NEWVLAN RTM_NEWVLAN
+ RTM_DELVLAN,
+#define RTM_DELVLAN RTM_DELVLAN
+ RTM_GETVLAN,
+#define RTM_GETVLAN RTM_GETVLAN
+
+ RTM_NEWNEXTHOPBUCKET = 116,
+#define RTM_NEWNEXTHOPBUCKET RTM_NEWNEXTHOPBUCKET
+ RTM_DELNEXTHOPBUCKET,
+#define RTM_DELNEXTHOPBUCKET RTM_DELNEXTHOPBUCKET
+ RTM_GETNEXTHOPBUCKET,
+#define RTM_GETNEXTHOPBUCKET RTM_GETNEXTHOPBUCKET
+
+ RTM_NEWTUNNEL = 120,
+#define RTM_NEWTUNNEL RTM_NEWTUNNEL
+ RTM_DELTUNNEL,
+#define RTM_DELTUNNEL RTM_DELTUNNEL
+ RTM_GETTUNNEL,
+#define RTM_GETTUNNEL RTM_GETTUNNEL
+
+ __RTM_MAX,
+#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1)
+};
+
+#define RTM_NR_MSGTYPES (RTM_MAX + 1 - RTM_BASE)
+#define RTM_NR_FAMILIES (RTM_NR_MSGTYPES >> 2)
+#define RTM_FAM(cmd) (((cmd) - RTM_BASE) >> 2)
+
+/*
+ Generic structure for encapsulation of optional route information.
+   It is reminiscent of sockaddr, but with sa_family replaced
+   by an attribute type.
+ */
+
+struct rtattr {
+ unsigned short rta_len;
+ unsigned short rta_type;
+};
+
+/* Macros to handle rtattributes */
+
+#define RTA_ALIGNTO 4U
+#define RTA_ALIGN(len) ( ((len)+RTA_ALIGNTO-1) & ~(RTA_ALIGNTO-1) )
+#define RTA_OK(rta,len) ((len) >= (int)sizeof(struct rtattr) && \
+ (rta)->rta_len >= sizeof(struct rtattr) && \
+ (rta)->rta_len <= (len))
+#define RTA_NEXT(rta,attrlen) ((attrlen) -= RTA_ALIGN((rta)->rta_len), \
+ (struct rtattr*)(((char*)(rta)) + RTA_ALIGN((rta)->rta_len)))
+#define RTA_LENGTH(len) (RTA_ALIGN(sizeof(struct rtattr)) + (len))
+#define RTA_SPACE(len) RTA_ALIGN(RTA_LENGTH(len))
+#define RTA_DATA(rta) ((void*)(((char*)(rta)) + RTA_LENGTH(0)))
+#define RTA_PAYLOAD(rta) ((int)((rta)->rta_len) - RTA_LENGTH(0))
+
+
+
+
+/******************************************************************************
+ * Definitions used in routing table administration.
+ ****/
+
+struct rtmsg {
+ unsigned char rtm_family;
+ unsigned char rtm_dst_len;
+ unsigned char rtm_src_len;
+ unsigned char rtm_tos;
+
+ unsigned char rtm_table; /* Routing table id */
+ unsigned char rtm_protocol; /* Routing protocol; see below */
+ unsigned char rtm_scope; /* See below */
+ unsigned char rtm_type; /* See below */
+
+ unsigned rtm_flags;
+};
+
+/* rtm_type */
+
+enum {
+ RTN_UNSPEC,
+ RTN_UNICAST, /* Gateway or direct route */
+ RTN_LOCAL, /* Accept locally */
+ RTN_BROADCAST, /* Accept locally as broadcast,
+ send as broadcast */
+ RTN_ANYCAST, /* Accept locally as broadcast,
+ but send as unicast */
+ RTN_MULTICAST, /* Multicast route */
+ RTN_BLACKHOLE, /* Drop */
+ RTN_UNREACHABLE, /* Destination is unreachable */
+ RTN_PROHIBIT, /* Administratively prohibited */
+ RTN_THROW, /* Not in this table */
+ RTN_NAT, /* Translate this address */
+ RTN_XRESOLVE, /* Use external resolver */
+ __RTN_MAX
+};
+
+#define RTN_MAX (__RTN_MAX - 1)
+
+
+/* rtm_protocol */
+
+#define RTPROT_UNSPEC 0
+#define RTPROT_REDIRECT 1 /* Route installed by ICMP redirects;
+ not used by current IPv4 */
+#define RTPROT_KERNEL 2 /* Route installed by kernel */
+#define RTPROT_BOOT 3 /* Route installed during boot */
+#define RTPROT_STATIC 4 /* Route installed by administrator */
+
+/* Values of protocol >= RTPROT_STATIC are not interpreted by kernel;
+   they are just passed from userspace and back as is.
+   They are intended for use by multiple routing daemons.
+ Note that protocol values should be standardized in order to
+ avoid conflicts.
+ */
+
+#define RTPROT_GATED 8 /* Apparently, GateD */
+#define RTPROT_RA 9 /* RDISC/ND router advertisements */
+#define RTPROT_MRT 10 /* Merit MRT */
+#define RTPROT_ZEBRA 11 /* Zebra */
+#define RTPROT_BIRD 12 /* BIRD */
+#define RTPROT_DNROUTED 13 /* DECnet routing daemon */
+#define RTPROT_XORP 14 /* XORP */
+#define RTPROT_NTK 15 /* Netsukuku */
+#define RTPROT_DHCP 16 /* DHCP client */
+#define RTPROT_MROUTED 17 /* Multicast daemon */
+#define RTPROT_KEEPALIVED 18 /* Keepalived daemon */
+#define RTPROT_BABEL 42 /* Babel daemon */
+#define RTPROT_OVN 84 /* OVN daemon */
+#define RTPROT_OPENR 99 /* Open Routing (Open/R) Routes */
+#define RTPROT_BGP 186 /* BGP Routes */
+#define RTPROT_ISIS 187 /* ISIS Routes */
+#define RTPROT_OSPF 188 /* OSPF Routes */
+#define RTPROT_RIP 189 /* RIP Routes */
+#define RTPROT_EIGRP 192 /* EIGRP Routes */
+
+/* rtm_scope
+
+   Really it is not a scope, but a sort of distance to the destination.
+   NOWHERE is reserved for nonexistent destinations, HOST is for our
+   local addresses, LINK is for destinations located on a directly
+   attached link, and UNIVERSE is everywhere in the Universe.
+
+   Intermediate values are also possible, e.g. interior routes
+   could be assigned a value between UNIVERSE and LINK.
+*/
+
+enum rt_scope_t {
+ RT_SCOPE_UNIVERSE=0,
+/* User defined values */
+ RT_SCOPE_SITE=200,
+ RT_SCOPE_LINK=253,
+ RT_SCOPE_HOST=254,
+ RT_SCOPE_NOWHERE=255
+};
+
+/* rtm_flags */
+
+#define RTM_F_NOTIFY 0x100 /* Notify user of route change */
+#define RTM_F_CLONED 0x200 /* This route is cloned */
+#define RTM_F_EQUALIZE 0x400 /* Multipath equalizer: NI */
+#define RTM_F_PREFIX 0x800 /* Prefix addresses */
+#define RTM_F_LOOKUP_TABLE 0x1000 /* set rtm_table to FIB lookup result */
+#define RTM_F_FIB_MATCH 0x2000 /* return full fib lookup match */
+#define RTM_F_OFFLOAD 0x4000 /* route is offloaded */
+#define RTM_F_TRAP 0x8000 /* route is trapping packets */
+#define RTM_F_OFFLOAD_FAILED 0x20000000 /* route offload failed, this value
+ * is chosen to avoid conflicts with
+ * other flags defined in
+ * include/uapi/linux/ipv6_route.h
+ */
+
+/* Reserved table identifiers */
+
+enum rt_class_t {
+ RT_TABLE_UNSPEC=0,
+/* User defined values */
+ RT_TABLE_COMPAT=252,
+ RT_TABLE_DEFAULT=253,
+ RT_TABLE_MAIN=254,
+ RT_TABLE_LOCAL=255,
+ RT_TABLE_MAX=0xFFFFFFFF
+};
+
+
+/* Routing message attributes */
+
+enum rtattr_type_t {
+ RTA_UNSPEC,
+ RTA_DST,
+ RTA_SRC,
+ RTA_IIF,
+ RTA_OIF,
+ RTA_GATEWAY,
+ RTA_PRIORITY,
+ RTA_PREFSRC,
+ RTA_METRICS,
+ RTA_MULTIPATH,
+ RTA_PROTOINFO, /* no longer used */
+ RTA_FLOW,
+ RTA_CACHEINFO,
+ RTA_SESSION, /* no longer used */
+ RTA_MP_ALGO, /* no longer used */
+ RTA_TABLE,
+ RTA_MARK,
+ RTA_MFC_STATS,
+ RTA_VIA,
+ RTA_NEWDST,
+ RTA_PREF,
+ RTA_ENCAP_TYPE,
+ RTA_ENCAP,
+ RTA_EXPIRES,
+ RTA_PAD,
+ RTA_UID,
+ RTA_TTL_PROPAGATE,
+ RTA_IP_PROTO,
+ RTA_SPORT,
+ RTA_DPORT,
+ RTA_NH_ID,
+ RTA_FLOWLABEL,
+ __RTA_MAX
+};
+
+#define RTA_MAX (__RTA_MAX - 1)
+
+#define RTM_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct rtmsg))))
+#define RTM_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct rtmsg))
+
+/* RTA_MULTIPATH --- array of struct rtnexthop.
+ *
+ * "struct rtnexthop" describes all necessary nexthop information,
+ * i.e. parameters of path to a destination via this nexthop.
+ *
+ * At the moment it is impossible to set different prefsrc, mtu, window
+ * and rtt for different paths from multipath.
+ */
+
+struct rtnexthop {
+ unsigned short rtnh_len;
+ unsigned char rtnh_flags;
+ unsigned char rtnh_hops;
+ int rtnh_ifindex;
+};
+
+/* rtnh_flags */
+
+#define RTNH_F_DEAD 1 /* Nexthop is dead (used by multipath) */
+#define RTNH_F_PERVASIVE 2 /* Do recursive gateway lookup */
+#define RTNH_F_ONLINK 4 /* Gateway is forced on link */
+#define RTNH_F_OFFLOAD 8 /* Nexthop is offloaded */
+#define RTNH_F_LINKDOWN 16 /* carrier-down on nexthop */
+#define RTNH_F_UNRESOLVED 32 /* The entry is unresolved (ipmr) */
+#define RTNH_F_TRAP 64 /* Nexthop is trapping packets */
+
+#define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN | \
+ RTNH_F_OFFLOAD | RTNH_F_TRAP)
+
+/* Macros to handle nexthops */
+
+#define RTNH_ALIGNTO 4
+#define RTNH_ALIGN(len) ( ((len)+RTNH_ALIGNTO-1) & ~(RTNH_ALIGNTO-1) )
+#define RTNH_OK(rtnh,len) ((rtnh)->rtnh_len >= sizeof(struct rtnexthop) && \
+ ((int)(rtnh)->rtnh_len) <= (len))
+#define RTNH_NEXT(rtnh) ((struct rtnexthop*)(((char*)(rtnh)) + RTNH_ALIGN((rtnh)->rtnh_len)))
+#define RTNH_LENGTH(len) (RTNH_ALIGN(sizeof(struct rtnexthop)) + (len))
+#define RTNH_SPACE(len) RTNH_ALIGN(RTNH_LENGTH(len))
+#define RTNH_DATA(rtnh) ((struct rtattr*)(((char*)(rtnh)) + RTNH_LENGTH(0)))
+
+/* RTA_VIA */
+struct rtvia {
+ __kernel_sa_family_t rtvia_family;
+ __u8 rtvia_addr[];
+};
+
+/* RTA_CACHEINFO */
+
+struct rta_cacheinfo {
+ __u32 rta_clntref;
+ __u32 rta_lastuse;
+ __s32 rta_expires;
+ __u32 rta_error;
+ __u32 rta_used;
+
+#define RTNETLINK_HAVE_PEERINFO 1
+ __u32 rta_id;
+ __u32 rta_ts;
+ __u32 rta_tsage;
+};
+
+/* RTA_METRICS --- array of struct rtattr with types of RTAX_* */
+
+enum {
+ RTAX_UNSPEC,
+#define RTAX_UNSPEC RTAX_UNSPEC
+ RTAX_LOCK,
+#define RTAX_LOCK RTAX_LOCK
+ RTAX_MTU,
+#define RTAX_MTU RTAX_MTU
+ RTAX_WINDOW,
+#define RTAX_WINDOW RTAX_WINDOW
+ RTAX_RTT,
+#define RTAX_RTT RTAX_RTT
+ RTAX_RTTVAR,
+#define RTAX_RTTVAR RTAX_RTTVAR
+ RTAX_SSTHRESH,
+#define RTAX_SSTHRESH RTAX_SSTHRESH
+ RTAX_CWND,
+#define RTAX_CWND RTAX_CWND
+ RTAX_ADVMSS,
+#define RTAX_ADVMSS RTAX_ADVMSS
+ RTAX_REORDERING,
+#define RTAX_REORDERING RTAX_REORDERING
+ RTAX_HOPLIMIT,
+#define RTAX_HOPLIMIT RTAX_HOPLIMIT
+ RTAX_INITCWND,
+#define RTAX_INITCWND RTAX_INITCWND
+ RTAX_FEATURES,
+#define RTAX_FEATURES RTAX_FEATURES
+ RTAX_RTO_MIN,
+#define RTAX_RTO_MIN RTAX_RTO_MIN
+ RTAX_INITRWND,
+#define RTAX_INITRWND RTAX_INITRWND
+ RTAX_QUICKACK,
+#define RTAX_QUICKACK RTAX_QUICKACK
+ RTAX_CC_ALGO,
+#define RTAX_CC_ALGO RTAX_CC_ALGO
+ RTAX_FASTOPEN_NO_COOKIE,
+#define RTAX_FASTOPEN_NO_COOKIE RTAX_FASTOPEN_NO_COOKIE
+ __RTAX_MAX
+};
+
+#define RTAX_MAX (__RTAX_MAX - 1)
+
+#define RTAX_FEATURE_ECN (1 << 0)
+#define RTAX_FEATURE_SACK (1 << 1) /* unused */
+#define RTAX_FEATURE_TIMESTAMP (1 << 2) /* unused */
+#define RTAX_FEATURE_ALLFRAG (1 << 3) /* unused */
+#define RTAX_FEATURE_TCP_USEC_TS (1 << 4)
+
+#define RTAX_FEATURE_MASK (RTAX_FEATURE_ECN | \
+ RTAX_FEATURE_SACK | \
+ RTAX_FEATURE_TIMESTAMP | \
+ RTAX_FEATURE_ALLFRAG | \
+ RTAX_FEATURE_TCP_USEC_TS)
+
+struct rta_session {
+ __u8 proto;
+ __u8 pad1;
+ __u16 pad2;
+
+ union {
+ struct {
+ __u16 sport;
+ __u16 dport;
+ } ports;
+
+ struct {
+ __u8 type;
+ __u8 code;
+ __u16 ident;
+ } icmpt;
+
+ __u32 spi;
+ } u;
+};
+
+struct rta_mfc_stats {
+ __u64 mfcs_packets;
+ __u64 mfcs_bytes;
+ __u64 mfcs_wrong_if;
+};
+
+/****
+ * General form of address family dependent message.
+ ****/
+
+struct rtgenmsg {
+ unsigned char rtgen_family;
+};
+
+/*****************************************************************
+ * Link layer specific messages.
+ ****/
+
+/* struct ifinfomsg
+ * passes link level specific information, not dependent
+ * on network protocol.
+ */
+
+struct ifinfomsg {
+ unsigned char ifi_family;
+ unsigned char __ifi_pad;
+ unsigned short ifi_type; /* ARPHRD_* */
+ int ifi_index; /* Link index */
+ unsigned ifi_flags; /* IFF_* flags */
+ unsigned ifi_change; /* IFF_* change mask */
+};
+
+/********************************************************************
+ * prefix information
+ ****/
+
+struct prefixmsg {
+ unsigned char prefix_family;
+ unsigned char prefix_pad1;
+ unsigned short prefix_pad2;
+ int prefix_ifindex;
+ unsigned char prefix_type;
+ unsigned char prefix_len;
+ unsigned char prefix_flags;
+ unsigned char prefix_pad3;
+};
+
+enum
+{
+ PREFIX_UNSPEC,
+ PREFIX_ADDRESS,
+ PREFIX_CACHEINFO,
+ __PREFIX_MAX
+};
+
+#define PREFIX_MAX (__PREFIX_MAX - 1)
+
+struct prefix_cacheinfo {
+ __u32 preferred_time;
+ __u32 valid_time;
+};
+
+
+/*****************************************************************
+ * Traffic control messages.
+ ****/
+
+struct tcmsg {
+ unsigned char tcm_family;
+ unsigned char tcm__pad1;
+ unsigned short tcm__pad2;
+ int tcm_ifindex;
+ __u32 tcm_handle;
+ __u32 tcm_parent;
+/* tcm_block_index is used instead of tcm_parent
+ * in case tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK
+ */
+#define tcm_block_index tcm_parent
+ __u32 tcm_info;
+};
+
+/* For manipulation of filters in shared block, tcm_ifindex is set to
+ * TCM_IFINDEX_MAGIC_BLOCK, and tcm_parent is aliased to tcm_block_index
+ * which is the block index.
+ */
+#define TCM_IFINDEX_MAGIC_BLOCK (0xFFFFFFFFU)
+
+enum {
+ TCA_UNSPEC,
+ TCA_KIND,
+ TCA_OPTIONS,
+ TCA_STATS,
+ TCA_XSTATS,
+ TCA_RATE,
+ TCA_FCNT,
+ TCA_STATS2,
+ TCA_STAB,
+ TCA_PAD,
+ TCA_DUMP_INVISIBLE,
+ TCA_CHAIN,
+ TCA_HW_OFFLOAD,
+ TCA_INGRESS_BLOCK,
+ TCA_EGRESS_BLOCK,
+ TCA_DUMP_FLAGS,
+ TCA_EXT_WARN_MSG,
+ __TCA_MAX
+};
+
+#define TCA_MAX (__TCA_MAX - 1)
+
+#define TCA_DUMP_FLAGS_TERSE (1 << 0) /* Means that in dump user gets only basic
+ * data necessary to identify the objects
+ * (handle, cookie, etc.) and stats.
+ */
+
+#define TCA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct tcmsg))))
+#define TCA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct tcmsg))
+
+/********************************************************************
+ * Neighbor Discovery userland options
+ ****/
+
+struct nduseroptmsg {
+ unsigned char nduseropt_family;
+ unsigned char nduseropt_pad1;
+ unsigned short nduseropt_opts_len; /* Total length of options */
+ int nduseropt_ifindex;
+ __u8 nduseropt_icmp_type;
+ __u8 nduseropt_icmp_code;
+ unsigned short nduseropt_pad2;
+ unsigned int nduseropt_pad3;
+ /* Followed by one or more ND options */
+};
+
+enum {
+ NDUSEROPT_UNSPEC,
+ NDUSEROPT_SRCADDR,
+ __NDUSEROPT_MAX
+};
+
+#define NDUSEROPT_MAX (__NDUSEROPT_MAX - 1)
+
+#ifndef __KERNEL__
+/* RTnetlink multicast groups - backwards compatibility for userspace */
+#define RTMGRP_LINK 1
+#define RTMGRP_NOTIFY 2
+#define RTMGRP_NEIGH 4
+#define RTMGRP_TC 8
+
+#define RTMGRP_IPV4_IFADDR 0x10
+#define RTMGRP_IPV4_MROUTE 0x20
+#define RTMGRP_IPV4_ROUTE 0x40
+#define RTMGRP_IPV4_RULE 0x80
+
+#define RTMGRP_IPV6_IFADDR 0x100
+#define RTMGRP_IPV6_MROUTE 0x200
+#define RTMGRP_IPV6_ROUTE 0x400
+#define RTMGRP_IPV6_IFINFO 0x800
+
+#define RTMGRP_DECnet_IFADDR 0x1000
+#define RTMGRP_DECnet_ROUTE 0x4000
+
+#define RTMGRP_IPV6_PREFIX 0x20000
+#endif
+
+/* RTnetlink multicast groups */
+enum rtnetlink_groups {
+ RTNLGRP_NONE,
+#define RTNLGRP_NONE RTNLGRP_NONE
+ RTNLGRP_LINK,
+#define RTNLGRP_LINK RTNLGRP_LINK
+ RTNLGRP_NOTIFY,
+#define RTNLGRP_NOTIFY RTNLGRP_NOTIFY
+ RTNLGRP_NEIGH,
+#define RTNLGRP_NEIGH RTNLGRP_NEIGH
+ RTNLGRP_TC,
+#define RTNLGRP_TC RTNLGRP_TC
+ RTNLGRP_IPV4_IFADDR,
+#define RTNLGRP_IPV4_IFADDR RTNLGRP_IPV4_IFADDR
+ RTNLGRP_IPV4_MROUTE,
+#define RTNLGRP_IPV4_MROUTE RTNLGRP_IPV4_MROUTE
+ RTNLGRP_IPV4_ROUTE,
+#define RTNLGRP_IPV4_ROUTE RTNLGRP_IPV4_ROUTE
+ RTNLGRP_IPV4_RULE,
+#define RTNLGRP_IPV4_RULE RTNLGRP_IPV4_RULE
+ RTNLGRP_IPV6_IFADDR,
+#define RTNLGRP_IPV6_IFADDR RTNLGRP_IPV6_IFADDR
+ RTNLGRP_IPV6_MROUTE,
+#define RTNLGRP_IPV6_MROUTE RTNLGRP_IPV6_MROUTE
+ RTNLGRP_IPV6_ROUTE,
+#define RTNLGRP_IPV6_ROUTE RTNLGRP_IPV6_ROUTE
+ RTNLGRP_IPV6_IFINFO,
+#define RTNLGRP_IPV6_IFINFO RTNLGRP_IPV6_IFINFO
+ RTNLGRP_DECnet_IFADDR,
+#define RTNLGRP_DECnet_IFADDR RTNLGRP_DECnet_IFADDR
+ RTNLGRP_NOP2,
+ RTNLGRP_DECnet_ROUTE,
+#define RTNLGRP_DECnet_ROUTE RTNLGRP_DECnet_ROUTE
+ RTNLGRP_DECnet_RULE,
+#define RTNLGRP_DECnet_RULE RTNLGRP_DECnet_RULE
+ RTNLGRP_NOP4,
+ RTNLGRP_IPV6_PREFIX,
+#define RTNLGRP_IPV6_PREFIX RTNLGRP_IPV6_PREFIX
+ RTNLGRP_IPV6_RULE,
+#define RTNLGRP_IPV6_RULE RTNLGRP_IPV6_RULE
+ RTNLGRP_ND_USEROPT,
+#define RTNLGRP_ND_USEROPT RTNLGRP_ND_USEROPT
+ RTNLGRP_PHONET_IFADDR,
+#define RTNLGRP_PHONET_IFADDR RTNLGRP_PHONET_IFADDR
+ RTNLGRP_PHONET_ROUTE,
+#define RTNLGRP_PHONET_ROUTE RTNLGRP_PHONET_ROUTE
+ RTNLGRP_DCB,
+#define RTNLGRP_DCB RTNLGRP_DCB
+ RTNLGRP_IPV4_NETCONF,
+#define RTNLGRP_IPV4_NETCONF RTNLGRP_IPV4_NETCONF
+ RTNLGRP_IPV6_NETCONF,
+#define RTNLGRP_IPV6_NETCONF RTNLGRP_IPV6_NETCONF
+ RTNLGRP_MDB,
+#define RTNLGRP_MDB RTNLGRP_MDB
+ RTNLGRP_MPLS_ROUTE,
+#define RTNLGRP_MPLS_ROUTE RTNLGRP_MPLS_ROUTE
+ RTNLGRP_NSID,
+#define RTNLGRP_NSID RTNLGRP_NSID
+ RTNLGRP_MPLS_NETCONF,
+#define RTNLGRP_MPLS_NETCONF RTNLGRP_MPLS_NETCONF
+ RTNLGRP_IPV4_MROUTE_R,
+#define RTNLGRP_IPV4_MROUTE_R RTNLGRP_IPV4_MROUTE_R
+ RTNLGRP_IPV6_MROUTE_R,
+#define RTNLGRP_IPV6_MROUTE_R RTNLGRP_IPV6_MROUTE_R
+ RTNLGRP_NEXTHOP,
+#define RTNLGRP_NEXTHOP RTNLGRP_NEXTHOP
+ RTNLGRP_BRVLAN,
+#define RTNLGRP_BRVLAN RTNLGRP_BRVLAN
+ RTNLGRP_MCTP_IFADDR,
+#define RTNLGRP_MCTP_IFADDR RTNLGRP_MCTP_IFADDR
+ RTNLGRP_TUNNEL,
+#define RTNLGRP_TUNNEL RTNLGRP_TUNNEL
+ RTNLGRP_STATS,
+#define RTNLGRP_STATS RTNLGRP_STATS
+ RTNLGRP_IPV4_MCADDR,
+#define RTNLGRP_IPV4_MCADDR RTNLGRP_IPV4_MCADDR
+ RTNLGRP_IPV6_MCADDR,
+#define RTNLGRP_IPV6_MCADDR RTNLGRP_IPV6_MCADDR
+ RTNLGRP_IPV6_ACADDR,
+#define RTNLGRP_IPV6_ACADDR RTNLGRP_IPV6_ACADDR
+ __RTNLGRP_MAX
+};
+#define RTNLGRP_MAX (__RTNLGRP_MAX - 1)
+
+/* TC action piece */
+struct tcamsg {
+ unsigned char tca_family;
+ unsigned char tca__pad1;
+ unsigned short tca__pad2;
+};
+
+enum {
+ TCA_ROOT_UNSPEC,
+ TCA_ROOT_TAB,
+#define TCA_ACT_TAB TCA_ROOT_TAB
+#define TCAA_MAX TCA_ROOT_TAB
+ TCA_ROOT_FLAGS,
+ TCA_ROOT_COUNT,
+ TCA_ROOT_TIME_DELTA, /* in msecs */
+ TCA_ROOT_EXT_WARN_MSG,
+ __TCA_ROOT_MAX,
+#define TCA_ROOT_MAX (__TCA_ROOT_MAX - 1)
+};
+
+#define TA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct tcamsg))))
+#define TA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct tcamsg))
+/* tcamsg flags stored in attribute TCA_ROOT_FLAGS
+ *
+ * TCA_ACT_FLAG_LARGE_DUMP_ON user->kernel to request more than
+ * TCA_ACT_MAX_PRIO actions in a dump. All dump responses will contain the
+ * number of actions being dumped, stored in TCA_ROOT_COUNT for the user
+ * app's consumption.
+ *
+ * TCA_ACT_FLAG_TERSE_DUMP user->kernel to request terse (brief) dump that only
+ * includes essential action info (kind, index, etc.)
+ *
+ */
+#define TCA_FLAG_LARGE_DUMP_ON (1 << 0)
+#define TCA_ACT_FLAG_LARGE_DUMP_ON TCA_FLAG_LARGE_DUMP_ON
+#define TCA_ACT_FLAG_TERSE_DUMP (1 << 1)
+
+/* New extended info filters for IFLA_EXT_MASK */
+#define RTEXT_FILTER_VF (1 << 0)
+#define RTEXT_FILTER_BRVLAN (1 << 1)
+#define RTEXT_FILTER_BRVLAN_COMPRESSED (1 << 2)
+#define RTEXT_FILTER_SKIP_STATS (1 << 3)
+#define RTEXT_FILTER_MRP (1 << 4)
+#define RTEXT_FILTER_CFM_CONFIG (1 << 5)
+#define RTEXT_FILTER_CFM_STATUS (1 << 6)
+#define RTEXT_FILTER_MST (1 << 7)
+
+/* End of information exported to user level */
+
+
+
+#endif /* _UAPI__LINUX_RTNETLINK_H */
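
Tying the RTA_* and RTNH_* macros above together, a minimal sketch of walking the attributes of one RTM_NEWROUTE message, including a nested RTA_MULTIPATH nexthop array (assumes a struct nlmsghdr already received from a NETLINK_ROUTE socket; error handling elided):

#include <linux/rtnetlink.h>
#include <stdio.h>

/* Sketch: print attribute types of a route message and its nexthops. */
static void dump_route(struct nlmsghdr *nlh)
{
	struct rtmsg *rtm = NLMSG_DATA(nlh);
	struct rtattr *rta = RTM_RTA(rtm);
	int len = RTM_PAYLOAD(nlh);

	for (; RTA_OK(rta, len); rta = RTA_NEXT(rta, len)) {
		if (rta->rta_type != RTA_MULTIPATH) {
			printf("attr %hu\n", rta->rta_type);
			continue;
		}
		/* RTA_MULTIPATH payload is an array of rtnexthop */
		struct rtnexthop *nh = RTA_DATA(rta);
		int nhlen = RTA_PAYLOAD(rta);

		for (; RTNH_OK(nh, nhlen); nh = RTNH_NEXT(nh)) {
			printf("nexthop via ifindex %d\n", nh->rtnh_ifindex);
			nhlen -= RTNH_ALIGN(nh->rtnh_len);
		}
	}
}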
diff --git a/tools/include/uapi/linux/stat.h b/tools/include/uapi/linux/stat.h
index f78ee3670dd5..1686861aae20 100644
--- a/tools/include/uapi/linux/stat.h
+++ b/tools/include/uapi/linux/stat.h
@@ -182,8 +182,12 @@ struct statx {
/* File offset alignment for direct I/O reads */
__u32 stx_dio_read_offset_align;
- /* 0xb8 */
- __u64 __spare3[9]; /* Spare space for future expansion */
+ /* Optimised max atomic write unit in bytes */
+ __u32 stx_atomic_write_unit_max_opt;
+ __u32 __spare2[1];
+
+ /* 0xc0 */
+ __u64 __spare3[8]; /* Spare space for future expansion */
/* 0x100 */
};
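
A hedged sketch of reading the new field: it is assumed here that stx_atomic_write_unit_max_opt is requested via the STATX_WRITE_ATOMIC mask bit, like the existing stx_atomic_write_unit_{min,max} fields, and the fallback define covers libcs that have not picked up the constant yet.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

#ifndef STATX_WRITE_ATOMIC
#define STATX_WRITE_ATOMIC 0x00010000U	/* assumed value, from the UAPI */
#endif

int main(int argc, char **argv)
{
	struct statx stx;

	if (argc < 2 ||
	    statx(AT_FDCWD, argv[1], 0, STATX_WRITE_ATOMIC, &stx) < 0)
		return 1;
	printf("optimised atomic write unit: %u bytes\n",
	       stx.stx_atomic_write_unit_max_opt);
	return 0;
}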
diff --git a/tools/include/vdso/unaligned.h b/tools/include/vdso/unaligned.h
index eee3d2a4dbe4..ff0c06b6513e 100644
--- a/tools/include/vdso/unaligned.h
+++ b/tools/include/vdso/unaligned.h
@@ -2,14 +2,14 @@
#ifndef __VDSO_UNALIGNED_H
#define __VDSO_UNALIGNED_H
-#define __get_unaligned_t(type, ptr) ({ \
- const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
- __pptr->x; \
+#define __get_unaligned_t(type, ptr) ({ \
+ const struct { type x; } __packed * __get_pptr = (typeof(__get_pptr))(ptr); \
+ __get_pptr->x; \
})
-#define __put_unaligned_t(type, val, ptr) do { \
- struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
- __pptr->x = (val); \
+#define __put_unaligned_t(type, val, ptr) do { \
+ struct { type x; } __packed * __put_pptr = (typeof(__put_pptr))(ptr); \
+ __put_pptr->x = (val); \
} while (0)
#endif /* __VDSO_UNALIGNED_H */
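
For context, the packed-struct idiom used by __get_unaligned_t/__put_unaligned_t looks like this outside the vDSO (a standalone illustration, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Same idiom as __get_unaligned_t: a packed one-member struct makes
 * the compiler emit a safe unaligned load instead of undefined behaviour. */
#define get_unaligned_u32(ptr) ({					\
	const struct { uint32_t x; } __attribute__((packed)) *__p =	\
		(const void *)(ptr);					\
	__p->x;								\
})

int main(void)
{
	unsigned char buf[8] = { 0, 0x78, 0x56, 0x34, 0x12 };

	/* buf + 1 is misaligned for uint32_t; prints 0x12345678 on
	 * little-endian machines. */
	printf("0x%x\n", get_unaligned_u32(buf + 1));
	return 0;
}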
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index e2cd558ca0b4..c80204bb72a2 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1,4 +1,4 @@
-libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \
+libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_utils.o \
netlink.o bpf_prog_linfo.o libbpf_probes.o hashmap.o \
btf_dump.o ringbuf.o strset.o linker.o gen_loader.o relo_core.o \
usdt.o zip.o elf.o features.o btf_iter.o btf_relocate.o
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index a9c3e33d0f8a..b66f5fbfbbb2 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -154,7 +154,7 @@ int bump_rlimit_memlock(void)
memlock_bumped = true;
- /* zero memlock_rlim_max disables auto-bumping RLIMIT_MEMLOCK */
+ /* zero memlock_rlim disables auto-bumping RLIMIT_MEMLOCK */
if (memlock_rlim == 0)
return 0;
@@ -172,7 +172,7 @@ int bpf_map_create(enum bpf_map_type map_type,
__u32 max_entries,
const struct bpf_map_create_opts *opts)
{
- const size_t attr_sz = offsetofend(union bpf_attr, map_token_fd);
+ const size_t attr_sz = offsetofend(union bpf_attr, excl_prog_hash_size);
union bpf_attr attr;
int fd;
@@ -203,6 +203,8 @@ int bpf_map_create(enum bpf_map_type map_type,
attr.map_ifindex = OPTS_GET(opts, map_ifindex, 0);
attr.map_token_fd = OPTS_GET(opts, token_fd, 0);
+ attr.excl_prog_hash = ptr_to_u64(OPTS_GET(opts, excl_prog_hash, NULL));
+ attr.excl_prog_hash_size = OPTS_GET(opts, excl_prog_hash_size, 0);
fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, attr_sz);
return libbpf_err_errno(fd);
@@ -238,7 +240,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
const struct bpf_insn *insns, size_t insn_cnt,
struct bpf_prog_load_opts *opts)
{
- const size_t attr_sz = offsetofend(union bpf_attr, fd_array_cnt);
+ const size_t attr_sz = offsetofend(union bpf_attr, keyring_id);
void *finfo = NULL, *linfo = NULL;
const char *func_info, *line_info;
__u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
@@ -837,6 +839,50 @@ int bpf_link_create(int prog_fd, int target_fd,
if (!OPTS_ZEROED(opts, netkit))
return libbpf_err(-EINVAL);
break;
+ case BPF_CGROUP_INET_INGRESS:
+ case BPF_CGROUP_INET_EGRESS:
+ case BPF_CGROUP_INET_SOCK_CREATE:
+ case BPF_CGROUP_INET_SOCK_RELEASE:
+ case BPF_CGROUP_INET4_BIND:
+ case BPF_CGROUP_INET6_BIND:
+ case BPF_CGROUP_INET4_POST_BIND:
+ case BPF_CGROUP_INET6_POST_BIND:
+ case BPF_CGROUP_INET4_CONNECT:
+ case BPF_CGROUP_INET6_CONNECT:
+ case BPF_CGROUP_UNIX_CONNECT:
+ case BPF_CGROUP_INET4_GETPEERNAME:
+ case BPF_CGROUP_INET6_GETPEERNAME:
+ case BPF_CGROUP_UNIX_GETPEERNAME:
+ case BPF_CGROUP_INET4_GETSOCKNAME:
+ case BPF_CGROUP_INET6_GETSOCKNAME:
+ case BPF_CGROUP_UNIX_GETSOCKNAME:
+ case BPF_CGROUP_UDP4_SENDMSG:
+ case BPF_CGROUP_UDP6_SENDMSG:
+ case BPF_CGROUP_UNIX_SENDMSG:
+ case BPF_CGROUP_UDP4_RECVMSG:
+ case BPF_CGROUP_UDP6_RECVMSG:
+ case BPF_CGROUP_UNIX_RECVMSG:
+ case BPF_CGROUP_SOCK_OPS:
+ case BPF_CGROUP_DEVICE:
+ case BPF_CGROUP_SYSCTL:
+ case BPF_CGROUP_GETSOCKOPT:
+ case BPF_CGROUP_SETSOCKOPT:
+ case BPF_LSM_CGROUP:
+ relative_fd = OPTS_GET(opts, cgroup.relative_fd, 0);
+ relative_id = OPTS_GET(opts, cgroup.relative_id, 0);
+ if (relative_fd && relative_id)
+ return libbpf_err(-EINVAL);
+ if (relative_id) {
+ attr.link_create.cgroup.relative_id = relative_id;
+ attr.link_create.flags |= BPF_F_ID;
+ } else {
+ attr.link_create.cgroup.relative_fd = relative_fd;
+ }
+ attr.link_create.cgroup.expected_revision =
+ OPTS_GET(opts, cgroup.expected_revision, 0);
+ if (!OPTS_ZEROED(opts, cgroup))
+ return libbpf_err(-EINVAL);
+ break;
default:
if (!OPTS_ZEROED(opts, flags))
return libbpf_err(-EINVAL);
@@ -1331,3 +1377,23 @@ int bpf_token_create(int bpffs_fd, struct bpf_token_create_opts *opts)
fd = sys_bpf_fd(BPF_TOKEN_CREATE, &attr, attr_sz);
return libbpf_err_errno(fd);
}
+
+int bpf_prog_stream_read(int prog_fd, __u32 stream_id, void *buf, __u32 buf_len,
+ struct bpf_prog_stream_read_opts *opts)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, prog_stream_read);
+ union bpf_attr attr;
+ int err;
+
+ if (!OPTS_VALID(opts, bpf_prog_stream_read_opts))
+ return libbpf_err(-EINVAL);
+
+ memset(&attr, 0, attr_sz);
+ attr.prog_stream_read.stream_buf = ptr_to_u64(buf);
+ attr.prog_stream_read.stream_buf_len = buf_len;
+ attr.prog_stream_read.stream_id = stream_id;
+ attr.prog_stream_read.prog_fd = prog_fd;
+
+ err = sys_bpf(BPF_PROG_STREAM_READ_BY_FD, &attr, attr_sz);
+ return libbpf_err_errno(err);
+}
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 777627d33d25..e983a3e40d61 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -54,9 +54,12 @@ struct bpf_map_create_opts {
__s32 value_type_btf_obj_fd;
__u32 token_fd;
+
+ const void *excl_prog_hash;
+ __u32 excl_prog_hash_size;
size_t :0;
};
-#define bpf_map_create_opts__last_field token_fd
+#define bpf_map_create_opts__last_field excl_prog_hash_size
LIBBPF_API int bpf_map_create(enum bpf_map_type map_type,
const char *map_name,
@@ -438,6 +441,11 @@ struct bpf_link_create_opts {
__u32 relative_id;
__u64 expected_revision;
} netkit;
+ struct {
+ __u32 relative_fd;
+ __u32 relative_id;
+ __u64 expected_revision;
+ } cgroup;
};
size_t :0;
};
@@ -704,6 +712,27 @@ struct bpf_token_create_opts {
LIBBPF_API int bpf_token_create(int bpffs_fd,
struct bpf_token_create_opts *opts);
+struct bpf_prog_stream_read_opts {
+ size_t sz;
+ size_t :0;
+};
+#define bpf_prog_stream_read_opts__last_field sz
+/**
+ * @brief **bpf_prog_stream_read** reads data from the BPF stream of a given BPF
+ * program.
+ *
+ * @param prog_fd FD for the BPF program whose BPF stream is to be read.
+ * @param stream_id ID of the BPF stream to be read.
+ * @param buf Buffer to read data into from the BPF stream.
+ * @param buf_len Maximum number of bytes to read from the BPF stream.
+ * @param opts optional options, can be NULL
+ *
+ * @return The number of bytes read, on success; negative error code, otherwise
+ * (errno is also set to the error code)
+ */
+LIBBPF_API int bpf_prog_stream_read(int prog_fd, __u32 stream_id, void *buf, __u32 buf_len,
+ struct bpf_prog_stream_read_opts *opts);
+
#ifdef __cplusplus
} /* extern "C" */
#endif
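
A usage sketch for the new API (the stream id values, e.g. 1/2 for a program's stdout/stderr streams, come from the matching kernel UAPI and are assumed here):

#include <bpf/bpf.h>
#include <stdio.h>

/* Sketch: drain one stream of a loaded program to stderr. */
static void drain_stream(int prog_fd, __u32 stream_id)
{
	char buf[4096];
	int n;

	while ((n = bpf_prog_stream_read(prog_fd, stream_id, buf,
					 sizeof(buf), NULL)) > 0)
		fwrite(buf, 1, n, stderr);
}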
diff --git a/tools/lib/bpf/bpf_gen_internal.h b/tools/lib/bpf/bpf_gen_internal.h
index 6ff963a491d9..49af4260b8e6 100644
--- a/tools/lib/bpf/bpf_gen_internal.h
+++ b/tools/lib/bpf/bpf_gen_internal.h
@@ -4,6 +4,7 @@
#define __BPF_GEN_INTERNAL_H
#include "bpf.h"
+#include "libbpf_internal.h"
struct ksym_relo_desc {
const char *name;
@@ -50,6 +51,7 @@ struct bpf_gen {
__u32 nr_ksyms;
int fd_array;
int nr_fd_array;
+ int hash_insn_offset[SHA256_DWORD_SIZE];
};
void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps);
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index a50773d4616e..d4e4e388e625 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -215,6 +215,7 @@ enum libbpf_tristate {
#define __arg_nonnull __attribute((btf_decl_tag("arg:nonnull")))
#define __arg_nullable __attribute((btf_decl_tag("arg:nullable")))
#define __arg_trusted __attribute((btf_decl_tag("arg:trusted")))
+#define __arg_untrusted __attribute((btf_decl_tag("arg:untrusted")))
#define __arg_arena __attribute((btf_decl_tag("arg:arena")))
#ifndef ___bpf_concat
@@ -314,6 +315,22 @@ enum libbpf_tristate {
___param, sizeof(___param)); \
})
+extern int bpf_stream_vprintk_impl(int stream_id, const char *fmt__str, const void *args,
+ __u32 len__sz, void *aux__prog) __weak __ksym;
+
+#define bpf_stream_printk(stream_id, fmt, args...) \
+({ \
+ static const char ___fmt[] = fmt; \
+ unsigned long long ___param[___bpf_narg(args)]; \
+ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ ___bpf_fill(___param, args); \
+ _Pragma("GCC diagnostic pop") \
+ \
+ bpf_stream_vprintk_impl(stream_id, ___fmt, ___param, sizeof(___param), NULL); \
+})
+
/* Use __bpf_printk when bpf_printk call has 3 or fewer fmt args
* Otherwise use __bpf_vprintk
*/
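
On the BPF program side, the new bpf_stream_printk() macro is used like bpf_printk but targets a numbered stream, which userspace later drains with bpf_prog_stream_read(). A minimal sketch; the stream id 2 for the error stream and the vmlinux.h include are assumptions based on the matching kernel series, and loading requires a kernel that provides the bpf_stream_vprintk_impl kfunc:

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

SEC("kprobe/do_nanosleep")
int trace_sleep(void *ctx)
{
	/* written into the program's stream, not the trace pipe */
	bpf_stream_printk(2 /* assumed: error stream */,
			  "task sleeping, pid %d\n",
			  (int)(bpf_get_current_pid_tgid() >> 32));
	return 0;
}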
diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
index a8f6cd4841b0..dbe32a5d02cd 100644
--- a/tools/lib/bpf/bpf_tracing.h
+++ b/tools/lib/bpf/bpf_tracing.h
@@ -311,7 +311,7 @@ struct pt_regs___arm64 {
#define __PT_RET_REG regs[31]
#define __PT_FP_REG __unsupported__
#define __PT_RC_REG gpr[3]
-#define __PT_SP_REG sp
+#define __PT_SP_REG gpr[1]
#define __PT_IP_REG nip
#elif defined(bpf_target_sparc)
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index f1d495dc66bb..84a4b0abc8be 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -23,7 +23,6 @@
#include "libbpf_internal.h"
#include "hashmap.h"
#include "strset.h"
-#include "str_error.h"
#define BTF_MAX_NR_TYPES 0x7fffffffU
#define BTF_MAX_STR_OFFSET 0x7fffffffU
@@ -1062,7 +1061,7 @@ static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf, b
if (base_btf) {
btf->base_btf = base_btf;
btf->start_id = btf__type_cnt(base_btf);
- btf->start_str_off = base_btf->hdr->str_len;
+ btf->start_str_off = base_btf->hdr->str_len + base_btf->start_str_off;
}
if (is_mmap) {
@@ -1384,12 +1383,12 @@ static struct btf *btf_parse_raw_mmap(const char *path, struct btf *base_btf)
fd = open(path, O_RDONLY);
if (fd < 0)
- return libbpf_err_ptr(-errno);
+ return ERR_PTR(-errno);
if (fstat(fd, &st) < 0) {
err = -errno;
close(fd);
- return libbpf_err_ptr(err);
+ return ERR_PTR(err);
}
data = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
@@ -1397,7 +1396,7 @@ static struct btf *btf_parse_raw_mmap(const char *path, struct btf *base_btf)
close(fd);
if (data == MAP_FAILED)
- return libbpf_err_ptr(err);
+ return ERR_PTR(err);
btf = btf_new(data, st.st_size, base_btf, true);
if (IS_ERR(btf))
@@ -3902,6 +3901,20 @@ err_out:
return err;
}
+/*
+ * Calculate type signature hash of TYPEDEF, ignoring referenced type IDs,
+ * as referenced type IDs equivalence is established separately during type
+ * graph equivalence check algorithm.
+ */
+static long btf_hash_typedef(struct btf_type *t)
+{
+ long h;
+
+ h = hash_combine(0, t->name_off);
+ h = hash_combine(h, t->info);
+ return h;
+}
+
static long btf_hash_common(struct btf_type *t)
{
long h;
@@ -3919,6 +3932,13 @@ static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
t1->size == t2->size;
}
+/* Check structural compatibility of two TYPEDEF. */
+static bool btf_equal_typedef(struct btf_type *t1, struct btf_type *t2)
+{
+ return t1->name_off == t2->name_off &&
+ t1->info == t2->info;
+}
+
/* Calculate type signature hash of INT or TAG. */
static long btf_hash_int_decl_tag(struct btf_type *t)
{
@@ -4845,13 +4865,30 @@ static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
}
}
+static inline long btf_hash_by_kind(struct btf_type *t, __u16 kind)
+{
+ if (kind == BTF_KIND_TYPEDEF)
+ return btf_hash_typedef(t);
+ else
+ return btf_hash_struct(t);
+}
+
+static inline bool btf_equal_by_kind(struct btf_type *t1, struct btf_type *t2, __u16 kind)
+{
+ if (kind == BTF_KIND_TYPEDEF)
+ return btf_equal_typedef(t1, t2);
+ else
+ return btf_shallow_equal_struct(t1, t2);
+}
+
/*
- * Deduplicate struct/union types.
+ * Deduplicate struct/union and typedef types.
*
* For each struct/union type its type signature hash is calculated, taking
* into account type's name, size, number, order and names of fields, but
* ignoring type ID's referenced from fields, because they might not be deduped
- * completely until after reference types deduplication phase. This type hash
+ * completely until after reference types deduplication phase. For each typedef
+ * type, the hash is computed based on the type’s name and size. This type hash
* is used to iterate over all potential canonical types, sharing same hash.
* For each canonical candidate we check whether type graphs that they form
* (through referenced types in fields and so on) are equivalent using algorithm
@@ -4883,18 +4920,20 @@ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
t = btf_type_by_id(d->btf, type_id);
kind = btf_kind(t);
- if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
+ if (kind != BTF_KIND_STRUCT &&
+ kind != BTF_KIND_UNION &&
+ kind != BTF_KIND_TYPEDEF)
return 0;
- h = btf_hash_struct(t);
+ h = btf_hash_by_kind(t, kind);
for_each_dedup_cand(d, hash_entry, h) {
__u32 cand_id = hash_entry->value;
int eq;
/*
* Even though btf_dedup_is_equiv() checks for
- * btf_shallow_equal_struct() internally when checking two
- * structs (unions) for equivalence, we need to guard here
+ * btf_equal_by_kind() internally when checking two
+ * structs (unions) or typedefs for equivalence, we need to guard here
* from picking matching FWD type as a dedup candidate.
* This can happen due to hash collision. In such case just
* relying on btf_dedup_is_equiv() would lead to potentially
@@ -4902,7 +4941,7 @@ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
* FWD and compatible STRUCT/UNION are considered equivalent.
*/
cand_type = btf_type_by_id(d->btf, cand_id);
- if (!btf_shallow_equal_struct(t, cand_type))
+ if (!btf_equal_by_kind(t, cand_type, kind))
continue;
btf_dedup_clear_hypot_map(d);
@@ -4940,18 +4979,18 @@ static int btf_dedup_struct_types(struct btf_dedup *d)
/*
* Deduplicate reference type.
*
- * Once all primitive and struct/union types got deduplicated, we can easily
+ * Once all primitive, struct/union and typedef types got deduplicated, we can easily
* deduplicate all other (reference) BTF types. This is done in two steps:
*
* 1. Resolve all referenced type IDs into their canonical type IDs. This
- * resolution can be done either immediately for primitive or struct/union types
- * (because they were deduped in previous two phases) or recursively for
+ * resolution can be done either immediately for primitive, struct/union, and typedef
+ * types (because they were deduped in previous two phases) or recursively for
* reference types. Recursion will always terminate at either primitive or
- * struct/union type, at which point we can "unwind" chain of reference types
- * one by one. There is no danger of encountering cycles because in C type
- * system the only way to form type cycle is through struct/union, so any chain
- * of reference types, even those taking part in a type cycle, will inevitably
- * reach struct/union at some point.
+ * struct/union or typedef type, at which point we can "unwind" the chain of reference
+ * types one by one. There is no danger of encountering cycles in C, as the only way to
+ * form a type cycle is through struct or union types. Go can form such cycles through
+ * typedef. Thus, any chain of reference types, even those taking part in a type cycle,
+ * will inevitably reach a struct/union or typedef type at some point.
*
* 2. Once all referenced type IDs are resolved into canonical ones, BTF type
* becomes "stable", in the sense that no further deduplication will cause
@@ -4983,7 +5022,6 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
case BTF_KIND_VOLATILE:
case BTF_KIND_RESTRICT:
case BTF_KIND_PTR:
- case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_TYPE_TAG:
ref_type_id = btf_dedup_ref_type(d, t->type);
@@ -5819,7 +5857,7 @@ void btf_set_base_btf(struct btf *btf, const struct btf *base_btf)
{
btf->base_btf = (struct btf *)base_btf;
btf->start_id = btf__type_cnt(base_btf);
- btf->start_str_off = base_btf->hdr->str_len;
+ btf->start_str_off = base_btf->hdr->str_len + base_btf->start_str_off;
}
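
For illustration, a sketch of the chained split-BTF case this one-line fix addresses (vmlinux_btf is an assumed, already-loaded base BTF object; all sizes hypothetical):

	/* vmlinux strings occupy [0, 1000); a module split BTF on top adds
	 * 200 bytes starting at str offset 1000; a further split on the module
	 * must therefore start at 1000 + 200 = 1200, not at 200.
	 */
	struct btf *mod_btf = btf__new_empty_split(vmlinux_btf);
	/* ... module types and strings are added here ... */
	struct btf *split2 = btf__new_empty_split(mod_btf);
	/* split2's start_str_off = mod str_len + mod start_str_off */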
int btf__relocate(struct btf *btf, const struct btf *base_btf)
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index 4392451d634b..cc01494d6210 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -94,6 +94,7 @@ LIBBPF_API struct btf *btf__new_empty(void);
* @brief **btf__new_empty_split()** creates an unpopulated BTF object from an
* ELF BTF section except with a base BTF on top of which split BTF should be
* based
+ * @param base_btf base BTF object
* @return new BTF object instance which has to be eventually freed with
* **btf__free()**
*
@@ -115,6 +116,10 @@ LIBBPF_API struct btf *btf__new_empty_split(struct btf *base_btf);
* When that split BTF is loaded against a (possibly changed) base, this
* distilled base BTF will help update references to that (possibly changed)
* base BTF.
+ * @param src_btf source split BTF object
+ * @param new_base_btf pointer to where the new base BTF object pointer will be stored
+ * @param new_split_btf pointer to where the new split BTF object pointer will be stored
+ * @return 0 on success; negative error code, otherwise
*
* Both the new split and its associated new base BTF must be freed by
* the caller.
@@ -264,6 +269,9 @@ LIBBPF_API int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts);
* to base BTF kinds, and verify those references are compatible with
* *base_btf*; if they are, *btf* is adjusted such that is re-parented to
* *base_btf* and type ids and strings are adjusted to accommodate this.
+ * @param btf split BTF object to relocate
+ * @param base_btf base BTF object
+ * @return 0 on success; negative error code, otherwise
*
* If successful, 0 is returned and **btf** now has **base_btf** as its
* base.
@@ -326,9 +334,10 @@ struct btf_dump_type_data_opts {
bool compact; /* no newlines/indentation */
bool skip_names; /* skip member/type names */
bool emit_zeroes; /* show 0-valued fields */
+ bool emit_strings; /* print char arrays as strings */
size_t :0;
};
-#define btf_dump_type_data_opts__last_field emit_zeroes
+#define btf_dump_type_data_opts__last_field emit_strings
LIBBPF_API int
btf_dump__dump_type_data(struct btf_dump *d, __u32 id,
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
index 460c3e57fadb..6388392f49a0 100644
--- a/tools/lib/bpf/btf_dump.c
+++ b/tools/lib/bpf/btf_dump.c
@@ -21,7 +21,6 @@
#include "hashmap.h"
#include "libbpf.h"
#include "libbpf_internal.h"
-#include "str_error.h"
static const char PREFIXES[] = "\t\t\t\t\t\t\t\t\t\t\t\t\t";
static const size_t PREFIX_CNT = sizeof(PREFIXES) - 1;
@@ -68,6 +67,7 @@ struct btf_dump_data {
bool compact;
bool skip_names;
bool emit_zeroes;
+ bool emit_strings;
__u8 indent_lvl; /* base indent level */
char indent_str[BTF_DATA_INDENT_STR_LEN];
/* below are used during iteration */
@@ -226,6 +226,9 @@ static void btf_dump_free_names(struct hashmap *map)
size_t bkt;
struct hashmap_entry *cur;
+ if (!map)
+ return;
+
hashmap__for_each_entry(map, cur, bkt)
free((void *)cur->pkey);
@@ -2028,6 +2031,52 @@ static int btf_dump_var_data(struct btf_dump *d,
return btf_dump_dump_type_data(d, NULL, t, type_id, data, 0, 0);
}
+static int btf_dump_string_data(struct btf_dump *d,
+ const struct btf_type *t,
+ __u32 id,
+ const void *data)
+{
+ const struct btf_array *array = btf_array(t);
+ const char *chars = data;
+ __u32 i;
+
+ /* Make sure it is a NUL-terminated string. */
+ for (i = 0; i < array->nelems; i++) {
+ if ((void *)(chars + i) >= d->typed_dump->data_end)
+ return -E2BIG;
+ if (chars[i] == '\0')
+ break;
+ }
+ if (i == array->nelems) {
+ /* The caller will print this as a regular array. */
+ return -EINVAL;
+ }
+
+ btf_dump_data_pfx(d);
+ btf_dump_printf(d, "\"");
+
+ for (i = 0; i < array->nelems; i++) {
+ char c = chars[i];
+
+ if (c == '\0') {
+ /*
+ * When printing character arrays as strings, NUL bytes
+ * are always treated as string terminators; they are
+ * never printed.
+ */
+ break;
+ }
+ if (isprint(c))
+ btf_dump_printf(d, "%c", c);
+ else
+ btf_dump_printf(d, "\\x%02x", (__u8)c);
+ }
+
+ btf_dump_printf(d, "\"");
+
+ return 0;
+}
+
static int btf_dump_array_data(struct btf_dump *d,
const struct btf_type *t,
__u32 id,
@@ -2055,8 +2104,13 @@ static int btf_dump_array_data(struct btf_dump *d,
* char arrays, so if size is 1 and element is
* printable as a char, we'll do that.
*/
- if (elem_size == 1)
+ if (elem_size == 1) {
+ if (d->typed_dump->emit_strings &&
+ btf_dump_string_data(d, t, id, data) == 0) {
+ return 0;
+ }
d->typed_dump->is_array_char = true;
+ }
}
/* note that we increment depth before calling btf_dump_print() below;
@@ -2544,6 +2598,7 @@ int btf_dump__dump_type_data(struct btf_dump *d, __u32 id,
d->typed_dump->compact = OPTS_GET(opts, compact, false);
d->typed_dump->skip_names = OPTS_GET(opts, skip_names, false);
d->typed_dump->emit_zeroes = OPTS_GET(opts, emit_zeroes, false);
+ d->typed_dump->emit_strings = OPTS_GET(opts, emit_strings, false);
ret = btf_dump_dump_type_data(d, NULL, t, id, data, 0, 0);
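
A caller-side sketch of the new emit_strings option (the dump object d, type_id, data, and data_sz are assumed to already exist):

	LIBBPF_OPTS(btf_dump_type_data_opts, opts,
		.emit_strings = true, /* print NUL-terminated char arrays as "..." */
	);
	err = btf_dump__dump_type_data(d, type_id, data, data_sz, &opts);
	/* e.g. char name[8] = "abc" is printed as "abc"; non-printable
	 * bytes are escaped as \xNN, per btf_dump_string_data() above
	 */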
diff --git a/tools/lib/bpf/elf.c b/tools/lib/bpf/elf.c
index 823f83ad819c..295dbda24580 100644
--- a/tools/lib/bpf/elf.c
+++ b/tools/lib/bpf/elf.c
@@ -9,7 +9,6 @@
#include <linux/kernel.h>
#include "libbpf_internal.h"
-#include "str_error.h"
/* A SHT_GNU_versym section holds 16-bit words. This bit is set if
* the symbol is hidden and can only be seen when referenced using an
diff --git a/tools/lib/bpf/features.c b/tools/lib/bpf/features.c
index 760657f5224c..b842b83e2480 100644
--- a/tools/lib/bpf/features.c
+++ b/tools/lib/bpf/features.c
@@ -6,7 +6,6 @@
#include "libbpf.h"
#include "libbpf_common.h"
#include "libbpf_internal.h"
-#include "str_error.h"
static inline __u64 ptr_to_u64(const void *ptr)
{
diff --git a/tools/lib/bpf/gen_loader.c b/tools/lib/bpf/gen_loader.c
index 113ae4abd345..cd5c2543f54d 100644
--- a/tools/lib/bpf/gen_loader.c
+++ b/tools/lib/bpf/gen_loader.c
@@ -4,6 +4,7 @@
#include <stdlib.h>
#include <string.h>
#include <errno.h>
+#include <asm/byteorder.h>
#include <linux/filter.h>
#include <sys/param.h>
#include "btf.h"
@@ -13,8 +14,6 @@
#include "hashmap.h"
#include "bpf_gen_internal.h"
#include "skel_internal.h"
-#include <asm/byteorder.h>
-#include "str_error.h"
#define MAX_USED_MAPS 64
#define MAX_USED_PROGS 32
@@ -110,6 +109,7 @@ static void emit2(struct bpf_gen *gen, struct bpf_insn insn1, struct bpf_insn in
static int add_data(struct bpf_gen *gen, const void *data, __u32 size);
static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off);
+static void emit_signature_match(struct bpf_gen *gen);
void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps)
{
@@ -152,6 +152,8 @@ void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps
/* R7 contains the error code from sys_bpf. Copy it into R0 and exit. */
emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
emit(gen, BPF_EXIT_INSN());
+ if (OPTS_GET(gen->opts, gen_hash, false))
+ emit_signature_match(gen);
}
static int add_data(struct bpf_gen *gen, const void *data, __u32 size)
@@ -368,6 +370,8 @@ static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off)
__emit_sys_close(gen);
}
+static void compute_sha_update_offsets(struct bpf_gen *gen);
+
int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps)
{
int i;
@@ -394,6 +398,9 @@ int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps)
blob_fd_array_off(gen, i));
emit(gen, BPF_MOV64_IMM(BPF_REG_0, 0));
emit(gen, BPF_EXIT_INSN());
+ if (OPTS_GET(gen->opts, gen_hash, false))
+ compute_sha_update_offsets(gen);
+
pr_debug("gen: finish %s\n", errstr(gen->error));
if (!gen->error) {
struct gen_loader_opts *opts = gen->opts;
@@ -446,6 +453,22 @@ void bpf_gen__free(struct bpf_gen *gen)
_val; \
})
+static void compute_sha_update_offsets(struct bpf_gen *gen)
+{
+ __u64 sha[SHA256_DWORD_SIZE];
+ __u64 sha_dw;
+ int i;
+
+ libbpf_sha256(gen->data_start, gen->data_cur - gen->data_start, (__u8 *)sha);
+ for (i = 0; i < SHA256_DWORD_SIZE; i++) {
+ struct bpf_insn *insn =
+ (struct bpf_insn *)(gen->insn_start + gen->hash_insn_offset[i]);
+ sha_dw = tgt_endian(sha[i]);
+ insn[0].imm = (__u32)sha_dw;
+ insn[1].imm = sha_dw >> 32;
+ }
+}
+
void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
__u32 btf_raw_size)
{
@@ -557,6 +580,29 @@ void bpf_gen__map_create(struct bpf_gen *gen,
emit_sys_close_stack(gen, stack_off(inner_map_fd));
}
+static void emit_signature_match(struct bpf_gen *gen)
+{
+ __s64 off;
+ int i;
+
+ for (i = 0; i < SHA256_DWORD_SIZE; i++) {
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX,
+ 0, 0, 0, 0));
+ emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, i * sizeof(__u64)));
+ gen->hash_insn_offset[i] = gen->insn_cur - gen->insn_start;
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_3, 0, 0, 0, 0, 0));
+
+ off = -(gen->insn_cur - gen->insn_start - gen->cleanup_label) / 8 - 1;
+ if (is_simm16(off)) {
+ emit(gen, BPF_MOV64_IMM(BPF_REG_7, -EINVAL));
+ emit(gen, BPF_JMP_REG(BPF_JNE, BPF_REG_2, BPF_REG_3, off));
+ } else {
+ gen->error = -ERANGE;
+ emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, -1));
+ }
+ }
+}
+
void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *attach_name,
enum bpf_attach_type type)
{
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index e9c641a2fb20..3dc8a8078815 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -50,7 +50,6 @@
#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
-#include "str_error.h"
#include "libbpf_internal.h"
#include "hashmap.h"
#include "bpf_gen_internal.h"
@@ -191,6 +190,7 @@ static const char * const map_type_name[] = {
[BPF_MAP_TYPE_USER_RINGBUF] = "user_ringbuf",
[BPF_MAP_TYPE_CGRP_STORAGE] = "cgrp_storage",
[BPF_MAP_TYPE_ARENA] = "arena",
+ [BPF_MAP_TYPE_INSN_ARRAY] = "insn_array",
};
static const char * const prog_type_name[] = {
@@ -318,8 +318,6 @@ static void pr_perm_msg(int err)
buf);
}
-#define STRERR_BUFSIZE 128
-
/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
@@ -372,6 +370,7 @@ enum reloc_type {
RELO_EXTERN_CALL,
RELO_SUBPROG_ADDR,
RELO_CORE,
+ RELO_INSN_ARRAY,
};
struct reloc_desc {
@@ -382,7 +381,16 @@ struct reloc_desc {
struct {
int map_idx;
int sym_off;
- int ext_idx;
+ /*
+ * The following two fields can be unionized, as the
+ * ext_idx field is used for extern symbols, and the
+ * sym_size is used for jump tables, which are never
+ * extern
+ */
+ union {
+ int ext_idx;
+ int sym_size;
+ };
};
};
};
@@ -424,6 +432,11 @@ struct bpf_sec_def {
libbpf_prog_attach_fn_t prog_attach_fn;
};
+struct bpf_light_subprog {
+ __u32 sec_insn_off;
+ __u32 sub_insn_off;
+};
+
/*
* bpf_prog should be a better name but it has been used in
* linux/filter.h.
@@ -496,6 +509,10 @@ struct bpf_program {
__u32 line_info_rec_size;
__u32 line_info_cnt;
__u32 prog_flags;
+ __u8 hash[SHA256_DIGEST_LENGTH];
+
+ struct bpf_light_subprog *subprogs;
+ __u32 subprog_cnt;
};
struct bpf_struct_ops {
@@ -575,6 +592,7 @@ struct bpf_map {
bool autocreate;
bool autoattach;
__u64 map_extra;
+ struct bpf_program *excl_prog;
};
enum extern_type {
@@ -597,7 +615,7 @@ struct extern_desc {
int sym_idx;
int btf_id;
int sec_btf_id;
- const char *name;
+ char *name;
char *essent_name;
bool is_set;
bool is_weak;
@@ -668,6 +686,7 @@ struct elf_state {
int symbols_shndx;
bool has_st_ops;
int arena_data_shndx;
+ int jumptables_data_shndx;
};
struct usdt_manager;
@@ -735,10 +754,20 @@ struct bpf_object {
struct usdt_manager *usdt_man;
- struct bpf_map *arena_map;
+ int arena_map_idx;
void *arena_data;
size_t arena_data_sz;
+ void *jumptables_data;
+ size_t jumptables_data_sz;
+
+ struct {
+ struct bpf_program *prog;
+ int sym_off;
+ int fd;
+ } *jumptable_maps;
+ size_t jumptable_map_cnt;
+
struct kern_feature_cache *feat_cache;
char *token_path;
int token_fd;
@@ -765,6 +794,7 @@ void bpf_program__unload(struct bpf_program *prog)
zfree(&prog->func_info);
zfree(&prog->line_info);
+ zfree(&prog->subprogs);
}
static void bpf_program__exit(struct bpf_program *prog)
@@ -1013,35 +1043,33 @@ find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw,
const struct btf_member *kern_data_member;
struct btf *btf = NULL;
__s32 kern_vtype_id, kern_type_id;
- char tname[256];
+ char tname[192], stname[256];
__u32 i;
snprintf(tname, sizeof(tname), "%.*s",
(int)bpf_core_essential_name_len(tname_raw), tname_raw);
- kern_type_id = find_ksym_btf_id(obj, tname, BTF_KIND_STRUCT,
- &btf, mod_btf);
- if (kern_type_id < 0) {
- pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
- tname);
- return kern_type_id;
- }
- kern_type = btf__type_by_id(btf, kern_type_id);
+ snprintf(stname, sizeof(stname), "%s%s", STRUCT_OPS_VALUE_PREFIX, tname);
- /* Find the corresponding "map_value" type that will be used
- * in map_update(BPF_MAP_TYPE_STRUCT_OPS). For example,
- * find "struct bpf_struct_ops_tcp_congestion_ops" from the
- * btf_vmlinux.
+ /* Look for the corresponding "map_value" type that will be used
+ * in map_update(BPF_MAP_TYPE_STRUCT_OPS) first, figure out the btf
+ * and the mod_btf.
+ * For example, find "struct bpf_struct_ops_tcp_congestion_ops".
*/
- kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
- tname, BTF_KIND_STRUCT);
+ kern_vtype_id = find_ksym_btf_id(obj, stname, BTF_KIND_STRUCT, &btf, mod_btf);
if (kern_vtype_id < 0) {
- pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
- STRUCT_OPS_VALUE_PREFIX, tname);
+ pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n", stname);
return kern_vtype_id;
}
kern_vtype = btf__type_by_id(btf, kern_vtype_id);
+ kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
+ if (kern_type_id < 0) {
+ pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n", tname);
+ return kern_type_id;
+ }
+ kern_type = btf__type_by_id(btf, kern_type_id);
+
/* Find "struct tcp_congestion_ops" from
* struct bpf_struct_ops_tcp_congestion_ops {
* [ ... ]
@@ -1054,8 +1082,8 @@ find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw,
break;
}
if (i == btf_vlen(kern_vtype)) {
- pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
- tname, STRUCT_OPS_VALUE_PREFIX, tname);
+ pr_warn("struct_ops init_kern: struct %s data is not found in struct %s\n",
+ tname, stname);
return -EINVAL;
}
@@ -1517,6 +1545,7 @@ static struct bpf_object *bpf_object__new(const char *path,
obj->efile.obj_buf_sz = obj_buf_sz;
obj->efile.btf_maps_shndx = -1;
obj->kconfig_map_idx = -1;
+ obj->arena_map_idx = -1;
obj->kern_version = get_kernel_version();
obj->state = OBJ_OPEN;
@@ -2964,7 +2993,7 @@ static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map,
const long page_sz = sysconf(_SC_PAGE_SIZE);
size_t mmap_sz;
- mmap_sz = bpf_map_mmap_sz(obj->arena_map);
+ mmap_sz = bpf_map_mmap_sz(map);
if (roundup(data_sz, page_sz) > mmap_sz) {
pr_warn("elf: sec '%s': declared ARENA map size (%zu) is too small to hold global __arena variables of size %zu\n",
sec_name, mmap_sz, data_sz);
@@ -2998,7 +3027,7 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
data = elf_sec_data(obj, scn);
- if (!scn || !data) {
+ if (!data) {
pr_warn("elf: failed to get %s map definitions for %s\n",
MAPS_ELF_SEC, obj->path);
return -EINVAL;
@@ -3038,12 +3067,12 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
if (map->def.type != BPF_MAP_TYPE_ARENA)
continue;
- if (obj->arena_map) {
+ if (obj->arena_map_idx >= 0) {
pr_warn("map '%s': only single ARENA map is supported (map '%s' is also ARENA)\n",
- map->name, obj->arena_map->name);
+ map->name, obj->maps[obj->arena_map_idx].name);
return -EINVAL;
}
- obj->arena_map = map;
+ obj->arena_map_idx = i;
if (obj->efile.arena_data) {
err = init_arena_map_data(obj, map, ARENA_SEC, obj->efile.arena_data_shndx,
@@ -3053,7 +3082,7 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
return err;
}
}
- if (obj->efile.arena_data && !obj->arena_map) {
+ if (obj->efile.arena_data && obj->arena_map_idx < 0) {
pr_warn("elf: sec '%s': to use global __arena variables the ARENA map should be explicitly declared in SEC(\".maps\")\n",
ARENA_SEC);
return -ENOENT;
@@ -3944,6 +3973,13 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
} else if (strcmp(name, ARENA_SEC) == 0) {
obj->efile.arena_data = data;
obj->efile.arena_data_shndx = idx;
+ } else if (strcmp(name, JUMPTABLES_SEC) == 0) {
+ obj->jumptables_data = malloc(data->d_size);
+ if (!obj->jumptables_data)
+ return -ENOMEM;
+ memcpy(obj->jumptables_data, data->d_buf, data->d_size);
+ obj->jumptables_data_sz = data->d_size;
+ obj->efile.jumptables_data_shndx = idx;
} else {
pr_info("elf: skipping unrecognized data section(%d) %s\n",
idx, name);
@@ -4259,7 +4295,9 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
return ext->btf_id;
}
t = btf__type_by_id(obj->btf, ext->btf_id);
- ext->name = btf__name_by_offset(obj->btf, t->name_off);
+ ext->name = strdup(btf__name_by_offset(obj->btf, t->name_off));
+ if (!ext->name)
+ return -ENOMEM;
ext->sym_idx = i;
ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK;
@@ -4482,6 +4520,44 @@ bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
}
}
+static int bpf_prog_compute_hash(struct bpf_program *prog)
+{
+ struct bpf_insn *purged;
+ int i, err = 0;
+
+ purged = calloc(prog->insns_cnt, BPF_INSN_SZ);
+ if (!purged)
+ return -ENOMEM;
+
+ /* If relocations have been done, the map_fd needs to be
+ * discarded for the digest calculation.
+ */
+ for (i = 0; i < prog->insns_cnt; i++) {
+ purged[i] = prog->insns[i];
+ if (purged[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
+ (purged[i].src_reg == BPF_PSEUDO_MAP_FD ||
+ purged[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
+ purged[i].imm = 0;
+ i++;
+ if (i >= prog->insns_cnt ||
+ prog->insns[i].code != 0 ||
+ prog->insns[i].dst_reg != 0 ||
+ prog->insns[i].src_reg != 0 ||
+ prog->insns[i].off != 0) {
+ err = -EINVAL;
+ goto out;
+ }
+ purged[i] = prog->insns[i];
+ purged[i].imm = 0;
+ }
+ }
+ libbpf_sha256(purged, prog->insns_cnt * sizeof(struct bpf_insn),
+ prog->hash);
+out:
+ free(purged);
+ return err;
+}
+
static int bpf_program__record_reloc(struct bpf_program *prog,
struct reloc_desc *reloc_desc,
__u32 insn_idx, const char *sym_name,
@@ -4579,10 +4655,30 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
/* arena data relocation */
if (shdr_idx == obj->efile.arena_data_shndx) {
+ if (obj->arena_map_idx < 0) {
+ pr_warn("prog '%s': bad arena data relocation at insn %u, no arena maps defined\n",
+ prog->name, insn_idx);
+ return -LIBBPF_ERRNO__RELOC;
+ }
reloc_desc->type = RELO_DATA;
reloc_desc->insn_idx = insn_idx;
- reloc_desc->map_idx = obj->arena_map - obj->maps;
+ reloc_desc->map_idx = obj->arena_map_idx;
+ reloc_desc->sym_off = sym->st_value;
+
+ map = &obj->maps[obj->arena_map_idx];
+ pr_debug("prog '%s': found arena map %d (%s, sec %d, off %zu) for insn %u\n",
+ prog->name, obj->arena_map_idx, map->name, map->sec_idx,
+ map->sec_offset, insn_idx);
+ return 0;
+ }
+
+ /* jump table data relocation */
+ if (shdr_idx == obj->efile.jumptables_data_shndx) {
+ reloc_desc->type = RELO_INSN_ARRAY;
+ reloc_desc->insn_idx = insn_idx;
+ reloc_desc->map_idx = -1;
reloc_desc->sym_off = sym->st_value;
+ reloc_desc->sym_size = sym->st_size;
return 0;
}
@@ -5080,6 +5176,16 @@ static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
return false;
}
+ /*
+ * bpf_get_map_info_by_fd() for DEVMAP will always return flags with
+ * BPF_F_RDONLY_PROG set, but it generally is not set at map creation time.
+ * Thus, ignore the BPF_F_RDONLY_PROG flag in the flags returned from
+ * bpf_get_map_info_by_fd() when checking for compatibility with an
+ * existing DEVMAP.
+ */
+ if (map->def.type == BPF_MAP_TYPE_DEVMAP || map->def.type == BPF_MAP_TYPE_DEVMAP_HASH)
+ map_info.map_flags &= ~BPF_F_RDONLY_PROG;
+
return (map_info.type == map->def.type &&
map_info.key_size == map->def.key_size &&
map_info.value_size == map->def.value_size &&
@@ -5211,6 +5317,14 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
create_attr.token_fd = obj->token_fd;
if (obj->token_fd)
create_attr.map_flags |= BPF_F_TOKEN_FD;
+ if (map->excl_prog) {
+ err = bpf_prog_compute_hash(map->excl_prog);
+ if (err)
+ return err;
+
+ create_attr.excl_prog_hash = map->excl_prog->hash;
+ create_attr.excl_prog_hash_size = SHA256_DIGEST_LENGTH;
+ }
if (bpf_map__is_struct_ops(map)) {
create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
@@ -6078,6 +6192,157 @@ static void poison_kfunc_call(struct bpf_program *prog, int relo_idx,
insn->imm = POISON_CALL_KFUNC_BASE + ext_idx;
}
+static int find_jt_map(struct bpf_object *obj, struct bpf_program *prog, int sym_off)
+{
+ size_t i;
+
+ for (i = 0; i < obj->jumptable_map_cnt; i++) {
+ /*
+ * It might happen that the same offset is used by two different
+ * programs (as jump tables can be identical). However, a separate
+ * map should be created for each program.
+ */
+ if (obj->jumptable_maps[i].sym_off == sym_off &&
+ obj->jumptable_maps[i].prog == prog)
+ return obj->jumptable_maps[i].fd;
+ }
+
+ return -ENOENT;
+}
+
+static int add_jt_map(struct bpf_object *obj, struct bpf_program *prog, int sym_off, int map_fd)
+{
+ size_t cnt = obj->jumptable_map_cnt;
+ size_t size = sizeof(obj->jumptable_maps[0]);
+ void *tmp;
+
+ tmp = libbpf_reallocarray(obj->jumptable_maps, cnt + 1, size);
+ if (!tmp)
+ return -ENOMEM;
+
+ obj->jumptable_maps = tmp;
+ obj->jumptable_maps[cnt].prog = prog;
+ obj->jumptable_maps[cnt].sym_off = sym_off;
+ obj->jumptable_maps[cnt].fd = map_fd;
+ obj->jumptable_map_cnt++;
+
+ return 0;
+}
+
+static int find_subprog_idx(struct bpf_program *prog, int insn_idx)
+{
+ int i;
+
+ for (i = prog->subprog_cnt - 1; i >= 0; i--) {
+ if (insn_idx >= prog->subprogs[i].sub_insn_off)
+ return i;
+ }
+
+ return -1;
+}
+
+static int create_jt_map(struct bpf_object *obj, struct bpf_program *prog, struct reloc_desc *relo)
+{
+ const __u32 jt_entry_size = 8;
+ int sym_off = relo->sym_off;
+ int jt_size = relo->sym_size;
+ __u32 max_entries = jt_size / jt_entry_size;
+ __u32 value_size = sizeof(struct bpf_insn_array_value);
+ struct bpf_insn_array_value val = {};
+ int subprog_idx;
+ int map_fd, err;
+ __u64 insn_off;
+ __u64 *jt;
+ __u32 i;
+
+ map_fd = find_jt_map(obj, prog, sym_off);
+ if (map_fd >= 0)
+ return map_fd;
+
+ if (sym_off % jt_entry_size) {
+ pr_warn("map '.jumptables': jumptable start %d should be a multiple of %u\n",
+ sym_off, jt_entry_size);
+ return -EINVAL;
+ }
+
+ if (jt_size % jt_entry_size) {
+ pr_warn("map '.jumptables': jumptable size %d should be a multiple of %u\n",
+ jt_size, jt_entry_size);
+ return -EINVAL;
+ }
+
+ map_fd = bpf_map_create(BPF_MAP_TYPE_INSN_ARRAY, ".jumptables",
+ 4, value_size, max_entries, NULL);
+ if (map_fd < 0)
+ return map_fd;
+
+ if (!obj->jumptables_data) {
+ pr_warn("map '.jumptables': ELF file is missing jump table data\n");
+ err = -EINVAL;
+ goto err_close;
+ }
+ if (sym_off + jt_size > obj->jumptables_data_sz) {
+ pr_warn("map '.jumptables': jumptables_data size is %zu, trying to access %d\n",
+ obj->jumptables_data_sz, sym_off + jt_size);
+ err = -EINVAL;
+ goto err_close;
+ }
+
+ subprog_idx = -1; /* main program */
+ if (relo->insn_idx < 0 || relo->insn_idx >= prog->insns_cnt) {
+ pr_warn("map '.jumptables': invalid instruction index %d\n", relo->insn_idx);
+ err = -EINVAL;
+ goto err_close;
+ }
+ if (prog->subprogs)
+ subprog_idx = find_subprog_idx(prog, relo->insn_idx);
+
+ jt = (__u64 *)(obj->jumptables_data + sym_off);
+ for (i = 0; i < max_entries; i++) {
+ /*
+ * The offset should be made to be relative to the beginning of
+ * the main function, not the subfunction.
+ */
+ insn_off = jt[i] / sizeof(struct bpf_insn);
+ if (subprog_idx >= 0) {
+ insn_off -= prog->subprogs[subprog_idx].sec_insn_off;
+ insn_off += prog->subprogs[subprog_idx].sub_insn_off;
+ } else {
+ insn_off -= prog->sec_insn_off;
+ }
+
+ /*
+ * LLVM-generated jump tables contain u64 records; however,
+ * the values are expected to fit in u32.
+ */
+ if (insn_off > UINT32_MAX) {
+ pr_warn("map '.jumptables': invalid jump table value 0x%llx at offset %d\n",
+ (long long)jt[i], sym_off + i * jt_entry_size);
+ err = -EINVAL;
+ goto err_close;
+ }
+
+ val.orig_off = insn_off;
+ err = bpf_map_update_elem(map_fd, &i, &val, 0);
+ if (err)
+ goto err_close;
+ }
+
+ err = bpf_map_freeze(map_fd);
+ if (err)
+ goto err_close;
+
+ err = add_jt_map(obj, prog, sym_off, map_fd);
+ if (err)
+ goto err_close;
+
+ return map_fd;
+
+err_close:
+ close(map_fd);
+ return err;
+}
+
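
A worked example of the offset translation performed above (all numbers hypothetical):

	/* jt[i] = 512 bytes into .jumptables => raw instruction index 64 */
	__u64 insn_off = 512 / sizeof(struct bpf_insn); /* 64 */
	insn_off -= 40;  /* subprog's sec_insn_off in its ELF section */
	insn_off += 100; /* subprog's sub_insn_off in the linked main prog */
	/* insn_off == 124, an index relative to the main program start */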
/* Relocate data references within program code:
* - map references;
* - global variable references;
@@ -6169,6 +6434,20 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
case RELO_CORE:
/* will be handled by bpf_program_record_relos() */
break;
+ case RELO_INSN_ARRAY: {
+ int map_fd;
+
+ map_fd = create_jt_map(obj, prog, relo);
+ if (map_fd < 0) {
+ pr_warn("prog '%s': relo #%d: can't create jump table: sym_off %d\n",
+ prog->name, i, relo->sym_off);
+ return map_fd;
+ }
+ insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
+ insn->imm = map_fd;
+ insn->off = 0;
+ }
+ break;
default:
pr_warn("prog '%s': relo #%d: bad relo type %d\n",
prog->name, i, relo->type);
@@ -6366,36 +6645,62 @@ static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_progra
return 0;
}
+static int save_subprog_offsets(struct bpf_program *main_prog, struct bpf_program *subprog)
+{
+ size_t size = sizeof(main_prog->subprogs[0]);
+ int cnt = main_prog->subprog_cnt;
+ void *tmp;
+
+ tmp = libbpf_reallocarray(main_prog->subprogs, cnt + 1, size);
+ if (!tmp)
+ return -ENOMEM;
+
+ main_prog->subprogs = tmp;
+ main_prog->subprogs[cnt].sec_insn_off = subprog->sec_insn_off;
+ main_prog->subprogs[cnt].sub_insn_off = subprog->sub_insn_off;
+ main_prog->subprog_cnt++;
+
+ return 0;
+}
+
static int
bpf_object__append_subprog_code(struct bpf_object *obj, struct bpf_program *main_prog,
struct bpf_program *subprog)
{
- struct bpf_insn *insns;
- size_t new_cnt;
- int err;
+ struct bpf_insn *insns;
+ size_t new_cnt;
+ int err;
+
+ subprog->sub_insn_off = main_prog->insns_cnt;
- subprog->sub_insn_off = main_prog->insns_cnt;
+ new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
+ insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
+ if (!insns) {
+ pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
+ return -ENOMEM;
+ }
+ main_prog->insns = insns;
+ main_prog->insns_cnt = new_cnt;
- new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
- insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
- if (!insns) {
- pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
- return -ENOMEM;
- }
- main_prog->insns = insns;
- main_prog->insns_cnt = new_cnt;
+ memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
+ subprog->insns_cnt * sizeof(*insns));
- memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
- subprog->insns_cnt * sizeof(*insns));
+ pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
+ main_prog->name, subprog->insns_cnt, subprog->name);
- pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
- main_prog->name, subprog->insns_cnt, subprog->name);
+ /* The subprog insns are now appended. Append its relos too. */
+ err = append_subprog_relos(main_prog, subprog);
+ if (err)
+ return err;
- /* The subprog insns are now appended. Append its relos too. */
- err = append_subprog_relos(main_prog, subprog);
- if (err)
- return err;
- return 0;
+ err = save_subprog_offsets(main_prog, subprog);
+ if (err) {
+ pr_warn("prog '%s': failed to add subprog offsets: %s\n",
+ main_prog->name, errstr(err));
+ return err;
+ }
+
+ return 0;
}
static int
@@ -9138,8 +9443,10 @@ void bpf_object__close(struct bpf_object *obj)
zfree(&obj->btf_custom_path);
zfree(&obj->kconfig);
- for (i = 0; i < obj->nr_extern; i++)
+ for (i = 0; i < obj->nr_extern; i++) {
+ zfree(&obj->externs[i].name);
zfree(&obj->externs[i].essent_name);
+ }
zfree(&obj->externs);
obj->nr_extern = 0;
@@ -9160,6 +9467,13 @@ void bpf_object__close(struct bpf_object *obj)
zfree(&obj->arena_data);
+ zfree(&obj->jumptables_data);
+ obj->jumptables_data_sz = 0;
+
+ for (i = 0; i < obj->jumptable_map_cnt; i++)
+ close(obj->jumptable_maps[i].fd);
+ zfree(&obj->jumptable_maps);
+
free(obj);
}
@@ -9206,7 +9520,7 @@ int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts)
return libbpf_err(-EFAULT);
if (!OPTS_VALID(opts, gen_loader_opts))
return libbpf_err(-EINVAL);
- gen = calloc(sizeof(*gen), 1);
+ gen = calloc(1, sizeof(*gen));
if (!gen)
return libbpf_err(-ENOMEM);
gen->opts = opts;
@@ -10081,7 +10395,7 @@ static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
enum bpf_attach_type attach_type,
int *btf_obj_fd, int *btf_type_id)
{
- int ret, i, mod_len;
+ int ret, i, mod_len = 0;
const char *fn_name, *mod_name = NULL;
fn_name = strchr(attach_name, ':');
@@ -10499,6 +10813,27 @@ int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
return 0;
}
+int bpf_map__set_exclusive_program(struct bpf_map *map, struct bpf_program *prog)
+{
+ if (map_is_created(map)) {
+ pr_warn("exclusive programs must be set before map creation\n");
+ return libbpf_err(-EINVAL);
+ }
+
+ if (map->obj != prog->obj) {
+ pr_warn("excl_prog and map must be from the same bpf object\n");
+ return libbpf_err(-EINVAL);
+ }
+
+ map->excl_prog = prog;
+ return 0;
+}
+
+struct bpf_program *bpf_map__exclusive_program(struct bpf_map *map)
+{
+ return map->excl_prog;
+}
+
static struct bpf_map *
__bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
{
@@ -10950,11 +11285,14 @@ struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *p
}
link->link.fd = pfd;
}
- if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
- err = -errno;
- pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n",
- prog->name, pfd, errstr(err));
- goto err_out;
+
+ if (!OPTS_GET(opts, dont_enable, false)) {
+ if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
+ err = -errno;
+ pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n",
+ prog->name, pfd, errstr(err));
+ goto err_out;
+ }
}
return &link->link;
@@ -11233,8 +11571,6 @@ static const char *arch_specific_syscall_pfx(void)
return "ia32";
#elif defined(__s390x__)
return "s390x";
-#elif defined(__s390__)
- return "s390";
#elif defined(__arm__)
return "arm";
#elif defined(__aarch64__)
@@ -12021,8 +12357,6 @@ static const char *arch_specific_lib_paths(void)
return "/lib/i386-linux-gnu";
#elif defined(__s390x__)
return "/lib/s390x-linux-gnu";
-#elif defined(__s390__)
- return "/lib/s390-linux-gnu";
#elif defined(__arm__) && defined(__SOFTFP__)
return "/lib/arm-linux-gnueabi";
#elif defined(__arm__) && !defined(__SOFTFP__)
@@ -12838,6 +13172,34 @@ struct bpf_link *bpf_program__attach_xdp(const struct bpf_program *prog, int ifi
}
struct bpf_link *
+bpf_program__attach_cgroup_opts(const struct bpf_program *prog, int cgroup_fd,
+ const struct bpf_cgroup_opts *opts)
+{
+ LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
+ __u32 relative_id;
+ int relative_fd;
+
+ if (!OPTS_VALID(opts, bpf_cgroup_opts))
+ return libbpf_err_ptr(-EINVAL);
+
+ relative_id = OPTS_GET(opts, relative_id, 0);
+ relative_fd = OPTS_GET(opts, relative_fd, 0);
+
+ if (relative_fd && relative_id) {
+ pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n",
+ prog->name);
+ return libbpf_err_ptr(-EINVAL);
+ }
+
+ link_create_opts.cgroup.expected_revision = OPTS_GET(opts, expected_revision, 0);
+ link_create_opts.cgroup.relative_fd = relative_fd;
+ link_create_opts.cgroup.relative_id = relative_id;
+ link_create_opts.flags = OPTS_GET(opts, flags, 0);
+
+ return bpf_program_attach_fd(prog, cgroup_fd, "cgroup", &link_create_opts);
+}
+
+struct bpf_link *
bpf_program__attach_tcx(const struct bpf_program *prog, int ifindex,
const struct bpf_tcx_opts *opts)
{
@@ -13738,8 +14100,8 @@ int bpf_program__set_attach_target(struct bpf_program *prog,
return libbpf_err(-EINVAL);
if (attach_prog_fd && !attach_func_name) {
- /* remember attach_prog_fd and let bpf_program__load() find
- * BTF ID during the program load
+ /* Store attach_prog_fd. The BTF ID will be resolved later during
+ * the normal object/program load phase.
*/
prog->attach_prog_fd = attach_prog_fd;
return 0;
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 1137e7d2e1b5..65e68e964b89 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -24,8 +24,25 @@
extern "C" {
#endif
+/**
+ * @brief **libbpf_major_version()** provides the major version of libbpf.
+ * @return An integer, the major version number
+ */
LIBBPF_API __u32 libbpf_major_version(void);
+
+/**
+ * @brief **libbpf_minor_version()** provides the minor version of libbpf.
+ * @return An integer, the minor version number
+ */
LIBBPF_API __u32 libbpf_minor_version(void);
+
+/**
+ * @brief **libbpf_version_string()** provides the version of libbpf in a
+ * human-readable form, e.g., "v1.7".
+ * @return Pointer to a static string containing the version
+ *
+ * The format is *not* part of the stable API and may change in the future.
+ */
LIBBPF_API const char *libbpf_version_string(void);
enum libbpf_errno {
@@ -49,6 +66,14 @@ enum libbpf_errno {
__LIBBPF_ERRNO__END,
};
+/**
+ * @brief **libbpf_strerror()** converts the provided error code into a
+ * human-readable string.
+ * @param err The error code to convert
+ * @param buf Pointer to a buffer where the error message will be stored
+ * @param size The number of bytes in the buffer
+ * @return 0, on success; negative error code, otherwise
+ */
LIBBPF_API int libbpf_strerror(int err, char *buf, size_t size);
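
For example (buffer size chosen arbitrarily):

	char buf[128];
	libbpf_strerror(-LIBBPF_ERRNO__VERIFY, buf, sizeof(buf));
	/* buf now holds "Kernel verifier blocks program loading" */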
/**
@@ -252,7 +277,7 @@ bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
* @return 0, on success; negative error code, otherwise, error code is
* stored in errno
*/
-int bpf_object__prepare(struct bpf_object *obj);
+LIBBPF_API int bpf_object__prepare(struct bpf_object *obj);
/**
* @brief **bpf_object__load()** loads BPF object into kernel.
@@ -423,7 +448,7 @@ LIBBPF_API int bpf_program__pin(struct bpf_program *prog, const char *path);
/**
* @brief **bpf_program__unpin()** unpins the BPF program from a file
- * in the BPFFS specified by a path. This decrements the programs
+ * in the BPFFS specified by a path. This decrements the program's in-kernel
* reference count.
*
* The file pinning the BPF program can also be unlinked by a different
@@ -456,14 +481,12 @@ LIBBPF_API int bpf_link__pin(struct bpf_link *link, const char *path);
/**
* @brief **bpf_link__unpin()** unpins the BPF link from a file
- * in the BPFFS specified by a path. This decrements the links
- * reference count.
+ * in the BPFFS. This decrements the link's in-kernel reference count.
*
* The file pinning the BPF link can also be unlinked by a different
* process in which case this function will return an error.
*
- * @param prog BPF program to unpin
- * @param path file path to the pin in a BPF file system
+ * @param link BPF link to unpin
* @return 0, on success; negative error code, otherwise
*/
LIBBPF_API int bpf_link__unpin(struct bpf_link *link);
@@ -499,9 +522,11 @@ struct bpf_perf_event_opts {
__u64 bpf_cookie;
/* don't use BPF link when attach BPF program */
bool force_ioctl_attach;
+ /* don't automatically enable the event */
+ bool dont_enable;
size_t :0;
};
-#define bpf_perf_event_opts__last_field force_ioctl_attach
+#define bpf_perf_event_opts__last_field dont_enable
LIBBPF_API struct bpf_link *
bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd);
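
Usage sketch for the new dont_enable flag (prog and pfd are assumed to exist); the event can then be enabled manually once setup is complete:

	LIBBPF_OPTS(bpf_perf_event_opts, opts, .dont_enable = true);
	struct bpf_link *link = bpf_program__attach_perf_event_opts(prog, pfd, &opts);
	/* ... finish setup, then enable explicitly: */
	ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0);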
@@ -877,6 +902,21 @@ LIBBPF_API struct bpf_link *
bpf_program__attach_netkit(const struct bpf_program *prog, int ifindex,
const struct bpf_netkit_opts *opts);
+struct bpf_cgroup_opts {
+ /* size of this struct, for forward/backward compatibility */
+ size_t sz;
+ __u32 flags;
+ __u32 relative_fd;
+ __u32 relative_id;
+ __u64 expected_revision;
+ size_t :0;
+};
+#define bpf_cgroup_opts__last_field expected_revision
+
+LIBBPF_API struct bpf_link *
+bpf_program__attach_cgroup_opts(const struct bpf_program *prog, int cgroup_fd,
+ const struct bpf_cgroup_opts *opts);
+
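
Usage sketch for the new attach API (prog, cgroup_fd and other_prog_fd are assumed to exist; BPF_F_BEFORE is the mprog-style ordering flag from the bpf UAPI):

	LIBBPF_OPTS(bpf_cgroup_opts, opts,
		.flags = BPF_F_BEFORE,        /* insert before the anchor program */
		.relative_fd = other_prog_fd,
	);
	struct bpf_link *link = bpf_program__attach_cgroup_opts(prog, cgroup_fd, &opts);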
struct bpf_map;
LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map);
@@ -953,8 +993,13 @@ LIBBPF_API __u32 bpf_program__line_info_cnt(const struct bpf_program *prog);
* - fentry/fexit/fmod_ret;
* - lsm;
* - freplace.
- * @param prog BPF program to set the attach type for
- * @param type attach type to set the BPF map to have
+ * @param prog BPF program to configure; must not be loaded yet.
+ * @param attach_prog_fd FD of the target BPF program (for freplace/extension).
+ * If positive and *attach_func_name* is omitted, BTF ID resolution is deferred
+ * until load time.
+ * @param attach_func_name Target function name. Used either together with
+ * *attach_prog_fd* to find the destination BTF type ID in that BPF program, or
+ * alone (no *attach_prog_fd*) to resolve a kernel (vmlinux/module) BTF ID.
+ * Must be provided if *attach_prog_fd* is 0.
* @return error code; or 0 if no error occurred.
*/
LIBBPF_API int
@@ -1056,6 +1101,7 @@ LIBBPF_API __u32 bpf_map__value_size(const struct bpf_map *map);
/**
* @brief **bpf_map__set_value_size()** sets map value size.
* @param map the BPF map instance
+ * @param size the new value size
* @return 0, on success; negative error, otherwise
*
* There is a special case for maps with associated memory-mapped regions, like
@@ -1160,7 +1206,7 @@ LIBBPF_API struct bpf_map *bpf_map__inner_map(struct bpf_map *map);
* per-CPU values value size has to be aligned up to closest 8 bytes for
* alignment reasons, so expected size is: `round_up(value_size, 8)
* * libbpf_num_possible_cpus()`.
- * @flags extra flags passed to kernel for this operation
+ * @param flags extra flags passed to kernel for this operation
* @return 0, on success; negative error, otherwise
*
* **bpf_map__lookup_elem()** is high-level equivalent of
@@ -1184,7 +1230,7 @@ LIBBPF_API int bpf_map__lookup_elem(const struct bpf_map *map,
* per-CPU values value size has to be aligned up to closest 8 bytes for
* alignment reasons, so expected size is: `round_up(value_size, 8)
* * libbpf_num_possible_cpus()`.
- * @flags extra flags passed to kernel for this operation
+ * @param flags extra flags passed to kernel for this operation
* @return 0, on success; negative error, otherwise
*
* **bpf_map__update_elem()** is high-level equivalent of
@@ -1200,7 +1246,7 @@ LIBBPF_API int bpf_map__update_elem(const struct bpf_map *map,
* @param map BPF map to delete element from
* @param key pointer to memory containing bytes of the key
* @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
- * @flags extra flags passed to kernel for this operation
+ * @param flags extra flags passed to kernel for this operation
* @return 0, on success; negative error, otherwise
*
* **bpf_map__delete_elem()** is high-level equivalent of
@@ -1223,7 +1269,7 @@ LIBBPF_API int bpf_map__delete_elem(const struct bpf_map *map,
* per-CPU values value size has to be aligned up to closest 8 bytes for
* alignment reasons, so expected size is: `round_up(value_size, 8)
* * libbpf_num_possible_cpus()`.
- * @flags extra flags passed to kernel for this operation
+ * @param flags extra flags passed to kernel for this operation
* @return 0, on success; negative error, otherwise
*
* **bpf_map__lookup_and_delete_elem()** is high-level equivalent of
@@ -1249,6 +1295,28 @@ LIBBPF_API int bpf_map__lookup_and_delete_elem(const struct bpf_map *map,
*/
LIBBPF_API int bpf_map__get_next_key(const struct bpf_map *map,
const void *cur_key, void *next_key, size_t key_sz);
+/**
+ * @brief **bpf_map__set_exclusive_program()** sets a map to be exclusive to the
+ * specified program. This must be called *before* the map is created.
+ *
+ * @param map BPF map to make exclusive.
+ * @param prog BPF program to be the exclusive user of the map. Must belong
+ * to the same bpf_object as the map.
+ * @return 0 on success; a negative error code otherwise.
+ *
+ * This function must be called after the BPF object is opened but before
+ * it is loaded. Once the object is loaded, only the specified program
+ * will be able to access the map's contents.
+ */
+LIBBPF_API int bpf_map__set_exclusive_program(struct bpf_map *map, struct bpf_program *prog);
+
+/**
+ * @brief **bpf_map__exclusive_program()** returns the exclusive program
+ * that is registered with the map (if any).
+ * @param map BPF map to which the exclusive program is registered.
+ * @return the registered exclusive program.
+ */
+LIBBPF_API struct bpf_program *bpf_map__exclusive_program(struct bpf_map *map);
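
A minimal open-set-load sketch of the exclusive-map flow (object, map and program names are hypothetical):

	struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", NULL);
	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");
	struct bpf_program *prog = bpf_object__find_program_by_name(obj, "my_prog");
	int err = bpf_map__set_exclusive_program(map, prog);
	if (!err)
		err = bpf_object__load(obj); /* map is created bound to prog's hash */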
struct bpf_xdp_set_link_opts {
size_t sz;
@@ -1573,6 +1641,7 @@ struct perf_buffer_opts {
* @param sample_cb function called on each received data record
* @param lost_cb function called when record loss has occurred
* @param ctx user-provided extra context passed into *sample_cb* and *lost_cb*
+ * @param opts optional parameters for the perf buffer, can be NULL
* @return a new instance of struct perf_buffer on success, NULL on error with
* *errno* containing an error code
*/
@@ -1793,9 +1862,10 @@ struct gen_loader_opts {
const char *insns;
__u32 data_sz;
__u32 insns_sz;
+ bool gen_hash;
};
-#define gen_loader_opts__last_field insns_sz
+#define gen_loader_opts__last_field gen_hash
LIBBPF_API int bpf_object__gen_loader(struct bpf_object *obj,
struct gen_loader_opts *opts);
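
Usage sketch for the new gen_hash option (obj is an assumed, already-opened bpf_object; the data/insns flow follows the existing gen-loader usage):

	LIBBPF_OPTS(gen_loader_opts, gopts, .gen_hash = true);
	err = bpf_object__gen_loader(obj, &gopts);
	if (!err)
		err = bpf_object__load(obj);
	/* gopts.data/gopts.insns now hold the generated loader; it embeds
	 * and verifies the SHA-256 of its data blob at run time */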
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 1205f9a4fe04..8ed8749907d4 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -437,6 +437,8 @@ LIBBPF_1.6.0 {
bpf_linker__add_fd;
bpf_linker__new_fd;
bpf_object__prepare;
+ bpf_prog_stream_read;
+ bpf_program__attach_cgroup_opts;
bpf_program__func_info;
bpf_program__func_info_cnt;
bpf_program__line_info;
@@ -444,3 +446,9 @@ LIBBPF_1.6.0 {
btf__add_decl_attr;
btf__add_type_attr;
} LIBBPF_1.5.0;
+
+LIBBPF_1.7.0 {
+ global:
+ bpf_map__exclusive_program;
+ bpf_map__set_exclusive_program;
+} LIBBPF_1.6.0;
diff --git a/tools/lib/bpf/libbpf_errno.c b/tools/lib/bpf/libbpf_errno.c
deleted file mode 100644
index 6b180172ec6b..000000000000
--- a/tools/lib/bpf/libbpf_errno.c
+++ /dev/null
@@ -1,75 +0,0 @@
-// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
-
-/*
- * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
- * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
- * Copyright (C) 2015 Huawei Inc.
- * Copyright (C) 2017 Nicira, Inc.
- */
-
-#undef _GNU_SOURCE
-#include <stdio.h>
-#include <string.h>
-
-#include "libbpf.h"
-#include "libbpf_internal.h"
-
-/* make sure libbpf doesn't use kernel-only integer typedefs */
-#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
-
-#define ERRNO_OFFSET(e) ((e) - __LIBBPF_ERRNO__START)
-#define ERRCODE_OFFSET(c) ERRNO_OFFSET(LIBBPF_ERRNO__##c)
-#define NR_ERRNO (__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START)
-
-static const char *libbpf_strerror_table[NR_ERRNO] = {
- [ERRCODE_OFFSET(LIBELF)] = "Something wrong in libelf",
- [ERRCODE_OFFSET(FORMAT)] = "BPF object format invalid",
- [ERRCODE_OFFSET(KVERSION)] = "'version' section incorrect or lost",
- [ERRCODE_OFFSET(ENDIAN)] = "Endian mismatch",
- [ERRCODE_OFFSET(INTERNAL)] = "Internal error in libbpf",
- [ERRCODE_OFFSET(RELOC)] = "Relocation failed",
- [ERRCODE_OFFSET(VERIFY)] = "Kernel verifier blocks program loading",
- [ERRCODE_OFFSET(PROG2BIG)] = "Program too big",
- [ERRCODE_OFFSET(KVER)] = "Incorrect kernel version",
- [ERRCODE_OFFSET(PROGTYPE)] = "Kernel doesn't support this program type",
- [ERRCODE_OFFSET(WRNGPID)] = "Wrong pid in netlink message",
- [ERRCODE_OFFSET(INVSEQ)] = "Invalid netlink sequence",
- [ERRCODE_OFFSET(NLPARSE)] = "Incorrect netlink message parsing",
-};
-
-int libbpf_strerror(int err, char *buf, size_t size)
-{
- int ret;
-
- if (!buf || !size)
- return libbpf_err(-EINVAL);
-
- err = err > 0 ? err : -err;
-
- if (err < __LIBBPF_ERRNO__START) {
- ret = strerror_r(err, buf, size);
- buf[size - 1] = '\0';
- return libbpf_err_errno(ret);
- }
-
- if (err < __LIBBPF_ERRNO__END) {
- const char *msg;
-
- msg = libbpf_strerror_table[ERRNO_OFFSET(err)];
- ret = snprintf(buf, size, "%s", msg);
- buf[size - 1] = '\0';
- /* The length of the buf and msg is positive.
- * A negative number may be returned only when the
- * size exceeds INT_MAX. Not likely to appear.
- */
- if (ret >= size)
- return libbpf_err(-ERANGE);
- return 0;
- }
-
- ret = snprintf(buf, size, "Unknown libbpf error %d", err);
- buf[size - 1] = '\0';
- if (ret >= size)
- return libbpf_err(-ERANGE);
- return libbpf_err(-ENOENT);
-}
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index 477a3b3389a0..fc59b21b51b5 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -74,6 +74,8 @@
#define ELF64_ST_VISIBILITY(o) ((o) & 0x03)
#endif
+#define JUMPTABLES_SEC ".jumptables"
+
#define BTF_INFO_ENC(kind, kind_flag, vlen) \
((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
#define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type)
@@ -172,6 +174,16 @@ do { \
#define pr_info(fmt, ...) __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...) __pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__)
+/**
+ * @brief **libbpf_errstr()** returns string corresponding to numeric errno
+ * @param err negative numeric errno
+ * @return pointer to a string representation of the errno; the returned string
+ * may be invalidated by a subsequent call from the same thread.
+ */
+const char *libbpf_errstr(int err);
+
+#define errstr(err) libbpf_errstr(err)
+
#ifndef __has_builtin
#define __has_builtin(x) 0
#endif
@@ -712,6 +724,11 @@ static inline bool is_pow_of_2(size_t x)
return x && (x & (x - 1)) == 0;
}
+static inline __u32 ror32(__u32 v, int bits)
+{
+ return (v >> bits) | (v << (32 - bits));
+}
+
#define PROG_LOAD_ATTEMPTS 5
int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts);
@@ -736,4 +753,8 @@ int elf_resolve_pattern_offsets(const char *binary_path, const char *pattern,
int probe_fd(int fd);
+#define SHA256_DIGEST_LENGTH 32
+#define SHA256_DWORD_SIZE (SHA256_DIGEST_LENGTH / sizeof(__u64))
+
+void libbpf_sha256(const void *data, size_t len, __u8 out[SHA256_DIGEST_LENGTH]);
#endif /* __LIBBPF_LIBBPF_INTERNAL_H */
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
index 9dfbe7750f56..bccf4bb747e1 100644
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -364,6 +364,10 @@ static int probe_map_create(enum bpf_map_type map_type)
case BPF_MAP_TYPE_SOCKHASH:
case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
break;
+ case BPF_MAP_TYPE_INSN_ARRAY:
+ key_size = sizeof(__u32);
+ value_size = sizeof(struct bpf_insn_array_value);
+ break;
case BPF_MAP_TYPE_UNSPEC:
default:
return -EOPNOTSUPP;
diff --git a/tools/lib/bpf/libbpf_utils.c b/tools/lib/bpf/libbpf_utils.c
new file mode 100644
index 000000000000..ac3beae54cf6
--- /dev/null
+++ b/tools/lib/bpf/libbpf_utils.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+/*
+ * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
+ * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
+ * Copyright (C) 2015 Huawei Inc.
+ * Copyright (C) 2017 Nicira, Inc.
+ */
+
+#undef _GNU_SOURCE
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <linux/kernel.h>
+
+#include "libbpf.h"
+#include "libbpf_internal.h"
+
+#ifndef ENOTSUPP
+#define ENOTSUPP 524
+#endif
+
+/* make sure libbpf doesn't use kernel-only integer typedefs */
+#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
+
+#define ERRNO_OFFSET(e) ((e) - __LIBBPF_ERRNO__START)
+#define ERRCODE_OFFSET(c) ERRNO_OFFSET(LIBBPF_ERRNO__##c)
+#define NR_ERRNO (__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START)
+
+static const char *libbpf_strerror_table[NR_ERRNO] = {
+ [ERRCODE_OFFSET(LIBELF)] = "Something wrong in libelf",
+ [ERRCODE_OFFSET(FORMAT)] = "BPF object format invalid",
+ [ERRCODE_OFFSET(KVERSION)] = "'version' section incorrect or lost",
+ [ERRCODE_OFFSET(ENDIAN)] = "Endian mismatch",
+ [ERRCODE_OFFSET(INTERNAL)] = "Internal error in libbpf",
+ [ERRCODE_OFFSET(RELOC)] = "Relocation failed",
+ [ERRCODE_OFFSET(VERIFY)] = "Kernel verifier blocks program loading",
+ [ERRCODE_OFFSET(PROG2BIG)] = "Program too big",
+ [ERRCODE_OFFSET(KVER)] = "Incorrect kernel version",
+ [ERRCODE_OFFSET(PROGTYPE)] = "Kernel doesn't support this program type",
+ [ERRCODE_OFFSET(WRNGPID)] = "Wrong pid in netlink message",
+ [ERRCODE_OFFSET(INVSEQ)] = "Invalid netlink sequence",
+ [ERRCODE_OFFSET(NLPARSE)] = "Incorrect netlink message parsing",
+};
+
+int libbpf_strerror(int err, char *buf, size_t size)
+{
+ int ret;
+
+ if (!buf || !size)
+ return libbpf_err(-EINVAL);
+
+ err = err > 0 ? err : -err;
+
+ if (err < __LIBBPF_ERRNO__START) {
+ ret = strerror_r(err, buf, size);
+ buf[size - 1] = '\0';
+ return libbpf_err_errno(ret);
+ }
+
+ if (err < __LIBBPF_ERRNO__END) {
+ const char *msg;
+
+ msg = libbpf_strerror_table[ERRNO_OFFSET(err)];
+ ret = snprintf(buf, size, "%s", msg);
+ buf[size - 1] = '\0';
+ /* The length of the buf and msg is positive.
+ * A negative number may be returned only when the
+ * size exceeds INT_MAX. Not likely to appear.
+ */
+ if (ret >= size)
+ return libbpf_err(-ERANGE);
+ return 0;
+ }
+
+ ret = snprintf(buf, size, "Unknown libbpf error %d", err);
+ buf[size - 1] = '\0';
+ if (ret >= size)
+ return libbpf_err(-ERANGE);
+ return libbpf_err(-ENOENT);
+}
+
+const char *libbpf_errstr(int err)
+{
+ static __thread char buf[12];
+
+ if (err > 0)
+ err = -err;
+
+ switch (err) {
+ case -E2BIG: return "-E2BIG";
+ case -EACCES: return "-EACCES";
+ case -EADDRINUSE: return "-EADDRINUSE";
+ case -EADDRNOTAVAIL: return "-EADDRNOTAVAIL";
+ case -EAGAIN: return "-EAGAIN";
+ case -EALREADY: return "-EALREADY";
+ case -EBADF: return "-EBADF";
+ case -EBADFD: return "-EBADFD";
+ case -EBUSY: return "-EBUSY";
+ case -ECANCELED: return "-ECANCELED";
+ case -ECHILD: return "-ECHILD";
+ case -EDEADLK: return "-EDEADLK";
+ case -EDOM: return "-EDOM";
+ case -EEXIST: return "-EEXIST";
+ case -EFAULT: return "-EFAULT";
+ case -EFBIG: return "-EFBIG";
+ case -EILSEQ: return "-EILSEQ";
+ case -EINPROGRESS: return "-EINPROGRESS";
+ case -EINTR: return "-EINTR";
+ case -EINVAL: return "-EINVAL";
+ case -EIO: return "-EIO";
+ case -EISDIR: return "-EISDIR";
+ case -ELOOP: return "-ELOOP";
+ case -EMFILE: return "-EMFILE";
+ case -EMLINK: return "-EMLINK";
+ case -EMSGSIZE: return "-EMSGSIZE";
+ case -ENAMETOOLONG: return "-ENAMETOOLONG";
+ case -ENFILE: return "-ENFILE";
+ case -ENODATA: return "-ENODATA";
+ case -ENODEV: return "-ENODEV";
+ case -ENOENT: return "-ENOENT";
+ case -ENOEXEC: return "-ENOEXEC";
+ case -ENOLINK: return "-ENOLINK";
+ case -ENOMEM: return "-ENOMEM";
+ case -ENOSPC: return "-ENOSPC";
+ case -ENOTBLK: return "-ENOTBLK";
+ case -ENOTDIR: return "-ENOTDIR";
+ case -ENOTSUPP: return "-ENOTSUPP";
+ case -ENOTTY: return "-ENOTTY";
+ case -ENXIO: return "-ENXIO";
+ case -EOPNOTSUPP: return "-EOPNOTSUPP";
+ case -EOVERFLOW: return "-EOVERFLOW";
+ case -EPERM: return "-EPERM";
+ case -EPIPE: return "-EPIPE";
+ case -EPROTO: return "-EPROTO";
+ case -EPROTONOSUPPORT: return "-EPROTONOSUPPORT";
+ case -ERANGE: return "-ERANGE";
+ case -EROFS: return "-EROFS";
+ case -ESPIPE: return "-ESPIPE";
+ case -ESRCH: return "-ESRCH";
+ case -ETXTBSY: return "-ETXTBSY";
+ case -EUCLEAN: return "-EUCLEAN";
+ case -EXDEV: return "-EXDEV";
+ default:
+ snprintf(buf, sizeof(buf), "%d", err);
+ return buf;
+ }
+}
+
+static inline __u32 get_unaligned_be32(const void *p)
+{
+ __be32 val;
+
+ memcpy(&val, p, sizeof(val));
+ return be32_to_cpu(val);
+}
+
+static inline void put_unaligned_be32(__u32 val, void *p)
+{
+ __be32 be_val = cpu_to_be32(val);
+
+ memcpy(p, &be_val, sizeof(be_val));
+}
+
+#define SHA256_BLOCK_LENGTH 64
+#define Ch(x, y, z) (((x) & (y)) ^ (~(x) & (z)))
+#define Maj(x, y, z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
+#define Sigma_0(x) (ror32((x), 2) ^ ror32((x), 13) ^ ror32((x), 22))
+#define Sigma_1(x) (ror32((x), 6) ^ ror32((x), 11) ^ ror32((x), 25))
+#define sigma_0(x) (ror32((x), 7) ^ ror32((x), 18) ^ ((x) >> 3))
+#define sigma_1(x) (ror32((x), 17) ^ ror32((x), 19) ^ ((x) >> 10))
+
+static const __u32 sha256_K[64] = {
+ 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1,
+ 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
+ 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786,
+ 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
+ 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147,
+ 0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
+ 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b,
+ 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
+ 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a,
+ 0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
+ 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
+};
+
+#define SHA256_ROUND(i, a, b, c, d, e, f, g, h) \
+ { \
+ __u32 tmp = h + Sigma_1(e) + Ch(e, f, g) + sha256_K[i] + w[i]; \
+ d += tmp; \
+ h = tmp + Sigma_0(a) + Maj(a, b, c); \
+ }
+
+static void sha256_blocks(__u32 state[8], const __u8 *data, size_t nblocks)
+{
+ while (nblocks--) {
+ __u32 a = state[0];
+ __u32 b = state[1];
+ __u32 c = state[2];
+ __u32 d = state[3];
+ __u32 e = state[4];
+ __u32 f = state[5];
+ __u32 g = state[6];
+ __u32 h = state[7];
+ __u32 w[64];
+ int i;
+
+ for (i = 0; i < 16; i++)
+ w[i] = get_unaligned_be32(&data[4 * i]);
+ for (; i < ARRAY_SIZE(w); i++)
+ w[i] = sigma_1(w[i - 2]) + w[i - 7] +
+ sigma_0(w[i - 15]) + w[i - 16];
+ for (i = 0; i < ARRAY_SIZE(w); i += 8) {
+ SHA256_ROUND(i + 0, a, b, c, d, e, f, g, h);
+ SHA256_ROUND(i + 1, h, a, b, c, d, e, f, g);
+ SHA256_ROUND(i + 2, g, h, a, b, c, d, e, f);
+ SHA256_ROUND(i + 3, f, g, h, a, b, c, d, e);
+ SHA256_ROUND(i + 4, e, f, g, h, a, b, c, d);
+ SHA256_ROUND(i + 5, d, e, f, g, h, a, b, c);
+ SHA256_ROUND(i + 6, c, d, e, f, g, h, a, b);
+ SHA256_ROUND(i + 7, b, c, d, e, f, g, h, a);
+ }
+ state[0] += a;
+ state[1] += b;
+ state[2] += c;
+ state[3] += d;
+ state[4] += e;
+ state[5] += f;
+ state[6] += g;
+ state[7] += h;
+ data += SHA256_BLOCK_LENGTH;
+ }
+}
+
+void libbpf_sha256(const void *data, size_t len, __u8 out[SHA256_DIGEST_LENGTH])
+{
+ __u32 state[8] = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
+ 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 };
+ const __be64 bitcount = cpu_to_be64((__u64)len * 8);
+ __u8 final_data[2 * SHA256_BLOCK_LENGTH] = { 0 };
+ size_t final_len = len % SHA256_BLOCK_LENGTH;
+ int i;
+
+ sha256_blocks(state, data, len / SHA256_BLOCK_LENGTH);
+
+ memcpy(final_data, data + len - final_len, final_len);
+ final_data[final_len] = 0x80;
+ final_len = roundup(final_len + 9, SHA256_BLOCK_LENGTH);
+ memcpy(&final_data[final_len - 8], &bitcount, 8);
+
+ sha256_blocks(state, final_data, final_len / SHA256_BLOCK_LENGTH);
+
+ for (i = 0; i < ARRAY_SIZE(state); i++)
+ put_unaligned_be32(state[i], &out[4 * i]);
+}
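
A quick sanity check of the routine against the standard FIPS 180-2 test vector:

	__u8 digest[SHA256_DIGEST_LENGTH];
	libbpf_sha256("abc", 3, digest);
	/* digest == ba7816bf 8f01cfea 414140de 5dae2223
	 *           b00361a3 96177a9c b410ff61 f20015ad
	 */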
diff --git a/tools/lib/bpf/libbpf_version.h b/tools/lib/bpf/libbpf_version.h
index 28c58fb17250..99331e317dee 100644
--- a/tools/lib/bpf/libbpf_version.h
+++ b/tools/lib/bpf/libbpf_version.h
@@ -4,6 +4,6 @@
#define __LIBBPF_VERSION_H
#define LIBBPF_MAJOR_VERSION 1
-#define LIBBPF_MINOR_VERSION 6
+#define LIBBPF_MINOR_VERSION 7
#endif /* __LIBBPF_VERSION_H */
diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
index a469e5d4fee7..f4403e3cf994 100644
--- a/tools/lib/bpf/linker.c
+++ b/tools/lib/bpf/linker.c
@@ -25,7 +25,6 @@
#include "btf.h"
#include "libbpf_internal.h"
#include "strset.h"
-#include "str_error.h"
#define BTF_EXTERN_SEC ".extern"
@@ -2026,6 +2025,9 @@ static int linker_append_elf_sym(struct bpf_linker *linker, struct src_obj *obj,
obj->sym_map[src_sym_idx] = dst_sec->sec_sym_idx;
return 0;
}
+
+ if (strcmp(src_sec->sec_name, JUMPTABLES_SEC) == 0)
+ goto add_sym;
}
if (sym_bind == STB_LOCAL)
diff --git a/tools/lib/bpf/relo_core.c b/tools/lib/bpf/relo_core.c
index 2b83c98a1137..6eea5edba58a 100644
--- a/tools/lib/bpf/relo_core.c
+++ b/tools/lib/bpf/relo_core.c
@@ -64,7 +64,6 @@ enum libbpf_print_level {
#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
-#include "str_error.h"
#include "libbpf_internal.h"
#endif
diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c
index 9702b70da444..00ec4837a06d 100644
--- a/tools/lib/bpf/ringbuf.c
+++ b/tools/lib/bpf/ringbuf.c
@@ -21,7 +21,6 @@
#include "libbpf.h"
#include "libbpf_internal.h"
#include "bpf.h"
-#include "str_error.h"
struct ring {
ring_buffer_sample_fn sample_cb;
diff --git a/tools/lib/bpf/skel_internal.h b/tools/lib/bpf/skel_internal.h
index 4d5fa079b5d6..6a8f5c7a02eb 100644
--- a/tools/lib/bpf/skel_internal.h
+++ b/tools/lib/bpf/skel_internal.h
@@ -13,10 +13,15 @@
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/mman.h>
+#include <linux/keyctl.h>
#include <stdlib.h>
#include "bpf.h"
#endif
+#ifndef SHA256_DIGEST_LENGTH
+#define SHA256_DIGEST_LENGTH 32
+#endif
+
#ifndef __NR_bpf
# if defined(__mips__) && defined(_ABIO32)
# define __NR_bpf 4355
@@ -64,6 +69,11 @@ struct bpf_load_and_run_opts {
__u32 data_sz;
__u32 insns_sz;
const char *errstr;
+ void *signature;
+ __u32 signature_sz;
+ __s32 keyring_id;
+ void *excl_prog_hash;
+ __u32 excl_prog_hash_sz;
};
long kern_sys_bpf(__u32 cmd, void *attr, __u32 attr_size);
@@ -220,14 +230,19 @@ static inline int skel_map_create(enum bpf_map_type map_type,
const char *map_name,
__u32 key_size,
__u32 value_size,
- __u32 max_entries)
+ __u32 max_entries,
+ const void *excl_prog_hash,
+ __u32 excl_prog_hash_sz)
{
- const size_t attr_sz = offsetofend(union bpf_attr, map_extra);
+ const size_t attr_sz = offsetofend(union bpf_attr, excl_prog_hash_size);
union bpf_attr attr;
memset(&attr, 0, attr_sz);
attr.map_type = map_type;
+ attr.excl_prog_hash = (unsigned long) excl_prog_hash;
+ attr.excl_prog_hash_size = excl_prog_hash_sz;
+
strncpy(attr.map_name, map_name, sizeof(attr.map_name));
attr.key_size = key_size;
attr.value_size = value_size;
@@ -300,6 +315,35 @@ static inline int skel_link_create(int prog_fd, int target_fd,
return skel_sys_bpf(BPF_LINK_CREATE, &attr, attr_sz);
}
+static inline int skel_obj_get_info_by_fd(int fd)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, info);
+ __u8 sha[SHA256_DIGEST_LENGTH];
+ struct bpf_map_info info;
+ __u32 info_len = sizeof(info);
+ union bpf_attr attr;
+
+ memset(&info, 0, sizeof(info));
+ info.hash = (long) &sha;
+ info.hash_size = SHA256_DIGEST_LENGTH;
+
+ memset(&attr, 0, attr_sz);
+ attr.info.bpf_fd = fd;
+ attr.info.info = (long) &info;
+ attr.info.info_len = info_len;
+ return skel_sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, attr_sz);
+}
+
+static inline int skel_map_freeze(int fd)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, map_fd);
+ union bpf_attr attr;
+
+ memset(&attr, 0, attr_sz);
+ attr.map_fd = fd;
+
+ return skel_sys_bpf(BPF_MAP_FREEZE, &attr, attr_sz);
+}
#ifdef __KERNEL__
#define set_err
#else
@@ -308,12 +352,13 @@ static inline int skel_link_create(int prog_fd, int target_fd,
static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
{
- const size_t prog_load_attr_sz = offsetofend(union bpf_attr, fd_array);
+ const size_t prog_load_attr_sz = offsetofend(union bpf_attr, keyring_id);
const size_t test_run_attr_sz = offsetofend(union bpf_attr, test);
int map_fd = -1, prog_fd = -1, key = 0, err;
union bpf_attr attr;
- err = map_fd = skel_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, opts->data_sz, 1);
+ err = map_fd = skel_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, opts->data_sz, 1,
+ opts->excl_prog_hash, opts->excl_prog_hash_sz);
if (map_fd < 0) {
opts->errstr = "failed to create loader map";
set_err;
@@ -327,11 +372,34 @@ static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
goto out;
}
+#ifndef __KERNEL__
+ err = skel_map_freeze(map_fd);
+ if (err < 0) {
+ opts->errstr = "failed to freeze map";
+ set_err;
+ goto out;
+ }
+ err = skel_obj_get_info_by_fd(map_fd);
+ if (err < 0) {
+ opts->errstr = "failed to fetch obj info";
+ set_err;
+ goto out;
+ }
+#endif
+
memset(&attr, 0, prog_load_attr_sz);
attr.prog_type = BPF_PROG_TYPE_SYSCALL;
attr.insns = (long) opts->insns;
attr.insn_cnt = opts->insns_sz / sizeof(struct bpf_insn);
attr.license = (long) "Dual BSD/GPL";
+#ifndef __KERNEL__
+ attr.signature = (long) opts->signature;
+ attr.signature_size = opts->signature_sz;
+#else
+ if (opts->signature || opts->signature_sz)
+ pr_warn("signatures are not supported from bpf_preload\n");
+#endif
+ attr.keyring_id = opts->keyring_id;
memcpy(attr.prog_name, "__loader.prog", sizeof("__loader.prog"));
attr.fd_array = (long) &map_fd;
attr.log_level = opts->ctx->log_level;
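
Note the ordering enforced by the #ifndef __KERNEL__ block above: the loader map is frozen before its hash is fetched, since a digest over still-writable contents would be meaningless. A stubbed, illustrative sketch of that invariant (not the real syscall API):

class LoaderMap:  # stand-in for a BPF array map fd
    def __init__(self):
        self.frozen = False
    def freeze(self):      # models BPF_MAP_FREEZE
        self.frozen = True
    def fetch_hash(self):  # models BPF_OBJ_GET_INFO_BY_FD with info.hash set
        assert self.frozen, "hash is only stable once the map is read-only"
        return bytes(32)   # stand-in for the kernel-computed SHA-256

m = LoaderMap()
m.freeze()
assert len(m.fetch_hash()) == 32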
diff --git a/tools/lib/bpf/str_error.c b/tools/lib/bpf/str_error.c
deleted file mode 100644
index 9a541762f54c..000000000000
--- a/tools/lib/bpf/str_error.c
+++ /dev/null
@@ -1,104 +0,0 @@
-// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
-#undef _GNU_SOURCE
-#include <string.h>
-#include <stdio.h>
-#include <errno.h>
-#include "str_error.h"
-
-#ifndef ENOTSUPP
-#define ENOTSUPP 524
-#endif
-
-/* make sure libbpf doesn't use kernel-only integer typedefs */
-#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
-
-/*
- * Wrapper to allow for building in non-GNU systems such as Alpine Linux's musl
- * libc, while checking strerror_r() return to avoid having to check this in
- * all places calling it.
- */
-char *libbpf_strerror_r(int err, char *dst, int len)
-{
- int ret = strerror_r(err < 0 ? -err : err, dst, len);
- /* on glibc <2.13, ret == -1 and errno is set, if strerror_r() can't
- * handle the error, on glibc >=2.13 *positive* (errno-like) error
- * code is returned directly
- */
- if (ret == -1)
- ret = errno;
- if (ret) {
- if (ret == EINVAL)
- /* strerror_r() doesn't recognize this specific error */
- snprintf(dst, len, "unknown error (%d)", err < 0 ? err : -err);
- else
- snprintf(dst, len, "ERROR: strerror_r(%d)=%d", err, ret);
- }
- return dst;
-}
-
-const char *libbpf_errstr(int err)
-{
- static __thread char buf[12];
-
- if (err > 0)
- err = -err;
-
- switch (err) {
- case -E2BIG: return "-E2BIG";
- case -EACCES: return "-EACCES";
- case -EADDRINUSE: return "-EADDRINUSE";
- case -EADDRNOTAVAIL: return "-EADDRNOTAVAIL";
- case -EAGAIN: return "-EAGAIN";
- case -EALREADY: return "-EALREADY";
- case -EBADF: return "-EBADF";
- case -EBADFD: return "-EBADFD";
- case -EBUSY: return "-EBUSY";
- case -ECANCELED: return "-ECANCELED";
- case -ECHILD: return "-ECHILD";
- case -EDEADLK: return "-EDEADLK";
- case -EDOM: return "-EDOM";
- case -EEXIST: return "-EEXIST";
- case -EFAULT: return "-EFAULT";
- case -EFBIG: return "-EFBIG";
- case -EILSEQ: return "-EILSEQ";
- case -EINPROGRESS: return "-EINPROGRESS";
- case -EINTR: return "-EINTR";
- case -EINVAL: return "-EINVAL";
- case -EIO: return "-EIO";
- case -EISDIR: return "-EISDIR";
- case -ELOOP: return "-ELOOP";
- case -EMFILE: return "-EMFILE";
- case -EMLINK: return "-EMLINK";
- case -EMSGSIZE: return "-EMSGSIZE";
- case -ENAMETOOLONG: return "-ENAMETOOLONG";
- case -ENFILE: return "-ENFILE";
- case -ENODATA: return "-ENODATA";
- case -ENODEV: return "-ENODEV";
- case -ENOENT: return "-ENOENT";
- case -ENOEXEC: return "-ENOEXEC";
- case -ENOLINK: return "-ENOLINK";
- case -ENOMEM: return "-ENOMEM";
- case -ENOSPC: return "-ENOSPC";
- case -ENOTBLK: return "-ENOTBLK";
- case -ENOTDIR: return "-ENOTDIR";
- case -ENOTSUPP: return "-ENOTSUPP";
- case -ENOTTY: return "-ENOTTY";
- case -ENXIO: return "-ENXIO";
- case -EOPNOTSUPP: return "-EOPNOTSUPP";
- case -EOVERFLOW: return "-EOVERFLOW";
- case -EPERM: return "-EPERM";
- case -EPIPE: return "-EPIPE";
- case -EPROTO: return "-EPROTO";
- case -EPROTONOSUPPORT: return "-EPROTONOSUPPORT";
- case -ERANGE: return "-ERANGE";
- case -EROFS: return "-EROFS";
- case -ESPIPE: return "-ESPIPE";
- case -ESRCH: return "-ESRCH";
- case -ETXTBSY: return "-ETXTBSY";
- case -EUCLEAN: return "-EUCLEAN";
- case -EXDEV: return "-EXDEV";
- default:
- snprintf(buf, sizeof(buf), "%d", err);
- return buf;
- }
-}
diff --git a/tools/lib/bpf/str_error.h b/tools/lib/bpf/str_error.h
deleted file mode 100644
index 53e7fbffc13e..000000000000
--- a/tools/lib/bpf/str_error.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
-#ifndef __LIBBPF_STR_ERROR_H
-#define __LIBBPF_STR_ERROR_H
-
-#define STRERR_BUFSIZE 128
-
-char *libbpf_strerror_r(int err, char *dst, int len);
-
-/**
- * @brief **libbpf_errstr()** returns string corresponding to numeric errno
- * @param err negative numeric errno
- * @return pointer to string representation of the errno, that is invalidated
- * upon the next call.
- */
-const char *libbpf_errstr(int err);
-
-#define errstr(err) libbpf_errstr(err)
-
-#endif /* __LIBBPF_STR_ERROR_H */
diff --git a/tools/lib/bpf/usdt.bpf.h b/tools/lib/bpf/usdt.bpf.h
index 2a7865c8e3fe..43deb05a5197 100644
--- a/tools/lib/bpf/usdt.bpf.h
+++ b/tools/lib/bpf/usdt.bpf.h
@@ -34,13 +34,32 @@ enum __bpf_usdt_arg_type {
BPF_USDT_ARG_CONST,
BPF_USDT_ARG_REG,
BPF_USDT_ARG_REG_DEREF,
+ BPF_USDT_ARG_SIB,
};
+/*
+ * This struct layout is designed specifically to be backwards/forward
+ * compatible between libbpf versions for ARG_CONST, ARG_REG, and
+ * ARG_REG_DEREF modes. ARG_SIB requires libbpf v1.7+.
+ */
struct __bpf_usdt_arg_spec {
/* u64 scalar interpreted depending on arg_type, see below */
__u64 val_off;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
/* arg location case, see bpf_usdt_arg() for details */
- enum __bpf_usdt_arg_type arg_type;
+ enum __bpf_usdt_arg_type arg_type: 8;
+ /* index register offset within struct pt_regs */
+ __u16 idx_reg_off: 12;
+ /* scale factor for index register (1, 2, 4, or 8) */
+ __u16 scale_bitshift: 4;
+ /* reserved for future use, keeps reg_off offset stable */
+ __u8 __reserved: 8;
+#else
+ __u8 __reserved: 8;
+ __u16 idx_reg_off: 12;
+ __u16 scale_bitshift: 4;
+ enum __bpf_usdt_arg_type arg_type: 8;
+#endif
/* offset of referenced register within struct pt_regs */
short reg_off;
/* whether arg should be interpreted as signed value */
@@ -149,7 +168,7 @@ int bpf_usdt_arg(struct pt_regs *ctx, __u64 arg_num, long *res)
{
struct __bpf_usdt_spec *spec;
struct __bpf_usdt_arg_spec *arg_spec;
- unsigned long val;
+ unsigned long val, idx;
int err, spec_id;
*res = 0;
@@ -204,6 +223,27 @@ int bpf_usdt_arg(struct pt_regs *ctx, __u64 arg_num, long *res)
val >>= arg_spec->arg_bitshift;
#endif
break;
+ case BPF_USDT_ARG_SIB:
+ /* Arg is in memory addressed by SIB (Scale-Index-Base) mode
+ * (e.g., "-1@-96(%rbp,%rax,8)" in USDT arg spec). We first
+ * fetch the base register contents and the index register
+ * contents from pt_regs. Then we calculate the final address
+ * as base + (index * scale) + offset, and do a user-space
+ * probe read to fetch the argument value.
+ */
+ err = bpf_probe_read_kernel(&val, sizeof(val), (void *)ctx + arg_spec->reg_off);
+ if (err)
+ return err;
+ err = bpf_probe_read_kernel(&idx, sizeof(idx), (void *)ctx + arg_spec->idx_reg_off);
+ if (err)
+ return err;
+ err = bpf_probe_read_user(&val, sizeof(val), (void *)(val + (idx << arg_spec->scale_bitshift) + arg_spec->val_off));
+ if (err)
+ return err;
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ val >>= arg_spec->arg_bitshift;
+#endif
+ break;
default:
return -EINVAL;
}
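
As a worked example of the comment above, take the spec "-1@-96(%rbp,%rax,8)" with assumed register contents; the dereferenced address is base + (index << scale_bitshift) + val_off:

rbp = 0x7ffc00001000   # assumed base register contents
rax = 3                # assumed index register contents
scale_bitshift = 3     # scale factor 8 is encoded as shift 3
val_off = -96
addr = rbp + (rax << scale_bitshift) + val_off
assert addr == 0x7ffc00001000 + 24 - 96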
diff --git a/tools/lib/bpf/usdt.c b/tools/lib/bpf/usdt.c
index 4e4a52742b01..d1524f6f54ae 100644
--- a/tools/lib/bpf/usdt.c
+++ b/tools/lib/bpf/usdt.c
@@ -20,7 +20,6 @@
#include "libbpf_common.h"
#include "libbpf_internal.h"
#include "hashmap.h"
-#include "str_error.h"
/* libbpf's USDT support consists of BPF-side state/code and user-space
* state/code working together in concert. BPF-side parts are defined in
@@ -59,7 +58,7 @@
*
* STAP_PROBE3(my_usdt_provider, my_usdt_probe_name, 123, x, &y);
*
- * USDT is identified by it's <provider-name>:<probe-name> pair of names. Each
+ * USDT is identified by its <provider-name>:<probe-name> pair of names. Each
* individual USDT has a fixed number of arguments (3 in the above example)
* and specifies values of each argument as if it was a function call.
*
@@ -81,7 +80,7 @@
* NOP instruction that kernel can replace with an interrupt instruction to
* trigger instrumentation code (BPF program for all that we care about).
*
- * Semaphore above is and optional feature. It records an address of a 2-byte
+ * Semaphore above is an optional feature. It records an address of a 2-byte
* refcount variable (normally in '.probes' ELF section) used for signaling if
* there is anything that is attached to USDT. This is useful for user
* applications if, for example, they need to prepare some arguments that are
@@ -121,7 +120,7 @@
* a uprobe BPF program (which for kernel, at least currently, is just a kprobe
* program, so BPF_PROG_TYPE_KPROBE program type). With the only difference
* that uprobe is usually attached at the function entry, while USDT will
- * normally will be somewhere inside the function. But it should always be
+ * normally be somewhere inside the function. But it should always be
* pointing to NOP instruction, which makes such uprobes the fastest uprobe
* kind.
*
@@ -151,7 +150,7 @@
* libbpf sets to spec ID during attach time, or, if kernel is too old to
* support BPF cookie, through IP-to-spec-ID map that libbpf maintains in such
* case. The latter means that some modes of operation can't be supported
- * without BPF cookie. Such mode is attaching to shared library "generically",
+ * without BPF cookie. Such a mode is attaching to shared library "generically",
* without specifying target process. In such case, it's impossible to
* calculate absolute IP addresses for IP-to-spec-ID map, and thus such mode
* is not supported without BPF cookie support.
@@ -185,7 +184,7 @@
* as even if USDT spec string is the same, USDT cookie value can be
* different. It was deemed excessive to try to deduplicate across independent
* USDT attachments by taking into account USDT spec string *and* USDT cookie
- * value, which would complicated spec ID accounting significantly for little
+ * value, which would complicate spec ID accounting significantly for little
* gain.
*/
@@ -200,12 +199,23 @@ enum usdt_arg_type {
USDT_ARG_CONST,
USDT_ARG_REG,
USDT_ARG_REG_DEREF,
+ USDT_ARG_SIB,
};
/* should match exactly struct __bpf_usdt_arg_spec from usdt.bpf.h */
struct usdt_arg_spec {
__u64 val_off;
- enum usdt_arg_type arg_type;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ enum usdt_arg_type arg_type: 8;
+ __u16 idx_reg_off: 12;
+ __u16 scale_bitshift: 4;
+ __u8 __reserved: 8; /* keep reg_off offset stable */
+#else
+ __u8 __reserved: 8; /* keep reg_off offset stable */
+ __u16 idx_reg_off: 12;
+ __u16 scale_bitshift: 4;
+ enum usdt_arg_type arg_type: 8;
+#endif
short reg_off;
bool arg_signed;
char arg_bitshift;
@@ -570,9 +580,8 @@ static struct elf_seg *find_vma_seg(struct elf_seg *segs, size_t seg_cnt, long o
return NULL;
}
-static int parse_usdt_note(Elf *elf, const char *path, GElf_Nhdr *nhdr,
- const char *data, size_t name_off, size_t desc_off,
- struct usdt_note *usdt_note);
+static int parse_usdt_note(GElf_Nhdr *nhdr, const char *data, size_t name_off,
+ size_t desc_off, struct usdt_note *usdt_note);
static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie);
@@ -626,7 +635,7 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
struct elf_seg *seg = NULL;
void *tmp;
- err = parse_usdt_note(elf, path, &nhdr, data->d_buf, name_off, desc_off, &note);
+ err = parse_usdt_note(&nhdr, data->d_buf, name_off, desc_off, &note);
if (err)
goto err_out;
@@ -1132,8 +1141,7 @@ err_out:
/* Parse out USDT ELF note from '.note.stapsdt' section.
* Logic inspired by perf's code.
*/
-static int parse_usdt_note(Elf *elf, const char *path, GElf_Nhdr *nhdr,
- const char *data, size_t name_off, size_t desc_off,
+static int parse_usdt_note(GElf_Nhdr *nhdr, const char *data, size_t name_off, size_t desc_off,
struct usdt_note *note)
{
const char *provider, *name, *args;
@@ -1283,11 +1291,51 @@ static int calc_pt_regs_off(const char *reg_name)
static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
{
- char reg_name[16];
- int len, reg_off;
- long off;
+ char reg_name[16] = {0}, idx_reg_name[16] = {0};
+ int len, reg_off, idx_reg_off, scale = 1;
+ long off = 0;
+
+ if (sscanf(arg_str, " %d @ %ld ( %%%15[^,] , %%%15[^,] , %d ) %n",
+ arg_sz, &off, reg_name, idx_reg_name, &scale, &len) == 5 ||
+ sscanf(arg_str, " %d @ ( %%%15[^,] , %%%15[^,] , %d ) %n",
+ arg_sz, reg_name, idx_reg_name, &scale, &len) == 4 ||
+ sscanf(arg_str, " %d @ %ld ( %%%15[^,] , %%%15[^)] ) %n",
+ arg_sz, &off, reg_name, idx_reg_name, &len) == 4 ||
+ sscanf(arg_str, " %d @ ( %%%15[^,] , %%%15[^)] ) %n",
+ arg_sz, reg_name, idx_reg_name, &len) == 3
+ ) {
+ /*
+ * Scale Index Base case:
+ * 1@-96(%rbp,%rax,8)
+ * 1@(%rbp,%rax,8)
+ * 1@-96(%rbp,%rax)
+ * 1@(%rbp,%rax)
+ */
+ arg->arg_type = USDT_ARG_SIB;
+ arg->val_off = off;
- if (sscanf(arg_str, " %d @ %ld ( %%%15[^)] ) %n", arg_sz, &off, reg_name, &len) == 3) {
+ reg_off = calc_pt_regs_off(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+
+ idx_reg_off = calc_pt_regs_off(idx_reg_name);
+ if (idx_reg_off < 0)
+ return idx_reg_off;
+ arg->idx_reg_off = idx_reg_off;
+
+ /* validate scale factor and set fields directly */
+ switch (scale) {
+ case 1: arg->scale_bitshift = 0; break;
+ case 2: arg->scale_bitshift = 1; break;
+ case 4: arg->scale_bitshift = 2; break;
+ case 8: arg->scale_bitshift = 3; break;
+ default:
+ pr_warn("usdt: invalid SIB scale %d, expected 1, 2, 4, 8\n", scale);
+ return -EINVAL;
+ }
+ } else if (sscanf(arg_str, " %d @ %ld ( %%%15[^)] ) %n",
+ arg_sz, &off, reg_name, &len) == 3) {
/* Memory dereference case, e.g., -4@-20(%rbp) */
arg->arg_type = USDT_ARG_REG_DEREF;
arg->val_off = off;
@@ -1306,6 +1354,7 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
} else if (sscanf(arg_str, " %d @ %%%15s %n", arg_sz, reg_name, &len) == 2) {
/* Register read case, e.g., -4@%eax */
arg->arg_type = USDT_ARG_REG;
+ /* register read has no memory offset */
arg->val_off = 0;
reg_off = calc_pt_regs_off(reg_name);
@@ -1327,8 +1376,6 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
#elif defined(__s390x__)
-/* Do not support __s390__ for now, since user_pt_regs is broken with -m31. */
-
static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
{
unsigned int reg;
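
For illustration only, the four SIB notations accepted by the sscanf chain above can be condensed into one hypothetical Python regular expression (whitespace tolerance omitted; scale validation happens separately, as in the switch statement):

import re

SIB = re.compile(r"\s*(-?\d+)\s*@\s*(-?\d+)?\(%(\w+),%(\w+)(?:,(\d+))?\)")

assert SIB.match("1@-96(%rbp,%rax,8)").groups() == ("1", "-96", "rbp", "rax", "8")
assert SIB.match("1@(%rbp,%rax)").groups() == ("1", None, "rbp", "rax", None)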
diff --git a/tools/lib/perf/Documentation/libperf.txt b/tools/lib/perf/Documentation/libperf.txt
index 59aabdd3cabf..4072bc9b7670 100644
--- a/tools/lib/perf/Documentation/libperf.txt
+++ b/tools/lib/perf/Documentation/libperf.txt
@@ -210,6 +210,7 @@ SYNOPSIS
struct perf_record_time_conv;
struct perf_record_header_feature;
struct perf_record_compressed;
+ struct perf_record_compressed2;
--
DESCRIPTION
diff --git a/tools/lib/perf/cpumap.c b/tools/lib/perf/cpumap.c
index 4454a5987570..4160e7d2e120 100644
--- a/tools/lib/perf/cpumap.c
+++ b/tools/lib/perf/cpumap.c
@@ -242,6 +242,16 @@ out:
return cpus;
}
+struct perf_cpu_map *perf_cpu_map__new_int(int cpu)
+{
+ struct perf_cpu_map *cpus = perf_cpu_map__alloc(1);
+
+ if (cpus)
+ RC_CHK_ACCESS(cpus)->map[0].cpu = cpu;
+
+ return cpus;
+}
+
static int __perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
return RC_CHK_ACCESS(cpus)->nr;
@@ -358,10 +368,12 @@ struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map)
.cpu = -1
};
- // cpu_map__trim_new() qsort()s it, cpu_map__default_new() sorts it as well.
- return __perf_cpu_map__nr(map) > 0
- ? __perf_cpu_map__cpu(map, __perf_cpu_map__nr(map) - 1)
- : result;
+ if (!map)
+ return result;
+
+ // The CPUs are always sorted and nr is always > 0, as a 0-length map is
+ // encoded as NULL.
+ return __perf_cpu_map__cpu(map, __perf_cpu_map__nr(map) - 1);
}
/** Is 'b' a subset of 'a'. */
@@ -443,21 +455,33 @@ int perf_cpu_map__merge(struct perf_cpu_map **orig, struct perf_cpu_map *other)
struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
struct perf_cpu_map *other)
{
- struct perf_cpu *tmp_cpus;
- int tmp_len;
int i, j, k;
- struct perf_cpu_map *merged = NULL;
+ struct perf_cpu_map *merged;
if (perf_cpu_map__is_subset(other, orig))
return perf_cpu_map__get(orig);
if (perf_cpu_map__is_subset(orig, other))
return perf_cpu_map__get(other);
- tmp_len = max(__perf_cpu_map__nr(orig), __perf_cpu_map__nr(other));
- tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
- if (!tmp_cpus)
+ i = j = k = 0;
+ while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
+ if (__perf_cpu_map__cpu(orig, i).cpu < __perf_cpu_map__cpu(other, j).cpu)
+ i++;
+ else if (__perf_cpu_map__cpu(orig, i).cpu > __perf_cpu_map__cpu(other, j).cpu)
+ j++;
+ else { /* CPUs match. */
+ i++;
+ j++;
+ k++;
+ }
+ }
+ if (k == 0) /* Maps are completely disjoint. */
return NULL;
+ merged = perf_cpu_map__alloc(k);
+ if (!merged)
+ return NULL;
+ /* Entries are added to merged in sorted order, so no need to sort again. */
i = j = k = 0;
while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
if (__perf_cpu_map__cpu(orig, i).cpu < __perf_cpu_map__cpu(other, j).cpu)
@@ -466,11 +490,8 @@ struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
j++;
else {
j++;
- tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
+ RC_CHK_ACCESS(merged)->map[k++] = __perf_cpu_map__cpu(orig, i++);
}
}
- if (k)
- merged = cpu_map__trim_new(k, tmp_cpus);
- free(tmp_cpus);
return merged;
}
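
The rewritten intersection walks both sorted maps twice: a counting pass sizes the allocation exactly (k entries), then an identical pass fills it, so no re-sort is needed. The core merge step, as a minimal sketch:

def intersect(a, b):  # a, b: sorted CPU lists
    out, i, j = [], 0, 0
    while i < len(a) and j < len(b):
        if a[i] < b[j]:
            i += 1
        elif a[i] > b[j]:
            j += 1
        else:               # CPUs match
            out.append(a[i])
            i += 1
            j += 1
    return out or None      # fully disjoint maps are encoded as NULL

assert intersect([1, 2, 4, 6], [2, 3, 6]) == [2, 6]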
diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
index b1f4c8176b32..3ed023f4b190 100644
--- a/tools/lib/perf/evlist.c
+++ b/tools/lib/perf/evlist.c
@@ -36,49 +36,88 @@ void perf_evlist__init(struct perf_evlist *evlist)
static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
struct perf_evsel *evsel)
{
- if (evsel->system_wide) {
- /* System wide: set the cpu map of the evsel to all online CPUs. */
- perf_cpu_map__put(evsel->cpus);
- evsel->cpus = perf_cpu_map__new_online_cpus();
- } else if (evlist->has_user_cpus && evsel->is_pmu_core) {
- /*
- * User requested CPUs on a core PMU, ensure the requested CPUs
- * are valid by intersecting with those of the PMU.
- */
+ if (perf_cpu_map__is_empty(evsel->cpus)) {
+ if (perf_cpu_map__is_empty(evsel->pmu_cpus)) {
+ /*
+ * Assume the unset PMU cpus were for a system-wide
+ * event, like a software or tracepoint.
+ */
+ evsel->pmu_cpus = perf_cpu_map__new_online_cpus();
+ }
+ if (evlist->has_user_cpus && !evsel->system_wide) {
+ /*
+ * Use the user CPUs unless the evsel is set to be
+ * system wide, such as the dummy event.
+ */
+ evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
+ } else {
+ /*
+ * System wide and other modes, assume the cpu map
+ * should be set to all PMU CPUs.
+ */
+ evsel->cpus = perf_cpu_map__get(evsel->pmu_cpus);
+ }
+ }
+ /*
+ * Avoid "any CPU"(-1) for uncore and PMUs that require a CPU, even if
+ * requested.
+ */
+ if (evsel->requires_cpu && perf_cpu_map__has_any_cpu(evsel->cpus)) {
perf_cpu_map__put(evsel->cpus);
- evsel->cpus = perf_cpu_map__intersect(evlist->user_requested_cpus, evsel->own_cpus);
+ evsel->cpus = perf_cpu_map__get(evsel->pmu_cpus);
+ }
- /*
- * Empty cpu lists would eventually get opened as "any" so remove
- * genuinely empty ones before they're opened in the wrong place.
- */
- if (perf_cpu_map__is_empty(evsel->cpus)) {
- struct perf_evsel *next = perf_evlist__next(evlist, evsel);
-
- perf_evlist__remove(evlist, evsel);
- /* Keep idx contiguous */
- if (next)
- list_for_each_entry_from(next, &evlist->entries, node)
- next->idx--;
+ /*
+ * Globally requested CPUs replace user requested unless the evsel is
+ * set to be system wide.
+ */
+ if (evlist->has_user_cpus && !evsel->system_wide) {
+ assert(!perf_cpu_map__has_any_cpu(evlist->user_requested_cpus));
+ if (!perf_cpu_map__equal(evsel->cpus, evlist->user_requested_cpus)) {
+ perf_cpu_map__put(evsel->cpus);
+ evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
}
- } else if (!evsel->own_cpus || evlist->has_user_cpus ||
- (!evsel->requires_cpu && perf_cpu_map__has_any_cpu(evlist->user_requested_cpus))) {
- /*
- * The PMU didn't specify a default cpu map, this isn't a core
- * event and the user requested CPUs or the evlist user
- * requested CPUs have the "any CPU" (aka dummy) CPU value. In
- * which case use the user requested CPUs rather than the PMU
- * ones.
- */
+ }
+
+ /* Ensure cpus only references valid PMU CPUs. */
+ if (!perf_cpu_map__has_any_cpu(evsel->cpus) &&
+ !perf_cpu_map__is_subset(evsel->pmu_cpus, evsel->cpus)) {
+ struct perf_cpu_map *tmp = perf_cpu_map__intersect(evsel->pmu_cpus, evsel->cpus);
+
perf_cpu_map__put(evsel->cpus);
- evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
- } else if (evsel->cpus != evsel->own_cpus) {
- /*
- * No user requested cpu map but the PMU cpu map doesn't match
- * the evsel's. Reset it back to the PMU cpu map.
- */
+ evsel->cpus = tmp;
+ }
+
+ /*
+ * Was the event requested on all the PMU's CPUs while the user
+ * requested any CPU (-1)? If so, switch to any CPU (-1) to reduce the number
+ * of events.
+ */
+ if (!evsel->system_wide &&
+ !evsel->requires_cpu &&
+ perf_cpu_map__equal(evsel->cpus, evsel->pmu_cpus) &&
+ perf_cpu_map__has_any_cpu(evlist->user_requested_cpus)) {
perf_cpu_map__put(evsel->cpus);
- evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
+ evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
+ }
+
+ /* Sanity check assert before the evsel is potentially removed. */
+ assert(!evsel->requires_cpu || !perf_cpu_map__has_any_cpu(evsel->cpus));
+
+ /*
+ * Empty cpu lists would eventually get opened as "any" so remove
+ * genuinely empty ones before they're opened in the wrong place.
+ */
+ if (perf_cpu_map__is_empty(evsel->cpus)) {
+ struct perf_evsel *next = perf_evlist__next(evlist, evsel);
+
+ perf_evlist__remove(evlist, evsel);
+ /* Keep idx contiguous */
+ if (next)
+ list_for_each_entry_from(next, &evlist->entries, node)
+ next->idx--;
+
+ return;
}
if (evsel->system_wide) {
@@ -98,6 +137,10 @@ static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
evlist->needs_map_propagation = true;
+ /* Clear the all_cpus set which will be merged into during propagation. */
+ perf_cpu_map__put(evlist->all_cpus);
+ evlist->all_cpus = NULL;
+
list_for_each_entry_safe(evsel, n, &evlist->entries, node)
__perf_evlist__propagate_maps(evlist, evsel);
}
diff --git a/tools/lib/perf/evsel.c b/tools/lib/perf/evsel.c
index c475319e2e41..13a307fc75ae 100644
--- a/tools/lib/perf/evsel.c
+++ b/tools/lib/perf/evsel.c
@@ -40,8 +40,19 @@ struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
return evsel;
}
+void perf_evsel__exit(struct perf_evsel *evsel)
+{
+ assert(evsel->fd == NULL); /* If not, the fds were never closed. */
+ assert(evsel->mmap == NULL); /* If not, munmap was never called. */
+ assert(evsel->sample_id == NULL); /* If not, free_id was never called. */
+ perf_cpu_map__put(evsel->cpus);
+ perf_cpu_map__put(evsel->pmu_cpus);
+ perf_thread_map__put(evsel->threads);
+}
+
void perf_evsel__delete(struct perf_evsel *evsel)
{
+ perf_evsel__exit(evsel);
free(evsel);
}
diff --git a/tools/lib/perf/include/internal/evsel.h b/tools/lib/perf/include/internal/evsel.h
index ea78defa77d0..fefe64ba5e26 100644
--- a/tools/lib/perf/include/internal/evsel.h
+++ b/tools/lib/perf/include/internal/evsel.h
@@ -99,7 +99,7 @@ struct perf_evsel {
* cpu map for opening the event on, for example, the first CPU on a
* socket for an uncore event.
*/
- struct perf_cpu_map *own_cpus;
+ struct perf_cpu_map *pmu_cpus;
struct perf_thread_map *threads;
struct xyarray *fd;
struct xyarray *mmap;
@@ -133,6 +133,7 @@ struct perf_evsel {
void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr,
int idx);
+void perf_evsel__exit(struct perf_evsel *evsel);
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
void perf_evsel__close_fd(struct perf_evsel *evsel);
void perf_evsel__free_fd(struct perf_evsel *evsel);
diff --git a/tools/lib/perf/include/perf/core.h b/tools/lib/perf/include/perf/core.h
index a3f6d68edad7..06cc132d88cf 100644
--- a/tools/lib/perf/include/perf/core.h
+++ b/tools/lib/perf/include/perf/core.h
@@ -5,7 +5,7 @@
#include <stdarg.h>
#ifndef LIBPERF_API
-#define LIBPERF_API __attribute__((visibility("default")))
+#define LIBPERF_API extern __attribute__((visibility("default")))
#endif
enum libperf_print_level {
diff --git a/tools/lib/perf/include/perf/cpumap.h b/tools/lib/perf/include/perf/cpumap.h
index 8c1ab0f9194e..58cc5c5fa47c 100644
--- a/tools/lib/perf/include/perf/cpumap.h
+++ b/tools/lib/perf/include/perf/cpumap.h
@@ -37,6 +37,8 @@ LIBPERF_API struct perf_cpu_map *perf_cpu_map__new_online_cpus(void);
* perf_cpu_map__new_online_cpus is returned.
*/
LIBPERF_API struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list);
+/** perf_cpu_map__new_int - create a map containing the single given cpu. */
+LIBPERF_API struct perf_cpu_map *perf_cpu_map__new_int(int cpu);
LIBPERF_API struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map);
LIBPERF_API int perf_cpu_map__merge(struct perf_cpu_map **orig,
struct perf_cpu_map *other);
diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h
index 37bb7771d914..43a8cb04994f 100644
--- a/tools/lib/perf/include/perf/event.h
+++ b/tools/lib/perf/include/perf/event.h
@@ -151,6 +151,18 @@ struct perf_record_switch {
__u32 next_prev_tid;
};
+struct perf_record_callchain_deferred {
+ struct perf_event_header header;
+ /*
+ * This is to match kernel and (deferred) user stacks together.
+ * The kernel part will be in the sample callchain array after
+ * the PERF_CONTEXT_USER_DEFERRED entry.
+ */
+ __u64 cookie;
+ __u64 nr;
+ __u64 ips[];
+};
+
struct perf_record_header_attr {
struct perf_event_header header;
struct perf_event_attr attr;
@@ -291,6 +303,7 @@ struct perf_record_header_event_type {
struct perf_record_header_tracing_data {
struct perf_event_header header;
__u32 size;
+ __u32 pad;
};
#define PERF_RECORD_MISC_BUILD_ID_SIZE (1 << 15)
@@ -457,6 +470,32 @@ struct perf_record_compressed {
char data[];
};
+/*
+ * `header.size` includes the padding we are going to add while writing the record.
+ * `data_size` only includes the size of `data[]` itself.
+ */
+struct perf_record_compressed2 {
+ struct perf_event_header header;
+ __u64 data_size;
+ char data[];
+};
+
+#define BPF_METADATA_KEY_LEN 64
+#define BPF_METADATA_VALUE_LEN 256
+#define BPF_PROG_NAME_LEN KSYM_NAME_LEN
+
+struct perf_record_bpf_metadata_entry {
+ char key[BPF_METADATA_KEY_LEN];
+ char value[BPF_METADATA_VALUE_LEN];
+};
+
+struct perf_record_bpf_metadata {
+ struct perf_event_header header;
+ char prog_name[BPF_PROG_NAME_LEN];
+ __u64 nr_entries;
+ struct perf_record_bpf_metadata_entry entries[];
+};
+
enum perf_user_event_type { /* above any possible kernel type */
PERF_RECORD_USER_TYPE_START = 64,
PERF_RECORD_HEADER_ATTR = 64,
@@ -478,6 +517,8 @@ enum perf_user_event_type { /* above any possible kernel type */
PERF_RECORD_HEADER_FEATURE = 80,
PERF_RECORD_COMPRESSED = 81,
PERF_RECORD_FINISHED_INIT = 82,
+ PERF_RECORD_COMPRESSED2 = 83,
+ PERF_RECORD_BPF_METADATA = 84,
PERF_RECORD_HEADER_MAX
};
@@ -494,6 +535,7 @@ union perf_event {
struct perf_record_read read;
struct perf_record_throttle throttle;
struct perf_record_sample sample;
+ struct perf_record_callchain_deferred callchain_deferred;
struct perf_record_bpf_event bpf;
struct perf_record_ksymbol ksymbol;
struct perf_record_text_poke_event text_poke;
@@ -518,6 +560,8 @@ union perf_event {
struct perf_record_time_conv time_conv;
struct perf_record_header_feature feat;
struct perf_record_compressed pack;
+ struct perf_record_compressed2 pack2;
+ struct perf_record_bpf_metadata bpf_metadata;
};
#endif /* __LIBPERF_EVENT_H */
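
To make the header.size vs. data_size distinction concrete, a small worked example (8-byte record alignment and sizes assumed for illustration):

HDR = 8                            # sizeof(struct perf_event_header), assumed
data_size = 13                     # raw payload bytes in data[]
unpadded = HDR + 8 + data_size     # header + __u64 data_size + payload
header_size = (unpadded + 7) & ~7  # the writer pads the record to 8 bytes
assert (header_size, data_size) == (32, 13)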
diff --git a/tools/lib/perf/include/perf/threadmap.h b/tools/lib/perf/include/perf/threadmap.h
index 8b40e7777cea..44deb815b817 100644
--- a/tools/lib/perf/include/perf/threadmap.h
+++ b/tools/lib/perf/include/perf/threadmap.h
@@ -14,6 +14,7 @@ LIBPERF_API void perf_thread_map__set_pid(struct perf_thread_map *map, int idx,
LIBPERF_API char *perf_thread_map__comm(struct perf_thread_map *map, int idx);
LIBPERF_API int perf_thread_map__nr(struct perf_thread_map *threads);
LIBPERF_API pid_t perf_thread_map__pid(struct perf_thread_map *map, int idx);
+LIBPERF_API int perf_thread_map__idx(struct perf_thread_map *map, pid_t pid);
LIBPERF_API struct perf_thread_map *perf_thread_map__get(struct perf_thread_map *map);
LIBPERF_API void perf_thread_map__put(struct perf_thread_map *map);
diff --git a/tools/lib/perf/mmap.c b/tools/lib/perf/mmap.c
index c1a51d925e0e..ec124eb0ec0a 100644
--- a/tools/lib/perf/mmap.c
+++ b/tools/lib/perf/mmap.c
@@ -508,7 +508,7 @@ int perf_mmap__read_self(struct perf_mmap *map, struct perf_counts_values *count
idx = READ_ONCE(pc->index);
cnt = READ_ONCE(pc->offset);
if (pc->cap_user_rdpmc && idx) {
- s64 evcnt = read_perf_counter(idx - 1);
+ u64 evcnt = read_perf_counter(idx - 1);
u16 width = READ_ONCE(pc->pmc_width);
evcnt <<= 64 - width;
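
The s64 -> u64 change matters because the counter is narrowed to the PMC width by this left shift and (assuming the usual matching right shift that follows in the unchanged code) a shift back. With an unsigned type the pair plainly truncates instead of smearing a sign bit; a sketch assuming a 48-bit counter:

width = 48
mask64 = (1 << 64) - 1
evcnt = (1 << 47) | 5                                    # top counter bit set
u = ((evcnt << (64 - width)) & mask64) >> (64 - width)   # logical shifts
assert u == evcnt                                        # truncation to 48 bits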
diff --git a/tools/lib/perf/threadmap.c b/tools/lib/perf/threadmap.c
index 07968f3ea093..db431b036f57 100644
--- a/tools/lib/perf/threadmap.c
+++ b/tools/lib/perf/threadmap.c
@@ -97,5 +97,22 @@ int perf_thread_map__nr(struct perf_thread_map *threads)
pid_t perf_thread_map__pid(struct perf_thread_map *map, int idx)
{
+ if (!map) {
+ assert(idx == 0);
+ return -1;
+ }
+
return map->map[idx].pid;
}
+
+int perf_thread_map__idx(struct perf_thread_map *threads, pid_t pid)
+{
+ if (!threads)
+ return pid == -1 ? 0 : -1;
+
+ for (int i = 0; i < threads->nr; ++i) {
+ if (threads->map[i].pid == pid)
+ return i;
+ }
+ return -1;
+}
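
perf_thread_map__idx is the reverse lookup of perf_thread_map__pid, and both now treat a NULL map as the "all threads" (-1) singleton. In sketch form:

def thread_map_idx(pids, pid):  # pids=None models a NULL thread map
    if pids is None:
        return 0 if pid == -1 else -1
    return pids.index(pid) if pid in pids else -1

assert thread_map_idx(None, -1) == 0
assert thread_map_idx([10, 20], 20) == 1
assert thread_map_idx([10, 20], 99) == -1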
diff --git a/tools/lib/python/__init__.py b/tools/lib/python/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/tools/lib/python/__init__.py
diff --git a/tools/lib/python/abi/__init__.py b/tools/lib/python/abi/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/tools/lib/python/abi/__init__.py
diff --git a/tools/lib/python/abi/abi_parser.py b/tools/lib/python/abi/abi_parser.py
new file mode 100644
index 000000000000..9b8db70067ef
--- /dev/null
+++ b/tools/lib/python/abi/abi_parser.py
@@ -0,0 +1,628 @@
+#!/usr/bin/env python3
+# pylint: disable=R0902,R0903,R0911,R0912,R0913,R0914,R0915,R0917,C0302
+# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+Parse ABI documentation and produce results from it.
+"""
+
+from argparse import Namespace
+import logging
+import os
+import re
+
+from pprint import pformat
+from random import randrange, seed
+
+# Import Python modules
+
+from abi.helpers import AbiDebug, ABI_DIR
+
+
+class AbiParser:
+ """Main class to parse ABI files"""
+
+ TAGS = r"(what|where|date|kernelversion|contact|description|users)"
+ XREF = r"(?:^|\s|\()(\/(?:sys|config|proc|dev|kvd)\/[^,.:;\)\s]+)(?:[,.:;\)\s]|\Z)"
+
+ def __init__(self, directory, logger=None,
+ enable_lineno=False, show_warnings=True, debug=0):
+ """Stores arguments for the class and initialize class vars"""
+
+ self.directory = directory
+ self.enable_lineno = enable_lineno
+ self.show_warnings = show_warnings
+ self.debug = debug
+
+ if not logger:
+ self.log = logging.getLogger("get_abi")
+ else:
+ self.log = logger
+
+ self.data = {}
+ self.what_symbols = {}
+ self.file_refs = {}
+ self.what_refs = {}
+
+ # Ignore files that end with these suffixes
+ self.ignore_suffixes = (".rej", ".org", ".orig", ".bak", "~")
+
+ # Regular expressions used on parser
+ self.re_abi_dir = re.compile(r"(.*)" + ABI_DIR)
+ self.re_tag = re.compile(r"(\S+)(:\s*)(.*)", re.I)
+ self.re_valid = re.compile(self.TAGS)
+ self.re_start_spc = re.compile(r"(\s*)(\S.*)")
+ self.re_whitespace = re.compile(r"^\s+")
+
+ # Regular expressions used when printing
+ self.re_what = re.compile(r"(\/?(?:[\w\-]+\/?){1,2})")
+ self.re_escape = re.compile(r"([\.\x01-\x08\x0e-\x1f\x21-\x2f\x3a-\x40\x7b-\xff])")
+ self.re_unprintable = re.compile(r"([\x00-\x2f\x3a-\x40\x5b-\x60\x7b-\xff]+)")
+ self.re_title_mark = re.compile(r"\n[\-\*\=\^\~]+\n")
+ self.re_doc = re.compile(r"Documentation/(?!devicetree)(\S+)\.rst")
+ self.re_abi = re.compile(r"(Documentation/ABI/)([\w\/\-]+)")
+ self.re_xref_node = re.compile(self.XREF)
+
+ def warn(self, fdata, msg, extra=None):
+ """Displays a parse error if warning is enabled"""
+
+ if not self.show_warnings:
+ return
+
+ msg = f"{fdata.fname}:{fdata.ln}: {msg}"
+ if extra:
+ msg += "\n\t\t" + extra
+
+ self.log.warning(msg)
+
+ def add_symbol(self, what, fname, ln=None, xref=None):
+ """Create a reference table describing where each 'what' is located"""
+
+ if what not in self.what_symbols:
+ self.what_symbols[what] = {"file": {}}
+
+ if fname not in self.what_symbols[what]["file"]:
+ self.what_symbols[what]["file"][fname] = []
+
+ if ln and ln not in self.what_symbols[what]["file"][fname]:
+ self.what_symbols[what]["file"][fname].append(ln)
+
+ if xref:
+ self.what_symbols[what]["xref"] = xref
+
+ def _parse_line(self, fdata, line):
+ """Parse a single line of an ABI file"""
+
+ new_what = False
+ new_tag = False
+ content = None
+
+ match = self.re_tag.match(line)
+ if match:
+ new = match.group(1).lower()
+ sep = match.group(2)
+ content = match.group(3)
+
+ match = self.re_valid.search(new)
+ if match:
+ new_tag = match.group(1)
+ else:
+ if fdata.tag == "description":
+ # New "tag" is actually part of description.
+ # Don't consider it a tag
+ new_tag = False
+ elif fdata.tag != "":
+ self.warn(fdata, f"tag '{fdata.tag}' is invalid", line)
+
+ if new_tag:
+ # "where" is Invalid, but was a common mistake. Warn if found
+ if new_tag == "where":
+ self.warn(fdata, "tag 'Where' is invalid. Should be 'What:' instead")
+ new_tag = "what"
+
+ if new_tag == "what":
+ fdata.space = None
+
+ if content not in self.what_symbols:
+ self.add_symbol(what=content, fname=fdata.fname, ln=fdata.ln)
+
+ if fdata.tag == "what":
+ fdata.what.append(content.strip("\n"))
+ else:
+ if fdata.key:
+ if "description" not in self.data.get(fdata.key, {}):
+ self.warn(fdata, f"{fdata.key} doesn't have a description")
+
+ for w in fdata.what:
+ self.add_symbol(what=w, fname=fdata.fname,
+ ln=fdata.what_ln, xref=fdata.key)
+
+ fdata.label = content
+ new_what = True
+
+ key = "abi_" + content.lower()
+ fdata.key = self.re_unprintable.sub("_", key).strip("_")
+
+ # Avoid duplicated keys by using a fixed seed, to keep the
+ # namespace identical as long as the ABI symbols don't
+ # change
+ seed(42)
+
+ while fdata.key in self.data:
+ char = randrange(0, 51) + ord("A")
+ if char > ord("Z"):
+ char += ord("a") - ord("Z") - 1
+
+ fdata.key += chr(char)
+
+ if fdata.key and fdata.key not in self.data:
+ self.data[fdata.key] = {
+ "what": [content],
+ "file": [fdata.file_ref],
+ "path": fdata.ftype,
+ "line_no": fdata.ln,
+ }
+
+ fdata.what = self.data[fdata.key]["what"]
+
+ self.what_refs[content] = fdata.key
+ fdata.tag = new_tag
+ fdata.what_ln = fdata.ln
+
+ if fdata.nametag["what"]:
+ t = (content, fdata.key)
+ if t not in fdata.nametag["symbols"]:
+ fdata.nametag["symbols"].append(t)
+
+ return
+
+ if fdata.tag and new_tag:
+ fdata.tag = new_tag
+
+ if new_what:
+ fdata.label = ""
+
+ if "description" in self.data[fdata.key]:
+ self.data[fdata.key]["description"] += "\n\n"
+
+ if fdata.file_ref not in self.data[fdata.key]["file"]:
+ self.data[fdata.key]["file"].append(fdata.file_ref)
+
+ if self.debug & AbiDebug.WHAT_PARSING:
+ self.log.debug("what: %s", fdata.what)
+
+ if not fdata.what:
+ self.warn(fdata, "'What:' should come first:", line)
+ return
+
+ if new_tag == "description":
+ fdata.space = None
+
+ if content:
+ sep = sep.replace(":", " ")
+
+ c = " " * len(new_tag) + sep + content
+ c = c.expandtabs()
+
+ match = self.re_start_spc.match(c)
+ if match:
+ # Preserve initial spaces for the first line
+ fdata.space = match.group(1)
+ content = match.group(2) + "\n"
+
+ self.data[fdata.key][fdata.tag] = content
+
+ return
+
+ # Store any contents that come before tags in the database
+ if not fdata.tag and "what" in fdata.nametag:
+ fdata.nametag["description"] += line
+ return
+
+ if fdata.tag == "description":
+ content = line.expandtabs()
+
+ if self.re_whitespace.sub("", content) == "":
+ self.data[fdata.key][fdata.tag] += "\n"
+ return
+
+ if fdata.space is None:
+ match = self.re_start_spc.match(content)
+ if match:
+ # Preserve initial spaces for the first line
+ fdata.space = match.group(1)
+
+ content = match.group(2) + "\n"
+ else:
+ if content.startswith(fdata.space):
+ content = content[len(fdata.space):]
+
+ else:
+ fdata.space = ""
+
+ if fdata.tag == "what":
+ w = content.strip("\n")
+ if w:
+ self.data[fdata.key][fdata.tag].append(w)
+ else:
+ self.data[fdata.key][fdata.tag] += content
+ return
+
+ content = line.strip()
+ if fdata.tag:
+ if fdata.tag == "what":
+ w = content.strip("\n")
+ if w:
+ self.data[fdata.key][fdata.tag].append(w)
+ else:
+ self.data[fdata.key][fdata.tag] += "\n" + content.rstrip("\n")
+ return
+
+ # Everything else is an error
+ if content:
+ self.warn(fdata, "Unexpected content", line)
+
+ def parse_readme(self, nametag, fname):
+ """Parse ABI README file"""
+
+ nametag["what"] = ["Introduction"]
+ nametag["path"] = "README"
+ with open(fname, "r", encoding="utf8", errors="backslashreplace") as fp:
+ for line in fp:
+ match = self.re_tag.match(line)
+ if match:
+ new = match.group(1).lower()
+
+ match = self.re_valid.search(new)
+ if match:
+ nametag["description"] += "\n:" + line
+ continue
+
+ nametag["description"] += line
+
+ def parse_file(self, fname, path, basename):
+ """Parse a single file"""
+
+ ref = f"abi_file_{path}_{basename}"
+ ref = self.re_unprintable.sub("_", ref).strip("_")
+
+ # Store per-file state into a namespace variable. This will be used
+ # by the per-line parser state machine and by the warning function.
+ fdata = Namespace()
+
+ fdata.fname = fname
+ fdata.name = basename
+
+ pos = fname.find(ABI_DIR)
+ if pos > 0:
+ f = fname[pos:]
+ else:
+ f = fname
+
+ fdata.file_ref = (f, ref)
+ self.file_refs[f] = ref
+
+ fdata.ln = 0
+ fdata.what_ln = 0
+ fdata.tag = ""
+ fdata.label = ""
+ fdata.what = []
+ fdata.key = None
+ fdata.xrefs = None
+ fdata.space = None
+ fdata.ftype = path.split("/")[0]
+
+ fdata.nametag = {}
+ fdata.nametag["what"] = [f"ABI file {path}/{basename}"]
+ fdata.nametag["type"] = "File"
+ fdata.nametag["path"] = fdata.ftype
+ fdata.nametag["file"] = [fdata.file_ref]
+ fdata.nametag["line_no"] = 1
+ fdata.nametag["description"] = ""
+ fdata.nametag["symbols"] = []
+
+ self.data[ref] = fdata.nametag
+
+ if self.debug & AbiDebug.WHAT_OPEN:
+ self.log.debug("Opening file %s", fname)
+
+ if basename == "README":
+ self.parse_readme(fdata.nametag, fname)
+ return
+
+ with open(fname, "r", encoding="utf8", errors="backslashreplace") as fp:
+ for line in fp:
+ fdata.ln += 1
+
+ self._parse_line(fdata, line)
+
+ if "description" in fdata.nametag:
+ fdata.nametag["description"] = fdata.nametag["description"].lstrip("\n")
+
+ if fdata.key:
+ if "description" not in self.data.get(fdata.key, {}):
+ self.warn(fdata, f"{fdata.key} doesn't have a description")
+
+ for w in fdata.what:
+ self.add_symbol(what=w, fname=fname, xref=fdata.key)
+
+ def _parse_abi(self, root=None):
+ """Internal function to parse documentation ABI recursively"""
+
+ if not root:
+ root = self.directory
+
+ with os.scandir(root) as obj:
+ for entry in obj:
+ name = os.path.join(root, entry.name)
+
+ if entry.is_dir():
+ self._parse_abi(name)
+ continue
+
+ if not entry.is_file():
+ continue
+
+ basename = os.path.basename(name)
+
+ if basename.startswith("."):
+ continue
+
+ if basename.endswith(self.ignore_suffixes):
+ continue
+
+ path = self.re_abi_dir.sub("", os.path.dirname(name))
+
+ self.parse_file(name, path, basename)
+
+ def parse_abi(self, root=None):
+ """Parse documentation ABI"""
+
+ self._parse_abi(root)
+
+ if self.debug & AbiDebug.DUMP_ABI_STRUCTS:
+ self.log.debug(pformat(self.data))
+
+ def desc_txt(self, desc):
+ """Print description as found inside ABI files"""
+
+ desc = desc.strip(" \t\n")
+
+ return desc + "\n\n"
+
+ def xref(self, fname):
+ """
+ Converts a Documentation/ABI + basename into a ReST cross-reference
+ """
+
+ return self.file_refs.get(fname)
+
+ def desc_rst(self, desc):
+ """Enrich ReST output by creating cross-references"""
+
+ # Remove title markups from the description
+ # Having titles inside ABI files will only work if extra
+ # care would be taken in order to strictly follow the same
+ # level order for each markup.
+ desc = self.re_title_mark.sub("\n\n", "\n" + desc)
+ desc = desc.rstrip(" \t\n").lstrip("\n")
+
+ # Python's regex performance for non-compiled expressions is a lot
+ # worse than Perl's, as Perl automatically caches them at their
+ # first usage. Here, we need to do the same, as otherwise the
+ # performance penalty would be high
+
+ new_desc = ""
+ for d in desc.split("\n"):
+ if d == "":
+ new_desc += "\n"
+ continue
+
+ # Use cross-references for doc files where needed
+ d = self.re_doc.sub(r":doc:`/\1`", d)
+
+ # Use cross-references for ABI generated docs where needed
+ matches = self.re_abi.findall(d)
+ for m in matches:
+ abi = m[0] + m[1]
+
+ xref = self.file_refs.get(abi)
+ if not xref:
+ # This may happen if the ABI is in a separate directory,
+ # e.g. when parsing ABI testing while the symbol is at stable.
+ # The proper solution is to move this part of the code
+ # for it to be inside sphinx/kernel_abi.py
+ self.log.info("Didn't find ABI reference for '%s'", abi)
+ else:
+ new = self.re_escape.sub(r"\\\1", m[1])
+ d = re.sub(fr"\b{abi}\b", f":ref:`{new} <{xref}>`", d)
+
+ # Seek for cross reference symbols like /sys/...
+ # Need to be careful to avoid doing it on a code block
+ if d[0] not in [" ", "\t"]:
+ matches = self.re_xref_node.findall(d)
+ for m in matches:
+ # Finding ABI here is more complex due to wildcards
+ xref = self.what_refs.get(m)
+ if xref:
+ new = self.re_escape.sub(r"\\\1", m)
+ d = re.sub(fr"\b{m}\b", f":ref:`{new} <{xref}>`", d)
+
+ new_desc += d + "\n"
+
+ return new_desc + "\n\n"
+
+ def doc(self, output_in_txt=False, show_symbols=True, show_file=True,
+ filter_path=None):
+ """Print ABI at stdout"""
+
+ part = None
+ for key, v in sorted(self.data.items(),
+ key=lambda x: (x[1].get("type", ""),
+ x[1].get("what"))):
+
+ wtype = v.get("type", "Symbol")
+ file_ref = v.get("file")
+ names = v.get("what", [""])
+
+ if wtype == "File":
+ if not show_file:
+ continue
+ else:
+ if not show_symbols:
+ continue
+
+ if filter_path:
+ if v.get("path") != filter_path:
+ continue
+
+ msg = ""
+
+ if wtype != "File":
+ cur_part = names[0]
+ if cur_part.find("/") >= 0:
+ match = self.re_what.match(cur_part)
+ if match:
+ symbol = match.group(1).rstrip("/")
+ cur_part = "Symbols under " + symbol
+
+ if cur_part and cur_part != part:
+ part = cur_part
+ msg += part + "\n"+ "-" * len(part) +"\n\n"
+
+ msg += f".. _{key}:\n\n"
+
+ max_len = 0
+ for i in range(0, len(names)): # pylint: disable=C0200
+ names[i] = "**" + self.re_escape.sub(r"\\\1", names[i]) + "**"
+
+ max_len = max(max_len, len(names[i]))
+
+ msg += "+-" + "-" * max_len + "-+\n"
+ for name in names:
+ msg += f"| {name}" + " " * (max_len - len(name)) + " |\n"
+ msg += "+-" + "-" * max_len + "-+\n"
+ msg += "\n"
+
+ for ref in file_ref:
+ if wtype == "File":
+ msg += f".. _{ref[1]}:\n\n"
+ else:
+ base = os.path.basename(ref[0])
+ msg += f"Defined on file :ref:`{base} <{ref[1]}>`\n\n"
+
+ if wtype == "File":
+ msg += names[0] +"\n" + "-" * len(names[0]) +"\n\n"
+
+ desc = v.get("description")
+ if not desc and wtype != "File":
+ msg += f"DESCRIPTION MISSING for {names[0]}\n\n"
+
+ if desc:
+ if output_in_txt:
+ msg += self.desc_txt(desc)
+ else:
+ msg += self.desc_rst(desc)
+
+ symbols = v.get("symbols")
+ if symbols:
+ msg += "Has the following ABI:\n\n"
+
+ for w, label in symbols:
+ # Escape special chars from content
+ content = self.re_escape.sub(r"\\\1", w)
+
+ msg += f"- :ref:`{content} <{label}>`\n\n"
+
+ users = v.get("users")
+ if users and users.strip(" \t\n"):
+ users = users.strip("\n").replace('\n', '\n\t')
+ msg += f"Users:\n\t{users}\n\n"
+
+ ln = v.get("line_no", 1)
+
+ yield (msg, file_ref[0][0], ln)
+
+ def check_issues(self):
+ """Warn about duplicated ABI entries"""
+
+ for what, v in self.what_symbols.items():
+ files = v.get("file")
+ if not files:
+ # Should never happen if the parser works properly
+ self.log.warning("%s doesn't have a file associated", what)
+ continue
+
+ if len(files) == 1:
+ continue
+
+ f = []
+ for fname, lines in sorted(files.items()):
+ if not lines:
+ f.append(f"{fname}")
+ elif len(lines) == 1:
+ f.append(f"{fname}:{lines[0]}")
+ else:
+ m = fname + "lines "
+ m += ", ".join(str(x) for x in lines)
+ f.append(m)
+
+ self.log.warning("%s is defined %d times: %s", what, len(f), "; ".join(f))
+
+ def search_symbols(self, expr):
+ """ Searches for ABI symbols """
+
+ regex = re.compile(expr, re.I)
+
+ found_keys = 0
+ for t in sorted(self.data.items(), key=lambda x: x[0]):
+ v = t[1]
+
+ wtype = v.get("type", "")
+ if wtype == "File":
+ continue
+
+ for what in v.get("what", [""]):
+ if regex.search(what):
+ found_keys += 1
+
+ kernelversion = v.get("kernelversion", "").strip(" \t\n")
+ date = v.get("date", "").strip(" \t\n")
+ contact = v.get("contact", "").strip(" \t\n")
+ users = v.get("users", "").strip(" \t\n")
+ desc = v.get("description", "").strip(" \t\n")
+
+ files = []
+ for f in v.get("file", ()):
+ files.append(f[0])
+
+ what = str(found_keys) + ". " + what
+ title_tag = "-" * len(what)
+
+ print(f"\n{what}\n{title_tag}\n")
+
+ if kernelversion:
+ print(f"Kernel version:\t\t{kernelversion}")
+
+ if date:
+ print(f"Date:\t\t\t{date}")
+
+ if contact:
+ print(f"Contact:\t\t{contact}")
+
+ if users:
+ print(f"Users:\t\t\t{users}")
+
+ print("Defined on file(s):\t" + ", ".join(files))
+
+ if desc:
+ desc = desc.strip("\n")
+ print(f"\n{desc}\n")
+
+ if not found_keys:
+ print(f"Regular expression /{expr}/ not found.")
diff --git a/tools/lib/python/abi/abi_regex.py b/tools/lib/python/abi/abi_regex.py
new file mode 100644
index 000000000000..d5553206de3c
--- /dev/null
+++ b/tools/lib/python/abi/abi_regex.py
@@ -0,0 +1,234 @@
+#!/usr/bin/env python3
+# xxpylint: disable=R0903
+# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+Convert ABI what into regular expressions
+"""
+
+import re
+import sys
+
+from pprint import pformat
+
+from abi.abi_parser import AbiParser
+from abi.helpers import AbiDebug
+
+class AbiRegex(AbiParser):
+ """Extends AbiParser to search ABI nodes with regular expressions"""
+
+ # Escape only ASCII visible characters
+ escape_symbols = r"([\x21-\x29\x2b-\x2d\x3a-\x40\x5c\x60\x7b-\x7e])"
+ leave_others = "others"
+
+ # Tuples with regular expressions to be compiled and replacement data
+ re_whats = [
+ # Drop escape characters that might exist
+ (re.compile("\\\\"), ""),
+
+ # Temporarily escape dot characters
+ (re.compile(r"\."), "\xf6"),
+
+ # Temporarily change [0-9]+ type of patterns
+ (re.compile(r"\[0\-9\]\+"), "\xff"),
+
+ # Temporarily change [\d+-\d+] type of patterns
+ (re.compile(r"\[0\-\d+\]"), "\xff"),
+ (re.compile(r"\[0:\d+\]"), "\xff"),
+ (re.compile(r"\[(\d+)\]"), "\xf4\\\\d+\xf5"),
+
+ # Temporarily change [0-9] type of patterns
+ (re.compile(r"\[(\d)\-(\d)\]"), "\xf4\1-\2\xf5"),
+
+ # Handle multiple option patterns
+ (re.compile(r"[\{\<\[]([\w_]+)(?:[,|]+([\w_]+)){1,}[\}\>\]]"), r"(\1|\2)"),
+
+ # Handle wildcards
+ (re.compile(r"([^\/])\*"), "\\1\\\\w\xf7"),
+ (re.compile(r"/\*/"), "/.*/"),
+ (re.compile(r"/\xf6\xf6\xf6"), "/.*"),
+ (re.compile(r"\<[^\>]+\>"), "\\\\w\xf7"),
+ (re.compile(r"\{[^\}]+\}"), "\\\\w\xf7"),
+ (re.compile(r"\[[^\]]+\]"), "\\\\w\xf7"),
+
+ (re.compile(r"XX+"), "\\\\w\xf7"),
+ (re.compile(r"([^A-Z])[XYZ]([^A-Z])"), "\\1\\\\w\xf7\\2"),
+ (re.compile(r"([^A-Z])[XYZ]$"), "\\1\\\\w\xf7"),
+ (re.compile(r"_[AB]_"), "_\\\\w\xf7_"),
+
+ # Recover [0-9] type of patterns
+ (re.compile(r"\xf4"), "["),
+ (re.compile(r"\xf5"), "]"),
+
+ # Remove duplicated spaces
+ (re.compile(r"\s+"), r" "),
+
+ # Special case: drop comparison as in:
+ # What: foo = <something>
+ # (this happens on a few IIO definitions)
+ (re.compile(r"\s*\=.*$"), ""),
+
+ # Escape all other symbols
+ (re.compile(escape_symbols), r"\\\1"),
+ (re.compile(r"\\\\"), r"\\"),
+ (re.compile(r"\\([\[\]\(\)\|])"), r"\1"),
+ (re.compile(r"(\d+)\\(-\d+)"), r"\1\2"),
+
+ (re.compile(r"\xff"), r"\\d+"),
+
+ # Special case: IIO ABI entries which use a parenthesis.
+ (re.compile(r"sqrt(.*)"), r"sqrt(.*)"),
+
+ # Simplify regexes with multiple .*
+ (re.compile(r"(?:\.\*){2,}"), ""),
+
+ # Recover dot characters
+ (re.compile(r"\xf6"), "\\."),
+ # Recover plus characters
+ (re.compile(r"\xf7"), "+"),
+ ]
+ re_has_num = re.compile(r"\\d")
+
+ # Symbol name after escape_chars that are considered a devnode basename
+ re_symbol_name = re.compile(r"(\w|\\[\.\-\:])+$")
+
+ # List of popular group names to be skipped to minimize regex group size
+ # Use AbiDebug.SUBGROUP_SIZE to detect those
+ skip_names = set(["devices", "hwmon"])
+
+ def regex_append(self, what, new):
+ """
+ Get a search group for a subset of regular expressions.
+
+ As ABI may have thousands of symbols, using a for loop to search all
+ regular expressions is at least O(n^2). When there are wildcards,
+ the complexity increases substantially, eventually becoming exponential.
+
+ To avoid spending too much time on them, use some logic to split
+ them into groups. The smaller the group, the better, as it would
+ mean that searches will be confined to a small number of regular
+ expressions.
+
+ The conversion to a regex subset is tricky, as we need something
+ that can be easily obtained from the sysfs symbol and from the
+ regular expression. So, we need to discard nodes that have
+ wildcards.
+
+ If it can't obtain a subgroup, place the regular expression inside
+ a special group (self.leave_others).
+ """
+
+ search_group = None
+
+ for search_group in reversed(new.split("/")):
+ if not search_group or search_group in self.skip_names:
+ continue
+ if self.re_symbol_name.match(search_group):
+ break
+
+ if not search_group:
+ search_group = self.leave_others
+
+ if self.debug & AbiDebug.SUBGROUP_MAP:
+ self.log.debug("%s: mapped as %s", what, search_group)
+
+ try:
+ if search_group not in self.regex_group:
+ self.regex_group[search_group] = []
+
+ self.regex_group[search_group].append(re.compile(new))
+ if self.search_string:
+ if what.find(self.search_string) >= 0:
+ print(f"What: {what}")
+ except re.PatternError:
+ self.log.warning("Ignoring '%s' as it produced an invalid regex:\n"
+ " '%s'", what, new)
+
+ def get_regexes(self, what):
+ """
+ Given an ABI devnode, return a list of all regular expressions that
+ may match it, based on the sub-groups created by regex_append()
+ """
+
+ re_list = []
+
+ patches = what.split("/")
+ patches.reverse()
+ patches.append(self.leave_others)
+
+ for search_group in patches:
+ if search_group in self.regex_group:
+ re_list += self.regex_group[search_group]
+
+ return re_list
+
+ def __init__(self, *args, **kwargs):
+ """
+ Override init method to get verbose argument
+ """
+
+ self.regex_group = None
+ self.search_string = None
+ self.re_string = None
+
+ if "search_string" in kwargs:
+ self.search_string = kwargs.get("search_string")
+ del kwargs["search_string"]
+
+ if self.search_string:
+
+ try:
+ self.re_string = re.compile(self.search_string)
+ except re.PatternError as e:
+ msg = f"{self.search_string} is not a valid regular expression"
+ raise ValueError(msg) from e
+
+ super().__init__(*args, **kwargs)
+
+ def parse_abi(self, *args, **kwargs):
+
+ super().parse_abi(*args, **kwargs)
+
+ self.regex_group = {}
+
+ print("Converting ABI What fields into regexes...", file=sys.stderr)
+
+ for t in sorted(self.data.items(), key=lambda x: x[0]):
+ v = t[1]
+ if v.get("type") == "File":
+ continue
+
+ v["regex"] = []
+
+ for what in v.get("what", []):
+ if not what.startswith("/sys"):
+ continue
+
+ new = what
+ for r, s in self.re_whats:
+ try:
+ new = r.sub(s, new)
+ except re.PatternError as e:
+ # Help debugging troubles with new regexes
+ raise re.PatternError(f"{e}\nwhile re.sub('{r.pattern}', {s}, str)") from e
+
+ v["regex"].append(new)
+
+ if self.debug & AbiDebug.REGEX:
+ self.log.debug("%-90s <== %s", new, what)
+
+ # Store regex into a subgroup to speedup searches
+ self.regex_append(what, new)
+
+ if self.debug & AbiDebug.SUBGROUP_DICT:
+ self.log.debug("%s", pformat(self.regex_group))
+
+ if self.debug & AbiDebug.SUBGROUP_SIZE:
+            biggest_keys = sorted(self.regex_group.keys(),
+                                  key=lambda k: len(self.regex_group[k]),
+                                  reverse=True)
+
+            print("Top regex subgroups:", file=sys.stderr)
+            for k in biggest_keys[:10]:
+                print(f"{k} has {len(self.regex_group[k])} elements", file=sys.stderr)
diff --git a/tools/lib/python/abi/helpers.py b/tools/lib/python/abi/helpers.py
new file mode 100644
index 000000000000..639b23e4ca33
--- /dev/null
+++ b/tools/lib/python/abi/helpers.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python3
+# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
+# pylint: disable=R0903
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+Helper classes for ABI parser
+"""
+
+ABI_DIR = "Documentation/ABI/"
+
+
+class AbiDebug:
+ """Debug levels"""
+
+ WHAT_PARSING = 1
+ WHAT_OPEN = 2
+ DUMP_ABI_STRUCTS = 4
+ UNDEFINED = 8
+ REGEX = 16
+ SUBGROUP_MAP = 32
+ SUBGROUP_DICT = 64
+ SUBGROUP_SIZE = 128
+ GRAPH = 256
+
+
+DEBUG_HELP = """
+1 - enable debug parsing logic
+2 - enable debug messages on file open
+4 - enable debug for ABI parse data
+8 - enable extra debug information to identify troubles
+    with ABI symbols found at the local machine that
+    weren't found on ABI documentation (used only for
+    undefined subcommand)
+16 - enable debug for what to regex conversion
+32 - enable debug for symbol regex subgroups
+64 - enable debug dump of the regex subgroup dictionary
+128 - enable debug for the sizes of the biggest regex subgroups
+256 - enable debug for the sysfs graph tree variable
+"""
diff --git a/tools/lib/python/abi/system_symbols.py b/tools/lib/python/abi/system_symbols.py
new file mode 100644
index 000000000000..4a2554da217b
--- /dev/null
+++ b/tools/lib/python/abi/system_symbols.py
@@ -0,0 +1,378 @@
+#!/usr/bin/env python3
+# pylint: disable=R0902,R0912,R0914,R0915,R1702
+# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+Parse ABI documentation and produce results from it.
+"""
+
+import os
+import re
+import sys
+
+from concurrent import futures
+from datetime import datetime
+from random import shuffle
+
+from abi.helpers import AbiDebug
+
+class SystemSymbols:
+ """Stores arguments for the class and initialize class vars"""
+
+ def graph_add_file(self, path, link=None):
+ """
+ add a file path to the sysfs graph stored at self.root
+ """
+
+ if path in self.files:
+ return
+
+ name = ""
+ ref = self.root
+ for edge in path.split("/"):
+ name += edge + "/"
+ if edge not in ref:
+ ref[edge] = {"__name": [name.rstrip("/")]}
+
+ ref = ref[edge]
+
+ if link and link not in ref["__name"]:
+ ref["__name"].append(link.rstrip("/"))
+
+ self.files.add(path)
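+        # Illustrative shape (hypothetical path): after adding
+        # "/sys/class/net", self.root holds nested dicts roughly like
+        # {"": {"__name": [""],
+        #       "sys": {"__name": ["/sys"],
+        #               "class": {"__name": ["/sys/class"],
+        #                         "net": {"__name": ["/sys/class/net"]}}}}}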
+
+ def print_graph(self, root_prefix="", root=None, level=0):
+ """Prints a reference tree graph using UTF-8 characters"""
+
+ if not root:
+ root = self.root
+ level = 0
+
+ # Prevent endless traverse
+ if level > 5:
+ return
+
+ if level > 0:
+ prefix = "├──"
+ last_prefix = "└──"
+ else:
+ prefix = ""
+ last_prefix = ""
+
+ items = list(root.items())
+
+ names = root.get("__name", [])
+ for k, edge in items:
+ if k == "__name":
+ continue
+
+ if not k:
+ k = "/"
+
+ if len(names) > 1:
+ k += " links: " + ",".join(names[1:])
+
+ if edge == items[-1][1]:
+ print(root_prefix + last_prefix + k)
+ p = root_prefix
+ if level > 0:
+ p += " "
+ self.print_graph(p, edge, level + 1)
+ else:
+ print(root_prefix + prefix + k)
+ p = root_prefix + "│ "
+ self.print_graph(p, edge, level + 1)
+
+ def _walk(self, root):
+ """
+ Walk through sysfs to get all devnodes that aren't ignored.
+
+        By default, uses /sys as the sysfs mount point. If another
+        directory is used, it is replaced by /sys in the stored paths.
+ """
+
+ with os.scandir(root) as obj:
+ for entry in obj:
+ path = os.path.join(root, entry.name)
+ if self.sysfs:
+                    # Use a positional count: str.replace() only accepts
+                    # count as a keyword on Python >= 3.13
+                    p = path.replace(self.sysfs, "/sys", 1)
+ else:
+ p = path
+
+ if self.re_ignore.search(p):
+                    # Skip just this entry; a return here would abort
+                    # the scan of all remaining siblings
+                    continue
+
+ # Handle link first to avoid directory recursion
+ if entry.is_symlink():
+ real = os.path.realpath(path)
+ if not self.sysfs:
+ self.aliases[path] = real
+ else:
+                        real = real.replace(self.sysfs, "/sys", 1)
+
+ # Add absfile location to graph if it doesn't exist
+ if not self.re_ignore.search(real):
+ # Add link to the graph
+ self.graph_add_file(real, p)
+
+ elif entry.is_file():
+ self.graph_add_file(p)
+
+ elif entry.is_dir():
+ self._walk(path)
+
+ def __init__(self, abi, sysfs="/sys", hints=False):
+ """
+ Initialize internal variables and get a list of all files inside
+ sysfs that can currently be parsed.
+
+        Note that there are several entries in sysfs that aren't
+        documented as ABI; those are ignored.
+
+        The real paths will be stored under self.files. Aliases will
+        be stored separately, in self.aliases.
+ """
+
+ self.abi = abi
+ self.log = abi.log
+
+ if sysfs != "/sys":
+ self.sysfs = sysfs.rstrip("/")
+ else:
+ self.sysfs = None
+
+ self.hints = hints
+
+ self.root = {}
+ self.aliases = {}
+ self.files = set()
+
+ dont_walk = [
+ # Those require root access and aren't documented at ABI
+ f"^{sysfs}/kernel/debug",
+ f"^{sysfs}/kernel/tracing",
+ f"^{sysfs}/fs/pstore",
+ f"^{sysfs}/fs/bpf",
+ f"^{sysfs}/fs/fuse",
+
+ # This is not documented at ABI
+ f"^{sysfs}/module",
+
+ f"^{sysfs}/fs/cgroup", # this is big and has zero docs under ABI
+ f"^{sysfs}/firmware", # documented elsewhere: ACPI, DT bindings
+ "sections|notes", # aren't actually part of ABI
+
+ # kernel-parameters.txt - not easy to parse
+ "parameters",
+ ]
+
+ self.re_ignore = re.compile("|".join(dont_walk))
+
+ print(f"Reading {sysfs} directory contents...", file=sys.stderr)
+ self._walk(sysfs)
+
+ def check_file(self, refs, found):
+ """Check missing ABI symbols for a given sysfs file"""
+
+ res_list = []
+
+ try:
+ for names in refs:
+ fname = names[0]
+
+ res = {
+ "found": False,
+ "fname": fname,
+ "msg": "",
+ }
+ res_list.append(res)
+
+ re_what = self.abi.get_regexes(fname)
+ if not re_what:
+ self.abi.log.warning(f"missing rules for {fname}")
+ continue
+
+ for name in names:
+ for r in re_what:
+ if self.abi.debug & AbiDebug.UNDEFINED:
+ self.log.debug("check if %s matches '%s'", name, r.pattern)
+ if r.match(name):
+ res["found"] = True
+ if found:
+ res["msg"] += f" {fname}: regex:\n\t"
+ continue
+
+ if self.hints and not res["found"]:
+ res["msg"] += f" {fname} not found. Tested regexes:\n"
+ for r in re_what:
+ res["msg"] += " " + r.pattern + "\n"
+
+ except KeyboardInterrupt:
+ pass
+
+ return res_list
+
+ def _ref_interactor(self, root):
+        """Recursive generator to iterate over the sysfs tree"""
+
+ for k, v in root.items():
+ if isinstance(v, dict):
+ yield from self._ref_interactor(v)
+
+ if root == self.root or k == "__name":
+ continue
+
+ if self.abi.re_string:
+ fname = v["__name"][0]
+ if self.abi.re_string.search(fname):
+ yield v
+ else:
+ yield v
+
+
+ def get_fileref(self, all_refs, chunk_size):
+        """Iterator that groups refs into chunks"""
+
+ n = 0
+ refs = []
+
+ for ref in all_refs:
+ refs.append(ref)
+
+ n += 1
+ if n >= chunk_size:
+ yield refs
+ n = 0
+ refs = []
+
+ yield refs
+
+ def check_undefined_symbols(self, max_workers=None, chunk_size=50,
+ found=None, dry_run=None):
+        """Search the ABI for sysfs symbols missing documentation"""
+
+ self.abi.parse_abi()
+
+ if self.abi.debug & AbiDebug.GRAPH:
+ self.print_graph()
+
+ all_refs = []
+ for ref in self._ref_interactor(self.root):
+ all_refs.append(ref["__name"])
+
+ if dry_run:
+ print("Would check", file=sys.stderr)
+ for ref in all_refs:
+ print(", ".join(ref))
+
+ return
+
+ print("Starting to search symbols (it may take several minutes):",
+ file=sys.stderr)
+ start = datetime.now()
+ old_elapsed = None
+
+        # Python doesn't support true multithreading for CPU-bound work,
+        # due to its global interpreter lock (GIL). While Python 3.13
+        # finally made the GIL optional, there are still issues related
+        # to it. Also, we want to keep backward compatibility with older
+        # versions of Python.
+        #
+        # So, use multiprocessing instead. However, Python is very slow
+        # at passing data to/from multiple processes. It may also consume
+        # lots of memory if the shared data is not small. So, group the
+        # workload into chunks that are big enough to produce performance
+        # gains while not being so big that they would cause out-of-memory
+        # conditions.
+
+ num_refs = len(all_refs)
+ print(f"Number of references to parse: {num_refs}", file=sys.stderr)
+
+ if not max_workers:
+ max_workers = os.cpu_count()
+ elif max_workers > os.cpu_count():
+ max_workers = os.cpu_count()
+
+ max_workers = max(max_workers, 1)
+
+ max_chunk_size = int((num_refs + max_workers - 1) / max_workers)
+ chunk_size = min(chunk_size, max_chunk_size)
+ chunk_size = max(1, chunk_size)
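+        # Worked example (numbers are illustrative): with 10,000 refs,
+        # 8 workers and the default chunk_size of 50, max_chunk_size is
+        # ceil(10000 / 8) = 1250, so chunk_size stays at 50 and about
+        # 200 chunks get queued, keeping every worker busy until the end.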
+
+ if max_workers > 1:
+ executor = futures.ProcessPoolExecutor
+
+ # Place references in a random order. This may help improving
+ # performance, by mixing complex/simple expressions when creating
+ # chunks
+ shuffle(all_refs)
+ else:
+            # Python has a high overhead with processes. When there's
+            # just one worker, it is faster not to create a new process.
+            # Yet, the user still deserves a progress report. So, use
+            # Python threads, which actually run in a single process,
+            # using an internal scheduler to switch between tasks. There
+            # are no performance gains for non-IO tasks, but the work can
+            # still be interrupted from time to time to display progress.
+ executor = futures.ThreadPoolExecutor
+
+ not_found = []
+ f_list = []
+ with executor(max_workers=max_workers) as exe:
+ for refs in self.get_fileref(all_refs, chunk_size):
+ if refs:
+ try:
+ f_list.append(exe.submit(self.check_file, refs, found))
+
+ except KeyboardInterrupt:
+ return
+
+ total = len(f_list)
+
+ if not total:
+ if self.abi.re_string:
+ print(f"No ABI symbol matches {self.abi.search_string}")
+ else:
+ self.abi.log.warning("No ABI symbols found")
+ return
+
+ print(f"{len(f_list):6d} jobs queued on {max_workers} workers",
+ file=sys.stderr)
+
+ while f_list:
+ try:
+ t = futures.wait(f_list, timeout=1,
+ return_when=futures.FIRST_COMPLETED)
+
+ done = t[0]
+
+ for fut in done:
+ res_list = fut.result()
+
+ for res in res_list:
+ if not res["found"]:
+ not_found.append(res["fname"])
+ if res["msg"]:
+ print(res["msg"])
+
+ f_list.remove(fut)
+ except KeyboardInterrupt:
+ return
+
+ except RuntimeError as e:
+ self.abi.log.warning(f"Future: {e}")
+ break
+
+ if sys.stderr.isatty():
+ elapsed = str(datetime.now() - start).split(".", maxsplit=1)[0]
+ if len(f_list) < total:
+ elapsed += f" ({total - len(f_list)}/{total} jobs completed). "
+ if elapsed != old_elapsed:
+ print(elapsed + "\r", end="", flush=True,
+ file=sys.stderr)
+ old_elapsed = elapsed
+
+ elapsed = str(datetime.now() - start).split(".", maxsplit=1)[0]
+ print(elapsed, file=sys.stderr)
+
+ for f in sorted(not_found):
+ print(f"{f} not found.")
diff --git a/tools/lib/python/feat/parse_features.py b/tools/lib/python/feat/parse_features.py
new file mode 100755
index 000000000000..b88c04d3e2fe
--- /dev/null
+++ b/tools/lib/python/feat/parse_features.py
@@ -0,0 +1,494 @@
+#!/usr/bin/env python3
+# pylint: disable=R0902,R0911,R0912,R0914,R0915
+# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
+# SPDX-License-Identifier: GPL-2.0
+
+
+"""
+Library to parse the Linux Feature files and produce a ReST book.
+"""
+
+import os
+import re
+import sys
+
+from glob import iglob
+
+
+class ParseFeature:
+ """
+ Parses Documentation/features, allowing to generate ReST documentation
+ from it.
+ """
+
+ h_name = "Feature"
+ h_kconfig = "Kconfig"
+ h_description = "Description"
+ h_subsys = "Subsystem"
+ h_status = "Status"
+ h_arch = "Architecture"
+
+ # Sort order for status. Others will be mapped at the end.
+ status_map = {
+ "ok": 0,
+ "TODO": 1,
+ "N/A": 2,
+        # The only missing status is "..", which was mapped as "---",
+        # as this is a special ReST cell value. Let it get the
+        # default order (99).
+ }
+
+ def __init__(self, prefix, debug=0, enable_fname=False):
+ """
+ Sets internal variables
+ """
+
+ self.prefix = prefix
+ self.debug = debug
+ self.enable_fname = enable_fname
+
+ self.data = {}
+
+ # Initial maximum values use just the headers
+ self.max_size_name = len(self.h_name)
+ self.max_size_kconfig = len(self.h_kconfig)
+ self.max_size_description = len(self.h_description)
+ self.max_size_desc_word = 0
+ self.max_size_subsys = len(self.h_subsys)
+ self.max_size_status = len(self.h_status)
+ self.max_size_arch = len(self.h_arch)
+        self.max_size_arch_with_header = self.max_size_arch + len(self.h_arch)
+ self.description_size = 1
+
+ self.msg = ""
+
+    def emit(self, msg="", end="\n"):
+        """Accumulate a message into the output buffer self.msg"""
+        self.msg += msg + end
+
+ def parse_error(self, fname, ln, msg, data=None):
+ """
+ Displays an error message, printing file name and line
+ """
+
+ if ln:
+ fname += f"#{ln}"
+
+ print(f"Warning: file {fname}: {msg}", file=sys.stderr, end="")
+
+ if data:
+ data = data.rstrip()
+ print(f":\n\t{data}", file=sys.stderr)
+ else:
+ print("", file=sys.stderr)
+
+ def parse_feat_file(self, fname):
+ """Parses a single arch-support.txt feature file"""
+
+ if os.path.isdir(fname):
+ return
+
+ base = os.path.basename(fname)
+
+ if base != "arch-support.txt":
+ if self.debug:
+ print(f"ignoring {fname}", file=sys.stderr)
+ return
+
+ subsys = os.path.dirname(fname).split("/")[-2]
+ self.max_size_subsys = max(self.max_size_subsys, len(subsys))
+
+ feature_name = ""
+ kconfig = ""
+ description = ""
+ comments = ""
+ arch_table = {}
+
+ if self.debug > 1:
+ print(f"Opening {fname}", file=sys.stderr)
+
+ if self.enable_fname:
+ full_fname = os.path.abspath(fname)
+ self.emit(f".. FILE {full_fname}")
+
+ with open(fname, encoding="utf-8") as f:
+ for ln, line in enumerate(f, start=1):
+ line = line.strip()
+
+ match = re.match(r"^\#\s+Feature\s+name:\s*(.*\S)", line)
+ if match:
+ feature_name = match.group(1)
+
+ self.max_size_name = max(self.max_size_name,
+ len(feature_name))
+ continue
+
+ match = re.match(r"^\#\s+Kconfig:\s*(.*\S)", line)
+ if match:
+ kconfig = match.group(1)
+
+ self.max_size_kconfig = max(self.max_size_kconfig,
+ len(kconfig))
+ continue
+
+ match = re.match(r"^\#\s+description:\s*(.*\S)", line)
+ if match:
+ description = match.group(1)
+
+ self.max_size_description = max(self.max_size_description,
+ len(description))
+
+ words = re.split(r"\s+", line)[1:]
+ for word in words:
+ self.max_size_desc_word = max(self.max_size_desc_word,
+ len(word))
+
+ continue
+
+                if re.search(r"^\s*$", line):
+ continue
+
+ if re.match(r"^\s*\-+\s*$", line):
+ continue
+
+ if re.search(r"^\s*\|\s*arch\s*\|\s*status\s*\|\s*$", line):
+ continue
+
+ match = re.match(r"^\#\s*(.*)$", line)
+ if match:
+ comments += match.group(1)
+ continue
+
+ match = re.match(r"^\s*\|\s*(\S+):\s*\|\s*(\S+)\s*\|\s*$", line)
+ if match:
+ arch = match.group(1)
+ status = match.group(2)
+
+ self.max_size_status = max(self.max_size_status,
+ len(status))
+ self.max_size_arch = max(self.max_size_arch, len(arch))
+
+ if status == "..":
+ status = "---"
+
+ arch_table[arch] = status
+
+ continue
+
+ self.parse_error(fname, ln, "Line is invalid", line)
+
+ if not feature_name:
+ self.parse_error(fname, 0, "Feature name not found")
+ return
+ if not subsys:
+ self.parse_error(fname, 0, "Subsystem not found")
+ return
+ if not kconfig:
+ self.parse_error(fname, 0, "Kconfig not found")
+ return
+ if not description:
+ self.parse_error(fname, 0, "Description not found")
+ return
+ if not arch_table:
+ self.parse_error(fname, 0, "Architecture table not found")
+ return
+
+ self.data[feature_name] = {
+ "where": fname,
+ "subsys": subsys,
+ "kconfig": kconfig,
+ "description": description,
+ "comments": comments,
+ "table": arch_table,
+ }
+
+ self.max_size_arch_with_header = self.max_size_arch + len(self.h_arch)
+
+ def parse(self):
+ """Parses all arch-support.txt feature files inside self.prefix"""
+
+ path = os.path.expanduser(self.prefix)
+
+ if self.debug > 2:
+ print(f"Running parser for {path}")
+
+ example_path = os.path.join(path, "arch-support.txt")
+
+ for fname in iglob(os.path.join(path, "**"), recursive=True):
+ if fname != example_path:
+ self.parse_feat_file(fname)
+
+ return self.data
+
+ def output_arch_table(self, arch, feat=None):
+ """
+ Output feature(s) for a given architecture.
+ """
+
+ title = f"Feature status on {arch} architecture"
+
+ self.emit("=" * len(title))
+ self.emit(title)
+ self.emit("=" * len(title))
+ self.emit()
+
+ self.emit("=" * self.max_size_subsys + " ", end="")
+ self.emit("=" * self.max_size_name + " ", end="")
+ self.emit("=" * self.max_size_kconfig + " ", end="")
+ self.emit("=" * self.max_size_status + " ", end="")
+ self.emit("=" * self.max_size_description)
+
+ self.emit(f"{self.h_subsys:<{self.max_size_subsys}} ", end="")
+ self.emit(f"{self.h_name:<{self.max_size_name}} ", end="")
+ self.emit(f"{self.h_kconfig:<{self.max_size_kconfig}} ", end="")
+ self.emit(f"{self.h_status:<{self.max_size_status}} ", end="")
+ self.emit(f"{self.h_description:<{self.max_size_description}}")
+
+ self.emit("=" * self.max_size_subsys + " ", end="")
+ self.emit("=" * self.max_size_name + " ", end="")
+ self.emit("=" * self.max_size_kconfig + " ", end="")
+ self.emit("=" * self.max_size_status + " ", end="")
+ self.emit("=" * self.max_size_description)
+
+ sorted_features = sorted(self.data.keys(),
+ key=lambda x: (self.data[x]["subsys"],
+ x.lower()))
+
+ for name in sorted_features:
+ if feat and name != feat:
+ continue
+
+ arch_table = self.data[name]["table"]
+
+            if arch not in arch_table:
+ continue
+
+ self.emit(f"{self.data[name]['subsys']:<{self.max_size_subsys}} ",
+ end="")
+ self.emit(f"{name:<{self.max_size_name}} ", end="")
+ self.emit(f"{self.data[name]['kconfig']:<{self.max_size_kconfig}} ",
+ end="")
+ self.emit(f"{arch_table[arch]:<{self.max_size_status}} ",
+ end="")
+ self.emit(f"{self.data[name]['description']}")
+
+ self.emit("=" * self.max_size_subsys + " ", end="")
+ self.emit("=" * self.max_size_name + " ", end="")
+ self.emit("=" * self.max_size_kconfig + " ", end="")
+ self.emit("=" * self.max_size_status + " ", end="")
+ self.emit("=" * self.max_size_description)
+
+ return self.msg
+
+ def output_feature(self, feat):
+ """
+ Output a feature on all architectures
+ """
+
+ title = f"Feature {feat}"
+
+ self.emit("=" * len(title))
+ self.emit(title)
+ self.emit("=" * len(title))
+ self.emit()
+
+        if feat not in self.data:
+ return
+
+ if self.data[feat]["subsys"]:
+ self.emit(f":Subsystem: {self.data[feat]['subsys']}")
+ if self.data[feat]["kconfig"]:
+ self.emit(f":Kconfig: {self.data[feat]['kconfig']}")
+
+ desc = self.data[feat]["description"]
+ desc = desc[0].upper() + desc[1:]
+ desc = desc.rstrip(". \t")
+ self.emit(f"\n{desc}.\n")
+
+ com = self.data[feat]["comments"].strip()
+ if com:
+ self.emit("Comments")
+ self.emit("--------")
+ self.emit(f"\n{com}\n")
+
+ self.emit("=" * self.max_size_arch + " ", end="")
+ self.emit("=" * self.max_size_status)
+
+ self.emit(f"{self.h_arch:<{self.max_size_arch}} ", end="")
+ self.emit(f"{self.h_status:<{self.max_size_status}}")
+
+ self.emit("=" * self.max_size_arch + " ", end="")
+ self.emit("=" * self.max_size_status)
+
+ arch_table = self.data[feat]["table"]
+ for arch in sorted(arch_table.keys()):
+ self.emit(f"{arch:<{self.max_size_arch}} ", end="")
+ self.emit(f"{arch_table[arch]:<{self.max_size_status}}")
+
+ self.emit("=" * self.max_size_arch + " ", end="")
+ self.emit("=" * self.max_size_status)
+
+ return self.msg
+
+ def matrix_lines(self, desc_size, max_size_status, header):
+ """
+        Helper function to emit the horizontal separator lines of the
+        output matrix tables
+ """
+
+ if header:
+ ln_marker = "="
+ else:
+ ln_marker = "-"
+
+ self.emit("+" + ln_marker * self.max_size_name + "+", end="")
+ self.emit(ln_marker * desc_size, end="")
+ self.emit("+" + ln_marker * max_size_status + "+")
+
+ def output_matrix(self):
+ """
+        Generates a set of tables, grouped by subsystem, showing the
+        feature state on each architecture.
+ """
+
+ title = "Feature status on all architectures"
+
+ self.emit("=" * len(title))
+ self.emit(title)
+ self.emit("=" * len(title))
+ self.emit()
+
+ desc_title = f"{self.h_kconfig} / {self.h_description}"
+
+ desc_size = self.max_size_kconfig + 4
+ if not self.description_size:
+ desc_size = max(self.max_size_description, desc_size)
+ else:
+ desc_size = max(self.description_size, desc_size)
+
+ desc_size = max(self.max_size_desc_word, desc_size, len(desc_title))
+
+ notcompat = "Not compatible"
+ self.max_size_status = max(self.max_size_status, len(notcompat))
+
+ min_status_size = self.max_size_status + self.max_size_arch + 4
+ max_size_status = max(min_status_size, self.max_size_status)
+
+ h_status_per_arch = "Status per architecture"
+ max_size_status = max(max_size_status, len(h_status_per_arch))
+
+ cur_subsys = None
+ for name in sorted(self.data.keys(),
+ key=lambda x: (self.data[x]["subsys"], x.lower())):
+ if not cur_subsys or cur_subsys != self.data[name]["subsys"]:
+ if cur_subsys:
+ self.emit()
+
+ cur_subsys = self.data[name]["subsys"]
+
+ title = f"Subsystem: {cur_subsys}"
+ self.emit(title)
+ self.emit("=" * len(title))
+ self.emit()
+
+ self.matrix_lines(desc_size, max_size_status, 0)
+
+ self.emit(f"|{self.h_name:<{self.max_size_name}}", end="")
+ self.emit(f"|{desc_title:<{desc_size}}", end="")
+ self.emit(f"|{h_status_per_arch:<{max_size_status}}|")
+
+ self.matrix_lines(desc_size, max_size_status, 1)
+
+ lines = []
+ descs = []
+ cur_status = ""
+ line = ""
+
+ arch_table = sorted(self.data[name]["table"].items(),
+ key=lambda x: (self.status_map.get(x[1], 99),
+ x[0].lower()))
+
+ for arch, status in arch_table:
+ if status == "---":
+ status = notcompat
+
+ if status != cur_status:
+ if line != "":
+ lines.append(line)
+ line = ""
+ line = f"- **{status}**: {arch}"
+ elif len(line) + len(arch) + 2 < max_size_status:
+ line += f", {arch}"
+ else:
+ lines.append(line)
+ line = f" {arch}"
+ cur_status = status
+
+ if line != "":
+ lines.append(line)
+
+ description = self.data[name]["description"]
+ while len(description) > desc_size:
+ desc_line = description[:desc_size]
+
+ last_space = desc_line.rfind(" ")
+ if last_space != -1:
+ desc_line = desc_line[:last_space]
+ descs.append(desc_line)
+ description = description[last_space + 1:]
+ else:
+ desc_line = desc_line[:-1]
+ descs.append(desc_line + "\\")
+ description = description[len(desc_line):]
+
+ if description:
+ descs.append(description)
+
+ while len(lines) < 2 + len(descs):
+ lines.append("")
+
+ for ln, line in enumerate(lines):
+ col = ["", ""]
+
+ if not ln:
+ col[0] = name
+ col[1] = f"``{self.data[name]['kconfig']}``"
+ else:
+ if ln >= 2 and descs:
+ col[1] = descs.pop(0)
+
+ self.emit(f"|{col[0]:<{self.max_size_name}}", end="")
+ self.emit(f"|{col[1]:<{desc_size}}", end="")
+ self.emit(f"|{line:<{max_size_status}}|")
+
+ self.matrix_lines(desc_size, max_size_status, 0)
+
+ return self.msg
+
+ def list_arch_features(self, arch, feat):
+ """
+ Print a matrix of kernel feature support for the chosen architecture.
+ """
+ self.emit("#")
+ self.emit(f"# Kernel feature support matrix of the '{arch}' architecture:")
+ self.emit("#")
+
+ # Sort by subsystem, then by feature name (case‑insensitive)
+ for name in sorted(self.data.keys(),
+ key=lambda n: (self.data[n]["subsys"].lower(),
+ n.lower())):
+ if feat and name != feat:
+ continue
+
+ feature = self.data[name]
+ arch_table = feature["table"]
+ status = arch_table.get(arch, "")
+ status = " " * ((4 - len(status)) // 2) + status
+
+ self.emit(f"{feature['subsys']:>{self.max_size_subsys + 1}}/ ",
+ end="")
+ self.emit(f"{name:<{self.max_size_name}}: ", end="")
+ self.emit(f"{status:<5}| ", end="")
+ self.emit(f"{feature['kconfig']:>{self.max_size_kconfig}} ",
+ end="")
+ self.emit(f"# {feature['description']}")
+
+ return self.msg
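+
+# Usage sketch (the directory below is the conventional location):
+#
+#     feat = ParseFeature("Documentation/features")
+#     feat.parse()
+#     print(feat.output_matrix())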
diff --git a/tools/lib/python/jobserver.py b/tools/lib/python/jobserver.py
new file mode 100755
index 000000000000..a24f30ef4fa8
--- /dev/null
+++ b/tools/lib/python/jobserver.py
@@ -0,0 +1,149 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0+
+#
+# pylint: disable=C0103,C0209
+#
+#
+
+"""
+Interacts with the POSIX jobserver during the Kernel build time.
+
+A "normal" jobserver task, like the one initiated by a make subrocess would do:
+
+ - open read/write file descriptors to communicate with the job server;
+ - ask for one slot by calling:
+ claim = os.read(reader, 1)
+ - when the job finshes, call:
+ os.write(writer, b"+") # os.write(writer, claim)
+
+Here, the goal is different: This script aims to get the remaining number
+of slots available, using all of them to run a command which handle tasks in
+parallel. To to that, it has a loop that ends only after there are no
+slots left. It then increments the number by one, in order to allow a
+call equivalent to make -j$((claim+1)), e.g. having a parent make creating
+$claim child to do the actual work.
+
+The end goal here is to keep the total number of build tasks under the
+limit established by the initial make -j$n_proc call.
+
+See:
+ https://www.gnu.org/software/make/manual/html_node/POSIX-Jobserver.html#POSIX-Jobserver
+"""
+
+import errno
+import os
+import subprocess
+import sys
+
+class JobserverExec:
+ """
+ Claim all slots from make using POSIX Jobserver.
+
+ The main methods here are:
+ - open(): reserves all slots;
+ - close(): method returns all used slots back to make;
+    - run(): executes a command setting PARALLELISM=<available slots + 1>
+ """
+
+ def __init__(self):
+ """Initialize internal vars"""
+ self.claim = 0
+ self.jobs = b""
+ self.reader = None
+ self.writer = None
+ self.is_open = False
+
+ def open(self):
+ """Reserve all available slots to be claimed later on"""
+
+ if self.is_open:
+ return
+
+ try:
+ # Fetch the make environment options.
+ flags = os.environ["MAKEFLAGS"]
+ # Look for "--jobserver=R,W"
+ # Note that GNU Make has used --jobserver-fds and --jobserver-auth
+ # so this handles all of them.
+ opts = [x for x in flags.split(" ") if x.startswith("--jobserver")]
+
+ # Parse out R,W file descriptor numbers and set them nonblocking.
+ # If the MAKEFLAGS variable contains multiple instances of the
+ # --jobserver-auth= option, the last one is relevant.
+ fds = opts[-1].split("=", 1)[1]
+
+ # Starting with GNU Make 4.4, named pipes are used for reader
+ # and writer.
+ # Example argument: --jobserver-auth=fifo:/tmp/GMfifo8134
+ _, _, path = fds.partition("fifo:")
+
+ if path:
+ self.reader = os.open(path, os.O_RDONLY | os.O_NONBLOCK)
+ self.writer = os.open(path, os.O_WRONLY)
+ else:
+ self.reader, self.writer = [int(x) for x in fds.split(",", 1)]
+ # Open a private copy of reader to avoid setting nonblocking
+ # on an unexpecting process with the same reader fd.
+ self.reader = os.open("/proc/self/fd/%d" % (self.reader),
+ os.O_RDONLY | os.O_NONBLOCK)
+
+ # Read out as many jobserver slots as possible
+ while True:
+ try:
+ slot = os.read(self.reader, 8)
+ self.jobs += slot
+ except (OSError, IOError) as e:
+ if e.errno == errno.EWOULDBLOCK:
+ # Stop at the end of the jobserver queue.
+ break
+ # If something went wrong, give back the jobs.
+ if self.jobs:
+ os.write(self.writer, self.jobs)
+ raise e
+
+            # Add a bump for our caller's reservation, since we're just
+            # going to sit here blocked on our child.
+ self.claim = len(self.jobs) + 1
+
+ except (KeyError, IndexError, ValueError, OSError, IOError):
+ # Any missing environment strings or bad fds should result in just
+ # not being parallel.
+ self.claim = None
+
+ self.is_open = True
+
+ def close(self):
+ """Return all reserved slots to Jobserver"""
+
+ if not self.is_open:
+ return
+
+ # Return all the reserved slots.
+ if len(self.jobs):
+ os.write(self.writer, self.jobs)
+
+ self.is_open = False
+
+ def __enter__(self):
+ self.open()
+ return self
+
+ def __exit__(self, exc_type, exc_value, exc_traceback):
+ self.close()
+
+ def run(self, cmd, *args, **pwargs):
+ """
+ Run a command setting PARALLELISM env variable to the number of
+ available job slots (claim) + 1, e.g. it will reserve claim slots
+ to do the actual build work, plus one to monitor its children.
+ """
+ self.open() # Ensure that self.claim is set
+
+ # We can only claim parallelism if there was a jobserver (i.e. a
+ # top-level "-jN" argument) and there were no other failures. Otherwise
+ # leave out the environment variable and let the child figure out what
+ # is best.
+ if self.claim:
+ os.environ["PARALLELISM"] = str(self.claim)
+
+ return subprocess.call(cmd, *args, **pwargs)
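+
+# Minimal usage sketch (assuming this runs as a child of "make -jN";
+# the target name is illustrative):
+#
+#     with JobserverExec() as js:
+#         js.run(["make", "htmldocs"])
+#
+# Without a jobserver in MAKEFLAGS, claim stays None and PARALLELISM
+# is simply left unset.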
diff --git a/tools/lib/python/kdoc/__init__.py b/tools/lib/python/kdoc/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/tools/lib/python/kdoc/__init__.py
diff --git a/tools/lib/python/kdoc/enrich_formatter.py b/tools/lib/python/kdoc/enrich_formatter.py
new file mode 100644
index 000000000000..bb171567a4ca
--- /dev/null
+++ b/tools/lib/python/kdoc/enrich_formatter.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2025 by Mauro Carvalho Chehab <mchehab@kernel.org>.
+
+"""
+Ancillary argparse HelpFormatter class that works in a similar way to
+argparse.RawDescriptionHelpFormatter, i.e. the description keeps its
+line breaks, but transformations are also applied to the help text.
+The actual transformations are done by enrich_text(), if the output
+is a tty.
+
+Currently, the following transformations are done:
+
+ - positional arguments are shown in upper case;
+ - if the output is a TTY, ``var`` markups and positional arguments are
+   prefixed with an ANSI SGR code. This is usually rendered as bold. On
+   some terminals, like konsole, it is rendered as colored bold text.
+"""
+
+import argparse
+import re
+import sys
+
+class EnrichFormatter(argparse.HelpFormatter):
+ """
+    Format the output in a better way, making it easier to identify the
+    positional args and how they're used in the __doc__ description.
+ """
+ def __init__(self, *args, **kwargs):
+ """Initialize class and check if is TTY"""
+ super().__init__(*args, **kwargs)
+ self._tty = sys.stdout.isatty()
+
+ def enrich_text(self, text):
+ """Handle ReST markups (currently, only ``foo``)"""
+ if self._tty and text:
+ # Replace ``text`` with ANSI SGR (bold)
+ return re.sub(r'\`\`(.+?)\`\`',
+ lambda m: f'\033[1m{m.group(1)}\033[0m', text)
+ return text
+
+ def _fill_text(self, text, width, indent):
+ """Enrich descriptions with markups on it"""
+ enriched = self.enrich_text(text)
+ return "\n".join(indent + line for line in enriched.splitlines())
+
+ def _format_usage(self, usage, actions, groups, prefix):
+ """Enrich positional arguments at usage: line"""
+
+ prog = self._prog
+ parts = []
+
+ for action in actions:
+ if action.option_strings:
+ opt = action.option_strings[0]
+ if action.nargs != 0:
+ opt += f" {action.dest.upper()}"
+ parts.append(f"[{opt}]")
+ else:
+ # Positional argument
+ parts.append(self.enrich_text(f"``{action.dest.upper()}``"))
+
+ usage_text = f"{prefix or 'usage: '} {prog} {' '.join(parts)}\n"
+ return usage_text
+
+ def _format_action_invocation(self, action):
+ """Enrich argument names"""
+ if not action.option_strings:
+ return self.enrich_text(f"``{action.dest.upper()}``")
+
+ return ", ".join(action.option_strings)
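+
+# Usage sketch (hypothetical parser): pass the class as formatter_class
+# so argparse renders ``markups`` in bold when stdout is a TTY:
+#
+#     parser = argparse.ArgumentParser(description=__doc__,
+#                                      formatter_class=EnrichFormatter)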
diff --git a/tools/lib/python/kdoc/kdoc_files.py b/tools/lib/python/kdoc/kdoc_files.py
new file mode 100644
index 000000000000..bfe02baf1606
--- /dev/null
+++ b/tools/lib/python/kdoc/kdoc_files.py
@@ -0,0 +1,294 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
+#
+# pylint: disable=R0903,R0913,R0914,R0917
+
+"""
+Parse kernel-doc tags on multiple kernel source files.
+"""
+
+import argparse
+import logging
+import os
+import re
+
+from kdoc.kdoc_parser import KernelDoc
+from kdoc.kdoc_output import OutputFormat
+
+
+class GlobSourceFiles:
+ """
+    Parse C source code file names and directories via an iterator.
+ """
+
+ def __init__(self, srctree=None, valid_extensions=None):
+ """
+ Initialize valid extensions with a tuple.
+
+ If not defined, assume default C extensions (.c and .h)
+
+        It would be possible to use python's glob function, but it is
+        very slow and it is not lazy: it would read all directories
+        before actually doing anything.
+
+ So, let's use our own implementation.
+ """
+
+ if not valid_extensions:
+ self.extensions = (".c", ".h")
+ else:
+ self.extensions = valid_extensions
+
+ self.srctree = srctree
+
+ def _parse_dir(self, dirname):
+ """Internal function to parse files recursively"""
+
+ with os.scandir(dirname) as obj:
+ for entry in obj:
+ name = os.path.join(dirname, entry.name)
+
+ if entry.is_dir(follow_symlinks=False):
+ yield from self._parse_dir(name)
+
+ if not entry.is_file():
+ continue
+
+ basename = os.path.basename(name)
+
+ if not basename.endswith(self.extensions):
+ continue
+
+ yield name
+
+ def parse_files(self, file_list, file_not_found_cb):
+ """
+ Define an iterator to parse all source files from file_list,
+ handling directories if any
+ """
+
+ if not file_list:
+ return
+
+ for fname in file_list:
+ if self.srctree:
+ f = os.path.join(self.srctree, fname)
+ else:
+ f = fname
+
+ if os.path.isdir(f):
+ yield from self._parse_dir(f)
+ elif os.path.isfile(f):
+ yield f
+ elif file_not_found_cb:
+ file_not_found_cb(fname)
+
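+# Usage sketch (directory names are illustrative): lazily yield every
+# .c/.h file under a subtree, reporting missing entries via print:
+#
+#     glob = GlobSourceFiles(srctree=os.environ.get("SRCTREE"))
+#     for fname in glob.parse_files(["drivers/base"], print):
+#         print(fname)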
+
+class KernelFiles():
+ """
+ Parse kernel-doc tags on multiple kernel source files.
+
+    There are two types of parsers defined here:
+ - self.parse_file(): parses both kernel-doc markups and
+ EXPORT_SYMBOL* macros;
+ - self.process_export_file(): parses only EXPORT_SYMBOL* macros.
+ """
+
+ def warning(self, msg):
+ """Ancillary routine to output a warning and increment error count"""
+
+ self.config.log.warning(msg)
+ self.errors += 1
+
+ def error(self, msg):
+ """Ancillary routine to output an error and increment error count"""
+
+ self.config.log.error(msg)
+ self.errors += 1
+
+ def parse_file(self, fname):
+ """
+ Parse a single Kernel source.
+ """
+
+ # Prevent parsing the same file twice if results are cached
+ if fname in self.files:
+ return
+
+ doc = KernelDoc(self.config, fname)
+ export_table, entries = doc.parse_kdoc()
+
+ self.export_table[fname] = export_table
+
+ self.files.add(fname)
+        self.export_files.add(fname)  # parse_kdoc() already checks exports
+
+ self.results[fname] = entries
+
+ def process_export_file(self, fname):
+ """
+ Parses EXPORT_SYMBOL* macros from a single Kernel source file.
+ """
+
+ # Prevent parsing the same file twice if results are cached
+ if fname in self.export_files:
+ return
+
+ doc = KernelDoc(self.config, fname)
+ export_table = doc.parse_export()
+
+ if not export_table:
+ self.error(f"Error: Cannot check EXPORT_SYMBOL* on {fname}")
+ export_table = set()
+
+ self.export_table[fname] = export_table
+ self.export_files.add(fname)
+
+ def file_not_found_cb(self, fname):
+ """
+ Callback to warn if a file was not found.
+ """
+
+ self.error(f"Cannot find file {fname}")
+
+ def __init__(self, verbose=False, out_style=None,
+ werror=False, wreturn=False, wshort_desc=False,
+ wcontents_before_sections=False,
+ logger=None):
+ """
+ Initialize startup variables and parse all files
+ """
+
+ if not verbose:
+ verbose = bool(os.environ.get("KBUILD_VERBOSE", 0))
+
+ if out_style is None:
+ out_style = OutputFormat()
+
+ if not werror:
+ kcflags = os.environ.get("KCFLAGS", None)
+ if kcflags:
+                match = re.search(r"(\s|^)-Werror(\s|$)", kcflags)
+ if match:
+ werror = True
+
+ # reading this variable is for backwards compat just in case
+ # someone was calling it with the variable from outside the
+ # kernel's build system
+ kdoc_werror = os.environ.get("KDOC_WERROR", None)
+ if kdoc_werror:
+ werror = kdoc_werror
+
+ # Some variables are global to the parser logic as a whole as they are
+ # used to send control configuration to KernelDoc class. As such,
+ # those variables are read-only inside the KernelDoc.
+        self.config = argparse.Namespace()
+
+ self.config.verbose = verbose
+ self.config.werror = werror
+ self.config.wreturn = wreturn
+ self.config.wshort_desc = wshort_desc
+ self.config.wcontents_before_sections = wcontents_before_sections
+
+ if not logger:
+ self.config.log = logging.getLogger("kernel-doc")
+ else:
+ self.config.log = logger
+
+ self.config.warning = self.warning
+
+ self.config.src_tree = os.environ.get("SRCTREE", None)
+
+ # Initialize variables that are internal to KernelFiles
+
+ self.out_style = out_style
+
+ self.errors = 0
+ self.results = {}
+
+ self.files = set()
+ self.export_files = set()
+ self.export_table = {}
+
+ def parse(self, file_list, export_file=None):
+ """
+ Parse all files
+ """
+
+ glob = GlobSourceFiles(srctree=self.config.src_tree)
+
+ for fname in glob.parse_files(file_list, self.file_not_found_cb):
+ self.parse_file(fname)
+
+ for fname in glob.parse_files(export_file, self.file_not_found_cb):
+ self.process_export_file(fname)
+
+ def out_msg(self, fname, name, arg):
+ """
+ Return output messages from a file name using the output style
+ filtering.
+
+ If output type was not handled by the styler, return None.
+ """
+
+ # NOTE: we can add rules here to filter out unwanted parts,
+ # although OutputFormat.msg already does that.
+
+ return self.out_style.msg(fname, name, arg)
+
+ def msg(self, enable_lineno=False, export=False, internal=False,
+ symbol=None, nosymbol=None, no_doc_sections=False,
+ filenames=None, export_file=None):
+ """
+        Iterates over the kernel-doc results and output messages,
+        yielding kernel-doc markups on each iteration
+ """
+
+ self.out_style.set_config(self.config)
+
+ if not filenames:
+ filenames = sorted(self.results.keys())
+
+ glob = GlobSourceFiles(srctree=self.config.src_tree)
+
+ for fname in filenames:
+ function_table = set()
+
+ if internal or export:
+ if not export_file:
+ export_file = [fname]
+
+ for f in glob.parse_files(export_file, self.file_not_found_cb):
+ function_table |= self.export_table[f]
+
+ if symbol:
+ for s in symbol:
+ function_table.add(s)
+
+ self.out_style.set_filter(export, internal, symbol, nosymbol,
+ function_table, enable_lineno,
+ no_doc_sections)
+
+ msg = ""
+ if fname not in self.results:
+ self.config.log.warning("No kernel-doc for file %s", fname)
+ continue
+
+ symbols = self.results[fname]
+ self.out_style.set_symbols(symbols)
+
+ for arg in symbols:
+ m = self.out_msg(fname, arg.name, arg)
+
+ if m is None:
+ ln = arg.get("ln", 0)
+ dtype = arg.get('type', "")
+
+ self.config.log.warning("%s:%d Can't handle %s",
+ fname, ln, dtype)
+ else:
+ msg += m
+
+ if msg:
+ yield fname, msg
diff --git a/tools/lib/python/kdoc/kdoc_item.py b/tools/lib/python/kdoc/kdoc_item.py
new file mode 100644
index 000000000000..19805301cb2c
--- /dev/null
+++ b/tools/lib/python/kdoc/kdoc_item.py
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# A class that will, eventually, encapsulate all of the parsed data that we
+# then pass into the output modules.
+#
+
+class KdocItem:
+ def __init__(self, name, fname, type, start_line, **other_stuff):
+ self.name = name
+ self.fname = fname
+ self.type = type
+ self.declaration_start_line = start_line
+ self.sections = {}
+        self.section_start_lines = {}
+ self.parameterlist = []
+ self.parameterdesc_start_lines = []
+ self.parameterdescs = {}
+ self.parametertypes = {}
+ #
+ # Just save everything else into our own dict so that the output
+ # side can grab it directly as before. As we move things into more
+ # structured data, this will, hopefully, fade away.
+ #
+ self.other_stuff = other_stuff
+
+ def get(self, key, default = None):
+ return self.other_stuff.get(key, default)
+
+ def __getitem__(self, key):
+ return self.get(key)
+
+ #
+ # Tracking of section and parameter information.
+ #
+ def set_sections(self, sections, start_lines):
+ self.sections = sections
+ self.section_start_lines = start_lines
+
+ def set_params(self, names, descs, types, starts):
+ self.parameterlist = names
+ self.parameterdescs = descs
+ self.parametertypes = types
+ self.parameterdesc_start_lines = starts
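+
+# Usage sketch (values are illustrative): fields that are not modeled
+# as attributes fall back to other_stuff, via both get() and item[...]:
+#
+#     item = KdocItem("foo", "foo.c", "function", 10, purpose="Do foo")
+#     assert item["purpose"] == item.get("purpose") == "Do foo"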
diff --git a/tools/lib/python/kdoc/kdoc_output.py b/tools/lib/python/kdoc/kdoc_output.py
new file mode 100644
index 000000000000..b1aaa7fc3604
--- /dev/null
+++ b/tools/lib/python/kdoc/kdoc_output.py
@@ -0,0 +1,824 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
+#
+# pylint: disable=C0301,R0902,R0911,R0912,R0913,R0914,R0915,R0917
+
+"""
+Implement output filters to print kernel-doc documentation.
+
+The implementation uses a virtual base class (OutputFormat) which
+contains dispatchers to virtual methods, and some code to filter
+out output messages.
+
+The actual implementation is done in one separate class per output
+type. Currently, there are output classes for ReST and man/troff.
+"""
+
+import os
+import re
+from datetime import datetime
+
+from kdoc.kdoc_parser import KernelDoc, type_param
+from kdoc.kdoc_re import KernRe
+
+
+function_pointer = KernRe(r"([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)", cache=False)
+
+# match expressions used to find embedded type information
+type_constant = KernRe(r"\b``([^\`]+)``\b", cache=False)
+type_constant2 = KernRe(r"\%([-_*\w]+)", cache=False)
+type_func = KernRe(r"(\w+)\(\)", cache=False)
+type_param_ref = KernRe(r"([\!~\*]?)\@(\w*((\.\w+)|(->\w+))*(\.\.\.)?)", cache=False)
+
+# Special RST handling for func ptr params
+type_fp_param = KernRe(r"\@(\w+)\(\)", cache=False)
+
+# Special RST handling for structs with func ptr params
+type_fp_param2 = KernRe(r"\@(\w+->\S+)\(\)", cache=False)
+
+type_env = KernRe(r"(\$\w+)", cache=False)
+type_enum = KernRe(r"\&(enum\s*([_\w]+))", cache=False)
+type_struct = KernRe(r"\&(struct\s*([_\w]+))", cache=False)
+type_typedef = KernRe(r"\&(typedef\s*([_\w]+))", cache=False)
+type_union = KernRe(r"\&(union\s*([_\w]+))", cache=False)
+type_member = KernRe(r"\&([_\w]+)(\.|->)([_\w]+)", cache=False)
+type_fallback = KernRe(r"\&([_\w]+)", cache=False)
+type_member_func = type_member + KernRe(r"\(\)", cache=False)
+
+
+class OutputFormat:
+ """
+ Base class for OutputFormat. If used as-is, it means that only
+ warnings will be displayed.
+ """
+
+ # output mode.
+ OUTPUT_ALL = 0 # output all symbols and doc sections
+ OUTPUT_INCLUDE = 1 # output only specified symbols
+ OUTPUT_EXPORTED = 2 # output exported symbols
+ OUTPUT_INTERNAL = 3 # output non-exported symbols
+
+ # Virtual member to be overridden at the inherited classes
+ highlights = []
+
+ def __init__(self):
+ """Declare internal vars and set mode to OUTPUT_ALL"""
+
+ self.out_mode = self.OUTPUT_ALL
+ self.enable_lineno = None
+ self.nosymbol = {}
+ self.symbol = None
+ self.function_table = None
+ self.config = None
+ self.no_doc_sections = False
+
+ self.data = ""
+
+ def set_config(self, config):
+ """
+ Setup global config variables used by both parser and output.
+ """
+
+ self.config = config
+
+ def set_filter(self, export, internal, symbol, nosymbol, function_table,
+ enable_lineno, no_doc_sections):
+ """
+ Initialize filter variables according to the requested mode.
+
+ Only one choice is valid between export, internal and symbol.
+
+ The nosymbol filter can be used on all modes.
+ """
+
+ self.enable_lineno = enable_lineno
+ self.no_doc_sections = no_doc_sections
+ self.function_table = function_table
+
+ if symbol:
+ self.out_mode = self.OUTPUT_INCLUDE
+ elif export:
+ self.out_mode = self.OUTPUT_EXPORTED
+ elif internal:
+ self.out_mode = self.OUTPUT_INTERNAL
+ else:
+ self.out_mode = self.OUTPUT_ALL
+
+ if nosymbol:
+ self.nosymbol = set(nosymbol)
+
+
+ def highlight_block(self, block):
+ """
+ Apply the RST highlights to a sub-block of text.
+ """
+
+ for r, sub in self.highlights:
+ block = r.sub(sub, block)
+
+ return block
+
+ def out_warnings(self, args):
+ """
+ Output warnings for identifiers that will be displayed.
+ """
+
+ for log_msg in args.warnings:
+ self.config.warning(log_msg)
+
+ def check_doc(self, name, args):
+ """Check if DOC should be output"""
+
+ if self.no_doc_sections:
+ return False
+
+ if name in self.nosymbol:
+ return False
+
+ if self.out_mode == self.OUTPUT_ALL:
+ self.out_warnings(args)
+ return True
+
+ if self.out_mode == self.OUTPUT_INCLUDE:
+ if name in self.function_table:
+ self.out_warnings(args)
+ return True
+
+ return False
+
+ def check_declaration(self, dtype, name, args):
+ """
+ Checks if a declaration should be output or not based on the
+ filtering criteria.
+ """
+
+ if name in self.nosymbol:
+ return False
+
+ if self.out_mode == self.OUTPUT_ALL:
+ self.out_warnings(args)
+ return True
+
+ if self.out_mode in [self.OUTPUT_INCLUDE, self.OUTPUT_EXPORTED]:
+ if name in self.function_table:
+ return True
+
+ if self.out_mode == self.OUTPUT_INTERNAL:
+ if dtype != "function":
+ self.out_warnings(args)
+ return True
+
+ if name not in self.function_table:
+ self.out_warnings(args)
+ return True
+
+ return False
+
+ def msg(self, fname, name, args):
+ """
+ Handles a single entry from kernel-doc parser
+ """
+
+ self.data = ""
+
+ dtype = args.type
+
+ if dtype == "doc":
+ self.out_doc(fname, name, args)
+ return self.data
+
+ if not self.check_declaration(dtype, name, args):
+ return self.data
+
+ if dtype == "function":
+ self.out_function(fname, name, args)
+ return self.data
+
+ if dtype == "enum":
+ self.out_enum(fname, name, args)
+ return self.data
+
+ if dtype == "typedef":
+ self.out_typedef(fname, name, args)
+ return self.data
+
+ if dtype in ["struct", "union"]:
+ self.out_struct(fname, name, args)
+ return self.data
+
+ # Warn if some type requires an output logic
+ self.config.log.warning("doesn't know how to output '%s' block",
+ dtype)
+
+ return None
+
+ # Virtual methods to be overridden by inherited classes
+ # At the base class, those do nothing.
+ def set_symbols(self, symbols):
+ """Get a list of all symbols from kernel_doc"""
+
+ def out_doc(self, fname, name, args):
+ """Outputs a DOC block"""
+
+ def out_function(self, fname, name, args):
+ """Outputs a function"""
+
+ def out_enum(self, fname, name, args):
+ """Outputs an enum"""
+
+ def out_typedef(self, fname, name, args):
+ """Outputs a typedef"""
+
+ def out_struct(self, fname, name, args):
+ """Outputs a struct"""
+
+
+class RestFormat(OutputFormat):
+ """Consts and functions used by ReST output"""
+
+ highlights = [
+ (type_constant, r"``\1``"),
+ (type_constant2, r"``\1``"),
+
+ # Note: need to escape () to avoid func matching later
+ (type_member_func, r":c:type:`\1\2\3\\(\\) <\1>`"),
+ (type_member, r":c:type:`\1\2\3 <\1>`"),
+ (type_fp_param, r"**\1\\(\\)**"),
+ (type_fp_param2, r"**\1\\(\\)**"),
+ (type_func, r"\1()"),
+ (type_enum, r":c:type:`\1 <\2>`"),
+ (type_struct, r":c:type:`\1 <\2>`"),
+ (type_typedef, r":c:type:`\1 <\2>`"),
+ (type_union, r":c:type:`\1 <\2>`"),
+
+ # in rst this can refer to any type
+ (type_fallback, r":c:type:`\1`"),
+ (type_param_ref, r"**\1\2**")
+ ]
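+    # Illustrative effect of some substitutions above (examples only):
+    #   "&struct device" -> ":c:type:`struct device <device>`"
+    #   "@count"         -> "**count**"
+    #   "%NULL"          -> "``NULL``"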
+ blankline = "\n"
+
+ sphinx_literal = KernRe(r'^[^.].*::$', cache=False)
+ sphinx_cblock = KernRe(r'^\.\.\ +code-block::', cache=False)
+
+ def __init__(self):
+ """
+ Creates class variables.
+
+ Not really mandatory, but it is a good coding style and makes
+ pylint happy.
+ """
+
+ super().__init__()
+ self.lineprefix = ""
+
+ def print_lineno(self, ln):
+ """Outputs a line number"""
+
+ if self.enable_lineno and ln is not None:
+ ln += 1
+ self.data += f".. LINENO {ln}\n"
+
+ def output_highlight(self, args):
+ """
+ Outputs a C symbol that may require being converted to ReST using
+ the self.highlights variable
+ """
+
+ input_text = args
+ output = ""
+ in_literal = False
+ litprefix = ""
+ block = ""
+
+ for line in input_text.strip("\n").split("\n"):
+
+ # If we're in a literal block, see if we should drop out of it.
+ # Otherwise, pass the line straight through unmunged.
+ if in_literal:
+ if line.strip(): # If the line is not blank
+ # If this is the first non-blank line in a literal block,
+ # figure out the proper indent.
+ if not litprefix:
+ r = KernRe(r'^(\s*)')
+ if r.match(line):
+ litprefix = '^' + r.group(1)
+ else:
+ litprefix = ""
+
+ output += line + "\n"
+ elif not KernRe(litprefix).match(line):
+ in_literal = False
+ else:
+ output += line + "\n"
+ else:
+ output += line + "\n"
+
+ # Not in a literal block (or just dropped out)
+ if not in_literal:
+ block += line + "\n"
+ if self.sphinx_literal.match(line) or self.sphinx_cblock.match(line):
+ in_literal = True
+ litprefix = ""
+ output += self.highlight_block(block)
+ block = ""
+
+ # Handle any remaining block
+ if block:
+ output += self.highlight_block(block)
+
+ # Print the output with the line prefix
+ for line in output.strip("\n").split("\n"):
+ self.data += self.lineprefix + line + "\n"
+
+ def out_section(self, args, out_docblock=False):
+ """
+ Outputs a block section.
+
+ This could use some work; it's used to output the DOC: sections, and
+ starts by putting out the name of the doc section itself, but that
+ tends to duplicate a header already in the template file.
+ """
+ for section, text in args.sections.items():
+ # Skip sections that are in the nosymbol_table
+ if section in self.nosymbol:
+ continue
+
+ if out_docblock:
+                if self.out_mode != self.OUTPUT_INCLUDE:
+ self.data += f".. _{section}:\n\n"
+ self.data += f'{self.lineprefix}**{section}**\n\n'
+ else:
+ self.data += f'{self.lineprefix}**{section}**\n\n'
+
+ self.print_lineno(args.section_start_lines.get(section, 0))
+ self.output_highlight(text)
+ self.data += "\n"
+ self.data += "\n"
+
+ def out_doc(self, fname, name, args):
+ if not self.check_doc(name, args):
+ return
+ self.out_section(args, out_docblock=True)
+
+ def out_function(self, fname, name, args):
+
+ oldprefix = self.lineprefix
+ signature = ""
+
+ func_macro = args.get('func_macro', False)
+ if func_macro:
+ signature = name
+ else:
+ if args.get('functiontype'):
+ signature = args['functiontype'] + " "
+ signature += name + " ("
+
+ ln = args.declaration_start_line
+ count = 0
+ for parameter in args.parameterlist:
+ if count != 0:
+ signature += ", "
+ count += 1
+ dtype = args.parametertypes.get(parameter, "")
+
+            if function_pointer.search(dtype):
+                # Rebuild "ret (*name) (args)"; the pattern has two groups
+                signature += function_pointer.group(1) + parameter + ") (" + function_pointer.group(2) + ")"
+ else:
+ signature += dtype
+
+ if not func_macro:
+ signature += ")"
+
+ self.print_lineno(ln)
+ if args.get('typedef') or not args.get('functiontype'):
+ self.data += f".. c:macro:: {name}\n\n"
+
+ if args.get('typedef'):
+ self.data += " **Typedef**: "
+ self.lineprefix = ""
+ self.output_highlight(args.get('purpose', ""))
+ self.data += "\n\n**Syntax**\n\n"
+ self.data += f" ``{signature}``\n\n"
+ else:
+ self.data += f"``{signature}``\n\n"
+ else:
+ self.data += f".. c:function:: {signature}\n\n"
+
+ if not args.get('typedef'):
+ self.print_lineno(ln)
+ self.lineprefix = " "
+ self.output_highlight(args.get('purpose', ""))
+ self.data += "\n"
+
+ # Put descriptive text into a container (HTML <div>) to help set
+ # function prototypes apart
+ self.lineprefix = " "
+
+ if args.parameterlist:
+ self.data += ".. container:: kernelindent\n\n"
+ self.data += f"{self.lineprefix}**Parameters**\n\n"
+
+ for parameter in args.parameterlist:
+ parameter_name = KernRe(r'\[.*').sub('', parameter)
+ dtype = args.parametertypes.get(parameter, "")
+
+ if dtype:
+ self.data += f"{self.lineprefix}``{dtype}``\n"
+ else:
+ self.data += f"{self.lineprefix}``{parameter}``\n"
+
+ self.print_lineno(args.parameterdesc_start_lines.get(parameter_name, 0))
+
+ self.lineprefix = " "
+ if parameter_name in args.parameterdescs and \
+ args.parameterdescs[parameter_name] != KernelDoc.undescribed:
+
+ self.output_highlight(args.parameterdescs[parameter_name])
+ self.data += "\n"
+ else:
+ self.data += f"{self.lineprefix}*undescribed*\n\n"
+ self.lineprefix = " "
+
+ self.out_section(args)
+ self.lineprefix = oldprefix
+
+ def out_enum(self, fname, name, args):
+
+ oldprefix = self.lineprefix
+ ln = args.declaration_start_line
+
+ self.data += f"\n\n.. c:enum:: {name}\n\n"
+
+ self.print_lineno(ln)
+ self.lineprefix = " "
+ self.output_highlight(args.get('purpose', ''))
+ self.data += "\n"
+
+ self.data += ".. container:: kernelindent\n\n"
+ outer = self.lineprefix + " "
+ self.lineprefix = outer + " "
+ self.data += f"{outer}**Constants**\n\n"
+
+ for parameter in args.parameterlist:
+ self.data += f"{outer}``{parameter}``\n"
+
+ if args.parameterdescs.get(parameter, '') != KernelDoc.undescribed:
+ self.output_highlight(args.parameterdescs[parameter])
+ else:
+ self.data += f"{self.lineprefix}*undescribed*\n\n"
+ self.data += "\n"
+
+ self.lineprefix = oldprefix
+ self.out_section(args)
+
+ def out_typedef(self, fname, name, args):
+
+ oldprefix = self.lineprefix
+ ln = args.declaration_start_line
+
+ self.data += f"\n\n.. c:type:: {name}\n\n"
+
+ self.print_lineno(ln)
+ self.lineprefix = " "
+
+ self.output_highlight(args.get('purpose', ''))
+
+ self.data += "\n"
+
+ self.lineprefix = oldprefix
+ self.out_section(args)
+
+ def out_struct(self, fname, name, args):
+
+ purpose = args.get('purpose', "")
+ declaration = args.get('definition', "")
+ dtype = args.type
+ ln = args.declaration_start_line
+
+ self.data += f"\n\n.. c:{dtype}:: {name}\n\n"
+
+ self.print_lineno(ln)
+
+ oldprefix = self.lineprefix
+ self.lineprefix += " "
+
+ self.output_highlight(purpose)
+ self.data += "\n"
+
+ self.data += ".. container:: kernelindent\n\n"
+ self.data += f"{self.lineprefix}**Definition**::\n\n"
+
+ self.lineprefix = self.lineprefix + " "
+
+ declaration = declaration.replace("\t", self.lineprefix)
+
+ self.data += f"{self.lineprefix}{dtype} {name}" + ' {' + "\n"
+ self.data += f"{declaration}{self.lineprefix}" + "};\n\n"
+
+ self.lineprefix = " "
+ self.data += f"{self.lineprefix}**Members**\n\n"
+ for parameter in args.parameterlist:
+ if not parameter or parameter.startswith("#"):
+ continue
+
+ parameter_name = parameter.split("[", maxsplit=1)[0]
+
+ if args.parameterdescs.get(parameter_name) == KernelDoc.undescribed:
+ continue
+
+ self.print_lineno(args.parameterdesc_start_lines.get(parameter_name, 0))
+
+ self.data += f"{self.lineprefix}``{parameter}``\n"
+
+ self.lineprefix = " "
+ self.output_highlight(args.parameterdescs[parameter_name])
+ self.lineprefix = " "
+
+ self.data += "\n"
+
+ self.data += "\n"
+
+ self.lineprefix = oldprefix
+ self.out_section(args)
+
+
+class ManFormat(OutputFormat):
+ """Consts and functions used by man pages output"""
+
+ highlights = (
+ (type_constant, r"\1"),
+ (type_constant2, r"\1"),
+ (type_func, r"\\fB\1\\fP"),
+ (type_enum, r"\\fI\1\\fP"),
+ (type_struct, r"\\fI\1\\fP"),
+ (type_typedef, r"\\fI\1\\fP"),
+ (type_union, r"\\fI\1\\fP"),
+ (type_param, r"\\fI\1\\fP"),
+ (type_param_ref, r"\\fI\1\2\\fP"),
+ (type_member, r"\\fI\1\2\3\\fP"),
+ (type_fallback, r"\\fI\1\\fP")
+ )
+ blankline = ""
+
+ date_formats = [
+ "%a %b %d %H:%M:%S %Z %Y",
+ "%a %b %d %H:%M:%S %Y",
+ "%Y-%m-%d",
+ "%b %d %Y",
+ "%B %d %Y",
+ "%m %d %Y",
+ ]
+
+ def __init__(self, modulename):
+ """
+ Creates class variables.
+
+ Not really mandatory, but it is a good coding style and makes
+ pylint happy.
+ """
+
+ super().__init__()
+ self.modulename = modulename
+ self.symbols = []
+
+ dt = None
+ tstamp = os.environ.get("KBUILD_BUILD_TIMESTAMP")
+ if tstamp:
+ for fmt in self.date_formats:
+ try:
+ dt = datetime.strptime(tstamp, fmt)
+ break
+ except ValueError:
+ pass
+
+ if not dt:
+ dt = datetime.now()
+
+ self.man_date = dt.strftime("%B %Y")
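+        # Example (illustrative): KBUILD_BUILD_TIMESTAMP="Sat Feb 1 12:00:00 UTC 2025"
+        # matches the first format above and yields a man_date of
+        # "February 2025"; values matching no format fall back to the
+        # current date.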
+
+ def arg_name(self, args, name):
+ """
+ Return the name that will be used for the man page.
+
+ As we may have the same name on different namespaces,
+ prepend the data type for all types except functions and typedefs.
+
+ The doc section is special: it uses the modulename.
+ """
+
+ dtype = args.type
+
+ if dtype == "doc":
+ return self.modulename
+
+ if dtype in ["function", "typedef"]:
+ return name
+
+ return f"{dtype} {name}"
+
+ def set_symbols(self, symbols):
+ """
+ Get a list of all symbols from kernel_doc.
+
+        Man pages use it to add a SEE ALSO section listing the other
+        symbols documented in the same file.
+ """
+ self.symbols = symbols
+
+ def out_tail(self, fname, name, args):
+ """Adds a tail for all man pages"""
+
+ # SEE ALSO section
+ self.data += f'.SH "SEE ALSO"' + "\n.PP\n"
+ self.data += (f"Kernel file \\fB{args.fname}\\fR\n")
+ if len(self.symbols) >= 2:
+ cur_name = self.arg_name(args, name)
+
+ related = []
+ for arg in self.symbols:
+ out_name = self.arg_name(arg, arg.name)
+
+ if cur_name == out_name:
+ continue
+
+ related.append(f"\\fB{out_name}\\fR(9)")
+
+ self.data += ",\n".join(related) + "\n"
+
+ # TODO: does it make sense to add other sections? Maybe
+ # REPORTING ISSUES? LICENSE?
+
+ def msg(self, fname, name, args):
+ """
+ Handles a single entry from kernel-doc parser.
+
+ Add a tail at the end of man pages output.
+ """
+ super().msg(fname, name, args)
+ self.out_tail(fname, name, args)
+
+ return self.data
+
+ def output_highlight(self, block):
+ """
+ Outputs a C symbol that may require being highlighted with
+ self.highlights variable using troff syntax
+ """
+
+ contents = self.highlight_block(block)
+
+ if isinstance(contents, list):
+ contents = "\n".join(contents)
+
+ for line in contents.strip("\n").split("\n"):
+ line = KernRe(r"^\s*").sub("", line)
+ if not line:
+ continue
+
+ if line[0] == ".":
+ self.data += "\\&" + line + "\n"
+ else:
+ self.data += line + "\n"
+
+ def out_doc(self, fname, name, args):
+ if not self.check_doc(name, args):
+ return
+
+ out_name = self.arg_name(args, name)
+
+ self.data += f'.TH "{self.modulename}" 9 "{out_name}" "{self.man_date}" "API Manual" LINUX' + "\n"
+
+ for section, text in args.sections.items():
+ self.data += f'.SH "{section}"' + "\n"
+ self.output_highlight(text)
+
+ def out_function(self, fname, name, args):
+ """output function in man"""
+
+ out_name = self.arg_name(args, name)
+
+ self.data += f'.TH "{name}" 9 "{out_name}" "{self.man_date}" "Kernel Hacker\'s Manual" LINUX' + "\n"
+
+ self.data += ".SH NAME\n"
+ self.data += f"{name} \\- {args['purpose']}\n"
+
+ self.data += ".SH SYNOPSIS\n"
+ if args.get('functiontype', ''):
+ self.data += f'.B "{args["functiontype"]}" {name}' + "\n"
+ else:
+ self.data += f'.B "{name}"' + "\n"
+
+ count = 0
+ parenth = "("
+ post = ","
+
+ for parameter in args.parameterlist:
+ if count == len(args.parameterlist) - 1:
+ post = ");"
+
+ dtype = args.parametertypes.get(parameter, "")
+ if function_pointer.match(dtype):
+ # Pointer-to-function
+ self.data += f'".BI "{parenth}{function_pointer.group(1)}" " ") ({function_pointer.group(2)}){post}"' + "\n"
+ else:
+ dtype = KernRe(r'([^\*])$').sub(r'\1 ', dtype)
+
+ self.data += f'.BI "{parenth}{dtype}" {parameter} "{post}"' + "\n"
+ count += 1
+ parenth = ""
+
+ if args.parameterlist:
+ self.data += ".SH ARGUMENTS\n"
+
+ for parameter in args.parameterlist:
+ parameter_name = re.sub(r'\[.*', '', parameter)
+
+ self.data += f'.IP "{parameter}" 12' + "\n"
+ self.output_highlight(args.parameterdescs.get(parameter_name, ""))
+
+ for section, text in args.sections.items():
+ self.data += f'.SH "{section.upper()}"' + "\n"
+ self.output_highlight(text)
+
+ def out_enum(self, fname, name, args):
+ out_name = self.arg_name(args, name)
+
+ self.data += f'.TH "{self.modulename}" 9 "{out_name}" "{self.man_date}" "API Manual" LINUX' + "\n"
+
+ self.data += ".SH NAME\n"
+ self.data += f"enum {name} \\- {args['purpose']}\n"
+
+ self.data += ".SH SYNOPSIS\n"
+ self.data += f"enum {name}" + " {\n"
+
+ count = 0
+ for parameter in args.parameterlist:
+ self.data += f'.br\n.BI " {parameter}"' + "\n"
+ if count == len(args.parameterlist) - 1:
+ self.data += "\n};\n"
+ else:
+ self.data += ", \n.br\n"
+
+ count += 1
+
+ self.data += ".SH Constants\n"
+
+ for parameter in args.parameterlist:
+ parameter_name = KernRe(r'\[.*').sub('', parameter)
+ self.data += f'.IP "{parameter}" 12' + "\n"
+ self.output_highlight(args.parameterdescs.get(parameter_name, ""))
+
+ for section, text in args.sections.items():
+ self.data += f'.SH "{section}"' + "\n"
+ self.output_highlight(text)
+
+ def out_typedef(self, fname, name, args):
+ module = self.modulename
+ purpose = args.get('purpose')
+ out_name = self.arg_name(args, name)
+
+ self.data += f'.TH "{module}" 9 "{out_name}" "{self.man_date}" "API Manual" LINUX' + "\n"
+
+ self.data += ".SH NAME\n"
+ self.data += f"typedef {name} \\- {purpose}\n"
+
+ for section, text in args.sections.items():
+ self.data += f'.SH "{section}"' + "\n"
+ self.output_highlight(text)
+
+ def out_struct(self, fname, name, args):
+ module = self.modulename
+ purpose = args.get('purpose')
+ definition = args.get('definition')
+ out_name = self.arg_name(args, name)
+
+ self.data += f'.TH "{module}" 9 "{out_name}" "{self.man_date}" "API Manual" LINUX' + "\n"
+
+ self.data += ".SH NAME\n"
+ self.data += f"{args.type} {name} \\- {purpose}\n"
+
+ # Replace tabs with two spaces and handle newlines
+ declaration = definition.replace("\t", "  ")
+ declaration = KernRe(r"\n").sub('"\n.br\n.BI "', declaration)
+
+ self.data += ".SH SYNOPSIS\n"
+ self.data += f"{args.type} {name} " + "{" + "\n.br\n"
+ self.data += f'.BI "{declaration}\n' + "};\n.br\n\n"
+
+ self.data += ".SH Members\n"
+ for parameter in args.parameterlist:
+ if parameter.startswith("#"):
+ continue
+
+ parameter_name = re.sub(r"\[.*", "", parameter)
+
+ if args.parameterdescs.get(parameter_name) == KernelDoc.undescribed:
+ continue
+
+ self.data += f'.IP "{parameter}" 12' + "\n"
+ self.output_highlight(args.parameterdescs.get(parameter_name))
+
+ for section, text in args.sections.items():
+ self.data += f'.SH "{section}"' + "\n"
+ self.output_highlight(text)
diff --git a/tools/lib/python/kdoc/kdoc_parser.py b/tools/lib/python/kdoc/kdoc_parser.py
new file mode 100644
index 000000000000..500aafc50032
--- /dev/null
+++ b/tools/lib/python/kdoc/kdoc_parser.py
@@ -0,0 +1,1670 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
+#
+# pylint: disable=C0301,C0302,R0904,R0912,R0913,R0914,R0915,R0917,R1702
+
+"""
+kdoc_parser
+===========
+
+Read a C language source or header FILE and extract embedded
+documentation comments
+"""
+
+import sys
+import re
+from pprint import pformat
+
+from kdoc.kdoc_re import NestedMatch, KernRe
+from kdoc.kdoc_item import KdocItem
+
+#
+# Regular expressions used to parse kernel-doc markups at KernelDoc class.
+#
+# Let's declare them in lowercase outside any class to make it easier to
+# convert from the Perl script.
+#
+# As those are evaluated at the beginning, no need to cache them
+#
+
+# Allow whitespace at end of comment start.
+doc_start = KernRe(r'^/\*\*\s*$', cache=False)
+
+doc_end = KernRe(r'\*/', cache=False)
+doc_com = KernRe(r'\s*\*\s*', cache=False)
+doc_com_body = KernRe(r'\s*\* ?', cache=False)
+doc_decl = doc_com + KernRe(r'(\w+)', cache=False)
+
+# @params and a strictly limited set of supported section names
+# Specifically:
+# Match @word:
+# @...:
+# @{section-name}:
+# while trying not to match literal block starts like "example::"
+#
+known_section_names = 'description|context|returns?|notes?|examples?'
+known_sections = KernRe(known_section_names, flags = re.I)
+doc_sect = doc_com + \
+ KernRe(r'\s*(@[.\w]+|@\.\.\.|' + known_section_names + r')\s*:([^:].*)?$',
+ flags=re.I, cache=False)
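+#
+# A few illustrative sample lines (not from any real source file):
+#
+#     " * @offset: byte offset into the buffer"   matches (section "@offset")
+#     " * Return: 0 on success"                   matches (section "Return")
+#     " * example::"                              does not match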
+
+doc_content = doc_com_body + KernRe(r'(.*)', cache=False)
+doc_inline_start = KernRe(r'^\s*/\*\*\s*$', cache=False)
+doc_inline_sect = KernRe(r'\s*\*\s*(@\s*[\w][\w\.]*\s*):(.*)', cache=False)
+doc_inline_end = KernRe(r'^\s*\*/\s*$', cache=False)
+doc_inline_oneline = KernRe(r'^\s*/\*\*\s*(@[\w\s]+):\s*(.*)\s*\*/\s*$', cache=False)
+
+export_symbol = KernRe(r'^\s*EXPORT_SYMBOL(_GPL)?\s*\(\s*(\w+)\s*\)\s*', cache=False)
+export_symbol_ns = KernRe(r'^\s*EXPORT_SYMBOL_NS(_GPL)?\s*\(\s*(\w+)\s*,\s*"\S+"\)\s*', cache=False)
+
+type_param = KernRe(r"@(\w*((\.\w+)|(->\w+))*(\.\.\.)?)", cache=False)
+
+#
+# Tests for the beginning of a kerneldoc block in its various forms.
+#
+doc_block = doc_com + KernRe(r'DOC:\s*(.*)?', cache=False)
+doc_begin_data = KernRe(r"^\s*\*?\s*(struct|union|enum|typedef)\b\s*(\w*)", cache = False)
+doc_begin_func = KernRe(str(doc_com) + # initial " * "
+ r"(?:\w+\s*\*\s*)?" + # type (not captured)
+ r'(?:define\s+)?' + # possible "define" (not captured)
+ r'(\w+)\s*(?:\(\w*\))?\s*' + # name and optional "(...)"
+ r'(?:[-:].*)?$', # description (not captured)
+ cache = False)
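+#
+# For instance (hypothetical comment lines), doc_begin_data matches
+# " * struct foo_dev - a device" (type "struct", name "foo_dev"), while
+# doc_begin_func matches " * my_func() - do something" (name "my_func").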
+
+#
+# Here begins a long set of transformations to turn structure member prefixes
+# and macro invocations into something we can parse and generate kdoc for.
+#
+struct_args_pattern = r'([^,)]+)'
+
+struct_xforms = [
+ # Strip attributes
+ (KernRe(r"__attribute__\s*\(\([a-z0-9,_\*\s\(\)]*\)\)", flags=re.I | re.S, cache=False), ' '),
+ (KernRe(r'\s*__aligned\s*\([^;]*\)', re.S), ' '),
+ (KernRe(r'\s*__counted_by\s*\([^;]*\)', re.S), ' '),
+ (KernRe(r'\s*__counted_by_(le|be)\s*\([^;]*\)', re.S), ' '),
+ (KernRe(r'\s*__packed\s*', re.S), ' '),
+ (KernRe(r'\s*CRYPTO_MINALIGN_ATTR', re.S), ' '),
+ (KernRe(r'\s*__private', re.S), ' '),
+ (KernRe(r'\s*__rcu', re.S), ' '),
+ (KernRe(r'\s*____cacheline_aligned_in_smp', re.S), ' '),
+ (KernRe(r'\s*____cacheline_aligned', re.S), ' '),
+ (KernRe(r'\s*__cacheline_group_(begin|end)\([^\)]+\);'), ''),
+ #
+ # Unwrap struct_group macros based on this definition:
+ # __struct_group(TAG, NAME, ATTRS, MEMBERS...)
+ # which has variants like: struct_group(NAME, MEMBERS...)
+ # Only MEMBERS arguments require documentation.
+ #
+ # Parsing them happens in two steps:
+ #
+ # 1. drop struct_group arguments that aren't MEMBERS,
+ # storing them as STRUCT_GROUP(MEMBERS)
+ #
+ # 2. remove STRUCT_GROUP() ancillary macro.
+ #
+ # The original logic used to remove STRUCT_GROUP() using an
+ # advanced regex:
+ #
+ # \bSTRUCT_GROUP(\(((?:(?>[^)(]+)|(?1))*)\))[^;]*;
+ #
+ # with two patterns that are incompatible with
+ # Python re module, as it has:
+ #
+ # - a recursive pattern: (?1)
+ # - an atomic grouping: (?>...)
+ #
+ # I tried a simpler version, but it didn't work either:
+ # \bSTRUCT_GROUP\(([^\)]+)\)[^;]*;
+ #
+ # as it doesn't properly match the end parenthesis in some cases.
+ #
+ # So, a better solution was crafted: there's now a NestedMatch
+ # class that ensures that delimiters after a search are properly
+ # matched. The implementation that drops STRUCT_GROUP() is thus
+ # handled separately.
+ #
+ (KernRe(r'\bstruct_group\s*\(([^,]*,)', re.S), r'STRUCT_GROUP('),
+ (KernRe(r'\bstruct_group_attr\s*\(([^,]*,){2}', re.S), r'STRUCT_GROUP('),
+ (KernRe(r'\bstruct_group_tagged\s*\(([^,]*),([^,]*),', re.S), r'struct \1 \2; STRUCT_GROUP('),
+ (KernRe(r'\b__struct_group\s*\(([^,]*,){3}', re.S), r'STRUCT_GROUP('),
+ #
+ # Replace macros
+ #
+ # TODO: use NestedMatch for FOO($1, $2, ...) matches
+ #
+ # It would be better to also move those to the NestedMatch logic,
+ # to ensure that parentheses are properly matched.
+ #
+ (KernRe(r'__ETHTOOL_DECLARE_LINK_MODE_MASK\s*\(([^\)]+)\)', re.S),
+ r'DECLARE_BITMAP(\1, __ETHTOOL_LINK_MODE_MASK_NBITS)'),
+ (KernRe(r'DECLARE_PHY_INTERFACE_MASK\s*\(([^\)]+)\)', re.S),
+ r'DECLARE_BITMAP(\1, PHY_INTERFACE_MODE_MAX)'),
+ (KernRe(r'DECLARE_BITMAP\s*\(' + struct_args_pattern + r',\s*' + struct_args_pattern + r'\)',
+ re.S), r'unsigned long \1[BITS_TO_LONGS(\2)]'),
+ (KernRe(r'DECLARE_HASHTABLE\s*\(' + struct_args_pattern + r',\s*' + struct_args_pattern + r'\)',
+ re.S), r'unsigned long \1[1 << ((\2) - 1)]'),
+ (KernRe(r'DECLARE_KFIFO\s*\(' + struct_args_pattern + r',\s*' + struct_args_pattern +
+ r',\s*' + struct_args_pattern + r'\)', re.S), r'\2 *\1'),
+ (KernRe(r'DECLARE_KFIFO_PTR\s*\(' + struct_args_pattern + r',\s*' +
+ struct_args_pattern + r'\)', re.S), r'\2 *\1'),
+ (KernRe(r'(?:__)?DECLARE_FLEX_ARRAY\s*\(' + struct_args_pattern + r',\s*' +
+ struct_args_pattern + r'\)', re.S), r'\1 \2[]'),
+ (KernRe(r'DEFINE_DMA_UNMAP_ADDR\s*\(' + struct_args_pattern + r'\)', re.S), r'dma_addr_t \1'),
+ (KernRe(r'DEFINE_DMA_UNMAP_LEN\s*\(' + struct_args_pattern + r'\)', re.S), r'__u32 \1'),
+]
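+#
+# As an illustration (hypothetical member declaration), the transforms
+# above turn:
+#
+#     DECLARE_BITMAP(supported, 64);
+#
+# into:
+#
+#     unsigned long supported[BITS_TO_LONGS(64)];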
+#
+# Regexes here are guaranteed to have the end delimiter matching
+# the start delimiter. Yet, right now, only one replace group
+# is allowed.
+#
+struct_nested_prefixes = [
+ (re.compile(r'\bSTRUCT_GROUP\('), r'\1'),
+]
+
+#
+# Transforms for function prototypes
+#
+function_xforms = [
+ (KernRe(r"^static +"), ""),
+ (KernRe(r"^extern +"), ""),
+ (KernRe(r"^asmlinkage +"), ""),
+ (KernRe(r"^inline +"), ""),
+ (KernRe(r"^__inline__ +"), ""),
+ (KernRe(r"^__inline +"), ""),
+ (KernRe(r"^__always_inline +"), ""),
+ (KernRe(r"^noinline +"), ""),
+ (KernRe(r"^__FORTIFY_INLINE +"), ""),
+ (KernRe(r"__init +"), ""),
+ (KernRe(r"__init_or_module +"), ""),
+ (KernRe(r"__deprecated +"), ""),
+ (KernRe(r"__flatten +"), ""),
+ (KernRe(r"__meminit +"), ""),
+ (KernRe(r"__must_check +"), ""),
+ (KernRe(r"__weak +"), ""),
+ (KernRe(r"__sched +"), ""),
+ (KernRe(r"_noprof"), ""),
+ (KernRe(r"__always_unused *"), ""),
+ (KernRe(r"__printf\s*\(\s*\d*\s*,\s*\d*\s*\) +"), ""),
+ (KernRe(r"__(?:re)?alloc_size\s*\(\s*\d+\s*(?:,\s*\d+\s*)?\) +"), ""),
+ (KernRe(r"__diagnose_as\s*\(\s*\S+\s*(?:,\s*\d+\s*)*\) +"), ""),
+ (KernRe(r"DECL_BUCKET_PARAMS\s*\(\s*(\S+)\s*,\s*(\S+)\s*\)"), r"\1, \2"),
+ (KernRe(r"__attribute_const__ +"), ""),
+ (KernRe(r"__attribute__\s*\(\((?:[\w\s]+(?:\([^)]*\))?\s*,?)+\)\)\s+"), ""),
+]
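+#
+# As an illustration (hypothetical prototype), the transforms above turn
+# "static inline int __must_check foo_check(void)" into
+# "int foo_check(void)".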
+
+#
+# Apply a set of transforms to a block of text.
+#
+def apply_transforms(xforms, text):
+ for search, subst in xforms:
+ text = search.sub(subst, text)
+ return text
+
+#
+# A little helper to get rid of excess white space
+#
+multi_space = KernRe(r'\s\s+')
+def trim_whitespace(s):
+ return multi_space.sub(' ', s.strip())
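+# E.g. trim_whitespace("  int    foo ") returns "int foo".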
+
+#
+# Remove struct/enum members that have been marked "private".
+#
+def trim_private_members(text):
+ #
+ # First look for a "public:" block that ends a private region, then
+ # handle the "private until the end" case.
+ #
+ text = KernRe(r'/\*\s*private:.*?/\*\s*public:.*?\*/', flags=re.S).sub('', text)
+ text = KernRe(r'/\*\s*private:.*', flags=re.S).sub('', text)
+ #
+ # We needed the comments to do the above, but now we can take them out.
+ #
+ return KernRe(r'\s*/\*.*?\*/\s*', flags=re.S).sub('', text).strip()
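+#
+# E.g. (hypothetical) trim_private_members("int a; /* private: */ int b;")
+# returns "int a;".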
+
+class state:
+ """
+ State machine enums
+ """
+
+ # Parser states
+ NORMAL = 0 # normal code
+ NAME = 1 # looking for function name
+ DECLARATION = 2 # We have seen a declaration which might not be done
+ BODY = 3 # the body of the comment
+ SPECIAL_SECTION = 4 # doc section ending with a blank line
+ PROTO = 5 # scanning prototype
+ DOCBLOCK = 6 # documentation block
+ INLINE_NAME = 7 # gathering doc outside main block
+ INLINE_TEXT = 8 # reading the body of inline docs
+
+ name = [
+ "NORMAL",
+ "NAME",
+ "DECLARATION",
+ "BODY",
+ "SPECIAL_SECTION",
+ "PROTO",
+ "DOCBLOCK",
+ "INLINE_NAME",
+ "INLINE_TEXT",
+ ]
+
+
+SECTION_DEFAULT = "Description" # default section
+
+class KernelEntry:
+
+ def __init__(self, config, fname, ln):
+ self.config = config
+ self.fname = fname
+
+ self._contents = []
+ self.prototype = ""
+
+ self.warnings = []
+
+ self.parameterlist = []
+ self.parameterdescs = {}
+ self.parametertypes = {}
+ self.parameterdesc_start_lines = {}
+
+ self.section_start_lines = {}
+ self.sections = {}
+
+ self.anon_struct_union = False
+
+ self.leading_space = None
+
+ self.fname = fname
+
+ # State flags
+ self.brcount = 0
+ self.declaration_start_line = ln + 1
+
+ #
+ # Management of section contents
+ #
+ def add_text(self, text):
+ self._contents.append(text)
+
+ def contents(self):
+ return '\n'.join(self._contents) + '\n'
+
+ # TODO: rename to emit_message after removal of kernel-doc.pl
+ def emit_msg(self, ln, msg, *, warning=True):
+ """Emit a message"""
+
+ log_msg = f"{self.fname}:{ln} {msg}"
+
+ if not warning:
+ self.config.log.info(log_msg)
+ return
+
+ # Delegate warning output to the output logic, so that warnings
+ # are reported only for symbols that are actually output
+
+ self.warnings.append(log_msg)
+ return
+
+ #
+ # Begin a new section.
+ #
+ def begin_section(self, line_no, title = SECTION_DEFAULT, dump = False):
+ if dump:
+ self.dump_section(start_new = True)
+ self.section = title
+ self.new_start_line = line_no
+
+ def dump_section(self, start_new=True):
+ """
+ Dumps section contents to arrays/hashes intended for that purpose.
+ """
+ #
+ # If we have accumulated no contents in the default ("description")
+ # section, don't bother.
+ #
+ if self.section == SECTION_DEFAULT and not self._contents:
+ return
+ name = self.section
+ contents = self.contents()
+
+ if type_param.match(name):
+ name = type_param.group(1)
+
+ self.parameterdescs[name] = contents
+ self.parameterdesc_start_lines[name] = self.new_start_line
+
+ self.new_start_line = 0
+
+ else:
+ if name in self.sections and self.sections[name] != "":
+ # Only warn on user-specified duplicate section names
+ if name != SECTION_DEFAULT:
+ self.emit_msg(self.new_start_line,
+ f"duplicate section name '{name}'")
+ # Treat as a new paragraph - add a blank line
+ self.sections[name] += '\n' + contents
+ else:
+ self.sections[name] = contents
+ self.section_start_lines[name] = self.new_start_line
+ self.new_start_line = 0
+
+# self.config.log.debug("Section: %s : %s", name, pformat(vars(self)))
+
+ if start_new:
+ self.section = SECTION_DEFAULT
+ self._contents = []
+
+python_warning = False
+
+class KernelDoc:
+ """
+ Read a C language source or header FILE and extract embedded
+ documentation comments.
+ """
+
+ # Section names
+
+ section_context = "Context"
+ section_return = "Return"
+
+ undescribed = "-- undescribed --"
+
+ def __init__(self, config, fname):
+ """Initialize internal variables"""
+
+ self.fname = fname
+ self.config = config
+
+ # Initial state for the state machines
+ self.state = state.NORMAL
+
+ # Store entry currently being processed
+ self.entry = None
+
+ # Place all potential outputs into an array
+ self.entries = []
+
+ #
+ # We need Python 3.7 for its "dicts remember the insertion
+ # order" guarantee
+ #
+ global python_warning
+ if (not python_warning and
+ sys.version_info.major == 3 and sys.version_info.minor < 7):
+
+ self.emit_msg(0,
+ 'Python 3.7 or later is required for correct results')
+ python_warning = True
+
+ def emit_msg(self, ln, msg, *, warning=True):
+ """Emit a message"""
+
+ if self.entry:
+ self.entry.emit_msg(ln, msg, warning=warning)
+ return
+
+ log_msg = f"{self.fname}:{ln} {msg}"
+
+ if warning:
+ self.config.log.warning(log_msg)
+ else:
+ self.config.log.info(log_msg)
+
+ def dump_section(self, start_new=True):
+ """
+ Dumps section contents to arrays/hashes intended for that purpose.
+ """
+
+ if self.entry:
+ self.entry.dump_section(start_new)
+
+ # TODO: rename it to store_declaration after removal of kernel-doc.pl
+ def output_declaration(self, dtype, name, **args):
+ """
+ Stores the entry into an entry array.
+
+ The actual output and output filters will be handled elsewhere
+ """
+
+ item = KdocItem(name, self.fname, dtype,
+ self.entry.declaration_start_line, **args)
+ item.warnings = self.entry.warnings
+
+ # Drop empty sections
+ # TODO: improve empty sections logic to emit warnings
+ sections = self.entry.sections
+ for section in ["Description", "Return"]:
+ if section in sections and not sections[section].rstrip():
+ del sections[section]
+ item.set_sections(sections, self.entry.section_start_lines)
+ item.set_params(self.entry.parameterlist, self.entry.parameterdescs,
+ self.entry.parametertypes,
+ self.entry.parameterdesc_start_lines)
+ self.entries.append(item)
+
+ self.config.log.debug("Output: %s:%s = %s", dtype, name, pformat(args))
+
+ def reset_state(self, ln):
+ """
+ Ancillary routine to create a new entry. It initializes all
+ variables used by the state machine.
+ """
+
+ #
+ # Flush the warnings out before we proceed further
+ #
+ if self.entry and self.entry not in self.entries:
+ for log_msg in self.entry.warnings:
+ self.config.log.warning(log_msg)
+
+ self.entry = KernelEntry(self.config, self.fname, ln)
+
+ # State flags
+ self.state = state.NORMAL
+
+ def push_parameter(self, ln, decl_type, param, dtype,
+ org_arg, declaration_name):
+ """
+ Store parameters and their descriptions at self.entry.
+ """
+
+ if self.entry.anon_struct_union and dtype == "" and param == "}":
+ return # Ignore the ending }; from anonymous struct/union
+
+ self.entry.anon_struct_union = False
+
+ param = KernRe(r'[\[\)].*').sub('', param, count=1)
+
+ #
+ # Look at various "anonymous type" cases.
+ #
+ if dtype == '':
+ if param.endswith("..."):
+ if len(param) > 3: # there is a name provided, use that
+ param = param[:-3]
+ if not self.entry.parameterdescs.get(param):
+ self.entry.parameterdescs[param] = "variable arguments"
+
+ elif (not param) or param == "void":
+ param = "void"
+ self.entry.parameterdescs[param] = "no arguments"
+
+ elif param in ["struct", "union"]:
+ # Handle unnamed (anonymous) union or struct
+ dtype = param
+ param = "{unnamed_" + param + "}"
+ self.entry.parameterdescs[param] = "anonymous\n"
+ self.entry.anon_struct_union = True
+
+ # Warn if parameter has no description
+ # (but ignore ones starting with # as these are not parameters
+ # but inline preprocessor statements)
+ if param not in self.entry.parameterdescs and not param.startswith("#"):
+ self.entry.parameterdescs[param] = self.undescribed
+
+ if "." not in param:
+ if decl_type == 'function':
+ dname = f"{decl_type} parameter"
+ else:
+ dname = f"{decl_type} member"
+
+ self.emit_msg(ln,
+ f"{dname} '{param}' not described in '{declaration_name}'")
+
+ # Strip spaces from param so that it is one continuous string on
+ # parameterlist. This fixes a problem where check_sections()
+ # cannot find a parameter like "addr[6 + 2]" because it actually
+ # appears as "addr[6", "+", "2]" on the parameter list.
+ # However, it's better to maintain the param string unchanged for
+ # output, so just weaken the string compare in check_sections()
+ # to ignore "[blah" in a parameter string.
+
+ self.entry.parameterlist.append(param)
+ org_arg = KernRe(r'\s\s+').sub(' ', org_arg)
+ self.entry.parametertypes[param] = org_arg
+
+
+ def create_parameter_list(self, ln, decl_type, args,
+ splitter, declaration_name):
+ """
+ Creates a list of parameters, storing them at self.entry.
+ """
+
+ # temporarily replace all commas inside function pointer definition
+ arg_expr = KernRe(r'(\([^\),]+),')
+ while arg_expr.search(args):
+ args = arg_expr.sub(r"\1#", args)
+
+ for arg in args.split(splitter):
+ # Ignore argument attributes
+ arg = KernRe(r'\sPOS0?\s').sub(' ', arg)
+
+ # Strip leading/trailing spaces
+ arg = arg.strip()
+ arg = KernRe(r'\s+').sub(' ', arg, count=1)
+
+ if arg.startswith('#'):
+ # Treat preprocessor directive as a typeless variable just to fill
+ # corresponding data structures "correctly". Catch it later in
+ # output_* subs.
+
+ # Treat preprocessor directive as a typeless variable
+ self.push_parameter(ln, decl_type, arg, "",
+ "", declaration_name)
+ #
+ # The pointer-to-function case.
+ #
+ elif KernRe(r'\(.+\)\s*\(').search(arg):
+ arg = arg.replace('#', ',')
+ r = KernRe(r'[^\(]+\(\*?\s*' # Everything up to "(*"
+ r'([\w\[\].]*)' # Capture the name and possible [array]
+ r'\s*\)') # Make sure the trailing ")" is there
+ if r.match(arg):
+ param = r.group(1)
+ else:
+ self.emit_msg(ln, f"Invalid param: {arg}")
+ param = arg
+ dtype = arg.replace(param, '')
+ self.push_parameter(ln, decl_type, param, dtype, arg, declaration_name)
+ #
+ # The array-of-pointers case. Dig the parameter name out from the middle
+ # of the declaration.
+ #
+ elif KernRe(r'\(.+\)\s*\[').search(arg):
+ r = KernRe(r'[^\(]+\(\s*\*\s*' # Up to "(" and maybe "*"
+ r'([\w.]*?)' # The actual pointer name
+ r'\s*(\[\s*\w+\s*\]\s*)*\)') # The [array portion]
+ if r.match(arg):
+ param = r.group(1)
+ else:
+ self.emit_msg(ln, f"Invalid param: {arg}")
+ param = arg
+ dtype = arg.replace(param, '')
+ self.push_parameter(ln, decl_type, param, dtype, arg, declaration_name)
+ elif arg:
+ #
+ # Clean up extraneous spaces and split the string at commas; the first
+ # element of the resulting list will also include the type information.
+ #
+ arg = KernRe(r'\s*:\s*').sub(":", arg)
+ arg = KernRe(r'\s*\[').sub('[', arg)
+ args = KernRe(r'\s*,\s*').split(arg)
+ args[0] = re.sub(r'(\*+)\s*', r' \1', args[0])
+ #
+ # args[0] has a string of "type a". If "a" includes an [array]
+ # declaration, we want to not be fooled by any white space inside
+ # the brackets, so detect and handle that case specially.
+ #
+ r = KernRe(r'^([^[\]]*\s+)(.*)$')
+ if r.match(args[0]):
+ args[0] = r.group(2)
+ dtype = r.group(1)
+ else:
+ # No space in args[0]; this seems wrong but preserves previous behavior
+ dtype = ''
+
+ bitfield_re = KernRe(r'(.*?):(\w+)')
+ for param in args:
+ #
+ # For pointers, shift the star(s) from the variable name to the
+ # type declaration.
+ #
+ r = KernRe(r'^(\*+)\s*(.*)')
+ if r.match(param):
+ self.push_parameter(ln, decl_type, r.group(2),
+ f"{dtype} {r.group(1)}",
+ arg, declaration_name)
+ #
+ # Perform a similar shift for bitfields.
+ #
+ elif bitfield_re.search(param):
+ if dtype != "": # Skip unnamed bit-fields
+ self.push_parameter(ln, decl_type, bitfield_re.group(1),
+ f"{dtype}:{bitfield_re.group(2)}",
+ arg, declaration_name)
+ else:
+ self.push_parameter(ln, decl_type, param, dtype,
+ arg, declaration_name)
+
+ def check_sections(self, ln, decl_name, decl_type):
+ """
+ Check the documented sections against the declaration, warning about
+ described parameters that do not actually exist.
+ """
+ for section in self.entry.sections:
+ if section not in self.entry.parameterlist and \
+ not known_sections.search(section):
+ if decl_type == 'function':
+ dname = f"{decl_type} parameter"
+ else:
+ dname = f"{decl_type} member"
+ self.emit_msg(ln,
+ f"Excess {dname} '{section}' description in '{decl_name}'")
+
+ def check_return_section(self, ln, declaration_name, return_type):
+ """
+ If the function doesn't return void, warns about the lack of a
+ return description.
+ """
+
+ if not self.config.wreturn:
+ return
+
+ # Ignore an empty return type (It's a macro)
+ # Ignore functions with a "void" return type (but not "void *")
+ if not return_type or KernRe(r'void\s*\w*\s*$').search(return_type):
+ return
+
+ if not self.entry.sections.get("Return", None):
+ self.emit_msg(ln,
+ f"No description found for return value of '{declaration_name}'")
+
+ #
+ # Split apart a structure prototype; returns (struct|union, name, members) or None
+ #
+ def split_struct_proto(self, proto):
+ type_pattern = r'(struct|union)'
+ qualifiers = [
+ "__attribute__",
+ "__packed",
+ "__aligned",
+ "____cacheline_aligned_in_smp",
+ "____cacheline_aligned",
+ ]
+ definition_body = r'\{(.*)\}\s*' + "(?:" + '|'.join(qualifiers) + ")?"
+
+ r = KernRe(type_pattern + r'\s+(\w+)\s*' + definition_body)
+ if r.search(proto):
+ return (r.group(1), r.group(2), r.group(3))
+ else:
+ r = KernRe(r'typedef\s+' + type_pattern + r'\s*' + definition_body + r'\s*(\w+)\s*;')
+ if r.search(proto):
+ return (r.group(1), r.group(3), r.group(2))
+ return None
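+ #
+ # For a (hypothetical) prototype like "struct foo { int a; };", this
+ # returns ("struct", "foo", " int a; ").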
+ #
+ # Rewrite the members of a structure or union for easier formatting later on.
+ # Among other things, this function will turn a member like:
+ #
+ # struct { inner_members; } foo;
+ #
+ # into:
+ #
+ # struct foo; inner_members;
+ #
+ def rewrite_struct_members(self, members):
+ #
+ # Process struct/union members from the most deeply nested outward. The
+ # trick is in the ^{ below - it prevents a match of an outer struct/union
+ # until the inner one has been munged (removing the "{" in the process).
+ #
+ struct_members = KernRe(r'(struct|union)' # 0: declaration type
+ r'([^\{\};]+)' # 1: possible name
+ r'(\{)'
+ r'([^\{\}]*)' # 3: Contents of declaration
+ r'(\})'
+ r'([^\{\};]*)(;)') # 5: Remaining stuff after declaration
+ tuples = struct_members.findall(members)
+ while tuples:
+ for t in tuples:
+ newmember = ""
+ oldmember = "".join(t) # Reconstruct the original formatting
+ dtype, name, lbr, content, rbr, rest, semi = t
+ #
+ # Pass through each field name, normalizing the form and formatting.
+ #
+ for s_id in rest.split(','):
+ s_id = s_id.strip()
+ newmember += f"{dtype} {s_id}; "
+ #
+ # Remove bitfield/array/pointer info, getting the bare name.
+ #
+ s_id = KernRe(r'[:\[].*').sub('', s_id)
+ s_id = KernRe(r'^\s*\**(\S+)\s*').sub(r'\1', s_id)
+ #
+ # Pass through the members of this inner structure/union.
+ #
+ for arg in content.split(';'):
+ arg = arg.strip()
+ #
+ # Look for (type)(*name)(args) - pointer to function
+ #
+ r = KernRe(r'^([^\(]+\(\*?\s*)([\w.]*)(\s*\).*)')
+ if r.match(arg):
+ dtype, name, extra = r.group(1), r.group(2), r.group(3)
+ # Pointer-to-function
+ if not s_id:
+ # Anonymous struct/union
+ newmember += f"{dtype}{name}{extra}; "
+ else:
+ newmember += f"{dtype}{s_id}.{name}{extra}; "
+ #
+ # Otherwise a non-function member.
+ #
+ else:
+ #
+ # Remove bitmap and array portions and spaces around commas
+ #
+ arg = KernRe(r':\s*\d+\s*').sub('', arg)
+ arg = KernRe(r'\[.*\]').sub('', arg)
+ arg = KernRe(r'\s*,\s*').sub(',', arg)
+ #
+ # Look for a normal decl - "type name[,name...]"
+ #
+ r = KernRe(r'(.*)\s+([\S+,]+)')
+ if r.search(arg):
+ for name in r.group(2).split(','):
+ name = KernRe(r'^\s*\**(\S+)\s*').sub(r'\1', name)
+ if not s_id:
+ # Anonymous struct/union
+ newmember += f"{r.group(1)} {name}; "
+ else:
+ newmember += f"{r.group(1)} {s_id}.{name}; "
+ else:
+ newmember += f"{arg}; "
+ #
+ # At the end of the s_id loop, replace the original declaration with
+ # the munged version.
+ #
+ members = members.replace(oldmember, newmember)
+ #
+ # End of the tuple loop - search again and see if there are outer members
+ # that now turn up.
+ #
+ tuples = struct_members.findall(members)
+ return members
+
+ #
+ # Format the struct declaration into a standard form for inclusion in the
+ # resulting docs.
+ #
+ def format_struct_decl(self, declaration):
+ #
+ # Insert newlines, get rid of extra spaces.
+ #
+ declaration = KernRe(r'([\{;])').sub(r'\1\n', declaration)
+ declaration = KernRe(r'\}\s+;').sub('};', declaration)
+ #
+ # Format inline enums with each member on its own line.
+ #
+ r = KernRe(r'(enum\s+\{[^\}]+),([^\n])')
+ while r.search(declaration):
+ declaration = r.sub(r'\1,\n\2', declaration)
+ #
+ # Now go through and supply the right number of tabs
+ # for each line.
+ #
+ def_args = declaration.split('\n')
+ level = 1
+ declaration = ""
+ for clause in def_args:
+ clause = KernRe(r'\s+').sub(' ', clause.strip(), count=1)
+ if clause:
+ if '}' in clause and level > 1:
+ level -= 1
+ if not clause.startswith('#'):
+ declaration += "\t" * level
+ declaration += "\t" + clause + "\n"
+ if "{" in clause and "}" not in clause:
+ level += 1
+ return declaration
+
+
+ def dump_struct(self, ln, proto):
+ """
+ Store an entry for a struct or union
+ """
+ #
+ # Do the basic parse to get the pieces of the declaration.
+ #
+ struct_parts = self.split_struct_proto(proto)
+ if not struct_parts:
+ self.emit_msg(ln, f"{proto} error: Cannot parse struct or union!")
+ return
+ decl_type, declaration_name, members = struct_parts
+
+ if self.entry.identifier != declaration_name:
+ self.emit_msg(ln, f"expecting prototype for {decl_type} {self.entry.identifier}. "
+ f"Prototype was for {decl_type} {declaration_name} instead\n")
+ return
+ #
+ # Go through the list of members applying all of our transformations.
+ #
+ members = trim_private_members(members)
+ members = apply_transforms(struct_xforms, members)
+
+ nested = NestedMatch()
+ for search, sub in struct_nested_prefixes:
+ members = nested.sub(search, sub, members)
+ #
+ # Deal with embedded struct and union members, and drop enums entirely.
+ #
+ declaration = members
+ members = self.rewrite_struct_members(members)
+ members = re.sub(r'(\{[^\{\}]*\})', '', members)
+ #
+ # Output the result and we are done.
+ #
+ self.create_parameter_list(ln, decl_type, members, ';',
+ declaration_name)
+ self.check_sections(ln, declaration_name, decl_type)
+ self.output_declaration(decl_type, declaration_name,
+ definition=self.format_struct_decl(declaration),
+ purpose=self.entry.declaration_purpose)
+
+ def dump_enum(self, ln, proto):
+ """
+ Stores an enum inside self.entries array.
+ """
+ #
+ # Strip preprocessor directives. Note that this depends on the
+ # trailing semicolon we added in process_proto_type().
+ #
+ proto = KernRe(r'#\s*((define|ifdef|if)\s+|endif)[^;]*;', flags=re.S).sub('', proto)
+ #
+ # Parse out the name and members of the enum. Typedef form first.
+ #
+ r = KernRe(r'typedef\s+enum\s*\{(.*)\}\s*(\w*)\s*;')
+ if r.search(proto):
+ declaration_name = r.group(2)
+ members = trim_private_members(r.group(1))
+ #
+ # Failing that, look for a straight enum
+ #
+ else:
+ r = KernRe(r'enum\s+(\w*)\s*\{(.*)\}')
+ if r.match(proto):
+ declaration_name = r.group(1)
+ members = trim_private_members(r.group(2))
+ #
+ # OK, this isn't going to work.
+ #
+ else:
+ self.emit_msg(ln, f"{proto}: error: Cannot parse enum!")
+ return
+ #
+ # Make sure we found what we were expecting.
+ #
+ if self.entry.identifier != declaration_name:
+ if self.entry.identifier == "":
+ self.emit_msg(ln,
+ f"{proto}: wrong kernel-doc identifier on prototype")
+ else:
+ self.emit_msg(ln,
+ f"expecting prototype for enum {self.entry.identifier}. "
+ f"Prototype was for enum {declaration_name} instead")
+ return
+
+ if not declaration_name:
+ declaration_name = "(anonymous)"
+ #
+ # Parse out the name of each enum member, and verify that we
+ # have a description for it.
+ #
+ member_set = set()
+ members = KernRe(r'\([^;)]*\)').sub('', members)
+ for arg in members.split(','):
+ if not arg:
+ continue
+ arg = KernRe(r'^\s*(\w+).*').sub(r'\1', arg)
+ self.entry.parameterlist.append(arg)
+ if arg not in self.entry.parameterdescs:
+ self.entry.parameterdescs[arg] = self.undescribed
+ self.emit_msg(ln,
+ f"Enum value '{arg}' not described in enum '{declaration_name}'")
+ member_set.add(arg)
+ #
+ # Ensure that every described member actually exists in the enum.
+ #
+ for k in self.entry.parameterdescs:
+ if k not in member_set:
+ self.emit_msg(ln,
+ f"Excess enum value '@{k}' description in '{declaration_name}'")
+
+ self.output_declaration('enum', declaration_name,
+ purpose=self.entry.declaration_purpose)
+
+ def dump_declaration(self, ln, prototype):
+ """
+ Stores a data declaration inside self.entries array.
+ """
+
+ if self.entry.decl_type == "enum":
+ self.dump_enum(ln, prototype)
+ elif self.entry.decl_type == "typedef":
+ self.dump_typedef(ln, prototype)
+ elif self.entry.decl_type in ["union", "struct"]:
+ self.dump_struct(ln, prototype)
+ else:
+ # This would be a bug
+ self.emit_msg(ln, f'Unknown declaration type: {self.entry.decl_type}')
+
+ def dump_function(self, ln, prototype):
+ """
+ Stores a function or function macro inside self.entries array.
+ """
+
+ found = func_macro = False
+ return_type = ''
+ decl_type = 'function'
+ #
+ # Apply the initial transformations.
+ #
+ prototype = apply_transforms(function_xforms, prototype)
+ #
+ # If we have a macro, remove the "#define" at the front.
+ #
+ new_proto = KernRe(r"^#\s*define\s+").sub("", prototype)
+ if new_proto != prototype:
+ prototype = new_proto
+ #
+ # Dispense with the simple "#define A B" case here; the key
+ # is the space after the name of the symbol being defined.
+ # NOTE that the seemingly misnamed "func_macro" indicates a
+ # macro *without* arguments.
+ #
+ r = KernRe(r'^(\w+)\s+')
+ if r.search(prototype):
+ return_type = ''
+ declaration_name = r.group(1)
+ func_macro = True
+ found = True
+
+ # Yes, this truly is vile. We are looking for:
+ # 1. Return type (may be nothing if we're looking at a macro)
+ # 2. Function name
+ # 3. Function parameters.
+ #
+ # All the while we have to watch out for function pointer parameters
+ # (which IIRC is what the two sections are for), C types (these
+ # regexps don't even start to express all the possibilities), and
+ # so on.
+ #
+ # If you mess with these regexps, it's a good idea to check that
+ # the following functions' documentation still comes out right:
+ # - parport_register_device (function pointer parameters)
+ # - atomic_set (macro)
+ # - pci_match_device, __copy_to_user (long return type)
+
+ name = r'\w+'
+ type1 = r'(?:[\w\s]+)?'
+ type2 = r'(?:[\w\s]+\*+)+'
+ #
+ # Attempt to match first on (args) with no internal parentheses; this
+ # lets us easily filter out __acquires() and other post-args stuff. If
+ # that fails, just grab the rest of the line to the last closing
+ # parenthesis.
+ #
+ proto_args = r'\(([^\(]*|.*)\)'
+ #
+ # (Except for the simple macro case) attempt to split up the prototype
+ # in the various ways we understand.
+ #
+ if not found:
+ patterns = [
+ rf'^()({name})\s*{proto_args}',
+ rf'^({type1})\s+({name})\s*{proto_args}',
+ rf'^({type2})\s*({name})\s*{proto_args}',
+ ]
+
+ for p in patterns:
+ r = KernRe(p)
+ if r.match(prototype):
+ return_type = r.group(1)
+ declaration_name = r.group(2)
+ args = r.group(3)
+ self.create_parameter_list(ln, decl_type, args, ',',
+ declaration_name)
+ found = True
+ break
+ #
+ # Parsing done; make sure that things are as we expect.
+ #
+ if not found:
+ self.emit_msg(ln,
+ f"cannot understand function prototype: '{prototype}'")
+ return
+ if self.entry.identifier != declaration_name:
+ self.emit_msg(ln, f"expecting prototype for {self.entry.identifier}(). "
+ f"Prototype was for {declaration_name}() instead")
+ return
+ self.check_sections(ln, declaration_name, "function")
+ self.check_return_section(ln, declaration_name, return_type)
+ #
+ # Store the result.
+ #
+ self.output_declaration(decl_type, declaration_name,
+ typedef=('typedef' in return_type),
+ functiontype=return_type,
+ purpose=self.entry.declaration_purpose,
+ func_macro=func_macro)
+
+
+ def dump_typedef(self, ln, proto):
+ """
+ Stores a typedef inside self.entries array.
+ """
+ #
+ # We start by looking for function typedefs.
+ #
+ typedef_type = r'typedef((?:\s+[\w*]+\b){0,7}\s+(?:\w+\b|\*+))\s*'
+ typedef_ident = r'\*?\s*(\w\S+)\s*'
+ typedef_args = r'\s*\((.*)\);'
+
+ typedef1 = KernRe(typedef_type + r'\(' + typedef_ident + r'\)' + typedef_args)
+ typedef2 = KernRe(typedef_type + typedef_ident + typedef_args)
+
+ # Parse function typedef prototypes
+ for r in [typedef1, typedef2]:
+ if not r.match(proto):
+ continue
+
+ return_type = r.group(1).strip()
+ declaration_name = r.group(2)
+ args = r.group(3)
+
+ if self.entry.identifier != declaration_name:
+ self.emit_msg(ln,
+ f"expecting prototype for typedef {self.entry.identifier}. Prototype was for typedef {declaration_name} instead\n")
+ return
+
+ self.create_parameter_list(ln, 'function', args, ',', declaration_name)
+
+ self.output_declaration('function', declaration_name,
+ typedef=True,
+ functiontype=return_type,
+ purpose=self.entry.declaration_purpose)
+ return
+ #
+ # Not a function, try to parse a simple typedef.
+ #
+ r = KernRe(r'typedef.*\s+(\w+)\s*;')
+ if r.match(proto):
+ declaration_name = r.group(1)
+
+ if self.entry.identifier != declaration_name:
+ self.emit_msg(ln,
+ f"expecting prototype for typedef {self.entry.identifier}. Prototype was for typedef {declaration_name} instead\n")
+ return
+
+ self.output_declaration('typedef', declaration_name,
+ purpose=self.entry.declaration_purpose)
+ return
+
+ self.emit_msg(ln, "error: Cannot parse typedef!")
+
+ @staticmethod
+ def process_export(function_set, line):
+ """
+ process EXPORT_SYMBOL* tags
+
+ This method doesn't use any variable from the class, so declare it
+ with a staticmethod decorator.
+ """
+
+ # We support documenting some exported symbols with different
+ # names. A horrible hack.
+ suffixes = [ '_noprof' ]
+
+ # Note: it accepts only one EXPORT_SYMBOL* per line, as having
+ # multiple export lines would violate Kernel coding style.
+
+ if export_symbol.search(line):
+ symbol = export_symbol.group(2)
+ elif export_symbol_ns.search(line):
+ symbol = export_symbol_ns.group(2)
+ else:
+ return False
+ #
+ # Found an export, trim out any special suffixes
+ #
+ for suffix in suffixes:
+ # Be backward compatible with Python < 3.9
+ if symbol.endswith(suffix):
+ symbol = symbol[:-len(suffix)]
+ function_set.add(symbol)
+ return True
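+ # For example, a line containing "EXPORT_SYMBOL_GPL(kmalloc_noprof)"
+ # adds "kmalloc" to function_set and returns True.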
+
+ def process_normal(self, ln, line):
+ """
+ STATE_NORMAL: looking for the /** to begin everything.
+ """
+
+ if not doc_start.match(line):
+ return
+
+ # start a new entry
+ self.reset_state(ln)
+
+ # next line is always the function name
+ self.state = state.NAME
+
+ def process_name(self, ln, line):
+ """
+ STATE_NAME: Looking for the "name - description" line
+ """
+ #
+ # Check for a DOC: block and handle them specially.
+ #
+ if doc_block.search(line):
+
+ if not doc_block.group(1):
+ self.entry.begin_section(ln, "Introduction")
+ else:
+ self.entry.begin_section(ln, doc_block.group(1))
+
+ self.entry.identifier = self.entry.section
+ self.state = state.DOCBLOCK
+ #
+ # Otherwise we're looking for a normal kerneldoc declaration line.
+ #
+ elif doc_decl.search(line):
+ self.entry.identifier = doc_decl.group(1)
+
+ # Test for data declaration
+ if doc_begin_data.search(line):
+ self.entry.decl_type = doc_begin_data.group(1)
+ self.entry.identifier = doc_begin_data.group(2)
+ #
+ # Look for a function description
+ #
+ elif doc_begin_func.search(line):
+ self.entry.identifier = doc_begin_func.group(1)
+ self.entry.decl_type = "function"
+ #
+ # We struck out.
+ #
+ else:
+ self.emit_msg(ln,
+ f"This comment starts with '/**', but isn't a kernel-doc comment. Refer to Documentation/doc-guide/kernel-doc.rst\n{line}")
+ self.state = state.NORMAL
+ return
+ #
+ # OK, set up for a new kerneldoc entry.
+ #
+ self.state = state.BODY
+ self.entry.identifier = self.entry.identifier.strip(" ")
+ # if there's no @param blocks need to set up default section here
+ self.entry.begin_section(ln + 1)
+ #
+ # Find the description portion, which *should* be there but
+ # isn't always.
+ # (We should be able to capture this from the previous parsing - someday)
+ #
+ r = KernRe("[-:](.*)")
+ if r.search(line):
+ self.entry.declaration_purpose = trim_whitespace(r.group(1))
+ self.state = state.DECLARATION
+ else:
+ self.entry.declaration_purpose = ""
+
+ if not self.entry.declaration_purpose and self.config.wshort_desc:
+ self.emit_msg(ln,
+ f"missing initial short description on line:\n{line}")
+
+ if not self.entry.identifier and self.entry.decl_type != "enum":
+ self.emit_msg(ln,
+ f"wrong kernel-doc identifier on line:\n{line}")
+ self.state = state.NORMAL
+
+ if self.config.verbose:
+ self.emit_msg(ln,
+ f"Scanning doc for {self.entry.decl_type} {self.entry.identifier}",
+ warning=False)
+ #
+ # Failed to find an identifier. Emit a warning
+ #
+ else:
+ self.emit_msg(ln, f"Cannot find identifier on line:\n{line}")
+
+ #
+ # Helper function to determine if a new section is being started.
+ #
+ def is_new_section(self, ln, line):
+ if doc_sect.search(line):
+ self.state = state.BODY
+ #
+ # Pick out the name of our new section, tweaking it if need be.
+ #
+ newsection = doc_sect.group(1)
+ if newsection.lower() == 'description':
+ newsection = 'Description'
+ elif newsection.lower() == 'context':
+ newsection = 'Context'
+ self.state = state.SPECIAL_SECTION
+ elif newsection.lower() in ["@return", "@returns",
+ "return", "returns"]:
+ newsection = "Return"
+ self.state = state.SPECIAL_SECTION
+ elif newsection[0] == '@':
+ self.state = state.SPECIAL_SECTION
+ #
+ # Initialize the contents, and get the new section going.
+ #
+ newcontents = doc_sect.group(2)
+ if not newcontents:
+ newcontents = ""
+ self.dump_section()
+ self.entry.begin_section(ln, newsection)
+ self.entry.leading_space = None
+
+ self.entry.add_text(newcontents.lstrip())
+ return True
+ return False
+
+ #
+ # Helper function to detect (and effect) the end of a kerneldoc comment.
+ #
+ def is_comment_end(self, ln, line):
+ if doc_end.search(line):
+ self.dump_section()
+
+ # Look for doc_com + <text> + doc_end:
+ r = KernRe(r'\s*\*\s*[a-zA-Z_0-9:.]+\*/')
+ if r.match(line):
+ self.emit_msg(ln, f"suspicious ending line: {line}")
+
+ self.entry.prototype = ""
+ self.entry.new_start_line = ln + 1
+
+ self.state = state.PROTO
+ return True
+ return False
+
+
+ def process_decl(self, ln, line):
+ """
+ STATE_DECLARATION: We've seen the beginning of a declaration
+ """
+ if self.is_new_section(ln, line) or self.is_comment_end(ln, line):
+ return
+ #
+ # Look for anything with the " * " line beginning.
+ #
+ if doc_content.search(line):
+ cont = doc_content.group(1)
+ #
+ # A blank line means that we have moved out of the declaration
+ # part of the comment (without any "special section" parameter
+ # descriptions).
+ #
+ if cont == "":
+ self.state = state.BODY
+ #
+ # Otherwise we have more of the declaration section to soak up.
+ #
+ else:
+ self.entry.declaration_purpose = \
+ trim_whitespace(self.entry.declaration_purpose + ' ' + cont)
+ else:
+ # Unknown line, ignore
+ self.emit_msg(ln, f"bad line: {line}")
+
+
+ def process_special(self, ln, line):
+ """
+ STATE_SPECIAL_SECTION: a section ending with a blank line
+ """
+ #
+ # If we have hit a blank line (only the " * " marker), then this
+ # section is done.
+ #
+ if KernRe(r"\s*\*\s*$").match(line):
+ self.entry.begin_section(ln, dump = True)
+ self.state = state.BODY
+ return
+ #
+ # Not a blank line, look for the other ways to end the section.
+ #
+ if self.is_new_section(ln, line) or self.is_comment_end(ln, line):
+ return
+ #
+ # OK, we should have a continuation of the text for this section.
+ #
+ if doc_content.search(line):
+ cont = doc_content.group(1)
+ #
+ # If the lines of text after the first in a special section have
+ # leading white space, we need to trim it out or Sphinx will get
+ # confused. For the second line (the None case), see what we
+ # find there and remember it.
+ #
+ if self.entry.leading_space is None:
+ r = KernRe(r'^(\s+)')
+ if r.match(cont):
+ self.entry.leading_space = len(r.group(1))
+ else:
+ self.entry.leading_space = 0
+ #
+ # Otherwise, before trimming any leading chars, be *sure*
+ # that they are white space. We should maybe warn if this
+ # isn't the case.
+ #
+ for i in range(0, self.entry.leading_space):
+ if cont[i] != " ":
+ self.entry.leading_space = i
+ break
+ #
+ # Add the trimmed result to the section and we're done.
+ #
+ self.entry.add_text(cont[self.entry.leading_space:])
+ else:
+ # Unknown line, ignore
+ self.emit_msg(ln, f"bad line: {line}")
+
+ def process_body(self, ln, line):
+ """
+ STATE_BODY: the bulk of a kerneldoc comment.
+ """
+ if self.is_new_section(ln, line) or self.is_comment_end(ln, line):
+ return
+
+ if doc_content.search(line):
+ cont = doc_content.group(1)
+ self.entry.add_text(cont)
+ else:
+ # Unknown line, ignore
+ self.emit_msg(ln, f"bad line: {line}")
+
+ def process_inline_name(self, ln, line):
+ """STATE_INLINE_NAME: beginning of docbook comments within a prototype."""
+
+ if doc_inline_sect.search(line):
+ self.entry.begin_section(ln, doc_inline_sect.group(1))
+ self.entry.add_text(doc_inline_sect.group(2).lstrip())
+ self.state = state.INLINE_TEXT
+ elif doc_inline_end.search(line):
+ self.dump_section()
+ self.state = state.PROTO
+ elif doc_content.search(line):
+ self.emit_msg(ln, f"Incorrect use of kernel-doc format: {line}")
+ self.state = state.PROTO
+ # else ... ??
+
+ def process_inline_text(self, ln, line):
+ """STATE_INLINE_TEXT: docbook comments within a prototype."""
+
+ if doc_inline_end.search(line):
+ self.dump_section()
+ self.state = state.PROTO
+ elif doc_content.search(line):
+ self.entry.add_text(doc_content.group(1))
+ # else ... ??
+
+ def syscall_munge(self, ln, proto): # pylint: disable=W0613
+ """
+ Handle syscall definitions
+ """
+
+ is_void = False
+
+ # Strip newlines/CR's
+ proto = re.sub(r'[\r\n]+', ' ', proto)
+
+ # Check if it's a SYSCALL_DEFINE0
+ if 'SYSCALL_DEFINE0' in proto:
+ is_void = True
+
+ # Replace SYSCALL_DEFINE with correct return type & function name
+ proto = KernRe(r'SYSCALL_DEFINE.*\(').sub('long sys_', proto)
+
+ r = KernRe(r'long\s+(sys_.*?),')
+ if r.search(proto):
+ proto = KernRe(',').sub('(', proto, count=1)
+ elif is_void:
+ proto = KernRe(r'\)').sub('(void)', proto, count=1)
+
+ # Now delete all of the odd-numbered commas in the proto
+ # so that argument types & names don't have a comma between them
+ count = 0
+ length = len(proto)
+
+ if is_void:
+ length = 0 # skip the loop if is_void
+
+ for ix in range(length):
+ if proto[ix] == ',':
+ count += 1
+ if count % 2 == 1:
+ proto = proto[:ix] + ' ' + proto[ix + 1:]
+
+ return proto
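+ #
+ # E.g. (illustrative) "SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)" is
+ # munged into the equivalent of "long sys_kill(pid_t pid, int sig)"
+ # (modulo whitespace).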
+
+ def tracepoint_munge(self, ln, proto):
+ """
+ Handle tracepoint definitions
+ """
+
+ tracepointname = None
+ tracepointargs = None
+
+ # Match tracepoint name based on different patterns
+ r = KernRe(r'TRACE_EVENT\((.*?),')
+ if r.search(proto):
+ tracepointname = r.group(1)
+
+ r = KernRe(r'DEFINE_SINGLE_EVENT\((.*?),')
+ if r.search(proto):
+ tracepointname = r.group(1)
+
+ r = KernRe(r'DEFINE_EVENT\((.*?),(.*?),')
+ if r.search(proto):
+ tracepointname = r.group(2)
+
+ if tracepointname:
+ tracepointname = tracepointname.lstrip()
+
+ r = KernRe(r'TP_PROTO\((.*?)\)')
+ if r.search(proto):
+ tracepointargs = r.group(1)
+
+ if not tracepointname or not tracepointargs:
+ self.emit_msg(ln,
+ f"Unrecognized tracepoint format:\n{proto}\n")
+ else:
+ proto = f"static inline void trace_{tracepointname}({tracepointargs})"
+ self.entry.identifier = f"trace_{self.entry.identifier}"
+
+ return proto
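+ #
+ # E.g. (illustrative) "TRACE_EVENT(foo, TP_PROTO(struct foo *f), ...)"
+ # yields the prototype "static inline void trace_foo(struct foo *f)".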
+
+ def process_proto_function(self, ln, line):
+ """Ancillary routine to process a function prototype"""
+
+ # strip C99-style comments to end of line
+ line = KernRe(r"//.*$", re.S).sub('', line)
+ #
+ # Soak up the line's worth of prototype text, stopping at { or ; if present.
+ #
+ if KernRe(r'\s*#\s*define').match(line):
+ self.entry.prototype = line
+ elif not line.startswith('#'): # skip other preprocessor stuff
+ r = KernRe(r'([^\{]*)')
+ if r.match(line):
+ self.entry.prototype += r.group(1) + " "
+ #
+ # If we now have the whole prototype, clean it up and declare victory.
+ #
+ if '{' in line or ';' in line or KernRe(r'\s*#\s*define').match(line):
+ # strip comments and surrounding spaces
+ self.entry.prototype = KernRe(r'/\*.*\*/').sub('', self.entry.prototype).strip()
+ #
+ # Handle self.entry.prototypes for function pointers like:
+ # int (*pcs_config)(struct foo)
+ # by turning it into
+ # int pcs_config(struct foo)
+ #
+ r = KernRe(r'^(\S+\s+)\(\s*\*(\S+)\)')
+ self.entry.prototype = r.sub(r'\1\2', self.entry.prototype)
+ #
+ # Handle special declaration syntaxes
+ #
+ if 'SYSCALL_DEFINE' in self.entry.prototype:
+ self.entry.prototype = self.syscall_munge(ln,
+ self.entry.prototype)
+ else:
+ r = KernRe(r'TRACE_EVENT|DEFINE_EVENT|DEFINE_SINGLE_EVENT')
+ if r.search(self.entry.prototype):
+ self.entry.prototype = self.tracepoint_munge(ln,
+ self.entry.prototype)
+ #
+ # ... and we're done
+ #
+ self.dump_function(ln, self.entry.prototype)
+ self.reset_state(ln)
+
+ def process_proto_type(self, ln, line):
+ """Ancillary routine to process a type"""
+
+ # Strip C99-style comments and surrounding whitespace
+ line = KernRe(r"//.*$", re.S).sub('', line).strip()
+ if not line:
+ return # nothing to see here
+
+ # To distinguish a preprocessor directive from a regular declaration later.
+ if line.startswith('#'):
+ line += ";"
+ #
+ # Split the declaration on any of { } or ;, and accumulate pieces
+ # until we hit a semicolon while not inside {brackets}
+ #
+ r = KernRe(r'(.*?)([{};])')
+ for chunk in r.split(line):
+ if chunk: # Ignore empty matches
+ self.entry.prototype += chunk
+ #
+ # This cries out for a match statement ... someday after we can
+ # drop Python 3.9 ...
+ #
+ if chunk == '{':
+ self.entry.brcount += 1
+ elif chunk == '}':
+ self.entry.brcount -= 1
+ elif chunk == ';' and self.entry.brcount <= 0:
+ self.dump_declaration(ln, self.entry.prototype)
+ self.reset_state(ln)
+ return
+ #
+ # We hit the end of the line while still in the declaration; put
+ # in a space to represent the newline.
+ #
+ self.entry.prototype += ' '
+
+ def process_proto(self, ln, line):
+ """STATE_PROTO: reading a function/whatever prototype."""
+
+ if doc_inline_oneline.search(line):
+ self.entry.begin_section(ln, doc_inline_oneline.group(1))
+ self.entry.add_text(doc_inline_oneline.group(2))
+ self.dump_section()
+
+ elif doc_inline_start.search(line):
+ self.state = state.INLINE_NAME
+
+ elif self.entry.decl_type == 'function':
+ self.process_proto_function(ln, line)
+
+ else:
+ self.process_proto_type(ln, line)
+
+ def process_docblock(self, ln, line):
+ """STATE_DOCBLOCK: within a DOC: block."""
+
+ if doc_end.search(line):
+ self.dump_section()
+ self.output_declaration("doc", self.entry.identifier)
+ self.reset_state(ln)
+
+ elif doc_content.search(line):
+ self.entry.add_text(doc_content.group(1))
+
+ def parse_export(self):
+ """
+ Parses EXPORT_SYMBOL* macros from a single Kernel source file.
+ """
+
+ export_table = set()
+
+ try:
+ with open(self.fname, "r", encoding="utf8",
+ errors="backslashreplace") as fp:
+
+ for line in fp:
+ self.process_export(export_table, line)
+
+ except IOError:
+ return None
+
+ return export_table
+
+ #
+ # The state/action table telling us which function to invoke in
+ # each state.
+ #
+ state_actions = {
+ state.NORMAL: process_normal,
+ state.NAME: process_name,
+ state.BODY: process_body,
+ state.DECLARATION: process_decl,
+ state.SPECIAL_SECTION: process_special,
+ state.INLINE_NAME: process_inline_name,
+ state.INLINE_TEXT: process_inline_text,
+ state.PROTO: process_proto,
+ state.DOCBLOCK: process_docblock,
+ }
+
+ def parse_kdoc(self):
+ """
+ Open and process each line of a C source file.
+ The parsing is controlled via a state machine, and the line is passed
+ to a different process function depending on the state. The process
+ function may update the state as needed.
+
+ Besides parsing kernel-doc tags, it also parses export symbols.
+ """
+
+ prev = ""
+ prev_ln = None
+ export_table = set()
+
+ try:
+ with open(self.fname, "r", encoding="utf8",
+ errors="backslashreplace") as fp:
+ for ln, line in enumerate(fp):
+
+ line = line.expandtabs().strip("\n")
+
+ # Group continuation lines on prototypes
+ if self.state == state.PROTO:
+ if line.endswith("\\"):
+ prev += line.rstrip("\\")
+ if not prev_ln:
+ prev_ln = ln
+ continue
+
+ if prev:
+ ln = prev_ln
+ line = prev + line
+ prev = ""
+ prev_ln = None
+
+ self.config.log.debug("%d %s: %s",
+ ln, state.name[self.state],
+ line)
+
+ # This is an optimization over the original script.
+ # There, when export_file was used for the same file,
+ # it was read twice. Here, we use the already-existing
+ # loop to parse exported symbols as well.
+ #
+ if (self.state != state.NORMAL) or \
+ not self.process_export(export_table, line):
+ # Hand this line to the appropriate state handler
+ self.state_actions[self.state](self, ln, line)
+
+ except OSError:
+ self.config.log.error(f"Error: Cannot open file {self.fname}")
+
+ return export_table, self.entries
diff --git a/tools/lib/python/kdoc/kdoc_re.py b/tools/lib/python/kdoc/kdoc_re.py
new file mode 100644
index 000000000000..2dfa1bf83d64
--- /dev/null
+++ b/tools/lib/python/kdoc/kdoc_re.py
@@ -0,0 +1,270 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
+
+"""
+Regular expression ancillary classes.
+
+These help cache regular expressions and do the matching for kernel-doc.
+"""
+
+import re
+
+# Local cache for regular expressions
+re_cache = {}
+
+
+class KernRe:
+ """
+ Helper class to simplify regex declaration and usage.
+
+ It calls re.compile for a given pattern, and allows two KernRe
+ expressions to be concatenated with the "+" operator.
+
+ Regular expressions can be cached via an argument, helping to speed
+ up searches.
+ """
+
+ def _add_regex(self, string, flags):
+ """
+ Adds a new regex or reuses it from the cache.
+ """
+ self.regex = re_cache.get(string, None)
+ if not self.regex:
+ self.regex = re.compile(string, flags=flags)
+ if self.cache:
+ re_cache[string] = self.regex
+
+ def __init__(self, string, cache=True, flags=0):
+ """
+ Compile a regular expression and initialize internal vars.
+ """
+
+ self.cache = cache
+ self.last_match = None
+
+ self._add_regex(string, flags)
+
+ def __str__(self):
+ """
+ Return the regular expression pattern.
+ """
+ return self.regex.pattern
+
+ def __add__(self, other):
+ """
+ Allows adding two regular expressions into one.
+ """
+
+ return KernRe(str(self) + str(other), cache=self.cache or other.cache,
+ flags=self.regex.flags | other.regex.flags)
+
+ def match(self, string):
+ """
+ Handles a re.match storing its results
+ """
+
+ self.last_match = self.regex.match(string)
+ return self.last_match
+
+ def search(self, string):
+ """
+ Handles a re.search storing its results
+ """
+
+ self.last_match = self.regex.search(string)
+ return self.last_match
+
+ def findall(self, string):
+ """
+ Alias to re.findall
+ """
+
+ return self.regex.findall(string)
+
+ def split(self, string):
+ """
+ Alias to re.split
+ """
+
+ return self.regex.split(string)
+
+ def sub(self, sub, string, count=0):
+ """
+ Alias to re.sub
+ """
+
+ return self.regex.sub(sub, string, count=count)
+
+ def group(self, num):
+ """
+ Returns the group results of the last match
+ """
+
+ return self.last_match.group(num)
+
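+# A minimal usage sketch (illustrative, not from the kernel sources):
+#
+#     r = KernRe(r'(\w+)\s*=')
+#     if r.search("foo = 1"):
+#         print(r.group(1))        # prints "foo"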
+
+class NestedMatch:
+ """
+ Finding nested delimiters is hard with regular expressions. It is
+ even harder in Python with its standard re module, which lacks
+ several advanced regular expression features.
+
+ This is the case of this pattern:
+
+ '\\bSTRUCT_GROUP(\\(((?:(?>[^)(]+)|(?1))*)\\))[^;]*;'
+
+ which is used to properly match the open/close parentheses of a
+ STRUCT_GROUP() invocation.
+
+ This class counts pairs of delimiters, using that to match and
+ replace nested expressions.
+
+ The original approach was suggested by:
+ https://stackoverflow.com/questions/5454322/python-how-to-match-nested-parentheses-with-regex
+
+ Although I re-implemented it to make it more generic and match 3 types
+ of delimiters. The logic checks if delimiters are paired. If not, it
+ will ignore the search string.
+ """
+
+ # TODO: make NestedMatch handle multiple match groups
+ #
+ # Right now, regular expressions to match it are defined only up to
+ # the start delimiter, e.g.:
+ #
+ # \bSTRUCT_GROUP\(
+ #
+ # is similar to: STRUCT_GROUP\((.*)\)
+ # except that the content inside the match group is delimiter-aligned.
+ #
+ # The content inside parentheses is converted into a single replace
+    # group (e.g. r'\1').
+ #
+ # It would be nice to change such definition to support multiple
+ # match groups, allowing a regex equivalent to:
+ #
+ # FOO\((.*), (.*), (.*)\)
+ #
+    # It is probably easier to define it not as a regular expression, but
+ # with some lexical definition like:
+ #
+ # FOO(arg1, arg2, arg3)
+
+ DELIMITER_PAIRS = {
+ '{': '}',
+ '(': ')',
+ '[': ']',
+ }
+
+ RE_DELIM = re.compile(r'[\{\}\[\]\(\)]')
+
+ def _search(self, regex, line):
+ """
+        Finds paired blocks for a regex that ends with a delimiter.
+
+        The suggestion of using finditer to match pairs came from:
+        https://stackoverflow.com/questions/5454322/python-how-to-match-nested-parentheses-with-regex
+        but a different implementation was used here to align all three
+        types of delimiters and to seek for an initial regular expression.
+
+        The algorithm seeks open/close paired delimiters and places them
+        into a stack, yielding the start/stop positions of each match when
+        the stack is emptied.
+
+        The algorithm should work fine for properly paired lines, but will
+        silently ignore end delimiters that precede a start delimiter.
+        This should be OK for the kernel-doc parser, as unpaired delimiters
+        would cause compilation errors, so we don't need to raise
+        exceptions to cover such issues.
+ """
+
+ stack = []
+
+ for match_re in regex.finditer(line):
+ start = match_re.start()
+ offset = match_re.end()
+
+ d = line[offset - 1]
+ if d not in self.DELIMITER_PAIRS:
+ continue
+
+ end = self.DELIMITER_PAIRS[d]
+ stack.append(end)
+
+ for match in self.RE_DELIM.finditer(line[offset:]):
+ pos = match.start() + offset
+
+ d = line[pos]
+
+ if d in self.DELIMITER_PAIRS:
+ end = self.DELIMITER_PAIRS[d]
+
+ stack.append(end)
+ continue
+
+ # Does the end delimiter match what is expected?
+ if stack and d == stack[-1]:
+ stack.pop()
+
+ if not stack:
+ yield start, offset, pos + 1
+ break
+
+ def search(self, regex, line):
+ """
+ This is similar to re.search:
+
+        It matches a regex that is followed by a delimiter,
+ returning occurrences only if all delimiters are paired.
+ """
+
+ for t in self._search(regex, line):
+
+ yield line[t[0]:t[2]]
+
+ def sub(self, regex, sub, line, count=0):
+ """
+ This is similar to re.sub:
+
+        It matches a regex that is followed by a delimiter,
+        replacing occurrences only if all delimiters are paired.
+
+        If r'\1' is used in sub, it works just like re.sub: the matched
+        paired data, with the delimiters stripped, is placed there.
+
+        If count is nonzero, it will replace at most count
+        items.
+ """
+ out = ""
+
+ cur_pos = 0
+ n = 0
+
+ for start, end, pos in self._search(regex, line):
+ out += line[cur_pos:start]
+
+ # Value, ignoring start/end delimiters
+ value = line[end:pos - 1]
+
+ # replaces \1 at the sub string, if \1 is used there
+ new_sub = sub
+ new_sub = new_sub.replace(r'\1', value)
+
+ out += new_sub
+
+            # Drop end ';' if any, guarding against a match at end of line
+            if pos < len(line) and line[pos] == ';':
+ pos += 1
+
+ cur_pos = pos
+ n += 1
+
+            if count and n >= count:
+ break
+
+        # Append the remaining string
+        out += line[cur_pos:]
+
+ return out
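A short sketch of how NestedMatch behaves, assuming kdoc_re is importable;
the STRUCT_GROUP() example mirrors the docstring above:

    import re
    from kdoc_re import NestedMatch

    nested = NestedMatch()
    line = "struct foo { STRUCT_GROUP(u8 a; u8 b;); int x; };"
    regex = re.compile(r"\bSTRUCT_GROUP\(")

    # search() yields the whole delimiter-balanced region
    print(list(nested.search(regex, line)))
    # ['STRUCT_GROUP(u8 a; u8 b;)']

    # sub() with r'\1' keeps the content, stripping the delimiters
    print(nested.sub(regex, r"\1", line))
    # 'struct foo { u8 a; u8 b; int x; };'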
diff --git a/tools/lib/python/kdoc/latex_fonts.py b/tools/lib/python/kdoc/latex_fonts.py
new file mode 100755
index 000000000000..29317f8006ea
--- /dev/null
+++ b/tools/lib/python/kdoc/latex_fonts.py
@@ -0,0 +1,167 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) Akira Yokosawa, 2024
+#
+# Ported to Python by (c) Mauro Carvalho Chehab, 2025
+
+"""
+Detect problematic Noto CJK variable fonts.
+
+For "make pdfdocs", reports of build errors of translations.pdf started
+arriving early 2024 [1, 2]. It turned out that Fedora and openSUSE
+tumbleweed have started deploying variable-font [3] format of "Noto CJK"
+fonts [4, 5]. For PDF, a LaTeX package named xeCJK is used for CJK
+(Chinese, Japanese, Korean) pages. xeCJK requires XeLaTeX/XeTeX, which
+does not (and likely never will) understand variable fonts for historical
+reasons.
+
+The build error happens even when both variable- and non-variable-format
+fonts are found on the build system. To make matters worse, Fedora lists
+variable "Noto CJK" fonts in the requirements of langpacks-ja, -ko, -zh_CN,
+-zh_TW, etc. Hence developers who have an interest in CJK pages are more
+likely to encounter the build errors.
+
+This script is invoked from the error path of "make pdfdocs" and emits
+suggestions if variable-font files of "Noto CJK" fonts are in the list of
+fonts accessible from XeTeX.
+
+References:
+[1]: https://lore.kernel.org/r/8734tqsrt7.fsf@meer.lwn.net/
+[2]: https://lore.kernel.org/r/1708585803.600323099@f111.i.mail.ru/
+[3]: https://en.wikipedia.org/wiki/Variable_font
+[4]: https://fedoraproject.org/wiki/Changes/Noto_CJK_Variable_Fonts
+[5]: https://build.opensuse.org/request/show/1157217
+
+#===========================================================================
+Workarounds for building translations.pdf
+#===========================================================================
+
+* Denylist "variable font" Noto CJK fonts.
+  - Create $HOME/deny-vf/fontconfig/fonts.conf from the template below,
+    with tweaks if necessary.
+ - Path of fontconfig/fonts.conf can be overridden by setting an env
+ variable FONTS_CONF_DENY_VF.
+
+ * Template:
+-----------------------------------------------------------------
+<?xml version="1.0"?>
+<!DOCTYPE fontconfig SYSTEM "urn:fontconfig:fonts.dtd">
+<fontconfig>
+<!--
+ Ignore variable-font glob (not to break xetex)
+-->
+ <selectfont>
+ <rejectfont>
+ <!--
+ for Fedora
+ -->
+ <glob>/usr/share/fonts/google-noto-*-cjk-vf-fonts</glob>
+ <!--
+ for openSUSE tumbleweed
+ -->
+ <glob>/usr/share/fonts/truetype/Noto*CJK*-VF.otf</glob>
+ </rejectfont>
+ </selectfont>
+</fontconfig>
+-----------------------------------------------------------------
+
+ The denylisting is activated for "make pdfdocs".
+
+* For skipping CJK pages in PDF
+ - Uninstall texlive-xecjk.
+ Denylisting is not needed in this case.
+
+* For printing CJK pages in PDF
+ - Need non-variable "Noto CJK" fonts.
+ * Fedora
+ - google-noto-sans-cjk-fonts
+ - google-noto-serif-cjk-fonts
+ * openSUSE tumbleweed
+ - Non-variable "Noto CJK" fonts are not available as distro packages
+ as of April, 2024. Fetch a set of font files from upstream Noto
+ CJK Font released at:
+ https://github.com/notofonts/noto-cjk/tree/main/Sans#super-otc
+ and at:
+ https://github.com/notofonts/noto-cjk/tree/main/Serif#super-otc
+      then uncompress and deploy them.
+ - Remember to update fontconfig cache by running fc-cache.
+
+!!! Caution !!!
+ Uninstalling "variable font" packages can be dangerous.
+ They might be depended upon by other packages important for your work.
+ Denylisting should be less invasive, as it is effective only while
+ XeLaTeX runs in "make pdfdocs".
+"""
+
+import os
+import re
+import subprocess
+import textwrap
+import sys
+
+class LatexFontChecker:
+ """
+ Detect problems with CJK variable fonts that affect PDF builds for
+ translations.
+ """
+
+ def __init__(self, deny_vf=None):
+ if not deny_vf:
+ deny_vf = os.environ.get('FONTS_CONF_DENY_VF', "~/deny-vf")
+
+ self.environ = os.environ.copy()
+ self.environ['XDG_CONFIG_HOME'] = os.path.expanduser(deny_vf)
+
+ self.re_cjk = re.compile(r"([^:]+):\s*Noto\s+(Sans|Sans Mono|Serif) CJK")
+
+ def description(self):
+ return __doc__
+
+ def get_noto_cjk_vf_fonts(self):
+ """Get Noto CJK fonts"""
+
+ cjk_fonts = set()
+ cmd = ["fc-list", ":", "file", "family", "variable"]
+ try:
+            result = subprocess.run(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True,
+ env=self.environ,
+ check=True)
+
+ except subprocess.CalledProcessError as exc:
+ sys.exit(f"Error running fc-list: {repr(exc)}")
+
+ for line in result.stdout.splitlines():
+ if 'variable=True' not in line:
+ continue
+
+ match = self.re_cjk.search(line)
+ if match:
+ cjk_fonts.add(match.group(1))
+
+ return sorted(cjk_fonts)
+
+ def check(self):
+ """Check for problems with CJK fonts"""
+
+ fonts = textwrap.indent("\n".join(self.get_noto_cjk_vf_fonts()), " ")
+ if not fonts:
+ return None
+
+ msg = "=" * 77 + "\n"
+ msg += 'XeTeX is confused by "variable font" files listed below:\n'
+ msg += fonts + "\n"
+ msg += textwrap.dedent(f"""
+ For CJK pages in PDF, they need to be hidden from XeTeX by denylisting.
+ Or, CJK pages can be skipped by uninstalling texlive-xecjk.
+
+ For more info on denylisting, other options, and variable font, run:
+
+ tools/docs/check-variable-fonts.py -h
+ """)
+ msg += "=" * 77
+
+ return msg
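A usage sketch for the checker above; the call sequence is the one exposed
by the class, while the surrounding script is hypothetical:

    from latex_fonts import LatexFontChecker

    checker = LatexFontChecker()      # honors FONTS_CONF_DENY_VF if set
    msg = checker.check()
    if msg:
        print(msg)                    # variable CJK fonts visible to XeTeX
    else:
        print("No problematic Noto CJK variable fonts found")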
diff --git a/tools/lib/python/kdoc/parse_data_structs.py b/tools/lib/python/kdoc/parse_data_structs.py
new file mode 100755
index 000000000000..25361996cd20
--- /dev/null
+++ b/tools/lib/python/kdoc/parse_data_structs.py
@@ -0,0 +1,482 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2016-2025 by Mauro Carvalho Chehab <mchehab@kernel.org>.
+# pylint: disable=R0912,R0915
+
+"""
+Parse a source file or header, creating ReStructured Text cross references.
+
+It accepts an optional file to change the default symbol reference or to
+suppress symbols from the output.
+
+It is capable of identifying defines, functions, structs, typedefs,
+enums and enum symbols, creating cross-references for all of them.
+It is also capable of distinguishing a #define used for specifying a
+Linux ioctl.
+
+The optional rules file contains a set of rules like:
+
+ ignore ioctl VIDIOC_ENUM_FMT
+ replace ioctl VIDIOC_DQBUF vidioc_qbuf
+ replace define V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ :c:type:`v4l2_event_motion_det`
+"""
+
+import os
+import re
+import sys
+
+
+class ParseDataStructs:
+ """
+ Creates an enriched version of a Kernel header file with cross-links
+ to each C data structure type.
+
+ It is meant to allow having a more comprehensive documentation, where
+ uAPI headers will create cross-reference links to the code.
+
+    It is capable of identifying defines, functions, structs, typedefs,
+    enums and enum symbols, creating cross-references for all of them.
+    It is also capable of distinguishing a #define used for specifying a
+    Linux ioctl.
+
+    By default, it creates rules for all symbols and defines, but it also
+    allows parsing an exceptions file. Such a file contains a set of rules
+    using the syntax below:
+
+ 1. Ignore rules:
+
+       ignore <type> <symbol>
+
+ Removes the symbol from reference generation.
+
+ 2. Replace rules:
+
+ replace <type> <old_symbol> <new_reference>
+
+       Replaces the reference for old_symbol with a new one. The
+       new_reference can be:
+
+ - A simple symbol name;
+ - A full Sphinx reference.
+
+ 3. Namespace rules
+
+ namespace <namespace>
+
+ Sets C namespace to be used during cross-reference generation. Can
+ be overridden by replace rules.
+
+ On ignore and replace rules, <type> can be:
+ - ioctl: for defines that end with _IO*, e.g. ioctl definitions
+ - define: for other defines
+ - symbol: for symbols defined within enums;
+ - typedef: for typedefs;
+ - enum: for the name of a non-anonymous enum;
+ - struct: for structs.
+
+ Examples:
+
+ ignore define __LINUX_MEDIA_H
+ ignore ioctl VIDIOC_ENUM_FMT
+ replace ioctl VIDIOC_DQBUF vidioc_qbuf
+ replace define V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ :c:type:`v4l2_event_motion_det`
+
+ namespace MC
+ """
+
+ # Parser regexes with multiple ways to capture enums and structs
+ RE_ENUMS = [
+ re.compile(r"^\s*enum\s+([\w_]+)\s*\{"),
+ re.compile(r"^\s*enum\s+([\w_]+)\s*$"),
+ re.compile(r"^\s*typedef\s*enum\s+([\w_]+)\s*\{"),
+ re.compile(r"^\s*typedef\s*enum\s+([\w_]+)\s*$"),
+ ]
+ RE_STRUCTS = [
+ re.compile(r"^\s*struct\s+([_\w][\w\d_]+)\s*\{"),
+ re.compile(r"^\s*struct\s+([_\w][\w\d_]+)$"),
+ re.compile(r"^\s*typedef\s*struct\s+([_\w][\w\d_]+)\s*\{"),
+ re.compile(r"^\s*typedef\s*struct\s+([_\w][\w\d_]+)$"),
+ ]
+
+    # FIXME: the original code was written long before the Sphinx C
+    # domain gained multiple namespaces. To avoid too much churn in the
+    # existing hyperlinks, the code kept using "c:type" instead of the
+    # right types. To change that, we need to change the types not only
+    # here, but also in the uAPI media documentation.
+ DEF_SYMBOL_TYPES = {
+ "ioctl": {
+ "prefix": "\\ ",
+ "suffix": "\\ ",
+ "ref_type": ":ref",
+ "description": "IOCTL Commands",
+ },
+ "define": {
+ "prefix": "\\ ",
+ "suffix": "\\ ",
+ "ref_type": ":ref",
+ "description": "Macros and Definitions",
+ },
+        # We call each definition inside an enum a "symbol"
+ "symbol": {
+ "prefix": "\\ ",
+ "suffix": "\\ ",
+ "ref_type": ":ref",
+ "description": "Enumeration values",
+ },
+ "typedef": {
+ "prefix": "\\ ",
+ "suffix": "\\ ",
+ "ref_type": ":c:type",
+ "description": "Type Definitions",
+ },
+ # This is the description of the enum itself
+ "enum": {
+ "prefix": "\\ ",
+ "suffix": "\\ ",
+ "ref_type": ":c:type",
+ "description": "Enumerations",
+ },
+ "struct": {
+ "prefix": "\\ ",
+ "suffix": "\\ ",
+ "ref_type": ":c:type",
+ "description": "Structures",
+ },
+ }
+
+ def __init__(self, debug: bool = False):
+ """Initialize internal vars"""
+ self.debug = debug
+ self.data = ""
+
+ self.symbols = {}
+
+        self.namespace = None
+        self.exceptions_name = None
+        self.ignore = []
+        self.replace = []
+
+ for symbol_type in self.DEF_SYMBOL_TYPES:
+ self.symbols[symbol_type] = {}
+
+ def read_exceptions(self, fname: str):
+ if not fname:
+ return
+
+        name = os.path.basename(fname)
+        self.exceptions_name = name
+
+ with open(fname, "r", encoding="utf-8", errors="backslashreplace") as f:
+ for ln, line in enumerate(f):
+ ln += 1
+ line = line.strip()
+ if not line or line.startswith("#"):
+ continue
+
+ # ignore rules
+ match = re.match(r"^ignore\s+(\w+)\s+(\S+)", line)
+
+ if match:
+ self.ignore.append((ln, match.group(1), match.group(2)))
+ continue
+
+ # replace rules
+ match = re.match(r"^replace\s+(\S+)\s+(\S+)\s+(\S+)", line)
+ if match:
+ self.replace.append((ln, match.group(1), match.group(2),
+ match.group(3)))
+ continue
+
+ match = re.match(r"^namespace\s+(\S+)", line)
+ if match:
+ self.namespace = match.group(1)
+ continue
+
+ sys.exit(f"{name}:{ln}: invalid line: {line}")
+
+ def apply_exceptions(self):
+ """
+ Process exceptions file with rules to ignore or replace references.
+ """
+
+        name = self.exceptions_name
+
+        # Handle ignore rules
+ for ln, c_type, symbol in self.ignore:
+ if c_type not in self.DEF_SYMBOL_TYPES:
+ sys.exit(f"{name}:{ln}: {c_type} is invalid")
+
+ d = self.symbols[c_type]
+ if symbol in d:
+ del d[symbol]
+
+ # Handle replace rules
+ for ln, c_type, old, new in self.replace:
+ if c_type not in self.DEF_SYMBOL_TYPES:
+ sys.exit(f"{name}:{ln}: {c_type} is invalid")
+
+ reftype = None
+
+ # Parse reference type when the type is specified
+
+ match = re.match(r"^\:c\:(\w+)\:\`(.+)\`", new)
+ if match:
+ reftype = f":c:{match.group(1)}"
+ new = match.group(2)
+ else:
+ match = re.search(r"(\:ref)\:\`(.+)\`", new)
+ if match:
+ reftype = match.group(1)
+ new = match.group(2)
+
+ # If the replacement rule doesn't have a type, get default
+ if not reftype:
+ reftype = self.DEF_SYMBOL_TYPES[c_type].get("ref_type")
+ if not reftype:
+ reftype = self.DEF_SYMBOL_TYPES[c_type].get("real_type")
+
+ new_ref = f"{reftype}:`{old} <{new}>`"
+
+ # Change self.symbols to use the replacement rule
+ if old in self.symbols[c_type]:
+ (_, ln) = self.symbols[c_type][old]
+ self.symbols[c_type][old] = (new_ref, ln)
+ else:
+ print(f"{name}:{ln}: Warning: can't find {old} {c_type}")
+
+ def store_type(self, ln, symbol_type: str, symbol: str,
+ ref_name: str = None, replace_underscores: bool = True):
+ """
+ Stores a new symbol at self.symbols under symbol_type.
+
+ By default, underscores are replaced by "-"
+ """
+ defs = self.DEF_SYMBOL_TYPES[symbol_type]
+
+ prefix = defs.get("prefix", "")
+ suffix = defs.get("suffix", "")
+ ref_type = defs.get("ref_type")
+
+ # Determine ref_link based on symbol type
+ if ref_type or self.namespace:
+ if not ref_name:
+ ref_name = symbol.lower()
+
+            # :ref targets use dashes instead of underscores
+ if ref_type == ":ref" and replace_underscores:
+ ref_name = ref_name.replace("_", "-")
+
+ # C domain references may have namespaces
+            if ref_type and ref_type.startswith(":c:"):
+ if self.namespace:
+ ref_name = f"{self.namespace}.{ref_name}"
+
+ if ref_type:
+ ref_link = f"{ref_type}:`{symbol} <{ref_name}>`"
+ else:
+ ref_link = f"`{symbol} <{ref_name}>`"
+ else:
+ ref_link = symbol
+
+ self.symbols[symbol_type][symbol] = (f"{prefix}{ref_link}{suffix}", ln)
+
+ def store_line(self, line):
+ """Stores a line at self.data, properly indented"""
+ line = " " + line.expandtabs()
+ self.data += line.rstrip(" ")
+
+ def parse_file(self, file_in: str, exceptions: str = None):
+ """Reads a C source file and get identifiers"""
+ self.data = ""
+ is_enum = False
+ is_comment = False
+ multiline = ""
+
+ self.read_exceptions(exceptions)
+
+ with open(file_in, "r",
+ encoding="utf-8", errors="backslashreplace") as f:
+ for line_no, line in enumerate(f):
+ self.store_line(line)
+ line = line.strip("\n")
+
+ # Handle continuation lines
+                if line.endswith("\\"):
+                    multiline += line[:-1]
+ continue
+
+ if multiline:
+ line = multiline + line
+ multiline = ""
+
+ # Handle comments. They can be multilined
+ if not is_comment:
+ if re.search(r"/\*.*", line):
+ is_comment = True
+ else:
+ # Strip C99-style comments
+ line = re.sub(r"(//.*)", "", line)
+
+ if is_comment:
+ if re.search(r".*\*/", line):
+ is_comment = False
+ else:
+ multiline = line
+ continue
+
+                # At this point, the line variable may hold a multi-line
+                # statement, if lines end with \ or contain multi-line
+                # comments. With that, entire comments can safely be
+                # removed, and there's no need to use re.DOTALL below.
+
+ line = re.sub(r"(/\*.*\*/)", "", line)
+ if not line.strip():
+ continue
+
+ # It can be useful for debug purposes to print the file after
+ # having comments stripped and multi-lines grouped.
+ if self.debug > 1:
+ print(f"line {line_no + 1}: {line}")
+
+ # Now the fun begins: parse each type and store it.
+
+                # We opted for a two-pass parsing logic here because:
+                # 1. it makes it easier to debug non-parsed symbols;
+                # 2. we want symbol replacement over the entire content, not
+                #    just where the symbol is detected.
+
+ if is_enum:
+ match = re.match(r"^\s*([_\w][\w\d_]+)\s*[\,=]?", line)
+ if match:
+ self.store_type(line_no, "symbol", match.group(1))
+ if "}" in line:
+ is_enum = False
+ continue
+
+ match = re.match(r"^\s*#\s*define\s+([\w_]+)\s+_IO", line)
+ if match:
+ self.store_type(line_no, "ioctl", match.group(1),
+ replace_underscores=False)
+ continue
+
+ match = re.match(r"^\s*#\s*define\s+([\w_]+)(\s+|$)", line)
+ if match:
+ self.store_type(line_no, "define", match.group(1))
+ continue
+
+ match = re.match(r"^\s*typedef\s+([_\w][\w\d_]+)\s+(.*)\s+([_\w][\w\d_]+);",
+ line)
+ if match:
+ name = match.group(2).strip()
+ symbol = match.group(3)
+ self.store_type(line_no, "typedef", symbol, ref_name=name)
+ continue
+
+ for re_enum in self.RE_ENUMS:
+ match = re_enum.match(line)
+ if match:
+ self.store_type(line_no, "enum", match.group(1))
+ is_enum = True
+ break
+
+ for re_struct in self.RE_STRUCTS:
+ match = re_struct.match(line)
+ if match:
+ self.store_type(line_no, "struct", match.group(1))
+ break
+
+ self.apply_exceptions()
+
+ def debug_print(self):
+ """
+ Print debug information containing the replacement rules per symbol.
+        To make it easier to check, group them per type.
+ """
+ if not self.debug:
+ return
+
+ for c_type, refs in self.symbols.items():
+ if not refs: # Skip empty dictionaries
+ continue
+
+ print(f"{c_type}:")
+
+ for symbol, (ref, ln) in sorted(refs.items()):
+ print(f" #{ln:<5d} {symbol} -> {ref}")
+
+ print()
+
+ def gen_output(self):
+ """Write the formatted output to a file."""
+
+ # Avoid extra blank lines
+ text = re.sub(r"\s+$", "", self.data) + "\n"
+ text = re.sub(r"\n\s+\n", "\n\n", text)
+
+ # Escape Sphinx special characters
+ text = re.sub(r"([\_\`\*\<\>\&\\\\:\/\|\%\$\#\{\}\~\^])", r"\\\1", text)
+
+ # Source uAPI files may have special notes. Use bold font for them
+ text = re.sub(r"DEPRECATED", "**DEPRECATED**", text)
+
+ # Delimiters to catch the entire symbol after escaped
+ start_delim = r"([ \n\t\(=\*\@])"
+ end_delim = r"(\s|,|\\=|\\:|\;|\)|\}|\{)"
+
+ # Process all reference types
+ for ref_dict in self.symbols.values():
+ for symbol, (replacement, _) in ref_dict.items():
+ symbol = re.escape(re.sub(r"([\_\`\*\<\>\&\\\\:\/])", r"\\\1", symbol))
+ text = re.sub(fr'{start_delim}{symbol}{end_delim}',
+ fr'\1{replacement}\2', text)
+
+ # Remove "\ " where not needed: before spaces and at the end of lines
+ text = re.sub(r"\\ ([\n ])", r"\1", text)
+ text = re.sub(r" \\ ", " ", text)
+
+ return text
+
+ def gen_toc(self):
+ """
+        Create a list of symbols to be part of a table of contents
+ """
+ text = []
+
+ # Sort symbol types per description
+ symbol_descriptions = []
+ for k, v in self.DEF_SYMBOL_TYPES.items():
+ symbol_descriptions.append((v['description'], k))
+
+ symbol_descriptions.sort()
+
+ # Process each category
+ for description, c_type in symbol_descriptions:
+
+ refs = self.symbols[c_type]
+ if not refs: # Skip empty categories
+ continue
+
+ text.append(f"{description}")
+ text.append("-" * len(description))
+ text.append("")
+
+ # Sort symbols alphabetically
+ for symbol, (ref, ln) in sorted(refs.items()):
+ text.append(f"- LINENO_{ln}: {ref}")
+
+ text.append("") # Add empty line between categories
+
+ return "\n".join(text)
+
+ def write_output(self, file_in: str, file_out: str, toc: bool):
+ title = os.path.basename(file_in)
+
+ if toc:
+ text = self.gen_toc()
+ else:
+ text = self.gen_output()
+
+ with open(file_out, "w", encoding="utf-8", errors="backslashreplace") as f:
+ f.write(".. -*- coding: utf-8; mode: rst -*-\n\n")
+ f.write(f"{title}\n")
+ f.write("=" * len(title) + "\n\n")
+
+ if not toc:
+ f.write(".. parsed-literal::\n\n")
+
+ f.write(text)
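A sketch of the expected call sequence; the input, output and exceptions
file names here are hypothetical:

    from parse_data_structs import ParseDataStructs

    parser = ParseDataStructs(debug=False)
    parser.parse_file("videodev2.h", exceptions="videodev2.h.exceptions")
    parser.write_output("videodev2.h", "videodev2.h.rst", toc=False)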
diff --git a/tools/lib/python/kdoc/python_version.py b/tools/lib/python/kdoc/python_version.py
new file mode 100644
index 000000000000..e83088013db2
--- /dev/null
+++ b/tools/lib/python/kdoc/python_version.py
@@ -0,0 +1,178 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Copyright (c) 2017-2025 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+
+"""
+Handle Python version check logic.
+
+Not all Python versions are supported by scripts. Yet, in some cases,
+like during documentation builds, a newer version of Python could be
+available.
+
+This class allows checking if the minimal requirements are met.
+
+Better than that, PythonVersion.check_python() not only checks the minimal
+requirements, but also automatically switches to the newest available
+Python version if present.
+
+"""
+
+import os
+import re
+import subprocess
+import shlex
+import sys
+
+from glob import glob
+from textwrap import indent
+
+class PythonVersion:
+ """
+    Ancillary methods to check a minimal Python version requirement and,
+    when needed, re-execute the script with a newer interpreter.
+ """
+
+ def __init__(self, version):
+ """Ïnitialize self.version tuple from a version string"""
+ self.version = self.parse_version(version)
+
+ @staticmethod
+ def parse_version(version):
+ """Convert a major.minor.patch version into a tuple"""
+ return tuple(int(x) for x in version.split("."))
+
+ @staticmethod
+ def ver_str(version):
+ """Returns a version tuple as major.minor.patch"""
+ return ".".join([str(x) for x in version])
+
+ @staticmethod
+ def cmd_print(cmd, max_len=80):
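+        """Shell-quote a command list and wrap it into \\-continued lines"""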
+ cmd_line = []
+
+ for w in cmd:
+ w = shlex.quote(w)
+
+ if cmd_line:
+ if not max_len or len(cmd_line[-1]) + len(w) < max_len:
+ cmd_line[-1] += " " + w
+ continue
+ else:
+ cmd_line[-1] += " \\"
+ cmd_line.append(w)
+ else:
+ cmd_line.append(w)
+
+ return "\n ".join(cmd_line)
+
+ def __str__(self):
+ """Returns a version tuple as major.minor.patch from self.version"""
+ return self.ver_str(self.version)
+
+ @staticmethod
+ def get_python_version(cmd):
+ """
+        Get the version from a Python binary. As we need to detect whether
+        newer python binaries are out there, we can't rely on
+        sys.version_info here.
+ """
+
+ kwargs = {}
+ if sys.version_info < (3, 7):
+ kwargs['universal_newlines'] = True
+ else:
+ kwargs['text'] = True
+
+ result = subprocess.run([cmd, "--version"],
+ stdout = subprocess.PIPE,
+ stderr = subprocess.PIPE,
+ **kwargs, check=False)
+
+ version = result.stdout.strip()
+
+ match = re.search(r"(\d+\.\d+\.\d+)", version)
+ if match:
+ return PythonVersion.parse_version(match.group(1))
+
+ print(f"Can't parse version {version}")
+ return (0, 0, 0)
+
+ @staticmethod
+ def find_python(min_version):
+ """
+        Detect whether any python 3.xy version newer than the current
+        one is available.
+
+        Note: this routine is limited to up to 2 minor-version digits for
+        python3. We may need to update it one day, hopefully in a distant
+        future.
+ """
+ patterns = [
+ "python3.[0-9][0-9]",
+ "python3.[0-9]",
+ ]
+
+ python_cmd = []
+
+ # Seek for a python binary newer than min_version
+ for path in os.getenv("PATH", "").split(":"):
+ for pattern in patterns:
+ for cmd in glob(os.path.join(path, pattern)):
+ if os.path.isfile(cmd) and os.access(cmd, os.X_OK):
+ version = PythonVersion.get_python_version(cmd)
+ if version >= min_version:
+ python_cmd.append((version, cmd))
+
+ return sorted(python_cmd, reverse=True)
+
+ @staticmethod
+ def check_python(min_version, show_alternatives=False, bail_out=False,
+ success_on_error=False):
+ """
+ Check if the current python binary satisfies our minimal requirement
+ for Sphinx build. If not, re-run with a newer version if found.
+ """
+ cur_ver = sys.version_info[:3]
+        if cur_ver >= min_version:
+            return
+
+ python_ver = PythonVersion.ver_str(cur_ver)
+
+ available_versions = PythonVersion.find_python(min_version)
+ if not available_versions:
+ print(f"ERROR: Python version {python_ver} is not supported anymore\n")
+ print(" Can't find a new version. This script may fail")
+ return
+
+ script_path = os.path.abspath(sys.argv[0])
+
+        # At least one alternative exists at this point
+        new_python_cmd = available_versions[0][1]
+
+        if show_alternatives:
+ print("You could run, instead:")
+ for _, cmd in available_versions:
+ args = [cmd, script_path] + sys.argv[1:]
+
+ cmd_str = indent(PythonVersion.cmd_print(args), " ")
+ print(f"{cmd_str}\n")
+
+ if bail_out:
+ msg = f"Python {python_ver} not supported. Bailing out"
+ if success_on_error:
+ print(msg, file=sys.stderr)
+ sys.exit(0)
+ else:
+ sys.exit(msg)
+
+ print(f"Python {python_ver} not supported. Changing to {new_python_cmd}")
+
+ # Restart script using the newer version
+ args = [new_python_cmd, script_path] + sys.argv[1:]
+
+ try:
+ os.execv(new_python_cmd, args)
+ except OSError as e:
+ sys.exit(f"Failed to restart with {new_python_cmd}: {e}")
diff --git a/tools/lib/subcmd/help.c b/tools/lib/subcmd/help.c
index 8561b0f01a24..ddaeb4eb3e24 100644
--- a/tools/lib/subcmd/help.c
+++ b/tools/lib/subcmd/help.c
@@ -9,6 +9,7 @@
#include <sys/stat.h>
#include <unistd.h>
#include <dirent.h>
+#include <assert.h>
#include "subcmd-util.h"
#include "help.h"
#include "exec-cmd.h"
@@ -74,6 +75,9 @@ void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes)
size_t ci, cj, ei;
int cmp;
+ if (!excludes->cnt)
+ return;
+
ci = cj = ei = 0;
while (ci < cmds->cnt && ei < excludes->cnt) {
cmp = strcmp(cmds->names[ci]->name, excludes->names[ei]->name);
@@ -82,10 +86,11 @@ void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes)
ci++;
cj++;
} else {
- zfree(&cmds->names[cj]);
- cmds->names[cj++] = cmds->names[ci++];
+ cmds->names[cj++] = cmds->names[ci];
+ cmds->names[ci++] = NULL;
}
} else if (cmp == 0) {
+ zfree(&cmds->names[ci]);
ci++;
ei++;
} else if (cmp > 0) {
@@ -94,12 +99,12 @@ void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes)
}
if (ci != cj) {
while (ci < cmds->cnt) {
- zfree(&cmds->names[cj]);
- cmds->names[cj++] = cmds->names[ci++];
+ cmds->names[cj++] = cmds->names[ci];
+ cmds->names[ci++] = NULL;
}
}
for (ci = cj; ci < cmds->cnt; ci++)
- zfree(&cmds->names[ci]);
+ assert(cmds->names[ci] == NULL);
cmds->cnt = cj;
}
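The rework above moves pointers instead of freeing them in place, so each
name is owned by exactly one slot. A rough Python analogue of the resulting
merge-style exclusion over two sorted lists:

    def exclude_cmds(cmds, excludes):
        if not excludes:
            return cmds
        out, ei = [], 0
        for name in cmds:
            while ei < len(excludes) and excludes[ei] < name:
                ei += 1
            if ei < len(excludes) and excludes[ei] == name:
                continue          # matched an exclude: dropped (freed in C)
            out.append(name)      # kept: pointer moved, old slot NULLed
        return out

    print(exclude_cmds(["annotate", "report", "top"], ["report"]))
    # ['annotate', 'top']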
diff --git a/tools/lib/subcmd/run-command.c b/tools/lib/subcmd/run-command.c
index 0a764c25c384..b7510f83209a 100644
--- a/tools/lib/subcmd/run-command.c
+++ b/tools/lib/subcmd/run-command.c
@@ -5,6 +5,7 @@
#include <ctype.h>
#include <fcntl.h>
#include <string.h>
+#include <linux/compiler.h>
#include <linux/string.h>
#include <errno.h>
#include <sys/wait.h>
@@ -216,10 +217,20 @@ static int wait_or_whine(struct child_process *cmd, bool block)
return result;
}
+/*
 * Conservative estimate of the number of characters needed to hold a decoded
 * integer: assume each 3 bits needs a character byte, plus a possible sign
+ * character.
+ */
+#ifndef is_signed_type
+#define is_signed_type(type) (((type)(-1)) < (type)1)
+#endif
+#define MAX_STRLEN_TYPE(type) (sizeof(type) * 8 / 3 + (is_signed_type(type) ? 1 : 0))
+
int check_if_command_finished(struct child_process *cmd)
{
#ifdef __linux__
- char filename[FILENAME_MAX + 12];
+ char filename[6 + MAX_STRLEN_TYPE(typeof(cmd->pid)) + 7 + 1];
char status_line[256];
FILE *status_file;
@@ -227,7 +238,7 @@ int check_if_command_finished(struct child_process *cmd)
* Check by reading /proc/<pid>/status as calling waitpid causes
* stdout/stderr to be closed and data lost.
*/
- sprintf(filename, "/proc/%d/status", cmd->pid);
+ sprintf(filename, "/proc/%u/status", cmd->pid);
status_file = fopen(filename, "r");
if (status_file == NULL) {
/* Open failed assume finish_command was called. */
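A quick arithmetic check of the MAX_STRLEN_TYPE() bound, sketched in Python
for a 32-bit pid:

    bits = 32
    max_chars = bits // 3 + 1              # 10 digit bytes + 1 sign -> 11
    assert len(str(-2**31)) <= max_chars   # "-2147483648" is 11 chars
    # The buffer also reserves 6 bytes for "/proc/", 7 for "/status"
    # and 1 for the terminating NUL.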
diff --git a/tools/lib/thermal/Makefile b/tools/lib/thermal/Makefile
index a1f5e388644d..41aa7a324ff4 100644
--- a/tools/lib/thermal/Makefile
+++ b/tools/lib/thermal/Makefile
@@ -46,8 +46,12 @@ else
CFLAGS := -g -Wall
endif
+NL3_CFLAGS = $(shell pkg-config --cflags libnl-3.0 2>/dev/null)
+ifeq ($(NL3_CFLAGS),)
+NL3_CFLAGS = -I/usr/include/libnl3
+endif
+
INCLUDES = \
--I/usr/include/libnl3 \
-I$(srctree)/tools/lib/thermal/include \
-I$(srctree)/tools/lib/ \
-I$(srctree)/tools/include \
@@ -59,6 +63,7 @@ INCLUDES = \
override CFLAGS += $(EXTRA_WARNINGS)
override CFLAGS += -Werror -Wall
override CFLAGS += -fPIC
+override CFLAGS += $(NL3_CFLAGS)
override CFLAGS += $(INCLUDES)
override CFLAGS += -fvisibility=hidden
override CFGLAS += -Wl,-L.
@@ -134,7 +139,7 @@ endef
install_lib: libs
$(call QUIET_INSTALL, $(LIBTHERMAL_ALL)) \
$(call do_install_mkdir,$(libdir_SQ)); \
- cp -fpR $(LIBTHERMAL_ALL) $(DESTDIR)$(libdir_SQ)
+ cp -fR --preserve=mode,timestamp $(LIBTHERMAL_ALL) $(DESTDIR)$(libdir_SQ)
install_headers:
$(call QUIET_INSTALL, headers) \
diff --git a/tools/lib/thermal/libthermal.map b/tools/lib/thermal/libthermal.map
index d657176aa47f..1d3d0c04e4b6 100644
--- a/tools/lib/thermal/libthermal.map
+++ b/tools/lib/thermal/libthermal.map
@@ -1,6 +1,5 @@
LIBTHERMAL_0.0.1 {
global:
- thermal_init;
for_each_thermal_zone;
for_each_thermal_trip;
for_each_thermal_cdev;
@@ -9,9 +8,12 @@ LIBTHERMAL_0.0.1 {
thermal_zone_find_by_id;
thermal_zone_discover;
thermal_init;
+ thermal_exit;
+ thermal_events_exit;
thermal_events_init;
thermal_events_handle;
thermal_events_fd;
+ thermal_cmd_exit;
thermal_cmd_init;
thermal_cmd_get_tz;
thermal_cmd_get_cdev;
@@ -22,6 +24,7 @@ LIBTHERMAL_0.0.1 {
thermal_cmd_threshold_add;
thermal_cmd_threshold_delete;
thermal_cmd_threshold_flush;
+ thermal_sampling_exit;
thermal_sampling_init;
thermal_sampling_handle;
thermal_sampling_fd;
diff --git a/tools/mm/page_owner_sort.c b/tools/mm/page_owner_sort.c
index 880e36df0c11..14c67e9e84c4 100644
--- a/tools/mm/page_owner_sort.c
+++ b/tools/mm/page_owner_sort.c
@@ -13,6 +13,7 @@
#include <stdio.h>
#include <stdlib.h>
+#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
@@ -23,9 +24,6 @@
#include <linux/types.h>
#include <getopt.h>
-#define bool int
-#define true 1
-#define false 0
#define TASK_COMM_LEN 16
struct block_list {
@@ -669,14 +667,15 @@ int main(int argc, char **argv)
{ "pid", required_argument, NULL, 1 },
{ "tgid", required_argument, NULL, 2 },
{ "name", required_argument, NULL, 3 },
- { "cull", required_argument, NULL, 4 },
- { "sort", required_argument, NULL, 5 },
+ { "cull", required_argument, NULL, 4 },
+ { "sort", required_argument, NULL, 5 },
+ { "help", no_argument, NULL, 'h' },
{ 0, 0, 0, 0},
};
compare_flag = COMP_NO_FLAG;
- while ((opt = getopt_long(argc, argv, "admnpstP", longopts, NULL)) != -1)
+ while ((opt = getopt_long(argc, argv, "admnpstPh", longopts, NULL)) != -1)
switch (opt) {
case 'a':
compare_flag |= COMP_ALLOC;
@@ -702,6 +701,9 @@ int main(int argc, char **argv)
case 'n':
compare_flag |= COMP_COMM;
break;
+ case 'h':
+ usage();
+ exit(0);
case 1:
filter = filter | FILTER_PID;
fc.pids = parse_nums_list(optarg, &fc.pids_size);
diff --git a/tools/mm/show_page_info.py b/tools/mm/show_page_info.py
new file mode 100644
index 000000000000..c46d8ea283d7
--- /dev/null
+++ b/tools/mm/show_page_info.py
@@ -0,0 +1,169 @@
+#!/usr/bin/env drgn
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2025 Ye Liu <liuye@kylinos.cn>
+
+import argparse
+import sys
+from drgn import Object, FaultError, PlatformFlags, cast
+from drgn.helpers.linux import find_task, follow_page, page_size
+from drgn.helpers.linux.mm import (
+ decode_page_flags, page_to_pfn, page_to_phys, page_to_virt, vma_find,
+ PageSlab, PageCompound, PageHead, PageTail, compound_head, compound_order, compound_nr
+)
+from drgn.helpers.linux.cgroup import cgroup_name, cgroup_path
+
+DESC = """
+This is a drgn script to show the page state.
+For more info on drgn, visit https://github.com/osandov/drgn.
+"""
+
+def format_page_data(page):
+ """
+ Format raw page data into a readable hex dump with "RAW:" prefix.
+
+ :param page: drgn.Object instance representing the page.
+ :return: Formatted string of memory contents.
+ """
+ try:
+ address = page.value_()
+ size = prog.type("struct page").size
+
+ if prog.platform.flags & PlatformFlags.IS_64_BIT:
+ word_size = 8
+ else:
+ word_size = 4
+ num_words = size // word_size
+
+ values = []
+ for i in range(num_words):
+ word_address = address + i * word_size
+ word = prog.read_word(word_address)
+ values.append(f"{word:0{word_size * 2}x}")
+
+ lines = [f"RAW: {' '.join(values[i:i + 4])}" for i in range(0, len(values), 4)]
+
+ return "\n".join(lines)
+
+ except FaultError as e:
+ return f"Error reading memory: {e}"
+ except Exception as e:
+ return f"Unexpected error: {e}"
+
+def get_memcg_info(page):
+ """Retrieve memory cgroup information for a page."""
+ try:
+ MEMCG_DATA_OBJEXTS = prog.constant("MEMCG_DATA_OBJEXTS").value_()
+ MEMCG_DATA_KMEM = prog.constant("MEMCG_DATA_KMEM").value_()
+ mask = prog.constant('__NR_MEMCG_DATA_FLAGS').value_() - 1
+ memcg_data = page.memcg_data.read_()
+ if memcg_data & MEMCG_DATA_OBJEXTS:
+ slabobj_ext = cast("struct slabobj_ext *", memcg_data & ~mask)
+ memcg = slabobj_ext.objcg.memcg.value_()
+ elif memcg_data & MEMCG_DATA_KMEM:
+ objcg = cast("struct obj_cgroup *", memcg_data & ~mask)
+ memcg = objcg.memcg.value_()
+ else:
+ memcg = cast("struct mem_cgroup *", memcg_data & ~mask)
+
+ if memcg.value_() == 0:
+ return "none", "/sys/fs/cgroup/memory/"
+ cgrp = memcg.css.cgroup
+ return cgroup_name(cgrp).decode(), f"/sys/fs/cgroup/memory{cgroup_path(cgrp).decode()}"
+ except FaultError as e:
+ return "unknown", f"Error retrieving memcg info: {e}"
+ except Exception as e:
+ return "unknown", f"Unexpected error: {e}"
+
+def show_page_state(page, addr, mm, pid, task):
+ """Display detailed information about a page."""
+ try:
+ print(f'PID: {pid} Comm: {task.comm.string_().decode()} mm: {hex(mm)}')
+ try:
+ print(format_page_data(page))
+ except FaultError as e:
+ print(f"Error reading page data: {e}")
+ fields = {
+ "Page Address": hex(page.value_()),
+ "Page Flags": decode_page_flags(page),
+ "Page Size": prog["PAGE_SIZE"].value_(),
+ "Page PFN": hex(page_to_pfn(page).value_()),
+ "Page Physical": hex(page_to_phys(page).value_()),
+ "Page Virtual": hex(page_to_virt(page).value_()),
+ "Page Refcount": page._refcount.counter.value_(),
+ "Page Mapcount": page._mapcount.counter.value_(),
+ "Page Index": hex(page.__folio_index.value_()),
+ "Page Memcg Data": hex(page.memcg_data.value_()),
+ }
+
+ memcg_name, memcg_path = get_memcg_info(page)
+ fields["Memcg Name"] = memcg_name
+ fields["Memcg Path"] = memcg_path
+ fields["Page Mapping"] = hex(page.mapping.value_())
+ fields["Page Anon/File"] = "Anon" if page.mapping.value_() & 0x1 else "File"
+
+ try:
+ vma = vma_find(mm, addr)
+ fields["Page VMA"] = hex(vma.value_())
+ fields["VMA Start"] = hex(vma.vm_start.value_())
+ fields["VMA End"] = hex(vma.vm_end.value_())
+ except FaultError as e:
+ fields["Page VMA"] = "Unavailable"
+ fields["VMA Start"] = "Unavailable"
+ fields["VMA End"] = "Unavailable"
+ print(f"Error retrieving VMA information: {e}")
+
+ # Calculate the maximum field name length for alignment
+ max_field_len = max(len(field) for field in fields)
+
+ # Print aligned fields
+ for field, value in fields.items():
+ print(f"{field}:".ljust(max_field_len + 2) + f"{value}")
+
+ # Additional information about the page
+ if PageSlab(page):
+ print("This page belongs to the slab allocator.")
+
+ if PageCompound(page):
+ print("This page is part of a compound page.")
+ if PageHead(page):
+ print("This page is the head page of a compound page.")
+ if PageTail(page):
+ print("This page is the tail page of a compound page.")
+ print(f"{'Head Page:'.ljust(max_field_len + 2)}{hex(compound_head(page).value_())}")
+ print(f"{'Compound Order:'.ljust(max_field_len + 2)}{compound_order(page).value_()}")
+ print(f"{'Number of Pages:'.ljust(max_field_len + 2)}{compound_nr(page).value_()}")
+ else:
+ print("This page is not part of a compound page.")
+ except FaultError as e:
+ print(f"Error accessing page state: {e}")
+ except Exception as e:
+ print(f"Unexpected error: {e}")
+
+def main():
+ """Main function to parse arguments and display page state."""
+ parser = argparse.ArgumentParser(description=DESC, formatter_class=argparse.RawTextHelpFormatter)
+ parser.add_argument('pid', metavar='PID', type=int, help='Target process ID (PID)')
+ parser.add_argument('vaddr', metavar='VADDR', type=str, help='Target virtual address in hexadecimal format (e.g., 0x7fff1234abcd)')
+ args = parser.parse_args()
+
+ try:
+ vaddr = int(args.vaddr, 16)
+ except ValueError:
+ sys.exit(f"Error: Invalid virtual address format: {args.vaddr}")
+
+ try:
+ task = find_task(args.pid)
+ mm = task.mm
+ page = follow_page(mm, vaddr)
+
+ if page:
+ show_page_state(page, vaddr, mm, args.pid, task)
+ else:
+ sys.exit(f"Address {hex(vaddr)} is not mapped.")
+ except FaultError as e:
+ sys.exit(f"Error accessing task or memory: {e}")
+ except Exception as e:
+ sys.exit(f"Unexpected error: {e}")
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/mm/slabinfo.c b/tools/mm/slabinfo.c
index 1433eff99feb..80cdbd3db82d 100644
--- a/tools/mm/slabinfo.c
+++ b/tools/mm/slabinfo.c
@@ -155,6 +155,7 @@ static void usage(void)
static unsigned long read_obj(const char *name)
{
+ size_t len;
FILE *f = fopen(name, "r");
if (!f) {
@@ -165,8 +166,10 @@ static unsigned long read_obj(const char *name)
if (!fgets(buffer, sizeof(buffer), f))
buffer[0] = 0;
fclose(f);
- if (buffer[strlen(buffer)] == '\n')
- buffer[strlen(buffer)] = 0;
+ len = strlen(buffer);
+
+ if (len > 0 && buffer[len - 1] == '\n')
+ buffer[len - 1] = 0;
}
return strlen(buffer);
}
diff --git a/tools/net/sunrpc/xdrgen/generators/__init__.py b/tools/net/sunrpc/xdrgen/generators/__init__.py
index b98574a36a4a..e22632cf38fb 100644
--- a/tools/net/sunrpc/xdrgen/generators/__init__.py
+++ b/tools/net/sunrpc/xdrgen/generators/__init__.py
@@ -2,7 +2,7 @@
"""Define a base code generator class"""
-import sys
+from pathlib import Path
from jinja2 import Environment, FileSystemLoader, Template
from xdr_ast import _XdrAst, Specification, _RpcProgram, _XdrTypeSpecifier
@@ -14,8 +14,11 @@ def create_jinja2_environment(language: str, xdr_type: str) -> Environment:
"""Open a set of templates based on output language"""
match language:
case "C":
+ templates_dir = (
+ Path(__file__).parent.parent / "templates" / language / xdr_type
+ )
environment = Environment(
- loader=FileSystemLoader(sys.path[0] + "/templates/C/" + xdr_type + "/"),
+ loader=FileSystemLoader(templates_dir),
trim_blocks=True,
lstrip_blocks=True,
)
@@ -48,9 +51,7 @@ def find_xdr_program_name(root: Specification) -> str:
def header_guard_infix(filename: str) -> str:
"""Extract the header guard infix from the specification filename"""
- basename = filename.split("/")[-1]
- program = basename.replace(".x", "")
- return program.upper()
+ return Path(filename).stem.upper()
def kernel_c_type(spec: _XdrTypeSpecifier) -> str:
diff --git a/tools/net/sunrpc/xdrgen/generators/union.py b/tools/net/sunrpc/xdrgen/generators/union.py
index 2cca00e279cd..ad1f214ef22a 100644
--- a/tools/net/sunrpc/xdrgen/generators/union.py
+++ b/tools/net/sunrpc/xdrgen/generators/union.py
@@ -8,7 +8,7 @@ from jinja2 import Environment
from generators import SourceGenerator
from generators import create_jinja2_environment, get_jinja2_template
-from xdr_ast import _XdrBasic, _XdrUnion, _XdrVoid, get_header_name
+from xdr_ast import _XdrBasic, _XdrUnion, _XdrVoid, _XdrString, get_header_name
from xdr_ast import _XdrDeclaration, _XdrCaseSpec, public_apis, big_endian
@@ -40,13 +40,20 @@ def emit_union_case_spec_definition(
"""Emit a definition for an XDR union's case arm"""
if isinstance(node.arm, _XdrVoid):
return
- assert isinstance(node.arm, _XdrBasic)
+ if isinstance(node.arm, _XdrString):
+ type_name = "char *"
+ classifier = ""
+ else:
+ type_name = node.arm.spec.type_name
+ classifier = node.arm.spec.c_classifier
+
+ assert isinstance(node.arm, (_XdrBasic, _XdrString))
template = get_jinja2_template(environment, "definition", "case_spec")
print(
template.render(
name=node.arm.name,
- type=node.arm.spec.type_name,
- classifier=node.arm.spec.c_classifier,
+ type=type_name,
+ classifier=classifier,
)
)
@@ -84,6 +91,12 @@ def emit_union_case_spec_decoder(
if isinstance(node.arm, _XdrVoid):
return
+ if isinstance(node.arm, _XdrString):
+ type_name = "char *"
+ classifier = ""
+ else:
+ type_name = node.arm.spec.type_name
+ classifier = node.arm.spec.c_classifier
if big_endian_discriminant:
template = get_jinja2_template(environment, "decoder", "case_spec_be")
@@ -92,13 +105,13 @@ def emit_union_case_spec_decoder(
for case in node.values:
print(template.render(case=case))
- assert isinstance(node.arm, _XdrBasic)
+ assert isinstance(node.arm, (_XdrBasic, _XdrString))
template = get_jinja2_template(environment, "decoder", node.arm.template)
print(
template.render(
name=node.arm.name,
- type=node.arm.spec.type_name,
- classifier=node.arm.spec.c_classifier,
+ type=type_name,
+ classifier=classifier,
)
)
@@ -169,7 +182,10 @@ def emit_union_case_spec_encoder(
if isinstance(node.arm, _XdrVoid):
return
-
+ if isinstance(node.arm, _XdrString):
+ type_name = "char *"
+ else:
+ type_name = node.arm.spec.type_name
if big_endian_discriminant:
template = get_jinja2_template(environment, "encoder", "case_spec_be")
else:
@@ -181,7 +197,7 @@ def emit_union_case_spec_encoder(
print(
template.render(
name=node.arm.name,
- type=node.arm.spec.type_name,
+ type=type_name,
)
)
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/close.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/close.j2
index 5bf010665f84..3dbd724d7f17 100644
--- a/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/close.j2
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/close.j2
@@ -1,3 +1,3 @@
{# SPDX-License-Identifier: GPL-2.0 #}
return true;
-};
+}
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/close.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/close.j2
index 5bf010665f84..3dbd724d7f17 100644
--- a/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/close.j2
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/close.j2
@@ -1,3 +1,3 @@
{# SPDX-License-Identifier: GPL-2.0 #}
return true;
-};
+}
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/close.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/close.j2
index 5bf010665f84..3dbd724d7f17 100644
--- a/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/close.j2
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/close.j2
@@ -1,3 +1,3 @@
{# SPDX-License-Identifier: GPL-2.0 #}
return true;
-};
+}
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/variable_length_opaque.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/variable_length_opaque.j2
index 9a814de54ae8..65698e20d8cd 100644
--- a/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/variable_length_opaque.j2
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/variable_length_opaque.j2
@@ -2,5 +2,5 @@
{% if annotate %}
/* member {{ name }} (variable-length opaque) */
{% endif %}
- if (!xdrgen_decode_opaque(xdr, (opaque *)ptr, {{ maxsize }}))
+ if (!xdrgen_decode_opaque(xdr, &ptr->{{ name }}, {{ maxsize }}))
return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/close.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/close.j2
index 5bf010665f84..3dbd724d7f17 100644
--- a/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/close.j2
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/close.j2
@@ -1,3 +1,3 @@
{# SPDX-License-Identifier: GPL-2.0 #}
return true;
-};
+}
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/basic.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/basic.j2
index da4709403dc9..b215e157dfa7 100644
--- a/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/basic.j2
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/basic.j2
@@ -14,4 +14,4 @@ xdrgen_decode_{{ name }}(struct xdr_stream *xdr, {{ name }} *ptr)
/* (basic) */
{% endif %}
return xdrgen_decode_{{ type }}(xdr, ptr);
-};
+}
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/fixed_length_array.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/fixed_length_array.j2
index d7c80e472fe3..c8953719e626 100644
--- a/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/fixed_length_array.j2
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/fixed_length_array.j2
@@ -22,4 +22,4 @@ xdrgen_decode_{{ name }}(struct xdr_stream *xdr, {{ classifier }}{{ name }} *ptr
return false;
}
return true;
-};
+}
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/fixed_length_opaque.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/fixed_length_opaque.j2
index 8b4ff08c49e5..c854fc8c74e3 100644
--- a/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/fixed_length_opaque.j2
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/fixed_length_opaque.j2
@@ -13,5 +13,5 @@ xdrgen_decode_{{ name }}(struct xdr_stream *xdr, {{ classifier }}{{ name }} *ptr
{% if annotate %}
/* (fixed-length opaque) */
{% endif %}
- return xdr_stream_decode_opaque_fixed(xdr, ptr, {{ size }}) >= 0;
-};
+ return xdr_stream_decode_opaque_fixed(xdr, ptr, {{ size }}) == 0;
+}
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/string.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/string.j2
index 56c5a17d6a70..bcbc1758aae9 100644
--- a/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/string.j2
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/string.j2
@@ -14,4 +14,4 @@ xdrgen_decode_{{ name }}(struct xdr_stream *xdr, {{ classifier }}{{ name }} *ptr
/* (variable-length string) */
{% endif %}
return xdrgen_decode_string(xdr, ptr, {{ maxsize }});
-};
+}
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/variable_length_array.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/variable_length_array.j2
index e74ffdd98463..a59cc1f38eed 100644
--- a/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/variable_length_array.j2
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/variable_length_array.j2
@@ -23,4 +23,4 @@ xdrgen_decode_{{ name }}(struct xdr_stream *xdr, {{ classifier }}{{ name }} *ptr
if (!xdrgen_decode_{{ type }}(xdr, &ptr->element[i]))
return false;
return true;
-};
+}
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/variable_length_opaque.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/variable_length_opaque.j2
index f28f8b228ad5..eb05f53e1041 100644
--- a/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/variable_length_opaque.j2
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/variable_length_opaque.j2
@@ -14,4 +14,4 @@ xdrgen_decode_{{ name }}(struct xdr_stream *xdr, {{ classifier }}{{ name }} *ptr
/* (variable-length opaque) */
{% endif %}
return xdrgen_decode_opaque(xdr, ptr, {{ maxsize }});
-};
+}
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/basic.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/basic.j2
index 35effe67e4ef..0d21dd0b723a 100644
--- a/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/basic.j2
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/basic.j2
@@ -18,4 +18,4 @@ xdrgen_encode_{{ name }}(struct xdr_stream *xdr, const {{ classifier }}{{ name }
/* (basic) */
{% endif %}
return xdrgen_encode_{{ type }}(xdr, value);
-};
+}
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/fixed_length_array.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/fixed_length_array.j2
index 95202ad5ad2d..ec8cd6509514 100644
--- a/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/fixed_length_array.j2
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/fixed_length_array.j2
@@ -22,4 +22,4 @@ xdrgen_encode_{{ name }}(struct xdr_stream *xdr, const {{ classifier }}{{ name }
return false;
}
return true;
-};
+}
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/fixed_length_opaque.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/fixed_length_opaque.j2
index 9c66a11b9912..b53fa87e1858 100644
--- a/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/fixed_length_opaque.j2
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/fixed_length_opaque.j2
@@ -14,4 +14,4 @@ xdrgen_encode_{{ name }}(struct xdr_stream *xdr, const {{ classifier }}{{ name }
/* (fixed-length opaque) */
{% endif %}
return xdr_stream_encode_opaque_fixed(xdr, value, {{ size }}) >= 0;
-};
+}
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/string.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/string.j2
index 3d490ff180d0..28b81f1d0bd6 100644
--- a/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/string.j2
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/string.j2
@@ -14,4 +14,4 @@ xdrgen_encode_{{ name }}(struct xdr_stream *xdr, const {{ classifier }}{{ name }
/* (variable-length string) */
{% endif %}
return xdr_stream_encode_opaque(xdr, value.data, value.len) >= 0;
-};
+}
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/variable_length_array.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/variable_length_array.j2
index 2d2384f64918..ff093c281d51 100644
--- a/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/variable_length_array.j2
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/variable_length_array.j2
@@ -27,4 +27,4 @@ xdrgen_encode_{{ name }}(struct xdr_stream *xdr, const {{ classifier }}{{ name }
{% endif %}
return false;
return true;
-};
+}
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/variable_length_opaque.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/variable_length_opaque.j2
index 8508f13c95b9..2e89592fa702 100644
--- a/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/variable_length_opaque.j2
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/variable_length_opaque.j2
@@ -14,4 +14,4 @@ xdrgen_encode_{{ name }}(struct xdr_stream *xdr, const {{ classifier }}{{ name }
/* (variable-length opaque) */
{% endif %}
return xdr_stream_encode_opaque(xdr, value.data, value.len) >= 0;
-};
+}
diff --git a/tools/net/sunrpc/xdrgen/templates/C/union/declaration/close.j2 b/tools/net/sunrpc/xdrgen/templates/C/union/declaration/close.j2
new file mode 100644
index 000000000000..816291184e8c
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/union/declaration/close.j2
@@ -0,0 +1,4 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+bool xdrgen_decode_{{ name }}(struct xdr_stream *xdr, struct {{ name }} *ptr);
+bool xdrgen_encode_{{ name }}(struct xdr_stream *xdr, const struct {{ name }} *value);
diff --git a/tools/net/sunrpc/xdrgen/templates/C/union/decoder/close.j2 b/tools/net/sunrpc/xdrgen/templates/C/union/decoder/close.j2
index fdc2dfd1843b..39d8d6c5094d 100644
--- a/tools/net/sunrpc/xdrgen/templates/C/union/decoder/close.j2
+++ b/tools/net/sunrpc/xdrgen/templates/C/union/decoder/close.j2
@@ -1,4 +1,4 @@
{# SPDX-License-Identifier: GPL-2.0 #}
}
return true;
-};
+}
diff --git a/tools/net/sunrpc/xdrgen/templates/C/union/encoder/close.j2 b/tools/net/sunrpc/xdrgen/templates/C/union/encoder/close.j2
index fdc2dfd1843b..39d8d6c5094d 100644
--- a/tools/net/sunrpc/xdrgen/templates/C/union/encoder/close.j2
+++ b/tools/net/sunrpc/xdrgen/templates/C/union/encoder/close.j2
@@ -1,4 +1,4 @@
{# SPDX-License-Identifier: GPL-2.0 #}
}
return true;
-};
+}
diff --git a/tools/net/sunrpc/xdrgen/templates/C/union/encoder/string.j2 b/tools/net/sunrpc/xdrgen/templates/C/union/encoder/string.j2
new file mode 100644
index 000000000000..2f035a64f1f4
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/union/encoder/string.j2
@@ -0,0 +1,6 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (variable-length string) */
+{% endif %}
+ if (!xdrgen_encode_string(xdr, ptr->u.{{ name }}, {{ maxsize }}))
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/xdrgen b/tools/net/sunrpc/xdrgen/xdrgen
index 43762be39252..3afd0547d67c 100755
--- a/tools/net/sunrpc/xdrgen/xdrgen
+++ b/tools/net/sunrpc/xdrgen/xdrgen
@@ -10,8 +10,13 @@ __license__ = "GPL-2.0 only"
__version__ = "0.2"
import sys
+from pathlib import Path
import argparse
+_XDRGEN_DIR = Path(__file__).resolve().parent
+if str(_XDRGEN_DIR) not in sys.path:
+ sys.path.insert(0, str(_XDRGEN_DIR))
+
from subcmds import definitions
from subcmds import declarations
from subcmds import lint
diff --git a/tools/net/ynl/Makefile b/tools/net/ynl/Makefile
index 211df5a93ad9..7736b492f559 100644
--- a/tools/net/ynl/Makefile
+++ b/tools/net/ynl/Makefile
@@ -12,10 +12,13 @@ endif
libdir ?= $(prefix)/$(libdir_relative)
includedir ?= $(prefix)/include
-SUBDIRS = lib generated samples
+SPECDIR=../../../Documentation/netlink/specs
+
+SUBDIRS = lib generated samples ynltool tests
all: $(SUBDIRS) libynl.a
+ynltool: | lib generated libynl.a
samples: | lib generated
libynl.a: | lib generated
@echo -e "\tAR $@"
@@ -48,5 +51,27 @@ install: libynl.a lib/*.h
@echo -e "\tINSTALL pyynl"
@pip install --prefix=$(DESTDIR)$(prefix) .
@make -C generated install
+ @make -C tests install
+
+run_tests:
+ @$(MAKE) -C tests run_tests
+
+lint:
+ yamllint $(SPECDIR)
+
+schema_check:
+ @N=1; \
+ for spec in $(SPECDIR)/*.yaml ; do \
+ NAME=$$(basename $$spec) ; \
+ OUTPUT=$$(./pyynl/cli.py --spec $$spec --validate) ; \
+ if [ $$? -eq 0 ] ; then \
+ echo "ok $$N $$NAME schema validation" ; \
+ else \
+ echo "not ok $$N $$NAME schema validation" ; \
+ echo "$$OUTPUT" ; \
+ echo ; \
+ fi ; \
+ N=$$((N+1)) ; \
+ done
-.PHONY: all clean distclean install $(SUBDIRS)
+.PHONY: all clean distclean install run_tests lint schema_check $(SUBDIRS)
diff --git a/tools/net/ynl/Makefile.deps b/tools/net/ynl/Makefile.deps
index 90686e241157..865fd2e8519e 100644
--- a/tools/net/ynl/Makefile.deps
+++ b/tools/net/ynl/Makefile.deps
@@ -31,6 +31,7 @@ CFLAGS_ovpn:=$(call get_hdr_inc,_LINUX_OVPN_H,ovpn.h)
CFLAGS_ovs_datapath:=$(call get_hdr_inc,__LINUX_OPENVSWITCH_H,openvswitch.h)
CFLAGS_ovs_flow:=$(call get_hdr_inc,__LINUX_OPENVSWITCH_H,openvswitch.h)
CFLAGS_ovs_vport:=$(call get_hdr_inc,__LINUX_OPENVSWITCH_H,openvswitch.h)
+CFLAGS_psp:=$(call get_hdr_inc,_LINUX_PSP_H,psp.h)
CFLAGS_rt-addr:=$(call get_hdr_inc,__LINUX_RTNETLINK_H,rtnetlink.h) \
$(call get_hdr_inc,__LINUX_IF_ADDR_H,if_addr.h)
CFLAGS_rt-link:=$(call get_hdr_inc,__LINUX_RTNETLINK_H,rtnetlink.h) \
diff --git a/tools/net/ynl/lib/ynl-priv.h b/tools/net/ynl/lib/ynl-priv.h
index 824777d7e05e..ced7dce44efb 100644
--- a/tools/net/ynl/lib/ynl-priv.h
+++ b/tools/net/ynl/lib/ynl-priv.h
@@ -106,7 +106,6 @@ ynl_gemsg_start_req(struct ynl_sock *ys, __u32 id, __u8 cmd, __u8 version);
struct nlmsghdr *
ynl_gemsg_start_dump(struct ynl_sock *ys, __u32 id, __u8 cmd, __u8 version);
-int ynl_attr_validate(struct ynl_parse_arg *yarg, const struct nlattr *attr);
int ynl_submsg_failed(struct ynl_parse_arg *yarg, const char *field_name,
const char *sel_name);
@@ -314,7 +313,7 @@ ynl_attr_put_str(struct nlmsghdr *nlh, unsigned int attr_type, const char *str)
struct nlattr *attr;
size_t len;
- len = strlen(str);
+ len = strlen(str) + 1;
if (__ynl_attr_put_overflow(nlh, len))
return;
@@ -322,7 +321,7 @@ ynl_attr_put_str(struct nlmsghdr *nlh, unsigned int attr_type, const char *str)
attr->nla_type = attr_type;
strcpy((char *)ynl_attr_data(attr), str);
- attr->nla_len = NLA_HDRLEN + NLA_ALIGN(len);
+ attr->nla_len = NLA_HDRLEN + len;
nlh->nlmsg_len += NLMSG_ALIGN(attr->nla_len);
}
@@ -467,4 +466,13 @@ ynl_attr_put_sint(struct nlmsghdr *nlh, __u16 type, __s64 data)
else
ynl_attr_put_s64(nlh, type, data);
}
+
+int __ynl_attr_validate(struct ynl_parse_arg *yarg, const struct nlattr *attr,
+ unsigned int type);
+
+static inline int ynl_attr_validate(struct ynl_parse_arg *yarg,
+ const struct nlattr *attr)
+{
+ return __ynl_attr_validate(yarg, attr, ynl_attr_type(attr));
+}
#endif
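The strlen() + 1 change makes ynl_attr_put_str() count the NUL terminator in nla_len, matching how the kernel and the Python library (which appends b'\x00') encode string attributes; alignment padding is still added only to the enclosing message length. A Python sketch of the corrected wire layout, assuming standard netlink attribute framing with a 4-byte header:

    import struct

    def put_str_attr(attr_type: int, s: str) -> bytes:
        payload = s.encode('ascii') + b'\x00'   # NUL terminator included
        nla_len = 4 + len(payload)              # NLA_HDRLEN + payload, unpadded
        pad = b'\x00' * ((4 - len(payload) % 4) % 4)
        return struct.pack('HH', nla_len, attr_type) + payload + pad

    blob = put_str_attr(1, 'eth0')
    assert struct.unpack_from('HH', blob) == (9, 1)   # 4 + len('eth0\0')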
diff --git a/tools/net/ynl/lib/ynl.c b/tools/net/ynl/lib/ynl.c
index 2a169c3c0797..2bcd781111d7 100644
--- a/tools/net/ynl/lib/ynl.c
+++ b/tools/net/ynl/lib/ynl.c
@@ -360,15 +360,15 @@ static int ynl_cb_done(const struct nlmsghdr *nlh, struct ynl_parse_arg *yarg)
/* Attribute validation */
-int ynl_attr_validate(struct ynl_parse_arg *yarg, const struct nlattr *attr)
+int __ynl_attr_validate(struct ynl_parse_arg *yarg, const struct nlattr *attr,
+ unsigned int type)
{
const struct ynl_policy_attr *policy;
- unsigned int type, len;
unsigned char *data;
+ unsigned int len;
data = ynl_attr_data(attr);
len = ynl_attr_data_len(attr);
- type = ynl_attr_type(attr);
if (type > yarg->rsp_policy->max_attr) {
yerr(yarg->ys, YNL_ERROR_INTERNAL,
"Internal error, validating unknown attribute");
diff --git a/tools/net/ynl/pyynl/cli.py b/tools/net/ynl/pyynl/cli.py
index 33ccc5c1843b..af02a5b7e5a2 100755
--- a/tools/net/ynl/pyynl/cli.py
+++ b/tools/net/ynl/pyynl/cli.py
@@ -7,9 +7,10 @@ import os
import pathlib
import pprint
import sys
+import textwrap
sys.path.append(pathlib.Path(__file__).resolve().parent.as_posix())
-from lib import YnlFamily, Netlink, NlError
+from lib import YnlFamily, Netlink, NlError, SpecFamily
sys_schema_dir='/usr/share/ynl'
relative_schema_dir='../../../../Documentation/netlink'
@@ -39,6 +40,60 @@ class YnlEncoder(json.JSONEncoder):
return json.JSONEncoder.default(self, obj)
+def print_attr_list(ynl, attr_names, attr_set, indent=2):
+ """Print a list of attributes with their types and documentation."""
+ prefix = ' ' * indent
+ for attr_name in attr_names:
+ if attr_name in attr_set.attrs:
+ attr = attr_set.attrs[attr_name]
+ attr_info = f'{prefix}- {attr_name}: {attr.type}'
+ if 'enum' in attr.yaml:
+ enum_name = attr.yaml['enum']
+ attr_info += f" (enum: {enum_name})"
+ # Print enum values if available
+ if enum_name in ynl.consts:
+ const = ynl.consts[enum_name]
+ enum_values = list(const.entries.keys())
+ attr_info += f"\n{prefix} {const.type.capitalize()}: {', '.join(enum_values)}"
+
+ # Show nested attributes reference and recursively display them
+ nested_set_name = None
+ if attr.type == 'nest' and 'nested-attributes' in attr.yaml:
+ nested_set_name = attr.yaml['nested-attributes']
+ attr_info += f" -> {nested_set_name}"
+
+ if attr.yaml.get('doc'):
+ doc_text = textwrap.indent(attr.yaml['doc'], prefix + ' ')
+ attr_info += f"\n{doc_text}"
+ print(attr_info)
+
+ # Recursively show nested attributes
+ if nested_set_name in ynl.attr_sets:
+ nested_set = ynl.attr_sets[nested_set_name]
+ # Filter out 'unspec' and other unused attrs
+ nested_names = [n for n in nested_set.attrs.keys()
+ if nested_set.attrs[n].type != 'unused']
+ if nested_names:
+ print_attr_list(ynl, nested_names, nested_set, indent + 4)
+
+
+def print_mode_attrs(ynl, mode, mode_spec, attr_set, print_request=True):
+ """Print a given mode (do/dump/event/notify)."""
+ mode_title = mode.capitalize()
+
+ if print_request and 'request' in mode_spec and 'attributes' in mode_spec['request']:
+ print(f'\n{mode_title} request attributes:')
+ print_attr_list(ynl, mode_spec['request']['attributes'], attr_set)
+
+ if 'reply' in mode_spec and 'attributes' in mode_spec['reply']:
+ print(f'\n{mode_title} reply attributes:')
+ print_attr_list(ynl, mode_spec['reply']['attributes'], attr_set)
+
+ if 'attributes' in mode_spec:
+ print(f'\n{mode_title} attributes:')
+ print_attr_list(ynl, mode_spec['attributes'], attr_set)
+
+
def main():
description = """
YNL CLI utility - a general purpose netlink utility that uses YAML
@@ -70,6 +125,9 @@ def main():
group.add_argument('--dump', dest='dump', metavar='DUMP-OPERATION', type=str)
group.add_argument('--list-ops', action='store_true')
group.add_argument('--list-msgs', action='store_true')
+ group.add_argument('--list-attrs', dest='list_attrs', metavar='OPERATION', type=str,
+ help='List attributes for an operation')
+ group.add_argument('--validate', action='store_true')
parser.add_argument('--duration', dest='duration', type=int,
help='when subscribed, watch for DURATION seconds')
@@ -111,13 +169,25 @@ def main():
if args.family:
spec = f"{spec_dir()}/{args.family}.yaml"
- if args.schema is None and spec.startswith(sys_schema_dir):
- args.schema = '' # disable schema validation when installed
else:
spec = args.spec
if not os.path.isfile(spec):
raise Exception(f"Spec file {spec} does not exist")
+ if args.validate:
+ try:
+ SpecFamily(spec, args.schema)
+ except Exception as error:
+ print(error)
+ exit(1)
+ return
+
+ if args.family: # set behaviour when using installed specs
+ if args.schema is None and spec.startswith(sys_schema_dir):
+ args.schema = '' # disable schema validation when installed
+ if args.process_unknown is None:
+ args.process_unknown = True
+
ynl = YnlFamily(spec, args.schema, args.process_unknown,
recv_size=args.dbg_small_recv)
if args.dbg_small_recv:
@@ -133,6 +203,28 @@ def main():
for op_name, op in ynl.msgs.items():
print(op_name, " [", ", ".join(op.modes), "]")
+ if args.list_attrs:
+ op = ynl.msgs.get(args.list_attrs)
+ if not op:
+ print(f'Operation {args.list_attrs} not found')
+ exit(1)
+
+ print(f'Operation: {op.name}')
+ print(op.yaml['doc'])
+
+ for mode in ['do', 'dump', 'event']:
+ if mode in op.yaml:
+ print_mode_attrs(ynl, mode, op.yaml[mode], op.attr_set, True)
+
+ if 'notify' in op.yaml:
+ mode_spec = op.yaml['notify']
+ ref_spec = ynl.msgs.get(mode_spec).yaml.get('do')
+ if ref_spec:
+ print_mode_attrs(ynl, 'notify', ref_spec, op.attr_set, False)
+
+ if 'mcgrp' in op.yaml:
+ print(f"\nMulticast group: {op.yaml['mcgrp']}")
+
try:
if args.do:
reply = ynl.do(args.do, attrs, args.flags)
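Taken together, the new modes make the CLI self-documenting: something like ./tools/net/ynl/pyynl/cli.py --family netdev --list-attrs dev-get (invocation illustrative) prints the operation's request and reply attributes, recursing into nested sets and expanding enum values, while --spec FILE --validate exits non-zero on schema errors, which is exactly what the Makefile's schema_check target above relies on.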
diff --git a/tools/net/ynl/pyynl/ethtool.py b/tools/net/ynl/pyynl/ethtool.py
index cab6b576c876..fd0f6b8d54d1 100755
--- a/tools/net/ynl/pyynl/ethtool.py
+++ b/tools/net/ynl/pyynl/ethtool.py
@@ -2,7 +2,6 @@
# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
import argparse
-import json
import pathlib
import pprint
import sys
@@ -45,13 +44,16 @@ def print_field(reply, *desc):
Pretty-print a set of fields from the reply. desc specifies the
fields and the optional type (bool/yn).
"""
+ if not reply:
+ return
+
if len(desc) == 0:
return print_field(reply, *zip(reply.keys(), reply.keys()))
for spec in desc:
try:
field, name, tp = spec
- except:
+ except ValueError:
field, name = spec
tp = 'int'
@@ -156,7 +158,6 @@ def main():
global args
args = parser.parse_args()
- script_abs_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
spec = os.path.join(spec_dir(), 'ethtool.yaml')
schema = os.path.join(schema_dir(), 'genetlink-legacy.yaml')
@@ -255,14 +256,14 @@ def main():
reply = dumpit(ynl, args, 'channels-get')
print(f'Channel parameters for {args.device}:')
- print(f'Pre-set maximums:')
+ print('Pre-set maximums:')
print_field(reply,
('rx-max', 'RX'),
('tx-max', 'TX'),
('other-max', 'Other'),
('combined-max', 'Combined'))
- print(f'Current hardware settings:')
+ print('Current hardware settings:')
print_field(reply,
('rx-count', 'RX'),
('tx-count', 'TX'),
@@ -276,14 +277,14 @@ def main():
print(f'Ring parameters for {args.device}:')
- print(f'Pre-set maximums:')
+ print('Pre-set maximums:')
print_field(reply,
('rx-max', 'RX'),
('rx-mini-max', 'RX Mini'),
('rx-jumbo-max', 'RX Jumbo'),
('tx-max', 'TX'))
- print(f'Current hardware settings:')
+ print('Current hardware settings:')
print_field(reply,
('rx', 'RX'),
('rx-mini', 'RX Mini'),
@@ -298,7 +299,7 @@ def main():
return
if args.statistics:
- print(f'NIC statistics:')
+ print('NIC statistics:')
# TODO: pass id?
strset = dumpit(ynl, args, 'strset-get')
diff --git a/tools/net/ynl/pyynl/lib/__init__.py b/tools/net/ynl/pyynl/lib/__init__.py
index 71518b9842ee..ec9ea00071be 100644
--- a/tools/net/ynl/pyynl/lib/__init__.py
+++ b/tools/net/ynl/pyynl/lib/__init__.py
@@ -4,6 +4,8 @@ from .nlspec import SpecAttr, SpecAttrSet, SpecEnumEntry, SpecEnumSet, \
SpecFamily, SpecOperation, SpecSubMessage, SpecSubMessageFormat
from .ynl import YnlFamily, Netlink, NlError
+from .doc_generator import YnlDocGenerator
+
__all__ = ["SpecAttr", "SpecAttrSet", "SpecEnumEntry", "SpecEnumSet",
"SpecFamily", "SpecOperation", "SpecSubMessage", "SpecSubMessageFormat",
- "YnlFamily", "Netlink", "NlError"]
+ "YnlFamily", "Netlink", "NlError", "YnlDocGenerator"]
diff --git a/tools/net/ynl/pyynl/lib/doc_generator.py b/tools/net/ynl/pyynl/lib/doc_generator.py
new file mode 100644
index 000000000000..3a16b8eb01ca
--- /dev/null
+++ b/tools/net/ynl/pyynl/lib/doc_generator.py
@@ -0,0 +1,402 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+# -*- coding: utf-8; mode: python -*-
+
+"""
+ Class to auto-generate the documentation for Netlink specifications.
+
+ :copyright: Copyright (C) 2023 Breno Leitao <leitao@debian.org>
+ :license: GPL Version 2, June 1991 see linux/COPYING for details.
+
+ This class performs extensive parsing of the Linux kernel's netlink YAML
+ spec files, in an effort to avoid needing to heavily mark up the original
+ YAML file.
+
+ This code is split into two classes:
+ 1) RST formatters: Use to convert a string to a RST output
+ 2) YAML Netlink (YNL) doc generator: Generate docs from YAML data
+"""
+
+from typing import Any, Dict, List
+import yaml
+
+LINE_STR = '__lineno__'
+
+class NumberedSafeLoader(yaml.SafeLoader): # pylint: disable=R0901
+ """Override the SafeLoader class to add line number to parsed data"""
+
+ def construct_mapping(self, node, *args, **kwargs):
+ mapping = super().construct_mapping(node, *args, **kwargs)
+ mapping[LINE_STR] = node.start_mark.line
+
+ return mapping
+
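NumberedSafeLoader tags every YAML mapping with the (zero-based) line where it starts, under the '__lineno__' key; the generator later turns these into ".. LINENO" comments so Sphinx diagnostics can be mapped back to the spec file. A self-contained sketch of the same loader:

    import yaml

    LINE_STR = '__lineno__'

    class Numbered(yaml.SafeLoader):
        def construct_mapping(self, node, *args, **kwargs):
            mapping = super().construct_mapping(node, *args, **kwargs)
            mapping[LINE_STR] = node.start_mark.line
            return mapping

    doc = yaml.load('name: demo\ndoc: example\n', Loader=Numbered)
    print(doc[LINE_STR])   # 0 -- start_mark.line is zero-based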
+class RstFormatters:
+ """RST Formatters"""
+
+ SPACE_PER_LEVEL = 4
+
+ @staticmethod
+ def headroom(level: int) -> str:
+ """Return space to format"""
+ return " " * (level * RstFormatters.SPACE_PER_LEVEL)
+
+ @staticmethod
+ def bold(text: str) -> str:
+ """Format bold text"""
+ return f"**{text}**"
+
+ @staticmethod
+ def inline(text: str) -> str:
+ """Format inline text"""
+ return f"``{text}``"
+
+ @staticmethod
+ def sanitize(text: str) -> str:
+ """Remove newlines and multiple spaces"""
+ # This is useful for some fields that are spread across multiple lines
+ return str(text).replace("\n", " ").strip()
+
+ def rst_fields(self, key: str, value: str, level: int = 0) -> str:
+ """Return a RST formatted field"""
+ return self.headroom(level) + f":{key}: {value}"
+
+ def rst_definition(self, key: str, value: Any, level: int = 0) -> str:
+ """Format a single rst definition"""
+ return self.headroom(level) + key + "\n" + self.headroom(level + 1) + str(value)
+
+ def rst_paragraph(self, paragraph: str, level: int = 0) -> str:
+ """Return a formatted paragraph"""
+ return self.headroom(level) + paragraph
+
+ def rst_bullet(self, item: str, level: int = 0) -> str:
+ """Return a formatted a bullet"""
+ return self.headroom(level) + f"- {item}"
+
+ @staticmethod
+ def rst_subsection(title: str) -> str:
+ """Add a sub-section to the document"""
+ return f"{title}\n" + "-" * len(title)
+
+ @staticmethod
+ def rst_subsubsection(title: str) -> str:
+ """Add a sub-sub-section to the document"""
+ return f"{title}\n" + "~" * len(title)
+
+ @staticmethod
+ def rst_section(namespace: str, prefix: str, title: str) -> str:
+ """Add a section to the document"""
+ return f".. _{namespace}-{prefix}-{title}:\n\n{title}\n" + "=" * len(title)
+
+ @staticmethod
+ def rst_subtitle(title: str) -> str:
+ """Add a subtitle to the document"""
+ return "\n" + "-" * len(title) + f"\n{title}\n" + "-" * len(title) + "\n\n"
+
+ @staticmethod
+ def rst_title(title: str) -> str:
+ """Add a title to the document"""
+ return "=" * len(title) + f"\n{title}\n" + "=" * len(title) + "\n\n"
+
+ def rst_list_inline(self, list_: List[str], level: int = 0) -> str:
+ """Format a list using inlines"""
+ return self.headroom(level) + "[" + ", ".join(self.inline(i) for i in list_) + "]"
+
+ @staticmethod
+ def rst_ref(namespace: str, prefix: str, name: str) -> str:
+ """Add a hyperlink to the document"""
+ mappings = {'enum': 'definition',
+ 'fixed-header': 'definition',
+ 'nested-attributes': 'attribute-set',
+ 'struct': 'definition'}
+ if prefix in mappings:
+ prefix = mappings[prefix]
+ return f":ref:`{namespace}-{prefix}-{name}`"
+
+ def rst_header(self) -> str:
+ """The headers for all the auto generated RST files"""
+ lines = []
+
+ lines.append(self.rst_paragraph(".. SPDX-License-Identifier: GPL-2.0"))
+ lines.append(self.rst_paragraph(".. NOTE: This document was auto-generated.\n\n"))
+
+ return "\n".join(lines)
+
+ @staticmethod
+ def rst_toctree(maxdepth: int = 2) -> str:
+ """Generate a toctree RST primitive"""
+ lines = []
+
+ lines.append(".. toctree::")
+ lines.append(f" :maxdepth: {maxdepth}\n\n")
+
+ return "\n".join(lines)
+
+ @staticmethod
+ def rst_label(title: str) -> str:
+ """Return a formatted label"""
+ return f".. _{title}:\n\n"
+
+ @staticmethod
+ def rst_lineno(lineno: int) -> str:
+ """Return a lineno comment"""
+ return f".. LINENO {lineno}\n"
+
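RstFormatters is pure string munging, which makes its contract easy to show. Assuming the class above is importable, a couple of illustrative calls:

    fmt = RstFormatters()
    print(fmt.rst_section('netdev', 'operation', 'dev-get'))
    # .. _netdev-operation-dev-get:
    #
    # dev-get
    # =======
    print(fmt.rst_fields('value', '3', level=1))
    #     :value: 3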
+class YnlDocGenerator:
+ """YAML Netlink specs Parser"""
+
+ fmt = RstFormatters()
+
+ def parse_mcast_group(self, mcast_group: List[Dict[str, Any]]) -> str:
+ """Parse 'multicast' group list and return a formatted string"""
+ lines = []
+ for group in mcast_group:
+ lines.append(self.fmt.rst_bullet(group["name"]))
+
+ return "\n".join(lines)
+
+ def parse_do(self, do_dict: Dict[str, Any], level: int = 0) -> str:
+ """Parse 'do' section and return a formatted string"""
+ lines = []
+ if LINE_STR in do_dict:
+ lines.append(self.fmt.rst_lineno(do_dict[LINE_STR]))
+
+ for key in do_dict.keys():
+ if key == LINE_STR:
+ continue
+ lines.append(self.fmt.rst_paragraph(self.fmt.bold(key), level + 1))
+ if key in ['request', 'reply']:
+ lines.append(self.parse_do_attributes(do_dict[key], level + 1) + "\n")
+ else:
+ lines.append(self.fmt.headroom(level + 2) + do_dict[key] + "\n")
+
+ return "\n".join(lines)
+
+ def parse_do_attributes(self, attrs: Dict[str, Any], level: int = 0) -> str:
+ """Parse 'attributes' section"""
+ if "attributes" not in attrs:
+ return ""
+ lines = [self.fmt.rst_fields("attributes",
+ self.fmt.rst_list_inline(attrs["attributes"]),
+ level + 1)]
+
+ return "\n".join(lines)
+
+ def parse_operations(self, operations: List[Dict[str, Any]], namespace: str) -> str:
+ """Parse operations block"""
+ preprocessed = ["name", "doc", "title", "do", "dump", "flags"]
+ linkable = ["fixed-header", "attribute-set"]
+ lines = []
+
+ for operation in operations:
+ if LINE_STR in operation:
+ lines.append(self.fmt.rst_lineno(operation[LINE_STR]))
+
+ lines.append(self.fmt.rst_section(namespace, 'operation',
+ operation["name"]))
+ lines.append(self.fmt.rst_paragraph(operation["doc"]) + "\n")
+
+ for key in operation.keys():
+ if key == LINE_STR:
+ continue
+
+ if key in preprocessed:
+ # Skip the special fields
+ continue
+ value = operation[key]
+ if key in linkable:
+ value = self.fmt.rst_ref(namespace, key, value)
+ lines.append(self.fmt.rst_fields(key, value, 0))
+ if 'flags' in operation:
+ lines.append(self.fmt.rst_fields('flags',
+ self.fmt.rst_list_inline(operation['flags'])))
+
+ if "do" in operation:
+ lines.append(self.fmt.rst_paragraph(":do:", 0))
+ lines.append(self.parse_do(operation["do"], 0))
+ if "dump" in operation:
+ lines.append(self.fmt.rst_paragraph(":dump:", 0))
+ lines.append(self.parse_do(operation["dump"], 0))
+
+ # New line after fields
+ lines.append("\n")
+
+ return "\n".join(lines)
+
+ def parse_entries(self, entries: List[Dict[str, Any]], level: int) -> str:
+ """Parse a list of entries"""
+ ignored = ["pad"]
+ lines = []
+ for entry in entries:
+ if isinstance(entry, dict):
+ # entries could be a list or a dictionary
+ field_name = entry.get("name", "")
+ if field_name in ignored:
+ continue
+ type_ = entry.get("type")
+ if type_:
+ field_name += f" ({self.fmt.inline(type_)})"
+ lines.append(
+ self.fmt.rst_fields(field_name,
+ self.fmt.sanitize(entry.get("doc", "")),
+ level)
+ )
+ elif isinstance(entry, list):
+ lines.append(self.fmt.rst_list_inline(entry, level))
+ else:
+ lines.append(self.fmt.rst_bullet(self.fmt.inline(self.fmt.sanitize(entry)),
+ level))
+
+ lines.append("\n")
+ return "\n".join(lines)
+
+ def parse_definitions(self, defs: Dict[str, Any], namespace: str) -> str:
+ """Parse definitions section"""
+ preprocessed = ["name", "entries", "members"]
+ ignored = ["render-max"] # This is not printed
+ lines = []
+
+ for definition in defs:
+ if LINE_STR in definition:
+ lines.append(self.fmt.rst_lineno(definition[LINE_STR]))
+
+ lines.append(self.fmt.rst_section(namespace, 'definition', definition["name"]))
+ for k in definition.keys():
+ if k == LINE_STR:
+ continue
+ if k in preprocessed + ignored:
+ continue
+ lines.append(self.fmt.rst_fields(k, self.fmt.sanitize(definition[k]), 0))
+
+ # Field list needs to finish with a new line
+ lines.append("\n")
+ if "entries" in definition:
+ lines.append(self.fmt.rst_paragraph(":entries:", 0))
+ lines.append(self.parse_entries(definition["entries"], 1))
+ if "members" in definition:
+ lines.append(self.fmt.rst_paragraph(":members:", 0))
+ lines.append(self.parse_entries(definition["members"], 1))
+
+ return "\n".join(lines)
+
+ def parse_attr_sets(self, entries: List[Dict[str, Any]], namespace: str) -> str:
+ """Parse attribute from attribute-set"""
+ preprocessed = ["name", "type"]
+ linkable = ["enum", "nested-attributes", "struct", "sub-message"]
+ ignored = ["checks"]
+ lines = []
+
+ for entry in entries:
+ lines.append(self.fmt.rst_section(namespace, 'attribute-set',
+ entry["name"]))
+
+ if "doc" in entry:
+ lines.append(self.fmt.rst_paragraph(entry["doc"], 0) + "\n")
+
+ for attr in entry["attributes"]:
+ if LINE_STR in attr:
+ lines.append(self.fmt.rst_lineno(attr[LINE_STR]))
+
+ type_ = attr.get("type")
+ attr_line = attr["name"]
+ if type_:
+ # Add the attribute type in the same line
+ attr_line += f" ({self.fmt.inline(type_)})"
+
+ lines.append(self.fmt.rst_subsubsection(attr_line))
+
+ for k in attr.keys():
+ if k == LINE_STR:
+ continue
+ if k in preprocessed + ignored:
+ continue
+ if k in linkable:
+ value = self.fmt.rst_ref(namespace, k, attr[k])
+ else:
+ value = self.fmt.sanitize(attr[k])
+ lines.append(self.fmt.rst_fields(k, value, 0))
+ lines.append("\n")
+
+ return "\n".join(lines)
+
+ def parse_sub_messages(self, entries: List[Dict[str, Any]], namespace: str) -> str:
+ """Parse sub-message definitions"""
+ lines = []
+
+ for entry in entries:
+ lines.append(self.fmt.rst_section(namespace, 'sub-message',
+ entry["name"]))
+ for fmt in entry["formats"]:
+ value = fmt["value"]
+
+ lines.append(self.fmt.rst_bullet(self.fmt.bold(value)))
+ for attr in ['fixed-header', 'attribute-set']:
+ if attr in fmt:
+ lines.append(self.fmt.rst_fields(attr,
+ self.fmt.rst_ref(namespace,
+ attr,
+ fmt[attr]),
+ 1))
+ lines.append("\n")
+
+ return "\n".join(lines)
+
+ def parse_yaml(self, obj: Dict[str, Any]) -> str:
+ """Format the whole YAML into a RST string"""
+ lines = []
+
+ # Main header
+ lineno = obj.get('__lineno__', 0)
+ lines.append(self.fmt.rst_lineno(lineno))
+
+ family = obj['name']
+
+ lines.append(self.fmt.rst_header())
+ lines.append(self.fmt.rst_label("netlink-" + family))
+
+ title = f"Family ``{family}`` netlink specification"
+ lines.append(self.fmt.rst_title(title))
+ lines.append(self.fmt.rst_paragraph(".. contents:: :depth: 3\n"))
+
+ if "doc" in obj:
+ lines.append(self.fmt.rst_subtitle("Summary"))
+ lines.append(self.fmt.rst_paragraph(obj["doc"], 0))
+
+ # Operations
+ if "operations" in obj:
+ lines.append(self.fmt.rst_subtitle("Operations"))
+ lines.append(self.parse_operations(obj["operations"]["list"],
+ family))
+
+ # Multicast groups
+ if "mcast-groups" in obj:
+ lines.append(self.fmt.rst_subtitle("Multicast groups"))
+ lines.append(self.parse_mcast_group(obj["mcast-groups"]["list"]))
+
+ # Definitions
+ if "definitions" in obj:
+ lines.append(self.fmt.rst_subtitle("Definitions"))
+ lines.append(self.parse_definitions(obj["definitions"], family))
+
+ # Attributes set
+ if "attribute-sets" in obj:
+ lines.append(self.fmt.rst_subtitle("Attribute sets"))
+ lines.append(self.parse_attr_sets(obj["attribute-sets"], family))
+
+ # Sub-messages
+ if "sub-messages" in obj:
+ lines.append(self.fmt.rst_subtitle("Sub-messages"))
+ lines.append(self.parse_sub_messages(obj["sub-messages"], family))
+
+ return "\n".join(lines)
+
+ # Main functions
+ # ==============
+
+ def parse_yaml_file(self, filename: str) -> str:
+ """Transform the YAML specified by filename into an RST-formatted string"""
+ with open(filename, "r", encoding="utf-8") as spec_file:
+ numbered_yaml = yaml.load(spec_file, Loader=NumberedSafeLoader)
+ content = self.parse_yaml(numbered_yaml)
+
+ return content
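End to end, the class is driven through parse_yaml_file(), which returns the whole document as one RST string. A hedged usage sketch (spec path illustrative; run from the pyynl directory so the lib package is importable):

    from lib.doc_generator import YnlDocGenerator

    gen = YnlDocGenerator()
    rst = gen.parse_yaml_file(
        '../../../../Documentation/netlink/specs/netdev.yaml')
    with open('netdev.rst', 'w', encoding='utf-8') as out:
        out.write(rst)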
diff --git a/tools/net/ynl/pyynl/lib/nlspec.py b/tools/net/ynl/pyynl/lib/nlspec.py
index 314ec8007496..85c17fe01e35 100644
--- a/tools/net/ynl/pyynl/lib/nlspec.py
+++ b/tools/net/ynl/pyynl/lib/nlspec.py
@@ -501,7 +501,7 @@ class SpecFamily(SpecElement):
return SpecStruct(self, elem)
def new_sub_message(self, elem):
- return SpecSubMessage(self, elem);
+ return SpecSubMessage(self, elem)
def new_operation(self, elem, req_val, rsp_val):
return SpecOperation(self, elem, req_val, rsp_val)
diff --git a/tools/net/ynl/pyynl/lib/ynl.py b/tools/net/ynl/pyynl/lib/ynl.py
index 55b59f6c79b8..36d36eb7e3b8 100644
--- a/tools/net/ynl/pyynl/lib/ynl.py
+++ b/tools/net/ynl/pyynl/lib/ynl.py
@@ -9,7 +9,6 @@ import socket
import struct
from struct import Struct
import sys
-import yaml
import ipaddress
import uuid
import queue
@@ -101,12 +100,21 @@ class Netlink:
'bitfield32', 'sint', 'uint'])
class NlError(Exception):
- def __init__(self, nl_msg):
- self.nl_msg = nl_msg
- self.error = -nl_msg.error
-
- def __str__(self):
- return f"Netlink error: {os.strerror(self.error)}\n{self.nl_msg}"
+ def __init__(self, nl_msg):
+ self.nl_msg = nl_msg
+ self.error = -nl_msg.error
+
+ def __str__(self):
+ msg = "Netlink error: "
+
+ extack = self.nl_msg.extack.copy() if self.nl_msg.extack else {}
+ if 'msg' in extack:
+ msg += extack['msg'] + ': '
+ del extack['msg']
+ msg += os.strerror(self.error)
+ if extack:
+ msg += ' ' + str(extack)
+ return msg
class ConfigError(Exception):
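The reworked __str__ now leads with the kernel's extack message when one is present and appends any remaining extack fields, so a failure reads along the lines of (illustrative) "Netlink error: missing attribute: ifindex: Invalid argument {'bad-attr': '.header'}" instead of a bare errno followed by a raw dump of the whole message.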
@@ -231,14 +239,7 @@ class NlMsg:
self.extack['unknown'].append(extack)
if attr_space:
- # We don't have the ability to parse nests yet, so only do global
- if 'miss-type' in self.extack and 'miss-nest' not in self.extack:
- miss_type = self.extack['miss-type']
- if miss_type in attr_space.attrs_by_val:
- spec = attr_space.attrs_by_val[miss_type]
- self.extack['miss-type'] = spec['name']
- if 'doc' in spec:
- self.extack['miss-type-doc'] = spec['doc']
+ self.annotate_extack(attr_space)
def _decode_policy(self, raw):
policy = {}
@@ -264,6 +265,18 @@ class NlMsg:
policy['mask'] = attr.as_scalar('u64')
return policy
+ def annotate_extack(self, attr_space):
+ """ Make extack more human friendly with attribute information """
+
+ # We don't have the ability to parse nests yet, so only do global
+ if 'miss-type' in self.extack and 'miss-nest' not in self.extack:
+ miss_type = self.extack['miss-type']
+ if miss_type in attr_space.attrs_by_val:
+ spec = attr_space.attrs_by_val[miss_type]
+ self.extack['miss-type'] = spec['name']
+ if 'doc' in spec:
+ self.extack['miss-type-doc'] = spec['doc']
+
def cmd(self):
return self.nl_type
@@ -277,12 +290,12 @@ class NlMsg:
class NlMsgs:
- def __init__(self, data, attr_space=None):
+ def __init__(self, data):
self.msgs = []
offset = 0
while offset < len(data):
- msg = NlMsg(data, offset, attr_space=attr_space)
+ msg = NlMsg(data, offset)
offset += msg.nl_len
self.msgs.append(msg)
@@ -557,11 +570,13 @@ class YnlFamily(SpecFamily):
if attr["type"] == 'nest':
nl_type |= Netlink.NLA_F_NESTED
- attr_payload = b''
sub_space = attr['nested-attributes']
- sub_attrs = SpaceAttrs(self.attr_sets[sub_space], value, search_attrs)
- for subname, subvalue in value.items():
- attr_payload += self._add_attr(sub_space, subname, subvalue, sub_attrs)
+ attr_payload = self._add_nest_attrs(value, sub_space, search_attrs)
+ elif attr['type'] == 'indexed-array' and attr['sub-type'] == 'nest':
+ nl_type |= Netlink.NLA_F_NESTED
+ sub_space = attr['nested-attributes']
+ attr_payload = self._encode_indexed_array(value, sub_space,
+ search_attrs)
elif attr["type"] == 'flag':
if not value:
# If value is absent or false then skip attribute creation.
@@ -570,7 +585,9 @@ class YnlFamily(SpecFamily):
elif attr["type"] == 'string':
attr_payload = str(value).encode('ascii') + b'\x00'
elif attr["type"] == 'binary':
- if isinstance(value, bytes):
+ if value is None:
+ attr_payload = b''
+ elif isinstance(value, bytes):
attr_payload = value
elif isinstance(value, str):
if attr.display_hint:
@@ -579,6 +596,9 @@ class YnlFamily(SpecFamily):
attr_payload = bytes.fromhex(value)
elif isinstance(value, dict) and attr.struct_name:
attr_payload = self._encode_struct(attr.struct_name, value)
+ elif isinstance(value, list) and attr.sub_type in NlAttr.type_formats:
+ format = NlAttr.get_format(attr.sub_type)
+ attr_payload = b''.join([format.pack(x) for x in value])
else:
raise Exception(f'Unknown type for binary attribute, value: {value}')
elif attr['type'] in NlAttr.type_formats or attr.is_auto_scalar:
@@ -610,9 +630,38 @@ class YnlFamily(SpecFamily):
else:
raise Exception(f'Unknown type at {space} {name} {value} {attr["type"]}')
+ return self._add_attr_raw(nl_type, attr_payload)
+
+ def _add_attr_raw(self, nl_type, attr_payload):
pad = b'\x00' * ((4 - len(attr_payload) % 4) % 4)
return struct.pack('HH', len(attr_payload) + 4, nl_type) + attr_payload + pad
+ def _add_nest_attrs(self, value, sub_space, search_attrs):
+ sub_attrs = SpaceAttrs(self.attr_sets[sub_space], value, search_attrs)
+ attr_payload = b''
+ for subname, subvalue in value.items():
+ attr_payload += self._add_attr(sub_space, subname, subvalue,
+ sub_attrs)
+ return attr_payload
+
+ def _encode_indexed_array(self, vals, sub_space, search_attrs):
+ attr_payload = b''
+ for i, val in enumerate(vals):
+ idx = i | Netlink.NLA_F_NESTED
+ val_payload = self._add_nest_attrs(val, sub_space, search_attrs)
+ attr_payload += self._add_attr_raw(idx, val_payload)
+ return attr_payload
+
+ def _get_enum_or_unknown(self, enum, raw):
+ try:
+ name = enum.entries_by_val[raw].name
+ except KeyError as error:
+ if self.process_unknown:
+ name = f"Unknown({raw})"
+ else:
+ raise error
+ return name
+
def _decode_enum(self, raw, attr_spec):
enum = self.consts[attr_spec['enum']]
if enum.type == 'flags' or attr_spec.get('enum-as-flags', False):
@@ -620,11 +669,11 @@ class YnlFamily(SpecFamily):
value = set()
while raw:
if raw & 1:
- value.add(enum.entries_by_val[i].name)
+ value.add(self._get_enum_or_unknown(enum, i))
raw >>= 1
i += 1
else:
- value = enum.entries_by_val[raw].name
+ value = self._get_enum_or_unknown(enum, raw)
return value
def _decode_binary(self, attr, attr_spec):
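_get_enum_or_unknown() centralizes the enum-decode fallback: when the spec lags behind the kernel and a value has no named entry, process_unknown turns it into a placeholder instead of a KeyError. A simplified, self-contained sketch of the same logic:

    def enum_name_or_unknown(entries_by_val, raw, process_unknown):
        try:
            return entries_by_val[raw].name
        except KeyError:
            if process_unknown:
                return f'Unknown({raw})'
            raise

    # e.g. a flags attribute with bit 7 set, where the spec only names
    # bits 0-2, decodes to 'Unknown(7)' rather than raising.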
@@ -686,7 +735,7 @@ class YnlFamily(SpecFamily):
return attr.as_bin()
def _rsp_add(self, rsp, name, is_multi, decoded):
- if is_multi == None:
+ if is_multi is None:
if name in rsp and type(rsp[name]) is not list:
rsp[name] = [rsp[name]]
is_multi = True
@@ -719,14 +768,14 @@ class YnlFamily(SpecFamily):
decoded = {}
offset = 0
if msg_format.fixed_header:
- decoded.update(self._decode_struct(attr.raw, msg_format.fixed_header));
+ decoded.update(self._decode_struct(attr.raw, msg_format.fixed_header))
offset = self._struct_size(msg_format.fixed_header)
if msg_format.attr_set:
if msg_format.attr_set in self.attr_sets:
subdict = self._decode(NlAttrs(attr.raw, offset), msg_format.attr_set)
decoded.update(subdict)
else:
- raise Exception(f"Unknown attribute-set '{attr_space}' when decoding '{attr_spec.name}'")
+ raise Exception(f"Unknown attribute-set '{msg_format.attr_set}' when decoding '{attr_spec.name}'")
return decoded
def _decode(self, attrs, space, outer_attrs = None):
@@ -757,6 +806,8 @@ class YnlFamily(SpecFamily):
decoded = True
elif attr_spec.is_auto_scalar:
decoded = attr.as_auto_scalar(attr_spec['type'], attr_spec.byte_order)
+ if 'enum' in attr_spec:
+ decoded = self._decode_enum(decoded, attr_spec)
elif attr_spec["type"] in NlAttr.type_formats:
decoded = attr.as_scalar(attr_spec['type'], attr_spec.byte_order)
if 'enum' in attr_spec:
@@ -914,7 +965,7 @@ class YnlFamily(SpecFamily):
formatted = hex(raw)
else:
formatted = bytes.hex(raw, ' ')
- elif display_hint in [ 'ipv4', 'ipv6' ]:
+ elif display_hint in [ 'ipv4', 'ipv6', 'ipv4-or-v6' ]:
formatted = format(ipaddress.ip_address(raw))
elif display_hint == 'uuid':
formatted = str(uuid.UUID(bytes=raw))
@@ -923,12 +974,26 @@ class YnlFamily(SpecFamily):
return formatted
def _from_string(self, string, attr_spec):
- if attr_spec.display_hint in ['ipv4', 'ipv6']:
+ if attr_spec.display_hint in ['ipv4', 'ipv6', 'ipv4-or-v6']:
ip = ipaddress.ip_address(string)
if attr_spec['type'] == 'binary':
raw = ip.packed
else:
raw = int(ip)
+ elif attr_spec.display_hint == 'hex':
+ if attr_spec['type'] == 'binary':
+ raw = bytes.fromhex(string)
+ else:
+ raw = int(string, 16)
+ elif attr_spec.display_hint == 'mac':
+ # Parse MAC address in format "00:11:22:33:44:55" or "001122334455"
+ if ':' in string:
+ mac_bytes = [int(x, 16) for x in string.split(':')]
+ else:
+ if len(string) % 2 != 0:
+ raise Exception(f"Invalid MAC address format: {string}")
+ mac_bytes = [int(string[i:i+2], 16) for i in range(0, len(string), 2)]
+ raw = bytes(mac_bytes)
else:
raise Exception(f"Display hint '{attr_spec.display_hint}' not implemented"
f" when parsing '{attr_spec['name']}'")
@@ -992,15 +1057,15 @@ class YnlFamily(SpecFamily):
self.check_ntf()
def operation_do_attributes(self, name):
- """
- For a given operation name, find and return a supported
- set of attributes (as a dict).
- """
- op = self.find_operation(name)
- if not op:
- return None
+ """
+ For a given operation name, find and return a supported
+ set of attributes (as a dict).
+ """
+ op = self.find_operation(name)
+ if not op:
+ return None
- return op['do']['request']['attributes'].copy()
+ return op['do']['request']['attributes'].copy()
def _encode_message(self, op, vals, flags, req_seq):
nl_flags = Netlink.NLM_F_REQUEST | Netlink.NLM_F_ACK
@@ -1034,12 +1099,13 @@ class YnlFamily(SpecFamily):
op_rsp = []
while not done:
reply = self.sock.recv(self._recv_size)
- nms = NlMsgs(reply, attr_space=op.attr_set)
+ nms = NlMsgs(reply)
self._recv_dbg_print(reply, nms)
for nl_msg in nms:
if nl_msg.nl_seq in reqs_by_seq:
(op, vals, req_msg, req_flags) = reqs_by_seq[nl_msg.nl_seq]
if nl_msg.extack:
+ nl_msg.annotate_extack(op.attr_set)
self._decode_extack(req_msg, op, nl_msg.extack, vals)
else:
op = None
diff --git a/tools/net/ynl/pyynl/ynl_gen_c.py b/tools/net/ynl/pyynl/ynl_gen_c.py
index 76032e01c2e7..b517d0c605ad 100755
--- a/tools/net/ynl/pyynl/ynl_gen_c.py
+++ b/tools/net/ynl/pyynl/ynl_gen_c.py
@@ -2,7 +2,6 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
import argparse
-import collections
import filecmp
import pathlib
import os
@@ -14,7 +13,7 @@ import yaml
sys.path.append(pathlib.Path(__file__).resolve().parent.as_posix())
from lib import SpecFamily, SpecAttrSet, SpecAttr, SpecOperation, SpecEnumSet, SpecEnumEntry
-from lib import SpecSubMessage, SpecSubMessageFormat
+from lib import SpecSubMessage
def c_upper(name):
@@ -243,7 +242,7 @@ class Type(SpecAttr):
raise Exception(f"Attr get not implemented for class type {self.type}")
def attr_get(self, ri, var, first):
- lines, init_lines, local_vars = self._attr_get(ri, var)
+ lines, init_lines, _ = self._attr_get(ri, var)
if type(lines) is str:
lines = [lines]
if type(init_lines) is str:
@@ -251,10 +250,6 @@ class Type(SpecAttr):
kw = 'if' if first else 'else if'
ri.cw.block_start(line=f"{kw} (type == {self.enum_name})")
- if local_vars:
- for local in local_vars:
- ri.cw.p(local)
- ri.cw.nl()
if not self.is_multi_val():
ri.cw.p("if (ynl_attr_validate(yarg, attr))")
@@ -275,9 +270,8 @@ class Type(SpecAttr):
def _setter_lines(self, ri, member, presence):
raise Exception(f"Setter not implemented for class type {self.type}")
- def setter(self, ri, space, direction, deref=False, ref=None):
+ def setter(self, ri, space, direction, deref=False, ref=None, var="req"):
ref = (ref if ref else []) + [self.c_name]
- var = "req"
member = f"{var}->{'.'.join(ref)}"
local_vars = []
@@ -332,7 +326,7 @@ class TypeUnused(Type):
def attr_get(self, ri, var, first):
pass
- def setter(self, ri, space, direction, deref=False, ref=None):
+ def setter(self, ri, space, direction, deref=False, ref=None, var=None):
pass
@@ -355,7 +349,7 @@ class TypePad(Type):
def attr_policy(self, cw):
pass
- def setter(self, ri, space, direction, deref=False, ref=None):
+ def setter(self, ri, space, direction, deref=False, ref=None, var=None):
pass
@@ -399,7 +393,7 @@ class TypeScalar(Type):
if 'enum' in self.attr:
enum = self.family.consts[self.attr['enum']]
low, high = enum.value_range()
- if low == None and high == None:
+ if low is None and high is None:
self.checks['sparse'] = True
else:
if 'min' not in self.checks:
@@ -486,7 +480,7 @@ class TypeString(Type):
ri.cw.p(f"char *{self.c_name};")
def _attr_typol(self):
- typol = f'.type = YNL_PT_NUL_STR, '
+ typol = '.type = YNL_PT_NUL_STR, '
if self.is_selector:
typol += '.is_selector = 1, '
return typol
@@ -540,7 +534,7 @@ class TypeBinary(Type):
ri.cw.p(f"void *{self.c_name};")
def _attr_typol(self):
- return f'.type = YNL_PT_BINARY,'
+ return '.type = YNL_PT_BINARY,'
def _attr_policy(self, policy):
if len(self.checks) == 0:
@@ -557,7 +551,7 @@ class TypeBinary(Type):
elif 'exact-len' in self.checks:
mem = 'NLA_POLICY_EXACT_LEN(' + self.get_limit_str('exact-len') + ')'
elif 'min-len' in self.checks:
- mem = '{ .len = ' + self.get_limit_str('min-len') + ', }'
+ mem = 'NLA_POLICY_MIN_LEN(' + self.get_limit_str('min-len') + ')'
elif 'max-len' in self.checks:
mem = 'NLA_POLICY_MAX_LEN(' + self.get_limit_str('max-len') + ')'
@@ -637,10 +631,10 @@ class TypeBitfield32(Type):
return "struct nla_bitfield32"
def _attr_typol(self):
- return f'.type = YNL_PT_BITFIELD32, '
+ return '.type = YNL_PT_BITFIELD32, '
def _attr_policy(self, policy):
- if not 'enum' in self.attr:
+ if 'enum' not in self.attr:
raise Exception('Enum required for bitfield32 attr')
enum = self.family.consts[self.attr['enum']]
mask = enum.get_mask(as_flags=True)
@@ -695,13 +689,14 @@ class TypeNest(Type):
f"parg.data = &{var}->{self.c_name};"]
return get_lines, init_lines, None
- def setter(self, ri, space, direction, deref=False, ref=None):
+ def setter(self, ri, space, direction, deref=False, ref=None, var="req"):
ref = (ref if ref else []) + [self.c_name]
for _, attr in ri.family.pure_nested_structs[self.nested_attrs].member_list():
if attr.is_recursive():
continue
- attr.setter(ri, self.nested_attrs, direction, deref=deref, ref=ref)
+ attr.setter(ri, self.nested_attrs, direction, deref=deref, ref=ref,
+ var=var)
class TypeMultiAttr(Type):
@@ -725,7 +720,11 @@ class TypeMultiAttr(Type):
return 'struct ynl_string *'
elif self.attr['type'] in scalars:
scalar_pfx = '__' if ri.ku_space == 'user' else ''
- return scalar_pfx + self.attr['type']
+ if self.is_auto_scalar:
+ name = self.type[0] + '64'
+ else:
+ name = self.attr['type']
+ return scalar_pfx + name
else:
raise Exception(f"Sub-type {self.attr['type']} not supported yet")
@@ -792,7 +791,7 @@ class TypeMultiAttr(Type):
f"{presence} = n_{self.c_name};"]
-class TypeArrayNest(Type):
+class TypeIndexedArray(Type):
def is_multi_val(self):
return True
@@ -816,21 +815,28 @@ class TypeArrayNest(Type):
f'unsigned int n_{self.c_name}']
return super().arg_member(ri)
+ def _attr_policy(self, policy):
+ if self.attr['sub-type'] == 'nest':
+ return f'NLA_POLICY_NESTED_ARRAY({self.nested_render_name}_nl_policy)'
+ return super()._attr_policy(policy)
+
def _attr_typol(self):
if self.attr['sub-type'] in scalars:
return f'.type = YNL_PT_U{c_upper(self.sub_type[1:])}, '
elif self.attr['sub-type'] == 'binary' and 'exact-len' in self.checks:
return f'.type = YNL_PT_BINARY, .len = {self.checks["exact-len"]}, '
- else:
+ elif self.attr['sub-type'] == 'nest':
return f'.type = YNL_PT_NEST, .nest = &{self.nested_render_name}_nest, '
+ else:
+ raise Exception(f"Typol for IndexedArray sub-type {self.attr['sub-type']} not supported, yet")
def _attr_get(self, ri, var):
local_vars = ['const struct nlattr *attr2;']
get_lines = [f'attr_{self.c_name} = attr;',
'ynl_attr_for_each_nested(attr2, attr) {',
- '\tif (ynl_attr_validate(yarg, attr2))',
+ '\tif (__ynl_attr_validate(yarg, attr2, type))',
'\t\treturn YNL_PARSE_CB_ERROR;',
- f'\t{var}->_count.{self.c_name}++;',
+ f'\tn_{self.c_name}++;',
'}']
return get_lines, None, local_vars
@@ -848,13 +854,25 @@ class TypeArrayNest(Type):
ri.cw.p(f'for (i = 0; i < {var}->_count.{self.c_name}; i++)')
ri.cw.p(f"{self.nested_render_name}_put(nlh, i, &{var}->{self.c_name}[i]);")
else:
- raise Exception(f"Put for ArrayNest sub-type {self.attr['sub-type']} not supported, yet")
+ raise Exception(f"Put for IndexedArray sub-type {self.attr['sub-type']} not supported, yet")
ri.cw.p('ynl_attr_nest_end(nlh, array);')
def _setter_lines(self, ri, member, presence):
return [f"{member} = {self.c_name};",
f"{presence} = n_{self.c_name};"]
+ def free_needs_iter(self):
+ return self.sub_type == 'nest'
+
+ def _free_lines(self, ri, var, ref):
+ lines = []
+ if self.sub_type == 'nest':
+ lines += [
+ f"for (i = 0; i < {var}->{ref}_count.{self.c_name}; i++)",
+ f'{self.nested_render_name}_free(&{var}->{ref}{self.c_name}[i]);',
+ ]
+ lines += f"free({var}->{ref}{self.c_name});",
+ return lines
class TypeNestTypeValue(Type):
def _complex_member_type(self, ri):
@@ -909,7 +927,7 @@ class TypeSubMessage(TypeNest):
else:
sel_var = f"{var}->{sel}"
get_lines = [f'if (!{sel_var})',
- f'return ynl_submsg_failed(yarg, "%s", "%s");' %
+ 'return ynl_submsg_failed(yarg, "%s", "%s");' %
(self.name, self['selector']),
f"if ({self.nested_render_name}_parse(&parg, {sel_var}, attr))",
"return YNL_PARSE_CB_ERROR;"]
@@ -1125,7 +1143,7 @@ class AttrSet(SpecAttrSet):
t = TypeNest(self.family, self, elem, value)
elif elem['type'] == 'indexed-array' and 'sub-type' in elem:
if elem["sub-type"] in ['binary', 'nest', 'u32']:
- t = TypeArrayNest(self.family, self, elem, value)
+ t = TypeIndexedArray(self.family, self, elem, value)
else:
raise Exception(f'new_attr: unsupported sub-type {elem["sub-type"]}')
elif elem['type'] == 'nest-type-value':
@@ -1187,7 +1205,7 @@ class SubMessage(SpecSubMessage):
class Family(SpecFamily):
- def __init__(self, file_name, exclude_ops):
+ def __init__(self, file_name, exclude_ops, fn_prefix):
# Added by resolve:
self.c_name = None
delattr(self, "c_name")
@@ -1219,6 +1237,8 @@ class Family(SpecFamily):
else:
self.uapi_header_name = self.ident_name
+ self.fn_prefix = fn_prefix if fn_prefix else f'{self.ident_name}-nl'
+
def resolve(self):
self.resolve_up(super())
@@ -1563,7 +1583,7 @@ class RenderInfo:
if family.is_classic():
self.fixed_hdr_len = f"sizeof(struct {c_lower(fixed_hdr)})"
else:
- raise Exception(f"Per-op fixed header not supported, yet")
+ raise Exception("Per-op fixed header not supported, yet")
# 'do' and 'dump' response parsing is identical
@@ -1879,7 +1899,9 @@ def rdir(direction):
def op_prefix(ri, direction, deref=False):
suffix = f"_{ri.type_name}"
- if not ri.op_mode or ri.op_mode == 'do':
+ if not ri.op_mode:
+ pass
+ elif ri.op_mode == 'do':
suffix += f"{direction_to_suffix[direction]}"
else:
if direction == 'request':
@@ -2032,6 +2054,20 @@ def put_enum_to_str(family, cw, enum):
_put_enum_to_str_helper(cw, enum.render_name, map_name, 'value', enum=enum)
+def put_local_vars(struct):
+ local_vars = []
+ has_array = False
+ has_count = False
+ for _, arg in struct.member_list():
+ has_array |= arg.type == 'indexed-array'
+ has_count |= arg.presence_type() == 'count'
+ if has_array:
+ local_vars.append('struct nlattr *array;')
+ if has_count:
+ local_vars.append('unsigned int i;')
+ return local_vars
+
+
def put_req_nested_prototype(ri, struct, suffix=';'):
func_args = ['struct nlmsghdr *nlh',
'unsigned int attr_type',
@@ -2054,15 +2090,7 @@ def put_req_nested(ri, struct):
init_lines.append(f"hdr = ynl_nlmsg_put_extra_header(nlh, {struct_sz});")
init_lines.append(f"memcpy(hdr, &obj->_hdr, {struct_sz});")
- has_anest = False
- has_count = False
- for _, arg in struct.member_list():
- has_anest |= arg.type == 'indexed-array'
- has_count |= arg.presence_type() == 'count'
- if has_anest:
- local_vars.append('struct nlattr *array;')
- if has_count:
- local_vars.append('unsigned int i;')
+ local_vars += put_local_vars(struct)
put_req_nested_prototype(ri, struct, suffix='')
ri.cw.block_start()
@@ -2097,35 +2125,43 @@ def _multi_parse(ri, struct, init_lines, local_vars):
if ri.family.is_classic():
iter_line = f"ynl_attr_for_each(attr, nlh, sizeof({struct.fixed_header}))"
else:
- raise Exception(f"Per-op fixed header not supported, yet")
+ raise Exception("Per-op fixed header not supported, yet")
- array_nests = set()
+ indexed_arrays = set()
multi_attrs = set()
needs_parg = False
+ var_set = set()
for arg, aspec in struct.member_list():
if aspec['type'] == 'indexed-array' and 'sub-type' in aspec:
if aspec["sub-type"] in {'binary', 'nest'}:
- local_vars.append(f'const struct nlattr *attr_{aspec.c_name};')
- array_nests.add(arg)
+ local_vars.append(f'const struct nlattr *attr_{aspec.c_name} = NULL;')
+ indexed_arrays.add(arg)
elif aspec['sub-type'] in scalars:
- local_vars.append(f'const struct nlattr *attr_{aspec.c_name};')
- array_nests.add(arg)
+ local_vars.append(f'const struct nlattr *attr_{aspec.c_name} = NULL;')
+ indexed_arrays.add(arg)
else:
raise Exception(f'Not supported sub-type {aspec["sub-type"]}')
if 'multi-attr' in aspec:
multi_attrs.add(arg)
needs_parg |= 'nested-attributes' in aspec
needs_parg |= 'sub-message' in aspec
- if array_nests or multi_attrs:
+
+ try:
+ _, _, l_vars = aspec._attr_get(ri, '')
+ var_set |= set(l_vars) if l_vars else set()
+ except Exception:
+ pass # _attr_get() not implemented by simple types, ignore
+ local_vars += list(var_set)
+ if indexed_arrays or multi_attrs:
local_vars.append('int i;')
if needs_parg:
local_vars.append('struct ynl_parse_arg parg;')
init_lines.append('parg.ys = yarg->ys;')
- all_multi = array_nests | multi_attrs
+ all_multi = indexed_arrays | multi_attrs
- for anest in sorted(all_multi):
- local_vars.append(f"unsigned int n_{struct[anest].c_name} = 0;")
+ for arg in sorted(all_multi):
+ local_vars.append(f"unsigned int n_{struct[arg].c_name} = 0;")
ri.cw.block_start()
ri.cw.write_func_lvar(local_vars)
@@ -2145,8 +2181,8 @@ def _multi_parse(ri, struct, init_lines, local_vars):
else:
ri.cw.p('hdr = ynl_nlmsg_data_offset(nlh, sizeof(struct genlmsghdr));')
ri.cw.p(f"memcpy(&dst->_hdr, hdr, sizeof({struct.fixed_header}));")
- for anest in sorted(all_multi):
- aspec = struct[anest]
+ for arg in sorted(all_multi):
+ aspec = struct[arg]
ri.cw.p(f"if (dst->{aspec.c_name})")
ri.cw.p(f'return ynl_error_parse(yarg, "attribute already present ({struct.attr_set.name}.{aspec.name})");')
@@ -2164,8 +2200,8 @@ def _multi_parse(ri, struct, init_lines, local_vars):
ri.cw.block_end()
ri.cw.nl()
- for anest in sorted(array_nests):
- aspec = struct[anest]
+ for arg in sorted(indexed_arrays):
+ aspec = struct[arg]
ri.cw.block_start(line=f"if (n_{aspec.c_name})")
ri.cw.p(f"dst->{aspec.c_name} = calloc(n_{aspec.c_name}, sizeof(*dst->{aspec.c_name}));")
@@ -2190,8 +2226,8 @@ def _multi_parse(ri, struct, init_lines, local_vars):
ri.cw.block_end()
ri.cw.nl()
- for anest in sorted(multi_attrs):
- aspec = struct[anest]
+ for arg in sorted(multi_attrs):
+ aspec = struct[arg]
ri.cw.block_start(line=f"if (n_{aspec.c_name})")
ri.cw.p(f"dst->{aspec.c_name} = calloc(n_{aspec.c_name}, sizeof(*dst->{aspec.c_name}));")
ri.cw.p(f"dst->_count.{aspec.c_name} = n_{aspec.c_name};")
@@ -2346,10 +2382,7 @@ def print_req(ri):
local_vars += ['size_t hdr_len;',
'void *hdr;']
- for _, attr in ri.struct["request"].member_list():
- if attr.presence_type() == 'count':
- local_vars += ['unsigned int i;']
- break
+ local_vars += put_local_vars(ri.struct['request'])
print_prototype(ri, direction, terminate=False)
ri.cw.block_start()
@@ -2416,6 +2449,9 @@ def print_dump(ri):
local_vars += ['size_t hdr_len;',
'void *hdr;']
+ if 'request' in ri.op[ri.op_mode]:
+ local_vars += put_local_vars(ri.struct['request'])
+
ri.cw.write_func_lvar(local_vars)
ri.cw.p('yds.yarg.ys = ys;')
@@ -2470,11 +2506,22 @@ def free_arg_name(direction):
return 'obj'
-def print_alloc_wrapper(ri, direction):
+def print_alloc_wrapper(ri, direction, struct=None):
name = op_prefix(ri, direction)
- ri.cw.write_func_prot(f'static inline struct {name} *', f"{name}_alloc", [f"void"])
+ struct_name = name
+ if ri.type_name_conflict:
+ struct_name += '_'
+
+ args = ["void"]
+ cnt = "1"
+ if struct and struct.in_multi_val:
+ args = ["unsigned int n"]
+ cnt = "n"
+
+ ri.cw.write_func_prot(f'static inline struct {struct_name} *',
+ f"{name}_alloc", args)
ri.cw.block_start()
- ri.cw.p(f'return calloc(1, sizeof(struct {name}));')
+ ri.cw.p(f'return calloc({cnt}, sizeof(struct {struct_name}));')
ri.cw.block_end()
@@ -2489,7 +2536,7 @@ def print_free_prototype(ri, direction, suffix=';'):
def print_nlflags_set(ri, direction):
name = op_prefix(ri, direction)
- ri.cw.write_func_prot(f'static inline void', f"{name}_set_nlflags",
+ ri.cw.write_func_prot('static inline void', f"{name}_set_nlflags",
[f"struct {name} *req", "__u16 nl_flags"])
ri.cw.block_start()
ri.cw.p('req->_nlmsg_flags = nl_flags;')
@@ -2520,7 +2567,7 @@ def _print_type(ri, direction, struct):
line = attr.presence_member(ri.ku_space, type_filter)
if line:
if not meta_started:
- ri.cw.block_start(line=f"struct")
+ ri.cw.block_start(line="struct")
meta_started = True
ri.cw.p(line)
if meta_started:
@@ -2544,6 +2591,19 @@ def print_type(ri, direction):
def print_type_full(ri, struct):
_print_type(ri, "", struct)
+ if struct.request and struct.in_multi_val:
+ print_alloc_wrapper(ri, "", struct)
+ ri.cw.nl()
+ free_rsp_nested_prototype(ri)
+ ri.cw.nl()
+
+ # Name conflicts are too hard to deal with with the current code base,
+ # they are very rare so don't bother printing setters in that case.
+ if ri.ku_space == 'user' and not ri.type_name_conflict:
+ for _, attr in struct.member_list():
+ attr.setter(ri, ri.attr_set, "", var="obj")
+ ri.cw.nl()
+
def print_type_helpers(ri, direction, deref=False):
print_free_prototype(ri, direction)
@@ -2671,7 +2731,7 @@ def print_dump_type_free(ri):
ri.cw.nl()
_free_type_members(ri, 'rsp', ri.struct['reply'], ref='obj.')
- ri.cw.p(f'free(rsp);')
+ ri.cw.p('free(rsp);')
ri.cw.block_end()
ri.cw.block_end()
ri.cw.nl()
@@ -2682,7 +2742,7 @@ def print_ntf_type_free(ri):
ri.cw.block_start()
_free_type_members_iter(ri, ri.struct['reply'])
_free_type_members(ri, 'rsp', ri.struct['reply'], ref='obj.')
- ri.cw.p(f'free(rsp);')
+ ri.cw.p('free(rsp);')
ri.cw.block_end()
ri.cw.nl()
@@ -2777,8 +2837,6 @@ def print_kernel_policy_sparse_enum_validates(family, cw):
cw.p('/* Sparse enums validation callbacks */')
first = False
- sign = '' if attr.type[0] == 'u' else '_signed'
- suffix = 'ULL' if attr.type[0] == 'u' else 'LL'
cw.write_func_prot('static int', f'{c_lower(attr.enum_name)}_validate',
['const struct nlattr *attr', 'struct netlink_ext_ack *extack'])
cw.block_start()
@@ -2855,12 +2913,12 @@ def print_kernel_op_table_fwd(family, cw, terminate):
continue
if 'do' in op:
- name = c_lower(f"{family.ident_name}-nl-{op_name}-doit")
+ name = c_lower(f"{family.fn_prefix}-{op_name}-doit")
cw.write_func_prot('int', name,
['struct sk_buff *skb', 'struct genl_info *info'], suffix=';')
if 'dump' in op:
- name = c_lower(f"{family.ident_name}-nl-{op_name}-dumpit")
+ name = c_lower(f"{family.fn_prefix}-{op_name}-dumpit")
cw.write_func_prot('int', name,
['struct sk_buff *skb', 'struct netlink_callback *cb'], suffix=';')
cw.nl()
@@ -2886,7 +2944,7 @@ def print_kernel_op_table(family, cw):
for x in op['dont-validate']])), )
for op_mode in ['do', 'dump']:
if op_mode in op:
- name = c_lower(f"{family.ident_name}-nl-{op_name}-{op_mode}it")
+ name = c_lower(f"{family.fn_prefix}-{op_name}-{op_mode}it")
members.append((op_mode + 'it', name))
if family.kernel_policy == 'per-op':
struct = Struct(family, op['attribute-set'],
@@ -2924,7 +2982,7 @@ def print_kernel_op_table(family, cw):
members.append(('validate',
' | '.join([c_upper('genl-dont-validate-' + x)
for x in dont_validate])), )
- name = c_lower(f"{family.ident_name}-nl-{op_name}-{op_mode}it")
+ name = c_lower(f"{family.fn_prefix}-{op_name}-{op_mode}it")
if 'pre' in op[op_mode]:
members.append((cb_names[op_mode]['pre'], c_lower(op[op_mode]['pre'])))
members.append((op_mode + 'it', name))
@@ -3185,8 +3243,9 @@ def render_uapi(family, cw):
cw.block_end(line=';')
cw.nl()
elif const['type'] == 'const':
+ name_pfx = const.get('name-prefix', f"{family.ident_name}-")
defines.append([c_upper(family.get('c-define-name',
- f"{family.ident_name}-{const['name']}")),
+ f"{name_pfx}{const['name']}")),
const['value']])
if defines:
@@ -3298,7 +3357,7 @@ def render_user_family(family, cw, prototype):
cw.block_start(f'{symbol} = ')
cw.p(f'.name\t\t= "{family.c_name}",')
if family.is_classic():
- cw.p(f'.is_classic\t= true,')
+ cw.p('.is_classic\t= true,')
cw.p(f'.classic_id\t= {family.get("protonum")},')
if family.is_classic():
if family.fixed_header:
@@ -3345,6 +3404,7 @@ def main():
help='Do not overwrite the output file if the new output is identical to the old')
parser.add_argument('--exclude-op', action='append', default=[])
parser.add_argument('-o', dest='out_file', type=str, default=None)
+ parser.add_argument('--function-prefix', dest='fn_prefix', type=str)
args = parser.parse_args()
if args.header is None:
@@ -3353,7 +3413,7 @@ def main():
exclude_ops = [re.compile(expr) for expr in args.exclude_op]
try:
- parsed = Family(args.spec, exclude_ops)
+ parsed = Family(args.spec, exclude_ops, args.fn_prefix)
if parsed.license != '((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)':
print('Spec license:', parsed.license)
print('License must be: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)')
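The new --function-prefix knob replaces the default '<family>-nl' stem used when deriving kernel handler symbols, so a hypothetical '--function-prefix psp-nl' with an op named 'key-rotate' yields psp_nl_key_rotate_doit after c_lower() mangling; the flag is also recorded in the generated YNL-ARG comment (further down) so ynl-regen.sh can reproduce the file.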
@@ -3373,11 +3433,16 @@ def main():
cw.p("/* Do not edit directly, auto-generated from: */")
cw.p(f"/*\t{spec_kernel} */")
cw.p(f"/* YNL-GEN {args.mode} {'header' if args.header else 'source'} */")
- if args.exclude_op or args.user_header:
+ if args.exclude_op or args.user_header or args.fn_prefix:
line = ''
- line += ' --user-header '.join([''] + args.user_header)
- line += ' --exclude-op '.join([''] + args.exclude_op)
+ if args.user_header:
+ line += ' --user-header '.join([''] + args.user_header)
+ if args.exclude_op:
+ line += ' --exclude-op '.join([''] + args.exclude_op)
+ if args.fn_prefix:
+ line += f' --function-prefix {args.fn_prefix}'
cw.p(f'/* YNL-ARG{line} */')
+ cw.p('/* To regenerate run: tools/net/ynl/ynl-regen.sh */')
cw.nl()
if args.mode == 'uapi':
@@ -3515,9 +3580,6 @@ def main():
for attr_set, struct in parsed.pure_nested_structs.items():
ri = RenderInfo(cw, parsed, args.mode, "", "", attr_set)
print_type_full(ri, struct)
- if struct.request and struct.in_multi_val:
- free_rsp_nested_prototype(ri)
- cw.nl()
for op_name, op in parsed.ops.items():
cw.p(f"/* ============== {op.enum_name} ============== */")
diff --git a/tools/net/ynl/pyynl/ynl_gen_rst.py b/tools/net/ynl/pyynl/ynl_gen_rst.py
index 0cb6348e28d3..90ae19aac89d 100755
--- a/tools/net/ynl/pyynl/ynl_gen_rst.py
+++ b/tools/net/ynl/pyynl/ynl_gen_rst.py
@@ -10,353 +10,17 @@
This script performs extensive parsing to the Linux kernel's netlink YAML
spec files, in an effort to avoid needing to heavily mark up the original
- YAML file.
-
- This code is split in three big parts:
- 1) RST formatters: Use to convert a string to a RST output
- 2) Parser helpers: Functions to parse the YAML data structure
- 3) Main function and small helpers
+ YAML file. It uses the library code from scripts/lib.
"""
-from typing import Any, Dict, List
import os.path
+import pathlib
import sys
import argparse
import logging
-import yaml
-
-
-SPACE_PER_LEVEL = 4
-
-
-# RST Formatters
-# ==============
-def headroom(level: int) -> str:
- """Return space to format"""
- return " " * (level * SPACE_PER_LEVEL)
-
-
-def bold(text: str) -> str:
- """Format bold text"""
- return f"**{text}**"
-
-
-def inline(text: str) -> str:
- """Format inline text"""
- return f"``{text}``"
-
-
-def sanitize(text: str) -> str:
- """Remove newlines and multiple spaces"""
- # This is useful for some fields that are spread across multiple lines
- return str(text).replace("\n", " ").strip()
-
-
-def rst_fields(key: str, value: str, level: int = 0) -> str:
- """Return a RST formatted field"""
- return headroom(level) + f":{key}: {value}"
-
-
-def rst_definition(key: str, value: Any, level: int = 0) -> str:
- """Format a single rst definition"""
- return headroom(level) + key + "\n" + headroom(level + 1) + str(value)
-
-
-def rst_paragraph(paragraph: str, level: int = 0) -> str:
- """Return a formatted paragraph"""
- return headroom(level) + paragraph
-
-
-def rst_bullet(item: str, level: int = 0) -> str:
- """Return a formatted a bullet"""
- return headroom(level) + f"- {item}"
-
-
-def rst_subsection(title: str) -> str:
- """Add a sub-section to the document"""
- return f"{title}\n" + "-" * len(title)
-
-
-def rst_subsubsection(title: str) -> str:
- """Add a sub-sub-section to the document"""
- return f"{title}\n" + "~" * len(title)
-
-
-def rst_section(namespace: str, prefix: str, title: str) -> str:
- """Add a section to the document"""
- return f".. _{namespace}-{prefix}-{title}:\n\n{title}\n" + "=" * len(title)
-
-
-def rst_subtitle(title: str) -> str:
- """Add a subtitle to the document"""
- return "\n" + "-" * len(title) + f"\n{title}\n" + "-" * len(title) + "\n\n"
-
-
-def rst_title(title: str) -> str:
- """Add a title to the document"""
- return "=" * len(title) + f"\n{title}\n" + "=" * len(title) + "\n\n"
-
-
-def rst_list_inline(list_: List[str], level: int = 0) -> str:
- """Format a list using inlines"""
- return headroom(level) + "[" + ", ".join(inline(i) for i in list_) + "]"
-
-
-def rst_ref(namespace: str, prefix: str, name: str) -> str:
- """Add a hyperlink to the document"""
- mappings = {'enum': 'definition',
- 'fixed-header': 'definition',
- 'nested-attributes': 'attribute-set',
- 'struct': 'definition'}
- if prefix in mappings:
- prefix = mappings[prefix]
- return f":ref:`{namespace}-{prefix}-{name}`"
-
-
-def rst_header() -> str:
- """The headers for all the auto generated RST files"""
- lines = []
-
- lines.append(rst_paragraph(".. SPDX-License-Identifier: GPL-2.0"))
- lines.append(rst_paragraph(".. NOTE: This document was auto-generated.\n\n"))
-
- return "\n".join(lines)
-
-
-def rst_toctree(maxdepth: int = 2) -> str:
- """Generate a toctree RST primitive"""
- lines = []
-
- lines.append(".. toctree::")
- lines.append(f" :maxdepth: {maxdepth}\n\n")
-
- return "\n".join(lines)
-
-
-def rst_label(title: str) -> str:
- """Return a formatted label"""
- return f".. _{title}:\n\n"
-
-
-# Parsers
-# =======
-
-
-def parse_mcast_group(mcast_group: List[Dict[str, Any]]) -> str:
- """Parse 'multicast' group list and return a formatted string"""
- lines = []
- for group in mcast_group:
- lines.append(rst_bullet(group["name"]))
-
- return "\n".join(lines)
-
-
-def parse_do(do_dict: Dict[str, Any], level: int = 0) -> str:
- """Parse 'do' section and return a formatted string"""
- lines = []
- for key in do_dict.keys():
- lines.append(rst_paragraph(bold(key), level + 1))
- if key in ['request', 'reply']:
- lines.append(parse_do_attributes(do_dict[key], level + 1) + "\n")
- else:
- lines.append(headroom(level + 2) + do_dict[key] + "\n")
-
- return "\n".join(lines)
-
-
-def parse_do_attributes(attrs: Dict[str, Any], level: int = 0) -> str:
- """Parse 'attributes' section"""
- if "attributes" not in attrs:
- return ""
- lines = [rst_fields("attributes", rst_list_inline(attrs["attributes"]), level + 1)]
-
- return "\n".join(lines)
-
-
-def parse_operations(operations: List[Dict[str, Any]], namespace: str) -> str:
- """Parse operations block"""
- preprocessed = ["name", "doc", "title", "do", "dump", "flags"]
- linkable = ["fixed-header", "attribute-set"]
- lines = []
-
- for operation in operations:
- lines.append(rst_section(namespace, 'operation', operation["name"]))
- lines.append(rst_paragraph(operation["doc"]) + "\n")
-
- for key in operation.keys():
- if key in preprocessed:
- # Skip the special fields
- continue
- value = operation[key]
- if key in linkable:
- value = rst_ref(namespace, key, value)
- lines.append(rst_fields(key, value, 0))
- if 'flags' in operation:
- lines.append(rst_fields('flags', rst_list_inline(operation['flags'])))
-
- if "do" in operation:
- lines.append(rst_paragraph(":do:", 0))
- lines.append(parse_do(operation["do"], 0))
- if "dump" in operation:
- lines.append(rst_paragraph(":dump:", 0))
- lines.append(parse_do(operation["dump"], 0))
-
- # New line after fields
- lines.append("\n")
-
- return "\n".join(lines)
-
-
-def parse_entries(entries: List[Dict[str, Any]], level: int) -> str:
- """Parse a list of entries"""
- ignored = ["pad"]
- lines = []
- for entry in entries:
- if isinstance(entry, dict):
- # entries could be a list or a dictionary
- field_name = entry.get("name", "")
- if field_name in ignored:
- continue
- type_ = entry.get("type")
- if type_:
- field_name += f" ({inline(type_)})"
- lines.append(
- rst_fields(field_name, sanitize(entry.get("doc", "")), level)
- )
- elif isinstance(entry, list):
- lines.append(rst_list_inline(entry, level))
- else:
- lines.append(rst_bullet(inline(sanitize(entry)), level))
-
- lines.append("\n")
- return "\n".join(lines)
-
-
-def parse_definitions(defs: Dict[str, Any], namespace: str) -> str:
- """Parse definitions section"""
- preprocessed = ["name", "entries", "members"]
- ignored = ["render-max"] # This is not printed
- lines = []
-
- for definition in defs:
- lines.append(rst_section(namespace, 'definition', definition["name"]))
- for k in definition.keys():
- if k in preprocessed + ignored:
- continue
- lines.append(rst_fields(k, sanitize(definition[k]), 0))
-
- # Field list needs to finish with a new line
- lines.append("\n")
- if "entries" in definition:
- lines.append(rst_paragraph(":entries:", 0))
- lines.append(parse_entries(definition["entries"], 1))
- if "members" in definition:
- lines.append(rst_paragraph(":members:", 0))
- lines.append(parse_entries(definition["members"], 1))
-
- return "\n".join(lines)
-
-
-def parse_attr_sets(entries: List[Dict[str, Any]], namespace: str) -> str:
- """Parse attribute from attribute-set"""
- preprocessed = ["name", "type"]
- linkable = ["enum", "nested-attributes", "struct", "sub-message"]
- ignored = ["checks"]
- lines = []
-
- for entry in entries:
- lines.append(rst_section(namespace, 'attribute-set', entry["name"]))
- for attr in entry["attributes"]:
- type_ = attr.get("type")
- attr_line = attr["name"]
- if type_:
- # Add the attribute type in the same line
- attr_line += f" ({inline(type_)})"
-
- lines.append(rst_subsubsection(attr_line))
-
- for k in attr.keys():
- if k in preprocessed + ignored:
- continue
- if k in linkable:
- value = rst_ref(namespace, k, attr[k])
- else:
- value = sanitize(attr[k])
- lines.append(rst_fields(k, value, 0))
- lines.append("\n")
-
- return "\n".join(lines)
-
-
-def parse_sub_messages(entries: List[Dict[str, Any]], namespace: str) -> str:
- """Parse sub-message definitions"""
- lines = []
-
- for entry in entries:
- lines.append(rst_section(namespace, 'sub-message', entry["name"]))
- for fmt in entry["formats"]:
- value = fmt["value"]
-
- lines.append(rst_bullet(bold(value)))
- for attr in ['fixed-header', 'attribute-set']:
- if attr in fmt:
- lines.append(rst_fields(attr,
- rst_ref(namespace, attr, fmt[attr]),
- 1))
- lines.append("\n")
-
- return "\n".join(lines)
-
-
-def parse_yaml(obj: Dict[str, Any]) -> str:
- """Format the whole YAML into a RST string"""
- lines = []
-
- # Main header
-
- lines.append(rst_header())
-
- family = obj['name']
-
- title = f"Family ``{family}`` netlink specification"
- lines.append(rst_title(title))
- lines.append(rst_paragraph(".. contents:: :depth: 3\n"))
-
- if "doc" in obj:
- lines.append(rst_subtitle("Summary"))
- lines.append(rst_paragraph(obj["doc"], 0))
-
- # Operations
- if "operations" in obj:
- lines.append(rst_subtitle("Operations"))
- lines.append(parse_operations(obj["operations"]["list"], family))
-
- # Multicast groups
- if "mcast-groups" in obj:
- lines.append(rst_subtitle("Multicast groups"))
- lines.append(parse_mcast_group(obj["mcast-groups"]["list"]))
-
- # Definitions
- if "definitions" in obj:
- lines.append(rst_subtitle("Definitions"))
- lines.append(parse_definitions(obj["definitions"], family))
-
- # Attributes set
- if "attribute-sets" in obj:
- lines.append(rst_subtitle("Attribute sets"))
- lines.append(parse_attr_sets(obj["attribute-sets"], family))
-
- # Sub-messages
- if "sub-messages" in obj:
- lines.append(rst_subtitle("Sub-messages"))
- lines.append(parse_sub_messages(obj["sub-messages"], family))
-
- return "\n".join(lines)
-
-
-# Main functions
-# ==============
+sys.path.append(pathlib.Path(__file__).resolve().parent.as_posix())
+from lib import YnlDocGenerator # pylint: disable=C0413
def parse_arguments() -> argparse.Namespace:
"""Parse arguments from user"""
@@ -367,9 +31,6 @@ def parse_arguments() -> argparse.Namespace:
# Index and input are mutually exclusive
group = parser.add_mutually_exclusive_group()
- group.add_argument(
- "-x", "--index", action="store_true", help="Generate the index page"
- )
group.add_argument("-i", "--input", help="YAML file name")
args = parser.parse_args()
@@ -391,15 +52,6 @@ def parse_arguments() -> argparse.Namespace:
return args
-def parse_yaml_file(filename: str) -> str:
- """Transform the YAML specified by filename into an RST-formatted string"""
- with open(filename, "r", encoding="utf-8") as spec_file:
- yaml_data = yaml.safe_load(spec_file)
- content = parse_yaml(yaml_data)
-
- return content
-
-
def write_to_rstfile(content: str, filename: str) -> None:
"""Write the generated content into an RST file"""
logging.debug("Saving RST file to %s", filename)
@@ -408,35 +60,17 @@ def write_to_rstfile(content: str, filename: str) -> None:
rst_file.write(content)
-def generate_main_index_rst(output: str) -> None:
- """Generate the `networking_spec/index` content and write to the file"""
- lines = []
-
- lines.append(rst_header())
- lines.append(rst_label("specs"))
- lines.append(rst_title("Netlink Family Specifications"))
- lines.append(rst_toctree(1))
-
- index_dir = os.path.dirname(output)
- logging.debug("Looking for .rst files in %s", index_dir)
- for filename in sorted(os.listdir(index_dir)):
- if not filename.endswith(".rst") or filename == "index.rst":
- continue
- lines.append(f" {filename.replace('.rst', '')}\n")
-
- logging.debug("Writing an index file at %s", output)
- write_to_rstfile("".join(lines), output)
-
-
def main() -> None:
"""Main function that reads the YAML files and generates the RST files"""
args = parse_arguments()
+ parser = YnlDocGenerator()
+
if args.input:
logging.debug("Parsing %s", args.input)
try:
- content = parse_yaml_file(os.path.join(args.input))
+ content = parser.parse_yaml_file(os.path.join(args.input))
except Exception as exception:
logging.warning("Failed to parse %s.", args.input)
logging.warning(exception)
@@ -444,10 +78,6 @@ def main() -> None:
write_to_rstfile(content, args.output)
- if args.index:
- # Generate the index RST file
- generate_main_index_rst(args.output)
-
if __name__ == "__main__":
main()
diff --git a/tools/net/ynl/samples/.gitignore b/tools/net/ynl/samples/.gitignore
index 7f5fca7682d7..05087ee323ba 100644
--- a/tools/net/ynl/samples/.gitignore
+++ b/tools/net/ynl/samples/.gitignore
@@ -7,3 +7,4 @@ rt-addr
rt-link
rt-route
tc
+tc-filter-add
diff --git a/tools/net/ynl/samples/Makefile b/tools/net/ynl/samples/Makefile
index c9494a564da4..d76cbd41cbb1 100644
--- a/tools/net/ynl/samples/Makefile
+++ b/tools/net/ynl/samples/Makefile
@@ -19,6 +19,7 @@ include $(wildcard *.d)
all: $(BINS)
CFLAGS_page-pool=$(CFLAGS_netdev)
+CFLAGS_tc-filter-add:=$(CFLAGS_tc)
$(BINS): ../lib/ynl.a ../generated/protos.a $(SRCS)
@echo -e '\tCC sample $@'
diff --git a/tools/net/ynl/samples/page-pool.c b/tools/net/ynl/samples/page-pool.c
deleted file mode 100644
index e5d521320fbf..000000000000
--- a/tools/net/ynl/samples/page-pool.c
+++ /dev/null
@@ -1,149 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#define _GNU_SOURCE
-
-#include <stdio.h>
-#include <string.h>
-
-#include <ynl.h>
-
-#include <net/if.h>
-
-#include "netdev-user.h"
-
-struct stat {
- unsigned int ifc;
-
- struct {
- unsigned int cnt;
- size_t refs, bytes;
- } live[2];
-
- size_t alloc_slow, alloc_fast, recycle_ring, recycle_cache;
-};
-
-struct stats_array {
- unsigned int i, max;
- struct stat *s;
-};
-
-static struct stat *find_ifc(struct stats_array *a, unsigned int ifindex)
-{
- unsigned int i;
-
- for (i = 0; i < a->i; i++) {
- if (a->s[i].ifc == ifindex)
- return &a->s[i];
- }
-
- a->i++;
- if (a->i == a->max) {
- a->max *= 2;
- a->s = reallocarray(a->s, a->max, sizeof(*a->s));
- }
- a->s[i].ifc = ifindex;
- return &a->s[i];
-}
-
-static void count(struct stat *s, unsigned int l,
- struct netdev_page_pool_get_rsp *pp)
-{
- s->live[l].cnt++;
- if (pp->_present.inflight)
- s->live[l].refs += pp->inflight;
- if (pp->_present.inflight_mem)
- s->live[l].bytes += pp->inflight_mem;
-}
-
-int main(int argc, char **argv)
-{
- struct netdev_page_pool_stats_get_list *pp_stats;
- struct netdev_page_pool_get_list *pools;
- struct stats_array a = {};
- struct ynl_error yerr;
- struct ynl_sock *ys;
-
- ys = ynl_sock_create(&ynl_netdev_family, &yerr);
- if (!ys) {
- fprintf(stderr, "YNL: %s\n", yerr.msg);
- return 1;
- }
-
- a.max = 128;
- a.s = calloc(a.max, sizeof(*a.s));
- if (!a.s)
- goto err_close;
-
- pools = netdev_page_pool_get_dump(ys);
- if (!pools)
- goto err_free;
-
- ynl_dump_foreach(pools, pp) {
- struct stat *s = find_ifc(&a, pp->ifindex);
-
- count(s, 1, pp);
- if (pp->_present.detach_time)
- count(s, 0, pp);
- }
- netdev_page_pool_get_list_free(pools);
-
- pp_stats = netdev_page_pool_stats_get_dump(ys);
- if (!pp_stats)
- goto err_free;
-
- ynl_dump_foreach(pp_stats, pp) {
- struct stat *s = find_ifc(&a, pp->info.ifindex);
-
- if (pp->_present.alloc_fast)
- s->alloc_fast += pp->alloc_fast;
- if (pp->_present.alloc_refill)
- s->alloc_fast += pp->alloc_refill;
- if (pp->_present.alloc_slow)
- s->alloc_slow += pp->alloc_slow;
- if (pp->_present.recycle_ring)
- s->recycle_ring += pp->recycle_ring;
- if (pp->_present.recycle_cached)
- s->recycle_cache += pp->recycle_cached;
- }
- netdev_page_pool_stats_get_list_free(pp_stats);
-
- for (unsigned int i = 0; i < a.i; i++) {
- char ifname[IF_NAMESIZE];
- struct stat *s = &a.s[i];
- const char *name;
- double recycle;
-
- if (!s->ifc) {
- name = "<orphan>\t";
- } else {
- name = if_indextoname(s->ifc, ifname);
- if (name)
- printf("%8s", name);
- printf("[%u]\t", s->ifc);
- }
-
- printf("page pools: %u (zombies: %u)\n",
- s->live[1].cnt, s->live[0].cnt);
- printf("\t\trefs: %zu bytes: %zu (refs: %zu bytes: %zu)\n",
- s->live[1].refs, s->live[1].bytes,
- s->live[0].refs, s->live[0].bytes);
-
- /* We don't know how many pages are sitting in cache and ring
- * so we will under-count the recycling rate a bit.
- */
- recycle = (double)(s->recycle_ring + s->recycle_cache) /
- (s->alloc_fast + s->alloc_slow) * 100;
- printf("\t\trecycling: %.1lf%% (alloc: %zu:%zu recycle: %zu:%zu)\n",
- recycle, s->alloc_slow, s->alloc_fast,
- s->recycle_ring, s->recycle_cache);
- }
-
- ynl_sock_destroy(ys);
- return 0;
-
-err_free:
- free(a.s);
-err_close:
- fprintf(stderr, "YNL: %s\n", ys->err.msg);
- ynl_sock_destroy(ys);
- return 2;
-}
diff --git a/tools/net/ynl/samples/tc-filter-add.c b/tools/net/ynl/samples/tc-filter-add.c
new file mode 100644
index 000000000000..97871e9e9edc
--- /dev/null
+++ b/tools/net/ynl/samples/tc-filter-add.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <arpa/inet.h>
+#include <linux/pkt_sched.h>
+#include <linux/tc_act/tc_vlan.h>
+#include <linux/tc_act/tc_gact.h>
+#include <linux/if_ether.h>
+#include <net/if.h>
+
+#include <ynl.h>
+
+#include "tc-user.h"
+
+#define TC_HANDLE (0xFFFF << 16)
+
+const char *vlan_act_name(struct tc_vlan *p)
+{
+ switch (p->v_action) {
+ case TCA_VLAN_ACT_POP:
+ return "pop";
+ case TCA_VLAN_ACT_PUSH:
+ return "push";
+ case TCA_VLAN_ACT_MODIFY:
+ return "modify";
+ default:
+ break;
+ }
+
+ return "not supported";
+}
+
+const char *gact_act_name(struct tc_gact *p)
+{
+ switch (p->action) {
+ case TC_ACT_SHOT:
+ return "drop";
+ case TC_ACT_OK:
+ return "ok";
+ case TC_ACT_PIPE:
+ return "pipe";
+ default:
+ break;
+ }
+
+ return "not supported";
+}
+
+static void print_vlan(struct tc_act_vlan_attrs *vlan)
+{
+ printf("%s ", vlan_act_name(vlan->parms));
+ if (vlan->_present.push_vlan_id)
+ printf("id %u ", vlan->push_vlan_id);
+ if (vlan->_present.push_vlan_protocol)
+ printf("protocol %#x ", ntohs(vlan->push_vlan_protocol));
+ if (vlan->_present.push_vlan_priority)
+ printf("priority %u ", vlan->push_vlan_priority);
+}
+
+static void print_gact(struct tc_act_gact_attrs *gact)
+{
+ struct tc_gact *p = gact->parms;
+
+ printf("%s ", gact_act_name(p));
+}
+
+static void flower_print(struct tc_flower_attrs *flower, const char *kind)
+{
+ struct tc_act_attrs *a;
+ unsigned int i;
+
+ printf("%s:\n", kind);
+
+ if (flower->_present.key_vlan_id)
+ printf(" vlan_id: %u\n", flower->key_vlan_id);
+ if (flower->_present.key_vlan_prio)
+ printf(" vlan_prio: %u\n", flower->key_vlan_prio);
+ if (flower->_present.key_num_of_vlans)
+ printf(" num_of_vlans: %u\n", flower->key_num_of_vlans);
+
+ for (i = 0; i < flower->_count.act; i++) {
+ a = &flower->act[i];
+ printf("action order: %i %s ", i + 1, a->kind);
+ if (a->options._present.vlan)
+ print_vlan(&a->options.vlan);
+ else if (a->options._present.gact)
+ print_gact(&a->options.gact);
+ printf("\n");
+ }
+ printf("\n");
+}
+
+static void tc_filter_print(struct tc_gettfilter_rsp *f)
+{
+ struct tc_options_msg *opt = &f->options;
+
+ if (opt->_present.flower)
+ flower_print(&opt->flower, f->kind);
+ else if (f->_len.kind)
+ printf("%s pref %u proto: %#x\n", f->kind,
+ (f->_hdr.tcm_info >> 16),
+ ntohs(TC_H_MIN(f->_hdr.tcm_info)));
+}
+
+static int tc_filter_add(struct ynl_sock *ys, int ifi)
+{
+ struct tc_newtfilter_req *req;
+ struct tc_act_attrs *acts;
+ struct tc_vlan p = {
+ .action = TC_ACT_PIPE,
+ .v_action = TCA_VLAN_ACT_PUSH
+ };
+ __u16 flags = NLM_F_REQUEST | NLM_F_EXCL | NLM_F_CREATE;
+ int ret;
+
+ req = tc_newtfilter_req_alloc();
+ if (!req) {
+ fprintf(stderr, "tc_newtfilter_req_alloc failed\n");
+ return -1;
+ }
+ memset(req, 0, sizeof(*req));
+
+ acts = tc_act_attrs_alloc(3);
+ if (!acts) {
+ fprintf(stderr, "tc_act_attrs_alloc\n");
+ tc_newtfilter_req_free(req);
+ return -1;
+ }
+ memset(acts, 0, sizeof(*acts) * 3);
+
+ req->_hdr.tcm_ifindex = ifi;
+ req->_hdr.tcm_parent = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS);
+ req->_hdr.tcm_info = TC_H_MAKE(1 << 16, htons(ETH_P_8021Q));
+ req->chain = 0;
+
+ tc_newtfilter_req_set_nlflags(req, flags);
+ tc_newtfilter_req_set_kind(req, "flower");
+ tc_newtfilter_req_set_options_flower_key_vlan_id(req, 100);
+ tc_newtfilter_req_set_options_flower_key_vlan_prio(req, 5);
+ tc_newtfilter_req_set_options_flower_key_num_of_vlans(req, 3);
+
+ __tc_newtfilter_req_set_options_flower_act(req, acts, 3);
+
+ /* Skip action at index 0 because in TC, the action array
+ * index starts at 1, with each index defining the action's
+ * order. In contrast, in YNL indexed arrays start at index 0.
+ */
+ tc_act_attrs_set_kind(&acts[1], "vlan");
+ tc_act_attrs_set_options_vlan_parms(&acts[1], &p, sizeof(p));
+ tc_act_attrs_set_options_vlan_push_vlan_id(&acts[1], 200);
+ tc_act_attrs_set_kind(&acts[2], "vlan");
+ tc_act_attrs_set_options_vlan_parms(&acts[2], &p, sizeof(p));
+ tc_act_attrs_set_options_vlan_push_vlan_id(&acts[2], 300);
+
+ tc_newtfilter_req_set_options_flower_flags(req, 0);
+ tc_newtfilter_req_set_options_flower_key_eth_type(req, htons(0x8100));
+
+ ret = tc_newtfilter(ys, req);
+ if (ret)
+ fprintf(stderr, "tc_newtfilter: %s\n", ys->err.msg);
+
+ tc_newtfilter_req_free(req);
+
+ return ret;
+}
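A minimal sketch of the tcm_info layout relied on above: the filter preference occupies the upper 16 bits and the big-endian EtherType the lower 16. pack_tcm_info() is a hypothetical helper; TC_H_MAKE() and TC_H_MIN() are the real macros from <linux/pkt_sched.h>.

#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/pkt_sched.h>

/* Hypothetical helper: build tcm_info the way tc_filter_add() does. */
static inline __u32 pack_tcm_info(__u16 prio, __u16 proto_be)
{
	return TC_H_MAKE((__u32)prio << 16, proto_be);
}

/* pack_tcm_info(1, htons(ETH_P_8021Q)) equals the TC_H_MAKE() value set
 * above; tc_filter_print() reverses it with info >> 16 and
 * ntohs(TC_H_MIN(info)).
 */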
+
+static int tc_filter_show(struct ynl_sock *ys, int ifi)
+{
+ struct tc_gettfilter_req_dump *req;
+ struct tc_gettfilter_list *rsp;
+
+ req = tc_gettfilter_req_dump_alloc();
+ if (!req) {
+ fprintf(stderr, "tc_gettfilter_req_dump_alloc failed\n");
+ return -1;
+ }
+ memset(req, 0, sizeof(*req));
+
+ req->_hdr.tcm_ifindex = ifi;
+ req->_hdr.tcm_parent = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS);
+ req->_present.chain = 1;
+ req->chain = 0;
+
+ rsp = tc_gettfilter_dump(ys, req);
+ tc_gettfilter_req_dump_free(req);
+ if (!rsp) {
+ fprintf(stderr, "YNL: %s\n", ys->err.msg);
+ return -1;
+ }
+
+ if (ynl_dump_empty(rsp))
+ fprintf(stderr, "Error: no filters reported\n");
+ else
+ ynl_dump_foreach(rsp, flt) tc_filter_print(flt);
+
+ tc_gettfilter_list_free(rsp);
+
+ return 0;
+}
+
+static int tc_filter_del(struct ynl_sock *ys, int ifi)
+{
+ struct tc_deltfilter_req *req;
+ __u16 flags = NLM_F_REQUEST;
+ int ret;
+
+ req = tc_deltfilter_req_alloc();
+ if (!req) {
+ fprintf(stderr, "tc_deltfilter_req_alloc failed\n");
+ return -1;
+ }
+ memset(req, 0, sizeof(*req));
+
+ req->_hdr.tcm_ifindex = ifi;
+ req->_hdr.tcm_parent = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS);
+ req->_hdr.tcm_info = TC_H_MAKE(1 << 16, htons(ETH_P_8021Q));
+ tc_deltfilter_req_set_nlflags(req, flags);
+
+ ret = tc_deltfilter(ys, req);
+ if (ret)
+ fprintf(stderr, "tc_deltfilter failed: %s\n", ys->err.msg);
+
+ tc_deltfilter_req_free(req);
+
+ return ret;
+}
+
+static int tc_clsact_add(struct ynl_sock *ys, int ifi)
+{
+ struct tc_newqdisc_req *req;
+ __u16 flags = NLM_F_REQUEST | NLM_F_EXCL | NLM_F_CREATE;
+ int ret;
+
+ req = tc_newqdisc_req_alloc();
+ if (!req) {
+ fprintf(stderr, "tc_newqdisc_req_alloc failed\n");
+ return -1;
+ }
+ memset(req, 0, sizeof(*req));
+
+ req->_hdr.tcm_ifindex = ifi;
+ req->_hdr.tcm_parent = TC_H_CLSACT;
+ req->_hdr.tcm_handle = TC_HANDLE;
+ tc_newqdisc_req_set_nlflags(req, flags);
+ tc_newqdisc_req_set_kind(req, "clsact");
+
+ ret = tc_newqdisc(ys, req);
+ if (ret)
+ fprintf(stderr, "tc_newqdisc failed: %s\n", ys->err.msg);
+
+ tc_newqdisc_req_free(req);
+
+ return ret;
+}
+
+static int tc_clsact_del(struct ynl_sock *ys, int ifi)
+{
+ struct tc_delqdisc_req *req;
+ __u16 flags = NLM_F_REQUEST;
+ int ret;
+
+ req = tc_delqdisc_req_alloc();
+ if (!req) {
+ fprintf(stderr, "tc_delqdisc_req_alloc failed\n");
+ return -1;
+ }
+ memset(req, 0, sizeof(*req));
+
+ req->_hdr.tcm_ifindex = ifi;
+ req->_hdr.tcm_parent = TC_H_CLSACT;
+ req->_hdr.tcm_handle = TC_HANDLE;
+ tc_delqdisc_req_set_nlflags(req, flags);
+
+ ret = tc_delqdisc(ys, req);
+ if (ret)
+ fprintf(stderr, "tc_delqdisc failed: %s\n", ys->err.msg);
+
+ tc_delqdisc_req_free(req);
+
+ return ret;
+}
+
+static int tc_filter_config(struct ynl_sock *ys, int ifi)
+{
+ int ret = 0;
+
+ if (tc_filter_add(ys, ifi))
+ return -1;
+
+ ret = tc_filter_show(ys, ifi);
+
+ if (tc_filter_del(ys, ifi))
+ return -1;
+
+ return ret;
+}
+
+int main(int argc, char **argv)
+{
+ struct ynl_error yerr;
+ struct ynl_sock *ys;
+ int ifi, ret = 0;
+
+ if (argc < 2) {
+ fprintf(stderr, "Usage: %s <interface_name>\n", argv[0]);
+ return 1;
+ }
+ ifi = if_nametoindex(argv[1]);
+ if (!ifi) {
+ perror("if_nametoindex");
+ return 1;
+ }
+
+ ys = ynl_sock_create(&ynl_tc_family, &yerr);
+ if (!ys) {
+ fprintf(stderr, "YNL: %s\n", yerr.msg);
+ return 1;
+ }
+
+ if (tc_clsact_add(ys, ifi)) {
+ ret = 2;
+ goto err_destroy;
+ }
+
+ if (tc_filter_config(ys, ifi))
+ ret = 3;
+
+ if (tc_clsact_del(ys, ifi))
+ ret = 4;
+
+err_destroy:
+ ynl_sock_destroy(ys);
+ return ret;
+}
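For reference, the overall control flow of the sample, as a sketch derived from main() above:

/* Running "./tc-filter-add eth0" (the interface name is the only
 * argument) performs one full cycle:
 *   1. tc_clsact_add()  - attach a clsact qdisc       (exit 2 on failure)
 *   2. tc_filter_add()  - add the flower filter with two vlan push actions
 *   3. tc_filter_show() - dump and print the filters
 *   4. tc_filter_del()  - remove the filter           (steps 2-4: exit 3)
 *   5. tc_clsact_del()  - remove the qdisc            (exit 4 on failure)
 */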
diff --git a/tools/net/ynl/tests/Makefile b/tools/net/ynl/tests/Makefile
new file mode 100644
index 000000000000..c1df2e001255
--- /dev/null
+++ b/tools/net/ynl/tests/Makefile
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for YNL tests
+
+TESTS := \
+ test_ynl_cli.sh \
+ test_ynl_ethtool.sh \
+# end of TESTS
+
+all: $(TESTS)
+
+run_tests:
+ @for test in $(TESTS); do \
+ ./$$test; \
+ done
+
+install: $(TESTS)
+ @mkdir -p $(DESTDIR)/usr/bin
+ @mkdir -p $(DESTDIR)/usr/share/kselftest
+ @cp ../../../testing/selftests/kselftest/ktap_helpers.sh $(DESTDIR)/usr/share/kselftest/
+ @for test in $(TESTS); do \
+ name=$$(basename $$test .sh); \
+ sed -e 's|^ynl=.*|ynl="ynl"|' \
+ -e 's|^ynl_ethtool=.*|ynl_ethtool="ynl-ethtool"|' \
+ -e 's|KSELFTEST_KTAP_HELPERS=.*|KSELFTEST_KTAP_HELPERS="/usr/share/kselftest/ktap_helpers.sh"|' \
+ $$test > $(DESTDIR)/usr/bin/$$name; \
+ chmod +x $(DESTDIR)/usr/bin/$$name; \
+ done
+
+clean distclean:
+ @# Nothing to clean
+
+.PHONY: all install clean distclean run_tests
diff --git a/tools/net/ynl/tests/config b/tools/net/ynl/tests/config
new file mode 100644
index 000000000000..339f1309c03f
--- /dev/null
+++ b/tools/net/ynl/tests/config
@@ -0,0 +1,6 @@
+CONFIG_DUMMY=m
+CONFIG_INET_DIAG=y
+CONFIG_IPV6=y
+CONFIG_NET_NS=y
+CONFIG_NETDEVSIM=m
+CONFIG_VETH=m
diff --git a/tools/net/ynl/tests/test_ynl_cli.sh b/tools/net/ynl/tests/test_ynl_cli.sh
new file mode 100755
index 000000000000..7c0722a08117
--- /dev/null
+++ b/tools/net/ynl/tests/test_ynl_cli.sh
@@ -0,0 +1,327 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Test YNL CLI functionality
+
+# Load KTAP test helpers
+KSELFTEST_KTAP_HELPERS="$(dirname "$(realpath "$0")")/../../../testing/selftests/kselftest/ktap_helpers.sh"
+# shellcheck source=../../../testing/selftests/kselftest/ktap_helpers.sh
+source "$KSELFTEST_KTAP_HELPERS"
+
+# Default ynl path for direct execution; can be overridden by "make install"
+ynl="../pyynl/cli.py"
+
+readonly NSIM_ID="1338"
+readonly NSIM_DEV_NAME="nsim${NSIM_ID}"
+readonly VETH_A="veth_a"
+readonly VETH_B="veth_b"
+
+testns="ynl-$(mktemp -u XXXXXX)"
+TESTS_NO=0
+
+# Test listing available families
+cli_list_families()
+{
+ if $ynl --list-families &>/dev/null; then
+ ktap_test_pass "YNL CLI list families"
+ else
+ ktap_test_fail "YNL CLI list families"
+ fi
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+# Test netdev family operations (dev-get, queue-get)
+cli_netdev_ops()
+{
+ local dev_output
+ local ifindex
+
+ ifindex=$(ip netns exec "$testns" cat /sys/class/net/"$NSIM_DEV_NAME"/ifindex 2>/dev/null)
+
+ dev_output=$(ip netns exec "$testns" $ynl --family netdev \
+ --do dev-get --json "{\"ifindex\": $ifindex}" 2>/dev/null)
+
+ if ! echo "$dev_output" | grep -q "ifindex"; then
+ ktap_test_fail "YNL CLI netdev operations (netdev dev-get output missing ifindex)"
+ return
+ fi
+
+ if ! ip netns exec "$testns" $ynl --family netdev \
+ --dump queue-get --json "{\"ifindex\": $ifindex}" &>/dev/null; then
+ ktap_test_fail "YNL CLI netdev operations (failed to get netdev queue info)"
+ return
+ fi
+
+ ktap_test_pass "YNL CLI netdev operations"
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+# Test ethtool family operations (rings-get, linkinfo-get)
+cli_ethtool_ops()
+{
+ local rings_output
+ local linkinfo_output
+
+ rings_output=$(ip netns exec "$testns" $ynl --family ethtool \
+ --do rings-get --json "{\"header\": {\"dev-name\": \"$NSIM_DEV_NAME\"}}" 2>/dev/null)
+
+ if ! echo "$rings_output" | grep -q "header"; then
+ ktap_test_fail "YNL CLI ethtool operations (ethtool rings-get output missing header)"
+ return
+ fi
+
+ linkinfo_output=$(ip netns exec "$testns" $ynl --family ethtool \
+ --do linkinfo-get --json "{\"header\": {\"dev-name\": \"$VETH_A\"}}" 2>/dev/null)
+
+ if ! echo "$linkinfo_output" | grep -q "header"; then
+ ktap_test_fail "YNL CLI ethtool operations (ethtool linkinfo-get output missing header)"
+ return
+ fi
+
+ ktap_test_pass "YNL CLI ethtool operations"
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+# Test rt-route family operations
+cli_rt_route_ops()
+{
+ local ifindex
+
+ if ! $ynl --list-families 2>/dev/null | grep -q "rt-route"; then
+ ktap_test_skip "YNL CLI rt-route operations (rt-route family not available)"
+ return
+ fi
+
+ ifindex=$(ip netns exec "$testns" cat /sys/class/net/"$NSIM_DEV_NAME"/ifindex 2>/dev/null)
+
+ # Add route: 192.0.2.0/24 dev $dev scope link
+ if ! ip netns exec "$testns" $ynl --family rt-route --do newroute --create \
+ --json "{\"dst\": \"192.0.2.0\", \"oif\": $ifindex, \"rtm-dst-len\": 24, \"rtm-family\": 2, \"rtm-scope\": 253, \"rtm-type\": 1, \"rtm-protocol\": 3, \"rtm-table\": 254}" &>/dev/null; then
+ ktap_test_fail "YNL CLI rt-route operations (failed to add route)"
+ return
+ fi
+
+ local route_output
+ route_output=$(ip netns exec "$testns" $ynl --family rt-route --dump getroute 2>/dev/null)
+ if echo "$route_output" | grep -q "192.0.2.0"; then
+ ktap_test_pass "YNL CLI rt-route operations"
+ else
+ ktap_test_fail "YNL CLI rt-route operations (failed to verify route)"
+ fi
+
+ ip netns exec "$testns" $ynl --family rt-route --do delroute \
+ --json "{\"dst\": \"192.0.2.0\", \"oif\": $ifindex, \"rtm-dst-len\": 24, \"rtm-family\": 2, \"rtm-scope\": 253, \"rtm-type\": 1, \"rtm-protocol\": 3, \"rtm-table\": 254}" &>/dev/null
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+# Test rt-addr family operations
+cli_rt_addr_ops()
+{
+ local ifindex
+
+ if ! $ynl --list-families 2>/dev/null | grep -q "rt-addr"; then
+ ktap_test_skip "YNL CLI rt-addr operations (rt-addr family not available)"
+ return
+ fi
+
+ ifindex=$(ip netns exec "$testns" cat /sys/class/net/"$NSIM_DEV_NAME"/ifindex 2>/dev/null)
+
+ if ! ip netns exec "$testns" $ynl --family rt-addr --do newaddr \
+ --json "{\"ifa-index\": $ifindex, \"local\": \"192.0.2.100\", \"ifa-prefixlen\": 24, \"ifa-family\": 2}" &>/dev/null; then
+ ktap_test_fail "YNL CLI rt-addr operations (failed to add address)"
+ return
+ fi
+
+ local addr_output
+ addr_output=$(ip netns exec "$testns" $ynl --family rt-addr --dump getaddr 2>/dev/null)
+ if echo "$addr_output" | grep -q "192.0.2.100"; then
+ ktap_test_pass "YNL CLI rt-addr operations"
+ else
+ ktap_test_fail "YNL CLI rt-addr operations (failed to verify address)"
+ fi
+
+ ip netns exec "$testns" $ynl --family rt-addr --do deladdr \
+ --json "{\"ifa-index\": $ifindex, \"local\": \"192.0.2.100\", \"ifa-prefixlen\": 24, \"ifa-family\": 2}" &>/dev/null
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+# Test rt-link family operations
+cli_rt_link_ops()
+{
+ if ! $ynl --list-families 2>/dev/null | grep -q "rt-link"; then
+ ktap_test_skip "YNL CLI rt-link operations (rt-link family not available)"
+ return
+ fi
+
+ if ! ip netns exec "$testns" $ynl --family rt-link --do newlink --create \
+ --json "{\"ifname\": \"dummy0\", \"linkinfo\": {\"kind\": \"dummy\"}}" &>/dev/null; then
+ ktap_test_fail "YNL CLI rt-link operations (failed to add link)"
+ return
+ fi
+
+ local link_output
+ link_output=$(ip netns exec "$testns" $ynl --family rt-link --dump getlink 2>/dev/null)
+ if echo "$link_output" | grep -q "$NSIM_DEV_NAME" && echo "$link_output" | grep -q "dummy0"; then
+ ktap_test_pass "YNL CLI rt-link operations"
+ else
+ ktap_test_fail "YNL CLI rt-link operations (failed to verify link)"
+ fi
+
+ ip netns exec "$testns" $ynl --family rt-link --do dellink \
+ --json "{\"ifname\": \"dummy0\"}" &>/dev/null
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+# Test rt-neigh family operations
+cli_rt_neigh_ops()
+{
+ local ifindex
+
+ if ! $ynl --list-families 2>/dev/null | grep -q "rt-neigh"; then
+ ktap_test_skip "YNL CLI rt-neigh operations (rt-neigh family not available)"
+ return
+ fi
+
+ ifindex=$(ip netns exec "$testns" cat /sys/class/net/"$NSIM_DEV_NAME"/ifindex 2>/dev/null)
+
+ # Add neighbor: 192.0.2.1 dev nsim1338 lladdr 11:22:33:44:55:66 PERMANENT
+ if ! ip netns exec "$testns" $ynl --family rt-neigh --do newneigh --create \
+ --json "{\"ndm-ifindex\": $ifindex, \"dst\": \"192.0.2.1\", \"lladdr\": \"11:22:33:44:55:66\", \"ndm-family\": 2, \"ndm-state\": 128}" &>/dev/null; then
+ ktap_test_fail "YNL CLI rt-neigh operations (failed to add neighbor)"
+ fi
+
+ local neigh_output
+ neigh_output=$(ip netns exec "$testns" $ynl --family rt-neigh --dump getneigh 2>/dev/null)
+ if echo "$neigh_output" | grep -q "192.0.2.1"; then
+ ktap_test_pass "YNL CLI rt-neigh operations"
+ else
+ ktap_test_fail "YNL CLI rt-neigh operations (failed to verify neighbor)"
+ fi
+
+ ip netns exec "$testns" $ynl --family rt-neigh --do delneigh \
+ --json "{\"ndm-ifindex\": $ifindex, \"dst\": \"192.0.2.1\", \"lladdr\": \"11:22:33:44:55:66\", \"ndm-family\": 2}" &>/dev/null
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+# Test rt-rule family operations
+cli_rt_rule_ops()
+{
+ if ! $ynl --list-families 2>/dev/null | grep -q "rt-rule"; then
+ ktap_test_skip "YNL CLI rt-rule operations (rt-rule family not available)"
+ return
+ fi
+
+ # Add rule: from 192.0.2.0/24 lookup 100 none
+ if ! ip netns exec "$testns" $ynl --family rt-rule --do newrule \
+ --json "{\"family\": 2, \"src-len\": 24, \"src\": \"192.0.2.0\", \"table\": 100}" &>/dev/null; then
+ ktap_test_fail "YNL CLI rt-rule operations (failed to add rule)"
+ return
+ fi
+
+ local rule_output
+ rule_output=$(ip netns exec "$testns" $ynl --family rt-rule --dump getrule 2>/dev/null)
+ if echo "$rule_output" | grep -q "192.0.2.0"; then
+ ktap_test_pass "YNL CLI rt-rule operations"
+ else
+ ktap_test_fail "YNL CLI rt-rule operations (failed to verify rule)"
+ fi
+
+ ip netns exec "$testns" $ynl --family rt-rule --do delrule \
+ --json "{\"family\": 2, \"src-len\": 24, \"src\": \"192.0.2.0\", \"table\": 100}" &>/dev/null
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+# Test nlctrl family operations
+cli_nlctrl_ops()
+{
+ local family_output
+
+ if ! family_output=$($ynl --family nlctrl \
+ --do getfamily --json "{\"family-name\": \"netdev\"}" 2>/dev/null); then
+ ktap_test_fail "YNL CLI nlctrl getfamily (failed to get nlctrl family info)"
+ return
+ fi
+
+ if ! echo "$family_output" | grep -q "family-name"; then
+ ktap_test_fail "YNL CLI nlctrl getfamily (nlctrl getfamily output missing family-name)"
+ return
+ fi
+
+ if ! echo "$family_output" | grep -q "family-id"; then
+ ktap_test_fail "YNL CLI nlctrl getfamily (nlctrl getfamily output missing family-id)"
+ return
+ fi
+
+ ktap_test_pass "YNL CLI nlctrl getfamily"
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+setup()
+{
+ modprobe netdevsim &> /dev/null
+ if ! [ -f /sys/bus/netdevsim/new_device ]; then
+ ktap_skip_all "netdevsim module not available"
+ exit "$KSFT_SKIP"
+ fi
+
+ if ! ip netns add "$testns" 2>/dev/null; then
+ ktap_skip_all "failed to create test namespace"
+ exit "$KSFT_SKIP"
+ fi
+
+ echo "$NSIM_ID 1" | ip netns exec "$testns" tee /sys/bus/netdevsim/new_device >/dev/null 2>&1 || {
+ ktap_skip_all "failed to create netdevsim device"
+ exit "$KSFT_SKIP"
+ }
+
+ local dev
+ dev=$(ip netns exec "$testns" ls /sys/bus/netdevsim/devices/netdevsim$NSIM_ID/net 2>/dev/null | head -1)
+ if [[ -z "$dev" ]]; then
+ ktap_skip_all "failed to find netdevsim device"
+ exit "$KSFT_SKIP"
+ fi
+
+ ip -netns "$testns" link set dev "$dev" name "$NSIM_DEV_NAME" 2>/dev/null || {
+ ktap_skip_all "failed to rename netdevsim device"
+ exit "$KSFT_SKIP"
+ }
+
+ ip -netns "$testns" link set dev "$NSIM_DEV_NAME" up 2>/dev/null
+
+ if ! ip -n "$testns" link add "$VETH_A" type veth peer name "$VETH_B" 2>/dev/null; then
+ ktap_skip_all "failed to create veth pair"
+ exit "$KSFT_SKIP"
+ fi
+
+ ip -n "$testns" link set "$VETH_A" up 2>/dev/null
+ ip -n "$testns" link set "$VETH_B" up 2>/dev/null
+}
+
+cleanup()
+{
+ ip netns exec "$testns" bash -c "echo $NSIM_ID > /sys/bus/netdevsim/del_device" 2>/dev/null || true
+ ip netns del "$testns" 2>/dev/null || true
+}
+
+# Check if ynl command is available
+if ! command -v $ynl &>/dev/null && [[ ! -x $ynl ]]; then
+ ktap_skip_all "ynl command not found: $ynl"
+ exit "$KSFT_SKIP"
+fi
+
+trap cleanup EXIT
+
+ktap_print_header
+setup
+ktap_set_plan "${TESTS_NO}"
+
+cli_list_families
+cli_netdev_ops
+cli_ethtool_ops
+cli_rt_route_ops
+cli_rt_addr_ops
+cli_rt_link_ops
+cli_rt_neigh_ops
+cli_rt_rule_ops
+cli_nlctrl_ops
+
+ktap_finished
diff --git a/tools/net/ynl/tests/test_ynl_ethtool.sh b/tools/net/ynl/tests/test_ynl_ethtool.sh
new file mode 100755
index 000000000000..b826269017f4
--- /dev/null
+++ b/tools/net/ynl/tests/test_ynl_ethtool.sh
@@ -0,0 +1,222 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Test YNL ethtool functionality
+
+# Load KTAP test helpers
+KSELFTEST_KTAP_HELPERS="$(dirname "$(realpath "$0")")/../../../testing/selftests/kselftest/ktap_helpers.sh"
+# shellcheck source=../../../testing/selftests/kselftest/ktap_helpers.sh
+source "$KSELFTEST_KTAP_HELPERS"
+
+# Default ynl-ethtool path for direct execution; can be overridden by "make install"
+ynl_ethtool="../pyynl/ethtool.py"
+
+readonly NSIM_ID="1337"
+readonly NSIM_DEV_NAME="nsim${NSIM_ID}"
+readonly VETH_A="veth_a"
+readonly VETH_B="veth_b"
+
+testns="ynl-ethtool-$(mktemp -u XXXXXX)"
+TESTS_NO=0
+
+# Uses a veth device because netdevsim doesn't support basic ethtool device info
+ethtool_device_info()
+{
+ local info_output
+
+ info_output=$(ip netns exec "$testns" $ynl_ethtool "$VETH_A" 2>/dev/null)
+
+ if ! echo "$info_output" | grep -q "Settings for"; then
+ ktap_test_fail "YNL ethtool device info (device info output missing expected content)"
+ return
+ fi
+
+ ktap_test_pass "YNL ethtool device info"
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+ethtool_statistics()
+{
+ local stats_output
+
+ stats_output=$(ip netns exec "$testns" $ynl_ethtool --statistics "$NSIM_DEV_NAME" 2>/dev/null)
+
+ if ! echo "$stats_output" | grep -q -E "(NIC statistics|packets|bytes)"; then
+ ktap_test_fail "YNL ethtool statistics (statistics output missing expected content)"
+ return
+ fi
+
+ ktap_test_pass "YNL ethtool statistics"
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+ethtool_ring_params()
+{
+ local ring_output
+
+ ring_output=$(ip netns exec "$testns" $ynl_ethtool --show-ring "$NSIM_DEV_NAME" 2>/dev/null)
+
+ if ! echo "$ring_output" | grep -q -E "(Ring parameters|RX|TX)"; then
+ ktap_test_fail "YNL ethtool ring parameters (ring parameters output missing expected content)"
+ return
+ fi
+
+ if ! ip netns exec "$testns" $ynl_ethtool --set-ring "$NSIM_DEV_NAME" rx 64 2>/dev/null; then
+ ktap_test_fail "YNL ethtool ring parameters (set-ring command failed unexpectedly)"
+ return
+ fi
+
+ ktap_test_pass "YNL ethtool ring parameters (show/set)"
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+ethtool_coalesce_params()
+{
+ if ! ip netns exec "$testns" $ynl_ethtool --show-coalesce "$NSIM_DEV_NAME" &>/dev/null; then
+ ktap_test_fail "YNL ethtool coalesce parameters (failed to get coalesce parameters)"
+ return
+ fi
+
+ if ! ip netns exec "$testns" $ynl_ethtool --set-coalesce "$NSIM_DEV_NAME" rx-usecs 50 2>/dev/null; then
+ ktap_test_fail "YNL ethtool coalesce parameters (set-coalesce command failed unexpectedly)"
+ return
+ fi
+
+ ktap_test_pass "YNL ethtool coalesce parameters (show/set)"
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+ethtool_pause_params()
+{
+ if ! ip netns exec "$testns" $ynl_ethtool --show-pause "$NSIM_DEV_NAME" &>/dev/null; then
+ ktap_test_fail "YNL ethtool pause parameters (failed to get pause parameters)"
+ return
+ fi
+
+ if ! ip netns exec "$testns" $ynl_ethtool --set-pause "$NSIM_DEV_NAME" tx 1 rx 1 2>/dev/null; then
+ ktap_test_fail "YNL ethtool pause parameters (set-pause command failed unexpectedly)"
+ return
+ fi
+
+ ktap_test_pass "YNL ethtool pause parameters (show/set)"
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+ethtool_features_info()
+{
+ local features_output
+
+ features_output=$(ip netns exec "$testns" $ynl_ethtool --show-features "$NSIM_DEV_NAME" 2>/dev/null)
+
+ if ! echo "$features_output" | grep -q -E "(Features|offload)"; then
+ ktap_test_fail "YNL ethtool features info (features output missing expected content)"
+ return
+ fi
+
+ ktap_test_pass "YNL ethtool features info (show/set)"
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+ethtool_channels_info()
+{
+ local channels_output
+
+ channels_output=$(ip netns exec "$testns" $ynl_ethtool --show-channels "$NSIM_DEV_NAME" 2>/dev/null)
+
+ if ! echo "$channels_output" | grep -q -E "(Channel|Combined|RX|TX)"; then
+ ktap_test_fail "YNL ethtool channels info (channels output missing expected content)"
+ return
+ fi
+
+ if ! ip netns exec "$testns" $ynl_ethtool --set-channels "$NSIM_DEV_NAME" combined-count 1 2>/dev/null; then
+ ktap_test_fail "YNL ethtool channels info (set-channels command failed unexpectedly)"
+ return
+ fi
+
+ ktap_test_pass "YNL ethtool channels info (show/set)"
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+ethtool_time_stamping()
+{
+ local ts_output
+
+ ts_output=$(ip netns exec "$testns" $ynl_ethtool --show-time-stamping "$NSIM_DEV_NAME" 2>/dev/null)
+
+ if ! echo "$ts_output" | grep -q -E "(Time stamping|timestamping|SOF_TIMESTAMPING)"; then
+ ktap_test_fail "YNL ethtool time stamping (time stamping output missing expected content)"
+ return
+ fi
+
+ ktap_test_pass "YNL ethtool time stamping"
+}
+TESTS_NO=$((TESTS_NO + 1))
+
+setup()
+{
+ modprobe netdevsim &> /dev/null
+ if ! [ -f /sys/bus/netdevsim/new_device ]; then
+ ktap_skip_all "netdevsim module not available"
+ exit "$KSFT_SKIP"
+ fi
+
+ if ! ip netns add "$testns" 2>/dev/null; then
+ ktap_skip_all "failed to create test namespace"
+ exit "$KSFT_SKIP"
+ fi
+
+ echo "$NSIM_ID 1" | ip netns exec "$testns" tee /sys/bus/netdevsim/new_device >/dev/null 2>&1 || {
+ ktap_skip_all "failed to create netdevsim device"
+ exit "$KSFT_SKIP"
+ }
+
+ local dev
+ dev=$(ip netns exec "$testns" ls /sys/bus/netdevsim/devices/netdevsim$NSIM_ID/net 2>/dev/null | head -1)
+ if [[ -z "$dev" ]]; then
+ ktap_skip_all "failed to find netdevsim device"
+ exit "$KSFT_SKIP"
+ fi
+
+ ip -netns "$testns" link set dev "$dev" name "$NSIM_DEV_NAME" 2>/dev/null || {
+ ktap_skip_all "failed to rename netdevsim device"
+ exit "$KSFT_SKIP"
+ }
+
+ ip -netns "$testns" link set dev "$NSIM_DEV_NAME" up 2>/dev/null
+
+ if ! ip -n "$testns" link add "$VETH_A" type veth peer name "$VETH_B" 2>/dev/null; then
+ ktap_skip_all "failed to create veth pair"
+ exit "$KSFT_SKIP"
+ fi
+
+ ip -n "$testns" link set "$VETH_A" up 2>/dev/null
+ ip -n "$testns" link set "$VETH_B" up 2>/dev/null
+}
+
+cleanup()
+{
+ ip netns exec "$testns" bash -c "echo $NSIM_ID > /sys/bus/netdevsim/del_device" 2>/dev/null || true
+ ip netns del "$testns" 2>/dev/null || true
+}
+
+# Check if ynl-ethtool command is available
+if ! command -v $ynl_ethtool &>/dev/null && [[ ! -x $ynl_ethtool ]]; then
+ ktap_skip_all "ynl-ethtool command not found: $ynl_ethtool"
+ exit "$KSFT_SKIP"
+fi
+
+trap cleanup EXIT
+
+ktap_print_header
+setup
+ktap_set_plan "${TESTS_NO}"
+
+ethtool_device_info
+ethtool_statistics
+ethtool_ring_params
+ethtool_coalesce_params
+ethtool_pause_params
+ethtool_features_info
+ethtool_channels_info
+ethtool_time_stamping
+
+ktap_finished
diff --git a/tools/net/ynl/ynltool/.gitignore b/tools/net/ynl/ynltool/.gitignore
new file mode 100644
index 000000000000..690d399c921a
--- /dev/null
+++ b/tools/net/ynl/ynltool/.gitignore
@@ -0,0 +1,2 @@
+ynltool
+*.d
diff --git a/tools/net/ynl/ynltool/Makefile b/tools/net/ynl/ynltool/Makefile
new file mode 100644
index 000000000000..f5b1de32daa5
--- /dev/null
+++ b/tools/net/ynl/ynltool/Makefile
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+include ../Makefile.deps
+
+INSTALL ?= install
+prefix ?= /usr
+
+CC := gcc
+CFLAGS := -Wall -Wextra -Werror -O2
+ifeq ("$(DEBUG)","1")
+ CFLAGS += -g -fsanitize=address -fsanitize=leak -static-libasan
+endif
+CFLAGS += -I../lib -I../generated -I../../../include/uapi/
+
+SRC_VERSION := \
+ $(shell make --no-print-directory -sC ../../../.. kernelversion || \
+ echo "unknown")
+
+CFLAGS += -DSRC_VERSION='"$(SRC_VERSION)"'
+
+SRCS := $(wildcard *.c)
+OBJS := $(patsubst %.c,$(OUTPUT)%.o,$(SRCS))
+
+YNLTOOL := $(OUTPUT)ynltool
+
+include $(wildcard *.d)
+
+all: $(YNLTOOL)
+
+Q = @
+
+$(YNLTOOL): ../libynl.a $(OBJS)
+ $(Q)echo -e "\tLINK $@"
+ $(Q)$(CC) $(CFLAGS) -o $@ $(OBJS) ../libynl.a -lm
+
+%.o: %.c ../libynl.a
+ $(Q)echo -e "\tCC $@"
+ $(Q)$(COMPILE.c) -MMD -c -o $@ $<
+
+../libynl.a:
+ $(Q)$(MAKE) -C ../
+
+clean:
+ rm -f *.o *.d *~
+
+distclean: clean
+ rm -f $(YNLTOOL)
+
+bindir ?= $(prefix)/bin
+
+install: $(YNLTOOL)
+ $(INSTALL) -m 0755 $(YNLTOOL) $(DESTDIR)$(bindir)/$(YNLTOOL)
+
+.PHONY: all clean distclean install
+.DEFAULT_GOAL=all
diff --git a/tools/net/ynl/ynltool/json_writer.c b/tools/net/ynl/ynltool/json_writer.c
new file mode 100644
index 000000000000..c8685e592cd3
--- /dev/null
+++ b/tools/net/ynl/ynltool/json_writer.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause)
+/*
+ * Simple streaming JSON writer
+ *
+ * This takes care of the annoying bits of JSON syntax like the commas
+ * after elements
+ *
+ * Authors: Stephen Hemminger <stephen@networkplumber.org>
+ */
+
+#include <stdio.h>
+#include <stdbool.h>
+#include <stdarg.h>
+#include <assert.h>
+#include <malloc.h>
+#include <inttypes.h>
+#include <stdint.h>
+
+#include "json_writer.h"
+
+struct json_writer {
+ FILE *out;
+ unsigned depth;
+ bool pretty;
+ char sep;
+};
+
+static void jsonw_indent(json_writer_t *self)
+{
+ unsigned i;
+ for (i = 0; i < self->depth; ++i)
+ fputs(" ", self->out);
+}
+
+static void jsonw_eol(json_writer_t *self)
+{
+ if (!self->pretty)
+ return;
+
+ putc('\n', self->out);
+ jsonw_indent(self);
+}
+
+static void jsonw_eor(json_writer_t *self)
+{
+ if (self->sep != '\0')
+ putc(self->sep, self->out);
+ self->sep = ',';
+}
+
+static void jsonw_puts(json_writer_t *self, const char *str)
+{
+ putc('"', self->out);
+ for (; *str; ++str)
+ switch (*str) {
+ case '\t':
+ fputs("\\t", self->out);
+ break;
+ case '\n':
+ fputs("\\n", self->out);
+ break;
+ case '\r':
+ fputs("\\r", self->out);
+ break;
+ case '\f':
+ fputs("\\f", self->out);
+ break;
+ case '\b':
+ fputs("\\b", self->out);
+ break;
+ case '\\':
+ fputs("\\\\", self->out);
+ break;
+ case '"':
+ fputs("\\\"", self->out);
+ break;
+ default:
+ putc(*str, self->out);
+ }
+ putc('"', self->out);
+}
+
+json_writer_t *jsonw_new(FILE *f)
+{
+ json_writer_t *self = malloc(sizeof(*self));
+ if (self) {
+ self->out = f;
+ self->depth = 0;
+ self->pretty = false;
+ self->sep = '\0';
+ }
+ return self;
+}
+
+void jsonw_destroy(json_writer_t **self_p)
+{
+ json_writer_t *self = *self_p;
+
+ assert(self->depth == 0);
+ fputs("\n", self->out);
+ fflush(self->out);
+ free(self);
+ *self_p = NULL;
+}
+
+void jsonw_pretty(json_writer_t *self, bool on)
+{
+ self->pretty = on;
+}
+
+void jsonw_reset(json_writer_t *self)
+{
+ assert(self->depth == 0);
+ self->sep = '\0';
+}
+
+static void jsonw_begin(json_writer_t *self, int c)
+{
+ jsonw_eor(self);
+ putc(c, self->out);
+ ++self->depth;
+ self->sep = '\0';
+}
+
+static void jsonw_end(json_writer_t *self, int c)
+{
+ assert(self->depth > 0);
+
+ --self->depth;
+ if (self->sep != '\0')
+ jsonw_eol(self);
+ putc(c, self->out);
+ self->sep = ',';
+}
+
+void jsonw_name(json_writer_t *self, const char *name)
+{
+ jsonw_eor(self);
+ jsonw_eol(self);
+ self->sep = '\0';
+ jsonw_puts(self, name);
+ putc(':', self->out);
+ if (self->pretty)
+ putc(' ', self->out);
+}
+
+void jsonw_vprintf_enquote(json_writer_t *self, const char *fmt, va_list ap)
+{
+ jsonw_eor(self);
+ putc('"', self->out);
+ vfprintf(self->out, fmt, ap);
+ putc('"', self->out);
+}
+
+void jsonw_printf(json_writer_t *self, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ jsonw_eor(self);
+ vfprintf(self->out, fmt, ap);
+ va_end(ap);
+}
+
+void jsonw_start_object(json_writer_t *self)
+{
+ jsonw_begin(self, '{');
+}
+
+void jsonw_end_object(json_writer_t *self)
+{
+ jsonw_end(self, '}');
+}
+
+void jsonw_start_array(json_writer_t *self)
+{
+ jsonw_begin(self, '[');
+}
+
+void jsonw_end_array(json_writer_t *self)
+{
+ jsonw_end(self, ']');
+}
+
+void jsonw_string(json_writer_t *self, const char *value)
+{
+ jsonw_eor(self);
+ jsonw_puts(self, value);
+}
+
+void jsonw_bool(json_writer_t *self, bool val)
+{
+ jsonw_printf(self, "%s", val ? "true" : "false");
+}
+
+void jsonw_null(json_writer_t *self)
+{
+ jsonw_printf(self, "null");
+}
+
+void jsonw_float_fmt(json_writer_t *self, const char *fmt, double num)
+{
+ jsonw_printf(self, fmt, num);
+}
+
+void jsonw_float(json_writer_t *self, double num)
+{
+ jsonw_printf(self, "%g", num);
+}
+
+void jsonw_hu(json_writer_t *self, unsigned short num)
+{
+ jsonw_printf(self, "%hu", num);
+}
+
+void jsonw_uint(json_writer_t *self, uint64_t num)
+{
+ jsonw_printf(self, "%"PRIu64, num);
+}
+
+void jsonw_lluint(json_writer_t *self, unsigned long long int num)
+{
+ jsonw_printf(self, "%llu", num);
+}
+
+void jsonw_int(json_writer_t *self, int64_t num)
+{
+ jsonw_printf(self, "%"PRId64, num);
+}
+
+void jsonw_string_field(json_writer_t *self, const char *prop, const char *val)
+{
+ jsonw_name(self, prop);
+ jsonw_string(self, val);
+}
+
+void jsonw_bool_field(json_writer_t *self, const char *prop, bool val)
+{
+ jsonw_name(self, prop);
+ jsonw_bool(self, val);
+}
+
+void jsonw_float_field(json_writer_t *self, const char *prop, double val)
+{
+ jsonw_name(self, prop);
+ jsonw_float(self, val);
+}
+
+void jsonw_float_field_fmt(json_writer_t *self,
+ const char *prop,
+ const char *fmt,
+ double val)
+{
+ jsonw_name(self, prop);
+ jsonw_float_fmt(self, fmt, val);
+}
+
+void jsonw_uint_field(json_writer_t *self, const char *prop, uint64_t num)
+{
+ jsonw_name(self, prop);
+ jsonw_uint(self, num);
+}
+
+void jsonw_hu_field(json_writer_t *self, const char *prop, unsigned short num)
+{
+ jsonw_name(self, prop);
+ jsonw_hu(self, num);
+}
+
+void jsonw_lluint_field(json_writer_t *self,
+ const char *prop,
+ unsigned long long int num)
+{
+ jsonw_name(self, prop);
+ jsonw_lluint(self, num);
+}
+
+void jsonw_int_field(json_writer_t *self, const char *prop, int64_t num)
+{
+ jsonw_name(self, prop);
+ jsonw_int(self, num);
+}
+
+void jsonw_null_field(json_writer_t *self, const char *prop)
+{
+ jsonw_name(self, prop);
+ jsonw_null(self);
+}
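A minimal, self-contained usage sketch (not part of the patch) showing how the 'sep' state machine above spares callers from ever emitting commas themselves:

#include <stdio.h>
#include "json_writer.h"

int main(void)
{
	json_writer_t *w = jsonw_new(stdout);

	jsonw_start_object(w);                   /* prints '{'            */
	jsonw_string_field(w, "ifname", "eth0"); /* "ifname":"eth0"       */
	jsonw_name(w, "queues");                 /* jsonw_eor() adds ','  */
	jsonw_start_array(w);                    /* '[' resets sep        */
	jsonw_uint(w, 0);                        /* first item: no comma  */
	jsonw_uint(w, 1);                        /* jsonw_eor() adds ','  */
	jsonw_end_array(w);
	jsonw_end_object(w);
	jsonw_destroy(&w);  /* asserts depth == 0, prints '\n', flushes */

	/* Output: {"ifname":"eth0","queues":[0,1]} */
	return 0;
}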
diff --git a/tools/net/ynl/ynltool/json_writer.h b/tools/net/ynl/ynltool/json_writer.h
new file mode 100644
index 000000000000..0f1e63c88f6a
--- /dev/null
+++ b/tools/net/ynl/ynltool/json_writer.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Simple streaming JSON writer
+ *
+ * This takes care of the annoying bits of JSON syntax like the commas
+ * after elements
+ *
+ * Authors: Stephen Hemminger <stephen@networkplumber.org>
+ */
+
+#ifndef _JSON_WRITER_H_
+#define _JSON_WRITER_H_
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <stdio.h>
+
+/* Opaque class structure */
+typedef struct json_writer json_writer_t;
+
+/* Create a new JSON stream */
+json_writer_t *jsonw_new(FILE *f);
+/* End output to JSON stream */
+void jsonw_destroy(json_writer_t **self_p);
+
+/* Cause output to have pretty whitespace */
+void jsonw_pretty(json_writer_t *self, bool on);
+
+/* Reset separator to create new JSON */
+void jsonw_reset(json_writer_t *self);
+
+/* Add property name */
+void jsonw_name(json_writer_t *self, const char *name);
+
+/* Add value */
+void __attribute__((format(printf, 2, 0))) jsonw_vprintf_enquote(json_writer_t *self,
+ const char *fmt,
+ va_list ap);
+void __attribute__((format(printf, 2, 3))) jsonw_printf(json_writer_t *self,
+ const char *fmt, ...);
+void jsonw_string(json_writer_t *self, const char *value);
+void jsonw_bool(json_writer_t *self, bool value);
+void jsonw_float(json_writer_t *self, double number);
+void jsonw_float_fmt(json_writer_t *self, const char *fmt, double num);
+void jsonw_uint(json_writer_t *self, uint64_t number);
+void jsonw_hu(json_writer_t *self, unsigned short number);
+void jsonw_int(json_writer_t *self, int64_t number);
+void jsonw_null(json_writer_t *self);
+void jsonw_lluint(json_writer_t *self, unsigned long long int num);
+
+/* Useful Combinations of name and value */
+void jsonw_string_field(json_writer_t *self, const char *prop, const char *val);
+void jsonw_bool_field(json_writer_t *self, const char *prop, bool value);
+void jsonw_float_field(json_writer_t *self, const char *prop, double num);
+void jsonw_uint_field(json_writer_t *self, const char *prop, uint64_t num);
+void jsonw_hu_field(json_writer_t *self, const char *prop, unsigned short num);
+void jsonw_int_field(json_writer_t *self, const char *prop, int64_t num);
+void jsonw_null_field(json_writer_t *self, const char *prop);
+void jsonw_lluint_field(json_writer_t *self, const char *prop,
+ unsigned long long int num);
+void jsonw_float_field_fmt(json_writer_t *self, const char *prop,
+ const char *fmt, double val);
+
+/* Collections */
+void jsonw_start_object(json_writer_t *self);
+void jsonw_end_object(json_writer_t *self);
+
+void jsonw_start_array(json_writer_t *self);
+void jsonw_end_array(json_writer_t *self);
+
+/* Override default exception handling */
+typedef void (jsonw_err_handler_fn)(const char *);
+
+#endif /* _JSON_WRITER_H_ */
diff --git a/tools/net/ynl/ynltool/main.c b/tools/net/ynl/ynltool/main.c
new file mode 100644
index 000000000000..5d0f428eed0a
--- /dev/null
+++ b/tools/net/ynl/ynltool/main.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
+/* Copyright Meta Platforms, Inc. and affiliates */
+
+#include <ctype.h>
+#include <errno.h>
+#include <getopt.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+
+#include "main.h"
+
+const char *bin_name;
+static int last_argc;
+static char **last_argv;
+static int (*last_do_help)(int argc, char **argv);
+json_writer_t *json_wtr;
+bool pretty_output;
+bool json_output;
+
+static void __attribute__((noreturn)) clean_and_exit(int i)
+{
+ if (json_output)
+ jsonw_destroy(&json_wtr);
+
+ exit(i);
+}
+
+void usage(void)
+{
+ last_do_help(last_argc - 1, last_argv + 1);
+
+ clean_and_exit(-1);
+}
+
+static int do_help(int argc __attribute__((unused)),
+ char **argv __attribute__((unused)))
+{
+ if (json_output) {
+ jsonw_null(json_wtr);
+ return 0;
+ }
+
+ fprintf(stderr,
+ "Usage: %s [OPTIONS] OBJECT { COMMAND | help }\n"
+ " %s version\n"
+ "\n"
+ " OBJECT := { page-pool | qstats }\n"
+ " " HELP_SPEC_OPTIONS "\n"
+ "",
+ bin_name, bin_name);
+
+ return 0;
+}
+
+static int do_version(int argc __attribute__((unused)),
+ char **argv __attribute__((unused)))
+{
+ if (json_output) {
+ jsonw_start_object(json_wtr);
+ jsonw_name(json_wtr, "version");
+ jsonw_printf(json_wtr, SRC_VERSION);
+ jsonw_end_object(json_wtr);
+ } else {
+ printf("%s " SRC_VERSION "\n", bin_name);
+ }
+ return 0;
+}
+
+static const struct cmd commands[] = {
+ { "help", do_help },
+ { "page-pool", do_page_pool },
+ { "qstats", do_qstats },
+ { "version", do_version },
+ { 0 }
+};
+
+int cmd_select(const struct cmd *cmds, int argc, char **argv,
+ int (*help)(int argc, char **argv))
+{
+ unsigned int i;
+
+ last_argc = argc;
+ last_argv = argv;
+ last_do_help = help;
+
+ if (argc < 1 && cmds[0].func)
+ return cmds[0].func(argc, argv);
+
+ for (i = 0; cmds[i].cmd; i++) {
+ if (is_prefix(*argv, cmds[i].cmd)) {
+ if (!cmds[i].func) {
+ p_err("command '%s' is not available", cmds[i].cmd);
+ return -1;
+ }
+ return cmds[i].func(argc - 1, argv + 1);
+ }
+ }
+
+ help(argc - 1, argv + 1);
+
+ return -1;
+}
+
+bool is_prefix(const char *pfx, const char *str)
+{
+ if (!pfx)
+ return false;
+ if (strlen(str) < strlen(pfx))
+ return false;
+
+ return !memcmp(str, pfx, strlen(pfx));
+}
+
+/* Last argument MUST be NULL pointer */
+int detect_common_prefix(const char *arg, ...)
+{
+ unsigned int count = 0;
+ const char *ref;
+ char msg[256];
+ va_list ap;
+
+ snprintf(msg, sizeof(msg), "ambiguous prefix: '%s' could be '", arg);
+ va_start(ap, arg);
+ while ((ref = va_arg(ap, const char *))) {
+ if (!is_prefix(arg, ref))
+ continue;
+ count++;
+ if (count > 1)
+ strncat(msg, "' or '", sizeof(msg) - strlen(msg) - 1);
+ strncat(msg, ref, sizeof(msg) - strlen(msg) - 1);
+ }
+ va_end(ap);
+ strncat(msg, "'", sizeof(msg) - strlen(msg) - 1);
+
+ if (count >= 2) {
+ p_err("%s", msg);
+ return -1;
+ }
+
+ return 0;
+}
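A small illustration of the two helpers above (prefix_demo() and the "dump"/"dup" commands are hypothetical):

static void prefix_demo(void)
{
	/* is_prefix() is a plain byte-wise prefix test */
	is_prefix("page", "page-pool"); /* true:  "ynltool page" selects do_page_pool() */
	is_prefix("q", "qstats");       /* true:  a single letter is enough if unique */
	is_prefix("pp", "page-pool");   /* false: second byte differs ('p' vs 'a') */

	/* detect_common_prefix() rejects ambiguous abbreviations */
	detect_common_prefix("du", "dump", "dup", NULL);
	/* returns -1 and prints:
	 * "Error: ambiguous prefix: 'du' could be 'dump' or 'dup'"
	 */
}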
+
+void p_err(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ if (json_output) {
+ jsonw_start_object(json_wtr);
+ jsonw_name(json_wtr, "error");
+ jsonw_vprintf_enquote(json_wtr, fmt, ap);
+ jsonw_end_object(json_wtr);
+ } else {
+ fprintf(stderr, "Error: ");
+ vfprintf(stderr, fmt, ap);
+ fprintf(stderr, "\n");
+ }
+ va_end(ap);
+}
+
+void p_info(const char *fmt, ...)
+{
+ va_list ap;
+
+ if (json_output)
+ return;
+
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ fprintf(stderr, "\n");
+ va_end(ap);
+}
+
+int main(int argc, char **argv)
+{
+ static const struct option options[] = {
+ { "json", no_argument, NULL, 'j' },
+ { "help", no_argument, NULL, 'h' },
+ { "pretty", no_argument, NULL, 'p' },
+ { "version", no_argument, NULL, 'V' },
+ { 0 }
+ };
+ bool version_requested = false;
+ int opt, ret;
+
+ setlinebuf(stdout);
+
+ last_do_help = do_help;
+ pretty_output = false;
+ json_output = false;
+ bin_name = "ynltool";
+
+ opterr = 0;
+ while ((opt = getopt_long(argc, argv, "Vhjp",
+ options, NULL)) >= 0) {
+ switch (opt) {
+ case 'V':
+ version_requested = true;
+ break;
+ case 'h':
+ return do_help(argc, argv);
+ case 'p':
+ pretty_output = true;
+ /* fall through */
+ case 'j':
+ if (!json_output) {
+ json_wtr = jsonw_new(stdout);
+ if (!json_wtr) {
+ p_err("failed to create JSON writer");
+ return -1;
+ }
+ json_output = true;
+ }
+ jsonw_pretty(json_wtr, pretty_output);
+ break;
+ default:
+ p_err("unrecognized option '%s'", argv[optind - 1]);
+ if (json_output)
+ clean_and_exit(-1);
+ else
+ usage();
+ }
+ }
+
+ argc -= optind;
+ argv += optind;
+ if (argc < 0)
+ usage();
+
+ if (version_requested)
+ ret = do_version(argc, argv);
+ else
+ ret = cmd_select(commands, argc, argv, do_help);
+
+ if (json_output)
+ jsonw_destroy(&json_wtr);
+
+ return ret;
+}
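One subtlety worth calling out in the option loop above:

/* The fallthrough from 'p' into 'j' is deliberate: --pretty implies
 * JSON output, so "ynltool -p qstats" behaves like "ynltool -j -p
 * qstats", while plain "-j" emits compact single-line JSON.
 */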
diff --git a/tools/net/ynl/ynltool/main.h b/tools/net/ynl/ynltool/main.h
new file mode 100644
index 000000000000..c7039f9ac55a
--- /dev/null
+++ b/tools/net/ynl/ynltool/main.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
+/* Copyright Meta Platforms, Inc. and affiliates */
+
+#ifndef __YNLTOOL_H
+#define __YNLTOOL_H
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+
+#include "json_writer.h"
+
+#define NEXT_ARG() ({ argc--; argv++; if (argc < 0) usage(); })
+#define NEXT_ARGP() ({ (*argc)--; (*argv)++; if (*argc < 0) usage(); })
+#define BAD_ARG() ({ p_err("what is '%s'?", *argv); -1; })
+#define GET_ARG() ({ argc--; *argv++; })
+#define REQ_ARGS(cnt) \
+ ({ \
+ int _cnt = (cnt); \
+ bool _res; \
+ \
+ if (argc < _cnt) { \
+ p_err("'%s' needs at least %d arguments, %d found", \
+ argv[-1], _cnt, argc); \
+ _res = false; \
+ } else { \
+ _res = true; \
+ } \
+ _res; \
+ })
+
+#define HELP_SPEC_OPTIONS \
+ "OPTIONS := { {-j|--json} [{-p|--pretty}] }"
+
+extern const char *bin_name;
+
+extern json_writer_t *json_wtr;
+extern bool json_output;
+extern bool pretty_output;
+
+void __attribute__((format(printf, 1, 2))) p_err(const char *fmt, ...);
+void __attribute__((format(printf, 1, 2))) p_info(const char *fmt, ...);
+
+bool is_prefix(const char *pfx, const char *str);
+int detect_common_prefix(const char *arg, ...);
+void usage(void) __attribute__((noreturn));
+
+struct cmd {
+ const char *cmd;
+ int (*func)(int argc, char **argv);
+};
+
+int cmd_select(const struct cmd *cmds, int argc, char **argv,
+ int (*help)(int argc, char **argv));
+
+/* subcommands */
+int do_page_pool(int argc, char **argv);
+int do_qstats(int argc, char **argv);
+
+#endif /* __YNLTOOL_H */
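A sketch of how a subcommand handler consumes its arguments with these macros (do_example() and the "ifindex" keyword are hypothetical; the macros are the ones defined above):

static int do_example(int argc, char **argv)
{
	if (!REQ_ARGS(2))            /* expect "ifindex <num>" */
		return -1;

	if (!is_prefix(*argv, "ifindex"))
		return BAD_ARG();    /* prints "Error: what is '...'?" */

	NEXT_ARG();                  /* advance to the value; usage() only
				      * fires if argc would go negative */
	printf("ifindex = %s\n", *argv);
	return 0;
}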
diff --git a/tools/net/ynl/ynltool/page-pool.c b/tools/net/ynl/ynltool/page-pool.c
new file mode 100644
index 000000000000..4b24492abab7
--- /dev/null
+++ b/tools/net/ynl/ynltool/page-pool.c
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <net/if.h>
+
+#include <ynl.h>
+#include "netdev-user.h"
+
+#include "main.h"
+
+struct pp_stat {
+ unsigned int ifc;
+
+ struct {
+ unsigned int cnt;
+ size_t refs, bytes;
+ } live[2];
+
+ size_t alloc_slow, alloc_fast, recycle_ring, recycle_cache;
+};
+
+struct pp_stats_array {
+ unsigned int i, max;
+ struct pp_stat *s;
+};
+
+static struct pp_stat *find_ifc(struct pp_stats_array *a, unsigned int ifindex)
+{
+ unsigned int i;
+
+ for (i = 0; i < a->i; i++) {
+ if (a->s[i].ifc == ifindex)
+ return &a->s[i];
+ }
+
+	a->i++;
+	if (a->i == a->max) {
+		a->max *= 2;
+		a->s = reallocarray(a->s, a->max, sizeof(*a->s));
+		if (!a->s) {
+			p_err("failed to grow stats array");
+			exit(1);
+		}
+	}
+	/* reallocarray() does not zero the new slots, do it ourselves */
+	memset(&a->s[i], 0, sizeof(a->s[i]));
+	a->s[i].ifc = ifindex;
+	return &a->s[i];
+}
+
+static void count_pool(struct pp_stat *s, unsigned int l,
+ struct netdev_page_pool_get_rsp *pp)
+{
+ s->live[l].cnt++;
+ if (pp->_present.inflight)
+ s->live[l].refs += pp->inflight;
+ if (pp->_present.inflight_mem)
+ s->live[l].bytes += pp->inflight_mem;
+}
+
+/* We don't know how many pages are sitting in the cache and ring,
+ * so we under-count the recycling rate a bit.
+ */
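+/*
+ * Illustrative numbers (not taken from a real device): alloc_fast=900,
+ * alloc_slow=100, recycle_ring=600 and recycle_cache=300 give
+ * (600 + 300) / (900 + 100) * 100 = 90.0% recycling.
+ */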
+static void print_json_recycling_stats(struct pp_stat *s)
+{
+ double recycle;
+
+ if (s->alloc_fast + s->alloc_slow) {
+ recycle = (double)(s->recycle_ring + s->recycle_cache) /
+ (s->alloc_fast + s->alloc_slow) * 100;
+ jsonw_float_field(json_wtr, "recycling_pct", recycle);
+ }
+
+ jsonw_name(json_wtr, "alloc");
+ jsonw_start_object(json_wtr);
+ jsonw_uint_field(json_wtr, "slow", s->alloc_slow);
+ jsonw_uint_field(json_wtr, "fast", s->alloc_fast);
+ jsonw_end_object(json_wtr);
+
+ jsonw_name(json_wtr, "recycle");
+ jsonw_start_object(json_wtr);
+ jsonw_uint_field(json_wtr, "ring", s->recycle_ring);
+ jsonw_uint_field(json_wtr, "cache", s->recycle_cache);
+ jsonw_end_object(json_wtr);
+}
+
+static void print_plain_recycling_stats(struct pp_stat *s)
+{
+ double recycle;
+
+ if (s->alloc_fast + s->alloc_slow) {
+ recycle = (double)(s->recycle_ring + s->recycle_cache) /
+ (s->alloc_fast + s->alloc_slow) * 100;
+ printf("recycling: %.1lf%% (alloc: %zu:%zu recycle: %zu:%zu)",
+ recycle, s->alloc_slow, s->alloc_fast,
+ s->recycle_ring, s->recycle_cache);
+ }
+}
+
+static void print_json_stats(struct pp_stats_array *a)
+{
+ jsonw_start_array(json_wtr);
+
+ for (unsigned int i = 0; i < a->i; i++) {
+ char ifname[IF_NAMESIZE];
+ struct pp_stat *s = &a->s[i];
+ const char *name;
+
+ jsonw_start_object(json_wtr);
+
+ if (!s->ifc) {
+ jsonw_string_field(json_wtr, "ifname", "<orphan>");
+ jsonw_uint_field(json_wtr, "ifindex", 0);
+ } else {
+ name = if_indextoname(s->ifc, ifname);
+ if (name)
+ jsonw_string_field(json_wtr, "ifname", name);
+ jsonw_uint_field(json_wtr, "ifindex", s->ifc);
+ }
+
+ jsonw_uint_field(json_wtr, "page_pools", s->live[1].cnt);
+ jsonw_uint_field(json_wtr, "zombies", s->live[0].cnt);
+
+ jsonw_name(json_wtr, "live");
+ jsonw_start_object(json_wtr);
+ jsonw_uint_field(json_wtr, "refs", s->live[1].refs);
+ jsonw_uint_field(json_wtr, "bytes", s->live[1].bytes);
+ jsonw_end_object(json_wtr);
+
+ jsonw_name(json_wtr, "zombie");
+ jsonw_start_object(json_wtr);
+ jsonw_uint_field(json_wtr, "refs", s->live[0].refs);
+ jsonw_uint_field(json_wtr, "bytes", s->live[0].bytes);
+ jsonw_end_object(json_wtr);
+
+ if (s->alloc_fast || s->alloc_slow)
+ print_json_recycling_stats(s);
+
+ jsonw_end_object(json_wtr);
+ }
+
+ jsonw_end_array(json_wtr);
+}
+
+static void print_plain_stats(struct pp_stats_array *a)
+{
+ for (unsigned int i = 0; i < a->i; i++) {
+ char ifname[IF_NAMESIZE];
+ struct pp_stat *s = &a->s[i];
+ const char *name;
+
+ if (!s->ifc) {
+ printf("<orphan>\t");
+ } else {
+ name = if_indextoname(s->ifc, ifname);
+ if (name)
+ printf("%8s", name);
+ printf("[%u]\t", s->ifc);
+ }
+
+ printf("page pools: %u (zombies: %u)\n",
+ s->live[1].cnt, s->live[0].cnt);
+ printf("\t\trefs: %zu bytes: %zu (refs: %zu bytes: %zu)\n",
+ s->live[1].refs, s->live[1].bytes,
+ s->live[0].refs, s->live[0].bytes);
+
+ if (s->alloc_fast || s->alloc_slow) {
+ printf("\t\t");
+ print_plain_recycling_stats(s);
+ printf("\n");
+ }
+ }
+}
+
+static bool
+find_pool_stat_in_list(struct netdev_page_pool_stats_get_list *pp_stats,
+ __u64 pool_id, struct pp_stat *pstat)
+{
+ ynl_dump_foreach(pp_stats, pp) {
+ if (!pp->_present.info || !pp->info._present.id)
+ continue;
+ if (pp->info.id != pool_id)
+ continue;
+
+ memset(pstat, 0, sizeof(*pstat));
+ if (pp->_present.alloc_fast)
+ pstat->alloc_fast = pp->alloc_fast;
+ if (pp->_present.alloc_refill)
+ pstat->alloc_fast += pp->alloc_refill;
+ if (pp->_present.alloc_slow)
+ pstat->alloc_slow = pp->alloc_slow;
+ if (pp->_present.recycle_ring)
+ pstat->recycle_ring = pp->recycle_ring;
+ if (pp->_present.recycle_cached)
+ pstat->recycle_cache = pp->recycle_cached;
+ return true;
+ }
+ return false;
+}
+
+static void
+print_json_pool_list(struct netdev_page_pool_get_list *pools,
+ struct netdev_page_pool_stats_get_list *pp_stats,
+ bool zombies_only)
+{
+ jsonw_start_array(json_wtr);
+
+ ynl_dump_foreach(pools, pp) {
+ char ifname[IF_NAMESIZE];
+ struct pp_stat pstat;
+ const char *name;
+
+ if (zombies_only && !pp->_present.detach_time)
+ continue;
+
+ jsonw_start_object(json_wtr);
+
+ jsonw_uint_field(json_wtr, "id", pp->id);
+
+ if (pp->_present.ifindex) {
+ name = if_indextoname(pp->ifindex, ifname);
+ if (name)
+ jsonw_string_field(json_wtr, "ifname", name);
+ jsonw_uint_field(json_wtr, "ifindex", pp->ifindex);
+ }
+
+ if (pp->_present.napi_id)
+ jsonw_uint_field(json_wtr, "napi_id", pp->napi_id);
+
+ if (pp->_present.inflight)
+ jsonw_uint_field(json_wtr, "refs", pp->inflight);
+
+ if (pp->_present.inflight_mem)
+ jsonw_uint_field(json_wtr, "bytes", pp->inflight_mem);
+
+ if (pp->_present.detach_time)
+ jsonw_uint_field(json_wtr, "detach_time", pp->detach_time);
+
+ if (pp->_present.dmabuf)
+ jsonw_uint_field(json_wtr, "dmabuf", pp->dmabuf);
+
+ if (find_pool_stat_in_list(pp_stats, pp->id, &pstat) &&
+ (pstat.alloc_fast || pstat.alloc_slow))
+ print_json_recycling_stats(&pstat);
+
+ jsonw_end_object(json_wtr);
+ }
+
+ jsonw_end_array(json_wtr);
+}
+
+static void
+print_plain_pool_list(struct netdev_page_pool_get_list *pools,
+ struct netdev_page_pool_stats_get_list *pp_stats,
+ bool zombies_only)
+{
+ ynl_dump_foreach(pools, pp) {
+ char ifname[IF_NAMESIZE];
+ struct pp_stat pstat;
+ const char *name;
+
+ if (zombies_only && !pp->_present.detach_time)
+ continue;
+
+ printf("pool id: %llu", pp->id);
+
+ if (pp->_present.ifindex) {
+ name = if_indextoname(pp->ifindex, ifname);
+ if (name)
+ printf(" dev: %s", name);
+ printf("[%u]", pp->ifindex);
+ }
+
+ if (pp->_present.napi_id)
+ printf(" napi: %llu", pp->napi_id);
+
+ printf("\n");
+
+ if (pp->_present.inflight || pp->_present.inflight_mem) {
+ printf(" inflight:");
+ if (pp->_present.inflight)
+ printf(" %llu pages", pp->inflight);
+ if (pp->_present.inflight_mem)
+ printf(" %llu bytes", pp->inflight_mem);
+ printf("\n");
+ }
+
+ if (pp->_present.detach_time)
+ printf(" detached: %llu\n", pp->detach_time);
+
+ if (pp->_present.dmabuf)
+ printf(" dmabuf: %u\n", pp->dmabuf);
+
+ if (find_pool_stat_in_list(pp_stats, pp->id, &pstat) &&
+ (pstat.alloc_fast || pstat.alloc_slow)) {
+ printf(" ");
+ print_plain_recycling_stats(&pstat);
+ printf("\n");
+ }
+ }
+}
+
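+/*
+ * Fold the per-pool dumps into per-interface totals: the pool dump
+ * contributes the live/zombie counts and in-flight refs/bytes, the
+ * stats dump contributes the alloc/recycle counters.
+ */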
+static void aggregate_device_stats(struct pp_stats_array *a,
+ struct netdev_page_pool_get_list *pools,
+ struct netdev_page_pool_stats_get_list *pp_stats)
+{
+ ynl_dump_foreach(pools, pp) {
+ struct pp_stat *s = find_ifc(a, pp->ifindex);
+
+ count_pool(s, 1, pp);
+ if (pp->_present.detach_time)
+ count_pool(s, 0, pp);
+ }
+
+ ynl_dump_foreach(pp_stats, pp) {
+ struct pp_stat *s = find_ifc(a, pp->info.ifindex);
+
+ if (pp->_present.alloc_fast)
+ s->alloc_fast += pp->alloc_fast;
+ if (pp->_present.alloc_refill)
+ s->alloc_fast += pp->alloc_refill;
+ if (pp->_present.alloc_slow)
+ s->alloc_slow += pp->alloc_slow;
+ if (pp->_present.recycle_ring)
+ s->recycle_ring += pp->recycle_ring;
+ if (pp->_present.recycle_cached)
+ s->recycle_cache += pp->recycle_cached;
+ }
+}
+
+static int do_stats(int argc, char **argv)
+{
+ struct netdev_page_pool_stats_get_list *pp_stats;
+ struct netdev_page_pool_get_list *pools;
+ enum {
+ GROUP_BY_DEVICE,
+ GROUP_BY_POOL,
+ } group_by = GROUP_BY_DEVICE;
+ bool zombies_only = false;
+ struct pp_stats_array a = {};
+ struct ynl_error yerr;
+ struct ynl_sock *ys;
+ int ret = 0;
+
+ /* Parse options */
+ while (argc > 0) {
+ if (is_prefix(*argv, "group-by")) {
+ NEXT_ARG();
+
+ if (!REQ_ARGS(1))
+ return -1;
+
+ if (is_prefix(*argv, "device")) {
+ group_by = GROUP_BY_DEVICE;
+ } else if (is_prefix(*argv, "pp") ||
+ is_prefix(*argv, "page-pool") ||
+ is_prefix(*argv, "none")) {
+ group_by = GROUP_BY_POOL;
+ } else {
+ p_err("invalid group-by value '%s'", *argv);
+ return -1;
+ }
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "zombies")) {
+ zombies_only = true;
+ group_by = GROUP_BY_POOL;
+ NEXT_ARG();
+ } else {
+ p_err("unknown option '%s'", *argv);
+ return -1;
+ }
+ }
+
+ ys = ynl_sock_create(&ynl_netdev_family, &yerr);
+ if (!ys) {
+ p_err("YNL: %s", yerr.msg);
+ return -1;
+ }
+
+ pools = netdev_page_pool_get_dump(ys);
+ if (!pools) {
+ p_err("failed to get page pools: %s", ys->err.msg);
+ ret = -1;
+ goto exit_close;
+ }
+
+ pp_stats = netdev_page_pool_stats_get_dump(ys);
+ if (!pp_stats) {
+ p_err("failed to get page pool stats: %s", ys->err.msg);
+ ret = -1;
+ goto exit_free_pp_list;
+ }
+
+ /* If grouping by pool, print individual pools */
+ if (group_by == GROUP_BY_POOL) {
+ if (json_output)
+ print_json_pool_list(pools, pp_stats, zombies_only);
+ else
+ print_plain_pool_list(pools, pp_stats, zombies_only);
+ } else {
+ /* Aggregated stats mode (group-by device) */
+ a.max = 64;
+ a.s = calloc(a.max, sizeof(*a.s));
+ if (!a.s) {
+ p_err("failed to allocate stats array");
+ ret = -1;
+ goto exit_free_stats_list;
+ }
+
+ aggregate_device_stats(&a, pools, pp_stats);
+
+ if (json_output)
+ print_json_stats(&a);
+ else
+ print_plain_stats(&a);
+
+ free(a.s);
+ }
+
+exit_free_stats_list:
+ netdev_page_pool_stats_get_list_free(pp_stats);
+exit_free_pp_list:
+ netdev_page_pool_get_list_free(pools);
+exit_close:
+ ynl_sock_destroy(ys);
+ return ret;
+}
+
+static int do_help(int argc __attribute__((unused)),
+ char **argv __attribute__((unused)))
+{
+ if (json_output) {
+ jsonw_null(json_wtr);
+ return 0;
+ }
+
+ fprintf(stderr,
+ "Usage: %s page-pool { COMMAND | help }\n"
+ " %s page-pool stats [ OPTIONS ]\n"
+ "\n"
+ " OPTIONS := { group-by { device | page-pool | none } | zombies }\n"
+ "\n"
+ " stats - Display page pool statistics\n"
+ " stats group-by device - Group statistics by network device (default)\n"
+ " stats group-by page-pool | pp | none\n"
+ " - Show individual page pool details (no grouping)\n"
+ " stats zombies - Show only zombie page pools (detached but with\n"
+ " pages in flight). Implies group-by page-pool.\n"
+ "",
+ bin_name, bin_name);
+
+ return 0;
+}
+
+static const struct cmd page_pool_cmds[] = {
+ { "help", do_help },
+ { "stats", do_stats },
+ { 0 }
+};
+
+int do_page_pool(int argc, char **argv)
+{
+ return cmd_select(page_pool_cmds, argc, argv, do_help);
+}
diff --git a/tools/net/ynl/ynltool/qstats.c b/tools/net/ynl/ynltool/qstats.c
new file mode 100644
index 000000000000..31fb45709ffa
--- /dev/null
+++ b/tools/net/ynl/ynltool/qstats.c
@@ -0,0 +1,621 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <net/if.h>
+#include <math.h>
+
+#include <ynl.h>
+#include "netdev-user.h"
+
+#include "main.h"
+
+static enum netdev_qstats_scope scope; /* default - device */
+
+struct queue_balance {
+ unsigned int ifindex;
+ enum netdev_queue_type type;
+ unsigned int queue_count;
+ __u64 *rx_packets;
+ __u64 *rx_bytes;
+ __u64 *tx_packets;
+ __u64 *tx_bytes;
+};
+
+static void print_json_qstats(struct netdev_qstats_get_list *qstats)
+{
+ jsonw_start_array(json_wtr);
+
+ ynl_dump_foreach(qstats, qs) {
+ char ifname[IF_NAMESIZE];
+ const char *name;
+
+ jsonw_start_object(json_wtr);
+
+ name = if_indextoname(qs->ifindex, ifname);
+ if (name)
+ jsonw_string_field(json_wtr, "ifname", name);
+ jsonw_uint_field(json_wtr, "ifindex", qs->ifindex);
+
+ if (qs->_present.queue_type)
+ jsonw_string_field(json_wtr, "queue-type",
+ netdev_queue_type_str(qs->queue_type));
+ if (qs->_present.queue_id)
+ jsonw_uint_field(json_wtr, "queue-id", qs->queue_id);
+
+ if (qs->_present.rx_packets || qs->_present.rx_bytes ||
+ qs->_present.rx_alloc_fail || qs->_present.rx_hw_drops ||
+ qs->_present.rx_csum_complete || qs->_present.rx_hw_gro_packets) {
+ jsonw_name(json_wtr, "rx");
+ jsonw_start_object(json_wtr);
+ if (qs->_present.rx_packets)
+ jsonw_uint_field(json_wtr, "packets", qs->rx_packets);
+ if (qs->_present.rx_bytes)
+ jsonw_uint_field(json_wtr, "bytes", qs->rx_bytes);
+ if (qs->_present.rx_alloc_fail)
+ jsonw_uint_field(json_wtr, "alloc-fail", qs->rx_alloc_fail);
+ if (qs->_present.rx_hw_drops)
+ jsonw_uint_field(json_wtr, "hw-drops", qs->rx_hw_drops);
+ if (qs->_present.rx_hw_drop_overruns)
+ jsonw_uint_field(json_wtr, "hw-drop-overruns", qs->rx_hw_drop_overruns);
+ if (qs->_present.rx_hw_drop_ratelimits)
+ jsonw_uint_field(json_wtr, "hw-drop-ratelimits", qs->rx_hw_drop_ratelimits);
+ if (qs->_present.rx_csum_complete)
+ jsonw_uint_field(json_wtr, "csum-complete", qs->rx_csum_complete);
+ if (qs->_present.rx_csum_unnecessary)
+ jsonw_uint_field(json_wtr, "csum-unnecessary", qs->rx_csum_unnecessary);
+ if (qs->_present.rx_csum_none)
+ jsonw_uint_field(json_wtr, "csum-none", qs->rx_csum_none);
+ if (qs->_present.rx_csum_bad)
+ jsonw_uint_field(json_wtr, "csum-bad", qs->rx_csum_bad);
+ if (qs->_present.rx_hw_gro_packets)
+ jsonw_uint_field(json_wtr, "hw-gro-packets", qs->rx_hw_gro_packets);
+ if (qs->_present.rx_hw_gro_bytes)
+ jsonw_uint_field(json_wtr, "hw-gro-bytes", qs->rx_hw_gro_bytes);
+ if (qs->_present.rx_hw_gro_wire_packets)
+ jsonw_uint_field(json_wtr, "hw-gro-wire-packets", qs->rx_hw_gro_wire_packets);
+ if (qs->_present.rx_hw_gro_wire_bytes)
+ jsonw_uint_field(json_wtr, "hw-gro-wire-bytes", qs->rx_hw_gro_wire_bytes);
+ jsonw_end_object(json_wtr);
+ }
+
+ if (qs->_present.tx_packets || qs->_present.tx_bytes ||
+ qs->_present.tx_hw_drops || qs->_present.tx_csum_none ||
+ qs->_present.tx_hw_gso_packets) {
+ jsonw_name(json_wtr, "tx");
+ jsonw_start_object(json_wtr);
+ if (qs->_present.tx_packets)
+ jsonw_uint_field(json_wtr, "packets", qs->tx_packets);
+ if (qs->_present.tx_bytes)
+ jsonw_uint_field(json_wtr, "bytes", qs->tx_bytes);
+ if (qs->_present.tx_hw_drops)
+ jsonw_uint_field(json_wtr, "hw-drops", qs->tx_hw_drops);
+ if (qs->_present.tx_hw_drop_errors)
+ jsonw_uint_field(json_wtr, "hw-drop-errors", qs->tx_hw_drop_errors);
+ if (qs->_present.tx_hw_drop_ratelimits)
+ jsonw_uint_field(json_wtr, "hw-drop-ratelimits", qs->tx_hw_drop_ratelimits);
+ if (qs->_present.tx_csum_none)
+ jsonw_uint_field(json_wtr, "csum-none", qs->tx_csum_none);
+ if (qs->_present.tx_needs_csum)
+ jsonw_uint_field(json_wtr, "needs-csum", qs->tx_needs_csum);
+ if (qs->_present.tx_hw_gso_packets)
+ jsonw_uint_field(json_wtr, "hw-gso-packets", qs->tx_hw_gso_packets);
+ if (qs->_present.tx_hw_gso_bytes)
+ jsonw_uint_field(json_wtr, "hw-gso-bytes", qs->tx_hw_gso_bytes);
+ if (qs->_present.tx_hw_gso_wire_packets)
+ jsonw_uint_field(json_wtr, "hw-gso-wire-packets", qs->tx_hw_gso_wire_packets);
+ if (qs->_present.tx_hw_gso_wire_bytes)
+ jsonw_uint_field(json_wtr, "hw-gso-wire-bytes", qs->tx_hw_gso_wire_bytes);
+ if (qs->_present.tx_stop)
+ jsonw_uint_field(json_wtr, "stop", qs->tx_stop);
+ if (qs->_present.tx_wake)
+ jsonw_uint_field(json_wtr, "wake", qs->tx_wake);
+ jsonw_end_object(json_wtr);
+ }
+
+ jsonw_end_object(json_wtr);
+ }
+
+ jsonw_end_array(json_wtr);
+}
+
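+/*
+ * Print a single "name: value" cell.  The *line cursor counts three
+ * slots per row; the interface prefix (or continuation indent) takes
+ * the first slot, so each row carries two counters.
+ */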
+static void print_one(bool present, const char *name, unsigned long long val,
+ int *line)
+{
+ if (!present)
+ return;
+
+ if (!*line) {
+ printf(" ");
+ ++(*line);
+ }
+
+	/* Don't waste space on the tx-/rx- prefix; it's implied by the queue type */
+ if (scope == NETDEV_QSTATS_SCOPE_QUEUE &&
+ (name[0] == 'r' || name[0] == 't') &&
+ name[1] == 'x' && name[2] == '-')
+ name += 3;
+
+ printf(" %15s: %15llu", name, val);
+
+ if (++(*line) == 3) {
+ printf("\n");
+ *line = 0;
+ }
+}
+
+static void print_plain_qstats(struct netdev_qstats_get_list *qstats)
+{
+ ynl_dump_foreach(qstats, qs) {
+ char ifname[IF_NAMESIZE];
+ const char *name;
+ int n;
+
+ name = if_indextoname(qs->ifindex, ifname);
+ if (name)
+ printf("%s", name);
+ else
+ printf("ifindex:%u", qs->ifindex);
+
+ if (qs->_present.queue_type && qs->_present.queue_id)
+ printf("\t%s-%-3u",
+ netdev_queue_type_str(qs->queue_type),
+ qs->queue_id);
+ else
+ printf("\t ");
+
+ n = 1;
+
+ /* Basic counters */
+ print_one(qs->_present.rx_packets, "rx-packets", qs->rx_packets, &n);
+ print_one(qs->_present.rx_bytes, "rx-bytes", qs->rx_bytes, &n);
+ print_one(qs->_present.tx_packets, "tx-packets", qs->tx_packets, &n);
+ print_one(qs->_present.tx_bytes, "tx-bytes", qs->tx_bytes, &n);
+
+ /* RX error/drop counters */
+ print_one(qs->_present.rx_alloc_fail, "rx-alloc-fail",
+ qs->rx_alloc_fail, &n);
+ print_one(qs->_present.rx_hw_drops, "rx-hw-drops",
+ qs->rx_hw_drops, &n);
+ print_one(qs->_present.rx_hw_drop_overruns, "rx-hw-drop-overruns",
+ qs->rx_hw_drop_overruns, &n);
+ print_one(qs->_present.rx_hw_drop_ratelimits, "rx-hw-drop-ratelimits",
+ qs->rx_hw_drop_ratelimits, &n);
+
+ /* RX checksum counters */
+ print_one(qs->_present.rx_csum_complete, "rx-csum-complete",
+ qs->rx_csum_complete, &n);
+ print_one(qs->_present.rx_csum_unnecessary, "rx-csum-unnecessary",
+ qs->rx_csum_unnecessary, &n);
+ print_one(qs->_present.rx_csum_none, "rx-csum-none",
+ qs->rx_csum_none, &n);
+ print_one(qs->_present.rx_csum_bad, "rx-csum-bad",
+ qs->rx_csum_bad, &n);
+
+ /* RX GRO counters */
+ print_one(qs->_present.rx_hw_gro_packets, "rx-hw-gro-packets",
+ qs->rx_hw_gro_packets, &n);
+ print_one(qs->_present.rx_hw_gro_bytes, "rx-hw-gro-bytes",
+ qs->rx_hw_gro_bytes, &n);
+ print_one(qs->_present.rx_hw_gro_wire_packets, "rx-hw-gro-wire-packets",
+ qs->rx_hw_gro_wire_packets, &n);
+ print_one(qs->_present.rx_hw_gro_wire_bytes, "rx-hw-gro-wire-bytes",
+ qs->rx_hw_gro_wire_bytes, &n);
+
+ /* TX error/drop counters */
+ print_one(qs->_present.tx_hw_drops, "tx-hw-drops",
+ qs->tx_hw_drops, &n);
+ print_one(qs->_present.tx_hw_drop_errors, "tx-hw-drop-errors",
+ qs->tx_hw_drop_errors, &n);
+ print_one(qs->_present.tx_hw_drop_ratelimits, "tx-hw-drop-ratelimits",
+ qs->tx_hw_drop_ratelimits, &n);
+
+ /* TX checksum counters */
+ print_one(qs->_present.tx_csum_none, "tx-csum-none",
+ qs->tx_csum_none, &n);
+ print_one(qs->_present.tx_needs_csum, "tx-needs-csum",
+ qs->tx_needs_csum, &n);
+
+ /* TX GSO counters */
+ print_one(qs->_present.tx_hw_gso_packets, "tx-hw-gso-packets",
+ qs->tx_hw_gso_packets, &n);
+ print_one(qs->_present.tx_hw_gso_bytes, "tx-hw-gso-bytes",
+ qs->tx_hw_gso_bytes, &n);
+ print_one(qs->_present.tx_hw_gso_wire_packets, "tx-hw-gso-wire-packets",
+ qs->tx_hw_gso_wire_packets, &n);
+ print_one(qs->_present.tx_hw_gso_wire_bytes, "tx-hw-gso-wire-bytes",
+ qs->tx_hw_gso_wire_bytes, &n);
+
+ /* TX queue control */
+ print_one(qs->_present.tx_stop, "tx-stop", qs->tx_stop, &n);
+ print_one(qs->_present.tx_wake, "tx-wake", qs->tx_wake, &n);
+
+ if (n)
+ printf("\n");
+ }
+}
+
+static int do_show(int argc, char **argv)
+{
+ struct netdev_qstats_get_list *qstats;
+ struct netdev_qstats_get_req *req;
+ struct ynl_error yerr;
+ struct ynl_sock *ys;
+ int ret = 0;
+
+ /* Parse options */
+ while (argc > 0) {
+ if (is_prefix(*argv, "scope") || is_prefix(*argv, "group-by")) {
+ NEXT_ARG();
+
+ if (!REQ_ARGS(1))
+ return -1;
+
+ if (is_prefix(*argv, "queue")) {
+ scope = NETDEV_QSTATS_SCOPE_QUEUE;
+ } else if (is_prefix(*argv, "device")) {
+ scope = 0;
+ } else {
+ p_err("invalid scope value '%s'", *argv);
+ return -1;
+ }
+ NEXT_ARG();
+ } else {
+ p_err("unknown option '%s'", *argv);
+ return -1;
+ }
+ }
+
+ ys = ynl_sock_create(&ynl_netdev_family, &yerr);
+ if (!ys) {
+ p_err("YNL: %s", yerr.msg);
+ return -1;
+ }
+
+ req = netdev_qstats_get_req_alloc();
+ if (!req) {
+ p_err("failed to allocate qstats request");
+ ret = -1;
+ goto exit_close;
+ }
+
+ if (scope)
+ netdev_qstats_get_req_set_scope(req, scope);
+
+ qstats = netdev_qstats_get_dump(ys, req);
+ netdev_qstats_get_req_free(req);
+ if (!qstats) {
+ p_err("failed to get queue stats: %s", ys->err.msg);
+ ret = -1;
+ goto exit_close;
+ }
+
+ /* Print the stats as returned by the kernel */
+ if (json_output)
+ print_json_qstats(qstats);
+ else
+ print_plain_qstats(qstats);
+
+ netdev_qstats_get_list_free(qstats);
+exit_close:
+ ynl_sock_destroy(ys);
+ return ret;
+}
+
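+/*
+ * Min, max and mean over @values, plus the sample standard deviation
+ * (Bessel's correction, n - 1) so small queue counts are not biased
+ * low.
+ */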
+static void compute_stats(__u64 *values, unsigned int count,
+ double *mean, double *stddev, __u64 *min, __u64 *max)
+{
+ double sum = 0.0, variance = 0.0;
+ unsigned int i;
+
+ *min = ~0ULL;
+ *max = 0;
+
+ if (count == 0) {
+ *mean = 0;
+ *stddev = 0;
+ *min = 0;
+ return;
+ }
+
+ for (i = 0; i < count; i++) {
+ sum += values[i];
+ if (values[i] < *min)
+ *min = values[i];
+ if (values[i] > *max)
+ *max = values[i];
+ }
+
+ *mean = sum / count;
+
+ if (count > 1) {
+ for (i = 0; i < count; i++) {
+ double diff = values[i] - *mean;
+
+ variance += diff * diff;
+ }
+ *stddev = sqrt(variance / (count - 1));
+ } else {
+ *stddev = 0;
+ }
+}
+
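+/*
+ * cv is the coefficient of variation (stddev / mean) and ns the
+ * normalized spread (2 * (max - min) / (max + min)), both in percent;
+ * 0% means the queues are perfectly balanced.
+ */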
+static void print_balance_stats(const char *name, enum netdev_queue_type type,
+ __u64 *values, unsigned int count)
+{
+ double mean, stddev, cv, ns;
+ __u64 min, max;
+
+ if ((name[0] == 'r' && type != NETDEV_QUEUE_TYPE_RX) ||
+ (name[0] == 't' && type != NETDEV_QUEUE_TYPE_TX))
+ return;
+
+ compute_stats(values, count, &mean, &stddev, &min, &max);
+
+ cv = mean > 0 ? (stddev / mean) * 100.0 : 0.0;
+ ns = min + max > 0 ? (double)2 * (max - min) / (max + min) * 100 : 0.0;
+
+ printf(" %-12s: cv=%.1f%% ns=%.1f%% stddev=%.0f\n",
+ name, cv, ns, stddev);
+ printf(" %-12s min=%llu max=%llu mean=%.0f\n",
+ "", min, max, mean);
+}
+
+static void
+print_balance_stats_json(const char *name, enum netdev_queue_type type,
+ __u64 *values, unsigned int count)
+{
+ double mean, stddev, cv, ns;
+ __u64 min, max;
+
+ if ((name[0] == 'r' && type != NETDEV_QUEUE_TYPE_RX) ||
+ (name[0] == 't' && type != NETDEV_QUEUE_TYPE_TX))
+ return;
+
+ compute_stats(values, count, &mean, &stddev, &min, &max);
+
+ cv = mean > 0 ? (stddev / mean) * 100.0 : 0.0;
+ ns = min + max > 0 ? (double)2 * (max - min) / (max + min) * 100 : 0.0;
+
+ jsonw_name(json_wtr, name);
+ jsonw_start_object(json_wtr);
+ jsonw_uint_field(json_wtr, "queue-count", count);
+ jsonw_uint_field(json_wtr, "min", min);
+ jsonw_uint_field(json_wtr, "max", max);
+ jsonw_float_field(json_wtr, "mean", mean);
+ jsonw_float_field(json_wtr, "stddev", stddev);
+ jsonw_float_field(json_wtr, "coefficient-of-variation", cv);
+ jsonw_float_field(json_wtr, "normalized-spread", ns);
+ jsonw_end_object(json_wtr);
+}
+
+static int cmp_ifindex_type(const void *a, const void *b)
+{
+	/* qsort() passes pointers to the array elements, which are
+	 * themselves pointers to the qstats responses
+	 */
+	const struct netdev_qstats_get_rsp *qa =
+		*(const struct netdev_qstats_get_rsp * const *)a;
+	const struct netdev_qstats_get_rsp *qb =
+		*(const struct netdev_qstats_get_rsp * const *)b;
+
+	if (qa->ifindex != qb->ifindex)
+		return qa->ifindex - qb->ifindex;
+	if (qa->queue_type != qb->queue_type)
+		return qa->queue_type - qb->queue_type;
+	return qa->queue_id - qb->queue_id;
+}
+
+static int do_balance(int argc, char **argv __attribute__((unused)))
+{
+ struct netdev_qstats_get_list *qstats;
+ struct netdev_qstats_get_req *req;
+ struct netdev_qstats_get_rsp **sorted;
+ struct ynl_error yerr;
+ struct ynl_sock *ys;
+ unsigned int count = 0;
+ unsigned int i, j;
+ int ret = 0;
+
+ if (argc > 0) {
+ p_err("balance command takes no arguments");
+ return -1;
+ }
+
+ ys = ynl_sock_create(&ynl_netdev_family, &yerr);
+ if (!ys) {
+ p_err("YNL: %s", yerr.msg);
+ return -1;
+ }
+
+ req = netdev_qstats_get_req_alloc();
+ if (!req) {
+ p_err("failed to allocate qstats request");
+ ret = -1;
+ goto exit_close;
+ }
+
+ /* Always use queue scope for balance analysis */
+ netdev_qstats_get_req_set_scope(req, NETDEV_QSTATS_SCOPE_QUEUE);
+
+ qstats = netdev_qstats_get_dump(ys, req);
+ netdev_qstats_get_req_free(req);
+ if (!qstats) {
+ p_err("failed to get queue stats: %s", ys->err.msg);
+ ret = -1;
+ goto exit_close;
+ }
+
+ /* Count and sort queues */
+ ynl_dump_foreach(qstats, qs)
+ count++;
+
+	if (count == 0) {
+		if (json_output) {
+			/* emit a complete, empty array so the output stays valid JSON */
+			jsonw_start_array(json_wtr);
+			jsonw_end_array(json_wtr);
+		} else {
+			printf("No queue statistics available\n");
+		}
+		goto exit_free_qstats;
+	}
+
+ sorted = calloc(count, sizeof(*sorted));
+ if (!sorted) {
+ p_err("failed to allocate sorted array");
+ ret = -1;
+ goto exit_free_qstats;
+ }
+
+ i = 0;
+ ynl_dump_foreach(qstats, qs)
+ sorted[i++] = qs;
+
+ qsort(sorted, count, sizeof(*sorted), cmp_ifindex_type);
+
+ if (json_output)
+ jsonw_start_array(json_wtr);
+
+ /* Process each device/queue-type combination */
+ i = 0;
+ while (i < count) {
+ __u64 *rx_packets, *rx_bytes, *tx_packets, *tx_bytes;
+ enum netdev_queue_type type = sorted[i]->queue_type;
+ unsigned int ifindex = sorted[i]->ifindex;
+ unsigned int queue_count = 0;
+ char ifname[IF_NAMESIZE];
+ const char *name;
+
+ /* Count queues for this device/type */
+ for (j = i; j < count && sorted[j]->ifindex == ifindex &&
+ sorted[j]->queue_type == type; j++)
+ queue_count++;
+
+ /* Skip if no packets/bytes (inactive queues) */
+ if (!sorted[i]->_present.rx_packets &&
+ !sorted[i]->_present.rx_bytes &&
+ !sorted[i]->_present.tx_packets &&
+ !sorted[i]->_present.tx_bytes)
+ goto next_ifc;
+
+ /* Allocate arrays for statistics */
+ rx_packets = calloc(queue_count, sizeof(*rx_packets));
+ rx_bytes = calloc(queue_count, sizeof(*rx_bytes));
+ tx_packets = calloc(queue_count, sizeof(*tx_packets));
+ tx_bytes = calloc(queue_count, sizeof(*tx_bytes));
+
+ if (!rx_packets || !rx_bytes || !tx_packets || !tx_bytes) {
+ p_err("failed to allocate statistics arrays");
+ free(rx_packets);
+ free(rx_bytes);
+ free(tx_packets);
+ free(tx_bytes);
+ ret = -1;
+ goto exit_free_sorted;
+ }
+
+ /* Collect statistics */
+ for (j = 0; j < queue_count; j++) {
+ rx_packets[j] = sorted[i + j]->_present.rx_packets ?
+ sorted[i + j]->rx_packets : 0;
+ rx_bytes[j] = sorted[i + j]->_present.rx_bytes ?
+ sorted[i + j]->rx_bytes : 0;
+ tx_packets[j] = sorted[i + j]->_present.tx_packets ?
+ sorted[i + j]->tx_packets : 0;
+ tx_bytes[j] = sorted[i + j]->_present.tx_bytes ?
+ sorted[i + j]->tx_bytes : 0;
+ }
+
+ name = if_indextoname(ifindex, ifname);
+
+ if (json_output) {
+ jsonw_start_object(json_wtr);
+ if (name)
+ jsonw_string_field(json_wtr, "ifname", name);
+ jsonw_uint_field(json_wtr, "ifindex", ifindex);
+ jsonw_string_field(json_wtr, "queue-type",
+ netdev_queue_type_str(type));
+
+ print_balance_stats_json("rx-packets", type,
+ rx_packets, queue_count);
+ print_balance_stats_json("rx-bytes", type,
+ rx_bytes, queue_count);
+ print_balance_stats_json("tx-packets", type,
+ tx_packets, queue_count);
+ print_balance_stats_json("tx-bytes", type,
+ tx_bytes, queue_count);
+
+ jsonw_end_object(json_wtr);
+ } else {
+ if (name)
+ printf("%s", name);
+ else
+ printf("ifindex:%u", ifindex);
+			printf(" %s %u queues:\n",
+			       netdev_queue_type_str(type), queue_count);
+
+ print_balance_stats("rx-packets", type,
+ rx_packets, queue_count);
+ print_balance_stats("rx-bytes", type,
+ rx_bytes, queue_count);
+ print_balance_stats("tx-packets", type,
+ tx_packets, queue_count);
+ print_balance_stats("tx-bytes", type,
+ tx_bytes, queue_count);
+ printf("\n");
+ }
+
+ free(rx_packets);
+ free(rx_bytes);
+ free(tx_packets);
+ free(tx_bytes);
+
+next_ifc:
+ i += queue_count;
+ }
+
+ if (json_output)
+ jsonw_end_array(json_wtr);
+
+exit_free_sorted:
+ free(sorted);
+exit_free_qstats:
+ netdev_qstats_get_list_free(qstats);
+exit_close:
+ ynl_sock_destroy(ys);
+ return ret;
+}
+
+static int do_help(int argc __attribute__((unused)),
+ char **argv __attribute__((unused)))
+{
+ if (json_output) {
+ jsonw_null(json_wtr);
+ return 0;
+ }
+
+ fprintf(stderr,
+ "Usage: %s qstats { COMMAND | help }\n"
+ " %s qstats [ show ] [ OPTIONS ]\n"
+ " %s qstats balance\n"
+ "\n"
+ " OPTIONS := { scope queue | group-by { device | queue } }\n"
+ "\n"
+ " show - Display queue statistics (default)\n"
+ " Statistics are aggregated for the entire device.\n"
+ " show scope queue - Display per-queue statistics\n"
+ " show group-by device - Display device-aggregated statistics (default)\n"
+ " show group-by queue - Display per-queue statistics\n"
+ " balance - Analyze traffic distribution balance.\n"
+ "",
+ bin_name, bin_name, bin_name);
+
+ return 0;
+}
+
+static const struct cmd qstats_cmds[] = {
+ { "show", do_show },
+ { "balance", do_balance },
+ { "help", do_help },
+ { 0 }
+};
+
+int do_qstats(int argc, char **argv)
+{
+ return cmd_select(qstats_cmds, argc, argv, do_help);
+}
diff --git a/tools/objtool/.gitignore b/tools/objtool/.gitignore
index 4faa4dd72f35..73d883128511 100644
--- a/tools/objtool/.gitignore
+++ b/tools/objtool/.gitignore
@@ -1,5 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
+arch/x86/lib/cpu-feature-names.c
arch/x86/lib/inat-tables.c
/objtool
+feature
+FEATURE-DUMP.objtool
fixdep
libsubcmd/
diff --git a/tools/objtool/Build b/tools/objtool/Build
index a3cdf8af6635..600da051af12 100644
--- a/tools/objtool/Build
+++ b/tools/objtool/Build
@@ -8,13 +8,17 @@ objtool-y += builtin-check.o
objtool-y += elf.o
objtool-y += objtool.o
-objtool-$(BUILD_ORC) += orc_gen.o
-objtool-$(BUILD_ORC) += orc_dump.o
+objtool-$(BUILD_DISAS) += disas.o
+objtool-$(BUILD_DISAS) += trace.o
+
+objtool-$(BUILD_ORC) += orc_gen.o orc_dump.o
+objtool-$(BUILD_KLP) += builtin-klp.o klp-diff.o klp-post-link.o
objtool-y += libstring.o
objtool-y += libctype.o
objtool-y += str_error_r.o
objtool-y += librbtree.o
+objtool-y += signal.o
$(OUTPUT)libstring.o: ../lib/string.c FORCE
$(call rule_mkdir)
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index 8c20361dd100..ad6e1ec706ce 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -2,6 +2,28 @@
include ../scripts/Makefile.include
include ../scripts/Makefile.arch
+ifeq ($(SRCARCH),x86)
+ BUILD_ORC := y
+ ARCH_HAS_KLP := y
+endif
+
+ifeq ($(SRCARCH),loongarch)
+ BUILD_ORC := y
+endif
+
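+# Probe for libxxhash by compiling a one-liner against the installed
+# header; klp support is only built when it compiles and links.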
+ifeq ($(ARCH_HAS_KLP),y)
+ HAVE_XXHASH = $(shell printf "$(pound)include <xxhash.h>\nXXH3_state_t *state;int main() {}" | \
+ $(HOSTCC) -xc - -o /dev/null -lxxhash 2> /dev/null && echo y || echo n)
+ ifeq ($(HAVE_XXHASH),y)
+ BUILD_KLP := y
+ LIBXXHASH_CFLAGS := $(shell $(HOSTPKG_CONFIG) libxxhash --cflags 2>/dev/null) \
+ -DBUILD_KLP
+ LIBXXHASH_LIBS := $(shell $(HOSTPKG_CONFIG) libxxhash --libs 2>/dev/null || echo -lxxhash)
+ endif
+endif
+
+export BUILD_ORC BUILD_KLP
+
ifeq ($(srctree),)
srctree := $(patsubst %/,%,$(dir $(CURDIR)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
@@ -23,6 +45,11 @@ LIBELF_LIBS := $(shell $(HOSTPKG_CONFIG) libelf --libs 2>/dev/null || echo -lel
all: $(OBJTOOL)
+WARNINGS := -Werror -Wall -Wextra -Wmissing-prototypes \
+ -Wmissing-declarations -Wwrite-strings \
+ -Wno-implicit-fallthrough -Wno-sign-compare \
+ -Wno-unused-parameter
+
INCLUDES := -I$(srctree)/tools/include \
-I$(srctree)/tools/include/uapi \
-I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
@@ -30,11 +57,11 @@ INCLUDES := -I$(srctree)/tools/include \
-I$(srctree)/tools/objtool/include \
-I$(srctree)/tools/objtool/arch/$(SRCARCH)/include \
-I$(LIBSUBCMD_OUTPUT)/include
-# Note, EXTRA_WARNINGS here was determined for CC and not HOSTCC, it
-# is passed here to match a legacy behavior.
-WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed -Wno-nested-externs
-OBJTOOL_CFLAGS := -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS)
-OBJTOOL_LDFLAGS := $(LIBELF_LIBS) $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
+
+OBJTOOL_CFLAGS := -std=gnu11 -fomit-frame-pointer -O2 -g $(WARNINGS) \
+ $(INCLUDES) $(LIBELF_FLAGS) $(LIBXXHASH_CFLAGS) $(HOSTCFLAGS)
+
+OBJTOOL_LDFLAGS := $(LIBSUBCMD) $(LIBELF_LIBS) $(LIBXXHASH_LIBS) $(HOSTLDFLAGS)
# Allow old libelf to be used:
elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(HOSTCC) $(OBJTOOL_CFLAGS) -x c -E - 2>/dev/null | grep elf_getshdr)
@@ -43,20 +70,32 @@ OBJTOOL_CFLAGS += $(if $(elfshdr),,-DLIBELF_USE_DEPRECATED)
# Always want host compilation.
HOST_OVERRIDES := CC="$(HOSTCC)" LD="$(HOSTLD)" AR="$(HOSTAR)"
-AWK = awk
-MKDIR = mkdir
+#
+# To support disassembly, objtool needs libopcodes, which is provided
+# with libbfd (binutils-dev or binutils-devel package).
+#
+FEATURE_USER = .objtool
+FEATURE_TESTS = libbfd disassembler-init-styled
+FEATURE_DISPLAY =
+include $(srctree)/tools/build/Makefile.feature
+
+ifeq ($(feature-disassembler-init-styled), 1)
+ OBJTOOL_CFLAGS += -DDISASM_INIT_STYLED
+endif
-BUILD_ORC := n
+BUILD_DISAS := n
-ifeq ($(SRCARCH),x86)
- BUILD_ORC := y
+ifeq ($(feature-libbfd),1)
+ BUILD_DISAS := y
+ OBJTOOL_CFLAGS += -DDISAS -DPACKAGE="objtool"
+ OBJTOOL_LDFLAGS += -lopcodes
endif
-ifeq ($(SRCARCH),loongarch)
- BUILD_ORC := y
-endif
+export BUILD_DISAS
+
+AWK = awk
+MKDIR = mkdir
-export BUILD_ORC
export srctree OUTPUT CFLAGS SRCARCH AWK
include $(srctree)/tools/build/Makefile.include
@@ -86,7 +125,10 @@ $(LIBSUBCMD)-clean:
clean: $(LIBSUBCMD)-clean
$(call QUIET_CLEAN, objtool) $(RM) $(OBJTOOL)
$(Q)find $(OUTPUT) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
+	$(Q)$(RM) $(OUTPUT)arch/x86/lib/cpu-feature-names.c
$(Q)$(RM) $(OUTPUT)arch/x86/lib/inat-tables.c $(OUTPUT)fixdep
+ $(Q)$(RM) -- $(OUTPUT)FEATURE-DUMP.objtool
+ $(Q)$(RM) -r -- $(OUTPUT)feature
FORCE:
diff --git a/tools/objtool/arch/loongarch/decode.c b/tools/objtool/arch/loongarch/decode.c
index b6fdc68053cc..6cd288150f49 100644
--- a/tools/objtool/arch/loongarch/decode.c
+++ b/tools/objtool/arch/loongarch/decode.c
@@ -1,13 +1,25 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include <string.h>
#include <objtool/check.h>
+#include <objtool/disas.h>
#include <objtool/warn.h>
#include <asm/inst.h>
#include <asm/orc_types.h>
#include <linux/objtool_types.h>
#include <arch/elf.h>
-int arch_ftrace_match(char *name)
+const char *arch_reg_name[CFI_NUM_REGS] = {
+ "zero", "ra", "tp", "sp",
+ "a0", "a1", "a2", "a3",
+ "a4", "a5", "a6", "a7",
+ "t0", "t1", "t2", "t3",
+ "t4", "t5", "t6", "t7",
+ "t8", "u0", "fp", "s0",
+ "s1", "s2", "s3", "s4",
+ "s5", "s6", "s7", "s8"
+};
+
+int arch_ftrace_match(const char *name)
{
return !strcmp(name, "_mcount");
}
@@ -17,9 +29,9 @@ unsigned long arch_jump_destination(struct instruction *insn)
return insn->offset + (insn->immediate << 2);
}
-unsigned long arch_dest_reloc_offset(int addend)
+s64 arch_insn_adjusted_addend(struct instruction *insn, struct reloc *reloc)
{
- return addend;
+ return reloc_addend(reloc);
}
bool arch_pc_relative_reloc(struct reloc *reloc)
@@ -278,6 +290,25 @@ static bool decode_insn_reg2i16_fomat(union loongarch_instruction inst,
return true;
}
+static bool decode_insn_reg3_fomat(union loongarch_instruction inst,
+ struct instruction *insn)
+{
+ switch (inst.reg3_format.opcode) {
+ case amswapw_op:
+ if (inst.reg3_format.rd == LOONGARCH_GPR_ZERO &&
+ inst.reg3_format.rk == LOONGARCH_GPR_RA &&
+ inst.reg3_format.rj == LOONGARCH_GPR_ZERO) {
+ /* amswap.w $zero, $ra, $zero */
+ insn->type = INSN_BUG;
+ }
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
int arch_decode_instruction(struct objtool_file *file, const struct section *sec,
unsigned long offset, unsigned int maxlen,
struct instruction *insn)
@@ -309,11 +340,19 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec
return 0;
if (decode_insn_reg2i16_fomat(inst, insn))
return 0;
+ if (decode_insn_reg3_fomat(inst, insn))
+ return 0;
- if (inst.word == 0)
+ if (inst.word == 0) {
+ /* andi $zero, $zero, 0x0 */
insn->type = INSN_NOP;
- else if (inst.reg0i15_format.opcode == break_op) {
- /* break */
+ } else if (inst.reg0i15_format.opcode == break_op &&
+ inst.reg0i15_format.immediate == 0x0) {
+ /* break 0x0 */
+ insn->type = INSN_TRAP;
+ } else if (inst.reg0i15_format.opcode == break_op &&
+ inst.reg0i15_format.immediate == 0x1) {
+ /* break 0x1 */
insn->type = INSN_BUG;
} else if (inst.reg2_format.opcode == ertn_op) {
/* ertn */
@@ -387,3 +426,14 @@ unsigned long arch_jump_table_sym_offset(struct reloc *reloc, struct reloc *tabl
return reloc->sym->offset + reloc_addend(reloc);
}
}
+
+#ifdef DISAS
+
+int arch_disas_info_init(struct disassemble_info *dinfo)
+{
+ return disas_info_init(dinfo, bfd_arch_loongarch,
+ bfd_mach_loongarch32, bfd_mach_loongarch64,
+ NULL);
+}
+
+#endif /* DISAS */
diff --git a/tools/objtool/arch/loongarch/orc.c b/tools/objtool/arch/loongarch/orc.c
index b58c5ff443c9..ffd3a3c858ae 100644
--- a/tools/objtool/arch/loongarch/orc.c
+++ b/tools/objtool/arch/loongarch/orc.c
@@ -5,7 +5,6 @@
#include <objtool/check.h>
#include <objtool/orc.h>
#include <objtool/warn.h>
-#include <objtool/endianness.h>
int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, struct instruction *insn)
{
diff --git a/tools/objtool/arch/loongarch/special.c b/tools/objtool/arch/loongarch/special.c
index e39f86d97002..aba774109437 100644
--- a/tools/objtool/arch/loongarch/special.c
+++ b/tools/objtool/arch/loongarch/special.c
@@ -27,6 +27,7 @@ static void get_rodata_table_size_by_table_annotate(struct objtool_file *file,
struct table_info *next_table;
unsigned long tmp_insn_offset;
unsigned long tmp_rodata_offset;
+ bool is_valid_list = false;
rsec = find_section_by_name(file->elf, ".rela.discard.tablejump_annotate");
if (!rsec)
@@ -35,6 +36,12 @@ static void get_rodata_table_size_by_table_annotate(struct objtool_file *file,
INIT_LIST_HEAD(&table_list);
for_each_reloc(rsec, reloc) {
+ if (reloc->sym->sec->rodata)
+ continue;
+
+ if (strcmp(insn->sec->name, reloc->sym->sec->name))
+ continue;
+
orig_table = malloc(sizeof(struct table_info));
if (!orig_table) {
WARN("malloc failed");
@@ -49,6 +56,22 @@ static void get_rodata_table_size_by_table_annotate(struct objtool_file *file,
if (reloc_idx(reloc) + 1 == sec_num_entries(rsec))
break;
+
+ if (strcmp(insn->sec->name, (reloc + 1)->sym->sec->name)) {
+ list_for_each_entry(orig_table, &table_list, jump_info) {
+ if (orig_table->insn_offset == insn->offset) {
+ is_valid_list = true;
+ break;
+ }
+ }
+
+ if (!is_valid_list) {
+ list_del_init(&table_list);
+ continue;
+ }
+
+ break;
+ }
}
list_for_each_entry(orig_table, &table_list, jump_info) {
@@ -171,3 +194,8 @@ struct reloc *arch_find_switch_table(struct objtool_file *file,
return rodata_reloc;
}
+
+const char *arch_cpu_feature_name(int feature_number)
+{
+ return NULL;
+}
diff --git a/tools/objtool/arch/powerpc/decode.c b/tools/objtool/arch/powerpc/decode.c
index c851c51d4bd3..e534ac1123b3 100644
--- a/tools/objtool/arch/powerpc/decode.c
+++ b/tools/objtool/arch/powerpc/decode.c
@@ -3,20 +3,32 @@
#include <stdio.h>
#include <stdlib.h>
#include <objtool/check.h>
+#include <objtool/disas.h>
#include <objtool/elf.h>
#include <objtool/arch.h>
#include <objtool/warn.h>
#include <objtool/builtin.h>
-#include <objtool/endianness.h>
-int arch_ftrace_match(char *name)
+const char *arch_reg_name[CFI_NUM_REGS] = {
+ "r0", "sp", "r2", "r3",
+ "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11",
+ "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19",
+ "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27",
+ "r28", "r29", "r30", "r31",
+ "ra"
+};
+
+int arch_ftrace_match(const char *name)
{
return !strcmp(name, "_mcount");
}
-unsigned long arch_dest_reloc_offset(int addend)
+s64 arch_insn_adjusted_addend(struct instruction *insn, struct reloc *reloc)
{
- return addend;
+ return reloc_addend(reloc);
}
bool arch_callee_saved_reg(unsigned char reg)
@@ -128,3 +140,14 @@ unsigned int arch_reloc_size(struct reloc *reloc)
return 8;
}
}
+
+#ifdef DISAS
+
+int arch_disas_info_init(struct disassemble_info *dinfo)
+{
+ return disas_info_init(dinfo, bfd_arch_powerpc,
+ bfd_mach_ppc, bfd_mach_ppc64,
+ NULL);
+}
+
+#endif /* DISAS */
diff --git a/tools/objtool/arch/powerpc/special.c b/tools/objtool/arch/powerpc/special.c
index 51610689abf7..8f9bf61ca089 100644
--- a/tools/objtool/arch/powerpc/special.c
+++ b/tools/objtool/arch/powerpc/special.c
@@ -18,3 +18,8 @@ struct reloc *arch_find_switch_table(struct objtool_file *file,
{
exit(-1);
}
+
+const char *arch_cpu_feature_name(int feature_number)
+{
+ return NULL;
+}
diff --git a/tools/objtool/arch/x86/Build b/tools/objtool/arch/x86/Build
index 3dedb2fd8f3a..febee0b8ee0b 100644
--- a/tools/objtool/arch/x86/Build
+++ b/tools/objtool/arch/x86/Build
@@ -1,5 +1,5 @@
-objtool-y += special.o
objtool-y += decode.o
+objtool-y += special.o
objtool-y += orc.o
inat_tables_script = ../arch/x86/tools/gen-insn-attr-x86.awk
@@ -12,3 +12,14 @@ $(OUTPUT)arch/x86/lib/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
$(OUTPUT)arch/x86/decode.o: $(OUTPUT)arch/x86/lib/inat-tables.c
CFLAGS_decode.o += -I$(OUTPUT)arch/x86/lib
+
+cpu_features = ../arch/x86/include/asm/cpufeatures.h
+cpu_features_script = ../arch/x86/tools/gen-cpu-feature-names-x86.awk
+
+$(OUTPUT)arch/x86/lib/cpu-feature-names.c: $(cpu_features_script) $(cpu_features)
+ $(call rule_mkdir)
+ $(Q)$(call echo-cmd,gen)$(AWK) -f $(cpu_features_script) $(cpu_features) > $@
+
+$(OUTPUT)arch/x86/special.o: $(OUTPUT)arch/x86/lib/cpu-feature-names.c
+
+CFLAGS_special.o += -I$(OUTPUT)arch/x86/lib
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 98c4713c1b09..f4af82508228 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -16,14 +16,22 @@
#include <asm/orc_types.h>
#include <objtool/check.h>
+#include <objtool/disas.h>
#include <objtool/elf.h>
#include <objtool/arch.h>
#include <objtool/warn.h>
-#include <objtool/endianness.h>
#include <objtool/builtin.h>
#include <arch/elf.h>
-int arch_ftrace_match(char *name)
+const char *arch_reg_name[CFI_NUM_REGS] = {
+ "rax", "rcx", "rdx", "rbx",
+ "rsp", "rbp", "rsi", "rdi",
+ "r8", "r9", "r10", "r11",
+ "r12", "r13", "r14", "r15",
+ "ra"
+};
+
+int arch_ftrace_match(const char *name)
{
return !strcmp(name, "__fentry__");
}
@@ -68,9 +76,65 @@ bool arch_callee_saved_reg(unsigned char reg)
}
}
-unsigned long arch_dest_reloc_offset(int addend)
+/* Undo the effects of __pa_symbol() if necessary */
+static unsigned long phys_to_virt(unsigned long pa)
+{
+ s64 va = pa;
+
+ if (va > 0)
+ va &= ~(0x80000000);
+
+ return va;
+}
+
+s64 arch_insn_adjusted_addend(struct instruction *insn, struct reloc *reloc)
+{
+ s64 addend = reloc_addend(reloc);
+
+ if (arch_pc_relative_reloc(reloc))
+ addend += insn->offset + insn->len - reloc_offset(reloc);
+
+ return phys_to_virt(addend);
+}
+
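+/*
+ * Decode instructions from the start of the section until reaching the
+ * one that covers @offset, and return that instruction's start and
+ * length so PC-relative addends can be rebased to the instruction end.
+ */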
+static void scan_for_insn(struct section *sec, unsigned long offset,
+ unsigned long *insn_off, unsigned int *insn_len)
+{
+ unsigned long o = 0;
+ struct insn insn;
+
+ while (1) {
+
+ insn_decode(&insn, sec->data->d_buf + o, sec_size(sec) - o,
+ INSN_MODE_64);
+
+ if (o + insn.length > offset) {
+ *insn_off = o;
+ *insn_len = insn.length;
+ return;
+ }
+
+ o += insn.length;
+ }
+}
+
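+/*
+ * Rebase a reloc's addend to the end of the referencing instruction:
+ * PLT32 is always a call/jump immediate (+4), PC32 in a text section
+ * needs the containing instruction located by scanning; anything else
+ * passes through unchanged.
+ */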
+u64 arch_adjusted_addend(struct reloc *reloc)
{
- return addend + 4;
+ unsigned int type = reloc_type(reloc);
+ s64 addend = reloc_addend(reloc);
+ unsigned long insn_off;
+ unsigned int insn_len;
+
+ if (type == R_X86_64_PLT32)
+ return addend + 4;
+
+ if (type != R_X86_64_PC32 || !is_text_sec(reloc->sec->base))
+ return addend;
+
+ scan_for_insn(reloc->sec->base, reloc_offset(reloc),
+ &insn_off, &insn_len);
+
+ return addend + insn_off + insn_len - reloc_offset(reloc);
}
unsigned long arch_jump_destination(struct instruction *insn)
@@ -189,15 +253,6 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec
op2 = ins.opcode.bytes[1];
op3 = ins.opcode.bytes[2];
- /*
- * XXX hack, decoder is buggered and thinks 0xea is 7 bytes long.
- */
- if (op1 == 0xea) {
- insn->len = 1;
- insn->type = INSN_BUG;
- return 0;
- }
-
if (ins.rex_prefix.nbytes) {
rex = ins.rex_prefix.bytes[0];
rex_w = X86_REX_W(rex) >> 3;
@@ -503,6 +558,12 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec
break;
case 0x90:
+ if (rex_b) /* XCHG %r8, %rax */
+ break;
+
+ if (prefix == 0xf3) /* REP NOP := PAUSE */
+ break;
+
insn->type = INSN_NOP;
break;
@@ -556,13 +617,14 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec
} else if (op2 == 0x0b || op2 == 0xb9) {
- /* ud2 */
+ /* ud2, ud1 */
insn->type = INSN_BUG;
- } else if (op2 == 0x0d || op2 == 0x1f) {
+ } else if (op2 == 0x1f) {
- /* nopl/nopw */
- insn->type = INSN_NOP;
+ /* 0f 1f /0 := NOPL */
+ if (modrm_reg == 0)
+ insn->type = INSN_NOP;
} else if (op2 == 0x1e) {
@@ -692,6 +754,10 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec
insn->type = INSN_SYSRET;
break;
+ case 0xd6: /* udb */
+ insn->type = INSN_BUG;
+ break;
+
case 0xe0: /* loopne */
case 0xe1: /* loope */
case 0xe2: /* loop */
@@ -880,3 +946,26 @@ unsigned int arch_reloc_size(struct reloc *reloc)
return 8;
}
}
+
+bool arch_absolute_reloc(struct elf *elf, struct reloc *reloc)
+{
+ switch (reloc_type(reloc)) {
+ case R_X86_64_32:
+ case R_X86_64_32S:
+ case R_X86_64_64:
+ return true;
+ default:
+ return false;
+ }
+}
+
+#ifdef DISAS
+
+int arch_disas_info_init(struct disassemble_info *dinfo)
+{
+ return disas_info_init(dinfo, bfd_arch_i386,
+ bfd_mach_i386_i386, bfd_mach_x86_64,
+ "att");
+}
+
+#endif /* DISAS */
diff --git a/tools/objtool/arch/x86/orc.c b/tools/objtool/arch/x86/orc.c
index 7176b9ec5b05..735e150ca6b7 100644
--- a/tools/objtool/arch/x86/orc.c
+++ b/tools/objtool/arch/x86/orc.c
@@ -5,7 +5,6 @@
#include <objtool/check.h>
#include <objtool/orc.h>
#include <objtool/warn.h>
-#include <objtool/endianness.h>
int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, struct instruction *insn)
{
diff --git a/tools/objtool/arch/x86/special.c b/tools/objtool/arch/x86/special.c
index 06ca4a2659a4..e817a3fff449 100644
--- a/tools/objtool/arch/x86/special.c
+++ b/tools/objtool/arch/x86/special.c
@@ -4,6 +4,10 @@
#include <objtool/special.h>
#include <objtool/builtin.h>
#include <objtool/warn.h>
+#include <asm/cpufeatures.h>
+
+/* cpu feature name array generated from cpufeatures.h */
+#include "cpu-feature-names.c"
void arch_handle_alternative(struct special_alt *alt)
{
@@ -89,7 +93,7 @@ struct reloc *arch_find_switch_table(struct objtool_file *file,
/* look for a relocation which references .rodata */
text_reloc = find_reloc_by_dest_range(file->elf, insn->sec,
insn->offset, insn->len);
- if (!text_reloc || text_reloc->sym->type != STT_SECTION ||
+ if (!text_reloc || !is_sec_sym(text_reloc->sym) ||
!text_reloc->sym->sec->rodata)
return NULL;
@@ -134,3 +138,9 @@ struct reloc *arch_find_switch_table(struct objtool_file *file,
*table_size = 0;
return rodata_reloc;
}
+
+const char *arch_cpu_feature_name(int feature_number)
+{
+	if (feature_number < 0 ||
+	    (size_t)feature_number >= ARRAY_SIZE(cpu_feature_names))
+		return NULL;
+
+	return cpu_feature_names[feature_number];
+}
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index 80239843e9f0..b780df513715 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -73,34 +73,41 @@ static int parse_hacks(const struct option *opt, const char *str, int unset)
static const struct option check_options[] = {
OPT_GROUP("Actions:"),
+ OPT_BOOLEAN(0, "checksum", &opts.checksum, "generate per-function checksums"),
+ OPT_BOOLEAN(0, "cfi", &opts.cfi, "annotate kernel control flow integrity (kCFI) function preambles"),
+ OPT_STRING_OPTARG('d', "disas", &opts.disas, "function-pattern", "disassemble functions", "*"),
OPT_CALLBACK_OPTARG('h', "hacks", NULL, NULL, "jump_label,noinstr,skylake", "patch toolchain bugs/limitations", parse_hacks),
- OPT_BOOLEAN('i', "ibt", &opts.ibt, "validate and annotate IBT"),
- OPT_BOOLEAN('m', "mcount", &opts.mcount, "annotate mcount/fentry calls for ftrace"),
- OPT_BOOLEAN('n', "noinstr", &opts.noinstr, "validate noinstr rules"),
- OPT_BOOLEAN(0, "orc", &opts.orc, "generate ORC metadata"),
- OPT_BOOLEAN('r', "retpoline", &opts.retpoline, "validate and annotate retpoline usage"),
- OPT_BOOLEAN(0, "rethunk", &opts.rethunk, "validate and annotate rethunk usage"),
- OPT_BOOLEAN(0, "unret", &opts.unret, "validate entry unret placement"),
- OPT_INTEGER(0, "prefix", &opts.prefix, "generate prefix symbols"),
- OPT_BOOLEAN('l', "sls", &opts.sls, "validate straight-line-speculation mitigations"),
- OPT_BOOLEAN('s', "stackval", &opts.stackval, "validate frame pointer rules"),
- OPT_BOOLEAN('t', "static-call", &opts.static_call, "annotate static calls"),
- OPT_BOOLEAN('u', "uaccess", &opts.uaccess, "validate uaccess rules for SMAP"),
- OPT_BOOLEAN(0 , "cfi", &opts.cfi, "annotate kernel control flow integrity (kCFI) function preambles"),
- OPT_CALLBACK_OPTARG(0, "dump", NULL, NULL, "orc", "dump metadata", parse_dump),
+ OPT_BOOLEAN('i', "ibt", &opts.ibt, "validate and annotate IBT"),
+ OPT_BOOLEAN('m', "mcount", &opts.mcount, "annotate mcount/fentry calls for ftrace"),
+ OPT_BOOLEAN(0, "noabs", &opts.noabs, "reject absolute references in allocatable sections"),
+ OPT_BOOLEAN('n', "noinstr", &opts.noinstr, "validate noinstr rules"),
+ OPT_BOOLEAN(0, "orc", &opts.orc, "generate ORC metadata"),
+ OPT_BOOLEAN('r', "retpoline", &opts.retpoline, "validate and annotate retpoline usage"),
+ OPT_BOOLEAN(0, "rethunk", &opts.rethunk, "validate and annotate rethunk usage"),
+ OPT_BOOLEAN(0, "unret", &opts.unret, "validate entry unret placement"),
+ OPT_INTEGER(0, "prefix", &opts.prefix, "generate prefix symbols"),
+ OPT_BOOLEAN('l', "sls", &opts.sls, "validate straight-line-speculation mitigations"),
+ OPT_BOOLEAN('s', "stackval", &opts.stackval, "validate frame pointer rules"),
+ OPT_BOOLEAN('t', "static-call", &opts.static_call, "annotate static calls"),
+ OPT_BOOLEAN('u', "uaccess", &opts.uaccess, "validate uaccess rules for SMAP"),
+ OPT_CALLBACK_OPTARG(0, "dump", NULL, NULL, "orc", "dump metadata", parse_dump),
OPT_GROUP("Options:"),
- OPT_BOOLEAN(0, "backtrace", &opts.backtrace, "unwind on error"),
- OPT_BOOLEAN(0, "dry-run", &opts.dryrun, "don't write modifications"),
- OPT_BOOLEAN(0, "link", &opts.link, "object is a linked object"),
- OPT_BOOLEAN(0, "module", &opts.module, "object is part of a kernel module"),
- OPT_BOOLEAN(0, "mnop", &opts.mnop, "nop out mcount call sites"),
- OPT_BOOLEAN(0, "no-unreachable", &opts.no_unreachable, "skip 'unreachable instruction' warnings"),
- OPT_STRING('o', "output", &opts.output, "file", "output file name"),
- OPT_BOOLEAN(0, "sec-address", &opts.sec_address, "print section addresses in warnings"),
- OPT_BOOLEAN(0, "stats", &opts.stats, "print statistics"),
- OPT_BOOLEAN('v', "verbose", &opts.verbose, "verbose warnings"),
- OPT_BOOLEAN(0, "Werror", &opts.werror, "return error on warnings"),
+ OPT_BOOLEAN(0, "backtrace", &opts.backtrace, "unwind on error"),
+ OPT_BOOLEAN(0, "backup", &opts.backup, "create backup (.orig) file on warning/error"),
+ OPT_STRING(0, "debug-checksum", &opts.debug_checksum, "funcs", "enable checksum debug output"),
+ OPT_BOOLEAN(0, "dry-run", &opts.dryrun, "don't write modifications"),
+ OPT_BOOLEAN(0, "link", &opts.link, "object is a linked object"),
+ OPT_BOOLEAN(0, "module", &opts.module, "object is part of a kernel module"),
+ OPT_BOOLEAN(0, "mnop", &opts.mnop, "nop out mcount call sites"),
+ OPT_BOOLEAN(0, "no-unreachable", &opts.no_unreachable, "skip 'unreachable instruction' warnings"),
+ OPT_STRING('o', "output", &opts.output, "file", "output file name"),
+ OPT_BOOLEAN(0, "sec-address", &opts.sec_address, "print section addresses in warnings"),
+ OPT_BOOLEAN(0, "stats", &opts.stats, "print statistics"),
+ OPT_STRING(0, "trace", &opts.trace, "func", "trace function validation"),
+ OPT_BOOLEAN('v', "verbose", &opts.verbose, "verbose warnings"),
+ OPT_BOOLEAN(0, "werror", &opts.werror, "return error on warnings"),
+ OPT_BOOLEAN(0, "wide", &opts.wide, "wide output"),
OPT_END(),
};
@@ -158,10 +165,25 @@ static bool opts_valid(void)
return false;
}
- if (opts.hack_jump_label ||
+#ifndef BUILD_KLP
+ if (opts.checksum) {
+ ERROR("--checksum not supported; install xxhash-devel/libxxhash-dev (version >= 0.8) and recompile");
+ return false;
+ }
+#endif
+
+ if (opts.debug_checksum && !opts.checksum) {
+ ERROR("--debug-checksum requires --checksum");
+ return false;
+ }
+
+ if (opts.checksum ||
+ opts.disas ||
+ opts.hack_jump_label ||
opts.hack_noinstr ||
opts.ibt ||
opts.mcount ||
+ opts.noabs ||
opts.noinstr ||
opts.orc ||
opts.retpoline ||
@@ -241,15 +263,12 @@ static void save_argv(int argc, const char **argv)
ERROR_GLIBC("strdup(%s)", argv[i]);
exit(1);
}
- };
+ }
}
-void print_args(void)
+int make_backup(void)
{
- char *backup = NULL;
-
- if (opts.output || opts.dryrun)
- goto print;
+ char *backup;
/*
* Make a backup before kbuild deletes the file so the error
@@ -258,33 +277,32 @@ void print_args(void)
backup = malloc(strlen(objname) + strlen(ORIG_SUFFIX) + 1);
if (!backup) {
ERROR_GLIBC("malloc");
- goto print;
+ return 1;
}
strcpy(backup, objname);
strcat(backup, ORIG_SUFFIX);
- if (copy_file(objname, backup)) {
- backup = NULL;
- goto print;
- }
+ if (copy_file(objname, backup))
+ return 1;
-print:
/*
- * Print the cmdline args to make it easier to recreate. If '--output'
- * wasn't used, add it to the printed args with the backup as input.
+ * Print the cmdline args to make it easier to recreate.
*/
+
fprintf(stderr, "%s", orig_argv[0]);
for (int i = 1; i < orig_argc; i++) {
char *arg = orig_argv[i];
- if (backup && !strcmp(arg, objname))
+ /* Modify the printed args to use the backup */
+ if (!opts.output && !strcmp(arg, objname))
fprintf(stderr, " %s -o %s", backup, objname);
else
fprintf(stderr, " %s", arg);
}
fprintf(stderr, "\n");
+ return 0;
}
int objtool_run(int argc, const char **argv)
@@ -330,5 +348,5 @@ int objtool_run(int argc, const char **argv)
if (!opts.dryrun && file->elf->changed && elf_write(file->elf))
return 1;
- return 0;
+ return elf_close(file->elf);
}
diff --git a/tools/objtool/builtin-klp.c b/tools/objtool/builtin-klp.c
new file mode 100644
index 000000000000..56d5a5b92f72
--- /dev/null
+++ b/tools/objtool/builtin-klp.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <subcmd/parse-options.h>
+#include <string.h>
+#include <stdlib.h>
+#include <objtool/builtin.h>
+#include <objtool/objtool.h>
+#include <objtool/klp.h>
+
+struct subcmd {
+ const char *name;
+ const char *description;
+ int (*fn)(int, const char **);
+};
+
+static struct subcmd subcmds[] = {
+ { "diff", "Generate binary diff of two object files", cmd_klp_diff, },
+ { "post-link", "Finalize klp symbols/relocs after module linking", cmd_klp_post_link, },
+};
+
+static void cmd_klp_usage(void)
+{
+ fprintf(stderr, "usage: objtool klp <subcommand> [<options>]\n\n");
+ fprintf(stderr, "Subcommands:\n");
+
+ for (int i = 0; i < ARRAY_SIZE(subcmds); i++) {
+ struct subcmd *cmd = &subcmds[i];
+
+ fprintf(stderr, " %s\t%s\n", cmd->name, cmd->description);
+ }
+
+ exit(1);
+}
+
+int cmd_klp(int argc, const char **argv)
+{
+ argc--;
+ argv++;
+
+	if (!argc)
+		cmd_klp_usage();
+
+	for (int i = 0; i < ARRAY_SIZE(subcmds); i++) {
+		struct subcmd *cmd = &subcmds[i];
+
+		if (!strcmp(cmd->name, argv[0]))
+			return cmd->fn(argc, argv);
+	}
+
+ cmd_klp_usage();
+ return 0;
+}
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index b21b12ec88d9..3f7999317f4d 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -3,6 +3,8 @@
* Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
*/
+#define _GNU_SOURCE /* memmem() */
+#include <fnmatch.h>
#include <string.h>
#include <stdlib.h>
#include <inttypes.h>
@@ -11,10 +13,13 @@
#include <objtool/builtin.h>
#include <objtool/cfi.h>
#include <objtool/arch.h>
+#include <objtool/disas.h>
#include <objtool/check.h>
#include <objtool/special.h>
+#include <objtool/trace.h>
#include <objtool/warn.h>
-#include <objtool/endianness.h>
+#include <objtool/checksum.h>
+#include <objtool/util.h>
#include <linux/objtool_types.h>
#include <linux/hashtable.h>
@@ -22,11 +27,6 @@
#include <linux/static_call_types.h>
#include <linux/string.h>
-struct alternative {
- struct alternative *next;
- struct instruction *insn;
-};
-
static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;
static struct cfi_init_state initial_func_cfi;
@@ -34,6 +34,10 @@ static struct cfi_state init_cfi;
static struct cfi_state func_cfi;
static struct cfi_state force_undefined_cfi;
+struct disas_context *objtool_disas_ctx;
+
+size_t sym_name_max_len;
+
struct instruction *find_insn(struct objtool_file *file,
struct section *sec, unsigned long offset)
{
@@ -106,7 +110,7 @@ static struct instruction *prev_insn_same_sym(struct objtool_file *file,
#define for_each_insn(file, insn) \
for (struct section *__sec, *__fake = (struct section *)1; \
__fake; __fake = NULL) \
- for_each_sec(file, __sec) \
+ for_each_sec(file->elf, __sec) \
sec_for_each_insn(file, __sec, insn)
#define func_for_each_insn(file, func, insn) \
@@ -131,15 +135,6 @@ static struct instruction *prev_insn_same_sym(struct objtool_file *file,
for (insn = next_insn_same_sec(file, insn); insn; \
insn = next_insn_same_sec(file, insn))
-static inline struct symbol *insn_call_dest(struct instruction *insn)
-{
- if (insn->type == INSN_JUMP_DYNAMIC ||
- insn->type == INSN_CALL_DYNAMIC)
- return NULL;
-
- return insn->_call_dest;
-}
-
static inline struct reloc *insn_jump_table(struct instruction *insn)
{
if (insn->type == INSN_JUMP_DYNAMIC ||
@@ -186,20 +181,6 @@ static bool is_sibling_call(struct instruction *insn)
}
/*
- * Checks if a string ends with another.
- */
-static bool str_ends_with(const char *s, const char *sub)
-{
- const int slen = strlen(s);
- const int sublen = strlen(sub);
-
- if (sublen > slen)
- return 0;
-
- return !memcmp(s + slen - sublen, sub, sublen);
-}
-
-/*
* Checks if a function is a Rust "noreturn" one.
*/
static bool is_rust_noreturn(const struct symbol *func)
@@ -217,6 +198,7 @@ static bool is_rust_noreturn(const struct symbol *func)
* these come from the Rust standard library).
*/
return str_ends_with(func->name, "_4core5sliceSp15copy_from_slice17len_mismatch_fail") ||
+ str_ends_with(func->name, "_4core6option13expect_failed") ||
str_ends_with(func->name, "_4core6option13unwrap_failed") ||
str_ends_with(func->name, "_4core6result13unwrap_failed") ||
str_ends_with(func->name, "_4core9panicking5panic") ||
@@ -224,13 +206,15 @@ static bool is_rust_noreturn(const struct symbol *func)
str_ends_with(func->name, "_4core9panicking14panic_explicit") ||
str_ends_with(func->name, "_4core9panicking14panic_nounwind") ||
str_ends_with(func->name, "_4core9panicking18panic_bounds_check") ||
+ str_ends_with(func->name, "_4core9panicking18panic_nounwind_fmt") ||
str_ends_with(func->name, "_4core9panicking19assert_failed_inner") ||
str_ends_with(func->name, "_4core9panicking30panic_null_pointer_dereference") ||
str_ends_with(func->name, "_4core9panicking36panic_misaligned_pointer_dereference") ||
str_ends_with(func->name, "_7___rustc17rust_begin_unwind") ||
strstr(func->name, "_4core9panicking13assert_failed") ||
strstr(func->name, "_4core9panicking11panic_const24panic_const_") ||
- (strstr(func->name, "_4core5slice5index24slice_") &&
+ (strstr(func->name, "_4core5slice5index") &&
+ strstr(func->name, "slice_") &&
str_ends_with(func->name, "_fail"));
}
@@ -259,7 +243,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
if (!func)
return false;
- if (func->bind == STB_GLOBAL || func->bind == STB_WEAK) {
+ if (!is_local_sym(func)) {
if (is_rust_noreturn(func))
return true;
@@ -268,7 +252,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
return true;
}
- if (func->bind == STB_WEAK)
+ if (is_weak_sym(func))
return false;
if (!func->len)
@@ -428,14 +412,13 @@ static int decode_instructions(struct objtool_file *file)
struct symbol *func;
unsigned long offset;
struct instruction *insn;
- int ret;
- for_each_sec(file, sec) {
+ for_each_sec(file->elf, sec) {
struct instruction *insns = NULL;
u8 prev_len = 0;
u8 idx = 0;
- if (!(sec->sh.sh_flags & SHF_EXECINSTR))
+ if (!is_text_sec(sec))
continue;
if (strcmp(sec->name, ".altinstr_replacement") &&
@@ -458,9 +441,9 @@ static int decode_instructions(struct objtool_file *file)
if (!strcmp(sec->name, ".init.text") && !opts.module)
sec->init = true;
- for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
+ for (offset = 0; offset < sec_size(sec); offset += insn->len) {
if (!insns || idx == INSN_CHUNK_MAX) {
- insns = calloc(sizeof(*insn), INSN_CHUNK_SIZE);
+ insns = calloc(INSN_CHUNK_SIZE, sizeof(*insn));
if (!insns) {
ERROR_GLIBC("calloc");
return -1;
@@ -477,11 +460,8 @@ static int decode_instructions(struct objtool_file *file)
insn->offset = offset;
insn->prev_len = prev_len;
- ret = arch_decode_instruction(file, sec, offset,
- sec->sh.sh_size - offset,
- insn);
- if (ret)
- return ret;
+ if (arch_decode_instruction(file, sec, offset, sec_size(sec) - offset, insn))
+ return -1;
prev_len = insn->len;
@@ -498,12 +478,12 @@ static int decode_instructions(struct objtool_file *file)
}
sec_for_each_sym(sec, func) {
- if (func->type != STT_NOTYPE && func->type != STT_FUNC)
+ if (!is_notype_sym(func) && !is_func_sym(func))
continue;
- if (func->offset == sec->sh.sh_size) {
+ if (func->offset == sec_size(sec)) {
/* Heuristic: likely an "end" symbol */
- if (func->type == STT_NOTYPE)
+ if (is_notype_sym(func))
continue;
ERROR("%s(): STT_FUNC at end of section", func->name);
return -1;
@@ -519,7 +499,7 @@ static int decode_instructions(struct objtool_file *file)
sym_for_each_insn(file, func, insn) {
insn->sym = func;
- if (func->type == STT_FUNC &&
+ if (is_func_sym(func) &&
insn->type == INSN_ENDBR &&
list_empty(&insn->call_node)) {
if (insn->offset == func->offset) {
@@ -563,7 +543,7 @@ static int add_pv_ops(struct objtool_file *file, const char *symname)
idx = (reloc_offset(reloc) - sym->offset) / sizeof(unsigned long);
func = reloc->sym;
- if (func->type == STT_SECTION)
+ if (is_sec_sym(func))
func = find_symbol_by_offset(reloc->sym->sec,
reloc_addend(reloc));
if (!func) {
@@ -597,7 +577,7 @@ static int init_pv_ops(struct objtool_file *file)
};
const char *pv_ops;
struct symbol *sym;
- int idx, nr, ret;
+ int idx, nr;
if (!opts.noinstr)
return 0;
@@ -609,7 +589,7 @@ static int init_pv_ops(struct objtool_file *file)
return 0;
nr = sym->len / sizeof(unsigned long);
- file->pv_ops = calloc(sizeof(struct pv_state), nr);
+ file->pv_ops = calloc(nr, sizeof(struct pv_state));
if (!file->pv_ops) {
ERROR_GLIBC("calloc");
return -1;
@@ -619,14 +599,27 @@ static int init_pv_ops(struct objtool_file *file)
INIT_LIST_HEAD(&file->pv_ops[idx].targets);
for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++) {
- ret = add_pv_ops(file, pv_ops);
- if (ret)
- return ret;
+ if (add_pv_ops(file, pv_ops))
+ return -1;
}
return 0;
}
+static bool is_livepatch_module(struct objtool_file *file)
+{
+ struct section *sec;
+
+ if (!opts.module)
+ return false;
+
+ sec = find_section_by_name(file->elf, ".modinfo");
+ if (!sec)
+ return false;
+
+ return memmem(sec->data->d_buf, sec_size(sec), "\0livepatch=Y", 12);
+}
+
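
The .modinfo section packs NUL-terminated key=value strings back to back, so the leading '\0' in the needle anchors the match to the start of an entry ("\0livepatch=Y" is exactly 12 bytes: the NUL plus 11 characters). A minimal standalone sketch of the same check, with illustrative buffer contents:

	#define _GNU_SOURCE		/* memmem() is a GNU extension */
	#include <string.h>
	#include <stdbool.h>

	static bool has_livepatch_tag(const void *buf, size_t size)
	{
		return memmem(buf, size, "\0livepatch=Y", 12) != NULL;
	}

	int main(void)
	{
		/* NUL-separated key=value pairs, as found in .modinfo: */
		static const char modinfo[] =
			"license=GPL\0livepatch=Y\0name=demo";

		return has_livepatch_tag(modinfo, sizeof(modinfo)) ? 0 : 1;
	}
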
static int create_static_call_sections(struct objtool_file *file)
{
struct static_call_site *site;
@@ -638,8 +631,14 @@ static int create_static_call_sections(struct objtool_file *file)
sec = find_section_by_name(file->elf, ".static_call_sites");
if (sec) {
- INIT_LIST_HEAD(&file->static_call_list);
- WARN("file already has .static_call_sites section, skipping");
+ /*
+ * Livepatch modules may have already extracted the static call
+ * site entries to take advantage of vmlinux static call
+ * privileges.
+ */
+ if (!file->klp)
+ WARN("file already has .static_call_sites section, skipping");
+
return 0;
}
@@ -683,7 +682,7 @@ static int create_static_call_sections(struct objtool_file *file)
key_sym = find_symbol_by_name(file->elf, tmp);
if (!key_sym) {
- if (!opts.module) {
+ if (!opts.module || file->klp) {
ERROR("static_call: can't find static_call_key symbol: %s", tmp);
return -1;
}
@@ -826,7 +825,7 @@ static int create_ibt_endbr_seal_sections(struct objtool_file *file)
struct symbol *sym = insn->sym;
*site = 0;
- if (opts.module && sym && sym->type == STT_FUNC &&
+ if (opts.module && sym && is_func_sym(sym) &&
insn->offset == sym->offset &&
(!strcmp(sym->name, "init_module") ||
!strcmp(sym->name, "cleanup_module"))) {
@@ -854,14 +853,13 @@ static int create_cfi_sections(struct objtool_file *file)
sec = find_section_by_name(file->elf, ".cfi_sites");
if (sec) {
- INIT_LIST_HEAD(&file->call_list);
WARN("file already has .cfi_sites section, skipping");
return 0;
}
idx = 0;
- for_each_sym(file, sym) {
- if (sym->type != STT_FUNC)
+ for_each_sym(file->elf, sym) {
+ if (!is_func_sym(sym))
continue;
if (strncmp(sym->name, "__cfi_", 6))
@@ -876,8 +874,8 @@ static int create_cfi_sections(struct objtool_file *file)
return -1;
idx = 0;
- for_each_sym(file, sym) {
- if (sym->type != STT_FUNC)
+ for_each_sym(file->elf, sym) {
+ if (!is_func_sym(sym))
continue;
if (strncmp(sym->name, "__cfi_", 6))
@@ -903,8 +901,13 @@ static int create_mcount_loc_sections(struct objtool_file *file)
sec = find_section_by_name(file->elf, "__mcount_loc");
if (sec) {
- INIT_LIST_HEAD(&file->mcount_loc_list);
- WARN("file already has __mcount_loc section, skipping");
+ /*
+ * Livepatch modules have already extracted their __mcount_loc
+ * entries to cover the !CONFIG_FTRACE_MCOUNT_USE_OBJTOOL case.
+ */
+ if (!file->klp)
+ WARN("file already has __mcount_loc section, skipping");
+
return 0;
}
@@ -948,7 +951,6 @@ static int create_direct_call_sections(struct objtool_file *file)
sec = find_section_by_name(file->elf, ".call_sites");
if (sec) {
- INIT_LIST_HEAD(&file->call_list);
WARN("file already has .call_sites section, skipping");
return 0;
}
@@ -979,6 +981,59 @@ static int create_direct_call_sections(struct objtool_file *file)
return 0;
}
+#ifdef BUILD_KLP
+static int create_sym_checksum_section(struct objtool_file *file)
+{
+ struct section *sec;
+ struct symbol *sym;
+ unsigned int idx = 0;
+ struct sym_checksum *checksum;
+ size_t entsize = sizeof(struct sym_checksum);
+
+ sec = find_section_by_name(file->elf, ".discard.sym_checksum");
+ if (sec) {
+ if (!opts.dryrun)
+ WARN("file already has .discard.sym_checksum section, skipping");
+
+ return 0;
+ }
+
+ for_each_sym(file->elf, sym)
+ if (sym->csum.checksum)
+ idx++;
+
+ if (!idx)
+ return 0;
+
+ sec = elf_create_section_pair(file->elf, ".discard.sym_checksum", entsize,
+ idx, idx);
+ if (!sec)
+ return -1;
+
+ idx = 0;
+ for_each_sym(file->elf, sym) {
+ if (!sym->csum.checksum)
+ continue;
+
+ if (!elf_init_reloc(file->elf, sec->rsec, idx, idx * entsize,
+ sym, 0, R_TEXT64))
+ return -1;
+
+ checksum = (struct sym_checksum *)sec->data->d_buf + idx;
+ checksum->addr = 0; /* reloc */
+ checksum->checksum = sym->csum.checksum;
+
+ mark_sec_changed(file->elf, sec, true);
+
+ idx++;
+ }
+
+ return 0;
+}
+#else
+static int create_sym_checksum_section(struct objtool_file *file) { return -EINVAL; }
+#endif
+
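
create_sym_checksum_section() above follows objtool's usual count-then-fill shape: one pass to size the new section, a second pass to emit an entry plus a relocation per qualifying symbol. The same shape on a plain array, as a self-contained sketch:

	#include <stdlib.h>

	struct item { int wanted; long val; };

	static long *collect(const struct item *items, size_t n_items,
			     size_t *n_out)
	{
		size_t n = 0, idx = 0;
		long *tab;

		for (size_t i = 0; i < n_items; i++)	/* pass 1: count */
			n += !!items[i].wanted;

		*n_out = n;
		if (!n)
			return NULL;

		tab = calloc(n, sizeof(*tab));		/* size the output once */
		if (!tab)
			return NULL;

		for (size_t i = 0; i < n_items; i++)	/* pass 2: fill */
			if (items[i].wanted)
				tab[idx++] = items[i].val;

		return tab;
	}
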
/*
* Warnings shouldn't be reported for ignored functions.
*/
@@ -1191,8 +1246,8 @@ static const char *uaccess_safe_builtin[] = {
"__ubsan_handle_type_mismatch_v1",
"__ubsan_handle_shift_out_of_bounds",
"__ubsan_handle_load_invalid_value",
- /* STACKLEAK */
- "stackleak_track_stack",
+ /* KSTACK_ERASE */
+ "__sanitizer_cov_stack_depth",
/* TRACE_BRANCH_PROFILING */
"ftrace_likely_update",
/* STACKPROTECTOR */
@@ -1430,9 +1485,14 @@ static void add_return_call(struct objtool_file *file, struct instruction *insn,
}
static bool is_first_func_insn(struct objtool_file *file,
- struct instruction *insn, struct symbol *sym)
+ struct instruction *insn)
{
- if (insn->offset == sym->offset)
+ struct symbol *func = insn_func(insn);
+
+ if (!func)
+ return false;
+
+ if (insn->offset == func->offset)
return true;
/* Allow direct CALL/JMP past ENDBR */
@@ -1440,7 +1500,7 @@ static bool is_first_func_insn(struct objtool_file *file,
struct instruction *prev = prev_insn_same_sym(file, insn);
if (prev && prev->type == INSN_ENDBR &&
- insn->offset == sym->offset + prev->len)
+ insn->offset == func->offset + prev->len)
return true;
}
@@ -1448,44 +1508,22 @@ static bool is_first_func_insn(struct objtool_file *file,
}
/*
- * A sibling call is a tail-call to another symbol -- to differentiate from a
- * recursive tail-call which is to the same symbol.
- */
-static bool jump_is_sibling_call(struct objtool_file *file,
- struct instruction *from, struct instruction *to)
-{
- struct symbol *fs = from->sym;
- struct symbol *ts = to->sym;
-
- /* Not a sibling call if from/to a symbol hole */
- if (!fs || !ts)
- return false;
-
- /* Not a sibling call if not targeting the start of a symbol. */
- if (!is_first_func_insn(file, to, ts))
- return false;
-
- /* Disallow sibling calls into STT_NOTYPE */
- if (ts->type == STT_NOTYPE)
- return false;
-
- /* Must not be self to be a sibling */
- return fs->pfunc != ts->pfunc;
-}
-
-/*
* Find the destination instructions for all jumps.
*/
static int add_jump_destinations(struct objtool_file *file)
{
- struct instruction *insn, *jump_dest;
+ struct instruction *insn;
struct reloc *reloc;
- struct section *dest_sec;
- unsigned long dest_off;
- int ret;
for_each_insn(file, insn) {
struct symbol *func = insn_func(insn);
+ struct instruction *dest_insn;
+ struct section *dest_sec;
+ struct symbol *dest_sym;
+ unsigned long dest_off;
+
+ if (!is_static_jump(insn))
+ continue;
if (insn->jump_dest) {
/*
@@ -1494,53 +1532,53 @@ static int add_jump_destinations(struct objtool_file *file)
*/
continue;
}
- if (!is_static_jump(insn))
- continue;
reloc = insn_reloc(file, insn);
if (!reloc) {
dest_sec = insn->sec;
dest_off = arch_jump_destination(insn);
- } else if (reloc->sym->type == STT_SECTION) {
- dest_sec = reloc->sym->sec;
- dest_off = arch_dest_reloc_offset(reloc_addend(reloc));
- } else if (reloc->sym->retpoline_thunk) {
- ret = add_retpoline_call(file, insn);
- if (ret)
- return ret;
- continue;
- } else if (reloc->sym->return_thunk) {
- add_return_call(file, insn, true);
- continue;
- } else if (func) {
- /*
- * External sibling call or internal sibling call with
- * STT_FUNC reloc.
- */
- ret = add_call_dest(file, insn, reloc->sym, true);
- if (ret)
- return ret;
- continue;
- } else if (reloc->sym->sec->idx) {
- dest_sec = reloc->sym->sec;
- dest_off = reloc->sym->sym.st_value +
- arch_dest_reloc_offset(reloc_addend(reloc));
+ dest_sym = dest_sec->sym;
} else {
- /* non-func asm code jumping to another file */
- continue;
+ dest_sym = reloc->sym;
+ if (is_undef_sym(dest_sym)) {
+ if (dest_sym->retpoline_thunk) {
+ if (add_retpoline_call(file, insn))
+ return -1;
+ continue;
+ }
+
+ if (dest_sym->return_thunk) {
+ add_return_call(file, insn, true);
+ continue;
+ }
+
+ /* External symbol */
+ if (func) {
+ /* External sibling call */
+ if (add_call_dest(file, insn, dest_sym, true))
+ return -1;
+ continue;
+ }
+
+ /* Non-func asm code jumping to external symbol */
+ continue;
+ }
+
+ dest_sec = dest_sym->sec;
+ dest_off = dest_sym->offset + arch_insn_adjusted_addend(insn, reloc);
}
- jump_dest = find_insn(file, dest_sec, dest_off);
- if (!jump_dest) {
+ dest_insn = find_insn(file, dest_sec, dest_off);
+ if (!dest_insn) {
struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);
/*
- * This is a special case for retbleed_untrain_ret().
- * It jumps to __x86_return_thunk(), but objtool
- * can't find the thunk's starting RET
- * instruction, because the RET is also in the
- * middle of another instruction. Objtool only
- * knows about the outer instruction.
+ * retbleed_untrain_ret() jumps to
+ * __x86_return_thunk(), but objtool can't find
+ * the thunk's starting RET instruction,
+ * because the RET is also in the middle of
+ * another instruction. Objtool only knows
+ * about the outer instruction.
*/
if (sym && sym->embedded_insn) {
add_return_call(file, insn, false);
@@ -1548,76 +1586,52 @@ static int add_jump_destinations(struct objtool_file *file)
}
/*
- * GCOV/KCOV dead code can jump to the end of the
- * function/section.
+ * GCOV/KCOV dead code can jump to the end of
+ * the function/section.
*/
if (file->ignore_unreachables && func &&
dest_sec == insn->sec &&
dest_off == func->offset + func->len)
continue;
- ERROR_INSN(insn, "can't find jump dest instruction at %s+0x%lx",
- dest_sec->name, dest_off);
+ ERROR_INSN(insn, "can't find jump dest instruction at %s",
+ offstr(dest_sec, dest_off));
return -1;
}
- /*
- * An intra-TU jump in retpoline.o might not have a relocation
- * for its jump dest, in which case the above
- * add_{retpoline,return}_call() didn't happen.
- */
- if (jump_dest->sym && jump_dest->offset == jump_dest->sym->offset) {
- if (jump_dest->sym->retpoline_thunk) {
- ret = add_retpoline_call(file, insn);
- if (ret)
- return ret;
- continue;
- }
- if (jump_dest->sym->return_thunk) {
- add_return_call(file, insn, true);
- continue;
- }
+ if (!dest_sym || is_sec_sym(dest_sym)) {
+ dest_sym = dest_insn->sym;
+ if (!dest_sym)
+ goto set_jump_dest;
}
- /*
- * Cross-function jump.
- */
- if (func && insn_func(jump_dest) && func != insn_func(jump_dest)) {
+ if (dest_sym->retpoline_thunk && dest_insn->offset == dest_sym->offset) {
+ if (add_retpoline_call(file, insn))
+ return -1;
+ continue;
+ }
- /*
- * For GCC 8+, create parent/child links for any cold
- * subfunctions. This is _mostly_ redundant with a
- * similar initialization in read_symbols().
- *
- * If a function has aliases, we want the *first* such
- * function in the symbol table to be the subfunction's
- * parent. In that case we overwrite the
- * initialization done in read_symbols().
- *
- * However this code can't completely replace the
- * read_symbols() code because this doesn't detect the
- * case where the parent function's only reference to a
- * subfunction is through a jump table.
- */
- if (!strstr(func->name, ".cold") &&
- strstr(insn_func(jump_dest)->name, ".cold")) {
- func->cfunc = insn_func(jump_dest);
- insn_func(jump_dest)->pfunc = func;
- }
+ if (dest_sym->return_thunk && dest_insn->offset == dest_sym->offset) {
+ add_return_call(file, insn, true);
+ continue;
}
- if (jump_is_sibling_call(file, insn, jump_dest)) {
- /*
- * Internal sibling call without reloc or with
- * STT_SECTION reloc.
- */
- ret = add_call_dest(file, insn, insn_func(jump_dest), true);
- if (ret)
- return ret;
+ if (!insn->sym || insn->sym->pfunc == dest_sym->pfunc)
+ goto set_jump_dest;
+
+ /*
+ * Internal cross-function jump.
+ */
+
+ if (is_first_func_insn(file, dest_insn)) {
+ /* Internal sibling call */
+ if (add_call_dest(file, insn, dest_sym, true))
+ return -1;
continue;
}
- insn->jump_dest = jump_dest;
+set_jump_dest:
+ insn->jump_dest = dest_insn;
}
return 0;
@@ -1643,7 +1657,6 @@ static int add_call_destinations(struct objtool_file *file)
unsigned long dest_off;
struct symbol *dest;
struct reloc *reloc;
- int ret;
for_each_insn(file, insn) {
struct symbol *func = insn_func(insn);
@@ -1655,9 +1668,8 @@ static int add_call_destinations(struct objtool_file *file)
dest_off = arch_jump_destination(insn);
dest = find_call_destination(insn->sec, dest_off);
- ret = add_call_dest(file, insn, dest, false);
- if (ret)
- return ret;
+ if (add_call_dest(file, insn, dest, false))
+ return -1;
if (func && func->ignore)
continue;
@@ -1667,13 +1679,13 @@ static int add_call_destinations(struct objtool_file *file)
return -1;
}
- if (func && insn_call_dest(insn)->type != STT_FUNC) {
+ if (func && !is_func_sym(insn_call_dest(insn))) {
ERROR_INSN(insn, "unsupported call to non-function");
return -1;
}
- } else if (reloc->sym->type == STT_SECTION) {
- dest_off = arch_dest_reloc_offset(reloc_addend(reloc));
+ } else if (is_sec_sym(reloc->sym)) {
+ dest_off = arch_insn_adjusted_addend(insn, reloc);
dest = find_call_destination(reloc->sym->sec, dest_off);
if (!dest) {
ERROR_INSN(insn, "can't find call dest symbol at %s+0x%lx",
@@ -1681,19 +1693,16 @@ static int add_call_destinations(struct objtool_file *file)
return -1;
}
- ret = add_call_dest(file, insn, dest, false);
- if (ret)
- return ret;
+ if (add_call_dest(file, insn, dest, false))
+ return -1;
} else if (reloc->sym->retpoline_thunk) {
- ret = add_retpoline_call(file, insn);
- if (ret)
- return ret;
+ if (add_retpoline_call(file, insn))
+ return -1;
} else {
- ret = add_call_dest(file, insn, reloc->sym, false);
- if (ret)
- return ret;
+ if (add_call_dest(file, insn, reloc->sym, false))
+ return -1;
}
}
@@ -1742,6 +1751,7 @@ static int handle_group_alt(struct objtool_file *file,
orig_alt_group->last_insn = last_orig_insn;
orig_alt_group->nop = NULL;
orig_alt_group->ignore = orig_insn->ignore_alts;
+ orig_alt_group->feature = 0;
} else {
if (orig_alt_group->last_insn->offset + orig_alt_group->last_insn->len -
orig_alt_group->first_insn->offset != special_alt->orig_len) {
@@ -1781,6 +1791,7 @@ static int handle_group_alt(struct objtool_file *file,
nop->type = INSN_NOP;
nop->sym = orig_insn->sym;
nop->alt_group = new_alt_group;
+ nop->fake = 1;
}
if (!special_alt->new_len) {
@@ -1845,6 +1856,7 @@ end:
new_alt_group->nop = nop;
new_alt_group->ignore = (*new_insn)->ignore_alts;
new_alt_group->cfi = orig_alt_group->cfi;
+ new_alt_group->feature = special_alt->feature;
return 0;
}
@@ -1909,8 +1921,9 @@ static int add_special_section_alts(struct objtool_file *file)
struct list_head special_alts;
struct instruction *orig_insn, *new_insn;
struct special_alt *special_alt, *tmp;
+ enum alternative_type alt_type;
struct alternative *alt;
- int ret;
+ struct alternative *a;
if (special_get_alts(file->elf, &special_alts))
return -1;
@@ -1942,16 +1955,18 @@ static int add_special_section_alts(struct objtool_file *file)
continue;
}
- ret = handle_group_alt(file, special_alt, orig_insn,
- &new_insn);
- if (ret)
- return ret;
+ if (handle_group_alt(file, special_alt, orig_insn, &new_insn))
+ return -1;
+
+ alt_type = ALT_TYPE_INSTRUCTIONS;
} else if (special_alt->jump_or_nop) {
- ret = handle_jump_alt(file, special_alt, orig_insn,
- &new_insn);
- if (ret)
- return ret;
+ if (handle_jump_alt(file, special_alt, orig_insn, &new_insn))
+ return -1;
+
+ alt_type = ALT_TYPE_JUMP_TABLE;
+ } else {
+ alt_type = ALT_TYPE_EX_TABLE;
}
alt = calloc(1, sizeof(*alt));
@@ -1961,8 +1976,20 @@ static int add_special_section_alts(struct objtool_file *file)
}
alt->insn = new_insn;
- alt->next = orig_insn->alts;
- orig_insn->alts = alt;
+ alt->type = alt_type;
+ alt->next = NULL;
+
+ /*
+	 * Store alternatives in the same order in which they were
+	 * defined.
+ */
+ if (!orig_insn->alts) {
+ orig_insn->alts = alt;
+ } else {
+ for (a = orig_insn->alts; a->next; a = a->next)
+ ;
+ a->next = alt;
+ }
list_del(&special_alt->list);
free(special_alt);
@@ -2139,15 +2166,13 @@ static int add_func_jump_tables(struct objtool_file *file,
struct symbol *func)
{
struct instruction *insn;
- int ret;
func_for_each_insn(file, func, insn) {
if (!insn_jump_table(insn))
continue;
- ret = add_jump_table(file, insn);
- if (ret)
- return ret;
+ if (add_jump_table(file, insn))
+ return -1;
}
return 0;
@@ -2161,19 +2186,17 @@ static int add_func_jump_tables(struct objtool_file *file,
static int add_jump_table_alts(struct objtool_file *file)
{
struct symbol *func;
- int ret;
if (!file->rodata)
return 0;
- for_each_sym(file, func) {
- if (func->type != STT_FUNC)
+ for_each_sym(file->elf, func) {
+ if (!is_func_sym(func) || func->alias != func)
continue;
mark_func_jump_tables(file, func);
- ret = add_func_jump_tables(file, func);
- if (ret)
- return ret;
+ if (add_func_jump_tables(file, func))
+ return -1;
}
return 0;
@@ -2207,14 +2230,14 @@ static int read_unwind_hints(struct objtool_file *file)
return -1;
}
- if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
+ if (sec_size(sec) % sizeof(struct unwind_hint)) {
ERROR("struct unwind_hint size mismatch");
return -1;
}
file->hints = true;
- for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) {
+ for (i = 0; i < sec_size(sec) / sizeof(struct unwind_hint); i++) {
hint = (struct unwind_hint *)sec->data->d_buf + i;
reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
@@ -2223,14 +2246,7 @@ static int read_unwind_hints(struct objtool_file *file)
return -1;
}
- if (reloc->sym->type == STT_SECTION) {
- offset = reloc_addend(reloc);
- } else if (reloc->sym->local_label) {
- offset = reloc->sym->offset;
- } else {
- ERROR("unexpected relocation symbol type in %s", sec->rsec->name);
- return -1;
- }
+ offset = reloc->sym->offset + reloc_addend(reloc);
insn = find_insn(file, reloc->sym->sec, offset);
if (!insn) {
@@ -2259,7 +2275,7 @@ static int read_unwind_hints(struct objtool_file *file)
if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);
- if (sym && sym->bind == STB_GLOBAL) {
+ if (sym && is_global_sym(sym)) {
if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) {
ERROR_INSN(insn, "UNWIND_HINT_IRET_REGS without ENDBR");
return -1;
@@ -2297,7 +2313,7 @@ static int read_annotate(struct objtool_file *file,
struct instruction *insn;
struct reloc *reloc;
uint64_t offset;
- int type, ret;
+ int type;
sec = find_section_by_name(file->elf, ".discard.annotate_insn");
if (!sec)
@@ -2315,9 +2331,13 @@ static int read_annotate(struct objtool_file *file,
sec->sh.sh_entsize = 8;
}
- for_each_reloc(sec->rsec, reloc) {
- type = *(u32 *)(sec->data->d_buf + (reloc_idx(reloc) * sec->sh.sh_entsize) + 4);
+ if (sec_num_entries(sec) != sec_num_entries(sec->rsec)) {
+ ERROR("bad .discard.annotate_insn section: missing relocs");
+ return -1;
+ }
+ for_each_reloc(sec->rsec, reloc) {
+ type = annotype(file->elf, sec, reloc);
offset = reloc->sym->offset + reloc_addend(reloc);
insn = find_insn(file, reloc->sym->sec, offset);
@@ -2326,9 +2346,8 @@ static int read_annotate(struct objtool_file *file,
return -1;
}
- ret = func(file, type, insn);
- if (ret < 0)
- return ret;
+ if (func(file, type, insn))
+ return -1;
}
return 0;
@@ -2389,6 +2408,8 @@ static int __annotate_ifc(struct objtool_file *file, int type, struct instructio
static int __annotate_late(struct objtool_file *file, int type, struct instruction *insn)
{
+ struct symbol *sym;
+
switch (type) {
case ANNOTYPE_NOENDBR:
/* early */
@@ -2430,6 +2451,15 @@ static int __annotate_late(struct objtool_file *file, int type, struct instructi
insn->dead_end = false;
break;
+ case ANNOTYPE_NOCFI:
+ sym = insn->sym;
+ if (!sym) {
+ ERROR_INSN(insn, "dodgy NOCFI annotation");
+ return -1;
+ }
+ insn->sym->nocfi = 1;
+ break;
+
default:
ERROR_INSN(insn, "Unknown annotation type: %d", type);
return -1;
@@ -2450,28 +2480,19 @@ static bool is_profiling_func(const char *name)
if (!strncmp(name, "__sanitizer_cov_", 16))
return true;
- /*
- * Some compilers currently do not remove __tsan_func_entry/exit nor
- * __tsan_atomic_signal_fence (used for barrier instrumentation) with
- * the __no_sanitize_thread attribute, remove them. Once the kernel's
- * minimum Clang version is 14.0, this can be removed.
- */
- if (!strncmp(name, "__tsan_func_", 12) ||
- !strcmp(name, "__tsan_atomic_signal_fence"))
- return true;
-
return false;
}
static int classify_symbols(struct objtool_file *file)
{
struct symbol *func;
+ size_t len;
- for_each_sym(file, func) {
- if (func->type == STT_NOTYPE && strstarts(func->name, ".L"))
+ for_each_sym(file->elf, func) {
+ if (is_notype_sym(func) && strstarts(func->name, ".L"))
func->local_label = true;
- if (func->bind != STB_GLOBAL)
+ if (!is_global_sym(func))
continue;
if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
@@ -2492,6 +2513,10 @@ static int classify_symbols(struct objtool_file *file)
if (is_profiling_func(func->name))
func->profiling_func = true;
+
+ len = strlen(func->name);
+ if (len > sym_name_max_len)
+ sym_name_max_len = len;
}
return 0;
@@ -2512,7 +2537,7 @@ static void mark_rodata(struct objtool_file *file)
*
* .rodata.str1.* sections are ignored; they don't contain jump tables.
*/
- for_each_sec(file, sec) {
+ for_each_sec(file->elf, sec) {
if ((!strncmp(sec->name, ".rodata", 7) &&
!strstr(sec->name, ".str1.")) ||
!strncmp(sec->name, ".data.rel.ro", 12)) {
@@ -2524,78 +2549,115 @@ static void mark_rodata(struct objtool_file *file)
file->rodata = found;
}
+static void mark_holes(struct objtool_file *file)
+{
+ struct instruction *insn;
+ bool in_hole = false;
+
+ if (!opts.link)
+ return;
+
+ /*
+ * Whole archive runs might encounter dead code from weak symbols.
+	 * This is where the linker has dropped the weak symbol in
+	 * favour of a regular symbol but left the code in place.
+ */
+ for_each_insn(file, insn) {
+ if (insn->sym || !find_symbol_hole_containing(insn->sec, insn->offset)) {
+ in_hole = false;
+ continue;
+ }
+
+ /* Skip function padding and pfx code */
+ if (!in_hole && insn->type == INSN_NOP)
+ continue;
+
+ in_hole = true;
+ insn->hole = 1;
+
+ /*
+		 * If this hole jumps to a .cold function, mark it ignored.
+ */
+ if (insn->jump_dest) {
+ struct symbol *dest_func = insn_func(insn->jump_dest);
+
+ if (dest_func && dest_func->cold)
+ dest_func->ignore = true;
+ }
+ }
+}
+
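
The weak-symbol situation mark_holes() describes can be reproduced with two translation units (file names hypothetical); the strong definition wins, and in a whole-archive link the loser's bytes can remain in .text with no covering symbol:

	/* weak.c */
	int __attribute__((weak)) frob(void)
	{
		return 0;
	}

	/* strong.c -- this definition wins at link time; depending on how
	 * the objects are combined, the weak body above may be left behind
	 * as symbol-less dead code: exactly the "hole" marked above. */
	int frob(void)
	{
		return 1;
	}
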
+static bool validate_branch_enabled(void)
+{
+ return opts.stackval ||
+ opts.orc ||
+ opts.uaccess ||
+ opts.checksum;
+}
+
static int decode_sections(struct objtool_file *file)
{
- int ret;
+ file->klp = is_livepatch_module(file);
mark_rodata(file);
- ret = init_pv_ops(file);
- if (ret)
- return ret;
+ if (init_pv_ops(file))
+ return -1;
/*
* Must be before add_{jump_call}_destination.
*/
- ret = classify_symbols(file);
- if (ret)
- return ret;
+ if (classify_symbols(file))
+ return -1;
- ret = decode_instructions(file);
- if (ret)
- return ret;
+ if (decode_instructions(file))
+ return -1;
- ret = add_ignores(file);
- if (ret)
- return ret;
+ if (add_ignores(file))
+ return -1;
add_uaccess_safe(file);
- ret = read_annotate(file, __annotate_early);
- if (ret)
- return ret;
+ if (read_annotate(file, __annotate_early))
+ return -1;
/*
* Must be before add_jump_destinations(), which depends on 'func'
* being set for alternatives, to enable proper sibling call detection.
*/
- if (opts.stackval || opts.orc || opts.uaccess || opts.noinstr) {
- ret = add_special_section_alts(file);
- if (ret)
- return ret;
+ if (validate_branch_enabled() || opts.noinstr || opts.hack_jump_label || opts.disas) {
+ if (add_special_section_alts(file))
+ return -1;
}
- ret = add_jump_destinations(file);
- if (ret)
- return ret;
+ if (add_jump_destinations(file))
+ return -1;
/*
* Must be before add_call_destination(); it changes INSN_CALL to
* INSN_JUMP.
*/
- ret = read_annotate(file, __annotate_ifc);
- if (ret)
- return ret;
+ if (read_annotate(file, __annotate_ifc))
+ return -1;
- ret = add_call_destinations(file);
- if (ret)
- return ret;
+ if (add_call_destinations(file))
+ return -1;
- ret = add_jump_table_alts(file);
- if (ret)
- return ret;
+ if (add_jump_table_alts(file))
+ return -1;
- ret = read_unwind_hints(file);
- if (ret)
- return ret;
+ if (read_unwind_hints(file))
+ return -1;
+
+ /* Must be after add_jump_destinations() */
+ mark_holes(file);
/*
* Must be after add_call_destinations() such that it can override
* dead_end_function() marks.
*/
- ret = read_annotate(file, __annotate_late);
- if (ret)
- return ret;
+ if (read_annotate(file, __annotate_late))
+ return -1;
return 0;
}
@@ -3220,18 +3282,19 @@ static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn
return 0;
}
-static int handle_insn_ops(struct instruction *insn,
- struct instruction *next_insn,
- struct insn_state *state)
+static int noinline handle_insn_ops(struct instruction *insn,
+ struct instruction *next_insn,
+ struct insn_state *state)
{
+ struct insn_state prev_state __maybe_unused = *state;
struct stack_op *op;
- int ret;
+ int ret = 0;
for (op = insn->stack_ops; op; op = op->next) {
ret = update_cfi_state(insn, next_insn, &state->cfi, op);
if (ret)
- return ret;
+ goto done;
if (!opts.uaccess || !insn->alt_group)
continue;
@@ -3241,7 +3304,8 @@ static int handle_insn_ops(struct instruction *insn,
state->uaccess_stack = 1;
} else if (state->uaccess_stack >> 31) {
WARN_INSN(insn, "PUSHF stack exhausted");
- return 1;
+ ret = 1;
+ goto done;
}
state->uaccess_stack <<= 1;
state->uaccess_stack |= state->uaccess;
@@ -3257,7 +3321,10 @@ static int handle_insn_ops(struct instruction *insn,
}
}
- return 0;
+done:
+ TRACE_INSN_STATE(insn, &prev_state, state);
+
+ return ret;
}
static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
@@ -3349,7 +3416,7 @@ static bool pv_call_dest(struct objtool_file *file, struct instruction *insn)
if (!reloc || strcmp(reloc->sym->name, "pv_ops"))
return false;
- idx = (arch_dest_reloc_offset(reloc_addend(reloc)) / sizeof(void *));
+ idx = arch_insn_adjusted_addend(insn, reloc) / sizeof(void *);
if (file->pv_ops[idx].clean)
return true;
@@ -3511,9 +3578,14 @@ static bool skip_alt_group(struct instruction *insn)
{
struct instruction *alt_insn = insn->alts ? insn->alts->insn : NULL;
+ if (!insn->alt_group)
+ return false;
+
/* ANNOTATE_IGNORE_ALTERNATIVE */
- if (insn->alt_group && insn->alt_group->ignore)
+ if (insn->alt_group->ignore) {
+ TRACE_ALT(insn, "alt group ignored");
return true;
+ }
/*
* For NOP patched with CLAC/STAC, only follow the latter to avoid
@@ -3535,256 +3607,398 @@ static bool skip_alt_group(struct instruction *insn)
return alt_insn->type == INSN_CLAC || alt_insn->type == INSN_STAC;
}
-/*
- * Follow the branch starting at the given instruction, and recursively follow
- * any other branches (jumps). Meanwhile, track the frame pointer state at
- * each instruction and validate all the rules described in
- * tools/objtool/Documentation/objtool.txt.
- */
-static int validate_branch(struct objtool_file *file, struct symbol *func,
- struct instruction *insn, struct insn_state state)
+static int checksum_debug_init(struct objtool_file *file)
{
- struct alternative *alt;
- struct instruction *next_insn, *prev_insn = NULL;
- struct section *sec;
- u8 visited;
- int ret;
+ char *dup, *s;
- if (func && func->ignore)
+ if (!opts.debug_checksum)
return 0;
- sec = insn->sec;
+ dup = strdup(opts.debug_checksum);
+ if (!dup) {
+ ERROR_GLIBC("strdup");
+ return -1;
+ }
- while (1) {
- next_insn = next_insn_to_validate(file, insn);
+ s = dup;
+ while (*s) {
+ struct symbol *func;
+ char *comma;
- if (func && insn_func(insn) && func != insn_func(insn)->pfunc) {
- /* Ignore KCFI type preambles, which always fall through */
- if (!strncmp(func->name, "__cfi_", 6) ||
- !strncmp(func->name, "__pfx_", 6))
- return 0;
+ comma = strchr(s, ',');
+ if (comma)
+ *comma = '\0';
- if (file->ignore_unreachables)
- return 0;
+ func = find_symbol_by_name(file->elf, s);
+ if (!func || !is_func_sym(func))
+ WARN("--debug-checksum: can't find '%s'", s);
+ else
+ func->debug_checksum = 1;
- WARN("%s() falls through to next function %s()",
- func->name, insn_func(insn)->name);
- func->warned = 1;
+ if (!comma)
+ break;
- return 1;
- }
+ s = comma + 1;
+ }
- visited = VISITED_BRANCH << state.uaccess;
- if (insn->visited & VISITED_BRANCH_MASK) {
- if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
- return 1;
+ free(dup);
+ return 0;
+}
- if (insn->visited & visited)
- return 0;
- } else {
- nr_insns_visited++;
- }
+static void checksum_update_insn(struct objtool_file *file, struct symbol *func,
+ struct instruction *insn)
+{
+ struct reloc *reloc = insn_reloc(file, insn);
+ unsigned long offset;
+ struct symbol *sym;
- if (state.noinstr)
- state.instr += insn->instr;
+ if (insn->fake)
+ return;
- if (insn->hint) {
- if (insn->restore) {
- struct instruction *save_insn, *i;
+ checksum_update(func, insn, insn->sec->data->d_buf + insn->offset, insn->len);
- i = insn;
- save_insn = NULL;
+ if (!reloc) {
+ struct symbol *call_dest = insn_call_dest(insn);
- sym_for_each_insn_continue_reverse(file, func, i) {
- if (i->save) {
- save_insn = i;
- break;
- }
- }
+ if (call_dest)
+ checksum_update(func, insn, call_dest->demangled_name,
+ strlen(call_dest->demangled_name));
+ return;
+ }
- if (!save_insn) {
- WARN_INSN(insn, "no corresponding CFI save for CFI restore");
- return 1;
+ sym = reloc->sym;
+ offset = arch_insn_adjusted_addend(insn, reloc);
+
+ if (is_string_sec(sym->sec)) {
+ char *str;
+
+ str = sym->sec->data->d_buf + sym->offset + offset;
+ checksum_update(func, insn, str, strlen(str));
+ return;
+ }
+
+ if (is_sec_sym(sym)) {
+ sym = find_symbol_containing(reloc->sym->sec, offset);
+ if (!sym)
+ return;
+
+ offset -= sym->offset;
+ }
+
+ checksum_update(func, insn, sym->demangled_name, strlen(sym->demangled_name));
+ checksum_update(func, insn, &offset, sizeof(offset));
+}
+
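
checksum_update() comes from the new objtool/checksum.h included above (its definition is not part of this hunk); the important property of checksum_update_insn() is that relocated operands are folded in as symbol name plus offset rather than raw bytes, so the checksum survives layout changes. A rough standalone sketch of that idea, using FNV-1a purely as a stand-in for whatever hash the real code uses:

	#include <stdint.h>
	#include <string.h>

	static void csum_update(uint64_t *csum, const void *data, size_t len)
	{
		const unsigned char *p = data;

		for (size_t i = 0; i < len; i++) {	/* FNV-1a */
			*csum ^= p[i];
			*csum *= 0x100000001b3ULL;
		}
	}

	/*
	 * Hashing "name + offset" keeps the result independent of where
	 * the target symbol lands in the final image.
	 */
	static void csum_update_symref(uint64_t *csum, const char *name,
				       unsigned long offset)
	{
		csum_update(csum, name, strlen(name));
		csum_update(csum, &offset, sizeof(offset));
	}
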
+static int validate_branch(struct objtool_file *file, struct symbol *func,
+ struct instruction *insn, struct insn_state state);
+static int do_validate_branch(struct objtool_file *file, struct symbol *func,
+ struct instruction *insn, struct insn_state state);
+
+static int validate_insn(struct objtool_file *file, struct symbol *func,
+ struct instruction *insn, struct insn_state *statep,
+ struct instruction *prev_insn, struct instruction *next_insn,
+ bool *dead_end)
+{
+ char *alt_name __maybe_unused = NULL;
+ struct alternative *alt;
+ u8 visited;
+ int ret;
+
+ /*
+ * Any returns before the end of this function are effectively dead
+ * ends, i.e. validate_branch() has reached the end of the branch.
+ */
+ *dead_end = true;
+
+ visited = VISITED_BRANCH << statep->uaccess;
+ if (insn->visited & VISITED_BRANCH_MASK) {
+ if (!insn->hint && !insn_cfi_match(insn, &statep->cfi))
+ return 1;
+
+ if (insn->visited & visited) {
+ TRACE_INSN(insn, "already visited");
+ return 0;
+ }
+ } else {
+ nr_insns_visited++;
+ }
+
+ if (statep->noinstr)
+ statep->instr += insn->instr;
+
+ if (insn->hint) {
+ if (insn->restore) {
+ struct instruction *save_insn, *i;
+
+ i = insn;
+ save_insn = NULL;
+
+ sym_for_each_insn_continue_reverse(file, func, i) {
+ if (i->save) {
+ save_insn = i;
+ break;
}
+ }
- if (!save_insn->visited) {
- /*
- * If the restore hint insn is at the
- * beginning of a basic block and was
- * branched to from elsewhere, and the
- * save insn hasn't been visited yet,
- * defer following this branch for now.
- * It will be seen later via the
- * straight-line path.
- */
- if (!prev_insn)
- return 0;
+ if (!save_insn) {
+ WARN_INSN(insn, "no corresponding CFI save for CFI restore");
+ return 1;
+ }
- WARN_INSN(insn, "objtool isn't smart enough to handle this CFI save/restore combo");
- return 1;
+ if (!save_insn->visited) {
+ /*
+ * If the restore hint insn is at the
+ * beginning of a basic block and was
+ * branched to from elsewhere, and the
+ * save insn hasn't been visited yet,
+ * defer following this branch for now.
+ * It will be seen later via the
+ * straight-line path.
+ */
+ if (!prev_insn) {
+ TRACE_INSN(insn, "defer restore");
+ return 0;
}
- insn->cfi = save_insn->cfi;
- nr_cfi_reused++;
+ WARN_INSN(insn, "objtool isn't smart enough to handle this CFI save/restore combo");
+ return 1;
}
- state.cfi = *insn->cfi;
+ insn->cfi = save_insn->cfi;
+ nr_cfi_reused++;
+ }
+
+ statep->cfi = *insn->cfi;
+ } else {
+ /* XXX track if we actually changed statep->cfi */
+
+ if (prev_insn && !cficmp(prev_insn->cfi, &statep->cfi)) {
+ insn->cfi = prev_insn->cfi;
+ nr_cfi_reused++;
} else {
- /* XXX track if we actually changed state.cfi */
+ insn->cfi = cfi_hash_find_or_add(&statep->cfi);
+ }
+ }
- if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) {
- insn->cfi = prev_insn->cfi;
- nr_cfi_reused++;
- } else {
- insn->cfi = cfi_hash_find_or_add(&state.cfi);
+ insn->visited |= visited;
+
+ if (propagate_alt_cfi(file, insn))
+ return 1;
+
+ if (insn->alts) {
+ for (alt = insn->alts; alt; alt = alt->next) {
+ TRACE_ALT_BEGIN(insn, alt, alt_name);
+ ret = validate_branch(file, func, alt->insn, *statep);
+ TRACE_ALT_END(insn, alt, alt_name);
+ if (ret) {
+ BT_INSN(insn, "(alt)");
+ return ret;
}
}
+ TRACE_ALT_INFO_NOADDR(insn, "/ ", "DEFAULT");
+ }
- insn->visited |= visited;
+ if (skip_alt_group(insn))
+ return 0;
- if (propagate_alt_cfi(file, insn))
+ if (handle_insn_ops(insn, next_insn, statep))
+ return 1;
+
+ switch (insn->type) {
+
+ case INSN_RETURN:
+ TRACE_INSN(insn, "return");
+ return validate_return(func, insn, statep);
+
+ case INSN_CALL:
+ case INSN_CALL_DYNAMIC:
+ if (insn->type == INSN_CALL)
+ TRACE_INSN(insn, "call");
+ else
+ TRACE_INSN(insn, "indirect call");
+
+ ret = validate_call(file, insn, statep);
+ if (ret)
+ return ret;
+
+ if (opts.stackval && func && !is_special_call(insn) &&
+ !has_valid_stack_frame(statep)) {
+ WARN_INSN(insn, "call without frame pointer save/setup");
return 1;
+ }
- if (insn->alts) {
- for (alt = insn->alts; alt; alt = alt->next) {
- ret = validate_branch(file, func, alt->insn, state);
- if (ret) {
- BT_INSN(insn, "(alt)");
- return ret;
- }
+ break;
+
+ case INSN_JUMP_CONDITIONAL:
+ case INSN_JUMP_UNCONDITIONAL:
+ if (is_sibling_call(insn)) {
+ TRACE_INSN(insn, "sibling call");
+ ret = validate_sibling_call(file, insn, statep);
+ if (ret)
+ return ret;
+
+ } else if (insn->jump_dest) {
+ if (insn->type == INSN_JUMP_UNCONDITIONAL)
+ TRACE_INSN(insn, "unconditional jump");
+ else
+ TRACE_INSN(insn, "jump taken");
+
+ ret = validate_branch(file, func, insn->jump_dest, *statep);
+ if (ret) {
+ BT_INSN(insn, "(branch)");
+ return ret;
}
}
- if (skip_alt_group(insn))
+ if (insn->type == INSN_JUMP_UNCONDITIONAL)
return 0;
- if (handle_insn_ops(insn, next_insn, &state))
- return 1;
-
- switch (insn->type) {
-
- case INSN_RETURN:
- return validate_return(func, insn, &state);
+ TRACE_INSN(insn, "jump not taken");
+ break;
- case INSN_CALL:
- case INSN_CALL_DYNAMIC:
- ret = validate_call(file, insn, &state);
+ case INSN_JUMP_DYNAMIC:
+ case INSN_JUMP_DYNAMIC_CONDITIONAL:
+ TRACE_INSN(insn, "indirect jump");
+ if (is_sibling_call(insn)) {
+ ret = validate_sibling_call(file, insn, statep);
if (ret)
return ret;
+ }
- if (opts.stackval && func && !is_special_call(insn) &&
- !has_valid_stack_frame(&state)) {
- WARN_INSN(insn, "call without frame pointer save/setup");
- return 1;
- }
+ if (insn->type == INSN_JUMP_DYNAMIC)
+ return 0;
- break;
+ break;
- case INSN_JUMP_CONDITIONAL:
- case INSN_JUMP_UNCONDITIONAL:
- if (is_sibling_call(insn)) {
- ret = validate_sibling_call(file, insn, &state);
- if (ret)
- return ret;
+ case INSN_SYSCALL:
+ TRACE_INSN(insn, "syscall");
+ if (func && (!next_insn || !next_insn->hint)) {
+ WARN_INSN(insn, "unsupported instruction in callable function");
+ return 1;
+ }
- } else if (insn->jump_dest) {
- ret = validate_branch(file, func,
- insn->jump_dest, state);
- if (ret) {
- BT_INSN(insn, "(branch)");
- return ret;
- }
- }
+ break;
- if (insn->type == INSN_JUMP_UNCONDITIONAL)
- return 0;
+ case INSN_SYSRET:
+ TRACE_INSN(insn, "sysret");
+ if (func && (!next_insn || !next_insn->hint)) {
+ WARN_INSN(insn, "unsupported instruction in callable function");
+ return 1;
+ }
+ return 0;
+
+ case INSN_STAC:
+ TRACE_INSN(insn, "stac");
+ if (!opts.uaccess)
break;
- case INSN_JUMP_DYNAMIC:
- case INSN_JUMP_DYNAMIC_CONDITIONAL:
- if (is_sibling_call(insn)) {
- ret = validate_sibling_call(file, insn, &state);
- if (ret)
- return ret;
- }
+ if (statep->uaccess) {
+ WARN_INSN(insn, "recursive UACCESS enable");
+ return 1;
+ }
- if (insn->type == INSN_JUMP_DYNAMIC)
- return 0;
+ statep->uaccess = true;
+ break;
+ case INSN_CLAC:
+ TRACE_INSN(insn, "clac");
+ if (!opts.uaccess)
break;
- case INSN_SYSCALL:
- if (func && (!next_insn || !next_insn->hint)) {
- WARN_INSN(insn, "unsupported instruction in callable function");
- return 1;
- }
+ if (!statep->uaccess && func) {
+ WARN_INSN(insn, "redundant UACCESS disable");
+ return 1;
+ }
- break;
+ if (func_uaccess_safe(func) && !statep->uaccess_stack) {
+ WARN_INSN(insn, "UACCESS-safe disables UACCESS");
+ return 1;
+ }
- case INSN_SYSRET:
- if (func && (!next_insn || !next_insn->hint)) {
- WARN_INSN(insn, "unsupported instruction in callable function");
- return 1;
- }
+ statep->uaccess = false;
+ break;
- return 0;
+ case INSN_STD:
+ TRACE_INSN(insn, "std");
+ if (statep->df) {
+ WARN_INSN(insn, "recursive STD");
+ return 1;
+ }
- case INSN_STAC:
- if (!opts.uaccess)
- break;
+ statep->df = true;
+ break;
- if (state.uaccess) {
- WARN_INSN(insn, "recursive UACCESS enable");
- return 1;
- }
+ case INSN_CLD:
+ TRACE_INSN(insn, "cld");
+ if (!statep->df && func) {
+ WARN_INSN(insn, "redundant CLD");
+ return 1;
+ }
- state.uaccess = true;
- break;
+ statep->df = false;
+ break;
- case INSN_CLAC:
- if (!opts.uaccess)
- break;
+ default:
+ break;
+ }
- if (!state.uaccess && func) {
- WARN_INSN(insn, "redundant UACCESS disable");
- return 1;
- }
+ if (insn->dead_end)
+ TRACE_INSN(insn, "dead end");
- if (func_uaccess_safe(func) && !state.uaccess_stack) {
- WARN_INSN(insn, "UACCESS-safe disables UACCESS");
- return 1;
- }
+ *dead_end = insn->dead_end;
+ return 0;
+}
- state.uaccess = false;
- break;
+/*
+ * Follow the branch starting at the given instruction, and recursively follow
+ * any other branches (jumps). Meanwhile, track the frame pointer state at
+ * each instruction and validate all the rules described in
+ * tools/objtool/Documentation/objtool.txt.
+ */
+static int do_validate_branch(struct objtool_file *file, struct symbol *func,
+ struct instruction *insn, struct insn_state state)
+{
+ struct instruction *next_insn, *prev_insn = NULL;
+ bool dead_end;
+ int ret;
- case INSN_STD:
- if (state.df) {
- WARN_INSN(insn, "recursive STD");
- return 1;
- }
+ if (func && func->ignore)
+ return 0;
- state.df = true;
- break;
+ do {
+ insn->trace = 0;
+ next_insn = next_insn_to_validate(file, insn);
- case INSN_CLD:
- if (!state.df && func) {
- WARN_INSN(insn, "redundant CLD");
- return 1;
- }
+ if (opts.checksum && func && insn->sec)
+ checksum_update_insn(file, func, insn);
- state.df = false;
- break;
+ if (func && insn_func(insn) && func != insn_func(insn)->pfunc) {
+ /* Ignore KCFI type preambles, which always fall through */
+ if (is_prefix_func(func))
+ return 0;
- default:
- break;
+ if (file->ignore_unreachables)
+ return 0;
+
+ WARN("%s() falls through to next function %s()",
+ func->name, insn_func(insn)->name);
+ func->warned = 1;
+
+ return 1;
}
- if (insn->dead_end)
- return 0;
+ ret = validate_insn(file, func, insn, &state, prev_insn, next_insn,
+ &dead_end);
+
+ if (!insn->trace) {
+ if (ret)
+ TRACE_INSN(insn, "warning (%d)", ret);
+ else
+ TRACE_INSN(insn, NULL);
+ }
- if (!next_insn) {
+ if (!dead_end && !next_insn) {
if (state.cfi.cfa.base == CFI_UNDEFINED)
return 0;
if (file->ignore_unreachables)
@@ -3792,15 +4006,28 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
WARN("%s%sunexpected end of section %s",
func ? func->name : "", func ? "(): " : "",
- sec->name);
+ insn->sec->name);
return 1;
}
prev_insn = insn;
insn = next_insn;
- }
- return 0;
+ } while (!dead_end);
+
+ return ret;
+}
+
+static int validate_branch(struct objtool_file *file, struct symbol *func,
+ struct instruction *insn, struct insn_state state)
+{
+ int ret;
+
+ trace_depth_inc();
+ ret = do_validate_branch(file, func, insn, state);
+ trace_depth_dec();
+
+ return ret;
}
static int validate_unwind_hint(struct objtool_file *file,
@@ -3808,7 +4035,13 @@ static int validate_unwind_hint(struct objtool_file *file,
struct insn_state *state)
{
if (insn->hint && !insn->visited) {
- int ret = validate_branch(file, insn_func(insn), insn, *state);
+ struct symbol *func = insn_func(insn);
+ int ret;
+
+ if (opts.checksum)
+ checksum_init(func);
+
+ ret = validate_branch(file, func, insn, *state);
if (ret)
BT_INSN(insn, "<=== (hint)");
return ret;
@@ -3999,6 +4232,37 @@ static int validate_retpoline(struct objtool_file *file)
warnings++;
}
+ if (!opts.cfi)
+ return warnings;
+
+ /*
+ * kCFI call sites look like:
+ *
+ * movl $(-0x12345678), %r10d
+ * addl -4(%r11), %r10d
+ * jz 1f
+ * ud2
+ * 1: cs call __x86_indirect_thunk_r11
+ *
+	 * Verify all indirect calls are kCFI-adorned by checking for the
+ * UD2. Notably, doing __nocfi calls to regular (cfi) functions is
+ * broken.
+ */
+ list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
+ struct symbol *sym = insn->sym;
+
+ if (sym && (sym->type == STT_NOTYPE ||
+ sym->type == STT_FUNC) && !sym->nocfi) {
+ struct instruction *prev =
+ prev_insn_same_sym(file, insn);
+
+ if (!prev || prev->type != INSN_BUG) {
+ WARN_INSN(insn, "no-cfi indirect call!");
+ warnings++;
+ }
+ }
+ }
+
return warnings;
}
@@ -4021,7 +4285,8 @@ static bool ignore_unreachable_insn(struct objtool_file *file, struct instructio
struct instruction *prev_insn;
int i;
- if (insn->type == INSN_NOP || insn->type == INSN_TRAP || (func && func->ignore))
+ if (insn->type == INSN_NOP || insn->type == INSN_TRAP ||
+ insn->hole || (func && func->ignore))
return true;
/*
@@ -4032,47 +4297,6 @@ static bool ignore_unreachable_insn(struct objtool_file *file, struct instructio
!strcmp(insn->sec->name, ".altinstr_aux"))
return true;
- /*
- * Whole archive runs might encounter dead code from weak symbols.
- * This is where the linker will have dropped the weak symbol in
- * favour of a regular symbol, but leaves the code in place.
- *
- * In this case we'll find a piece of code (whole function) that is not
- * covered by a !section symbol. Ignore them.
- */
- if (opts.link && !func) {
- int size = find_symbol_hole_containing(insn->sec, insn->offset);
- unsigned long end = insn->offset + size;
-
- if (!size) /* not a hole */
- return false;
-
- if (size < 0) /* hole until the end */
- return true;
-
- sec_for_each_insn_continue(file, insn) {
- /*
- * If we reach a visited instruction at or before the
- * end of the hole, ignore the unreachable.
- */
- if (insn->visited)
- return true;
-
- if (insn->offset >= end)
- break;
-
- /*
- * If this hole jumps to a .cold function, mark it ignore too.
- */
- if (insn->jump_dest && insn_func(insn->jump_dest) &&
- strstr(insn_func(insn->jump_dest)->name, ".cold")) {
- insn_func(insn->jump_dest)->ignore = true;
- }
- }
-
- return false;
- }
-
if (!func)
return false;
@@ -4124,14 +4348,54 @@ static bool ignore_unreachable_insn(struct objtool_file *file, struct instructio
return false;
}
-static int add_prefix_symbol(struct objtool_file *file, struct symbol *func)
+/*
+ * For FineIBT or kCFI, a certain number of bytes preceding the function may be
+ * NOPs. Those NOPs may be rewritten at runtime and executed, so give them a
+ * proper function name: __pfx_<func>.
+ *
+ * The NOPs may not exist for the following cases:
+ *
+ * - compiler cloned functions (*.cold, *.part0, etc)
+ * - asm functions created with inline asm or without SYM_FUNC_START()
+ *
+ * Also, the function may already have a prefix from a previous objtool run
+ * (livepatch extracted functions, or manually running objtool multiple times).
+ *
+ * So return 0 if the NOPs are missing or the function already has a prefix
+ * symbol.
+ */
+static int create_prefix_symbol(struct objtool_file *file, struct symbol *func)
{
struct instruction *insn, *prev;
+ char name[SYM_NAME_LEN];
struct cfi_state *cfi;
+ if (!is_func_sym(func) || is_prefix_func(func) ||
+ func->cold || func->static_call_tramp)
+ return 0;
+
+	if (strlen(func->name) + sizeof("__pfx_") > SYM_NAME_LEN) {
+ WARN("%s: symbol name too long, can't create __pfx_ symbol",
+ func->name);
+ return 0;
+ }
+
+ if (snprintf_check(name, SYM_NAME_LEN, "__pfx_%s", func->name))
+ return -1;
+
+ if (file->klp) {
+ struct symbol *pfx;
+
+ pfx = find_symbol_by_offset(func->sec, func->offset - opts.prefix);
+ if (pfx && is_prefix_func(pfx) && !strcmp(pfx->name, name))
+ return 0;
+ }
+
insn = find_insn(file, func->sec, func->offset);
- if (!insn)
+ if (!insn) {
+ WARN("%s: can't find starting instruction", func->name);
return -1;
+ }
for (prev = prev_insn_same_sec(file, insn);
prev;
@@ -4139,22 +4403,27 @@ static int add_prefix_symbol(struct objtool_file *file, struct symbol *func)
u64 offset;
if (prev->type != INSN_NOP)
- return -1;
+ return 0;
offset = func->offset - prev->offset;
if (offset > opts.prefix)
- return -1;
+ return 0;
if (offset < opts.prefix)
continue;
- elf_create_prefix_symbol(file->elf, func, opts.prefix);
+ if (!elf_create_symbol(file->elf, name, func->sec,
+ GELF_ST_BIND(func->sym.st_info),
+ GELF_ST_TYPE(func->sym.st_info),
+ prev->offset, opts.prefix))
+ return -1;
+
break;
}
if (!prev)
- return -1;
+ return 0;
if (!insn->cfi) {
/*
@@ -4172,20 +4441,18 @@ static int add_prefix_symbol(struct objtool_file *file, struct symbol *func)
return 0;
}
-static int add_prefix_symbols(struct objtool_file *file)
+static int create_prefix_symbols(struct objtool_file *file)
{
struct section *sec;
struct symbol *func;
- for_each_sec(file, sec) {
- if (!(sec->sh.sh_flags & SHF_EXECINSTR))
+ for_each_sec(file->elf, sec) {
+ if (!is_text_sec(sec))
continue;
sec_for_each_sym(sec, func) {
- if (func->type != STT_FUNC)
- continue;
-
- add_prefix_symbol(file, func);
+ if (create_prefix_symbol(file, func))
+ return -1;
}
}
@@ -4196,6 +4463,7 @@ static int validate_symbol(struct objtool_file *file, struct section *sec,
struct symbol *sym, struct insn_state *state)
{
struct instruction *insn;
+ struct symbol *func;
int ret;
if (!sym->len) {
@@ -4213,9 +4481,26 @@ static int validate_symbol(struct objtool_file *file, struct section *sec,
if (opts.uaccess)
state->uaccess = sym->uaccess_safe;
- ret = validate_branch(file, insn_func(insn), insn, *state);
+ func = insn_func(insn);
+
+ if (opts.checksum)
+ checksum_init(func);
+
+ if (opts.trace && !fnmatch(opts.trace, sym->name, 0)) {
+ trace_enable();
+ TRACE("%s: validation begin\n", sym->name);
+ }
+
+ ret = validate_branch(file, func, insn, *state);
if (ret)
BT_INSN(insn, "<=== (sym)");
+
+ TRACE("%s: validation %s\n\n", sym->name, ret ? "failed" : "end");
+ trace_disable();
+
+ if (opts.checksum)
+ checksum_finish(func);
+
return ret;
}
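
opts.trace is matched against symbol names with fnmatch(), so shell-style wildcards choose which functions get a validation trace. For example (pattern and names illustrative):

	#include <assert.h>
	#include <fnmatch.h>

	int main(void)
	{
		/* fnmatch() returns 0 on a match: */
		assert(fnmatch("kmalloc*", "kmalloc_node", 0) == 0);
		assert(fnmatch("kmalloc*", "kfree", 0) != 0);
		return 0;
	}
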
@@ -4226,7 +4511,7 @@ static int validate_section(struct objtool_file *file, struct section *sec)
int warnings = 0;
sec_for_each_sym(sec, func) {
- if (func->type != STT_FUNC)
+ if (!is_func_sym(func))
continue;
init_insn_state(file, &state, sec);
@@ -4269,8 +4554,8 @@ static int validate_functions(struct objtool_file *file)
struct section *sec;
int warnings = 0;
- for_each_sec(file, sec) {
- if (!(sec->sh.sh_flags & SHF_EXECINSTR))
+ for_each_sec(file->elf, sec) {
+ if (!is_text_sec(sec))
continue;
warnings += validate_section(file, sec);
@@ -4397,12 +4682,7 @@ static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn
reloc_offset(reloc) + 1,
(insn->offset + insn->len) - (reloc_offset(reloc) + 1))) {
- off = reloc->sym->offset;
- if (reloc_type(reloc) == R_X86_64_PC32 ||
- reloc_type(reloc) == R_X86_64_PLT32)
- off += arch_dest_reloc_offset(reloc_addend(reloc));
- else
- off += reloc_addend(reloc);
+ off = reloc->sym->offset + arch_insn_adjusted_addend(insn, reloc);
dest = find_insn(file, reloc->sym->sec, off);
if (!dest)
@@ -4453,10 +4733,10 @@ static int validate_ibt(struct objtool_file *file)
for_each_insn(file, insn)
warnings += validate_ibt_insn(file, insn);
- for_each_sec(file, sec) {
+ for_each_sec(file->elf, sec) {
/* Already done by validate_ibt_insn() */
- if (sec->sh.sh_flags & SHF_EXECINSTR)
+ if (is_text_sec(sec))
continue;
if (!sec->rsec)
@@ -4471,8 +4751,8 @@ static int validate_ibt(struct objtool_file *file)
!strncmp(sec->name, ".debug", 6) ||
!strcmp(sec->name, ".altinstructions") ||
!strcmp(sec->name, ".ibt_endbr_seal") ||
+ !strcmp(sec->name, ".kcfi_traps") ||
!strcmp(sec->name, ".orc_unwind_ip") ||
- !strcmp(sec->name, ".parainstructions") ||
!strcmp(sec->name, ".retpoline_sites") ||
!strcmp(sec->name, ".smp_locks") ||
!strcmp(sec->name, ".static_call_sites") ||
@@ -4481,12 +4761,14 @@ static int validate_ibt(struct objtool_file *file)
!strcmp(sec->name, "__bug_table") ||
!strcmp(sec->name, "__ex_table") ||
!strcmp(sec->name, "__jump_table") ||
+ !strcmp(sec->name, "__klp_funcs") ||
!strcmp(sec->name, "__mcount_loc") ||
- !strcmp(sec->name, ".kcfi_traps") ||
!strcmp(sec->name, ".llvm.call-graph-profile") ||
!strcmp(sec->name, ".llvm_bb_addr_map") ||
!strcmp(sec->name, "__tracepoints") ||
- strstr(sec->name, "__patchable_function_entries"))
+ !strcmp(sec->name, ".return_sites") ||
+ !strcmp(sec->name, ".call_sites") ||
+ !strcmp(sec->name, "__patchable_function_entries"))
continue;
for_each_reloc(sec->rsec, reloc)
@@ -4560,85 +4842,45 @@ static int validate_reachable_instructions(struct objtool_file *file)
return warnings;
}
-/* 'funcs' is a space-separated list of function names */
-static void disas_funcs(const char *funcs)
+__weak bool arch_absolute_reloc(struct elf *elf, struct reloc *reloc)
{
- const char *objdump_str, *cross_compile;
- int size, ret;
- char *cmd;
-
- cross_compile = getenv("CROSS_COMPILE");
- if (!cross_compile)
- cross_compile = "";
-
- objdump_str = "%sobjdump -wdr %s | gawk -M -v _funcs='%s' '"
- "BEGIN { split(_funcs, funcs); }"
- "/^$/ { func_match = 0; }"
- "/<.*>:/ { "
- "f = gensub(/.*<(.*)>:/, \"\\\\1\", 1);"
- "for (i in funcs) {"
- "if (funcs[i] == f) {"
- "func_match = 1;"
- "base = strtonum(\"0x\" $1);"
- "break;"
- "}"
- "}"
- "}"
- "{"
- "if (func_match) {"
- "addr = strtonum(\"0x\" $1);"
- "printf(\"%%04x \", addr - base);"
- "print;"
- "}"
- "}' 1>&2";
-
- /* fake snprintf() to calculate the size */
- size = snprintf(NULL, 0, objdump_str, cross_compile, objname, funcs) + 1;
- if (size <= 0) {
- WARN("objdump string size calculation failed");
- return;
- }
-
- cmd = malloc(size);
+ unsigned int type = reloc_type(reloc);
+ size_t sz = elf_addr_size(elf);
- /* real snprintf() */
- snprintf(cmd, size, objdump_str, cross_compile, objname, funcs);
- ret = system(cmd);
- if (ret) {
- WARN("disassembly failed: %d", ret);
- return;
- }
+ return (sz == 8) ? (type == R_ABS64) : (type == R_ABS32);
}
-static void disas_warned_funcs(struct objtool_file *file)
+static int check_abs_references(struct objtool_file *file)
{
- struct symbol *sym;
- char *funcs = NULL, *tmp;
-
- for_each_sym(file, sym) {
- if (sym->warned) {
- if (!funcs) {
- funcs = malloc(strlen(sym->name) + 1);
- if (!funcs) {
- ERROR_GLIBC("malloc");
- return;
- }
- strcpy(funcs, sym->name);
- } else {
- tmp = malloc(strlen(funcs) + strlen(sym->name) + 2);
- if (!tmp) {
- ERROR_GLIBC("malloc");
- return;
- }
- sprintf(tmp, "%s %s", funcs, sym->name);
- free(funcs);
- funcs = tmp;
+ struct section *sec;
+ struct reloc *reloc;
+ int ret = 0;
+
+ for_each_sec(file->elf, sec) {
+ /* absolute references in non-loadable sections are fine */
+ if (!(sec->sh.sh_flags & SHF_ALLOC))
+ continue;
+
+ /* section must have an associated .rela section */
+ if (!sec->rsec)
+ continue;
+
+ /*
+ * Special case for compiler generated metadata that is not
+ * consumed until after boot.
+ */
+ if (!strcmp(sec->name, "__patchable_function_entries"))
+ continue;
+
+ for_each_reloc(sec->rsec, reloc) {
+ if (arch_absolute_reloc(file->elf, reloc)) {
+ WARN("section %s has absolute relocation at offset 0x%llx",
+ sec->name, (unsigned long long)reloc_offset(reloc));
+ ret++;
}
}
}
-
- if (funcs)
- disas_funcs(funcs);
+ return ret;
}
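
A hedged illustration (not part of the patch): the typical trigger for check_abs_references() is a statically initialized pointer table in a loadable data section, which the toolchain emits with absolute relocations (R_X86_64_64 on x86-64 with the default code model):

	/* illustrative only: each entry needs an absolute relocation in
	 * .data, so an objtool run with --noabs would warn about it */
	extern void handler_a(void), handler_b(void);
	static void (*handlers[])(void) = { handler_a, handler_b };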
struct insn_chunk {
@@ -4669,10 +4911,35 @@ static void free_insns(struct objtool_file *file)
free(chunk->addr);
}
+const char *objtool_disas_insn(struct instruction *insn)
+{
+ struct disas_context *dctx = objtool_disas_ctx;
+
+ if (!dctx)
+ return "";
+
+ disas_insn(dctx, insn);
+ return disas_result(dctx);
+}
+
int check(struct objtool_file *file)
{
+ struct disas_context *disas_ctx = NULL;
int ret = 0, warnings = 0;
+ /*
+ * Create a disassembly context if we might disassemble any
+ * instruction or function.
+ */
+ if (opts.verbose || opts.backtrace || opts.trace || opts.disas) {
+ disas_ctx = disas_context_create(file);
+ if (!disas_ctx) {
+ opts.disas = false;
+ opts.trace = false;
+ }
+ objtool_disas_ctx = disas_ctx;
+ }
+
arch_initial_func_cfi_state(&initial_func_cfi);
init_cfi_state(&init_cfi);
init_cfi_state(&func_cfi);
@@ -4688,6 +4955,10 @@ int check(struct objtool_file *file)
cfi_hash_add(&init_cfi);
cfi_hash_add(&func_cfi);
+ ret = checksum_debug_init(file);
+ if (ret)
+ goto out;
+
ret = decode_sections(file);
if (ret)
goto out;
@@ -4698,7 +4969,7 @@ int check(struct objtool_file *file)
if (opts.retpoline)
warnings += validate_retpoline(file);
- if (opts.stackval || opts.orc || opts.uaccess) {
+ if (validate_branch_enabled()) {
int w = 0;
w += validate_functions(file);
@@ -4763,7 +5034,7 @@ int check(struct objtool_file *file)
}
if (opts.prefix) {
- ret = add_prefix_symbols(file);
+ ret = create_prefix_symbols(file);
if (ret)
goto out;
}
@@ -4774,14 +5045,21 @@ int check(struct objtool_file *file)
goto out;
}
+ if (opts.noabs)
+ warnings += check_abs_references(file);
+
+ if (opts.checksum) {
+ ret = create_sym_checksum_section(file);
+ if (ret)
+ goto out;
+ }
+
if (opts.orc && nr_insns) {
ret = orc_create(file);
if (ret)
goto out;
}
- free_insns(file);
-
if (opts.stats) {
printf("nr_insns_visited: %ld\n", nr_insns_visited);
printf("nr_cfi: %ld\n", nr_cfi);
@@ -4790,18 +5068,32 @@ int check(struct objtool_file *file)
}
out:
- if (!ret && !warnings)
- return 0;
+ if (ret || warnings) {
+ if (opts.werror && warnings)
+ ret = 1;
- if (opts.werror && warnings)
- ret = 1;
+ if (opts.verbose) {
+ if (opts.werror && warnings)
+ WARN("%d warning(s) upgraded to errors", warnings);
+ disas_warned_funcs(disas_ctx);
+ }
+ }
- if (opts.verbose) {
- if (opts.werror && warnings)
- WARN("%d warning(s) upgraded to errors", warnings);
- print_args();
- disas_warned_funcs(file);
+ if (opts.disas)
+ disas_funcs(disas_ctx);
+
+ if (disas_ctx) {
+ disas_context_destroy(disas_ctx);
+ objtool_disas_ctx = NULL;
}
+ free_insns(file);
+
+ if (!ret && !warnings)
+ return 0;
+
+ if (opts.backup && make_backup())
+ return 1;
+
return ret;
}
diff --git a/tools/objtool/disas.c b/tools/objtool/disas.c
new file mode 100644
index 000000000000..2b5059f55e40
--- /dev/null
+++ b/tools/objtool/disas.c
@@ -0,0 +1,1248 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
+ */
+
+#define _GNU_SOURCE
+#include <fnmatch.h>
+
+#include <objtool/arch.h>
+#include <objtool/check.h>
+#include <objtool/disas.h>
+#include <objtool/special.h>
+#include <objtool/warn.h>
+
+#include <bfd.h>
+#include <linux/string.h>
+#include <tools/dis-asm-compat.h>
+
+/*
+ * Size of the buffer for storing the result of disassembling
+ * a single instruction.
+ */
+#define DISAS_RESULT_SIZE 1024
+
+struct disas_context {
+ struct objtool_file *file;
+ struct instruction *insn;
+ bool alt_applied;
+ char result[DISAS_RESULT_SIZE];
+ disassembler_ftype disassembler;
+ struct disassemble_info info;
+};
+
+/*
+ * Maximum number of alternatives
+ */
+#define DISAS_ALT_MAX 5
+
+/*
+ * Maximum number of instructions per alternative
+ */
+#define DISAS_ALT_INSN_MAX 50
+
+/*
+ * Information to disassemble an alternative
+ */
+struct disas_alt {
+ struct instruction *orig_insn; /* original instruction */
+ struct alternative *alt; /* alternative or NULL if default code */
+ char *name; /* name for this alternative */
+ int width; /* formatting width */
+ struct {
+ char *str; /* instruction string */
+ int offset; /* instruction offset */
+ int nops; /* number of nops */
+ } insn[DISAS_ALT_INSN_MAX]; /* alternative instructions */
+ int insn_idx; /* index of the next instruction to print */
+};
+
+#define DALT_DEFAULT(dalt) (!(dalt)->alt)
+#define DALT_INSN(dalt) (DALT_DEFAULT(dalt) ? (dalt)->orig_insn : (dalt)->alt->insn)
+#define DALT_GROUP(dalt) (DALT_INSN(dalt)->alt_group)
+#define DALT_ALTID(dalt) ((dalt)->orig_insn->offset)
+
+#define ALT_FLAGS_SHIFT 16
+#define ALT_FLAG_NOT (1 << 0)
+#define ALT_FLAG_DIRECT_CALL (1 << 1)
+#define ALT_FEATURE_MASK ((1 << ALT_FLAGS_SHIFT) - 1)
+
+static int alt_feature(unsigned int ft_flags)
+{
+ return (ft_flags & ALT_FEATURE_MASK);
+}
+
+static int alt_flags(unsigned int ft_flags)
+{
+ return (ft_flags >> ALT_FLAGS_SHIFT);
+}
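
A quick sketch (the feature number is made up) of how the two halves are packed and recovered:

	/* feature number in the low 16 bits, flags above ALT_FLAGS_SHIFT */
	unsigned int ft_flags = 0x4b | (ALT_FLAG_NOT << ALT_FLAGS_SHIFT);

	alt_feature(ft_flags);	/* -> 0x4b, the CPU feature number */
	alt_flags(ft_flags);	/* -> ALT_FLAG_NOT, shown as a "!" prefix */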
+
+/*
+ * Wrapper around asprintf() to allocate and format a string.
+ * Return the allocated string or NULL on error.
+ */
+static char *strfmt(const char *fmt, ...)
+{
+ va_list ap;
+ char *str;
+ int rv;
+
+ va_start(ap, fmt);
+ rv = vasprintf(&str, fmt, ap);
+ va_end(ap);
+
+ return rv == -1 ? NULL : str;
+}
+
+static int sprint_name(char *str, const char *name, unsigned long offset)
+{
+ int len;
+
+ if (offset)
+ len = sprintf(str, "%s+0x%lx", name, offset);
+ else
+ len = sprintf(str, "%s", name);
+
+ return len;
+}
+
+#define DINFO_FPRINTF(dinfo, ...) \
+ ((*(dinfo)->fprintf_func)((dinfo)->stream, __VA_ARGS__))
+
+static int disas_result_fprintf(struct disas_context *dctx,
+ const char *fmt, va_list ap)
+{
+ char *buf = dctx->result;
+ int avail, len;
+
+ len = strlen(buf);
+ if (len >= DISAS_RESULT_SIZE - 1) {
+ WARN_FUNC(dctx->insn->sec, dctx->insn->offset,
+ "disassembly buffer is full");
+ return -1;
+ }
+ avail = DISAS_RESULT_SIZE - len;
+
+ len = vsnprintf(buf + len, avail, fmt, ap);
+ if (len < 0 || len >= avail) {
+ WARN_FUNC(dctx->insn->sec, dctx->insn->offset,
+ "disassembly buffer is truncated");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int disas_fprintf(void *stream, const char *fmt, ...)
+{
+ va_list arg;
+ int rv;
+
+ va_start(arg, fmt);
+ rv = disas_result_fprintf(stream, fmt, arg);
+ va_end(arg);
+
+ return rv;
+}
+
+/*
+ * For init_disassemble_info_compat().
+ */
+static int disas_fprintf_styled(void *stream,
+ enum disassembler_style style,
+ const char *fmt, ...)
+{
+ va_list arg;
+ int rv;
+
+ va_start(arg, fmt);
+ rv = disas_result_fprintf(stream, fmt, arg);
+ va_end(arg);
+
+ return rv;
+}
+
+static void disas_print_addr_sym(struct section *sec, struct symbol *sym,
+ bfd_vma addr, struct disassemble_info *dinfo)
+{
+ char symstr[1024];
+ char *str;
+
+ if (sym) {
+ sprint_name(symstr, sym->name, addr - sym->offset);
+ DINFO_FPRINTF(dinfo, "0x%lx <%s>", addr, symstr);
+ } else {
+ str = offstr(sec, addr);
+ DINFO_FPRINTF(dinfo, "0x%lx <%s>", addr, str);
+ free(str);
+ }
+}
+
+static bool disas_print_addr_alt(bfd_vma addr, struct disassemble_info *dinfo)
+{
+ struct disas_context *dctx = dinfo->application_data;
+ struct instruction *orig_first_insn;
+ struct alt_group *alt_group;
+ unsigned long offset;
+ struct symbol *sym;
+
+ /*
+ * Check if we are processing an alternative at the original
+ * instruction address (i.e. if alt_applied is true) and if
+ * we are referencing an address inside the alternative.
+ *
+ * For example, this happens if there is a branch inside an
+ * alternative. In that case, the address should be remapped
+ * to the corresponding location in the original instruction flow.
+ */
+ if (!dctx->alt_applied)
+ return false;
+
+ alt_group = dctx->insn->alt_group;
+ if (!alt_group || !alt_group->orig_group ||
+ addr < alt_group->first_insn->offset ||
+ addr > alt_group->last_insn->offset)
+ return false;
+
+ orig_first_insn = alt_group->orig_group->first_insn;
+ offset = addr - alt_group->first_insn->offset;
+
+ addr = orig_first_insn->offset + offset;
+ sym = orig_first_insn->sym;
+
+ disas_print_addr_sym(orig_first_insn->sec, sym, addr, dinfo);
+
+ return true;
+}
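
A worked example with made-up numbers: suppose the alternative's replacement text starts at offset 0x100 of its section and contains a branch to 0x104, while the original code being replaced starts at .text+0x40. The remapping above prints the branch target as if the alternative already lived at the original site:

	offset = 0x104 - 0x100;	/* 4 bytes into the alternative */
	addr   = 0x40 + offset;	/* printed as <func+0x44> */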
+
+static void disas_print_addr_noreloc(bfd_vma addr,
+ struct disassemble_info *dinfo)
+{
+ struct disas_context *dctx = dinfo->application_data;
+ struct instruction *insn = dctx->insn;
+ struct symbol *sym = NULL;
+
+ if (disas_print_addr_alt(addr, dinfo))
+ return;
+
+ if (insn->sym && addr >= insn->sym->offset &&
+ addr < insn->sym->offset + insn->sym->len) {
+ sym = insn->sym;
+ }
+
+ disas_print_addr_sym(insn->sec, sym, addr, dinfo);
+}
+
+static void disas_print_addr_reloc(bfd_vma addr, struct disassemble_info *dinfo)
+{
+ struct disas_context *dctx = dinfo->application_data;
+ struct instruction *insn = dctx->insn;
+ unsigned long offset;
+ struct reloc *reloc;
+ char symstr[1024];
+ char *str;
+
+ reloc = find_reloc_by_dest_range(dctx->file->elf, insn->sec,
+ insn->offset, insn->len);
+ if (!reloc) {
+ /*
+ * There is no relocation for this instruction even though
+ * the address to resolve points to the next instruction.
+ * So this is an effective reference to the next IP, for
+ * example: "lea 0x0(%rip),%rdi". The kernel can reference
+ * the next IP with the _THIS_IP_ macro.
+ */
+ DINFO_FPRINTF(dinfo, "0x%lx <_THIS_IP_>", addr);
+ return;
+ }
+
+ offset = arch_insn_adjusted_addend(insn, reloc);
+
+ /*
+ * If the relocation symbol is a section name (for example ".bss")
+ * then we try to further resolve the name.
+ */
+ if (reloc->sym->type == STT_SECTION) {
+ str = offstr(reloc->sym->sec, reloc->sym->offset + offset);
+ DINFO_FPRINTF(dinfo, "0x%lx <%s>", addr, str);
+ free(str);
+ } else {
+ sprint_name(symstr, reloc->sym->name, offset);
+ DINFO_FPRINTF(dinfo, "0x%lx <%s>", addr, symstr);
+ }
+}
+
+/*
+ * Resolve an address into a "<symbol>+<offset>" string.
+ */
+static void disas_print_address(bfd_vma addr, struct disassemble_info *dinfo)
+{
+ struct disas_context *dctx = dinfo->application_data;
+ struct instruction *insn = dctx->insn;
+ struct instruction *jump_dest;
+ struct symbol *sym;
+ bool is_reloc;
+
+ /*
+ * If the instruction is a call/jump and it references a
+ * destination then this is likely the address we are looking
+ * up. So check it first.
+ */
+ jump_dest = insn->jump_dest;
+ if (jump_dest && jump_dest->sym && jump_dest->offset == addr) {
+ if (!disas_print_addr_alt(addr, dinfo))
+ disas_print_addr_sym(jump_dest->sec, jump_dest->sym,
+ addr, dinfo);
+ return;
+ }
+
+ /*
+ * If the address points to the next instruction then there is
+ * probably a relocation. It can be a false positive when the
+ * current instruction is referencing the address of the next
+ * instruction. This particular case will be handled in
+ * disas_print_addr_reloc().
+ */
+ is_reloc = (addr == insn->offset + insn->len);
+
+ /*
+ * The call destination offset can be the address we are looking
+ * up, or 0 if there is a relocation.
+ */
+ sym = insn_call_dest(insn);
+ if (sym && (sym->offset == addr || (sym->offset == 0 && is_reloc))) {
+ DINFO_FPRINTF(dinfo, "0x%lx <%s>", addr, sym->name);
+ return;
+ }
+
+ if (!is_reloc)
+ disas_print_addr_noreloc(addr, dinfo);
+ else
+ disas_print_addr_reloc(addr, dinfo);
+}
+
+/*
+ * Initialize disassemble info arch, mach (32 or 64-bit) and options.
+ */
+int disas_info_init(struct disassemble_info *dinfo,
+ int arch, int mach32, int mach64,
+ const char *options)
+{
+ struct disas_context *dctx = dinfo->application_data;
+ struct objtool_file *file = dctx->file;
+
+ dinfo->arch = arch;
+
+ switch (file->elf->ehdr.e_ident[EI_CLASS]) {
+ case ELFCLASS32:
+ dinfo->mach = mach32;
+ break;
+ case ELFCLASS64:
+ dinfo->mach = mach64;
+ break;
+ default:
+ return -1;
+ }
+
+ dinfo->disassembler_options = options;
+
+ return 0;
+}
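
Each arch supplies arch_disas_info_init() to pick the BFD arch/mach pair. A minimal sketch of what an x86 implementation could look like (the BFD constants are standard, but this exact body is an assumption, not quoted from the patch):

	int arch_disas_info_init(struct disassemble_info *dinfo)
	{
		/* 32- or 64-bit mach is chosen from the ELF class */
		return disas_info_init(dinfo, bfd_arch_i386,
				       bfd_mach_i386_i386, bfd_mach_x86_64,
				       "att");
	}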
+
+struct disas_context *disas_context_create(struct objtool_file *file)
+{
+ struct disas_context *dctx;
+ struct disassemble_info *dinfo;
+ int err;
+
+ dctx = malloc(sizeof(*dctx));
+ if (!dctx) {
+ WARN("failed to allocate disassembly context");
+ return NULL;
+ }
+
+ dctx->file = file;
+ dinfo = &dctx->info;
+
+ init_disassemble_info_compat(dinfo, dctx,
+ disas_fprintf, disas_fprintf_styled);
+
+ dinfo->read_memory_func = buffer_read_memory;
+ dinfo->print_address_func = disas_print_address;
+ dinfo->application_data = dctx;
+
+ /*
+ * bfd_openr() is not used, to avoid redoing ELF data processing
+ * and caching that has already been done. Here, we just need
+ * to identify the target file, so we call an arch-specific
+ * function to fill in some disassemble info (arch, mach).
+ */
+
+ dinfo->arch = bfd_arch_unknown;
+ dinfo->mach = 0;
+
+ err = arch_disas_info_init(dinfo);
+ if (err || dinfo->arch == bfd_arch_unknown || dinfo->mach == 0) {
+ WARN("failed to init disassembly arch");
+ goto error;
+ }
+
+ dinfo->endian = (file->elf->ehdr.e_ident[EI_DATA] == ELFDATA2MSB) ?
+ BFD_ENDIAN_BIG : BFD_ENDIAN_LITTLE;
+
+ disassemble_init_for_target(dinfo);
+
+ dctx->disassembler = disassembler(dinfo->arch,
+ dinfo->endian == BFD_ENDIAN_BIG,
+ dinfo->mach, NULL);
+ if (!dctx->disassembler) {
+ WARN("failed to create disassembler function");
+ goto error;
+ }
+
+ return dctx;
+
+error:
+ free(dctx);
+ return NULL;
+}
+
+void disas_context_destroy(struct disas_context *dctx)
+{
+ free(dctx);
+}
+
+char *disas_result(struct disas_context *dctx)
+{
+ return dctx->result;
+}
+
+#define DISAS_INSN_OFFSET_SPACE 10
+#define DISAS_INSN_SPACE 60
+
+#define DISAS_PRINSN(dctx, insn, depth) \
+ disas_print_insn(stdout, dctx, insn, depth, "\n")
+
+/*
+ * Print a message in the instruction flow. If sec is not NULL then the
+ * address at the section offset is printed in addition to the message,
+ * otherwise only the message is printed.
+ */
+static int disas_vprint(FILE *stream, struct section *sec, unsigned long offset,
+ int depth, const char *format, va_list ap)
+{
+ const char *addr_str;
+ int i, n;
+ int len;
+
+ len = sym_name_max_len + DISAS_INSN_OFFSET_SPACE;
+ if (depth < 0) {
+ len += depth;
+ depth = 0;
+ }
+
+ n = 0;
+
+ if (sec) {
+ addr_str = offstr(sec, offset);
+ n += fprintf(stream, "%6lx: %-*s ", offset, len, addr_str);
+ free((char *)addr_str);
+ } else {
+ len += DISAS_INSN_OFFSET_SPACE + 1;
+ n += fprintf(stream, "%-*s", len, "");
+ }
+
+ /* print vertical bars to show the code flow */
+ for (i = 0; i < depth; i++)
+ n += fprintf(stream, "| ");
+
+ if (format)
+ n += vfprintf(stream, format, ap);
+
+ return n;
+}
+
+static int disas_print(FILE *stream, struct section *sec, unsigned long offset,
+ int depth, const char *format, ...)
+{
+ va_list args;
+ int len;
+
+ va_start(args, format);
+ len = disas_vprint(stream, sec, offset, depth, format, args);
+ va_end(args);
+
+ return len;
+}
+
+/*
+ * Print a message in the instruction flow. If insn is not NULL then
+ * the instruction address is printed in addition to the message,
+ * otherwise only the message is printed. In all cases, the instruction
+ * itself is not printed.
+ */
+void disas_print_info(FILE *stream, struct instruction *insn, int depth,
+ const char *format, ...)
+{
+ struct section *sec;
+ unsigned long off;
+ va_list args;
+
+ if (insn) {
+ sec = insn->sec;
+ off = insn->offset;
+ } else {
+ sec = NULL;
+ off = 0;
+ }
+
+ va_start(args, format);
+ disas_vprint(stream, sec, off, depth, format, args);
+ va_end(args);
+}
+
+/*
+ * Print an instruction address (offset and function), the instruction itself
+ * and an optional message.
+ */
+void disas_print_insn(FILE *stream, struct disas_context *dctx,
+ struct instruction *insn, int depth,
+ const char *format, ...)
+{
+ char fake_nop_insn[32];
+ const char *insn_str;
+ bool fake_nop;
+ va_list args;
+ int len;
+
+ /*
+ * Alternatives can insert a fake nop, sometimes with no
+ * associated section, so there is nothing to disassemble.
+ */
+ fake_nop = (!insn->sec && insn->type == INSN_NOP);
+ if (fake_nop) {
+ snprintf(fake_nop_insn, 32, "<fake nop> (%d bytes)", insn->len);
+ insn_str = fake_nop_insn;
+ } else {
+ disas_insn(dctx, insn);
+ insn_str = disas_result(dctx);
+ }
+
+ /* print the instruction */
+ len = (depth + 1) * 2 < DISAS_INSN_SPACE ? DISAS_INSN_SPACE - (depth+1) * 2 : 1;
+ disas_print_info(stream, insn, depth, "%-*s", len, insn_str);
+
+ /* print message if any */
+ if (!format)
+ return;
+
+ if (strcmp(format, "\n") == 0) {
+ fprintf(stream, "\n");
+ return;
+ }
+
+ fprintf(stream, " - ");
+ va_start(args, format);
+ vfprintf(stream, format, args);
+ va_end(args);
+}
+
+/*
+ * Disassemble a single instruction. Return the size of the instruction.
+ *
+ * If alt_applied is true then insn should be an instruction from an
+ * alternative (i.e. insn->alt_group != NULL), and it is disassembled
+ * at the location of the original code it is replacing. When the
+ * instruction references any address inside the alternative, these
+ * references are re-adjusted to point into the original code.
+ */
+static size_t disas_insn_common(struct disas_context *dctx,
+ struct instruction *insn,
+ bool alt_applied)
+{
+ disassembler_ftype disasm = dctx->disassembler;
+ struct disassemble_info *dinfo = &dctx->info;
+
+ dctx->insn = insn;
+ dctx->alt_applied = alt_applied;
+ dctx->result[0] = '\0';
+
+ if (insn->type == INSN_NOP) {
+ DINFO_FPRINTF(dinfo, "nop%d", insn->len);
+ return insn->len;
+ }
+
+ /*
+ * Set the disassembler buffer to read data from the section
+ * containing the instruction to disassemble.
+ */
+ dinfo->buffer = insn->sec->data->d_buf;
+ dinfo->buffer_vma = 0;
+ dinfo->buffer_length = insn->sec->sh.sh_size;
+
+ return disasm(insn->offset, &dctx->info);
+}
+
+size_t disas_insn(struct disas_context *dctx, struct instruction *insn)
+{
+ return disas_insn_common(dctx, insn, false);
+}
+
+static size_t disas_insn_alt(struct disas_context *dctx,
+ struct instruction *insn)
+{
+ return disas_insn_common(dctx, insn, true);
+}
+
+static struct instruction *next_insn_same_alt(struct objtool_file *file,
+ struct alt_group *alt_grp,
+ struct instruction *insn)
+{
+ if (alt_grp->last_insn == insn || alt_grp->nop == insn)
+ return NULL;
+
+ return next_insn_same_sec(file, insn);
+}
+
+#define alt_for_each_insn(file, alt_grp, insn) \
+ for (insn = alt_grp->first_insn; \
+ insn; \
+ insn = next_insn_same_alt(file, alt_grp, insn))
+
+/*
+ * Provide a name for the type of alternatives present at the
+ * specified instruction.
+ *
+ * An instruction can have alternatives with different types, for
+ * example alternative instructions and an exception table. In that
+ * case the name for the alternative instructions type is used.
+ *
+ * Return NULL if the instruction has no alternatives.
+ */
+const char *disas_alt_type_name(struct instruction *insn)
+{
+ struct alternative *alt;
+ const char *name;
+
+ name = NULL;
+ for (alt = insn->alts; alt; alt = alt->next) {
+ if (alt->type == ALT_TYPE_INSTRUCTIONS) {
+ name = "alternative";
+ break;
+ }
+
+ switch (alt->type) {
+ case ALT_TYPE_EX_TABLE:
+ name = "ex_table";
+ break;
+ case ALT_TYPE_JUMP_TABLE:
+ name = "jump_table";
+ break;
+ default:
+ name = "unknown";
+ break;
+ }
+ }
+
+ return name;
+}
+
+/*
+ * Provide a name for an alternative.
+ */
+char *disas_alt_name(struct alternative *alt)
+{
+ char pfx[4] = { 0 };
+ char *str = NULL;
+ const char *name;
+ int feature;
+ int flags;
+ int num;
+
+ switch (alt->type) {
+
+ case ALT_TYPE_EX_TABLE:
+ str = strdup("EXCEPTION");
+ break;
+
+ case ALT_TYPE_JUMP_TABLE:
+ str = strdup("JUMP");
+ break;
+
+ case ALT_TYPE_INSTRUCTIONS:
+ /*
+ * This is a non-default group alternative. Create a name
+ * based on the feature and flags associated with this
+ * alternative. Use either the feature name (if it is
+ * available) or the feature number, and add a prefix to
+ * show the flags used.
+ *
+ * Prefix flags characters:
+ *
+ * '!' alternative used when feature not enabled
+ * '+' direct call alternative
+ * '?' unknown flag
+ */
+
+ if (!alt->insn->alt_group)
+ return NULL;
+
+ feature = alt->insn->alt_group->feature;
+ num = alt_feature(feature);
+ flags = alt_flags(feature);
+ str = pfx;
+
+ if (flags & ~(ALT_FLAG_NOT | ALT_FLAG_DIRECT_CALL))
+ *str++ = '?';
+ if (flags & ALT_FLAG_DIRECT_CALL)
+ *str++ = '+';
+ if (flags & ALT_FLAG_NOT)
+ *str++ = '!';
+
+ name = arch_cpu_feature_name(num);
+ if (!name)
+ str = strfmt("%sFEATURE 0x%X", pfx, num);
+ else
+ str = strfmt("%s%s", pfx, name);
+
+ break;
+ }
+
+ return str;
+}
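
Illustrative examples of the resulting names (the feature names are whatever arch_cpu_feature_name() reports; these specific ones are assumptions):

	EXCEPTION	ALT_TYPE_EX_TABLE
	JUMP		ALT_TYPE_JUMP_TABLE
	SMAP		instruction alternative gated on a feature
	!XENPV		alternative used when the feature is NOT enabled
	FEATURE 0x4B	feature number with no known name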
+
+/*
+ * Initialize an alternative. The default alternative should be initialized
+ * with alt=NULL.
+ */
+static int disas_alt_init(struct disas_alt *dalt,
+ struct instruction *orig_insn,
+ struct alternative *alt)
+{
+ dalt->orig_insn = orig_insn;
+ dalt->alt = alt;
+ dalt->insn_idx = 0;
+ dalt->name = alt ? disas_alt_name(alt) : strdup("DEFAULT");
+ if (!dalt->name)
+ return -1;
+ dalt->width = strlen(dalt->name);
+
+ return 0;
+}
+
+static int disas_alt_add_insn(struct disas_alt *dalt, int index, char *insn_str,
+ int offset, int nops)
+{
+ int len;
+
+ if (index >= DISAS_ALT_INSN_MAX) {
+ WARN("Alternative %lx.%s has more instructions than supported",
+ DALT_ALTID(dalt), dalt->name);
+ return -1;
+ }
+
+ len = strlen(insn_str);
+ dalt->insn[index].str = insn_str;
+ dalt->insn[index].offset = offset;
+ dalt->insn[index].nops = nops;
+ if (len > dalt->width)
+ dalt->width = len;
+
+ return 0;
+}
+
+static int disas_alt_jump(struct disas_alt *dalt)
+{
+ struct instruction *orig_insn;
+ struct instruction *dest_insn;
+ char suffix[2] = { 0 };
+ char *str;
+ int nops;
+
+ orig_insn = dalt->orig_insn;
+ dest_insn = dalt->alt->insn;
+
+ if (orig_insn->type == INSN_NOP) {
+ if (orig_insn->len == 5)
+ suffix[0] = 'q';
+ str = strfmt("jmp%-3s %lx <%s+0x%lx>", suffix,
+ dest_insn->offset, dest_insn->sym->name,
+ dest_insn->offset - dest_insn->sym->offset);
+ nops = 0;
+ } else {
+ str = strfmt("nop%d", orig_insn->len);
+ nops = orig_insn->len;
+ }
+
+ if (!str)
+ return -1;
+
+ disas_alt_add_insn(dalt, 0, str, 0, nops);
+
+ return 1;
+}
+
+/*
+ * Disassemble an exception table alternative.
+ */
+static int disas_alt_extable(struct disas_alt *dalt)
+{
+ struct instruction *alt_insn;
+ char *str;
+
+ alt_insn = dalt->alt->insn;
+ str = strfmt("resume at 0x%lx <%s+0x%lx>",
+ alt_insn->offset, alt_insn->sym->name,
+ alt_insn->offset - alt_insn->sym->offset);
+ if (!str)
+ return -1;
+
+ disas_alt_add_insn(dalt, 0, str, 0, 0);
+
+ return 1;
+}
+
+/*
+ * Disassemble an alternative and store instructions in the disas_alt
+ * structure. Return the number of instructions in the alternative.
+ */
+static int disas_alt_group(struct disas_context *dctx, struct disas_alt *dalt)
+{
+ struct objtool_file *file;
+ struct instruction *insn;
+ int offset;
+ char *str;
+ int count;
+ int nops;
+ int err;
+
+ file = dctx->file;
+ count = 0;
+ offset = 0;
+ nops = 0;
+
+ alt_for_each_insn(file, DALT_GROUP(dalt), insn) {
+
+ disas_insn_alt(dctx, insn);
+ str = strdup(disas_result(dctx));
+ if (!str)
+ return -1;
+
+ nops = insn->type == INSN_NOP ? insn->len : 0;
+ err = disas_alt_add_insn(dalt, count, str, offset, nops);
+ if (err)
+ break;
+ offset += insn->len;
+ count++;
+ }
+
+ return count;
+}
+
+/*
+ * Disassemble the default alternative.
+ */
+static int disas_alt_default(struct disas_context *dctx, struct disas_alt *dalt)
+{
+ char *str;
+ int nops;
+ int err;
+
+ if (DALT_GROUP(dalt))
+ return disas_alt_group(dctx, dalt);
+
+ /*
+ * Default alternative with no alt_group: this is the default
+ * code associated with either a jump table or an exception
+ * table and no other instruction alternatives. In that case
+ * the default alternative is made of a single instruction.
+ */
+ disas_insn(dctx, dalt->orig_insn);
+ str = strdup(disas_result(dctx));
+ if (!str)
+ return -1;
+ nops = dalt->orig_insn->type == INSN_NOP ? dalt->orig_insn->len : 0;
+ err = disas_alt_add_insn(dalt, 0, str, 0, nops);
+ if (err)
+ return -1;
+
+ return 1;
+}
+
+/*
+ * For each alternative, if there is an instruction at the specified
+ * offset then print this instruction, otherwise print a blank entry.
+ * The offset is an offset from the start of the alternative.
+ *
+ * Return the offset for the next instructions to print, or -1 if all
+ * instructions have been printed.
+ */
+static int disas_alt_print_insn(struct disas_alt *dalts, int alt_count,
+ int insn_count, int offset)
+{
+ struct disas_alt *dalt;
+ int offset_next;
+ char *str;
+ int i, j;
+
+ offset_next = -1;
+
+ for (i = 0; i < alt_count; i++) {
+ dalt = &dalts[i];
+ j = dalt->insn_idx;
+ if (j == -1) {
+ printf("| %-*s ", dalt->width, "");
+ continue;
+ }
+
+ if (dalt->insn[j].offset == offset) {
+ str = dalt->insn[j].str;
+ printf("| %-*s ", dalt->width, str ?: "");
+ if (++j < insn_count) {
+ dalt->insn_idx = j;
+ } else {
+ dalt->insn_idx = -1;
+ continue;
+ }
+ } else {
+ printf("| %-*s ", dalt->width, "");
+ }
+
+ if (dalt->insn[j].offset > 0 &&
+ (offset_next == -1 ||
+ (dalt->insn[j].offset < offset_next)))
+ offset_next = dalt->insn[j].offset;
+ }
+ printf("\n");
+
+ return offset_next;
+}
+
+/*
+ * Print all alternatives side-by-side.
+ */
+static void disas_alt_print_wide(char *alt_name, struct disas_alt *dalts, int alt_count,
+ int insn_count)
+{
+ struct instruction *orig_insn;
+ int offset_next;
+ int offset;
+ int i;
+
+ orig_insn = dalts[0].orig_insn;
+
+ /*
+ * Print a header with the name of each alternative.
+ */
+ disas_print_info(stdout, orig_insn, -2, NULL);
+
+ if (strlen(alt_name) > dalts[0].width)
+ dalts[0].width = strlen(alt_name);
+ printf("| %-*s ", dalts[0].width, alt_name);
+
+ for (i = 1; i < alt_count; i++)
+ printf("| %-*s ", dalts[i].width, dalts[i].name);
+
+ printf("\n");
+
+ /*
+ * Print instructions for each alternative.
+ */
+ offset_next = 0;
+ do {
+ offset = offset_next;
+ disas_print(stdout, orig_insn->sec, orig_insn->offset + offset,
+ -2, NULL);
+ offset_next = disas_alt_print_insn(dalts, alt_count, insn_count,
+ offset);
+ } while (offset_next > offset);
+}
+
+/*
+ * Print all alternatives one above the other.
+ */
+static void disas_alt_print_compact(char *alt_name, struct disas_alt *dalts,
+ int alt_count, int insn_count)
+{
+ struct instruction *orig_insn;
+ int width;
+ int i, j;
+ int len;
+
+ orig_insn = dalts[0].orig_insn;
+
+ len = disas_print(stdout, orig_insn->sec, orig_insn->offset, 0, NULL);
+ printf("%s\n", alt_name);
+
+ /*
+ * If all alternatives have a single instruction then print each
+ * alternative on a single line. Otherwise, print alternatives
+ * one above the other with a clear separation.
+ */
+
+ if (insn_count == 1) {
+ width = 0;
+ for (i = 0; i < alt_count; i++) {
+ if (dalts[i].width > width)
+ width = dalts[i].width;
+ }
+
+ for (i = 0; i < alt_count; i++) {
+ printf("%*s= %-*s (if %s)\n", len, "", width,
+ dalts[i].insn[0].str, dalts[i].name);
+ }
+
+ return;
+ }
+
+ for (i = 0; i < alt_count; i++) {
+ printf("%*s= %s\n", len, "", dalts[i].name);
+ for (j = 0; j < insn_count; j++) {
+ if (!dalts[i].insn[j].str)
+ break;
+ disas_print(stdout, orig_insn->sec,
+ orig_insn->offset + dalts[i].insn[j].offset, 0,
+ "| %s\n", dalts[i].insn[j].str);
+ }
+ printf("%*s|\n", len, "");
+ }
+}
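
Roughly, the compact single-instruction form prints something like the following (offsets, function and feature names are illustrative):

	4b: func+0x4b	<alternative.4b>
	    = nop3	(if DEFAULT)
	    = stac	(if SMAP)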
+
+/*
+ * Trim NOPs in alternatives. This replaces trailing NOPs in alternatives
+ * with a single indication of the number of bytes covered with NOPs.
+ *
+ * Return the maximum numbers of instructions in all alternatives after
+ * trailing NOPs have been trimmed.
+ */
+static int disas_alt_trim_nops(struct disas_alt *dalts, int alt_count,
+ int insn_count)
+{
+ struct disas_alt *dalt;
+ int nops_count;
+ const char *s;
+ int offset;
+ int count;
+ int nops;
+ int i, j;
+
+ count = 0;
+ for (i = 0; i < alt_count; i++) {
+ offset = 0;
+ nops = 0;
+ nops_count = 0;
+ dalt = &dalts[i];
+ for (j = insn_count - 1; j >= 0; j--) {
+ if (!dalt->insn[j].str || !dalt->insn[j].nops)
+ break;
+ offset = dalt->insn[j].offset;
+ free(dalt->insn[j].str);
+ dalt->insn[j].offset = 0;
+ dalt->insn[j].str = NULL;
+ nops += dalt->insn[j].nops;
+ nops_count++;
+ }
+
+ /*
+ * All trailing NOPs have been removed. If there was a single
+ * NOP instruction then re-add it. If there was a block of
+ * NOPs then indicate the number of bytes that the block
+ * covers (nop*<number-of-bytes>).
+ */
+ if (nops_count) {
+ s = nops_count == 1 ? "" : "*";
+ dalt->insn[j + 1].str = strfmt("nop%s%d", s, nops);
+ dalt->insn[j + 1].offset = offset;
+ dalt->insn[j + 1].nops = nops;
+ j++;
+ }
+
+ if (j > count)
+ count = j;
+ }
+
+ return count + 1;
+}
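
For example, a default alternative whose tail is nop3, nop2, nop5 collapses into a single entry:

	before:	... nop3 nop2 nop5	(three trailing NOP instructions)
	after:	... nop*10		(one entry covering 10 bytes)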
+
+/*
+ * Disassemble an alternative.
+ *
+ * Return the last instruction in the default alternative so that
+ * disassembly can continue with the next instruction. Return NULL
+ * on error.
+ */
+static void *disas_alt(struct disas_context *dctx,
+ struct instruction *orig_insn)
+{
+ struct disas_alt dalts[DISAS_ALT_MAX] = { 0 };
+ struct instruction *last_insn = NULL;
+ struct alternative *alt;
+ struct disas_alt *dalt;
+ int insn_count = 0;
+ int alt_count = 0;
+ char *alt_name;
+ int count;
+ int i, j;
+ int err;
+
+ alt_name = strfmt("<%s.%lx>", disas_alt_type_name(orig_insn),
+ orig_insn->offset);
+ if (!alt_name) {
+ WARN("Failed to define name for alternative at instruction 0x%lx",
+ orig_insn->offset);
+ goto done;
+ }
+
+ /*
+ * Initialize and disassemble the default alternative.
+ */
+ err = disas_alt_init(&dalts[0], orig_insn, NULL);
+ if (err) {
+ WARN("%s: failed to initialize default alternative", alt_name);
+ goto done;
+ }
+
+ insn_count = disas_alt_default(dctx, &dalts[0]);
+ if (insn_count < 0) {
+ WARN("%s: failed to disassemble default alternative", alt_name);
+ goto done;
+ }
+
+ /*
+ * Initialize and disassemble all other alternatives.
+ */
+ i = 1;
+ for (alt = orig_insn->alts; alt; alt = alt->next) {
+ if (i >= DISAS_ALT_MAX) {
+ WARN("%s has more alternatives than supported", alt_name);
+ break;
+ }
+
+ dalt = &dalts[i];
+ err = disas_alt_init(dalt, orig_insn, alt);
+ if (err) {
+ WARN("%s: failed to disassemble alternative", alt_name);
+ goto done;
+ }
+
+ count = -1;
+ switch (dalt->alt->type) {
+ case ALT_TYPE_INSTRUCTIONS:
+ count = disas_alt_group(dctx, dalt);
+ break;
+ case ALT_TYPE_EX_TABLE:
+ count = disas_alt_extable(dalt);
+ break;
+ case ALT_TYPE_JUMP_TABLE:
+ count = disas_alt_jump(dalt);
+ break;
+ }
+ if (count < 0) {
+ WARN("%s: failed to disassemble alternative %s",
+ alt_name, dalt->name);
+ goto done;
+ }
+
+ insn_count = count > insn_count ? count : insn_count;
+ i++;
+ }
+ alt_count = i;
+
+ /*
+ * Print default and non-default alternatives.
+ */
+
+ insn_count = disas_alt_trim_nops(dalts, alt_count, insn_count);
+
+ if (opts.wide)
+ disas_alt_print_wide(alt_name, dalts, alt_count, insn_count);
+ else
+ disas_alt_print_compact(alt_name, dalts, alt_count, insn_count);
+
+ last_insn = orig_insn->alt_group ? orig_insn->alt_group->last_insn :
+ orig_insn;
+
+done:
+ for (i = 0; i < alt_count; i++) {
+ free(dalts[i].name);
+ for (j = 0; j < insn_count; j++)
+ free(dalts[i].insn[j].str);
+ }
+
+ free(alt_name);
+
+ return last_insn;
+}
+
+/*
+ * Disassemble a function.
+ */
+static void disas_func(struct disas_context *dctx, struct symbol *func)
+{
+ struct instruction *insn_start;
+ struct instruction *insn;
+
+ printf("%s:\n", func->name);
+ sym_for_each_insn(dctx->file, func, insn) {
+ if (insn->alts) {
+ insn_start = insn;
+ insn = disas_alt(dctx, insn);
+ if (insn)
+ continue;
+ /*
+ * There was an error with disassembling
+ * the alternative. Resume disassembling
+ * at the current instruction; this will
+ * disassemble the default alternative
+ * only and continue with the code after
+ * the alternative.
+ */
+ insn = insn_start;
+ }
+
+ DISAS_PRINSN(dctx, insn, 0);
+ }
+ printf("\n");
+}
+
+/*
+ * Disassemble all warned functions.
+ */
+void disas_warned_funcs(struct disas_context *dctx)
+{
+ struct symbol *sym;
+
+ if (!dctx)
+ return;
+
+ for_each_sym(dctx->file->elf, sym) {
+ if (sym->warned)
+ disas_func(dctx, sym);
+ }
+}
+
+void disas_funcs(struct disas_context *dctx)
+{
+ bool disas_all = !strcmp(opts.disas, "*");
+ struct section *sec;
+ struct symbol *sym;
+
+ for_each_sec(dctx->file->elf, sec) {
+
+ if (!(sec->sh.sh_flags & SHF_EXECINSTR))
+ continue;
+
+ sec_for_each_sym(sec, sym) {
+ /*
+ * If the function had a warning and the verbose
+ * option is used then the function was already
+ * disassembled.
+ */
+ if (opts.verbose && sym->warned)
+ continue;
+
+ if (disas_all || fnmatch(opts.disas, sym->name, 0) == 0)
+ disas_func(dctx, sym);
+ }
+ }
+}
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index ca5d77db692a..6a8ed9c62323 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -16,12 +16,17 @@
#include <string.h>
#include <unistd.h>
#include <errno.h>
+#include <libgen.h>
+#include <ctype.h>
#include <linux/interval_tree_generic.h>
#include <objtool/builtin.h>
-
#include <objtool/elf.h>
#include <objtool/warn.h>
+#define ALIGN_UP(x, align_to) (((x) + ((align_to)-1)) & ~((align_to)-1))
+#define ALIGN_UP_POW2(x) (1U << ((8 * sizeof(x)) - __builtin_clz((x) - 1U)))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
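
Worked values for the new helpers, matching the definitions above:

	ALIGN_UP(10, 8);	/* -> 16 */
	ALIGN_UP_POW2(5);	/* -> 8, rounds up to a power of two */
	MAX(64, 8);		/* -> 64 */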
static inline u32 str_hash(const char *str)
{
return jhash(str, strlen(str), 0);
@@ -92,11 +97,12 @@ static inline unsigned long __sym_start(struct symbol *s)
static inline unsigned long __sym_last(struct symbol *s)
{
- return s->offset + s->len - 1;
+ return s->offset + (s->len ? s->len - 1 : 0);
}
INTERVAL_TREE_DEFINE(struct symbol, node, unsigned long, __subtree_last,
- __sym_start, __sym_last, static, __sym)
+ __sym_start, __sym_last, static inline __maybe_unused,
+ __sym)
#define __sym_for_each(_iter, _tree, _start, _end) \
for (_iter = __sym_iter_first((_tree), (_start), (_end)); \
@@ -108,7 +114,7 @@ struct symbol_hole {
};
/*
- * Find !section symbol where @offset is after it.
+ * Find the last symbol before @offset.
*/
static int symbol_hole_by_offset(const void *key, const struct rb_node *node)
{
@@ -119,8 +125,7 @@ static int symbol_hole_by_offset(const void *key, const struct rb_node *node)
return -1;
if (sh->key >= s->offset + s->len) {
- if (s->type != STT_SECTION)
- sh->sym = s;
+ sh->sym = s;
return 1;
}
@@ -167,11 +172,11 @@ static struct symbol *find_symbol_by_index(struct elf *elf, unsigned int idx)
struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset)
{
struct rb_root_cached *tree = (struct rb_root_cached *)&sec->symbol_tree;
- struct symbol *iter;
+ struct symbol *sym;
- __sym_for_each(iter, tree, offset, offset) {
- if (iter->offset == offset && iter->type != STT_SECTION)
- return iter;
+ __sym_for_each(sym, tree, offset, offset) {
+ if (sym->offset == offset && !is_sec_sym(sym))
+ return sym->alias;
}
return NULL;
@@ -180,11 +185,11 @@ struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset)
struct symbol *find_func_by_offset(struct section *sec, unsigned long offset)
{
struct rb_root_cached *tree = (struct rb_root_cached *)&sec->symbol_tree;
- struct symbol *iter;
+ struct symbol *func;
- __sym_for_each(iter, tree, offset, offset) {
- if (iter->offset == offset && iter->type == STT_FUNC)
- return iter;
+ __sym_for_each(func, tree, offset, offset) {
+ if (func->offset == offset && is_func_sym(func))
+ return func->alias;
}
return NULL;
@@ -193,14 +198,29 @@ struct symbol *find_func_by_offset(struct section *sec, unsigned long offset)
struct symbol *find_symbol_containing(const struct section *sec, unsigned long offset)
{
struct rb_root_cached *tree = (struct rb_root_cached *)&sec->symbol_tree;
- struct symbol *iter;
+ struct symbol *sym = NULL, *tmp;
- __sym_for_each(iter, tree, offset, offset) {
- if (iter->type != STT_SECTION)
- return iter;
+ __sym_for_each(tmp, tree, offset, offset) {
+ if (tmp->len) {
+ if (!sym) {
+ sym = tmp;
+ continue;
+ }
+
+ if (sym->offset != tmp->offset || sym->len != tmp->len) {
+ /*
+ * In the rare case of overlapping symbols,
+ * pick the smaller one.
+ *
+ * TODO: outlaw overlapping symbols
+ */
+ if (tmp->len < sym->len)
+ sym = tmp;
+ }
+ }
}
- return NULL;
+ return sym ? sym->alias : NULL;
}
/*
@@ -246,11 +266,11 @@ int find_symbol_hole_containing(const struct section *sec, unsigned long offset)
struct symbol *find_func_containing(struct section *sec, unsigned long offset)
{
struct rb_root_cached *tree = (struct rb_root_cached *)&sec->symbol_tree;
- struct symbol *iter;
+ struct symbol *func;
- __sym_for_each(iter, tree, offset, offset) {
- if (iter->type == STT_FUNC)
- return iter;
+ __sym_for_each(func, tree, offset, offset) {
+ if (is_func_sym(func))
+ return func->alias;
}
return NULL;
@@ -268,6 +288,35 @@ struct symbol *find_symbol_by_name(const struct elf *elf, const char *name)
return NULL;
}
+/* Find local symbol with matching STT_FILE */
+static struct symbol *find_local_symbol_by_file_and_name(const struct elf *elf,
+ struct symbol *file,
+ const char *name)
+{
+ struct symbol *sym;
+
+ elf_hash_for_each_possible(symbol_name, sym, name_hash, str_hash(name)) {
+ if (sym->bind == STB_LOCAL && sym->file == file &&
+ !strcmp(sym->name, name)) {
+ return sym;
+ }
+ }
+
+ return NULL;
+}
+
+struct symbol *find_global_symbol_by_name(const struct elf *elf, const char *name)
+{
+ struct symbol *sym;
+
+ elf_hash_for_each_possible(symbol_name, sym, name_hash, str_hash(name)) {
+ if (!strcmp(sym->name, name) && !is_local_sym(sym))
+ return sym;
+ }
+
+ return NULL;
+}
+
struct reloc *find_reloc_by_dest_range(const struct elf *elf, struct section *sec,
unsigned long offset, unsigned int len)
{
@@ -358,14 +407,14 @@ static int read_sections(struct elf *elf)
return -1;
}
- if (sec->sh.sh_size != 0 && !is_dwarf_section(sec)) {
+ if (sec_size(sec) != 0 && !is_dwarf_section(sec)) {
sec->data = elf_getdata(s, NULL);
if (!sec->data) {
ERROR_ELF("elf_getdata");
return -1;
}
if (sec->data->d_off != 0 ||
- sec->data->d_size != sec->sh.sh_size) {
+ sec->data->d_size != sec_size(sec)) {
ERROR("unexpected data attributes for %s", sec->name);
return -1;
}
@@ -393,7 +442,38 @@ static int read_sections(struct elf *elf)
return 0;
}
-static void elf_add_symbol(struct elf *elf, struct symbol *sym)
+static const char *demangle_name(struct symbol *sym)
+{
+ char *str;
+
+ if (!is_local_sym(sym))
+ return sym->name;
+
+ if (!is_func_sym(sym) && !is_object_sym(sym))
+ return sym->name;
+
+ if (!strstarts(sym->name, "__UNIQUE_ID_") && !strchr(sym->name, '.'))
+ return sym->name;
+
+ str = strdup(sym->name);
+ if (!str) {
+ ERROR_GLIBC("strdup");
+ return NULL;
+ }
+
+ for (int i = strlen(str) - 1; i >= 0; i--) {
+ char c = str[i];
+
+ if (!isdigit(c) && c != '.') {
+ str[i + 1] = '\0';
+ break;
+ }
+ }
+
+ return str;
+}
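
For example (illustrative symbol names), a local STT_FUNC named "foo.constprop.0" demangles to "foo.constprop", and "__UNIQUE_ID_license268" to "__UNIQUE_ID_license"; compiler-appended digit suffixes are stripped while other symbols are returned unchanged:

	"foo.constprop.0"        -> "foo.constprop"
	"__UNIQUE_ID_license268" -> "__UNIQUE_ID_license"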
+
+static int elf_add_symbol(struct elf *elf, struct symbol *sym)
{
struct list_head *entry;
struct rb_node *pnode;
@@ -405,14 +485,15 @@ static void elf_add_symbol(struct elf *elf, struct symbol *sym)
sym->type = GELF_ST_TYPE(sym->sym.st_info);
sym->bind = GELF_ST_BIND(sym->sym.st_info);
- if (sym->type == STT_FILE)
+ if (is_file_sym(sym))
elf->num_files++;
sym->offset = sym->sym.st_value;
sym->len = sym->sym.st_size;
__sym_for_each(iter, &sym->sec->symbol_tree, sym->offset, sym->offset) {
- if (iter->offset == sym->offset && iter->type == sym->type)
+ if (!is_undef_sym(iter) && iter->offset == sym->offset &&
+ iter->type == sym->type && iter->len == sym->len)
iter->alias = sym;
}
@@ -423,21 +504,44 @@ static void elf_add_symbol(struct elf *elf, struct symbol *sym)
else
entry = &sym->sec->symbol_list;
list_add(&sym->list, entry);
+
+ list_add_tail(&sym->global_list, &elf->symbols);
elf_hash_add(symbol, &sym->hash, sym->idx);
elf_hash_add(symbol_name, &sym->name_hash, str_hash(sym->name));
- /*
- * Don't store empty STT_NOTYPE symbols in the rbtree. They
- * can exist within a function, confusing the sorting.
- */
- if (!sym->len)
- __sym_remove(sym, &sym->sec->symbol_tree);
+ if (is_func_sym(sym) &&
+ (strstarts(sym->name, "__pfx_") ||
+ strstarts(sym->name, "__cfi_") ||
+ strstarts(sym->name, "__pi___pfx_") ||
+ strstarts(sym->name, "__pi___cfi_")))
+ sym->prefix = 1;
+
+ if (strstarts(sym->name, ".klp.sym"))
+ sym->klp = 1;
+
+ if (!sym->klp && !is_sec_sym(sym) && strstr(sym->name, ".cold")) {
+ sym->cold = 1;
+
+ /*
+ * Clang doesn't mark cold subfunctions as STT_FUNC, which
+ * breaks several objtool assumptions. Fake it.
+ */
+ sym->type = STT_FUNC;
+ }
+
+ sym->pfunc = sym->cfunc = sym;
+
+ sym->demangled_name = demangle_name(sym);
+ if (!sym->demangled_name)
+ return -1;
+
+ return 0;
}
static int read_symbols(struct elf *elf)
{
struct section *symtab, *symtab_shndx, *sec;
- struct symbol *sym, *pfunc;
+ struct symbol *sym, *pfunc, *file = NULL;
int symbols_nr, i;
char *coldstr;
Elf_Data *shndx_data = NULL;
@@ -469,6 +573,9 @@ static int read_symbols(struct elf *elf)
ERROR_GLIBC("calloc");
return -1;
}
+
+ INIT_LIST_HEAD(&elf->symbols);
+
for (i = 0; i < symbols_nr; i++) {
sym = &elf->symbol_data[i];
@@ -477,14 +584,14 @@ static int read_symbols(struct elf *elf)
if (!gelf_getsymshndx(symtab->data, shndx_data, i, &sym->sym,
&shndx)) {
ERROR_ELF("gelf_getsymshndx");
- goto err;
+ return -1;
}
sym->name = elf_strptr(elf->elf, symtab->sh.sh_link,
sym->sym.st_name);
if (!sym->name) {
ERROR_ELF("elf_strptr");
- goto err;
+ return -1;
}
if ((sym->sym.st_shndx > SHN_UNDEF &&
@@ -496,7 +603,7 @@ static int read_symbols(struct elf *elf)
sym->sec = find_section_by_index(elf, shndx);
if (!sym->sec) {
ERROR("couldn't find section for symbol %s", sym->name);
- goto err;
+ return -1;
}
if (GELF_ST_TYPE(sym->sym.st_info) == STT_SECTION) {
sym->name = sym->sec->name;
@@ -505,7 +612,13 @@ static int read_symbols(struct elf *elf)
} else
sym->sec = find_section_by_index(elf, 0);
- elf_add_symbol(elf, sym);
+ if (elf_add_symbol(elf, sym))
+ return -1;
+
+ if (sym->type == STT_FILE)
+ file = sym;
+ else if (sym->bind == STB_LOCAL)
+ sym->file = file;
}
if (opts.stats) {
@@ -518,18 +631,15 @@ static int read_symbols(struct elf *elf)
sec_for_each_sym(sec, sym) {
char *pname;
size_t pnamelen;
- if (sym->type != STT_FUNC)
- continue;
-
- if (sym->pfunc == NULL)
- sym->pfunc = sym;
- if (sym->cfunc == NULL)
- sym->cfunc = sym;
+ if (!sym->cold)
+ continue;
coldstr = strstr(sym->name, ".cold");
- if (!coldstr)
- continue;
+ if (!coldstr) {
+ ERROR("%s(): cold subfunction without \".cold\"?", sym->name);
+ return -1;
+ }
pnamelen = coldstr - sym->name;
pname = strndup(sym->name, pnamelen);
@@ -538,7 +648,9 @@ static int read_symbols(struct elf *elf)
return -1;
}
- pfunc = find_symbol_by_name(elf, pname);
+ pfunc = find_local_symbol_by_file_and_name(elf, sym->file, pname);
+ if (!pfunc)
+ pfunc = find_global_symbol_by_name(elf, pname);
free(pname);
if (!pfunc) {
@@ -546,8 +658,9 @@ static int read_symbols(struct elf *elf)
return -1;
}
- sym->pfunc = pfunc;
+ sym->pfunc = pfunc->alias;
pfunc->cfunc = sym;
+ pfunc->alias->cfunc = sym;
/*
* Unfortunately, -fnoreorder-functions puts the child
@@ -566,10 +679,6 @@ static int read_symbols(struct elf *elf)
}
return 0;
-
-err:
- free(sym);
- return -1;
}
static int mark_group_syms(struct elf *elf)
@@ -583,7 +692,7 @@ static int mark_group_syms(struct elf *elf)
return -1;
}
- list_for_each_entry(sec, &elf->sections, list) {
+ for_each_sec(elf, sec) {
if (sec->sh.sh_type == SHT_GROUP &&
sec->sh.sh_link == symtab->idx) {
sym = find_symbol_by_index(elf, sec->sh.sh_info);
@@ -624,7 +733,7 @@ static int elf_update_sym_relocs(struct elf *elf, struct symbol *sym)
static int elf_update_symbol(struct elf *elf, struct section *symtab,
struct section *symtab_shndx, struct symbol *sym)
{
- Elf32_Word shndx = sym->sec ? sym->sec->idx : SHN_UNDEF;
+ Elf32_Word shndx;
Elf_Data *symtab_data = NULL, *shndx_data = NULL;
Elf64_Xword entsize = symtab->sh.sh_entsize;
int max_idx, idx = sym->idx;
@@ -632,8 +741,7 @@ static int elf_update_symbol(struct elf *elf, struct section *symtab,
bool is_special_shndx = sym->sym.st_shndx >= SHN_LORESERVE &&
sym->sym.st_shndx != SHN_XINDEX;
- if (is_special_shndx)
- shndx = sym->sym.st_shndx;
+ shndx = is_special_shndx ? sym->sym.st_shndx : sym->sec->idx;
s = elf_getscn(elf->elf, symtab->idx);
if (!s) {
@@ -731,7 +839,7 @@ static int elf_update_symbol(struct elf *elf, struct section *symtab,
}
/* setup extended section index magic and write the symbol */
- if ((shndx >= SHN_UNDEF && shndx < SHN_LORESERVE) || is_special_shndx) {
+ if (shndx < SHN_LORESERVE || is_special_shndx) {
sym->sym.st_shndx = shndx;
if (!shndx_data)
shndx = 0;
@@ -751,24 +859,58 @@ static int elf_update_symbol(struct elf *elf, struct section *symtab,
return 0;
}
-static struct symbol *
-__elf_create_symbol(struct elf *elf, struct symbol *sym)
+struct symbol *elf_create_symbol(struct elf *elf, const char *name,
+ struct section *sec, unsigned int bind,
+ unsigned int type, unsigned long offset,
+ size_t size)
{
struct section *symtab, *symtab_shndx;
Elf32_Word first_non_local, new_idx;
- struct symbol *old;
+ struct symbol *old, *sym;
- symtab = find_section_by_name(elf, ".symtab");
- if (symtab) {
- symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
+ sym = calloc(1, sizeof(*sym));
+ if (!sym) {
+ ERROR_GLIBC("calloc");
+ return NULL;
+ }
+
+ sym->name = strdup(name);
+ if (!sym->name) {
+ ERROR_GLIBC("strdup");
+ return NULL;
+ }
+
+ if (type != STT_SECTION) {
+ sym->sym.st_name = elf_add_string(elf, NULL, sym->name);
+ if (sym->sym.st_name == -1)
+ return NULL;
+ }
+
+ if (sec) {
+ sym->sec = sec;
} else {
+ sym->sec = find_section_by_index(elf, 0);
+ if (!sym->sec) {
+ ERROR("no NULL section");
+ return NULL;
+ }
+ }
+
+ sym->sym.st_info = GELF_ST_INFO(bind, type);
+ sym->sym.st_value = offset;
+ sym->sym.st_size = size;
+
+ symtab = find_section_by_name(elf, ".symtab");
+ if (!symtab) {
ERROR("no .symtab");
return NULL;
}
+ symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
+
new_idx = sec_num_entries(symtab);
- if (GELF_ST_BIND(sym->sym.st_info) != STB_LOCAL)
+ if (bind != STB_LOCAL)
goto non_local;
/*
@@ -806,10 +948,8 @@ __elf_create_symbol(struct elf *elf, struct symbol *sym)
non_local:
sym->idx = new_idx;
- if (elf_update_symbol(elf, symtab, symtab_shndx, sym)) {
- ERROR("elf_update_symbol");
+ if (sym->idx && elf_update_symbol(elf, symtab, symtab_shndx, sym))
return NULL;
- }
symtab->sh.sh_size += symtab->sh.sh_entsize;
mark_sec_changed(elf, symtab, true);
@@ -819,70 +959,28 @@ non_local:
mark_sec_changed(elf, symtab_shndx, true);
}
- return sym;
-}
-
-static struct symbol *
-elf_create_section_symbol(struct elf *elf, struct section *sec)
-{
- struct symbol *sym = calloc(1, sizeof(*sym));
-
- if (!sym) {
- ERROR_GLIBC("malloc");
+ if (elf_add_symbol(elf, sym))
return NULL;
- }
-
- sym->name = sec->name;
- sym->sec = sec;
-
- // st_name 0
- sym->sym.st_info = GELF_ST_INFO(STB_LOCAL, STT_SECTION);
- // st_other 0
- // st_value 0
- // st_size 0
-
- sym = __elf_create_symbol(elf, sym);
- if (sym)
- elf_add_symbol(elf, sym);
return sym;
}
-static int elf_add_string(struct elf *elf, struct section *strtab, char *str);
-
-struct symbol *
-elf_create_prefix_symbol(struct elf *elf, struct symbol *orig, long size)
+struct symbol *elf_create_section_symbol(struct elf *elf, struct section *sec)
{
struct symbol *sym = calloc(1, sizeof(*sym));
- size_t namelen = strlen(orig->name) + sizeof("__pfx_");
- char *name = malloc(namelen);
- if (!sym || !name) {
- ERROR_GLIBC("malloc");
+ sym = elf_create_symbol(elf, sec->name, sec, STB_LOCAL, STT_SECTION, 0, 0);
+ if (!sym)
return NULL;
- }
- snprintf(name, namelen, "__pfx_%s", orig->name);
-
- sym->name = name;
- sym->sec = orig->sec;
-
- sym->sym.st_name = elf_add_string(elf, NULL, name);
- sym->sym.st_info = orig->sym.st_info;
- sym->sym.st_value = orig->sym.st_value - size;
- sym->sym.st_size = size;
-
- sym = __elf_create_symbol(elf, sym);
- if (sym)
- elf_add_symbol(elf, sym);
+ sec->sym = sym;
return sym;
}
-static struct reloc *elf_init_reloc(struct elf *elf, struct section *rsec,
- unsigned int reloc_idx,
- unsigned long offset, struct symbol *sym,
- s64 addend, unsigned int type)
+struct reloc *elf_init_reloc(struct elf *elf, struct section *rsec,
+ unsigned int reloc_idx, unsigned long offset,
+ struct symbol *sym, s64 addend, unsigned int type)
{
struct reloc *reloc, empty = { 0 };
@@ -922,9 +1020,9 @@ struct reloc *elf_init_reloc_text_sym(struct elf *elf, struct section *sec,
unsigned long insn_off)
{
struct symbol *sym = insn_sec->sym;
- int addend = insn_off;
+ s64 addend = insn_off;
- if (!(insn_sec->sh.sh_flags & SHF_EXECINSTR)) {
+ if (!is_text_sec(insn_sec)) {
ERROR("bad call to %s() for data symbol %s", __func__, sym->name);
return NULL;
}
@@ -939,8 +1037,6 @@ struct reloc *elf_init_reloc_text_sym(struct elf *elf, struct section *sec,
sym = elf_create_section_symbol(elf, insn_sec);
if (!sym)
return NULL;
-
- insn_sec->sym = sym;
}
return elf_init_reloc(elf, sec->rsec, reloc_idx, offset, sym, addend,
@@ -953,7 +1049,7 @@ struct reloc *elf_init_reloc_data_sym(struct elf *elf, struct section *sec,
struct symbol *sym,
s64 addend)
{
- if (sym->sec && (sec->sh.sh_flags & SHF_EXECINSTR)) {
+ if (is_text_sec(sec)) {
ERROR("bad call to %s() for text symbol %s", __func__, sym->name);
return NULL;
}
@@ -986,12 +1082,16 @@ static int read_relocs(struct elf *elf)
rsec->base->rsec = rsec;
- nr_reloc = 0;
+ /* nr_alloc_relocs=0: libelf owns d_buf */
+ rsec->nr_alloc_relocs = 0;
+
rsec->relocs = calloc(sec_num_entries(rsec), sizeof(*reloc));
if (!rsec->relocs) {
ERROR_GLIBC("calloc");
return -1;
}
+
+ nr_reloc = 0;
for (i = 0; i < sec_num_entries(rsec); i++) {
reloc = &rsec->relocs[i];
@@ -1044,6 +1144,12 @@ struct elf *elf_open_read(const char *name, int flags)
goto err;
}
+ elf->name = strdup(name);
+ if (!elf->name) {
+ ERROR_GLIBC("strdup");
+ return NULL;
+ }
+
if ((flags & O_ACCMODE) == O_RDONLY)
cmd = ELF_C_READ_MMAP;
else if ((flags & O_ACCMODE) == O_RDWR)
@@ -1081,11 +1187,142 @@ err:
return NULL;
}
-static int elf_add_string(struct elf *elf, struct section *strtab, char *str)
+struct elf *elf_create_file(GElf_Ehdr *ehdr, const char *name)
{
- Elf_Data *data;
- Elf_Scn *s;
- int len;
+ struct section *null, *symtab, *strtab, *shstrtab;
+ char *dir, *base, *tmp_name;
+ struct symbol *sym;
+ struct elf *elf;
+
+ elf_version(EV_CURRENT);
+
+ elf = calloc(1, sizeof(*elf));
+ if (!elf) {
+ ERROR_GLIBC("calloc");
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&elf->sections);
+
+ dir = strdup(name);
+ if (!dir) {
+ ERROR_GLIBC("strdup");
+ return NULL;
+ }
+
+ dir = dirname(dir);
+
+ base = strdup(name);
+ if (!base) {
+ ERROR_GLIBC("strdup");
+ return NULL;
+ }
+
+ base = basename(base);
+
+ tmp_name = malloc(256);
+ if (!tmp_name) {
+ ERROR_GLIBC("malloc");
+ return NULL;
+ }
+
+ snprintf(tmp_name, 256, "%s/%s.XXXXXX", dir, base);
+
+ elf->fd = mkstemp(tmp_name);
+ if (elf->fd == -1) {
+ ERROR_GLIBC("can't create tmp file");
+ exit(1);
+ }
+
+ elf->tmp_name = tmp_name;
+
+ elf->name = strdup(name);
+ if (!elf->name) {
+ ERROR_GLIBC("strdup");
+ return NULL;
+ }
+
+ elf->elf = elf_begin(elf->fd, ELF_C_WRITE, NULL);
+ if (!elf->elf) {
+ ERROR_ELF("elf_begin");
+ return NULL;
+ }
+
+ if (!gelf_newehdr(elf->elf, ELFCLASS64)) {
+ ERROR_ELF("gelf_newehdr");
+ return NULL;
+ }
+
+ memcpy(&elf->ehdr, ehdr, sizeof(elf->ehdr));
+
+ if (!gelf_update_ehdr(elf->elf, &elf->ehdr)) {
+ ERROR_ELF("gelf_update_ehdr");
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&elf->symbols);
+
+ if (!elf_alloc_hash(section, 1000) ||
+ !elf_alloc_hash(section_name, 1000) ||
+ !elf_alloc_hash(symbol, 10000) ||
+ !elf_alloc_hash(symbol_name, 10000) ||
+ !elf_alloc_hash(reloc, 100000))
+ return NULL;
+
+ null = elf_create_section(elf, NULL, 0, 0, SHT_NULL, 0, 0);
+ shstrtab = elf_create_section(elf, NULL, 0, 0, SHT_STRTAB, 1, 0);
+ strtab = elf_create_section(elf, NULL, 0, 0, SHT_STRTAB, 1, 0);
+
+ if (!null || !shstrtab || !strtab)
+ return NULL;
+
+ null->name = "";
+ shstrtab->name = ".shstrtab";
+ strtab->name = ".strtab";
+
+ null->sh.sh_name = elf_add_string(elf, shstrtab, null->name);
+ shstrtab->sh.sh_name = elf_add_string(elf, shstrtab, shstrtab->name);
+ strtab->sh.sh_name = elf_add_string(elf, shstrtab, strtab->name);
+
+ if (null->sh.sh_name == -1 || shstrtab->sh.sh_name == -1 || strtab->sh.sh_name == -1)
+ return NULL;
+
+ elf_hash_add(section_name, &null->name_hash, str_hash(null->name));
+ elf_hash_add(section_name, &strtab->name_hash, str_hash(strtab->name));
+ elf_hash_add(section_name, &shstrtab->name_hash, str_hash(shstrtab->name));
+
+ if (elf_add_string(elf, strtab, "") == -1)
+ return NULL;
+
+ symtab = elf_create_section(elf, ".symtab", 0x18, 0x18, SHT_SYMTAB, 0x8, 0);
+ if (!symtab)
+ return NULL;
+
+ symtab->sh.sh_link = strtab->idx;
+ symtab->sh.sh_info = 1;
+
+ elf->ehdr.e_shstrndx = shstrtab->idx;
+ if (!gelf_update_ehdr(elf->elf, &elf->ehdr)) {
+ ERROR_ELF("gelf_update_ehdr");
+ return NULL;
+ }
+
+ sym = calloc(1, sizeof(*sym));
+ if (!sym) {
+ ERROR_GLIBC("calloc");
+ return NULL;
+ }
+
+ sym->name = "";
+ sym->sec = null;
+ elf_add_symbol(elf, sym);
+
+ return elf;
+}
+
+unsigned int elf_add_string(struct elf *elf, struct section *strtab, const char *str)
+{
+ unsigned int offset;
if (!strtab)
strtab = find_section_by_name(elf, ".strtab");
@@ -1094,76 +1331,109 @@ static int elf_add_string(struct elf *elf, struct section *strtab, char *str)
return -1;
}
- s = elf_getscn(elf->elf, strtab->idx);
+ if (!strtab->sh.sh_addralign) {
+ ERROR("'%s': invalid sh_addralign", strtab->name);
+ return -1;
+ }
+
+ offset = ALIGN_UP(strtab->sh.sh_size, strtab->sh.sh_addralign);
+
+ if (!elf_add_data(elf, strtab, str, strlen(str) + 1))
+ return -1;
+
+ return offset;
+}
+
+void *elf_add_data(struct elf *elf, struct section *sec, const void *data, size_t size)
+{
+ unsigned long offset;
+ Elf_Scn *s;
+
+ if (!sec->sh.sh_addralign) {
+ ERROR("'%s': invalid sh_addralign", sec->name);
+ return NULL;
+ }
+
+ s = elf_getscn(elf->elf, sec->idx);
if (!s) {
ERROR_ELF("elf_getscn");
- return -1;
+ return NULL;
}
- data = elf_newdata(s);
- if (!data) {
+ sec->data = elf_newdata(s);
+ if (!sec->data) {
ERROR_ELF("elf_newdata");
- return -1;
+ return NULL;
}
- data->d_buf = str;
- data->d_size = strlen(str) + 1;
- data->d_align = 1;
+ sec->data->d_buf = calloc(1, size);
+ if (!sec->data->d_buf) {
+ ERROR_GLIBC("calloc");
+ return NULL;
+ }
- len = strtab->sh.sh_size;
- strtab->sh.sh_size += data->d_size;
+ if (data)
+ memcpy(sec->data->d_buf, data, size);
- mark_sec_changed(elf, strtab, true);
+ sec->data->d_size = size;
+ sec->data->d_align = 1;
- return len;
+ offset = ALIGN_UP(sec->sh.sh_size, sec->sh.sh_addralign);
+ sec->sh.sh_size = offset + size;
+
+ mark_sec_changed(elf, sec, true);
+
+ return sec->data->d_buf;
}
struct section *elf_create_section(struct elf *elf, const char *name,
- size_t entsize, unsigned int nr)
+ size_t size, size_t entsize,
+ unsigned int type, unsigned int align,
+ unsigned int flags)
{
struct section *sec, *shstrtab;
- size_t size = entsize * nr;
Elf_Scn *s;
- sec = malloc(sizeof(*sec));
+ if (name && find_section_by_name(elf, name)) {
+ ERROR("section '%s' already exists", name);
+ return NULL;
+ }
+
+ sec = calloc(1, sizeof(*sec));
if (!sec) {
- ERROR_GLIBC("malloc");
+ ERROR_GLIBC("calloc");
return NULL;
}
- memset(sec, 0, sizeof(*sec));
INIT_LIST_HEAD(&sec->symbol_list);
+ /* don't actually create the section, just the data structures */
+ if (type == SHT_NULL)
+ goto add;
+
s = elf_newscn(elf->elf);
if (!s) {
ERROR_ELF("elf_newscn");
return NULL;
}
- sec->name = strdup(name);
- if (!sec->name) {
- ERROR_GLIBC("strdup");
- return NULL;
- }
-
sec->idx = elf_ndxscn(s);
- sec->data = elf_newdata(s);
- if (!sec->data) {
- ERROR_ELF("elf_newdata");
- return NULL;
- }
+ if (size) {
+ sec->data = elf_newdata(s);
+ if (!sec->data) {
+ ERROR_ELF("elf_newdata");
+ return NULL;
+ }
- sec->data->d_size = size;
- sec->data->d_align = 1;
+ sec->data->d_size = size;
+ sec->data->d_align = 1;
- if (size) {
- sec->data->d_buf = malloc(size);
+ sec->data->d_buf = calloc(1, size);
if (!sec->data->d_buf) {
- ERROR_GLIBC("malloc");
+ ERROR_GLIBC("calloc");
return NULL;
}
- memset(sec->data->d_buf, 0, size);
}
if (!gelf_getshdr(s, &sec->sh)) {
@@ -1173,34 +1443,152 @@ struct section *elf_create_section(struct elf *elf, const char *name,
sec->sh.sh_size = size;
sec->sh.sh_entsize = entsize;
- sec->sh.sh_type = SHT_PROGBITS;
- sec->sh.sh_addralign = 1;
- sec->sh.sh_flags = SHF_ALLOC;
-
- /* Add section name to .shstrtab (or .strtab for Clang) */
- shstrtab = find_section_by_name(elf, ".shstrtab");
- if (!shstrtab)
- shstrtab = find_section_by_name(elf, ".strtab");
- if (!shstrtab) {
- ERROR("can't find .shstrtab or .strtab section");
- return NULL;
+ sec->sh.sh_type = type;
+ sec->sh.sh_addralign = align;
+ sec->sh.sh_flags = flags;
+
+ if (name) {
+ sec->name = strdup(name);
+ if (!sec->name) {
+ ERROR("strdup");
+ return NULL;
+ }
+
+ /* Add section name to .shstrtab (or .strtab for Clang) */
+ shstrtab = find_section_by_name(elf, ".shstrtab");
+ if (!shstrtab) {
+ shstrtab = find_section_by_name(elf, ".strtab");
+ if (!shstrtab) {
+ ERROR("can't find .shstrtab or .strtab");
+ return NULL;
+ }
+ }
+ sec->sh.sh_name = elf_add_string(elf, shstrtab, sec->name);
+ if (sec->sh.sh_name == -1)
+ return NULL;
+
+ elf_hash_add(section_name, &sec->name_hash, str_hash(sec->name));
}
- sec->sh.sh_name = elf_add_string(elf, shstrtab, sec->name);
- if (sec->sh.sh_name == -1)
- return NULL;
+add:
list_add_tail(&sec->list, &elf->sections);
elf_hash_add(section, &sec->hash, sec->idx);
- elf_hash_add(section_name, &sec->name_hash, str_hash(sec->name));
mark_sec_changed(elf, sec, true);
return sec;
}
-static struct section *elf_create_rela_section(struct elf *elf,
- struct section *sec,
- unsigned int reloc_nr)
+static int elf_alloc_reloc(struct elf *elf, struct section *rsec)
+{
+ struct reloc *old_relocs, *old_relocs_end, *new_relocs;
+ unsigned int nr_relocs_old = sec_num_entries(rsec);
+ unsigned int nr_relocs_new = nr_relocs_old + 1;
+ unsigned long nr_alloc;
+ struct symbol *sym;
+
+ if (!rsec->data) {
+ rsec->data = elf_newdata(elf_getscn(elf->elf, rsec->idx));
+ if (!rsec->data) {
+ ERROR_ELF("elf_newdata");
+ return -1;
+ }
+
+ rsec->data->d_align = 1;
+ rsec->data->d_type = ELF_T_RELA;
+ rsec->data->d_buf = NULL;
+ }
+
+ rsec->data->d_size = nr_relocs_new * elf_rela_size(elf);
+ rsec->sh.sh_size = rsec->data->d_size;
+
+ nr_alloc = MAX(64, ALIGN_UP_POW2(nr_relocs_new));
+ if (nr_alloc <= rsec->nr_alloc_relocs)
+ return 0;
+
+ if (rsec->data->d_buf && !rsec->nr_alloc_relocs) {
+ void *orig_buf = rsec->data->d_buf;
+
+ /*
+ * The original d_buf is owned by libelf so it can't be
+ * realloced.
+ */
+ rsec->data->d_buf = malloc(nr_alloc * elf_rela_size(elf));
+ if (!rsec->data->d_buf) {
+ ERROR_GLIBC("malloc");
+ return -1;
+ }
+ memcpy(rsec->data->d_buf, orig_buf,
+ nr_relocs_old * elf_rela_size(elf));
+ } else {
+ rsec->data->d_buf = realloc(rsec->data->d_buf,
+ nr_alloc * elf_rela_size(elf));
+ if (!rsec->data->d_buf) {
+ ERROR_GLIBC("realloc");
+ return -1;
+ }
+ }
+
+ rsec->nr_alloc_relocs = nr_alloc;
+
+ old_relocs = rsec->relocs;
+ new_relocs = calloc(nr_alloc, sizeof(struct reloc));
+ if (!new_relocs) {
+ ERROR_GLIBC("calloc");
+ return -1;
+ }
+
+ if (!old_relocs)
+ goto done;
+
+ /*
+ * The struct reloc's address has changed. Update all the symbols and
+ * relocs which reference it.
+ */
+
+ old_relocs_end = &old_relocs[nr_relocs_old];
+ for_each_sym(elf, sym) {
+ struct reloc *reloc;
+
+ reloc = sym->relocs;
+ if (!reloc)
+ continue;
+
+ if (reloc >= old_relocs && reloc < old_relocs_end)
+ sym->relocs = &new_relocs[reloc - old_relocs];
+
+ while (1) {
+ struct reloc *next_reloc = sym_next_reloc(reloc);
+
+ if (!next_reloc)
+ break;
+
+ if (next_reloc >= old_relocs && next_reloc < old_relocs_end)
+ set_sym_next_reloc(reloc, &new_relocs[next_reloc - old_relocs]);
+
+ reloc = next_reloc;
+ }
+ }
+
+ memcpy(new_relocs, old_relocs, nr_relocs_old * sizeof(struct reloc));
+
+ for (int i = 0; i < nr_relocs_old; i++) {
+ struct reloc *old = &old_relocs[i];
+ struct reloc *new = &new_relocs[i];
+ u32 key = reloc_hash(old);
+
+ elf_hash_del(reloc, &old->hash, key);
+ elf_hash_add(reloc, &new->hash, key);
+ }
+
+ free(old_relocs);
+done:
+ rsec->relocs = new_relocs;
+ return 0;
+}
+
+struct section *elf_create_rela_section(struct elf *elf, struct section *sec,
+ unsigned int nr_relocs)
{
struct section *rsec;
char *rsec_name;
@@ -1213,41 +1601,72 @@ static struct section *elf_create_rela_section(struct elf *elf,
strcpy(rsec_name, ".rela");
strcat(rsec_name, sec->name);
- rsec = elf_create_section(elf, rsec_name, elf_rela_size(elf), reloc_nr);
+ rsec = elf_create_section(elf, rsec_name, nr_relocs * elf_rela_size(elf),
+ elf_rela_size(elf), SHT_RELA, elf_addr_size(elf),
+ SHF_INFO_LINK);
free(rsec_name);
if (!rsec)
return NULL;
- rsec->data->d_type = ELF_T_RELA;
- rsec->sh.sh_type = SHT_RELA;
- rsec->sh.sh_addralign = elf_addr_size(elf);
- rsec->sh.sh_link = find_section_by_name(elf, ".symtab")->idx;
- rsec->sh.sh_info = sec->idx;
- rsec->sh.sh_flags = SHF_INFO_LINK;
+ if (nr_relocs) {
+ rsec->data->d_type = ELF_T_RELA;
- rsec->relocs = calloc(sec_num_entries(rsec), sizeof(struct reloc));
- if (!rsec->relocs) {
- ERROR_GLIBC("calloc");
- return NULL;
+ rsec->nr_alloc_relocs = nr_relocs;
+ rsec->relocs = calloc(nr_relocs, sizeof(struct reloc));
+ if (!rsec->relocs) {
+ ERROR_GLIBC("calloc");
+ return NULL;
+ }
}
+ rsec->sh.sh_link = find_section_by_name(elf, ".symtab")->idx;
+ rsec->sh.sh_info = sec->idx;
+
sec->rsec = rsec;
rsec->base = sec;
return rsec;
}
+struct reloc *elf_create_reloc(struct elf *elf, struct section *sec,
+ unsigned long offset,
+ struct symbol *sym, s64 addend,
+ unsigned int type)
+{
+ struct section *rsec = sec->rsec;
+
+ if (!rsec) {
+ rsec = elf_create_rela_section(elf, sec, 0);
+ if (!rsec)
+ return NULL;
+ }
+
+ if (find_reloc_by_dest(elf, sec, offset)) {
+ ERROR_FUNC(sec, offset, "duplicate reloc");
+ return NULL;
+ }
+
+ if (elf_alloc_reloc(elf, rsec))
+ return NULL;
+
+ mark_sec_changed(elf, rsec, true);
+
+ return elf_init_reloc(elf, rsec, sec_num_entries(rsec) - 1, offset, sym,
+ addend, type);
+}
+
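/*
 * Illustrative sketch, not part of the patch: the typical pattern for
 * the new elf_create_reloc() API; 'sym' is assumed to be an existing
 * symbol and the section name is hypothetical. elf_create_reloc()
 * creates the .rela section on demand and grows the reloc array.
 */
static int example_emit_reloc(struct elf *elf, struct symbol *sym)
{
	struct section *sec;

	sec = elf_create_section(elf, ".discard.example", sizeof(u64),
				 sizeof(u64), SHT_PROGBITS, 8, SHF_ALLOC);
	if (!sec)
		return -1;

	/* absolute 64-bit reference to 'sym' at offset 0 */
	if (!elf_create_reloc(elf, sec, 0, sym, 0, R_ABS64))
		return -1;

	return 0;
}
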
struct section *elf_create_section_pair(struct elf *elf, const char *name,
size_t entsize, unsigned int nr,
- unsigned int reloc_nr)
+ unsigned int nr_relocs)
{
struct section *sec;
- sec = elf_create_section(elf, name, entsize, nr);
+ sec = elf_create_section(elf, name, nr * entsize, entsize,
+ SHT_PROGBITS, 1, SHF_ALLOC);
if (!sec)
return NULL;
- if (!elf_create_rela_section(elf, sec, reloc_nr))
+ if (!elf_create_rela_section(elf, sec, nr_relocs))
return NULL;
return sec;
@@ -1282,7 +1701,7 @@ int elf_write_insn(struct elf *elf, struct section *sec,
*/
static int elf_truncate_section(struct elf *elf, struct section *sec)
{
- u64 size = sec->sh.sh_size;
+ u64 size = sec_size(sec);
bool truncated = false;
Elf_Data *data = NULL;
Elf_Scn *s;
@@ -1296,7 +1715,6 @@ static int elf_truncate_section(struct elf *elf, struct section *sec)
for (;;) {
/* get next data descriptor for the relevant section */
data = elf_getdata(s, data);
-
if (!data) {
if (size) {
ERROR("end of section data but non-zero size left\n");
@@ -1332,8 +1750,8 @@ int elf_write(struct elf *elf)
/* Update changed relocation sections and section headers: */
list_for_each_entry(sec, &elf->sections, list) {
- if (sec->truncate)
- elf_truncate_section(elf, sec);
+ if (sec->truncate && elf_truncate_section(elf, sec))
+ return -1;
if (sec_changed(sec)) {
s = elf_getscn(elf->elf, sec->idx);
@@ -1366,7 +1784,7 @@ int elf_write(struct elf *elf)
return 0;
}
-void elf_close(struct elf *elf)
+int elf_close(struct elf *elf)
{
if (elf->elf)
elf_end(elf->elf);
@@ -1374,8 +1792,12 @@ void elf_close(struct elf *elf)
if (elf->fd > 0)
close(elf->fd);
+ if (elf->tmp_name && rename(elf->tmp_name, elf->name))
+ return -1;
+
/*
* NOTE: All remaining allocations are leaked on purpose. Objtool is
* about to exit anyway.
*/
+ return 0;
}
diff --git a/tools/objtool/include/objtool/arch.h b/tools/objtool/include/objtool/arch.h
index 01ef6f415adf..8866158975fc 100644
--- a/tools/objtool/include/objtool/arch.h
+++ b/tools/objtool/include/objtool/arch.h
@@ -71,7 +71,7 @@ struct stack_op {
struct instruction;
-int arch_ftrace_match(char *name);
+int arch_ftrace_match(const char *name);
void arch_initial_func_cfi_state(struct cfi_init_state *state);
@@ -83,7 +83,8 @@ bool arch_callee_saved_reg(unsigned char reg);
unsigned long arch_jump_destination(struct instruction *insn);
-unsigned long arch_dest_reloc_offset(int addend);
+s64 arch_insn_adjusted_addend(struct instruction *insn, struct reloc *reloc);
+u64 arch_adjusted_addend(struct reloc *reloc);
const char *arch_nop_insn(int len);
const char *arch_ret_insn(int len);
@@ -97,8 +98,20 @@ bool arch_is_embedded_insn(struct symbol *sym);
int arch_rewrite_retpolines(struct objtool_file *file);
bool arch_pc_relative_reloc(struct reloc *reloc);
+bool arch_absolute_reloc(struct elf *elf, struct reloc *reloc);
unsigned int arch_reloc_size(struct reloc *reloc);
unsigned long arch_jump_table_sym_offset(struct reloc *reloc, struct reloc *table);
+extern const char *arch_reg_name[CFI_NUM_REGS];
+
+#ifdef DISAS
+
+#include <bfd.h>
+#include <dis-asm.h>
+
+int arch_disas_info_init(struct disassemble_info *dinfo);
+
+#endif /* DISAS */
+
#endif /* _ARCH_H */
diff --git a/tools/objtool/include/objtool/builtin.h b/tools/objtool/include/objtool/builtin.h
index 6b08666fa69d..b9e229ed4dc0 100644
--- a/tools/objtool/include/objtool/builtin.h
+++ b/tools/objtool/include/objtool/builtin.h
@@ -9,12 +9,15 @@
struct opts {
/* actions: */
+ bool cfi;
+ bool checksum;
bool dump_orc;
bool hack_jump_label;
bool hack_noinstr;
bool hack_skylake;
bool ibt;
bool mcount;
+ bool noabs;
bool noinstr;
bool orc;
bool retpoline;
@@ -25,10 +28,12 @@ struct opts {
bool static_call;
bool uaccess;
int prefix;
- bool cfi;
+ const char *disas;
/* options: */
bool backtrace;
+ bool backup;
+ const char *debug_checksum;
bool dryrun;
bool link;
bool mnop;
@@ -37,8 +42,10 @@ struct opts {
const char *output;
bool sec_address;
bool stats;
+ const char *trace;
bool verbose;
bool werror;
+ bool wide;
};
extern struct opts opts;
@@ -47,6 +54,8 @@ int cmd_parse_options(int argc, const char **argv, const char * const usage[]);
int objtool_run(int argc, const char **argv);
-void print_args(void);
+int make_backup(void);
+
+int cmd_klp(int argc, const char **argv);
#endif /* _BUILTIN_H */
diff --git a/tools/objtool/include/objtool/check.h b/tools/objtool/include/objtool/check.h
index 00fb745e7233..2e1346ad5e92 100644
--- a/tools/objtool/include/objtool/check.h
+++ b/tools/objtool/include/objtool/check.h
@@ -36,6 +36,19 @@ struct alt_group {
struct cfi_state **cfi;
bool ignore;
+ unsigned int feature;
+};
+
+enum alternative_type {
+ ALT_TYPE_INSTRUCTIONS,
+ ALT_TYPE_JUMP_TABLE,
+ ALT_TYPE_EX_TABLE,
+};
+
+struct alternative {
+ struct alternative *next;
+ struct instruction *insn;
+ enum alternative_type type;
};
#define INSN_CHUNK_BITS 8
@@ -64,8 +77,11 @@ struct instruction {
noendbr : 1,
unret : 1,
visited : 4,
- no_reloc : 1;
- /* 10 bit hole */
+ no_reloc : 1,
+ hole : 1,
+ fake : 1,
+ trace : 1;
+ /* 7 bit hole */
struct alt_group *alt_group;
struct instruction *jump_dest;
@@ -115,6 +131,15 @@ static inline bool is_jump(struct instruction *insn)
return is_static_jump(insn) || is_dynamic_jump(insn);
}
+static inline struct symbol *insn_call_dest(struct instruction *insn)
+{
+ if (insn->type == INSN_JUMP_DYNAMIC ||
+ insn->type == INSN_CALL_DYNAMIC)
+ return NULL;
+
+ return insn->_call_dest;
+}
+
struct instruction *find_insn(struct objtool_file *file,
struct section *sec, unsigned long offset);
@@ -125,4 +150,14 @@ struct instruction *next_insn_same_sec(struct objtool_file *file, struct instruc
insn && insn->sec == _sec; \
insn = next_insn_same_sec(file, insn))
+#define sym_for_each_insn(file, sym, insn) \
+ for (insn = find_insn(file, sym->sec, sym->offset); \
+ insn && insn->offset < sym->offset + sym->len; \
+ insn = next_insn_same_sec(file, insn))
+
+const char *objtool_disas_insn(struct instruction *insn);
+
+extern size_t sym_name_max_len;
+extern struct disas_context *objtool_disas_ctx;
+
#endif /* _CHECK_H */
diff --git a/tools/objtool/include/objtool/checksum.h b/tools/objtool/include/objtool/checksum.h
new file mode 100644
index 000000000000..7fe21608722a
--- /dev/null
+++ b/tools/objtool/include/objtool/checksum.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _OBJTOOL_CHECKSUM_H
+#define _OBJTOOL_CHECKSUM_H
+
+#include <objtool/elf.h>
+
+#ifdef BUILD_KLP
+
+static inline void checksum_init(struct symbol *func)
+{
+ if (func && !func->csum.state) {
+ func->csum.state = XXH3_createState();
+ XXH3_64bits_reset(func->csum.state);
+ }
+}
+
+static inline void checksum_update(struct symbol *func,
+ struct instruction *insn,
+ const void *data, size_t size)
+{
+ XXH3_64bits_update(func->csum.state, data, size);
+ dbg_checksum(func, insn, XXH3_64bits_digest(func->csum.state));
+}
+
+static inline void checksum_finish(struct symbol *func)
+{
+ if (func && func->csum.state) {
+ func->csum.checksum = XXH3_64bits_digest(func->csum.state);
+ func->csum.state = NULL;
+ }
+}
+
+#else /* !BUILD_KLP */
+
+static inline void checksum_init(struct symbol *func) {}
+static inline void checksum_update(struct symbol *func,
+ struct instruction *insn,
+ const void *data, size_t size) {}
+static inline void checksum_finish(struct symbol *func) {}
+
+#endif /* !BUILD_KLP */
+
+#endif /* _OBJTOOL_CHECKSUM_H */
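
/*
 * Illustrative sketch, not part of the patch: the intended lifecycle of
 * the helpers above. A caller (e.g. a checksum pass in check.c, not
 * shown here) would feed each instruction's bytes through the running
 * XXH3 state:
 *
 *	checksum_init(func);
 *	checksum_update(func, insn, bytes, len);	// once per insn
 *	checksum_finish(func);				// final value in func->csum.checksum
 */
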
diff --git a/tools/objtool/include/objtool/checksum_types.h b/tools/objtool/include/objtool/checksum_types.h
new file mode 100644
index 000000000000..507efdd8ab5b
--- /dev/null
+++ b/tools/objtool/include/objtool/checksum_types.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _OBJTOOL_CHECKSUM_TYPES_H
+#define _OBJTOOL_CHECKSUM_TYPES_H
+
+struct sym_checksum {
+ u64 addr;
+ u64 checksum;
+};
+
+#ifdef BUILD_KLP
+
+#include <xxhash.h>
+
+struct checksum {
+ XXH3_state_t *state;
+ XXH64_hash_t checksum;
+};
+
+#else /* !BUILD_KLP */
+
+struct checksum {};
+
+#endif /* !BUILD_KLP */
+
+#endif /* _OBJTOOL_CHECKSUM_TYPES_H */
diff --git a/tools/objtool/include/objtool/disas.h b/tools/objtool/include/objtool/disas.h
new file mode 100644
index 000000000000..e8f395eff159
--- /dev/null
+++ b/tools/objtool/include/objtool/disas.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2025, Oracle and/or its affiliates.
+ */
+
+#ifndef _DISAS_H
+#define _DISAS_H
+
+struct alternative;
+struct disas_context;
+struct disassemble_info;
+
+#ifdef DISAS
+
+struct disas_context *disas_context_create(struct objtool_file *file);
+void disas_context_destroy(struct disas_context *dctx);
+void disas_warned_funcs(struct disas_context *dctx);
+void disas_funcs(struct disas_context *dctx);
+int disas_info_init(struct disassemble_info *dinfo,
+ int arch, int mach32, int mach64,
+ const char *options);
+size_t disas_insn(struct disas_context *dctx, struct instruction *insn);
+char *disas_result(struct disas_context *dctx);
+void disas_print_info(FILE *stream, struct instruction *insn, int depth,
+ const char *format, ...);
+void disas_print_insn(FILE *stream, struct disas_context *dctx,
+ struct instruction *insn, int depth,
+ const char *format, ...);
+char *disas_alt_name(struct alternative *alt);
+const char *disas_alt_type_name(struct instruction *insn);
+
+#else /* DISAS */
+
+#include <objtool/warn.h>
+
+static inline struct disas_context *disas_context_create(struct objtool_file *file)
+{
+ WARN("Rebuild with libopcodes for disassembly support");
+ return NULL;
+}
+
+static inline void disas_context_destroy(struct disas_context *dctx) {}
+static inline void disas_warned_funcs(struct disas_context *dctx) {}
+static inline void disas_funcs(struct disas_context *dctx) {}
+
+static inline int disas_info_init(struct disassemble_info *dinfo,
+ int arch, int mach32, int mach64,
+ const char *options)
+{
+ return -1;
+}
+
+static inline size_t disas_insn(struct disas_context *dctx,
+ struct instruction *insn)
+{
+ return -1;
+}
+
+static inline char *disas_result(struct disas_context *dctx)
+{
+ return NULL;
+}
+
+static inline void disas_print_info(FILE *stream, struct instruction *insn,
+ int depth, const char *format, ...) {}
+static inline void disas_print_insn(FILE *stream, struct disas_context *dctx,
+ struct instruction *insn, int depth,
+ const char *format, ...) {}
+static inline char *disas_alt_name(struct alternative *alt)
+{
+ return NULL;
+}
+
+static inline const char *disas_alt_type_name(struct instruction *insn)
+{
+ return NULL;
+}
+
+#endif /* DISAS */
+
+#endif /* _DISAS_H */
diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h
index 0a2fa3ac0079..e12c516bd320 100644
--- a/tools/objtool/include/objtool/elf.h
+++ b/tools/objtool/include/objtool/elf.h
@@ -8,12 +8,21 @@
#include <stdio.h>
#include <gelf.h>
+#include <linux/string.h>
#include <linux/list.h>
#include <linux/hashtable.h>
#include <linux/rbtree.h>
#include <linux/jhash.h>
+
+#include <objtool/endianness.h>
+#include <objtool/checksum_types.h>
#include <arch/elf.h>
+#define SEC_NAME_LEN 1024
+#define SYM_NAME_LEN 512
+
+#define bswap_if_needed(elf, val) __bswap_if_needed(&elf->ehdr, val)
+
#ifdef LIBELF_USE_DEPRECATED
# define elf_getshdrnum elf_getshnum
# define elf_getshdrstrndx elf_getshstrndx
@@ -40,24 +49,27 @@ struct section {
struct section *base, *rsec;
struct symbol *sym;
Elf_Data *data;
- char *name;
+ const char *name;
int idx;
bool _changed, text, rodata, noinstr, init, truncate;
struct reloc *relocs;
+ unsigned long nr_alloc_relocs;
+ struct section *twin;
};
struct symbol {
struct list_head list;
+ struct list_head global_list;
struct rb_node node;
struct elf_hash_node hash;
struct elf_hash_node name_hash;
GElf_Sym sym;
struct section *sec;
- char *name;
+ const char *name, *demangled_name;
unsigned int idx, len;
unsigned long offset;
unsigned long __subtree_last;
- struct symbol *pfunc, *cfunc, *alias;
+ struct symbol *pfunc, *cfunc, *alias, *file;
unsigned char bind, type;
u8 uaccess_safe : 1;
u8 static_call_tramp : 1;
@@ -70,9 +82,18 @@ struct symbol {
u8 local_label : 1;
u8 frame_pointer : 1;
u8 ignore : 1;
+ u8 nocfi : 1;
+ u8 cold : 1;
+ u8 prefix : 1;
+ u8 debug_checksum : 1;
+ u8 changed : 1;
+ u8 included : 1;
+ u8 klp : 1;
struct list_head pv_target;
struct reloc *relocs;
struct section *group_sec;
+ struct checksum csum;
+ struct symbol *twin, *clone;
};
struct reloc {
@@ -87,9 +108,10 @@ struct elf {
GElf_Ehdr ehdr;
int fd;
bool changed;
- char *name;
+ const char *name, *tmp_name;
unsigned int num_files;
struct list_head sections;
+ struct list_head symbols;
unsigned long num_relocs;
int symbol_bits;
@@ -109,14 +131,37 @@ struct elf {
};
struct elf *elf_open_read(const char *name, int flags);
+struct elf *elf_create_file(GElf_Ehdr *ehdr, const char *name);
struct section *elf_create_section(struct elf *elf, const char *name,
- size_t entsize, unsigned int nr);
+ size_t size, size_t entsize,
+ unsigned int type, unsigned int align,
+ unsigned int flags);
struct section *elf_create_section_pair(struct elf *elf, const char *name,
size_t entsize, unsigned int nr,
unsigned int reloc_nr);
-struct symbol *elf_create_prefix_symbol(struct elf *elf, struct symbol *orig, long size);
+struct section *elf_create_rela_section(struct elf *elf, struct section *sec,
+ unsigned int reloc_nr);
+
+struct symbol *elf_create_symbol(struct elf *elf, const char *name,
+ struct section *sec, unsigned int bind,
+ unsigned int type, unsigned long offset,
+ size_t size);
+struct symbol *elf_create_section_symbol(struct elf *elf, struct section *sec);
+
+void *elf_add_data(struct elf *elf, struct section *sec, const void *data,
+ size_t size);
+
+unsigned int elf_add_string(struct elf *elf, struct section *strtab, const char *str);
+
+struct reloc *elf_create_reloc(struct elf *elf, struct section *sec,
+ unsigned long offset, struct symbol *sym,
+ s64 addend, unsigned int type);
+
+struct reloc *elf_init_reloc(struct elf *elf, struct section *rsec,
+ unsigned int reloc_idx, unsigned long offset,
+ struct symbol *sym, s64 addend, unsigned int type);
struct reloc *elf_init_reloc_text_sym(struct elf *elf, struct section *sec,
unsigned long offset,
@@ -130,16 +175,17 @@ struct reloc *elf_init_reloc_data_sym(struct elf *elf, struct section *sec,
struct symbol *sym,
s64 addend);
-int elf_write_insn(struct elf *elf, struct section *sec,
- unsigned long offset, unsigned int len,
- const char *insn);
+int elf_write_insn(struct elf *elf, struct section *sec, unsigned long offset,
+ unsigned int len, const char *insn);
+
int elf_write(struct elf *elf);
-void elf_close(struct elf *elf);
+int elf_close(struct elf *elf);
struct section *find_section_by_name(const struct elf *elf, const char *name);
struct symbol *find_func_by_offset(struct section *sec, unsigned long offset);
struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset);
struct symbol *find_symbol_by_name(const struct elf *elf, const char *name);
+struct symbol *find_global_symbol_by_name(const struct elf *elf, const char *name);
struct symbol *find_symbol_containing(const struct section *sec, unsigned long offset);
int find_symbol_hole_containing(const struct section *sec, unsigned long offset);
struct reloc *find_reloc_by_dest(const struct elf *elf, struct section *sec, unsigned long offset);
@@ -177,11 +223,76 @@ static inline unsigned int elf_text_rela_type(struct elf *elf)
return elf_addr_size(elf) == 4 ? R_TEXT32 : R_TEXT64;
}
+static inline bool is_undef_sym(struct symbol *sym)
+{
+ return !sym->sec->idx;
+}
+
+static inline bool is_null_sym(struct symbol *sym)
+{
+ return !sym->idx;
+}
+
+static inline bool is_sec_sym(struct symbol *sym)
+{
+ return sym->type == STT_SECTION;
+}
+
+static inline bool is_object_sym(struct symbol *sym)
+{
+ return sym->type == STT_OBJECT;
+}
+
+static inline bool is_func_sym(struct symbol *sym)
+{
+ return sym->type == STT_FUNC;
+}
+
+static inline bool is_file_sym(struct symbol *sym)
+{
+ return sym->type == STT_FILE;
+}
+
+static inline bool is_notype_sym(struct symbol *sym)
+{
+ return sym->type == STT_NOTYPE;
+}
+
+static inline bool is_global_sym(struct symbol *sym)
+{
+ return sym->bind == STB_GLOBAL;
+}
+
+static inline bool is_weak_sym(struct symbol *sym)
+{
+ return sym->bind == STB_WEAK;
+}
+
+static inline bool is_local_sym(struct symbol *sym)
+{
+ return sym->bind == STB_LOCAL;
+}
+
+static inline bool is_prefix_func(struct symbol *sym)
+{
+ return sym->prefix;
+}
+
static inline bool is_reloc_sec(struct section *sec)
{
return sec->sh.sh_type == SHT_RELA || sec->sh.sh_type == SHT_REL;
}
+static inline bool is_string_sec(struct section *sec)
+{
+ return sec->sh.sh_flags & SHF_STRINGS;
+}
+
+static inline bool is_text_sec(struct section *sec)
+{
+ return sec->sh.sh_flags & SHF_EXECINSTR;
+}
+
static inline bool sec_changed(struct section *sec)
{
return sec->_changed;
@@ -222,6 +333,11 @@ static inline bool is_32bit_reloc(struct reloc *reloc)
return reloc->sec->sh.sh_entsize < 16;
}
+static inline unsigned long sec_size(struct section *sec)
+{
+ return sec->sh.sh_size;
+}
+
#define __get_reloc_field(reloc, field) \
({ \
is_32bit_reloc(reloc) ? \
@@ -299,6 +415,15 @@ static inline void set_reloc_type(struct elf *elf, struct reloc *reloc, unsigned
mark_sec_changed(elf, reloc->sec, true);
}
+static inline unsigned int annotype(struct elf *elf, struct section *sec,
+ struct reloc *reloc)
+{
+ unsigned int type;
+
+ type = *(u32 *)(sec->data->d_buf + (reloc_idx(reloc) * 8) + 4);
+ return bswap_if_needed(elf, type);
+}
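
/*
 * Illustrative note, not part of the patch: the "* 8 + 4" above assumes
 * each annotation entry is an 8-byte { s32 offset; u32 type; } pair,
 * with a relocation applied to the offset word; annotype() reads the
 * type word 4 bytes past the entry that the reloc indexes.
 */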
+
#define RELOC_JUMP_TABLE_BIT 1UL
/* Does reloc mark the beginning of a jump table? */
@@ -324,28 +449,54 @@ static inline void set_sym_next_reloc(struct reloc *reloc, struct reloc *next)
reloc->_sym_next_reloc = (unsigned long)next | bit;
}
-#define for_each_sec(file, sec) \
- list_for_each_entry(sec, &file->elf->sections, list)
+#define for_each_sec(elf, sec) \
+ list_for_each_entry(sec, &elf->sections, list)
#define sec_for_each_sym(sec, sym) \
list_for_each_entry(sym, &sec->symbol_list, list)
-#define for_each_sym(file, sym) \
- for (struct section *__sec, *__fake = (struct section *)1; \
- __fake; __fake = NULL) \
- for_each_sec(file, __sec) \
- sec_for_each_sym(__sec, sym)
+#define sec_prev_sym(sym) \
+ (sym->sec && sym->list.prev != &sym->sec->symbol_list ? \
+ list_prev_entry(sym, list) : NULL)
+
+#define for_each_sym(elf, sym) \
+ list_for_each_entry(sym, &elf->symbols, global_list)
+
+#define for_each_sym_continue(elf, sym) \
+ list_for_each_entry_continue(sym, &elf->symbols, global_list)
+
+#define rsec_next_reloc(rsec, reloc) \
+ (reloc_idx(reloc) < sec_num_entries(rsec) - 1 ? (reloc) + 1 : NULL)
#define for_each_reloc(rsec, reloc) \
- for (int __i = 0, __fake = 1; __fake; __fake = 0) \
- for (reloc = rsec->relocs; \
- __i < sec_num_entries(rsec); \
- __i++, reloc++)
+ for (reloc = rsec->relocs; reloc; reloc = rsec_next_reloc(rsec, reloc))
#define for_each_reloc_from(rsec, reloc) \
- for (int __i = reloc_idx(reloc); \
- __i < sec_num_entries(rsec); \
- __i++, reloc++)
+ for (; reloc; reloc = rsec_next_reloc(rsec, reloc))
+
+#define for_each_reloc_continue(rsec, reloc) \
+ for (reloc = rsec_next_reloc(rsec, reloc); reloc; \
+ reloc = rsec_next_reloc(rsec, reloc))
+
+#define sym_for_each_reloc(elf, sym, reloc) \
+ for (reloc = find_reloc_by_dest_range(elf, sym->sec, \
+ sym->offset, sym->len); \
+ reloc && reloc_offset(reloc) < sym->offset + sym->len; \
+ reloc = rsec_next_reloc(sym->sec->rsec, reloc))
+
+static inline struct symbol *get_func_prefix(struct symbol *func)
+{
+ struct symbol *prev;
+
+ if (!is_func_sym(func))
+ return NULL;
+
+ prev = sec_prev_sym(func);
+ if (prev && is_prefix_func(prev))
+ return prev;
+
+ return NULL;
+}
#define OFFSET_STRIDE_BITS 4
#define OFFSET_STRIDE (1UL << OFFSET_STRIDE_BITS)
diff --git a/tools/objtool/include/objtool/endianness.h b/tools/objtool/include/objtool/endianness.h
index 4d2aa9b0fe2f..aebcd2338668 100644
--- a/tools/objtool/include/objtool/endianness.h
+++ b/tools/objtool/include/objtool/endianness.h
@@ -4,7 +4,6 @@
#include <linux/kernel.h>
#include <endian.h>
-#include <objtool/elf.h>
/*
* Does a byte swap if target file endianness doesn't match the host, i.e. cross
@@ -12,16 +11,16 @@
* To be used for multi-byte values conversion, which are read from / about
* to be written to a target native endianness ELF file.
*/
-static inline bool need_bswap(struct elf *elf)
+static inline bool need_bswap(GElf_Ehdr *ehdr)
{
return (__BYTE_ORDER == __LITTLE_ENDIAN) ^
- (elf->ehdr.e_ident[EI_DATA] == ELFDATA2LSB);
+ (ehdr->e_ident[EI_DATA] == ELFDATA2LSB);
}
-#define bswap_if_needed(elf, val) \
+#define __bswap_if_needed(ehdr, val) \
({ \
__typeof__(val) __ret; \
- bool __need_bswap = need_bswap(elf); \
+ bool __need_bswap = need_bswap(ehdr); \
switch (sizeof(val)) { \
case 8: \
__ret = __need_bswap ? bswap_64(val) : (val); break; \
diff --git a/tools/objtool/include/objtool/klp.h b/tools/objtool/include/objtool/klp.h
new file mode 100644
index 000000000000..ad830a7ce55b
--- /dev/null
+++ b/tools/objtool/include/objtool/klp.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _OBJTOOL_KLP_H
+#define _OBJTOOL_KLP_H
+
+#define SHF_RELA_LIVEPATCH 0x00100000
+#define SHN_LIVEPATCH 0xff20
+
+/*
+ * __klp_objects and __klp_funcs are created by klp diff and used by the patch
+ * module init code to build the klp_patch, klp_object and klp_func structs
+ * needed by the livepatch API.
+ */
+#define KLP_OBJECTS_SEC "__klp_objects"
+#define KLP_FUNCS_SEC "__klp_funcs"
+
+/*
+ * __klp_relocs is an intermediate section which is created by klp diff and
+ * converted into KLP symbols/relas by "objtool klp post-link". This is needed
+ * to work around the linker, which doesn't preserve SHN_LIVEPATCH or
+ * SHF_RELA_LIVEPATCH, nor does it support having two RELA sections for a
+ * single PROGBITS section.
+ */
+#define KLP_RELOCS_SEC "__klp_relocs"
+#define KLP_STRINGS_SEC ".rodata.klp.str1.1"
+
+struct klp_reloc {
+ void *offset;
+ void *sym;
+ u32 type;
+};
+
+int cmd_klp_diff(int argc, const char **argv);
+int cmd_klp_post_link(int argc, const char **argv);
+
+#endif /* _OBJTOOL_KLP_H */
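
/*
 * Illustrative example, not part of the patch: a KLP symbol reference
 * as built by "objtool klp diff" (see clone_reloc_klp() in klp-diff.c)
 * follows the .klp.sym.<modname>.<sym_name>,<sympos> format, e.g. the
 * second duplicate static symbol "foo" in module "bar" becomes:
 *
 *	.klp.sym.bar.foo,2
 *
 * A sympos of 0 means the name is unique within the object.
 */
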
diff --git a/tools/objtool/include/objtool/objtool.h b/tools/objtool/include/objtool/objtool.h
index c0dc86a78ff6..6dc12a59ad00 100644
--- a/tools/objtool/include/objtool/objtool.h
+++ b/tools/objtool/include/objtool/objtool.h
@@ -28,7 +28,7 @@ struct objtool_file {
struct list_head mcount_loc_list;
struct list_head endbr_list;
struct list_head call_list;
- bool ignore_unreachables, hints, rodata;
+ bool ignore_unreachables, hints, rodata, klp;
unsigned int nr_endbr;
unsigned int nr_endbr_int;
@@ -39,6 +39,10 @@ struct objtool_file {
struct pv_state *pv_ops;
};
+char *top_level_dir(const char *file);
+
+int init_signal_handler(void);
+
struct objtool_file *objtool_open_read(const char *_objname);
int objtool_pv_add(struct objtool_file *file, int idx, struct symbol *func);
diff --git a/tools/objtool/include/objtool/special.h b/tools/objtool/include/objtool/special.h
index 72d09c0adf1a..121c3761899c 100644
--- a/tools/objtool/include/objtool/special.h
+++ b/tools/objtool/include/objtool/special.h
@@ -25,7 +25,7 @@ struct special_alt {
struct section *new_sec;
unsigned long new_off;
- unsigned int orig_len, new_len; /* group only */
+ unsigned int orig_len, new_len, feature; /* group only */
};
int special_get_alts(struct elf *elf, struct list_head *alts);
@@ -38,4 +38,6 @@ bool arch_support_alt_relocation(struct special_alt *special_alt,
struct reloc *arch_find_switch_table(struct objtool_file *file,
struct instruction *insn,
unsigned long *table_size);
+const char *arch_cpu_feature_name(int feature_number);
+
#endif /* _SPECIAL_H */
diff --git a/tools/objtool/include/objtool/trace.h b/tools/objtool/include/objtool/trace.h
new file mode 100644
index 000000000000..70b574366797
--- /dev/null
+++ b/tools/objtool/include/objtool/trace.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2025, Oracle and/or its affiliates.
+ */
+
+#ifndef _TRACE_H
+#define _TRACE_H
+
+#include <objtool/check.h>
+#include <objtool/disas.h>
+
+#ifdef DISAS
+
+extern bool trace;
+extern int trace_depth;
+
+#define TRACE(fmt, ...) \
+({ if (trace) \
+ fprintf(stderr, fmt, ##__VA_ARGS__); \
+})
+
+/*
+ * Print the instruction address and a message. The instruction
+ * itself is not printed.
+ */
+#define TRACE_ADDR(insn, fmt, ...) \
+({ \
+ if (trace) { \
+ disas_print_info(stderr, insn, trace_depth - 1, \
+ fmt "\n", ##__VA_ARGS__); \
+ } \
+})
+
+/*
+ * Print the instruction address, the instruction and a message.
+ */
+#define TRACE_INSN(insn, fmt, ...) \
+({ \
+ if (trace) { \
+ disas_print_insn(stderr, objtool_disas_ctx, \
+ insn, trace_depth - 1, \
+ fmt, ##__VA_ARGS__); \
+ fprintf(stderr, "\n"); \
+ insn->trace = 1; \
+ } \
+})
+
+#define TRACE_INSN_STATE(insn, sprev, snext) \
+({ \
+ if (trace) \
+ trace_insn_state(insn, sprev, snext); \
+})
+
+#define TRACE_ALT_FMT(pfx, fmt) pfx "<%s.%lx> " fmt
+#define TRACE_ALT_ARG(insn) disas_alt_type_name(insn), (insn)->offset
+
+#define TRACE_ALT(insn, fmt, ...) \
+ TRACE_INSN(insn, TRACE_ALT_FMT("", fmt), \
+ TRACE_ALT_ARG(insn), ##__VA_ARGS__)
+
+#define TRACE_ALT_INFO(insn, pfx, fmt, ...) \
+ TRACE_ADDR(insn, TRACE_ALT_FMT(pfx, fmt), \
+ TRACE_ALT_ARG(insn), ##__VA_ARGS__)
+
+#define TRACE_ALT_INFO_NOADDR(insn, pfx, fmt, ...) \
+ TRACE_ADDR(NULL, TRACE_ALT_FMT(pfx, fmt), \
+ TRACE_ALT_ARG(insn), ##__VA_ARGS__)
+
+#define TRACE_ALT_BEGIN(insn, alt, alt_name) \
+({ \
+ if (trace) { \
+ alt_name = disas_alt_name(alt); \
+ trace_alt_begin(insn, alt, alt_name); \
+ } \
+})
+
+#define TRACE_ALT_END(insn, alt, alt_name) \
+({ \
+ if (trace) { \
+ trace_alt_end(insn, alt, alt_name); \
+ free(alt_name); \
+ } \
+})
+
+static inline void trace_enable(void)
+{
+ trace = true;
+ trace_depth = 0;
+}
+
+static inline void trace_disable(void)
+{
+ trace = false;
+}
+
+static inline void trace_depth_inc(void)
+{
+ if (trace)
+ trace_depth++;
+}
+
+static inline void trace_depth_dec(void)
+{
+ if (trace)
+ trace_depth--;
+}
+
+void trace_insn_state(struct instruction *insn, struct insn_state *sprev,
+ struct insn_state *snext);
+void trace_alt_begin(struct instruction *orig_insn, struct alternative *alt,
+ char *alt_name);
+void trace_alt_end(struct instruction *orig_insn, struct alternative *alt,
+ char *alt_name);
+
+#else /* DISAS */
+
+#define TRACE(fmt, ...) ({})
+#define TRACE_ADDR(insn, fmt, ...) ({})
+#define TRACE_INSN(insn, fmt, ...) ({})
+#define TRACE_INSN_STATE(insn, sprev, snext) ({})
+#define TRACE_ALT(insn, fmt, ...) ({})
+#define TRACE_ALT_INFO(insn, fmt, ...) ({})
+#define TRACE_ALT_INFO_NOADDR(insn, fmt, ...) ({})
+#define TRACE_ALT_BEGIN(insn, alt, alt_name) ({})
+#define TRACE_ALT_END(insn, alt, alt_name) ({})
+
+static inline void trace_enable(void) {}
+static inline void trace_disable(void) {}
+static inline void trace_depth_inc(void) {}
+static inline void trace_depth_dec(void) {}
+static inline void trace_alt_begin(struct instruction *orig_insn,
+ struct alternative *alt,
+ char *alt_name) {}
+static inline void trace_alt_end(struct instruction *orig_insn,
+ struct alternative *alt,
+ char *alt_name) {}
+
+#endif /* DISAS */
+
+#endif /* _TRACE_H */
diff --git a/tools/objtool/include/objtool/util.h b/tools/objtool/include/objtool/util.h
new file mode 100644
index 000000000000..a0180b312f73
--- /dev/null
+++ b/tools/objtool/include/objtool/util.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _UTIL_H
+#define _UTIL_H
+
+#include <objtool/warn.h>
+
+#define snprintf_check(str, size, format, args...) \
+({ \
+ int __ret = snprintf(str, size, format, args); \
+ if (__ret < 0) \
+ ERROR_GLIBC("snprintf"); \
+ else if (__ret >= size) \
+ ERROR("snprintf() failed for '" format "'", args); \
+ else \
+ __ret = 0; \
+ __ret; \
+})
+
+#endif /* _UTIL_H */
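
/*
 * Illustrative sketch, not part of the patch: snprintf_check() returns
 * 0 on success; on failure or truncation it prints an error and
 * returns nonzero, so callers can bail out directly:
 */
static inline int example_build_name(char *buf, size_t size,
				     const char *mod, const char *sym)
{
	return snprintf_check(buf, size, "%s.%s", mod, sym);
}
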
diff --git a/tools/objtool/include/objtool/warn.h b/tools/objtool/include/objtool/warn.h
index cb8fe846d9dd..25ff7942b4d5 100644
--- a/tools/objtool/include/objtool/warn.h
+++ b/tools/objtool/include/objtool/warn.h
@@ -77,9 +77,11 @@ static inline char *offstr(struct section *sec, unsigned long offset)
#define WARN_INSN(insn, format, ...) \
({ \
struct instruction *_insn = (insn); \
- if (!_insn->sym || !_insn->sym->warned) \
+ if (!_insn->sym || !_insn->sym->warned) { \
WARN_FUNC(_insn->sec, _insn->offset, format, \
##__VA_ARGS__); \
+ BT_INSN(_insn, ""); \
+ } \
if (_insn->sym) \
_insn->sym->warned = 1; \
})
@@ -87,10 +89,15 @@ static inline char *offstr(struct section *sec, unsigned long offset)
#define BT_INSN(insn, format, ...) \
({ \
if (opts.verbose || opts.backtrace) { \
- struct instruction *_insn = (insn); \
- char *_str = offstr(_insn->sec, _insn->offset); \
- WARN(" %s: " format, _str, ##__VA_ARGS__); \
- free(_str); \
+ struct instruction *__insn = (insn); \
+ char *_str = offstr(__insn->sec, __insn->offset); \
+ const char *_istr = objtool_disas_insn(__insn); \
+ int _len; \
+ _len = snprintf(NULL, 0, " %s: " format, _str, ##__VA_ARGS__); \
+ _len = (_len < 50) ? 50 - _len : 0; \
+ WARN(" %s: " format " %*s%s", _str, ##__VA_ARGS__, _len, "", _istr); \
+ free(_str); \
+ __insn->trace = 1; \
} \
})
@@ -102,4 +109,53 @@ static inline char *offstr(struct section *sec, unsigned long offset)
#define ERROR_FUNC(sec, offset, format, ...) __WARN_FUNC(ERROR_STR, sec, offset, format, ##__VA_ARGS__)
#define ERROR_INSN(insn, format, ...) WARN_FUNC(insn->sec, insn->offset, format, ##__VA_ARGS__)
+extern bool debug;
+extern int indent;
+
+static inline void unindent(int *unused) { indent--; }
+
+/*
+ * Clang prior to 17 is being silly and considers many __cleanup() variables
+ * as unused (because they are, their sole purpose is to go out of scope).
+ *
+ * https://github.com/llvm/llvm-project/commit/877210faa447f4cc7db87812f8ed80e398fedd61
+ */
+#undef __cleanup
+#define __cleanup(func) __maybe_unused __attribute__((__cleanup__(func)))
+
+#define __dbg(format, ...) \
+ fprintf(stderr, \
+ "DEBUG: %s%s" format "\n", \
+ objname ?: "", \
+ objname ? ": " : "", \
+ ##__VA_ARGS__)
+
+#define dbg(args...) \
+({ \
+ if (unlikely(debug)) \
+ __dbg(args); \
+})
+
+#define __dbg_indent(format, ...) \
+({ \
+ if (unlikely(debug)) \
+ __dbg("%*s" format, indent * 8, "", ##__VA_ARGS__); \
+})
+
+/* Double-expand so __COUNTER__ is evaluated before token pasting. */
+#define __dbg_paste(a, b) a##b
+#define _dbg_paste(a, b) __dbg_paste(a, b)
+
+#define dbg_indent(args...) \
+ int __cleanup(unindent) _dbg_paste(__dummy_, __COUNTER__); \
+ __dbg_indent(args); \
+ indent++
+
+#define dbg_checksum(func, insn, checksum) \
+({ \
+ if (unlikely(insn->sym && insn->sym->pfunc && \
+ insn->sym->pfunc->debug_checksum)) { \
+ char *insn_off = offstr(insn->sec, insn->offset); \
+ __dbg("checksum: %s %s %016lx", \
+ func->name, insn_off, checksum); \
+ free(insn_off); \
+ } \
+})
+
#endif /* _WARN_H */
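
/*
 * Illustrative sketch, not part of the patch: dbg_indent() prints at
 * the current depth and then raises 'indent' for the remainder of the
 * enclosing scope; the __cleanup() variable it declares lowers the
 * depth again on scope exit.
 */
static inline void example_debug_scope(struct symbol *sym)
{
	dbg_indent("%s", sym->name);	/* print, then indent++ */
	/* dbg_indent() calls made from here print one level deeper */
}					/* cleanup runs unindent() */
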
diff --git a/tools/objtool/klp-diff.c b/tools/objtool/klp-diff.c
new file mode 100644
index 000000000000..4d1f9e9977eb
--- /dev/null
+++ b/tools/objtool/klp-diff.c
@@ -0,0 +1,1723 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#define _GNU_SOURCE /* memmem() */
+#include <subcmd/parse-options.h>
+#include <stdlib.h>
+#include <string.h>
+#include <libgen.h>
+#include <stdio.h>
+#include <ctype.h>
+
+#include <objtool/objtool.h>
+#include <objtool/warn.h>
+#include <objtool/arch.h>
+#include <objtool/klp.h>
+#include <objtool/util.h>
+#include <arch/special.h>
+
+#include <linux/objtool_types.h>
+#include <linux/livepatch_external.h>
+#include <linux/stringify.h>
+#include <linux/string.h>
+#include <linux/jhash.h>
+
+#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
+
+struct elfs {
+ struct elf *orig, *patched, *out;
+ const char *modname;
+};
+
+struct export {
+ struct hlist_node hash;
+ char *mod, *sym;
+};
+
+static const char * const klp_diff_usage[] = {
+ "objtool klp diff [<options>] <in1.o> <in2.o> <out.o>",
+ NULL,
+};
+
+static const struct option klp_diff_options[] = {
+ OPT_GROUP("Options:"),
+ OPT_BOOLEAN('d', "debug", &debug, "enable debug output"),
+ OPT_END(),
+};
+
+static DEFINE_HASHTABLE(exports, 15);
+
+static inline u32 str_hash(const char *str)
+{
+ return jhash(str, strlen(str), 0);
+}
+
+static char *escape_str(const char *orig)
+{
+ size_t len = 0;
+ const char *a;
+ char *b, *new;
+
+ for (a = orig; *a; a++) {
+ switch (*a) {
+ case '\001': len += 5; break;
+ case '\n':
+ case '\t': len += 2; break;
+ default: len++;
+ }
+ }
+
+ new = malloc(len + 1);
+ if (!new)
+ return NULL;
+
+ for (a = orig, b = new; *a; a++) {
+ switch (*a) {
+ case '\001': memcpy(b, "<SOH>", 5); b += 5; break;
+ case '\n': *b++ = '\\'; *b++ = 'n'; break;
+ case '\t': *b++ = '\\'; *b++ = 't'; break;
+ default: *b++ = *a;
+ }
+ }
+
+ *b = '\0';
+ return new;
+}
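
/*
 * Illustrative note, not part of the patch: escape_str() makes a string
 * safe to print on a single line, e.g. a real tab becomes the two
 * characters "\t", a newline becomes "\n", and a \001 separator byte
 * becomes the visible token "<SOH>". The caller owns the returned
 * buffer.
 */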
+
+static int read_exports(void)
+{
+ const char *symvers = "Module.symvers";
+ char line[1024], *path = NULL;
+ unsigned int line_num = 1;
+ FILE *file;
+
+ file = fopen(symvers, "r");
+ if (!file) {
+ path = top_level_dir(symvers);
+ if (!path) {
+ ERROR("can't open '%s', \"objtool diff\" should be run from the kernel tree", symvers);
+ return -1;
+ }
+
+ file = fopen(path, "r");
+ if (!file) {
+ ERROR_GLIBC("fopen");
+ return -1;
+ }
+ }
+
+ while (fgets(line, 1024, file)) {
+ char *sym, *mod, *type;
+ struct export *export;
+
+ sym = strchr(line, '\t');
+ if (!sym) {
+ ERROR("malformed Module.symvers (sym) at line %d", line_num);
+ return -1;
+ }
+
+ *sym++ = '\0';
+
+ mod = strchr(sym, '\t');
+ if (!mod) {
+ ERROR("malformed Module.symvers (mod) at line %d", line_num);
+ return -1;
+ }
+
+ *mod++ = '\0';
+
+ type = strchr(mod, '\t');
+ if (!type) {
+ ERROR("malformed Module.symvers (type) at line %d", line_num);
+ return -1;
+ }
+
+ *type++ = '\0';
+
+ if (*sym == '\0' || *mod == '\0') {
+ ERROR("malformed Module.symvers at line %d", line_num);
+ return -1;
+ }
+
+ export = calloc(1, sizeof(*export));
+ if (!export) {
+ ERROR_GLIBC("calloc");
+ return -1;
+ }
+
+ export->mod = strdup(mod);
+ if (!export->mod) {
+ ERROR_GLIBC("strdup");
+ return -1;
+ }
+
+ export->sym = strdup(sym);
+ if (!export->sym) {
+ ERROR_GLIBC("strdup");
+ return -1;
+ }
+
+ hash_add(exports, &export->hash, str_hash(sym));
+ line_num++;
+ }
+
+ free(path);
+ fclose(file);
+
+ return 0;
+}
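
/*
 * Illustrative note, not part of the patch: the parser above expects
 * the tab-separated Module.symvers format, e.g.:
 *
 *	0x12345678	symbol_name	vmlinux	EXPORT_SYMBOL_GPL
 *
 * The CRC field is skipped; only the symbol and module names are kept
 * and hashed for the later export lookups.
 */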
+
+static int read_sym_checksums(struct elf *elf)
+{
+ struct section *sec;
+
+ sec = find_section_by_name(elf, ".discard.sym_checksum");
+ if (!sec) {
+ ERROR("'%s' missing .discard.sym_checksum section, file not processed by 'objtool --checksum'?",
+ elf->name);
+ return -1;
+ }
+
+ if (!sec->rsec) {
+ ERROR("missing reloc section for .discard.sym_checksum");
+ return -1;
+ }
+
+ if (sec_size(sec) % sizeof(struct sym_checksum)) {
+ ERROR("struct sym_checksum size mismatch");
+ return -1;
+ }
+
+ for (int i = 0; i < sec_size(sec) / sizeof(struct sym_checksum); i++) {
+ struct sym_checksum *sym_checksum;
+ struct reloc *reloc;
+ struct symbol *sym;
+
+ sym_checksum = (struct sym_checksum *)sec->data->d_buf + i;
+
+ reloc = find_reloc_by_dest(elf, sec, i * sizeof(*sym_checksum));
+ if (!reloc) {
+ ERROR("can't find reloc for sym_checksum[%d]", i);
+ return -1;
+ }
+
+ sym = reloc->sym;
+
+ if (is_sec_sym(sym)) {
+ ERROR("not sure how to handle section %s", sym->name);
+ return -1;
+ }
+
+ if (is_func_sym(sym))
+ sym->csum.checksum = sym_checksum->checksum;
+ }
+
+ return 0;
+}
+
+static struct symbol *first_file_symbol(struct elf *elf)
+{
+ struct symbol *sym;
+
+ for_each_sym(elf, sym) {
+ if (is_file_sym(sym))
+ return sym;
+ }
+
+ return NULL;
+}
+
+static struct symbol *next_file_symbol(struct elf *elf, struct symbol *sym)
+{
+ for_each_sym_continue(elf, sym) {
+ if (is_file_sym(sym))
+ return sym;
+ }
+
+ return NULL;
+}
+
+/*
+ * Certain static local variables should never be correlated. They will be
+ * used in place rather than referencing the originals.
+ */
+static bool is_uncorrelated_static_local(struct symbol *sym)
+{
+ static const char * const vars[] = {
+ "__already_done.",
+ "__func__.",
+ "__key.",
+ "__warned.",
+ "_entry.",
+ "_entry_ptr.",
+ "_rs.",
+ "descriptor.",
+ "CSWTCH.",
+ };
+
+ if (!is_object_sym(sym) || !is_local_sym(sym))
+ return false;
+
+ if (!strcmp(sym->sec->name, ".data.once"))
+ return true;
+
+ for (int i = 0; i < ARRAY_SIZE(vars); i++) {
+ if (strstarts(sym->name, vars[i]))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Clang emits several useless .Ltmp_* code labels.
+ */
+static bool is_clang_tmp_label(struct symbol *sym)
+{
+ return sym->type == STT_NOTYPE &&
+ is_text_sec(sym->sec) &&
+ strstarts(sym->name, ".Ltmp") &&
+ isdigit(sym->name[5]);
+}
+
+static bool is_special_section(struct section *sec)
+{
+ static const char * const specials[] = {
+ ".altinstructions",
+ ".smp_locks",
+ "__bug_table",
+ "__ex_table",
+ "__jump_table",
+ "__mcount_loc",
+
+ /*
+ * Extract .static_call_sites here to inherit non-module
+ * preferential treatment. The later static call processing
+ * during klp module build will be skipped when it sees this
+ * section already exists.
+ */
+ ".static_call_sites",
+ };
+
+ static const char * const non_special_discards[] = {
+ ".discard.addressable",
+ ".discard.sym_checksum",
+ };
+
+ if (is_text_sec(sec))
+ return false;
+
+ for (int i = 0; i < ARRAY_SIZE(specials); i++) {
+ if (!strcmp(sec->name, specials[i]))
+ return true;
+ }
+
+ /* Most .discard data sections are special */
+ for (int i = 0; i < ARRAY_SIZE(non_special_discards); i++) {
+ if (!strcmp(sec->name, non_special_discards[i]))
+ return false;
+ }
+
+ return strstarts(sec->name, ".discard.");
+}
+
+/*
+ * These sections are referenced by special sections but aren't considered
+ * special sections themselves.
+ */
+static bool is_special_section_aux(struct section *sec)
+{
+ static const char * const specials_aux[] = {
+ ".altinstr_replacement",
+ ".altinstr_aux",
+ };
+
+ for (int i = 0; i < ARRAY_SIZE(specials_aux); i++) {
+ if (!strcmp(sec->name, specials_aux[i]))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * These symbols should never be correlated, so their local patched versions
+ * are used instead of linking to the originals.
+ */
+static bool dont_correlate(struct symbol *sym)
+{
+ return is_file_sym(sym) ||
+ is_null_sym(sym) ||
+ is_sec_sym(sym) ||
+ is_prefix_func(sym) ||
+ is_uncorrelated_static_local(sym) ||
+ is_clang_tmp_label(sym) ||
+ is_string_sec(sym->sec) ||
+ is_special_section(sym->sec) ||
+ is_special_section_aux(sym->sec) ||
+ strstarts(sym->name, "__initcall__");
+}
+
+/*
+ * For each symbol in the original kernel, find its corresponding "twin" in the
+ * patched kernel.
+ */
+static int correlate_symbols(struct elfs *e)
+{
+ struct symbol *file1_sym, *file2_sym;
+ struct symbol *sym1, *sym2;
+
+ /* Correlate locals */
+ for (file1_sym = first_file_symbol(e->orig),
+ file2_sym = first_file_symbol(e->patched); ;
+ file1_sym = next_file_symbol(e->orig, file1_sym),
+ file2_sym = next_file_symbol(e->patched, file2_sym)) {
+
+ if (!file1_sym && file2_sym) {
+ ERROR("FILE symbol mismatch: NULL != %s", file2_sym->name);
+ return -1;
+ }
+
+ if (file1_sym && !file2_sym) {
+ ERROR("FILE symbol mismatch: %s != NULL", file1_sym->name);
+ return -1;
+ }
+
+ if (!file1_sym)
+ break;
+
+ if (strcmp(file1_sym->name, file2_sym->name)) {
+ ERROR("FILE symbol mismatch: %s != %s", file1_sym->name, file2_sym->name);
+ return -1;
+ }
+
+ file1_sym->twin = file2_sym;
+ file2_sym->twin = file1_sym;
+
+ sym1 = file1_sym;
+
+ for_each_sym_continue(e->orig, sym1) {
+ if (is_file_sym(sym1) || !is_local_sym(sym1))
+ break;
+
+ if (dont_correlate(sym1))
+ continue;
+
+ sym2 = file2_sym;
+ for_each_sym_continue(e->patched, sym2) {
+ if (is_file_sym(sym2) || !is_local_sym(sym2))
+ break;
+
+ if (sym2->twin || dont_correlate(sym2))
+ continue;
+
+ if (strcmp(sym1->demangled_name, sym2->demangled_name))
+ continue;
+
+ sym1->twin = sym2;
+ sym2->twin = sym1;
+ break;
+ }
+ }
+ }
+
+ /* Correlate globals */
+ for_each_sym(e->orig, sym1) {
+ if (sym1->bind == STB_LOCAL)
+ continue;
+
+ sym2 = find_global_symbol_by_name(e->patched, sym1->name);
+
+ if (sym2 && !sym2->twin && !strcmp(sym1->name, sym2->name)) {
+ sym1->twin = sym2;
+ sym2->twin = sym1;
+ }
+ }
+
+ for_each_sym(e->orig, sym1) {
+ if (sym1->twin || dont_correlate(sym1))
+ continue;
+ WARN("no correlation: %s", sym1->name);
+ }
+
+ return 0;
+}
+
+/* "sympos" is used by livepatch to disambiguate duplicate symbol names */
+static unsigned long find_sympos(struct elf *elf, struct symbol *sym)
+{
+ bool vmlinux = str_ends_with(objname, "vmlinux.o");
+ unsigned long sympos = 0, nr_matches = 0;
+ bool has_dup = false;
+ struct symbol *s;
+
+ if (sym->bind != STB_LOCAL)
+ return 0;
+
+ if (vmlinux && sym->type == STT_FUNC) {
+ /*
+ * HACK: Unfortunately, symbol ordering can differ between
+ * vmlinux.o and vmlinux due to the linker script emitting
+ * .text.unlikely* before .text*. Count .text.unlikely* first.
+ *
+ * TODO: Disambiguate symbols more reliably (checksums?)
+ */
+ for_each_sym(elf, s) {
+ if (strstarts(s->sec->name, ".text.unlikely") &&
+ !strcmp(s->name, sym->name)) {
+ nr_matches++;
+ if (s == sym)
+ sympos = nr_matches;
+ else
+ has_dup = true;
+ }
+ }
+ for_each_sym(elf, s) {
+ if (!strstarts(s->sec->name, ".text.unlikely") &&
+ !strcmp(s->name, sym->name)) {
+ nr_matches++;
+ if (s == sym)
+ sympos = nr_matches;
+ else
+ has_dup = true;
+ }
+ }
+ } else {
+ for_each_sym(elf, s) {
+ if (!strcmp(s->name, sym->name)) {
+ nr_matches++;
+ if (s == sym)
+ sympos = nr_matches;
+ else
+ has_dup = true;
+ }
+ }
+ }
+
+ if (!sympos) {
+ ERROR("can't find sympos for %s", sym->name);
+ return ULONG_MAX;
+ }
+
+ return has_dup ? sympos : 0;
+}
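
/*
 * Illustrative example, not part of the patch: if an object defines
 * three static functions named "show", find_sympos() returns 1, 2 or 3
 * for them (counting .text.unlikely* symbols first for vmlinux.o, per
 * the hack above). For a name with no duplicates it returns 0,
 * matching livepatch's "any position" convention.
 */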
+
+static int clone_sym_relocs(struct elfs *e, struct symbol *patched_sym);
+
+static struct symbol *__clone_symbol(struct elf *elf, struct symbol *patched_sym,
+ bool data_too)
+{
+ struct section *out_sec = NULL;
+ unsigned long offset = 0;
+ struct symbol *out_sym;
+
+ if (data_too && !is_undef_sym(patched_sym)) {
+ struct section *patched_sec = patched_sym->sec;
+
+ out_sec = find_section_by_name(elf, patched_sec->name);
+ if (!out_sec) {
+ out_sec = elf_create_section(elf, patched_sec->name, 0,
+ patched_sec->sh.sh_entsize,
+ patched_sec->sh.sh_type,
+ patched_sec->sh.sh_addralign,
+ patched_sec->sh.sh_flags);
+ if (!out_sec)
+ return NULL;
+ }
+
+ if (is_string_sec(patched_sym->sec)) {
+ out_sym = elf_create_section_symbol(elf, out_sec);
+ if (!out_sym)
+ return NULL;
+
+ goto sym_created;
+ }
+
+ if (!is_sec_sym(patched_sym))
+ offset = sec_size(out_sec);
+
+ if (patched_sym->len || is_sec_sym(patched_sym)) {
+ void *data = NULL;
+ size_t size;
+
+ /* bss doesn't have data */
+ if (patched_sym->sec->data->d_buf)
+ data = patched_sym->sec->data->d_buf + patched_sym->offset;
+
+ if (is_sec_sym(patched_sym))
+ size = sec_size(patched_sym->sec);
+ else
+ size = patched_sym->len;
+
+ if (!elf_add_data(elf, out_sec, data, size))
+ return NULL;
+ }
+ }
+
+ out_sym = elf_create_symbol(elf, patched_sym->name, out_sec,
+ patched_sym->bind, patched_sym->type,
+ offset, patched_sym->len);
+ if (!out_sym)
+ return NULL;
+
+sym_created:
+ patched_sym->clone = out_sym;
+ out_sym->clone = patched_sym;
+
+ return out_sym;
+}
+
+static const char *sym_type(struct symbol *sym)
+{
+ switch (sym->type) {
+ case STT_NOTYPE: return "NOTYPE";
+ case STT_OBJECT: return "OBJECT";
+ case STT_FUNC: return "FUNC";
+ case STT_SECTION: return "SECTION";
+ case STT_FILE: return "FILE";
+ default: return "UNKNOWN";
+ }
+}
+
+static const char *sym_bind(struct symbol *sym)
+{
+ switch (sym->bind) {
+ case STB_LOCAL: return "LOCAL";
+ case STB_GLOBAL: return "GLOBAL";
+ case STB_WEAK: return "WEAK";
+ default: return "UNKNOWN";
+ }
+}
+
+/*
+ * Copy a symbol to the output object, optionally including its data and
+ * relocations.
+ */
+static struct symbol *clone_symbol(struct elfs *e, struct symbol *patched_sym,
+ bool data_too)
+{
+ struct symbol *pfx;
+
+ if (patched_sym->clone)
+ return patched_sym->clone;
+
+ dbg_indent("%s%s", patched_sym->name, data_too ? " [+DATA]" : "");
+
+ /* Make sure the prefix gets cloned first */
+ if (is_func_sym(patched_sym) && data_too) {
+ pfx = get_func_prefix(patched_sym);
+ if (pfx)
+ clone_symbol(e, pfx, true);
+ }
+
+ if (!__clone_symbol(e->out, patched_sym, data_too))
+ return NULL;
+
+ if (data_too && clone_sym_relocs(e, patched_sym))
+ return NULL;
+
+ return patched_sym->clone;
+}
+
+static void mark_included_function(struct symbol *func)
+{
+ struct symbol *pfx;
+
+ func->included = 1;
+
+ /* Include prefix function */
+ pfx = get_func_prefix(func);
+ if (pfx)
+ pfx->included = 1;
+
+ /* Make sure .cold parent+child always stay together */
+ if (func->cfunc && func->cfunc != func)
+ func->cfunc->included = 1;
+ if (func->pfunc && func->pfunc != func)
+ func->pfunc->included = 1;
+}
+
+/*
+ * Mark all changed and added functions (and their dependencies) in the
+ * patched object for later copying to the output object.
+ */
+static int mark_changed_functions(struct elfs *e)
+{
+ struct symbol *sym_orig, *patched_sym;
+ bool changed = false;
+
+ /* Find changed functions */
+ for_each_sym(e->orig, sym_orig) {
+ if (!is_func_sym(sym_orig) || is_prefix_func(sym_orig))
+ continue;
+
+ patched_sym = sym_orig->twin;
+ if (!patched_sym)
+ continue;
+
+ if (sym_orig->csum.checksum != patched_sym->csum.checksum) {
+ patched_sym->changed = 1;
+ mark_included_function(patched_sym);
+ changed = true;
+ }
+ }
+
+ /* Find added functions and print them */
+ for_each_sym(e->patched, patched_sym) {
+ if (!is_func_sym(patched_sym) || is_prefix_func(patched_sym))
+ continue;
+
+ if (!patched_sym->twin) {
+ printf("%s: new function: %s\n", objname, patched_sym->name);
+ mark_included_function(patched_sym);
+ changed = true;
+ }
+ }
+
+ /* Print changed functions */
+ for_each_sym(e->patched, patched_sym) {
+ if (patched_sym->changed)
+ printf("%s: changed function: %s\n", objname, patched_sym->name);
+ }
+
+ return !changed ? -1 : 0;
+}
+
+static int clone_included_functions(struct elfs *e)
+{
+ struct symbol *patched_sym;
+
+ for_each_sym(e->patched, patched_sym) {
+ if (patched_sym->included) {
+ if (!clone_symbol(e, patched_sym, true))
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Determine whether a relocation should reference the section rather than the
+ * underlying symbol.
+ */
+static bool section_reference_needed(struct section *sec)
+{
+ /*
+ * String symbols are zero-length and uncorrelated. It's easier to
+ * deal with them as section symbols.
+ */
+ if (is_string_sec(sec))
+ return true;
+
+ /*
+ * .rodata has mostly anonymous data, so there's no way to determine the
+ * length of a needed reference. Just copy the whole section if needed.
+ */
+ if (strstarts(sec->name, ".rodata"))
+ return true;
+
+ /* UBSAN anonymous data */
+ if (strstarts(sec->name, ".data..Lubsan") || /* GCC */
+ strstarts(sec->name, ".data..L__unnamed_")) /* Clang */
+ return true;
+
+ return false;
+}
+
+static bool is_reloc_allowed(struct reloc *reloc)
+{
+ return section_reference_needed(reloc->sym->sec) == is_sec_sym(reloc->sym);
+}
+
+static struct export *find_export(struct symbol *sym)
+{
+ struct export *export;
+
+ hash_for_each_possible(exports, export, hash, str_hash(sym->name)) {
+ if (!strcmp(export->sym, sym->name))
+ return export;
+ }
+
+ return NULL;
+}
+
+static const char *__find_modname(struct elfs *e)
+{
+ struct section *sec;
+ char *name;
+
+ sec = find_section_by_name(e->orig, ".modinfo");
+ if (!sec) {
+ ERROR("missing .modinfo section");
+ return NULL;
+ }
+
+ name = memmem(sec->data->d_buf, sec_size(sec), "\0name=", 6);
+ if (name)
+ return name + 6;
+
+ name = strdup(e->orig->name);
+ if (!name) {
+ ERROR_GLIBC("strdup");
+ return NULL;
+ }
+
+ for (char *c = name; *c; c++) {
+ if (*c == '/')
+ name = c + 1;
+ else if (*c == '-')
+ *c = '_';
+ else if (*c == '.') {
+ *c = '\0';
+ break;
+ }
+ }
+
+ return name;
+}
+
+/* Get the object's module name as defined by the kernel (and klp_object) */
+static const char *find_modname(struct elfs *e)
+{
+ const char *modname;
+
+ if (e->modname)
+ return e->modname;
+
+ modname = __find_modname(e);
+ e->modname = modname;
+ return modname;
+}
+
+/*
+ * Copying a function from its native compiled environment to a kernel module
+ * removes its natural access to local functions/variables and unexported
+ * globals. References to such symbols need to be converted to KLP relocs so
+ * the kernel arch relocation code knows to apply them and where to find the
+ * symbols. Particularly, duplicate static symbols need to be disambiguated.
+ */
+static bool klp_reloc_needed(struct reloc *patched_reloc)
+{
+ struct symbol *patched_sym = patched_reloc->sym;
+ struct export *export;
+
+ /* no external symbol to reference */
+ if (dont_correlate(patched_sym))
+ return false;
+
+ /* For included functions, a regular reloc will do. */
+ if (patched_sym->included)
+ return false;
+
+ /*
+ * If exported by a module, it has to be a klp reloc. Thanks to the
+ * clusterfunk that is late module patching, the patch module is
+ * allowed to be loaded before any modules it depends on.
+ *
+ * If exported by vmlinux, a normal reloc will do.
+ */
+ export = find_export(patched_sym);
+ if (export)
+ return strcmp(export->mod, "vmlinux");
+
+ if (!patched_sym->twin) {
+ /*
+ * Presumably the symbol and its reference were added by the
+ * patch. The symbol could be defined in this .o or in another
+ * .o in the patch module.
+ *
+ * This check needs to be *after* the export check due to the
+ * possibility of the patch adding a new UNDEF reference to an
+ * exported symbol.
+ */
+ return false;
+ }
+
+ /* Unexported symbol which lives in the original vmlinux or module. */
+ return true;
+}
+
+static int convert_reloc_sym_to_secsym(struct elf *elf, struct reloc *reloc)
+{
+ struct symbol *sym = reloc->sym;
+ struct section *sec = sym->sec;
+
+ if (!sec->sym && !elf_create_section_symbol(elf, sec))
+ return -1;
+
+ reloc->sym = sec->sym;
+ set_reloc_sym(elf, reloc, sym->idx);
+ set_reloc_addend(elf, reloc, sym->offset + reloc_addend(reloc));
+ return 0;
+}
+
+static int convert_reloc_secsym_to_sym(struct elf *elf, struct reloc *reloc)
+{
+ struct symbol *sym = reloc->sym;
+ struct section *sec = sym->sec;
+
+ /* If the symbol has a dedicated section, it's easy to find */
+ sym = find_symbol_by_offset(sec, 0);
+ if (sym && sym->len == sec_size(sec))
+ goto found_sym;
+
+ /* No dedicated section; find the symbol manually */
+ sym = find_symbol_containing(sec, arch_adjusted_addend(reloc));
+ if (!sym) {
+ /*
+ * This can happen for special section references to weak code
+ * whose symbol has been stripped by the linker.
+ */
+ return -1;
+ }
+
+found_sym:
+ reloc->sym = sym;
+ set_reloc_sym(elf, reloc, sym->idx);
+ set_reloc_addend(elf, reloc, reloc_addend(reloc) - sym->offset);
+ return 0;
+}
+
+/*
+ * Convert a relocation symbol reference to the needed format: either a section
+ * symbol or the underlying symbol itself.
+ */
+static int convert_reloc_sym(struct elf *elf, struct reloc *reloc)
+{
+ if (is_reloc_allowed(reloc))
+ return 0;
+
+ if (section_reference_needed(reloc->sym->sec))
+ return convert_reloc_sym_to_secsym(elf, reloc);
+ else
+ return convert_reloc_secsym_to_sym(elf, reloc);
+}
+
+/*
+ * Convert a regular relocation to a klp relocation (sort of).
+ */
+static int clone_reloc_klp(struct elfs *e, struct reloc *patched_reloc,
+ struct section *sec, unsigned long offset,
+ struct export *export)
+{
+ struct symbol *patched_sym = patched_reloc->sym;
+ s64 addend = reloc_addend(patched_reloc);
+ const char *sym_modname, *sym_orig_name;
+ static struct section *klp_relocs;
+ struct symbol *sym, *klp_sym;
+ unsigned long klp_reloc_off;
+ char sym_name[SYM_NAME_LEN];
+ struct klp_reloc klp_reloc;
+ unsigned long sympos;
+
+ if (!patched_sym->twin) {
+ ERROR("unexpected klp reloc for new symbol %s", patched_sym->name);
+ return -1;
+ }
+
+ /*
+ * Keep the original reloc intact for now to avoid breaking the objtool
+ * run, which relies on proper relocations for many of its features. The
+ * reloc will be disabled later by "objtool klp post-link".
+ *
+ * Its symbol is converted to UNDEF (and WEAK to avoid modpost warnings).
+ */
+
+ sym = patched_sym->clone;
+ if (!sym) {
+ /* STB_WEAK: avoid modpost undefined symbol warnings */
+ sym = elf_create_symbol(e->out, patched_sym->name, NULL,
+ STB_WEAK, patched_sym->type, 0, 0);
+ if (!sym)
+ return -1;
+
+ patched_sym->clone = sym;
+ sym->clone = patched_sym;
+ }
+
+ if (!elf_create_reloc(e->out, sec, offset, sym, addend, reloc_type(patched_reloc)))
+ return -1;
+
+ /*
+ * Create the KLP symbol.
+ */
+
+ if (export) {
+ sym_modname = export->mod;
+ sym_orig_name = export->sym;
+ sympos = 0;
+ } else {
+ sym_modname = find_modname(e);
+ if (!sym_modname)
+ return -1;
+
+ sym_orig_name = patched_sym->twin->name;
+ sympos = find_sympos(e->orig, patched_sym->twin);
+ if (sympos == ULONG_MAX)
+ return -1;
+ }
+
+ /* symbol format: .klp.sym.modname.sym_name,sympos */
+ if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_SYM_PREFIX "%s.%s,%lu",
+ sym_modname, sym_orig_name, sympos))
+ return -1;
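+
+ /*
+ * For example (hypothetical names): a reference to the second of two
+ * identical statics named "head" in ext4.ko would yield
+ * ".klp.sym.ext4.head,2", while exported symbols always use sympos 0.
+ */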
+
+ klp_sym = find_symbol_by_name(e->out, sym_name);
+ if (!klp_sym) {
+ __dbg_indent("%s", sym_name);
+
+ /* STB_WEAK: avoid modpost undefined symbol warnings */
+ klp_sym = elf_create_symbol(e->out, sym_name, NULL,
+ STB_WEAK, patched_sym->type, 0, 0);
+ if (!klp_sym)
+ return -1;
+ }
+
+ /*
+ * Create the __klp_relocs entry. This will be converted to an actual
+ * KLP rela by "objtool klp post-link".
+ *
+ * This intermediate step is necessary to prevent corruption by the
+ * linker, which doesn't know how to properly handle two rela sections
+ * applying to the same base section.
+ */
+
+ if (!klp_relocs) {
+ klp_relocs = elf_create_section(e->out, KLP_RELOCS_SEC, 0,
+ 0, SHT_PROGBITS, 8, SHF_ALLOC);
+ if (!klp_relocs)
+ return -1;
+ }
+
+ klp_reloc_off = sec_size(klp_relocs);
+ memset(&klp_reloc, 0, sizeof(klp_reloc));
+
+ klp_reloc.type = reloc_type(patched_reloc);
+ if (!elf_add_data(e->out, klp_relocs, &klp_reloc, sizeof(klp_reloc)))
+ return -1;
+
+ /* klp_reloc.offset */
+ if (!sec->sym && !elf_create_section_symbol(e->out, sec))
+ return -1;
+
+ if (!elf_create_reloc(e->out, klp_relocs,
+ klp_reloc_off + offsetof(struct klp_reloc, offset),
+ sec->sym, offset, R_ABS64))
+ return -1;
+
+ /* klp_reloc.sym */
+ if (!elf_create_reloc(e->out, klp_relocs,
+ klp_reloc_off + offsetof(struct klp_reloc, sym),
+ klp_sym, addend, R_ABS64))
+ return -1;
+
+ return 0;
+}
+
+#define dbg_clone_reloc(sec, offset, patched_sym, addend, export, klp) \
+ dbg_indent("%s+0x%lx: %s%s0x%lx [%s%s%s%s%s%s]", \
+ sec->name, offset, patched_sym->name, \
+ addend >= 0 ? "+" : "-", labs(addend), \
+ sym_type(patched_sym), \
+ patched_sym->type == STT_SECTION ? "" : " ", \
+ patched_sym->type == STT_SECTION ? "" : sym_bind(patched_sym), \
+ is_undef_sym(patched_sym) ? " UNDEF" : "", \
+ export ? " EXPORTED" : "", \
+ klp ? " KLP" : "")
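+
+/*
+ * Example debug line produced by the macro above (hypothetical names):
+ *
+ *   .rodata+0x20: foo+0x8 [FUNC GLOBAL KLP]
+ */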
+
+/* Copy a reloc and its symbol to the output object */
+static int clone_reloc(struct elfs *e, struct reloc *patched_reloc,
+ struct section *sec, unsigned long offset)
+{
+ struct symbol *patched_sym = patched_reloc->sym;
+ struct export *export = find_export(patched_sym);
+ long addend = reloc_addend(patched_reloc);
+ struct symbol *out_sym;
+ bool klp;
+
+ if (!is_reloc_allowed(patched_reloc)) {
+ ERROR_FUNC(patched_reloc->sec->base, reloc_offset(patched_reloc),
+ "missing symbol for reference to %s+%ld",
+ patched_sym->name, addend);
+ return -1;
+ }
+
+ klp = klp_reloc_needed(patched_reloc);
+
+ dbg_clone_reloc(sec, offset, patched_sym, addend, export, klp);
+
+ if (klp) {
+ if (clone_reloc_klp(e, patched_reloc, sec, offset, export))
+ return -1;
+
+ return 0;
+ }
+
+ /*
+ * Why !export sets 'data_too':
+ *
+ * Unexported non-klp symbols need to live in the patch module,
+ * otherwise there will be unresolved symbols. Notably, this includes:
+ *
+ * - New functions/data
+ * - String sections
+ * - Special section entries
+ * - Uncorrelated static local variables
+ * - UBSAN sections
+ */
+ out_sym = clone_symbol(e, patched_sym, patched_sym->included || !export);
+ if (!out_sym)
+ return -1;
+
+ /*
+ * For strings, all references use section symbols, thanks to
+ * section_reference_needed(). clone_symbol() has cloned an empty
+ * version of the string section. Now copy the string itself.
+ */
+ if (is_string_sec(patched_sym->sec)) {
+ const char *str = patched_sym->sec->data->d_buf + addend;
+
+ __dbg_indent("\"%s\"", escape_str(str));
+
+ addend = elf_add_string(e->out, out_sym->sec, str);
+ if (addend == -1)
+ return -1;
+ }
+
+ if (!elf_create_reloc(e->out, sec, offset, out_sym, addend,
+ reloc_type(patched_reloc)))
+ return -1;
+
+ return 0;
+}
+
+/* Copy all relocs needed for a symbol's contents */
+static int clone_sym_relocs(struct elfs *e, struct symbol *patched_sym)
+{
+ struct section *patched_rsec = patched_sym->sec->rsec;
+ struct reloc *patched_reloc;
+ unsigned long start, end;
+ struct symbol *out_sym;
+
+ out_sym = patched_sym->clone;
+ if (!out_sym) {
+ ERROR("no clone for %s", patched_sym->name);
+ return -1;
+ }
+
+ if (!patched_rsec)
+ return 0;
+
+ if (!is_sec_sym(patched_sym) && !patched_sym->len)
+ return 0;
+
+ if (is_string_sec(patched_sym->sec))
+ return 0;
+
+ if (is_sec_sym(patched_sym)) {
+ start = 0;
+ end = sec_size(patched_sym->sec);
+ } else {
+ start = patched_sym->offset;
+ end = start + patched_sym->len;
+ }
+
+ for_each_reloc(patched_rsec, patched_reloc) {
+ unsigned long offset;
+
+ if (reloc_offset(patched_reloc) < start ||
+ reloc_offset(patched_reloc) >= end)
+ continue;
+
+ /*
+ * Skip any reloc referencing .altinstr_aux. Its code is
+ * always patched by alternatives. See ALTERNATIVE_TERNARY().
+ */
+ if (patched_reloc->sym->sec &&
+ !strcmp(patched_reloc->sym->sec->name, ".altinstr_aux"))
+ continue;
+
+ if (convert_reloc_sym(e->patched, patched_reloc)) {
+ ERROR_FUNC(patched_rsec->base, reloc_offset(patched_reloc),
+ "failed to convert reloc sym '%s' to its proper format",
+ patched_reloc->sym->name);
+ return -1;
+ }
+
+ offset = out_sym->offset + (reloc_offset(patched_reloc) - patched_sym->offset);
+
+ if (clone_reloc(e, patched_reloc, out_sym->sec, offset))
+ return -1;
+ }
+ return 0;
+}
+
+static int create_fake_symbol(struct elf *elf, struct section *sec,
+ unsigned long offset, size_t size)
+{
+ char name[SYM_NAME_LEN];
+ unsigned int type;
+ static int ctr;
+ char *c;
+
+ if (snprintf_check(name, SYM_NAME_LEN, "%s_%d", sec->name, ctr++))
+ return -1;
+
+ for (c = name; *c; c++)
+ if (*c == '.')
+ *c = '_';
+
+ /*
+ * STT_NOTYPE: Prevent objtool from validating .altinstr_replacement
+ * while still allowing objdump to disassemble it.
+ */
+ type = is_text_sec(sec) ? STT_NOTYPE : STT_OBJECT;
+ return elf_create_symbol(elf, name, sec, STB_LOCAL, type, offset, size) ? 0 : -1;
+}
+
+/*
+ * Special sections (alternatives, etc) are basically arrays of structs.
+ * For all the special sections, create a symbol for each struct entry. This
+ * is a bit cumbersome, but it makes extracting the individual entries
+ * much more straightforward.
+ *
+ * There are three ways to identify the entry sizes for a special section:
+ *
+ * 1) ELF section header sh_entsize: Ideally this would be used almost
+ * everywhere. But unfortunately the toolchains make it difficult. The
+ * assembler .[push]section directive syntax only takes entsize when
+ * combined with SHF_MERGE. But Clang disallows combining SHF_MERGE with
+ * SHF_WRITE. And some special sections do need to be writable.
+ *
+ * Another place this wouldn't work is .altinstr_replacement, whose entries
+ * don't have a fixed size.
+ *
+ * 2) ANNOTATE_DATA_SPECIAL: This is a lightweight objtool annotation which
+ * points to the beginning of each entry. The size of the entry is then
+ * inferred by the location of the subsequent annotation (or end of
+ * section).
+ *
+ * 3) Simple array of pointers: If the special section is just a basic array of
+ * pointers, the entry size can be inferred by the number of relocations.
+ * No annotations needed.
+ *
+ * Note I also tried to create per-entry symbols at the time of creation, in
+ * the original [inline] asm. Unfortunately, creating uniquely named symbols
+ * is trickier than one might think, especially with Clang inline asm. I
+ * eventually just gave up trying to make that work, in favor of using
+ * ANNOTATE_DATA_SPECIAL and creating the symbols here after the fact.
+ */
+static int create_fake_symbols(struct elf *elf)
+{
+ struct section *sec;
+ struct reloc *reloc;
+
+ /*
+ * 1) Make symbols for all the ANNOTATE_DATA_SPECIAL entries:
+ */
+
+ sec = find_section_by_name(elf, ".discard.annotate_data");
+ if (!sec || !sec->rsec)
+ return 0;
+
+ for_each_reloc(sec->rsec, reloc) {
+ unsigned long offset, size;
+ struct reloc *next_reloc;
+
+ if (annotype(elf, sec, reloc) != ANNOTYPE_DATA_SPECIAL)
+ continue;
+
+ offset = reloc_addend(reloc);
+
+ size = 0;
+ next_reloc = reloc;
+ for_each_reloc_continue(sec->rsec, next_reloc) {
+ if (annotype(elf, sec, next_reloc) != ANNOTYPE_DATA_SPECIAL ||
+ next_reloc->sym->sec != reloc->sym->sec)
+ continue;
+
+ size = reloc_addend(next_reloc) - offset;
+ break;
+ }
+
+ if (!size)
+ size = sec_size(reloc->sym->sec) - offset;
+
+ if (create_fake_symbol(elf, reloc->sym->sec, offset, size))
+ return -1;
+ }
+
+ /*
+ * 2) Make symbols for sh_entsize, and simple arrays of pointers:
+ */
+
+ for_each_sec(elf, sec) {
+ unsigned int entry_size;
+ unsigned long offset;
+
+ if (!is_special_section(sec) || find_symbol_by_offset(sec, 0))
+ continue;
+
+ if (!sec->rsec) {
+ ERROR("%s: missing special section relocations", sec->name);
+ return -1;
+ }
+
+ entry_size = sec->sh.sh_entsize;
+ if (!entry_size) {
+ entry_size = arch_reloc_size(sec->rsec->relocs);
+ if (sec_size(sec) != entry_size * sec_num_entries(sec->rsec)) {
+ ERROR("%s: missing special section entsize or annotations", sec->name);
+ return -1;
+ }
+ }
+
+ for (offset = 0; offset < sec_size(sec); offset += entry_size) {
+ if (create_fake_symbol(elf, sec, offset, entry_size))
+ return -1;
+ }
+ }
+
+ return 0;
+}
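+
+/*
+ * A worked example of the sh_entsize fallback above (hypothetical numbers):
+ * a 24-byte special section whose reloc section carries 3 relocations of
+ * 8 bytes each implies an 8-byte entry size, so fake symbols are created at
+ * offsets 0, 8 and 16.
+ */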
+
+/* Keep a special section entry if it references an included function */
+static bool should_keep_special_sym(struct elf *elf, struct symbol *sym)
+{
+ struct reloc *reloc;
+
+ if (is_sec_sym(sym) || !sym->sec->rsec)
+ return false;
+
+ sym_for_each_reloc(elf, sym, reloc) {
+ if (convert_reloc_sym(elf, reloc))
+ continue;
+
+ if (is_func_sym(reloc->sym) && reloc->sym->included)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Klp relocations aren't allowed for __jump_table and .static_call_sites if
+ * the referenced symbol lives in a kernel module, because such klp relocs may
+ * be applied after static branch/call init, resulting in code corruption.
+ *
+ * Validate a special section entry to avoid that. Note that an inert
+ * tracepoint is harmless enough; in that case, just skip the entry and print a
+ * warning. Otherwise, return an error.
+ *
+ * This is only a temporary limitation which will be fixed when livepatch adds
+ * support for submodules: fully self-contained modules which are embedded in
+ * the top-level livepatch module's data and which can be loaded on demand when
+ * their corresponding to-be-patched module gets loaded. Then klp relocs can
+ * be retired.
+ *
+ * Return:
+ * -1: error: validation failed
+ * 1: warning: tracepoint skipped
+ * 0: success
+ */
+static int validate_special_section_klp_reloc(struct elfs *e, struct symbol *sym)
+{
+ bool static_branch = !strcmp(sym->sec->name, "__jump_table");
+ bool static_call = !strcmp(sym->sec->name, ".static_call_sites");
+ struct symbol *code_sym = NULL;
+ unsigned long code_offset = 0;
+ struct reloc *reloc;
+ int ret = 0;
+
+ if (!static_branch && !static_call)
+ return 0;
+
+ sym_for_each_reloc(e->patched, sym, reloc) {
+ const char *sym_modname;
+ struct export *export;
+
+ /* Static branch/call keys are always STT_OBJECT */
+ if (reloc->sym->type != STT_OBJECT) {
+
+ /* Save code location which can be printed below */
+ if (reloc->sym->type == STT_FUNC && !code_sym) {
+ code_sym = reloc->sym;
+ code_offset = reloc_addend(reloc);
+ }
+
+ continue;
+ }
+
+ if (!klp_reloc_needed(reloc))
+ continue;
+
+ export = find_export(reloc->sym);
+ if (export) {
+ sym_modname = export->mod;
+ } else {
+ sym_modname = find_modname(e);
+ if (!sym_modname)
+ return -1;
+ }
+
+ /* vmlinux keys are ok */
+ if (!strcmp(sym_modname, "vmlinux"))
+ continue;
+
+ if (static_branch) {
+ if (strstarts(reloc->sym->name, "__tracepoint_")) {
+ WARN("%s: disabling unsupported tracepoint %s",
+ code_sym->name, reloc->sym->name + strlen("__tracepoint_"));
+ ret = 1;
+ continue;
+ }
+
+ ERROR("%s+0x%lx: unsupported static branch key %s. Use static_key_enabled() instead",
+ code_sym->name, code_offset, reloc->sym->name);
+ return -1;
+ }
+
+ /* static call */
+ if (strstarts(reloc->sym->name, "__SCK__tp_func_")) {
+ ret = 1;
+ continue;
+ }
+
+ ERROR("%s()+0x%lx: unsupported static call key %s. Use KLP_STATIC_CALL() instead",
+ code_sym->name, code_offset, reloc->sym->name);
+ return -1;
+ }
+
+ return ret;
+}
+
+static int clone_special_section(struct elfs *e, struct section *patched_sec)
+{
+ struct symbol *patched_sym;
+
+ /*
+ * Extract all special section symbols (and their dependencies) which
+ * reference included functions.
+ */
+ sec_for_each_sym(patched_sec, patched_sym) {
+ int ret;
+
+ if (!is_object_sym(patched_sym))
+ continue;
+
+ if (!should_keep_special_sym(e->patched, patched_sym))
+ continue;
+
+ ret = validate_special_section_klp_reloc(e, patched_sym);
+ if (ret < 0)
+ return -1;
+ if (ret > 0)
+ continue;
+
+ if (!clone_symbol(e, patched_sym, true))
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Extract only the needed bits from special sections */
+static int clone_special_sections(struct elfs *e)
+{
+ struct section *patched_sec;
+
+ if (create_fake_symbols(e->patched))
+ return -1;
+
+ for_each_sec(e->patched, patched_sec) {
+ if (is_special_section(patched_sec)) {
+ if (clone_special_section(e, patched_sec))
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Create __klp_objects and __klp_funcs sections which are intermediate
+ * sections provided as input to the patch module's init code for building the
+ * klp_patch, klp_object and klp_func structs for the livepatch API.
+ */
+static int create_klp_sections(struct elfs *e)
+{
+ size_t obj_size = sizeof(struct klp_object_ext);
+ size_t func_size = sizeof(struct klp_func_ext);
+ struct section *obj_sec, *funcs_sec, *str_sec;
+ struct symbol *funcs_sym, *str_sym, *sym;
+ char sym_name[SYM_NAME_LEN];
+ unsigned int nr_funcs = 0;
+ const char *modname;
+ void *obj_data;
+ s64 addend;
+
+ obj_sec = elf_create_section_pair(e->out, KLP_OBJECTS_SEC, obj_size, 0, 0);
+ if (!obj_sec)
+ return -1;
+
+ funcs_sec = elf_create_section_pair(e->out, KLP_FUNCS_SEC, func_size, 0, 0);
+ if (!funcs_sec)
+ return -1;
+
+ funcs_sym = elf_create_section_symbol(e->out, funcs_sec);
+ if (!funcs_sym)
+ return -1;
+
+ str_sec = elf_create_section(e->out, KLP_STRINGS_SEC, 0, 0,
+ SHT_PROGBITS, 1,
+ SHF_ALLOC | SHF_STRINGS | SHF_MERGE);
+ if (!str_sec)
+ return -1;
+
+ if (elf_add_string(e->out, str_sec, "") == -1)
+ return -1;
+
+ str_sym = elf_create_section_symbol(e->out, str_sec);
+ if (!str_sym)
+ return -1;
+
+ /* allocate klp_object_ext */
+ obj_data = elf_add_data(e->out, obj_sec, NULL, obj_size);
+ if (!obj_data)
+ return -1;
+
+ modname = find_modname(e);
+ if (!modname)
+ return -1;
+
+ /* klp_object_ext.name */
+ if (strcmp(modname, "vmlinux")) {
+ addend = elf_add_string(e->out, str_sec, modname);
+ if (addend == -1)
+ return -1;
+
+ if (!elf_create_reloc(e->out, obj_sec,
+ offsetof(struct klp_object_ext, name),
+ str_sym, addend, R_ABS64))
+ return -1;
+ }
+
+ /* klp_object_ext.funcs */
+ if (!elf_create_reloc(e->out, obj_sec, offsetof(struct klp_object_ext, funcs),
+ funcs_sym, 0, R_ABS64))
+ return -1;
+
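+ /*
+ * Layout sketch at this point (hypothetical module "ext4"): __klp_objects
+ * holds one klp_object_ext whose 'name' points into __klp_strings ("ext4")
+ * and whose 'funcs' points at __klp_funcs; the loop below appends one
+ * klp_func_ext per changed function.
+ */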
+ for_each_sym(e->out, sym) {
+ unsigned long offset = nr_funcs * func_size;
+ unsigned long sympos;
+ void *func_data;
+
+ if (!is_func_sym(sym) || sym->cold || !sym->clone || !sym->clone->changed)
+ continue;
+
+ /* allocate klp_func_ext */
+ func_data = elf_add_data(e->out, funcs_sec, NULL, func_size);
+ if (!func_data)
+ return -1;
+
+ /* klp_func_ext.old_name */
+ addend = elf_add_string(e->out, str_sec, sym->clone->twin->name);
+ if (addend == -1)
+ return -1;
+
+ if (!elf_create_reloc(e->out, funcs_sec,
+ offset + offsetof(struct klp_func_ext, old_name),
+ str_sym, addend, R_ABS64))
+ return -1;
+
+ /* klp_func_ext.new_func */
+ if (!elf_create_reloc(e->out, funcs_sec,
+ offset + offsetof(struct klp_func_ext, new_func),
+ sym, 0, R_ABS64))
+ return -1;
+
+ /* klp_func_ext.sympos */
+ BUILD_BUG_ON(sizeof(sympos) != sizeof_field(struct klp_func_ext, sympos));
+ sympos = find_sympos(e->orig, sym->clone->twin);
+ if (sympos == ULONG_MAX)
+ return -1;
+ memcpy(func_data + offsetof(struct klp_func_ext, sympos), &sympos,
+ sizeof_field(struct klp_func_ext, sympos));
+
+ nr_funcs++;
+ }
+
+ /* klp_object_ext.nr_funcs */
+ BUILD_BUG_ON(sizeof(nr_funcs) != sizeof_field(struct klp_object_ext, nr_funcs));
+ memcpy(obj_data + offsetof(struct klp_object_ext, nr_funcs), &nr_funcs,
+ sizeof_field(struct klp_object_ext, nr_funcs));
+
+ /*
+ * Find callback pointers created by KLP_PRE_PATCH_CALLBACK() and
+ * friends, and add them to the klp object.
+ */
+
+ if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_PRE_PATCH_PREFIX "%s", modname))
+ return -1;
+
+ sym = find_symbol_by_name(e->out, sym_name);
+ if (sym) {
+ struct reloc *reloc;
+
+ reloc = find_reloc_by_dest(e->out, sym->sec, sym->offset);
+
+ if (!elf_create_reloc(e->out, obj_sec,
+ offsetof(struct klp_object_ext, callbacks) +
+ offsetof(struct klp_callbacks, pre_patch),
+ reloc->sym, reloc_addend(reloc), R_ABS64))
+ return -1;
+ }
+
+ if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_POST_PATCH_PREFIX "%s", modname))
+ return -1;
+
+ sym = find_symbol_by_name(e->out, sym_name);
+ if (sym) {
+ struct reloc *reloc;
+
+ reloc = find_reloc_by_dest(e->out, sym->sec, sym->offset);
+
+ if (!elf_create_reloc(e->out, obj_sec,
+ offsetof(struct klp_object_ext, callbacks) +
+ offsetof(struct klp_callbacks, post_patch),
+ reloc->sym, reloc_addend(reloc), R_ABS64))
+ return -1;
+ }
+
+ if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_PRE_UNPATCH_PREFIX "%s", modname))
+ return -1;
+
+ sym = find_symbol_by_name(e->out, sym_name);
+ if (sym) {
+ struct reloc *reloc;
+
+ reloc = find_reloc_by_dest(e->out, sym->sec, sym->offset);
+
+ if (!elf_create_reloc(e->out, obj_sec,
+ offsetof(struct klp_object_ext, callbacks) +
+ offsetof(struct klp_callbacks, pre_unpatch),
+ reloc->sym, reloc_addend(reloc), R_ABS64))
+ return -1;
+ }
+
+ if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_POST_UNPATCH_PREFIX "%s", modname))
+ return -1;
+
+ sym = find_symbol_by_name(e->out, sym_name);
+ if (sym) {
+ struct reloc *reloc;
+
+ reloc = find_reloc_by_dest(e->out, sym->sec, sym->offset);
+
+ if (!elf_create_reloc(e->out, obj_sec,
+ offsetof(struct klp_object_ext, callbacks) +
+ offsetof(struct klp_callbacks, post_unpatch),
+ reloc->sym, reloc_addend(reloc), R_ABS64))
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Copy all .modinfo import_ns= tags to ensure all namespaced exported symbols
+ * can be accessed via normal relocs.
+ */
+static int copy_import_ns(struct elfs *e)
+{
+ struct section *patched_sec, *out_sec = NULL;
+ char *import_ns, *data_end;
+
+ patched_sec = find_section_by_name(e->patched, ".modinfo");
+ if (!patched_sec)
+ return 0;
+
+ import_ns = patched_sec->data->d_buf;
+ if (!import_ns)
+ return 0;
+
+ for (data_end = import_ns + sec_size(patched_sec);
+ import_ns < data_end;
+ import_ns += strlen(import_ns) + 1) {
+
+ import_ns = memmem(import_ns, data_end - import_ns, "import_ns=", 10);
+ if (!import_ns)
+ return 0;
+
+ if (!out_sec) {
+ out_sec = find_section_by_name(e->out, ".modinfo");
+ if (!out_sec) {
+ out_sec = elf_create_section(e->out, ".modinfo", 0,
+ patched_sec->sh.sh_entsize,
+ patched_sec->sh.sh_type,
+ patched_sec->sh.sh_addralign,
+ patched_sec->sh.sh_flags);
+ if (!out_sec)
+ return -1;
+ }
+ }
+
+ if (!elf_add_data(e->out, out_sec, import_ns, strlen(import_ns) + 1))
+ return -1;
+ }
+
+ return 0;
+}
+
+int cmd_klp_diff(int argc, const char **argv)
+{
+ struct elfs e = {0};
+
+ argc = parse_options(argc, argv, klp_diff_options, klp_diff_usage, 0);
+ if (argc != 3)
+ usage_with_options(klp_diff_usage, klp_diff_options);
+
+ objname = argv[0];
+
+ e.orig = elf_open_read(argv[0], O_RDONLY);
+ e.patched = elf_open_read(argv[1], O_RDONLY);
+ e.out = NULL;
+
+ if (!e.orig || !e.patched)
+ return -1;
+
+ if (read_exports())
+ return -1;
+
+ if (read_sym_checksums(e.orig))
+ return -1;
+
+ if (read_sym_checksums(e.patched))
+ return -1;
+
+ if (correlate_symbols(&e))
+ return -1;
+
+ if (mark_changed_functions(&e))
+ return 0;
+
+ e.out = elf_create_file(&e.orig->ehdr, argv[2]);
+ if (!e.out)
+ return -1;
+
+ if (clone_included_functions(&e))
+ return -1;
+
+ if (clone_special_sections(&e))
+ return -1;
+
+ if (create_klp_sections(&e))
+ return -1;
+
+ if (copy_import_ns(&e))
+ return -1;
+
+ if (elf_write(e.out))
+ return -1;
+
+ return elf_close(e.out);
+}
diff --git a/tools/objtool/klp-post-link.c b/tools/objtool/klp-post-link.c
new file mode 100644
index 000000000000..c013e39957b1
--- /dev/null
+++ b/tools/objtool/klp-post-link.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Read the intermediate KLP reloc/symbol representations created by klp diff
+ * and convert them to the proper format required by livepatch. This needs to
+ * run last to avoid linker wreckage. Linkers don't tend to handle the "two
+ * rela sections for a single base section" case very well, nor do they like
+ * SHN_LIVEPATCH.
+ *
+ * This is the final tool in the livepatch module generation pipeline:
+ *
+ * kernel builds -> objtool klp diff -> module link -> objtool klp post-link
+ */
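+
+/*
+ * An illustrative invocation sequence (file names are hypothetical):
+ *
+ *   objtool klp diff orig.o patched.o patch-diff.o
+ *   ld -r -o livepatch.ko patch-diff.o ...
+ *   objtool klp post-link livepatch.ko
+ */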
+
+#include <fcntl.h>
+#include <gelf.h>
+#include <objtool/objtool.h>
+#include <objtool/warn.h>
+#include <objtool/klp.h>
+#include <objtool/util.h>
+#include <linux/livepatch_external.h>
+
+static int fix_klp_relocs(struct elf *elf)
+{
+ struct section *symtab, *klp_relocs;
+
+ klp_relocs = find_section_by_name(elf, KLP_RELOCS_SEC);
+ if (!klp_relocs)
+ return 0;
+
+ symtab = find_section_by_name(elf, ".symtab");
+ if (!symtab) {
+ ERROR("missing .symtab");
+ return -1;
+ }
+
+ for (int i = 0; i < sec_size(klp_relocs) / sizeof(struct klp_reloc); i++) {
+ struct klp_reloc *klp_reloc;
+ unsigned long klp_reloc_off;
+ struct section *sec, *tmp, *klp_rsec;
+ unsigned long offset;
+ struct reloc *reloc;
+ char sym_modname[64];
+ char rsec_name[SEC_NAME_LEN];
+ u64 addend;
+ struct symbol *sym, *klp_sym;
+
+ klp_reloc_off = i * sizeof(*klp_reloc);
+ klp_reloc = klp_relocs->data->d_buf + klp_reloc_off;
+
+ /*
+ * Read __klp_relocs[i]:
+ */
+
+ /* klp_reloc.offset */
+ reloc = find_reloc_by_dest(elf, klp_relocs,
+ klp_reloc_off + offsetof(struct klp_reloc, offset));
+ if (!reloc) {
+ ERROR("malformed " KLP_RELOCS_SEC " section");
+ return -1;
+ }
+
+ sec = reloc->sym->sec;
+ offset = reloc_addend(reloc);
+
+ /* klp_reloc.sym */
+ reloc = find_reloc_by_dest(elf, klp_relocs,
+ klp_reloc_off + offsetof(struct klp_reloc, sym));
+ if (!reloc) {
+ ERROR("malformed " KLP_RELOCS_SEC " section");
+ return -1;
+ }
+
+ klp_sym = reloc->sym;
+ addend = reloc_addend(reloc);
+
+ /* symbol format: .klp.sym.modname.sym_name,sympos */
+ if (sscanf(klp_sym->name + strlen(KLP_SYM_PREFIX), "%55[^.]", sym_modname) != 1) {
+ ERROR("can't find modname in klp symbol '%s'", klp_sym->name);
+ return -1;
+ }
+
+ /*
+ * Create the KLP rela:
+ */
+
+ /* section format: .klp.rela.sec_objname.section_name */
+ if (snprintf_check(rsec_name, SEC_NAME_LEN,
+ KLP_RELOC_SEC_PREFIX "%s.%s",
+ sym_modname, sec->name))
+ return -1;
+
+ klp_rsec = find_section_by_name(elf, rsec_name);
+ if (!klp_rsec) {
+ klp_rsec = elf_create_section(elf, rsec_name, 0,
+ elf_rela_size(elf),
+ SHT_RELA, elf_addr_size(elf),
+ SHF_ALLOC | SHF_INFO_LINK | SHF_RELA_LIVEPATCH);
+ if (!klp_rsec)
+ return -1;
+
+ klp_rsec->sh.sh_link = symtab->idx;
+ klp_rsec->sh.sh_info = sec->idx;
+ klp_rsec->base = sec;
+ }
+
+ tmp = sec->rsec;
+ sec->rsec = klp_rsec;
+ if (!elf_create_reloc(elf, sec, offset, klp_sym, addend, klp_reloc->type))
+ return -1;
+ sec->rsec = tmp;
+
+ /*
+ * Fix up the corresponding KLP symbol:
+ */
+
+ klp_sym->sym.st_shndx = SHN_LIVEPATCH;
+ if (!gelf_update_sym(symtab->data, klp_sym->idx, &klp_sym->sym)) {
+ ERROR_ELF("gelf_update_sym");
+ return -1;
+ }
+
+ /*
+ * Disable the original non-KLP reloc by converting it to R_*_NONE:
+ */
+
+ reloc = find_reloc_by_dest(elf, sec, offset);
+ if (!reloc) {
+ ERROR("malformed " KLP_RELOCS_SEC " section");
+ return -1;
+ }
+ sym = reloc->sym;
+ sym->sym.st_shndx = SHN_LIVEPATCH;
+ set_reloc_type(elf, reloc, 0);
+ if (!gelf_update_sym(symtab->data, sym->idx, &sym->sym)) {
+ ERROR_ELF("gelf_update_sym");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * This runs on the livepatch module after all other linking has been done. It
+ * converts the intermediate __klp_relocs section into proper KLP relocs to be
+ * processed by livepatch. As the file header comment explains, this must run
+ * after all other linking to avoid the linker mishandling two rela sections
+ * for a single base section, or SHN_LIVEPATCH.
+ */
+int cmd_klp_post_link(int argc, const char **argv)
+{
+ struct elf *elf;
+
+ argc--;
+ argv++;
+
+ if (argc != 1) {
+ fprintf(stderr, "usage: objtool klp post-link <file.ko>\n");
+ return -1;
+ }
+
+ elf = elf_open_read(argv[0], O_RDWR);
+ if (!elf)
+ return -1;
+
+ if (fix_klp_relocs(elf))
+ return -1;
+
+ if (elf_write(elf))
+ return -1;
+
+ return elf_close(elf);
+}
diff --git a/tools/objtool/noreturns.h b/tools/objtool/noreturns.h
index eacfe3b0a8d1..14f8ab653449 100644
--- a/tools/objtool/noreturns.h
+++ b/tools/objtool/noreturns.h
@@ -36,15 +36,16 @@ NORETURN(machine_real_restart)
NORETURN(make_task_dead)
NORETURN(mpt_halt_firmware)
NORETURN(mwait_play_dead)
+NORETURN(native_play_dead)
NORETURN(nmi_panic_self_stop)
NORETURN(panic)
+NORETURN(vpanic)
NORETURN(panic_smp_self_stop)
NORETURN(rest_init)
NORETURN(rewind_stack_and_make_dead)
NORETURN(rust_begin_unwind)
NORETURN(rust_helper_BUG)
NORETURN(sev_es_terminate)
-NORETURN(snp_abort)
NORETURN(start_kernel)
NORETURN(stop_this_cpu)
NORETURN(usercopy_abort)
diff --git a/tools/objtool/objtool.c b/tools/objtool/objtool.c
index 5c8b974ad0f9..1c3622117c33 100644
--- a/tools/objtool/objtool.c
+++ b/tools/objtool/objtool.c
@@ -16,7 +16,8 @@
#include <objtool/objtool.h>
#include <objtool/warn.h>
-bool help;
+bool debug;
+int indent;
static struct objtool_file file;
@@ -71,13 +72,54 @@ int objtool_pv_add(struct objtool_file *f, int idx, struct symbol *func)
return 0;
}
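+
+/*
+ * Derive a path relative to the source tree by stripping three components
+ * from /proc/self/exe (assuming the binary lives at e.g.
+ * "<tree>/tools/objtool/objtool") and appending 'file'.
+ */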
+char *top_level_dir(const char *file)
+{
+ ssize_t len, self_len, file_len;
+ char self[PATH_MAX], *str;
+ int i;
+
+ len = readlink("/proc/self/exe", self, sizeof(self) - 1);
+ if (len <= 0)
+ return NULL;
+ self[len] = '\0';
+
+ for (i = 0; i < 3; i++) {
+ char *s = strrchr(self, '/');
+ if (!s)
+ return NULL;
+ *s = '\0';
+ }
+
+ self_len = strlen(self);
+ file_len = strlen(file);
+
+ str = malloc(self_len + file_len + 2);
+ if (!str)
+ return NULL;
+
+ memcpy(str, self, self_len);
+ str[self_len] = '/';
+ strcpy(str + self_len + 1, file);
+
+ return str;
+}
+
int main(int argc, const char **argv)
{
static const char *UNUSED = "OBJTOOL_NOT_IMPLEMENTED";
+ if (init_signal_handler())
+ return -1;
+
/* libsubcmd init */
exec_cmd_init("objtool", UNUSED, UNUSED, UNUSED);
pager_init(UNUSED);
+ if (argc > 1 && !strcmp(argv[1], "klp")) {
+ argc--;
+ argv++;
+ return cmd_klp(argc, argv);
+ }
+
return objtool_run(argc, argv);
}
diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c
index 1dd9fc18fe62..5a979f52425a 100644
--- a/tools/objtool/orc_dump.c
+++ b/tools/objtool/orc_dump.c
@@ -8,7 +8,6 @@
#include <objtool/objtool.h>
#include <objtool/orc.h>
#include <objtool/warn.h>
-#include <objtool/endianness.h>
int orc_dump(const char *filename)
{
diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c
index 922e6aac7cea..1045e1380ffd 100644
--- a/tools/objtool/orc_gen.c
+++ b/tools/objtool/orc_gen.c
@@ -12,7 +12,6 @@
#include <objtool/check.h>
#include <objtool/orc.h>
#include <objtool/warn.h>
-#include <objtool/endianness.h>
struct orc_list_entry {
struct list_head list;
@@ -57,7 +56,7 @@ int orc_create(struct objtool_file *file)
/* Build a deduplicated list of ORC entries: */
INIT_LIST_HEAD(&orc_list);
- for_each_sec(file, sec) {
+ for_each_sec(file->elf, sec) {
struct orc_entry orc, prev_orc = {0};
struct instruction *insn;
bool empty = true;
@@ -127,7 +126,11 @@ int orc_create(struct objtool_file *file)
return -1;
}
orc_sec = elf_create_section(file->elf, ".orc_unwind",
- sizeof(struct orc_entry), nr);
+ nr * sizeof(struct orc_entry),
+ sizeof(struct orc_entry),
+ SHT_PROGBITS,
+ 1,
+ SHF_ALLOC);
if (!orc_sec)
return -1;
diff --git a/tools/objtool/signal.c b/tools/objtool/signal.c
new file mode 100644
index 000000000000..af5c65c0fb2d
--- /dev/null
+++ b/tools/objtool/signal.c
@@ -0,0 +1,135 @@
+/*
+ * signal.c: Register a sigaltstack for objtool, to be able to
+ * run a signal handler on a separate stack even if
+ * the main process stack has overflown. Print out
+ * stack overflow errors when this happens.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <unistd.h>
+#include <sys/resource.h>
+#include <string.h>
+
+#include <objtool/objtool.h>
+#include <objtool/warn.h>
+
+static unsigned long stack_limit;
+
+static bool is_stack_overflow(void *fault_addr)
+{
+ unsigned long fault = (unsigned long)fault_addr;
+
+ /* Check if fault is in the guard page just below the limit. */
+ return fault < stack_limit && fault >= stack_limit - 4096;
+}
+
+static void signal_handler(int sig_num, siginfo_t *info, void *context)
+{
+ struct sigaction sa_dfl = {0};
+ const char *sig_name;
+ char msg[256];
+ int msg_len;
+
+ switch (sig_num) {
+ case SIGSEGV: sig_name = "SIGSEGV"; break;
+ case SIGBUS: sig_name = "SIGBUS"; break;
+ case SIGILL: sig_name = "SIGILL"; break;
+ case SIGABRT: sig_name = "SIGABRT"; break;
+ default: sig_name = "Unknown signal"; break;
+ }
+
+ if (is_stack_overflow(info->si_addr)) {
+ msg_len = snprintf(msg, sizeof(msg),
+ "%s: error: %s: objtool stack overflow!\n",
+ objname, sig_name);
+ } else {
+ msg_len = snprintf(msg, sizeof(msg),
+ "%s: error: %s: objtool crash!\n",
+ objname, sig_name);
+ }
+
+ msg_len = write(STDERR_FILENO, msg, msg_len);
+
+ /* Re-raise the signal to trigger the core dump */
+ sa_dfl.sa_handler = SIG_DFL;
+ sigaction(sig_num, &sa_dfl, NULL);
+ raise(sig_num);
+}
+
+static int read_stack_limit(void)
+{
+ unsigned long stack_start, stack_end;
+ struct rlimit rlim;
+ char line[256];
+ int ret = 0;
+ FILE *fp;
+
+ if (getrlimit(RLIMIT_STACK, &rlim)) {
+ ERROR_GLIBC("getrlimit");
+ return -1;
+ }
+
+ fp = fopen("/proc/self/maps", "r");
+ if (!fp) {
+ ERROR_GLIBC("fopen");
+ return -1;
+ }
+
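+ /*
+ * Scan for the [stack] entry; an illustrative line:
+ *
+ *   7ffc9a5e0000-7ffc9a601000 rw-p 00000000 00:00 0    [stack]
+ *
+ * The stack grows down from stack_end, so the lowest valid address is
+ * stack_end - RLIMIT_STACK.
+ */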
+ while (fgets(line, sizeof(line), fp)) {
+ if (strstr(line, "[stack]")) {
+ if (sscanf(line, "%lx-%lx", &stack_start, &stack_end) != 2) {
+ ERROR_GLIBC("sscanf");
+ ret = -1;
+ goto done;
+ }
+ stack_limit = stack_end - rlim.rlim_cur;
+ goto done;
+ }
+ }
+
+ ret = -1;
+ ERROR("/proc/self/maps: can't find [stack]");
+
+done:
+ fclose(fp);
+
+ return ret;
+}
+
+int init_signal_handler(void)
+{
+ int signals[] = {SIGSEGV, SIGBUS, SIGILL, SIGABRT};
+ struct sigaction sa;
+ stack_t ss;
+
+ if (read_stack_limit())
+ return -1;
+
+ ss.ss_sp = malloc(SIGSTKSZ);
+ if (!ss.ss_sp) {
+ ERROR_GLIBC("malloc");
+ return -1;
+ }
+ ss.ss_size = SIGSTKSZ;
+ ss.ss_flags = 0;
+
+ if (sigaltstack(&ss, NULL) == -1) {
+ ERROR_GLIBC("sigaltstack");
+ return -1;
+ }
+
+ sa.sa_sigaction = signal_handler;
+ sigemptyset(&sa.sa_mask);
+
+ sa.sa_flags = SA_ONSTACK | SA_SIGINFO;
+
+ for (int i = 0; i < ARRAY_SIZE(signals); i++) {
+ if (sigaction(signals[i], &sa, NULL) == -1) {
+ ERROR_GLIBC("sigaction");
+ return -1;
+ }
+ }
+
+ return 0;
+}
diff --git a/tools/objtool/special.c b/tools/objtool/special.c
index c80fed8a840e..2a533afbc69a 100644
--- a/tools/objtool/special.c
+++ b/tools/objtool/special.c
@@ -15,7 +15,6 @@
#include <objtool/builtin.h>
#include <objtool/special.h>
#include <objtool/warn.h>
-#include <objtool/endianness.h>
struct special_entry {
const char *sec;
@@ -82,6 +81,8 @@ static int get_alt_entry(struct elf *elf, const struct special_entry *entry,
entry->orig_len);
alt->new_len = *(unsigned char *)(sec->data->d_buf + offset +
entry->new_len);
+ alt->feature = *(unsigned int *)(sec->data->d_buf + offset +
+ entry->feature);
}
orig_reloc = find_reloc_by_dest(elf, sec, offset + entry->orig);
@@ -133,7 +134,7 @@ int special_get_alts(struct elf *elf, struct list_head *alts)
struct section *sec;
unsigned int nr_entries;
struct special_alt *alt;
- int idx, ret;
+ int idx;
INIT_LIST_HEAD(alts);
@@ -142,12 +143,12 @@ int special_get_alts(struct elf *elf, struct list_head *alts)
if (!sec)
continue;
- if (sec->sh.sh_size % entry->size != 0) {
+ if (sec_size(sec) % entry->size != 0) {
ERROR("%s size not a multiple of %d", sec->name, entry->size);
return -1;
}
- nr_entries = sec->sh.sh_size / entry->size;
+ nr_entries = sec_size(sec) / entry->size;
for (idx = 0; idx < nr_entries; idx++) {
alt = malloc(sizeof(*alt));
@@ -157,11 +158,8 @@ int special_get_alts(struct elf *elf, struct list_head *alts)
}
memset(alt, 0, sizeof(*alt));
- ret = get_alt_entry(elf, entry, sec, idx, alt);
- if (ret > 0)
- continue;
- if (ret < 0)
- return ret;
+ if (get_alt_entry(elf, entry, sec, idx, alt))
+ return -1;
list_add_tail(&alt->list, alts);
}
diff --git a/tools/objtool/sync-check.sh b/tools/objtool/sync-check.sh
index 81d120d05442..e38167ca56a9 100755
--- a/tools/objtool/sync-check.sh
+++ b/tools/objtool/sync-check.sh
@@ -16,6 +16,8 @@ arch/x86/include/asm/orc_types.h
arch/x86/include/asm/emulate_prefix.h
arch/x86/lib/x86-opcode-map.txt
arch/x86/tools/gen-insn-attr-x86.awk
+include/linux/interval_tree_generic.h
+include/linux/livepatch_external.h
include/linux/static_call_types.h
"
diff --git a/tools/objtool/trace.c b/tools/objtool/trace.c
new file mode 100644
index 000000000000..5dec44dab781
--- /dev/null
+++ b/tools/objtool/trace.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2025, Oracle and/or its affiliates.
+ */
+
+#include <objtool/trace.h>
+
+bool trace;
+int trace_depth;
+
+/*
+ * Macros to trace CFI state attributes changes.
+ */
+
+#define TRACE_CFI_ATTR(attr, prev, next, fmt, ...) \
+({ \
+ if ((prev)->attr != (next)->attr) \
+ TRACE("%s=" fmt " ", #attr, __VA_ARGS__); \
+})
+
+#define TRACE_CFI_ATTR_BOOL(attr, prev, next) \
+ TRACE_CFI_ATTR(attr, prev, next, \
+ "%s", (next)->attr ? "true" : "false")
+
+#define TRACE_CFI_ATTR_NUM(attr, prev, next, fmt) \
+ TRACE_CFI_ATTR(attr, prev, next, fmt, (next)->attr)
+
+#define CFI_REG_NAME_MAXLEN 16
+
+/*
+ * Return the name of a register. Note that the same static buffer
+ * is returned if the name is dynamically generated.
+ */
+static const char *cfi_reg_name(unsigned int reg)
+{
+ static char rname_buffer[CFI_REG_NAME_MAXLEN];
+ const char *rname;
+
+ switch (reg) {
+ case CFI_UNDEFINED:
+ return "<undefined>";
+ case CFI_CFA:
+ return "cfa";
+ case CFI_SP_INDIRECT:
+ return "(sp)";
+ case CFI_BP_INDIRECT:
+ return "(bp)";
+ }
+
+ if (reg < CFI_NUM_REGS) {
+ rname = arch_reg_name[reg];
+ if (rname)
+ return rname;
+ }
+
+ if (snprintf(rname_buffer, CFI_REG_NAME_MAXLEN, "r%d", reg) < 0)
+ return "<error>";
+
+ return (const char *)rname_buffer;
+}
+
+/*
+ * Functions and macros to trace CFI registers changes.
+ */
+
+static void trace_cfi_reg(const char *prefix, int reg, const char *fmt,
+ int base_prev, int offset_prev,
+ int base_next, int offset_next)
+{
+ char *rname;
+
+ if (base_prev == base_next && offset_prev == offset_next)
+ return;
+
+ if (prefix)
+ TRACE("%s:", prefix);
+
+ if (base_next == CFI_UNDEFINED) {
+ TRACE("%1$s=<undef> ", cfi_reg_name(reg));
+ } else {
+ rname = strdup(cfi_reg_name(reg));
+ TRACE(fmt, rname, cfi_reg_name(base_next), offset_next);
+ free(rname);
+ }
+}
+
+static void trace_cfi_reg_val(const char *prefix, int reg,
+ int base_prev, int offset_prev,
+ int base_next, int offset_next)
+{
+ trace_cfi_reg(prefix, reg, "%1$s=%2$s%3$+d ",
+ base_prev, offset_prev, base_next, offset_next);
+}
+
+static void trace_cfi_reg_ref(const char *prefix, int reg,
+ int base_prev, int offset_prev,
+ int base_next, int offset_next)
+{
+ trace_cfi_reg(prefix, reg, "%1$s=(%2$s%3$+d) ",
+ base_prev, offset_prev, base_next, offset_next);
+}
+
+#define TRACE_CFI_REG_VAL(reg, prev, next) \
+ trace_cfi_reg_val(NULL, reg, prev.base, prev.offset, \
+ next.base, next.offset)
+
+#define TRACE_CFI_REG_REF(reg, prev, next) \
+ trace_cfi_reg_ref(NULL, reg, prev.base, prev.offset, \
+ next.base, next.offset)
+
+void trace_insn_state(struct instruction *insn, struct insn_state *sprev,
+ struct insn_state *snext)
+{
+ struct cfi_state *cprev, *cnext;
+ int i;
+
+ if (!memcmp(sprev, snext, sizeof(struct insn_state)))
+ return;
+
+ cprev = &sprev->cfi;
+ cnext = &snext->cfi;
+
+ disas_print_insn(stderr, objtool_disas_ctx, insn,
+ trace_depth - 1, "state: ");
+
+ /* print registers changes */
+ TRACE_CFI_REG_VAL(CFI_CFA, cprev->cfa, cnext->cfa);
+ for (i = 0; i < CFI_NUM_REGS; i++) {
+ TRACE_CFI_REG_VAL(i, cprev->vals[i], cnext->vals[i]);
+ TRACE_CFI_REG_REF(i, cprev->regs[i], cnext->regs[i]);
+ }
+
+ /* print attributes changes */
+ TRACE_CFI_ATTR_NUM(stack_size, cprev, cnext, "%d");
+ TRACE_CFI_ATTR_BOOL(drap, cprev, cnext);
+ if (cnext->drap) {
+ trace_cfi_reg_val("drap", cnext->drap_reg,
+ cprev->drap_reg, cprev->drap_offset,
+ cnext->drap_reg, cnext->drap_offset);
+ }
+ TRACE_CFI_ATTR_BOOL(bp_scratch, cprev, cnext);
+ TRACE_CFI_ATTR_NUM(instr, sprev, snext, "%d");
+ TRACE_CFI_ATTR_NUM(uaccess_stack, sprev, snext, "%u");
+
+ TRACE("\n");
+
+ insn->trace = 1;
+}
+
+void trace_alt_begin(struct instruction *orig_insn, struct alternative *alt,
+ char *alt_name)
+{
+ struct instruction *alt_insn;
+ char suffix[2] = ""; /* suffix[1] stays NUL when suffix[0] is set */
+
+ alt_insn = alt->insn;
+
+ if (alt->type == ALT_TYPE_EX_TABLE) {
+ /*
+ * When there is an exception table then the instruction
+ * at the original location is executed but it can cause
+ * an exception. In that case, the execution will be
+ * redirected to the alternative instruction.
+ *
+ * The instruction at the original location can have
+ * instruction alternatives, so we just print the location
+ * of the instruction that can cause the exception and
+ * not the instruction itself.
+ */
+ TRACE_ALT_INFO_NOADDR(orig_insn, "/ ", "%s for instruction at 0x%lx <%s+0x%lx>",
+ alt_name,
+ orig_insn->offset, orig_insn->sym->name,
+ orig_insn->offset - orig_insn->sym->offset);
+ } else {
+ TRACE_ALT_INFO_NOADDR(orig_insn, "/ ", "%s", alt_name);
+ }
+
+ if (alt->type == ALT_TYPE_JUMP_TABLE) {
+ /*
+ * For a jump alternative, if the default instruction is
+ * a NOP then it is replaced with the jmp instruction,
+ * otherwise it is replaced with a NOP instruction.
+ */
+ trace_depth++;
+ if (orig_insn->type == INSN_NOP) {
+ suffix[0] = (orig_insn->len == 5) ? 'q' : '\0';
+ TRACE_ADDR(orig_insn, "jmp%-3s %lx <%s+0x%lx>", suffix,
+ alt_insn->offset, alt_insn->sym->name,
+ alt_insn->offset - alt_insn->sym->offset);
+ } else {
+ TRACE_ADDR(orig_insn, "nop%d", orig_insn->len);
+ trace_depth--;
+ }
+ }
+}
+
+void trace_alt_end(struct instruction *orig_insn, struct alternative *alt,
+ char *alt_name)
+{
+ if (alt->type == ALT_TYPE_JUMP_TABLE && orig_insn->type == INSN_NOP)
+ trace_depth--;
+ TRACE_ALT_INFO_NOADDR(orig_insn, "\\ ", "%s", alt_name);
+}
diff --git a/tools/objtool/weak.c b/tools/objtool/weak.c
index d83f607733b0..d6562f292259 100644
--- a/tools/objtool/weak.c
+++ b/tools/objtool/weak.c
@@ -8,6 +8,8 @@
#include <stdbool.h>
#include <errno.h>
#include <objtool/objtool.h>
+#include <objtool/arch.h>
+#include <objtool/builtin.h>
#define UNSUPPORTED(name) \
({ \
@@ -24,3 +26,8 @@ int __weak orc_create(struct objtool_file *file)
{
UNSUPPORTED("ORC");
}
+
+int __weak cmd_klp(int argc, const char **argv)
+{
+ UNSUPPORTED("klp");
+}
diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore
index 5aaf73df6700..b64302a76144 100644
--- a/tools/perf/.gitignore
+++ b/tools/perf/.gitignore
@@ -48,8 +48,6 @@ libbpf/
libperf/
libsubcmd/
libsymbol/
-libtraceevent/
-libtraceevent_plugins/
fixdep
Documentation/doc.dep
python_ext_build/
diff --git a/tools/perf/Build b/tools/perf/Build
index 06107f1e1d42..b03cc59dabf8 100644
--- a/tools/perf/Build
+++ b/tools/perf/Build
@@ -73,7 +73,7 @@ endif
$(OUTPUT)%.shellcheck_log: %
$(call rule_mkdir)
- $(Q)$(call echo-cmd,test)shellcheck -s bash -a -S warning "$<" > $@ || (cat $@ && rm $@ && false)
+ $(Q)$(call echo-cmd,test)$(SHELLCHECK) "$<" > $@ || (cat $@ && rm $@ && false)
perf-y += $(SHELL_TEST_LOGS)
diff --git a/tools/perf/Documentation/Build.txt b/tools/perf/Documentation/Build.txt
index 83dc87c662b6..57b226e7fc2f 100644
--- a/tools/perf/Documentation/Build.txt
+++ b/tools/perf/Documentation/Build.txt
@@ -99,3 +99,18 @@ configuration paths for cross building:
In this case, the variable PKG_CONFIG_SYSROOT_DIR can be used alongside the
variable PKG_CONFIG_LIBDIR or PKG_CONFIG_PATH to prepend the sysroot path to
the library paths for cross compilation.
+
+5) Build with Clang
+===================
+By default, the makefile uses GCC as the compiler. Setting the environment
+variables HOSTCC, CC and CXX allows perf to be built with Clang.
+
+Using Clang for a native build:
+
+ $ HOSTCC=clang CC=clang CXX=clang++ make -C tools/perf
+
+Specifying ARCH and CROSS_COMPILE for cross compilation:
+
+ $ HOSTCC=clang CC=clang CXX=clang++ \
+ ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- \
+ make -C tools/perf
diff --git a/tools/perf/Documentation/android.txt b/tools/perf/Documentation/android.txt
index 24a59998fc91..3f3cc7ac3d13 100644
--- a/tools/perf/Documentation/android.txt
+++ b/tools/perf/Documentation/android.txt
@@ -1,78 +1,10 @@
How to compile perf for Android
-=========================================
+===============================
-I. Set the Android NDK environment
-------------------------------------------------
+There are two ways to build perf and run it on Android:
-(a). Use the Android NDK
-------------------------------------------------
-1. You need to download and install the Android Native Development Kit (NDK).
-Set the NDK variable to point to the path where you installed the NDK:
- export NDK=/path/to/android-ndk
+- Method 1: Build perf with static linking. See Build.txt, section
+ "4) Cross compilation" for how to build a static perf binary.
-2. Set cross-compiling environment variables for NDK toolchain and sysroot.
-For arm:
- export NDK_TOOLCHAIN=${NDK}/toolchains/arm-linux-androideabi-4.9/prebuilt/linux-x86_64/bin/arm-linux-androideabi-
- export NDK_SYSROOT=${NDK}/platforms/android-24/arch-arm
-For x86:
- export NDK_TOOLCHAIN=${NDK}/toolchains/x86-4.9/prebuilt/linux-x86_64/bin/i686-linux-android-
- export NDK_SYSROOT=${NDK}/platforms/android-24/arch-x86
-
-This method is only tested for Android NDK versions Revision 11b and later.
-perf uses some bionic enhancements that are not included in prior NDK versions.
-You can use method (b) described below instead.
-
-(b). Use the Android source tree
------------------------------------------------
-1. Download the master branch of the Android source tree.
-Set the environment for the target you want using:
- source build/envsetup.sh
- lunch
-
-2. Build your own NDK sysroot to contain latest bionic changes and set the
-NDK sysroot environment variable.
- cd ${ANDROID_BUILD_TOP}/ndk
-For arm:
- ./build/tools/build-ndk-sysroot.sh --abi=arm
- export NDK_SYSROOT=${ANDROID_BUILD_TOP}/ndk/build/platforms/android-3/arch-arm
-For x86:
- ./build/tools/build-ndk-sysroot.sh --abi=x86
- export NDK_SYSROOT=${ANDROID_BUILD_TOP}/ndk/build/platforms/android-3/arch-x86
-
-3. Set the NDK toolchain environment variable.
-For arm:
- export NDK_TOOLCHAIN=${ANDROID_TOOLCHAIN}/arm-linux-androideabi-
-For x86:
- export NDK_TOOLCHAIN=${ANDROID_TOOLCHAIN}/i686-linux-android-
-
-II. Compile perf for Android
-------------------------------------------------
-You need to run make with the NDK toolchain and sysroot defined above:
-For arm:
- make WERROR=0 ARCH=arm CROSS_COMPILE=${NDK_TOOLCHAIN} EXTRA_CFLAGS="-pie --sysroot=${NDK_SYSROOT}"
-For x86:
- make WERROR=0 ARCH=x86 CROSS_COMPILE=${NDK_TOOLCHAIN} EXTRA_CFLAGS="-pie --sysroot=${NDK_SYSROOT}"
-
-III. Install perf
------------------------------------------------
-You need to connect to your Android device/emulator using adb.
-Install perf using:
- adb push perf /data/perf
-
-If you also want to use perf-archive you need busybox tools for Android.
-For installing perf-archive, you first need to replace #!/bin/bash with #!/system/bin/sh:
- sed 's/#!\/bin\/bash/#!\/system\/bin\/sh/g' perf-archive >> /tmp/perf-archive
- chmod +x /tmp/perf-archive
- adb push /tmp/perf-archive /data/perf-archive
-
-IV. Environment settings for running perf
-------------------------------------------------
-Some perf features need environment variables to run properly.
-You need to set these before running perf on the target:
- adb shell
- # PERF_PAGER=cat
-
-IV. Run perf
-------------------------------------------------
-Run perf on your device/emulator to which you previously connected using adb:
- # ./data/perf
+- Method 2: Download the Android NDK and use the bundled Clang to
+ build perf. See Build.txt, section "5) Build with Clang" for details.
diff --git a/tools/perf/Documentation/intel-acr.txt b/tools/perf/Documentation/intel-acr.txt
new file mode 100644
index 000000000000..72654fdd9a52
--- /dev/null
+++ b/tools/perf/Documentation/intel-acr.txt
@@ -0,0 +1,53 @@
+Intel Auto Counter Reload Support
+---------------------------------
+Support for Intel Auto Counter Reload in perf tools
+
+Auto counter reload provides a means for software to specify to hardware
+that certain counters, if supported, should be automatically reloaded
+upon overflow of chosen counters. By taking a sample only if the rate of
+one event exceeds some threshold relative to the rate of another event,
+this feature enables software to sample based on the relative rate of
+two or more events. To enable this, the user must provide a sample period
+term and a bitmask ("acr_mask") for each relevant event specifying the
+counters in an event group to reload if the event's specified sample
+period is exceeded.
+
+For example, if the user desires to measure a scenario when IPC > 2,
+the event group might look like the one below:
+
+ perf record -e {cpu_atom/instructions,period=200000,acr_mask=0x2/, \
+ cpu_atom/cycles,period=100000,acr_mask=0x3/} -- true
+
+In this case, if the "instructions" counter exceeds the sample period of
+200000, the second counter, "cycles", will be reset and a sample will be
+taken. If "cycles" is exceeded first, both counters in the group will be
+reset. In this way, samples will only be taken for cases where IPC > 2.
+
+The acr_mask term is a hexadecimal value representing a bitmask of the
+events in the group to be reset when the period is exceeded. In the
+example above, "instructions" is assigned an acr_mask of 0x2, meaning
+only the second event in the group is reloaded and a sample is taken
+for the first event. "cycles" is assigned an acr_mask of 0x3, meaning
+that both event counters will be reset if the sample period is exceeded
+first.
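+
+For illustration, in a hypothetical three-event group, giving the first event
+acr_mask=0x6 would reload the second and third counters (bits 1 and 2)
+whenever the first event's sample period is exceeded.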
+
+ratio-to-prev Event Term
+------------------------
+To simplify this, the event term "ratio-to-prev" is provided, to be used
+alongside the sample period term (period=n) or the -c/--count option. It
+allows users to specify the desired relative rate between events as a
+ratio. Note: both events compared must belong to the same PMU.
+
+The command above would then become
+
+ perf record -e {cpu_atom/instructions/, \
+ cpu_atom/cycles,period=100000,ratio-to-prev=0.5/} -- true
+
+ratio-to-prev is the rate of the event using the term relative to the
+previous event in the group, whose own rate is implicitly 1. Here 0.5
+expresses a 1:0.5 (i.e. 2:1) instructions-to-cycles ratio: with cycles at
+period=100000, the implied instructions period is 100000 / 0.5 = 200000,
+matching the first command above.
+
+To sample for IPC < 2 for example, the events need to be reordered:
+
+ perf record -e {cpu_atom/cycles/, \
+ cpu_atom/instructions,period=200000,ratio-to-prev=2.0/} -- true
diff --git a/tools/perf/Documentation/perf-amd-ibs.txt b/tools/perf/Documentation/perf-amd-ibs.txt
index 2fd31d9d7b71..548549935760 100644
--- a/tools/perf/Documentation/perf-amd-ibs.txt
+++ b/tools/perf/Documentation/perf-amd-ibs.txt
@@ -85,6 +85,15 @@ System-wide profile, uOps event, sampling period: 100000, L3MissOnly (Zen4 onwar
# perf record -e ibs_op/cnt_ctl=1,l3missonly=1/ -c 100000 -a
+System-wide profile, cycles event, sampling period: 100000, LdLat filtering (Zen5
+onward)
+
+ # perf record -e ibs_op/ldlat=128/ -c 100000 -a
+
+ Supported load latency threshold values are 128 to 2048 (both inclusive).
+ A latency value that is a multiple of 128 incurs slightly less profiling
+ overhead than other values.
+
Per process(upstream v6.2 onward), uOps event, sampling period: 100000
# perf record -e ibs_op/cnt_ctl=1/ -c 100000 -p 1234
@@ -162,23 +171,48 @@ Below is a simple example of the perf mem tool.
# perf mem report
A normal perf mem report output will provide detailed memory access profile.
-However, it can also be aggregated based on output fields. For example:
-
- # perf mem report -F mem,sample,snoop
- Samples: 3M of event 'ibs_op//', Event count (approx.): 23524876
- Memory access Samples Snoop
- N/A 1903343 N/A
- L1 hit 1056754 N/A
- L2 hit 75231 N/A
- L3 hit 9496 HitM
- L3 hit 2270 N/A
- RAM hit 8710 N/A
- Remote node, same socket RAM hit 3241 N/A
- Remote core, same node Any cache hit 1572 HitM
- Remote core, same node Any cache hit 514 N/A
- Remote node, same socket Any cache hit 1216 HitM
- Remote node, same socket Any cache hit 350 N/A
- Uncached hit 18 N/A
+New output fields will show related access info together. For example:
+
+ # perf mem report -F overhead,cache,snoop,comm
+ ...
+ # Samples: 92K of event 'ibs_op//'
+ # Total weight : 531104
+ #
+ # ---------- Cache ----------- --- Snoop ----
+ # Overhead L1 L2 L1-buf Other HitM Other Command
+ # ........ ............................ .............. ..........
+ #
+ 76.07% 5.8% 35.7% 0.0% 34.6% 23.3% 52.8% cc1
+ 5.79% 0.2% 0.0% 0.0% 5.6% 0.1% 5.7% make
+ 5.78% 0.1% 4.4% 0.0% 1.2% 0.5% 5.3% gcc
+ 5.33% 0.3% 3.9% 0.0% 1.1% 0.2% 5.2% as
+ 5.00% 0.1% 3.8% 0.0% 1.0% 0.3% 4.7% sh
+ 1.56% 0.1% 0.1% 0.0% 1.4% 0.6% 0.9% ld
+ 0.28% 0.1% 0.0% 0.0% 0.2% 0.1% 0.2% pkg-config
+ 0.09% 0.0% 0.0% 0.0% 0.1% 0.0% 0.1% git
+ 0.03% 0.0% 0.0% 0.0% 0.0% 0.0% 0.0% rm
+ ...
+
+Also, it can be aggregated based on various memory access info using the
+sort keys. For example:
+
+ # perf mem report -s mem,snoop
+ ...
+ # Samples: 92K of event 'ibs_op//'
+ # Total weight : 531104
+ # Sort order : mem,snoop
+ #
+ # Overhead Samples Memory access Snoop
+ # ........ ............ ....................................... ............
+ #
+ 47.99% 1509 L2 hit N/A
+ 25.08% 338 core, same node Any cache hit HitM
+ 10.24% 54374 N/A N/A
+ 6.77% 35938 L1 hit N/A
+ 6.39% 101 core, same node Any cache hit N/A
+ 3.50% 69 RAM hit N/A
+ 0.03% 158 LFB/MAB hit N/A
+ 0.00% 2 Uncached hit N/A
Please refer to their man page for more detail.
diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt
index 46090c5b42b4..547f1a268018 100644
--- a/tools/perf/Documentation/perf-annotate.txt
+++ b/tools/perf/Documentation/perf-annotate.txt
@@ -170,7 +170,6 @@ include::itrace.txt[]
--code-with-type::
Show data type info in code annotation (for memory instructions only).
- Currently it only works with --stdio option.
SEE ALSO
diff --git a/tools/perf/Documentation/perf-arm-spe.txt b/tools/perf/Documentation/perf-arm-spe.txt
index 37afade4f1b2..8b02e5b983fa 100644
--- a/tools/perf/Documentation/perf-arm-spe.txt
+++ b/tools/perf/Documentation/perf-arm-spe.txt
@@ -141,27 +141,65 @@ Config parameters
These are placed between the // in the event and comma separated. For example '-e
arm_spe/load_filter=1,min_latency=10/'
- branch_filter=1 - collect branches only (PMSFCR.B)
- event_filter=<mask> - filter on specific events (PMSEVFR) - see bitfield description below
+ event_filter=<mask> - logical AND filter on specific events (PMSEVFR) - see bitfield description below
+ inv_event_filter=<mask> - logical OR to filter out specific events (PMSNEVFR, FEAT_SPEv1p2) - see bitfield description below
jitter=1 - use jitter to avoid resonance when sampling (PMSIRR.RND)
- load_filter=1 - collect loads only (PMSFCR.LD)
min_latency=<n> - collect only samples with this latency or higher* (PMSLATFR)
pa_enable=1 - collect physical address (as well as VA) of loads/stores (PMSCR.PA) - requires privilege
pct_enable=1 - collect physical timestamp instead of virtual timestamp (PMSCR.PCT) - requires privilege
- store_filter=1 - collect stores only (PMSFCR.ST)
ts_enable=1 - enable timestamping with value of generic timer (PMSCR.TS)
discard=1 - enable SPE PMU events but don't collect sample data - see 'Discard mode' (PMBLIMITR.FM = DISCARD)
+ inv_data_src_filter=<mask> - mask to filter from 0-63 possible data sources (PMSDSFR, FEAT_SPE_FDS) - See 'Data source filtering'
+++*+++ Latency is the total latency from the point at which sampling started on that instruction, rather
than only the execution latency.
-Only some events can be filtered on; these include:
-
- bit 1 - instruction retired (i.e. omit speculative instructions)
+Only some events can be filtered on using 'event_filter' bits. The overall
+filter is the logical AND of these bits, for example if bits 3 and 5 are set
+only samples that have both 'L1D refill' AND 'TLB refill' are recorded. When
+FEAT_SPEv1p2 is implemented 'inv_event_filter' can also be used to exclude
+events that have any (OR) of the filter's bits set. For example setting bits 3
+and 5 in 'inv_event_filter' will exclude any events that are either 'L1D
+refill' OR 'TLB refill'. If the same bit is set in both filters it's UNPREDICTABLE
+whether the sample is included or excluded. Filter bits for both event_filter
+and inv_event_filter are:
+
+ bit 1 - Instruction retired (i.e. omit speculative instructions)
+ bit 2 - L1D access (FEAT_SPEv1p4)
bit 3 - L1D refill
+ bit 4 - TLB access (FEAT_SPEv1p4)
bit 5 - TLB refill
- bit 7 - mispredict
- bit 11 - misaligned access
+ bit 6 - Not taken event (FEAT_SPEv1p2)
+ bit 7 - Mispredict
+ bit 8 - Last level cache access (FEAT_SPEv1p4)
+ bit 9 - Last level cache miss (FEAT_SPEv1p4)
+ bit 10 - Remote access (FEAT_SPEv1p4)
+ bit 11 - Misaligned access (FEAT_SPEv1p1)
+ bit 12-15 - IMPLEMENTATION DEFINED events (when implemented)
+ bit 16 - Transaction (FEAT_TME)
+ bit 17 - Partial or empty SME or SVE predicate (FEAT_SPEv1p1)
+ bit 18 - Empty SME or SVE predicate (FEAT_SPEv1p1)
+ bit 19 - L2D access (FEAT_SPEv1p4)
+ bit 20 - L2D miss (FEAT_SPEv1p4)
+ bit 21 - Cache data modified (FEAT_SPEv1p4)
+ bit 22 - Recently fetched (FEAT_SPEv1p4)
+ bit 23 - Data snooped (FEAT_SPEv1p4)
+ bit 24 - Streaming SVE mode event (when FEAT_SPE_SME is implemented), or
+ IMPLEMENTATION DEFINED event 24 (when implemented, only versions
+ less than FEAT_SPEv1p4)
+ bit 25 - SMCU or external coprocessor operation event when FEAT_SPE_SME is
+ implemented, or IMPLEMENTATION DEFINED event 25 (when implemented,
+ only versions less than FEAT_SPEv1p4)
+ bit 26-31 - IMPLEMENTATION DEFINED events (only versions less than FEAT_SPEv1p4)
+ bit 48-63 - IMPLEMENTATION DEFINED events (when implemented)
+
+For IMPLEMENTATION DEFINED bits, refer to the CPU TRM if these bits are
+implemented.
+
+The driver will reject events if requested filter bits require unimplemented SPE
+versions, but will not reject filter bits for unimplemented IMPDEF bits or when
+their related feature is not present (e.g. SME). For example, if FEAT_SPEv1p2 is
+not implemented, filtering on "Not taken event" (bit 6) will be rejected.
So to sample just retired instructions:
@@ -171,6 +209,31 @@ or just mispredicted branches:
perf record -e arm_spe/event_filter=0x80/ -- ./mybench
+When set, the following filters can be used to select samples that match any of
+the operation types (OR filtering). If only one is set then only samples of that
+type are collected:
+
+ branch_filter=1 - Collect branches (PMSFCR.B)
+ load_filter=1 - Collect loads (PMSFCR.LD)
+ store_filter=1 - Collect stores (PMSFCR.ST)
+
+When extended filtering is supported (FEAT_SPE_EFT), SIMD and floating
+point operations can also be selected:
+
+ simd_filter=1 - Collect SIMD loads, stores and operations (PMSFCR.SIMD)
+ float_filter=1 - Collect floating point loads, stores and operations (PMSFCR.FP)
+
+When extended filtering is supported (FEAT_SPE_EFT), operation type filters can
+be changed from OR to AND using the _mask fields. For example, samples could be
+selected only if they are store AND SIMD by setting 'store_filter=1,simd_filter=1,
+store_filter_mask=1,simd_filter_mask=1' (see the full command after the list
+below). The new masks are as follows:
+
+ branch_filter_mask=1 - Change branch filter behavior from OR to AND (PMSFCR.Bm)
+ load_filter_mask=1 - Change load filter behavior from OR to AND (PMSFCR.LDm)
+ store_filter_mask=1 - Change store filter behavior from OR to AND (PMSFCR.STm)
+ simd_filter_mask=1 - Change SIMD filter behavior from OR to AND (PMSFCR.SIMDm)
+ float_filter_mask=1 - Change floating point filter behavior from OR to AND (PMSFCR.FPm)
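+
+Putting this together, a sketch that records only samples that are both
+stores AND SIMD operations (the workload name is a placeholder):
+
+  perf record -e arm_spe/store_filter=1,simd_filter=1,store_filter_mask=1,simd_filter_mask=1/ -- ./mybench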
+
Viewing the data
~~~~~~~~~~~~~~~~~
@@ -191,19 +254,29 @@ groups:
36 branch
0 remote-access
900 memory
+ 1800 instructions
The arm_spe// and dummy:u events are implementation details and are expected to be empty.
-To get a full list of unique samples that are not sorted into groups, set the itrace option to
-generate 'instruction' samples. The period option is also taken into account, so set it to 1
-instruction unless you want to further downsample the already sampled SPE data:
+The instructions group contains the full list of unique samples that are not
+sorted into other groups. To generate only this group, use --itrace=i1i.
- perf report --itrace=i1i
+1i (a 1 instruction interval) signifies no further downsampling. Rather than a
+true instruction interval, this generates a sample every n SPE samples. For
+example, to generate the default set of events for every 100 SPE samples:
+
+ perf report --itrace=bxofmtMai100i
+
+Other period types, for example nanoseconds (ns), are not currently supported.
Memory access details are also stored on the samples and this can be viewed with:
perf report --mem-mode
+The latency value from the SPE sample is stored in the 'weight' field of the
+Perf samples and can be displayed in Perf script and report outputs by enabling
+its display from the command line.
+
Common errors
~~~~~~~~~~~~~
@@ -247,6 +320,25 @@ to minimize output. Then run perf stat:
perf record -e arm_spe/discard/ -a -N -B --no-bpf-event -o - > /dev/null &
perf stat -e SAMPLE_FEED_LD
+Data source filtering
+~~~~~~~~~~~~~~~~~~~~~
+
+When FEAT_SPE_FDS is present, 'inv_data_src_filter' can be used as a mask to
+filter on a subset (0 - 63) of the possible data source IDs. The full range of
+data source IDs is 0 - 65535, although values outside 0 - 63 are unlikely to
+be used in practice. Data sources are IMPDEF, so refer to the CPU TRM for the
+mappings. Each bit N of the filter maps to data source N. The filter is an OR
+of all the bits, and the value provided in 'inv_data_src_filter' is inverted
+before being written to PMSDSFR_EL1, so that set bits exclude that data source
+and cleared bits include it. Therefore the default value of 0 is equivalent to
+no filtering (all data sources included).
+
+For example, to include only data sources 0 and 3, clear bits 0 and 3
+(0xFFFFFFFFFFFFFFF6).
+
+When 'inv_data_src_filter' is set to 0xFFFFFFFFFFFFFFFF, any samples with any
+data source set are excluded.
+
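+For example, a sketch that records while excluding every data source except
+0 and 3 (the workload name is a placeholder):
+
+  perf record -e arm_spe/inv_data_src_filter=0xfffffffffffffff6/ -- ./mybench
+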
SEE ALSO
--------
diff --git a/tools/perf/Documentation/perf-bench.txt b/tools/perf/Documentation/perf-bench.txt
index 8331bd28b10e..1160224cb718 100644
--- a/tools/perf/Documentation/perf-bench.txt
+++ b/tools/perf/Documentation/perf-bench.txt
@@ -177,11 +177,21 @@ Suite for evaluating performance of simple memory copy in various ways.
Options of *memcpy*
^^^^^^^^^^^^^^^^^^^
--l::
+-s::
--size::
Specify size of memory to copy (default: 1MB).
Available units are B, KB, MB, GB and TB (case insensitive).
+-p::
+--page::
+Specify page-size for mapping memory buffers (default: 4KB).
+Available values are 4KB, 2MB, 1GB (case insensitive).
+
+-k::
+--chunk::
+Specify the chunk-size for each invocation (default: 0, or the full extent).
+Available units are B, KB, MB, GB and TB (case insensitive).
+
-f::
--function::
Specify function to copy (default: default).
@@ -201,11 +211,21 @@ Suite for evaluating performance of simple memory set in various ways.
Options of *memset*
^^^^^^^^^^^^^^^^^^^
--l::
+-s::
--size::
Specify size of memory to set (default: 1MB).
Available units are B, KB, MB, GB and TB (case insensitive).
+-p::
+--page::
+Specify page-size for mapping memory buffers (default: 4KB).
+Available values are 4KB, 2MB, 1GB (case insensitive).
+
+-k::
+--chunk::
+Specify the chunk-size for each invocation (default: 0, or the full extent).
+Available units are B, KB, MB, GB and TB (case insensitive).
+
-f::
--function::
Specify function to set (default: default).
@@ -220,6 +240,40 @@ Repeat memset invocation this number of times.
--cycles::
Use perf's cpu-cycles event instead of gettimeofday syscall.
+*mmap*::
+Suite for evaluating memory subsystem performance for mmap()'d memory.
+
+Options of *mmap*
+^^^^^^^^^^^^^^^^^
+-s::
+--size::
+Specify size of memory to map (default: 1MB).
+Available units are B, KB, MB, GB and TB (case insensitive).
+
+-p::
+--page::
+Specify page-size for mapping memory buffers (default: 4KB).
+Available values are 4KB, 2MB, 1GB (case insensitive).
+
+-r::
+--randomize::
+Specify seed to randomize page access offset (default: 0, or not randomized).
+
+-f::
+--function::
+Specify function to use (default: all).
+Available functions are 'demand' and 'populate', with the first
+demand faulting pages in the region and the second using an eager
+mapping.
+
+-l::
+--nr_loops::
+Repeat mmap() invocation this number of times.
+
+-c::
+--cycles::
+Use perf's cpu-cycles event instead of gettimeofday syscall.
+
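+As a hedged sketch, assuming the suite is invoked as 'perf bench mem mmap'
+(the values are illustrative):
+
+  perf bench mem mmap -s 256MB -p 2MB -f demand -l 10
+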
SUITES FOR 'numa'
~~~~~~~~~~~~~~~~~
*mem*::
diff --git a/tools/perf/Documentation/perf-c2c.txt b/tools/perf/Documentation/perf-c2c.txt
index 856f0dfb8e5a..40b0f71a2c44 100644
--- a/tools/perf/Documentation/perf-c2c.txt
+++ b/tools/perf/Documentation/perf-c2c.txt
@@ -54,8 +54,15 @@ RECORD OPTIONS
-l::
--ldlat::
- Configure mem-loads latency. Supported on Intel and Arm64 processors
- only. Ignored on other archs.
+ Configure mem-loads latency. Supported on Intel, Arm64 and some AMD
+ processors. Ignored on other archs.
+
+ On supported AMD processors:
+ - /sys/bus/event_source/devices/ibs_op/caps/ldlat file contains '1'.
+ - Supported latency values are 128 to 2048 (both inclusive).
+  - A latency value that is a multiple of 128 incurs slightly less profiling
+    overhead than other values.
+ - Load latency filtering is disabled by default.
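+
+  For instance, a sketch raising the load-latency threshold to 256 cycles
+  (the workload name is a placeholder):
+
+    perf c2c record --ldlat 256 -- ./workload
+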
-k::
--all-kernel::
@@ -136,6 +143,13 @@ REPORT OPTIONS
feature, which causes cacheline sharing to behave like the cacheline
size is doubled.
+-M::
+--disassembler-style=::
+ Set disassembler style for objdump.
+
+--objdump=<path>::
+ Path to objdump binary.
+
C2C RECORD
----------
The perf c2c record command setup options related to HITM cacheline analysis
diff --git a/tools/perf/Documentation/perf-check.txt b/tools/perf/Documentation/perf-check.txt
index a764a4629220..09e1d35677f5 100644
--- a/tools/perf/Documentation/perf-check.txt
+++ b/tools/perf/Documentation/perf-check.txt
@@ -50,12 +50,12 @@ feature::
dwarf / HAVE_LIBDW_SUPPORT
dwarf_getlocations / HAVE_LIBDW_SUPPORT
dwarf-unwind / HAVE_DWARF_UNWIND_SUPPORT
- auxtrace / HAVE_AUXTRACE_SUPPORT
libbfd / HAVE_LIBBFD_SUPPORT
+ libbpf-strings / HAVE_LIBBPF_STRINGS_SUPPORT
libcapstone / HAVE_LIBCAPSTONE_SUPPORT
- libcrypto / HAVE_LIBCRYPTO_SUPPORT
libdw-dwarf-unwind / HAVE_LIBDW_SUPPORT
libelf / HAVE_LIBELF_SUPPORT
+ libLLVM / HAVE_LIBLLVM_SUPPORT
libnuma / HAVE_LIBNUMA_SUPPORT
libopencsd / HAVE_CSTRACE_SUPPORT
libperl / HAVE_LIBPERL_SUPPORT
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
index 36ebebc875ea..642d1c490d9e 100644
--- a/tools/perf/Documentation/perf-config.txt
+++ b/tools/perf/Documentation/perf-config.txt
@@ -452,6 +452,9 @@ call-graph.*::
kernel space is controlled not by this option but by the
kernel config (CONFIG_UNWINDER_*).
+ The 'defer' mode can be used with 'fp' mode to enable deferred
+ user callchains (like 'fp,defer').
+
call-graph.dump-size::
The size of stack to dump in order to do post-unwinding. Default is 8192 (byte).
When using dwarf into record-mode, the default size will be used if omitted.
@@ -708,6 +711,10 @@ intel-pt.*::
the maximum is exceeded there will be a "Never-ending loop"
error. The default is 100000.
+ intel-pt.all-switch-events::
+ If the user has permission to do so, always record all context
+ switch events on all CPUs.
+
auxtrace.*::
auxtrace.dumpdir::
diff --git a/tools/perf/Documentation/perf-diff.txt b/tools/perf/Documentation/perf-diff.txt
index f3067a4af294..58efab72d2e5 100644
--- a/tools/perf/Documentation/perf-diff.txt
+++ b/tools/perf/Documentation/perf-diff.txt
@@ -285,7 +285,7 @@ If specified the 'Weighted diff' column is displayed with value 'd' computed as:
- period being the hist entry period value
- - WEIGHT-A/WEIGHT-B being user supplied weights in the the '-c' option
+ - WEIGHT-A/WEIGHT-B being user supplied weights in the '-c' option
behind ':' separator like '-c wdiff:1,2'.
- WEIGHT-A being the weight of the data file
- WEIGHT-B being the weight of the baseline data file
diff --git a/tools/perf/Documentation/perf-ftrace.txt b/tools/perf/Documentation/perf-ftrace.txt
index b77f58c4d2fd..3f3808e513fe 100644
--- a/tools/perf/Documentation/perf-ftrace.txt
+++ b/tools/perf/Documentation/perf-ftrace.txt
@@ -123,6 +123,10 @@ OPTIONS for 'perf ftrace trace'
--graph-opts::
List of options allowed to set:
+ - args - Show function arguments.
+ - retval - Show function return value.
+ - retval-hex - Show function return value in hexadecimal format.
+ - retaddr - Show function return address.
- nosleep-time - Measure on-CPU time only for function_graph tracer.
- noirqs - Ignore functions that happen inside interrupt.
- verbose - Show process names, PIDs, timestamps, etc.
@@ -139,6 +143,12 @@ OPTIONS for 'perf ftrace latency'
Set the function name to get the histogram. Unlike perf ftrace trace,
it only allows single function to calculate the histogram.
+-e::
+--events=::
+ Set the pair of events to get the histogram. The histogram is calculated
+	by the time difference between the two events from the same thread. This
+	requires the -b/--use-bpf option.
+
-b::
--use-bpf::
Use BPF to measure function latency instead of using the ftrace (it
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index 8914f12d2b85..a4378a0cd914 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -73,6 +73,7 @@ counted. The following modifiers exist:
e - group or event are exclusive and do not share the PMU
b - use BPF aggregration (see perf stat --bpf-counters)
R - retire latency value of the event
+ X - don't regroup the event to match PMUs
The 'p' modifier can be used for specifying how precise the instruction
address should be. The 'p' modifier can be specified multiple times:
@@ -278,17 +279,33 @@ also be supplied. For example:
perf stat -C 0 -e 'hv_gpci/dtbp_ptitc,phys_processor_idx=0x2/' ...
-EVENT QUALIFIERS:
+EVENT QUALIFIERS
+----------------
It is also possible to add extra qualifiers to an event:
percore:
-Sums up the event counts for all hardware threads in a core, e.g.:
+ Sums up the event counts for all hardware threads in a core, e.g.:
+ perf stat -e cpu/event=0,umask=0x3,percore=1/
+cpu:
- perf stat -e cpu/event=0,umask=0x3,percore=1/
+ Specifies a CPU or a range of CPUs to open the event upon. It may
+ also reference a PMU to copy the CPU mask from. The value may be
+ repeated to specify opening the event on multiple CPUs.
+ Example 1: to open the instructions event on CPUs 0 and 2, the
+ cycles event on CPUs 1 and 2:
+ perf stat -e instructions/cpu=0,cpu=2/,cycles/cpu=1-2/ -a sleep 1
+
+ Example 2: to open the data_read uncore event on CPU 0 and the
+ data_write uncore event on CPU 1:
+ perf stat -e data_read/cpu=0/,data_write/cpu=1/ -a sleep 1
+
+ Example 3: to open the software msr/tsc/ event only on the CPUs
+ matching those from the cpu_core PMU:
+ perf stat -e msr/tsc,cpu=cpu_core/ -a sleep 1
EVENT GROUPS
------------
@@ -376,6 +393,8 @@ Support raw format:
. '--raw-dump [hw|sw|cache|tracepoint|pmu|event_glob]', shows the raw-dump of
a certain kind of events.
+include::intel-acr.txt[]
+
SEE ALSO
--------
linkperf:perf-stat[1], linkperf:perf-top[1],
diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt
index 859dc11a7372..c17b3e318169 100644
--- a/tools/perf/Documentation/perf-lock.txt
+++ b/tools/perf/Documentation/perf-lock.txt
@@ -216,6 +216,21 @@ CONTENTION OPTIONS
--cgroup-filter=<value>::
Show lock contention only in the given cgroups (comma separated list).
+-J::
+--inject-delay=<time@function>::
+	Add delays to the given lock. The delay is added to the contention-end
+	part so that the (new) owner of the lock will be delayed. By slowing
+	down the owner, the waiters will be delayed as well. This works only
+	with -b/--use-bpf.
+
+ The 'time' is specified in nsec but it can have a unit suffix. Available
+ units are "ms", "us" and "ns". Currently it accepts up to 10ms of delays
+ for safety reasons.
+
+	Note that it will busy-wait after it gets the lock. Delaying locks can
+	have significant consequences, including potential kernel crashes. Please
+ use it at your own risk.
+
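+	As a hedged illustration, delaying a (placeholder) lock symbol by 10us
+	in contention mode with BPF:
+
+	  perf lock contention -b -J 10us@queued_spin_lock_slowpath
+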
SEE ALSO
--------
diff --git a/tools/perf/Documentation/perf-mem.txt b/tools/perf/Documentation/perf-mem.txt
index 8a1bd9ff0f86..4d164836d094 100644
--- a/tools/perf/Documentation/perf-mem.txt
+++ b/tools/perf/Documentation/perf-mem.txt
@@ -28,6 +28,8 @@ and kernel support is required. See linkperf:perf-arm-spe[1] for a setup guide.
Due to the statistical nature of SPE sampling, not every memory operation will
be sampled.
+On AMD this uses the IBS Op PMU to sample load-store operations.
+
COMMON OPTIONS
--------------
-f::
@@ -67,8 +69,15 @@ RECORD OPTIONS
Configure all used events to run in user space.
--ldlat <n>::
- Specify desired latency for loads event. Supported on Intel and Arm64
- processors only. Ignored on other archs.
+ Specify desired latency for loads event. Supported on Intel, Arm64 and
+ some AMD processors. Ignored on other archs.
+
+ On supported AMD processors:
+ - /sys/bus/event_source/devices/ibs_op/caps/ldlat file contains '1'.
+ - Supported latency values are 128 to 2048 (both inclusive).
+  - A latency value that is a multiple of 128 incurs slightly less profiling
+    overhead than other values.
+ - Load latency filtering is disabled by default.
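+
+  For example, a sketch that records loads with at least 256 cycles of
+  latency (the workload name is a placeholder):
+
+    perf mem record --ldlat 256 -- ./workload
+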
REPORT OPTIONS
--------------
@@ -110,6 +119,22 @@ REPORT OPTIONS
And the default sort keys are changed to local_weight, mem, sym, dso,
symbol_daddr, dso_daddr, snoop, tlb, locked, blocked, local_ins_lat.
+-F::
+--fields=::
+ Specify output field - multiple keys can be specified in CSV format.
+ Please see linkperf:perf-report[1] for details.
+
+ In addition to the default fields, 'perf mem report' will provide the
+ following fields to break down sample periods.
+
+ - op: operation in the sample instruction (load, store, prefetch, ...)
+ - cache: location in CPU cache (L1, L2, ...) where the sample hit
+ - mem: location in memory or other places the sample hit
+ - dtlb: location in Data TLB (L1, L2) where the sample hit
+ - snoop: snoop result for the sampled data access
+
+ Please take a look at the OUTPUT FIELD SELECTION section for caveats.
+
-T::
--type-profile::
Show data-type profile result instead of code symbols. This requires
@@ -128,6 +153,59 @@ REPORT OPTIONS
In addition, for report all perf report options are valid, and for record
all perf record options.
+OVERHEAD CALCULATION
+--------------------
+Unlike linkperf:perf-report[1], which calculates overhead from the actual
+sample period, perf-mem overhead is calculated using sample weight. E.g.
+suppose there are two samples in the perf.data file, both with the same sample
+period, but one with weight 180 and the other with weight 20:
+
+ $ perf script -F period,data_src,weight,ip,sym
+ 100000 629080842 |OP LOAD|LVL L3 hit|... 20 7e69b93ca524 strcmp
+ 100000 1a29081042 |OP LOAD|LVL RAM hit|... 180 ffffffff82429168 memcpy
+
+ $ perf report -F overhead,symbol
+ 50% [.] strcmp
+ 50% [k] memcpy
+
+ $ perf mem report -F overhead,symbol
+ 90% [k] memcpy
+ 10% [.] strcmp
+
+OUTPUT FIELD SELECTION
+----------------------
+"perf mem report" adds a number of new output fields specific to data source
+information in the sample. Some of them have the same name as the existing
+sort keys ("mem" and "snoop"). So unlike other fields and sort keys, they
+behave differently when used with -F/--fields or -s/--sort.
+
+Using those two as output fields will aggregate samples altogether and show a
+breakdown.
+
+ $ perf mem report -F mem,snoop
+ ...
+ # ------ Memory ------- --- Snoop ----
+ # RAM Uncach Other HitM Other
+ # ..................... ..............
+ #
+ 3.5% 0.0% 96.5% 25.1% 74.9%
+
+But using the same names as sort keys will aggregate samples for each type
+separately.
+
+ $ perf mem report -s mem,snoop
+ # Overhead Samples Memory access Snoop
+ # ........ ............ ....................................... ............
+ #
+ 47.99% 1509 L2 hit N/A
+ 25.08% 338 core, same node Any cache hit HitM
+ 10.24% 54374 N/A N/A
+ 6.77% 35938 L1 hit N/A
+ 6.39% 101 core, same node Any cache hit N/A
+ 3.50% 69 RAM hit N/A
+ 0.03% 158 LFB/MAB hit N/A
+ 0.00% 2 Uncached hit N/A
+
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-report[1], linkperf:perf-arm-spe[1]
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index c7fc1ba265e2..e8b9aadbbfa5 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -325,6 +325,10 @@ OPTIONS
by default. User can change the number by passing it after comma
like "--call-graph fp,32".
+ Also "defer" can be used with "fp" (like "--call-graph fp,defer") to
+ enable deferred user callchain which will collect user-space callchains
+ when the thread returns to the user space.
+
-q::
--quiet::
Don't print any warnings or messages, useful for scripting.
@@ -340,7 +344,7 @@ OPTIONS
-d::
--data::
- Record the sample virtual addresses.
+ Record the sample virtual addresses. Implies --sample-mem-info.
--phys-data::
Record the sample physical addresses.
@@ -368,6 +372,11 @@ OPTIONS
the sample_type member of the struct perf_event_attr argument to the
perf_event_open system call.
+--sample-mem-info::
+ Record the sample data source information for memory operations.
+	It requires hardware support and may work on specific events only.
+ Please consider using 'perf mem record' instead if you're not sure.
+
-n::
--no-samples::
Don't sample.
@@ -558,7 +567,9 @@ Specify vmlinux path which has debuginfo.
Record build-id of all DSOs regardless whether it's actually hit or not.
--buildid-mmap::
-Record build ids in mmap2 events, disables build id cache (implies --no-buildid).
+Legacy option to record build-ids in mmap2 events, which is now the
+default. Behaves identically to --no-buildid. Disable with
+--no-buildid-mmap.
--aio[=n]::
Use <n> control blocks in asynchronous (Posix AIO) trace writing mode (default: 1, max: 4).
@@ -837,6 +848,15 @@ filtered through the mask provided by -C option.
only, as of now. So the applications built without the frame
pointer might see bogus addresses.
+	  Off-cpu profiling consists of two types of samples: direct samples,
+	  which share the same behavior as regular samples, and accumulated
+	  samples, stored in a BPF stack trace map and presented after all the
+	  regular samples.
+
+--off-cpu-thresh::
+ Once a task's off-cpu time reaches this threshold (in milliseconds), it
+ generates a direct off-cpu sample. The default is 500ms.
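+
+	For example, a sketch that emits a direct sample whenever a task stays
+	off-CPU for 100ms or more (recording system-wide with --off-cpu):
+
+	  perf record --off-cpu --off-cpu-thresh 100 -a -- sleep 10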
+
--setup-filter=<action>::
Prepare BPF filter to be used by regular users. The action should be
either "pin" or "unpin". The filter can be used after it's pinned.
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 3376c4710575..acef3ff4178e 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -94,6 +94,7 @@ OPTIONS
- comm: command (name) of the task which can be read via /proc/<pid>/comm
- pid: command and tid of the task
+ - tgid: command and tgid of the task
- dso: name of library or module executed at the time of sample
- dso_size: size of library or module executed at the time of sample
- symbol: name of function executed at the time of sample
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 28bec7e78bc8..03d112960632 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -527,6 +527,11 @@ include::itrace.txt[]
The known limitations include exception handing such as
setjmp/longjmp will have calls/returns not match.
+--merge-callchains::
+ Enable merging deferred user callchains if available. This is the
+ default behavior. If you want to see separate CALLCHAIN_DEFERRED
+ records for some reason, use --no-merge-callchains explicitly.
+
:GMEXAMPLECMD: script
:GMEXAMPLESUBCMD:
include::guest-files.txt[]
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 2bc063672486..1a766d4a2233 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -506,6 +506,13 @@ this option is not set. The TPEBS hardware feature starts from Intel Granite
Rapids microarchitecture. This option only exists in X86_64 and is meaningful on
Intel platforms with TPEBS feature.
+--tpebs-mode=[mean|min|max|last]::
+Set how retirement latency events have their sample times
+combined. The default "mean" gives the average of retirement
+latency. "min" or "max" give the smallest or largest retirment latency
+times respectively. "last" uses the last retirment latency sample's
+time.
+
--td-level::
Print the top-down statistics that equal the input level. It allows
users to print the interested top-down metrics level instead of the
@@ -633,18 +640,20 @@ JSON FORMAT
With -j, perf stat is able to print out a JSON format output
that can be used for parsing.
-- timestamp : optional usec time stamp in fractions of second (with -I)
+- interval : optional timestamp in fractions of second (with -I)
- optional aggregate options:
- core : core identifier (with --per-core)
- die : die identifier (with --per-die)
- socket : socket identifier (with --per-socket)
- node : node identifier (with --per-node)
- thread : thread identifier (with --per-thread)
+- counters : number of aggregated PMU counters
- counter-value : counter value
- unit : unit of the counter value or empty
- event : event name
- variance : optional variance if multiple values are collected (with -r)
-- runtime : run time of counter
+- event-runtime : run time of the event
+- pcnt-running : percentage of time the event was running
- metric-value : optional metric value
- metric-unit : optional unit of metric
diff --git a/tools/perf/Documentation/perf-timechart.txt b/tools/perf/Documentation/perf-timechart.txt
index ef0c7565bd5c..ef2281c56743 100644
--- a/tools/perf/Documentation/perf-timechart.txt
+++ b/tools/perf/Documentation/perf-timechart.txt
@@ -94,6 +94,9 @@ RECORD OPTIONS
-g::
--callchain::
Do call-graph (stack chain/backtrace) recording
+-o::
+--output=::
+ Select the output file (default: perf.data)
EXAMPLES
--------
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index 887dc37773d0..892c82a9bf40 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -152,7 +152,8 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
--summary-mode=mode::
To be used with -s or -S, to select how to show summary. By default it'll
- show the syscall summary by thread. Possible values are: thread, total.
+ show the syscall summary by thread. Possible values are: thread, total,
+ cgroup.
--tool_stats::
Show tool stats such as number of times fd->pathname was discovered thru
@@ -237,20 +238,22 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
the same beautifiers used in the strace-like enter+exit lines to augment the
tracepoint arguments.
---map-dump::
- Dump BPF maps setup by events passed via -e, for instance the augmented_raw_syscalls
- living in tools/perf/examples/bpf/augmented_raw_syscalls.c. For now this
- dumps just boolean map values and integer keys, in time this will print in hex
- by default and use BTF when available, as well as use functions to do pretty
- printing using the existing 'perf trace' syscall arg beautifiers to map integer
- arguments to strings (pid to comm, syscall id to syscall name, etc).
-
--force-btf::
Use btf_dump to pretty print syscall argument data, instead of using hand-crafted pretty
printers. This option is intended for testing BTF integration in perf trace. btf_dump-based
pretty-printing serves as a fallback to hand-crafted pretty printers, as the latter can
better pretty-print integer flags and struct pointers.
+--bpf-summary::
+ Collect system call statistics in BPF. This is only for live mode and
+	works well with the -s/--summary option, where no argument information is
+ required.
+
+--max-summary=N::
+ Maximum number of lines in the summary mode. Note that this applies to
+ each entry (thread or cgroup).
+
+
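+As a sketch combining the summary options above (the workload name is a
+placeholder):
+
+  perf trace -s --bpf-summary --max-summary=10 -- ./workload
+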
PAGEFAULTS
----------
diff --git a/tools/perf/Documentation/perf.data-file-format.txt b/tools/perf/Documentation/perf.data-file-format.txt
index 010a4edcd384..c9d4dec65344 100644
--- a/tools/perf/Documentation/perf.data-file-format.txt
+++ b/tools/perf/Documentation/perf.data-file-format.txt
@@ -348,6 +348,16 @@ to special needs.
struct perf_bpil, which contains detailed information about
a BPF program, including type, id, tag, jited/xlated instructions, etc.
+The format of data in HEADER_BPF_PROG_INFO is as follows:
+ u32 count
+
+ struct perf_bpil {
+ u32 info_len; /* size of struct bpf_prog_info, when the tool is compiled */
+ u32 data_len; /* total bytes allocated for data, round up to 8 bytes */
+ u64 arrays; /* which arrays are included in data */
+ struct bpf_prog_info info;
+ u8 data[];
+ }[count];
HEADER_BPF_BTF = 26,
@@ -370,7 +380,7 @@ struct {
u32 mmap_len;
};
-Indicates that trace contains records of PERF_RECORD_COMPRESSED type
+Indicates that trace contains records of PERF_RECORD_COMPRESSED2 type
that have perf_events records in compressed form.
HEADER_CPU_PMU_CAPS = 28,
@@ -602,7 +612,14 @@ struct auxtrace_error_event {
Describes a header feature. These are records used in pipe-mode that
contain information that otherwise would be in perf.data file's header.
- PERF_RECORD_COMPRESSED = 81,
+ PERF_RECORD_COMPRESSED = 81, /* deprecated */
+
+The header is followed by a compressed data frame that can be decompressed
+into an array of perf trace records. The size of the entire compressed event
+record, including the header, is limited by the max value of header.size.
+
+It is deprecated and new files should use PERF_RECORD_COMPRESSED2 to guarantee
+8-byte alignment.
struct compressed_event {
struct perf_event_header header;
@@ -618,10 +635,17 @@ This is used, for instance, to 'perf inject' events after init and before
regular events, those emitted by the kernel, to support combining guest and
host records.
+ PERF_RECORD_COMPRESSED2 = 83,
-The header is followed by compressed data frame that can be decompressed
-into array of perf trace records. The size of the entire compressed event
-record including the header is limited by the max value of header.size.
+8-byte aligned version of `PERF_RECORD_COMPRESSED`. `header.size` indicates the
+total record size, including padding for 8-byte alignment, and `data_size`
+specifies the actual size of the compressed data.
+
+struct perf_record_compressed2 {
+ struct perf_event_header header;
+ __u64 data_size;
+ char data[];
+};
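+
+A hedged sketch of separating the payload from the alignment padding, using
+only the fields defined above ('rec' is assumed to point at one record):
+
+  static inline __u64 compressed2_padding(const struct perf_record_compressed2 *rec)
+  {
+          /* header.size covers the header, the data and the padding to 8 bytes */
+          return rec->header.size - sizeof(*rec) - rec->data_size;
+  }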
Event types
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 364b55b00b48..34af57b8ec2a 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -1,8 +1,10 @@
COPYING
LICENSES/preferred/GPL-2.0
arch/arm64/tools/gen-sysreg.awk
+arch/arm64/tools/syscall_64.tbl
arch/arm64/tools/sysreg
arch/*/include/uapi/asm/bpf_perf_event.h
+include/uapi/asm-generic/Kbuild
tools/perf
tools/arch
tools/scripts
@@ -25,6 +27,10 @@ tools/lib/str_error_r.c
tools/lib/vsprintf.c
tools/lib/zalloc.c
scripts/bpf_doc.py
+scripts/Kbuild.include
+scripts/Makefile.asm-headers
+scripts/syscall.tbl
+scripts/syscallhdr.sh
tools/bpf/bpftool
kernel/bpf/disasm.c
kernel/bpf/disasm.h
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index b7769a22fe1a..bd9f4804d56b 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -19,8 +19,43 @@ detected_var = $(shell echo "$(1)=$($(1))" >> $(OUTPUT).config-detected)
CFLAGS := $(EXTRA_CFLAGS) $(filter-out -Wnested-externs,$(EXTRA_WARNINGS))
HOSTCFLAGS := $(filter-out -Wnested-externs,$(EXTRA_WARNINGS))
-# Enabled Wthread-safety analysis for clang builds.
+# This is required because the kernel is built with this and some of the code
+# borrowed from kernel headers depends on it, e.g. put_unaligned_*().
+CFLAGS += -fno-strict-aliasing
+
+# Set target flag and options when using clang as compiler.
ifeq ($(CC_NO_CLANG), 0)
+ CLANG_TARGET_FLAGS_arm := arm-linux-gnueabi
+ CLANG_TARGET_FLAGS_arm64 := aarch64-linux-gnu
+ CLANG_TARGET_FLAGS_m68k := m68k-linux-gnu
+ CLANG_TARGET_FLAGS_mips := mipsel-linux-gnu
+ CLANG_TARGET_FLAGS_powerpc := powerpc64le-linux-gnu
+ CLANG_TARGET_FLAGS_riscv := riscv64-linux-gnu
+ CLANG_TARGET_FLAGS_s390 := s390x-linux-gnu
+ CLANG_TARGET_FLAGS_x86 := x86_64-linux-gnu
+ CLANG_TARGET_FLAGS_x86_64 := x86_64-linux-gnu
+
+ # Default to host architecture if ARCH is not explicitly given.
+ ifeq ($(ARCH), $(HOSTARCH))
+ CLANG_TARGET_FLAGS := $(shell $(CLANG) -print-target-triple)
+ else
+ CLANG_TARGET_FLAGS := $(CLANG_TARGET_FLAGS_$(ARCH))
+ endif
+
+ ifeq ($(CROSS_COMPILE),)
+ ifeq ($(CLANG_TARGET_FLAGS),)
+ $(error Specify CROSS_COMPILE or add CLANG_TARGET_FLAGS for $(ARCH))
+ else
+ CLANG_FLAGS += --target=$(CLANG_TARGET_FLAGS)
+ endif # CLANG_TARGET_FLAGS
+ else
+ CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%))
+ endif # CROSS_COMPILE
+
+ CC := $(CLANG) $(CLANG_FLAGS) -fintegrated-as
+ CXX := $(CXX) $(CLANG_FLAGS) -fintegrated-as
+
+  # Enable Wthread-safety analysis for clang builds.
CFLAGS += -Wthread-safety
endif
@@ -130,8 +165,6 @@ ifndef NO_LIBUNWIND
FEATURE_CHECK_LDFLAGS-libunwind-x86_64 += -lunwind -llzma -lunwind-x86_64
endif
-FEATURE_CHECK_LDFLAGS-libcrypto = -lcrypto
-
ifdef CSINCLUDES
LIBOPENCSD_CFLAGS := -I$(CSINCLUDES)
endif
@@ -321,9 +354,6 @@ FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS)
FEATURE_CHECK_LDFLAGS-libaio = -lrt
-FEATURE_CHECK_LDFLAGS-disassembler-four-args = -lbfd -lopcodes -ldl
-FEATURE_CHECK_LDFLAGS-disassembler-init-styled = -lbfd -lopcodes -ldl
-
CORE_CFLAGS += -fno-omit-frame-pointer
CORE_CFLAGS += -Wall
CORE_CFLAGS += -Wextra
@@ -415,10 +445,6 @@ ifeq ($(feature-eventfd), 1)
CFLAGS += -DHAVE_EVENTFD_SUPPORT
endif
-ifeq ($(feature-get_current_dir_name), 1)
- CFLAGS += -DHAVE_GET_CURRENT_DIR_NAME
-endif
-
ifeq ($(feature-gettid), 1)
CFLAGS += -DHAVE_GETTID
endif
@@ -560,6 +586,8 @@ ifndef NO_LIBELF
ifeq ($(feature-libdebuginfod), 1)
CFLAGS += -DHAVE_DEBUGINFOD_SUPPORT
EXTLIBS += -ldebuginfod
+ else
+ $(warning No elfutils/debuginfod.h found, no debuginfo server support, please install libdebuginfod-dev/elfutils-debuginfod-client-devel or equivalent)
endif
endif
@@ -593,6 +621,7 @@ ifndef NO_LIBELF
LIBBPF_STATIC := 1
$(call detected,CONFIG_LIBBPF)
CFLAGS += -DHAVE_LIBBPF_SUPPORT
+ LIBBPF_INCLUDE = $(LIBBPF_DIR)/..
endif
endif
endif
@@ -625,6 +654,8 @@ endif
ifndef NO_LIBUNWIND
have_libunwind :=
+ $(call feature_check,libunwind)
+
$(call feature_check,libunwind-x86)
ifeq ($(feature-libunwind-x86), 1)
$(call detected,CONFIG_LIBUNWIND_X86)
@@ -649,7 +680,7 @@ ifndef NO_LIBUNWIND
endif
ifneq ($(feature-libunwind), 1)
- $(warning No libunwind found. Please install libunwind-dev[el] >= 1.1 and/or set LIBUNWIND_DIR)
+    $(warning No libunwind found. Please install libunwind-dev[el] >= 1.1 and/or set LIBUNWIND_DIR, and set LIBUNWIND=1 on the make command line as it is now opt-in)
NO_LOCAL_LIBUNWIND := 1
else
have_libunwind := 1
@@ -768,28 +799,12 @@ ifneq ($(NO_LIBTRACEEVENT),1)
$(call detected,CONFIG_TRACE)
endif
-ifndef NO_LIBCRYPTO
- ifneq ($(feature-libcrypto), 1)
- $(warning No libcrypto.h found, disables jitted code injection, please install openssl-devel or libssl-dev)
- NO_LIBCRYPTO := 1
- else
- CFLAGS += -DHAVE_LIBCRYPTO_SUPPORT
- EXTLIBS += -lcrypto
- $(call detected,CONFIG_CRYPTO)
- endif
-endif
-
ifndef NO_SLANG
ifneq ($(feature-libslang), 1)
- ifneq ($(feature-libslang-include-subdir), 1)
- $(warning slang not found, disables TUI support. Please install slang-devel, libslang-dev or libslang2-dev)
- NO_SLANG := 1
- else
- CFLAGS += -DHAVE_SLANG_INCLUDE_SUBDIR
- endif
+ $(warning slang not found, disables TUI support. Please install slang-devel, libslang-dev or libslang2-dev)
+ NO_SLANG := 1
endif
ifndef NO_SLANG
- # Fedora has /usr/include/slang/slang.h, but ubuntu /usr/include/slang.h
CFLAGS += -DHAVE_SLANG_SUPPORT
EXTLIBS += -lslang
$(call detected,CONFIG_SLANG)
@@ -814,9 +829,7 @@ ifdef GTK2
endif
endif
-ifdef NO_LIBPERL
- CFLAGS += -DNO_LIBPERL
-else
+ifdef LIBPERL
PERL_EMBED_LDOPTS = $(shell perl -MExtUtils::Embed -e ldopts 2>/dev/null)
PERL_EMBED_LDFLAGS = $(call strip-libs,$(PERL_EMBED_LDOPTS))
PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS))
@@ -826,17 +839,13 @@ else
PERL_EMBED_LDOPTS := $(filter-out -specs=%,$(PERL_EMBED_LDOPTS))
FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
+ $(call feature_check,libperl)
ifneq ($(feature-libperl), 1)
- CFLAGS += -DNO_LIBPERL
- NO_LIBPERL := 1
- $(warning Missing perl devel files. Disabling perl scripting support, please install perl-ExtUtils-Embed/libperl-dev)
+ $(error Missing perl devel files. Please install perl-ExtUtils-Embed/libperl-dev)
else
LDFLAGS += $(PERL_EMBED_LDFLAGS)
EXTLIBS += $(PERL_EMBED_LIBADD)
CFLAGS += -DHAVE_LIBPERL_SUPPORT
- ifeq ($(CC_NO_CLANG), 0)
- CFLAGS += -Wno-compound-token-split-by-macro
- endif
$(call detected,CONFIG_LIBPERL)
endif
endif
@@ -914,8 +923,12 @@ ifneq ($(NO_JEVENTS),1)
endif
ifdef BUILD_NONDISTRO
+ $(call feature_check,libbfd)
+
ifeq ($(feature-libbfd), 1)
EXTLIBS += -lbfd -lopcodes
+ FEATURE_CHECK_LDFLAGS-disassembler-four-args = -lbfd -lopcodes -ldl
+ FEATURE_CHECK_LDFLAGS-disassembler-init-styled = -lbfd -lopcodes -ldl
else
# we are on a system that requires -liberty and (maybe) -lz
# to link against -lbfd; test each case individually here
@@ -942,11 +955,23 @@ ifdef BUILD_NONDISTRO
CFLAGS += -DHAVE_LIBBFD_SUPPORT
CXXFLAGS += -DHAVE_LIBBFD_SUPPORT
+ $(call detected,CONFIG_LIBBFD)
+
+ $(call feature_check,libbfd-buildid)
+
ifeq ($(feature-libbfd-buildid), 1)
CFLAGS += -DHAVE_LIBBFD_BUILDID_SUPPORT
else
$(warning Old version of libbfd/binutils things like PE executable profiling will not be available)
endif
+
+ ifeq ($(feature-disassembler-four-args), 1)
+ CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
+ endif
+
+ ifeq ($(feature-disassembler-init-styled), 1)
+ CFLAGS += -DDISASM_INIT_STYLED
+ endif
endif
ifndef NO_LIBLLVM
@@ -1038,14 +1063,6 @@ ifdef HAVE_KVM_STAT_SUPPORT
CFLAGS += -DHAVE_KVM_STAT_SUPPORT
endif
-ifeq ($(feature-disassembler-four-args), 1)
- CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
-endif
-
-ifeq ($(feature-disassembler-init-styled), 1)
- CFLAGS += -DDISASM_INIT_STYLED
-endif
-
ifeq (${IS_64_BIT}, 1)
ifndef NO_PERF_READ_VDSO32
$(call feature_check,compile-32)
@@ -1095,19 +1112,6 @@ ifndef NO_CAPSTONE
endif
endif
-ifndef NO_AUXTRACE
- ifeq ($(SRCARCH),x86)
- ifeq ($(feature-get_cpuid), 0)
- $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc)
- NO_AUXTRACE := 1
- endif
- endif
- ifndef NO_AUXTRACE
- $(call detected,CONFIG_AUXTRACE)
- CFLAGS += -DHAVE_AUXTRACE_SUPPORT
- endif
-endif
-
ifdef EXTRA_TESTS
$(call detected,CONFIG_EXTRA_TESTS)
CFLAGS += -DHAVE_EXTRA_TESTS
@@ -1140,7 +1144,7 @@ ifndef NO_JVMTI
endif
endif # NO_JVMTI_CMLR
else
- $(warning No openjdk development package found, please install JDK package, e.g. openjdk-8-jdk, java-1.8.0-openjdk-devel)
+ $(warning No openjdk development package found, please install JDK package, e.g. openjdk-8-jdk, java-latest-openjdk-devel)
NO_JVMTI := 1
endif
endif
@@ -1153,7 +1157,7 @@ ifndef NO_LIBPFM4
ASCIIDOC_EXTRA = -aHAVE_LIBPFM=1
$(call detected,CONFIG_LIBPFM4)
else
- $(warning libpfm4 not found, disables libpfm4 support. Please install libpfm4-dev)
+ $(warning libpfm4 not found, disables libpfm4 support. Please install libpfm-devel or libpfm4-dev)
endif
endif
@@ -1173,20 +1177,6 @@ ifneq ($(NO_LIBTRACEEVENT),1)
else
$(error ERROR: libtraceevent is missing. Please install libtraceevent-dev/libtraceevent-devel and/or set LIBTRACEEVENT_DIR or build with NO_LIBTRACEEVENT=1)
endif
-
- ifeq ($(feature-libtracefs), 1)
- CFLAGS += $(shell $(PKG_CONFIG) --cflags libtracefs)
- LDFLAGS += $(shell $(PKG_CONFIG) --libs-only-L libtracefs)
- EXTLIBS += $(shell $(PKG_CONFIG) --libs-only-l libtracefs)
- LIBTRACEFS_VERSION := $(shell $(PKG_CONFIG) --modversion libtracefs).0.0
- LIBTRACEFS_VERSION_1 := $(word 1, $(subst ., ,$(LIBTRACEFS_VERSION)))
- LIBTRACEFS_VERSION_2 := $(word 2, $(subst ., ,$(LIBTRACEFS_VERSION)))
- LIBTRACEFS_VERSION_3 := $(word 3, $(subst ., ,$(LIBTRACEFS_VERSION)))
- LIBTRACEFS_VERSION_CPP := $(shell expr $(LIBTRACEFS_VERSION_1) \* 255 \* 255 + $(LIBTRACEFS_VERSION_2) \* 255 + $(LIBTRACEFS_VERSION_3))
- CFLAGS += -DLIBTRACEFS_VERSION=$(LIBTRACEFS_VERSION_CPP)
- else
- $(warning libtracefs is missing. Please install libtracefs-dev/libtracefs-devel)
- endif
endif
# Among the variables below, these:
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 979d4691221a..b3f481a626af 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -17,7 +17,7 @@ include ../scripts/utilities.mak
#
# Define CROSS_COMPILE as prefix name of compiler if you want cross-builds.
#
-# Define NO_LIBPERL to disable perl script extension.
+# Define LIBPERL to enable perl script extension.
#
# Define NO_LIBPYTHON to disable python script extension.
#
@@ -61,9 +61,6 @@ include ../scripts/utilities.mak
#
# Define NO_LIBBIONIC if you do not want bionic support
#
-# Define NO_LIBCRYPTO if you do not want libcrypto (openssl) support
-# used for generating build-ids for ELFs generated by jitdump.
-#
# Define NO_LIBDW_DWARF_UNWIND if you do not want libdw support
# for dwarf backtrace post unwind.
#
@@ -87,8 +84,6 @@ include ../scripts/utilities.mak
#
# Define NO_LZMA if you do not want to support compressed (xz) kernel modules
#
-# Define NO_AUXTRACE if you do not want AUX area tracing support
-#
# Define NO_LIBBPF if you do not want BPF support
#
# Define NO_LIBCAP if you do not want process capabilities considered by perf
@@ -197,7 +192,7 @@ else
# paths are used instead.
ifdef CROSS_COMPILE
ifeq ($(PKG_CONFIG_LIBDIR)$(PKG_CONFIG_PATH)$(PKG_CONFIG_SYSROOT_DIR),)
- CROSS_ARCH = $(shell $(CC) -dumpmachine)
+ CROSS_ARCH = $(notdir $(CROSS_COMPILE:%-=%))
PKG_CONFIG_LIBDIR := /usr/local/$(CROSS_ARCH)/lib/pkgconfig/
PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):/usr/local/lib/$(CROSS_ARCH)/pkgconfig/
PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):/usr/lib/$(CROSS_ARCH)/pkgconfig/
@@ -237,12 +232,12 @@ endif
# The fixdep build - we force fixdep tool to be built as
# the first target in the separate make session not to be
# disturbed by any parallel make jobs. Once fixdep is done
-# we issue the requested build with FIXDEP=1 variable.
+# we issue the requested build with FIXDEP_BUILT=1 variable.
#
# The fixdep build is disabled for $(NON_CONFIG_TARGETS)
# targets, because it's not necessary.
-ifdef FIXDEP
+ifdef FIXDEP_BUILT
force_fixdep := 0
else
force_fixdep := $(config)
@@ -262,6 +257,8 @@ ifneq ($(SHELLCHECK),)
ifeq ($(shell expr $(shell $(SHELLCHECK) --version | grep version: | \
sed -e 's/.\+ \([0-9]\+\).\([0-9]\+\).\([0-9]\+\)/\1\2\3/g') \< 060), 1)
SHELLCHECK :=
+ else
+ SHELLCHECK := $(SHELLCHECK) -s bash -a -S warning
endif
endif
@@ -287,7 +284,7 @@ $(goals) all: sub-make
sub-make: fixdep
@./check-headers.sh
- $(Q)$(MAKE) FIXDEP=1 -f Makefile.perf $(goals)
+ $(Q)$(MAKE) FIXDEP_BUILT=1 -f Makefile.perf $(goals)
else # force_fixdep
@@ -942,7 +939,7 @@ $(OUTPUT)dlfilters/%.so: $(OUTPUT)dlfilters/%.o
ifndef NO_JVMTI
LIBJVMTI_IN := $(OUTPUT)jvmti/jvmti-in.o
-$(LIBJVMTI_IN): FORCE
+$(LIBJVMTI_IN): prepare FORCE
$(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=jvmti obj=jvmti
$(OUTPUT)$(LIBJVMTI): $(LIBJVMTI_IN)
@@ -1104,7 +1101,7 @@ endif
$(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
$(call QUIET_INSTALL, perf-iostat) \
$(INSTALL) $(OUTPUT)perf-iostat -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
-ifndef NO_LIBPERL
+ifdef LIBPERL
$(call QUIET_INSTALL, perl-scripts) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'; \
$(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -m 644 -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'; \
@@ -1147,7 +1144,8 @@ install-tests: all install-gtk
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_probe'; \
$(INSTALL) tests/shell/base_probe/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_probe'; \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_report'; \
- $(INSTALL) tests/shell/base_probe/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_report'; \
+ $(INSTALL) tests/shell/base_report/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_report'; \
+ $(INSTALL) tests/shell/base_report/*.txt '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_report'; \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/coresight' ; \
$(INSTALL) tests/shell/coresight/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/coresight'
$(Q)$(MAKE) -C tests/shell/coresight install-tests
@@ -1175,7 +1173,7 @@ SKELETONS += $(SKEL_OUT)/bperf_leader.skel.h $(SKEL_OUT)/bperf_follower.skel.h
SKELETONS += $(SKEL_OUT)/bperf_cgroup.skel.h $(SKEL_OUT)/func_latency.skel.h
SKELETONS += $(SKEL_OUT)/off_cpu.skel.h $(SKEL_OUT)/lock_contention.skel.h
SKELETONS += $(SKEL_OUT)/kwork_trace.skel.h $(SKEL_OUT)/sample_filter.skel.h
-SKELETONS += $(SKEL_OUT)/kwork_top.skel.h
+SKELETONS += $(SKEL_OUT)/kwork_top.skel.h $(SKEL_OUT)/syscall_summary.skel.h
SKELETONS += $(SKEL_OUT)/bench_uprobe.skel.h
SKELETONS += $(SKEL_OUT)/augmented_raw_syscalls.skel.h
@@ -1249,8 +1247,11 @@ else
$(Q)cp "$(VMLINUX_H)" $@
endif
-$(SKEL_TMP_OUT)/%.bpf.o: util/bpf_skel/%.bpf.c $(LIBBPF) $(SKEL_OUT)/vmlinux.h | $(SKEL_TMP_OUT)
- $(QUIET_CLANG)$(CLANG) -g -O2 --target=bpf $(CLANG_OPTIONS) $(BPF_INCLUDE) $(TOOLS_UAPI_INCLUDE) \
+$(SKEL_TMP_OUT)/%.bpf.o: $(OUTPUT)PERF-VERSION-FILE util/bpf_skel/perf_version.h | $(SKEL_TMP_OUT)
+$(SKEL_TMP_OUT)/%.bpf.o: util/bpf_skel/%.bpf.c $(LIBBPF) $(SKEL_OUT)/vmlinux.h
+ $(QUIET_CLANG)$(CLANG) -g -O2 -fno-stack-protector --target=bpf \
+ $(CLANG_OPTIONS) $(BPF_INCLUDE) $(TOOLS_UAPI_INCLUDE) \
+ -include $(OUTPUT)PERF-VERSION-FILE -include util/bpf_skel/perf_version.h \
-c $(filter util/bpf_skel/%.bpf.c,$^) -o $@
$(SKEL_OUT)/%.skel.h: $(SKEL_TMP_OUT)/%.bpf.o | $(BPFTOOL)
@@ -1269,9 +1270,24 @@ endif # CONFIG_PERF_BPF_SKEL
bpf-skel-clean:
$(call QUIET_CLEAN, bpf-skel) $(RM) -r $(SKEL_TMP_OUT) $(SKELETONS) $(SKEL_OUT)/vmlinux.h
+pmu-events-clean:
+ifeq ($(OUTPUT),)
+ $(call QUIET_CLEAN, pmu-events) $(RM) \
+ pmu-events/pmu-events.c \
+ pmu-events/metric_test.log \
+ pmu-events/test-empty-pmu-events.c \
+ pmu-events/empty-pmu-events.log
+else # When an OUTPUT directory is present, clean up the copied pmu-events/arch directory.
+ $(call QUIET_CLEAN, pmu-events) $(RM) -r $(OUTPUT)pmu-events/arch \
+ $(OUTPUT)pmu-events/pmu-events.c \
+ $(OUTPUT)pmu-events/metric_test.log \
+ $(OUTPUT)pmu-events/test-empty-pmu-events.c \
+ $(OUTPUT)pmu-events/empty-pmu-events.log
+endif
+
clean:: $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean $(LIBSYMBOL)-clean $(LIBPERF)-clean \
arm64-sysreg-defs-clean fixdep-clean python-clean bpf-skel-clean \
- tests-coresight-targets-clean
+ tests-coresight-targets-clean pmu-events-clean
$(call QUIET_CLEAN, core-objs) $(RM) $(LIBPERF_A) $(OUTPUT)perf-archive \
$(OUTPUT)perf-iostat $(LANG_BINDINGS)
$(Q)find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '*.a' -delete -o \
@@ -1284,10 +1300,6 @@ clean:: $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean $(LIBSYMBOL)-clean $(
$(OUTPUT)FEATURE-DUMP $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex* \
$(OUTPUT)util/intel-pt-decoder/inat-tables.c \
$(OUTPUT)tests/llvm-src-{base,kbuild,prologue,relocation}.c \
- $(OUTPUT)pmu-events/pmu-events.c \
- $(OUTPUT)pmu-events/test-empty-pmu-events.c \
- $(OUTPUT)pmu-events/empty-pmu-events.log \
- $(OUTPUT)pmu-events/metric_test.log \
$(OUTPUT)$(fadvise_advice_array) \
$(OUTPUT)$(fsconfig_arrays) \
$(OUTPUT)$(fsmount_arrays) \
diff --git a/tools/perf/arch/arm/annotate/instructions.c b/tools/perf/arch/arm/annotate/instructions.c
index cf91a43362b0..5e667b0f5512 100644
--- a/tools/perf/arch/arm/annotate/instructions.c
+++ b/tools/perf/arch/arm/annotate/instructions.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/zalloc.h>
+#include <errno.h>
#include <sys/types.h>
#include <regex.h>
#include <stdlib.h>
diff --git a/tools/perf/arch/arm/entry/syscalls/syscall.tbl b/tools/perf/arch/arm/entry/syscalls/syscall.tbl
index 27c1d5ebcd91..b07e699aaa3c 100644
--- a/tools/perf/arch/arm/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/arm/entry/syscalls/syscall.tbl
@@ -482,3 +482,5 @@
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
467 common open_tree_attr sys_open_tree_attr
+468 common file_getattr sys_file_getattr
+469 common file_setattr sys_file_setattr
diff --git a/tools/perf/arch/arm/util/Build b/tools/perf/arch/arm/util/Build
index f7a8b37d1c68..fd695e1fdaee 100644
--- a/tools/perf/arch/arm/util/Build
+++ b/tools/perf/arch/arm/util/Build
@@ -3,4 +3,4 @@ perf-util-y += perf_regs.o
perf-util-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
perf-util-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-perf-util-$(CONFIG_AUXTRACE) += pmu.o auxtrace.o cs-etm.o
+perf-util-y += pmu.o auxtrace.o cs-etm.o
diff --git a/tools/perf/arch/arm/util/auxtrace.c b/tools/perf/arch/arm/util/auxtrace.c
index 3b8eca0ffb17..eb6404267f17 100644
--- a/tools/perf/arch/arm/util/auxtrace.c
+++ b/tools/perf/arch/arm/util/auxtrace.c
@@ -5,6 +5,7 @@
*/
#include <dirent.h>
+#include <errno.h>
#include <stdbool.h>
#include <linux/coresight-pmu.h>
#include <linux/zalloc.h>
diff --git a/tools/perf/arch/arm/util/pmu.c b/tools/perf/arch/arm/util/pmu.c
index f70075c89aa0..9be8da5207f5 100644
--- a/tools/perf/arch/arm/util/pmu.c
+++ b/tools/perf/arch/arm/util/pmu.c
@@ -20,7 +20,6 @@ void perf_pmu__arch_init(struct perf_pmu *pmu)
{
struct perf_cpu_map *intersect, *online = cpu_map__online();
-#ifdef HAVE_AUXTRACE_SUPPORT
if (!strcmp(pmu->name, CORESIGHT_ETM_PMU_NAME)) {
/* add ETM default config here */
pmu->auxtrace = true;
@@ -39,7 +38,6 @@ void perf_pmu__arch_init(struct perf_pmu *pmu)
pmu->selectable = true;
#endif
}
-#endif
/* Workaround some ARM PMU's failing to correctly set CPU maps for online processors. */
intersect = perf_cpu_map__intersect(online, pmu->cpus);
perf_cpu_map__put(online);
diff --git a/tools/perf/arch/arm64/annotate/instructions.c b/tools/perf/arch/arm64/annotate/instructions.c
index d465d093e7eb..16cb62d40bd9 100644
--- a/tools/perf/arch/arm64/annotate/instructions.c
+++ b/tools/perf/arch/arm64/annotate/instructions.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
+#include <errno.h>
#include <sys/types.h>
#include <regex.h>
#include <stdlib.h>
diff --git a/tools/perf/arch/arm64/util/Build b/tools/perf/arch/arm64/util/Build
index a74521b79eaa..d63881081d2e 100644
--- a/tools/perf/arch/arm64/util/Build
+++ b/tools/perf/arch/arm64/util/Build
@@ -1,13 +1,14 @@
+perf-util-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
+perf-util-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
+perf-util-y += ../../arm/util/auxtrace.o
+perf-util-y += ../../arm/util/cs-etm.o
+perf-util-y += ../../arm/util/pmu.o
+perf-util-y += arm-spe.o
perf-util-y += header.o
+perf-util-y += hisi-ptt.o
perf-util-y += machine.o
+perf-util-y += mem-events.o
perf-util-y += perf_regs.o
-perf-util-y += tsc.o
perf-util-y += pmu.o
-perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
-perf-util-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
-perf-util-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-
-perf-util-$(CONFIG_AUXTRACE) += ../../arm/util/pmu.o \
- ../../arm/util/auxtrace.o \
- ../../arm/util/cs-etm.o \
- arm-spe.o mem-events.o hisi-ptt.o
+perf-util-y += tsc.o
diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
index 4f2833b62ff5..d5ec1408d0ae 100644
--- a/tools/perf/arch/arm64/util/arm-spe.c
+++ b/tools/perf/arch/arm64/util/arm-spe.c
@@ -10,6 +10,7 @@
#include <linux/log2.h>
#include <linux/string.h>
#include <linux/zalloc.h>
+#include <errno.h>
#include <time.h>
#include "../../../util/cpumap.h"
@@ -121,12 +122,17 @@ static int arm_spe_save_cpu_header(struct auxtrace_record *itr,
/* No Arm SPE PMU is found */
data[ARM_SPE_CPU_PMU_TYPE] = ULLONG_MAX;
data[ARM_SPE_CAP_MIN_IVAL] = 0;
+ data[ARM_SPE_CAP_EVENT_FILTER] = 0;
} else {
data[ARM_SPE_CPU_PMU_TYPE] = pmu->type;
if (perf_pmu__scan_file(pmu, "caps/min_interval", "%lu", &val) != 1)
val = 0;
data[ARM_SPE_CAP_MIN_IVAL] = val;
+
+ if (perf_pmu__scan_file(pmu, "caps/event_filter", "%lx", &val) != 1)
+ val = 0;
+ data[ARM_SPE_CAP_EVENT_FILTER] = val;
}
free(cpuid);
diff --git a/tools/perf/arch/arm64/util/arm64_exception_types.h b/tools/perf/arch/arm64/util/arm64_exception_types.h
index 27c981ebe401..bf827f19ace0 100644
--- a/tools/perf/arch/arm64/util/arm64_exception_types.h
+++ b/tools/perf/arch/arm64/util/arm64_exception_types.h
@@ -31,9 +31,10 @@
#define ESR_ELx_EC_FP_ASIMD (0x07)
#define ESR_ELx_EC_CP10_ID (0x08) /* EL2 only */
#define ESR_ELx_EC_PAC (0x09) /* EL2 and above */
-/* Unallocated EC: 0x0A - 0x0B */
+#define ESR_ELx_EC_OTHER (0x0A)
+/* Unallocated EC: 0x0B */
#define ESR_ELx_EC_CP14_64 (0x0C)
-/* Unallocated EC: 0x0d */
+#define ESR_ELx_EC_BTI (0x0D)
#define ESR_ELx_EC_ILL (0x0E)
/* Unallocated EC: 0x0F - 0x10 */
#define ESR_ELx_EC_SVC32 (0x11)
@@ -46,7 +47,10 @@
#define ESR_ELx_EC_SYS64 (0x18)
#define ESR_ELx_EC_SVE (0x19)
#define ESR_ELx_EC_ERET (0x1a) /* EL2 only */
-/* Unallocated EC: 0x1b - 0x1E */
+/* Unallocated EC: 0x1B */
+#define ESR_ELx_EC_FPAC (0x1C) /* EL1 and above */
+#define ESR_ELx_EC_SME (0x1D)
+/* Unallocated EC: 0x1E */
#define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */
#define ESR_ELx_EC_IABT_LOW (0x20)
#define ESR_ELx_EC_IABT_CUR (0x21)
@@ -55,11 +59,12 @@
#define ESR_ELx_EC_DABT_LOW (0x24)
#define ESR_ELx_EC_DABT_CUR (0x25)
#define ESR_ELx_EC_SP_ALIGN (0x26)
-/* Unallocated EC: 0x27 */
+#define ESR_ELx_EC_MOPS (0x27)
#define ESR_ELx_EC_FP_EXC32 (0x28)
/* Unallocated EC: 0x29 - 0x2B */
#define ESR_ELx_EC_FP_EXC64 (0x2C)
-/* Unallocated EC: 0x2D - 0x2E */
+#define ESR_ELx_EC_GCS (0x2D)
+/* Unallocated EC: 0x2E */
#define ESR_ELx_EC_SERROR (0x2F)
#define ESR_ELx_EC_BREAKPT_LOW (0x30)
#define ESR_ELx_EC_BREAKPT_CUR (0x31)
diff --git a/tools/perf/arch/arm64/util/hisi-ptt.c b/tools/perf/arch/arm64/util/hisi-ptt.c
index eac9739c87e6..fe457fd58c9e 100644
--- a/tools/perf/arch/arm64/util/hisi-ptt.c
+++ b/tools/perf/arch/arm64/util/hisi-ptt.c
@@ -9,6 +9,7 @@
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
+#include <errno.h>
#include <time.h>
#include <internal/lib.h> // page_size
diff --git a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
index 1e8c44c7b614..7a7049c2c307 100644
--- a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
+++ b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
@@ -382,3 +382,5 @@
465 n64 listxattrat sys_listxattrat
466 n64 removexattrat sys_removexattrat
467 n64 open_tree_attr sys_open_tree_attr
+468 n64 file_getattr sys_file_getattr
+469 n64 file_setattr sys_file_setattr
diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
index 9a084bdb8926..b453e80dfc00 100644
--- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
@@ -558,3 +558,5 @@
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
467 common open_tree_attr sys_open_tree_attr
+468 common file_getattr sys_file_getattr
+469 common file_setattr sys_file_setattr
diff --git a/tools/perf/arch/powerpc/util/Build b/tools/perf/arch/powerpc/util/Build
index ed82715080f9..3d0d5427aef7 100644
--- a/tools/perf/arch/powerpc/util/Build
+++ b/tools/perf/arch/powerpc/util/Build
@@ -5,9 +5,9 @@ perf-util-y += mem-events.o
perf-util-y += pmu.o
perf-util-y += sym-handling.o
perf-util-y += evsel.o
-perf-util-y += event.o
perf-util-$(CONFIG_LIBDW) += skip-callchain-idx.o
perf-util-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
perf-util-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-util-y += auxtrace.o
diff --git a/tools/perf/arch/powerpc/util/auxtrace.c b/tools/perf/arch/powerpc/util/auxtrace.c
new file mode 100644
index 000000000000..292ea335e4ff
--- /dev/null
+++ b/tools/perf/arch/powerpc/util/auxtrace.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VPA support
+ */
+#include <errno.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+
+#include "../../util/evlist.h"
+#include "../../util/debug.h"
+#include "../../util/auxtrace.h"
+#include "../../util/powerpc-vpadtl.h"
+#include "../../util/record.h"
+#include <internal/lib.h> // page_size
+
+#define KiB(x) ((x) * 1024)
+
+static int
+powerpc_vpadtl_recording_options(struct auxtrace_record *ar __maybe_unused,
+ struct evlist *evlist __maybe_unused,
+ struct record_opts *opts)
+{
+ opts->full_auxtrace = true;
+
+ /*
+ * Set auxtrace_mmap_pages to minimum
+ * two pages
+ */
+ if (!opts->auxtrace_mmap_pages) {
+ opts->auxtrace_mmap_pages = KiB(128) / page_size;
+ if (opts->mmap_pages == UINT_MAX)
+ opts->mmap_pages = KiB(256) / page_size;
+ }
+
+ return 0;
+}
+
+static size_t powerpc_vpadtl_info_priv_size(struct auxtrace_record *itr __maybe_unused,
+ struct evlist *evlist __maybe_unused)
+{
+ return VPADTL_AUXTRACE_PRIV_SIZE;
+}
+
+static int
+powerpc_vpadtl_info_fill(struct auxtrace_record *itr __maybe_unused,
+ struct perf_session *session __maybe_unused,
+ struct perf_record_auxtrace_info *auxtrace_info,
+ size_t priv_size __maybe_unused)
+{
+ auxtrace_info->type = PERF_AUXTRACE_VPA_DTL;
+
+ return 0;
+}
+
+static void powerpc_vpadtl_free(struct auxtrace_record *itr)
+{
+ free(itr);
+}
+
+static u64 powerpc_vpadtl_reference(struct auxtrace_record *itr __maybe_unused)
+{
+ return 0;
+}
+
+struct auxtrace_record *auxtrace_record__init(struct evlist *evlist,
+ int *err)
+{
+ struct auxtrace_record *aux;
+ struct evsel *pos;
+ int found = 0;
+
+ evlist__for_each_entry(evlist, pos) {
+ if (strstarts(pos->name, "vpa_dtl")) {
+ found = 1;
+ pos->needs_auxtrace_mmap = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return NULL;
+
+ /*
+ * To obtain the auxtrace buffer file descriptor, the auxtrace event
+ * must come first.
+ */
+ evlist__to_front(pos->evlist, pos);
+
+ aux = zalloc(sizeof(*aux));
+ if (aux == NULL) {
+ pr_debug("aux record is NULL\n");
+ *err = -ENOMEM;
+ return NULL;
+ }
+
+ aux->recording_options = powerpc_vpadtl_recording_options;
+ aux->info_priv_size = powerpc_vpadtl_info_priv_size;
+ aux->info_fill = powerpc_vpadtl_info_fill;
+ aux->free = powerpc_vpadtl_free;
+ aux->reference = powerpc_vpadtl_reference;
+ return aux;
+}
diff --git a/tools/perf/arch/powerpc/util/event.c b/tools/perf/arch/powerpc/util/event.c
deleted file mode 100644
index 77d8cc2b5691..000000000000
--- a/tools/perf/arch/powerpc/util/event.c
+++ /dev/null
@@ -1,60 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/zalloc.h>
-
-#include "../../../util/event.h"
-#include "../../../util/synthetic-events.h"
-#include "../../../util/machine.h"
-#include "../../../util/tool.h"
-#include "../../../util/map.h"
-#include "../../../util/debug.h"
-#include "../../../util/sample.h"
-
-void arch_perf_parse_sample_weight(struct perf_sample *data,
- const __u64 *array, u64 type)
-{
- union perf_sample_weight weight;
-
- weight.full = *array;
- if (type & PERF_SAMPLE_WEIGHT)
- data->weight = weight.full;
- else {
- data->weight = weight.var1_dw;
- data->ins_lat = weight.var2_w;
- data->p_stage_cyc = weight.var3_w;
- }
-}
-
-void arch_perf_synthesize_sample_weight(const struct perf_sample *data,
- __u64 *array, u64 type)
-{
- *array = data->weight;
-
- if (type & PERF_SAMPLE_WEIGHT_STRUCT) {
- *array &= 0xffffffff;
- *array |= ((u64)data->ins_lat << 32);
- }
-}
-
-const char *arch_perf_header_entry(const char *se_header)
-{
- if (!strcmp(se_header, "Local INSTR Latency"))
- return "Finish Cyc";
- else if (!strcmp(se_header, "INSTR Latency"))
- return "Global Finish_cyc";
- else if (!strcmp(se_header, "Local Pipeline Stage Cycle"))
- return "Dispatch Cyc";
- else if (!strcmp(se_header, "Pipeline Stage Cycle"))
- return "Global Dispatch_cyc";
- return se_header;
-}
-
-int arch_support_sort_key(const char *sort_key)
-{
- if (!strcmp(sort_key, "p_stage_cyc"))
- return 1;
- if (!strcmp(sort_key, "local_p_stage_cyc"))
- return 1;
- return 0;
-}
diff --git a/tools/perf/arch/riscv/util/kvm-stat.c b/tools/perf/arch/riscv/util/kvm-stat.c
index 491aef449d1a..3ea7acb5e159 100644
--- a/tools/perf/arch/riscv/util/kvm-stat.c
+++ b/tools/perf/arch/riscv/util/kvm-stat.c
@@ -9,10 +9,10 @@
#include <memory.h>
#include "../../../util/evsel.h"
#include "../../../util/kvm-stat.h"
-#include "riscv_exception_types.h"
+#include "riscv_trap_types.h"
#include "debug.h"
-define_exit_reasons_table(riscv_exit_reasons, kvm_riscv_exception_class);
+define_exit_reasons_table(riscv_exit_reasons, kvm_riscv_trap_class);
const char *vcpu_id_str = "id";
const char *kvm_exit_reason = "scause";
@@ -30,7 +30,7 @@ static void event_get_key(struct evsel *evsel,
struct event_key *key)
{
key->info = 0;
- key->key = evsel__intval(evsel, sample, kvm_exit_reason);
+ key->key = evsel__intval(evsel, sample, kvm_exit_reason) & ~CAUSE_IRQ_FLAG;
key->exit_reasons = riscv_exit_reasons;
}
diff --git a/tools/perf/arch/riscv/util/riscv_exception_types.h b/tools/perf/arch/riscv/util/riscv_exception_types.h
deleted file mode 100644
index c49b8fa5e847..000000000000
--- a/tools/perf/arch/riscv/util/riscv_exception_types.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#ifndef ARCH_PERF_RISCV_EXCEPTION_TYPES_H
-#define ARCH_PERF_RISCV_EXCEPTION_TYPES_H
-
-#define EXC_INST_MISALIGNED 0
-#define EXC_INST_ACCESS 1
-#define EXC_INST_ILLEGAL 2
-#define EXC_BREAKPOINT 3
-#define EXC_LOAD_MISALIGNED 4
-#define EXC_LOAD_ACCESS 5
-#define EXC_STORE_MISALIGNED 6
-#define EXC_STORE_ACCESS 7
-#define EXC_SYSCALL 8
-#define EXC_HYPERVISOR_SYSCALL 9
-#define EXC_SUPERVISOR_SYSCALL 10
-#define EXC_INST_PAGE_FAULT 12
-#define EXC_LOAD_PAGE_FAULT 13
-#define EXC_STORE_PAGE_FAULT 15
-#define EXC_INST_GUEST_PAGE_FAULT 20
-#define EXC_LOAD_GUEST_PAGE_FAULT 21
-#define EXC_VIRTUAL_INST_FAULT 22
-#define EXC_STORE_GUEST_PAGE_FAULT 23
-
-#define EXC(x) {EXC_##x, #x }
-
-#define kvm_riscv_exception_class \
- EXC(INST_MISALIGNED), EXC(INST_ACCESS), EXC(INST_ILLEGAL), \
- EXC(BREAKPOINT), EXC(LOAD_MISALIGNED), EXC(LOAD_ACCESS), \
- EXC(STORE_MISALIGNED), EXC(STORE_ACCESS), EXC(SYSCALL), \
- EXC(HYPERVISOR_SYSCALL), EXC(SUPERVISOR_SYSCALL), \
- EXC(INST_PAGE_FAULT), EXC(LOAD_PAGE_FAULT), EXC(STORE_PAGE_FAULT), \
- EXC(INST_GUEST_PAGE_FAULT), EXC(LOAD_GUEST_PAGE_FAULT), \
- EXC(VIRTUAL_INST_FAULT), EXC(STORE_GUEST_PAGE_FAULT)
-
-#endif /* ARCH_PERF_RISCV_EXCEPTION_TYPES_H */
diff --git a/tools/perf/arch/riscv/util/riscv_trap_types.h b/tools/perf/arch/riscv/util/riscv_trap_types.h
new file mode 100644
index 000000000000..6cc71eb01fca
--- /dev/null
+++ b/tools/perf/arch/riscv/util/riscv_trap_types.h
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef ARCH_PERF_RISCV_TRAP_TYPES_H
+#define ARCH_PERF_RISCV_TRAP_TYPES_H
+
+/* Exception cause high bit - is an interrupt if set */
+#define CAUSE_IRQ_FLAG (_AC(1, UL) << (__riscv_xlen - 1))
+
+/* Interrupt causes (minus the high bit) */
+#define IRQ_S_SOFT 1
+#define IRQ_VS_SOFT 2
+#define IRQ_M_SOFT 3
+#define IRQ_S_TIMER 5
+#define IRQ_VS_TIMER 6
+#define IRQ_M_TIMER 7
+#define IRQ_S_EXT 9
+#define IRQ_VS_EXT 10
+#define IRQ_M_EXT 11
+#define IRQ_S_GEXT 12
+#define IRQ_PMU_OVF 13
+
+/* Exception causes */
+#define EXC_INST_MISALIGNED 0
+#define EXC_INST_ACCESS 1
+#define EXC_INST_ILLEGAL 2
+#define EXC_BREAKPOINT 3
+#define EXC_LOAD_MISALIGNED 4
+#define EXC_LOAD_ACCESS 5
+#define EXC_STORE_MISALIGNED 6
+#define EXC_STORE_ACCESS 7
+#define EXC_SYSCALL 8
+#define EXC_HYPERVISOR_SYSCALL 9
+#define EXC_SUPERVISOR_SYSCALL 10
+#define EXC_INST_PAGE_FAULT 12
+#define EXC_LOAD_PAGE_FAULT 13
+#define EXC_STORE_PAGE_FAULT 15
+#define EXC_INST_GUEST_PAGE_FAULT 20
+#define EXC_LOAD_GUEST_PAGE_FAULT 21
+#define EXC_VIRTUAL_INST_FAULT 22
+#define EXC_STORE_GUEST_PAGE_FAULT 23
+
+#define TRAP(x) { x, #x }
+
+#define kvm_riscv_trap_class \
+ TRAP(IRQ_S_SOFT), TRAP(IRQ_VS_SOFT), TRAP(IRQ_M_SOFT), \
+ TRAP(IRQ_S_TIMER), TRAP(IRQ_VS_TIMER), TRAP(IRQ_M_TIMER), \
+ TRAP(IRQ_S_EXT), TRAP(IRQ_VS_EXT), TRAP(IRQ_M_EXT), \
+ TRAP(IRQ_S_GEXT), TRAP(IRQ_PMU_OVF), \
+ TRAP(EXC_INST_MISALIGNED), TRAP(EXC_INST_ACCESS), TRAP(EXC_INST_ILLEGAL), \
+ TRAP(EXC_BREAKPOINT), TRAP(EXC_LOAD_MISALIGNED), TRAP(EXC_LOAD_ACCESS), \
+ TRAP(EXC_STORE_MISALIGNED), TRAP(EXC_STORE_ACCESS), TRAP(EXC_SYSCALL), \
+ TRAP(EXC_HYPERVISOR_SYSCALL), TRAP(EXC_SUPERVISOR_SYSCALL), \
+ TRAP(EXC_INST_PAGE_FAULT), TRAP(EXC_LOAD_PAGE_FAULT), \
+ TRAP(EXC_STORE_PAGE_FAULT), TRAP(EXC_INST_GUEST_PAGE_FAULT), \
+ TRAP(EXC_LOAD_GUEST_PAGE_FAULT), TRAP(EXC_VIRTUAL_INST_FAULT), \
+ TRAP(EXC_STORE_GUEST_PAGE_FAULT)
+
+#endif /* ARCH_PERF_RISCV_TRAP_TYPES_H */
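
The table above can mix interrupt and exception causes because event_get_key() in kvm-stat.c now clears CAUSE_IRQ_FLAG before the lookup, putting interrupts and synchronous traps in the same numeric key space. A small sketch of that decoding, assuming RV64 (__riscv_xlen == 64), shown only to illustrate the masking:

	#include <stdint.h>

	#define XLEN		64
	#define IRQ_FLAG	(1ULL << (XLEN - 1))	/* CAUSE_IRQ_FLAG on RV64 */

	/* Mirrors event_get_key(): drop the interrupt bit, keep the cause code. */
	static inline uint64_t trap_key(uint64_t scause)
	{
		return scause & ~IRQ_FLAG;
	}

	/* e.g. trap_key(0x8000000000000005ULL) == 5, i.e. IRQ_S_TIMER */

Note that after masking, an interrupt cause and an exception cause can share a numeric key (IRQ_S_SOFT and EXC_INST_ACCESS are both 1).
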
diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
index a4569b96ef06..8a6744d658db 100644
--- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
@@ -470,3 +470,5 @@
465 common listxattrat sys_listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat sys_removexattrat
467 common open_tree_attr sys_open_tree_attr sys_open_tree_attr
+468 common file_getattr sys_file_getattr sys_file_getattr
+469 common file_setattr sys_file_setattr sys_file_setattr
diff --git a/tools/perf/arch/s390/util/Build b/tools/perf/arch/s390/util/Build
index 736c0ad09194..c64eb18dbdae 100644
--- a/tools/perf/arch/s390/util/Build
+++ b/tools/perf/arch/s390/util/Build
@@ -7,4 +7,4 @@ perf-util-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
perf-util-y += machine.o
perf-util-y += pmu.o
-perf-util-$(CONFIG_AUXTRACE) += auxtrace.o
+perf-util-y += auxtrace.o
diff --git a/tools/perf/arch/s390/util/auxtrace.c b/tools/perf/arch/s390/util/auxtrace.c
index 5068baa3e092..1a3676145066 100644
--- a/tools/perf/arch/s390/util/auxtrace.c
+++ b/tools/perf/arch/s390/util/auxtrace.c
@@ -1,3 +1,4 @@
+#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <linux/kernel.h>
diff --git a/tools/perf/arch/sh/entry/syscalls/syscall.tbl b/tools/perf/arch/sh/entry/syscalls/syscall.tbl
index 52a7652fcff6..5e9c9eff5539 100644
--- a/tools/perf/arch/sh/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/sh/entry/syscalls/syscall.tbl
@@ -471,3 +471,5 @@
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
467 common open_tree_attr sys_open_tree_attr
+468 common file_getattr sys_file_getattr
+469 common file_setattr sys_file_setattr
diff --git a/tools/perf/arch/sparc/entry/syscalls/syscall.tbl b/tools/perf/arch/sparc/entry/syscalls/syscall.tbl
index 83e45eb6c095..ebb7d06d1044 100644
--- a/tools/perf/arch/sparc/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/sparc/entry/syscalls/syscall.tbl
@@ -513,3 +513,5 @@
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
467 common open_tree_attr sys_open_tree_attr
+468 common file_getattr sys_file_getattr
+469 common file_setattr sys_file_setattr
diff --git a/tools/perf/arch/x86/Build b/tools/perf/arch/x86/Build
index afae7b8f6bd6..d31a1168757c 100644
--- a/tools/perf/arch/x86/Build
+++ b/tools/perf/arch/x86/Build
@@ -10,6 +10,6 @@ endif
$(OUTPUT)%.shellcheck_log: %
$(call rule_mkdir)
- $(Q)$(call echo-cmd,test)shellcheck -a -S warning "$<" > $@ || (cat $@ && rm $@ && false)
+ $(Q)$(call echo-cmd,test)$(SHELLCHECK) "$<" > $@ || (cat $@ && rm $@ && false)
perf-test-y += $(SHELL_TEST_LOGS)
diff --git a/tools/perf/arch/x86/annotate/instructions.c b/tools/perf/arch/x86/annotate/instructions.c
index c6d403eae744..803f9351a3fb 100644
--- a/tools/perf/arch/x86/annotate/instructions.c
+++ b/tools/perf/arch/x86/annotate/instructions.c
@@ -248,6 +248,7 @@ static void update_insn_state_x86(struct type_state *state,
tsr = &state->regs[state->ret_reg];
tsr->type = type_die;
tsr->kind = TSR_KIND_TYPE;
+ tsr->offset = 0;
tsr->ok = true;
pr_debug_dtp("call [%x] return -> reg%d",
@@ -284,6 +285,7 @@ static void update_insn_state_x86(struct type_state *state,
!strcmp(var_name, "this_cpu_off") &&
tsr->kind == TSR_KIND_CONST) {
tsr->kind = TSR_KIND_PERCPU_BASE;
+ tsr->offset = 0;
tsr->ok = true;
imm_value = tsr->imm_value;
}
@@ -291,6 +293,19 @@ static void update_insn_state_x86(struct type_state *state,
else
return;
+ /* Ignore add to non-pointer or non-const types */
+ if (tsr->kind == TSR_KIND_POINTER ||
+ (dwarf_tag(&tsr->type) == DW_TAG_pointer_type &&
+ src->reg1 != DWARF_REG_PC && tsr->kind == TSR_KIND_TYPE && !dst->mem_ref)) {
+ tsr->offset += imm_value;
+ pr_debug_dtp("add [%x] offset %#"PRIx64" to reg%d",
+ insn_offset, imm_value, dst->reg1);
+ pr_debug_type_name(&tsr->type, tsr->kind);
+ }
+
+ if (tsr->kind == TSR_KIND_CONST)
+ tsr->imm_value += imm_value;
+
if (tsr->kind != TSR_KIND_PERCPU_BASE)
return;
@@ -301,7 +316,8 @@ static void update_insn_state_x86(struct type_state *state,
* as a pointer.
*/
tsr->type = type_die;
- tsr->kind = TSR_KIND_POINTER;
+ tsr->kind = TSR_KIND_PERCPU_POINTER;
+ tsr->offset = 0;
tsr->ok = true;
pr_debug_dtp("add [%x] percpu %#"PRIx64" -> reg%d",
@@ -311,6 +327,135 @@ static void update_insn_state_x86(struct type_state *state,
return;
}
+ if (!strncmp(dl->ins.name, "sub", 3)) {
+ u64 imm_value = -1ULL;
+
+ if (!has_reg_type(state, dst->reg1))
+ return;
+
+ tsr = &state->regs[dst->reg1];
+ tsr->copied_from = -1;
+
+ if (src->imm)
+ imm_value = src->offset;
+ else if (has_reg_type(state, src->reg1) &&
+ state->regs[src->reg1].kind == TSR_KIND_CONST)
+ imm_value = state->regs[src->reg1].imm_value;
+
+ if (tsr->kind == TSR_KIND_POINTER ||
+ (dwarf_tag(&tsr->type) == DW_TAG_pointer_type &&
+ src->reg1 != DWARF_REG_PC && tsr->kind == TSR_KIND_TYPE && !dst->mem_ref)) {
+ tsr->offset -= imm_value;
+ pr_debug_dtp("sub [%x] offset %#"PRIx64" to reg%d",
+ insn_offset, imm_value, dst->reg1);
+ pr_debug_type_name(&tsr->type, tsr->kind);
+ }
+
+ if (tsr->kind == TSR_KIND_CONST)
+ tsr->imm_value -= imm_value;
+
+ return;
+ }
+
+ if (!strncmp(dl->ins.name, "lea", 3)) {
+ int sreg = src->reg1;
+ struct type_state_reg src_tsr;
+
+ if (!has_reg_type(state, sreg) ||
+ !has_reg_type(state, dst->reg1) ||
+ !src->mem_ref)
+ return;
+
+ src_tsr = state->regs[sreg];
+ tsr = &state->regs[dst->reg1];
+
+ tsr->copied_from = -1;
+ tsr->ok = false;
+
+ /* Case 1: Based on stack pointer or frame pointer */
+ if (sreg == fbreg || sreg == state->stack_reg) {
+ struct type_state_stack *stack;
+ int offset = src->offset - fboff;
+
+ stack = find_stack_state(state, offset);
+ if (!stack)
+ return;
+
+ tsr->type = stack->type;
+ tsr->kind = TSR_KIND_POINTER;
+ tsr->offset = offset - stack->offset;
+ tsr->ok = true;
+
+ if (sreg == fbreg) {
+ pr_debug_dtp("lea [%x] address of -%#x(stack) -> reg%d",
+ insn_offset, -src->offset, dst->reg1);
+ } else {
+ pr_debug_dtp("lea [%x] address of %#x(reg%d) -> reg%d",
+ insn_offset, src->offset, sreg, dst->reg1);
+ }
+
+ pr_debug_type_name(&tsr->type, tsr->kind);
+ }
+ /* Case 2: Based on a register holding a typed pointer */
+ else if (src_tsr.ok && (src_tsr.kind == TSR_KIND_POINTER ||
+ (dwarf_tag(&src_tsr.type) == DW_TAG_pointer_type &&
+ src_tsr.kind == TSR_KIND_TYPE))) {
+
+ if (src_tsr.kind == TSR_KIND_TYPE &&
+ __die_get_real_type(&state->regs[sreg].type, &type_die) == NULL)
+ return;
+
+ if (src_tsr.kind == TSR_KIND_POINTER)
+ type_die = state->regs[sreg].type;
+
+ /* Check if the target type has a member at the new offset */
+ if (die_get_member_type(&type_die,
+ src->offset + src_tsr.offset, &type_die) == NULL)
+ return;
+
+ tsr->type = src_tsr.type;
+ tsr->kind = src_tsr.kind;
+ tsr->offset = src->offset + src_tsr.offset;
+ tsr->ok = true;
+
+ pr_debug_dtp("lea [%x] address of %s%#x(reg%d) -> reg%d",
+ insn_offset, src->offset < 0 ? "-" : "",
+ abs(src->offset), sreg, dst->reg1);
+
+ pr_debug_type_name(&tsr->type, tsr->kind);
+ }
+ return;
+ }
+
+ /* Invalidate register states for other ops which may change pointers */
+ if (has_reg_type(state, dst->reg1) && !dst->mem_ref &&
+ dwarf_tag(&state->regs[dst->reg1].type) == DW_TAG_pointer_type) {
+ if (!strncmp(dl->ins.name, "imul", 4) || !strncmp(dl->ins.name, "mul", 3) ||
+ !strncmp(dl->ins.name, "idiv", 4) || !strncmp(dl->ins.name, "div", 3) ||
+ !strncmp(dl->ins.name, "shl", 3) || !strncmp(dl->ins.name, "shr", 3) ||
+ !strncmp(dl->ins.name, "sar", 3) || !strncmp(dl->ins.name, "and", 3) ||
+ !strncmp(dl->ins.name, "or", 2) || !strncmp(dl->ins.name, "neg", 3) ||
+ !strncmp(dl->ins.name, "inc", 3) || !strncmp(dl->ins.name, "dec", 3)) {
+ pr_debug_dtp("%s [%x] invalidate reg%d\n",
+ dl->ins.name, insn_offset, dst->reg1);
+ state->regs[dst->reg1].ok = false;
+ state->regs[dst->reg1].copied_from = -1;
+ return;
+ }
+
+ if (!strncmp(dl->ins.name, "xor", 3) && dst->reg1 == src->reg1) {
+ /* xor reg, reg clears the register */
+ pr_debug_dtp("xor [%x] clear reg%d\n",
+ insn_offset, dst->reg1);
+
+ state->regs[dst->reg1].kind = TSR_KIND_CONST;
+ state->regs[dst->reg1].imm_value = 0;
+ state->regs[dst->reg1].ok = true;
+ state->regs[dst->reg1].copied_from = -1;
+ return;
+ }
+ }
+
if (strncmp(dl->ins.name, "mov", 3))
return;
@@ -345,6 +490,7 @@ static void update_insn_state_x86(struct type_state *state,
if (var_addr == 40) {
tsr->kind = TSR_KIND_CANARY;
+ tsr->offset = 0;
tsr->ok = true;
pr_debug_dtp("mov [%x] stack canary -> reg%d\n",
@@ -361,6 +507,7 @@ static void update_insn_state_x86(struct type_state *state,
tsr->type = type_die;
tsr->kind = TSR_KIND_TYPE;
+ tsr->offset = 0;
tsr->ok = true;
pr_debug_dtp("mov [%x] this-cpu addr=%#"PRIx64" -> reg%d",
@@ -372,6 +519,7 @@ static void update_insn_state_x86(struct type_state *state,
if (src->imm) {
tsr->kind = TSR_KIND_CONST;
tsr->imm_value = src->offset;
+ tsr->offset = 0;
tsr->ok = true;
pr_debug_dtp("mov [%x] imm=%#x -> reg%d\n",
@@ -388,10 +536,11 @@ static void update_insn_state_x86(struct type_state *state,
tsr->type = state->regs[src->reg1].type;
tsr->kind = state->regs[src->reg1].kind;
tsr->imm_value = state->regs[src->reg1].imm_value;
+ tsr->offset = state->regs[src->reg1].offset;
tsr->ok = true;
/* To copy back the variable type later (hopefully) */
- if (tsr->kind == TSR_KIND_TYPE)
+ if (tsr->kind == TSR_KIND_TYPE || tsr->kind == TSR_KIND_POINTER)
tsr->copied_from = src->reg1;
pr_debug_dtp("mov [%x] reg%d -> reg%d",
@@ -421,12 +570,14 @@ retry:
} else if (!stack->compound) {
tsr->type = stack->type;
tsr->kind = stack->kind;
+ tsr->offset = stack->ptr_offset;
tsr->ok = true;
} else if (die_get_member_type(&stack->type,
offset - stack->offset,
&type_die)) {
tsr->type = type_die;
tsr->kind = TSR_KIND_TYPE;
+ tsr->offset = 0;
tsr->ok = true;
} else {
tsr->ok = false;
@@ -446,15 +597,30 @@ retry:
else if (has_reg_type(state, sreg) && state->regs[sreg].ok &&
state->regs[sreg].kind == TSR_KIND_TYPE &&
die_deref_ptr_type(&state->regs[sreg].type,
- src->offset, &type_die)) {
+ src->offset + state->regs[sreg].offset, &type_die)) {
tsr->type = type_die;
tsr->kind = TSR_KIND_TYPE;
+ tsr->offset = 0;
tsr->ok = true;
pr_debug_dtp("mov [%x] %#x(reg%d) -> reg%d",
insn_offset, src->offset, sreg, dst->reg1);
pr_debug_type_name(&tsr->type, tsr->kind);
}
+ /* Handle dereference of TSR_KIND_POINTER registers */
+ else if (has_reg_type(state, sreg) && state->regs[sreg].ok &&
+ state->regs[sreg].kind == TSR_KIND_POINTER &&
+ die_get_member_type(&state->regs[sreg].type,
+ src->offset + state->regs[sreg].offset, &type_die)) {
+ tsr->type = state->regs[sreg].type;
+ tsr->kind = TSR_KIND_TYPE;
+ tsr->offset = src->offset + state->regs[sreg].offset;
+ tsr->ok = true;
+
+ pr_debug_dtp("mov [%x] addr %#x(reg%d) -> reg%d",
+ insn_offset, src->offset, sreg, dst->reg1);
+ pr_debug_type_name(&tsr->type, tsr->kind);
+ }
/* Or check if it's a global variable */
else if (sreg == DWARF_REG_PC) {
struct map_symbol *ms = dloc->ms;
@@ -473,6 +639,7 @@ retry:
tsr->type = type_die;
tsr->kind = TSR_KIND_TYPE;
+ tsr->offset = 0;
tsr->ok = true;
pr_debug_dtp("mov [%x] global addr=%"PRIx64" -> reg%d",
@@ -504,6 +671,7 @@ retry:
die_get_member_type(&type_die, offset, &type_die)) {
tsr->type = type_die;
tsr->kind = TSR_KIND_TYPE;
+ tsr->offset = 0;
tsr->ok = true;
if (src->multi_regs) {
@@ -521,11 +689,12 @@ retry:
}
/* And then dereference the calculated pointer if it has one */
else if (has_reg_type(state, sreg) && state->regs[sreg].ok &&
- state->regs[sreg].kind == TSR_KIND_POINTER &&
+ state->regs[sreg].kind == TSR_KIND_PERCPU_POINTER &&
die_get_member_type(&state->regs[sreg].type,
src->offset, &type_die)) {
tsr->type = type_die;
tsr->kind = TSR_KIND_TYPE;
+ tsr->offset = 0;
tsr->ok = true;
pr_debug_dtp("mov [%x] pointer %#x(reg%d) -> reg%d",
@@ -548,6 +717,7 @@ retry:
&var_name, &offset) &&
!strcmp(var_name, "__per_cpu_offset")) {
tsr->kind = TSR_KIND_PERCPU_BASE;
+ tsr->offset = 0;
tsr->ok = true;
pr_debug_dtp("mov [%x] percpu base reg%d\n",
@@ -583,10 +753,10 @@ retry:
*/
if (!stack->compound)
set_stack_state(stack, offset, tsr->kind,
- &tsr->type);
+ &tsr->type, tsr->offset);
} else {
findnew_stack_state(state, offset, tsr->kind,
- &tsr->type);
+ &tsr->type, tsr->offset);
}
if (dst->reg1 == fbreg) {
@@ -596,6 +766,11 @@ retry:
pr_debug_dtp("mov [%x] reg%d -> %#x(reg%d)",
insn_offset, src->reg1, offset, dst->reg1);
}
+ if (tsr->offset != 0) {
+ pr_debug_dtp(" reg%d offset %#x ->",
+ src->reg1, tsr->offset);
+ }
+
pr_debug_type_name(&tsr->type, tsr->kind);
}
/*
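
Taken together, the add/sub/lea/mov changes above make the type state track a (type, offset) pair per register rather than a bare type. A hedged walk-through of what the state machine would record for an illustrative instruction sequence (the register numbers, offsets and struct name are made up for the example):

	/* Assume reg0 holds a typed pointer: (struct foo *, offset 0).        */
	/* lea 0x10(reg0) -> reg1 : reg1 = (struct foo *, offset 0x10), kept   */
	/*                          only if foo has a member at that offset    */
	/* add $0x8, reg1         : reg1 = (struct foo *, offset 0x18)         */
	/* mov 0x4(reg1) -> reg2  : member type resolved via                   */
	/*                          die_get_member_type(&foo, 0x4 + 0x18, ...) */
	/* imul $3, reg1          : reg1 invalidated -- multiplication         */
	/*                          destroys the pointer interpretation        */
	/* xor reg2, reg2         : reg2 = (TSR_KIND_CONST, imm_value 0)       */
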
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_32.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_32.tbl
index ac007ea00979..4877e16da69a 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_32.tbl
@@ -473,3 +473,5 @@
465 i386 listxattrat sys_listxattrat
466 i386 removexattrat sys_removexattrat
467 i386 open_tree_attr sys_open_tree_attr
+468 i386 file_getattr sys_file_getattr
+469 i386 file_setattr sys_file_setattr
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index cfb5ca41e30d..ced2a1deecd7 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -345,6 +345,7 @@
333 common io_pgetevents sys_io_pgetevents
334 common rseq sys_rseq
335 common uretprobe sys_uretprobe
+336 common uprobe sys_uprobe
# don't use numbers 387 through 423, add new calls after the last
# 'common' entry
424 common pidfd_send_signal sys_pidfd_send_signal
@@ -391,6 +392,8 @@
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
467 common open_tree_attr sys_open_tree_attr
+468 common file_getattr sys_file_getattr
+469 common file_setattr sys_file_setattr
#
# Due to a historical design error, certain syscalls are numbered differently
diff --git a/tools/perf/arch/x86/include/arch-tests.h b/tools/perf/arch/x86/include/arch-tests.h
index c0421a26b875..7d65b9e51840 100644
--- a/tools/perf/arch/x86/include/arch-tests.h
+++ b/tools/perf/arch/x86/include/arch-tests.h
@@ -2,6 +2,8 @@
#ifndef ARCH_TESTS_H
#define ARCH_TESTS_H
+#include "tests/tests.h"
+
struct test_suite;
/* Tests */
@@ -12,10 +14,12 @@ int test__insn_x86(struct test_suite *test, int subtest);
int test__intel_pt_pkt_decoder(struct test_suite *test, int subtest);
int test__intel_pt_hybrid_compat(struct test_suite *test, int subtest);
int test__bp_modify(struct test_suite *test, int subtest);
-int test__x86_sample_parsing(struct test_suite *test, int subtest);
int test__amd_ibs_via_core_pmu(struct test_suite *test, int subtest);
+int test__amd_ibs_period(struct test_suite *test, int subtest);
int test__hybrid(struct test_suite *test, int subtest);
+DECLARE_SUITE(x86_topdown);
+
extern struct test_suite *arch_tests[];
#endif
diff --git a/tools/perf/arch/x86/tests/Build b/tools/perf/arch/x86/tests/Build
index 86262c720857..b017d1ca6e3c 100644
--- a/tools/perf/arch/x86/tests/Build
+++ b/tools/perf/arch/x86/tests/Build
@@ -2,14 +2,15 @@ perf-test-$(CONFIG_DWARF_UNWIND) += regs_load.o
perf-test-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
perf-test-y += arch-tests.o
-perf-test-y += sample-parsing.o
perf-test-y += hybrid.o
-perf-test-$(CONFIG_AUXTRACE) += intel-pt-test.o
+perf-test-y += intel-pt-test.o
ifeq ($(CONFIG_EXTRA_TESTS),y)
-perf-test-$(CONFIG_AUXTRACE) += insn-x86.o
+perf-test-y += insn-x86.o
endif
perf-test-$(CONFIG_X86_64) += bp-modify.o
perf-test-y += amd-ibs-via-core-pmu.o
+perf-test-y += amd-ibs-period.o
+perf-test-y += topdown.o
ifdef SHELLCHECK
SHELL_TESTS := gen-insn-x86-dat.sh
@@ -21,6 +22,6 @@ endif
$(OUTPUT)%.shellcheck_log: %
$(call rule_mkdir)
- $(Q)$(call echo-cmd,test)shellcheck -a -S warning "$<" > $@ || (cat $@ && rm $@ && false)
+ $(Q)$(call echo-cmd,test)$(SHELLCHECK) "$<" > $@ || (cat $@ && rm $@ && false)
perf-test-y += $(SHELL_TEST_LOGS)
diff --git a/tools/perf/arch/x86/tests/amd-ibs-period.c b/tools/perf/arch/x86/tests/amd-ibs-period.c
new file mode 100644
index 000000000000..223e059e04de
--- /dev/null
+++ b/tools/perf/arch/x86/tests/amd-ibs-period.c
@@ -0,0 +1,1032 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <sched.h>
+#include <sys/syscall.h>
+#include <sys/mman.h>
+#include <sys/ioctl.h>
+#include <sys/utsname.h>
+#include <string.h>
+
+#include "arch-tests.h"
+#include "linux/perf_event.h"
+#include "linux/zalloc.h"
+#include "tests/tests.h"
+#include "../perf-sys.h"
+#include "pmu.h"
+#include "pmus.h"
+#include "debug.h"
+#include "util.h"
+#include "strbuf.h"
+#include "../util/env.h"
+
+static int page_size;
+
+#define PERF_MMAP_DATA_PAGES 32L
+#define PERF_MMAP_DATA_SIZE (PERF_MMAP_DATA_PAGES * page_size)
+#define PERF_MMAP_DATA_MASK (PERF_MMAP_DATA_SIZE - 1)
+#define PERF_MMAP_TOTAL_PAGES (PERF_MMAP_DATA_PAGES + 1)
+#define PERF_MMAP_TOTAL_SIZE (PERF_MMAP_TOTAL_PAGES * page_size)
+
+#define rmb() asm volatile("lfence":::"memory")
+
+enum {
+ FD_ERROR,
+ FD_SUCCESS,
+};
+
+enum {
+ IBS_FETCH,
+ IBS_OP,
+};
+
+struct perf_pmu *fetch_pmu;
+struct perf_pmu *op_pmu;
+unsigned int perf_event_max_sample_rate;
+
+/* Dummy workload to generate IBS samples. */
+static int dummy_workload_1(unsigned long count)
+{
+ int (*func)(void);
+ int ret = 0;
+ char *p;
+ char insn1[] = {
+ 0xb8, 0x01, 0x00, 0x00, 0x00, /* mov 1,%eax */
+ 0xc3, /* ret */
+ 0xcc, /* int 3 */
+ };
+
+ char insn2[] = {
+ 0xb8, 0x02, 0x00, 0x00, 0x00, /* mov 2,%eax */
+ 0xc3, /* ret */
+ 0xcc, /* int 3 */
+ };
+
+ p = zalloc(2 * page_size);
+ if (!p) {
+		pr_debug("malloc() failed. [%m]\n");
+ return 1;
+ }
+
+ func = (void *)((unsigned long)(p + page_size - 1) & ~(page_size - 1));
+
+ ret = mprotect(func, page_size, PROT_READ | PROT_WRITE | PROT_EXEC);
+ if (ret) {
+		pr_debug("mprotect() failed. [%m]\n");
+ goto out;
+ }
+
+ if (count < 100000)
+ count = 100000;
+ else if (count > 10000000)
+ count = 10000000;
+ while (count--) {
+ memcpy((void *)func, insn1, sizeof(insn1));
+ if (func() != 1) {
+ pr_debug("ERROR insn1\n");
+ ret = -1;
+ goto out;
+ }
+ memcpy((void *)func, insn2, sizeof(insn2));
+ if (func() != 2) {
+ pr_debug("ERROR insn2\n");
+ ret = -1;
+ goto out;
+ }
+ }
+
+out:
+ free(p);
+ return ret;
+}
+
+/* Another dummy workload to generate IBS samples. */
+static void dummy_workload_2(char *perf)
+{
+ char bench[] = " bench sched messaging -g 10 -l 5000 > /dev/null 2>&1";
+ char taskset[] = "taskset -c 0 ";
+ int ret __maybe_unused;
+ struct strbuf sb;
+ char *cmd;
+
+ strbuf_init(&sb, 0);
+ strbuf_add(&sb, taskset, strlen(taskset));
+ strbuf_add(&sb, perf, strlen(perf));
+ strbuf_add(&sb, bench, strlen(bench));
+ cmd = strbuf_detach(&sb, NULL);
+ ret = system(cmd);
+ free(cmd);
+}
+
+static int sched_affine(int cpu)
+{
+ cpu_set_t set;
+
+ CPU_ZERO(&set);
+ CPU_SET(cpu, &set);
+ if (sched_setaffinity(getpid(), sizeof(set), &set) == -1) {
+		pr_debug("sched_setaffinity() failed. [%m]\n");
+ return -1;
+ }
+ return 0;
+}
+
+static void
+copy_sample_data(void *src, unsigned long offset, void *dest, size_t size)
+{
+ size_t chunk1_size, chunk2_size;
+
+ if ((offset + size) < (size_t)PERF_MMAP_DATA_SIZE) {
+ memcpy(dest, src + offset, size);
+ } else {
+ chunk1_size = PERF_MMAP_DATA_SIZE - offset;
+ chunk2_size = size - chunk1_size;
+
+ memcpy(dest, src + offset, chunk1_size);
+ memcpy(dest + chunk1_size, src, chunk2_size);
+ }
+}
+
+static int rb_read(struct perf_event_mmap_page *rb, void *dest, size_t size)
+{
+ void *base;
+ unsigned long data_tail, data_head;
+
+	/* Cast to (void *) so '+ page_size' advances in bytes; data follows the header page. */
+ base = (void *)rb + page_size;
+
+ data_head = rb->data_head;
+ rmb();
+ data_tail = rb->data_tail;
+
+ if ((data_head - data_tail) < size)
+ return -1;
+
+ data_tail &= PERF_MMAP_DATA_MASK;
+ copy_sample_data(base, data_tail, dest, size);
+ rb->data_tail += size;
+ return 0;
+}
+
+static void rb_skip(struct perf_event_mmap_page *rb, size_t size)
+{
+ size_t data_head = rb->data_head;
+
+ rmb();
+
+ if ((rb->data_tail + size) > data_head)
+ rb->data_tail = data_head;
+ else
+ rb->data_tail += size;
+}
+
+/* Sample period value taken from the perf sample must match the expected value. */
+static int period_equal(unsigned long exp_period, unsigned long act_period)
+{
+ return exp_period == act_period ? 0 : -1;
+}
+
+/*
+ * Sample period value taken from the perf sample must be >= the minimum
+ * sample period supported by IBS HW.
+ */
+static int period_higher(unsigned long min_period, unsigned long act_period)
+{
+ return min_period <= act_period ? 0 : -1;
+}
+
+static int rb_drain_samples(struct perf_event_mmap_page *rb,
+ unsigned long exp_period,
+ int *nr_samples,
+ int (*callback)(unsigned long, unsigned long))
+{
+ struct perf_event_header hdr;
+ unsigned long period;
+ int ret = 0;
+
+ /*
+ * PERF_RECORD_SAMPLE:
+ * struct {
+ * struct perf_event_header hdr;
+ * { u64 period; } && PERF_SAMPLE_PERIOD
+ * };
+ */
+ while (1) {
+ if (rb_read(rb, &hdr, sizeof(hdr)))
+ return ret;
+
+ if (hdr.type == PERF_RECORD_SAMPLE) {
+ (*nr_samples)++;
+ period = 0;
+ if (rb_read(rb, &period, sizeof(period)))
+				pr_debug("rb_read(period) error. [%m]\n");
+ ret |= callback(exp_period, period);
+ } else {
+ rb_skip(rb, hdr.size - sizeof(hdr));
+ }
+ }
+ return ret;
+}
+
+static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
+ int cpu, int group_fd, unsigned long flags)
+{
+ return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
+}
+
+static void fetch_prepare_attr(struct perf_event_attr *attr,
+ unsigned long long config, int freq,
+ unsigned long sample_period)
+{
+ memset(attr, 0, sizeof(struct perf_event_attr));
+
+ attr->type = fetch_pmu->type;
+ attr->size = sizeof(struct perf_event_attr);
+ attr->config = config;
+ attr->disabled = 1;
+ attr->sample_type = PERF_SAMPLE_PERIOD;
+ attr->freq = freq;
+ attr->sample_period = sample_period; /* = ->sample_freq */
+}
+
+static void op_prepare_attr(struct perf_event_attr *attr,
+ unsigned long config, int freq,
+ unsigned long sample_period)
+{
+ memset(attr, 0, sizeof(struct perf_event_attr));
+
+ attr->type = op_pmu->type;
+ attr->size = sizeof(struct perf_event_attr);
+ attr->config = config;
+ attr->disabled = 1;
+ attr->sample_type = PERF_SAMPLE_PERIOD;
+ attr->freq = freq;
+ attr->sample_period = sample_period; /* = ->sample_freq */
+}
+
+struct ibs_configs {
+ /* Input */
+ unsigned long config;
+
+ /* Expected output */
+ unsigned long period;
+ int fd;
+};
+
+/*
+ * Somehow the first Fetch event with sample period = 0x10 produces no
+ * samples, so start with a large period and decrease it gradually.
+ */
+struct ibs_configs fetch_configs[] = {
+ { .config = 0xffff, .period = 0xffff0, .fd = FD_SUCCESS },
+ { .config = 0x1000, .period = 0x10000, .fd = FD_SUCCESS },
+ { .config = 0xff, .period = 0xff0, .fd = FD_SUCCESS },
+ { .config = 0x1, .period = 0x10, .fd = FD_SUCCESS },
+ { .config = 0x0, .period = -1, .fd = FD_ERROR },
+ { .config = 0x10000, .period = -1, .fd = FD_ERROR },
+};
+
+struct ibs_configs op_configs[] = {
+ { .config = 0x0, .period = -1, .fd = FD_ERROR },
+ { .config = 0x1, .period = -1, .fd = FD_ERROR },
+ { .config = 0x8, .period = -1, .fd = FD_ERROR },
+ { .config = 0x9, .period = 0x90, .fd = FD_SUCCESS },
+ { .config = 0xf, .period = 0xf0, .fd = FD_SUCCESS },
+ { .config = 0x1000, .period = 0x10000, .fd = FD_SUCCESS },
+ { .config = 0xffff, .period = 0xffff0, .fd = FD_SUCCESS },
+ { .config = 0x10000, .period = -1, .fd = FD_ERROR },
+ { .config = 0x100000, .period = 0x100000, .fd = FD_SUCCESS },
+ { .config = 0xf00000, .period = 0xf00000, .fd = FD_SUCCESS },
+ { .config = 0xf0ffff, .period = 0xfffff0, .fd = FD_SUCCESS },
+ { .config = 0x1f0ffff, .period = 0x1fffff0, .fd = FD_SUCCESS },
+ { .config = 0x7f0ffff, .period = 0x7fffff0, .fd = FD_SUCCESS },
+ { .config = 0x8f0ffff, .period = -1, .fd = FD_ERROR },
+ { .config = 0x17f0ffff, .period = -1, .fd = FD_ERROR },
+};
+
+static int __ibs_config_test(int ibs_type, struct ibs_configs *config, int *nr_samples)
+{
+ struct perf_event_attr attr;
+ int fd, i;
+ void *rb;
+ int ret = 0;
+
+ if (ibs_type == IBS_FETCH)
+ fetch_prepare_attr(&attr, config->config, 0, 0);
+ else
+ op_prepare_attr(&attr, config->config, 0, 0);
+
+ /* CPU0, All processes */
+ fd = perf_event_open(&attr, -1, 0, -1, 0);
+ if (config->fd == FD_ERROR) {
+ if (fd != -1) {
+ close(fd);
+ return -1;
+ }
+ return 0;
+ }
+ if (fd <= -1)
+ return -1;
+
+ rb = mmap(NULL, PERF_MMAP_TOTAL_SIZE, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0);
+ if (rb == MAP_FAILED) {
+ pr_debug("mmap() failed. [%m]\n");
+		close(fd);
+		return -1;
+ }
+
+ ioctl(fd, PERF_EVENT_IOC_RESET, 0);
+ ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
+
+ i = 5;
+ while (i--) {
+ dummy_workload_1(1000000);
+
+ ret = rb_drain_samples(rb, config->period, nr_samples,
+ period_equal);
+ if (ret)
+ break;
+ }
+
+ ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
+ munmap(rb, PERF_MMAP_TOTAL_SIZE);
+ close(fd);
+ return ret;
+}
+
+static int ibs_config_test(void)
+{
+ int nr_samples = 0;
+ unsigned long i;
+ int ret = 0;
+ int r;
+
+ pr_debug("\nIBS config tests:\n");
+ pr_debug("-----------------\n");
+
+ pr_debug("Fetch PMU tests:\n");
+ for (i = 0; i < ARRAY_SIZE(fetch_configs); i++) {
+ nr_samples = 0;
+ r = __ibs_config_test(IBS_FETCH, &(fetch_configs[i]), &nr_samples);
+
+ if (fetch_configs[i].fd == FD_ERROR) {
+ pr_debug("0x%-16lx: %-4s\n", fetch_configs[i].config,
+ !r ? "Ok" : "Fail");
+ } else {
+ /*
+ * Although nr_samples == 0 is reported as Fail here,
+			 * the failure status is not cascaded up because we
+			 * cannot decide whether the test really failed or not
+ * without actual samples.
+ */
+ pr_debug("0x%-16lx: %-4s (nr samples: %d)\n", fetch_configs[i].config,
+ (!r && nr_samples != 0) ? "Ok" : "Fail", nr_samples);
+ }
+
+ ret |= r;
+ }
+
+ pr_debug("Op PMU tests:\n");
+ for (i = 0; i < ARRAY_SIZE(op_configs); i++) {
+ nr_samples = 0;
+ r = __ibs_config_test(IBS_OP, &(op_configs[i]), &nr_samples);
+
+ if (op_configs[i].fd == FD_ERROR) {
+ pr_debug("0x%-16lx: %-4s\n", op_configs[i].config,
+ !r ? "Ok" : "Fail");
+ } else {
+ /*
+ * Although nr_samples == 0 is reported as Fail here,
+			 * the failure status is not cascaded up because we
+			 * cannot decide whether the test really failed or not
+ * without actual samples.
+ */
+ pr_debug("0x%-16lx: %-4s (nr samples: %d)\n", op_configs[i].config,
+ (!r && nr_samples != 0) ? "Ok" : "Fail", nr_samples);
+ }
+
+ ret |= r;
+ }
+
+ return ret;
+}
+
+struct ibs_period {
+ /* Input */
+ int freq;
+ unsigned long sample_freq;
+
+ /* Output */
+ int ret;
+ unsigned long period;
+};
+
+struct ibs_period fetch_period[] = {
+ { .freq = 0, .sample_freq = 0, .ret = FD_ERROR, .period = -1 },
+ { .freq = 0, .sample_freq = 1, .ret = FD_ERROR, .period = -1 },
+ { .freq = 0, .sample_freq = 0xf, .ret = FD_ERROR, .period = -1 },
+ { .freq = 0, .sample_freq = 0x10, .ret = FD_SUCCESS, .period = 0x10 },
+ { .freq = 0, .sample_freq = 0x11, .ret = FD_SUCCESS, .period = 0x10 },
+ { .freq = 0, .sample_freq = 0x8f, .ret = FD_SUCCESS, .period = 0x80 },
+ { .freq = 0, .sample_freq = 0x90, .ret = FD_SUCCESS, .period = 0x90 },
+ { .freq = 0, .sample_freq = 0x91, .ret = FD_SUCCESS, .period = 0x90 },
+ { .freq = 0, .sample_freq = 0x4d2, .ret = FD_SUCCESS, .period = 0x4d0 },
+ { .freq = 0, .sample_freq = 0x1007, .ret = FD_SUCCESS, .period = 0x1000 },
+ { .freq = 0, .sample_freq = 0xfff0, .ret = FD_SUCCESS, .period = 0xfff0 },
+ { .freq = 0, .sample_freq = 0xffff, .ret = FD_SUCCESS, .period = 0xfff0 },
+ { .freq = 0, .sample_freq = 0x10010, .ret = FD_SUCCESS, .period = 0x10010 },
+ { .freq = 0, .sample_freq = 0x7fffff, .ret = FD_SUCCESS, .period = 0x7ffff0 },
+ { .freq = 0, .sample_freq = 0xfffffff, .ret = FD_SUCCESS, .period = 0xffffff0 },
+ { .freq = 1, .sample_freq = 0, .ret = FD_ERROR, .period = -1 },
+ { .freq = 1, .sample_freq = 1, .ret = FD_SUCCESS, .period = 0x10 },
+ { .freq = 1, .sample_freq = 0xf, .ret = FD_SUCCESS, .period = 0x10 },
+ { .freq = 1, .sample_freq = 0x10, .ret = FD_SUCCESS, .period = 0x10 },
+ { .freq = 1, .sample_freq = 0x11, .ret = FD_SUCCESS, .period = 0x10 },
+ { .freq = 1, .sample_freq = 0x8f, .ret = FD_SUCCESS, .period = 0x10 },
+ { .freq = 1, .sample_freq = 0x90, .ret = FD_SUCCESS, .period = 0x10 },
+ { .freq = 1, .sample_freq = 0x91, .ret = FD_SUCCESS, .period = 0x10 },
+ { .freq = 1, .sample_freq = 0x4d2, .ret = FD_SUCCESS, .period = 0x10 },
+ { .freq = 1, .sample_freq = 0x1007, .ret = FD_SUCCESS, .period = 0x10 },
+ { .freq = 1, .sample_freq = 0xfff0, .ret = FD_SUCCESS, .period = 0x10 },
+ { .freq = 1, .sample_freq = 0xffff, .ret = FD_SUCCESS, .period = 0x10 },
+ { .freq = 1, .sample_freq = 0x10010, .ret = FD_SUCCESS, .period = 0x10 },
+ /* ret=FD_ERROR because freq > default perf_event_max_sample_rate (100000) */
+ { .freq = 1, .sample_freq = 0x7fffff, .ret = FD_ERROR, .period = -1 },
+};
+
+struct ibs_period op_period[] = {
+ { .freq = 0, .sample_freq = 0, .ret = FD_ERROR, .period = -1 },
+ { .freq = 0, .sample_freq = 1, .ret = FD_ERROR, .period = -1 },
+ { .freq = 0, .sample_freq = 0xf, .ret = FD_ERROR, .period = -1 },
+ { .freq = 0, .sample_freq = 0x10, .ret = FD_ERROR, .period = -1 },
+ { .freq = 0, .sample_freq = 0x11, .ret = FD_ERROR, .period = -1 },
+ { .freq = 0, .sample_freq = 0x8f, .ret = FD_ERROR, .period = -1 },
+ { .freq = 0, .sample_freq = 0x90, .ret = FD_SUCCESS, .period = 0x90 },
+ { .freq = 0, .sample_freq = 0x91, .ret = FD_SUCCESS, .period = 0x90 },
+ { .freq = 0, .sample_freq = 0x4d2, .ret = FD_SUCCESS, .period = 0x4d0 },
+ { .freq = 0, .sample_freq = 0x1007, .ret = FD_SUCCESS, .period = 0x1000 },
+ { .freq = 0, .sample_freq = 0xfff0, .ret = FD_SUCCESS, .period = 0xfff0 },
+ { .freq = 0, .sample_freq = 0xffff, .ret = FD_SUCCESS, .period = 0xfff0 },
+ { .freq = 0, .sample_freq = 0x10010, .ret = FD_SUCCESS, .period = 0x10010 },
+ { .freq = 0, .sample_freq = 0x7fffff, .ret = FD_SUCCESS, .period = 0x7ffff0 },
+ { .freq = 0, .sample_freq = 0xfffffff, .ret = FD_SUCCESS, .period = 0xffffff0 },
+ { .freq = 1, .sample_freq = 0, .ret = FD_ERROR, .period = -1 },
+ { .freq = 1, .sample_freq = 1, .ret = FD_SUCCESS, .period = 0x90 },
+ { .freq = 1, .sample_freq = 0xf, .ret = FD_SUCCESS, .period = 0x90 },
+ { .freq = 1, .sample_freq = 0x10, .ret = FD_SUCCESS, .period = 0x90 },
+ { .freq = 1, .sample_freq = 0x11, .ret = FD_SUCCESS, .period = 0x90 },
+ { .freq = 1, .sample_freq = 0x8f, .ret = FD_SUCCESS, .period = 0x90 },
+ { .freq = 1, .sample_freq = 0x90, .ret = FD_SUCCESS, .period = 0x90 },
+ { .freq = 1, .sample_freq = 0x91, .ret = FD_SUCCESS, .period = 0x90 },
+ { .freq = 1, .sample_freq = 0x4d2, .ret = FD_SUCCESS, .period = 0x90 },
+ { .freq = 1, .sample_freq = 0x1007, .ret = FD_SUCCESS, .period = 0x90 },
+ { .freq = 1, .sample_freq = 0xfff0, .ret = FD_SUCCESS, .period = 0x90 },
+ { .freq = 1, .sample_freq = 0xffff, .ret = FD_SUCCESS, .period = 0x90 },
+ { .freq = 1, .sample_freq = 0x10010, .ret = FD_SUCCESS, .period = 0x90 },
+ /* ret=FD_ERROR because freq > default perf_event_max_sample_rate (100000) */
+ { .freq = 1, .sample_freq = 0x7fffff, .ret = FD_ERROR, .period = -1 },
+};
+
+static int __ibs_period_constraint_test(int ibs_type, struct ibs_period *period,
+ int *nr_samples)
+{
+ struct perf_event_attr attr;
+ int ret = 0;
+ void *rb;
+ int fd;
+
+ if (period->freq && period->sample_freq > perf_event_max_sample_rate)
+ period->ret = FD_ERROR;
+
+ if (ibs_type == IBS_FETCH)
+ fetch_prepare_attr(&attr, 0, period->freq, period->sample_freq);
+ else
+ op_prepare_attr(&attr, 0, period->freq, period->sample_freq);
+
+ /* CPU0, All processes */
+ fd = perf_event_open(&attr, -1, 0, -1, 0);
+ if (period->ret == FD_ERROR) {
+ if (fd != -1) {
+ close(fd);
+ return -1;
+ }
+ return 0;
+ }
+ if (fd <= -1)
+ return -1;
+
+ rb = mmap(NULL, PERF_MMAP_TOTAL_SIZE, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0);
+ if (rb == MAP_FAILED) {
+ pr_debug("mmap() failed. [%m]\n");
+ close(fd);
+ return -1;
+ }
+
+ ioctl(fd, PERF_EVENT_IOC_RESET, 0);
+ ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
+
+ if (period->freq) {
+ dummy_workload_1(100000);
+ ret = rb_drain_samples(rb, period->period, nr_samples,
+ period_higher);
+ } else {
+ dummy_workload_1(period->sample_freq * 10);
+ ret = rb_drain_samples(rb, period->period, nr_samples,
+ period_equal);
+ }
+
+ ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
+ munmap(rb, PERF_MMAP_TOTAL_SIZE);
+ close(fd);
+ return ret;
+}
+
+static int ibs_period_constraint_test(void)
+{
+ unsigned long i;
+ int nr_samples;
+ int ret = 0;
+ int r;
+
+ pr_debug("\nIBS sample period constraint tests:\n");
+ pr_debug("-----------------------------------\n");
+
+ pr_debug("Fetch PMU test:\n");
+ for (i = 0; i < ARRAY_SIZE(fetch_period); i++) {
+ nr_samples = 0;
+ r = __ibs_period_constraint_test(IBS_FETCH, &fetch_period[i],
+ &nr_samples);
+
+ if (fetch_period[i].ret == FD_ERROR) {
+ pr_debug("freq %d, sample_freq %9ld: %-4s\n",
+ fetch_period[i].freq, fetch_period[i].sample_freq,
+ !r ? "Ok" : "Fail");
+ } else {
+ /*
+ * Although nr_samples == 0 is reported as Fail here,
+			 * the failure status is not cascaded up because we
+			 * cannot decide whether the test really failed or not
+ * without actual samples.
+ */
+ pr_debug("freq %d, sample_freq %9ld: %-4s (nr samples: %d)\n",
+ fetch_period[i].freq, fetch_period[i].sample_freq,
+ (!r && nr_samples != 0) ? "Ok" : "Fail", nr_samples);
+ }
+ ret |= r;
+ }
+
+ pr_debug("Op PMU test:\n");
+ for (i = 0; i < ARRAY_SIZE(op_period); i++) {
+ nr_samples = 0;
+ r = __ibs_period_constraint_test(IBS_OP, &op_period[i],
+ &nr_samples);
+
+ if (op_period[i].ret == FD_ERROR) {
+ pr_debug("freq %d, sample_freq %9ld: %-4s\n",
+ op_period[i].freq, op_period[i].sample_freq,
+ !r ? "Ok" : "Fail");
+ } else {
+ /*
+ * Although nr_samples == 0 is reported as Fail here,
+			 * the failure status is not cascaded up because we
+			 * cannot decide whether the test really failed or not
+ * without actual samples.
+ */
+ pr_debug("freq %d, sample_freq %9ld: %-4s (nr samples: %d)\n",
+ op_period[i].freq, op_period[i].sample_freq,
+ (!r && nr_samples != 0) ? "Ok" : "Fail", nr_samples);
+ }
+ ret |= r;
+ }
+
+ return ret;
+}
+
+struct ibs_ioctl {
+ /* Input */
+ int freq;
+ unsigned long period;
+
+ /* Expected output */
+ int ret;
+};
+
+struct ibs_ioctl fetch_ioctl[] = {
+ { .freq = 0, .period = 0x0, .ret = FD_ERROR },
+ { .freq = 0, .period = 0x1, .ret = FD_ERROR },
+ { .freq = 0, .period = 0xf, .ret = FD_ERROR },
+ { .freq = 0, .period = 0x10, .ret = FD_SUCCESS },
+ { .freq = 0, .period = 0x11, .ret = FD_ERROR },
+ { .freq = 0, .period = 0x1f, .ret = FD_ERROR },
+ { .freq = 0, .period = 0x20, .ret = FD_SUCCESS },
+ { .freq = 0, .period = 0x80, .ret = FD_SUCCESS },
+ { .freq = 0, .period = 0x8f, .ret = FD_ERROR },
+ { .freq = 0, .period = 0x90, .ret = FD_SUCCESS },
+ { .freq = 0, .period = 0x91, .ret = FD_ERROR },
+ { .freq = 0, .period = 0x100, .ret = FD_SUCCESS },
+ { .freq = 0, .period = 0xfff0, .ret = FD_SUCCESS },
+ { .freq = 0, .period = 0xffff, .ret = FD_ERROR },
+ { .freq = 0, .period = 0x10000, .ret = FD_SUCCESS },
+ { .freq = 0, .period = 0x1fff0, .ret = FD_SUCCESS },
+ { .freq = 0, .period = 0x1fff5, .ret = FD_ERROR },
+ { .freq = 1, .period = 0x0, .ret = FD_ERROR },
+ { .freq = 1, .period = 0x1, .ret = FD_SUCCESS },
+ { .freq = 1, .period = 0xf, .ret = FD_SUCCESS },
+ { .freq = 1, .period = 0x10, .ret = FD_SUCCESS },
+ { .freq = 1, .period = 0x11, .ret = FD_SUCCESS },
+ { .freq = 1, .period = 0x1f, .ret = FD_SUCCESS },
+ { .freq = 1, .period = 0x20, .ret = FD_SUCCESS },
+ { .freq = 1, .period = 0x80, .ret = FD_SUCCESS },
+ { .freq = 1, .period = 0x8f, .ret = FD_SUCCESS },
+ { .freq = 1, .period = 0x90, .ret = FD_SUCCESS },
+ { .freq = 1, .period = 0x91, .ret = FD_SUCCESS },
+ { .freq = 1, .period = 0x100, .ret = FD_SUCCESS },
+};
+
+struct ibs_ioctl op_ioctl[] = {
+ { .freq = 0, .period = 0x0, .ret = FD_ERROR },
+ { .freq = 0, .period = 0x1, .ret = FD_ERROR },
+ { .freq = 0, .period = 0xf, .ret = FD_ERROR },
+ { .freq = 0, .period = 0x10, .ret = FD_ERROR },
+ { .freq = 0, .period = 0x11, .ret = FD_ERROR },
+ { .freq = 0, .period = 0x1f, .ret = FD_ERROR },
+ { .freq = 0, .period = 0x20, .ret = FD_ERROR },
+ { .freq = 0, .period = 0x80, .ret = FD_ERROR },
+ { .freq = 0, .period = 0x8f, .ret = FD_ERROR },
+ { .freq = 0, .period = 0x90, .ret = FD_SUCCESS },
+ { .freq = 0, .period = 0x91, .ret = FD_ERROR },
+ { .freq = 0, .period = 0x100, .ret = FD_SUCCESS },
+ { .freq = 0, .period = 0xfff0, .ret = FD_SUCCESS },
+ { .freq = 0, .period = 0xffff, .ret = FD_ERROR },
+ { .freq = 0, .period = 0x10000, .ret = FD_SUCCESS },
+ { .freq = 0, .period = 0x1fff0, .ret = FD_SUCCESS },
+ { .freq = 0, .period = 0x1fff5, .ret = FD_ERROR },
+ { .freq = 1, .period = 0x0, .ret = FD_ERROR },
+ { .freq = 1, .period = 0x1, .ret = FD_SUCCESS },
+ { .freq = 1, .period = 0xf, .ret = FD_SUCCESS },
+ { .freq = 1, .period = 0x10, .ret = FD_SUCCESS },
+ { .freq = 1, .period = 0x11, .ret = FD_SUCCESS },
+ { .freq = 1, .period = 0x1f, .ret = FD_SUCCESS },
+ { .freq = 1, .period = 0x20, .ret = FD_SUCCESS },
+ { .freq = 1, .period = 0x80, .ret = FD_SUCCESS },
+ { .freq = 1, .period = 0x8f, .ret = FD_SUCCESS },
+ { .freq = 1, .period = 0x90, .ret = FD_SUCCESS },
+ { .freq = 1, .period = 0x91, .ret = FD_SUCCESS },
+ { .freq = 1, .period = 0x100, .ret = FD_SUCCESS },
+};
+
+static int __ibs_ioctl_test(int ibs_type, struct ibs_ioctl *ibs_ioctl)
+{
+ struct perf_event_attr attr;
+ int ret = 0;
+ int fd;
+ int r;
+
+ if (ibs_type == IBS_FETCH)
+ fetch_prepare_attr(&attr, 0, ibs_ioctl->freq, 1000);
+ else
+ op_prepare_attr(&attr, 0, ibs_ioctl->freq, 1000);
+
+ /* CPU0, All processes */
+ fd = perf_event_open(&attr, -1, 0, -1, 0);
+ if (fd <= -1) {
+ pr_debug("event_open() Failed\n");
+ return -1;
+ }
+
+ r = ioctl(fd, PERF_EVENT_IOC_PERIOD, &ibs_ioctl->period);
+ if ((ibs_ioctl->ret == FD_SUCCESS && r <= -1) ||
+ (ibs_ioctl->ret == FD_ERROR && r >= 0)) {
+ ret = -1;
+ }
+
+ close(fd);
+ return ret;
+}
+
+static int ibs_ioctl_test(void)
+{
+ unsigned long i;
+ int ret = 0;
+ int r;
+
+ pr_debug("\nIBS ioctl() tests:\n");
+ pr_debug("------------------\n");
+
+ pr_debug("Fetch PMU tests\n");
+ for (i = 0; i < ARRAY_SIZE(fetch_ioctl); i++) {
+ r = __ibs_ioctl_test(IBS_FETCH, &fetch_ioctl[i]);
+
+ pr_debug("ioctl(%s = 0x%-7lx): %s\n",
+ fetch_ioctl[i].freq ? "freq " : "period",
+ fetch_ioctl[i].period, r ? "Fail" : "Ok");
+ ret |= r;
+ }
+
+ pr_debug("Op PMU tests\n");
+ for (i = 0; i < ARRAY_SIZE(op_ioctl); i++) {
+ r = __ibs_ioctl_test(IBS_OP, &op_ioctl[i]);
+
+ pr_debug("ioctl(%s = 0x%-7lx): %s\n",
+ op_ioctl[i].freq ? "freq " : "period",
+ op_ioctl[i].period, r ? "Fail" : "Ok");
+ ret |= r;
+ }
+
+ return ret;
+}
+
+static int ibs_freq_neg_test(void)
+{
+ struct perf_event_attr attr;
+ int fd;
+
+ pr_debug("\nIBS freq (negative) tests:\n");
+ pr_debug("--------------------------\n");
+
+ /*
+ * Assuming perf_event_max_sample_rate <= 100000,
+ * config: 0x300D40 ==> MaxCnt: 200000
+ */
+ op_prepare_attr(&attr, 0x300D40, 1, 0);
+
+ /* CPU0, All processes */
+ fd = perf_event_open(&attr, -1, 0, -1, 0);
+ if (fd != -1) {
+ pr_debug("freq 1, sample_freq 200000: Fail\n");
+ close(fd);
+ return -1;
+ }
+
+ pr_debug("freq 1, sample_freq 200000: Ok\n");
+
+ return 0;
+}
+
+struct ibs_l3missonly {
+ /* Input */
+ int freq;
+ unsigned long sample_freq;
+
+ /* Expected output */
+ int ret;
+ unsigned long min_period;
+};
+
+struct ibs_l3missonly fetch_l3missonly = {
+ .freq = 1,
+ .sample_freq = 10000,
+ .ret = FD_SUCCESS,
+ .min_period = 0x10,
+};
+
+struct ibs_l3missonly op_l3missonly = {
+ .freq = 1,
+ .sample_freq = 10000,
+ .ret = FD_SUCCESS,
+ .min_period = 0x90,
+};
+
+static int __ibs_l3missonly_test(char *perf, int ibs_type, int *nr_samples,
+ struct ibs_l3missonly *l3missonly)
+{
+ struct perf_event_attr attr;
+ int ret = 0;
+ void *rb;
+ int fd;
+
+ if (l3missonly->sample_freq > perf_event_max_sample_rate)
+ l3missonly->ret = FD_ERROR;
+
+ if (ibs_type == IBS_FETCH) {
+ fetch_prepare_attr(&attr, 0x800000000000000UL, l3missonly->freq,
+ l3missonly->sample_freq);
+ } else {
+ op_prepare_attr(&attr, 0x10000, l3missonly->freq,
+ l3missonly->sample_freq);
+ }
+
+ /* CPU0, All processes */
+ fd = perf_event_open(&attr, -1, 0, -1, 0);
+ if (l3missonly->ret == FD_ERROR) {
+ if (fd != -1) {
+ close(fd);
+ return -1;
+ }
+ return 0;
+ }
+ if (fd == -1) {
+ pr_debug("perf_event_open() failed. [%m]\n");
+ return -1;
+ }
+
+ rb = mmap(NULL, PERF_MMAP_TOTAL_SIZE, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0);
+ if (rb == MAP_FAILED) {
+ pr_debug("mmap() failed. [%m]\n");
+ close(fd);
+ return -1;
+ }
+
+ ioctl(fd, PERF_EVENT_IOC_RESET, 0);
+ ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
+
+ dummy_workload_2(perf);
+
+ ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
+
+ ret = rb_drain_samples(rb, l3missonly->min_period, nr_samples, period_higher);
+
+ munmap(rb, PERF_MMAP_TOTAL_SIZE);
+ close(fd);
+ return ret;
+}
+
+static int ibs_l3missonly_test(char *perf)
+{
+ int nr_samples = 0;
+ int ret = 0;
+ int r = 0;
+
+ pr_debug("\nIBS L3MissOnly test: (takes a while)\n");
+ pr_debug("--------------------\n");
+
+ if (perf_pmu__has_format(fetch_pmu, "l3missonly")) {
+ nr_samples = 0;
+ r = __ibs_l3missonly_test(perf, IBS_FETCH, &nr_samples, &fetch_l3missonly);
+ if (fetch_l3missonly.ret == FD_ERROR) {
+ pr_debug("Fetch L3MissOnly: %-4s\n", !r ? "Ok" : "Fail");
+ } else {
+ /*
+ * Although nr_samples == 0 is reported as Fail here,
+			 * the failure status is not cascaded up because we
+			 * cannot decide whether the test really failed or not
+ * without actual samples.
+ */
+ pr_debug("Fetch L3MissOnly: %-4s (nr_samples: %d)\n",
+ (!r && nr_samples != 0) ? "Ok" : "Fail", nr_samples);
+ }
+ ret |= r;
+ }
+
+ if (perf_pmu__has_format(op_pmu, "l3missonly")) {
+ nr_samples = 0;
+ r = __ibs_l3missonly_test(perf, IBS_OP, &nr_samples, &op_l3missonly);
+ if (op_l3missonly.ret == FD_ERROR) {
+ pr_debug("Op L3MissOnly: %-4s\n", !r ? "Ok" : "Fail");
+ } else {
+ /*
+ * Although nr_samples == 0 is reported as Fail here,
+			 * the failure status is not cascaded up because we
+			 * cannot decide whether the test really failed or not
+ * without actual samples.
+ */
+ pr_debug("Op L3MissOnly: %-4s (nr_samples: %d)\n",
+ (!r && nr_samples != 0) ? "Ok" : "Fail", nr_samples);
+ }
+ ret |= r;
+ }
+
+ return ret;
+}
+
+static unsigned int get_perf_event_max_sample_rate(void)
+{
+ unsigned int max_sample_rate = 100000;
+ FILE *fp;
+ int ret;
+
+ fp = fopen("/proc/sys/kernel/perf_event_max_sample_rate", "r");
+ if (!fp) {
+ pr_debug("Can't open perf_event_max_sample_rate. Assuming %d\n",
+ max_sample_rate);
+ goto out;
+ }
+
+	ret = fscanf(fp, "%u", &max_sample_rate);
+	if (ret != 1) {
+ pr_debug("Can't read perf_event_max_sample_rate. Assuming 100000\n");
+ max_sample_rate = 100000;
+ }
+ fclose(fp);
+
+out:
+ return max_sample_rate;
+}
+
+/*
+ * A bunch of IBS sample period fixes that this test exercises went into
+ * v6.15. Skip the test on older kernels to distinguish a test failure due
+ * to a new bug from a known failure due to an older kernel.
+ */
+static bool kernel_v6_15_or_newer(void)
+{
+ struct utsname utsname;
+ char *endptr = NULL;
+ long major, minor;
+
+ if (uname(&utsname) < 0) {
+		pr_debug("uname() failed. [%m]\n");
+ return false;
+ }
+
+ major = strtol(utsname.release, &endptr, 10);
+ endptr++;
+ minor = strtol(endptr, NULL, 10);
+
+	return major > 6 || (major == 6 && minor >= 15);
+}
+
+int test__amd_ibs_period(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ char perf[PATH_MAX] = {'\0'};
+ int ret = TEST_OK;
+
+ page_size = sysconf(_SC_PAGESIZE);
+
+ /*
+	 * Reading perf_event_max_sample_rate only once _might_ cause some of
+	 * the tests to fail if the kernel changes it after it is read here.
+ */
+ perf_event_max_sample_rate = get_perf_event_max_sample_rate();
+ fetch_pmu = perf_pmus__find("ibs_fetch");
+ op_pmu = perf_pmus__find("ibs_op");
+
+ if (!x86__is_amd_cpu() || !fetch_pmu || !op_pmu)
+ return TEST_SKIP;
+
+ if (!kernel_v6_15_or_newer()) {
+ pr_debug("Need v6.15 or newer kernel. Skipping.\n");
+ return TEST_SKIP;
+ }
+
+ perf_exe(perf, sizeof(perf));
+
+ if (sched_affine(0))
+ return TEST_FAIL;
+
+ /*
+ * Perf event can be opened in two modes:
+ * 1 Freq mode
+ * perf_event_attr->freq = 1, ->sample_freq = <frequency>
+ * 2 Sample period mode
+ * perf_event_attr->freq = 0, ->sample_period = <period>
+ *
+	 * Instead of using the above interface, an IBS event in 'sample period
+	 * mode' can also be opened by passing the <period> value directly in
+	 * the MaxCnt bitfield of perf_event_attr->config. Test this
+	 * IBS-specific interface.
+ */
+ if (ibs_config_test())
+ ret = TEST_FAIL;
+
+ /*
+ * IBS Fetch and Op PMUs have HW constraints on minimum sample period.
+	 * Also, the sample period must be a multiple of 0x10. Test that the
+	 * IBS driver honors the HW constraints for various possible values in
+	 * Freq as well as Sample Period mode IBS events.
+ */
+ if (ibs_period_constraint_test())
+ ret = TEST_FAIL;
+
+ /*
+ * Test ioctl() with various sample period values for IBS event.
+ */
+ if (ibs_ioctl_test())
+ ret = TEST_FAIL;
+
+ /*
+	 * Test that opening a freq mode IBS event fails when the freq value is
+	 * passed through ->config rather than explicitly in ->sample_freq.
+	 * Also use a high freq value (beyond perf_event_max_sample_rate) to
+	 * verify that the IBS driver does not bypass the
+	 * perf_event_max_sample_rate check.
+ */
+ if (ibs_freq_neg_test())
+ ret = TEST_FAIL;
+
+ /*
+ * L3MissOnly is a post-processing filter, i.e. IBS HW checks for L3
+ * Miss at the completion of the tagged uOp. The sample is discarded
+ * if the tagged uOp did not cause L3Miss. Also, IBS HW internally
+ * resets CurCnt to a small pseudo-random value and resumes counting.
+	 * A new uOp is tagged once CurCnt reaches MaxCnt. The process
+ * repeats until the tagged uOp causes an L3 Miss.
+ *
+	 * With a freq mode event, the next sample period is calculated by the
+	 * generic kernel code on every sample to achieve the desired sample
+	 * rate.
+ *
+	 * Since the number of times HW internally resets CurCnt, and the
+	 * pseudo-random CurCnt values for those occurrences, are not known to
+	 * SW, the kernel's sample period adjustment goes for a toss for freq
+	 * mode IBS events. The kernel will set a very small period for the
+	 * next sample if the window between the current and previous samples
+	 * is too large due to multiple samples being discarded internally by
+	 * IBS HW.
+ *
+ * Test that IBS sample period constraints are honored when L3MissOnly
+ * is ON.
+ */
+ if (ibs_l3missonly_test(perf))
+ ret = TEST_FAIL;
+
+ return ret;
+}
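
The copy_sample_data()/rb_read() helpers in this new test implement the usual perf ring-buffer discipline: the first mmap'ed page is the control page (struct perf_event_mmap_page) and the data area that follows is circular, so a read that crosses the end must be split in two. A generic restatement of that two-chunk copy, independent of the test's globals:

	#include <stddef.h>
	#include <string.h>

	/*
	 * Copy 'size' bytes starting at 'offset' out of a circular buffer
	 * 'src' of 'buf_size' bytes, splitting the copy when it wraps.
	 */
	static void circ_copy(const char *src, size_t buf_size, size_t offset,
			      char *dest, size_t size)
	{
		if (offset + size <= buf_size) {
			memcpy(dest, src + offset, size);
		} else {
			size_t chunk1 = buf_size - offset;	/* up to the end */

			memcpy(dest, src + offset, chunk1);
			memcpy(dest + chunk1, src, size - chunk1); /* wrapped part */
		}
	}

The test registers itself as "AMD IBS sample period" in arch-tests.c below, so once built it should be runnable on its own through the perf test runner.
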
diff --git a/tools/perf/arch/x86/tests/arch-tests.c b/tools/perf/arch/x86/tests/arch-tests.c
index a216a5d172ed..c3e1619c5e79 100644
--- a/tools/perf/arch/x86/tests/arch-tests.c
+++ b/tools/perf/arch/x86/tests/arch-tests.c
@@ -3,7 +3,6 @@
#include "tests/tests.h"
#include "arch-tests.h"
-#ifdef HAVE_AUXTRACE_SUPPORT
#ifdef HAVE_EXTRA_TESTS
DEFINE_SUITE("x86 instruction decoder - new instructions", insn_x86);
#endif
@@ -19,12 +18,11 @@ struct test_suite suite__intel_pt = {
.test_cases = intel_pt_tests,
};
-#endif
#if defined(__x86_64__)
DEFINE_SUITE("x86 bp modify", bp_modify);
#endif
-DEFINE_SUITE("x86 Sample parsing", x86_sample_parsing);
DEFINE_SUITE("AMD IBS via core pmu", amd_ibs_via_core_pmu);
+DEFINE_SUITE_EXCLUSIVE("AMD IBS sample period", amd_ibs_period);
static struct test_case hybrid_tests[] = {
TEST_CASE_REASON("x86 hybrid event parsing", hybrid, "not hybrid"),
{ .name = NULL, }
@@ -39,17 +37,16 @@ struct test_suite *arch_tests[] = {
#ifdef HAVE_DWARF_UNWIND_SUPPORT
&suite__dwarf_unwind,
#endif
-#ifdef HAVE_AUXTRACE_SUPPORT
#ifdef HAVE_EXTRA_TESTS
&suite__insn_x86,
#endif
&suite__intel_pt,
-#endif
#if defined(__x86_64__)
&suite__bp_modify,
#endif
- &suite__x86_sample_parsing,
&suite__amd_ibs_via_core_pmu,
+ &suite__amd_ibs_period,
&suite__hybrid,
+ &suite__x86_topdown,
NULL,
};
diff --git a/tools/perf/arch/x86/tests/intel-pt-test.c b/tools/perf/arch/x86/tests/intel-pt-test.c
index b217ed67cd4e..970997759ec2 100644
--- a/tools/perf/arch/x86/tests/intel-pt-test.c
+++ b/tools/perf/arch/x86/tests/intel-pt-test.c
@@ -3,7 +3,6 @@
#include <linux/compiler.h>
#include <linux/bits.h>
#include <string.h>
-#include <cpuid.h>
#include <sched.h>
#include "intel-pt-decoder/intel-pt-pkt-decoder.h"
@@ -11,6 +10,7 @@
#include "debug.h"
#include "tests/tests.h"
#include "arch-tests.h"
+#include "../util/cpuid.h"
#include "cpumap.h"
/**
@@ -363,7 +363,7 @@ static int get_pt_caps(int cpu, struct pt_caps *caps)
memset(caps, 0, sizeof(*caps));
for (i = 0; i < INTEL_PT_SUBLEAF_CNT; i++) {
- __get_cpuid_count(20, i, &r.eax, &r.ebx, &r.ecx, &r.edx);
+ cpuid(20, i, &r.eax, &r.ebx, &r.ecx, &r.edx);
pr_debug("CPU %d CPUID leaf 20 subleaf %d\n", cpu, i);
pr_debug("eax = 0x%08x\n", r.eax);
pr_debug("ebx = 0x%08x\n", r.ebx);
@@ -380,7 +380,7 @@ static bool is_hybrid(void)
unsigned int eax, ebx, ecx, edx = 0;
bool result;
- __get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx);
+ cpuid(7, 0, &eax, &ebx, &ecx, &edx);
result = edx & BIT(15);
pr_debug("Is %shybrid : CPUID leaf 7 subleaf 0 edx %#x (bit-15 indicates hybrid)\n",
result ? "" : "not ", edx);
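
The switch from <cpuid.h>'s __get_cpuid_count() to a local cpuid() helper keeps the same out-parameter shape. The real definition lives in the newly included ../util/cpuid.h, which is not part of this hunk; a plausible sketch of such a wrapper, shown purely as an assumption about its shape, would be:

	/* Sketch only -- the actual helper appears to live in
	 * tools/perf/arch/x86/util/cpuid.h and may differ. */
	static inline void cpuid(unsigned int leaf, unsigned int subleaf,
				 unsigned int *eax, unsigned int *ebx,
				 unsigned int *ecx, unsigned int *edx)
	{
		asm volatile("cpuid"
			     : "=a"(*eax), "=b"(*ebx), "=c"(*ecx), "=d"(*edx)
			     : "a"(leaf), "c"(subleaf));
	}
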
diff --git a/tools/perf/arch/x86/tests/sample-parsing.c b/tools/perf/arch/x86/tests/sample-parsing.c
deleted file mode 100644
index a061e8619267..000000000000
--- a/tools/perf/arch/x86/tests/sample-parsing.c
+++ /dev/null
@@ -1,125 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <stdbool.h>
-#include <inttypes.h>
-#include <stdlib.h>
-#include <string.h>
-#include <linux/bitops.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#include "event.h"
-#include "evsel.h"
-#include "debug.h"
-#include "util/sample.h"
-#include "util/synthetic-events.h"
-
-#include "tests/tests.h"
-#include "arch-tests.h"
-
-#define COMP(m) do { \
- if (s1->m != s2->m) { \
- pr_debug("Samples differ at '"#m"'\n"); \
- return false; \
- } \
-} while (0)
-
-static bool samples_same(const struct perf_sample *s1,
- const struct perf_sample *s2,
- u64 type)
-{
- if (type & PERF_SAMPLE_WEIGHT_STRUCT) {
- COMP(ins_lat);
- COMP(retire_lat);
- }
-
- return true;
-}
-
-static int do_test(u64 sample_type)
-{
- struct evsel evsel = {
- .needs_swap = false,
- .core = {
- . attr = {
- .sample_type = sample_type,
- .read_format = 0,
- },
- },
- };
- union perf_event *event;
- struct perf_sample sample = {
- .weight = 101,
- .ins_lat = 102,
- .retire_lat = 103,
- };
- struct perf_sample sample_out;
- size_t i, sz, bufsz;
- int err, ret = -1;
-
- sz = perf_event__sample_event_size(&sample, sample_type, 0);
- bufsz = sz + 4096; /* Add a bit for overrun checking */
- event = malloc(bufsz);
- if (!event) {
- pr_debug("malloc failed\n");
- return -1;
- }
-
- memset(event, 0xff, bufsz);
- event->header.type = PERF_RECORD_SAMPLE;
- event->header.misc = 0;
- event->header.size = sz;
-
- err = perf_event__synthesize_sample(event, sample_type, 0, &sample);
- if (err) {
- pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
- "perf_event__synthesize_sample", sample_type, err);
- goto out_free;
- }
-
- /* The data does not contain 0xff so we use that to check the size */
- for (i = bufsz; i > 0; i--) {
- if (*(i - 1 + (u8 *)event) != 0xff)
- break;
- }
- if (i != sz) {
- pr_debug("Event size mismatch: actual %zu vs expected %zu\n",
- i, sz);
- goto out_free;
- }
-
- evsel.sample_size = __evsel__sample_size(sample_type);
-
- err = evsel__parse_sample(&evsel, event, &sample_out);
- if (err) {
- pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
- "evsel__parse_sample", sample_type, err);
- goto out_free;
- }
-
- if (!samples_same(&sample, &sample_out, sample_type)) {
- pr_debug("parsing failed for sample_type %#"PRIx64"\n",
- sample_type);
- goto out_free;
- }
-
- ret = 0;
-out_free:
- free(event);
-
- return ret;
-}
-
-/**
- * test__x86_sample_parsing - test X86 specific sample parsing
- *
- * This function implements a test that synthesizes a sample event, parses it
- * and then checks that the parsed sample matches the original sample. If the
- * test passes %0 is returned, otherwise %-1 is returned.
- *
- * For now, the PERF_SAMPLE_WEIGHT_STRUCT is the only X86 specific sample type.
- * The test only checks the PERF_SAMPLE_WEIGHT_STRUCT type.
- */
-int test__x86_sample_parsing(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
-{
- return do_test(PERF_SAMPLE_WEIGHT_STRUCT);
-}
diff --git a/tools/perf/arch/x86/tests/topdown.c b/tools/perf/arch/x86/tests/topdown.c
new file mode 100644
index 000000000000..3ee4e5e71be3
--- /dev/null
+++ b/tools/perf/arch/x86/tests/topdown.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
+#include "arch-tests.h"
+#include "../util/topdown.h"
+#include "debug.h"
+#include "evlist.h"
+#include "parse-events.h"
+#include "pmu.h"
+#include "pmus.h"
+
+static int event_cb(void *state, struct pmu_event_info *info)
+{
+ char buf[256];
+ struct parse_events_error parse_err;
+ int *ret = state, err;
+ struct evlist *evlist = evlist__new();
+ struct evsel *evsel;
+
+ if (!evlist)
+ return -ENOMEM;
+
+ parse_events_error__init(&parse_err);
+ snprintf(buf, sizeof(buf), "%s/%s/", info->pmu->name, info->name);
+ err = parse_events(evlist, buf, &parse_err);
+ if (err) {
+ parse_events_error__print(&parse_err, buf);
+ *ret = TEST_FAIL;
+ }
+ parse_events_error__exit(&parse_err);
+ evlist__for_each_entry(evlist, evsel) {
+ bool fail = false;
+ bool p_core_pmu = evsel->pmu->type == PERF_TYPE_RAW;
+ const char *name = evsel__name(evsel);
+
+ if (strcasestr(name, "uops_retired.slots") ||
+ strcasestr(name, "topdown.backend_bound_slots") ||
+ strcasestr(name, "topdown.br_mispredict_slots") ||
+ strcasestr(name, "topdown.memory_bound_slots") ||
+ strcasestr(name, "topdown.bad_spec_slots") ||
+ strcasestr(name, "topdown.slots_p")) {
+ if (arch_is_topdown_slots(evsel) || arch_is_topdown_metrics(evsel))
+ fail = true;
+ } else if (strcasestr(name, "slots")) {
+ if (arch_is_topdown_slots(evsel) != p_core_pmu ||
+ arch_is_topdown_metrics(evsel))
+ fail = true;
+ } else if (strcasestr(name, "topdown")) {
+ if (arch_is_topdown_slots(evsel) ||
+ arch_is_topdown_metrics(evsel) != p_core_pmu)
+ fail = true;
+ } else if (arch_is_topdown_slots(evsel) || arch_is_topdown_metrics(evsel)) {
+ fail = true;
+ }
+ if (fail) {
+ pr_debug("Broken topdown information for '%s'\n", evsel__name(evsel));
+ *ret = TEST_FAIL;
+ }
+ }
+ evlist__delete(evlist);
+ return 0;
+}
+
+static int test__x86_topdown(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ int ret = TEST_OK;
+ struct perf_pmu *pmu = NULL;
+
+ if (!topdown_sys_has_perf_metrics())
+ return TEST_OK;
+
+ while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
+ if (perf_pmu__for_each_event(pmu, /*skip_duplicate_pmus=*/false, &ret, event_cb))
+ break;
+ }
+ return ret;
+}
+
+DEFINE_SUITE("x86 topdown", x86_topdown);
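
Assuming the usual DEFINE_SUITE() name-to-test mapping, the new suite can be run on its own with something like:

	$ perf test -v 'x86 topdown'

On systems without perf metrics support the test returns TEST_OK immediately, so it is effectively a no-op there.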
diff --git a/tools/perf/arch/x86/util/Build b/tools/perf/arch/x86/util/Build
index 06d7c0205b3d..c0dc5965f362 100644
--- a/tools/perf/arch/x86/util/Build
+++ b/tools/perf/arch/x86/util/Build
@@ -14,7 +14,7 @@ perf-util-y += iostat.o
perf-util-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
perf-util-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-perf-util-$(CONFIG_AUXTRACE) += auxtrace.o
+perf-util-y += auxtrace.o
perf-util-y += archinsn.o
-perf-util-$(CONFIG_AUXTRACE) += intel-pt.o
-perf-util-$(CONFIG_AUXTRACE) += intel-bts.o
+perf-util-y += intel-pt.o
+perf-util-y += intel-bts.o
diff --git a/tools/perf/arch/x86/util/event.c b/tools/perf/arch/x86/util/event.c
index a0400707180c..3cd384317739 100644
--- a/tools/perf/arch/x86/util/event.c
+++ b/tools/perf/arch/x86/util/event.c
@@ -91,49 +91,3 @@ int perf_event__synthesize_extra_kmaps(const struct perf_tool *tool,
}
#endif
-
-void arch_perf_parse_sample_weight(struct perf_sample *data,
- const __u64 *array, u64 type)
-{
- union perf_sample_weight weight;
-
- weight.full = *array;
- if (type & PERF_SAMPLE_WEIGHT)
- data->weight = weight.full;
- else {
- data->weight = weight.var1_dw;
- data->ins_lat = weight.var2_w;
- data->retire_lat = weight.var3_w;
- }
-}
-
-void arch_perf_synthesize_sample_weight(const struct perf_sample *data,
- __u64 *array, u64 type)
-{
- *array = data->weight;
-
- if (type & PERF_SAMPLE_WEIGHT_STRUCT) {
- *array &= 0xffffffff;
- *array |= ((u64)data->ins_lat << 32);
- *array |= ((u64)data->retire_lat << 48);
- }
-}
-
-const char *arch_perf_header_entry(const char *se_header)
-{
- if (!strcmp(se_header, "Local Pipeline Stage Cycle"))
- return "Local Retire Latency";
- else if (!strcmp(se_header, "Pipeline Stage Cycle"))
- return "Retire Latency";
-
- return se_header;
-}
-
-int arch_support_sort_key(const char *sort_key)
-{
- if (!strcmp(sort_key, "p_stage_cyc"))
- return 1;
- if (!strcmp(sort_key, "local_p_stage_cyc"))
- return 1;
- return 0;
-}
diff --git a/tools/perf/arch/x86/util/evlist.c b/tools/perf/arch/x86/util/evlist.c
index 1969758cc8c1..75e9d00a1494 100644
--- a/tools/perf/arch/x86/util/evlist.c
+++ b/tools/perf/arch/x86/util/evlist.c
@@ -81,3 +81,27 @@ int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs)
/* Default ordering by insertion index. */
return lhs->core.idx - rhs->core.idx;
}
+
+int arch_evlist__add_required_events(struct list_head *list)
+{
+ struct evsel *pos, *metric_event = NULL;
+ int idx = 0;
+
+ if (!topdown_sys_has_perf_metrics())
+ return 0;
+
+ list_for_each_entry(pos, list, core.node) {
+ if (arch_is_topdown_slots(pos)) {
+ /* Slots event already present, nothing to do. */
+ return 0;
+ }
+ if (metric_event == NULL && arch_is_topdown_metrics(pos))
+ metric_event = pos;
+ idx++;
+ }
+ if (metric_event == NULL) {
+ /* No topdown metric events, nothing to do. */
+ return 0;
+ }
+ return topdown_insert_slots_event(list, idx + 1, metric_event);
+}
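
The effect is that a topdown metric event parsed without an accompanying slots event gets one synthesized for it via topdown_insert_slots_event() (added later in this series). For illustration, on a perf-metrics capable CPU a command such as

	$ perf stat -e topdown-retiring -a sleep 1

should now behave as if a slots event had been requested alongside the metric event: the synthesized slots evsel is appended to the list and given the metric event's group leader, with the existing arch_evlist__cmp() ordering relied upon to place it correctly within the group.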
diff --git a/tools/perf/arch/x86/util/evsel.c b/tools/perf/arch/x86/util/evsel.c
index 3dd29ba2c23b..23a8e662a912 100644
--- a/tools/perf/arch/x86/util/evsel.c
+++ b/tools/perf/arch/x86/util/evsel.c
@@ -1,10 +1,15 @@
// SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
+#include "util/evlist.h"
#include "util/evsel.h"
+#include "util/evsel_config.h"
#include "util/env.h"
#include "util/pmu.h"
#include "util/pmus.h"
+#include "util/stat.h"
+#include "util/strbuf.h"
#include "linux/string.h"
#include "topdown.h"
#include "evsel.h"
@@ -23,47 +28,25 @@ void arch_evsel__set_sample_weight(struct evsel *evsel)
bool evsel__sys_has_perf_metrics(const struct evsel *evsel)
{
struct perf_pmu *pmu;
- u32 type = evsel->core.attr.type;
- /*
- * The PERF_TYPE_RAW type is the core PMU type, e.g., "cpu" PMU
- * on a non-hybrid machine, "cpu_core" PMU on a hybrid machine.
- * The slots event is only available for the core PMU, which
- * supports the perf metrics feature.
- * Checking both the PERF_TYPE_RAW type and the slots event
- * should be good enough to detect the perf metrics feature.
- */
-again:
- switch (type) {
- case PERF_TYPE_HARDWARE:
- case PERF_TYPE_HW_CACHE:
- type = evsel->core.attr.config >> PERF_PMU_TYPE_SHIFT;
- if (type)
- goto again;
- break;
- case PERF_TYPE_RAW:
- break;
- default:
+ if (!topdown_sys_has_perf_metrics())
return false;
- }
- pmu = evsel->pmu;
- if (pmu && perf_pmu__is_fake(pmu))
- pmu = NULL;
-
- if (!pmu) {
- while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
- if (pmu->type == PERF_TYPE_RAW)
- break;
- }
- }
- return pmu && perf_pmu__have_event(pmu, "slots");
+ /*
+ * The PERF_TYPE_RAW type is the core PMU type, e.g., "cpu" PMU on a
+ * non-hybrid machine, "cpu_core" PMU on a hybrid machine.
+ * topdown_sys_has_perf_metrics() checks for the slots event, which is
+ * only available on a core PMU that supports the perf metrics
+ * feature. Checking both the PERF_TYPE_RAW type and the slots event
+ * should be good enough to detect the perf metrics feature.
+ */
+ pmu = evsel__find_pmu(evsel);
+ return pmu && pmu->type == PERF_TYPE_RAW;
}
bool arch_evsel__must_be_in_group(const struct evsel *evsel)
{
- if (!evsel__sys_has_perf_metrics(evsel) || !evsel->name ||
- strcasestr(evsel->name, "uops_retired.slots"))
+ if (!evsel__sys_has_perf_metrics(evsel))
return false;
return arch_is_topdown_metrics(evsel) || arch_is_topdown_slots(evsel);
@@ -89,6 +72,57 @@ int arch_evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
event_name);
}
+void arch_evsel__apply_ratio_to_prev(struct evsel *evsel,
+ struct perf_event_attr *attr)
+{
+ struct perf_event_attr *prev_attr = NULL;
+ struct evsel *evsel_prev = NULL;
+ const char *name = "acr_mask";
+ int evsel_idx = 0;
+ __u64 ev_mask, pr_ev_mask;
+
+ if (!perf_pmu__has_format(evsel->pmu, name)) {
+ pr_err("'%s' does not have acr_mask format support\n", evsel->pmu->name);
+ return;
+ }
+ if (perf_pmu__format_type(evsel->pmu, name) !=
+ PERF_PMU_FORMAT_VALUE_CONFIG2) {
+ pr_err("'%s' does not have config2 format support\n", evsel->pmu->name);
+ return;
+ }
+
+ evsel_prev = evsel__prev(evsel);
+ if (!evsel_prev) {
+ pr_err("Previous event does not exist.\n");
+ return;
+ }
+
+ prev_attr = &evsel_prev->core.attr;
+
+ if (prev_attr->config2) {
+ pr_err("'%s' has set config2 (acr_mask?) already, configuration not supported\n", evsel_prev->name);
+ return;
+ }
+
+ /*
+ * acr_mask (config2) is calculated using the event's index in
+ * the event group. The first event will use the index of the
+ * second event as its mask (e.g., 0x2), indicating that the
+ * second event counter will be reset and a sample taken for
+ * the first event if its counter overflows. The second event
+ * will use the mask consisting of the first and second bits
+ * (e.g., 0x3), meaning both counters will be reset if the
+ * second event counter overflows.
+ */
+
+ evsel_idx = evsel__group_idx(evsel);
+ ev_mask = 1ull << evsel_idx;
+ pr_ev_mask = 1ull << (evsel_idx - 1);
+
+ prev_attr->config2 = ev_mask;
+ attr->config2 = ev_mask | pr_ev_mask;
+}
+
static void ibs_l3miss_warn(void)
{
pr_warning(
@@ -124,13 +158,15 @@ void arch__post_evsel_config(struct evsel *evsel, struct perf_event_attr *attr)
}
}
-int arch_evsel__open_strerror(struct evsel *evsel, char *msg, size_t size)
+static int amd_evsel__open_strerror(struct evsel *evsel, char *msg, size_t size)
{
- if (!x86__is_amd_cpu())
+ struct perf_pmu *pmu;
+
+ if (evsel->core.attr.precise_ip == 0)
return 0;
- if (!evsel->core.attr.precise_ip &&
- !(evsel->pmu && !strncmp(evsel->pmu->name, "ibs", 3)))
+ pmu = evsel__find_pmu(evsel);
+ if (!pmu || strncmp(pmu->name, "ibs", 3))
return 0;
/* More verbose IBS errors. */
@@ -140,6 +176,54 @@ int arch_evsel__open_strerror(struct evsel *evsel, char *msg, size_t size)
return scnprintf(msg, size, "AMD IBS doesn't support privilege filtering. Try "
"again without the privilege modifiers (like 'k') at the end.");
}
+ return 0;
+}
+
+static int intel_evsel__open_strerror(struct evsel *evsel, int err, char *msg, size_t size)
+{
+ struct strbuf sb = STRBUF_INIT;
+ int ret;
+ if (err != EINVAL)
+ return 0;
+
+ if (!topdown_sys_has_perf_metrics())
+ return 0;
+
+ if (arch_is_topdown_slots(evsel)) {
+ if (!evsel__is_group_leader(evsel)) {
+ evlist__uniquify_evsel_names(evsel->evlist, &stat_config);
+ evlist__format_evsels(evsel->evlist, &sb, 2048);
+ ret = scnprintf(msg, size, "Topdown slots event can only be group leader "
+ "in '%s'.", sb.buf);
+ strbuf_release(&sb);
+ return ret;
+ }
+ } else if (arch_is_topdown_metrics(evsel)) {
+ struct evsel *pos;
+
+ evlist__for_each_entry(evsel->evlist, pos) {
+ if (pos == evsel || !arch_is_topdown_metrics(pos))
+ continue;
+
+ if (pos->core.attr.config != evsel->core.attr.config)
+ continue;
+
+ evlist__uniquify_evsel_names(evsel->evlist, &stat_config);
+ evlist__format_evsels(evsel->evlist, &sb, 2048);
+ ret = scnprintf(msg, size, "Perf metric event '%s' is duplicated "
+ "in the same group (only one event is allowed) in '%s'.",
+ evsel__name(evsel), sb.buf);
+ strbuf_release(&sb);
+ return ret;
+ }
+ }
return 0;
}
+
+int arch_evsel__open_strerror(struct evsel *evsel, int err, char *msg, size_t size)
+{
+ return x86__is_amd_cpu()
+ ? amd_evsel__open_strerror(evsel, msg, size)
+ : intel_evsel__open_strerror(evsel, err, msg, size);
+}
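
To make the acr_mask arithmetic concrete, here is the computation for the two-event group described in the comment (a sketch; evsel_idx comes from evsel__group_idx()):

	/* Second event of the group: evsel_idx == 1. */
	__u64 ev_mask    = 1ull << 1;	/* 0x2: bit of this event */
	__u64 pr_ev_mask = 1ull << 0;	/* 0x1: bit of the previous event */

	/* 0x2: when event 0 overflows, reset counter 1 and sample. */
	prev_attr->config2 = ev_mask;
	/* 0x3: when event 1 overflows, reset counters 0 and 1. */
	attr->config2 = ev_mask | pr_ev_mask;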
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index 8f235d8b67b6..b394ad9cc635 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -12,13 +12,13 @@
#include <linux/log2.h>
#include <linux/zalloc.h>
#include <linux/err.h>
-#include <cpuid.h>
#include "../../../util/session.h"
#include "../../../util/event.h"
#include "../../../util/evlist.h"
#include "../../../util/evsel.h"
#include "../../../util/evsel_config.h"
+#include "../../../util/config.h"
#include "../../../util/cpumap.h"
#include "../../../util/mmap.h"
#include <subcmd/parse-options.h>
@@ -33,6 +33,7 @@
#include <internal/lib.h> // page_size
#include "../../../util/intel-pt.h"
#include <api/fs/fs.h>
+#include "cpuid.h"
#define KiB(x) ((x) * 1024)
#define MiB(x) ((x) * 1024 * 1024)
@@ -52,6 +53,7 @@ struct intel_pt_recording {
struct perf_pmu *intel_pt_pmu;
int have_sched_switch;
struct evlist *evlist;
+ bool all_switch_events;
bool snapshot_mode;
bool snapshot_init_done;
size_t snapshot_size;
@@ -70,7 +72,7 @@ static int intel_pt_parse_terms_with_default(const struct perf_pmu *pmu,
int err;
parse_events_terms__init(&terms);
- err = parse_events_terms(&terms, str, /*input=*/ NULL);
+ err = parse_events_terms(&terms, str);
if (err)
goto out_free;
@@ -309,7 +311,7 @@ static void intel_pt_tsc_ctc_ratio(u32 *n, u32 *d)
{
unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
- __get_cpuid(0x15, &eax, &ebx, &ecx, &edx);
+ cpuid(0x15, 0, &eax, &ebx, &ecx, &edx);
*n = ebx;
*d = eax;
}
@@ -794,7 +796,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
bool cpu_wide = !target__none(&opts->target) &&
!target__has_task(&opts->target);
- if (!cpu_wide && perf_can_record_cpu_wide()) {
+ if (ptr->all_switch_events && !cpu_wide && perf_can_record_cpu_wide()) {
struct evsel *switch_evsel;
switch_evsel = evlist__add_dummy_on_all_cpus(evlist);
@@ -1178,6 +1180,16 @@ static u64 intel_pt_reference(struct auxtrace_record *itr __maybe_unused)
return rdtsc();
}
+static int intel_pt_perf_config(const char *var, const char *value, void *data)
+{
+ struct intel_pt_recording *ptr = data;
+
+ if (!strcmp(var, "intel-pt.all-switch-events"))
+ ptr->all_switch_events = perf_config_bool(var, value);
+
+ return 0;
+}
+
struct auxtrace_record *intel_pt_recording_init(int *err)
{
struct perf_pmu *intel_pt_pmu = perf_pmus__find(INTEL_PT_PMU_NAME);
@@ -1197,6 +1209,8 @@ struct auxtrace_record *intel_pt_recording_init(int *err)
return NULL;
}
+ perf_config(intel_pt_perf_config, ptr);
+
ptr->intel_pt_pmu = intel_pt_pmu;
ptr->itr.recording_options = intel_pt_recording_options;
ptr->itr.info_priv_size = intel_pt_info_priv_size;
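
Since the new knob is read through perf_config(), it follows the standard section.key mapping of perf configuration variables, so enabling it should look like:

	# ~/.perfconfig
	[intel-pt]
		all-switch-events = true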
diff --git a/tools/perf/arch/x86/util/kvm-stat.c b/tools/perf/arch/x86/util/kvm-stat.c
index 424716518b75..bff36f9345ea 100644
--- a/tools/perf/arch/x86/util/kvm-stat.c
+++ b/tools/perf/arch/x86/util/kvm-stat.c
@@ -3,9 +3,11 @@
#include <string.h>
#include "../../../util/kvm-stat.h"
#include "../../../util/evsel.h"
+#include "../../../util/env.h"
#include <asm/svm.h>
#include <asm/vmx.h>
#include <asm/kvm.h>
+#include <subcmd/parse-options.h>
define_exit_reasons_table(vmx_exit_reasons, VMX_EXIT_REASONS);
define_exit_reasons_table(svm_exit_reasons, SVM_EXIT_REASONS);
@@ -211,3 +213,52 @@ int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid)
return 0;
}
+
+/*
+ * After KVM supports PEBS for guest on Intel platforms
+ * (https://lore.kernel.org/all/20220411101946.20262-1-likexu@tencent.com/),
+ * host loses the capability to sample guest with PEBS since all PEBS related
+ * MSRs are switched to guest value after vm-entry, like IA32_DS_AREA MSR is
+ * switched to guest GVA at vm-entry. This would lead to "perf kvm record"
+ * fails to sample guest on Intel platforms since "cycles:P" event is used to
+ * sample guest by default.
+ *
+ * So, to avoid this issue explicitly use "cycles" instead of "cycles:P" event
+ * by default to sample guest on Intel platforms.
+ */
+int kvm_add_default_arch_event(int *argc, const char **argv)
+{
+ const char **tmp;
+ bool event = false;
+ int ret = 0, i, j = *argc;
+
+ const struct option event_options[] = {
+ OPT_BOOLEAN('e', "event", &event, NULL),
+ OPT_BOOLEAN(0, "pfm-events", &event, NULL),
+ OPT_END()
+ };
+
+ if (!x86__is_intel_cpu())
+ return 0;
+
+ tmp = calloc(j + 1, sizeof(char *));
+ if (!tmp)
+ return -ENOMEM;
+
+ for (i = 0; i < j; i++)
+ tmp[i] = argv[i];
+
+ parse_options(j, tmp, event_options, NULL, PARSE_OPT_KEEP_UNKNOWN);
+ if (!event) {
+ argv[j++] = STRDUP_FAIL_EXIT("-e");
+ argv[j++] = STRDUP_FAIL_EXIT("cycles");
+ *argc += 2;
+ }
+
+ free(tmp);
+ return 0;
+
+EXIT:
+ free(tmp);
+ return ret;
+}
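
In practice this means that on Intel hosts 'perf kvm record' without an explicit -e/--pfm-events option now behaves as if '-e cycles' had been passed, trading PEBS precision for the ability to sample the guest at all.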
diff --git a/tools/perf/arch/x86/util/mem-events.c b/tools/perf/arch/x86/util/mem-events.c
index 62df03e91c7e..b38f519020ff 100644
--- a/tools/perf/arch/x86/util/mem-events.c
+++ b/tools/perf/arch/x86/util/mem-events.c
@@ -26,3 +26,9 @@ struct perf_mem_event perf_mem_events_amd[PERF_MEM_EVENTS__MAX] = {
E(NULL, NULL, NULL, false, 0),
E("mem-ldst", "%s//", NULL, false, 0),
};
+
+struct perf_mem_event perf_mem_events_amd_ldlat[PERF_MEM_EVENTS__MAX] = {
+ E(NULL, NULL, NULL, false, 0),
+ E(NULL, NULL, NULL, false, 0),
+ E("mem-ldst", "%s/ldlat=%u/", NULL, true, 0),
+};
diff --git a/tools/perf/arch/x86/util/mem-events.h b/tools/perf/arch/x86/util/mem-events.h
index f55c8d3b7d59..11e09a256f5b 100644
--- a/tools/perf/arch/x86/util/mem-events.h
+++ b/tools/perf/arch/x86/util/mem-events.h
@@ -6,5 +6,6 @@ extern struct perf_mem_event perf_mem_events_intel[PERF_MEM_EVENTS__MAX];
extern struct perf_mem_event perf_mem_events_intel_aux[PERF_MEM_EVENTS__MAX];
extern struct perf_mem_event perf_mem_events_amd[PERF_MEM_EVENTS__MAX];
+extern struct perf_mem_event perf_mem_events_amd_ldlat[PERF_MEM_EVENTS__MAX];
#endif /* _X86_MEM_EVENTS_H */
diff --git a/tools/perf/arch/x86/util/pmu.c b/tools/perf/arch/x86/util/pmu.c
index e0060dac2a9f..a3f96221758d 100644
--- a/tools/perf/arch/x86/util/pmu.c
+++ b/tools/perf/arch/x86/util/pmu.c
@@ -8,6 +8,8 @@
#include <linux/perf_event.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>
+#include <api/io_dir.h>
+#include <internal/cpumap.h>
#include <errno.h>
#include "../../../util/intel-pt.h"
@@ -16,11 +18,261 @@
#include "../../../util/fncache.h"
#include "../../../util/pmus.h"
#include "mem-events.h"
+#include "util/debug.h"
#include "util/env.h"
+#include "util/header.h"
-void perf_pmu__arch_init(struct perf_pmu *pmu __maybe_unused)
+static bool x86__is_intel_graniterapids(void)
{
-#ifdef HAVE_AUXTRACE_SUPPORT
+ static bool checked_if_graniterapids;
+ static bool is_graniterapids;
+
+ if (!checked_if_graniterapids) {
+ const char *graniterapids_cpuid = "GenuineIntel-6-A[DE]";
+ char *cpuid = get_cpuid_str((struct perf_cpu){0});
+
+ is_graniterapids = cpuid && strcmp_cpuid_str(graniterapids_cpuid, cpuid) == 0;
+ free(cpuid);
+ checked_if_graniterapids = true;
+ }
+ return is_graniterapids;
+}
+
+static struct perf_cpu_map *read_sysfs_cpu_map(const char *sysfs_path)
+{
+ struct perf_cpu_map *cpus;
+ char *buf = NULL;
+ size_t buf_len;
+
+ if (sysfs__read_str(sysfs_path, &buf, &buf_len) < 0)
+ return NULL;
+
+ cpus = perf_cpu_map__new(buf);
+ free(buf);
+ return cpus;
+}
+
+static int snc_nodes_per_l3_cache(void)
+{
+ static bool checked_snc;
+ static int snc_nodes;
+
+ if (!checked_snc) {
+ struct perf_cpu_map *node_cpus =
+ read_sysfs_cpu_map("devices/system/node/node0/cpulist");
+ struct perf_cpu_map *cache_cpus =
+ read_sysfs_cpu_map("devices/system/cpu/cpu0/cache/index3/shared_cpu_list");
+
+ snc_nodes = perf_cpu_map__nr(cache_cpus) / perf_cpu_map__nr(node_cpus);
+ perf_cpu_map__put(cache_cpus);
+ perf_cpu_map__put(node_cpus);
+ checked_snc = true;
+ }
+ return snc_nodes;
+}
+
+static bool starts_with(const char *str, const char *prefix)
+{
+ return !strncmp(prefix, str, strlen(prefix));
+}
+
+static int num_chas(void)
+{
+ static bool checked_chas;
+ static int num_chas;
+
+ if (!checked_chas) {
+ int fd = perf_pmu__event_source_devices_fd();
+ struct io_dir dir;
+ struct io_dirent64 *dent;
+
+ if (fd < 0)
+ return -1;
+
+ io_dir__init(&dir, fd);
+
+ while ((dent = io_dir__readdir(&dir)) != NULL) {
+ /* Note, dent->d_type will be DT_LNK and so isn't a useful filter. */
+ if (starts_with(dent->d_name, "uncore_cha_"))
+ num_chas++;
+ }
+ close(fd);
+ checked_chas = true;
+ }
+ return num_chas;
+}
+
+#define MAX_SNCS 6
+
+static int uncore_cha_snc(struct perf_pmu *pmu)
+{
+ // CHA SNC numbers are ordered to correspond to the CHA numbers.
+ unsigned int cha_num;
+ int num_cha, chas_per_node, cha_snc;
+ int snc_nodes = snc_nodes_per_l3_cache();
+
+ if (snc_nodes <= 1)
+ return 0;
+
+ num_cha = num_chas();
+ if (num_cha <= 0) {
+ pr_warning("Unexpected: no CHAs found\n");
+ return 0;
+ }
+
+ /* Compute SNC for PMU. */
+ if (sscanf(pmu->name, "uncore_cha_%u", &cha_num) != 1) {
+ pr_warning("Unexpected: unable to compute CHA number '%s'\n", pmu->name);
+ return 0;
+ }
+ chas_per_node = num_cha / snc_nodes;
+ cha_snc = cha_num / chas_per_node;
+
+ /* Range check cha_snc for unexpected out-of-bounds values. */
+ return cha_snc >= MAX_SNCS ? 0 : cha_snc;
+}
+
+static int uncore_imc_snc(struct perf_pmu *pmu)
+{
+ // Compute the IMC SNC using lookup tables.
+ unsigned int imc_num;
+ int snc_nodes = snc_nodes_per_l3_cache();
+ const u8 snc2_map[] = {1, 1, 0, 0, 1, 1, 0, 0};
+ const u8 snc3_map[] = {1, 1, 0, 0, 2, 2, 1, 1, 0, 0, 2, 2};
+ const u8 *snc_map;
+ size_t snc_map_len;
+
+ switch (snc_nodes) {
+ case 2:
+ snc_map = snc2_map;
+ snc_map_len = ARRAY_SIZE(snc2_map);
+ break;
+ case 3:
+ snc_map = snc3_map;
+ snc_map_len = ARRAY_SIZE(snc3_map);
+ break;
+ default:
+ /* Error or no lookup support for SNC with >3 nodes. */
+ return 0;
+ }
+
+ /* Compute SNC for PMU. */
+ if (sscanf(pmu->name, "uncore_imc_%u", &imc_num) != 1) {
+ pr_warning("Unexpected: unable to compute IMC number '%s'\n", pmu->name);
+ return 0;
+ }
+ if (imc_num >= snc_map_len) {
+ pr_warning("Unexpected IMC %d for SNC%d mapping\n", imc_num, snc_nodes);
+ return 0;
+ }
+ return snc_map[imc_num];
+}
+
+static int uncore_cha_imc_compute_cpu_adjust(int pmu_snc)
+{
+ static bool checked_cpu_adjust[MAX_SNCS];
+ static int cpu_adjust[MAX_SNCS];
+ struct perf_cpu_map *node_cpus;
+ char node_path[] = "devices/system/node/node0/cpulist";
+
+ /* Was adjust already computed? */
+ if (checked_cpu_adjust[pmu_snc])
+ return cpu_adjust[pmu_snc];
+
+ /* SNC0 doesn't need an adjust. */
+ if (pmu_snc == 0) {
+ cpu_adjust[0] = 0;
+ checked_cpu_adjust[0] = true;
+ return 0;
+ }
+
+ /*
+ * Use NUMA topology to compute first CPU of the NUMA node, we want to
+ * adjust CPU 0 to be this and similarly for other CPUs if there is >1
+ * socket.
+ */
+ assert(pmu_snc >= 0 && pmu_snc <= 9);
+ node_path[24] += pmu_snc; // Shift node0 to be node<pmu_snc>.
+ node_cpus = read_sysfs_cpu_map(node_path);
+ cpu_adjust[pmu_snc] = perf_cpu_map__cpu(node_cpus, 0).cpu;
+ if (cpu_adjust[pmu_snc] < 0) {
+ pr_debug("Failed to read valid CPU list from <sysfs>/%s\n", node_path);
+ cpu_adjust[pmu_snc] = 0;
+ } else {
+ checked_cpu_adjust[pmu_snc] = true;
+ }
+ perf_cpu_map__put(node_cpus);
+ return cpu_adjust[pmu_snc];
+}
+
+static void gnr_uncore_cha_imc_adjust_cpumask_for_snc(struct perf_pmu *pmu, bool cha)
+{
+ // With sub-NUMA clustering (SNC) there is a NUMA node per SNC in the
+ // topology. For example, a two socket graniterapids machine may be set
+ // up with 3-way SNC meaning there are 6 NUMA nodes that should be
+ // displayed with --per-node. The cpumask of the CHA and IMC PMUs
+ // reflects per-socket information meaning, for example, uncore_cha_60
+ // on a two socket graniterapids machine with 120 cores per socket will
+ // have a cpumask of "0,120". This cpumask needs adjusting to "40,160"
+ // to reflect that uncore_cha_60 is used for the 2nd SNC of each
+ // socket. Without the adjustment events on uncore_cha_60 will appear in
+ // node 0 and node 3 (in our example 2 socket 3-way set up), but with
+ // the adjustment they will appear in node 1 and node 4. The number of
+ // CHAs is typically larger than the number of cores. The CHA numbers
+ // are assumed to split evenly and in order with respect to core numbers. There are
+ // fewer memory IMC PMUs than cores and mapping is handled using lookup
+ // tables.
+ static struct perf_cpu_map *cha_adjusted[MAX_SNCS];
+ static struct perf_cpu_map *imc_adjusted[MAX_SNCS];
+ struct perf_cpu_map **adjusted = cha ? cha_adjusted : imc_adjusted;
+ int idx, pmu_snc, cpu_adjust;
+ struct perf_cpu cpu;
+ bool alloc;
+
+ // The cpumask from the kernel holds the first CPU of each socket, e.g. 0,120.
+ if (perf_cpu_map__cpu(pmu->cpus, 0).cpu != 0) {
+ pr_debug("Ignoring cpumask adjust for %s as unexpected first CPU\n", pmu->name);
+ return;
+ }
+
+ pmu_snc = cha ? uncore_cha_snc(pmu) : uncore_imc_snc(pmu);
+ if (pmu_snc == 0) {
+ // No adjustment necessary for the first SNC.
+ return;
+ }
+
+ alloc = adjusted[pmu_snc] == NULL;
+ if (alloc) {
+ // Hold onto the perf_cpu_map globally to avoid recomputation.
+ cpu_adjust = uncore_cha_imc_compute_cpu_adjust(pmu_snc);
+ adjusted[pmu_snc] = perf_cpu_map__empty_new(perf_cpu_map__nr(pmu->cpus));
+ if (!adjusted[pmu_snc])
+ return;
+ }
+
+ perf_cpu_map__for_each_cpu(cpu, idx, pmu->cpus) {
+ // Compute the new cpu map values or, if not allocating, assert
+ // that they match expectations. The asserts compile away in
+ // NDEBUG builds, avoiding any overhead there.
+ if (alloc) {
+ RC_CHK_ACCESS(adjusted[pmu_snc])->map[idx].cpu = cpu.cpu + cpu_adjust;
+ } else if (idx == 0) {
+ cpu_adjust = perf_cpu_map__cpu(adjusted[pmu_snc], idx).cpu - cpu.cpu;
+ assert(uncore_cha_imc_compute_cpu_adjust(pmu_snc) == cpu_adjust);
+ } else {
+ assert(perf_cpu_map__cpu(adjusted[pmu_snc], idx).cpu ==
+ cpu.cpu + cpu_adjust);
+ }
+ }
+
+ perf_cpu_map__put(pmu->cpus);
+ pmu->cpus = perf_cpu_map__get(adjusted[pmu_snc]);
+}
+
+void perf_pmu__arch_init(struct perf_pmu *pmu)
+{
+ struct perf_pmu_caps *ldlat_cap;
+
if (!strcmp(pmu->name, INTEL_PT_PMU_NAME)) {
pmu->auxtrace = true;
pmu->selectable = true;
@@ -30,15 +282,33 @@ void perf_pmu__arch_init(struct perf_pmu *pmu __maybe_unused)
pmu->auxtrace = true;
pmu->selectable = true;
}
-#endif
if (x86__is_amd_cpu()) {
- if (!strcmp(pmu->name, "ibs_op"))
- pmu->mem_events = perf_mem_events_amd;
- } else if (pmu->is_core) {
- if (perf_pmu__have_event(pmu, "mem-loads-aux"))
- pmu->mem_events = perf_mem_events_intel_aux;
- else
- pmu->mem_events = perf_mem_events_intel;
+ if (strcmp(pmu->name, "ibs_op"))
+ return;
+
+ pmu->mem_events = perf_mem_events_amd;
+
+ if (!perf_pmu__caps_parse(pmu))
+ return;
+
+ ldlat_cap = perf_pmu__get_cap(pmu, "ldlat");
+ if (!ldlat_cap || strcmp(ldlat_cap->value, "1"))
+ return;
+
+ perf_mem_events__loads_ldlat = 0;
+ pmu->mem_events = perf_mem_events_amd_ldlat;
+ } else {
+ if (pmu->is_core) {
+ if (perf_pmu__have_event(pmu, "mem-loads-aux"))
+ pmu->mem_events = perf_mem_events_intel_aux;
+ else
+ pmu->mem_events = perf_mem_events_intel;
+ } else if (x86__is_intel_graniterapids()) {
+ if (starts_with(pmu->name, "uncore_cha_"))
+ gnr_uncore_cha_imc_adjust_cpumask_for_snc(pmu, /*cha=*/true);
+ else if (starts_with(pmu->name, "uncore_imc_"))
+ gnr_uncore_cha_imc_adjust_cpumask_for_snc(pmu, /*cha=*/false);
+ }
}
}
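
A worked example of the adjustment, reusing the two-socket, 3-way SNC graniterapids configuration from the comment above (120 CHAs per socket is assumed here for simplicity):

	/* 120 CPUs share the L3 but only 40 CPUs sit in node0,
	 * so snc_nodes_per_l3_cache() returns 120 / 40 = 3. */
	chas_per_node = 120 / 3;	/* 40 */
	cha_snc       = 60 / 40;	/* uncore_cha_60 -> SNC 1 */
	/* The first CPU of node1 is 40, so the "0,120" cpumask from
	 * the kernel is adjusted to "40,160". */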
diff --git a/tools/perf/arch/x86/util/topdown.c b/tools/perf/arch/x86/util/topdown.c
index d1c654839049..bafd285119d7 100644
--- a/tools/perf/arch/x86/util/topdown.c
+++ b/tools/perf/arch/x86/util/topdown.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-#include "api/fs/fs.h"
-#include "util/evsel.h"
+#include <errno.h>
#include "util/evlist.h"
#include "util/pmu.h"
#include "util/pmus.h"
@@ -8,6 +7,9 @@
#include "topdown.h"
#include "evsel.h"
+// cmask=0, inv=0, pc=0, edge=0, umask=4, event=0
+#define TOPDOWN_SLOTS 0x0400
+
/* Check whether there is a PMU which supports the perf metrics. */
bool topdown_sys_has_perf_metrics(void)
{
@@ -32,31 +34,19 @@ bool topdown_sys_has_perf_metrics(void)
return has_perf_metrics;
}
-#define TOPDOWN_SLOTS 0x0400
bool arch_is_topdown_slots(const struct evsel *evsel)
{
- if (evsel->core.attr.config == TOPDOWN_SLOTS)
- return true;
-
- return false;
+ return evsel->core.attr.type == PERF_TYPE_RAW &&
+ evsel->core.attr.config == TOPDOWN_SLOTS &&
+ evsel->core.attr.config1 == 0;
}
bool arch_is_topdown_metrics(const struct evsel *evsel)
{
- int config = evsel->core.attr.config;
- const char *name_from_config;
- struct perf_pmu *pmu;
-
- /* All topdown events have an event code of 0. */
- if ((config & 0xFF) != 0)
- return false;
-
- pmu = evsel__find_pmu(evsel);
- if (!pmu || !pmu->is_core)
- return false;
-
- name_from_config = perf_pmu__name_from_config(pmu, config);
- return name_from_config && strcasestr(name_from_config, "topdown");
+ // cmask=0, inv=0, pc=0, edge=0, umask=0x80-0x87, event=0
+ return evsel->core.attr.type == PERF_TYPE_RAW &&
+ (evsel->core.attr.config & 0xFFFFF8FF) == 0x8000 &&
+ evsel->core.attr.config1 == 0;
}
/*
@@ -88,3 +78,31 @@ bool arch_topdown_sample_read(struct evsel *leader)
return false;
}
+
+/*
+ * Make a copy of the topdown metric event metric_event with the given index but
+ * change its configuration to be a topdown slots event. Copying from
+ * metric_event ensures modifiers are the same.
+ */
+int topdown_insert_slots_event(struct list_head *list, int idx, struct evsel *metric_event)
+{
+ struct evsel *evsel = evsel__new_idx(&metric_event->core.attr, idx);
+
+ if (!evsel)
+ return -ENOMEM;
+
+ evsel->core.attr.config = TOPDOWN_SLOTS;
+ evsel->core.cpus = perf_cpu_map__get(metric_event->core.cpus);
+ evsel->core.pmu_cpus = perf_cpu_map__get(metric_event->core.pmu_cpus);
+ evsel->core.is_pmu_core = true;
+ evsel->pmu = metric_event->pmu;
+ evsel->name = strdup("slots");
+ evsel->precise_max = metric_event->precise_max;
+ evsel->sample_read = metric_event->sample_read;
+ evsel->weak_group = metric_event->weak_group;
+ evsel->bpf_counter = metric_event->bpf_counter;
+ evsel->retire_lat = metric_event->retire_lat;
+ evsel__set_leader(evsel, evsel__leader(metric_event));
+ list_add_tail(&evsel->core.node, list);
+ return 0;
+}
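
For reference, the raw encodings used above decompose as follows (a sketch of the Intel core PMU config layout, with the event select in bits 0-7 and the umask in bits 8-15):

	/* TOPDOWN_SLOTS = 0x0400: event=0x00, umask=0x04. */

	/* Metric events have event=0x00, umask=0x80..0x87 and no other
	 * config bits set, hence the mask that clears umask bits 0-2: */
	bool is_metric = (config & 0xFFFFF8FF) == 0x8000;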
diff --git a/tools/perf/arch/x86/util/topdown.h b/tools/perf/arch/x86/util/topdown.h
index 1bae9b1822d7..69035565e649 100644
--- a/tools/perf/arch/x86/util/topdown.h
+++ b/tools/perf/arch/x86/util/topdown.h
@@ -2,8 +2,14 @@
#ifndef _TOPDOWN_H
#define _TOPDOWN_H 1
+#include <stdbool.h>
+
+struct evsel;
+struct list_head;
+
bool topdown_sys_has_perf_metrics(void);
bool arch_is_topdown_slots(const struct evsel *evsel);
bool arch_is_topdown_metrics(const struct evsel *evsel);
+int topdown_insert_slots_event(struct list_head *list, int idx, struct evsel *metric_event);
#endif
diff --git a/tools/perf/arch/xtensa/entry/syscalls/syscall.tbl b/tools/perf/arch/xtensa/entry/syscalls/syscall.tbl
index f657a77314f8..374e4cb788d8 100644
--- a/tools/perf/arch/xtensa/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/xtensa/entry/syscalls/syscall.tbl
@@ -438,3 +438,5 @@
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
467 common open_tree_attr sys_open_tree_attr
+468 common file_getattr sys_file_getattr
+469 common file_setattr sys_file_setattr
diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h
index 9f736423af53..8519eb5a42fa 100644
--- a/tools/perf/bench/bench.h
+++ b/tools/perf/bench/bench.h
@@ -28,6 +28,7 @@ int bench_syscall_fork(int argc, const char **argv);
int bench_syscall_execve(int argc, const char **argv);
int bench_mem_memcpy(int argc, const char **argv);
int bench_mem_memset(int argc, const char **argv);
+int bench_mem_mmap(int argc, const char **argv);
int bench_mem_find_bit(int argc, const char **argv);
int bench_futex_hash(int argc, const char **argv);
int bench_futex_wake(int argc, const char **argv);
diff --git a/tools/perf/bench/evlist-open-close.c b/tools/perf/bench/evlist-open-close.c
index 5a27691469ed..faf9c34b4a5d 100644
--- a/tools/perf/bench/evlist-open-close.c
+++ b/tools/perf/bench/evlist-open-close.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
@@ -46,25 +47,6 @@ static struct record_opts opts = {
.ctl_fd_ack = -1,
};
-static const struct option options[] = {
- OPT_STRING('e', "event", &event_string, "event", "event selector. use 'perf list' to list available events"),
- OPT_INTEGER('n', "nr-events", &nr_events,
- "number of dummy events to create (default 1). If used with -e, it clones those events n times (1 = no change)"),
- OPT_INTEGER('i', "iterations", &iterations, "Number of iterations used to compute average (default=100)"),
- OPT_BOOLEAN('a', "all-cpus", &opts.target.system_wide, "system-wide collection from all CPUs"),
- OPT_STRING('C', "cpu", &opts.target.cpu_list, "cpu", "list of cpus where to open events"),
- OPT_STRING('p', "pid", &opts.target.pid, "pid", "record events on existing process id"),
- OPT_STRING('t', "tid", &opts.target.tid, "tid", "record events on existing thread id"),
- OPT_STRING('u', "uid", &opts.target.uid_str, "user", "user to profile"),
- OPT_BOOLEAN(0, "per-thread", &opts.target.per_thread, "use per-thread mmaps"),
- OPT_END()
-};
-
-static const char *const bench_usage[] = {
- "perf bench internals evlist-open-close <options>",
- NULL
-};
-
static int evlist__count_evsel_fds(struct evlist *evlist)
{
struct evsel *evsel;
@@ -76,7 +58,7 @@ static int evlist__count_evsel_fds(struct evlist *evlist)
return cnt;
}
-static struct evlist *bench__create_evlist(char *evstr)
+static struct evlist *bench__create_evlist(char *evstr, const char *uid_str)
{
struct parse_events_error err;
struct evlist *evlist = evlist__new();
@@ -97,6 +79,18 @@ static struct evlist *bench__create_evlist(char *evstr)
goto out_delete_evlist;
}
parse_events_error__exit(&err);
+ if (uid_str) {
+ uid_t uid = parse_uid(uid_str);
+
+ if (uid == UINT_MAX) {
+ pr_err("Invalid User: %s", uid_str);
+ ret = -EINVAL;
+ goto out_delete_evlist;
+ }
+ ret = parse_uid_filter(evlist, uid);
+ if (ret)
+ goto out_delete_evlist;
+ }
ret = evlist__create_maps(evlist, &opts.target);
if (ret < 0) {
pr_err("Not enough memory to create thread/cpu maps\n");
@@ -136,10 +130,10 @@ static int bench__do_evlist_open_close(struct evlist *evlist)
return 0;
}
-static int bench_evlist_open_close__run(char *evstr)
+static int bench_evlist_open_close__run(char *evstr, const char *uid_str)
{
// used to print statistics only
- struct evlist *evlist = bench__create_evlist(evstr);
+ struct evlist *evlist = bench__create_evlist(evstr, uid_str);
double time_average, time_stddev;
struct timeval start, end, diff;
struct stats time_stats;
@@ -161,7 +155,7 @@ static int bench_evlist_open_close__run(char *evstr)
for (i = 0; i < iterations; i++) {
pr_debug("Started iteration %d\n", i);
- evlist = bench__create_evlist(evstr);
+ evlist = bench__create_evlist(evstr, uid_str);
if (!evlist)
return -ENOMEM;
@@ -225,6 +219,30 @@ out_error:
int bench_evlist_open_close(int argc, const char **argv)
{
+ const char *uid_str = NULL;
+ const struct option options[] = {
+ OPT_STRING('e', "event", &event_string, "event",
+ "event selector. use 'perf list' to list available events"),
+ OPT_INTEGER('n', "nr-events", &nr_events,
+ "number of dummy events to create (default 1). If used with -e, it clones those events n times (1 = no change)"),
+ OPT_INTEGER('i', "iterations", &iterations,
+ "Number of iterations used to compute average (default=100)"),
+ OPT_BOOLEAN('a', "all-cpus", &opts.target.system_wide,
+ "system-wide collection from all CPUs"),
+ OPT_STRING('C', "cpu", &opts.target.cpu_list, "cpu",
+ "list of cpus where to open events"),
+ OPT_STRING('p', "pid", &opts.target.pid, "pid",
+ "record events on existing process id"),
+ OPT_STRING('t', "tid", &opts.target.tid, "tid",
+ "record events on existing thread id"),
+ OPT_STRING('u', "uid", &uid_str, "user", "user to profile"),
+ OPT_BOOLEAN(0, "per-thread", &opts.target.per_thread, "use per-thread mmaps"),
+ OPT_END()
+ };
+ const char *const bench_usage[] = {
+ "perf bench internals evlist-open-close <options>",
+ NULL
+ };
char *evstr, errbuf[BUFSIZ];
int err;
@@ -241,15 +259,8 @@ int bench_evlist_open_close(int argc, const char **argv)
goto out;
}
- err = target__parse_uid(&opts.target);
- if (err) {
- target__strerror(&opts.target, err, errbuf, sizeof(errbuf));
- pr_err("%s", errbuf);
- goto out;
- }
-
- /* Enable ignoring missing threads when -u/-p option is defined. */
- opts.ignore_missing_thread = opts.target.uid != UINT_MAX || opts.target.pid;
+ /* Enable ignoring missing threads when -p option is defined. */
+ opts.ignore_missing_thread = opts.target.pid;
evstr = bench__repeat_event_string(event_string, nr_events);
if (!evstr) {
@@ -257,7 +268,7 @@ int bench_evlist_open_close(int argc, const char **argv)
goto out;
}
- err = bench_evlist_open_close__run(evstr);
+ err = bench_evlist_open_close__run(evstr, uid_str);
free(evstr);
out:
diff --git a/tools/perf/bench/find-bit-bench.c b/tools/perf/bench/find-bit-bench.c
index 7e25b0e413f6..e697c20951bc 100644
--- a/tools/perf/bench/find-bit-bench.c
+++ b/tools/perf/bench/find-bit-bench.c
@@ -37,7 +37,7 @@ static noinline void workload(int val)
accumulator++;
}
-#if (defined(__i386__) || defined(__x86_64__)) && defined(__GCC_ASM_FLAG_OUTPUTS__)
+#if defined(__i386__) || defined(__x86_64__)
static bool asm_test_bit(long nr, const unsigned long *addr)
{
bool oldbit;
diff --git a/tools/perf/bench/futex-hash.c b/tools/perf/bench/futex-hash.c
index fdf133c9520f..7e29f04da744 100644
--- a/tools/perf/bench/futex-hash.c
+++ b/tools/perf/bench/futex-hash.c
@@ -18,7 +18,6 @@
#include <stdlib.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
-#include <linux/prctl.h>
#include <linux/zalloc.h>
#include <sys/time.h>
#include <sys/mman.h>
@@ -57,7 +56,6 @@ static struct bench_futex_parameters params = {
static const struct option options[] = {
OPT_INTEGER( 'b', "buckets", &params.nbuckets, "Specify amount of hash buckets"),
- OPT_BOOLEAN( 'I', "immutable", &params.buckets_immutable, "Make the hash buckets immutable"),
OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
OPT_UINTEGER('r', "runtime", &params.runtime, "Specify runtime (in seconds)"),
OPT_UINTEGER('f', "futexes", &params.nfutexes, "Specify amount of futexes per threads"),
diff --git a/tools/perf/bench/futex-lock-pi.c b/tools/perf/bench/futex-lock-pi.c
index 5144a158512c..40640b674427 100644
--- a/tools/perf/bench/futex-lock-pi.c
+++ b/tools/perf/bench/futex-lock-pi.c
@@ -47,7 +47,6 @@ static struct bench_futex_parameters params = {
static const struct option options[] = {
OPT_INTEGER( 'b', "buckets", &params.nbuckets, "Specify amount of hash buckets"),
- OPT_BOOLEAN( 'I', "immutable", &params.buckets_immutable, "Make the hash buckets immutable"),
OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
OPT_UINTEGER('r', "runtime", &params.runtime, "Specify runtime (in seconds)"),
OPT_BOOLEAN( 'M', "multi", &params.multi, "Use multiple futexes"),
diff --git a/tools/perf/bench/futex-requeue.c b/tools/perf/bench/futex-requeue.c
index a2f91ee1950b..0748b0fd689e 100644
--- a/tools/perf/bench/futex-requeue.c
+++ b/tools/perf/bench/futex-requeue.c
@@ -52,7 +52,6 @@ static struct bench_futex_parameters params = {
static const struct option options[] = {
OPT_INTEGER( 'b', "buckets", &params.nbuckets, "Specify amount of hash buckets"),
- OPT_BOOLEAN( 'I', "immutable", &params.buckets_immutable, "Make the hash buckets immutable"),
OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
OPT_UINTEGER('q', "nrequeue", &params.nrequeue, "Specify amount of threads to requeue at once"),
OPT_BOOLEAN( 's', "silent", &params.silent, "Silent mode: do not display data/details"),
diff --git a/tools/perf/bench/futex-wake-parallel.c b/tools/perf/bench/futex-wake-parallel.c
index ee66482c29fd..6aede7c46b33 100644
--- a/tools/perf/bench/futex-wake-parallel.c
+++ b/tools/perf/bench/futex-wake-parallel.c
@@ -63,7 +63,6 @@ static struct bench_futex_parameters params = {
static const struct option options[] = {
OPT_INTEGER( 'b', "buckets", &params.nbuckets, "Specify amount of hash buckets"),
- OPT_BOOLEAN( 'I', "immutable", &params.buckets_immutable, "Make the hash buckets immutable"),
OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
OPT_UINTEGER('w', "nwakers", &params.nwakes, "Specify amount of waking threads"),
OPT_BOOLEAN( 's', "silent", &params.silent, "Silent mode: do not display data/details"),
diff --git a/tools/perf/bench/futex-wake.c b/tools/perf/bench/futex-wake.c
index 8d6107f7cd94..a31fc1563862 100644
--- a/tools/perf/bench/futex-wake.c
+++ b/tools/perf/bench/futex-wake.c
@@ -52,7 +52,6 @@ static struct bench_futex_parameters params = {
static const struct option options[] = {
OPT_INTEGER( 'b', "buckets", &params.nbuckets, "Specify amount of hash buckets"),
- OPT_BOOLEAN( 'I', "immutable", &params.buckets_immutable, "Make the hash buckets immutable"),
OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
OPT_UINTEGER('w', "nwakes", &params.nwakes, "Specify amount of threads to wake at once"),
OPT_BOOLEAN( 's', "silent", &params.silent, "Silent mode: do not display data/details"),
diff --git a/tools/perf/bench/futex.c b/tools/perf/bench/futex.c
index 26382e4d8d4c..1968c9d00b5b 100644
--- a/tools/perf/bench/futex.c
+++ b/tools/perf/bench/futex.c
@@ -1,22 +1,26 @@
// SPDX-License-Identifier: GPL-2.0
#include <err.h>
+#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
-#include <linux/prctl.h>
#include <sys/prctl.h>
#include "futex.h"
+#ifndef PR_FUTEX_HASH
+#define PR_FUTEX_HASH 78
+# define PR_FUTEX_HASH_SET_SLOTS 1
+# define PR_FUTEX_HASH_GET_SLOTS 2
+#endif // PR_FUTEX_HASH
+
void futex_set_nbuckets_param(struct bench_futex_parameters *params)
{
- unsigned long flags;
int ret;
if (params->nbuckets < 0)
return;
- flags = params->buckets_immutable ? FH_FLAG_IMMUTABLE : 0;
- ret = prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_SET_SLOTS, params->nbuckets, flags);
+ ret = prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_SET_SLOTS, params->nbuckets, 0);
if (ret) {
printf("Requesting %d hash buckets failed: %d/%m\n",
params->nbuckets, ret);
@@ -40,18 +44,11 @@ void futex_print_nbuckets(struct bench_futex_parameters *params)
printf("Requested: %d in usage: %d\n", params->nbuckets, ret);
err(EXIT_FAILURE, "prctl(PR_FUTEX_HASH)");
}
- if (params->nbuckets == 0) {
+ if (params->nbuckets == 0)
ret = asprintf(&futex_hash_mode, "Futex hashing: global hash");
- } else {
- ret = prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_GET_IMMUTABLE);
- if (ret < 0) {
- printf("Can't check if the hash is immutable: %m\n");
- err(EXIT_FAILURE, "prctl(PR_FUTEX_HASH)");
- }
- ret = asprintf(&futex_hash_mode, "Futex hashing: %d hash buckets %s",
- params->nbuckets,
- ret == 1 ? "(immutable)" : "");
- }
+ else
+ ret = asprintf(&futex_hash_mode, "Futex hashing: %d hash buckets",
+ params->nbuckets);
} else {
if (ret <= 0) {
ret = asprintf(&futex_hash_mode, "Futex hashing: global hash");
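
For reference, a minimal sketch of querying the per-process futex hash size through the same prctl interface (using the fallback values defined above, which mirror the uapi ones):

	#include <stdio.h>
	#include <sys/prctl.h>

	#ifndef PR_FUTEX_HASH
	#define PR_FUTEX_HASH		78
	#define PR_FUTEX_HASH_GET_SLOTS	2
	#endif

	int main(void)
	{
		int slots = prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_GET_SLOTS, 0, 0);

		if (slots < 0)
			perror("prctl(PR_FUTEX_HASH)");
		else
			printf("futex hash buckets: %d\n", slots);
		return 0;
	}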
diff --git a/tools/perf/bench/futex.h b/tools/perf/bench/futex.h
index 9c9a73f9d865..fcb72d682cf8 100644
--- a/tools/perf/bench/futex.h
+++ b/tools/perf/bench/futex.h
@@ -8,6 +8,7 @@
#ifndef _FUTEX_H
#define _FUTEX_H
+#include <stdbool.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>
@@ -26,7 +27,6 @@ struct bench_futex_parameters {
unsigned int nwakes;
unsigned int nrequeue;
int nbuckets;
- bool buckets_immutable;
};
/**
diff --git a/tools/perf/bench/inject-buildid.c b/tools/perf/bench/inject-buildid.c
index f55c07e4be94..aad572a78d7f 100644
--- a/tools/perf/bench/inject-buildid.c
+++ b/tools/perf/bench/inject-buildid.c
@@ -80,7 +80,7 @@ static int add_dso(const char *fpath, const struct stat *sb __maybe_unused,
int typeflag, struct FTW *ftwbuf __maybe_unused)
{
struct bench_dso *dso = &dsos[nr_dsos];
- struct build_id bid;
+ struct build_id bid = { .size = 0, };
if (typeflag == FTW_D || typeflag == FTW_SL)
return 0;
diff --git a/tools/perf/bench/mem-functions.c b/tools/perf/bench/mem-functions.c
index 19d45c377ac1..2908a3a796c9 100644
--- a/tools/perf/bench/mem-functions.c
+++ b/tools/perf/bench/mem-functions.c
@@ -22,27 +22,39 @@
#include <string.h>
#include <unistd.h>
#include <sys/time.h>
+#include <sys/mman.h>
#include <errno.h>
#include <linux/time64.h>
-#include <linux/zalloc.h>
+#include <linux/log2.h>
#define K 1024
+#define PAGE_SHIFT_4KB 12
+#define PAGE_SHIFT_2MB 21
+#define PAGE_SHIFT_1GB 30
+
static const char *size_str = "1MB";
static const char *function_str = "all";
-static int nr_loops = 1;
+static const char *page_size_str = "4KB";
+static const char *chunk_size_str = "0";
+static unsigned int nr_loops = 1;
static bool use_cycles;
static int cycles_fd;
+static unsigned int seed;
-static const struct option options[] = {
+static const struct option bench_common_options[] = {
OPT_STRING('s', "size", &size_str, "1MB",
"Specify the size of the memory buffers. "
"Available units: B, KB, MB, GB and TB (case insensitive)"),
+ OPT_STRING('p', "page", &page_size_str, "4KB",
+ "Specify page-size for mapping memory buffers. "
+ "Available sizes: 4KB, 2MB, 1GB (case insensitive)"),
+
OPT_STRING('f', "function", &function_str, "all",
"Specify the function to run, \"all\" runs all available functions, \"help\" lists them"),
- OPT_INTEGER('l', "nr_loops", &nr_loops,
+ OPT_UINTEGER('l', "nr_loops", &nr_loops,
"Specify the number of loops to run. (default: 1)"),
OPT_BOOLEAN('c', "cycles", &use_cycles,
@@ -51,15 +63,56 @@ static const struct option options[] = {
OPT_END()
};
+static const struct option bench_mem_options[] = {
+ OPT_STRING('k', "chunk", &chunk_size_str, "0",
+ "Specify the chunk-size for each invocation. "
+ "Available units: B, KB, MB, GB and TB (case insensitive)"),
+ OPT_PARENT(bench_common_options),
+ OPT_END()
+};
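
With the options split into a common parent and mem-specific extensions, a run that exercises the new page-size and chunking knobs might look like (flag names taken from the tables above):

	$ perf bench mem memcpy -s 1GB -p 2MB -k 256KB -l 10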
+
+union bench_clock {
+ u64 cycles;
+ struct timeval tv;
+};
+
+struct bench_params {
+ size_t size;
+ size_t size_total;
+ size_t chunk_size;
+ unsigned int nr_loops;
+ unsigned int page_shift;
+ unsigned int seed;
+};
+
+struct bench_mem_info {
+ const struct function *functions;
+ int (*do_op)(const struct function *r, struct bench_params *p,
+ void *src, void *dst, union bench_clock *rt);
+ const char *const *usage;
+ const struct option *options;
+ bool alloc_src;
+};
+
+typedef bool (*mem_init_t)(struct bench_mem_info *, struct bench_params *,
+ void **, void **);
+typedef void (*mem_fini_t)(struct bench_mem_info *, struct bench_params *,
+ void **, void **);
typedef void *(*memcpy_t)(void *, const void *, size_t);
typedef void *(*memset_t)(void *, int, size_t);
+typedef void (*mmap_op_t)(void *, size_t, unsigned int, bool);
struct function {
const char *name;
const char *desc;
- union {
- memcpy_t memcpy;
- memset_t memset;
+ struct {
+ mem_init_t init;
+ mem_fini_t fini;
+ union {
+ memcpy_t memcpy;
+ memset_t memset;
+ mmap_op_t mmap_op;
+ };
} fn;
};
@@ -91,6 +144,34 @@ static u64 get_cycles(void)
return clk;
}
+static void clock_get(union bench_clock *t)
+{
+ if (use_cycles)
+ t->cycles = get_cycles();
+ else
+ BUG_ON(gettimeofday(&t->tv, NULL));
+}
+
+static union bench_clock clock_diff(union bench_clock *s, union bench_clock *e)
+{
+ union bench_clock t;
+
+ if (use_cycles)
+ t.cycles = e->cycles - s->cycles;
+ else
+ timersub(&e->tv, &s->tv, &t.tv);
+
+ return t;
+}
+
+static void clock_accum(union bench_clock *a, union bench_clock *b)
+{
+ if (use_cycles)
+ a->cycles += b->cycles;
+ else
+ timeradd(&a->tv, &b->tv, &a->tv);
+}
+
static double timeval2double(struct timeval *ts)
{
return (double)ts->tv_sec + (double)ts->tv_usec / (double)USEC_PER_SEC;
@@ -107,54 +188,40 @@ static double timeval2double(struct timeval *ts)
printf(" %14lf GB/sec\n", x / K / K / K); \
} while (0)
-struct bench_mem_info {
- const struct function *functions;
- u64 (*do_cycles)(const struct function *r, size_t size, void *src, void *dst);
- double (*do_gettimeofday)(const struct function *r, size_t size, void *src, void *dst);
- const char *const *usage;
- bool alloc_src;
-};
-
-static void __bench_mem_function(struct bench_mem_info *info, int r_idx, size_t size, double size_total)
+static void __bench_mem_function(struct bench_mem_info *info, struct bench_params *p,
+ int r_idx)
{
const struct function *r = &info->functions[r_idx];
double result_bps = 0.0;
- u64 result_cycles = 0;
- void *src = NULL, *dst = zalloc(size);
+ union bench_clock rt = { 0 };
+ void *src = NULL, *dst = NULL;
printf("# function '%s' (%s)\n", r->name, r->desc);
- if (dst == NULL)
- goto out_alloc_failed;
-
- if (info->alloc_src) {
- src = zalloc(size);
- if (src == NULL)
- goto out_alloc_failed;
- }
+ if (r->fn.init && r->fn.init(info, p, &src, &dst))
+ goto out_init_failed;
if (bench_format == BENCH_FORMAT_DEFAULT)
printf("# Copying %s bytes ...\n\n", size_str);
- if (use_cycles) {
- result_cycles = info->do_cycles(r, size, src, dst);
- } else {
- result_bps = info->do_gettimeofday(r, size, src, dst);
- }
+ if (info->do_op(r, p, src, dst, &rt))
+ goto out_test_failed;
switch (bench_format) {
case BENCH_FORMAT_DEFAULT:
if (use_cycles) {
- printf(" %14lf cycles/byte\n", (double)result_cycles/size_total);
+ printf(" %14lf cycles/byte\n", (double)rt.cycles/(double)p->size_total);
} else {
+ result_bps = (double)p->size_total/timeval2double(&rt.tv);
print_bps(result_bps);
}
break;
case BENCH_FORMAT_SIMPLE:
if (use_cycles) {
- printf("%lf\n", (double)result_cycles/size_total);
+ printf("%lf\n", (double)rt.cycles/(double)p->size_total);
} else {
+ result_bps = (double)p->size_total/timeval2double(&rt.tv);
printf("%lf\n", result_bps);
}
break;
@@ -164,22 +231,23 @@ static void __bench_mem_function(struct bench_mem_info *info, int r_idx, size_t
break;
}
+out_test_failed:
out_free:
- free(src);
- free(dst);
+ if (r->fn.fini) r->fn.fini(info, p, &src, &dst);
return;
-out_alloc_failed:
- printf("# Memory allocation failed - maybe size (%s) is too large?\n", size_str);
+out_init_failed:
+ printf("# Memory allocation failed - maybe size (%s) %s?\n", size_str,
+ p->page_shift != PAGE_SHIFT_4KB ? "has insufficient hugepages" : "is too large");
goto out_free;
}
static int bench_mem_common(int argc, const char **argv, struct bench_mem_info *info)
{
int i;
- size_t size;
- double size_total;
+ struct bench_params p = { 0 };
+ unsigned int page_size;
- argc = parse_options(argc, argv, options, info->usage, 0);
+ argc = parse_options(argc, argv, info->options, info->usage, 0);
if (use_cycles) {
i = init_cycles();
@@ -189,17 +257,37 @@ static int bench_mem_common(int argc, const char **argv, struct bench_mem_info *
}
}
- size = (size_t)perf_atoll((char *)size_str);
- size_total = (double)size * nr_loops;
+ p.nr_loops = nr_loops;
+ p.size = (size_t)perf_atoll((char *)size_str);
- if ((s64)size <= 0) {
+ if ((s64)p.size <= 0) {
fprintf(stderr, "Invalid size:%s\n", size_str);
return 1;
}
+ p.size_total = p.size * p.nr_loops;
+
+ p.chunk_size = (size_t)perf_atoll((char *)chunk_size_str);
+ if ((s64)p.chunk_size < 0 || (s64)p.chunk_size > (s64)p.size) {
+ fprintf(stderr, "Invalid chunk_size:%s\n", chunk_size_str);
+ return 1;
+ }
+ if (!p.chunk_size)
+ p.chunk_size = p.size;
+
+ page_size = (unsigned int)perf_atoll((char *)page_size_str);
+ if (page_size != (1 << PAGE_SHIFT_4KB) &&
+ page_size != (1 << PAGE_SHIFT_2MB) &&
+ page_size != (1 << PAGE_SHIFT_1GB)) {
+ fprintf(stderr, "Invalid page-size:%s\n", page_size_str);
+ return 1;
+ }
+ p.page_shift = ilog2(page_size);
+
+ p.seed = seed;
if (!strncmp(function_str, "all", 3)) {
for (i = 0; info->functions[i].name; i++)
- __bench_mem_function(info, i, size, size_total);
+ __bench_mem_function(info, &p, i);
return 0;
}
@@ -218,7 +306,7 @@ static int bench_mem_common(int argc, const char **argv, struct bench_mem_info *
return 1;
}
- __bench_mem_function(info, i, size, size_total);
+ __bench_mem_function(info, &p, i);
return 0;
}
@@ -235,47 +323,81 @@ static void memcpy_prefault(memcpy_t fn, size_t size, void *src, void *dst)
fn(dst, src, size);
}
-static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, void *dst)
+static int do_memcpy(const struct function *r, struct bench_params *p,
+ void *src, void *dst, union bench_clock *rt)
{
- u64 cycle_start = 0ULL, cycle_end = 0ULL;
+ union bench_clock start, end;
memcpy_t fn = r->fn.memcpy;
- int i;
- memcpy_prefault(fn, size, src, dst);
+ memcpy_prefault(fn, p->size, src, dst);
+
+ clock_get(&start);
+ for (unsigned int i = 0; i < p->nr_loops; ++i)
+ for (size_t off = 0; off < p->size; off += p->chunk_size)
+ fn(dst + off, src + off, min(p->chunk_size, p->size - off));
+ clock_get(&end);
- cycle_start = get_cycles();
- for (i = 0; i < nr_loops; ++i)
- fn(dst, src, size);
- cycle_end = get_cycles();
+ *rt = clock_diff(&start, &end);
- return cycle_end - cycle_start;
+ return 0;
}
-static double do_memcpy_gettimeofday(const struct function *r, size_t size, void *src, void *dst)
+static void *bench_mmap(size_t size, bool populate, unsigned int page_shift)
{
- struct timeval tv_start, tv_end, tv_diff;
- memcpy_t fn = r->fn.memcpy;
- int i;
+ void *p;
+ int extra = populate ? MAP_POPULATE : 0;
+
+ if (page_shift != PAGE_SHIFT_4KB)
+ extra |= MAP_HUGETLB | (page_shift << MAP_HUGE_SHIFT);
+
+ p = mmap(NULL, size, PROT_READ|PROT_WRITE,
+ extra | MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+
+ return p == MAP_FAILED ? NULL : p;
+}
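
bench_mmap() relies on encoding the huge page size directly in the mmap flags. A standalone sketch of the same flag construction for 2MB pages (MAP_HUGE_SHIFT is the standard uapi encoding of log2(page size) in the high flag bits):

	#include <sys/mman.h>

	size_t len = 2 * 1024 * 1024;
	/* 2MB pages: page_shift = 21, placed at MAP_HUGE_SHIFT. */
	int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB |
		    (21 << MAP_HUGE_SHIFT);
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE, flags, -1, 0);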
+
+static void bench_munmap(void *p, size_t size)
+{
+ if (p)
+ munmap(p, size);
+}
+
+static bool mem_alloc(struct bench_mem_info *info, struct bench_params *p,
+ void **src, void **dst)
+{
+ bool failed;
- memcpy_prefault(fn, size, src, dst);
+ *dst = bench_mmap(p->size, true, p->page_shift);
+ failed = *dst == NULL;
- BUG_ON(gettimeofday(&tv_start, NULL));
- for (i = 0; i < nr_loops; ++i)
- fn(dst, src, size);
- BUG_ON(gettimeofday(&tv_end, NULL));
+ if (info->alloc_src) {
+ *src = bench_mmap(p->size, true, p->page_shift);
+ failed = failed || *src == NULL;
+ }
+
+ return failed;
+}
- timersub(&tv_end, &tv_start, &tv_diff);
+static void mem_free(struct bench_mem_info *info __maybe_unused,
+ struct bench_params *p __maybe_unused,
+ void **src, void **dst)
+{
+ bench_munmap(*dst, p->size);
+ bench_munmap(*src, p->size);
- return (double)(((double)size * nr_loops) / timeval2double(&tv_diff));
+ *dst = *src = NULL;
}
struct function memcpy_functions[] = {
{ .name = "default",
.desc = "Default memcpy() provided by glibc",
+ .fn.init = mem_alloc,
+ .fn.fini = mem_free,
.fn.memcpy = memcpy },
#ifdef HAVE_ARCH_X86_64_SUPPORT
-# define MEMCPY_FN(_fn, _name, _desc) {.name = _name, .desc = _desc, .fn.memcpy = _fn},
+# define MEMCPY_FN(_fn, _init, _fini, _name, _desc) \
+ {.name = _name, .desc = _desc, .fn.memcpy = _fn, .fn.init = _init, .fn.fini = _fini },
# include "mem-memcpy-x86-64-asm-def.h"
# undef MEMCPY_FN
#endif
@@ -292,55 +414,36 @@ int bench_mem_memcpy(int argc, const char **argv)
{
struct bench_mem_info info = {
.functions = memcpy_functions,
- .do_cycles = do_memcpy_cycles,
- .do_gettimeofday = do_memcpy_gettimeofday,
+ .do_op = do_memcpy,
.usage = bench_mem_memcpy_usage,
+ .options = bench_mem_options,
.alloc_src = true,
};
return bench_mem_common(argc, argv, &info);
}
-static u64 do_memset_cycles(const struct function *r, size_t size, void *src __maybe_unused, void *dst)
-{
- u64 cycle_start = 0ULL, cycle_end = 0ULL;
- memset_t fn = r->fn.memset;
- int i;
-
- /*
- * We prefault the freshly allocated memory range here,
- * to not measure page fault overhead:
- */
- fn(dst, -1, size);
-
- cycle_start = get_cycles();
- for (i = 0; i < nr_loops; ++i)
- fn(dst, i, size);
- cycle_end = get_cycles();
-
- return cycle_end - cycle_start;
-}
-
-static double do_memset_gettimeofday(const struct function *r, size_t size, void *src __maybe_unused, void *dst)
+static int do_memset(const struct function *r, struct bench_params *p,
+ void *src __maybe_unused, void *dst, union bench_clock *rt)
{
- struct timeval tv_start, tv_end, tv_diff;
+ union bench_clock start, end;
memset_t fn = r->fn.memset;
- int i;
/*
* We prefault the freshly allocated memory range here,
* to not measure page fault overhead:
*/
- fn(dst, -1, size);
+ fn(dst, -1, p->size);
- BUG_ON(gettimeofday(&tv_start, NULL));
- for (i = 0; i < nr_loops; ++i)
- fn(dst, i, size);
- BUG_ON(gettimeofday(&tv_end, NULL));
+ clock_get(&start);
+ for (unsigned int i = 0; i < p->nr_loops; ++i)
+ for (size_t off = 0; off < p->size; off += p->chunk_size)
+ fn(dst + off, i, min(p->chunk_size, p->size - off));
+ clock_get(&end);
- timersub(&tv_end, &tv_start, &tv_diff);
+ *rt = clock_diff(&start, &end);
- return (double)(((double)size * nr_loops) / timeval2double(&tv_diff));
+ return 0;
}
static const char * const bench_mem_memset_usage[] = {
@@ -351,10 +454,13 @@ static const char * const bench_mem_memset_usage[] = {
static const struct function memset_functions[] = {
{ .name = "default",
.desc = "Default memset() provided by glibc",
+ .fn.init = mem_alloc,
+ .fn.fini = mem_free,
.fn.memset = memset },
#ifdef HAVE_ARCH_X86_64_SUPPORT
-# define MEMSET_FN(_fn, _name, _desc) { .name = _name, .desc = _desc, .fn.memset = _fn },
+# define MEMSET_FN(_fn, _init, _fini, _name, _desc) \
+ {.name = _name, .desc = _desc, .fn.memset = _fn, .fn.init = _init, .fn.fini = _fini },
# include "mem-memset-x86-64-asm-def.h"
# undef MEMSET_FN
#endif
@@ -366,9 +472,91 @@ int bench_mem_memset(int argc, const char **argv)
{
struct bench_mem_info info = {
.functions = memset_functions,
- .do_cycles = do_memset_cycles,
- .do_gettimeofday = do_memset_gettimeofday,
+ .do_op = do_memset,
.usage = bench_mem_memset_usage,
+ .options = bench_mem_options,
+ };
+
+ return bench_mem_common(argc, argv, &info);
+}
+
+static void mmap_page_touch(void *dst, size_t size, unsigned int page_shift, bool random)
+{
+ unsigned long npages = size / (1 << page_shift);
+ unsigned long offset = 0, r = 0;
+
+ for (unsigned long i = 0; i < npages; i++) {
+ if (random)
+ r = rand() % (1 << page_shift);
+
+		((char *)dst)[offset + r] += i;
+ offset += 1 << page_shift;
+ }
+}
+
+static int do_mmap(const struct function *r, struct bench_params *p,
+ void *src __maybe_unused, void *dst __maybe_unused,
+ union bench_clock *accum)
+{
+ union bench_clock start, end, diff;
+ mmap_op_t fn = r->fn.mmap_op;
+ bool populate = strcmp(r->name, "populate") == 0;
+
+ if (p->seed)
+ srand(p->seed);
+
+ for (unsigned int i = 0; i < p->nr_loops; i++) {
+ clock_get(&start);
+ dst = bench_mmap(p->size, populate, p->page_shift);
+ if (!dst)
+ goto out;
+
+ fn(dst, p->size, p->page_shift, p->seed);
+ clock_get(&end);
+ diff = clock_diff(&start, &end);
+ clock_accum(accum, &diff);
+
+ bench_munmap(dst, p->size);
+ }
+
+ return 0;
+out:
+ printf("# Memory allocation failed - maybe size (%s) %s?\n", size_str,
+ p->page_shift != PAGE_SHIFT_4KB ? "has insufficient hugepages" : "is too large");
+ return -1;
+}
+
+static const char * const bench_mem_mmap_usage[] = {
+ "perf bench mem mmap <options>",
+ NULL
+};
+
+static const struct function mmap_functions[] = {
+ { .name = "demand",
+ .desc = "Demand loaded mmap()",
+ .fn.mmap_op = mmap_page_touch },
+
+ { .name = "populate",
+ .desc = "Eagerly populated mmap()",
+ .fn.mmap_op = mmap_page_touch },
+
+ { .name = NULL, }
+};
+
+int bench_mem_mmap(int argc, const char **argv)
+{
+ static const struct option bench_mmap_options[] = {
+ OPT_UINTEGER('r', "randomize", &seed,
+ "Seed to randomize page access offset."),
+ OPT_PARENT(bench_common_options),
+ OPT_END()
+ };
+
+ struct bench_mem_info info = {
+ .functions = mmap_functions,
+ .do_op = do_mmap,
+ .usage = bench_mem_mmap_usage,
+ .options = bench_mmap_options,
};
return bench_mem_common(argc, argv, &info);
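For readers unfamiliar with the flag encoding bench_mmap() uses above: mmap()
selects a hugetlb page size by packing log2(page size) into the flags word at
MAP_HUGE_SHIFT, which is exactly what page_shift << MAP_HUGE_SHIFT does. A
minimal standalone sketch of the same encoding follows; the shift value 21
(2 MiB pages) and the 4 MiB length are illustrative, and the call fails unless
hugepages are reserved, e.g. via /proc/sys/vm/nr_hugepages:

    #include <stdio.h>
    #include <sys/mman.h>

    #ifndef MAP_HUGETLB
    #define MAP_HUGETLB	0x40000
    #endif
    #ifndef MAP_HUGE_SHIFT
    #define MAP_HUGE_SHIFT	26
    #endif

    int main(void)
    {
    	size_t len = 4UL << 20;			/* 4 MiB */
    	unsigned int page_shift = 21;		/* log2(2 MiB) */
    	int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB |
    		    (page_shift << MAP_HUGE_SHIFT);
    	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, flags, -1, 0);

    	if (p == MAP_FAILED) {
    		perror("mmap");		/* typically: no 2 MiB pages reserved */
    		return 1;
    	}
    	munmap(p, len);
    	puts("mapped 4 MiB backed by 2 MiB pages");
    	return 0;
    }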
diff --git a/tools/perf/bench/mem-memcpy-arch.h b/tools/perf/bench/mem-memcpy-arch.h
index 5bcaec5601a8..852e48cfd8fe 100644
--- a/tools/perf/bench/mem-memcpy-arch.h
+++ b/tools/perf/bench/mem-memcpy-arch.h
@@ -2,7 +2,7 @@
#ifdef HAVE_ARCH_X86_64_SUPPORT
-#define MEMCPY_FN(fn, name, desc) \
+#define MEMCPY_FN(fn, init, fini, name, desc) \
void *fn(void *, const void *, size_t);
#include "mem-memcpy-x86-64-asm-def.h"
diff --git a/tools/perf/bench/mem-memcpy-x86-64-asm-def.h b/tools/perf/bench/mem-memcpy-x86-64-asm-def.h
index 6188e19d3129..f43038f4448b 100644
--- a/tools/perf/bench/mem-memcpy-x86-64-asm-def.h
+++ b/tools/perf/bench/mem-memcpy-x86-64-asm-def.h
@@ -1,9 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
MEMCPY_FN(memcpy_orig,
+ mem_alloc,
+ mem_free,
"x86-64-unrolled",
"unrolled memcpy() in arch/x86/lib/memcpy_64.S")
MEMCPY_FN(__memcpy,
+ mem_alloc,
+ mem_free,
"x86-64-movsq",
"movsq-based memcpy() in arch/x86/lib/memcpy_64.S")
diff --git a/tools/perf/bench/mem-memset-arch.h b/tools/perf/bench/mem-memset-arch.h
index 53f45482663f..278c5da12d63 100644
--- a/tools/perf/bench/mem-memset-arch.h
+++ b/tools/perf/bench/mem-memset-arch.h
@@ -2,7 +2,7 @@
#ifdef HAVE_ARCH_X86_64_SUPPORT
-#define MEMSET_FN(fn, name, desc) \
+#define MEMSET_FN(fn, init, fini, name, desc) \
void *fn(void *, int, size_t);
#include "mem-memset-x86-64-asm-def.h"
diff --git a/tools/perf/bench/mem-memset-x86-64-asm-def.h b/tools/perf/bench/mem-memset-x86-64-asm-def.h
index 247c72fdfb9d..80ad1b7ea770 100644
--- a/tools/perf/bench/mem-memset-x86-64-asm-def.h
+++ b/tools/perf/bench/mem-memset-x86-64-asm-def.h
@@ -1,9 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
MEMSET_FN(memset_orig,
+ mem_alloc,
+ mem_free,
"x86-64-unrolled",
"unrolled memset() in arch/x86/lib/memset_64.S")
MEMSET_FN(__memset,
+ mem_alloc,
+ mem_free,
"x86-64-stosq",
"movsq-based memset() in arch/x86/lib/memset_64.S")
diff --git a/tools/perf/bench/pmu-scan.c b/tools/perf/bench/pmu-scan.c
index 9e4d36486f62..14a464ad8cea 100644
--- a/tools/perf/bench/pmu-scan.c
+++ b/tools/perf/bench/pmu-scan.c
@@ -4,6 +4,7 @@
*
* Copyright 2023 Google LLC.
*/
+#include <errno.h>
#include <stdio.h>
#include "bench.h"
#include "util/debug.h"
diff --git a/tools/perf/bench/synthesize.c b/tools/perf/bench/synthesize.c
index 9b333276cbdb..265d49a913d9 100644
--- a/tools/perf/bench/synthesize.c
+++ b/tools/perf/bench/synthesize.c
@@ -6,6 +6,7 @@
*
* Copyright 2019 Google LLC.
*/
+#include <errno.h>
#include <stdio.h>
#include "bench.h"
#include "../util/debug.h"
@@ -114,12 +115,16 @@ static int run_single_threaded(void)
.pid = "self",
};
struct perf_thread_map *threads;
+ struct perf_env host_env;
int err;
perf_set_singlethreaded();
- session = perf_session__new(NULL, NULL);
+ perf_env__init(&host_env);
+ session = __perf_session__new(/*data=*/NULL, /*tool=*/NULL,
+ /*trace_event_repipe=*/false, &host_env);
if (IS_ERR(session)) {
pr_err("Session creation failed.\n");
+ perf_env__exit(&host_env);
return PTR_ERR(session);
}
threads = thread_map__new_by_pid(getpid());
@@ -144,6 +149,7 @@ err_out:
perf_thread_map__put(threads);
perf_session__delete(session);
+ perf_env__exit(&host_env);
return err;
}
@@ -154,17 +160,21 @@ static int do_run_multi_threaded(struct target *target,
u64 runtime_us;
unsigned int i;
double time_average, time_stddev, event_average, event_stddev;
- int err;
+ int err = 0;
struct stats time_stats, event_stats;
struct perf_session *session;
+ struct perf_env host_env;
+ perf_env__init(&host_env);
init_stats(&time_stats);
init_stats(&event_stats);
for (i = 0; i < multi_iterations; i++) {
- session = perf_session__new(NULL, NULL);
- if (IS_ERR(session))
- return PTR_ERR(session);
-
+ session = __perf_session__new(/*data=*/NULL, /*tool=*/NULL,
+ /*trace_event_repipe=*/false, &host_env);
+ if (IS_ERR(session)) {
+ err = PTR_ERR(session);
+ goto err_out;
+ }
atomic_set(&event_count, 0);
gettimeofday(&start, NULL);
err = __machine__synthesize_threads(&session->machines.host,
@@ -175,7 +185,7 @@ static int do_run_multi_threaded(struct target *target,
nr_threads_synthesize);
if (err) {
perf_session__delete(session);
- return err;
+ goto err_out;
}
gettimeofday(&end, NULL);
@@ -198,7 +208,9 @@ static int do_run_multi_threaded(struct target *target,
printf(" Average time per event %.3f usec\n",
time_average / event_average);
- return 0;
+err_out:
+ perf_env__exit(&host_env);
+ return err;
}
static int run_multi_threaded(void)
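The synthesize.c conversion above applies the usual C resource-pairing
discipline: initialize up front, funnel every failure through one label, and
release on all paths, so perf_env__exit() runs exactly once no matter where
the loop bails out. A generic sketch of that shape, with hypothetical resource
names:

    #include <stdio.h>
    #include <stdlib.h>

    struct env { char *buf; };

    static int env_init(struct env *e)
    {
    	e->buf = malloc(64);
    	return e->buf ? 0 : -1;
    }

    static void env_exit(struct env *e)
    {
    	free(e->buf);
    	e->buf = NULL;
    }

    static int run(void)
    {
    	struct env env;
    	int err = 0;

    	if (env_init(&env))
    		return -1;

    	for (int i = 0; i < 3; i++) {
    		FILE *f = fopen("/proc/self/status", "r");	/* per-iteration work */

    		if (!f) {
    			err = -1;
    			goto err_out;	/* single exit path */
    		}
    		fclose(f);
    	}
    err_out:
    	env_exit(&env);		/* cleanup runs exactly once */
    	return err;
    }

    int main(void) { return run() ? 1 : 0; }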
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 9833c2c82a2f..9c27bb30b708 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -313,7 +313,8 @@ out_put:
return ret;
}
-static int process_feature_event(struct perf_session *session,
+static int process_feature_event(const struct perf_tool *tool __maybe_unused,
+ struct perf_session *session,
union perf_event *event)
{
if (event->feat.feat_id < HEADER_LAST_FEATURE)
@@ -519,7 +520,7 @@ find_next:
/* skip missing symbols */
nd = rb_next(nd);
} else if (use_browser == 1) {
- key = hist_entry__tui_annotate(he, evsel, NULL);
+ key = hist_entry__tui_annotate(he, evsel, NULL, NO_ADDR);
switch (key) {
case -1:
@@ -562,7 +563,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
}
if (!annotate_opts.objdump_path) {
- ret = perf_env__lookup_objdump(&session->header.env,
+ ret = perf_env__lookup_objdump(perf_session__env(session),
&annotate_opts.objdump_path);
if (ret)
goto out;
@@ -896,7 +897,7 @@ int cmd_annotate(int argc, const char **argv)
symbol_conf.try_vmlinux_path = true;
- ret = symbol__init(&annotate.session->header.env);
+ ret = symbol__init(perf_session__env(annotate.session));
if (ret < 0)
goto out_delete;
@@ -917,11 +918,6 @@ int cmd_annotate(int argc, const char **argv)
symbol_conf.annotate_data_sample = true;
} else if (annotate_opts.code_with_type) {
symbol_conf.annotate_data_member = true;
-
- if (!annotate.use_stdio) {
- pr_err("--code-with-type only works with --stdio.\n");
- goto out_delete;
- }
}
setup_browser(true);
@@ -947,7 +943,7 @@ int cmd_annotate(int argc, const char **argv)
annotate_opts.show_br_cntr = true;
}
- if (setup_sorting(NULL) < 0)
+ if (setup_sorting(/*evlist=*/NULL, perf_session__env(annotate.session)) < 0)
usage_with_options(annotate_usage, options);
ret = __cmd_annotate(&annotate);
diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
index 2c1a9f3d847a..02dea1b88228 100644
--- a/tools/perf/builtin-bench.c
+++ b/tools/perf/builtin-bench.c
@@ -65,6 +65,7 @@ static struct bench mem_benchmarks[] = {
{ "memcpy", "Benchmark for memcpy() functions", bench_mem_memcpy },
{ "memset", "Benchmark for memset() functions", bench_mem_memset },
{ "find_bit", "Benchmark for find_bit() functions", bench_mem_find_bit },
+ { "mmap", "Benchmark for mmap() mappings", bench_mem_mmap },
{ "all", "Run all memory access benchmarks", NULL },
{ NULL, NULL, NULL }
};
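With this table entry in place, the new workload is reachable from the usual
entry point, for example:

    $ perf bench mem mmap -r 42

where -r/--randomize is the mmap-specific seed option added earlier; any size
and page-size knobs presumably come from the shared bench_mem_options, whose
definition is not part of these hunks.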
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index b0511d16aeb6..c98104481c8a 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -31,7 +31,7 @@
#include <linux/string.h>
#include <linux/err.h>
-static int build_id_cache__kcore_buildid(const char *proc_dir, char *sbuildid)
+static int build_id_cache__kcore_buildid(const char *proc_dir, char *sbuildid, size_t sbuildid_size)
{
char root_dir[PATH_MAX];
char *p;
@@ -42,7 +42,7 @@ static int build_id_cache__kcore_buildid(const char *proc_dir, char *sbuildid)
if (!p)
return -1;
*p = '\0';
- return sysfs__sprintf_build_id(root_dir, sbuildid);
+ return sysfs__snprintf_build_id(root_dir, sbuildid, sbuildid_size);
}
static int build_id_cache__kcore_dir(char *dir, size_t sz)
@@ -128,7 +128,7 @@ static int build_id_cache__add_kcore(const char *filename, bool force)
return -1;
*p = '\0';
- if (build_id_cache__kcore_buildid(from_dir, sbuildid) < 0)
+ if (build_id_cache__kcore_buildid(from_dir, sbuildid, sizeof(sbuildid)) < 0)
return -1;
scnprintf(to_dir, sizeof(to_dir), "%s/%s/%s",
@@ -175,7 +175,7 @@ static int build_id_cache__add_kcore(const char *filename, bool force)
static int build_id_cache__add_file(const char *filename, struct nsinfo *nsi)
{
char sbuild_id[SBUILD_ID_SIZE];
- struct build_id bid;
+ struct build_id bid = { .size = 0, };
int err;
struct nscookie nsc;
@@ -187,7 +187,7 @@ static int build_id_cache__add_file(const char *filename, struct nsinfo *nsi)
return -1;
}
- build_id__sprintf(&bid, sbuild_id);
+ build_id__snprintf(&bid, sbuild_id, sizeof(sbuild_id));
err = build_id_cache__add_s(sbuild_id, filename, nsi,
false, false);
pr_debug("Adding %s %s: %s\n", sbuild_id, filename,
@@ -198,7 +198,7 @@ static int build_id_cache__add_file(const char *filename, struct nsinfo *nsi)
static int build_id_cache__remove_file(const char *filename, struct nsinfo *nsi)
{
char sbuild_id[SBUILD_ID_SIZE];
- struct build_id bid;
+ struct build_id bid = { .size = 0, };
struct nscookie nsc;
int err;
@@ -211,7 +211,7 @@ static int build_id_cache__remove_file(const char *filename, struct nsinfo *nsi)
return -1;
}
- build_id__sprintf(&bid, sbuild_id);
+ build_id__snprintf(&bid, sbuild_id, sizeof(sbuild_id));
err = build_id_cache__remove_s(sbuild_id);
pr_debug("Removing %s %s: %s\n", sbuild_id, filename,
err ? "FAIL" : "Ok");
@@ -275,7 +275,7 @@ static int build_id_cache__purge_all(void)
static bool dso__missing_buildid_cache(struct dso *dso, int parm __maybe_unused)
{
char filename[PATH_MAX];
- struct build_id bid;
+ struct build_id bid = { .size = 0, };
if (!dso__build_id_filename(dso, filename, sizeof(filename), false))
return true;
@@ -303,7 +303,7 @@ static int build_id_cache__fprintf_missing(struct perf_session *session, FILE *f
static int build_id_cache__update_file(const char *filename, struct nsinfo *nsi)
{
char sbuild_id[SBUILD_ID_SIZE];
- struct build_id bid;
+ struct build_id bid = { .size = 0, };
struct nscookie nsc;
int err;
@@ -317,7 +317,7 @@ static int build_id_cache__update_file(const char *filename, struct nsinfo *nsi)
}
err = 0;
- build_id__sprintf(&bid, sbuild_id);
+ build_id__snprintf(&bid, sbuild_id, sizeof(sbuild_id));
if (build_id_cache__cached(sbuild_id))
err = build_id_cache__remove_s(sbuild_id);
@@ -453,7 +453,7 @@ int cmd_buildid_cache(int argc, const char **argv)
return PTR_ERR(session);
}
- if (symbol__init(session ? &session->header.env : NULL) < 0)
+ if (symbol__init(session ? perf_session__env(session) : NULL) < 0)
goto out;
setup_pager();
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index 52dfacaff8e3..a91bbb34ac94 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -31,7 +31,7 @@ static int buildid__map_cb(struct map *map, void *arg __maybe_unused)
memset(bid_buf, 0, sizeof(bid_buf));
if (dso__has_build_id(dso))
- build_id__sprintf(dso__bid_const(dso), bid_buf);
+ build_id__snprintf(dso__bid(dso), bid_buf, sizeof(bid_buf));
printf("%s %16" PRIx64 " %16" PRIx64, bid_buf, map__start(map), map__end(map));
if (dso_long_name != NULL)
printf(" %s", dso_long_name);
@@ -45,11 +45,14 @@ static int buildid__map_cb(struct map *map, void *arg __maybe_unused)
static void buildid__show_kernel_maps(void)
{
+ struct perf_env host_env;
struct machine *machine;
- machine = machine__new_host();
+ perf_env__init(&host_env);
+ machine = machine__new_host(&host_env);
machine__for_each_kernel_map(machine, buildid__map_cb, NULL);
machine__delete(machine);
+ perf_env__exit(&host_env);
}
static int sysfs__fprintf_build_id(FILE *fp)
@@ -57,7 +60,7 @@ static int sysfs__fprintf_build_id(FILE *fp)
char sbuild_id[SBUILD_ID_SIZE];
int ret;
- ret = sysfs__sprintf_build_id("/", sbuild_id);
+ ret = sysfs__snprintf_build_id("/", sbuild_id, sizeof(sbuild_id));
if (ret != sizeof(sbuild_id))
return ret < 0 ? ret : -EINVAL;
@@ -69,7 +72,7 @@ static int filename__fprintf_build_id(const char *name, FILE *fp)
char sbuild_id[SBUILD_ID_SIZE];
int ret;
- ret = filename__sprintf_build_id(name, sbuild_id);
+ ret = filename__snprintf_build_id(name, sbuild_id, sizeof(sbuild_id));
if (ret != sizeof(sbuild_id))
return ret < 0 ? ret : -EINVAL;
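The sprintf-to-snprintf renames above are mechanical, but the contract change
matters: every build-id formatter now receives the destination size and can
never overrun the buffer. A minimal sketch of that contract; bid_snprintf()
here is a hypothetical stand-in, not perf's implementation:

    #include <stdio.h>

    /* Format bid[0..size) as hex, never writing past buf[buf_size - 1]. */
    static int bid_snprintf(const unsigned char *bid, size_t size,
    			    char *buf, size_t buf_size)
    {
    	size_t off = 0;

    	for (size_t i = 0; i < size && off + 3 <= buf_size; i++)
    		off += snprintf(buf + off, buf_size - off, "%02x", bid[i]);
    	return (int)off;
    }

    int main(void)
    {
    	const unsigned char bid[] = { 0xde, 0xad, 0xbe, 0xef };
    	char buf[sizeof(bid) * 2 + 1];

    	bid_snprintf(bid, sizeof(bid), buf, sizeof(buf));
    	printf("%s\n", buf);	/* deadbeef */
    	return 0;
    }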
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index 5d5bb0f32334..d390ae4e3ec8 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -45,6 +45,8 @@
#include "pmus.h"
#include "string2.h"
#include "util/util.h"
+#include "util/symbol.h"
+#include "util/annotate.h"
struct c2c_hists {
struct hists hists;
@@ -62,6 +64,7 @@ struct compute_stats {
struct c2c_hist_entry {
struct c2c_hists *hists;
+ struct evsel *evsel;
struct c2c_stats stats;
unsigned long *cpuset;
unsigned long *nodeset;
@@ -195,12 +198,14 @@ static struct hist_entry_ops c2c_entry_ops = {
static int c2c_hists__init(struct c2c_hists *hists,
const char *sort,
- int nr_header_lines);
+ int nr_header_lines,
+ struct perf_env *env);
static struct c2c_hists*
he__get_c2c_hists(struct hist_entry *he,
const char *sort,
- int nr_header_lines)
+ int nr_header_lines,
+ struct perf_env *env)
{
struct c2c_hist_entry *c2c_he;
struct c2c_hists *hists;
@@ -214,7 +219,7 @@ he__get_c2c_hists(struct hist_entry *he,
if (!hists)
return NULL;
- ret = c2c_hists__init(hists, sort, nr_header_lines);
+ ret = c2c_hists__init(hists, sort, nr_header_lines, env);
if (ret) {
free(hists);
return NULL;
@@ -223,6 +228,12 @@ he__get_c2c_hists(struct hist_entry *he,
return hists;
}
+static void c2c_he__set_evsel(struct c2c_hist_entry *c2c_he,
+ struct evsel *evsel)
+{
+ c2c_he->evsel = evsel;
+}
+
static void c2c_he__set_cpu(struct c2c_hist_entry *c2c_he,
struct perf_sample *sample)
{
@@ -273,6 +284,33 @@ static void compute_stats(struct c2c_hist_entry *c2c_he,
update_stats(&cstats->load, weight);
}
+/*
+ * Return true if annotation is possible. When list is NULL, we are
+ * called at the c2c_browser level and annotation may be initialized.
+ * When list is non-NULL, we are called at the cacheline_browser level,
+ * where annotation is allowed only if use_browser is set and symbol
+ * information is available.
+ */
+static bool perf_c2c__has_annotation(struct perf_hpp_list *list)
+{
+ if (use_browser != 1)
+ return false;
+ return !list || list->sym;
+}
+
+static void perf_c2c__evsel_hists_inc_stats(struct evsel *evsel,
+ struct hist_entry *he,
+ struct perf_sample *sample)
+{
+ struct hists *evsel_hists = evsel__hists(evsel);
+
+ hists__inc_nr_samples(evsel_hists, he->filtered);
+ evsel_hists->stats.total_period += sample->period;
+ if (!he->filtered)
+ evsel_hists->stats.total_non_filtered_period += sample->period;
+}
+
static int process_sample_event(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
@@ -284,7 +322,7 @@ static int process_sample_event(const struct perf_tool *tool __maybe_unused,
struct c2c_stats stats = { .nr_entries = 0, };
struct hist_entry *he;
struct addr_location al;
- struct mem_info *mi, *mi_dup;
+ struct mem_info *mi = NULL;
struct callchain_cursor *cursor;
int ret;
@@ -311,20 +349,15 @@ static int process_sample_event(const struct perf_tool *tool __maybe_unused,
goto out;
}
- /*
- * The mi object is released in hists__add_entry_ops,
- * if it gets sorted out into existing data, so we need
- * to take the copy now.
- */
- mi_dup = mem_info__get(mi);
-
c2c_decode_stats(&stats, mi);
he = hists__add_entry_ops(&c2c_hists->hists, &c2c_entry_ops,
&al, NULL, NULL, mi, NULL,
sample, true);
- if (he == NULL)
- goto free_mi;
+ if (he == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
c2c_he = container_of(he, struct c2c_hist_entry, he);
c2c_add_stats(&c2c_he->stats, &stats);
@@ -332,8 +365,15 @@ static int process_sample_event(const struct perf_tool *tool __maybe_unused,
c2c_he__set_cpu(c2c_he, sample);
c2c_he__set_node(c2c_he, sample);
+ c2c_he__set_evsel(c2c_he, evsel);
hists__inc_nr_samples(&c2c_hists->hists, he->filtered);
+
+ if (perf_c2c__has_annotation(NULL)) {
+ perf_c2c__evsel_hists_inc_stats(evsel, he, sample);
+ addr_map_symbol__inc_samples(mem_info__iaddr(mi), sample, evsel);
+ }
+
ret = hist_entry__append_callchain(he, sample);
if (!ret) {
@@ -348,17 +388,19 @@ static int process_sample_event(const struct perf_tool *tool __maybe_unused,
int cpu = sample->cpu == (unsigned int) -1 ? 0 : sample->cpu;
int node = c2c.cpu2node[cpu];
- mi = mi_dup;
-
- c2c_hists = he__get_c2c_hists(he, c2c.cl_sort, 2);
- if (!c2c_hists)
- goto free_mi;
+ c2c_hists = he__get_c2c_hists(he, c2c.cl_sort, 2, machine->env);
+ if (!c2c_hists) {
+ ret = -ENOMEM;
+ goto out;
+ }
he = hists__add_entry_ops(&c2c_hists->hists, &c2c_entry_ops,
&al, NULL, NULL, mi, NULL,
sample, true);
- if (he == NULL)
- goto free_mi;
+ if (he == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
c2c_he = container_of(he, struct c2c_hist_entry, he);
c2c_add_stats(&c2c_he->stats, &stats);
@@ -369,20 +411,16 @@ static int process_sample_event(const struct perf_tool *tool __maybe_unused,
c2c_he__set_cpu(c2c_he, sample);
c2c_he__set_node(c2c_he, sample);
+ c2c_he__set_evsel(c2c_he, evsel);
hists__inc_nr_samples(&c2c_hists->hists, he->filtered);
ret = hist_entry__append_callchain(he, sample);
}
out:
+ mem_info__put(mi);
addr_location__exit(&al);
return ret;
-
-free_mi:
- mem_info__put(mi_dup);
- mem_info__put(mi);
- ret = -ENOMEM;
- goto out;
}
static const char * const c2c_usage[] = {
@@ -1966,33 +2004,38 @@ static struct c2c_fmt *get_format(const char *name)
return c2c_fmt;
}
-static int c2c_hists__init_output(struct perf_hpp_list *hpp_list, char *name)
+static int c2c_hists__init_output(struct perf_hpp_list *hpp_list, char *name,
+ struct perf_env *env __maybe_unused)
{
struct c2c_fmt *c2c_fmt = get_format(name);
+ int level = 0;
if (!c2c_fmt) {
reset_dimensions();
- return output_field_add(hpp_list, name);
+ return output_field_add(hpp_list, name, &level);
}
perf_hpp_list__column_register(hpp_list, &c2c_fmt->fmt);
return 0;
}
-static int c2c_hists__init_sort(struct perf_hpp_list *hpp_list, char *name)
+static int c2c_hists__init_sort(struct perf_hpp_list *hpp_list, char *name, struct perf_env *env)
{
struct c2c_fmt *c2c_fmt = get_format(name);
struct c2c_dimension *dim;
if (!c2c_fmt) {
reset_dimensions();
- return sort_dimension__add(hpp_list, name, NULL, 0);
+ return sort_dimension__add(hpp_list, name, /*evlist=*/NULL, env, /*level=*/0);
}
dim = c2c_fmt->dim;
if (dim == &dim_dso)
hpp_list->dso = 1;
+ if (dim == &dim_symbol || dim == &dim_iaddr)
+ hpp_list->sym = 1;
+
perf_hpp_list__register_sort_field(hpp_list, &c2c_fmt->fmt);
return 0;
}
@@ -2007,7 +2050,7 @@ static int c2c_hists__init_sort(struct perf_hpp_list *hpp_list, char *name)
\
for (tok = strtok_r((char *)_list, ", ", &tmp); \
tok; tok = strtok_r(NULL, ", ", &tmp)) { \
- ret = _fn(hpp_list, tok); \
+ ret = _fn(hpp_list, tok, env); \
if (ret == -EINVAL) { \
pr_err("Invalid --fields key: `%s'", tok); \
break; \
@@ -2020,7 +2063,8 @@ static int c2c_hists__init_sort(struct perf_hpp_list *hpp_list, char *name)
static int hpp_list__parse(struct perf_hpp_list *hpp_list,
const char *output_,
- const char *sort_)
+ const char *sort_,
+ struct perf_env *env)
{
char *output = output_ ? strdup(output_) : NULL;
char *sort = sort_ ? strdup(sort_) : NULL;
@@ -2051,7 +2095,8 @@ static int hpp_list__parse(struct perf_hpp_list *hpp_list,
static int c2c_hists__init(struct c2c_hists *hists,
const char *sort,
- int nr_header_lines)
+ int nr_header_lines,
+ struct perf_env *env)
{
__hists__init(&hists->hists, &hists->list);
@@ -2065,15 +2110,16 @@ static int c2c_hists__init(struct c2c_hists *hists,
/* Overload number of header lines.*/
hists->list.nr_header_lines = nr_header_lines;
- return hpp_list__parse(&hists->list, NULL, sort);
+ return hpp_list__parse(&hists->list, /*output=*/NULL, sort, env);
}
static int c2c_hists__reinit(struct c2c_hists *c2c_hists,
const char *output,
- const char *sort)
+ const char *sort,
+ struct perf_env *env)
{
perf_hpp__reset_output_field(&c2c_hists->list);
- return hpp_list__parse(&c2c_hists->list, output, sort);
+ return hpp_list__parse(&c2c_hists->list, output, sort, env);
}
#define DISPLAY_LINE_LIMIT 0.001
@@ -2206,8 +2252,9 @@ static int filter_cb(struct hist_entry *he, void *arg __maybe_unused)
return 0;
}
-static int resort_cl_cb(struct hist_entry *he, void *arg __maybe_unused)
+static int resort_cl_cb(struct hist_entry *he, void *arg)
{
+ struct perf_env *env = arg;
struct c2c_hist_entry *c2c_he;
struct c2c_hists *c2c_hists;
bool display = he__display(he, &c2c.shared_clines_stats);
@@ -2221,7 +2268,7 @@ static int resort_cl_cb(struct hist_entry *he, void *arg __maybe_unused)
c2c_he->cacheline_idx = idx++;
calc_width(c2c_he);
- c2c_hists__reinit(c2c_hists, c2c.cl_output, c2c.cl_resort);
+ c2c_hists__reinit(c2c_hists, c2c.cl_output, c2c.cl_resort, env);
hists__collapse_resort(&c2c_hists->hists, NULL);
hists__output_resort_cb(&c2c_hists->hists, NULL, filter_cb);
@@ -2266,14 +2313,15 @@ static int setup_nodes(struct perf_session *session)
int node, idx;
struct perf_cpu cpu;
int *cpu2node;
+ struct perf_env *env = perf_session__env(session);
if (c2c.node_info > 2)
c2c.node_info = 2;
- c2c.nodes_cnt = session->header.env.nr_numa_nodes;
- c2c.cpus_cnt = session->header.env.nr_cpus_avail;
+ c2c.nodes_cnt = env->nr_numa_nodes;
+ c2c.cpus_cnt = env->nr_cpus_avail;
- n = session->header.env.numa_nodes;
+ n = env->numa_nodes;
if (!n)
return -EINVAL;
@@ -2332,7 +2380,7 @@ static int resort_shared_cl_cb(struct hist_entry *he, void *arg __maybe_unused)
return 0;
}
-static int hists__iterate_cb(struct hists *hists, hists__resort_cb_t cb)
+static int hists__iterate_cb(struct hists *hists, hists__resort_cb_t cb, void *arg)
{
struct rb_node *next = rb_first_cached(&hists->entries);
int ret = 0;
@@ -2341,7 +2389,7 @@ static int hists__iterate_cb(struct hists *hists, hists__resort_cb_t cb)
struct hist_entry *he;
he = rb_entry(next, struct hist_entry, rb_node);
- ret = cb(he, NULL);
+ ret = cb(he, arg);
if (ret)
break;
next = rb_next(&he->rb_node);
@@ -2447,7 +2495,7 @@ static void print_cacheline(struct c2c_hists *c2c_hists,
hists__fprintf(&c2c_hists->hists, false, 0, 0, 0, out, false);
}
-static void print_pareto(FILE *out)
+static void print_pareto(FILE *out, struct perf_env *env)
{
struct perf_hpp_list hpp_list;
struct rb_node *nd;
@@ -2472,7 +2520,7 @@ static void print_pareto(FILE *out)
"dcacheline";
perf_hpp_list__init(&hpp_list);
- ret = hpp_list__parse(&hpp_list, cl_output, NULL);
+	ret = hpp_list__parse(&hpp_list, cl_output, /*sort=*/NULL, env);
if (WARN_ONCE(ret, "failed to setup sort entries\n"))
return;
@@ -2537,10 +2585,48 @@ static void perf_c2c__hists_fprintf(FILE *out, struct perf_session *session)
fprintf(out, "=================================================\n");
fprintf(out, "#\n");
- print_pareto(out);
+ print_pareto(out, perf_session__env(session));
}
#ifdef HAVE_SLANG_SUPPORT
+
+static int perf_c2c__toggle_annotation(struct hist_browser *browser)
+{
+ struct hist_entry *he = browser->he_selection;
+ struct symbol *sym = NULL;
+ struct annotated_source *src = NULL;
+ struct c2c_hist_entry *c2c_he = NULL;
+ u64 al_addr = NO_ADDR;
+
+	if (he == NULL) {
+		ui_browser__help_window(&browser->b, "No entry selected for annotation");
+		return 0;
+	}
+
+	if (!perf_c2c__has_annotation(he->hists->hpp_list)) {
+		ui_browser__help_window(&browser->b, "No annotation support");
+		return 0;
+	}
+
+ sym = he->ms.sym;
+ if (sym == NULL) {
+		ui_browser__help_window(&browser->b, "Cannot annotate: no symbol found");
+ return 0;
+ }
+
+ src = symbol__hists(sym, 0);
+ if (src == NULL) {
+ ui_browser__help_window(&browser->b, "Failed to initialize annotation source");
+ return 0;
+ }
+
+ if (he->mem_info)
+ al_addr = mem_info__iaddr(he->mem_info)->al_addr;
+
+ c2c_he = container_of(he, struct c2c_hist_entry, he);
+ return hist_entry__tui_annotate(he, c2c_he->evsel, NULL, al_addr);
+}
+
static void c2c_browser__update_nr_entries(struct hist_browser *hb)
{
u64 nr_entries = 0;
@@ -2608,6 +2694,7 @@ static int perf_c2c__browse_cacheline(struct hist_entry *he)
" ENTER Toggle callchains (if present) \n"
" n Toggle Node details info \n"
" s Toggle full length of symbol and source line columns \n"
+ " a Toggle annotation view \n"
" q Return back to cacheline list \n";
if (!he)
@@ -2642,6 +2729,9 @@ static int perf_c2c__browse_cacheline(struct hist_entry *he)
c2c.node_info = (c2c.node_info + 1) % 3;
setup_nodes_header();
break;
+ case 'a':
+ perf_c2c__toggle_annotation(browser);
+ break;
case 'q':
goto out;
case '?':
@@ -2997,6 +3087,7 @@ static int perf_c2c__report(int argc, const char **argv)
const char *display = NULL;
const char *coalesce = NULL;
bool no_source = false;
+ const char *disassembler_style = NULL, *objdump_path = NULL;
const struct option options[] = {
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
@@ -3024,11 +3115,22 @@ static int perf_c2c__report(int argc, const char **argv)
OPT_BOOLEAN(0, "stitch-lbr", &c2c.stitch_lbr,
"Enable LBR callgraph stitching approach"),
OPT_BOOLEAN(0, "double-cl", &chk_double_cl, "Detect adjacent cacheline false sharing"),
+ OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
+ "Specify disassembler style (e.g. -M intel for intel syntax)"),
+ OPT_STRING(0, "objdump", &objdump_path, "path",
+ "objdump binary to use for disassembly and annotations"),
OPT_PARENT(c2c_options),
OPT_END()
};
int err = 0;
const char *output_str, *sort_str = NULL;
+ struct perf_env *env;
+
+ annotation_options__init();
+
+ err = hists__init();
+ if (err < 0)
+ goto out;
argc = parse_options(argc, argv, options, report_c2c_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
@@ -3042,6 +3144,27 @@ static int perf_c2c__report(int argc, const char **argv)
if (c2c.stats_only)
c2c.use_stdio = true;
+	/*
+	 * The annotation-related options disassembler_style and objdump_path
+	 * have been parsed along with the rest of the options, so apply
+	 * them here.
+	 */
+ if (disassembler_style) {
+ annotate_opts.disassembler_style = strdup(disassembler_style);
+ if (!annotate_opts.disassembler_style) {
+ err = -ENOMEM;
+ pr_err("Failed to allocate memory for annotation options\n");
+ goto out;
+ }
+ }
+ if (objdump_path) {
+ annotate_opts.objdump_path = strdup(objdump_path);
+ if (!annotate_opts.objdump_path) {
+ err = -ENOMEM;
+ pr_err("Failed to allocate memory for annotation options\n");
+ goto out;
+ }
+ }
+
err = symbol__validate_sym_arguments();
if (err)
goto out;
@@ -3071,14 +3194,14 @@ static int perf_c2c__report(int argc, const char **argv)
pr_debug("Error creating perf session\n");
goto out;
}
-
+ env = perf_session__env(session);
/*
* Use the 'tot' as default display type if user doesn't specify it;
* since Arm64 platform doesn't support HITMs flag, use 'peer' as the
* default display type.
*/
if (!display) {
- if (!strcmp(perf_env__arch(&session->header.env), "arm64"))
+ if (!strcmp(perf_env__arch(env), "arm64"))
display = "peer";
else
display = "tot";
@@ -3094,7 +3217,7 @@ static int perf_c2c__report(int argc, const char **argv)
goto out_session;
}
- err = c2c_hists__init(&c2c.hists, "dcacheline", 2);
+ err = c2c_hists__init(&c2c.hists, "dcacheline", 2, perf_session__env(session));
if (err) {
pr_debug("Failed to initialize hists\n");
goto out_session;
@@ -3108,7 +3231,7 @@ static int perf_c2c__report(int argc, const char **argv)
goto out_session;
}
- err = mem2node__init(&c2c.mem2node, &session->header.env);
+ err = mem2node__init(&c2c.mem2node, env);
if (err)
goto out_session;
@@ -3116,7 +3239,39 @@ static int perf_c2c__report(int argc, const char **argv)
if (err)
goto out_mem2node;
- if (symbol__init(&session->header.env) < 0)
+ if (c2c.use_stdio)
+ use_browser = 0;
+ else
+ use_browser = 1;
+
+ /*
+	 * Integrated annotation is only done in the TUI browser, so
+	 * don't allocate extra space that won't be used in the stdio
+ * implementation.
+ */
+ if (perf_c2c__has_annotation(NULL)) {
+ int ret = symbol__annotation_init();
+
+ if (ret < 0)
+ goto out_mem2node;
+ /*
+		 * For searching by name on the "Browse map details" screen,
+		 * provide it only in verbose mode so as not to bloat
+		 * struct symbol too much.
+ */
+ if (verbose > 0) {
+ /*
+ * XXX: Need to provide a less kludgy way to ask for
+ * more space per symbol, the u32 is for the index on
+ * the ui browser.
+ * See symbol__browser_index.
+ */
+ symbol_conf.priv_size += sizeof(u32);
+ }
+ annotation_config__init();
+ }
+
+ if (symbol__init(env) < 0)
goto out_mem2node;
/* No pipe support at the moment. */
@@ -3125,11 +3280,6 @@ static int perf_c2c__report(int argc, const char **argv)
goto out_mem2node;
}
- if (c2c.use_stdio)
- use_browser = 0;
- else
- use_browser = 1;
-
setup_browser(false);
err = perf_session__process_events(session);
@@ -3178,13 +3328,13 @@ static int perf_c2c__report(int argc, const char **argv)
else if (c2c.display == DISPLAY_SNP_PEER)
sort_str = "tot_peer";
- c2c_hists__reinit(&c2c.hists, output_str, sort_str);
+ c2c_hists__reinit(&c2c.hists, output_str, sort_str, perf_session__env(session));
ui_progress__init(&prog, c2c.hists.hists.nr_entries, "Sorting...");
hists__collapse_resort(&c2c.hists.hists, NULL);
hists__output_resort_cb(&c2c.hists.hists, &prog, resort_shared_cl_cb);
- hists__iterate_cb(&c2c.hists.hists, resort_cl_cb);
+ hists__iterate_cb(&c2c.hists.hists, resort_cl_cb, perf_session__env(session));
ui_progress__finish();
@@ -3200,6 +3350,7 @@ out_mem2node:
out_session:
perf_session__delete(session);
out:
+ annotation_options__exit();
return err;
}
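perf_c2c__toggle_annotation() above relies on the same container_of() recovery
used in the sample-processing path: generic hists code hands back the embedded
hist_entry, and c2c climbs back to its wrapper to reach the stashed evsel. A
reduced sketch with simplified struct layouts:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
    	((type *)((char *)(ptr) - offsetof(type, member)))

    struct hist_entry { long period; };

    struct c2c_hist_entry {
    	int cacheline_idx;
    	struct hist_entry he;	/* embedded by value, not a pointer */
    };

    int main(void)
    {
    	struct c2c_hist_entry c2c_he = { .cacheline_idx = 7 };
    	struct hist_entry *he = &c2c_he.he;	/* what generic code sees */
    	struct c2c_hist_entry *back =
    		container_of(he, struct c2c_hist_entry, he);

    	printf("%d\n", back->cacheline_idx);	/* 7 */
    	return 0;
    }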
diff --git a/tools/perf/builtin-check.c b/tools/perf/builtin-check.c
index 61a11a9b4e75..d19769a8f689 100644
--- a/tools/perf/builtin-check.c
+++ b/tools/perf/builtin-check.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include "builtin.h"
#include "color.h"
+#include "util/bpf-utils.h"
#include "util/debug.h"
#include "util/header.h"
#include <tools/config.h>
@@ -22,6 +23,17 @@ static const char *check_feature_usage[] = {
NULL
};
+#define FEATURE_STATUS(name_, macro_) { \
+ .name = name_, \
+ .macro = #macro_, \
+ .is_builtin = IS_BUILTIN(macro_) }
+
+#define FEATURE_STATUS_TIP(name_, macro_, tip_) { \
+ .name = name_, \
+ .macro = #macro_, \
+ .tip = tip_, \
+ .is_builtin = IS_BUILTIN(macro_) }
+
struct feature_status supported_features[] = {
FEATURE_STATUS("aio", HAVE_AIO_SUPPORT),
FEATURE_STATUS("bpf", HAVE_LIBBPF_SUPPORT),
@@ -30,20 +42,20 @@ struct feature_status supported_features[] = {
FEATURE_STATUS("dwarf", HAVE_LIBDW_SUPPORT),
FEATURE_STATUS("dwarf_getlocations", HAVE_LIBDW_SUPPORT),
FEATURE_STATUS("dwarf-unwind", HAVE_DWARF_UNWIND_SUPPORT),
- FEATURE_STATUS("auxtrace", HAVE_AUXTRACE_SUPPORT),
- FEATURE_STATUS("libbfd", HAVE_LIBBFD_SUPPORT),
+ FEATURE_STATUS_TIP("libbfd", HAVE_LIBBFD_SUPPORT, "Deprecated, license incompatibility, use BUILD_NONDISTRO=1 and install binutils-dev[el]"),
+ FEATURE_STATUS("libbpf-strings", HAVE_LIBBPF_STRINGS_SUPPORT),
FEATURE_STATUS("libcapstone", HAVE_LIBCAPSTONE_SUPPORT),
- FEATURE_STATUS("libcrypto", HAVE_LIBCRYPTO_SUPPORT),
FEATURE_STATUS("libdw-dwarf-unwind", HAVE_LIBDW_SUPPORT),
FEATURE_STATUS("libelf", HAVE_LIBELF_SUPPORT),
+ FEATURE_STATUS("libLLVM", HAVE_LIBLLVM_SUPPORT),
FEATURE_STATUS("libnuma", HAVE_LIBNUMA_SUPPORT),
FEATURE_STATUS("libopencsd", HAVE_CSTRACE_SUPPORT),
- FEATURE_STATUS("libperl", HAVE_LIBPERL_SUPPORT),
+ FEATURE_STATUS_TIP("libperl", HAVE_LIBPERL_SUPPORT, "Deprecated, use LIBPERL=1 and install perl-ExtUtils-Embed/libperl-dev to build with it"),
FEATURE_STATUS("libpfm4", HAVE_LIBPFM),
FEATURE_STATUS("libpython", HAVE_LIBPYTHON_SUPPORT),
FEATURE_STATUS("libslang", HAVE_SLANG_SUPPORT),
FEATURE_STATUS("libtraceevent", HAVE_LIBTRACEEVENT),
- FEATURE_STATUS("libunwind", HAVE_LIBUNWIND_SUPPORT),
+ FEATURE_STATUS_TIP("libunwind", HAVE_LIBUNWIND_SUPPORT, "Deprecated, use LIBUNWIND=1 and install libunwind-dev[el] to build with it"),
FEATURE_STATUS("lzma", HAVE_LZMA_SUPPORT),
FEATURE_STATUS("numa_num_possible_cpus", HAVE_LIBNUMA_SUPPORT),
FEATURE_STATUS("zlib", HAVE_ZLIB_SUPPORT),
@@ -66,21 +78,20 @@ static void on_off_print(const char *status)
}
/* Helper function to print status of a feature along with name/macro */
-static void status_print(const char *name, const char *macro,
- const char *status)
+void feature_status__printf(const struct feature_status *feature)
{
+ const char *name = feature->name, *macro = feature->macro,
+ *status = feature->is_builtin ? "on" : "OFF";
+
printf("%22s: ", name);
on_off_print(status);
- printf(" # %s\n", macro);
-}
+ printf(" # %s", macro);
+
+ if (!feature->is_builtin && feature->tip)
+ printf(" ( tip: %s )", feature->tip);
-#define STATUS(feature) \
-do { \
- if (feature.is_builtin) \
- status_print(feature.name, feature.macro, "on"); \
- else \
- status_print(feature.name, feature.macro, "OFF"); \
-} while (0)
+ putchar('\n');
+}
/**
* check whether "feature" is built-in with perf
@@ -95,7 +106,7 @@ static int has_support(const char *feature)
if ((strcasecmp(feature, supported_features[i].name) == 0) ||
(strcasecmp(feature, supported_features[i].macro) == 0)) {
if (!quiet)
- STATUS(supported_features[i]);
+ feature_status__printf(&supported_features[i]);
return supported_features[i].is_builtin;
}
}
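FEATURE_STATUS() above records .is_builtin via IS_BUILTIN(), which collapses
to a plain 0 or 1 at compile time even though the HAVE_* macros may be
entirely undefined. The preprocessor trick behind it, reproduced from the
kernel's include/linux/kconfig.h:

    #include <stdio.h>

    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
    #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
    #define __is_defined(x) ___is_defined(x)
    #define IS_BUILTIN(option) __is_defined(option)

    #define HAVE_FOO_SUPPORT 1
    /* HAVE_BAR_SUPPORT deliberately left undefined */

    int main(void)
    {
    	printf("foo: %d\n", IS_BUILTIN(HAVE_FOO_SUPPORT));	/* 1 */
    	printf("bar: %d\n", IS_BUILTIN(HAVE_BAR_SUPPORT));	/* 0 */
    	return 0;
    }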
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index ae490d58af92..53d5ea4a6a4f 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -2003,7 +2003,7 @@ int cmd_diff(int argc, const char **argv)
sort__mode = SORT_MODE__DIFF;
}
- if (setup_sorting(NULL) < 0)
+ if (setup_sorting(/*evlist=*/NULL, perf_session__env(data__files[0].session)) < 0)
usage_with_options(diff_usage, options);
setup_pager();
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c
index a9bd7bbef5a9..fb6e2c3c24c8 100644
--- a/tools/perf/builtin-evlist.c
+++ b/tools/perf/builtin-evlist.c
@@ -19,7 +19,8 @@
#include "util/tool.h"
#include "util/util.h"
-static int process_header_feature(struct perf_session *session __maybe_unused,
+static int process_header_feature(const struct perf_tool *tool __maybe_unused,
+ struct perf_session *session __maybe_unused,
union perf_event *event __maybe_unused)
{
session_done = 1;
diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
index 7caa18d5ffc3..6b6eec65f93f 100644
--- a/tools/perf/builtin-ftrace.c
+++ b/tools/perf/builtin-ftrace.c
@@ -19,6 +19,7 @@
#include <ctype.h>
#include <linux/capability.h>
#include <linux/string.h>
+#include <sys/stat.h>
#include "debug.h"
#include <subcmd/pager.h>
@@ -45,6 +46,8 @@ static volatile sig_atomic_t done;
static struct stats latency_stats; /* for tracepoints */
+static char tracing_instance[PATH_MAX]; /* Trace instance directory */
+
static void sig_handler(int sig __maybe_unused)
{
done = true;
@@ -100,6 +103,34 @@ static bool is_ftrace_supported(void)
return supported;
}
+/*
+ * Wrapper to test whether a file exists in the per-process directory
+ * .../tracing/instances/XXX. If so, return that per-instance file name.
+ * Otherwise the file exists only in the .../tracing directory and applies
+ * to all instances, for example available_filter_functions; return that
+ * file name instead.
+ *
+ * This function works like get_tracing_file() and expects its caller
+ * to free the returned file name.
+ *
+ * The global variable tracing_instance is set by init_tracing_instance(),
+ * called at startup, to a process-specific tracing subdirectory.
+ */
+static char *get_tracing_instance_file(const char *name)
+{
+ char *file;
+
+ if (asprintf(&file, "%s/%s", tracing_instance, name) < 0)
+ return NULL;
+
+ if (!access(file, F_OK))
+ return file;
+
+ free(file);
+ file = get_tracing_file(name);
+ return file;
+}
+
static int __write_tracing_file(const char *name, const char *val, bool append)
{
char *file;
@@ -109,7 +140,7 @@ static int __write_tracing_file(const char *name, const char *val, bool append)
char errbuf[512];
char *val_copy;
- file = get_tracing_file(name);
+ file = get_tracing_instance_file(name);
if (!file) {
pr_debug("cannot get tracing file: %s\n", name);
return -1;
@@ -167,7 +198,7 @@ static int read_tracing_file_to_stdout(const char *name)
int fd;
int ret = -1;
- file = get_tracing_file(name);
+ file = get_tracing_instance_file(name);
if (!file) {
pr_debug("cannot get tracing file: %s\n", name);
return -1;
@@ -209,7 +240,7 @@ static int read_tracing_file_by_line(const char *name,
char *file;
FILE *fp;
- file = get_tracing_file(name);
+ file = get_tracing_instance_file(name);
if (!file) {
pr_debug("cannot get tracing file: %s\n", name);
return -1;
@@ -270,6 +301,10 @@ static void reset_tracing_options(struct perf_ftrace *ftrace __maybe_unused)
write_tracing_option_file("funcgraph-proc", "0");
write_tracing_option_file("funcgraph-abstime", "0");
write_tracing_option_file("funcgraph-tail", "0");
+ write_tracing_option_file("funcgraph-args", "0");
+ write_tracing_option_file("funcgraph-retval", "0");
+ write_tracing_option_file("funcgraph-retval-hex", "0");
+ write_tracing_option_file("funcgraph-retaddr", "0");
write_tracing_option_file("latency-format", "0");
write_tracing_option_file("irq-info", "0");
}
@@ -299,6 +334,39 @@ static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused)
return 0;
}
+/*
+ * Remove the .../tracing/instances/XXX subdirectory created by
+ * init_tracing_instance().
+ */
+static void exit_tracing_instance(void)
+{
+ if (rmdir(tracing_instance))
+ pr_err("failed to delete tracing/instances directory\n");
+}
+
+/*
+ * Create a subdirectory .../tracing/instances/XXX to hold session- or
+ * process-specific setup. To delete this setup, simply remove the
+ * subdirectory.
+ */
+static int init_tracing_instance(void)
+{
+ char dirname[] = "instances/perf-ftrace-XXXXXX";
+ char *path;
+
+ path = get_tracing_file(dirname);
+ if (!path)
+ goto error;
+ strncpy(tracing_instance, path, sizeof(tracing_instance) - 1);
+ put_tracing_file(path);
+ path = mkdtemp(tracing_instance);
+ if (!path)
+ goto error;
+ return 0;
+
+error:
+ pr_err("failed to create tracing/instances directory\n");
+ return -1;
+}
+
static int set_tracing_pid(struct perf_ftrace *ftrace)
{
int i;
@@ -478,6 +546,41 @@ static int set_tracing_sleep_time(struct perf_ftrace *ftrace)
return 0;
}
+static int set_tracing_funcgraph_args(struct perf_ftrace *ftrace)
+{
+ if (ftrace->graph_args) {
+ if (write_tracing_option_file("funcgraph-args", "1") < 0)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int set_tracing_funcgraph_retval(struct perf_ftrace *ftrace)
+{
+ if (ftrace->graph_retval || ftrace->graph_retval_hex) {
+ if (write_tracing_option_file("funcgraph-retval", "1") < 0)
+ return -1;
+ }
+
+ if (ftrace->graph_retval_hex) {
+ if (write_tracing_option_file("funcgraph-retval-hex", "1") < 0)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int set_tracing_funcgraph_retaddr(struct perf_ftrace *ftrace)
+{
+ if (ftrace->graph_retaddr) {
+ if (write_tracing_option_file("funcgraph-retaddr", "1") < 0)
+ return -1;
+ }
+
+ return 0;
+}
+
static int set_tracing_funcgraph_irqs(struct perf_ftrace *ftrace)
{
if (!ftrace->graph_noirqs)
@@ -578,6 +681,21 @@ static int set_tracing_options(struct perf_ftrace *ftrace)
return -1;
}
+ if (set_tracing_funcgraph_args(ftrace) < 0) {
+ pr_err("failed to set tracing option funcgraph-args\n");
+ return -1;
+ }
+
+ if (set_tracing_funcgraph_retval(ftrace) < 0) {
+ pr_err("failed to set tracing option funcgraph-retval\n");
+ return -1;
+ }
+
+ if (set_tracing_funcgraph_retaddr(ftrace) < 0) {
+ pr_err("failed to set tracing option funcgraph-retaddr\n");
+ return -1;
+ }
+
if (set_tracing_funcgraph_irqs(ftrace) < 0) {
pr_err("failed to set tracing option funcgraph-irqs\n");
return -1;
@@ -629,14 +747,17 @@ static int __cmd_ftrace(struct perf_ftrace *ftrace)
select_tracer(ftrace);
+ if (init_tracing_instance() < 0)
+ goto out;
+
if (reset_tracing_files(ftrace) < 0) {
pr_err("failed to reset ftrace\n");
- goto out;
+ goto out_reset;
}
/* reset ftrace buffer */
if (write_tracing_file("trace", "0") < 0)
- goto out;
+ goto out_reset;
if (set_tracing_options(ftrace) < 0)
goto out_reset;
@@ -648,7 +769,7 @@ static int __cmd_ftrace(struct perf_ftrace *ftrace)
setup_pager();
- trace_file = get_tracing_file("trace_pipe");
+ trace_file = get_tracing_instance_file("trace_pipe");
if (!trace_file) {
pr_err("failed to open trace_pipe\n");
goto out_reset;
@@ -723,7 +844,7 @@ static int __cmd_ftrace(struct perf_ftrace *ftrace)
out_close_fd:
close(trace_fd);
out_reset:
- reset_tracing_files(ftrace);
+ exit_tracing_instance();
out:
return (done && !workload_exec_errno) ? 0 : -1;
}
@@ -924,6 +1045,9 @@ static int prepare_func_latency(struct perf_ftrace *ftrace)
if (ftrace->target.use_bpf)
return perf_ftrace__latency_prepare_bpf(ftrace);
+ if (init_tracing_instance() < 0)
+ return -1;
+
if (reset_tracing_files(ftrace) < 0) {
pr_err("failed to reset ftrace\n");
return -1;
@@ -942,7 +1066,7 @@ static int prepare_func_latency(struct perf_ftrace *ftrace)
return -1;
}
- trace_file = get_tracing_file("trace_pipe");
+ trace_file = get_tracing_instance_file("trace_pipe");
if (!trace_file) {
pr_err("failed to open trace_pipe\n");
return -1;
@@ -993,7 +1117,7 @@ static int cleanup_func_latency(struct perf_ftrace *ftrace)
if (ftrace->target.use_bpf)
return perf_ftrace__latency_cleanup_bpf(ftrace);
- reset_tracing_files(ftrace);
+ exit_tracing_instance();
return 0;
}
@@ -1304,17 +1428,20 @@ static int __cmd_profile(struct perf_ftrace *ftrace)
goto out;
}
+ if (init_tracing_instance() < 0)
+ goto out;
+
if (reset_tracing_files(ftrace) < 0) {
pr_err("failed to reset ftrace\n");
- goto out;
+ goto out_reset;
}
/* reset ftrace buffer */
if (write_tracing_file("trace", "0") < 0)
- goto out;
+ goto out_reset;
if (set_tracing_options(ftrace) < 0)
- return -1;
+ goto out_reset;
if (write_tracing_file("current_tracer", ftrace->tracer) < 0) {
pr_err("failed to set current_tracer to %s\n", ftrace->tracer);
@@ -1323,7 +1450,7 @@ static int __cmd_profile(struct perf_ftrace *ftrace)
setup_pager();
- trace_file = get_tracing_file("trace_pipe");
+ trace_file = get_tracing_instance_file("trace_pipe");
if (!trace_file) {
pr_err("failed to open trace_pipe\n");
goto out_reset;
@@ -1385,7 +1512,7 @@ out_free_line:
out_close_fd:
close(trace_fd);
out_reset:
- reset_tracing_files(ftrace);
+ exit_tracing_instance();
out:
return (done && !workload_exec_errno) ? 0 : -1;
}
@@ -1476,6 +1603,33 @@ static void delete_filter_func(struct list_head *head)
}
}
+static int parse_filter_event(const struct option *opt, const char *str,
+ int unset __maybe_unused)
+{
+ struct list_head *head = opt->value;
+ struct filter_entry *entry;
+ char *s, *p;
+ int ret = -ENOMEM;
+
+ s = strdup(str);
+ if (s == NULL)
+ return -ENOMEM;
+
+ while ((p = strsep(&s, ",")) != NULL) {
+ entry = malloc(sizeof(*entry) + strlen(p) + 1);
+ if (entry == NULL)
+ goto out;
+
+ strcpy(entry->name, p);
+ list_add_tail(&entry->list, head);
+ }
+ ret = 0;
+
+out:
+ free(s);
+ return ret;
+}
+
static int parse_buffer_size(const struct option *opt,
const char *str, int unset)
{
@@ -1534,6 +1688,10 @@ static int parse_graph_tracer_opts(const struct option *opt,
int ret;
struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value;
struct sublevel_option graph_tracer_opts[] = {
+ { .name = "args", .value_ptr = &ftrace->graph_args },
+ { .name = "retval", .value_ptr = &ftrace->graph_retval },
+ { .name = "retval-hex", .value_ptr = &ftrace->graph_retval_hex },
+ { .name = "retaddr", .value_ptr = &ftrace->graph_retaddr },
{ .name = "nosleep-time", .value_ptr = &ftrace->graph_nosleep_time },
{ .name = "noirqs", .value_ptr = &ftrace->graph_noirqs },
{ .name = "verbose", .value_ptr = &ftrace->graph_verbose },
@@ -1590,7 +1748,6 @@ int cmd_ftrace(int argc, const char **argv)
int (*cmd_func)(struct perf_ftrace *) = NULL;
struct perf_ftrace ftrace = {
.tracer = DEFAULT_TRACER,
- .target = { .uid = UINT_MAX, },
};
const struct option common_options[] = {
OPT_STRING('p', "pid", &ftrace.target.pid, "pid",
@@ -1626,7 +1783,7 @@ int cmd_ftrace(int argc, const char **argv)
OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func",
"Set nograph filter on given functions", parse_filter_func),
OPT_CALLBACK(0, "graph-opts", &ftrace, "options",
- "Graph tracer options, available options: nosleep-time,noirqs,verbose,thresh=<n>,depth=<n>",
+ "Graph tracer options, available options: args,retval,retval-hex,retaddr,nosleep-time,noirqs,verbose,thresh=<n>,depth=<n>",
parse_graph_tracer_opts),
OPT_CALLBACK('m', "buffer-size", &ftrace.percpu_buffer_size, "size",
"Size of per cpu buffer, needs to use a B, K, M or G suffix.", parse_buffer_size),
@@ -1639,6 +1796,8 @@ int cmd_ftrace(int argc, const char **argv)
const struct option latency_options[] = {
OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func",
"Show latency of given function", parse_filter_func),
+ OPT_CALLBACK('e', "events", &ftrace.event_pair, "event1,event2",
+ "Show latency between the two events", parse_filter_event),
#ifdef HAVE_BPF_SKEL
OPT_BOOLEAN('b', "use-bpf", &ftrace.target.use_bpf,
"Use BPF to measure function latency"),
@@ -1691,6 +1850,7 @@ int cmd_ftrace(int argc, const char **argv)
INIT_LIST_HEAD(&ftrace.notrace);
INIT_LIST_HEAD(&ftrace.graph_funcs);
INIT_LIST_HEAD(&ftrace.nograph_funcs);
+ INIT_LIST_HEAD(&ftrace.event_pair);
signal(SIGINT, sig_handler);
signal(SIGUSR1, sig_handler);
@@ -1745,9 +1905,24 @@ int cmd_ftrace(int argc, const char **argv)
cmd_func = __cmd_ftrace;
break;
case PERF_FTRACE_LATENCY:
- if (list_empty(&ftrace.filters)) {
- pr_err("Should provide a function to measure\n");
+ if (list_empty(&ftrace.filters) && list_empty(&ftrace.event_pair)) {
+ pr_err("Should provide a function or events to measure\n");
+ parse_options_usage(ftrace_usage, options, "T", 1);
+ parse_options_usage(NULL, options, "e", 1);
+ ret = -EINVAL;
+ goto out_delete_filters;
+ }
+ if (!list_empty(&ftrace.filters) && !list_empty(&ftrace.event_pair)) {
+		pr_err("Please specify either a function or events, not both\n");
parse_options_usage(ftrace_usage, options, "T", 1);
+ parse_options_usage(NULL, options, "e", 1);
+ ret = -EINVAL;
+ goto out_delete_filters;
+ }
+ if (!list_empty(&ftrace.event_pair) && !ftrace.target.use_bpf) {
+ pr_err("Event processing needs BPF\n");
+ parse_options_usage(ftrace_usage, options, "b", 1);
+ parse_options_usage(NULL, options, "e", 1);
ret = -EINVAL;
goto out_delete_filters;
}
@@ -1838,6 +2013,7 @@ out_delete_filters:
delete_filter_func(&ftrace.notrace);
delete_filter_func(&ftrace.graph_funcs);
delete_filter_func(&ftrace.nograph_funcs);
+ delete_filter_func(&ftrace.event_pair);
return ret;
}
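A standalone sketch of the instance lifecycle that init_tracing_instance() and
exit_tracing_instance() build on: creating a directory under tracefs
instances/ materializes a complete, independent trace buffer (its own trace,
trace_pipe, options and so on), and removing the directory tears the whole
instance down, which is why the exit paths above can simply rmdir instead of
calling reset_tracing_files(). The mount point below is assumed and root is
required:

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void)
    {
    	char path[] = "/sys/kernel/tracing/instances/perf-ftrace-XXXXXX";

    	if (!mkdtemp(path)) {		/* mkdir on tracefs creates an instance */
    		perror("mkdtemp");
    		return 1;
    	}
    	printf("instance: %s\n", path);

    	if (rmdir(path)) {		/* rmdir destroys it wholesale */
    		perror("rmdir");
    		return 1;
    	}
    	return 0;
    }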
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 11e49cafa3af..aa7be4fb5838 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -197,18 +197,20 @@ static int perf_event__drop_oe(const struct perf_tool *tool __maybe_unused,
}
#endif
-static int perf_event__repipe_op2_synth(struct perf_session *session,
+static int perf_event__repipe_op2_synth(const struct perf_tool *tool,
+ struct perf_session *session __maybe_unused,
union perf_event *event)
{
- return perf_event__repipe_synth(session->tool, event);
+ return perf_event__repipe_synth(tool, event);
}
-static int perf_event__repipe_op4_synth(struct perf_session *session,
+static int perf_event__repipe_op4_synth(const struct perf_tool *tool,
+ struct perf_session *session __maybe_unused,
union perf_event *event,
u64 data __maybe_unused,
const char *str __maybe_unused)
{
- return perf_event__repipe_synth(session->tool, event);
+ return perf_event__repipe_synth(tool, event);
}
static int perf_event__repipe_attr(const struct perf_tool *tool,
@@ -237,8 +239,6 @@ static int perf_event__repipe_event_update(const struct perf_tool *tool,
return perf_event__repipe_synth(tool, event);
}
-#ifdef HAVE_AUXTRACE_SUPPORT
-
static int copy_bytes(struct perf_inject *inject, struct perf_data *data, off_t size)
{
char buf[4096];
@@ -258,12 +258,11 @@ static int copy_bytes(struct perf_inject *inject, struct perf_data *data, off_t
return 0;
}
-static s64 perf_event__repipe_auxtrace(struct perf_session *session,
+static s64 perf_event__repipe_auxtrace(const struct perf_tool *tool,
+ struct perf_session *session,
union perf_event *event)
{
- const struct perf_tool *tool = session->tool;
- struct perf_inject *inject = container_of(tool, struct perf_inject,
- tool);
+ struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
int ret;
inject->have_auxtrace = true;
@@ -296,18 +295,6 @@ static s64 perf_event__repipe_auxtrace(struct perf_session *session,
return event->auxtrace.size;
}
-#else
-
-static s64
-perf_event__repipe_auxtrace(struct perf_session *session __maybe_unused,
- union perf_event *event __maybe_unused)
-{
- pr_err("AUX area tracing not supported\n");
- return -EINVAL;
-}
-
-#endif
-
static int perf_event__repipe(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
@@ -587,15 +574,17 @@ static int perf_event__repipe_mmap2(const struct perf_tool *tool,
struct perf_sample *sample,
struct machine *machine)
{
- struct dso_id id;
- struct dso_id *dso_id = NULL;
+ struct dso_id id = dso_id_empty;
- if (!(event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID)) {
+ if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
+ build_id__init(&id.build_id, event->mmap2.build_id, event->mmap2.build_id_size);
+ } else {
id.maj = event->mmap2.maj;
id.min = event->mmap2.min;
id.ino = event->mmap2.ino;
id.ino_generation = event->mmap2.ino_generation;
- dso_id = &id;
+ id.mmap2_valid = true;
+ id.mmap2_ino_generation_valid = true;
}
return perf_event__repipe_common_mmap(
@@ -603,7 +592,7 @@ static int perf_event__repipe_mmap2(const struct perf_tool *tool,
event->mmap2.pid, event->mmap2.tid,
event->mmap2.start, event->mmap2.len, event->mmap2.pgoff,
event->mmap2.flags, event->mmap2.prot,
- event->mmap2.filename, dso_id,
+ event->mmap2.filename, &id,
perf_event__process_mmap2);
}
@@ -659,31 +648,33 @@ static int perf_event__repipe_exit(const struct perf_tool *tool,
}
#ifdef HAVE_LIBTRACEEVENT
-static int perf_event__repipe_tracing_data(struct perf_session *session,
+static int perf_event__repipe_tracing_data(const struct perf_tool *tool,
+ struct perf_session *session,
union perf_event *event)
{
- perf_event__repipe_synth(session->tool, event);
+ perf_event__repipe_synth(tool, event);
- return perf_event__process_tracing_data(session, event);
+ return perf_event__process_tracing_data(tool, session, event);
}
#endif
static int dso__read_build_id(struct dso *dso)
{
struct nscookie nsc;
+ struct build_id bid = { .size = 0, };
if (dso__has_build_id(dso))
return 0;
mutex_lock(dso__lock(dso));
nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);
- if (filename__read_build_id(dso__long_name(dso), dso__bid(dso)) > 0)
- dso__set_has_build_id(dso);
+ if (filename__read_build_id(dso__long_name(dso), &bid) > 0)
+ dso__set_build_id(dso, &bid);
else if (dso__nsinfo(dso)) {
char *new_name = dso__filename_with_chroot(dso, dso__long_name(dso));
- if (new_name && filename__read_build_id(new_name, dso__bid(dso)) > 0)
- dso__set_has_build_id(dso);
+ if (new_name && filename__read_build_id(new_name, &bid) > 0)
+ dso__set_build_id(dso, &bid);
free(new_name);
}
nsinfo__mountns_exit(&nsc);
@@ -732,23 +723,26 @@ static bool perf_inject__lookup_known_build_id(struct perf_inject *inject,
struct dso *dso)
{
struct str_node *pos;
- int bid_len;
strlist__for_each_entry(pos, inject->known_build_ids) {
+ struct build_id bid;
const char *build_id, *dso_name;
+ size_t bid_len;
build_id = skip_spaces(pos->s);
dso_name = strchr(build_id, ' ');
bid_len = dso_name - pos->s;
+ if (bid_len > sizeof(bid.data))
+ bid_len = sizeof(bid.data);
dso_name = skip_spaces(dso_name);
if (strcmp(dso__long_name(dso), dso_name))
continue;
- for (int ix = 0; 2 * ix + 1 < bid_len; ++ix) {
- dso__bid(dso)->data[ix] = (hex(build_id[2 * ix]) << 4 |
- hex(build_id[2 * ix + 1]));
+ for (size_t ix = 0; 2 * ix + 1 < bid_len; ++ix) {
+ bid.data[ix] = (hex(build_id[2 * ix]) << 4 |
+ hex(build_id[2 * ix + 1]));
}
- dso__bid(dso)->size = bid_len / 2;
- dso__set_has_build_id(dso);
+ bid.size = bid_len / 2;
+ dso__set_build_id(dso, &bid);
return true;
}
return false;
@@ -1342,7 +1336,7 @@ static int process_build_id(const struct perf_tool *tool,
{
struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
- return perf_event__process_build_id(inject->session, event);
+ return perf_event__process_build_id(tool, inject->session, event);
}
static int synthesize_build_id(struct perf_inject *inject, struct dso *dso, pid_t machine_pid)
@@ -1774,9 +1768,10 @@ static int host__repipe(const struct perf_tool *tool,
return perf_event__repipe(tool, event, sample, machine);
}
-static int host__finished_init(struct perf_session *session, union perf_event *event)
+static int host__finished_init(const struct perf_tool *tool, struct perf_session *session,
+ union perf_event *event)
{
- struct perf_inject *inject = container_of(session->tool, struct perf_inject, tool);
+ struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
struct guest_session *gs = &inject->guest_session;
int ret;
@@ -1823,7 +1818,7 @@ static int host__finished_init(struct perf_session *session, union perf_event *e
if (ret)
return ret;
- return perf_event__repipe_op2_synth(session, event);
+ return perf_event__repipe_op2_synth(tool, session, event);
}
/*
@@ -2530,9 +2525,12 @@ int cmd_inject(int argc, const char **argv)
inject.tool.finished_init = perf_event__repipe_op2_synth;
inject.tool.compressed = perf_event__repipe_op4_synth;
inject.tool.auxtrace = perf_event__repipe_auxtrace;
+ inject.tool.bpf_metadata = perf_event__repipe_op2_synth;
inject.tool.dont_split_sample_group = true;
+ inject.tool.merge_deferred_callchains = false;
inject.session = __perf_session__new(&data, &inject.tool,
- /*trace_event_repipe=*/inject.output.is_pipe);
+ /*trace_event_repipe=*/inject.output.is_pipe,
+ /*host_env=*/NULL);
if (IS_ERR(inject.session)) {
ret = PTR_ERR(inject.session);
@@ -2601,7 +2599,7 @@ int cmd_inject(int argc, const char **argv)
inject.tool.finished_round = perf_event__drop_oe;
}
#endif
- ret = symbol__init(&inject.session->header.env);
+ ret = symbol__init(perf_session__env(inject.session));
if (ret < 0)
goto out_delete;
diff --git a/tools/perf/builtin-kallsyms.c b/tools/perf/builtin-kallsyms.c
index a3c2ffdc1af8..3c4339982b16 100644
--- a/tools/perf/builtin-kallsyms.c
+++ b/tools/perf/builtin-kallsyms.c
@@ -12,18 +12,28 @@
#include <subcmd/parse-options.h>
#include "debug.h"
#include "dso.h"
+#include "env.h"
#include "machine.h"
#include "map.h"
#include "symbol.h"
static int __cmd_kallsyms(int argc, const char **argv)
{
- int i;
- struct machine *machine = machine__new_kallsyms();
+ int i, err;
+ struct perf_env host_env;
+ struct machine *machine = NULL;
+
+ perf_env__init(&host_env);
+ err = perf_env__set_cmdline(&host_env, argc, argv);
+ if (err)
+ goto out;
+
+ machine = machine__new_kallsyms(&host_env);
if (machine == NULL) {
pr_err("Couldn't read /proc/kallsyms\n");
- return -1;
+ err = -1;
+ goto out;
}
for (i = 0; i < argc; ++i) {
@@ -42,9 +52,10 @@ static int __cmd_kallsyms(int argc, const char **argv)
map__unmap_ip(map, symbol->start), map__unmap_ip(map, symbol->end),
symbol->start, symbol->end);
}
-
+out:
machine__delete(machine);
- return 0;
+ perf_env__exit(&host_env);
+ return err;
}
int cmd_kallsyms(int argc, const char **argv)
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 67fb1946ef13..7929a5fa5f46 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -2024,7 +2024,7 @@ int cmd_kmem(int argc, const char **argv)
symbol_conf.use_callchain = true;
}
- symbol__init(&session->header.env);
+ symbol__init(perf_session__env(session));
if (perf_time__parse_str(&ptime, time_str) != 0) {
pr_err("Invalid time string\n");
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 67fd2b006b0b..c61369d54dd9 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -1175,7 +1175,7 @@ static int cpu_isa_config(struct perf_kvm_stat *kvm)
}
cpuid = buf;
} else
- cpuid = kvm->session->header.env.cpuid;
+ cpuid = perf_session__env(kvm->session)->cpuid;
if (!cpuid) {
pr_err("Failed to look up CPU type\n");
@@ -1561,7 +1561,7 @@ static int read_events(struct perf_kvm_stat *kvm)
return PTR_ERR(kvm->session);
}
- symbol__init(&kvm->session->header.env);
+ symbol__init(perf_session__env(kvm->session));
if (!perf_session__has_traces(kvm->session, "kvm record")) {
ret = -EINVAL;
@@ -1636,14 +1636,6 @@ exit:
return ret;
}
-#define STRDUP_FAIL_EXIT(s) \
- ({ char *_p; \
- _p = strdup(s); \
- if (!_p) \
- return -ENOMEM; \
- _p; \
- })
-
int __weak setup_kvm_events_tp(struct perf_kvm_stat *kvm __maybe_unused)
{
return 0;
@@ -1688,7 +1680,7 @@ kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
rec_argv[i] = STRDUP_FAIL_EXIT(record_args[i]);
for (j = 0; j < events_tp_size; j++) {
- rec_argv[i++] = "-e";
+ rec_argv[i++] = STRDUP_FAIL_EXIT("-e");
rec_argv[i++] = STRDUP_FAIL_EXIT(kvm_events_tp[j]);
}
@@ -1696,7 +1688,7 @@ kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
rec_argv[i++] = STRDUP_FAIL_EXIT(kvm->file_name);
for (j = 1; j < (unsigned int)argc; j++, i++)
- rec_argv[i] = argv[j];
+ rec_argv[i] = STRDUP_FAIL_EXIT(argv[j]);
set_option_flag(record_options, 'e', "event", PARSE_OPT_HIDDEN);
set_option_flag(record_options, 0, "filter", PARSE_OPT_HIDDEN);
@@ -1719,7 +1711,13 @@ kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
set_option_flag(record_options, 0, "transaction", PARSE_OPT_DISABLED);
record_usage = kvm_stat_record_usage;
- return cmd_record(i, rec_argv);
+ ret = cmd_record(i, rec_argv);
+
+EXIT:
+ for (i = 0; i < rec_argc; i++)
+ free((void *)rec_argv[i]);
+ free(rec_argv);
+ return ret;
}
static int
@@ -1871,8 +1869,6 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
kvm->opts.user_interval = 1;
kvm->opts.mmap_pages = 512;
kvm->opts.target.uses_mmap = false;
- kvm->opts.target.uid_str = NULL;
- kvm->opts.target.uid = UINT_MAX;
symbol__init(NULL);
disable_buildid_cache();
@@ -2002,58 +1998,122 @@ static int __cmd_record(const char *file_name, int argc, const char **argv)
int rec_argc, i = 0, j, ret;
const char **rec_argv;
- ret = kvm_add_default_arch_event(&argc, argv);
- if (ret)
- return -EINVAL;
-
- rec_argc = argc + 2;
+ /*
+ * Besides the 2 extra arguments "-o" and the file name,
+ * kvm_add_default_arch_event() may add 2 more options,
+ * so allocate 4 extra slots.
+ */
+ rec_argc = argc + 2 + 2;
rec_argv = calloc(rec_argc + 1, sizeof(char *));
- rec_argv[i++] = strdup("record");
- rec_argv[i++] = strdup("-o");
- rec_argv[i++] = strdup(file_name);
+ if (!rec_argv)
+ return -ENOMEM;
+
+ rec_argv[i++] = STRDUP_FAIL_EXIT("record");
+ rec_argv[i++] = STRDUP_FAIL_EXIT("-o");
+ rec_argv[i++] = STRDUP_FAIL_EXIT(file_name);
for (j = 1; j < argc; j++, i++)
- rec_argv[i] = argv[j];
+ rec_argv[i] = STRDUP_FAIL_EXIT(argv[j]);
- BUG_ON(i != rec_argc);
+ BUG_ON(i + 2 != rec_argc);
+
+ ret = kvm_add_default_arch_event(&i, rec_argv);
+ if (ret)
+ goto EXIT;
+
+ ret = cmd_record(i, rec_argv);
- return cmd_record(i, rec_argv);
+EXIT:
+ for (i = 0; i < rec_argc; i++)
+ free((void *)rec_argv[i]);
+ free(rec_argv);
+ return ret;
}
static int __cmd_report(const char *file_name, int argc, const char **argv)
{
- int rec_argc, i = 0, j;
+ int rec_argc, i = 0, j, ret;
const char **rec_argv;
rec_argc = argc + 2;
rec_argv = calloc(rec_argc + 1, sizeof(char *));
- rec_argv[i++] = strdup("report");
- rec_argv[i++] = strdup("-i");
- rec_argv[i++] = strdup(file_name);
+ if (!rec_argv)
+ return -ENOMEM;
+
+ rec_argv[i++] = STRDUP_FAIL_EXIT("report");
+ rec_argv[i++] = STRDUP_FAIL_EXIT("-i");
+ rec_argv[i++] = STRDUP_FAIL_EXIT(file_name);
for (j = 1; j < argc; j++, i++)
- rec_argv[i] = argv[j];
+ rec_argv[i] = STRDUP_FAIL_EXIT(argv[j]);
BUG_ON(i != rec_argc);
- return cmd_report(i, rec_argv);
+ ret = cmd_report(i, rec_argv);
+
+EXIT:
+ for (i = 0; i < rec_argc; i++)
+ free((void *)rec_argv[i]);
+ free(rec_argv);
+ return ret;
}
static int
__cmd_buildid_list(const char *file_name, int argc, const char **argv)
{
- int rec_argc, i = 0, j;
+ int rec_argc, i = 0, j, ret;
const char **rec_argv;
rec_argc = argc + 2;
rec_argv = calloc(rec_argc + 1, sizeof(char *));
- rec_argv[i++] = strdup("buildid-list");
- rec_argv[i++] = strdup("-i");
- rec_argv[i++] = strdup(file_name);
+ if (!rec_argv)
+ return -ENOMEM;
+
+ rec_argv[i++] = STRDUP_FAIL_EXIT("buildid-list");
+ rec_argv[i++] = STRDUP_FAIL_EXIT("-i");
+ rec_argv[i++] = STRDUP_FAIL_EXIT(file_name);
for (j = 1; j < argc; j++, i++)
- rec_argv[i] = argv[j];
+ rec_argv[i] = STRDUP_FAIL_EXIT(argv[j]);
BUG_ON(i != rec_argc);
- return cmd_buildid_list(i, rec_argv);
+ ret = cmd_buildid_list(i, rec_argv);
+
+EXIT:
+ for (i = 0; i < rec_argc; i++)
+ free((void *)rec_argv[i]);
+ free(rec_argv);
+ return ret;
+}
+
+static int __cmd_top(int argc, const char **argv)
+{
+ int rec_argc, i = 0, ret;
+ const char **rec_argv;
+
+ /*
+ * kvm_add_default_arch_event() may add 2 extra options, so
+ * allocate 2 more pointers in advance.
+ */
+ rec_argc = argc + 2;
+ rec_argv = calloc(rec_argc + 1, sizeof(char *));
+ if (!rec_argv)
+ return -ENOMEM;
+
+ for (i = 0; i < argc; i++)
+ rec_argv[i] = STRDUP_FAIL_EXIT(argv[i]);
+
+ BUG_ON(i != argc);
+
+ ret = kvm_add_default_arch_event(&i, rec_argv);
+ if (ret)
+ goto EXIT;
+
+ ret = cmd_top(i, rec_argv);
+
+EXIT:
+ for (i = 0; i < rec_argc; i++)
+ free((void *)rec_argv[i]);
+ free(rec_argv);
+ return ret;
}
int cmd_kvm(int argc, const char **argv)
@@ -2116,7 +2176,7 @@ int cmd_kvm(int argc, const char **argv)
else if (strlen(argv[0]) > 2 && strstarts("diff", argv[0]))
return cmd_diff(argc, argv);
else if (!strcmp(argv[0], "top"))
- return cmd_top(argc, argv);
+ return __cmd_top(argc, argv);
else if (strlen(argv[0]) > 2 && strstarts("buildid-list", argv[0]))
return __cmd_buildid_list(file_name, argc, argv);
#if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
diff --git a/tools/perf/builtin-kwork.c b/tools/perf/builtin-kwork.c
index c41a68d073de..7f3068264568 100644
--- a/tools/perf/builtin-kwork.c
+++ b/tools/perf/builtin-kwork.c
@@ -1804,7 +1804,7 @@ static int perf_kwork__read_events(struct perf_kwork *kwork)
return PTR_ERR(session);
}
- symbol__init(&session->header.env);
+ symbol__init(perf_session__env(session));
if (perf_kwork__check_config(kwork, session) != 0)
goto out_delete;
@@ -2273,12 +2273,23 @@ static void setup_event_list(struct perf_kwork *kwork,
pr_debug("\n");
}
+#define STRDUP_FAIL_EXIT(s) \
+ ({ char *_p; \
+ _p = strdup(s); \
+ if (!_p) { \
+ ret = -ENOMEM; \
+ goto EXIT; \
+ } \
+ _p; \
+ })
+
static int perf_kwork__record(struct perf_kwork *kwork,
int argc, const char **argv)
{
const char **rec_argv;
unsigned int rec_argc, i, j;
struct kwork_class *class;
+ int ret;
const char *const record_args[] = {
"record",
@@ -2298,17 +2309,17 @@ static int perf_kwork__record(struct perf_kwork *kwork,
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(record_args); i++)
- rec_argv[i] = strdup(record_args[i]);
+ rec_argv[i] = STRDUP_FAIL_EXIT(record_args[i]);
list_for_each_entry(class, &kwork->class_list, list) {
for (j = 0; j < class->nr_tracepoints; j++) {
- rec_argv[i++] = strdup("-e");
- rec_argv[i++] = strdup(class->tp_handlers[j].name);
+ rec_argv[i++] = STRDUP_FAIL_EXIT("-e");
+ rec_argv[i++] = STRDUP_FAIL_EXIT(class->tp_handlers[j].name);
}
}
for (j = 1; j < (unsigned int)argc; j++, i++)
- rec_argv[i] = argv[j];
+ rec_argv[i] = STRDUP_FAIL_EXIT(argv[j]);
BUG_ON(i != rec_argc);
@@ -2317,7 +2328,13 @@ static int perf_kwork__record(struct perf_kwork *kwork,
pr_debug("%s ", rec_argv[j]);
pr_debug("\n");
- return cmd_record(i, rec_argv);
+ ret = cmd_record(i, rec_argv);
+
+EXIT:
+ for (i = 0; i < rec_argc; i++)
+ free((void *)rec_argv[i]);
+ free(rec_argv);
+ return ret;
}
int cmd_kwork(int argc, const char **argv)
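The STRDUP_FAIL_EXIT macro added above centralizes the error path: any failed strdup() jumps to the EXIT label, and because every rec_argv slot is either NULL (from calloc) or heap-allocated, one loop frees whatever was built. A condensed sketch of the pattern, with run() standing in for cmd_record():

#include <errno.h>
#include <stdlib.h>
#include <string.h>

static int dup_and_run(int argc, const char **argv,
		       int (*run)(int, const char **))
{
	const char **rec_argv = calloc(argc + 1, sizeof(char *));
	int i, ret = -ENOMEM;

	if (!rec_argv)
		return -ENOMEM;
	for (i = 0; i < argc; i++) {
		rec_argv[i] = strdup(argv[i]);
		if (!rec_argv[i])
			goto EXIT;	/* partial array still freed below */
	}
	ret = run(argc, rec_argv);
EXIT:
	for (i = 0; i < argc; i++)
		free((void *)rec_argv[i]);	/* free(NULL) is a no-op */
	free(rec_argv);
	return ret;
}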
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index fed482adb039..5cbca0bacd35 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -58,6 +58,8 @@ struct print_state {
bool metrics;
/** @metricgroups: Controls printing of metric and metric groups. */
bool metricgroups;
+ /** @exclude_abi: Exclude PMUs with types less than PERF_TYPE_MAX except PERF_TYPE_RAW. */
+ bool exclude_abi;
/** @last_topic: The last printed event topic. */
char *last_topic;
/** @last_metricgroups: The last printed metric group. */
@@ -113,7 +115,8 @@ static void wordwrap(FILE *fp, const char *s, int start, int max, int corr)
}
}
-static void default_print_event(void *ps, const char *topic, const char *pmu_name,
+static void default_print_event(void *ps, const char *topic,
+ const char *pmu_name, u32 pmu_type,
const char *event_name, const char *event_alias,
const char *scale_unit __maybe_unused,
bool deprecated, const char *event_type_desc,
@@ -127,7 +130,10 @@ static void default_print_event(void *ps, const char *topic, const char *pmu_nam
if (deprecated && !print_state->deprecated)
return;
- if (print_state->pmu_glob && pmu_name && !strglobmatch(pmu_name, print_state->pmu_glob))
+ if (print_state->pmu_glob && (!pmu_name || !strglobmatch(pmu_name, print_state->pmu_glob)))
+ return;
+
+ if (print_state->exclude_abi && pmu_type < PERF_TYPE_MAX && pmu_type != PERF_TYPE_RAW)
return;
if (print_state->event_glob &&
@@ -197,7 +203,8 @@ static void default_print_metric(void *ps,
const char *long_desc,
const char *expr,
const char *threshold,
- const char *unit __maybe_unused)
+ const char *unit __maybe_unused,
+ const char *pmu_name __maybe_unused)
{
struct print_state *print_state = ps;
FILE *fp = print_state->fp;
@@ -276,8 +283,8 @@ static void default_print_metric(void *ps,
}
struct json_print_state {
- /** @fp: File to write output to. */
- FILE *fp;
+ /** @common: The shared print_state. */
+ struct print_state common;
/** Should a separator be printed prior to the next item? */
bool need_sep;
};
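json_print_state now embeds the shared print_state as its first member, so a single pointer can serve both the generic printing code and the JSON-specific callbacks (as the cmd_list() changes later in this diff rely on). A sketch of the layout trick, with illustrative field names:

#include <stdbool.h>
#include <stdio.h>

struct shared_state { FILE *fp; bool deprecated; };

struct json_state {
	struct shared_state common;	/* must stay the first member */
	bool need_sep;			/* JSON-only bookkeeping */
};

/* Generic code sees only the shared part... */
static void set_output(struct shared_state *ps, FILE *fp) { ps->fp = fp; }

/* ...while JSON callbacks receive the full object. */
static void json_cb(void *opaque)
{
	struct json_state *js = opaque;

	fprintf(js->common.fp, "%s{", js->need_sep ? "," : "");
	js->need_sep = true;
}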
@@ -285,7 +292,7 @@ struct json_print_state {
static void json_print_start(void *ps)
{
struct json_print_state *print_state = ps;
- FILE *fp = print_state->fp;
+ FILE *fp = print_state->common.fp;
fprintf(fp, "[\n");
}
@@ -293,7 +300,7 @@ static void json_print_start(void *ps)
static void json_print_end(void *ps)
{
struct json_print_state *print_state = ps;
- FILE *fp = print_state->fp;
+ FILE *fp = print_state->common.fp;
fprintf(fp, "%s]\n", print_state->need_sep ? "\n" : "");
}
@@ -353,7 +360,8 @@ static void fix_escape_fprintf(FILE *fp, struct strbuf *buf, const char *fmt, ..
fputs(buf->buf, fp);
}
-static void json_print_event(void *ps, const char *topic, const char *pmu_name,
+static void json_print_event(void *ps, const char *topic,
+ const char *pmu_name, u32 pmu_type __maybe_unused,
const char *event_name, const char *event_alias,
const char *scale_unit,
bool deprecated, const char *event_type_desc,
@@ -362,9 +370,26 @@ static void json_print_event(void *ps, const char *topic, const char *pmu_name,
{
struct json_print_state *print_state = ps;
bool need_sep = false;
- FILE *fp = print_state->fp;
+ FILE *fp = print_state->common.fp;
struct strbuf buf;
+ if (deprecated && !print_state->common.deprecated)
+ return;
+
+ if (print_state->common.pmu_glob &&
+ (!pmu_name || !strglobmatch(pmu_name, print_state->common.pmu_glob)))
+ return;
+
+ if (print_state->common.exclude_abi && pmu_type < PERF_TYPE_MAX &&
+ pmu_type != PERF_TYPE_RAW)
+ return;
+
+ if (print_state->common.event_glob &&
+ (!event_name || !strglobmatch(event_name, print_state->common.event_glob)) &&
+ (!event_alias || !strglobmatch(event_alias, print_state->common.event_glob)) &&
+ (!topic || !strglobmatch_nocase(topic, print_state->common.event_glob)))
+ return;
+
strbuf_init(&buf, 0);
fprintf(fp, "%s{\n", print_state->need_sep ? ",\n" : "");
print_state->need_sep = true;
@@ -433,13 +458,21 @@ static void json_print_event(void *ps, const char *topic, const char *pmu_name,
static void json_print_metric(void *ps __maybe_unused, const char *group,
const char *name, const char *desc,
const char *long_desc, const char *expr,
- const char *threshold, const char *unit)
+ const char *threshold, const char *unit,
+ const char *pmu_name)
{
struct json_print_state *print_state = ps;
bool need_sep = false;
- FILE *fp = print_state->fp;
+ FILE *fp = print_state->common.fp;
struct strbuf buf;
+ if (print_state->common.event_glob &&
+ (!print_state->common.metrics || !name ||
+ !strglobmatch(name, print_state->common.event_glob)) &&
+ (!print_state->common.metricgroups || !group ||
+ !strglobmatch(group, print_state->common.event_glob)))
+ return;
+
strbuf_init(&buf, 0);
fprintf(fp, "%s{\n", print_state->need_sep ? ",\n" : "");
print_state->need_sep = true;
@@ -483,6 +516,12 @@ static void json_print_metric(void *ps __maybe_unused, const char *group,
long_desc);
need_sep = true;
}
+ if (pmu_name) {
+ fix_escape_fprintf(fp, &buf, "%s\t\"Unit\": \"%S\"",
+ need_sep ? ",\n" : "",
+ pmu_name);
+ need_sep = true;
+ }
fprintf(fp, "%s}", need_sep ? "\n" : "");
strbuf_release(&buf);
}
@@ -506,10 +545,12 @@ int cmd_list(int argc, const char **argv)
.fp = stdout,
.desc = true,
};
- struct print_state json_ps = {
- .fp = stdout,
+ struct json_print_state json_ps = {
+ .common = {
+ .fp = stdout,
+ },
};
- void *ps = &default_ps;
+ struct print_state *ps = &default_ps;
struct print_callbacks print_cb = {
.print_start = default_print_start,
.print_end = default_print_end,
@@ -557,9 +598,11 @@ int cmd_list(int argc, const char **argv)
argc = parse_options(argc, argv, list_options, list_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
+ if (json)
+ ps = &json_ps.common;
+
if (output_path) {
- default_ps.fp = fopen(output_path, "w");
- json_ps.fp = default_ps.fp;
+ ps->fp = fopen(output_path, "w");
}
setup_pager();
@@ -575,14 +618,13 @@ int cmd_list(int argc, const char **argv)
.print_metric = json_print_metric,
.skip_duplicate_pmus = json_skip_duplicate_pmus,
};
- ps = &json_ps;
} else {
- default_ps.last_topic = strdup("");
- assert(default_ps.last_topic);
- default_ps.visited_metrics = strlist__new(NULL, NULL);
- assert(default_ps.visited_metrics);
+ ps->last_topic = strdup("");
+ assert(ps->last_topic);
+ ps->visited_metrics = strlist__new(NULL, NULL);
+ assert(ps->visited_metrics);
if (unit_name)
- default_ps.pmu_glob = strdup(unit_name);
+ ps->pmu_glob = strdup(unit_name);
else if (cputype) {
const struct perf_pmu *pmu = perf_pmus__pmu_for_pmu_filter(cputype);
@@ -591,14 +633,16 @@ int cmd_list(int argc, const char **argv)
ret = -1;
goto out;
}
- default_ps.pmu_glob = strdup(pmu->name);
+ ps->pmu_glob = strdup(pmu->name);
}
}
print_cb.print_start(ps);
if (argc == 0) {
- default_ps.metrics = true;
- default_ps.metricgroups = true;
+ if (!unit_name) {
+ ps->metrics = true;
+ ps->metricgroups = true;
+ }
print_events(&print_cb, ps);
goto out;
}
@@ -606,19 +650,10 @@ int cmd_list(int argc, const char **argv)
for (i = 0; i < argc; ++i) {
char *sep, *s;
- if (strcmp(argv[i], "tracepoint") == 0)
- print_tracepoint_events(&print_cb, ps);
- else if (strcmp(argv[i], "hw") == 0 ||
- strcmp(argv[i], "hardware") == 0)
- print_symbol_events(&print_cb, ps, PERF_TYPE_HARDWARE,
- event_symbols_hw, PERF_COUNT_HW_MAX);
- else if (strcmp(argv[i], "sw") == 0 ||
- strcmp(argv[i], "software") == 0) {
+ if (strcmp(argv[i], "tracepoint") == 0) {
char *old_pmu_glob = default_ps.pmu_glob;
- print_symbol_events(&print_cb, ps, PERF_TYPE_SOFTWARE,
- event_symbols_sw, PERF_COUNT_SW_MAX);
- default_ps.pmu_glob = strdup("tool");
+ default_ps.pmu_glob = strdup("tracepoint");
if (!default_ps.pmu_glob) {
ret = -1;
goto out;
@@ -626,21 +661,59 @@ int cmd_list(int argc, const char **argv)
perf_pmus__print_pmu_events(&print_cb, ps);
zfree(&default_ps.pmu_glob);
default_ps.pmu_glob = old_pmu_glob;
+ } else if (strcmp(argv[i], "hw") == 0 ||
+ strcmp(argv[i], "hardware") == 0) {
+ char *old_event_glob = ps->event_glob;
+
+ ps->event_glob = strdup("legacy hardware");
+ if (!ps->event_glob) {
+ ret = -1;
+ goto out;
+ }
+ perf_pmus__print_pmu_events(&print_cb, ps);
+ zfree(&ps->event_glob);
+ ps->event_glob = old_event_glob;
+ } else if (strcmp(argv[i], "sw") == 0 ||
+ strcmp(argv[i], "software") == 0) {
+ char *old_pmu_glob = ps->pmu_glob;
+ static const char * const sw_globs[] = { "software", "tool" };
+
+ for (size_t j = 0; j < ARRAY_SIZE(sw_globs); j++) {
+ ps->pmu_glob = strdup(sw_globs[j]);
+ if (!ps->pmu_glob) {
+ ret = -1;
+ goto out;
+ }
+ perf_pmus__print_pmu_events(&print_cb, ps);
+ zfree(&ps->pmu_glob);
+ }
+ ps->pmu_glob = old_pmu_glob;
} else if (strcmp(argv[i], "cache") == 0 ||
- strcmp(argv[i], "hwcache") == 0)
- print_hwcache_events(&print_cb, ps);
- else if (strcmp(argv[i], "pmu") == 0)
+ strcmp(argv[i], "hwcache") == 0) {
+ char *old_event_glob = ps->event_glob;
+
+ ps->event_glob = strdup("legacy cache");
+ if (!ps->event_glob) {
+ ret = -1;
+ goto out;
+ }
+ perf_pmus__print_pmu_events(&print_cb, ps);
+ zfree(&ps->event_glob);
+ ps->event_glob = old_event_glob;
+ } else if (strcmp(argv[i], "pmu") == 0) {
+ ps->exclude_abi = true;
perf_pmus__print_pmu_events(&print_cb, ps);
- else if (strcmp(argv[i], "sdt") == 0)
+ ps->exclude_abi = false;
+ } else if (strcmp(argv[i], "sdt") == 0)
print_sdt_events(&print_cb, ps);
else if (strcmp(argv[i], "metric") == 0 || strcmp(argv[i], "metrics") == 0) {
- default_ps.metricgroups = false;
- default_ps.metrics = true;
+ ps->metricgroups = false;
+ ps->metrics = true;
metricgroup__print(&print_cb, ps);
} else if (strcmp(argv[i], "metricgroup") == 0 ||
strcmp(argv[i], "metricgroups") == 0) {
- default_ps.metricgroups = true;
- default_ps.metrics = false;
+ ps->metricgroups = true;
+ ps->metrics = false;
metricgroup__print(&print_cb, ps);
}
#ifdef HAVE_LIBPFM
@@ -648,37 +721,40 @@ int cmd_list(int argc, const char **argv)
print_libpfm_events(&print_cb, ps);
#endif
else if ((sep = strchr(argv[i], ':')) != NULL) {
- char *old_pmu_glob = default_ps.pmu_glob;
+ char *old_pmu_glob = ps->pmu_glob;
+ char *old_event_glob = ps->event_glob;
- default_ps.event_glob = strdup(argv[i]);
- if (!default_ps.event_glob) {
+ ps->event_glob = strdup(argv[i]);
+ if (!ps->event_glob) {
ret = -1;
goto out;
}
- print_tracepoint_events(&print_cb, ps);
+ ps->pmu_glob = strdup("tracepoint");
+ if (!ps->pmu_glob) {
+ zfree(&ps->event_glob);
+ ret = -1;
+ goto out;
+ }
+ perf_pmus__print_pmu_events(&print_cb, ps);
+ zfree(&ps->pmu_glob);
+ ps->pmu_glob = old_pmu_glob;
print_sdt_events(&print_cb, ps);
- default_ps.metrics = true;
- default_ps.metricgroups = true;
+ ps->metrics = true;
+ ps->metricgroups = true;
metricgroup__print(&print_cb, ps);
- zfree(&default_ps.event_glob);
- default_ps.pmu_glob = old_pmu_glob;
+ zfree(&ps->event_glob);
+ ps->event_glob = old_event_glob;
} else {
if (asprintf(&s, "*%s*", argv[i]) < 0) {
printf("Critical: Not enough memory! Trying to continue...\n");
continue;
}
- default_ps.event_glob = s;
- print_symbol_events(&print_cb, ps, PERF_TYPE_HARDWARE,
- event_symbols_hw, PERF_COUNT_HW_MAX);
- print_symbol_events(&print_cb, ps, PERF_TYPE_SOFTWARE,
- event_symbols_sw, PERF_COUNT_SW_MAX);
- print_hwcache_events(&print_cb, ps);
+ ps->event_glob = s;
perf_pmus__print_pmu_events(&print_cb, ps);
- print_tracepoint_events(&print_cb, ps);
print_sdt_events(&print_cb, ps);
- default_ps.metrics = true;
- default_ps.metricgroups = true;
+ ps->metrics = true;
+ ps->metricgroups = true;
metricgroup__print(&print_cb, ps);
free(s);
}
@@ -686,12 +762,12 @@ int cmd_list(int argc, const char **argv)
out:
print_cb.print_end(ps);
- free(default_ps.pmu_glob);
- free(default_ps.last_topic);
- free(default_ps.last_metricgroups);
- strlist__delete(default_ps.visited_metrics);
+ free(ps->pmu_glob);
+ free(ps->last_topic);
+ free(ps->last_metricgroups);
+ strlist__delete(ps->visited_metrics);
if (output_path)
- fclose(default_ps.fp);
+ fclose(ps->fp);
return ret;
}
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 05e7bc30488a..e8962c985d34 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -62,6 +62,8 @@ static const char *output_name = NULL;
static FILE *lock_output;
static struct lock_filter filters;
+static struct lock_delay *delays;
+static int nr_delays;
static enum lock_aggr_mode aggr_mode = LOCK_AGGR_ADDR;
@@ -1865,6 +1867,7 @@ static int __cmd_report(bool display_info)
eops.sample = process_sample_event;
eops.comm = perf_event__process_comm;
eops.mmap = perf_event__process_mmap;
+ eops.mmap2 = perf_event__process_mmap2;
eops.namespaces = perf_event__process_namespaces;
eops.tracing_data = perf_event__process_tracing_data;
session = perf_session__new(&data, &eops);
@@ -1874,7 +1877,7 @@ static int __cmd_report(bool display_info)
}
symbol_conf.allow_aliases = true;
- symbol__init(&session->header.env);
+ symbol__init(perf_session__env(session));
if (!data.is_pipe) {
if (!perf_session__has_traces(session, "lock record"))
@@ -2001,10 +2004,13 @@ static int __cmd_contention(int argc, const char **argv)
.max_stack = max_stack_depth,
.stack_skip = stack_skip,
.filters = &filters,
+ .delays = delays,
+ .nr_delays = nr_delays,
.save_callstack = needs_callstack(),
.owner = show_lock_owner,
.cgroups = RB_ROOT,
};
+ struct perf_env host_env;
lockhash_table = calloc(LOCKHASH_SIZE, sizeof(*lockhash_table));
if (!lockhash_table)
@@ -2018,9 +2024,13 @@ static int __cmd_contention(int argc, const char **argv)
eops.sample = process_sample_event;
eops.comm = perf_event__process_comm;
eops.mmap = perf_event__process_mmap;
+ eops.mmap2 = perf_event__process_mmap2;
eops.tracing_data = perf_event__process_tracing_data;
- session = perf_session__new(use_bpf ? NULL : &data, &eops);
+ perf_env__init(&host_env);
+ session = __perf_session__new(use_bpf ? NULL : &data, &eops,
+ /*trace_event_repipe=*/false, &host_env);
+
if (IS_ERR(session)) {
pr_err("Initializing perf session failed\n");
err = PTR_ERR(session);
@@ -2038,7 +2048,7 @@ static int __cmd_contention(int argc, const char **argv)
con.save_callstack = true;
symbol_conf.allow_aliases = true;
- symbol__init(&session->header.env);
+ symbol__init(perf_session__env(session));
if (use_bpf) {
err = target__validate(&target);
@@ -2138,6 +2148,7 @@ out_delete:
evlist__delete(con.evlist);
lock_contention_finish(&con);
perf_session__delete(session);
+ perf_env__exit(&host_env);
zfree(&lockhash_table);
return err;
}
@@ -2504,6 +2515,79 @@ static int parse_cgroup_filter(const struct option *opt __maybe_unused, const ch
return ret;
}
+static bool add_lock_delay(char *spec)
+{
+ char *at, *pos;
+ struct lock_delay *tmp;
+ unsigned long duration;
+
+ at = strchr(spec, '@');
+ if (at == NULL) {
+ pr_err("lock delay should have '@' sign: %s\n", spec);
+ return false;
+ }
+ if (at == spec) {
+ pr_err("lock delay should have time before '@': %s\n", spec);
+ return false;
+ }
+
+ *at = '\0';
+ duration = strtoul(spec, &pos, 0);
+ if (!strcmp(pos, "ns"))
+ duration *= 1;
+ else if (!strcmp(pos, "us"))
+ duration *= 1000;
+ else if (!strcmp(pos, "ms"))
+ duration *= 1000 * 1000;
+ else if (*pos) {
+ pr_err("invalid delay time: %s@%s\n", spec, at + 1);
+ return false;
+ }
+
+ if (duration > 10 * 1000 * 1000) {
+ pr_err("lock delay is too long: %s (> 10ms)\n", spec);
+ return false;
+ }
+
+ tmp = realloc(delays, (nr_delays + 1) * sizeof(*delays));
+ if (tmp == NULL) {
+ pr_err("Memory allocation failure\n");
+ return false;
+ }
+ delays = tmp;
+
+ delays[nr_delays].sym = strdup(at + 1);
+ if (delays[nr_delays].sym == NULL) {
+ pr_err("Memory allocation failure\n");
+ return false;
+ }
+ delays[nr_delays].time = duration;
+
+ nr_delays++;
+ return true;
+}
+
+static int parse_lock_delay(const struct option *opt __maybe_unused, const char *str,
+ int unset __maybe_unused)
+{
+ char *s, *tmp, *tok;
+ int ret = 0;
+
+ s = strdup(str);
+ if (s == NULL)
+ return -1;
+
+ for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
+ if (!add_lock_delay(tok)) {
+ ret = -1;
+ break;
+ }
+ }
+
+ free(s);
+ return ret;
+}
+
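Putting the two new helpers together: a spec such as "10us@some_lock_fn" is split at the '@', the time scaled by its unit suffix (default nanoseconds), and the symbol name duplicated for the delays array. A trimmed sketch of that round trip; the symbol name is illustrative:

#include <stdlib.h>
#include <string.h>

/* Parse "10us@some_lock_fn" into nanoseconds + symbol (sketch only). */
static int parse_spec(char *spec, unsigned long *ns, char **sym)
{
	char *at = strchr(spec, '@'), *pos;

	if (!at || at == spec)
		return -1;			/* need TIME@FUNC */
	*at = '\0';
	*ns = strtoul(spec, &pos, 0);
	if (!strcmp(pos, "us"))
		*ns *= 1000;
	else if (!strcmp(pos, "ms"))
		*ns *= 1000 * 1000;
	else if (*pos && strcmp(pos, "ns"))
		return -1;			/* unknown unit suffix */
	*sym = strdup(at + 1);
	return *sym ? 0 : -1;
}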
int cmd_lock(int argc, const char **argv)
{
const struct option lock_options[] = {
@@ -2580,6 +2664,8 @@ int cmd_lock(int argc, const char **argv)
OPT_BOOLEAN(0, "lock-cgroup", &show_lock_cgroups, "show lock stats by cgroup"),
OPT_CALLBACK('G', "cgroup-filter", NULL, "CGROUPS",
"Filter specific cgroups", parse_cgroup_filter),
+ OPT_CALLBACK('J', "inject-delay", NULL, "TIME@FUNC",
+ "Inject delays to specific locks", parse_lock_delay),
OPT_PARENT(lock_options)
};
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index 5ec83cd85650..d43500b92a7b 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/stat.h>
@@ -304,7 +305,7 @@ static int report_raw_events(struct perf_mem *mem)
goto out_delete;
}
- ret = symbol__init(&session->header.env);
+ ret = symbol__init(perf_session__env(session));
if (ret < 0)
goto out_delete;
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index ba20bf7c011d..2584d0d8bc82 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -26,6 +26,7 @@
#include "util/target.h"
#include "util/session.h"
#include "util/tool.h"
+#include "util/stat.h"
#include "util/symbol.h"
#include "util/record.h"
#include "util/cpumap.h"
@@ -51,6 +52,7 @@
#include "util/clockid.h"
#include "util/off_cpu.h"
#include "util/bpf-filter.h"
+#include "util/strbuf.h"
#include "asm/bug.h"
#include "perf.h"
#include "cputopo.h"
@@ -169,10 +171,12 @@ struct record {
bool no_buildid_cache_set;
bool buildid_all;
bool buildid_mmap;
+ bool buildid_mmap_set;
bool timestamp_filename;
bool timestamp_boundary;
bool off_cpu;
const char *filter_action;
+ const char *uid_str;
struct switch_output switch_output;
unsigned long long samples;
unsigned long output_max_size; /* = 0: unlimited */
@@ -648,14 +652,27 @@ static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
struct record *rec = to;
if (record__comp_enabled(rec)) {
+ struct perf_record_compressed2 *event = map->data;
+ size_t padding = 0;
+ u8 pad[8] = {0};
ssize_t compressed = zstd_compress(rec->session, map, map->data,
mmap__mmap_len(map), bf, size);
if (compressed < 0)
return (int)compressed;
- size = compressed;
- bf = map->data;
+ bf = event;
+ thread->samples++;
+
+ /*
+ * The record from zstd_compress() is not 8-byte aligned, which would
+ * trigger an ASan error. Pad it to alignment here.
+ */
+ event->data_size = compressed - sizeof(struct perf_record_compressed2);
+ event->header.size = PERF_ALIGN(compressed, sizeof(u64));
+ padding = event->header.size - compressed;
+ return record__write(rec, map, bf, compressed) ||
+ record__write(rec, map, &pad, padding);
}
thread->samples++;
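The alignment fix above keeps the record stream u64-aligned: header.size is rounded up with PERF_ALIGN() and the gap is written out as zero bytes. The arithmetic reduced to a sketch, where ALIGN8 mirrors PERF_ALIGN(x, sizeof(u64)):

#include <stddef.h>

#define ALIGN8(x) (((x) + 7) & ~(size_t)7)

/* compressed: bytes produced by the compressor, header included */
static size_t pad_bytes(size_t compressed)
{
	return ALIGN8(compressed) - compressed;	/* 0..7 zero bytes */
}
/* e.g. pad_bytes(29) == 3, so header.size becomes 32 */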
@@ -713,8 +730,6 @@ static void record__sig_exit(void)
raise(signr);
}
-#ifdef HAVE_AUXTRACE_SUPPORT
-
static int record__process_auxtrace(const struct perf_tool *tool,
struct mmap *map,
union perf_event *event, void *data1,
@@ -758,7 +773,9 @@ static int record__auxtrace_mmap_read(struct record *rec,
{
int ret;
- ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
+ ret = auxtrace_mmap__read(map, rec->itr,
+ perf_session__env(rec->session),
+ &rec->tool,
record__process_auxtrace);
if (ret < 0)
return ret;
@@ -774,7 +791,9 @@ static int record__auxtrace_mmap_read_snapshot(struct record *rec,
{
int ret;
- ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
+ ret = auxtrace_mmap__read_snapshot(map, rec->itr,
+ perf_session__env(rec->session),
+ &rec->tool,
record__process_auxtrace,
rec->opts.auxtrace_snapshot_size);
if (ret < 0)
@@ -868,40 +887,6 @@ static int record__auxtrace_init(struct record *rec)
return auxtrace_parse_filters(rec->evlist);
}
-#else
-
-static inline
-int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
- struct mmap *map __maybe_unused)
-{
- return 0;
-}
-
-static inline
-void record__read_auxtrace_snapshot(struct record *rec __maybe_unused,
- bool on_exit __maybe_unused)
-{
-}
-
-static inline
-int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
-{
- return 0;
-}
-
-static inline
-int record__auxtrace_snapshot_exit(struct record *rec __maybe_unused)
-{
- return 0;
-}
-
-static int record__auxtrace_init(struct record *rec __maybe_unused)
-{
- return 0;
-}
-
-#endif
-
static int record__config_text_poke(struct evlist *evlist)
{
struct evsel *evsel;
@@ -962,7 +947,6 @@ static int record__config_tracking_events(struct record *rec)
*/
if (opts->target.initial_delay || target__has_cpu(&opts->target) ||
perf_pmus__num_core_pmus() > 1) {
-
/*
* User space tasks can migrate between CPUs, so when tracing
* selected CPUs, sideband for all CPUs is still needed.
@@ -1367,10 +1351,27 @@ static int record__open(struct record *rec)
struct perf_session *session = rec->session;
struct record_opts *opts = &rec->opts;
int rc = 0;
+ bool skipped = false;
+ bool removed_tracking = false;
evlist__for_each_entry(evlist, pos) {
+ if (removed_tracking) {
+ /*
+ * Normally the head of the list has tracking enabled
+ * for sideband data like mmaps. If this event is
+ * removed, make sure to add tracking to the next
+ * processed event.
+ */
+ if (!pos->tracking) {
+ pos->tracking = true;
+ evsel__config(pos, opts, &callchain_param);
+ }
+ removed_tracking = false;
+ }
try_again:
if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
+ bool report_error = true;
+
if (evsel__fallback(pos, &opts->target, errno, msg, sizeof(msg))) {
if (verbose > 0)
ui__warning("%s\n", msg);
@@ -1382,15 +1383,72 @@ try_again:
pos = evlist__reset_weak_group(evlist, pos, true);
goto try_again;
}
- rc = -errno;
- evsel__open_strerror(pos, &opts->target, errno, msg, sizeof(msg));
- ui__error("%s\n", msg);
- goto out;
+#if defined(__aarch64__) || defined(__arm__)
+ if (strstr(evsel__name(pos), "cycles")) {
+ struct evsel *pos2;
+ /*
+ * Unfortunately ARM has many events named
+ * "cycles" on PMUs, like the system-level (L3)
+ * cache, which don't support sampling. Only
+ * report such open failures when there is a
+ * single cycles event or verbose is enabled.
+ */
+ evlist__for_each_entry(evlist, pos2) {
+ if (pos2 == pos)
+ continue;
+ if (strstr(evsel__name(pos2), "cycles")) {
+ report_error = false;
+ break;
+ }
+ }
+ }
+#endif
+ if (report_error || verbose > 0) {
+ ui__error("Failure to open event '%s' on PMU '%s' which will be "
+ "removed.\n%s\n",
+ evsel__name(pos), evsel__pmu_name(pos), msg);
+ }
+ if (pos->tracking)
+ removed_tracking = true;
+ pos->skippable = true;
+ skipped = true;
}
-
- pos->supported = true;
}
+ if (skipped) {
+ struct evsel *tmp;
+ int idx = 0;
+ bool evlist_empty = true;
+
+ /* Remove evsels that failed to open and update indices. */
+ evlist__for_each_entry_safe(evlist, tmp, pos) {
+ if (pos->skippable) {
+ evlist__remove(evlist, pos);
+ continue;
+ }
+
+ /*
+ * Note, dummy events may come from the command line or
+ * be added by the tool. We care about supporting `perf
+ * record -e dummy`, which may be used as a permission
+ * check. For the sake of code simplicity, dummy events
+ * from the command line that are opened along with
+ * other events that fail will still fail as if they
+ * were tool-added events.
+ */
+ if (!evsel__is_dummy_event(pos))
+ evlist_empty = false;
+ }
+ evlist__for_each_entry(evlist, pos) {
+ pos->core.idx = idx++;
+ }
+ /* If list is empty then fail. */
+ if (evlist_empty) {
+ ui__error("Failure to open any events for recording.\n");
+ rc = -1;
+ goto out;
+ }
+ }
if (symbol_conf.kptr_restrict && !evlist__exclude_kernel(evlist)) {
pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
@@ -1534,7 +1592,7 @@ static void record__adjust_affinity(struct record *rec, struct mmap *map)
static size_t process_comp_header(void *record, size_t increment)
{
- struct perf_record_compressed *event = record;
+ struct perf_record_compressed2 *event = record;
size_t size = sizeof(*event);
if (increment) {
@@ -1542,7 +1600,7 @@ static size_t process_comp_header(void *record, size_t increment)
return increment;
}
- event->header.type = PERF_RECORD_COMPRESSED;
+ event->header.type = PERF_RECORD_COMPRESSED2;
event->header.size = size;
return size;
@@ -1552,7 +1610,7 @@ static ssize_t zstd_compress(struct perf_session *session, struct mmap *map,
void *dst, size_t dst_size, void *src, size_t src_size)
{
ssize_t compressed;
- size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1;
+ size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed2) - 1;
struct zstd_data *zstd_data = &session->zstd_data;
if (map && map->file)
@@ -1795,15 +1853,15 @@ record__finish_output(struct record *rec)
data->dir.files[i].size = lseek(data->dir.files[i].fd, 0, SEEK_CUR);
}
- if (!rec->no_buildid) {
+ /* Process build IDs unless both the build ID table and the cache are disabled. */
+ if (!rec->no_buildid || !rec->no_buildid_cache) {
process_buildids(rec);
if (rec->buildid_all)
perf_session__dsos_hit_all(rec->session);
}
perf_session__write_header(rec->session, rec->evlist, fd, true);
-
- return;
+ perf_session__cache_build_ids(rec->session);
}
static int record__synthesize_workload(struct record *rec, bool tail)
@@ -2146,6 +2204,14 @@ out:
return err;
}
+static void record__synthesize_final_bpf_metadata(struct record *rec __maybe_unused)
+{
+#ifdef HAVE_LIBBPF_SUPPORT
+ perf_event__synthesize_final_bpf_metadata(rec->session,
+ process_synthesized_event);
+#endif
+}
+
static int record__process_signal_event(union perf_event *event __maybe_unused, void *data)
{
struct record *rec = data;
@@ -2177,7 +2243,7 @@ static int record__setup_sb_evlist(struct record *rec)
}
}
- if (evlist__add_bpf_sb_event(rec->sb_evlist, &rec->session->header.env)) {
+ if (evlist__add_bpf_sb_event(rec->sb_evlist, perf_session__env(rec->session))) {
pr_err("Couldn't ask for PERF_RECORD_BPF_EVENT side band events.\n.");
return -1;
}
@@ -2196,15 +2262,16 @@ static int record__init_clock(struct record *rec)
struct perf_session *session = rec->session;
struct timespec ref_clockid;
struct timeval ref_tod;
+ struct perf_env *env = perf_session__env(session);
u64 ref;
if (!rec->opts.use_clockid)
return 0;
if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
- session->header.env.clock.clockid_res_ns = rec->opts.clockid_res_ns;
+ env->clock.clockid_res_ns = rec->opts.clockid_res_ns;
- session->header.env.clock.clockid = rec->opts.clockid;
+ env->clock.clockid = rec->opts.clockid;
if (gettimeofday(&ref_tod, NULL) != 0) {
pr_err("gettimeofday failed, cannot set reference time.\n");
@@ -2219,12 +2286,12 @@ static int record__init_clock(struct record *rec)
ref = (u64) ref_tod.tv_sec * NSEC_PER_SEC +
(u64) ref_tod.tv_usec * NSEC_PER_USEC;
- session->header.env.clock.tod_ns = ref;
+ env->clock.tod_ns = ref;
ref = (u64) ref_clockid.tv_sec * NSEC_PER_SEC +
(u64) ref_clockid.tv_nsec;
- session->header.env.clock.clockid_ns = ref;
+ env->clock.clockid_ns = ref;
return 0;
}
@@ -2370,6 +2437,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
int fd;
float ratio = 0;
enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;
+ struct perf_env *env;
atexit(record__sig_exit);
signal(SIGCHLD, sig_handler);
@@ -2411,7 +2479,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
pr_err("Perf session creation failed.\n");
return PTR_ERR(session);
}
-
+ env = perf_session__env(session);
if (record__threads_enabled(rec)) {
if (perf_data__is_pipe(&rec->data)) {
pr_err("Parallel trace streaming is not available in pipe mode.\n");
@@ -2445,8 +2513,8 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
}
#endif // HAVE_EVENTFD_SUPPORT
- session->header.env.comp_type = PERF_COMP_ZSTD;
- session->header.env.comp_level = rec->opts.comp_level;
+ env->comp_type = PERF_COMP_ZSTD;
+ env->comp_level = rec->opts.comp_level;
if (rec->opts.kcore &&
!record__kcore_readable(&session->machines.host)) {
@@ -2483,7 +2551,11 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
pr_warning("WARNING: --timestamp-filename option is not available in pipe mode.\n");
}
- evlist__uniquify_name(rec->evlist);
+ /*
+ * Use global stat_config that is zero meaning aggr_mode is AGGR_NONE
+ * and hybrid_merge is false.
+ */
+ evlist__uniquify_evsel_names(rec->evlist, &stat_config);
evlist__config(rec->evlist, opts, &callchain_param);
@@ -2495,7 +2567,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
}
/* Debug message used by test scripts */
pr_debug3("perf record done opening and mmapping events\n");
- session->header.env.comp_mmap_len = session->evlist->core.mmap_len;
+ env->comp_mmap_len = session->evlist->core.mmap_len;
if (rec->opts.kcore) {
err = record__kcore_copy(&session->machines.host, data);
@@ -2569,6 +2641,13 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
evlist__enable(rec->evlist);
/*
+ * The offcpu-time event does not call execve(), so enable_on_exec
+ * wouldn't work when recording a workload; enable it manually.
+ */
+ if (rec->off_cpu)
+ evlist__enable_evsel(rec->evlist, (char *)OFFCPU_EVENT);
+
+ /*
* Let the child rip
*/
if (forks) {
@@ -2780,17 +2859,21 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
trigger_off(&auxtrace_snapshot_trigger);
trigger_off(&switch_output_trigger);
+ record__synthesize_final_bpf_metadata(rec);
+
if (opts->auxtrace_snapshot_on_exit)
record__auxtrace_snapshot_exit(rec);
if (forks && workload_exec_errno) {
- char msg[STRERR_BUFSIZE], strevsels[2048];
+ char msg[STRERR_BUFSIZE];
const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
+ struct strbuf sb = STRBUF_INIT;
- evlist__scnprintf_evsels(rec->evlist, sizeof(strevsels), strevsels);
+ evlist__format_evsels(rec->evlist, &sb, 2048);
pr_err("Failed to collect '%s' for the '%s' workload: %s\n",
- strevsels, argv[0], emsg);
+ sb.buf, argv[0], emsg);
+ strbuf_release(&sb);
err = -1;
goto out_child;
}
@@ -2814,7 +2897,7 @@ out_free_threads:
if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
- session->header.env.comp_ratio = ratio + 0.5;
+ env->comp_ratio = ratio + 0.5;
}
if (forks) {
@@ -2838,11 +2921,11 @@ out_free_threads:
rec->bytes_written += off_cpu_write(rec->session);
record__read_lost_samples(rec);
- record__synthesize(rec, true);
/* this will be recalculated during process_buildids() */
rec->samples = 0;
if (!err) {
+ record__synthesize(rec, true);
if (!rec->timestamp_filename) {
record__finish_output(rec);
} else {
@@ -2963,9 +3046,11 @@ static int perf_record_config(const char *var, const char *value, void *cb)
else if (!strcmp(value, "no-cache"))
rec->no_buildid_cache = true;
else if (!strcmp(value, "skip"))
- rec->no_buildid = true;
+ rec->no_buildid = rec->no_buildid_cache = true;
else if (!strcmp(value, "mmap"))
rec->buildid_mmap = true;
+ else if (!strcmp(value, "no-mmap"))
+ rec->buildid_mmap = false;
else
return -1;
return 0;
@@ -3155,6 +3240,28 @@ out_free:
return ret;
}
+static int record__parse_off_cpu_thresh(const struct option *opt,
+ const char *str,
+ int unset __maybe_unused)
+{
+ struct record_opts *opts = opt->value;
+ char *endptr;
+ u64 off_cpu_thresh_ms;
+
+ if (!str)
+ return -EINVAL;
+
+ off_cpu_thresh_ms = strtoull(str, &endptr, 10);
+
+ /* reject trailing characters, and a zero result unless the string is exactly "0" */
+ if (*endptr || (off_cpu_thresh_ms == 0 && strcmp(str, "0")))
+ return -EINVAL;
+ else
+ opts->off_cpu_thresh_ns = off_cpu_thresh_ms * NSEC_PER_MSEC;
+
+ return 0;
+}
+
void __weak arch__add_leaf_frame_record_opts(struct record_opts *opts __maybe_unused)
{
}
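record__parse_off_cpu_thresh() above shows the strict way to consume a numeric option: check endptr for trailing characters, and disambiguate strtoull() returning 0 on failure from a genuine "0". Isolated into a sketch:

#include <stdlib.h>
#include <string.h>

static int parse_u64_strict(const char *str, unsigned long long *out)
{
	char *end;
	unsigned long long v;

	if (!str)
		return -1;
	v = strtoull(str, &end, 10);
	if (*end || (v == 0 && strcmp(str, "0")))
		return -1;	/* junk suffix, or nothing parsed */
	*out = v;
	return 0;
}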
@@ -3348,7 +3455,9 @@ static struct record record = {
.ctl_fd = -1,
.ctl_fd_ack = -1,
.synth = PERF_SYNTH_ALL,
+ .off_cpu_thresh_ns = OFFCPU_THRESH,
},
+ .buildid_mmap = true,
};
const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
@@ -3436,6 +3545,8 @@ static struct option __record_options[] = {
"Record the sampled data address data page size"),
OPT_BOOLEAN(0, "code-page-size", &record.opts.sample_code_page_size,
"Record the sampled code address (ip) page size"),
+ OPT_BOOLEAN(0, "sample-mem-info", &record.opts.sample_data_src,
+ "Record the data source for memory operations"),
OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
OPT_BOOLEAN(0, "sample-identifier", &record.opts.sample_identifier,
"Record the sample identifier"),
@@ -3460,8 +3571,7 @@ static struct option __record_options[] = {
"or ranges of time to enable events e.g. '-D 10-20,30-40'",
record__parse_event_enable_time),
OPT_BOOLEAN(0, "kcore", &record.opts.kcore, "copy /proc/kcore"),
- OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
- "user to profile"),
+ OPT_STRING('u', "uid", &record.uid_str, "user", "user to profile"),
OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
"branch any", "sample any taken branches",
@@ -3480,7 +3590,7 @@ static struct option __record_options[] = {
"sample selected machine registers on interrupt,"
" use '-I?' to list register names", parse_intr_regs),
OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
- "sample selected machine registers on interrupt,"
+ "sample selected machine registers in user space,"
" use '--user-regs=?' to list register names", parse_user_regs),
OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
"Record running/enabled time of read (:S) events"),
@@ -3514,8 +3624,8 @@ static struct option __record_options[] = {
"file", "vmlinux pathname"),
OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
"Record build-id of all DSOs regardless of hits"),
- OPT_BOOLEAN(0, "buildid-mmap", &record.buildid_mmap,
- "Record build-id in map events"),
+ OPT_BOOLEAN_SET(0, "buildid-mmap", &record.buildid_mmap, &record.buildid_mmap_set,
+ "Record build-id in mmap events and skip build-id processing."),
OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
"append timestamp to output filename"),
OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
@@ -3573,6 +3683,9 @@ static struct option __record_options[] = {
OPT_BOOLEAN(0, "off-cpu", &record.off_cpu, "Enable off-cpu analysis"),
OPT_STRING(0, "setup-filter", &record.filter_action, "pin|unpin",
"BPF filter action"),
+ OPT_CALLBACK(0, "off-cpu-thresh", &record.opts, "ms",
+ "Dump off-cpu samples if off-cpu time exceeds this threshold (in milliseconds). (Default: 500ms)",
+ record__parse_off_cpu_thresh),
OPT_END()
};
@@ -4042,19 +4155,25 @@ int cmd_record(int argc, const char **argv)
record.opts.record_switch_events = true;
}
+ if (rec->buildid_mmap && !perf_can_record_build_id()) {
+ pr_warning("Missing support for build id in kernel mmap events.\n"
+ "Disable this warning with --no-buildid-mmap\n");
+ rec->buildid_mmap = false;
+ }
+
if (rec->buildid_mmap) {
- if (!perf_can_record_build_id()) {
- pr_err("Failed: no support to record build id in mmap events, update your kernel.\n");
- err = -EINVAL;
- goto out_opts;
- }
- pr_debug("Enabling build id in mmap2 events.\n");
- /* Enable mmap build id synthesizing. */
- symbol_conf.buildid_mmap2 = true;
/* Enable perf_event_attr::build_id bit. */
rec->opts.build_id = true;
- /* Disable build id cache. */
+ /* Disable build-ID table in the header. */
rec->no_buildid = true;
+ } else {
+ pr_debug("Disabling build id in synthesized mmap2 events.\n");
+ symbol_conf.no_buildid_mmap2 = true;
+ }
+
+ if (rec->no_buildid_set && rec->no_buildid) {
+ /* -B implies -N for historic reasons. */
+ rec->no_buildid_cache = true;
}
if (rec->opts.record_cgroup && !perf_can_record_cgroup()) {
@@ -4130,6 +4249,10 @@ int cmd_record(int argc, const char **argv)
goto out_opts;
}
+ /* For backward compatibility, -d implies --mem-info */
+ if (rec->opts.sample_address)
+ rec->opts.sample_data_src = true;
+
/*
* Allow aliases to facilitate the lookup of symbols for address
* filters. Refer to auxtrace_parse_filters().
@@ -4147,7 +4270,7 @@ int cmd_record(int argc, const char **argv)
err = -ENOMEM;
- if (rec->no_buildid_cache || rec->no_buildid) {
+ if (rec->no_buildid_cache) {
disable_buildid_cache();
} else if (rec->switch_output.enabled) {
/*
@@ -4182,9 +4305,13 @@ int cmd_record(int argc, const char **argv)
record.opts.tail_synthesize = true;
if (rec->evlist->core.nr_entries == 0) {
- err = parse_event(rec->evlist, "cycles:P");
- if (err)
+ struct evlist *def_evlist = evlist__new_default();
+
+ if (!def_evlist)
goto out;
+
+ evlist__splice_list_tail(rec->evlist, &def_evlist->core.entries);
+ evlist__delete(def_evlist);
}
if (rec->opts.target.tid && !rec->opts.no_inherit_set)
@@ -4196,19 +4323,24 @@ int cmd_record(int argc, const char **argv)
ui__warning("%s\n", errbuf);
}
- err = target__parse_uid(&rec->opts.target);
- if (err) {
- int saved_errno = errno;
+ if (rec->uid_str) {
+ uid_t uid = parse_uid(rec->uid_str);
- target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
- ui__error("%s", errbuf);
+ if (uid == UINT_MAX) {
+ ui__error("Invalid User: %s", rec->uid_str);
+ err = -EINVAL;
+ goto out;
+ }
+ err = parse_uid_filter(rec->evlist, uid);
+ if (err)
+ goto out;
- err = -saved_errno;
- goto out;
+ /* User ID filtering implies system wide. */
+ rec->opts.target.system_wide = true;
}
- /* Enable ignoring missing threads when -u/-p option is defined. */
- rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
+ /* Enable ignoring missing threads when -p option is defined. */
+ rec->opts.ignore_missing_thread = rec->opts.target.pid;
evlist__warn_user_requested_cpus(rec->evlist, rec->opts.target.cpu_list);
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index b030ce72e13e..add6b1c2aaf0 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -240,10 +240,11 @@ static void setup_forced_leader(struct report *report,
evlist__force_leader(evlist);
}
-static int process_feature_event(struct perf_session *session,
+static int process_feature_event(const struct perf_tool *tool,
+ struct perf_session *session,
union perf_event *event)
{
- struct report *rep = container_of(session->tool, struct report, tool);
+ struct report *rep = container_of(tool, struct report, tool);
if (event->feat.feat_id < HEADER_LAST_FEATURE)
return perf_event__process_feature(session, event);
@@ -413,7 +414,7 @@ static int report__setup_sample_type(struct report *rep)
/* Silently ignore if callchain is missing */
if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
symbol_conf.cumulate_callchain = false;
- perf_hpp__cancel_cumulate();
+ perf_hpp__cancel_cumulate(session->evlist);
}
}
@@ -447,7 +448,7 @@ static int report__setup_sample_type(struct report *rep)
}
}
- callchain_param_setup(sample_type, perf_env__arch(&rep->session->header.env));
+ callchain_param_setup(sample_type, perf_env__arch(perf_session__env(rep->session)));
if (rep->stitch_lbr && (callchain_param.record_mode != CALLCHAIN_LBR)) {
ui__warning("Can't find LBR callchain. Switch off --stitch-lbr.\n"
@@ -529,7 +530,10 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report
if (rep->mem_mode) {
ret += fprintf(fp, "\n# Total weight : %" PRIu64, nr_events);
- ret += fprintf(fp, "\n# Sort order : %s", sort_order ? : default_mem_sort_order);
+ if (sort_order || !field_order) {
+ ret += fprintf(fp, "\n# Sort order : %s",
+ sort_order ? : default_mem_sort_order);
+ }
} else
ret += fprintf(fp, "\n# Event count (approx.): %" PRIu64, nr_events);
@@ -547,7 +551,7 @@ static int evlist__tui_block_hists_browse(struct evlist *evlist, struct report *
evlist__for_each_entry(evlist, pos) {
ret = report__browse_block_hists(&rep->block_reports[i++].hist,
rep->min_percent, pos,
- &rep->session->header.env);
+ perf_session__env(rep->session));
if (ret != 0)
return ret;
}
@@ -682,7 +686,7 @@ static int report__browse_hists(struct report *rep)
}
ret = evlist__tui_browse_hists(evlist, help, NULL, rep->min_percent,
- &session->header.env, true);
+ perf_session__env(session), true);
/*
* Usually "ret" is the last pressed key, and we only
* care if the key notifies us to switch data file.
@@ -858,17 +862,24 @@ static int maps__fprintf_task_cb(struct map *map, void *data)
struct maps__fprintf_task_args *args = data;
const struct dso *dso = map__dso(map);
u32 prot = map__prot(map);
+ const struct dso_id *dso_id = dso__id_const(dso);
int ret;
+ char buf[SBUILD_ID_SIZE];
+
+ if (dso_id->mmap2_valid)
+ snprintf(buf, sizeof(buf), "%" PRIu64, dso_id->ino);
+ else
+ build_id__snprintf(&dso_id->build_id, buf, sizeof(buf));
ret = fprintf(args->fp,
- "%*s %" PRIx64 "-%" PRIx64 " %c%c%c%c %08" PRIx64 " %" PRIu64 " %s\n",
+ "%*s %" PRIx64 "-%" PRIx64 " %c%c%c%c %08" PRIx64 " %s %s\n",
args->indent, "", map__start(map), map__end(map),
prot & PROT_READ ? 'r' : '-',
prot & PROT_WRITE ? 'w' : '-',
prot & PROT_EXEC ? 'x' : '-',
map__flags(map) ? 's' : 'p',
map__pgoff(map),
- dso__id_const(dso)->ino, dso__name(dso));
+ buf, dso__name(dso));
if (ret < 0)
return ret;
@@ -1088,7 +1099,7 @@ static int __cmd_report(struct report *rep)
/* Don't show Latency column for non-parallel profiles by default. */
if (!symbol_conf.prefer_latency && rep->total_samples &&
rep->singlethreaded_samples * 100 / rep->total_samples >= 99)
- perf_hpp__cancel_latency();
+ perf_hpp__cancel_latency(session->evlist);
evlist__check_mem_load_aux(session->evlist);
@@ -1264,6 +1275,8 @@ static int process_attr(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct evlist **pevlist)
{
+ struct perf_session *session;
+ struct perf_env *env;
u64 sample_type;
int err;
@@ -1276,7 +1289,9 @@ static int process_attr(const struct perf_tool *tool __maybe_unused,
* on events sample_type.
*/
sample_type = evlist__combined_sample_type(*pevlist);
- callchain_param_setup(sample_type, perf_env__arch((*pevlist)->env));
+ session = (*pevlist)->session;
+ env = perf_session__env(session);
+ callchain_param_setup(sample_type, perf_env__arch(env));
return 0;
}
@@ -1599,6 +1614,7 @@ repeat:
report.tool.event_update = perf_event__process_event_update;
report.tool.feature = process_feature_event;
report.tool.ordering_requires_timestamps = true;
+ report.tool.merge_deferred_callchains = !dump_trace;
session = perf_session__new(&data, &report.tool);
if (IS_ERR(session)) {
@@ -1672,14 +1688,10 @@ repeat:
}
if (symbol_conf.report_hierarchy) {
- /* disable incompatible options */
- if (field_order) {
- pr_err("Error: --hierarchy and --fields options cannot be used together\n");
- parse_options_usage(report_usage, options, "F", 1);
- parse_options_usage(NULL, options, "hierarchy", 0);
- goto error;
- }
-
+ /*
+ * The hist entries in hierarchy are added during the collapse
+ * phase. Let's enable it even if no sort keys require it.
+ */
perf_hpp_list.need_collapse = true;
}
@@ -1780,7 +1792,7 @@ repeat:
}
if ((last_key != K_SWITCH_INPUT_DATA && last_key != K_RELOAD) &&
- (setup_sorting(session->evlist) < 0)) {
+ (setup_sorting(session->evlist, perf_session__env(session)) < 0)) {
if (sort_order)
parse_options_usage(report_usage, options, "s", 1);
if (field_order)
@@ -1836,7 +1848,7 @@ repeat:
annotation_config__init();
}
- if (symbol__init(&session->header.env) < 0)
+ if (symbol__init(perf_session__env(session)) < 0)
goto error;
if (report.time_str) {
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 26ece6e9bfd1..eca3b1c58c4b 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -994,7 +994,7 @@ thread_atoms_search(struct rb_root_cached *root, struct thread *thread,
else if (cmp < 0)
node = node->rb_right;
else {
- BUG_ON(thread != atoms->thread);
+ BUG_ON(!RC_CHK_EQUAL(thread, atoms->thread));
return atoms;
}
}
@@ -1111,6 +1111,21 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
atoms->nb_atoms++;
}
+static void free_work_atoms(struct work_atoms *atoms)
+{
+ struct work_atom *atom, *tmp;
+
+ if (atoms == NULL)
+ return;
+
+ list_for_each_entry_safe(atom, tmp, &atoms->work_list, list) {
+ list_del(&atom->list);
+ free(atom);
+ }
+ thread__zput(atoms->thread);
+ free(atoms);
+}
+
static int latency_switch_event(struct perf_sched *sched,
struct evsel *evsel,
struct perf_sample *sample,
@@ -1517,35 +1532,24 @@ static int process_sched_wakeup_ignore(const struct perf_tool *tool __maybe_unus
return 0;
}
-union map_priv {
- void *ptr;
- bool color;
-};
-
static bool thread__has_color(struct thread *thread)
{
- union map_priv priv = {
- .ptr = thread__priv(thread),
- };
-
- return priv.color;
+ return thread__priv(thread) != NULL;
}
static struct thread*
map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid)
{
struct thread *thread = machine__findnew_thread(machine, pid, tid);
- union map_priv priv = {
- .color = false,
- };
+ bool color = false;
if (!sched->map.color_pids || !thread || thread__priv(thread))
return thread;
if (thread_map__has(sched->map.color_pids, tid))
- priv.color = true;
+ color = true;
- thread__set_priv(thread, priv.ptr);
+ thread__set_priv(thread, color ? ((void*)1) : NULL);
return thread;
}
@@ -1634,6 +1638,7 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
const char *color = PERF_COLOR_NORMAL;
char stimestamp[32];
const char *str;
+ int ret = -1;
BUG_ON(this_cpu.cpu >= MAX_CPUS || this_cpu.cpu < 0);
@@ -1664,17 +1669,20 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
sched_in = map__findnew_thread(sched, machine, -1, next_pid);
sched_out = map__findnew_thread(sched, machine, -1, prev_pid);
if (sched_in == NULL || sched_out == NULL)
- return -1;
+ goto out;
tr = thread__get_runtime(sched_in);
- if (tr == NULL) {
- thread__put(sched_in);
- return -1;
- }
+ if (tr == NULL)
+ goto out;
+
+ thread__put(sched->curr_thread[this_cpu.cpu]);
+ thread__put(sched->curr_out_thread[this_cpu.cpu]);
sched->curr_thread[this_cpu.cpu] = thread__get(sched_in);
sched->curr_out_thread[this_cpu.cpu] = thread__get(sched_out);
+ ret = 0;
+
str = thread__comm_str(sched_in);
new_shortname = 0;
if (!tr->shortname[0]) {
@@ -1769,12 +1777,10 @@ sched_out:
color_fprintf(stdout, color, "\n");
out:
- if (sched->map.task_name)
- thread__put(sched_out);
-
+ thread__put(sched_out);
thread__put(sched_in);
- return 0;
+ return ret;
}
static int process_sched_switch_event(const struct perf_tool *tool,
@@ -1922,7 +1928,7 @@ static int perf_sched__read_events(struct perf_sched *sched)
return PTR_ERR(session);
}
- symbol__init(&session->header.env);
+ symbol__init(perf_session__env(session));
/* prefer sched_waking if it is captured */
if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking"))
@@ -2018,6 +2024,16 @@ static u64 evsel__get_time(struct evsel *evsel, u32 cpu)
return r->last_time[cpu];
}
+static void timehist__evsel_priv_destructor(void *priv)
+{
+ struct evsel_runtime *r = priv;
+
+ if (r) {
+ free(r->last_time);
+ free(r);
+ }
+}
+
static int comm_width = 30;
static char *timehist_get_commstr(struct thread *thread)
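timehist__evsel_priv_destructor() is registered later in perf_sched__timehist() via evsel__set_priv_destructor(), so the per-evsel runtime data is released when the evsels are deleted instead of leaking. A sketch of the registration pattern, with hypothetical names standing in for the evsel API:

#include <stdlib.h>

struct runtime {
	long *last_time;	/* per-CPU array, as in evsel_runtime */
};

static void (*priv_destructor)(void *priv);

/* Register a callback that teardown invokes on each priv pointer. */
static void set_priv_destructor(void (*d)(void *priv))
{
	priv_destructor = d;
}

static void runtime_destructor(void *priv)
{
	struct runtime *r = priv;

	if (r) {
		free(r->last_time);
		free(r);
	}
}

static void object__delete(void *priv)
{
	if (priv_destructor)
		priv_destructor(priv);
}

Calling set_priv_destructor(runtime_destructor) once before processing makes every later object__delete() free both allocations, mirroring how the destructor above pairs with evsel deletion.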
@@ -2174,6 +2190,11 @@ static void timehist_print_sample(struct perf_sched *sched,
printf(" ");
}
+ if (!thread__comm_set(thread)) {
+ const char *prev_comm = evsel__strval(evsel, sample, "prev_comm");
+ thread__set_comm(thread, prev_comm, sample->time);
+ }
+
printf(" %-*s ", comm_width, timehist_get_commstr(thread));
if (sched->show_prio)
@@ -2311,8 +2332,10 @@ static void save_task_callchain(struct perf_sched *sched,
return;
}
- if (!sched->show_callchain || sample->callchain == NULL)
+ if (!sched->show_callchain || sample->callchain == NULL) {
+ thread__put(thread);
return;
+ }
cursor = get_tls_callchain_cursor();
@@ -2321,10 +2344,12 @@ static void save_task_callchain(struct perf_sched *sched,
if (verbose > 0)
pr_err("Failed to resolve callchain. Skipping\n");
+ thread__put(thread);
return;
}
callchain_cursor_commit(cursor);
+ thread__put(thread);
while (true) {
struct callchain_cursor_node *node;
@@ -2401,8 +2426,17 @@ static void free_idle_threads(void)
return;
for (i = 0; i < idle_max_cpu; ++i) {
- if ((idle_threads[i]))
- thread__delete(idle_threads[i]);
+ struct thread *idle = idle_threads[i];
+
+ if (idle) {
+ struct idle_thread_runtime *itr;
+
+ itr = thread__priv(idle);
+ if (itr)
+ thread__put(itr->last_thread);
+
+ thread__delete(idle);
+ }
}
free(idle_threads);
@@ -2439,7 +2473,7 @@ static struct thread *get_idle_thread(int cpu)
}
}
- return idle_threads[cpu];
+ return thread__get(idle_threads[cpu]);
}
static void save_idle_callchain(struct perf_sched *sched,
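Most of the builtin-sched changes tighten thread reference counting: lookup helpers such as get_idle_thread() now return an acquired reference, and every consumer drops it with thread__put(), which tolerates NULL. A self-contained sketch of the get/put discipline (obj and its helpers are illustrative):

#include <stdlib.h>

struct obj {
	int refcnt;
};

static struct obj *obj__get(struct obj *o)
{
	if (o)
		o->refcnt++;
	return o;
}

static void obj__put(struct obj *o)
{
	if (o && --o->refcnt == 0)
		free(o);
}

/* Lookup helpers return an owned reference; the caller must put() it. */
static struct obj *findnew(struct obj **slot)
{
	if (!*slot) {
		*slot = calloc(1, sizeof(**slot));
		if (*slot)
			(*slot)->refcnt = 1;	/* reference held by the slot */
	}
	return obj__get(*slot);			/* extra reference for the caller */
}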
@@ -2494,7 +2528,8 @@ static struct thread *timehist_get_thread(struct perf_sched *sched,
if (itr == NULL)
return NULL;
- itr->last_thread = thread;
+ thread__put(itr->last_thread);
+ itr->last_thread = thread__get(thread);
/* copy task callchain when entering to idle */
if (evsel__intval(evsel, sample, "next_pid") == 0)
@@ -2565,6 +2600,7 @@ static void timehist_print_wakeup_event(struct perf_sched *sched,
/* show wakeup unless both awakee and awaker are filtered */
if (timehist_skip_sample(sched, thread, evsel, sample) &&
timehist_skip_sample(sched, awakened, evsel, sample)) {
+ thread__put(thread);
return;
}
@@ -2581,6 +2617,8 @@ static void timehist_print_wakeup_event(struct perf_sched *sched,
printf("awakened: %s", timehist_get_commstr(awakened));
printf("\n");
+
+ thread__put(thread);
}
static int timehist_sched_wakeup_ignore(const struct perf_tool *tool __maybe_unused,
@@ -2609,8 +2647,10 @@ static int timehist_sched_wakeup_event(const struct perf_tool *tool,
return -1;
tr = thread__get_runtime(thread);
- if (tr == NULL)
+ if (tr == NULL) {
+ thread__put(thread);
return -1;
+ }
if (tr->ready_to_run == 0)
tr->ready_to_run = sample->time;
@@ -2620,6 +2660,7 @@ static int timehist_sched_wakeup_event(const struct perf_tool *tool,
!perf_time__skip_sample(&sched->ptime, sample->time))
timehist_print_wakeup_event(sched, evsel, sample, machine, thread);
+ thread__put(thread);
return 0;
}
@@ -2647,6 +2688,7 @@ static void timehist_print_migration_event(struct perf_sched *sched,
if (timehist_skip_sample(sched, thread, evsel, sample) &&
timehist_skip_sample(sched, migrated, evsel, sample)) {
+ thread__put(thread);
return;
}
@@ -2674,6 +2716,7 @@ static void timehist_print_migration_event(struct perf_sched *sched,
printf(" cpu %d => %d", ocpu, dcpu);
printf("\n");
+ thread__put(thread);
}
static int timehist_migrate_task_event(const struct perf_tool *tool,
@@ -2693,8 +2736,10 @@ static int timehist_migrate_task_event(const struct perf_tool *tool,
return -1;
tr = thread__get_runtime(thread);
- if (tr == NULL)
+ if (tr == NULL) {
+ thread__put(thread);
return -1;
+ }
tr->migrations++;
tr->migrated = sample->time;
@@ -2704,6 +2749,7 @@ static int timehist_migrate_task_event(const struct perf_tool *tool,
timehist_print_migration_event(sched, evsel, sample,
machine, thread);
}
+ thread__put(thread);
return 0;
}
@@ -2726,10 +2772,10 @@ static void timehist_update_task_prio(struct evsel *evsel,
return;
tr = thread__get_runtime(thread);
- if (tr == NULL)
- return;
+ if (tr != NULL)
+ tr->prio = next_prio;
- tr->prio = next_prio;
+ thread__put(thread);
}
static int timehist_sched_change_event(const struct perf_tool *tool,
@@ -2741,7 +2787,7 @@ static int timehist_sched_change_event(const struct perf_tool *tool,
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
struct perf_time_interval *ptime = &sched->ptime;
struct addr_location al;
- struct thread *thread;
+ struct thread *thread = NULL;
struct thread_runtime *tr = NULL;
u64 tprev, t = sample->time;
int rc = 0;
@@ -2865,6 +2911,7 @@ out:
evsel__save_time(evsel, sample->time, sample->cpu);
+ thread__put(thread);
addr_location__exit(&al);
return rc;
}
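timehist_sched_change_event() now initializes thread to NULL so that a single exit path can call thread__put() unconditionally: early failures jump to the cleanup before the lookup ever ran, and put() on NULL is a no-op. The shape, sketched with hypothetical helpers:

struct obj;
extern struct obj *lookup(void);	/* hypothetical; may return NULL */
extern void obj__put(struct obj *o);	/* must tolerate NULL */

static int handler(void)
{
	struct obj *o = NULL;	/* safe to put() even if never assigned */
	int rc = -1;

	o = lookup();
	if (!o)
		goto out;
	/* ... use o ... */
	rc = 0;
out:
	obj__put(o);		/* no-op when o is still NULL */
	return rc;
}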
@@ -3236,6 +3283,7 @@ static int perf_sched__timehist(struct perf_sched *sched)
};
struct perf_session *session;
+ struct perf_env *env;
struct evlist *evlist;
int err = -1;
@@ -3260,6 +3308,7 @@ static int perf_sched__timehist(struct perf_sched *sched)
if (IS_ERR(session))
return PTR_ERR(session);
+ env = perf_session__env(session);
if (cpu_list) {
err = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
if (err < 0)
@@ -3268,7 +3317,7 @@ static int perf_sched__timehist(struct perf_sched *sched)
evlist = session->evlist;
- symbol__init(&session->header.env);
+ symbol__init(env);
if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) {
pr_err("Invalid time string\n");
@@ -3286,6 +3335,8 @@ static int perf_sched__timehist(struct perf_sched *sched)
setup_pager();
+ evsel__set_priv_destructor(timehist__evsel_priv_destructor);
+
/* prefer sched_waking if it is captured */
if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking"))
handlers[1].handler = timehist_sched_wakeup_ignore;
@@ -3305,7 +3356,7 @@ static int perf_sched__timehist(struct perf_sched *sched)
goto out;
/* pre-allocate struct for per-CPU idle stats */
- sched->max_cpu.cpu = session->header.env.nr_cpus_online;
+ sched->max_cpu.cpu = env->nr_cpus_online;
if (sched->max_cpu.cpu == 0)
sched->max_cpu.cpu = 4;
if (init_idle_threads(sched->max_cpu.cpu))
@@ -3386,13 +3437,13 @@ static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *d
this->total_runtime += data->total_runtime;
this->nb_atoms += data->nb_atoms;
this->total_lat += data->total_lat;
- list_splice(&data->work_list, &this->work_list);
+ list_splice_init(&data->work_list, &this->work_list);
if (this->max_lat < data->max_lat) {
this->max_lat = data->max_lat;
this->max_lat_start = data->max_lat_start;
this->max_lat_end = data->max_lat_end;
}
- zfree(&data);
+ free_work_atoms(data);
return;
}
}
@@ -3471,7 +3522,6 @@ static int perf_sched__lat(struct perf_sched *sched)
work_list = rb_entry(next, struct work_atoms, node);
output_lat_thread(sched, work_list);
next = rb_next(next);
- thread__zput(work_list->thread);
}
printf(" -----------------------------------------------------------------------------------------------------------------\n");
@@ -3485,6 +3535,13 @@ static int perf_sched__lat(struct perf_sched *sched)
rc = 0;
+ while ((next = rb_first_cached(&sched->sorted_atom_root))) {
+ struct work_atoms *data;
+
+ data = rb_entry(next, struct work_atoms, node);
+ rb_erase_cached(next, &sched->sorted_atom_root);
+ free_work_atoms(data);
+ }
out_free_cpus_switch_event:
free_cpus_switch_event(sched);
return rc;
@@ -3556,10 +3613,10 @@ static int perf_sched__map(struct perf_sched *sched)
sched->curr_out_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_out_thread)));
if (!sched->curr_out_thread)
- return rc;
+ goto out_free_curr_thread;
if (setup_cpus_switch_event(sched))
- goto out_free_curr_thread;
+ goto out_free_curr_out_thread;
if (setup_map_cpus(sched))
goto out_free_cpus_switch_event;
@@ -3590,7 +3647,14 @@ out_put_map_cpus:
out_free_cpus_switch_event:
free_cpus_switch_event(sched);
+out_free_curr_out_thread:
+ for (int i = 0; i < MAX_CPUS; i++)
+ thread__put(sched->curr_out_thread[i]);
+ zfree(&sched->curr_out_thread);
+
out_free_curr_thread:
+ for (int i = 0; i < MAX_CPUS; i++)
+ thread__put(sched->curr_thread[i]);
zfree(&sched->curr_thread);
return rc;
}
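perf_sched__map() gains a second unwind label so each allocation is released by exactly one label, in reverse order of acquisition, and a failure mid-setup frees only what was actually acquired. The general ladder, as a stand-alone sketch:

#include <stdlib.h>

static int setup_and_run(void)
{
	void *a, *b, *c;
	int rc = -1;

	a = malloc(16);
	if (!a)
		return rc;
	b = malloc(16);
	if (!b)
		goto out_free_a;
	c = malloc(16);
	if (!c)
		goto out_free_b;

	/* ... work with a, b, c ... */
	rc = 0;

	free(c);
out_free_b:
	free(b);
out_free_a:
	free(a);
	return rc;
}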
@@ -3898,13 +3962,15 @@ int cmd_sched(int argc, const char **argv)
if (!argc)
usage_with_options(sched_usage, sched_options);
+ thread__set_priv_destructor(free);
+
/*
* Aliased to 'perf script' for now:
*/
if (!strcmp(argv[0], "script")) {
- return cmd_script(argc, argv);
+ ret = cmd_script(argc, argv);
} else if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
- return __cmd_record(argc, argv);
+ ret = __cmd_record(argc, argv);
} else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
sched.tp_handler = &lat_ops;
if (argc > 1) {
@@ -3913,7 +3979,7 @@ int cmd_sched(int argc, const char **argv)
usage_with_options(latency_usage, latency_options);
}
setup_sorting(&sched, latency_options, latency_usage);
- return perf_sched__lat(&sched);
+ ret = perf_sched__lat(&sched);
} else if (!strcmp(argv[0], "map")) {
if (argc) {
argc = parse_options(argc, argv, map_options, map_usage, 0);
@@ -3924,13 +3990,14 @@ int cmd_sched(int argc, const char **argv)
sched.map.task_names = strlist__new(sched.map.task_name, NULL);
if (sched.map.task_names == NULL) {
fprintf(stderr, "Failed to parse task names\n");
- return -1;
+ ret = -1;
+ goto out;
}
}
}
sched.tp_handler = &map_ops;
setup_sorting(&sched, latency_options, latency_usage);
- return perf_sched__map(&sched);
+ ret = perf_sched__map(&sched);
} else if (strlen(argv[0]) > 2 && strstarts("replay", argv[0])) {
sched.tp_handler = &replay_ops;
if (argc) {
@@ -3938,7 +4005,7 @@ int cmd_sched(int argc, const char **argv)
if (argc)
usage_with_options(replay_usage, replay_options);
}
- return perf_sched__replay(&sched);
+ ret = perf_sched__replay(&sched);
} else if (!strcmp(argv[0], "timehist")) {
if (argc) {
argc = parse_options(argc, argv, timehist_options,
@@ -3954,19 +4021,19 @@ int cmd_sched(int argc, const char **argv)
parse_options_usage(NULL, timehist_options, "w", true);
if (sched.show_next)
parse_options_usage(NULL, timehist_options, "n", true);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
ret = symbol__validate_sym_arguments();
- if (ret)
- return ret;
-
- return perf_sched__timehist(&sched);
+ if (!ret)
+ ret = perf_sched__timehist(&sched);
} else {
usage_with_options(sched_usage, sched_options);
}
+out:
/* free usage string allocated by parse_options_subcommand */
free((void *)sched_usage[0]);
- return 0;
+ return ret;
}
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 9b16df881af8..62e43d3c5ad7 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -33,15 +33,18 @@
#include "util/path.h"
#include "util/event.h"
#include "util/mem-info.h"
+#include "util/metricgroup.h"
#include "ui/ui.h"
#include "print_binary.h"
#include "print_insn.h"
#include "archinsn.h"
#include <linux/bitmap.h>
+#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
+#include <linux/unaligned.h>
#include <sys/utsname.h>
#include "asm/bug.h"
#include "util/mem-events.h"
@@ -50,6 +53,7 @@
#include <errno.h>
#include <inttypes.h>
#include <signal.h>
+#include <stdio.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/stat.h>
@@ -221,7 +225,7 @@ enum {
OUTPUT_TYPE_MAX
};
-// We need to refactor the evsel->priv use in in 'perf script' to allow for
+// We need to refactor the evsel->priv use in 'perf script' to allow for
// using that area, that is being used only in some cases.
#define OUTPUT_TYPE_UNSET -1
@@ -338,16 +342,8 @@ struct evsel_script {
char *filename;
FILE *fp;
u64 samples;
- /* For metric output */
- u64 val;
- int gnum;
};
-static inline struct evsel_script *evsel_script(struct evsel *evsel)
-{
- return (struct evsel_script *)evsel->priv;
-}
-
static struct evsel_script *evsel_script__new(struct evsel *evsel, struct perf_data *data)
{
struct evsel_script *es = zalloc(sizeof(*es));
@@ -680,7 +676,7 @@ static int perf_session__check_output_opt(struct perf_session *session)
evlist__for_each_entry(session->evlist, evsel) {
not_pipe = true;
- if (evsel__has_callchain(evsel)) {
+ if (evsel__has_callchain(evsel) || evsel__is_offcpu_event(evsel)) {
use_callchain = true;
break;
}
@@ -712,7 +708,7 @@ static int perf_session__check_output_opt(struct perf_session *session)
}
}
- if (tod && !session->header.env.clock.enabled) {
+ if (tod && !perf_session__env(session)->clock.enabled) {
pr_err("Can't provide 'tod' time, missing clock data. "
"Please record with -k/--clockid option.\n");
return -1;
@@ -757,7 +753,7 @@ tod_scnprintf(struct perf_script *script, char *buf, int buflen,
if (buflen < 64 || !script)
return buf;
- env = &script->session->header.env;
+ env = perf_session__env(script->session);
if (!env->clock.enabled) {
scnprintf(buf, buflen, "disabled");
return buf;
@@ -1222,7 +1218,6 @@ static int any_dump_insn(struct evsel *evsel __maybe_unused,
u8 *inbuf, int inlen, int *lenp,
FILE *fp)
{
-#ifdef HAVE_LIBCAPSTONE_SUPPORT
if (PRINT_FIELD(BRSTACKDISASM)) {
int printed = fprintf_insn_asm(x->machine, x->thread, x->cpumode, x->is64bit,
(uint8_t *)inbuf, inlen, ip, lenp,
@@ -1231,7 +1226,6 @@ static int any_dump_insn(struct evsel *evsel __maybe_unused,
if (printed > 0)
return printed;
}
-#endif
return fprintf(fp, "%s", dump_insn(x, ip, inbuf, inlen, lenp));
}
@@ -2001,6 +1995,25 @@ static int perf_sample__fprintf_synth_iflag_chg(struct perf_sample *sample, FILE
return len + perf_sample__fprintf_pt_spacing(len, fp);
}
+static int perf_sample__fprintf_synth_vpadtl(struct perf_sample *data, FILE *fp)
+{
+ struct powerpc_vpadtl_entry *dtl = (struct powerpc_vpadtl_entry *)data->raw_data;
+ int len;
+
+ len = fprintf(fp, "timebase: %" PRIu64 " dispatch_reason:%s, preempt_reason:%s,\n"
+ "enqueue_to_dispatch_time:%d, ready_to_enqueue_time:%d,"
+ "waiting_to_ready_time:%d, processor_id: %d",
+ get_unaligned_be64(&dtl->timebase),
+ dispatch_reasons[dtl->dispatch_reason],
+ preempt_reasons[dtl->preempt_reason],
+ be32_to_cpu(dtl->enqueue_to_dispatch_time),
+ be32_to_cpu(dtl->ready_to_enqueue_time),
+ be32_to_cpu(dtl->waiting_to_ready_time),
+ be16_to_cpu(dtl->processor_id));
+
+ return len;
+}
+
static int perf_sample__fprintf_synth(struct perf_sample *sample,
struct evsel *evsel, FILE *fp)
{
@@ -2023,6 +2036,8 @@ static int perf_sample__fprintf_synth(struct perf_sample *sample,
return perf_sample__fprintf_synth_evt(sample, fp);
case PERF_SYNTH_INTEL_IFLAG_CHG:
return perf_sample__fprintf_synth_iflag_chg(sample, fp);
+ case PERF_SYNTH_POWERPC_VPA_DTL:
+ return perf_sample__fprintf_synth_vpadtl(sample, fp);
default:
break;
}
@@ -2102,13 +2117,161 @@ static void script_new_line(struct perf_stat_config *config __maybe_unused,
fputs("\tmetric: ", mctx->fp);
}
-static void perf_sample__fprint_metric(struct perf_script *script,
- struct thread *thread,
+struct script_find_metrics_args {
+ struct evlist *evlist;
+ bool system_wide;
+};
+
+static struct evsel *map_metric_evsel_to_script_evsel(struct evlist *script_evlist,
+ struct evsel *metric_evsel)
+{
+ struct evsel *script_evsel;
+
+ evlist__for_each_entry(script_evlist, script_evsel) {
+ /* Skip if perf_event_attr differ. */
+ if (metric_evsel->core.attr.type != script_evsel->core.attr.type)
+ continue;
+ if (metric_evsel->core.attr.config != script_evsel->core.attr.config)
+ continue;
+ /* Skip if the script event has a metric_id that doesn't match. */
+ if (script_evsel->metric_id &&
+ strcmp(evsel__metric_id(metric_evsel), evsel__metric_id(script_evsel))) {
+ pr_debug("Skipping matching evsel due to differing metric ids '%s' vs '%s'\n",
+ evsel__metric_id(metric_evsel), evsel__metric_id(script_evsel));
+ continue;
+ }
+ return script_evsel;
+ }
+ return NULL;
+}
+
+static int script_find_metrics(const struct pmu_metric *pm,
+ const struct pmu_metrics_table *table __maybe_unused,
+ void *data)
+{
+ struct script_find_metrics_args *args = data;
+ struct evlist *script_evlist = args->evlist;
+ struct evlist *metric_evlist = evlist__new();
+ struct evsel *metric_evsel;
+ int ret = metricgroup__parse_groups(metric_evlist,
+ /*pmu=*/"all",
+ pm->metric_name,
+ /*metric_no_group=*/false,
+ /*metric_no_merge=*/false,
+ /*metric_no_threshold=*/true,
+ /*user_requested_cpu_list=*/NULL,
+ args->system_wide,
+ /*hardware_aware_grouping=*/false);
+
+ if (ret) {
+ /* Metric parsing failed but continue the search. */
+ goto out;
+ }
+
+ /*
+ * Check that the script_evlist has an entry for each metric_evlist
+ * entry. If the script evsel was already set up, avoid changing data
+ * that may break it.
+ */
+ evlist__for_each_entry(metric_evlist, metric_evsel) {
+ struct evsel *script_evsel =
+ map_metric_evsel_to_script_evsel(script_evlist, metric_evsel);
+ struct evsel *new_metric_leader;
+
+ if (!script_evsel) {
+ pr_debug("Skipping metric '%s' as evsel '%s' / '%s' is missing\n",
+ pm->metric_name, evsel__name(metric_evsel),
+ evsel__metric_id(metric_evsel));
+ goto out;
+ }
+
+ if (script_evsel->metric_leader == NULL)
+ continue;
+
+ if (metric_evsel->metric_leader == metric_evsel) {
+ new_metric_leader = script_evsel;
+ } else {
+ new_metric_leader =
+ map_metric_evsel_to_script_evsel(script_evlist,
+ metric_evsel->metric_leader);
+ }
+ /* Mismatching evsel leaders. */
+ if (script_evsel->metric_leader != new_metric_leader) {
+ pr_debug("Skipping metric '%s' due to mismatching evsel metric leaders '%s' vs '%s'\n",
+ pm->metric_name, evsel__metric_id(metric_evsel),
+ evsel__metric_id(script_evsel));
+ goto out;
+ }
+ }
+ /*
+ * Metric events match those in the script evlist, copy metric evsel
+ * data into the script evlist.
+ */
+ evlist__for_each_entry(metric_evlist, metric_evsel) {
+ struct evsel *script_evsel =
+ map_metric_evsel_to_script_evsel(script_evlist, metric_evsel);
+ struct metric_event *metric_me = metricgroup__lookup(&metric_evlist->metric_events,
+ metric_evsel,
+ /*create=*/false);
+
+ if (script_evsel->metric_id == NULL) {
+ script_evsel->metric_id = metric_evsel->metric_id;
+ metric_evsel->metric_id = NULL;
+ }
+
+ if (script_evsel->metric_leader == NULL) {
+ if (metric_evsel->metric_leader == metric_evsel) {
+ script_evsel->metric_leader = script_evsel;
+ } else {
+ script_evsel->metric_leader =
+ map_metric_evsel_to_script_evsel(script_evlist,
+ metric_evsel->metric_leader);
+ }
+ }
+
+ if (metric_me) {
+ struct metric_expr *expr;
+ struct metric_event *script_me =
+ metricgroup__lookup(&script_evlist->metric_events,
+ script_evsel,
+ /*create=*/true);
+
+ if (!script_me) {
+ /*
+ * metricgroup__lookup() creates the entry, so the only
+ * failure here is a lack of memory.
+ */
+ goto out;
+ }
+ list_splice_init(&metric_me->head, &script_me->head);
+ list_for_each_entry(expr, &script_me->head, nd) {
+ for (int i = 0; expr->metric_events[i]; i++) {
+ expr->metric_events[i] =
+ map_metric_evsel_to_script_evsel(script_evlist,
+ expr->metric_events[i]);
+ }
+ }
+ }
+ }
+ pr_debug("Found metric '%s' whose evsels match those of in the perf data\n",
+ pm->metric_name);
+ evlist__delete(metric_evlist);
+out:
+ return 0;
+}
+
+static struct aggr_cpu_id script_aggr_cpu_id_get(struct perf_stat_config *config __maybe_unused,
+ struct perf_cpu cpu)
+{
+ return aggr_cpu_id__global(cpu, /*data=*/NULL);
+}
+
+static void perf_sample__fprint_metric(struct thread *thread,
struct evsel *evsel,
struct perf_sample *sample,
FILE *fp)
{
- struct evsel *leader = evsel__leader(evsel);
+ static bool init_metrics;
struct perf_stat_output_ctx ctx = {
.print_metric = script_print_metric,
.new_line = script_new_line,
@@ -2120,24 +2283,84 @@ static void perf_sample__fprint_metric(struct perf_script *script,
},
.force_header = false,
};
- struct evsel *ev2;
- u64 val;
+ struct perf_counts_values *count, *old_count;
+ int cpu_map_idx, thread_map_idx, aggr_idx;
+ struct evsel *pos;
+
+ if (!init_metrics) {
+ /* One time initialization of stat_config and metric data. */
+ struct script_find_metrics_args args = {
+ .evlist = evsel->evlist,
+ .system_wide = perf_thread_map__pid(evsel->core.threads, /*idx=*/0) == -1,
+ };
+ if (!stat_config.output)
+ stat_config.output = stdout;
+
+ if (!stat_config.aggr_map) {
+ /* TODO: currently only global aggregation is supported. */
+ assert(stat_config.aggr_mode == AGGR_GLOBAL);
+ stat_config.aggr_get_id = script_aggr_cpu_id_get;
+ stat_config.aggr_map =
+ cpu_aggr_map__new(evsel->evlist->core.user_requested_cpus,
+ aggr_cpu_id__global, /*data=*/NULL,
+ /*needs_sort=*/false);
+ }
+
+ metricgroup__for_each_metric(pmu_metrics_table__find(), script_find_metrics, &args);
+ init_metrics = true;
+ }
+
+ if (!evsel->stats) {
+ if (evlist__alloc_stats(&stat_config, evsel->evlist, /*alloc_raw=*/true) < 0)
+ return;
+ }
+ if (!evsel->stats->aggr) {
+ if (evlist__alloc_aggr_stats(evsel->evlist, stat_config.aggr_map->nr) < 0)
+ return;
+ }
- if (!evsel->stats)
- evlist__alloc_stats(&stat_config, script->session->evlist, /*alloc_raw=*/false);
- if (evsel_script(leader)->gnum++ == 0)
- perf_stat__reset_shadow_stats();
- val = sample->period * evsel->scale;
- evsel_script(evsel)->val = val;
- if (evsel_script(leader)->gnum == leader->core.nr_members) {
- for_each_group_member (ev2, leader) {
- perf_stat__print_shadow_stats(&stat_config, ev2,
- evsel_script(ev2)->val,
- sample->cpu,
- &ctx,
- NULL);
+ /* Update the evsel's count using the sample's data. */
+ cpu_map_idx = perf_cpu_map__idx(evsel->core.cpus, (struct perf_cpu){sample->cpu});
+ if (cpu_map_idx < 0) {
+ /* Missing CPU, check for any CPU. */
+ if (perf_cpu_map__cpu(evsel->core.cpus, /*idx=*/0).cpu == -1 ||
+ sample->cpu == (u32)-1) {
+ /* Place the counts in whichever CPU is first in the map. */
+ cpu_map_idx = 0;
+ } else {
+ pr_info("Missing CPU map entry for CPU %d\n", sample->cpu);
+ return;
+ }
+ }
+ thread_map_idx = perf_thread_map__idx(evsel->core.threads, sample->tid);
+ if (thread_map_idx < 0) {
+ /* Missing thread, check for any thread. */
+ if (perf_thread_map__pid(evsel->core.threads, /*idx=*/0) == -1 ||
+ sample->tid == (u32)-1) {
+ /* Place the counts in whichever thread is first in the map. */
+ thread_map_idx = 0;
+ } else {
+ pr_info("Missing thread map entry for thread %d\n", sample->tid);
+ return;
+ }
+ }
+ count = perf_counts(evsel->counts, cpu_map_idx, thread_map_idx);
+ old_count = perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread_map_idx);
+ count->val = old_count->val + sample->period;
+ count->run = old_count->run + 1;
+ count->ena = old_count->ena + 1;
+
+ /* Update the aggregated stats. */
+ perf_stat_process_counter(&stat_config, evsel);
+
+ /* Display all metrics. */
+ evlist__for_each_entry(evsel->evlist, pos) {
+ cpu_aggr_map__for_each_idx(aggr_idx, stat_config.aggr_map) {
+ perf_stat__print_shadow_stats(&stat_config, pos,
+ aggr_idx,
+ &ctx);
}
- evsel_script(leader)->gnum = 0;
}
}
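perf_sample__fprint_metric() defers the stat_config and metric setup to the first METRIC sample via a function-local static flag; perf script handles samples on a single thread, so no locking is needed. The idiom, sketched:

#include <stdbool.h>

static void one_time_setup(void)
{
	/* e.g. build aggregation maps, discover matching metrics */
}

static void process_sample(void)
{
	static bool initialized;	/* zero-initialized, survives calls */

	if (!initialized) {
		one_time_setup();
		initialized = true;
	}
	/* ... per-sample work ... */
}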
@@ -2251,7 +2474,7 @@ static void process_event(struct perf_script *script,
fprintf(fp, "%16" PRIu16, sample->ins_lat);
if (PRINT_FIELD(RETIRE_LAT))
- fprintf(fp, "%16" PRIu16, sample->retire_lat);
+ fprintf(fp, "%16" PRIu16, sample->weight3);
if (PRINT_FIELD(CGROUP)) {
const char *cgrp_name;
@@ -2295,7 +2518,7 @@ static void process_event(struct perf_script *script,
else if (PRINT_FIELD(BRSTACKOFF))
perf_sample__fprintf_brstackoff(sample, thread, evsel, fp);
- if (evsel__is_bpf_output(evsel) && PRINT_FIELD(BPF_OUTPUT))
+ if (evsel__is_bpf_output(evsel) && !evsel__is_offcpu_event(evsel) && PRINT_FIELD(BPF_OUTPUT))
perf_sample__fprintf_bpf_output(sample, fp);
perf_sample__fprintf_insn(sample, evsel, attr, thread, machine, fp, al);
@@ -2319,7 +2542,7 @@ static void process_event(struct perf_script *script,
}
if (PRINT_FIELD(METRIC))
- perf_sample__fprint_metric(script, thread, evsel, sample, fp);
+ perf_sample__fprint_metric(thread, evsel, sample, fp);
if (verbose > 0)
fflush(fp);
@@ -2483,6 +2706,94 @@ out_put:
return ret;
}
+static int process_deferred_sample_event(const struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct evsel *evsel,
+ struct machine *machine)
+{
+ struct perf_script *scr = container_of(tool, struct perf_script, tool);
+ struct perf_event_attr *attr = &evsel->core.attr;
+ struct evsel_script *es = evsel->priv;
+ unsigned int type = output_type(attr->type);
+ struct addr_location al;
+ FILE *fp = es->fp;
+ int ret = 0;
+
+ if (output[type].fields == 0)
+ return 0;
+
+ /* Initialize al so addr_location__exit() below is safe on every path */
+ addr_location__init(&al);
+
+ if (perf_time__ranges_skip_sample(scr->ptime_range, scr->range_num,
+ sample->time)) {
+ goto out_put;
+ }
+
+ if (debug_mode) {
+ if (sample->time < last_timestamp) {
+ pr_err("Samples misordered, previous: %" PRIu64
+ " this: %" PRIu64 "\n", last_timestamp,
+ sample->time);
+ nr_unordered++;
+ }
+ last_timestamp = sample->time;
+ goto out_put;
+ }
+
+ if (filter_cpu(sample))
+ goto out_put;
+
+ if (machine__resolve(machine, &al, sample) < 0) {
+ pr_err("problem processing %d event, skipping it.\n",
+ event->header.type);
+ ret = -1;
+ goto out_put;
+ }
+
+ if (al.filtered)
+ goto out_put;
+
+ if (!show_event(sample, evsel, al.thread, &al, NULL))
+ goto out_put;
+
+ if (evswitch__discard(&scr->evswitch, evsel))
+ goto out_put;
+
+ perf_sample__fprintf_start(scr, sample, al.thread, evsel,
+ PERF_RECORD_CALLCHAIN_DEFERRED, fp);
+ fprintf(fp, "DEFERRED CALLCHAIN [cookie: %llx]",
+ (unsigned long long)event->callchain_deferred.cookie);
+
+ if (PRINT_FIELD(IP)) {
+ struct callchain_cursor *cursor = NULL;
+
+ if (symbol_conf.use_callchain && sample->callchain) {
+ cursor = get_tls_callchain_cursor();
+ if (thread__resolve_callchain(al.thread, cursor, evsel,
+ sample, NULL, NULL,
+ scripting_max_stack)) {
+ pr_info("cannot resolve deferred callchains\n");
+ cursor = NULL;
+ }
+ }
+
+ fputc(cursor ? '\n' : ' ', fp);
+ sample__fprintf_sym(sample, &al, 0, output[type].print_ip_opts,
+ cursor, symbol_conf.bt_stop_list, fp);
+ }
+
+ fprintf(fp, "\n");
+
+ if (verbose > 0)
+ fflush(fp);
+
+out_put:
+ addr_location__exit(&al);
+ return ret;
+}
+
// Used when scr->per_event_dump is not set
static struct evsel_script es_stdout;
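process_deferred_sample_event() becomes the tool's callchain_deferred handler (wired up in cmd_script() below). perf tools dispatch events through per-type function pointers on struct perf_tool; a sketch of that pattern with illustrative types:

struct event;

struct tool {
	int (*sample)(struct tool *tool, struct event *ev);
	int (*callchain_deferred)(struct tool *tool, struct event *ev);
};

static int dispatch(struct tool *tool, struct event *ev, int deferred)
{
	/* Invoke the registered handler, if any; unhandled events succeed. */
	int (*fn)(struct tool *, struct event *) =
		deferred ? tool->callchain_deferred : tool->sample;

	return fn ? fn(tool, ev) : 0;
}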
@@ -2533,7 +2844,7 @@ static int process_attr(const struct perf_tool *tool, union perf_event *event,
* on events sample_type.
*/
sample_type = evlist__combined_sample_type(evlist);
- callchain_param_setup(sample_type, perf_env__arch((*pevlist)->env));
+ callchain_param_setup(sample_type, perf_env__arch(perf_session__env(scr->session)));
/* Enable fields for callchain entries */
if (symbol_conf.use_callchain &&
@@ -2700,7 +3011,8 @@ static int process_switch_event(const struct perf_tool *tool,
sample->tid);
}
-static int process_auxtrace_error(struct perf_session *session,
+static int process_auxtrace_error(const struct perf_tool *tool,
+ struct perf_session *session,
union perf_event *event)
{
if (scripting_ops && scripting_ops->process_auxtrace_error) {
@@ -2708,7 +3020,7 @@ static int process_auxtrace_error(struct perf_session *session,
return 0;
}
- return perf_event__process_auxtrace_error(session, event);
+ return perf_event__process_auxtrace_error(tool, session, event);
}
static int
@@ -2755,6 +3067,15 @@ process_bpf_events(const struct perf_tool *tool __maybe_unused,
sample->tid);
}
+static int
+process_bpf_metadata_event(const struct perf_tool *tool __maybe_unused,
+ struct perf_session *session __maybe_unused,
+ union perf_event *event)
+{
+ perf_event__fprintf(event, NULL, stdout);
+ return 0;
+}
+
static int process_text_poke_events(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
@@ -2877,8 +3198,9 @@ static int __cmd_script(struct perf_script *script)
script->tool.finished_round = process_finished_round_event;
}
if (script->show_bpf_events) {
- script->tool.ksymbol = process_bpf_events;
- script->tool.bpf = process_bpf_events;
+ script->tool.ksymbol = process_bpf_events;
+ script->tool.bpf = process_bpf_events;
+ script->tool.bpf_metadata = process_bpf_metadata_event;
}
if (script->show_text_poke_events) {
script->tool.ksymbol = process_bpf_events;
@@ -3506,7 +3828,8 @@ static void script__setup_sample_type(struct perf_script *script)
}
}
-static int process_stat_round_event(struct perf_session *session,
+static int process_stat_round_event(const struct perf_tool *tool __maybe_unused,
+ struct perf_session *session,
union perf_event *event)
{
struct perf_record_stat_round *round = &event->stat_round;
@@ -3521,7 +3844,8 @@ static int process_stat_round_event(struct perf_session *session,
return 0;
}
-static int process_stat_config_event(struct perf_session *session __maybe_unused,
+static int process_stat_config_event(const struct perf_tool *tool __maybe_unused,
+ struct perf_session *session __maybe_unused,
union perf_event *event)
{
perf_event__read_stat_config(&stat_config, &event->stat_config);
@@ -3555,10 +3879,10 @@ static int set_maps(struct perf_script *script)
}
static
-int process_thread_map_event(struct perf_session *session,
+int process_thread_map_event(const struct perf_tool *tool,
+ struct perf_session *session __maybe_unused,
union perf_event *event)
{
- const struct perf_tool *tool = session->tool;
struct perf_script *script = container_of(tool, struct perf_script, tool);
if (dump_trace)
@@ -3577,10 +3901,10 @@ int process_thread_map_event(struct perf_session *session,
}
static
-int process_cpu_map_event(struct perf_session *session,
+int process_cpu_map_event(const struct perf_tool *tool,
+ struct perf_session *session __maybe_unused,
union perf_event *event)
{
- const struct perf_tool *tool = session->tool;
struct perf_script *script = container_of(tool, struct perf_script, tool);
if (dump_trace)
@@ -3598,7 +3922,8 @@ int process_cpu_map_event(struct perf_session *session,
return set_maps(script);
}
-static int process_feature_event(struct perf_session *session,
+static int process_feature_event(const struct perf_tool *tool __maybe_unused,
+ struct perf_session *session,
union perf_event *event)
{
if (event->feat.feat_id < HEADER_LAST_FEATURE)
@@ -3606,14 +3931,13 @@ static int process_feature_event(struct perf_session *session,
return 0;
}
-#ifdef HAVE_AUXTRACE_SUPPORT
-static int perf_script__process_auxtrace_info(struct perf_session *session,
+static int perf_script__process_auxtrace_info(const struct perf_tool *tool,
+ struct perf_session *session,
union perf_event *event)
{
- int ret = perf_event__process_auxtrace_info(session, event);
+ int ret = perf_event__process_auxtrace_info(tool, session, event);
if (ret == 0) {
- const struct perf_tool *tool = session->tool;
struct perf_script *script = container_of(tool, struct perf_script, tool);
ret = perf_script__setup_per_event_dump(script);
@@ -3621,9 +3945,6 @@ static int perf_script__process_auxtrace_info(struct perf_session *session,
return ret;
}
-#else
-#define perf_script__process_auxtrace_info 0
-#endif
static int parse_insn_trace(const struct option *opt __maybe_unused,
const char *str, int unset __maybe_unused)
@@ -3688,6 +4009,7 @@ int cmd_script(int argc, const char **argv)
bool header_only = false;
bool script_started = false;
bool unsorted_dump = false;
+ bool merge_deferred_callchains = true;
char *rec_script_path = NULL;
char *rep_script_path = NULL;
struct perf_session *session;
@@ -3841,6 +4163,8 @@ int cmd_script(int argc, const char **argv)
"Guest code can be found in hypervisor process"),
OPT_BOOLEAN('\0', "stitch-lbr", &script.stitch_lbr,
"Enable LBR callgraph stitching approach"),
+ OPT_BOOLEAN('\0', "merge-callchains", &merge_deferred_callchains,
+ "Enable merge deferred user callchains"),
OPTS_EVSWITCH(&script.evswitch),
OPT_END()
};
@@ -3853,6 +4177,7 @@ int cmd_script(int argc, const char **argv)
"perf script [<options>] <top-script> [script-args]",
NULL
};
+ struct perf_env *env;
perf_set_singlethreaded();
@@ -4069,6 +4394,7 @@ script_found:
perf_tool__init(&script.tool, !unsorted_dump);
script.tool.sample = process_sample_event;
+ script.tool.callchain_deferred = process_deferred_sample_event;
script.tool.mmap = perf_event__process_mmap;
script.tool.mmap2 = perf_event__process_mmap2;
script.tool.comm = perf_event__process_comm;
@@ -4095,10 +4421,12 @@ script_found:
script.tool.throttle = process_throttle_event;
script.tool.unthrottle = process_throttle_event;
script.tool.ordering_requires_timestamps = true;
+ script.tool.merge_deferred_callchains = merge_deferred_callchains;
session = perf_session__new(&data, &script.tool);
if (IS_ERR(session))
return PTR_ERR(session);
+ env = perf_session__env(session);
if (header || header_only) {
script.tool.show_feat_hdr = SHOW_FEAT_HEADER;
perf_session__fprintf_info(session, stdout, show_full_info);
@@ -4108,17 +4436,17 @@ script_found:
if (show_full_info)
script.tool.show_feat_hdr = SHOW_FEAT_HEADER_FULL_INFO;
- if (symbol__init(&session->header.env) < 0)
+ if (symbol__init(env) < 0)
goto out_delete;
uname(&uts);
if (data.is_pipe) { /* Assume pipe_mode indicates native_arch */
native_arch = true;
- } else if (session->header.env.arch) {
- if (!strcmp(uts.machine, session->header.env.arch))
+ } else if (env->arch) {
+ if (!strcmp(uts.machine, env->arch))
native_arch = true;
else if (!strcmp(uts.machine, "x86_64") &&
- !strcmp(session->header.env.arch, "i386"))
+ !strcmp(env->arch, "i386"))
native_arch = true;
}
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 68ea7589c143..ab40d85fb125 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -74,6 +74,7 @@
#include "util/intel-tpebs.h"
#include "asm/bug.h"
+#include <linux/list_sort.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>
@@ -96,9 +97,18 @@
#include <perf/evlist.h>
#include <internal/threadmap.h>
+#ifdef HAVE_BPF_SKEL
+#include "util/bpf_skel/bperf_cgroup.h"
+#endif
+
#define DEFAULT_SEPARATOR " "
#define FREEZE_ON_SMI_PATH "bus/event_source/devices/cpu/freeze_on_smi"
+struct rusage_stats {
+ struct stats ru_utime_usec_stat;
+ struct stats ru_stime_usec_stat;
+};
+
static void print_counters(struct timespec *ts, int argc, const char **argv);
static struct evlist *evsel_list;
@@ -108,9 +118,7 @@ static struct parse_events_option_args parse_events_option_args = {
static bool all_counters_use_bpf = true;
-static struct target target = {
- .uid = UINT_MAX,
-};
+static struct target target;
static volatile sig_atomic_t child_pid = -1;
static int detailed_run = 0;
@@ -130,6 +138,7 @@ static bool interval_count;
static const char *output_name;
static int output_fd;
static char *metrics;
+static struct rusage_stats ru_stats;
struct perf_stat {
bool record;
@@ -230,7 +239,7 @@ static inline void diff_timespec(struct timespec *r, struct timespec *a,
static void perf_stat__reset_stats(void)
{
evlist__reset_stats(evsel_list);
- perf_stat__reset_shadow_stats();
+ memset(stat_config.walltime_nsecs_stats, 0, sizeof(*stat_config.walltime_nsecs_stats));
}
static int process_synthesized_event(const struct perf_tool *tool __maybe_unused,
@@ -280,17 +289,27 @@ static int read_single_counter(struct evsel *counter, int cpu_map_idx, int threa
if (err && cpu_map_idx == 0 &&
(evsel__tool_event(counter) == TOOL_PMU__EVENT_USER_TIME ||
evsel__tool_event(counter) == TOOL_PMU__EVENT_SYSTEM_TIME)) {
- u64 val, *start_time;
struct perf_counts_values *count =
perf_counts(counter->counts, cpu_map_idx, thread);
+ struct perf_counts_values *old_count = NULL;
+ u64 val;
+
+ if (counter->prev_raw_counts)
+ old_count = perf_counts(counter->prev_raw_counts, cpu_map_idx, thread);
- start_time = xyarray__entry(counter->start_times, cpu_map_idx, thread);
if (evsel__tool_event(counter) == TOOL_PMU__EVENT_USER_TIME)
val = ru_stats.ru_utime_usec_stat.mean;
else
val = ru_stats.ru_stime_usec_stat.mean;
- count->ena = count->run = *start_time + val;
+
count->val = val;
+ if (old_count) {
+ count->run = old_count->run + 1;
+ count->ena = old_count->ena + 1;
+ } else {
+ count->run++;
+ count->ena++;
+ }
return 0;
}
return err;
@@ -347,7 +366,7 @@ static int read_counter_cpu(struct evsel *counter, int cpu_map_idx)
return 0;
}
-static int read_affinity_counters(void)
+static int read_counters_with_affinity(void)
{
struct evlist_cpu_iterator evlist_cpu_itr;
struct affinity saved_affinity, *affinity;
@@ -368,6 +387,9 @@ static int read_affinity_counters(void)
if (evsel__is_bpf(counter))
continue;
+ if (evsel__is_tool(counter))
+ continue;
+
if (!counter->err)
counter->err = read_counter_cpu(counter, evlist_cpu_itr.cpu_map_idx);
}
@@ -393,16 +415,46 @@ static int read_bpf_map_counters(void)
return 0;
}
-static int read_counters(void)
+static int read_tool_counters(void)
{
- if (!stat_config.stop_read_counter) {
- if (read_bpf_map_counters() ||
- read_affinity_counters())
- return -1;
+ struct evsel *counter;
+
+ evlist__for_each_entry(evsel_list, counter) {
+ int idx;
+
+ if (!evsel__is_tool(counter))
+ continue;
+
+ perf_cpu_map__for_each_idx(idx, counter->core.cpus) {
+ if (!counter->err)
+ counter->err = read_counter_cpu(counter, idx);
+ }
}
return 0;
}
+static int read_counters(void)
+{
+ int ret;
+
+ if (stat_config.stop_read_counter)
+ return 0;
+
+ // Read all BPF counters first.
+ ret = read_bpf_map_counters();
+ if (ret)
+ return ret;
+
+ // Read non-BPF and non-tool counters next.
+ ret = read_counters_with_affinity();
+ if (ret)
+ return ret;
+
+ // Read the tool counters last. This way the duration_time counter
+ // should always be greater than any other counter's enabled time.
+ return read_tool_counters();
+}
+
static void process_counters(void)
{
struct evsel *counter;
@@ -436,8 +488,8 @@ static void process_interval(void)
pr_err("failed to write stat round event\n");
}
- init_stats(&walltime_nsecs_stats);
- update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL);
+ init_stats(stat_config.walltime_nsecs_stats);
+ update_stats(stat_config.walltime_nsecs_stats, stat_config.interval * 1000000ULL);
print_counters(&rs, 0, NULL);
}
@@ -612,38 +664,34 @@ static int dispatch_events(bool forks, int timeout, int interval, int *times)
enum counter_recovery {
COUNTER_SKIP,
COUNTER_RETRY,
- COUNTER_FATAL,
};
-static enum counter_recovery stat_handle_error(struct evsel *counter)
+static enum counter_recovery stat_handle_error(struct evsel *counter, int err)
{
char msg[BUFSIZ];
+
+ assert(!counter->supported);
+
/*
* PPC returns ENXIO for HW counters until 2.6.37
* (behavior changed with commit b0a873e).
*/
- if (errno == EINVAL || errno == ENOSYS ||
- errno == ENOENT || errno == ENXIO) {
- if (verbose > 0)
- ui__warning("%s event is not supported by the kernel.\n",
- evsel__name(counter));
- counter->supported = false;
- /*
- * errored is a sticky flag that means one of the counter's
- * cpu event had a problem and needs to be reexamined.
- */
- counter->errored = true;
-
- if ((evsel__leader(counter) != counter) ||
- !(counter->core.leader->nr_members > 1))
- return COUNTER_SKIP;
- } else if (evsel__fallback(counter, &target, errno, msg, sizeof(msg))) {
+ if (err == EINVAL || err == ENOSYS || err == ENOENT || err == ENXIO) {
+ if (verbose > 0) {
+ evsel__open_strerror(counter, &target, err, msg, sizeof(msg));
+ ui__warning("%s event is not supported by the kernel.\n%s\n",
+ evsel__name(counter), msg);
+ }
+ return COUNTER_SKIP;
+ }
+ if (evsel__fallback(counter, &target, err, msg, sizeof(msg))) {
if (verbose > 0)
ui__warning("%s\n", msg);
+ counter->supported = true;
return COUNTER_RETRY;
- } else if (target__has_per_thread(&target) && errno != EOPNOTSUPP &&
- evsel_list->core.threads &&
- evsel_list->core.threads->err_thread != -1) {
+ }
+ if (target__has_per_thread(&target) && err != EOPNOTSUPP &&
+ evsel_list->core.threads && evsel_list->core.threads->err_thread != -1) {
/*
* For global --per-thread case, skip current
* error thread.
@@ -651,39 +699,85 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
if (!thread_map__remove(evsel_list->core.threads,
evsel_list->core.threads->err_thread)) {
evsel_list->core.threads->err_thread = -1;
+ counter->supported = true;
return COUNTER_RETRY;
}
- } else if (counter->skippable) {
- if (verbose > 0)
- ui__warning("skipping event %s that kernel failed to open .\n",
- evsel__name(counter));
- counter->supported = false;
- counter->errored = true;
- return COUNTER_SKIP;
}
+ if (verbose > 0) {
+ evsel__open_strerror(counter, &target, err, msg, sizeof(msg));
+ ui__warning(err == EOPNOTSUPP
+ ? "%s event is not supported by the kernel.\n%s\n"
+ : "skipping event %s that kernel failed to open.\n%s\n",
+ evsel__name(counter), msg);
+ }
+ return COUNTER_SKIP;
+}
- if (errno == EOPNOTSUPP) {
- if (verbose > 0) {
- ui__warning("%s event is not supported by the kernel.\n",
- evsel__name(counter));
- }
- counter->supported = false;
- counter->errored = true;
+static int create_perf_stat_counter(struct evsel *evsel,
+ struct perf_stat_config *config,
+ int cpu_map_idx)
+{
+ struct perf_event_attr *attr = &evsel->core.attr;
+ struct evsel *leader = evsel__leader(evsel);
+
+ /* Read enabled/running times so counts can be scaled. */
+ attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+ PERF_FORMAT_TOTAL_TIME_RUNNING;
+
+ /*
+ * The event is part of a non-trivial group; enable
+ * the group read (for leader) and ID retrieval for all
+ * members.
+ */
+ if (leader->core.nr_members > 1)
+ attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;
+
+ attr->inherit = !config->no_inherit && list_empty(&evsel->bpf_counter_list);
+
+ /*
+ * Some events get initialized with sample_(period/type) set,
+ * like tracepoints. Clear it up for counting.
+ */
+ attr->sample_period = 0;
+
+ if (config->identifier)
+ attr->sample_type = PERF_SAMPLE_IDENTIFIER;
- if ((evsel__leader(counter) != counter) ||
- !(counter->core.leader->nr_members > 1))
- return COUNTER_SKIP;
+ if (config->all_user) {
+ attr->exclude_kernel = 1;
+ attr->exclude_user = 0;
}
- evsel__open_strerror(counter, &target, errno, msg, sizeof(msg));
- ui__error("%s\n", msg);
+ if (config->all_kernel) {
+ attr->exclude_kernel = 0;
+ attr->exclude_user = 1;
+ }
- if (child_pid != -1)
- kill(child_pid, SIGTERM);
+ /*
+ * Disable all counters initially; they will be enabled
+ * either manually by us or by the kernel via enable_on_exec
+ * set later.
+ */
+ if (evsel__is_group_leader(evsel)) {
+ attr->disabled = 1;
- tpebs_delete();
+ if (target__enable_on_exec(&target))
+ attr->enable_on_exec = 1;
+ }
- return COUNTER_FATAL;
+ return evsel__open_per_cpu_and_thread(evsel, evsel__cpus(evsel), cpu_map_idx,
+ evsel->core.threads);
+}
+
+static void update_rusage_stats(const struct rusage *rusage)
+{
+ const u64 us_to_ns = 1000;
+ const u64 s_to_ns = 1000000000;
+
+ update_stats(&ru_stats.ru_utime_usec_stat,
+ (rusage->ru_utime.tv_usec * us_to_ns + rusage->ru_utime.tv_sec * s_to_ns));
+ update_stats(&ru_stats.ru_stime_usec_stat,
+ (rusage->ru_stime.tv_usec * us_to_ns + rusage->ru_stime.tv_sec * s_to_ns));
}
static int __run_perf_stat(int argc, const char **argv, int run_idx)
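update_rusage_stats() above converts the child's user and system times to nanoseconds before feeding the running-mean accumulators (despite the *_usec_stat field names, the us_to_ns/s_to_ns constants show the stored values are in ns). A stand-alone use of getrusage(2) with the same conversion:

#include <stdio.h>
#include <stdint.h>
#include <sys/resource.h>

int main(void)
{
	const uint64_t us_to_ns = 1000;
	const uint64_t s_to_ns = 1000000000;
	struct rusage ru;

	if (getrusage(RUSAGE_SELF, &ru) == 0) {
		uint64_t utime_ns = ru.ru_utime.tv_sec * s_to_ns +
				    ru.ru_utime.tv_usec * us_to_ns;
		printf("user time: %llu ns\n", (unsigned long long)utime_ns);
	}
	return 0;
}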
@@ -700,8 +794,8 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
struct evlist_cpu_iterator evlist_cpu_itr;
struct affinity saved_affinity, *affinity = NULL;
- int err;
- bool second_pass = false;
+ int err, open_err = 0;
+ bool second_pass = false, has_supported_counters;
if (forks) {
if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe, workload_exec_failed_signal) < 0) {
@@ -741,14 +835,17 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
if (target.use_bpf)
break;
- if (counter->reset_group || counter->errored)
+ if (counter->reset_group || !counter->supported)
continue;
if (evsel__is_bperf(counter))
continue;
-try_again:
- if (create_perf_stat_counter(counter, &stat_config, &target,
- evlist_cpu_itr.cpu_map_idx) < 0) {
+ while (true) {
+ if (create_perf_stat_counter(counter, &stat_config,
+ evlist_cpu_itr.cpu_map_idx) == 0)
+ break;
+
+ open_err = errno;
/*
* Weak group failed. We cannot just undo this here
* because earlier CPUs might be in group mode, and the kernel
@@ -756,29 +853,19 @@ try_again:
* it to later.
* Don't close here because we're in the wrong affinity.
*/
- if ((errno == EINVAL || errno == EBADF) &&
+ if ((open_err == EINVAL || open_err == EBADF) &&
evsel__leader(counter) != counter &&
counter->weak_group) {
evlist__reset_weak_group(evsel_list, counter, false);
assert(counter->reset_group);
+ counter->supported = true;
second_pass = true;
- continue;
- }
-
- switch (stat_handle_error(counter)) {
- case COUNTER_FATAL:
- err = -1;
- goto err_out;
- case COUNTER_RETRY:
- goto try_again;
- case COUNTER_SKIP:
- continue;
- default:
break;
}
+ if (stat_handle_error(counter, open_err) != COUNTER_RETRY)
+ break;
}
- counter->supported = true;
}
if (second_pass) {
@@ -791,7 +878,7 @@ try_again:
evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
counter = evlist_cpu_itr.evsel;
- if (!counter->reset_group && !counter->errored)
+ if (!counter->reset_group && counter->supported)
continue;
perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx);
@@ -802,34 +889,29 @@ try_again:
if (!counter->reset_group)
continue;
-try_again_reset:
- pr_debug2("reopening weak %s\n", evsel__name(counter));
- if (create_perf_stat_counter(counter, &stat_config, &target,
- evlist_cpu_itr.cpu_map_idx) < 0) {
-
- switch (stat_handle_error(counter)) {
- case COUNTER_FATAL:
- err = -1;
- goto err_out;
- case COUNTER_RETRY:
- goto try_again_reset;
- case COUNTER_SKIP:
- continue;
- default:
+
+ while (true) {
+ pr_debug2("reopening weak %s\n", evsel__name(counter));
+ if (create_perf_stat_counter(counter, &stat_config,
+ evlist_cpu_itr.cpu_map_idx) == 0)
+ break;
+
+ open_err = errno;
+ if (stat_handle_error(counter, open_err) != COUNTER_RETRY)
break;
- }
}
- counter->supported = true;
}
}
affinity__cleanup(affinity);
affinity = NULL;
+ has_supported_counters = false;
evlist__for_each_entry(evsel_list, counter) {
if (!counter->supported) {
perf_evsel__free_fd(&counter->core);
continue;
}
+ has_supported_counters = true;
l = strlen(counter->unit);
if (l > stat_config.unit_width)
@@ -841,6 +923,18 @@ try_again_reset:
goto err_out;
}
}
+ if (!has_supported_counters && !stat_config.null_run) {
+ if (open_err) {
+ evsel__open_strerror(evlist__first(evsel_list), &target, open_err,
+ msg, sizeof(msg));
+ }
+ ui__error("No supported events found.\n%s\n", msg);
+
+ if (child_pid != -1)
+ kill(child_pid, SIGTERM);
+ err = -1;
+ goto err_out;
+ }
if (evlist__apply_filters(evsel_list, &counter, &target)) {
pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
@@ -913,10 +1007,20 @@ try_again_reset:
goto err_out;
}
- if (WIFSIGNALED(status))
+ if (WIFSIGNALED(status)) {
+ /*
+ * We want to indicate failure to stop a repeat run,
+ * hence negative. We want the value to be the exit code
+ * of perf, which for termination by a signal is 128
+ * plus the signal number.
+ */
+ err = 0 - (128 + WTERMSIG(status));
psignal(WTERMSIG(status), argv[0]);
+ } else {
+ err = WEXITSTATUS(status);
+ }
} else {
- status = dispatch_events(forks, timeout, interval, &times);
+ err = dispatch_events(forks, timeout, interval, &times);
}
disable_counters();
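The rewritten status handling follows the shell convention: a child terminated by a signal maps to 128 plus the signal number, negated here so a non-zero err stops a --repeat run. Decoding a wait status that way, as a stand-alone sketch:

#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status = 0;
	pid_t pid = fork();

	if (pid == 0)
		_exit(3);			/* child exits normally */
	if (pid > 0 && waitpid(pid, &status, 0) == pid) {
		if (WIFSIGNALED(status))
			printf("err = %d\n", -(128 + WTERMSIG(status)));
		else if (WIFEXITED(status))
			printf("err = %d\n", WEXITSTATUS(status));	/* prints 3 */
	}
	return 0;
}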
@@ -929,15 +1033,15 @@ try_again_reset:
if (interval && stat_config.summary) {
stat_config.interval = 0;
stat_config.stop_read_counter = true;
- init_stats(&walltime_nsecs_stats);
- update_stats(&walltime_nsecs_stats, t1 - t0);
+ init_stats(stat_config.walltime_nsecs_stats);
+ update_stats(stat_config.walltime_nsecs_stats, t1 - t0);
evlist__copy_prev_raw_counts(evsel_list);
evlist__reset_prev_raw_counts(evsel_list);
evlist__reset_aggr_stats(evsel_list);
} else {
- update_stats(&walltime_nsecs_stats, t1 - t0);
- update_rusage_stats(&ru_stats, &stat_config.ru_data);
+ update_stats(stat_config.walltime_nsecs_stats, t1 - t0);
+ update_rusage_stats(&stat_config.ru_data);
}
/*
@@ -956,7 +1060,7 @@ try_again_reset:
if (!STAT_RECORD)
evlist__close(evsel_list);
- return WEXITSTATUS(status);
+ return err;
err_out:
if (forks)
@@ -1369,7 +1473,7 @@ static struct aggr_cpu_id perf_stat__get_aggr(struct perf_stat_config *config,
struct aggr_cpu_id id;
/* per-process mode - should use global aggr mode */
- if (cpu.cpu == -1)
+ if (cpu.cpu == -1 || cpu.cpu >= config->cpus_aggr_map->nr)
return get_id(config, cpu);
if (aggr_cpu_id__is_empty(&config->cpus_aggr_map->map[cpu.cpu]))
@@ -1517,11 +1621,8 @@ static int perf_stat_init_aggr_mode(void)
* taking the highest cpu number to be the size of
* the aggregation translate cpumap.
*/
- if (!perf_cpu_map__is_any_cpu_or_is_empty(evsel_list->core.user_requested_cpus))
- nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
- else
- nr = 0;
- stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr + 1);
+ nr = perf_cpu_map__max(evsel_list->core.all_cpus).cpu + 1;
+ stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr);
return stat_config.cpus_aggr_map ? 0 : -ENOMEM;
}
@@ -1696,48 +1797,48 @@ static struct aggr_cpu_id perf_env__get_global_aggr_by_cpu(struct perf_cpu cpu _
static struct aggr_cpu_id perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused,
struct perf_cpu cpu)
{
- return perf_env__get_socket_aggr_by_cpu(cpu, &perf_stat.session->header.env);
+ return perf_env__get_socket_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
}
static struct aggr_cpu_id perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused,
struct perf_cpu cpu)
{
- return perf_env__get_die_aggr_by_cpu(cpu, &perf_stat.session->header.env);
+ return perf_env__get_die_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
}
static struct aggr_cpu_id perf_stat__get_cluster_file(struct perf_stat_config *config __maybe_unused,
struct perf_cpu cpu)
{
- return perf_env__get_cluster_aggr_by_cpu(cpu, &perf_stat.session->header.env);
+ return perf_env__get_cluster_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
}
static struct aggr_cpu_id perf_stat__get_cache_file(struct perf_stat_config *config __maybe_unused,
struct perf_cpu cpu)
{
- return perf_env__get_cache_aggr_by_cpu(cpu, &perf_stat.session->header.env);
+ return perf_env__get_cache_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
}
static struct aggr_cpu_id perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused,
struct perf_cpu cpu)
{
- return perf_env__get_core_aggr_by_cpu(cpu, &perf_stat.session->header.env);
+ return perf_env__get_core_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
}
static struct aggr_cpu_id perf_stat__get_cpu_file(struct perf_stat_config *config __maybe_unused,
struct perf_cpu cpu)
{
- return perf_env__get_cpu_aggr_by_cpu(cpu, &perf_stat.session->header.env);
+ return perf_env__get_cpu_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
}
static struct aggr_cpu_id perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused,
struct perf_cpu cpu)
{
- return perf_env__get_node_aggr_by_cpu(cpu, &perf_stat.session->header.env);
+ return perf_env__get_node_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
}
static struct aggr_cpu_id perf_stat__get_global_file(struct perf_stat_config *config __maybe_unused,
struct perf_cpu cpu)
{
- return perf_env__get_global_aggr_by_cpu(cpu, &perf_stat.session->header.env);
+ return perf_env__get_global_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
}
static aggr_cpu_id_get_t aggr_mode__get_aggr_file(enum aggr_mode aggr_mode)
@@ -1796,7 +1897,7 @@ static aggr_get_id_t aggr_mode__get_id_file(enum aggr_mode aggr_mode)
static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
{
- struct perf_env *env = &st->session->header.env;
+ struct perf_env *env = perf_session__env(st->session);
aggr_cpu_id_get_t get_id = aggr_mode__get_aggr_file(stat_config.aggr_mode);
bool needs_sort = stat_config.aggr_mode != AGGR_NONE;
@@ -1829,6 +1930,35 @@ static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
return 0;
}
+static int default_evlist_evsel_cmp(void *priv __maybe_unused,
+ const struct list_head *l,
+ const struct list_head *r)
+{
+ const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node);
+ const struct evsel *lhs = container_of(lhs_core, struct evsel, core);
+ const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node);
+ const struct evsel *rhs = container_of(rhs_core, struct evsel, core);
+
+ if (evsel__leader(lhs) == evsel__leader(rhs)) {
+ /* Within the same group, respect the original order. */
+ return lhs_core->idx - rhs_core->idx;
+ }
+
+ /* Sort default metrics evsels first, and default show events before those. */
+ if (lhs->default_metricgroup != rhs->default_metricgroup)
+ return lhs->default_metricgroup ? -1 : 1;
+
+ if (lhs->default_show_events != rhs->default_show_events)
+ return lhs->default_show_events ? -1 : 1;
+
+ /* Sort by PMU type (prefers legacy types first). */
+ if (lhs->pmu != rhs->pmu)
+ return lhs->pmu->type - rhs->pmu->type;
+
+ /* Sort by name. */
+ return strcmp(evsel__name((struct evsel *)lhs), evsel__name((struct evsel *)rhs));
+}
+
/*
* Add default events, if there were no attributes specified or
* if -d/--detailed, -d -d or -d -d -d is used:
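default_evlist_evsel_cmp() above orders events through a cascade of keys, falling to the next key only on ties, which is the standard shape for list_sort() and qsort(3) comparators. The same cascade over a flat array (struct ev and its fields are illustrative):

#include <stdlib.h>
#include <string.h>

struct ev {
	int group_id, idx, is_default, pmu_type;
	const char *name;
};

static int ev_cmp(const void *l, const void *r)
{
	const struct ev *a = l, *b = r;

	if (a->group_id == b->group_id)
		return a->idx - b->idx;		/* keep original order in a group */
	if (a->is_default != b->is_default)
		return a->is_default ? -1 : 1;	/* defaults sort first */
	if (a->pmu_type != b->pmu_type)
		return a->pmu_type - b->pmu_type;	/* lower PMU types first */
	return strcmp(a->name, b->name);	/* final tie-break by name */
}

Sorting is then qsort(events, n, sizeof(events[0]), ev_cmp); list_sort() applies the same comparator shape to linked-list nodes.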
@@ -1856,7 +1986,7 @@ static int add_default_events(void)
* will use this approach. To determine transaction support
* on an architecture test for such a metric name.
*/
- if (!metricgroup__has_metric(pmu, "transaction")) {
+ if (!metricgroup__has_metric_or_groups(pmu, "transaction")) {
pr_err("Missing transaction metrics\n");
ret = -1;
goto out;
@@ -1867,8 +1997,7 @@ static int add_default_events(void)
stat_config.metric_no_threshold,
stat_config.user_requested_cpu_list,
stat_config.system_wide,
- stat_config.hardware_aware_grouping,
- &stat_config.metric_events);
+ stat_config.hardware_aware_grouping);
goto out;
}
@@ -1890,7 +2019,7 @@ static int add_default_events(void)
smi_reset = true;
}
- if (!metricgroup__has_metric(pmu, "smi")) {
+ if (!metricgroup__has_metric_or_groups(pmu, "smi")) {
pr_err("Missing smi metrics\n");
ret = -1;
goto out;
@@ -1905,8 +2034,7 @@ static int add_default_events(void)
stat_config.metric_no_threshold,
stat_config.user_requested_cpu_list,
stat_config.system_wide,
- stat_config.hardware_aware_grouping,
- &stat_config.metric_events);
+ stat_config.hardware_aware_grouping);
goto out;
}
@@ -1943,8 +2071,7 @@ static int add_default_events(void)
/*metric_no_threshold=*/true,
stat_config.user_requested_cpu_list,
stat_config.system_wide,
- stat_config.hardware_aware_grouping,
- &stat_config.metric_events) < 0) {
+ stat_config.hardware_aware_grouping) < 0) {
ret = -1;
goto out;
}
@@ -1954,95 +2081,52 @@ static int add_default_events(void)
stat_config.topdown_level = 1;
if (!evlist->core.nr_entries && !evsel_list->core.nr_entries) {
- /* No events so add defaults. */
- if (target__has_cpu(&target))
- ret = parse_events(evlist, "cpu-clock", &err);
- else
- ret = parse_events(evlist, "task-clock", &err);
- if (ret)
- goto out;
-
- ret = parse_events(evlist,
- "context-switches,"
- "cpu-migrations,"
- "page-faults,"
- "instructions,"
- "cycles,"
- "stalled-cycles-frontend,"
- "stalled-cycles-backend,"
- "branches,"
- "branch-misses",
- &err);
- if (ret)
- goto out;
-
/*
- * Add TopdownL1 metrics if they exist. To minimize
- * multiplexing, don't request threshold computation.
+ * Add Default metrics. To minimize multiplexing, don't request
+ * threshold computation, but it will be computed if the events
+ * are present.
*/
- if (metricgroup__has_metric(pmu, "Default")) {
- struct evlist *metric_evlist = evlist__new();
+ const char *default_metricgroup_names[] = {
+ "Default", "Default2", "Default3", "Default4",
+ };
+
+ for (size_t i = 0; i < ARRAY_SIZE(default_metricgroup_names); i++) {
+ struct evlist *metric_evlist;
+
+ if (!metricgroup__has_metric_or_groups(pmu, default_metricgroup_names[i]))
+ continue;
+
+ if ((int)i > detailed_run)
+ break;
+ metric_evlist = evlist__new();
if (!metric_evlist) {
ret = -ENOMEM;
- goto out;
+ break;
}
- if (metricgroup__parse_groups(metric_evlist, pmu, "Default",
+ if (metricgroup__parse_groups(metric_evlist, pmu, default_metricgroup_names[i],
/*metric_no_group=*/false,
/*metric_no_merge=*/false,
/*metric_no_threshold=*/true,
stat_config.user_requested_cpu_list,
stat_config.system_wide,
- stat_config.hardware_aware_grouping,
- &stat_config.metric_events) < 0) {
+ stat_config.hardware_aware_grouping) < 0) {
+ evlist__delete(metric_evlist);
ret = -1;
- goto out;
+ break;
}
evlist__for_each_entry(metric_evlist, evsel)
evsel->default_metricgroup = true;
evlist__splice_list_tail(evlist, &metric_evlist->core.entries);
+ metricgroup__copy_metric_events(evlist, /*cgrp=*/NULL,
+ &evlist->metric_events,
+ &metric_evlist->metric_events);
evlist__delete(metric_evlist);
}
- }
+ list_sort(/*priv=*/NULL, &evlist->core.entries, default_evlist_evsel_cmp);
- /* Detailed events get appended to the event list: */
-
- if (!ret && detailed_run >= 1) {
- /*
- * Detailed stats (-d), covering the L1 and last level data
- * caches:
- */
- ret = parse_events(evlist,
- "L1-dcache-loads,"
- "L1-dcache-load-misses,"
- "LLC-loads,"
- "LLC-load-misses",
- &err);
- }
- if (!ret && detailed_run >= 2) {
- /*
- * Very detailed stats (-d -d), covering the instruction cache
- * and the TLB caches:
- */
- ret = parse_events(evlist,
- "L1-icache-loads,"
- "L1-icache-load-misses,"
- "dTLB-loads,"
- "dTLB-load-misses,"
- "iTLB-loads,"
- "iTLB-load-misses",
- &err);
- }
- if (!ret && detailed_run >= 3) {
- /*
- * Very, very detailed stats (-d -d -d), adding prefetch events:
- */
- ret = parse_events(evlist,
- "L1-dcache-prefetches,"
- "L1-dcache-prefetch-misses",
- &err);
}
out:
if (!ret) {
@@ -2051,12 +2135,15 @@ out:
* Make at least one event non-skippable so fatal errors are visible.
* 'cycles' was always the default and non-skippable, so use that.
*/
- if (strcmp("cycles", evsel__name(evsel)))
+ if (!evsel__match(evsel, HARDWARE, HW_CPU_CYCLES))
evsel->skippable = true;
}
}
parse_events_error__exit(&err);
evlist__splice_list_tail(evsel_list, &evlist->core.entries);
+ metricgroup__copy_metric_events(evsel_list, /*cgrp=*/NULL,
+ &evsel_list->metric_events,
+ &evlist->metric_events);
evlist__delete(evlist);
return ret;
}
@@ -2112,18 +2199,20 @@ static int __cmd_record(const struct option stat_options[], struct opt_aggr_mode
return argc;
}
-static int process_stat_round_event(struct perf_session *session,
+static int process_stat_round_event(const struct perf_tool *tool __maybe_unused,
+ struct perf_session *session,
union perf_event *event)
{
struct perf_record_stat_round *stat_round = &event->stat_round;
struct timespec tsh, *ts = NULL;
- const char **argv = session->header.env.cmdline_argv;
- int argc = session->header.env.nr_cmdline;
+ struct perf_env *env = perf_session__env(session);
+ const char **argv = env->cmdline_argv;
+ int argc = env->nr_cmdline;
process_counters();
if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL)
- update_stats(&walltime_nsecs_stats, stat_round->time);
+ update_stats(stat_config.walltime_nsecs_stats, stat_round->time);
if (stat_config.interval && stat_round->time) {
tsh.tv_sec = stat_round->time / NSEC_PER_SEC;
@@ -2136,10 +2225,10 @@ static int process_stat_round_event(struct perf_session *session,
}
static
-int process_stat_config_event(struct perf_session *session,
+int process_stat_config_event(const struct perf_tool *tool,
+ struct perf_session *session,
union perf_event *event)
{
- const struct perf_tool *tool = session->tool;
struct perf_stat *st = container_of(tool, struct perf_stat, tool);
perf_event__read_stat_config(&stat_config, &event->stat_config);
@@ -2185,10 +2274,10 @@ static int set_maps(struct perf_stat *st)
}
static
-int process_thread_map_event(struct perf_session *session,
+int process_thread_map_event(const struct perf_tool *tool,
+ struct perf_session *session __maybe_unused,
union perf_event *event)
{
- const struct perf_tool *tool = session->tool;
struct perf_stat *st = container_of(tool, struct perf_stat, tool);
if (st->threads) {
@@ -2204,10 +2293,10 @@ int process_thread_map_event(struct perf_session *session,
}
static
-int process_cpu_map_event(struct perf_session *session,
+int process_cpu_map_event(const struct perf_tool *tool,
+ struct perf_session *session __maybe_unused,
union perf_event *event)
{
- const struct perf_tool *tool = session->tool;
struct perf_stat *st = container_of(tool, struct perf_stat, tool);
struct perf_cpu_map *cpus;
@@ -2329,6 +2418,32 @@ static void setup_system_wide(int forks)
}
}
+#ifdef HAVE_ARCH_X86_64_SUPPORT
+static int parse_tpebs_mode(const struct option *opt, const char *str,
+ int unset __maybe_unused)
+{
+ enum tpebs_mode *mode = opt->value;
+
+ if (!strcasecmp("mean", str)) {
+ *mode = TPEBS_MODE__MEAN;
+ return 0;
+ }
+ if (!strcasecmp("min", str)) {
+ *mode = TPEBS_MODE__MIN;
+ return 0;
+ }
+ if (!strcasecmp("max", str)) {
+ *mode = TPEBS_MODE__MAX;
+ return 0;
+ }
+ if (!strcasecmp("last", str)) {
+ *mode = TPEBS_MODE__LAST;
+ return 0;
+ }
+ return -1;
+}
+#endif // HAVE_ARCH_X86_64_SUPPORT
+
int cmd_stat(int argc, const char **argv)
{
struct opt_aggr_mode opt_mode = {};
@@ -2433,6 +2548,9 @@ int cmd_stat(int argc, const char **argv)
#ifdef HAVE_ARCH_X86_64_SUPPORT
OPT_BOOLEAN(0, "record-tpebs", &tpebs_recording,
"enable recording for tpebs when retire_latency required"),
+ OPT_CALLBACK(0, "tpebs-mode", &tpebs_mode, "tpebs-mode",
+ "Mode of TPEBS recording: mean, min or max",
+ parse_tpebs_mode),
#endif
OPT_UINTEGER(0, "td-level", &stat_config.topdown_level,
"Set the metrics level for the top-down statistics (0: max level)"),
@@ -2486,6 +2604,7 @@ int cmd_stat(int argc, const char **argv)
unsigned int interval, timeout;
const char * const stat_subcommands[] = { "record", "report" };
char errbuf[BUFSIZ];
+ struct evsel *counter;
setlocale(LC_ALL, "");
@@ -2714,8 +2833,7 @@ int cmd_stat(int argc, const char **argv)
stat_config.metric_no_threshold,
stat_config.user_requested_cpu_list,
stat_config.system_wide,
- stat_config.hardware_aware_grouping,
- &stat_config.metric_events);
+ stat_config.hardware_aware_grouping);
zfree(&metrics);
if (ret) {
@@ -2735,16 +2853,34 @@ int cmd_stat(int argc, const char **argv)
goto out;
}
- if (evlist__expand_cgroup(evsel_list, stat_config.cgroup_list,
- &stat_config.metric_events, true) < 0) {
+ if (evlist__expand_cgroup(evsel_list, stat_config.cgroup_list, true) < 0) {
parse_options_usage(stat_usage, stat_options,
"for-each-cgroup", 0);
goto out;
}
}
-
+#ifdef HAVE_BPF_SKEL
+ if (target.use_bpf && nr_cgroups &&
+ (evsel_list->core.nr_entries / nr_cgroups) > BPERF_CGROUP__MAX_EVENTS) {
+ pr_warning("Disabling BPF counters due to more events (%d) than the max (%d)\n",
+ evsel_list->core.nr_entries / nr_cgroups, BPERF_CGROUP__MAX_EVENTS);
+ target.use_bpf = false;
+ }
+#endif // HAVE_BPF_SKEL
evlist__warn_user_requested_cpus(evsel_list, target.cpu_list);
+ evlist__for_each_entry(evsel_list, counter) {
+ /*
+ * Setup BPF counters to require CPUs as any(-1) isn't
+ * supported. evlist__create_maps below will propagate this
+ * information to the evsels. Note, evsel__is_bperf isn't yet
+ * set up, and this change must happen early, so directly use
+ * the bpf_counter variable and target information.
+ */
+ if ((counter->bpf_counter || target.use_bpf) && !target__has_cpu(&target))
+ counter->core.requires_cpu = true;
+ }
+
if (evlist__create_maps(evsel_list, &target) < 0) {
if (target__has_task(&target)) {
pr_err("Problems finding threads of monitor\n");
@@ -2843,7 +2979,7 @@ int cmd_stat(int argc, const char **argv)
evlist__reset_prev_raw_counts(evsel_list);
status = run_perf_stat(argc, argv, run_idx);
- if (status == -1)
+ if (status < 0)
break;
if (forever && !interval) {
@@ -2884,7 +3020,7 @@ int cmd_stat(int argc, const char **argv)
}
if (!interval) {
- if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL))
+ if (WRITE_STAT_ROUND_EVENT(stat_config.walltime_nsecs_stats->max, FINAL))
pr_err("failed to write stat round event\n");
}
@@ -2911,8 +3047,8 @@ out:
evlist__delete(evsel_list);
- metricgroup__rblist_exit(&stat_config.metric_events);
evlist__close_control(stat_config.ctl_fd, stat_config.ctl_fd_ack, &stat_config.ctl_fd_close);
- return status;
+ /* Only the low byte of status becomes the exit code. */
+ return abs(status);
}
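
The new parse_tpebs_mode() callback above maps the option string onto an enum with a chain of strcasecmp() calls. Below is a minimal, table-driven sketch of the same dispatch; the enum values are copied from the hunk, but parse_mode() and the lookup table are illustrative, not perf API.

#include <stdio.h>
#include <strings.h>

enum tpebs_mode { TPEBS_MODE__MEAN, TPEBS_MODE__MIN, TPEBS_MODE__MAX, TPEBS_MODE__LAST };

/* Illustrative helper: map a mode name onto the enum, -1 on unknown input. */
static int parse_mode(const char *str, enum tpebs_mode *mode)
{
	static const struct { const char *name; enum tpebs_mode mode; } tbl[] = {
		{ "mean", TPEBS_MODE__MEAN }, { "min",  TPEBS_MODE__MIN  },
		{ "max",  TPEBS_MODE__MAX  }, { "last", TPEBS_MODE__LAST },
	};

	for (size_t i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++) {
		if (!strcasecmp(tbl[i].name, str)) {
			*mode = tbl[i].mode;
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	enum tpebs_mode m;

	printf("%d\n", parse_mode("Min", &m) == 0 ? (int)m : -1); /* prints 1 */
	return 0;
}
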
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 068d297aaf44..f8b49d69e9a5 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -1618,7 +1618,7 @@ static int __cmd_timechart(struct timechart *tchart, const char *output_name)
if (IS_ERR(session))
return PTR_ERR(session);
- symbol__init(&session->header.env);
+ symbol__init(perf_session__env(session));
(void)perf_header__process_sections(&session->header,
perf_data__fd(session->data),
@@ -1651,7 +1651,7 @@ out_delete:
return ret;
}
-static int timechart__io_record(int argc, const char **argv)
+static int timechart__io_record(int argc, const char **argv, const char *output_data)
{
unsigned int rec_argc, i;
const char **rec_argv;
@@ -1659,7 +1659,7 @@ static int timechart__io_record(int argc, const char **argv)
char *filter = NULL;
const char * const common_args[] = {
- "record", "-a", "-R", "-c", "1",
+ "record", "-a", "-R", "-c", "1", "-o", output_data,
};
unsigned int common_args_nr = ARRAY_SIZE(common_args);
@@ -1786,7 +1786,8 @@ static int timechart__io_record(int argc, const char **argv)
}
-static int timechart__record(struct timechart *tchart, int argc, const char **argv)
+static int timechart__record(struct timechart *tchart, int argc, const char **argv,
+ const char *output_data)
{
unsigned int rec_argc, i, j;
const char **rec_argv;
@@ -1794,7 +1795,7 @@ static int timechart__record(struct timechart *tchart, int argc, const char **ar
unsigned int record_elems;
const char * const common_args[] = {
- "record", "-a", "-R", "-c", "1",
+ "record", "-a", "-R", "-c", "1", "-o", output_data,
};
unsigned int common_args_nr = ARRAY_SIZE(common_args);
@@ -1934,6 +1935,7 @@ int cmd_timechart(int argc, const char **argv)
.merge_dist = 1000,
};
const char *output_name = "output.svg";
+ const char *output_record_data = "perf.data";
const struct option timechart_common_options[] = {
OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only, "output processes data only"),
@@ -1976,6 +1978,7 @@ int cmd_timechart(int argc, const char **argv)
OPT_BOOLEAN('I', "io-only", &tchart.io_only,
"record only IO data"),
OPT_BOOLEAN('g', "callchain", &tchart.with_backtrace, "record callchain"),
+ OPT_STRING('o', "output", &output_record_data, "file", "output data file name"),
OPT_PARENT(timechart_common_options),
};
const char * const timechart_record_usage[] = {
@@ -2024,9 +2027,9 @@ int cmd_timechart(int argc, const char **argv)
}
if (tchart.io_only)
- ret = timechart__io_record(argc, argv);
+ ret = timechart__io_record(argc, argv, output_record_data);
else
- ret = timechart__record(&tchart, argc, argv);
+ ret = timechart__record(&tchart, argc, argv, output_record_data);
goto out;
} else if (argc)
usage_with_options(timechart_usage, timechart_options);
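
The timechart hunks thread the new -o value into the child record session by appending "-o <file>" to the fixed part of the argument vector before the caller's extra arguments are copied in. A small sketch of that argv assembly, assuming the same shape; build_record_argv() is a hypothetical stand-in, not the perf helper.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical helper: prepend the fixed "record" arguments, including the
 * output file, then forward the user's extra arguments. Caller frees. */
static const char **build_record_argv(int argc, const char **argv,
				      const char *output, int *out_argc)
{
	const char *common[] = { "record", "-a", "-R", "-c", "1", "-o", output };
	const int ncommon = (int)(sizeof(common) / sizeof(common[0]));
	const char **rec = calloc(ncommon + argc + 1, sizeof(*rec));
	int n = 0;

	if (!rec)
		return NULL;
	for (int i = 0; i < ncommon; i++)
		rec[n++] = common[i];
	for (int i = 0; i < argc; i++)
		rec[n++] = argv[i];
	*out_argc = n;
	return rec;
}

int main(void)
{
	const char *extra[] = { "-e", "sched:sched_switch" };
	int n;
	const char **rec = build_record_argv(2, extra, "perf.data", &n);

	for (int i = 0; rec && i < n; i++)
		printf("%s ", rec[i]);
	printf("\n");
	free(rec);
	return 0;
}
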
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 1061f4eebc3f..710604c4f6f6 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -35,6 +35,7 @@
#include "util/mmap.h"
#include "util/session.h"
#include "util/thread.h"
+#include "util/stat.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/top.h"
@@ -642,11 +643,12 @@ repeat:
*/
evlist__for_each_entry(top->evlist, pos) {
struct hists *hists = evsel__hists(pos);
- hists->uid_filter_str = top->record_opts.target.uid_str;
+ hists->uid_filter_str = top->uid_str;
}
ret = evlist__tui_browse_hists(top->evlist, help, &hbt, top->min_percent,
- &top->session->header.env, !top->record_opts.overwrite);
+ perf_session__env(top->session),
+ !top->record_opts.overwrite);
if (ret == K_RELOAD) {
top->zero = true;
goto repeat;
@@ -1252,7 +1254,7 @@ static int __cmd_top(struct perf_top *top)
int ret;
if (!annotate_opts.objdump_path) {
- ret = perf_env__lookup_objdump(&top->session->header.env,
+ ret = perf_env__lookup_objdump(perf_session__env(top->session),
&annotate_opts.objdump_path);
if (ret)
return ret;
@@ -1299,7 +1301,7 @@ static int __cmd_top(struct perf_top *top)
perf_set_multithreaded();
if (perf_hpp_list.socket) {
- ret = perf_env__read_cpu_topology_map(&perf_env);
+ ret = perf_env__read_cpu_topology_map(perf_session__env(top->session));
if (ret < 0) {
char errbuf[BUFSIZ];
const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));
@@ -1309,7 +1311,11 @@ static int __cmd_top(struct perf_top *top)
}
}
- evlist__uniquify_name(top->evlist);
+ /*
+	 * Use the global stat_config, which is zeroed, so aggr_mode is
+	 * AGGR_NONE and hybrid_merge is false.
+ */
+ evlist__uniquify_evsel_names(top->evlist, &stat_config);
ret = perf_top__start_counters(top);
if (ret)
return ret;
@@ -1566,7 +1572,7 @@ int cmd_top(int argc, const char **argv)
"Add prefix to source file path names in programs (with --prefix-strip)"),
OPT_STRING(0, "prefix-strip", &annotate_opts.prefix_strip, "N",
"Strip first N entries of source file path name in programs (with --prefix)"),
- OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"),
+ OPT_STRING('u', "uid", &top.uid_str, "user", "user to profile"),
OPT_CALLBACK(0, "percent-limit", &top, "percent",
"Don't show entries under that percent", parse_percent_limit),
OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
@@ -1618,6 +1624,7 @@ int cmd_top(int argc, const char **argv)
NULL
};
int status = hists__init();
+ struct perf_env host_env;
if (status < 0)
return status;
@@ -1631,14 +1638,19 @@ int cmd_top(int argc, const char **argv)
if (top.evlist == NULL)
return -ENOMEM;
+ perf_env__init(&host_env);
status = perf_config(perf_top_config, &top);
if (status)
- return status;
+ goto out_delete_evlist;
/*
* Since the per arch annotation init routine may need the cpuid, read
* it here, since we are not getting this from the perf.data header.
*/
- status = perf_env__read_cpuid(&perf_env);
+ status = perf_env__set_cmdline(&host_env, argc, argv);
+ if (status)
+ goto out_delete_evlist;
+
+ status = perf_env__read_cpuid(&host_env);
if (status) {
/*
* Some arches do not provide a get_cpuid(), so just use pr_debug, otherwise
@@ -1648,7 +1660,6 @@ int cmd_top(int argc, const char **argv)
"Couldn't read the cpuid for this machine: %s\n",
str_error_r(errno, errbuf, sizeof(errbuf)));
}
- top.evlist->env = &perf_env;
argc = parse_options(argc, argv, options, top_usage, 0);
if (argc)
@@ -1656,18 +1667,24 @@ int cmd_top(int argc, const char **argv)
if (disassembler_style) {
annotate_opts.disassembler_style = strdup(disassembler_style);
- if (!annotate_opts.disassembler_style)
- return -ENOMEM;
+ if (!annotate_opts.disassembler_style) {
+ status = -ENOMEM;
+ goto out_delete_evlist;
+ }
}
if (objdump_path) {
annotate_opts.objdump_path = strdup(objdump_path);
- if (!annotate_opts.objdump_path)
- return -ENOMEM;
+ if (!annotate_opts.objdump_path) {
+ status = -ENOMEM;
+ goto out_delete_evlist;
+ }
}
if (addr2line_path) {
symbol_conf.addr2line_path = strdup(addr2line_path);
- if (!symbol_conf.addr2line_path)
- return -ENOMEM;
+ if (!symbol_conf.addr2line_path) {
+ status = -ENOMEM;
+ goto out_delete_evlist;
+ }
}
status = symbol__validate_sym_arguments();
@@ -1678,11 +1695,13 @@ int cmd_top(int argc, const char **argv)
goto out_delete_evlist;
if (!top.evlist->core.nr_entries) {
- bool can_profile_kernel = perf_event_paranoid_check(1);
- int err = parse_event(top.evlist, can_profile_kernel ? "cycles:P" : "cycles:Pu");
+ struct evlist *def_evlist = evlist__new_default();
- if (err)
+ if (!def_evlist)
goto out_delete_evlist;
+
+ evlist__splice_list_tail(top.evlist, &def_evlist->core.entries);
+ evlist__delete(def_evlist);
}
status = evswitch__init(&top.evswitch, top.evlist, stderr);
@@ -1729,6 +1748,14 @@ int cmd_top(int argc, const char **argv)
if (opts->branch_stack && callchain_param.enabled)
symbol_conf.show_branchflag_count = true;
+ if (opts->branch_stack) {
+ status = perf_env__read_core_pmu_caps(&host_env);
+ if (status) {
+ pr_err("PMU capability data is not available\n");
+ goto out_delete_evlist;
+ }
+ }
+
sort__mode = SORT_MODE__TOP;
/* display thread wants entries to be collapsed in a different tree */
perf_hpp_list.need_collapse = 1;
@@ -1742,7 +1769,17 @@ int cmd_top(int argc, const char **argv)
setup_browser(false);
- if (setup_sorting(top.evlist) < 0) {
+ top.session = __perf_session__new(/*data=*/NULL, /*tool=*/NULL,
+ /*trace_event_repipe=*/false,
+ &host_env);
+ if (IS_ERR(top.session)) {
+ status = PTR_ERR(top.session);
+ top.session = NULL;
+ goto out_delete_evlist;
+ }
+ top.evlist->session = top.session;
+
+ if (setup_sorting(top.evlist, perf_session__env(top.session)) < 0) {
if (sort_order)
parse_options_usage(top_usage, options, "s", 1);
if (field_order)
@@ -1757,15 +1794,17 @@ int cmd_top(int argc, const char **argv)
ui__warning("%s\n", errbuf);
}
- status = target__parse_uid(target);
- if (status) {
- int saved_errno = errno;
-
- target__strerror(target, status, errbuf, BUFSIZ);
- ui__error("%s\n", errbuf);
+ if (top.uid_str) {
+ uid_t uid = parse_uid(top.uid_str);
- status = -saved_errno;
- goto out_delete_evlist;
+ if (uid == UINT_MAX) {
+ ui__error("Invalid User: %s", top.uid_str);
+ status = -EINVAL;
+ goto out_delete_evlist;
+ }
+ status = parse_uid_filter(top.evlist, uid);
+ if (status)
+ goto out_delete_evlist;
}
if (target__none(target))
@@ -1790,7 +1829,7 @@ int cmd_top(int argc, const char **argv)
if (!callchain_param.enabled) {
symbol_conf.cumulate_callchain = false;
- perf_hpp__cancel_cumulate();
+ perf_hpp__cancel_cumulate(top.evlist);
}
if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
@@ -1815,13 +1854,6 @@ int cmd_top(int argc, const char **argv)
signal(SIGWINCH, winch_sig);
}
- top.session = perf_session__new(NULL, NULL);
- if (IS_ERR(top.session)) {
- status = PTR_ERR(top.session);
- top.session = NULL;
- goto out_delete_evlist;
- }
-
if (!evlist__needs_bpf_sb_event(top.evlist))
top.record_opts.no_bpf_event = true;
@@ -1835,7 +1867,7 @@ int cmd_top(int argc, const char **argv)
goto out_delete_evlist;
}
- if (evlist__add_bpf_sb_event(top.sb_evlist, &perf_env)) {
+ if (evlist__add_bpf_sb_event(top.sb_evlist, &host_env)) {
pr_err("Couldn't ask for PERF_RECORD_BPF_EVENT side band events.\n.");
status = -EINVAL;
goto out_delete_evlist;
@@ -1857,6 +1889,7 @@ out_delete_evlist:
evlist__delete(top.evlist);
perf_session__delete(top.session);
annotation_options__exit();
+ perf_env__exit(&host_env);
return status;
}
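
The builtin-top changes pair the new perf_env__init(&host_env) with perf_env__exit() by rerouting the early "return -ENOMEM" paths through the out_delete_evlist label. A toy sketch of that single-cleanup-label discipline, assuming nothing about the real perf_env type; struct env and its helpers are stand-ins.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct env { char *cmdline; };

static void env_init(struct env *e) { e->cmdline = NULL; }
static void env_exit(struct env *e) { free(e->cmdline); e->cmdline = NULL; }

/* After env_init(), every failure must flow through the cleanup label so
 * the init/exit pair stays balanced on all paths. */
static int run(const char *arg)
{
	struct env env;
	int status = 0;

	env_init(&env);
	env.cmdline = strdup(arg);
	if (!env.cmdline) {
		status = -ENOMEM;
		goto out;
	}
	printf("cmdline: %s\n", env.cmdline);
out:
	env_exit(&env);
	return status;
}

int main(void) { return run("perf top") < 0; }
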
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 6ac51925ea42..baee1f695600 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -20,9 +20,6 @@
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>
-#ifdef HAVE_BPF_SKEL
-#include "bpf_skel/augmented_raw_syscalls.skel.h"
-#endif
#endif
#include "util/bpf_map.h"
#include "util/rlimit.h"
@@ -55,6 +52,7 @@
#include "util/thread_map.h"
#include "util/stat.h"
#include "util/tool.h"
+#include "util/trace.h"
#include "util/util.h"
#include "trace/beauty/beauty.h"
#include "trace-event.h"
@@ -141,13 +139,8 @@ struct syscall_fmt {
bool hexret;
};
-enum summary_mode {
- SUMMARY__NONE = 0,
- SUMMARY__BY_TOTAL,
- SUMMARY__BY_THREAD,
-};
-
struct trace {
+ struct perf_env host_env;
struct perf_tool tool;
struct {
/** Sorted syscall numbers used by the trace. */
@@ -160,9 +153,6 @@ struct trace {
*bpf_output;
} events;
} syscalls;
-#ifdef HAVE_BPF_SKEL
- struct augmented_raw_syscalls_bpf *skel;
-#endif
#ifdef HAVE_LIBBPF_SUPPORT
struct btf *btf;
#endif
@@ -205,7 +195,8 @@ struct trace {
} stats;
unsigned int max_stack;
unsigned int min_stack;
- enum summary_mode summary_mode;
+ enum trace_summary_mode summary_mode;
+ int max_summary;
int raw_augmented_syscalls_args_size;
bool raw_augmented_syscalls;
bool fd_path_disabled;
@@ -234,12 +225,14 @@ struct trace {
bool force;
bool vfs_getname;
bool force_btf;
+ bool summary_bpf;
int trace_pgfaults;
char *perfconfig_events;
struct {
struct ordered_events data;
u64 last;
} oe;
+ const char *uid_str;
};
static void trace__load_vmlinux_btf(struct trace *trace __maybe_unused)
@@ -1127,12 +1120,14 @@ static bool syscall_arg__strtoul_btf_type(char *bf __maybe_unused, size_t size _
#define STRARRAY(name, array) \
{ .scnprintf = SCA_STRARRAY, \
.strtoul = STUL_STRARRAY, \
- .parm = &strarray__##array, }
+ .parm = &strarray__##array, \
+ .show_zero = true, }
#define STRARRAY_FLAGS(name, array) \
{ .scnprintf = SCA_STRARRAY_FLAGS, \
.strtoul = STUL_STRARRAY_FLAGS, \
- .parm = &strarray__##array, }
+ .parm = &strarray__##array, \
+ .show_zero = true, }
#include "trace/beauty/eventfd.c"
#include "trace/beauty/futex_op.c"
@@ -1352,7 +1347,7 @@ static const struct syscall_fmt syscall_fmts[] = {
.arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
[2] = { .scnprintf = SCA_FDAT, /* newdirfd */ },
[4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, },
- { .name = "rseq", .errpid = true,
+ { .name = "rseq",
.arg = { [0] = { .from_user = true /* rseq */, }, }, },
{ .name = "rt_sigaction",
.arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
@@ -1376,7 +1371,7 @@ static const struct syscall_fmt syscall_fmts[] = {
{ .name = "sendto",
.arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ },
[4] = SCA_SOCKADDR_FROM_USER(addr), }, },
- { .name = "set_robust_list", .errpid = true,
+ { .name = "set_robust_list",
.arg = { [0] = { .from_user = true /* head */, }, }, },
{ .name = "set_tid_address", .errpid = true, },
{ .name = "setitimer",
@@ -1657,7 +1652,7 @@ static const size_t trace__entry_str_size = 2048;
static void thread_trace__free_files(struct thread_trace *ttrace)
{
- for (int i = 0; i < ttrace->files.max; ++i) {
+ for (int i = 0; i <= ttrace->files.max; ++i) {
struct file *file = ttrace->files.table + i;
zfree(&file->pathname);
}
@@ -1703,6 +1698,7 @@ static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pat
if (file != NULL) {
struct stat st;
+
if (stat(pathname, &st) == 0)
file->dev_maj = major(st.st_rdev);
file->pathname = strdup(pathname);
@@ -1983,17 +1979,24 @@ static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long l
return machine__resolve_kernel_addr(vmachine, addrp, modp);
}
-static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
+static int trace__symbols_init(struct trace *trace, int argc, const char **argv,
+ struct evlist *evlist)
{
int err = symbol__init(NULL);
if (err)
return err;
- trace->host = machine__new_host();
- if (trace->host == NULL)
- return -ENOMEM;
+ perf_env__init(&trace->host_env);
+ err = perf_env__set_cmdline(&trace->host_env, argc, argv);
+ if (err)
+ goto out;
+ trace->host = machine__new_host(&trace->host_env);
+ if (trace->host == NULL) {
+ err = -ENOMEM;
+ goto out;
+ }
thread__set_priv_destructor(thread_trace__delete);
err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
@@ -2002,11 +2005,14 @@ static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
evlist->core.threads, trace__tool_process,
- true, false, 1);
+ /*needs_mmap=*/callchain_param.enabled,
+ /*mmap_data=*/false,
+ /*nr_threads_synthesize=*/1);
out:
- if (err)
+ if (err) {
+ perf_env__exit(&trace->host_env);
symbol__exit();
-
+ }
return err;
}
@@ -2015,6 +2021,7 @@ static void trace__symbols__exit(struct trace *trace)
machine__exit(trace->host);
trace->host = NULL;
+ perf_env__exit(&trace->host_env);
symbol__exit();
}
@@ -2062,6 +2069,15 @@ static const struct syscall_arg_fmt *syscall_arg_fmt__find_by_name(const char *n
return __syscall_arg_fmt__find_by_name(syscall_arg_fmts__by_name, nmemb, name);
}
+/*
+ * The v6.19 kernel added new fields to read userspace memory for event tracing.
+ * They are not used by perf and confuse the syscall parameters.
+ */
+static bool is_internal_field(struct tep_format_field *field)
+{
+ return !strcmp(field->type, "__data_loc char[]");
+}
+
static struct tep_format_field *
syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field *field,
bool *use_btf)
@@ -2070,6 +2086,10 @@ syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field
int len;
for (; field; field = field->next, ++arg) {
+ /* assume it's the last argument */
+ if (is_internal_field(field))
+ continue;
+
last_field = field;
if (arg->scnprintf)
@@ -2138,6 +2158,7 @@ static int syscall__read_info(struct syscall *sc, struct trace *trace)
{
char tp_name[128];
const char *name;
+ struct tep_format_field *field;
int err;
if (sc->nonexistent)
@@ -2194,6 +2215,13 @@ static int syscall__read_info(struct syscall *sc, struct trace *trace)
--sc->nr_args;
}
+ field = sc->args;
+ while (field) {
+ if (is_internal_field(field))
+ --sc->nr_args;
+ field = field->next;
+ }
+
sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat");
@@ -2614,6 +2642,9 @@ static void thread__update_stats(struct thread *thread, struct thread_trace *ttr
struct syscall_stats *stats = NULL;
u64 duration = 0;
+ if (trace->summary_bpf)
+ return;
+
if (trace->summary_mode == SUMMARY__BY_TOTAL)
syscall_stats = trace->syscall_stats;
@@ -2842,7 +2873,7 @@ static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel,
e_machine = thread__e_machine(thread, trace->host);
sc = trace__syscall_info(trace, evsel, e_machine, id);
if (sc == NULL)
- return -1;
+ goto out_put;
ttrace = thread__trace(thread, trace);
/*
* We need to get ttrace just to make sure it is there when syscall__scnprintf_args()
@@ -2891,13 +2922,6 @@ static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sam
return sample__fprintf_callchain(sample, 38, print_opts, get_tls_callchain_cursor(), symbol_conf.bt_stop_list, trace->output);
}
-static const char *errno_to_name(struct evsel *evsel, int err)
-{
- struct perf_env *env = evsel__env(evsel);
-
- return perf_env__arch_strerrno(env, err);
-}
-
static int trace__sys_exit(struct trace *trace, struct evsel *evsel,
union perf_event *event __maybe_unused,
struct perf_sample *sample)
@@ -2983,8 +3007,9 @@ signed_print:
} else if (ret < 0) {
errno_print: {
char bf[STRERR_BUFSIZE];
- const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
- *e = errno_to_name(evsel, -ret);
+ struct perf_env *env = evsel__env(evsel) ?: &trace->host_env;
+ const char *emsg = str_error_r(-ret, bf, sizeof(bf));
+ const char *e = perf_env__arch_strerrno(env, err);
fprintf(trace->output, "-1 %s (%s)", e, emsg);
}
@@ -3005,8 +3030,8 @@ errno_print: {
else if (sc->fmt->errpid) {
struct thread *child = machine__find_thread(trace->host, ret, ret);
+ fprintf(trace->output, "%ld", ret);
if (child != NULL) {
- fprintf(trace->output, "%ld", ret);
if (thread__comm_set(child))
fprintf(trace->output, " (%s)", thread__comm_str(child));
thread__put(child);
@@ -3700,7 +3725,10 @@ out_enomem:
goto out;
}
-#ifdef HAVE_BPF_SKEL
+#ifdef HAVE_LIBBPF_SUPPORT
+
+static struct bpf_program *unaugmented_prog;
+
static int syscall_arg_fmt__cache_btf_struct(struct syscall_arg_fmt *arg_fmt, struct btf *btf, char *type)
{
int id;
@@ -3718,26 +3746,8 @@ static int syscall_arg_fmt__cache_btf_struct(struct syscall_arg_fmt *arg_fmt, st
return 0;
}
-static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
-{
- struct bpf_program *pos, *prog = NULL;
- const char *sec_name;
-
- if (trace->skel->obj == NULL)
- return NULL;
-
- bpf_object__for_each_program(pos, trace->skel->obj) {
- sec_name = bpf_program__section_name(pos);
- if (sec_name && !strcmp(sec_name, name)) {
- prog = pos;
- break;
- }
- }
-
- return prog;
-}
-
-static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc,
+static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace __maybe_unused,
+ struct syscall *sc,
const char *prog_name, const char *type)
{
struct bpf_program *prog;
@@ -3745,19 +3755,19 @@ static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, str
if (prog_name == NULL) {
char default_prog_name[256];
scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->name);
- prog = trace__find_bpf_program_by_title(trace, default_prog_name);
+ prog = augmented_syscalls__find_by_title(default_prog_name);
if (prog != NULL)
goto out_found;
if (sc->fmt && sc->fmt->alias) {
scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->fmt->alias);
- prog = trace__find_bpf_program_by_title(trace, default_prog_name);
+ prog = augmented_syscalls__find_by_title(default_prog_name);
if (prog != NULL)
goto out_found;
}
goto out_unaugmented;
}
- prog = trace__find_bpf_program_by_title(trace, prog_name);
+ prog = augmented_syscalls__find_by_title(prog_name);
if (prog != NULL) {
out_found:
@@ -3767,7 +3777,7 @@ out_found:
pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n",
prog_name, type, sc->name);
out_unaugmented:
- return trace->skel->progs.syscall_unaugmented;
+ return unaugmented_prog;
}
static void trace__init_syscall_bpf_progs(struct trace *trace, int e_machine, int id)
@@ -3784,13 +3794,13 @@ static void trace__init_syscall_bpf_progs(struct trace *trace, int e_machine, in
static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int e_machine, int id)
{
struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, id);
- return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
+ return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(unaugmented_prog);
}
static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int e_machine, int id)
{
struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, id);
- return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
+ return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(unaugmented_prog);
}
static int trace__bpf_sys_enter_beauty_map(struct trace *trace, int e_machine, int key, unsigned int *beauty_array)
@@ -3900,7 +3910,7 @@ try_to_find_pair:
bool is_candidate = false;
if (pair == NULL || pair->id == sc->id ||
- pair->bpf_prog.sys_enter == trace->skel->progs.syscall_unaugmented)
+ pair->bpf_prog.sys_enter == unaugmented_prog)
continue;
for (field = sc->args, candidate_field = pair->args;
@@ -3966,7 +3976,7 @@ try_to_find_pair:
*/
if (pair_prog == NULL) {
pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter");
- if (pair_prog == trace->skel->progs.syscall_unaugmented)
+ if (pair_prog == unaugmented_prog)
goto next_candidate;
}
@@ -3982,12 +3992,17 @@ try_to_find_pair:
static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace, int e_machine)
{
- int map_enter_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_enter);
- int map_exit_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_exit);
- int beauty_map_fd = bpf_map__fd(trace->skel->maps.beauty_map_enter);
+ int map_enter_fd;
+ int map_exit_fd;
+ int beauty_map_fd;
int err = 0;
unsigned int beauty_array[6];
+ if (augmented_syscalls__get_map_fds(&map_enter_fd, &map_exit_fd, &beauty_map_fd) < 0)
+ return -1;
+
+ unaugmented_prog = augmented_syscalls__unaugmented();
+
for (int i = 0, num_idx = syscalltbl__num_idx(e_machine); i < num_idx; ++i) {
int prog_fd, key = syscalltbl__id_at_idx(e_machine, i);
@@ -4057,7 +4072,7 @@ static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace, int e_m
* For now we're just reusing the sys_enter prog, and if it
* already has an augmenter, we don't need to find one.
*/
- if (sc->bpf_prog.sys_enter != trace->skel->progs.syscall_unaugmented)
+ if (sc->bpf_prog.sys_enter != unaugmented_prog)
continue;
/*
@@ -4082,7 +4097,13 @@ static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace, int e_m
return err;
}
-#endif // HAVE_BPF_SKEL
+#else // !HAVE_LIBBPF_SUPPORT
+static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_unused,
+ int e_machine __maybe_unused)
+{
+ return -1;
+}
+#endif // HAVE_LIBBPF_SUPPORT
static int trace__set_ev_qualifier_filter(struct trace *trace)
{
@@ -4091,24 +4112,6 @@ static int trace__set_ev_qualifier_filter(struct trace *trace)
return 0;
}
-static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused,
- size_t npids __maybe_unused, pid_t *pids __maybe_unused)
-{
- int err = 0;
-#ifdef HAVE_LIBBPF_SUPPORT
- bool value = true;
- int map_fd = bpf_map__fd(map);
- size_t i;
-
- for (i = 0; i < npids; ++i) {
- err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY);
- if (err)
- break;
- }
-#endif
- return err;
-}
-
static int trace__set_filter_loop_pids(struct trace *trace)
{
unsigned int nr = 1, err;
@@ -4128,14 +4131,17 @@ static int trace__set_filter_loop_pids(struct trace *trace)
if (!strcmp(thread__comm_str(parent), "sshd") ||
strstarts(thread__comm_str(parent), "gnome-terminal")) {
pids[nr++] = thread__tid(parent);
+ thread__put(parent);
break;
}
+ thread__put(thread);
thread = parent;
}
+ thread__put(thread);
err = evlist__append_tp_filter_pids(trace->evlist, nr, pids);
- if (!err && trace->filter_pids.map)
- err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
+ if (!err)
+ err = augmented_syscalls__set_filter_pids(nr, pids);
return err;
}
@@ -4152,8 +4158,8 @@ static int trace__set_filter_pids(struct trace *trace)
if (trace->filter_pids.nr > 0) {
err = evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
trace->filter_pids.entries);
- if (!err && trace->filter_pids.map) {
- err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
+ if (!err) {
+ err = augmented_syscalls__set_filter_pids(trace->filter_pids.nr,
trace->filter_pids.entries);
}
} else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
@@ -4377,6 +4383,14 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
trace->live = true;
+ if (trace->summary_bpf) {
+ if (trace_prepare_bpf_summary(trace->summary_mode) < 0)
+ goto out_delete_evlist;
+
+ if (trace->summary_only)
+ goto create_maps;
+ }
+
if (!trace->raw_augmented_syscalls) {
if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
goto out_error_raw_syscalls;
@@ -4401,8 +4415,8 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
evlist__add(evlist, pgfault_min);
}
- /* Enable ignoring missing threads when -u/-p option is defined. */
- trace->opts.ignore_missing_thread = trace->opts.target.uid != UINT_MAX || trace->opts.target.pid;
+ /* Enable ignoring missing threads when -p option is defined. */
+ trace->opts.ignore_missing_thread = trace->opts.target.pid;
if (trace->sched &&
evlist__add_newtp(evlist, "sched", "sched_stat_runtime", trace__sched_stat_runtime))
@@ -4435,21 +4449,22 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
if (trace->cgroup)
evlist__set_default_cgroup(trace->evlist, trace->cgroup);
+create_maps:
err = evlist__create_maps(evlist, &trace->opts.target);
if (err < 0) {
fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
goto out_delete_evlist;
}
- err = trace__symbols_init(trace, evlist);
+ err = trace__symbols_init(trace, argc, argv, evlist);
if (err < 0) {
fprintf(trace->output, "Problems initializing symbol libraries!\n");
goto out_delete_evlist;
}
- if (trace->summary_mode == SUMMARY__BY_TOTAL) {
+ if (trace->summary_mode == SUMMARY__BY_TOTAL && !trace->summary_bpf) {
trace->syscall_stats = alloc_syscall_stats();
- if (trace->syscall_stats == NULL)
+ if (IS_ERR(trace->syscall_stats))
goto out_delete_evlist;
}
@@ -4467,41 +4482,18 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
err = evlist__open(evlist);
if (err < 0)
goto out_error_open;
-#ifdef HAVE_BPF_SKEL
- if (trace->syscalls.events.bpf_output) {
- struct perf_cpu cpu;
- /*
- * Set up the __augmented_syscalls__ BPF map to hold for each
- * CPU the bpf-output event's file descriptor.
- */
- perf_cpu_map__for_each_cpu(cpu, i, trace->syscalls.events.bpf_output->core.cpus) {
- int mycpu = cpu.cpu;
-
- bpf_map__update_elem(trace->skel->maps.__augmented_syscalls__,
- &mycpu, sizeof(mycpu),
- xyarray__entry(trace->syscalls.events.bpf_output->core.fd,
- mycpu, 0),
- sizeof(__u32), BPF_ANY);
- }
- }
+ augmented_syscalls__setup_bpf_output();
- if (trace->skel)
- trace->filter_pids.map = trace->skel->maps.pids_filtered;
-#endif
err = trace__set_filter_pids(trace);
if (err < 0)
goto out_error_mem;
-#ifdef HAVE_BPF_SKEL
- if (trace->skel && trace->skel->progs.sys_enter) {
- /*
- * TODO: Initialize for all host binary machine types, not just
- * those matching the perf binary.
- */
- trace__init_syscalls_bpf_prog_array_maps(trace, EM_HOST);
- }
-#endif
+ /*
+ * TODO: Initialize for all host binary machine types, not just
+ * those matching the perf binary.
+ */
+ trace__init_syscalls_bpf_prog_array_maps(trace, EM_HOST);
if (trace->ev_qualifier_ids.nr > 0) {
err = trace__set_ev_qualifier_filter(trace);
@@ -4535,9 +4527,11 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
if (err < 0)
goto out_error_apply_filters;
- err = evlist__mmap(evlist, trace->opts.mmap_pages);
- if (err < 0)
- goto out_error_mmap;
+ if (!trace->summary_only || !trace->summary_bpf) {
+ err = evlist__mmap(evlist, trace->opts.mmap_pages);
+ if (err < 0)
+ goto out_error_mmap;
+ }
if (!target__none(&trace->opts.target) && !trace->opts.target.initial_delay)
evlist__enable(evlist);
@@ -4550,6 +4544,9 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
evlist__enable(evlist);
}
+ if (trace->summary_bpf)
+ trace_start_bpf_summary();
+
trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
perf_thread_map__nr(evlist->core.threads) > 1 ||
evlist__first(evlist)->core.attr.inherit;
@@ -4617,12 +4614,17 @@ out_disable:
evlist__disable(evlist);
+ if (trace->summary_bpf)
+ trace_end_bpf_summary();
+
if (trace->sort_events)
ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);
if (!err) {
if (trace->summary) {
- if (trace->summary_mode == SUMMARY__BY_TOTAL)
+ if (trace->summary_bpf)
+ trace_print_bpf_summary(trace->output, trace->max_summary);
+ else if (trace->summary_mode == SUMMARY__BY_TOTAL)
trace__fprintf_total_summary(trace, trace->output);
else
trace__fprintf_thread_summary(trace, trace->output);
@@ -4638,6 +4640,7 @@ out_disable:
}
out_delete_evlist:
+ trace_cleanup_bpf_summary();
delete_syscall_stats(trace->syscall_stats);
trace__symbols__exit(trace);
evlist__free_syscall_tp_fields(evlist);
@@ -4726,7 +4729,7 @@ static int trace__replay(struct trace *trace)
if (trace->opts.target.tid)
symbol_conf.tid_list_str = strdup(trace->opts.target.tid);
- if (symbol__init(&session->header.env) < 0)
+ if (symbol__init(perf_session__env(session)) < 0)
goto out;
trace->host = &session->machines.host;
@@ -4769,7 +4772,7 @@ static int trace__replay(struct trace *trace)
if (trace->summary_mode == SUMMARY__BY_TOTAL) {
trace->syscall_stats = alloc_syscall_stats();
- if (trace->syscall_stats == NULL)
+ if (IS_ERR(trace->syscall_stats))
goto out;
}
@@ -4843,6 +4846,7 @@ static size_t syscall__dump_stats(struct trace *trace, int e_machine, FILE *fp,
struct hashmap *syscall_stats)
{
size_t printed = 0;
+ int lines = 0;
struct syscall *sc;
struct syscall_entry *entries;
@@ -4887,7 +4891,11 @@ static size_t syscall__dump_stats(struct trace *trace, int e_machine, FILE *fp,
fprintf(fp, "\t\t\t\t%s: %d\n", perf_env__arch_strerrno(trace->host->env, e + 1), stats->errnos[e]);
}
}
+ lines++;
}
+
+ if (trace->max_summary && trace->max_summary <= lines)
+ break;
}
free(entries);
@@ -5279,6 +5287,8 @@ static int trace__parse_summary_mode(const struct option *opt, const char *str,
trace->summary_mode = SUMMARY__BY_THREAD;
} else if (!strcmp(str, "total")) {
trace->summary_mode = SUMMARY__BY_TOTAL;
+ } else if (!strcmp(str, "cgroup")) {
+ trace->summary_mode = SUMMARY__BY_CGROUP;
} else {
pr_err("Unknown summary mode: %s\n", str);
return -1;
@@ -5333,6 +5343,7 @@ out:
static void trace__exit(struct trace *trace)
{
+ thread__zput(trace->current);
strlist__delete(trace->ev_qualifier);
zfree(&trace->ev_qualifier_ids.entries);
if (trace->syscalls.table) {
@@ -5343,24 +5354,13 @@ static void trace__exit(struct trace *trace)
zfree(&trace->perfconfig_events);
evlist__delete(trace->evlist);
trace->evlist = NULL;
+ ordered_events__free(&trace->oe.data);
#ifdef HAVE_LIBBPF_SUPPORT
btf__free(trace->btf);
trace->btf = NULL;
#endif
}
-#ifdef HAVE_BPF_SKEL
-static int bpf__setup_bpf_output(struct evlist *evlist)
-{
- int err = parse_event(evlist, "bpf-output/no-inherit=1,name=__augmented_syscalls__/");
-
- if (err)
- pr_debug("ERROR: failed to create the \"__augmented_syscalls__\" bpf-output event\n");
-
- return err;
-}
-#endif
-
int cmd_trace(int argc, const char **argv)
{
const char *trace_usage[] = {
@@ -5373,7 +5373,6 @@ int cmd_trace(int argc, const char **argv)
struct trace trace = {
.opts = {
.target = {
- .uid = UINT_MAX,
.uses_mmap = true,
},
.user_freq = UINT_MAX,
@@ -5420,8 +5419,7 @@ int cmd_trace(int argc, const char **argv)
"child tasks do not inherit counters"),
OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
"number of mmap data pages", evlist__parse_mmap_pages),
- OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
- "user to profile"),
+ OPT_STRING('u', "uid", &trace.uid_str, "user", "user to profile"),
OPT_CALLBACK(0, "duration", &trace, "float",
"show only events with duration > N.M ms",
trace__set_duration),
@@ -5438,7 +5436,7 @@ int cmd_trace(int argc, const char **argv)
OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary,
"Show errno stats per syscall, use with -s or -S"),
OPT_CALLBACK(0, "summary-mode", &trace, "mode",
- "How to show summary: select thread (default) or total",
+ "How to show summary: select thread (default), total or cgroup",
trace__parse_summary_mode),
OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
"Trace pagefaults", parse_pagefaults, "maj"),
@@ -5473,6 +5471,9 @@ int cmd_trace(int argc, const char **argv)
"start"),
OPT_BOOLEAN(0, "force-btf", &trace.force_btf, "Prefer btf_dump general pretty printer"
"to customized ones"),
+ OPT_BOOLEAN(0, "bpf-summary", &trace.summary_bpf, "Summary syscall stats in BPF"),
+ OPT_INTEGER(0, "max-summary", &trace.max_summary,
+ "Max number of entries in the summary."),
OPTS_EVSWITCH(&trace.evswitch),
OPT_END()
};
@@ -5493,6 +5494,9 @@ int cmd_trace(int argc, const char **argv)
sigchld_act.sa_sigaction = sighandler_chld;
sigaction(SIGCHLD, &sigchld_act, NULL);
+ ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
+ ordered_events__set_copy_on_queue(&trace.oe.data, true);
+
trace.evlist = evlist__new();
if (trace.evlist == NULL) {
@@ -5555,7 +5559,6 @@ int cmd_trace(int argc, const char **argv)
"cgroup monitoring only available in system-wide mode");
}
-#ifdef HAVE_BPF_SKEL
if (!trace.trace_syscalls)
goto skip_augmentation;
@@ -5564,42 +5567,27 @@ int cmd_trace(int argc, const char **argv)
goto skip_augmentation;
}
- trace.skel = augmented_raw_syscalls_bpf__open();
- if (!trace.skel) {
- pr_debug("Failed to open augmented syscalls BPF skeleton");
- } else {
- /*
- * Disable attaching the BPF programs except for sys_enter and
- * sys_exit that tail call into this as necessary.
- */
- struct bpf_program *prog;
-
- bpf_object__for_each_program(prog, trace.skel->obj) {
- if (prog != trace.skel->progs.sys_enter && prog != trace.skel->progs.sys_exit)
- bpf_program__set_autoattach(prog, /*autoattach=*/false);
+ if (trace.summary_bpf) {
+ if (!trace.opts.target.system_wide) {
+ /* TODO: Add filters in the BPF to support other targets. */
+ pr_err("Error: --bpf-summary only works for system-wide mode.\n");
+ goto out;
}
+ if (trace.summary_only)
+ goto skip_augmentation;
+ }
- err = augmented_raw_syscalls_bpf__load(trace.skel);
+ err = augmented_syscalls__prepare();
+ if (err < 0)
+ goto skip_augmentation;
- if (err < 0) {
- libbpf_strerror(err, bf, sizeof(bf));
- pr_debug("Failed to load augmented syscalls BPF skeleton: %s\n", bf);
- } else {
- augmented_raw_syscalls_bpf__attach(trace.skel);
- trace__add_syscall_newtp(&trace);
- }
- }
+ trace__add_syscall_newtp(&trace);
+
+ err = augmented_syscalls__create_bpf_output(trace.evlist);
+ if (err == 0)
+ trace.syscalls.events.bpf_output = evlist__last(trace.evlist);
- err = bpf__setup_bpf_output(trace.evlist);
- if (err) {
- libbpf_strerror(err, bf, sizeof(bf));
- pr_err("ERROR: Setup BPF output event failed: %s\n", bf);
- goto out;
- }
- trace.syscalls.events.bpf_output = evlist__last(trace.evlist);
- assert(evsel__name_is(trace.syscalls.events.bpf_output, "__augmented_syscalls__"));
skip_augmentation:
-#endif
err = -1;
if (trace.trace_pgfaults) {
@@ -5641,11 +5629,6 @@ skip_augmentation:
trace__load_vmlinux_btf(&trace);
}
- if (trace.sort_events) {
- ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
- ordered_events__set_copy_on_queue(&trace.oe.data, true);
- }
-
/*
* If we are augmenting syscalls, then combine what we put in the
* __augmented_syscalls__ BPF map with what is in the
@@ -5741,6 +5724,12 @@ init_augmented_syscall_tp:
symbol_conf.keep_exited_threads = true;
if (trace.summary_mode == SUMMARY__NONE)
trace.summary_mode = SUMMARY__BY_THREAD;
+
+ if (!trace.summary_bpf && trace.summary_mode == SUMMARY__BY_CGROUP) {
+ pr_err("Error: --summary-mode=cgroup only works with --bpf-summary\n");
+ err = -EINVAL;
+ goto out;
+ }
}
if (output_name != NULL) {
@@ -5762,11 +5751,19 @@ init_augmented_syscall_tp:
goto out_close;
}
- err = target__parse_uid(&trace.opts.target);
- if (err) {
- target__strerror(&trace.opts.target, err, bf, sizeof(bf));
- fprintf(trace.output, "%s", bf);
- goto out_close;
+ if (trace.uid_str) {
+ uid_t uid = parse_uid(trace.uid_str);
+
+ if (uid == UINT_MAX) {
+ ui__error("Invalid User: %s", trace.uid_str);
+ err = -EINVAL;
+ goto out_close;
+ }
+ err = parse_uid_filter(trace.evlist, uid);
+ if (err)
+ goto out_close;
+
+ trace.opts.target.system_wide = true;
}
if (!argc && target__none(&trace.opts.target))
@@ -5782,8 +5779,6 @@ out_close:
fclose(trace.output);
out:
trace__exit(&trace);
-#ifdef HAVE_BPF_SKEL
- augmented_raw_syscalls_bpf__destroy(trace.skel);
-#endif
+ augmented_syscalls__cleanup();
return err;
}
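
The trace__set_filter_loop_pids() hunk adds thread__put() calls so the reference on each visited thread is dropped once its parent has been picked up. A toy sketch of that refcounted parent walk; struct node and its helpers stand in for thread__get()/thread__put(), and the counted child-to-parent reference the real code holds is omitted for brevity.

#include <stdio.h>
#include <stdlib.h>

struct node {
	int refcnt;
	struct node *parent;
};

static struct node *node_get(struct node *n) { if (n) n->refcnt++; return n; }
static void node_put(struct node *n) { if (n && --n->refcnt == 0) free(n); }

/* Walk toward the root, releasing each visited node only after its parent
 * has been acquired; the caller must node_put() the returned node. */
static struct node *walk_to_root(struct node *n)
{
	n = node_get(n);
	while (n && n->parent) {
		struct node *parent = node_get(n->parent);

		node_put(n);
		n = parent;
	}
	return n;
}

int main(void)
{
	struct node *root = calloc(1, sizeof(*root));
	struct node *child = calloc(1, sizeof(*child));

	if (!root || !child)
		return 1;
	root->refcnt = 1;
	child->refcnt = 1;
	child->parent = root; /* toy: no counted child->parent reference */

	struct node *top = walk_to_root(child);

	printf("reached root: %s\n", top == root ? "yes" : "no");
	node_put(top);
	node_put(child);
	node_put(root);
	return 0;
}
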
diff --git a/tools/perf/builtin-version.c b/tools/perf/builtin-version.c
index e149d96c6dc5..10f25c6705b1 100644
--- a/tools/perf/builtin-version.c
+++ b/tools/perf/builtin-version.c
@@ -26,38 +26,10 @@ static const char * const version_usage[] = {
NULL
};
-static void on_off_print(const char *status)
-{
- printf("[ ");
-
- if (!strcmp(status, "OFF"))
- color_fprintf(stdout, PERF_COLOR_RED, "%-3s", status);
- else
- color_fprintf(stdout, PERF_COLOR_GREEN, "%-3s", status);
-
- printf(" ]");
-}
-
-static void status_print(const char *name, const char *macro,
- const char *status)
-{
- printf("%22s: ", name);
- on_off_print(status);
- printf(" # %s\n", macro);
-}
-
-#define STATUS(feature) \
-do { \
- if (feature.is_builtin) \
- status_print(feature.name, feature.macro, "on"); \
- else \
- status_print(feature.name, feature.macro, "OFF"); \
-} while (0)
-
static void library_status(void)
{
for (int i = 0; supported_features[i].name; ++i)
- STATUS(supported_features[i]);
+ feature_status__printf(&supported_features[i]);
}
int cmd_version(int argc, const char **argv)
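
The version command now drives its feature listing through feature_status__printf() and the supported_features table instead of a local STATUS macro. A minimal sketch of the table-driven layout; the rows below are made-up examples and feature_print() only approximates the real colored on/OFF output.

#include <stdio.h>

struct feature_status {
	const char *name;
	const char *macro;
	int is_builtin;
};

/* Approximation of the per-feature line: name, on/OFF box, macro comment. */
static void feature_print(const struct feature_status *f)
{
	printf("%22s: [ %-3s ] # %s\n", f->name,
	       f->is_builtin ? "on" : "OFF", f->macro);
}

int main(void)
{
	static const struct feature_status features[] = {
		{ "dwarf",  "HAVE_LIBDW_SUPPORT",  1 }, /* example rows only */
		{ "libbfd", "HAVE_LIBBFD_SUPPORT", 0 },
		{ NULL, NULL, 0 },
	};

	for (int i = 0; features[i].name; i++)
		feature_print(&features[i]);
	return 0;
}
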
diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h
index a07e93c53848..40c4078c295f 100644
--- a/tools/perf/builtin.h
+++ b/tools/perf/builtin.h
@@ -5,15 +5,14 @@
struct feature_status {
const char *name;
const char *macro;
+ const char *tip;
int is_builtin;
};
-#define FEATURE_STATUS(name_, macro_) { \
- .name = name_, \
- .macro = #macro_, \
- .is_builtin = IS_BUILTIN(macro_) }
-
extern struct feature_status supported_features[];
+
+void feature_status__printf(const struct feature_status *feature);
+
struct cmdnames;
void list_common_cmds_help(void);
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index e9fab20e9330..e0537f275da2 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -4,18 +4,23 @@
YELLOW='\033[0;33m'
NC='\033[0m' # No Color
-declare -a FILES
-FILES=(
+declare -a FILES=(
"include/uapi/linux/const.h"
"include/uapi/drm/drm.h"
"include/uapi/drm/i915_drm.h"
"include/uapi/linux/bits.h"
"include/uapi/linux/fadvise.h"
"include/uapi/linux/fscrypt.h"
+ "include/uapi/linux/genetlink.h"
+ "include/uapi/linux/if_addr.h"
+ "include/uapi/linux/in.h"
"include/uapi/linux/kcmp.h"
"include/uapi/linux/kvm.h"
- "include/uapi/linux/in.h"
+ "include/uapi/linux/neighbour.h"
+ "include/uapi/linux/netfilter.h"
+ "include/uapi/linux/netfilter_arp.h"
"include/uapi/linux/perf_event.h"
+ "include/uapi/linux/rtnetlink.h"
"include/uapi/linux/seccomp.h"
"include/uapi/linux/stat.h"
"include/linux/bits.h"
@@ -24,6 +29,7 @@ FILES=(
"include/linux/const.h"
"include/vdso/const.h"
"include/vdso/unaligned.h"
+ "include/linux/gfp_types.h"
"include/linux/hash.h"
"include/linux/list-sort.h"
"include/uapi/linux/hw_breakpoint.h"
@@ -41,15 +47,12 @@ FILES=(
"arch/s390/include/uapi/asm/perf_regs.h"
"arch/x86/include/uapi/asm/perf_regs.h"
"arch/x86/include/uapi/asm/kvm.h"
- "arch/x86/include/uapi/asm/kvm_perf.h"
"arch/x86/include/uapi/asm/svm.h"
"arch/x86/include/uapi/asm/unistd.h"
"arch/x86/include/uapi/asm/vmx.h"
"arch/powerpc/include/uapi/asm/kvm.h"
"arch/s390/include/uapi/asm/kvm.h"
- "arch/s390/include/uapi/asm/kvm_perf.h"
"arch/s390/include/uapi/asm/sie.h"
- "arch/arm/include/uapi/asm/kvm.h"
"arch/arm64/include/uapi/asm/kvm.h"
"arch/arm64/include/uapi/asm/unistd.h"
"arch/alpha/include/uapi/asm/errno.h"
@@ -73,8 +76,7 @@ FILES=(
"scripts/syscall.tbl"
)
-declare -a SYNC_CHECK_FILES
-SYNC_CHECK_FILES=(
+declare -a SYNC_CHECK_FILES=(
"arch/x86/include/asm/inat.h"
"arch/x86/include/asm/insn.h"
"arch/x86/lib/inat.c"
@@ -86,8 +88,7 @@ SYNC_CHECK_FILES=(
# tables that then gets included in .c files for things like id->string syscall
# tables (and the reverse lookup as well: string -> id)
-declare -a BEAUTY_FILES
-BEAUTY_FILES=(
+declare -a BEAUTY_FILES=(
"arch/x86/include/asm/irq_vectors.h"
"arch/x86/include/uapi/asm/prctl.h"
"include/linux/socket.h"
@@ -186,7 +187,7 @@ done
# diff with extra ignore lines
check arch/x86/lib/memcpy_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -I"^SYM_FUNC_START\(_LOCAL\)*(memcpy_\(erms\|orig\))" -I"^#include <linux/cfi_types.h>"'
check arch/x86/lib/memset_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -I"^SYM_FUNC_START\(_LOCAL\)*(memset_\(erms\|orig\))"'
-check arch/x86/include/asm/amd/ibs.h '-I "^#include [<\"]\(asm/\)*msr-index.h"'
+check arch/x86/include/asm/amd/ibs.h '-I "^#include .*/msr-index.h"'
check arch/arm64/include/asm/cputype.h '-I "^#include [<\"]\(asm/\)*sysreg.h"'
check include/linux/unaligned.h '-I "^#include <linux/unaligned/packed_struct.h>" -I "^#include <asm/byteorder.h>" -I "^#pragma GCC diagnostic"'
check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common\(-tools\)*.h>"'
diff --git a/tools/perf/include/perf/perf_dlfilter.h b/tools/perf/include/perf/perf_dlfilter.h
index 16fc4568ac53..2d3540ed3c58 100644
--- a/tools/perf/include/perf/perf_dlfilter.h
+++ b/tools/perf/include/perf/perf_dlfilter.h
@@ -87,7 +87,7 @@ struct perf_dlfilter_al {
__u8 is_64_bit; /* Only valid if dso is not NULL */
__u8 is_kernel_ip; /* True if in kernel space */
__u32 buildid_size;
- __u8 *buildid;
+ const __u8 *buildid;
/* Below members are only populated by resolve_ip() */
__u8 filtered; /* True if this sample event will be filtered out */
const char *comm;
diff --git a/tools/perf/jvmti/libjvmti.c b/tools/perf/jvmti/libjvmti.c
index fcca275e5bf9..82514e6532b8 100644
--- a/tools/perf/jvmti/libjvmti.c
+++ b/tools/perf/jvmti/libjvmti.c
@@ -141,11 +141,11 @@ copy_class_filename(const char * class_sign, const char * file_name, char * resu
* Assume path name is class hierarchy, this is a common practice with Java programs
*/
if (*class_sign == 'L') {
- int j, i = 0;
+ size_t j, i = 0;
char *p = strrchr(class_sign, '/');
if (p) {
/* drop the 'L' prefix and copy up to the final '/' */
- for (i = 0; i < (p - class_sign); i++)
+ for (i = 0; i < (size_t)(p - class_sign); i++)
result[i] = class_sign[i+1];
}
/*
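
The libjvmti fix above switches the loop indices to size_t and casts the pointer difference so both sides of the comparison are unsigned. A self-contained sketch of the same copy with those types; copy_package() is illustrative and simplified from copy_class_filename().

#include <stdio.h>
#include <string.h>

/* Copy the package path of an "L<pkg>/<Class>;" JVM signature, dropping the
 * leading 'L' and comparing the pointer difference as size_t. */
static void copy_package(const char *sig, char *out, size_t outsz)
{
	const char *p = strrchr(sig, '/');
	size_t i = 0;

	if (p) {
		for (i = 0; i < (size_t)(p - sig) && i + 1 < outsz; i++)
			out[i] = sig[i + 1];
	}
	out[i] = '\0';
}

int main(void)
{
	char buf[64];

	copy_package("Ljava/lang/String;", buf, sizeof(buf));
	printf("%s\n", buf); /* prints "java/lang/" */
	return 0;
}
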
diff --git a/tools/perf/perf-archive.sh b/tools/perf/perf-archive.sh
index 6ed7e52ab881..7977e9b0a5ea 100755
--- a/tools/perf/perf-archive.sh
+++ b/tools/perf/perf-archive.sh
@@ -16,6 +16,13 @@ while [ $# -gt 0 ] ; do
elif [ $1 == "--unpack" ]; then
UNPACK=1
shift
+ elif [ $1 == "--exclude-buildids" ]; then
+ EXCLUDE_BUILDIDS="$2"
+ if [ ! -e "$EXCLUDE_BUILDIDS" ]; then
+ echo "Provided exclude-buildids file $EXCLUDE_BUILDIDS does not exist"
+ exit 1
+ fi
+ shift 2
else
PERF_DATA=$1
UNPACK_TAR=$1
@@ -86,11 +93,29 @@ fi
BUILDIDS=$(mktemp /tmp/perf-archive-buildids.XXXXXX)
-perf buildid-list -i $PERF_DATA --with-hits | grep -v "^ " > $BUILDIDS
-if [ ! -s $BUILDIDS ] ; then
- echo "perf archive: no build-ids found"
- rm $BUILDIDS || true
- exit 1
+#
+# EXCLUDE_BUILDIDS is an optional file that contains build-ids to be excluded from the
+# archive. It is a list of build-ids, one per line, without any leading or trailing spaces.
+# If the file is empty, all build-ids will be included in the archive. To create a exclude-
+# buildids file, you can use the following command:
+# perf buildid-list -i perf.data --with-hits | grep -v "^ " > exclude_buildids.txt
+# You can edit the file and delete the lines for build-ids that should stay in the archive, then:
+# perf archive --exclude-buildids exclude_buildids.txt
+#
+if [ -s "$EXCLUDE_BUILDIDS" ]; then
+ perf buildid-list -i $PERF_DATA --with-hits | grep -v "^ " | grep -Fv -f $EXCLUDE_BUILDIDS > $BUILDIDS
+ if [ ! -s "$BUILDIDS" ] ; then
+ echo "perf archive: no build-ids found after applying exclude-buildids file"
+ rm $BUILDIDS || true
+ exit 1
+ fi
+else
+ perf buildid-list -i $PERF_DATA --with-hits | grep -v "^ " > $BUILDIDS
+ if [ ! -s "$BUILDIDS" ] ; then
+ echo "perf archive: no build-ids found"
+ rm $BUILDIDS || true
+ exit 1
+ fi
fi
MANIFEST=$(mktemp /tmp/perf-archive-manifest.XXXXXX)
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index f0617cc41f5f..88c60ecf3395 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -346,12 +346,9 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
use_pager = 1;
commit_pager_choice();
- perf_env__init(&perf_env);
- perf_env__set_cmdline(&perf_env, argc, argv);
status = p->fn(argc, argv);
perf_config__exit();
exit_browser(status);
- perf_env__exit(&perf_env);
if (status)
return status & 0xff;
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 3cb40965549f..e004178472d9 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -2,9 +2,7 @@
#ifndef _PERF_PERF_H
#define _PERF_PERF_H
-#ifndef MAX_NR_CPUS
#define MAX_NR_CPUS 4096
-#endif
enum perf_affinity {
PERF_AFFINITY_SYS = 0,
diff --git a/tools/perf/pmu-events/Build b/tools/perf/pmu-events/Build
index 32f387d48908..a46ab7b612df 100644
--- a/tools/perf/pmu-events/Build
+++ b/tools/perf/pmu-events/Build
@@ -1,7 +1,5 @@
pmu-events-y += pmu-events.o
-JDIR = pmu-events/arch/$(SRCARCH)
-JSON = $(shell [ -d $(JDIR) ] && \
- find $(JDIR) -name '*.json' -o -name 'mapfile.csv')
+JSON = $(shell find pmu-events/arch -name '*.json' -o -name '*.csv')
JDIR_TEST = pmu-events/arch/test
JSON_TEST = $(shell [ -d $(JDIR_TEST) ] && \
find $(JDIR_TEST) -name '*.json')
@@ -13,6 +11,8 @@ PMU_EVENTS_C = $(OUTPUT)pmu-events/pmu-events.c
METRIC_TEST_LOG = $(OUTPUT)pmu-events/metric_test.log
TEST_EMPTY_PMU_EVENTS_C = $(OUTPUT)pmu-events/test-empty-pmu-events.c
EMPTY_PMU_EVENTS_TEST_LOG = $(OUTPUT)pmu-events/empty-pmu-events.log
+LEGACY_CACHE_PY = pmu-events/make_legacy_cache.py
+LEGACY_CACHE_JSON = $(OUTPUT)pmu-events/arch/common/common/legacy-cache.json
ifeq ($(JEVENTS_ARCH),)
JEVENTS_ARCH=$(SRCARCH)
@@ -29,13 +29,26 @@ $(PMU_EVENTS_C): $(EMPTY_PMU_EVENTS_C)
$(call rule_mkdir)
$(Q)$(call echo-cmd,gen)cp $< $@
else
+# Copy checked-in json to OUTPUT for generation if it's an out of source build
+ifneq ($(OUTPUT),)
+$(OUTPUT)pmu-events/arch/%: pmu-events/arch/%
+ $(call rule_mkdir)
+ $(Q)$(call echo-cmd,gen)cp $< $@
+endif
+
+$(LEGACY_CACHE_JSON): $(LEGACY_CACHE_PY)
+ $(call rule_mkdir)
+ $(Q)$(call echo-cmd,gen)$(PYTHON) $(LEGACY_CACHE_PY) > $@
+
+GEN_JSON = $(patsubst %,$(OUTPUT)%,$(JSON)) $(LEGACY_CACHE_JSON)
+
$(METRIC_TEST_LOG): $(METRIC_TEST_PY) $(METRIC_PY)
$(call rule_mkdir)
$(Q)$(call echo-cmd,test)$(PYTHON) $< 2> $@ || (cat $@ && false)
-$(TEST_EMPTY_PMU_EVENTS_C): $(JSON) $(JSON_TEST) $(JEVENTS_PY) $(METRIC_PY) $(METRIC_TEST_LOG)
+$(TEST_EMPTY_PMU_EVENTS_C): $(GEN_JSON) $(JSON_TEST) $(JEVENTS_PY) $(METRIC_PY) $(METRIC_TEST_LOG)
$(call rule_mkdir)
- $(Q)$(call echo-cmd,gen)$(PYTHON) $(JEVENTS_PY) none none pmu-events/arch $@
+ $(Q)$(call echo-cmd,gen)$(PYTHON) $(JEVENTS_PY) none none $(OUTPUT)pmu-events/arch $@
$(EMPTY_PMU_EVENTS_TEST_LOG): $(EMPTY_PMU_EVENTS_C) $(TEST_EMPTY_PMU_EVENTS_C)
$(call rule_mkdir)
@@ -63,10 +76,10 @@ $(OUTPUT)%.pylint_log: %
$(call rule_mkdir)
$(Q)$(call echo-cmd,test)pylint "$<" > $@ || (cat $@ && rm $@ && false)
-$(PMU_EVENTS_C): $(JSON) $(JSON_TEST) $(JEVENTS_PY) $(METRIC_PY) $(METRIC_TEST_LOG) \
+$(PMU_EVENTS_C): $(GEN_JSON) $(JSON_TEST) $(JEVENTS_PY) $(METRIC_PY) $(METRIC_TEST_LOG) \
$(EMPTY_PMU_EVENTS_TEST_LOG) $(PMU_EVENTS_MYPY_TEST_LOGS) $(PMU_EVENTS_PYLINT_TEST_LOGS)
$(call rule_mkdir)
- $(Q)$(call echo-cmd,gen)$(PYTHON) $(JEVENTS_PY) $(JEVENTS_ARCH) $(JEVENTS_MODEL) pmu-events/arch $@
+ $(Q)$(call echo-cmd,gen)$(PYTHON) $(JEVENTS_PY) $(JEVENTS_ARCH) $(JEVENTS_MODEL) $(OUTPUT)pmu-events/arch $@
endif
# pmu-events.c file is generated in the OUTPUT directory so it needs a
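For out-of-source builds the checked-in JSON is now mirrored under $(OUTPUT) before jevents.py runs, so generation and the new legacy-cache.json land in one tree; a minimal sketch (the output directory is a placeholder):

    # Build perf out of tree; pmu-events/arch is copied into the
    # output directory and jevents.py consumes the mirrored path.
    make -C tools/perf O=/tmp/perf-build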
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json
index afcdad58ef89..324104438e78 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json
@@ -113,7 +113,7 @@
{
"MetricName": "load_store_spec_rate",
"MetricExpr": "((LDST_SPEC / INST_SPEC) * 100)",
- "BriefDescription": "The rate of load or store instructions speculatively executed to overall instructions speclatively executed",
+ "BriefDescription": "The rate of load or store instructions speculatively executed to overall instructions speculatively executed",
"MetricGroup": "Operation_Mix",
"ScaleUnit": "1percent of operations"
},
@@ -132,7 +132,7 @@
{
"MetricName": "pc_write_spec_rate",
"MetricExpr": "((PC_WRITE_SPEC / INST_SPEC) * 100)",
- "BriefDescription": "The rate of software change of the PC speculatively executed to overall instructions speclatively executed",
+ "BriefDescription": "The rate of software change of the PC speculatively executed to overall instructions speculatively executed",
"MetricGroup": "Operation_Mix",
"ScaleUnit": "1percent of operations"
},
@@ -195,14 +195,14 @@
{
"MetricName": "stall_frontend_cache_rate",
"MetricExpr": "((STALL_FRONTEND_CACHE / CPU_CYCLES) * 100)",
- "BriefDescription": "Proportion of cycles stalled and no ops delivered from frontend and cache miss",
+ "BriefDescription": "Proportion of cycles stalled and no operations delivered from frontend and cache miss",
"MetricGroup": "Stall",
"ScaleUnit": "1percent of cycles"
},
{
"MetricName": "stall_frontend_tlb_rate",
"MetricExpr": "((STALL_FRONTEND_TLB / CPU_CYCLES) * 100)",
- "BriefDescription": "Proportion of cycles stalled and no ops delivered from frontend and TLB miss",
+ "BriefDescription": "Proportion of cycles stalled and no operations delivered from frontend and TLB miss",
"MetricGroup": "Stall",
"ScaleUnit": "1percent of cycles"
},
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json
index 5228f94a793f..a29aadc9b2e3 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json
@@ -113,7 +113,7 @@
{
"MetricName": "load_store_spec_rate",
"MetricExpr": "LDST_SPEC / INST_SPEC",
- "BriefDescription": "The rate of load or store instructions speculatively executed to overall instructions speclatively executed",
+ "BriefDescription": "The rate of load or store instructions speculatively executed to overall instructions speculatively executed",
"MetricGroup": "Operation_Mix",
"ScaleUnit": "100percent of operations"
},
@@ -132,7 +132,7 @@
{
"MetricName": "pc_write_spec_rate",
"MetricExpr": "PC_WRITE_SPEC / INST_SPEC",
- "BriefDescription": "The rate of software change of the PC speculatively executed to overall instructions speclatively executed",
+ "BriefDescription": "The rate of software change of the PC speculatively executed to overall instructions speculatively executed",
"MetricGroup": "Operation_Mix",
"ScaleUnit": "100percent of operations"
},
@@ -195,14 +195,14 @@
{
"MetricName": "stall_frontend_cache_rate",
"MetricExpr": "STALL_FRONTEND_CACHE / CPU_CYCLES",
- "BriefDescription": "Proportion of cycles stalled and no ops delivered from frontend and cache miss",
+ "BriefDescription": "Proportion of cycles stalled and no operations delivered from frontend and cache miss",
"MetricGroup": "Stall",
"ScaleUnit": "100percent of cycles"
},
{
"MetricName": "stall_frontend_tlb_rate",
"MetricExpr": "STALL_FRONTEND_TLB / CPU_CYCLES",
- "BriefDescription": "Proportion of cycles stalled and no ops delivered from frontend and TLB miss",
+ "BriefDescription": "Proportion of cycles stalled and no operations delivered from frontend and TLB miss",
"MetricGroup": "Stall",
"ScaleUnit": "100percent of cycles"
},
@@ -388,55 +388,55 @@
"MetricExpr": "L1D_CACHE_RW / L1D_CACHE",
"BriefDescription": "L1D cache access - demand",
"MetricGroup": "Cache",
- "ScaleUnit": "100percent of cache acceses"
+ "ScaleUnit": "100percent of cache accesses"
},
{
- "MetricName": "l1d_cache_access_prefetces",
+ "MetricName": "l1d_cache_access_prefetches",
"MetricExpr": "L1D_CACHE_PRFM / L1D_CACHE",
"BriefDescription": "L1D cache access - prefetch",
"MetricGroup": "Cache",
- "ScaleUnit": "100percent of cache acceses"
+ "ScaleUnit": "100percent of cache accesses"
},
{
"MetricName": "l1d_cache_demand_misses",
"MetricExpr": "L1D_CACHE_REFILL_RW / L1D_CACHE",
"BriefDescription": "L1D cache demand misses",
"MetricGroup": "Cache",
- "ScaleUnit": "100percent of cache acceses"
+ "ScaleUnit": "100percent of cache accesses"
},
{
"MetricName": "l1d_cache_demand_misses_read",
"MetricExpr": "L1D_CACHE_REFILL_RD / L1D_CACHE",
"BriefDescription": "L1D cache demand misses - read",
"MetricGroup": "Cache",
- "ScaleUnit": "100percent of cache acceses"
+ "ScaleUnit": "100percent of cache accesses"
},
{
"MetricName": "l1d_cache_demand_misses_write",
"MetricExpr": "L1D_CACHE_REFILL_WR / L1D_CACHE",
"BriefDescription": "L1D cache demand misses - write",
"MetricGroup": "Cache",
- "ScaleUnit": "100percent of cache acceses"
+ "ScaleUnit": "100percent of cache accesses"
},
{
"MetricName": "l1d_cache_prefetch_misses",
"MetricExpr": "L1D_CACHE_REFILL_PRFM / L1D_CACHE",
"BriefDescription": "L1D cache prefetch misses",
"MetricGroup": "Cache",
- "ScaleUnit": "100percent of cache acceses"
+ "ScaleUnit": "100percent of cache accesses"
},
{
"MetricName": "ase_scalar_mix",
"MetricExpr": "ASE_SCALAR_SPEC / OP_SPEC",
"BriefDescription": "Proportion of advanced SIMD data processing operations (excluding DP_SPEC/LD_SPEC) scalar operations",
"MetricGroup": "Instructions",
- "ScaleUnit": "100percent of cache acceses"
+ "ScaleUnit": "100percent of cache accesses"
},
{
"MetricName": "ase_vector_mix",
"MetricExpr": "ASE_VECTOR_SPEC / OP_SPEC",
"BriefDescription": "Proportion of advanced SIMD data processing operations (excluding DP_SPEC/LD_SPEC) vector operations",
"MetricGroup": "Instructions",
- "ScaleUnit": "100percent of cache acceses"
+ "ScaleUnit": "100percent of cache accesses"
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json
index 4cc50b7da526..4001cc5753a7 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json
@@ -81,7 +81,7 @@
"BriefDescription": "L2D TLB access"
},
{
- "PublicDescription": "Level 2 access to instruciton TLB that caused a page table walk. This event counts on any instruciton access which causes L2I_TLB_REFILL to count",
+ "PublicDescription": "Level 2 access to instruction TLB that caused a page table walk. This event counts on any instruction access which causes L2I_TLB_REFILL to count",
"EventCode": "0x35",
"EventName": "L2I_TLB_ACCESS",
"BriefDescription": "L2I TLB access"
diff --git a/tools/perf/pmu-events/arch/arm64/common-and-microarch.json b/tools/perf/pmu-events/arch/arm64/common-and-microarch.json
index e40be37addf8..2416d9f8a83d 100644
--- a/tools/perf/pmu-events/arch/arm64/common-and-microarch.json
+++ b/tools/perf/pmu-events/arch/arm64/common-and-microarch.json
@@ -1833,5 +1833,75 @@
"EventCode": "0x8324",
"EventName": "L1I_CACHE_REFILL_PERCYC",
"BriefDescription": "Level 1 instruction or unified cache refills in progress."
+ },
+ {
+ "EventCode": "0x8431",
+ "EventName": "ASE_FP_VREDUCE_SPEC",
+ "BriefDescription": "Floating-point operation_speculatively_executed, Advanced SIMD pairwise or reduction."
+ },
+ {
+ "EventCode": "0x8432",
+ "EventName": "SVE_FP_PREDUCE_SPEC",
+ "BriefDescription": "Floating-point operation_speculatively_executed, Advanced SIMD pairwise add step or pairwise reduce step."
+ },
+ {
+ "EventCode": "0x8443",
+ "EventName": "ASE_FP_BF16_MIN_SPEC",
+ "BriefDescription": "Advanced SIMD data processing operation speculatively_executed, smallest type is BFloat16 floating-point."
+ },
+ {
+ "EventCode": "0x8444",
+ "EventName": "ASE_FP_FP8_MIN_SPEC",
+ "BriefDescription": "Advanced SIMD data processing operation speculatively_executed, smallest type is 8-bit floating-point."
+ },
+ {
+ "EventCode": "0x844B",
+ "EventName": "ASE_SVE_FP_BF16_MIN_SPEC",
+ "BriefDescription": "Advanced SIMD data processing or SVE data processing operation speculatively_executed, smallest type is BFloat16 floating-point."
+ },
+ {
+ "EventCode": "0x844C",
+ "EventName": "ASE_SVE_FP_FP8_MIN_SPEC",
+ "BriefDescription": "Advanced SIMD data processing or SVE data processing operation speculatively_executed, smallest type is 8-bit floating-point."
+ },
+ {
+ "EventCode": "0x8463",
+ "EventName": "SVE_FP_BF16_MIN_SPEC",
+ "BriefDescription": "SVE data processing operation speculatively_executed, smallest type is BFloat16 floating-point."
+ },
+ {
+ "EventCode": "0x8464",
+ "EventName": "SVE_FP_FP8_MIN_SPEC",
+ "BriefDescription": "SVE data processing operation speculatively_executed, smallest type is 8-bit floating-point."
+ },
+ {
+ "EventCode": "0x8473",
+ "EventName": "FP_BF16_MIN_SPEC",
+ "BriefDescription": "Floating-point operation speculatively_executed, smallest type is BFloat16 floating-point."
+ },
+ {
+ "EventCode": "0x8474",
+ "EventName": "FP_FP8_MIN_SPEC",
+ "BriefDescription": "Floating-point operation speculatively_executed, smallest type is 8-bit floating-point."
+ },
+ {
+ "EventCode": "0x8483",
+ "EventName": "FP_BF16_FIXED_MIN_OPS_SPEC",
+ "BriefDescription": "Non-scalable element arithmetic operations speculatively executed, smallest type is BFloat16 floating-point."
+ },
+ {
+ "EventCode": "0x8484",
+ "EventName": "FP_FP8_FIXED_MIN_OPS_SPEC",
+ "BriefDescription": "Non-scalable element arithmetic operations speculatively executed, smallest type is 8-bit floating-point."
+ },
+ {
+ "EventCode": "0x848B",
+ "EventName": "FP_BF16_SCALE_MIN_OPS_SPEC",
+ "BriefDescription": "Scalable element arithmetic operations speculatively executed, smallest type is BFloat16 floating-point."
+ },
+ {
+ "EventCode": "0x848C",
+ "EventName": "FP_FP8_SCALE_MIN_OPS_SPEC",
+ "BriefDescription": "Scalable element arithmetic operations speculatively executed, smallest type is 8-bit floating-point."
}
]
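After rebuilding, the new BFloat16/FP8 architectural events should be visible in the event listing; a quick check (the grep pattern is illustrative):

    perf list | grep -iE 'bf16|fp8'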
diff --git a/tools/perf/pmu-events/arch/arm64/freescale/imx94/sys/ddrc.json b/tools/perf/pmu-events/arch/arm64/freescale/imx94/sys/ddrc.json
new file mode 100644
index 000000000000..aa7b58721dc7
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/freescale/imx94/sys/ddrc.json
@@ -0,0 +1,9 @@
+[
+ {
+ "BriefDescription": "ddr cycles event",
+ "EventCode": "0x00",
+ "EventName": "imx94_ddr.cycles",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/freescale/imx94/sys/metrics.json b/tools/perf/pmu-events/arch/arm64/freescale/imx94/sys/metrics.json
new file mode 100644
index 000000000000..629f1f52761e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/freescale/imx94/sys/metrics.json
@@ -0,0 +1,450 @@
+[
+ {
+ "BriefDescription": "bandwidth usage for lpddr5 evk board",
+ "MetricName": "imx94_bandwidth_usage.lpddr5",
+ "MetricExpr": "(( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x000\\,axi_id\\=0x000@ + imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x000\\,axi_id\\=0x000@ ) * 32 / duration_time) / (4266 * 1000000 * 4)",
+ "ScaleUnit": "1e2%",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bandwidth usage for lpddr4 evk board",
+ "MetricName": "imx94_bandwidth_usage.lpddr4",
+ "MetricExpr": "(( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x000\\,axi_id\\=0x000@ + imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x000\\,axi_id\\=0x000@ ) * 32 / duration_time) / (4266 * 1000000 * 4)",
+ "ScaleUnit": "1e2%",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of all masters read from ddr",
+ "MetricName": "imx94_ddr_read.all",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x000\\,axi_id\\=0x000@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of all masters write to ddr",
+ "MetricName": "imx94_ddr_write.all",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x000\\,axi_id\\=0x000@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of all a55 read from ddr",
+ "MetricName": "imx94_ddr_read.a55_all",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt1\\,axi_mask\\=0x3fc\\,axi_id\\=0x000@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of all a55 write from ddr",
+ "MetricName": "imx94_ddr_write.a55_all",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3fc\\,axi_id\\=0x000@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of a55 core 0 read from ddr",
+ "MetricName": "imx94_ddr_read.a55_0",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt2\\,axi_mask\\=0x3ff\\,axi_id\\=0x000@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of a55 core 0 write to ddr",
+ "MetricName": "imx94_ddr_write.a55_0",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3ff\\,axi_id\\=0x000@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of a55 core 1 read from ddr",
+ "MetricName": "imx94_ddr_read.a55_1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x00f\\,axi_id\\=0x001@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of a55 core 1 write to ddr",
+ "MetricName": "imx94_ddr_write.a55_1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x00f\\,axi_id\\=0x001@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of a55 core 2 read from ddr",
+ "MetricName": "imx94_ddr_read.a55_2",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt1\\,axi_mask\\=0x00f\\,axi_id\\=0x002@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of a55 core 2 write to ddr",
+ "MetricName": "imx94_ddr_write.a55_2",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x00f\\,axi_id\\=0x002@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of a55 core 3 read from ddr",
+ "MetricName": "imx94_ddr_read.a55_3",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt2\\,axi_mask\\=0x00f\\,axi_id\\=0x003@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of a55 core 3 write to ddr",
+ "MetricName": "imx94_ddr_write.a55_3",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x00f\\,axi_id\\=0x003@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of m7 core1 read from ddr",
+ "MetricName": "imx94_ddr_read.m7_1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x00f\\,axi_id\\=0x004@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of m7 core1 write to ddr",
+ "MetricName": "imx94_ddr_write.m7_1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x00f\\,axi_id\\=0x004@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of m33 core1 (in netc) read from ddr",
+ "MetricName": "imx94_ddr_read.m33_1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt1\\,axi_mask\\=0x00f\\,axi_id\\=0x005@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of m33 core1 (in netc) write to ddr",
+ "MetricName": "imx94_ddr_write.m33_1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x00f\\,axi_id\\=0x005@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of pcie2 read from ddr",
+ "MetricName": "imx94_ddr_read.pcie2",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt2\\,axi_mask\\=0x00f\\,axi_id\\=0x006@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of pcie2 write to ddr",
+ "MetricName": "imx94_ddr_write.pcie2",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x00f\\,axi_id\\=0x006@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of Cortex-A DSU L3 evicted/ACP transactions read from ddr",
+ "MetricName": "imx94_ddr_read.cortex_a_dsu",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x00f\\,axi_id\\=0x007@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of Cortex-A DSU L3 evicted/ACP transactions write to ddr",
+ "MetricName": "imx94_ddr_write.cortex_a_dsu",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x00f\\,axi_id\\=0x007@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of m33 core0 read from ddr",
+ "MetricName": "imx94_ddr_read.m33_0",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt1\\,axi_mask\\=0x00f\\,axi_id\\=0x008@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of m33 core0 write to ddr",
+ "MetricName": "imx94_ddr_write.m33_0",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x00f\\,axi_id\\=0x008@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of m7 core0 read from ddr",
+ "MetricName": "imx94_ddr_read.m7_0",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt2\\,axi_mask\\=0x00f\\,axi_id\\=0x009@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of m7 core0 write to ddr",
+ "MetricName": "imx94_ddr_write.m7_0",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x00f\\,axi_id\\=0x009@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of sentinel read from ddr",
+ "MetricName": "imx94_ddr_read.sentinel",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x00f\\,axi_id\\=0x00a@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of sentinel write to ddr",
+ "MetricName": "imx94_ddr_write.sentinel",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x00f\\,axi_id\\=0x00a@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of edma1 read from ddr",
+ "MetricName": "imx94_ddr_read.edma1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt1\\,axi_mask\\=0x00f\\,axi_id\\=0x00b@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of edma1 write to ddr",
+ "MetricName": "imx94_ddr_write.edma1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x00f\\,axi_id\\=0x00b@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of edma2 read from ddr",
+ "MetricName": "imx94_ddr_read.edma2",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt2\\,axi_mask\\=0x00f\\,axi_id\\=0x00c@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of edma2 write to ddr",
+ "MetricName": "imx94_ddr_write.edma2",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x00f\\,axi_id\\=0x00c@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of netc read from ddr",
+ "MetricName": "imx94_ddr_read.netc",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x00f\\,axi_id\\=0x00d@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of netc write to ddr",
+ "MetricName": "imx94_ddr_write.netc",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x00f\\,axi_id\\=0x00d@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of aonmix read from ddr",
+ "MetricName": "imx94_ddr_read.aonmix",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt2\\,axi_mask\\=0x00f\\,axi_id\\=0x00f@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of aonmix write to ddr",
+ "MetricName": "imx94_ddr_write.aonmix",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x00f\\,axi_id\\=0x00f@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of npumix read from ddr",
+ "MetricName": "imx94_ddr_read.npumix",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x3f0\\,axi_id\\=0x010@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of npumix write to ddr",
+ "MetricName": "imx94_ddr_write.npumix",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x010@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of usdhc1 read from ddr",
+ "MetricName": "imx94_ddr_read.usdhc1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt1\\,axi_mask\\=0x3f0\\,axi_id\\=0x0b0@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of usdhc1 write to ddr",
+ "MetricName": "imx94_ddr_write.usdhc1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x0b0@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of usdhc2 read from ddr",
+ "MetricName": "imx94_ddr_read.usdhc2",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt2\\,axi_mask\\=0x3f0\\,axi_id\\=0x0c0@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of usdhc2 write to ddr",
+ "MetricName": "imx94_ddr_write.usdhc2",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x0c0@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of usdhc3 read from ddr",
+ "MetricName": "imx94_ddr_read.usdhc3",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x3f0\\,axi_id\\=0x0d0@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of usdhc3 write to ddr",
+ "MetricName": "imx94_ddr_write.usdhc3",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x0d0@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of xspi read from ddr",
+ "MetricName": "imx94_ddr_read.xspi",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt2\\,axi_mask\\=0x3f0\\,axi_id\\=0x0f0@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of xspi write to ddr",
+ "MetricName": "imx94_ddr_write.xspi",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x0f0@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of pcie1 read from ddr",
+ "MetricName": "imx94_ddr_read.pcie1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x3f0\\,axi_id\\=0x100@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of pcie1 write to ddr",
+ "MetricName": "imx94_ddr_write.pcie1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x100@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of usb1 read from ddr",
+ "MetricName": "imx94_ddr_read.usb1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt1\\,axi_mask\\=0x3f0\\,axi_id\\=0x140@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of usb1 write to ddr",
+ "MetricName": "imx94_ddr_write.usb1",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x140@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of usb2 read from ddr",
+ "MetricName": "imx94_ddr_read.usb2",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt2\\,axi_mask\\=0x3f0\\,axi_id\\=0x150@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of usb2 write to ddr",
+ "MetricName": "imx94_ddr_write.usb2",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x150@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of pxp read from ddr",
+ "MetricName": "imx94_ddr_read.pxp",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt0\\,axi_mask\\=0x3f0\\,axi_id\\=0x2a0@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of pxp write to ddr",
+ "MetricName": "imx94_ddr_write.pxp",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x2a0@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of dcif read from ddr",
+ "MetricName": "imx94_ddr_read.dcif",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_rd_beat_filt1\\,axi_mask\\=0x3f0\\,axi_id\\=0x2b0@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ },
+ {
+ "BriefDescription": "bytes of dcif write to ddr",
+ "MetricName": "imx94_ddr_write.dcif",
+ "MetricExpr": "( imx9_ddr0@eddrtq_pm_wr_beat_filt\\,axi_mask\\=0x3f0\\,axi_id\\=0x2b0@ ) * 32",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx9_ddr",
+ "Compat": "imx94"
+ }
+]
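A usage sketch for the new i.MX94 DDR metrics (the metric name is taken from the file above; system-wide counting as root is assumed):

    perf stat -M imx94_ddr_read.all -a -- sleep 10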
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/core-imp-def.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/core-imp-def.json
index 52f5ca1482fe..57a854ff5033 100644
--- a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/core-imp-def.json
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/core-imp-def.json
@@ -1,6 +1,6 @@
[
{
"ArchStdEvent": "L1I_CACHE_PRF",
- "BriefDescription": "This event counts fetch counted by either Level 1 instruction hardware prefetch or Level 1 instruction software prefetch."
+ "BriefDescription": "This event counts L1I_CACHE caused by hardware prefetch or software prefetch."
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/cycle_accounting.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/cycle_accounting.json
index 24ff5d8dbb98..84374adbb0f8 100644
--- a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/cycle_accounting.json
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/cycle_accounting.json
@@ -12,12 +12,12 @@
{
"EventCode": "0x0184",
"EventName": "LD_COMP_WAIT",
- "BriefDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted load/store/prefetch operation waits for L1D cache, L2 cache and memory access."
+ "BriefDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted load/store/prefetch operation waits for L1D cache, L2 cache, L3 cache and memory access."
},
{
"EventCode": "0x0185",
"EventName": "LD_COMP_WAIT_EX",
- "BriefDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted integer load operation waits for L1D cache, L2 cache and memory access."
+ "BriefDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted integer load operation waits for L1D cache, L2 cache, L3 cache and memory access."
},
{
"EventCode": "0x0186",
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/exception.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/exception.json
index f231712fe261..fba66bbcfeb5 100644
--- a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/exception.json
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/exception.json
@@ -33,7 +33,7 @@
},
{
"ArchStdEvent": "EXC_SMC",
- "BriefDescription": "This event counts only Secure Monitor Call exceptions. The counter does not increment on SMC instructions trapped as a Hyp Trap exception."
+ "BriefDescription": "This event counts only Secure Monitor Call exceptions. This event does not increment on SMC instructions trapped as a Hyp Trap exception."
},
{
"ArchStdEvent": "EXC_HVC",
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/fp_operation.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/fp_operation.json
index a3c368959199..2ffdc16530dd 100644
--- a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/fp_operation.json
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/fp_operation.json
@@ -2,7 +2,7 @@
{
"EventCode": "0x0105",
"EventName": "FP_MV_SPEC",
- "BriefDescription": "This event counts architecturally executed floating-point move operations."
+ "BriefDescription": "This event counts architecturally executed floating-point move operation."
},
{
"EventCode": "0x0112",
@@ -24,7 +24,7 @@
},
{
"ArchStdEvent": "ASE_SVE_FP_SPEC",
- "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE floating-point operations."
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD or SVE floating-point operation."
},
{
"ArchStdEvent": "FP_HP_SPEC",
@@ -40,7 +40,7 @@
},
{
"ArchStdEvent": "ASE_SVE_FP_HP_SPEC",
- "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE half-precision floating-point operations."
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD or SVE half-precision floating-point operation."
},
{
"ArchStdEvent": "FP_SP_SPEC",
@@ -56,7 +56,7 @@
},
{
"ArchStdEvent": "ASE_SVE_FP_SP_SPEC",
- "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE single-precision floating-point operations."
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD or SVE single-precision floating-point operation."
},
{
"ArchStdEvent": "FP_DP_SPEC",
@@ -72,7 +72,7 @@
},
{
"ArchStdEvent": "ASE_SVE_FP_DP_SPEC",
- "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE double-precision floating-point operations."
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD or SVE double-precision floating-point operation."
},
{
"ArchStdEvent": "FP_DIV_SPEC",
@@ -88,7 +88,7 @@
},
{
"ArchStdEvent": "ASE_SVE_FP_DIV_SPEC",
- "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE floating-point divide operations."
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD or SVE floating-point divide operation."
},
{
"ArchStdEvent": "FP_SQRT_SPEC",
@@ -104,7 +104,7 @@
},
{
"ArchStdEvent": "ASE_SVE_FP_SQRT_SPEC",
- "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE floating-point square root operations."
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD or SVE floating-point square root operation."
},
{
"ArchStdEvent": "ASE_FP_FMA_SPEC",
@@ -116,11 +116,11 @@
},
{
"ArchStdEvent": "ASE_SVE_FP_FMA_SPEC",
- "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE floating-point FMA operations."
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD or SVE floating-point FMA operation."
},
{
"ArchStdEvent": "FP_MUL_SPEC",
- "BriefDescription": "This event counts architecturally executed floating-point multiply operations."
+ "BriefDescription": "This event counts architecturally executed floating-point multiply operation."
},
{
"ArchStdEvent": "ASE_FP_MUL_SPEC",
@@ -132,11 +132,11 @@
},
{
"ArchStdEvent": "ASE_SVE_FP_MUL_SPEC",
- "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE floating-point multiply operations."
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD or SVE floating-point multiply operation."
},
{
"ArchStdEvent": "FP_ADDSUB_SPEC",
- "BriefDescription": "This event counts architecturally executed floating-point add or subtract operations."
+ "BriefDescription": "This event counts architecturally executed floating-point add or subtract operation."
},
{
"ArchStdEvent": "ASE_FP_ADDSUB_SPEC",
@@ -148,19 +148,19 @@
},
{
"ArchStdEvent": "ASE_SVE_FP_ADDSUB_SPEC",
- "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE floating-point add or subtract operations."
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD or SVE floating-point add or subtract operation."
},
{
"ArchStdEvent": "ASE_FP_RECPE_SPEC",
- "BriefDescription": "This event counts architecturally executed Advanced SIMD floating-point reciprocal estimate operations."
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD floating-point reciprocal estimate operation."
},
{
"ArchStdEvent": "SVE_FP_RECPE_SPEC",
- "BriefDescription": "This event counts architecturally executed SVE floating-point reciprocal estimate operations."
+ "BriefDescription": "This event counts architecturally executed SVE floating-point reciprocal estimate operation."
},
{
"ArchStdEvent": "ASE_SVE_FP_RECPE_SPEC",
- "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE floating-point reciprocal estimate operations."
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD or SVE floating-point reciprocal estimate operation."
},
{
"ArchStdEvent": "ASE_FP_CVT_SPEC",
@@ -172,15 +172,15 @@
},
{
"ArchStdEvent": "ASE_SVE_FP_CVT_SPEC",
- "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE floating-point convert operations."
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD or SVE floating-point convert operation."
},
{
"ArchStdEvent": "SVE_FP_AREDUCE_SPEC",
- "BriefDescription": "This event counts architecturally executed SVE floating-point accumulating reduction operations."
+ "BriefDescription": "This event counts architecturally executed SVE floating-point accumulating reduction operation."
},
{
"ArchStdEvent": "ASE_FP_PREDUCE_SPEC",
- "BriefDescription": "This event counts architecturally executed Advanced SIMD floating-point pairwise add step operations."
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD floating-point pairwise add step operation."
},
{
"ArchStdEvent": "SVE_FP_VREDUCE_SPEC",
@@ -188,15 +188,15 @@
},
{
"ArchStdEvent": "ASE_SVE_FP_VREDUCE_SPEC",
- "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE floating-point vector reduction operations."
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD or SVE floating-point vector reduction operation."
},
{
"ArchStdEvent": "FP_SCALE_OPS_SPEC",
- "BriefDescription": "This event counts architecturally executed SVE arithmetic operations. See FP_SCALE_OPS_SPEC of ARMv9 Reference Manual for more information. This event counter is incremented by (128 / CSIZE) and by twice that amount for operations that would also be counted by SVE_FP_FMA_SPEC."
+ "BriefDescription": "This event counts architecturally executed SVE arithmetic operation. See FP_SCALE_OPS_SPEC of ARMv9 Reference Manual for more information. This event counter is incremented by (128 / CSIZE) and by twice that amount for operations that would also be counted by SVE_FP_FMA_SPEC."
},
{
"ArchStdEvent": "FP_FIXED_OPS_SPEC",
- "BriefDescription": "This event counts architecturally executed v8SIMD&FP arithmetic operations. See FP_FIXED_OPS_SPEC of ARMv9 Reference Manual for more information. The event counter is incremented by the specified number of elements for Advanced SIMD operations or by 1 for scalar operations, and by twice those amounts for operations that would also be counted by FP_FMA_SPEC."
+ "BriefDescription": "This event counts architecturally executed v8SIMD&FP arithmetic operation. See FP_FIXED_OPS_SPEC of ARMv9 Reference Manual for more information. This event counter is incremented by the specified number of elements for Advanced SIMD operations or by 1 for scalar operations, and by twice those amounts for operations that would also be counted by FP_FMA_SPEC."
},
{
"ArchStdEvent": "ASE_SVE_FP_DOT_SPEC",
@@ -205,5 +205,61 @@
{
"ArchStdEvent": "ASE_SVE_FP_MMLA_SPEC",
"BriefDescription": "This event counts architecturally executed microarchitectural Advanced SIMD or SVE floating-point matrix multiply operation."
+ },
+ {
+ "ArchStdEvent": "ASE_FP_VREDUCE_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD floating-point vector reduction operation."
+ },
+ {
+ "ArchStdEvent": "SVE_FP_PREDUCE_SPEC",
+ "BriefDescription": "This event counts architecturally executed SVE floating-point pairwise add step operation."
+ },
+ {
+ "ArchStdEvent": "ASE_FP_BF16_MIN_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD data processing operations, smallest type is BFloat16 floating-point."
+ },
+ {
+ "ArchStdEvent": "ASE_FP_FP8_MIN_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD data processing operations, smallest type is 8-bit floating-point."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_FP_BF16_MIN_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD data processing or SVE data processing operations, smallest type is BFloat16 floating-point."
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_FP_FP8_MIN_SPEC",
+ "BriefDescription": "This event counts architecturally executed Advanced SIMD data processing or SVE data processing operations, smallest type is 8-bit floating-point."
+ },
+ {
+ "ArchStdEvent": "SVE_FP_BF16_MIN_SPEC",
+ "BriefDescription": "This event counts architecturally executed SVE data processing operations, smallest type is BFloat16 floating-point."
+ },
+ {
+ "ArchStdEvent": "SVE_FP_FP8_MIN_SPEC",
+ "BriefDescription": "This event counts architecturally executed SVE data processing operations, smallest type is 8-bit floating-point."
+ },
+ {
+ "ArchStdEvent": "FP_BF16_MIN_SPEC",
+ "BriefDescription": "This event counts architecturally executed data processing operations, smallest type is BFloat16 floating-point."
+ },
+ {
+ "ArchStdEvent": "FP_FP8_MIN_SPEC",
+ "BriefDescription": "This event counts architecturally executed data processing operations, smallest type is 8-bit floating-point."
+ },
+ {
+ "ArchStdEvent": "FP_BF16_FIXED_MIN_OPS_SPEC",
+ "BriefDescription": "This event counts architecturally executed non-scalable element arithmetic operations, smallest type is BFloat16 floating-point."
+ },
+ {
+ "ArchStdEvent": "FP_FP8_FIXED_MIN_OPS_SPEC",
+ "BriefDescription": "This event counts architecturally executed non-scalable element arithmetic operations, smallest type is 8-bit floating-point."
+ },
+ {
+ "ArchStdEvent": "FP_BF16_SCALE_MIN_OPS_SPEC",
+ "BriefDescription": "This event counts architecturally executed scalable element arithmetic operations, smallest type is BFloat16 floating-point."
+ },
+ {
+ "ArchStdEvent": "FP_FP8_SCALE_MIN_OPS_SPEC",
+ "BriefDescription": "This event counts architecturally executed scalable element arithmetic operations, smallest type is 8-bit floating-point."
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l1d_cache.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l1d_cache.json
index b0818a2fedb0..a2ff3b49ac0d 100644
--- a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l1d_cache.json
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l1d_cache.json
@@ -72,11 +72,11 @@
},
{
"ArchStdEvent": "L1D_CACHE_HWPRF",
- "BriefDescription": "This event counts access counted by L1D_CACHE that is due to a hardware prefetch."
+ "BriefDescription": "This event counts L1D_CACHE caused by hardware prefetch."
},
{
"ArchStdEvent": "L1D_CACHE_REFILL_HWPRF",
- "BriefDescription": "This event counts hardware prefetch counted by L1D_CACHE_HWPRF that causes a refill of the Level 1 data cache from outside of the Level 1 data cache."
+ "BriefDescription": "This event counts L1D_CACHE_REFILL caused by hardware prefetch."
},
{
"ArchStdEvent": "L1D_CACHE_HIT_RD",
@@ -100,14 +100,14 @@
},
{
"ArchStdEvent": "L1D_CACHE_PRF",
- "BriefDescription": "This event counts fetch counted by either Level 1 data hardware prefetch or Level 1 data software prefetch."
+ "BriefDescription": "This event counts L1D_CACHE caused by hardware prefetch or software prefetch."
},
{
"ArchStdEvent": "L1D_CACHE_REFILL_PRF",
- "BriefDescription": "This event counts hardware prefetch counted by L1D_CACHE_PRF that causes a refill of the Level 1 data cache from outside of the Level 1 data cache."
+ "BriefDescription": "This event counts L1D_CACHE_REFILL caused by hardware prefetch or software prefetch."
},
{
"ArchStdEvent": "L1D_CACHE_REFILL_PERCYC",
- "BriefDescription": "The counter counts by the number of cache refills counted by L1D_CACHE_REFILL in progress on each Processor cycle."
+ "BriefDescription": "This counter counts by the number of cache refills counted by L1D_CACHE_REFILL in progress on each Processor cycle."
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l1i_cache.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l1i_cache.json
index 8680d8ec461d..5250af8631c0 100644
--- a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l1i_cache.json
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l1i_cache.json
@@ -23,11 +23,11 @@
},
{
"ArchStdEvent": "L1I_CACHE_HWPRF",
- "BriefDescription": "This event counts access counted by L1I_CACHE that is due to a hardware prefetch."
+ "BriefDescription": "This event counts L1I_CACHE caused by hardware prefetch."
},
{
"ArchStdEvent": "L1I_CACHE_REFILL_HWPRF",
- "BriefDescription": "This event counts hardware prefetch counted by L1I_CACHE_HWPRF that causes a refill of the Level 1 instruction cache from outside of the Level 1 instruction cache."
+ "BriefDescription": "This event counts L1I_CACHE_REFILL caused by hardware prefetch."
},
{
"ArchStdEvent": "L1I_CACHE_HIT_RD",
@@ -43,10 +43,10 @@
},
{
"ArchStdEvent": "L1I_CACHE_REFILL_PRF",
- "BriefDescription": "This event counts hardware prefetch counted by L1I_CACHE_PRF that causes a refill of the Level 1 instruction cache from outside of the Level 1 instruction cache."
+ "BriefDescription": "This event counts L1I_CACHE_REFILL caused by hardware prefetch or software prefetch."
},
{
"ArchStdEvent": "L1I_CACHE_REFILL_PERCYC",
- "BriefDescription": "The counter counts by the number of cache refills counted by L1I_CACHE_REFILL in progress on each Processor cycle."
+ "BriefDescription": "This counter counts by the number of cache refills counted by L1I_CACHE_REFILL in progress on each Processor cycle."
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l2_cache.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l2_cache.json
index 9e092752e6db..67f9151d7685 100644
--- a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l2_cache.json
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l2_cache.json
@@ -21,19 +21,19 @@
},
{
"ArchStdEvent": "L2D_CACHE_RD",
- "BriefDescription": "This event counts L2D CACHE caused by read access."
+ "BriefDescription": "This event counts L2D_CACHE caused by read access."
},
{
"ArchStdEvent": "L2D_CACHE_WR",
- "BriefDescription": "This event counts L2D CACHE caused by write access."
+ "BriefDescription": "This event counts L2D_CACHE caused by write access."
},
{
"ArchStdEvent": "L2D_CACHE_REFILL_RD",
- "BriefDescription": "This event counts L2D CACHE_REFILL caused by read access."
+ "BriefDescription": "This event counts L2D_CACHE_REFILL caused by read access."
},
{
"ArchStdEvent": "L2D_CACHE_REFILL_WR",
- "BriefDescription": "This event counts L2D CACHE_REFILL caused by write access."
+ "BriefDescription": "This event counts L2D_CACHE_REFILL caused by write access."
},
{
"ArchStdEvent": "L2D_CACHE_WB_VICTIM",
@@ -57,7 +57,7 @@
{
"EventCode": "0x0305",
"EventName": "L2D_CACHE_HWPRF_ADJACENT",
- "BriefDescription": "This event counts L2D_CACHE caused by hardware adjacent prefetch access."
+ "BriefDescription": "This event counts L2D_CACHE caused by hardware adjacent prefetch."
},
{
"EventCode": "0x0308",
@@ -111,7 +111,7 @@
},
{
"ArchStdEvent": "L2D_CACHE_LMISS_RD",
- "BriefDescription": "This event counts operations that cause a refill of the L2D cache that incurs additional latency."
+ "BriefDescription": "This event counts operations that cause a refill of the L2 cache that incurs additional latency."
},
{
"ArchStdEvent": "L2D_CACHE_MISS",
@@ -119,23 +119,23 @@
},
{
"ArchStdEvent": "L2D_CACHE_HWPRF",
- "BriefDescription": "This event counts access counted by L2D_CACHE that is due to a hardware prefetch."
+ "BriefDescription": "This event counts L2D_CACHE caused by hardware prefetch."
},
{
"ArchStdEvent": "L2D_CACHE_REFILL_HWPRF",
- "BriefDescription": "This event counts hardware prefetch counted by L2D_CACHE_HWPRF that causes a refill of the Level 2 cache, or any Level 1 data and instruction cache of this PE, from outside of those caches."
+ "BriefDescription": "This event counts L2D_CACHE_REFILL caused by hardware prefetch."
},
{
"ArchStdEvent": "L2D_CACHE_HIT_RD",
- "BriefDescription": "This event counts demand read counted by L2D_CACHE_RD that hits in the Level 2 data cache."
+ "BriefDescription": "This event counts demand read counted by L2D_CACHE_RD that hits in the Level 2 cache."
},
{
"ArchStdEvent": "L2D_CACHE_HIT_WR",
- "BriefDescription": "This event counts demand write counted by L2D_CACHE_WR that hits in the Level 2 data cache."
+ "BriefDescription": "This event counts demand write counted by L2D_CACHE_WR that hits in the Level 2 cache."
},
{
"ArchStdEvent": "L2D_CACHE_HIT",
- "BriefDescription": "This event counts access counted by L2D_CACHE that hits in the Level 2 data cache."
+ "BriefDescription": "This event counts access counted by L2D_CACHE that hits in the Level 2 cache."
},
{
"ArchStdEvent": "L2D_LFB_HIT_RD",
@@ -147,14 +147,14 @@
},
{
"ArchStdEvent": "L2D_CACHE_PRF",
- "BriefDescription": "This event counts fetch counted by either Level 2 data hardware prefetch or Level 2 data software prefetch."
+ "BriefDescription": "This event counts L2D_CACHE caused by hardware prefetch or software prefetch."
},
{
"ArchStdEvent": "L2D_CACHE_REFILL_PRF",
- "BriefDescription": "This event counts hardware prefetch counted by L2D_CACHE_PRF that causes a refill of the Level 2 data cache from outside of the Level 1 data cache."
+ "BriefDescription": "This event counts L2D_CACHE_REFILL caused by hardware prefetch or software prefetch."
},
{
"ArchStdEvent": "L2D_CACHE_REFILL_PERCYC",
- "BriefDescription": "The counter counts by the number of cache refills counted by L2D_CACHE_REFILL in progress on each Processor cycle."
+ "BriefDescription": "This counter counts by the number of cache refills counted by L2D_CACHE_REFILL in progress on each Processor cycle."
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l3_cache.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l3_cache.json
index 3f3e0d22ac68..cf49c4d452b7 100644
--- a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l3_cache.json
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/l3_cache.json
@@ -30,17 +30,17 @@
{
"EventCode": "0x0394",
"EventName": "L2D_CACHE_REFILL_L3D_CACHE_PRF",
- "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_CACHE caused by prefetch access."
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_CACHE caused by hardware prefetch or software prefetch."
},
{
"EventCode": "0x0395",
"EventName": "L2D_CACHE_REFILL_L3D_CACHE_HWPRF",
- "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_CACHE caused by hardware prefetch access."
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_CACHE caused by hardware prefetch."
},
{
"EventCode": "0x0396",
"EventName": "L2D_CACHE_REFILL_L3D_MISS",
- "BriefDescription": "This event counts operations that cause a miss of the L3 cache."
+ "BriefDescription": "This event counts operations that cause a miss of the L3 cache. Note: This event may count inaccurately."
},
{
"EventCode": "0x0397",
@@ -60,17 +60,17 @@
{
"EventCode": "0x039A",
"EventName": "L2D_CACHE_REFILL_L3D_MISS_PRF",
- "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_MISS caused by prefetch access."
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_MISS caused by hardware prefetch or software prefetch. Note: This event may count inaccurately."
},
{
"EventCode": "0x039B",
"EventName": "L2D_CACHE_REFILL_L3D_MISS_HWPRF",
- "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_MISS caused by hardware prefetch access."
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_MISS caused by hardware prefetch. Note: This event may count inaccurately."
},
{
"EventCode": "0x039C",
"EventName": "L2D_CACHE_REFILL_L3D_HIT",
- "BriefDescription": "This event counts operations that cause a hit of the L3 cache."
+ "BriefDescription": "This event counts operations that cause a hit of the L3 cache. Note: This event may count inaccurately."
},
{
"EventCode": "0x039D",
@@ -90,70 +90,65 @@
{
"EventCode": "0x03A0",
"EventName": "L2D_CACHE_REFILL_L3D_HIT_PRF",
- "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_HIT caused by prefetch access."
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_HIT caused by hardware prefetch or software prefetch. Note: This event may count inaccurately."
},
{
"EventCode": "0x03A1",
"EventName": "L2D_CACHE_REFILL_L3D_HIT_HWPRF",
- "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_HIT caused by hardware prefetch access."
- },
- {
- "EventCode": "0x03A2",
- "EventName": "L2D_CACHE_REFILL_L3D_MISS_PFTGT_HIT",
- "BriefDescription": "This event counts the number of L3 cache misses where the requests hit the PFTGT buffer."
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_HIT caused by hardware prefetch. Note: This event may count inaccurately."
},
{
"EventCode": "0x03A3",
- "EventName": "L2D_CACHE_REFILL_L3D_MISS_PFTGT_HIT_DM",
- "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_MISS_PFTGT_HIT caused by demand access."
+ "EventName": "L2D_CACHE_REFILL_L3D_MISS_DM_PFTGT_HIT",
+ "BriefDescription": "This event counts the number of L3 cache misses caused by demand access where the requests hit the PFTGT buffer."
},
{
"EventCode": "0x03A4",
- "EventName": "L2D_CACHE_REFILL_L3D_MISS_PFTGT_HIT_DM_RD",
- "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_MISS_PFTGT_HIT caused by demand read access."
+ "EventName": "L2D_CACHE_REFILL_L3D_MISS_DM_RD_PFTGT_HIT",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_MISS_DM_PFTGT_HIT caused by read access."
},
{
"EventCode": "0x03A5",
- "EventName": "L2D_CACHE_REFILL_L3D_MISS_PFTGT_HIT_DM_WR",
- "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_MISS_PFTGT_HIT caused by demand write access."
+ "EventName": "L2D_CACHE_REFILL_L3D_MISS_DM_WR_PFTGT_HIT",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_MISS_DM_PFTGT_HIT caused by write access."
},
{
"EventCode": "0x03A6",
- "EventName": "L2D_CACHE_REFILL_L3D_MISS_L_MEM",
- "BriefDescription": "This event counts the number of L3 cache misses where the requests access the memory in the same socket as the requests."
+ "EventName": "L2D_CACHE_REFILL_L3D_MISS_DM_L_MEM",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_MISS_DM where the requests access the memory in the same socket as the requests."
},
{
"EventCode": "0x03A7",
- "EventName": "L2D_CACHE_REFILL_L3D_MISS_FR_MEM",
- "BriefDescription": "This event counts the number of L3 cache misses where the requests access the memory in the different socket from the requests."
+ "EventName": "L2D_CACHE_REFILL_L3D_MISS_DM_FR_MEM",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_MISS_DM where the requests access the memory in the different socket from the requests."
},
{
"EventCode": "0x03A8",
- "EventName": "L2D_CACHE_REFILL_L3D_MISS_L_L2",
- "BriefDescription": "This event counts the number of L3 cache misses where the requests access the different L2 cache from the requests in the same Numa nodes as the requests."
+ "EventName": "L2D_CACHE_REFILL_L3D_MISS_DM_L_L2",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_MISS_DM where the requests access the different L2 cache from the requests in the same Numa nodes as the requests."
},
{
"EventCode": "0x03A9",
- "EventName": "L2D_CACHE_REFILL_L3D_MISS_NR_L2",
- "BriefDescription": "This event counts the number of L3 cache misses where the requests access L2 cache in the different Numa nodes from the requests in the same socket as the requests."
+ "EventName": "L2D_CACHE_REFILL_L3D_MISS_DM_NR_L2",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_MISS_DM where the requests access L2 cache in the different Numa nodes from the requests in the same socket as the requests."
},
{
"EventCode": "0x03AA",
- "EventName": "L2D_CACHE_REFILL_L3D_MISS_NR_L3",
- "BriefDescription": "This event counts the number of L3 cache misses where the requests access L3 cache in the different Numa nodes from the requests in the same socket as the requests."
+ "EventName": "L2D_CACHE_REFILL_L3D_MISS_DM_NR_L3",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_MISS_DM where the requests access L3 cache in the different Numa nodes from the requests in the same socket as the requests."
},
{
"EventCode": "0x03AB",
- "EventName": "L2D_CACHE_REFILL_L3D_MISS_FR_L2",
- "BriefDescription": "This event counts the number of L3 cache misses where the requests access L2 cache in the different socket from the requests."
+ "EventName": "L2D_CACHE_REFILL_L3D_MISS_DM_FR_L2",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_MISS_DM where the requests access L2 cache in the different socket from the requests."
},
{
"EventCode": "0x03AC",
- "EventName": "L2D_CACHE_REFILL_L3D_MISS_FR_L3",
- "BriefDescription": "This event counts the number of L3 cache misses where the requests access L3 cache in the different socket from the requests."
+ "EventName": "L2D_CACHE_REFILL_L3D_MISS_DM_FR_L3",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL_L3D_MISS_DM where the requests access L3 cache in the different socket from the requests."
},
{
"ArchStdEvent": "L3D_CACHE_LMISS_RD",
- "BriefDescription": "This event counts access counted by L3D_CACHE that is not completed by the L3D cache, and a Memory-read operation, as defined by the L2D_CACHE_REFILL_L3D_MISS events."
+ "BriefDescription": "This event counts access counted by L3D_CACHE that is not completed by the L3 cache, and a Memory-read operation, as defined by the L2D_CACHE_REFILL_L3D_MISS events. Note: This event may count inaccurately."
}
]
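The PFTGT-hit miss events now carry the DM (demand) qualifier in their names; counting one directly works as usual (event name taken from the hunk above; availability is monaka-specific):

    perf stat -e L2D_CACHE_REFILL_L3D_MISS_DM_PFTGT_HIT -a -- sleep 1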
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/ll_cache.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/ll_cache.json
index a441b84729ab..d49d9f6df72c 100644
--- a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/ll_cache.json
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/ll_cache.json
@@ -5,6 +5,6 @@
},
{
"ArchStdEvent": "LL_CACHE_MISS_RD",
- "BriefDescription": "This event counts access counted by L3D_CACHE that is not completed by the L3D cache, and a Memory-read operation, as defined by the L2D_CACHE_REFILL_L3D_MISS events."
+    "BriefDescription": "This event counts each access counted by L3D_CACHE that is not completed by the L3 cache and is a Memory-read operation, as defined by the L2D_CACHE_REFILL_L3D_MISS events. Note: This event may count inaccurately."
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/pipeline.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/pipeline.json
index 3cc3105f4a5e..15cf54730b85 100644
--- a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/pipeline.json
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/pipeline.json
@@ -147,17 +147,17 @@
{
"EventCode": "0x02B0",
"EventName": "L1_PIPE_COMP_GATHER_2FLOW",
- "BriefDescription": "This event counts the number of times where 2 elements of the gather instructions became 2 flows because 2 elements could not be combined."
+ "BriefDescription": "This event counts the number of times where 2 elements of the gather instructions became 2-flows because 2 elements could not be combined."
},
{
"EventCode": "0x02B1",
"EventName": "L1_PIPE_COMP_GATHER_1FLOW",
- "BriefDescription": "This event counts the number of times where 2 elements of the gather instructions became 1 flow because 2 elements could be combined."
+ "BriefDescription": "This event counts the number of times where 2 elements of the gather instructions became 1-flow because 2 elements could be combined."
},
{
"EventCode": "0x02B2",
"EventName": "L1_PIPE_COMP_GATHER_0FLOW",
- "BriefDescription": "This event counts the number of times where 2 elements of the gather instructions became 0 flow because both predicate values are 0."
+ "BriefDescription": "This event counts the number of times where 2 elements of the gather instructions became 0-flow because both predicate values are 0."
},
{
"EventCode": "0x02B3",
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/spec_operation.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/spec_operation.json
index 4841b43e2871..1caf3baeae4e 100644
--- a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/spec_operation.json
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/spec_operation.json
@@ -81,7 +81,7 @@
},
{
"ArchStdEvent": "CSDB_SPEC",
- "BriefDescription": "This event counts speculatively executed control speculation barrier instructions."
+ "BriefDescription": "This event counts architecturally executed control speculation barrier instructions."
},
{
"EventCode": "0x0108",
@@ -91,17 +91,17 @@
{
"EventCode": "0x0109",
"EventName": "IEL_SPEC",
- "BriefDescription": "This event counts architecturally executed inter-element manipulation operations."
+    "BriefDescription": "This event counts each architecturally executed inter-element manipulation operation."
},
{
"EventCode": "0x010A",
"EventName": "IREG_SPEC",
- "BriefDescription": "This event counts architecturally executed inter-register manipulation operations."
+    "BriefDescription": "This event counts each architecturally executed inter-register manipulation operation."
},
{
"EventCode": "0x011A",
"EventName": "BC_LD_SPEC",
- "BriefDescription": "This event counts architecturally executed SIMD broadcast floating-point load operations."
+    "BriefDescription": "This event counts each architecturally executed SIMD broadcast floating-point load operation."
},
{
"EventCode": "0x011B",
@@ -130,7 +130,7 @@
},
{
"ArchStdEvent": "ASE_INST_SPEC",
- "BriefDescription": "This event counts architecturally executed Advanced SIMD operations."
+    "BriefDescription": "This event counts each architecturally executed Advanced SIMD operation."
},
{
"ArchStdEvent": "INT_SPEC",
@@ -158,7 +158,7 @@
},
{
"ArchStdEvent": "NONFP_SPEC",
- "BriefDescription": "This event counts architecturally executed non-floating-point operations."
+    "BriefDescription": "This event counts each architecturally executed non-floating-point operation."
},
{
"ArchStdEvent": "INT_SCALE_OPS_SPEC",
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/stall.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/stall.json
index 5fb81e2a0a07..e1e16d513828 100644
--- a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/stall.json
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/stall.json
@@ -5,7 +5,7 @@
},
{
"ArchStdEvent": "STALL_BACKEND",
- "BriefDescription": "This event counts every cycle counted by the CPU_CYCLES event on that no operation was issued because the backend is unable to accept any operations."
+    "BriefDescription": "This event counts every cycle counted by the CPU_CYCLES event in which no operation was issued because the backend is unable to accept any operation."
},
{
"ArchStdEvent": "STALL",
@@ -69,7 +69,7 @@
},
{
"ArchStdEvent": "STALL_BACKEND_L2D",
- "BriefDescription": "This event counts every cycle counted by STALL_BACKEND_MEMBOUND when there is a demand data miss in L2D cache."
+    "BriefDescription": "This event counts every cycle counted by STALL_BACKEND_MEMBOUND when there is a demand data miss in the L2 cache."
},
{
"ArchStdEvent": "STALL_BACKEND_CPUBOUND",
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/sve.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/sve.json
index e66b5af00f90..88cab0caf49e 100644
--- a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/sve.json
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/sve.json
@@ -13,11 +13,11 @@
},
{
"ArchStdEvent": "ASE_SVE_INST_SPEC",
- "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE operations."
+    "BriefDescription": "This event counts each architecturally executed Advanced SIMD or SVE operation."
},
{
"ArchStdEvent": "UOP_SPEC",
- "BriefDescription": "This event counts all architecturally executed micro-operations."
+    "BriefDescription": "This event counts each architecturally executed micro-operation."
},
{
"ArchStdEvent": "SVE_MATH_SPEC",
@@ -29,7 +29,7 @@
},
{
"ArchStdEvent": "FP_FMA_SPEC",
- "BriefDescription": "This event counts architecturally executed floating-point fused multiply-add and multiply-subtract operations."
+    "BriefDescription": "This event counts each architecturally executed floating-point fused multiply-add and multiply-subtract operation."
},
{
"ArchStdEvent": "FP_RECPE_SPEC",
@@ -41,15 +41,15 @@
},
{
"ArchStdEvent": "ASE_INT_SPEC",
- "BriefDescription": "This event counts architecturally executed Advanced SIMD integer operations."
+    "BriefDescription": "This event counts each architecturally executed Advanced SIMD integer operation."
},
{
"ArchStdEvent": "SVE_INT_SPEC",
- "BriefDescription": "This event counts architecturally executed SVE integer operations."
+    "BriefDescription": "This event counts each architecturally executed SVE integer operation."
},
{
"ArchStdEvent": "ASE_SVE_INT_SPEC",
- "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE integer operations."
+    "BriefDescription": "This event counts each architecturally executed Advanced SIMD or SVE integer operation."
},
{
"ArchStdEvent": "SVE_INT_DIV_SPEC",
@@ -69,7 +69,7 @@
},
{
"ArchStdEvent": "ASE_SVE_INT_MUL_SPEC",
- "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE integer multiply operations."
+    "BriefDescription": "This event counts each architecturally executed Advanced SIMD or SVE integer multiply operation."
},
{
"ArchStdEvent": "SVE_INT_MUL64_SPEC",
@@ -77,19 +77,19 @@
},
{
"ArchStdEvent": "SVE_INT_MULH64_SPEC",
- "BriefDescription": "This event counts architecturally executed SVE integer 64-bit x 64-bit multiply returning high part operations."
+    "BriefDescription": "This event counts each architecturally executed SVE integer 64-bit x 64-bit multiply returning high part operation."
},
{
"ArchStdEvent": "ASE_NONFP_SPEC",
- "BriefDescription": "This event counts architecturally executed Advanced SIMD non-floating-point operations."
+    "BriefDescription": "This event counts each architecturally executed Advanced SIMD non-floating-point operation."
},
{
"ArchStdEvent": "SVE_NONFP_SPEC",
- "BriefDescription": "This event counts architecturally executed SVE non-floating-point operations."
+    "BriefDescription": "This event counts each architecturally executed SVE non-floating-point operation."
},
{
"ArchStdEvent": "ASE_SVE_NONFP_SPEC",
- "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE non-floating-point operations."
+    "BriefDescription": "This event counts each architecturally executed Advanced SIMD or SVE non-floating-point operation."
},
{
"ArchStdEvent": "ASE_INT_VREDUCE_SPEC",
@@ -101,7 +101,7 @@
},
{
"ArchStdEvent": "ASE_SVE_INT_VREDUCE_SPEC",
- "BriefDescription": "This event counts architecturally executed Advanced SIMD and SVE integer reduction operations."
+    "BriefDescription": "This event counts each architecturally executed Advanced SIMD or SVE integer reduction operation."
},
{
"ArchStdEvent": "SVE_PERM_SPEC",
@@ -149,11 +149,11 @@
},
{
"ArchStdEvent": "ASE_SVE_LD_SPEC",
- "BriefDescription": "This event counts architecturally executed operations that read from memory due to SVE and Advanced SIMD load instructions."
+ "BriefDescription": "This event counts architecturally executed operations that read from memory due to Advanced SIMD or SVE load instructions."
},
{
"ArchStdEvent": "ASE_SVE_ST_SPEC",
- "BriefDescription": "This event counts architecturally executed operations that write to memory due to SVE and Advanced SIMD store instructions."
+ "BriefDescription": "This event counts architecturally executed operations that write to memory due to Advanced SIMD or SVE store instructions."
},
{
"ArchStdEvent": "PRF_SPEC",
@@ -197,11 +197,11 @@
},
{
"ArchStdEvent": "ASE_SVE_LD_MULTI_SPEC",
- "BriefDescription": "This event counts architecturally executed operations that read from memory due to SVE and Advanced SIMD multiple vector contiguous structure load instructions."
+ "BriefDescription": "This event counts architecturally executed operations that read from memory due to Advanced SIMD or SVE multiple vector contiguous structure load instructions."
},
{
"ArchStdEvent": "ASE_SVE_ST_MULTI_SPEC",
- "BriefDescription": "This event counts architecturally executed operations that write to memory due to SVE and Advanced SIMD multiple vector contiguous structure store instructions."
+ "BriefDescription": "This event counts architecturally executed operations that write to memory due to Advanced SIMD or SVE multiple vector contiguous structure store instructions."
},
{
"ArchStdEvent": "SVE_LD_GATHER_SPEC",
@@ -221,27 +221,27 @@
},
{
"ArchStdEvent": "FP_HP_SCALE_OPS_SPEC",
- "BriefDescription": "This event counts architecturally executed SVE half-precision arithmetic operations. See FP_HP_SCALE_OPS_SPEC of ARMv9 Reference Manual for more information. This event counter is incremented by 8, or by 16 for operations that would also be counted by SVE_FP_FMA_SPEC."
+    "BriefDescription": "This event counts each architecturally executed SVE half-precision arithmetic operation. See FP_HP_SCALE_OPS_SPEC of ARMv9 Reference Manual for more information. This event counter is incremented by 8, or by 16 for operations that would also be counted by SVE_FP_FMA_SPEC."
},
{
"ArchStdEvent": "FP_HP_FIXED_OPS_SPEC",
- "BriefDescription": "This event counts architecturally executed v8SIMD&FP half-precision arithmetic operations. See FP_HP_FIXED_OPS_SPEC of ARMv9 Reference Manual for more information. This event counter is incremented by the number of 16-bit elements for Advanced SIMD operations, or by 1 for scalar operations, and by twice those amounts for operations that would also be counted by FP_FMA_SPEC."
+    "BriefDescription": "This event counts each architecturally executed v8SIMD&FP half-precision arithmetic operation. See FP_HP_FIXED_OPS_SPEC of ARMv9 Reference Manual for more information. This event counter is incremented by the number of 16-bit elements for Advanced SIMD operations, or by 1 for scalar operations, and by twice those amounts for operations that would also be counted by FP_FMA_SPEC."
},
{
"ArchStdEvent": "FP_SP_SCALE_OPS_SPEC",
- "BriefDescription": "This event counts architecturally executed SVE single-precision arithmetic operations. See FP_SP_SCALE_OPS_SPEC of ARMv9 Reference Manual for more information. This event counter is incremented by 4, or by 8 for operations that would also be counted by SVE_FP_FMA_SPEC."
+    "BriefDescription": "This event counts each architecturally executed SVE single-precision arithmetic operation. See FP_SP_SCALE_OPS_SPEC of ARMv9 Reference Manual for more information. This event counter is incremented by 4, or by 8 for operations that would also be counted by SVE_FP_FMA_SPEC."
},
{
"ArchStdEvent": "FP_SP_FIXED_OPS_SPEC",
- "BriefDescription": "This event counts architecturally executed v8SIMD&FP single-precision arithmetic operations. See FP_SP_FIXED_OPS_SPEC of ARMv9 Reference Manual for more information. This event counter is incremented by the number of 32-bit elements for Advanced SIMD operations, or by 1 for scalar operations, and by twice those amounts for operations that would also be counted by FP_FMA_SPEC."
+    "BriefDescription": "This event counts each architecturally executed v8SIMD&FP single-precision arithmetic operation. See FP_SP_FIXED_OPS_SPEC of ARMv9 Reference Manual for more information. This event counter is incremented by the number of 32-bit elements for Advanced SIMD operations, or by 1 for scalar operations, and by twice those amounts for operations that would also be counted by FP_FMA_SPEC."
},
{
"ArchStdEvent": "FP_DP_SCALE_OPS_SPEC",
- "BriefDescription": "This event counts architecturally executed SVE double-precision arithmetic operations. See FP_DP_SCALE_OPS_SPEC of ARMv9 Reference Manual for more information. This event counter is incremented by 2, or by 4 for operations that would also be counted by SVE_FP_FMA_SPEC."
+    "BriefDescription": "This event counts each architecturally executed SVE double-precision arithmetic operation. See FP_DP_SCALE_OPS_SPEC of ARMv9 Reference Manual for more information. This event counter is incremented by 2, or by 4 for operations that would also be counted by SVE_FP_FMA_SPEC."
},
{
"ArchStdEvent": "FP_DP_FIXED_OPS_SPEC",
- "BriefDescription": "This event counts architecturally executed v8SIMD&FP double-precision arithmetic operations. See FP_DP_FIXED_OPS_SPEC of ARMv9 Reference Manual for more information. This event counter is incremented by 2 for Advanced SIMD operations, or by 1 for scalar operations, and by twice those amounts for operations that would also be counted by FP_FMA_SPEC."
+    "BriefDescription": "This event counts each architecturally executed v8SIMD&FP double-precision arithmetic operation. See FP_DP_FIXED_OPS_SPEC of ARMv9 Reference Manual for more information. This event counter is incremented by 2 for Advanced SIMD operations, or by 1 for scalar operations, and by twice those amounts for operations that would also be counted by FP_FMA_SPEC."
},
{
"ArchStdEvent": "ASE_SVE_INT_DOT_SPEC",
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/tlb.json b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/tlb.json
index edc7cb8696c8..f54029ba369a 100644
--- a/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/tlb.json
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/tlb.json
@@ -104,72 +104,72 @@
{
"EventCode": "0x0C10",
"EventName": "L1I_TLB_REFILL_4K",
- "BriefDescription": "This event counts operations that cause a TLB refill to the L1I in 4KB page."
+    "BriefDescription": "This event counts operations that cause a TLB refill of the L1I for a 4KB page."
},
{
"EventCode": "0x0C11",
"EventName": "L1I_TLB_REFILL_64K",
- "BriefDescription": "This event counts operations that cause a TLB refill to the L1I in 64KB page."
+    "BriefDescription": "This event counts operations that cause a TLB refill of the L1I for a 64KB page."
},
{
"EventCode": "0x0C12",
"EventName": "L1I_TLB_REFILL_2M",
- "BriefDescription": "This event counts operations that cause a TLB refill to the L1I in 2MB page."
+    "BriefDescription": "This event counts operations that cause a TLB refill of the L1I for a 2MB page."
},
{
"EventCode": "0x0C13",
"EventName": "L1I_TLB_REFILL_32M",
- "BriefDescription": "This event counts operations that cause a TLB refill to the L1I in 32MB page."
+    "BriefDescription": "This event counts operations that cause a TLB refill of the L1I for a 32MB page."
},
{
"EventCode": "0x0C14",
"EventName": "L1I_TLB_REFILL_512M",
- "BriefDescription": "This event counts operations that cause a TLB refill to the L1I in 512MB page."
+    "BriefDescription": "This event counts operations that cause a TLB refill of the L1I for a 512MB page."
},
{
"EventCode": "0x0C15",
"EventName": "L1I_TLB_REFILL_1G",
- "BriefDescription": "This event counts operations that cause a TLB refill to the L1I in 1GB page."
+    "BriefDescription": "This event counts operations that cause a TLB refill of the L1I for a 1GB page."
},
{
"EventCode": "0x0C16",
"EventName": "L1I_TLB_REFILL_16G",
- "BriefDescription": "This event counts operations that cause a TLB refill to the L1I in 16GB page."
+    "BriefDescription": "This event counts operations that cause a TLB refill of the L1I for a 16GB page."
},
{
"EventCode": "0x0C18",
"EventName": "L1D_TLB_REFILL_4K",
- "BriefDescription": "This event counts operations that cause a TLB refill to the L1D in 4KB page."
+    "BriefDescription": "This event counts operations that cause a TLB refill of the L1D for a 4KB page."
},
{
"EventCode": "0x0C19",
"EventName": "L1D_TLB_REFILL_64K",
- "BriefDescription": "This event counts operations that cause a TLB refill to the L1D in 64KB page."
+    "BriefDescription": "This event counts operations that cause a TLB refill of the L1D for a 64KB page."
},
{
"EventCode": "0x0C1A",
"EventName": "L1D_TLB_REFILL_2M",
- "BriefDescription": "This event counts operations that cause a TLB refill to the L1D in 2MB page."
+    "BriefDescription": "This event counts operations that cause a TLB refill of the L1D for a 2MB page."
},
{
"EventCode": "0x0C1B",
"EventName": "L1D_TLB_REFILL_32M",
- "BriefDescription": "This event counts operations that cause a TLB refill to the L1D in 32MB page."
+    "BriefDescription": "This event counts operations that cause a TLB refill of the L1D for a 32MB page."
},
{
"EventCode": "0x0C1C",
"EventName": "L1D_TLB_REFILL_512M",
- "BriefDescription": "This event counts operations that cause a TLB refill to the L1D in 512MB page."
+    "BriefDescription": "This event counts operations that cause a TLB refill of the L1D for a 512MB page."
},
{
"EventCode": "0x0C1D",
"EventName": "L1D_TLB_REFILL_1G",
- "BriefDescription": "This event counts operations that cause a TLB refill to the L1D in 1GB page."
+    "BriefDescription": "This event counts operations that cause a TLB refill of the L1D for a 1GB page."
},
{
"EventCode": "0x0C1E",
"EventName": "L1D_TLB_REFILL_16G",
- "BriefDescription": "This event counts operations that cause a TLB refill to the L1D in 16GB page."
+    "BriefDescription": "This event counts operations that cause a TLB refill of the L1D for a 16GB page."
},
{
"EventCode": "0x0C20",
@@ -244,72 +244,72 @@
{
"EventCode": "0x0C30",
"EventName": "L2I_TLB_REFILL_4K",
- "BriefDescription": "This event counts operations that cause a TLB refill to the L2Iin 4KB page."
+    "BriefDescription": "This event counts operations that cause a TLB refill of the L2I for a 4KB page."
},
{
"EventCode": "0x0C31",
"EventName": "L2I_TLB_REFILL_64K",
- "BriefDescription": "This event counts operations that cause a TLB refill to the L2I in 64KB page."
+    "BriefDescription": "This event counts operations that cause a TLB refill of the L2I for a 64KB page."
},
{
"EventCode": "0x0C32",
"EventName": "L2I_TLB_REFILL_2M",
- "BriefDescription": "This event counts operations that cause a TLB refill to the L2I in 2MB page."
+    "BriefDescription": "This event counts operations that cause a TLB refill of the L2I for a 2MB page."
},
{
"EventCode": "0x0C33",
"EventName": "L2I_TLB_REFILL_32M",
- "BriefDescription": "This event counts operations that cause a TLB refill to the L2I in 32MB page."
+    "BriefDescription": "This event counts operations that cause a TLB refill of the L2I for a 32MB page."
},
{
"EventCode": "0x0C34",
"EventName": "L2I_TLB_REFILL_512M",
- "BriefDescription": "This event counts operations that cause a TLB refill to the L2I in 512MB page."
+    "BriefDescription": "This event counts operations that cause a TLB refill of the L2I for a 512MB page."
},
{
"EventCode": "0x0C35",
"EventName": "L2I_TLB_REFILL_1G",
- "BriefDescription": "This event counts operations that cause a TLB refill to the L2I in 1GB page."
+    "BriefDescription": "This event counts operations that cause a TLB refill of the L2I for a 1GB page."
},
{
"EventCode": "0x0C36",
"EventName": "L2I_TLB_REFILL_16G",
- "BriefDescription": "This event counts operations that cause a TLB refill to the L2I in 16GB page."
+    "BriefDescription": "This event counts operations that cause a TLB refill of the L2I for a 16GB page."
},
{
"EventCode": "0x0C38",
"EventName": "L2D_TLB_REFILL_4K",
- "BriefDescription": "This event counts operations that cause a TLB refill to the L2D in 4KB page."
+    "BriefDescription": "This event counts operations that cause a TLB refill of the L2D for a 4KB page."
},
{
"EventCode": "0x0C39",
"EventName": "L2D_TLB_REFILL_64K",
- "BriefDescription": "This event counts operations that cause a TLB refill to the L2D in 64KB page."
+    "BriefDescription": "This event counts operations that cause a TLB refill of the L2D for a 64KB page."
},
{
"EventCode": "0x0C3A",
"EventName": "L2D_TLB_REFILL_2M",
- "BriefDescription": "This event counts operations that cause a TLB refill to the L2D in 2MB page."
+    "BriefDescription": "This event counts operations that cause a TLB refill of the L2D for a 2MB page."
},
{
"EventCode": "0x0C3B",
"EventName": "L2D_TLB_REFILL_32M",
- "BriefDescription": "This event counts operations that cause a TLB refill to the L2D in 32MB page."
+    "BriefDescription": "This event counts operations that cause a TLB refill of the L2D for a 32MB page."
},
{
"EventCode": "0x0C3C",
"EventName": "L2D_TLB_REFILL_512M",
- "BriefDescription": "This event counts operations that cause a TLB refill to the L2D in 512MB page."
+    "BriefDescription": "This event counts operations that cause a TLB refill of the L2D for a 512MB page."
},
{
"EventCode": "0x0C3D",
"EventName": "L2D_TLB_REFILL_1G",
- "BriefDescription": "This event counts operations that cause a TLB refill to the L2D in 1GB page."
+    "BriefDescription": "This event counts operations that cause a TLB refill of the L2D for a 1GB page."
},
{
"EventCode": "0x0C3E",
"EventName": "L2D_TLB_REFILL_16G",
- "BriefDescription": "This event counts operations that cause a TLB refill to the L2D in 16GB page."
+    "BriefDescription": "This event counts operations that cause a TLB refill of the L2D for a 16GB page."
},
{
"ArchStdEvent": "DTLB_WALK_PERCYC",
diff --git a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json
index 2b3cb55df288..014454d78293 100644
--- a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json
+++ b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json
@@ -3,56 +3,48 @@
"ConfigCode": "0x00",
"EventName": "flux_wr",
"BriefDescription": "DDRC total write operations",
- "PublicDescription": "DDRC total write operations",
"Unit": "hisi_sccl,ddrc"
},
{
"ConfigCode": "0x01",
"EventName": "flux_rd",
"BriefDescription": "DDRC total read operations",
- "PublicDescription": "DDRC total read operations",
"Unit": "hisi_sccl,ddrc"
},
{
"ConfigCode": "0x02",
"EventName": "flux_wcmd",
"BriefDescription": "DDRC write commands",
- "PublicDescription": "DDRC write commands",
"Unit": "hisi_sccl,ddrc"
},
{
"ConfigCode": "0x03",
"EventName": "flux_rcmd",
"BriefDescription": "DDRC read commands",
- "PublicDescription": "DDRC read commands",
"Unit": "hisi_sccl,ddrc"
},
{
"ConfigCode": "0x04",
"EventName": "pre_cmd",
"BriefDescription": "DDRC precharge commands",
- "PublicDescription": "DDRC precharge commands",
"Unit": "hisi_sccl,ddrc"
},
{
"ConfigCode": "0x05",
"EventName": "act_cmd",
"BriefDescription": "DDRC active commands",
- "PublicDescription": "DDRC active commands",
"Unit": "hisi_sccl,ddrc"
},
{
"ConfigCode": "0x06",
"EventName": "rnk_chg",
"BriefDescription": "DDRC rank commands",
- "PublicDescription": "DDRC rank commands",
"Unit": "hisi_sccl,ddrc"
},
{
"ConfigCode": "0x07",
"EventName": "rw_chg",
"BriefDescription": "DDRC read and write changes",
- "PublicDescription": "DDRC read and write changes",
"Unit": "hisi_sccl,ddrc"
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-hha.json b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-hha.json
index 9a7ec7af2060..b2b895fa670e 100644
--- a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-hha.json
+++ b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-hha.json
@@ -3,42 +3,41 @@
"ConfigCode": "0x00",
"EventName": "rx_ops_num",
"BriefDescription": "The number of all operations received by the HHA",
- "PublicDescription": "The number of all operations received by the HHA",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x01",
"EventName": "rx_outer",
"BriefDescription": "The number of all operations received by the HHA from another socket",
- "PublicDescription": "The number of all operations received by the HHA from another socket",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x02",
"EventName": "rx_sccl",
"BriefDescription": "The number of all operations received by the HHA from another SCCL in this socket",
- "PublicDescription": "The number of all operations received by the HHA from another SCCL in this socket",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x03",
"EventName": "rx_ccix",
"BriefDescription": "Count of the number of operations that HHA has received from CCIX",
- "PublicDescription": "Count of the number of operations that HHA has received from CCIX",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x4",
"EventName": "rx_wbi",
+ "BriefDescription": "Count of the number of WriteBackI operations that HHA has received",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x5",
"EventName": "rx_wbip",
+ "BriefDescription": "Count of the number of WriteBackIPtl operations that HHA has received",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x11",
"EventName": "rx_wtistash",
+    "BriefDescription": "Count of the number of WriteThruIStash operations that HHA has received",
"Unit": "hisi_sccl,hha"
},
@@ -46,107 +45,114 @@
"ConfigCode": "0x1c",
"EventName": "rd_ddr_64b",
"BriefDescription": "The number of read operations sent by HHA to DDRC which size is 64 bytes",
- "PublicDescription": "The number of read operations sent by HHA to DDRC which size is 64bytes",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x1d",
"EventName": "wr_ddr_64b",
"BriefDescription": "The number of write operations sent by HHA to DDRC which size is 64 bytes",
- "PublicDescription": "The number of write operations sent by HHA to DDRC which size is 64 bytes",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x1e",
"EventName": "rd_ddr_128b",
"BriefDescription": "The number of read operations sent by HHA to DDRC which size is 128 bytes",
- "PublicDescription": "The number of read operations sent by HHA to DDRC which size is 128 bytes",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x1f",
"EventName": "wr_ddr_128b",
"BriefDescription": "The number of write operations sent by HHA to DDRC which size is 128 bytes",
- "PublicDescription": "The number of write operations sent by HHA to DDRC which size is 128 bytes",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x20",
"EventName": "spill_num",
"BriefDescription": "Count of the number of spill operations that the HHA has sent",
- "PublicDescription": "Count of the number of spill operations that the HHA has sent",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x21",
"EventName": "spill_success",
"BriefDescription": "Count of the number of successful spill operations that the HHA has sent",
- "PublicDescription": "Count of the number of successful spill operations that the HHA has sent",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x23",
"EventName": "bi_num",
+ "BriefDescription": "Count of the number of HHA BackInvalid operations",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x32",
"EventName": "mediated_num",
+ "BriefDescription": "Count of the number of Mediated operations that the HHA has forwarded",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x33",
"EventName": "tx_snp_num",
+ "BriefDescription": "Count of the number of Snoop operations that the HHA has sent",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x34",
"EventName": "tx_snp_outer",
+ "BriefDescription": "Count of the number of Snoop operations that the HHA has sent to another socket",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x35",
"EventName": "tx_snp_ccix",
+ "BriefDescription": "Count of the number of Snoop operations that the HHA has sent to CCIX",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x38",
"EventName": "rx_snprspdata",
+ "BriefDescription": "Count of the number of SnprspData flit operations that HHA has received",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x3c",
"EventName": "rx_snprsp_outer",
+ "BriefDescription": "Count of the number of SnprspData operations that HHA has received from another socket",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x40",
"EventName": "sdir-lookup",
+ "BriefDescription": "Count of the number of HHA S-Dir lookup operations",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x41",
"EventName": "edir-lookup",
+ "BriefDescription": "Count of the number of HHA E-Dir lookup operations",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x42",
"EventName": "sdir-hit",
+ "BriefDescription": "Count of the number of HHA S-Dir hit operations",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x43",
"EventName": "edir-hit",
+ "BriefDescription": "Count of the number of HHA E-Dir hit operations",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x4c",
"EventName": "sdir-home-migrate",
+ "BriefDescription": "Count of the number of HHA S-Dir read home migrate operations",
"Unit": "hisi_sccl,hha"
},
{
"ConfigCode": "0x4d",
"EventName": "edir-home-migrate",
+ "BriefDescription": "Count of the number of HHA E-Dir read home migrate operations",
"Unit": "hisi_sccl,hha"
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-l3c.json b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-l3c.json
index e3479b65be9a..d83c22eb1d15 100644
--- a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-l3c.json
+++ b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-l3c.json
@@ -3,91 +3,78 @@
"ConfigCode": "0x00",
"EventName": "rd_cpipe",
"BriefDescription": "Total read accesses",
- "PublicDescription": "Total read accesses",
"Unit": "hisi_sccl,l3c"
},
{
"ConfigCode": "0x01",
"EventName": "wr_cpipe",
"BriefDescription": "Total write accesses",
- "PublicDescription": "Total write accesses",
"Unit": "hisi_sccl,l3c"
},
{
"ConfigCode": "0x02",
"EventName": "rd_hit_cpipe",
"BriefDescription": "Total read hits",
- "PublicDescription": "Total read hits",
"Unit": "hisi_sccl,l3c"
},
{
"ConfigCode": "0x03",
"EventName": "wr_hit_cpipe",
"BriefDescription": "Total write hits",
- "PublicDescription": "Total write hits",
"Unit": "hisi_sccl,l3c"
},
{
"ConfigCode": "0x04",
"EventName": "victim_num",
"BriefDescription": "l3c precharge commands",
- "PublicDescription": "l3c precharge commands",
"Unit": "hisi_sccl,l3c"
},
{
"ConfigCode": "0x20",
"EventName": "rd_spipe",
"BriefDescription": "Count of the number of read lines that come from this cluster of CPU core in spipe",
- "PublicDescription": "Count of the number of read lines that come from this cluster of CPU core in spipe",
"Unit": "hisi_sccl,l3c"
},
{
"ConfigCode": "0x21",
"EventName": "wr_spipe",
"BriefDescription": "Count of the number of write lines that come from this cluster of CPU core in spipe",
- "PublicDescription": "Count of the number of write lines that come from this cluster of CPU core in spipe",
"Unit": "hisi_sccl,l3c"
},
{
"ConfigCode": "0x22",
"EventName": "rd_hit_spipe",
"BriefDescription": "Count of the number of read lines that hits in spipe of this L3C",
- "PublicDescription": "Count of the number of read lines that hits in spipe of this L3C",
"Unit": "hisi_sccl,l3c"
},
{
"ConfigCode": "0x23",
"EventName": "wr_hit_spipe",
"BriefDescription": "Count of the number of write lines that hits in spipe of this L3C",
- "PublicDescription": "Count of the number of write lines that hits in spipe of this L3C",
"Unit": "hisi_sccl,l3c"
},
{
"ConfigCode": "0x29",
"EventName": "back_invalid",
"BriefDescription": "Count of the number of L3C back invalid operations",
- "PublicDescription": "Count of the number of L3C back invalid operations",
"Unit": "hisi_sccl,l3c"
},
{
"ConfigCode": "0x40",
"EventName": "retry_cpu",
"BriefDescription": "Count of the number of retry that L3C suppresses the CPU operations",
- "PublicDescription": "Count of the number of retry that L3C suppresses the CPU operations",
"Unit": "hisi_sccl,l3c"
},
{
"ConfigCode": "0x41",
"EventName": "retry_ring",
"BriefDescription": "Count of the number of retry that L3C suppresses the ring operations",
- "PublicDescription": "Count of the number of retry that L3C suppresses the ring operations",
"Unit": "hisi_sccl,l3c"
},
{
"ConfigCode": "0x42",
"EventName": "prefetch_drop",
"BriefDescription": "Count of the number of prefetch drops from this L3C",
- "PublicDescription": "Count of the number of prefetch drops from this L3C",
"Unit": "hisi_sccl,l3c"
}
]
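
The rd_cpipe/wr_cpipe and rd_hit_cpipe/wr_hit_cpipe pairs combine naturally into an L3C hit ratio. A minimal sketch with placeholder counts; the hisi_sccl PMU instance names (e.g. hisi_sccl1_l3c0) vary from system to system:

# Placeholder counts for the four cpipe events above.
rd, wr = 1_000_000, 400_000          # rd_cpipe, wr_cpipe
rd_hit, wr_hit = 850_000, 300_000    # rd_hit_cpipe, wr_hit_cpipe
print(f"L3C hit ratio: {(rd_hit + wr_hit) / (rd + wr):.2%}")
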
diff --git a/tools/perf/pmu-events/arch/common/common/legacy-hardware.json b/tools/perf/pmu-events/arch/common/common/legacy-hardware.json
new file mode 100644
index 000000000000..71700647f19b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/common/common/legacy-hardware.json
@@ -0,0 +1,72 @@
+[
+ {
+ "EventName": "cpu-cycles",
+ "BriefDescription": "Total cycles. Be wary of what happens during CPU frequency scaling [This event is an alias of cycles].",
+ "LegacyConfigCode": "0"
+ },
+ {
+ "EventName": "cycles",
+ "BriefDescription": "Total cycles. Be wary of what happens during CPU frequency scaling [This event is an alias of cpu-cycles].",
+ "LegacyConfigCode": "0"
+ },
+ {
+ "EventName": "instructions",
+ "BriefDescription": "Retired instructions. Be careful, these can be affected by various issues, most notably hardware interrupt counts.",
+ "LegacyConfigCode": "1"
+ },
+ {
+ "EventName": "cache-references",
+ "BriefDescription": "Cache accesses. Usually this indicates Last Level Cache accesses but this may vary depending on your CPU. This may include prefetches and coherency messages; again this depends on the design of your CPU.",
+ "LegacyConfigCode": "2"
+ },
+ {
+ "EventName": "cache-misses",
+ "BriefDescription": "Cache misses. Usually this indicates Last Level Cache misses; this is intended to be used in conjunction with the PERF_COUNT_HW_CACHE_REFERENCES event to calculate cache miss rates.",
+ "LegacyConfigCode": "3"
+ },
+ {
+ "EventName": "branches",
+ "BriefDescription": "Retired branch instructions [This event is an alias of branch-instructions].",
+ "LegacyConfigCode": "4"
+ },
+ {
+ "EventName": "branch-instructions",
+ "BriefDescription": "Retired branch instructions [This event is an alias of branches].",
+ "LegacyConfigCode": "4"
+ },
+ {
+ "EventName": "branch-misses",
+ "BriefDescription": "Mispredicted branch instructions.",
+ "LegacyConfigCode": "5"
+ },
+ {
+ "EventName": "bus-cycles",
+ "BriefDescription": "Bus cycles, which can be different from total cycles.",
+ "LegacyConfigCode": "6"
+ },
+ {
+ "EventName": "stalled-cycles-frontend",
+ "BriefDescription": "Stalled cycles during issue [This event is an alias of idle-cycles-frontend].",
+ "LegacyConfigCode": "7"
+ },
+ {
+ "EventName": "idle-cycles-frontend",
+    "BriefDescription": "Stalled cycles during issue [This event is an alias of stalled-cycles-frontend].",
+ "LegacyConfigCode": "7"
+ },
+ {
+ "EventName": "stalled-cycles-backend",
+ "BriefDescription": "Stalled cycles during retirement [This event is an alias of idle-cycles-backend].",
+ "LegacyConfigCode": "8"
+ },
+ {
+ "EventName": "idle-cycles-backend",
+ "BriefDescription": "Stalled cycles during retirement [This event is an alias of stalled-cycles-backend].",
+ "LegacyConfigCode": "8"
+ },
+ {
+ "EventName": "ref-cycles",
+ "BriefDescription": "Total cycles; not affected by CPU frequency scaling.",
+ "LegacyConfigCode": "9"
+ }
+]
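
The LegacyConfigCode values here track the kernel's PERF_COUNT_HW_* enumeration from include/uapi/linux/perf_event.h, which is also why alias pairs share a code. For reference:

# LegacyConfigCode -> PERF_COUNT_HW_* (include/uapi/linux/perf_event.h)
PERF_COUNT_HW = {
    "cpu-cycles": 0,               # PERF_COUNT_HW_CPU_CYCLES
    "instructions": 1,             # PERF_COUNT_HW_INSTRUCTIONS
    "cache-references": 2,         # PERF_COUNT_HW_CACHE_REFERENCES
    "cache-misses": 3,             # PERF_COUNT_HW_CACHE_MISSES
    "branch-instructions": 4,      # PERF_COUNT_HW_BRANCH_INSTRUCTIONS
    "branch-misses": 5,            # PERF_COUNT_HW_BRANCH_MISSES
    "bus-cycles": 6,               # PERF_COUNT_HW_BUS_CYCLES
    "stalled-cycles-frontend": 7,  # PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
    "stalled-cycles-backend": 8,   # PERF_COUNT_HW_STALLED_CYCLES_BACKEND
    "ref-cycles": 9,               # PERF_COUNT_HW_REF_CPU_CYCLES
}
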
diff --git a/tools/perf/pmu-events/arch/common/common/metrics.json b/tools/perf/pmu-events/arch/common/common/metrics.json
new file mode 100644
index 000000000000..0d010b3ebc6d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/common/common/metrics.json
@@ -0,0 +1,151 @@
+[
+ {
+ "BriefDescription": "Average CPU utilization",
+ "MetricExpr": "(software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@) / (duration_time * 1e9)",
+ "MetricGroup": "Default",
+ "MetricName": "CPUs_utilized",
+ "ScaleUnit": "1CPUs",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "DefaultShowEvents": "1"
+ },
+ {
+ "BriefDescription": "Context switches per CPU second",
+ "MetricExpr": "(software@context\\-switches\\,name\\=context\\-switches@ * 1e9) / (software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@)",
+ "MetricGroup": "Default",
+ "MetricName": "cs_per_second",
+ "ScaleUnit": "1cs/sec",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "DefaultShowEvents": "1"
+ },
+ {
+ "BriefDescription": "Process migrations to a new CPU per CPU second",
+ "MetricExpr": "(software@cpu\\-migrations\\,name\\=cpu\\-migrations@ * 1e9) / (software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@)",
+ "MetricGroup": "Default",
+ "MetricName": "migrations_per_second",
+ "ScaleUnit": "1migrations/sec",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "DefaultShowEvents": "1"
+ },
+ {
+ "BriefDescription": "Page faults per CPU second",
+ "MetricExpr": "(software@page\\-faults\\,name\\=page\\-faults@ * 1e9) / (software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@)",
+ "MetricGroup": "Default",
+ "MetricName": "page_faults_per_second",
+ "ScaleUnit": "1faults/sec",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "DefaultShowEvents": "1"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle",
+ "MetricExpr": "instructions / cpu\\-cycles",
+ "MetricGroup": "Default",
+ "MetricName": "insn_per_cycle",
+ "MetricThreshold": "insn_per_cycle < 1",
+ "ScaleUnit": "1instructions",
+ "DefaultShowEvents": "1"
+ },
+ {
+    "BriefDescription": "Max frontend or backend stalls per instruction",
+ "MetricExpr": "max(stalled\\-cycles\\-frontend, stalled\\-cycles\\-backend) / instructions",
+ "MetricGroup": "Default",
+ "MetricName": "stalled_cycles_per_instruction",
+ "DefaultShowEvents": "1"
+ },
+ {
+ "BriefDescription": "Frontend stalls per cycle",
+ "MetricExpr": "stalled\\-cycles\\-frontend / cpu\\-cycles",
+ "MetricGroup": "Default",
+ "MetricName": "frontend_cycles_idle",
+ "MetricThreshold": "frontend_cycles_idle > 0.1",
+ "DefaultShowEvents": "1"
+ },
+ {
+ "BriefDescription": "Backend stalls per cycle",
+ "MetricExpr": "stalled\\-cycles\\-backend / cpu\\-cycles",
+ "MetricGroup": "Default",
+ "MetricName": "backend_cycles_idle",
+ "MetricThreshold": "backend_cycles_idle > 0.2",
+ "DefaultShowEvents": "1"
+ },
+ {
+ "BriefDescription": "Cycles per CPU second",
+ "MetricExpr": "cpu\\-cycles / (software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@)",
+ "MetricGroup": "Default",
+ "MetricName": "cycles_frequency",
+ "ScaleUnit": "1GHz",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "DefaultShowEvents": "1"
+ },
+ {
+ "BriefDescription": "Branches per CPU second",
+ "MetricExpr": "branches / (software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@)",
+ "MetricGroup": "Default",
+ "MetricName": "branch_frequency",
+ "ScaleUnit": "1000M/sec",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "DefaultShowEvents": "1"
+ },
+ {
+ "BriefDescription": "Branch miss rate",
+ "MetricExpr": "branch\\-misses / branches",
+ "MetricGroup": "Default",
+ "MetricName": "branch_miss_rate",
+ "MetricThreshold": "branch_miss_rate > 0.05",
+ "ScaleUnit": "100%",
+ "DefaultShowEvents": "1"
+ },
+ {
+ "BriefDescription": "L1D miss rate",
+ "MetricExpr": "L1\\-dcache\\-load\\-misses / L1\\-dcache\\-loads",
+ "MetricGroup": "Default2",
+ "MetricName": "l1d_miss_rate",
+ "MetricThreshold": "l1d_miss_rate > 0.05",
+ "ScaleUnit": "100%",
+ "DefaultShowEvents": "1"
+ },
+ {
+ "BriefDescription": "LLC miss rate",
+ "MetricExpr": "LLC\\-load\\-misses / LLC\\-loads",
+ "MetricGroup": "Default2",
+ "MetricName": "llc_miss_rate",
+ "MetricThreshold": "llc_miss_rate > 0.05",
+ "ScaleUnit": "100%",
+ "DefaultShowEvents": "1"
+ },
+ {
+ "BriefDescription": "L1I miss rate",
+ "MetricExpr": "L1\\-icache\\-load\\-misses / L1\\-icache\\-loads",
+ "MetricGroup": "Default3",
+ "MetricName": "l1i_miss_rate",
+ "MetricThreshold": "l1i_miss_rate > 0.05",
+ "ScaleUnit": "100%",
+ "DefaultShowEvents": "1"
+ },
+ {
+ "BriefDescription": "dTLB miss rate",
+ "MetricExpr": "dTLB\\-load\\-misses / dTLB\\-loads",
+ "MetricGroup": "Default3",
+ "MetricName": "dtlb_miss_rate",
+ "MetricThreshold": "dtlb_miss_rate > 0.05",
+ "ScaleUnit": "100%",
+ "DefaultShowEvents": "1"
+ },
+ {
+ "BriefDescription": "iTLB miss rate",
+ "MetricExpr": "iTLB\\-load\\-misses / iTLB\\-loads",
+ "MetricGroup": "Default3",
+ "MetricName": "itlb_miss_rate",
+ "MetricThreshold": "itlb_miss_rate > 0.05",
+ "ScaleUnit": "100%",
+ "DefaultShowEvents": "1"
+ },
+ {
+ "BriefDescription": "L1 prefetch miss rate",
+ "MetricExpr": "L1\\-dcache\\-prefetch\\-misses / L1\\-dcache\\-prefetches",
+ "MetricGroup": "Default4",
+ "MetricName": "l1_prefetch_miss_rate",
+ "MetricThreshold": "l1_prefetch_miss_rate > 0.05",
+ "ScaleUnit": "100%",
+ "DefaultShowEvents": "1"
+ }
+]
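
Each entry pairs a MetricExpr with an optional MetricThreshold that flags the metric when crossed. A simplified sketch of that evaluation for branch_miss_rate, with placeholder counts; this is not perf's expression parser:

branches, branch_misses = 10_000_000, 700_000
branch_miss_rate = branch_misses / branches          # MetricExpr
flagged = branch_miss_rate > 0.05                    # MetricThreshold
print(f"branch_miss_rate: {branch_miss_rate:.2%}"    # ScaleUnit "100%"
      + (" (above threshold)" if flagged else ""))
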
diff --git a/tools/perf/pmu-events/arch/common/common/software.json b/tools/perf/pmu-events/arch/common/common/software.json
new file mode 100644
index 000000000000..e6819ae219bb
--- /dev/null
+++ b/tools/perf/pmu-events/arch/common/common/software.json
@@ -0,0 +1,94 @@
+[
+ {
+ "Unit": "software",
+ "EventName": "cpu-clock",
+    "BriefDescription": "Per-CPU high-resolution timer-based event",
+ "ConfigCode": "0",
+ "ScaleUnit": "1e-6msec"
+ },
+ {
+ "Unit": "software",
+ "EventName": "task-clock",
+    "BriefDescription": "Per-task high-resolution timer-based event",
+ "ConfigCode": "1",
+ "ScaleUnit": "1e-6msec"
+ },
+ {
+ "Unit": "software",
+ "EventName": "faults",
+ "BriefDescription": "Number of page faults [This event is an alias of page-faults]",
+ "ConfigCode": "2"
+ },
+ {
+ "Unit": "software",
+ "EventName": "page-faults",
+ "BriefDescription": "Number of page faults [This event is an alias of faults]",
+ "ConfigCode": "2"
+ },
+ {
+ "Unit": "software",
+ "EventName": "context-switches",
+ "BriefDescription": "Number of context switches [This event is an alias of cs]",
+ "ConfigCode": "3"
+ },
+ {
+ "Unit": "software",
+ "EventName": "cs",
+ "BriefDescription": "Number of context switches [This event is an alias of context-switches]",
+ "ConfigCode": "3"
+ },
+ {
+ "Unit": "software",
+ "EventName": "cpu-migrations",
+ "BriefDescription": "Number of times a process has migrated to a new CPU [This event is an alias of migrations]",
+ "ConfigCode": "4"
+ },
+ {
+ "Unit": "software",
+ "EventName": "migrations",
+ "BriefDescription": "Number of times a process has migrated to a new CPU [This event is an alias of cpu-migrations]",
+ "ConfigCode": "4"
+ },
+ {
+ "Unit": "software",
+ "EventName": "minor-faults",
+ "BriefDescription": "Number of minor page faults. Minor faults don't require I/O to handle",
+ "ConfigCode": "5"
+ },
+ {
+ "Unit": "software",
+ "EventName": "major-faults",
+ "BriefDescription": "Number of major page faults. Major faults require I/O to handle",
+ "ConfigCode": "6"
+ },
+ {
+ "Unit": "software",
+ "EventName": "alignment-faults",
+ "BriefDescription": "Number of kernel handled memory alignment faults",
+ "ConfigCode": "7"
+ },
+ {
+ "Unit": "software",
+ "EventName": "emulation-faults",
+    "BriefDescription": "Number of unimplemented instruction faults handled by the kernel through emulation",
+ "ConfigCode": "8"
+ },
+ {
+ "Unit": "software",
+ "EventName": "dummy",
+ "BriefDescription": "A placeholder event that doesn't count anything",
+ "ConfigCode": "9"
+ },
+ {
+ "Unit": "software",
+ "EventName": "bpf-output",
+ "BriefDescription": "An event used by BPF programs to write to the perf ring buffer",
+ "ConfigCode": "10"
+ },
+ {
+ "Unit": "software",
+ "EventName": "cgroup-switches",
+ "BriefDescription": "Number of context switches to a task in a different cgroup",
+ "ConfigCode": "11"
+ }
+]
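
The ConfigCode values here are the kernel's PERF_COUNT_SW_* enumeration, so these events can also be opened directly with perf_event_open(2). A hedged sketch for the context-switches counter: the syscall number is architecture-specific (298 on x86_64, 241 on arm64), and only the first three perf_event_attr fields are populated, with the rest zero-filled:

import ctypes, os, platform, struct

PERF_TYPE_SOFTWARE = 1
PERF_COUNT_SW_CONTEXT_SWITCHES = 3  # ConfigCode "3" above
NR_perf_event_open = {"x86_64": 298, "aarch64": 241}[platform.machine()]

# Minimal perf_event_attr: u32 type, u32 size, u64 config, zeroed tail.
ATTR_SIZE = 128
attr = struct.pack("IIQ", PERF_TYPE_SOFTWARE, ATTR_SIZE,
                   PERF_COUNT_SW_CONTEXT_SWITCHES)
buf = ctypes.create_string_buffer(attr + b"\0" * (ATTR_SIZE - len(attr)),
                                  ATTR_SIZE)

libc = ctypes.CDLL(None, use_errno=True)
fd = libc.syscall(NR_perf_event_open, ctypes.byref(buf),
                  0, -1, -1, 0)  # this process, any CPU, no group
if fd < 0:
    raise OSError(ctypes.get_errno(), "perf_event_open failed")

os.sched_yield()  # encourage at least one context switch
count = struct.unpack("Q", os.read(fd, 8))[0]
print("context switches:", count)
os.close(fd)
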
diff --git a/tools/perf/pmu-events/arch/common/common/tool.json b/tools/perf/pmu-events/arch/common/common/tool.json
index 12f2ef1813a6..14d0d60a1976 100644
--- a/tools/perf/pmu-events/arch/common/common/tool.json
+++ b/tools/perf/pmu-events/arch/common/common/tool.json
@@ -70,5 +70,17 @@
"EventName": "system_tsc_freq",
"BriefDescription": "The amount a Time Stamp Counter (TSC) increases per second",
"ConfigCode": "12"
+ },
+ {
+ "Unit": "tool",
+ "EventName": "core_wide",
+    "BriefDescription": "1 if not SMT; with SMT, 1 if events are being gathered on all SMT threads, otherwise 0",
+ "ConfigCode": "13"
+ },
+ {
+ "Unit": "tool",
+ "EventName": "target_cpu",
+    "BriefDescription": "1 if CPUs are being analyzed, 0 if threads/processes are",
+ "ConfigCode": "14"
}
]
diff --git a/tools/perf/pmu-events/arch/riscv/mapfile.csv b/tools/perf/pmu-events/arch/riscv/mapfile.csv
index 0a7e7dcc81be..d5eea7f9aa9a 100644
--- a/tools/perf/pmu-events/arch/riscv/mapfile.csv
+++ b/tools/perf/pmu-events/arch/riscv/mapfile.csv
@@ -20,5 +20,6 @@
0x489-0x8000000000000008-0x[[:xdigit:]]+,v1,sifive/p550,core
0x489-0x8000000000000[1-6]08-0x[9b][[:xdigit:]]+,v1,sifive/p650,core
0x5b7-0x0-0x0,v1,thead/c900-legacy,core
+0x5b7-0x80000000090c0d00-0x2047000,v1,thead/c900-legacy,core
0x67e-0x80000000db0000[89]0-0x[[:xdigit:]]+,v1,starfive/dubhe-80,core
0x31e-0x8000000000008a45-0x[[:xdigit:]]+,v1,andes/ax45,core
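
Each mapfile row is roughly: a CPUID regex, a table version, the JSON directory, and an event type; perf matches the running CPU's identifier string against the first column. The new T-Head row can be sanity-checked the same way (the CPUID string below is a hypothetical example of the form the row is meant to match):

import re

row_regex = r"0x5b7-0x80000000090c0d00-0x2047000"
cpuid = "0x5b7-0x80000000090c0d00-0x2047000"  # hypothetical example
print(bool(re.fullmatch(row_regex, cpuid)))   # True -> thead/c900-legacy
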
diff --git a/tools/perf/pmu-events/arch/s390/cf_z16/pai_crypto.json b/tools/perf/pmu-events/arch/s390/cf_z16/pai_crypto.json
index cf8563d059b9..a82674f62409 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z16/pai_crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z16/pai_crypto.json
@@ -753,14 +753,14 @@
"EventCode": "4203",
"EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128",
"BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING ENCRYPTED TDEA 128",
- "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-TDEA- 128 function ending with CC=0"
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-TDEA-128 function ending with CC=0"
},
{
"Unit": "PAI-CRYPTO",
"EventCode": "4204",
"EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192",
"BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING ENCRYPTED TDEA 192",
- "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-TDEA- 192 function ending with CC=0"
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-TDEA-192 function ending with CC=0"
},
{
"Unit": "PAI-CRYPTO",
@@ -788,21 +788,21 @@
"EventCode": "4208",
"EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128",
"BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING ENCRYPTED AES 128",
- "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-AES- 128 function ending with CC=0"
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-AES-128 function ending with CC=0"
},
{
"Unit": "PAI-CRYPTO",
"EventCode": "4209",
"EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192",
"BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING ENCRYPTED AES 192",
- "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-AES- 192 function ending with CC=0"
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-AES-192 function ending with CC=0"
},
{
"Unit": "PAI-CRYPTO",
"EventCode": "4210",
- "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256A",
- "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING ENCRYPTED AES 256A",
- "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-AES- 256A function ending with CC=0"
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING ENCRYPTED AES 256",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-AES-256 function ending with CC=0"
},
{
"Unit": "PAI-CRYPTO",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json b/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json
index 3ab1d3a6638c..57b785307a85 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json
@@ -7,17 +7,17 @@
{
"BriefDescription": "Cycles per Instruction",
"MetricName": "cpi",
- "MetricExpr": "CPU_CYCLES / INSTRUCTIONS if has_event(INSTRUCTIONS) else 0"
+ "MetricExpr": "CPU_CYCLES / INSTRUCTIONS if has_event(CPU_CYCLES) else 0"
},
{
"BriefDescription": "Problem State Instruction Ratio",
"MetricName": "prbstate",
- "MetricExpr": "(PROBLEM_STATE_INSTRUCTIONS / INSTRUCTIONS) * 100 if has_event(INSTRUCTIONS) else 0"
+ "MetricExpr": "(PROBLEM_STATE_INSTRUCTIONS / INSTRUCTIONS) * 100 if has_event(PROBLEM_STATE_INSTRUCTIONS) else 0"
},
{
"BriefDescription": "Level One Miss per 100 Instructions",
"MetricName": "l1mp",
- "MetricExpr": "((L1I_DIR_WRITES + L1D_DIR_WRITES) / INSTRUCTIONS) * 100 if has_event(INSTRUCTIONS) else 0"
+ "MetricExpr": "((L1I_DIR_WRITES + L1D_DIR_WRITES) / INSTRUCTIONS) * 100 if has_event(L1I_DIR_WRITES) else 0"
},
{
"BriefDescription": "Percentage sourced from Level 2 cache",
@@ -52,7 +52,7 @@
{
"BriefDescription": "Estimated Instruction Complexity CPI infinite Level 1",
"MetricName": "est_cpi",
- "MetricExpr": "(CPU_CYCLES / INSTRUCTIONS) - (L1C_TLB2_MISSES / INSTRUCTIONS) if has_event(INSTRUCTIONS) else 0"
+ "MetricExpr": "(CPU_CYCLES / INSTRUCTIONS) - (L1C_TLB2_MISSES / INSTRUCTIONS) if has_event(CPU_CYCLES) else 0"
},
{
"BriefDescription": "Estimated Sourcing Cycles per Level 1 Miss",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z17/basic.json b/tools/perf/pmu-events/arch/s390/cf_z17/basic.json
new file mode 100644
index 000000000000..1023d47028ce
--- /dev/null
+++ b/tools/perf/pmu-events/arch/s390/cf_z17/basic.json
@@ -0,0 +1,58 @@
+[
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "0",
+ "EventName": "CPU_CYCLES",
+ "BriefDescription": "Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles, excluding the number of cycles while the CPU is in the wait state."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "1",
+ "EventName": "INSTRUCTIONS",
+ "BriefDescription": "Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "2",
+ "EventName": "L1I_DIR_WRITES",
+ "BriefDescription": "Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "3",
+ "EventName": "L1I_PENALTY_CYCLES",
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 instruction cache or unified cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "4",
+ "EventName": "L1D_DIR_WRITES",
+ "BriefDescription": "Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "5",
+ "EventName": "L1D_PENALTY_CYCLES",
+ "BriefDescription": "Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 data cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "32",
+ "EventName": "PROBLEM_STATE_CPU_CYCLES",
+ "BriefDescription": "Problem-State Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the CPU is in the problem state, excluding the number of cycles while the CPU is in the wait state."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "33",
+ "EventName": "PROBLEM_STATE_INSTRUCTIONS",
+ "BriefDescription": "Problem-State Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU while in the problem state."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z17/crypto6.json b/tools/perf/pmu-events/arch/s390/cf_z17/crypto6.json
new file mode 100644
index 000000000000..8b4380b8e489
--- /dev/null
+++ b/tools/perf/pmu-events/arch/s390/cf_z17/crypto6.json
@@ -0,0 +1,142 @@
+[
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "64",
+ "EventName": "PRNG_FUNCTIONS",
+ "BriefDescription": "PRNG Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "65",
+ "EventName": "PRNG_CYCLES",
+ "BriefDescription": "PRNG Cycle Count",
+    "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES/SHA coprocessor is busy performing the pseudorandom-number-generation functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "66",
+ "EventName": "PRNG_BLOCKED_FUNCTIONS",
+ "BriefDescription": "PRNG Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions that are issued by the CPU and are blocked because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "67",
+ "EventName": "PRNG_BLOCKED_CYCLES",
+ "BriefDescription": "PRNG Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the pseudorandom-number-generation functions issued by the CPU because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "68",
+ "EventName": "SHA_FUNCTIONS",
+ "BriefDescription": "SHA Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "69",
+ "EventName": "SHA_CYCLES",
+ "BriefDescription": "SHA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "70",
+ "EventName": "SHA_BLOCKED_FUNCTIONS",
+ "BriefDescription": "SHA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "71",
+ "EventName": "SHA_BLOCKED_CYCLES",
+ "BriefDescription": "SHA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "72",
+ "EventName": "DEA_FUNCTIONS",
+ "BriefDescription": "DEA Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "73",
+ "EventName": "DEA_CYCLES",
+ "BriefDescription": "DEA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "74",
+ "EventName": "DEA_BLOCKED_FUNCTIONS",
+ "BriefDescription": "DEA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "75",
+ "EventName": "DEA_BLOCKED_CYCLES",
+ "BriefDescription": "DEA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "76",
+ "EventName": "AES_FUNCTIONS",
+ "BriefDescription": "AES Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "77",
+ "EventName": "AES_CYCLES",
+ "BriefDescription": "AES Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "78",
+ "EventName": "AES_BLOCKED_FUNCTIONS",
+ "BriefDescription": "AES Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "79",
+ "EventName": "AES_BLOCKED_CYCLES",
+ "BriefDescription": "AES Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "80",
+ "EventName": "ECC_FUNCTION_COUNT",
+ "BriefDescription": "ECC Function Count",
+ "PublicDescription": "This counter counts the total number of the elliptic-curve cryptography (ECC) functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "81",
+ "EventName": "ECC_CYCLES_COUNT",
+ "BriefDescription": "ECC Cycles Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the ECC coprocessor is busy performing the elliptic-curve cryptography (ECC) functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "82",
+ "EventName": "ECC_BLOCKED_FUNCTION_COUNT",
+ "BriefDescription": "Ecc Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the elliptic-curve cryptography (ECC) functions that are issued by the CPU and are blocked because the ECC coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "83",
+ "EventName": "ECC_BLOCKED_CYCLES_COUNT",
+ "BriefDescription": "ECC Blocked Cycles Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the elliptic-curve cryptography (ECC) functions issued by the CPU because the ECC coprocessor is busy performing a function issued by another CPU."
+ }
+]
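
Note: the crypto counter set above is exposed through the s390 CPU-Measurement counter facility PMU, which registers as cpum_cf under /sys/bus/event_source/devices. Once these JSON files are built into perf, the events are usable symbolically, e.g. perf stat -e cpum_cf/SHA_FUNCTIONS/. For a program that wants one counter directly, a minimal C sketch using perf_event_open(2) follows; it assumes the PMU name is cpum_cf and that attr.config carries the EventCode value shown above (68 for SHA_FUNCTIONS), both of which should be verified on the target system.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Read the dynamic PMU type, e.g. /sys/bus/event_source/devices/cpum_cf/type */
static int pmu_type(const char *pmu)
{
	char path[128];
	int type = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/bus/event_source/devices/%s/type", pmu);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &type) != 1)
		type = -1;
	fclose(f);
	return type;
}

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd, type;

	type = pmu_type("cpum_cf");
	if (type < 0) {
		fprintf(stderr, "cpum_cf PMU not available\n");
		return 1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 68;	/* assumption: EventCode of SHA_FUNCTIONS maps to config */

	/* Count for the calling task on any CPU; the counter runs from creation */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* ... run a SHA workload here ... */

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("SHA_FUNCTIONS: %lld\n", count);
	close(fd);
	return 0;
}
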
diff --git a/tools/perf/pmu-events/arch/s390/cf_z17/extended.json b/tools/perf/pmu-events/arch/s390/cf_z17/extended.json
new file mode 100644
index 000000000000..e139482e217f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/s390/cf_z17/extended.json
@@ -0,0 +1,541 @@
+[
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "128",
+ "EventName": "L1D_RO_EXCL_WRITES",
+ "BriefDescription": "L1D Read-only Exclusive Writes",
+ "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "129",
+ "EventName": "DTLB2_WRITES",
+ "BriefDescription": "DTLB2 Writes",
+ "PublicDescription": "A translation has been written into The Translation Lookaside Buffer 2 (TLB2) and the request was made by the Level-1 Data cache. This is a replacement for what was provided for the DTLB on z13 and prior machines."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "130",
+ "EventName": "DTLB2_MISSES",
+ "BriefDescription": "DTLB2 Misses",
+ "PublicDescription": "A TLB2 miss is in progress for a request made by the Level-1 Data cache. Incremented by one for every TLB2 miss in progress for the Level-1 Data cache on this cycle. This is a replacement for what was provided for the DTLB on z13 and prior machines."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "131",
+ "EventName": "CRSTE_1MB_WRITES",
+ "BriefDescription": "One Megabyte CRSTE writes",
+ "PublicDescription": "A translation entry was written into the Combined Region and Segment Table Entry array in the Level-2 TLB for a one-megabyte page."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "132",
+ "EventName": "DTLB2_GPAGE_WRITES",
+ "BriefDescription": "DTLB2 Two-Gigabyte Page Writes",
+ "PublicDescription": "A translation entry for a two-gigabyte page was written into the Level-2 TLB."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "134",
+ "EventName": "ITLB2_WRITES",
+ "BriefDescription": "ITLB2 Writes",
+ "PublicDescription": "A translation entry has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the Level-1 Instruction cache. This is a replacement for what was provided for the ITLB on z13 and prior machines."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "135",
+ "EventName": "ITLB2_MISSES",
+ "BriefDescription": "ITLB2 Misses",
+ "PublicDescription": "A TLB2 miss is in progress for a request made by the Level-1 Instruction cache. Incremented by one for every TLB2 miss in progress for the Level-1 Instruction cache in a cycle. This is a replacement for what was provided for the ITLB on z13 and prior machines."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "137",
+ "EventName": "TLB2_PTE_WRITES",
+ "BriefDescription": "TLB2 Page Table Entry Writes",
+ "PublicDescription": "A translation entry was written into the Page Table Entry array in the Level-2 TLB."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "138",
+ "EventName": "TLB2_CRSTE_WRITES",
+ "BriefDescription": "TLB2 Combined Region and Segment Entry Writes",
+ "PublicDescription": "Translation entries were written into the Combined Region and Segment Table Entry array and the Page Table Entry array in the Level-2 TLB."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "139",
+ "EventName": "TLB2_ENGINES_BUSY",
+ "BriefDescription": "TLB2 Engines Busy",
+ "PublicDescription": "The number of Level-2 TLB translation engines busy in a cycle."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "140",
+ "EventName": "TX_C_TEND",
+ "BriefDescription": "Completed TEND instructions in constrained TX mode",
+ "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "141",
+ "EventName": "TX_NC_TEND",
+ "BriefDescription": "Completed TEND instructions in non-constrained TX mode",
+ "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "143",
+ "EventName": "L1C_TLB2_MISSES",
+ "BriefDescription": "L1C TLB2 Misses",
+ "PublicDescription": "Increments by one for any cycle where a Level-1 cache or Level-2 TLB miss is in progress."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "145",
+ "EventName": "DCW_REQ",
+ "BriefDescription": "Directory Write Level 1 Data Cache from L2-Cache",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestors Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "146",
+ "EventName": "DCW_REQ_IV",
+ "BriefDescription": "Directory Write Level 1 Data Cache from L2-Cache with Intervention",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestors Level-2 cache with intervention."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "147",
+ "EventName": "DCW_REQ_CHIP_HIT",
+ "BriefDescription": "Directory Write Level 1 Data Cache from L2-Cache with Chip HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestors Level-2 cache after using chip level horizontal persistence, Chip-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "148",
+ "EventName": "DCW_REQ_DRAWER_HIT",
+ "BriefDescription": "Directory Write Level 1 Data Cache from L2-Cache with Drawer HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestors Level-2 cache after using drawer level horizontal persistence, Drawer-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "149",
+ "EventName": "DCW_ON_CHIP",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip L2-Cache",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "150",
+ "EventName": "DCW_ON_CHIP_IV",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip L2-Cache with Intervention",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-2 cache with intervention."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "151",
+ "EventName": "DCW_ON_CHIP_CHIP_HIT",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip L2-Cache with Chip HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-2 cache after using chip level horizontal persistence, Chip-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "152",
+ "EventName": "DCW_ON_CHIP_DRAWER_HIT",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip L2-Cache with Drawer HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-2 cache after using drawer level horizontal persistence, Drawer-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "153",
+ "EventName": "DCW_ON_MODULE",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Module L2-Cache",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Module Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "154",
+ "EventName": "DCW_ON_DRAWER",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Drawer L2-Cache",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "155",
+ "EventName": "DCW_OFF_DRAWER",
+ "BriefDescription": "Directory Write Level 1 Data Cache from Off-Drawer L2-Cache",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "156",
+ "EventName": "DCW_ON_CHIP_MEMORY",
+ "BriefDescription": "Directory Write Level 1 Cache from On-Chip Memory",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from On-Chip memory."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "157",
+ "EventName": "DCW_ON_MODULE_MEMORY",
+ "BriefDescription": "Directory Write Level 1 Cache from On-Module Memory",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from On-Module memory."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "158",
+ "EventName": "DCW_ON_DRAWER_MEMORY",
+ "BriefDescription": "Directory Write Level 1 Cache from On-Drawer Memory",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer memory."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "159",
+ "EventName": "DCW_OFF_DRAWER_MEMORY",
+ "BriefDescription": "Directory Write Level 1 Cache from Off-Drawer Memory",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "160",
+ "EventName": "IDCW_ON_MODULE_IV",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Module Memory L2-Cache with Intervention",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an On-Module Level-2 cache with intervention."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "161",
+ "EventName": "IDCW_ON_MODULE_CHIP_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Module Memory L2-Cache with Chip Hit",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an On-Module Level-2 cache after using chip level horizontal persistence, Chip-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "162",
+ "EventName": "IDCW_ON_MODULE_DRAWER_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Module Memory L2-Cache with Drawer Hit",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an On-Module Level-2 cache after using drawer level horizontal persistence, Drawer-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "163",
+ "EventName": "IDCW_ON_DRAWER_IV",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Drawer L2-Cache with Intervention",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-2 cache with intervention."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "164",
+ "EventName": "IDCW_ON_DRAWER_CHIP_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Drawer L2-Cache with Chip Hit",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 instruction cache directory where the returned cache line was sourced from an On-Drawer Level-2 cache after using chip level horizontal persistence, Chip-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "165",
+ "EventName": "IDCW_ON_DRAWER_DRAWER_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Drawer L2-Cache with Drawer Hit",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 instruction cache directory where the returned cache line was sourced from an On-Drawer Level-2 cache after using drawer level horizontal persistence, Drawer-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "166",
+ "EventName": "IDCW_OFF_DRAWER_IV",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from Off-Drawer L2-Cache with Intervention",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-2 cache with intervention."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "167",
+ "EventName": "IDCW_OFF_DRAWER_CHIP_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from Off-Drawer L2-Cache with Chip Hit",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-2 cache after using chip level horizontal persistence, Chip-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "168",
+ "EventName": "IDCW_OFF_DRAWER_DRAWER_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from Off-Drawer L2-Cache with Drawer Hit",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-2 cache after using drawer level horizontal persistence, Drawer-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "169",
+ "EventName": "ICW_REQ",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from L2-Cache",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced the requestors Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "170",
+ "EventName": "ICW_REQ_IV",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from L2-Cache with Intervention",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the requestors Level-2 cache with intervention."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "171",
+ "EventName": "ICW_REQ_CHIP_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from L2-Cache with Chip HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the requestors Level-2 cache after using chip level horizontal persistence, Chip-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "172",
+ "EventName": "ICW_REQ_DRAWER_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from L2-Cache with Drawer HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the requestors Level-2 cache after using drawer level horizontal persistence, Drawer-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "173",
+ "EventName": "ICW_ON_CHIP",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip L2-Cache",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "174",
+ "EventName": "ICW_ON_CHIP_IV",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip L2-Cache with Intervention",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-2 cache with intervention."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "175",
+ "EventName": "ICW_ON_CHIP_CHIP_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip L2-Cache with Chip HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-2 cache after using chip level horizontal persistence, Chip-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "176",
+ "EventName": "ICW_ON_CHIP_DRAWER_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip L2-Cache with Drawer HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip level 2 cache after using drawer level horizontal persistence, Drawer-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "177",
+ "EventName": "ICW_ON_MODULE",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Module L2-Cache",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Module Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "178",
+ "EventName": "ICW_ON_DRAWER",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Drawer L2-Cache",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "179",
+ "EventName": "ICW_OFF_DRAWER",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from Off-Drawer L2-Cache",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "202",
+ "EventName": "CYCLES_SAMETHRD",
+ "BriefDescription": "CPU is not in wait state and CPU is running by itself",
+ "PublicDescription": "The number of cycles the CPU is not in wait state and the CPU is running by itself on the Core."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "203",
+ "EventName": "CYCLES_DIFFTHRD",
+ "BriefDescription": "CPU is not in wait state and CPU is running by another thread",
+ "PublicDescription": "The number of cycles the CPU is not in wait state and the CPU is running with another thread on the Core."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "204",
+ "EventName": "INST_SAMETHRD",
+ "BriefDescription": "Instructions executed on CPU by itself",
+ "PublicDescription": "The number of instructions executed on the CPU and the CPU is running by itself on the Core."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "205",
+ "EventName": "INST_DIFFTHRD",
+ "BriefDescription": "Instructions executed on CPU by another thread",
+ "PublicDescription": "The number of instructions executed on the CPU and the CPU is running with another thread on the Core."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "206",
+ "EventName": "WRONG_BRANCH_PREDICTION",
+ "BriefDescription": "Incorrect branch prediction on core",
+ "PublicDescription": "A count of the number of branches that were predicted incorrectly by the branch prediction logic in the Core. This includes incorrectly predicted branches that are executed in Firmware. Examples of instructions implemented in Firmware are complicated instructions like MVCL (Move Character Long) and PC (Program Call)."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "225",
+ "EventName": "VX_BCD_EXECUTION_SLOTS",
+ "BriefDescription": "Count finished vector arithmetic Binary Coded Decimal instructions",
+ "PublicDescription": "Count of floating point execution slots used for finished vector arithmetic Binary Coded Decimal instructions. Instructions: VAP, VSP, VMP, VMSP, VDP, VSDP, VRP, VLIP, VSRP, VPSOP, VCP, VTP, VPKZ, VUPKZ, VCVB, VCVBG, VCVD, VCVDG, VSCHP, VSCSHP, VCSPH, VCLZDP, VPKZR, VSRPR, VUPKZH, VUPKZL, VTZ, VUPH, VUPL, VCVBX, VCVDX."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "226",
+ "EventName": "DECIMAL_INSTRUCTIONS",
+ "BriefDescription": "Decimal instruction dispatched",
+ "PublicDescription": "Decimal instruction dispatched. Instructions: CVB, CVD, AP, CP, DP, ED, EDMK, MP, SRP, SP, ZAP, TP."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "232",
+ "EventName": "LAST_HOST_TRANSLATIONS",
+ "BriefDescription": "Last host translation done",
+ "PublicDescription": "Last Host Translation done."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "244",
+ "EventName": "TX_NC_TABORT",
+ "BriefDescription": "Aborted transactions in unconstrained TX mode",
+ "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "245",
+ "EventName": "TX_C_TABORT_NO_SPECIAL",
+ "BriefDescription": "Aborted transactions in constrained TX mode",
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "246",
+ "EventName": "TX_C_TABORT_SPECIAL",
+ "BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "248",
+ "EventName": "DFLT_ACCESS",
+ "BriefDescription": "Cycles CPU spent obtaining access to Deflate unit",
+ "PublicDescription": "Cycles CPU spent obtaining access to Deflate unit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "253",
+ "EventName": "DFLT_CYCLES",
+ "BriefDescription": "Cycles CPU is using Deflate unit",
+ "PublicDescription": "Cycles CPU is using Deflate unit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "256",
+ "EventName": "SORTL",
+ "BriefDescription": "Count SORTL instructions",
+ "PublicDescription": "Increments by one for every SORT LISTS (SORTL) instruction executed."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "265",
+ "EventName": "DFLT_CC",
+ "BriefDescription": "Increments DEFLATE CONVERSION CALL",
+ "PublicDescription": "Increments by one for every DEFLATE CONVERSION CALL (DFLTCC) instruction executed."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "266",
+ "EventName": "DFLT_CCFINISH",
+ "BriefDescription": "Increments completed DEFLATE CONVERSION CALL",
+ "PublicDescription": "Increments by one for every DEFLATE CONVERSION CALL (DFLTCC) instruction executed that ended in Condition Codes 0, 1 or 2."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "267",
+ "EventName": "NNPA_INVOCATIONS",
+ "BriefDescription": "NNPA Total invocations",
+ "PublicDescription": "Increments by one for every NEURAL NETWORK PROCESSING ASSIST (NNPA) instruction executed."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "268",
+ "EventName": "NNPA_COMPLETIONS",
+ "BriefDescription": "NNPA Total completions",
+ "PublicDescription": "Increments by one for every NEURAL NETWORK PROCESSING ASSIST (NNPA) instruction executed that ended in Condition Code 0."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "269",
+ "EventName": "NNPA_WAIT_LOCK",
+ "BriefDescription": "Cycles spent obtaining NNPA lock",
+ "PublicDescription": "Cycles CPU spent obtaining access to IBM Z Integrated Accelerator for AI."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "270",
+ "EventName": "NNPA_HOLD_LOCK",
+ "BriefDescription": "Cycles spent holding NNPA lock",
+ "PublicDescription": "Cycles CPU is using IBM Z Integrated Accelerator for AI."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "272",
+ "EventName": "NNPA_INST_ONCHIP",
+ "BriefDescription": "NNPA instructions used on-chip Integrated Accelerator",
+ "PublicDescription": "A NEURAL NETWORK PROCESSING ASSIST (NNPA) instruction has used the Local On-Chip IBM Z Integrated Accelerator for AI during its execution"
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "273",
+ "EventName": "NNPA_INST_OFFCHIP",
+ "BriefDescription": "NNPA instructions used off-chip Integrated Accelerator",
+ "PublicDescription": "A NEURAL NETWORK PROCESSING ASSIST (NNPA) instruction has used an Off-Chip IBM Z Integrated Accelerator for AI during its execution."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "274",
+ "EventName": "NNPA_INST_DIFF",
+ "BriefDescription": "NNPA instructions used different Integrated Accelerator",
+ "PublicDescription": "A NEURAL NETWORK PROCESSING ASSIST (NNPA) instruction has used a different IBM Z Integrated Accelerator for AI since it was last executed."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "276",
+ "EventName": "NNPA_4K_PREFETCH",
+ "BriefDescription": "Number of 4K prefetches for Integated Accelerator",
+ "PublicDescription": "Number of 4K prefetches done for a remote IBM Z Integated Accelerator for AI."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "277",
+ "EventName": "NNPA_COMPL_LOCK",
+ "BriefDescription": "A Perform Locked Operation has completed",
+ "PublicDescription": "A PERFORM LOCKED OPERATION (PLO) has completed."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "278",
+ "EventName": "NNPA_RETRY_LOCK",
+ "BriefDescription": "A Perform Locked Operation has been retried",
+ "PublicDescription": "A PERFORM LOCKED OPERATION (PLO) has been retried and the CPU did not use any special logic to allow the PLO to complete."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "279",
+ "EventName": "NNPA_RETRY_LOCK_WITH_PLO",
+ "BriefDescription": "A Perform Locked Operation has been retried using special logic",
+ "PublicDescription": "A PERFORM LOCKED OPERATION (PLO) has been retried and the CPU is using special logic to allow PLO to complete."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "448",
+ "EventName": "MT_DIAG_CYCLES_ONE_THR_ACTIVE",
+ "BriefDescription": "Cycle count with one thread active",
+ "PublicDescription": "Cycle count with one thread active"
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "449",
+ "EventName": "MT_DIAG_CYCLES_TWO_THR_ACTIVE",
+ "BriefDescription": "Cycle count with two threads active",
+ "PublicDescription": "Cycle count with two threads active"
+ }
+]
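
Counters in the extended set above are often most useful as ratios, for example the share of non-constrained transactions that abort (TX_NC_TABORT, event 244, relative to TX_NC_TEND, event 141). A sketch of reading both in one perf event group follows, under the same assumptions as the earlier example (cpum_cf PMU, EventCode mapping directly to attr.config); the PMU type below is a placeholder and should be read from sysfs in practice.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Layout returned by read(2) when the group leader sets PERF_FORMAT_GROUP */
struct group_read {
	unsigned long long nr;		/* number of events in the group */
	unsigned long long val[2];	/* leader value first, then the member */
};

static int open_counter(int type, unsigned long long config, int group_fd)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = config;		/* assumption: EventCode maps to config */
	attr.read_format = PERF_FORMAT_GROUP;
	return syscall(__NR_perf_event_open, &attr, 0, -1, group_fd, 0);
}

int main(void)
{
	int type = 4;			/* placeholder: read .../cpum_cf/type instead */
	struct group_read gr;
	int leader, member;

	leader = open_counter(type, 141, -1);		/* TX_NC_TEND */
	member = open_counter(type, 244, leader);	/* TX_NC_TABORT */
	if (leader < 0 || member < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* ... run a transactional-execution workload here ... */

	if (read(leader, &gr, sizeof(gr)) > 0 && gr.val[0])
		printf("abort ratio: %.2f%%\n", 100.0 * gr.val[1] / gr.val[0]);
	close(member);
	close(leader);
	return 0;
}

The same ratio is available from the command line once the JSON is wired into perf, e.g. perf stat -e cpum_cf/TX_NC_TEND/,cpum_cf/TX_NC_TABORT/ on the workload.
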
diff --git a/tools/perf/pmu-events/arch/s390/cf_z17/pai_crypto.json b/tools/perf/pmu-events/arch/s390/cf_z17/pai_crypto.json
new file mode 100644
index 000000000000..fd2eb536ecc7
--- /dev/null
+++ b/tools/perf/pmu-events/arch/s390/cf_z17/pai_crypto.json
@@ -0,0 +1,1213 @@
+[
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4096",
+ "EventName": "CRYPTO_ALL",
+ "BriefDescription": "CRYPTO ALL",
+ "PublicDescription": "Sums of all non zero cryptography counters"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4097",
+ "EventName": "KM_DEA",
+ "BriefDescription": "KM DEA",
+ "PublicDescription": "KM-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4098",
+ "EventName": "KM_TDEA_128",
+ "BriefDescription": "KM TDEA 128",
+ "PublicDescription": "KM-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4099",
+ "EventName": "KM_TDEA_192",
+ "BriefDescription": "KM TDEA 192",
+ "PublicDescription": "KM-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4100",
+ "EventName": "KM_ENCRYPTED_DEA",
+ "BriefDescription": "KM ENCRYPTED DEA",
+ "PublicDescription": "KM-Encrypted-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4101",
+ "EventName": "KM_ENCRYPTED_TDEA_128",
+ "BriefDescription": "KM ENCRYPTED TDEA 128",
+ "PublicDescription": "KM-Encrypted-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4102",
+ "EventName": "KM_ENCRYPTED_TDEA_192",
+ "BriefDescription": "KM ENCRYPTED TDEA 192",
+ "PublicDescription": "KM-Encrypted-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4103",
+ "EventName": "KM_AES_128",
+ "BriefDescription": "KM AES 128",
+ "PublicDescription": "KM-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4104",
+ "EventName": "KM_AES_192",
+ "BriefDescription": "KM AES 192",
+ "PublicDescription": "KM-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4105",
+ "EventName": "KM_AES_256",
+ "BriefDescription": "KM AES 256",
+ "PublicDescription": "KM-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4106",
+ "EventName": "KM_ENCRYPTED_AES_128",
+ "BriefDescription": "KM ENCRYPTED AES 128",
+ "PublicDescription": "KM-Encrypted-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4107",
+ "EventName": "KM_ENCRYPTED_AES_192",
+ "BriefDescription": "KM ENCRYPTED AES 192",
+ "PublicDescription": "KM-Encrypted-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4108",
+ "EventName": "KM_ENCRYPTED_AES_256",
+ "BriefDescription": "KM ENCRYPTED AES 256",
+ "PublicDescription": "KM-Encrypted-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4109",
+ "EventName": "KM_XTS_AES_128",
+ "BriefDescription": "KM XTS AES 128",
+ "PublicDescription": "KM-XTS-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4110",
+ "EventName": "KM_XTS_AES_256",
+ "BriefDescription": "KM XTS AES 256",
+ "PublicDescription": "KM-XTS-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4111",
+ "EventName": "KM_XTS_ENCRYPTED_AES_128",
+ "BriefDescription": "KM XTS ENCRYPTED AES 128",
+ "PublicDescription": "KM-XTS-Encrypted-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4112",
+ "EventName": "KM_XTS_ENCRYPTED_AES_256",
+ "BriefDescription": "KM XTS ENCRYPTED AES 256",
+ "PublicDescription": "KM-XTS-Encrypted-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4113",
+ "EventName": "KMC_DEA",
+ "BriefDescription": "KMC DEA",
+ "PublicDescription": "KMC-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4114",
+ "EventName": "KMC_TDEA_128",
+ "BriefDescription": "KMC TDEA 128",
+ "PublicDescription": "KMC-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4115",
+ "EventName": "KMC_TDEA_192",
+ "BriefDescription": "KMC TDEA 192",
+ "PublicDescription": "KMC-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4116",
+ "EventName": "KMC_ENCRYPTED_DEA",
+ "BriefDescription": "KMC ENCRYPTED DEA",
+ "PublicDescription": "KMC-Encrypted-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4117",
+ "EventName": "KMC_ENCRYPTED_TDEA_128",
+ "BriefDescription": "KMC ENCRYPTED TDEA 128",
+ "PublicDescription": "KMC-Encrypted-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4118",
+ "EventName": "KMC_ENCRYPTED_TDEA_192",
+ "BriefDescription": "KMC ENCRYPTED TDEA 192",
+ "PublicDescription": "KMC-Encrypted-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4119",
+ "EventName": "KMC_AES_128",
+ "BriefDescription": "KMC AES 128",
+ "PublicDescription": "KMC-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4120",
+ "EventName": "KMC_AES_192",
+ "BriefDescription": "KMC AES 192",
+ "PublicDescription": "KMC-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4121",
+ "EventName": "KMC_AES_256",
+ "BriefDescription": "KMC AES 256",
+ "PublicDescription": "KMC-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4122",
+ "EventName": "KMC_ENCRYPTED_AES_128",
+ "BriefDescription": "KMC ENCRYPTED AES 128",
+ "PublicDescription": "KMC-Encrypted-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4123",
+ "EventName": "KMC_ENCRYPTED_AES_192",
+ "BriefDescription": "KMC ENCRYPTED AES 192",
+ "PublicDescription": "KMC-Encrypted-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4124",
+ "EventName": "KMC_ENCRYPTED_AES_256",
+ "BriefDescription": "KMC ENCRYPTED AES 256",
+ "PublicDescription": "KMC-Encrypted-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4125",
+ "EventName": "KMC_PRNG",
+ "BriefDescription": "KMC PRNG",
+ "PublicDescription": "KMC-PRNG function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4126",
+ "EventName": "KMA_GCM_AES_128",
+ "BriefDescription": "KMA GCM AES 128",
+ "PublicDescription": "KMA-GCM-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4127",
+ "EventName": "KMA_GCM_AES_192",
+ "BriefDescription": "KMA GCM AES 192",
+ "PublicDescription": "KMA-GCM-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4128",
+ "EventName": "KMA_GCM_AES_256",
+ "BriefDescription": "KMA GCM AES 256",
+ "PublicDescription": "KMA-GCM-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4129",
+ "EventName": "KMA_GCM_ENCRYPTED_AES_128",
+ "BriefDescription": "KMA GCM ENCRYPTED AES 128",
+ "PublicDescription": "KMA-GCM-Encrypted-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4130",
+ "EventName": "KMA_GCM_ENCRYPTED_AES_192",
+ "BriefDescription": "KMA GCM ENCRYPTED AES 192",
+ "PublicDescription": "KMA-GCM-Encrypted-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4131",
+ "EventName": "KMA_GCM_ENCRYPTED_AES_256",
+ "BriefDescription": "KMA GCM ENCRYPTED AES 256",
+ "PublicDescription": "KMA-GCM-Encrypted-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4132",
+ "EventName": "KMF_DEA",
+ "BriefDescription": "KMF DEA",
+ "PublicDescription": "KMF-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4133",
+ "EventName": "KMF_TDEA_128",
+ "BriefDescription": "KMF TDEA 128",
+ "PublicDescription": "KMF-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4134",
+ "EventName": "KMF_TDEA_192",
+ "BriefDescription": "KMF TDEA 192",
+ "PublicDescription": "KMF-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4135",
+ "EventName": "KMF_ENCRYPTED_DEA",
+ "BriefDescription": "KMF ENCRYPTED DEA",
+ "PublicDescription": "KMF-Encrypted-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4136",
+ "EventName": "KMF_ENCRYPTED_TDEA_128",
+ "BriefDescription": "KMF ENCRYPTED TDEA 128",
+ "PublicDescription": "KMF-Encrypted-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4137",
+ "EventName": "KMF_ENCRYPTED_TDEA_192",
+ "BriefDescription": "KMF ENCRYPTED TDEA 192",
+ "PublicDescription": "KMF-Encrypted-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4138",
+ "EventName": "KMF_AES_128",
+ "BriefDescription": "KMF AES 128",
+ "PublicDescription": "KMF-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4139",
+ "EventName": "KMF_AES_192",
+ "BriefDescription": "KMF AES 192",
+ "PublicDescription": "KMF-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4140",
+ "EventName": "KMF_AES_256",
+ "BriefDescription": "KMF AES 256",
+ "PublicDescription": "KMF-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4141",
+ "EventName": "KMF_ENCRYPTED_AES_128",
+ "BriefDescription": "KMF ENCRYPTED AES 128",
+ "PublicDescription": "KMF-Encrypted-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4142",
+ "EventName": "KMF_ENCRYPTED_AES_192",
+ "BriefDescription": "KMF ENCRYPTED AES 192",
+ "PublicDescription": "KMF-Encrypted-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4143",
+ "EventName": "KMF_ENCRYPTED_AES_256",
+ "BriefDescription": "KMF ENCRYPTED AES 256",
+ "PublicDescription": "KMF-Encrypted-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4144",
+ "EventName": "KMCTR_DEA",
+ "BriefDescription": "KMCTR DEA",
+ "PublicDescription": "KMCTR-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4145",
+ "EventName": "KMCTR_TDEA_128",
+ "BriefDescription": "KMCTR TDEA 128",
+ "PublicDescription": "KMCTR-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4146",
+ "EventName": "KMCTR_TDEA_192",
+ "BriefDescription": "KMCTR TDEA 192",
+ "PublicDescription": "KMCTR-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4147",
+ "EventName": "KMCTR_ENCRYPTED_DEA",
+ "BriefDescription": "KMCTR ENCRYPTED DEA",
+ "PublicDescription": "KMCTR-Encrypted-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4148",
+ "EventName": "KMCTR_ENCRYPTED_TDEA_128",
+ "BriefDescription": "KMCTR ENCRYPTED TDEA 128",
+ "PublicDescription": "KMCTR-Encrypted-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4149",
+ "EventName": "KMCTR_ENCRYPTED_TDEA_192",
+ "BriefDescription": "KMCTR ENCRYPTED TDEA 192",
+ "PublicDescription": "KMCTR-Encrypted-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4150",
+ "EventName": "KMCTR_AES_128",
+ "BriefDescription": "KMCTR AES 128",
+ "PublicDescription": "KMCTR-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4151",
+ "EventName": "KMCTR_AES_192",
+ "BriefDescription": "KMCTR AES 192",
+ "PublicDescription": "KMCTR-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4152",
+ "EventName": "KMCTR_AES_256",
+ "BriefDescription": "KMCTR AES 256",
+ "PublicDescription": "KMCTR-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4153",
+ "EventName": "KMCTR_ENCRYPTED_AES_128",
+ "BriefDescription": "KMCTR ENCRYPTED AES 128",
+ "PublicDescription": "KMCTR-Encrypted-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4154",
+ "EventName": "KMCTR_ENCRYPTED_AES_192",
+ "BriefDescription": "KMCTR ENCRYPTED AES 192",
+ "PublicDescription": "KMCTR-Encrypted-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4155",
+ "EventName": "KMCTR_ENCRYPTED_AES_256",
+ "BriefDescription": "KMCTR ENCRYPTED AES 256",
+ "PublicDescription": "KMCTR-Encrypted-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4156",
+ "EventName": "KMO_DEA",
+ "BriefDescription": "KMO DEA",
+ "PublicDescription": "KMO-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4157",
+ "EventName": "KMO_TDEA_128",
+ "BriefDescription": "KMO TDEA 128",
+ "PublicDescription": "KMO-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4158",
+ "EventName": "KMO_TDEA_192",
+ "BriefDescription": "KMO TDEA 192",
+ "PublicDescription": "KMO-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4159",
+ "EventName": "KMO_ENCRYPTED_DEA",
+ "BriefDescription": "KMO ENCRYPTED DEA",
+ "PublicDescription": "KMO-Encrypted-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4160",
+ "EventName": "KMO_ENCRYPTED_TDEA_128",
+ "BriefDescription": "KMO ENCRYPTED TDEA 128",
+ "PublicDescription": "KMO-Encrypted-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4161",
+ "EventName": "KMO_ENCRYPTED_TDEA_192",
+ "BriefDescription": "KMO ENCRYPTED TDEA 192",
+ "PublicDescription": "KMO-Encrypted-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4162",
+ "EventName": "KMO_AES_128",
+ "BriefDescription": "KMO AES 128",
+ "PublicDescription": "KMO-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4163",
+ "EventName": "KMO_AES_192",
+ "BriefDescription": "KMO AES 192",
+ "PublicDescription": "KMO-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4164",
+ "EventName": "KMO_AES_256",
+ "BriefDescription": "KMO AES 256",
+ "PublicDescription": "KMO-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4165",
+ "EventName": "KMO_ENCRYPTED_AES_128",
+ "BriefDescription": "KMO ENCRYPTED AES 128",
+ "PublicDescription": "KMO-Encrypted-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4166",
+ "EventName": "KMO_ENCRYPTED_AES_192",
+ "BriefDescription": "KMO ENCRYPTED AES 192",
+ "PublicDescription": "KMO-Encrypted-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4167",
+ "EventName": "KMO_ENCRYPTED_AES_256",
+ "BriefDescription": "KMO ENCRYPTED AES 256",
+ "PublicDescription": "KMO-Encrypted-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4168",
+ "EventName": "KIMD_SHA_1",
+ "BriefDescription": "KIMD SHA 1",
+ "PublicDescription": "KIMD-SHA-1 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4169",
+ "EventName": "KIMD_SHA_256",
+ "BriefDescription": "KIMD SHA 256",
+ "PublicDescription": "KIMD-SHA-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4170",
+ "EventName": "KIMD_SHA_512",
+ "BriefDescription": "KIMD SHA 512",
+ "PublicDescription": "KIMD-SHA-512 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4171",
+ "EventName": "KIMD_SHA3_224",
+ "BriefDescription": "KIMD SHA3 224",
+ "PublicDescription": "KIMD-SHA3-224 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4172",
+ "EventName": "KIMD_SHA3_256",
+ "BriefDescription": "KIMD SHA3 256",
+ "PublicDescription": "KIMD-SHA3-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4173",
+ "EventName": "KIMD_SHA3_384",
+ "BriefDescription": "KIMD SHA3 384",
+ "PublicDescription": "KIMD-SHA3-384 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4174",
+ "EventName": "KIMD_SHA3_512",
+ "BriefDescription": "KIMD SHA3 512",
+ "PublicDescription": "KIMD-SHA3-512 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4175",
+ "EventName": "KIMD_SHAKE_128",
+ "BriefDescription": "KIMD SHAKE 128",
+ "PublicDescription": "KIMD-SHAKE-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4176",
+ "EventName": "KIMD_SHAKE_256",
+ "BriefDescription": "KIMD SHAKE 256",
+ "PublicDescription": "KIMD-SHAKE-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4177",
+ "EventName": "KIMD_GHASH",
+ "BriefDescription": "KIMD GHASH",
+ "PublicDescription": "KIMD-GHASH function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4178",
+ "EventName": "KLMD_SHA_1",
+ "BriefDescription": "KLMD SHA 1",
+ "PublicDescription": "KLMD-SHA-1 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4179",
+ "EventName": "KLMD_SHA_256",
+ "BriefDescription": "KLMD SHA 256",
+ "PublicDescription": "KLMD-SHA-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4180",
+ "EventName": "KLMD_SHA_512",
+ "BriefDescription": "KLMD SHA 512",
+ "PublicDescription": "KLMD-SHA-512 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4181",
+ "EventName": "KLMD_SHA3_224",
+ "BriefDescription": "KLMD SHA3 224",
+ "PublicDescription": "KLMD-SHA3-224 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4182",
+ "EventName": "KLMD_SHA3_256",
+ "BriefDescription": "KLMD SHA3 256",
+ "PublicDescription": "KLMD-SHA3-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4183",
+ "EventName": "KLMD_SHA3_384",
+ "BriefDescription": "KLMD SHA3 384",
+ "PublicDescription": "KLMD-SHA3-384 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4184",
+ "EventName": "KLMD_SHA3_512",
+ "BriefDescription": "KLMD SHA3 512",
+ "PublicDescription": "KLMD-SHA3-512 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4185",
+ "EventName": "KLMD_SHAKE_128",
+ "BriefDescription": "KLMD SHAKE 128",
+ "PublicDescription": "KLMD-SHAKE-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4186",
+ "EventName": "KLMD_SHAKE_256",
+ "BriefDescription": "KLMD SHAKE 256",
+ "PublicDescription": "KLMD-SHAKE-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4187",
+ "EventName": "KMAC_DEA",
+ "BriefDescription": "KMAC DEA",
+ "PublicDescription": "KMAC-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4188",
+ "EventName": "KMAC_TDEA_128",
+ "BriefDescription": "KMAC TDEA 128",
+ "PublicDescription": "KMAC-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4189",
+ "EventName": "KMAC_TDEA_192",
+ "BriefDescription": "KMAC TDEA 192",
+ "PublicDescription": "KMAC-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4190",
+ "EventName": "KMAC_ENCRYPTED_DEA",
+ "BriefDescription": "KMAC ENCRYPTED DEA",
+ "PublicDescription": "KMAC-Encrypted-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4191",
+ "EventName": "KMAC_ENCRYPTED_TDEA_128",
+ "BriefDescription": "KMAC ENCRYPTED TDEA 128",
+ "PublicDescription": "KMAC-Encrypted-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4192",
+ "EventName": "KMAC_ENCRYPTED_TDEA_192",
+ "BriefDescription": "KMAC ENCRYPTED TDEA 192",
+ "PublicDescription": "KMAC-Encrypted-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4193",
+ "EventName": "KMAC_AES_128",
+ "BriefDescription": "KMAC AES 128",
+ "PublicDescription": "KMAC-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4194",
+ "EventName": "KMAC_AES_192",
+ "BriefDescription": "KMAC AES 192",
+ "PublicDescription": "KMAC-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4195",
+ "EventName": "KMAC_AES_256",
+ "BriefDescription": "KMAC AES 256",
+ "PublicDescription": "KMAC-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4196",
+ "EventName": "KMAC_ENCRYPTED_AES_128",
+ "BriefDescription": "KMAC ENCRYPTED AES 128",
+ "PublicDescription": "KMAC-Encrypted-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4197",
+ "EventName": "KMAC_ENCRYPTED_AES_192",
+ "BriefDescription": "KMAC ENCRYPTED AES 192",
+ "PublicDescription": "KMAC-Encrypted-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4198",
+ "EventName": "KMAC_ENCRYPTED_AES_256",
+ "BriefDescription": "KMAC ENCRYPTED AES 256",
+ "PublicDescription": "KMAC-Encrypted-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4199",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_DEA",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING DEA",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4200",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_128",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING TDEA 128",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4201",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_192",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING TDEA 192",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4202",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_DEA",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING ENCRYPTED DEA",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4203",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING ENCRYPTED TDEA 128",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4204",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING ENCRYPTED TDEA 192",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4205",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_128",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING AES 128",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4206",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_192",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING AES 192",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4207",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING AES 256",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4208",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING ENCRYPTED AES 128",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4209",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING ENCRYPTED AES 192",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4210",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING ENCRYPTED AES 256",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4211",
+ "EventName": "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128",
+ "BriefDescription": "PCC COMPUTE XTS PARAMETER USING AES 128",
+ "PublicDescription": "PCC-Compute-XTS-Parameter-Using-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4212",
+ "EventName": "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256",
+ "BriefDescription": "PCC COMPUTE XTS PARAMETER USING AES 256",
+ "PublicDescription": "PCC-Compute-XTS-Parameter-Using-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4213",
+ "EventName": "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128",
+ "BriefDescription": "PCC COMPUTE XTS PARAMETER USING ENCRYPTED AES 128",
+ "PublicDescription": "PCC-Compute-XTS-Parameter-Using-Encrypted-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4214",
+ "EventName": "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_256",
+ "BriefDescription": "PCC COMPUTE XTS PARAMETER USING ENCRYPTED AES 256",
+ "PublicDescription": "PCC-Compute-XTS-Parameter-Using-Encrypted-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4215",
+ "EventName": "PCC_SCALAR_MULTIPLY_P256",
+ "BriefDescription": "PCC SCALAR MULTIPLY P256",
+ "PublicDescription": "PCC-Scalar-Multiply-P256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4216",
+ "EventName": "PCC_SCALAR_MULTIPLY_P384",
+ "BriefDescription": "PCC SCALAR MULTIPLY P384",
+ "PublicDescription": "PCC-Scalar-Multiply-P384 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4217",
+ "EventName": "PCC_SCALAR_MULTIPLY_P521",
+ "BriefDescription": "PCC SCALAR MULTIPLY P521",
+ "PublicDescription": "PCC-Scalar-Multiply-P521 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4218",
+ "EventName": "PCC_SCALAR_MULTIPLY_ED25519",
+ "BriefDescription": "PCC SCALAR MULTIPLY ED25519",
+ "PublicDescription": "PCC-Scalar-Multiply-Ed25519 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4219",
+ "EventName": "PCC_SCALAR_MULTIPLY_ED448",
+ "BriefDescription": "PCC SCALAR MULTIPLY ED448",
+ "PublicDescription": "PCC-Scalar-Multiply-Ed448 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4220",
+ "EventName": "PCC_SCALAR_MULTIPLY_X25519",
+ "BriefDescription": "PCC SCALAR MULTIPLY X25519",
+ "PublicDescription": "PCC-Scalar-Multiply-X25519 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4221",
+ "EventName": "PCC_SCALAR_MULTIPLY_X448",
+ "BriefDescription": "PCC SCALAR MULTIPLY X448",
+ "PublicDescription": "PCC-Scalar-Multiply-X448 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4222",
+ "EventName": "PRNO_SHA_512_DRNG",
+ "BriefDescription": "PRNO SHA 512 DRNG",
+ "PublicDescription": "PRNO-SHA-512-DRNG function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4223",
+ "EventName": "PRNO_TRNG_QUERY_RAW_TO_CONDITIONED_RATIO",
+ "BriefDescription": "PRNO TRNG QUERY RAW TO CONDITIONED RATIO",
+ "PublicDescription": "PRNO-TRNG-Query-Raw-to-Conditioned-Ratio function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4224",
+ "EventName": "PRNO_TRNG",
+ "BriefDescription": "PRNO TRNG",
+ "PublicDescription": "PRNO-TRNG function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4225",
+ "EventName": "KDSA_ECDSA_VERIFY_P256",
+ "BriefDescription": "KDSA ECDSA VERIFY P256",
+ "PublicDescription": "KDSA-ECDSA-Verify-P256 function ending with CC=0 or CC=2"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4226",
+ "EventName": "KDSA_ECDSA_VERIFY_P384",
+ "BriefDescription": "KDSA ECDSA VERIFY P384",
+ "PublicDescription": "KDSA-ECDSA-Verify-P384 function ending with CC=0 or CC=2"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4227",
+ "EventName": "KDSA_ECDSA_VERIFY_P521",
+ "BriefDescription": "KDSA ECDSA VERIFY P521",
+ "PublicDescription": "KDSA-ECDSA-Verify-P521 function ending with CC=0 or CC=2"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4228",
+ "EventName": "KDSA_ECDSA_SIGN_P256",
+ "BriefDescription": "KDSA ECDSA SIGN P256",
+ "PublicDescription": "KDSA-ECDSA-Sign-P256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4229",
+ "EventName": "KDSA_ECDSA_SIGN_P384",
+ "BriefDescription": "KDSA ECDSA SIGN P384",
+ "PublicDescription": "KDSA-ECDSA-Sign-P384 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4230",
+ "EventName": "KDSA_ECDSA_SIGN_P521",
+ "BriefDescription": "KDSA ECDSA SIGN P521",
+ "PublicDescription": "KDSA-ECDSA-Sign-P521 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4231",
+ "EventName": "KDSA_ENCRYPTED_ECDSA_SIGN_P256",
+ "BriefDescription": "KDSA ENCRYPTED ECDSA SIGN P256",
+ "PublicDescription": "KDSA-Encrypted-ECDSA-Sign-P256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4232",
+ "EventName": "KDSA_ENCRYPTED_ECDSA_SIGN_P384",
+ "BriefDescription": "KDSA ENCRYPTED ECDSA SIGN P384",
+ "PublicDescription": "KDSA-Encrypted-ECDSA-Sign-P384 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4233",
+ "EventName": "KDSA_ENCRYPTED_ECDSA_SIGN_P521",
+ "BriefDescription": "KDSA ENCRYPTED ECDSA SIGN P521",
+ "PublicDescription": "KDSA-Encrypted-ECDSA-Sign-P521 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4234",
+ "EventName": "KDSA_EDDSA_VERIFY_ED25519",
+ "BriefDescription": "KDSA EDDSA VERIFY ED25519",
+ "PublicDescription": "KDSA-EdDSA-Verify-Ed25519 function ending with CC=0 or CC=2"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4235",
+ "EventName": "KDSA_EDDSA_VERIFY_ED448",
+ "BriefDescription": "KDSA EDDSA VERIFY ED448",
+ "PublicDescription": "KDSA-EdDSA-Verify-Ed448 function ending with CC=0 or CC=2"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4236",
+ "EventName": "KDSA_EDDSA_SIGN_ED25519",
+ "BriefDescription": "KDSA EDDSA SIGN ED25519",
+ "PublicDescription": "KDSA-EdDSA-Sign-Ed25519 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4237",
+ "EventName": "KDSA_EDDSA_SIGN_ED448",
+ "BriefDescription": "KDSA EDDSA SIGN ED448",
+ "PublicDescription": "KDSA-EdDSA-Sign-Ed448 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4238",
+ "EventName": "KDSA_ENCRYPTED_EDDSA_SIGN_ED25519",
+ "BriefDescription": "KDSA ENCRYPTED EDDSA SIGN ED25519",
+ "PublicDescription": "KDSA-Encrypted-EdDSA-Sign-Ed25519 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4239",
+ "EventName": "KDSA_ENCRYPTED_EDDSA_SIGN_ED448",
+ "BriefDescription": "KDSA ENCRYPTED EDDSA SIGN ED448",
+ "PublicDescription": "KDSA-Encrypted-EdDSA-Sign-Ed448 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4240",
+ "EventName": "PCKMO_ENCRYPT_DEA_KEY",
+ "BriefDescription": "PCKMO ENCRYPT DEA KEY",
+ "PublicDescription": "PCKMO-Encrypt-DEA-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4241",
+ "EventName": "PCKMO_ENCRYPT_TDEA_128_KEY",
+ "BriefDescription": "PCKMO ENCRYPT TDEA 128 KEY",
+ "PublicDescription": "PCKMO-Encrypt-TDEA-128-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4242",
+ "EventName": "PCKMO_ENCRYPT_TDEA_192_KEY",
+ "BriefDescription": "PCKMO ENCRYPT TDEA 192 KEY",
+ "PublicDescription": "PCKMO-Encrypt-TDEA-192-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4243",
+ "EventName": "PCKMO_ENCRYPT_AES_128_KEY",
+ "BriefDescription": "PCKMO ENCRYPT AES 128 KEY",
+ "PublicDescription": "PCKMO-Encrypt-AES-128-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4244",
+ "EventName": "PCKMO_ENCRYPT_AES_192_KEY",
+ "BriefDescription": "PCKMO ENCRYPT AES 192 KEY",
+ "PublicDescription": "PCKMO-Encrypt-AES-192-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4245",
+ "EventName": "PCKMO_ENCRYPT_AES_256_KEY",
+ "BriefDescription": "PCKMO ENCRYPT AES 256 KEY",
+ "PublicDescription": "PCKMO-Encrypt-AES-256-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4246",
+ "EventName": "PCKMO_ENCRYPT_ECC_P256_KEY",
+ "BriefDescription": "PCKMO ENCRYPT ECC P256 KEY",
+ "PublicDescription": "PCKMO-Encrypt-ECC-P256-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4247",
+ "EventName": "PCKMO_ENCRYPT_ECC_P384_KEY",
+ "BriefDescription": "PCKMO ENCRYPT ECC P384 KEY",
+ "PublicDescription": "PCKMO-Encrypt-ECC-P384-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4248",
+ "EventName": "PCKMO_ENCRYPT_ECC_P521_KEY",
+ "BriefDescription": "PCKMO ENCRYPT ECC P521 KEY",
+ "PublicDescription": "PCKMO-Encrypt-ECC-P521-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4249",
+ "EventName": "PCKMO_ENCRYPT_ECC_ED25519_KEY",
+ "BriefDescription": "PCKMO ENCRYPT ECC ED25519 KEY",
+ "PublicDescription": "PCKMO-Encrypt-ECC-Ed25519-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4250",
+ "EventName": "PCKMO_ENCRYPT_ECC_ED448_KEY",
+ "BriefDescription": "PCKMO ENCRYPT ECC ED448 KEY",
+ "PublicDescription": "PCKMO-Encrypt-ECC-Ed448-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4251",
+ "EventName": "IBM_RESERVED_155",
+ "BriefDescription": "IBM RESERVED_155",
+ "PublicDescription": "Reserved for IBM use"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4252",
+ "EventName": "IBM_RESERVED_156",
+ "BriefDescription": "IBM RESERVED_156",
+ "PublicDescription": "Reserved for IBM use"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4253",
+ "EventName": "KM_FULL_XTS_AES_128",
+ "BriefDescription": "KM FULL XTS AES 128",
+ "PublicDescription": "KM-Full-XTS-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4254",
+ "EventName": "KM_FULL_XTS_AES_256",
+ "BriefDescription": "KM FULL XTS AES 256",
+ "PublicDescription": "KM-Full-XTS-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4255",
+ "EventName": "KM_FULL_XTS_ENCRYPTED_AES_128",
+ "BriefDescription": "KM FULL XTS ENCRYPTED AES 128",
+ "PublicDescription": "KM-Full-XTS-Encrypted-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4256",
+ "EventName": "KM_FULL_XTS_ENCRYPTED_AES_256",
+ "BriefDescription": "KM FULL XTS ENCRYPTED AES 256",
+ "PublicDescription": "KM-FULL-XTS-ENCRYPTED-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4257",
+ "EventName": "KMAC_HMAC_SHA_224",
+ "BriefDescription": "KMAC HMAC SHA 224",
+ "PublicDescription": "KMAC-HMAC-SHA-224 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4258",
+ "EventName": "KMAC_HMAC_SHA_256",
+ "BriefDescription": "KMAC HMAC SHA 256",
+ "PublicDescription": "KMAC-HMAC-SHA-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4259",
+ "EventName": "KMAC_HMAC_SHA_384",
+ "BriefDescription": "KMAC HMAC SHA 384",
+ "PublicDescription": "KMAC-HMAC-SHA-384 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4260",
+ "EventName": "KMAC_HMAC_SHA_512",
+ "BriefDescription": "KMAC HMAC SHA 512",
+ "PublicDescription": "KMAC-HMAC-SHA-512 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4261",
+ "EventName": "KMAC_HMAC_ENCRYPTED_SHA_224",
+ "BriefDescription": "KMAC HMAC ENCRYPTED SHA 224",
+ "PublicDescription": "KMAC-HMAC-Encrypted-SHA-224 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4262",
+ "EventName": "KMAC_HMAC_ENCRYPTED_SHA_256",
+ "BriefDescription": "KMAC HMAC ENCRYPTED SHA 256",
+ "PublicDescription": "KMAC-HMAC-Encrypted-SHA-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4263",
+ "EventName": "KMAC_HMAC_ENCRYPTED_SHA_384",
+ "BriefDescription": "KMAC HMAC ENCRYPTED SHA 384",
+ "PublicDescription": "KMAC-HMAC-Encrypted-SHA-384 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4264",
+ "EventName": "KMAC_HMAC_ENCRYPTED_SHA_512",
+ "BriefDescription": "KMAC HMAC ENCRYPTED SHA 512",
+ "PublicDescription": "KMAC-HMAC-Encrypted-SHA-512 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4265",
+ "EventName": "PCKMO_ENCRYPT_HMAC_512_KEY",
+ "BriefDescription": "PCKMO ENCRYPT HMAC 512 KEY",
+ "PublicDescription": "PCKMO-Encrypt-HMAC-512-Key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4266",
+ "EventName": "PCKMO_ENCRYPT_HMAC_1024_KEY",
+ "BriefDescription": "PCKMO ENCRYPT HMAC 1024 KEY",
+ "PublicDescription": "PCKMO-Encrypt-HMAC-1024-Key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4267",
+ "EventName": "PCKMO_ENCRYPT_AES_XTS_128",
+ "BriefDescription": "PCKMO ENCRYPT AES XTS Double Key 128",
+ "PublicDescription": "PCKMO-ENCRYPT-AES-XTS-128 Double Key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4268",
+ "EventName": "PCKMO_ENCRYPT_AES_XTS_256",
+ "BriefDescription": "PCKMO ENCRYPT AES XTS Double Key 256",
+ "PublicDescription": "PCKMO-ENCRYPT-AES-XTS-256 Double Key function"
+ }
+]
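The PAI-CRYPTO entries above are simple per-function counters; once this JSON is built into perf, each one is addressable by its EventName. A minimal usage sketch, assuming the counters are exposed through the s390 pai_crypto PMU as on earlier machine generations, and that the running kernel permits access:

    # List the PAI-CRYPTO events this perf build knows about:
    $ perf list 2>/dev/null | grep -i pai_crypto

    # Count ECDSA P-256 sign operations system-wide for ten seconds:
    $ perf stat -e pai_crypto/KDSA_ECDSA_SIGN_P256/ -a sleep 10

Note that the verify events (KDSA_ECDSA_VERIFY_*, KDSA_EDDSA_VERIFY_*) also count CC=2 endings, i.e. failed verifications, per their PublicDescription strings.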
diff --git a/tools/perf/pmu-events/arch/s390/cf_z17/pai_ext.json b/tools/perf/pmu-events/arch/s390/cf_z17/pai_ext.json
new file mode 100644
index 000000000000..935e9f5763b4
--- /dev/null
+++ b/tools/perf/pmu-events/arch/s390/cf_z17/pai_ext.json
@@ -0,0 +1,261 @@
+[
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6144",
+ "EventName": "NNPA_ALL",
+ "BriefDescription": "NNPA ALL",
+ "PublicDescription": "Sums of all non zero NNPA counters"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6145",
+ "EventName": "NNPA_ADD",
+ "BriefDescription": "NNPA ADD function",
+ "PublicDescription": "NNPA-ADD function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6146",
+ "EventName": "NNPA_SUB",
+ "BriefDescription": "NNPA SUB function",
+ "PublicDescription": "NNPA-SUB function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6147",
+ "EventName": "NNPA_MUL",
+ "BriefDescription": "NNPA MUL function",
+ "PublicDescription": "NNPA-MUL function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6148",
+ "EventName": "NNPA_DIV",
+ "BriefDescription": "NNPA_DIV function",
+ "PublicDescription": "NNPA-DIV function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6149",
+ "EventName": "NNPA_MIN",
+ "BriefDescription": "NNPA MIN function",
+ "PublicDescription": "NNPA-MIN function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6150",
+ "EventName": "NNPA_MAX",
+ "BriefDescription": "NNPA MAX function",
+ "PublicDescription": "NNPA-MAX function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6151",
+ "EventName": "NNPA_LOG",
+ "BriefDescription": "NNPA LOG function",
+ "PublicDescription": "NNPA Log function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6152",
+ "EventName": "NNPA_EXP",
+ "BriefDescription": "NNPA EXP function",
+ "PublicDescription": "NNPA-EXP function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6153",
+ "EventName": "NNPA_IBM_RESERVED_9",
+ "BriefDescription": "Reserved for IBM use",
+ "PublicDescription": "Reserved for IBM use"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6154",
+ "EventName": "NNPA_RELU",
+ "BriefDescription": "NNPA RELU function",
+ "PublicDescription": "NNPA-RELU function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6155",
+ "EventName": "NNPA_TANH",
+ "BriefDescription": "NNPA TANH function",
+ "PublicDescription": "NNPA-TANH function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6156",
+ "EventName": "NNPA_SIGMOID",
+ "BriefDescription": "NNPA SIGMOID function",
+ "PublicDescription": "NNPA-SIGMOID function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6157",
+ "EventName": "NNPA_SOFTMAX",
+ "BriefDescription": "NNPA SOFTMAX function",
+ "PublicDescription": "NNPA-SOFTMAX function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6158",
+ "EventName": "NNPA_BATCHNORM",
+ "BriefDescription": "NNPA BATCHNORM function",
+ "PublicDescription": "NNPA-BATCHNORM function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6159",
+ "EventName": "NNPA_MAXPOOL2D",
+ "BriefDescription": "NNPA MAXPOOL2D function",
+ "PublicDescription": "NNPA-MAXPOOL2D function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6160",
+ "EventName": "NNPA_AVGPOOL2D",
+ "BriefDescription": "NNPA_AVGPOOL2D function",
+ "PublicDescription": "NNPA-AVGPOOL2D function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6161",
+ "EventName": "NNPA_LSTMACT",
+ "BriefDescription": "NNPA LSTMACT function",
+ "PublicDescription": "NNPA-LSTMACT function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6162",
+ "EventName": "NNPA_GRUACT",
+ "BriefDescription": "NNPA GRUACT function",
+ "PublicDescription": "NNPA-GRUACT function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6163",
+ "EventName": "NNPA_CONVOLUTION",
+ "BriefDescription": "NNPA CONVOLUTION function",
+ "PublicDescription": "NNPA-CONVOLUTION function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6164",
+ "EventName": "NNPA_MATMUL_OP",
+ "BriefDescription": "NNPA MATMUL OP function",
+ "PublicDescription": "NNPA-MATMUL-OP function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6165",
+ "EventName": "NNPA_MATMUL_OP_BCAST23",
+ "BriefDescription": "NNPA MATMUL OP BCAST23 function",
+ "PublicDescription": "NNPA-MATMUL-OP-BCAST23 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6166",
+ "EventName": "NNPA_SMALLBATCH",
+ "BriefDescription": "NNPA Counter 22",
+ "PublicDescription": "NNPA function with conditions as described in Principles of Operation"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6167",
+ "EventName": "NNPA_LARGEDIM",
+ "BriefDescription": "NNPA Counter 23",
+ "PublicDescription": "NNPA function with conditions as described in Principles of Operation"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6168",
+ "EventName": "NNPA_SMALLTENSOR",
+ "BriefDescription": "NNPA Counter 24",
+ "PublicDescription": "NNPA function with conditions as described in Principles of Operation"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6169",
+ "EventName": "NNPA_1MFRAME",
+ "BriefDescription": "NNPA Counter 25",
+ "PublicDescription": "NNPA function with conditions as described in Principles of Operation"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6170",
+ "EventName": "NNPA_2GFRAME",
+ "BriefDescription": "NNPA Counter 26",
+ "PublicDescription": "NNPA function with conditions as described in Principles of Operation"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6171",
+ "EventName": "NNPA_ACCESSEXCEPT",
+ "BriefDescription": "NNPA Counter 27",
+ "PublicDescription": "NNPA function with conditions as described in Principles of Operation"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6172",
+ "EventName": "NNPA_TRANSFORM",
+ "BriefDescription": "NNPA-TRANSFORM function",
+ "PublicDescription": "NNPA-TRANSFORM function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6173",
+ "EventName": "NNPA_GELU",
+ "BriefDescription": "NNPA-GELU function",
+ "PublicDescription": "NNPA-GELU function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6174",
+ "EventName": "NNPA_MOMENTS",
+ "BriefDescription": "NNPA-MOMENTS function",
+ "PublicDescription": "NNPA-MOMENTS function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6175",
+ "EventName": "NNPA_LAYERNORM",
+ "BriefDescription": "NNPA-LAYERNORM function",
+ "PublicDescription": "NNPA-LAYERNORM function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6176",
+ "EventName": "NNPA_MATMUL_OP_BCAST1",
+ "BriefDescription": "NNPA-MATMUL_OP_BCAST1 function",
+ "PublicDescription": "NNPA-MATMUL-OP-BCAST1 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6177",
+ "EventName": "NNPA_SQRT",
+ "BriefDescription": "NNPA-SQRT function",
+ "PublicDescription": "NNPA-SQRT function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6178",
+ "EventName": "NNPA_INVSQRT",
+ "BriefDescription": "NNPA-INVSQRT function",
+ "PublicDescription": "NNPA-INVSQRT function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6179",
+ "EventName": "NNPA_NORM",
+ "BriefDescription": "NNPA-NORM function",
+ "PublicDescription": "NNPA-NORM function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6180",
+ "EventName": "NNPA_REDUCE",
+ "BriefDescription": "NNPA-REDUCE function",
+ "PublicDescription": "NNPA-REDUCE function ending with CC=0"
+ }
+]
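NNPA_ALL is defined above as the sum of all non-zero NNPA counters, which makes it a convenient first probe before breaking activity down by function. A sketch under the assumption that the extension counters are exposed through the pai_ext PMU; the workload binary below is hypothetical:

    # Does the workload touch the NNPA at all?
    $ perf stat -e pai_ext/NNPA_ALL/ -- ./my_nnpa_workload

    # If so, attribute the activity to individual functions:
    $ perf stat -e pai_ext/NNPA_MATMUL_OP/,pai_ext/NNPA_CONVOLUTION/,pai_ext/NNPA_RELU/ -- ./my_nnpa_workload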
diff --git a/tools/perf/pmu-events/arch/s390/cf_z17/transaction.json b/tools/perf/pmu-events/arch/s390/cf_z17/transaction.json
new file mode 100644
index 000000000000..7ded6a5a76c0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/s390/cf_z17/transaction.json
@@ -0,0 +1,72 @@
+[
+ {
+ "BriefDescription": "Transaction count",
+ "MetricName": "transaction",
+ "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL if has_event(TX_C_TEND) else 0"
+ },
+ {
+ "BriefDescription": "Cycles per Instruction",
+ "MetricName": "cpi",
+ "MetricExpr": "CPU_CYCLES / INSTRUCTIONS if has_event(CPU_CYCLES) else 0"
+ },
+ {
+ "BriefDescription": "Problem State Instruction Ratio",
+ "MetricName": "prbstate",
+ "MetricExpr": "(PROBLEM_STATE_INSTRUCTIONS / INSTRUCTIONS) * 100 if has_event(PROBLEM_STATE_INSTRUCTIONS) else 0"
+ },
+ {
+ "BriefDescription": "Level One Miss per 100 Instructions",
+ "MetricName": "l1mp",
+ "MetricExpr": "((L1I_DIR_WRITES + L1D_DIR_WRITES) / INSTRUCTIONS) * 100 if has_event(L1I_DIR_WRITES) else 0"
+ },
+ {
+ "BriefDescription": "Percentage sourced from Level 2 cache",
+ "MetricName": "l2p",
+ "MetricExpr": "((DCW_REQ + DCW_REQ_IV + ICW_REQ + ICW_REQ_IV) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100 if has_event(DCW_REQ) else 0"
+ },
+ {
+ "BriefDescription": "Percentage sourced from Level 3 on same chip cache",
+ "MetricName": "l3p",
+ "MetricExpr": "((DCW_REQ_CHIP_HIT + DCW_ON_CHIP + DCW_ON_CHIP_IV + DCW_ON_CHIP_CHIP_HIT + ICW_REQ_CHIP_HIT + ICW_ON_CHIP + ICW_ON_CHIP_IV + ICW_ON_CHIP_CHIP_HIT) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100 if has_event(DCW_REQ_CHIP_HIT) else 0"
+ },
+ {
+ "BriefDescription": "Percentage sourced from Level 4 Local cache on same drawer",
+ "MetricName": "l4lp",
+ "MetricExpr": "((DCW_REQ_DRAWER_HIT + DCW_ON_CHIP_DRAWER_HIT + DCW_ON_MODULE + DCW_ON_DRAWER + IDCW_ON_MODULE_IV + IDCW_ON_MODULE_CHIP_HIT + IDCW_ON_MODULE_DRAWER_HIT + IDCW_ON_DRAWER_IV + IDCW_ON_DRAWER_CHIP_HIT + IDCW_ON_DRAWER_DRAWER_HIT + ICW_REQ_DRAWER_HIT + ICW_ON_CHIP_DRAWER_HIT + ICW_ON_MODULE + ICW_ON_DRAWER) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100 if has_event(DCW_REQ_DRAWER_HIT) else 0"
+ },
+ {
+ "BriefDescription": "Percentage sourced from Level 4 Remote cache on different book",
+ "MetricName": "l4rp",
+ "MetricExpr": "((DCW_OFF_DRAWER + IDCW_OFF_DRAWER_IV + IDCW_OFF_DRAWER_CHIP_HIT + IDCW_OFF_DRAWER_DRAWER_HIT + ICW_OFF_DRAWER) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100 if has_event(DCW_OFF_DRAWER) else 0"
+ },
+ {
+ "BriefDescription": "Percentage sourced from memory",
+ "MetricName": "memp",
+ "MetricExpr": "((DCW_ON_CHIP_MEMORY + DCW_ON_MODULE_MEMORY + DCW_ON_DRAWER_MEMORY + DCW_OFF_DRAWER_MEMORY) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100 if has_event(DCW_ON_CHIP_MEMORY) else 0"
+ },
+ {
+ "BriefDescription": "Cycles per Instructions from Finite cache/memory",
+ "MetricName": "finite_cpi",
+ "MetricExpr": "L1C_TLB2_MISSES / INSTRUCTIONS if has_event(L1C_TLB2_MISSES) else 0"
+ },
+ {
+ "BriefDescription": "Estimated Instruction Complexity CPI infinite Level 1",
+ "MetricName": "est_cpi",
+ "MetricExpr": "(CPU_CYCLES / INSTRUCTIONS) - (L1C_TLB2_MISSES / INSTRUCTIONS) if has_event(L1C_TLB2_MISSES) else 0"
+ },
+ {
+ "BriefDescription": "Estimated Sourcing Cycles per Level 1 Miss",
+ "MetricName": "scpl1m",
+ "MetricExpr": "L1C_TLB2_MISSES / (L1I_DIR_WRITES + L1D_DIR_WRITES) if has_event(L1C_TLB2_MISSES) else 0"
+ },
+ {
+ "BriefDescription": "Estimated TLB CPU percentage of Total CPU",
+ "MetricName": "tlb_percent",
+ "MetricExpr": "((DTLB2_MISSES + ITLB2_MISSES) / CPU_CYCLES) * (L1C_TLB2_MISSES / (L1I_PENALTY_CYCLES + L1D_PENALTY_CYCLES)) * 100 if has_event(CPU_CYCLES) else 0"
+ },
+ {
+ "BriefDescription": "Estimated Cycles per TLB Miss",
+ "MetricName": "tlb_miss",
+ "MetricExpr": "((DTLB2_MISSES + ITLB2_MISSES) / (DTLB2_WRITES + ITLB2_WRITES)) * (L1C_TLB2_MISSES / (L1I_PENALTY_CYCLES + L1D_PENALTY_CYCLES)) if has_event(DTLB2_MISSES) else 0"
+ }
+]
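Unlike the two PAI files, transaction.json carries derived metrics rather than raw events: each MetricExpr combines CPU-MF counters, and the has_event() guard lets the expression collapse to 0 on machines where the leading counter is unavailable. Metrics are selected by MetricName, for example:

    # Cycles per instruction and Level-1 misses per 100 instructions
    # for a (hypothetical) workload:
    $ perf stat -M cpi,l1mp -- ./my_workload

    # Transaction count across the whole system for five seconds:
    $ perf stat -M transaction -a sleep 5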
diff --git a/tools/perf/pmu-events/arch/s390/mapfile.csv b/tools/perf/pmu-events/arch/s390/mapfile.csv
index b22648d12751..6fdede50e7b2 100644
--- a/tools/perf/pmu-events/arch/s390/mapfile.csv
+++ b/tools/perf/pmu-events/arch/s390/mapfile.csv
@@ -6,3 +6,4 @@ Family-model,Version,Filename,EventType
^IBM.390[67].*[13]\.[1-5].[[:xdigit:]]+$,3,cf_z14,core
^IBM.856[12].*3\.6.[[:xdigit:]]+$,3,cf_z15,core
^IBM.393[12].*$,3,cf_z16,core
+^IBM.917[56].*$,3,cf_z17,core
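The new mapfile row routes machine types 9175 and 9176 (IBM z17) to the cf_z17 event directory: at startup, perf matches a family-model string derived from the CPU identification against the extended regex in the first column. The match can be sanity-checked by hand; the sample string below is made up, with the trailing fields standing in for version and serial data:

    $ echo 'IBM,9175,704,A01,3.7,002f' | grep -qE '^IBM.917[56].*$' && echo cf_z17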
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json b/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
index 147379cae37b..cae7c0cf02f2 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
@@ -1,56 +1,56 @@
[
{
"BriefDescription": "C10 residency percent per package",
- "MetricExpr": "cstate_pkg@c10\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c10\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C10_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C1 residency percent per core",
- "MetricExpr": "cstate_core@c1\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c1\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C1_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C2 residency percent per package",
- "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per package",
- "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per core",
- "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per package",
- "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per core",
- "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C8 residency percent per package",
- "MetricExpr": "cstate_pkg@c8\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c8\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C8_Pkg_Residency",
"ScaleUnit": "100%"
@@ -103,7 +103,7 @@
"MetricExpr": "tma_core_bound",
"MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_allocation_restriction",
- "MetricThreshold": "(tma_allocation_restriction >0.10) & ((tma_core_bound >0.10) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_allocation_restriction > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -113,7 +113,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.ALL@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
- "MetricThreshold": "(tma_backend_bound >0.10)",
+ "MetricThreshold": "tma_backend_bound > 0.1",
"MetricgroupNoGroup": "TopdownL1;Default",
"PublicDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that uops must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count",
"ScaleUnit": "100%",
@@ -125,7 +125,7 @@
"MetricExpr": "(5 * cpu_atom@CPU_CLK_UNHALTED.CORE@ - (cpu_atom@TOPDOWN_FE_BOUND.ALL@ + cpu_atom@TOPDOWN_BE_BOUND.ALL@ + cpu_atom@TOPDOWN_RETIRING.ALL@)) / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
"MetricName": "tma_bad_speculation",
- "MetricThreshold": "(tma_bad_speculation >0.15)",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
"PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ). Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
"ScaleUnit": "100%",
@@ -136,7 +136,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.BRANCH_DETECT@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_branch_detect",
- "MetricThreshold": "(tma_branch_detect >0.05) & ((tma_ifetch_latency >0.15) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_branch_detect > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"PublicDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -146,7 +146,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.MISPREDICT@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
"MetricName": "tma_branch_mispredicts",
- "MetricThreshold": "(tma_branch_mispredicts >0.05) & ((tma_bad_speculation >0.15))",
+ "MetricThreshold": "tma_branch_mispredicts > 0.05 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -156,7 +156,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.BRANCH_RESTEER@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_branch_resteer",
- "MetricThreshold": "(tma_branch_resteer >0.05) & ((tma_ifetch_latency >0.15) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_branch_resteer > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -165,7 +165,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.CISC@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_cisc",
- "MetricThreshold": "(tma_cisc >0.05) & ((tma_ifetch_bandwidth >0.10) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_cisc > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -174,7 +174,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_core_bound",
- "MetricThreshold": "(tma_core_bound >0.10) & ((tma_backend_bound >0.10))",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.1",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -184,7 +184,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.DECODE@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_decode",
- "MetricThreshold": "(tma_decode >0.05) & ((tma_ifetch_bandwidth >0.10) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_decode > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -193,7 +193,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.FASTNUKE@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_fast_nuke",
- "MetricThreshold": "(tma_fast_nuke >0.05) & ((tma_machine_clears >0.05) & ((tma_bad_speculation >0.15)))",
+ "MetricThreshold": "tma_fast_nuke > 0.05 & (tma_machine_clears > 0.05 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -203,7 +203,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ALL@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
- "MetricThreshold": "(tma_frontend_bound >0.20)",
+ "MetricThreshold": "tma_frontend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1;Default",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -213,7 +213,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ICACHE@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_icache_misses",
- "MetricThreshold": "(tma_icache_misses >0.05) & ((tma_ifetch_latency >0.15) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -222,7 +222,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
"MetricName": "tma_ifetch_bandwidth",
- "MetricThreshold": "(tma_ifetch_bandwidth >0.10) & ((tma_frontend_bound >0.20))",
+ "MetricThreshold": "tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -232,7 +232,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.FRONTEND_LATENCY@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
"MetricName": "tma_ifetch_latency",
- "MetricThreshold": "(tma_ifetch_latency >0.15) & ((tma_frontend_bound >0.20))",
+ "MetricThreshold": "tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -552,7 +552,7 @@
},
{
"BriefDescription": "Average CPU Utilization",
- "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.REF_TSC@ / TSC",
+ "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.REF_TSC@ / msr@tsc\\,cpu=cpu_atom@",
"MetricName": "tma_info_system_cpu_utilization",
"Unit": "cpu_atom"
},
@@ -567,7 +567,7 @@
"BriefDescription": "PerfMon Event Multiplexing accuracy indicator",
"MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.CORE_P@ / cpu_atom@CPU_CLK_UNHALTED.CORE@",
"MetricName": "tma_info_system_mux",
- "MetricThreshold": "((tma_info_system_mux > 1.1)|(tma_info_system_mux < 0.9))",
+ "MetricThreshold": "tma_info_system_mux > 1.1 | tma_info_system_mux < 0.9",
"Unit": "cpu_atom"
},
{
@@ -606,7 +606,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ITLB@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_itlb_misses",
- "MetricThreshold": "(tma_itlb_misses >0.05) & ((tma_ifetch_latency >0.15) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -615,7 +615,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
"MetricName": "tma_machine_clears",
- "MetricThreshold": "(tma_machine_clears >0.05) & ((tma_bad_speculation >0.15))",
+ "MetricThreshold": "tma_machine_clears > 0.05 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -625,7 +625,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.MEM_SCHEDULER@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_mem_scheduler",
- "MetricThreshold": "(tma_mem_scheduler >0.10) & ((tma_resource_bound >0.20) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_mem_scheduler > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -634,7 +634,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_non_mem_scheduler",
- "MetricThreshold": "(tma_non_mem_scheduler >0.10) & ((tma_resource_bound >0.20) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_non_mem_scheduler > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -643,7 +643,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.NUKE@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_nuke",
- "MetricThreshold": "(tma_nuke >0.05) & ((tma_machine_clears >0.05) & ((tma_bad_speculation >0.15)))",
+ "MetricThreshold": "tma_nuke > 0.05 & (tma_machine_clears > 0.05 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -652,7 +652,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.OTHER@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_other_fb",
- "MetricThreshold": "(tma_other_fb >0.05) & ((tma_ifetch_bandwidth >0.10) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_other_fb > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -661,7 +661,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.PREDECODE@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_predecode",
- "MetricThreshold": "(tma_predecode >0.05) & ((tma_ifetch_bandwidth >0.10) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_predecode > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -670,7 +670,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.REGISTER@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_register",
- "MetricThreshold": "(tma_register >0.10) & ((tma_resource_bound >0.20) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_register > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -679,7 +679,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.REORDER_BUFFER@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_reorder_buffer",
- "MetricThreshold": "(tma_reorder_buffer >0.10) & ((tma_resource_bound >0.20) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_reorder_buffer > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -688,7 +688,7 @@
"MetricExpr": "tma_backend_bound - tma_core_bound",
"MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_resource_bound",
- "MetricThreshold": "(tma_resource_bound >0.20) & ((tma_backend_bound >0.10))",
+ "MetricThreshold": "tma_resource_bound > 0.2 & tma_backend_bound > 0.1",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -699,7 +699,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_RETIRING.ALL@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
- "MetricThreshold": "(tma_retiring >0.75)",
+ "MetricThreshold": "tma_retiring > 0.75",
"MetricgroupNoGroup": "TopdownL1;Default",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -709,7 +709,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.SERIALIZATION@ / (5 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_serialization",
- "MetricThreshold": "(tma_serialization >0.10) & ((tma_resource_bound >0.20) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_serialization > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -721,7 +721,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations",
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
"MetricExpr": "(cpu_core@UOPS_DISPATCHED.PORT_0@ + cpu_core@UOPS_DISPATCHED.PORT_1@ + cpu_core@UOPS_DISPATCHED.PORT_5_11@ + cpu_core@UOPS_DISPATCHED.PORT_6@) / (5 * tma_info_core_core_clks)",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_alu_op_utilization",
@@ -734,13 +734,13 @@
"MetricExpr": "78 * cpu_core@ASSISTS.ANY@ / tma_info_thread_slots",
"MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
- "MetricThreshold": "tma_assists > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: ASSISTS.ANY",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates fraction of slots the CPU retired uops as a result of handing SSE to AVX* or AVX* to SSE transition Assists",
+ "BriefDescription": "This metric estimates fraction of slots the CPU retired uops as a result of handing SSE to AVX* or AVX* to SSE transition Assists.",
"MetricExpr": "63 * cpu_core@ASSISTS.SSE_AVX_MIX@ / tma_info_thread_slots",
"MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group",
"MetricName": "tma_avx_assists",
@@ -751,7 +751,7 @@
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "cpu_core@topdown\\-be\\-bound@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)",
"MetricGroup": "BvOB;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
@@ -768,13 +768,13 @@
"MetricName": "tma_bad_speculation",
"MetricThreshold": "tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
- "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches)",
+ "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
"MetricGroup": "BigFootprint;BvBC;Fed;Frontend;IcMiss;MemoryTLB",
"MetricName": "tma_bottleneck_big_code",
"MetricThreshold": "tma_bottleneck_big_code > 20",
@@ -790,35 +790,35 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
+ "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
+ "MetricGroup": "BvCB;Cor;tma_issueComp",
+ "MetricName": "tma_bottleneck_compute_bound_est",
+ "MetricThreshold": "tma_bottleneck_compute_bound_est > 20",
+ "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. Related metrics: ",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_fb_full / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_fb_full)))",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
"MetricGroup": "BvMB;Mem;MemoryBW;Offcore;tma_issueBW",
- "MetricName": "tma_bottleneck_cache_memory_bandwidth",
- "MetricThreshold": "tma_bottleneck_cache_memory_bandwidth > 20",
+ "MetricName": "tma_bottleneck_data_cache_memory_bandwidth",
+ "MetricThreshold": "tma_bottleneck_data_cache_memory_bandwidth > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full",
"Unit": "cpu_core"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_l1_latency_dependency / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_fb_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_lock_latency / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_fb_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_split_loads / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_fb_full)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_split_stores / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_store_latency / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)))",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l1_latency_dependency / (tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_lock_latency / (tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_loads / (tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_stores / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
"MetricGroup": "BvML;Mem;MemoryLat;Offcore;tma_issueLat",
- "MetricName": "tma_bottleneck_cache_memory_latency",
- "MetricThreshold": "tma_bottleneck_cache_memory_latency > 20",
+ "MetricName": "tma_bottleneck_data_cache_memory_latency",
+ "MetricThreshold": "tma_bottleneck_data_cache_memory_latency > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
- "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_serializing_operation + tma_ports_utilization) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_serializing_operation + tma_ports_utilization)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
- "MetricGroup": "BvCB;Cor;tma_issueComp",
- "MetricName": "tma_bottleneck_compute_bound_est",
- "MetricThreshold": "tma_bottleneck_compute_bound_est > 20",
- "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy",
- "Unit": "cpu_core"
- },
- {
"BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)",
- "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) - (1 - cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.MS\\,cmask\\=0x1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_mispredicts_resteers + tma_clears_resteers + tma_unknown_branches)) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_ms / (tma_mite + tma_dsb + tma_lsd + tma_ms))) - tma_bottleneck_big_code",
+ "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - (1 - cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.MS\\,cmask\\=1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_ms)) - tma_bottleneck_big_code",
"MetricGroup": "BvFB;Fed;FetchBW;Frontend",
"MetricName": "tma_bottleneck_instruction_fetch_bw",
"MetricThreshold": "tma_bottleneck_instruction_fetch_bw > 20",
@@ -826,7 +826,7 @@
},
{
"BriefDescription": "Total pipeline cost of irregular execution (e.g",
- "MetricExpr": "100 * ((1 - cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.MS\\,cmask\\=0x1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_mispredicts_resteers + tma_clears_resteers + tma_unknown_branches)) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_ms / (tma_mite + tma_dsb + tma_lsd + tma_ms)) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + cpu_core@RS.EMPTY_RESOURCE@ / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_serializing_operation + tma_ports_utilization) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
+ "MetricExpr": "100 * ((1 - cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.MS\\,cmask\\=1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_ms) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + cpu_core@RS.EMPTY_RESOURCE@ / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
"MetricGroup": "Bad;BvIO;Cor;Ret;tma_issueMS",
"MetricName": "tma_bottleneck_irregular_overhead",
"MetricThreshold": "tma_bottleneck_irregular_overhead > 10",
@@ -835,7 +835,7 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_fb_full)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_dtlb_store / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)))",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
"MetricGroup": "BvMT;Mem;MemoryTLB;Offcore;tma_issueTLB",
"MetricName": "tma_bottleneck_memory_data_tlbs",
"MetricThreshold": "tma_bottleneck_memory_data_tlbs > 20",
@@ -844,16 +844,16 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * tma_false_sharing / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
"MetricGroup": "BvMS;LockCont;Mem;Offcore;tma_issueSyncxn",
"MetricName": "tma_bottleneck_memory_synchronization",
"MetricThreshold": "tma_bottleneck_memory_synchronization > 10",
- "PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears",
+ "PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"Unit": "cpu_core"
},
{
"BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
- "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Bad;BadSpec;BrMispredicts;BvMP;tma_issueBM",
"MetricName": "tma_bottleneck_mispredictions",
"MetricThreshold": "tma_bottleneck_mispredictions > 20",
@@ -862,15 +862,15 @@
},
{
"BriefDescription": "Total pipeline cost of remaining bottlenecks in the back-end",
- "MetricExpr": "100 - (tma_bottleneck_big_code + tma_bottleneck_instruction_fetch_bw + tma_bottleneck_mispredictions + tma_bottleneck_cache_memory_bandwidth + tma_bottleneck_cache_memory_latency + tma_bottleneck_memory_data_tlbs + tma_bottleneck_memory_synchronization + tma_bottleneck_compute_bound_est + tma_bottleneck_irregular_overhead + tma_bottleneck_branching_overhead + tma_bottleneck_useful_work)",
+ "MetricExpr": "100 - (tma_bottleneck_big_code + tma_bottleneck_instruction_fetch_bw + tma_bottleneck_mispredictions + tma_bottleneck_data_cache_memory_bandwidth + tma_bottleneck_data_cache_memory_latency + tma_bottleneck_memory_data_tlbs + tma_bottleneck_memory_synchronization + tma_bottleneck_compute_bound_est + tma_bottleneck_irregular_overhead + tma_bottleneck_branching_overhead + tma_bottleneck_useful_work)",
"MetricGroup": "BvOB;Cor;Offcore",
"MetricName": "tma_bottleneck_other_bottlenecks",
"MetricThreshold": "tma_bottleneck_other_bottlenecks > 20",
- "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls",
+ "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls.",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead",
+ "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.",
"MetricExpr": "100 * (tma_retiring - (cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ + 2 * cpu_core@BR_INST_RETIRED.NEAR_CALL@ + cpu_core@INST_RETIRED.NOP@) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
"MetricGroup": "BvUW;Ret",
"MetricName": "tma_bottleneck_useful_work",
@@ -879,7 +879,7 @@
},
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
- "MetricExpr": "topdown\\-br\\-mispredict / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "cpu_core@topdown\\-br\\-mispredict@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)",
"MetricGroup": "BadSpec;BrMispredicts;BvMP;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
@@ -893,26 +893,26 @@
"MetricExpr": "cpu_core@INT_MISC.CLEAR_RESTEER_CYCLES@ / tma_info_thread_clks + tma_unknown_branches",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_branch_resteers",
- "MetricThreshold": "tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_l3_hit_latency, tma_store_latency",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.1 power-performance optimized state (Faster wakeup time; Smaller power savings)",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.1 power-performance optimized state (Faster wakeup time; Smaller power savings).",
"MetricExpr": "cpu_core@CPU_CLK_UNHALTED.C01@ / tma_info_thread_clks",
"MetricGroup": "C0Wait;TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_c01_wait",
- "MetricThreshold": "tma_c01_wait > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_c01_wait > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.2 power-performance optimized state (Slower wakeup time; Larger power savings)",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.2 power-performance optimized state (Slower wakeup time; Larger power savings).",
"MetricExpr": "cpu_core@CPU_CLK_UNHALTED.C02@ / tma_info_thread_clks",
"MetricGroup": "C0Wait;TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_c02_wait",
- "MetricThreshold": "tma_c02_wait > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_c02_wait > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -921,7 +921,7 @@
"MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
"MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_cisc",
- "MetricThreshold": "tma_cisc > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources. Sample with: FRONTEND_RETIRED.MS_FLOWS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -931,26 +931,26 @@
"MetricExpr": "(1 - tma_branch_mispredicts / tma_bad_speculation) * cpu_core@INT_MISC.CLEAR_RESTEER_CYCLES@ / tma_info_thread_clks",
"MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
"MetricName": "tma_clears_resteers",
- "MetricThreshold": "tma_clears_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that hit in the L2 cache",
+ "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that hit in the L2 cache.",
"MetricExpr": "max(0, tma_icache_misses - tma_code_l2_miss)",
"MetricGroup": "FetchLat;IcMiss;Offcore;TopdownL4;tma_L4_group;tma_icache_misses_group",
"MetricName": "tma_code_l2_hit",
- "MetricThreshold": "tma_code_l2_hit > 0.05 & tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_l2_hit > 0.05 & (tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that miss in the L2 cache",
+ "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that miss in the L2 cache.",
"MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD@ / tma_info_thread_clks",
"MetricGroup": "FetchLat;IcMiss;Offcore;TopdownL4;tma_L4_group;tma_icache_misses_group",
"MetricName": "tma_code_l2_miss",
- "MetricThreshold": "tma_code_l2_miss > 0.05 & tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_l2_miss > 0.05 & (tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -959,7 +959,7 @@
"MetricExpr": "max(0, tma_itlb_misses - tma_code_stlb_miss)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group",
"MetricName": "tma_code_stlb_hit",
- "MetricThreshold": "tma_code_stlb_hit > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_hit > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -968,35 +968,35 @@
"MetricExpr": "cpu_core@ITLB_MISSES.WALK_ACTIVE@ / tma_info_thread_clks",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group",
"MetricName": "tma_code_stlb_miss",
- "MetricThreshold": "tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for (instruction) code accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for (instruction) code accesses.",
"MetricExpr": "tma_code_stlb_miss * cpu_core@ITLB_MISSES.WALK_COMPLETED_2M_4M@ / (cpu_core@ITLB_MISSES.WALK_COMPLETED_4K@ + cpu_core@ITLB_MISSES.WALK_COMPLETED_2M_4M@)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group",
"MetricName": "tma_code_stlb_miss_2m",
- "MetricThreshold": "tma_code_stlb_miss_2m > 0.05 & tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss_2m > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for (instruction) code accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for (instruction) code accesses.",
"MetricExpr": "tma_code_stlb_miss * cpu_core@ITLB_MISSES.WALK_COMPLETED_4K@ / (cpu_core@ITLB_MISSES.WALK_COMPLETED_4K@ + cpu_core@ITLB_MISSES.WALK_COMPLETED_2M_4M@)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group",
"MetricName": "tma_code_stlb_miss_4k",
- "MetricThreshold": "tma_code_stlb_miss_4k > 0.05 & tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss_4k > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
- "MetricExpr": "((28 * tma_info_system_core_frequency - 3 * tma_info_system_core_frequency) * (cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ / (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ + cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD@))) + (27 * tma_info_system_core_frequency - 3 * tma_info_system_core_frequency) * cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS@) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
+ "MetricExpr": "(25 * tma_info_system_core_frequency * (cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ / (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ + cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD@))) + 24 * tma_info_system_core_frequency * cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS@) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
- "MetricThreshold": "tma_contested_accesses > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD, MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_bottleneck_memory_synchronization, tma_data_sharing, tma_false_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_bottleneck_memory_synchronization, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1007,26 +1007,27 @@
"MetricName": "tma_core_bound",
"MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations)",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
- "MetricExpr": "(27 * tma_info_system_core_frequency - 3 * tma_info_system_core_frequency) * (cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD@ + cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * (1 - cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ / (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ + cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD@))) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "24 * tma_info_system_core_frequency * (cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD@ + cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * (1 - cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ / (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ + cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD@))) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
"MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
- "MetricThreshold": "tma_data_sharing > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_false_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder",
- "MetricExpr": "(cpu_core@INST_DECODED.DECODERS\\,cmask\\=0x1@ - cpu_core@INST_DECODED.DECODERS\\,cmask\\=0x2@) / tma_info_core_core_clks / 2",
+ "MetricExpr": "(cpu_core@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu_core@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_core_clks / 2",
"MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group",
"MetricName": "tma_decoder0_alone",
- "MetricThreshold": "tma_decoder0_alone > 0.1 & tma_mite > 0.1 & tma_fetch_bandwidth > 0.2",
+ "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -1036,7 +1037,7 @@
"MetricExpr": "cpu_core@ARITH.DIV_ACTIVE@ / tma_info_thread_clks",
"MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
- "MetricThreshold": "tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -1046,7 +1047,7 @@
"MetricExpr": "cpu_core@MEMORY_ACTIVITY.STALLS_L3_MISS@ / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_dram_bound",
- "MetricThreshold": "tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_MISS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -1057,7 +1058,7 @@
"MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_dsb",
"MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2",
- "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1066,28 +1067,28 @@
"MetricExpr": "cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES@ / tma_info_thread_clks",
"MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_dsb_switches",
- "MetricThreshold": "tma_dsb_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
- "MetricExpr": "min(7 * cpu_core@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=0x1@ + cpu_core@DTLB_LOAD_MISSES.WALK_ACTIVE@, max(cpu_core@CYCLE_ACTIVITY.CYCLES_MEM_ANY@ - cpu_core@MEMORY_ACTIVITY.CYCLES_L1D_MISS@, 0)) / tma_info_thread_clks",
+ "MetricExpr": "min(7 * cpu_core@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + cpu_core@DTLB_LOAD_MISSES.WALK_ACTIVE@, max(cpu_core@CYCLE_ACTIVITY.CYCLES_MEM_ANY@ - cpu_core@MEMORY_ACTIVITY.CYCLES_L1D_MISS@, 0)) / tma_info_thread_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
- "MetricThreshold": "tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_store",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
- "MetricExpr": "(7 * cpu_core@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=0x1@ + cpu_core@DTLB_STORE_MISSES.WALK_ACTIVE@) / tma_info_core_core_clks",
+ "MetricExpr": "(7 * cpu_core@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + cpu_core@DTLB_STORE_MISSES.WALK_ACTIVE@) / tma_info_core_core_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
- "MetricThreshold": "tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_load",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1096,8 +1097,8 @@
"MetricExpr": "28 * tma_info_system_core_frequency * cpu_core@OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM@ / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
- "MetricThreshold": "tma_false_sharing > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1107,7 +1108,7 @@
"MetricGroup": "BvMB;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
- "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1118,18 +1119,18 @@
"MetricName": "tma_fetch_bandwidth",
"MetricThreshold": "tma_fetch_bandwidth > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1, FRONTEND_RETIRED.LATENCY_GE_1, FRONTEND_RETIRED.LATENCY_GE_2. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1;FRONTEND_RETIRED.LATENCY_GE_1;FRONTEND_RETIRED.LATENCY_GE_2. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues",
- "MetricExpr": "topdown\\-fetch\\-lat / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - cpu_core@INT_MISC.UOP_DROPPING@ / tma_info_thread_slots",
+ "MetricExpr": "cpu_core@topdown\\-fetch\\-lat@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@) - cpu_core@INT_MISC.UOP_DROPPING@ / tma_info_thread_slots",
"MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
"MetricName": "tma_fetch_latency",
"MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16, FRONTEND_RETIRED.LATENCY_GE_8",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1149,7 +1150,7 @@
"MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fp_arith",
"MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1159,16 +1160,16 @@
"MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group",
"MetricName": "tma_fp_assists",
"MetricThreshold": "tma_fp_assists > 0.1",
- "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals)",
+ "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals).",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles where the Floating-Point Divider unit was active",
+ "BriefDescription": "This metric represents fraction of cycles where the Floating-Point Divider unit was active.",
"MetricExpr": "cpu_core@ARITH.FPDIV_ACTIVE@ / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_divider_group",
"MetricName": "tma_fp_divider",
- "MetricThreshold": "tma_fp_divider > 0.2 & tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_fp_divider > 0.2 & (tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1177,8 +1178,8 @@
"MetricExpr": "cpu_core@FP_ARITH_INST_RETIRED.SCALAR@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_scalar",
- "MetricThreshold": "tma_fp_scalar > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1187,8 +1188,8 @@
"MetricExpr": "cpu_core@FP_ARITH_INST_RETIRED.VECTOR@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_vector",
- "MetricThreshold": "tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1197,8 +1198,8 @@
"MetricExpr": "(cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE@) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_128b",
- "MetricThreshold": "tma_fp_vector_128b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1207,41 +1208,41 @@
"MetricExpr": "(cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_256b",
- "MetricThreshold": "tma_fp_vector_256b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "topdown\\-fe\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - cpu_core@INT_MISC.UOP_DROPPING@ / tma_info_thread_slots",
+ "MetricExpr": "cpu_core@topdown\\-fe\\-bound@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@) - cpu_core@INT_MISC.UOP_DROPPING@ / tma_info_thread_slots",
"MetricGroup": "BvFB;BvIO;Default;PGO;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions , where one uop can represent multiple contiguous instructions",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions",
"MetricExpr": "tma_light_operations * cpu_core@INST_RETIRED.MACRO_FUSED@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fused_instructions",
"MetricThreshold": "tma_fused_instructions > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions , where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences",
- "MetricExpr": "topdown\\-heavy\\-ops / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
+ "MetricExpr": "cpu_core@topdown\\-heavy\\-ops@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_heavy_operations",
"MetricThreshold": "tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+]). Sample with: UOPS_RETIRED.HEAVY",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+]). Sample with: UOPS_RETIRED.HEAVY",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1250,8 +1251,8 @@
"MetricExpr": "cpu_core@ICACHE_DATA.STALLS@ / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
- "MetricThreshold": "tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS, FRONTEND_RETIRED.L1I_MISS",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1264,7 +1265,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for conditional non-taken branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for conditional non-taken branches (lower number means higher occurrence rate).",
"MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.COND_NTAKEN@",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_cond_ntaken",
@@ -1272,7 +1273,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for conditional taken branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for conditional taken branches (lower number means higher occurrence rate).",
"MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.COND_TAKEN@",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_cond_taken",
@@ -1280,15 +1281,15 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
"MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.INDIRECT@",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_indirect",
- "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1000",
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate).",
"MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.RET@",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_ret",
@@ -1320,7 +1321,7 @@
},
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck",
- "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_latency + tma_fetch_bandwidth)) * (tma_dsb / (tma_mite + tma_dsb + tma_lsd + tma_ms)))",
+ "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_bandwidth + tma_fetch_latency)) * (tma_dsb / (tma_dsb + tma_lsd + tma_mite + tma_ms)))",
"MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_bandwidth",
"MetricThreshold": "tma_info_botlnk_l2_dsb_bandwidth > 10",
@@ -1329,7 +1330,7 @@
},
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
- "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_mite / (tma_mite + tma_dsb + tma_lsd + tma_ms))",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_lsd + tma_mite + tma_ms))",
"MetricGroup": "DSBmiss;Fed;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_misses",
"MetricThreshold": "tma_info_botlnk_l2_dsb_misses > 10",
@@ -1338,10 +1339,11 @@
},
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
- "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
"MetricName": "tma_info_botlnk_l2_ic_misses",
"MetricThreshold": "tma_info_botlnk_l2_ic_misses > 5",
+ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: ",
"Unit": "cpu_core"
},
{
@@ -1412,12 +1414,12 @@
"MetricExpr": "(cpu_core@FP_ARITH_DISPATCHED.PORT_0@ + cpu_core@FP_ARITH_DISPATCHED.PORT_1@ + cpu_core@FP_ARITH_DISPATCHED.PORT_5@) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
- "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)",
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common).",
"Unit": "cpu_core"
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)",
- "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=0x1@",
+ "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "tma_info_core_ilp",
"Unit": "cpu_core"
@@ -1432,22 +1434,22 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details",
- "MetricExpr": "cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES@ / cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=0x1\\,edge\\=0x1@",
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES@ / cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
"MetricGroup": "DSBmiss",
"MetricName": "tma_info_frontend_dsb_switch_cost",
"Unit": "cpu_core"
},
{
"BriefDescription": "Average number of Uops issued by front-end when it issued something",
- "MetricExpr": "cpu_core@UOPS_ISSUED.ANY@ / cpu_core@UOPS_ISSUED.ANY\\,cmask\\=0x1@",
+ "MetricExpr": "cpu_core@UOPS_ISSUED.ANY@ / cpu_core@UOPS_ISSUED.ANY\\,cmask\\=1@",
"MetricGroup": "Fed;FetchBW",
"MetricName": "tma_info_frontend_fetch_upc",
"Unit": "cpu_core"
},
{
"BriefDescription": "Average Latency for L1 instruction cache misses",
- "MetricExpr": "cpu_core@ICACHE_DATA.STALLS@ / cpu_core@ICACHE_DATA.STALLS\\,cmask\\=0x1\\,edge\\=0x1@",
+ "MetricExpr": "cpu_core@ICACHE_DATA.STALLS@ / cpu_core@ICACHE_DATA.STALLS\\,cmask\\=1\\,edge@",
"MetricGroup": "Fed;FetchLat;IcMiss",
"MetricName": "tma_info_frontend_icache_miss_latency",
"Unit": "cpu_core"
@@ -1497,14 +1499,14 @@
},
{
"BriefDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection",
- "MetricExpr": "cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES@ / cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES\\,cmask\\=0x1\\,edge\\=0x1@",
+ "MetricExpr": "cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES@ / cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES\\,cmask\\=1\\,edge@",
"MetricGroup": "Fed",
"MetricName": "tma_info_frontend_unknown_branch_cost",
- "PublicDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection. See Unknown_Branches node",
+ "PublicDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection. See Unknown_Branches node.",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Branch instructions per taken branch",
+ "BriefDescription": "Branch instructions per taken branch.",
"MetricExpr": "cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ / cpu_core@BR_INST_RETIRED.NEAR_TAKEN@",
"MetricGroup": "Branches;Fed;PGO",
"MetricName": "tma_info_inst_mix_bptkbranch",
@@ -1524,7 +1526,7 @@
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
"MetricThreshold": "tma_info_inst_mix_iparith < 10",
- "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW",
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW.",
"Unit": "cpu_core"
},
{
@@ -1533,7 +1535,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx128",
"MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting",
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.",
"Unit": "cpu_core"
},
{
@@ -1542,7 +1544,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx256",
"MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting",
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.",
"Unit": "cpu_core"
},
{
@@ -1551,7 +1553,7 @@
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_dp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.",
"Unit": "cpu_core"
},
{
@@ -1560,7 +1562,7 @@
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_sp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.",
"Unit": "cpu_core"
},
{
@@ -1623,7 +1625,7 @@
"MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_INST_RETIRED.NEAR_TAKEN@",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
- "MetricThreshold": "tma_info_inst_mix_iptb < 6 * 2 + 1",
+ "MetricThreshold": "tma_info_inst_mix_iptb < 13",
"PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp",
"Unit": "cpu_core"
},
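The threshold rewrites in this hunk and the ones further down are constant folding, not retuning: each new literal equals the arithmetic it replaces. A quick check of every folded pair visible in this diff:

    # Old threshold expression on the left, folded replacement on the right:
    assert 6 * 2 + 1 == 13     # tma_info_inst_mix_iptb
    assert 6 * 1.5 == 9        # tma_info_thread_uptb
    assert 100e3 == 100000     # tma_info_pipeline_ipassist
    assert 1e6 == 1000000      # tma_info_system_ipfarbranch
    assert 12 - 3 == 9         # tma_l3_hit_latency: (12 * f - 3 * f) -> 9 * f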
@@ -1769,7 +1771,7 @@
},
{
"BriefDescription": "Average Parallel L2 cache miss demand Loads",
- "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD@ / cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=0x1@",
+ "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD@ / cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@",
"MetricGroup": "Memory_BW;Offcore",
"MetricName": "tma_info_memory_latency_load_l2_mlp",
"Unit": "cpu_core"
@@ -1848,8 +1850,8 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "",
- "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / (cpu_core@UOPS_EXECUTED.CORE_CYCLES_GE_1@ / 2 if #SMT_on else cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=0x1@)",
+ "BriefDescription": "Mem;Backend;CacheHits",
+ "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / (cpu_core@UOPS_EXECUTED.CORE_CYCLES_GE_1@ / 2 if #SMT_on else cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=1@)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute",
"Unit": "cpu_core"
@@ -1880,20 +1882,20 @@
"MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@ASSISTS.ANY@",
"MetricGroup": "MicroSeq;Pipeline;Ret;Retire",
"MetricName": "tma_info_pipeline_ipassist",
- "MetricThreshold": "tma_info_pipeline_ipassist < 100000",
+ "MetricThreshold": "tma_info_pipeline_ipassist < 100e3",
"PublicDescription": "Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired",
- "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=0x1@",
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
"MetricGroup": "Pipeline;Ret",
"MetricName": "tma_info_pipeline_retire",
"Unit": "cpu_core"
},
{
"BriefDescription": "Estimated fraction of retirement-cycles dealing with repeat instructions",
- "MetricExpr": "cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=0x1@",
+ "MetricExpr": "cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
"MetricGroup": "MicroSeq;Pipeline;Ret",
"MetricName": "tma_info_pipeline_strings_cycles",
"MetricThreshold": "tma_info_pipeline_strings_cycles > 0.1",
@@ -1909,7 +1911,7 @@
},
{
"BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]",
- "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / tma_info_system_time",
+ "MetricExpr": "tma_info_system_turbo_utilization * msr@tsc\\,cpu=cpu_core@ / 1e9 / tma_info_system_time",
"MetricGroup": "Power;Summary",
"MetricName": "tma_info_system_core_frequency",
"Unit": "cpu_core"
@@ -1923,7 +1925,7 @@
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.REF_TSC@ / TSC",
+ "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.REF_TSC@ / msr@tsc\\,cpu=cpu_core@",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized",
"Unit": "cpu_core"
@@ -1933,7 +1935,7 @@
"MetricExpr": "64 * (UNC_ARB_TRK_REQUESTS.ALL + UNC_ARB_COH_TRK_REQUESTS.ALL) / 1e6 / tma_info_system_time / 1e3",
"MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW",
"MetricName": "tma_info_system_dram_bw_use",
- "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth, tma_sq_full",
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth, tma_sq_full",
"Unit": "cpu_core"
},
{
@@ -1946,23 +1948,22 @@
},
{
"BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_INST_RETIRED.FAR_BRANCH@u",
"MetricGroup": "Branches;OS",
"MetricName": "tma_info_system_ipfarbranch",
- "MetricThreshold": "tma_info_system_ipfarbranch < 1000000",
+ "MetricThreshold": "tma_info_system_ipfarbranch < 1e6",
"Unit": "cpu_core"
},
{
"BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
+ "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.THREAD_P@k / cpu_core@INST_RETIRED.ANY_P@k",
"MetricGroup": "OS",
"MetricName": "tma_info_system_kernel_cpi",
- "ScaleUnit": "1per_instr",
"Unit": "cpu_core"
},
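The @k suffix in the rewritten expression is the metric-expression counterpart of the :k event modifier used by the old form: both restrict counting to kernel (ring-0) mode, and the events are now also qualified with the cpu_core PMU. With assumed kernel-only counts the metric evaluates as:

    # Illustrative kernel-mode-only counts:
    kernel_cycles = 800_000   # CPU_CLK_UNHALTED.THREAD_P counted in kernel mode
    kernel_insts = 500_000    # INST_RETIRED.ANY_P counted in kernel mode

    print(f"kernel CPI = {kernel_cycles / kernel_insts:.2f}")   # -> 1.60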
{
"BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / cpu_core@CPU_CLK_UNHALTED.THREAD@",
+ "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.THREAD_P@k / cpu_core@CPU_CLK_UNHALTED.THREAD@",
"MetricGroup": "OS",
"MetricName": "tma_info_system_kernel_utilization",
"MetricThreshold": "tma_info_system_kernel_utilization > 0.05",
@@ -1978,7 +1979,6 @@
},
{
"BriefDescription": "Average latency of data read request to external memory (in nanoseconds)",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(UNC_ARB_TRK_OCCUPANCY.RD + UNC_ARB_DAT_OCCUPANCY.RD) / UNC_ARB_TRK_REQUESTS.RD",
"MetricGroup": "Mem;MemoryLat;SoC",
"MetricName": "tma_info_system_mem_read_latency",
@@ -2030,7 +2030,14 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active",
+ "BriefDescription": "Measured Average Uncore Frequency for the SoC [GHz]",
+ "MetricExpr": "tma_info_system_socket_clks / 1e9 / tma_info_system_time",
+ "MetricGroup": "SoC",
+ "MetricName": "tma_info_system_uncore_frequency",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "cpu_core@CPU_CLK_UNHALTED.THREAD@",
"MetricGroup": "Pipeline",
"MetricName": "tma_info_thread_clks",
@@ -2041,7 +2048,6 @@
"MetricExpr": "1 / tma_info_thread_ipc",
"MetricGroup": "Mem;Pipeline",
"MetricName": "tma_info_thread_cpi",
- "ScaleUnit": "1per_instr",
"Unit": "cpu_core"
},
{
@@ -2049,7 +2055,7 @@
"MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / cpu_core@UOPS_ISSUED.ANY@",
"MetricGroup": "Cor;Pipeline",
"MetricName": "tma_info_thread_execute_per_issue",
- "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage",
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage.",
"Unit": "cpu_core"
},
{
@@ -2061,14 +2067,14 @@
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
- "MetricExpr": "slots",
+ "MetricExpr": "cpu_core@TOPDOWN.SLOTS@",
"MetricGroup": "TmaL1;tma_L1_group",
"MetricName": "tma_info_thread_slots",
"Unit": "cpu_core"
},
{
"BriefDescription": "Fraction of Physical Core issue-slots utilized by this Logical Processor",
- "MetricExpr": "(tma_info_thread_slots / (slots / 2) if #SMT_on else 1)",
+ "MetricExpr": "(tma_info_thread_slots / (cpu_core@TOPDOWN.SLOTS@ / 2) if #SMT_on else 1)",
"MetricGroup": "SMT;TmaL1;tma_L1_group",
"MetricName": "tma_info_thread_slots_utilization",
"Unit": "cpu_core"
@@ -2086,15 +2092,15 @@
"MetricExpr": "tma_retiring * tma_info_thread_slots / cpu_core@BR_INST_RETIRED.NEAR_TAKEN@",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
- "MetricThreshold": "tma_info_thread_uptb < 6 * 1.5",
+ "MetricThreshold": "tma_info_thread_uptb < 9",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles where the Integer Divider unit was active",
+ "BriefDescription": "This metric represents fraction of cycles where the Integer Divider unit was active.",
"MetricExpr": "tma_divider - tma_fp_divider",
"MetricGroup": "TopdownL4;tma_L4_group;tma_divider_group",
"MetricName": "tma_int_divider",
- "MetricThreshold": "tma_int_divider > 0.2 & tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_int_divider > 0.2 & (tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
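The re-parenthesized MetricThreshold above, and the many similar rewrites below, do not change behavior: "&" in these threshold expressions is a logical AND, and AND is associative, so explicitly grouping the right-hand conjuncts yields the same truth value for every input. Exhaustive check over stand-in booleans:

    # AND is associative, so both groupings agree on all eight input combinations:
    for a in (False, True):
        for b in (False, True):
            for c in (False, True):
                assert (a and b and c) == (a and (b and c))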
@@ -2104,7 +2110,7 @@
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_int_operations",
"MetricThreshold": "tma_int_operations > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents overall Integer (Int) select operations fraction the CPU has executed (retired). Vector/Matrix Int operations and shuffles are counted. Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain",
+ "PublicDescription": "This metric represents overall Integer (Int) select operations fraction the CPU has executed (retired). Vector/Matrix Int operations and shuffles are counted. Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2113,8 +2119,8 @@
"MetricExpr": "(cpu_core@INT_VEC_RETIRED.ADD_128@ + cpu_core@INT_VEC_RETIRED.VNNI_128@) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;IntVector;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group;tma_issue2P",
"MetricName": "tma_int_vector_128b",
- "MetricThreshold": "tma_int_vector_128b > 0.1 & tma_int_operations > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents 128-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_int_vector_128b > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents 128-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2123,8 +2129,8 @@
"MetricExpr": "(cpu_core@INT_VEC_RETIRED.ADD_256@ + cpu_core@INT_VEC_RETIRED.MUL_256@ + cpu_core@INT_VEC_RETIRED.VNNI_256@) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;IntVector;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group;tma_issue2P",
"MetricName": "tma_int_vector_256b",
- "MetricThreshold": "tma_int_vector_256b > 0.1 & tma_int_operations > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD/MUL or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_int_vector_256b > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD/MUL or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2133,8 +2139,8 @@
"MetricExpr": "cpu_core@ICACHE_TAG.STALLS@ / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
- "MetricThreshold": "tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS, FRONTEND_RETIRED.ITLB_MISS",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2143,18 +2149,18 @@
"MetricExpr": "max((cpu_core@EXE_ACTIVITY.BOUND_ON_LOADS@ - cpu_core@MEMORY_ACTIVITY.STALLS_L1D_MISS@) / tma_info_thread_clks, 0)",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
"MetricName": "tma_l1_bound",
- "MetricThreshold": "tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 Data (L1D) cache. The L1D cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1D. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache",
+ "BriefDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache",
"MetricExpr": "min(2 * (cpu_core@MEM_INST_RETIRED.ALL_LOADS@ - cpu_core@MEM_LOAD_RETIRED.FB_HIT@ - cpu_core@MEM_LOAD_RETIRED.L1_MISS@) * 20 / 100, max(cpu_core@CYCLE_ACTIVITY.CYCLES_MEM_ANY@ - cpu_core@MEMORY_ACTIVITY.CYCLES_L1D_MISS@, 0)) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_l1_latency_dependency",
- "MetricThreshold": "tma_l1_latency_dependency > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache. The short latency of the L1D cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
+ "MetricThreshold": "tma_l1_latency_dependency > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache. The short latency of the L1D cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2163,7 +2169,7 @@
"MetricExpr": "(cpu_core@MEMORY_ACTIVITY.STALLS_L1D_MISS@ - cpu_core@MEMORY_ACTIVITY.STALLS_L2_MISS@) / tma_info_thread_clks",
"MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
- "MetricThreshold": "tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -2173,7 +2179,7 @@
"MetricExpr": "3 * tma_info_system_core_frequency * cpu_core@MEM_LOAD_RETIRED.L2_HIT@ * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
"MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_l2_bound_group",
"MetricName": "tma_l2_hit_latency",
- "MetricThreshold": "tma_l2_hit_latency > 0.05 & tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l2_hit_latency > 0.05 & (tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L2 cache under unloaded scenarios (possibly L2 latency limited). Avoiding L1 cache misses (i.e. L1 misses/L2 hits) will improve the latency. Sample with: MEM_LOAD_RETIRED.L2_HIT",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -2183,18 +2189,18 @@
"MetricExpr": "(cpu_core@MEMORY_ACTIVITY.STALLS_L2_MISS@ - cpu_core@MEMORY_ACTIVITY.STALLS_L3_MISS@) / tma_info_thread_clks",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
- "MetricThreshold": "tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
- "MetricExpr": "(12 * tma_info_system_core_frequency - 3 * tma_info_system_core_frequency) * (cpu_core@MEM_LOAD_RETIRED.L3_HIT@ * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2)) / tma_info_thread_clks",
+ "MetricExpr": "9 * tma_info_system_core_frequency * (cpu_core@MEM_LOAD_RETIRED.L3_HIT@ * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2)) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
- "MetricThreshold": "tma_l3_hit_latency > 0.1 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT. Related metrics: tma_bottleneck_cache_memory_latency, tma_branch_resteers, tma_mem_latency, tma_store_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_bottleneck_data_cache_memory_latency, tma_mem_latency",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2203,19 +2209,19 @@
"MetricExpr": "cpu_core@DECODE.LCP@ / tma_info_thread_clks",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_lcp",
- "MetricThreshold": "tma_lcp > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation)",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
"MetricExpr": "max(0, tma_retiring - tma_heavy_operations)",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_light_operations",
"MetricThreshold": "tma_light_operations > 0.6",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2234,7 +2240,7 @@
"MetricExpr": "tma_dtlb_load - tma_load_stlb_miss",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
"MetricName": "tma_load_stlb_hit",
- "MetricThreshold": "tma_load_stlb_hit > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_hit > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2243,43 +2249,44 @@
"MetricExpr": "cpu_core@DTLB_LOAD_MISSES.WALK_ACTIVE@ / tma_info_thread_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
"MetricName": "tma_load_stlb_miss",
- "MetricThreshold": "tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_1G@ / (cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_1G@)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_1g",
- "MetricThreshold": "tma_load_stlb_miss_1g > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_1g > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M@ / (cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_1G@)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_2m",
- "MetricThreshold": "tma_load_stlb_miss_2m > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_2m > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_4K@ / (cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_1G@)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_4k",
- "MetricThreshold": "tma_load_stlb_miss_4k > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_4k > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
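The three tma_load_stlb_miss_* children above apportion the parent's page-walk cycles by the page size of the completed walks, so the children's values sum back to the parent. Sketch with assumed walk counts:

    # Illustrative completed-walk counts per page size:
    walks_4k, walks_2m4m, walks_1g = 900, 90, 10
    stlb_miss = 0.06   # parent tma_load_stlb_miss: fraction of cycles spent walking

    total = walks_4k + walks_2m4m + walks_1g
    shares = [stlb_miss * w / total for w in (walks_4k, walks_2m4m, walks_1g)]
    assert abs(sum(shares) - stlb_miss) < 1e-12   # the children partition the parent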
{
"BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(16 * max(0, cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ - cpu_core@L2_RQSTS.ALL_RFO@) + cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ / cpu_core@MEM_INST_RETIRED.ALL_STORES@ * (10 * cpu_core@L2_RQSTS.RFO_HIT@ + min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO@))) / tma_info_thread_clks",
"MetricGroup": "LockCont;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
- "MetricThreshold": "tma_lock_latency > 0.2 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -2290,7 +2297,7 @@
"MetricGroup": "FetchBW;LSD;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_lsd",
"MetricThreshold": "tma_lsd > 0.15 & tma_fetch_bandwidth > 0.2",
- "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit. LSD typically does well sustaining Uop supply. However; in some rare cases; optimal uop-delivery could not be reached for small loops whose size (in terms of number of uops) does not suit well the LSD structure",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit. LSD typically does well sustaining Uop supply. However; in some rare cases; optimal uop-delivery could not be reached for small loops whose size (in terms of number of uops) does not suit well the LSD structure.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2301,17 +2308,17 @@
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_bottleneck_memory_synchronization, tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_bottleneck_memory_synchronization, tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
- "MetricExpr": "min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=0x4@) / tma_info_thread_clks",
+ "MetricExpr": "min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
- "MetricThreshold": "tma_mem_bandwidth > 0.2 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
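Here cmask=4 counts cycles with at least four demand data reads outstanding, which the metric treats as a heuristic for operating near the external-memory bandwidth limit; the min() against total thread cycles merely caps the numerator. Sketch with assumed counts:

    # Illustrative counts only:
    clks = 1_000_000           # CPU_CLK_UNHALTED.THREAD
    cycles_ge_4_rd = 240_000   # OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD,cmask=4

    print(min(clks, cycles_ge_4_rd) / clks)   # -> 0.24 of cycles flagged as BW-bound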
@@ -2320,34 +2327,33 @@
"MetricExpr": "min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD@) / tma_info_thread_clks - tma_mem_bandwidth",
"MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
- "MetricThreshold": "tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_bottleneck_cache_memory_latency, tma_l3_hit_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_bottleneck_data_cache_memory_latency, tma_l3_hit_latency",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
- "MetricExpr": "topdown\\-mem\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "cpu_core@topdown\\-mem\\-bound@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)",
"MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_memory_bound",
"MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two)",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
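The rewritten expression reads the four level-1 topdown components through the cpu_core PMU and normalizes the mem-bound slots by their sum; the old "+ 0 * slots" tail, which appears to have served only as an event-grouping hint rather than contributing to the value, is dropped. Evaluation sketch with assumed raw counts:

    # Illustrative raw topdown counter deltas (any common scale):
    fe_bound, bad_spec, retiring, be_bound = 150, 50, 450, 350
    mem_bound = 220   # topdown-mem-bound, a sub-component of be_bound

    print(mem_bound / (fe_bound + bad_spec + retiring + be_bound))   # -> 0.22 of slots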
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to LFENCE Instructions",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to LFENCE Instructions.",
"MetricExpr": "13 * cpu_core@MISC2_RETIRED.LFENCE@ / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_memory_fence",
- "MetricThreshold": "tma_memory_fence > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_memory_fence > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations , uops for memory load or store accesses",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
"MetricExpr": "tma_light_operations * cpu_core@MEM_UOP_RETIRED.ANY@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_memory_operations",
@@ -2370,7 +2376,7 @@
"MetricExpr": "tma_branch_mispredicts / tma_bad_speculation * cpu_core@INT_MISC.CLEAR_RESTEER_CYCLES@ / tma_info_thread_clks",
"MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
"MetricName": "tma_mispredicts_resteers",
- "MetricThreshold": "tma_mispredicts_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_bottleneck_mispredictions, tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -2386,18 +2392,18 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued , the Count Domain; [ADL+] cycles)",
+ "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles)",
"MetricExpr": "160 * cpu_core@ASSISTS.SSE_AVX_MIX@ / tma_info_thread_clks",
"MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group",
"MetricName": "tma_mixing_vectors",
"MetricThreshold": "tma_mixing_vectors > 0.05",
- "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued , the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
+ "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the Microcode Sequencer (MS) unit - see Microcode_Sequencer node for details",
- "MetricExpr": "max(cpu_core@IDQ.MS_CYCLES_ANY@, cpu_core@UOPS_RETIRED.MS\\,cmask\\=0x1@ / (cpu_core@UOPS_RETIRED.SLOTS@ / cpu_core@UOPS_ISSUED.ANY@)) / tma_info_core_core_clks / 2",
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the Microcode Sequencer (MS) unit - see Microcode_Sequencer node for details.",
+ "MetricExpr": "max(cpu_core@IDQ.MS_CYCLES_ANY@, cpu_core@UOPS_RETIRED.MS\\,cmask\\=1@ / (cpu_core@UOPS_RETIRED.SLOTS@ / cpu_core@UOPS_ISSUED.ANY@)) / tma_info_core_core_clks / 2.4",
"MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_ms",
"MetricThreshold": "tma_ms > 0.05 & tma_fetch_bandwidth > 0.2",
@@ -2406,10 +2412,10 @@
},
{
"BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS)",
- "MetricExpr": "3 * cpu_core@UOPS_RETIRED.MS\\,cmask\\=0x1\\,edge\\=0x1@ / (cpu_core@UOPS_RETIRED.SLOTS@ / cpu_core@UOPS_ISSUED.ANY@) / tma_info_thread_clks",
+ "MetricExpr": "3 * cpu_core@UOPS_RETIRED.MS\\,cmask\\=1\\,edge@ / (cpu_core@UOPS_RETIRED.SLOTS@ / cpu_core@UOPS_ISSUED.ANY@) / tma_info_thread_clks",
"MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
"MetricName": "tma_ms_switches",
- "MetricThreshold": "tma_ms_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: FRONTEND_RETIRED.MS_FLOWS. Related metrics: tma_bottleneck_irregular_overhead, tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -2420,7 +2426,7 @@
"MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_non_fused_branches",
"MetricThreshold": "tma_non_fused_branches > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2429,13 +2435,14 @@
"MetricExpr": "tma_light_operations * cpu_core@INST_RETIRED.NOP@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "BvBO;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_nop_instructions",
- "MetricThreshold": "tma_nop_instructions > 0.1 & tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_int_operations + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches))",
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_other_light_ops",
@@ -2445,20 +2452,20 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types)",
+ "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).",
"MetricExpr": "max(tma_branch_mispredicts * (1 - cpu_core@BR_MISP_RETIRED.ALL_BRANCHES@ / (cpu_core@INT_MISC.CLEARS_COUNT@ - cpu_core@MACHINE_CLEARS.COUNT@)), 0.0001)",
"MetricGroup": "BrMispredicts;BvIO;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_other_mispredicts",
- "MetricThreshold": "tma_other_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering",
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.",
"MetricExpr": "max(tma_machine_clears * (1 - cpu_core@MACHINE_CLEARS.MEMORY_ORDERING@ / cpu_core@MACHINE_CLEARS.COUNT@), 0.0001)",
"MetricGroup": "BvIO;Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_other_nukes",
- "MetricThreshold": "tma_other_nukes > 0.05 & tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2468,7 +2475,7 @@
"MetricGroup": "TopdownL5;tma_L5_group;tma_assists_group",
"MetricName": "tma_page_faults",
"MetricThreshold": "tma_page_faults > 0.05",
- "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Page Faults. A Page Fault may apply on first application access to a memory page. Note operating system handling of page faults accounts for the majority of its cost",
+ "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Page Faults. A Page Fault may apply on first application access to a memory page. Note operating system handling of page faults accounts for the majority of its cost.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2478,7 +2485,7 @@
"MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_0",
"MetricThreshold": "tma_port_0 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2488,7 +2495,7 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_1",
"MetricThreshold": "tma_port_1 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2498,58 +2505,59 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_6",
"MetricThreshold": "tma_port_6 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "((tma_ports_utilized_0 * tma_info_thread_clks + (cpu_core@EXE_ACTIVITY.1_PORTS_UTIL@ + tma_retiring * cpu_core@EXE_ACTIVITY.2_3_PORTS_UTIL@)) / tma_info_thread_clks if cpu_core@ARITH.DIV_ACTIVE@ < cpu_core@CYCLE_ACTIVITY.STALLS_TOTAL@ - cpu_core@EXE_ACTIVITY.BOUND_ON_LOADS@ else (cpu_core@EXE_ACTIVITY.1_PORTS_UTIL@ + tma_retiring * cpu_core@EXE_ACTIVITY.2_3_PORTS_UTIL@) / tma_info_thread_clks)",
"MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_ports_utilization",
- "MetricThreshold": "tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricConstraint": "NO_THRESHOLD_AND_NMI",
"MetricExpr": "(cpu_core@EXE_ACTIVITY.EXE_BOUND_0_PORTS@ + max(cpu_core@RS.EMPTY_RESOURCE@ - cpu_core@RESOURCE_STALLS.SCOREBOARD@, 0)) / tma_info_thread_clks * (cpu_core@CYCLE_ACTIVITY.STALLS_TOTAL@ - cpu_core@EXE_ACTIVITY.BOUND_ON_LOADS@) / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
- "MetricThreshold": "tma_ports_utilized_0 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricConstraint": "NO_THRESHOLD_AND_NMI",
"MetricExpr": "cpu_core@EXE_ACTIVITY.1_PORTS_UTIL@ / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_1",
- "MetricThreshold": "tma_ports_utilized_1 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Sample with: EXE_ACTIVITY.1_PORTS_UTIL. Related metrics: tma_l1_bound",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "cpu_core@EXE_ACTIVITY.2_PORTS_UTIL@ / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_2",
- "MetricThreshold": "tma_ports_utilized_2 > 0.15 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Sample with: EXE_ACTIVITY.2_PORTS_UTIL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Sample with: EXE_ACTIVITY.2_PORTS_UTIL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "cpu_core@UOPS_EXECUTED.CYCLES_GE_3@ / tma_info_thread_clks",
"MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
- "MetricThreshold": "tma_ports_utilized_3m > 0.4 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -2557,7 +2565,7 @@
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "cpu_core@topdown\\-retiring@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)",
"MetricGroup": "BvUW;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
@@ -2571,7 +2579,7 @@
"MetricExpr": "cpu_core@RESOURCE_STALLS.SCOREBOARD@ / tma_info_thread_clks + tma_c02_wait",
"MetricGroup": "BvIO;PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
"MetricName": "tma_serializing_operation",
- "MetricThreshold": "tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -2581,18 +2589,17 @@
"MetricExpr": "tma_light_operations * cpu_core@INT_VEC_RETIRED.SHUFFLES@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "HPC;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_shuffles_256b",
- "MetricThreshold": "tma_shuffles_256b > 0.1 & tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring Shuffle operations of 256-bit vector size (FP or Integer). Shuffles may incur slow cross \"vector lane\" data transfers",
+ "MetricThreshold": "tma_shuffles_256b > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring Shuffle operations of 256-bit vector size (FP or Integer). Shuffles may incur slow cross \"vector lane\" data transfers.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "cpu_core@CPU_CLK_UNHALTED.PAUSE@ / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_slow_pause",
- "MetricThreshold": "tma_slow_pause > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. Sample with: CPU_CLK_UNHALTED.PAUSE_INST",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -2603,7 +2610,7 @@
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_split_loads",
"MetricThreshold": "tma_split_loads > 0.3",
- "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS_PS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2612,8 +2619,8 @@
"MetricExpr": "cpu_core@MEM_INST_RETIRED.SPLIT_STORES@ / tma_info_core_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
"MetricName": "tma_split_stores",
- "MetricThreshold": "tma_split_stores > 0.2 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2622,8 +2629,8 @@
"MetricExpr": "(cpu_core@XQ.FULL_CYCLES@ + cpu_core@L1D_PEND_MISS.L2_STALLS@) / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
- "MetricThreshold": "tma_sq_full > 0.3 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2632,8 +2639,8 @@
"MetricExpr": "cpu_core@EXE_ACTIVITY.BOUND_ON_STORES@ / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_store_bound",
- "MetricThreshold": "tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES_PS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2642,8 +2649,8 @@
"MetricExpr": "13 * cpu_core@LD_BLOCKS.STORE_FORWARD@ / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
- "MetricThreshold": "tma_store_fwd_blk > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2652,8 +2659,8 @@
"MetricExpr": "(cpu_core@MEM_STORE_RETIRED.L2_HIT@ * 10 * (1 - cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ / cpu_core@MEM_INST_RETIRED.ALL_STORES@) + (1 - cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ / cpu_core@MEM_INST_RETIRED.ALL_STORES@) * min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO@)) / tma_info_thread_clks",
"MetricGroup": "BvML;LockCont;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
- "MetricThreshold": "tma_store_latency > 0.1 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_branch_resteers, tma_fb_full, tma_l3_hit_latency, tma_lock_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2672,7 +2679,7 @@
"MetricExpr": "tma_dtlb_store - tma_store_stlb_miss",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
"MetricName": "tma_store_stlb_hit",
- "MetricThreshold": "tma_store_stlb_hit > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_hit > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2681,34 +2688,34 @@
"MetricExpr": "cpu_core@DTLB_STORE_MISSES.WALK_ACTIVE@ / tma_info_core_core_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
"MetricName": "tma_store_stlb_miss",
- "MetricThreshold": "tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_1G@ / (cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_1G@)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_1g",
- "MetricThreshold": "tma_store_stlb_miss_1g > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_1g > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M@ / (cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_1G@)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_2m",
- "MetricThreshold": "tma_store_stlb_miss_2m > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_2m > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_4K@ / (cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_1G@)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_4k",
- "MetricThreshold": "tma_store_stlb_miss_4k > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_4k > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2717,7 +2724,7 @@
"MetricExpr": "9 * cpu_core@OCR.STREAMING_WR.ANY_RESPONSE@ / tma_info_thread_clks",
"MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueSmSt;tma_store_bound_group",
"MetricName": "tma_streaming_stores",
- "MetricThreshold": "tma_streaming_stores > 0.2 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_streaming_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should Streaming stores be a bottleneck. Sample with: OCR.STREAMING_WR.ANY_RESPONSE. Related metrics: tma_fb_full",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -2727,7 +2734,7 @@
"MetricExpr": "cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES@ / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
- "MetricThreshold": "tma_unknown_branches > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: FRONTEND_RETIRED.UNKNOWN_BRANCH",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -2737,8 +2744,8 @@
"MetricExpr": "tma_retiring * cpu_core@UOPS_EXECUTED.X87@ / cpu_core@UOPS_EXECUTED.THREAD@",
"MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
"MetricName": "tma_x87_use",
- "MetricThreshold": "tma_x87_use > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
}
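The MetricThreshold rewrites in the hunks above only add explicit parentheses around the parent-node conditions: each TMA child metric is gated on its ancestors (for example tma_core_bound and tma_backend_bound), and the new grouping mirrors that tree structure without changing the evaluated result. A minimal usage sketch, assuming a perf binary built with these alderlake metric files; the metric names come from the MetricName fields above and the sleep workload is only a placeholder:

    # Evaluate an L3 TMA node; perf flags it when its MetricThreshold holds.
    perf stat -M tma_ports_utilization -a -- sleep 10
    # Drill one level down the tree with its L4 children.
    perf stat -M tma_ports_utilized_0,tma_ports_utilized_1 -a -- sleep 10
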
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/cache.json b/tools/perf/pmu-events/arch/x86/alderlake/cache.json
index a20e19738046..be15a7f83717 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/cache.json
@@ -461,7 +461,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_LOADS",
- "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW.",
+ "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW. Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x81",
"Unit": "cpu_core"
@@ -472,7 +472,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_STORES",
- "PublicDescription": "Counts all retired store instructions.",
+ "PublicDescription": "Counts all retired store instructions. Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x82",
"Unit": "cpu_core"
@@ -483,7 +483,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ANY",
- "PublicDescription": "Counts all retired memory instructions - loads and stores.",
+ "PublicDescription": "Counts all retired memory instructions - loads and stores. Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x83",
"Unit": "cpu_core"
@@ -494,7 +494,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.LOCK_LOADS",
- "PublicDescription": "Counts retired load instructions with locked access.",
+ "PublicDescription": "Counts retired load instructions with locked access. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x21",
"Unit": "cpu_core"
@@ -505,7 +505,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
- "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
+ "PublicDescription": "Counts retired load instructions that split across a cacheline boundary. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x41",
"Unit": "cpu_core"
@@ -516,7 +516,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_STORES",
- "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
+ "PublicDescription": "Counts retired store instructions that split across a cacheline boundary. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x42",
"Unit": "cpu_core"
@@ -527,7 +527,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
- "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x11",
"Unit": "cpu_core"
@@ -538,7 +538,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
- "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x12",
"Unit": "cpu_core"
@@ -559,7 +559,7 @@
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD",
- "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3.",
+ "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3. Available PDIST counters: 0",
"SampleAfterValue": "20011",
"UMask": "0x4",
"Unit": "cpu_core"
@@ -570,7 +570,7 @@
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
- "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache. Available PDIST counters: 0",
"SampleAfterValue": "20011",
"UMask": "0x2",
"Unit": "cpu_core"
@@ -581,7 +581,7 @@
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
- "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3.",
+ "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3. Available PDIST counters: 0",
"SampleAfterValue": "20011",
"UMask": "0x4",
"Unit": "cpu_core"
@@ -592,7 +592,7 @@
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
- "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache. Available PDIST counters: 0",
"SampleAfterValue": "20011",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -603,7 +603,7 @@
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
- "PublicDescription": "Counts retired load instructions whose data sources were hits in L3 without snoops required.",
+ "PublicDescription": "Counts retired load instructions whose data sources were hits in L3 without snoops required. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x8",
"Unit": "cpu_core"
@@ -614,7 +614,7 @@
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD",
- "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache. Available PDIST counters: 0",
"SampleAfterValue": "20011",
"UMask": "0x2",
"Unit": "cpu_core"
@@ -625,7 +625,7 @@
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
- "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from local DRAM.",
+ "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from local DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -636,7 +636,7 @@
"Data_LA": "1",
"EventCode": "0xd4",
"EventName": "MEM_LOAD_MISC_RETIRED.UC",
- "PublicDescription": "Retired instructions with at least one load to uncacheable memory-type, or at least one cache-line split locked access (Bus Lock).",
+ "PublicDescription": "Retired instructions with at least one load to uncacheable memory-type, or at least one cache-line split locked access (Bus Lock). Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x4",
"Unit": "cpu_core"
@@ -647,7 +647,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.FB_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x40",
"Unit": "cpu_core"
@@ -658,7 +658,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source. Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -669,7 +669,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_MISS",
- "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x8",
"Unit": "cpu_core"
@@ -680,7 +680,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_HIT",
- "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources.",
+ "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x2",
"Unit": "cpu_core"
@@ -691,7 +691,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_MISS",
- "PublicDescription": "Counts retired load instructions missed L2 cache as data sources.",
+ "PublicDescription": "Counts retired load instructions missed L2 cache as data sources. Available PDIST counters: 0",
"SampleAfterValue": "100021",
"UMask": "0x10",
"Unit": "cpu_core"
@@ -702,7 +702,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100021",
"UMask": "0x4",
"Unit": "cpu_core"
@@ -713,7 +713,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_MISS",
- "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "50021",
"UMask": "0x20",
"Unit": "cpu_core"
@@ -1051,12 +1051,61 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that have any type of response.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xB7",
+ "EventName": "OCR.COREWB_M.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10008",
+ "PublicDescription": "Counts modified writebacks from L1 cache and L2 cache that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts writebacks of modified cachelines that hit in the L3 or were snooped from another core's caches.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.COREWB_M.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0008",
+ "PublicDescription": "Counts writebacks of modified cachelines that hit in the L3 or were snooped from another core's caches. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts writebacks of non-modified cachelines that hit in the L3 or were snooped from another core's caches.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.COREWB_NONM.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C1000",
+ "PublicDescription": "Counts writebacks of non-modified cachelines that hit in the L3 or were snooped from another core's caches. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache.",
"Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F803C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -1068,6 +1117,7 @@
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -1079,6 +1129,7 @@
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -1090,17 +1141,43 @@
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "PublicDescription": "Counts demand data reads that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "PublicDescription": "Counts demand data reads that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts demand data reads that were supplied by the L3 cache.",
"Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F803C0001",
+ "PublicDescription": "Counts demand data reads that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -1112,6 +1189,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0001",
+ "PublicDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -1123,6 +1201,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0001",
+ "PublicDescription": "Counts demand data reads that resulted in a snoop hit in another cores caches, data forwarding is required as the data is modified. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -1134,6 +1213,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0001",
+ "PublicDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -1145,6 +1225,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0001",
+ "PublicDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -1156,6 +1237,31 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0001",
+ "PublicDescription": "Counts demand data reads that resulted in a snoop hit in another cores caches which forwarded the unmodified data to the requesting core. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "PublicDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -1167,6 +1273,7 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F803C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -1178,6 +1285,7 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -1189,6 +1297,7 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0002",
+ "PublicDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that resulted in a snoop hit in another cores caches, data forwarding is required as the data is modified. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -1200,6 +1309,7 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -1211,6 +1321,31 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read, RFO and ITOM requests including demands and prefetches to the core caches (L1 or L2) that hit in the L3 or were snooped from another core's caches.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C4477",
+ "PublicDescription": "Counts all data read, code read, RFO and ITOM requests including demands and prefetches to the core caches (L1 or L2) that hit in the L3 or were snooped from another core's caches. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache software prefetches which include T0/T1/T2 and NTA (except PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xB7",
+ "EventName": "OCR.SWPF_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x14000",
+ "PublicDescription": "Counts L1 data cache software prefetches which include T0/T1/T2 and NTA (except PREFETCHW) that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -1222,6 +1357,7 @@
"EventName": "OCR.SWPF_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F803C4000",
+ "PublicDescription": "Counts L1 data cache software prefetches which include T0/T1/T2 and NTA (except PREFETCHW) that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -1233,6 +1369,7 @@
"EventName": "OCR.SWPF_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C4000",
+ "PublicDescription": "Counts L1 data cache software prefetches which include T0/T1/T2 and NTA (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -1244,6 +1381,7 @@
"EventName": "OCR.SWPF_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C4000",
+ "PublicDescription": "Counts L1 data cache software prefetches which include T0/T1/T2 and NTA (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -1255,6 +1393,7 @@
"EventName": "OCR.SWPF_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C4000",
+ "PublicDescription": "Counts L1 data cache software prefetches which include T0/T1/T2 and NTA (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -1352,12 +1491,12 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "For every cycle where the core is waiting on at least 1 outstanding Demand RFO request, increments by 1.",
+ "BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
"Counter": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
- "PublicDescription": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "PublicDescription": "Counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
"SampleAfterValue": "1000003",
"UMask": "0x4",
"Unit": "cpu_core"
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/frontend.json b/tools/perf/pmu-events/arch/x86/alderlake/frontend.json
index c5b3818ad479..ff3b30c2619a 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/frontend.json
@@ -55,7 +55,7 @@
"EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x1",
- "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -67,7 +67,7 @@
"EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x11",
- "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss.",
+ "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -79,7 +79,7 @@
"EventName": "FRONTEND_RETIRED.ITLB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x14",
- "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -91,7 +91,7 @@
"EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x12",
- "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss.",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -103,7 +103,7 @@
"EventName": "FRONTEND_RETIRED.L2_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x13",
- "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss.",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -115,7 +115,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
"MSRIndex": "0x3F7",
"MSRValue": "0x600106",
- "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall.",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -127,7 +127,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
"MSRValue": "0x608006",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -139,7 +139,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
"MSRValue": "0x601006",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -151,7 +151,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
"MSRValue": "0x600206",
- "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -163,7 +163,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
"MSRIndex": "0x3F7",
"MSRValue": "0x610006",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -175,7 +175,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
"MSRValue": "0x100206",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -187,7 +187,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
"MSRValue": "0x602006",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -199,7 +199,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
"MSRValue": "0x600406",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -211,7 +211,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
"MSRValue": "0x620006",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -223,7 +223,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
"MSRValue": "0x604006",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -235,7 +235,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
"MSRValue": "0x600806",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -247,6 +247,7 @@
"EventName": "FRONTEND_RETIRED.MS_FLOWS",
"MSRIndex": "0x3F7",
"MSRValue": "0x8",
+ "PublicDescription": "FRONTEND_RETIRED.MS_FLOWS Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -258,7 +259,7 @@
"EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x15",
- "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -270,6 +271,7 @@
"EventName": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
"MSRIndex": "0x3F7",
"MSRValue": "0x17",
+ "PublicDescription": "FRONTEND_RETIRED.UNKNOWN_BRANCH Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1",
"Unit": "cpu_core"
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/memory.json b/tools/perf/pmu-events/arch/x86/alderlake/memory.json
index fa15f5797bed..a0260d5b8619 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/memory.json
@@ -248,29 +248,67 @@
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.STORE_SAMPLE",
- "PublicDescription": "Counts Retired memory accesses with at least 1 store operation. This PEBS event is the precisely-distributed (PDist) trigger covering all stores uops for sampling by the PEBS Store Latency Facility. The facility is described in Intel SDM Volume 3 section 19.9.8",
+ "PublicDescription": "Counts Retired memory accesses with at least 1 store operation. This PEBS event is the precisely-distributed (PDist) trigger covering all stores uops for sampling by the PEBS Store Latency Facility. The facility is described in Intel SDM Volume 3 section 19.9.8 Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x2",
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x784000004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the L3 cache.",
"Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84400004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x784000001",
+ "PublicDescription": "Counts demand data reads that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "PublicDescription": "Counts demand data reads that were supplied by DRAM. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
"Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84400001",
+ "PublicDescription": "Counts demand data reads that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -282,6 +320,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00001",
+ "PublicDescription": "Counts demand data reads that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -293,6 +332,19 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84400001",
+ "PublicDescription": "Counts demand data reads that were not supplied by the L3 cache. [L3_MISS_LOCAL is alias to L3_MISS] Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x784000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -304,6 +356,7 @@
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84400002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -315,6 +368,7 @@
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00002",
+ "PublicDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -326,6 +380,19 @@
"EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84400002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache. [L3_MISS_LOCAL is alias to L3_MISS] Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache software prefetches which include T0/T1/T2 and NTA (except PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xB7",
+ "EventName": "OCR.SWPF_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x784004000",
+ "PublicDescription": "Counts L1 data cache software prefetches which include T0/T1/T2 and NTA (except PREFETCHW) that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -337,6 +404,7 @@
"EventName": "OCR.SWPF_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84404000",
+ "PublicDescription": "Counts L1 data cache software prefetches which include T0/T1/T2 and NTA (except PREFETCHW) that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/other.json b/tools/perf/pmu-events/arch/x86/alderlake/other.json
index a8b23e92408c..af46cde26b54 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/other.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/other.json
@@ -56,122 +56,13 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that have any type of response.",
- "Counter": "0,1,2,3,4,5",
- "EventCode": "0xB7",
- "EventName": "OCR.COREWB_M.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10008",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
- "Counter": "0,1,2,3,4,5",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10004",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
- "Counter": "0,1,2,3,4,5",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_CODE_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x784000004",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts demand data reads that have any type of response.",
- "Counter": "0,1,2,3,4,5",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10001",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts demand data reads that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10001",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
- "Counter": "0,1,2,3,4,5",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_DATA_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x784000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_DATA_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
- "Counter": "0,1,2,3,4,5",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10002",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10002",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
- "Counter": "0,1,2,3,4,5",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_RFO.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x784000002",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_atom"
- },
- {
"BriefDescription": "Counts streaming stores which modify a full 64 byte cacheline that have any type of response.",
"Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.FULL_STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800000010000",
+ "PublicDescription": "Counts streaming stores which modify a full 64 byte cacheline that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -183,6 +74,7 @@
"EventName": "OCR.PARTIAL_STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400000010000",
+ "PublicDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -194,6 +86,7 @@
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10800",
+ "PublicDescription": "Counts streaming stores that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -205,97 +98,12 @@
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10800",
+ "PublicDescription": "Counts streaming stores that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Counts L1 data cache software prefetches which include T0/T1/T2 and NTA (except PREFETCHW) that have any type of response.",
- "Counter": "0,1,2,3,4,5",
- "EventCode": "0xB7",
- "EventName": "OCR.SWPF_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x14000",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts L1 data cache software prefetches which include T0/T1/T2 and NTA (except PREFETCHW) that were supplied by DRAM.",
- "Counter": "0,1,2,3,4,5",
- "EventCode": "0xB7",
- "EventName": "OCR.SWPF_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x784004000",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xa5",
- "EventName": "RS.EMPTY",
- "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
- "SampleAfterValue": "1000003",
- "UMask": "0x7",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
- "Counter": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "EdgeDetect": "1",
- "EventCode": "0xa5",
- "EventName": "RS.EMPTY_COUNT",
- "Invert": "1",
- "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
- "SampleAfterValue": "100003",
- "UMask": "0x7",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Cycles when Reservation Station (RS) is empty due to a resource in the back-end",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xa5",
- "EventName": "RS.EMPTY_RESOURCE",
- "SampleAfterValue": "1000003",
- "UMask": "0x1",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "This event is deprecated. Refer to new event RS.EMPTY_COUNT",
- "Counter": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "Deprecated": "1",
- "EdgeDetect": "1",
- "EventCode": "0xa5",
- "EventName": "RS_EMPTY.COUNT",
- "Invert": "1",
- "SampleAfterValue": "100003",
- "UMask": "0x7",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "This event is deprecated. Refer to new event RS.EMPTY",
- "Counter": "0,1,2,3,4,5,6,7",
- "Deprecated": "1",
- "EventCode": "0xa5",
- "EventName": "RS_EMPTY.CYCLES",
- "SampleAfterValue": "1000003",
- "UMask": "0x7",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state. For Tremont, UMWAIT and TPAUSE will only put the CPU into C0.1 activity state (not C0.2 activity state)",
- "Counter": "0,1,2,3,4,5",
- "EventCode": "0x75",
- "EventName": "SERIALIZATION.C01_MS_SCB",
- "SampleAfterValue": "200003",
- "UMask": "0x4",
- "Unit": "cpu_atom"
- },
- {
"BriefDescription": "Cycles the uncore cannot take further requests",
"Counter": "0,1,2,3",
"CounterMask": "1",
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/pipeline.json b/tools/perf/pmu-events/arch/x86/alderlake/pipeline.json
index f5bf0816f190..57a8c78cdc49 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/pipeline.json
@@ -32,8 +32,9 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Counts the number of active floating point and integer dividers per cycle.",
+ "BriefDescription": "This event is deprecated.",
"Counter": "0,1,2,3,4,5",
+ "Deprecated": "1",
"EventCode": "0xcd",
"EventName": "ARITH.DIV_OCCUPANCY",
"SampleAfterValue": "1000003",
@@ -41,8 +42,9 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of floating point and integer divider uops executed per cycle.",
+ "BriefDescription": "This event is deprecated.",
"Counter": "0,1,2,3,4,5",
+ "Deprecated": "1",
"EventCode": "0xcd",
"EventName": "ARITH.DIV_UOPS",
"SampleAfterValue": "1000003",
@@ -133,7 +135,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "PublicDescription": "Counts all branch instructions retired.",
+ "PublicDescription": "Counts all branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"Unit": "cpu_core"
},
@@ -161,7 +163,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND",
- "PublicDescription": "Counts conditional branch instructions retired.",
+ "PublicDescription": "Counts conditional branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x11",
"Unit": "cpu_core"
@@ -171,7 +173,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_NTAKEN",
- "PublicDescription": "Counts not taken branch instructions retired.",
+ "PublicDescription": "Counts not taken branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x10",
"Unit": "cpu_core"
@@ -190,7 +192,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_TAKEN",
- "PublicDescription": "Counts taken conditional branch instructions retired.",
+ "PublicDescription": "Counts taken conditional branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -209,7 +211,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "PublicDescription": "Counts far branch instructions retired.",
+ "PublicDescription": "Counts far branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x40",
"Unit": "cpu_core"
@@ -228,7 +230,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT",
- "PublicDescription": "Counts near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
+ "PublicDescription": "Counts near indirect branch instructions retired excluding returns. TSX abort is an indirect branch. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x80",
"Unit": "cpu_core"
@@ -276,7 +278,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
- "PublicDescription": "Counts both direct and indirect near call instructions retired.",
+ "PublicDescription": "Counts both direct and indirect near call instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x2",
"Unit": "cpu_core"
@@ -295,7 +297,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "PublicDescription": "Counts return instructions retired.",
+ "PublicDescription": "Counts return instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x8",
"Unit": "cpu_core"
@@ -314,7 +316,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Counts taken branch instructions retired.",
+ "PublicDescription": "Counts taken branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x20",
"Unit": "cpu_core"
@@ -372,7 +374,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"Unit": "cpu_core"
},
@@ -390,7 +392,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND",
- "PublicDescription": "Counts mispredicted conditional branch instructions retired.",
+ "PublicDescription": "Counts mispredicted conditional branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x11",
"Unit": "cpu_core"
@@ -400,7 +402,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_NTAKEN",
- "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken.",
+ "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x10",
"Unit": "cpu_core"
@@ -419,7 +421,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN",
- "PublicDescription": "Counts taken conditional mispredicted branch instructions retired.",
+ "PublicDescription": "Counts taken conditional mispredicted branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -438,7 +440,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT",
- "PublicDescription": "Counts miss-predicted near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
+ "PublicDescription": "Counts miss-predicted near indirect branch instructions retired excluding returns. TSX abort is an indirect branch. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x80",
"Unit": "cpu_core"
@@ -457,7 +459,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
- "PublicDescription": "Counts retired mispredicted indirect (near taken) CALL instructions, including both register and memory indirect.",
+ "PublicDescription": "Counts retired mispredicted indirect (near taken) CALL instructions, including both register and memory indirect. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x2",
"Unit": "cpu_core"
@@ -496,7 +498,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken.",
+ "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x20",
"Unit": "cpu_core"
@@ -516,7 +518,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RET",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x8",
"Unit": "cpu_core"
@@ -876,7 +878,7 @@
"BriefDescription": "Counts the total number of instructions retired. (Fixed event)",
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
- "PublicDescription": "Counts the total number of instructions that retired. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. This event continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0.",
+ "PublicDescription": "Counts the total number of instructions that retired. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. This event continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. Available PDIST counters: 32",
"SampleAfterValue": "2000003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -885,7 +887,7 @@
"BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
- "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter. Available PDIST counters: 32",
"SampleAfterValue": "2000003",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -931,7 +933,7 @@
"BriefDescription": "Precise instruction retired with PEBS precise-distribution",
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.PREC_DIST",
- "PublicDescription": "A version of INST_RETIRED that allows for a precise distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR++) feature to fix bias in how retired instructions get sampled. Use on Fixed Counter 0.",
+ "PublicDescription": "A version of INST_RETIRED that allows for a precise distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR++) feature to fix bias in how retired instructions get sampled. Use on Fixed Counter 0. Available PDIST counters: 32",
"SampleAfterValue": "2000003",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -1136,7 +1138,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "LOAD_HIT_PREFETCH.SWPF",
- "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "PublicDescription": "Counts all software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -1213,8 +1215,9 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of machine clears that flush the pipeline and restart the machine with the use of microcode due to SMC, MEMORY_ORDERING, FP_ASSISTS, PAGE_FAULT, DISAMBIGUATION, and FPC_VIRTUAL_TRAP.",
+ "BriefDescription": "This event is deprecated.",
"Counter": "0,1,2,3,4,5",
+ "Deprecated": "1",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.SLOW",
"SampleAfterValue": "20003",
@@ -1290,6 +1293,70 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY_COUNT",
+ "Invert": "1",
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty due to a resource in the back-end",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY_RESOURCE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event RS.EMPTY_COUNT",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "Deprecated": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xa5",
+ "EventName": "RS_EMPTY.COUNT",
+ "Invert": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event RS.EMPTY",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Deprecated": "1",
+ "EventCode": "0xa5",
+ "EventName": "RS_EMPTY.CYCLES",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state. For Tremont, UMWAIT and TPAUSE will only put the CPU into C0.1 activity state (not C0.2 activity state)",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0x75",
+ "EventName": "SERIALIZATION.C01_MS_SCB",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of issue slots not consumed by the backend due to a micro-sequencer (MS) scoreboard, which stalls the front-end from issuing from the UROM until a specified older uop retires.",
"Counter": "0,1,2,3,4,5",
"EventCode": "0x75",
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/alderlake/uncore-interconnect.json
index 7c0779c74154..b5604c7534e1 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/uncore-interconnect.json
@@ -65,7 +65,6 @@
"Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_ARB_REQ_TRK_REQUEST.DRD",
- "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
@@ -103,7 +102,6 @@
"Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_ARB_TRK_REQUESTS.RD",
- "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json b/tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json
index ad04b1e3881e..0f72c9192df6 100644
--- a/tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json
@@ -1,56 +1,56 @@
[
{
"BriefDescription": "C10 residency percent per package",
- "MetricExpr": "cstate_pkg@c10\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c10\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C10_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C1 residency percent per core",
- "MetricExpr": "cstate_core@c1\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c1\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C1_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C2 residency percent per package",
- "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per package",
- "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per core",
- "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per package",
- "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per core",
- "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C8 residency percent per package",
- "MetricExpr": "cstate_pkg@c8\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c8\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C8_Pkg_Residency",
"ScaleUnit": "100%"
@@ -75,7 +75,7 @@
"MetricExpr": "tma_core_bound",
"MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_allocation_restriction",
- "MetricThreshold": "(tma_allocation_restriction >0.10) & ((tma_core_bound >0.10) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_allocation_restriction > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
},
{
@@ -84,7 +84,7 @@
"MetricExpr": "TOPDOWN_BE_BOUND.ALL / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
- "MetricThreshold": "(tma_backend_bound >0.10)",
+ "MetricThreshold": "tma_backend_bound > 0.1",
"MetricgroupNoGroup": "TopdownL1;Default",
"PublicDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that uops must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count",
"ScaleUnit": "100%"
@@ -95,7 +95,7 @@
"MetricExpr": "(5 * CPU_CLK_UNHALTED.CORE - (TOPDOWN_FE_BOUND.ALL + TOPDOWN_BE_BOUND.ALL + TOPDOWN_RETIRING.ALL)) / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
"MetricName": "tma_bad_speculation",
- "MetricThreshold": "(tma_bad_speculation >0.15)",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
"PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ). Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
"ScaleUnit": "100%"
@@ -105,7 +105,7 @@
"MetricExpr": "TOPDOWN_FE_BOUND.BRANCH_DETECT / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_branch_detect",
- "MetricThreshold": "(tma_branch_detect >0.05) & ((tma_ifetch_latency >0.15) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_branch_detect > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"PublicDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
"ScaleUnit": "100%"
},
@@ -114,7 +114,7 @@
"MetricExpr": "TOPDOWN_BAD_SPECULATION.MISPREDICT / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
"MetricName": "tma_branch_mispredicts",
- "MetricThreshold": "(tma_branch_mispredicts >0.05) & ((tma_bad_speculation >0.15))",
+ "MetricThreshold": "tma_branch_mispredicts > 0.05 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%"
},
@@ -123,7 +123,7 @@
"MetricExpr": "TOPDOWN_FE_BOUND.BRANCH_RESTEER / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_branch_resteer",
- "MetricThreshold": "(tma_branch_resteer >0.05) & ((tma_ifetch_latency >0.15) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_branch_resteer > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
@@ -131,7 +131,7 @@
"MetricExpr": "TOPDOWN_FE_BOUND.CISC / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_cisc",
- "MetricThreshold": "(tma_cisc >0.05) & ((tma_ifetch_bandwidth >0.10) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_cisc > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
@@ -139,7 +139,7 @@
"MetricExpr": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_core_bound",
- "MetricThreshold": "(tma_core_bound >0.10) & ((tma_backend_bound >0.10))",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.1",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%"
},
@@ -148,7 +148,7 @@
"MetricExpr": "TOPDOWN_FE_BOUND.DECODE / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_decode",
- "MetricThreshold": "(tma_decode >0.05) & ((tma_ifetch_bandwidth >0.10) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_decode > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
@@ -156,7 +156,7 @@
"MetricExpr": "TOPDOWN_BAD_SPECULATION.FASTNUKE / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_fast_nuke",
- "MetricThreshold": "(tma_fast_nuke >0.05) & ((tma_machine_clears >0.05) & ((tma_bad_speculation >0.15)))",
+ "MetricThreshold": "tma_fast_nuke > 0.05 & (tma_machine_clears > 0.05 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
@@ -165,7 +165,7 @@
"MetricExpr": "TOPDOWN_FE_BOUND.ALL / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
- "MetricThreshold": "(tma_frontend_bound >0.20)",
+ "MetricThreshold": "tma_frontend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1;Default",
"ScaleUnit": "100%"
},
@@ -174,7 +174,7 @@
"MetricExpr": "TOPDOWN_FE_BOUND.ICACHE / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_icache_misses",
- "MetricThreshold": "(tma_icache_misses >0.05) & ((tma_ifetch_latency >0.15) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
@@ -182,7 +182,7 @@
"MetricExpr": "TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
"MetricName": "tma_ifetch_bandwidth",
- "MetricThreshold": "(tma_ifetch_bandwidth >0.10) & ((tma_frontend_bound >0.20))",
+ "MetricThreshold": "tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%"
},
@@ -191,7 +191,7 @@
"MetricExpr": "TOPDOWN_FE_BOUND.FRONTEND_LATENCY / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
"MetricName": "tma_ifetch_latency",
- "MetricThreshold": "(tma_ifetch_latency >0.15) & ((tma_frontend_bound >0.20))",
+ "MetricThreshold": "tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%"
},
@@ -460,12 +460,12 @@
},
{
"BriefDescription": "Average CPU Utilization",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricName": "tma_info_system_cpu_utilization"
},
{
"BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "cpu@CPU_CLK_UNHALTED.CORE_P@k / CPU_CLK_UNHALTED.CORE",
+ "MetricExpr": "CPU_CLK_UNHALTED.CORE_P:k / CPU_CLK_UNHALTED.CORE",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_kernel_utilization"
},
@@ -473,7 +473,7 @@
"BriefDescription": "PerfMon Event Multiplexing accuracy indicator",
"MetricExpr": "CPU_CLK_UNHALTED.CORE_P / CPU_CLK_UNHALTED.CORE",
"MetricName": "tma_info_system_mux",
- "MetricThreshold": "((tma_info_system_mux > 1.1)|(tma_info_system_mux < 0.9))"
+ "MetricThreshold": "tma_info_system_mux > 1.1 | tma_info_system_mux < 0.9"
},
{
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
@@ -506,7 +506,7 @@
"MetricExpr": "TOPDOWN_FE_BOUND.ITLB / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_itlb_misses",
- "MetricThreshold": "(tma_itlb_misses >0.05) & ((tma_ifetch_latency >0.15) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
@@ -514,7 +514,7 @@
"MetricExpr": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
"MetricName": "tma_machine_clears",
- "MetricThreshold": "(tma_machine_clears >0.05) & ((tma_bad_speculation >0.15))",
+ "MetricThreshold": "tma_machine_clears > 0.05 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%"
},
@@ -523,7 +523,7 @@
"MetricExpr": "TOPDOWN_BE_BOUND.MEM_SCHEDULER / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_mem_scheduler",
- "MetricThreshold": "(tma_mem_scheduler >0.10) & ((tma_resource_bound >0.20) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_mem_scheduler > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
},
{
@@ -531,7 +531,7 @@
"MetricExpr": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_non_mem_scheduler",
- "MetricThreshold": "(tma_non_mem_scheduler >0.10) & ((tma_resource_bound >0.20) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_non_mem_scheduler > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
},
{
@@ -539,7 +539,7 @@
"MetricExpr": "TOPDOWN_BAD_SPECULATION.NUKE / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_nuke",
- "MetricThreshold": "(tma_nuke >0.05) & ((tma_machine_clears >0.05) & ((tma_bad_speculation >0.15)))",
+ "MetricThreshold": "tma_nuke > 0.05 & (tma_machine_clears > 0.05 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
@@ -547,7 +547,7 @@
"MetricExpr": "TOPDOWN_FE_BOUND.OTHER / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_other_fb",
- "MetricThreshold": "(tma_other_fb >0.05) & ((tma_ifetch_bandwidth >0.10) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_other_fb > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
@@ -555,7 +555,7 @@
"MetricExpr": "TOPDOWN_FE_BOUND.PREDECODE / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_predecode",
- "MetricThreshold": "(tma_predecode >0.05) & ((tma_ifetch_bandwidth >0.10) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_predecode > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
@@ -563,7 +563,7 @@
"MetricExpr": "TOPDOWN_BE_BOUND.REGISTER / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_register",
- "MetricThreshold": "(tma_register >0.10) & ((tma_resource_bound >0.20) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_register > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
},
{
@@ -571,7 +571,7 @@
"MetricExpr": "TOPDOWN_BE_BOUND.REORDER_BUFFER / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_reorder_buffer",
- "MetricThreshold": "(tma_reorder_buffer >0.10) & ((tma_resource_bound >0.20) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_reorder_buffer > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
},
{
@@ -579,7 +579,7 @@
"MetricExpr": "tma_backend_bound - tma_core_bound",
"MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_resource_bound",
- "MetricThreshold": "(tma_resource_bound >0.20) & ((tma_backend_bound >0.10))",
+ "MetricThreshold": "tma_resource_bound > 0.2 & tma_backend_bound > 0.1",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%"
},
@@ -589,7 +589,7 @@
"MetricExpr": "TOPDOWN_RETIRING.ALL / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
- "MetricThreshold": "(tma_retiring >0.75)",
+ "MetricThreshold": "tma_retiring > 0.75",
"MetricgroupNoGroup": "TopdownL1;Default",
"ScaleUnit": "100%"
},
@@ -598,7 +598,7 @@
"MetricExpr": "TOPDOWN_BE_BOUND.SERIALIZATION / (5 * CPU_CLK_UNHALTED.CORE)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_serialization",
- "MetricThreshold": "(tma_serialization >0.10) & ((tma_resource_bound >0.20) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_serialization > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
}
]
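
Two systematic rewrites run through this metrics file: the bare TSC reference in the residency and utilization metrics becomes the explicit msr@tsc@ event, and the MetricThreshold expressions lose their redundant parentheses (equivalent, assuming perf's expression grammar gives comparisons higher precedence than '&', which the rewrite relies on). A worked sketch with made-up counter values:

    # C6_Core_Residency = cstate_core@c6-residency@ / msr@tsc@, as a percent.
    c6_residency = 1.8e9   # hypothetical residency count over the window
    tsc = 3.0e9            # hypothetical TSC delta over the same window
    print(f"C6_Core_Residency = {100 * c6_residency / tsc:.1f}%")  # 60.0%

    # The simplified threshold "tma_core_bound > 0.1 & tma_backend_bound > 0.1"
    # evaluates exactly like its parenthesized predecessor.
    def core_bound_flagged(tma_core_bound: float, tma_backend_bound: float) -> bool:
        return tma_core_bound > 0.1 and tma_backend_bound > 0.1

    print(core_bound_flagged(0.12, 0.15))  # True
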
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/cache.json b/tools/perf/pmu-events/arch/x86/alderlaken/cache.json
index fd9ed58c2f90..76a841675337 100644
--- a/tools/perf/pmu-events/arch/x86/alderlaken/cache.json
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/cache.json
@@ -397,12 +397,35 @@
"UMask": "0x6"
},
{
+ "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that have any type of response.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xB7",
+ "EventName": "OCR.COREWB_M.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10008",
+ "PublicDescription": "Counts modified writebacks from L1 cache and L2 cache that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache.",
"Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F803C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -413,6 +436,7 @@
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -423,6 +447,7 @@
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -433,6 +458,18 @@
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "PublicDescription": "Counts demand data reads that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -443,6 +480,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F803C0001",
+ "PublicDescription": "Counts demand data reads that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -453,6 +491,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0001",
+ "PublicDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -463,6 +502,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0001",
+ "PublicDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -473,6 +513,18 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0001",
+ "PublicDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -483,6 +535,7 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F803C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -493,6 +546,7 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -503,6 +557,7 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -513,6 +568,18 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache software prefetches which include T0/T1/T2 and NTA (except PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xB7",
+ "EventName": "OCR.SWPF_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x14000",
+ "PublicDescription": "Counts L1 data cache software prefetches which include T0/T1/T2 and NTA (except PREFETCHW) that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -523,6 +590,7 @@
"EventName": "OCR.SWPF_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F803C4000",
+ "PublicDescription": "Counts L1 data cache software prefetches which include T0/T1/T2 and NTA (except PREFETCHW) that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -533,6 +601,7 @@
"EventName": "OCR.SWPF_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C4000",
+ "PublicDescription": "Counts L1 data cache software prefetches which include T0/T1/T2 and NTA (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -543,6 +612,7 @@
"EventName": "OCR.SWPF_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C4000",
+ "PublicDescription": "Counts L1 data cache software prefetches which include T0/T1/T2 and NTA (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -553,6 +623,7 @@
"EventName": "OCR.SWPF_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C4000",
+ "PublicDescription": "Counts L1 data cache software prefetches which include T0/T1/T2 and NTA (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/memory.json b/tools/perf/pmu-events/arch/x86/alderlaken/memory.json
index 3b46b048dfb2..049c5e2630d7 100644
--- a/tools/perf/pmu-events/arch/x86/alderlaken/memory.json
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/memory.json
@@ -57,12 +57,35 @@
"UMask": "0x2"
},
{
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x784000004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the L3 cache.",
"Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84400004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x784000001",
+ "PublicDescription": "Counts demand data reads that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -73,6 +96,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84400001",
+ "PublicDescription": "Counts demand data reads that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -83,6 +107,18 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84400001",
+ "PublicDescription": "Counts demand data reads that were not supplied by the L3 cache. [L3_MISS_LOCAL is alias to L3_MISS] Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x784000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -93,6 +129,7 @@
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84400002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -103,6 +140,18 @@
"EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84400002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache. [L3_MISS_LOCAL is alias to L3_MISS] Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache software prefetches which include T0/T1/T2 and NTA (except PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xB7",
+ "EventName": "OCR.SWPF_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x784004000",
+ "PublicDescription": "Counts L1 data cache software prefetches which include T0/T1/T2 and NTA (except PREFETCHW) that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -113,6 +162,7 @@
"EventName": "OCR.SWPF_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84404000",
+ "PublicDescription": "Counts L1 data cache software prefetches which include T0/T1/T2 and NTA (except PREFETCHW) that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
}
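
The OCR.* events above all program the OFFCORE_RESPONSE MSRs (0x1a6/0x1a7), and their MSRValues follow a regular layout: a request-type bit in the low word OR'd with a response/supplier/snoop mask in the upper bits. Below is a minimal Python sketch of that decomposition; every constant is read directly off the JSON in this patch rather than taken from an SDM, so treat it as illustrative only.

# Sketch of how the OCR (OFFCORE_RESPONSE, MSR 0x1a6/0x1a7) MSRValues in
# this patch decompose. All constants are derived from the JSON above;
# they are illustrative, not an authoritative encoding reference.

REQUEST = {
    "DEMAND_DATA_RD": 0x1,     # OCR.DEMAND_DATA_RD.DRAM = 0x784000001
    "DEMAND_RFO":     0x2,     # OCR.DEMAND_RFO.DRAM     = 0x784000002
    "DEMAND_CODE_RD": 0x4,     # OCR.DEMAND_CODE_RD.DRAM = 0x784000004
    "SWPF_RD":        0x4000,  # OCR.SWPF_RD.DRAM        = 0x784004000
}

RESPONSE = {
    "DRAM":                      0x784000000,
    "L3_MISS":                   0x3F84400000,
    "L3_HIT.SNOOP_HITM":         0x10003C0000,  # 0x10003C4000 minus the SWPF_RD bit
    "L3_HIT.SNOOP_HIT_NO_FWD":   0x4003C0000,
    "L3_HIT.SNOOP_HIT_WITH_FWD": 0x8003C0000,
}

def msr_value(request: str, response: str) -> int:
    """Compose an OFFCORE_RESPONSE programming value from the two masks."""
    return REQUEST[request] | RESPONSE[response]

# Cross-checks against event definitions appearing in this patch:
assert msr_value("SWPF_RD", "DRAM") == 0x784004000                # OCR.SWPF_RD.DRAM
assert msr_value("DEMAND_RFO", "L3_MISS") == 0x3F84400002         # OCR.DEMAND_RFO.L3_MISS
assert msr_value("SWPF_RD", "L3_HIT.SNOOP_HITM") == 0x10003C4000  # OCR.SWPF_RD.L3_HIT.SNOOP_HITM
assert msr_value("DEMAND_RFO", "L3_HIT.SNOOP_HIT_WITH_FWD") == 0x8003C0002
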
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/other.json b/tools/perf/pmu-events/arch/x86/alderlaken/other.json
index f8c21b7f8f40..144d7b06f240 100644
--- a/tools/perf/pmu-events/arch/x86/alderlaken/other.json
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/other.json
@@ -9,82 +9,13 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that have any type of response.",
- "Counter": "0,1,2,3,4,5",
- "EventCode": "0xB7",
- "EventName": "OCR.COREWB_M.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10008",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
- "Counter": "0,1,2,3,4,5",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
- "Counter": "0,1,2,3,4,5",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_CODE_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x784000004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that have any type of response.",
- "Counter": "0,1,2,3,4,5",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
- "Counter": "0,1,2,3,4,5",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_DATA_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x784000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
- "Counter": "0,1,2,3,4,5",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
- "Counter": "0,1,2,3,4,5",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_RFO.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x784000002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
"BriefDescription": "Counts streaming stores which modify a full 64 byte cacheline that have any type of response.",
"Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.FULL_STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800000010000",
+ "PublicDescription": "Counts streaming stores which modify a full 64 byte cacheline that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -95,6 +26,7 @@
"EventName": "OCR.PARTIAL_STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400000010000",
+ "PublicDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -105,35 +37,8 @@
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10800",
+ "PublicDescription": "Counts streaming stores that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache software prefetches which include T0/T1/T2 and NTA (except PREFETCHW) that have any type of response.",
- "Counter": "0,1,2,3,4,5",
- "EventCode": "0xB7",
- "EventName": "OCR.SWPF_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x14000",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache software prefetches which include T0/T1/T2 and NTA (except PREFETCHW) that were supplied by DRAM.",
- "Counter": "0,1,2,3,4,5",
- "EventCode": "0xB7",
- "EventName": "OCR.SWPF_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x784004000",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state. For Tremont, UMWAIT and TPAUSE will only put the CPU into C0.1 activity state (not C0.2 activity state)",
- "Counter": "0,1,2,3,4,5",
- "EventCode": "0x75",
- "EventName": "SERIALIZATION.C01_MS_SCB",
- "SampleAfterValue": "200003",
- "UMask": "0x4"
}
]
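
Once a perf build carries these tables, the event names above are usable directly on the command line. A hypothetical smoke test follows; the event names come straight from the JSON above, and a Linux host with a matching perf build is assumed.

# Hypothetical smoke test: count two of the OCR events defined above over a
# one-second sleep. Assumes a perf binary built with these alderlaken
# tables; perf matches JSON event names case-insensitively.
import subprocess

events = ["OCR.DEMAND_DATA_RD.DRAM", "OCR.SWPF_RD.L3_MISS"]
cmd = ["perf", "stat", "-e", ",".join(events), "--", "sleep", "1"]

# perf stat prints its counts on stderr.
result = subprocess.run(cmd, capture_output=True, text=True)
print(result.stderr)
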
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json b/tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json
index 713ebc21cec0..d650cbd48c1f 100644
--- a/tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json
@@ -9,16 +9,18 @@
"UMask": "0x3"
},
{
- "BriefDescription": "Counts the number of active floating point and integer dividers per cycle.",
+ "BriefDescription": "This event is deprecated.",
"Counter": "0,1,2,3,4,5",
+ "Deprecated": "1",
"EventCode": "0xcd",
"EventName": "ARITH.DIV_OCCUPANCY",
"SampleAfterValue": "1000003",
"UMask": "0x3"
},
{
- "BriefDescription": "Counts the number of floating point and integer divider uops executed per cycle.",
+ "BriefDescription": "This event is deprecated.",
"Counter": "0,1,2,3,4,5",
+ "Deprecated": "1",
"EventCode": "0xcd",
"EventName": "ARITH.DIV_UOPS",
"SampleAfterValue": "1000003",
@@ -337,7 +339,7 @@
"BriefDescription": "Counts the total number of instructions retired. (Fixed event)",
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
- "PublicDescription": "Counts the total number of instructions that retired. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. This event continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0.",
+ "PublicDescription": "Counts the total number of instructions that retired. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. This event continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. Available PDIST counters: 32",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
@@ -399,8 +401,9 @@
"UMask": "0x20"
},
{
- "BriefDescription": "Counts the number of machine clears that flush the pipeline and restart the machine with the use of microcode due to SMC, MEMORY_ORDERING, FP_ASSISTS, PAGE_FAULT, DISAMBIGUATION, and FPC_VIRTUAL_TRAP.",
+ "BriefDescription": "This event is deprecated.",
"Counter": "0,1,2,3,4,5",
+ "Deprecated": "1",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.SLOW",
"SampleAfterValue": "20003",
@@ -424,6 +427,14 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state. For Tremont, UMWAIT and TPAUSE will only put the CPU into C0.1 activity state (not C0.2 activity state)",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0x75",
+ "EventName": "SERIALIZATION.C01_MS_SCB",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
"BriefDescription": "Counts the number of issue slots not consumed by the backend due to a micro-sequencer (MS) scoreboard, which stalls the front-end from issuing from the UROM until a specified older uop retires.",
"Counter": "0,1,2,3,4,5",
"EventCode": "0x75",
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/alderlaken/uncore-interconnect.json
index 7c0779c74154..b5604c7534e1 100644
--- a/tools/perf/pmu-events/arch/x86/alderlaken/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/uncore-interconnect.json
@@ -65,7 +65,6 @@
"Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_ARB_REQ_TRK_REQUEST.DRD",
- "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
@@ -103,7 +102,6 @@
"Counter": "0,1",
"EventCode": "0x81",
"EventName": "UNC_ARB_TRK_REQUESTS.RD",
- "Experimental": "1",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "ARB"
diff --git a/tools/perf/pmu-events/arch/x86/arrowlake/arl-metrics.json b/tools/perf/pmu-events/arch/x86/arrowlake/arl-metrics.json
index 7ddb89dd1871..4f1f77404943 100644
--- a/tools/perf/pmu-events/arch/x86/arrowlake/arl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/arrowlake/arl-metrics.json
@@ -1,56 +1,56 @@
[
{
"BriefDescription": "C10 residency percent per package",
- "MetricExpr": "cstate_pkg@c10\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c10\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C10_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C1 residency percent per core",
- "MetricExpr": "cstate_core@c1\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c1\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C1_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C2 residency percent per package",
- "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per package",
- "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per core",
- "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per package",
- "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per core",
- "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C8 residency percent per package",
- "MetricExpr": "cstate_pkg@c8\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c8\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C8_Pkg_Residency",
"ScaleUnit": "100%"
@@ -75,7 +75,7 @@
"MetricExpr": "tma_core_bound",
"MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_allocation_restriction",
- "MetricThreshold": "(tma_allocation_restriction >0.10) & ((tma_core_bound >0.10) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_allocation_restriction > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -85,7 +85,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.ALL_P@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
- "MetricThreshold": "(tma_backend_bound >0.10)",
+ "MetricThreshold": "tma_backend_bound > 0.1",
"MetricgroupNoGroup": "TopdownL1;Default",
"PublicDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that uops must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count",
"ScaleUnit": "100%",
@@ -97,7 +97,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.ALL_P@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
"MetricName": "tma_bad_speculation",
- "MetricThreshold": "(tma_bad_speculation >0.15)",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
"PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ). Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
"ScaleUnit": "100%",
@@ -108,7 +108,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.BRANCH_DETECT@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_branch_detect",
- "MetricThreshold": "(tma_branch_detect >0.05) & ((tma_ifetch_latency >0.15) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_branch_detect > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"PublicDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -118,7 +118,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.MISPREDICT@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
"MetricName": "tma_branch_mispredicts",
- "MetricThreshold": "(tma_branch_mispredicts >0.05) & ((tma_bad_speculation >0.15))",
+ "MetricThreshold": "tma_branch_mispredicts > 0.05 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -128,7 +128,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.BRANCH_RESTEER@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_branch_resteer",
- "MetricThreshold": "(tma_branch_resteer >0.05) & ((tma_ifetch_latency >0.15) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_branch_resteer > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -137,7 +137,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.CISC@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_cisc",
- "MetricThreshold": "(tma_cisc >0.05) & ((tma_ifetch_bandwidth >0.10) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_cisc > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -146,7 +146,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_core_bound",
- "MetricThreshold": "(tma_core_bound >0.10) & ((tma_backend_bound >0.10))",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.1",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -156,7 +156,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.DECODE@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_decode",
- "MetricThreshold": "(tma_decode >0.05) & ((tma_ifetch_bandwidth >0.10) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_decode > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -165,7 +165,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.FASTNUKE@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_fast_nuke",
- "MetricThreshold": "(tma_fast_nuke >0.05) & ((tma_machine_clears >0.05) & ((tma_bad_speculation >0.15)))",
+ "MetricThreshold": "tma_fast_nuke > 0.05 & (tma_machine_clears > 0.05 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -175,7 +175,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ALL@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
- "MetricThreshold": "(tma_frontend_bound >0.20)",
+ "MetricThreshold": "tma_frontend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1;Default",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -185,7 +185,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ICACHE@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_icache_misses",
- "MetricThreshold": "(tma_icache_misses >0.05) & ((tma_ifetch_latency >0.15) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -194,7 +194,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
"MetricName": "tma_ifetch_bandwidth",
- "MetricThreshold": "(tma_ifetch_bandwidth >0.10) & ((tma_frontend_bound >0.20))",
+ "MetricThreshold": "tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -204,7 +204,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.FRONTEND_LATENCY@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
"MetricName": "tma_ifetch_latency",
- "MetricThreshold": "(tma_ifetch_latency >0.15) & ((tma_frontend_bound >0.20))",
+ "MetricThreshold": "tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -567,7 +567,7 @@
},
{
"BriefDescription": "Average CPU Utilization",
- "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.REF_TSC@ / TSC",
+ "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.REF_TSC@ / msr@tsc\\,cpu=cpu_atom@",
"MetricName": "tma_info_system_cpu_utilization",
"Unit": "cpu_atom"
},
@@ -590,7 +590,7 @@
"BriefDescription": "PerfMon Event Multiplexing accuracy indicator",
"MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.CORE_P@ / cpu_atom@CPU_CLK_UNHALTED.CORE@",
"MetricName": "tma_info_system_mux",
- "MetricThreshold": "((tma_info_system_mux > 1.1)|(tma_info_system_mux < 0.9))",
+ "MetricThreshold": "tma_info_system_mux > 1.1 | tma_info_system_mux < 0.9",
"Unit": "cpu_atom"
},
{
@@ -629,7 +629,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ITLB_MISS@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_itlb_misses",
- "MetricThreshold": "(tma_itlb_misses >0.05) & ((tma_ifetch_latency >0.15) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -638,7 +638,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
"MetricName": "tma_machine_clears",
- "MetricThreshold": "(tma_machine_clears >0.05) & ((tma_bad_speculation >0.15))",
+ "MetricThreshold": "tma_machine_clears > 0.05 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -648,7 +648,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.MEM_SCHEDULER@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_mem_scheduler",
- "MetricThreshold": "(tma_mem_scheduler >0.10) & ((tma_resource_bound >0.20) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_mem_scheduler > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -657,7 +657,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_non_mem_scheduler",
- "MetricThreshold": "(tma_non_mem_scheduler >0.10) & ((tma_resource_bound >0.20) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_non_mem_scheduler > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -666,7 +666,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.NUKE@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_nuke",
- "MetricThreshold": "(tma_nuke >0.05) & ((tma_machine_clears >0.05) & ((tma_bad_speculation >0.15)))",
+ "MetricThreshold": "tma_nuke > 0.05 & (tma_machine_clears > 0.05 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -675,7 +675,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.OTHER@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_other_fb",
- "MetricThreshold": "(tma_other_fb >0.05) & ((tma_ifetch_bandwidth >0.10) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_other_fb > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -684,7 +684,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.PREDECODE@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_predecode",
- "MetricThreshold": "(tma_predecode >0.05) & ((tma_ifetch_bandwidth >0.10) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_predecode > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -693,7 +693,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.REGISTER@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_register",
- "MetricThreshold": "(tma_register >0.10) & ((tma_resource_bound >0.20) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_register > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -702,7 +702,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.REORDER_BUFFER@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_reorder_buffer",
- "MetricThreshold": "(tma_reorder_buffer >0.10) & ((tma_resource_bound >0.20) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_reorder_buffer > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -711,7 +711,7 @@
"MetricExpr": "tma_backend_bound - tma_core_bound",
"MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_resource_bound",
- "MetricThreshold": "(tma_resource_bound >0.20) & ((tma_backend_bound >0.10))",
+ "MetricThreshold": "tma_resource_bound > 0.2 & tma_backend_bound > 0.1",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -722,7 +722,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_RETIRING.ALL@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
- "MetricThreshold": "(tma_retiring >0.75)",
+ "MetricThreshold": "tma_retiring > 0.75",
"MetricgroupNoGroup": "TopdownL1;Default",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -732,7 +732,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.SERIALIZATION@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_serialization",
- "MetricThreshold": "(tma_serialization >0.10) & ((tma_resource_bound >0.20) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_serialization > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -744,7 +744,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations",
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
"MetricExpr": "cpu_core@UOPS_DISPATCHED.ALU@ / (6 * tma_info_thread_clks)",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_alu_op_utilization",
@@ -757,13 +757,13 @@
"MetricExpr": "78 * cpu_core@ASSISTS.ANY@ / tma_info_thread_slots",
"MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
- "MetricThreshold": "tma_assists > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: ASSISTS.ANY",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates fraction of slots the CPU retired uops as a result of handing SSE to AVX* or AVX* to SSE transition Assists",
+ "BriefDescription": "This metric estimates fraction of slots the CPU retired uops as a result of handing SSE to AVX* or AVX* to SSE transition Assists.",
"MetricExpr": "63 * cpu_core@ASSISTS.SSE_AVX_MIX@ / tma_info_thread_slots",
"MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group",
"MetricName": "tma_avx_assists",
@@ -774,7 +774,7 @@
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "cpu_core@topdown\\-be\\-bound@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)",
"MetricGroup": "BvOB;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
@@ -786,18 +786,18 @@
{
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "topdown\\-bad\\-spec / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "cpu_core@topdown\\-bad\\-spec@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)",
"MetricGroup": "Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_bad_speculation",
"MetricThreshold": "tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
- "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches)",
+ "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
"MetricGroup": "BigFootprint;BvBC;Fed;Frontend;IcMiss;MemoryTLB",
"MetricName": "tma_bottleneck_big_code",
"MetricThreshold": "tma_bottleneck_big_code > 20",
@@ -813,35 +813,35 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
+ "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
+ "MetricGroup": "BvCB;Cor;tma_issueComp",
+ "MetricName": "tma_bottleneck_compute_bound_est",
+ "MetricThreshold": "tma_bottleneck_compute_bound_est > 20",
+ "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. Related metrics: ",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_fb_full / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_l1_latency_capacity + tma_lock_latency + tma_split_loads + tma_fb_full)))",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_dtlb_load + tma_fb_full + tma_l1_latency_capacity + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_early_blk + tma_store_fwd_blk)))",
"MetricGroup": "BvMB;Mem;MemoryBW;Offcore;tma_issueBW",
- "MetricName": "tma_bottleneck_cache_memory_bandwidth",
- "MetricThreshold": "tma_bottleneck_cache_memory_bandwidth > 20",
- "PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_mem_bandwidth, tma_sq_full",
+ "MetricName": "tma_bottleneck_data_cache_memory_bandwidth",
+ "MetricThreshold": "tma_bottleneck_data_cache_memory_bandwidth > 20",
+ "PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full",
"Unit": "cpu_core"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l1_latency_dependency / (tma_dtlb_load + tma_fb_full + tma_l1_latency_capacity + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l1_latency_capacity / (tma_dtlb_load + tma_fb_full + tma_l1_latency_capacity + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_lock_latency / (tma_dtlb_load + tma_fb_full + tma_l1_latency_capacity + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_loads / (tma_dtlb_load + tma_fb_full + tma_l1_latency_capacity + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_stores / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l1_latency_dependency / (tma_dtlb_load + tma_fb_full + tma_l1_latency_capacity + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_early_blk + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l1_latency_capacity / (tma_dtlb_load + tma_fb_full + tma_l1_latency_capacity + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_early_blk + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_lock_latency / (tma_dtlb_load + tma_fb_full + tma_l1_latency_capacity + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_early_blk + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_loads / (tma_dtlb_load + tma_fb_full + tma_l1_latency_capacity + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_early_blk + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_stores / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
"MetricGroup": "BvML;Mem;MemoryLat;Offcore;tma_issueLat",
- "MetricName": "tma_bottleneck_cache_memory_latency",
- "MetricThreshold": "(tma_bottleneck_cache_memory_latency > 20)",
+ "MetricName": "tma_bottleneck_data_cache_memory_latency",
+ "MetricThreshold": "tma_bottleneck_data_cache_memory_latency > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
- "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_serializing_operation + tma_ports_utilization) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_serializing_operation + tma_ports_utilization)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
- "MetricGroup": "BvCB;Cor;tma_issueComp",
- "MetricName": "tma_bottleneck_compute_bound_est",
- "MetricThreshold": "tma_bottleneck_compute_bound_est > 20",
- "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy",
- "Unit": "cpu_core"
- },
- {
"BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)",
- "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) - (1 - cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.MS\\,cmask\\=0x1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_mispredicts_resteers + tma_clears_resteers + tma_unknown_branches)) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_ms / (tma_mite + tma_dsb + tma_lsd + tma_ms))) - tma_bottleneck_big_code",
+ "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - (1 - cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.MS\\,cmask\\=1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_ms)) - tma_bottleneck_big_code",
"MetricGroup": "BvFB;Fed;FetchBW;Frontend",
"MetricName": "tma_bottleneck_instruction_fetch_bw",
"MetricThreshold": "tma_bottleneck_instruction_fetch_bw > 20",
@@ -849,7 +849,7 @@
},
{
"BriefDescription": "Total pipeline cost of irregular execution (e.g",
- "MetricExpr": "100 * ((1 - cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.MS\\,cmask\\=0x1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_mispredicts_resteers + tma_clears_resteers + tma_unknown_branches)) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_ms / (tma_mite + tma_dsb + tma_lsd + tma_ms)) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + cpu_core@RS.EMPTY_RESOURCE@ / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_serializing_operation + tma_ports_utilization) + tma_microcode_sequencer / (tma_microcode_sequencer + tma_few_uops_instructions) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
+ "MetricExpr": "100 * ((1 - cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.MS\\,cmask\\=1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_ms) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + cpu_core@RS.EMPTY_RESOURCE@ / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_microcode_sequencer + tma_few_uops_instructions) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
"MetricGroup": "Bad;BvIO;Cor;Ret;tma_issueMS",
"MetricName": "tma_bottleneck_irregular_overhead",
"MetricThreshold": "tma_bottleneck_irregular_overhead > 10",
@@ -858,10 +858,10 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / (tma_dtlb_load + tma_fb_full + tma_l1_latency_capacity + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / (tma_dtlb_load + tma_fb_full + tma_l1_latency_capacity + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_early_blk + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
"MetricGroup": "BvMT;Mem;MemoryTLB;Offcore;tma_issueTLB",
"MetricName": "tma_bottleneck_memory_data_tlbs",
- "MetricThreshold": "(tma_bottleneck_memory_data_tlbs > 20)",
+ "MetricThreshold": "tma_bottleneck_memory_data_tlbs > 20",
"PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store",
"Unit": "cpu_core"
},
@@ -870,13 +870,13 @@
"MetricExpr": "100 * (tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
"MetricGroup": "BvMS;LockCont;Mem;Offcore;tma_issueSyncxn",
"MetricName": "tma_bottleneck_memory_synchronization",
- "MetricThreshold": "(tma_bottleneck_memory_synchronization > 10)",
+ "MetricThreshold": "tma_bottleneck_memory_synchronization > 10",
"PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"Unit": "cpu_core"
},
{
"BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
- "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Bad;BadSpec;BrMispredicts;BvMP;tma_issueBM",
"MetricName": "tma_bottleneck_mispredictions",
"MetricThreshold": "tma_bottleneck_mispredictions > 20",
@@ -885,15 +885,15 @@
},
{
"BriefDescription": "Total pipeline cost of remaining bottlenecks in the back-end",
- "MetricExpr": "100 - (tma_bottleneck_big_code + tma_bottleneck_instruction_fetch_bw + tma_bottleneck_mispredictions + tma_bottleneck_cache_memory_bandwidth + tma_bottleneck_cache_memory_latency + tma_bottleneck_memory_data_tlbs + tma_bottleneck_memory_synchronization + tma_bottleneck_compute_bound_est + tma_bottleneck_irregular_overhead + tma_bottleneck_branching_overhead + tma_bottleneck_useful_work)",
+ "MetricExpr": "100 - (tma_bottleneck_big_code + tma_bottleneck_instruction_fetch_bw + tma_bottleneck_mispredictions + tma_bottleneck_data_cache_memory_bandwidth + tma_bottleneck_data_cache_memory_latency + tma_bottleneck_memory_data_tlbs + tma_bottleneck_memory_synchronization + tma_bottleneck_compute_bound_est + tma_bottleneck_irregular_overhead + tma_bottleneck_branching_overhead + tma_bottleneck_useful_work)",
"MetricGroup": "BvOB;Cor;Offcore",
"MetricName": "tma_bottleneck_other_bottlenecks",
- "MetricThreshold": "(tma_bottleneck_other_bottlenecks > 20)",
+ "MetricThreshold": "tma_bottleneck_other_bottlenecks > 20",
"PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls.",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead",
+ "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.",
"MetricExpr": "100 * (tma_retiring - (cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ + 2 * cpu_core@BR_INST_RETIRED.NEAR_CALL@ + cpu_core@INST_RETIRED.NOP@) / tma_info_thread_slots - tma_microcode_sequencer / (tma_microcode_sequencer + tma_few_uops_instructions) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
"MetricGroup": "BvUW;Ret",
"MetricName": "tma_bottleneck_useful_work",
@@ -902,7 +902,7 @@
},
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
- "MetricExpr": "topdown\\-br\\-mispredict / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "cpu_core@topdown\\-br\\-mispredict@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)",
"MetricGroup": "BadSpec;BrMispredicts;BvMP;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
@@ -916,26 +916,26 @@
"MetricExpr": "cpu_core@INT_MISC.CLEAR_RESTEER_CYCLES@ / tma_info_thread_clks + tma_unknown_branches",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_branch_resteers",
- "MetricThreshold": "tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_l3_hit_latency, tma_store_latency",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.1 power-performance optimized state (Faster wakeup time; Smaller power savings)",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.1 power-performance optimized state (Faster wakeup time; Smaller power savings).",
"MetricExpr": "cpu_core@CPU_CLK_UNHALTED.C01@ / tma_info_thread_clks",
"MetricGroup": "C0Wait;TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_c01_wait",
- "MetricThreshold": "tma_c01_wait > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_c01_wait > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.2 power-performance optimized state (Slower wakeup time; Larger power savings)",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.2 power-performance optimized state (Slower wakeup time; Larger power savings).",
"MetricExpr": "cpu_core@CPU_CLK_UNHALTED.C02@ / tma_info_thread_clks",
"MetricGroup": "C0Wait;TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_c02_wait",
- "MetricThreshold": "tma_c02_wait > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_c02_wait > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -944,8 +944,8 @@
"MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
"MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_cisc",
- "MetricThreshold": "tma_cisc > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
- "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -954,99 +954,99 @@
"MetricExpr": "(1 - tma_branch_mispredicts / tma_bad_speculation) * cpu_core@INT_MISC.CLEAR_RESTEER_CYCLES@ / tma_info_thread_clks",
"MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
"MetricName": "tma_clears_resteers",
- "MetricThreshold": "tma_clears_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that hit in the L2 cache",
- "MetricExpr": "max(0, cpu_core@FRONTEND_RETIRED.L1I_MISS@ * cpu_core@frontend_retired.l1i_miss@R / tma_info_thread_clks - tma_code_l2_miss)",
+ "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that hit in the L2 cache.",
+ "MetricExpr": "max(0, cpu_core@FRONTEND_RETIRED.L1I_MISS@ * cpu_core@FRONTEND_RETIRED.L1I_MISS@R / tma_info_thread_clks - tma_code_l2_miss)",
"MetricGroup": "FetchLat;IcMiss;Offcore;TopdownL4;tma_L4_group;tma_icache_misses_group",
"MetricName": "tma_code_l2_hit",
- "MetricThreshold": "tma_code_l2_hit > 0.05 & tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_l2_hit > 0.05 & (tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that miss in the L2 cache",
- "MetricExpr": "cpu_core@FRONTEND_RETIRED.L2_MISS@ * cpu_core@frontend_retired.l2_miss@R / tma_info_thread_clks",
+ "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that miss in the L2 cache.",
+ "MetricExpr": "cpu_core@FRONTEND_RETIRED.L2_MISS@ * cpu_core@FRONTEND_RETIRED.L2_MISS@R / tma_info_thread_clks",
"MetricGroup": "FetchLat;IcMiss;Offcore;TopdownL4;tma_L4_group;tma_icache_misses_group",
"MetricName": "tma_code_l2_miss",
- "MetricThreshold": "tma_code_l2_miss > 0.05 & tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_l2_miss > 0.05 & (tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the (first level) ITLB was missed by instructions fetches, that later on hit in second-level TLB (STLB)",
- "MetricExpr": "max(0, cpu_core@FRONTEND_RETIRED.ITLB_MISS@ * cpu_core@frontend_retired.itlb_miss@R / tma_info_thread_clks - tma_code_stlb_miss)",
+ "MetricExpr": "max(0, cpu_core@FRONTEND_RETIRED.ITLB_MISS@ * cpu_core@FRONTEND_RETIRED.ITLB_MISS@R / tma_info_thread_clks - tma_code_stlb_miss)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group",
"MetricName": "tma_code_stlb_hit",
- "MetricThreshold": "tma_code_stlb_hit > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_hit > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric estimates the fraction of cycles where the Second-level TLB (STLB) was missed by instruction fetches, performing a hardware page walk",
- "MetricExpr": "cpu_core@FRONTEND_RETIRED.STLB_MISS@ * cpu_core@frontend_retired.stlb_miss@R / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@FRONTEND_RETIRED.STLB_MISS@ * cpu_core@FRONTEND_RETIRED.STLB_MISS@R / tma_info_thread_clks",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group",
"MetricName": "tma_code_stlb_miss",
- "MetricThreshold": "tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for (instruction) code accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for (instruction) code accesses.",
"MetricExpr": "cpu_core@ITLB_MISSES.WALK_ACTIVE@ / tma_info_thread_clks * cpu_core@ITLB_MISSES.WALK_COMPLETED_2M_4M@ / (cpu_core@ITLB_MISSES.WALK_COMPLETED_4K@ + cpu_core@ITLB_MISSES.WALK_COMPLETED_2M_4M@)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group",
"MetricName": "tma_code_stlb_miss_2m",
- "MetricThreshold": "tma_code_stlb_miss_2m > 0.05 & tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss_2m > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for (instruction) code accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for (instruction) code accesses.",
"MetricExpr": "cpu_core@ITLB_MISSES.WALK_ACTIVE@ / tma_info_thread_clks * cpu_core@ITLB_MISSES.WALK_COMPLETED_4K@ / (cpu_core@ITLB_MISSES.WALK_COMPLETED_4K@ + cpu_core@ITLB_MISSES.WALK_COMPLETED_2M_4M@)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group",
"MetricName": "tma_code_stlb_miss_4k",
- "MetricThreshold": "tma_code_stlb_miss_4k > 0.05 & tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss_4k > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by non-taken conditional branches",
- "MetricExpr": "cpu_core@BR_MISP_RETIRED.COND_NTAKEN_COST@ * cpu_core@br_misp_retired.cond_ntaken_cost@R / tma_info_thread_clks",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by non-taken conditional branches.",
+ "MetricExpr": "cpu_core@BR_MISP_RETIRED.COND_NTAKEN_COST@ * cpu_core@BR_MISP_RETIRED.COND_NTAKEN_COST@R / tma_info_thread_clks",
"MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_cond_nt_mispredicts",
- "MetricThreshold": "tma_cond_nt_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_cond_nt_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to misprediction by backward-taken conditional branches",
- "MetricExpr": "cpu_core@BR_MISP_RETIRED.COND_TAKEN_BWD_COST@ * cpu_core@br_misp_retired.cond_taken_bwd_cost@R / tma_info_thread_clks",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to misprediction by backward-taken conditional branches.",
+ "MetricExpr": "cpu_core@BR_MISP_RETIRED.COND_TAKEN_BWD_COST@ * cpu_core@BR_MISP_RETIRED.COND_TAKEN_BWD_COST@R / tma_info_thread_clks",
"MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_cond_tk_bwd_mispredicts",
- "MetricThreshold": "tma_cond_tk_bwd_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_cond_tk_bwd_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to misprediction by forward-taken conditional branches",
- "MetricExpr": "cpu_core@BR_MISP_RETIRED.COND_TAKEN_FWD_COST@ * cpu_core@br_misp_retired.cond_taken_fwd_cost@R / tma_info_thread_clks",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to misprediction by forward-taken conditional branches.",
+ "MetricExpr": "cpu_core@BR_MISP_RETIRED.COND_TAKEN_FWD_COST@ * cpu_core@BR_MISP_RETIRED.COND_TAKEN_FWD_COST@R / tma_info_thread_clks",
"MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_cond_tk_fwd_mispredicts",
- "MetricThreshold": "tma_cond_tk_fwd_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_cond_tk_fwd_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
- "MetricExpr": "((min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS@ * cpu_core@mem_load_l3_hit_retired.xsnp_miss@R, cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS@ * (27 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency) if 0 < cpu_core@mem_load_l3_hit_retired.xsnp_miss@R else cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS@ * (27 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency) + (min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM@ * cpu_core@mem_load_l3_hit_retired.xsnp_hitm@R, cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM@ * (28 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency) if 0 < cpu_core@mem_load_l3_hit_retired.xsnp_hitm@R else cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM@ * (28 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency)) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
+ "MetricExpr": "(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS@ * min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS@R, 24 * tma_info_system_core_frequency) + cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM@ * min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM@R, 25 * tma_info_system_core_frequency)) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
- "MetricThreshold": "tma_contested_accesses > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD, MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_data_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_bottleneck_memory_synchronization, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1057,17 +1057,18 @@
"MetricName": "tma_core_bound",
"MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations)",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
- "MetricExpr": "((min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD@ * cpu_core@mem_load_l3_hit_retired.xsnp_no_fwd@R, cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD@ * (27 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency) if 0 < cpu_core@mem_load_l3_hit_retired.xsnp_no_fwd@R else cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD@ * (27 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency) + (min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * cpu_core@mem_load_l3_hit_retired.xsnp_fwd@R, cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * (28 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency) if 0 < cpu_core@mem_load_l3_hit_retired.xsnp_fwd@R else cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * (28 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency)) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD@ * min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD@R, 24 * tma_info_system_core_frequency) + cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@R, 25 * tma_info_system_core_frequency)) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
"MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
- "MetricThreshold": "tma_data_sharing > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD. Related metrics: tma_contested_accesses, tma_machine_clears",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1076,7 +1077,7 @@
"MetricExpr": "cpu_core@ARITH.DIV_ACTIVE@ / tma_info_thread_clks",
"MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
- "MetricThreshold": "tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIV_ACTIVE",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -1086,18 +1087,18 @@
"MetricExpr": "cpu_core@MEMORY_STALLS.MEM@ / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_dram_bound",
- "MetricThreshold": "tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_MISS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline",
- "MetricExpr": "(cpu_core@IDQ.DSB_UOPS\\,cmask\\=0x8\\,inv\\=0x1@ + cpu_core@IDQ.DSB_UOPS@ / (cpu_core@IDQ.DSB_UOPS@ + cpu_core@IDQ.MITE_UOPS@) * (cpu_core@IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE@ - cpu_core@IDQ_BUBBLES.FETCH_LATENCY@)) / tma_info_thread_clks",
+ "MetricExpr": "(cpu_core@IDQ.DSB_UOPS\\,cmask\\=0x8\\,inv\\=0x1@ / 2 + cpu_core@IDQ.DSB_UOPS@ / (cpu_core@IDQ.DSB_UOPS@ + cpu_core@IDQ.MITE_UOPS@) * (cpu_core@IDQ_BUBBLES.STARVATION_CYCLES@ - cpu_core@IDQ_BUBBLES.FETCH_LATENCY@)) / tma_info_thread_clks",
"MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_dsb",
"MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2",
- "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1106,28 +1107,28 @@
"MetricExpr": "cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES@ / tma_info_thread_clks",
"MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_dsb_switches",
- "MetricThreshold": "tma_dsb_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
- "MetricExpr": "(min(cpu_core@MEM_INST_RETIRED.STLB_HIT_LOADS@ * cpu_core@mem_inst_retired.stlb_hit_loads@R, cpu_core@MEM_INST_RETIRED.STLB_HIT_LOADS@ * 7) if 0 < cpu_core@mem_inst_retired.stlb_hit_loads@R else cpu_core@MEM_INST_RETIRED.STLB_HIT_LOADS@ * 7) / tma_info_thread_clks + tma_load_stlb_miss",
+ "MetricExpr": "cpu_core@MEM_INST_RETIRED.STLB_HIT_LOADS@ * min(cpu_core@MEM_INST_RETIRED.STLB_HIT_LOADS@R, 7) / tma_info_thread_clks + tma_load_stlb_miss",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
- "MetricThreshold": "tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS. Related metrics: tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_store",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
- "MetricExpr": "(min(cpu_core@MEM_INST_RETIRED.STLB_HIT_STORES@ * cpu_core@mem_inst_retired.stlb_hit_stores@R, cpu_core@MEM_INST_RETIRED.STLB_HIT_STORES@ * 7) if 0 < cpu_core@mem_inst_retired.stlb_hit_stores@R else cpu_core@MEM_INST_RETIRED.STLB_HIT_STORES@ * 7) / tma_info_thread_clks + tma_store_stlb_miss",
+ "MetricExpr": "cpu_core@MEM_INST_RETIRED.STLB_HIT_STORES@ * min(cpu_core@MEM_INST_RETIRED.STLB_HIT_STORES@R, 7) / tma_info_thread_clks + tma_store_stlb_miss",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
- "MetricThreshold": "tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES. Related metrics: tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_load",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1136,7 +1137,7 @@
"MetricExpr": "28 * tma_info_system_core_frequency * cpu_core@OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM@ / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
- "MetricThreshold": "(tma_false_sharing > 0.05) & ((tma_store_bound > 0.2) & ((tma_memory_bound > 0.2) & ((tma_backend_bound > 0.2))))",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -1147,7 +1148,7 @@
"MetricGroup": "BvMB;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
- "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1158,18 +1159,18 @@
"MetricName": "tma_fetch_bandwidth",
"MetricThreshold": "tma_fetch_bandwidth > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1;FRONTEND_RETIRED.LATENCY_GE_1;FRONTEND_RETIRED.LATENCY_GE_2. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1;FRONTEND_RETIRED.LATENCY_GE_1;FRONTEND_RETIRED.LATENCY_GE_2. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues",
- "MetricExpr": "topdown\\-fetch\\-lat / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "cpu_core@topdown\\-fetch\\-lat@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)",
"MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
"MetricName": "tma_fetch_latency",
"MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16, FRONTEND_RETIRED.LATENCY_GE_8",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1179,7 +1180,7 @@
"MetricGroup": "TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueD0",
"MetricName": "tma_few_uops_instructions",
"MetricThreshold": "tma_few_uops_instructions > 0.05 & tma_heavy_operations > 0.1",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or more uops. This highly-correlates with the number of uops in such instructions",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or more uops. This highly-correlates with the number of uops in such instructions. Related metrics: tma_decoder0_alone",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1189,7 +1190,7 @@
"MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fp_arith",
"MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1199,63 +1200,63 @@
"MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group",
"MetricName": "tma_fp_assists",
"MetricThreshold": "tma_fp_assists > 0.1",
- "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals)",
+ "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals).",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles where the Floating-Point Divider unit was active",
+ "BriefDescription": "This metric represents fraction of cycles where the Floating-Point Divider unit was active.",
"MetricExpr": "cpu_core@ARITH.FPDIV_ACTIVE@ / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_divider_group",
"MetricName": "tma_fp_divider",
- "MetricThreshold": "tma_fp_divider > 0.2 & tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_fp_divider > 0.2 & (tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
- "MetricExpr": "cpu_core@FP_ARITH_INST_RETIRED.SCALAR@ / (tma_retiring * tma_info_thread_slots)",
+ "MetricExpr": "cpu_core@FP_ARITH_OPS_RETIRED.SCALAR@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_scalar",
- "MetricThreshold": "tma_fp_scalar > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
- "MetricExpr": "cpu_core@FP_ARITH_INST_RETIRED.VECTOR@ / (tma_retiring * tma_info_thread_slots)",
+ "MetricExpr": "cpu_core@FP_ARITH_OPS_RETIRED.VECTOR@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_vector",
- "MetricThreshold": "tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors",
- "MetricExpr": "(cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE@) / (tma_retiring * tma_info_thread_slots)",
+ "MetricExpr": "(cpu_core@FP_ARITH_OPS_RETIRED.128B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_OPS_RETIRED.128B_PACKED_SINGLE@) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_128b",
- "MetricThreshold": "tma_fp_vector_128b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors",
- "MetricExpr": "cpu_core@FP_ARITH_INST_RETIRED.VECTOR\\,umask\\=0x30@ / (tma_retiring * tma_info_thread_slots)",
+ "MetricExpr": "cpu_core@FP_ARITH_OPS_RETIRED.VECTOR\\,umask\\=0x30@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_256b",
- "MetricThreshold": "tma_fp_vector_256b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_int_vector_128b, tma_int_vector_256b, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "topdown\\-fe\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "cpu_core@topdown\\-fe\\-bound@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)",
"MetricGroup": "BvFB;BvIO;Default;PGO;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
@@ -1265,23 +1266,23 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions , where one uop can represent multiple contiguous instructions",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions",
"MetricExpr": "tma_light_operations * cpu_core@INST_RETIRED.MACRO_FUSED@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fused_instructions",
"MetricThreshold": "tma_fused_instructions > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions , where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences",
- "MetricExpr": "topdown\\-heavy\\-ops / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
+ "MetricExpr": "cpu_core@topdown\\-heavy\\-ops@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_heavy_operations",
"MetricThreshold": "tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+]). Sample with: UOPS_RETIRED.HEAVY",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+]). Sample with: UOPS_RETIRED.HEAVY",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1290,26 +1291,26 @@
"MetricExpr": "cpu_core@ICACHE_DATA.STALLS@ / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
- "MetricThreshold": "tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS, FRONTEND_RETIRED.L1I_MISS",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by indirect CALL instructions",
- "MetricExpr": "cpu_core@BR_MISP_RETIRED.INDIRECT_CALL_COST@ * cpu_core@br_misp_retired.indirect_call_cost@R / tma_info_thread_clks",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by indirect CALL instructions.",
+ "MetricExpr": "cpu_core@BR_MISP_RETIRED.INDIRECT_CALL_COST@ * cpu_core@BR_MISP_RETIRED.INDIRECT_CALL_COST@R / tma_info_thread_clks",
"MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_ind_call_mispredicts",
- "MetricThreshold": "tma_ind_call_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_ind_call_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by indirect JMP instructions",
- "MetricExpr": "max((cpu_core@BR_MISP_RETIRED.INDIRECT_COST@ * cpu_core@br_misp_retired.indirect_cost@R - cpu_core@BR_MISP_RETIRED.INDIRECT_CALL_COST@ * cpu_core@br_misp_retired.indirect_call_cost@R) / tma_info_thread_clks, 0)",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by indirect JMP instructions.",
+ "MetricExpr": "max((cpu_core@BR_MISP_RETIRED.INDIRECT_COST@ * cpu_core@BR_MISP_RETIRED.INDIRECT_COST@R - cpu_core@BR_MISP_RETIRED.INDIRECT_CALL_COST@ * cpu_core@BR_MISP_RETIRED.INDIRECT_CALL_COST@R) / tma_info_thread_clks, 0)",
"MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_ind_jump_mispredicts",
- "MetricThreshold": "tma_ind_jump_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_ind_jump_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1322,7 +1323,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for conditional non-taken branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for conditional non-taken branches (lower number means higher occurrence rate).",
"MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.COND_NTAKEN@",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_cond_ntaken",
@@ -1330,29 +1331,29 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for conditional backward-taken branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for conditional backward-taken branches (lower number means higher occurrence rate).",
"MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.COND_TAKEN_BWD@",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_cond_taken_bwd",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for conditional forward-taken branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for conditional forward-taken branches (lower number means higher occurrence rate).",
"MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.COND_TAKEN_FWD@",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_cond_taken_fwd",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
"MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.INDIRECT@",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_indirect",
- "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1000",
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate).",
"MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.RET@",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_ret",
@@ -1376,7 +1377,7 @@
},
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck",
- "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_latency + tma_fetch_bandwidth)) * (tma_dsb / (tma_mite + tma_dsb + tma_lsd + tma_ms)))",
+ "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_bandwidth + tma_fetch_latency)) * (tma_dsb / (tma_dsb + tma_lsd + tma_mite + tma_ms)))",
"MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_bandwidth",
"MetricThreshold": "tma_info_botlnk_l2_dsb_bandwidth > 10",
@@ -1385,7 +1386,7 @@
},
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
- "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_mite / (tma_mite + tma_dsb + tma_lsd + tma_ms))",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_lsd + tma_mite + tma_ms))",
"MetricGroup": "DSBmiss;Fed;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_misses",
"MetricThreshold": "tma_info_botlnk_l2_dsb_misses > 10",
@@ -1394,10 +1395,11 @@
},
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
- "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
"MetricName": "tma_info_botlnk_l2_ic_misses",
"MetricThreshold": "tma_info_botlnk_l2_ic_misses > 5",
+ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: ",
"Unit": "cpu_core"
},
{
@@ -1453,7 +1455,7 @@
},
{
"BriefDescription": "Floating Point Operations Per Cycle",
- "MetricExpr": "(cpu_core@FP_ARITH_INST_RETIRED.SCALAR@ + 2 * cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + 4 * cpu_core@FP_ARITH_INST_RETIRED.4_FLOPS@ + 8 * cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@) / tma_info_thread_clks",
+ "MetricExpr": "(cpu_core@FP_ARITH_OPS_RETIRED.SCALAR@ + 2 * cpu_core@FP_ARITH_OPS_RETIRED.128B_PACKED_DOUBLE@ + 4 * cpu_core@FP_ARITH_OPS_RETIRED.4_FLOPS@ + 8 * cpu_core@FP_ARITH_OPS_RETIRED.256B_PACKED_SINGLE@) / tma_info_thread_clks",
"MetricGroup": "Flops;Ret",
"MetricName": "tma_info_core_flopc",
"Unit": "cpu_core"
@@ -1463,12 +1465,12 @@
"MetricExpr": "(cpu_core@FP_ARITH_DISPATCHED.V0@ + cpu_core@FP_ARITH_DISPATCHED.V1@ + cpu_core@FP_ARITH_DISPATCHED.V2@ + cpu_core@FP_ARITH_DISPATCHED.V3@) / (4 * tma_info_thread_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
- "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)",
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common).",
"Unit": "cpu_core"
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)",
- "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=0x1@",
+ "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "tma_info_core_ilp",
"Unit": "cpu_core"
@@ -1483,15 +1485,15 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details",
- "MetricExpr": "cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES@ / cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=0x1\\,edge\\=0x1@",
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES@ / cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
"MetricGroup": "DSBmiss",
"MetricName": "tma_info_frontend_dsb_switch_cost",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU retirement was stalled likely due to retired DSB misses",
- "MetricExpr": "cpu_core@FRONTEND_RETIRED.ANY_DSB_MISS@ * cpu_core@frontend_retired.any_dsb_miss@R / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@FRONTEND_RETIRED.ANY_DSB_MISS@ * cpu_core@FRONTEND_RETIRED.ANY_DSB_MISS@R / tma_info_thread_clks",
"MetricGroup": "DSBmiss;Fed;FetchLat",
"MetricName": "tma_info_frontend_dsb_switches_ret",
"MetricThreshold": "tma_info_frontend_dsb_switches_ret > 0.05",
@@ -1499,7 +1501,7 @@
},
{
"BriefDescription": "Average number of Uops issued by front-end when it issued something",
- "MetricExpr": "cpu_core@UOPS_ISSUED.ANY@ / cpu_core@UOPS_ISSUED.ANY\\,cmask\\=0x1@",
+ "MetricExpr": "cpu_core@UOPS_ISSUED.ANY@ / cpu_core@UOPS_ISSUED.ANY\\,cmask\\=1@",
"MetricGroup": "Fed;FetchBW",
"MetricName": "tma_info_frontend_fetch_upc",
"Unit": "cpu_core"
@@ -1549,7 +1551,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU retirement was stalled likely due to retired operations that invoke the Microcode Sequencer",
- "MetricExpr": "cpu_core@FRONTEND_RETIRED.MS_FLOWS@ * cpu_core@frontend_retired.ms_flows@R / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@FRONTEND_RETIRED.MS_FLOWS@ * cpu_core@FRONTEND_RETIRED.MS_FLOWS@R / tma_info_thread_clks",
"MetricGroup": "Fed;FetchLat;MicroSeq",
"MetricName": "tma_info_frontend_ms_latency_ret",
"MetricThreshold": "tma_info_frontend_ms_latency_ret > 0.05",
@@ -1564,21 +1566,21 @@
},
{
"BriefDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection",
- "MetricExpr": "cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES@ / cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES\\,cmask\\=0x1\\,edge\\=0x1@",
+ "MetricExpr": "cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES@ / cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES\\,cmask\\=1\\,edge@",
"MetricGroup": "Fed",
"MetricName": "tma_info_frontend_unknown_branch_cost",
- "PublicDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection. See Unknown_Branches node",
+ "PublicDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection. See Unknown_Branches node.",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU retirement was stalled likely due to retired branches who got branch address clears",
- "MetricExpr": "cpu_core@FRONTEND_RETIRED.UNKNOWN_BRANCH@ * cpu_core@frontend_retired.unknown_branch@R / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@FRONTEND_RETIRED.UNKNOWN_BRANCH@ * cpu_core@FRONTEND_RETIRED.UNKNOWN_BRANCH@R / tma_info_thread_clks",
"MetricGroup": "Fed;FetchLat",
"MetricName": "tma_info_frontend_unknown_branches_ret",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Branch instructions per taken branch",
+ "BriefDescription": "Branch instructions per taken branch.",
"MetricExpr": "cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ / cpu_core@BR_INST_RETIRED.NEAR_TAKEN@",
"MetricGroup": "Branches;Fed;PGO",
"MetricName": "tma_info_inst_mix_bptkbranch",
@@ -1594,47 +1596,47 @@
},
{
"BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.SCALAR@ + cpu_core@FP_ARITH_INST_RETIRED.VECTOR@)",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_OPS_RETIRED.SCALAR@ + cpu_core@FP_ARITH_OPS_RETIRED.VECTOR@)",
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
"MetricThreshold": "tma_info_inst_mix_iparith < 10",
- "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW",
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW.",
"Unit": "cpu_core"
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE@)",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_OPS_RETIRED.128B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_OPS_RETIRED.128B_PACKED_SINGLE@)",
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx128",
"MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting",
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.",
"Unit": "cpu_core"
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@)",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_OPS_RETIRED.256B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_OPS_RETIRED.256B_PACKED_SINGLE@)",
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx256",
"MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting",
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.",
"Unit": "cpu_core"
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@FP_ARITH_INST_RETIRED.SCALAR_DOUBLE@",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@FP_ARITH_OPS_RETIRED.SCALAR_DOUBLE@",
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_dp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.",
"Unit": "cpu_core"
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@FP_ARITH_INST_RETIRED.SCALAR_SINGLE@",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@FP_ARITH_OPS_RETIRED.SCALAR_SINGLE@",
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_sp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.",
"Unit": "cpu_core"
},
{
@@ -1655,7 +1657,7 @@
},
{
"BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.SCALAR@ + 2 * cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + 4 * cpu_core@FP_ARITH_INST_RETIRED.4_FLOPS@ + 8 * cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@)",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_OPS_RETIRED.SCALAR@ + 2 * cpu_core@FP_ARITH_OPS_RETIRED.128B_PACKED_DOUBLE@ + 4 * cpu_core@FP_ARITH_OPS_RETIRED.4_FLOPS@ + 8 * cpu_core@FP_ARITH_OPS_RETIRED.256B_PACKED_SINGLE@)",
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_ipflop",
"MetricThreshold": "tma_info_inst_mix_ipflop < 10",
@@ -1697,7 +1699,7 @@
"MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_INST_RETIRED.NEAR_TAKEN@",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
- "MetricThreshold": "tma_info_inst_mix_iptb < 8 * 2 + 1",
+ "MetricThreshold": "tma_info_inst_mix_iptb < 17",
"PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp",
"Unit": "cpu_core"
},
@@ -1709,6 +1711,13 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * cpu_core@L1D.L1_REPLACEMENT@ / 1e9 / tma_info_system_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_memory_l1d_cache_fill_bw",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Average per-thread data fill bandwidth to the Level 0 within L1D cache [GB / sec]",
"MetricExpr": "64 * cpu_core@L1D.L0_REPLACEMENT@ / 1e9 / tma_info_system_time",
"MetricGroup": "Mem;MemoryBW",
@@ -1716,6 +1725,13 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "L0 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * (cpu_core@MEM_LOAD_RETIRED.L1_MISS@ + cpu_core@MEM_LOAD_RETIRED.L1_HIT_L1@) / cpu_core@INST_RETIRED.ANY@",
+ "MetricGroup": "CacheHits;Mem",
+ "MetricName": "tma_info_memory_l1dl0_mpki",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1e3 * cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / cpu_core@INST_RETIRED.ANY@",
"MetricGroup": "CacheHits;Mem",
@@ -1815,7 +1831,7 @@
},
{
"BriefDescription": "Average Parallel L2 cache miss demand Loads",
- "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD@ / cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=0x1@",
+ "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD@ / cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@",
"MetricGroup": "Memory_BW;Offcore",
"MetricName": "tma_info_memory_latency_load_l2_mlp",
"Unit": "cpu_core"
@@ -1873,7 +1889,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU retirement was stalled likely due to STLB misses by demand loads",
- "MetricExpr": "cpu_core@MEM_INST_RETIRED.STLB_MISS_LOADS@ * cpu_core@mem_inst_retired.stlb_miss_loads@R / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@MEM_INST_RETIRED.STLB_MISS_LOADS@ * cpu_core@MEM_INST_RETIRED.STLB_MISS_LOADS@R / tma_info_thread_clks",
"MetricGroup": "Mem;MemoryTLB",
"MetricName": "tma_info_memory_tlb_load_stlb_miss_ret",
"MetricThreshold": "tma_info_memory_tlb_load_stlb_miss_ret > 0.05",
@@ -1896,7 +1912,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU retirement was stalled likely due to STLB misses by demand stores",
- "MetricExpr": "cpu_core@MEM_INST_RETIRED.STLB_MISS_STORES@ * cpu_core@mem_inst_retired.stlb_miss_stores@R / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@MEM_INST_RETIRED.STLB_MISS_STORES@ * cpu_core@MEM_INST_RETIRED.STLB_MISS_STORES@R / tma_info_thread_clks",
"MetricGroup": "Mem;MemoryTLB",
"MetricName": "tma_info_memory_tlb_store_stlb_miss_ret",
"MetricThreshold": "tma_info_memory_tlb_store_stlb_miss_ret > 0.05",
@@ -1931,24 +1947,31 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Average number of uops fetched from MS per cycle",
+ "MetricExpr": "cpu_core@IDQ.MS_UOPS@ / cpu_core@IDQ.MS_UOPS\\,cmask\\=1@",
+ "MetricGroup": "Fed;FetchLat;MicroSeq",
+ "MetricName": "tma_info_pipeline_fetch_ms",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Instructions per a microcode Assist invocation",
"MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@ASSISTS.ANY@",
"MetricGroup": "MicroSeq;Pipeline;Ret;Retire",
"MetricName": "tma_info_pipeline_ipassist",
- "MetricThreshold": "tma_info_pipeline_ipassist < 100000",
+ "MetricThreshold": "tma_info_pipeline_ipassist < 100e3",
"PublicDescription": "Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired",
- "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=0x1@",
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
"MetricGroup": "Pipeline;Ret",
"MetricName": "tma_info_pipeline_retire",
"Unit": "cpu_core"
},
{
"BriefDescription": "Estimated fraction of retirement-cycles dealing with repeat instructions",
- "MetricExpr": "cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=0x1@",
+ "MetricExpr": "cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
"MetricGroup": "MicroSeq;Pipeline;Ret",
"MetricName": "tma_info_pipeline_strings_cycles",
"MetricThreshold": "tma_info_pipeline_strings_cycles > 0.1",
@@ -1964,7 +1987,7 @@
},
{
"BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]",
- "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / tma_info_system_time",
+ "MetricExpr": "tma_info_system_turbo_utilization * msr@tsc\\,cpu=cpu_core@ / 1e9 / tma_info_system_time",
"MetricGroup": "Power;Summary",
"MetricName": "tma_info_system_core_frequency",
"Unit": "cpu_core"
@@ -1978,14 +2001,22 @@
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.REF_TSC@ / TSC",
+ "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.REF_TSC@ / msr@tsc\\,cpu=cpu_core@",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized",
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "32 * UNC_M_TOTAL_DATA / 1e9 / tma_info_system_time",
+ "MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW",
+ "MetricName": "tma_info_system_dram_bw_use",
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth, tma_sq_full",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "(cpu_core@FP_ARITH_INST_RETIRED.SCALAR@ + 2 * cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + 4 * cpu_core@FP_ARITH_INST_RETIRED.4_FLOPS@ + 8 * cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@) / 1e9 / tma_info_system_time",
+ "MetricExpr": "(cpu_core@FP_ARITH_OPS_RETIRED.SCALAR@ + 2 * cpu_core@FP_ARITH_OPS_RETIRED.128B_PACKED_DOUBLE@ + 4 * cpu_core@FP_ARITH_OPS_RETIRED.4_FLOPS@ + 8 * cpu_core@FP_ARITH_OPS_RETIRED.256B_PACKED_SINGLE@) / 1e9 / tma_info_system_time",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_system_gflops",
"PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width",
@@ -1993,23 +2024,22 @@
},
{
"BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_INST_RETIRED.FAR_BRANCH@u",
"MetricGroup": "Branches;OS",
"MetricName": "tma_info_system_ipfarbranch",
- "MetricThreshold": "tma_info_system_ipfarbranch < 1000000",
+ "MetricThreshold": "tma_info_system_ipfarbranch < 1e6",
"Unit": "cpu_core"
},
{
"BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
+ "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.THREAD_P@k / cpu_core@INST_RETIRED.ANY_P@k",
"MetricGroup": "OS",
"MetricName": "tma_info_system_kernel_cpi",
- "ScaleUnit": "1per_instr",
"Unit": "cpu_core"
},
{
"BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / cpu_core@CPU_CLK_UNHALTED.THREAD@",
+ "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.THREAD_P@k / cpu_core@CPU_CLK_UNHALTED.THREAD@",
"MetricGroup": "OS",
"MetricName": "tma_info_system_kernel_utilization",
"MetricThreshold": "tma_info_system_kernel_utilization > 0.05",
@@ -2053,7 +2083,14 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active",
+ "BriefDescription": "Measured Average Uncore Frequency for the SoC [GHz]",
+ "MetricExpr": "tma_info_system_socket_clks / 1e9 / tma_info_system_time",
+ "MetricGroup": "SoC",
+ "MetricName": "tma_info_system_uncore_frequency",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "cpu_core@CPU_CLK_UNHALTED.THREAD@",
"MetricGroup": "Pipeline",
"MetricName": "tma_info_thread_clks",
@@ -2064,7 +2101,6 @@
"MetricExpr": "1 / tma_info_thread_ipc",
"MetricGroup": "Mem;Pipeline",
"MetricName": "tma_info_thread_cpi",
- "ScaleUnit": "1per_instr",
"Unit": "cpu_core"
},
{
@@ -2072,7 +2108,7 @@
"MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / cpu_core@UOPS_ISSUED.ANY@",
"MetricGroup": "Cor;Pipeline",
"MetricName": "tma_info_thread_execute_per_issue",
- "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage",
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage.",
"Unit": "cpu_core"
},
{
@@ -2084,7 +2120,7 @@
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
- "MetricExpr": "slots",
+ "MetricExpr": "cpu_core@TOPDOWN.SLOTS@",
"MetricGroup": "TmaL1;tma_L1_group",
"MetricName": "tma_info_thread_slots",
"Unit": "cpu_core"
@@ -2102,15 +2138,15 @@
"MetricExpr": "tma_retiring * tma_info_thread_slots / cpu_core@BR_INST_RETIRED.NEAR_TAKEN@",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
- "MetricThreshold": "tma_info_thread_uptb < 8 * 1.5",
+ "MetricThreshold": "tma_info_thread_uptb < 12",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles where the Integer Divider unit was active",
+ "BriefDescription": "This metric represents fraction of cycles where the Integer Divider unit was active.",
"MetricExpr": "tma_divider - tma_fp_divider",
"MetricGroup": "TopdownL4;tma_L4_group;tma_divider_group",
"MetricName": "tma_int_divider",
- "MetricThreshold": "tma_int_divider > 0.2 & tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_int_divider > 0.2 & (tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2120,7 +2156,7 @@
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_int_operations",
"MetricThreshold": "tma_int_operations > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents overall Integer (Int) select operations fraction the CPU has executed (retired). Vector/Matrix Int operations and shuffles are counted. Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain",
+ "PublicDescription": "This metric represents overall Integer (Int) select operations fraction the CPU has executed (retired). Vector/Matrix Int operations and shuffles are counted. Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2129,8 +2165,8 @@
"MetricExpr": "cpu_core@INT_VEC_RETIRED.128BIT@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;IntVector;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group;tma_issue2P",
"MetricName": "tma_int_vector_128b",
- "MetricThreshold": "tma_int_vector_128b > 0.1 & tma_int_operations > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents 128-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_256b, tma_ports_utilized_2",
+ "MetricThreshold": "tma_int_vector_128b > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents 128-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2139,8 +2175,8 @@
"MetricExpr": "cpu_core@INT_VEC_RETIRED.256BIT@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;IntVector;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group;tma_issue2P",
"MetricName": "tma_int_vector_256b",
- "MetricThreshold": "tma_int_vector_256b > 0.1 & tma_int_operations > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD/MUL or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_ports_utilized_2",
+ "MetricThreshold": "tma_int_vector_256b > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD/MUL or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2149,8 +2185,8 @@
"MetricExpr": "cpu_core@ICACHE_TAG.STALLS@ / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
- "MetricThreshold": "tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS, FRONTEND_RETIRED.ITLB_MISS",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2159,27 +2195,27 @@
"MetricExpr": "cpu_core@MEMORY_STALLS.L1@ / tma_info_thread_clks",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
"MetricName": "tma_l1_bound",
- "MetricThreshold": "tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 Data (L1D) cache. The L1D cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1D. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit Level 1 after missing Level 0 within the L1D cache",
- "MetricExpr": "(min(cpu_core@MEM_LOAD_RETIRED.L1_HIT_L1@ * cpu_core@mem_load_retired.l1_hit_l1@R, cpu_core@MEM_LOAD_RETIRED.L1_HIT_L1@ * 9) if 0 < cpu_core@mem_load_retired.l1_hit_l1@R else cpu_core@MEM_LOAD_RETIRED.L1_HIT_L1@ * 9) / tma_info_thread_clks",
+ "BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit Level 1 after missing Level 0 within the L1D cache.",
+ "MetricExpr": "cpu_core@MEM_LOAD_RETIRED.L1_HIT_L1@ * min(cpu_core@MEM_LOAD_RETIRED.L1_HIT_L1@R, 9) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_l1_latency_capacity",
- "MetricThreshold": "tma_l1_latency_capacity > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l1_latency_capacity > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache",
- "MetricExpr": "4 * cpu_core@DEPENDENT_LOADS.ANY@ / tma_info_thread_clks",
+ "BriefDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache",
+ "MetricExpr": "4 * cpu_core@DEPENDENT_LOADS.ANY\\,cmask\\=1@ / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_l1_latency_dependency",
- "MetricThreshold": "tma_l1_latency_dependency > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache. The short latency of the L1D cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: DEPENDENT_LOADS.ANY",
+ "MetricThreshold": "tma_l1_latency_dependency > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache. The short latency of the L1D cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2188,17 +2224,17 @@
"MetricExpr": "cpu_core@MEMORY_STALLS.L2@ / tma_info_thread_clks",
"MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
- "MetricThreshold": "tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L2 cache under unloaded scenarios (possibly L2 latency limited)",
- "MetricExpr": "(min(cpu_core@MEM_LOAD_RETIRED.L2_HIT@ * cpu_core@mem_load_retired.l2_hit@R, cpu_core@MEM_LOAD_RETIRED.L2_HIT@ * (3 * tma_info_system_core_frequency)) if 0 < cpu_core@mem_load_retired.l2_hit@R else cpu_core@MEM_LOAD_RETIRED.L2_HIT@ * (3 * tma_info_system_core_frequency)) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@MEM_LOAD_RETIRED.L2_HIT@ * min(cpu_core@MEM_LOAD_RETIRED.L2_HIT@R, 3 * tma_info_system_core_frequency) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
"MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_l2_bound_group",
"MetricName": "tma_l2_hit_latency",
- "MetricThreshold": "tma_l2_hit_latency > 0.05 & tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l2_hit_latency > 0.05 & (tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L2 cache under unloaded scenarios (possibly L2 latency limited). Avoiding L1 cache misses (i.e. L1 misses/L2 hits) will improve the latency. Sample with: MEM_LOAD_RETIRED.L2_HIT",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -2208,18 +2244,18 @@
"MetricExpr": "cpu_core@MEMORY_STALLS.L3@ / tma_info_thread_clks",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
- "MetricThreshold": "tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
- "MetricExpr": "(min(cpu_core@MEM_LOAD_RETIRED.L3_HIT@ * cpu_core@mem_load_retired.l3_hit@R, cpu_core@MEM_LOAD_RETIRED.L3_HIT@ * (12 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency) if 0 < cpu_core@mem_load_retired.l3_hit@R else cpu_core@MEM_LOAD_RETIRED.L3_HIT@ * (12 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@MEM_LOAD_RETIRED.L3_HIT@ * min(cpu_core@MEM_LOAD_RETIRED.L3_HIT@R, 9 * tma_info_system_core_frequency) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
- "MetricThreshold": "tma_l3_hit_latency > 0.1 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT. Related metrics: tma_branch_resteers, tma_mem_latency, tma_store_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_bottleneck_data_cache_memory_latency, tma_mem_latency",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2228,19 +2264,19 @@
"MetricExpr": "cpu_core@DECODE.LCP@ / tma_info_thread_clks",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_lcp",
- "MetricThreshold": "tma_lcp > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation)",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
"MetricExpr": "max(0, tma_retiring - tma_heavy_operations)",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_light_operations",
"MetricThreshold": "tma_light_operations > 0.6",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2250,7 +2286,7 @@
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_load_op_utilization",
"MetricThreshold": "tma_load_op_utilization > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations. Sample with: UOPS_DISPATCHED.LOAD",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations. Sample with: UOPS_DISPATCHED.PORT_2_3_10",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2259,7 +2295,7 @@
"MetricExpr": "max(0, tma_dtlb_load - tma_load_stlb_miss)",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
"MetricName": "tma_load_stlb_hit",
- "MetricThreshold": "tma_load_stlb_hit > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_hit > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2268,54 +2304,55 @@
"MetricExpr": "cpu_core@DTLB_LOAD_MISSES.WALK_ACTIVE@ / tma_info_thread_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
"MetricName": "tma_load_stlb_miss",
- "MetricThreshold": "tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_1G@ / (cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_1G@)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_1g",
- "MetricThreshold": "tma_load_stlb_miss_1g > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_1g > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M@ / (cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_1G@)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_2m",
- "MetricThreshold": "tma_load_stlb_miss_2m > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_2m > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_4K@ / (cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_1G@)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_4k",
- "MetricThreshold": "tma_load_stlb_miss_4k > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_4k > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
- "MetricExpr": "cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ * cpu_core@mem_inst_retired.lock_loads@R / tma_info_thread_clks",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ * cpu_core@MEM_INST_RETIRED.LOCK_LOADS@R / tma_info_thread_clks",
"MetricGroup": "LockCont;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
- "MetricThreshold": "tma_lock_latency > 0.2 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit",
- "MetricExpr": "cpu_core@LSD.UOPS\\,cmask\\=0x8\\,inv\\=0x1@ / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@LSD.UOPS\\,cmask\\=0x8\\,inv\\=0x1@ / tma_info_thread_clks / 2",
"MetricGroup": "FetchBW;LSD;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_lsd",
"MetricThreshold": "tma_lsd > 0.15 & tma_fetch_bandwidth > 0.2",
- "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit. LSD typically does well sustaining Uop supply. However; in some rare cases; optimal uop-delivery could not be reached for small loops whose size (in terms of number of uops) does not suit well the LSD structure",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit. LSD typically does well sustaining Uop supply. However; in some rare cases; optimal uop-delivery could not be reached for small loops whose size (in terms of number of uops) does not suit well the LSD structure.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2326,17 +2363,17 @@
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_bottleneck_memory_synchronization, tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
- "MetricExpr": "min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DATA_RD\\,cmask\\=0x4@) / tma_info_thread_clks",
+ "MetricExpr": "min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
- "MetricThreshold": "tma_mem_bandwidth > 0.2 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_sq_full",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2345,34 +2382,33 @@
"MetricExpr": "min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD@) / tma_info_thread_clks - tma_mem_bandwidth",
"MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
- "MetricThreshold": "tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_bottleneck_data_cache_memory_latency, tma_l3_hit_latency",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
- "MetricExpr": "topdown\\-mem\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "cpu_core@topdown\\-mem\\-bound@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)",
"MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_memory_bound",
"MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two)",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to LFENCE Instructions",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to LFENCE Instructions.",
"MetricExpr": "13 * cpu_core@MISC2_RETIRED.LFENCE@ / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_memory_fence",
- "MetricThreshold": "tma_memory_fence > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_memory_fence > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations , uops for memory load or store accesses",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
"MetricExpr": "tma_light_operations * cpu_core@MEM_UOP_RETIRED.ANY@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_memory_operations",
@@ -2395,14 +2431,14 @@
"MetricExpr": "tma_branch_mispredicts / tma_bad_speculation * cpu_core@INT_MISC.CLEAR_RESTEER_CYCLES@ / tma_info_thread_clks",
"MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
"MetricName": "tma_mispredicts_resteers",
- "MetricThreshold": "tma_mispredicts_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_bottleneck_mispredictions, tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline)",
- "MetricExpr": "(cpu_core@IDQ.MITE_UOPS\\,cmask\\=0x8\\,inv\\=0x1@ / tma_info_thread_clks + cpu_core@IDQ.MITE_UOPS@ / (cpu_core@IDQ.DSB_UOPS@ + cpu_core@IDQ.MITE_UOPS@) * (cpu_core@IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE@ - cpu_core@IDQ_BUBBLES.FETCH_LATENCY@)) / tma_info_thread_clks",
+ "MetricExpr": "(cpu_core@IDQ.MITE_UOPS\\,cmask\\=0x8\\,inv\\=0x1@ / 2 + cpu_core@IDQ.MITE_UOPS@ / (cpu_core@IDQ.DSB_UOPS@ + cpu_core@IDQ.MITE_UOPS@) * (cpu_core@IDQ_BUBBLES.STARVATION_CYCLES@ - cpu_core@IDQ_BUBBLES.FETCH_LATENCY@)) / tma_info_thread_clks",
"MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_mite",
"MetricThreshold": "tma_mite > 0.1 & tma_fetch_bandwidth > 0.2",
@@ -2411,18 +2447,18 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued , the Count Domain; [ADL+] cycles)",
+ "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles)",
"MetricExpr": "160 * cpu_core@ASSISTS.SSE_AVX_MIX@ / tma_info_thread_clks",
"MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group",
"MetricName": "tma_mixing_vectors",
"MetricThreshold": "tma_mixing_vectors > 0.05",
- "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued , the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
+ "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the Microcode Sequencer (MS) unit - see Microcode_Sequencer node for details",
- "MetricExpr": "cpu_core@IDQ.MS_CYCLES_ANY@ / tma_info_thread_clks",
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the Microcode Sequencer (MS) unit - see Microcode_Sequencer node for details.",
+ "MetricExpr": "cpu_core@IDQ.MS_CYCLES_ANY@ / tma_info_thread_clks / 1.8",
"MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_ms",
"MetricThreshold": "tma_ms > 0.05 & tma_fetch_bandwidth > 0.2",
@@ -2434,7 +2470,7 @@
"MetricExpr": "3 * cpu_core@IDQ.MS_SWITCHES@ / tma_info_thread_clks",
"MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
"MetricName": "tma_ms_switches",
- "MetricThreshold": "tma_ms_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_bottleneck_irregular_overhead, tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -2445,7 +2481,7 @@
"MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_non_fused_branches",
"MetricThreshold": "tma_non_fused_branches > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2454,14 +2490,15 @@
"MetricExpr": "tma_light_operations * cpu_core@INST_RETIRED.NOP@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "BvBO;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_nop_instructions",
- "MetricThreshold": "tma_nop_instructions > 0.1 & tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes",
- "MetricExpr": "max(0, tma_light_operations - (tma_x87_use + (cpu_core@FP_ARITH_INST_RETIRED.SCALAR@ + cpu_core@FP_ARITH_INST_RETIRED.VECTOR@) / (tma_retiring * tma_info_thread_slots) + (cpu_core@INT_VEC_RETIRED.ADD_128@ + cpu_core@INT_VEC_RETIRED.VNNI_128@ + cpu_core@INT_VEC_RETIRED.ADD_256@ + cpu_core@INT_VEC_RETIRED.MUL_256@ + cpu_core@INT_VEC_RETIRED.VNNI_256@) / (tma_retiring * tma_info_thread_slots) + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "max(0, tma_light_operations - (tma_x87_use + (cpu_core@FP_ARITH_OPS_RETIRED.SCALAR@ + cpu_core@FP_ARITH_OPS_RETIRED.VECTOR@) / (tma_retiring * tma_info_thread_slots) + (cpu_core@INT_VEC_RETIRED.ADD_128@ + cpu_core@INT_VEC_RETIRED.VNNI_128@ + cpu_core@INT_VEC_RETIRED.ADD_256@ + cpu_core@INT_VEC_RETIRED.MUL_256@ + cpu_core@INT_VEC_RETIRED.VNNI_256@) / (tma_retiring * tma_info_thread_slots) + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches))",
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_other_light_ops",
"MetricThreshold": "tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
@@ -2470,20 +2507,20 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types)",
+ "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).",
"MetricExpr": "max(tma_branch_mispredicts * (1 - cpu_core@BR_MISP_RETIRED.ALL_BRANCHES@ / (cpu_core@INT_MISC.CLEARS_COUNT@ - cpu_core@MACHINE_CLEARS.COUNT@)), 0.0001)",
"MetricGroup": "BrMispredicts;BvIO;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_other_mispredicts",
- "MetricThreshold": "tma_other_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering",
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.",
"MetricExpr": "max(tma_machine_clears * (1 - cpu_core@MACHINE_CLEARS.MEMORY_ORDERING@ / cpu_core@MACHINE_CLEARS.COUNT@), 0.0001)",
"MetricGroup": "BvIO;Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_other_nukes",
- "MetricThreshold": "tma_other_nukes > 0.05 & tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2493,75 +2530,76 @@
"MetricGroup": "TopdownL5;tma_L5_group;tma_assists_group",
"MetricName": "tma_page_faults",
"MetricThreshold": "tma_page_faults > 0.05",
- "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Page Faults. A Page Fault may apply on first application access to a memory page. Note operating system handling of page faults accounts for the majority of its cost",
+ "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Page Faults. A Page Fault may apply on first application access to a memory page. Note operating system handling of page faults accounts for the majority of its cost.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "((cpu_core@EXE_ACTIVITY.EXE_BOUND_0_PORTS@ + (cpu_core@EXE_ACTIVITY.1_PORTS_UTIL@ + tma_retiring * cpu_core@EXE_ACTIVITY.2_3_PORTS_UTIL@)) / tma_info_thread_clks if cpu_core@ARITH.DIV_ACTIVE@ < cpu_core@CYCLE_ACTIVITY.STALLS_TOTAL@ - cpu_core@EXE_ACTIVITY.BOUND_ON_LOADS@ else (cpu_core@EXE_ACTIVITY.1_PORTS_UTIL@ + tma_retiring * cpu_core@EXE_ACTIVITY.2_3_PORTS_UTIL@) / tma_info_thread_clks)",
"MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_ports_utilization",
- "MetricThreshold": "tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricConstraint": "NO_THRESHOLD_AND_NMI",
"MetricExpr": "cpu_core@EXE_ACTIVITY.EXE_BOUND_0_PORTS@ / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
- "MetricThreshold": "tma_ports_utilized_0 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricConstraint": "NO_THRESHOLD_AND_NMI",
"MetricExpr": "cpu_core@EXE_ACTIVITY.1_PORTS_UTIL@ / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_1",
- "MetricThreshold": "tma_ports_utilized_1 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Sample with: EXE_ACTIVITY.1_PORTS_UTIL. Related metrics: tma_l1_bound",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "cpu_core@EXE_ACTIVITY.2_PORTS_UTIL@ / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_2",
- "MetricThreshold": "tma_ports_utilized_2 > 0.15 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Sample with: EXE_ACTIVITY.2_PORTS_UTIL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Sample with: EXE_ACTIVITY.2_PORTS_UTIL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "cpu_core@UOPS_EXECUTED.CYCLES_GE_3@ / tma_info_thread_clks",
"MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
- "MetricThreshold": "tma_ports_utilized_3m > 0.4 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by (indirect) RET instructions",
- "MetricExpr": "cpu_core@BR_MISP_RETIRED.RET_COST@ * cpu_core@br_misp_retired.ret_cost@R / tma_info_thread_clks",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by (indirect) RET instructions.",
+ "MetricExpr": "cpu_core@BR_MISP_RETIRED.RET_COST@ * cpu_core@BR_MISP_RETIRED.RET_COST@R / tma_info_thread_clks",
"MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_ret_mispredicts",
- "MetricThreshold": "tma_ret_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_ret_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "cpu_core@topdown\\-retiring@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)",
"MetricGroup": "BvUW;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
@@ -2575,8 +2613,8 @@
"MetricExpr": "(cpu_core@BE_STALLS.SCOREBOARD@ + cpu_core@CPU_CLK_UNHALTED.C02@) / tma_info_thread_clks",
"MetricGroup": "BvIO;PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
"MetricName": "tma_serializing_operation",
- "MetricThreshold": "tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: BE_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
+ "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: PARTIAL_RAT_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2585,39 +2623,38 @@
"MetricExpr": "tma_light_operations * cpu_core@INT_VEC_RETIRED.SHUFFLES@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "HPC;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_shuffles_256b",
- "MetricThreshold": "tma_shuffles_256b > 0.1 & tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring Shuffle operations of 256-bit vector size (FP or Integer). Shuffles may incur slow cross \"vector lane\" data transfers",
+ "MetricThreshold": "tma_shuffles_256b > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring Shuffle operations of 256-bit vector size (FP or Integer). Shuffles may incur slow cross \"vector lane\" data transfers.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "cpu_core@CPU_CLK_UNHALTED.PAUSE@ / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_slow_pause",
- "MetricThreshold": "tma_slow_pause > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. Sample with: CPU_CLK_UNHALTED.PAUSE_INST",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary",
- "MetricExpr": "(min(cpu_core@MEM_INST_RETIRED.SPLIT_LOADS@ * cpu_core@mem_inst_retired.split_loads@R, cpu_core@MEM_INST_RETIRED.SPLIT_LOADS@ * tma_info_memory_load_miss_real_latency) if 0 < cpu_core@mem_inst_retired.split_loads@R else cpu_core@MEM_INST_RETIRED.SPLIT_LOADS@ * tma_info_memory_load_miss_real_latency) / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@MEM_INST_RETIRED.SPLIT_LOADS@ * min(cpu_core@MEM_INST_RETIRED.SPLIT_LOADS@R, tma_info_memory_load_miss_real_latency) / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_split_loads",
"MetricThreshold": "tma_split_loads > 0.3",
- "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS_PS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents rate of split store accesses",
- "MetricExpr": "(min(cpu_core@MEM_INST_RETIRED.SPLIT_STORES@ * cpu_core@mem_inst_retired.split_stores@R, cpu_core@MEM_INST_RETIRED.SPLIT_STORES@) if 0 < cpu_core@mem_inst_retired.split_stores@R else cpu_core@MEM_INST_RETIRED.SPLIT_STORES@) / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@MEM_INST_RETIRED.SPLIT_STORES@ * min(cpu_core@MEM_INST_RETIRED.SPLIT_STORES@R, 1) / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
"MetricName": "tma_split_stores",
- "MetricThreshold": "tma_split_stores > 0.2 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2626,8 +2663,8 @@
"MetricExpr": "(cpu_core@XQ.FULL@ + cpu_core@L1D_MISS.L2_STALLS@) / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
- "MetricThreshold": "tma_sq_full > 0.3 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2636,8 +2673,17 @@
"MetricExpr": "cpu_core@EXE_ACTIVITY.BOUND_ON_STORES@ / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_store_bound",
- "MetricThreshold": "tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES_PS",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates clocks wasted due to loads blocked due to unknown store address (did not do memory disambiguation) or due to unknown store data",
+ "MetricExpr": "7 * cpu_core@LD_BLOCKS.STORE_EARLY\\,cmask\\=1@ / tma_info_thread_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_store_early_blk",
+ "MetricThreshold": "tma_store_early_blk > 0.2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2646,8 +2692,8 @@
"MetricExpr": "13 * cpu_core@LD_BLOCKS.STORE_FORWARD@ / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
- "MetricThreshold": "tma_store_fwd_blk > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2656,8 +2702,8 @@
"MetricExpr": "(cpu_core@MEM_STORE_RETIRED.L2_HIT@ * 10 * (1 - cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ / cpu_core@MEM_INST_RETIRED.ALL_STORES@) + (1 - cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ / cpu_core@MEM_INST_RETIRED.ALL_STORES@) * min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO@)) / tma_info_thread_clks",
"MetricGroup": "BvML;LockCont;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
- "MetricThreshold": "tma_store_latency > 0.1 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_branch_resteers, tma_fb_full, tma_l3_hit_latency, tma_lock_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2667,7 +2713,7 @@
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_store_op_utilization",
"MetricThreshold": "tma_store_op_utilization > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations. Sample with: UOPS_DISPATCHED.STD, UOPS_DISPATCHED.STA",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations. Sample with: UOPS_DISPATCHED.PORT_7_8",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2676,7 +2722,7 @@
"MetricExpr": "max(0, tma_dtlb_store - tma_store_stlb_miss)",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
"MetricName": "tma_store_stlb_hit",
- "MetricThreshold": "tma_store_stlb_hit > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_hit > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2685,34 +2731,34 @@
"MetricExpr": "cpu_core@DTLB_STORE_MISSES.WALK_ACTIVE@ / tma_info_thread_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
"MetricName": "tma_store_stlb_miss",
- "MetricThreshold": "tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_1G@ / (cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_1G@)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_1g",
- "MetricThreshold": "tma_store_stlb_miss_1g > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_1g > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M@ / (cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_1G@)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_2m",
- "MetricThreshold": "tma_store_stlb_miss_2m > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_2m > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_4K@ / (cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_1G@)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_4k",
- "MetricThreshold": "tma_store_stlb_miss_4k > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_4k > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2721,7 +2767,7 @@
"MetricExpr": "9 * cpu_core@OCR.STREAMING_WR.ANY_RESPONSE@ / tma_info_thread_clks",
"MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueSmSt;tma_store_bound_group",
"MetricName": "tma_streaming_stores",
- "MetricThreshold": "tma_streaming_stores > 0.2 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_streaming_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should Streaming stores be a bottleneck. Sample with: OCR.STREAMING_WR.ANY_RESPONSE. Related metrics: tma_fb_full",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -2731,7 +2777,7 @@
"MetricExpr": "cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES@ / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
- "MetricThreshold": "tma_unknown_branches > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: FRONTEND_RETIRED.UNKNOWN_BRANCH",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -2741,8 +2787,8 @@
"MetricExpr": "tma_retiring * cpu_core@UOPS_EXECUTED.X87@ / cpu_core@UOPS_EXECUTED.THREAD@",
"MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
"MetricName": "tma_x87_use",
- "MetricThreshold": "tma_x87_use > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
}
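A note on the tma_retiring rewrite above: the four level-1 topdown categories partition all issue slots, so each TMA metric is a simple fraction of slots. The new MetricExpr scopes every topdown event to the cpu_core PMU (needed on hybrid parts, where the same event names exist on more than one PMU) and drops the redundant "+ 0 * slots" term. A minimal sketch of the arithmetic, using made-up counter values (real values come from the topdown-* events named in the expression, e.g. via "perf stat -M tma_retiring"):

#include <stdio.h>

/* Hypothetical slot counts for the four level-1 topdown categories; real
 * values would come from the cpu_core topdown-* events in the MetricExpr. */
int main(void)
{
	double fe_bound = 1.0e9, bad_spec = 0.5e9;
	double retiring = 2.0e9, be_bound = 1.5e9;
	double slots = fe_bound + bad_spec + retiring + be_bound;

	/* tma_retiring = topdown-retiring / (sum of the four categories) */
	printf("tma_retiring = %.2f\n", retiring / slots);
	return 0;
}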
diff --git a/tools/perf/pmu-events/arch/x86/arrowlake/cache.json b/tools/perf/pmu-events/arch/x86/arrowlake/cache.json
index f63594b2cca8..fba4a0672f6c 100644
--- a/tools/perf/pmu-events/arch/x86/arrowlake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/arrowlake/cache.json
@@ -9,6 +9,26 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of L1D cacheline (dirty) evictions caused by load misses, stores, and prefetches.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x51",
+ "EventName": "DL1.DIRTY_EVICTION",
+ "PublicDescription": "Counts the number of L1D cacheline (dirty) evictions caused by load misses, stores, and prefetches. Does not count evictions or dirty writebacks caused by snoops. Does not count a replacement unless a (dirty) line was written back.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of L1D cacheline (dirty) evictions caused by load misses, stores, and prefetches.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x51",
+ "EventName": "DL1.DIRTY_EVICTION",
+ "PublicDescription": "Counts the number of L1D cacheline (dirty) evictions caused by load misses, stores, and prefetches. Does not count evictions or dirty writebacks caused by snoops. Does not count a replacement unless a (dirty) line was written back.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1",
+ "Unit": "cpu_lowpower"
+ },
+ {
"BriefDescription": "Counts the number of cache lines replaced in L0 data cache.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x51",
@@ -19,6 +39,26 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Cachelines replaced into the L1 d-cache. Successful replacements only (not blocked) and exclude WB-miss case",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x51",
+ "EventName": "L1D.L1_REPLACEMENT",
+ "PublicDescription": "Counts cachelines replaced into the L1 d-cache.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cachelines replaced into the L0 and L1 d-cache. Successful replacements only (not blocked) and exclude WB-miss case",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
+ "PublicDescription": "Counts cachelines replaced into the L0 and L1 d-cache.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_core"
+ },
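For readers mapping these entries onto raw perf usage: EventCode and UMask land in bits 7:0 and 15:8 of perf_event_attr.config, and on hybrid parts attr.type must be the owning PMU's dynamic type id rather than PERF_TYPE_RAW. A minimal, illustrative sketch for the L1D.REPLACEMENT event just added (assumes /sys/bus/event_source/devices/cpu_core exists; equivalent to the perf tool spelling cpu_core/event=0x51,umask=0x5/ - a sketch, not kernel or tools code):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	unsigned int type;
	long long count;
	FILE *f;
	int fd;

	/* Dynamic type id of the P-core PMU on a hybrid system. */
	f = fopen("/sys/bus/event_source/devices/cpu_core/type", "r");
	if (!f || fscanf(f, "%u", &type) != 1)
		return 1;
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = (0x5ULL << 8) | 0x51;	/* UMask 0x5, EventCode 0x51 */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	/* This thread, any CPU; counts only while running on a P-core. */
	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement runs here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("L1D.REPLACEMENT: %lld\n", count);
	close(fd);
	return 0;
}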
+ {
"BriefDescription": "Number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x49",
@@ -80,6 +120,92 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of cache lines filled into the L2 cache that are in Exclusive state",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x25",
+ "EventName": "L2_LINES_IN.E",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cache lines filled into the L2 cache that are in Exclusive state",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x25",
+ "EventName": "L2_LINES_IN.E",
+ "PublicDescription": "Counts the number of cache lines filled into the L2 cache that are in Exclusive state. Counts on a per core basis.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_lowpower"
+ },
+ {
+ "BriefDescription": "Counts the number of cache lines filled into the L2 cache that are in Forward state",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x25",
+ "EventName": "L2_LINES_IN.F",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cache lines filled into the L2 cache that are in Forward state",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x25",
+ "EventName": "L2_LINES_IN.F",
+ "PublicDescription": "Counts the number of cache lines filled into the L2 cache that are in Forward state. Counts on a per core basis.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_lowpower"
+ },
+ {
+ "BriefDescription": "Counts the number of cache lines filled into the L2 cache that are in Invalid state",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x25",
+ "EventName": "L2_LINES_IN.I",
+ "PublicDescription": "Counts the number of cache lines filled into the L2 cache that are in Invalid state, does not count lines that go Invalid due to an eviction",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cache lines filled into the L2 cache that are in Modified state",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x25",
+ "EventName": "L2_LINES_IN.M",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cache lines filled into the L2 cache that are in Modified state",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x25",
+ "EventName": "L2_LINES_IN.M",
+ "PublicDescription": "Counts the number of cache lines filled into the L2 cache that are in Modified state. Counts on a per core basis.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_lowpower"
+ },
+ {
+ "BriefDescription": "Counts the number of cache lines filled into the L2 cache that are in Shared state",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x25",
+ "EventName": "L2_LINES_IN.S",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cache lines filled into the L2 cache that are in Shared state",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x25",
+ "EventName": "L2_LINES_IN.S",
+ "PublicDescription": "Counts the number of cache lines filled into the L2 cache that are in Shared state. Counts on a per core basis.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_lowpower"
+ },
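The L2_LINES_IN fill-state events above are plain UMask splits of EventCode 0x25 (I=0x1, S=0x2, E=0x4, M=0x8, F=0x10). Assuming the usual Intel convention that such UMask bits may be OR-ed to count several fill states at once (an assumption; these entries do not state it), a combined raw encoding is easy to build. A small hypothetical helper:

#include <stdio.h>
#include <stdint.h>

/* UMask bits of L2_LINES_IN.* as listed above; EventCode is 0x25. */
enum l2_fill_state {
	L2_IN_I = 0x01,
	L2_IN_S = 0x02,
	L2_IN_E = 0x04,
	L2_IN_M = 0x08,
	L2_IN_F = 0x10,
};

/* Raw perf config: UMask in bits 15:8, EventCode in bits 7:0. */
static uint64_t l2_lines_in_config(unsigned int states)
{
	return ((uint64_t)states << 8) | 0x25;
}

int main(void)
{
	/* e.g. fills arriving in a writable state (Exclusive or Modified) */
	printf("config=0x%llx\n",
	       (unsigned long long)l2_lines_in_config(L2_IN_E | L2_IN_M));
	return 0;
}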
+ {
"BriefDescription": "Modified cache lines that are evicted by L2 cache when triggered by an L2 cache fill.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x26",
@@ -90,6 +216,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of L2 cache lines that are evicted due to an L2 cache fill",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.NON_SILENT",
+ "PublicDescription": "Counts the number of L2 cache lines that are evicted due to an L2 cache fill. Increments on the core that brought the line in originally.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_lowpower"
+ },
+ {
"BriefDescription": "Non-modified cache lines that are silently dropped by L2 cache.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x26",
@@ -100,6 +236,26 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of L2 cache lines that are silently dropped due to an L2 cache fill",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.SILENT",
+ "PublicDescription": "Counts the number of L2 cache lines that are silently dropped due to an L2 cache fill. Increments on the core that brought the line in originally.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_lowpower"
+ },
+ {
+ "BriefDescription": "Counts the number of L2 cache lines that have been L2 hardware prefetched but not used by demand accesses",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.USELESS_HWPF",
+ "PublicDescription": "Counts the number of L2 cache lines that have been L2 hardware prefetched but not used by demand accesses. Increments on the core that brought the line in originally.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Cache lines that have been L2 hardware prefetched but not used by demand accesses",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x26",
@@ -110,6 +266,42 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of L2 prefetches initiated by either the L2 Stream or AMP that were throttled due to Dynamic Prefetch Throttling. The throttle requestor/source could be from the uncore/SOC or the Dead Block Predictor. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x28",
+ "EventName": "L2_PREFETCHES_THROTTLED.DPT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of L2 prefetches initiated by the L2 Stream that were throttled due to Demand Throttle Prefetcher. DTP Global Triggered with no Local Override. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x28",
+ "EventName": "L2_PREFETCHES_THROTTLED.DTP",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of L2 prefetches initiated by the L2 Stream and not throttled by DTP due to local override. These prefetches may still be throttled due to another throttler mechanism besides DTP. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x28",
+ "EventName": "L2_PREFETCHES_THROTTLED.DTP_OVERRIDE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of L2 prefetches initiated by either the L2 Stream or AMP that were throttled due to exceeding the XQ threshold set by either XQ_THRESHOLD_DTP or XQ_THRESHOLD. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x28",
+ "EventName": "L2_PREFETCHES_THROTTLED.XQ_THRESH",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of demand and prefetch transactions that the External Queue (XQ) rejects due to a full or near full condition.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x30",
@@ -119,6 +311,16 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of L2 Cache Accesses Counts the total number of L2 Cache Accesses - sum of hits, misses, rejects front door requests for CRd/DRd/RFO/ItoM/L2 Prefetches only, per core event",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.ALL",
+ "PublicDescription": "Counts the number of L2 Cache Accesses Counts the total number of L2 Cache Accesses - sum of hits, misses, rejects front door requests for CRd/DRd/RFO/ItoM/L2 Prefetches only.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "All accesses to L2 cache [This event is alias to L2_RQSTS.REFERENCES, L2_RQSTS.ANY]",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x24",
@@ -129,6 +331,33 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of L2 Cache Accesses that resulted in a Hit from a front door request only (does not include rejects or recycles), per core event",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.HIT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of L2 Cache Accesses that resulted in a Hit from a front door request only (does not include rejects or recycles), per core event",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2",
+ "Unit": "cpu_lowpower"
+ },
+ {
+ "BriefDescription": "Counts the number of total L2 Cache Accesses that resulted in a Miss from a front door request only (does not include rejects or recycles), per core event",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Read requests with true-miss in L2 cache [This event is alias to L2_RQSTS.MISS]",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x24",
@@ -139,6 +368,43 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of total L2 Cache Accesses that resulted in a Miss from a front door request only (does not include rejects or recycles), per core event",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.MISS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1",
+ "Unit": "cpu_lowpower"
+ },
+ {
+ "BriefDescription": "Counts the number of L2 Cache Accesses that miss the L2 and get BBL reject short and long rejects (includes those counted in L2_reject_XQ.any), per core event",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.REJECTS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of L2 Cache Accesses that miss the L2 and get BBL reject short and long rejects, per core event",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.REJECTS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4",
+ "Unit": "cpu_lowpower"
+ },
+ {
+ "BriefDescription": "L2 code requests",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PublicDescription": "Counts the total number of L2 code requests.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe4",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Demand Data Read access L2 cache",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x24",
@@ -239,6 +505,51 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of LLC prefetches that were throttled due to Dynamic Prefetch Throttling. The throttle requestor/source could be from the uncore/SOC or the Dead Block Predictor. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x29",
+ "EventName": "LLC_PREFETCHES_THROTTLED.DPT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of LLC prefetches throttled due to Demand Throttle Prefetcher. DTP Global Triggered with no Local Override. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x29",
+ "EventName": "LLC_PREFETCHES_THROTTLED.DTP",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of LLC prefetches not throttled by DTP due to local override. These prefetches may still be throttled due to another throttler mechanism. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x29",
+ "EventName": "LLC_PREFETCHES_THROTTLED.DTP_OVERRIDE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of LLC prefetches throttled due to LLC hit rate in <insert knob name here>. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x29",
+ "EventName": "LLC_PREFETCHES_THROTTLED.HIT_RATE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of LLC prefetches throttled due to exceeding the XQ threshold set by either XQ_THRESHOLD_DTP or LLC_XQ_THRESHOLD. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x29",
+ "EventName": "LLC_PREFETCHES_THROTTLED.XQ_THRESH",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Cycles when L1D is locked",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x42",
@@ -249,6 +560,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of cacheable memory requests that miss in the LLC. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "Counts the number of cacheable memory requests that miss in the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the core has access to an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x41",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Core-originated cacheable requests that missed L3 (Except hardware prefetches to the L3)",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x2e",
@@ -259,6 +580,26 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of cacheable memory requests that miss in the LLC. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "Counts the number of cacheable memory requests that miss in the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the core has access to an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x41",
+ "Unit": "cpu_lowpower"
+ },
+ {
+ "BriefDescription": "Counts the number of cacheable memory requests that access the LLC. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "Counts the number of cacheable memory requests that access the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the core has access to an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4f",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Core-originated cacheable requests that refer to L3 (Except hardware prefetches to the L3)",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x2e",
@@ -409,12 +750,30 @@
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled to a store buffer full condition",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS_LOAD.SBFULL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled to a store buffer full condition",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS_LOAD.SBFULL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80",
+ "Unit": "cpu_lowpower"
+ },
+ {
"BriefDescription": "Counts all retired load instructions.",
"Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_LOADS",
- "PublicDescription": "Counts Instructions with at least one architecturally visible load retired.",
+ "PublicDescription": "Counts Instructions with at least one architecturally visible load retired. Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x81",
"Unit": "cpu_core"
@@ -425,7 +784,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_STORES",
- "PublicDescription": "Counts all retired store instructions.",
+ "PublicDescription": "Counts all retired store instructions. Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x82",
"Unit": "cpu_core"
@@ -435,7 +794,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_SWPF",
- "PublicDescription": "Counts all retired software prefetch instructions.",
+ "PublicDescription": "Counts all retired software prefetch instructions. Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x84",
"Unit": "cpu_core"
@@ -446,7 +805,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ANY",
- "PublicDescription": "Counts all retired memory instructions - loads and stores.",
+ "PublicDescription": "Counts all retired memory instructions - loads and stores. Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x87",
"Unit": "cpu_core"
@@ -457,7 +816,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.LOCK_LOADS",
- "PublicDescription": "Counts retired load instructions with locked access.",
+ "PublicDescription": "Counts retired load instructions with locked access. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x21",
"Unit": "cpu_core"
@@ -468,7 +827,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
- "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
+ "PublicDescription": "Counts retired load instructions that split across a cacheline boundary. Available PDIST counters: 0,1",
"SampleAfterValue": "100003",
"UMask": "0x41",
"Unit": "cpu_core"
@@ -479,18 +838,29 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_STORES",
- "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
+ "PublicDescription": "Counts retired store instructions that split across a cacheline boundary. Available PDIST counters: 0,1",
"SampleAfterValue": "100003",
"UMask": "0x42",
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Retired instructions that hit the STLB.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_HIT_ANY",
+ "PublicDescription": "Number of retired instructions with a clean hit in the 2nd-level TLB (STLB). Available PDIST counters: 0,1",
+ "SampleAfterValue": "100003",
+ "UMask": "0xf",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Retired load instructions that hit the STLB.",
"Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_HIT_LOADS",
- "PublicDescription": "Number of retired load instructions with a clean hit in the 2nd-level TLB (STLB).",
+ "PublicDescription": "Number of retired load instructions with a clean hit in the 2nd-level TLB (STLB). Available PDIST counters: 0,1",
"SampleAfterValue": "100003",
"UMask": "0x9",
"Unit": "cpu_core"
@@ -501,18 +871,39 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_HIT_STORES",
- "PublicDescription": "Number of retired store instructions that hit in the 2nd-level TLB (STLB).",
+ "PublicDescription": "Number of retired store instructions that hit in the 2nd-level TLB (STLB). Available PDIST counters: 0,1",
"SampleAfterValue": "100003",
"UMask": "0xa",
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Retired SWPF instructions that hit the STLB.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_HIT_SWPF",
+ "PublicDescription": "Number of retired SWPF instructions that hit in the 2nd-level TLB (STLB). Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xc",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_ANY",
+ "PublicDescription": "Retired instructions that miss the STLB. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x17",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Retired load instructions that miss the STLB.",
"Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
- "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB). Available PDIST counters: 0,1",
"SampleAfterValue": "100003",
"UMask": "0x11",
"Unit": "cpu_core"
@@ -523,18 +914,28 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
- "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB). Available PDIST counters: 0,1",
"SampleAfterValue": "100003",
"UMask": "0x12",
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Retired SWPF instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_SWPF",
+ "PublicDescription": "Number of retired SWPF instructions that (start a) miss in the 2nd-level TLB (STLB). Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x14",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Retired load instructions whose data sources were a cross-core Snoop hits and forwards data from an in on-package core cache (induced by NI$)",
"Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD",
- "PublicDescription": "Counts retired load instructions whose data sources were a cross-core Snoop hits and forwards data from an in on-package core cache (induced by NI$)",
+ "PublicDescription": "Counts retired load instructions whose data sources were a cross-core Snoop hits and forwards data from an in on-package core cache (induced by NI$) Available PDIST counters: 0,1",
"SampleAfterValue": "20011",
"UMask": "0x10",
"Unit": "cpu_core"
@@ -545,7 +946,7 @@
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
- "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3, Hit-with-FWD is normally excluded.",
+ "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3, Hit-with-FWD is normally excluded. Available PDIST counters: 0,1",
"SampleAfterValue": "20011",
"UMask": "0x4",
"Unit": "cpu_core"
@@ -556,7 +957,7 @@
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
- "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache. Available PDIST counters: 0,1",
"SampleAfterValue": "20011",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -567,7 +968,7 @@
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD",
- "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache. Available PDIST counters: 0,1",
"SampleAfterValue": "20011",
"UMask": "0x2",
"Unit": "cpu_core"
@@ -578,7 +979,7 @@
"Data_LA": "1",
"EventCode": "0xd4",
"EventName": "MEM_LOAD_MISC_RETIRED.UC",
- "PublicDescription": "Retired instructions with at least one load to uncacheable memory-type, or at least one cache-line split locked access (Bus Lock).",
+ "PublicDescription": "Retired instructions with at least one load to uncacheable memory-type, or at least one cache-line split locked access (Bus Lock). Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x4",
"Unit": "cpu_core"
@@ -589,7 +990,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.FB_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x40",
"Unit": "cpu_core"
@@ -600,7 +1001,18 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x101",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts retired load instructions with at least one uop that hit in the Level 0 of the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_HIT_L0",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the Level 0 of the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source. Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -610,6 +1022,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_HIT_L1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the Level 1 of the L1 data cache. Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"Unit": "cpu_core"
},
@@ -619,7 +1032,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_MISS",
- "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache. Available PDIST counters: 0,1",
"SampleAfterValue": "200003",
"UMask": "0x8",
"Unit": "cpu_core"
@@ -630,7 +1043,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_HIT",
- "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources.",
+ "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources. Available PDIST counters: 0,1",
"SampleAfterValue": "200003",
"UMask": "0x2",
"Unit": "cpu_core"
@@ -641,7 +1054,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_MISS",
- "PublicDescription": "Counts retired load instructions missed L2 cache as data sources.",
+ "PublicDescription": "Counts retired load instructions missed L2 cache as data sources. Available PDIST counters: 0,1",
"SampleAfterValue": "100021",
"UMask": "0x10",
"Unit": "cpu_core"
@@ -652,7 +1065,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache. Available PDIST counters: 0,1",
"SampleAfterValue": "100021",
"UMask": "0x4",
"Unit": "cpu_core"
@@ -663,12 +1076,21 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_MISS",
- "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache. Available PDIST counters: 0,1",
"SampleAfterValue": "50021",
"UMask": "0x20",
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of load ops retired that miss the L3 cache and hit in DRAM",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd4",
+ "EventName": "MEM_LOAD_UOPS_MISC_RETIRED.LOCAL_DRAM",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_lowpower"
+ },
+ {
"BriefDescription": "Counts the number of load ops retired that hit the L1 data cache",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd1",
@@ -751,6 +1173,15 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of load ops retired that hit in the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1c",
+ "Unit": "cpu_lowpower"
+ },
+ {
"BriefDescription": "Counts the number of loads that hit in a write combining buffer (WCB), excluding the first load that caused the WCB to allocate.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd1",
@@ -850,6 +1281,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of memory uops retired. A single uop that performs both a load AND a store will be counted as 1, not 2 (e.g. ADD [mem], CONST)",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.ALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0x83",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of load uops retired.",
"Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
@@ -903,7 +1344,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
- "Counter": "0,1,2,3,4,5,6,7",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_128",
@@ -927,7 +1368,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
- "Counter": "0,1,2,3,4,5,6,7",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_16",
@@ -963,7 +1404,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
- "Counter": "0,1,2,3,4,5,6,7",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_256",
@@ -987,7 +1428,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
- "Counter": "0,1,2,3,4,5,6,7",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_32",
@@ -1011,7 +1452,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
- "Counter": "0,1,2,3,4,5,6,7",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_4",
@@ -1035,7 +1476,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
- "Counter": "0,1,2,3,4,5,6,7",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_512",
@@ -1059,7 +1500,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
- "Counter": "0,1,2,3,4,5,6,7",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_64",
@@ -1083,7 +1524,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
- "Counter": "0,1,2,3,4,5,6,7",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_8",
@@ -1126,6 +1567,26 @@
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "Counts the number of memory renamed load uops retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.MRN_LOADS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x9",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of memory renamed store uops retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.MRN_STORES",
+ "SampleAfterValue": "200003",
+ "UMask": "0xa",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of memory uops retired that were splits.",
"Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
@@ -1193,9 +1654,29 @@
"EventName": "MEM_UOPS_RETIRED.STLB_MISS",
"SampleAfterValue": "200003",
"UMask": "0x13",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of memory uops retired that missed in the second level TLB.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x13",
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "Counts the number of load ops retired that filled the STLB - includes those in DTLB_LOAD_MISSES submasks",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x11",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of load uops retired that miss in the second Level TLB.",
"Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
@@ -1206,6 +1687,16 @@
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "Counts the number of store ops retired (store STLB miss)",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
+ "SampleAfterValue": "200003",
+ "UMask": "0x12",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of store uops retired that miss in the second level TLB.",
"Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
@@ -1246,34 +1737,97 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts writebacks of modified cachelines that hit in the L3 or were snooped from another core's caches.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.COREWB_M.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x7E001E00008",
+ "PublicDescription": "Counts writebacks of modified cachelines that hit in the L3 or were snooped from another core's caches. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts writebacks of non-modified cachelines that hit in the L3 or were snooped from another core's caches.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.COREWB_NONM.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x7E001E01000",
+ "PublicDescription": "Counts writebacks of non-modified cachelines that hit in the L3 or were snooped from another core's caches. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "PublicDescription": "Counts demand data reads that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
- "Counter": "0,1,2,3",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x40001E00001",
+ "PublicDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop hit in another cores caches, data forwarding is required as the data is modified. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
},
{
"BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop hit in another cores caches which forwarded the unmodified data to the requesting core.",
- "Counter": "0,1,2,3",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x20001E00001",
+ "PublicDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop hit in another cores caches which forwarded the unmodified data to the requesting core. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "PublicDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
},
{
"BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
- "Counter": "0,1,2,3",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x40001E00002",
+ "PublicDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop hit in another cores caches, data forwarding is required as the data is modified. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read, RFO and ITOM requests including demands and prefetches to the core caches (L1 or L2) that hit in the L3 or were snooped from another core's caches.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x7E001E04477",
+ "PublicDescription": "Counts all data read, code read, RFO and ITOM requests including demands and prefetches to the core caches (L1 or L2) that hit in the L3 or were snooped from another core's caches. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
diff --git a/tools/perf/pmu-events/arch/x86/arrowlake/floating-point.json b/tools/perf/pmu-events/arch/x86/arrowlake/floating-point.json
index 23a80c526aa1..3e68c2468f11 100644
--- a/tools/perf/pmu-events/arch/x86/arrowlake/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/arrowlake/floating-point.json
@@ -1,5 +1,15 @@
[
{
+ "BriefDescription": "Counts the number of cycles when any of the floating point dividers are active.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xcd",
+ "EventName": "ARITH.FPDIV_ACTIVE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Cycles when floating-point divide unit is busy executing divide or square root operations.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"CounterMask": "1",
@@ -21,6 +31,24 @@
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "Counts the number of active floating point dividers per cycle in the loop stage.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcd",
+ "EventName": "ARITH.FPDIV_OCCUPANCY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of floating point divider uops executed per cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcd",
+ "EventName": "ARITH.FPDIV_UOPS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts all microcode FP assists.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc1",
@@ -474,6 +502,51 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of uops executed on all floating point ports.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb2",
+ "EventName": "FP_VINT_UOPS_EXECUTED.ALL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1f",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on floating point and vector integer port 0.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb2",
+ "EventName": "FP_VINT_UOPS_EXECUTED.P0",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on floating point and vector integer port 1.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb2",
+ "EventName": "FP_VINT_UOPS_EXECUTED.P1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on floating point and vector integer port 2.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb2",
+ "EventName": "FP_VINT_UOPS_EXECUTED.P2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on floating point and vector integer port 3.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb2",
+ "EventName": "FP_VINT_UOPS_EXECUTED.P3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of uops executed on floating point and vector integer port 0, 1, 2, 3.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb2",
diff --git a/tools/perf/pmu-events/arch/x86/arrowlake/frontend.json b/tools/perf/pmu-events/arch/x86/arrowlake/frontend.json
index fc5f4dd50fe6..a15de050a76c 100644
--- a/tools/perf/pmu-events/arch/x86/arrowlake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/arrowlake/frontend.json
@@ -30,6 +30,42 @@
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "Counts the number of BACLEARS due to a conditional jump.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.COND",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of BACLEARS due to an indirect branch.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.INDIRECT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of BACLEARS due to a return branch.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.RETURN",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of BACLEARS due to a direct, unconditional jump.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.UNCOND",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x87",
@@ -49,6 +85,15 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of times a decode restriction reduces the decode throughput due to wrong instruction length prediction.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe9",
+ "EventName": "DECODE_RESTRICTION.PREDECODE_WRONG",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "DSB-to-MITE switch true penalty cycles.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x61",
@@ -59,13 +104,29 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of instructions retired that were tagged with having preceded with frontend bound behavior",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ALL",
+ "SampleAfterValue": "1000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of instructions retired that were tagged with having preceded with frontend bound behavior",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ALL",
+ "SampleAfterValue": "1000003",
+ "Unit": "cpu_lowpower"
+ },
+ {
"BriefDescription": "Retired ANT branches",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.ANY_ANT",
"MSRIndex": "0x3F7",
"MSRValue": "0x9",
- "PublicDescription": "Always Not Taken (ANT) conditional retired branches (no BTB entry and not mispredicted)",
+ "PublicDescription": "Always Not Taken (ANT) conditional retired branches (no BTB entry and not mispredicted) Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -77,19 +138,93 @@
"EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x1",
- "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of instruction retired that are tagged after a branch instruction causes bubbles/empty issue slots due to a baclear",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.BRANCH_DETECT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of instruction retired that are tagged after a branch instruction causes bubbles/empty issue slots due to a baclear",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.BRANCH_DETECT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_lowpower"
+ },
+ {
+ "BriefDescription": "Counts the number of instruction retired that are tagged after a branch instruction causes bubbles /empty issue slots due to a btclear",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.BRANCH_RESTEER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of instruction retired that are tagged after a branch instruction causes bubbles /empty issue slots due to a btclear",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.BRANCH_RESTEER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40",
+ "Unit": "cpu_lowpower"
+ },
+ {
+ "BriefDescription": "Counts the number of instructions retired that were tagged following an ms flow due to the bubble/wasted issue slot from exiting long ms flow",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.CISC",
+ "PublicDescription": "Counts the number of instructions retired that were tagged following an ms flow due to the bubble/wasted issue slot from exiting long ms flow",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of instructions retired that were tagged following an ms flow due to the bubble/wasted issue slot from exiting long ms flow",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.CISC",
+ "PublicDescription": "Counts the number of instructions retired that were tagged following an ms flow due to the bubble/wasted issue slot from exiting long ms flow",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_lowpower"
+ },
+ {
+ "BriefDescription": "Counts the number of instructions retired that were tagged every cycle the decoder is unable to send 4 uops",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.DECODE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of instructions retired that were tagged every cycle the decoder is unable to send 3 uops per cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.DECODE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_lowpower"
+ },
+ {
"BriefDescription": "Retired Instructions who experienced a critical DSB miss.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x11",
- "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss.",
+ "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -104,6 +239,15 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of instructions retired that were tagged because empty issue slots were seen before the uop due to icache miss",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ICACHE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20",
+ "Unit": "cpu_lowpower"
+ },
+ {
"BriefDescription": "Counts the number of instructions retired that were tagged because empty issue slots were seen before the uop due to ITLB miss",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
@@ -119,7 +263,7 @@
"EventName": "FRONTEND_RETIRED.ITLB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x14",
- "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -140,7 +284,7 @@
"EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x12",
- "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss.",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -152,7 +296,7 @@
"EventName": "FRONTEND_RETIRED.L2_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x13",
- "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss.",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -164,7 +308,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
"MSRValue": "0x608006",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -176,7 +320,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
"MSRValue": "0x601006",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -188,7 +332,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
"MSRValue": "0x600206",
- "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -200,7 +344,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
"MSRIndex": "0x3F7",
"MSRValue": "0x610006",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -212,7 +356,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
"MSRValue": "0x100206",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -224,7 +368,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
"MSRValue": "0x602006",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -236,7 +380,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
"MSRValue": "0x600406",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -248,7 +392,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
"MSRValue": "0x620006",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -260,7 +404,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
"MSRValue": "0x604006",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -272,7 +416,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
"MSRValue": "0x600806",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -284,7 +428,7 @@
"EventName": "FRONTEND_RETIRED.MISP_ANT",
"MSRIndex": "0x3F7",
"MSRValue": "0x9",
- "PublicDescription": "ANT retired branches that got just mispredicted",
+ "PublicDescription": "ANT retired branches that got just mispredicted Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x2",
"Unit": "cpu_core"
@@ -296,18 +440,55 @@
"EventName": "FRONTEND_RETIRED.MS_FLOWS",
"MSRIndex": "0x3F7",
"MSRValue": "0x8",
+ "PublicDescription": "Counts flows delivered by the Microcode Sequencer Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of instruction retired tagged after a wasted issue slot if none of the previous events occurred",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.OTHER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of instruction retired tagged after a wasted issue slot if none of the previous events occurred",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.OTHER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80",
+ "Unit": "cpu_lowpower"
+ },
+ {
+ "BriefDescription": "Counts the number of instruction retired that are tagged after a branch instruction causes bubbles/empty issue slots due to a predecode wrong",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.PREDECODE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of instruction retired that are tagged after a branch instruction causes bubbles/empty issue slots due to a predecode wrong.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.PREDECODE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_lowpower"
+ },
+ {
"BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x15",
- "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -319,7 +500,7 @@
"EventName": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
"MSRIndex": "0x3F7",
"MSRValue": "0x17",
- "PublicDescription": "Number retired branch instructions that caused the front-end to be resteered when it finds the instruction in a fetch line. This is called Unknown Branch which occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
+ "PublicDescription": "Number retired branch instructions that caused the front-end to be resteered when it finds the instruction in a fetch line. This is called Unknown Branch which occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -602,8 +783,35 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
"EventName": "MS_DECODED.MS_BUSY",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the micro-sequencer is busy.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe7",
+ "EventName": "MS_DECODED.MS_BUSY",
"SampleAfterValue": "1000003",
"UMask": "0x4",
"Unit": "cpu_lowpower"
+ },
+ {
+ "BriefDescription": "Counts the number of times entered into a ucode flow in the FEC. Includes inserted flows due to front-end detected faults or assists.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe7",
+ "EventName": "MS_DECODED.MS_ENTRY",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of times nanocode flow is executed.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe7",
+ "EventName": "MS_DECODED.NANO_CODE",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
}
]
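(Sketch of how the FRONTEND_RETIRED latency events above are typically consumed: sampled precisely and browsed by symbol. The alias carries the MSR 0x3F7 programming, and "./workload" is a placeholder.)

  # Attribute >=128-cycle frontend stalls to the instructions that retired after them.
  perf record -e cpu_core/FRONTEND_RETIRED.LATENCY_GE_128/pp -- ./workload
  perf report --stdio
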
diff --git a/tools/perf/pmu-events/arch/x86/arrowlake/memory.json b/tools/perf/pmu-events/arch/x86/arrowlake/memory.json
index 08f01fc66fef..05cc46518232 100644
--- a/tools/perf/pmu-events/arch/x86/arrowlake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/arrowlake/memory.json
@@ -1,5 +1,14 @@
[
{
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to any number of reasons, including an L1 miss, WCB full, pagewalk, store address block or store data block.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.ANY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7f",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to any number of reasons, including an L1 miss, WCB full, pagewalk, store address block or store data block, on a load that retires.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
@@ -63,6 +72,16 @@
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to other block cases.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.OTHER",
+ "PublicDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to other block cases such as pipeline conflicts, fences, etc.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to other block cases.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
@@ -83,6 +102,15 @@
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to a pagewalk.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.PGWALK",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a pagewalk.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
@@ -101,6 +129,15 @@
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to a store address match.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.ST_ADDR",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a store address match.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
@@ -119,6 +156,24 @@
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to store data forward block.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.ST_DATA",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to request buffers full or lock in progress.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.WCB_FULL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to request buffers full or lock in progress.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
@@ -156,6 +211,15 @@
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "Counts the number of machine clears that flush the pipeline and restart the machine without the use of microcode.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING_FAST",
+ "SampleAfterValue": "20003",
+ "UMask": "0x82",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 1024 cycles.",
"Counter": "2,3,4,5,6,7,8,9",
"Data_LA": "1",
@@ -291,7 +355,7 @@
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.STORE_SAMPLE",
- "PublicDescription": "Counts Retired memory accesses with at least 1 store operation. This PEBS event is the precisely-distributed (PDist) trigger covering all stores uops for sampling by the PEBS Store Latency Facility. The facility is described in Intel SDM Volume 3 section 19.9.8",
+ "PublicDescription": "Counts Retired memory accesses with at least 1 store operation. This PEBS event is the precisely-distributed (PDist) trigger covering all stores uops for sampling by the PEBS Store Latency Facility. The facility is described in Intel SDM Volume 3 section 19.9.8 Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x2",
"Unit": "cpu_core"
@@ -333,23 +397,37 @@
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1E780000001",
+ "PublicDescription": "Counts demand data reads that were supplied by DRAM. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
- "Counter": "0,1,2,3",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xFE7F8000001",
+ "PublicDescription": "Counts demand data reads that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
},
{
"BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
- "Counter": "0,1,2,3",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xFE7F8000002",
+ "PublicDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
diff --git a/tools/perf/pmu-events/arch/x86/arrowlake/other.json b/tools/perf/pmu-events/arch/x86/arrowlake/other.json
index 0175b2193201..c8feed3a99a6 100644
--- a/tools/perf/pmu-events/arch/x86/arrowlake/other.json
+++ b/tools/perf/pmu-events/arch/x86/arrowlake/other.json
@@ -19,148 +19,97 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.",
- "Counter": "0,1,2,3,4,5,6,7,8,9",
- "EventCode": "0xa2",
- "EventName": "BE_STALLS.SCOREBOARD",
- "SampleAfterValue": "100003",
- "UMask": "0x2",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Count number of times a load is depending on another load that had just write back its data or in previous or 2 cycles back. This event supports in-direct dependency through a single uop.",
- "Counter": "0,1,2,3,4,5,6,7,8,9",
- "EventCode": "0x02",
- "EventName": "DEPENDENT_LOADS.ANY",
- "SampleAfterValue": "1000003",
- "UMask": "0x7",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Counts the number of uops executed on secondary integer ports 0,1,2,3.",
+ "BriefDescription": "Counts the number of unhalted cycles a Core is blocked due to a lock In Progress issued by another core",
"Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xb3",
- "EventName": "INT_UOPS_EXECUTED.2ND",
- "SampleAfterValue": "1000003",
- "UMask": "0x80",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts the number of uops executed on a load port.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xb3",
- "EventName": "INT_UOPS_EXECUTED.LD",
- "PublicDescription": "Counts the number of uops executed on a load port. This event counts for integer uops even if the destination is FP/vector",
+ "EventCode": "0x63",
+ "EventName": "BUS_LOCK.BLOCKED_CYCLES",
+ "PublicDescription": "Counts the number of unhalted cycles a Core is blocked due to a lock In Progress issued by another core. Counts on a per core basis.",
"SampleAfterValue": "1000003",
"UMask": "0x1",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of uops executed on integer port 0,1, 2, 3.",
+ "BriefDescription": "Counts the number of unhalted cycles a Core is blocked due to an Accepted lock it issued, includes both split and non-split lock cycles.",
"Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xb3",
- "EventName": "INT_UOPS_EXECUTED.PRIMARY",
- "SampleAfterValue": "1000003",
- "UMask": "0x78",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts the number of uops executed on a Store address port.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xb3",
- "EventName": "INT_UOPS_EXECUTED.STA",
- "PublicDescription": "Counts the number of uops executed on a Store address port. This event counts integer uops even if the data source is FP/vector",
+ "EventCode": "0x63",
+ "EventName": "BUS_LOCK.LOCK_CYCLES",
+ "PublicDescription": "Counts the number of unhalted cycles a Core is blocked due to an Accepted lock it issued, includes both split and non-split lock cycles. Counts on a per core basis.",
"SampleAfterValue": "1000003",
"UMask": "0x2",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of uops executed on an integer store data and jump port.",
+ "BriefDescription": "Counts the number of non-split locks such as UC locks issued by a Core (does not include cache locks)",
"Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xb3",
- "EventName": "INT_UOPS_EXECUTED.STD_JMP",
+ "EventCode": "0x63",
+ "EventName": "BUS_LOCK.NON_SPLIT_LOCKS",
"SampleAfterValue": "1000003",
"UMask": "0x4",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "This event is deprecated. [This event is alias to MISC_RETIRED.LBR_INSERTS]",
+ "BriefDescription": "Counts the number of split locks issued by a Core",
"Counter": "0,1,2,3,4,5,6,7",
- "Deprecated": "1",
- "EventCode": "0xe4",
- "EventName": "LBR_INSERTS.ANY",
+ "EventCode": "0x63",
+ "EventName": "BUS_LOCK.SPLIT_LOCKS",
"SampleAfterValue": "1000003",
- "UMask": "0x1",
- "Unit": "cpu_lowpower"
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts cycles where no execution is happening due to loads waiting for L1 cache (that is: no execution & load in flight & no load missed L1 cache)",
- "Counter": "0,1,2,3,4,5,6,7,8,9",
- "EventCode": "0x46",
- "EventName": "MEMORY_STALLS.L1",
+ "BriefDescription": "Counts the number of cycles the L2 Prefetchers are at throttle level 0",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x32",
+ "EventName": "DYNAMIC_PREFETCH_THROTTLER.LEVEL0_SOC",
"SampleAfterValue": "1000003",
"UMask": "0x1",
- "Unit": "cpu_core"
+ "Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts cycles where no execution is happening due to loads waiting for L2 cache (that is: no execution & load in flight & load missed L1 & no load missed L2 cache)",
- "Counter": "0,1,2,3,4,5,6,7,8,9",
- "EventCode": "0x46",
- "EventName": "MEMORY_STALLS.L2",
+ "BriefDescription": "Counts the number of cycles the L2 Prefetcher throttle level is at 1",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x32",
+ "EventName": "DYNAMIC_PREFETCH_THROTTLER.LEVEL1_SOC",
"SampleAfterValue": "1000003",
"UMask": "0x2",
- "Unit": "cpu_core"
+ "Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts cycles where no execution is happening due to loads waiting for L3 cache (that is: no execution & load in flight & load missed L1 & load missed L2 cache & no load missed L3 Cache)",
- "Counter": "0,1,2,3,4,5,6,7,8,9",
- "EventCode": "0x46",
- "EventName": "MEMORY_STALLS.L3",
+ "BriefDescription": "Counts the number of cycles the L2 Prefetcher throttle level is at 2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x32",
+ "EventName": "DYNAMIC_PREFETCH_THROTTLER.LEVEL2_SOC",
"SampleAfterValue": "1000003",
"UMask": "0x4",
- "Unit": "cpu_core"
+ "Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts cycles where no execution is happening due to loads waiting for Memory (that is: no execution & load in flight & a load missed L3 cache)",
- "Counter": "0,1,2,3,4,5,6,7,8,9",
- "EventCode": "0x46",
- "EventName": "MEMORY_STALLS.MEM",
+ "BriefDescription": "Counts the number of cycles the L2 Prefetcher throttle level is at 3",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x32",
+ "EventName": "DYNAMIC_PREFETCH_THROTTLER.LEVEL3_SOC",
"SampleAfterValue": "1000003",
"UMask": "0x8",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Counts demand data reads that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10001",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_core"
+ "Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_DATA_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1E780000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_core"
+ "BriefDescription": "Counts the number of cycles the L2 Prefetcher throttle level is at 4",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x32",
+ "EventName": "DYNAMIC_PREFETCH_THROTTLER.LEVEL4_SOC",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10002",
- "SampleAfterValue": "100003",
+ "BriefDescription": "This event is deprecated. [This event is alias to MISC_RETIRED.LBR_INSERTS]",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Deprecated": "1",
+ "EventCode": "0xe4",
+ "EventName": "LBR_INSERTS.ANY",
+ "SampleAfterValue": "1000003",
"UMask": "0x1",
- "Unit": "cpu_core"
+ "Unit": "cpu_lowpower"
},
{
"BriefDescription": "Counts streaming stores which modify a full 64 byte cacheline that have any type of response.",
@@ -169,6 +118,7 @@
"EventName": "OCR.FULL_STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800000010000",
+ "PublicDescription": "Counts streaming stores which modify a full 64 byte cacheline that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -180,6 +130,7 @@
"EventName": "OCR.PARTIAL_STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400000010000",
+ "PublicDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -191,89 +142,68 @@
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10800",
+ "PublicDescription": "Counts streaming stores that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts streaming stores that have any type of response.",
- "Counter": "0,1,2,3",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10800",
+ "PublicDescription": "Counts streaming stores that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
- "Counter": "0,1,2,3,4,5,6,7,8,9",
- "EventCode": "0xa5",
- "EventName": "RS.EMPTY",
- "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
- "SampleAfterValue": "1000003",
- "UMask": "0x7",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "BriefDescription": "Cycles the uncore cannot take further requests",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"CounterMask": "1",
- "EdgeDetect": "1",
- "EventCode": "0xa5",
- "EventName": "RS.EMPTY_COUNT",
- "Invert": "1",
- "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
- "SampleAfterValue": "100003",
- "UMask": "0x7",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Cycles when RS was empty and a resource allocation stall is asserted",
- "Counter": "0,1,2,3,4,5,6,7,8,9",
- "EventCode": "0xa5",
- "EventName": "RS.EMPTY_RESOURCE",
+ "EventCode": "0x2d",
+ "EventName": "XQ.FULL",
+ "PublicDescription": "number of cycles when the thread is active and the uncore cannot take any further requests (for example prefetches, loads or stores initiated by the Core that miss the L2 cache).",
"SampleAfterValue": "1000003",
"UMask": "0x1",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state.",
+ "BriefDescription": "Counts the number of prefetch requests that were promoted in the XQ to a demand request.",
"Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0x75",
- "EventName": "SERIALIZATION.C01_MS_SCB",
+ "EventCode": "0xf4",
+ "EventName": "XQ_PROMOTION.ALL",
"SampleAfterValue": "1000003",
- "UMask": "0x4",
+ "UMask": "0x7",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state.",
+ "BriefDescription": "Counts the number of prefetch requests that were promoted in the XQ to a demand code read.",
"Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0x75",
- "EventName": "SERIALIZATION.C01_MS_SCB",
- "SampleAfterValue": "200003",
+ "EventCode": "0xf4",
+ "EventName": "XQ_PROMOTION.CRDS",
+ "SampleAfterValue": "1000003",
"UMask": "0x4",
- "Unit": "cpu_lowpower"
+ "Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of issue slots where no uop could issue due to an IQ scoreboard that stalls allocation until a specified older uop retires or (in the case of jump scoreboard) executes. Commonly executed instructions with IQ scoreboards include LFENCE and MFENCE.",
+ "BriefDescription": "Counts the number of prefetch requests that were promoted in the XQ to a demand read.",
"Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0x75",
- "EventName": "SERIALIZATION.IQ_JEU_SCB",
+ "EventCode": "0xf4",
+ "EventName": "XQ_PROMOTION.DRDS",
"SampleAfterValue": "1000003",
"UMask": "0x1",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Cycles the uncore cannot take further requests",
- "Counter": "0,1,2,3,4,5,6,7,8,9",
- "CounterMask": "1",
- "EventCode": "0x2d",
- "EventName": "XQ.FULL",
- "PublicDescription": "number of cycles when the thread is active and the uncore cannot take any further requests (for example prefetches, loads or stores initiated by the Core that miss the L2 cache).",
+ "BriefDescription": "Counts the number of prefetch requests that were promoted in the XQ to a demand RFO.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xf4",
+ "EventName": "XQ_PROMOTION.RFOS",
"SampleAfterValue": "1000003",
- "UMask": "0x1",
- "Unit": "cpu_core"
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
}
]
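
For readers mapping these fields back to the hardware encoding: perf's jevents generator compiles each entry into its event tables, and EventCode, UMask, CounterMask, EdgeDetect and Invert become terms of the x86 PERFEVTSEL layout that perf's event parser also accepts directly. A minimal sketch of that mapping in Python — illustrative only, not the actual jevents implementation, and it ignores multi-code OCR entries such as "0x2A,0x2B":

    # Sketch: turn one pmu-events JSON entry into a perf event-parser string.
    def to_perf_event(entry):
        pmu = entry.get("Unit", "cpu")          # e.g. cpu_atom, cpu_core
        terms = ["event=" + entry["EventCode"],
                 "umask=" + entry.get("UMask", "0x0")]
        if entry.get("CounterMask"):
            terms.append("cmask=" + entry["CounterMask"])
        if entry.get("EdgeDetect") == "1":
            terms.append("edge")
        if entry.get("Invert") == "1":
            terms.append("inv")
        return "%s/%s/" % (pmu, ",".join(terms))

    # Mirrors the XQ_PROMOTION.ALL entry added above.
    print(to_perf_event({"EventCode": "0xf4", "UMask": "0x7", "Unit": "cpu_atom"}))
    # -> cpu_atom/event=0xf4,umask=0x7/
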
diff --git a/tools/perf/pmu-events/arch/x86/arrowlake/pipeline.json b/tools/perf/pmu-events/arch/x86/arrowlake/pipeline.json
index 6dbde51e7ead..805616052925 100644
--- a/tools/perf/pmu-events/arch/x86/arrowlake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/arrowlake/pipeline.json
@@ -31,6 +31,16 @@
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "Counts the number of cycles when any of the integer dividers are active.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xcd",
+ "EventName": "ARITH.IDIV_ACTIVE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Cycles when integer divide unit is busy executing divide or square root operations.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"CounterMask": "1",
@@ -42,6 +52,24 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of active integer dividers per cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcd",
+ "EventName": "ARITH.IDIV_OCCUPANCY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of integer divider uops executed per cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcd",
+ "EventName": "ARITH.IDIV_UOPS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc1",
@@ -52,6 +80,15 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xa2",
+ "EventName": "BE_STALLS.SCOREBOARD",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts the total number of branch instructions retired for all branch types.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
@@ -65,13 +102,14 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "PublicDescription": "Counts all branch instructions retired.",
+ "PublicDescription": "Counts all branch instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"Unit": "cpu_core"
},
{
"BriefDescription": "Counts the total number of branch instructions retired for all branch types.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Errata": "ARL010, ARL011",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PublicDescription": "Counts the total number of instructions in which the instruction pointer (IP) of the processor is resteered due to a branch instruction and the branch instruction successfully retires. All branch type instructions are accounted for.",
@@ -92,7 +130,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND",
- "PublicDescription": "Counts conditional branch instructions retired.",
+ "PublicDescription": "Counts conditional branch instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x111",
"Unit": "cpu_core"
@@ -100,6 +138,7 @@
{
"BriefDescription": "Counts the number of retired JCC (Jump on Conditional Code) branch instructions retired, includes both taken and not taken branches.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Errata": "ARL011",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND",
"SampleAfterValue": "200003",
@@ -107,11 +146,20 @@
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "Counts the number of not taken JCC branch instructions retired",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_NTAKEN",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7f",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Not taken branch instructions retired.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_NTAKEN",
- "PublicDescription": "Counts not taken branch instructions retired.",
+ "PublicDescription": "Counts not taken branch instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x10",
"Unit": "cpu_core"
@@ -130,7 +178,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_TAKEN",
- "PublicDescription": "Counts taken conditional branch instructions retired.",
+ "PublicDescription": "Counts taken conditional branch instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x101",
"Unit": "cpu_core"
@@ -149,7 +197,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_TAKEN_BWD",
- "PublicDescription": "Counts taken backward conditional branch instructions retired.",
+ "PublicDescription": "Counts taken backward conditional branch instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -159,7 +207,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_TAKEN_FWD",
- "PublicDescription": "Counts taken forward conditional branch instructions retired.",
+ "PublicDescription": "Counts taken forward conditional branch instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x102",
"Unit": "cpu_core"
@@ -178,7 +226,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "PublicDescription": "Counts far branch instructions retired.",
+ "PublicDescription": "Counts far branch instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x40",
"Unit": "cpu_core"
@@ -186,6 +234,7 @@
{
"BriefDescription": "Counts the number of far branch instructions retired, includes far jump, far call and return, and interrupt call and return.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Errata": "ARL011",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"SampleAfterValue": "200003",
@@ -206,7 +255,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT",
- "PublicDescription": "Counts near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
+ "PublicDescription": "Counts near indirect branch instructions retired excluding returns. TSX abort is an indirect branch. Available PDIST counters: 0,1",
"SampleAfterValue": "100003",
"UMask": "0x80",
"Unit": "cpu_core"
@@ -214,6 +263,7 @@
{
"BriefDescription": "Counts the number of near indirect JMP and near indirect CALL branch instructions retired.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Errata": "ARL011",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT",
"SampleAfterValue": "200003",
@@ -232,6 +282,7 @@
{
"BriefDescription": "Counts the number of near indirect CALL branch instructions retired.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Errata": "ARL011",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT_CALL",
"SampleAfterValue": "200003",
@@ -239,6 +290,15 @@
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "Counts the number of near indirect JMP branch instructions retired",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.INDIRECT_JMP",
+ "SampleAfterValue": "200003",
+ "UMask": "0xef",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of near indirect JMP branch instructions retired.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
@@ -248,6 +308,17 @@
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.INDIRECT_CALL",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Deprecated": "1",
+ "Errata": "ARL011",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.IND_CALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfb",
+ "Unit": "cpu_lowpower"
+ },
+ {
"BriefDescription": "Counts the number of near CALL branch instructions retired",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
@@ -261,7 +332,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
- "PublicDescription": "Counts both direct and indirect near call instructions retired.",
+ "PublicDescription": "Counts both direct and indirect near call instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x2",
"Unit": "cpu_core"
@@ -269,6 +340,7 @@
{
"BriefDescription": "Counts the number of near CALL branch instructions retired.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Errata": "ARL010, ARL011",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"SampleAfterValue": "200003",
@@ -289,7 +361,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "PublicDescription": "Counts return instructions retired.",
+ "PublicDescription": "Counts return instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x8",
"Unit": "cpu_core"
@@ -304,11 +376,20 @@
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "Counts the number of taken branch instructions retired",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc0",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Taken branch instructions retired.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Counts taken branch instructions retired.",
+ "PublicDescription": "Counts taken branch instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x20",
"Unit": "cpu_core"
@@ -316,6 +397,7 @@
{
"BriefDescription": "Counts the number of near taken branch instructions retired.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Errata": "ARL011",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"SampleAfterValue": "200003",
@@ -363,7 +445,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"Unit": "cpu_core"
},
@@ -381,6 +463,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES_COST",
+ "PublicDescription": "All mispredicted branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x44",
"Unit": "cpu_core"
@@ -399,7 +482,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND",
- "PublicDescription": "Counts mispredicted conditional branch instructions retired.",
+ "PublicDescription": "Counts mispredicted conditional branch instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x111",
"Unit": "cpu_core"
@@ -418,16 +501,26 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_COST",
+ "PublicDescription": "Mispredicted conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x151",
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of mispredicted not taken JCC branch instructions retired",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_NTAKEN",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7f",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Mispredicted non-taken conditional branch instructions retired.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_NTAKEN",
- "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken.",
+ "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x10",
"Unit": "cpu_core"
@@ -437,6 +530,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_NTAKEN_COST",
+ "PublicDescription": "Mispredicted non-taken conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x50",
"Unit": "cpu_core"
@@ -455,7 +549,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN",
- "PublicDescription": "Counts taken conditional mispredicted branch instructions retired.",
+ "PublicDescription": "Counts taken conditional mispredicted branch instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x101",
"Unit": "cpu_core"
@@ -474,7 +568,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN_BWD",
- "PublicDescription": "Counts taken backward conditional mispredicted branch instructions retired.",
+ "PublicDescription": "Counts taken backward conditional mispredicted branch instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -484,6 +578,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN_BWD_COST",
+ "PublicDescription": "number of branch instructions retired that were mispredicted and taken backward. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x8001",
"Unit": "cpu_core"
@@ -493,6 +588,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN_COST",
+ "PublicDescription": "Mispredicted taken conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x141",
"Unit": "cpu_core"
@@ -502,7 +598,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN_FWD",
- "PublicDescription": "Counts taken forward conditional mispredicted branch instructions retired.",
+ "PublicDescription": "Counts taken forward conditional mispredicted branch instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"Unit": "cpu_core"
},
@@ -511,6 +607,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN_FWD_COST",
+ "PublicDescription": "number of branch instructions retired that were mispredicted and taken forward. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x8002",
"Unit": "cpu_core"
@@ -529,7 +626,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT",
- "PublicDescription": "Counts miss-predicted near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
+ "PublicDescription": "Counts miss-predicted near indirect branch instructions retired excluding returns. TSX abort is an indirect branch. Available PDIST counters: 0,1",
"SampleAfterValue": "100003",
"UMask": "0x80",
"Unit": "cpu_core"
@@ -557,7 +654,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
- "PublicDescription": "Counts retired mispredicted indirect (near taken) CALL instructions, including both register and memory indirect.",
+ "PublicDescription": "Counts retired mispredicted indirect (near taken) CALL instructions, including both register and memory indirect. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x2",
"Unit": "cpu_core"
@@ -576,6 +673,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_CALL_COST",
+ "PublicDescription": "Mispredicted indirect CALL retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x42",
"Unit": "cpu_core"
@@ -585,11 +683,21 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_COST",
+ "PublicDescription": "Mispredicted near indirect branch instructions retired (excluding returns). This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0,1",
"SampleAfterValue": "100003",
"UMask": "0xc0",
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of mispredicted near indirect JMP branch instructions retired",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT_JMP",
+ "SampleAfterValue": "200003",
+ "UMask": "0xef",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of mispredicted near indirect JMP branch instructions retired.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
@@ -599,11 +707,20 @@
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "Counts the number of mispredicted near taken branch instructions retired",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "200003",
+ "UMask": "0x80",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken.",
+ "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x20",
"Unit": "cpu_core"
@@ -622,6 +739,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN_COST",
+ "PublicDescription": "Mispredicted taken near branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x60",
"Unit": "cpu_core"
@@ -631,7 +749,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RET",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x8",
"Unit": "cpu_core"
@@ -659,11 +777,21 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RET_COST",
+ "PublicDescription": "Mispredicted ret instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x48",
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the total number of BTCLEARS.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe8",
+ "EventName": "BTCLEAR.ANY",
+ "PublicDescription": "Counts the total number of BTCLEARS which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
+ "SampleAfterValue": "1000003",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Core clocks when the thread is in the C0.1 light-weight slower wakeup time but more power saving optimized state.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xec",
@@ -889,6 +1017,15 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Count number of times a load is depending on another load that had just write back its data or in previous or 2 cycles back. This event supports in-direct dependency through a single uop.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x02",
+ "EventName": "DEPENDENT_LOADS.ANY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xa6",
@@ -982,6 +1119,7 @@
"BriefDescription": "Fixed Counter: Counts the number of instructions retired.",
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
+ "PublicDescription": "Fixed Counter: Counts the number of instructions retired. Available PDIST counters: 32",
"SampleAfterValue": "2000003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -990,7 +1128,7 @@
"BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
- "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter. Available PDIST counters: 32",
"SampleAfterValue": "2000003",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -999,6 +1137,7 @@
"BriefDescription": "Fixed Counter: Counts the number of instructions retired",
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
+ "PublicDescription": "Fixed Counter: Counts the number of instructions retired Available PDIST counters: 32",
"SampleAfterValue": "2000003",
"UMask": "0x1",
"Unit": "cpu_lowpower"
@@ -1016,7 +1155,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.ANY_P",
- "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter. Available PDIST counters: 0,1",
"SampleAfterValue": "2000003",
"Unit": "cpu_core"
},
@@ -1033,6 +1172,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.BR_FUSED",
+ "PublicDescription": "retired macro-fused uops when there is a branch in the macro-fused pair (the two instructions that got macro-fused count once in this pmon) Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x10",
"Unit": "cpu_core"
@@ -1042,6 +1182,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.MACRO_FUSED",
+ "PublicDescription": "INST_RETIRED.MACRO_FUSED Available PDIST counters: 0,1",
"SampleAfterValue": "2000003",
"UMask": "0x30",
"Unit": "cpu_core"
@@ -1051,7 +1192,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.NOP",
- "PublicDescription": "Counts all retired NOP or ENDBR32/64 or PREFETCHIT0/1 instructions",
+ "PublicDescription": "Counts all retired NOP or ENDBR32/64 or PREFETCHIT0/1 instructions Available PDIST counters: 0,1",
"SampleAfterValue": "2000003",
"UMask": "0x2",
"Unit": "cpu_core"
@@ -1060,7 +1201,7 @@
"BriefDescription": "Precise instruction retired with PEBS precise-distribution",
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.PREC_DIST",
- "PublicDescription": "A version of INST_RETIRED that allows for a precise distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR++) feature to fix bias in how retired instructions get sampled. Use on Fixed Counter 0.",
+ "PublicDescription": "A version of INST_RETIRED that allows for a precise distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR++) feature to fix bias in how retired instructions get sampled. Use on Fixed Counter 0. Available PDIST counters: 32",
"SampleAfterValue": "2000003",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -1070,7 +1211,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.REP_ITERATION",
- "PublicDescription": "Number of iterations of Repeat (REP) string retired instructions such as MOVS, CMPS, and SCAS. Each has a byte, word, and doubleword version and string instructions can be repeated using a repetition prefix, REP, that allows their architectural execution to be repeated a number of times as specified by the RCX register. Note the number of iterations is implementation-dependent.",
+ "PublicDescription": "Number of iterations of Repeat (REP) string retired instructions such as MOVS, CMPS, and SCAS. Each has a byte, word, and doubleword version and string instructions can be repeated using a repetition prefix, REP, that allows their architectural execution to be repeated a number of times as specified by the RCX register. Note the number of iterations is implementation-dependent. Available PDIST counters: 0,1",
"SampleAfterValue": "2000003",
"UMask": "0x8",
"Unit": "cpu_core"
@@ -1140,6 +1281,98 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of uops executed on secondary integer ports 0,1,2,3.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.2ND",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on all Integer ports.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.ALL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xff",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on a load port.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.LD",
+ "PublicDescription": "Counts the number of uops executed on a load port. This event counts for integer uops even if the destination is FP/vector",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on integer port 0.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.P0",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on integer port 1.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.P1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on integer port 2.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.P2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on integer port 3.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.P3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on integer port 0,1, 2, 3.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.PRIMARY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x78",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on a Store address port.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.STA",
+ "PublicDescription": "Counts the number of uops executed on a Store address port. This event counts integer uops even if the data source is FP/vector",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on an integer store data and jump port.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.STD_JMP",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Number of vector integer instructions retired of 128-bit vector-width.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xe7",
@@ -1242,6 +1475,15 @@
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "Counts the number of occurrences a retired load was blocked for any of the following reasons: utlb_miss, 4k_alias, unknown_sta/bad_fwd, unready_fwd (includes md blocks and esp consuming load blocks)",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.ALL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1f",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of occurrences a retired load gets blocked because its address exactly matches an older store whose data is not ready (a.k.a. unknown). unready_fwd",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x03",
@@ -1270,6 +1512,15 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of times a load got early blocked due to preceding store operation with unknown address or unknown data. Excluding in-line (immediate) wakeups",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_EARLY",
+ "SampleAfterValue": "100003",
+ "UMask": "0xa1",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts the number of occurrences a retired load gets blocked because its address partially overlaps with an older store (size mismatch) - unknown_sta/bad_forward",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x03",
@@ -1298,6 +1549,25 @@
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "Counts the number of demand loads that match on a wcb (request buffer) allocated by an L1 hardware prefetch [This event is alias to LOAD_HIT_PREFETCH.HW_PF]",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x4c",
+ "EventName": "LOAD_HIT_PREFETCH.HWPF",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "This event is deprecated. [This event is alias to LOAD_HIT_PREFETCH.HWPF]",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Deprecated": "1",
+ "EventCode": "0x4c",
+ "EventName": "LOAD_HIT_PREFETCH.HW_PF",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"CounterMask": "1",
@@ -1338,6 +1608,15 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of machine clears that flush the pipeline and restart the machine without the use of microcode.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.ANY_FAST",
+ "SampleAfterValue": "20003",
+ "UMask": "0xff",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Number of machine clears (nukes) of any type.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"CounterMask": "1",
@@ -1368,6 +1647,15 @@
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "Counts the number of machine clears that flush the pipeline and restart the machine without the use of microcode.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.DISAMBIGUATION_FAST",
+ "SampleAfterValue": "20003",
+ "UMask": "0x88",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of nukes due to memory renaming",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
@@ -1377,6 +1665,15 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of machine clears that flush the pipeline and restart the machine without the use of microcode.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.MRN_NUKE_FAST",
+ "SampleAfterValue": "20003",
+ "UMask": "0x90",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of times that the machine clears due to a page fault. Covers both I-Side and D-Side (Loads/Stores) page faults. A page fault occurs when either the page is not present, or an access violation.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc3",
@@ -1405,8 +1702,9 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of machine clears that flush the pipeline and restart the machine with the use of microcode due to SMC, MEMORY_ORDERING, FP_ASSISTS, PAGE_FAULT, DISAMBIGUATION, and FPC_VIRTUAL_TRAP.",
+ "BriefDescription": "This event is deprecated.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Deprecated": "1",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.SLOW",
"SampleAfterValue": "20003",
@@ -1433,6 +1731,42 @@
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "Counts cycles where no execution is happening due to loads waiting for L1 cache (that is: no execution & load in flight & no load missed L1 cache)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x46",
+ "EventName": "MEMORY_STALLS.L1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts cycles where no execution is happening due to loads waiting for L2 cache (that is: no execution & load in flight & load missed L1 & no load missed L2 cache)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x46",
+ "EventName": "MEMORY_STALLS.L2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts cycles where no execution is happening due to loads waiting for L3 cache (that is: no execution & load in flight & load missed L1 & load missed L2 cache & no load missed L3 Cache)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x46",
+ "EventName": "MEMORY_STALLS.L3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts cycles where no execution is happening due to loads waiting for Memory (that is: no execution & load in flight & a load missed L3 cache)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x46",
+ "EventName": "MEMORY_STALLS.MEM",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "LFENCE instructions retired",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xe0",
@@ -1443,10 +1777,20 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of LBR entries recorded. Requires LBRs to be enabled in IA32_LBR_CTL.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe4",
+ "EventName": "MISC_RETIRED.LBR_INSERTS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "LBR record is inserted",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xe4",
"EventName": "MISC_RETIRED.LBR_INSERTS",
+ "PublicDescription": "LBR record is inserted Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -1461,6 +1805,154 @@
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "Counts the number of CLFLUSH, CLWB, and CLDEMOTE instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe0",
+ "EventName": "MISC_RETIRED1.CL_INST",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xff",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of LFENCE instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe0",
+ "EventName": "MISC_RETIRED1.LFENCE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of RDPMC, RDTSC, and RDTSCP instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe0",
+ "EventName": "MISC_RETIRED1.RDPMC_RDTSC_P",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Count the number of WRMSR instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe0",
+ "EventName": "MISC_RETIRED1.WRMSR",
+ "SampleAfterValue": "1000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of faults and software interrupts with vector < 32.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe1",
+ "EventName": "MISC_RETIRED2.FAULT_ALL",
+ "PublicDescription": "Counts the number of faults and software interrupts with vector < 32, including VOE cases.",
+ "SampleAfterValue": "1000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of PSB+ nuke events and ToPA trap events.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe1",
+ "EventName": "MISC_RETIRED2.INTEL_PT_CLEARS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of user interrupts delivered.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe1",
+ "EventName": "MISC_RETIRED2.ULI_DELIVERY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of SENDUIPI instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe1",
+ "EventName": "MISC_RETIRED2.ULI_SENDUIPI",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x9",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of VM exits.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe1",
+ "EventName": "MISC_RETIRED2.VM_EXIT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY_COUNT",
+ "Invert": "1",
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when RS was empty and a resource allocation stall is asserted",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY_RESOURCE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x75",
+ "EventName": "SERIALIZATION.C01_MS_SCB",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x75",
+ "EventName": "SERIALIZATION.C01_MS_SCB",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4",
+ "Unit": "cpu_lowpower"
+ },
+ {
+ "BriefDescription": "Counts the number issue slots not consumed due to a color request for an FCW or MXCSR control register when all 4 colors (copies) are already in use",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x75",
+ "EventName": "SERIALIZATION.COLOR_STALLS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots where no uop could issue due to an IQ scoreboard that stalls allocation until a specified older uop retires or (in the case of jump scoreboard) executes. Commonly executed instructions with IQ scoreboards include LFENCE and MFENCE.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x75",
+ "EventName": "SERIALIZATION.IQ_JEU_SCB",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of issue slots not consumed by the backend due to a micro-sequencer (MS) scoreboard, which stalls the front-end from issuing from the UROM until a specified older uop retires.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x75",
@@ -1529,6 +2021,15 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Fixed Counter: Counts the number of issue slots not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear.",
+ "Counter": "Fixed counter 4",
+ "EventName": "TOPDOWN_BAD_SPECULATION.ALL",
+ "PublicDescription": "Fixed Counter: Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the IQ. Also, includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x73",
@@ -1645,6 +2146,14 @@
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "Counts the number of retirement slots not consumed due to backend stalls",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.ALL_NON_ARCH",
+ "SampleAfterValue": "1000003",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of retirement slots not consumed due to backend stalls [This event is alias to TOPDOWN_BE_BOUND.ALL]",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
@@ -1753,7 +2262,7 @@
},
{
"BriefDescription": "Fixed Counter: Counts the number of retirement slots not consumed due to front end stalls.",
- "Counter": "37",
+ "Counter": "Fixed counter 5",
"EventName": "TOPDOWN_FE_BOUND.ALL",
"SampleAfterValue": "1000003",
"UMask": "0x6",
@@ -1762,6 +2271,14 @@
{
"BriefDescription": "Counts the number of retirement slots not consumed due to front end stalls",
"Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.ALL_NON_ARCH",
+ "SampleAfterValue": "1000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of retirement slots not consumed due to front end stalls",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x9c",
"EventName": "TOPDOWN_FE_BOUND.ALL_P",
"SampleAfterValue": "1000003",
@@ -1950,7 +2467,7 @@
},
{
"BriefDescription": "Fixed Counter: Counts the number of consumed retirement slots.",
- "Counter": "38",
+ "Counter": "Fixed counter 6",
"EventName": "TOPDOWN_RETIRING.ALL",
"SampleAfterValue": "1000003",
"UMask": "0x7",
@@ -1959,6 +2476,14 @@
{
"BriefDescription": "Counts the number of consumed retirement slots.",
"Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x72",
+ "EventName": "TOPDOWN_RETIRING.ALL_NON_ARCH",
+ "SampleAfterValue": "1000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of consumed retirement slots.",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "TOPDOWN_RETIRING.ALL_P",
"SampleAfterValue": "1000003",
@@ -2176,6 +2701,14 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of uops retired",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.ALL",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the total number of uops retired.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
@@ -2223,6 +2756,15 @@
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "Counts the number of uops retired that were delivered by the loop stream detector (LSD).",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.LSD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of uops that are from the complex flows issued by the micro-sequencer (MS). This includes uops from flows due to complex instructions, faults, assists, and inserted flows.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
diff --git a/tools/perf/pmu-events/arch/x86/arrowlake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/arrowlake/virtual-memory.json
index a3e4a4f3ab45..602e2ad5de6e 100644
--- a/tools/perf/pmu-events/arch/x86/arrowlake/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/arrowlake/virtual-memory.json
@@ -9,6 +9,15 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts walks that miss the PDE_CACHE",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.PDE_CACHE_MISS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x80",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of first level TLB misses but second level hits due to a demand load that did not start a page walk. Accounts for all page sizes. Will result in a DTLB write from STLB.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
@@ -48,6 +57,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of page walks completed due to load DTLB misses to any page size.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x12",
@@ -176,6 +195,15 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts walks that miss the PDE_CACHE",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.PDE_CACHE_MISS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of first level TLB misses but second level hits due to stores that did not start a page walk. Accounts for all page sizes. Will result in a DTLB write from STLB.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x49",
@@ -216,6 +244,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to any page size.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0xe",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x13",
@@ -245,6 +283,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 2M or 4M page.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x13",
@@ -325,6 +373,16 @@
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "Counts the number of times there was an ITLB miss and a new translation was filled into the ITLB.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x81",
+ "EventName": "ITLB.FILLS",
+ "PublicDescription": "Counts the number of times the machine was unable to find a translation in the Instruction Translation Lookaside Buffer (ITLB) and a new translation was filled into the ITLB. The event is speculative in nature, but will not count translations (page walks) that are begun and not finished, or translations that are finished but not filled into the ITLB.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of page walks initiated by a instruction fetch that missed the first and second level TLBs.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
@@ -343,6 +401,15 @@
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "Counts walks that miss the PDE_CACHE",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.PDE_CACHE_MISS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of first level TLB misses but second level hits due to an instruction fetch that did not start a page walk. Account for all pages sizes. Will result in an ITLB write from STLB.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
@@ -502,6 +569,24 @@
"Unit": "cpu_lowpower"
},
{
+ "BriefDescription": "Counts the number of occurrences a load gets blocked because of a micro TLB miss",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.DTLB_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to a DTLB miss",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.DTLB_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a DTLB miss.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
@@ -518,5 +603,33 @@
"SampleAfterValue": "1000003",
"UMask": "0x90",
"Unit": "cpu_lowpower"
+ },
+ {
+ "BriefDescription": "Counts the number of PMH walks that hit in the L1 or WCBs",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xbc",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L1_HIT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of PMH walks that hit in the L2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xbc",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L2_HIT",
+ "PublicDescription": "Counts the number of PMH walks that hit in the L2. Includes L2 Hit resulting from and L1D eviction of another core in the same module which is longer latency than a typical L2 hit.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Count number of any STLB flush attempts (Entire, PCID, InvPage, CR3 write, etc)",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xbd",
+ "EventName": "TLB_FLUSHES.STLB_ANY",
+ "SampleAfterValue": "20003",
+ "UMask": "0x20",
+ "Unit": "cpu_atom"
}
]
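
As a usage note, the new WALK_COMPLETED events pair naturally with an instruction count to derive page walks per kilo-instruction, a common way to judge TLB pressure. A short sketch, assuming both counts come from the same run (the raw event string follows the DTLB_LOAD_MISSES.WALK_COMPLETED encoding added above):

    # Sketch: DTLB load page walks completed per 1000 retired instructions.
    # Counts are assumed to come from a single run, for example:
    #   perf stat -e cpu_atom/event=0x08,umask=0xe/,instructions -- <workload>
    def walks_per_kilo_inst(walks_completed, instructions):
        return 1000.0 * walks_completed / instructions

    print(round(walks_per_kilo_inst(12_345, 9_876_543), 2))    # 1.25
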
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/other.json b/tools/perf/pmu-events/arch/x86/bonnell/other.json
index 3a55c101fbf7..6e6f64b96834 100644
--- a/tools/perf/pmu-events/arch/x86/bonnell/other.json
+++ b/tools/perf/pmu-events/arch/x86/bonnell/other.json
@@ -324,14 +324,6 @@
"UMask": "0x2"
},
{
- "BriefDescription": "Memory cluster signals to block micro-op dispatch for any reason",
- "Counter": "0,1",
- "EventCode": "0x9",
- "EventName": "DISPATCH_BLOCKED.ANY",
- "SampleAfterValue": "200000",
- "UMask": "0x20"
- },
- {
"BriefDescription": "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions",
"Counter": "0,1",
"EventCode": "0x3A",
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json b/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json
index 9ff032ab11e2..48d3d053a369 100644
--- a/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json
@@ -212,6 +212,14 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Memory cluster signals to block micro-op dispatch for any reason",
+ "Counter": "0,1",
+ "EventCode": "0x9",
+ "EventName": "DISPATCH_BLOCKED.ANY",
+ "SampleAfterValue": "200000",
+ "UMask": "0x20"
+ },
+ {
"BriefDescription": "Divide operations retired",
"Counter": "0,1",
"EventCode": "0x13",
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json b/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
index 40970fa5566c..1d8e910f5961 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
@@ -1,49 +1,49 @@
[
{
"BriefDescription": "C2 residency percent per package",
- "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per core",
- "MetricExpr": "cstate_core@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per package",
- "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per core",
- "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per package",
- "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per core",
- "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per package",
- "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Pkg_Residency",
"ScaleUnit": "100%"
@@ -74,13 +74,12 @@
"MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_4k_aliasing",
- "MetricThreshold": "tma_4k_aliasing > 0.2 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound)",
+ "MetricThreshold": "tma_4k_aliasing > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
"MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5 + UOPS_DISPATCHED_PORT.PORT_6) / tma_info_thread_slots",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_alu_op_utilization",
@@ -92,19 +91,18 @@
"MetricExpr": "66 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_thread_slots",
"MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
- "MetricThreshold": "tma_assists > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
- "PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: OTHER_ASSISTS.ANY_WB_ASSIST",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: OTHER_ASSISTS.ANY",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "1 - (tma_frontend_bound + tma_bad_speculation + tma_retiring)",
"MetricGroup": "BvOB;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
"ScaleUnit": "100%"
},
{
@@ -114,7 +112,7 @@
"MetricName": "tma_bad_speculation",
"MetricThreshold": "tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL1",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"ScaleUnit": "100%"
},
{
@@ -125,7 +123,7 @@
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_mispredicts_resteers",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_bad_spec_branch_misprediction_cost, tma_mispredicts_resteers",
"ScaleUnit": "100%"
},
{
@@ -133,18 +131,17 @@
"MetricExpr": "12 * (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY) / tma_info_thread_clks",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_branch_resteers",
- "MetricThreshold": "tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_l3_hit_latency, tma_store_latency",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
"MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_cisc",
- "MetricThreshold": "tma_cisc > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
- "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
"ScaleUnit": "100%"
},
{
@@ -152,7 +149,7 @@
"MetricExpr": "MACHINE_CLEARS.COUNT * tma_branch_resteers / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY)",
"MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
"MetricName": "tma_clears_resteers",
- "MetricThreshold": "tma_clears_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
"ScaleUnit": "100%"
},
@@ -162,8 +159,8 @@
"MetricExpr": "(60 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) + 43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS)))) / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
- "MetricThreshold": "tma_contested_accesses > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM, MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
@@ -174,7 +171,7 @@
"MetricName": "tma_core_bound",
"MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations)",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
"ScaleUnit": "100%"
},
{
@@ -183,8 +180,8 @@
"MetricExpr": "43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) / tma_info_thread_clks",
"MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
- "MetricThreshold": "tma_data_sharing > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
@@ -192,8 +189,8 @@
"MetricExpr": "ARITH.FPU_DIV_ACTIVE / tma_info_core_core_clks",
"MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
- "MetricThreshold": "tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.FPU_DIV_ACTIVE",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
"ScaleUnit": "100%"
},
{
@@ -202,8 +199,8 @@
"MetricExpr": "(1 - MEM_LOAD_UOPS_RETIRED.L3_HIT / (MEM_LOAD_UOPS_RETIRED.L3_HIT + 7 * MEM_LOAD_UOPS_RETIRED.L3_MISS)) * CYCLE_ACTIVITY.STALLS_L2_MISS / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_dram_bound",
- "MetricThreshold": "tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_MISS",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_MISS_PS",
"ScaleUnit": "100%"
},
{
@@ -212,7 +209,7 @@
"MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_dsb",
"MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2",
- "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
"ScaleUnit": "100%"
},
{
@@ -220,26 +217,26 @@
"MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_thread_clks",
"MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_dsb_switches",
- "MetricThreshold": "tma_dsb_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Related metrics: tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
- "MetricExpr": "(8 * DTLB_LOAD_MISSES.STLB_HIT + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=0x1@ + 7 * DTLB_LOAD_MISSES.WALK_COMPLETED) / tma_info_thread_clks",
+ "MetricExpr": "(8 * DTLB_LOAD_MISSES.STLB_HIT + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * DTLB_LOAD_MISSES.WALK_COMPLETED) / tma_info_thread_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
- "MetricThreshold": "tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_UOPS_RETIRED.STLB_MISS_LOADS. Related metrics: tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_UOPS_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
- "MetricExpr": "(8 * DTLB_STORE_MISSES.STLB_HIT + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=0x1@ + 7 * DTLB_STORE_MISSES.WALK_COMPLETED) / tma_info_thread_clks",
+ "MetricExpr": "(8 * DTLB_STORE_MISSES.STLB_HIT + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * DTLB_STORE_MISSES.WALK_COMPLETED) / tma_info_thread_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
- "MetricThreshold": "tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_UOPS_RETIRED.STLB_MISS_STORES. Related metrics: tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_UOPS_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load",
"ScaleUnit": "100%"
},
{
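For reference, the updated tma_dtlb_load expression charges 8 cycles per STLB hit, adds the cycles with at least one load page walk in flight (the cmask=1 qualifier), plus 7 cycles per completed walk, normalized to thread clocks. A worked instance with assumed counts:

# Assumed sample counts, not measured data.
stlb_hit, walk_cycles, walks_done, clks = 2_500, 40_000, 2_000, 1_000_000
tma_dtlb_load = (8 * stlb_hit + walk_cycles + 7 * walks_done) / clks
print(f"{100 * tma_dtlb_load:.1f}% of cycles")  # 7.4% of cycles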
@@ -247,18 +244,18 @@
"MetricExpr": "60 * OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
- "MetricThreshold": "tma_false_sharing > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM, OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "tma_info_memory_load_miss_real_latency * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=0x1@ / tma_info_thread_clks",
+ "MetricExpr": "tma_info_memory_load_miss_real_latency * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
- "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
"ScaleUnit": "100%"
},
{
@@ -287,7 +284,7 @@
"MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fp_arith",
"MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
"ScaleUnit": "100%"
},
{
@@ -295,8 +292,8 @@
"MetricExpr": "FP_ARITH_INST_RETIRED.SCALAR / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_scalar",
- "MetricThreshold": "tma_fp_scalar > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -304,8 +301,8 @@
"MetricExpr": "FP_ARITH_INST_RETIRED.VECTOR / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_vector",
- "MetricThreshold": "tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -313,8 +310,8 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE) / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_128b",
- "MetricThreshold": "tma_fp_vector_128b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -322,8 +319,8 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_256b",
- "MetricThreshold": "tma_fp_vector_256b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -333,33 +330,33 @@
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
"MetricExpr": "tma_microcode_sequencer",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_heavy_operations",
"MetricThreshold": "tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+])",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+])",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.",
"MetricExpr": "ICACHE.IFDATA_STALL / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
- "MetricThreshold": "tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
"MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * BR_MISP_EXEC.INDIRECT)",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_indirect",
- "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1000"
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3"
},
{
"BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
@@ -370,7 +367,7 @@
},
{
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
- "MetricExpr": "(CPU_CLK_UNHALTED.THREAD_ANY / 2 if #SMT_on else tma_info_thread_clks)",
+ "MetricExpr": "(CPU_CLK_UNHALTED.THREAD / 2 * (1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK) if #core_wide < 1 else (CPU_CLK_UNHALTED.THREAD_ANY / 2 if #SMT_on else tma_info_thread_clks))",
"MetricGroup": "SMT",
"MetricName": "tma_info_core_core_clks"
},
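The core-clocks expression above gains a branch for the case where counters are not core-wide. Mirrored in plain Python (the parameter names stand in for the events and literals in the MetricExpr; this only models the arithmetic, not how perf evaluates it):

def core_clks(thread, thread_any, one_thread_active, ref_xclk,
              core_wide, smt_on):
    if core_wide < 1:
        # Per-thread counters only: scale this thread's clocks up by how
        # often a single thread was active on the core.
        return thread / 2 * (1 + one_thread_active / ref_xclk)
    return thread_any / 2 if smt_on else thread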
@@ -391,11 +388,11 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED.VECTOR) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
- "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)"
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=0x1@",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "tma_info_core_ilp"
},
@@ -420,7 +417,7 @@
"MetricName": "tma_info_frontend_tbpc"
},
{
- "BriefDescription": "Branch instructions per taken branch",
+ "BriefDescription": "Branch instructions per taken branch.",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;PGO",
"MetricName": "tma_info_inst_mix_bptkbranch"
@@ -438,7 +435,7 @@
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
"MetricThreshold": "tma_info_inst_mix_iparith < 10",
- "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW"
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
@@ -446,7 +443,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx128",
"MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
@@ -454,7 +451,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx256",
"MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
@@ -462,7 +459,7 @@
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_dp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
@@ -470,7 +467,7 @@
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_sp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
@@ -512,7 +509,7 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
- "MetricThreshold": "tma_info_inst_mix_iptb < 4 * 2 + 1",
+ "MetricThreshold": "tma_info_inst_mix_iptb < 9",
"PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_lcp"
},
{
@@ -634,26 +631,26 @@
},
{
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "(cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=0x1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=0x1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=0x1@ + 7 * (DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED)) / tma_info_core_core_clks",
+ "MetricExpr": "(cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * (DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED)) / tma_info_core_core_clks",
"MetricGroup": "Mem;MemoryTLB",
"MetricName": "tma_info_memory_tlb_page_walks_utilization",
"MetricThreshold": "tma_info_memory_tlb_page_walks_utilization > 0.5"
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per core",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
+ "BriefDescription": "Mem;Backend;CacheHits",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute"
},
{
- "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=0x1@",
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
"MetricGroup": "Pipeline;Ret",
"MetricName": "tma_info_pipeline_retire"
},
{
"BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]",
- "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / tma_info_system_time",
+ "MetricExpr": "tma_info_system_turbo_utilization * msr@tsc@ / 1e9 / tma_info_system_time",
"MetricGroup": "Power;Summary",
"MetricName": "tma_info_system_core_frequency"
},
@@ -665,7 +662,7 @@
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
@@ -688,14 +685,13 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
"MetricGroup": "Branches;OS",
"MetricName": "tma_info_system_ipfarbranch",
- "MetricThreshold": "tma_info_system_ipfarbranch < 1000000"
+ "MetricThreshold": "tma_info_system_ipfarbranch < 1e6"
},
{
"BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
"MetricGroup": "OS",
- "MetricName": "tma_info_system_kernel_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_system_kernel_cpi"
},
{
"BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
@@ -743,7 +739,7 @@
"MetricName": "tma_info_system_turbo_utilization"
},
{
- "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active",
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Pipeline",
"MetricName": "tma_info_thread_clks"
@@ -752,15 +748,14 @@
"BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / tma_info_thread_ipc",
"MetricGroup": "Mem;Pipeline",
- "MetricName": "tma_info_thread_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_thread_cpi"
},
{
"BriefDescription": "The ratio of Executed- by Issued-Uops",
"MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
"MetricGroup": "Cor;Pipeline",
"MetricName": "tma_info_thread_execute_per_issue",
- "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage"
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
},
{
"BriefDescription": "Instructions Per Cycle (per Logical Processor)",
@@ -786,14 +781,14 @@
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
- "MetricThreshold": "tma_info_thread_uptb < 4 * 1.5"
+ "MetricThreshold": "tma_info_thread_uptb < 6"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
- "MetricExpr": "(14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=0x1@ + 7 * ITLB_MISSES.WALK_COMPLETED) / tma_info_thread_clks",
+ "MetricExpr": "(14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ITLB_MISSES.WALK_COMPLETED) / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
- "MetricThreshold": "tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: ITLB_MISSES.WALK_COMPLETED",
"ScaleUnit": "100%"
},
@@ -802,8 +797,8 @@
"MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_thread_clks, 0)",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
"MetricName": "tma_l1_bound",
- "MetricThreshold": "tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 Data (L1D) cache. The L1D cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1D. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_UOPS_RETIRED.L1_HIT. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 Data (L1D) cache. The L1D cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1D. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_UOPS_RETIRED.L1_HIT_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
"ScaleUnit": "100%"
},
{
@@ -811,8 +806,8 @@
"MetricExpr": "(CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks",
"MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
- "MetricThreshold": "tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L2_HIT_PS",
"ScaleUnit": "100%"
},
{
@@ -821,8 +816,8 @@
"MetricExpr": "MEM_LOAD_UOPS_RETIRED.L3_HIT / (MEM_LOAD_UOPS_RETIRED.L3_HIT + 7 * MEM_LOAD_UOPS_RETIRED.L3_MISS) * CYCLE_ACTIVITY.STALLS_L2_MISS / tma_info_thread_clks",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
- "MetricThreshold": "tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS",
"ScaleUnit": "100%"
},
{
@@ -831,8 +826,8 @@
"MetricExpr": "29 * (MEM_LOAD_UOPS_RETIRED.L3_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
- "MetricThreshold": "tma_l3_hit_latency > 0.1 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT. Related metrics: tma_branch_resteers, tma_mem_latency, tma_store_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency",
"ScaleUnit": "100%"
},
{
@@ -840,23 +835,22 @@
"MetricExpr": "ILD_STALL.LCP / tma_info_thread_clks",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_lcp",
- "MetricThreshold": "tma_lcp > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation)",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
"MetricExpr": "tma_retiring - tma_heavy_operations",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_light_operations",
"MetricThreshold": "tma_light_operations > 0.6",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_2 + UOPS_DISPATCHED_PORT.PORT_3 + UOPS_DISPATCHED_PORT.PORT_7 - UOPS_DISPATCHED_PORT.PORT_4) / (2 * tma_info_core_core_clks)",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_load_op_utilization",
@@ -870,8 +864,8 @@
"MetricExpr": "MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO) / tma_info_thread_clks",
"MetricGroup": "LockCont;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
- "MetricThreshold": "tma_lock_latency > 0.2 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_UOPS_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_UOPS_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
"ScaleUnit": "100%"
},
{
@@ -882,15 +876,15 @@
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
- "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=0x4@) / tma_info_thread_clks",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
- "MetricThreshold": "tma_mem_bandwidth > 0.2 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
"ScaleUnit": "100%"
},
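The cmask\=4 qualifier above (rewritten from hex 0x4 to decimal) turns the event into a cycle counter: it counts cycles in which the event incremented at least four times, i.e. cycles with four or more outstanding demand data reads. A toy model of that semantics over an invented per-cycle trace:

# cmask=N counts cycles where the underlying event fired >= N times.
outstanding_per_cycle = [0, 1, 5, 6, 2, 4, 7, 0]  # invented trace
clks = len(outstanding_per_cycle)
cycles_ge_4 = sum(1 for n in outstanding_per_cycle if n >= 4)
tma_mem_bandwidth_like = min(clks, cycles_ge_4) / clks
print(tma_mem_bandwidth_like)  # 0.5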
@@ -899,7 +893,7 @@
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
"MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
- "MetricThreshold": "tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency",
"ScaleUnit": "100%"
},
@@ -911,7 +905,7 @@
"MetricName": "tma_memory_bound",
"MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two)",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
"ScaleUnit": "100%"
},
{
@@ -928,8 +922,8 @@
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES * tma_branch_resteers / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY)",
"MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
"MetricName": "tma_mispredicts_resteers",
- "MetricThreshold": "tma_mispredicts_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Related metrics: tma_branch_mispredicts",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost",
"ScaleUnit": "100%"
},
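tma_mispredicts_resteers above splits the total branch-resteer cycles among the resteer causes in proportion to their trigger counts; the tma_clears_resteers metric elsewhere in these files uses MACHINE_CLEARS.COUNT as its numerator in the same way. A sketch with invented counts:

# Proportional attribution of resteer cycles; all numbers invented.
br_misp, clears, baclears = 8_000, 1_500, 500
tma_branch_resteers = 0.06
share = br_misp / (br_misp + clears + baclears)
tma_mispredicts_resteers = share * tma_branch_resteers
print(round(tma_mispredicts_resteers, 3))  # 0.048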
{
@@ -938,7 +932,7 @@
"MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_mite",
"MetricThreshold": "tma_mite > 0.1 & tma_fetch_bandwidth > 0.2",
- "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck.",
"ScaleUnit": "100%"
},
{
@@ -946,8 +940,8 @@
"MetricExpr": "2 * IDQ.MS_SWITCHES / tma_info_thread_clks",
"MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
"MetricName": "tma_ms_switches",
- "MetricThreshold": "tma_ms_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
"ScaleUnit": "100%"
},
{
@@ -956,7 +950,7 @@
"MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_0",
"MetricThreshold": "tma_port_0 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED_PORT.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -965,7 +959,7 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_1",
"MetricThreshold": "tma_port_1 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1001,7 +995,7 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_5",
"MetricThreshold": "tma_port_5 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1010,7 +1004,7 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_6",
"MetricThreshold": "tma_port_6 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1028,43 +1022,43 @@
"MetricExpr": "(CYCLE_ACTIVITY.STALLS_TOTAL + UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - (UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC if tma_info_thread_ipc > 1.8 else UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB - RESOURCE_STALLS.SB - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_ports_utilization",
- "MetricThreshold": "tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,inv\\=0x1\\,cmask\\=0x1@ / 2 if #SMT_on else CYCLE_ACTIVITY.STALLS_TOTAL - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0)) / tma_info_core_core_clks",
+ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,inv\\,cmask\\=1@ / 2 if #SMT_on else CYCLE_ACTIVITY.STALLS_TOTAL - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0)) / tma_info_core_core_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
- "MetricThreshold": "tma_ports_utilized_0 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x2@) / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC) / tma_info_core_core_clks",
+ "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@) / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC) / tma_info_core_core_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_1",
- "MetricThreshold": "tma_ports_utilized_1 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Related metrics: tma_l1_bound",
"ScaleUnit": "100%"
},
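The subtraction in the tma_ports_utilized_1 expression above is the standard cmask trick for "exactly N": cycles(cmask=1) counts cycles with at least one executed uop and cycles(cmask=2) counts cycles with at least two, so their difference is cycles with exactly one. A toy check on an invented trace:

uops_per_cycle = [0, 1, 3, 1, 2, 0, 1]  # invented
ge1 = sum(1 for u in uops_per_cycle if u >= 1)
ge2 = sum(1 for u in uops_per_cycle if u >= 2)
assert ge1 - ge2 == sum(1 for u in uops_per_cycle if u == 1)  # exactly one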
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x2@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x3@) / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC - UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_core_clks",
+ "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@) / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC - UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_core_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_2",
- "MetricThreshold": "tma_ports_utilized_2 > 0.15 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x3@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_core_clks",
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).",
+ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_core_clks",
"MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
- "MetricThreshold": "tma_ports_utilized_3m > 0.4 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
{
@@ -1084,7 +1078,7 @@
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_split_loads",
"MetricThreshold": "tma_split_loads > 0.3",
- "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_UOPS_RETIRED.SPLIT_LOADS_PS",
"ScaleUnit": "100%"
},
{
@@ -1092,8 +1086,8 @@
"MetricExpr": "2 * MEM_UOPS_RETIRED.SPLIT_STORES / tma_info_core_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
"MetricName": "tma_split_stores",
- "MetricThreshold": "tma_split_stores > 0.2 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_UOPS_RETIRED.SPLIT_STORES. Related metrics: tma_port_4",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_UOPS_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
"ScaleUnit": "100%"
},
{
@@ -1101,7 +1095,7 @@
"MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_core_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
- "MetricThreshold": "tma_sq_full > 0.3 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
"ScaleUnit": "100%"
},
@@ -1110,8 +1104,8 @@
"MetricExpr": "RESOURCE_STALLS.SB / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_store_bound",
- "MetricThreshold": "tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_UOPS_RETIRED.ALL_STORES",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_UOPS_RETIRED.ALL_STORES_PS",
"ScaleUnit": "100%"
},
{
@@ -1119,8 +1113,8 @@
"MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
- "MetricThreshold": "tma_store_fwd_blk > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
"ScaleUnit": "100%"
},
{
@@ -1129,8 +1123,8 @@
"MetricExpr": "(L2_RQSTS.RFO_HIT * 9 * (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) + (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
"MetricGroup": "BvML;LockCont;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
- "MetricThreshold": "tma_store_latency > 0.1 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_branch_resteers, tma_fb_full, tma_l3_hit_latency, tma_lock_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
"ScaleUnit": "100%"
},
{
@@ -1146,7 +1140,7 @@
"MetricExpr": "tma_branch_resteers - tma_mispredicts_resteers - tma_clears_resteers",
"MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
- "MetricThreshold": "tma_unknown_branches > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: BACLEARS.ANY",
"ScaleUnit": "100%"
},
@@ -1155,8 +1149,8 @@
"MetricExpr": "INST_RETIRED.X87 * tma_info_thread_uoppi / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
"MetricName": "tma_x87_use",
- "MetricThreshold": "tma_x87_use > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
"ScaleUnit": "100%"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
index b03a5f2bcd82..a5e408ca46a7 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
@@ -1,49 +1,49 @@
[
{
"BriefDescription": "C2 residency percent per package",
- "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per core",
- "MetricExpr": "cstate_core@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per package",
- "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per core",
- "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per package",
- "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per core",
- "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per package",
- "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Pkg_Residency",
"ScaleUnit": "100%"
@@ -74,17 +74,16 @@
"MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_4k_aliasing",
- "MetricThreshold": "(tma_4k_aliasing > 0.2) & ((tma_l1_bound > 0.1) & ((tma_memory_bound > 0.2) & ((tma_backend_bound > 0.2))))",
+ "MetricThreshold": "tma_4k_aliasing > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).",
"ScaleUnit": "100%"
},
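The false match described above happens when a load and an earlier store land on the same 4 KiB page offset, i.e. agree in the low 12 address bits while differing above them. A quick check with made-up addresses:

# Two accesses 0x4000 apart: different cache lines, same 4K offset.
store_addr = 0x7f0000101040
load_addr = 0x7f0000105040
aliases = ((store_addr ^ load_addr) & 0xFFF) == 0
print(aliases)  # True -> candidate for the few-cycle load re-issue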
{
"BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5 + UOPS_DISPATCHED_PORT.PORT_6) / tma_info_thread_slots",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_alu_op_utilization",
- "MetricThreshold": "(tma_alu_op_utilization > 0.4)",
+ "MetricThreshold": "tma_alu_op_utilization > 0.4",
"ScaleUnit": "100%"
},
{
@@ -92,17 +91,16 @@
"MetricExpr": "66 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_thread_slots",
"MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
- "MetricThreshold": "(tma_assists > 0.1) & ((tma_microcode_sequencer > 0.05) & ((tma_heavy_operations > 0.1)))",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: ASSISTS.ANY",
"ScaleUnit": "100%"
},
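tma_assists above charges a flat 66 issue slots per assist, so a modest assist count already translates into a visible slot fraction. With invented counts:

assists, slots = 1_500, 2_000_000  # invented
tma_assists = 66 * assists / slots
print(tma_assists)  # 0.0495, just under the 0.1 threshold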
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "1 - (tma_frontend_bound + tma_bad_speculation + tma_retiring)",
"MetricGroup": "BvOB;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
- "MetricThreshold": "(tma_backend_bound > 0.2)",
+ "MetricThreshold": "tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1",
"PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. Sample with: TOPDOWN.BACKEND_BOUND_SLOTS",
"ScaleUnit": "100%"
@@ -112,7 +110,7 @@
"MetricExpr": "(UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (INT_MISC.RECOVERY_CYCLES_ANY / 2 if #SMT_on else INT_MISC.RECOVERY_CYCLES)) / tma_info_thread_slots",
"MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_bad_speculation",
- "MetricThreshold": "(tma_bad_speculation > 0.15)",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL1",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"ScaleUnit": "100%"
@@ -123,7 +121,7 @@
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
"MetricGroup": "BadSpec;BrMispredicts;BvMP;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
"MetricName": "tma_branch_mispredicts",
- "MetricThreshold": "(tma_branch_mispredicts > 0.1) & ((tma_bad_speculation > 0.15))",
+ "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
"PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: TOPDOWN.BR_MISPREDICT_SLOTS. Related metrics: tma_info_bad_spec_branch_misprediction_cost, tma_mispredicts_resteers",
"ScaleUnit": "100%"
@@ -133,17 +131,16 @@
"MetricExpr": "12 * (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY) / tma_info_thread_clks",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_branch_resteers",
- "MetricThreshold": "(tma_branch_resteers > 0.05) & ((tma_fetch_latency > 0.1) & ((tma_frontend_bound > 0.15)))",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
"MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_cisc",
- "MetricThreshold": "(tma_cisc > 0.1) & ((tma_microcode_sequencer > 0.05) & ((tma_heavy_operations > 0.1)))",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
"ScaleUnit": "100%"
},
@@ -152,7 +149,7 @@
"MetricExpr": "MACHINE_CLEARS.COUNT * tma_branch_resteers / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY)",
"MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
"MetricName": "tma_clears_resteers",
- "MetricThreshold": "(tma_clears_resteers > 0.05) & ((tma_branch_resteers > 0.05) & ((tma_fetch_latency > 0.1) & ((tma_frontend_bound > 0.15))))",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
"ScaleUnit": "100%"
},
@@ -162,7 +159,7 @@
"MetricExpr": "(60 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) + 43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS)))) / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
- "MetricThreshold": "(tma_contested_accesses > 0.05) & ((tma_l3_bound > 0.05) & ((tma_memory_bound > 0.2) & ((tma_backend_bound > 0.2))))",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
@@ -172,7 +169,7 @@
"MetricExpr": "tma_backend_bound - tma_memory_bound",
"MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_core_bound",
- "MetricThreshold": "(tma_core_bound > 0.1) & ((tma_backend_bound > 0.2))",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
"PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
"ScaleUnit": "100%"
@@ -183,7 +180,7 @@
"MetricExpr": "43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) / tma_info_thread_clks",
"MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
- "MetricThreshold": "(tma_data_sharing > 0.05) & ((tma_l3_bound > 0.05) & ((tma_memory_bound > 0.2) & ((tma_backend_bound > 0.2))))",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
@@ -192,7 +189,7 @@
"MetricExpr": "ARITH.FPU_DIV_ACTIVE / tma_info_core_core_clks",
"MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
- "MetricThreshold": "(tma_divider > 0.2) & ((tma_core_bound > 0.1) & ((tma_backend_bound > 0.2)))",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIV_ACTIVE",
"ScaleUnit": "100%"
},
@@ -202,7 +199,7 @@
"MetricExpr": "(1 - MEM_LOAD_UOPS_RETIRED.L3_HIT / (MEM_LOAD_UOPS_RETIRED.L3_HIT + 7 * MEM_LOAD_UOPS_RETIRED.L3_MISS)) * CYCLE_ACTIVITY.STALLS_L2_MISS / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_dram_bound",
- "MetricThreshold": "(tma_dram_bound > 0.1) & ((tma_memory_bound > 0.2) & ((tma_backend_bound > 0.2)))",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_MISS",
"ScaleUnit": "100%"
},
@@ -211,7 +208,7 @@
"MetricExpr": "(IDQ.ALL_DSB_CYCLES_ANY_UOPS - IDQ.ALL_DSB_CYCLES_4_UOPS) / tma_info_core_core_clks / 2",
"MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_dsb",
- "MetricThreshold": "(tma_dsb > 0.15) & ((tma_fetch_bandwidth > 0.2))",
+ "MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2",
"PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
"ScaleUnit": "100%"
},
@@ -220,7 +217,7 @@
"MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_thread_clks",
"MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_dsb_switches",
- "MetricThreshold": "(tma_dsb_switches > 0.05) & ((tma_fetch_latency > 0.1) & ((tma_frontend_bound > 0.15)))",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
@@ -229,7 +226,7 @@
"MetricExpr": "(8 * DTLB_LOAD_MISSES.STLB_HIT + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * DTLB_LOAD_MISSES.WALK_COMPLETED) / tma_info_thread_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
- "MetricThreshold": "(tma_dtlb_load > 0.1) & ((tma_l1_bound > 0.1) & ((tma_memory_bound > 0.2) & ((tma_backend_bound > 0.2))))",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store",
"ScaleUnit": "100%"
},
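The tma_dtlb_load cost model above charges 8 cycles per second-level TLB (STLB) hit, the measured page-walk duration cycles, and 7 extra cycles per completed walk, all normalized to thread clocks. With invented counters:

stlb_hit, walk_duration, walk_completed = 5_000, 60_000, 2_000  # invented
clks = 1_000_000
tma_dtlb_load = (8 * stlb_hit + walk_duration + 7 * walk_completed) / clks
print(tma_dtlb_load)  # 0.114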
@@ -238,7 +235,7 @@
"MetricExpr": "(8 * DTLB_STORE_MISSES.STLB_HIT + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * DTLB_STORE_MISSES.WALK_COMPLETED) / tma_info_thread_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
- "MetricThreshold": "(tma_dtlb_store > 0.05) & ((tma_store_bound > 0.2) & ((tma_memory_bound > 0.2) & ((tma_backend_bound > 0.2))))",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load",
"ScaleUnit": "100%"
},
@@ -248,7 +245,7 @@
"MetricExpr": "tma_info_memory_load_miss_real_latency * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
- "MetricThreshold": "(tma_fb_full > 0.3)",
+ "MetricThreshold": "tma_fb_full > 0.3",
"PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
"ScaleUnit": "100%"
},
@@ -257,7 +254,7 @@
"MetricExpr": "tma_frontend_bound - tma_fetch_latency",
"MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
"MetricName": "tma_fetch_bandwidth",
- "MetricThreshold": "(tma_fetch_bandwidth > 0.2)",
+ "MetricThreshold": "tma_fetch_bandwidth > 0.2",
"MetricgroupNoGroup": "TopdownL2",
"PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1;FRONTEND_RETIRED.LATENCY_GE_1;FRONTEND_RETIRED.LATENCY_GE_2. Related metrics: tma_dsb_switches, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
@@ -267,7 +264,7 @@
"MetricExpr": "4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / tma_info_thread_slots",
"MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
"MetricName": "tma_fetch_latency",
- "MetricThreshold": "(tma_fetch_latency > 0.1) & ((tma_frontend_bound > 0.15))",
+ "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL2",
"PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
"ScaleUnit": "100%"
@@ -277,7 +274,7 @@
"MetricExpr": "tma_x87_use + tma_fp_scalar + tma_fp_vector",
"MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fp_arith",
- "MetricThreshold": "(tma_fp_arith > 0.2) & ((tma_light_operations > 0.6))",
+ "MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
"PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
"ScaleUnit": "100%"
},
@@ -286,7 +283,7 @@
"MetricExpr": "FP_ARITH_INST_RETIRED.SCALAR / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_scalar",
- "MetricThreshold": "(tma_fp_scalar > 0.1) & ((tma_fp_arith > 0.2) & ((tma_light_operations > 0.6)))",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
"PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -295,7 +292,7 @@
"MetricExpr": "FP_ARITH_INST_RETIRED.VECTOR / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_vector",
- "MetricThreshold": "(tma_fp_vector > 0.1) & ((tma_fp_arith > 0.2) & ((tma_light_operations > 0.6)))",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
"PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -304,7 +301,7 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE) / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_128b",
- "MetricThreshold": "(tma_fp_vector_128b > 0.1) & ((tma_fp_vector > 0.1) & ((tma_fp_arith > 0.2) & ((tma_light_operations > 0.6))))",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
"PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -313,7 +310,7 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_256b",
- "MetricThreshold": "(tma_fp_vector_256b > 0.1) & ((tma_fp_vector > 0.1) & ((tma_fp_arith > 0.2) & ((tma_light_operations > 0.6))))",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
"PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -322,7 +319,7 @@
"MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / tma_info_thread_slots",
"MetricGroup": "BvFB;BvIO;PGO;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
- "MetricThreshold": "(tma_frontend_bound > 0.15)",
+ "MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1",
"PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
"ScaleUnit": "100%"
@@ -332,7 +329,7 @@
"MetricExpr": "tma_microcode_sequencer",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_heavy_operations",
- "MetricThreshold": "(tma_heavy_operations > 0.1)",
+ "MetricThreshold": "tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL2",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+]). Sample with: UOPS_RETIRED.HEAVY",
"ScaleUnit": "100%"
@@ -342,7 +339,7 @@
"MetricExpr": "ICACHE.IFDATA_STALL / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
- "MetricThreshold": "(tma_icache_misses > 0.05) & ((tma_fetch_latency > 0.1) & ((tma_frontend_bound > 0.15)))",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
"ScaleUnit": "100%"
},
@@ -351,14 +348,14 @@
"MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * BR_MISP_EXEC.INDIRECT)",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_indirect",
- "MetricThreshold": "(tma_info_bad_spec_ipmisp_indirect < 1000)"
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3"
},
{
"BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
"MetricGroup": "Bad;BadSpec;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmispredict",
- "MetricThreshold": "(tma_info_bad_spec_ipmispredict < 200)"
+ "MetricThreshold": "tma_info_bad_spec_ipmispredict < 200"
},
{
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
@@ -396,7 +393,7 @@
"MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
"MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
"MetricName": "tma_info_frontend_dsb_coverage",
- "MetricThreshold": "(tma_info_frontend_dsb_coverage < 0.7) & ((tma_info_thread_ipc / 4) > 0.35)",
+ "MetricThreshold": "tma_info_frontend_dsb_coverage < 0.7 & tma_info_thread_ipc / 4 > 0.35",
"PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_inst_mix_iptb, tma_lcp"
},
{
@@ -429,7 +426,7 @@
"MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED.VECTOR)",
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
- "MetricThreshold": "(tma_info_inst_mix_iparith < 10)",
+ "MetricThreshold": "tma_info_inst_mix_iparith < 10",
"PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW."
},
{
@@ -437,7 +434,7 @@
"MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE)",
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx128",
- "MetricThreshold": "(tma_info_inst_mix_iparith_avx128 < 10)",
+ "MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10",
"PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
@@ -445,7 +442,7 @@
"MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE)",
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx256",
- "MetricThreshold": "(tma_info_inst_mix_iparith_avx256 < 10)",
+ "MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10",
"PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
@@ -453,7 +450,7 @@
"MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_dp",
- "MetricThreshold": "(tma_info_inst_mix_iparith_scalar_dp < 10)",
+ "MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10",
"PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
@@ -461,7 +458,7 @@
"MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_sp",
- "MetricThreshold": "(tma_info_inst_mix_iparith_scalar_sp < 10)",
+ "MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10",
"PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
@@ -469,42 +466,42 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
"MetricGroup": "Branches;Fed;InsType",
"MetricName": "tma_info_inst_mix_ipbranch",
- "MetricThreshold": "(tma_info_inst_mix_ipbranch < 8)"
+ "MetricThreshold": "tma_info_inst_mix_ipbranch < 8"
},
{
"BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
"MetricGroup": "Branches;Fed;PGO",
"MetricName": "tma_info_inst_mix_ipcall",
- "MetricThreshold": "(tma_info_inst_mix_ipcall < 200)"
+ "MetricThreshold": "tma_info_inst_mix_ipcall < 200"
},
{
"BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)",
"MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * FP_ARITH_INST_RETIRED.4_FLOPS + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE)",
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_ipflop",
- "MetricThreshold": "(tma_info_inst_mix_ipflop < 10)"
+ "MetricThreshold": "tma_info_inst_mix_ipflop < 10"
},
{
"BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
"MetricGroup": "InsType",
"MetricName": "tma_info_inst_mix_ipload",
- "MetricThreshold": "(tma_info_inst_mix_ipload < 3)"
+ "MetricThreshold": "tma_info_inst_mix_ipload < 3"
},
{
"BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
"MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
"MetricGroup": "InsType",
"MetricName": "tma_info_inst_mix_ipstore",
- "MetricThreshold": "(tma_info_inst_mix_ipstore < 8)"
+ "MetricThreshold": "tma_info_inst_mix_ipstore < 8"
},
{
"BriefDescription": "Instructions per taken branch",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
- "MetricThreshold": "(tma_info_inst_mix_iptb < 4 * 2 + 1)",
+ "MetricThreshold": "tma_info_inst_mix_iptb < 9",
"PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_lcp"
},
{
@@ -629,10 +626,10 @@
"MetricExpr": "(cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * (DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED)) / tma_info_core_core_clks",
"MetricGroup": "Mem;MemoryTLB",
"MetricName": "tma_info_memory_tlb_page_walks_utilization",
- "MetricThreshold": "(tma_info_memory_tlb_page_walks_utilization > 0.5)"
+ "MetricThreshold": "tma_info_memory_tlb_page_walks_utilization > 0.5"
},
{
- "BriefDescription": "",
+ "BriefDescription": "Mem;Backend;CacheHits",
"MetricExpr": "UOPS_EXECUTED.THREAD / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute"
@@ -645,7 +642,7 @@
},
{
"BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]",
- "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / tma_info_system_time",
+ "MetricExpr": "tma_info_system_turbo_utilization * msr@tsc@ / 1e9 / tma_info_system_time",
"MetricGroup": "Power;Summary",
"MetricName": "tma_info_system_core_frequency"
},
@@ -657,7 +654,7 @@
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
@@ -680,7 +677,7 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
"MetricGroup": "Branches;OS",
"MetricName": "tma_info_system_ipfarbranch",
- "MetricThreshold": "(tma_info_system_ipfarbranch < 1000000)"
+ "MetricThreshold": "tma_info_system_ipfarbranch < 1e6"
},
{
"BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
@@ -693,14 +690,14 @@
"MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "OS",
"MetricName": "tma_info_system_kernel_utilization",
- "MetricThreshold": "(tma_info_system_kernel_utilization > 0.05)"
+ "MetricThreshold": "tma_info_system_kernel_utilization > 0.05"
},
{
"BriefDescription": "PerfMon Event Multiplexing accuracy indicator",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD_P / CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_mux",
- "MetricThreshold": "((tma_info_system_mux > 1.1)|(tma_info_system_mux < 0.9))"
+ "MetricThreshold": "tma_info_system_mux > 1.1 | tma_info_system_mux < 0.9"
},
{
"BriefDescription": "Total package Power in Watts",
@@ -725,7 +722,7 @@
"MetricExpr": "duration_time",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_time",
- "MetricThreshold": "(tma_info_system_time < 1)"
+ "MetricThreshold": "tma_info_system_time < 1"
},
{
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
@@ -769,21 +766,21 @@
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
"MetricGroup": "Pipeline;Ret;Retire",
"MetricName": "tma_info_thread_uoppi",
- "MetricThreshold": "(tma_info_thread_uoppi > 1.05)"
+ "MetricThreshold": "tma_info_thread_uoppi > 1.05"
},
{
"BriefDescription": "Uops per taken branch",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
- "MetricThreshold": "(tma_info_thread_uptb < 4 * 1.5)"
+ "MetricThreshold": "tma_info_thread_uptb < 6"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
"MetricExpr": "(14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ITLB_MISSES.WALK_COMPLETED) / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
- "MetricThreshold": "(tma_itlb_misses > 0.05) & ((tma_fetch_latency > 0.1) & ((tma_frontend_bound > 0.15)))",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
"ScaleUnit": "100%"
},
@@ -792,7 +789,7 @@
"MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_thread_clks, 0)",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
"MetricName": "tma_l1_bound",
- "MetricThreshold": "(tma_l1_bound > 0.1) & ((tma_memory_bound > 0.2) & ((tma_backend_bound > 0.2)))",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 Data (L1D) cache. The L1D cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1D. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
"ScaleUnit": "100%"
},
@@ -801,7 +798,7 @@
"MetricExpr": "(CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks",
"MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
- "MetricThreshold": "(tma_l2_bound > 0.05) & ((tma_memory_bound > 0.2) & ((tma_backend_bound > 0.2)))",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT",
"ScaleUnit": "100%"
},
@@ -811,7 +808,7 @@
"MetricExpr": "MEM_LOAD_UOPS_RETIRED.L3_HIT / (MEM_LOAD_UOPS_RETIRED.L3_HIT + 7 * MEM_LOAD_UOPS_RETIRED.L3_MISS) * CYCLE_ACTIVITY.STALLS_L2_MISS / tma_info_thread_clks",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
- "MetricThreshold": "(tma_l3_bound > 0.05) & ((tma_memory_bound > 0.2) & ((tma_backend_bound > 0.2)))",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS",
"ScaleUnit": "100%"
},
@@ -821,7 +818,7 @@
"MetricExpr": "29 * (MEM_LOAD_UOPS_RETIRED.L3_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
- "MetricThreshold": "(tma_l3_hit_latency > 0.1) & ((tma_l3_bound > 0.05) & ((tma_memory_bound > 0.2) & ((tma_backend_bound > 0.2))))",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency",
"ScaleUnit": "100%"
},
@@ -830,7 +827,7 @@
"MetricExpr": "ILD_STALL.LCP / tma_info_thread_clks",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_lcp",
- "MetricThreshold": "(tma_lcp > 0.05) & ((tma_fetch_latency > 0.1) & ((tma_frontend_bound > 0.15)))",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
"ScaleUnit": "100%"
},
@@ -839,18 +836,17 @@
"MetricExpr": "tma_retiring - tma_heavy_operations",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_light_operations",
- "MetricThreshold": "(tma_light_operations > 0.6)",
+ "MetricThreshold": "tma_light_operations > 0.6",
"MetricgroupNoGroup": "TopdownL2",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_2 + UOPS_DISPATCHED_PORT.PORT_3 + UOPS_DISPATCHED_PORT.PORT_7 - UOPS_DISPATCHED_PORT.PORT_4) / (2 * tma_info_core_core_clks)",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_load_op_utilization",
- "MetricThreshold": "(tma_load_op_utilization > 0.6)",
+ "MetricThreshold": "tma_load_op_utilization > 0.6",
"PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations. Sample with: UOPS_DISPATCHED.PORT_2_3_10",
"ScaleUnit": "100%"
},
@@ -860,7 +856,7 @@
"MetricExpr": "MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO) / tma_info_thread_clks",
"MetricGroup": "LockCont;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
- "MetricThreshold": "(tma_lock_latency > 0.2) & ((tma_l1_bound > 0.1) & ((tma_memory_bound > 0.2) & ((tma_backend_bound > 0.2))))",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
"ScaleUnit": "100%"
},
@@ -870,7 +866,7 @@
"MetricExpr": "tma_bad_speculation - tma_branch_mispredicts",
"MetricGroup": "BadSpec;BvMS;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
"MetricName": "tma_machine_clears",
- "MetricThreshold": "(tma_machine_clears > 0.1) & ((tma_bad_speculation > 0.15))",
+ "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
"PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
"ScaleUnit": "100%"
@@ -880,7 +876,7 @@
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
- "MetricThreshold": "(tma_mem_bandwidth > 0.2) & ((tma_dram_bound > 0.1) & ((tma_memory_bound > 0.2) & ((tma_backend_bound > 0.2))))",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
"ScaleUnit": "100%"
},
@@ -889,7 +885,7 @@
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
"MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
- "MetricThreshold": "(tma_mem_latency > 0.1) & ((tma_dram_bound > 0.1) & ((tma_memory_bound > 0.2) & ((tma_backend_bound > 0.2))))",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency",
"ScaleUnit": "100%"
},
@@ -899,7 +895,7 @@
"MetricExpr": "(CYCLE_ACTIVITY.STALLS_MEM_ANY + RESOURCE_STALLS.SB) / (CYCLE_ACTIVITY.STALLS_TOTAL + UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - (UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC if tma_info_thread_ipc > 1.8 else UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB) * tma_backend_bound",
"MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_memory_bound",
- "MetricThreshold": "(tma_memory_bound > 0.2) & ((tma_backend_bound > 0.2))",
+ "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
"PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
"ScaleUnit": "100%"
@@ -909,7 +905,7 @@
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_thread_slots",
"MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
"MetricName": "tma_microcode_sequencer",
- "MetricThreshold": "(tma_microcode_sequencer > 0.05) & ((tma_heavy_operations > 0.1))",
+ "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
"PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: UOPS_RETIRED.MS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches",
"ScaleUnit": "100%"
},
@@ -918,7 +914,7 @@
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES * tma_branch_resteers / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY)",
"MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
"MetricName": "tma_mispredicts_resteers",
- "MetricThreshold": "(tma_mispredicts_resteers > 0.05) & ((tma_branch_resteers > 0.05) & ((tma_fetch_latency > 0.1) & ((tma_frontend_bound > 0.15))))",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost",
"ScaleUnit": "100%"
},
@@ -927,7 +923,7 @@
"MetricExpr": "(IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS) / tma_info_core_core_clks / 2",
"MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_mite",
- "MetricThreshold": "(tma_mite > 0.1) & ((tma_fetch_bandwidth > 0.2))",
+ "MetricThreshold": "tma_mite > 0.1 & tma_fetch_bandwidth > 0.2",
"PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck. Sample with: FRONTEND_RETIRED.ANY_DSB_MISS",
"ScaleUnit": "100%"
},
@@ -936,7 +932,7 @@
"MetricExpr": "2 * IDQ.MS_SWITCHES / tma_info_thread_clks",
"MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
"MetricName": "tma_ms_switches",
- "MetricThreshold": "(tma_ms_switches > 0.05) & ((tma_fetch_latency > 0.1) & ((tma_frontend_bound > 0.15)))",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
"ScaleUnit": "100%"
},
@@ -945,7 +941,7 @@
"MetricExpr": "UOPS_DISPATCHED_PORT.PORT_0 / tma_info_core_core_clks",
"MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_0",
- "MetricThreshold": "(tma_port_0 > 0.6)",
+ "MetricThreshold": "tma_port_0 > 0.6",
"PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -954,7 +950,7 @@
"MetricExpr": "UOPS_DISPATCHED_PORT.PORT_1 / tma_info_core_core_clks",
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_1",
- "MetricThreshold": "(tma_port_1 > 0.6)",
+ "MetricThreshold": "tma_port_1 > 0.6",
"PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -963,7 +959,7 @@
"MetricExpr": "UOPS_DISPATCHED_PORT.PORT_2 / tma_info_core_core_clks",
"MetricGroup": "TopdownL6;tma_L6_group;tma_load_op_utilization_group",
"MetricName": "tma_port_2",
- "MetricThreshold": "(tma_port_2 > 0.6)",
+ "MetricThreshold": "tma_port_2 > 0.6",
"ScaleUnit": "100%"
},
{
@@ -971,7 +967,7 @@
"MetricExpr": "UOPS_DISPATCHED_PORT.PORT_3 / tma_info_core_core_clks",
"MetricGroup": "TopdownL6;tma_L6_group;tma_load_op_utilization_group",
"MetricName": "tma_port_3",
- "MetricThreshold": "(tma_port_3 > 0.6)",
+ "MetricThreshold": "tma_port_3 > 0.6",
"ScaleUnit": "100%"
},
{
@@ -979,7 +975,7 @@
"MetricExpr": "tma_store_op_utilization",
"MetricGroup": "TopdownL6;tma_L6_group;tma_issueSpSt;tma_store_op_utilization_group",
"MetricName": "tma_port_4",
- "MetricThreshold": "(tma_port_4 > 0.6)",
+ "MetricThreshold": "tma_port_4 > 0.6",
"PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 4 (Store-data). Related metrics: tma_split_stores",
"ScaleUnit": "100%"
},
@@ -988,7 +984,7 @@
"MetricExpr": "UOPS_DISPATCHED_PORT.PORT_5 / tma_info_core_core_clks",
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_5",
- "MetricThreshold": "(tma_port_5 > 0.6)",
+ "MetricThreshold": "tma_port_5 > 0.6",
"PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -997,7 +993,7 @@
"MetricExpr": "UOPS_DISPATCHED_PORT.PORT_6 / tma_info_core_core_clks",
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_6",
- "MetricThreshold": "(tma_port_6 > 0.6)",
+ "MetricThreshold": "tma_port_6 > 0.6",
"PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -1006,7 +1002,7 @@
"MetricExpr": "UOPS_DISPATCHED_PORT.PORT_7 / tma_info_core_core_clks",
"MetricGroup": "TopdownL6;tma_L6_group;tma_store_op_utilization_group",
"MetricName": "tma_port_7",
- "MetricThreshold": "(tma_port_7 > 0.6)",
+ "MetricThreshold": "tma_port_7 > 0.6",
"ScaleUnit": "100%"
},
{
@@ -1015,34 +1011,34 @@
"MetricExpr": "(CYCLE_ACTIVITY.STALLS_TOTAL + UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - (UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC if tma_info_thread_ipc > 1.8 else UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB - RESOURCE_STALLS.SB - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_ports_utilization",
- "MetricThreshold": "(tma_ports_utilization > 0.15) & ((tma_core_bound > 0.1) & ((tma_backend_bound > 0.2)))",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,inv\\,cmask\\=1@ / 2 if #SMT_on else (CYCLE_ACTIVITY.STALLS_TOTAL - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0)) / tma_info_core_core_clks)",
+ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,inv\\,cmask\\=1@ / 2 if #SMT_on else CYCLE_ACTIVITY.STALLS_TOTAL - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0)) / tma_info_core_core_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
- "MetricThreshold": "(tma_ports_utilized_0 > 0.2) & ((tma_ports_utilization > 0.15) & ((tma_core_bound > 0.1) & ((tma_backend_bound > 0.2))))",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@) / 2 if #SMT_on else (UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC) / tma_info_core_core_clks)",
+ "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@) / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC) / tma_info_core_core_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_1",
- "MetricThreshold": "(tma_ports_utilized_1 > 0.2) & ((tma_ports_utilization > 0.15) & ((tma_core_bound > 0.1) & ((tma_backend_bound > 0.2))))",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Sample with: EXE_ACTIVITY.1_PORTS_UTIL. Related metrics: tma_l1_bound",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@) / 2 if #SMT_on else (UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC - UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_core_clks)",
+ "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@) / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC - UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_core_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_2",
- "MetricThreshold": "(tma_ports_utilized_2 > 0.15) & ((tma_ports_utilization > 0.15) & ((tma_core_bound > 0.1) & ((tma_backend_bound > 0.2))))",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Sample with: EXE_ACTIVITY.2_PORTS_UTIL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
"ScaleUnit": "100%"
},
@@ -1051,7 +1047,7 @@
"MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_core_clks",
"MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
- "MetricThreshold": "(tma_ports_utilized_3m > 0.4) & ((tma_ports_utilization > 0.15) & ((tma_core_bound > 0.1) & ((tma_backend_bound > 0.2))))",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3",
"ScaleUnit": "100%"
},
@@ -1060,7 +1056,7 @@
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / tma_info_thread_slots",
"MetricGroup": "BvUW;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
- "MetricThreshold": "((tma_retiring > 0.7)|(tma_heavy_operations > 0.1))",
+ "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL1",
"PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.SLOTS",
"ScaleUnit": "100%"
@@ -1071,7 +1067,7 @@
"MetricExpr": "tma_info_memory_load_miss_real_latency * LD_BLOCKS.NO_SR / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_split_loads",
- "MetricThreshold": "(tma_split_loads > 0.3)",
+ "MetricThreshold": "tma_split_loads > 0.3",
"PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS_PS",
"ScaleUnit": "100%"
},
@@ -1080,7 +1076,7 @@
"MetricExpr": "2 * MEM_UOPS_RETIRED.SPLIT_STORES / tma_info_core_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
"MetricName": "tma_split_stores",
- "MetricThreshold": "(tma_split_stores > 0.2) & ((tma_store_bound > 0.2) & ((tma_memory_bound > 0.2) & ((tma_backend_bound > 0.2))))",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
"ScaleUnit": "100%"
},
@@ -1089,7 +1085,7 @@
"MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_core_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
- "MetricThreshold": "(tma_sq_full > 0.3) & ((tma_l3_bound > 0.05) & ((tma_memory_bound > 0.2) & ((tma_backend_bound > 0.2))))",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
"ScaleUnit": "100%"
},
@@ -1098,7 +1094,7 @@
"MetricExpr": "RESOURCE_STALLS.SB / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_store_bound",
- "MetricThreshold": "(tma_store_bound > 0.2) & ((tma_memory_bound > 0.2) & ((tma_backend_bound > 0.2)))",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES_PS",
"ScaleUnit": "100%"
},
@@ -1107,7 +1103,7 @@
"MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
- "MetricThreshold": "(tma_store_fwd_blk > 0.1) & ((tma_l1_bound > 0.1) & ((tma_memory_bound > 0.2) & ((tma_backend_bound > 0.2))))",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
"ScaleUnit": "100%"
},
@@ -1117,7 +1113,7 @@
"MetricExpr": "(L2_RQSTS.RFO_HIT * 9 * (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) + (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
"MetricGroup": "BvML;LockCont;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
- "MetricThreshold": "(tma_store_latency > 0.1) & ((tma_store_bound > 0.2) & ((tma_memory_bound > 0.2) & ((tma_backend_bound > 0.2))))",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
"ScaleUnit": "100%"
},
@@ -1126,7 +1122,7 @@
"MetricExpr": "UOPS_DISPATCHED_PORT.PORT_4 / tma_info_core_core_clks",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_store_op_utilization",
- "MetricThreshold": "(tma_store_op_utilization > 0.6)",
+ "MetricThreshold": "tma_store_op_utilization > 0.6",
"PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations. Sample with: UOPS_DISPATCHED.PORT_7_8",
"ScaleUnit": "100%"
},
@@ -1135,7 +1131,7 @@
"MetricExpr": "tma_branch_resteers - tma_mispredicts_resteers - tma_clears_resteers",
"MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
- "MetricThreshold": "(tma_unknown_branches > 0.05) & ((tma_branch_resteers > 0.05) & ((tma_fetch_latency > 0.1) & ((tma_frontend_bound > 0.15))))",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: FRONTEND_RETIRED.UNKNOWN_BRANCH",
"ScaleUnit": "100%"
},
@@ -1144,7 +1140,7 @@
"MetricExpr": "INST_RETIRED.X87 * tma_info_thread_uoppi / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
"MetricName": "tma_x87_use",
- "MetricThreshold": "(tma_x87_use > 0.1) & ((tma_fp_arith > 0.2) & ((tma_light_operations > 0.6)))",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
"PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
"ScaleUnit": "100%"
}
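
The bulk of the hunks above are mechanical: they strip redundant parentheses from "MetricThreshold" strings and constant-fold literal arithmetic ("4 * 2 + 1" becomes "9", "1000000" becomes "1e6"). The old and new spellings are equivalent because in perf's metric-expression grammar (tools/perf/util/expr.y) comparisons bind tighter than "&", which in turn binds tighter than "|", much as in C. As a sanity check, here is a minimal Python sketch - not perf's actual evaluator - that substitutes already-computed metric values into a threshold string; the function name and the sample values are invented for illustration:

    import re

    def threshold_fires(threshold, values):
        # Replace each metric name (whole words only) with its computed
        # value, then map the JSON operators '&'/'|' onto Python booleans.
        expr = threshold
        for name, val in values.items():
            expr = re.sub(r"\b%s\b" % re.escape(name), repr(val), expr)
        expr = expr.replace("&", " and ").replace("|", " or ")
        return bool(eval(expr))  # toy evaluator; thresholds come from trusted JSON

    vals = {"tma_fetch_latency": 0.18, "tma_frontend_bound": 0.22}
    old = "(tma_fetch_latency > 0.1) & ((tma_frontend_bound > 0.15))"
    new = "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15"
    assert threshold_fires(old, vals) == threshold_fires(new, vals)  # both fire

Dropping the parentheses changes nothing observable; it only normalizes the generated JSON, presumably so that regenerating the files produces no spurious diffs.
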
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
index 8016202bad1f..5b83b040060c 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
@@ -1,49 +1,49 @@
[
{
"BriefDescription": "C2 residency percent per package",
- "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per core",
- "MetricExpr": "cstate_core@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per package",
- "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per core",
- "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per package",
- "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per core",
- "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per package",
- "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Pkg_Residency",
"ScaleUnit": "100%"
@@ -276,13 +276,12 @@
"MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_4k_aliasing",
- "MetricThreshold": "tma_4k_aliasing > 0.2 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound)",
+ "MetricThreshold": "tma_4k_aliasing > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
"MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5 + UOPS_DISPATCHED_PORT.PORT_6) / tma_info_thread_slots",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_alu_op_utilization",
@@ -294,19 +293,18 @@
"MetricExpr": "66 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_thread_slots",
"MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
- "MetricThreshold": "tma_assists > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
- "PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: OTHER_ASSISTS.ANY_WB_ASSIST",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: OTHER_ASSISTS.ANY",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "1 - (tma_frontend_bound + tma_bad_speculation + tma_retiring)",
"MetricGroup": "BvOB;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
"ScaleUnit": "100%"
},
{
@@ -316,7 +314,7 @@
"MetricName": "tma_bad_speculation",
"MetricThreshold": "tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL1",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"ScaleUnit": "100%"
},
{
@@ -327,7 +325,7 @@
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_mispredicts_resteers",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_bad_spec_branch_misprediction_cost, tma_mispredicts_resteers",
"ScaleUnit": "100%"
},
{
@@ -335,18 +333,17 @@
"MetricExpr": "12 * (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY) / tma_info_thread_clks",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_branch_resteers",
- "MetricThreshold": "tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_l3_hit_latency, tma_store_latency",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
"MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_cisc",
- "MetricThreshold": "tma_cisc > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
- "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
"ScaleUnit": "100%"
},
{
@@ -354,7 +351,7 @@
"MetricExpr": "MACHINE_CLEARS.COUNT * tma_branch_resteers / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY)",
"MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
"MetricName": "tma_clears_resteers",
- "MetricThreshold": "tma_clears_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
"ScaleUnit": "100%"
},
@@ -364,8 +361,8 @@
"MetricExpr": "(60 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) + 43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD)))) / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
- "MetricThreshold": "tma_contested_accesses > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM, MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
@@ -376,7 +373,7 @@
"MetricName": "tma_core_bound",
"MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations)",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
"ScaleUnit": "100%"
},
{
@@ -385,8 +382,8 @@
"MetricExpr": "43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) / tma_info_thread_clks",
"MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
- "MetricThreshold": "tma_data_sharing > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
@@ -394,8 +391,8 @@
"MetricExpr": "ARITH.FPU_DIV_ACTIVE / tma_info_core_core_clks",
"MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
- "MetricThreshold": "tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.FPU_DIV_ACTIVE",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
"ScaleUnit": "100%"
},
{
@@ -404,8 +401,8 @@
"MetricExpr": "(1 - MEM_LOAD_UOPS_RETIRED.L3_HIT / (MEM_LOAD_UOPS_RETIRED.L3_HIT + 7 * MEM_LOAD_UOPS_RETIRED.L3_MISS)) * CYCLE_ACTIVITY.STALLS_L2_MISS / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_dram_bound",
- "MetricThreshold": "tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_MISS",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_MISS_PS",
"ScaleUnit": "100%"
},
{
@@ -414,7 +411,7 @@
"MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_dsb",
"MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2",
- "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
"ScaleUnit": "100%"
},
{
@@ -422,26 +419,26 @@
"MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_thread_clks",
"MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_dsb_switches",
- "MetricThreshold": "tma_dsb_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Related metrics: tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
- "MetricExpr": "(8 * DTLB_LOAD_MISSES.STLB_HIT + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=0x1@ + 7 * DTLB_LOAD_MISSES.WALK_COMPLETED) / tma_info_thread_clks",
+ "MetricExpr": "(8 * DTLB_LOAD_MISSES.STLB_HIT + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * DTLB_LOAD_MISSES.WALK_COMPLETED) / tma_info_thread_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
- "MetricThreshold": "tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_UOPS_RETIRED.STLB_MISS_LOADS. Related metrics: tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_UOPS_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
- "MetricExpr": "(8 * DTLB_STORE_MISSES.STLB_HIT + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=0x1@ + 7 * DTLB_STORE_MISSES.WALK_COMPLETED) / tma_info_thread_clks",
+ "MetricExpr": "(8 * DTLB_STORE_MISSES.STLB_HIT + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * DTLB_STORE_MISSES.WALK_COMPLETED) / tma_info_thread_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
- "MetricThreshold": "tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_UOPS_RETIRED.STLB_MISS_STORES. Related metrics: tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_UOPS_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load",
"ScaleUnit": "100%"
},
{
@@ -449,18 +446,18 @@
"MetricExpr": "(200 * OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM + 60 * OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE) / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
- "MetricThreshold": "tma_false_sharing > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM, MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM, OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE, OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "tma_info_memory_load_miss_real_latency * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=0x1@ / tma_info_thread_clks",
+ "MetricExpr": "tma_info_memory_load_miss_real_latency * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
- "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
"ScaleUnit": "100%"
},
{
@@ -489,7 +486,7 @@
"MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fp_arith",
"MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
"ScaleUnit": "100%"
},
{
@@ -497,8 +494,8 @@
"MetricExpr": "FP_ARITH_INST_RETIRED.SCALAR / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_scalar",
- "MetricThreshold": "tma_fp_scalar > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -506,8 +503,8 @@
"MetricExpr": "FP_ARITH_INST_RETIRED.VECTOR / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_vector",
- "MetricThreshold": "tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -515,8 +512,8 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE) / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_128b",
- "MetricThreshold": "tma_fp_vector_128b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -524,8 +521,8 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_256b",
- "MetricThreshold": "tma_fp_vector_256b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -535,33 +532,33 @@
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
"MetricExpr": "tma_microcode_sequencer",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_heavy_operations",
"MetricThreshold": "tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+])",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+])",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.",
"MetricExpr": "ICACHE.IFDATA_STALL / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
- "MetricThreshold": "tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
"MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * BR_MISP_EXEC.INDIRECT)",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_indirect",
- "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1000"
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3"
},
{
"BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
@@ -572,7 +569,7 @@
},
{
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
- "MetricExpr": "(CPU_CLK_UNHALTED.THREAD_ANY / 2 if #SMT_on else tma_info_thread_clks)",
+ "MetricExpr": "(CPU_CLK_UNHALTED.THREAD / 2 * (1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK) if #core_wide < 1 else (CPU_CLK_UNHALTED.THREAD_ANY / 2 if #SMT_on else tma_info_thread_clks))",
"MetricGroup": "SMT",
"MetricName": "tma_info_core_core_clks"
},
@@ -593,11 +590,11 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED.VECTOR) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
- "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)"
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=0x1@",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "tma_info_core_ilp"
},
@@ -622,7 +619,7 @@
"MetricName": "tma_info_frontend_tbpc"
},
{
- "BriefDescription": "Branch instructions per taken branch",
+ "BriefDescription": "Branch instructions per taken branch.",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;PGO",
"MetricName": "tma_info_inst_mix_bptkbranch"
@@ -640,7 +637,7 @@
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
"MetricThreshold": "tma_info_inst_mix_iparith < 10",
- "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW"
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
@@ -648,7 +645,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx128",
"MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
@@ -656,7 +653,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx256",
"MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
@@ -664,7 +661,7 @@
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_dp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
@@ -672,7 +669,7 @@
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_sp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
@@ -714,7 +711,7 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
- "MetricThreshold": "tma_info_inst_mix_iptb < 4 * 2 + 1",
+ "MetricThreshold": "tma_info_inst_mix_iptb < 9",
"PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_lcp"
},
{
@@ -842,20 +839,20 @@
"MetricThreshold": "tma_info_memory_tlb_page_walks_utilization > 0.5"
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per core",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
+ "BriefDescription": "Mem;Backend;CacheHits",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute"
},
{
- "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=0x1@",
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
"MetricGroup": "Pipeline;Ret",
"MetricName": "tma_info_pipeline_retire"
},
{
"BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]",
- "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / tma_info_system_time",
+ "MetricExpr": "tma_info_system_turbo_utilization * msr@tsc@ / 1e9 / tma_info_system_time",
"MetricGroup": "Power;Summary",
"MetricName": "tma_info_system_core_frequency"
},
@@ -867,7 +864,7 @@
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
@@ -890,14 +887,13 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
"MetricGroup": "Branches;OS",
"MetricName": "tma_info_system_ipfarbranch",
- "MetricThreshold": "tma_info_system_ipfarbranch < 1000000"
+ "MetricThreshold": "tma_info_system_ipfarbranch < 1e6"
},
{
"BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
"MetricGroup": "OS",
- "MetricName": "tma_info_system_kernel_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_system_kernel_cpi"
},
{
"BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
@@ -908,14 +904,15 @@
},
{
"BriefDescription": "Average number of parallel data read requests to external memory",
- "MetricExpr": "cbox@UNC_C_TOR_OCCUPANCY.MISS_OPCODE\\,filter_opc\\=0x182@ / cbox@UNC_C_TOR_OCCUPANCY.MISS_OPCODE\\,filter_opc\\=0x182@",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "UNC_C_TOR_OCCUPANCY.MISS_OPCODE@filter_opc\\=0x182@ / UNC_C_TOR_OCCUPANCY.MISS_OPCODE@filter_opc\\=0x182\\,thresh\\=1@",
"MetricGroup": "Mem;MemoryBW;SoC",
"MetricName": "tma_info_system_mem_parallel_reads",
"PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches"
},
{
"BriefDescription": "Average latency of data read request to external memory (in nanoseconds)",
- "MetricExpr": "1e9 * (cbox@UNC_C_TOR_OCCUPANCY.MISS_OPCODE\\,filter_opc\\=0x182@ / cbox@UNC_C_TOR_INSERTS.MISS_OPCODE\\,filter_opc\\=0x182@) / (tma_info_system_socket_clks / tma_info_system_time)",
+ "MetricExpr": "1e9 * (UNC_C_TOR_OCCUPANCY.MISS_OPCODE@filter_opc\\=0x182@ / UNC_C_TOR_INSERTS.MISS_OPCODE@filter_opc\\=0x182@) / (tma_info_system_socket_clks / tma_info_system_time)",
"MetricGroup": "Mem;MemoryLat;SoC",
"MetricName": "tma_info_system_mem_read_latency",
"PublicDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches. ([RKL+]memory-controller only)"
@@ -965,7 +962,7 @@
"MetricName": "tma_info_system_uncore_frequency"
},
{
- "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active",
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Pipeline",
"MetricName": "tma_info_thread_clks"
@@ -974,15 +971,14 @@
"BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / tma_info_thread_ipc",
"MetricGroup": "Mem;Pipeline",
- "MetricName": "tma_info_thread_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_thread_cpi"
},
{
"BriefDescription": "The ratio of Executed- by Issued-Uops",
"MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
"MetricGroup": "Cor;Pipeline",
"MetricName": "tma_info_thread_execute_per_issue",
- "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage"
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
},
{
"BriefDescription": "Instructions Per Cycle (per Logical Processor)",
@@ -1008,14 +1004,14 @@
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
- "MetricThreshold": "tma_info_thread_uptb < 4 * 1.5"
+ "MetricThreshold": "tma_info_thread_uptb < 6"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
- "MetricExpr": "(14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=0x1@ + 7 * ITLB_MISSES.WALK_COMPLETED) / tma_info_thread_clks",
+ "MetricExpr": "(14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ITLB_MISSES.WALK_COMPLETED) / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
- "MetricThreshold": "tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: ITLB_MISSES.WALK_COMPLETED",
"ScaleUnit": "100%"
},
@@ -1024,8 +1020,8 @@
"MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_thread_clks, 0)",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
"MetricName": "tma_l1_bound",
- "MetricThreshold": "tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 Data (L1D) cache. The L1D cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1D. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_UOPS_RETIRED.L1_HIT. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 Data (L1D) cache. The L1D cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1D. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_UOPS_RETIRED.L1_HIT_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
"ScaleUnit": "100%"
},
{
@@ -1033,8 +1029,8 @@
"MetricExpr": "(CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks",
"MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
- "MetricThreshold": "tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L2_HIT_PS",
"ScaleUnit": "100%"
},
{
@@ -1043,8 +1039,8 @@
"MetricExpr": "MEM_LOAD_UOPS_RETIRED.L3_HIT / (MEM_LOAD_UOPS_RETIRED.L3_HIT + 7 * MEM_LOAD_UOPS_RETIRED.L3_MISS) * CYCLE_ACTIVITY.STALLS_L2_MISS / tma_info_thread_clks",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
- "MetricThreshold": "tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS",
"ScaleUnit": "100%"
},
{
@@ -1053,8 +1049,8 @@
"MetricExpr": "41 * (MEM_LOAD_UOPS_RETIRED.L3_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
- "MetricThreshold": "tma_l3_hit_latency > 0.1 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT. Related metrics: tma_branch_resteers, tma_mem_latency, tma_store_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency",
"ScaleUnit": "100%"
},
{
@@ -1062,23 +1058,22 @@
"MetricExpr": "ILD_STALL.LCP / tma_info_thread_clks",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_lcp",
- "MetricThreshold": "tma_lcp > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation)",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
"MetricExpr": "tma_retiring - tma_heavy_operations",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_light_operations",
"MetricThreshold": "tma_light_operations > 0.6",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_2 + UOPS_DISPATCHED_PORT.PORT_3 + UOPS_DISPATCHED_PORT.PORT_7 - UOPS_DISPATCHED_PORT.PORT_4) / (2 * tma_info_core_core_clks)",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_load_op_utilization",
@@ -1088,11 +1083,12 @@
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "200 * (MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) / tma_info_thread_clks",
"MetricGroup": "Server;TopdownL5;tma_L5_group;tma_mem_latency_group",
"MetricName": "tma_local_mem",
- "MetricThreshold": "tma_local_mem > 0.1 & tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
+ "MetricThreshold": "tma_local_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM_PS",
"ScaleUnit": "100%"
},
{
@@ -1101,8 +1097,8 @@
"MetricExpr": "MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO) / tma_info_thread_clks",
"MetricGroup": "LockCont;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
- "MetricThreshold": "tma_lock_latency > 0.2 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_UOPS_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_UOPS_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
"ScaleUnit": "100%"
},
{
@@ -1118,10 +1114,10 @@
},
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
- "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=0x4@) / tma_info_thread_clks",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
- "MetricThreshold": "tma_mem_bandwidth > 0.2 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
"ScaleUnit": "100%"
},
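
Note the second change in the hunk above: event modifiers inside MetricExpr strings are respelled from hex to decimal (cmask\=0x4 becomes cmask\=4), and in the UOPS_EXECUTED hunks further down boolean flags such as inv drop their explicit \=0x1. A minimal Python sketch of that normalization (illustrative only, not the generator actually used to produce these files):

import re

def normalize_modifiers(expr: str) -> str:
    """Rewrite hex modifier values as decimal; drop '=1' on boolean flags."""
    def fix(m: re.Match) -> str:
        key, val = m.group(1), m.group(2)
        num = int(val, 16) if val.lower().startswith("0x") else int(val)
        if key in ("inv", "edge") and num == 1:
            return key                # bare flag name is enough
        return f"{key}\\={num}"       # e.g. cmask\=0x4 -> cmask\=4
    return re.sub(r"(\w+)\\=(0x[0-9a-fA-F]+|\d+)", fix, expr)

print(normalize_modifiers(r"cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\,cmask\=0x4@"))
# cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\,cmask\=4@
print(normalize_modifiers(r"cpu@UOPS_EXECUTED.CORE\,inv\=0x1\,cmask\=0x1@"))
# cpu@UOPS_EXECUTED.CORE\,inv\,cmask\=1@
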
@@ -1130,7 +1126,7 @@
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
"MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
- "MetricThreshold": "tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency",
"ScaleUnit": "100%"
},
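
The MetricThreshold rewrites throughout this diff follow the pattern visible above: added parentheses group each comparison with its parent's, so the condition reads node-over-threshold AND (parent AND (grandparent AND ...)). Since everything is joined by plain logical AND the grouping is value-neutral; it only makes the TMA tree ancestry explicit. A worked check in Python, with metric values invented for illustration:

vals = {
    "tma_mem_latency": 0.15,
    "tma_dram_bound": 0.12,
    "tma_memory_bound": 0.25,
    "tma_backend_bound": 0.30,
}

# "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))"
fires = (vals["tma_mem_latency"] > 0.1
         and (vals["tma_dram_bound"] > 0.1
              and (vals["tma_memory_bound"] > 0.2
                   and vals["tma_backend_bound"] > 0.2)))
print(fires)  # True only when the node and every TMA ancestor exceed their thresholds
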
@@ -1142,7 +1138,7 @@
"MetricName": "tma_memory_bound",
"MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two)",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
"ScaleUnit": "100%"
},
{
@@ -1159,8 +1155,8 @@
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES * tma_branch_resteers / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY)",
"MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
"MetricName": "tma_mispredicts_resteers",
- "MetricThreshold": "tma_mispredicts_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Related metrics: tma_branch_mispredicts",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost",
"ScaleUnit": "100%"
},
{
@@ -1169,7 +1165,7 @@
"MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_mite",
"MetricThreshold": "tma_mite > 0.1 & tma_fetch_bandwidth > 0.2",
- "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck.",
"ScaleUnit": "100%"
},
{
@@ -1177,8 +1173,8 @@
"MetricExpr": "2 * IDQ.MS_SWITCHES / tma_info_thread_clks",
"MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
"MetricName": "tma_ms_switches",
- "MetricThreshold": "tma_ms_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
"ScaleUnit": "100%"
},
{
@@ -1187,7 +1183,7 @@
"MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_0",
"MetricThreshold": "tma_port_0 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED_PORT.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1196,7 +1192,7 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_1",
"MetricThreshold": "tma_port_1 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1232,7 +1228,7 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_5",
"MetricThreshold": "tma_port_5 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1241,7 +1237,7 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_6",
"MetricThreshold": "tma_port_6 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1259,43 +1255,43 @@
"MetricExpr": "(CYCLE_ACTIVITY.STALLS_TOTAL + UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - (UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC if tma_info_thread_ipc > 1.8 else UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB - RESOURCE_STALLS.SB - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_ports_utilization",
- "MetricThreshold": "tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,inv\\=0x1\\,cmask\\=0x1@ / 2 if #SMT_on else CYCLE_ACTIVITY.STALLS_TOTAL - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0)) / tma_info_core_core_clks",
+ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,inv\\,cmask\\=1@ / 2 if #SMT_on else CYCLE_ACTIVITY.STALLS_TOTAL - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0)) / tma_info_core_core_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
- "MetricThreshold": "tma_ports_utilized_0 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x2@) / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC) / tma_info_core_core_clks",
+ "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@) / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC) / tma_info_core_core_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_1",
- "MetricThreshold": "tma_ports_utilized_1 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Related metrics: tma_l1_bound",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x2@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x3@) / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC - UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_core_clks",
+ "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@) / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC - UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_core_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_2",
- "MetricThreshold": "tma_ports_utilized_2 > 0.15 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x3@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_core_clks",
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).",
+ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_core_clks",
"MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
- "MetricThreshold": "tma_ports_utilized_3m > 0.4 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
{
@@ -1304,17 +1300,18 @@
"MetricExpr": "(200 * (MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) + 180 * (MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD)))) / tma_info_thread_clks",
"MetricGroup": "Offcore;Server;Snoop;TopdownL5;tma_L5_group;tma_issueSyncxn;tma_mem_latency_group",
"MetricName": "tma_remote_cache",
- "MetricThreshold": "tma_remote_cache > 0.05 & tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues. This is caused often due to non-optimal NUMA allocations. Sample with: MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM, MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD. Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_remote_cache > 0.05 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM_PS;MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD_PS. Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "310 * (MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) / tma_info_thread_clks",
"MetricGroup": "Server;Snoop;TopdownL5;tma_L5_group;tma_mem_latency_group",
"MetricName": "tma_remote_mem",
- "MetricThreshold": "tma_remote_mem > 0.1 & tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. Sample with: MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM",
+ "MetricThreshold": "tma_remote_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM_PS",
"ScaleUnit": "100%"
},
{
@@ -1334,7 +1331,7 @@
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_split_loads",
"MetricThreshold": "tma_split_loads > 0.3",
- "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_UOPS_RETIRED.SPLIT_LOADS_PS",
"ScaleUnit": "100%"
},
{
@@ -1342,8 +1339,8 @@
"MetricExpr": "2 * MEM_UOPS_RETIRED.SPLIT_STORES / tma_info_core_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
"MetricName": "tma_split_stores",
- "MetricThreshold": "tma_split_stores > 0.2 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_UOPS_RETIRED.SPLIT_STORES. Related metrics: tma_port_4",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_UOPS_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
"ScaleUnit": "100%"
},
{
@@ -1351,7 +1348,7 @@
"MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_core_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
- "MetricThreshold": "tma_sq_full > 0.3 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
"ScaleUnit": "100%"
},
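
The "/ 2 if #SMT_on" guard in the tma_sq_full expression above appears wherever a counted resource is shared by both hyperthreads: presumably the event is observed on each logical CPU, so the per-core figure halves the count when SMT is enabled. A small sketch of the same arithmetic, with invented inputs:

def sq_full_fraction(sq_full_cycles: float, core_clks: float, smt_on: bool) -> float:
    """Fraction of core clocks the shared Super Queue was full (cf. tma_sq_full)."""
    counted = sq_full_cycles / 2 if smt_on else sq_full_cycles
    return counted / core_clks

print(f"{sq_full_fraction(3.0e8, 1.0e9, smt_on=True):.0%}")  # 15%
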
@@ -1360,8 +1357,8 @@
"MetricExpr": "RESOURCE_STALLS.SB / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_store_bound",
- "MetricThreshold": "tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_UOPS_RETIRED.ALL_STORES",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_UOPS_RETIRED.ALL_STORES_PS",
"ScaleUnit": "100%"
},
{
@@ -1369,8 +1366,8 @@
"MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
- "MetricThreshold": "tma_store_fwd_blk > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
"ScaleUnit": "100%"
},
{
@@ -1379,8 +1376,8 @@
"MetricExpr": "(L2_RQSTS.RFO_HIT * 9 * (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) + (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
"MetricGroup": "BvML;LockCont;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
- "MetricThreshold": "tma_store_latency > 0.1 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_branch_resteers, tma_fb_full, tma_l3_hit_latency, tma_lock_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
"ScaleUnit": "100%"
},
{
@@ -1396,7 +1393,7 @@
"MetricExpr": "tma_branch_resteers - tma_mispredicts_resteers - tma_clears_resteers",
"MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
- "MetricThreshold": "tma_unknown_branches > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: BACLEARS.ANY",
"ScaleUnit": "100%"
},
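
tma_unknown_branches above is computed as a remainder: whatever part of the branch-resteer cycles is not explained by the mispredict and machine-clear components is attributed to new (unknown) branch address clears. Arithmetic sketch with invented per-clock fractions:

branch_resteers  = 0.080  # tma_branch_resteers
mispredicts_part = 0.050  # tma_mispredicts_resteers
clears_part      = 0.012  # tma_clears_resteers
unknown_branches = branch_resteers - mispredicts_part - clears_part
print(f"tma_unknown_branches = {unknown_branches:.3f}")  # 0.018
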
@@ -1405,8 +1402,8 @@
"MetricExpr": "INST_RETIRED.X87 * tma_info_thread_uoppi / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
"MetricName": "tma_x87_use",
- "MetricThreshold": "tma_x87_use > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
"ScaleUnit": "100%"
},
{
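
These per-microarchitecture JSON files are compiled into perf by the pmu-events build (jevents), but they are also plain JSON and easy to inspect directly. A minimal sketch that lists the metrics carrying the NO_GROUP_EVENTS* scheduling constraints added in several hunks above; the path is an assumption, so point it at the metrics file of interest in your checkout:

import json
from pathlib import Path

path = Path("tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json")  # assumed path
for m in json.loads(path.read_text()):
    constraint = m.get("MetricConstraint", "")
    if constraint.startswith("NO_GROUP_EVENTS"):
        print(f"{m['MetricName']}: {constraint}")
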
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/cache.json b/tools/perf/pmu-events/arch/x86/cascadelakex/cache.json
index 8bad700ff8ea..d113c14aa7c9 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/cache.json
@@ -1,5 +1,79 @@
[
{
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IFWDFE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDFE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IFWDM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDM",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IHITFSE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IHITFSE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IHITI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IHITI",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SFWDFE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDFE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SFWDM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDM",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SHITFSE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SHITFSE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts number of cache lines that are dropped and not written back to L3 as they are deemed to be less likely to be reused shortly",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xFE",
+ "EventName": "IDI_MISC.WB_DOWNGRADE",
+ "PublicDescription": "Counts number of cache lines that are dropped and not written back to L3 as they are deemed to be less likely to be reused shortly.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts number of cache lines that are allocated and written back to L3 with the intention that they are more likely to be reused shortly",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xFE",
+ "EventName": "IDI_MISC.WB_UPGRADE",
+ "PublicDescription": "Counts number of cache lines that are allocated and written back to L3 with the intention that they are more likely to be reused shortly.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
"BriefDescription": "L1D data line replacements",
"Counter": "0,1,2,3",
"EventCode": "0x51",
@@ -2344,6 +2418,16 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand code reads have any response type.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP OCR.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -2704,6 +2788,116 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80400004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80400004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100400004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800020004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400020004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100020004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200020004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80020004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads have any response type.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP OCR.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -3064,6 +3258,116 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80400001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80400001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100400001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800020001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400020001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100020001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200020001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80020001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) have any response type.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT.ANY_SNOOP OCR.DEMAND_RFO.L3_HIT.ANY_SNOOP",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -3424,6 +3728,106 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80400002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80400002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100400002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800020002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400020002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100020002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200020002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80020002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT.ANY_SNOOP OCR.OTHER.L3_HIT.ANY_SNOOP",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
index 5729b93a9c68..2e50a91b6728 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
@@ -1,49 +1,49 @@
[
{
"BriefDescription": "C2 residency percent per package",
- "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per core",
- "MetricExpr": "cstate_core@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per package",
- "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per core",
- "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per package",
- "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per core",
- "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per package",
- "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Pkg_Residency",
"ScaleUnit": "100%"
@@ -313,12 +313,13 @@
"MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_4k_aliasing",
- "MetricThreshold": "tma_4k_aliasing > 0.2 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound)",
+ "MetricThreshold": "tma_4k_aliasing > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations",
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5 + UOPS_DISPATCHED_PORT.PORT_6) / tma_info_thread_slots",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_alu_op_utilization",
@@ -330,7 +331,7 @@
"MetricExpr": "34 * (FP_ASSIST.ANY + OTHER_ASSISTS.ANY) / tma_info_thread_slots",
"MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
- "MetricThreshold": "tma_assists > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: OTHER_ASSISTS.ANY",
"ScaleUnit": "100%"
},
@@ -341,7 +342,7 @@
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
"ScaleUnit": "100%"
},
{
@@ -351,12 +352,13 @@
"MetricName": "tma_bad_speculation",
"MetricThreshold": "tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL1",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
- "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
"MetricGroup": "BigFootprint;BvBC;Fed;Frontend;IcMiss;MemoryTLB",
"MetricName": "tma_bottleneck_big_code",
"MetricThreshold": "tma_bottleneck_big_code > 20"
@@ -370,39 +372,44 @@
"PublicDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. (A lower bound)"
},
{
+ "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
+ "MetricGroup": "BvCB;Cor;tma_issueComp",
+ "MetricName": "tma_bottleneck_compute_bound_est",
+ "MetricThreshold": "tma_bottleneck_compute_bound_est > 20",
+ "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. Related metrics: "
+ },
+ {
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_fb_full / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
"MetricGroup": "BvMB;Mem;MemoryBW;Offcore;tma_issueBW",
- "MetricName": "tma_bottleneck_cache_memory_bandwidth",
- "MetricThreshold": "tma_bottleneck_cache_memory_bandwidth > 20",
+ "MetricName": "tma_bottleneck_data_cache_memory_bandwidth",
+ "MetricThreshold": "tma_bottleneck_data_cache_memory_bandwidth > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_l1_latency_dependency / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_lock_latency / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_split_loads / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_split_stores / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_dtlb_store)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_store_latency / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_dtlb_store)))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_l1_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l1_latency_dependency / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_lock_latency / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_loads / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_stores / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)) + tma_memory_bound * (tma_store_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)))",
"MetricGroup": "BvML;Mem;MemoryLat;Offcore;tma_issueLat",
- "MetricName": "tma_bottleneck_cache_memory_latency",
- "MetricThreshold": "tma_bottleneck_cache_memory_latency > 20",
+ "MetricName": "tma_bottleneck_data_cache_memory_latency",
+ "MetricThreshold": "tma_bottleneck_data_cache_memory_latency > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency"
},
{
- "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
- "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_serializing_operation + tma_ports_utilization) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_serializing_operation + tma_ports_utilization)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
- "MetricGroup": "BvCB;Cor;tma_issueComp",
- "MetricName": "tma_bottleneck_compute_bound_est",
- "MetricThreshold": "tma_bottleneck_compute_bound_est > 20",
- "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy"
- },
- {
"BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)",
- "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_mispredicts_resteers + tma_clears_resteers + tma_unknown_branches)) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches)) - tma_bottleneck_big_code",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_bottleneck_big_code",
"MetricGroup": "BvFB;Fed;FetchBW;Frontend",
"MetricName": "tma_bottleneck_instruction_fetch_bw",
"MetricThreshold": "tma_bottleneck_instruction_fetch_bw > 20"
},
{
"BriefDescription": "Total pipeline cost of irregular execution (e.g",
- "MetricExpr": "100 * (tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_mispredicts_resteers + tma_clears_resteers + tma_unknown_branches)) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + tma_core_bound * RS_EVENTS.EMPTY_CYCLES / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_serializing_operation + tma_ports_utilization) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + tma_core_bound * RS_EVENTS.EMPTY_CYCLES / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
"MetricGroup": "Bad;BvIO;Cor;Ret;tma_issueMS",
"MetricName": "tma_bottleneck_irregular_overhead",
"MetricThreshold": "tma_bottleneck_irregular_overhead > 10",
@@ -410,7 +417,8 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_dtlb_store / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_dtlb_store)))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)))",
"MetricGroup": "BvMT;Mem;MemoryTLB;Offcore;tma_issueTLB",
"MetricName": "tma_bottleneck_memory_data_tlbs",
"MetricThreshold": "tma_bottleneck_memory_data_tlbs > 20",
@@ -418,7 +426,8 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) * tma_remote_cache / (tma_local_mem + tma_remote_mem + tma_remote_cache) + tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * tma_false_sharing / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_dtlb_store - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) * tma_remote_cache / (tma_local_mem + tma_remote_cache + tma_remote_mem) + tma_l3_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
"MetricGroup": "BvMS;LockCont;Mem;Offcore;tma_issueSyncxn",
"MetricName": "tma_bottleneck_memory_synchronization",
"MetricThreshold": "tma_bottleneck_memory_synchronization > 10",
@@ -426,7 +435,8 @@
},
{
"BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
- "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Bad;BadSpec;BrMispredicts;BvMP;tma_issueBM",
"MetricName": "tma_bottleneck_mispredictions",
"MetricThreshold": "tma_bottleneck_mispredictions > 20",
@@ -434,14 +444,16 @@
},
{
"BriefDescription": "Total pipeline cost of remaining bottlenecks in the back-end",
- "MetricExpr": "100 - (tma_bottleneck_big_code + tma_bottleneck_instruction_fetch_bw + tma_bottleneck_mispredictions + tma_bottleneck_cache_memory_bandwidth + tma_bottleneck_cache_memory_latency + tma_bottleneck_memory_data_tlbs + tma_bottleneck_memory_synchronization + tma_bottleneck_compute_bound_est + tma_bottleneck_irregular_overhead + tma_bottleneck_branching_overhead + tma_bottleneck_useful_work)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 - (tma_bottleneck_big_code + tma_bottleneck_instruction_fetch_bw + tma_bottleneck_mispredictions + tma_bottleneck_data_cache_memory_bandwidth + tma_bottleneck_data_cache_memory_latency + tma_bottleneck_memory_data_tlbs + tma_bottleneck_memory_synchronization + tma_bottleneck_compute_bound_est + tma_bottleneck_irregular_overhead + tma_bottleneck_branching_overhead + tma_bottleneck_useful_work)",
"MetricGroup": "BvOB;Cor;Offcore",
"MetricName": "tma_bottleneck_other_bottlenecks",
"MetricThreshold": "tma_bottleneck_other_bottlenecks > 20",
- "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls"
+ "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls."
},
{
- "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead",
+ "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + 2 * BR_INST_RETIRED.NEAR_CALL + INST_RETIRED.NOP) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
"MetricGroup": "BvUW;Ret",
"MetricName": "tma_bottleneck_useful_work",
@@ -463,17 +475,18 @@
"MetricExpr": "INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks + tma_unknown_branches",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_branch_resteers",
- "MetricThreshold": "tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_l3_hit_latency, tma_store_latency",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
"MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_cisc",
- "MetricThreshold": "tma_cisc > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
- "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
"ScaleUnit": "100%"
},
{
@@ -481,7 +494,7 @@
"MetricExpr": "(1 - BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT)) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
"MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
"MetricName": "tma_clears_resteers",
- "MetricThreshold": "tma_clears_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
"ScaleUnit": "100%"
},
@@ -490,7 +503,7 @@
"MetricExpr": "max(0, tma_itlb_misses - tma_code_stlb_miss)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group",
"MetricName": "tma_code_stlb_hit",
- "MetricThreshold": "tma_code_stlb_hit > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_hit > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
@@ -498,33 +511,33 @@
"MetricExpr": "ITLB_MISSES.WALK_ACTIVE / tma_info_thread_clks",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group",
"MetricName": "tma_code_stlb_miss",
- "MetricThreshold": "tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for (instruction) code accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for (instruction) code accesses.",
"MetricExpr": "tma_code_stlb_miss * ITLB_MISSES.WALK_COMPLETED_2M_4M / (ITLB_MISSES.WALK_COMPLETED_4K + ITLB_MISSES.WALK_COMPLETED_2M_4M)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group",
"MetricName": "tma_code_stlb_miss_2m",
- "MetricThreshold": "tma_code_stlb_miss_2m > 0.05 & tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss_2m > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for (instruction) code accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for (instruction) code accesses.",
"MetricExpr": "tma_code_stlb_miss * ITLB_MISSES.WALK_COMPLETED_4K / (ITLB_MISSES.WALK_COMPLETED_4K + ITLB_MISSES.WALK_COMPLETED_2M_4M)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group",
"MetricName": "tma_code_stlb_miss_4k",
- "MetricThreshold": "tma_code_stlb_miss_4k > 0.05 & tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss_4k > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "((47.5 * tma_info_system_core_frequency - 3.5 * tma_info_system_core_frequency) * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE / (OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE + OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD))) + (47.5 * tma_info_system_core_frequency - 3.5 * tma_info_system_core_frequency) * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "(44 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE / (OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE + OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD))) + 44 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
- "MetricThreshold": "tma_contested_accesses > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM, MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_bottleneck_memory_synchronization, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_bottleneck_memory_synchronization, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
@@ -535,25 +548,34 @@
"MetricName": "tma_core_bound",
"MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations)",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates (based on idle latencies) how often the CPU was stalled on accesses to external CXL Memory by loads (e.g",
+ "MetricExpr": "(((1 - ((19 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + 10 * (MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS))) / (19 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + 10 * (MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + (25 * (MEM_LOAD_RETIRED.LOCAL_PMM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) if #has_pmem > 0 else 0) + 33 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) if #has_pmem > 0 else 0))) if #has_pmem > 0 else 1)) * (CYCLE_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks + (CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks - tma_l2_bound) if 1e6 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM + MEM_LOAD_RETIRED.LOCAL_PMM) > MEM_LOAD_RETIRED.L1_MISS else 0) if #has_pmem > 0 else 0)",
+ "MetricGroup": "MemoryBound;Server;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_cxl_mem_bound",
+ "MetricThreshold": "tma_cxl_mem_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric roughly estimates (based on idle latencies) how often the CPU was stalled on accesses to external CXL Memory by loads (e.g. 3D-Xpoint (Crystal Ridge, a.k.a. IXP) memory, PMM - Persistent Memory Module [from CLX to SPR] or any other CXL Type3 Memory [EMR onwards]).",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "(47.5 * tma_info_system_core_frequency - 3.5 * tma_info_system_core_frequency) * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (1 - OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE / (OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE + OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "44 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (1 - OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE / (OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE + OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
- "MetricThreshold": "tma_data_sharing > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder",
- "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=0x1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=0x2@) / tma_info_core_core_clks / 2",
+ "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_core_clks / 2",
"MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group",
"MetricName": "tma_decoder0_alone",
- "MetricThreshold": "tma_decoder0_alone > 0.1 & tma_mite > 0.1 & tma_fetch_bandwidth > 0.2",
+ "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions",
"ScaleUnit": "100%"
},
@@ -562,17 +584,17 @@
"MetricExpr": "ARITH.DIVIDER_ACTIVE / tma_info_thread_clks",
"MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
- "MetricThreshold": "tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "CYCLE_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks + (CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks - tma_l2_bound",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks + (CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks - tma_l2_bound - tma_cxl_mem_bound if #has_pmem > 0 else CYCLE_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks + (CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks - tma_l2_bound)",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_dram_bound",
- "MetricThreshold": "tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_MISS",
"ScaleUnit": "100%"
},
@@ -582,7 +604,7 @@
"MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_dsb",
"MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2",
- "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
"ScaleUnit": "100%"
},
{
@@ -590,27 +612,27 @@
"MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_thread_clks",
"MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_dsb_switches",
- "MetricThreshold": "tma_dsb_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
"MetricConstraint": "NO_GROUP_EVENTS_NMI",
- "MetricExpr": "min(9 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=0x1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
+ "MetricExpr": "min(9 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
- "MetricThreshold": "tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_store",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
- "MetricExpr": "(9 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=0x1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_core_clks",
+ "MetricExpr": "(9 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_core_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
- "MetricThreshold": "tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_load",
"ScaleUnit": "100%"
},
{
@@ -619,18 +641,18 @@
"MetricExpr": "(110 * tma_info_system_core_frequency * (OCR.DEMAND_RFO.L3_MISS.REMOTE_HITM + OCR.PF_L2_RFO.L3_MISS.REMOTE_HITM) + 47.5 * tma_info_system_core_frequency * (OCR.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE + OCR.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE)) / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
- "MetricThreshold": "tma_false_sharing > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM, OCR.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
"MetricConstraint": "NO_GROUP_EVENTS_NMI",
- "MetricExpr": "tma_info_memory_load_miss_real_latency * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=0x1@ / tma_info_thread_clks",
+ "MetricExpr": "tma_info_memory_load_miss_real_latency * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
- "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
"ScaleUnit": "100%"
},
{
@@ -640,7 +662,7 @@
"MetricName": "tma_fetch_bandwidth",
"MetricThreshold": "tma_fetch_bandwidth > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1, FRONTEND_RETIRED.LATENCY_GE_1, FRONTEND_RETIRED.LATENCY_GE_2. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1;FRONTEND_RETIRED.LATENCY_GE_1;FRONTEND_RETIRED.LATENCY_GE_2. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
@@ -650,7 +672,7 @@
"MetricName": "tma_fetch_latency",
"MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16, FRONTEND_RETIRED.LATENCY_GE_8",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
"ScaleUnit": "100%"
},
{
@@ -670,7 +692,7 @@
"MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fp_arith",
"MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
"ScaleUnit": "100%"
},
{
@@ -679,7 +701,7 @@
"MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group",
"MetricName": "tma_fp_assists",
"MetricThreshold": "tma_fp_assists > 0.1",
- "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals)",
+ "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals).",
"ScaleUnit": "100%"
},
{
@@ -687,17 +709,16 @@
"MetricExpr": "FP_ARITH_INST_RETIRED.SCALAR / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_scalar",
- "MetricThreshold": "tma_fp_scalar > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
"PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
- "MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xFC@ / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@ / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_vector",
- "MetricThreshold": "tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
"PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -706,7 +727,7 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE) / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_128b",
- "MetricThreshold": "tma_fp_vector_128b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
"PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -715,7 +736,7 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_256b",
- "MetricThreshold": "tma_fp_vector_256b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
"PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -724,7 +745,7 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_512b",
- "MetricThreshold": "tma_fp_vector_512b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_vector_512b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
"PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -735,50 +756,51 @@
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions , where one uop can represent multiple contiguous instructions",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions",
"MetricExpr": "tma_light_operations * UOPS_RETIRED.MACRO_FUSED / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fused_instructions",
"MetricThreshold": "tma_fused_instructions > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions , where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
"MetricExpr": "(UOPS_RETIRED.RETIRE_SLOTS + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY) / tma_info_thread_slots",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_heavy_operations",
"MetricThreshold": "tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+])",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+])",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
- "MetricExpr": "(ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=0x1\\,edge\\=0x1@) / tma_info_thread_clks",
+ "MetricExpr": "(ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@) / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
- "MetricThreshold": "tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS, FRONTEND_RETIRED.L1I_MISS",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Branch Misprediction Cost: Cycles representing fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_bottleneck_mispredictions * tma_info_thread_slots / 4 / BR_MISP_RETIRED.ALL_BRANCHES / 100",
"MetricGroup": "Bad;BrMispredicts;tma_issueBM",
"MetricName": "tma_info_bad_spec_branch_misprediction_cost",
"PublicDescription": "Branch Misprediction Cost: Cycles representing fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_bottleneck_mispredictions, tma_branch_mispredicts, tma_mispredicts_resteers"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
"MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * BR_MISP_EXEC.INDIRECT)",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_indirect",
- "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1000"
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3"
},
{
"BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
@@ -803,7 +825,8 @@
},
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck",
- "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_latency + tma_fetch_bandwidth)) * (tma_dsb / (tma_mite + tma_dsb)))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_bandwidth + tma_fetch_latency)) * (tma_dsb / (tma_dsb + tma_mite)))",
"MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_bandwidth",
"MetricThreshold": "tma_info_botlnk_l2_dsb_bandwidth > 10",
@@ -812,7 +835,7 @@
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_mite / (tma_mite + tma_dsb))",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_mite))",
"MetricGroup": "DSBmiss;Fed;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_misses",
"MetricThreshold": "tma_info_botlnk_l2_dsb_misses > 10",
@@ -820,10 +843,12 @@
},
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
- "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
"MetricName": "tma_info_botlnk_l2_ic_misses",
- "MetricThreshold": "tma_info_botlnk_l2_ic_misses > 5"
+ "MetricThreshold": "tma_info_botlnk_l2_ic_misses > 5",
+ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: "
},
{
"BriefDescription": "Fraction of branches that are CALL or RET",
@@ -852,7 +877,7 @@
},
{
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
- "MetricExpr": "(CPU_CLK_UNHALTED.THREAD_ANY / 2 if #SMT_on else tma_info_thread_clks)",
+ "MetricExpr": "(CPU_CLK_UNHALTED.THREAD / 2 * (1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK) if #core_wide < 1 else (CPU_CLK_UNHALTED.THREAD_ANY / 2 if #SMT_on else tma_info_thread_clks))",
"MetricGroup": "SMT",
"MetricName": "tma_info_core_core_clks"
},
@@ -877,14 +902,14 @@
},
{
"BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
- "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xFC@) / (2 * tma_info_core_core_clks)",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
- "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)"
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=0x1@",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "tma_info_core_ilp"
},
@@ -897,20 +922,20 @@
"PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp"
},
{
- "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details",
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
"MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / DSB2MITE_SWITCHES.COUNT",
"MetricGroup": "DSBmiss",
"MetricName": "tma_info_frontend_dsb_switch_cost"
},
{
"BriefDescription": "Average number of Uops issued by front-end when it issued something",
- "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=0x1@",
+ "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=1@",
"MetricGroup": "Fed;FetchBW",
"MetricName": "tma_info_frontend_fetch_upc"
},
{
"BriefDescription": "Average Latency for L1 instruction cache misses",
- "MetricExpr": "ICACHE_16B.IFDATA_STALL / cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=0x1\\,edge\\=0x1@ + 2",
+ "MetricExpr": "ICACHE_16B.IFDATA_STALL / cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@ + 2",
"MetricGroup": "Fed;FetchLat;IcMiss",
"MetricName": "tma_info_frontend_icache_miss_latency"
},
@@ -946,7 +971,7 @@
"MetricName": "tma_info_frontend_tbpc"
},
{
- "BriefDescription": "Branch instructions per taken branch",
+ "BriefDescription": "Branch instructions per taken branch.",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;PGO",
"MetricName": "tma_info_inst_mix_bptkbranch"
@@ -960,12 +985,11 @@
},
{
"BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
- "MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xFC@)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@)",
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
"MetricThreshold": "tma_info_inst_mix_iparith < 10",
- "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW"
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
@@ -973,7 +997,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx128",
"MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
@@ -981,7 +1005,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx256",
"MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate)",
@@ -989,7 +1013,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx512",
"MetricThreshold": "tma_info_inst_mix_iparith_avx512 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
@@ -997,7 +1021,7 @@
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_dp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
@@ -1005,7 +1029,7 @@
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_sp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
@@ -1061,7 +1085,7 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
- "MetricThreshold": "tma_info_inst_mix_iptb < 4 * 2 + 1",
+ "MetricThreshold": "tma_info_inst_mix_iptb < 9",
"PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp"
},
{
@@ -1248,8 +1272,8 @@
"MetricName": "tma_info_memory_tlb_store_stlb_mpki"
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per core",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=0x1@)",
+ "BriefDescription": "Mem;Backend;CacheHits",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute"
},
@@ -1266,22 +1290,28 @@
"MetricName": "tma_info_pipeline_fetch_mite"
},
{
+ "BriefDescription": "Average number of uops fetched from MS per cycle",
+ "MetricExpr": "IDQ.MS_UOPS / cpu@IDQ.MS_UOPS\\,cmask\\=1@",
+ "MetricGroup": "Fed;FetchLat;MicroSeq",
+ "MetricName": "tma_info_pipeline_fetch_ms"
+ },
+ {
"BriefDescription": "Instructions per a microcode Assist invocation",
"MetricExpr": "INST_RETIRED.ANY / (FP_ASSIST.ANY + OTHER_ASSISTS.ANY)",
"MetricGroup": "MicroSeq;Pipeline;Ret;Retire",
"MetricName": "tma_info_pipeline_ipassist",
- "MetricThreshold": "tma_info_pipeline_ipassist < 100000",
+ "MetricThreshold": "tma_info_pipeline_ipassist < 100e3",
"PublicDescription": "Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)"
},
{
- "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=0x1@",
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
"MetricGroup": "Pipeline;Ret",
"MetricName": "tma_info_pipeline_retire"
},
{
"BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]",
- "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / tma_info_system_time",
+ "MetricExpr": "tma_info_system_turbo_utilization * msr@tsc@ / 1e9 / tma_info_system_time",
"MetricGroup": "Power;Summary",
"MetricName": "tma_info_system_core_frequency"
},
@@ -1293,16 +1323,28 @@
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
{
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for reads [GB / sec]",
+ "MetricExpr": "(64 * UNC_M_PMM_RPQ_INSERTS / 1e9 / tma_info_system_time if #has_pmem > 0 else 0)",
+ "MetricGroup": "MemOffcore;MemoryBW;Server;SoC",
+ "MetricName": "tma_info_system_cxl_mem_read_bw"
+ },
+ {
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for Writes [GB / sec]",
+ "MetricExpr": "(64 * UNC_M_PMM_WPQ_INSERTS / 1e9 / tma_info_system_time if #has_pmem > 0 else 0)",
+ "MetricGroup": "MemOffcore;MemoryBW;Server;SoC",
+ "MetricName": "tma_info_system_cxl_mem_write_bw"
+ },
+ {
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
"MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / tma_info_system_time",
"MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW",
"MetricName": "tma_info_system_dram_bw_use",
- "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth, tma_sq_full"
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth, tma_sq_full"
},
{
"BriefDescription": "Giga Floating Point Operations Per Second",
@@ -1331,14 +1373,13 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
"MetricGroup": "Branches;OS",
"MetricName": "tma_info_system_ipfarbranch",
- "MetricThreshold": "tma_info_system_ipfarbranch < 1000000"
+ "MetricThreshold": "tma_info_system_ipfarbranch < 1e6"
},
{
"BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
"MetricGroup": "OS",
- "MetricName": "tma_info_system_kernel_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_system_kernel_cpi"
},
{
"BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
@@ -1356,12 +1397,19 @@
},
{
"BriefDescription": "Average number of parallel data read requests to external memory",
- "MetricExpr": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / cha@UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD\\,thresh\\=0x1@",
+ "MetricExpr": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD@thresh\\=1@",
"MetricGroup": "Mem;MemoryBW;SoC",
"MetricName": "tma_info_system_mem_parallel_reads",
"PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches"
},
{
+ "BriefDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]",
+ "MetricExpr": "(1e9 * (UNC_M_PMM_RPQ_OCCUPANCY.ALL / UNC_M_PMM_RPQ_INSERTS) / imc_0@event\\=0x0@ if #has_pmem > 0 else 0)",
+ "MetricGroup": "MemOffcore;MemoryLat;Server;SoC",
+ "MetricName": "tma_info_system_mem_pmm_read_latency",
+ "PublicDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches"
+ },
+ {
"BriefDescription": "Average latency of data read request to external memory (in nanoseconds)",
"MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / UNC_CHA_TOR_INSERTS.IA_MISS_DRD) / (tma_info_system_socket_clks / tma_info_system_time)",
"MetricGroup": "Mem;MemoryLat;SoC",
@@ -1386,7 +1434,7 @@
"MetricExpr": "(CORE_POWER.LVL0_TURBO_LICENSE / 2 / tma_info_core_core_clks if #SMT_on else CORE_POWER.LVL0_TURBO_LICENSE / tma_info_core_core_clks)",
"MetricGroup": "Power",
"MetricName": "tma_info_system_power_license0_utilization",
- "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes"
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes."
},
{
"BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1",
@@ -1394,7 +1442,7 @@
"MetricGroup": "Power",
"MetricName": "tma_info_system_power_license1_utilization",
"MetricThreshold": "tma_info_system_power_license1_utilization > 0.5",
- "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions"
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions."
},
{
"BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX)",
@@ -1402,7 +1450,7 @@
"MetricGroup": "Power",
"MetricName": "tma_info_system_power_license2_utilization",
"MetricThreshold": "tma_info_system_power_license2_utilization > 0.5",
- "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX). This includes high current AVX 512-bit instructions"
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX). This includes high current AVX 512-bit instructions."
},
{
"BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
@@ -1436,7 +1484,7 @@
"MetricName": "tma_info_system_uncore_frequency"
},
{
- "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active",
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Pipeline",
"MetricName": "tma_info_thread_clks"
@@ -1445,15 +1493,14 @@
"BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / tma_info_thread_ipc",
"MetricGroup": "Mem;Pipeline",
- "MetricName": "tma_info_thread_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_thread_cpi"
},
{
"BriefDescription": "The ratio of Executed- by Issued-Uops",
"MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
"MetricGroup": "Cor;Pipeline",
"MetricName": "tma_info_thread_execute_per_issue",
- "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage"
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
},
{
"BriefDescription": "Instructions Per Cycle (per Logical Processor)",
@@ -1479,15 +1526,15 @@
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
- "MetricThreshold": "tma_info_thread_uptb < 4 * 1.5"
+ "MetricThreshold": "tma_info_thread_uptb < 6"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
"MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
- "MetricThreshold": "tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS, FRONTEND_RETIRED.ITLB_MISS",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
"ScaleUnit": "100%"
},
{
@@ -1495,26 +1542,27 @@
"MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_thread_clks, 0)",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
"MetricName": "tma_l1_bound",
- "MetricThreshold": "tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 Data (L1D) cache. The L1D cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1D. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache",
+ "BriefDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "min(2 * (MEM_INST_RETIRED.ALL_LOADS - MEM_LOAD_RETIRED.FB_HIT - MEM_LOAD_RETIRED.L1_MISS) * 20 / 100, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_l1_latency_dependency",
- "MetricThreshold": "tma_l1_latency_dependency > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache. The short latency of the L1D cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
+ "MetricThreshold": "tma_l1_latency_dependency > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache. The short latency of the L1D cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=0x1@) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks)",
+ "MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks)",
"MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
- "MetricThreshold": "tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT",
"ScaleUnit": "100%"
},
@@ -1523,7 +1571,7 @@
"MetricExpr": "3.5 * tma_info_system_core_frequency * MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_l2_bound_group",
"MetricName": "tma_l2_hit_latency",
- "MetricThreshold": "tma_l2_hit_latency > 0.05 & tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l2_hit_latency > 0.05 & (tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L2 cache under unloaded scenarios (possibly L2 latency limited). Avoiding L1 cache misses (i.e. L1 misses/L2 hits) will improve the latency. Sample with: MEM_LOAD_RETIRED.L2_HIT",
"ScaleUnit": "100%"
},
@@ -1532,17 +1580,17 @@
"MetricExpr": "(CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS) / tma_info_thread_clks",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
- "MetricThreshold": "tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
- "MetricExpr": "(20.5 * tma_info_system_core_frequency - 3.5 * tma_info_system_core_frequency) * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks",
+ "MetricExpr": "17 * tma_info_system_core_frequency * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
- "MetricThreshold": "tma_l3_hit_latency > 0.1 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT. Related metrics: tma_bottleneck_cache_memory_latency, tma_branch_resteers, tma_mem_latency, tma_store_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_bottleneck_data_cache_memory_latency, tma_mem_latency",
"ScaleUnit": "100%"
},
{
@@ -1550,22 +1598,23 @@
"MetricExpr": "DECODE.LCP / tma_info_thread_clks",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_lcp",
- "MetricThreshold": "tma_lcp > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation)",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
"MetricExpr": "tma_retiring - tma_heavy_operations",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_light_operations",
"MetricThreshold": "tma_light_operations > 0.6",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_2 + UOPS_DISPATCHED_PORT.PORT_3 + UOPS_DISPATCHED_PORT.PORT_7 - UOPS_DISPATCHED_PORT.PORT_4) / (2 * tma_info_core_core_clks)",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_load_op_utilization",
@@ -1579,7 +1628,7 @@
"MetricExpr": "tma_dtlb_load - tma_load_stlb_miss",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
"MetricName": "tma_load_stlb_hit",
- "MetricThreshold": "tma_load_stlb_hit > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_hit > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
@@ -1587,48 +1636,52 @@
"MetricExpr": "DTLB_LOAD_MISSES.WALK_ACTIVE / tma_info_thread_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
"MetricName": "tma_load_stlb_miss",
- "MetricThreshold": "tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data load accesses.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_1G / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_1g",
- "MetricThreshold": "tma_load_stlb_miss_1g > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_1g > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data load accesses.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_2m",
- "MetricThreshold": "tma_load_stlb_miss_2m > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_2m > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data load accesses.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_4K / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_4k",
- "MetricThreshold": "tma_load_stlb_miss_4k > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_4k > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory",
- "MetricExpr": "(80 * tma_info_system_core_frequency - 20.5 * tma_info_system_core_frequency) * MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "59.5 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "Server;TopdownL5;tma_L5_group;tma_mem_latency_group",
"MetricName": "tma_local_mem",
- "MetricThreshold": "tma_local_mem > 0.1 & tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_local_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. Sample with: MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(12 * max(0, MEM_INST_RETIRED.LOCK_LOADS - L2_RQSTS.ALL_RFO) + MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES * (11 * L2_RQSTS.RFO_HIT + min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO))) / tma_info_thread_clks",
"MetricGroup": "LockCont;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
- "MetricThreshold": "tma_lock_latency > 0.2 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
"ScaleUnit": "100%"
},
@@ -1645,11 +1698,11 @@
},
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
- "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=0x4@) / tma_info_thread_clks",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
- "MetricThreshold": "tma_mem_bandwidth > 0.2 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
"ScaleUnit": "100%"
},
{
@@ -1657,8 +1710,8 @@
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
"MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
- "MetricThreshold": "tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_bottleneck_cache_memory_latency, tma_l3_hit_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_bottleneck_data_cache_memory_latency, tma_l3_hit_latency",
"ScaleUnit": "100%"
},
{
@@ -1669,11 +1722,11 @@
"MetricName": "tma_memory_bound",
"MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two)",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations , uops for memory load or store accesses",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
"MetricExpr": "tma_light_operations * MEM_INST_RETIRED.ANY / INST_RETIRED.ANY",
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_memory_operations",
@@ -1682,7 +1735,6 @@
},
{
"BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_thread_slots",
"MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
"MetricName": "tma_microcode_sequencer",
@@ -1692,10 +1744,11 @@
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
"MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
"MetricName": "tma_mispredicts_resteers",
- "MetricThreshold": "tma_mispredicts_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_bottleneck_mispredictions, tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost",
"ScaleUnit": "100%"
},
@@ -1709,12 +1762,12 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued , the Count Domain; [ADL+] cycles)",
+ "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles)",
"MetricExpr": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH / UOPS_ISSUED.ANY",
"MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group",
"MetricName": "tma_mixing_vectors",
"MetricThreshold": "tma_mixing_vectors > 0.05",
- "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued , the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
+ "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
"ScaleUnit": "100%"
},
{
@@ -1722,7 +1775,7 @@
"MetricExpr": "2 * IDQ.MS_SWITCHES / tma_info_thread_clks",
"MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
"MetricName": "tma_ms_switches",
- "MetricThreshold": "tma_ms_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_bottleneck_irregular_overhead, tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
"ScaleUnit": "100%"
},
@@ -1732,7 +1785,7 @@
"MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_non_fused_branches",
"MetricThreshold": "tma_non_fused_branches > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused.",
"ScaleUnit": "100%"
},
{
@@ -1740,12 +1793,13 @@
"MetricExpr": "tma_light_operations * INST_RETIRED.NOP / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "BvBO;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_nop_instructions",
- "MetricThreshold": "tma_nop_instructions > 0.1 & tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
+ "MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP_PS",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches))",
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_other_light_ops",
@@ -1754,19 +1808,21 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types)",
+ "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "max(tma_branch_mispredicts * (1 - BR_MISP_RETIRED.ALL_BRANCHES / (INT_MISC.CLEARS_COUNT - MACHINE_CLEARS.COUNT)), 0.0001)",
"MetricGroup": "BrMispredicts;BvIO;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_other_mispredicts",
- "MetricThreshold": "tma_other_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering",
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "max(tma_machine_clears * (1 - MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.COUNT), 0.0001)",
"MetricGroup": "BvIO;Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_other_nukes",
- "MetricThreshold": "tma_other_nukes > 0.05 & tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
@@ -1775,7 +1831,7 @@
"MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_0",
"MetricThreshold": "tma_port_0 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED_PORT.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1784,7 +1840,7 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_1",
"MetricThreshold": "tma_port_1 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1820,7 +1876,7 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_5",
"MetricThreshold": "tma_port_5 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1829,7 +1885,7 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_6",
"MetricThreshold": "tma_port_6 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1843,11 +1899,12 @@
},
{
"BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "((tma_ports_utilized_0 * tma_info_thread_clks + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / tma_info_thread_clks if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_thread_clks)",
"MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_ports_utilization",
- "MetricThreshold": "tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
"ScaleUnit": "100%"
},
{
@@ -1855,8 +1912,8 @@
"MetricExpr": "EXE_ACTIVITY.EXE_BOUND_0_PORTS / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
- "MetricThreshold": "tma_ports_utilized_0 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
"ScaleUnit": "100%"
},
{
@@ -1864,7 +1921,7 @@
"MetricExpr": "((UOPS_EXECUTED.CORE_CYCLES_GE_1 - UOPS_EXECUTED.CORE_CYCLES_GE_2) / 2 if #SMT_on else EXE_ACTIVITY.1_PORTS_UTIL) / tma_info_core_core_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_1",
- "MetricThreshold": "tma_ports_utilized_1 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Related metrics: tma_l1_bound",
"ScaleUnit": "100%"
},
@@ -1873,35 +1930,35 @@
"MetricExpr": "((UOPS_EXECUTED.CORE_CYCLES_GE_2 - UOPS_EXECUTED.CORE_CYCLES_GE_3) / 2 if #SMT_on else EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_core_core_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_2",
- "MetricThreshold": "tma_ports_utilized_2 > 0.15 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).",
"MetricExpr": "(UOPS_EXECUTED.CORE_CYCLES_GE_3 / 2 if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_3) / tma_info_core_core_clks",
"MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
- "MetricThreshold": "tma_ports_utilized_3m > 0.4 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues",
"MetricConstraint": "NO_GROUP_EVENTS_NMI",
- "MetricExpr": "((110 * tma_info_system_core_frequency - 20.5 * tma_info_system_core_frequency) * MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM + (110 * tma_info_system_core_frequency - 20.5 * tma_info_system_core_frequency) * MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "(89.5 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM + 89.5 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "Offcore;Server;Snoop;TopdownL5;tma_L5_group;tma_issueSyncxn;tma_mem_latency_group",
"MetricName": "tma_remote_cache",
- "MetricThreshold": "tma_remote_cache > 0.05 & tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues. This is caused often due to non-optimal NUMA allocations. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM, MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_remote_cache > 0.05 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM_PS;MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD_PS. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory",
- "MetricExpr": "(147.5 * tma_info_system_core_frequency - 20.5 * tma_info_system_core_frequency) * MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "127 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "Server;Snoop;TopdownL5;tma_L5_group;tma_mem_latency_group",
"MetricName": "tma_remote_mem",
- "MetricThreshold": "tma_remote_mem > 0.1 & tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
+ "MetricThreshold": "tma_remote_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM_PS",
"ScaleUnit": "100%"
},
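
The rewritten tma_remote_cache and tma_remote_mem expressions fold a constant subtraction that the old forms spelled out: (110 - 20.5) * frequency = 89.5 * frequency and (147.5 - 20.5) * frequency = 127 * frequency, so only the notation changes, not the value. A quick arithmetic check in Python:

core_frequency = 2.5  # illustrative GHz value; the identity holds for any value
assert (110 - 20.5) * core_frequency == 89.5 * core_frequency   # tma_remote_cache
assert (147.5 - 20.5) * core_frequency == 127 * core_frequency  # tma_remote_mem
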
{
@@ -1919,7 +1976,7 @@
"MetricExpr": "PARTIAL_RAT_STALLS.SCOREBOARD / tma_info_thread_clks",
"MetricGroup": "BvIO;PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
"MetricName": "tma_serializing_operation",
- "MetricThreshold": "tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: PARTIAL_RAT_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
"ScaleUnit": "100%"
},
@@ -1928,8 +1985,8 @@
"MetricExpr": "40 * ROB_MISC_EVENTS.PAUSE_INST / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_slow_pause",
- "MetricThreshold": "tma_slow_pause > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. Sample with: ROB_MISC_EVENTS.PAUSE_INST",
+ "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. Sample with: MISC_RETIRED.PAUSE_INST",
"ScaleUnit": "100%"
},
{
@@ -1939,7 +1996,7 @@
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_split_loads",
"MetricThreshold": "tma_split_loads > 0.3",
- "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS_PS",
"ScaleUnit": "100%"
},
{
@@ -1947,8 +2004,8 @@
"MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
"MetricName": "tma_split_stores",
- "MetricThreshold": "tma_split_stores > 0.2 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES. Related metrics: tma_port_4",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
"ScaleUnit": "100%"
},
{
@@ -1956,8 +2013,8 @@
"MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_core_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
- "MetricThreshold": "tma_sq_full > 0.3 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
"ScaleUnit": "100%"
},
{
@@ -1965,8 +2022,8 @@
"MetricExpr": "EXE_ACTIVITY.BOUND_ON_STORES / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_store_bound",
- "MetricThreshold": "tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES_PS",
"ScaleUnit": "100%"
},
{
@@ -1974,8 +2031,8 @@
"MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
- "MetricThreshold": "tma_store_fwd_blk > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
"ScaleUnit": "100%"
},
{
@@ -1984,8 +2041,8 @@
"MetricExpr": "(L2_RQSTS.RFO_HIT * 11 * (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) + (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
"MetricGroup": "BvML;LockCont;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
- "MetricThreshold": "tma_store_latency > 0.1 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_branch_resteers, tma_fb_full, tma_l3_hit_latency, tma_lock_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
"ScaleUnit": "100%"
},
{
@@ -2001,7 +2058,7 @@
"MetricExpr": "tma_dtlb_store - tma_store_stlb_miss",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
"MetricName": "tma_store_stlb_hit",
- "MetricThreshold": "tma_store_stlb_hit > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_hit > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
@@ -2009,31 +2066,34 @@
"MetricExpr": "DTLB_STORE_MISSES.WALK_ACTIVE / tma_info_core_core_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
"MetricName": "tma_store_stlb_miss",
- "MetricThreshold": "tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data store accesses.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_1G / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_1g",
- "MetricThreshold": "tma_store_stlb_miss_1g > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_1g > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data store accesses.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_2m",
- "MetricThreshold": "tma_store_stlb_miss_2m > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_2m > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data store accesses.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_4K / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_4k",
- "MetricThreshold": "tma_store_stlb_miss_4k > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_4k > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
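
The tma_store_stlb_miss_4k/_2m/_1g children weight the parent tma_store_stlb_miss by each page size's share of completed walks, so the three always sum back to the parent. A short sketch with hypothetical walk counts (the dictionary keys mirror the JSON event fields; the numbers are made up):

walks = {"WALK_COMPLETED_4K": 120, "WALK_COMPLETED_2M_4M": 30, "WALK_COMPLETED_1G": 10}
tma_store_stlb_miss = 0.08  # hypothetical parent value
total = sum(walks.values())
children = {name: tma_store_stlb_miss * count / total for name, count in walks.items()}
assert abs(sum(children.values()) - tma_store_stlb_miss) < 1e-12  # children partition the parent
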
{
@@ -2041,7 +2101,7 @@
"MetricExpr": "9 * BACLEARS.ANY / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
- "MetricThreshold": "tma_unknown_branches > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: BACLEARS.ANY",
"ScaleUnit": "100%"
},
@@ -2050,8 +2110,8 @@
"MetricExpr": "tma_retiring * UOPS_EXECUTED.X87 / UOPS_EXECUTED.THREAD",
"MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
"MetricName": "tma_x87_use",
- "MetricThreshold": "tma_x87_use > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
"ScaleUnit": "100%"
},
{
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json b/tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json
index 1c709983b65f..3ef6f00f1135 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json
@@ -111,7 +111,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xCF",
"EventName": "FP_ARITH_INST_RETIRED2.128BIT_PACKED_BF16",
- "PublicDescription": "Counts once for each Intel AVX-512 computational 512-bit packed BFloat16 floating-point instruction retired. Applies to the ZMM based VDPBF16PS instruction. Each count represents 64 computation operations. This event is only supported on products formerly named Cooper Lake and is not supported on products formerly named Cascade Lake.",
+ "PublicDescription": "Counts once for each Intel AVX-512 computational 128-bit packed BFloat16 floating-point instruction retired. Applies to the XMM based VDPBF16PS instruction. Each count represents 16 computation operations. This event is only supported on products formerly named Cooper Lake and is not supported on products formerly named Cascade Lake.",
"SampleAfterValue": "2000003",
"UMask": "0x20"
},
@@ -120,7 +120,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xCF",
"EventName": "FP_ARITH_INST_RETIRED2.256BIT_PACKED_BF16",
- "PublicDescription": "Counts once for each Intel AVX-512 computational 128-bit packed BFloat16 floating-point instruction retired. Applies to the XMM based VDPBF16PS instruction. Each count represents 16 computation operations. This event is only supported on products formerly named Cooper Lake and is not supported on products formerly named Cascade Lake.",
+ "PublicDescription": "Counts once for each Intel AVX-512 computational 256-bit packed BFloat16 floating-point instruction retired. Applies to the YMM based VDPBF16PS instruction. Each count represents 32 computation operations. This event is only supported on products formerly named Cooper Lake and is not supported on products formerly named Cascade Lake.",
"SampleAfterValue": "2000003",
"UMask": "0x40"
},
@@ -129,7 +129,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xCF",
"EventName": "FP_ARITH_INST_RETIRED2.512BIT_PACKED_BF16",
- "PublicDescription": "Counts once for each Intel AVX-512 computational 256-bit packed BFloat16 floating-point instruction retired. Applies to the YMM based VDPBF16PS instruction. Each count represents 32 computation operations. This event is only supported on products formerly named Cooper Lake and is not supported on products formerly named Cascade Lake.",
+ "PublicDescription": "Counts once for each Intel AVX-512 computational 512-bit packed BFloat16 floating-point instruction retired. Applies to the ZMM based VDPBF16PS instruction. Each count represents 64 computation operations. This event is only supported on products formerly named Cooper Lake and is not supported on products formerly named Cascade Lake.",
"SampleAfterValue": "2000003",
"UMask": "0x80"
},
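
The three corrected BF16 descriptions now pair each umask with its register width, and the stated operation counts follow one pattern: 16, 32, and 64 operations per count for XMM, YMM, and ZMM VDPBF16PS respectively, i.e. one counted computation operation per byte of vector width. A trivial consistency check:

# one computation operation per byte of vector width (XMM, YMM, ZMM VDPBF16PS)
for width_bits, ops_per_count in ((128, 16), (256, 32), (512, 64)):
    assert width_bits // 8 == ops_per_count
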
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/other.json b/tools/perf/pmu-events/arch/x86/cascadelakex/other.json
index f25693b17b8b..51833bce994e 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/other.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/other.json
@@ -36,62 +36,6 @@
"UMask": "0x40"
},
{
- "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IFWDFE",
- "Counter": "0,1,2,3",
- "EventCode": "0xEF",
- "EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDFE",
- "SampleAfterValue": "2000003",
- "UMask": "0x20"
- },
- {
- "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IFWDM",
- "Counter": "0,1,2,3",
- "EventCode": "0xEF",
- "EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDM",
- "SampleAfterValue": "2000003",
- "UMask": "0x10"
- },
- {
- "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IHITFSE",
- "Counter": "0,1,2,3",
- "EventCode": "0xEF",
- "EventName": "CORE_SNOOP_RESPONSE.RSP_IHITFSE",
- "SampleAfterValue": "2000003",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IHITI",
- "Counter": "0,1,2,3",
- "EventCode": "0xEF",
- "EventName": "CORE_SNOOP_RESPONSE.RSP_IHITI",
- "SampleAfterValue": "2000003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SFWDFE",
- "Counter": "0,1,2,3",
- "EventCode": "0xEF",
- "EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDFE",
- "SampleAfterValue": "2000003",
- "UMask": "0x40"
- },
- {
- "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SFWDM",
- "Counter": "0,1,2,3",
- "EventCode": "0xEF",
- "EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDM",
- "SampleAfterValue": "2000003",
- "UMask": "0x8"
- },
- {
- "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SHITFSE",
- "Counter": "0,1,2,3",
- "EventCode": "0xEF",
- "EventName": "CORE_SNOOP_RESPONSE.RSP_SHITFSE",
- "SampleAfterValue": "2000003",
- "UMask": "0x4"
- },
- {
"BriefDescription": "Number of hardware interrupts received by the processor.",
"Counter": "0,1,2,3",
"EventCode": "0xCB",
@@ -101,24 +45,6 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts number of cache lines that are dropped and not written back to L3 as they are deemed to be less likely to be reused shortly",
- "Counter": "0,1,2,3",
- "EventCode": "0xFE",
- "EventName": "IDI_MISC.WB_DOWNGRADE",
- "PublicDescription": "Counts number of cache lines that are dropped and not written back to L3 as they are deemed to be less likely to be reused shortly.",
- "SampleAfterValue": "100003",
- "UMask": "0x4"
- },
- {
- "BriefDescription": "Counts number of cache lines that are allocated and written back to L3 with the intention that they are more likely to be reused shortly",
- "Counter": "0,1,2,3",
- "EventCode": "0xFE",
- "EventName": "IDI_MISC.WB_UPGRADE",
- "PublicDescription": "Counts number of cache lines that are allocated and written back to L3 with the intention that they are more likely to be reused shortly.",
- "SampleAfterValue": "100003",
- "UMask": "0x2"
- },
- {
"BriefDescription": "OCR.ALL_DATA_RD.ANY_RESPONSE have any response type.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -669,336 +595,6 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads have any response type.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80400004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x80400004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x100400004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80020004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000020004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x800020004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x400020004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x100020004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x200020004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x80020004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads have any response type.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80400001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x80400001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x100400001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80020001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000020001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x800020001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x400020001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x100020001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x200020001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x80020001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) have any response type.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80400002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x80400002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x100400002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80020002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000020002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x800020002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x400020002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x100020002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x200020002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x80020002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
"BriefDescription": "Counts any other requests have any response type.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json b/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json
index 3dd296ab4d78..9a1349527b66 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json
@@ -542,7 +542,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "LOAD_HIT_PRE.SW_PF",
- "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "PublicDescription": "Counts all software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-cache.json b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-cache.json
index c9596e18ec09..30390d734051 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-cache.json
@@ -22,7 +22,7 @@
"Unit": "CHA"
},
{
- "BriefDescription": "LLC misses - Uncacheable reads (from cpu) . Derived from unc_cha_tor_inserts.ia_miss",
+ "BriefDescription": "LLC misses - Uncacheable reads (from cpu). Derived from unc_cha_tor_inserts.ia_miss",
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.UNCACHEABLE",
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json
index 265cdf334f6a..aafa7af46e69 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json
@@ -316,32 +316,32 @@
"Unit": "iMC"
},
{
- "BriefDescription": "Intel Optane DC persistent memory bandwidth read (MB/sec). Derived from unc_m_pmm_rpq_inserts",
+ "BriefDescription": "Intel Optane DC persistent memory bandwidth read (MiB/sec). Derived from unc_m_pmm_rpq_inserts",
"Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M_PMM_BANDWIDTH.READ",
"PerPkg": "1",
- "ScaleUnit": "6.103515625E-5MB/sec",
+ "ScaleUnit": "6.103515625E-5MiB/sec",
"Unit": "iMC"
},
{
- "BriefDescription": "Intel Optane DC persistent memory bandwidth total (MB/sec). Derived from unc_m_pmm_rpq_inserts",
+ "BriefDescription": "Intel Optane DC persistent memory bandwidth total (MiB/sec). Derived from unc_m_pmm_rpq_inserts",
"Counter": "0,1,2,3",
"EventCode": "0xE3",
"EventName": "UNC_M_PMM_BANDWIDTH.TOTAL",
"MetricExpr": "UNC_M_PMM_RPQ_INSERTS + UNC_M_PMM_WPQ_INSERTS",
"MetricName": "UNC_M_PMM_BANDWIDTH.TOTAL",
"PerPkg": "1",
- "ScaleUnit": "6.103515625E-5MB/sec",
+ "ScaleUnit": "6.103515625E-5MiB/sec",
"Unit": "iMC"
},
{
- "BriefDescription": "Intel Optane DC persistent memory bandwidth write (MB/sec). Derived from unc_m_pmm_wpq_inserts",
+ "BriefDescription": "Intel Optane DC persistent memory bandwidth write (MiB/sec). Derived from unc_m_pmm_wpq_inserts",
"Counter": "0,1,2,3",
"EventCode": "0xE7",
"EventName": "UNC_M_PMM_BANDWIDTH.WRITE",
"PerPkg": "1",
- "ScaleUnit": "6.103515625E-5MB/sec",
+ "ScaleUnit": "6.103515625E-5MiB/sec",
"Unit": "iMC"
},
{
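
The MB-to-MiB rename matches the scale factor already in these entries: each PMM read/write-pending-queue insert moves one 64-byte cache line, and 64 / 2^20 = 6.103515625e-5, a binary mebibyte fraction rather than a decimal megabyte one. Verified in Python:

CACHE_LINE_BYTES = 64
assert CACHE_LINE_BYTES / 2**20 == 6.103515625e-5  # exact: 2**6 / 2**20 == 2**-14
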
diff --git a/tools/perf/pmu-events/arch/x86/clearwaterforest/cache.json b/tools/perf/pmu-events/arch/x86/clearwaterforest/cache.json
index 875361b30f1d..ecb7dc252208 100644
--- a/tools/perf/pmu-events/arch/x86/clearwaterforest/cache.json
+++ b/tools/perf/pmu-events/arch/x86/clearwaterforest/cache.json
@@ -22,6 +22,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "PublicDescription": "Counts the number of load ops retired. Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x81"
},
@@ -30,6 +31,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "PublicDescription": "Counts the number of store ops retired. Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x82"
},
@@ -40,6 +42,7 @@
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_1024",
"MSRIndex": "0x3F6",
"MSRValue": "0x400",
+ "PublicDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
@@ -50,6 +53,7 @@
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
"MSRValue": "0x80",
+ "PublicDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
@@ -60,6 +64,7 @@
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
"MSRValue": "0x10",
+ "PublicDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
@@ -70,6 +75,7 @@
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_2048",
"MSRIndex": "0x3F6",
"MSRValue": "0x800",
+ "PublicDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
@@ -80,6 +86,7 @@
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_256",
"MSRIndex": "0x3F6",
"MSRValue": "0x100",
+ "PublicDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
@@ -90,6 +97,7 @@
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
"MSRValue": "0x20",
+ "PublicDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
@@ -100,6 +108,7 @@
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
"MSRValue": "0x4",
+ "PublicDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
@@ -110,6 +119,7 @@
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
"MSRValue": "0x200",
+ "PublicDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
@@ -120,6 +130,7 @@
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
"MSRValue": "0x40",
+ "PublicDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
@@ -130,6 +141,7 @@
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
"MSRValue": "0x8",
+ "PublicDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x5"
},
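
For the MEM_UOPS_RETIRED.LOAD_LATENCY_GT_* events above, the value programmed into MSR 0x3F6 is simply the latency threshold in cycles; the hex MSRValue and the decimal suffix in the event name encode the same number:

# MSRValue (hex) versus the GT_<n> threshold (decimal cycles) from the hunk above
thresholds = {0x4: 4, 0x8: 8, 0x10: 16, 0x20: 32, 0x40: 64,
              0x80: 128, 0x100: 256, 0x200: 512, 0x400: 1024, 0x800: 2048}
for msr_value, gt_cycles in thresholds.items():
    assert msr_value == gt_cycles
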
@@ -138,7 +150,30 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.STORE_LATENCY",
+ "PublicDescription": "Counts the number of stores uops retired same as MEM_UOPS_RETIRED.ALL_STORES Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x6"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "PublicDescription": "Counts demand data reads that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "PublicDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
}
]
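
Note: the hunks above follow one mechanical pattern — each event's PublicDescription gains an "Available PDIST counters: N" suffix, seeded from the BriefDescription when no PublicDescription existed. A minimal sketch of that transformation, assuming a uniform per-file counter string (the patch itself varies it per event, e.g. "0,1" for the load-latency events and "32" for the fixed counter) and an illustrative path:

import json

def add_pdist_suffix(path, pdist="0"):
    # Append the PDIST-counter note to every event's PublicDescription,
    # falling back to the BriefDescription when none exists yet.
    with open(path) as f:
        events = json.load(f)
    suffix = f" Available PDIST counters: {pdist}"
    for ev in events:
        desc = ev.get("PublicDescription") or ev.get("BriefDescription", "")
        if desc and "Available PDIST counters:" not in desc:
            ev["PublicDescription"] = desc + suffix
    with open(path, "w") as f:
        json.dump(events, f, indent=4, sort_keys=True)
        f.write("\n")

# e.g. add_pdist_suffix("tools/perf/pmu-events/arch/x86/clearwaterforest/cache.json")
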
diff --git a/tools/perf/pmu-events/arch/x86/clearwaterforest/memory.json b/tools/perf/pmu-events/arch/x86/clearwaterforest/memory.json
index f5007e56f39b..58e543550279 100644
--- a/tools/perf/pmu-events/arch/x86/clearwaterforest/memory.json
+++ b/tools/perf/pmu-events/arch/x86/clearwaterforest/memory.json
@@ -6,6 +6,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x33FBFC00001",
+ "PublicDescription": "Counts demand data reads that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16,6 +17,7 @@
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x33FBFC00002",
+ "PublicDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
}
diff --git a/tools/perf/pmu-events/arch/x86/clearwaterforest/other.json b/tools/perf/pmu-events/arch/x86/clearwaterforest/other.json
deleted file mode 100644
index 80454e497f83..000000000000
--- a/tools/perf/pmu-events/arch/x86/clearwaterforest/other.json
+++ /dev/null
@@ -1,22 +0,0 @@
-[
- {
- "BriefDescription": "Counts demand data reads that have any type of response.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- }
-]
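
Note: the two OCR events deleted with other.json are not dropped — they reappear in cache.json above with identical EventCode/MSRIndex/MSRValue/UMask encodings, gaining only the new PublicDescription. A small consistency check for this kind of move, in the same sketch style (paths are illustrative):

import json

MOVE_KEYS = ("EventCode", "MSRIndex", "MSRValue", "UMask")

def check_moved_events(old_events, new_path):
    # Every event removed from the old file must exist in the new file
    # with an unchanged hardware encoding.
    with open(new_path) as f:
        new_by_name = {ev["EventName"]: ev for ev in json.load(f)}
    for old in old_events:
        moved = new_by_name.get(old["EventName"])
        assert moved is not None, f"{old['EventName']} dropped, not moved"
        for key in MOVE_KEYS:
            assert moved.get(key) == old.get(key), f"{old['EventName']}: {key} changed"

# e.g. check_moved_events(json.load(open("other.json.orig")),
#                         "tools/perf/pmu-events/arch/x86/clearwaterforest/cache.json")
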
diff --git a/tools/perf/pmu-events/arch/x86/clearwaterforest/pipeline.json b/tools/perf/pmu-events/arch/x86/clearwaterforest/pipeline.json
index 6a5faa704b85..26bd12fefa3d 100644
--- a/tools/perf/pmu-events/arch/x86/clearwaterforest/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/clearwaterforest/pipeline.json
@@ -4,7 +4,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "PublicDescription": "Counts the total number of instructions in which the instruction pointer (IP) of the processor is resteered due to a branch instruction and the branch instruction successfully retires. All branch type instructions are accounted for.",
+ "PublicDescription": "Counts the total number of instructions in which the instruction pointer (IP) of the processor is resteered due to a branch instruction and the branch instruction successfully retires. All branch type instructions are accounted for. Available PDIST counters: 0,1",
"SampleAfterValue": "1000003"
},
{
@@ -12,7 +12,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "PublicDescription": "Counts the total number of mispredicted branch instructions retired. All branch type instructions are accounted for. Prediction of the branch target address enables the processor to begin executing instructions before the non-speculative execution path is known. The branch prediction unit (BPU) predicts the target address based on the instruction pointer (IP) of the branch and on the execution path through which execution reached this IP. A branch misprediction occurs when the prediction is wrong, and results in discarding all instructions executed in the speculative path and re-fetching from the correct path.",
+ "PublicDescription": "Counts the total number of mispredicted branch instructions retired. All branch type instructions are accounted for. Prediction of the branch target address enables the processor to begin executing instructions before the non-speculative execution path is known. The branch prediction unit (BPU) predicts the target address based on the instruction pointer (IP) of the branch and on the execution path through which execution reached this IP. A branch misprediction occurs when the prediction is wrong, and results in discarding all instructions executed in the speculative path and re-fetching from the correct path. Available PDIST counters: 0,1",
"SampleAfterValue": "1000003"
},
{
@@ -63,6 +63,7 @@
"BriefDescription": "Fixed Counter: Counts the number of instructions retired.",
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
+ "PublicDescription": "Fixed Counter: Counts the number of instructions retired. Available PDIST counters: 32",
"SampleAfterValue": "1000003",
"UMask": "0x1"
},
@@ -71,6 +72,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.ANY_P",
+ "PublicDescription": "Counts the number of instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "1000003"
},
{
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/cache.json b/tools/perf/pmu-events/arch/x86/elkhartlake/cache.json
index 7882dca9d5e1..3410caf8a57a 100644
--- a/tools/perf/pmu-events/arch/x86/elkhartlake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/cache.json
@@ -161,6 +161,7 @@
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
"PEBS": "1",
+ "PublicDescription": "Counts the number of load uops retired that hit in DRAM. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x80"
},
@@ -171,6 +172,7 @@
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.HITM",
"PEBS": "1",
+ "PublicDescription": "Counts the number of load uops retired that hit in the L3 cache, in which a snoop was required and modified data was forwarded from another core or module. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x20"
},
@@ -181,6 +183,7 @@
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
"PEBS": "1",
+ "PublicDescription": "Counts the number of load uops retired that hit in the L1 data cache. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x1"
},
@@ -191,6 +194,7 @@
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"PEBS": "1",
+ "PublicDescription": "Counts the number of load uops retired that miss in the L1 data cache. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x8"
},
@@ -201,6 +205,7 @@
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"PEBS": "1",
+ "PublicDescription": "Counts the number of load uops retired that hit in the L2 cache. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x2"
},
@@ -211,6 +216,7 @@
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"PEBS": "1",
+ "PublicDescription": "Counts the number of load uops retired that miss in the L2 cache. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x10"
},
@@ -221,6 +227,7 @@
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
"PEBS": "1",
+ "PublicDescription": "Counts the number of load uops retired that hit in the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x4"
},
@@ -231,7 +238,7 @@
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.ALL",
"PEBS": "1",
- "PublicDescription": "Counts the number of memory uops retired. A single uop that performs both a load AND a store will be counted as 1, not 2 (e.g. ADD [mem], CONST)",
+ "PublicDescription": "Counts the number of memory uops retired. A single uop that performs both a load AND a store will be counted as 1, not 2 (e.g. ADD [mem], CONST) Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x83"
},
@@ -242,7 +249,7 @@
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"PEBS": "1",
- "PublicDescription": "Counts the total number of load uops retired.",
+ "PublicDescription": "Counts the total number of load uops retired. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x81"
},
@@ -253,7 +260,7 @@
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"PEBS": "1",
- "PublicDescription": "Counts the total number of store uops retired.",
+ "PublicDescription": "Counts the total number of store uops retired. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x82"
},
@@ -264,6 +271,7 @@
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"PEBS": "1",
+ "PublicDescription": "Counts the number of load uops retired that performed one or more locks. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x21"
},
@@ -274,6 +282,7 @@
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.SPLIT",
"PEBS": "1",
+ "PublicDescription": "Counts the number of memory uops retired that were splits. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x43"
},
@@ -284,6 +293,7 @@
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"PEBS": "1",
+ "PublicDescription": "Counts the number of retired split load uops. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x41"
},
@@ -294,6 +304,7 @@
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"PEBS": "1",
+ "PublicDescription": "Counts the number of retired split store uops. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x42"
},
@@ -304,6 +315,7 @@
"EventName": "OCR.ALL_CODE_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F803C0044",
+ "PublicDescription": "Counts all code reads that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -314,6 +326,7 @@
"EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0044",
+ "PublicDescription": "Counts all code reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -324,6 +337,7 @@
"EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0044",
+ "PublicDescription": "Counts all code reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -334,6 +348,7 @@
"EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0044",
+ "PublicDescription": "Counts all code reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -344,6 +359,7 @@
"EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0044",
+ "PublicDescription": "Counts all code reads that were supplied by the L3 cache where a snoop was sent but the snoop missed. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -354,6 +370,18 @@
"EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0044",
+ "PublicDescription": "Counts all code reads that were supplied by the L3 cache where no snoop was needed to satisfy the request. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.COREWB_M.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3000000010000",
+ "PublicDescription": "Counts modified writebacks from L1 cache and L2 cache that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -364,6 +392,29 @@
"EventName": "OCR.COREWB_M.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3001F803C0000",
+ "PublicDescription": "Counts modified writebacks from L1 cache and L2 cache that were supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.COREWB_M.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8003000000000000",
+ "PublicDescription": "Counts modified writebacks from L1 cache and L2 cache that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency). Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -374,6 +425,7 @@
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F803C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -384,6 +436,7 @@
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -394,6 +447,7 @@
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -404,6 +458,7 @@
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -414,6 +469,7 @@
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent but the snoop missed. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -424,6 +480,18 @@
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where no snoop was needed to satisfy the request. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "PublicDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -434,6 +502,7 @@
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F803C0001",
+ "PublicDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -444,6 +513,7 @@
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0001",
+ "PublicDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -454,6 +524,7 @@
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0001",
+ "PublicDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -464,6 +535,7 @@
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0001",
+ "PublicDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -474,6 +546,7 @@
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0001",
+ "PublicDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent but the snoop missed. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -484,6 +557,30 @@
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0001",
+ "PublicDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where no snoop was needed to satisfy the request. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000001",
+ "PublicDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency). Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.ANY_RESPONSE",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "PublicDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.ANY_RESPONSE Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -495,6 +592,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F803C0001",
+ "PublicDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -506,6 +604,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0001",
+ "PublicDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HITM Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -517,6 +616,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0001",
+ "PublicDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HIT_NO_FWD Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -528,6 +628,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0001",
+ "PublicDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HIT_WITH_FWD Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -539,6 +640,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0001",
+ "PublicDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_MISS Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -550,6 +652,30 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0001",
+ "PublicDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_NOT_NEEDED Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.OUTSTANDING",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000001",
+ "PublicDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.OUTSTANDING Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -560,6 +686,7 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F803C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -570,6 +697,7 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -580,6 +708,7 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -590,6 +719,7 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -600,6 +730,7 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent but the snoop missed. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -610,6 +741,18 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where no snoop was needed to satisfy the request. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -620,6 +763,18 @@
"EventName": "OCR.FULL_STREAMING_WR.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x801F803C0000",
+ "PublicDescription": "Counts streaming stores which modify a full 64 byte cacheline that were supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetches and software prefetches (except PREFETCHW and PFRFO) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10400",
+ "PublicDescription": "Counts L1 data cache hardware prefetches and software prefetches (except PREFETCHW and PFRFO) that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -630,6 +785,18 @@
"EventName": "OCR.HWPF_L1D_AND_SWPF.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0400",
+ "PublicDescription": "Counts L1 data cache hardware prefetches and software prefetches (except PREFETCHW and PFRFO) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10040",
+ "PublicDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -640,6 +807,7 @@
"EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F803C0040",
+ "PublicDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -650,6 +818,7 @@
"EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0040",
+ "PublicDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -660,6 +829,7 @@
"EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0040",
+ "PublicDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -670,6 +840,7 @@
"EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0040",
+ "PublicDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -680,6 +851,7 @@
"EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0040",
+ "PublicDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent but the snoop missed. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -690,6 +862,29 @@
"EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0040",
+ "PublicDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where no snoop was needed to satisfy the request. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000040",
+ "PublicDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency). Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10010",
+ "PublicDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -700,6 +895,7 @@
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F803C0010",
+ "PublicDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -710,6 +906,7 @@
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0010",
+ "PublicDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -720,6 +917,7 @@
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0010",
+ "PublicDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -730,6 +928,7 @@
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0010",
+ "PublicDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -740,6 +939,7 @@
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0010",
+ "PublicDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent but the snoop missed. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -750,6 +950,18 @@
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0010",
+ "PublicDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where no snoop was needed to satisfy the request. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10020",
+ "PublicDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -760,6 +972,7 @@
"EventName": "OCR.HWPF_L2_RFO.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F803C0020",
+ "PublicDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -770,6 +983,7 @@
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0020",
+ "PublicDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -780,6 +994,7 @@
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0020",
+ "PublicDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -790,6 +1005,7 @@
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0020",
+ "PublicDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -800,6 +1016,7 @@
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0020",
+ "PublicDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent but the snoop missed. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -810,6 +1027,29 @@
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0020",
+ "PublicDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where no snoop was needed to satisfy the request. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000020",
+ "PublicDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency). Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.L1WB_M.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000000010000",
+ "PublicDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -820,6 +1060,18 @@
"EventName": "OCR.L1WB_M.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1001F803C0000",
+ "PublicDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that were supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.L2WB_M.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000000010000",
+ "PublicDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -830,6 +1082,7 @@
"EventName": "OCR.L2WB_M.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2001F803C0000",
+ "PublicDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -840,6 +1093,18 @@
"EventName": "OCR.PARTIAL_STREAMING_WR.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x401F803C0000",
+ "PublicDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that were supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10477",
+ "PublicDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -850,6 +1115,7 @@
"EventName": "OCR.READS_TO_CORE.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F803C0477",
+ "PublicDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -860,6 +1126,7 @@
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0477",
+ "PublicDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -870,6 +1137,7 @@
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0477",
+ "PublicDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -880,6 +1148,7 @@
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0477",
+ "PublicDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -890,6 +1159,7 @@
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0477",
+ "PublicDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where a snoop was sent but the snoop missed. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -900,6 +1170,18 @@
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0477",
+ "PublicDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where no snoop was needed to satisfy the request. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000477",
+ "PublicDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -910,6 +1192,7 @@
"EventName": "OCR.STREAMING_WR.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F803C0800",
+ "PublicDescription": "Counts streaming stores that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -920,6 +1203,7 @@
"EventName": "OCR.UC_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x101F803C0000",
+ "PublicDescription": "Counts uncached memory reads that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -930,6 +1214,7 @@
"EventName": "OCR.UC_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1010003C0000",
+ "PublicDescription": "Counts uncached memory reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -940,6 +1225,7 @@
"EventName": "OCR.UC_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004003C0000",
+ "PublicDescription": "Counts uncached memory reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -950,6 +1236,7 @@
"EventName": "OCR.UC_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1008003C0000",
+ "PublicDescription": "Counts uncached memory reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -960,6 +1247,7 @@
"EventName": "OCR.UC_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1002003C0000",
+ "PublicDescription": "Counts uncached memory reads that were supplied by the L3 cache where a snoop was sent but the snoop missed. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -970,6 +1258,7 @@
"EventName": "OCR.UC_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1001003C0000",
+ "PublicDescription": "Counts uncached memory reads that were supplied by the L3 cache where no snoop was needed to satisfy the request. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -980,6 +1269,7 @@
"EventName": "OCR.UC_WR.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x201F803C0000",
+ "PublicDescription": "Counts uncached memory writes that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
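
Note, as a reading aid for the OCR MSRValue encodings added throughout this file: MSRIndex "0x1a6,0x1a7" names the two off-core response MSRs the value can be programmed into, the low bits of MSRValue select the request type, and the upper bits select the response (0x10000 for ANY_RESPONSE, the 0x...3C... patterns for the L3_HIT/snoop variants). The bit positions below are inferred from the event data in this patch, not an authoritative decoder:

REQUEST_BITS = {
    0x001: "DEMAND_DATA_AND_L1PF_RD",
    0x002: "DEMAND_RFO",
    0x004: "DEMAND_CODE_RD",
    0x010: "HWPF_L2_DATA_RD",
    0x020: "HWPF_L2_RFO",
    0x040: "HWPF_L2_CODE_RD",
    0x400: "HWPF_L1D_AND_SWPF",
}

def decode_requests(msr_value):
    # Return the request-type names selected by the low bits of an OCR MSRValue.
    return [name for bit, name in sorted(REQUEST_BITS.items()) if msr_value & bit]

# OCR.READS_TO_CORE.ANY_RESPONSE above uses 0x10477: ANY_RESPONSE (bit 16)
# over the union of every request type listed here.
print(decode_requests(0x10477))
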
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/floating-point.json b/tools/perf/pmu-events/arch/x86/elkhartlake/floating-point.json
index 79a4beba4b78..f47d97dfe0d9 100644
--- a/tools/perf/pmu-events/arch/x86/elkhartlake/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/floating-point.json
@@ -23,6 +23,7 @@
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.FPDIV",
"PEBS": "1",
+ "PublicDescription": "Counts the number of floating point divide uops retired (x87 and SSE, including x87 sqrt). Available PDIST counters: 0",
"SampleAfterValue": "2000003",
"UMask": "0x8"
}
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/memory.json b/tools/perf/pmu-events/arch/x86/elkhartlake/memory.json
index 34306ec24e9b..417cd78fc048 100644
--- a/tools/perf/pmu-events/arch/x86/elkhartlake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/memory.json
@@ -13,6 +13,7 @@
"EventCode": "0x13",
"EventName": "MISALIGN_MEM_REF.LOAD_PAGE_SPLIT",
"PEBS": "1",
+ "PublicDescription": "Counts the number of misaligned load uops that are 4K page splits. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x2"
},
@@ -22,16 +23,29 @@
"EventCode": "0x13",
"EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT",
"PEBS": "1",
+ "PublicDescription": "Counts the number of misaligned store uops that are 4K page splits. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x4"
},
{
+ "BriefDescription": "Counts all code reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000044",
+ "PublicDescription": "Counts all code reads that were supplied by DRAM. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts all code reads that were not supplied by the L3 cache.",
"Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.ALL_CODE_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000044",
+ "PublicDescription": "Counts all code reads that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -42,6 +56,18 @@
"EventName": "OCR.ALL_CODE_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000044",
+ "PublicDescription": "Counts all code reads that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000044",
+ "PublicDescription": "Counts all code reads that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -52,6 +78,7 @@
"EventName": "OCR.COREWB_M.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3002184000000",
+ "PublicDescription": "Counts modified writebacks from L1 cache and L2 cache that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -62,6 +89,18 @@
"EventName": "OCR.COREWB_M.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3002184000000",
+ "PublicDescription": "Counts modified writebacks from L1 cache and L2 cache that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -72,6 +111,7 @@
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -82,6 +122,29 @@
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "PublicDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -92,6 +155,7 @@
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000001",
+ "PublicDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -102,6 +166,30 @@
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000001",
+ "PublicDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "PublicDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by DRAM. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.DRAM",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "PublicDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.DRAM Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -113,6 +201,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000001",
+ "PublicDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_MISS Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -124,6 +213,30 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000001",
+ "PublicDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_MISS_LOCAL Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.LOCAL_DRAM",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "PublicDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.LOCAL_DRAM Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -134,6 +247,7 @@
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -144,6 +258,18 @@
"EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -154,6 +280,7 @@
"EventName": "OCR.FULL_STREAMING_WR.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x802184000000",
+ "PublicDescription": "Counts streaming stores which modify a full 64 byte cacheline that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -164,6 +291,18 @@
"EventName": "OCR.FULL_STREAMING_WR.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x802184000000",
+ "PublicDescription": "Counts streaming stores which modify a full 64 byte cacheline that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000040",
+ "PublicDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -174,6 +313,7 @@
"EventName": "OCR.HWPF_L2_CODE_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000040",
+ "PublicDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -184,6 +324,29 @@
"EventName": "OCR.HWPF_L2_CODE_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000040",
+ "PublicDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000040",
+ "PublicDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by DRAM. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000010",
+ "PublicDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -194,6 +357,7 @@
"EventName": "OCR.HWPF_L2_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000010",
+ "PublicDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -204,6 +368,29 @@
"EventName": "OCR.HWPF_L2_DATA_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000010",
+ "PublicDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000010",
+ "PublicDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by DRAM. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000020",
+ "PublicDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -214,6 +401,7 @@
"EventName": "OCR.HWPF_L2_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000020",
+ "PublicDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -224,6 +412,18 @@
"EventName": "OCR.HWPF_L2_RFO.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000020",
+ "PublicDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000020",
+ "PublicDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -234,6 +434,7 @@
"EventName": "OCR.L1WB_M.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1002184000000",
+ "PublicDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -244,6 +445,7 @@
"EventName": "OCR.L1WB_M.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1002184000000",
+ "PublicDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -254,6 +456,7 @@
"EventName": "OCR.L2WB_M.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2002184000000",
+ "PublicDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -264,6 +467,7 @@
"EventName": "OCR.L2WB_M.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2002184000000",
+ "PublicDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -274,6 +478,7 @@
"EventName": "OCR.OTHER.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184008000",
+ "PublicDescription": "Counts miscellaneous requests, such as I/O accesses, that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -284,6 +489,7 @@
"EventName": "OCR.OTHER.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184008000",
+ "PublicDescription": "Counts miscellaneous requests, such as I/O accesses, that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -294,6 +500,7 @@
"EventName": "OCR.PARTIAL_STREAMING_WR.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x402184000000",
+ "PublicDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -304,6 +511,7 @@
"EventName": "OCR.PARTIAL_STREAMING_WR.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x402184000000",
+ "PublicDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -314,6 +522,18 @@
"EventName": "OCR.PREFETCHES.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000470",
+ "PublicDescription": "Counts all hardware and software prefetches that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000477",
+ "PublicDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -324,6 +544,7 @@
"EventName": "OCR.READS_TO_CORE.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000477",
+ "PublicDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -334,6 +555,18 @@
"EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000477",
+ "PublicDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000477",
+ "PublicDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -344,6 +577,7 @@
"EventName": "OCR.STREAMING_WR.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000800",
+ "PublicDescription": "Counts streaming stores that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -354,6 +588,18 @@
"EventName": "OCR.STREAMING_WR.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000800",
+ "PublicDescription": "Counts streaming stores that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100184000000",
+ "PublicDescription": "Counts uncached memory reads that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -364,6 +610,7 @@
"EventName": "OCR.UC_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x102184000000",
+ "PublicDescription": "Counts uncached memory reads that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -374,6 +621,18 @@
"EventName": "OCR.UC_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x102184000000",
+ "PublicDescription": "Counts uncached memory reads that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100184000000",
+ "PublicDescription": "Counts uncached memory reads that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -384,6 +643,7 @@
"EventName": "OCR.UC_WR.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x202184000000",
+ "PublicDescription": "Counts uncached memory writes that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -394,6 +654,7 @@
"EventName": "OCR.UC_WR.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x202184000000",
+ "PublicDescription": "Counts uncached memory writes that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
}
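
The OCR.* events in the hunks above are off-core response events: EventCode 0xB7 with UMask 0x1 selects the off-core response facility, and the MSRValue field is the request/response bit mask programmed into MSR_OFFCORE_RSP_0/1 (0x1a6/0x1a7, the MSRIndex field). Under Linux perf that mask is carried in attr.config1 and the kernel allocates one of the two MSRs. The sketch below is a minimal illustration only, assuming the usual raw x86 encoding of (UMask << 8) | EventCode in config and the JSON MSRValue in config1; the supported interface is the symbolic name from these tables, e.g. perf stat -e OCR.DEMAND_RFO.DRAM.

/* Minimal sketch: count one OCR event via perf_event_open(2).
 * Assumes config = (UMask << 8) | EventCode and config1 = MSRValue
 * as encoded in the JSON above; the kernel chooses which of the
 * OFFCORE_RSP MSRs (0x1a6/0x1a7) backs the event. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr = {
		.type     = PERF_TYPE_RAW,
		.size     = sizeof(attr),
		.config   = 0x01b7,          /* UMask 0x1, EventCode 0xB7 */
		.config1  = 0x184000002ULL,  /* MSRValue of OCR.DEMAND_RFO.DRAM */
		.disabled = 1,
	};
	long long count;
	int fd = syscall(SYS_perf_event_open, &attr, 0 /* this thread */,
			 -1 /* any CPU */, -1 /* no group */, 0);

	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement runs here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("OCR.DEMAND_RFO.DRAM: %lld\n", count);
	close(fd);
	return 0;
}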
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/other.json b/tools/perf/pmu-events/arch/x86/elkhartlake/other.json
index 57613207f7ad..2cdc6b64f31d 100644
--- a/tools/perf/pmu-events/arch/x86/elkhartlake/other.json
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/other.json
@@ -113,26 +113,7 @@
"EventName": "OCR.ALL_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10044",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all code reads that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.ALL_CODE_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000044",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all code reads that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.ALL_CODE_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000044",
+ "PublicDescription": "Counts all code reads that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -143,180 +124,7 @@
"EventName": "OCR.ALL_CODE_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
"MSRValue": "0x8000000000000044",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.COREWB_M.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3000000010000",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.COREWB_M.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "MSRValue": "0x8003000000000000",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_CODE_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "MSRValue": "0x8000000000000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.ANY_RESPONSE",
- "Counter": "0,1,2,3",
- "Deprecated": "1",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.DRAM",
- "Counter": "0,1,2,3",
- "Deprecated": "1",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_DATA_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.LOCAL_DRAM",
- "Counter": "0,1,2,3",
- "Deprecated": "1",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.OUTSTANDING",
- "Counter": "0,1,2,3",
- "Deprecated": "1",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_DATA_RD.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "MSRValue": "0x8000000000000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_RFO.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_RFO.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "MSRValue": "0x8000000000000002",
+ "PublicDescription": "Counts all code reads that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -327,146 +135,7 @@
"EventName": "OCR.FULL_STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800000010000",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetches and software prefetches (except PREFETCHW and PFRFO) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.HWPF_L1D_AND_SWPF.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10400",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.HWPF_L2_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10040",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.HWPF_L2_CODE_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000040",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.HWPF_L2_CODE_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000040",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.HWPF_L2_CODE_RD.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "MSRValue": "0x8000000000000040",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.HWPF_L2_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10010",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.HWPF_L2_DATA_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000010",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.HWPF_L2_DATA_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000010",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.HWPF_L2_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10020",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.HWPF_L2_RFO.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000020",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.HWPF_L2_RFO.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000020",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.HWPF_L2_RFO.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "MSRValue": "0x8000000000000020",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.L1WB_M.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000000010000",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.L2WB_M.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x2000000010000",
+ "PublicDescription": "Counts streaming stores which modify a full 64 byte cacheline that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -477,6 +146,7 @@
"EventName": "OCR.OTHER.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x18000",
+ "PublicDescription": "Counts miscellaneous requests, such as I/O accesses, that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -487,6 +157,7 @@
"EventName": "OCR.PARTIAL_STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400000010000",
+ "PublicDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -497,46 +168,7 @@
"EventName": "OCR.PREFETCHES.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10470",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.READS_TO_CORE.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.READS_TO_CORE.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.READS_TO_CORE.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.READS_TO_CORE.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "MSRValue": "0x8000000000000477",
+ "PublicDescription": "Counts all hardware and software prefetches that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -547,6 +179,7 @@
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10800",
+ "PublicDescription": "Counts streaming stores that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -557,26 +190,7 @@
"EventName": "OCR.UC_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100000010000",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts uncached memory reads that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.UC_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x100184000000",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts uncached memory reads that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.UC_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x100184000000",
+ "PublicDescription": "Counts uncached memory reads that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -587,6 +201,7 @@
"EventName": "OCR.UC_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
"MSRValue": "0x8000100000000000",
+ "PublicDescription": "Counts uncached memory reads that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -597,6 +212,7 @@
"EventName": "OCR.UC_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200000010000",
+ "PublicDescription": "Counts uncached memory writes that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
}
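
Note that the *.OUTSTANDING events retained above (MSRIndex 0x1a6 only) do not count occurrences: per their descriptions they accumulate cycles from request until the response is received (XQ to XQ latency). Dividing that sum by the matching *.ANY_RESPONSE count therefore approximates the average off-core latency for the request class. A small sketch under that assumption, with the two inputs standing in for reads of OCR.ALL_CODE_RD.OUTSTANDING and OCR.ALL_CODE_RD.ANY_RESPONSE:

/* Hedged sketch: average XQ-to-XQ latency from the two counters.
 * outstanding_cycles accumulates cycles per the OUTSTANDING events;
 * any_response counts completed requests. */
#include <stdio.h>

static double avg_xq_latency_cycles(unsigned long long outstanding_cycles,
				    unsigned long long any_response)
{
	return any_response ? (double)outstanding_cycles / any_response : 0.0;
}

int main(void)
{
	/* Illustrative values only, not measured data. */
	printf("avg latency: %.1f cycles\n",
	       avg_xq_latency_cycles(1200000ULL, 4000ULL));
	return 0;
}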
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/pipeline.json b/tools/perf/pmu-events/arch/x86/elkhartlake/pipeline.json
index e4e7902c1162..0fc2e821b14a 100644
--- a/tools/perf/pmu-events/arch/x86/elkhartlake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/pipeline.json
@@ -5,7 +5,7 @@
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PEBS": "1",
- "PublicDescription": "Counts the total number of instructions in which the instruction pointer (IP) of the processor is resteered due to a branch instruction and the branch instruction successfully retires. All branch type instructions are accounted for.",
+ "PublicDescription": "Counts the total number of instructions in which the instruction pointer (IP) of the processor is resteered due to a branch instruction and the branch instruction successfully retires. All branch type instructions are accounted for. Available PDIST counters: 0",
"SampleAfterValue": "200003"
},
{
@@ -14,6 +14,7 @@
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.CALL",
"PEBS": "1",
+ "PublicDescription": "Counts the number of near CALL branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0xf9"
},
@@ -23,6 +24,7 @@
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"PEBS": "1",
+ "PublicDescription": "Counts the number of far branch instructions retired, includes far jump, far call and return, and interrupt call and return. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0xbf"
},
@@ -32,6 +34,7 @@
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.IND_CALL",
"PEBS": "1",
+ "PublicDescription": "Counts the number of near indirect CALL branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0xfb"
},
@@ -41,6 +44,7 @@
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.JCC",
"PEBS": "1",
+ "PublicDescription": "Counts the number of retired JCC (Jump on Conditional Code) branch instructions retired, includes both taken and not taken branches. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x7e"
},
@@ -50,6 +54,7 @@
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NON_RETURN_IND",
"PEBS": "1",
+ "PublicDescription": "Counts the number of near indirect JMP and near indirect CALL branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0xeb"
},
@@ -59,6 +64,7 @@
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.REL_CALL",
"PEBS": "1",
+ "PublicDescription": "Counts the number of near relative CALL branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0xfd"
},
@@ -68,6 +74,7 @@
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.RETURN",
"PEBS": "1",
+ "PublicDescription": "Counts the number of near RET branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0xf7"
},
@@ -77,6 +84,7 @@
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.TAKEN_JCC",
"PEBS": "1",
+ "PublicDescription": "Counts the number of taken JCC (Jump on Conditional Code) branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0xfe"
},
@@ -86,7 +94,7 @@
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PEBS": "1",
- "PublicDescription": "Counts the total number of mispredicted branch instructions retired. All branch type instructions are accounted for. Prediction of the branch target address enables the processor to begin executing instructions before the non-speculative execution path is known. The branch prediction unit (BPU) predicts the target address based on the instruction pointer (IP) of the branch and on the execution path through which execution reached this IP. A branch misprediction occurs when the prediction is wrong, and results in discarding all instructions executed in the speculative path and re-fetching from the correct path.",
+ "PublicDescription": "Counts the total number of mispredicted branch instructions retired. All branch type instructions are accounted for. Prediction of the branch target address enables the processor to begin executing instructions before the non-speculative execution path is known. The branch prediction unit (BPU) predicts the target address based on the instruction pointer (IP) of the branch and on the execution path through which execution reached this IP. A branch misprediction occurs when the prediction is wrong, and results in discarding all instructions executed in the speculative path and re-fetching from the correct path. Available PDIST counters: 0",
"SampleAfterValue": "200003"
},
{
@@ -95,6 +103,7 @@
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.IND_CALL",
"PEBS": "1",
+ "PublicDescription": "Counts the number of mispredicted near indirect CALL branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0xfb"
},
@@ -104,6 +113,7 @@
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.JCC",
"PEBS": "1",
+ "PublicDescription": "Counts the number of mispredicted JCC (Jump on Conditional Code) branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x7e"
},
@@ -113,6 +123,7 @@
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
"PEBS": "1",
+ "PublicDescription": "Counts the number of mispredicted near indirect JMP and near indirect CALL branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0xeb"
},
@@ -122,6 +133,7 @@
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RETURN",
"PEBS": "1",
+ "PublicDescription": "Counts the number of mispredicted near RET branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0xf7"
},
@@ -131,6 +143,7 @@
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.TAKEN_JCC",
"PEBS": "1",
+ "PublicDescription": "Counts the number of mispredicted taken JCC (Jump on Conditional Code) branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0xfe"
},
@@ -206,7 +219,7 @@
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PEBS": "1",
- "PublicDescription": "Counts the total number of instructions that retired. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. This event continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0.",
+ "PublicDescription": "Counts the total number of instructions that retired. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. This event continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. Available PDIST counters: 0",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
@@ -216,7 +229,7 @@
"EventCode": "0xc0",
"EventName": "INST_RETIRED.ANY_P",
"PEBS": "1",
- "PublicDescription": "Counts the total number of instructions that retired. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. This event continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses a programmable general purpose performance counter.",
+ "PublicDescription": "Counts the total number of instructions that retired. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. This event continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses a programmable general purpose performance counter. Available PDIST counters: 0",
"SampleAfterValue": "2000003"
},
{
@@ -225,6 +238,7 @@
"EventCode": "0x03",
"EventName": "LD_BLOCKS.4K_ALIAS",
"PEBS": "1",
+ "PublicDescription": "Counts the number of retired loads that are blocked because it initially appears to be store forward blocked, but subsequently is shown not to be blocked based on 4K alias check. Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x4"
},
@@ -234,6 +248,7 @@
"EventCode": "0x03",
"EventName": "LD_BLOCKS.ALL",
"PEBS": "1",
+ "PublicDescription": "Counts the number of retired loads that are blocked for any of the following reasons: DTLB miss, address alias, store forward or data unknown (includes memory disambiguation blocks and ESP consuming load blocks). Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x10"
},
@@ -243,6 +258,7 @@
"EventCode": "0x03",
"EventName": "LD_BLOCKS.DATA_UNKNOWN",
"PEBS": "1",
+ "PublicDescription": "Counts the number of retired loads that are blocked because its address exactly matches an older store whose data is not ready. Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x1"
},
@@ -252,6 +268,7 @@
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PEBS": "1",
+ "PublicDescription": "Counts the number of retired loads that are blocked because its address partially overlapped with an older store. Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x2"
},
@@ -464,6 +481,7 @@
"EventCode": "0xc2",
"EventName": "TOPDOWN_RETIRING.ALL",
"PEBS": "1",
+ "PublicDescription": "Counts the total number of consumed retirement slots. Available PDIST counters: 0",
"SampleAfterValue": "1000003"
},
{
@@ -480,6 +498,7 @@
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.ALL",
"PEBS": "1",
+ "PublicDescription": "Counts the total number of uops retired. Available PDIST counters: 0",
"SampleAfterValue": "2000003"
},
{
@@ -488,6 +507,7 @@
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.IDIV",
"PEBS": "1",
+ "PublicDescription": "Counts the number of integer divide uops retired. Available PDIST counters: 0",
"SampleAfterValue": "2000003",
"UMask": "0x10"
},
@@ -497,7 +517,7 @@
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.MS",
"PEBS": "1",
- "PublicDescription": "Counts the number of uops that are from complex flows issued by the Microcode Sequencer (MS). This includes uops from flows due to complex instructions, faults, assists, and inserted flows.",
+ "PublicDescription": "Counts the number of uops that are from complex flows issued by the Microcode Sequencer (MS). This includes uops from flows due to complex instructions, faults, assists, and inserted flows. Available PDIST counters: 0",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
@@ -507,6 +527,7 @@
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.X87",
"PEBS": "1",
+ "PublicDescription": "Counts the number of x87 uops retired, includes those in MS flows. Available PDIST counters: 0",
"SampleAfterValue": "2000003",
"UMask": "0x2"
}
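
Every description added in this file ends with "Available PDIST counters: 0", i.e. none of these retirement events can use a precise-distribution (PDIST) counter; ordinary PEBS sampling, flagged by "PEBS": "1", still applies. A hedged sketch of requesting PEBS through precise_ip, assuming the usual raw encoding for BR_MISP_RETIRED.ALL_BRANCHES and that precise_ip level 3 (zero required skid) is the level that would need PDIST support on recent cores:

/* Minimal sketch: a PEBS sampling event for
 * BR_MISP_RETIRED.ALL_BRANCHES (EventCode 0xc5, UMask 0x0),
 * using the SampleAfterValue from the JSON as the period. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int open_pebs_branch_miss_sampler(pid_t pid)
{
	struct perf_event_attr attr = {
		.type          = PERF_TYPE_RAW,
		.size          = sizeof(attr),
		.config        = 0x00c5,   /* BR_MISP_RETIRED.ALL_BRANCHES */
		.sample_period = 200003,   /* SampleAfterValue above */
		.sample_type   = PERF_SAMPLE_IP,
		.precise_ip    = 2,        /* PEBS; level 3 would want PDIST */
		.disabled      = 1,
	};

	return syscall(SYS_perf_event_open, &attr, pid, -1, -1, 0);
}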
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/elkhartlake/virtual-memory.json
index f9a6caed8776..bf56d72bb4a7 100644
--- a/tools/perf/pmu-events/arch/x86/elkhartlake/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/virtual-memory.json
@@ -242,6 +242,7 @@
"EventCode": "0x03",
"EventName": "LD_BLOCKS.DTLB_MISS",
"PEBS": "1",
+ "PublicDescription": "Counts the number of retired loads that are blocked due to a first level TLB miss. Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x8"
},
@@ -252,6 +253,7 @@
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS",
"PEBS": "1",
+ "PublicDescription": "Counts the number of memory uops retired that missed in the second level TLB. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x13"
},
@@ -262,6 +264,7 @@
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
"PEBS": "1",
+ "PublicDescription": "Counts the number of load uops retired that miss in the second Level TLB. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x11"
},
@@ -272,6 +275,7 @@
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_STORES",
"PEBS": "1",
+ "PublicDescription": "Counts the number of store uops retired that miss in the second level TLB. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x12"
}
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/cache.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/cache.json
index 3b0581151d63..26568e4b77f7 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/cache.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/cache.json
@@ -1,5 +1,68 @@
[
{
+ "BriefDescription": "Hit snoop reply with data, line invalidated.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "CORE_SNOOP_RESPONSE.I_FWD_FE",
+ "PublicDescription": "Counts responses to snoops indicating the line will now be (I)nvalidated: removed from this core's cache, after the data is forwarded back to the requestor and indicating the data was found unmodified in the (FE) Forward or Exclusive State in this cores caches cache. A single snoop response from the core counts on all hyperthreads of the core.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "HitM snoop reply with data, line invalidated.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "CORE_SNOOP_RESPONSE.I_FWD_M",
+ "PublicDescription": "Counts responses to snoops indicating the line will now be (I)nvalidated: removed from this core's caches, after the data is forwarded back to the requestor, and indicating the data was found modified(M) in this cores caches cache (aka HitM response). A single snoop response from the core counts on all hyperthreads of the core.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Hit snoop reply without sending the data, line invalidated.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "CORE_SNOOP_RESPONSE.I_HIT_FSE",
+ "PublicDescription": "Counts responses to snoops indicating the line will now be (I)nvalidated in this core's caches without forwarded back to the requestor. The line was in Forward, Shared or Exclusive (FSE) state in this cores caches. A single snoop response from the core counts on all hyperthreads of the core.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Line not found snoop reply",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "CORE_SNOOP_RESPONSE.MISS",
+ "PublicDescription": "Counts responses to snoops indicating that the data was not found (IHitI) in this core's caches. A single snoop response from the core counts on all hyperthreads of the Core.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Hit snoop reply with data, line kept in Shared state.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "CORE_SNOOP_RESPONSE.S_FWD_FE",
+ "PublicDescription": "Counts responses to snoops indicating the line may be kept on this core in the (S)hared state, after the data is forwarded back to the requestor, initially the data was found in the cache in the (FS) Forward or Shared state. A single snoop response from the core counts on all hyperthreads of the core.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "HitM snoop reply with data, line kept in Shared state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "CORE_SNOOP_RESPONSE.S_FWD_M",
+ "PublicDescription": "Counts responses to snoops indicating the line may be kept on this core in the (S)hared state, after the data is forwarded back to the requestor, initially the data was found in the cache in the (M)odified state. A single snoop response from the core counts on all hyperthreads of the core.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Hit snoop reply without sending the data, line kept in Shared state.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "CORE_SNOOP_RESPONSE.S_HIT_FSE",
+ "PublicDescription": "Counts responses to snoops indicating the line was kept on this core in the (S)hared state, and that the data was found unmodified but not forwarded back to the requestor, initially the data was found in the cache in the (FSE) Forward, Shared state or Exclusive state. A single snoop response from the core counts on all hyperthreads of the core.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
"BriefDescription": "L1D.HWPF_MISS",
"Counter": "0,1,2,3",
"EventCode": "0x51",
@@ -311,7 +374,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_LOADS",
- "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW.",
+ "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW. Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x81"
},
@@ -321,7 +384,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_STORES",
- "PublicDescription": "Counts all retired store instructions.",
+ "PublicDescription": "Counts all retired store instructions. Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x82"
},
@@ -331,7 +394,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ANY",
- "PublicDescription": "Counts all retired memory instructions - loads and stores.",
+ "PublicDescription": "Counts all retired memory instructions - loads and stores. Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x83"
},
@@ -341,7 +404,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.LOCK_LOADS",
- "PublicDescription": "Counts retired load instructions with locked access.",
+ "PublicDescription": "Counts retired load instructions with locked access. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x21"
},
@@ -351,7 +414,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
- "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
+ "PublicDescription": "Counts retired load instructions that split across a cacheline boundary. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x41"
},
@@ -361,7 +424,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_STORES",
- "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
+ "PublicDescription": "Counts retired store instructions that split across a cacheline boundary. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x42"
},
@@ -371,7 +434,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
- "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x11"
},
@@ -381,7 +444,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
- "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x12"
},
@@ -400,7 +463,7 @@
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD",
- "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3.",
+ "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3. Available PDIST counters: 0",
"SampleAfterValue": "20011",
"UMask": "0x4"
},
@@ -410,7 +473,7 @@
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
- "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache. Available PDIST counters: 0",
"SampleAfterValue": "20011",
"UMask": "0x1"
},
@@ -420,7 +483,7 @@
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
- "PublicDescription": "Counts retired load instructions whose data sources were hits in L3 without snoops required.",
+ "PublicDescription": "Counts retired load instructions whose data sources were hits in L3 without snoops required. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x8"
},
@@ -430,7 +493,7 @@
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD",
- "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache. Available PDIST counters: 0",
"SampleAfterValue": "20011",
"UMask": "0x2"
},
@@ -440,7 +503,7 @@
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
- "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from local DRAM.",
+ "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from local DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -450,6 +513,7 @@
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
+ "PublicDescription": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x2"
},
@@ -459,7 +523,7 @@
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD",
- "PublicDescription": "Retired load instructions whose data sources was forwarded from a remote cache.",
+ "PublicDescription": "Retired load instructions whose data sources was forwarded from a remote cache. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x8"
},
@@ -469,6 +533,7 @@
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
+ "PublicDescription": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x4"
},
@@ -478,7 +543,7 @@
"Data_LA": "1",
"EventCode": "0xd4",
"EventName": "MEM_LOAD_MISC_RETIRED.UC",
- "PublicDescription": "Retired instructions with at least one load to uncacheable memory-type, or at least one cache-line split locked access (Bus Lock).",
+ "PublicDescription": "Retired instructions with at least one load to uncacheable memory-type, or at least one cache-line split locked access (Bus Lock). Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x4"
},
@@ -488,7 +553,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.FB_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x40"
},
@@ -498,7 +563,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source. Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x1"
},
@@ -508,7 +573,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_MISS",
- "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x8"
},
@@ -518,7 +583,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_HIT",
- "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources.",
+ "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x2"
},
@@ -528,7 +593,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_MISS",
- "PublicDescription": "Counts retired load instructions missed L2 cache as data sources.",
+ "PublicDescription": "Counts retired load instructions missed L2 cache as data sources. Available PDIST counters: 0",
"SampleAfterValue": "100021",
"UMask": "0x10"
},
@@ -538,7 +603,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100021",
"UMask": "0x4"
},
@@ -548,7 +613,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_MISS",
- "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "50021",
"UMask": "0x20"
},
@@ -570,12 +635,24 @@
"UMask": "0x3"
},
{
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit in the L3 or were snooped from another core's caches on the same socket.",
"Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit in the L3 or were snooped from another core's caches on the same socket. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -586,6 +663,7 @@
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that resulted in a snoop hit a modified line in another core's caches which forwarded the data. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -596,6 +674,7 @@
"EventName": "OCR.DEMAND_CODE_RD.SNC_CACHE.HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1008000004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -606,6 +685,18 @@
"EventName": "OCR.DEMAND_CODE_RD.SNC_CACHE.HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x808000004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "PublicDescription": "Counts demand data reads that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -616,6 +707,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0001",
+ "PublicDescription": "Counts demand data reads that hit in the L3 or were snooped from another core's caches on the same socket. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -626,6 +718,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0001",
+ "PublicDescription": "Counts demand data reads that resulted in a snoop hit a modified line in another core's caches which forwarded the data. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -636,6 +729,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0001",
+ "PublicDescription": "Counts demand data reads that resulted in a snoop that hit in another core, which did not forward the data. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -646,6 +740,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0001",
+ "PublicDescription": "Counts demand data reads that resulted in a snoop hit in another core's caches which forwarded the unmodified data to the requesting core. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -656,6 +751,7 @@
"EventName": "OCR.DEMAND_DATA_RD.REMOTE_CACHE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1030000001",
+ "PublicDescription": "Counts demand data reads that were supplied by a cache on a remote socket where a snoop hit a modified line in another core's caches which forwarded the data. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -666,6 +762,7 @@
"EventName": "OCR.DEMAND_DATA_RD.REMOTE_CACHE.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x830000001",
+ "PublicDescription": "Counts demand data reads that were supplied by a cache on a remote socket where a snoop hit in another core's caches which forwarded the unmodified data to the requesting core. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -676,6 +773,7 @@
"EventName": "OCR.DEMAND_DATA_RD.SNC_CACHE.HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1008000001",
+ "PublicDescription": "Counts demand data reads that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -686,6 +784,18 @@
"EventName": "OCR.DEMAND_DATA_RD.SNC_CACHE.HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x808000001",
+ "PublicDescription": "Counts demand data reads that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F3FFC0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -696,6 +806,7 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit in the L3 or were snooped from another core's caches on the same socket. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -706,6 +817,7 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that resulted in a snoop hit a modified line in another core's caches which forwarded the data. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -716,6 +828,7 @@
"EventName": "OCR.DEMAND_RFO.SNC_CACHE.HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1008000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -726,6 +839,40 @@
"EventName": "OCR.DEMAND_RFO.SNC_CACHE.HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x808000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts data load hardware prefetch requests to the L1 data cache that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.HWPF_L1D.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10400",
+ "PublicDescription": "Counts data load hardware prefetch requests to the L1 data cache that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches (which bring data to L2) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.HWPF_L2.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10070",
+ "PublicDescription": "Counts hardware prefetches (which bring data to L2) that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches to the L3 only that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.HWPF_L3.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x12380",
+ "PublicDescription": "Counts hardware prefetches to the L3 only that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -736,6 +883,40 @@
"EventName": "OCR.HWPF_L3.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80082380",
+ "PublicDescription": "Counts hardware prefetches to the L3 only that hit in the L3 or were snooped from another core's caches on the same socket. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches to the L3 only that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline was homed in a remote socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.HWPF_L3.REMOTE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x90002380",
+ "PublicDescription": "Counts hardware prefetches to the L3 only that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline was homed in a remote socket. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts writebacks of modified cachelines and streaming stores that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.MODIFIED_WRITE.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10808",
+ "PublicDescription": "Counts writebacks of modified cachelines and streaming stores that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F3FFC4477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -746,6 +927,7 @@
"EventName": "OCR.READS_TO_CORE.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F003C4477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit in the L3 or were snooped from another core's caches on the same socket. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -756,6 +938,7 @@
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C4477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop hit a modified line in another core's caches which forwarded the data. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -766,6 +949,7 @@
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C4477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop that hit in another core, which did not forward the data. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -776,6 +960,18 @@
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C4477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop hit in another core's caches which forwarded the unmodified data to the requesting core. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and were supplied by a remote socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F33004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and were supplied by a remote socket. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -786,6 +982,7 @@
"EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1830004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop was sent and data was returned (Modified or Not Modified). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -796,6 +993,7 @@
"EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1030004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop hit a modified line in another core's caches which forwarded the data. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -806,6 +1004,7 @@
"EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x830004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop hit in another core's caches which forwarded the unmodified data to the requesting core. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -816,6 +1015,7 @@
"EventName": "OCR.READS_TO_CORE.SNC_CACHE.HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1008004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -826,6 +1026,7 @@
"EventName": "OCR.READS_TO_CORE.SNC_CACHE.HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x808004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -836,6 +1037,7 @@
"EventName": "OCR.RFO_TO_CORE.L3_HIT_M",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F80040022",
+ "PublicDescription": "Counts demand reads for ownership (RFO), hardware prefetch RFOs (which bring data to L2), and software prefetches for exclusive ownership (PREFETCHW) that hit to a (M)odified cacheline in the L3 or snoop filter. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -846,6 +1048,7 @@
"EventName": "OCR.STREAMING_WR.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080800",
+ "PublicDescription": "Counts streaming stores that hit in the L3 or were snooped from another core's caches on the same socket. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
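The OCR.* entries above are off-core response events: perf programs the filter through the MSR pair named by MSRIndex (0x1a6/0x1a7, i.e. MSR_OFFCORE_RSP_0/1), and MSRValue supplies the filter bits. A minimal sketch of the encoding, assuming the conventional layout in which the low 16 bits select request types and the higher bits select supplier/snoop response info (the helper name decode_ocr is illustrative, not part of the event files):

  # Split an OCR MSRValue into request-type and response-info fields.
  # Assumes bits 0-15 = request type, bits 16+ = response/snoop info.
  def decode_ocr(msr_value: int) -> dict:
      return {
          "request_type": hex(msr_value & 0xFFFF),
          "response_info": hex(msr_value >> 16),
      }

  # OCR.DEMAND_DATA_RD.ANY_RESPONSE above uses MSRValue 0x10001:
  # request type 0x1 (demand data read), response info 0x1 (any response).
  print(decode_ocr(0x10001))  # {'request_type': '0x1', 'response_info': '0x1'}
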
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/emr-metrics.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/emr-metrics.json
index d3b51fa6ec1c..433ae5f50704 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/emr-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/emr-metrics.json
@@ -1,28 +1,28 @@
[
{
"BriefDescription": "C1 residency percent per core",
- "MetricExpr": "cstate_core@c1\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c1\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C1_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C2 residency percent per package",
- "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per core",
- "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per package",
- "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency",
"ScaleUnit": "100%"
@@ -40,6 +40,18 @@
"ScaleUnit": "1per_instr"
},
{
+ "BriefDescription": "The average number of cores that are in cstate C0 as observed by the power control unit (PCU)",
+ "MetricExpr": "UNC_P_POWER_STATE_OCCUPANCY_CORES_C0 / UNC_P_CLOCKTICKS * #num_packages",
+ "MetricGroup": "cpu_cstate",
+ "MetricName": "cpu_cstate_c0"
+ },
+ {
+ "BriefDescription": "The average number of cores are in cstate C6 as observed by the power control unit (PCU)",
+ "MetricExpr": "UNC_P_POWER_STATE_OCCUPANCY_CORES_C6 / UNC_P_CLOCKTICKS * #num_packages",
+ "MetricGroup": "cpu_cstate",
+ "MetricName": "cpu_cstate_c6"
+ },
+ {
"BriefDescription": "CPU operating frequency (in GHz)",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC * #SYSTEM_TSC_FREQ / 1e9",
"MetricName": "cpu_operating_frequency",
@@ -79,6 +91,12 @@
"ScaleUnit": "1MB/s"
},
{
+ "BriefDescription": "Bandwidth of inbound IO reads that are initiated by end device controllers that are requesting memory from the CPU and miss the L3 cache",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_read_l3_miss",
+ "ScaleUnit": "1MB/s"
+ },
+ {
"BriefDescription": "Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the local CPU socket",
"MetricExpr": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_LOCAL * 64 / 1e6 / duration_time",
"MetricName": "io_bandwidth_read_local",
@@ -97,6 +115,12 @@
"ScaleUnit": "1MB/s"
},
{
+ "BriefDescription": "Bandwidth of inbound IO writes that are initiated by end device controllers that are writing memory to the CPU",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_MISS_ITOM + UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR) * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_write_l3_miss",
+ "ScaleUnit": "1MB/s"
+ },
+ {
"BriefDescription": "Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the local CPU socket",
"MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_ITOM_LOCAL + UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_LOCAL) * 64 / 1e6 / duration_time",
"MetricName": "io_bandwidth_write_local",
@@ -111,19 +135,19 @@
{
"BriefDescription": "Percentage of inbound full cacheline writes initiated by end device controllers that miss the L3 cache",
"MetricExpr": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM / UNC_CHA_TOR_INSERTS.IO_ITOM",
- "MetricName": "io_percent_of_inbound_full_writes_that_miss_l3",
+ "MetricName": "io_full_write_l3_miss",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Percentage of inbound partial cacheline writes initiated by end device controllers that miss the L3 cache",
"MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR + UNC_CHA_TOR_INSERTS.IO_MISS_RFO) / (UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR + UNC_CHA_TOR_INSERTS.IO_RFO)",
- "MetricName": "io_percent_of_inbound_partial_writes_that_miss_l3",
+ "MetricName": "io_partial_write_l3_miss",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Percentage of inbound reads initiated by end device controllers that miss the L3 cache",
"MetricExpr": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR / UNC_CHA_TOR_INSERTS.IO_PCIRDCUR",
- "MetricName": "io_percent_of_inbound_reads_that_miss_l3",
+ "MetricName": "io_read_l3_miss",
"ScaleUnit": "100%"
},
{
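The renamed io_*_l3_miss metrics above are straight miss ratios: misses observed at the CHA divided by all TOR inserts of the same opcode, scaled to a percentage. For instance:

  # Illustrative: share of inbound IO reads that missed the L3.
  io_miss_pcirdcur = 20_000_000   # UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR
  io_pcirdcur = 80_000_000        # UNC_CHA_TOR_INSERTS.IO_PCIRDCUR

  io_read_l3_miss = io_miss_pcirdcur / io_pcirdcur * 100  # ScaleUnit "100%"
  print(f"io_read_l3_miss = {io_read_l3_miss:.0f}%")  # 25%
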
@@ -300,7 +324,7 @@
"ScaleUnit": "1per_instr"
},
{
- "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations",
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
"MetricExpr": "(UOPS_DISPATCHED.PORT_0 + UOPS_DISPATCHED.PORT_1 + UOPS_DISPATCHED.PORT_5_11 + UOPS_DISPATCHED.PORT_6) / (5 * tma_info_core_core_clks)",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_alu_op_utilization",
@@ -312,7 +336,7 @@
"MetricExpr": "EXE.AMX_BUSY / tma_info_core_core_clks",
"MetricGroup": "BvCB;Compute;HPC;Server;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_amx_busy",
- "MetricThreshold": "tma_amx_busy > 0.5 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_amx_busy > 0.5 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
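The MetricThreshold rewrites in this hunk and below only add explicit grouping: assuming perf's expression parser treats & as an ordinary logical conjunction, a & (b & c) and a & b & c select the same cases, so the parentheses change how the parent/child threshold nesting reads, not the result. A quick check of that assumption:

  # a & (b & c) == a & b & c for boolean operands (associativity).
  from itertools import product
  for a, b, c in product([False, True], repeat=3):
      assert (a and (b and c)) == (a and b and c)
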
@@ -320,12 +344,12 @@
"MetricExpr": "78 * ASSISTS.ANY / tma_info_thread_slots",
"MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
- "MetricThreshold": "tma_assists > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: ASSISTS.ANY",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of slots the CPU retired uops as a result of handing SSE to AVX* or AVX* to SSE transition Assists",
+ "BriefDescription": "This metric estimates fraction of slots the CPU retired uops as a result of handing SSE to AVX* or AVX* to SSE transition Assists.",
"MetricExpr": "63 * ASSISTS.SSE_AVX_MIX / tma_info_thread_slots",
"MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group",
"MetricName": "tma_avx_assists",
@@ -335,7 +359,7 @@
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound)",
"MetricGroup": "BvOB;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
@@ -351,12 +375,12 @@
"MetricName": "tma_bad_speculation",
"MetricThreshold": "tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
- "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches)",
+ "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
"MetricGroup": "BigFootprint;BvBC;Fed;Frontend;IcMiss;MemoryTLB",
"MetricName": "tma_bottleneck_big_code",
"MetricThreshold": "tma_bottleneck_big_code > 20"
@@ -370,39 +394,39 @@
"PublicDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. (A lower bound)"
},
{
+ "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
+ "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_amx_busy + tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * tma_amx_busy / (tma_amx_busy + tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_amx_busy + tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
+ "MetricGroup": "BvCB;Cor;tma_issueComp",
+ "MetricName": "tma_bottleneck_compute_bound_est",
+ "MetricThreshold": "tma_bottleneck_compute_bound_est > 20",
+ "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. Related metrics: "
+ },
+ {
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_fb_full / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_fb_full)))",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + 0 / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
"MetricGroup": "BvMB;Mem;MemoryBW;Offcore;tma_issueBW",
- "MetricName": "tma_bottleneck_cache_memory_bandwidth",
- "MetricThreshold": "tma_bottleneck_cache_memory_bandwidth > 20",
+ "MetricName": "tma_bottleneck_data_cache_memory_bandwidth",
+ "MetricThreshold": "tma_bottleneck_data_cache_memory_bandwidth > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_l1_latency_dependency / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_fb_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_lock_latency / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_fb_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_split_loads / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_fb_full)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_split_stores / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_store_latency / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)))",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + 0 / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l1_latency_dependency / (tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_lock_latency / (tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_loads / (tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_stores / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
"MetricGroup": "BvML;Mem;MemoryLat;Offcore;tma_issueLat",
- "MetricName": "tma_bottleneck_cache_memory_latency",
- "MetricThreshold": "tma_bottleneck_cache_memory_latency > 20",
+ "MetricName": "tma_bottleneck_data_cache_memory_latency",
+ "MetricThreshold": "tma_bottleneck_data_cache_memory_latency > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency"
},
{
- "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
- "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_serializing_operation + tma_amx_busy + tma_ports_utilization) + tma_core_bound * tma_amx_busy / (tma_divider + tma_serializing_operation + tma_amx_busy + tma_ports_utilization) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_serializing_operation + tma_amx_busy + tma_ports_utilization)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
- "MetricGroup": "BvCB;Cor;tma_issueComp",
- "MetricName": "tma_bottleneck_compute_bound_est",
- "MetricThreshold": "tma_bottleneck_compute_bound_est > 20",
- "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy"
- },
- {
"BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)",
- "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) - (1 - INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.MS\\,cmask\\=0x1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_mispredicts_resteers + tma_clears_resteers + tma_unknown_branches)) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_ms / (tma_mite + tma_dsb + tma_ms))) - tma_bottleneck_big_code",
+ "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - (1 - INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.MS\\,cmask\\=1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_ms)) - tma_bottleneck_big_code",
"MetricGroup": "BvFB;Fed;FetchBW;Frontend",
"MetricName": "tma_bottleneck_instruction_fetch_bw",
"MetricThreshold": "tma_bottleneck_instruction_fetch_bw > 20"
},
{
"BriefDescription": "Total pipeline cost of irregular execution (e.g",
- "MetricExpr": "100 * ((1 - INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.MS\\,cmask\\=0x1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_mispredicts_resteers + tma_clears_resteers + tma_unknown_branches)) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_ms / (tma_mite + tma_dsb + tma_ms)) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + RS.EMPTY_RESOURCE / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_serializing_operation + tma_amx_busy + tma_ports_utilization) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
+ "MetricExpr": "100 * ((1 - INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.MS\\,cmask\\=1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_ms) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + RS.EMPTY_RESOURCE / tma_info_thread_clks * tma_ports_utilized_0) / (tma_amx_busy + tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
"MetricGroup": "Bad;BvIO;Cor;Ret;tma_issueMS",
"MetricName": "tma_bottleneck_irregular_overhead",
"MetricThreshold": "tma_bottleneck_irregular_overhead > 10",
@@ -410,7 +434,7 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_fb_full)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_dtlb_store / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)))",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
"MetricGroup": "BvMT;Mem;MemoryTLB;Offcore;tma_issueTLB",
"MetricName": "tma_bottleneck_memory_data_tlbs",
"MetricThreshold": "tma_bottleneck_memory_data_tlbs > 20",
@@ -418,7 +442,7 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) * tma_remote_cache / (tma_local_mem + tma_remote_mem + tma_remote_cache) + tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * tma_false_sharing / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) * tma_remote_cache / (tma_local_mem + tma_remote_cache + tma_remote_mem) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
"MetricGroup": "BvMS;LockCont;Mem;Offcore;tma_issueSyncxn",
"MetricName": "tma_bottleneck_memory_synchronization",
"MetricThreshold": "tma_bottleneck_memory_synchronization > 10",
@@ -426,7 +450,7 @@
},
{
"BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
- "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Bad;BadSpec;BrMispredicts;BvMP;tma_issueBM",
"MetricName": "tma_bottleneck_mispredictions",
"MetricThreshold": "tma_bottleneck_mispredictions > 20",
@@ -434,14 +458,14 @@
},
{
"BriefDescription": "Total pipeline cost of remaining bottlenecks in the back-end",
- "MetricExpr": "100 - (tma_bottleneck_big_code + tma_bottleneck_instruction_fetch_bw + tma_bottleneck_mispredictions + tma_bottleneck_cache_memory_bandwidth + tma_bottleneck_cache_memory_latency + tma_bottleneck_memory_data_tlbs + tma_bottleneck_memory_synchronization + tma_bottleneck_compute_bound_est + tma_bottleneck_irregular_overhead + tma_bottleneck_branching_overhead + tma_bottleneck_useful_work)",
+ "MetricExpr": "100 - (tma_bottleneck_big_code + tma_bottleneck_instruction_fetch_bw + tma_bottleneck_mispredictions + tma_bottleneck_data_cache_memory_bandwidth + tma_bottleneck_data_cache_memory_latency + tma_bottleneck_memory_data_tlbs + tma_bottleneck_memory_synchronization + tma_bottleneck_compute_bound_est + tma_bottleneck_irregular_overhead + tma_bottleneck_branching_overhead + tma_bottleneck_useful_work)",
"MetricGroup": "BvOB;Cor;Offcore",
"MetricName": "tma_bottleneck_other_bottlenecks",
"MetricThreshold": "tma_bottleneck_other_bottlenecks > 20",
- "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls"
+ "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls."
},
{
- "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead",
+ "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.",
"MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + 2 * BR_INST_RETIRED.NEAR_CALL + INST_RETIRED.NOP) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
"MetricGroup": "BvUW;Ret",
"MetricName": "tma_bottleneck_useful_work",
@@ -450,7 +474,7 @@
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
"DefaultMetricgroupName": "TopdownL2",
- "MetricExpr": "topdown\\-br\\-mispredict / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "topdown\\-br\\-mispredict / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound)",
"MetricGroup": "BadSpec;BrMispredicts;BvMP;Default;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
@@ -463,24 +487,24 @@
"MetricExpr": "INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks + tma_unknown_branches",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_branch_resteers",
- "MetricThreshold": "tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_l3_hit_latency, tma_store_latency",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.1 power-performance optimized state (Faster wakeup time; Smaller power savings)",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.1 power-performance optimized state (Faster wakeup time; Smaller power savings).",
"MetricExpr": "CPU_CLK_UNHALTED.C01 / tma_info_thread_clks",
"MetricGroup": "C0Wait;TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_c01_wait",
- "MetricThreshold": "tma_c01_wait > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_c01_wait > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.2 power-performance optimized state (Slower wakeup time; Larger power savings)",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.2 power-performance optimized state (Slower wakeup time; Larger power savings).",
"MetricExpr": "CPU_CLK_UNHALTED.C02 / tma_info_thread_clks",
"MetricGroup": "C0Wait;TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_c02_wait",
- "MetricThreshold": "tma_c02_wait > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_c02_wait > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
{
@@ -488,8 +512,8 @@
"MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
"MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_cisc",
- "MetricThreshold": "tma_cisc > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
- "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources. Sample with: FRONTEND_RETIRED.MS_FLOWS",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
"ScaleUnit": "100%"
},
{
@@ -497,24 +521,24 @@
"MetricExpr": "(1 - tma_branch_mispredicts / tma_bad_speculation) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
"MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
"MetricName": "tma_clears_resteers",
- "MetricThreshold": "tma_clears_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that hit in the L2 cache",
+ "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that hit in the L2 cache.",
"MetricExpr": "max(0, tma_icache_misses - tma_code_l2_miss)",
"MetricGroup": "FetchLat;IcMiss;Offcore;TopdownL4;tma_L4_group;tma_icache_misses_group",
"MetricName": "tma_code_l2_hit",
- "MetricThreshold": "tma_code_l2_hit > 0.05 & tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_l2_hit > 0.05 & (tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that miss in the L2 cache",
+ "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that miss in the L2 cache.",
"MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD / tma_info_thread_clks",
"MetricGroup": "FetchLat;IcMiss;Offcore;TopdownL4;tma_L4_group;tma_icache_misses_group",
"MetricName": "tma_code_l2_miss",
- "MetricThreshold": "tma_code_l2_miss > 0.05 & tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_l2_miss > 0.05 & (tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
@@ -522,7 +546,7 @@
"MetricExpr": "max(0, tma_itlb_misses - tma_code_stlb_miss)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group",
"MetricName": "tma_code_stlb_hit",
- "MetricThreshold": "tma_code_stlb_hit > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_hit > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
@@ -530,32 +554,32 @@
"MetricExpr": "ITLB_MISSES.WALK_ACTIVE / tma_info_thread_clks",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group",
"MetricName": "tma_code_stlb_miss",
- "MetricThreshold": "tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for (instruction) code accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for (instruction) code accesses.",
"MetricExpr": "tma_code_stlb_miss * ITLB_MISSES.WALK_COMPLETED_2M_4M / (ITLB_MISSES.WALK_COMPLETED_4K + ITLB_MISSES.WALK_COMPLETED_2M_4M)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group",
"MetricName": "tma_code_stlb_miss_2m",
- "MetricThreshold": "tma_code_stlb_miss_2m > 0.05 & tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss_2m > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for (instruction) code accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for (instruction) code accesses.",
"MetricExpr": "tma_code_stlb_miss * ITLB_MISSES.WALK_COMPLETED_4K / (ITLB_MISSES.WALK_COMPLETED_4K + ITLB_MISSES.WALK_COMPLETED_2M_4M)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group",
"MetricName": "tma_code_stlb_miss_4k",
- "MetricThreshold": "tma_code_stlb_miss_4k > 0.05 & tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss_4k > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
- "MetricExpr": "((81 * tma_info_system_core_frequency - 4.4 * tma_info_system_core_frequency) * (MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + (79 * tma_info_system_core_frequency - 4.4 * tma_info_system_core_frequency) * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "(76.6 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + 74.6 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
- "MetricThreshold": "tma_contested_accesses > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD, MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_bottleneck_memory_synchronization, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_bottleneck_memory_synchronization, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
@@ -566,24 +590,25 @@
"MetricName": "tma_core_bound",
"MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2;Default",
- "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations)",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
- "MetricExpr": "(79 * tma_info_system_core_frequency - 4.4 * tma_info_system_core_frequency) * (MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD + MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (1 - OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "74.6 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD + MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (1 - OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
- "MetricThreshold": "tma_data_sharing > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder",
- "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=0x1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=0x2@) / tma_info_core_core_clks / 2",
+ "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_core_clks / 2",
"MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group",
"MetricName": "tma_decoder0_alone",
- "MetricThreshold": "tma_decoder0_alone > 0.1 & tma_mite > 0.1 & tma_fetch_bandwidth > 0.2",
+ "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions",
"ScaleUnit": "100%"
},
@@ -592,8 +617,8 @@
"MetricExpr": "ARITH.DIV_ACTIVE / tma_info_thread_clks",
"MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
- "MetricThreshold": "tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIV_ACTIVE",
"ScaleUnit": "100%"
},
{
@@ -601,7 +626,7 @@
"MetricExpr": "MEMORY_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_dram_bound",
- "MetricThreshold": "tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_MISS",
"ScaleUnit": "100%"
},
@@ -611,7 +636,7 @@
"MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_dsb",
"MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2",
- "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
"ScaleUnit": "100%"
},
{
@@ -619,34 +644,34 @@
"MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_thread_clks",
"MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_dsb_switches",
- "MetricThreshold": "tma_dsb_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
- "MetricExpr": "min(7 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=0x1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - MEMORY_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
+ "MetricExpr": "min(7 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - MEMORY_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
- "MetricThreshold": "tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_store",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
- "MetricExpr": "(7 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=0x1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_core_clks",
+ "MetricExpr": "(7 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_core_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
- "MetricThreshold": "tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_load",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
- "MetricExpr": "(170 * tma_info_system_core_frequency * cpu@OCR.DEMAND_RFO.L3_MISS\\,offcore_rsp\\=0x103b800002@ + 81 * tma_info_system_core_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM) / tma_info_thread_clks",
+ "MetricExpr": "(170 * tma_info_system_core_frequency * OCR.DEMAND_RFO.L3_MISS@offcore_rsp\\=0x103b800002@ + 81 * tma_info_system_core_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM) / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
- "MetricThreshold": "tma_false_sharing > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
@@ -656,7 +681,7 @@
"MetricGroup": "BvMB;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
- "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
"ScaleUnit": "100%"
},
{
@@ -667,7 +692,7 @@
"MetricName": "tma_fetch_bandwidth",
"MetricThreshold": "tma_fetch_bandwidth > 0.2",
"MetricgroupNoGroup": "TopdownL2;Default",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1, FRONTEND_RETIRED.LATENCY_GE_1, FRONTEND_RETIRED.LATENCY_GE_2. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1;FRONTEND_RETIRED.LATENCY_GE_1;FRONTEND_RETIRED.LATENCY_GE_2. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
@@ -678,7 +703,7 @@
"MetricName": "tma_fetch_latency",
"MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL2;Default",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16, FRONTEND_RETIRED.LATENCY_GE_8",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
"ScaleUnit": "100%"
},
{
@@ -696,7 +721,7 @@
"MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fp_arith",
"MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
"ScaleUnit": "100%"
},
{
@@ -705,15 +730,15 @@
"MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group",
"MetricName": "tma_fp_assists",
"MetricThreshold": "tma_fp_assists > 0.1",
- "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals)",
+ "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals).",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of cycles where the Floating-Point Divider unit was active",
+ "BriefDescription": "This metric represents fraction of cycles where the Floating-Point Divider unit was active.",
"MetricExpr": "ARITH.FPDIV_ACTIVE / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_divider_group",
"MetricName": "tma_fp_divider",
- "MetricThreshold": "tma_fp_divider > 0.2 & tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_fp_divider > 0.2 & (tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
{
@@ -721,8 +746,8 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED2.SCALAR) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_scalar",
- "MetricThreshold": "tma_fp_scalar > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -730,8 +755,8 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.VECTOR + FP_ARITH_INST_RETIRED2.VECTOR) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_vector",
- "MetricThreshold": "tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -739,8 +764,8 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.128B_PACKED_HALF) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_128b",
- "MetricThreshold": "tma_fp_vector_128b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -748,8 +773,8 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.256B_PACKED_HALF) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_256b",
- "MetricThreshold": "tma_fp_vector_256b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -757,8 +782,8 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.512B_PACKED_HALF) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_512b",
- "MetricThreshold": "tma_fp_vector_512b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector_512b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -769,27 +794,27 @@
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions , where one uop can represent multiple contiguous instructions",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions",
"MetricExpr": "tma_light_operations * INST_RETIRED.MACRO_FUSED / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fused_instructions",
"MetricThreshold": "tma_fused_instructions > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions , where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
"DefaultMetricgroupName": "TopdownL2",
- "MetricExpr": "topdown\\-heavy\\-ops / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "topdown\\-heavy\\-ops / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound)",
"MetricGroup": "Default;Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_heavy_operations",
"MetricThreshold": "tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL2;Default",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+]). Sample with: UOPS_RETIRED.HEAVY",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+]). Sample with: UOPS_RETIRED.HEAVY",
"ScaleUnit": "100%"
},
{
@@ -797,8 +822,8 @@
"MetricExpr": "ICACHE_DATA.STALLS / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
- "MetricThreshold": "tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS, FRONTEND_RETIRED.L1I_MISS",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
"ScaleUnit": "100%"
},
{
@@ -809,28 +834,28 @@
"PublicDescription": "Branch Misprediction Cost: Cycles representing fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_bottleneck_mispredictions, tma_branch_mispredicts, tma_mispredicts_resteers"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for conditional non-taken branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for conditional non-taken branches (lower number means higher occurrence rate).",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_NTAKEN",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_cond_ntaken",
"MetricThreshold": "tma_info_bad_spec_ipmisp_cond_ntaken < 200"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for conditional taken branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for conditional taken branches (lower number means higher occurrence rate).",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_TAKEN",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_cond_taken",
"MetricThreshold": "tma_info_bad_spec_ipmisp_cond_taken < 200"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.INDIRECT",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_indirect",
- "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1000"
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate).",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.RET",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_ret",
@@ -858,7 +883,7 @@
},
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck",
- "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_latency + tma_fetch_bandwidth)) * (tma_dsb / (tma_mite + tma_dsb + tma_ms)))",
+ "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_bandwidth + tma_fetch_latency)) * (tma_dsb / (tma_dsb + tma_mite + tma_ms)))",
"MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_bandwidth",
"MetricThreshold": "tma_info_botlnk_l2_dsb_bandwidth > 10",
@@ -866,7 +891,7 @@
},
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
- "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_mite / (tma_mite + tma_dsb + tma_ms))",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_mite + tma_ms))",
"MetricGroup": "DSBmiss;Fed;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_misses",
"MetricThreshold": "tma_info_botlnk_l2_dsb_misses > 10",
@@ -874,10 +899,11 @@
},
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
- "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
"MetricName": "tma_info_botlnk_l2_ic_misses",
- "MetricThreshold": "tma_info_botlnk_l2_ic_misses > 5"
+ "MetricThreshold": "tma_info_botlnk_l2_ic_misses > 5",
+ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: "
},
{
"BriefDescription": "Fraction of branches that are CALL or RET",
@@ -938,11 +964,11 @@
"MetricExpr": "(FP_ARITH_DISPATCHED.PORT_0 + FP_ARITH_DISPATCHED.PORT_1 + FP_ARITH_DISPATCHED.PORT_5) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
- "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)"
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=0x1@",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "tma_info_core_ilp"
},
@@ -955,20 +981,20 @@
"PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp"
},
{
- "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details",
- "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=0x1\\,edge\\=0x1@",
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
"MetricGroup": "DSBmiss",
"MetricName": "tma_info_frontend_dsb_switch_cost"
},
{
"BriefDescription": "Average number of Uops issued by front-end when it issued something",
- "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=0x1@",
+ "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=1@",
"MetricGroup": "Fed;FetchBW",
"MetricName": "tma_info_frontend_fetch_upc"
},
{
"BriefDescription": "Average Latency for L1 instruction cache misses",
- "MetricExpr": "ICACHE_DATA.STALLS / cpu@ICACHE_DATA.STALLS\\,cmask\\=0x1\\,edge\\=0x1@",
+ "MetricExpr": "ICACHE_DATA.STALLS / cpu@ICACHE_DATA.STALLS\\,cmask\\=1\\,edge@",
"MetricGroup": "Fed;FetchLat;IcMiss",
"MetricName": "tma_info_frontend_icache_miss_latency"
},
@@ -1005,13 +1031,13 @@
},
{
"BriefDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection",
- "MetricExpr": "INT_MISC.UNKNOWN_BRANCH_CYCLES / cpu@INT_MISC.UNKNOWN_BRANCH_CYCLES\\,cmask\\=0x1\\,edge\\=0x1@",
+ "MetricExpr": "INT_MISC.UNKNOWN_BRANCH_CYCLES / cpu@INT_MISC.UNKNOWN_BRANCH_CYCLES\\,cmask\\=1\\,edge@",
"MetricGroup": "Fed",
"MetricName": "tma_info_frontend_unknown_branch_cost",
- "PublicDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection. See Unknown_Branches node"
+ "PublicDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection. See Unknown_Branches node."
},
{
- "BriefDescription": "Branch instructions per taken branch",
+ "BriefDescription": "Branch instructions per taken branch.",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;PGO",
"MetricName": "tma_info_inst_mix_bptkbranch"
@@ -1029,7 +1055,7 @@
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
"MetricThreshold": "tma_info_inst_mix_iparith < 10",
- "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW"
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
@@ -1037,7 +1063,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx128",
"MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
@@ -1045,7 +1071,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx256",
"MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate)",
@@ -1053,7 +1079,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx512",
"MetricThreshold": "tma_info_inst_mix_iparith_avx512 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
@@ -1061,7 +1087,7 @@
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_dp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Half-Precision instruction (lower number means higher occurrence rate)",
@@ -1069,7 +1095,7 @@
"MetricGroup": "Flops;FpScalar;InsType;Server",
"MetricName": "tma_info_inst_mix_iparith_scalar_hp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_hp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Half-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Half-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
@@ -1077,7 +1103,7 @@
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_sp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
@@ -1132,7 +1158,7 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
- "MetricThreshold": "tma_info_inst_mix_iptb < 6 * 2 + 1",
+ "MetricThreshold": "tma_info_inst_mix_iptb < 13",
"PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp"
},
{
@@ -1269,7 +1295,7 @@
},
{
"BriefDescription": "Average Parallel L2 cache miss demand Loads",
- "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=0x1@",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@",
"MetricGroup": "Memory_BW;Offcore",
"MetricName": "tma_info_memory_latency_load_l2_mlp"
},
@@ -1294,19 +1320,19 @@
{
"BriefDescription": "Off-core accesses per kilo instruction for modified write requests",
"MetricExpr": "1e3 * OCR.MODIFIED_WRITE.ANY_RESPONSE / tma_info_inst_mix_instructions",
- "MetricGroup": "Offcore",
+ "MetricGroup": "Offcore;Server",
"MetricName": "tma_info_memory_mix_offcore_mwrite_any_pki"
},
{
"BriefDescription": "Off-core accesses per kilo instruction for reads-to-core requests (speculative; including in-core HW prefetches)",
"MetricExpr": "1e3 * OCR.READS_TO_CORE.ANY_RESPONSE / tma_info_inst_mix_instructions",
- "MetricGroup": "CacheHits;Offcore",
+ "MetricGroup": "CacheHits;Offcore;Server",
"MetricName": "tma_info_memory_mix_offcore_read_any_pki"
},
{
"BriefDescription": "L3 cache misses per kilo instruction for reads-to-core requests (speculative; including in-core HW prefetches)",
"MetricExpr": "1e3 * OCR.READS_TO_CORE.L3_MISS / tma_info_inst_mix_instructions",
- "MetricGroup": "Offcore",
+ "MetricGroup": "Offcore;Server",
"MetricName": "tma_info_memory_mix_offcore_read_l3m_pki"
},
{
@@ -1332,23 +1358,23 @@
{
"BriefDescription": "Average DRAM BW for Reads-to-Core (R2C) covering for memory attached to local- and remote-socket",
"MetricExpr": "64 * OCR.READS_TO_CORE.DRAM / 1e9 / tma_info_system_time",
- "MetricGroup": "HPC;Mem;MemoryBW;SoC",
+ "MetricGroup": "HPC;Mem;MemoryBW;Offcore;Server",
"MetricName": "tma_info_memory_soc_r2c_dram_bw",
- "PublicDescription": "Average DRAM BW for Reads-to-Core (R2C) covering for memory attached to local- and remote-socket. See R2C_Offcore_BW"
+ "PublicDescription": "Average DRAM BW for Reads-to-Core (R2C) covering for memory attached to local- and remote-socket. See R2C_Offcore_BW."
},
{
"BriefDescription": "Average L3-cache miss BW for Reads-to-Core (R2C)",
"MetricExpr": "64 * OCR.READS_TO_CORE.L3_MISS / 1e9 / tma_info_system_time",
- "MetricGroup": "HPC;Mem;MemoryBW;SoC",
+ "MetricGroup": "HPC;Mem;MemoryBW;Offcore;Server",
"MetricName": "tma_info_memory_soc_r2c_l3m_bw",
- "PublicDescription": "Average L3-cache miss BW for Reads-to-Core (R2C). This covering going to DRAM or other memory off-chip memory tears. See R2C_Offcore_BW"
+ "PublicDescription": "Average L3-cache miss BW for Reads-to-Core (R2C). This covering going to DRAM or other memory off-chip memory tears. See R2C_Offcore_BW."
},
{
"BriefDescription": "Average Off-core access BW for Reads-to-Core (R2C)",
"MetricExpr": "64 * OCR.READS_TO_CORE.ANY_RESPONSE / 1e9 / tma_info_system_time",
- "MetricGroup": "HPC;Mem;MemoryBW;SoC",
+ "MetricGroup": "HPC;Mem;MemoryBW;Offcore;Server",
"MetricName": "tma_info_memory_soc_r2c_offcore_bw",
- "PublicDescription": "Average Off-core access BW for Reads-to-Core (R2C). R2C account for demand or prefetch load/RFO/code access that fill data into the Core caches"
+ "PublicDescription": "Average Off-core access BW for Reads-to-Core (R2C). R2C account for demand or prefetch load/RFO/code access that fill data into the Core caches."
},
{
"BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
@@ -1376,8 +1402,8 @@
"MetricName": "tma_info_memory_tlb_store_stlb_mpki"
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per core",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=0x1@)",
+ "BriefDescription": "Mem;Backend;CacheHits",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute"
},
@@ -1398,18 +1424,18 @@
"MetricExpr": "INST_RETIRED.ANY / ASSISTS.ANY",
"MetricGroup": "MicroSeq;Pipeline;Ret;Retire",
"MetricName": "tma_info_pipeline_ipassist",
- "MetricThreshold": "tma_info_pipeline_ipassist < 100000",
+ "MetricThreshold": "tma_info_pipeline_ipassist < 100e3",
"PublicDescription": "Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)"
},
{
- "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired",
- "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=0x1@",
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
"MetricGroup": "Pipeline;Ret",
"MetricName": "tma_info_pipeline_retire"
},
{
"BriefDescription": "Estimated fraction of retirement-cycles dealing with repeat instructions",
- "MetricExpr": "INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=0x1@",
+ "MetricExpr": "INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
"MetricGroup": "MicroSeq;Pipeline;Ret",
"MetricName": "tma_info_pipeline_strings_cycles",
"MetricThreshold": "tma_info_pipeline_strings_cycles > 0.1"
@@ -1423,7 +1449,7 @@
},
{
"BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]",
- "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / tma_info_system_time",
+ "MetricExpr": "tma_info_system_turbo_utilization * msr@tsc@ / 1e9 / tma_info_system_time",
"MetricGroup": "Power;Summary",
"MetricName": "tma_info_system_core_frequency"
},
@@ -1435,16 +1461,28 @@
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
{
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for reads [GB / sec]",
+ "MetricExpr": "(64 * UNC_M_PMM_RPQ_INSERTS / 1e9 / tma_info_system_time if #has_pmem > 0 else 0)",
+ "MetricGroup": "MemOffcore;MemoryBW;Server;SoC",
+ "MetricName": "tma_info_system_cxl_mem_read_bw"
+ },
+ {
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for Writes [GB / sec]",
+ "MetricExpr": "(64 * UNC_M_PMM_WPQ_INSERTS / 1e9 / tma_info_system_time if #has_pmem > 0 else 0)",
+ "MetricGroup": "MemOffcore;MemoryBW;Server;SoC",
+ "MetricName": "tma_info_system_cxl_mem_write_bw"
+ },
+ {
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
"MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / tma_info_system_time",
"MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW",
"MetricName": "tma_info_system_dram_bw_use",
- "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth, tma_sq_full"
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth, tma_sq_full"
},
{
"BriefDescription": "Giga Floating Point Operations Per Second",
@@ -1472,14 +1510,13 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
"MetricGroup": "Branches;OS",
"MetricName": "tma_info_system_ipfarbranch",
- "MetricThreshold": "tma_info_system_ipfarbranch < 1000000"
+ "MetricThreshold": "tma_info_system_ipfarbranch < 1e6"
},
{
"BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
"MetricGroup": "OS",
- "MetricName": "tma_info_system_kernel_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_system_kernel_cpi"
},
{
"BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
@@ -1490,7 +1527,7 @@
},
{
"BriefDescription": "Average latency of data read request to external DRAM memory [in nanoseconds]",
- "MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR) / cha_0@event\\=0x0@",
+ "MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR) / uncore_cha_0@event\\=0x1@",
"MetricGroup": "MemOffcore;MemoryLat;Server;SoC",
"MetricName": "tma_info_system_mem_dram_read_latency",
"PublicDescription": "Average latency of data read request to external DRAM memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches"
@@ -1500,18 +1537,17 @@
"MetricExpr": "UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH / UNC_CHA_CLOCKTICKS",
"MetricGroup": "LockCont;MemOffcore;Server;SoC",
"MetricName": "tma_info_system_mem_irq_duplicate_address",
- "MetricThreshold": "(tma_info_system_mem_irq_duplicate_address > 0.1)"
+ "MetricThreshold": "tma_info_system_mem_irq_duplicate_address > 0.1"
},
{
"BriefDescription": "Average number of parallel data read requests to external memory",
- "MetricExpr": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / cha@UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD\\,thresh\\=0x1@",
+ "MetricExpr": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD@thresh\\=1@",
"MetricGroup": "Mem;MemoryBW;SoC",
"MetricName": "tma_info_system_mem_parallel_reads",
"PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches"
},
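The parallel-reads rewrite divides TOR occupancy by the same occupancy event qualified with thresh=1, i.e. by the number of cycles in which at least one data read was outstanding; the quotient is the average number of reads in flight while any read is in flight. A sketch with hypothetical counts:

    # Minimal sketch; both counts are hypothetical uncore readings.
    occupancy_sum = 9_000_000   # sum over cycles of in-flight IA_MISS_DRD reads
    cycles_nonzero = 1_500_000  # cycles with occupancy >= 1 (the thresh=1 form)

    parallel_reads = occupancy_sum / cycles_nonzero
    print(f"avg parallel data reads = {parallel_reads:.1f}")  # -> 6.0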
{
"BriefDescription": "Average latency of data read request to external memory (in nanoseconds)",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / UNC_CHA_TOR_INSERTS.IA_MISS_DRD) / (tma_info_system_socket_clks / tma_info_system_time)",
"MetricGroup": "Mem;MemoryLat;SoC",
"MetricName": "tma_info_system_mem_read_latency",
@@ -1538,7 +1574,7 @@
},
{
"BriefDescription": "Socket actual clocks when any core is active on that socket",
- "MetricExpr": "cha_0@event\\=0x0@",
+ "MetricExpr": "uncore_cha_0@event\\=0x1@",
"MetricGroup": "SoC",
"MetricName": "tma_info_system_socket_clks"
},
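socket_clks and the DRAM read-latency metric above both key off uncore_cha_0@event\=0x1@, the CHA clockticks encoding used after this change. Occupancy divided by inserts is a latency in CHA clocks; dividing by the CHA clock rate (socket clocks over wall time) converts it to nanoseconds. A minimal sketch with hypothetical counts:

    # Minimal sketch; counts are hypothetical.
    occupancy = 50_000_000       # UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR
    inserts = 250_000            # UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR
    socket_clks = 1_800_000_000  # CHA clockticks in the interval
    seconds = 1.0

    latency_clks = occupancy / inserts        # 200 CHA clocks per read
    cha_hz = socket_clks / seconds            # 1.8 GHz uncore clock
    print(f"{1e9 * latency_clks / cha_hz:.0f} ns")  # -> ~111 ns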
@@ -1568,7 +1604,7 @@
"MetricName": "tma_info_system_upi_data_transmit_bw"
},
{
- "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active",
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Pipeline",
"MetricName": "tma_info_thread_clks"
@@ -1577,15 +1613,14 @@
"BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / tma_info_thread_ipc",
"MetricGroup": "Mem;Pipeline",
- "MetricName": "tma_info_thread_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_thread_cpi"
},
{
"BriefDescription": "The ratio of Executed- by Issued-Uops",
"MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
"MetricGroup": "Cor;Pipeline",
"MetricName": "tma_info_thread_execute_per_issue",
- "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage"
+      "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggests high rate of \"execute\" at rename stage."
},
{
"BriefDescription": "Instructions Per Cycle (per Logical Processor)",
@@ -1595,13 +1630,13 @@
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
- "MetricExpr": "slots",
+ "MetricExpr": "TOPDOWN.SLOTS",
"MetricGroup": "TmaL1;tma_L1_group",
"MetricName": "tma_info_thread_slots"
},
{
"BriefDescription": "Fraction of Physical Core issue-slots utilized by this Logical Processor",
- "MetricExpr": "(tma_info_thread_slots / (slots / 2) if #SMT_on else 1)",
+ "MetricExpr": "(tma_info_thread_slots / (TOPDOWN.SLOTS / 2) if #SMT_on else 1)",
"MetricGroup": "SMT;TmaL1;tma_L1_group",
"MetricName": "tma_info_thread_slots_utilization"
},
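The slots metrics now name TOPDOWN.SLOTS directly rather than the slots alias. With SMT enabled the physical core's issue slots are shared by two logical processors, hence the division by 2 in the utilization expression. A sketch, assuming thread_slots is one logical processor's share and the TOPDOWN.SLOTS reading spans the whole core:

    # Minimal sketch; the split of slots between siblings is hypothetical.
    core_topdown_slots = 12_000_000_000  # TOPDOWN.SLOTS for the physical core
    thread_slots = 7_000_000_000         # this logical processor's share
    smt_on = True                        # #SMT_on in the expression grammar

    utilization = thread_slots / (core_topdown_slots / 2) if smt_on else 1.0
    print(f"tma_info_thread_slots_utilization = {utilization:.2f}")  # -> 1.17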
@@ -1617,14 +1652,14 @@
"MetricExpr": "tma_retiring * tma_info_thread_slots / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
- "MetricThreshold": "tma_info_thread_uptb < 6 * 1.5"
+ "MetricThreshold": "tma_info_thread_uptb < 9"
},
{
- "BriefDescription": "This metric represents fraction of cycles where the Integer Divider unit was active",
+ "BriefDescription": "This metric represents fraction of cycles where the Integer Divider unit was active.",
"MetricExpr": "tma_divider - tma_fp_divider",
"MetricGroup": "TopdownL4;tma_L4_group;tma_divider_group",
"MetricName": "tma_int_divider",
- "MetricThreshold": "tma_int_divider > 0.2 & tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_int_divider > 0.2 & (tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
{
@@ -1633,7 +1668,7 @@
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_int_operations",
"MetricThreshold": "tma_int_operations > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents overall Integer (Int) select operations fraction the CPU has executed (retired). Vector/Matrix Int operations and shuffles are counted. Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain",
+ "PublicDescription": "This metric represents overall Integer (Int) select operations fraction the CPU has executed (retired). Vector/Matrix Int operations and shuffles are counted. Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain.",
"ScaleUnit": "100%"
},
{
@@ -1641,8 +1676,8 @@
"MetricExpr": "(INT_VEC_RETIRED.ADD_128 + INT_VEC_RETIRED.VNNI_128) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;IntVector;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group;tma_issue2P",
"MetricName": "tma_int_vector_128b",
- "MetricThreshold": "tma_int_vector_128b > 0.1 & tma_int_operations > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents 128-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_int_vector_128b > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents 128-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1650,8 +1685,8 @@
"MetricExpr": "(INT_VEC_RETIRED.ADD_256 + INT_VEC_RETIRED.MUL_256 + INT_VEC_RETIRED.VNNI_256) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;IntVector;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group;tma_issue2P",
"MetricName": "tma_int_vector_256b",
- "MetricThreshold": "tma_int_vector_256b > 0.1 & tma_int_operations > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD/MUL or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_int_vector_256b > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD/MUL or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1659,8 +1694,8 @@
"MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
- "MetricThreshold": "tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS, FRONTEND_RETIRED.ITLB_MISS",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
"ScaleUnit": "100%"
},
{
@@ -1668,17 +1703,17 @@
"MetricExpr": "max((EXE_ACTIVITY.BOUND_ON_LOADS - MEMORY_ACTIVITY.STALLS_L1D_MISS) / tma_info_thread_clks, 0)",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
"MetricName": "tma_l1_bound",
- "MetricThreshold": "tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 Data (L1D) cache. The L1D cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1D. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache",
+ "BriefDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache",
"MetricExpr": "min(2 * (MEM_INST_RETIRED.ALL_LOADS - MEM_LOAD_RETIRED.FB_HIT - MEM_LOAD_RETIRED.L1_MISS) * 20 / 100, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - MEMORY_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_l1_latency_dependency",
- "MetricThreshold": "tma_l1_latency_dependency > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache. The short latency of the L1D cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
+ "MetricThreshold": "tma_l1_latency_dependency > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache. The short latency of the L1D cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
"ScaleUnit": "100%"
},
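The l1_latency_dependency expression is a cost model with a clamp: a per-hit cycle estimate (2 cycles charged on 20% of L1D-hitting loads) capped by the cycles actually observed stalled on memory while no L1D miss was pending. A sketch of that min/max structure, with hypothetical counts:

    # Minimal sketch of the clamped estimate; counts are hypothetical.
    all_loads, fb_hit, l1_miss = 1_000_000_000, 40_000_000, 60_000_000
    cycles_mem_any = 1_200_000_000   # CYCLE_ACTIVITY.CYCLES_MEM_ANY
    cycles_l1d_miss = 700_000_000    # MEMORY_ACTIVITY.CYCLES_L1D_MISS
    clks = 4_000_000_000

    l1_hits = all_loads - fb_hit - l1_miss
    model = 2 * l1_hits * 20 / 100                       # modeled hit cost
    observed = max(cycles_mem_any - cycles_l1d_miss, 0)  # stall upper bound
    print(min(model, observed) / clks)                   # fraction of cycles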
{
@@ -1686,7 +1721,7 @@
"MetricExpr": "(MEMORY_ACTIVITY.STALLS_L1D_MISS - MEMORY_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks",
"MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
- "MetricThreshold": "tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT",
"ScaleUnit": "100%"
},
@@ -1695,7 +1730,7 @@
"MetricExpr": "4.4 * tma_info_system_core_frequency * MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_l2_bound_group",
"MetricName": "tma_l2_hit_latency",
- "MetricThreshold": "tma_l2_hit_latency > 0.05 & tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l2_hit_latency > 0.05 & (tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L2 cache under unloaded scenarios (possibly L2 latency limited). Avoiding L1 cache misses (i.e. L1 misses/L2 hits) will improve the latency. Sample with: MEM_LOAD_RETIRED.L2_HIT",
"ScaleUnit": "100%"
},
@@ -1704,17 +1739,17 @@
"MetricExpr": "(MEMORY_ACTIVITY.STALLS_L2_MISS - MEMORY_ACTIVITY.STALLS_L3_MISS) / tma_info_thread_clks",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
- "MetricThreshold": "tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
- "MetricExpr": "(37 * tma_info_system_core_frequency - 4.4 * tma_info_system_core_frequency) * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks",
+ "MetricExpr": "32.6 * tma_info_system_core_frequency * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
- "MetricThreshold": "tma_l3_hit_latency > 0.1 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT. Related metrics: tma_bottleneck_cache_memory_latency, tma_branch_resteers, tma_mem_latency, tma_store_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_bottleneck_data_cache_memory_latency, tma_mem_latency",
"ScaleUnit": "100%"
},
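Several latency nodes in this update fold their frequency-scaled constants: (37 - 4.4) becomes the 32.6 above, and the same arithmetic yields the 72, 133 and 153 used by the local-DRAM, remote-cache and remote-DRAM expressions below. Each is a net cost: the full latency minus the portion already charged to the faster level. A sketch of the folded form, with hypothetical inputs:

    # Minimal sketch; frequency and counts are hypothetical.
    core_freq = 2.0          # tma_info_system_core_frequency, in GHz
    l3_hit = 10_000_000      # MEM_LOAD_RETIRED.L3_HIT
    fb_weight = 1.2          # 1 + FB_HIT / L1_MISS / 2
    clks = 4_000_000_000

    # 37 * f - 4.4 * f == (37 - 4.4) * f == 32.6 * f
    tma_l3_hit_latency = 32.6 * core_freq * l3_hit * fb_weight / clks
    print(f"{100 * tma_l3_hit_latency:.1f}% of cycles")  # -> 19.6%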
{
@@ -1722,19 +1757,19 @@
"MetricExpr": "DECODE.LCP / tma_info_thread_clks",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_lcp",
- "MetricThreshold": "tma_lcp > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation)",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
"DefaultMetricgroupName": "TopdownL2",
"MetricExpr": "max(0, tma_retiring - tma_heavy_operations)",
"MetricGroup": "Default;Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_light_operations",
"MetricThreshold": "tma_light_operations > 0.6",
"MetricgroupNoGroup": "TopdownL2;Default",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
"ScaleUnit": "100%"
},
{
@@ -1751,7 +1786,7 @@
"MetricExpr": "tma_dtlb_load - tma_load_stlb_miss",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
"MetricName": "tma_load_stlb_hit",
- "MetricThreshold": "tma_load_stlb_hit > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_hit > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
@@ -1759,48 +1794,49 @@
"MetricExpr": "DTLB_LOAD_MISSES.WALK_ACTIVE / tma_info_thread_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
"MetricName": "tma_load_stlb_miss",
- "MetricThreshold": "tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_1G / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_1g",
- "MetricThreshold": "tma_load_stlb_miss_1g > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_1g > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_2m",
- "MetricThreshold": "tma_load_stlb_miss_2m > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_2m > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_4K / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_4k",
- "MetricThreshold": "tma_load_stlb_miss_4k > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_4k > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
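The three page-size children split tma_load_stlb_miss by the completed-walk counts, so their values are shares of the parent and sum back to it. A sketch with hypothetical counts:

    # Minimal sketch; walk counts and the parent value are hypothetical.
    walks = {"4k": 800_000, "2m_4m": 150_000, "1g": 50_000}  # WALK_COMPLETED_*
    tma_load_stlb_miss = 0.08
    total = sum(walks.values())

    shares = {size: tma_load_stlb_miss * n / total for size, n in walks.items()}
    print(shares, sum(shares.values()))  # the shares sum back to 0.08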
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory",
- "MetricExpr": "(109 * tma_info_system_core_frequency - 37 * tma_info_system_core_frequency) * MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "72 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "Server;TopdownL5;tma_L5_group;tma_mem_latency_group",
"MetricName": "tma_local_mem",
- "MetricThreshold": "tma_local_mem > 0.1 & tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_local_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. Sample with: MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(16 * max(0, MEM_INST_RETIRED.LOCK_LOADS - L2_RQSTS.ALL_RFO) + MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES * (10 * L2_RQSTS.RFO_HIT + min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO))) / tma_info_thread_clks",
"MetricGroup": "LockCont;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
- "MetricThreshold": "tma_lock_latency > 0.2 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
"ScaleUnit": "100%"
},
@@ -1816,20 +1852,20 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to memory bandwidth Allocation feature (RDT's memory bandwidth throttling)",
+ "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to memory bandwidth Allocation feature (RDT's memory bandwidth throttling).",
"MetricExpr": "INT_MISC.MBA_STALLS / tma_info_thread_clks",
"MetricGroup": "MemoryBW;Offcore;Server;TopdownL5;tma_L5_group;tma_mem_bandwidth_group",
"MetricName": "tma_mba_stalls",
- "MetricThreshold": "tma_mba_stalls > 0.1 & tma_mem_bandwidth > 0.2 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_mba_stalls > 0.1 & (tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
- "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=0x4@) / tma_info_thread_clks",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
- "MetricThreshold": "tma_mem_bandwidth > 0.2 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
"ScaleUnit": "100%"
},
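tma_mem_bandwidth counts cycles with at least four demand data reads outstanding (the cmask=4 qualifier, now written without the 0x prefix), capped at total thread cycles; tma_mem_latency below then takes cycles with any read outstanding and subtracts the bandwidth share. A sketch of that split, with hypothetical counts:

    # Minimal sketch of the bandwidth/latency split; counts are hypothetical.
    clks = 4_000_000_000
    cycles_ge4_rd = 900_000_000    # ALL_DATA_RD counted with cmask=4
    cycles_any_rd = 2_100_000_000  # CYCLES_WITH_DATA_RD

    tma_mem_bandwidth = min(clks, cycles_ge4_rd) / clks                    # 0.225
    tma_mem_latency = min(clks, cycles_any_rd) / clks - tma_mem_bandwidth  # 0.30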
{
@@ -1837,32 +1873,31 @@
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
"MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
- "MetricThreshold": "tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_bottleneck_cache_memory_latency, tma_l3_hit_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_bottleneck_data_cache_memory_latency, tma_l3_hit_latency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
"DefaultMetricgroupName": "TopdownL2",
- "MetricExpr": "topdown\\-mem\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "topdown\\-mem\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound)",
"MetricGroup": "Backend;Default;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_memory_bound",
"MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2;Default",
- "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two)",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
"ScaleUnit": "100%"
},
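tma_memory_bound, like tma_retiring later in the hunk, is one raw topdown-* bucket normalized by the sum of the four level-1 buckets; the vestigial "+ 0 * slots" term (presumably once kept to pull the slots event into the metric's event group) is dropped on both. A sketch of the normalization, with hypothetical bucket values:

    # Minimal sketch; bucket readings are hypothetical.
    td = {"fe_bound": 2.0e9, "bad_spec": 0.5e9,
          "retiring": 4.0e9, "be_bound": 3.5e9, "mem_bound": 2.2e9}

    level1 = td["fe_bound"] + td["bad_spec"] + td["retiring"] + td["be_bound"]
    tma_memory_bound = td["mem_bound"] / level1  # fraction of all issue slots
    print(f"{100 * tma_memory_bound:.0f}%")      # -> 22%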
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to LFENCE Instructions",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to LFENCE Instructions.",
"MetricExpr": "13 * MISC2_RETIRED.LFENCE / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_memory_fence",
- "MetricThreshold": "tma_memory_fence > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_memory_fence > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations , uops for memory load or store accesses",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
"MetricExpr": "tma_light_operations * MEM_UOP_RETIRED.ANY / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_memory_operations",
@@ -1883,7 +1918,7 @@
"MetricExpr": "tma_branch_mispredicts / tma_bad_speculation * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
"MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
"MetricName": "tma_mispredicts_resteers",
- "MetricThreshold": "tma_mispredicts_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_bottleneck_mispredictions, tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost",
"ScaleUnit": "100%"
},
@@ -1897,17 +1932,17 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued , the Count Domain; [ADL+] cycles)",
+      "BriefDescription": "This metric estimates penalty in terms of percentage of ([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles)",
"MetricExpr": "160 * ASSISTS.SSE_AVX_MIX / tma_info_thread_clks",
"MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group",
"MetricName": "tma_mixing_vectors",
"MetricThreshold": "tma_mixing_vectors > 0.05",
- "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued , the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
+      "PublicDescription": "This metric estimates penalty in terms of percentage of ([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the Microcode Sequencer (MS) unit - see Microcode_Sequencer node for details",
- "MetricExpr": "max(IDQ.MS_CYCLES_ANY, cpu@UOPS_RETIRED.MS\\,cmask\\=0x1@ / (UOPS_RETIRED.SLOTS / UOPS_ISSUED.ANY)) / tma_info_core_core_clks / 2",
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the Microcode Sequencer (MS) unit - see Microcode_Sequencer node for details.",
+ "MetricExpr": "max(IDQ.MS_CYCLES_ANY, cpu@UOPS_RETIRED.MS\\,cmask\\=1@ / (UOPS_RETIRED.SLOTS / UOPS_ISSUED.ANY)) / tma_info_core_core_clks / 2.4",
"MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_ms",
"MetricThreshold": "tma_ms > 0.05 & tma_fetch_bandwidth > 0.2",
@@ -1915,11 +1950,11 @@
},
{
"BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS)",
- "MetricExpr": "3 * cpu@UOPS_RETIRED.MS\\,cmask\\=0x1\\,edge\\=0x1@ / (UOPS_RETIRED.SLOTS / UOPS_ISSUED.ANY) / tma_info_thread_clks",
+ "MetricExpr": "3 * cpu@UOPS_RETIRED.MS\\,cmask\\=1\\,edge@ / (UOPS_RETIRED.SLOTS / UOPS_ISSUED.ANY) / tma_info_thread_clks",
"MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
"MetricName": "tma_ms_switches",
- "MetricThreshold": "tma_ms_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: FRONTEND_RETIRED.MS_FLOWS. Related metrics: tma_bottleneck_irregular_overhead, tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_bottleneck_irregular_overhead, tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
"ScaleUnit": "100%"
},
{
@@ -1928,7 +1963,7 @@
"MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_non_fused_branches",
"MetricThreshold": "tma_non_fused_branches > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused.",
"ScaleUnit": "100%"
},
{
@@ -1936,12 +1971,13 @@
"MetricExpr": "tma_light_operations * INST_RETIRED.NOP / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "BvBO;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_nop_instructions",
- "MetricThreshold": "tma_nop_instructions > 0.1 & tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_int_operations + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches))",
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_other_light_ops",
@@ -1950,19 +1986,19 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types)",
+ "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).",
"MetricExpr": "max(tma_branch_mispredicts * (1 - BR_MISP_RETIRED.ALL_BRANCHES / (INT_MISC.CLEARS_COUNT - MACHINE_CLEARS.COUNT)), 0.0001)",
"MetricGroup": "BrMispredicts;BvIO;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_other_mispredicts",
- "MetricThreshold": "tma_other_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering",
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.",
"MetricExpr": "max(tma_machine_clears * (1 - MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.COUNT), 0.0001)",
"MetricGroup": "BvIO;Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_other_nukes",
- "MetricThreshold": "tma_other_nukes > 0.05 & tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
@@ -1971,7 +2007,7 @@
"MetricGroup": "TopdownL5;tma_L5_group;tma_assists_group",
"MetricName": "tma_page_faults",
"MetricThreshold": "tma_page_faults > 0.05",
- "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Page Faults. A Page Fault may apply on first application access to a memory page. Note operating system handling of page faults accounts for the majority of its cost",
+      "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handling Page Faults. A Page Fault may apply on first application access to a memory page. Note operating system handling of page faults accounts for the majority of its cost.",
"ScaleUnit": "100%"
},
{
@@ -1980,7 +2016,7 @@
"MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_0",
"MetricThreshold": "tma_port_0 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1989,7 +2025,7 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_1",
"MetricThreshold": "tma_port_1 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1998,78 +2034,79 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_6",
"MetricThreshold": "tma_port_6 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "((tma_ports_utilized_0 * tma_info_thread_clks + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_3_PORTS_UTIL)) / tma_info_thread_clks if ARITH.DIV_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - EXE_ACTIVITY.BOUND_ON_LOADS else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_3_PORTS_UTIL) / tma_info_thread_clks)",
"MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_ports_utilization",
- "MetricThreshold": "tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricConstraint": "NO_THRESHOLD_AND_NMI",
"MetricExpr": "(EXE_ACTIVITY.EXE_BOUND_0_PORTS + max(RS.EMPTY_RESOURCE - RESOURCE_STALLS.SCOREBOARD, 0)) / tma_info_thread_clks * (CYCLE_ACTIVITY.STALLS_TOTAL - EXE_ACTIVITY.BOUND_ON_LOADS) / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
- "MetricThreshold": "tma_ports_utilized_0 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricConstraint": "NO_THRESHOLD_AND_NMI",
"MetricExpr": "EXE_ACTIVITY.1_PORTS_UTIL / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_1",
- "MetricThreshold": "tma_ports_utilized_1 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Sample with: EXE_ACTIVITY.1_PORTS_UTIL. Related metrics: tma_l1_bound",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "EXE_ACTIVITY.2_PORTS_UTIL / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_2",
- "MetricThreshold": "tma_ports_utilized_2 > 0.15 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Sample with: EXE_ACTIVITY.2_PORTS_UTIL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Sample with: EXE_ACTIVITY.2_PORTS_UTIL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_thread_clks",
"MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
- "MetricThreshold": "tma_ports_utilized_3m > 0.4 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues",
- "MetricExpr": "((170 * tma_info_system_core_frequency - 37 * tma_info_system_core_frequency) * MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM + (170 * tma_info_system_core_frequency - 37 * tma_info_system_core_frequency) * MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "(133 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM + 133 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "Offcore;Server;Snoop;TopdownL5;tma_L5_group;tma_issueSyncxn;tma_mem_latency_group",
"MetricName": "tma_remote_cache",
- "MetricThreshold": "tma_remote_cache > 0.05 & tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues. This is caused often due to non-optimal NUMA allocations. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM, MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_remote_cache > 0.05 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+      "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues. This is often caused by non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM_PS;MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD_PS. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory",
- "MetricExpr": "(190 * tma_info_system_core_frequency - 37 * tma_info_system_core_frequency) * MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "153 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "Server;Snoop;TopdownL5;tma_L5_group;tma_mem_latency_group",
"MetricName": "tma_remote_mem",
- "MetricThreshold": "tma_remote_mem > 0.1 & tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
+ "MetricThreshold": "tma_remote_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+      "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is often caused by non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM_PS",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound)",
"MetricGroup": "BvUW;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
@@ -2082,7 +2119,7 @@
"MetricExpr": "RESOURCE_STALLS.SCOREBOARD / tma_info_thread_clks + tma_c02_wait",
"MetricGroup": "BvIO;PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
"MetricName": "tma_serializing_operation",
- "MetricThreshold": "tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
"ScaleUnit": "100%"
},
@@ -2091,17 +2128,16 @@
"MetricExpr": "tma_light_operations * INT_VEC_RETIRED.SHUFFLES / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "HPC;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_shuffles_256b",
- "MetricThreshold": "tma_shuffles_256b > 0.1 & tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring Shuffle operations of 256-bit vector size (FP or Integer). Shuffles may incur slow cross \"vector lane\" data transfers",
+ "MetricThreshold": "tma_shuffles_256b > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring Shuffle operations of 256-bit vector size (FP or Integer). Shuffles may incur slow cross \"vector lane\" data transfers.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "CPU_CLK_UNHALTED.PAUSE / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_slow_pause",
- "MetricThreshold": "tma_slow_pause > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. Sample with: CPU_CLK_UNHALTED.PAUSE_INST",
"ScaleUnit": "100%"
},
@@ -2111,7 +2147,7 @@
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_split_loads",
"MetricThreshold": "tma_split_loads > 0.3",
- "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS_PS",
"ScaleUnit": "100%"
},
{
@@ -2119,8 +2155,8 @@
"MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
"MetricName": "tma_split_stores",
- "MetricThreshold": "tma_split_stores > 0.2 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
"ScaleUnit": "100%"
},
{
@@ -2128,8 +2164,8 @@
"MetricExpr": "(XQ.FULL_CYCLES + L1D_PEND_MISS.L2_STALLS) / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
- "MetricThreshold": "tma_sq_full > 0.3 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
"ScaleUnit": "100%"
},
{
@@ -2137,8 +2173,8 @@
"MetricExpr": "EXE_ACTIVITY.BOUND_ON_STORES / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_store_bound",
- "MetricThreshold": "tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES_PS",
"ScaleUnit": "100%"
},
{
@@ -2146,8 +2182,8 @@
"MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
- "MetricThreshold": "tma_store_fwd_blk > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
"ScaleUnit": "100%"
},
{
@@ -2155,8 +2191,8 @@
"MetricExpr": "(MEM_STORE_RETIRED.L2_HIT * 10 * (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) + (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
"MetricGroup": "BvML;LockCont;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
- "MetricThreshold": "tma_store_latency > 0.1 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_branch_resteers, tma_fb_full, tma_l3_hit_latency, tma_lock_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
"ScaleUnit": "100%"
},
{
@@ -2173,7 +2209,7 @@
"MetricExpr": "tma_dtlb_store - tma_store_stlb_miss",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
"MetricName": "tma_store_stlb_hit",
- "MetricThreshold": "tma_store_stlb_hit > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_hit > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
@@ -2181,31 +2217,31 @@
"MetricExpr": "DTLB_STORE_MISSES.WALK_ACTIVE / tma_info_core_core_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
"MetricName": "tma_store_stlb_miss",
- "MetricThreshold": "tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_1G / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_1g",
- "MetricThreshold": "tma_store_stlb_miss_1g > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_1g > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_2m",
- "MetricThreshold": "tma_store_stlb_miss_2m > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_2m > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_4K / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_4k",
- "MetricThreshold": "tma_store_stlb_miss_4k > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_4k > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
@@ -2213,7 +2249,7 @@
"MetricExpr": "9 * OCR.STREAMING_WR.ANY_RESPONSE / tma_info_thread_clks",
"MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueSmSt;tma_store_bound_group",
"MetricName": "tma_streaming_stores",
- "MetricThreshold": "tma_streaming_stores > 0.2 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_streaming_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should Streaming stores be a bottleneck. Sample with: OCR.STREAMING_WR.ANY_RESPONSE. Related metrics: tma_fb_full",
"ScaleUnit": "100%"
},
@@ -2222,7 +2258,7 @@
"MetricExpr": "INT_MISC.UNKNOWN_BRANCH_CYCLES / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
- "MetricThreshold": "tma_unknown_branches > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: FRONTEND_RETIRED.UNKNOWN_BRANCH",
"ScaleUnit": "100%"
},
@@ -2231,8 +2267,8 @@
"MetricExpr": "tma_retiring * UOPS_EXECUTED.X87 / UOPS_EXECUTED.THREAD",
"MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
"MetricName": "tma_x87_use",
- "MetricThreshold": "tma_x87_use > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
"ScaleUnit": "100%"
},
{
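The metric hunks above make two semantics-preserving changes: constant folding in tma_remote_mem, where (190 - 37) * tma_info_system_core_frequency reduces to 153 * tma_info_system_core_frequency, and explicit parenthesization of the MetricThreshold strings. Because '&' here is a logical AND over boolean comparisons, the nested grouping evaluates identically to the old flat chain. A minimal Python sketch of that equivalence (the sample values are hypothetical, and this is not the perf expression parser):

    # Hypothetical metric values; any assignment gives the same result.
    vals = {"tma_remote_cache": 0.07, "tma_mem_latency": 0.15,
            "tma_dram_bound": 0.12, "tma_memory_bound": 0.25,
            "tma_backend_bound": 0.3}

    flat = (vals["tma_remote_cache"] > 0.05 and vals["tma_mem_latency"] > 0.1
            and vals["tma_dram_bound"] > 0.1 and vals["tma_memory_bound"] > 0.2
            and vals["tma_backend_bound"] > 0.2)

    nested = (vals["tma_remote_cache"] > 0.05
              and (vals["tma_mem_latency"] > 0.1
                   and (vals["tma_dram_bound"] > 0.1
                        and (vals["tma_memory_bound"] > 0.2
                             and vals["tma_backend_bound"] > 0.2))))

    assert flat == nested  # AND is associative; grouping cannot change the result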
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/frontend.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/frontend.json
index bf68493d4509..793c486ffabe 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/frontend.json
@@ -41,7 +41,7 @@
"EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x1",
- "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -52,7 +52,7 @@
"EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x11",
- "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss.",
+ "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -63,7 +63,7 @@
"EventName": "FRONTEND_RETIRED.ITLB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x14",
- "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -74,7 +74,7 @@
"EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x12",
- "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss.",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -85,7 +85,7 @@
"EventName": "FRONTEND_RETIRED.L2_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x13",
- "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss.",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -96,7 +96,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
"MSRIndex": "0x3F7",
"MSRValue": "0x600106",
- "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall.",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -107,7 +107,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
"MSRValue": "0x608006",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -118,7 +118,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
"MSRValue": "0x601006",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -129,7 +129,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
"MSRValue": "0x600206",
- "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -140,7 +140,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
"MSRIndex": "0x3F7",
"MSRValue": "0x610006",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -151,7 +151,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
"MSRValue": "0x100206",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -162,7 +162,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
"MSRValue": "0x602006",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -173,7 +173,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
"MSRValue": "0x600406",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -184,7 +184,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
"MSRValue": "0x620006",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -195,7 +195,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
"MSRValue": "0x604006",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -206,7 +206,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
"MSRValue": "0x600806",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -217,6 +217,7 @@
"EventName": "FRONTEND_RETIRED.MS_FLOWS",
"MSRIndex": "0x3F7",
"MSRValue": "0x8",
+ "PublicDescription": "FRONTEND_RETIRED.MS_FLOWS Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -227,7 +228,7 @@
"EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x15",
- "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -238,6 +239,7 @@
"EventName": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
"MSRIndex": "0x3F7",
"MSRValue": "0x17",
+ "PublicDescription": "FRONTEND_RETIRED.UNKNOWN_BRANCH Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
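The frontend.json hunks are mechanical: every FRONTEND_RETIRED event gains an "Available PDIST counters: 0" suffix on its PublicDescription, with the bare EventName used as the description where none existed (MS_FLOWS, UNKNOWN_BRANCH). A sketch of that bulk rewrite in Python, assuming a local frontend.json; the real files are regenerated from Intel's perfmon event tables, not by this script:

    import json

    with open("frontend.json") as f:          # hypothetical local copy
        events = json.load(f)

    for ev in events:
        # Fall back to the event name when no prose description exists,
        # matching the MS_FLOWS and UNKNOWN_BRANCH entries above.
        desc = ev.get("PublicDescription", ev["EventName"])
        ev["PublicDescription"] = desc + " Available PDIST counters: 0"

    with open("frontend.json", "w") as f:
        json.dump(events, f, indent=4)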
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/memory.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/memory.json
index 41d4120d4dae..5e6c1f05c981 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/memory.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/memory.json
@@ -169,17 +169,62 @@
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.STORE_SAMPLE",
- "PublicDescription": "Counts Retired memory accesses with at least 1 store operation. This PEBS event is the precisely-distributed (PDist) trigger covering all stores uops for sampling by the PEBS Store Latency Facility. The facility is described in Intel SDM Volume 3 section 19.9.8",
+ "PublicDescription": "Counts Retired memory accesses with at least 1 store operation. This PEBS event is the precisely-distributed (PDist) trigger covering all stores uops for sampling by the PEBS Store Latency Facility. The facility is described in Intel SDM Volume 3 section 19.9.8 Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x2"
},
{
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the local socket's L1, L2, or L3 caches.",
"Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the local socket's L1, L2, or L3 caches. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.SNC_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x708000004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000001",
+ "PublicDescription": "Counts demand data reads that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -190,6 +235,51 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00001",
+ "PublicDescription": "Counts demand data reads that were not supplied by the local socket's L1, L2, or L3 caches. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000001",
+ "PublicDescription": "Counts demand data reads that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to another socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x730000001",
+ "PublicDescription": "Counts demand data reads that were supplied by DRAM attached to another socket. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.SNC_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x708000001",
+ "PublicDescription": "Counts demand data reads that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -200,6 +290,29 @@
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F3FC00002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the local socket's L1, L2, or L3 caches. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.SNC_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x708000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -210,6 +323,7 @@
"EventName": "OCR.HWPF_L3.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x94002380",
+ "PublicDescription": "Counts hardware prefetches to the L3 only that missed the local socket's L1, L2, and L3 caches. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -220,6 +334,18 @@
"EventName": "OCR.HWPF_L3.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84002380",
+ "PublicDescription": "Counts hardware prefetches to the L3 only that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -230,6 +356,7 @@
"EventName": "OCR.READS_TO_CORE.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F3FC04477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -240,6 +367,7 @@
"EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F04C04477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -250,6 +378,62 @@
"EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL_SOCKET",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x70CC04477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that missed the L3 Cache and were supplied by the local socket (DRAM or PMM), whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM or DRAM accesses that are controlled by the close or distant SNC Cluster. It does not count misses to the L3 which go to Local CXL Type 2 Memory or Local Non DRAM. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts DRAM accesses that are controlled by the close or distant SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.LOCAL_SOCKET_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x70C004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts DRAM accesses that are controlled by the close or distant SNC Cluster. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to another socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x730004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to another socket. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM or PMM attached to another socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_MEMORY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x733004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM or PMM attached to another socket. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.SNC_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x708004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -260,6 +444,7 @@
"EventName": "OCR.STREAMING_WR.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x94000800",
+ "PublicDescription": "Counts streaming stores that missed the local socket's L1, L2, and L3 caches. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -270,6 +455,18 @@
"EventName": "OCR.STREAMING_WR.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000800",
+ "PublicDescription": "Counts streaming stores that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts Demand RFOs, ItoM's, PREFECTHW's, Hardware RFO Prefetches to the L1/L2 and Streaming stores that likely resulted in a store to Memory (DRAM or PMM)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.WRITE_ESTIMATE.MEMORY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xFBFF80822",
+ "PublicDescription": "Counts Demand RFOs, ItoM's, PREFECTHW's, Hardware RFO Prefetches to the L1/L2 and Streaming stores that likely resulted in a store to Memory (DRAM or PMM) Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -295,7 +492,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED",
- "PublicDescription": "Counts the number of times RTM abort was triggered.",
+ "PublicDescription": "Counts the number of times RTM abort was triggered. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x4"
},
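The new OCR.* entries in memory.json differ only in their MSRValue bit patterns: judging from the values above, the low bits select the request type (0x1 demand data read, 0x2 RFO, 0x4 code read, 0x4477 the READS_TO_CORE mix) while the upper bits pick the supplier/snoop response, which is why OCR.DEMAND_DATA_RD.DRAM (0x73C000001) and OCR.DEMAND_CODE_RD.DRAM (0x73C000004) share everything but the low bits. A small decoding sketch; the 16-bit field split is an assumption inferred from these values, not a statement of the architectural layout:

    def split_ocr(msr_value: int) -> tuple[int, int]:
        # Assumed layout: request type in bits 0-15, response in the rest.
        return msr_value & 0xFFFF, msr_value >> 16

    for name, val in {
        "OCR.DEMAND_DATA_RD.DRAM": 0x73C000001,
        "OCR.DEMAND_CODE_RD.DRAM": 0x73C000004,
        "OCR.READS_TO_CORE.DRAM": 0x73C004477,
    }.items():
        req, resp = split_ocr(val)
        print(f"{name}: request=0x{req:x} response=0x{resp:x}")

All three print response=0x73c00, differing only in the request field.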
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/other.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/other.json
index c424facf1b95..21f49f609ed4 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/other.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/other.json
@@ -8,271 +8,28 @@
"UMask": "0x8"
},
{
- "BriefDescription": "Counts the cycles where the AMX (Advance Matrix Extension) unit is busy performing an operation.",
+ "BriefDescription": "HW_INTERRUPTS.MASKED",
"Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xb7",
- "EventName": "EXE.AMX_BUSY",
- "SampleAfterValue": "2000003",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_CODE_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x73C000004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x104000004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_CODE_RD.SNC_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x708000004",
+ "EventCode": "0xcb",
+ "EventName": "HW_INTERRUPTS.MASKED",
"SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_DATA_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x73C000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x104000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to another socket.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_DATA_RD.REMOTE_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x730000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_DATA_RD.SNC_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x708000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F3FFC0002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_RFO.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x73C000002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x104000002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_RFO.SNC_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x708000002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts data load hardware prefetch requests to the L1 data cache that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.HWPF_L1D.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10400",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts hardware prefetches (which bring data to L2) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.HWPF_L2.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10070",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts hardware prefetches to the L3 only that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.HWPF_L3.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x12380",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts hardware prefetches to the L3 only that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline was homed in a remote socket.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.HWPF_L3.REMOTE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x90002380",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts writebacks of modified cachelines and streaming stores that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.MODIFIED_WRITE.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10808",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.READS_TO_CORE.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F3FFC4477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.READS_TO_CORE.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x73C004477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.READS_TO_CORE.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x104004477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts DRAM accesses that are controlled by the close or distant SNC Cluster.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.READS_TO_CORE.LOCAL_SOCKET_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x70C004477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and were supplied by a remote socket.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.READS_TO_CORE.REMOTE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F33004477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to another socket.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.READS_TO_CORE.REMOTE_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x730004477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
+ "UMask": "0x2"
},
{
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM or PMM attached to another socket.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.READS_TO_CORE.REMOTE_MEMORY",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x733004477",
+ "BriefDescription": "HW_INTERRUPTS.PENDING_AND_MASKED",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcb",
+ "EventName": "HW_INTERRUPTS.PENDING_AND_MASKED",
"SampleAfterValue": "100003",
- "UMask": "0x1"
+ "UMask": "0x4"
},
{
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.READS_TO_CORE.SNC_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x708004477",
- "SampleAfterValue": "100003",
+ "BriefDescription": "Number of hardware interrupts received by the processor.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcb",
+ "EventName": "HW_INTERRUPTS.RECEIVED",
+ "PublicDescription": "Counts the number of hardware interruptions received by the processor.",
+ "SampleAfterValue": "203",
"UMask": "0x1"
},
{
@@ -282,70 +39,11 @@
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10800",
+ "PublicDescription": "Counts streaming stores that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts Demand RFOs, ItoM's, PREFECTHW's, Hardware RFO Prefetches to the L1/L2 and Streaming stores that likely resulted in a store to Memory (DRAM or PMM)",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.WRITE_ESTIMATE.MEMORY",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0xFBFF80822",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xa5",
- "EventName": "RS.EMPTY",
- "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
- "SampleAfterValue": "1000003",
- "UMask": "0x7"
- },
- {
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
- "Counter": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "EdgeDetect": "1",
- "EventCode": "0xa5",
- "EventName": "RS.EMPTY_COUNT",
- "Invert": "1",
- "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
- "SampleAfterValue": "100003",
- "UMask": "0x7"
- },
- {
- "BriefDescription": "Cycles when Reservation Station (RS) is empty due to a resource in the back-end",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xa5",
- "EventName": "RS.EMPTY_RESOURCE",
- "SampleAfterValue": "1000003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "This event is deprecated. Refer to new event RS.EMPTY_COUNT",
- "Counter": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "Deprecated": "1",
- "EdgeDetect": "1",
- "EventCode": "0xa5",
- "EventName": "RS_EMPTY.COUNT",
- "Invert": "1",
- "SampleAfterValue": "100003",
- "UMask": "0x7"
- },
- {
- "BriefDescription": "This event is deprecated. Refer to new event RS.EMPTY",
- "Counter": "0,1,2,3,4,5,6,7",
- "Deprecated": "1",
- "EventCode": "0xa5",
- "EventName": "RS_EMPTY.CYCLES",
- "SampleAfterValue": "1000003",
- "UMask": "0x7"
- },
- {
"BriefDescription": "Cycles the uncore cannot take further requests",
"Counter": "0,1,2,3",
"CounterMask": "1",
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json
index 50cacfbbc7cf..1fa7957956df 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json
@@ -62,7 +62,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "PublicDescription": "Counts all branch instructions retired.",
+ "PublicDescription": "Counts all branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009"
},
{
@@ -70,7 +70,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND",
- "PublicDescription": "Counts conditional branch instructions retired.",
+ "PublicDescription": "Counts conditional branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x11"
},
@@ -79,7 +79,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_NTAKEN",
- "PublicDescription": "Counts not taken branch instructions retired.",
+ "PublicDescription": "Counts not taken branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x10"
},
@@ -88,7 +88,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_TAKEN",
- "PublicDescription": "Counts taken conditional branch instructions retired.",
+ "PublicDescription": "Counts taken conditional branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x1"
},
@@ -97,7 +97,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "PublicDescription": "Counts far branch instructions retired.",
+ "PublicDescription": "Counts far branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x40"
},
@@ -106,7 +106,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT",
- "PublicDescription": "Counts near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
+ "PublicDescription": "Counts near indirect branch instructions retired excluding returns. TSX abort is an indirect branch. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x80"
},
@@ -115,7 +115,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
- "PublicDescription": "Counts both direct and indirect near call instructions retired.",
+ "PublicDescription": "Counts both direct and indirect near call instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x2"
},
@@ -124,7 +124,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "PublicDescription": "Counts return instructions retired.",
+ "PublicDescription": "Counts return instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x8"
},
@@ -133,7 +133,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Counts taken branch instructions retired.",
+ "PublicDescription": "Counts taken branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x20"
},
@@ -142,7 +142,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path. Available PDIST counters: 0",
"SampleAfterValue": "400009"
},
{
@@ -150,7 +150,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND",
- "PublicDescription": "Counts mispredicted conditional branch instructions retired.",
+ "PublicDescription": "Counts mispredicted conditional branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x11"
},
@@ -159,7 +159,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_NTAKEN",
- "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken.",
+ "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x10"
},
@@ -168,7 +168,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN",
- "PublicDescription": "Counts taken conditional mispredicted branch instructions retired.",
+ "PublicDescription": "Counts taken conditional mispredicted branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x1"
},
@@ -177,7 +177,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT",
- "PublicDescription": "Counts miss-predicted near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
+ "PublicDescription": "Counts miss-predicted near indirect branch instructions retired excluding returns. TSX abort is an indirect branch. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x80"
},
@@ -186,7 +186,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
- "PublicDescription": "Counts retired mispredicted indirect (near taken) CALL instructions, including both register and memory indirect.",
+ "PublicDescription": "Counts retired mispredicted indirect (near taken) CALL instructions, including both register and memory indirect. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x2"
},
@@ -195,7 +195,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken.",
+ "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x20"
},
@@ -204,7 +204,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RET",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x8"
},
@@ -368,6 +368,14 @@
"UMask": "0x4"
},
{
+ "BriefDescription": "Counts the cycles where the AMX (Advance Matrix Extension) unit is busy performing an operation.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb7",
+ "EventName": "EXE.AMX_BUSY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
"BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
@@ -452,7 +460,7 @@
"BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
- "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter. Available PDIST counters: 32",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
@@ -485,7 +493,7 @@
"BriefDescription": "Precise instruction retired with PEBS precise-distribution",
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.PREC_DIST",
- "PublicDescription": "A version of INST_RETIRED that allows for a precise distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR++) feature to fix bias in how retired instructions get sampled. Use on Fixed Counter 0.",
+ "PublicDescription": "A version of INST_RETIRED that allows for a precise distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR++) feature to fix bias in how retired instructions get sampled. Use on Fixed Counter 0. Available PDIST counters: 32",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
@@ -652,7 +660,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "LOAD_HIT_PREFETCH.SWPF",
- "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "PublicDescription": "Counts all software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -741,6 +749,56 @@
"UMask": "0x2"
},
{
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY_COUNT",
+ "Invert": "1",
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty due to a resource in the back-end",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY_RESOURCE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event RS.EMPTY_COUNT",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "Deprecated": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xa5",
+ "EventName": "RS_EMPTY.COUNT",
+ "Invert": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event RS.EMPTY",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Deprecated": "1",
+ "EventCode": "0xa5",
+ "EventName": "RS_EMPTY.CYCLES",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7"
+ },
+ {
"BriefDescription": "TMA slots where no uops were being issued due to lack of back-end resources.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cache.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cache.json
index f453202d80c2..92cf47967f0b 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-cache.json
@@ -312,6 +312,17 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "Distress signal asserted : DPT Remote",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xaf",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_NONLOCAL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Remote : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle received by this tile",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "Egress Blocking due to Ordering requirements : Down",
"Counter": "0,1,2,3",
"EventCode": "0xba",
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json
index 94340dee1c9c..d4cf2199d46b 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json
@@ -1822,6 +1822,18 @@
"Unit": "IIO"
},
{
+ "BriefDescription": "Posted requests sent by the integrated IO (IIO) controller to the Ubox, useful for counting message signaled interrupts (MSI).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8e",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.UBOX_POSTED",
+ "Experimental": "1",
+ "FCMask": "0x01",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
"BriefDescription": "ITC address map 1",
"Counter": "0,1,2,3",
"EventCode": "0x8f",
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-memory.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-memory.json
index aa06088dd26f..30044177ccf8 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-memory.json
@@ -2146,6 +2146,16 @@
"Unit": "MCHBM"
},
{
+ "BriefDescription": "ECC Correctable Errors",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x09",
+ "EventName": "UNC_MCHBM_ECC_CORRECTABLE_ERRORS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "ECC Correctable Errors. Counts the number of ECC errors detected and corrected by the iMC on this channel. This counter is only useful with ECC devices. This count will increment one time for each correction regardless of the number of bits corrected. The iMC can correct up to 4 bit errors in independent channel mode and 8 bit errors in lockstep mode.",
+ "Unit": "MCHBM"
+ },
+ {
"BriefDescription": "HBM Precharge All Commands",
"Counter": "0,1,2,3",
"EventCode": "0x44",
@@ -2760,6 +2770,98 @@
"Unit": "iMC"
},
{
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.HIGH",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of DRAM Refreshes Issued : Counts the number of refreshes issued.",
+ "UMask": "0x24",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.HIGH_ALL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.HIGH_PCH0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.HIGH_PCH1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.PANIC",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of DRAM Refreshes Issued : Counts the number of refreshes issued.",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.PANIC_ALL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.PANIC_PCH0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.PANIC_PCH1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ECC Correctable Errors",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x09",
+ "EventName": "UNC_M_ECC_CORRECTABLE_ERRORS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "ECC Correctable Errors : Counts the number of ECC errors detected and corrected by the iMC on this channel. This counter is only useful with ECC DRAM devices. This count will increment one time for each correction regardless of the number of bits corrected. The iMC can correct up to 4 bit errors in independent channel mode and 8 bit errors in lockstep mode.",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "IMC Clockticks at HCLK frequency",
"Counter": "0,1,2,3",
"EventCode": "0x01",
@@ -3028,6 +3130,28 @@
"Unit": "iMC"
},
{
+ "BriefDescription": "Throttle Cycles for Rank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Throttle Cycles for Rank 0 : Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1. : Thermal throttling is performed per DIMM. We support 3 DIMMs per channel. This ID allows us to filter by ID.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Throttle Cycles for Rank 0 : Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "Precharge due to read, write, underfill, or PGT.",
"Counter": "0,1,2,3",
"EventCode": "0x03",
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-power.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-power.json
index 9482ddaea4d1..71c35b165a3e 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-power.json
@@ -178,7 +178,6 @@
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY_CORES_C0",
- "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cores in C0 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"Unit": "PCU"
@@ -198,7 +197,6 @@
"Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY_CORES_C6",
- "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cores in C6 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"Unit": "PCU"
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/cache.json b/tools/perf/pmu-events/arch/x86/grandridge/cache.json
index 04802e254e51..9abddb06a837 100644
--- a/tools/perf/pmu-events/arch/x86/grandridge/cache.json
+++ b/tools/perf/pmu-events/arch/x86/grandridge/cache.json
@@ -1,5 +1,92 @@
[
{
+ "BriefDescription": "Counts the number of L1D cacheline (dirty) evictions caused by load misses, stores, and prefetches.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x51",
+ "EventName": "DL1.DIRTY_EVICTION",
+ "PublicDescription": "Counts the number of L1D cacheline (dirty) evictions caused by load misses, stores, and prefetches. Does not count evictions or dirty writebacks caused by snoops. Does not count a replacement unless a (dirty) line was written back.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of cache lines filled into the L2 cache that are in Exclusive state",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x25",
+ "EventName": "L2_LINES_IN.E",
+ "PublicDescription": "Counts the number of cache lines filled into the L2 cache that are in Exclusive state. Counts on a per core basis.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of cache lines filled into the L2 cache that are in Forward state",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x25",
+ "EventName": "L2_LINES_IN.F",
+ "PublicDescription": "Counts the number of cache lines filled into the L2 cache that are in Forward state. Counts on a per core basis.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of cache lines filled into the L2 cache that are in Modified state",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x25",
+ "EventName": "L2_LINES_IN.M",
+ "PublicDescription": "Counts the number of cache lines filled into the L2 cache that are in Modified state. Counts on a per core basis.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of cache lines filled into the L2 cache that are in Shared state",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x25",
+ "EventName": "L2_LINES_IN.S",
+ "PublicDescription": "Counts the number of cache lines filled into the L2 cache that are in Shared state. Counts on a per core basis.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of L2 cache lines that are evicted due to an L2 cache fill",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.NON_SILENT",
+ "PublicDescription": "Counts the number of L2 cache lines that are evicted due to an L2 cache fill. Increments on the core that brought the line in originally.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of L2 cache lines that are silently dropped due to an L2 cache fill",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.SILENT",
+ "PublicDescription": "Counts the number of L2 cache lines that are silently dropped due to an L2 cache fill. Increments on the core that brought the line in originally.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of L2 Cache Accesses that resulted in a Hit from a front door request only (does not include rejects or recycles), per core event",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of total L2 Cache Accesses that resulted in a Miss from a front door request only (does not include rejects or recycles), per core event",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.MISS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of L2 Cache Accesses that miss the L2 and get BBL reject short and long rejects, per core event",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.REJECTS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
"BriefDescription": "Counts the number of cacheable memory requests that miss in the LLC. Counts on a per core basis.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2e",
@@ -35,7 +122,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an icache or itlb miss which hit in the LLC.",
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an ICACHE or ITLB miss which hit in the LLC. If the core has access to an L3 cache, an LLC hit refers to an L3 cache hit, otherwise it counts zeros.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x35",
"EventName": "MEM_BOUND_STALLS_IFETCH.LLC_HIT",
@@ -43,7 +130,7 @@
"UMask": "0x6"
},
{
- "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an icache or itlb miss which missed all the caches.",
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an ICACHE or ITLB miss which missed all the caches. If the core has access to an L3 cache, an LLC miss refers to an L3 cache miss, otherwise it is an L2 cache miss.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x35",
"EventName": "MEM_BOUND_STALLS_IFETCH.LLC_MISS",
@@ -68,7 +155,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which hit in the LLC.",
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which hit in the LLC. If the core has access to an L3 cache, an LLC hit refers to an L3 cache hit, otherwise it counts zeros.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS_LOAD.LLC_HIT",
@@ -76,7 +163,7 @@
"UMask": "0x6"
},
{
- "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which missed all the local caches.",
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which missed all the local caches. If the core has access to an L3 cache, an LLC miss refers to an L3 cache miss, otherwise it is an L2 cache miss.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS_LOAD.LLC_MISS",
@@ -84,6 +171,14 @@
"UMask": "0x78"
},
{
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled to a store buffer full condition",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS_LOAD.SBFULL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80"
+ },
+ {
"BriefDescription": "Counts the number of load ops retired that miss the L3 cache and hit in DRAM",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd3",
@@ -191,7 +286,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
- "Counter": "0,1",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_1024",
@@ -202,7 +297,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
- "Counter": "0,1",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_128",
@@ -213,7 +308,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
- "Counter": "0,1",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_16",
@@ -224,7 +319,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
- "Counter": "0,1",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_2048",
@@ -235,7 +330,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
- "Counter": "0,1",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_256",
@@ -246,7 +341,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
- "Counter": "0,1",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_32",
@@ -257,7 +352,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
- "Counter": "0,1",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_4",
@@ -268,7 +363,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
- "Counter": "0,1",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_512",
@@ -279,7 +374,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
- "Counter": "0,1",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_64",
@@ -290,7 +385,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
- "Counter": "0,1",
+ "Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_8",
@@ -336,6 +431,33 @@
"UMask": "0x42"
},
{
+ "BriefDescription": "Counts the number of memory uops retired that missed in the second level TLB.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x13"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired that miss in the second Level TLB.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Counts the number of store uops retired that miss in the second level TLB.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
+ "SampleAfterValue": "200003",
+ "UMask": "0x12"
+ },
+ {
"BriefDescription": "Counts the number of stores uops retired same as MEM_UOPS_RETIRED.ALL_STORES",
"Counter": "0,1,2,3,4,5,6,7",
"Data_LA": "1",
@@ -345,12 +467,24 @@
"UMask": "0x6"
},
{
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "PublicDescription": "Counts demand data reads that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0001",
+ "PublicDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -361,6 +495,18 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0001",
+ "PublicDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -371,6 +517,7 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
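The two new ANY_RESPONSE OCR events give Grandridge a cheap way to count demand traffic leaving the core without any snoop-result filtering. Assuming a perf built with these tables, a usage sketch:

    perf stat -e OCR.DEMAND_DATA_RD.ANY_RESPONSE,OCR.DEMAND_RFO.ANY_RESPONSE -a sleep 1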
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/counter.json b/tools/perf/pmu-events/arch/x86/grandridge/counter.json
index 9fd5d8ad6d3b..d9ac3aca5bd5 100644
--- a/tools/perf/pmu-events/arch/x86/grandridge/counter.json
+++ b/tools/perf/pmu-events/arch/x86/grandridge/counter.json
@@ -37,6 +37,6 @@
{
"Unit": "CHACMS",
"CountersNumFixed": "0",
- "CountersNumGeneric": 4
+ "CountersNumGeneric": "4"
}
] \ No newline at end of file
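The counter.json fix quotes CountersNumGeneric so that every value in the file is a JSON string, matching the other units. A quick consistency check, sketched with jq:

    jq '[.[] | .CountersNumGeneric | type] | unique' tools/perf/pmu-events/arch/x86/grandridge/counter.json
    # expected after the fix: ["string"]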
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/frontend.json b/tools/perf/pmu-events/arch/x86/grandridge/frontend.json
index 7cdf611efb23..fef5cba533bb 100644
--- a/tools/perf/pmu-events/arch/x86/grandridge/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/grandridge/frontend.json
@@ -31,5 +31,13 @@
"EventName": "ICACHE.MISSES",
"SampleAfterValue": "200003",
"UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the micro-sequencer is busy.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe7",
+ "EventName": "MS_DECODED.MS_BUSY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/grr-metrics.json b/tools/perf/pmu-events/arch/x86/grandridge/grr-metrics.json
index 2f9959c61718..a0d637a24c1b 100644
--- a/tools/perf/pmu-events/arch/x86/grandridge/grr-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/grandridge/grr-metrics.json
@@ -1,56 +1,56 @@
[
{
"BriefDescription": "C10 residency percent per package",
- "MetricExpr": "cstate_pkg@c10\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c10\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C10_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C1 residency percent per core",
- "MetricExpr": "cstate_core@c1\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c1\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C1_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C2 residency percent per package",
- "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per package",
- "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per core",
- "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per package",
- "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per core",
- "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C8 residency percent per package",
- "MetricExpr": "cstate_pkg@c8\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c8\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C8_Pkg_Residency",
"ScaleUnit": "100%"
@@ -107,6 +107,30 @@
"ScaleUnit": "1MB/s"
},
{
+ "BriefDescription": "The percent of inbound full cache line writes initiated by IO that miss the L3 cache",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM / UNC_CHA_TOR_INSERTS.IO_ITOM",
+ "MetricName": "io_full_write_l3_miss",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Message Signaled Interrupts (MSI) per second sent by the integrated I/O traffic controller (IIO) to System Configuration Controller (Ubox)",
+ "MetricExpr": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.UBOX_POSTED / duration_time",
+ "MetricName": "io_msi",
+ "ScaleUnit": "1per_sec"
+ },
+ {
+ "BriefDescription": "The percent of inbound partial writes initiated by IO that miss the L3 cache",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR + UNC_CHA_TOR_INSERTS.IO_MISS_RFO) / (UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR + UNC_CHA_TOR_INSERTS.IO_RFO)",
+ "MetricName": "io_partial_write_l3_miss",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "The percent of inbound reads initiated by IO that miss the L3 cache",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR / UNC_CHA_TOR_INSERTS.IO_PCIRDCUR",
+ "MetricName": "io_read_l3_miss",
+ "ScaleUnit": "100%"
+ },
+ {
"BriefDescription": "Ratio of number of completed page walks (for 2 megabyte and 4 megabyte page sizes) caused by a code fetch to the total number of completed instructions",
"MetricExpr": "ITLB_MISSES.WALK_COMPLETED_2M_4M / INST_RETIRED.ANY",
"MetricName": "itlb_2nd_level_large_page_mpi",
@@ -163,12 +187,6 @@
"ScaleUnit": "1per_instr"
},
{
- "BriefDescription": "Average latency of a last level cache (LLC) demand data read miss (read memory access) in nano seconds",
- "MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT) / (UNC_CHA_CLOCKTICKS / (source_count(UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT) * #num_packages)) * duration_time",
- "MetricName": "llc_demand_data_read_miss_latency",
- "ScaleUnit": "1ns"
- },
- {
"BriefDescription": "Load operations retired per instruction",
"MetricExpr": "MEM_UOPS_RETIRED.ALL_LOADS / INST_RETIRED.ANY",
"MetricName": "loads_retired_per_instr",
@@ -216,15 +234,17 @@
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to certain allocation restrictions",
"MetricExpr": "tma_core_bound",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_allocation_restriction",
+ "MetricThreshold": "tma_allocation_restriction > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls",
"MetricExpr": "TOPDOWN_BE_BOUND.ALL_P / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL1;tma_L1_group",
+ "MetricGroup": "TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
+ "MetricThreshold": "tma_backend_bound > 0.1",
"MetricgroupNoGroup": "TopdownL1",
"PublicDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that uops must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count",
"ScaleUnit": "100%"
@@ -232,92 +252,104 @@
{
"BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear",
"MetricExpr": "TOPDOWN_BAD_SPECULATION.ALL_P / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL1;tma_L1_group",
+ "MetricGroup": "TopdownL1;tma_L1_group",
"MetricName": "tma_bad_speculation",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL1",
- "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ). Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ). Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend",
"MetricExpr": "TOPDOWN_FE_BOUND.BRANCH_DETECT / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_ifetch_latency_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_branch_detect",
- "PublicDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches",
+ "MetricThreshold": "tma_branch_detect > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
+ "PublicDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to branch mispredicts",
"MetricExpr": "TOPDOWN_BAD_SPECULATION.MISPREDICT / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL2;tma_L2_group;tma_bad_speculation_group",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
"MetricName": "tma_branch_mispredicts",
+ "MetricThreshold": "tma_branch_mispredicts > 0.05 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
"MetricExpr": "TOPDOWN_FE_BOUND.BRANCH_RESTEER / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_ifetch_latency_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_branch_resteer",
+ "MetricThreshold": "tma_branch_resteer > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to the microcode sequencer (MS)",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to the microcode sequencer (MS).",
"MetricExpr": "TOPDOWN_FE_BOUND.CISC / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_cisc",
+ "MetricThreshold": "tma_cisc > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of cycles due to backend bound stalls that are bounded by core restrictions and not attributed to an outstanding load or stores, or resource limitation",
"MetricExpr": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_core_bound",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.1",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to decode stalls",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to decode stalls.",
"MetricExpr": "TOPDOWN_FE_BOUND.DECODE / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_decode",
+ "MetricThreshold": "tma_decode > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear that does not require the use of microcode, classified as a fast nuke, due to memory ordering, memory disambiguation and memory renaming",
"MetricExpr": "TOPDOWN_BAD_SPECULATION.FASTNUKE / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_fast_nuke",
+ "MetricThreshold": "tma_fast_nuke > 0.05 & (tma_machine_clears > 0.05 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to frontend stalls",
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to frontend stalls.",
"MetricExpr": "TOPDOWN_FE_BOUND.ALL_P / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL1;tma_L1_group",
+ "MetricGroup": "TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to instruction cache misses",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to instruction cache misses.",
"MetricExpr": "TOPDOWN_FE_BOUND.ICACHE / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_ifetch_latency_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_icache_misses",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.",
"MetricExpr": "TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
"MetricName": "tma_ifetch_bandwidth",
+ "MetricThreshold": "tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend latency restrictions due to icache misses, itlb misses, branch detection, and resteer limitations",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend latency restrictions due to icache misses, itlb misses, branch detection, and resteer limitations.",
"MetricExpr": "TOPDOWN_FE_BOUND.FRONTEND_LATENCY / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
"MetricName": "tma_ifetch_latency",
+ "MetricThreshold": "tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%"
},
@@ -348,33 +380,28 @@
{
"BriefDescription": "Percentage of time that retirement is stalled due to a first level data TLB miss",
"MetricExpr": "100 * (LD_HEAD.DTLB_MISS_AT_RET + LD_HEAD.PGWALK_AT_RET) / CPU_CLK_UNHALTED.CORE",
- "MetricGroup": "Cycles",
- "MetricName": "tma_info_bottleneck_dtlb_miss_bound_cycles",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_bottleneck_%_dtlb_miss_bound_cycles"
},
{
"BriefDescription": "Percentage of time that allocation and retirement is stalled by the Frontend Cluster due to an Ifetch Miss, either Icache or ITLB Miss",
"MetricExpr": "100 * MEM_BOUND_STALLS_IFETCH.ALL / CPU_CLK_UNHALTED.CORE",
- "MetricGroup": "Cycles;Ifetch",
- "MetricName": "tma_info_bottleneck_ifetch_miss_bound_cycles",
- "PublicDescription": "Percentage of time that allocation and retirement is stalled by the Frontend Cluster due to an Ifetch Miss, either Icache or ITLB Miss. See Info.Ifetch_Bound",
- "ScaleUnit": "100%"
+ "MetricGroup": "Ifetch",
+ "MetricName": "tma_info_bottleneck_%_ifetch_miss_bound_cycles",
+ "PublicDescription": "Percentage of time that allocation and retirement is stalled by the Frontend Cluster due to an Ifetch Miss, either Icache or ITLB Miss. See Info.Ifetch_Bound"
},
{
"BriefDescription": "Percentage of time that retirement is stalled due to an L1 miss",
"MetricExpr": "100 * MEM_BOUND_STALLS_LOAD.ALL / CPU_CLK_UNHALTED.CORE",
- "MetricGroup": "Cycles;Load_Store_Miss",
- "MetricName": "tma_info_bottleneck_load_miss_bound_cycles",
- "PublicDescription": "Percentage of time that retirement is stalled due to an L1 miss. See Info.Load_Miss_Bound",
- "ScaleUnit": "100%"
+ "MetricGroup": "Load_Store_Miss",
+ "MetricName": "tma_info_bottleneck_%_load_miss_bound_cycles",
+ "PublicDescription": "Percentage of time that retirement is stalled due to an L1 miss. See Info.Load_Miss_Bound"
},
{
"BriefDescription": "Percentage of time that retirement is stalled by the Memory Cluster due to a pipeline stall",
"MetricExpr": "100 * LD_HEAD.ANY_AT_RET / CPU_CLK_UNHALTED.CORE",
- "MetricGroup": "Cycles;Mem_Exec",
- "MetricName": "tma_info_bottleneck_mem_exec_bound_cycles",
- "PublicDescription": "Percentage of time that retirement is stalled by the Memory Cluster due to a pipeline stall. See Info.Mem_Exec_Bound",
- "ScaleUnit": "100%"
+ "MetricGroup": "Mem_Exec",
+ "MetricName": "tma_info_bottleneck_%_mem_exec_bound_cycles",
+ "PublicDescription": "Percentage of time that retirement is stalled by the Memory Cluster due to a pipeline stall. See Info.Mem_Exec_Bound"
},
{
"BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
@@ -429,26 +456,22 @@
{
"BriefDescription": "Percentage of time that allocation is stalled due to load buffer full",
"MetricExpr": "100 * MEM_SCHEDULER_BLOCK.LD_BUF / CPU_CLK_UNHALTED.CORE",
- "MetricName": "tma_info_buffer_stalls_load_buffer_stall_cycles",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_buffer_stalls_%_load_buffer_stall_cycles"
},
{
"BriefDescription": "Percentage of time that allocation is stalled due to memory reservation stations full",
"MetricExpr": "100 * MEM_SCHEDULER_BLOCK.RSV / CPU_CLK_UNHALTED.CORE",
- "MetricName": "tma_info_buffer_stalls_mem_rsv_stall_cycles",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_buffer_stalls_%_mem_rsv_stall_cycles"
},
{
"BriefDescription": "Percentage of time that allocation is stalled due to store buffer full",
"MetricExpr": "100 * MEM_SCHEDULER_BLOCK.ST_BUF / CPU_CLK_UNHALTED.CORE",
- "MetricName": "tma_info_buffer_stalls_store_buffer_stall_cycles",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_buffer_stalls_%_store_buffer_stall_cycles"
},
{
"BriefDescription": "Cycles Per Instruction",
"MetricExpr": "CPU_CLK_UNHALTED.CORE / INST_RETIRED.ANY",
- "MetricName": "tma_info_core_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_core_cpi"
},
{
"BriefDescription": "Floating Point Operations Per Cycle",
@@ -469,28 +492,46 @@
{
"BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss hits in the L2",
"MetricExpr": "100 * MEM_BOUND_STALLS_IFETCH.L2_HIT / MEM_BOUND_STALLS_IFETCH.ALL",
- "MetricName": "tma_info_ifetch_miss_bound_ifetchmissbound_with_l2hit",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l2hit"
+ },
+ {
+ "BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss doesn't hit in the L2",
+ "MetricExpr": "100 * (MEM_BOUND_STALLS_IFETCH.LLC_HIT + MEM_BOUND_STALLS_IFETCH.LLC_MISS) / MEM_BOUND_STALLS_IFETCH.ALL",
+ "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l2miss"
},
{
"BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss hits in the L3",
"MetricExpr": "100 * MEM_BOUND_STALLS_IFETCH.LLC_HIT / MEM_BOUND_STALLS_IFETCH.ALL",
- "MetricName": "tma_info_ifetch_miss_bound_ifetchmissbound_with_l3hit",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l3hit"
+ },
+ {
+ "BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss subsequently misses in the L3",
+ "MetricExpr": "100 * MEM_BOUND_STALLS_IFETCH.LLC_MISS / MEM_BOUND_STALLS_IFETCH.ALL",
+ "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l3miss"
},
{
"BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that hit the L2",
"MetricExpr": "100 * MEM_BOUND_STALLS_LOAD.L2_HIT / MEM_BOUND_STALLS_LOAD.ALL",
"MetricGroup": "load_store_bound",
- "MetricName": "tma_info_load_miss_bound_loadmissbound_with_l2hit",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l2hit"
+ },
+ {
+ "BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that subsequently misses in the L2",
+ "MetricExpr": "100 * (MEM_BOUND_STALLS_LOAD.LLC_HIT + MEM_BOUND_STALLS_LOAD.LLC_MISS) / MEM_BOUND_STALLS_LOAD.ALL",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l2miss"
},
{
"BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that hit the L3",
"MetricExpr": "100 * MEM_BOUND_STALLS_LOAD.LLC_HIT / MEM_BOUND_STALLS_LOAD.ALL",
"MetricGroup": "load_store_bound",
- "MetricName": "tma_info_load_miss_bound_loadmissbound_with_l3hit",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l3hit"
+ },
+ {
+ "BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that subsequently misses the L3",
+ "MetricExpr": "100 * MEM_BOUND_STALLS_LOAD.LLC_MISS / MEM_BOUND_STALLS_LOAD.ALL",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l3miss"
},
{
"BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a pipeline block",
@@ -528,44 +569,37 @@
{
"BriefDescription": "Percentage of total non-speculative loads with an address aliasing block",
"MetricExpr": "100 * LD_BLOCKS.ADDRESS_ALIAS / MEM_UOPS_RETIRED.ALL_LOADS",
- "MetricName": "tma_info_mem_exec_blocks_loads_with_adressaliasing",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_mem_exec_blocks_%_loads_with_adressaliasing"
},
{
"BriefDescription": "Percentage of total non-speculative loads with a store forward or unknown store address block",
"MetricExpr": "100 * LD_BLOCKS.DATA_UNKNOWN / MEM_UOPS_RETIRED.ALL_LOADS",
- "MetricName": "tma_info_mem_exec_blocks_loads_with_storefwdblk",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_mem_exec_blocks_%_loads_with_storefwdblk"
},
{
"BriefDescription": "Percentage of Memory Execution Bound due to a first level data cache miss",
"MetricExpr": "100 * LD_HEAD.L1_MISS_AT_RET / LD_HEAD.ANY_AT_RET",
- "MetricName": "tma_info_mem_exec_bound_loadhead_with_l1miss",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_l1miss"
},
{
"BriefDescription": "Percentage of Memory Execution Bound due to other block cases, such as pipeline conflicts, fences, etc",
"MetricExpr": "100 * LD_HEAD.OTHER_AT_RET / LD_HEAD.ANY_AT_RET",
- "MetricName": "tma_info_mem_exec_bound_loadhead_with_otherpipelineblks",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_otherpipelineblks"
},
{
"BriefDescription": "Percentage of Memory Execution Bound due to a pagewalk",
"MetricExpr": "100 * LD_HEAD.PGWALK_AT_RET / LD_HEAD.ANY_AT_RET",
- "MetricName": "tma_info_mem_exec_bound_loadhead_with_pagewalk",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_pagewalk"
},
{
"BriefDescription": "Percentage of Memory Execution Bound due to a second level TLB miss",
"MetricExpr": "100 * LD_HEAD.DTLB_MISS_AT_RET / LD_HEAD.ANY_AT_RET",
- "MetricName": "tma_info_mem_exec_bound_loadhead_with_stlbhit",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_stlbhit"
},
{
"BriefDescription": "Percentage of Memory Execution Bound due to a store forward address match",
"MetricExpr": "100 * LD_HEAD.ST_ADDR_AT_RET / LD_HEAD.ANY_AT_RET",
- "MetricName": "tma_info_mem_exec_bound_loadhead_with_storefwding",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_storefwding"
},
{
"BriefDescription": "Instructions per Load",
@@ -595,12 +629,11 @@
{
"BriefDescription": "Percentage of time that the core is stalled due to a TPAUSE or UMWAIT instruction",
"MetricExpr": "100 * SERIALIZATION.C01_MS_SCB / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricName": "tma_info_serialization_tpause_cycles",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_serialization_%_tpause_cycles"
},
{
"BriefDescription": "Average CPU Utilization",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricName": "tma_info_system_cpu_utilization"
},
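The utilization metric divides unhalted reference cycles by the free-running timestamp counter, read here through the msr PMU as msr@tsc@. A minimal sketch of the arithmetic, with hypothetical counts:

```python
# Sketch of tma_info_system_cpu_utilization with hypothetical counts.
# CPU_CLK_UNHALTED.REF_TSC ticks only while the core is unhalted;
# msr@tsc@ (the free-running timestamp counter read via the msr PMU)
# ticks for the whole measurement interval.
ref_tsc = 1_800_000_000   # unhalted reference cycles (hypothetical)
tsc = 2_400_000_000       # wall-clock TSC ticks (hypothetical)

utilization = ref_tsc / tsc
print(f"CPU utilization: {utilization:.2%}")  # 75.00%
```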
{
@@ -613,16 +646,19 @@
{
"BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.CORE_P:k / CPU_CLK_UNHALTED.CORE",
+ "MetricGroup": "Summary",
"MetricName": "tma_info_system_kernel_utilization"
},
{
"BriefDescription": "PerfMon Event Multiplexing accuracy indicator",
"MetricExpr": "CPU_CLK_UNHALTED.CORE_P / CPU_CLK_UNHALTED.CORE",
- "MetricName": "tma_info_system_mux"
+ "MetricName": "tma_info_system_mux",
+ "MetricThreshold": "tma_info_system_mux > 1.1 | tma_info_system_mux < 0.9"
},
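The multiplexing indicator compares the programmable-counter copy of core cycles (CORE_P) against the fixed counter (CORE); the new threshold flags runs where the two disagree by more than 10%, a sign that event multiplexing has skewed the scaled counts. A small sketch of the check, with hypothetical values:

```python
# Sketch of the tma_info_system_mux sanity check added above.
# When perf multiplexes events, the scaled programmable count can drift
# from the fixed counter; a ratio outside [0.9, 1.1] marks the run suspect.
core_p = 47_500_000   # CPU_CLK_UNHALTED.CORE_P (hypothetical, scaled)
core = 50_000_000     # CPU_CLK_UNHALTED.CORE fixed counter (hypothetical)

mux = core_p / core
flagged = mux > 1.1 or mux < 0.9
print(f"mux = {mux:.3f}, flagged: {flagged}")  # mux = 0.950, flagged: False
```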
{
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.CORE / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Power",
"MetricName": "tma_info_system_turbo_utilization"
},
{
@@ -646,90 +682,102 @@
"MetricName": "tma_info_uop_mix_x87_uop_ratio"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses.",
"MetricExpr": "TOPDOWN_FE_BOUND.ITLB_MISS / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_ifetch_latency_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation",
"MetricExpr": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL2;tma_L2_group;tma_bad_speculation_group",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
"MetricName": "tma_machine_clears",
+ "MetricThreshold": "tma_machine_clears > 0.05 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to memory reservation stalls in which a scheduler is not able to accept uops",
"MetricExpr": "TOPDOWN_BE_BOUND.MEM_SCHEDULER / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_mem_scheduler",
+ "MetricThreshold": "tma_mem_scheduler > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to IEC or FPC RAT stalls, which can be due to FIQ or IEC reservation stalls in which the integer, floating point or SIMD scheduler is not able to accept uops",
"MetricExpr": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_non_mem_scheduler",
+ "MetricThreshold": "tma_non_mem_scheduler > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear that requires the use of microcode (slow nuke)",
"MetricExpr": "TOPDOWN_BAD_SPECULATION.NUKE / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_nuke",
+ "MetricThreshold": "tma_nuke > 0.05 & (tma_machine_clears > 0.05 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to other common frontend stalls not categorized",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to other common frontend stalls not categorized.",
"MetricExpr": "TOPDOWN_FE_BOUND.OTHER / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_other_fb",
+ "MetricThreshold": "tma_other_fb > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to wrong predecodes",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to wrong predecodes.",
"MetricExpr": "TOPDOWN_FE_BOUND.PREDECODE / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_predecode",
+ "MetricThreshold": "tma_predecode > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the physical register file unable to accept an entry (marble stalls)",
"MetricExpr": "TOPDOWN_BE_BOUND.REGISTER / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_register",
+ "MetricThreshold": "tma_register > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the reorder buffer being full (ROB stalls)",
"MetricExpr": "TOPDOWN_BE_BOUND.REORDER_BUFFER / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_reorder_buffer",
+ "MetricThreshold": "tma_reorder_buffer > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to a resource limitation",
"MetricExpr": "tma_backend_bound - tma_core_bound",
- "MetricGroup": "Slots;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_resource_bound",
+ "MetricThreshold": "tma_resource_bound > 0.2 & tma_backend_bound > 0.1",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that result in retirement slots",
"MetricExpr": "TOPDOWN_RETIRING.ALL_P / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL1;tma_L1_group",
+ "MetricGroup": "TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
+ "MetricThreshold": "tma_retiring > 0.75",
"MetricgroupNoGroup": "TopdownL1",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to scoreboards from the instruction queue (IQ), jump execution unit (JEU), or microcode sequencer (MS)",
"MetricExpr": "TOPDOWN_BE_BOUND.SERIALIZATION / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_serialization",
+ "MetricThreshold": "tma_serialization > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
},
{
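The topdown metrics in this file normalize slot counts by 6 * CPU_CLK_UNHALTED.CORE because these cores expose six issue slots per cycle, and the added MetricThreshold strings gate each node on its parent nodes also being significant. A sketch of the slot math and one threshold, using hypothetical counts (the frontend-bound and ifetch-latency slot totals below are illustrative values, not specific events):

```python
# Sketch of the 6-wide topdown slot math used by the metrics above.
# These cores expose 6 issue slots per cycle, hence the (6 * CORE) divisor.
core_cycles = 50_000_000
slots = 6 * core_cycles

itlb_miss_slots = 24_000_000       # TOPDOWN_FE_BOUND.ITLB_MISS (hypothetical)
fe_bound_slots = 90_000_000        # total frontend-bound slots (hypothetical)
ifetch_latency_slots = 54_000_000  # ifetch-latency subset (hypothetical)

tma_itlb_misses = itlb_miss_slots / slots
tma_frontend_bound = fe_bound_slots / slots
tma_ifetch_latency = ifetch_latency_slots / slots

# Mirrors the MetricThreshold:
# tma_itlb_misses > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)
fires = (tma_itlb_misses > 0.05
         and tma_ifetch_latency > 0.15
         and tma_frontend_bound > 0.2)
print(f"tma_itlb_misses = {tma_itlb_misses:.3f}, threshold fires: {fires}")
```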
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/memory.json b/tools/perf/pmu-events/arch/x86/grandridge/memory.json
index 22d23077618e..48b6301e7696 100644
--- a/tools/perf/pmu-events/arch/x86/grandridge/memory.json
+++ b/tools/perf/pmu-events/arch/x86/grandridge/memory.json
@@ -79,6 +79,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00001",
+ "PublicDescription": "Counts demand data reads that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -89,6 +90,7 @@
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
}
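The OCR events above pair event 0xB7 with an offcore-response MSR value. On recent Intel parts the low 16 bits of that value select the request type and the higher bits encode supplier and snoop response information; the exact bit assignments are model-specific, so the decode below is an illustrative sketch only, not a reference layout:

```python
# Illustrative split of an offcore-response (OCR) MSRValue.
# Treat the bit boundaries as an assumption: request type in the low
# 16 bits, supplier/snoop response info above them; consult the model's
# documentation for the authoritative layout.
msr_value = 0x3FBFC00001  # OCR.DEMAND_DATA_RD.L3_MISS from the hunk above

request_type = msr_value & 0xFFFF   # 0x0001 -> demand data read
response_info = msr_value >> 16     # supplier + snoop bits

print(f"request type bits: {request_type:#06x}")
print(f"response info bits: {response_info:#x}")
```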
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/other.json b/tools/perf/pmu-events/arch/x86/grandridge/other.json
index 28f9a4c3ea84..ea34103a8292 100644
--- a/tools/perf/pmu-events/arch/x86/grandridge/other.json
+++ b/tools/perf/pmu-events/arch/x86/grandridge/other.json
@@ -9,41 +9,14 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads that have any type of response.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
"BriefDescription": "Counts streaming stores that have any type of response.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10800",
+ "PublicDescription": "Counts streaming stores that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
- },
- {
- "BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0x75",
- "EventName": "SERIALIZATION.C01_MS_SCB",
- "SampleAfterValue": "200003",
- "UMask": "0x4"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/pipeline.json b/tools/perf/pmu-events/arch/x86/grandridge/pipeline.json
index 40fa4f5ae261..f56d8d816e53 100644
--- a/tools/perf/pmu-events/arch/x86/grandridge/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/grandridge/pipeline.json
@@ -57,6 +57,14 @@
"UMask": "0xfb"
},
{
+ "BriefDescription": "Counts the number of near indirect JMP branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.INDIRECT_JMP",
+ "SampleAfterValue": "200003",
+ "UMask": "0xef"
+ },
+ {
"BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.INDIRECT_CALL",
"Counter": "0,1,2,3,4,5,6,7",
"Deprecated": "1",
@@ -82,6 +90,30 @@
"UMask": "0xf7"
},
{
+ "BriefDescription": "Counts the number of near taken branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc0"
+ },
+ {
+ "BriefDescription": "Counts the number of near relative CALL branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.REL_CALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfd"
+ },
+ {
+ "BriefDescription": "Counts the number of near relative JMP branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.REL_JMP",
+ "SampleAfterValue": "200003",
+ "UMask": "0xdf"
+ },
+ {
"BriefDescription": "Counts the total number of mispredicted branch instructions retired for all branch types.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
@@ -122,6 +154,14 @@
"UMask": "0xfb"
},
{
+ "BriefDescription": "Counts the number of mispredicted near indirect JMP branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT_JMP",
+ "SampleAfterValue": "200003",
+ "UMask": "0xef"
+ },
+ {
"BriefDescription": "Counts the number of mispredicted near taken branch instructions retired.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
@@ -185,6 +225,7 @@
"BriefDescription": "Fixed Counter: Counts the number of instructions retired",
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
+ "PublicDescription": "Fixed Counter: Counts the number of instructions retired Available PDIST counters: 32",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
@@ -236,8 +277,9 @@
"UMask": "0x20"
},
{
- "BriefDescription": "Counts the number of machine clears that flush the pipeline and restart the machine with the use of microcode due to SMC, MEMORY_ORDERING, FP_ASSISTS, PAGE_FAULT, DISAMBIGUATION, and FPC_VIRTUAL_TRAP.",
+ "BriefDescription": "This event is deprecated.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Deprecated": "1",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.SLOW",
"SampleAfterValue": "20003",
@@ -260,6 +302,14 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x75",
+ "EventName": "SERIALIZATION.C01_MS_SCB",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL_P]",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x73",
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/uncore-cache.json b/tools/perf/pmu-events/arch/x86/grandridge/uncore-cache.json
index 6a80cf6cbd36..b89ab6e5cfb5 100644
--- a/tools/perf/pmu-events/arch/x86/grandridge/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/grandridge/uncore-cache.json
@@ -9,6 +9,16 @@
"Unit": "CHACMS"
},
{
+ "BriefDescription": "Counts the number of cycles FAST trigger is received from the global FAST distress wire.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHACMS_RING_SRC_THRTL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "Unit": "CHACMS"
+ },
+ {
"BriefDescription": "Number of CHA clock cycles while the event is enabled",
"Counter": "0,1,2,3",
"EventCode": "0x01",
@@ -531,6 +541,26 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "Ingress (from CMS) Allocations : IRQ : Counts number of allocations per cycle into the specified Ingress queue.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.IRQ",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy : IRQ : Counts number of entries in the specified Ingress queue in each cycle.",
+ "Counter": "0",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.IRQ",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "All TOR Inserts",
"Counter": "0,1,2,3",
"EventCode": "0x35",
@@ -603,7 +633,7 @@
"Unit": "CHA"
},
{
- "BriefDescription": "Data read opt prefetch from local IA that miss the cache",
+ "BriefDescription": "Data read opt prefetch from local IA",
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT_PREF",
@@ -764,7 +794,7 @@
"Unit": "CHA"
},
{
- "BriefDescription": "Last level cache prefetch read for ownership from local IA that miss the cache",
+ "BriefDescription": "Last level cache prefetch read for ownership from local IA",
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFRFO",
@@ -859,7 +889,7 @@
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF_LOCAL",
"PerPkg": "1",
- "PublicDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that missed the LLC",
+ "PublicDescription": "TOR Inserts : Data read opt prefetch from local iA that missed the LLC targeting local memory",
"UMask": "0xc8a6fe01",
"Unit": "CHA"
},
@@ -934,7 +964,7 @@
"Unit": "CHA"
},
{
- "BriefDescription": "Read for ownership from local IA that miss the cache",
+ "BriefDescription": "Read for ownership from local IA that miss the LLC targeting local memory",
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_LOCAL",
@@ -954,7 +984,7 @@
"Unit": "CHA"
},
{
- "BriefDescription": "Read for ownership prefetch from local IA that miss the cache",
+ "BriefDescription": "Read for ownership prefetch from local IA that miss the LLC targeting local memory",
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_LOCAL",
@@ -1024,7 +1054,7 @@
"Unit": "CHA"
},
{
- "BriefDescription": "Read for ownership from local IA that miss the cache",
+ "BriefDescription": "Read for ownership from local IA",
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_RFO",
@@ -1034,7 +1064,7 @@
"Unit": "CHA"
},
{
- "BriefDescription": "Read for ownership prefetch from local IA that miss the cache",
+ "BriefDescription": "Read for ownership prefetch from local IA",
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_RFO_PREF",
@@ -1406,7 +1436,6 @@
"Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT",
- "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRd_Opts issued by iA Cores",
"UMask": "0xc827ff01",
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/grandridge/uncore-interconnect.json
index 2c18767511f3..c7250332d8aa 100644
--- a/tools/perf/pmu-events/arch/x86/grandridge/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/grandridge/uncore-interconnect.json
@@ -261,5 +261,15 @@
"PerPkg": "1",
"UMask": "0x8",
"Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Message Received : MSI",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.MSI_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Message Received : MSI : Message Signaled Interrupts - interrupts sent by devices (including PCIe via IOxAPIC) (Socket Mode only)",
+ "UMask": "0x2",
+ "Unit": "UBOX"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/uncore-io.json b/tools/perf/pmu-events/arch/x86/grandridge/uncore-io.json
index c5b05c71c56d..764cf2f0b4a8 100644
--- a/tools/perf/pmu-events/arch/x86/grandridge/uncore-io.json
+++ b/tools/perf/pmu-events/arch/x86/grandridge/uncore-io.json
@@ -908,6 +908,18 @@
"Unit": "IIO"
},
{
+ "BriefDescription": "Posted requests sent by the integrated IO (IIO) controller to the Ubox, useful for counting message signaled interrupts (MSI).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8e",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.UBOX_POSTED",
+ "FCMask": "0x01",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "PublicDescription": "-",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
"BriefDescription": "All 9 bits of Page Walk Tracker Occupancy",
"Counter": "0,1,2,3",
"EventCode": "0x42",
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/uncore-memory.json b/tools/perf/pmu-events/arch/x86/grandridge/uncore-memory.json
index e75b3050ccd5..6a11e5505957 100644
--- a/tools/perf/pmu-events/arch/x86/grandridge/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/grandridge/uncore-memory.json
@@ -189,6 +189,256 @@
"Unit": "IMC"
},
{
+ "BriefDescription": "# of cycles MR4 temp readings forced 2x refresh",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M_MR4_2XREF_CYCLES.SCH0_DIMM0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles MR4 temp readings forced 2x refresh",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M_MR4_2XREF_CYCLES.SCH0_DIMM1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles MR4 temp readings forced 2x refresh",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M_MR4_2XREF_CYCLES.SCH1_DIMM0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x4",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles MR4 temp readings forced 2x refresh",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M_MR4_2XREF_CYCLES.SCH1_DIMM1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x8",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles MR4 MRRs was triggered/running",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M_PDC_MR4ACTIVE_CYCLES.SCH0_DIMM0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles MR4 MRRs was triggered/running",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M_PDC_MR4ACTIVE_CYCLES.SCH0_DIMM1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles MR4 MRRs was triggered/running",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M_PDC_MR4ACTIVE_CYCLES.SCH1_DIMM0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x4",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles MR4 MRRs was triggered/running",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M_PDC_MR4ACTIVE_CYCLES.SCH1_DIMM1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x8",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles a given rank is in Power Down Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWERDOWN_CYCLES.SCH0_RANK0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles a given rank is in Power Down Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWERDOWN_CYCLES.SCH0_RANK1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles a given rank is in Power Down Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWERDOWN_CYCLES.SCH0_RANK2",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x4",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles a given rank is in Power Down Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWERDOWN_CYCLES.SCH0_RANK3",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x8",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles a given rank is in Power Down Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWERDOWN_CYCLES.SCH1_RANK0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x10",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles a given rank is in Power Down Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWERDOWN_CYCLES.SCH1_RANK1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x20",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles a given rank is in Power Down Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWERDOWN_CYCLES.SCH1_RANK2",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x40",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles a given rank is in Power Down Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWERDOWN_CYCLES.SCH1_RANK3",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x80",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles a given rank is in Power Down Mode and all pages are closed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_M_POWER_CHANNEL_PPD_CYCLES",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at Critical level on specified DIMM and throttle level is zero.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x89",
+ "EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES.SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at Critical level on specified DIMM and throttle level is zero.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x89",
+ "EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES.SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "UNC_M_POWER_THROTTLE_CYCLES.BW_SLOT0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.BW_SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "UNC_M_POWER_THROTTLE_CYCLES.BW_SLOT1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.BW_SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "MR4 temp reading is throttling",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.MR4BLKEN",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x8",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "RAPL is throttling",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RAPLBLK",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x4",
+ "Unit": "IMC"
+ },
+ {
"BriefDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
"Counter": "0,1,2,3",
"EventCode": "0x03",
@@ -361,6 +611,94 @@
"Unit": "IMC"
},
{
+ "BriefDescription": "# of cycles Throttling at Critical level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M_THROTTLE_CRIT_CYCLES.SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at Critical level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M_THROTTLE_CRIT_CYCLES.SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at High level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8d",
+ "EventName": "UNC_M_THROTTLE_HIGH_CYCLES.SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at High level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8d",
+ "EventName": "UNC_M_THROTTLE_HIGH_CYCLES.SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at Normal level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8b",
+ "EventName": "UNC_M_THROTTLE_LOW_CYCLES.SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at Normal level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8b",
+ "EventName": "UNC_M_THROTTLE_LOW_CYCLES.SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at Mid level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M_THROTTLE_MID_CYCLES.SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at Mid level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M_THROTTLE_MID_CYCLES.SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
"BriefDescription": "Write Pending Queue Allocations",
"Counter": "0,1,2,3",
"EventCode": "0x22",
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/cache.json b/tools/perf/pmu-events/arch/x86/graniterapids/cache.json
index d155da8610d8..db28866444b6 100644
--- a/tools/perf/pmu-events/arch/x86/graniterapids/cache.json
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/cache.json
@@ -320,7 +320,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_LOADS",
- "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW.",
+ "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW. Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x81"
},
@@ -330,7 +330,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_STORES",
- "PublicDescription": "Counts all retired store instructions.",
+ "PublicDescription": "Counts all retired store instructions. Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x82"
},
@@ -340,7 +340,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ANY",
- "PublicDescription": "Counts all retired memory instructions - loads and stores.",
+ "PublicDescription": "Counts all retired memory instructions - loads and stores. Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x83"
},
@@ -350,7 +350,10 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.LOCK_LOADS",
- "PublicDescription": "Counts retired load instructions with locked access.",
+ "PublicDescription": "Counts retired load instructions with locked access. Available PDIST counters: 0",
+ "RetirementLatencyMax": 5156,
+ "RetirementLatencyMean": 63.76,
+ "RetirementLatencyMin": 15,
"SampleAfterValue": "100007",
"UMask": "0x21"
},
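The new RetirementLatencyMin/Mean/Max fields summarize the observed retirement-latency distribution for the event. One way a tool might use them is to turn a sample count into a rough cycle-cost estimate; a sketch with a hypothetical count:

```python
# Sketch: using the new RetirementLatency* fields to estimate cycle cost.
# The mean latency comes from the JSON above; the event count is hypothetical.
lock_loads = {
    "count": 120_000,                 # MEM_INST_RETIRED.LOCK_LOADS (hypothetical)
    "RetirementLatencyMean": 63.76,   # from the event definition above
}

estimated_cycles = lock_loads["count"] * lock_loads["RetirementLatencyMean"]
print(f"~{estimated_cycles:,.0f} cycles spent in locked loads")
```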
@@ -360,7 +363,10 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
- "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
+ "PublicDescription": "Counts retired load instructions that split across a cacheline boundary. Available PDIST counters: 0",
+ "RetirementLatencyMax": 4704,
+ "RetirementLatencyMean": 3.97,
+ "RetirementLatencyMin": 0,
"SampleAfterValue": "100003",
"UMask": "0x41"
},
@@ -370,7 +376,10 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_STORES",
- "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
+ "PublicDescription": "Counts retired store instructions that split across a cacheline boundary. Available PDIST counters: 0",
+ "RetirementLatencyMax": 65535,
+ "RetirementLatencyMean": 19.0,
+ "RetirementLatencyMin": 0,
"SampleAfterValue": "100003",
"UMask": "0x42"
},
@@ -380,7 +389,10 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_HIT_LOADS",
- "PublicDescription": "Number of retired load instructions with a clean hit in the 2nd-level TLB (STLB).",
+ "PublicDescription": "Number of retired load instructions with a clean hit in the 2nd-level TLB (STLB). Available PDIST counters: 0",
+ "RetirementLatencyMax": 3424,
+ "RetirementLatencyMean": 1.57,
+ "RetirementLatencyMin": 0,
"SampleAfterValue": "100003",
"UMask": "0x9"
},
@@ -390,7 +402,10 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_HIT_STORES",
- "PublicDescription": "Number of retired store instructions that hit in the 2nd-level TLB (STLB).",
+ "PublicDescription": "Number of retired store instructions that hit in the 2nd-level TLB (STLB). Available PDIST counters: 0",
+ "RetirementLatencyMax": 65535,
+ "RetirementLatencyMean": 5.24,
+ "RetirementLatencyMin": 0,
"SampleAfterValue": "100003",
"UMask": "0xa"
},
@@ -400,7 +415,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
- "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x11"
},
@@ -410,7 +425,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
- "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x12"
},
@@ -429,7 +444,10 @@
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD",
- "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3.",
+ "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3. Available PDIST counters: 0",
+ "RetirementLatencyMax": 4472,
+ "RetirementLatencyMean": 353.04,
+ "RetirementLatencyMin": 0,
"SampleAfterValue": "20011",
"UMask": "0x4"
},
@@ -439,7 +457,10 @@
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
- "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache. Available PDIST counters: 0",
+ "RetirementLatencyMax": 830,
+ "RetirementLatencyMean": 125.27,
+ "RetirementLatencyMin": 0,
"SampleAfterValue": "20011",
"UMask": "0x1"
},
@@ -449,7 +470,7 @@
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
- "PublicDescription": "Counts retired load instructions whose data sources were hits in L3 without snoops required.",
+ "PublicDescription": "Counts retired load instructions whose data sources were hits in L3 without snoops required. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x8"
},
@@ -459,26 +480,45 @@
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD",
- "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache. Available PDIST counters: 0",
+ "RetirementLatencyMax": 3939,
+ "RetirementLatencyMean": 289.9,
+ "RetirementLatencyMin": 0,
"SampleAfterValue": "20011",
"UMask": "0x2"
},
{
- "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from local dram",
+ "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from dram homed in the local socket",
"Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
- "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from local DRAM.",
+ "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from DRAM homed in the local socket. Available PDIST counters: 0",
+ "RetirementLatencyMax": 4146,
+ "RetirementLatencyMean": 115.83,
+ "RetirementLatencyMin": 0,
"SampleAfterValue": "100007",
"UMask": "0x1"
},
{
+ "BriefDescription": "Retired load instructions with remote cxl mem as the data source where the data request missed all caches.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_CXL_MEM",
+ "PublicDescription": "Counts retired load instructions with remote cxl mem as the data source and the data request missed L3. Available PDIST counters: 0",
+ "SampleAfterValue": "100007",
+ "UMask": "0x10"
+ },
+ {
"BriefDescription": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
"Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
+ "PublicDescription": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM Available PDIST counters: 0",
+ "RetirementLatencyMax": 3572,
+ "RetirementLatencyMean": 430.22,
+ "RetirementLatencyMin": 0,
"SampleAfterValue": "1000003",
"UMask": "0x2"
},
@@ -488,7 +528,10 @@
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD",
- "PublicDescription": "Retired load instructions whose data sources was forwarded from a remote cache.",
+ "PublicDescription": "Retired load instructions whose data sources was forwarded from a remote cache. Available PDIST counters: 0",
+ "RetirementLatencyMax": 8552,
+ "RetirementLatencyMean": 125.36,
+ "RetirementLatencyMin": 0,
"SampleAfterValue": "100007",
"UMask": "0x8"
},
@@ -498,6 +541,10 @@
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
+ "PublicDescription": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM Available PDIST counters: 0",
+ "RetirementLatencyMax": 2580,
+ "RetirementLatencyMean": 135.29,
+ "RetirementLatencyMin": 0,
"SampleAfterValue": "1000003",
"UMask": "0x4"
},
@@ -507,7 +554,7 @@
"Data_LA": "1",
"EventCode": "0xd4",
"EventName": "MEM_LOAD_MISC_RETIRED.UC",
- "PublicDescription": "Retired instructions with at least one load to uncacheable memory-type, or at least one cache-line split locked access (Bus Lock).",
+ "PublicDescription": "Retired instructions with at least one load to uncacheable memory-type, or at least one cache-line split locked access (Bus Lock). Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x4"
},
@@ -517,7 +564,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.FB_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x40"
},
@@ -527,7 +574,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source. Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x1"
},
@@ -537,7 +584,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_MISS",
- "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x8"
},
@@ -547,7 +594,10 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_HIT",
- "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources.",
+ "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources. Available PDIST counters: 0",
+ "RetirementLatencyMax": 7140,
+ "RetirementLatencyMean": 5.71,
+ "RetirementLatencyMin": 0,
"SampleAfterValue": "200003",
"UMask": "0x2"
},
@@ -557,7 +607,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_MISS",
- "PublicDescription": "Counts retired load instructions missed L2 cache as data sources.",
+ "PublicDescription": "Counts retired load instructions missed L2 cache as data sources. Available PDIST counters: 0",
"SampleAfterValue": "100021",
"UMask": "0x10"
},
@@ -567,7 +617,10 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache. Available PDIST counters: 0",
+ "RetirementLatencyMax": 5630,
+ "RetirementLatencyMean": 57.64,
+ "RetirementLatencyMin": 0,
"SampleAfterValue": "100021",
"UMask": "0x4"
},
@@ -577,11 +630,21 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_MISS",
- "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "50021",
"UMask": "0x20"
},
{
+ "BriefDescription": "Retired load instructions with local cxl mem as the data source where the data request missed all caches.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.LOCAL_CXL_MEM",
+ "PublicDescription": "Counts retired load instructions with local cxl mem as the data source and the data request missed L3. Available PDIST counters: 0",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80"
+ },
+ {
"BriefDescription": "MEM_STORE_RETIRED.L2_HIT",
"Counter": "0,1,2,3",
"EventCode": "0x44",
@@ -599,12 +662,24 @@
"UMask": "0x3"
},
{
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit in the L3 or were snooped from another core's caches on the same socket.",
"Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit in the L3 or were snooped from another core's caches on the same socket. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -615,6 +690,29 @@
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that resulted in a snoop hit a modified line in another core's caches which forwarded the data. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "PublicDescription": "Counts demand data reads that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by CXL MEM (Type 2 or Type 3).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.CXL_MEM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x703C00001",
+ "PublicDescription": "Counts demand data reads that were supplied by CXL MEM (Type 2 or Type 3). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -625,6 +723,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0001",
+ "PublicDescription": "Counts demand data reads that hit in the L3 or were snooped from another core's caches on the same socket. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -635,6 +734,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0001",
+ "PublicDescription": "Counts demand data reads that resulted in a snoop hit a modified line in another core's caches which forwarded the data. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -645,6 +745,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0001",
+ "PublicDescription": "Counts demand data reads that resulted in a snoop that hit in another core, which did not forward the data. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -655,6 +756,18 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0001",
+ "PublicDescription": "Counts demand data reads that resulted in a snoop hit in another core's caches which forwarded the unmodified data to the requesting core. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by CXL MEM (Type 2 and Type 3) attached to local socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.LOCAL_CXL_MEM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x700C00001",
+ "PublicDescription": "Counts demand data reads that were supplied by CXL MEM (Type 2 and Type 3) attached to local socket. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -665,6 +778,7 @@
"EventName": "OCR.DEMAND_DATA_RD.REMOTE_CACHE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1030000001",
+ "PublicDescription": "Counts demand data reads that were supplied by a cache on a remote socket where a snoop hit a modified line in another core's caches which forwarded the data. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -675,6 +789,62 @@
"EventName": "OCR.DEMAND_DATA_RD.REMOTE_CACHE.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x830000001",
+ "PublicDescription": "Counts demand data reads that were supplied by a cache on a remote socket where a snoop hit in another core's caches which forwarded the unmodified data to the requesting core. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by CXL MEM (Type 2 or Type 3) attached to another socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.REMOTE_CXL_MEM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x703000001",
+ "PublicDescription": "Counts demand data reads that were supplied by CXL MEM (Type 2 or Type 3) attached to another socket. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
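With the CXL_MEM, LOCAL_CXL_MEM, and REMOTE_CXL_MEM variants of the same request type, the remote share of CXL-served demand reads falls out as a simple ratio. A sketch with hypothetical counts:

```python
# Sketch: deriving the remote share of CXL-backed demand reads from the
# new OCR events above (counts are hypothetical).
cxl_all = 800_000      # OCR.DEMAND_DATA_RD.CXL_MEM
cxl_remote = 200_000   # OCR.DEMAND_DATA_RD.REMOTE_CXL_MEM

remote_share = cxl_remote / cxl_all if cxl_all else 0.0
print(f"remote CXL share of demand reads: {remote_share:.1%}")  # 25.0%
```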
+ {
+ "BriefDescription": "Counts demand data reads that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.SNC_CACHE.HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1008000001",
+ "PublicDescription": "Counts demand data reads that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.SNC_CACHE.HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x808000001",
+ "PublicDescription": "Counts demand data reads that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F3FFC0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by CXL MEM (Type 2 or Type 3).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.CXL_MEM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x703C00002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by CXL MEM (Type 2 or Type 3). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -685,6 +855,7 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit in the L3 or were snooped from another core's caches on the same socket. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -695,6 +866,62 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that resulted in a snoop hit a modified line in another core's caches which forwarded the data. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by CXL MEM (Type 2 and Type 3) attached to local socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.LOCAL_CXL_MEM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x700C00002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by CXL MEM (Type 2 and Type 3) attached to local socket. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by CXL MEM (Type 2 or Type 3) attached to another socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.REMOTE_CXL_MEM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x703000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by CXL MEM (Type 2 or Type 3) attached to another socket. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts writebacks of modified cachelines and streaming stores that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.MODIFIED_WRITE.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10808",
+ "PublicDescription": "Counts writebacks of modified cachelines and streaming stores that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F3FFC4477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by CXL MEM (Type 2 or Type 3).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.CXL_MEM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x703C04477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by CXL MEM (Type 2 or Type 3). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -705,6 +932,7 @@
"EventName": "OCR.READS_TO_CORE.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F003C4477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit in the L3 or were snooped from another core's caches on the same socket. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -715,6 +943,29 @@
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C4477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop hit a modified line in another core's caches which forwarded the data. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by CXL MEM (Type 2 and Type 3) attached to local socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.LOCAL_CXL_MEM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x700C04477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by CXL MEM (Type 2 and Type 3) attached to local socket. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and were supplied by a remote socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F33004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and were supplied by a remote socket. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -725,6 +976,7 @@
"EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1830004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop was sent and data was returned (Modified or Not Modified). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -735,6 +987,7 @@
"EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1030004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop hit a modified line in another core's caches which forwarded the data. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -745,6 +998,18 @@
"EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x830004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop hit in another core's caches which forwarded the unmodified data to the requesting core. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by CXL MEM (Type 2 or Type 3) attached to another socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_CXL_MEM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x703004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by CXL MEM (Type 2 or Type 3) attached to another socket. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
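
All of the OCR.* entries above share EventCode 0x2A,0x2B and are distinguished only by the off-core response MSR pair (MSRIndex 0x1a6,0x1a7): the low 16 bits of MSRValue select the request type(s) and the remaining high bits encode the supplier/snoop response filter. A minimal Python sketch of that split, assuming the conventional low/high layout (the request-bit names are inferred from the events above, not stated in this patch):

    # Illustrative only: split an OCR MSRValue into its request-type mask
    # (assumed low 16 bits) and response-info mask (assumed high bits).
    # 0x0002 = DMND_RFO and 0x4477 = READS_TO_CORE are inferred from the
    # MSRValues of the events above.
    REQUEST_BITS = {0x0002: "DMND_RFO", 0x4477: "READS_TO_CORE"}

    def split_ocr_msr_value(value):
        request = value & 0xFFFF   # request-type selection
        response = value >> 16     # supplier/snoop response info
        return request, response

    for name, value in [("OCR.DEMAND_RFO.ANY_RESPONSE", 0x3F3FFC0002),
                        ("OCR.READS_TO_CORE.REMOTE_CXL_MEM", 0x703004477)]:
        req, resp = split_ocr_msr_value(value)
        print(f"{name}: request=0x{req:x} ({REQUEST_BITS.get(req, '?')}), "
              f"response=0x{resp:x}")
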
@@ -755,6 +1020,7 @@
"EventName": "OCR.READS_TO_CORE.SNC_CACHE.HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1008004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -765,6 +1031,7 @@
"EventName": "OCR.READS_TO_CORE.SNC_CACHE.HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x808004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -775,6 +1042,7 @@
"EventName": "OCR.RFO_TO_CORE.L3_HIT_M",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F80040022",
+ "PublicDescription": "Counts demand reads for ownership (RFO), hardware prefetch RFOs (which bring data to L2), and software prefetches for exclusive ownership (PREFETCHW) that hit to a (M)odified cacheline in the L3 or snoop filter. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -824,6 +1092,15 @@
"UMask": "0x4"
},
{
+ "BriefDescription": "Offcore Uncacheable memory data read transactions.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.MEM_UC",
+ "PublicDescription": "This event counts noncacheable memory data read transactions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
"BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
"Counter": "0,1,2,3",
"CounterMask": "1",
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/counter.json b/tools/perf/pmu-events/arch/x86/graniterapids/counter.json
index 137da7efa8b1..d97211a0227e 100644
--- a/tools/perf/pmu-events/arch/x86/graniterapids/counter.json
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/counter.json
@@ -60,6 +60,11 @@
"CountersNumGeneric": 4
},
{
+ "Unit": "UBOX",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "2"
+ },
+ {
"Unit": "PCU",
"CountersNumFixed": "0",
"CountersNumGeneric": "4"
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/frontend.json b/tools/perf/pmu-events/arch/x86/graniterapids/frontend.json
index dc81055941b1..d580d305c926 100644
--- a/tools/perf/pmu-events/arch/x86/graniterapids/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/frontend.json
@@ -41,7 +41,7 @@
"EventName": "FRONTEND_RETIRED.ANY_ANT",
"MSRIndex": "0x3F7",
"MSRValue": "0x9",
- "PublicDescription": "Always Not Taken (ANT) conditional retired branches (no BTB entry and not mispredicted)",
+ "PublicDescription": "Always Not Taken (ANT) conditional retired branches (no BTB entry and not mispredicted) Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3"
},
@@ -52,7 +52,10 @@
"EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x1",
- "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Available PDIST counters: 0",
+ "RetirementLatencyMax": 65535,
+ "RetirementLatencyMean": 2.46,
+ "RetirementLatencyMin": 0,
"SampleAfterValue": "100007",
"UMask": "0x3"
},
@@ -63,7 +66,7 @@
"EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x11",
- "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss.",
+ "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3"
},
@@ -74,7 +77,10 @@
"EventName": "FRONTEND_RETIRED.ITLB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x14",
- "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss. Available PDIST counters: 0",
+ "RetirementLatencyMax": 980,
+ "RetirementLatencyMean": 41.96,
+ "RetirementLatencyMin": 0,
"SampleAfterValue": "100007",
"UMask": "0x3"
},
@@ -85,7 +91,10 @@
"EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x12",
- "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss.",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss. Available PDIST counters: 0",
+ "RetirementLatencyMax": 1785,
+ "RetirementLatencyMean": 9.83,
+ "RetirementLatencyMin": 0,
"SampleAfterValue": "100007",
"UMask": "0x3"
},
@@ -96,7 +105,10 @@
"EventName": "FRONTEND_RETIRED.L2_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x13",
- "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss.",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss. Available PDIST counters: 0",
+ "RetirementLatencyMax": 2854,
+ "RetirementLatencyMean": 137.41,
+ "RetirementLatencyMin": 0,
"SampleAfterValue": "100007",
"UMask": "0x3"
},
@@ -107,7 +119,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
"MSRIndex": "0x3F7",
"MSRValue": "0x600106",
- "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall.",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3"
},
@@ -118,7 +130,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
"MSRValue": "0x608006",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3"
},
@@ -129,7 +141,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
"MSRValue": "0x601006",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3"
},
@@ -140,7 +152,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
"MSRValue": "0x600206",
- "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3"
},
@@ -151,7 +163,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
"MSRIndex": "0x3F7",
"MSRValue": "0x610006",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3"
},
@@ -162,7 +174,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
"MSRValue": "0x100206",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3"
},
@@ -173,7 +185,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
"MSRValue": "0x602006",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3"
},
@@ -184,7 +196,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
"MSRValue": "0x600406",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3"
},
@@ -195,7 +207,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
"MSRValue": "0x620006",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3"
},
@@ -206,7 +218,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
"MSRValue": "0x604006",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3"
},
@@ -217,7 +229,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
"MSRValue": "0x600806",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3"
},
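
Every FRONTEND_RETIRED.LATENCY_GE_* entry above is the same event steered through MSR_PEBS_FRONTEND (MSRIndex 0x3F7); the MSRValue packs a sub-event code plus the bubble-length and bubble-width thresholds into one word. A decode sketch, assuming the SDM's MSR_PEBS_FRONTEND field layout (event select in bits 7:0, IDQ bubble length in bits 19:8, bubble width in bits 22:20):

    # Hedged sketch: decode the MSR_PEBS_FRONTEND programming used by the
    # FRONTEND_RETIRED.LATENCY_GE_* events above. Field positions follow
    # the SDM layout and are an assumption, not taken from this patch.
    def decode_pebs_frontend(msr_value):
        return {
            "event_select": msr_value & 0xFF,           # sub-event code
            "bubble_length": (msr_value >> 8) & 0xFFF,  # >= N stall cycles
            "bubble_width": (msr_value >> 20) & 0x7,    # empty slots per cycle
        }

    for name, value in [("LATENCY_GE_1", 0x600106),
                        ("LATENCY_GE_128", 0x608006),
                        ("LATENCY_GE_2_BUBBLES_GE_1", 0x100206)]:
        print(name, decode_pebs_frontend(value))
    # bubble_length comes out as 1, 128 and 2 respectively, matching the
    # cycle thresholds in the descriptions above.
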
@@ -228,7 +240,7 @@
"EventName": "FRONTEND_RETIRED.LATE_SWPF",
"MSRIndex": "0x3F7",
"MSRValue": "0xA",
- "PublicDescription": "Number of Instruction Cache demand miss in shadow of an on-going i-fetch cache-line triggered by PREFETCHIT0/1 instructions",
+ "PublicDescription": "Number of Instruction Cache demand miss in shadow of an on-going i-fetch cache-line triggered by PREFETCHIT0/1 instructions Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3"
},
@@ -239,7 +251,7 @@
"EventName": "FRONTEND_RETIRED.MISP_ANT",
"MSRIndex": "0x3F7",
"MSRValue": "0x9",
- "PublicDescription": "ANT retired branches that got just mispredicted",
+ "PublicDescription": "ANT retired branches that got just mispredicted Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x2"
},
@@ -250,6 +262,10 @@
"EventName": "FRONTEND_RETIRED.MS_FLOWS",
"MSRIndex": "0x3F7",
"MSRValue": "0x8",
+ "PublicDescription": "FRONTEND_RETIRED.MS_FLOWS Available PDIST counters: 0",
+ "RetirementLatencyMax": 65535,
+ "RetirementLatencyMean": 77.14,
+ "RetirementLatencyMin": 0,
"SampleAfterValue": "100007",
"UMask": "0x3"
},
@@ -260,7 +276,10 @@
"EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x15",
- "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss. Available PDIST counters: 0",
+ "RetirementLatencyMax": 754,
+ "RetirementLatencyMean": 206.85,
+ "RetirementLatencyMin": 0,
"SampleAfterValue": "100007",
"UMask": "0x3"
},
@@ -271,6 +290,10 @@
"EventName": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
"MSRIndex": "0x3F7",
"MSRValue": "0x17",
+ "PublicDescription": "FRONTEND_RETIRED.UNKNOWN_BRANCH Available PDIST counters: 0",
+ "RetirementLatencyMax": 532,
+ "RetirementLatencyMean": 3.85,
+ "RetirementLatencyMin": 0,
"SampleAfterValue": "100007",
"UMask": "0x3"
},
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/gnr-metrics.json b/tools/perf/pmu-events/arch/x86/graniterapids/gnr-metrics.json
index a345b6874606..cc3c834ca286 100644
--- a/tools/perf/pmu-events/arch/x86/graniterapids/gnr-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/gnr-metrics.json
@@ -1,28 +1,28 @@
[
{
"BriefDescription": "C1 residency percent per core",
- "MetricExpr": "cstate_core@c1\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c1\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C1_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C2 residency percent per package",
- "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per core",
- "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per package",
- "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency",
"ScaleUnit": "100%"
@@ -96,6 +96,12 @@
"ScaleUnit": "1MB/s"
},
{
+ "BriefDescription": "Bandwidth of inbound IO reads that are initiated by end device controllers that are requesting memory from the CPU and miss the L3 cache",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_read_l3_miss",
+ "ScaleUnit": "1MB/s"
+ },
+ {
"BriefDescription": "Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the local CPU socket",
"MetricExpr": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_LOCAL * 64 / 1e6 / duration_time",
"MetricName": "io_bandwidth_read_local",
@@ -114,6 +120,12 @@
"ScaleUnit": "1MB/s"
},
{
+ "BriefDescription": "Bandwidth of inbound IO writes that are initiated by end device controllers that are writing memory to the CPU",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_MISS_ITOM + UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR) * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_write_l3_miss",
+ "ScaleUnit": "1MB/s"
+ },
+ {
"BriefDescription": "Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the local CPU socket",
"MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_ITOM_LOCAL + UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_LOCAL) * 64 / 1e6 / duration_time",
"MetricName": "io_bandwidth_write_local",
@@ -126,6 +138,30 @@
"ScaleUnit": "1MB/s"
},
{
+ "BriefDescription": "The percent of inbound full cache line writes initiated by IO that miss the L3 cache",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM / UNC_CHA_TOR_INSERTS.IO_ITOM",
+ "MetricName": "io_full_write_l3_miss",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Message Signaled Interrupts (MSI) per second sent by the integrated I/O traffic controller (IIO) to System Configuration Controller (Ubox)",
+ "MetricExpr": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.UBOX_POSTED / duration_time",
+ "MetricName": "io_msi",
+ "ScaleUnit": "1per_sec"
+ },
+ {
+ "BriefDescription": "The percent of inbound partial writes initiated by IO that miss the L3 cache",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR + UNC_CHA_TOR_INSERTS.IO_MISS_RFO) / (UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR + UNC_CHA_TOR_INSERTS.IO_RFO)",
+ "MetricName": "io_partial_write_l3_miss",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "The percent of inbound reads initiated by IO that miss the L3 cache",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR / UNC_CHA_TOR_INSERTS.IO_PCIRDCUR",
+ "MetricName": "io_read_l3_miss",
+ "ScaleUnit": "100%"
+ },
+ {
"BriefDescription": "Ratio of number of completed page walks (for all page sizes) caused by a code fetch to the total number of completed instructions",
"MetricExpr": "ITLB_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
"MetricName": "itlb_2nd_level_mpi",
@@ -310,7 +346,7 @@
"ScaleUnit": "1per_instr"
},
{
- "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations",
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
"MetricExpr": "(UOPS_DISPATCHED.PORT_0 + UOPS_DISPATCHED.PORT_1 + UOPS_DISPATCHED.PORT_5_11 + UOPS_DISPATCHED.PORT_6) / (5 * tma_info_core_core_clks)",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_alu_op_utilization",
@@ -322,7 +358,7 @@
"MetricExpr": "EXE.AMX_BUSY / tma_info_core_core_clks",
"MetricGroup": "BvCB;Compute;HPC;Server;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_amx_busy",
- "MetricThreshold": "tma_amx_busy > 0.5 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_amx_busy > 0.5 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
@@ -330,12 +366,12 @@
"MetricExpr": "78 * ASSISTS.ANY / tma_info_thread_slots",
"MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
- "MetricThreshold": "tma_assists > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: ASSISTS.ANY",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of slots the CPU retired uops as a result of handing SSE to AVX* or AVX* to SSE transition Assists",
+ "BriefDescription": "This metric estimates fraction of slots the CPU retired uops as a result of handing SSE to AVX* or AVX* to SSE transition Assists.",
"MetricExpr": "63 * ASSISTS.SSE_AVX_MIX / tma_info_thread_slots",
"MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group",
"MetricName": "tma_avx_assists",
@@ -345,7 +381,7 @@
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound)",
"MetricGroup": "BvOB;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
@@ -361,12 +397,12 @@
"MetricName": "tma_bad_speculation",
"MetricThreshold": "tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
- "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches)",
+ "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
"MetricGroup": "BigFootprint;BvBC;Fed;Frontend;IcMiss;MemoryTLB",
"MetricName": "tma_bottleneck_big_code",
"MetricThreshold": "tma_bottleneck_big_code > 20"
@@ -380,39 +416,39 @@
"PublicDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. (A lower bound)"
},
{
+ "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
+ "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_amx_busy + tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * tma_amx_busy / (tma_amx_busy + tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_amx_busy + tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
+ "MetricGroup": "BvCB;Cor;tma_issueComp",
+ "MetricName": "tma_bottleneck_compute_bound_est",
+ "MetricThreshold": "tma_bottleneck_compute_bound_est > 20",
+ "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. Related metrics: "
+ },
+ {
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_fb_full / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_fb_full)))",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
"MetricGroup": "BvMB;Mem;MemoryBW;Offcore;tma_issueBW",
- "MetricName": "tma_bottleneck_cache_memory_bandwidth",
- "MetricThreshold": "tma_bottleneck_cache_memory_bandwidth > 20",
+ "MetricName": "tma_bottleneck_data_cache_memory_bandwidth",
+ "MetricThreshold": "tma_bottleneck_data_cache_memory_bandwidth > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_l1_latency_dependency / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_fb_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_lock_latency / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_fb_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_split_loads / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_fb_full)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_split_stores / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_store_latency / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)))",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + 0 / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency) + tma_memory_bound * (tma_l3_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_l1_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l1_latency_dependency / (tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_lock_latency / (tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_loads / (tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_stores / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)) + tma_memory_bound * (tma_store_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
"MetricGroup": "BvML;Mem;MemoryLat;Offcore;tma_issueLat",
- "MetricName": "tma_bottleneck_cache_memory_latency",
- "MetricThreshold": "tma_bottleneck_cache_memory_latency > 20",
+ "MetricName": "tma_bottleneck_data_cache_memory_latency",
+ "MetricThreshold": "tma_bottleneck_data_cache_memory_latency > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency"
},
{
- "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
- "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_serializing_operation + tma_amx_busy + tma_ports_utilization) + tma_core_bound * tma_amx_busy / (tma_divider + tma_serializing_operation + tma_amx_busy + tma_ports_utilization) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_serializing_operation + tma_amx_busy + tma_ports_utilization)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
- "MetricGroup": "BvCB;Cor;tma_issueComp",
- "MetricName": "tma_bottleneck_compute_bound_est",
- "MetricThreshold": "tma_bottleneck_compute_bound_est > 20",
- "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy"
- },
- {
"BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)",
- "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) - (1 - INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.MS\\,cmask\\=0x1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_mispredicts_resteers + tma_clears_resteers + tma_unknown_branches)) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_ms / (tma_mite + tma_dsb + tma_ms))) - tma_bottleneck_big_code",
+ "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - (1 - INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.MS\\,cmask\\=1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_ms)) - tma_bottleneck_big_code",
"MetricGroup": "BvFB;Fed;FetchBW;Frontend",
"MetricName": "tma_bottleneck_instruction_fetch_bw",
"MetricThreshold": "tma_bottleneck_instruction_fetch_bw > 20"
},
{
"BriefDescription": "Total pipeline cost of irregular execution (e.g",
- "MetricExpr": "100 * ((1 - INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.MS\\,cmask\\=0x1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_mispredicts_resteers + tma_clears_resteers + tma_unknown_branches)) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_ms / (tma_mite + tma_dsb + tma_ms)) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + RS.EMPTY_RESOURCE / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_serializing_operation + tma_amx_busy + tma_ports_utilization) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
+ "MetricExpr": "100 * ((1 - INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.MS\\,cmask\\=1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_ms) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + RS.EMPTY_RESOURCE / tma_info_thread_clks * tma_ports_utilized_0) / (tma_amx_busy + tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
"MetricGroup": "Bad;BvIO;Cor;Ret;tma_issueMS",
"MetricName": "tma_bottleneck_irregular_overhead",
"MetricThreshold": "tma_bottleneck_irregular_overhead > 10",
@@ -420,7 +456,7 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_dtlb_load / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_fb_full)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_dtlb_store / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)))",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / (tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
"MetricGroup": "BvMT;Mem;MemoryTLB;Offcore;tma_issueTLB",
"MetricName": "tma_bottleneck_memory_data_tlbs",
"MetricThreshold": "tma_bottleneck_memory_data_tlbs > 20",
@@ -428,7 +464,7 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) * tma_remote_cache / (tma_local_mem + tma_remote_mem + tma_remote_cache) + tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * tma_false_sharing / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) * tma_remote_cache / (tma_local_mem + tma_remote_cache + tma_remote_mem) + tma_l3_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
"MetricGroup": "BvMS;LockCont;Mem;Offcore;tma_issueSyncxn",
"MetricName": "tma_bottleneck_memory_synchronization",
"MetricThreshold": "tma_bottleneck_memory_synchronization > 10",
@@ -436,7 +472,7 @@
},
{
"BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
- "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Bad;BadSpec;BrMispredicts;BvMP;tma_issueBM",
"MetricName": "tma_bottleneck_mispredictions",
"MetricThreshold": "tma_bottleneck_mispredictions > 20",
@@ -444,14 +480,14 @@
},
{
"BriefDescription": "Total pipeline cost of remaining bottlenecks in the back-end",
- "MetricExpr": "100 - (tma_bottleneck_big_code + tma_bottleneck_instruction_fetch_bw + tma_bottleneck_mispredictions + tma_bottleneck_cache_memory_bandwidth + tma_bottleneck_cache_memory_latency + tma_bottleneck_memory_data_tlbs + tma_bottleneck_memory_synchronization + tma_bottleneck_compute_bound_est + tma_bottleneck_irregular_overhead + tma_bottleneck_branching_overhead + tma_bottleneck_useful_work)",
+ "MetricExpr": "100 - (tma_bottleneck_big_code + tma_bottleneck_instruction_fetch_bw + tma_bottleneck_mispredictions + tma_bottleneck_data_cache_memory_bandwidth + tma_bottleneck_data_cache_memory_latency + tma_bottleneck_memory_data_tlbs + tma_bottleneck_memory_synchronization + tma_bottleneck_compute_bound_est + tma_bottleneck_irregular_overhead + tma_bottleneck_branching_overhead + tma_bottleneck_useful_work)",
"MetricGroup": "BvOB;Cor;Offcore",
"MetricName": "tma_bottleneck_other_bottlenecks",
"MetricThreshold": "tma_bottleneck_other_bottlenecks > 20",
- "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls"
+ "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls."
},
{
- "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead",
+ "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.",
"MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + 2 * BR_INST_RETIRED.NEAR_CALL + INST_RETIRED.NOP) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
"MetricGroup": "BvUW;Ret",
"MetricName": "tma_bottleneck_useful_work",
@@ -460,7 +496,7 @@
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
"DefaultMetricgroupName": "TopdownL2",
- "MetricExpr": "topdown\\-br\\-mispredict / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "topdown\\-br\\-mispredict / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound)",
"MetricGroup": "BadSpec;BrMispredicts;BvMP;Default;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
@@ -473,24 +509,24 @@
"MetricExpr": "INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks + tma_unknown_branches",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_branch_resteers",
- "MetricThreshold": "tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_l3_hit_latency, tma_store_latency",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.1 power-performance optimized state (Faster wakeup time; Smaller power savings)",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.1 power-performance optimized state (Faster wakeup time; Smaller power savings).",
"MetricExpr": "CPU_CLK_UNHALTED.C01 / tma_info_thread_clks",
"MetricGroup": "C0Wait;TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_c01_wait",
- "MetricThreshold": "tma_c01_wait > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_c01_wait > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.2 power-performance optimized state (Slower wakeup time; Larger power savings)",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.2 power-performance optimized state (Slower wakeup time; Larger power savings).",
"MetricExpr": "CPU_CLK_UNHALTED.C02 / tma_info_thread_clks",
"MetricGroup": "C0Wait;TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_c02_wait",
- "MetricThreshold": "tma_c02_wait > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_c02_wait > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
{
@@ -498,8 +534,8 @@
"MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
"MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_cisc",
- "MetricThreshold": "tma_cisc > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
- "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
"ScaleUnit": "100%"
},
{
@@ -507,24 +543,24 @@
"MetricExpr": "(1 - tma_branch_mispredicts / tma_bad_speculation) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
"MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
"MetricName": "tma_clears_resteers",
- "MetricThreshold": "tma_clears_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that hit in the L2 cache",
+ "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that hit in the L2 cache.",
"MetricExpr": "max(0, FRONTEND_RETIRED.L1I_MISS * FRONTEND_RETIRED.L1I_MISS:R / tma_info_thread_clks - tma_code_l2_miss)",
"MetricGroup": "FetchLat;IcMiss;Offcore;TopdownL4;tma_L4_group;tma_icache_misses_group",
"MetricName": "tma_code_l2_hit",
- "MetricThreshold": "tma_code_l2_hit > 0.05 & tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_l2_hit > 0.05 & (tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that miss in the L2 cache",
+ "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that miss in the L2 cache.",
"MetricExpr": "FRONTEND_RETIRED.L2_MISS * FRONTEND_RETIRED.L2_MISS:R / tma_info_thread_clks",
"MetricGroup": "FetchLat;IcMiss;Offcore;TopdownL4;tma_L4_group;tma_icache_misses_group",
"MetricName": "tma_code_l2_miss",
- "MetricThreshold": "tma_code_l2_miss > 0.05 & tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_l2_miss > 0.05 & (tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
@@ -532,7 +568,7 @@
"MetricExpr": "max(0, FRONTEND_RETIRED.ITLB_MISS * FRONTEND_RETIRED.ITLB_MISS:R / tma_info_thread_clks - tma_code_stlb_miss)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group",
"MetricName": "tma_code_stlb_hit",
- "MetricThreshold": "tma_code_stlb_hit > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_hit > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
@@ -540,48 +576,48 @@
"MetricExpr": "FRONTEND_RETIRED.STLB_MISS * FRONTEND_RETIRED.STLB_MISS:R / tma_info_thread_clks",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group",
"MetricName": "tma_code_stlb_miss",
- "MetricThreshold": "tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for (instruction) code accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for (instruction) code accesses.",
"MetricExpr": "ITLB_MISSES.WALK_ACTIVE / tma_info_thread_clks * ITLB_MISSES.WALK_COMPLETED_2M_4M / (ITLB_MISSES.WALK_COMPLETED_4K + ITLB_MISSES.WALK_COMPLETED_2M_4M)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group",
"MetricName": "tma_code_stlb_miss_2m",
- "MetricThreshold": "tma_code_stlb_miss_2m > 0.05 & tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss_2m > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for (instruction) code accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for (instruction) code accesses.",
"MetricExpr": "ITLB_MISSES.WALK_ACTIVE / tma_info_thread_clks * ITLB_MISSES.WALK_COMPLETED_4K / (ITLB_MISSES.WALK_COMPLETED_4K + ITLB_MISSES.WALK_COMPLETED_2M_4M)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group",
"MetricName": "tma_code_stlb_miss_4k",
- "MetricThreshold": "tma_code_stlb_miss_4k > 0.05 & tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss_4k > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by non-taken conditional branches",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by non-taken conditional branches.",
"MetricExpr": "BR_MISP_RETIRED.COND_NTAKEN_COST * BR_MISP_RETIRED.COND_NTAKEN_COST:R / tma_info_thread_clks",
"MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_cond_nt_mispredicts",
- "MetricThreshold": "tma_cond_nt_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_cond_nt_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to misprediction by taken conditional branches",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to misprediction by taken conditional branches.",
"MetricExpr": "BR_MISP_RETIRED.COND_TAKEN_COST * BR_MISP_RETIRED.COND_TAKEN_COST:R / tma_info_thread_clks",
"MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_cond_tk_mispredicts",
- "MetricThreshold": "tma_cond_tk_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_cond_tk_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
- "MetricExpr": "((min(MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS:R, MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS * (79 * tma_info_system_core_frequency) - 4.4 * tma_info_system_core_frequency) if 0 < MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS:R else MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS * (79 * tma_info_system_core_frequency) - 4.4 * tma_info_system_core_frequency) + (min(MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD:R, MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (81 * tma_info_system_core_frequency) - 4.4 * tma_info_system_core_frequency) if 0 < MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD:R else MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (81 * tma_info_system_core_frequency) - 4.4 * tma_info_system_core_frequency) * (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "(MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS * min(MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS:R, 74.6 * tma_info_system_core_frequency) + MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * min(MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD:R, 76.6 * tma_info_system_core_frequency) * (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
- "MetricThreshold": "tma_contested_accesses > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD, MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_bottleneck_memory_synchronization, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_bottleneck_memory_synchronization, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
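The rewritten contested-accesses MetricExpr weights each snoop event count by its average retired latency (the :R modifier), clamped by min() at a frequency-scaled cost constant so one outlier load cannot dominate. A small illustration of that capping pattern with invented numbers (the names echo the expression; none of this is a real perf API):

    # Illustrative capped-latency term from tma_contested_accesses.
    xsnp_miss = 1.2e6          # MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS count
    xsnp_miss_r = 90.0         # ...XSNP_MISS:R, average retired latency
    core_frequency = 2.5       # tma_info_system_core_frequency
    thread_clks = 4.0e9        # tma_info_thread_clks

    # Clamp the measured latency at 74.6 * core frequency, then weight by
    # the event count and normalize to a fraction of thread clocks.
    capped = min(xsnp_miss_r, 74.6 * core_frequency)
    print(xsnp_miss * capped / thread_clks)  # 0.027 share from this term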
{
@@ -592,24 +628,34 @@
"MetricName": "tma_core_bound",
"MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2;Default",
- "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations)",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates (based on idle latencies) how often the CPU was stalled on accesses to external CXL Memory by loads (e.g",
+ "MetricExpr": "(((1 - ((19 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + 10 * (MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS))) / (19 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + 10 * (MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + (25 * (MEM_LOAD_RETIRED.LOCAL_CXL_MEM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) if #has_pmem > 0 else 0) + 33 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_CXL_MEM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) if #has_pmem > 0 else 0))) if #has_pmem > 0 else 1)) * (MEMORY_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks) if 1e6 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_CXL_MEM + MEM_LOAD_RETIRED.LOCAL_CXL_MEM) > MEM_LOAD_RETIRED.L1_MISS else 0) if #has_pmem > 0 else 0)",
+ "MetricGroup": "MemoryBound;Server;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_cxl_mem_bound",
+ "MetricThreshold": "tma_cxl_mem_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric roughly estimates (based on idle latencies) how often the CPU was stalled on accesses to external CXL Memory by loads (e.g. 3D-Xpoint (Crystal Ridge, a.k.a. IXP) memory, PMM - Persistent Memory Module [from CLX to SPR] or any other CXL Type3 Memory [EMR onwards]).",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
- "MetricExpr": "((min(MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD * MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD:R, MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD * (79 * tma_info_system_core_frequency) - 4.4 * tma_info_system_core_frequency) if 0 < MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD:R else MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD * (79 * tma_info_system_core_frequency) - 4.4 * tma_info_system_core_frequency) + (min(MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD:R, MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (79 * tma_info_system_core_frequency) - 4.4 * tma_info_system_core_frequency) if 0 < MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD:R else MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (79 * tma_info_system_core_frequency) - 4.4 * tma_info_system_core_frequency) * (1 - OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD * min(MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD:R, 74.6 * tma_info_system_core_frequency) + MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * min(MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD:R, 74.6 * tma_info_system_core_frequency) * (1 - OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
- "MetricThreshold": "tma_data_sharing > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder",
- "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=0x1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=0x2@) / tma_info_core_core_clks / 2",
+ "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_core_clks / 2",
"MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group",
"MetricName": "tma_decoder0_alone",
- "MetricThreshold": "tma_decoder0_alone > 0.1 & tma_mite > 0.1 & tma_fetch_bandwidth > 0.2",
+ "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions",
"ScaleUnit": "100%"
},
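A counter programmed with cmask=N ticks once per cycle in which its event fires at least N times, so subtracting the cmask=2 count of INST_DECODED.DECODERS from the cmask=1 count isolates cycles where exactly one decoder was active. A worked sketch with made-up counts:

    # Illustrative cmask differencing behind tma_decoder0_alone.
    cycles_ge_1 = 6.0e8    # cpu@INST_DECODED.DECODERS,cmask=1@
    cycles_ge_2 = 4.5e8    # cpu@INST_DECODED.DECODERS,cmask=2@
    core_clks = 2.0e9      # tma_info_core_core_clks

    exactly_one = cycles_ge_1 - cycles_ge_2      # 1.5e8 single-decoder cycles
    print(exactly_one / core_clks / 2)           # 0.0375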
@@ -618,16 +664,16 @@
"MetricExpr": "ARITH.DIV_ACTIVE / tma_info_thread_clks",
"MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
- "MetricThreshold": "tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIV_ACTIVE",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
- "MetricExpr": "MEMORY_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks",
+ "MetricExpr": "(MEMORY_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks - tma_cxl_mem_bound if #has_pmem > 0 else MEMORY_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks)",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_dram_bound",
- "MetricThreshold": "tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_MISS",
"ScaleUnit": "100%"
},
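The tma_dram_bound expression is a guarded subtraction: on parts where the #has_pmem literal is non-zero, the CXL-attributed share is peeled off the L3-miss stall fraction so the same stall cycles are not counted under both tma_dram_bound and tma_cxl_mem_bound. A sketch of that selection logic under invented inputs:

    # Illustrative re-statement of the tma_dram_bound conditional.
    def dram_bound(stalls_l3_miss, clks, cxl_mem_bound, has_pmem):
        base = stalls_l3_miss / clks
        # With CXL/PMEM present, remove the CXL-attributed stalls.
        return base - cxl_mem_bound if has_pmem else base

    print(dram_bound(1.0e9, 4.0e9, 0.05, has_pmem=True))   # 0.2
    print(dram_bound(1.0e9, 4.0e9, 0.05, has_pmem=False))  # 0.25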
@@ -637,7 +683,7 @@
"MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_dsb",
"MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2",
- "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
"ScaleUnit": "100%"
},
{
@@ -645,34 +691,34 @@
"MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_thread_clks",
"MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_dsb_switches",
- "MetricThreshold": "tma_dsb_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
- "MetricExpr": "(min(MEM_INST_RETIRED.STLB_HIT_LOADS * MEM_INST_RETIRED.STLB_HIT_LOADS:R, MEM_INST_RETIRED.STLB_HIT_LOADS * 7) if 0 < MEM_INST_RETIRED.STLB_HIT_LOADS:R else MEM_INST_RETIRED.STLB_HIT_LOADS * 7) / tma_info_thread_clks + tma_load_stlb_miss",
+ "MetricExpr": "MEM_INST_RETIRED.STLB_HIT_LOADS * min(MEM_INST_RETIRED.STLB_HIT_LOADS:R, 7) / tma_info_thread_clks + tma_load_stlb_miss",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
- "MetricThreshold": "tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_store",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
- "MetricExpr": "(min(MEM_INST_RETIRED.STLB_HIT_STORES * MEM_INST_RETIRED.STLB_HIT_STORES:R, MEM_INST_RETIRED.STLB_HIT_STORES * 7) if 0 < MEM_INST_RETIRED.STLB_HIT_STORES:R else MEM_INST_RETIRED.STLB_HIT_STORES * 7) / tma_info_thread_clks + tma_store_stlb_miss",
+ "MetricExpr": "MEM_INST_RETIRED.STLB_HIT_STORES * min(MEM_INST_RETIRED.STLB_HIT_STORES:R, 7) / tma_info_thread_clks + tma_store_stlb_miss",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
- "MetricThreshold": "tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_load",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
- "MetricExpr": "(170 * tma_info_system_core_frequency * cpu@OCR.DEMAND_RFO.L3_MISS\\,offcore_rsp\\=0x103b800002@ + 81 * tma_info_system_core_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM) / tma_info_thread_clks",
+ "MetricExpr": "(170 * tma_info_system_core_frequency * OCR.DEMAND_RFO.L3_MISS@offcore_rsp\\=0x103b800002@ + 81 * tma_info_system_core_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM) / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
- "MetricThreshold": "tma_false_sharing > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
@@ -682,7 +728,7 @@
"MetricGroup": "BvMB;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
- "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
"ScaleUnit": "100%"
},
{
@@ -693,7 +739,7 @@
"MetricName": "tma_fetch_bandwidth",
"MetricThreshold": "tma_fetch_bandwidth > 0.2",
"MetricgroupNoGroup": "TopdownL2;Default",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1, FRONTEND_RETIRED.LATENCY_GE_1, FRONTEND_RETIRED.LATENCY_GE_2. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1;FRONTEND_RETIRED.LATENCY_GE_1;FRONTEND_RETIRED.LATENCY_GE_2. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
@@ -704,7 +750,7 @@
"MetricName": "tma_fetch_latency",
"MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL2;Default",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16, FRONTEND_RETIRED.LATENCY_GE_8",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
"ScaleUnit": "100%"
},
{
@@ -722,7 +768,7 @@
"MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fp_arith",
"MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
"ScaleUnit": "100%"
},
{
@@ -731,15 +777,15 @@
"MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group",
"MetricName": "tma_fp_assists",
"MetricThreshold": "tma_fp_assists > 0.1",
- "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals)",
+ "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals).",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of cycles where the Floating-Point Divider unit was active",
+ "BriefDescription": "This metric represents fraction of cycles where the Floating-Point Divider unit was active.",
"MetricExpr": "ARITH.FPDIV_ACTIVE / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_divider_group",
"MetricName": "tma_fp_divider",
- "MetricThreshold": "tma_fp_divider > 0.2 & tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_fp_divider > 0.2 & (tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
{
@@ -747,8 +793,8 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED2.SCALAR) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_scalar",
- "MetricThreshold": "tma_fp_scalar > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -756,8 +802,8 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.VECTOR + FP_ARITH_INST_RETIRED2.VECTOR) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_vector",
- "MetricThreshold": "tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -765,8 +811,8 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.128B_PACKED_HALF) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_128b",
- "MetricThreshold": "tma_fp_vector_128b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -774,8 +820,8 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.256B_PACKED_HALF) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_256b",
- "MetricThreshold": "tma_fp_vector_256b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -783,8 +829,8 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.512B_PACKED_HALF) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_512b",
- "MetricThreshold": "tma_fp_vector_512b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector_512b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -795,27 +841,27 @@
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions , where one uop can represent multiple contiguous instructions",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions",
"MetricExpr": "tma_light_operations * INST_RETIRED.MACRO_FUSED / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fused_instructions",
"MetricThreshold": "tma_fused_instructions > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions , where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
"DefaultMetricgroupName": "TopdownL2",
- "MetricExpr": "topdown\\-heavy\\-ops / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "topdown\\-heavy\\-ops / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound)",
"MetricGroup": "Default;Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_heavy_operations",
"MetricThreshold": "tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL2;Default",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+]). Sample with: UOPS_RETIRED.HEAVY",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+]). Sample with: UOPS_RETIRED.HEAVY",
"ScaleUnit": "100%"
},
{
@@ -823,24 +869,24 @@
"MetricExpr": "ICACHE_DATA.STALLS / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
- "MetricThreshold": "tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS, FRONTEND_RETIRED.L1I_MISS",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by indirect CALL instructions",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by indirect CALL instructions.",
"MetricExpr": "BR_MISP_RETIRED.INDIRECT_CALL_COST * BR_MISP_RETIRED.INDIRECT_CALL_COST:R / tma_info_thread_clks",
"MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_ind_call_mispredicts",
- "MetricThreshold": "tma_ind_call_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_ind_call_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by indirect JMP instructions",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by indirect JMP instructions.",
"MetricExpr": "max((BR_MISP_RETIRED.INDIRECT_COST * BR_MISP_RETIRED.INDIRECT_COST:R - BR_MISP_RETIRED.INDIRECT_CALL_COST * BR_MISP_RETIRED.INDIRECT_CALL_COST:R) / tma_info_thread_clks, 0)",
"MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_ind_jump_mispredicts",
- "MetricThreshold": "tma_ind_jump_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_ind_jump_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
@@ -851,28 +897,28 @@
"PublicDescription": "Branch Misprediction Cost: Cycles representing fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_bottleneck_mispredictions, tma_branch_mispredicts, tma_mispredicts_resteers"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for conditional non-taken branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for conditional non-taken branches (lower number means higher occurrence rate).",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_NTAKEN",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_cond_ntaken",
"MetricThreshold": "tma_info_bad_spec_ipmisp_cond_ntaken < 200"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for conditional taken branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for conditional taken branches (lower number means higher occurrence rate).",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_TAKEN",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_cond_taken",
"MetricThreshold": "tma_info_bad_spec_ipmisp_cond_taken < 200"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.INDIRECT",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_indirect",
- "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1000"
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate).",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.RET",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_ret",
@@ -900,7 +946,7 @@
},
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck",
- "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_latency + tma_fetch_bandwidth)) * (tma_dsb / (tma_mite + tma_dsb + tma_ms)))",
+ "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_bandwidth + tma_fetch_latency)) * (tma_dsb / (tma_dsb + tma_mite + tma_ms)))",
"MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_bandwidth",
"MetricThreshold": "tma_info_botlnk_l2_dsb_bandwidth > 10",
@@ -908,7 +954,7 @@
},
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
- "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_mite / (tma_mite + tma_dsb + tma_ms))",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_mite + tma_ms))",
"MetricGroup": "DSBmiss;Fed;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_misses",
"MetricThreshold": "tma_info_botlnk_l2_dsb_misses > 10",
@@ -916,10 +962,11 @@
},
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
- "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
"MetricName": "tma_info_botlnk_l2_ic_misses",
- "MetricThreshold": "tma_info_botlnk_l2_ic_misses > 5"
+ "MetricThreshold": "tma_info_botlnk_l2_ic_misses > 5",
+ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: "
},
{
"BriefDescription": "Fraction of branches that are CALL or RET",
@@ -980,11 +1027,11 @@
"MetricExpr": "(FP_ARITH_DISPATCHED.PORT_0 + FP_ARITH_DISPATCHED.PORT_1 + FP_ARITH_DISPATCHED.PORT_5) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
- "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)"
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=0x1@",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "tma_info_core_ilp"
},
@@ -997,8 +1044,8 @@
"PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp"
},
{
- "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details",
- "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=0x1\\,edge\\=0x1@",
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
"MetricGroup": "DSBmiss",
"MetricName": "tma_info_frontend_dsb_switch_cost"
},
@@ -1011,7 +1058,7 @@
},
{
"BriefDescription": "Average number of Uops issued by front-end when it issued something",
- "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=0x1@",
+ "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=1@",
"MetricGroup": "Fed;FetchBW",
"MetricName": "tma_info_frontend_fetch_upc"
},
@@ -1061,10 +1108,10 @@
},
{
"BriefDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection",
- "MetricExpr": "INT_MISC.UNKNOWN_BRANCH_CYCLES / cpu@INT_MISC.UNKNOWN_BRANCH_CYCLES\\,cmask\\=0x1\\,edge\\=0x1@",
+ "MetricExpr": "INT_MISC.UNKNOWN_BRANCH_CYCLES / cpu@INT_MISC.UNKNOWN_BRANCH_CYCLES\\,cmask\\=1\\,edge@",
"MetricGroup": "Fed",
"MetricName": "tma_info_frontend_unknown_branch_cost",
- "PublicDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection. See Unknown_Branches node"
+ "PublicDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection. See Unknown_Branches node."
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU retirement was stalled likely due to retired branches who got branch address clears",
@@ -1073,7 +1120,7 @@
"MetricName": "tma_info_frontend_unknown_branches_ret"
},
{
- "BriefDescription": "Branch instructions per taken branch",
+ "BriefDescription": "Branch instructions per taken branch.",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;PGO",
"MetricName": "tma_info_inst_mix_bptkbranch"
@@ -1091,7 +1138,7 @@
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
"MetricThreshold": "tma_info_inst_mix_iparith < 10",
- "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW"
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
@@ -1099,7 +1146,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx128",
"MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
@@ -1107,7 +1154,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx256",
"MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate)",
@@ -1115,7 +1162,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx512",
"MetricThreshold": "tma_info_inst_mix_iparith_avx512 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
@@ -1123,7 +1170,7 @@
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_dp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Half-Precision instruction (lower number means higher occurrence rate)",
@@ -1131,7 +1178,7 @@
"MetricGroup": "Flops;FpScalar;InsType;Server",
"MetricName": "tma_info_inst_mix_iparith_scalar_hp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_hp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Half-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Half-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
@@ -1139,7 +1186,7 @@
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_sp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
@@ -1194,7 +1241,7 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
- "MetricThreshold": "tma_info_inst_mix_iptb < 6 * 2 + 1",
+ "MetricThreshold": "tma_info_inst_mix_iptb < 13",
"PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp"
},
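The threshold rewrite here is plain constant folding; the grammar's arithmetic gives the same bound either way, and the tma_info_thread_uptb threshold further down gets the identical treatment. A two-line check (Python; the numbers come straight from the old expressions):

    # Folded threshold constants; values taken directly from the old expressions.
    assert 6 * 2 + 1 == 13   # tma_info_inst_mix_iptb: "< 6 * 2 + 1" -> "< 13"
    assert 6 * 1.5 == 9      # tma_info_thread_uptb:   "< 6 * 1.5"   -> "< 9"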
{
@@ -1331,7 +1378,7 @@
},
{
"BriefDescription": "Average Parallel L2 cache miss demand Loads",
- "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=0x1@",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@",
"MetricGroup": "Memory_BW;Offcore",
"MetricName": "tma_info_memory_latency_load_l2_mlp"
},
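For the cmask notation used throughout these expressions: `cpu@EVENT\,cmask\=1@` counts cycles in which the event's per-cycle count is at least 1, so this metric divides the summed occupancy of outstanding demand data reads by the cycles with at least one such read in flight, giving the average miss-level parallelism. A minimal sketch with invented counter readings (names mirror the MetricExpr):

    # Hypothetical raw counts for one measurement interval:
    occupancy_sum = 1_200_000  # DEMAND_DATA_RD occupancy summed over all cycles
    cycles_ge_1   = 400_000    # same event with cmask=1: cycles with >= 1 in flight

    load_l2_mlp = occupancy_sum / cycles_ge_1  # average parallel L2-miss loads: 3.0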
@@ -1356,19 +1403,19 @@
{
"BriefDescription": "Off-core accesses per kilo instruction for modified write requests",
"MetricExpr": "1e3 * OCR.MODIFIED_WRITE.ANY_RESPONSE / tma_info_inst_mix_instructions",
- "MetricGroup": "Offcore",
+ "MetricGroup": "Offcore;Server",
"MetricName": "tma_info_memory_mix_offcore_mwrite_any_pki"
},
{
"BriefDescription": "Off-core accesses per kilo instruction for reads-to-core requests (speculative; including in-core HW prefetches)",
"MetricExpr": "1e3 * OCR.READS_TO_CORE.ANY_RESPONSE / tma_info_inst_mix_instructions",
- "MetricGroup": "CacheHits;Offcore",
+ "MetricGroup": "CacheHits;Offcore;Server",
"MetricName": "tma_info_memory_mix_offcore_read_any_pki"
},
{
"BriefDescription": "L3 cache misses per kilo instruction for reads-to-core requests (speculative; including in-core HW prefetches)",
"MetricExpr": "1e3 * OCR.READS_TO_CORE.L3_MISS / tma_info_inst_mix_instructions",
- "MetricGroup": "Offcore",
+ "MetricGroup": "Offcore;Server",
"MetricName": "tma_info_memory_mix_offcore_read_l3m_pki"
},
{
@@ -1394,23 +1441,23 @@
{
"BriefDescription": "Average DRAM BW for Reads-to-Core (R2C) covering for memory attached to local- and remote-socket",
"MetricExpr": "64 * OCR.READS_TO_CORE.DRAM / 1e9 / tma_info_system_time",
- "MetricGroup": "HPC;Mem;MemoryBW;SoC",
+ "MetricGroup": "HPC;Mem;MemoryBW;Offcore;Server",
"MetricName": "tma_info_memory_soc_r2c_dram_bw",
- "PublicDescription": "Average DRAM BW for Reads-to-Core (R2C) covering for memory attached to local- and remote-socket. See R2C_Offcore_BW"
+ "PublicDescription": "Average DRAM BW for Reads-to-Core (R2C) covering for memory attached to local- and remote-socket. See R2C_Offcore_BW."
},
{
"BriefDescription": "Average L3-cache miss BW for Reads-to-Core (R2C)",
"MetricExpr": "64 * OCR.READS_TO_CORE.L3_MISS / 1e9 / tma_info_system_time",
- "MetricGroup": "HPC;Mem;MemoryBW;SoC",
+ "MetricGroup": "HPC;Mem;MemoryBW;Offcore;Server",
"MetricName": "tma_info_memory_soc_r2c_l3m_bw",
- "PublicDescription": "Average L3-cache miss BW for Reads-to-Core (R2C). This covering going to DRAM or other memory off-chip memory tears. See R2C_Offcore_BW"
+ "PublicDescription": "Average L3-cache miss BW for Reads-to-Core (R2C). This covering going to DRAM or other memory off-chip memory tears. See R2C_Offcore_BW."
},
{
"BriefDescription": "Average Off-core access BW for Reads-to-Core (R2C)",
"MetricExpr": "64 * OCR.READS_TO_CORE.ANY_RESPONSE / 1e9 / tma_info_system_time",
- "MetricGroup": "HPC;Mem;MemoryBW;SoC",
+ "MetricGroup": "HPC;Mem;MemoryBW;Offcore;Server",
"MetricName": "tma_info_memory_soc_r2c_offcore_bw",
- "PublicDescription": "Average Off-core access BW for Reads-to-Core (R2C). R2C account for demand or prefetch load/RFO/code access that fill data into the Core caches"
+ "PublicDescription": "Average Off-core access BW for Reads-to-Core (R2C). R2C account for demand or prefetch load/RFO/code access that fill data into the Core caches."
},
{
"BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
@@ -1452,8 +1499,8 @@
"MetricName": "tma_info_memory_tlb_store_stlb_mpki"
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per core",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=0x1@)",
+ "BriefDescription": "Mem;Backend;CacheHits",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute"
},
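The ILP expression divides executed uops by cycles in which at least one uop executed, halving the denominator under SMT so the ratio reads per physical core. A sketch under invented values:

    # Invented counters; mirrors the if/else shape of the MetricExpr.
    uops_executed      = 8_000_000  # UOPS_EXECUTED.THREAD
    core_cycles_ge_1   = 5_000_000  # UOPS_EXECUTED.CORE_CYCLES_GE_1
    thread_cycles_ge_1 = 2_600_000  # UOPS_EXECUTED.THREAD with cmask=1
    smt_on = True

    ilp = uops_executed / (core_cycles_ge_1 / 2 if smt_on else thread_cycles_ge_1)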
@@ -1474,18 +1521,18 @@
"MetricExpr": "INST_RETIRED.ANY / ASSISTS.ANY",
"MetricGroup": "MicroSeq;Pipeline;Ret;Retire",
"MetricName": "tma_info_pipeline_ipassist",
- "MetricThreshold": "tma_info_pipeline_ipassist < 100000",
+ "MetricThreshold": "tma_info_pipeline_ipassist < 100e3",
"PublicDescription": "Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)"
},
{
- "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired",
- "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=0x1@",
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
"MetricGroup": "Pipeline;Ret",
"MetricName": "tma_info_pipeline_retire"
},
{
"BriefDescription": "Estimated fraction of retirement-cycles dealing with repeat instructions",
- "MetricExpr": "INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=0x1@",
+ "MetricExpr": "INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
"MetricGroup": "MicroSeq;Pipeline;Ret",
"MetricName": "tma_info_pipeline_strings_cycles",
"MetricThreshold": "tma_info_pipeline_strings_cycles > 0.1"
@@ -1499,7 +1546,7 @@
},
{
"BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]",
- "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / tma_info_system_time",
+ "MetricExpr": "tma_info_system_turbo_utilization * msr@tsc@ / 1e9 / tma_info_system_time",
"MetricGroup": "Power;Summary",
"MetricName": "tma_info_system_core_frequency"
},
@@ -1511,16 +1558,28 @@
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
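Both frequency and utilization now read the timestamp counter through the msr PMU (`msr@tsc@`) instead of a bare `TSC` alias. Utilization itself is simply unhalted reference cycles over elapsed TSC ticks; with invented numbers:

    # Invented values for a 1-second interval on a machine with a 2.4 GHz TSC:
    ref_tsc_unhalted = 6.0e9  # CPU_CLK_UNHALTED.REF_TSC summed over all CPUs
    tsc_elapsed      = 2.4e9  # msr@tsc@ ticks in the interval

    cpus_utilized = ref_tsc_unhalted / tsc_elapsed  # ~2.5 CPUs busy on average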
{
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for reads [GB / sec]",
+ "MetricExpr": "(64 * UNC_CXLCM_RxC_PACK_BUF_INSERTS.MEM_DATA / 1e9 / tma_info_system_time if #has_pmem > 0 else 0)",
+ "MetricGroup": "MemOffcore;MemoryBW;Server;SoC",
+ "MetricName": "tma_info_system_cxl_mem_read_bw"
+ },
+ {
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for Writes [GB / sec]",
+ "MetricExpr": "(64 * UNC_CXLDP_TxC_AGF_INSERTS.M2S_DATA / 1e9 / tma_info_system_time if #has_pmem > 0 else 0)",
+ "MetricGroup": "MemOffcore;MemoryBW;Server;SoC",
+ "MetricName": "tma_info_system_cxl_mem_write_bw"
+ },
+ {
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
"MetricExpr": "64 * (UNC_M_CAS_COUNT_SCH0.RD + UNC_M_CAS_COUNT_SCH1.RD + UNC_M_CAS_COUNT_SCH0.WR + UNC_M_CAS_COUNT_SCH1.WR) / 1e9 / tma_info_system_time",
"MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW",
"MetricName": "tma_info_system_dram_bw_use",
- "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth, tma_sq_full"
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth, tma_sq_full"
},
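Each DRAM CAS command moves one 64-byte cache line, so the bandwidth formula is 64 bytes times the read-plus-write CAS counts across both schedulers, scaled to gigabytes per elapsed second. With invented counts:

    cas_rd  = 3.0e9  # UNC_M_CAS_COUNT_SCH0.RD + UNC_M_CAS_COUNT_SCH1.RD
    cas_wr  = 1.0e9  # UNC_M_CAS_COUNT_SCH0.WR + UNC_M_CAS_COUNT_SCH1.WR
    seconds = 10.0   # tma_info_system_time

    dram_bw_gbs = 64 * (cas_rd + cas_wr) / 1e9 / seconds  # 25.6 GB/s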
{
"BriefDescription": "Giga Floating Point Operations Per Second",
@@ -1548,14 +1607,13 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
"MetricGroup": "Branches;OS",
"MetricName": "tma_info_system_ipfarbranch",
- "MetricThreshold": "tma_info_system_ipfarbranch < 1000000"
+ "MetricThreshold": "tma_info_system_ipfarbranch < 1e6"
},
{
"BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
"MetricGroup": "OS",
- "MetricName": "tma_info_system_kernel_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_system_kernel_cpi"
},
{
"BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
@@ -1566,14 +1624,14 @@
},
{
"BriefDescription": "Average latency of data read request to external DRAM memory [in nanoseconds]",
- "MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR) / cha_0@event\\=0x0@",
+ "MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR) / uncore_cha_0@event\\=0x1@",
"MetricGroup": "MemOffcore;MemoryLat;Server;SoC",
"MetricName": "tma_info_system_mem_dram_read_latency",
"PublicDescription": "Average latency of data read request to external DRAM memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches"
},
{
"BriefDescription": "Average number of parallel data read requests to external memory",
- "MetricExpr": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / cha@UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD\\,thresh\\=0x1@",
+ "MetricExpr": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD@thresh\\=1@",
"MetricGroup": "Mem;MemoryBW;SoC",
"MetricName": "tma_info_system_mem_parallel_reads",
"PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches"
@@ -1599,7 +1657,7 @@
},
{
"BriefDescription": "Socket actual clocks when any core is active on that socket",
- "MetricExpr": "cha_0@event\\=0x0@",
+ "MetricExpr": "uncore_cha_0@event\\=0x1@",
"MetricGroup": "SoC",
"MetricName": "tma_info_system_socket_clks"
},
@@ -1629,7 +1687,7 @@
"MetricName": "tma_info_system_upi_data_transmit_bw"
},
{
- "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active",
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Pipeline",
"MetricName": "tma_info_thread_clks"
@@ -1638,15 +1696,14 @@
"BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / tma_info_thread_ipc",
"MetricGroup": "Mem;Pipeline",
- "MetricName": "tma_info_thread_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_thread_cpi"
},
{
"BriefDescription": "The ratio of Executed- by Issued-Uops",
"MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
"MetricGroup": "Cor;Pipeline",
"MetricName": "tma_info_thread_execute_per_issue",
- "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage"
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
},
{
"BriefDescription": "Instructions Per Cycle (per Logical Processor)",
@@ -1656,13 +1713,13 @@
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
- "MetricExpr": "slots",
+ "MetricExpr": "TOPDOWN.SLOTS",
"MetricGroup": "TmaL1;tma_L1_group",
"MetricName": "tma_info_thread_slots"
},
{
"BriefDescription": "Fraction of Physical Core issue-slots utilized by this Logical Processor",
- "MetricExpr": "(tma_info_thread_slots / (slots / 2) if #SMT_on else 1)",
+ "MetricExpr": "(tma_info_thread_slots / (TOPDOWN.SLOTS / 2) if #SMT_on else 1)",
"MetricGroup": "SMT;TmaL1;tma_L1_group",
"MetricName": "tma_info_thread_slots_utilization"
},
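As the description notes, the slots event counts per logical processor from ICL onward, so under SMT a thread's slots are measured against half of the physical core's budget, while a thread with SMT off owns the whole core and its utilization is defined as 1. A sketch, values invented:

    thread_slots = 3.0e9  # TOPDOWN.SLOTS observed by this logical processor
    core_slots   = 8.0e9  # TOPDOWN.SLOTS for the whole physical core
    smt_on = True

    slots_util = thread_slots / (core_slots / 2) if smt_on else 1.0  # 0.75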
@@ -1678,14 +1735,14 @@
"MetricExpr": "tma_retiring * tma_info_thread_slots / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
- "MetricThreshold": "tma_info_thread_uptb < 6 * 1.5"
+ "MetricThreshold": "tma_info_thread_uptb < 9"
},
{
- "BriefDescription": "This metric represents fraction of cycles where the Integer Divider unit was active",
+ "BriefDescription": "This metric represents fraction of cycles where the Integer Divider unit was active.",
"MetricExpr": "tma_divider - tma_fp_divider",
"MetricGroup": "TopdownL4;tma_L4_group;tma_divider_group",
"MetricName": "tma_int_divider",
- "MetricThreshold": "tma_int_divider > 0.2 & tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_int_divider > 0.2 & (tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
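A recurring change in these hunks adds explicit parentheses to MetricThreshold chains, turning `a & b & c & d` into `a & (b & (c & d))`. Since `&` is an ordinary logical AND in the metric grammar, the grouping only makes the parent-node gating explicit; the value is unchanged. An exhaustive check, modeling `&` with Python's `and`:

    from itertools import product

    for a, b, c, d in product([False, True], repeat=4):
        assert (a and b and c and d) == (a and (b and (c and d)))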
{
@@ -1694,7 +1751,7 @@
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_int_operations",
"MetricThreshold": "tma_int_operations > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents overall Integer (Int) select operations fraction the CPU has executed (retired). Vector/Matrix Int operations and shuffles are counted. Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain",
+ "PublicDescription": "This metric represents overall Integer (Int) select operations fraction the CPU has executed (retired). Vector/Matrix Int operations and shuffles are counted. Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain.",
"ScaleUnit": "100%"
},
{
@@ -1702,8 +1759,8 @@
"MetricExpr": "(INT_VEC_RETIRED.ADD_128 + INT_VEC_RETIRED.VNNI_128) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;IntVector;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group;tma_issue2P",
"MetricName": "tma_int_vector_128b",
- "MetricThreshold": "tma_int_vector_128b > 0.1 & tma_int_operations > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents 128-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_int_vector_128b > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents 128-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1711,8 +1768,8 @@
"MetricExpr": "(INT_VEC_RETIRED.ADD_256 + INT_VEC_RETIRED.MUL_256 + INT_VEC_RETIRED.VNNI_256) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;IntVector;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group;tma_issue2P",
"MetricName": "tma_int_vector_256b",
- "MetricThreshold": "tma_int_vector_256b > 0.1 & tma_int_operations > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD/MUL or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_int_vector_256b > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD/MUL or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1720,8 +1777,8 @@
"MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
- "MetricThreshold": "tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS, FRONTEND_RETIRED.ITLB_MISS",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
"ScaleUnit": "100%"
},
{
@@ -1729,17 +1786,17 @@
"MetricExpr": "max((EXE_ACTIVITY.BOUND_ON_LOADS - MEMORY_ACTIVITY.STALLS_L1D_MISS) / tma_info_thread_clks, 0)",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
"MetricName": "tma_l1_bound",
- "MetricThreshold": "tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 Data (L1D) cache. The L1D cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1D. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache",
+ "BriefDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache",
"MetricExpr": "min(2 * (MEM_INST_RETIRED.ALL_LOADS - MEM_LOAD_RETIRED.FB_HIT - MEM_LOAD_RETIRED.L1_MISS) * 20 / 100, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - MEMORY_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_l1_latency_dependency",
- "MetricThreshold": "tma_l1_latency_dependency > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache. The short latency of the L1D cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
+ "MetricThreshold": "tma_l1_latency_dependency > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache. The short latency of the L1D cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
"ScaleUnit": "100%"
},
{
@@ -1747,16 +1804,16 @@
"MetricExpr": "(MEMORY_ACTIVITY.STALLS_L1D_MISS - MEMORY_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks",
"MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
- "MetricThreshold": "tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L2 cache under unloaded scenarios (possibly L2 latency limited)",
- "MetricExpr": "(min(MEM_LOAD_RETIRED.L2_HIT * MEM_LOAD_RETIRED.L2_HIT:R, MEM_LOAD_RETIRED.L2_HIT * (4.4 * tma_info_system_core_frequency)) if 0 < MEM_LOAD_RETIRED.L2_HIT:R else MEM_LOAD_RETIRED.L2_HIT * (4.4 * tma_info_system_core_frequency)) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * min(MEM_LOAD_RETIRED.L2_HIT:R, 4.4 * tma_info_system_core_frequency) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_l2_bound_group",
"MetricName": "tma_l2_hit_latency",
- "MetricThreshold": "tma_l2_hit_latency > 0.05 & tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l2_hit_latency > 0.05 & (tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L2 cache under unloaded scenarios (possibly L2 latency limited). Avoiding L1 cache misses (i.e. L1 misses/L2 hits) will improve the latency. Sample with: MEM_LOAD_RETIRED.L2_HIT",
"ScaleUnit": "100%"
},
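The rewritten expression is an algebraic simplification rather than a behavioral change, provided the `:R` retirement-latency sample is positive: for non-negative n, `min(n * r, n * c)` equals `n * min(r, c)`, and the old conditional only guarded the no-sample r == 0 case. The L3 variant below is simplified the same way, with the 37 - 4.4 = 32.6 cycle constant folded in. A check with invented values:

    n = 1.0e6      # MEM_LOAD_RETIRED.L2_HIT
    r = 12.0       # MEM_LOAD_RETIRED.L2_HIT:R (avg latency), assumed > 0
    c = 4.4 * 3.0  # 4.4 * tma_info_system_core_frequency

    old = min(n * r, n * c) if 0 < r else n * c
    new = n * min(r, c)
    assert old == new  # identical for any n >= 0 and r > 0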
@@ -1765,17 +1822,17 @@
"MetricExpr": "(MEMORY_ACTIVITY.STALLS_L2_MISS - MEMORY_ACTIVITY.STALLS_L3_MISS) / tma_info_thread_clks",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
- "MetricThreshold": "tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
- "MetricExpr": "(min(MEM_LOAD_RETIRED.L3_HIT * MEM_LOAD_RETIRED.L3_HIT:R, MEM_LOAD_RETIRED.L3_HIT * (37 * tma_info_system_core_frequency) - 4.4 * tma_info_system_core_frequency) if 0 < MEM_LOAD_RETIRED.L3_HIT:R else MEM_LOAD_RETIRED.L3_HIT * (37 * tma_info_system_core_frequency) - 4.4 * tma_info_system_core_frequency) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "MEM_LOAD_RETIRED.L3_HIT * min(MEM_LOAD_RETIRED.L3_HIT:R, 32.6 * tma_info_system_core_frequency) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
- "MetricThreshold": "tma_l3_hit_latency > 0.1 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT. Related metrics: tma_bottleneck_cache_memory_latency, tma_branch_resteers, tma_mem_latency, tma_store_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_bottleneck_data_cache_memory_latency, tma_mem_latency",
"ScaleUnit": "100%"
},
{
@@ -1783,19 +1840,19 @@
"MetricExpr": "DECODE.LCP / tma_info_thread_clks",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_lcp",
- "MetricThreshold": "tma_lcp > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation)",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
"DefaultMetricgroupName": "TopdownL2",
"MetricExpr": "max(0, tma_retiring - tma_heavy_operations)",
"MetricGroup": "Default;Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_light_operations",
"MetricThreshold": "tma_light_operations > 0.6",
"MetricgroupNoGroup": "TopdownL2;Default",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
"ScaleUnit": "100%"
},
{
@@ -1812,7 +1869,7 @@
"MetricExpr": "max(0, tma_dtlb_load - tma_load_stlb_miss)",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
"MetricName": "tma_load_stlb_hit",
- "MetricThreshold": "tma_load_stlb_hit > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_hit > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
@@ -1820,31 +1877,31 @@
"MetricExpr": "DTLB_LOAD_MISSES.WALK_ACTIVE / tma_info_thread_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
"MetricName": "tma_load_stlb_miss",
- "MetricThreshold": "tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_1G / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_1g",
- "MetricThreshold": "tma_load_stlb_miss_1g > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_1g > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_2m",
- "MetricThreshold": "tma_load_stlb_miss_2m > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_2m > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_4K / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_4k",
- "MetricThreshold": "tma_load_stlb_miss_4k > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_4k > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
@@ -1852,16 +1909,17 @@
"MetricExpr": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM:R * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "Server;TopdownL5;tma_L5_group;tma_mem_latency_group",
"MetricName": "tma_local_mem",
- "MetricThreshold": "tma_local_mem > 0.1 & tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_local_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. Sample with: MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "MEM_INST_RETIRED.LOCK_LOADS * MEM_INST_RETIRED.LOCK_LOADS:R / tma_info_thread_clks",
"MetricGroup": "LockCont;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
- "MetricThreshold": "tma_lock_latency > 0.2 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
"ScaleUnit": "100%"
},
@@ -1877,20 +1935,20 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to memory bandwidth Allocation feature (RDT's memory bandwidth throttling)",
+ "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to memory bandwidth Allocation feature (RDT's memory bandwidth throttling).",
"MetricExpr": "INT_MISC.MBA_STALLS / tma_info_thread_clks",
"MetricGroup": "MemoryBW;Offcore;Server;TopdownL5;tma_L5_group;tma_mem_bandwidth_group",
"MetricName": "tma_mba_stalls",
- "MetricThreshold": "tma_mba_stalls > 0.1 & tma_mem_bandwidth > 0.2 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_mba_stalls > 0.1 & (tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
- "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.DATA_RD\\,cmask\\=0x4@) / tma_info_thread_clks",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
- "MetricThreshold": "tma_mem_bandwidth > 0.2 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
"ScaleUnit": "100%"
},
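The cmask value encodes the TMA bandwidth heuristic: cycles with four or more outstanding demand data reads are charged to bandwidth, and tma_mem_latency (defined just below) takes the remaining data-read-occupied cycles. With invented values:

    clks           = 1.0e9  # tma_info_thread_clks
    cycles_ge_4    = 2.0e8  # OFFCORE_REQUESTS_OUTSTANDING.DATA_RD, cmask=4
    cycles_with_rd = 5.0e8  # OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD

    tma_mem_bandwidth = min(clks, cycles_ge_4) / clks                         # 0.2
    tma_mem_latency   = min(clks, cycles_with_rd) / clks - tma_mem_bandwidth  # 0.3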
{
@@ -1898,32 +1956,31 @@
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
"MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
- "MetricThreshold": "tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_bottleneck_cache_memory_latency, tma_l3_hit_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_bottleneck_data_cache_memory_latency, tma_l3_hit_latency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
"DefaultMetricgroupName": "TopdownL2",
- "MetricExpr": "topdown\\-mem\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "topdown\\-mem\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound)",
"MetricGroup": "Backend;Default;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_memory_bound",
"MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2;Default",
- "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two)",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
"ScaleUnit": "100%"
},
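The dropped `+ 0 * slots` term contributes exactly zero to the value; my assumption is that it existed only to force the slots event into the same event group as the topdown-* counters and is no longer needed. The metric itself is a plain ratio over the four level-1 topdown slices:

    # Invented level-1 topdown slice values (the denominator normalizes them):
    fe_bound, bad_spec, retiring, be_bound = 0.15, 0.05, 0.45, 0.35
    mem_bound_slice = 0.25  # topdown-mem-bound

    tma_memory_bound = mem_bound_slice / (fe_bound + bad_spec + retiring + be_bound)
    # Adding 0 * slots to the numerator would leave this value unchanged.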
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to LFENCE Instructions",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to LFENCE Instructions.",
"MetricExpr": "13 * MISC2_RETIRED.LFENCE / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_memory_fence",
- "MetricThreshold": "tma_memory_fence > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_memory_fence > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations , uops for memory load or store accesses",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
"MetricExpr": "tma_light_operations * MEM_UOP_RETIRED.ANY / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_memory_operations",
@@ -1944,7 +2001,7 @@
"MetricExpr": "tma_branch_mispredicts / tma_bad_speculation * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
"MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
"MetricName": "tma_mispredicts_resteers",
- "MetricThreshold": "tma_mispredicts_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_bottleneck_mispredictions, tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost",
"ScaleUnit": "100%"
},
@@ -1958,17 +2015,17 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued , the Count Domain; [ADL+] cycles)",
+ "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles)",
"MetricExpr": "160 * ASSISTS.SSE_AVX_MIX / tma_info_thread_clks",
"MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group",
"MetricName": "tma_mixing_vectors",
"MetricThreshold": "tma_mixing_vectors > 0.05",
- "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued , the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
+ "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the Microcode Sequencer (MS) unit - see Microcode_Sequencer node for details",
- "MetricExpr": "max(IDQ.MS_CYCLES_ANY, cpu@UOPS_RETIRED.MS\\,cmask\\=0x1@ / (UOPS_RETIRED.SLOTS / UOPS_ISSUED.ANY)) / tma_info_core_core_clks / 2",
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the Microcode Sequencer (MS) unit - see Microcode_Sequencer node for details.",
+ "MetricExpr": "max(IDQ.MS_CYCLES_ANY, cpu@UOPS_RETIRED.MS\\,cmask\\=1@ / (UOPS_RETIRED.SLOTS / UOPS_ISSUED.ANY)) / tma_info_core_core_clks / 2.4",
"MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_ms",
"MetricThreshold": "tma_ms > 0.05 & tma_fetch_bandwidth > 0.2",
@@ -1976,10 +2033,10 @@
},
{
"BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS)",
- "MetricExpr": "3 * cpu@UOPS_RETIRED.MS\\,cmask\\=0x1\\,edge\\=0x1@ / (UOPS_RETIRED.SLOTS / UOPS_ISSUED.ANY) / tma_info_thread_clks",
+ "MetricExpr": "3 * cpu@UOPS_RETIRED.MS\\,cmask\\=1\\,edge@ / (UOPS_RETIRED.SLOTS / UOPS_ISSUED.ANY) / tma_info_thread_clks",
"MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
"MetricName": "tma_ms_switches",
- "MetricThreshold": "tma_ms_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_bottleneck_irregular_overhead, tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
"ScaleUnit": "100%"
},
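The `edge` modifier (now spelled without the redundant `=0x1`) converts the cmask'd level signal into a transition count: the number of 0-to-nonzero flips of "at least one MS uop retired this cycle", i.e. distinct switches into the Microcode Sequencer, each charged roughly 3 penalty cycles here. A toy trace makes the semantics concrete:

    # Per-cycle MS-uop retirement counts for ten cycles (invented trace):
    ms_uops = [0, 0, 4, 4, 0, 0, 4, 4, 4, 0]

    active = [n >= 1 for n in ms_uops]  # cmask=1: level signal
    switches = sum(cur and not prev
                   for prev, cur in zip([False] + active, active))  # edge: 2
    penalty_cycles = 3 * switches  # numerator shape of tma_ms_switches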
@@ -1989,7 +2046,7 @@
"MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_non_fused_branches",
"MetricThreshold": "tma_non_fused_branches > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused.",
"ScaleUnit": "100%"
},
{
@@ -1997,12 +2054,13 @@
"MetricExpr": "tma_light_operations * INST_RETIRED.NOP / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "BvBO;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_nop_instructions",
- "MetricThreshold": "tma_nop_instructions > 0.1 & tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_int_operations + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches))",
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_other_light_ops",
@@ -2011,19 +2069,19 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types)",
+ "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).",
"MetricExpr": "max(tma_branch_mispredicts * (1 - BR_MISP_RETIRED.ALL_BRANCHES / (INT_MISC.CLEARS_COUNT - MACHINE_CLEARS.COUNT)), 0.0001)",
"MetricGroup": "BrMispredicts;BvIO;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_other_mispredicts",
- "MetricThreshold": "tma_other_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering",
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.",
"MetricExpr": "max(tma_machine_clears * (1 - MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.COUNT), 0.0001)",
"MetricGroup": "BvIO;Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_other_nukes",
- "MetricThreshold": "tma_other_nukes > 0.05 & tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
@@ -2032,7 +2090,7 @@
"MetricGroup": "TopdownL5;tma_L5_group;tma_assists_group",
"MetricName": "tma_page_faults",
"MetricThreshold": "tma_page_faults > 0.05",
- "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Page Faults. A Page Fault may apply on first application access to a memory page. Note operating system handling of page faults accounts for the majority of its cost",
+ "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Page Faults. A Page Fault may apply on first application access to a memory page. Note operating system handling of page faults accounts for the majority of its cost.",
"ScaleUnit": "100%"
},
{
@@ -2041,7 +2099,7 @@
"MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_0",
"MetricThreshold": "tma_port_0 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -2050,7 +2108,7 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_1",
"MetricThreshold": "tma_port_1 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -2059,86 +2117,87 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_6",
"MetricThreshold": "tma_port_6 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "((tma_ports_utilized_0 * tma_info_thread_clks + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_3_PORTS_UTIL)) / tma_info_thread_clks if ARITH.DIV_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - EXE_ACTIVITY.BOUND_ON_LOADS else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_3_PORTS_UTIL) / tma_info_thread_clks)",
"MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_ports_utilization",
- "MetricThreshold": "tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricConstraint": "NO_THRESHOLD_AND_NMI",
"MetricExpr": "max(EXE_ACTIVITY.EXE_BOUND_0_PORTS - RESOURCE_STALLS.SCOREBOARD, 0) / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
- "MetricThreshold": "tma_ports_utilized_0 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricConstraint": "NO_THRESHOLD_AND_NMI",
"MetricExpr": "EXE_ACTIVITY.1_PORTS_UTIL / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_1",
- "MetricThreshold": "tma_ports_utilized_1 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Sample with: EXE_ACTIVITY.1_PORTS_UTIL. Related metrics: tma_l1_bound",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "EXE_ACTIVITY.2_PORTS_UTIL / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_2",
- "MetricThreshold": "tma_ports_utilized_2 > 0.15 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Sample with: EXE_ACTIVITY.2_PORTS_UTIL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Sample with: EXE_ACTIVITY.2_PORTS_UTIL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_thread_clks",
"MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
- "MetricThreshold": "tma_ports_utilized_3m > 0.4 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues",
- "MetricExpr": "(MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM:R + MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD:R) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "(MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * PEBS + MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * PEBS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "Offcore;Server;Snoop;TopdownL5;tma_L5_group;tma_issueSyncxn;tma_mem_latency_group",
"MetricName": "tma_remote_cache",
- "MetricThreshold": "tma_remote_cache > 0.05 & tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues. This is caused often due to non-optimal NUMA allocations. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM, MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_remote_cache > 0.05 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM_PS;MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD_PS. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory",
- "MetricExpr": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM:R * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * PEBS * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "Server;Snoop;TopdownL5;tma_L5_group;tma_mem_latency_group",
"MetricName": "tma_remote_mem",
- "MetricThreshold": "tma_remote_mem > 0.1 & tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
+ "MetricThreshold": "tma_remote_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM_PS",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by (indirect) RET instructions",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by (indirect) RET instructions.",
"MetricExpr": "BR_MISP_RETIRED.RET_COST * BR_MISP_RETIRED.RET_COST:R / tma_info_thread_clks",
"MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_ret_mispredicts",
- "MetricThreshold": "tma_ret_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_ret_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound)",
"MetricGroup": "BvUW;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
@@ -2151,7 +2210,7 @@
"MetricExpr": "RESOURCE_STALLS.SCOREBOARD / tma_info_thread_clks + tma_c02_wait",
"MetricGroup": "BvIO;PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
"MetricName": "tma_serializing_operation",
- "MetricThreshold": "tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
"ScaleUnit": "100%"
},
@@ -2160,36 +2219,35 @@
"MetricExpr": "tma_light_operations * INT_VEC_RETIRED.SHUFFLES / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "HPC;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_shuffles_256b",
- "MetricThreshold": "tma_shuffles_256b > 0.1 & tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring Shuffle operations of 256-bit vector size (FP or Integer). Shuffles may incur slow cross \"vector lane\" data transfers",
+ "MetricThreshold": "tma_shuffles_256b > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring Shuffle operations of 256-bit vector size (FP or Integer). Shuffles may incur slow cross \"vector lane\" data transfers.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "CPU_CLK_UNHALTED.PAUSE / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_slow_pause",
- "MetricThreshold": "tma_slow_pause > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. Sample with: CPU_CLK_UNHALTED.PAUSE_INST",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary",
- "MetricExpr": "(min(MEM_INST_RETIRED.SPLIT_LOADS * MEM_INST_RETIRED.SPLIT_LOADS:R, MEM_INST_RETIRED.SPLIT_LOADS * tma_info_memory_load_miss_real_latency) if 0 < MEM_INST_RETIRED.SPLIT_LOADS:R else MEM_INST_RETIRED.SPLIT_LOADS * tma_info_memory_load_miss_real_latency) / tma_info_thread_clks",
+ "MetricExpr": "MEM_INST_RETIRED.SPLIT_LOADS * min(MEM_INST_RETIRED.SPLIT_LOADS:R, tma_info_memory_load_miss_real_latency) / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_split_loads",
"MetricThreshold": "tma_split_loads > 0.3",
- "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS_PS",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents rate of split store accesses",
- "MetricExpr": "(min(MEM_INST_RETIRED.SPLIT_STORES * MEM_INST_RETIRED.SPLIT_STORES:R, MEM_INST_RETIRED.SPLIT_STORES) if 0 < MEM_INST_RETIRED.SPLIT_STORES:R else MEM_INST_RETIRED.SPLIT_STORES) / tma_info_thread_clks",
+ "MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES * min(MEM_INST_RETIRED.SPLIT_STORES:R, 1) / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
"MetricName": "tma_split_stores",
- "MetricThreshold": "tma_split_stores > 0.2 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
"ScaleUnit": "100%"
},
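
Both rewritten MetricExpr formulas above follow the same shape: an event count multiplied by a per-event cost, where the cost is the PEBS retire latency sampled via the :R suffix, clamped by a fallback (the derived load-miss latency for loads, 1 cycle for stores). A back-of-the-envelope in Python, with made-up counter values and assuming that reading of :R:

split_loads = 2_000_000        # MEM_INST_RETIRED.SPLIT_LOADS (hypothetical)
retire_latency = 7.0           # MEM_INST_RETIRED.SPLIT_LOADS:R, cycles (hypothetical)
fallback = 11.5                # tma_info_memory_load_miss_real_latency (hypothetical)
thread_clks = 1_000_000_000    # tma_info_thread_clks (hypothetical)

# tma_split_loads: count * min(sampled latency, derived fallback) / clocks
tma_split_loads = split_loads * min(retire_latency, fallback) / thread_clks
print(f"tma_split_loads = {tma_split_loads:.1%}")    # -> 1.4%

# tma_split_stores clamps the per-store cost at 1 cycle instead:
split_stores = 5_000_000       # MEM_INST_RETIRED.SPLIT_STORES (hypothetical)
store_latency = 2.5            # MEM_INST_RETIRED.SPLIT_STORES:R (hypothetical)
tma_split_stores = split_stores * min(store_latency, 1) / thread_clks
print(f"tma_split_stores = {tma_split_stores:.1%}")  # -> 0.5%
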
{
@@ -2197,8 +2255,8 @@
"MetricExpr": "(XQ.FULL_CYCLES + L1D_PEND_MISS.L2_STALLS) / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
- "MetricThreshold": "tma_sq_full > 0.3 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
"ScaleUnit": "100%"
},
{
@@ -2206,8 +2264,8 @@
"MetricExpr": "EXE_ACTIVITY.BOUND_ON_STORES / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_store_bound",
- "MetricThreshold": "tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES_PS",
"ScaleUnit": "100%"
},
{
@@ -2215,8 +2273,8 @@
"MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
- "MetricThreshold": "tma_store_fwd_blk > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
"ScaleUnit": "100%"
},
{
@@ -2224,8 +2282,8 @@
"MetricExpr": "(MEM_STORE_RETIRED.L2_HIT * 10 * (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) + (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
"MetricGroup": "BvML;LockCont;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
- "MetricThreshold": "tma_store_latency > 0.1 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_branch_resteers, tma_fb_full, tma_l3_hit_latency, tma_lock_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
"ScaleUnit": "100%"
},
{
@@ -2242,7 +2300,7 @@
"MetricExpr": "max(0, tma_dtlb_store - tma_store_stlb_miss)",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
"MetricName": "tma_store_stlb_hit",
- "MetricThreshold": "tma_store_stlb_hit > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_hit > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
@@ -2250,31 +2308,31 @@
"MetricExpr": "DTLB_STORE_MISSES.WALK_ACTIVE / tma_info_core_core_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
"MetricName": "tma_store_stlb_miss",
- "MetricThreshold": "tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_1G / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_1g",
- "MetricThreshold": "tma_store_stlb_miss_1g > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_1g > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_2m",
- "MetricThreshold": "tma_store_stlb_miss_2m > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_2m > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_4K / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_4k",
- "MetricThreshold": "tma_store_stlb_miss_4k > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_4k > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
@@ -2282,7 +2340,7 @@
"MetricExpr": "9 * OCR.STREAMING_WR.ANY_RESPONSE / tma_info_thread_clks",
"MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueSmSt;tma_store_bound_group",
"MetricName": "tma_streaming_stores",
- "MetricThreshold": "tma_streaming_stores > 0.2 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_streaming_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should Streaming stores be a bottleneck. Sample with: OCR.STREAMING_WR.ANY_RESPONSE. Related metrics: tma_fb_full",
"ScaleUnit": "100%"
},
@@ -2291,7 +2349,7 @@
"MetricExpr": "INT_MISC.UNKNOWN_BRANCH_CYCLES / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
- "MetricThreshold": "tma_unknown_branches > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: FRONTEND_RETIRED.UNKNOWN_BRANCH",
"ScaleUnit": "100%"
},
@@ -2300,8 +2358,8 @@
"MetricExpr": "tma_retiring * UOPS_EXECUTED.X87 / UOPS_EXECUTED.THREAD",
"MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
"MetricName": "tma_x87_use",
- "MetricThreshold": "tma_x87_use > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
"ScaleUnit": "100%"
},
{
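
Most hunks in this metrics file only add parentheses to MetricThreshold strings. Since & is a logical AND there, the grouping is semantically neutral; it spells out that a leaf metric is highlighted only when each of its ancestors in the TMA tree also exceeds its own threshold. A sketch of that evaluation in Python (not perf's actual expression parser; the metric values are hypothetical):

# Mimics how a nested MetricThreshold such as
#   "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 &
#    (tma_core_bound > 0.1 & tma_backend_bound > 0.2))"
# flags a leaf node only when every ancestor is above its threshold too.
metrics = {  # hypothetical values, as fractions of slots/clocks
    "tma_backend_bound": 0.35,
    "tma_core_bound": 0.18,
    "tma_ports_utilization": 0.22,
    "tma_ports_utilized_0": 0.25,
}

def above(name: str, threshold: float) -> bool:
    """Leaf test: is a single metric above its threshold?"""
    return metrics[name] > threshold

# The parenthesized form groups the parent conditions as one sub-tree:
flagged = above("tma_ports_utilized_0", 0.2) and (
    above("tma_ports_utilization", 0.15)
    and (above("tma_core_bound", 0.1) and above("tma_backend_bound", 0.2))
)
print("tma_ports_utilized_0 highlighted:", flagged)  # -> True

The same nesting pattern recurs in every rewritten threshold above, from the L3 tma_ports_utilization node down to the L6 tma_store_stlb_miss_* leaves.
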
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/memory.json b/tools/perf/pmu-events/arch/x86/graniterapids/memory.json
index 5da5a10275ba..96f40390becf 100644
--- a/tools/perf/pmu-events/arch/x86/graniterapids/memory.json
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/memory.json
@@ -72,7 +72,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_1024",
"MSRIndex": "0x3F6",
"MSRValue": "0x400",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 1024 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 1024 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0",
"SampleAfterValue": "53",
"UMask": "0x1"
},
@@ -84,7 +84,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
"MSRValue": "0x80",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0",
"SampleAfterValue": "1009",
"UMask": "0x1"
},
@@ -96,7 +96,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
"MSRValue": "0x10",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0",
"SampleAfterValue": "20011",
"UMask": "0x1"
},
@@ -108,7 +108,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_2048",
"MSRIndex": "0x3F6",
"MSRValue": "0x800",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 2048 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 2048 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0",
"SampleAfterValue": "23",
"UMask": "0x1"
},
@@ -120,7 +120,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
"MSRIndex": "0x3F6",
"MSRValue": "0x100",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0",
"SampleAfterValue": "503",
"UMask": "0x1"
},
@@ -132,7 +132,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
"MSRValue": "0x20",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -144,7 +144,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
"MSRValue": "0x4",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -156,7 +156,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
"MSRValue": "0x200",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0",
"SampleAfterValue": "101",
"UMask": "0x1"
},
@@ -168,7 +168,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
"MSRValue": "0x40",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0",
"SampleAfterValue": "2003",
"UMask": "0x1"
},
@@ -180,7 +180,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
"MSRValue": "0x8",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0",
"SampleAfterValue": "50021",
"UMask": "0x1"
},
@@ -190,17 +190,51 @@
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.STORE_SAMPLE",
- "PublicDescription": "Counts Retired memory accesses with at least 1 store operation. This PEBS event is the precisely-distributed (PDist) trigger covering all stores uops for sampling by the PEBS Store Latency Facility. The facility is described in Intel SDM Volume 3 section 19.9.8",
+ "PublicDescription": "Counts Retired memory accesses with at least 1 store operation. This PEBS event is the precisely-distributed (PDist) trigger covering all stores uops for sampling by the PEBS Store Latency Facility. The facility is described in Intel SDM Volume 3 section 19.9.8 Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x2"
},
{
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the local socket's L1, L2, or L3 caches.",
"Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the local socket's L1, L2, or L3 caches. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000001",
+ "PublicDescription": "Counts demand data reads that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -211,6 +245,40 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00001",
+ "PublicDescription": "Counts demand data reads that were not supplied by the local socket's L1, L2, or L3 caches. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000001",
+ "PublicDescription": "Counts demand data reads that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to another socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x730000001",
+ "PublicDescription": "Counts demand data reads that were supplied by DRAM attached to another socket. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -221,6 +289,29 @@
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F3FC00002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the local socket's L1, L2, or L3 caches. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -231,6 +322,7 @@
"EventName": "OCR.READS_TO_CORE.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F3FC04477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -241,6 +333,7 @@
"EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F04C04477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -251,6 +344,62 @@
"EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL_SOCKET",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x70CC04477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that missed the L3 Cache and were supplied by the local socket (DRAM or PMM), whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM or DRAM accesses that are controlled by the close or distant SNC Cluster. It does not count misses to the L3 which go to Local CXL Type 2 Memory or Local Non DRAM. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts DRAM accesses that are controlled by the close or distant SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.LOCAL_SOCKET_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x70C004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts DRAM accesses that are controlled by the close or distant SNC Cluster. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to another socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x730004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to another socket. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM or PMM attached to another socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_MEMORY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x733004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM or PMM attached to another socket. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts Demand RFOs, ItoM's, PREFECTHW's, Hardware RFO Prefetches to the L1/L2 and Streaming stores that likely resulted in a store to Memory (DRAM or PMM)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.WRITE_ESTIMATE.MEMORY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xFBFF80822",
+ "PublicDescription": "Counts Demand RFOs, ItoM's, PREFECTHW's, Hardware RFO Prefetches to the L1/L2 and Streaming stores that likely resulted in a store to Memory (DRAM or PMM) Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -286,7 +435,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED",
- "PublicDescription": "Counts the number of times RTM abort was triggered.",
+ "PublicDescription": "Counts the number of times RTM abort was triggered. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x4"
},
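
The OCR.* entries added to memory.json above all share the schema visible in the hunks: an off-core response event (EventCode 0x2A,0x2B) qualified through the auxiliary MSR pair 0x1a6/0x1a7, with a per-event MSRValue selecting the request and supplier bits. A minimal sketch that lists them straight from the JSON; the path is an assumption and should point at your kernel-tree checkout:

import json

# Hypothetical checkout-relative path to the file patched above.
path = "tools/perf/pmu-events/arch/x86/graniterapids/memory.json"
with open(path) as f:
    events = json.load(f)  # pmu-events files are flat JSON arrays

for ev in events:
    name = ev.get("EventName", "")
    if name.startswith("OCR."):
        # Each OCR event is programmed via the MSR pair in MSRIndex,
        # loaded with the request/response selector in MSRValue.
        print(f"{name:40} MSR {ev['MSRIndex']} = {ev['MSRValue']}")

Once perf is rebuilt with the updated tables, such events are normally addressable by name, e.g. perf stat -e OCR.DEMAND_DATA_RD.DRAM.
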
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/other.json b/tools/perf/pmu-events/arch/x86/graniterapids/other.json
index 8df37f303273..c0747750b1a8 100644
--- a/tools/perf/pmu-events/arch/x86/graniterapids/other.json
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/other.json
@@ -17,201 +17,28 @@
"UMask": "0x8"
},
{
- "BriefDescription": "Counts the cycles where the AMX (Advance Matrix Extension) unit is busy performing an operation.",
+ "BriefDescription": "HW_INTERRUPTS.MASKED",
"Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xb7",
- "EventName": "EXE.AMX_BUSY",
- "SampleAfterValue": "2000003",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_CODE_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x73C000004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x104000004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_DATA_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x73C000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x104000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to another socket.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_DATA_RD.REMOTE_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x730000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F3FFC0002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_RFO.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x73C000002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x104000002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts writebacks of modified cachelines and streaming stores that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.MODIFIED_WRITE.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10808",
+ "EventCode": "0xcb",
+ "EventName": "HW_INTERRUPTS.MASKED",
"SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.READS_TO_CORE.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F3FFC4477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.READS_TO_CORE.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x73C004477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.READS_TO_CORE.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x104004477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts DRAM accesses that are controlled by the close or distant SNC Cluster.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.READS_TO_CORE.LOCAL_SOCKET_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x70C004477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and were supplied by a remote socket.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.READS_TO_CORE.REMOTE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F33004477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to another socket.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.READS_TO_CORE.REMOTE_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x730004477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
+ "UMask": "0x2"
},
{
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM or PMM attached to another socket.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.READS_TO_CORE.REMOTE_MEMORY",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x733004477",
+ "BriefDescription": "HW_INTERRUPTS.PENDING_AND_MASKED",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcb",
+ "EventName": "HW_INTERRUPTS.PENDING_AND_MASKED",
"SampleAfterValue": "100003",
- "UMask": "0x1"
+ "UMask": "0x4"
},
{
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.READS_TO_CORE.SNC_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x708004477",
- "SampleAfterValue": "100003",
+ "BriefDescription": "Number of hardware interrupts received by the processor.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcb",
+ "EventName": "HW_INTERRUPTS.RECEIVED",
+ "PublicDescription": "Counts the number of hardware interruptions received by the processor.",
+ "SampleAfterValue": "203",
"UMask": "0x1"
},
{
@@ -221,49 +48,11 @@
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10800",
+ "PublicDescription": "Counts streaming stores that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts Demand RFOs, ItoM's, PREFECTHW's, Hardware RFO Prefetches to the L1/L2 and Streaming stores that likely resulted in a store to Memory (DRAM or PMM)",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.WRITE_ESTIMATE.MEMORY",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0xFBFF80822",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xa5",
- "EventName": "RS.EMPTY",
- "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
- "SampleAfterValue": "1000003",
- "UMask": "0x7"
- },
- {
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
- "Counter": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "EdgeDetect": "1",
- "EventCode": "0xa5",
- "EventName": "RS.EMPTY_COUNT",
- "Invert": "1",
- "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
- "SampleAfterValue": "100003",
- "UMask": "0x7"
- },
- {
- "BriefDescription": "Cycles when RS was empty and a resource allocation stall is asserted",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xa5",
- "EventName": "RS.EMPTY_RESOURCE",
- "SampleAfterValue": "1000003",
- "UMask": "0x1"
- },
- {
"BriefDescription": "Cycles the uncore cannot take further requests",
"Counter": "0,1,2,3",
"CounterMask": "1",
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/pipeline.json b/tools/perf/pmu-events/arch/x86/graniterapids/pipeline.json
index da6478607984..0fef8fd61974 100644
--- a/tools/perf/pmu-events/arch/x86/graniterapids/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/pipeline.json
@@ -32,7 +32,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "PublicDescription": "Counts all branch instructions retired.",
+ "PublicDescription": "Counts all branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009"
},
{
@@ -40,7 +40,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND",
- "PublicDescription": "Counts conditional branch instructions retired.",
+ "PublicDescription": "Counts conditional branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x11"
},
@@ -49,7 +49,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_NTAKEN",
- "PublicDescription": "Counts not taken branch instructions retired.",
+ "PublicDescription": "Counts not taken branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x10"
},
@@ -58,7 +58,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_TAKEN",
- "PublicDescription": "Counts taken conditional branch instructions retired.",
+ "PublicDescription": "Counts taken conditional branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x1"
},
@@ -67,7 +67,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "PublicDescription": "Counts far branch instructions retired.",
+ "PublicDescription": "Counts far branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x40"
},
@@ -76,7 +76,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT",
- "PublicDescription": "Counts near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
+ "PublicDescription": "Counts near indirect branch instructions retired excluding returns. TSX abort is an indirect branch. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x80"
},
@@ -85,7 +85,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
- "PublicDescription": "Counts both direct and indirect near call instructions retired.",
+ "PublicDescription": "Counts both direct and indirect near call instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x2"
},
@@ -94,7 +94,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "PublicDescription": "Counts return instructions retired.",
+ "PublicDescription": "Counts return instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x8"
},
@@ -103,7 +103,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Counts taken branch instructions retired.",
+ "PublicDescription": "Counts taken branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x20"
},
@@ -112,7 +112,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path. Available PDIST counters: 0",
"SampleAfterValue": "400009"
},
{
@@ -120,6 +120,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES_COST",
+ "PublicDescription": "All mispredicted branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x44"
},
@@ -128,7 +129,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND",
- "PublicDescription": "Counts mispredicted conditional branch instructions retired.",
+ "PublicDescription": "Counts mispredicted conditional branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x11"
},
@@ -137,6 +138,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_COST",
+ "PublicDescription": "Mispredicted conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x51"
},
@@ -145,7 +147,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_NTAKEN",
- "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken.",
+ "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x10"
},
@@ -154,6 +156,10 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_NTAKEN_COST",
+ "PublicDescription": "Mispredicted non-taken conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0",
+ "RetirementLatencyMax": 888,
+ "RetirementLatencyMean": 6.11,
+ "RetirementLatencyMin": 0,
"SampleAfterValue": "400009",
"UMask": "0x50"
},
@@ -162,7 +168,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN",
- "PublicDescription": "Counts taken conditional mispredicted branch instructions retired.",
+ "PublicDescription": "Counts taken conditional mispredicted branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x1"
},
@@ -171,6 +177,10 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN_COST",
+ "PublicDescription": "Mispredicted taken conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0",
+ "RetirementLatencyMax": 2750,
+ "RetirementLatencyMean": 5.09,
+ "RetirementLatencyMin": 0,
"SampleAfterValue": "400009",
"UMask": "0x41"
},
@@ -179,7 +189,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT",
- "PublicDescription": "Counts miss-predicted near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
+ "PublicDescription": "Counts miss-predicted near indirect branch instructions retired excluding returns. TSX abort is an indirect branch. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x80"
},
@@ -188,7 +198,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
- "PublicDescription": "Counts retired mispredicted indirect (near taken) CALL instructions, including both register and memory indirect.",
+ "PublicDescription": "Counts retired mispredicted indirect (near taken) CALL instructions, including both register and memory indirect. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x2"
},
@@ -197,6 +207,10 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_CALL_COST",
+ "PublicDescription": "Mispredicted indirect CALL retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0",
+ "RetirementLatencyMax": 703,
+ "RetirementLatencyMean": 15.56,
+ "RetirementLatencyMin": 0,
"SampleAfterValue": "400009",
"UMask": "0x42"
},
@@ -205,6 +219,10 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_COST",
+ "PublicDescription": "Mispredicted near indirect branch instructions retired (excluding returns). This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0",
+ "RetirementLatencyMax": 1562,
+ "RetirementLatencyMean": 11.07,
+ "RetirementLatencyMin": 0,
"SampleAfterValue": "100003",
"UMask": "0xc0"
},
@@ -213,7 +231,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken.",
+ "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x20"
},
@@ -222,6 +240,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN_COST",
+ "PublicDescription": "Mispredicted taken near branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x60"
},
@@ -230,7 +249,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RET",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x8"
},
@@ -239,6 +258,10 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RET_COST",
+ "PublicDescription": "Mispredicted ret instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0",
+ "RetirementLatencyMax": 1082,
+ "RetirementLatencyMean": 32.37,
+ "RetirementLatencyMin": 9,
"SampleAfterValue": "100007",
"UMask": "0x48"
},
@@ -402,6 +425,14 @@
"UMask": "0x4"
},
{
+ "BriefDescription": "Counts the cycles where the AMX (Advance Matrix Extension) unit is busy performing an operation.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb7",
+ "EventName": "EXE.AMX_BUSY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
"BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
@@ -486,7 +517,7 @@
"BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
- "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter. Available PDIST counters: 32",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
@@ -519,7 +550,7 @@
"BriefDescription": "Precise instruction retired with PEBS precise-distribution",
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.PREC_DIST",
- "PublicDescription": "A version of INST_RETIRED that allows for a precise distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR++) feature to fix bias in how retired instructions get sampled. Use on Fixed Counter 0.",
+ "PublicDescription": "A version of INST_RETIRED that allows for a precise distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR++) feature to fix bias in how retired instructions get sampled. Use on Fixed Counter 0. Available PDIST counters: 32",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
@@ -686,7 +717,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "LOAD_HIT_PREFETCH.SWPF",
- "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "PublicDescription": "Counts all software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -775,6 +806,35 @@
"UMask": "0x2"
},
{
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY_COUNT",
+ "Invert": "1",
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "Cycles when RS was empty and a resource allocation stall is asserted",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY_RESOURCE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "This event counts a subset of the Topdown Slots event that were not consumed by the back-end pipeline due to lack of back-end resources, as a result of memory subsystem delays, execution units limitations, or other conditions.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/uncore-cache.json b/tools/perf/pmu-events/arch/x86/graniterapids/uncore-cache.json
index 53055986534d..721fc42797b1 100644
--- a/tools/perf/pmu-events/arch/x86/graniterapids/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/uncore-cache.json
@@ -10,6 +10,15 @@
"Unit": "CHACMS"
},
{
+ "BriefDescription": "UNC_CHACMS_DISTRESS_ASSERTED",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHACMS_DISTRESS_ASSERTED",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "Unit": "CHACMS"
+ },
+ {
"BriefDescription": "Counts the number of cycles FAST trigger is received from the global FAST distress wire.",
"Counter": "0,1,2,3",
"EventCode": "0x34",
@@ -854,6 +863,16 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "Ingress (from CMS) Allocations : IRQ : Counts number of allocations per cycle into the specified Ingress queue.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.IRQ",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "Ingress (from CMS) Occupancy : IRQ : Counts number of entries in the specified Ingress queue in each cycle.",
"Counter": "0",
"EventCode": "0x11",
@@ -864,6 +883,38 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "Counts snoop filter capacity evictions for entries tracking exclusive lines in the core's cache. Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry. Does not count clean evictions such as when a core's cache replaces a tracked cacheline with a new cacheline.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3d",
+ "EventName": "UNC_CHA_SF_EVICTION.E_STATE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Filter Capacity Evictions : E state",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts snoop filter capacity evictions for entries tracking modified lines in the core's cache. Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry. Does not count clean evictions such as when a core's cache replaces a tracked cacheline with a new cacheline.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3d",
+ "EventName": "UNC_CHA_SF_EVICTION.M_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Filter Capacity Evictions : M state",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts snoop filter capacity evictions for entries tracking shared lines in the core's cache. Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry. Does not count clean evictions such as when a core's cache replaces a tracked cacheline with a new cacheline.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3d",
+ "EventName": "UNC_CHA_SF_EVICTION.S_STATE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Filter Capacity Evictions : S state",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "All TOR Inserts",
"Counter": "0,1,2,3",
"EventCode": "0x35",
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/graniterapids/uncore-interconnect.json
index 5c50275c79b0..5eb1145f204f 100644
--- a/tools/perf/pmu-events/arch/x86/graniterapids/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/uncore-interconnect.json
@@ -833,12 +833,20 @@
"Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_I_MISC1.LOST_FWD",
- "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "IRP"
},
{
+ "BriefDescription": "Misc Events - Set 1 : Received Invalid : Secondary received a transfer that did not have sufficient MESI state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1F",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_INVLD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
"BriefDescription": "Snoop Hit E/S responses",
"Counter": "0,1,2,3",
"EventCode": "0x12",
@@ -1076,7 +1084,7 @@
"Unit": "MDF"
},
{
- "BriefDescription": "Egress bypasses for for AD_BNC",
+ "BriefDescription": "Egress bypasses for AD_BNC",
"Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_MDF_TxR_BYPASS.AD_BNC",
@@ -1086,7 +1094,7 @@
"Unit": "MDF"
},
{
- "BriefDescription": "Egress bypasses for for AD_CRD",
+ "BriefDescription": "Egress bypasses for AD_CRD",
"Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_MDF_TxR_BYPASS.AD_CRD",
@@ -1096,7 +1104,7 @@
"Unit": "MDF"
},
{
- "BriefDescription": "Egress bypasses for for AK",
+ "BriefDescription": "Egress bypasses for AK",
"Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_MDF_TxR_BYPASS.AK",
@@ -1106,7 +1114,7 @@
"Unit": "MDF"
},
{
- "BriefDescription": "Egress bypasses for for BL_BNC",
+ "BriefDescription": "Egress bypasses for BL_BNC",
"Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_MDF_TxR_BYPASS.BL_BNC",
@@ -1116,7 +1124,7 @@
"Unit": "MDF"
},
{
- "BriefDescription": "Egress bypasses for for BL_CRD",
+ "BriefDescription": "Egress bypasses for BL_CRD",
"Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_MDF_TxR_BYPASS.BL_CRD",
@@ -1126,7 +1134,7 @@
"Unit": "MDF"
},
{
- "BriefDescription": "Egress bypasses for for IV",
+ "BriefDescription": "Egress bypasses for IV",
"Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_MDF_TxR_BYPASS.IV",
@@ -1136,7 +1144,7 @@
"Unit": "MDF"
},
{
- "BriefDescription": "Number of egress inserts for for AD_BNC",
+ "BriefDescription": "Number of egress inserts for AD_BNC",
"Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_MDF_TxR_INSERTS.AD_BNC",
@@ -1146,7 +1154,7 @@
"Unit": "MDF"
},
{
- "BriefDescription": "Number of egress inserts for for AD_CRD",
+ "BriefDescription": "Number of egress inserts for AD_CRD",
"Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_MDF_TxR_INSERTS.AD_CRD",
@@ -1156,7 +1164,7 @@
"Unit": "MDF"
},
{
- "BriefDescription": "Number of egress inserts for for AK",
+ "BriefDescription": "Number of egress inserts for AK",
"Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_MDF_TxR_INSERTS.AK",
@@ -1166,7 +1174,7 @@
"Unit": "MDF"
},
{
- "BriefDescription": "Number of egress inserts for for BL_BNC",
+ "BriefDescription": "Number of egress inserts for BL_BNC",
"Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_MDF_TxR_INSERTS.BL_BNC",
@@ -1176,7 +1184,7 @@
"Unit": "MDF"
},
{
- "BriefDescription": "Number of egress inserts for for BL_CRD",
+ "BriefDescription": "Number of egress inserts for BL_CRD",
"Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_MDF_TxR_INSERTS.BL_CRD",
@@ -1186,7 +1194,7 @@
"Unit": "MDF"
},
{
- "BriefDescription": "Number of egress inserts for for IV",
+ "BriefDescription": "Number of egress inserts for IV",
"Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_MDF_TxR_INSERTS.IV",
@@ -1196,7 +1204,7 @@
"Unit": "MDF"
},
{
- "BriefDescription": "Egress occupancy for for AD_BNC",
+ "BriefDescription": "Egress occupancy for AD_BNC",
"Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_MDF_TxR_OCCUPANCY.AD_BNC",
@@ -1206,7 +1214,7 @@
"Unit": "MDF"
},
{
- "BriefDescription": "Egress occupancy for for AD_CRD",
+ "BriefDescription": "Egress occupancy for AD_CRD",
"Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_MDF_TxR_OCCUPANCY.AD_CRD",
@@ -1216,7 +1224,7 @@
"Unit": "MDF"
},
{
- "BriefDescription": "Egress occupancy for for AK",
+ "BriefDescription": "Egress occupancy for AK",
"Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_MDF_TxR_OCCUPANCY.AK",
@@ -1226,7 +1234,7 @@
"Unit": "MDF"
},
{
- "BriefDescription": "Egress occupancy for for BL_BNC",
+ "BriefDescription": "Egress occupancy for BL_BNC",
"Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_MDF_TxR_OCCUPANCY.BL_BNC",
@@ -1236,7 +1244,7 @@
"Unit": "MDF"
},
{
- "BriefDescription": "Egress occupancy for for BL_CRD",
+ "BriefDescription": "Egress occupancy for BL_CRD",
"Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_MDF_TxR_OCCUPANCY.BL_CRD",
@@ -1246,7 +1254,7 @@
"Unit": "MDF"
},
{
- "BriefDescription": "Egress occupancy for for IV",
+ "BriefDescription": "Egress occupancy for IV",
"Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_MDF_TxR_OCCUPANCY.IV",
@@ -1916,21 +1924,56 @@
"Unit": "UPI"
},
{
- "BriefDescription": "Tx Flit Buffer Allocations : Number of allocations into the UPI Tx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
- "Counter": "0,1,2,3",
- "EventCode": "0x40",
- "EventName": "UNC_UPI_TxL_INSERTS",
+ "BriefDescription": "Message Received : Doorbell",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
"Experimental": "1",
"PerPkg": "1",
- "Unit": "UPI"
+ "UMask": "0x8",
+ "Unit": "UBOX"
},
{
- "BriefDescription": "Tx Flit Buffer Occupancy : Accumulates the number of flits in the TxQ. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Message Received : Interrupt",
+ "Counter": "0,1",
"EventCode": "0x42",
- "EventName": "UNC_UPI_TxL_OCCUPANCY",
+ "EventName": "UNC_U_EVENT_MSG.INT_PRIO",
"Experimental": "1",
"PerPkg": "1",
- "Unit": "UPI"
+ "PublicDescription": "Message Received : Interrupt : Interrupts",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : IPI",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.IPI_RCVD",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Message Received : IPI : Inter Processor Interrupts",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : MSI",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.MSI_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Message Received : MSI : Message Signaled Interrupts - interrupts sent by devices (including PCIe via IOxAPIC) (Socket Mode only)",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : VLW",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.VLW_RCVD",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Message Received : VLW : Virtual Logical Wire (legacy) message were received from Uncore.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
}
]
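
The removed UNC_UPI_TxL_INSERTS/UNC_UPI_TxL_OCCUPANCY descriptions note that the occupancy and allocation counts together give the average flit-buffer lifetime; that is Little's law. A sketch with hypothetical counter readings:

    # Little's law: average residency = accumulated occupancy / inserts.
    # Counter values below are hypothetical.
    def avg_flit_lifetime(occupancy_sum, inserts):
        return occupancy_sum / inserts if inserts else 0.0

    print(avg_flit_lifetime(48_000, 12_000))  # 4.0 cycles per flit
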
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/uncore-io.json b/tools/perf/pmu-events/arch/x86/graniterapids/uncore-io.json
index 886b99a971be..2ea2637df3fb 100644
--- a/tools/perf/pmu-events/arch/x86/graniterapids/uncore-io.json
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/uncore-io.json
@@ -1121,8 +1121,9 @@
"Unit": "IIO"
},
{
- "BriefDescription": "Occupancy of outbound request queue : To device : Counts number of outbound requests/completions IIO is currently processing",
+ "BriefDescription": "This event is deprecated. [This event is alias to UNC_IIO_NUM_OUTSTANDING_REQ_FROM_CPU.TO_IO]",
"Counter": "2,3",
+ "Deprecated": "1",
"EventCode": "0xc5",
"EventName": "UNC_IIO_NUM_OUSTANDING_REQ_FROM_CPU.TO_IO",
"Experimental": "1",
@@ -1133,6 +1134,18 @@
"Unit": "IIO"
},
{
+ "BriefDescription": "Occupancy of outbound request queue : To device : Counts number of outbound requests/completions IIO is currently processing [This event is alias to UNC_IIO_NUM_OUSTANDING_REQ_FROM_CPU.TO_IO]",
+ "Counter": "2,3",
+ "EventCode": "0xc5",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_FROM_CPU.TO_IO",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
"BriefDescription": "Passing data to be written",
"Counter": "0,1,2,3",
"EventCode": "0x88",
@@ -1301,6 +1314,17 @@
"Unit": "IIO"
},
{
+ "BriefDescription": "Posted requests sent by the integrated IO (IIO) controller to the Ubox, useful for counting message signaled interrupts (MSI).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8e",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.UBOX_POSTED",
+ "FCMask": "0x01",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
"BriefDescription": "All 9 bits of Page Walk Tracker Occupancy",
"Counter": "0,1,2,3",
"EventCode": "0x42",
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/uncore-memory.json b/tools/perf/pmu-events/arch/x86/graniterapids/uncore-memory.json
index 5f4783ff6ce5..f559e27e2815 100644
--- a/tools/perf/pmu-events/arch/x86/graniterapids/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/uncore-memory.json
@@ -60,6 +60,33 @@
"BriefDescription": "CAS count for SubChannel 0 regular reads",
"Counter": "0,1,2,3",
"EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT_SCH0.RD_NON_UNDERFILL",
+ "PerPkg": "1",
+ "UMask": "0xc3",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "CAS count for SubChannel 0 auto-precharge reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT_SCH0.RD_PRE_REG",
+ "PerPkg": "1",
+ "UMask": "0xc2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "CAS count for SubChannel 0 auto-precharge underfill reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT_SCH0.RD_PRE_UNDERFILL",
+ "PerPkg": "1",
+ "UMask": "0xc8",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "CAS count for SubChannel 0 regular reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT_SCH0.RD_REG",
"PerPkg": "1",
"UMask": "0xc1",
@@ -75,6 +102,15 @@
"Unit": "IMC"
},
{
+ "BriefDescription": "CAS count for SubChannel 0 underfill reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT_SCH0.RD_UNDERFILL_ALL",
+ "PerPkg": "1",
+ "UMask": "0xcc",
+ "Unit": "IMC"
+ },
+ {
"BriefDescription": "CAS count for SubChannel 0, all writes",
"Counter": "0,1,2,3",
"EventCode": "0x05",
@@ -125,6 +161,33 @@
"BriefDescription": "CAS count for SubChannel 1 regular reads",
"Counter": "0,1,2,3",
"EventCode": "0x06",
+ "EventName": "UNC_M_CAS_COUNT_SCH1.RD_NON_UNDERFILL",
+ "PerPkg": "1",
+ "UMask": "0xc3",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "CAS count for SubChannel 1 auto-precharge reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x06",
+ "EventName": "UNC_M_CAS_COUNT_SCH1.RD_PRE_REG",
+ "PerPkg": "1",
+ "UMask": "0xc2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "CAS count for SubChannel 1 auto-precharge underfill reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x06",
+ "EventName": "UNC_M_CAS_COUNT_SCH1.RD_PRE_UNDERFILL",
+ "PerPkg": "1",
+ "UMask": "0xc8",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "CAS count for SubChannel 1 regular reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x06",
"EventName": "UNC_M_CAS_COUNT_SCH1.RD_REG",
"PerPkg": "1",
"UMask": "0xc1",
@@ -140,6 +203,15 @@
"Unit": "IMC"
},
{
+ "BriefDescription": "CAS count for SubChannel 1 underfill reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x06",
+ "EventName": "UNC_M_CAS_COUNT_SCH1.RD_UNDERFILL_ALL",
+ "PerPkg": "1",
+ "UMask": "0xcc",
+ "Unit": "IMC"
+ },
+ {
"BriefDescription": "CAS count for SubChannel 1, all writes",
"Counter": "0,1,2,3",
"EventCode": "0x06",
@@ -189,13 +261,132 @@
"Unit": "IMC"
},
{
+ "BriefDescription": "PMMNT is sending REF* commands while being in specified Refresh rate",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_M_MNTCMD_REFRATE.REFAB1X",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "PMMNT is sending REF* commands while being in specified Refresh rate",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_M_MNTCMD_REFRATE.REFAB2X",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "PMMNT is sending REF* commands while being in specified Refresh rate",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_M_MNTCMD_REFRATE.REFSB1X",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "PMMNT is sending REF* commands while being in specified Refresh rate",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_M_MNTCMD_REFRATE.REFSB2X",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles MR4 temp readings forced 2x refresh",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M_MR4_2XREF_CYCLES.SCH0_DIMM0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles MR4 temp readings forced 2x refresh",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M_MR4_2XREF_CYCLES.SCH0_DIMM1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles MR4 temp readings forced 2x refresh",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M_MR4_2XREF_CYCLES.SCH1_DIMM0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles MR4 temp readings forced 2x refresh",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M_MR4_2XREF_CYCLES.SCH1_DIMM1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles MR4 MRRs was triggered/running",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M_PDC_MR4ACTIVE_CYCLES.SCH0_DIMM0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles MR4 MRRs was triggered/running",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M_PDC_MR4ACTIVE_CYCLES.SCH0_DIMM1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles MR4 MRRs was triggered/running",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M_PDC_MR4ACTIVE_CYCLES.SCH1_DIMM0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles MR4 MRRs was triggered/running",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M_PDC_MR4ACTIVE_CYCLES.SCH1_DIMM1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IMC"
+ },
+ {
"BriefDescription": "# of cycles a given rank is in Power Down Mode",
"Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M_POWERDOWN_CYCLES.SCH0_RANK0",
"Experimental": "1",
"PerPkg": "1",
- "PublicDescription": "-",
"UMask": "0x1",
"Unit": "IMC"
},
@@ -206,7 +397,6 @@
"EventName": "UNC_M_POWERDOWN_CYCLES.SCH0_RANK1",
"Experimental": "1",
"PerPkg": "1",
- "PublicDescription": "-",
"UMask": "0x2",
"Unit": "IMC"
},
@@ -217,7 +407,6 @@
"EventName": "UNC_M_POWERDOWN_CYCLES.SCH0_RANK2",
"Experimental": "1",
"PerPkg": "1",
- "PublicDescription": "-",
"UMask": "0x4",
"Unit": "IMC"
},
@@ -228,7 +417,6 @@
"EventName": "UNC_M_POWERDOWN_CYCLES.SCH0_RANK3",
"Experimental": "1",
"PerPkg": "1",
- "PublicDescription": "-",
"UMask": "0x8",
"Unit": "IMC"
},
@@ -239,7 +427,6 @@
"EventName": "UNC_M_POWERDOWN_CYCLES.SCH1_RANK0",
"Experimental": "1",
"PerPkg": "1",
- "PublicDescription": "-",
"UMask": "0x10",
"Unit": "IMC"
},
@@ -250,7 +437,6 @@
"EventName": "UNC_M_POWERDOWN_CYCLES.SCH1_RANK1",
"Experimental": "1",
"PerPkg": "1",
- "PublicDescription": "-",
"UMask": "0x20",
"Unit": "IMC"
},
@@ -261,7 +447,6 @@
"EventName": "UNC_M_POWERDOWN_CYCLES.SCH1_RANK2",
"Experimental": "1",
"PerPkg": "1",
- "PublicDescription": "-",
"UMask": "0x40",
"Unit": "IMC"
},
@@ -272,7 +457,6 @@
"EventName": "UNC_M_POWERDOWN_CYCLES.SCH1_RANK3",
"Experimental": "1",
"PerPkg": "1",
- "PublicDescription": "-",
"UMask": "0x80",
"Unit": "IMC"
},
@@ -283,7 +467,66 @@
"EventName": "UNC_M_POWER_CHANNEL_PPD_CYCLES",
"Experimental": "1",
"PerPkg": "1",
- "PublicDescription": "-",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at Critical level on specified DIMM and throttle level is zero.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x89",
+ "EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES.SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at Critical level on specified DIMM and throttle level is zero.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x89",
+ "EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES.SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "UNC_M_POWER_THROTTLE_CYCLES.BW_SLOT0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.BW_SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "UNC_M_POWER_THROTTLE_CYCLES.BW_SLOT1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.BW_SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "MR4 temp reading is throttling",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.MR4BLKEN",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "RAPL is throttling",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RAPLBLK",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
"Unit": "IMC"
},
{
@@ -463,7 +706,6 @@
"EventName": "UNC_M_SELF_REFRESH.ENTER_SUCCESS",
"Experimental": "1",
"PerPkg": "1",
- "PublicDescription": "UNC_M_SELF_REFRESH.ENTER_SUCCESS",
"UMask": "0x2",
"Unit": "IMC"
},
@@ -474,11 +716,90 @@
"EventName": "UNC_M_SELF_REFRESH.ENTER_SUCCESS_CYCLES",
"Experimental": "1",
"PerPkg": "1",
- "PublicDescription": "-",
"UMask": "0x1",
"Unit": "IMC"
},
{
+ "BriefDescription": "# of cycles Throttling at Critical level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M_THROTTLE_CRIT_CYCLES.SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at Critical level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M_THROTTLE_CRIT_CYCLES.SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at High level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8d",
+ "EventName": "UNC_M_THROTTLE_HIGH_CYCLES.SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at High level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8d",
+ "EventName": "UNC_M_THROTTLE_HIGH_CYCLES.SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at Normal level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8b",
+ "EventName": "UNC_M_THROTTLE_LOW_CYCLES.SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at Normal level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8b",
+ "EventName": "UNC_M_THROTTLE_LOW_CYCLES.SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at Mid level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M_THROTTLE_MID_CYCLES.SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at Mid level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M_THROTTLE_MID_CYCLES.SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
"BriefDescription": "Write Pending Queue Allocations",
"Counter": "0,1,2,3",
"EventCode": "0x22",
diff --git a/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json b/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
index 0c1040b7e38c..aebd82ced1cf 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
@@ -1,49 +1,49 @@
[
{
"BriefDescription": "C2 residency percent per package",
- "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per core",
- "MetricExpr": "cstate_core@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per package",
- "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per core",
- "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per package",
- "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per core",
- "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per package",
- "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Pkg_Residency",
"ScaleUnit": "100%"
@@ -74,13 +74,12 @@
"MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_4k_aliasing",
- "MetricThreshold": "tma_4k_aliasing > 0.2 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound)",
+ "MetricThreshold": "tma_4k_aliasing > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
"MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5 + UOPS_DISPATCHED_PORT.PORT_6) / tma_info_thread_slots",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_alu_op_utilization",
@@ -92,19 +91,18 @@
"MetricExpr": "66 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_thread_slots",
"MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
- "MetricThreshold": "tma_assists > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
- "PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: OTHER_ASSISTS.ANY_WB_ASSIST",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: OTHER_ASSISTS.ANY",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "1 - (tma_frontend_bound + tma_bad_speculation + tma_retiring)",
"MetricGroup": "BvOB;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
"ScaleUnit": "100%"
},
{
@@ -114,7 +112,7 @@
"MetricName": "tma_bad_speculation",
"MetricThreshold": "tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL1",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"ScaleUnit": "100%"
},
{
@@ -125,7 +123,7 @@
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_bad_spec_branch_misprediction_cost, tma_mispredicts_resteers",
"ScaleUnit": "100%"
},
{
@@ -133,18 +131,17 @@
"MetricExpr": "12 * (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY) / tma_info_thread_clks",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_branch_resteers",
- "MetricThreshold": "tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_l3_hit_latency, tma_store_latency",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
"MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_cisc",
- "MetricThreshold": "tma_cisc > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
- "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
"ScaleUnit": "100%"
},
{
@@ -153,8 +150,8 @@
"MetricExpr": "(60 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) + 43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS)))) / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
- "MetricThreshold": "tma_contested_accesses > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM, MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
@@ -165,7 +162,7 @@
"MetricName": "tma_core_bound",
"MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations)",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
"ScaleUnit": "100%"
},
{
@@ -174,8 +171,8 @@
"MetricExpr": "43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) / tma_info_thread_clks",
"MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
- "MetricThreshold": "tma_data_sharing > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
@@ -183,8 +180,8 @@
"MetricExpr": "10 * ARITH.DIVIDER_UOPS / tma_info_core_core_clks",
"MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
- "MetricThreshold": "tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_UOPS",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
"ScaleUnit": "100%"
},
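A hedged illustration of keeping work off the divider (names illustrative; compilers already strength-reduce compile-time-constant divisors, so the manual form only matters when the divisor is chosen at run time and known to be a power of two):

    #include <stdint.h>

    /* x / d executes on the long-latency divide unit when d is not a
     * compile-time constant. */
    uint32_t bucket_div(uint32_t x, uint32_t d) { return x / d; }

    /* For unsigned x and a power-of-two divisor, a shift gives the same
     * result and never touches the divider. */
    uint32_t bucket_shift(uint32_t x, uint32_t log2_d) { return x >> log2_d; }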
{
@@ -193,8 +190,8 @@
"MetricExpr": "(1 - MEM_LOAD_UOPS_RETIRED.L3_HIT / (MEM_LOAD_UOPS_RETIRED.L3_HIT + 7 * MEM_LOAD_UOPS_RETIRED.L3_MISS)) * CYCLE_ACTIVITY.STALLS_L2_PENDING / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_dram_bound",
- "MetricThreshold": "tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_MISS",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_MISS_PS",
"ScaleUnit": "100%"
},
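A small C sketch of the caching advice above (array size and names illustrative): unit-stride traversal of a row-major array mostly hits in cache, while the transposed loop order strides a full row per access and tends to surface as DRAM Bound on large arrays.

    #include <stddef.h>
    #define N 1024

    /* Unit-stride, cache-friendly: consecutive j touch consecutive bytes. */
    double sum_row_major(const double a[N][N]) {
        double s = 0.0;
        for (size_t i = 0; i < N; i++)
            for (size_t j = 0; j < N; j++)
                s += a[i][j];
        return s;
    }

    /* Swapped loops stride N * sizeof(double) bytes per access, defeating
     * spatial locality and pushing misses out to DRAM. */
    double sum_col_major(const double a[N][N]) {
        double s = 0.0;
        for (size_t j = 0; j < N; j++)
            for (size_t i = 0; i < N; i++)
                s += a[i][j];
        return s;
    }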
{
@@ -203,7 +200,7 @@
"MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_dsb",
"MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2",
- "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
"ScaleUnit": "100%"
},
{
@@ -211,7 +208,7 @@
"MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_thread_clks",
"MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_dsb_switches",
- "MetricThreshold": "tma_dsb_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Related metrics: tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
@@ -220,8 +217,8 @@
"MetricExpr": "(8 * DTLB_LOAD_MISSES.STLB_HIT + DTLB_LOAD_MISSES.WALK_DURATION) / tma_info_thread_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
- "MetricThreshold": "tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_UOPS_RETIRED.STLB_MISS_LOADS. Related metrics: tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_UOPS_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store",
"ScaleUnit": "100%"
},
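One way to act on the larger-page advice given for these DTLB metrics is Linux transparent huge pages, so that one TLB entry covers 2 MiB instead of 4 KiB; a hedged sketch (Linux-specific, and madvise is advisory only):

    #define _GNU_SOURCE
    #include <stddef.h>
    #include <sys/mman.h>

    /* Back a large, hot array with transparent huge pages to cut DTLB
     * walks; the kernel may or may not honor the hint. */
    static void *alloc_hot(size_t bytes) {
        void *p = mmap(NULL, bytes, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return NULL;
        madvise(p, bytes, MADV_HUGEPAGE);
        return p;
    }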
{
@@ -229,8 +226,8 @@
"MetricExpr": "(8 * DTLB_STORE_MISSES.STLB_HIT + DTLB_STORE_MISSES.WALK_DURATION) / tma_info_thread_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
- "MetricThreshold": "tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_UOPS_RETIRED.STLB_MISS_STORES. Related metrics: tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_UOPS_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load",
"ScaleUnit": "100%"
},
{
@@ -238,18 +235,18 @@
"MetricExpr": "60 * OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
- "MetricThreshold": "tma_false_sharing > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM, OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
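The padding fix for false sharing is mechanical; a minimal C11 sketch (struct and field names illustrative): keep each independently written counter on its own 64-byte line.

    #include <stdalign.h>
    #include <stdint.h>

    /* Both fields share one 64-byte line: thread 0 writing a invalidates
     * thread 1's cached copy of b, and vice versa (false sharing). */
    struct counters_bad {
        uint64_t a;   /* written by thread 0 */
        uint64_t b;   /* written by thread 1 */
    };

    /* Give each hot field its own cache line. */
    struct counters_good {
        alignas(64) uint64_t a;
        alignas(64) uint64_t b;
    };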
{
"BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "tma_info_memory_load_miss_real_latency * cpu@L1D_PEND_MISS.REQUEST_FB_FULL\\,cmask\\=0x1@ / tma_info_thread_clks",
+ "MetricExpr": "tma_info_memory_load_miss_real_latency * cpu@L1D_PEND_MISS.REQUEST_FB_FULL\\,cmask\\=1@ / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
- "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
"ScaleUnit": "100%"
},
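Since the updated related-metrics list points at streaming stores, here is a hedged SSE2 sketch of that mitigation for large write-only fills (assumes x86, a 16-byte-aligned destination, and n divisible by 4): non-temporal stores bypass the cache hierarchy and so do not tie up L1D fill buffers.

    #include <emmintrin.h>
    #include <stddef.h>
    #include <stdint.h>

    void fill_nt(int32_t *dst, size_t n, int32_t v) {
        __m128i x = _mm_set1_epi32(v);
        for (size_t i = 0; i < n; i += 4)
            _mm_stream_si128((__m128i *)(dst + i), x);
        _mm_sfence();   /* order NT stores before subsequent accesses */
    }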
{
@@ -279,33 +276,33 @@
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
"MetricExpr": "tma_microcode_sequencer",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_heavy_operations",
"MetricThreshold": "tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+])",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+])",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.",
"MetricExpr": "ICACHE.IFDATA_STALL / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
- "MetricThreshold": "tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
"MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * BR_MISP_EXEC.INDIRECT)",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_indirect",
- "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1000"
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3"
},
{
"BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
@@ -316,7 +313,7 @@
},
{
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
- "MetricExpr": "(CPU_CLK_UNHALTED.THREAD_ANY / 2 if #SMT_on else tma_info_thread_clks)",
+ "MetricExpr": "(CPU_CLK_UNHALTED.THREAD / 2 * (1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK) if #core_wide < 1 else (CPU_CLK_UNHALTED.THREAD_ANY / 2 if #SMT_on else tma_info_thread_clks))",
"MetricGroup": "SMT",
"MetricName": "tma_info_core_core_clks"
},
@@ -328,7 +325,7 @@
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)",
- "MetricExpr": "(UOPS_EXECUTED.CORE / 2 / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@ / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@) if #SMT_on else UOPS_EXECUTED.CORE / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@ / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@))",
+ "MetricExpr": "(UOPS_EXECUTED.CORE / 2 / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) if #SMT_on else UOPS_EXECUTED.CORE / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@))",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "tma_info_core_ilp"
},
@@ -353,7 +350,7 @@
"MetricName": "tma_info_frontend_tbpc"
},
{
- "BriefDescription": "Branch instructions per taken branch",
+ "BriefDescription": "Branch instructions per taken branch.",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;PGO",
"MetricName": "tma_info_inst_mix_bptkbranch"
@@ -398,7 +395,7 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
- "MetricThreshold": "tma_info_inst_mix_iptb < 4 * 2 + 1",
+ "MetricThreshold": "tma_info_inst_mix_iptb < 9",
"PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_lcp"
},
{
@@ -502,14 +499,14 @@
"MetricThreshold": "tma_info_memory_tlb_page_walks_utilization > 0.5"
},
{
- "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=0x1@",
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
"MetricGroup": "Pipeline;Ret",
"MetricName": "tma_info_pipeline_retire"
},
{
"BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]",
- "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / tma_info_system_time",
+ "MetricExpr": "tma_info_system_turbo_utilization * msr@tsc@ / 1e9 / tma_info_system_time",
"MetricGroup": "Power;Summary",
"MetricName": "tma_info_system_core_frequency"
},
@@ -521,7 +518,7 @@
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
@@ -537,14 +534,13 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
"MetricGroup": "Branches;OS",
"MetricName": "tma_info_system_ipfarbranch",
- "MetricThreshold": "tma_info_system_ipfarbranch < 1000000"
+ "MetricThreshold": "tma_info_system_ipfarbranch < 1e6"
},
{
"BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
"MetricGroup": "OS",
- "MetricName": "tma_info_system_kernel_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_system_kernel_cpi"
},
{
"BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
@@ -592,7 +588,7 @@
"MetricName": "tma_info_system_turbo_utilization"
},
{
- "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active",
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Pipeline",
"MetricName": "tma_info_thread_clks"
@@ -601,8 +597,7 @@
"BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / tma_info_thread_ipc",
"MetricGroup": "Mem;Pipeline",
- "MetricName": "tma_info_thread_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_thread_cpi"
},
{
"BriefDescription": "Instructions Per Cycle (per Logical Processor)",
@@ -628,14 +623,14 @@
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
- "MetricThreshold": "tma_info_thread_uptb < 4 * 1.5"
+ "MetricThreshold": "tma_info_thread_uptb < 6"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
"MetricExpr": "(14 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION) / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
- "MetricThreshold": "tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: ITLB_MISSES.WALK_COMPLETED",
"ScaleUnit": "100%"
},
@@ -644,8 +639,8 @@
"MetricExpr": "max((min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_LDM_PENDING) - CYCLE_ACTIVITY.STALLS_L1D_PENDING) / tma_info_thread_clks, 0)",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
"MetricName": "tma_l1_bound",
- "MetricThreshold": "tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 Data (L1D) cache. The L1D cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1D. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_UOPS_RETIRED.L1_HIT. Related metrics: tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 Data (L1D) cache. The L1D cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1D. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_UOPS_RETIRED.L1_HIT_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
"ScaleUnit": "100%"
},
{
@@ -653,8 +648,8 @@
"MetricExpr": "(CYCLE_ACTIVITY.STALLS_L1D_PENDING - CYCLE_ACTIVITY.STALLS_L2_PENDING) / tma_info_thread_clks",
"MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
- "MetricThreshold": "tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L2_HIT_PS",
"ScaleUnit": "100%"
},
{
@@ -663,8 +658,8 @@
"MetricExpr": "MEM_LOAD_UOPS_RETIRED.L3_HIT / (MEM_LOAD_UOPS_RETIRED.L3_HIT + 7 * MEM_LOAD_UOPS_RETIRED.L3_MISS) * CYCLE_ACTIVITY.STALLS_L2_PENDING / tma_info_thread_clks",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
- "MetricThreshold": "tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS",
"ScaleUnit": "100%"
},
{
@@ -673,8 +668,8 @@
"MetricExpr": "29 * (MEM_LOAD_UOPS_RETIRED.L3_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
- "MetricThreshold": "tma_l3_hit_latency > 0.1 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT. Related metrics: tma_branch_resteers, tma_mem_latency, tma_store_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency",
"ScaleUnit": "100%"
},
{
@@ -682,23 +677,22 @@
"MetricExpr": "ILD_STALL.LCP / tma_info_thread_clks",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_lcp",
- "MetricThreshold": "tma_lcp > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
"ScaleUnit": "100%"
},
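A hedged C sketch of the classic LCP trigger (encodings are compiler-dependent, so treat this as illustrative): a 16-bit operation with a 16-bit immediate can be emitted with a 66h operand-size prefix followed by an imm16, the length-changing pattern; doing the arithmetic at 32 bits avoids that encoding.

    #include <stddef.h>
    #include <stdint.h>

    /* May be encoded as `add word ptr [...], imm16` (66h prefix + imm16). */
    void bump16(uint16_t *a, size_t n) {
        for (size_t i = 0; i < n; i++)
            a[i] += 0x1234;
    }

    /* 32-bit add, then truncate: no 16-bit-immediate encoding needed. */
    void bump16_wide(uint16_t *a, size_t n) {
        for (size_t i = 0; i < n; i++)
            a[i] = (uint16_t)((uint32_t)a[i] + 0x1234u);
    }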
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation)",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
"MetricExpr": "tma_retiring - tma_heavy_operations",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_light_operations",
"MetricThreshold": "tma_light_operations > 0.6",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_2 + UOPS_DISPATCHED_PORT.PORT_3 + UOPS_DISPATCHED_PORT.PORT_7 - UOPS_DISPATCHED_PORT.PORT_4) / (2 * tma_info_core_core_clks)",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_load_op_utilization",
@@ -712,8 +706,8 @@
"MetricExpr": "MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO) / tma_info_thread_clks",
"MetricGroup": "LockCont;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
- "MetricThreshold": "tma_lock_latency > 0.2 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_UOPS_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_UOPS_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
"ScaleUnit": "100%"
},
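A minimal C11 sketch of the contended-lock pattern this metric flags, and the usual batching fix (names illustrative):

    #include <stdatomic.h>
    #include <stdint.h>

    atomic_uint_fast64_t global_hits;

    /* Every call is a lock-prefixed read-modify-write on a shared line;
     * under contention this is the access pattern tma_lock_latency flags. */
    void hit_contended(void) {
        atomic_fetch_add_explicit(&global_hits, 1, memory_order_relaxed);
    }

    /* Cheaper: accumulate per thread, publish into global_hits once per
     * batch instead of on every event. */
    void hit_local(uint64_t *local_count) {
        ++*local_count;   /* plain increment, no locked access */
    }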
{
@@ -724,15 +718,15 @@
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
- "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=0x6@) / tma_info_thread_clks",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=6@) / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
- "MetricThreshold": "tma_mem_bandwidth > 0.2 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
"ScaleUnit": "100%"
},
@@ -741,19 +735,19 @@
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
"MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
- "MetricThreshold": "tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "(min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_LDM_PENDING) + RESOURCE_STALLS.SB) / (min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) + (cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@ - (cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x3@ if tma_info_thread_ipc > 1.8 else cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x2@)) / 2 - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB if #SMT_on else min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) + cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@ - (cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x3@ if tma_info_thread_ipc > 1.8 else cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x2@) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB) * tma_backend_bound",
+ "MetricExpr": "(min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_LDM_PENDING) + RESOURCE_STALLS.SB) / (min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) + (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - (cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ if tma_info_thread_ipc > 1.8 else cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@)) / 2 - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB if #SMT_on else min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) + cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - (cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ if tma_info_thread_ipc > 1.8 else cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB) * tma_backend_bound",
"MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_memory_bound",
"MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two)",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
"ScaleUnit": "100%"
},
{
@@ -762,7 +756,7 @@
"MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
"MetricName": "tma_microcode_sequencer",
"MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
- "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_l1_bound, tma_machine_clears, tma_ms_switches",
+ "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches",
"ScaleUnit": "100%"
},
{
@@ -771,7 +765,7 @@
"MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_mite",
"MetricThreshold": "tma_mite > 0.1 & tma_fetch_bandwidth > 0.2",
- "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck.",
"ScaleUnit": "100%"
},
{
@@ -779,8 +773,8 @@
"MetricExpr": "2 * IDQ.MS_SWITCHES / tma_info_thread_clks",
"MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
"MetricName": "tma_ms_switches",
- "MetricThreshold": "tma_ms_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
"ScaleUnit": "100%"
},
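For the denormal-assist case named above, flushing denormals is the standard sidestep when gradual underflow is not required; a hedged sketch using the SSE control-register intrinsics:

    #include <pmmintrin.h>   /* _MM_SET_DENORMALS_ZERO_MODE */
    #include <xmmintrin.h>   /* _MM_SET_FLUSH_ZERO_MODE */

    /* Flush denormal results to zero and treat denormal inputs as zero,
     * avoiding the microcode assists that surface as MS switches. */
    void enable_ftz_daz(void) {
        _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
        _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
    }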
{
@@ -789,7 +783,7 @@
"MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_0",
"MetricThreshold": "tma_port_0 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED_PORT.PORT_0. Related metrics: tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -798,7 +792,7 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_1",
"MetricThreshold": "tma_port_1 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -834,7 +828,7 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_5",
"MetricThreshold": "tma_port_5 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_5. Related metrics: tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -843,7 +837,7 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_6",
"MetricThreshold": "tma_port_6 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -858,46 +852,46 @@
{
"BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "((min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) + (cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@ - (cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x3@ if tma_info_thread_ipc > 1.8 else cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x2@)) / 2 - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB if #SMT_on else min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) + cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@ - (cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x3@ if tma_info_thread_ipc > 1.8 else cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x2@) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB) - RESOURCE_STALLS.SB - min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_LDM_PENDING)) / tma_info_thread_clks",
+ "MetricExpr": "((min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) + (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - (cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ if tma_info_thread_ipc > 1.8 else cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@)) / 2 - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB if #SMT_on else min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) + cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - (cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ if tma_info_thread_ipc > 1.8 else cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB) - RESOURCE_STALLS.SB - min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_LDM_PENDING)) / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_ports_utilization",
- "MetricThreshold": "tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,inv\\=0x1\\,cmask\\=0x1@ / 2 if #SMT_on else min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0)) / tma_info_core_core_clks",
+ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,inv\\,cmask\\=1@ / 2 if #SMT_on else min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0)) / tma_info_core_core_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
- "MetricThreshold": "tma_ports_utilized_0 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x2@) / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x2@) / tma_info_core_core_clks",
+ "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@) / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@) / tma_info_core_core_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_1",
- "MetricThreshold": "tma_ports_utilized_1 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Related metrics: tma_l1_bound",
"ScaleUnit": "100%"
},
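The linked-list example from the description, as a minimal C sketch: each load's address depends on the previous load, so roughly one port stays busy while the others starve.

    #include <stddef.h>

    struct node {
        struct node *next;
        long val;
    };

    /* Pointer chasing: n->next must complete before the next iteration
     * can even issue its load, serializing on L1D latency. */
    long walk(const struct node *n) {
        long s = 0;
        while (n) {
            s += n->val;
            n = n->next;
        }
        return s;
    }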
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x2@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x3@) / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x2@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x3@) / tma_info_core_core_clks",
+ "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@) / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@) / tma_info_core_core_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_2",
- "MetricThreshold": "tma_ports_utilized_2 > 0.15 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Related metrics: tma_port_0, tma_port_1, tma_port_5, tma_port_6",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
"ScaleUnit": "100%"
},
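A minimal sketch of the vectorization advice (assumes a compiler with auto-vectorization enabled, e.g. -O2/-O3): a restrict-qualified, unit-stride loop lets one vector uop cover several elements.

    #include <stddef.h>

    /* No aliasing, unit stride: auto-vectorizable, so each vector uop
     * does the work of several scalar uops, easing port pressure. */
    void saxpy(float *restrict y, const float *restrict x,
               float a, size_t n) {
        for (size_t i = 0; i < n; i++)
            y[i] += a * x[i];
    }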
{
- "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x3@ / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x3@) / tma_info_core_core_clks",
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).",
+ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@) / tma_info_core_core_clks",
"MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
- "MetricThreshold": "tma_ports_utilized_3m > 0.4 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
{
@@ -917,7 +911,7 @@
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_split_loads",
"MetricThreshold": "tma_split_loads > 0.3",
- "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_UOPS_RETIRED.SPLIT_LOADS_PS",
"ScaleUnit": "100%"
},
{
@@ -925,8 +919,8 @@
"MetricExpr": "2 * MEM_UOPS_RETIRED.SPLIT_STORES / tma_info_core_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
"MetricName": "tma_split_stores",
- "MetricThreshold": "tma_split_stores > 0.2 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_UOPS_RETIRED.SPLIT_STORES. Related metrics: tma_port_4",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_UOPS_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
"ScaleUnit": "100%"
},
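Acting on the 64-byte-alignment advice is a one-liner in C11; a hedged sketch (aligned_alloc requires the size to be a multiple of the alignment):

    #include <stdlib.h>

    /* Cache-line-aligned allocation: accesses up to 64 bytes wide can
     * never straddle a line boundary, avoiding split loads/stores. */
    double *alloc_cacheline_aligned(size_t n) {
        size_t bytes = (n * sizeof(double) + 63) / 64 * 64;
        return aligned_alloc(64, bytes);
    }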
{
@@ -934,7 +928,7 @@
"MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_core_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
- "MetricThreshold": "tma_sq_full > 0.3 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
"ScaleUnit": "100%"
},
@@ -943,8 +937,8 @@
"MetricExpr": "RESOURCE_STALLS.SB / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_store_bound",
- "MetricThreshold": "tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_UOPS_RETIRED.ALL_STORES",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_UOPS_RETIRED.ALL_STORES_PS",
"ScaleUnit": "100%"
},
{
@@ -952,8 +946,8 @@
"MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
- "MetricThreshold": "tma_store_fwd_blk > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
"ScaleUnit": "100%"
},
{
@@ -962,8 +956,8 @@
"MetricExpr": "(L2_RQSTS.RFO_HIT * 9 * (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) + (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
"MetricGroup": "BvML;LockCont;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
- "MetricThreshold": "tma_store_latency > 0.1 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_branch_resteers, tma_fb_full, tma_l3_hit_latency, tma_lock_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
"ScaleUnit": "100%"
},
{
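The regrouped MetricThreshold strings throughout these hunks only make the implicit AND-nesting explicit: a leaf metric is meant to be flagged only when every ancestor in the TopDown tree also exceeds its own threshold. A hedged sketch of how a tool might evaluate one such chain, with invented values:

    # Hypothetical metric values for one profile.
    m = {"tma_backend_bound": 0.35, "tma_memory_bound": 0.25,
         "tma_store_bound": 0.22, "tma_store_latency": 0.12}

    # tma_store_latency > 0.1 & (tma_store_bound > 0.2 &
    #   (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))
    flagged = (m["tma_store_latency"] > 0.1 and
               (m["tma_store_bound"] > 0.2 and
                (m["tma_memory_bound"] > 0.2 and m["tma_backend_bound"] > 0.2)))
    print(flagged)  # True: every ancestor exceeds its own threshold
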
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json b/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
index 1a05b74be575..b8845f8a28b9 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
@@ -1,49 +1,49 @@
[
{
"BriefDescription": "C2 residency percent per package",
- "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per core",
- "MetricExpr": "cstate_core@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per package",
- "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per core",
- "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per package",
- "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per core",
- "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per package",
- "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Pkg_Residency",
"ScaleUnit": "100%"
@@ -276,13 +276,12 @@
"MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_4k_aliasing",
- "MetricThreshold": "tma_4k_aliasing > 0.2 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound)",
+ "MetricThreshold": "tma_4k_aliasing > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
"MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5 + UOPS_DISPATCHED_PORT.PORT_6) / tma_info_thread_slots",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_alu_op_utilization",
@@ -294,19 +293,18 @@
"MetricExpr": "66 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_thread_slots",
"MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
- "MetricThreshold": "tma_assists > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
- "PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: OTHER_ASSISTS.ANY_WB_ASSIST",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: OTHER_ASSISTS.ANY",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "1 - (tma_frontend_bound + tma_bad_speculation + tma_retiring)",
"MetricGroup": "BvOB;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
"ScaleUnit": "100%"
},
{
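At TopDown level 1 the four categories partition all issue slots, which is why tma_backend_bound is derived as the remainder instead of being measured directly. A small sketch with invented values:

    frontend_bound, bad_speculation, retiring = 0.20, 0.08, 0.45  # invented
    backend_bound = 1 - (frontend_bound + bad_speculation + retiring)
    assert abs(frontend_bound + bad_speculation + retiring + backend_bound - 1) < 1e-9
    print(backend_bound)  # 0.27 -> above the 0.2 MetricThreshold
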
@@ -316,7 +314,7 @@
"MetricName": "tma_bad_speculation",
"MetricThreshold": "tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL1",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"ScaleUnit": "100%"
},
{
@@ -327,7 +325,7 @@
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_bad_spec_branch_misprediction_cost, tma_mispredicts_resteers",
"ScaleUnit": "100%"
},
{
@@ -335,18 +333,17 @@
"MetricExpr": "12 * (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY) / tma_info_thread_clks",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_branch_resteers",
- "MetricThreshold": "tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_l3_hit_latency, tma_store_latency",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
"MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_cisc",
- "MetricThreshold": "tma_cisc > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
- "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
"ScaleUnit": "100%"
},
{
@@ -355,8 +352,8 @@
"MetricExpr": "(60 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) + 43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD)))) / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
- "MetricThreshold": "tma_contested_accesses > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM, MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
@@ -367,7 +364,7 @@
"MetricName": "tma_core_bound",
"MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations)",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
"ScaleUnit": "100%"
},
{
@@ -376,8 +373,8 @@
"MetricExpr": "43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) / tma_info_thread_clks",
"MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
- "MetricThreshold": "tma_data_sharing > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
@@ -385,8 +382,8 @@
"MetricExpr": "10 * ARITH.DIVIDER_UOPS / tma_info_core_core_clks",
"MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
- "MetricThreshold": "tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_UOPS",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
"ScaleUnit": "100%"
},
{
@@ -395,8 +392,8 @@
"MetricExpr": "(1 - MEM_LOAD_UOPS_RETIRED.L3_HIT / (MEM_LOAD_UOPS_RETIRED.L3_HIT + 7 * MEM_LOAD_UOPS_RETIRED.L3_MISS)) * CYCLE_ACTIVITY.STALLS_L2_PENDING / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_dram_bound",
- "MetricThreshold": "tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_MISS",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_MISS_PS",
"ScaleUnit": "100%"
},
{
@@ -405,7 +402,7 @@
"MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_dsb",
"MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2",
- "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
"ScaleUnit": "100%"
},
{
@@ -413,7 +410,7 @@
"MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_thread_clks",
"MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_dsb_switches",
- "MetricThreshold": "tma_dsb_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Related metrics: tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
@@ -422,8 +419,8 @@
"MetricExpr": "(8 * DTLB_LOAD_MISSES.STLB_HIT + DTLB_LOAD_MISSES.WALK_DURATION) / tma_info_thread_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
- "MetricThreshold": "tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_UOPS_RETIRED.STLB_MISS_LOADS. Related metrics: tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_UOPS_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store",
"ScaleUnit": "100%"
},
{
@@ -431,8 +428,8 @@
"MetricExpr": "(8 * DTLB_STORE_MISSES.STLB_HIT + DTLB_STORE_MISSES.WALK_DURATION) / tma_info_thread_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
- "MetricThreshold": "tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_UOPS_RETIRED.STLB_MISS_STORES. Related metrics: tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_UOPS_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load",
"ScaleUnit": "100%"
},
{
@@ -440,18 +437,18 @@
"MetricExpr": "(200 * OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM + 60 * OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE) / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
- "MetricThreshold": "tma_false_sharing > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM, MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM, OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE, OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "tma_info_memory_load_miss_real_latency * cpu@L1D_PEND_MISS.REQUEST_FB_FULL\\,cmask\\=0x1@ / tma_info_thread_clks",
+ "MetricExpr": "tma_info_memory_load_miss_real_latency * cpu@L1D_PEND_MISS.REQUEST_FB_FULL\\,cmask\\=1@ / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
- "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
"ScaleUnit": "100%"
},
{
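Here cmask=1 again converts an occupancy-style event into cycles: L1D_PEND_MISS.REQUEST_FB_FULL with cmask=1 is taken as cycles with at least one load blocked because every L1D fill buffer was busy, and weighting by the measured miss latency is why the description says values above 1 are valid. A sketch with assumed numbers:

    load_miss_real_latency = 18.0   # avg cycles per outstanding L1D miss (assumed)
    fb_full_cycles = 0.4e9          # cycles with >= 1 blocked request, via cmask=1
    clks = 8.0e9                    # tma_info_thread_clks

    tma_fb_full = load_miss_real_latency * fb_full_cycles / clks
    print(f"{tma_fb_full:.0%}")     # 90%; above the 30% threshold hints at BW limits
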
@@ -481,33 +478,33 @@
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
"MetricExpr": "tma_microcode_sequencer",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_heavy_operations",
"MetricThreshold": "tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+])",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+])",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.",
"MetricExpr": "ICACHE.IFDATA_STALL / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
- "MetricThreshold": "tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
"MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * BR_MISP_EXEC.INDIRECT)",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_indirect",
- "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1000"
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3"
},
{
"BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
@@ -518,7 +515,7 @@
},
{
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
- "MetricExpr": "(CPU_CLK_UNHALTED.THREAD_ANY / 2 if #SMT_on else tma_info_thread_clks)",
+ "MetricExpr": "(CPU_CLK_UNHALTED.THREAD / 2 * (1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK) if #core_wide < 1 else (CPU_CLK_UNHALTED.THREAD_ANY / 2 if #SMT_on else tma_info_thread_clks))",
"MetricGroup": "SMT",
"MetricName": "tma_info_core_core_clks"
},
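The widened expression falls back to a scaled per-thread count when the event set is not core-wide (#core_wide < 1); otherwise, with SMT on, the any-thread count is halved so both siblings report the same core clocks. A sketch of the selection logic, with invented counters:

    smt_on, core_wide = True, 1          # assumed run configuration
    thread_any = 9.0e9                   # CPU_CLK_UNHALTED.THREAD_ANY
    thread_clks = 5.0e9                  # per-logical-processor clocks

    if core_wide < 1:
        one_thread_active, ref_xclk = 2.0e8, 1.0e8   # invented counter deltas
        core_clks = thread_clks / 2 * (1 + one_thread_active / ref_xclk)
    else:
        core_clks = thread_any / 2 if smt_on else thread_clks
    print(core_clks)  # 4.5e9
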
@@ -530,7 +527,7 @@
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)",
- "MetricExpr": "(UOPS_EXECUTED.CORE / 2 / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@ / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@) if #SMT_on else UOPS_EXECUTED.CORE / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@ / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@))",
+ "MetricExpr": "(UOPS_EXECUTED.CORE / 2 / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) if #SMT_on else UOPS_EXECUTED.CORE / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@))",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "tma_info_core_ilp"
},
@@ -555,7 +552,7 @@
"MetricName": "tma_info_frontend_tbpc"
},
{
- "BriefDescription": "Branch instructions per taken branch",
+ "BriefDescription": "Branch instructions per taken branch.",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;PGO",
"MetricName": "tma_info_inst_mix_bptkbranch"
@@ -600,7 +597,7 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
- "MetricThreshold": "tma_info_inst_mix_iptb < 4 * 2 + 1",
+ "MetricThreshold": "tma_info_inst_mix_iptb < 9",
"PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_lcp"
},
{
@@ -704,14 +701,14 @@
"MetricThreshold": "tma_info_memory_tlb_page_walks_utilization > 0.5"
},
{
- "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=0x1@",
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
"MetricGroup": "Pipeline;Ret",
"MetricName": "tma_info_pipeline_retire"
},
{
"BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]",
- "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / tma_info_system_time",
+ "MetricExpr": "tma_info_system_turbo_utilization * msr@tsc@ / 1e9 / tma_info_system_time",
"MetricGroup": "Power;Summary",
"MetricName": "tma_info_system_core_frequency"
},
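With the msr@tsc@ substitution, average core frequency becomes turbo utilization (unhalted core clocks over reference clocks) times the TSC rate over the measurement window. A sketch with assumed numbers:

    turbo_utilization = 1.2   # CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC
    tsc = 2.5e9               # msr@tsc@ delta over the window
    seconds = 1.0             # tma_info_system_time

    core_ghz = turbo_utilization * tsc / 1e9 / seconds
    print(core_ghz)  # 3.0 GHz measured average while unhalted
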
@@ -723,7 +720,7 @@
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
@@ -739,14 +736,13 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
"MetricGroup": "Branches;OS",
"MetricName": "tma_info_system_ipfarbranch",
- "MetricThreshold": "tma_info_system_ipfarbranch < 1000000"
+ "MetricThreshold": "tma_info_system_ipfarbranch < 1e6"
},
{
"BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
"MetricGroup": "OS",
- "MetricName": "tma_info_system_kernel_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_system_kernel_cpi"
},
{
"BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
@@ -757,14 +753,15 @@
},
{
"BriefDescription": "Average number of parallel data read requests to external memory",
- "MetricExpr": "cbox@UNC_C_TOR_OCCUPANCY.MISS_OPCODE\\,filter_opc\\=0x182@ / cbox@UNC_C_TOR_OCCUPANCY.MISS_OPCODE\\,filter_opc\\=0x182@",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "UNC_C_TOR_OCCUPANCY.MISS_OPCODE@filter_opc\\=0x182@ / UNC_C_TOR_OCCUPANCY.MISS_OPCODE@filter_opc\\=0x182\\,thresh\\=1@",
"MetricGroup": "Mem;MemoryBW;SoC",
"MetricName": "tma_info_system_mem_parallel_reads",
"PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches"
},
{
"BriefDescription": "Average latency of data read request to external memory (in nanoseconds)",
- "MetricExpr": "1e9 * (cbox@UNC_C_TOR_OCCUPANCY.MISS_OPCODE\\,filter_opc\\=0x182@ / cbox@UNC_C_TOR_INSERTS.MISS_OPCODE\\,filter_opc\\=0x182@) / (tma_info_system_socket_clks / tma_info_system_time)",
+ "MetricExpr": "1e9 * (UNC_C_TOR_OCCUPANCY.MISS_OPCODE@filter_opc\\=0x182@ / UNC_C_TOR_INSERTS.MISS_OPCODE@filter_opc\\=0x182@) / (tma_info_system_socket_clks / tma_info_system_time)",
"MetricGroup": "Mem;MemoryLat;SoC",
"MetricName": "tma_info_system_mem_read_latency",
"PublicDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches. ([RKL+]memory-controller only)"
@@ -814,7 +811,7 @@
"MetricName": "tma_info_system_uncore_frequency"
},
{
- "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active",
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Pipeline",
"MetricName": "tma_info_thread_clks"
@@ -823,8 +820,7 @@
"BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / tma_info_thread_ipc",
"MetricGroup": "Mem;Pipeline",
- "MetricName": "tma_info_thread_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_thread_cpi"
},
{
"BriefDescription": "Instructions Per Cycle (per Logical Processor)",
@@ -850,14 +846,14 @@
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
- "MetricThreshold": "tma_info_thread_uptb < 4 * 1.5"
+ "MetricThreshold": "tma_info_thread_uptb < 6"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
"MetricExpr": "(14 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION) / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
- "MetricThreshold": "tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: ITLB_MISSES.WALK_COMPLETED",
"ScaleUnit": "100%"
},
@@ -866,8 +862,8 @@
"MetricExpr": "max((min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_LDM_PENDING) - CYCLE_ACTIVITY.STALLS_L1D_PENDING) / tma_info_thread_clks, 0)",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
"MetricName": "tma_l1_bound",
- "MetricThreshold": "tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 Data (L1D) cache. The L1D cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1D. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_UOPS_RETIRED.L1_HIT. Related metrics: tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 Data (L1D) cache. The L1D cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1D. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_UOPS_RETIRED.L1_HIT_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
"ScaleUnit": "100%"
},
{
@@ -875,8 +871,8 @@
"MetricExpr": "(CYCLE_ACTIVITY.STALLS_L1D_PENDING - CYCLE_ACTIVITY.STALLS_L2_PENDING) / tma_info_thread_clks",
"MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
- "MetricThreshold": "tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L2_HIT_PS",
"ScaleUnit": "100%"
},
{
@@ -885,8 +881,8 @@
"MetricExpr": "MEM_LOAD_UOPS_RETIRED.L3_HIT / (MEM_LOAD_UOPS_RETIRED.L3_HIT + 7 * MEM_LOAD_UOPS_RETIRED.L3_MISS) * CYCLE_ACTIVITY.STALLS_L2_PENDING / tma_info_thread_clks",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
- "MetricThreshold": "tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS",
"ScaleUnit": "100%"
},
{
@@ -895,8 +891,8 @@
"MetricExpr": "41 * (MEM_LOAD_UOPS_RETIRED.L3_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
- "MetricThreshold": "tma_l3_hit_latency > 0.1 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT. Related metrics: tma_branch_resteers, tma_mem_latency, tma_store_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency",
"ScaleUnit": "100%"
},
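The (1 + HIT_LFB / sum-of-data-sources) factor that recurs in these latency expressions redistributes loads that merged into an already in-flight line fill, since those retire as HIT_LFB rather than under the source that actually served them. A sketch for tma_l3_hit_latency with invented counts:

    l3_hit = 2.0e7                    # MEM_LOAD_UOPS_RETIRED.L3_HIT
    hit_lfb = 1.0e7                   # loads merged into an in-flight line fill
    all_sourced = 5.0e7               # sum over the listed data-source events
    clks = 8.0e9                      # tma_info_thread_clks

    weighted_hits = l3_hit * (1 + hit_lfb / all_sourced)  # 2.4e7 effective hits
    tma_l3_hit_latency = 41 * weighted_hits / clks
    print(f"{tma_l3_hit_latency:.1%}")  # 12.3%, versus the 10% threshold
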
{
@@ -904,23 +900,22 @@
"MetricExpr": "ILD_STALL.LCP / tma_info_thread_clks",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_lcp",
- "MetricThreshold": "tma_lcp > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation)",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
"MetricExpr": "tma_retiring - tma_heavy_operations",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_light_operations",
"MetricThreshold": "tma_light_operations > 0.6",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_2 + UOPS_DISPATCHED_PORT.PORT_3 + UOPS_DISPATCHED_PORT.PORT_7 - UOPS_DISPATCHED_PORT.PORT_4) / (2 * tma_info_core_core_clks)",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_load_op_utilization",
@@ -930,11 +925,12 @@
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "200 * (MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) / tma_info_thread_clks",
"MetricGroup": "Server;TopdownL5;tma_L5_group;tma_mem_latency_group",
"MetricName": "tma_local_mem",
- "MetricThreshold": "tma_local_mem > 0.1 & tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
+ "MetricThreshold": "tma_local_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM_PS",
"ScaleUnit": "100%"
},
{
@@ -943,8 +939,8 @@
"MetricExpr": "MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO) / tma_info_thread_clks",
"MetricGroup": "LockCont;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
- "MetricThreshold": "tma_lock_latency > 0.2 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_UOPS_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_UOPS_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
"ScaleUnit": "100%"
},
{
@@ -955,15 +951,15 @@
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
- "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=0x6@) / tma_info_thread_clks",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=6@) / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
- "MetricThreshold": "tma_mem_bandwidth > 0.2 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
"ScaleUnit": "100%"
},
@@ -972,19 +968,19 @@
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
"MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
- "MetricThreshold": "tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "(min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_LDM_PENDING) + RESOURCE_STALLS.SB) / (min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) + (cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@ - (cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x3@ if tma_info_thread_ipc > 1.8 else cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x2@)) / 2 - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB if #SMT_on else min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) + cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@ - (cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x3@ if tma_info_thread_ipc > 1.8 else cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x2@) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB) * tma_backend_bound",
+ "MetricExpr": "(min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_LDM_PENDING) + RESOURCE_STALLS.SB) / (min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) + (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - (cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ if tma_info_thread_ipc > 1.8 else cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@)) / 2 - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB if #SMT_on else min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) + cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - (cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ if tma_info_thread_ipc > 1.8 else cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB) * tma_backend_bound",
"MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_memory_bound",
"MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two)",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
"ScaleUnit": "100%"
},
{
@@ -993,7 +989,7 @@
"MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
"MetricName": "tma_microcode_sequencer",
"MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
- "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_l1_bound, tma_machine_clears, tma_ms_switches",
+ "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches",
"ScaleUnit": "100%"
},
{
@@ -1002,7 +998,7 @@
"MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_mite",
"MetricThreshold": "tma_mite > 0.1 & tma_fetch_bandwidth > 0.2",
- "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck.",
"ScaleUnit": "100%"
},
{
@@ -1010,8 +1006,8 @@
"MetricExpr": "2 * IDQ.MS_SWITCHES / tma_info_thread_clks",
"MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
"MetricName": "tma_ms_switches",
- "MetricThreshold": "tma_ms_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
"ScaleUnit": "100%"
},
{
@@ -1020,7 +1016,7 @@
"MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_0",
"MetricThreshold": "tma_port_0 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED_PORT.PORT_0. Related metrics: tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1029,7 +1025,7 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_1",
"MetricThreshold": "tma_port_1 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1065,7 +1061,7 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_5",
"MetricThreshold": "tma_port_5 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_5. Related metrics: tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1074,7 +1070,7 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_6",
"MetricThreshold": "tma_port_6 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1089,46 +1085,46 @@
{
"BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "((min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) + (cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@ - (cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x3@ if tma_info_thread_ipc > 1.8 else cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x2@)) / 2 - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB if #SMT_on else min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) + cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@ - (cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x3@ if tma_info_thread_ipc > 1.8 else cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x2@) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB) - RESOURCE_STALLS.SB - min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_LDM_PENDING)) / tma_info_thread_clks",
+ "MetricExpr": "((min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) + (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - (cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ if tma_info_thread_ipc > 1.8 else cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@)) / 2 - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB if #SMT_on else min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) + cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - (cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ if tma_info_thread_ipc > 1.8 else cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB) - RESOURCE_STALLS.SB - min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_LDM_PENDING)) / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_ports_utilization",
- "MetricThreshold": "tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,inv\\=0x1\\,cmask\\=0x1@ / 2 if #SMT_on else min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0)) / tma_info_core_core_clks",
+ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,inv\\,cmask\\=1@ / 2 if #SMT_on else min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0)) / tma_info_core_core_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
- "MetricThreshold": "tma_ports_utilized_0 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x2@) / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x2@) / tma_info_core_core_clks",
+ "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@) / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@) / tma_info_core_core_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_1",
- "MetricThreshold": "tma_ports_utilized_1 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Related metrics: tma_l1_bound",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x2@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x3@) / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x2@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x3@) / tma_info_core_core_clks",
+ "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@) / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@) / tma_info_core_core_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_2",
- "MetricThreshold": "tma_ports_utilized_2 > 0.15 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Related metrics: tma_port_0, tma_port_1, tma_port_5, tma_port_6",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x3@ / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x3@) / tma_info_core_core_clks",
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).",
+ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@) / tma_info_core_core_clks",
"MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
- "MetricThreshold": "tma_ports_utilized_3m > 0.4 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
{
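The ports-utilization expressions above count cycles through cmask filters on UOPS_EXECUTED.CORE: `cpu@UOPS_EXECUTED.CORE\,cmask\=N@` counts cycles in which at least N uops executed, so subtracting adjacent cmask levels isolates cycles with exactly N uops. The hunk only normalizes the cmask literals from hex (`0x1`) to decimal (`1`); the arithmetic is unchanged. A minimal sketch of that subtraction, using hypothetical counter values and the non-SMT branch of the MetricExpr (the `#SMT_on` halving is ignored here):

    # Hypothetical cycle counts from cmask-filtered UOPS_EXECUTED.CORE
    # (cmask=N counts cycles where >= N uops executed on the core).
    cycles_ge_1 = 900   # cpu@UOPS_EXECUTED.CORE,cmask=1@
    cycles_ge_2 = 600   # cpu@UOPS_EXECUTED.CORE,cmask=2@
    cycles_ge_3 = 250   # cpu@UOPS_EXECUTED.CORE,cmask=3@
    core_clks = 1000    # tma_info_core_core_clks

    # Exactly-N-uop cycles fall out of adjacent differences, mirroring
    # the MetricExpr for tma_ports_utilized_1/2 and _3m above.
    utilized_1 = (cycles_ge_1 - cycles_ge_2) / core_clks
    utilized_2 = (cycles_ge_2 - cycles_ge_3) / core_clks
    utilized_3m = cycles_ge_3 / core_clks
    print(utilized_1, utilized_2, utilized_3m)  # 0.3 0.35 0.25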
@@ -1137,17 +1133,18 @@
"MetricExpr": "(200 * (MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) + 180 * (MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD)))) / tma_info_thread_clks",
"MetricGroup": "Offcore;Server;Snoop;TopdownL5;tma_L5_group;tma_issueSyncxn;tma_mem_latency_group",
"MetricName": "tma_remote_cache",
- "MetricThreshold": "tma_remote_cache > 0.05 & tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues. This is caused often due to non-optimal NUMA allocations. Sample with: MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM, MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD. Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_remote_cache > 0.05 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM_PS;MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD_PS. Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "310 * (MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) / tma_info_thread_clks",
"MetricGroup": "Server;Snoop;TopdownL5;tma_L5_group;tma_mem_latency_group",
"MetricName": "tma_remote_mem",
- "MetricThreshold": "tma_remote_mem > 0.1 & tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. Sample with: MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM",
+ "MetricThreshold": "tma_remote_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM_PS",
"ScaleUnit": "100%"
},
{
@@ -1167,7 +1164,7 @@
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_split_loads",
"MetricThreshold": "tma_split_loads > 0.3",
- "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_UOPS_RETIRED.SPLIT_LOADS_PS",
"ScaleUnit": "100%"
},
{
@@ -1175,8 +1172,8 @@
"MetricExpr": "2 * MEM_UOPS_RETIRED.SPLIT_STORES / tma_info_core_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
"MetricName": "tma_split_stores",
- "MetricThreshold": "tma_split_stores > 0.2 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_UOPS_RETIRED.SPLIT_STORES. Related metrics: tma_port_4",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_UOPS_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
"ScaleUnit": "100%"
},
{
@@ -1184,7 +1181,7 @@
"MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_core_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
- "MetricThreshold": "tma_sq_full > 0.3 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
"ScaleUnit": "100%"
},
@@ -1193,8 +1190,8 @@
"MetricExpr": "RESOURCE_STALLS.SB / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_store_bound",
- "MetricThreshold": "tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_UOPS_RETIRED.ALL_STORES",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_UOPS_RETIRED.ALL_STORES_PS",
"ScaleUnit": "100%"
},
{
@@ -1202,8 +1199,8 @@
"MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
- "MetricThreshold": "tma_store_fwd_blk > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
"ScaleUnit": "100%"
},
{
@@ -1212,8 +1209,8 @@
"MetricExpr": "(L2_RQSTS.RFO_HIT * 9 * (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) + (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
"MetricGroup": "BvML;LockCont;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
- "MetricThreshold": "tma_store_latency > 0.1 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_branch_resteers, tma_fb_full, tma_l3_hit_latency, tma_lock_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
"ScaleUnit": "100%"
},
{
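Most MetricThreshold changes in this file add explicit parentheses, e.g. `tma_local_mem > 0.1 & (tma_mem_latency > 0.1 & (...))`. Since `&` is associative the boolean result is identical; the grouping appears to encode the TMA tree nesting, with each inner group being the parent node's own threshold, so a node is only flagged when all of its ancestors are flagged too. A small sketch of evaluating one such nested threshold, with assumed metric values chosen purely for illustration:

    # Assumed metric values (fractions of slots/cycles), illustration only.
    tma = {
        "tma_local_mem": 0.15,
        "tma_mem_latency": 0.2,
        "tma_dram_bound": 0.3,
        "tma_memory_bound": 0.35,
        "tma_backend_bound": 0.4,
    }

    # tma_local_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 &
    #   (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))
    flagged = tma["tma_local_mem"] > 0.1 and (
        tma["tma_mem_latency"] > 0.1 and (
            tma["tma_dram_bound"] > 0.1 and (
                tma["tma_memory_bound"] > 0.2
                and tma["tma_backend_bound"] > 0.2)))
    print(flagged)  # True: the node and all its TMA ancestors exceed thresholds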
diff --git a/tools/perf/pmu-events/arch/x86/icelake/cache.json b/tools/perf/pmu-events/arch/x86/icelake/cache.json
index 015f70f157d1..e7bb2ca6f183 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/cache.json
@@ -446,6 +446,16 @@
"UMask": "0x20"
},
{
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop was sent or not.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -506,6 +516,16 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop was sent or not.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -566,6 +586,16 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop was sent or not.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -626,6 +656,16 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that hit a cacheline in the L3 where a snoop was sent or not.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -656,6 +696,16 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent or not.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -716,6 +766,16 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent or not.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
diff --git a/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json b/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json
index 63e28a03dc60..cf9ed3edb694 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json
@@ -1,63 +1,63 @@
[
{
"BriefDescription": "C10 residency percent per package",
- "MetricExpr": "cstate_pkg@c10\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c10\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C10_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C2 residency percent per package",
- "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per package",
- "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per core",
- "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per package",
- "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per core",
- "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per package",
- "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C8 residency percent per package",
- "MetricExpr": "cstate_pkg@c8\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c8\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C8_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C9 residency percent per package",
- "MetricExpr": "cstate_pkg@c9\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c9\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C9_Pkg_Residency",
"ScaleUnit": "100%"
@@ -85,16 +85,15 @@
},
{
"BriefDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_4k_aliasing",
- "MetricThreshold": "tma_4k_aliasing > 0.2 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound)",
+ "MetricThreshold": "tma_4k_aliasing > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations",
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
"MetricExpr": "(UOPS_DISPATCHED.PORT_0 + UOPS_DISPATCHED.PORT_1 + UOPS_DISPATCHED.PORT_5 + UOPS_DISPATCHED.PORT_6) / (4 * tma_info_core_core_clks)",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_alu_op_utilization",
@@ -106,7 +105,7 @@
"MetricExpr": "34 * ASSISTS.ANY / tma_info_thread_slots",
"MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
- "MetricThreshold": "tma_assists > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: ASSISTS.ANY",
"ScaleUnit": "100%"
},
@@ -129,12 +128,13 @@
"MetricName": "tma_bad_speculation",
"MetricThreshold": "tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
- "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
"MetricGroup": "BigFootprint;BvBC;Fed;Frontend;IcMiss;MemoryTLB",
"MetricName": "tma_bottleneck_big_code",
"MetricThreshold": "tma_bottleneck_big_code > 20"
@@ -148,39 +148,44 @@
"PublicDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. (A lower bound)"
},
{
+ "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
+ "MetricGroup": "BvCB;Cor;tma_issueComp",
+ "MetricName": "tma_bottleneck_compute_bound_est",
+ "MetricThreshold": "tma_bottleneck_compute_bound_est > 20",
+ "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. Related metrics: "
+ },
+ {
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_fb_full / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
"MetricGroup": "BvMB;Mem;MemoryBW;Offcore;tma_issueBW",
- "MetricName": "tma_bottleneck_cache_memory_bandwidth",
- "MetricThreshold": "tma_bottleneck_cache_memory_bandwidth > 20",
+ "MetricName": "tma_bottleneck_data_cache_memory_bandwidth",
+ "MetricThreshold": "tma_bottleneck_data_cache_memory_bandwidth > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_l1_latency_dependency / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_lock_latency / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_split_loads / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_split_stores / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_store_latency / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l1_latency_dependency / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_lock_latency / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_loads / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_stores / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
"MetricGroup": "BvML;Mem;MemoryLat;Offcore;tma_issueLat",
- "MetricName": "tma_bottleneck_cache_memory_latency",
- "MetricThreshold": "tma_bottleneck_cache_memory_latency > 20",
+ "MetricName": "tma_bottleneck_data_cache_memory_latency",
+ "MetricThreshold": "tma_bottleneck_data_cache_memory_latency > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency"
},
{
- "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
- "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_serializing_operation + tma_ports_utilization) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_serializing_operation + tma_ports_utilization)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
- "MetricGroup": "BvCB;Cor;tma_issueComp",
- "MetricName": "tma_bottleneck_compute_bound_est",
- "MetricThreshold": "tma_bottleneck_compute_bound_est > 20",
- "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy"
- },
- {
"BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)",
- "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_mispredicts_resteers) / (tma_mispredicts_resteers + tma_clears_resteers + tma_unknown_branches)) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_ms / (tma_mite + tma_dsb + tma_lsd + tma_ms))) - tma_bottleneck_big_code",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_mispredicts_resteers) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_ms)) - tma_bottleneck_big_code",
"MetricGroup": "BvFB;Fed;FetchBW;Frontend",
"MetricName": "tma_bottleneck_instruction_fetch_bw",
"MetricThreshold": "tma_bottleneck_instruction_fetch_bw > 20"
},
{
"BriefDescription": "Total pipeline cost of irregular execution (e.g",
- "MetricExpr": "100 * (tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_mispredicts_resteers) / (tma_mispredicts_resteers + tma_clears_resteers + tma_unknown_branches)) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_ms / (tma_mite + tma_dsb + tma_lsd + tma_ms)) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + tma_core_bound * RS_EVENTS.EMPTY_CYCLES / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_serializing_operation + tma_ports_utilization) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_mispredicts_resteers) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_ms) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + tma_core_bound * RS_EVENTS.EMPTY_CYCLES / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
"MetricGroup": "Bad;BvIO;Cor;Ret;tma_issueMS",
"MetricName": "tma_bottleneck_irregular_overhead",
"MetricThreshold": "tma_bottleneck_irregular_overhead > 10",
@@ -188,7 +193,8 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_dtlb_store / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
"MetricGroup": "BvMT;Mem;MemoryTLB;Offcore;tma_issueTLB",
"MetricName": "tma_bottleneck_memory_data_tlbs",
"MetricThreshold": "tma_bottleneck_memory_data_tlbs > 20",
@@ -196,15 +202,17 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * tma_false_sharing / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
"MetricGroup": "BvMS;LockCont;Mem;Offcore;tma_issueSyncxn",
"MetricName": "tma_bottleneck_memory_synchronization",
"MetricThreshold": "tma_bottleneck_memory_synchronization > 10",
- "PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears"
+ "PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache"
},
{
"BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
- "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Bad;BadSpec;BrMispredicts;BvMP;tma_issueBM",
"MetricName": "tma_bottleneck_mispredictions",
"MetricThreshold": "tma_bottleneck_mispredictions > 20",
@@ -212,21 +220,23 @@
},
{
"BriefDescription": "Total pipeline cost of remaining bottlenecks in the back-end",
- "MetricExpr": "100 - (tma_bottleneck_big_code + tma_bottleneck_instruction_fetch_bw + tma_bottleneck_mispredictions + tma_bottleneck_cache_memory_bandwidth + tma_bottleneck_cache_memory_latency + tma_bottleneck_memory_data_tlbs + tma_bottleneck_memory_synchronization + tma_bottleneck_compute_bound_est + tma_bottleneck_irregular_overhead + tma_bottleneck_branching_overhead + tma_bottleneck_useful_work)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 - (tma_bottleneck_big_code + tma_bottleneck_instruction_fetch_bw + tma_bottleneck_mispredictions + tma_bottleneck_data_cache_memory_bandwidth + tma_bottleneck_data_cache_memory_latency + tma_bottleneck_memory_data_tlbs + tma_bottleneck_memory_synchronization + tma_bottleneck_compute_bound_est + tma_bottleneck_irregular_overhead + tma_bottleneck_branching_overhead + tma_bottleneck_useful_work)",
"MetricGroup": "BvOB;Cor;Offcore",
"MetricName": "tma_bottleneck_other_bottlenecks",
"MetricThreshold": "tma_bottleneck_other_bottlenecks > 20",
- "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls"
+ "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls."
},
{
- "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead",
+ "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + 2 * BR_INST_RETIRED.NEAR_CALL + INST_RETIRED.NOP) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
"MetricGroup": "BvUW;Ret",
"MetricName": "tma_bottleneck_useful_work",
"MetricThreshold": "tma_bottleneck_useful_work > 20"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions.",
"MetricExpr": "tma_light_operations * BR_INST_RETIRED.ALL_BRANCHES / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_branch_instructions",
@@ -248,8 +258,8 @@
"MetricExpr": "INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks + tma_unknown_branches",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_branch_resteers",
- "MetricThreshold": "tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_l3_hit_latency, tma_store_latency",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
"ScaleUnit": "100%"
},
{
@@ -257,8 +267,8 @@
"MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
"MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_cisc",
- "MetricThreshold": "tma_cisc > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
- "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
"ScaleUnit": "100%"
},
{
@@ -266,24 +276,24 @@
"MetricExpr": "(1 - BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT)) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
"MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
"MetricName": "tma_clears_resteers",
- "MetricThreshold": "tma_clears_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that hit in the L2 cache",
+ "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that hit in the L2 cache.",
"MetricExpr": "max(0, tma_icache_misses - tma_code_l2_miss)",
"MetricGroup": "FetchLat;IcMiss;Offcore;TopdownL4;tma_L4_group;tma_icache_misses_group",
"MetricName": "tma_code_l2_hit",
- "MetricThreshold": "tma_code_l2_hit > 0.05 & tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_l2_hit > 0.05 & (tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that miss in the L2 cache",
+ "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that miss in the L2 cache.",
"MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD / tma_info_thread_clks",
"MetricGroup": "FetchLat;IcMiss;Offcore;TopdownL4;tma_L4_group;tma_icache_misses_group",
"MetricName": "tma_code_l2_miss",
- "MetricThreshold": "tma_code_l2_miss > 0.05 & tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_l2_miss > 0.05 & (tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
@@ -291,7 +301,7 @@
"MetricExpr": "max(0, tma_itlb_misses - tma_code_stlb_miss)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group",
"MetricName": "tma_code_stlb_hit",
- "MetricThreshold": "tma_code_stlb_hit > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_hit > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
@@ -299,33 +309,33 @@
"MetricExpr": "ITLB_MISSES.WALK_ACTIVE / tma_info_thread_clks",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group",
"MetricName": "tma_code_stlb_miss",
- "MetricThreshold": "tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for (instruction) code accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for (instruction) code accesses.",
"MetricExpr": "tma_code_stlb_miss * ITLB_MISSES.WALK_COMPLETED_2M_4M / (ITLB_MISSES.WALK_COMPLETED_4K + ITLB_MISSES.WALK_COMPLETED_2M_4M)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group",
"MetricName": "tma_code_stlb_miss_2m",
- "MetricThreshold": "tma_code_stlb_miss_2m > 0.05 & tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss_2m > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for (instruction) code accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for (instruction) code accesses.",
"MetricExpr": "tma_code_stlb_miss * ITLB_MISSES.WALK_COMPLETED_4K / (ITLB_MISSES.WALK_COMPLETED_4K + ITLB_MISSES.WALK_COMPLETED_2M_4M)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group",
"MetricName": "tma_code_stlb_miss_4k",
- "MetricThreshold": "tma_code_stlb_miss_4k > 0.05 & tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss_4k > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "((32.5 * tma_info_system_core_frequency - 3.5 * tma_info_system_core_frequency) * MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM + (27 * tma_info_system_core_frequency - 3.5 * tma_info_system_core_frequency) * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "(29 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM + 23.5 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
- "MetricThreshold": "tma_contested_accesses > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM, MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_bottleneck_memory_synchronization, tma_data_sharing, tma_false_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_bottleneck_memory_synchronization, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
@@ -335,25 +345,25 @@
"MetricName": "tma_core_bound",
"MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations)",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "(27 * tma_info_system_core_frequency - 3.5 * tma_info_system_core_frequency) * MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "23.5 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
- "MetricThreshold": "tma_data_sharing > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_false_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder",
- "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=0x1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=0x2@) / tma_info_core_core_clks / 2",
+ "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_core_clks / 2",
"MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group",
"MetricName": "tma_decoder0_alone",
- "MetricThreshold": "tma_decoder0_alone > 0.1 & tma_mite > 0.1 & tma_fetch_bandwidth > 0.2",
+ "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions",
"ScaleUnit": "100%"
},
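The cmask edits in this hunk (\,cmask\=0x1@ becoming \,cmask\=1@) only normalize the counter-mask spelling; the semantics are unchanged, since cmask=N counts cycles in which the event incremented by at least N. Roughly what the decoder-0 expression above computes, with made-up counts:

# Hypothetical counts; cmask=N counts cycles where the event's
# per-cycle increment was at least N.
core_clks = 1_000_000
cycles_ge_1_decoder = 400_000   # INST_DECODED.DECODERS,cmask=1
cycles_ge_2_decoders = 250_000  # INST_DECODED.DECODERS,cmask=2

# Cycles where decoder-0 was the only active decoder, normalized to
# core clocks; mirrors the MetricExpr above, including its trailing /2.
tma_decoder0_alone = (cycles_ge_1_decoder - cycles_ge_2_decoders) / core_clks / 2
print(f"{tma_decoder0_alone:.1%}")  # ScaleUnit 100% renders it as a percentage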
@@ -362,7 +372,7 @@
"MetricExpr": "ARITH.DIVIDER_ACTIVE / tma_info_thread_clks",
"MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
- "MetricThreshold": "tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
"ScaleUnit": "100%"
},
@@ -372,7 +382,7 @@
"MetricExpr": "CYCLE_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks + (CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks - tma_l2_bound",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_dram_bound",
- "MetricThreshold": "tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_MISS",
"ScaleUnit": "100%"
},
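tma_dram_bound above is derived by subtraction: all stalls past L3, plus the slice of L1D-miss stalls not absorbed by L2, minus what tma_l2_bound already claims. With stand-in counts (purely illustrative):

clks = 1_000_000
stalls_l3_miss = 120_000   # CYCLE_ACTIVITY.STALLS_L3_MISS
stalls_l1d_miss = 300_000  # CYCLE_ACTIVITY.STALLS_L1D_MISS
stalls_l2_miss = 200_000   # CYCLE_ACTIVITY.STALLS_L2_MISS
tma_l2_bound = 0.05        # computed elsewhere in the metric tree

# Mirrors the MetricExpr: L3-miss stalls, plus L1D-miss stalls not
# covered by L2-miss stalls, minus the share attributed to L2 Bound.
tma_dram_bound = (stalls_l3_miss / clks
                  + (stalls_l1d_miss - stalls_l2_miss) / clks
                  - tma_l2_bound)
print(f"{tma_dram_bound:.1%}")  # flagged when > 0.1 per the threshold above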
@@ -382,7 +392,7 @@
"MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_dsb",
"MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2",
- "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
"ScaleUnit": "100%"
},
{
@@ -390,26 +400,26 @@
"MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_thread_clks",
"MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_dsb_switches",
- "MetricThreshold": "tma_dsb_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
- "MetricExpr": "min(7 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=0x1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
+ "MetricExpr": "min(7 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
- "MetricThreshold": "tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_store",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
- "MetricExpr": "(7 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=0x1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_core_clks",
+ "MetricExpr": "(7 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_core_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
- "MetricThreshold": "tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_load",
"ScaleUnit": "100%"
},
{
@@ -417,8 +427,8 @@
"MetricExpr": "32.5 * tma_info_system_core_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
- "MetricThreshold": "tma_false_sharing > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
@@ -427,7 +437,7 @@
"MetricGroup": "BvMB;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
- "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
"ScaleUnit": "100%"
},
{
@@ -437,7 +447,7 @@
"MetricName": "tma_fetch_bandwidth",
"MetricThreshold": "tma_fetch_bandwidth > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1, FRONTEND_RETIRED.LATENCY_GE_1, FRONTEND_RETIRED.LATENCY_GE_2. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1;FRONTEND_RETIRED.LATENCY_GE_1;FRONTEND_RETIRED.LATENCY_GE_2. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
@@ -447,7 +457,7 @@
"MetricName": "tma_fetch_latency",
"MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16, FRONTEND_RETIRED.LATENCY_GE_8",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
"ScaleUnit": "100%"
},
{
@@ -465,7 +475,7 @@
"MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fp_arith",
"MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
"ScaleUnit": "100%"
},
{
@@ -474,15 +484,15 @@
"MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group",
"MetricName": "tma_fp_assists",
"MetricThreshold": "tma_fp_assists > 0.1",
- "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals)",
+ "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals).",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of cycles where the Floating-Point Divider unit was active",
+ "BriefDescription": "This metric represents fraction of cycles where the Floating-Point Divider unit was active.",
"MetricExpr": "ARITH.FP_DIVIDER_ACTIVE / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_divider_group",
"MetricName": "tma_fp_divider",
- "MetricThreshold": "tma_fp_divider > 0.2 & tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_fp_divider > 0.2 & (tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
{
@@ -490,7 +500,7 @@
"MetricExpr": "FP_ARITH_INST_RETIRED.SCALAR / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_scalar",
- "MetricThreshold": "tma_fp_scalar > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
"PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -499,7 +509,7 @@
"MetricExpr": "FP_ARITH_INST_RETIRED.VECTOR / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_vector",
- "MetricThreshold": "tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
"PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -508,7 +518,7 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_128b",
- "MetricThreshold": "tma_fp_vector_128b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
"PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -517,7 +527,7 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_256b",
- "MetricThreshold": "tma_fp_vector_256b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
"PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -526,7 +536,7 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_512b",
- "MetricThreshold": "tma_fp_vector_512b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_vector_512b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
"PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
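The tma_fp_vector_* entries above split retired FP vector uops by width, each normalized to total retired slots (tma_retiring * slots); FMA double counting is why the fractions can overshoot. A toy recomputation with invented counter values:

slots = 4_000_000
tma_retiring = 0.6
retired_slots = tma_retiring * slots

fp_128b = 30_000 + 50_000    # 128B_PACKED_DOUBLE + 128B_PACKED_SINGLE
fp_256b = 80_000 + 120_000   # 256B_PACKED_DOUBLE + 256B_PACKED_SINGLE
fp_512b = 10_000 + 14_000    # 512B_PACKED_DOUBLE + 512B_PACKED_SINGLE

for name, uops in [("128b", fp_128b), ("256b", fp_256b), ("512b", fp_512b)]:
    # Each FMA retires as one uop but counts in both operand events,
    # so these fractions may overcount on FMA-heavy code.
    print(f"tma_fp_vector_{name}: {uops / retired_slots:.1%}")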
@@ -538,17 +548,17 @@
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences",
- "MetricExpr": "tma_microcode_sequencer + tma_retiring * (UOPS_DECODED.DEC0 - cpu@UOPS_DECODED.DEC0\\,cmask\\=0x1@) / IDQ.MITE_UOPS",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
+ "MetricExpr": "tma_microcode_sequencer + tma_retiring * (UOPS_DECODED.DEC0 - cpu@UOPS_DECODED.DEC0\\,cmask\\=1@) / IDQ.MITE_UOPS",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_heavy_operations",
"MetricThreshold": "tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+])",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+])",
"ScaleUnit": "100%"
},
{
@@ -556,8 +566,8 @@
"MetricExpr": "ICACHE_DATA.STALLS / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
- "MetricThreshold": "tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS, FRONTEND_RETIRED.L1I_MISS",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
"ScaleUnit": "100%"
},
{
@@ -569,28 +579,28 @@
"PublicDescription": "Branch Misprediction Cost: Cycles representing fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_bottleneck_mispredictions, tma_branch_mispredicts, tma_mispredicts_resteers"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for conditional non-taken branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for conditional non-taken branches (lower number means higher occurrence rate).",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_NTAKEN",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_cond_ntaken",
"MetricThreshold": "tma_info_bad_spec_ipmisp_cond_ntaken < 200"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for conditional taken branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for conditional taken branches (lower number means higher occurrence rate).",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_TAKEN",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_cond_taken",
"MetricThreshold": "tma_info_bad_spec_ipmisp_cond_taken < 200"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.INDIRECT",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_indirect",
- "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1000"
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate).",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.RET",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_ret",
@@ -619,7 +629,8 @@
},
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck",
- "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_latency + tma_fetch_bandwidth)) * (tma_dsb / (tma_mite + tma_dsb + tma_lsd + tma_ms)))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_bandwidth + tma_fetch_latency)) * (tma_dsb / (tma_dsb + tma_lsd + tma_mite + tma_ms)))",
"MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_bandwidth",
"MetricThreshold": "tma_info_botlnk_l2_dsb_bandwidth > 10",
@@ -628,7 +639,7 @@
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_mite / (tma_mite + tma_dsb + tma_lsd + tma_ms))",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_lsd + tma_mite + tma_ms))",
"MetricGroup": "DSBmiss;Fed;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_misses",
"MetricThreshold": "tma_info_botlnk_l2_dsb_misses > 10",
@@ -637,10 +648,11 @@
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
"MetricName": "tma_info_botlnk_l2_ic_misses",
- "MetricThreshold": "tma_info_botlnk_l2_ic_misses > 5"
+ "MetricThreshold": "tma_info_botlnk_l2_ic_misses > 5",
+ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: "
},
{
"BriefDescription": "Fraction of branches that are CALL or RET",
@@ -701,11 +713,11 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED.VECTOR) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
- "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)"
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=0x1@",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "tma_info_core_ilp"
},
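tma_info_core_ilp divides uops executed by the cycles in which anything executed at all (the cmask=1 filter), i.e. the average number of uops per non-idle execution cycle. A sketch with placeholder counts:

uops_executed = 2_400_000          # UOPS_EXECUTED.THREAD
cycles_with_execution = 900_000    # UOPS_EXECUTED.THREAD,cmask=1

# Average uops executed per cycle, counting only cycles in which at
# least one uop executed (that is what cmask=1 selects).
tma_info_core_ilp = uops_executed / cycles_with_execution
print(f"ILP = {tma_info_core_ilp:.2f}")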
@@ -718,20 +730,20 @@
"PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp"
},
{
- "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details",
- "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=0x1\\,edge\\=0x1@",
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
"MetricGroup": "DSBmiss",
"MetricName": "tma_info_frontend_dsb_switch_cost"
},
{
"BriefDescription": "Average number of Uops issued by front-end when it issued something",
- "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=0x1@",
+ "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=1@",
"MetricGroup": "Fed;FetchBW",
"MetricName": "tma_info_frontend_fetch_upc"
},
{
"BriefDescription": "Average Latency for L1 instruction cache misses",
- "MetricExpr": "ICACHE_16B.IFDATA_STALL / cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=0x1\\,edge\\=0x1@",
+ "MetricExpr": "ICACHE_16B.IFDATA_STALL / cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@",
"MetricGroup": "Fed;FetchLat;IcMiss",
"MetricName": "tma_info_frontend_icache_miss_latency"
},
@@ -773,7 +785,7 @@
"MetricName": "tma_info_frontend_tbpc"
},
{
- "BriefDescription": "Branch instructions per taken branch",
+ "BriefDescription": "Branch instructions per taken branch.",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;PGO",
"MetricName": "tma_info_inst_mix_bptkbranch"
@@ -791,7 +803,7 @@
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
"MetricThreshold": "tma_info_inst_mix_iparith < 10",
- "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW"
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
@@ -799,7 +811,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx128",
"MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
@@ -807,7 +819,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx256",
"MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate)",
@@ -815,7 +827,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx512",
"MetricThreshold": "tma_info_inst_mix_iparith_avx512 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
@@ -823,7 +835,7 @@
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_dp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
@@ -831,7 +843,7 @@
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_sp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
@@ -886,7 +898,7 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
- "MetricThreshold": "tma_info_inst_mix_iptb < 5 * 2 + 1",
+ "MetricThreshold": "tma_info_inst_mix_iptb < 11",
"PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp"
},
{
@@ -1005,7 +1017,7 @@
},
{
"BriefDescription": "Average Parallel L2 cache miss demand Loads",
- "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=0x1@",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@",
"MetricGroup": "Memory_BW;Offcore",
"MetricName": "tma_info_memory_latency_load_l2_mlp"
},
@@ -1067,8 +1079,8 @@
"MetricName": "tma_info_memory_tlb_store_stlb_mpki"
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per core",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=0x1@)",
+ "BriefDescription": "Mem;Backend;CacheHits",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute"
},
@@ -1091,22 +1103,28 @@
"MetricName": "tma_info_pipeline_fetch_mite"
},
{
+ "BriefDescription": "Average number of uops fetched from MS per cycle",
+ "MetricExpr": "IDQ.MS_UOPS / cpu@IDQ.MS_UOPS\\,cmask\\=1@",
+ "MetricGroup": "Fed;FetchLat;MicroSeq",
+ "MetricName": "tma_info_pipeline_fetch_ms"
+ },
+ {
"BriefDescription": "Instructions per a microcode Assist invocation",
"MetricExpr": "INST_RETIRED.ANY / ASSISTS.ANY",
"MetricGroup": "MicroSeq;Pipeline;Ret;Retire",
"MetricName": "tma_info_pipeline_ipassist",
- "MetricThreshold": "tma_info_pipeline_ipassist < 100000",
+ "MetricThreshold": "tma_info_pipeline_ipassist < 100e3",
"PublicDescription": "Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)"
},
{
- "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired",
- "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=0x1@",
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
"MetricGroup": "Pipeline;Ret",
"MetricName": "tma_info_pipeline_retire"
},
{
"BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]",
- "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / tma_info_system_time",
+ "MetricExpr": "tma_info_system_turbo_utilization * msr@tsc@ / 1e9 / tma_info_system_time",
"MetricGroup": "Power;Summary",
"MetricName": "tma_info_system_core_frequency"
},
@@ -1118,7 +1136,7 @@
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
@@ -1127,7 +1145,7 @@
"MetricExpr": "64 * (UNC_ARB_TRK_REQUESTS.ALL + UNC_ARB_COH_TRK_REQUESTS.ALL) / 1e6 / tma_info_system_time / 1e3",
"MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW",
"MetricName": "tma_info_system_dram_bw_use",
- "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth, tma_sq_full"
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth, tma_sq_full"
},
{
"BriefDescription": "Giga Floating Point Operations Per Second",
@@ -1141,14 +1159,13 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
"MetricGroup": "Branches;OS",
"MetricName": "tma_info_system_ipfarbranch",
- "MetricThreshold": "tma_info_system_ipfarbranch < 1000000"
+ "MetricThreshold": "tma_info_system_ipfarbranch < 1e6"
},
{
"BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
"MetricGroup": "OS",
- "MetricName": "tma_info_system_kernel_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_system_kernel_cpi"
},
{
"BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
@@ -1175,7 +1192,7 @@
"MetricExpr": "CORE_POWER.LVL0_TURBO_LICENSE / tma_info_core_core_clks",
"MetricGroup": "Power",
"MetricName": "tma_info_system_power_license0_utilization",
- "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes"
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes."
},
{
"BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1",
@@ -1183,7 +1200,7 @@
"MetricGroup": "Power",
"MetricName": "tma_info_system_power_license1_utilization",
"MetricThreshold": "tma_info_system_power_license1_utilization > 0.5",
- "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions"
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions."
},
{
"BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX)",
@@ -1191,7 +1208,7 @@
"MetricGroup": "Power",
"MetricName": "tma_info_system_power_license2_utilization",
"MetricThreshold": "tma_info_system_power_license2_utilization > 0.5",
- "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX). This includes high current AVX 512-bit instructions"
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX). This includes high current AVX 512-bit instructions."
},
{
"BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
@@ -1219,7 +1236,7 @@
"MetricName": "tma_info_system_turbo_utilization"
},
{
- "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active",
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Pipeline",
"MetricName": "tma_info_thread_clks"
@@ -1228,15 +1245,14 @@
"BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / tma_info_thread_ipc",
"MetricGroup": "Mem;Pipeline",
- "MetricName": "tma_info_thread_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_thread_cpi"
},
{
"BriefDescription": "The ratio of Executed- by Issued-Uops",
"MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
"MetricGroup": "Cor;Pipeline",
"MetricName": "tma_info_thread_execute_per_issue",
- "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage"
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
},
{
"BriefDescription": "Instructions Per Cycle (per Logical Processor)",
@@ -1246,13 +1262,13 @@
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
- "MetricExpr": "slots",
+ "MetricExpr": "TOPDOWN.SLOTS",
"MetricGroup": "TmaL1;tma_L1_group",
"MetricName": "tma_info_thread_slots"
},
{
"BriefDescription": "Fraction of Physical Core issue-slots utilized by this Logical Processor",
- "MetricExpr": "(tma_info_thread_slots / (slots / 2) if #SMT_on else 1)",
+ "MetricExpr": "(tma_info_thread_slots / (TOPDOWN.SLOTS / 2) if #SMT_on else 1)",
"MetricGroup": "SMT;TmaL1;tma_L1_group",
"MetricName": "tma_info_thread_slots_utilization"
},
@@ -1268,14 +1284,14 @@
"MetricExpr": "tma_retiring * tma_info_thread_slots / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
- "MetricThreshold": "tma_info_thread_uptb < 5 * 1.5"
+ "MetricThreshold": "tma_info_thread_uptb < 7.5"
},
{
- "BriefDescription": "This metric represents fraction of cycles where the Integer Divider unit was active",
+ "BriefDescription": "This metric represents fraction of cycles where the Integer Divider unit was active.",
"MetricExpr": "tma_divider - tma_fp_divider",
"MetricGroup": "TopdownL4;tma_L4_group;tma_divider_group",
"MetricName": "tma_int_divider",
- "MetricThreshold": "tma_int_divider > 0.2 & tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_int_divider > 0.2 & (tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
{
@@ -1283,8 +1299,8 @@
"MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
- "MetricThreshold": "tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS, FRONTEND_RETIRED.ITLB_MISS",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
"ScaleUnit": "100%"
},
{
@@ -1292,17 +1308,17 @@
"MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_thread_clks, 0)",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
"MetricName": "tma_l1_bound",
- "MetricThreshold": "tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 Data (L1D) cache. The L1D cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1D. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache",
+ "BriefDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache",
"MetricExpr": "min(2 * (MEM_INST_RETIRED.ALL_LOADS - MEM_LOAD_RETIRED.FB_HIT - MEM_LOAD_RETIRED.L1_MISS) * 20 / 100, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_l1_latency_dependency",
- "MetricThreshold": "tma_l1_latency_dependency > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache. The short latency of the L1D cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
+ "MetricThreshold": "tma_l1_latency_dependency > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache. The short latency of the L1D cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
"ScaleUnit": "100%"
},
{
@@ -1311,7 +1327,7 @@
"MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + L1D_PEND_MISS.FB_FULL_PERIODS) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks)",
"MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
- "MetricThreshold": "tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT",
"ScaleUnit": "100%"
},
@@ -1320,27 +1336,26 @@
"MetricExpr": "3.5 * tma_info_system_core_frequency * MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_l2_bound_group",
"MetricName": "tma_l2_hit_latency",
- "MetricThreshold": "tma_l2_hit_latency > 0.05 & tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l2_hit_latency > 0.05 & (tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L2 cache under unloaded scenarios (possibly L2 latency limited). Avoiding L1 cache misses (i.e. L1 misses/L2 hits) will improve the latency. Sample with: MEM_LOAD_RETIRED.L2_HIT",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS) / tma_info_thread_clks",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
- "MetricThreshold": "tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
- "MetricExpr": "(12.5 * tma_info_system_core_frequency - 3.5 * tma_info_system_core_frequency) * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks",
+ "MetricExpr": "9 * tma_info_system_core_frequency * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
- "MetricThreshold": "tma_l3_hit_latency > 0.1 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT. Related metrics: tma_bottleneck_cache_memory_latency, tma_branch_resteers, tma_mem_latency, tma_store_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_bottleneck_data_cache_memory_latency, tma_mem_latency",
"ScaleUnit": "100%"
},
{
@@ -1348,18 +1363,18 @@
"MetricExpr": "DECODE.LCP / tma_info_thread_clks",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_lcp",
- "MetricThreshold": "tma_lcp > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation)",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
"MetricExpr": "max(0, tma_retiring - tma_heavy_operations)",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_light_operations",
"MetricThreshold": "tma_light_operations > 0.6",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
"ScaleUnit": "100%"
},
{
@@ -1376,7 +1391,7 @@
"MetricExpr": "tma_dtlb_load - tma_load_stlb_miss",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
"MetricName": "tma_load_stlb_hit",
- "MetricThreshold": "tma_load_stlb_hit > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_hit > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
@@ -1384,31 +1399,31 @@
"MetricExpr": "DTLB_LOAD_MISSES.WALK_ACTIVE / tma_info_thread_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
"MetricName": "tma_load_stlb_miss",
- "MetricThreshold": "tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_1G / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_1g",
- "MetricThreshold": "tma_load_stlb_miss_1g > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_1g > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_2m",
- "MetricThreshold": "tma_load_stlb_miss_2m > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_2m > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_4K / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_4k",
- "MetricThreshold": "tma_load_stlb_miss_4k > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_4k > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
@@ -1417,7 +1432,7 @@
"MetricExpr": "(16 * max(0, MEM_INST_RETIRED.LOCK_LOADS - L2_RQSTS.ALL_RFO) + MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES * (10 * L2_RQSTS.RFO_HIT + min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO))) / tma_info_thread_clks",
"MetricGroup": "LockCont;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
- "MetricThreshold": "tma_lock_latency > 0.2 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
"ScaleUnit": "100%"
},
@@ -1427,7 +1442,7 @@
"MetricGroup": "FetchBW;LSD;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_lsd",
"MetricThreshold": "tma_lsd > 0.15 & tma_fetch_bandwidth > 0.2",
- "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit. LSD typically does well sustaining Uop supply. However; in some rare cases; optimal uop-delivery could not be reached for small loops whose size (in terms of number of uops) does not suit well the LSD structure",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit. LSD typically does well sustaining Uop supply. However; in some rare cases; optimal uop-delivery could not be reached for small loops whose size (in terms of number of uops) does not suit well the LSD structure.",
"ScaleUnit": "100%"
},
{
@@ -1437,16 +1452,16 @@
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_bottleneck_memory_synchronization, tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_bottleneck_memory_synchronization, tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
- "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=0x4@) / tma_info_thread_clks",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
- "MetricThreshold": "tma_mem_bandwidth > 0.2 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
"ScaleUnit": "100%"
},
{
@@ -1454,8 +1469,8 @@
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
"MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
- "MetricThreshold": "tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_bottleneck_cache_memory_latency, tma_l3_hit_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_bottleneck_data_cache_memory_latency, tma_l3_hit_latency",
"ScaleUnit": "100%"
},
{
@@ -1465,11 +1480,11 @@
"MetricName": "tma_memory_bound",
"MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two)",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations , uops for memory load or store accesses",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_light_operations * MEM_INST_RETIRED.ANY / INST_RETIRED.ANY",
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
@@ -1491,7 +1506,7 @@
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
"MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
"MetricName": "tma_mispredicts_resteers",
- "MetricThreshold": "tma_mispredicts_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_bottleneck_mispredictions, tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost",
"ScaleUnit": "100%"
},
@@ -1506,24 +1521,24 @@
},
{
"BriefDescription": "This metric represents fraction of cycles where (only) 4 uops were delivered by the MITE pipeline",
- "MetricExpr": "(cpu@IDQ.MITE_UOPS\\,cmask\\=0x4@ - cpu@IDQ.MITE_UOPS\\,cmask\\=0x5@) / tma_info_thread_clks",
+ "MetricExpr": "(cpu@IDQ.MITE_UOPS\\,cmask\\=4@ - cpu@IDQ.MITE_UOPS\\,cmask\\=5@) / tma_info_thread_clks",
"MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_mite_group",
"MetricName": "tma_mite_4wide",
- "MetricThreshold": "tma_mite_4wide > 0.05 & tma_mite > 0.1 & tma_fetch_bandwidth > 0.2",
+ "MetricThreshold": "tma_mite_4wide > 0.05 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued , the Count Domain; [ADL+] cycles)",
+ "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles)",
"MetricExpr": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH / UOPS_ISSUED.ANY",
"MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group",
"MetricName": "tma_mixing_vectors",
"MetricThreshold": "tma_mixing_vectors > 0.05",
- "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued , the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
+ "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the Microcode Sequencer (MS) unit - see Microcode_Sequencer node for details",
- "MetricExpr": "cpu@IDQ.MS_UOPS\\,cmask\\=0x1@ / tma_info_core_core_clks / 2",
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the Microcode Sequencer (MS) unit - see Microcode_Sequencer node for details.",
+ "MetricExpr": "cpu@IDQ.MS_UOPS\\,cmask\\=1@ / tma_info_core_core_clks / 3.3",
"MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_ms",
"MetricThreshold": "tma_ms > 0.05 & tma_fetch_bandwidth > 0.2",
@@ -1534,7 +1549,7 @@
"MetricExpr": "3 * IDQ.MS_SWITCHES / tma_info_thread_clks",
"MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
"MetricName": "tma_ms_switches",
- "MetricThreshold": "tma_ms_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_bottleneck_irregular_overhead, tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
"ScaleUnit": "100%"
},
@@ -1543,7 +1558,7 @@
"MetricExpr": "tma_light_operations * INST_RETIRED.NOP / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "BvBO;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_nop_instructions",
- "MetricThreshold": "tma_nop_instructions > 0.1 & tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
"ScaleUnit": "100%"
},
@@ -1558,19 +1573,19 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types)",
+ "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).",
"MetricExpr": "max(tma_branch_mispredicts * (1 - BR_MISP_RETIRED.ALL_BRANCHES / (INT_MISC.CLEARS_COUNT - MACHINE_CLEARS.COUNT)), 0.0001)",
"MetricGroup": "BrMispredicts;BvIO;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_other_mispredicts",
- "MetricThreshold": "tma_other_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering",
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.",
"MetricExpr": "max(tma_machine_clears * (1 - MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.COUNT), 0.0001)",
"MetricGroup": "BvIO;Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_other_nukes",
- "MetricThreshold": "tma_other_nukes > 0.05 & tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
@@ -1614,8 +1629,8 @@
"MetricExpr": "((tma_ports_utilized_0 * tma_info_thread_clks + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / tma_info_thread_clks if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_thread_clks)",
"MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_ports_utilization",
- "MetricThreshold": "tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
"ScaleUnit": "100%"
},
{
@@ -1623,8 +1638,8 @@
"MetricExpr": "cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
- "MetricThreshold": "tma_ports_utilized_0 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
"ScaleUnit": "100%"
},
{
@@ -1632,7 +1647,7 @@
"MetricExpr": "EXE_ACTIVITY.1_PORTS_UTIL / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_1",
- "MetricThreshold": "tma_ports_utilized_1 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Sample with: EXE_ACTIVITY.1_PORTS_UTIL. Related metrics: tma_l1_bound",
"ScaleUnit": "100%"
},
@@ -1641,7 +1656,7 @@
"MetricExpr": "EXE_ACTIVITY.2_PORTS_UTIL / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_2",
- "MetricThreshold": "tma_ports_utilized_2 > 0.15 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Sample with: EXE_ACTIVITY.2_PORTS_UTIL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
"ScaleUnit": "100%"
},
@@ -1650,14 +1665,14 @@
"MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_thread_clks",
"MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
- "MetricThreshold": "tma_ports_utilized_3m > 0.4 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound)",
"MetricGroup": "BvUW;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
@@ -1670,7 +1685,7 @@
"MetricExpr": "RESOURCE_STALLS.SCOREBOARD / tma_info_thread_clks",
"MetricGroup": "BvIO;PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
"MetricName": "tma_serializing_operation",
- "MetricThreshold": "tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
"ScaleUnit": "100%"
},
@@ -1679,7 +1694,7 @@
"MetricExpr": "140 * MISC_RETIRED.PAUSE_INST / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_slow_pause",
- "MetricThreshold": "tma_slow_pause > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. Sample with: MISC_RETIRED.PAUSE_INST",
"ScaleUnit": "100%"
},
@@ -1689,17 +1704,16 @@
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_split_loads",
"MetricThreshold": "tma_split_loads > 0.3",
- "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS_PS",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents rate of split store accesses",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
"MetricName": "tma_split_stores",
- "MetricThreshold": "tma_split_stores > 0.2 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
"ScaleUnit": "100%"
},
{
@@ -1707,8 +1721,8 @@
"MetricExpr": "L1D_PEND_MISS.L2_STALL / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
- "MetricThreshold": "tma_sq_full > 0.3 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
"ScaleUnit": "100%"
},
{
@@ -1716,18 +1730,17 @@
"MetricExpr": "EXE_ACTIVITY.BOUND_ON_STORES / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_store_bound",
- "MetricThreshold": "tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES_PS",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
- "MetricThreshold": "tma_store_fwd_blk > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
"ScaleUnit": "100%"
},
{
@@ -1735,8 +1748,8 @@
"MetricExpr": "(L2_RQSTS.RFO_HIT * 10 * (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) + (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
"MetricGroup": "BvML;LockCont;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
- "MetricThreshold": "tma_store_latency > 0.1 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_branch_resteers, tma_fb_full, tma_l3_hit_latency, tma_lock_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
"ScaleUnit": "100%"
},
{
@@ -1753,7 +1766,7 @@
"MetricExpr": "tma_dtlb_store - tma_store_stlb_miss",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
"MetricName": "tma_store_stlb_hit",
- "MetricThreshold": "tma_store_stlb_hit > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_hit > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
@@ -1761,31 +1774,31 @@
"MetricExpr": "DTLB_STORE_MISSES.WALK_ACTIVE / tma_info_core_core_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
"MetricName": "tma_store_stlb_miss",
- "MetricThreshold": "tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_1G / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_1g",
- "MetricThreshold": "tma_store_stlb_miss_1g > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_1g > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_2m",
- "MetricThreshold": "tma_store_stlb_miss_2m > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_2m > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_4K / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_4k",
- "MetricThreshold": "tma_store_stlb_miss_4k > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_4k > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
@@ -1793,7 +1806,7 @@
"MetricExpr": "9 * OCR.STREAMING_WR.ANY_RESPONSE / tma_info_thread_clks",
"MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueSmSt;tma_store_bound_group",
"MetricName": "tma_streaming_stores",
- "MetricThreshold": "tma_streaming_stores > 0.2 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_streaming_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should Streaming stores be a bottleneck. Sample with: OCR.STREAMING_WR.ANY_RESPONSE. Related metrics: tma_fb_full",
"ScaleUnit": "100%"
},
@@ -1802,7 +1815,7 @@
"MetricExpr": "10 * BACLEARS.ANY / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
- "MetricThreshold": "tma_unknown_branches > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: BACLEARS.ANY",
"ScaleUnit": "100%"
},
@@ -1811,8 +1824,8 @@
"MetricExpr": "tma_retiring * UOPS_EXECUTED.X87 / UOPS_EXECUTED.THREAD",
"MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
"MetricName": "tma_x87_use",
- "MetricThreshold": "tma_x87_use > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
"ScaleUnit": "100%"
},
{
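The MetricThreshold churn in the hunks above is structural rather than semantic: each threshold string ANDs a node's own cutoff with the cutoffs of its TMA parent nodes, and the new parentheses only make that parent-chain grouping explicit. Assuming perf evaluates `&` in these strings as a logical AND over the comparisons (an assumption about the metric-expression semantics, not something this patch states), AND's associativity means the flat and nested forms flag exactly the same cases — a minimal sketch with made-up node values:

```python
# Evaluate the updated tma_store_bound threshold two ways.
# Assumption: '&' in MetricThreshold strings behaves as logical AND,
# so the added parentheses (parent-node grouping) cannot change the result.

metrics = {             # hypothetical TMA node values for one sample
    "tma_store_bound": 0.25,
    "tma_memory_bound": 0.30,
    "tma_backend_bound": 0.40,
}

# Old form: tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2
flat = (metrics["tma_store_bound"] > 0.2
        and metrics["tma_memory_bound"] > 0.2
        and metrics["tma_backend_bound"] > 0.2)

# New form: tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)
nested = (metrics["tma_store_bound"] > 0.2
          and (metrics["tma_memory_bound"] > 0.2
               and metrics["tma_backend_bound"] > 0.2))

assert flat == nested   # AND is associative: the same metrics get flagged
print("tma_store_bound flagged:", nested)
```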
diff --git a/tools/perf/pmu-events/arch/x86/icelake/memory.json b/tools/perf/pmu-events/arch/x86/icelake/memory.json
index abaf3f4f9d63..1455aaac37b1 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/memory.json
@@ -177,6 +177,16 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that was not supplied by the L3 cache.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -187,6 +197,26 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand data reads that was not supplied by the L3 cache.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -197,6 +227,26 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that was not supplied by the L3 cache.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -207,6 +257,26 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that was not supplied by the L3 cache.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -217,6 +287,26 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that was not supplied by the L3 cache.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -227,6 +317,26 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that was not supplied by the L3 cache.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -237,6 +347,26 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184008000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that was not supplied by the L3 cache.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -247,6 +377,26 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184008000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts streaming stores that was not supplied by the L3 cache.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -257,6 +407,16 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts streaming stores that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand data read requests that miss the L3 cache.",
"Counter": "0,1,2,3",
"EventCode": "0xb0",
diff --git a/tools/perf/pmu-events/arch/x86/icelake/other.json b/tools/perf/pmu-events/arch/x86/icelake/other.json
index a96b2a989d3f..141cd30a30af 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/other.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/other.json
@@ -27,186 +27,6 @@
"UMask": "0x20"
},
{
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.HWPF_L1D_AND_SWPF.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10400",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.HWPF_L1D_AND_SWPF.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000400",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.HWPF_L1D_AND_SWPF.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000400",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.HWPF_L2_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10010",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.HWPF_L2_DATA_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000010",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.HWPF_L2_DATA_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000010",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.HWPF_L2_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10020",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.HWPF_L2_RFO.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000020",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.HWPF_L2_RFO.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000020",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
"BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that have any type of response.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -217,26 +37,6 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184008000",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184008000",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
"BriefDescription": "Counts streaming stores that have any type of response.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -245,25 +45,5 @@
"MSRValue": "0x10800",
"SampleAfterValue": "100003",
"UMask": "0x1"
- },
- {
- "BriefDescription": "Counts streaming stores that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.STREAMING_WR.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000800",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts streaming stores that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.STREAMING_WR.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000800",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
}
]
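The OCR entries deleted from other.json above are not lost: the DRAM and LOCAL_DRAM variants reappear in memory.json in the earlier hunk of this series, so the net effect is a move between category files rather than a removal (the ANY_RESPONSE variants are presumably relocated the same way elsewhere in the series). After this kind of shuffle, a quick sanity check is to confirm that no EventName ends up defined twice within an architecture directory — a minimal sketch, with the path taken from this series and the file layout assumed:

```python
# Scan one architecture's pmu-events JSON files for EventNames that are
# defined in more than one file - a sanity check after moving events
# between category files as this series does.
import json
from collections import defaultdict
from pathlib import Path

arch_dir = Path("tools/perf/pmu-events/arch/x86/icelake")  # path from this series
seen = defaultdict(list)

for path in sorted(arch_dir.glob("*.json")):
    table = json.loads(path.read_text())
    if not isinstance(table, list):
        continue                      # skip any non-event-table JSON
    for event in table:
        name = event.get("EventName") if isinstance(event, dict) else None
        if name:
            seen[name].append(path.name)

for name, files in sorted(seen.items()):
    if len(files) > 1:
        print(f"duplicate {name}: {', '.join(files)}")
```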
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/cache.json b/tools/perf/pmu-events/arch/x86/icelakex/cache.json
index e8ab6ef2cd50..e46fd6f91d6b 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/cache.json
@@ -1,5 +1,68 @@
[
{
+ "BriefDescription": "Hit snoop reply with data, line invalidated.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xef",
+ "EventName": "CORE_SNOOP_RESPONSE.I_FWD_FE",
+ "PublicDescription": "Counts responses to snoops indicating the line will now be (I)nvalidated: removed from this core's cache, after the data is forwarded back to the requestor and indicating the data was found unmodified in the (FE) Forward or Exclusive State in this cores caches cache. A single snoop response from the core counts on all hyperthreads of the core.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "HitM snoop reply with data, line invalidated.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xef",
+ "EventName": "CORE_SNOOP_RESPONSE.I_FWD_M",
+ "PublicDescription": "Counts responses to snoops indicating the line will now be (I)nvalidated: removed from this core's caches, after the data is forwarded back to the requestor, and indicating the data was found modified(M) in this cores caches cache (aka HitM response). A single snoop response from the core counts on all hyperthreads of the core.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Hit snoop reply without sending the data, line invalidated.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xef",
+ "EventName": "CORE_SNOOP_RESPONSE.I_HIT_FSE",
+ "PublicDescription": "Counts responses to snoops indicating the line will now be (I)nvalidated in this core's caches without being forwarded back to the requestor. The line was in Forward, Shared or Exclusive (FSE) state in this cores caches. A single snoop response from the core counts on all hyperthreads of the core.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Line not found snoop reply",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xef",
+ "EventName": "CORE_SNOOP_RESPONSE.MISS",
+ "PublicDescription": "Counts responses to snoops indicating that the data was not found (IHitI) in this core's caches. A single snoop response from the core counts on all hyperthreads of the Core.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Hit snoop reply with data, line kept in Shared state.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xef",
+ "EventName": "CORE_SNOOP_RESPONSE.S_FWD_FE",
+ "PublicDescription": "Counts responses to snoops indicating the line may be kept on this core in the (S)hared state, after the data is forwarded back to the requestor, initially the data was found in the cache in the (FS) Forward or Shared state. A single snoop response from the core counts on all hyperthreads of the core.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "HitM snoop reply with data, line kept in Shared state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xef",
+ "EventName": "CORE_SNOOP_RESPONSE.S_FWD_M",
+ "PublicDescription": "Counts responses to snoops indicating the line may be kept on this core in the (S)hared state, after the data is forwarded back to the requestor, initially the data was found in the cache in the (M)odified state. A single snoop response from the core counts on all hyperthreads of the core.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Hit snoop reply without sending the data, line kept in Shared state.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xef",
+ "EventName": "CORE_SNOOP_RESPONSE.S_HIT_FSE",
+ "PublicDescription": "Counts responses to snoops indicating the line was kept on this core in the (S)hared state, and that the data was found unmodified but not forwarded back to the requestor, initially the data was found in the cache in the (FSE) Forward, Shared state or Exclusive state. A single snoop response from the core counts on all hyperthreads of the core.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
"BriefDescription": "Counts the number of cache lines replaced in L1 data cache.",
"Counter": "0,1,2,3",
"EventCode": "0x51",
@@ -507,6 +570,16 @@
"UMask": "0x80"
},
{
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit in the L3 or were snooped from another core's caches on the same socket.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -547,6 +620,16 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand data reads that hit in the L3 or were snooped from another core's caches on the same socket.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -587,6 +670,26 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that were supplied by PMM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those PMM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.LOCAL_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100400001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by PMM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x703C00001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand data reads that were supplied by a cache on a remote socket where a snoop hit a modified line in another core's caches which forwarded the data.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -607,6 +710,16 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that were supplied by PMM attached to another socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.REMOTE_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x703000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand data reads that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -627,6 +740,26 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that were supplied by PMM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.SNC_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x700800001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F3FFC0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit in the L3 or were snooped from another core's caches on the same socket.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -647,6 +780,36 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by PMM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those PMM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.LOCAL_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100400002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by PMM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x703C00002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by PMM attached to another socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.REMOTE_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x703000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -667,6 +830,16 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by PMM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.SNC_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x700800002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that hit in the L3 or were snooped from another core's caches on the same socket.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -677,6 +850,26 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts hardware prefetch (which bring data to L2) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10070",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches to the L3 only that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L3.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x12380",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts hardware prefetches to the L3 only that hit in the L3 or were snooped from another core's caches on the same socket.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -687,6 +880,26 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts hardware prefetches to the L3 only that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline was homed in a remote socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L3.REMOTE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x90002380",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts full cacheline writes (ItoM) that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline was homed in a remote socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ITOM.REMOTE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x90000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts hardware and software prefetches to all cache levels that hit in the L3 or were snooped from another core's caches on the same socket.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -697,6 +910,16 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F3FFC0477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit in the L3 or were snooped from another core's caches on the same socket.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -737,6 +960,36 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those PMM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.LOCAL_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100400477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM accesses that are controlled by the close or distant SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.LOCAL_SOCKET_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x700C00477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and were supplied by a remote socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.REMOTE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F33000477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop was sent and data was returned (Modified or Not Modified).",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -767,6 +1020,16 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to another socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x703000477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -787,6 +1050,16 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.SNC_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x700800477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts streaming stores that hit in the L3 or were snooped from another core's caches on the same socket.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json b/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
index 7bee03e532e4..f58eec2a1788 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
@@ -1,28 +1,28 @@
[
{
"BriefDescription": "C1 residency percent per core",
- "MetricExpr": "cstate_core@c1\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c1\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C1_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C2 residency percent per package",
- "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per core",
- "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per package",
- "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency",
"ScaleUnit": "100%"
@@ -79,6 +79,12 @@
"ScaleUnit": "1MB/s"
},
{
+ "BriefDescription": "Bandwidth of inbound IO reads that are initiated by end device controllers that are requesting memory from the CPU and miss the L3 cache",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_read_l3_miss",
+ "ScaleUnit": "1MB/s"
+ },
+ {
"BriefDescription": "Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the local CPU socket",
"MetricExpr": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_LOCAL * 64 / 1e6 / duration_time",
"MetricName": "io_bandwidth_read_local",
@@ -97,6 +103,12 @@
"ScaleUnit": "1MB/s"
},
{
+ "BriefDescription": "Bandwidth of inbound IO writes that are initiated by end device controllers that are writing memory to the CPU",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_MISS_ITOM + UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR) * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_write_l3_miss",
+ "ScaleUnit": "1MB/s"
+ },
+ {
"BriefDescription": "Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the local CPU socket",
"MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_ITOM_LOCAL + UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_LOCAL) * 64 / 1e6 / duration_time",
"MetricName": "io_bandwidth_write_local",
@@ -109,6 +121,24 @@
"ScaleUnit": "1MB/s"
},
{
+ "BriefDescription": "The percent of inbound full cache line writes initiated by IO that miss the L3 cache",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM / UNC_CHA_TOR_INSERTS.IO_ITOM",
+ "MetricName": "io_full_write_l3_miss",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "The percent of inbound partial writes initiated by IO that miss the L3 cache",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR + UNC_CHA_TOR_INSERTS.IO_MISS_RFO) / (UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR + UNC_CHA_TOR_INSERTS.IO_RFO)",
+ "MetricName": "io_partial_write_l3_miss",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "The percent of inbound reads initiated by IO that miss the L3 cache",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR / UNC_CHA_TOR_INSERTS.IO_PCIRDCUR",
+ "MetricName": "io_read_l3_miss",
+ "ScaleUnit": "100%"
+ },
+ {
"BriefDescription": "Ratio of number of completed page walks (for 2 megabyte and 4 megabyte page sizes) caused by a code fetch to the total number of completed instructions",
"MetricExpr": "ITLB_MISSES.WALK_COMPLETED_2M_4M / INST_RETIRED.ANY",
"MetricName": "itlb_2nd_level_large_page_mpi",
@@ -331,16 +361,15 @@
},
{
"BriefDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_4k_aliasing",
- "MetricThreshold": "tma_4k_aliasing > 0.2 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound)",
+ "MetricThreshold": "tma_4k_aliasing > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations",
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
"MetricExpr": "(UOPS_DISPATCHED.PORT_0 + UOPS_DISPATCHED.PORT_1 + UOPS_DISPATCHED.PORT_5 + UOPS_DISPATCHED.PORT_6) / (4 * tma_info_core_core_clks)",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_alu_op_utilization",
@@ -352,7 +381,7 @@
"MetricExpr": "34 * ASSISTS.ANY / tma_info_thread_slots",
"MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
- "MetricThreshold": "tma_assists > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: ASSISTS.ANY",
"ScaleUnit": "100%"
},
@@ -375,12 +404,13 @@
"MetricName": "tma_bad_speculation",
"MetricThreshold": "tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
- "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
"MetricGroup": "BigFootprint;BvBC;Fed;Frontend;IcMiss;MemoryTLB",
"MetricName": "tma_bottleneck_big_code",
"MetricThreshold": "tma_bottleneck_big_code > 20"
@@ -394,39 +424,44 @@
"PublicDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. (A lower bound)"
},
{
+ "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
+ "MetricGroup": "BvCB;Cor;tma_issueComp",
+ "MetricName": "tma_bottleneck_compute_bound_est",
+ "MetricThreshold": "tma_bottleneck_compute_bound_est > 20",
+ "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. Related metrics: "
+ },
+ {
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_fb_full / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
"MetricGroup": "BvMB;Mem;MemoryBW;Offcore;tma_issueBW",
- "MetricName": "tma_bottleneck_cache_memory_bandwidth",
- "MetricThreshold": "tma_bottleneck_cache_memory_bandwidth > 20",
+ "MetricName": "tma_bottleneck_data_cache_memory_bandwidth",
+ "MetricThreshold": "tma_bottleneck_data_cache_memory_bandwidth > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_l1_latency_dependency / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_lock_latency / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_split_loads / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_split_stores / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_store_latency / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_l1_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l1_latency_dependency / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_lock_latency / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_loads / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_stores / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)) + tma_memory_bound * (tma_store_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
"MetricGroup": "BvML;Mem;MemoryLat;Offcore;tma_issueLat",
- "MetricName": "tma_bottleneck_cache_memory_latency",
- "MetricThreshold": "tma_bottleneck_cache_memory_latency > 20",
+ "MetricName": "tma_bottleneck_data_cache_memory_latency",
+ "MetricThreshold": "tma_bottleneck_data_cache_memory_latency > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency"
},
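
Every tma_bottleneck_* MetricExpr in this hunk follows one roll-up pattern: a leaf node's pipeline cost is its top-level parent fraction multiplied, level by level, by the leaf's share of its sibling sum, scaled to a percentage. A minimal Python sketch of that pattern, with made-up fractions (none of these numbers come from the events above):

    # TMA bottleneck roll-up: weight a leaf by its share at each level.
    def share(node, siblings):
        # fraction of the parent's cost attributed to one child
        return node / sum(siblings)

    memory_bound = 0.40                         # parent fraction of slots
    l1, l2, l3, dram, store = 0.05, 0.03, 0.08, 0.20, 0.04
    mem_lat, mem_bw = 0.6, 0.4                  # latency vs. bandwidth split

    # e.g. the DRAM-latency term of the expression above, as a percentage:
    dram_latency_cost = 100 * (memory_bound
                               * share(dram, [l1, l2, l3, dram, store])
                               * share(mem_lat, [mem_lat, mem_bw]))
    print(f"{dram_latency_cost:.1f}%")          # 12.0%
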
{
- "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
- "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_serializing_operation + tma_ports_utilization) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_serializing_operation + tma_ports_utilization)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
- "MetricGroup": "BvCB;Cor;tma_issueComp",
- "MetricName": "tma_bottleneck_compute_bound_est",
- "MetricThreshold": "tma_bottleneck_compute_bound_est > 20",
- "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy"
- },
- {
"BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)",
- "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_mispredicts_resteers) / (tma_mispredicts_resteers + tma_clears_resteers + tma_unknown_branches)) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_ms / (tma_mite + tma_dsb + tma_ms))) - tma_bottleneck_big_code",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_mispredicts_resteers) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_ms)) - tma_bottleneck_big_code",
"MetricGroup": "BvFB;Fed;FetchBW;Frontend",
"MetricName": "tma_bottleneck_instruction_fetch_bw",
"MetricThreshold": "tma_bottleneck_instruction_fetch_bw > 20"
},
{
"BriefDescription": "Total pipeline cost of irregular execution (e.g",
- "MetricExpr": "100 * (tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_mispredicts_resteers) / (tma_mispredicts_resteers + tma_clears_resteers + tma_unknown_branches)) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_ms / (tma_mite + tma_dsb + tma_ms)) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + tma_core_bound * RS_EVENTS.EMPTY_CYCLES / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_serializing_operation + tma_ports_utilization) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_mispredicts_resteers) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_ms) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + tma_core_bound * RS_EVENTS.EMPTY_CYCLES / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
"MetricGroup": "Bad;BvIO;Cor;Ret;tma_issueMS",
"MetricName": "tma_bottleneck_irregular_overhead",
"MetricThreshold": "tma_bottleneck_irregular_overhead > 10",
@@ -434,7 +469,8 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_dtlb_store / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
"MetricGroup": "BvMT;Mem;MemoryTLB;Offcore;tma_issueTLB",
"MetricName": "tma_bottleneck_memory_data_tlbs",
"MetricThreshold": "tma_bottleneck_memory_data_tlbs > 20",
@@ -442,7 +478,8 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) * tma_remote_cache / (tma_local_mem + tma_remote_mem + tma_remote_cache) + tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * tma_false_sharing / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) * tma_remote_cache / (tma_local_mem + tma_remote_cache + tma_remote_mem) + tma_l3_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
"MetricGroup": "BvMS;LockCont;Mem;Offcore;tma_issueSyncxn",
"MetricName": "tma_bottleneck_memory_synchronization",
"MetricThreshold": "tma_bottleneck_memory_synchronization > 10",
@@ -450,7 +487,8 @@
},
{
"BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
- "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Bad;BadSpec;BrMispredicts;BvMP;tma_issueBM",
"MetricName": "tma_bottleneck_mispredictions",
"MetricThreshold": "tma_bottleneck_mispredictions > 20",
@@ -458,21 +496,23 @@
},
{
"BriefDescription": "Total pipeline cost of remaining bottlenecks in the back-end",
- "MetricExpr": "100 - (tma_bottleneck_big_code + tma_bottleneck_instruction_fetch_bw + tma_bottleneck_mispredictions + tma_bottleneck_cache_memory_bandwidth + tma_bottleneck_cache_memory_latency + tma_bottleneck_memory_data_tlbs + tma_bottleneck_memory_synchronization + tma_bottleneck_compute_bound_est + tma_bottleneck_irregular_overhead + tma_bottleneck_branching_overhead + tma_bottleneck_useful_work)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 - (tma_bottleneck_big_code + tma_bottleneck_instruction_fetch_bw + tma_bottleneck_mispredictions + tma_bottleneck_data_cache_memory_bandwidth + tma_bottleneck_data_cache_memory_latency + tma_bottleneck_memory_data_tlbs + tma_bottleneck_memory_synchronization + tma_bottleneck_compute_bound_est + tma_bottleneck_irregular_overhead + tma_bottleneck_branching_overhead + tma_bottleneck_useful_work)",
"MetricGroup": "BvOB;Cor;Offcore",
"MetricName": "tma_bottleneck_other_bottlenecks",
"MetricThreshold": "tma_bottleneck_other_bottlenecks > 20",
- "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls"
+ "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls."
},
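
tma_bottleneck_other_bottlenecks above is simply the residual once every named bottleneck cost is subtracted from 100. A sketch with placeholder percentages (illustrative only):

    # Residual bottleneck cost: 100 minus all named bottleneck costs,
    # each of which is already expressed as a percentage of slots.
    named = {
        "big_code": 5.0, "instruction_fetch_bw": 8.0, "mispredictions": 12.0,
        "data_cache_memory_bandwidth": 10.0, "data_cache_memory_latency": 15.0,
        "memory_data_tlbs": 3.0, "memory_synchronization": 2.0,
        "compute_bound_est": 6.0, "irregular_overhead": 4.0,
        "branching_overhead": 5.0, "useful_work": 25.0,
    }
    other_bottlenecks = 100 - sum(named.values())
    print(f"{other_bottlenecks:.1f}%")   # 5.0%
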
{
- "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead",
+ "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + 2 * BR_INST_RETIRED.NEAR_CALL + INST_RETIRED.NOP) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
"MetricGroup": "BvUW;Ret",
"MetricName": "tma_bottleneck_useful_work",
"MetricThreshold": "tma_bottleneck_useful_work > 20"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions.",
"MetricExpr": "tma_light_operations * BR_INST_RETIRED.ALL_BRANCHES / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_branch_instructions",
@@ -494,8 +534,8 @@
"MetricExpr": "INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks + tma_unknown_branches",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_branch_resteers",
- "MetricThreshold": "tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_l3_hit_latency, tma_store_latency",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
"ScaleUnit": "100%"
},
{
@@ -503,8 +543,8 @@
"MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
"MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_cisc",
- "MetricThreshold": "tma_cisc > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
- "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
"ScaleUnit": "100%"
},
{
@@ -512,24 +552,24 @@
"MetricExpr": "(1 - BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT)) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
"MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
"MetricName": "tma_clears_resteers",
- "MetricThreshold": "tma_clears_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that hit in the L2 cache",
+ "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that hit in the L2 cache.",
"MetricExpr": "max(0, tma_icache_misses - tma_code_l2_miss)",
"MetricGroup": "FetchLat;IcMiss;Offcore;TopdownL4;tma_L4_group;tma_icache_misses_group",
"MetricName": "tma_code_l2_hit",
- "MetricThreshold": "tma_code_l2_hit > 0.05 & tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_l2_hit > 0.05 & (tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that miss in the L2 cache",
+ "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that miss in the L2 cache.",
"MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD / tma_info_thread_clks",
"MetricGroup": "FetchLat;IcMiss;Offcore;TopdownL4;tma_L4_group;tma_icache_misses_group",
"MetricName": "tma_code_l2_miss",
- "MetricThreshold": "tma_code_l2_miss > 0.05 & tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_l2_miss > 0.05 & (tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
@@ -537,7 +577,7 @@
"MetricExpr": "max(0, tma_itlb_misses - tma_code_stlb_miss)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group",
"MetricName": "tma_code_stlb_hit",
- "MetricThreshold": "tma_code_stlb_hit > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_hit > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
@@ -545,33 +585,33 @@
"MetricExpr": "ITLB_MISSES.WALK_ACTIVE / tma_info_thread_clks",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group",
"MetricName": "tma_code_stlb_miss",
- "MetricThreshold": "tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for (instruction) code accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for (instruction) code accesses.",
"MetricExpr": "tma_code_stlb_miss * ITLB_MISSES.WALK_COMPLETED_2M_4M / (ITLB_MISSES.WALK_COMPLETED_4K + ITLB_MISSES.WALK_COMPLETED_2M_4M)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group",
"MetricName": "tma_code_stlb_miss_2m",
- "MetricThreshold": "tma_code_stlb_miss_2m > 0.05 & tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss_2m > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for (instruction) code accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for (instruction) code accesses.",
"MetricExpr": "tma_code_stlb_miss * ITLB_MISSES.WALK_COMPLETED_4K / (ITLB_MISSES.WALK_COMPLETED_4K + ITLB_MISSES.WALK_COMPLETED_2M_4M)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group",
"MetricName": "tma_code_stlb_miss_4k",
- "MetricThreshold": "tma_code_stlb_miss_4k > 0.05 & tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss_4k > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "((48 * tma_info_system_core_frequency - 4 * tma_info_system_core_frequency) * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + (47.5 * tma_info_system_core_frequency - 4 * tma_info_system_core_frequency) * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "(44 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + 43.5 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
- "MetricThreshold": "tma_contested_accesses > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM, MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_bottleneck_memory_synchronization, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_bottleneck_memory_synchronization, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
@@ -581,25 +621,34 @@
"MetricName": "tma_core_bound",
"MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations)",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates (based on idle latencies) how often the CPU was stalled on accesses to external CXL Memory by loads (e.g",
+ "MetricExpr": "(((1 - ((19 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + 10 * (MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS))) / (19 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + 10 * (MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + (25 * (MEM_LOAD_RETIRED.LOCAL_PMM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) if #has_pmem > 0 else 0) + 33 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) if #has_pmem > 0 else 0))) if #has_pmem > 0 else 1)) * (CYCLE_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks + (CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks - tma_l2_bound) if 1e6 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM + MEM_LOAD_RETIRED.LOCAL_PMM) > MEM_LOAD_RETIRED.L1_MISS else 0) if #has_pmem > 0 else 0)",
+ "MetricGroup": "MemoryBound;Server;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_cxl_mem_bound",
+ "MetricThreshold": "tma_cxl_mem_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric roughly estimates (based on idle latencies) how often the CPU was stalled on accesses to external CXL Memory by loads (e.g. 3D-Xpoint (Crystal Ridge, a.k.a. IXP) memory, PMM - Persistent Memory Module [from CLX to SPR] or any other CXL Type3 Memory [EMR onwards]).",
"ScaleUnit": "100%"
},
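
The #has_pmem tests in tma_cxl_mem_bound make the whole expression collapse to zero on systems without PMM/CXL Type3 memory; otherwise it attributes the post-L2 stall residue to CXL using idle-latency weights. A rough Python sketch of just that guard-and-residual shape (the function and its inputs are hypothetical simplifications, not the exact weighting above):

    # Guarded metric: contributes nothing unless PMM/CXL memory exists.
    def cxl_mem_bound(has_pmem, l3_miss_stall_frac, l2_bound, cxl_share):
        if not has_pmem:                 # the "#has_pmem > 0" guard
            return 0.0
        # stalls beyond L2 that the idle-latency model attributes to CXL
        return cxl_share * max(l3_miss_stall_frac - l2_bound, 0.0)

    print(cxl_mem_bound(False, 0.30, 0.05, 0.4))            # 0.0
    print(round(cxl_mem_bound(True, 0.30, 0.05, 0.4), 3))   # 0.1
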
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "(47.5 * tma_info_system_core_frequency - 4 * tma_info_system_core_frequency) * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (1 - OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "43.5 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (1 - OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
- "MetricThreshold": "tma_data_sharing > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder",
- "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=0x1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=0x2@) / tma_info_core_core_clks / 2",
+ "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_core_clks / 2",
"MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group",
"MetricName": "tma_decoder0_alone",
- "MetricThreshold": "tma_decoder0_alone > 0.1 & tma_mite > 0.1 & tma_fetch_bandwidth > 0.2",
+ "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions",
"ScaleUnit": "100%"
},
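
The -/+ change here only rewrites cmask\=0x1 as cmask\=1 (the values are equivalent); the counting trick itself is worth spelling out: a cmask=N qualifier counts cycles in which the per-cycle event count is at least N, so "cycles with >= 1 decoder active" minus "cycles with >= 2 active" leaves cycles where decoder 0 worked alone. A small Python sketch with a synthetic per-cycle trace:

    # cmask=1 counts cycles with >= 1 uop decoded, cmask=2 with >= 2;
    # the difference is cycles where exactly one decoder was active.
    decoded_per_cycle = [0, 1, 1, 3, 2, 1, 0, 4]   # synthetic trace

    ge1 = sum(1 for n in decoded_per_cycle if n >= 1)   # cmask=1 -> 6
    ge2 = sum(1 for n in decoded_per_cycle if n >= 2)   # cmask=2 -> 3
    decoder0_alone_cycles = ge1 - ge2                   # 3

    core_clks = len(decoded_per_cycle)
    print(decoder0_alone_cycles / core_clks / 2)        # 0.1875
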
@@ -608,17 +657,17 @@
"MetricExpr": "ARITH.DIVIDER_ACTIVE / tma_info_thread_clks",
"MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
- "MetricThreshold": "tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "CYCLE_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks + (CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks - tma_l2_bound",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks + (CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks - tma_l2_bound - tma_cxl_mem_bound if #has_pmem > 0 else CYCLE_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks + (CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks - tma_l2_bound)",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_dram_bound",
- "MetricThreshold": "tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_MISS",
"ScaleUnit": "100%"
},
@@ -628,7 +677,7 @@
"MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_dsb",
"MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2",
- "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
"ScaleUnit": "100%"
},
{
@@ -636,34 +685,34 @@
"MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_thread_clks",
"MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_dsb_switches",
- "MetricThreshold": "tma_dsb_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
- "MetricExpr": "min(7 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=0x1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
+ "MetricExpr": "min(7 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
- "MetricThreshold": "tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_store",
"ScaleUnit": "100%"
},
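
The tma_dtlb_load expression is a small cost model: charge roughly 7 cycles per load that hit the second-level TLB plus every cycle a page walker was active, then cap the total at the cycles stalled on memory that were not already attributed to an L1D miss. The same arithmetic in Python, with invented counts:

    # Mirrors: min(7 * STLB_HIT + WALK_ACTIVE,
    #              max(CYCLES_MEM_ANY - CYCLES_L1D_MISS, 0)) / clks
    stlb_hits      = 1_000     # DTLB_LOAD_MISSES.STLB_HIT (cmask=1)
    walk_active    = 4_000     # DTLB_LOAD_MISSES.WALK_ACTIVE
    mem_any_stall  = 20_000    # CYCLE_ACTIVITY.CYCLES_MEM_ANY
    l1d_miss_stall = 12_000    # CYCLE_ACTIVITY.CYCLES_L1D_MISS
    clks           = 100_000   # thread clocks

    cost = min(7 * stlb_hits + walk_active,
               max(mem_any_stall - l1d_miss_stall, 0)) / clks
    print(f"{cost:.1%}")       # 8.0%
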
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
- "MetricExpr": "(7 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=0x1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_core_clks",
+ "MetricExpr": "(7 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_core_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
- "MetricThreshold": "tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_load",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
- "MetricExpr": "(120 * tma_info_system_core_frequency * cpu@OCR.DEMAND_RFO.L3_MISS\\,offcore_rsp\\=0x103b800002@ + 48 * tma_info_system_core_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM) / tma_info_thread_clks",
+ "MetricExpr": "(120 * tma_info_system_core_frequency * OCR.DEMAND_RFO.L3_MISS@offcore_rsp\\=0x103b800002@ + 48 * tma_info_system_core_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM) / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
- "MetricThreshold": "tma_false_sharing > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
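
tma_false_sharing prices each contended RFO at a fixed cycle cost scaled by core frequency: roughly 120 core cycles for an RFO that misses the L3 and snoop-hits modified data elsewhere (the offcore_rsp-masked term), and 48 for a HITM satisfied from the local L3. The same shape in Python with invented counts:

    # Mirrors: (120 * freq * l3_miss_rfo_hitm
    #           + 48 * freq * l3_hit_rfo_hitm) / clks
    freq_ratio       = 1.0        # tma_info_system_core_frequency factor
    l3_miss_rfo_hitm = 500        # OCR.DEMAND_RFO.L3_MISS (HITM response)
    l3_hit_rfo_hitm  = 2_000      # OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM
    clks             = 1_000_000  # thread clocks

    cost = (120 * freq_ratio * l3_miss_rfo_hitm
            + 48 * freq_ratio * l3_hit_rfo_hitm) / clks
    print(f"{cost:.1%}")          # 15.6%
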
@@ -673,7 +722,7 @@
"MetricGroup": "BvMB;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
- "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
"ScaleUnit": "100%"
},
{
@@ -683,7 +732,7 @@
"MetricName": "tma_fetch_bandwidth",
"MetricThreshold": "tma_fetch_bandwidth > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1, FRONTEND_RETIRED.LATENCY_GE_1, FRONTEND_RETIRED.LATENCY_GE_2. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1;FRONTEND_RETIRED.LATENCY_GE_1;FRONTEND_RETIRED.LATENCY_GE_2. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
@@ -693,7 +742,7 @@
"MetricName": "tma_fetch_latency",
"MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16, FRONTEND_RETIRED.LATENCY_GE_8",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
"ScaleUnit": "100%"
},
{
@@ -711,7 +760,7 @@
"MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fp_arith",
"MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
"ScaleUnit": "100%"
},
{
@@ -720,15 +769,15 @@
"MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group",
"MetricName": "tma_fp_assists",
"MetricThreshold": "tma_fp_assists > 0.1",
- "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals)",
+ "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals).",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of cycles where the Floating-Point Divider unit was active",
+ "BriefDescription": "This metric represents fraction of cycles where the Floating-Point Divider unit was active.",
"MetricExpr": "ARITH.FP_DIVIDER_ACTIVE / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_divider_group",
"MetricName": "tma_fp_divider",
- "MetricThreshold": "tma_fp_divider > 0.2 & tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_fp_divider > 0.2 & (tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
{
@@ -736,7 +785,7 @@
"MetricExpr": "FP_ARITH_INST_RETIRED.SCALAR / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_scalar",
- "MetricThreshold": "tma_fp_scalar > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
"PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -745,7 +794,7 @@
"MetricExpr": "FP_ARITH_INST_RETIRED.VECTOR / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_vector",
- "MetricThreshold": "tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
"PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -754,7 +803,7 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_128b",
- "MetricThreshold": "tma_fp_vector_128b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
"PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -763,7 +812,7 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_256b",
- "MetricThreshold": "tma_fp_vector_256b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
"PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -772,7 +821,7 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_512b",
- "MetricThreshold": "tma_fp_vector_512b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_vector_512b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
"PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -784,17 +833,17 @@
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences",
- "MetricExpr": "tma_microcode_sequencer + tma_retiring * (UOPS_DECODED.DEC0 - cpu@UOPS_DECODED.DEC0\\,cmask\\=0x1@) / IDQ.MITE_UOPS",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
+ "MetricExpr": "tma_microcode_sequencer + tma_retiring * (UOPS_DECODED.DEC0 - cpu@UOPS_DECODED.DEC0\\,cmask\\=1@) / IDQ.MITE_UOPS",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_heavy_operations",
"MetricThreshold": "tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+])",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+])",
"ScaleUnit": "100%"
},
{
@@ -802,8 +851,8 @@
"MetricExpr": "ICACHE_DATA.STALLS / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
- "MetricThreshold": "tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS, FRONTEND_RETIRED.L1I_MISS",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
"ScaleUnit": "100%"
},
{
@@ -815,28 +864,28 @@
"PublicDescription": "Branch Misprediction Cost: Cycles representing fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_bottleneck_mispredictions, tma_branch_mispredicts, tma_mispredicts_resteers"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for conditional non-taken branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for conditional non-taken branches (lower number means higher occurrence rate).",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_NTAKEN",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_cond_ntaken",
"MetricThreshold": "tma_info_bad_spec_ipmisp_cond_ntaken < 200"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for conditional taken branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for conditional taken branches (lower number means higher occurrence rate).",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_TAKEN",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_cond_taken",
"MetricThreshold": "tma_info_bad_spec_ipmisp_cond_taken < 200"
},
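
These IpMispredict info metrics are plain ratios, and the thresholds flag workloads where mispredicts are dense. A worked example for the conditional-taken flavor:

    # Instructions per mispredict; lower means mispredicts are denser.
    inst_retired       = 10_000_000   # INST_RETIRED.ANY
    cond_taken_mispred = 80_000       # BR_MISP_RETIRED.COND_TAKEN

    ipmisp_cond_taken = inst_retired / cond_taken_mispred
    print(ipmisp_cond_taken)          # 125.0
    print(ipmisp_cond_taken < 200)    # True -> crosses the MetricThreshold
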
{
- "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.INDIRECT",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_indirect",
- "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1000"
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate).",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.RET",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_ret",
@@ -865,7 +914,8 @@
},
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck",
- "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_latency + tma_fetch_bandwidth)) * (tma_dsb / (tma_mite + tma_dsb + tma_ms)))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_bandwidth + tma_fetch_latency)) * (tma_dsb / (tma_dsb + tma_mite + tma_ms)))",
"MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_bandwidth",
"MetricThreshold": "tma_info_botlnk_l2_dsb_bandwidth > 10",
@@ -874,7 +924,7 @@
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_mite / (tma_mite + tma_dsb + tma_ms))",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_mite + tma_ms))",
"MetricGroup": "DSBmiss;Fed;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_misses",
"MetricThreshold": "tma_info_botlnk_l2_dsb_misses > 10",
@@ -883,10 +933,11 @@
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
"MetricName": "tma_info_botlnk_l2_ic_misses",
- "MetricThreshold": "tma_info_botlnk_l2_ic_misses > 5"
+ "MetricThreshold": "tma_info_botlnk_l2_ic_misses > 5",
+ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: "
},
{
"BriefDescription": "Fraction of branches that are CALL or RET",
@@ -947,11 +998,11 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED.VECTOR) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
- "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)"
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=0x1@",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "tma_info_core_ilp"
},
@@ -964,20 +1015,20 @@
"PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp"
},
{
- "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details",
- "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=0x1\\,edge\\=0x1@",
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
"MetricGroup": "DSBmiss",
"MetricName": "tma_info_frontend_dsb_switch_cost"
},
{
"BriefDescription": "Average number of Uops issued by front-end when it issued something",
- "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=0x1@",
+ "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=1@",
"MetricGroup": "Fed;FetchBW",
"MetricName": "tma_info_frontend_fetch_upc"
},
{
"BriefDescription": "Average Latency for L1 instruction cache misses",
- "MetricExpr": "ICACHE_16B.IFDATA_STALL / cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=0x1\\,edge\\=0x1@",
+ "MetricExpr": "ICACHE_16B.IFDATA_STALL / cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@",
"MetricGroup": "Fed;FetchLat;IcMiss",
"MetricName": "tma_info_frontend_icache_miss_latency"
},
@@ -1013,7 +1064,7 @@
"MetricName": "tma_info_frontend_tbpc"
},
{
- "BriefDescription": "Branch instructions per taken branch",
+ "BriefDescription": "Branch instructions per taken branch.",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;PGO",
"MetricName": "tma_info_inst_mix_bptkbranch"
@@ -1031,7 +1082,7 @@
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
"MetricThreshold": "tma_info_inst_mix_iparith < 10",
- "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW"
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
@@ -1039,7 +1090,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx128",
"MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
@@ -1047,7 +1098,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx256",
"MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate)",
@@ -1055,7 +1106,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx512",
"MetricThreshold": "tma_info_inst_mix_iparith_avx512 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
@@ -1063,7 +1114,7 @@
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_dp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
@@ -1071,7 +1122,7 @@
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_sp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
@@ -1126,7 +1177,7 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
- "MetricThreshold": "tma_info_inst_mix_iptb < 5 * 2 + 1",
+ "MetricThreshold": "tma_info_inst_mix_iptb < 11",
"PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp"
},
{
@@ -1257,7 +1308,7 @@
},
{
"BriefDescription": "Average Parallel L2 cache miss demand Loads",
- "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=0x1@",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@",
"MetricGroup": "Memory_BW;Offcore",
"MetricName": "tma_info_memory_latency_load_l2_mlp"
},
@@ -1319,8 +1370,8 @@
"MetricName": "tma_info_memory_tlb_store_stlb_mpki"
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per core",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=0x1@)",
+ "BriefDescription": "Mem;Backend;CacheHits",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute"
},
@@ -1337,22 +1388,28 @@
"MetricName": "tma_info_pipeline_fetch_mite"
},
{
+ "BriefDescription": "Average number of uops fetched from MS per cycle",
+ "MetricExpr": "IDQ.MS_UOPS / cpu@IDQ.MS_UOPS\\,cmask\\=1@",
+ "MetricGroup": "Fed;FetchLat;MicroSeq",
+ "MetricName": "tma_info_pipeline_fetch_ms"
+ },
+ {
"BriefDescription": "Instructions per a microcode Assist invocation",
"MetricExpr": "INST_RETIRED.ANY / ASSISTS.ANY",
"MetricGroup": "MicroSeq;Pipeline;Ret;Retire",
"MetricName": "tma_info_pipeline_ipassist",
- "MetricThreshold": "tma_info_pipeline_ipassist < 100000",
+ "MetricThreshold": "tma_info_pipeline_ipassist < 100e3",
"PublicDescription": "Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)"
},
{
- "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired",
- "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=0x1@",
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
"MetricGroup": "Pipeline;Ret",
"MetricName": "tma_info_pipeline_retire"
},
{
"BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]",
- "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / tma_info_system_time",
+ "MetricExpr": "tma_info_system_turbo_utilization * msr@tsc@ / 1e9 / tma_info_system_time",
"MetricGroup": "Power;Summary",
"MetricName": "tma_info_system_core_frequency"
},
@@ -1364,16 +1421,28 @@
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
{
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for reads [GB / sec]",
+ "MetricExpr": "(64 * UNC_M_PMM_RPQ_INSERTS / 1e9 / tma_info_system_time if #has_pmem > 0 else 0)",
+ "MetricGroup": "MemOffcore;MemoryBW;Server;SoC",
+ "MetricName": "tma_info_system_cxl_mem_read_bw"
+ },
+ {
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for Writes [GB / sec]",
+ "MetricExpr": "(64 * UNC_M_PMM_WPQ_INSERTS / 1e9 / tma_info_system_time if #has_pmem > 0 else 0)",
+ "MetricGroup": "MemOffcore;MemoryBW;Server;SoC",
+ "MetricName": "tma_info_system_cxl_mem_write_bw"
+ },
+ {
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
"MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / tma_info_system_time",
"MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW",
"MetricName": "tma_info_system_dram_bw_use",
- "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth, tma_sq_full"
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth, tma_sq_full"
},
{
"BriefDescription": "Giga Floating Point Operations Per Second",
@@ -1401,14 +1470,13 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
"MetricGroup": "Branches;OS",
"MetricName": "tma_info_system_ipfarbranch",
- "MetricThreshold": "tma_info_system_ipfarbranch < 1000000"
+ "MetricThreshold": "tma_info_system_ipfarbranch < 1e6"
},
{
"BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
"MetricGroup": "OS",
- "MetricName": "tma_info_system_kernel_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_system_kernel_cpi"
},
{
"BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
@@ -1429,16 +1497,24 @@
"MetricExpr": "UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH / UNC_CHA_CLOCKTICKS",
"MetricGroup": "LockCont;MemOffcore;Server;SoC",
"MetricName": "tma_info_system_mem_irq_duplicate_address",
- "MetricThreshold": "(tma_info_system_mem_irq_duplicate_address > 0.1)"
+ "MetricThreshold": "tma_info_system_mem_irq_duplicate_address > 0.1"
},
{
"BriefDescription": "Average number of parallel data read requests to external memory",
- "MetricExpr": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / cha@UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD\\,thresh\\=0x1@",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD@thresh\\=1@",
"MetricGroup": "Mem;MemoryBW;SoC",
"MetricName": "tma_info_system_mem_parallel_reads",
"PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches"
},
{
+ "BriefDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]",
+ "MetricExpr": "(1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM) / cha_0@event\\=0x0@ if #has_pmem > 0 else 0)",
+ "MetricGroup": "MemOffcore;MemoryLat;Server;SoC",
+ "MetricName": "tma_info_system_mem_pmm_read_latency",
+ "PublicDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches"
+ },
+ {
"BriefDescription": "Average latency of data read request to external memory (in nanoseconds)",
"MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / UNC_CHA_TOR_INSERTS.IA_MISS_DRD) / (tma_info_system_socket_clks / tma_info_system_time)",
"MetricGroup": "Mem;MemoryLat;SoC",
@@ -1463,7 +1539,7 @@
"MetricExpr": "CORE_POWER.LVL0_TURBO_LICENSE / tma_info_core_core_clks",
"MetricGroup": "Power",
"MetricName": "tma_info_system_power_license0_utilization",
- "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes"
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes."
},
{
"BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1",
@@ -1471,7 +1547,7 @@
"MetricGroup": "Power",
"MetricName": "tma_info_system_power_license1_utilization",
"MetricThreshold": "tma_info_system_power_license1_utilization > 0.5",
- "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions"
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions."
},
{
"BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX)",
@@ -1479,7 +1555,7 @@
"MetricGroup": "Power",
"MetricName": "tma_info_system_power_license2_utilization",
"MetricThreshold": "tma_info_system_power_license2_utilization > 0.5",
- "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX). This includes high current AVX 512-bit instructions"
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX). This includes high current AVX 512-bit instructions."
},
{
"BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
@@ -1513,7 +1589,7 @@
"MetricName": "tma_info_system_uncore_frequency"
},
{
- "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active",
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Pipeline",
"MetricName": "tma_info_thread_clks"
@@ -1522,15 +1598,14 @@
"BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / tma_info_thread_ipc",
"MetricGroup": "Mem;Pipeline",
- "MetricName": "tma_info_thread_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_thread_cpi"
},
{
"BriefDescription": "The ratio of Executed- by Issued-Uops",
"MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
"MetricGroup": "Cor;Pipeline",
"MetricName": "tma_info_thread_execute_per_issue",
- "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage"
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
},
{
"BriefDescription": "Instructions Per Cycle (per Logical Processor)",
@@ -1540,13 +1615,13 @@
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
- "MetricExpr": "slots",
+ "MetricExpr": "TOPDOWN.SLOTS",
"MetricGroup": "TmaL1;tma_L1_group",
"MetricName": "tma_info_thread_slots"
},
{
"BriefDescription": "Fraction of Physical Core issue-slots utilized by this Logical Processor",
- "MetricExpr": "(tma_info_thread_slots / (slots / 2) if #SMT_on else 1)",
+ "MetricExpr": "(tma_info_thread_slots / (TOPDOWN.SLOTS / 2) if #SMT_on else 1)",
"MetricGroup": "SMT;TmaL1;tma_L1_group",
"MetricName": "tma_info_thread_slots_utilization"
},
@@ -1562,14 +1637,14 @@
"MetricExpr": "tma_retiring * tma_info_thread_slots / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
- "MetricThreshold": "tma_info_thread_uptb < 5 * 1.5"
+ "MetricThreshold": "tma_info_thread_uptb < 7.5"
},
{
- "BriefDescription": "This metric represents fraction of cycles where the Integer Divider unit was active",
+ "BriefDescription": "This metric represents fraction of cycles where the Integer Divider unit was active.",
"MetricExpr": "tma_divider - tma_fp_divider",
"MetricGroup": "TopdownL4;tma_L4_group;tma_divider_group",
"MetricName": "tma_int_divider",
- "MetricThreshold": "tma_int_divider > 0.2 & tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_int_divider > 0.2 & (tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
{
@@ -1577,8 +1652,8 @@
"MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
- "MetricThreshold": "tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS, FRONTEND_RETIRED.ITLB_MISS",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
"ScaleUnit": "100%"
},
{
@@ -1586,17 +1661,17 @@
"MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_thread_clks, 0)",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
"MetricName": "tma_l1_bound",
- "MetricThreshold": "tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 Data (L1D) cache. The L1D cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1D. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache",
+ "BriefDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache",
"MetricExpr": "min(2 * (MEM_INST_RETIRED.ALL_LOADS - MEM_LOAD_RETIRED.FB_HIT - MEM_LOAD_RETIRED.L1_MISS) * 20 / 100, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_l1_latency_dependency",
- "MetricThreshold": "tma_l1_latency_dependency > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache. The short latency of the L1D cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
+ "MetricThreshold": "tma_l1_latency_dependency > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache. The short latency of the L1D cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
"ScaleUnit": "100%"
},
{
@@ -1605,7 +1680,7 @@
"MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + L1D_PEND_MISS.FB_FULL_PERIODS) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks)",
"MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
- "MetricThreshold": "tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT",
"ScaleUnit": "100%"
},
@@ -1614,27 +1689,26 @@
"MetricExpr": "4 * tma_info_system_core_frequency * MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_l2_bound_group",
"MetricName": "tma_l2_hit_latency",
- "MetricThreshold": "tma_l2_hit_latency > 0.05 & tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l2_hit_latency > 0.05 & (tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L2 cache under unloaded scenarios (possibly L2 latency limited). Avoiding L1 cache misses (i.e. L1 misses/L2 hits) will improve the latency. Sample with: MEM_LOAD_RETIRED.L2_HIT",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS) / tma_info_thread_clks",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
- "MetricThreshold": "tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
- "MetricExpr": "(23 * tma_info_system_core_frequency - 4 * tma_info_system_core_frequency) * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks",
+ "MetricExpr": "19 * tma_info_system_core_frequency * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
- "MetricThreshold": "tma_l3_hit_latency > 0.1 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT. Related metrics: tma_bottleneck_cache_memory_latency, tma_branch_resteers, tma_mem_latency, tma_store_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_bottleneck_data_cache_memory_latency, tma_mem_latency",
"ScaleUnit": "100%"
},
{
@@ -1642,18 +1716,18 @@
"MetricExpr": "DECODE.LCP / tma_info_thread_clks",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_lcp",
- "MetricThreshold": "tma_lcp > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation)",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
"MetricExpr": "max(0, tma_retiring - tma_heavy_operations)",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_light_operations",
"MetricThreshold": "tma_light_operations > 0.6",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
"ScaleUnit": "100%"
},
{
@@ -1670,7 +1744,7 @@
"MetricExpr": "tma_dtlb_load - tma_load_stlb_miss",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
"MetricName": "tma_load_stlb_hit",
- "MetricThreshold": "tma_load_stlb_hit > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_hit > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
@@ -1678,39 +1752,39 @@
"MetricExpr": "DTLB_LOAD_MISSES.WALK_ACTIVE / tma_info_thread_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
"MetricName": "tma_load_stlb_miss",
- "MetricThreshold": "tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_1G / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_1g",
- "MetricThreshold": "tma_load_stlb_miss_1g > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_1g > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_2m",
- "MetricThreshold": "tma_load_stlb_miss_2m > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_2m > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_4K / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_4k",
- "MetricThreshold": "tma_load_stlb_miss_4k > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_4k > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory",
- "MetricExpr": "(66.5 * tma_info_system_core_frequency - 23 * tma_info_system_core_frequency) * MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "43.5 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "Server;TopdownL5;tma_L5_group;tma_mem_latency_group",
"MetricName": "tma_local_mem",
- "MetricThreshold": "tma_local_mem > 0.1 & tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_local_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. Sample with: MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
"ScaleUnit": "100%"
},
@@ -1720,7 +1794,7 @@
"MetricExpr": "(16 * max(0, MEM_INST_RETIRED.LOCK_LOADS - L2_RQSTS.ALL_RFO) + MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES * (10 * L2_RQSTS.RFO_HIT + min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO))) / tma_info_thread_clks",
"MetricGroup": "LockCont;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
- "MetricThreshold": "tma_lock_latency > 0.2 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
"ScaleUnit": "100%"
},
@@ -1736,11 +1810,11 @@
},
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
- "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=0x4@) / tma_info_thread_clks",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
- "MetricThreshold": "tma_mem_bandwidth > 0.2 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
"ScaleUnit": "100%"
},
{
@@ -1748,8 +1822,8 @@
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
"MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
- "MetricThreshold": "tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_bottleneck_cache_memory_latency, tma_l3_hit_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_bottleneck_data_cache_memory_latency, tma_l3_hit_latency",
"ScaleUnit": "100%"
},
{
@@ -1759,11 +1833,11 @@
"MetricName": "tma_memory_bound",
"MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two)",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations , uops for memory load or store accesses",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_light_operations * MEM_INST_RETIRED.ANY / INST_RETIRED.ANY",
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
@@ -1785,7 +1859,7 @@
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
"MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
"MetricName": "tma_mispredicts_resteers",
- "MetricThreshold": "tma_mispredicts_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_bottleneck_mispredictions, tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost",
"ScaleUnit": "100%"
},
@@ -1800,24 +1874,24 @@
},
{
"BriefDescription": "This metric represents fraction of cycles where (only) 4 uops were delivered by the MITE pipeline",
- "MetricExpr": "(cpu@IDQ.MITE_UOPS\\,cmask\\=0x4@ - cpu@IDQ.MITE_UOPS\\,cmask\\=0x5@) / tma_info_thread_clks",
+ "MetricExpr": "(cpu@IDQ.MITE_UOPS\\,cmask\\=4@ - cpu@IDQ.MITE_UOPS\\,cmask\\=5@) / tma_info_thread_clks",
"MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_mite_group",
"MetricName": "tma_mite_4wide",
- "MetricThreshold": "tma_mite_4wide > 0.05 & tma_mite > 0.1 & tma_fetch_bandwidth > 0.2",
+ "MetricThreshold": "tma_mite_4wide > 0.05 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued , the Count Domain; [ADL+] cycles)",
+ "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles)",
"MetricExpr": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH / UOPS_ISSUED.ANY",
"MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group",
"MetricName": "tma_mixing_vectors",
"MetricThreshold": "tma_mixing_vectors > 0.05",
- "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued , the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
+ "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the Microcode Sequencer (MS) unit - see Microcode_Sequencer node for details",
- "MetricExpr": "cpu@IDQ.MS_UOPS\\,cmask\\=0x1@ / tma_info_core_core_clks / 2",
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the Microcode Sequencer (MS) unit - see Microcode_Sequencer node for details.",
+ "MetricExpr": "cpu@IDQ.MS_UOPS\\,cmask\\=1@ / tma_info_core_core_clks / 3.3",
"MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_ms",
"MetricThreshold": "tma_ms > 0.05 & tma_fetch_bandwidth > 0.2",
@@ -1828,7 +1902,7 @@
"MetricExpr": "3 * IDQ.MS_SWITCHES / tma_info_thread_clks",
"MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
"MetricName": "tma_ms_switches",
- "MetricThreshold": "tma_ms_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_bottleneck_irregular_overhead, tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
"ScaleUnit": "100%"
},
@@ -1837,7 +1911,7 @@
"MetricExpr": "tma_light_operations * INST_RETIRED.NOP / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "BvBO;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_nop_instructions",
- "MetricThreshold": "tma_nop_instructions > 0.1 & tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
"ScaleUnit": "100%"
},
@@ -1852,19 +1926,19 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types)",
+ "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).",
"MetricExpr": "max(tma_branch_mispredicts * (1 - BR_MISP_RETIRED.ALL_BRANCHES / (INT_MISC.CLEARS_COUNT - MACHINE_CLEARS.COUNT)), 0.0001)",
"MetricGroup": "BrMispredicts;BvIO;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_other_mispredicts",
- "MetricThreshold": "tma_other_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering",
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.",
"MetricExpr": "max(tma_machine_clears * (1 - MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.COUNT), 0.0001)",
"MetricGroup": "BvIO;Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_other_nukes",
- "MetricThreshold": "tma_other_nukes > 0.05 & tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
@@ -1908,8 +1982,8 @@
"MetricExpr": "((tma_ports_utilized_0 * tma_info_thread_clks + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / tma_info_thread_clks if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_thread_clks)",
"MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_ports_utilization",
- "MetricThreshold": "tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
"ScaleUnit": "100%"
},
{
@@ -1917,8 +1991,8 @@
"MetricExpr": "cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
- "MetricThreshold": "tma_ports_utilized_0 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
"ScaleUnit": "100%"
},
{
@@ -1926,7 +2000,7 @@
"MetricExpr": "EXE_ACTIVITY.1_PORTS_UTIL / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_1",
- "MetricThreshold": "tma_ports_utilized_1 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Sample with: EXE_ACTIVITY.1_PORTS_UTIL. Related metrics: tma_l1_bound",
"ScaleUnit": "100%"
},
@@ -1935,7 +2009,7 @@
"MetricExpr": "EXE_ACTIVITY.2_PORTS_UTIL / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_2",
- "MetricThreshold": "tma_ports_utilized_2 > 0.15 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Sample with: EXE_ACTIVITY.2_PORTS_UTIL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
"ScaleUnit": "100%"
},
@@ -1944,32 +2018,32 @@
"MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_thread_clks",
"MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
- "MetricThreshold": "tma_ports_utilized_3m > 0.4 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues",
- "MetricExpr": "((120 * tma_info_system_core_frequency - 23 * tma_info_system_core_frequency) * MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM + (120 * tma_info_system_core_frequency - 23 * tma_info_system_core_frequency) * MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "(97 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM + 97 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "Offcore;Server;Snoop;TopdownL5;tma_L5_group;tma_issueSyncxn;tma_mem_latency_group",
"MetricName": "tma_remote_cache",
- "MetricThreshold": "tma_remote_cache > 0.05 & tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues. This is caused often due to non-optimal NUMA allocations. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM, MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_remote_cache > 0.05 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM_PS;MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD_PS. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory",
- "MetricExpr": "(131 * tma_info_system_core_frequency - 23 * tma_info_system_core_frequency) * MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "108 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "Server;Snoop;TopdownL5;tma_L5_group;tma_mem_latency_group",
"MetricName": "tma_remote_mem",
- "MetricThreshold": "tma_remote_mem > 0.1 & tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
+ "MetricThreshold": "tma_remote_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM_PS",
"ScaleUnit": "100%"
},
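The two MetricExpr rewrites above fold a run-time subtraction into a single constant: (120 - 23) cycles per remote-cache hit becomes the coefficient 97, and (131 - 23) cycles per remote-DRAM load becomes 108, each still scaled by tma_info_system_core_frequency. The shared (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) factor is untouched; my reading is that it inflates the cost when many loads complete via fill-buffer hits. A quick check of the folding, with an illustrative frequency:

    # Old form: (A * f - B * f) * events; new form: (A - B) * f * events.
    def old_remote_mem(f, loads):
        return (131 * f - 23 * f) * loads

    def new_remote_mem(f, loads):
        return 108 * f * loads

    assert 120 - 23 == 97                      # tma_remote_cache coefficient
    assert old_remote_mem(2.5, 1000) == new_remote_mem(2.5, 1000)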
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound)",
"MetricGroup": "BvUW;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
@@ -1982,7 +2056,7 @@
"MetricExpr": "RESOURCE_STALLS.SCOREBOARD / tma_info_thread_clks",
"MetricGroup": "BvIO;PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
"MetricName": "tma_serializing_operation",
- "MetricThreshold": "tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
"ScaleUnit": "100%"
},
@@ -1991,7 +2065,7 @@
"MetricExpr": "37 * MISC_RETIRED.PAUSE_INST / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_slow_pause",
- "MetricThreshold": "tma_slow_pause > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. Sample with: MISC_RETIRED.PAUSE_INST",
"ScaleUnit": "100%"
},
@@ -2001,17 +2075,16 @@
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_split_loads",
"MetricThreshold": "tma_split_loads > 0.3",
- "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS_PS",
"ScaleUnit": "100%"
},
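Several "Sample with:" hints in this file switch to the _PS-suffixed event names; by Intel's naming convention these are the precise (PEBS) variants of the same events, which matters when sampling because the sampled instruction pointer is attributed exactly. A hedged sketch of acting on such a hint ('perf record -e EVENT -F FREQ' is standard perf usage; the frequency value is illustrative):

    import shlex

    def sample_cmdline(event: str, freq: int = 997) -> str:
        # Build a perf sampling command for an event taken from a
        # "Sample with:" hint above.
        return f"perf record -e {shlex.quote(event)} -F {freq}"

    print(sample_cmdline("MEM_INST_RETIRED.SPLIT_LOADS_PS"))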
{
"BriefDescription": "This metric represents rate of split store accesses",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
"MetricName": "tma_split_stores",
- "MetricThreshold": "tma_split_stores > 0.2 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
"ScaleUnit": "100%"
},
{
@@ -2019,8 +2092,8 @@
"MetricExpr": "L1D_PEND_MISS.L2_STALL / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
- "MetricThreshold": "tma_sq_full > 0.3 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
"ScaleUnit": "100%"
},
{
@@ -2028,18 +2101,17 @@
"MetricExpr": "EXE_ACTIVITY.BOUND_ON_STORES / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_store_bound",
- "MetricThreshold": "tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES_PS",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
- "MetricThreshold": "tma_store_fwd_blk > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
"ScaleUnit": "100%"
},
{
@@ -2047,8 +2119,8 @@
"MetricExpr": "(L2_RQSTS.RFO_HIT * 10 * (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) + (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
"MetricGroup": "BvML;LockCont;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
- "MetricThreshold": "tma_store_latency > 0.1 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_branch_resteers, tma_fb_full, tma_l3_hit_latency, tma_lock_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
"ScaleUnit": "100%"
},
{
@@ -2065,7 +2137,7 @@
"MetricExpr": "tma_dtlb_store - tma_store_stlb_miss",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
"MetricName": "tma_store_stlb_hit",
- "MetricThreshold": "tma_store_stlb_hit > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_hit > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
@@ -2073,31 +2145,31 @@
"MetricExpr": "DTLB_STORE_MISSES.WALK_ACTIVE / tma_info_core_core_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
"MetricName": "tma_store_stlb_miss",
- "MetricThreshold": "tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_1G / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_1g",
- "MetricThreshold": "tma_store_stlb_miss_1g > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_1g > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_2m",
- "MetricThreshold": "tma_store_stlb_miss_2m > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_2m > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_4K / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_4k",
- "MetricThreshold": "tma_store_stlb_miss_4k > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_4k > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
@@ -2105,7 +2177,7 @@
"MetricExpr": "9 * OCR.STREAMING_WR.ANY_RESPONSE / tma_info_thread_clks",
"MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueSmSt;tma_store_bound_group",
"MetricName": "tma_streaming_stores",
- "MetricThreshold": "tma_streaming_stores > 0.2 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_streaming_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should Streaming stores be a bottleneck. Sample with: OCR.STREAMING_WR.ANY_RESPONSE. Related metrics: tma_fb_full",
"ScaleUnit": "100%"
},
@@ -2114,7 +2186,7 @@
"MetricExpr": "10 * BACLEARS.ANY / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
- "MetricThreshold": "tma_unknown_branches > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: BACLEARS.ANY",
"ScaleUnit": "100%"
},
@@ -2123,8 +2195,8 @@
"MetricExpr": "tma_retiring * UOPS_EXECUTED.X87 / UOPS_EXECUTED.THREAD",
"MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
"MetricName": "tma_x87_use",
- "MetricThreshold": "tma_x87_use > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
"ScaleUnit": "100%"
},
{
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/memory.json b/tools/perf/pmu-events/arch/x86/icelakex/memory.json
index ec9577cce3ac..ca7f68f67463 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/memory.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/memory.json
@@ -114,6 +114,16 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
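The OCR events being added to memory.json encode what to count in MSRValue: the low bits select request types, the high bits select the supplier (DRAM variants throughout this hunk). The request-type bits can be read off other events in these files (0x1 DEMAND_DATA_RD, 0x2 DEMAND_RFO, 0x4 DEMAND_CODE_RD, 0x70 HWPF_L2, 0x400 HWPF_L1D_AND_SWPF), and 0x477, used by READS_TO_CORE below, is exactly their union. Treating the 0x73C000000 portion as an opaque "any DRAM" supplier selector is my assumption. A small decoder sketch:

    # Request-type bits, as observable from the MSRValues in these files.
    REQ_BITS = {
        0x001: "DEMAND_DATA_RD",
        0x002: "DEMAND_RFO",
        0x004: "DEMAND_CODE_RD",
        0x070: "HWPF_L2",
        0x400: "HWPF_L1D_AND_SWPF",
    }

    def decode_requests(msr_value: int) -> list:
        reqs = msr_value & 0xFFFF
        return [name for bit, name in REQ_BITS.items() if reqs & bit == bit]

    assert decode_requests(0x73C000004) == ["DEMAND_CODE_RD"]  # this event
    assert len(decode_requests(0x73C000477)) == 5              # READS_TO_CORE.DRAM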
+ {
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the local socket's L1, L2, or L3 caches.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -134,6 +144,36 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.SNC_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x708000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand data reads that were not supplied by the local socket's L1, L2, or L3 caches.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -154,6 +194,46 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to another socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x730000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.SNC_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x708000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the local socket's L1, L2, or L3 caches.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -174,6 +254,36 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.SNC_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x708000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that were not supplied by the local socket's L1, L2, or L3 caches.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -194,6 +304,16 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts hardware prefetches to the L3 only that missed the local socket's L1, L2, and L3 caches.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -254,6 +374,16 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -284,6 +414,56 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts DRAM accesses that are controlled by the close or distant SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.LOCAL_SOCKET_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x70C000477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to another socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x730000477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM or PMM attached to another socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_MEMORY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x731800477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.SNC_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x708000477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
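For the READS_TO_CORE family above, the DRAM supplier selectors compose as unions, which can be verified from the MSRValues alone; reading that as deliberate layering (close SNC cluster plus distant SNC cluster equals local socket; local socket plus remote sockets equals any DRAM) is my interpretation:

    LOCAL_DRAM        = 0x104000477   # close SNC cluster (whole socket w/o SNC)
    SNC_DRAM          = 0x708000477   # distant on-socket SNC cluster
    LOCAL_SOCKET_DRAM = 0x70C000477
    REMOTE_DRAM       = 0x730000477
    DRAM              = 0x73C000477

    assert LOCAL_DRAM | SNC_DRAM == LOCAL_SOCKET_DRAM
    assert LOCAL_SOCKET_DRAM | REMOTE_DRAM == DRAM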
+ {
"BriefDescription": "Counts streaming stores that missed the local socket's L1, L2, and L3 caches.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -304,6 +484,16 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand RFOs, ItoM's, PREFECTHW's, Hardware RFO Prefetches to the L1/L2 and Streaming stores that likely resulted in a store to Memory (DRAM or PMM)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.WRITE_ESTIMATE.MEMORY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xFBFF80822",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand data read requests that miss the L3 cache.",
"Counter": "0,1,2,3",
"EventCode": "0xb0",
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/other.json b/tools/perf/pmu-events/arch/x86/icelakex/other.json
index 05b348d9c838..141cd30a30af 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/other.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/other.json
@@ -27,339 +27,6 @@
"UMask": "0x20"
},
{
- "BriefDescription": "Hit snoop reply with data, line invalidated.",
- "Counter": "0,1,2,3",
- "EventCode": "0xef",
- "EventName": "CORE_SNOOP_RESPONSE.I_FWD_FE",
- "PublicDescription": "Counts responses to snoops indicating the line will now be (I)nvalidated: removed from this core's cache, after the data is forwarded back to the requestor and indicating the data was found unmodified in the (FE) Forward or Exclusive State in this cores caches cache. A single snoop response from the core counts on all hyperthreads of the core.",
- "SampleAfterValue": "1000003",
- "UMask": "0x20"
- },
- {
- "BriefDescription": "HitM snoop reply with data, line invalidated.",
- "Counter": "0,1,2,3",
- "EventCode": "0xef",
- "EventName": "CORE_SNOOP_RESPONSE.I_FWD_M",
- "PublicDescription": "Counts responses to snoops indicating the line will now be (I)nvalidated: removed from this core's caches, after the data is forwarded back to the requestor, and indicating the data was found modified(M) in this cores caches cache (aka HitM response). A single snoop response from the core counts on all hyperthreads of the core.",
- "SampleAfterValue": "1000003",
- "UMask": "0x10"
- },
- {
- "BriefDescription": "Hit snoop reply without sending the data, line invalidated.",
- "Counter": "0,1,2,3",
- "EventCode": "0xef",
- "EventName": "CORE_SNOOP_RESPONSE.I_HIT_FSE",
- "PublicDescription": "Counts responses to snoops indicating the line will now be (I)nvalidated in this core's caches without being forwarded back to the requestor. The line was in Forward, Shared or Exclusive (FSE) state in this cores caches. A single snoop response from the core counts on all hyperthreads of the core.",
- "SampleAfterValue": "1000003",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "Line not found snoop reply",
- "Counter": "0,1,2,3",
- "EventCode": "0xef",
- "EventName": "CORE_SNOOP_RESPONSE.MISS",
- "PublicDescription": "Counts responses to snoops indicating that the data was not found (IHitI) in this core's caches. A single snoop response from the core counts on all hyperthreads of the Core.",
- "SampleAfterValue": "1000003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Hit snoop reply with data, line kept in Shared state.",
- "Counter": "0,1,2,3",
- "EventCode": "0xef",
- "EventName": "CORE_SNOOP_RESPONSE.S_FWD_FE",
- "PublicDescription": "Counts responses to snoops indicating the line may be kept on this core in the (S)hared state, after the data is forwarded back to the requestor, initially the data was found in the cache in the (FS) Forward or Shared state. A single snoop response from the core counts on all hyperthreads of the core.",
- "SampleAfterValue": "1000003",
- "UMask": "0x40"
- },
- {
- "BriefDescription": "HitM snoop reply with data, line kept in Shared state",
- "Counter": "0,1,2,3",
- "EventCode": "0xef",
- "EventName": "CORE_SNOOP_RESPONSE.S_FWD_M",
- "PublicDescription": "Counts responses to snoops indicating the line may be kept on this core in the (S)hared state, after the data is forwarded back to the requestor, initially the data was found in the cache in the (M)odified state. A single snoop response from the core counts on all hyperthreads of the core.",
- "SampleAfterValue": "1000003",
- "UMask": "0x8"
- },
- {
- "BriefDescription": "Hit snoop reply without sending the data, line kept in Shared state.",
- "Counter": "0,1,2,3",
- "EventCode": "0xef",
- "EventName": "CORE_SNOOP_RESPONSE.S_HIT_FSE",
- "PublicDescription": "Counts responses to snoops indicating the line was kept on this core in the (S)hared state, and that the data was found unmodified but not forwarded back to the requestor, initially the data was found in the cache in the (FSE) Forward, Shared state or Exclusive state. A single snoop response from the core counts on all hyperthreads of the core.",
- "SampleAfterValue": "1000003",
- "UMask": "0x4"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x73C000004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x104000004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.SNC_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x708000004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x73C000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x104000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by PMM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those PMM accesses that are controlled by the close SNC Cluster.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.LOCAL_PMM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x100400001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by PMM.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.PMM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x703C00001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to another socket.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.REMOTE_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x730000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by PMM attached to another socket.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.REMOTE_PMM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x703000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.SNC_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x708000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by PMM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.SNC_PMM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x700800001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F3FFC0002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x73C000002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x104000002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by PMM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those PMM accesses that are controlled by the close SNC Cluster.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.LOCAL_PMM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x100400002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by PMM.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.PMM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x703C00002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by PMM attached to another socket.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.REMOTE_PMM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x703000002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.SNC_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x708000002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by PMM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.SNC_PMM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x700800002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.HWPF_L1D_AND_SWPF.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x73C000400",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.HWPF_L1D_AND_SWPF.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x104000400",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts hardware prefetch (which bring data to L2) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.HWPF_L2.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10070",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts hardware prefetches to the L3 only that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.HWPF_L3.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x12380",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts hardware prefetches to the L3 only that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline was homed in a remote socket.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.HWPF_L3.REMOTE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x90002380",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts full cacheline writes (ItoM) that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline was homed in a remote socket.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ITOM.REMOTE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x90000002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
"BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that have any type of response.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -370,126 +37,6 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.READS_TO_CORE.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F3FFC0477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.READS_TO_CORE.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x73C000477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.READS_TO_CORE.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x104000477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those PMM accesses that are controlled by the close SNC Cluster.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.READS_TO_CORE.LOCAL_PMM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x100400477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts DRAM accesses that are controlled by the close or distant SNC Cluster.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.READS_TO_CORE.LOCAL_SOCKET_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x70C000477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM accesses that are controlled by the close or distant SNC Cluster.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.READS_TO_CORE.LOCAL_SOCKET_PMM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x700C00477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and were supplied by a remote socket.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.READS_TO_CORE.REMOTE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F33000477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to another socket.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.READS_TO_CORE.REMOTE_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x730000477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM or PMM attached to another socket.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.READS_TO_CORE.REMOTE_MEMORY",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x731800477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to another socket.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.READS_TO_CORE.REMOTE_PMM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x703000477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.READS_TO_CORE.SNC_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x708000477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.READS_TO_CORE.SNC_PMM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x700800477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
"BriefDescription": "Counts streaming stores that have any type of response.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -498,15 +45,5 @@
"MSRValue": "0x10800",
"SampleAfterValue": "100003",
"UMask": "0x1"
- },
- {
- "BriefDescription": "Counts Demand RFOs, ItoM's, PREFECTHW's, Hardware RFO Prefetches to the L1/L2 and Streaming stores that likely resulted in a store to Memory (DRAM or PMM)",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.WRITE_ESTIMATE.MEMORY",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0xFBFF80822",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json b/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json
index f1446f1b67c6..f3a0d7f49af4 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json
@@ -477,7 +477,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "LOAD_HIT_PREFETCH.SWPF",
- "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "PublicDescription": "Counts all software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/uncore-cache.json b/tools/perf/pmu-events/arch/x86/icelakex/uncore-cache.json
index 8c73708befef..1c225192ba34 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/uncore-cache.json
@@ -6050,7 +6050,7 @@
"EventName": "UNC_CHA_SNOOP_RESP.RSPIFWD",
"Experimental": "1",
"PerPkg": "1",
- "PublicDescription": "Counts when a a transaction with the opcode type RspIFwd Snoop Response was received which indicates a remote caching agent forwarded the data and the requesting agent is able to acquire the data in E (Exclusive) or M (modified) states. This is commonly returned with RFO (the Read for Ownership issued before a write) transactions. The snoop could have either been to a cacheline in the M,E,F (Modified, Exclusive or Forward) states.",
+ "PublicDescription": "Counts when a transaction with the opcode type RspIFwd Snoop Response was received which indicates a remote caching agent forwarded the data and the requesting agent is able to acquire the data in E (Exclusive) or M (modified) states. This is commonly returned with RFO (the Read for Ownership issued before a write) transactions. The snoop could have either been to a cacheline in the M,E,F (Modified, Exclusive or Forward) states.",
"UMask": "0x4",
"Unit": "CHA"
},
@@ -6072,7 +6072,7 @@
"EventName": "UNC_CHA_SNOOP_RESP.RSPSFWD",
"Experimental": "1",
"PerPkg": "1",
- "PublicDescription": "Counts when a a transaction with the opcode type RspSFwd Snoop Response was received which indicates a remote caching agent forwarded the data but held on to its current copy. This is common for data and code reads that hit in a remote socket in E (Exclusive) or F (Forward) state.",
+ "PublicDescription": "Counts when a transaction with the opcode type RspSFwd Snoop Response was received which indicates a remote caching agent forwarded the data but held on to its current copy. This is common for data and code reads that hit in a remote socket in E (Exclusive) or F (Forward) state.",
"UMask": "0x8",
"Unit": "CHA"
},
@@ -8193,7 +8193,6 @@
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RFO",
- "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : RFOs issued by IO Devices that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc803fe04",
@@ -8234,7 +8233,6 @@
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_RFO",
- "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : RFOs issued by IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0xc803ff04",
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json b/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
index 77d37db98b70..969cb519eec1 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
@@ -1,49 +1,49 @@
[
{
"BriefDescription": "C2 residency percent per package",
- "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per core",
- "MetricExpr": "cstate_core@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per package",
- "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per core",
- "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per package",
- "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per core",
- "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per package",
- "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Pkg_Residency",
"ScaleUnit": "100%"
@@ -80,7 +80,6 @@
},
{
"BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5) / (3 * tma_info_core_core_clks)",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_alu_op_utilization",
@@ -98,7 +97,6 @@
},
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "1 - (tma_frontend_bound + tma_bad_speculation + tma_retiring)",
"MetricGroup": "BvOB;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
@@ -139,7 +137,6 @@
},
{
"BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
"MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_cisc",
@@ -151,7 +148,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(60 * (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.LLC_MISS))) + 43 * (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.LLC_MISS)))) / tma_info_thread_clks",
- "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
"MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -184,7 +181,7 @@
"MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
"MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
- "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_UOPS",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
"ScaleUnit": "100%"
},
{
@@ -236,7 +233,7 @@
{
"BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
"MetricExpr": "60 * OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE / tma_info_thread_clks",
- "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
"MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
@@ -246,7 +243,7 @@
"BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_info_memory_load_miss_real_latency * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ / tma_info_thread_clks",
- "MetricGroup": "BvMS;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricGroup": "BvMB;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
"PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
@@ -305,7 +302,7 @@
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_128b",
"MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -314,7 +311,7 @@
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_256b",
"MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -334,7 +331,7 @@
"MetricName": "tma_heavy_operations",
"MetricThreshold": "tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. ([ICL+] Note this may overcount due to approximation using indirect events; [ADL+] .)",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+])",
"ScaleUnit": "100%"
},
{
@@ -346,7 +343,7 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
+ "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
"MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * BR_MISP_EXEC.INDIRECT)",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_indirect",
@@ -398,6 +395,12 @@
"MetricName": "tma_info_frontend_ipunknown_branch"
},
{
+ "BriefDescription": "Taken Branches retired Per Cycle",
+ "MetricExpr": "BR_INST_RETIRED.NEAR_TAKEN / tma_info_thread_clks",
+ "MetricGroup": "Branches;FetchBW",
+ "MetricName": "tma_info_frontend_tbpc"
+ },
+ {
"BriefDescription": "Branch instructions per taken branch.",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;PGO",
@@ -474,7 +477,7 @@
},
{
"BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / tma_info_system_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l1d_cache_fill_bw"
},
@@ -486,7 +489,7 @@
},
{
"BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / tma_info_system_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l2_cache_fill_bw"
},
@@ -504,7 +507,7 @@
},
{
"BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / tma_info_system_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l3_cache_fill_bw"
},
@@ -523,7 +526,7 @@
{
"BriefDescription": "Average Latency for L2 cache miss demand Loads",
"MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
- "MetricGroup": "Memory_Lat;Offcore",
+ "MetricGroup": "LockCont;Memory_Lat;Offcore",
"MetricName": "tma_info_memory_latency_load_l2_miss_latency"
},
{
@@ -555,7 +558,7 @@
"MetricThreshold": "tma_info_memory_tlb_page_walks_utilization > 0.5"
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per core",
+ "BriefDescription": "Mem;Backend;CacheHits",
"MetricExpr": "UOPS_EXECUTED.THREAD / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute"
@@ -568,7 +571,7 @@
},
{
"BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]",
- "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / duration_time",
+ "MetricExpr": "tma_info_system_turbo_utilization * msr@tsc@ / 1e9 / tma_info_system_time",
"MetricGroup": "Power;Summary",
"MetricName": "tma_info_system_core_frequency"
},
@@ -580,20 +583,20 @@
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
{
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
- "MetricExpr": "64 * (UNC_ARB_TRK_REQUESTS.ALL + UNC_ARB_COH_TRK_REQUESTS.ALL) / 1e6 / duration_time / 1e3",
+ "MetricExpr": "64 * (UNC_ARB_TRK_REQUESTS.ALL + UNC_ARB_COH_TRK_REQUESTS.ALL) / 1e6 / tma_info_system_time / 1e3",
"MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW",
"MetricName": "tma_info_system_dram_bw_use",
"PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_mem_bandwidth, tma_sq_full"
},
{
"BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * (FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) + 8 * SIMD_FP_256.PACKED_SINGLE) / 1e9 / duration_time",
+ "MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * (FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) + 8 * SIMD_FP_256.PACKED_SINGLE) / 1e9 / tma_info_system_time",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_system_gflops",
"PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width"
@@ -619,6 +622,19 @@
"MetricThreshold": "tma_info_system_kernel_utilization > 0.05"
},
{
+ "BriefDescription": "PerfMon Event Multiplexing accuracy indicator",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "Summary",
+ "MetricName": "tma_info_system_mux",
+ "MetricThreshold": "tma_info_system_mux > 1.1 | tma_info_system_mux < 0.9"
+ },
+ {
+ "BriefDescription": "Total package Power in Watts",
+ "MetricExpr": "power@energy\\-pkg@ * 15.6 / (tma_info_system_time * 1e6)",
+ "MetricGroup": "Power;SoC",
+ "MetricName": "tma_info_system_power"
+ },
+ {
"BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
"MetricExpr": "(1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / (CPU_CLK_UNHALTED.REF_XCLK_ANY / 2) if #SMT_on else 0)",
"MetricGroup": "SMT",
@@ -631,6 +647,13 @@
"MetricName": "tma_info_system_socket_clks"
},
{
+ "BriefDescription": "Run duration time in seconds",
+ "MetricExpr": "duration_time",
+ "MetricGroup": "Summary",
+ "MetricName": "tma_info_system_time",
+ "MetricThreshold": "tma_info_system_time < 1"
+ },
+ {
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "tma_info_thread_clks / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
@@ -691,12 +714,12 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache",
+ "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 Data (L1D) cache",
"MetricExpr": "max((min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_LDM_PENDING) - CYCLE_ACTIVITY.STALLS_L1D_PENDING) / tma_info_thread_clks, 0)",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
"MetricName": "tma_l1_bound",
"MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
- "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_UOPS_RETIRED.L1_HIT_PS;MEM_LOAD_UOPS_RETIRED.HIT_LFB_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
+ "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 Data (L1D) cache. The L1D cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1D. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_UOPS_RETIRED.L1_HIT_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
"ScaleUnit": "100%"
},
{
@@ -749,7 +772,6 @@
},
{
"BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_2 + UOPS_DISPATCHED_PORT.PORT_3 - UOPS_DISPATCHED_PORT.PORT_4) / (2 * tma_info_core_core_clks)",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_load_op_utilization",
@@ -761,7 +783,7 @@
"BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO) / tma_info_thread_clks",
- "MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
+ "MetricGroup": "LockCont;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
"MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_UOPS_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
@@ -781,7 +803,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=6@) / tma_info_thread_clks",
- "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
"MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
@@ -840,7 +862,7 @@
"MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_0",
"MetricThreshold": "tma_port_0 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED_PORT.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -849,7 +871,7 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_1",
"MetricThreshold": "tma_port_1 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -900,7 +922,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,inv\\,cmask\\=1@ / 2 if #SMT_on else (min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0)) / tma_info_core_core_clks)",
+ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,inv\\,cmask\\=1@ / 2 if #SMT_on else min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0)) / tma_info_core_core_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
"MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
@@ -909,7 +931,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@) / 2 if #SMT_on else (UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC) / tma_info_core_core_clks)",
+ "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@) / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC) / tma_info_core_core_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_1",
"MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
@@ -918,7 +940,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@) / 2 if #SMT_on else (UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC - UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_core_clks)",
+ "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@) / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC - UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_core_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_2",
"MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
@@ -949,7 +971,7 @@
"MetricExpr": "13 * LD_BLOCKS.NO_SR / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_split_loads",
- "MetricThreshold": "tma_split_loads > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "MetricThreshold": "tma_split_loads > 0.3",
"PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_UOPS_RETIRED.SPLIT_LOADS_PS",
"ScaleUnit": "100%"
},
@@ -965,7 +987,7 @@
{
"BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
"MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_core_clks",
- "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
"MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
@@ -993,7 +1015,7 @@
"BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(L2_RQSTS.RFO_HIT * 9 * (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) + (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
- "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricGroup": "BvML;LockCont;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
"MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/metricgroups.json b/tools/perf/pmu-events/arch/x86/ivybridge/metricgroups.json
index 4193c90c3459..0863375bdead 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/metricgroups.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/metricgroups.json
@@ -9,6 +9,7 @@
"BvCB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BvFB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BvIO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BvML": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BvMP": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BvMS": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
@@ -34,6 +35,7 @@
"InsType": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"L2Evicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"LSD": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "LockCont": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"MachineClears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Machine_Clears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Mem": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
@@ -51,6 +53,7 @@
"Pipeline": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"PortsUtil": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Power": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Prefetches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Ret": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Retire": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"SMT": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
@@ -78,6 +81,7 @@
"tma_bad_speculation_group": "Metrics contributing to tma_bad_speculation category",
"tma_branch_resteers_group": "Metrics contributing to tma_branch_resteers category",
"tma_core_bound_group": "Metrics contributing to tma_core_bound category",
+ "tma_divider_group": "Metrics contributing to tma_divider category",
"tma_dram_bound_group": "Metrics contributing to tma_dram_bound category",
"tma_dtlb_load_group": "Metrics contributing to tma_dtlb_load category",
"tma_dtlb_store_group": "Metrics contributing to tma_dtlb_store category",
@@ -103,6 +107,7 @@
"tma_issueSpSt": "Metrics related by the issue $issueSpSt",
"tma_issueSyncxn": "Metrics related by the issue $issueSyncxn",
"tma_issueTLB": "Metrics related by the issue $issueTLB",
+ "tma_itlb_misses_group": "Metrics contributing to tma_itlb_misses category",
"tma_l1_bound_group": "Metrics contributing to tma_l1_bound category",
"tma_l3_bound_group": "Metrics contributing to tma_l3_bound category",
"tma_light_operations_group": "Metrics contributing to tma_light_operations category",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json b/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
index 8fe0512c938f..1cdd197ac883 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
@@ -1,49 +1,49 @@
[
{
"BriefDescription": "C2 residency percent per package",
- "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per core",
- "MetricExpr": "cstate_core@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per package",
- "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per core",
- "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per package",
- "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per core",
- "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per package",
- "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Pkg_Residency",
"ScaleUnit": "100%"
@@ -80,7 +80,6 @@
},
{
"BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5) / (3 * tma_info_core_core_clks)",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_alu_op_utilization",
@@ -98,7 +97,6 @@
},
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "1 - (tma_frontend_bound + tma_bad_speculation + tma_retiring)",
"MetricGroup": "BvOB;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
@@ -139,7 +137,6 @@
},
{
"BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
"MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_cisc",
@@ -151,7 +148,7 @@
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(60 * (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_FWD))) + 43 * (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_FWD)))) / tma_info_thread_clks",
- "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
"MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
@@ -184,7 +181,7 @@
"MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
"MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
- "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_UOPS",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
"ScaleUnit": "100%"
},
{
@@ -236,7 +233,7 @@
{
"BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
"MetricExpr": "(200 * OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM + 60 * OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE) / tma_info_thread_clks",
- "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
"MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
@@ -246,7 +243,7 @@
"BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_info_memory_load_miss_real_latency * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ / tma_info_thread_clks",
- "MetricGroup": "BvMS;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricGroup": "BvMB;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
"PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
@@ -305,7 +302,7 @@
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_128b",
"MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -314,7 +311,7 @@
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_256b",
"MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -334,7 +331,7 @@
"MetricName": "tma_heavy_operations",
"MetricThreshold": "tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. ([ICL+] Note this may overcount due to approximation using indirect events; [ADL+] .)",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+])",
"ScaleUnit": "100%"
},
{
@@ -346,7 +343,7 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
+ "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
"MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * BR_MISP_EXEC.INDIRECT)",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_indirect",
@@ -398,6 +395,12 @@
"MetricName": "tma_info_frontend_ipunknown_branch"
},
{
+ "BriefDescription": "Taken Branches retired Per Cycle",
+ "MetricExpr": "BR_INST_RETIRED.NEAR_TAKEN / tma_info_thread_clks",
+ "MetricGroup": "Branches;FetchBW",
+ "MetricName": "tma_info_frontend_tbpc"
+ },
+ {
"BriefDescription": "Branch instructions per taken branch.",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;PGO",
@@ -474,7 +477,7 @@
},
{
"BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / tma_info_system_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l1d_cache_fill_bw"
},
@@ -486,7 +489,7 @@
},
{
"BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / tma_info_system_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l2_cache_fill_bw"
},
@@ -504,7 +507,7 @@
},
{
"BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / tma_info_system_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l3_cache_fill_bw"
},
@@ -523,7 +526,7 @@
{
"BriefDescription": "Average Latency for L2 cache miss demand Loads",
"MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
- "MetricGroup": "Memory_Lat;Offcore",
+ "MetricGroup": "LockCont;Memory_Lat;Offcore",
"MetricName": "tma_info_memory_latency_load_l2_miss_latency"
},
{
@@ -555,7 +558,7 @@
"MetricThreshold": "tma_info_memory_tlb_page_walks_utilization > 0.5"
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per core",
+ "BriefDescription": "Mem;Backend;CacheHits",
"MetricExpr": "UOPS_EXECUTED.THREAD / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute"
@@ -568,7 +571,7 @@
},
{
"BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]",
- "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / duration_time",
+ "MetricExpr": "tma_info_system_turbo_utilization * msr@tsc@ / 1e9 / tma_info_system_time",
"MetricGroup": "Power;Summary",
"MetricName": "tma_info_system_core_frequency"
},
@@ -580,20 +583,20 @@
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
{
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
- "MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / duration_time",
+ "MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / tma_info_system_time",
"MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW",
"MetricName": "tma_info_system_dram_bw_use",
"PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_mem_bandwidth, tma_sq_full"
},
{
"BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * (FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) + 8 * SIMD_FP_256.PACKED_SINGLE) / 1e9 / duration_time",
+ "MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * (FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) + 8 * SIMD_FP_256.PACKED_SINGLE) / 1e9 / tma_info_system_time",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_system_gflops",
"PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width"
@@ -620,6 +623,7 @@
},
{
"BriefDescription": "Average number of parallel data read requests to external memory",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "UNC_C_TOR_OCCUPANCY.MISS_OPCODE@filter_opc\\=0x182@ / UNC_C_TOR_OCCUPANCY.MISS_OPCODE@filter_opc\\=0x182\\,thresh\\=1@",
"MetricGroup": "Mem;MemoryBW;SoC",
"MetricName": "tma_info_system_mem_parallel_reads",
@@ -627,12 +631,25 @@
},
{
"BriefDescription": "Average latency of data read request to external memory (in nanoseconds)",
- "MetricExpr": "1e9 * (UNC_C_TOR_OCCUPANCY.MISS_OPCODE@filter_opc\\=0x182@ / UNC_C_TOR_INSERTS.MISS_OPCODE@filter_opc\\=0x182@) / (tma_info_system_socket_clks / duration_time)",
+ "MetricExpr": "1e9 * (UNC_C_TOR_OCCUPANCY.MISS_OPCODE@filter_opc\\=0x182@ / UNC_C_TOR_INSERTS.MISS_OPCODE@filter_opc\\=0x182@) / (tma_info_system_socket_clks / tma_info_system_time)",
"MetricGroup": "Mem;MemoryLat;SoC",
"MetricName": "tma_info_system_mem_read_latency",
"PublicDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches. ([RKL+]memory-controller only)"
},
{
+ "BriefDescription": "PerfMon Event Multiplexing accuracy indicator",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "Summary",
+ "MetricName": "tma_info_system_mux",
+ "MetricThreshold": "tma_info_system_mux > 1.1 | tma_info_system_mux < 0.9"
+ },
+ {
+ "BriefDescription": "Total package Power in Watts",
+ "MetricExpr": "(power@energy\\-pkg@ + power@energy\\-ram@) * 15.6 / (duration_time * 1e6)",
+ "MetricGroup": "Power;SoC",
+ "MetricName": "tma_info_system_power"
+ },
+ {
"BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
"MetricExpr": "(1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / (CPU_CLK_UNHALTED.REF_XCLK_ANY / 2) if #SMT_on else 0)",
"MetricGroup": "SMT",
@@ -645,6 +662,13 @@
"MetricName": "tma_info_system_socket_clks"
},
{
+ "BriefDescription": "Run duration time in seconds",
+ "MetricExpr": "duration_time",
+ "MetricGroup": "Summary",
+ "MetricName": "tma_info_system_time",
+ "MetricThreshold": "tma_info_system_time < 1"
+ },
+ {
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "tma_info_thread_clks / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
@@ -652,7 +676,7 @@
},
{
"BriefDescription": "Measured Average Uncore Frequency for the SoC [GHz]",
- "MetricExpr": "tma_info_system_socket_clks / 1e9 / duration_time",
+ "MetricExpr": "tma_info_system_socket_clks / 1e9 / tma_info_system_time",
"MetricGroup": "SoC",
"MetricName": "tma_info_system_uncore_frequency"
},
@@ -711,12 +735,12 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache",
+ "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 Data (L1D) cache",
"MetricExpr": "max((min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_LDM_PENDING) - CYCLE_ACTIVITY.STALLS_L1D_PENDING) / tma_info_thread_clks, 0)",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
"MetricName": "tma_l1_bound",
"MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
- "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_UOPS_RETIRED.L1_HIT_PS;MEM_LOAD_UOPS_RETIRED.HIT_LFB_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
+ "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 Data (L1D) cache. The L1D cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1D. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_UOPS_RETIRED.L1_HIT_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
"ScaleUnit": "100%"
},
{
@@ -769,7 +793,6 @@
},
{
"BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_2 + UOPS_DISPATCHED_PORT.PORT_3 - UOPS_DISPATCHED_PORT.PORT_4) / (2 * tma_info_core_core_clks)",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_load_op_utilization",
@@ -779,6 +802,7 @@
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "200 * (MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_FWD))) / tma_info_thread_clks",
"MetricGroup": "Server;TopdownL5;tma_L5_group;tma_mem_latency_group",
"MetricName": "tma_local_mem",
@@ -790,7 +814,7 @@
"BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO) / tma_info_thread_clks",
- "MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
+ "MetricGroup": "LockCont;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
"MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_UOPS_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
@@ -810,7 +834,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=6@) / tma_info_thread_clks",
- "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
"MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
@@ -869,7 +893,7 @@
"MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_0",
"MetricThreshold": "tma_port_0 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED_PORT.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -878,7 +902,7 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_1",
"MetricThreshold": "tma_port_1 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -929,7 +953,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,inv\\,cmask\\=1@ / 2 if #SMT_on else (min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0)) / tma_info_core_core_clks)",
+ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,inv\\,cmask\\=1@ / 2 if #SMT_on else min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0)) / tma_info_core_core_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
"MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
@@ -938,7 +962,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@) / 2 if #SMT_on else (UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC) / tma_info_core_core_clks)",
+ "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@) / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC) / tma_info_core_core_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_1",
"MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
@@ -947,7 +971,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@) / 2 if #SMT_on else (UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC - UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_core_clks)",
+ "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@) / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC - UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_core_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_2",
"MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
@@ -974,6 +998,7 @@
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "310 * (MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_FWD))) / tma_info_thread_clks",
"MetricGroup": "Server;Snoop;TopdownL5;tma_L5_group;tma_mem_latency_group",
"MetricName": "tma_remote_mem",
@@ -997,7 +1022,7 @@
"MetricExpr": "13 * LD_BLOCKS.NO_SR / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_split_loads",
- "MetricThreshold": "tma_split_loads > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "MetricThreshold": "tma_split_loads > 0.3",
"PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_UOPS_RETIRED.SPLIT_LOADS_PS",
"ScaleUnit": "100%"
},
@@ -1013,7 +1038,7 @@
{
"BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
"MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_core_clks",
- "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
"MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
@@ -1041,7 +1066,7 @@
"BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(L2_RQSTS.RFO_HIT * 9 * (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) + (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
- "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricGroup": "BvML;LockCont;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
"MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/metricgroups.json b/tools/perf/pmu-events/arch/x86/ivytown/metricgroups.json
index 4193c90c3459..0863375bdead 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/metricgroups.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/metricgroups.json
@@ -9,6 +9,7 @@
"BvCB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BvFB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BvIO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BvML": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BvMP": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BvMS": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
@@ -34,6 +35,7 @@
"InsType": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"L2Evicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"LSD": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "LockCont": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"MachineClears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Machine_Clears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Mem": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
@@ -51,6 +53,7 @@
"Pipeline": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"PortsUtil": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Power": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Prefetches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Ret": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Retire": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"SMT": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
@@ -78,6 +81,7 @@
"tma_bad_speculation_group": "Metrics contributing to tma_bad_speculation category",
"tma_branch_resteers_group": "Metrics contributing to tma_branch_resteers category",
"tma_core_bound_group": "Metrics contributing to tma_core_bound category",
+ "tma_divider_group": "Metrics contributing to tma_divider category",
"tma_dram_bound_group": "Metrics contributing to tma_dram_bound category",
"tma_dtlb_load_group": "Metrics contributing to tma_dtlb_load category",
"tma_dtlb_store_group": "Metrics contributing to tma_dtlb_store category",
@@ -103,6 +107,7 @@
"tma_issueSpSt": "Metrics related by the issue $issueSpSt",
"tma_issueSyncxn": "Metrics related by the issue $issueSyncxn",
"tma_issueTLB": "Metrics related by the issue $issueTLB",
+ "tma_itlb_misses_group": "Metrics contributing to tma_itlb_misses category",
"tma_l1_bound_group": "Metrics contributing to tma_l1_bound category",
"tma_l3_bound_group": "Metrics contributing to tma_l3_bound category",
"tma_light_operations_group": "Metrics contributing to tma_light_operations category",
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/frontend.json b/tools/perf/pmu-events/arch/x86/jaketown/frontend.json
index 3cb468da7011..97e7760aeb26 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/frontend.json
@@ -278,5 +278,13 @@
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
"SampleAfterValue": "2000003",
"UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Valid instructions written to IQ per cycle.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "INSTS_WRITTEN_TO_IQ.INSTS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
index f8c18741b360..250c73b21385 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
@@ -1,49 +1,49 @@
[
{
"BriefDescription": "C2 residency percent per package",
- "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per core",
- "MetricExpr": "cstate_core@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per package",
- "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per core",
- "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per package",
- "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per core",
- "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per package",
- "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Pkg_Residency",
"ScaleUnit": "100%"
@@ -71,7 +71,6 @@
},
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "1 - (tma_frontend_bound + tma_bad_speculation + tma_retiring)",
"MetricGroup": "BvOB;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
@@ -127,7 +126,7 @@
"MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
"MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
- "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_UOPS",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
"ScaleUnit": "100%"
},
{
@@ -211,7 +210,7 @@
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_128b",
"MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -220,7 +219,7 @@
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_256b",
"MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -240,7 +239,7 @@
"MetricName": "tma_heavy_operations",
"MetricThreshold": "tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. ([ICL+] Note this may overcount due to approximation using indirect events; [ADL+] .)",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+])",
"ScaleUnit": "100%"
},
{
@@ -276,6 +275,12 @@
"PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_lcp"
},
{
+ "BriefDescription": "Taken Branches retired Per Cycle",
+ "MetricExpr": "BR_INST_RETIRED.NEAR_TAKEN / tma_info_thread_clks",
+ "MetricGroup": "Branches;FetchBW",
+ "MetricName": "tma_info_frontend_tbpc"
+ },
+ {
"BriefDescription": "Total number of retired Instructions",
"MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary;TmaL1;tma_L1_group",
@@ -290,7 +295,7 @@
},
{
"BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]",
- "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / duration_time",
+ "MetricExpr": "tma_info_system_turbo_utilization * msr@tsc@ / 1e9 / tma_info_system_time",
"MetricGroup": "Power;Summary",
"MetricName": "tma_info_system_core_frequency"
},
@@ -302,20 +307,20 @@
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
{
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
- "MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / duration_time",
+ "MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / tma_info_system_time",
"MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW",
"MetricName": "tma_info_system_dram_bw_use",
"PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_mem_bandwidth"
},
{
"BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * (FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) + 8 * SIMD_FP_256.PACKED_SINGLE) / 1e9 / duration_time",
+ "MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * (FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) + 8 * SIMD_FP_256.PACKED_SINGLE) / 1e9 / tma_info_system_time",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_system_gflops",
"PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width"
@@ -342,6 +347,7 @@
},
{
"BriefDescription": "Average number of parallel data read requests to external memory",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "UNC_C_TOR_OCCUPANCY.MISS_OPCODE@filter_opc\\=0x182@ / UNC_C_TOR_OCCUPANCY.MISS_OPCODE@filter_opc\\=0x182\\,thresh\\=1@",
"MetricGroup": "Mem;MemoryBW;SoC",
"MetricName": "tma_info_system_mem_parallel_reads",
@@ -349,12 +355,19 @@
},
{
"BriefDescription": "Average latency of data read request to external memory (in nanoseconds)",
- "MetricExpr": "1e9 * (UNC_C_TOR_OCCUPANCY.MISS_OPCODE@filter_opc\\=0x182@ / UNC_C_TOR_INSERTS.MISS_OPCODE@filter_opc\\=0x182@) / (tma_info_system_socket_clks / duration_time)",
+ "MetricExpr": "1e9 * (UNC_C_TOR_OCCUPANCY.MISS_OPCODE@filter_opc\\=0x182@ / UNC_C_TOR_INSERTS.MISS_OPCODE@filter_opc\\=0x182@) / (tma_info_system_socket_clks / tma_info_system_time)",
"MetricGroup": "Mem;MemoryLat;SoC",
"MetricName": "tma_info_system_mem_read_latency",
"PublicDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches. ([RKL+]memory-controller only)"
},
{
+ "BriefDescription": "PerfMon Event Multiplexing accuracy indicator",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "Summary",
+ "MetricName": "tma_info_system_mux",
+ "MetricThreshold": "tma_info_system_mux > 1.1 | tma_info_system_mux < 0.9"
+ },
+ {
"BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
"MetricExpr": "(1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / (CPU_CLK_UNHALTED.REF_XCLK_ANY / 2) if #SMT_on else 0)",
"MetricGroup": "SMT",
@@ -367,6 +380,13 @@
"MetricName": "tma_info_system_socket_clks"
},
{
+ "BriefDescription": "Run duration time in seconds",
+ "MetricExpr": "duration_time",
+ "MetricGroup": "Summary",
+ "MetricName": "tma_info_system_time",
+ "MetricThreshold": "tma_info_system_time < 1"
+ },
+ {
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "tma_info_thread_clks / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
@@ -374,7 +394,7 @@
},
{
"BriefDescription": "Measured Average Uncore Frequency for the SoC [GHz]",
- "MetricExpr": "tma_info_system_socket_clks / 1e9 / duration_time",
+ "MetricExpr": "tma_info_system_socket_clks / 1e9 / tma_info_system_time",
"MetricGroup": "SoC",
"MetricName": "tma_info_system_uncore_frequency"
},
@@ -468,7 +488,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=6@) / tma_info_thread_clks",
- "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
"MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_info_system_dram_bw_use",
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/metricgroups.json b/tools/perf/pmu-events/arch/x86/jaketown/metricgroups.json
index 7dc7eb0d3dd3..eb8fbd14138a 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/metricgroups.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/metricgroups.json
@@ -9,6 +9,7 @@
"BvCB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BvFB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BvIO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BvML": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BvMP": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BvMS": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
@@ -33,6 +34,7 @@
"InsType": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"L2Evicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"LSD": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "LockCont": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"MachineClears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Machine_Clears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Mem": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
@@ -48,6 +50,7 @@
"Pipeline": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"PortsUtil": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Power": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Prefetches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Ret": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Retire": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"SMT": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
@@ -75,6 +78,7 @@
"tma_bad_speculation_group": "Metrics contributing to tma_bad_speculation category",
"tma_branch_resteers_group": "Metrics contributing to tma_branch_resteers category",
"tma_core_bound_group": "Metrics contributing to tma_core_bound category",
+ "tma_divider_group": "Metrics contributing to tma_divider category",
"tma_dram_bound_group": "Metrics contributing to tma_dram_bound category",
"tma_dtlb_load_group": "Metrics contributing to tma_dtlb_load category",
"tma_dtlb_store_group": "Metrics contributing to tma_dtlb_store category",
@@ -99,6 +103,7 @@
"tma_issueSmSt": "Metrics related by the issue $issueSmSt",
"tma_issueSyncxn": "Metrics related by the issue $issueSyncxn",
"tma_issueTLB": "Metrics related by the issue $issueTLB",
+ "tma_itlb_misses_group": "Metrics contributing to tma_itlb_misses category",
"tma_l1_bound_group": "Metrics contributing to tma_l1_bound category",
"tma_light_operations_group": "Metrics contributing to tma_light_operations category",
"tma_machine_clears_group": "Metrics contributing to tma_machine_clears category",
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/other.json b/tools/perf/pmu-events/arch/x86/jaketown/other.json
index 42692fa24b6c..970839a9c786 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/other.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/other.json
@@ -34,14 +34,6 @@
"UMask": "0x2"
},
{
- "BriefDescription": "Valid instructions written to IQ per cycle.",
- "Counter": "0,1,2,3",
- "EventCode": "0x17",
- "EventName": "INSTS_WRITTEN_TO_IQ.INSTS",
- "SampleAfterValue": "2000003",
- "UMask": "0x1"
- },
- {
"BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock.",
"Counter": "0,1,2,3",
"EventCode": "0x63",
diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/cache.json b/tools/perf/pmu-events/arch/x86/lunarlake/cache.json
index 15fb9921f4fc..3d2616be8ec1 100644
--- a/tools/perf/pmu-events/arch/x86/lunarlake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/lunarlake/cache.json
@@ -29,6 +29,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Cachelines replaced into the L1 d-cache. Successful replacements only (not blocked) and exclude WB-miss case",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x51",
+ "EventName": "L1D.L1_REPLACEMENT",
+ "PublicDescription": "Counts cachelines replaced into the L1 d-cache.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Cachelines replaced into the L0 and L1 d-cache. Successful replacements only (not blocked) and exclude WB-miss case",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x51",
@@ -233,7 +243,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of L2 prefetches initiated by either the L2 Stream or AMP that were throttled due to exceeding the XQ threshold set by either XQ_THRESOLD_DTP or XQ_THRESHOLD. Counts on a per core basis.",
+ "BriefDescription": "Counts the number of L2 prefetches initiated by either the L2 Stream or AMP that were throttled due to exceeding the XQ threshold set by either XQ_THRESHOLD_DTP or XQ_THRESHOLD. Counts on a per core basis.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x28",
"EventName": "L2_PREFETCHES_THROTTLED.XQ_THRESH",
@@ -418,6 +428,51 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of LLC prefetches that were throttled due to Dynamic Prefetch Throttling. The throttle requestor/source could be from the uncore/SOC or the Dead Block Predictor. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x29",
+ "EventName": "LLC_PREFETCHES_THROTTLED.DPT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of LLC prefetches throttled due to Demand Throttle Prefetcher. DTP Global Triggered with no Local Override. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x29",
+ "EventName": "LLC_PREFETCHES_THROTTLED.DTP",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of LLC prefetches not throttled by DTP due to local override. These prefetches may still be throttled due to another throttler mechanism. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x29",
+ "EventName": "LLC_PREFETCHES_THROTTLED.DTP_OVERRIDE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of LLC prefetches throttled due to LLC hit rate in <insert knob name here>. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x29",
+ "EventName": "LLC_PREFETCHES_THROTTLED.HIT_RATE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of LLC prefetches throttled due to exceeding the XQ threshold set by either XQ_THRESHOLD_DTP or LLC_XQ_THRESHOLD. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x29",
+ "EventName": "LLC_PREFETCHES_THROTTLED.XQ_THRESH",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Cycles when L1D is locked",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x42",
@@ -547,7 +602,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_LOADS",
- "PublicDescription": "Counts Instructions with at least one architecturally visible load retired.",
+ "PublicDescription": "Counts Instructions with at least one architecturally visible load retired. Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x81",
"Unit": "cpu_core"
@@ -558,7 +613,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_STORES",
- "PublicDescription": "Counts all retired store instructions.",
+ "PublicDescription": "Counts all retired store instructions. Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x82",
"Unit": "cpu_core"
@@ -568,7 +623,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_SWPF",
- "PublicDescription": "Counts all retired software prefetch instructions.",
+ "PublicDescription": "Counts all retired software prefetch instructions. Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x84",
"Unit": "cpu_core"
@@ -579,7 +634,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ANY",
- "PublicDescription": "Counts all retired memory instructions - loads and stores.",
+ "PublicDescription": "Counts all retired memory instructions - loads and stores. Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x87",
"Unit": "cpu_core"
@@ -590,7 +645,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.LOCK_LOADS",
- "PublicDescription": "Counts retired load instructions with locked access.",
+ "PublicDescription": "Counts retired load instructions with locked access. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x21",
"Unit": "cpu_core"
@@ -601,7 +656,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
- "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
+ "PublicDescription": "Counts retired load instructions that split across a cacheline boundary. Available PDIST counters: 0,1",
"SampleAfterValue": "100003",
"UMask": "0x41",
"Unit": "cpu_core"
@@ -612,18 +667,29 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_STORES",
- "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
+ "PublicDescription": "Counts retired store instructions that split across a cacheline boundary. Available PDIST counters: 0,1",
"SampleAfterValue": "100003",
"UMask": "0x42",
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Retired instructions that hit the STLB.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_HIT_ANY",
+ "PublicDescription": "Number of retired instructions with a clean hit in the 2nd-level TLB (STLB). Available PDIST counters: 0,1",
+ "SampleAfterValue": "100003",
+ "UMask": "0xf",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Retired load instructions that hit the STLB.",
"Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_HIT_LOADS",
- "PublicDescription": "Number of retired load instructions with a clean hit in the 2nd-level TLB (STLB).",
+ "PublicDescription": "Number of retired load instructions with a clean hit in the 2nd-level TLB (STLB). Available PDIST counters: 0,1",
"SampleAfterValue": "100003",
"UMask": "0x9",
"Unit": "cpu_core"
@@ -634,18 +700,39 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_HIT_STORES",
- "PublicDescription": "Number of retired store instructions that hit in the 2nd-level TLB (STLB).",
+ "PublicDescription": "Number of retired store instructions that hit in the 2nd-level TLB (STLB). Available PDIST counters: 0,1",
"SampleAfterValue": "100003",
"UMask": "0xa",
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Retired SWPF instructions that hit the STLB.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_HIT_SWPF",
+ "PublicDescription": "Number of retired SWPF instructions that hit in the 2nd-level TLB (STLB). Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xc",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_ANY",
+ "PublicDescription": "Retired instructions that miss the STLB. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x17",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Retired load instructions that miss the STLB.",
"Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
- "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB). Available PDIST counters: 0,1",
"SampleAfterValue": "100003",
"UMask": "0x11",
"Unit": "cpu_core"
@@ -656,18 +743,28 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
- "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB). Available PDIST counters: 0,1",
"SampleAfterValue": "100003",
"UMask": "0x12",
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Retired SWPF instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_SWPF",
+ "PublicDescription": "Number of retired SWPF instructions that (start a) miss in the 2nd-level TLB (STLB). Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x14",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Retired load instructions whose data sources were a cross-core Snoop hits and forwards data from an in on-package core cache (induced by NI$)",
"Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD",
- "PublicDescription": "Counts retired load instructions whose data sources were a cross-core Snoop hits and forwards data from an in on-package core cache (induced by NI$)",
+ "PublicDescription": "Counts retired load instructions whose data sources were a cross-core Snoop hits and forwards data from an in on-package core cache (induced by NI$) Available PDIST counters: 0,1",
"SampleAfterValue": "20011",
"UMask": "0x10",
"Unit": "cpu_core"
@@ -678,7 +775,7 @@
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
- "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3, Hit-with-FWD is normally excluded.",
+ "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3, Hit-with-FWD is normally excluded. Available PDIST counters: 0,1",
"SampleAfterValue": "20011",
"UMask": "0x4",
"Unit": "cpu_core"
@@ -689,7 +786,7 @@
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
- "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache. Available PDIST counters: 0,1",
"SampleAfterValue": "20011",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -700,7 +797,7 @@
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD",
- "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache. Available PDIST counters: 0,1",
"SampleAfterValue": "20011",
"UMask": "0x2",
"Unit": "cpu_core"
@@ -711,6 +808,7 @@
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.MEMSIDE_CACHE",
+ "PublicDescription": "Retired load instructions which data source is memory side cache. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"Unit": "cpu_core"
},
@@ -720,7 +818,7 @@
"Data_LA": "1",
"EventCode": "0xd4",
"EventName": "MEM_LOAD_MISC_RETIRED.UC",
- "PublicDescription": "Retired instructions with at least one load to uncacheable memory-type, or at least one cache-line split locked access (Bus Lock).",
+ "PublicDescription": "Retired instructions with at least one load to uncacheable memory-type, or at least one cache-line split locked access (Bus Lock). Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x4",
"Unit": "cpu_core"
@@ -731,7 +829,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.FB_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x40",
"Unit": "cpu_core"
@@ -742,7 +840,18 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x101",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts retired load instructions with at least one uop that hit in the Level 0 of the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_HIT_L0",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the Level 0 of the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source. Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -752,6 +861,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_HIT_L1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the Level 1 of the L1 data cache. Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"Unit": "cpu_core"
},
@@ -761,7 +871,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_MISS",
- "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache. Available PDIST counters: 0,1",
"SampleAfterValue": "200003",
"UMask": "0x8",
"Unit": "cpu_core"
@@ -772,7 +882,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_HIT",
- "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources.",
+ "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources. Available PDIST counters: 0,1",
"SampleAfterValue": "200003",
"UMask": "0x2",
"Unit": "cpu_core"
@@ -783,7 +893,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_MISS",
- "PublicDescription": "Counts retired load instructions missed L2 cache as data sources.",
+ "PublicDescription": "Counts retired load instructions missed L2 cache as data sources. Available PDIST counters: 0,1",
"SampleAfterValue": "100021",
"UMask": "0x10",
"Unit": "cpu_core"
@@ -794,7 +904,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache. Available PDIST counters: 0,1",
"SampleAfterValue": "100021",
"UMask": "0x4",
"Unit": "cpu_core"
@@ -805,7 +915,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_MISS",
- "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache. Available PDIST counters: 0,1",
"SampleAfterValue": "50021",
"UMask": "0x20",
"Unit": "cpu_core"
@@ -979,7 +1089,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
- "Counter": "0,1,2,3,4,5,6,7",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_128",
@@ -991,7 +1101,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
- "Counter": "0,1,2,3,4,5,6,7",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_16",
@@ -1003,7 +1113,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
- "Counter": "0,1,2,3,4,5,6,7",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_256",
@@ -1015,7 +1125,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
- "Counter": "0,1,2,3,4,5,6,7",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_32",
@@ -1027,7 +1137,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
- "Counter": "0,1,2,3,4,5,6,7",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_4",
@@ -1039,7 +1149,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
- "Counter": "0,1,2,3,4,5,6,7",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_512",
@@ -1051,7 +1161,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
- "Counter": "0,1,2,3,4,5,6,7",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_64",
@@ -1063,7 +1173,7 @@
},
{
"BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled",
- "Counter": "0,1,2,3,4,5,6,7",
+ "Counter": "0,1",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_8",
@@ -1184,34 +1294,121 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts writebacks of modified cachelines that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.COREWB_M.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10008",
+ "PublicDescription": "Counts writebacks of modified cachelines that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts writebacks of modified cachelines that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.COREWB_M.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x7E001E00008",
+ "PublicDescription": "Counts writebacks of modified cachelines that were supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts writebacks of non-modified cachelines that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.COREWB_NONM.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x11000",
+ "PublicDescription": "Counts writebacks of non-modified cachelines that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts writebacks of non-modified cachelines that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.COREWB_NONM.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x7E001E01000",
+ "PublicDescription": "Counts writebacks of non-modified cachelines that were supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by mem side cache.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_CODE_RD.MEMSIDE_CACHE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x11F80000004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by mem side cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "PublicDescription": "Counts demand data reads that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "PublicDescription": "Counts demand data reads that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
- "Counter": "0,1,2,3",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x40001E00001",
+ "PublicDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop hit in another cores caches, data forwarding is required as the data is modified. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
},
{
"BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop hit in another cores caches which forwarded the unmodified data to the requesting core.",
- "Counter": "0,1,2,3",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x20001E00001",
+ "PublicDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop hit in another cores caches which forwarded the unmodified data to the requesting core. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -1223,17 +1420,55 @@
"EventName": "OCR.DEMAND_DATA_RD.MEMSIDE_CACHE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x11F80000001",
+ "PublicDescription": "Counts demand data reads that were supplied by mem side cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "PublicDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "PublicDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
- "Counter": "0,1,2,3",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x40001E00002",
+ "PublicDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop hit in another cores caches, data forwarding is required as the data is modified. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read, RFO and ITOM requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x7E001E04477",
+ "PublicDescription": "Counts all data read, code read, RFO and ITOM requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/frontend.json b/tools/perf/pmu-events/arch/x86/lunarlake/frontend.json
index 07bd38a1904e..b21d602e9f1a 100644
--- a/tools/perf/pmu-events/arch/x86/lunarlake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/lunarlake/frontend.json
@@ -108,7 +108,7 @@
"EventName": "FRONTEND_RETIRED.ANY_ANT",
"MSRIndex": "0x3F7",
"MSRValue": "0x9",
- "PublicDescription": "Always Not Taken (ANT) conditional retired branches (no BTB entry and not mispredicted)",
+ "PublicDescription": "Always Not Taken (ANT) conditional retired branches (no BTB entry and not mispredicted) Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -120,7 +120,7 @@
"EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x1",
- "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -169,7 +169,7 @@
"EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x11",
- "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss.",
+ "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -199,7 +199,7 @@
"EventName": "FRONTEND_RETIRED.ITLB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x14",
- "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -211,7 +211,7 @@
"EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x12",
- "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss.",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -223,7 +223,7 @@
"EventName": "FRONTEND_RETIRED.L2_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x13",
- "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss.",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -235,7 +235,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
"MSRValue": "0x608006",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -247,7 +247,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
"MSRValue": "0x601006",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -259,7 +259,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
"MSRValue": "0x600206",
- "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -271,7 +271,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
"MSRIndex": "0x3F7",
"MSRValue": "0x610006",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -283,7 +283,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
"MSRValue": "0x100206",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -295,7 +295,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
"MSRValue": "0x602006",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -307,7 +307,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
"MSRValue": "0x600406",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -319,7 +319,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
"MSRValue": "0x620006",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -331,7 +331,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
"MSRValue": "0x604006",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -343,7 +343,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
"MSRValue": "0x600806",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -355,7 +355,7 @@
"EventName": "FRONTEND_RETIRED.MISP_ANT",
"MSRIndex": "0x3F7",
"MSRValue": "0x9",
- "PublicDescription": "ANT retired branches that got just mispredicted",
+ "PublicDescription": "ANT retired branches that got just mispredicted Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x2",
"Unit": "cpu_core"
@@ -367,6 +367,7 @@
"EventName": "FRONTEND_RETIRED.MS_FLOWS",
"MSRIndex": "0x3F7",
"MSRValue": "0x8",
+ "PublicDescription": "Counts flows delivered by the Microcode Sequencer Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -396,7 +397,7 @@
"EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x15",
- "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -408,7 +409,7 @@
"EventName": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
"MSRIndex": "0x3F7",
"MSRValue": "0x17",
- "PublicDescription": "Number retired branch instructions that caused the front-end to be resteered when it finds the instruction in a fetch line. This is called Unknown Branch which occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
+ "PublicDescription": "Number retired branch instructions that caused the front-end to be resteered when it finds the instruction in a fetch line. This is called Unknown Branch which occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/lnl-metrics.json b/tools/perf/pmu-events/arch/x86/lunarlake/lnl-metrics.json
index e748f839c4bd..06390a72110d 100644
--- a/tools/perf/pmu-events/arch/x86/lunarlake/lnl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/lunarlake/lnl-metrics.json
@@ -1,75 +1,47 @@
[
{
"BriefDescription": "C10 residency percent per package",
- "MetricExpr": "cstate_pkg@c10\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c10\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C10_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C1 residency percent per core",
- "MetricExpr": "cstate_core@c1\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c1\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C1_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C2 residency percent per package",
- "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "C3 residency percent per package",
- "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
- "MetricGroup": "Power",
- "MetricName": "C3_Pkg_Residency",
- "ScaleUnit": "100%"
- },
- {
"BriefDescription": "C6 residency percent per core",
- "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per package",
- "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per core",
- "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Core_Residency",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "C7 residency percent per package",
- "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
- "MetricGroup": "Power",
- "MetricName": "C7_Pkg_Residency",
- "ScaleUnit": "100%"
- },
- {
- "BriefDescription": "C8 residency percent per package",
- "MetricExpr": "cstate_pkg@c8\\-residency@ / TSC",
- "MetricGroup": "Power",
- "MetricName": "C8_Pkg_Residency",
- "ScaleUnit": "100%"
- },
- {
- "BriefDescription": "C9 residency percent per package",
- "MetricExpr": "cstate_pkg@c9\\-residency@ / TSC",
- "MetricGroup": "Power",
- "MetricName": "C9_Pkg_Residency",
- "ScaleUnit": "100%"
- },
- {
"BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
"MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
"MetricGroup": "smi",
@@ -89,7 +61,7 @@
"MetricExpr": "tma_core_bound",
"MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_allocation_restriction",
- "MetricThreshold": "(tma_allocation_restriction >0.10) & ((tma_core_bound >0.10) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_allocation_restriction > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -99,7 +71,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.ALL_P@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
- "MetricThreshold": "(tma_backend_bound >0.10)",
+ "MetricThreshold": "tma_backend_bound > 0.1",
"MetricgroupNoGroup": "TopdownL1;Default",
"PublicDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that uops must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count",
"ScaleUnit": "100%",
@@ -111,7 +83,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.ALL_P@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
"MetricName": "tma_bad_speculation",
- "MetricThreshold": "(tma_bad_speculation >0.15)",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
"PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ). Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
"ScaleUnit": "100%",
@@ -122,7 +94,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.BRANCH_DETECT@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_branch_detect",
- "MetricThreshold": "(tma_branch_detect >0.05) & ((tma_ifetch_latency >0.15) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_branch_detect > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"PublicDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -132,7 +104,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.MISPREDICT@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
"MetricName": "tma_branch_mispredicts",
- "MetricThreshold": "(tma_branch_mispredicts >0.05) & ((tma_bad_speculation >0.15))",
+ "MetricThreshold": "tma_branch_mispredicts > 0.05 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -142,7 +114,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.BRANCH_RESTEER@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_branch_resteer",
- "MetricThreshold": "(tma_branch_resteer >0.05) & ((tma_ifetch_latency >0.15) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_branch_resteer > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -151,7 +123,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.CISC@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_cisc",
- "MetricThreshold": "(tma_cisc >0.05) & ((tma_ifetch_bandwidth >0.10) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_cisc > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -160,7 +132,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_core_bound",
- "MetricThreshold": "(tma_core_bound >0.10) & ((tma_backend_bound >0.10))",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.1",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -170,7 +142,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.DECODE@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_decode",
- "MetricThreshold": "(tma_decode >0.05) & ((tma_ifetch_bandwidth >0.10) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_decode > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -179,7 +151,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.FASTNUKE@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_fast_nuke",
- "MetricThreshold": "(tma_fast_nuke >0.05) & ((tma_machine_clears >0.05) & ((tma_bad_speculation >0.15)))",
+ "MetricThreshold": "tma_fast_nuke > 0.05 & (tma_machine_clears > 0.05 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -189,7 +161,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ALL@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
- "MetricThreshold": "(tma_frontend_bound >0.20)",
+ "MetricThreshold": "tma_frontend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1;Default",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -199,7 +171,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ICACHE@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_icache_misses",
- "MetricThreshold": "(tma_icache_misses >0.05) & ((tma_ifetch_latency >0.15) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -208,7 +180,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
"MetricName": "tma_ifetch_bandwidth",
- "MetricThreshold": "(tma_ifetch_bandwidth >0.10) & ((tma_frontend_bound >0.20))",
+ "MetricThreshold": "tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -218,7 +190,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.FRONTEND_LATENCY@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
"MetricName": "tma_ifetch_latency",
- "MetricThreshold": "(tma_ifetch_latency >0.15) & ((tma_frontend_bound >0.20))",
+ "MetricThreshold": "tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -555,7 +527,7 @@
},
{
"BriefDescription": "Average CPU Utilization",
- "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.REF_TSC@ / TSC",
+ "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.REF_TSC@ / msr@tsc\\,cpu=cpu_atom@",
"MetricName": "tma_info_system_cpu_utilization",
"Unit": "cpu_atom"
},
@@ -578,7 +550,7 @@
"BriefDescription": "PerfMon Event Multiplexing accuracy indicator",
"MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.CORE_P@ / cpu_atom@CPU_CLK_UNHALTED.CORE@",
"MetricName": "tma_info_system_mux",
- "MetricThreshold": "((tma_info_system_mux > 1.1)|(tma_info_system_mux < 0.9))",
+ "MetricThreshold": "tma_info_system_mux > 1.1 | tma_info_system_mux < 0.9",
"Unit": "cpu_atom"
},
{
@@ -617,7 +589,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ITLB_MISS@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_itlb_misses",
- "MetricThreshold": "(tma_itlb_misses >0.05) & ((tma_ifetch_latency >0.15) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -626,7 +598,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
"MetricName": "tma_machine_clears",
- "MetricThreshold": "(tma_machine_clears >0.05) & ((tma_bad_speculation >0.15))",
+ "MetricThreshold": "tma_machine_clears > 0.05 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -636,7 +608,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.MEM_SCHEDULER@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_mem_scheduler",
- "MetricThreshold": "(tma_mem_scheduler >0.10) & ((tma_resource_bound >0.20) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_mem_scheduler > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -645,7 +617,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_non_mem_scheduler",
- "MetricThreshold": "(tma_non_mem_scheduler >0.10) & ((tma_resource_bound >0.20) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_non_mem_scheduler > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -654,7 +626,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.NUKE@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_nuke",
- "MetricThreshold": "(tma_nuke >0.05) & ((tma_machine_clears >0.05) & ((tma_bad_speculation >0.15)))",
+ "MetricThreshold": "tma_nuke > 0.05 & (tma_machine_clears > 0.05 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -663,7 +635,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.OTHER@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_other_fb",
- "MetricThreshold": "(tma_other_fb >0.05) & ((tma_ifetch_bandwidth >0.10) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_other_fb > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -672,7 +644,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.PREDECODE@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_predecode",
- "MetricThreshold": "(tma_predecode >0.05) & ((tma_ifetch_bandwidth >0.10) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_predecode > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -681,7 +653,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.REGISTER@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_register",
- "MetricThreshold": "(tma_register >0.10) & ((tma_resource_bound >0.20) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_register > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -690,7 +662,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.REORDER_BUFFER@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_reorder_buffer",
- "MetricThreshold": "(tma_reorder_buffer >0.10) & ((tma_resource_bound >0.20) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_reorder_buffer > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -699,7 +671,7 @@
"MetricExpr": "tma_backend_bound - tma_core_bound",
"MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_resource_bound",
- "MetricThreshold": "(tma_resource_bound >0.20) & ((tma_backend_bound >0.10))",
+ "MetricThreshold": "tma_resource_bound > 0.2 & tma_backend_bound > 0.1",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -710,7 +682,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_RETIRING.ALL@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
- "MetricThreshold": "(tma_retiring >0.75)",
+ "MetricThreshold": "tma_retiring > 0.75",
"MetricgroupNoGroup": "TopdownL1;Default",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -720,12 +692,19 @@
"MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.SERIALIZATION@ / (8 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_serialization",
- "MetricThreshold": "(tma_serialization >0.10) & ((tma_resource_bound >0.20) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_serialization > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations",
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "tma_info_system_socket_clks / #num_dies / duration_time / 1e9",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
"MetricExpr": "cpu_core@UOPS_DISPATCHED.ALU@ / (6 * tma_info_thread_clks)",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_alu_op_utilization",
@@ -738,13 +717,13 @@
"MetricExpr": "78 * cpu_core@ASSISTS.ANY@ / tma_info_thread_slots",
"MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
- "MetricThreshold": "tma_assists > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: ASSISTS.ANY",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates fraction of slots the CPU retired uops as a result of handing SSE to AVX* or AVX* to SSE transition Assists",
+ "BriefDescription": "This metric estimates fraction of slots the CPU retired uops as a result of handing SSE to AVX* or AVX* to SSE transition Assists.",
"MetricExpr": "63 * cpu_core@ASSISTS.SSE_AVX_MIX@ / tma_info_thread_slots",
"MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group",
"MetricName": "tma_avx_assists",
@@ -755,7 +734,7 @@
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "cpu_core@topdown\\-be\\-bound@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)",
"MetricGroup": "BvOB;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
@@ -767,18 +746,18 @@
{
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "topdown\\-bad\\-spec / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "cpu_core@topdown\\-bad\\-spec@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)",
"MetricGroup": "Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_bad_speculation",
"MetricThreshold": "tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
- "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches)",
+ "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
"MetricGroup": "BigFootprint;BvBC;Fed;Frontend;IcMiss;MemoryTLB",
"MetricName": "tma_bottleneck_big_code",
"MetricThreshold": "tma_bottleneck_big_code > 20",
@@ -794,35 +773,35 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
+ "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
+ "MetricGroup": "BvCB;Cor;tma_issueComp",
+ "MetricName": "tma_bottleneck_compute_bound_est",
+ "MetricThreshold": "tma_bottleneck_compute_bound_est > 20",
+ "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. Related metrics: ",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_fb_full / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_l1_latency_capacity + tma_lock_latency + tma_split_loads + tma_fb_full)))",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_dtlb_load + tma_fb_full + tma_l1_latency_capacity + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_early_blk + tma_store_fwd_blk)))",
"MetricGroup": "BvMB;Mem;MemoryBW;Offcore;tma_issueBW",
- "MetricName": "tma_bottleneck_cache_memory_bandwidth",
- "MetricThreshold": "tma_bottleneck_cache_memory_bandwidth > 20",
- "PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_mem_bandwidth, tma_sq_full",
+ "MetricName": "tma_bottleneck_data_cache_memory_bandwidth",
+ "MetricThreshold": "tma_bottleneck_data_cache_memory_bandwidth > 20",
+ "PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full",
"Unit": "cpu_core"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_l1_latency_dependency / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_l1_latency_capacity + tma_lock_latency + tma_split_loads + tma_fb_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_l1_latency_capacity / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_l1_latency_capacity + tma_lock_latency + tma_split_loads + tma_fb_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_lock_latency / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_l1_latency_capacity + tma_lock_latency + tma_split_loads + tma_fb_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_split_loads / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_l1_latency_capacity + tma_lock_latency + tma_split_loads + tma_fb_full)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_split_stores / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_store_latency / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)))",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l1_latency_dependency / (tma_dtlb_load + tma_fb_full + tma_l1_latency_capacity + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_early_blk + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l1_latency_capacity / (tma_dtlb_load + tma_fb_full + tma_l1_latency_capacity + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_early_blk + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_lock_latency / (tma_dtlb_load + tma_fb_full + tma_l1_latency_capacity + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_early_blk + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_loads / (tma_dtlb_load + tma_fb_full + tma_l1_latency_capacity + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_early_blk + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_stores / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
"MetricGroup": "BvML;Mem;MemoryLat;Offcore;tma_issueLat",
- "MetricName": "tma_bottleneck_cache_memory_latency",
- "MetricThreshold": "tma_bottleneck_cache_memory_latency > 20",
+ "MetricName": "tma_bottleneck_data_cache_memory_latency",
+ "MetricThreshold": "tma_bottleneck_data_cache_memory_latency > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
- "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_serializing_operation + tma_ports_utilization) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_serializing_operation + tma_ports_utilization)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
- "MetricGroup": "BvCB;Cor;tma_issueComp",
- "MetricName": "tma_bottleneck_compute_bound_est",
- "MetricThreshold": "tma_bottleneck_compute_bound_est > 20",
- "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy",
- "Unit": "cpu_core"
- },
- {
"BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)",
- "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) - (1 - cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.MS\\,cmask\\=0x1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_mispredicts_resteers + tma_clears_resteers + tma_unknown_branches)) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_ms / (tma_mite + tma_dsb + tma_lsd + tma_ms))) - tma_bottleneck_big_code",
+ "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - (1 - cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.MS\\,cmask\\=1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_ms)) - tma_bottleneck_big_code",
"MetricGroup": "BvFB;Fed;FetchBW;Frontend",
"MetricName": "tma_bottleneck_instruction_fetch_bw",
"MetricThreshold": "tma_bottleneck_instruction_fetch_bw > 20",
@@ -830,7 +809,7 @@
},
{
"BriefDescription": "Total pipeline cost of irregular execution (e.g",
- "MetricExpr": "100 * ((1 - cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.MS\\,cmask\\=0x1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_mispredicts_resteers + tma_clears_resteers + tma_unknown_branches)) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_ms / (tma_mite + tma_dsb + tma_lsd + tma_ms)) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + cpu_core@RS.EMPTY_RESOURCE@ / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_serializing_operation + tma_ports_utilization) + tma_microcode_sequencer / (tma_microcode_sequencer + tma_few_uops_instructions) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
+ "MetricExpr": "100 * ((1 - cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.MS\\,cmask\\=1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_ms) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + cpu_core@RS.EMPTY_RESOURCE@ / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_microcode_sequencer + tma_few_uops_instructions) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
"MetricGroup": "Bad;BvIO;Cor;Ret;tma_issueMS",
"MetricName": "tma_bottleneck_irregular_overhead",
"MetricThreshold": "tma_bottleneck_irregular_overhead > 10",
@@ -839,7 +818,7 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_dtlb_load / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_l1_latency_capacity + tma_lock_latency + tma_split_loads + tma_fb_full)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_dtlb_store / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)))",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / (tma_dtlb_load + tma_fb_full + tma_l1_latency_capacity + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_early_blk + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
"MetricGroup": "BvMT;Mem;MemoryTLB;Offcore;tma_issueTLB",
"MetricName": "tma_bottleneck_memory_data_tlbs",
"MetricThreshold": "tma_bottleneck_memory_data_tlbs > 20",
@@ -848,16 +827,16 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * tma_false_sharing / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
"MetricGroup": "BvMS;LockCont;Mem;Offcore;tma_issueSyncxn",
"MetricName": "tma_bottleneck_memory_synchronization",
"MetricThreshold": "tma_bottleneck_memory_synchronization > 10",
- "PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears",
+ "PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"Unit": "cpu_core"
},
{
"BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
- "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Bad;BadSpec;BrMispredicts;BvMP;tma_issueBM",
"MetricName": "tma_bottleneck_mispredictions",
"MetricThreshold": "tma_bottleneck_mispredictions > 20",
@@ -866,15 +845,15 @@
},
{
"BriefDescription": "Total pipeline cost of remaining bottlenecks in the back-end",
- "MetricExpr": "100 - (tma_bottleneck_big_code + tma_bottleneck_instruction_fetch_bw + tma_bottleneck_mispredictions + tma_bottleneck_cache_memory_bandwidth + tma_bottleneck_cache_memory_latency + tma_bottleneck_memory_data_tlbs + tma_bottleneck_memory_synchronization + tma_bottleneck_compute_bound_est + tma_bottleneck_irregular_overhead + tma_bottleneck_branching_overhead + tma_bottleneck_useful_work)",
+ "MetricExpr": "100 - (tma_bottleneck_big_code + tma_bottleneck_instruction_fetch_bw + tma_bottleneck_mispredictions + tma_bottleneck_data_cache_memory_bandwidth + tma_bottleneck_data_cache_memory_latency + tma_bottleneck_memory_data_tlbs + tma_bottleneck_memory_synchronization + tma_bottleneck_compute_bound_est + tma_bottleneck_irregular_overhead + tma_bottleneck_branching_overhead + tma_bottleneck_useful_work)",
"MetricGroup": "BvOB;Cor;Offcore",
"MetricName": "tma_bottleneck_other_bottlenecks",
"MetricThreshold": "tma_bottleneck_other_bottlenecks > 20",
- "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls",
+ "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls.",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead",
+ "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.",
"MetricExpr": "100 * (tma_retiring - (cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ + 2 * cpu_core@BR_INST_RETIRED.NEAR_CALL@ + cpu_core@INST_RETIRED.NOP@) / tma_info_thread_slots - tma_microcode_sequencer / (tma_microcode_sequencer + tma_few_uops_instructions) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
"MetricGroup": "BvUW;Ret",
"MetricName": "tma_bottleneck_useful_work",
@@ -883,7 +862,7 @@
},
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
- "MetricExpr": "topdown\\-br\\-mispredict / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "cpu_core@topdown\\-br\\-mispredict@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)",
"MetricGroup": "BadSpec;BrMispredicts;BvMP;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
@@ -897,26 +876,26 @@
"MetricExpr": "cpu_core@INT_MISC.CLEAR_RESTEER_CYCLES@ / tma_info_thread_clks + tma_unknown_branches",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_branch_resteers",
- "MetricThreshold": "tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_l3_hit_latency, tma_store_latency",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.1 power-performance optimized state (Faster wakeup time; Smaller power savings)",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.1 power-performance optimized state (Faster wakeup time; Smaller power savings).",
"MetricExpr": "cpu_core@CPU_CLK_UNHALTED.C01@ / tma_info_thread_clks",
"MetricGroup": "C0Wait;TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_c01_wait",
- "MetricThreshold": "tma_c01_wait > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_c01_wait > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.2 power-performance optimized state (Slower wakeup time; Larger power savings)",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.2 power-performance optimized state (Slower wakeup time; Larger power savings).",
"MetricExpr": "cpu_core@CPU_CLK_UNHALTED.C02@ / tma_info_thread_clks",
"MetricGroup": "C0Wait;TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_c02_wait",
- "MetricThreshold": "tma_c02_wait > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_c02_wait > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -925,8 +904,8 @@
"MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
"MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_cisc",
- "MetricThreshold": "tma_cisc > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
- "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -935,99 +914,99 @@
"MetricExpr": "(1 - tma_branch_mispredicts / tma_bad_speculation) * cpu_core@INT_MISC.CLEAR_RESTEER_CYCLES@ / tma_info_thread_clks",
"MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
"MetricName": "tma_clears_resteers",
- "MetricThreshold": "tma_clears_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that hit in the L2 cache",
- "MetricExpr": "max(0, cpu_core@FRONTEND_RETIRED.L1I_MISS@ * cpu_core@frontend_retired.l1i_miss@R / tma_info_thread_clks - tma_code_l2_miss)",
+ "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that hit in the L2 cache.",
+ "MetricExpr": "max(0, cpu_core@FRONTEND_RETIRED.L1I_MISS@ * cpu_core@FRONTEND_RETIRED.L1I_MISS@R / tma_info_thread_clks - tma_code_l2_miss)",
"MetricGroup": "FetchLat;IcMiss;Offcore;TopdownL4;tma_L4_group;tma_icache_misses_group",
"MetricName": "tma_code_l2_hit",
- "MetricThreshold": "tma_code_l2_hit > 0.05 & tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_l2_hit > 0.05 & (tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that miss in the L2 cache",
- "MetricExpr": "cpu_core@FRONTEND_RETIRED.L2_MISS@ * cpu_core@frontend_retired.l2_miss@R / tma_info_thread_clks",
+ "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that miss in the L2 cache.",
+ "MetricExpr": "cpu_core@FRONTEND_RETIRED.L2_MISS@ * cpu_core@FRONTEND_RETIRED.L2_MISS@R / tma_info_thread_clks",
"MetricGroup": "FetchLat;IcMiss;Offcore;TopdownL4;tma_L4_group;tma_icache_misses_group",
"MetricName": "tma_code_l2_miss",
- "MetricThreshold": "tma_code_l2_miss > 0.05 & tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_l2_miss > 0.05 & (tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the (first level) ITLB was missed by instructions fetches, that later on hit in second-level TLB (STLB)",
- "MetricExpr": "max(0, cpu_core@FRONTEND_RETIRED.ITLB_MISS@ * cpu_core@frontend_retired.itlb_miss@R / tma_info_thread_clks - tma_code_stlb_miss)",
+ "MetricExpr": "max(0, cpu_core@FRONTEND_RETIRED.ITLB_MISS@ * cpu_core@FRONTEND_RETIRED.ITLB_MISS@R / tma_info_thread_clks - tma_code_stlb_miss)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group",
"MetricName": "tma_code_stlb_hit",
- "MetricThreshold": "tma_code_stlb_hit > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_hit > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric estimates the fraction of cycles where the Second-level TLB (STLB) was missed by instruction fetches, performing a hardware page walk",
- "MetricExpr": "cpu_core@FRONTEND_RETIRED.STLB_MISS@ * cpu_core@frontend_retired.stlb_miss@R / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@FRONTEND_RETIRED.STLB_MISS@ * cpu_core@FRONTEND_RETIRED.STLB_MISS@R / tma_info_thread_clks",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group",
"MetricName": "tma_code_stlb_miss",
- "MetricThreshold": "tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for (instruction) code accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for (instruction) code accesses.",
"MetricExpr": "cpu_core@ITLB_MISSES.WALK_ACTIVE@ / tma_info_thread_clks * cpu_core@ITLB_MISSES.WALK_COMPLETED_2M_4M@ / (cpu_core@ITLB_MISSES.WALK_COMPLETED_4K@ + cpu_core@ITLB_MISSES.WALK_COMPLETED_2M_4M@)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group",
"MetricName": "tma_code_stlb_miss_2m",
- "MetricThreshold": "tma_code_stlb_miss_2m > 0.05 & tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss_2m > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for (instruction) code accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for (instruction) code accesses.",
"MetricExpr": "cpu_core@ITLB_MISSES.WALK_ACTIVE@ / tma_info_thread_clks * cpu_core@ITLB_MISSES.WALK_COMPLETED_4K@ / (cpu_core@ITLB_MISSES.WALK_COMPLETED_4K@ + cpu_core@ITLB_MISSES.WALK_COMPLETED_2M_4M@)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group",
"MetricName": "tma_code_stlb_miss_4k",
- "MetricThreshold": "tma_code_stlb_miss_4k > 0.05 & tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss_4k > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by non-taken conditional branches",
- "MetricExpr": "cpu_core@BR_MISP_RETIRED.COND_NTAKEN_COST@ * cpu_core@br_misp_retired.cond_ntaken_cost@R / tma_info_thread_clks",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by non-taken conditional branches.",
+ "MetricExpr": "cpu_core@BR_MISP_RETIRED.COND_NTAKEN_COST@ * cpu_core@BR_MISP_RETIRED.COND_NTAKEN_COST@R / tma_info_thread_clks",
"MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_cond_nt_mispredicts",
- "MetricThreshold": "tma_cond_nt_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_cond_nt_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to misprediction by backward-taken conditional branches",
- "MetricExpr": "cpu_core@BR_MISP_RETIRED.COND_TAKEN_BWD_COST@ * cpu_core@br_misp_retired.cond_taken_bwd_cost@R / tma_info_thread_clks",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to misprediction by backward-taken conditional branches.",
+ "MetricExpr": "cpu_core@BR_MISP_RETIRED.COND_TAKEN_BWD_COST@ * cpu_core@BR_MISP_RETIRED.COND_TAKEN_BWD_COST@R / tma_info_thread_clks",
"MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_cond_tk_bwd_mispredicts",
- "MetricThreshold": "tma_cond_tk_bwd_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_cond_tk_bwd_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to misprediction by forward-taken conditional branches",
- "MetricExpr": "cpu_core@BR_MISP_RETIRED.COND_TAKEN_FWD_COST@ * cpu_core@br_misp_retired.cond_taken_fwd_cost@R / tma_info_thread_clks",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to misprediction by forward-taken conditional branches.",
+ "MetricExpr": "cpu_core@BR_MISP_RETIRED.COND_TAKEN_FWD_COST@ * cpu_core@BR_MISP_RETIRED.COND_TAKEN_FWD_COST@R / tma_info_thread_clks",
"MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_cond_tk_fwd_mispredicts",
- "MetricThreshold": "tma_cond_tk_fwd_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_cond_tk_fwd_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
- "MetricExpr": "((min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS@ * cpu_core@mem_load_l3_hit_retired.xsnp_miss@R, cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS@ * (27 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency) if 0 < cpu_core@mem_load_l3_hit_retired.xsnp_miss@R else cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS@ * (27 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency) + (min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM@ * cpu_core@mem_load_l3_hit_retired.xsnp_hitm@R, cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM@ * (28 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency) if 0 < cpu_core@mem_load_l3_hit_retired.xsnp_hitm@R else cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM@ * (28 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency)) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
+ "MetricExpr": "(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS@ * min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS@R, 24 * tma_info_system_core_frequency) + cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM@ * min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM@R, 25 * tma_info_system_core_frequency)) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
- "MetricThreshold": "tma_contested_accesses > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD, MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_bottleneck_memory_synchronization, tma_data_sharing, tma_false_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_bottleneck_memory_synchronization, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1038,17 +1017,18 @@
"MetricName": "tma_core_bound",
"MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations)",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
- "MetricExpr": "((min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD@ * cpu_core@mem_load_l3_hit_retired.xsnp_no_fwd@R, cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD@ * (27 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency) if 0 < cpu_core@mem_load_l3_hit_retired.xsnp_no_fwd@R else cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD@ * (27 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency) + (min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * cpu_core@mem_load_l3_hit_retired.xsnp_fwd@R, cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * (28 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency) if 0 < cpu_core@mem_load_l3_hit_retired.xsnp_fwd@R else cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * (28 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency)) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD@ * min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD@R, 24 * tma_info_system_core_frequency) + cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@R, 25 * tma_info_system_core_frequency)) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
"MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
- "MetricThreshold": "tma_data_sharing > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_false_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1057,7 +1037,7 @@
"MetricExpr": "cpu_core@ARITH.DIV_ACTIVE@ / tma_info_thread_clks",
"MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
- "MetricThreshold": "tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIV_ACTIVE",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -1067,18 +1047,18 @@
"MetricExpr": "cpu_core@MEMORY_STALLS.MEM@ / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_dram_bound",
- "MetricThreshold": "tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_MISS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline",
- "MetricExpr": "(cpu_core@IDQ.DSB_UOPS\\,cmask\\=0x8\\,inv\\=0x1@ + cpu_core@IDQ.DSB_UOPS@ / (cpu_core@IDQ.DSB_UOPS@ + cpu_core@IDQ.MITE_UOPS@) * (cpu_core@IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE@ - cpu_core@IDQ_BUBBLES.FETCH_LATENCY@)) / tma_info_thread_clks",
+ "MetricExpr": "(cpu_core@IDQ.DSB_UOPS\\,cmask\\=0x8\\,inv\\=0x1@ / 2 + cpu_core@IDQ.DSB_UOPS@ / (cpu_core@IDQ.DSB_UOPS@ + cpu_core@IDQ.MITE_UOPS@) * (cpu_core@IDQ_BUBBLES.STARVATION_CYCLES@ - cpu_core@IDQ_BUBBLES.FETCH_LATENCY@)) / tma_info_thread_clks",
"MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_dsb",
"MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2",
- "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1087,28 +1067,28 @@
"MetricExpr": "cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES@ / tma_info_thread_clks",
"MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_dsb_switches",
- "MetricThreshold": "tma_dsb_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
- "MetricExpr": "(min(cpu_core@MEM_INST_RETIRED.STLB_HIT_LOADS@ * cpu_core@mem_inst_retired.stlb_hit_loads@R, cpu_core@MEM_INST_RETIRED.STLB_HIT_LOADS@ * 7) if 0 < cpu_core@mem_inst_retired.stlb_hit_loads@R else cpu_core@MEM_INST_RETIRED.STLB_HIT_LOADS@ * 7) / tma_info_thread_clks + tma_load_stlb_miss",
+ "MetricExpr": "cpu_core@MEM_INST_RETIRED.STLB_HIT_LOADS@ * min(cpu_core@MEM_INST_RETIRED.STLB_HIT_LOADS@R, 7) / tma_info_thread_clks + tma_load_stlb_miss",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
- "MetricThreshold": "tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_store",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
- "MetricExpr": "(min(cpu_core@MEM_INST_RETIRED.STLB_HIT_STORES@ * cpu_core@mem_inst_retired.stlb_hit_stores@R, cpu_core@MEM_INST_RETIRED.STLB_HIT_STORES@ * 7) if 0 < cpu_core@mem_inst_retired.stlb_hit_stores@R else cpu_core@MEM_INST_RETIRED.STLB_HIT_STORES@ * 7) / tma_info_thread_clks + tma_store_stlb_miss",
+ "MetricExpr": "cpu_core@MEM_INST_RETIRED.STLB_HIT_STORES@ * min(cpu_core@MEM_INST_RETIRED.STLB_HIT_STORES@R, 7) / tma_info_thread_clks + tma_store_stlb_miss",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
- "MetricThreshold": "tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_load",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1117,8 +1097,8 @@
"MetricExpr": "28 * tma_info_system_core_frequency * cpu_core@OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM@ / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
- "MetricThreshold": "tma_false_sharing > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1128,7 +1108,7 @@
"MetricGroup": "BvMB;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
- "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1145,12 +1125,12 @@
},
{
"BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues",
- "MetricExpr": "topdown\\-fetch\\-lat / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "cpu_core@topdown\\-fetch\\-lat@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)",
"MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
"MetricName": "tma_fetch_latency",
"MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16, FRONTEND_RETIRED.LATENCY_GE_8",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1160,7 +1140,7 @@
"MetricGroup": "TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueD0",
"MetricName": "tma_few_uops_instructions",
"MetricThreshold": "tma_few_uops_instructions > 0.05 & tma_heavy_operations > 0.1",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or more uops. This highly-correlates with the number of uops in such instructions",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or more uops. This highly-correlates with the number of uops in such instructions. Related metrics: tma_decoder0_alone",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1170,7 +1150,7 @@
"MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fp_arith",
"MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1180,89 +1160,89 @@
"MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group",
"MetricName": "tma_fp_assists",
"MetricThreshold": "tma_fp_assists > 0.1",
- "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals)",
+ "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals).",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles where the Floating-Point Divider unit was active",
+ "BriefDescription": "This metric represents fraction of cycles where the Floating-Point Divider unit was active.",
"MetricExpr": "cpu_core@ARITH.FPDIV_ACTIVE@ / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_divider_group",
"MetricName": "tma_fp_divider",
- "MetricThreshold": "tma_fp_divider > 0.2 & tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_fp_divider > 0.2 & (tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
- "MetricExpr": "cpu_core@FP_ARITH_INST_RETIRED.SCALAR@ / (tma_retiring * tma_info_thread_slots)",
+ "MetricExpr": "cpu_core@FP_ARITH_OPS_RETIRED.SCALAR@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_scalar",
- "MetricThreshold": "tma_fp_scalar > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
- "MetricExpr": "cpu_core@FP_ARITH_INST_RETIRED.VECTOR@ / (tma_retiring * tma_info_thread_slots)",
+ "MetricExpr": "cpu_core@FP_ARITH_OPS_RETIRED.VECTOR@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_vector",
- "MetricThreshold": "tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors",
- "MetricExpr": "(cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE@) / (tma_retiring * tma_info_thread_slots)",
+ "MetricExpr": "(cpu_core@FP_ARITH_OPS_RETIRED.128B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_OPS_RETIRED.128B_PACKED_SINGLE@) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_128b",
- "MetricThreshold": "tma_fp_vector_128b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors",
- "MetricExpr": "cpu_core@FP_ARITH_INST_RETIRED.VECTOR\\,umask\\=0x30@ / (tma_retiring * tma_info_thread_slots)",
+ "MetricExpr": "cpu_core@FP_ARITH_OPS_RETIRED.VECTOR\\,umask\\=0x30@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_256b",
- "MetricThreshold": "tma_fp_vector_256b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_int_vector_128b, tma_int_vector_256b, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "topdown\\-fe\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "cpu_core@topdown\\-fe\\-bound@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)",
"MetricGroup": "BvFB;BvIO;Default;PGO;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions , where one uop can represent multiple contiguous instructions",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions",
"MetricExpr": "tma_light_operations * cpu_core@INST_RETIRED.MACRO_FUSED@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fused_instructions",
"MetricThreshold": "tma_fused_instructions > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions , where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences",
- "MetricExpr": "topdown\\-heavy\\-ops / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
+ "MetricExpr": "cpu_core@topdown\\-heavy\\-ops@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_heavy_operations",
"MetricThreshold": "tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+]). Sample with: UOPS_RETIRED.HEAVY",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+]). Sample with: UOPS_RETIRED.HEAVY",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1271,26 +1251,26 @@
"MetricExpr": "cpu_core@ICACHE_DATA.STALLS@ / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
- "MetricThreshold": "tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS, FRONTEND_RETIRED.L1I_MISS",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by indirect CALL instructions",
- "MetricExpr": "cpu_core@BR_MISP_RETIRED.INDIRECT_CALL_COST@ * cpu_core@br_misp_retired.indirect_call_cost@R / tma_info_thread_clks",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by indirect CALL instructions.",
+ "MetricExpr": "cpu_core@BR_MISP_RETIRED.INDIRECT_CALL_COST@ * cpu_core@BR_MISP_RETIRED.INDIRECT_CALL_COST@R / tma_info_thread_clks",
"MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_ind_call_mispredicts",
- "MetricThreshold": "tma_ind_call_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_ind_call_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by indirect JMP instructions",
- "MetricExpr": "max((cpu_core@BR_MISP_RETIRED.INDIRECT_COST@ * cpu_core@br_misp_retired.indirect_cost@R - cpu_core@BR_MISP_RETIRED.INDIRECT_CALL_COST@ * cpu_core@br_misp_retired.indirect_call_cost@R) / tma_info_thread_clks, 0)",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by indirect JMP instructions.",
+ "MetricExpr": "max((cpu_core@BR_MISP_RETIRED.INDIRECT_COST@ * cpu_core@BR_MISP_RETIRED.INDIRECT_COST@R - cpu_core@BR_MISP_RETIRED.INDIRECT_CALL_COST@ * cpu_core@BR_MISP_RETIRED.INDIRECT_CALL_COST@R) / tma_info_thread_clks, 0)",
"MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_ind_jump_mispredicts",
- "MetricThreshold": "tma_ind_jump_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_ind_jump_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1303,7 +1283,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for conditional non-taken branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for conditional non-taken branches (lower number means higher occurrence rate).",
"MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.COND_NTAKEN@",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_cond_ntaken",
@@ -1311,29 +1291,29 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for conditional backward-taken branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for conditional backward-taken branches (lower number means higher occurrence rate).",
"MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.COND_TAKEN_BWD@",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_cond_taken_bwd",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for conditional forward-taken branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for conditional forward-taken branches (lower number means higher occurrence rate).",
"MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.COND_TAKEN_FWD@",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_cond_taken_fwd",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
"MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.INDIRECT@",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_indirect",
- "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1000",
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate).",
"MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.RET@",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_ret",
@@ -1357,7 +1337,7 @@
},
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck",
- "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_latency + tma_fetch_bandwidth)) * (tma_dsb / (tma_mite + tma_dsb + tma_lsd + tma_ms)))",
+ "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_bandwidth + tma_fetch_latency)) * (tma_dsb / (tma_dsb + tma_lsd + tma_mite + tma_ms)))",
"MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_bandwidth",
"MetricThreshold": "tma_info_botlnk_l2_dsb_bandwidth > 10",
@@ -1366,7 +1346,7 @@
},
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
- "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_mite / (tma_mite + tma_dsb + tma_lsd + tma_ms))",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_lsd + tma_mite + tma_ms))",
"MetricGroup": "DSBmiss;Fed;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_misses",
"MetricThreshold": "tma_info_botlnk_l2_dsb_misses > 10",
@@ -1375,10 +1355,11 @@
},
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
- "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
"MetricName": "tma_info_botlnk_l2_ic_misses",
"MetricThreshold": "tma_info_botlnk_l2_ic_misses > 5",
+ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: ",
"Unit": "cpu_core"
},
{
@@ -1434,7 +1415,7 @@
},
{
"BriefDescription": "Floating Point Operations Per Cycle",
- "MetricExpr": "(cpu_core@FP_ARITH_INST_RETIRED.SCALAR@ + 2 * cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + 4 * cpu_core@FP_ARITH_INST_RETIRED.4_FLOPS@ + 8 * cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@) / tma_info_thread_clks",
+ "MetricExpr": "(cpu_core@FP_ARITH_OPS_RETIRED.SCALAR@ + 2 * cpu_core@FP_ARITH_OPS_RETIRED.128B_PACKED_DOUBLE@ + 4 * cpu_core@FP_ARITH_OPS_RETIRED.4_FLOPS@ + 8 * cpu_core@FP_ARITH_OPS_RETIRED.256B_PACKED_SINGLE@) / tma_info_thread_clks",
"MetricGroup": "Flops;Ret",
"MetricName": "tma_info_core_flopc",
"Unit": "cpu_core"
@@ -1444,12 +1425,12 @@
"MetricExpr": "(cpu_core@FP_ARITH_DISPATCHED.V0@ + cpu_core@FP_ARITH_DISPATCHED.V1@ + cpu_core@FP_ARITH_DISPATCHED.V2@ + cpu_core@FP_ARITH_DISPATCHED.V3@) / (4 * tma_info_thread_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
- "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)",
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common).",
"Unit": "cpu_core"
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)",
- "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=0x1@",
+ "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "tma_info_core_ilp",
"Unit": "cpu_core"
@@ -1464,15 +1445,15 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details",
- "MetricExpr": "cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES@ / cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=0x1\\,edge\\=0x1@",
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES@ / cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
"MetricGroup": "DSBmiss",
"MetricName": "tma_info_frontend_dsb_switch_cost",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU retirement was stalled likely due to retired DSB misses",
- "MetricExpr": "cpu_core@FRONTEND_RETIRED.ANY_DSB_MISS@ * cpu_core@frontend_retired.any_dsb_miss@R / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@FRONTEND_RETIRED.ANY_DSB_MISS@ * cpu_core@FRONTEND_RETIRED.ANY_DSB_MISS@R / tma_info_thread_clks",
"MetricGroup": "DSBmiss;Fed;FetchLat",
"MetricName": "tma_info_frontend_dsb_switches_ret",
"MetricThreshold": "tma_info_frontend_dsb_switches_ret > 0.05",
@@ -1480,7 +1461,7 @@
},
{
"BriefDescription": "Average number of Uops issued by front-end when it issued something",
- "MetricExpr": "cpu_core@UOPS_ISSUED.ANY@ / cpu_core@UOPS_ISSUED.ANY\\,cmask\\=0x1@",
+ "MetricExpr": "cpu_core@UOPS_ISSUED.ANY@ / cpu_core@UOPS_ISSUED.ANY\\,cmask\\=1@",
"MetricGroup": "Fed;FetchBW",
"MetricName": "tma_info_frontend_fetch_upc",
"Unit": "cpu_core"
@@ -1530,7 +1511,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU retirement was stalled likely due to retired operations that invoke the Microcode Sequencer",
- "MetricExpr": "cpu_core@FRONTEND_RETIRED.MS_FLOWS@ * cpu_core@frontend_retired.ms_flows@R / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@FRONTEND_RETIRED.MS_FLOWS@ * cpu_core@FRONTEND_RETIRED.MS_FLOWS@R / tma_info_thread_clks",
"MetricGroup": "Fed;FetchLat;MicroSeq",
"MetricName": "tma_info_frontend_ms_latency_ret",
"MetricThreshold": "tma_info_frontend_ms_latency_ret > 0.05",
@@ -1545,21 +1526,21 @@
},
{
"BriefDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection",
- "MetricExpr": "cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES@ / cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES\\,cmask\\=0x1\\,edge\\=0x1@",
+ "MetricExpr": "cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES@ / cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES\\,cmask\\=1\\,edge@",
"MetricGroup": "Fed",
"MetricName": "tma_info_frontend_unknown_branch_cost",
- "PublicDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection. See Unknown_Branches node",
+ "PublicDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection. See Unknown_Branches node.",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU retirement was stalled likely due to retired branches who got branch address clears",
- "MetricExpr": "cpu_core@FRONTEND_RETIRED.UNKNOWN_BRANCH@ * cpu_core@frontend_retired.unknown_branch@R / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@FRONTEND_RETIRED.UNKNOWN_BRANCH@ * cpu_core@FRONTEND_RETIRED.UNKNOWN_BRANCH@R / tma_info_thread_clks",
"MetricGroup": "Fed;FetchLat",
"MetricName": "tma_info_frontend_unknown_branches_ret",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Branch instructions per taken branch",
+ "BriefDescription": "Branch instructions per taken branch.",
"MetricExpr": "cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ / cpu_core@BR_INST_RETIRED.NEAR_TAKEN@",
"MetricGroup": "Branches;Fed;PGO",
"MetricName": "tma_info_inst_mix_bptkbranch",
@@ -1575,47 +1556,47 @@
},
{
"BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.SCALAR@ + cpu_core@FP_ARITH_INST_RETIRED.VECTOR@)",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_OPS_RETIRED.SCALAR@ + cpu_core@FP_ARITH_OPS_RETIRED.VECTOR@)",
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
"MetricThreshold": "tma_info_inst_mix_iparith < 10",
- "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW",
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW.",
"Unit": "cpu_core"
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE@)",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_OPS_RETIRED.128B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_OPS_RETIRED.128B_PACKED_SINGLE@)",
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx128",
"MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting",
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.",
"Unit": "cpu_core"
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@)",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_OPS_RETIRED.256B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_OPS_RETIRED.256B_PACKED_SINGLE@)",
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx256",
"MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting",
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.",
"Unit": "cpu_core"
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@FP_ARITH_INST_RETIRED.SCALAR_DOUBLE@",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@FP_ARITH_OPS_RETIRED.SCALAR_DOUBLE@",
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_dp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.",
"Unit": "cpu_core"
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@FP_ARITH_INST_RETIRED.SCALAR_SINGLE@",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@FP_ARITH_OPS_RETIRED.SCALAR_SINGLE@",
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_sp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.",
"Unit": "cpu_core"
},
{
@@ -1636,7 +1617,7 @@
},
{
"BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.SCALAR@ + 2 * cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + 4 * cpu_core@FP_ARITH_INST_RETIRED.4_FLOPS@ + 8 * cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@)",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_OPS_RETIRED.SCALAR@ + 2 * cpu_core@FP_ARITH_OPS_RETIRED.128B_PACKED_DOUBLE@ + 4 * cpu_core@FP_ARITH_OPS_RETIRED.4_FLOPS@ + 8 * cpu_core@FP_ARITH_OPS_RETIRED.256B_PACKED_SINGLE@)",
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_ipflop",
"MetricThreshold": "tma_info_inst_mix_ipflop < 10",
@@ -1678,7 +1659,7 @@
"MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_INST_RETIRED.NEAR_TAKEN@",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
- "MetricThreshold": "tma_info_inst_mix_iptb < 8 * 2 + 1",
+ "MetricThreshold": "tma_info_inst_mix_iptb < 17",
"PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp",
"Unit": "cpu_core"
},
@@ -1691,7 +1672,7 @@
},
{
"BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * cpu_core@L1D.REPLACEMENT@ / 1e9 / tma_info_system_time",
+ "MetricExpr": "64 * cpu_core@L1D.L1_REPLACEMENT@ / 1e9 / tma_info_system_time",
"MetricGroup": "Mem;MemoryBW",
"MetricName": "tma_info_memory_l1d_cache_fill_bw",
"Unit": "cpu_core"
@@ -1704,6 +1685,13 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "L0 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * (cpu_core@MEM_LOAD_RETIRED.L1_MISS@ + cpu_core@MEM_LOAD_RETIRED.L1_HIT_L1@) / cpu_core@INST_RETIRED.ANY@",
+ "MetricGroup": "CacheHits;Mem",
+ "MetricName": "tma_info_memory_l1dl0_mpki",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1e3 * cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / cpu_core@INST_RETIRED.ANY@",
"MetricGroup": "CacheHits;Mem",
@@ -1803,7 +1791,7 @@
},
{
"BriefDescription": "Average Parallel L2 cache miss demand Loads",
- "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD@ / cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=0x1@",
+ "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD@ / cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@",
"MetricGroup": "Memory_BW;Offcore",
"MetricName": "tma_info_memory_latency_load_l2_mlp",
"Unit": "cpu_core"
@@ -1861,7 +1849,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU retirement was stalled likely due to STLB misses by demand loads",
- "MetricExpr": "cpu_core@MEM_INST_RETIRED.STLB_MISS_LOADS@ * cpu_core@mem_inst_retired.stlb_miss_loads@R / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@MEM_INST_RETIRED.STLB_MISS_LOADS@ * cpu_core@MEM_INST_RETIRED.STLB_MISS_LOADS@R / tma_info_thread_clks",
"MetricGroup": "Mem;MemoryTLB",
"MetricName": "tma_info_memory_tlb_load_stlb_miss_ret",
"MetricThreshold": "tma_info_memory_tlb_load_stlb_miss_ret > 0.05",
@@ -1884,7 +1872,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU retirement was stalled likely due to STLB misses by demand stores",
- "MetricExpr": "cpu_core@MEM_INST_RETIRED.STLB_MISS_STORES@ * cpu_core@mem_inst_retired.stlb_miss_stores@R / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@MEM_INST_RETIRED.STLB_MISS_STORES@ * cpu_core@MEM_INST_RETIRED.STLB_MISS_STORES@R / tma_info_thread_clks",
"MetricGroup": "Mem;MemoryTLB",
"MetricName": "tma_info_memory_tlb_store_stlb_miss_ret",
"MetricThreshold": "tma_info_memory_tlb_store_stlb_miss_ret > 0.05",
@@ -1919,24 +1907,31 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Average number of uops fetched from MS per cycle",
+ "MetricExpr": "cpu_core@IDQ.MS_UOPS@ / cpu_core@IDQ.MS_UOPS\\,cmask\\=1@",
+ "MetricGroup": "Fed;FetchLat;MicroSeq",
+ "MetricName": "tma_info_pipeline_fetch_ms",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Instructions per a microcode Assist invocation",
"MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@ASSISTS.ANY@",
"MetricGroup": "MicroSeq;Pipeline;Ret;Retire",
"MetricName": "tma_info_pipeline_ipassist",
- "MetricThreshold": "tma_info_pipeline_ipassist < 100000",
+ "MetricThreshold": "tma_info_pipeline_ipassist < 100e3",
"PublicDescription": "Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired",
- "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=0x1@",
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
"MetricGroup": "Pipeline;Ret",
"MetricName": "tma_info_pipeline_retire",
"Unit": "cpu_core"
},
{
"BriefDescription": "Estimated fraction of retirement-cycles dealing with repeat instructions",
- "MetricExpr": "cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=0x1@",
+ "MetricExpr": "cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
"MetricGroup": "MicroSeq;Pipeline;Ret",
"MetricName": "tma_info_pipeline_strings_cycles",
"MetricThreshold": "tma_info_pipeline_strings_cycles > 0.1",
@@ -1952,7 +1947,7 @@
},
{
"BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]",
- "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / tma_info_system_time",
+ "MetricExpr": "tma_info_system_turbo_utilization * msr@tsc\\,cpu=cpu_core@ / 1e9 / tma_info_system_time",
"MetricGroup": "Power;Summary",
"MetricName": "tma_info_system_core_frequency",
"Unit": "cpu_core"
@@ -1966,14 +1961,22 @@
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.REF_TSC@ / TSC",
+ "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.REF_TSC@ / msr@tsc\\,cpu=cpu_core@",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized",
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "32 * UNC_M_TOTAL_DATA / 1e9 / tma_info_system_time",
+ "MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW",
+ "MetricName": "tma_info_system_dram_bw_use",
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth, tma_sq_full",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "(cpu_core@FP_ARITH_INST_RETIRED.SCALAR@ + 2 * cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + 4 * cpu_core@FP_ARITH_INST_RETIRED.4_FLOPS@ + 8 * cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@) / 1e9 / tma_info_system_time",
+ "MetricExpr": "(cpu_core@FP_ARITH_OPS_RETIRED.SCALAR@ + 2 * cpu_core@FP_ARITH_OPS_RETIRED.128B_PACKED_DOUBLE@ + 4 * cpu_core@FP_ARITH_OPS_RETIRED.4_FLOPS@ + 8 * cpu_core@FP_ARITH_OPS_RETIRED.256B_PACKED_SINGLE@) / 1e9 / tma_info_system_time",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_system_gflops",
"PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width",
@@ -1981,23 +1984,22 @@
},
{
"BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_INST_RETIRED.FAR_BRANCH@u",
"MetricGroup": "Branches;OS",
"MetricName": "tma_info_system_ipfarbranch",
- "MetricThreshold": "tma_info_system_ipfarbranch < 1000000",
+ "MetricThreshold": "tma_info_system_ipfarbranch < 1e6",
"Unit": "cpu_core"
},
{
"BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
+ "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.THREAD_P@k / cpu_core@INST_RETIRED.ANY_P@k",
"MetricGroup": "OS",
"MetricName": "tma_info_system_kernel_cpi",
- "ScaleUnit": "1per_instr",
"Unit": "cpu_core"
},
{
"BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / cpu_core@CPU_CLK_UNHALTED.THREAD@",
+ "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.THREAD_P@k / cpu_core@CPU_CLK_UNHALTED.THREAD@",
"MetricGroup": "OS",
"MetricName": "tma_info_system_kernel_utilization",
"MetricThreshold": "tma_info_system_kernel_utilization > 0.05",
@@ -2019,6 +2021,13 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricExpr": "UNC_CLOCK.SOCKET",
+ "MetricGroup": "SoC",
+ "MetricName": "tma_info_system_socket_clks",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Run duration time in seconds",
"MetricExpr": "duration_time",
"MetricGroup": "Summary",
@@ -2034,7 +2043,14 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active",
+ "BriefDescription": "Measured Average Uncore Frequency for the SoC [GHz]",
+ "MetricExpr": "tma_info_system_socket_clks / 1e9 / tma_info_system_time",
+ "MetricGroup": "SoC",
+ "MetricName": "tma_info_system_uncore_frequency",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "cpu_core@CPU_CLK_UNHALTED.THREAD@",
"MetricGroup": "Pipeline",
"MetricName": "tma_info_thread_clks",
@@ -2045,7 +2061,6 @@
"MetricExpr": "1 / tma_info_thread_ipc",
"MetricGroup": "Mem;Pipeline",
"MetricName": "tma_info_thread_cpi",
- "ScaleUnit": "1per_instr",
"Unit": "cpu_core"
},
{
@@ -2053,7 +2068,7 @@
"MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / cpu_core@UOPS_ISSUED.ANY@",
"MetricGroup": "Cor;Pipeline",
"MetricName": "tma_info_thread_execute_per_issue",
- "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage",
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage.",
"Unit": "cpu_core"
},
{
@@ -2065,7 +2080,7 @@
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
- "MetricExpr": "slots",
+ "MetricExpr": "cpu_core@TOPDOWN.SLOTS@",
"MetricGroup": "TmaL1;tma_L1_group",
"MetricName": "tma_info_thread_slots",
"Unit": "cpu_core"
@@ -2083,15 +2098,15 @@
"MetricExpr": "tma_retiring * tma_info_thread_slots / cpu_core@BR_INST_RETIRED.NEAR_TAKEN@",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
- "MetricThreshold": "tma_info_thread_uptb < 8 * 1.5",
+ "MetricThreshold": "tma_info_thread_uptb < 12",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles where the Integer Divider unit was active",
+ "BriefDescription": "This metric represents fraction of cycles where the Integer Divider unit was active.",
"MetricExpr": "tma_divider - tma_fp_divider",
"MetricGroup": "TopdownL4;tma_L4_group;tma_divider_group",
"MetricName": "tma_int_divider",
- "MetricThreshold": "tma_int_divider > 0.2 & tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_int_divider > 0.2 & (tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2101,7 +2116,7 @@
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_int_operations",
"MetricThreshold": "tma_int_operations > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents overall Integer (Int) select operations fraction the CPU has executed (retired). Vector/Matrix Int operations and shuffles are counted. Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain",
+ "PublicDescription": "This metric represents overall Integer (Int) select operations fraction the CPU has executed (retired). Vector/Matrix Int operations and shuffles are counted. Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2110,8 +2125,8 @@
"MetricExpr": "cpu_core@INT_VEC_RETIRED.128BIT@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;IntVector;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group;tma_issue2P",
"MetricName": "tma_int_vector_128b",
- "MetricThreshold": "tma_int_vector_128b > 0.1 & tma_int_operations > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents 128-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_256b, tma_ports_utilized_2",
+ "MetricThreshold": "tma_int_vector_128b > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents 128-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2120,8 +2135,8 @@
"MetricExpr": "cpu_core@INT_VEC_RETIRED.256BIT@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;IntVector;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group;tma_issue2P",
"MetricName": "tma_int_vector_256b",
- "MetricThreshold": "tma_int_vector_256b > 0.1 & tma_int_operations > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD/MUL or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_ports_utilized_2",
+ "MetricThreshold": "tma_int_vector_256b > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD/MUL or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2130,8 +2145,8 @@
"MetricExpr": "cpu_core@ICACHE_TAG.STALLS@ / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
- "MetricThreshold": "tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS, FRONTEND_RETIRED.ITLB_MISS",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2140,27 +2155,27 @@
"MetricExpr": "cpu_core@MEMORY_STALLS.L1@ / tma_info_thread_clks",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
"MetricName": "tma_l1_bound",
- "MetricThreshold": "tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 Data (L1D) cache. The L1D cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1D. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit Level 1 after missing Level 0 within the L1D cache",
- "MetricExpr": "(min(cpu_core@MEM_LOAD_RETIRED.L1_HIT_L1@ * cpu_core@mem_load_retired.l1_hit_l1@R, cpu_core@MEM_LOAD_RETIRED.L1_HIT_L1@ * 9) if 0 < cpu_core@mem_load_retired.l1_hit_l1@R else cpu_core@MEM_LOAD_RETIRED.L1_HIT_L1@ * 9) / tma_info_thread_clks",
+ "BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit Level 1 after missing Level 0 within the L1D cache.",
+ "MetricExpr": "cpu_core@MEM_LOAD_RETIRED.L1_HIT_L1@ * min(cpu_core@MEM_LOAD_RETIRED.L1_HIT_L1@R, 9) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_l1_latency_capacity",
- "MetricThreshold": "tma_l1_latency_capacity > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l1_latency_capacity > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache",
- "MetricExpr": "4 * cpu_core@DEPENDENT_LOADS.ANY@ / tma_info_thread_clks",
+ "BriefDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache",
+ "MetricExpr": "4 * cpu_core@DEPENDENT_LOADS.ANY\\,cmask\\=1@ / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_l1_latency_dependency",
- "MetricThreshold": "tma_l1_latency_dependency > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache. The short latency of the L1D cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: DEPENDENT_LOADS.ANY",
+ "MetricThreshold": "tma_l1_latency_dependency > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache. The short latency of the L1D cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_UOPS_RETIRED.L1_HIT_PS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2169,17 +2184,17 @@
"MetricExpr": "cpu_core@MEMORY_STALLS.L2@ / tma_info_thread_clks",
"MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
- "MetricThreshold": "tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L2 cache under unloaded scenarios (possibly L2 latency limited)",
- "MetricExpr": "(min(cpu_core@MEM_LOAD_RETIRED.L2_HIT@ * cpu_core@mem_load_retired.l2_hit@R, cpu_core@MEM_LOAD_RETIRED.L2_HIT@ * (3 * tma_info_system_core_frequency)) if 0 < cpu_core@mem_load_retired.l2_hit@R else cpu_core@MEM_LOAD_RETIRED.L2_HIT@ * (3 * tma_info_system_core_frequency)) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@MEM_LOAD_RETIRED.L2_HIT@ * min(cpu_core@MEM_LOAD_RETIRED.L2_HIT@R, 3 * tma_info_system_core_frequency) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
"MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_l2_bound_group",
"MetricName": "tma_l2_hit_latency",
- "MetricThreshold": "tma_l2_hit_latency > 0.05 & tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l2_hit_latency > 0.05 & (tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L2 cache under unloaded scenarios (possibly L2 latency limited). Avoiding L1 cache misses (i.e. L1 misses/L2 hits) will improve the latency. Sample with: MEM_LOAD_RETIRED.L2_HIT",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -2189,18 +2204,18 @@
"MetricExpr": "cpu_core@MEMORY_STALLS.L3@ / tma_info_thread_clks",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
- "MetricThreshold": "tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
- "MetricExpr": "(min(cpu_core@MEM_LOAD_RETIRED.L3_HIT@ * cpu_core@mem_load_retired.l3_hit@R, cpu_core@MEM_LOAD_RETIRED.L3_HIT@ * (12 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency) if 0 < cpu_core@mem_load_retired.l3_hit@R else cpu_core@MEM_LOAD_RETIRED.L3_HIT@ * (12 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@MEM_LOAD_RETIRED.L3_HIT@ * min(cpu_core@MEM_LOAD_RETIRED.L3_HIT@R, 9 * tma_info_system_core_frequency) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
- "MetricThreshold": "tma_l3_hit_latency > 0.1 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT. Related metrics: tma_bottleneck_cache_memory_latency, tma_branch_resteers, tma_mem_latency, tma_store_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_bottleneck_data_cache_memory_latency, tma_mem_latency",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2209,19 +2224,19 @@
"MetricExpr": "cpu_core@DECODE.LCP@ / tma_info_thread_clks",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_lcp",
- "MetricThreshold": "tma_lcp > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation)",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
"MetricExpr": "max(0, tma_retiring - tma_heavy_operations)",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_light_operations",
"MetricThreshold": "tma_light_operations > 0.6",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2231,7 +2246,7 @@
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_load_op_utilization",
"MetricThreshold": "tma_load_op_utilization > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations. Sample with: UOPS_DISPATCHED.LOAD",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations. Sample with: UOPS_DISPATCHED.PORT_2_3",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2240,7 +2255,7 @@
"MetricExpr": "max(0, tma_dtlb_load - tma_load_stlb_miss)",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
"MetricName": "tma_load_stlb_hit",
- "MetricThreshold": "tma_load_stlb_hit > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_hit > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2249,54 +2264,55 @@
"MetricExpr": "cpu_core@DTLB_LOAD_MISSES.WALK_ACTIVE@ / tma_info_thread_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
"MetricName": "tma_load_stlb_miss",
- "MetricThreshold": "tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_1G@ / (cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_1G@)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_1g",
- "MetricThreshold": "tma_load_stlb_miss_1g > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_1g > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M@ / (cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_1G@)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_2m",
- "MetricThreshold": "tma_load_stlb_miss_2m > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_2m > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_4K@ / (cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_1G@)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_4k",
- "MetricThreshold": "tma_load_stlb_miss_4k > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_4k > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
- "MetricExpr": "cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ * cpu_core@mem_inst_retired.lock_loads@R / tma_info_thread_clks",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ * cpu_core@MEM_INST_RETIRED.LOCK_LOADS@R / tma_info_thread_clks",
"MetricGroup": "LockCont;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
- "MetricThreshold": "tma_lock_latency > 0.2 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit",
- "MetricExpr": "cpu_core@LSD.UOPS\\,cmask\\=0x8\\,inv\\=0x1@ / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@LSD.UOPS\\,cmask\\=0x8\\,inv\\=0x1@ / tma_info_thread_clks / 2",
"MetricGroup": "FetchBW;LSD;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_lsd",
"MetricThreshold": "tma_lsd > 0.15 & tma_fetch_bandwidth > 0.2",
- "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit. LSD typically does well sustaining Uop supply. However; in some rare cases; optimal uop-delivery could not be reached for small loops whose size (in terms of number of uops) does not suit well the LSD structure",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit. LSD typically does well sustaining Uop supply. However; in some rare cases; optimal uop-delivery could not be reached for small loops whose size (in terms of number of uops) does not suit well the LSD structure.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2307,17 +2323,17 @@
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_bottleneck_memory_synchronization, tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_bottleneck_memory_synchronization, tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
- "MetricExpr": "min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DATA_RD\\,cmask\\=0x4@) / tma_info_thread_clks",
+ "MetricExpr": "min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
- "MetricThreshold": "tma_mem_bandwidth > 0.2 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_sq_full",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2326,34 +2342,33 @@
"MetricExpr": "min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD@) / tma_info_thread_clks - tma_mem_bandwidth",
"MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
- "MetricThreshold": "tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_bottleneck_cache_memory_latency, tma_l3_hit_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_bottleneck_data_cache_memory_latency, tma_l3_hit_latency",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
- "MetricExpr": "topdown\\-mem\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "cpu_core@topdown\\-mem\\-bound@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)",
"MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_memory_bound",
"MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two)",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to LFENCE Instructions",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to LFENCE Instructions.",
"MetricExpr": "13 * cpu_core@MISC2_RETIRED.LFENCE@ / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_memory_fence",
- "MetricThreshold": "tma_memory_fence > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_memory_fence > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations , uops for memory load or store accesses",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
"MetricExpr": "tma_light_operations * cpu_core@MEM_UOP_RETIRED.ANY@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_memory_operations",
@@ -2376,14 +2391,14 @@
"MetricExpr": "tma_branch_mispredicts / tma_bad_speculation * cpu_core@INT_MISC.CLEAR_RESTEER_CYCLES@ / tma_info_thread_clks",
"MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
"MetricName": "tma_mispredicts_resteers",
- "MetricThreshold": "tma_mispredicts_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_bottleneck_mispredictions, tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline)",
- "MetricExpr": "(cpu_core@IDQ.MITE_UOPS\\,cmask\\=0x8\\,inv\\=0x1@ / tma_info_thread_clks + cpu_core@IDQ.MITE_UOPS@ / (cpu_core@IDQ.DSB_UOPS@ + cpu_core@IDQ.MITE_UOPS@) * (cpu_core@IDQ_BUBBLES.CYCLES_0_UOPS_DELIV.CORE@ - cpu_core@IDQ_BUBBLES.FETCH_LATENCY@)) / tma_info_thread_clks",
+ "MetricExpr": "(cpu_core@IDQ.MITE_UOPS\\,cmask\\=0x8\\,inv\\=0x1@ / 2 + cpu_core@IDQ.MITE_UOPS@ / (cpu_core@IDQ.DSB_UOPS@ + cpu_core@IDQ.MITE_UOPS@) * (cpu_core@IDQ_BUBBLES.STARVATION_CYCLES@ - cpu_core@IDQ_BUBBLES.FETCH_LATENCY@)) / tma_info_thread_clks",
"MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_mite",
"MetricThreshold": "tma_mite > 0.1 & tma_fetch_bandwidth > 0.2",
@@ -2392,18 +2407,18 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued , the Count Domain; [ADL+] cycles)",
+ "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles)",
"MetricExpr": "160 * cpu_core@ASSISTS.SSE_AVX_MIX@ / tma_info_thread_clks",
"MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group",
"MetricName": "tma_mixing_vectors",
"MetricThreshold": "tma_mixing_vectors > 0.05",
- "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued , the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
+ "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the Microcode Sequencer (MS) unit - see Microcode_Sequencer node for details",
- "MetricExpr": "cpu_core@IDQ.MS_CYCLES_ANY@ / tma_info_thread_clks",
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the Microcode Sequencer (MS) unit - see Microcode_Sequencer node for details.",
+ "MetricExpr": "cpu_core@IDQ.MS_CYCLES_ANY@ / tma_info_thread_clks / 1.8",
"MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_ms",
"MetricThreshold": "tma_ms > 0.05 & tma_fetch_bandwidth > 0.2",
@@ -2415,7 +2430,7 @@
"MetricExpr": "3 * cpu_core@IDQ.MS_SWITCHES@ / tma_info_thread_clks",
"MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
"MetricName": "tma_ms_switches",
- "MetricThreshold": "tma_ms_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_bottleneck_irregular_overhead, tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -2426,7 +2441,7 @@
"MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_non_fused_branches",
"MetricThreshold": "tma_non_fused_branches > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2435,14 +2450,15 @@
"MetricExpr": "tma_light_operations * cpu_core@INST_RETIRED.NOP@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "BvBO;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_nop_instructions",
- "MetricThreshold": "tma_nop_instructions > 0.1 & tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes",
- "MetricExpr": "max(0, tma_light_operations - (tma_x87_use + (cpu_core@FP_ARITH_INST_RETIRED.SCALAR@ + cpu_core@FP_ARITH_INST_RETIRED.VECTOR@) / (tma_retiring * tma_info_thread_slots) + (cpu_core@INT_VEC_RETIRED.ADD_128@ + cpu_core@INT_VEC_RETIRED.VNNI_128@ + cpu_core@INT_VEC_RETIRED.ADD_256@ + cpu_core@INT_VEC_RETIRED.MUL_256@ + cpu_core@INT_VEC_RETIRED.VNNI_256@) / (tma_retiring * tma_info_thread_slots) + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "max(0, tma_light_operations - (tma_x87_use + (cpu_core@FP_ARITH_OPS_RETIRED.SCALAR@ + cpu_core@FP_ARITH_OPS_RETIRED.VECTOR@) / (tma_retiring * tma_info_thread_slots) + (cpu_core@INT_VEC_RETIRED.ADD_128@ + cpu_core@INT_VEC_RETIRED.VNNI_128@ + cpu_core@INT_VEC_RETIRED.ADD_256@ + cpu_core@INT_VEC_RETIRED.MUL_256@ + cpu_core@INT_VEC_RETIRED.VNNI_256@) / (tma_retiring * tma_info_thread_slots) + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches))",
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_other_light_ops",
"MetricThreshold": "tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
@@ -2451,20 +2467,20 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types)",
+ "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).",
"MetricExpr": "max(tma_branch_mispredicts * (1 - cpu_core@BR_MISP_RETIRED.ALL_BRANCHES@ / (cpu_core@INT_MISC.CLEARS_COUNT@ - cpu_core@MACHINE_CLEARS.COUNT@)), 0.0001)",
"MetricGroup": "BrMispredicts;BvIO;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_other_mispredicts",
- "MetricThreshold": "tma_other_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering",
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.",
"MetricExpr": "max(tma_machine_clears * (1 - cpu_core@MACHINE_CLEARS.MEMORY_ORDERING@ / cpu_core@MACHINE_CLEARS.COUNT@), 0.0001)",
"MetricGroup": "BvIO;Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_other_nukes",
- "MetricThreshold": "tma_other_nukes > 0.05 & tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2474,75 +2490,76 @@
"MetricGroup": "TopdownL5;tma_L5_group;tma_assists_group",
"MetricName": "tma_page_faults",
"MetricThreshold": "tma_page_faults > 0.05",
- "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Page Faults. A Page Fault may apply on first application access to a memory page. Note operating system handling of page faults accounts for the majority of its cost",
+ "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Page Faults. A Page Fault may apply on first application access to a memory page. Note operating system handling of page faults accounts for the majority of its cost.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "((cpu_core@EXE_ACTIVITY.EXE_BOUND_0_PORTS@ + (cpu_core@EXE_ACTIVITY.1_PORTS_UTIL@ + tma_retiring * cpu_core@EXE_ACTIVITY.2_3_PORTS_UTIL@)) / tma_info_thread_clks if cpu_core@ARITH.DIV_ACTIVE@ < cpu_core@CYCLE_ACTIVITY.STALLS_TOTAL@ - cpu_core@EXE_ACTIVITY.BOUND_ON_LOADS@ else (cpu_core@EXE_ACTIVITY.1_PORTS_UTIL@ + tma_retiring * cpu_core@EXE_ACTIVITY.2_3_PORTS_UTIL@) / tma_info_thread_clks)",
"MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_ports_utilization",
- "MetricThreshold": "tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricConstraint": "NO_THRESHOLD_AND_NMI",
"MetricExpr": "cpu_core@EXE_ACTIVITY.EXE_BOUND_0_PORTS@ / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
- "MetricThreshold": "tma_ports_utilized_0 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricConstraint": "NO_THRESHOLD_AND_NMI",
"MetricExpr": "cpu_core@EXE_ACTIVITY.1_PORTS_UTIL@ / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_1",
- "MetricThreshold": "tma_ports_utilized_1 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Sample with: EXE_ACTIVITY.1_PORTS_UTIL. Related metrics: tma_l1_bound",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "cpu_core@EXE_ACTIVITY.2_PORTS_UTIL@ / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_2",
- "MetricThreshold": "tma_ports_utilized_2 > 0.15 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Sample with: EXE_ACTIVITY.2_PORTS_UTIL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Sample with: EXE_ACTIVITY.2_PORTS_UTIL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
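[To see the effect named in the tma_ports_utilized_2 description, a sketch of a loop that auto-vectorizes: compiled scalar it issues roughly one uop per element; vectorized, one wide uop covers several elements, easing port pressure. The compiler flags implied are an assumption about the toolchain.]

/* Compare tma_ports_utilized_2 built with -O2 vs. -O2 -fno-tree-vectorize
 * (GCC; hypothetical build settings). */
#include <stddef.h>

void axpy(float *restrict y, const float *restrict x, float a, size_t n)
{
    for (size_t i = 0; i < n; i++)
        y[i] += a * x[i];    /* vectorizes to wide FMA uops */
}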
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "cpu_core@UOPS_EXECUTED.CYCLES_GE_3@ / tma_info_thread_clks",
"MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
- "MetricThreshold": "tma_ports_utilized_3m > 0.4 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by (indirect) RET instructions",
- "MetricExpr": "cpu_core@BR_MISP_RETIRED.RET_COST@ * cpu_core@br_misp_retired.ret_cost@R / tma_info_thread_clks",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by (indirect) RET instructions.",
+ "MetricExpr": "cpu_core@BR_MISP_RETIRED.RET_COST@ * cpu_core@BR_MISP_RETIRED.RET_COST@R / tma_info_thread_clks",
"MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_ret_mispredicts",
- "MetricThreshold": "tma_ret_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_ret_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "cpu_core@topdown\\-retiring@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)",
"MetricGroup": "BvUW;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
@@ -2556,8 +2573,8 @@
"MetricExpr": "(cpu_core@BE_STALLS.SCOREBOARD@ + cpu_core@CPU_CLK_UNHALTED.C02@) / tma_info_thread_clks",
"MetricGroup": "BvIO;PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
"MetricName": "tma_serializing_operation",
- "MetricThreshold": "tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: BE_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
+ "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: PARTIAL_RAT_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
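[A minimal sketch of the serializing instructions the tma_serializing_operation description lists, assuming x86-64 with GCC/Clang builtins; each drains or fences the out-of-order backend.]

#include <cpuid.h>       /* __get_cpuid */
#include <x86intrin.h>   /* _mm_lfence  */

unsigned probe(void)
{
    unsigned a, b, c, d;
    __get_cpuid(0, &a, &b, &c, &d);  /* CPUID serializes execution      */
    _mm_lfence();                    /* LFENCE waits for older loads    */
    return a;
}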
@@ -2566,39 +2583,38 @@
"MetricExpr": "tma_light_operations * cpu_core@INT_VEC_RETIRED.SHUFFLES@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "HPC;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_shuffles_256b",
- "MetricThreshold": "tma_shuffles_256b > 0.1 & tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring Shuffle operations of 256-bit vector size (FP or Integer). Shuffles may incur slow cross \"vector lane\" data transfers",
+ "MetricThreshold": "tma_shuffles_256b > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring Shuffle operations of 256-bit vector size (FP or Integer). Shuffles may incur slow cross \"vector lane\" data transfers.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "cpu_core@CPU_CLK_UNHALTED.PAUSE@ / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_slow_pause",
- "MetricThreshold": "tma_slow_pause > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. Sample with: CPU_CLK_UNHALTED.PAUSE_INST",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
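[A spin-wait sketch showing where CPU_CLK_UNHALTED.PAUSE cycles come from; the shared flag is illustrative.]

#include <stdatomic.h>
#include <x86intrin.h>

void spin_until_set(atomic_int *flag)
{
    while (!atomic_load_explicit(flag, memory_order_acquire))
        _mm_pause();   /* PAUSE: its stall cycles feed tma_slow_pause */
}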
{
"BriefDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary",
- "MetricExpr": "(min(cpu_core@MEM_INST_RETIRED.SPLIT_LOADS@ * cpu_core@mem_inst_retired.split_loads@R, cpu_core@MEM_INST_RETIRED.SPLIT_LOADS@ * tma_info_memory_load_miss_real_latency) if 0 < cpu_core@mem_inst_retired.split_loads@R else cpu_core@MEM_INST_RETIRED.SPLIT_LOADS@ * tma_info_memory_load_miss_real_latency) / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@MEM_INST_RETIRED.SPLIT_LOADS@ * min(cpu_core@MEM_INST_RETIRED.SPLIT_LOADS@R, tma_info_memory_load_miss_real_latency) / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_split_loads",
"MetricThreshold": "tma_split_loads > 0.3",
- "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS_PS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
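[A sketch of the access pattern MEM_INST_RETIRED.SPLIT_LOADS counts, assuming 64-byte cache lines; memcpy keeps the misaligned read well-defined C.]

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

uint64_t split_read(void)
{
    char *buf = aligned_alloc(64, 128);
    uint64_t v = 0;
    if (!buf)
        return 0;
    memset(buf, 1, 128);
    memcpy(&v, buf + 60, sizeof(v));  /* bytes 60..67 straddle lines 0 and 1 */
    free(buf);
    return v;
}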
{
"BriefDescription": "This metric represents rate of split store accesses",
- "MetricExpr": "(min(cpu_core@MEM_INST_RETIRED.SPLIT_STORES@ * cpu_core@mem_inst_retired.split_stores@R, cpu_core@MEM_INST_RETIRED.SPLIT_STORES@) if 0 < cpu_core@mem_inst_retired.split_stores@R else cpu_core@MEM_INST_RETIRED.SPLIT_STORES@) / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@MEM_INST_RETIRED.SPLIT_STORES@ * min(cpu_core@MEM_INST_RETIRED.SPLIT_STORES@R, 1) / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
"MetricName": "tma_split_stores",
- "MetricThreshold": "tma_split_stores > 0.2 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
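[And the remedy the split-store description suggests: keep each store within one 64-byte line.]

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

void store_demo(void)
{
    uint64_t x = 42;
    char *buf = aligned_alloc(64, 128);
    if (!buf)
        return;
    memcpy(buf + 60, &x, sizeof(x));  /* split store: crosses a line boundary */
    memcpy(buf + 64, &x, sizeof(x));  /* aligned store: stays in one line     */
    free(buf);
}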
@@ -2607,8 +2623,8 @@
"MetricExpr": "(cpu_core@XQ.FULL@ + cpu_core@L1D_MISS.L2_STALLS@) / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
- "MetricThreshold": "tma_sq_full > 0.3 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2617,8 +2633,17 @@
"MetricExpr": "cpu_core@EXE_ACTIVITY.BOUND_ON_STORES@ / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_store_bound",
- "MetricThreshold": "tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES_PS",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates clocks wasted due to loads blocked due to unknown store address (did not do memory disambiguation) or due to unknown store data",
+ "MetricExpr": "7 * cpu_core@LD_BLOCKS.STORE_EARLY\\,cmask\\=1@ / tma_info_thread_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_store_early_blk",
+ "MetricThreshold": "tma_store_early_blk > 0.2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
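[A hedged sketch of the pattern the new tma_store_early_blk metric describes: the store address resolves late, so the following load cannot be disambiguated against it. The divide standing in for the slow address computation is illustrative.]

long late_store_addr(long *a, long key, long n)
{
    a[(key / 7919) % n] = 1;   /* store address waits on a slow divide      */
    return a[0];               /* load may alias it: blocked until resolved */
}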
@@ -2627,8 +2652,8 @@
"MetricExpr": "13 * cpu_core@LD_BLOCKS.STORE_FORWARD@ / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
- "MetricThreshold": "tma_store_fwd_blk > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
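[The size-mismatch case the tma_store_fwd_blk description ends on, as a sketch: a narrow store followed by a wider, overlapping load cannot be forwarded and is counted by LD_BLOCKS.STORE_FORWARD.]

#include <stdint.h>
#include <string.h>

uint64_t fwd_block(void)
{
    uint8_t b[8] = {0};
    b[0] = 0xff;                 /* 1-byte store                              */
    uint64_t v;
    memcpy(&v, b, sizeof(v));    /* 8-byte overlapping load: forward blocked  */
    return v;
}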
@@ -2637,8 +2662,8 @@
"MetricExpr": "(cpu_core@MEM_STORE_RETIRED.L2_HIT@ * 10 * (1 - cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ / cpu_core@MEM_INST_RETIRED.ALL_STORES@) + (1 - cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ / cpu_core@MEM_INST_RETIRED.ALL_STORES@) * min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO@)) / tma_info_thread_clks",
"MetricGroup": "BvML;LockCont;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
- "MetricThreshold": "tma_store_latency > 0.1 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_branch_resteers, tma_fb_full, tma_l3_hit_latency, tma_lock_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2648,7 +2673,6 @@
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_store_op_utilization",
"MetricThreshold": "tma_store_op_utilization > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations. Sample with: UOPS_DISPATCHED.STD, UOPS_DISPATCHED.STA",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2657,7 +2681,7 @@
"MetricExpr": "max(0, tma_dtlb_store - tma_store_stlb_miss)",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
"MetricName": "tma_store_stlb_hit",
- "MetricThreshold": "tma_store_stlb_hit > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_hit > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2666,34 +2690,34 @@
"MetricExpr": "cpu_core@DTLB_STORE_MISSES.WALK_ACTIVE@ / tma_info_thread_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
"MetricName": "tma_store_stlb_miss",
- "MetricThreshold": "tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_1G@ / (cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_1G@)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_1g",
- "MetricThreshold": "tma_store_stlb_miss_1g > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_1g > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M@ / (cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_1G@)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_2m",
- "MetricThreshold": "tma_store_stlb_miss_2m > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_2m > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_4K@ / (cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_1G@)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_4k",
- "MetricThreshold": "tma_store_stlb_miss_4k > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_4k > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
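[Since the three buckets above split store page walks by page size, a hedged sketch of moving a buffer from 4 KB toward 2 MB pages via transparent huge pages; Linux-specific and honored only where THP is enabled.]

#include <stddef.h>
#include <sys/mman.h>

void *huge_buffer(size_t bytes)
{
    void *p = mmap(NULL, bytes, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p != MAP_FAILED)
        madvise(p, bytes, MADV_HUGEPAGE);  /* request 2 MB backing */
    return p;   /* walks shift from the _4K bucket toward _2M */
}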
@@ -2702,7 +2726,7 @@
"MetricExpr": "9 * cpu_core@OCR.STREAMING_WR.ANY_RESPONSE@ / tma_info_thread_clks",
"MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueSmSt;tma_store_bound_group",
"MetricName": "tma_streaming_stores",
- "MetricThreshold": "tma_streaming_stores > 0.2 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_streaming_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should Streaming stores be a bottleneck. Sample with: OCR.STREAMING_WR.ANY_RESPONSE. Related metrics: tma_fb_full",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -2712,7 +2736,7 @@
"MetricExpr": "cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES@ / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
- "MetricThreshold": "tma_unknown_branches > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: FRONTEND_RETIRED.UNKNOWN_BRANCH",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -2722,8 +2746,8 @@
"MetricExpr": "tma_retiring * cpu_core@UOPS_EXECUTED.X87@ / cpu_core@UOPS_EXECUTED.THREAD@",
"MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
"MetricName": "tma_x87_use",
- "MetricThreshold": "tma_x87_use > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
}
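[A sketch of how legacy x87 enters modern code: on x86-64 System V, long double arithmetic is evaluated on the x87 stack and feeds tma_x87_use; rewriting in double keeps the work in SSE/AVX.]

long double x87_sum(const long double *a, int n)
{
    long double s = 0.0L;
    for (int i = 0; i < n; i++)
        s += a[i];     /* fadd on the x87 register stack */
    return s;
}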
diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/memory.json b/tools/perf/pmu-events/arch/x86/lunarlake/memory.json
index 60daff922a89..caa387e10259 100644
--- a/tools/perf/pmu-events/arch/x86/lunarlake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/lunarlake/memory.json
@@ -291,7 +291,7 @@
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.STORE_SAMPLE",
- "PublicDescription": "Counts Retired memory accesses with at least 1 store operation. This PEBS event is the precisely-distributed (PDist) trigger covering all stores uops for sampling by the PEBS Store Latency Facility. The facility is described in Intel SDM Volume 3 section 19.9.8",
+ "PublicDescription": "Counts Retired memory accesses with at least 1 store operation. This PEBS event is the precisely-distributed (PDist) trigger covering all stores uops for sampling by the PEBS Store Latency Facility. The facility is described in Intel SDM Volume 3 section 19.9.8 Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x2",
"Unit": "cpu_core"
@@ -315,56 +315,109 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1FBC000004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the L3 cache and were supplied by the system memory (DRAM, MSC, or MMIO).",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13FBFC00004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the L3 cache and were supplied by the system memory (DRAM, MSC, or MMIO). Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1FBC000001",
+ "PublicDescription": "Counts demand data reads that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1E780000001",
+ "PublicDescription": "Counts demand data reads that were supplied by DRAM. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts demand data reads that were not supplied by the L3 cache and were supplied by the system memory (DRAM, MSC, or MMIO).",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13FBFC00001",
+ "PublicDescription": "Counts demand data reads that were not supplied by the L3 cache and were supplied by the system memory (DRAM, MSC, or MMIO). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts demand data reads that were not supplied by the L3 cache and were supplied by the system memory (DRAM, MSC, or MMIO).",
- "Counter": "0,1,2,3",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x9E7FA000001",
+ "PublicDescription": "Counts demand data reads that were not supplied by the L3 cache and were supplied by the system memory (DRAM, MSC, or MMIO). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1FBC000002",
+ "PublicDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache and were supplied by the system memory (DRAM, MSC, or MMIO).",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13FBFC00002",
+ "PublicDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache and were supplied by the system memory (DRAM, MSC, or MMIO). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache and were supplied by the system memory (DRAM, MSC, or MMIO).",
- "Counter": "0,1,2,3",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x9E7FA000002",
+ "PublicDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache and were supplied by the system memory (DRAM, MSC, or MMIO). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/other.json b/tools/perf/pmu-events/arch/x86/lunarlake/other.json
index 667707d4fe37..164374edf293 100644
--- a/tools/perf/pmu-events/arch/x86/lunarlake/other.json
+++ b/tools/perf/pmu-events/arch/x86/lunarlake/other.json
@@ -19,15 +19,6 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.",
- "Counter": "0,1,2,3,4,5,6,7,8,9",
- "EventCode": "0xa2",
- "EventName": "BE_STALLS.SCOREBOARD",
- "SampleAfterValue": "100003",
- "UMask": "0x2",
- "Unit": "cpu_core"
- },
- {
"BriefDescription": "Counts the number of unhalted cycles a Core is blocked due to a lock In Progress issued by another core",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x63",
@@ -66,15 +57,6 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Count number of times a load is depending on another load that had just write back its data or in previous or 2 cycles back. This event supports in-direct dependency through a single uop.",
- "Counter": "0,1,2,3,4,5,6,7,8,9",
- "EventCode": "0x02",
- "EventName": "DEPENDENT_LOADS.ANY",
- "SampleAfterValue": "1000003",
- "UMask": "0x7",
- "Unit": "cpu_core"
- },
- {
"BriefDescription": "Counts the number of cycles the L2 Prefetchers are at throttle level 0",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x32",
@@ -120,297 +102,13 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of uops executed on all Integer ports.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xb3",
- "EventName": "INT_UOPS_EXECUTED.ALL",
- "SampleAfterValue": "1000003",
- "UMask": "0xff",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts the number of uops executed on a load port.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xb3",
- "EventName": "INT_UOPS_EXECUTED.LD",
- "PublicDescription": "Counts the number of uops executed on a load port. This event counts for integer uops even if the destination is FP/vector",
- "SampleAfterValue": "1000003",
- "UMask": "0x1",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts the number of uops executed on integer port 0.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xb3",
- "EventName": "INT_UOPS_EXECUTED.P0",
- "SampleAfterValue": "1000003",
- "UMask": "0x8",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts the number of uops executed on integer port 1.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xb3",
- "EventName": "INT_UOPS_EXECUTED.P1",
- "SampleAfterValue": "1000003",
- "UMask": "0x10",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts the number of uops executed on integer port 2.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xb3",
- "EventName": "INT_UOPS_EXECUTED.P2",
- "SampleAfterValue": "1000003",
- "UMask": "0x20",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts the number of uops executed on integer port 3.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xb3",
- "EventName": "INT_UOPS_EXECUTED.P3",
- "SampleAfterValue": "1000003",
- "UMask": "0x40",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts the number of uops executed on integer port 0,1, 2, 3.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xb3",
- "EventName": "INT_UOPS_EXECUTED.PRIMARY",
- "SampleAfterValue": "1000003",
- "UMask": "0x78",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts the number of uops executed on a Store address port.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xb3",
- "EventName": "INT_UOPS_EXECUTED.STA",
- "PublicDescription": "Counts the number of uops executed on a Store address port. This event counts integer uops even if the data source is FP/vector",
- "SampleAfterValue": "1000003",
- "UMask": "0x2",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts the number of uops executed on an integer store data and jump port.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xb3",
- "EventName": "INT_UOPS_EXECUTED.STD_JMP",
- "SampleAfterValue": "1000003",
- "UMask": "0x4",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts the number of LLC prefetches that were throttled due to Dynamic Prefetch Throttling. The throttle requestor/source could be from the uncore/SOC or the Dead Block Predictor. Counts on a per core basis.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0x29",
- "EventName": "LLC_PREFETCHES_THROTTLED.DPT",
- "SampleAfterValue": "1000003",
- "UMask": "0x1",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts the number of LLC prefetches throttled due to Demand Throttle Prefetcher. DTP Global Triggered with no Local Override. Counts on a per core basis.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0x29",
- "EventName": "LLC_PREFETCHES_THROTTLED.DTP",
- "SampleAfterValue": "1000003",
- "UMask": "0x2",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts the number of LLC prefetches not throttled by DTP due to local override. These prefetches may still be throttled due to another throttler mechanism. Counts on a per core basis.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0x29",
- "EventName": "LLC_PREFETCHES_THROTTLED.DTP_OVERRIDE",
- "SampleAfterValue": "1000003",
- "UMask": "0x4",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts the number of LLC prefetches throttled due to LLC hit rate in <insert knob name here>. Counts on a per core basis.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0x29",
- "EventName": "LLC_PREFETCHES_THROTTLED.HIT_RATE",
- "SampleAfterValue": "1000003",
- "UMask": "0x10",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts the number of LLC prefetches throttled due to exceeding the XQ threshold set by either XQ_THRESOLD_DTP or LLC_XQ_THRESHOLD. Counts on a per core basis.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0x29",
- "EventName": "LLC_PREFETCHES_THROTTLED.XQ_THRESH",
- "SampleAfterValue": "1000003",
- "UMask": "0x8",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts cycles where no execution is happening due to loads waiting for L1 cache (that is: no execution & load in flight & no load missed L1 cache)",
- "Counter": "0,1,2,3,4,5,6,7,8,9",
- "EventCode": "0x46",
- "EventName": "MEMORY_STALLS.L1",
- "SampleAfterValue": "1000003",
- "UMask": "0x1",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Counts cycles where no execution is happening due to loads waiting for L2 cache (that is: no execution & load in flight & load missed L1 & no load missed L2 cache)",
- "Counter": "0,1,2,3,4,5,6,7,8,9",
- "EventCode": "0x46",
- "EventName": "MEMORY_STALLS.L2",
- "SampleAfterValue": "1000003",
- "UMask": "0x2",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Counts cycles where no execution is happening due to loads waiting for L3 cache (that is: no execution & load in flight & load missed L1 & load missed L2 cache & no load missed L3 Cache)",
- "Counter": "0,1,2,3,4,5,6,7,8,9",
- "EventCode": "0x46",
- "EventName": "MEMORY_STALLS.L3",
- "SampleAfterValue": "1000003",
- "UMask": "0x4",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Counts cycles where no execution is happening due to loads waiting for Memory (that is: no execution & load in flight & a load missed L3 cache)",
- "Counter": "0,1,2,3,4,5,6,7,8,9",
- "EventCode": "0x46",
- "EventName": "MEMORY_STALLS.MEM",
- "SampleAfterValue": "1000003",
- "UMask": "0x8",
- "Unit": "cpu_core"
- },
- {
"BriefDescription": "Counts all requests that have any type of response.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.ALL_REQUESTS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xFF0000001DFFF",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts writebacks of modified cachelines that have any type of response.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB7",
- "EventName": "OCR.COREWB_M.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10008",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts writebacks of non-modified cachelines that have any type of response.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB7",
- "EventName": "OCR.COREWB_NONM.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x11000",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10004",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_CODE_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1FBC000004",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts demand data reads that have any type of response.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10001",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts demand data reads that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10001",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_DATA_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1FBC000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_DATA_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1E780000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10002",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10002",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_RFO.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1FBC000002",
+ "PublicDescription": "Counts all requests that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -422,6 +120,7 @@
"EventName": "OCR.FULL_STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800000010000",
+ "PublicDescription": "Counts full streaming stores (64 bytes, WCiLF) that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -433,6 +132,7 @@
"EventName": "OCR.PARTIAL_STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400000010000",
+ "PublicDescription": "Counts partial streaming stores (less than 64 bytes, WCiL) that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -444,72 +144,24 @@
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10800",
+ "PublicDescription": "Counts streaming stores that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts streaming stores that have any type of response.",
- "Counter": "0,1,2,3",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10800",
+ "PublicDescription": "Counts streaming stores that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
- "Counter": "0,1,2,3,4,5,6,7,8,9",
- "EventCode": "0xa5",
- "EventName": "RS.EMPTY",
- "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
- "SampleAfterValue": "1000003",
- "UMask": "0x7",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
- "Counter": "0,1,2,3,4,5,6,7,8,9",
- "CounterMask": "1",
- "EdgeDetect": "1",
- "EventCode": "0xa5",
- "EventName": "RS.EMPTY_COUNT",
- "Invert": "1",
- "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
- "SampleAfterValue": "100003",
- "UMask": "0x7",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Cycles when RS was empty and a resource allocation stall is asserted",
- "Counter": "0,1,2,3,4,5,6,7,8,9",
- "EventCode": "0xa5",
- "EventName": "RS.EMPTY_RESOURCE",
- "SampleAfterValue": "1000003",
- "UMask": "0x1",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0x75",
- "EventName": "SERIALIZATION.C01_MS_SCB",
- "SampleAfterValue": "1000003",
- "UMask": "0x4",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts the number issue slots not consumed due to a color request for an FCW or MXCSR control register when all 4 colors (copies) are already in use",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0x75",
- "EventName": "SERIALIZATION.COLOR_STALLS",
- "SampleAfterValue": "1000003",
- "UMask": "0x8",
- "Unit": "cpu_atom"
- },
- {
"BriefDescription": "Cycles the uncore cannot take further requests",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"CounterMask": "1",
@@ -526,6 +178,7 @@
"EventCode": "0xf4",
"EventName": "XQ_PROMOTION.ALL",
"SampleAfterValue": "1000003",
+ "UMask": "0x7",
"Unit": "cpu_atom"
},
{
diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/pipeline.json b/tools/perf/pmu-events/arch/x86/lunarlake/pipeline.json
index f4ec7a884937..97797f7b072e 100644
--- a/tools/perf/pmu-events/arch/x86/lunarlake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/lunarlake/pipeline.json
@@ -21,8 +21,9 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Counts the number of active floating point and integer dividers per cycle.",
+ "BriefDescription": "This event is deprecated.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Deprecated": "1",
"EventCode": "0xcd",
"EventName": "ARITH.DIV_OCCUPANCY",
"SampleAfterValue": "1000003",
@@ -30,8 +31,9 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of floating point and integer divider uops executed per cycle.",
+ "BriefDescription": "This event is deprecated.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Deprecated": "1",
"EventCode": "0xcd",
"EventName": "ARITH.DIV_UOPS",
"SampleAfterValue": "1000003",
@@ -88,6 +90,15 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xa2",
+ "EventName": "BE_STALLS.SCOREBOARD",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts the total number of branch instructions retired for all branch types.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
@@ -101,7 +112,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "PublicDescription": "Counts all branch instructions retired.",
+ "PublicDescription": "Counts all branch instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"Unit": "cpu_core"
},
@@ -119,7 +130,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND",
- "PublicDescription": "Counts conditional branch instructions retired.",
+ "PublicDescription": "Counts conditional branch instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x111",
"Unit": "cpu_core"
@@ -138,7 +149,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_NTAKEN",
- "PublicDescription": "Counts not taken branch instructions retired.",
+ "PublicDescription": "Counts not taken branch instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x10",
"Unit": "cpu_core"
@@ -157,7 +168,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_TAKEN",
- "PublicDescription": "Counts taken conditional branch instructions retired.",
+ "PublicDescription": "Counts taken conditional branch instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x101",
"Unit": "cpu_core"
@@ -167,7 +178,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_TAKEN_BWD",
- "PublicDescription": "Counts taken backward conditional branch instructions retired.",
+ "PublicDescription": "Counts taken backward conditional branch instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -177,7 +188,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_TAKEN_FWD",
- "PublicDescription": "Counts taken forward conditional branch instructions retired.",
+ "PublicDescription": "Counts taken forward conditional branch instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x102",
"Unit": "cpu_core"
@@ -196,7 +207,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "PublicDescription": "Counts far branch instructions retired.",
+ "PublicDescription": "Counts far branch instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x40",
"Unit": "cpu_core"
@@ -215,7 +226,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT",
- "PublicDescription": "Counts near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
+ "PublicDescription": "Counts near indirect branch instructions retired excluding returns. TSX abort is an indirect branch. Available PDIST counters: 0,1",
"SampleAfterValue": "100003",
"UMask": "0x80",
"Unit": "cpu_core"
@@ -252,7 +263,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
- "PublicDescription": "Counts both direct and indirect near call instructions retired.",
+ "PublicDescription": "Counts both direct and indirect near call instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x2",
"Unit": "cpu_core"
@@ -271,13 +282,13 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "PublicDescription": "Counts return instructions retired.",
+ "PublicDescription": "Counts return instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x8",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Counts the number of taken branch instructions retired",
+ "BriefDescription": "Counts the number of near taken branch instructions retired",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
@@ -290,7 +301,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Counts taken branch instructions retired.",
+ "PublicDescription": "Counts taken branch instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x20",
"Unit": "cpu_core"
@@ -318,7 +329,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"Unit": "cpu_core"
},
@@ -327,6 +338,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES_COST",
+ "PublicDescription": "All mispredicted branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x44",
"Unit": "cpu_core"
@@ -345,7 +357,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND",
- "PublicDescription": "Counts mispredicted conditional branch instructions retired.",
+ "PublicDescription": "Counts mispredicted conditional branch instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x111",
"Unit": "cpu_core"
@@ -355,6 +367,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_COST",
+ "PublicDescription": "Mispredicted conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x151",
"Unit": "cpu_core"
@@ -373,7 +386,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_NTAKEN",
- "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken.",
+ "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x10",
"Unit": "cpu_core"
@@ -383,6 +396,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_NTAKEN_COST",
+ "PublicDescription": "Mispredicted non-taken conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x50",
"Unit": "cpu_core"
@@ -401,7 +415,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN",
- "PublicDescription": "Counts taken conditional mispredicted branch instructions retired.",
+ "PublicDescription": "Counts taken conditional mispredicted branch instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x101",
"Unit": "cpu_core"
@@ -411,7 +425,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN_BWD",
- "PublicDescription": "Counts taken backward conditional mispredicted branch instructions retired.",
+ "PublicDescription": "Counts taken backward conditional mispredicted branch instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -421,6 +435,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN_BWD_COST",
+ "PublicDescription": "number of branch instructions retired that were mispredicted and taken backward. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x8001",
"Unit": "cpu_core"
@@ -430,6 +445,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN_COST",
+ "PublicDescription": "Mispredicted taken conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x141",
"Unit": "cpu_core"
@@ -439,7 +455,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN_FWD",
- "PublicDescription": "Counts taken forward conditional mispredicted branch instructions retired.",
+ "PublicDescription": "Counts taken forward conditional mispredicted branch instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"Unit": "cpu_core"
},
@@ -448,6 +464,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN_FWD_COST",
+ "PublicDescription": "number of branch instructions retired that were mispredicted and taken forward. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x8002",
"Unit": "cpu_core"
@@ -466,7 +483,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT",
- "PublicDescription": "Counts miss-predicted near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
+ "PublicDescription": "Counts miss-predicted near indirect branch instructions retired excluding returns. TSX abort is an indirect branch. Available PDIST counters: 0,1",
"SampleAfterValue": "100003",
"UMask": "0x80",
"Unit": "cpu_core"
@@ -485,7 +502,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
- "PublicDescription": "Counts retired mispredicted indirect (near taken) CALL instructions, including both register and memory indirect.",
+ "PublicDescription": "Counts retired mispredicted indirect (near taken) CALL instructions, including both register and memory indirect. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x2",
"Unit": "cpu_core"
@@ -495,6 +512,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_CALL_COST",
+ "PublicDescription": "Mispredicted indirect CALL retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x42",
"Unit": "cpu_core"
@@ -504,6 +522,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_COST",
+ "PublicDescription": "Mispredicted near indirect branch instructions retired (excluding returns). This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0,1",
"SampleAfterValue": "100003",
"UMask": "0xc0",
"Unit": "cpu_core"
@@ -531,7 +550,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken.",
+ "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x20",
"Unit": "cpu_core"
@@ -541,6 +560,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN_COST",
+ "PublicDescription": "Mispredicted taken near branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0,1",
"SampleAfterValue": "400009",
"UMask": "0x60",
"Unit": "cpu_core"
@@ -550,7 +570,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RET",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x8",
"Unit": "cpu_core"
@@ -569,6 +589,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RET_COST",
+ "PublicDescription": "Mispredicted ret instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0,1",
"SampleAfterValue": "100007",
"UMask": "0x48",
"Unit": "cpu_core"
@@ -758,6 +779,15 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Count number of times a load is depending on another load that had just write back its data or in previous or 2 cycles back. This event supports in-direct dependency through a single uop.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x02",
+ "EventName": "DEPENDENT_LOADS.ANY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xa6",
@@ -851,6 +881,7 @@
"BriefDescription": "Fixed Counter: Counts the number of instructions retired.",
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
+ "PublicDescription": "Fixed Counter: Counts the number of instructions retired. Available PDIST counters: 32",
"SampleAfterValue": "2000003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -859,7 +890,7 @@
"BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
- "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter. Available PDIST counters: 32",
"SampleAfterValue": "2000003",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -877,7 +908,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.ANY_P",
- "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter. Available PDIST counters: 0,1",
"SampleAfterValue": "2000003",
"Unit": "cpu_core"
},
@@ -886,6 +917,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.BR_FUSED",
+ "PublicDescription": "retired macro-fused uops when there is a branch in the macro-fused pair (the two instructions that got macro-fused count once in this pmon) Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x10",
"Unit": "cpu_core"
@@ -895,6 +927,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.MACRO_FUSED",
+ "PublicDescription": "INST_RETIRED.MACRO_FUSED Available PDIST counters: 0,1",
"SampleAfterValue": "2000003",
"UMask": "0x30",
"Unit": "cpu_core"
@@ -904,7 +937,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.NOP",
- "PublicDescription": "Counts all retired NOP or ENDBR32/64 or PREFETCHIT0/1 instructions",
+ "PublicDescription": "Counts all retired NOP or ENDBR32/64 or PREFETCHIT0/1 instructions Available PDIST counters: 0,1",
"SampleAfterValue": "2000003",
"UMask": "0x2",
"Unit": "cpu_core"
@@ -913,7 +946,7 @@
"BriefDescription": "Precise instruction retired with PEBS precise-distribution",
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.PREC_DIST",
- "PublicDescription": "A version of INST_RETIRED that allows for a precise distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR++) feature to fix bias in how retired instructions get sampled. Use on Fixed Counter 0.",
+ "PublicDescription": "A version of INST_RETIRED that allows for a precise distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR++) feature to fix bias in how retired instructions get sampled. Use on Fixed Counter 0. Available PDIST counters: 32",
"SampleAfterValue": "2000003",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -923,7 +956,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.REP_ITERATION",
- "PublicDescription": "Number of iterations of Repeat (REP) string retired instructions such as MOVS, CMPS, and SCAS. Each has a byte, word, and doubleword version and string instructions can be repeated using a repetition prefix, REP, that allows their architectural execution to be repeated a number of times as specified by the RCX register. Note the number of iterations is implementation-dependent.",
+ "PublicDescription": "Number of iterations of Repeat (REP) string retired instructions such as MOVS, CMPS, and SCAS. Each has a byte, word, and doubleword version and string instructions can be repeated using a repetition prefix, REP, that allows their architectural execution to be repeated a number of times as specified by the RCX register. Note the number of iterations is implementation-dependent. Available PDIST counters: 0,1",
"SampleAfterValue": "2000003",
"UMask": "0x8",
"Unit": "cpu_core"
@@ -993,6 +1026,98 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of uops executed on secondary integer ports 0,1,2,3.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.2ND",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on all Integer ports.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.ALL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xff",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on a load port.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.LD",
+ "PublicDescription": "Counts the number of uops executed on a load port. This event counts for integer uops even if the destination is FP/vector",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on integer port 0.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.P0",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on integer port 1.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.P1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on integer port 2.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.P2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on integer port 3.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.P3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on integer port 0,1, 2, 3.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.PRIMARY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x78",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on a Store address port.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.STA",
+ "PublicDescription": "Counts the number of uops executed on a Store address port. This event counts integer uops even if the data source is FP/vector",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on an integer store data and jump port.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.STD_JMP",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Number of vector integer instructions retired of 128-bit vector-width.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xe7",
@@ -1091,7 +1216,7 @@
"EventCode": "0x03",
"EventName": "LD_BLOCKS.ALL",
"SampleAfterValue": "1000003",
- "UMask": "0x10",
+ "UMask": "0x1f",
"Unit": "cpu_atom"
},
{
@@ -1114,6 +1239,15 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of times a load got early blocked due to preceding store operation with unknown address or unknown data. Excluding in-line (immediate) wakeups",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_EARLY",
+ "SampleAfterValue": "100003",
+ "UMask": "0xa1",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts the number of occurrences a retired load gets blocked because its address partially overlaps with an older store (size mismatch) - unknown_sta/bad_forward",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x03",
@@ -1133,8 +1267,18 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Counts the number of demand loads that match on a wcb (request buffer) allocated by an L1 hardware prefetch",
+ "BriefDescription": "Counts the number of demand loads that match on a wcb (request buffer) allocated by an L1 hardware prefetch [This event is alias to LOAD_HIT_PREFETCH.HW_PF]",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x4c",
+ "EventName": "LOAD_HIT_PREFETCH.HWPF",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "This event is deprecated. [This event is alias to LOAD_HIT_PREFETCH.HWPF]",
"Counter": "0,1,2,3,4,5,6,7",
+ "Deprecated": "1",
"EventCode": "0x4c",
"EventName": "LOAD_HIT_PREFETCH.HW_PF",
"SampleAfterValue": "1000003",
@@ -1268,6 +1412,42 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts cycles where no execution is happening due to loads waiting for L1 cache (that is: no execution & load in flight & no load missed L1 cache)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x46",
+ "EventName": "MEMORY_STALLS.L1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts cycles where no execution is happening due to loads waiting for L2 cache (that is: no execution & load in flight & load missed L1 & no load missed L2 cache)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x46",
+ "EventName": "MEMORY_STALLS.L2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts cycles where no execution is happening due to loads waiting for L3 cache (that is: no execution & load in flight & load missed L1 & load missed L2 cache & no load missed L3 Cache)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x46",
+ "EventName": "MEMORY_STALLS.L3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts cycles where no execution is happening due to loads waiting for Memory (that is: no execution & load in flight & a load missed L3 cache)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x46",
+ "EventName": "MEMORY_STALLS.MEM",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "LFENCE instructions retired",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xe0",
@@ -1291,6 +1471,7 @@
"Counter": "0,1,2,3,4,5,6,7,8,9",
"EventCode": "0xe4",
"EventName": "MISC_RETIRED.LBR_INSERTS",
+ "PublicDescription": "LBR record is inserted Available PDIST counters: 0,1",
"SampleAfterValue": "1000003",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -1394,6 +1575,65 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY_COUNT",
+ "Invert": "1",
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when RS was empty and a resource allocation stall is asserted",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY_RESOURCE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x75",
+ "EventName": "SERIALIZATION.C01_MS_SCB",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number issue slots not consumed due to a color request for an FCW or MXCSR control register when all 4 colors (copies) are already in use",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x75",
+ "EventName": "SERIALIZATION.COLOR_STALLS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots where no uop could issue due to an IQ scoreboard that stalls allocation until a specified older uop retires or (in the case of jump scoreboard) executes. Commonly executed instructions with IQ scoreboards include LFENCE and MFENCE.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x75",
+ "EventName": "SERIALIZATION.IQ_JEU_SCB",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of issue slots not consumed by the backend due to a micro-sequencer (MS) scoreboard, which stalls the front-end from issuing from the UROM until a specified older uop retires.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x75",
@@ -1463,7 +1703,7 @@
},
{
"BriefDescription": "Fixed Counter: Counts the number of issue slots not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear.",
- "Counter": "36",
+ "Counter": "Fixed counter 4",
"EventName": "TOPDOWN_BAD_SPECULATION.ALL",
"PublicDescription": "Fixed Counter: Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the IQ. Also, includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
"SampleAfterValue": "1000003",
@@ -1596,7 +1836,7 @@
},
{
"BriefDescription": "Fixed Counter: Counts the number of retirement slots not consumed due to front end stalls.",
- "Counter": "37",
+ "Counter": "Fixed counter 5",
"EventName": "TOPDOWN_FE_BOUND.ALL",
"SampleAfterValue": "1000003",
"UMask": "0x6",
@@ -1702,7 +1942,7 @@
},
{
"BriefDescription": "Fixed Counter: Counts the number of consumed retirement slots.",
- "Counter": "38",
+ "Counter": "Fixed counter 6",
"EventName": "TOPDOWN_RETIRING.ALL",
"SampleAfterValue": "1000003",
"UMask": "0x7",
diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/lunarlake/uncore-interconnect.json
new file mode 100644
index 000000000000..69ef928d57f6
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/lunarlake/uncore-interconnect.json
@@ -0,0 +1,10 @@
+[
+ {
+ "BriefDescription": "This 48-bit fixed counter counts the UCLK cycles.",
+ "Counter": "FIXED",
+ "EventCode": "0xff",
+ "EventName": "UNC_CLOCK.SOCKET",
+ "PerPkg": "1",
+ "Unit": "SANTA"
+ }
+]
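
A hedged usage sketch for the new uncore fixed counter (not part of the patch): uncore events are package-scoped, so counting them needs system-wide mode; the duration is illustrative:

    # -a: system-wide; uncore counters are not per-task.
    perf stat -a -e UNC_CLOCK.SOCKET -- sleep 1

Over one second this should read back roughly the UCLK frequency per package, which makes it a quick sanity check that the SANTA unit's fixed counter is being programmed.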
diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/uncore-memory.json b/tools/perf/pmu-events/arch/x86/lunarlake/uncore-memory.json
index 7d63580302de..63c4aa2791e4 100644
--- a/tools/perf/pmu-events/arch/x86/lunarlake/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/lunarlake/uncore-memory.json
@@ -32,5 +32,13 @@
"Experimental": "1",
"PerPkg": "1",
"Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Total number of read and write byte transfers to/from DRAM, in 32B chunk, per DDR channel. Counter increments by 1 after sending or receiving 32B chunk data.",
+ "Counter": "0,1,2,3,4",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M_TOTAL_DATA",
+ "PerPkg": "1",
+ "Unit": "iMC"
}
]
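
Since UNC_M_TOTAL_DATA increments once per 32-byte chunk, DRAM bandwidth can be estimated from it; a hedged sketch, with the interval choice illustrative and not part of the patch:

    # Print counts every 1000 ms, system-wide.
    perf stat -a -I 1000 -e UNC_M_TOTAL_DATA
    # bytes per interval ~= count * 32, so at a 1s interval
    # MB/s ~= count * 32 / 1e6 for the reported iMC unit.

Note perf may aggregate across iMC instances, so treat the figure as per reported unit rather than strictly per DDR channel.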
diff --git a/tools/perf/pmu-events/arch/x86/lunarlake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/lunarlake/virtual-memory.json
index defa3a967754..e60a5e904da2 100644
--- a/tools/perf/pmu-events/arch/x86/lunarlake/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/lunarlake/virtual-memory.json
@@ -37,24 +37,6 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Counts the number of first level TLB misses but second level hits due to a demand load that did not start a page walk. Account for 4k page size only. Will result in a DTLB write from STLB.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0x08",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT_4K",
- "SampleAfterValue": "200003",
- "UMask": "0x20",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts the number of first level TLB misses but second level hits due to a demand load that did not start a page walk. Account for large page sizes only. Will result in a DTLB write from STLB.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0x08",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT_LGPG",
- "SampleAfterValue": "200003",
- "UMask": "0x40",
- "Unit": "cpu_atom"
- },
- {
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a demand load.",
"Counter": "0,1,2,3,4,5,6,7,8,9",
"CounterMask": "1",
diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv
index 56d5fc419acf..3d0c57198056 100644
--- a/tools/perf/pmu-events/arch/x86/mapfile.csv
+++ b/tools/perf/pmu-events/arch/x86/mapfile.csv
@@ -1,40 +1,41 @@
Family-model,Version,Filename,EventType
-GenuineIntel-6-(97|9A|B7|BA|BF),v1.28,alderlake,core
-GenuineIntel-6-BE,v1.28,alderlaken,core
-GenuineIntel-6-C[56],v1.07,arrowlake,core
+GenuineIntel-6-(97|9A|B7|BA|BF),v1.35,alderlake,core
+GenuineIntel-6-BE,v1.35,alderlaken,core
+GenuineIntel-6-C[56],v1.14,arrowlake,core
GenuineIntel-6-(1C|26|27|35|36),v5,bonnell,core
GenuineIntel-6-(3D|47),v30,broadwell,core
GenuineIntel-6-56,v12,broadwellde,core
GenuineIntel-6-4F,v23,broadwellx,core
-GenuineIntel-6-55-[56789ABCDEF],v1.23,cascadelakex,core
+GenuineIntel-6-55-[56789ABCDEF],v1.25,cascadelakex,core
GenuineIntel-6-DD,v1.00,clearwaterforest,core
GenuineIntel-6-9[6C],v1.05,elkhartlake,core
-GenuineIntel-6-CF,v1.11,emeraldrapids,core
+GenuineIntel-6-CF,v1.20,emeraldrapids,core
GenuineIntel-6-5[CF],v13,goldmont,core
GenuineIntel-6-7A,v1.01,goldmontplus,core
-GenuineIntel-6-B6,v1.05,grandridge,core
-GenuineIntel-6-A[DE],v1.06,graniterapids,core
+GenuineIntel-6-B6,v1.10,grandridge,core
+GenuineIntel-6-A[DE],v1.16,graniterapids,core
GenuineIntel-6-(3C|45|46),v36,haswell,core
GenuineIntel-6-3F,v29,haswellx,core
GenuineIntel-6-7[DE],v1.24,icelake,core
-GenuineIntel-6-6[AC],v1.27,icelakex,core
+GenuineIntel-6-6[AC],v1.30,icelakex,core
GenuineIntel-6-3A,v24,ivybridge,core
GenuineIntel-6-3E,v24,ivytown,core
GenuineIntel-6-2D,v24,jaketown,core
GenuineIntel-6-(57|85),v16,knightslanding,core
-GenuineIntel-6-BD,v1.11,lunarlake,core
-GenuineIntel-6-(AA|AC|B5),v1.12,meteorlake,core
+GenuineIntel-6-BD,v1.19,lunarlake,core
+GenuineIntel-6-(AA|AC|B5),v1.18,meteorlake,core
GenuineIntel-6-1[AEF],v4,nehalemep,core
GenuineIntel-6-2E,v4,nehalemex,core
+GenuineIntel-6-CC,v1.02,pantherlake,core
GenuineIntel-6-A7,v1.04,rocketlake,core
GenuineIntel-6-2A,v19,sandybridge,core
-GenuineIntel-6-8F,v1.25,sapphirerapids,core
-GenuineIntel-6-AF,v1.08,sierraforest,core
+GenuineIntel-6-8F,v1.35,sapphirerapids,core
+GenuineIntel-6-AF,v1.13,sierraforest,core
GenuineIntel-6-(37|4A|4C|4D|5A),v15,silvermont,core
GenuineIntel-6-(4E|5E|8E|9E|A5|A6),v59,skylake,core
-GenuineIntel-6-55-[01234],v1.36,skylakex,core
+GenuineIntel-6-55-[01234],v1.37,skylakex,core
GenuineIntel-6-86,v1.23,snowridgex,core
-GenuineIntel-6-8[CD],v1.17,tigerlake,core
+GenuineIntel-6-8[CD],v1.18,tigerlake,core
GenuineIntel-6-2C,v5,westmereep-dp,core
GenuineIntel-6-25,v4,westmereep-sp,core
GenuineIntel-6-2F,v4,westmereex,core
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/cache.json b/tools/perf/pmu-events/arch/x86/meteorlake/cache.json
index ce351cd7caaf..d3fc04b2ffbd 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/cache.json
@@ -1,5 +1,15 @@
[
{
+ "BriefDescription": "Counts the number of L1D cacheline (dirty) evictions caused by load misses, stores, and prefetches.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x51",
+ "EventName": "DL1.DIRTY_EVICTION",
+ "PublicDescription": "Counts the number of L1D cacheline (dirty) evictions caused by load misses, stores, and prefetches. Does not count evictions or dirty writebacks caused by snoops. Does not count a replacement unless a (dirty) line was written back.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "L1D.HWPF_MISS",
"Counter": "0,1,2,3",
"EventCode": "0x51",
@@ -82,6 +92,56 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of cache lines filled into the L2 cache that are in Exclusive state",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x25",
+ "EventName": "L2_LINES_IN.E",
+ "PublicDescription": "Counts the number of cache lines filled into the L2 cache that are in Exclusive state. Counts on a per core basis.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cache lines filled into the L2 cache that are in Forward state",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x25",
+ "EventName": "L2_LINES_IN.F",
+ "PublicDescription": "Counts the number of cache lines filled into the L2 cache that are in Forward state. Counts on a per core basis.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cache lines filled into the L2 cache that are in Modified state",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x25",
+ "EventName": "L2_LINES_IN.M",
+ "PublicDescription": "Counts the number of cache lines filled into the L2 cache that are in Modified state. Counts on a per core basis.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cache lines filled into the L2 cache that are in Shared state",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x25",
+ "EventName": "L2_LINES_IN.S",
+ "PublicDescription": "Counts the number of cache lines filled into the L2 cache that are in Shared state. Counts on a per core basis.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of L2 cache lines that are evicted due to an L2 cache fill",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.NON_SILENT",
+ "PublicDescription": "Counts the number of L2 cache lines that are evicted due to an L2 cache fill. Increments on the core that brought the line in originally.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Modified cache lines that are evicted by L2 cache when triggered by an L2 cache fill.",
"Counter": "0,1,2,3",
"EventCode": "0x26",
@@ -92,6 +152,16 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of L2 cache lines that are silently dropped due to an L2 cache fill",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.SILENT",
+ "PublicDescription": "Counts the number of L2 cache lines that are silently dropped due to an L2 cache fill. Increments on the core that brought the line in originally.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Non-modified cache lines that are silently dropped by L2 cache.",
"Counter": "0,1,2,3",
"EventCode": "0x26",
@@ -122,6 +192,15 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of L2 Cache Accesses that resulted in a Hit from a front door request only (does not include rejects or recycles), per core event",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "All requests that hit L2 cache. [This event is alias to L2_RQSTS.HIT]",
"Counter": "0,1,2,3",
"EventCode": "0x24",
@@ -132,6 +211,15 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of total L2 Cache Accesses that resulted in a Miss from a front door request only (does not include rejects or recycles), per core event",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.MISS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Read requests with true-miss in L2 cache [This event is alias to L2_RQSTS.MISS]",
"Counter": "0,1,2,3",
"EventCode": "0x24",
@@ -142,6 +230,15 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of L2 Cache Accesses that miss the L2 and get BBL reject short and long rejects, per core event",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.REJECTS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "L2 code requests",
"Counter": "0,1,2,3",
"EventCode": "0x24",
@@ -399,6 +496,15 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which missed in the L2 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x35",
+ "EventName": "MEM_BOUND_STALLS_IFETCH.L2_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7e",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an ICACHE or ITLB miss which hit in the LLC. If the core has access to an L3 cache, an LLC hit refers to an L3 cache hit, otherwise it counts zeros.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x35",
@@ -436,6 +542,15 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load which missed in the L2 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS_LOAD.L2_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7e",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which hit in the LLC. If the core has access to an L3 cache, an LLC hit refers to an L3 cache hit, otherwise it counts zeros.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x34",
@@ -454,12 +569,21 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled to a store buffer full condition",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS_LOAD.SBFULL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Retired load instructions.",
"Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_LOADS",
- "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW.",
+ "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW. Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x81",
"Unit": "cpu_core"
@@ -470,7 +594,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_STORES",
- "PublicDescription": "Counts all retired store instructions.",
+ "PublicDescription": "Counts all retired store instructions. Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x82",
"Unit": "cpu_core"
@@ -481,7 +605,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ANY",
- "PublicDescription": "Counts all retired memory instructions - loads and stores.",
+ "PublicDescription": "Counts all retired memory instructions - loads and stores. Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x83",
"Unit": "cpu_core"
@@ -492,7 +616,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.LOCK_LOADS",
- "PublicDescription": "Counts retired load instructions with locked access.",
+ "PublicDescription": "Counts retired load instructions with locked access. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x21",
"Unit": "cpu_core"
@@ -503,7 +627,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
- "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
+ "PublicDescription": "Counts retired load instructions that split across a cacheline boundary. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x41",
"Unit": "cpu_core"
@@ -514,7 +638,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_STORES",
- "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
+ "PublicDescription": "Counts retired store instructions that split across a cacheline boundary. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x42",
"Unit": "cpu_core"
@@ -525,7 +649,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_HIT_LOADS",
- "PublicDescription": "Number of retired load instructions with a clean hit in the 2nd-level TLB (STLB).",
+ "PublicDescription": "Number of retired load instructions with a clean hit in the 2nd-level TLB (STLB). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x9",
"Unit": "cpu_core"
@@ -536,7 +660,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_HIT_STORES",
- "PublicDescription": "Number of retired store instructions that hit in the 2nd-level TLB (STLB).",
+ "PublicDescription": "Number of retired store instructions that hit in the 2nd-level TLB (STLB). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0xa",
"Unit": "cpu_core"
@@ -547,7 +671,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
- "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x11",
"Unit": "cpu_core"
@@ -558,7 +682,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
- "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x12",
"Unit": "cpu_core"
@@ -579,7 +703,7 @@
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD",
- "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3.",
+ "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3. Available PDIST counters: 0",
"SampleAfterValue": "20011",
"UMask": "0x4",
"Unit": "cpu_core"
@@ -590,7 +714,7 @@
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
- "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache. Available PDIST counters: 0",
"SampleAfterValue": "20011",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -601,7 +725,7 @@
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
- "PublicDescription": "Counts retired load instructions whose data sources were hits in L3 without snoops required.",
+ "PublicDescription": "Counts retired load instructions whose data sources were hits in L3 without snoops required. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x8",
"Unit": "cpu_core"
@@ -612,7 +736,7 @@
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD",
- "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache. Available PDIST counters: 0",
"SampleAfterValue": "20011",
"UMask": "0x2",
"Unit": "cpu_core"
@@ -623,7 +747,7 @@
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
- "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from local DRAM.",
+ "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from local DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -634,7 +758,7 @@
"Data_LA": "1",
"EventCode": "0xd4",
"EventName": "MEM_LOAD_MISC_RETIRED.UC",
- "PublicDescription": "Retired instructions with at least one load to uncacheable memory-type, or at least one cache-line split locked access (Bus Lock).",
+ "PublicDescription": "Retired instructions with at least one load to uncacheable memory-type, or at least one cache-line split locked access (Bus Lock). Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x4",
"Unit": "cpu_core"
@@ -645,7 +769,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.FB_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x40",
"Unit": "cpu_core"
@@ -656,7 +780,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source. Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -667,7 +791,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_MISS",
- "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x8",
"Unit": "cpu_core"
@@ -678,7 +802,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_HIT",
- "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources.",
+ "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x2",
"Unit": "cpu_core"
@@ -689,7 +813,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_MISS",
- "PublicDescription": "Counts retired load instructions missed L2 cache as data sources.",
+ "PublicDescription": "Counts retired load instructions missed L2 cache as data sources. Available PDIST counters: 0",
"SampleAfterValue": "100021",
"UMask": "0x10",
"Unit": "cpu_core"
@@ -700,7 +824,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100021",
"UMask": "0x4",
"Unit": "cpu_core"
@@ -711,7 +835,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_MISS",
- "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "50021",
"UMask": "0x20",
"Unit": "cpu_core"
@@ -1055,12 +1179,49 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts writebacks of modified cachelines that hit in the L3 or were snooped from another core's caches.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.COREWB_M.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0008",
+ "PublicDescription": "Counts writebacks of modified cachelines that hit in the L3 or were snooped from another core's caches. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts writebacks of non-modified cachelines that hit in the L3 or were snooped from another core's caches.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.COREWB_NONM.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C1000",
+ "PublicDescription": "Counts writebacks of non-modified cachelines that hit in the L3 or were snooped from another core's caches. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -1072,6 +1233,7 @@
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -1083,6 +1245,7 @@
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -1094,17 +1257,43 @@
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "PublicDescription": "Counts demand data reads that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "PublicDescription": "Counts demand data reads that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts demand data reads that were supplied by the L3 cache.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0001",
+ "PublicDescription": "Counts demand data reads that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -1116,6 +1305,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0001",
+ "PublicDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -1127,6 +1317,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0001",
+ "PublicDescription": "Counts demand data reads that resulted in a snoop hit in another cores caches, data forwarding is required as the data is modified. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -1138,6 +1329,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0001",
+ "PublicDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -1149,6 +1341,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0001",
+ "PublicDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -1160,6 +1353,31 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0001",
+ "PublicDescription": "Counts demand data reads that resulted in a snoop hit in another cores caches which forwarded the unmodified data to the requesting core. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "PublicDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -1171,6 +1389,7 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -1182,6 +1401,7 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -1193,6 +1413,19 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0002",
+ "PublicDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that resulted in a snoop hit in another cores caches, data forwarding is required as the data is modified. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read, RFO and ITOM requests including demands and prefetches to the core caches (L1 or L2) that hit in the L3 or were snooped from another core's caches.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C4477",
+ "PublicDescription": "Counts all data read, code read, RFO and ITOM requests including demands and prefetches to the core caches (L1 or L2) that hit in the L3 or were snooped from another core's caches. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json b/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json
index a10614513c8d..6484834b1127 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json
@@ -49,13 +49,21 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of instructions retired that were tagged with having preceded with frontend bound behavior",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ALL",
+ "SampleAfterValue": "1000003",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Retired ANT branches",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.ANY_ANT",
"MSRIndex": "0x3F7",
"MSRValue": "0x9",
- "PublicDescription": "Always Not Taken (ANT) conditional retired branches (no BTB entry and not mispredicted)",
+ "PublicDescription": "Always Not Taken (ANT) conditional retired branches (no BTB entry and not mispredicted) Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -67,24 +75,70 @@
"EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x1",
- "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of instruction retired that are tagged after a branch instruction causes bubbles/empty issue slots due to a baclear",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.BRANCH_DETECT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of instruction retired that are tagged after a branch instruction causes bubbles /empty issue slots due to a btclear",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.BRANCH_RESTEER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of instructions retired that were tagged following an ms flow due to the bubble/wasted issue slot from exiting long ms flow",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.CISC",
+ "PublicDescription": "Counts the number of instructions retired that were tagged following an ms flow due to the bubble/wasted issue slot from exiting long ms flow",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of instructions retired that were tagged every cycle the decoder is unable to send 3 uops per cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.DECODE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Retired Instructions who experienced a critical DSB miss.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x11",
- "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss.",
+ "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of instructions retired that were tagged because empty issue slots were seen before the uop due to icache miss",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ICACHE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of instructions retired that were tagged because empty issue slots were seen before the uop due to ITLB miss",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
@@ -100,7 +154,7 @@
"EventName": "FRONTEND_RETIRED.ITLB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x14",
- "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -112,7 +166,7 @@
"EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x12",
- "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss.",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -124,7 +178,7 @@
"EventName": "FRONTEND_RETIRED.L2_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x13",
- "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss.",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -136,7 +190,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
"MSRIndex": "0x3F7",
"MSRValue": "0x600106",
- "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall.",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -148,7 +202,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
"MSRValue": "0x608006",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -160,7 +214,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
"MSRValue": "0x601006",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -172,7 +226,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
"MSRValue": "0x600206",
- "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -184,7 +238,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
"MSRIndex": "0x3F7",
"MSRValue": "0x610006",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -196,7 +250,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
"MSRValue": "0x100206",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -208,7 +262,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
"MSRValue": "0x602006",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -220,7 +274,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
"MSRValue": "0x600406",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -232,7 +286,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
"MSRValue": "0x620006",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -244,7 +298,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
"MSRValue": "0x604006",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -256,7 +310,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
"MSRValue": "0x600806",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -268,7 +322,7 @@
"EventName": "FRONTEND_RETIRED.MISP_ANT",
"MSRIndex": "0x3F7",
"MSRValue": "0x9",
- "PublicDescription": "ANT retired branches that got just mispredicted",
+ "PublicDescription": "ANT retired branches that got just mispredicted Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x2",
"Unit": "cpu_core"
@@ -280,18 +334,37 @@
"EventName": "FRONTEND_RETIRED.MS_FLOWS",
"MSRIndex": "0x3F7",
"MSRValue": "0x8",
+ "PublicDescription": "FRONTEND_RETIRED.MS_FLOWS Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts the number of instruction retired tagged after a wasted issue slot if none of the previous events occurred",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.OTHER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of instruction retired that are tagged after a branch instruction causes bubbles/empty issue slots due to a predecode wrong.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.PREDECODE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x15",
- "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
@@ -303,6 +376,7 @@
"EventName": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
"MSRIndex": "0x3F7",
"MSRValue": "0x17",
+ "PublicDescription": "FRONTEND_RETIRED.UNKNOWN_BRANCH Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x3",
"Unit": "cpu_core"
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/memory.json b/tools/perf/pmu-events/arch/x86/meteorlake/memory.json
index e4481fbc1e13..f0cbeda4d5ca 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/memory.json
@@ -143,7 +143,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_1024",
"MSRIndex": "0x3F6",
"MSRValue": "0x400",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 1024 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 1024 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0",
"SampleAfterValue": "53",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -156,7 +156,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
"MSRValue": "0x80",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0",
"SampleAfterValue": "1009",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -169,7 +169,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
"MSRValue": "0x10",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0",
"SampleAfterValue": "20011",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -182,7 +182,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_2048",
"MSRIndex": "0x3F6",
"MSRValue": "0x800",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 2048 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 2048 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0",
"SampleAfterValue": "23",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -195,7 +195,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
"MSRIndex": "0x3F6",
"MSRValue": "0x100",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0",
"SampleAfterValue": "503",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -208,7 +208,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
"MSRValue": "0x20",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -221,7 +221,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
"MSRValue": "0x4",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -234,7 +234,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
"MSRValue": "0x200",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0",
"SampleAfterValue": "101",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -247,7 +247,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
"MSRValue": "0x40",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0",
"SampleAfterValue": "2003",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -260,7 +260,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
"MSRValue": "0x8",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0",
"SampleAfterValue": "50021",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -271,7 +271,7 @@
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.STORE_SAMPLE",
- "PublicDescription": "Counts Retired memory accesses with at least 1 store operation. This PEBS event is the precisely-distributed (PDist) trigger covering all stores uops for sampling by the PEBS Store Latency Facility. The facility is described in Intel SDM Volume 3 section 19.9.8",
+ "PublicDescription": "Counts Retired memory accesses with at least 1 store operation. This PEBS event is the precisely-distributed (PDist) trigger covering all stores uops for sampling by the PEBS Store Latency Facility. The facility is described in Intel SDM Volume 3 section 19.9.8 Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x2",
"Unit": "cpu_core"
@@ -295,23 +295,61 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the L3 cache.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "PublicDescription": "Counts demand data reads that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "PublicDescription": "Counts demand data reads that were supplied by DRAM. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00001",
+ "PublicDescription": "Counts demand data reads that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -323,17 +361,31 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00001",
+ "PublicDescription": "Counts demand data reads that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -345,6 +397,7 @@
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00002",
+ "PublicDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/mtl-metrics.json b/tools/perf/pmu-events/arch/x86/meteorlake/mtl-metrics.json
index 20c52630127e..948c16a1f95b 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/mtl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/mtl-metrics.json
@@ -1,56 +1,56 @@
[
{
"BriefDescription": "C10 residency percent per package",
- "MetricExpr": "cstate_pkg@c10\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c10\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C10_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C1 residency percent per core",
- "MetricExpr": "cstate_core@c1\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c1\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C1_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C2 residency percent per package",
- "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per package",
- "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per core",
- "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per package",
- "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per core",
- "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C8 residency percent per package",
- "MetricExpr": "cstate_pkg@c8\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c8\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C8_Pkg_Residency",
"ScaleUnit": "100%"
@@ -75,7 +75,7 @@
"MetricExpr": "tma_core_bound",
"MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_allocation_restriction",
- "MetricThreshold": "(tma_allocation_restriction >0.10) & ((tma_core_bound >0.10) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_allocation_restriction > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -85,7 +85,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.ALL_P@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
- "MetricThreshold": "(tma_backend_bound >0.10)",
+ "MetricThreshold": "tma_backend_bound > 0.1",
"MetricgroupNoGroup": "TopdownL1;Default",
"PublicDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that uops must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count",
"ScaleUnit": "100%",
@@ -97,7 +97,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.ALL_P@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
"MetricName": "tma_bad_speculation",
- "MetricThreshold": "(tma_bad_speculation >0.15)",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
"PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ). Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
"ScaleUnit": "100%",
@@ -108,7 +108,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.BRANCH_DETECT@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_branch_detect",
- "MetricThreshold": "(tma_branch_detect >0.05) & ((tma_ifetch_latency >0.15) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_branch_detect > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"PublicDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -118,7 +118,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.MISPREDICT@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
"MetricName": "tma_branch_mispredicts",
- "MetricThreshold": "(tma_branch_mispredicts >0.05) & ((tma_bad_speculation >0.15))",
+ "MetricThreshold": "tma_branch_mispredicts > 0.05 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -128,7 +128,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.BRANCH_RESTEER@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_branch_resteer",
- "MetricThreshold": "(tma_branch_resteer >0.05) & ((tma_ifetch_latency >0.15) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_branch_resteer > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -137,7 +137,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.CISC@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_cisc",
- "MetricThreshold": "(tma_cisc >0.05) & ((tma_ifetch_bandwidth >0.10) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_cisc > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -146,7 +146,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_core_bound",
- "MetricThreshold": "(tma_core_bound >0.10) & ((tma_backend_bound >0.10))",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.1",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -156,7 +156,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.DECODE@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_decode",
- "MetricThreshold": "(tma_decode >0.05) & ((tma_ifetch_bandwidth >0.10) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_decode > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -165,7 +165,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.FASTNUKE@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_fast_nuke",
- "MetricThreshold": "(tma_fast_nuke >0.05) & ((tma_machine_clears >0.05) & ((tma_bad_speculation >0.15)))",
+ "MetricThreshold": "tma_fast_nuke > 0.05 & (tma_machine_clears > 0.05 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -175,7 +175,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ALL_P@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
- "MetricThreshold": "(tma_frontend_bound >0.20)",
+ "MetricThreshold": "tma_frontend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1;Default",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -185,7 +185,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ICACHE@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_icache_misses",
- "MetricThreshold": "(tma_icache_misses >0.05) & ((tma_ifetch_latency >0.15) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -194,7 +194,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
"MetricName": "tma_ifetch_bandwidth",
- "MetricThreshold": "(tma_ifetch_bandwidth >0.10) & ((tma_frontend_bound >0.20))",
+ "MetricThreshold": "tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -204,7 +204,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.FRONTEND_LATENCY@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
"MetricName": "tma_ifetch_latency",
- "MetricThreshold": "(tma_ifetch_latency >0.15) & ((tma_frontend_bound >0.20))",
+ "MetricThreshold": "tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -541,7 +541,7 @@
},
{
"BriefDescription": "Average CPU Utilization",
- "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.REF_TSC@ / TSC",
+ "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.REF_TSC@ / msr@tsc\\,cpu=cpu_atom@",
"MetricName": "tma_info_system_cpu_utilization",
"Unit": "cpu_atom"
},
@@ -564,7 +564,7 @@
"BriefDescription": "PerfMon Event Multiplexing accuracy indicator",
"MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.CORE_P@ / cpu_atom@CPU_CLK_UNHALTED.CORE@",
"MetricName": "tma_info_system_mux",
- "MetricThreshold": "((tma_info_system_mux > 1.1)|(tma_info_system_mux < 0.9))",
+ "MetricThreshold": "tma_info_system_mux > 1.1 | tma_info_system_mux < 0.9",
"Unit": "cpu_atom"
},
{
@@ -603,7 +603,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ITLB_MISS@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_itlb_misses",
- "MetricThreshold": "(tma_itlb_misses >0.05) & ((tma_ifetch_latency >0.15) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -612,7 +612,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
"MetricName": "tma_machine_clears",
- "MetricThreshold": "(tma_machine_clears >0.05) & ((tma_bad_speculation >0.15))",
+ "MetricThreshold": "tma_machine_clears > 0.05 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -622,7 +622,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.MEM_SCHEDULER@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_mem_scheduler",
- "MetricThreshold": "(tma_mem_scheduler >0.10) & ((tma_resource_bound >0.20) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_mem_scheduler > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -631,7 +631,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_non_mem_scheduler",
- "MetricThreshold": "(tma_non_mem_scheduler >0.10) & ((tma_resource_bound >0.20) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_non_mem_scheduler > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -640,7 +640,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.NUKE@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_nuke",
- "MetricThreshold": "(tma_nuke >0.05) & ((tma_machine_clears >0.05) & ((tma_bad_speculation >0.15)))",
+ "MetricThreshold": "tma_nuke > 0.05 & (tma_machine_clears > 0.05 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -649,7 +649,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.OTHER@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_other_fb",
- "MetricThreshold": "(tma_other_fb >0.05) & ((tma_ifetch_bandwidth >0.10) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_other_fb > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -658,7 +658,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.PREDECODE@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_predecode",
- "MetricThreshold": "(tma_predecode >0.05) & ((tma_ifetch_bandwidth >0.10) & ((tma_frontend_bound >0.20)))",
+ "MetricThreshold": "tma_predecode > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -667,7 +667,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.REGISTER@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_register",
- "MetricThreshold": "(tma_register >0.10) & ((tma_resource_bound >0.20) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_register > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -676,7 +676,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.REORDER_BUFFER@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_reorder_buffer",
- "MetricThreshold": "(tma_reorder_buffer >0.10) & ((tma_resource_bound >0.20) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_reorder_buffer > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -685,7 +685,7 @@
"MetricExpr": "tma_backend_bound - tma_core_bound",
"MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_resource_bound",
- "MetricThreshold": "(tma_resource_bound >0.20) & ((tma_backend_bound >0.10))",
+ "MetricThreshold": "tma_resource_bound > 0.2 & tma_backend_bound > 0.1",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -696,7 +696,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_RETIRING.ALL_P@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
- "MetricThreshold": "(tma_retiring >0.75)",
+ "MetricThreshold": "tma_retiring > 0.75",
"MetricgroupNoGroup": "TopdownL1;Default",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
@@ -706,7 +706,7 @@
"MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.SERIALIZATION@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_serialization",
- "MetricThreshold": "(tma_serialization >0.10) & ((tma_resource_bound >0.20) & ((tma_backend_bound >0.10)))",
+ "MetricThreshold": "tma_serialization > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%",
"Unit": "cpu_atom"
},
@@ -718,7 +718,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations",
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
"MetricExpr": "(cpu_core@UOPS_DISPATCHED.PORT_0@ + cpu_core@UOPS_DISPATCHED.PORT_1@ + cpu_core@UOPS_DISPATCHED.PORT_5_11@ + cpu_core@UOPS_DISPATCHED.PORT_6@) / (5 * tma_info_core_core_clks)",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_alu_op_utilization",
@@ -731,13 +731,13 @@
"MetricExpr": "78 * cpu_core@ASSISTS.ANY@ / tma_info_thread_slots",
"MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
- "MetricThreshold": "tma_assists > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: ASSISTS.ANY",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates fraction of slots the CPU retired uops as a result of handing SSE to AVX* or AVX* to SSE transition Assists",
+ "BriefDescription": "This metric estimates fraction of slots the CPU retired uops as a result of handing SSE to AVX* or AVX* to SSE transition Assists.",
"MetricExpr": "63 * cpu_core@ASSISTS.SSE_AVX_MIX@ / tma_info_thread_slots",
"MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group",
"MetricName": "tma_avx_assists",
@@ -748,7 +748,7 @@
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "cpu_core@topdown\\-be\\-bound@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)",
"MetricGroup": "BvOB;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
@@ -765,13 +765,13 @@
"MetricName": "tma_bad_speculation",
"MetricThreshold": "tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
- "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches)",
+ "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
"MetricGroup": "BigFootprint;BvBC;Fed;Frontend;IcMiss;MemoryTLB",
"MetricName": "tma_bottleneck_big_code",
"MetricThreshold": "tma_bottleneck_big_code > 20",
@@ -787,35 +787,35 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
+ "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
+ "MetricGroup": "BvCB;Cor;tma_issueComp",
+ "MetricName": "tma_bottleneck_compute_bound_est",
+ "MetricThreshold": "tma_bottleneck_compute_bound_est > 20",
+ "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. Related metrics: ",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_fb_full / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_fb_full)))",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
"MetricGroup": "BvMB;Mem;MemoryBW;Offcore;tma_issueBW",
- "MetricName": "tma_bottleneck_cache_memory_bandwidth",
- "MetricThreshold": "tma_bottleneck_cache_memory_bandwidth > 20",
- "PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_mem_bandwidth, tma_sq_full",
+ "MetricName": "tma_bottleneck_data_cache_memory_bandwidth",
+ "MetricThreshold": "tma_bottleneck_data_cache_memory_bandwidth > 20",
+ "PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full",
"Unit": "cpu_core"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_l1_latency_dependency / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_fb_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_lock_latency / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_fb_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_split_loads / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_fb_full)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_split_stores / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_store_latency / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)))",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l1_latency_dependency / (tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_lock_latency / (tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_loads / (tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_stores / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
"MetricGroup": "BvML;Mem;MemoryLat;Offcore;tma_issueLat",
- "MetricName": "tma_bottleneck_cache_memory_latency",
- "MetricThreshold": "tma_bottleneck_cache_memory_latency > 20",
+ "MetricName": "tma_bottleneck_data_cache_memory_latency",
+ "MetricThreshold": "tma_bottleneck_data_cache_memory_latency > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
- "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_serializing_operation + tma_ports_utilization) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_serializing_operation + tma_ports_utilization)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
- "MetricGroup": "BvCB;Cor;tma_issueComp",
- "MetricName": "tma_bottleneck_compute_bound_est",
- "MetricThreshold": "tma_bottleneck_compute_bound_est > 20",
- "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy",
- "Unit": "cpu_core"
- },
- {
"BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)",
- "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) - (1 - cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.MS\\,cmask\\=0x1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_mispredicts_resteers + tma_clears_resteers + tma_unknown_branches)) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_ms / (tma_mite + tma_dsb + tma_lsd + tma_ms))) - tma_bottleneck_big_code",
+ "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - (1 - cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.MS\\,cmask\\=1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_ms)) - tma_bottleneck_big_code",
"MetricGroup": "BvFB;Fed;FetchBW;Frontend",
"MetricName": "tma_bottleneck_instruction_fetch_bw",
"MetricThreshold": "tma_bottleneck_instruction_fetch_bw > 20",
@@ -823,7 +823,7 @@
},
{
"BriefDescription": "Total pipeline cost of irregular execution (e.g",
- "MetricExpr": "100 * ((1 - cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.MS\\,cmask\\=0x1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_mispredicts_resteers + tma_clears_resteers + tma_unknown_branches)) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_ms / (tma_mite + tma_dsb + tma_lsd + tma_ms)) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + cpu_core@RS.EMPTY_RESOURCE@ / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_serializing_operation + tma_ports_utilization) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
+ "MetricExpr": "100 * ((1 - cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.MS\\,cmask\\=1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_ms) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + cpu_core@RS.EMPTY_RESOURCE@ / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
"MetricGroup": "Bad;BvIO;Cor;Ret;tma_issueMS",
"MetricName": "tma_bottleneck_irregular_overhead",
"MetricThreshold": "tma_bottleneck_irregular_overhead > 10",
@@ -832,7 +832,7 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_dtlb_load / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_fb_full)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_dtlb_store / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)))",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / (tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
"MetricGroup": "BvMT;Mem;MemoryTLB;Offcore;tma_issueTLB",
"MetricName": "tma_bottleneck_memory_data_tlbs",
"MetricThreshold": "tma_bottleneck_memory_data_tlbs > 20",
@@ -841,16 +841,16 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * tma_false_sharing / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
"MetricGroup": "BvMS;LockCont;Mem;Offcore;tma_issueSyncxn",
"MetricName": "tma_bottleneck_memory_synchronization",
"MetricThreshold": "tma_bottleneck_memory_synchronization > 10",
- "PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears",
+ "PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"Unit": "cpu_core"
},
{
"BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
- "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Bad;BadSpec;BrMispredicts;BvMP;tma_issueBM",
"MetricName": "tma_bottleneck_mispredictions",
"MetricThreshold": "tma_bottleneck_mispredictions > 20",
@@ -859,15 +859,15 @@
},
{
"BriefDescription": "Total pipeline cost of remaining bottlenecks in the back-end",
- "MetricExpr": "100 - (tma_bottleneck_big_code + tma_bottleneck_instruction_fetch_bw + tma_bottleneck_mispredictions + tma_bottleneck_cache_memory_bandwidth + tma_bottleneck_cache_memory_latency + tma_bottleneck_memory_data_tlbs + tma_bottleneck_memory_synchronization + tma_bottleneck_compute_bound_est + tma_bottleneck_irregular_overhead + tma_bottleneck_branching_overhead + tma_bottleneck_useful_work)",
+ "MetricExpr": "100 - (tma_bottleneck_big_code + tma_bottleneck_instruction_fetch_bw + tma_bottleneck_mispredictions + tma_bottleneck_data_cache_memory_bandwidth + tma_bottleneck_data_cache_memory_latency + tma_bottleneck_memory_data_tlbs + tma_bottleneck_memory_synchronization + tma_bottleneck_compute_bound_est + tma_bottleneck_irregular_overhead + tma_bottleneck_branching_overhead + tma_bottleneck_useful_work)",
"MetricGroup": "BvOB;Cor;Offcore",
"MetricName": "tma_bottleneck_other_bottlenecks",
"MetricThreshold": "tma_bottleneck_other_bottlenecks > 20",
- "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls",
+ "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls.",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead",
+ "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.",
"MetricExpr": "100 * (tma_retiring - (cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ + 2 * cpu_core@BR_INST_RETIRED.NEAR_CALL@ + cpu_core@INST_RETIRED.NOP@) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
"MetricGroup": "BvUW;Ret",
"MetricName": "tma_bottleneck_useful_work",
@@ -876,7 +876,7 @@
},
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
- "MetricExpr": "topdown\\-br\\-mispredict / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "cpu_core@topdown\\-br\\-mispredict@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)",
"MetricGroup": "BadSpec;BrMispredicts;BvMP;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
@@ -890,26 +890,26 @@
"MetricExpr": "cpu_core@INT_MISC.CLEAR_RESTEER_CYCLES@ / tma_info_thread_clks + tma_unknown_branches",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_branch_resteers",
- "MetricThreshold": "tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_l3_hit_latency, tma_store_latency",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.1 power-performance optimized state (Faster wakeup time; Smaller power savings)",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.1 power-performance optimized state (Faster wakeup time; Smaller power savings).",
"MetricExpr": "cpu_core@CPU_CLK_UNHALTED.C01@ / tma_info_thread_clks",
"MetricGroup": "C0Wait;TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_c01_wait",
- "MetricThreshold": "tma_c01_wait > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_c01_wait > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.2 power-performance optimized state (Slower wakeup time; Larger power savings)",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.2 power-performance optimized state (Slower wakeup time; Larger power savings).",
"MetricExpr": "cpu_core@CPU_CLK_UNHALTED.C02@ / tma_info_thread_clks",
"MetricGroup": "C0Wait;TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_c02_wait",
- "MetricThreshold": "tma_c02_wait > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_c02_wait > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
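[Editor's note] Several hunks in this file only re-parenthesize MetricThreshold strings, as in the two C0-wait metrics above. In perf's metric grammar '&' is a logical AND, so the added grouping is associativity-neutral: it documents the parent-node chain without changing the result. A small sketch with hypothetical metric values:

    # Threshold gating for tma_c01_wait, transcribed from the string above;
    # the metric values are hypothetical.
    metrics = {"tma_c01_wait": 0.08, "tma_serializing_operation": 0.12,
               "tma_core_bound": 0.15, "tma_backend_bound": 0.25}

    flagged = (metrics["tma_c01_wait"] > 0.05
               and (metrics["tma_serializing_operation"] > 0.1
                    and (metrics["tma_core_bound"] > 0.1
                         and metrics["tma_backend_bound"] > 0.2)))
    print("highlight tma_c01_wait:", flagged)  # True: every parent node is over threshold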
@@ -918,7 +918,7 @@
"MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
"MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_cisc",
- "MetricThreshold": "tma_cisc > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources. Sample with: FRONTEND_RETIRED.MS_FLOWS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -928,90 +928,90 @@
"MetricExpr": "(1 - tma_branch_mispredicts / tma_bad_speculation) * cpu_core@INT_MISC.CLEAR_RESTEER_CYCLES@ / tma_info_thread_clks",
"MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
"MetricName": "tma_clears_resteers",
- "MetricThreshold": "tma_clears_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that hit in the L2 cache",
- "MetricExpr": "max(0, cpu_core@FRONTEND_RETIRED.L1I_MISS@ * cpu_core@frontend_retired.l1i_miss@R / tma_info_thread_clks - tma_code_l2_miss)",
+ "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that hit in the L2 cache.",
+ "MetricExpr": "max(0, cpu_core@FRONTEND_RETIRED.L1I_MISS@ * cpu_core@FRONTEND_RETIRED.L1I_MISS@R / tma_info_thread_clks - tma_code_l2_miss)",
"MetricGroup": "FetchLat;IcMiss;Offcore;TopdownL4;tma_L4_group;tma_icache_misses_group",
"MetricName": "tma_code_l2_hit",
- "MetricThreshold": "tma_code_l2_hit > 0.05 & tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_l2_hit > 0.05 & (tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that miss in the L2 cache",
- "MetricExpr": "cpu_core@FRONTEND_RETIRED.L2_MISS@ * cpu_core@frontend_retired.l2_miss@R / tma_info_thread_clks",
+ "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that miss in the L2 cache.",
+ "MetricExpr": "cpu_core@FRONTEND_RETIRED.L2_MISS@ * cpu_core@FRONTEND_RETIRED.L2_MISS@R / tma_info_thread_clks",
"MetricGroup": "FetchLat;IcMiss;Offcore;TopdownL4;tma_L4_group;tma_icache_misses_group",
"MetricName": "tma_code_l2_miss",
- "MetricThreshold": "tma_code_l2_miss > 0.05 & tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_l2_miss > 0.05 & (tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the (first level) ITLB was missed by instructions fetches, that later on hit in second-level TLB (STLB)",
- "MetricExpr": "max(0, cpu_core@FRONTEND_RETIRED.ITLB_MISS@ * cpu_core@frontend_retired.itlb_miss@R / tma_info_thread_clks - tma_code_stlb_miss)",
+ "MetricExpr": "max(0, cpu_core@FRONTEND_RETIRED.ITLB_MISS@ * cpu_core@FRONTEND_RETIRED.ITLB_MISS@R / tma_info_thread_clks - tma_code_stlb_miss)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group",
"MetricName": "tma_code_stlb_hit",
- "MetricThreshold": "tma_code_stlb_hit > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_hit > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric estimates the fraction of cycles where the Second-level TLB (STLB) was missed by instruction fetches, performing a hardware page walk",
- "MetricExpr": "cpu_core@FRONTEND_RETIRED.STLB_MISS@ * cpu_core@frontend_retired.stlb_miss@R / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@FRONTEND_RETIRED.STLB_MISS@ * cpu_core@FRONTEND_RETIRED.STLB_MISS@R / tma_info_thread_clks",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group",
"MetricName": "tma_code_stlb_miss",
- "MetricThreshold": "tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for (instruction) code accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for (instruction) code accesses.",
"MetricExpr": "cpu_core@ITLB_MISSES.WALK_ACTIVE@ / tma_info_thread_clks * cpu_core@ITLB_MISSES.WALK_COMPLETED_2M_4M@ / (cpu_core@ITLB_MISSES.WALK_COMPLETED_4K@ + cpu_core@ITLB_MISSES.WALK_COMPLETED_2M_4M@)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group",
"MetricName": "tma_code_stlb_miss_2m",
- "MetricThreshold": "tma_code_stlb_miss_2m > 0.05 & tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss_2m > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for (instruction) code accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for (instruction) code accesses.",
"MetricExpr": "cpu_core@ITLB_MISSES.WALK_ACTIVE@ / tma_info_thread_clks * cpu_core@ITLB_MISSES.WALK_COMPLETED_4K@ / (cpu_core@ITLB_MISSES.WALK_COMPLETED_4K@ + cpu_core@ITLB_MISSES.WALK_COMPLETED_2M_4M@)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group",
"MetricName": "tma_code_stlb_miss_4k",
- "MetricThreshold": "tma_code_stlb_miss_4k > 0.05 & tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss_4k > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by non-taken conditional branches",
- "MetricExpr": "cpu_core@BR_MISP_RETIRED.COND_NTAKEN_COST@ * cpu_core@br_misp_retired.cond_ntaken_cost@R / tma_info_thread_clks",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by non-taken conditional branches.",
+ "MetricExpr": "cpu_core@BR_MISP_RETIRED.COND_NTAKEN_COST@ * cpu_core@BR_MISP_RETIRED.COND_NTAKEN_COST@R / tma_info_thread_clks",
"MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_cond_nt_mispredicts",
- "MetricThreshold": "tma_cond_nt_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_cond_nt_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to misprediction by taken conditional branches",
- "MetricExpr": "cpu_core@BR_MISP_RETIRED.COND_TAKEN_COST@ * cpu_core@br_misp_retired.cond_taken_cost@R / tma_info_thread_clks",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to misprediction by taken conditional branches.",
+ "MetricExpr": "cpu_core@BR_MISP_RETIRED.COND_TAKEN_COST@ * cpu_core@BR_MISP_RETIRED.COND_TAKEN_COST@R / tma_info_thread_clks",
"MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_cond_tk_mispredicts",
- "MetricThreshold": "tma_cond_tk_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_cond_tk_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
- "MetricExpr": "((min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS@ * cpu_core@mem_load_l3_hit_retired.xsnp_miss@R, cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS@ * (27 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency) if 0 < cpu_core@mem_load_l3_hit_retired.xsnp_miss@R else cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS@ * (27 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency) + (min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * cpu_core@mem_load_l3_hit_retired.xsnp_fwd@R, cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * (28 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency) if 0 < cpu_core@mem_load_l3_hit_retired.xsnp_fwd@R else cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * (28 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency) * (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ / (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ + cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD@))) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
+ "MetricExpr": "(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS@ * min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS@R, 24 * tma_info_system_core_frequency) + cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@R, 25 * tma_info_system_core_frequency) * (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ / (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ + cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD@))) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
- "MetricThreshold": "tma_contested_accesses > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD, MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_bottleneck_memory_synchronization, tma_data_sharing, tma_false_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_bottleneck_memory_synchronization, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
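[Editor's note] The tma_contested_accesses hunk above replaces the guarded "min(...) if 0 < ...@R else ..." form with a direct cap: the measured per-event retire latency (the @R term) is bounded by a nominal snoop cost scaled by core frequency. A sketch of that capping, with hypothetical numbers:

    # Cap the measured average latency (EVENT@R) at a nominal cost scaled by
    # core frequency, mirroring "EVENT * min(EVENT@R, 24 * core_frequency)".
    # All numbers below are hypothetical.
    def contested_cycles(count, avg_latency, core_freq_ghz, nominal=24):
        return count * min(avg_latency, nominal * core_freq_ghz)

    print(contested_cycles(count=1000, avg_latency=90.0, core_freq_ghz=3.0))
    # -> 72000.0: the 90-cycle average is capped at 24 * 3.0 = 72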
@@ -1022,26 +1022,27 @@
"MetricName": "tma_core_bound",
"MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations)",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
- "MetricExpr": "((min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD@ * cpu_core@mem_load_l3_hit_retired.xsnp_no_fwd@R, cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD@ * (27 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency) if 0 < cpu_core@mem_load_l3_hit_retired.xsnp_no_fwd@R else cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD@ * (27 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency) + (min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * cpu_core@mem_load_l3_hit_retired.xsnp_fwd@R, cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * (27 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency) if 0 < cpu_core@mem_load_l3_hit_retired.xsnp_fwd@R else cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * (27 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency) * (1 - cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ / (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ + cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD@))) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD@ * min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD@R, 24 * tma_info_system_core_frequency) + cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@R, 24 * tma_info_system_core_frequency) * (1 - cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ / (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ + cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD@))) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
"MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
- "MetricThreshold": "tma_data_sharing > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_false_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder",
- "MetricExpr": "(cpu_core@INST_DECODED.DECODERS\\,cmask\\=0x1@ - cpu_core@INST_DECODED.DECODERS\\,cmask\\=0x2@) / tma_info_core_core_clks / 2",
+ "MetricExpr": "(cpu_core@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu_core@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_core_clks / 2",
"MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group",
"MetricName": "tma_decoder0_alone",
- "MetricThreshold": "tma_decoder0_alone > 0.1 & tma_mite > 0.1 & tma_fetch_bandwidth > 0.2",
+ "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -1051,7 +1052,7 @@
"MetricExpr": "cpu_core@ARITH.DIV_ACTIVE@ / tma_info_thread_clks",
"MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
- "MetricThreshold": "tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIV_ACTIVE",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -1061,7 +1062,7 @@
"MetricExpr": "cpu_core@MEMORY_ACTIVITY.STALLS_L3_MISS@ / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_dram_bound",
- "MetricThreshold": "tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_MISS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -1072,7 +1073,7 @@
"MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_dsb",
"MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2",
- "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1081,28 +1082,28 @@
"MetricExpr": "cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES@ / tma_info_thread_clks",
"MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_dsb_switches",
- "MetricThreshold": "tma_dsb_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
- "MetricExpr": "(min(cpu_core@MEM_INST_RETIRED.STLB_HIT_LOADS@ * cpu_core@mem_inst_retired.stlb_hit_loads@R, cpu_core@MEM_INST_RETIRED.STLB_HIT_LOADS@ * 7) if 0 < cpu_core@mem_inst_retired.stlb_hit_loads@R else cpu_core@MEM_INST_RETIRED.STLB_HIT_LOADS@ * 7) / tma_info_thread_clks + tma_load_stlb_miss",
+ "MetricExpr": "cpu_core@MEM_INST_RETIRED.STLB_HIT_LOADS@ * min(cpu_core@MEM_INST_RETIRED.STLB_HIT_LOADS@R, 7) / tma_info_thread_clks + tma_load_stlb_miss",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
- "MetricThreshold": "tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_store",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
- "MetricExpr": "(min(cpu_core@MEM_INST_RETIRED.STLB_HIT_STORES@ * cpu_core@mem_inst_retired.stlb_hit_stores@R, cpu_core@MEM_INST_RETIRED.STLB_HIT_STORES@ * 7) if 0 < cpu_core@mem_inst_retired.stlb_hit_stores@R else cpu_core@MEM_INST_RETIRED.STLB_HIT_STORES@ * 7) / tma_info_thread_clks + tma_store_stlb_miss",
+ "MetricExpr": "cpu_core@MEM_INST_RETIRED.STLB_HIT_STORES@ * min(cpu_core@MEM_INST_RETIRED.STLB_HIT_STORES@R, 7) / tma_info_thread_clks + tma_store_stlb_miss",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
- "MetricThreshold": "tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_load",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1111,8 +1112,8 @@
"MetricExpr": "28 * tma_info_system_core_frequency * cpu_core@OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM@ / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
- "MetricThreshold": "tma_false_sharing > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1122,7 +1123,7 @@
"MetricGroup": "BvMB;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
- "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1133,18 +1134,18 @@
"MetricName": "tma_fetch_bandwidth",
"MetricThreshold": "tma_fetch_bandwidth > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1, FRONTEND_RETIRED.LATENCY_GE_1, FRONTEND_RETIRED.LATENCY_GE_2. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1;FRONTEND_RETIRED.LATENCY_GE_1;FRONTEND_RETIRED.LATENCY_GE_2. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues",
- "MetricExpr": "topdown\\-fetch\\-lat / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - cpu_core@INT_MISC.UOP_DROPPING@ / tma_info_thread_slots",
+ "MetricExpr": "cpu_core@topdown\\-fetch\\-lat@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@) - cpu_core@INT_MISC.UOP_DROPPING@ / tma_info_thread_slots",
"MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
"MetricName": "tma_fetch_latency",
"MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16, FRONTEND_RETIRED.LATENCY_GE_8",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
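[Editor's note] The topdown expressions in this file are likewise rewritten from bare event names (topdown\-fetch\-lat) to the cpu_core@...@ form, which pins each event to the P-core PMU on this hybrid part, and the trailing "+ 0 * slots" term is dropped. The arithmetic itself is a plain slot fraction; a sketch with hypothetical counter values:

    # Level-1 topdown categories are raw slot counts normalized by the sum
    # of all four, so they total 100%. Counts are hypothetical reads of the
    # cpu_core topdown-* events.
    fe_bound, bad_spec, retiring, be_bound = 25_000, 5_000, 40_000, 30_000
    total = fe_bound + bad_spec + retiring + be_bound

    print(f"frontend bound: {fe_bound / total:.2%}")  # 25.00%
    print(f"backend bound:  {be_bound / total:.2%}")  # 30.00%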
@@ -1164,7 +1165,7 @@
"MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fp_arith",
"MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1174,16 +1175,16 @@
"MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group",
"MetricName": "tma_fp_assists",
"MetricThreshold": "tma_fp_assists > 0.1",
- "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals)",
+ "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals).",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles where the Floating-Point Divider unit was active",
+ "BriefDescription": "This metric represents fraction of cycles where the Floating-Point Divider unit was active.",
"MetricExpr": "cpu_core@ARITH.FPDIV_ACTIVE@ / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_divider_group",
"MetricName": "tma_fp_divider",
- "MetricThreshold": "tma_fp_divider > 0.2 & tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_fp_divider > 0.2 & (tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1192,8 +1193,8 @@
"MetricExpr": "cpu_core@FP_ARITH_INST_RETIRED.SCALAR@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_scalar",
- "MetricThreshold": "tma_fp_scalar > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1202,8 +1203,8 @@
"MetricExpr": "cpu_core@FP_ARITH_INST_RETIRED.VECTOR@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_vector",
- "MetricThreshold": "tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1212,8 +1213,8 @@
"MetricExpr": "(cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE@) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_128b",
- "MetricThreshold": "tma_fp_vector_128b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1222,41 +1223,41 @@
"MetricExpr": "(cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_256b",
- "MetricThreshold": "tma_fp_vector_256b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "topdown\\-fe\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - cpu_core@INT_MISC.UOP_DROPPING@ / tma_info_thread_slots",
+ "MetricExpr": "cpu_core@topdown\\-fe\\-bound@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@) - cpu_core@INT_MISC.UOP_DROPPING@ / tma_info_thread_slots",
"MetricGroup": "BvFB;BvIO;Default;PGO;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions , where one uop can represent multiple contiguous instructions",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions",
"MetricExpr": "tma_light_operations * cpu_core@INST_RETIRED.MACRO_FUSED@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fused_instructions",
"MetricThreshold": "tma_fused_instructions > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions , where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences",
- "MetricExpr": "topdown\\-heavy\\-ops / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
+ "MetricExpr": "cpu_core@topdown\\-heavy\\-ops@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_heavy_operations",
"MetricThreshold": "tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+]). Sample with: UOPS_RETIRED.HEAVY",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+]). Sample with: UOPS_RETIRED.HEAVY",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -1265,26 +1266,26 @@
"MetricExpr": "cpu_core@ICACHE_DATA.STALLS@ / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
- "MetricThreshold": "tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS, FRONTEND_RETIRED.L1I_MISS",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by indirect CALL instructions",
- "MetricExpr": "cpu_core@BR_MISP_RETIRED.INDIRECT_CALL_COST@ * cpu_core@br_misp_retired.indirect_call_cost@R / tma_info_thread_clks",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by indirect CALL instructions.",
+ "MetricExpr": "cpu_core@BR_MISP_RETIRED.INDIRECT_CALL_COST@ * cpu_core@BR_MISP_RETIRED.INDIRECT_CALL_COST@R / tma_info_thread_clks",
"MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_ind_call_mispredicts",
- "MetricThreshold": "tma_ind_call_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_ind_call_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by indirect JMP instructions",
- "MetricExpr": "max((cpu_core@BR_MISP_RETIRED.INDIRECT_COST@ * cpu_core@br_misp_retired.indirect_cost@R - cpu_core@BR_MISP_RETIRED.INDIRECT_CALL_COST@ * cpu_core@br_misp_retired.indirect_call_cost@R) / tma_info_thread_clks, 0)",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by indirect JMP instructions.",
+ "MetricExpr": "max((cpu_core@BR_MISP_RETIRED.INDIRECT_COST@ * cpu_core@BR_MISP_RETIRED.INDIRECT_COST@R - cpu_core@BR_MISP_RETIRED.INDIRECT_CALL_COST@ * cpu_core@BR_MISP_RETIRED.INDIRECT_CALL_COST@R) / tma_info_thread_clks, 0)",
"MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_ind_jump_mispredicts",
- "MetricThreshold": "tma_ind_jump_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_ind_jump_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
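These two hunks also align the event spelling inside the @R qualifier with the count event. As I read the metric grammar, a trailing @R selects the event's average retire latency, so each term is a count multiplied by its average cost in cycles. A sketch of the indirect-JMP expression under that assumption, with invented numbers:

    # max((INDIRECT_COST * INDIRECT_COST@R
    #       - INDIRECT_CALL_COST * INDIRECT_CALL_COST@R) / clks, 0)
    ind_cost, ind_lat = 2.0e6, 30.0    # mispredicted indirects, avg retire latency
    call_cost, call_lat = 0.5e6, 28.0  # indirect CALL subset (made-up values)
    clks = 5.0e9
    tma_ind_jump_mispredicts = max(
        (ind_cost * ind_lat - call_cost * call_lat) / clks, 0)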
@@ -1297,7 +1298,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for conditional non-taken branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for conditional non-taken branches (lower number means higher occurrence rate).",
"MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.COND_NTAKEN@",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_cond_ntaken",
@@ -1305,7 +1306,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for conditional taken branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for conditional taken branches (lower number means higher occurrence rate).",
"MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.COND_TAKEN@",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_cond_taken",
@@ -1313,15 +1314,15 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
"MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.INDIRECT@",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_indirect",
- "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1000",
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate).",
"MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.RET@",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_ret",
@@ -1353,7 +1354,7 @@
},
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck",
- "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_latency + tma_fetch_bandwidth)) * (tma_dsb / (tma_mite + tma_dsb + tma_lsd + tma_ms)))",
+ "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_bandwidth + tma_fetch_latency)) * (tma_dsb / (tma_dsb + tma_lsd + tma_mite + tma_ms)))",
"MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_bandwidth",
"MetricThreshold": "tma_info_botlnk_l2_dsb_bandwidth > 10",
@@ -1362,7 +1363,7 @@
},
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
- "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_mite / (tma_mite + tma_dsb + tma_lsd + tma_ms))",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_lsd + tma_mite + tma_ms))",
"MetricGroup": "DSBmiss;Fed;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_misses",
"MetricThreshold": "tma_info_botlnk_l2_dsb_misses > 10",
@@ -1371,10 +1372,11 @@
},
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
- "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
"MetricName": "tma_info_botlnk_l2_ic_misses",
"MetricThreshold": "tma_info_botlnk_l2_ic_misses > 5",
+ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: ",
"Unit": "cpu_core"
},
{
@@ -1445,12 +1447,12 @@
"MetricExpr": "(cpu_core@FP_ARITH_DISPATCHED.PORT_0@ + cpu_core@FP_ARITH_DISPATCHED.PORT_1@ + cpu_core@FP_ARITH_DISPATCHED.PORT_5@) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
- "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)",
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common).",
"Unit": "cpu_core"
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)",
- "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=0x1@",
+ "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "tma_info_core_ilp",
"Unit": "cpu_core"
@@ -1465,15 +1467,15 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details",
- "MetricExpr": "cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES@ / cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=0x1\\,edge\\=0x1@",
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES@ / cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
"MetricGroup": "DSBmiss",
"MetricName": "tma_info_frontend_dsb_switch_cost",
"Unit": "cpu_core"
},
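The cmask spelling is normalized here (cmask=0x1,edge=0x1 becomes cmask=1,edge) with no semantic change: cmask=1 counts cycles where the event is non-zero, and edge counts the 0-to-1 transitions, i.e. the number of distinct penalty episodes. Total penalty cycles divided by episodes gives the average cost of one DSB-to-MITE switch. A sketch with hypothetical values:

    penalty_cycles = 1.2e8  # DSB2MITE_SWITCHES.PENALTY_CYCLES
    switches = 2.0e6        # same event with cmask=1,edge: one count per episode
    avg_switch_cost = penalty_cycles / switches  # ~60 cycles per switch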
{
"BriefDescription": "This metric represents fraction of cycles the CPU retirement was stalled likely due to retired DSB misses",
- "MetricExpr": "cpu_core@FRONTEND_RETIRED.ANY_DSB_MISS@ * cpu_core@frontend_retired.any_dsb_miss@R / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@FRONTEND_RETIRED.ANY_DSB_MISS@ * cpu_core@FRONTEND_RETIRED.ANY_DSB_MISS@R / tma_info_thread_clks",
"MetricGroup": "DSBmiss;Fed;FetchLat",
"MetricName": "tma_info_frontend_dsb_switches_ret",
"MetricThreshold": "tma_info_frontend_dsb_switches_ret > 0.05",
@@ -1481,7 +1483,7 @@
},
{
"BriefDescription": "Average number of Uops issued by front-end when it issued something",
- "MetricExpr": "cpu_core@UOPS_ISSUED.ANY@ / cpu_core@UOPS_ISSUED.ANY\\,cmask\\=0x1@",
+ "MetricExpr": "cpu_core@UOPS_ISSUED.ANY@ / cpu_core@UOPS_ISSUED.ANY\\,cmask\\=1@",
"MetricGroup": "Fed;FetchBW",
"MetricName": "tma_info_frontend_fetch_upc",
"Unit": "cpu_core"
@@ -1531,7 +1533,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU retirement was stalled likely due to retired operations that invoke the Microcode Sequencer",
- "MetricExpr": "cpu_core@FRONTEND_RETIRED.MS_FLOWS@ * cpu_core@frontend_retired.ms_flows@R / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@FRONTEND_RETIRED.MS_FLOWS@ * cpu_core@FRONTEND_RETIRED.MS_FLOWS@R / tma_info_thread_clks",
"MetricGroup": "Fed;FetchLat;MicroSeq",
"MetricName": "tma_info_frontend_ms_latency_ret",
"MetricThreshold": "tma_info_frontend_ms_latency_ret > 0.05",
@@ -1546,21 +1548,21 @@
},
{
"BriefDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection",
- "MetricExpr": "cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES@ / cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES\\,cmask\\=0x1\\,edge\\=0x1@",
+ "MetricExpr": "cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES@ / cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES\\,cmask\\=1\\,edge@",
"MetricGroup": "Fed",
"MetricName": "tma_info_frontend_unknown_branch_cost",
- "PublicDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection. See Unknown_Branches node",
+ "PublicDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection. See Unknown_Branches node.",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU retirement was stalled likely due to retired branches who got branch address clears",
- "MetricExpr": "cpu_core@FRONTEND_RETIRED.UNKNOWN_BRANCH@ * cpu_core@frontend_retired.unknown_branch@R / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@FRONTEND_RETIRED.UNKNOWN_BRANCH@ * cpu_core@FRONTEND_RETIRED.UNKNOWN_BRANCH@R / tma_info_thread_clks",
"MetricGroup": "Fed;FetchLat",
"MetricName": "tma_info_frontend_unknown_branches_ret",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Branch instructions per taken branch",
+ "BriefDescription": "Branch instructions per taken branch.",
"MetricExpr": "cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ / cpu_core@BR_INST_RETIRED.NEAR_TAKEN@",
"MetricGroup": "Branches;Fed;PGO",
"MetricName": "tma_info_inst_mix_bptkbranch",
@@ -1580,7 +1582,7 @@
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
"MetricThreshold": "tma_info_inst_mix_iparith < 10",
- "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW",
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW.",
"Unit": "cpu_core"
},
{
@@ -1589,7 +1591,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx128",
"MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting",
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.",
"Unit": "cpu_core"
},
{
@@ -1598,7 +1600,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx256",
"MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting",
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.",
"Unit": "cpu_core"
},
{
@@ -1607,7 +1609,7 @@
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_dp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.",
"Unit": "cpu_core"
},
{
@@ -1616,7 +1618,7 @@
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_sp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.",
"Unit": "cpu_core"
},
{
@@ -1679,7 +1681,7 @@
"MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_INST_RETIRED.NEAR_TAKEN@",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
- "MetricThreshold": "tma_info_inst_mix_iptb < 6 * 2 + 1",
+ "MetricThreshold": "tma_info_inst_mix_iptb < 13",
"PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp",
"Unit": "cpu_core"
},
@@ -1825,7 +1827,7 @@
},
{
"BriefDescription": "Average Parallel L2 cache miss demand Loads",
- "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD@ / cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=0x1@",
+ "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD@ / cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@",
"MetricGroup": "Memory_BW;Offcore",
"MetricName": "tma_info_memory_latency_load_l2_mlp",
"Unit": "cpu_core"
@@ -1883,7 +1885,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU retirement was stalled likely due to STLB misses by demand loads",
- "MetricExpr": "cpu_core@MEM_INST_RETIRED.STLB_MISS_LOADS@ * cpu_core@mem_inst_retired.stlb_miss_loads@R / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@MEM_INST_RETIRED.STLB_MISS_LOADS@ * cpu_core@MEM_INST_RETIRED.STLB_MISS_LOADS@R / tma_info_thread_clks",
"MetricGroup": "Mem;MemoryTLB",
"MetricName": "tma_info_memory_tlb_load_stlb_miss_ret",
"MetricThreshold": "tma_info_memory_tlb_load_stlb_miss_ret > 0.05",
@@ -1906,7 +1908,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU retirement was stalled likely due to STLB misses by demand stores",
- "MetricExpr": "cpu_core@MEM_INST_RETIRED.STLB_MISS_STORES@ * cpu_core@mem_inst_retired.stlb_miss_stores@R / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@MEM_INST_RETIRED.STLB_MISS_STORES@ * cpu_core@MEM_INST_RETIRED.STLB_MISS_STORES@R / tma_info_thread_clks",
"MetricGroup": "Mem;MemoryTLB",
"MetricName": "tma_info_memory_tlb_store_stlb_miss_ret",
"MetricThreshold": "tma_info_memory_tlb_store_stlb_miss_ret > 0.05",
@@ -1920,8 +1922,8 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "",
- "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / (cpu_core@UOPS_EXECUTED.CORE_CYCLES_GE_1@ / 2 if #SMT_on else cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=0x1@)",
+ "BriefDescription": "Mem;Backend;CacheHits",
+ "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / (cpu_core@UOPS_EXECUTED.CORE_CYCLES_GE_1@ / 2 if #SMT_on else cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=1@)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute",
"Unit": "cpu_core"
@@ -1952,20 +1954,20 @@
"MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@ASSISTS.ANY@",
"MetricGroup": "MicroSeq;Pipeline;Ret;Retire",
"MetricName": "tma_info_pipeline_ipassist",
- "MetricThreshold": "tma_info_pipeline_ipassist < 100000",
+ "MetricThreshold": "tma_info_pipeline_ipassist < 100e3",
"PublicDescription": "Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired",
- "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=0x1@",
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
"MetricGroup": "Pipeline;Ret",
"MetricName": "tma_info_pipeline_retire",
"Unit": "cpu_core"
},
{
"BriefDescription": "Estimated fraction of retirement-cycles dealing with repeat instructions",
- "MetricExpr": "cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=0x1@",
+ "MetricExpr": "cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
"MetricGroup": "MicroSeq;Pipeline;Ret",
"MetricName": "tma_info_pipeline_strings_cycles",
"MetricThreshold": "tma_info_pipeline_strings_cycles > 0.1",
@@ -1981,7 +1983,7 @@
},
{
"BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]",
- "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / tma_info_system_time",
+ "MetricExpr": "tma_info_system_turbo_utilization * msr@tsc\\,cpu=cpu_core@ / 1e9 / tma_info_system_time",
"MetricGroup": "Power;Summary",
"MetricName": "tma_info_system_core_frequency",
"Unit": "cpu_core"
@@ -1995,7 +1997,7 @@
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.REF_TSC@ / TSC",
+ "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.REF_TSC@ / msr@tsc\\,cpu=cpu_core@",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized",
"Unit": "cpu_core"
@@ -2005,7 +2007,7 @@
"MetricExpr": "64 * (UNC_HAC_ARB_TRK_REQUESTS.ALL + UNC_HAC_ARB_COH_TRK_REQUESTS.ALL) / 1e9 / tma_info_system_time",
"MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW",
"MetricName": "tma_info_system_dram_bw_use",
- "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth, tma_sq_full",
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth, tma_sq_full",
"Unit": "cpu_core"
},
{
@@ -2018,23 +2020,22 @@
},
{
"BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
- "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_INST_RETIRED.FAR_BRANCH@u",
"MetricGroup": "Branches;OS",
"MetricName": "tma_info_system_ipfarbranch",
- "MetricThreshold": "tma_info_system_ipfarbranch < 1000000",
+ "MetricThreshold": "tma_info_system_ipfarbranch < 1e6",
"Unit": "cpu_core"
},
{
"BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
+ "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.THREAD_P@k / cpu_core@INST_RETIRED.ANY_P@k",
"MetricGroup": "OS",
"MetricName": "tma_info_system_kernel_cpi",
- "ScaleUnit": "1per_instr",
"Unit": "cpu_core"
},
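When an event moves inside a cpu_core@...@ qualifier, its privilege modifier moves inside too, so the perf-tool spelling :k becomes a trailing @k (and :u becomes @u in the far-branch hunk above). The kernel-CPI arithmetic itself is unchanged; a sketch with made-up values:

    kernel_clks = 1.0e9  # CPU_CLK_UNHALTED.THREAD_P with @k (kernel mode only)
    kernel_inst = 0.4e9  # INST_RETIRED.ANY_P with @k
    kernel_cpi = kernel_clks / kernel_inst  # 2.5 cycles per kernel instruction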
{
"BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / cpu_core@CPU_CLK_UNHALTED.THREAD@",
+ "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.THREAD_P@k / cpu_core@CPU_CLK_UNHALTED.THREAD@",
"MetricGroup": "OS",
"MetricName": "tma_info_system_kernel_utilization",
"MetricThreshold": "tma_info_system_kernel_utilization > 0.05",
@@ -2042,7 +2043,7 @@
},
{
"BriefDescription": "Average number of parallel data read requests to external memory",
- "MetricExpr": "UNC_ARB_DAT_OCCUPANCY.RD / UNC_ARB_DAT_OCCUPANCY.RD@thresh\\=0x1@",
+ "MetricExpr": "UNC_ARB_DAT_OCCUPANCY.RD / UNC_ARB_DAT_OCCUPANCY.RD@cmask\\=1@",
"MetricGroup": "Mem;MemoryBW;SoC",
"MetricName": "tma_info_system_mem_parallel_reads",
"PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
@@ -2093,7 +2094,14 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active",
+ "BriefDescription": "Measured Average Uncore Frequency for the SoC [GHz]",
+ "MetricExpr": "tma_info_system_socket_clks / 1e9 / tma_info_system_time",
+ "MetricGroup": "SoC",
+ "MetricName": "tma_info_system_uncore_frequency",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "cpu_core@CPU_CLK_UNHALTED.THREAD@",
"MetricGroup": "Pipeline",
"MetricName": "tma_info_thread_clks",
@@ -2104,7 +2112,6 @@
"MetricExpr": "1 / tma_info_thread_ipc",
"MetricGroup": "Mem;Pipeline",
"MetricName": "tma_info_thread_cpi",
- "ScaleUnit": "1per_instr",
"Unit": "cpu_core"
},
{
@@ -2112,7 +2119,7 @@
"MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / cpu_core@UOPS_ISSUED.ANY@",
"MetricGroup": "Cor;Pipeline",
"MetricName": "tma_info_thread_execute_per_issue",
- "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage",
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage.",
"Unit": "cpu_core"
},
{
@@ -2124,14 +2131,14 @@
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
- "MetricExpr": "slots",
+ "MetricExpr": "cpu_core@TOPDOWN.SLOTS@",
"MetricGroup": "TmaL1;tma_L1_group",
"MetricName": "tma_info_thread_slots",
"Unit": "cpu_core"
},
{
"BriefDescription": "Fraction of Physical Core issue-slots utilized by this Logical Processor",
- "MetricExpr": "(tma_info_thread_slots / (slots / 2) if #SMT_on else 1)",
+ "MetricExpr": "(tma_info_thread_slots / (cpu_core@TOPDOWN.SLOTS@ / 2) if #SMT_on else 1)",
"MetricGroup": "SMT;TmaL1;tma_L1_group",
"MetricName": "tma_info_thread_slots_utilization",
"Unit": "cpu_core"
@@ -2149,15 +2156,15 @@
"MetricExpr": "tma_retiring * tma_info_thread_slots / cpu_core@BR_INST_RETIRED.NEAR_TAKEN@",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
- "MetricThreshold": "tma_info_thread_uptb < 6 * 1.5",
+ "MetricThreshold": "tma_info_thread_uptb < 9",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles where the Integer Divider unit was active",
+ "BriefDescription": "This metric represents fraction of cycles where the Integer Divider unit was active.",
"MetricExpr": "tma_divider - tma_fp_divider",
"MetricGroup": "TopdownL4;tma_L4_group;tma_divider_group",
"MetricName": "tma_int_divider",
- "MetricThreshold": "tma_int_divider > 0.2 & tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_int_divider > 0.2 & (tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2167,7 +2174,7 @@
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_int_operations",
"MetricThreshold": "tma_int_operations > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents overall Integer (Int) select operations fraction the CPU has executed (retired). Vector/Matrix Int operations and shuffles are counted. Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain",
+ "PublicDescription": "This metric represents overall Integer (Int) select operations fraction the CPU has executed (retired). Vector/Matrix Int operations and shuffles are counted. Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2176,8 +2183,8 @@
"MetricExpr": "(cpu_core@INT_VEC_RETIRED.ADD_128@ + cpu_core@INT_VEC_RETIRED.VNNI_128@) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;IntVector;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group;tma_issue2P",
"MetricName": "tma_int_vector_128b",
- "MetricThreshold": "tma_int_vector_128b > 0.1 & tma_int_operations > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents 128-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_int_vector_128b > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents 128-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2186,8 +2193,8 @@
"MetricExpr": "(cpu_core@INT_VEC_RETIRED.ADD_256@ + cpu_core@INT_VEC_RETIRED.MUL_256@ + cpu_core@INT_VEC_RETIRED.VNNI_256@) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;IntVector;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group;tma_issue2P",
"MetricName": "tma_int_vector_256b",
- "MetricThreshold": "tma_int_vector_256b > 0.1 & tma_int_operations > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD/MUL or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_int_vector_256b > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD/MUL or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2196,8 +2203,8 @@
"MetricExpr": "cpu_core@ICACHE_TAG.STALLS@ / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
- "MetricThreshold": "tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS, FRONTEND_RETIRED.ITLB_MISS",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2206,18 +2213,18 @@
"MetricExpr": "max((cpu_core@EXE_ACTIVITY.BOUND_ON_LOADS@ - cpu_core@MEMORY_ACTIVITY.STALLS_L1D_MISS@) / tma_info_thread_clks, 0)",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
"MetricName": "tma_l1_bound",
- "MetricThreshold": "tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 Data (L1D) cache. The L1D cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1D. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache",
+ "BriefDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache",
"MetricExpr": "min(2 * (cpu_core@MEM_INST_RETIRED.ALL_LOADS@ - cpu_core@MEM_LOAD_RETIRED.FB_HIT@ - cpu_core@MEM_LOAD_RETIRED.L1_MISS@) * 20 / 100, max(cpu_core@CYCLE_ACTIVITY.CYCLES_MEM_ANY@ - cpu_core@MEMORY_ACTIVITY.CYCLES_L1D_MISS@, 0)) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_l1_latency_dependency",
- "MetricThreshold": "tma_l1_latency_dependency > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache. The short latency of the L1D cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
+ "MetricThreshold": "tma_l1_latency_dependency > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache. The short latency of the L1D cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2226,17 +2233,17 @@
"MetricExpr": "(cpu_core@MEMORY_ACTIVITY.STALLS_L1D_MISS@ - cpu_core@MEMORY_ACTIVITY.STALLS_L2_MISS@) / tma_info_thread_clks",
"MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
- "MetricThreshold": "tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L2 cache under unloaded scenarios (possibly L2 latency limited)",
- "MetricExpr": "(min(cpu_core@MEM_LOAD_RETIRED.L2_HIT@ * cpu_core@mem_load_retired.l2_hit@R, cpu_core@MEM_LOAD_RETIRED.L2_HIT@ * (3 * tma_info_system_core_frequency)) if 0 < cpu_core@mem_load_retired.l2_hit@R else cpu_core@MEM_LOAD_RETIRED.L2_HIT@ * (3 * tma_info_system_core_frequency)) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@MEM_LOAD_RETIRED.L2_HIT@ * min(cpu_core@MEM_LOAD_RETIRED.L2_HIT@R, 3 * tma_info_system_core_frequency) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
"MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_l2_bound_group",
"MetricName": "tma_l2_hit_latency",
- "MetricThreshold": "tma_l2_hit_latency > 0.05 & tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l2_hit_latency > 0.05 & (tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L2 cache under unloaded scenarios (possibly L2 latency limited). Avoiding L1 cache misses (i.e. L1 misses/L2 hits) will improve the latency. Sample with: MEM_LOAD_RETIRED.L2_HIT",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -2246,18 +2253,18 @@
"MetricExpr": "(cpu_core@MEMORY_ACTIVITY.STALLS_L2_MISS@ - cpu_core@MEMORY_ACTIVITY.STALLS_L3_MISS@) / tma_info_thread_clks",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
- "MetricThreshold": "tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
- "MetricExpr": "(min(cpu_core@MEM_LOAD_RETIRED.L3_HIT@ * cpu_core@mem_load_retired.l3_hit@R, cpu_core@MEM_LOAD_RETIRED.L3_HIT@ * (12 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency) if 0 < cpu_core@mem_load_retired.l3_hit@R else cpu_core@MEM_LOAD_RETIRED.L3_HIT@ * (12 * tma_info_system_core_frequency) - 3 * tma_info_system_core_frequency) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@MEM_LOAD_RETIRED.L3_HIT@ * min(cpu_core@MEM_LOAD_RETIRED.L3_HIT@R, 9 * tma_info_system_core_frequency) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
- "MetricThreshold": "tma_l3_hit_latency > 0.1 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT. Related metrics: tma_bottleneck_cache_memory_latency, tma_branch_resteers, tma_mem_latency, tma_store_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_bottleneck_data_cache_memory_latency, tma_mem_latency",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2266,19 +2273,19 @@
"MetricExpr": "cpu_core@DECODE.LCP@ / tma_info_thread_clks",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_lcp",
- "MetricThreshold": "tma_lcp > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation)",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
"MetricExpr": "max(0, tma_retiring - tma_heavy_operations)",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_light_operations",
"MetricThreshold": "tma_light_operations > 0.6",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2297,7 +2304,7 @@
"MetricExpr": "max(0, tma_dtlb_load - tma_load_stlb_miss)",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
"MetricName": "tma_load_stlb_hit",
- "MetricThreshold": "tma_load_stlb_hit > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_hit > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2306,43 +2313,44 @@
"MetricExpr": "cpu_core@DTLB_LOAD_MISSES.WALK_ACTIVE@ / tma_info_thread_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
"MetricName": "tma_load_stlb_miss",
- "MetricThreshold": "tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_1G@ / (cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_1G@)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_1g",
- "MetricThreshold": "tma_load_stlb_miss_1g > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_1g > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M@ / (cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_1G@)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_2m",
- "MetricThreshold": "tma_load_stlb_miss_2m > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_2m > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_4K@ / (cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED_1G@)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_4k",
- "MetricThreshold": "tma_load_stlb_miss_4k > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_4k > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
- "MetricExpr": "cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ * cpu_core@mem_inst_retired.lock_loads@R / tma_info_thread_clks",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ * cpu_core@MEM_INST_RETIRED.LOCK_LOADS@R / tma_info_thread_clks",
"MetricGroup": "LockCont;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
- "MetricThreshold": "tma_lock_latency > 0.2 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -2353,7 +2361,7 @@
"MetricGroup": "FetchBW;LSD;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_lsd",
"MetricThreshold": "tma_lsd > 0.15 & tma_fetch_bandwidth > 0.2",
- "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit. LSD typically does well sustaining Uop supply. However; in some rare cases; optimal uop-delivery could not be reached for small loops whose size (in terms of number of uops) does not suit well the LSD structure",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit. LSD typically does well sustaining Uop supply. However; in some rare cases; optimal uop-delivery could not be reached for small loops whose size (in terms of number of uops) does not suit well the LSD structure.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2364,17 +2372,17 @@
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_bottleneck_memory_synchronization, tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_bottleneck_memory_synchronization, tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
- "MetricExpr": "min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DATA_RD\\,cmask\\=0x4@) / tma_info_thread_clks",
+ "MetricExpr": "min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
- "MetricThreshold": "tma_mem_bandwidth > 0.2 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_sq_full",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
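Here cmask=4 counts cycles with at least four demand data reads outstanding, the TMA heuristic for being bandwidth limited; the min() against total thread cycles just clamps the numerator. The companion tma_mem_latency entry below then takes cycles with any read outstanding and subtracts this bandwidth share. Sketch:

    clks = 5.0e9
    cycles_ge4 = 1.5e9  # OFFCORE_REQUESTS_OUTSTANDING.DATA_RD, cmask=4
    tma_mem_bandwidth = min(clks, cycles_ge4) / clks  # 0.30, above the 0.2 bar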
@@ -2383,34 +2391,33 @@
"MetricExpr": "min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD@) / tma_info_thread_clks - tma_mem_bandwidth",
"MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
- "MetricThreshold": "tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_bottleneck_cache_memory_latency, tma_l3_hit_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_bottleneck_data_cache_memory_latency, tma_l3_hit_latency",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
- "MetricExpr": "topdown\\-mem\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "cpu_core@topdown\\-mem\\-bound@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)",
"MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_memory_bound",
"MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two)",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to LFENCE Instructions",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to LFENCE Instructions.",
"MetricExpr": "13 * cpu_core@MISC2_RETIRED.LFENCE@ / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_memory_fence",
- "MetricThreshold": "tma_memory_fence > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_memory_fence > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations , uops for memory load or store accesses",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
"MetricExpr": "tma_light_operations * cpu_core@MEM_UOP_RETIRED.ANY@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_memory_operations",
@@ -2433,7 +2440,7 @@
"MetricExpr": "tma_branch_mispredicts / tma_bad_speculation * cpu_core@INT_MISC.CLEAR_RESTEER_CYCLES@ / tma_info_thread_clks",
"MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
"MetricName": "tma_mispredicts_resteers",
- "MetricThreshold": "tma_mispredicts_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_bottleneck_mispredictions, tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -2449,18 +2456,18 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued , the Count Domain; [ADL+] cycles)",
+ "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles)",
"MetricExpr": "160 * cpu_core@ASSISTS.SSE_AVX_MIX@ / tma_info_thread_clks",
"MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group",
"MetricName": "tma_mixing_vectors",
"MetricThreshold": "tma_mixing_vectors > 0.05",
- "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued , the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
+ "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the Microcode Sequencer (MS) unit - see Microcode_Sequencer node for details",
- "MetricExpr": "max(cpu_core@IDQ.MS_CYCLES_ANY@, cpu_core@UOPS_RETIRED.MS\\,cmask\\=0x1@ / (cpu_core@UOPS_RETIRED.SLOTS@ / cpu_core@UOPS_ISSUED.ANY@)) / tma_info_core_core_clks / 2",
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the Microcode Sequencer (MS) unit - see Microcode_Sequencer node for details.",
+ "MetricExpr": "max(cpu_core@IDQ.MS_CYCLES_ANY@, cpu_core@UOPS_RETIRED.MS\\,cmask\\=1@ / (cpu_core@UOPS_RETIRED.SLOTS@ / cpu_core@UOPS_ISSUED.ANY@)) / tma_info_core_core_clks / 2.4",
"MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_ms",
"MetricThreshold": "tma_ms > 0.05 & tma_fetch_bandwidth > 0.2",
@@ -2469,10 +2476,10 @@
},
{
"BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS)",
- "MetricExpr": "3 * cpu_core@UOPS_RETIRED.MS\\,cmask\\=0x1\\,edge\\=0x1@ / (cpu_core@UOPS_RETIRED.SLOTS@ / cpu_core@UOPS_ISSUED.ANY@) / tma_info_thread_clks",
+ "MetricExpr": "3 * cpu_core@UOPS_RETIRED.MS\\,cmask\\=1\\,edge@ / (cpu_core@UOPS_RETIRED.SLOTS@ / cpu_core@UOPS_ISSUED.ANY@) / tma_info_thread_clks",
"MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
"MetricName": "tma_ms_switches",
- "MetricThreshold": "tma_ms_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: FRONTEND_RETIRED.MS_FLOWS. Related metrics: tma_bottleneck_irregular_overhead, tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -2483,7 +2490,7 @@
"MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_non_fused_branches",
"MetricThreshold": "tma_non_fused_branches > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2492,13 +2499,14 @@
"MetricExpr": "tma_light_operations * cpu_core@INST_RETIRED.NOP@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "BvBO;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_nop_instructions",
- "MetricThreshold": "tma_nop_instructions > 0.1 & tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_int_operations + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches))",
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_other_light_ops",
@@ -2508,20 +2516,20 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types)",
+ "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).",
"MetricExpr": "max(tma_branch_mispredicts * (1 - cpu_core@BR_MISP_RETIRED.ALL_BRANCHES@ / (cpu_core@INT_MISC.CLEARS_COUNT@ - cpu_core@MACHINE_CLEARS.COUNT@)), 0.0001)",
"MetricGroup": "BrMispredicts;BvIO;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_other_mispredicts",
- "MetricThreshold": "tma_other_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering",
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.",
"MetricExpr": "max(tma_machine_clears * (1 - cpu_core@MACHINE_CLEARS.MEMORY_ORDERING@ / cpu_core@MACHINE_CLEARS.COUNT@), 0.0001)",
"MetricGroup": "BvIO;Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_other_nukes",
- "MetricThreshold": "tma_other_nukes > 0.05 & tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2531,7 +2539,7 @@
"MetricGroup": "TopdownL5;tma_L5_group;tma_assists_group",
"MetricName": "tma_page_faults",
"MetricThreshold": "tma_page_faults > 0.05",
- "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Page Faults. A Page Fault may apply on first application access to a memory page. Note operating system handling of page faults accounts for the majority of its cost",
+ "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Page Faults. A Page Fault may apply on first application access to a memory page. Note operating system handling of page faults accounts for the majority of its cost.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2541,7 +2549,7 @@
"MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_0",
"MetricThreshold": "tma_port_0 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2551,7 +2559,7 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_1",
"MetricThreshold": "tma_port_1 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2561,75 +2569,76 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_6",
"MetricThreshold": "tma_port_6 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "((tma_ports_utilized_0 * tma_info_thread_clks + (cpu_core@EXE_ACTIVITY.1_PORTS_UTIL@ + tma_retiring * cpu_core@EXE_ACTIVITY.2_3_PORTS_UTIL@)) / tma_info_thread_clks if cpu_core@ARITH.DIV_ACTIVE@ < cpu_core@CYCLE_ACTIVITY.STALLS_TOTAL@ - cpu_core@EXE_ACTIVITY.BOUND_ON_LOADS@ else (cpu_core@EXE_ACTIVITY.1_PORTS_UTIL@ + tma_retiring * cpu_core@EXE_ACTIVITY.2_3_PORTS_UTIL@) / tma_info_thread_clks)",
"MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_ports_utilization",
- "MetricThreshold": "tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricConstraint": "NO_THRESHOLD_AND_NMI",
"MetricExpr": "max(cpu_core@EXE_ACTIVITY.EXE_BOUND_0_PORTS@ - cpu_core@RESOURCE_STALLS.SCOREBOARD@, 0) / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
- "MetricThreshold": "tma_ports_utilized_0 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricConstraint": "NO_THRESHOLD_AND_NMI",
"MetricExpr": "cpu_core@EXE_ACTIVITY.1_PORTS_UTIL@ / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_1",
- "MetricThreshold": "tma_ports_utilized_1 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Sample with: EXE_ACTIVITY.1_PORTS_UTIL. Related metrics: tma_l1_bound",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "cpu_core@EXE_ACTIVITY.2_PORTS_UTIL@ / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_2",
- "MetricThreshold": "tma_ports_utilized_2 > 0.15 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Sample with: EXE_ACTIVITY.2_PORTS_UTIL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Sample with: EXE_ACTIVITY.2_PORTS_UTIL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "cpu_core@UOPS_EXECUTED.CYCLES_GE_3@ / tma_info_thread_clks",
"MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
- "MetricThreshold": "tma_ports_utilized_3m > 0.4 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by (indirect) RET instructions",
- "MetricExpr": "cpu_core@BR_MISP_RETIRED.RET_COST@ * cpu_core@br_misp_retired.ret_cost@R / tma_info_thread_clks",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to retired misprediction by (indirect) RET instructions.",
+ "MetricExpr": "cpu_core@BR_MISP_RETIRED.RET_COST@ * cpu_core@BR_MISP_RETIRED.RET_COST@R / tma_info_thread_clks",
"MetricGroup": "BrMispredicts;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_ret_mispredicts",
- "MetricThreshold": "tma_ret_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_ret_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "cpu_core@topdown\\-retiring@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@)",
"MetricGroup": "BvUW;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
@@ -2643,7 +2652,7 @@
"MetricExpr": "cpu_core@RESOURCE_STALLS.SCOREBOARD@ / tma_info_thread_clks + tma_c02_wait",
"MetricGroup": "BvIO;PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
"MetricName": "tma_serializing_operation",
- "MetricThreshold": "tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -2653,39 +2662,38 @@
"MetricExpr": "tma_light_operations * cpu_core@INT_VEC_RETIRED.SHUFFLES@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "HPC;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_shuffles_256b",
- "MetricThreshold": "tma_shuffles_256b > 0.1 & tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring Shuffle operations of 256-bit vector size (FP or Integer). Shuffles may incur slow cross \"vector lane\" data transfers",
+ "MetricThreshold": "tma_shuffles_256b > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring Shuffle operations of 256-bit vector size (FP or Integer). Shuffles may incur slow cross \"vector lane\" data transfers.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "cpu_core@CPU_CLK_UNHALTED.PAUSE@ / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_slow_pause",
- "MetricThreshold": "tma_slow_pause > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. Sample with: CPU_CLK_UNHALTED.PAUSE_INST",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary",
- "MetricExpr": "(min(cpu_core@MEM_INST_RETIRED.SPLIT_LOADS@ * cpu_core@mem_inst_retired.split_loads@R, cpu_core@MEM_INST_RETIRED.SPLIT_LOADS@ * tma_info_memory_load_miss_real_latency) if 0 < cpu_core@mem_inst_retired.split_loads@R else cpu_core@MEM_INST_RETIRED.SPLIT_LOADS@ * tma_info_memory_load_miss_real_latency) / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@MEM_INST_RETIRED.SPLIT_LOADS@ * min(cpu_core@MEM_INST_RETIRED.SPLIT_LOADS@R, tma_info_memory_load_miss_real_latency) / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_split_loads",
"MetricThreshold": "tma_split_loads > 0.3",
- "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS_PS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
"BriefDescription": "This metric represents rate of split store accesses",
- "MetricExpr": "(min(cpu_core@MEM_INST_RETIRED.SPLIT_STORES@ * cpu_core@mem_inst_retired.split_stores@R, cpu_core@MEM_INST_RETIRED.SPLIT_STORES@) if 0 < cpu_core@mem_inst_retired.split_stores@R else cpu_core@MEM_INST_RETIRED.SPLIT_STORES@) / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@MEM_INST_RETIRED.SPLIT_STORES@ * min(cpu_core@MEM_INST_RETIRED.SPLIT_STORES@R, 1) / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
"MetricName": "tma_split_stores",
- "MetricThreshold": "tma_split_stores > 0.2 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2694,8 +2702,8 @@
"MetricExpr": "(cpu_core@XQ.FULL_CYCLES@ + cpu_core@L1D_PEND_MISS.L2_STALLS@) / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
- "MetricThreshold": "tma_sq_full > 0.3 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2704,8 +2712,8 @@
"MetricExpr": "cpu_core@EXE_ACTIVITY.BOUND_ON_STORES@ / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_store_bound",
- "MetricThreshold": "tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES_PS",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2714,8 +2722,8 @@
"MetricExpr": "13 * cpu_core@LD_BLOCKS.STORE_FORWARD@ / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
- "MetricThreshold": "tma_store_fwd_blk > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2724,8 +2732,8 @@
"MetricExpr": "(cpu_core@MEM_STORE_RETIRED.L2_HIT@ * 10 * (1 - cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ / cpu_core@MEM_INST_RETIRED.ALL_STORES@) + (1 - cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ / cpu_core@MEM_INST_RETIRED.ALL_STORES@) * min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO@)) / tma_info_thread_clks",
"MetricGroup": "BvML;LockCont;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
- "MetricThreshold": "tma_store_latency > 0.1 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_branch_resteers, tma_fb_full, tma_l3_hit_latency, tma_lock_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2744,7 +2752,7 @@
"MetricExpr": "max(0, tma_dtlb_store - tma_store_stlb_miss)",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
"MetricName": "tma_store_stlb_hit",
- "MetricThreshold": "tma_store_stlb_hit > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_hit > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2753,34 +2761,34 @@
"MetricExpr": "cpu_core@DTLB_STORE_MISSES.WALK_ACTIVE@ / tma_info_core_core_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
"MetricName": "tma_store_stlb_miss",
- "MetricThreshold": "tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_1G@ / (cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_1G@)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_1g",
- "MetricThreshold": "tma_store_stlb_miss_1g > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_1g > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M@ / (cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_1G@)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_2m",
- "MetricThreshold": "tma_store_stlb_miss_2m > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_2m > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_4K@ / (cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_4K@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M@ + cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED_1G@)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_4k",
- "MetricThreshold": "tma_store_stlb_miss_4k > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_4k > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%",
"Unit": "cpu_core"
},
@@ -2789,7 +2797,7 @@
"MetricExpr": "9 * cpu_core@OCR.STREAMING_WR.ANY_RESPONSE@ / tma_info_thread_clks",
"MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueSmSt;tma_store_bound_group",
"MetricName": "tma_streaming_stores",
- "MetricThreshold": "tma_streaming_stores > 0.2 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_streaming_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should Streaming stores be a bottleneck. Sample with: OCR.STREAMING_WR.ANY_RESPONSE. Related metrics: tma_fb_full",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -2799,7 +2807,7 @@
"MetricExpr": "cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES@ / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
- "MetricThreshold": "tma_unknown_branches > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: FRONTEND_RETIRED.UNKNOWN_BRANCH",
"ScaleUnit": "100%",
"Unit": "cpu_core"
@@ -2809,8 +2817,8 @@
"MetricExpr": "tma_retiring * cpu_core@UOPS_EXECUTED.X87@ / cpu_core@UOPS_EXECUTED.THREAD@",
"MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
"MetricName": "tma_x87_use",
- "MetricThreshold": "tma_x87_use > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
"ScaleUnit": "100%",
"Unit": "cpu_core"
}
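Most of the metrics hunks above make one mechanical change: MetricThreshold strings gain explicit parentheses around the parent-node conditions, e.g. "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)" in place of the flat form. Assuming perf's threshold grammar binds comparisons tighter than "&", the flat and parenthesized spellings select the same cases; the parentheses simply spell out the TMA tree nesting (a child threshold only fires while each ancestor's threshold fires). Below is a minimal Python sketch of checking such a threshold against already-computed metric values; it is an illustration, not perf's expression parser, and the metric values are invented:

# Hypothetical values for the four nodes named in the rewritten
# tma_mem_latency threshold; in reality these come from a perf run.
metrics = {
    "tma_mem_latency": 0.15,
    "tma_dram_bound": 0.12,
    "tma_memory_bound": 0.25,
    "tma_backend_bound": 0.30,
}

threshold = ("tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & "
             "(tma_memory_bound > 0.2 & tma_backend_bound > 0.2))")

# The metric grammar's '&' is a logical AND; map it onto Python's 'and'
# (Python's own '&' binds tighter than '>', so the string cannot be
# eval'ed verbatim).
fires = eval(threshold.replace("&", " and "), {"__builtins__": {}}, metrics)
print("threshold fires:", fires)  # True: all four sub-conditions hold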
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/other.json b/tools/perf/pmu-events/arch/x86/meteorlake/other.json
index 46a21776a4e9..8320ffd83c51 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/other.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/other.json
@@ -29,111 +29,13 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10004",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_CODE_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000004",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts demand data reads that have any type of response.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10001",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts demand data reads that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10001",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_DATA_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_DATA_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10002",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_atom"
- },
- {
- "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10002",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_RFO.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000002",
- "SampleAfterValue": "100003",
- "UMask": "0x1",
- "Unit": "cpu_atom"
- },
- {
"BriefDescription": "Counts streaming stores which modify a full 64 byte cacheline that have any type of response.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.FULL_STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800000010000",
+ "PublicDescription": "Counts streaming stores which modify a full 64 byte cacheline that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -145,6 +47,7 @@
"EventName": "OCR.PARTIAL_STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400000010000",
+ "PublicDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -156,6 +59,7 @@
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10800",
+ "PublicDescription": "Counts streaming stores that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -167,52 +71,12 @@
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10800",
+ "PublicDescription": "Counts streaming stores that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xa5",
- "EventName": "RS.EMPTY",
- "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
- "SampleAfterValue": "1000003",
- "UMask": "0x7",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
- "Counter": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "EdgeDetect": "1",
- "EventCode": "0xa5",
- "EventName": "RS.EMPTY_COUNT",
- "Invert": "1",
- "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
- "SampleAfterValue": "100003",
- "UMask": "0x7",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Cycles when RS was empty and a resource allocation stall is asserted",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xa5",
- "EventName": "RS.EMPTY_RESOURCE",
- "SampleAfterValue": "1000003",
- "UMask": "0x1",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0x75",
- "EventName": "SERIALIZATION.C01_MS_SCB",
- "SampleAfterValue": "200003",
- "UMask": "0x4",
- "Unit": "cpu_atom"
- },
- {
"BriefDescription": "Cycles the uncore cannot take further requests",
"Counter": "0,1,2,3",
"CounterMask": "1",
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json b/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json
index 265f6c5a0248..bfdaabe9377d 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json
@@ -43,6 +43,7 @@
{
"BriefDescription": "Counts the total number of branch instructions retired for all branch types.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Errata": "MTL012, MTL013",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PublicDescription": "Counts the total number of instructions in which the instruction pointer (IP) of the processor is resteered due to a branch instruction and the branch instruction successfully retires. All branch type instructions are accounted for.",
@@ -54,13 +55,14 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "PublicDescription": "Counts all branch instructions retired.",
+ "PublicDescription": "Counts all branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"Unit": "cpu_core"
},
{
"BriefDescription": "Counts the number of retired JCC (Jump on Conditional Code) branch instructions retired, includes both taken and not taken branches.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Errata": "MTL013",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND",
"SampleAfterValue": "200003",
@@ -72,7 +74,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND",
- "PublicDescription": "Counts conditional branch instructions retired.",
+ "PublicDescription": "Counts conditional branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x11",
"Unit": "cpu_core"
@@ -82,7 +84,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_NTAKEN",
- "PublicDescription": "Counts not taken branch instructions retired.",
+ "PublicDescription": "Counts not taken branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x10",
"Unit": "cpu_core"
@@ -101,7 +103,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_TAKEN",
- "PublicDescription": "Counts taken conditional branch instructions retired.",
+ "PublicDescription": "Counts taken conditional branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -109,6 +111,7 @@
{
"BriefDescription": "Counts the number of far branch instructions retired, includes far jump, far call and return, and interrupt call and return.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Errata": "MTL013",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"SampleAfterValue": "200003",
@@ -120,7 +123,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "PublicDescription": "Counts far branch instructions retired.",
+ "PublicDescription": "Counts far branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x40",
"Unit": "cpu_core"
@@ -128,6 +131,7 @@
{
"BriefDescription": "Counts the number of near indirect JMP and near indirect CALL branch instructions retired.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Errata": "MTL013",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT",
"SampleAfterValue": "200003",
@@ -139,7 +143,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT",
- "PublicDescription": "Counts near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
+ "PublicDescription": "Counts near indirect branch instructions retired excluding returns. TSX abort is an indirect branch. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x80",
"Unit": "cpu_core"
@@ -147,6 +151,7 @@
{
"BriefDescription": "Counts the number of near indirect CALL branch instructions retired.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Errata": "MTL013",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT_CALL",
"SampleAfterValue": "200003",
@@ -166,6 +171,7 @@
"BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.INDIRECT_CALL",
"Counter": "0,1,2,3,4,5,6,7",
"Deprecated": "1",
+ "Errata": "MTL013",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.IND_CALL",
"SampleAfterValue": "200003",
@@ -175,6 +181,7 @@
{
"BriefDescription": "Counts the number of near CALL branch instructions retired.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Errata": "MTL012, MTL013",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"SampleAfterValue": "200003",
@@ -186,7 +193,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
- "PublicDescription": "Counts both direct and indirect near call instructions retired.",
+ "PublicDescription": "Counts both direct and indirect near call instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x2",
"Unit": "cpu_core"
@@ -205,7 +212,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "PublicDescription": "Counts return instructions retired.",
+ "PublicDescription": "Counts return instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x8",
"Unit": "cpu_core"
@@ -213,6 +220,7 @@
{
"BriefDescription": "Counts the number of near taken branch instructions retired.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Errata": "MTL013",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"SampleAfterValue": "200003",
@@ -224,7 +232,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Counts taken branch instructions retired.",
+ "PublicDescription": "Counts taken branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x20",
"Unit": "cpu_core"
@@ -261,7 +269,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"Unit": "cpu_core"
},
@@ -270,6 +278,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES_COST",
+ "PublicDescription": "All mispredicted branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x44",
"Unit": "cpu_core"
@@ -288,7 +297,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND",
- "PublicDescription": "Counts mispredicted conditional branch instructions retired.",
+ "PublicDescription": "Counts mispredicted conditional branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x11",
"Unit": "cpu_core"
@@ -298,6 +307,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_COST",
+ "PublicDescription": "Mispredicted conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x51",
"Unit": "cpu_core"
@@ -307,7 +317,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_NTAKEN",
- "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken.",
+ "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x10",
"Unit": "cpu_core"
@@ -317,6 +327,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_NTAKEN_COST",
+ "PublicDescription": "Mispredicted non-taken conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x50",
"Unit": "cpu_core"
@@ -335,7 +346,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN",
- "PublicDescription": "Counts taken conditional mispredicted branch instructions retired.",
+ "PublicDescription": "Counts taken conditional mispredicted branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -345,6 +356,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN_COST",
+ "PublicDescription": "Mispredicted taken conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x41",
"Unit": "cpu_core"
@@ -363,7 +375,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT",
- "PublicDescription": "Counts miss-predicted near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
+ "PublicDescription": "Counts miss-predicted near indirect branch instructions retired excluding returns. TSX abort is an indirect branch. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x80",
"Unit": "cpu_core"
@@ -382,7 +394,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
- "PublicDescription": "Counts retired mispredicted indirect (near taken) CALL instructions, including both register and memory indirect.",
+ "PublicDescription": "Counts retired mispredicted indirect (near taken) CALL instructions, including both register and memory indirect. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x2",
"Unit": "cpu_core"
@@ -392,6 +404,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_CALL_COST",
+ "PublicDescription": "Mispredicted indirect CALL retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x42",
"Unit": "cpu_core"
@@ -401,6 +414,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_COST",
+ "PublicDescription": "Mispredicted near indirect branch instructions retired (excluding returns). This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0xc0",
"Unit": "cpu_core"
@@ -428,7 +442,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken.",
+ "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x20",
"Unit": "cpu_core"
@@ -438,6 +452,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN_COST",
+ "PublicDescription": "Mispredicted taken near branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x60",
"Unit": "cpu_core"
@@ -447,7 +462,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RET",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x8",
"Unit": "cpu_core"
@@ -466,6 +481,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RET_COST",
+ "PublicDescription": "Mispredicted ret instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x48",
"Unit": "cpu_core"
@@ -791,6 +807,7 @@
"BriefDescription": "Fixed Counter: Counts the number of instructions retired",
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
+ "PublicDescription": "Fixed Counter: Counts the number of instructions retired Available PDIST counters: 32",
"SampleAfterValue": "2000003",
"UMask": "0x1",
"Unit": "cpu_atom"
@@ -799,7 +816,7 @@
"BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
- "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter. Available PDIST counters: 32",
"SampleAfterValue": "2000003",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -844,7 +861,7 @@
"BriefDescription": "Precise instruction retired with PEBS precise-distribution",
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.PREC_DIST",
- "PublicDescription": "A version of INST_RETIRED that allows for a precise distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR++) feature to fix bias in how retired instructions get sampled. Use on Fixed Counter 0.",
+ "PublicDescription": "A version of INST_RETIRED that allows for a precise distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR++) feature to fix bias in how retired instructions get sampled. Use on Fixed Counter 0. Available PDIST counters: 32",
"SampleAfterValue": "2000003",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -1048,7 +1065,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "LOAD_HIT_PREFETCH.SWPF",
- "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "PublicDescription": "Counts all software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
"SampleAfterValue": "100003",
"UMask": "0x1",
"Unit": "cpu_core"
@@ -1133,8 +1150,9 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of machine clears that flush the pipeline and restart the machine with the use of microcode due to SMC, MEMORY_ORDERING, FP_ASSISTS, PAGE_FAULT, DISAMBIGUATION, and FPC_VIRTUAL_TRAP.",
+ "BriefDescription": "This event is deprecated.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Deprecated": "1",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.SLOW",
"SampleAfterValue": "20003",
@@ -1209,6 +1227,47 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY_COUNT",
+ "Invert": "1",
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when RS was empty and a resource allocation stall is asserted",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY_RESOURCE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x75",
+ "EventName": "SERIALIZATION.C01_MS_SCB",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "This event counts a subset of the Topdown Slots event that were not consumed by the back-end pipeline due to lack of back-end resources, as a result of memory subsystem delays, execution units limitations, or other conditions.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/uncore-memory.json b/tools/perf/pmu-events/arch/x86/meteorlake/uncore-memory.json
index 783a4f7fd05b..ceb8839f0767 100644
--- a/tools/perf/pmu-events/arch/x86/meteorlake/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/uncore-memory.json
@@ -100,6 +100,24 @@
"Unit": "iMC"
},
{
+ "BriefDescription": "Any Rank at Hot state",
+ "Counter": "0,1,2,3,4",
+ "EventCode": "0x19",
+ "EventName": "UNC_M_DRAM_THERMAL_HOT",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Any Rank at Warm state",
+ "Counter": "0,1,2,3,4",
+ "EventCode": "0x1A",
+ "EventName": "UNC_M_DRAM_THERMAL_WARM",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "PRE command sent to DRAM due to page table idle timer expiration",
"Counter": "0,1,2,3,4",
"EventCode": "0x28",
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/cache.json b/tools/perf/pmu-events/arch/x86/nehalemep/cache.json
index b90026df2ce7..c9d154f1d09a 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemep/cache.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/cache.json
@@ -240,6 +240,38 @@
"UMask": "0x2"
},
{
+ "BriefDescription": "L1I instruction fetch stall cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "L1I.CYCLES_STALLED",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "L1I instruction fetch hits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "L1I.HITS",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "L1I instruction fetch misses",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "L1I.MISSES",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "L1I Instruction fetches",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "L1I.READS",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x3"
+ },
+ {
"BriefDescription": "All L2 data requests",
"Counter": "0,1,2,3",
"EventCode": "0x26",
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/other.json b/tools/perf/pmu-events/arch/x86/nehalemep/other.json
index f6887b234b0e..5fe5ca778e9f 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemep/other.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/other.json
@@ -16,46 +16,6 @@
"UMask": "0x1"
},
{
- "BriefDescription": "L1I instruction fetch stall cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "L1I.CYCLES_STALLED",
- "SampleAfterValue": "2000000",
- "UMask": "0x4"
- },
- {
- "BriefDescription": "L1I instruction fetch hits",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "L1I.HITS",
- "SampleAfterValue": "2000000",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "L1I instruction fetch misses",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "L1I.MISSES",
- "SampleAfterValue": "2000000",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "L1I Instruction fetches",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "L1I.READS",
- "SampleAfterValue": "2000000",
- "UMask": "0x3"
- },
- {
- "BriefDescription": "Large ITLB hit",
- "Counter": "0,1,2,3",
- "EventCode": "0x82",
- "EventName": "LARGE_ITLB.HIT",
- "SampleAfterValue": "200000",
- "UMask": "0x1"
- },
- {
"BriefDescription": "All loads dispatched",
"Counter": "0,1,2,3",
"EventCode": "0x13",
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/virtual-memory.json b/tools/perf/pmu-events/arch/x86/nehalemep/virtual-memory.json
index e88c0802e679..accd263cfbfd 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemep/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/virtual-memory.json
@@ -89,6 +89,14 @@
"UMask": "0x20"
},
{
+ "BriefDescription": "Large ITLB hit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "LARGE_ITLB.HIT",
+ "SampleAfterValue": "200000",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Retired loads that miss the DTLB (Precise Event)",
"Counter": "0,1,2,3",
"EventCode": "0xCB",
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/cache.json b/tools/perf/pmu-events/arch/x86/nehalemex/cache.json
index 2c0ea6f8c4e0..b6c6b22a3188 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/cache.json
@@ -240,6 +240,38 @@
"UMask": "0x2"
},
{
+ "BriefDescription": "L1I instruction fetch stall cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "L1I.CYCLES_STALLED",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "L1I instruction fetch hits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "L1I.HITS",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "L1I instruction fetch misses",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "L1I.MISSES",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "L1I Instruction fetches",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "L1I.READS",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x3"
+ },
+ {
"BriefDescription": "All L2 data requests",
"Counter": "0,1,2,3",
"EventCode": "0x26",
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/other.json b/tools/perf/pmu-events/arch/x86/nehalemex/other.json
index f6887b234b0e..5fe5ca778e9f 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemex/other.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/other.json
@@ -16,46 +16,6 @@
"UMask": "0x1"
},
{
- "BriefDescription": "L1I instruction fetch stall cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "L1I.CYCLES_STALLED",
- "SampleAfterValue": "2000000",
- "UMask": "0x4"
- },
- {
- "BriefDescription": "L1I instruction fetch hits",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "L1I.HITS",
- "SampleAfterValue": "2000000",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "L1I instruction fetch misses",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "L1I.MISSES",
- "SampleAfterValue": "2000000",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "L1I Instruction fetches",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "L1I.READS",
- "SampleAfterValue": "2000000",
- "UMask": "0x3"
- },
- {
- "BriefDescription": "Large ITLB hit",
- "Counter": "0,1,2,3",
- "EventCode": "0x82",
- "EventName": "LARGE_ITLB.HIT",
- "SampleAfterValue": "200000",
- "UMask": "0x1"
- },
- {
"BriefDescription": "All loads dispatched",
"Counter": "0,1,2,3",
"EventCode": "0x13",
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/nehalemex/virtual-memory.json
index e88c0802e679..accd263cfbfd 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemex/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/virtual-memory.json
@@ -89,6 +89,14 @@
"UMask": "0x20"
},
{
+ "BriefDescription": "Large ITLB hit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "LARGE_ITLB.HIT",
+ "SampleAfterValue": "200000",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Retired loads that miss the DTLB (Precise Event)",
"Counter": "0,1,2,3",
"EventCode": "0xCB",
diff --git a/tools/perf/pmu-events/arch/x86/pantherlake/cache.json b/tools/perf/pmu-events/arch/x86/pantherlake/cache.json
new file mode 100644
index 000000000000..91f5ab908926
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/pantherlake/cache.json
@@ -0,0 +1,1413 @@
+[
+ {
+ "BriefDescription": "Counts the number of cache lines replaced in L0 data cache.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x51",
+ "EventName": "L1D.L0_REPLACEMENT",
+ "PublicDescription": "Counts L0 data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cachelines replaced into the L1 d-cache. Successful replacements only (not blocked) and exclude WB-miss case",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x51",
+ "EventName": "L1D.L1_REPLACEMENT",
+ "PublicDescription": "Counts cachelines replaced into the L1 d-cache.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cachelines replaced into the L0 and L1 d-cache. Successful replacements only (not blocked) and exclude WB-miss case",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
+ "PublicDescription": "Counts cachelines replaced into the L0 and L1 d-cache.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x49",
+ "EventName": "L1D_MISS.FB_FULL",
+ "PublicDescription": "Counts number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of cycles a demand request has waited due to L1D due to lack of L2 resources.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x49",
+ "EventName": "L1D_MISS.L2_STALLS",
+ "PublicDescription": "Counts number of cycles a demand request has waited due to L1D due to lack of L2 resources. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of demand requests that missed L1D cache",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x49",
+ "EventName": "L1D_MISS.LOAD",
+ "PublicDescription": "Count occurrences (rising-edge) of DCACHE_PENDING sub-event0. Impl. sends per-port binary inc-bit the occupancy increases* (at FB alloc or promotion).",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of L1D misses that are outstanding",
+ "Counter": "2",
+ "EventCode": "0x48",
+ "EventName": "L1D_PENDING.LOAD",
+ "PublicDescription": "Counts number of L1D misses that are outstanding in each cycle, that is each cycle the number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "2",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PENDING.LOAD_CYCLES",
+ "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 cache lines filling L2",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x25",
+ "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1f",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of cache lines filled into the L2 cache that are in Exclusive state",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x25",
+ "EventName": "L2_LINES_IN.E",
+ "PublicDescription": "Counts the number of cache lines filled into the L2 cache that are in Exclusive state. Counts on a per core basis.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Modified cache lines that are evicted by L2 cache when triggered by an L2 cache fill.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.NON_SILENT",
+ "PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Non-modified cache lines that are silently dropped by L2 cache.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.SILENT",
+ "PublicDescription": "Counts the number of lines that are silently dropped by L2 cache. These lines are typically in Shared or Exclusive state. A non-threaded event.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cache lines that have been L2 hardware prefetched but not used by demand accesses",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.USELESS_HWPF",
+ "PublicDescription": "Counts the number of cache lines that have been prefetched by the L2 hardware prefetcher but not used by demand access when evicted from the L2 cache",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of L2 cache accesses from front door requests for Code Read, Data Read, RFO, ITOM, and L2 Prefetches. Does not include rejects or recycles, per core event.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.ALL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1ff",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "All accesses to L2 cache [This event is alias to L2_RQSTS.REFERENCES, L2_RQSTS.ANY]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.ALL",
+ "PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. [This event is alias to L2_RQSTS.REFERENCES, L2_RQSTS.ANY]",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of L2 cache accesses from front door requests that resulted in a Hit. Does not include rejects or recycles, per core event.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.HIT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1bf",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Read requests with true-miss in L2 cache [This event is alias to L2_RQSTS.MISS]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.MISS",
+ "PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses. [This event is alias to L2_RQSTS.MISS]",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3f",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 code requests",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PublicDescription": "Counts the total number of L2 code requests.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Demand Data Read access L2 cache",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "PublicDescription": "Counts Demand Data Read requests accessing the L2 cache. These requests may hit or miss L2 cache. True-miss exclude misses that were merged with ongoing L2 misses. An access is counted once.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "All accesses to L2 cache [This event is alias to L2_RQSTS.REFERENCES, L2_REQUEST.ALL]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ANY",
+ "PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. [This event is alias to L2_RQSTS.REFERENCES, L2_REQUEST.ALL]",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x44",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 cache misses when fetching instructions",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "PublicDescription": "Counts L2 cache misses when fetching instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x24",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "Counts the number of demand Data Read requests initiated by load instructions that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x41",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Demand Data Read miss L2 cache",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "PublicDescription": "Counts demand Data Read requests with true-miss in the L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. An access is counted once.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x21",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Read requests with true-miss in L2 cache [This event is alias to L2_REQUEST.MISS]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.MISS",
+ "PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses. [This event is alias to L2_REQUEST.MISS]",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3f",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "All accesses to L2 cache [This event is alias to L2_REQUEST.ALL,L2_RQSTS.ANY]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. [This event is alias to L2_REQUEST.ALL,L2_RQSTS.ANY]",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "RFO requests that hit L2 cache",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x42",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x22",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when L1D is locked",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x42",
+ "EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
+ "PublicDescription": "This event counts the number of cycles when the L1D is locked.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of cacheable memory requests that miss in the LLC. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "Counts the number of cacheable memory requests that miss in the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the core has access to an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x41",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Core-originated cacheable requests that missed L3 (Except hardware prefetches to the L3)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of cacheable memory requests that access the LLC. Counts on a per core basis.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "Counts the number of cacheable memory requests that access the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the core has access to an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4f",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Core-originated cacheable requests that refer to L3 (Except hardware prefetches to the L3)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4f",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to an L1 demand load miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS_LOAD.ALL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7f",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the L2 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS_LOAD.L2_HIT",
+ "PublicDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the L2 cache.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the L2 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS_LOAD.L2_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7e",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which hit in the LLC.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS_LOAD.LLC_HIT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x6",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which missed all the local caches.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS_LOAD.LLC_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x38",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which missed all the caches. DRAM, MMIO or other LOCAL memory type provides the data.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS_LOAD.LLC_MISS_LOCALMEM",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which missed all the caches, a snoop was required, and hits in other core or module on same die. Another core provides the data with a fwd, no fwd, or hitM.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS_LOAD.LLC_MISS_OTHERMOD",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts all retired load instructions.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_LOADS",
+ "PublicDescription": "Counts Instructions with at least one architecturally visible load retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x81",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired store instructions.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_STORES",
+ "PublicDescription": "Counts all retired store instructions. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x82",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired software prefetch instructions.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_SWPF",
+ "PublicDescription": "Counts all retired software prefetch instructions. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x84",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "All retired memory instructions.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ANY",
+ "PublicDescription": "Counts all retired memory instructions - loads and stores. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x87",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions with locked access.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
+ "PublicDescription": "Counts retired load instructions with locked access. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x21",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
+ "PublicDescription": "Counts retired load instructions that split across a cacheline boundary. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
+ "PublicDescription": "Counts retired store instructions that split across a cacheline boundary. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x42",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that hit the STLB.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_HIT_ANY",
+ "PublicDescription": "Number of retired instructions with a clean hit in the 2nd-level TLB (STLB). Available PDIST counters: 0,1",
+ "SampleAfterValue": "100003",
+ "UMask": "0xf",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions that hit the STLB.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_HIT_LOADS",
+ "PublicDescription": "Number of retired load instructions with a clean hit in the 2nd-level TLB (STLB). Available PDIST counters: 0,1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x9",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired store instructions that hit the STLB.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_HIT_STORES",
+ "PublicDescription": "Number of retired store instructions that hit in the 2nd-level TLB (STLB). Available PDIST counters: 0,1",
+ "SampleAfterValue": "100003",
+ "UMask": "0xa",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired SWPF instructions that hit the STLB.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_HIT_SWPF",
+ "PublicDescription": "Number of retired SWPF instructions that hit in the 2nd-level TLB (STLB). Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xc",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_ANY",
+ "PublicDescription": "Number of retired instructions that (start a) miss in the 2nd-level TLB (STLB). Available PDIST counters: 0,1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x17",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
+ "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB). Available PDIST counters: 0,1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x11",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired store instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
+ "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB). Available PDIST counters: 0,1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x12",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired SWPF instructions that miss the STLB.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_SWPF",
+ "PublicDescription": "Number of retired SWPF instructions that (start a) miss in the 2nd-level TLB (STLB). Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x14",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were a cross-core Snoop hits and forwards data from an in on-package core cache (induced by NI$)",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD",
+ "PublicDescription": "Counts retired load instructions whose data sources were a cross-core Snoop hits and forwards data from an in on-package core cache (induced by NI$) Available PDIST counters: 0,1",
+ "SampleAfterValue": "20011",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3, Hit-with-FWD is normally excluded.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
+ "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3, Hit-with-FWD is normally excluded. Available PDIST counters: 0,1",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
+ "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache. Available PDIST counters: 0,1",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD",
+ "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache. Available PDIST counters: 0,1",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions which data source is memory side cache.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.MEMSIDE_CACHE",
+ "PublicDescription": "Retired load instructions which data source is memory side cache. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd4",
+ "EventName": "MEM_LOAD_MISC_RETIRED.UC",
+ "PublicDescription": "Retired instructions with at least one load to uncacheable memory-type, or at least one cache-line split locked access (Bus Lock). Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of completed demand load requests that missed the L1, but hit the FB(fill buffer), because a preceding miss to the same cacheline initiated the line to be brought into L1, but data is not yet ready in L1.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.FB_HIT",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_HIT",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x101",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts retired load instructions with at least one uop that hit in the Level 0 of the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_HIT_L0",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the Level 0 of the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts retired load instructions with at least one uop that hit in the Level 1 of the L1 data cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_HIT_L1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the Level 1 of the L1 data cache. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_MISS",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache. Available PDIST counters: 0,1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
+ "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources. Available PDIST counters: 0,1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_MISS",
+ "PublicDescription": "Counts retired load instructions missed L2 cache as data sources. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100021",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_HIT",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100021",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_MISS",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache. Available PDIST counters: 0,1",
+ "SampleAfterValue": "50021",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of load ops retired that miss the L3 cache and hit in DRAM",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
+ "PublicDescription": "Counts the number of load ops retired that miss the L3 cache and hit in DRAM Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of load ops retired that miss the L3 cache and hit in memside cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.MEMSIDE_CACHE",
+ "PublicDescription": "Counts the number of load ops retired that miss the L3 cache and hit in memside cache. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of load ops retired that hit in the L3 cache in which a snoop was required and modified data was forwarded.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd4",
+ "EventName": "MEM_LOAD_UOPS_MISC_RETIRED.L3_HIT_SNOOP_HITM",
+ "PublicDescription": "Counts the number of load ops retired that hit in the L3 cache in which a snoop was required and modified data was forwarded. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of load ops retired that hit the L1 data cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
+ "PublicDescription": "Counts the number of load ops retired that hit the L1 data cache. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of load ops retired that miss in the L1 data cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
+ "PublicDescription": "Counts the number of load ops retired that miss in the L1 data cache. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of load ops retired that hit in the L2 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "PublicDescription": "Counts the number of load ops retired that hit in the L2 cache. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of load ops retired that miss in the L2 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
+ "PublicDescription": "Counts the number of load ops retired that miss in the L2 cache. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of load ops retired that hit in the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
+ "PublicDescription": "Counts the number of load ops retired that hit in the L3 cache. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1c",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that uops are blocked for any of the following reasons: load buffer, store buffer or RSV full.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x04",
+ "EventName": "MEM_SCHEDULER_BLOCK.ALL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that uops are blocked due to a load buffer full condition.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x04",
+ "EventName": "MEM_SCHEDULER_BLOCK.LD_BUF",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that uops are blocked due to an RSV full condition.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x04",
+ "EventName": "MEM_SCHEDULER_BLOCK.RSV",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that uops are blocked due to a store buffer full condition.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x04",
+ "EventName": "MEM_SCHEDULER_BLOCK.ST_BUF",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "MEM_STORE_RETIRED.L2_HIT",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x44",
+ "EventName": "MEM_STORE_RETIRED.L2_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of cache-lines required by retired stores whose Data Source is: Memory Side Cache",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x44",
+ "EventName": "MEM_STORE_RETIRED.MEMSIDE_CACHE",
+ "SampleAfterValue": "100021",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of memory uops retired. A single uop that performs both a load AND a store will be counted as 1, not 2 (e.g. ADD [mem], CONST).",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.ALL",
+ "PublicDescription": "Counts the number of memory uops retired. A single uop that performs both a load AND a store will be counted as 1, not 2 (e.g. ADD [mem], CONST). Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x83",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of load ops retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "PublicDescription": "Counts the number of load ops retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x81",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of store ops retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "PublicDescription": "Counts the number of store ops retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x82",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_1024",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x400",
+ "PublicDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PublicDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PublicDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_2048",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x800",
+ "PublicDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PublicDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PublicDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PublicDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PublicDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PublicDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PublicDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired that performed one or more locks",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "PublicDescription": "Counts the number of load uops retired that performed one or more locks Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x21",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of memory uops retired that were splits.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT",
+ "PublicDescription": "Counts the number of memory uops retired that were splits. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x43",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of retired split load uops.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "PublicDescription": "Counts the number of retired split load uops. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x41",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of retired split store uops.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
+ "PublicDescription": "Counts the number of retired split store uops. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x42",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of memory uops retired that missed in the second level TLB.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS",
+ "PublicDescription": "Counts the number of memory uops retired that missed in the second level TLB. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x13",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired that miss in the second Level TLB.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
+ "PublicDescription": "Counts the number of load uops retired that miss in the second Level TLB. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x11",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of store uops retired that miss in the second level TLB.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
+ "PublicDescription": "Counts the number of store uops retired that miss in the second level TLB. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x12",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of stores uops retired same as MEM_UOPS_RETIRED.ALL_STORES",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.STORE_LATENCY",
+ "PublicDescription": "Counts the number of stores uops retired same as MEM_UOPS_RETIRED.ALL_STORES Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x6",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Retired memory uops for any access",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xe5",
+ "EventName": "MEM_UOP_RETIRED.ANY",
+ "PublicDescription": "Number of retired micro-operations (uops) for load or store memory accesses",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xf",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts writebacks of modified cachelines that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.COREWB_M.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x7E001E00008",
+ "PublicDescription": "Counts writebacks of modified cachelines that were supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts writebacks of non-modified cachelines that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.COREWB_NONM.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x7E001E01000",
+ "PublicDescription": "Counts writebacks of non-modified cachelines that were supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "PublicDescription": "Counts demand data reads that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "PublicDescription": "Counts demand data reads that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x40001E00001",
+ "PublicDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop hit in another cores caches, data forwarding is required as the data is modified. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop hit in another cores caches which forwarded the unmodified data to the requesting core.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x20001E00001",
+ "PublicDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop hit in another cores caches which forwarded the unmodified data to the requesting core. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "PublicDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "PublicDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x40001E00002",
+ "PublicDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop hit in another cores caches, data forwarding is required as the data is modified. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read, RFO and ITOM requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x7E001E04477",
+ "PublicDescription": "Counts all data read, code read, RFO and ITOM requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Any memory transaction that reached the SQ.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "PublicDescription": "Counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, etc..",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Demand and prefetch data reads",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.DATA_RD",
+ "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cacheable and Non-Cacheable code read requests",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "PublicDescription": "Counts both cacheable and Non-Cacheable code read requests.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles with offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
+ "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles where at least 1 outstanding demand data read request is pending.",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "PublicDescription": "Counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
+ "PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "For every cycle, increments by the number of outstanding demand data read requests pending.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "PublicDescription": "For every cycle, increments by the number of outstanding demand data read requests pending. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Store Read transactions pending for off-core. Highly correlated.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "PublicDescription": "Counts the number of off-core outstanding read-for-ownership (RFO) store transactions every cycle. An RFO transaction is considered to be in the Off-core outstanding state between L2 cache miss and transaction completion.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts bus locks, accounts for cache line split locks and UC locks.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x2c",
+ "EventName": "SQ_MISC.BUS_LOCK",
+ "PublicDescription": "Counts the more expensive bus lock needed to enforce cache coherency for certain memory accesses that need to be done atomically. Can be created by issuing an atomic instruction (via the LOCK prefix) which causes a cache line split or accesses uncacheable memory.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of PREFETCHNTA, PREFETCHW, PREFETCHT0, PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.ANY",
+ "SampleAfterValue": "100003",
+ "UMask": "0xf",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.NTA",
+ "PublicDescription": "Counts the number of PREFETCHNTA instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHW instructions executed.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
+ "PublicDescription": "Counts the number of PREFETCHW instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.T0",
+ "PublicDescription": "Counts the number of PREFETCHT0 instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.T1_T2",
+ "PublicDescription": "Counts the number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to an icache miss",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.ICACHE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20",
+ "Unit": "cpu_atom"
+ }
+]
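These tables are consumed by perf's jevents build, but each entry also fully specifies a raw hardware event. As a minimal sketch (not the perf tool's own plumbing): the standard x86 raw encoding places EventCode in bits 0-7 and UMask in bits 8-15 of the config word. On a hybrid part like this one the PMU type id would be read from /sys/bus/event_source/devices/cpu_atom/type; PERF_TYPE_RAW is assumed here only to keep the example short.

/* Hedged sketch: counting MEM_UOPS_RETIRED.ALL_LOADS (EventCode 0xd0,
 * UMask 0x81 from the table above) with perf_event_open(2).
 */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr = { 0 };
	long long count = 0;
	int fd;

	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;		/* hybrid: use the sysfs type id instead */
	attr.config = 0xd0 | (0x81 << 8);	/* EventCode | (UMask << 8) */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... run the code under measurement here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	read(fd, &count, sizeof(count));
	printf("MEM_UOPS_RETIRED.ALL_LOADS: %lld\n", count);
	close(fd);
	return 0;
}

Once the JSON above is compiled in, the same counter is reachable by name, e.g. perf stat -e cpu_atom/MEM_UOPS_RETIRED.ALL_LOADS/.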
diff --git a/tools/perf/pmu-events/arch/x86/pantherlake/counter.json b/tools/perf/pmu-events/arch/x86/pantherlake/counter.json
new file mode 100644
index 000000000000..432b6946ccbc
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/pantherlake/counter.json
@@ -0,0 +1,17 @@
+[
+ {
+ "Unit": "cpu_atom",
+ "CountersNumFixed": "7",
+ "CountersNumGeneric": "8"
+ },
+ {
+ "Unit": "cpu_core",
+ "CountersNumFixed": "4",
+ "CountersNumGeneric": "10"
+ },
+ {
+ "Unit": "iMC",
+ "CountersNumFixed": "0",
+ "CountersNumGeneric": "5"
+ }
+]
\ No newline at end of file
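counter.json records, per PMU unit, how many fixed and general-purpose counters exist; the "Counter" lists in the event files are expected to stay inside the generic range (ten for cpu_core, eight for cpu_atom, matching the "0,1,2,3,4,5,6,7,8,9" and "0,1,2,3,4,5,6,7" lists above). A hypothetical consistency check, sketched here and not part of the jevents build, could verify that invariant:

/* Hedged sketch: reject an event whose "Counter" list names a counter
 * index beyond the unit's CountersNumGeneric from counter.json.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

static bool counters_in_range(const char *counter_list, int num_generic)
{
	char *copy = strdup(counter_list);
	char *tok, *save = NULL;
	bool ok = true;

	for (tok = strtok_r(copy, ",", &save); tok;
	     tok = strtok_r(NULL, ",", &save)) {
		if (atoi(tok) >= num_generic) {
			ok = false;
			break;
		}
	}
	free(copy);
	return ok;
}

int main(void)
{
	/* cpu_core: 10 generic counters per counter.json above */
	printf("%d\n", counters_in_range("0,1,2,3,4,5,6,7,8,9", 10)); /* 1 */
	/* flags a counter index beyond the declared range */
	printf("%d\n", counters_in_range("0,1,2,3,4,5,6,7,8,9", 8));  /* 0 */
	return 0;
}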
diff --git a/tools/perf/pmu-events/arch/x86/pantherlake/floating-point.json b/tools/perf/pmu-events/arch/x86/pantherlake/floating-point.json
new file mode 100644
index 000000000000..e306a45b22ee
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/pantherlake/floating-point.json
@@ -0,0 +1,359 @@
+[
+ {
+ "BriefDescription": "Cycles when floating-point divide unit is busy executing divide or square root operations.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.FPDIV_ACTIVE",
+ "PublicDescription": "Counts cycles when divide unit is busy executing divide or square root operations. Accounts for floating-point operations only.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts all microcode FP assists.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.FP",
+ "PublicDescription": "Counts all microcode Floating Point assists.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "ASSISTS.SSE_AVX_MIX",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.SSE_AVX_MIX",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of FP-arith-uops dispatched on 1st VEC port (port 0). FP-arith-uops are of type ADD* / SUB* / MUL / FMA* / DPP.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.V0",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of FP-arith-uops dispatched on 2nd VEC port (port 1)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.V1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of FP-arith-uops dispatched on 3rd VEC port (port 5)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.V2",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of FP-arith-uops dispatched on 4th VEC port",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.V3",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_OPS_RETIRED.128B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_OPS_RETIRED.128B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_OPS_RETIRED.256B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_OPS_RETIRED.256B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single and 256-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and packed double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_OPS_RETIRED.4_FLOPS",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision and 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x18",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 RANGE SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_OPS_RETIRED.SCALAR",
+ "PublicDescription": "Number of SSE/AVX computational scalar single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_OPS_RETIRED.SCALAR_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_OPS_RETIRED.SCALAR_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of any Vector retired FP arithmetic instructions",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_OPS_RETIRED.VECTOR",
+ "PublicDescription": "Number of any Vector retired FP arithmetic instructions. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3c",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "FP_ARITH_OPS_RETIRED.VECTOR_128B",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_OPS_RETIRED.VECTOR_128B",
+ "SampleAfterValue": "100003",
+ "UMask": "0xc",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "FP_ARITH_OPS_RETIRED.VECTOR_256B",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_OPS_RETIRED.VECTOR_256B",
+ "SampleAfterValue": "100003",
+ "UMask": "0x30",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of all types of floating point operations per uop with all default weighting",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc8",
+ "EventName": "FP_FLOPS_RETIRED.ALL",
+ "PublicDescription": "Counts the number of all types of floating point operations per uop with all default weighting Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of floating point operations that produce 32 bit single precision results",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc8",
+ "EventName": "FP_FLOPS_RETIRED.FP32",
+ "PublicDescription": "Counts the number of floating point operations that produce 32 bit single precision results Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of floating point operations that produce 64 bit double precision results",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc8",
+ "EventName": "FP_FLOPS_RETIRED.FP64",
+ "PublicDescription": "Counts the number of floating point operations that produce 64 bit double precision results Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of retired instructions whose sources are a packed 128 bit double precision floating point. This may be SSE or AVX.128 operations.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_INST_RETIRED.128B_DP",
+ "PublicDescription": "Counts the number of retired instructions whose sources are a packed 128 bit double precision floating point. This may be SSE or AVX.128 operations. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of retired instructions whose sources are a packed 128 bit single precision floating point. This may be SSE or AVX.128 operations.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_INST_RETIRED.128B_SP",
+ "PublicDescription": "Counts the number of retired instructions whose sources are a packed 128 bit single precision floating point. This may be SSE or AVX.128 operations. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of retired instructions whose sources are a packed 256 bit double precision floating point.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_INST_RETIRED.256B_DP",
+ "PublicDescription": "Counts the number of retired instructions whose sources are a packed 256 bit double precision floating point. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of retired instructions whose sources are a packed 256 bit single precision floating point.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_INST_RETIRED.256B_SP",
+ "PublicDescription": "Counts the number of retired instructions whose sources are a packed 256 bit single precision floating point. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of retired instructions whose sources are a scalar 32bit single precision floating point.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_INST_RETIRED.32B_SP",
+ "PublicDescription": "Counts the number of retired instructions whose sources are a scalar 32bit single precision floating point. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of retired instructions whose sources are a scalar 64 bit double precision floating point.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_INST_RETIRED.64B_DP",
+ "PublicDescription": "Counts the number of retired instructions whose sources are a scalar 64 bit double precision floating point. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the total number of floating point retired instructions.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_INST_RETIRED.ALL",
+ "PublicDescription": "Counts the total number of floating point retired instructions. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3f",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on all floating point ports.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb2",
+ "EventName": "FP_VINT_UOPS_EXECUTED.ALL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1f",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on floating point and vector integer port 0.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb2",
+ "EventName": "FP_VINT_UOPS_EXECUTED.P0",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on floating point and vector integer port 1.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb2",
+ "EventName": "FP_VINT_UOPS_EXECUTED.P1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on floating point and vector integer port 2.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb2",
+ "EventName": "FP_VINT_UOPS_EXECUTED.P2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on floating point and vector integer port 3.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb2",
+ "EventName": "FP_VINT_UOPS_EXECUTED.P3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on floating point and vector integer port 0, 1, 2, 3.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb2",
+ "EventName": "FP_VINT_UOPS_EXECUTED.PRIMARY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1e",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on floating point and vector integer store data port.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb2",
+ "EventName": "FP_VINT_UOPS_EXECUTED.STD",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of floating point operations retired that required microcode assist.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.FP_ASSIST",
+ "PublicDescription": "Counts the number of floating point operations retired that required microcode assist, which is not a reflection of the number of FP operations, instructions or uops.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of floating point divide uops retired (x87 and sse, including x87 sqrt).",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.FPDIV",
+ "PublicDescription": "Counts the number of floating point divide uops retired (x87 and sse, including x87 sqrt). Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40",
+ "Unit": "cpu_atom"
+ }
+]
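Each cpu_core FP_ARITH_OPS_RETIRED description above states how many computational operations one count represents: 1 for SCALAR, 2 for 128B_PACKED_DOUBLE, 4 for 128B_PACKED_SINGLE and 256B_PACKED_DOUBLE, and 8 for 256B_PACKED_SINGLE (FMA-class instructions are already double-counted by the events themselves). A minimal sketch of combining raw counts into a FLOP total under those stated weights, with hypothetical variable names and the counter reads assumed to happen elsewhere:

/* Hedged sketch: FLOP total from FP_ARITH_OPS_RETIRED counts, using
 * the per-count element weights given in the event descriptions above.
 */
#include <stdint.h>

static uint64_t fp_arith_flops(uint64_t scalar, uint64_t pd128,
			       uint64_t ps128, uint64_t pd256,
			       uint64_t ps256)
{
	return scalar		/* SCALAR: 1 op per count */
	     + 2 * pd128	/* 128B_PACKED_DOUBLE: 2 elements */
	     + 4 * ps128	/* 128B_PACKED_SINGLE: 4 elements */
	     + 4 * pd256	/* 256B_PACKED_DOUBLE: 4 elements */
	     + 8 * ps256;	/* 256B_PACKED_SINGLE: 8 elements */
}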
diff --git a/tools/perf/pmu-events/arch/x86/pantherlake/frontend.json b/tools/perf/pmu-events/arch/x86/pantherlake/frontend.json
new file mode 100644
index 000000000000..d36faa683d3f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/pantherlake/frontend.json
@@ -0,0 +1,565 @@
+[
+ {
+ "BriefDescription": "Counts the total number of BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.ANY",
+ "PublicDescription": "Counts the total number of BACLEARS, which occur when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Clears due to Unknown Branches.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x60",
+ "EventName": "BACLEARS.ANY",
+ "PublicDescription": "Number of times the front-end is resteered when it finds a branch instruction in a fetch line. This is called Unknown Branch which occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of BACLEARS due to a conditional jump.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.COND",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of BACLEARS due to an indirect branch.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.INDIRECT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of BACLEARS due to a return branch.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.RETURN",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of BACLEARS due to a direct, unconditional jump.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.UNCOND",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x87",
+ "EventName": "DECODE.LCP",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles the Microcode Sequencer is busy.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x87",
+ "EventName": "DECODE.MS_BUSY",
+ "SampleAfterValue": "500009",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "DSB-to-MITE switch true penalty cycles.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x61",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "PublicDescription": "Decode Stream Buffer (DSB) is a Uop-cache that holds translations of previously fetched instructions that were decoded by the legacy x86 decode pipeline (MITE). This event counts fetch penalty cycles when a transition occurs from DSB to MITE.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired ANT branches",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ANY_ANT",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x9",
+ "PublicDescription": "Always Not Taken (ANT) conditional retired branches (no BTB entry and not mispredicted) Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced DSB miss.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x1",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced a critical DSB miss.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.DSB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x11",
+ "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of instructions retired that were tagged because empty issue slots were seen before the uop due to ITLB miss",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ITLB_MISS",
+ "PublicDescription": "Counts the number of instructions retired that were tagged because empty issue slots were seen before the uop due to ITLB miss Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ITLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x14",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.L1I_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x12",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.L2_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x13",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x608006",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x601006",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions after front-end starvation of at least 2 cycles",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x600206",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x610006",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x100206",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x602006",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x600406",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x620006",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x604006",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x600806",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Mispredicted Retired ANT branches",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.MISP_ANT",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x9",
+ "PublicDescription": "ANT retired branches that got just mispredicted Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts flows delivered by the Microcode Sequencer",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.MS_FLOWS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x8",
+ "PublicDescription": "Counts flows delivered by the Microcode Sequencer Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.STLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x15",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that caused clears due to being Unknown Branches.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x17",
+ "PublicDescription": "Number retired branch instructions that caused the front-end to be resteered when it finds the instruction in a fetch line. This is called Unknown Branch which occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of instructions retired that were tagged because empty issue slots were seen before the uop due to Instruction L1 cache miss, that hit in the L2 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc9",
+ "EventName": "FRONTEND_RETIRED_SOURCE.ICACHE_L2_HIT",
+ "PublicDescription": "Counts the number of instructions retired that were tagged because empty issue slots were seen before the uop due to Instruction L1 cache miss, that hit in the L2 cache. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of instructions retired that were tagged because empty issue slots were seen before the uop due to ITLB miss that hit in the second level TLB.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc9",
+ "EventName": "FRONTEND_RETIRED_SOURCE.ITLB_STLB_HIT",
+ "PublicDescription": "Counts the number of instructions retired that were tagged because empty issue slots were seen before the uop due to ITLB miss that hit in the second level TLB. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of instructions retired that were tagged because empty issue slots were seen before the uop due to ITLB miss that also missed the second level TLB.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc9",
+ "EventName": "FRONTEND_RETIRED_SOURCE.ITLB_STLB_MISS",
+ "PublicDescription": "Counts the number of instructions retired that were tagged because empty issue slots were seen before the uop due to ITLB miss that also missed the second level TLB. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts every time the code stream enters into a new cache line by walking sequential from the previous line or being redirected by a jump.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.ACCESSES",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts every time the code stream enters into a new cache line by walking sequential from the previous line or being redirected by a jump and the instruction cache registers bytes are present.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.HIT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts every time the code stream enters into a new cache line by walking sequential from the previous line or being redirected by a jump and the instruction cache registers bytes are not present. -",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.MISSES",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x80",
+ "EventName": "ICACHE_DATA.STALLS",
+ "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The decode pipeline works at a 32 Byte granularity.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "ICACHE_DATA.STALL_PERIODS",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x80",
+ "EventName": "ICACHE_DATA.STALL_PERIODS",
+ "SampleAfterValue": "500009",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS",
+ "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_ANY",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles DSB is delivering optimal number of Uops",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "8",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_OK",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the DSB (Decode Stream Buffer) path. Count includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering any Uop",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES_ANY",
+ "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering optimal number of Uops",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "8",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES_OK",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when uops are being delivered to IDQ while MS is busy",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_CYCLES_ANY",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of switches from DSB or MITE to the MS",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_SWITCHES",
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops initiated by MITE or Decode Stream Buffer (DSB) and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "Counts the number of uops initiated by MITE or Decode Stream Buffer (DSB) and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event counts a subset of the Topdown Slots event that when no operation was delivered to the back-end pipeline due to instruction fetch limitations when the back-end could have accepted more operations. Common examples include instruction cache misses or x86 instruction decode limitations.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_BUBBLES.CORE",
+ "PublicDescription": "This event counts a subset of the Topdown Slots event that when no operation was delivered to the back-end pipeline due to instruction fetch limitations when the back-end could have accepted more operations. Common examples include instruction cache misses or x86 instruction decode limitations. Software can use this event as the numerator for the Frontend Bound metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "1",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_BUBBLES.CYCLES_FE_WAS_OK",
+ "Invert": "1",
+ "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when no uops are delivered by the IDQ for 2 or more cycles when backend of the machine is not stalled - normally indicating a Fetch Latency issue",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_BUBBLES.FETCH_LATENCY",
+ "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls for 2 or more cycles - normally indicating a Fetch Latency issue.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "8",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_BUBBLES.STARVATION_CYCLES",
+ "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ }
+]
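
A note on using these entries: every FRONTEND_RETIRED.LATENCY_GE_* event above shares EventCode 0xc6 and UMask 0x3 and differs only in the MSRValue programmed into MSR 0x3F7, which perf carries in config1. Below is a minimal counting sketch (not part of this patch) that opens the raw encoding directly with perf_event_open(). It assumes the hybrid core PMU advertises its type id under /sys/bus/event_source/devices/cpu_core/ and maps its "frontend" format field to config1, as recent Intel core PMUs do; verify both against the PMU's format directory on the target machine.

/*
 * Counting sketch for FRONTEND_RETIRED.LATENCY_GE_32 using the fields
 * from the JSON above: EventCode 0xc6, UMask 0x3, MSRValue 0x602006.
 * Assumption: the cpu_core PMU's "frontend" format field maps the
 * MSR 0x3F7 value to config1 (check .../cpu_core/format/frontend).
 */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int pmu_type(const char *path)
{
	FILE *f = fopen(path, "r");
	int type = -1;

	if (f) {
		if (fscanf(f, "%d", &type) != 1)
			type = -1;
		fclose(f);
	}
	return type;
}

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd, type;

	type = pmu_type("/sys/bus/event_source/devices/cpu_core/type");
	if (type < 0) {
		fprintf(stderr, "no cpu_core PMU found (not a hybrid part?)\n");
		return 1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;			/* dynamic PMU type from sysfs */
	attr.config = 0xc6 | (0x3 << 8);	/* EventCode | UMask << 8 */
	attr.config1 = 0x602006;		/* MSR 0x3F7 value: LATENCY_GE_32 */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... run the workload to be measured here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("FRONTEND_RETIRED.LATENCY_GE_32: %lld\n", count);
	close(fd);
	return 0;
}

The same skeleton covers every latency bucket in this file; only config1 changes (0x600406 for GE_4, 0x620006 for GE_512, and so on). Since the event is opened on the cpu_core PMU, it only counts while the measured thread runs on a P-core.
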
diff --git a/tools/perf/pmu-events/arch/x86/pantherlake/memory.json b/tools/perf/pmu-events/arch/x86/pantherlake/memory.json
new file mode 100644
index 000000000000..3d31e620383d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/pantherlake/memory.json
@@ -0,0 +1,302 @@
+[
+ {
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to a core bound stall including a store address match, a DTLB miss or a page walk that detains the load from retiring.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.L1_BOUND_AT_RET",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xf4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to request buffers full or lock in progress.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.WCB_FULL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears due to memory ordering caused by a snoop from an external agent. Does not count internally generated machine clears such as those due to memory disambiguation.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Number of machine clears due to memory ordering conflicts.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "Counts the number of Machine Clears detected dye to memory ordering. Memory Ordering Machine Clears may apply when a memory read may not conform to the memory ordering rules of the x86 architecture",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 1024 cycles.",
+ "Counter": "2,3,4,5,6,7,8,9",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_1024",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x400",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 1024 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0,1",
+ "SampleAfterValue": "53",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "Counter": "2,3,4,5,6,7,8,9",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1009",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "Counter": "2,3,4,5,6,7,8,9",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0,1",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 2048 cycles.",
+ "Counter": "2,3,4,5,6,7,8,9",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_2048",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x800",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 2048 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0,1",
+ "SampleAfterValue": "23",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "Counter": "2,3,4,5,6,7,8,9",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0,1",
+ "SampleAfterValue": "503",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "Counter": "2,3,4,5,6,7,8,9",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "Counter": "2,3,4,5,6,7,8,9",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "Counter": "2,3,4,5,6,7,8,9",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0,1",
+ "SampleAfterValue": "101",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "Counter": "2,3,4,5,6,7,8,9",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0,1",
+ "SampleAfterValue": "2003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "Counter": "2,3,4,5,6,7,8,9",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency. Available PDIST counters: 0,1",
+ "SampleAfterValue": "50021",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired memory store access operations. A PDist event for PEBS Store Latency Facility.",
+ "Counter": "0,1",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.STORE_SAMPLE",
+ "PublicDescription": "Counts Retired memory accesses with at least 1 store operation. This PEBS event is the precisely-distributed (PDist) trigger covering all stores uops for sampling by the PEBS Store Latency Facility. The facility is described in Intel SDM Volume 3 section 19.9.8 Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts misaligned loads that are 4K page splits.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x13",
+ "EventName": "MISALIGN_MEM_REF.LOAD_PAGE_SPLIT",
+ "PublicDescription": "Counts misaligned loads that are 4K page splits. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts misaligned stores that are 4K page splits.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x13",
+ "EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT",
+ "PublicDescription": "Counts misaligned stores that are 4K page splits. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x7BC000001",
+ "PublicDescription": "Counts demand data reads that were supplied by DRAM. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1E780000001",
+ "PublicDescription": "Counts demand data reads that were supplied by DRAM. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x13FBFC00001",
+ "PublicDescription": "Counts demand data reads that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x9E7FA000001",
+ "PublicDescription": "Counts demand data reads that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x13FBFC00002",
+ "PublicDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x9E7FA000002",
+ "PublicDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand data read requests that miss the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles where data return is pending for a Demand Data Read request who miss L3 cache.",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
+ "PublicDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "For every cycle, increments by the number of demand data read requests pending that are known to have missed the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
+ "PublicDescription": "For every cycle, increments by the number of demand data read requests pending that are known to have missed the L3 cache. Note that this does not capture all elapsed cycles while requests are outstanding - only cycles from when the requests were known by the requesting core to have missed the L3 cache.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ }
+]
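
For the load-latency family above, the MEM_TRANS_RETIRED.LOAD_LATENCY_GT_* entries differ only in the threshold written to MSR 0x3F6 (the MSRValue), and they are PEBS events restricted to counters 2-9. A hedged sketch of the corresponding perf_event_attr setup follows, assuming the core PMU's "ldlat" format field maps the threshold to config1 as on recent Intel parts (verify in the PMU's format directory):

/*
 * Sampling-attribute sketch for MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128
 * (EventCode 0xcd, UMask 0x1, MSRValue 0x80 == 128 cycles).
 * Assumption: "ldlat" maps to config1 on this PMU.
 */
#include <linux/perf_event.h>
#include <string.h>

static void loadlat_attr(struct perf_event_attr *attr, int cpu_core_type)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = cpu_core_type;		/* cpu_core PMU type from sysfs */
	attr->config = 0xcd | (0x1 << 8);	/* EventCode | UMask << 8 */
	attr->config1 = 128;			/* ldlat threshold -> MSR 0x3F6 */
	attr->sample_period = 1009;		/* SampleAfterValue from the JSON */
	attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR |
			    PERF_SAMPLE_WEIGHT | PERF_SAMPLE_DATA_SRC;
	attr->precise_ip = 3;			/* PEBS; Data_LA supplies the address */
	attr->disabled = 1;
}

Each PEBS record then carries the data address and the access latency in its weight field, which is how tooling builds the per-load latency histograms these thresholds bucket.
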
diff --git a/tools/perf/pmu-events/arch/x86/pantherlake/other.json b/tools/perf/pmu-events/arch/x86/pantherlake/other.json
new file mode 100644
index 000000000000..d49651d4f112
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/pantherlake/other.json
@@ -0,0 +1,44 @@
+[
+ {
+ "BriefDescription": "Count all other hardware assists or traps that are not necessarily architecturally exposed (through a software handler) beyond FP; SSE-AVX mix and A/D assists who are counted by dedicated sub-events.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.HARDWARE",
+ "PublicDescription": "Count all other hardware assists or traps that are not necessarily architecturally exposed (through a software handler) beyond FP; SSE-AVX mix and A/D assists who are counted by dedicated sub-events. This includes, but not limited to, assists at EXE or MEM uop writeback like AVX* load/store/gather/scatter (non-FP GSSE-assist ) , assists generated by ROB like PEBS and RTIT, Uncore trap, RAR (Remote Action Request) and CET (Control flow Enforcement Technology) assists.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "ASSISTS.PAGE_FAULT",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.PAGE_FAULT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10800",
+ "PublicDescription": "Counts streaming stores that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles the uncore cannot take further requests",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "1",
+ "EventCode": "0x2d",
+ "EventName": "XQ.FULL",
+ "PublicDescription": "number of cycles when the thread is active and the uncore cannot take any further requests (for example prefetches, loads or stores initiated by the Core that miss the L2 cache).",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ }
+]
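
The OCR.* entries in this file (and in memory.json above) are off-core response events: a generic event encoding (either listed EventCode works) qualified by a match value written to the OFFCORE_RSP MSR pair 0x1a6/0x1a7. A sketch under the assumption that perf's "offcore_rsp" format field carries that value in config1, as it does on existing Intel core PMUs:

/*
 * Attribute sketch for OCR.STREAMING_WR.ANY_RESPONSE (EventCode 0x2A or
 * 0x2B, UMask 0x1, MSRValue 0x10800). Assumption: "offcore_rsp" maps
 * the OFFCORE_RSP match value to config1 on this PMU.
 */
#include <linux/perf_event.h>
#include <string.h>

static void ocr_attr(struct perf_event_attr *attr, int cpu_core_type)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = cpu_core_type;		/* cpu_core PMU type from sysfs */
	attr->config = 0x2a | (0x1 << 8);	/* either 0x2A or 0x2B is valid */
	attr->config1 = 0x10800;		/* OFFCORE_RSP match: streaming WR, any response */
	attr->disabled = 1;
}

Because there are only two OFFCORE_RSP MSRs, at most two distinct OCR match values can be programmed at once; requesting more causes perf to multiplex the events.
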
diff --git a/tools/perf/pmu-events/arch/x86/pantherlake/pipeline.json b/tools/perf/pmu-events/arch/x86/pantherlake/pipeline.json
new file mode 100644
index 000000000000..fb87d30c403d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/pantherlake/pipeline.json
@@ -0,0 +1,2194 @@
+[
+ {
+ "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.DIV_ACTIVE",
+ "PublicDescription": "Counts cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x9",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when integer divide unit is busy executing divide or square root operations.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.IDIV_ACTIVE",
+ "PublicDescription": "Counts cycles when divide unit is busy executing divide or square root operations. Accounts for integer operations only.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.ANY",
+ "PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware. Examples include AD (page Access Dirty), FP and AVX related assists.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1f",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xa2",
+ "EventName": "BE_STALLS.SCOREBOARD",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the total number of branch instructions retired for all branch types.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Counts the total number of instructions in which the instruction pointer (IP) of the processor is resteered due to a branch instruction and the branch instruction successfully retires. All branch type instructions are accounted for. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "All branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Counts all branch instructions retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND",
+ "PublicDescription": "Counts conditional branch instructions retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x7",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of not taken conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_NTAKEN",
+ "PublicDescription": "Counts the number of not taken conditional branch instructions retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Not taken branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_NTAKEN",
+ "PublicDescription": "Counts not taken branch instructions retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Taken conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_TAKEN",
+ "PublicDescription": "Counts taken conditional branch instructions retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Taken backward conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_TAKEN_BWD",
+ "PublicDescription": "Counts taken backward conditional branch instructions retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Taken forward conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_TAKEN_FWD",
+ "PublicDescription": "Counts taken forward conditional branch instructions retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Far branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PublicDescription": "Counts far branch instructions retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. [This event is alias to BR_INST_RETIRED.NEAR_INDIRECT]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "Deprecated": "1",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.INDIRECT",
+ "PublicDescription": "This event is deprecated. [This event is alias to BR_INST_RETIRED.NEAR_INDIRECT] Available PDIST counters: 0,1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x50",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of near CALL branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PublicDescription": "Counts the number of near CALL branch instructions retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x30",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PublicDescription": "Counts both direct and indirect near call instructions retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x30",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "near relative call instructions retired. [This event is alias to BR_INST_RETIRED.NEAR_REL_CALL]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_DIRECT_CALL",
+ "PublicDescription": "Counts near relative call instructions retired. [This event is alias to BR_INST_RETIRED.NEAR_REL_CALL] Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "near relative jump instructions retired. [This event is alias to BR_INST_RETIRED.NEAR_REL_JMP]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_DIRECT_JMP",
+ "PublicDescription": "Counts near relative jump instructions retired. [This event is alias to BR_INST_RETIRED.NEAR_REL_JMP] Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x80",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Indirect near branch instructions retired (excluding returns) [This event is alias to BR_INST_RETIRED.INDIRECT]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_INDIRECT",
+ "PublicDescription": "Counts near indirect branch instructions retired excluding returns. TSX abort is an indirect branch. [This event is alias to BR_INST_RETIRED.INDIRECT] Available PDIST counters: 0,1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x50",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Indirect near call instructions retired. [This event is alias to BR_INST_RETIRED.NEAR_IND_CALL]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_INDIRECT_CALL",
+ "PublicDescription": "Counts indirect near call instructions retired. [This event is alias to BR_INST_RETIRED.NEAR_IND_CALL] Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Indirect near jump instructions retired. [This event is alias to BR_INST_RETIRED.NEAR_IND_JMP]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_INDIRECT_JMP",
+ "PublicDescription": "Counts indirect near jump instructions retired. [This event is alias to BR_INST_RETIRED.NEAR_IND_JMP] Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. [This event is alias to BR_INST_RETIRED.NEAR_INDIRECT_CALL]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "Deprecated": "1",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_IND_CALL",
+ "PublicDescription": "This event is deprecated. [This event is alias to BR_INST_RETIRED.NEAR_INDIRECT_CALL] Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. [This event is alias to BR_INST_RETIRED.NEAR_INDIRECT_JMP]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "Deprecated": "1",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_IND_JMP",
+ "PublicDescription": "This event is deprecated. [This event is alias to BR_INST_RETIRED.NEAR_INDIRECT_JMP] Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Indirect and Direct Relative near jump instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_JMP",
+ "PublicDescription": "Counts near jump instructions retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0xc0",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. [This event is alias to BR_INST_RETIRED.NEAR_DIRECT_CALL]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "Deprecated": "1",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_REL_CALL",
+ "PublicDescription": "This event is deprecated. [This event is alias to BR_INST_RETIRED.NEAR_DIRECT_CALL] Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. [This event is alias to BR_INST_RETIRED.NEAR_DIRECT_JMP]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "Deprecated": "1",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_REL_JMP",
+ "PublicDescription": "This event is deprecated. [This event is alias to BR_INST_RETIRED.NEAR_DIRECT_JMP] Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x80",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Return instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PublicDescription": "Counts return instructions retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of near taken branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PublicDescription": "Counts the number of near taken branch instructions retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xfb",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Taken branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PublicDescription": "Counts taken branch instructions retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "UMask": "0xfb",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the total number of mispredicted branch instructions retired for all branch types.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Counts the total number of mispredicted branch instructions retired. All branch type instructions are accounted for. Prediction of the branch target address enables the processor to begin executing instructions before the non-speculative execution path is known. The branch prediction unit (BPU) predicts the target address based on the instruction pointer (IP) of the branch and on the execution path through which execution reached this IP. A branch misprediction occurs when the prediction is wrong, and results in discarding all instructions executed in the speculative path and re-fetching from the correct path. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "All mispredicted branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path. Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.ALL_BRANCHES_TPEBS]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "Deprecated": "1",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_COST",
+ "PublicDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.ALL_BRANCHES_TPEBS] Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "All mispredicted branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. [This event is alias to BR_MISP_RETIRED.ALL_BRANCHES_COST]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_TPEBS",
+ "PublicDescription": "All mispredicted branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. [This event is alias to BR_MISP_RETIRED.ALL_BRANCHES_COST] Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.NEAR_INDIRECT]",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Deprecated": "1",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_NEAR_IND",
+ "PublicDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.NEAR_INDIRECT] Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x50",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND",
+ "PublicDescription": "Counts mispredicted conditional branch instructions retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x7",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.COND_TPEBS]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "Deprecated": "1",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_COST",
+ "PublicDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.COND_TPEBS] Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x8007",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted not taken conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_NTAKEN",
+ "PublicDescription": "Counts the number of mispredicted not taken conditional branch instructions retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Mispredicted non-taken conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_NTAKEN",
+ "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken. Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.COND_NTAKEN_TPEBS]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "Deprecated": "1",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_NTAKEN_COST",
+ "PublicDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.COND_NTAKEN_TPEBS] Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x8004",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Mispredicted non-taken conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. [This event is alias to BR_MISP_RETIRED.COND_NTAKEN_COST]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_NTAKEN_TPEBS",
+ "PublicDescription": "Mispredicted non-taken conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. [This event is alias to BR_MISP_RETIRED.COND_NTAKEN_COST] Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x8004",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted taken conditional branch instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN",
+ "PublicDescription": "Counts the number of mispredicted taken conditional branch instructions retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "number of branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN",
+ "PublicDescription": "Counts taken conditional mispredicted branch instructions retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "number of branch instructions retired that were mispredicted and taken backward.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN_BWD",
+ "PublicDescription": "Counts taken backward conditional mispredicted branch instructions retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.COND_TAKEN_BWD_TPEBS]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "Deprecated": "1",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN_BWD_COST",
+ "PublicDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.COND_TAKEN_BWD_TPEBS] Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x8001",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "number of branch instructions retired that were mispredicted and taken backward. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. [This event is alias to BR_MISP_RETIRED.COND_TAKEN_BWD_COST]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN_BWD_TPEBS",
+ "PublicDescription": "number of branch instructions retired that were mispredicted and taken backward. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. [This event is alias to BR_MISP_RETIRED.COND_TAKEN_BWD_COST] Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x8001",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.COND_TAKEN_TPEBS]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "Deprecated": "1",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN_COST",
+ "PublicDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.COND_TAKEN_TPEBS] Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x8003",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "number of branch instructions retired that were mispredicted and taken forward.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN_FWD",
+ "PublicDescription": "Counts taken forward conditional mispredicted branch instructions retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.COND_TAKEN_FWD_TPEBS]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "Deprecated": "1",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN_FWD_COST",
+ "PublicDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.COND_TAKEN_FWD_TPEBS] Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x8002",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "number of branch instructions retired that were mispredicted and taken forward. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. [This event is alias to BR_MISP_RETIRED.COND_TAKEN_FWD_COST]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN_FWD_TPEBS",
+ "PublicDescription": "number of branch instructions retired that were mispredicted and taken forward. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. [This event is alias to BR_MISP_RETIRED.COND_TAKEN_FWD_COST] Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x8002",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Mispredicted taken conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. [This event is alias to BR_MISP_RETIRED.COND_TAKEN_COST]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN_TPEBS",
+ "PublicDescription": "Mispredicted taken conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. [This event is alias to BR_MISP_RETIRED.COND_TAKEN_COST] Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x8003",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Mispredicted conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. [This event is alias to BR_MISP_RETIRED.COND_COST]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TPEBS",
+ "PublicDescription": "Mispredicted conditional branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. [This event is alias to BR_MISP_RETIRED.COND_COST] Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x8007",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.NEAR_INDIRECT]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "Deprecated": "1",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT",
+ "PublicDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.NEAR_INDIRECT] Available PDIST counters: 0,1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x50",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.NEAR_INDIRECT_CALL]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "Deprecated": "1",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
+ "PublicDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.NEAR_INDIRECT_CALL] Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.NEAR_INDIRECT_CALL_TPEBS]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "Deprecated": "1",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT_CALL_COST",
+ "PublicDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.NEAR_INDIRECT_CALL_TPEBS] Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x8010",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.NEAR_INDIRECT_TPEBS]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "Deprecated": "1",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT_COST",
+ "PublicDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.NEAR_INDIRECT_TPEBS] Available PDIST counters: 0,1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8050",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.NEAR_INDIRECT_JMP]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "Deprecated": "1",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT_JMP",
+ "PublicDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.NEAR_INDIRECT_JMP] Available PDIST counters: 0,1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.NEAR_INDIRECT_JMP_TPEBS]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "Deprecated": "1",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT_JMP_COST",
+ "PublicDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.NEAR_INDIRECT_JMP_TPEBS] Available PDIST counters: 0,1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8040",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted near indirect JMP and near indirect CALL branch instructions retired. [This event is alias to BR_MISP_RETIRED.ALL_NEAR_IND]",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_INDIRECT",
+ "PublicDescription": "Counts the number of mispredicted near indirect JMP and near indirect CALL branch instructions retired. [This event is alias to BR_MISP_RETIRED.ALL_NEAR_IND] Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x50",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Miss-predicted near indirect branch instructions retired (excluding returns) [This event is alias to BR_MISP_RETIRED.INDIRECT]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_INDIRECT",
+ "PublicDescription": "Counts miss-predicted near indirect branch instructions retired excluding returns. TSX abort is an indirect branch. [This event is alias to BR_MISP_RETIRED.INDIRECT] Available PDIST counters: 0,1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x50",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Mispredicted indirect CALL retired. [This event is alias to BR_MISP_RETIRED.INDIRECT_CALL]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_INDIRECT_CALL",
+ "PublicDescription": "Counts retired mispredicted indirect (near taken) CALL instructions, including both register and memory indirect. [This event is alias to BR_MISP_RETIRED.INDIRECT_CALL] Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Mispredicted indirect CALL retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. [This event is alias to BR_MISP_RETIRED.INDIRECT_CALL_COST]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_INDIRECT_CALL_TPEBS",
+ "PublicDescription": "Mispredicted indirect CALL retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. [This event is alias to BR_MISP_RETIRED.INDIRECT_CALL_COST] Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x8010",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Miss-predicted near indirect jump instructions retired. [This event is alias to BR_MISP_RETIRED.INDIRECT_JMP]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_INDIRECT_JMP",
+ "PublicDescription": "Miss-predicted near indirect jump instructions retired. [This event is alias to BR_MISP_RETIRED.INDIRECT_JMP] Available PDIST counters: 0,1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Miss-predicted near indirect jump instructions retired. Precise cost. [This event is alias to BR_MISP_RETIRED.INDIRECT_JMP_COST]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_INDIRECT_JMP_TPEBS",
+ "PublicDescription": "Miss-predicted near indirect jump instructions retired. Precise cost. [This event is alias to BR_MISP_RETIRED.INDIRECT_JMP_COST] Available PDIST counters: 0,1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8040",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Mispredicted near indirect branch instructions retired (excluding returns). This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. [This event is alias to BR_MISP_RETIRED.INDIRECT_COST]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_INDIRECT_TPEBS",
+ "PublicDescription": "Mispredicted near indirect branch instructions retired (excluding returns). This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. [This event is alias to BR_MISP_RETIRED.INDIRECT_COST] Available PDIST counters: 0,1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8050",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS [This event is alias to BR_MISP_RETIRED.RET]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_RETURN",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired. [This event is alias to BR_MISP_RETIRED.RET] Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Mispredicted ret instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. [This event is alias to BR_MISP_RETIRED.RET_COST]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_RETURN_TPEBS",
+ "PublicDescription": "Mispredicted ret instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. [This event is alias to BR_MISP_RETIRED.RET_COST] Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8008",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken. Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "UMask": "0xfb",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.NEAR_TAKEN_TPEBS]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "Deprecated": "1",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN_COST",
+ "PublicDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.NEAR_TAKEN_TPEBS] Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x80fb",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Mispredicted taken near branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. [This event is alias to BR_MISP_RETIRED.NEAR_TAKEN_COST]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN_TPEBS",
+ "PublicDescription": "Mispredicted taken near branch instructions retired. This precise event may be used to get the misprediction cost via the Retire_Latency field of PEBS. It fires on the instruction that immediately follows the mispredicted branch. [This event is alias to BR_MISP_RETIRED.NEAR_TAKEN_COST] Available PDIST counters: 0,1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x80fb",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.NEAR_RETURN]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "Deprecated": "1",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "PublicDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.NEAR_RETURN] Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.NEAR_RETURN_TPEBS]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "Deprecated": "1",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.RET_COST",
+ "PublicDescription": "This event is deprecated. [This event is alias to BR_MISP_RETIRED.NEAR_RETURN_TPEBS] Available PDIST counters: 0,1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8008",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Core clocks when the thread is in the C0.1 light-weight slower wakeup time but more power saving optimized state.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.C01",
+ "PublicDescription": "Counts core clocks when the thread is in the C0.1 light-weight slower wakeup time but more power saving optimized state. This state can be entered via the TPAUSE or UMWAIT instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Core clocks when the thread is in the C0.2 light-weight faster wakeup time but less power saving optimized state.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.C02",
+ "PublicDescription": "Counts core clocks when the thread is in the C0.2 light-weight faster wakeup time but less power saving optimized state. This state can be entered via the TPAUSE or UMWAIT instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Core clocks when the thread is in the C0.1 or C0.2 or running a PAUSE in C0 ACPI state.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.C0_WAIT",
+ "PublicDescription": "Counts core clocks when the thread is in the C0.1 or C0.2 power saving optimized states (TPAUSE or UMWAIT instructions) or running the PAUSE instruction.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x70",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles.",
+ "Counter": "Fixed counter 1",
+ "EventName": "CPU_CLK_UNHALTED.CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Core cycles when the core is not in a halt state.",
+ "Counter": "Fixed counter 1",
+ "EventName": "CPU_CLK_UNHALTED.CORE",
+ "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the programmable counters available for other events.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted core clock cycles. [This event is alias to CPU_CLK_UNHALTED.THREAD_P]",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.CORE_P",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Thread cycles when thread is not in halt state [This event is alias to CPU_CLK_UNHALTED.THREAD_P]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.CORE_P",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time. [This event is alias to CPU_CLK_UNHALTED.THREAD_P]",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Core clocks when a PAUSE is pending.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.PAUSE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of Pause instructions",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.PAUSE_INST",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles.",
+ "Counter": "Fixed counter 2",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "Fixed counter 2",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
+ "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses a programmable general purpose performance counter.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles.",
+ "Counter": "Fixed counter 1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Core cycles when the thread is not in a halt state.",
+ "Counter": "Fixed counter 1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the programmable counters available for other events.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted core clock cycles. [This event is alias to CPU_CLK_UNHALTED.CORE_P]",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Thread cycles when thread is not in halt state [This event is alias to CPU_CLK_UNHALTED.CORE_P]",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time. [This event is alias to CPU_CLK_UNHALTED.CORE_P]",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "16",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total execution stalls.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "4",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Count number of times a load is depending on another load that had just write back its data or in previous or 2 cycles back. This event supports in-direct dependency through a single uop.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x02",
+ "EventName": "DEPENDENT_LOADS.ANY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles total of 2 or 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.2_3_PORTS_UTIL",
+ "SampleAfterValue": "2000003",
+ "UMask": "0xc",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
+ "PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
+ "PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "5",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.BOUND_ON_LOADS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x21",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "2",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
+ "PublicDescription": "Counts cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles no uop executed while RS was not empty, the SB was not full and there was no outstanding load.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
+ "PublicDescription": "Number of cycles total of 0 uops executed on all ports, Reservation Station (RS) was not empty, the Store Buffer (SB) was not full and there was no outstanding load.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instruction decoders utilized in a cycle",
+ "Counter": "2",
+ "EventCode": "0x75",
+ "EventName": "INST_DECODED.DECODERS",
+ "PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of instructions retired.",
+ "Counter": "Fixed counter 0",
+ "EventName": "INST_RETIRED.ANY",
+ "PublicDescription": "Fixed Counter: Counts the number of instructions retired. Available PDIST counters: 32",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
+ "Counter": "Fixed counter 0",
+ "EventName": "INST_RETIRED.ANY",
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter. Available PDIST counters: 32",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PublicDescription": "Counts the number of instructions retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter. Available PDIST counters: 0,1",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "retired macro-fused uops when there is a branch in the macro-fused pair (the two instructions that got macro-fused count once in this pmon)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.BR_FUSED",
+ "PublicDescription": "retired macro-fused uops when there is a branch in the macro-fused pair (the two instructions that got macro-fused count once in this pmon) Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "INST_RETIRED.MACRO_FUSED",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.MACRO_FUSED",
+ "PublicDescription": "INST_RETIRED.MACRO_FUSED Available PDIST counters: 0,1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x30",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired NOP instructions.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.NOP",
+ "PublicDescription": "Counts all retired NOP or ENDBR32/64 or PREFETCHIT0/1 instructions Available PDIST counters: 0,1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Precise instruction retired with PEBS precise-distribution",
+ "Counter": "Fixed counter 0",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "PublicDescription": "A version of INST_RETIRED that allows for a precise distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR++) feature to fix bias in how retired instructions get sampled. Use on Fixed Counter 0. Available PDIST counters: 32",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Iterations of Repeat string retired instructions.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.REP_ITERATION",
+ "PublicDescription": "Number of iterations of Repeat (REP) string retired instructions such as MOVS, CMPS, and SCAS. Each has a byte, word, and doubleword version and string instructions can be repeated using a repetition prefix, REP, that allows their architectural execution to be repeated a number of times as specified by the RCX register. Note the number of iterations is implementation-dependent. Available PDIST counters: 0,1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Bubble cycles of BPClear.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.BPCLEAR_CYCLES",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0xB",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Clears speculative count",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.CLEARS_COUNT",
+ "PublicDescription": "Counts the number of speculative clears due to any type of branch misprediction or machine clears",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
+ "PublicDescription": "Cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x80",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "PublicDescription": "Counts core cycles when the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Bubble cycles of BAClear (Unknown Branch).",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.UNKNOWN_BRANCH_CYCLES",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x7",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "TMA slots where uops got dropped",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.UOP_DROPPING",
+ "PublicDescription": "Estimated number of Top-down Microarchitecture Analysis slots that got dropped due to non front-end reasons",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on secondary integer ports 0,1,2,3.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.2ND",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on all Integer ports.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.ALL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xff",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on a load port.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.LD",
+ "PublicDescription": "Counts the number of uops executed on a load port. This event counts for integer uops even if the destination is FP/vector",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on integer port 0.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.P0",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on integer port 1.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.P1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on integer port 2.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.P2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on integer port 3.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.P3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on integer port 0,1, 2, 3.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.PRIMARY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x78",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on a Store address port.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.STA",
+ "PublicDescription": "Counts the number of uops executed on a Store address port. This event counts integer uops even if the data source is FP/vector",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops executed on an integer store data and jump port.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "INT_UOPS_EXECUTED.STD_JMP",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Number of vector integer instructions retired of 128-bit vector-width.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.128BIT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x13",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of vector integer instructions retired of 256-bit vector-width.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.256BIT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xac",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "integer ADD, SUB, SAD 128-bit vector instructions.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.ADD_128",
+ "PublicDescription": "Number of retired integer ADD/SUB (regular or horizontal), SAD 128-bit vector instructions.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "integer ADD, SUB, SAD 256-bit vector instructions.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.ADD_256",
+ "PublicDescription": "Number of retired integer ADD/SUB (regular or horizontal), SAD 256-bit vector instructions.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xc",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.MUL_256",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.MUL_256",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.SHUFFLES",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.SHUFFLES",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.VNNI_128",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.VNNI_128",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.VNNI_256",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.VNNI_256",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of retired loads that are blocked because it initially appears to be store forward blocked, but subsequently is shown not to be blocked based on 4K alias check.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.ADDRESS_ALIAS",
+ "PublicDescription": "Counts the number of retired loads that are blocked because it initially appears to be store forward blocked, but subsequently is shown not to be blocked based on 4K alias check. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.ADDRESS_ALIAS",
+ "PublicDescription": "Counts the number of times a load got blocked due to false dependencies in MOB due to partial compare on address.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of retired loads that are blocked for any of the following reasons: DTLB miss, address alias, store forward or data unknown (includes memory disambiguation blocks and ESP consuming load blocks).",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.ALL",
+ "PublicDescription": "Counts the number of retired loads that are blocked for any of the following reasons: DTLB miss, address alias, store forward or data unknown (includes memory disambiguation blocks and ESP consuming load blocks). Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1f",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Bank conflicts in DCU due to limited lookup ports.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.BANK_CONFLICT",
+ "PublicDescription": "Counts the number of times a load got blocked due to bank conflicts in DCU",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of retired loads that are blocked because its address exactly matches an older store whose data is not ready.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.DATA_UNKNOWN",
+ "PublicDescription": "Counts the number of retired loads that are blocked because its address exactly matches an older store whose data is not ready. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "PublicDescription": "Counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x88",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of times a load got early blocked due to preceding store operation with unknown address or unknown data. Excluding in-line (immediate) wakeups",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_EARLY",
+ "SampleAfterValue": "100003",
+ "UMask": "0xa1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of retired loads that are blocked because its address partially overlapped with an older store.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "Counts the number of retired loads that are blocked because its address partially overlapped with an older store. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x82",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "1",
+ "EventCode": "0xa8",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles optimal number of Uops delivered by the LSD, but did not come from the decoder.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "8",
+ "EventCode": "0xa8",
+ "EventName": "LSD.CYCLES_OK",
+ "PublicDescription": "Counts the cycles when optimal number of uops is delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xa8",
+ "EventName": "LSD.UOPS",
+ "PublicDescription": "Counts the number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "PublicDescription": "Counts the number of machine clears (nukes) of any type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears due to memory ordering in which an internal load passes an older store within the same CPU.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.DISAMBIGUATION",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears due to a page fault. Counts both I-Side and D-Side (Loads/Stores) page faults. A page fault occurs when either the page is not present, or an access violation occurs.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.PAGE_FAULT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears due to program modifying data (self modifying code) within 1K of a recently fetched code page.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts cycles where no execution is happening due to loads waiting for L1 cache (that is: no execution & load in flight & no load missed L1 cache)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x46",
+ "EventName": "MEMORY_STALLS.L1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts cycles where no execution is happening due to loads waiting for L2 cache (that is: no execution & load in flight & load missed L1 & no load missed L2 cache)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x46",
+ "EventName": "MEMORY_STALLS.L2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts cycles where no execution is happening due to loads waiting for L3 cache (that is: no execution & load in flight & load missed L1 & load missed L2 cache & no load missed L3 Cache)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x46",
+ "EventName": "MEMORY_STALLS.L3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts cycles where no execution is happening due to loads waiting for Memory (that is: no execution & load in flight & a load missed L3 cache)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x46",
+ "EventName": "MEMORY_STALLS.MEM",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "LFENCE instructions retired",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xe0",
+ "EventName": "MISC2_RETIRED.LFENCE",
+ "PublicDescription": "number of LFENCE retired instructions",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of LBR entries recorded. Requires LBRs to be enabled in IA32_LBR_CTL.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe4",
+ "EventName": "MISC_RETIRED.LBR_INSERTS",
+ "PublicDescription": "Counts the number of LBR entries recorded. Requires LBRs to be enabled in IA32_LBR_CTL. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "LBR record is inserted",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xe4",
+ "EventName": "MISC_RETIRED.LBR_INSERTS",
+ "PublicDescription": "LBR record is inserted Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of LFENCE instructions retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe0",
+ "EventName": "MISC_RETIRED1.LFENCE",
+ "PublicDescription": "Counts the number of LFENCE instructions retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of accesses to KeyLocker cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe1",
+ "EventName": "MISC_RETIRED2.KEYLOCKER_ACCESS",
+ "PublicDescription": "Counts the number of accesses to KeyLocker cache. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of misses to KeyLocker cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe1",
+ "EventName": "MISC_RETIRED2.KEYLOCKER_MISS",
+ "PublicDescription": "Counts the number of misses to KeyLocker cache. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x11",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY_COUNT",
+ "Invert": "1",
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when RS was empty and a resource allocation stall is asserted",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY_RESOURCE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state. For Tremont, UMWAIT and TPAUSE will only put the CPU into C0.1 activity state (not C0.2 activity state).",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x75",
+ "EventName": "SERIALIZATION.C01_MS_SCB",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots where no uop could issue due to an IQ scoreboard that stalls allocation until a specified older uop retires or (in the case of jump scoreboard) executes. Commonly executed instructions with IQ scoreboards include LFENCE and MFENCE.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x75",
+ "EventName": "SERIALIZATION.IQ_JEU_SCB",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots not consumed by the backend due to a micro-sequencer (MS) scoreboard, which stalls the front-end from issuing from the UROM until a specified older uop retires.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x75",
+ "EventName": "SERIALIZATION.NON_C01_MS_SCB",
+ "PublicDescription": "Counts the number of issue slots not consumed by the backend due to a micro-sequencer (MS) scoreboard, which stalls the front-end from issuing from the UROM until a specified older uop retires. The most commonly executed instruction with an MS scoreboard is PAUSE.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "This event counts a subset of the Topdown Slots event that were not consumed by the back-end pipeline due to lack of back-end resources, as a result of memory subsystem delays, execution units limitations, or other conditions.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
+ "PublicDescription": "This event counts a subset of the Topdown Slots event that were not consumed by the back-end pipeline due to lack of back-end resources, as a result of memory subsystem delays, execution units limitations, or other conditions. Software can use this event as the numerator for the Backend Bound metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "TMA slots wasted due to incorrect speculations.",
+ "Counter": "0",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BAD_SPEC_SLOTS",
+ "PublicDescription": "Number of slots of TMA method that were wasted due to incorrect speculation. It covers all types of control-flow or data-related mis-speculations.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "TMA slots wasted due to incorrect speculation by branch mispredictions",
+ "Counter": "0",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BR_MISPREDICT_SLOTS",
+ "PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by (any type of) branch mispredictions. This event estimates number of speculative operations that were issued but not retired as well as the out-of-order engine recovery past a branch misprediction.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "TOPDOWN.MEMORY_BOUND_SLOTS",
+ "Counter": "3",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.MEMORY_BOUND_SLOTS",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event",
+ "Counter": "Fixed counter 3",
+ "EventName": "TOPDOWN.SLOTS",
+ "PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. General counter - architectural event",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.SLOTS_P",
+ "PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear.",
+ "Counter": "Fixed counter 4",
+ "EventName": "TOPDOWN_BAD_SPECULATION.ALL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.ALL_P",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window, including relevant microcode flows, and while uops are not yet available in the instruction queue (IQ) or until an FE_BOUND event occurs besides OTHER and CISC. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
+ "SampleAfterValue": "1000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to Fast Nukes such as Memory Ordering Machine clears and MRN nukes",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.FASTNUKE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to a branch mispredict that resulted in LSD exit.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.LSD_MISPREDICT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to Branch Mispredict",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.MISPREDICT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to a machine clear (nuke).",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.NUKE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of retirement slots not consumed due to backend stalls. [This event is alias to TOPDOWN_BE_BOUND.ALL_P]",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN_BE_BOUND.ALL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to due to certain allocation restrictions.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of retirement slots not consumed due to backend stalls. [This event is alias to TOPDOWN_BE_BOUND.ALL]",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN_BE_BOUND.ALL_P",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to LSD entry.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.LSD",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to memory reservation stall (scheduler not being able to accept another uop). This could be caused by RSV full or load/store buffer block.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.MEM_SCHEDULER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to iq/jeu scoreboards or ms scb",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.SERIALIZATION",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of retirement slots not consumed due to front end stalls.",
+ "Counter": "Fixed counter 5",
+ "EventName": "TOPDOWN_FE_BOUND.ALL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x6",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of retirement slots not consumed due to front end stalls.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x9c",
+ "EventName": "TOPDOWN_FE_BOUND.ALL_P",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BAClear",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.BRANCH_DETECT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BTClear",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.BRANCH_RESTEER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to ms",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.CISC",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to decode stall",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.DECODE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to latency related stalls including BACLEARs, BTCLEARs, ITLB misses, and ICache misses.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.FRONTEND_LATENCY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x72",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to itlb miss",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.ITLB_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend that do not categorize into any other common frontend stall",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.OTHER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to predecode wrong",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.PREDECODE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of consumed retirement slots.",
+ "Counter": "Fixed counter 6",
+ "EventName": "TOPDOWN_RETIRING.ALL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of consumed retirement slots.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc2",
+ "EventName": "TOPDOWN_RETIRING.ALL_P",
+ "PublicDescription": "Counts the number of consumed retirement slots. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Number of non dec-by-all uops decoded by decoder",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x76",
+ "EventName": "UOPS_DECODED.DEC0_UOPS",
+ "PublicDescription": "This event counts the number of not dec-by-all uops decoded by decoder 0.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops executed on INT EU ALU ports.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.ALU",
+ "PublicDescription": "Number of ALU integer uops dispatch to execution.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops executed on any INT EU ports",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.INT_EU_ALL",
+ "PublicDescription": "Number of integer uops dispatched to execution.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of Uops dispatched/executed by any of the 3 JEUs (all ups that hold the JEU including macro; micro jumps; fetch-from-eip)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.JMP",
+ "PublicDescription": "Number of jump uops dispatch to execution",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops executed on Load ports",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.LOAD",
+ "PublicDescription": "Number of Load uops dispatched to execution.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of (shift) 1-cycle Uops dispatched/executed by any of the Shift Eus",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.SHIFT",
+ "PublicDescription": "Number of SHIFT integer uops dispatch to execution",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of Uops dispatched/executed by Slow EU (e.g. 3+ cycles LEA, >1 cycles shift, iDIVs, CR; *H operation)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.SLOW",
+ "PublicDescription": "Number of Slow integer uops dispatch to execution.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of Uops dispatched on STA ports",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.STA",
+ "PublicDescription": "Number of STA (Store Address) uops dispatch to execution",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops executed on STD ports",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.STD",
+ "PublicDescription": "Number of STD (Store Data) uops dispatch to execution",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "Counter": "3",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1",
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "Counter": "3",
+ "CounterMask": "2",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2",
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "Counter": "3",
+ "CounterMask": "3",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3",
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "Counter": "3",
+ "CounterMask": "4",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4",
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "3",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.STALLS",
+ "Invert": "1",
+ "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "Counter": "3",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of x87 uops dispatched.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.X87",
+ "PublicDescription": "Counts the number of x87 uops executed.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops that RAT issues to RS",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xae",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "UOPS_ISSUED.CYCLES",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "1",
+ "EventCode": "0xae",
+ "EventName": "UOPS_ISSUED.CYCLES",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles with retired uop(s).",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "1",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.CYCLES",
+ "PublicDescription": "Counts cycles where at least one uop has retired.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired uops except the last uop of each instruction.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.HEAVY",
+ "PublicDescription": "Counts the number of retired micro-operations (uops) except the last uop of each instruction. An instruction that is decoded into less than two uops does not contribute to the count.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of integer divide uops retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.IDIV",
+ "PublicDescription": "Counts the number of integer divide uops retired. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops that are from the complex flows issued by the micro-sequencer (MS). This includes uops from flows due to complex instructions, faults, assists, and inserted flows.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.MS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "UOPS_RETIRED.MS",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.MS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x8",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of non-speculative switches to the Microcode Sequencer (MS)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.MS_SWITCHES",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x8",
+ "PublicDescription": "Switches to the Microcode Sequencer",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event counts a subset of the Topdown Slots event that are utilized by operations that eventually get retired (committed) by the processor pipeline. Usually, this event positively correlates with higher performance for example, as measured by the instructions-per-cycle metric.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.SLOTS",
+ "PublicDescription": "This event counts a subset of the Topdown Slots event that are utilized by operations that eventually get retired (committed) by the processor pipeline. Usually, this event positively correlates with higher performance for example, as measured by the instructions-per-cycle metric. Software can use this event as the numerator for the Retiring metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "1",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.STALLS",
+ "Invert": "1",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of x87 uops retired, includes those in ms flows.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.X87",
+ "SampleAfterValue": "1000003",
+ "Unit": "cpu_atom"
+ }
+]
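
The entries above follow the perf pmu-events schema: "EventCode" and "UMask" give the raw hardware encoding, "Unit" selects the hybrid PMU (cpu_core or cpu_atom), and "CounterMask"/"Invert"/"EdgeDetect" map onto perf's cmask/inv/edge modifiers. As a rough usage sketch, assuming a perf binary built with these Pantherlake tables (the invocation is illustrative, not part of this patch), the named alias and the raw encoding should be interchangeable:

  # hypothetical invocation once these tables are built into perf
  perf stat -a -e cpu_atom/TOPDOWN_BAD_SPECULATION.MISPREDICT/ sleep 1
  # equivalent raw form derived from EventCode=0x73, UMask=0x4 above
  perf stat -a -e cpu_atom/event=0x73,umask=0x4/ sleep 1
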
diff --git a/tools/perf/pmu-events/arch/x86/pantherlake/uncore-memory.json b/tools/perf/pmu-events/arch/x86/pantherlake/uncore-memory.json
new file mode 100644
index 000000000000..a881b99be5f3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/pantherlake/uncore-memory.json
@@ -0,0 +1,26 @@
+[
+ {
+ "BriefDescription": "Read CAS command sent to DRAM",
+ "Counter": "0,1,2,3,4",
+ "EventCode": "0x22",
+ "EventName": "UNC_M_CAS_COUNT_RD",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write CAS command sent to DRAM",
+ "Counter": "0,1,2,3,4",
+ "EventCode": "0x23",
+ "EventName": "UNC_M_CAS_COUNT_WR",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Total number of read and write byte transfers to/from DRAM, in 32B chunk, per DDR channel. Counter increments by 1 after sending or receiving 32B chunk data.",
+ "Counter": "0,1,2,3,4",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M_TOTAL_DATA",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ }
+]
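
Since UNC_M_TOTAL_DATA ticks once per 32-byte chunk per DDR channel, a back-of-envelope DRAM bandwidth estimate follows directly from the count. A hedged sketch, assuming perf exposes the usual lowercase alias for this iMC event:

  # hypothetical invocation, not part of this patch
  perf stat -a -e unc_m_total_data sleep 1
  # bandwidth ~= (sum of counts across iMC channels) * 32 bytes / 1 s
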
diff --git a/tools/perf/pmu-events/arch/x86/pantherlake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/pantherlake/virtual-memory.json
new file mode 100644
index 000000000000..8d56c16b2a39
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/pantherlake/virtual-memory.json
@@ -0,0 +1,310 @@
+[
+ {
+ "BriefDescription": "Counts the number of page walks initiated by a demand load that missed the first and second level TLBs.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.MISS_CAUSED_WALK",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of first level TLB misses but second level hits due to a demand load that did not start a page walk. Accounts for all page sizes. Will result in a DTLB write from STLB.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Loads that miss the DTLB and hit the STLB.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x320",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a demand load.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "1",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a demand load.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to load DTLB misses to any page size.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xe",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 1G page.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts completed page walks (1G sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 2M/4M page.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 4K page.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for a demand load in the PMH each cycle.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for a demand load in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks initiated by a store that missed the first and second level TLBs.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.MISS_CAUSED_WALK",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of first level TLB misses but second level hits due to stores that did not start a page walk. Accounts for all page sizes. Will result in a DTLB write from STLB.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "PublicDescription": "Counts the number of first level TLB misses but second level hits due to a demand load that did not start a page walk. Accounts for all page sizes. Will result in a DTLB write from STLB.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Stores that miss the DTLB and hit the STLB.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "PublicDescription": "Counts stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x320",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "1",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to any page size.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xe",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 1G page.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts completed page walks (1G sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 4K page.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for a store in the PMH each cycle.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for a store in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks initiated by a instruction fetch that missed the first and second level TLBs.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.MISS_CAUSED_WALK",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of first level TLB misses but second level hits due to an instruction fetch that did not start a page walk. Account for all pages sizes. Will result in an ITLB write from STLB.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "PublicDescription": "Counts instruction fetch requests that miss the ITLB (Instruction TLB) and hit the STLB (Second-level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x120",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "CounterMask": "1",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a code (instruction fetch) request.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to any page size.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xe",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks outstanding for iside in PMH every cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for iside in PMH every cycle. A PMH page walk is outstanding from page walk start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals. Walks could be counted by edge detecting on this event, but would count restarted suspended walks.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for an outstanding code request in the PMH each cycle.",
+ "Counter": "0,1,2,3,4,5,6,7,8,9",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for an outstanding code (instruction fetch) request in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of retired loads that are blocked due to a first level TLB miss.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.DTLB_MISS",
+ "PublicDescription": "Counts the number of retired loads that are blocked due to a first level TLB miss. Available PDIST counters: 0,1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ }
+]
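
The WALK_ACTIVE events above are the WALK_PENDING encodings qualified with CounterMask=1, i.e. cycles with at least one walk in flight, and dividing WALK_PENDING by WALK_COMPLETED gives a common estimate of average walk latency. A sketch of the equivalent raw encoding (illustrative, assuming the standard cmask modifier on the cpu_core PMU):

  # hypothetical invocation, not part of this patch
  perf stat -a -e cpu_core/event=0x12,umask=0x10,cmask=1/ sleep 1
  # avg demand-load walk latency (cycles) ~= WALK_PENDING / WALK_COMPLETED
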
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/cache.json b/tools/perf/pmu-events/arch/x86/rocketlake/cache.json
index 791fa526d192..0f543325ec1a 100644
--- a/tools/perf/pmu-events/arch/x86/rocketlake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/cache.json
@@ -446,6 +446,16 @@
"UMask": "0x20"
},
{
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop was sent or not.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -506,6 +516,16 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop was sent or not.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -566,6 +586,16 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop was sent or not.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -626,6 +656,16 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that hit a cacheline in the L3 where a snoop was sent or not.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -656,6 +696,16 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent or not.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -716,6 +766,16 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent or not.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/memory.json b/tools/perf/pmu-events/arch/x86/rocketlake/memory.json
index abaf3f4f9d63..1455aaac37b1 100644
--- a/tools/perf/pmu-events/arch/x86/rocketlake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/memory.json
@@ -177,6 +177,16 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that was not supplied by the L3 cache.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -187,6 +197,26 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand data reads that was not supplied by the L3 cache.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -197,6 +227,26 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that was not supplied by the L3 cache.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -207,6 +257,26 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that was not supplied by the L3 cache.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -217,6 +287,26 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that was not supplied by the L3 cache.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -227,6 +317,26 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that was not supplied by the L3 cache.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -237,6 +347,26 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184008000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that was not supplied by the L3 cache.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -247,6 +377,26 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184008000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts streaming stores that was not supplied by the L3 cache.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -257,6 +407,16 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts streaming stores that DRAM supplied the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand data read requests that miss the L3 cache.",
"Counter": "0,1,2,3",
"EventCode": "0xb0",
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/other.json b/tools/perf/pmu-events/arch/x86/rocketlake/other.json
index a96b2a989d3f..141cd30a30af 100644
--- a/tools/perf/pmu-events/arch/x86/rocketlake/other.json
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/other.json
@@ -27,186 +27,6 @@
"UMask": "0x20"
},
{
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.HWPF_L1D_AND_SWPF.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10400",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.HWPF_L1D_AND_SWPF.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000400",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.HWPF_L1D_AND_SWPF.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000400",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.HWPF_L2_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10010",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.HWPF_L2_DATA_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000010",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.HWPF_L2_DATA_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000010",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.HWPF_L2_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10020",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.HWPF_L2_RFO.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000020",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.HWPF_L2_RFO.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000020",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
"BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that have any type of response.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -217,26 +37,6 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184008000",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184008000",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
"BriefDescription": "Counts streaming stores that have any type of response.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -245,25 +45,5 @@
"MSRValue": "0x10800",
"SampleAfterValue": "100003",
"UMask": "0x1"
- },
- {
- "BriefDescription": "Counts streaming stores that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.STREAMING_WR.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000800",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts streaming stores that DRAM supplied the request.",
- "Counter": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.STREAMING_WR.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000800",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json b/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json
index cfda8956353e..79dc34157481 100644
--- a/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json
@@ -1,63 +1,63 @@
[
{
"BriefDescription": "C10 residency percent per package",
- "MetricExpr": "cstate_pkg@c10\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c10\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C10_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C2 residency percent per package",
- "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per package",
- "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per core",
- "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per package",
- "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per core",
- "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per package",
- "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C8 residency percent per package",
- "MetricExpr": "cstate_pkg@c8\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c8\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C8_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C9 residency percent per package",
- "MetricExpr": "cstate_pkg@c9\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c9\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C9_Pkg_Residency",
"ScaleUnit": "100%"
@@ -85,16 +85,15 @@
},
{
"BriefDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_4k_aliasing",
- "MetricThreshold": "tma_4k_aliasing > 0.2 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound)",
+ "MetricThreshold": "tma_4k_aliasing > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations",
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
"MetricExpr": "(UOPS_DISPATCHED.PORT_0 + UOPS_DISPATCHED.PORT_1 + UOPS_DISPATCHED.PORT_5 + UOPS_DISPATCHED.PORT_6) / (4 * tma_info_core_core_clks)",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_alu_op_utilization",
@@ -106,7 +105,7 @@
"MetricExpr": "34 * ASSISTS.ANY / tma_info_thread_slots",
"MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
- "MetricThreshold": "tma_assists > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: ASSISTS.ANY",
"ScaleUnit": "100%"
},
@@ -129,12 +128,13 @@
"MetricName": "tma_bad_speculation",
"MetricThreshold": "tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
- "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
"MetricGroup": "BigFootprint;BvBC;Fed;Frontend;IcMiss;MemoryTLB",
"MetricName": "tma_bottleneck_big_code",
"MetricThreshold": "tma_bottleneck_big_code > 20"
@@ -148,39 +148,44 @@
"PublicDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. (A lower bound)"
},
{
+ "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
+ "MetricGroup": "BvCB;Cor;tma_issueComp",
+ "MetricName": "tma_bottleneck_compute_bound_est",
+ "MetricThreshold": "tma_bottleneck_compute_bound_est > 20",
+ "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. Related metrics: "
+ },
+ {
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_fb_full / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
"MetricGroup": "BvMB;Mem;MemoryBW;Offcore;tma_issueBW",
- "MetricName": "tma_bottleneck_cache_memory_bandwidth",
- "MetricThreshold": "tma_bottleneck_cache_memory_bandwidth > 20",
+ "MetricName": "tma_bottleneck_data_cache_memory_bandwidth",
+ "MetricThreshold": "tma_bottleneck_data_cache_memory_bandwidth > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_l1_latency_dependency / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_lock_latency / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_split_loads / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_split_stores / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_store_latency / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l1_latency_dependency / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_lock_latency / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_loads / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_stores / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
"MetricGroup": "BvML;Mem;MemoryLat;Offcore;tma_issueLat",
- "MetricName": "tma_bottleneck_cache_memory_latency",
- "MetricThreshold": "tma_bottleneck_cache_memory_latency > 20",
+ "MetricName": "tma_bottleneck_data_cache_memory_latency",
+ "MetricThreshold": "tma_bottleneck_data_cache_memory_latency > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency"
},
{
- "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
- "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_serializing_operation + tma_ports_utilization) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_serializing_operation + tma_ports_utilization)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
- "MetricGroup": "BvCB;Cor;tma_issueComp",
- "MetricName": "tma_bottleneck_compute_bound_est",
- "MetricThreshold": "tma_bottleneck_compute_bound_est > 20",
- "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy"
- },
- {
"BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)",
- "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_mispredicts_resteers) / (tma_mispredicts_resteers + tma_clears_resteers + tma_unknown_branches)) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_ms / (tma_mite + tma_dsb + tma_lsd + tma_ms))) - tma_bottleneck_big_code",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_mispredicts_resteers) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_ms)) - tma_bottleneck_big_code",
"MetricGroup": "BvFB;Fed;FetchBW;Frontend",
"MetricName": "tma_bottleneck_instruction_fetch_bw",
"MetricThreshold": "tma_bottleneck_instruction_fetch_bw > 20"
},
{
"BriefDescription": "Total pipeline cost of irregular execution (e.g",
- "MetricExpr": "100 * (tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_mispredicts_resteers) / (tma_mispredicts_resteers + tma_clears_resteers + tma_unknown_branches)) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_ms / (tma_mite + tma_dsb + tma_lsd + tma_ms)) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + tma_core_bound * RS_EVENTS.EMPTY_CYCLES / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_serializing_operation + tma_ports_utilization) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_mispredicts_resteers) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_ms) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + tma_core_bound * RS_EVENTS.EMPTY_CYCLES / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
"MetricGroup": "Bad;BvIO;Cor;Ret;tma_issueMS",
"MetricName": "tma_bottleneck_irregular_overhead",
"MetricThreshold": "tma_bottleneck_irregular_overhead > 10",
@@ -188,7 +193,8 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_dtlb_store / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
"MetricGroup": "BvMT;Mem;MemoryTLB;Offcore;tma_issueTLB",
"MetricName": "tma_bottleneck_memory_data_tlbs",
"MetricThreshold": "tma_bottleneck_memory_data_tlbs > 20",
@@ -196,15 +202,17 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * tma_false_sharing / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
"MetricGroup": "BvMS;LockCont;Mem;Offcore;tma_issueSyncxn",
"MetricName": "tma_bottleneck_memory_synchronization",
"MetricThreshold": "tma_bottleneck_memory_synchronization > 10",
- "PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears"
+ "PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache"
},
{
"BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
- "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Bad;BadSpec;BrMispredicts;BvMP;tma_issueBM",
"MetricName": "tma_bottleneck_mispredictions",
"MetricThreshold": "tma_bottleneck_mispredictions > 20",
@@ -212,21 +220,23 @@
},
{
"BriefDescription": "Total pipeline cost of remaining bottlenecks in the back-end",
- "MetricExpr": "100 - (tma_bottleneck_big_code + tma_bottleneck_instruction_fetch_bw + tma_bottleneck_mispredictions + tma_bottleneck_cache_memory_bandwidth + tma_bottleneck_cache_memory_latency + tma_bottleneck_memory_data_tlbs + tma_bottleneck_memory_synchronization + tma_bottleneck_compute_bound_est + tma_bottleneck_irregular_overhead + tma_bottleneck_branching_overhead + tma_bottleneck_useful_work)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 - (tma_bottleneck_big_code + tma_bottleneck_instruction_fetch_bw + tma_bottleneck_mispredictions + tma_bottleneck_data_cache_memory_bandwidth + tma_bottleneck_data_cache_memory_latency + tma_bottleneck_memory_data_tlbs + tma_bottleneck_memory_synchronization + tma_bottleneck_compute_bound_est + tma_bottleneck_irregular_overhead + tma_bottleneck_branching_overhead + tma_bottleneck_useful_work)",
"MetricGroup": "BvOB;Cor;Offcore",
"MetricName": "tma_bottleneck_other_bottlenecks",
"MetricThreshold": "tma_bottleneck_other_bottlenecks > 20",
- "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls"
+ "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls."
},
{
- "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead",
+ "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + 2 * BR_INST_RETIRED.NEAR_CALL + INST_RETIRED.NOP) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
"MetricGroup": "BvUW;Ret",
"MetricName": "tma_bottleneck_useful_work",
"MetricThreshold": "tma_bottleneck_useful_work > 20"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions.",
"MetricExpr": "tma_light_operations * BR_INST_RETIRED.ALL_BRANCHES / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_branch_instructions",
@@ -248,8 +258,8 @@
"MetricExpr": "INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks + tma_unknown_branches",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_branch_resteers",
- "MetricThreshold": "tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_l3_hit_latency, tma_store_latency",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
"ScaleUnit": "100%"
},
{
@@ -257,8 +267,8 @@
"MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
"MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_cisc",
- "MetricThreshold": "tma_cisc > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
- "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
"ScaleUnit": "100%"
},
{
@@ -266,24 +276,24 @@
"MetricExpr": "(1 - BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT)) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
"MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
"MetricName": "tma_clears_resteers",
- "MetricThreshold": "tma_clears_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that hit in the L2 cache",
+ "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that hit in the L2 cache.",
"MetricExpr": "max(0, tma_icache_misses - tma_code_l2_miss)",
"MetricGroup": "FetchLat;IcMiss;Offcore;TopdownL4;tma_L4_group;tma_icache_misses_group",
"MetricName": "tma_code_l2_hit",
- "MetricThreshold": "tma_code_l2_hit > 0.05 & tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_l2_hit > 0.05 & (tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that miss in the L2 cache",
+ "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that miss in the L2 cache.",
"MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD / tma_info_thread_clks",
"MetricGroup": "FetchLat;IcMiss;Offcore;TopdownL4;tma_L4_group;tma_icache_misses_group",
"MetricName": "tma_code_l2_miss",
- "MetricThreshold": "tma_code_l2_miss > 0.05 & tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_l2_miss > 0.05 & (tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
@@ -291,7 +301,7 @@
"MetricExpr": "max(0, tma_itlb_misses - tma_code_stlb_miss)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group",
"MetricName": "tma_code_stlb_hit",
- "MetricThreshold": "tma_code_stlb_hit > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_hit > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
@@ -299,33 +309,33 @@
"MetricExpr": "ITLB_MISSES.WALK_ACTIVE / tma_info_thread_clks",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group",
"MetricName": "tma_code_stlb_miss",
- "MetricThreshold": "tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for (instruction) code accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for (instruction) code accesses.",
"MetricExpr": "tma_code_stlb_miss * ITLB_MISSES.WALK_COMPLETED_2M_4M / (ITLB_MISSES.WALK_COMPLETED_4K + ITLB_MISSES.WALK_COMPLETED_2M_4M)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group",
"MetricName": "tma_code_stlb_miss_2m",
- "MetricThreshold": "tma_code_stlb_miss_2m > 0.05 & tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss_2m > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for (instruction) code accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for (instruction) code accesses.",
"MetricExpr": "tma_code_stlb_miss * ITLB_MISSES.WALK_COMPLETED_4K / (ITLB_MISSES.WALK_COMPLETED_4K + ITLB_MISSES.WALK_COMPLETED_2M_4M)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group",
"MetricName": "tma_code_stlb_miss_4k",
- "MetricThreshold": "tma_code_stlb_miss_4k > 0.05 & tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss_4k > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "((32.5 * tma_info_system_core_frequency - 3.5 * tma_info_system_core_frequency) * MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM + (27 * tma_info_system_core_frequency - 3.5 * tma_info_system_core_frequency) * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "(29 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM + 23.5 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
- "MetricThreshold": "tma_contested_accesses > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM, MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_bottleneck_memory_synchronization, tma_data_sharing, tma_false_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_bottleneck_memory_synchronization, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
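Note on the cost constants in the tma_contested_accesses hunk above and the tma_data_sharing hunk below: the new expressions fold the constant subtraction; the underlying latency costs are not retuned. Writing F for tma_info_system_core_frequency:

    32.5 * F - 3.5 * F = 29 * F
    27 * F - 3.5 * F = 23.5 * F

so the new literals 29 and 23.5 are exactly the old net coefficients.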
@@ -335,25 +345,25 @@
"MetricName": "tma_core_bound",
"MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations)",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "(27 * tma_info_system_core_frequency - 3.5 * tma_info_system_core_frequency) * MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "23.5 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
- "MetricThreshold": "tma_data_sharing > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_false_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder",
- "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=0x1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=0x2@) / tma_info_core_core_clks / 2",
+ "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_core_clks / 2",
"MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group",
"MetricName": "tma_decoder0_alone",
- "MetricThreshold": "tma_decoder0_alone > 0.1 & tma_mite > 0.1 & tma_fetch_bandwidth > 0.2",
+ "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions",
"ScaleUnit": "100%"
},
@@ -362,7 +372,7 @@
"MetricExpr": "ARITH.DIVIDER_ACTIVE / tma_info_thread_clks",
"MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
- "MetricThreshold": "tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
"ScaleUnit": "100%"
},
@@ -372,7 +382,7 @@
"MetricExpr": "CYCLE_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks + (CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks - tma_l2_bound",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_dram_bound",
- "MetricThreshold": "tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_MISS",
"ScaleUnit": "100%"
},
@@ -382,7 +392,7 @@
"MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_dsb",
"MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2",
- "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
"ScaleUnit": "100%"
},
{
@@ -390,26 +400,26 @@
"MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_thread_clks",
"MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_dsb_switches",
- "MetricThreshold": "tma_dsb_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
- "MetricExpr": "min(7 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=0x1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
+ "MetricExpr": "min(7 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
- "MetricThreshold": "tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_store",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
- "MetricExpr": "(7 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=0x1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_core_clks",
+ "MetricExpr": "(7 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_core_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
- "MetricThreshold": "tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_load",
"ScaleUnit": "100%"
},
{
@@ -417,8 +427,8 @@
"MetricExpr": "32.5 * tma_info_system_core_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
- "MetricThreshold": "tma_false_sharing > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
@@ -427,7 +437,7 @@
"MetricGroup": "BvMB;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
- "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
"ScaleUnit": "100%"
},
{
@@ -437,7 +447,7 @@
"MetricName": "tma_fetch_bandwidth",
"MetricThreshold": "tma_fetch_bandwidth > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1, FRONTEND_RETIRED.LATENCY_GE_1, FRONTEND_RETIRED.LATENCY_GE_2. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1;FRONTEND_RETIRED.LATENCY_GE_1;FRONTEND_RETIRED.LATENCY_GE_2. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
@@ -447,7 +457,7 @@
"MetricName": "tma_fetch_latency",
"MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16, FRONTEND_RETIRED.LATENCY_GE_8",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
"ScaleUnit": "100%"
},
{
@@ -465,7 +475,7 @@
"MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fp_arith",
"MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
"ScaleUnit": "100%"
},
{
@@ -474,15 +484,15 @@
"MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group",
"MetricName": "tma_fp_assists",
"MetricThreshold": "tma_fp_assists > 0.1",
- "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals)",
+ "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals).",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of cycles where the Floating-Point Divider unit was active",
+ "BriefDescription": "This metric represents fraction of cycles where the Floating-Point Divider unit was active.",
"MetricExpr": "ARITH.FP_DIVIDER_ACTIVE / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_divider_group",
"MetricName": "tma_fp_divider",
- "MetricThreshold": "tma_fp_divider > 0.2 & tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_fp_divider > 0.2 & (tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
{
@@ -490,7 +500,7 @@
"MetricExpr": "FP_ARITH_INST_RETIRED.SCALAR / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_scalar",
- "MetricThreshold": "tma_fp_scalar > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
"PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -499,7 +509,7 @@
"MetricExpr": "FP_ARITH_INST_RETIRED.VECTOR / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_vector",
- "MetricThreshold": "tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
"PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -508,7 +518,7 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_128b",
- "MetricThreshold": "tma_fp_vector_128b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
"PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -517,7 +527,7 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_256b",
- "MetricThreshold": "tma_fp_vector_256b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
"PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -526,7 +536,7 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_512b",
- "MetricThreshold": "tma_fp_vector_512b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_vector_512b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
"PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -538,17 +548,17 @@
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences",
- "MetricExpr": "tma_microcode_sequencer + tma_retiring * (UOPS_DECODED.DEC0 - cpu@UOPS_DECODED.DEC0\\,cmask\\=0x1@) / IDQ.MITE_UOPS",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
+ "MetricExpr": "tma_microcode_sequencer + tma_retiring * (UOPS_DECODED.DEC0 - cpu@UOPS_DECODED.DEC0\\,cmask\\=1@) / IDQ.MITE_UOPS",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_heavy_operations",
"MetricThreshold": "tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+])",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+])",
"ScaleUnit": "100%"
},
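
The cmask term in the expression above uses a standard PMU idiom: with cmask=1 the counter counts cycles in which the event fired at least once, so the raw count minus the cmask=1 count approximates occurrences beyond the first per cycle. A hedged sketch with assumed values:

    uops_decoded_dec0 = 9.0e9        # UOPS_DECODED.DEC0, raw count (assumed)
    dec0_active_cycles = 7.5e9       # same event with cmask=1 (assumed)
    idq_mite_uops = 20.0e9           # IDQ.MITE_UOPS (assumed)
    tma_retiring = 0.5               # Level-1 fraction (assumed)
    tma_microcode_sequencer = 0.03   # Level-3 fraction (assumed)
    tma_heavy_operations = (tma_microcode_sequencer
                            + tma_retiring * (uops_decoded_dec0 - dec0_active_cycles) / idq_mite_uops)
    print(f"tma_heavy_operations = {tma_heavy_operations:.2%}")
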
{
@@ -556,8 +566,8 @@
"MetricExpr": "ICACHE_DATA.STALLS / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
- "MetricThreshold": "tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS, FRONTEND_RETIRED.L1I_MISS",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
"ScaleUnit": "100%"
},
{
@@ -569,28 +579,28 @@
"PublicDescription": "Branch Misprediction Cost: Cycles representing fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_bottleneck_mispredictions, tma_branch_mispredicts, tma_mispredicts_resteers"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for conditional non-taken branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for conditional non-taken branches (lower number means higher occurrence rate).",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_NTAKEN",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_cond_ntaken",
"MetricThreshold": "tma_info_bad_spec_ipmisp_cond_ntaken < 200"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for conditional taken branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for conditional taken branches (lower number means higher occurrence rate).",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_TAKEN",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_cond_taken",
"MetricThreshold": "tma_info_bad_spec_ipmisp_cond_taken < 200"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.INDIRECT",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_indirect",
- "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1000"
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3"
},
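
The IpMisp family are plain occurrence-rate ratios: instructions retired per mispredict of a given branch class, flagged when low. The threshold rewrite in this hunk is cosmetic (1e3 == 1000). A sketch with assumed counts:

    inst_retired = 500e9       # INST_RETIRED.ANY (assumed)
    misp_indirect = 0.8e9      # BR_MISP_RETIRED.INDIRECT (assumed)
    ipmisp_indirect = inst_retired / misp_indirect
    if ipmisp_indirect < 1e3:  # the MetricThreshold above
        print(f"flagged: IpMisp_Indirect = {ipmisp_indirect:.0f}")
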
{
- "BriefDescription": "Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate).",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.RET",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_ret",
@@ -619,7 +629,8 @@
},
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck",
- "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_latency + tma_fetch_bandwidth)) * (tma_dsb / (tma_mite + tma_dsb + tma_lsd + tma_ms)))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_bandwidth + tma_fetch_latency)) * (tma_dsb / (tma_dsb + tma_lsd + tma_mite + tma_ms)))",
"MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_bandwidth",
"MetricThreshold": "tma_info_botlnk_l2_dsb_bandwidth > 10",
@@ -628,7 +639,7 @@
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_mite / (tma_mite + tma_dsb + tma_lsd + tma_ms))",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_lsd + tma_mite + tma_ms))",
"MetricGroup": "DSBmiss;Fed;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_misses",
"MetricThreshold": "tma_info_botlnk_l2_dsb_misses > 10",
@@ -637,10 +648,11 @@
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
"MetricName": "tma_info_botlnk_l2_ic_misses",
- "MetricThreshold": "tma_info_botlnk_l2_ic_misses > 5"
+ "MetricThreshold": "tma_info_botlnk_l2_ic_misses > 5",
+ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: "
},
{
"BriefDescription": "Fraction of branches that are CALL or RET",
@@ -701,11 +713,11 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED.VECTOR) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
- "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)"
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=0x1@",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "tma_info_core_ilp"
},
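
Here cmask=1 restricts the divisor to cycles where at least one uop actually executed, so the ratio reads as uops per non-idle execution cycle. Assumed values:

    uops_executed = 12.0e9   # UOPS_EXECUTED.THREAD (assumed)
    active_cycles = 4.0e9    # same event with cmask=1 (assumed)
    print(f"tma_info_core_ilp = {uops_executed / active_cycles:.2f} uops/active cycle")
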
@@ -718,20 +730,20 @@
"PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp"
},
{
- "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details",
- "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=0x1\\,edge\\=0x1@",
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
"MetricGroup": "DSBmiss",
"MetricName": "tma_info_frontend_dsb_switch_cost"
},
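
The edge modifier in the divisor counts 0-to-1 transitions of the penalty condition, i.e. switch episodes rather than cycles, so the ratio is average penalty cycles per DSB-to-MITE switch. Assumed values:

    penalty_cycles = 6.0e8    # DSB2MITE_SWITCHES.PENALTY_CYCLES (assumed)
    switch_episodes = 2.0e7   # same event with cmask=1,edge (assumed)
    print(f"dsb_switch_cost = {penalty_cycles / switch_episodes:.1f} cycles/switch")
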
{
"BriefDescription": "Average number of Uops issued by front-end when it issued something",
- "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=0x1@",
+ "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=1@",
"MetricGroup": "Fed;FetchBW",
"MetricName": "tma_info_frontend_fetch_upc"
},
{
"BriefDescription": "Average Latency for L1 instruction cache misses",
- "MetricExpr": "ICACHE_16B.IFDATA_STALL / cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=0x1\\,edge\\=0x1@",
+ "MetricExpr": "ICACHE_16B.IFDATA_STALL / cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@",
"MetricGroup": "Fed;FetchLat;IcMiss",
"MetricName": "tma_info_frontend_icache_miss_latency"
},
@@ -773,7 +785,7 @@
"MetricName": "tma_info_frontend_tbpc"
},
{
- "BriefDescription": "Branch instructions per taken branch",
+ "BriefDescription": "Branch instructions per taken branch.",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;PGO",
"MetricName": "tma_info_inst_mix_bptkbranch"
@@ -791,7 +803,7 @@
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
"MetricThreshold": "tma_info_inst_mix_iparith < 10",
- "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW"
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
@@ -799,7 +811,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx128",
"MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
@@ -807,7 +819,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx256",
"MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate)",
@@ -815,7 +827,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx512",
"MetricThreshold": "tma_info_inst_mix_iparith_avx512 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
@@ -823,7 +835,7 @@
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_dp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
@@ -831,7 +843,7 @@
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_sp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
@@ -886,7 +898,7 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
- "MetricThreshold": "tma_info_inst_mix_iptb < 5 * 2 + 1",
+ "MetricThreshold": "tma_info_inst_mix_iptb < 11",
"PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp"
},
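
The threshold change here is constant folding only: 5 * 2 + 1 == 11, so the flagged condition is unchanged. Sketch with assumed counts:

    assert 5 * 2 + 1 == 11
    iptb = 500e9 / 60e9  # INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN (assumed)
    print("flagged" if iptb < 11 else "ok", f"(IpTB = {iptb:.1f})")
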
{
@@ -1011,7 +1023,7 @@
},
{
"BriefDescription": "Average Parallel L2 cache miss demand Loads",
- "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=0x1@",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@",
"MetricGroup": "Memory_BW;Offcore",
"MetricName": "tma_info_memory_latency_load_l2_mlp"
},
@@ -1073,8 +1085,8 @@
"MetricName": "tma_info_memory_tlb_store_stlb_mpki"
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per core",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=0x1@)",
+ "BriefDescription": "Mem;Backend;CacheHits",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute"
},
@@ -1097,22 +1109,28 @@
"MetricName": "tma_info_pipeline_fetch_mite"
},
{
+ "BriefDescription": "Average number of uops fetched from MS per cycle",
+ "MetricExpr": "IDQ.MS_UOPS / cpu@IDQ.MS_UOPS\\,cmask\\=1@",
+ "MetricGroup": "Fed;FetchLat;MicroSeq",
+ "MetricName": "tma_info_pipeline_fetch_ms"
+ },
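
The newly added metric follows the same cmask=1 pattern: uops delivered by the Microcode Sequencer per cycle in which the MS delivered anything. Assumed values:

    ms_uops = 1.5e9           # IDQ.MS_UOPS (assumed)
    ms_active_cycles = 0.6e9  # same event with cmask=1 (assumed)
    print(f"tma_info_pipeline_fetch_ms = {ms_uops / ms_active_cycles:.2f} uops/cycle")
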
+ {
"BriefDescription": "Instructions per a microcode Assist invocation",
"MetricExpr": "INST_RETIRED.ANY / ASSISTS.ANY",
"MetricGroup": "MicroSeq;Pipeline;Ret;Retire",
"MetricName": "tma_info_pipeline_ipassist",
- "MetricThreshold": "tma_info_pipeline_ipassist < 100000",
+ "MetricThreshold": "tma_info_pipeline_ipassist < 100e3",
"PublicDescription": "Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)"
},
{
- "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired",
- "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=0x1@",
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
"MetricGroup": "Pipeline;Ret",
"MetricName": "tma_info_pipeline_retire"
},
{
"BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]",
- "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / tma_info_system_time",
+ "MetricExpr": "tma_info_system_turbo_utilization * msr@tsc@ / 1e9 / tma_info_system_time",
"MetricGroup": "Power;Summary",
"MetricName": "tma_info_system_core_frequency"
},
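
Replacing the bare TSC alias with msr@tsc@ reads the timestamp counter through the msr PMU; the formula itself is unchanged: turbo utilization times the TSC rate over the elapsed time. A sketch with assumed numbers:

    turbo_utilization = 1.25  # tma_info_system_turbo_utilization (assumed)
    tsc_ticks = 3.0e9         # msr@tsc@ over the interval (assumed)
    elapsed_s = 1.0           # tma_info_system_time (assumed)
    ghz = turbo_utilization * tsc_ticks / 1e9 / elapsed_s
    print(f"tma_info_system_core_frequency = {ghz:.2f} GHz")
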
@@ -1124,7 +1142,7 @@
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
@@ -1133,7 +1151,7 @@
"MetricExpr": "64 * (UNC_ARB_TRK_REQUESTS.ALL + UNC_ARB_COH_TRK_REQUESTS.ALL) / 1e6 / tma_info_system_time / 1e3",
"MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW",
"MetricName": "tma_info_system_dram_bw_use",
- "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth, tma_sq_full"
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth, tma_sq_full"
},
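
The bandwidth formula treats each tracked arbiter request as one 64-byte cache line and scales to GB/s over the measured interval. Assumed counts:

    trk = 4.0e8      # UNC_ARB_TRK_REQUESTS.ALL (assumed)
    coh_trk = 0.5e8  # UNC_ARB_COH_TRK_REQUESTS.ALL (assumed)
    elapsed_s = 1.0  # tma_info_system_time (assumed)
    gb_per_s = 64 * (trk + coh_trk) / 1e6 / elapsed_s / 1e3
    print(f"tma_info_system_dram_bw_use = {gb_per_s:.1f} GB/s")
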
{
"BriefDescription": "Giga Floating Point Operations Per Second",
@@ -1147,14 +1165,13 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
"MetricGroup": "Branches;OS",
"MetricName": "tma_info_system_ipfarbranch",
- "MetricThreshold": "tma_info_system_ipfarbranch < 1000000"
+ "MetricThreshold": "tma_info_system_ipfarbranch < 1e6"
},
{
"BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
"MetricGroup": "OS",
- "MetricName": "tma_info_system_kernel_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_system_kernel_cpi"
},
{
"BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
@@ -1165,6 +1182,7 @@
},
{
"BriefDescription": "Average number of parallel data read requests to external memory",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "UNC_ARB_DAT_OCCUPANCY.RD / UNC_ARB_DAT_OCCUPANCY.RD@cmask\\=1@",
"MetricGroup": "Mem;MemoryBW;SoC",
"MetricName": "tma_info_system_mem_parallel_reads",
@@ -1195,7 +1213,7 @@
"MetricExpr": "CORE_POWER.LVL0_TURBO_LICENSE / tma_info_core_core_clks",
"MetricGroup": "Power",
"MetricName": "tma_info_system_power_license0_utilization",
- "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes"
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes."
},
{
"BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1",
@@ -1203,7 +1221,7 @@
"MetricGroup": "Power",
"MetricName": "tma_info_system_power_license1_utilization",
"MetricThreshold": "tma_info_system_power_license1_utilization > 0.5",
- "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions"
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions."
},
{
"BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX)",
@@ -1211,7 +1229,7 @@
"MetricGroup": "Power",
"MetricName": "tma_info_system_power_license2_utilization",
"MetricThreshold": "tma_info_system_power_license2_utilization > 0.5",
- "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX). This includes high current AVX 512-bit instructions"
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX). This includes high current AVX 512-bit instructions."
},
{
"BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
@@ -1239,7 +1257,7 @@
"MetricName": "tma_info_system_turbo_utilization"
},
{
- "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active",
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Pipeline",
"MetricName": "tma_info_thread_clks"
@@ -1248,15 +1266,14 @@
"BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / tma_info_thread_ipc",
"MetricGroup": "Mem;Pipeline",
- "MetricName": "tma_info_thread_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_thread_cpi"
},
{
"BriefDescription": "The ratio of Executed- by Issued-Uops",
"MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
"MetricGroup": "Cor;Pipeline",
"MetricName": "tma_info_thread_execute_per_issue",
- "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage"
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
},
{
"BriefDescription": "Instructions Per Cycle (per Logical Processor)",
@@ -1266,13 +1283,13 @@
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
- "MetricExpr": "slots",
+ "MetricExpr": "TOPDOWN.SLOTS",
"MetricGroup": "TmaL1;tma_L1_group",
"MetricName": "tma_info_thread_slots"
},
{
"BriefDescription": "Fraction of Physical Core issue-slots utilized by this Logical Processor",
- "MetricExpr": "(tma_info_thread_slots / (slots / 2) if #SMT_on else 1)",
+ "MetricExpr": "(tma_info_thread_slots / (TOPDOWN.SLOTS / 2) if #SMT_on else 1)",
"MetricGroup": "SMT;TmaL1;tma_L1_group",
"MetricName": "tma_info_thread_slots_utilization"
},
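
Reading the #SMT_on branch above naively: with 2-way SMT active, this thread's slots are measured against half the physical core's slots, otherwise utilization is pinned to 1. The two operands below are assumed to be distinct readings:

    smt_on = True
    core_slots = 40e9    # TOPDOWN.SLOTS accumulated core-wide (assumed)
    thread_slots = 18e9  # tma_info_thread_slots for this thread (assumed)
    util = thread_slots / (core_slots / 2) if smt_on else 1.0
    print(f"tma_info_thread_slots_utilization = {util:.2f}")
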
@@ -1288,14 +1305,14 @@
"MetricExpr": "tma_retiring * tma_info_thread_slots / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
- "MetricThreshold": "tma_info_thread_uptb < 5 * 1.5"
+ "MetricThreshold": "tma_info_thread_uptb < 7.5"
},
{
- "BriefDescription": "This metric represents fraction of cycles where the Integer Divider unit was active",
+ "BriefDescription": "This metric represents fraction of cycles where the Integer Divider unit was active.",
"MetricExpr": "tma_divider - tma_fp_divider",
"MetricGroup": "TopdownL4;tma_L4_group;tma_divider_group",
"MetricName": "tma_int_divider",
- "MetricThreshold": "tma_int_divider > 0.2 & tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_int_divider > 0.2 & (tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
{
@@ -1303,8 +1320,8 @@
"MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
- "MetricThreshold": "tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS, FRONTEND_RETIRED.ITLB_MISS",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
"ScaleUnit": "100%"
},
{
@@ -1312,17 +1329,17 @@
"MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_thread_clks, 0)",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
"MetricName": "tma_l1_bound",
- "MetricThreshold": "tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 Data (L1D) cache. The L1D cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1D. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache",
+ "BriefDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache",
"MetricExpr": "min(2 * (MEM_INST_RETIRED.ALL_LOADS - MEM_LOAD_RETIRED.FB_HIT - MEM_LOAD_RETIRED.L1_MISS) * 20 / 100, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_l1_latency_dependency",
- "MetricThreshold": "tma_l1_latency_dependency > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache. The short latency of the L1D cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
+ "MetricThreshold": "tma_l1_latency_dependency > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache. The short latency of the L1D cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
"ScaleUnit": "100%"
},
{
@@ -1331,7 +1348,7 @@
"MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + L1D_PEND_MISS.FB_FULL_PERIODS) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks)",
"MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
- "MetricThreshold": "tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT",
"ScaleUnit": "100%"
},
@@ -1340,27 +1357,26 @@
"MetricExpr": "3.5 * tma_info_system_core_frequency * MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_l2_bound_group",
"MetricName": "tma_l2_hit_latency",
- "MetricThreshold": "tma_l2_hit_latency > 0.05 & tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l2_hit_latency > 0.05 & (tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L2 cache under unloaded scenarios (possibly L2 latency limited). Avoiding L1 cache misses (i.e. L1 misses/L2 hits) will improve the latency. Sample with: MEM_LOAD_RETIRED.L2_HIT",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS) / tma_info_thread_clks",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
- "MetricThreshold": "tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
- "MetricExpr": "(12.5 * tma_info_system_core_frequency - 3.5 * tma_info_system_core_frequency) * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks",
+ "MetricExpr": "9 * tma_info_system_core_frequency * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
- "MetricThreshold": "tma_l3_hit_latency > 0.1 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT. Related metrics: tma_bottleneck_cache_memory_latency, tma_branch_resteers, tma_mem_latency, tma_store_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_bottleneck_data_cache_memory_latency, tma_mem_latency",
"ScaleUnit": "100%"
},
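
The expression change is again constant folding: (12.5 - 3.5) * frequency collapses to 9 * frequency, the modeled extra cycles of an L3 hit over an L2 hit. Sketch with assumed values:

    f_ghz = 3.2  # tma_info_system_core_frequency (assumed)
    assert abs((12.5 * f_ghz - 3.5 * f_ghz) - 9 * f_ghz) < 1e-9
    l3_hits_weighted = 2.0e7 * 1.3  # MEM_LOAD_RETIRED.L3_HIT times FB-hit factor (assumed)
    clks = 5.0e9                    # tma_info_thread_clks (assumed)
    print(f"tma_l3_hit_latency = {9 * f_ghz * l3_hits_weighted / clks:.2%}")
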
{
@@ -1368,18 +1384,18 @@
"MetricExpr": "DECODE.LCP / tma_info_thread_clks",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_lcp",
- "MetricThreshold": "tma_lcp > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation)",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
"MetricExpr": "max(0, tma_retiring - tma_heavy_operations)",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_light_operations",
"MetricThreshold": "tma_light_operations > 0.6",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
"ScaleUnit": "100%"
},
{
@@ -1396,7 +1412,7 @@
"MetricExpr": "tma_dtlb_load - tma_load_stlb_miss",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
"MetricName": "tma_load_stlb_hit",
- "MetricThreshold": "tma_load_stlb_hit > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_hit > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
@@ -1404,31 +1420,31 @@
"MetricExpr": "DTLB_LOAD_MISSES.WALK_ACTIVE / tma_info_thread_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
"MetricName": "tma_load_stlb_miss",
- "MetricThreshold": "tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_1G / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_1g",
- "MetricThreshold": "tma_load_stlb_miss_1g > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_1g > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_2m",
- "MetricThreshold": "tma_load_stlb_miss_2m > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_2m > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_4K / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_4k",
- "MetricThreshold": "tma_load_stlb_miss_4k > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_4k > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
@@ -1437,7 +1453,7 @@
"MetricExpr": "(16 * max(0, MEM_INST_RETIRED.LOCK_LOADS - L2_RQSTS.ALL_RFO) + MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES * (10 * L2_RQSTS.RFO_HIT + min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO))) / tma_info_thread_clks",
"MetricGroup": "LockCont;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
- "MetricThreshold": "tma_lock_latency > 0.2 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
"ScaleUnit": "100%"
},
@@ -1447,7 +1463,7 @@
"MetricGroup": "FetchBW;LSD;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_lsd",
"MetricThreshold": "tma_lsd > 0.15 & tma_fetch_bandwidth > 0.2",
- "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit. LSD typically does well sustaining Uop supply. However; in some rare cases; optimal uop-delivery could not be reached for small loops whose size (in terms of number of uops) does not suit well the LSD structure",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit. LSD typically does well sustaining Uop supply. However; in some rare cases; optimal uop-delivery could not be reached for small loops whose size (in terms of number of uops) does not suit well the LSD structure.",
"ScaleUnit": "100%"
},
{
@@ -1457,16 +1473,16 @@
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_bottleneck_memory_synchronization, tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_bottleneck_memory_synchronization, tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
- "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=0x4@) / tma_info_thread_clks",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
- "MetricThreshold": "tma_mem_bandwidth > 0.2 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
"ScaleUnit": "100%"
},
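
cmask=4 above counts cycles with at least four outstanding off-core demand data reads, a heuristic for running close to DRAM bandwidth limits; tma_mem_latency just below takes cycles with any outstanding read and subtracts this term. Assumed values:

    clks = 5.0e9              # CPU_CLK_UNHALTED.THREAD / thread clks (assumed)
    cycles_ge4_reads = 1.2e9  # OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD, cmask=4 (assumed)
    tma_mem_bandwidth = min(clks, cycles_ge4_reads) / clks
    print(f"tma_mem_bandwidth = {tma_mem_bandwidth:.2%}")
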
{
@@ -1474,8 +1490,8 @@
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
"MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
- "MetricThreshold": "tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_bottleneck_cache_memory_latency, tma_l3_hit_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_bottleneck_data_cache_memory_latency, tma_l3_hit_latency",
"ScaleUnit": "100%"
},
{
@@ -1485,11 +1501,11 @@
"MetricName": "tma_memory_bound",
"MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two)",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations , uops for memory load or store accesses",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_light_operations * MEM_INST_RETIRED.ANY / INST_RETIRED.ANY",
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
@@ -1511,7 +1527,7 @@
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
"MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
"MetricName": "tma_mispredicts_resteers",
- "MetricThreshold": "tma_mispredicts_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_bottleneck_mispredictions, tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost",
"ScaleUnit": "100%"
},
@@ -1526,24 +1542,24 @@
},
{
"BriefDescription": "This metric represents fraction of cycles where (only) 4 uops were delivered by the MITE pipeline",
- "MetricExpr": "(cpu@IDQ.MITE_UOPS\\,cmask\\=0x4@ - cpu@IDQ.MITE_UOPS\\,cmask\\=0x5@) / tma_info_thread_clks",
+ "MetricExpr": "(cpu@IDQ.MITE_UOPS\\,cmask\\=4@ - cpu@IDQ.MITE_UOPS\\,cmask\\=5@) / tma_info_thread_clks",
"MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_mite_group",
"MetricName": "tma_mite_4wide",
- "MetricThreshold": "tma_mite_4wide > 0.05 & tma_mite > 0.1 & tma_fetch_bandwidth > 0.2",
+ "MetricThreshold": "tma_mite_4wide > 0.05 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued , the Count Domain; [ADL+] cycles)",
+ "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles)",
"MetricExpr": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH / UOPS_ISSUED.ANY",
"MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group",
"MetricName": "tma_mixing_vectors",
"MetricThreshold": "tma_mixing_vectors > 0.05",
- "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued , the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
+ "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the Microcode Sequencer (MS) unit - see Microcode_Sequencer node for details",
- "MetricExpr": "cpu@IDQ.MS_UOPS\\,cmask\\=0x1@ / tma_info_core_core_clks / 2",
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the Microcode Sequencer (MS) unit - see Microcode_Sequencer node for details.",
+ "MetricExpr": "cpu@IDQ.MS_UOPS\\,cmask\\=1@ / tma_info_core_core_clks / 3.3",
"MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_ms",
"MetricThreshold": "tma_ms > 0.05 & tma_fetch_bandwidth > 0.2",
@@ -1554,7 +1570,7 @@
"MetricExpr": "3 * IDQ.MS_SWITCHES / tma_info_thread_clks",
"MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
"MetricName": "tma_ms_switches",
- "MetricThreshold": "tma_ms_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_bottleneck_irregular_overhead, tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
"ScaleUnit": "100%"
},
@@ -1563,7 +1579,7 @@
"MetricExpr": "tma_light_operations * INST_RETIRED.NOP / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "BvBO;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_nop_instructions",
- "MetricThreshold": "tma_nop_instructions > 0.1 & tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
"ScaleUnit": "100%"
},
@@ -1578,19 +1594,19 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types)",
+ "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).",
"MetricExpr": "max(tma_branch_mispredicts * (1 - BR_MISP_RETIRED.ALL_BRANCHES / (INT_MISC.CLEARS_COUNT - MACHINE_CLEARS.COUNT)), 0.0001)",
"MetricGroup": "BrMispredicts;BvIO;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_other_mispredicts",
- "MetricThreshold": "tma_other_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering",
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.",
"MetricExpr": "max(tma_machine_clears * (1 - MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.COUNT), 0.0001)",
"MetricGroup": "BvIO;Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_other_nukes",
- "MetricThreshold": "tma_other_nukes > 0.05 & tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
@@ -1634,8 +1650,8 @@
"MetricExpr": "((tma_ports_utilized_0 * tma_info_thread_clks + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / tma_info_thread_clks if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_thread_clks)",
"MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_ports_utilization",
- "MetricThreshold": "tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
"ScaleUnit": "100%"
},
{
@@ -1643,8 +1659,8 @@
"MetricExpr": "cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
- "MetricThreshold": "tma_ports_utilized_0 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
"ScaleUnit": "100%"
},
{
@@ -1652,7 +1668,7 @@
"MetricExpr": "EXE_ACTIVITY.1_PORTS_UTIL / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_1",
- "MetricThreshold": "tma_ports_utilized_1 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Sample with: EXE_ACTIVITY.1_PORTS_UTIL. Related metrics: tma_l1_bound",
"ScaleUnit": "100%"
},
@@ -1661,7 +1677,7 @@
"MetricExpr": "EXE_ACTIVITY.2_PORTS_UTIL / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_2",
- "MetricThreshold": "tma_ports_utilized_2 > 0.15 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Sample with: EXE_ACTIVITY.2_PORTS_UTIL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
"ScaleUnit": "100%"
},
@@ -1670,14 +1686,14 @@
"MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_thread_clks",
"MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
- "MetricThreshold": "tma_ports_utilized_3m > 0.4 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound)",
"MetricGroup": "BvUW;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
@@ -1690,7 +1706,7 @@
"MetricExpr": "RESOURCE_STALLS.SCOREBOARD / tma_info_thread_clks",
"MetricGroup": "BvIO;PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
"MetricName": "tma_serializing_operation",
- "MetricThreshold": "tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
"ScaleUnit": "100%"
},
@@ -1699,7 +1715,7 @@
"MetricExpr": "140 * MISC_RETIRED.PAUSE_INST / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_slow_pause",
- "MetricThreshold": "tma_slow_pause > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. Sample with: MISC_RETIRED.PAUSE_INST",
"ScaleUnit": "100%"
},
@@ -1709,17 +1725,16 @@
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_split_loads",
"MetricThreshold": "tma_split_loads > 0.3",
- "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS_PS",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents rate of split store accesses",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
"MetricName": "tma_split_stores",
- "MetricThreshold": "tma_split_stores > 0.2 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
"ScaleUnit": "100%"
},
{
@@ -1727,8 +1742,8 @@
"MetricExpr": "L1D_PEND_MISS.L2_STALL / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
- "MetricThreshold": "tma_sq_full > 0.3 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
"ScaleUnit": "100%"
},
{
@@ -1736,18 +1751,17 @@
"MetricExpr": "EXE_ACTIVITY.BOUND_ON_STORES / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_store_bound",
- "MetricThreshold": "tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES_PS",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
- "MetricThreshold": "tma_store_fwd_blk > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
"ScaleUnit": "100%"
},
{
@@ -1755,8 +1769,8 @@
"MetricExpr": "(L2_RQSTS.RFO_HIT * 10 * (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) + (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
"MetricGroup": "BvML;LockCont;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
- "MetricThreshold": "tma_store_latency > 0.1 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_branch_resteers, tma_fb_full, tma_l3_hit_latency, tma_lock_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
"ScaleUnit": "100%"
},
{
@@ -1773,7 +1787,7 @@
"MetricExpr": "tma_dtlb_store - tma_store_stlb_miss",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
"MetricName": "tma_store_stlb_hit",
- "MetricThreshold": "tma_store_stlb_hit > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_hit > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
@@ -1781,31 +1795,31 @@
"MetricExpr": "DTLB_STORE_MISSES.WALK_ACTIVE / tma_info_core_core_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
"MetricName": "tma_store_stlb_miss",
- "MetricThreshold": "tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_1G / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_1g",
- "MetricThreshold": "tma_store_stlb_miss_1g > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_1g > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_2m",
- "MetricThreshold": "tma_store_stlb_miss_2m > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_2m > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_4K / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_4k",
- "MetricThreshold": "tma_store_stlb_miss_4k > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_4k > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
@@ -1813,7 +1827,7 @@
"MetricExpr": "9 * OCR.STREAMING_WR.ANY_RESPONSE / tma_info_thread_clks",
"MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueSmSt;tma_store_bound_group",
"MetricName": "tma_streaming_stores",
- "MetricThreshold": "tma_streaming_stores > 0.2 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_streaming_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should Streaming stores be a bottleneck. Sample with: OCR.STREAMING_WR.ANY_RESPONSE. Related metrics: tma_fb_full",
"ScaleUnit": "100%"
},
@@ -1822,7 +1836,7 @@
"MetricExpr": "10 * BACLEARS.ANY / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
- "MetricThreshold": "tma_unknown_branches > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: BACLEARS.ANY",
"ScaleUnit": "100%"
},
@@ -1831,8 +1845,8 @@
"MetricExpr": "tma_retiring * UOPS_EXECUTED.X87 / UOPS_EXECUTED.THREAD",
"MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
"MetricName": "tma_x87_use",
- "MetricThreshold": "tma_x87_use > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
"ScaleUnit": "100%"
},
{
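The recurring MetricThreshold edits above only add explicit parentheses: with a left-associative '&' the flat and nested forms are equivalent, so the change is cosmetic for evaluators that honor standard precedence but removes ambiguity for simpler expression parsers. A minimal sketch in Python (the metric values below are hypothetical, chosen for illustration only):

# Minimal sketch: the parenthesized and unparenthesized threshold strings
# evaluate identically for plain boolean '&' chains; the parentheses only
# make the nesting of the parent-metric gates explicit.
vals = {
    "tma_ports_utilized_0": 0.25,   # hypothetical value
    "tma_ports_utilization": 0.18,  # hypothetical value
    "tma_core_bound": 0.12,         # hypothetical value
    "tma_backend_bound": 0.3,       # hypothetical value
}

flat = (vals["tma_ports_utilized_0"] > 0.2
        and vals["tma_ports_utilization"] > 0.15
        and vals["tma_core_bound"] > 0.1
        and vals["tma_backend_bound"] > 0.2)

nested = (vals["tma_ports_utilized_0"] > 0.2
          and (vals["tma_ports_utilization"] > 0.15
               and (vals["tma_core_bound"] > 0.1
                    and vals["tma_backend_bound"] > 0.2)))

assert flat == nested  # grouping is associativity-neutral for '&'
print(flat)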
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json b/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json
index e95d1005e22f..5c9ab7680762 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json
@@ -278,5 +278,13 @@
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
"SampleAfterValue": "2000003",
"UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Valid instructions written to IQ per cycle.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "INSTS_WRITTEN_TO_IQ.INSTS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
}
]
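The hunk above adds INSTS_WRITTEN_TO_IQ.INSTS to frontend.json; the matching removal from other.json appears further below, so the event is moved between category files rather than redefined. A quick sanity check that the relocated event still resolves by name, as a sketch assuming a Sandy Bridge machine and a perf binary built with these event lists (perf stat -e accepts JSON event names on matching CPUs):

# Hypothetical sanity check: count the relocated event for one second.
import subprocess

cmd = ["perf", "stat", "-e", "INSTS_WRITTEN_TO_IQ.INSTS", "--", "sleep", "1"]
result = subprocess.run(cmd, capture_output=True, text=True)
print(result.stderr)  # perf stat prints its counter summary on stderr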
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/metricgroups.json b/tools/perf/pmu-events/arch/x86/sandybridge/metricgroups.json
index 7dc7eb0d3dd3..eb8fbd14138a 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/metricgroups.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/metricgroups.json
@@ -9,6 +9,7 @@
"BvCB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BvFB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BvIO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BvML": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BvMP": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"BvMS": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
@@ -33,6 +34,7 @@
"InsType": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"L2Evicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"LSD": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "LockCont": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"MachineClears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Machine_Clears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Mem": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
@@ -48,6 +50,7 @@
"Pipeline": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"PortsUtil": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Power": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Prefetches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Ret": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"Retire": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
"SMT": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
@@ -75,6 +78,7 @@
"tma_bad_speculation_group": "Metrics contributing to tma_bad_speculation category",
"tma_branch_resteers_group": "Metrics contributing to tma_branch_resteers category",
"tma_core_bound_group": "Metrics contributing to tma_core_bound category",
+ "tma_divider_group": "Metrics contributing to tma_divider category",
"tma_dram_bound_group": "Metrics contributing to tma_dram_bound category",
"tma_dtlb_load_group": "Metrics contributing to tma_dtlb_load category",
"tma_dtlb_store_group": "Metrics contributing to tma_dtlb_store category",
@@ -99,6 +103,7 @@
"tma_issueSmSt": "Metrics related by the issue $issueSmSt",
"tma_issueSyncxn": "Metrics related by the issue $issueSyncxn",
"tma_issueTLB": "Metrics related by the issue $issueTLB",
+ "tma_itlb_misses_group": "Metrics contributing to tma_itlb_misses category",
"tma_l1_bound_group": "Metrics contributing to tma_l1_bound category",
"tma_light_operations_group": "Metrics contributing to tma_light_operations category",
"tma_machine_clears_group": "Metrics contributing to tma_machine_clears category",
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/other.json b/tools/perf/pmu-events/arch/x86/sandybridge/other.json
index 42692fa24b6c..970839a9c786 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/other.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/other.json
@@ -34,14 +34,6 @@
"UMask": "0x2"
},
{
- "BriefDescription": "Valid instructions written to IQ per cycle.",
- "Counter": "0,1,2,3",
- "EventCode": "0x17",
- "EventName": "INSTS_WRITTEN_TO_IQ.INSTS",
- "SampleAfterValue": "2000003",
- "UMask": "0x1"
- },
- {
"BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock.",
"Counter": "0,1,2,3",
"EventCode": "0x63",
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json b/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
index ff2e515c744a..d40761903429 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
@@ -1,49 +1,49 @@
[
{
"BriefDescription": "C2 residency percent per package",
- "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per core",
- "MetricExpr": "cstate_core@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per package",
- "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per core",
- "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per package",
- "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per core",
- "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per package",
- "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Pkg_Residency",
"ScaleUnit": "100%"
@@ -71,7 +71,6 @@
},
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "1 - (tma_frontend_bound + tma_bad_speculation + tma_retiring)",
"MetricGroup": "BvOB;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
@@ -127,7 +126,7 @@
"MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
"MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
- "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_UOPS",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
"ScaleUnit": "100%"
},
{
@@ -211,7 +210,7 @@
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_128b",
"MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -220,7 +219,7 @@
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_256b",
"MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -240,7 +239,7 @@
"MetricName": "tma_heavy_operations",
"MetricThreshold": "tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. ([ICL+] Note this may overcount due to approximation using indirect events; [ADL+] .)",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+])",
"ScaleUnit": "100%"
},
{
@@ -276,6 +275,12 @@
"PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_lcp"
},
{
+ "BriefDescription": "Taken Branches retired Per Cycle",
+ "MetricExpr": "BR_INST_RETIRED.NEAR_TAKEN / tma_info_thread_clks",
+ "MetricGroup": "Branches;FetchBW",
+ "MetricName": "tma_info_frontend_tbpc"
+ },
+ {
"BriefDescription": "Total number of retired Instructions",
"MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary;TmaL1;tma_L1_group",
@@ -290,7 +295,7 @@
},
{
"BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]",
- "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / duration_time",
+ "MetricExpr": "tma_info_system_turbo_utilization * msr@tsc@ / 1e9 / tma_info_system_time",
"MetricGroup": "Power;Summary",
"MetricName": "tma_info_system_core_frequency"
},
@@ -302,20 +307,20 @@
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
{
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
- "MetricExpr": "64 * (UNC_ARB_TRK_REQUESTS.ALL + UNC_ARB_COH_TRK_REQUESTS.ALL) / 1e6 / duration_time / 1e3",
+ "MetricExpr": "64 * (UNC_ARB_TRK_REQUESTS.ALL + UNC_ARB_COH_TRK_REQUESTS.ALL) / 1e6 / tma_info_system_time / 1e3",
"MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW",
"MetricName": "tma_info_system_dram_bw_use",
"PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_mem_bandwidth"
},
{
"BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * (FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) + 8 * SIMD_FP_256.PACKED_SINGLE) / 1e9 / duration_time",
+ "MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * (FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) + 8 * SIMD_FP_256.PACKED_SINGLE) / 1e9 / tma_info_system_time",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_system_gflops",
"PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width"
@@ -341,6 +346,13 @@
"MetricThreshold": "tma_info_system_kernel_utilization > 0.05"
},
{
+ "BriefDescription": "PerfMon Event Multiplexing accuracy indicator",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "Summary",
+ "MetricName": "tma_info_system_mux",
+ "MetricThreshold": "tma_info_system_mux > 1.1 | tma_info_system_mux < 0.9"
+ },
+ {
"BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
"MetricExpr": "(1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / (CPU_CLK_UNHALTED.REF_XCLK_ANY / 2) if #SMT_on else 0)",
"MetricGroup": "SMT",
@@ -353,6 +365,13 @@
"MetricName": "tma_info_system_socket_clks"
},
{
+ "BriefDescription": "Run duration time in seconds",
+ "MetricExpr": "duration_time",
+ "MetricGroup": "Summary",
+ "MetricName": "tma_info_system_time",
+ "MetricThreshold": "tma_info_system_time < 1"
+ },
+ {
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "tma_info_thread_clks / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
@@ -448,7 +467,7 @@
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=6@) / tma_info_thread_clks",
- "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
"MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_info_system_dram_bw_use",
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json
index e35dbb7c2ccd..c66324d41a89 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json
@@ -1,5 +1,68 @@
[
{
+ "BriefDescription": "Hit snoop reply with data, line invalidated.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "CORE_SNOOP_RESPONSE.I_FWD_FE",
+ "PublicDescription": "Counts responses to snoops indicating the line will now be (I)nvalidated: removed from this core's cache, after the data is forwarded back to the requestor and indicating the data was found unmodified in the (FE) Forward or Exclusive State in this cores caches cache. A single snoop response from the core counts on all hyperthreads of the core.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "HitM snoop reply with data, line invalidated.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "CORE_SNOOP_RESPONSE.I_FWD_M",
+ "PublicDescription": "Counts responses to snoops indicating the line will now be (I)nvalidated: removed from this core's caches, after the data is forwarded back to the requestor, and indicating the data was found modified(M) in this cores caches cache (aka HitM response). A single snoop response from the core counts on all hyperthreads of the core.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Hit snoop reply without sending the data, line invalidated.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "CORE_SNOOP_RESPONSE.I_HIT_FSE",
+ "PublicDescription": "Counts responses to snoops indicating the line will now be (I)nvalidated in this core's caches without forwarded back to the requestor. The line was in Forward, Shared or Exclusive (FSE) state in this cores caches. A single snoop response from the core counts on all hyperthreads of the core.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Line not found snoop reply",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "CORE_SNOOP_RESPONSE.MISS",
+ "PublicDescription": "Counts responses to snoops indicating that the data was not found (IHitI) in this core's caches. A single snoop response from the core counts on all hyperthreads of the Core.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Hit snoop reply with data, line kept in Shared state.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "CORE_SNOOP_RESPONSE.S_FWD_FE",
+ "PublicDescription": "Counts responses to snoops indicating the line may be kept on this core in the (S)hared state, after the data is forwarded back to the requestor, initially the data was found in the cache in the (FS) Forward or Shared state. A single snoop response from the core counts on all hyperthreads of the core.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "HitM snoop reply with data, line kept in Shared state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "CORE_SNOOP_RESPONSE.S_FWD_M",
+ "PublicDescription": "Counts responses to snoops indicating the line may be kept on this core in the (S)hared state, after the data is forwarded back to the requestor, initially the data was found in the cache in the (M)odified state. A single snoop response from the core counts on all hyperthreads of the core.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Hit snoop reply without sending the data, line kept in Shared state.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "CORE_SNOOP_RESPONSE.S_HIT_FSE",
+ "PublicDescription": "Counts responses to snoops indicating the line was kept on this core in the (S)hared state, and that the data was found unmodified but not forwarded back to the requestor, initially the data was found in the cache in the (FSE) Forward, Shared state or Exclusive state. A single snoop response from the core counts on all hyperthreads of the core.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
"BriefDescription": "L1D.HWPF_MISS",
"Counter": "0,1,2,3",
"EventCode": "0x51",
@@ -311,7 +374,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_LOADS",
- "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW.",
+ "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW. Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x81"
},
@@ -321,7 +384,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ALL_STORES",
- "PublicDescription": "Counts all retired store instructions.",
+ "PublicDescription": "Counts all retired store instructions. Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x82"
},
@@ -331,7 +394,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.ANY",
- "PublicDescription": "Counts all retired memory instructions - loads and stores.",
+ "PublicDescription": "Counts all retired memory instructions - loads and stores. Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x83"
},
@@ -341,7 +404,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.LOCK_LOADS",
- "PublicDescription": "Counts retired load instructions with locked access.",
+ "PublicDescription": "Counts retired load instructions with locked access. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x21"
},
@@ -351,7 +414,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
- "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
+ "PublicDescription": "Counts retired load instructions that split across a cacheline boundary. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x41"
},
@@ -361,7 +424,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.SPLIT_STORES",
- "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
+ "PublicDescription": "Counts retired store instructions that split across a cacheline boundary. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x42"
},
@@ -371,7 +434,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
- "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x11"
},
@@ -381,7 +444,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
- "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x12"
},
@@ -400,7 +463,7 @@
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD",
- "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3.",
+ "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3. Available PDIST counters: 0",
"SampleAfterValue": "20011",
"UMask": "0x4"
},
@@ -410,7 +473,7 @@
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
- "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache. Available PDIST counters: 0",
"SampleAfterValue": "20011",
"UMask": "0x1"
},
@@ -420,7 +483,7 @@
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
- "PublicDescription": "Counts retired load instructions whose data sources were hits in L3 without snoops required.",
+ "PublicDescription": "Counts retired load instructions whose data sources were hits in L3 without snoops required. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x8"
},
@@ -430,7 +493,7 @@
"Data_LA": "1",
"EventCode": "0xd2",
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD",
- "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache. Available PDIST counters: 0",
"SampleAfterValue": "20011",
"UMask": "0x2"
},
@@ -440,7 +503,7 @@
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
- "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from local DRAM.",
+ "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from local DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -450,6 +513,7 @@
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
+ "PublicDescription": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x2"
},
@@ -459,7 +523,7 @@
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD",
- "PublicDescription": "Retired load instructions whose data sources was forwarded from a remote cache.",
+ "PublicDescription": "Retired load instructions whose data sources was forwarded from a remote cache. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x8"
},
@@ -469,6 +533,7 @@
"Data_LA": "1",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
+ "PublicDescription": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x4"
},
@@ -477,7 +542,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xd3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM",
- "PublicDescription": "Counts retired load instructions with remote Intel(R) Optane(TM) DC persistent memory as the data source and the data request missed L3.",
+ "PublicDescription": "Counts retired load instructions with remote Intel(R) Optane(TM) DC persistent memory as the data source and the data request missed L3. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x10"
},
@@ -487,7 +552,7 @@
"Data_LA": "1",
"EventCode": "0xd4",
"EventName": "MEM_LOAD_MISC_RETIRED.UC",
- "PublicDescription": "Retired instructions with at least one load to uncacheable memory-type, or at least one cache-line split locked access (Bus Lock).",
+ "PublicDescription": "Retired instructions with at least one load to uncacheable memory-type, or at least one cache-line split locked access (Bus Lock). Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x4"
},
@@ -497,7 +562,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.FB_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x40"
},
@@ -507,7 +572,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source. Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x1"
},
@@ -517,7 +582,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L1_MISS",
- "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x8"
},
@@ -527,7 +592,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_HIT",
- "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources.",
+ "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x2"
},
@@ -537,7 +602,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L2_MISS",
- "PublicDescription": "Counts retired load instructions missed L2 cache as data sources.",
+ "PublicDescription": "Counts retired load instructions missed L2 cache as data sources. Available PDIST counters: 0",
"SampleAfterValue": "100021",
"UMask": "0x10"
},
@@ -547,7 +612,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100021",
"UMask": "0x4"
},
@@ -557,7 +622,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.L3_MISS",
- "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "50021",
"UMask": "0x20"
},
@@ -567,7 +632,7 @@
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_RETIRED.LOCAL_PMM",
- "PublicDescription": "Counts retired load instructions with local Intel(R) Optane(TM) DC persistent memory as the data source and the data request missed L3.",
+ "PublicDescription": "Counts retired load instructions with local Intel(R) Optane(TM) DC persistent memory as the data source and the data request missed L3. Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x80"
},
@@ -589,12 +654,24 @@
"UMask": "0x3"
},
{
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit in the L3 or were snooped from another core's caches on the same socket.",
"Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit in the L3 or were snooped from another core's caches on the same socket. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -605,6 +682,7 @@
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that resulted in a snoop hit a modified line in another core's caches which forwarded the data. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -615,6 +693,7 @@
"EventName": "OCR.DEMAND_CODE_RD.SNC_CACHE.HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1008000004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -625,6 +704,18 @@
"EventName": "OCR.DEMAND_CODE_RD.SNC_CACHE.HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x808000004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "PublicDescription": "Counts demand data reads that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -635,6 +726,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0001",
+ "PublicDescription": "Counts demand data reads that hit in the L3 or were snooped from another core's caches on the same socket. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -645,6 +737,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0001",
+ "PublicDescription": "Counts demand data reads that resulted in a snoop hit a modified line in another core's caches which forwarded the data. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -655,6 +748,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0001",
+ "PublicDescription": "Counts demand data reads that resulted in a snoop that hit in another core, which did not forward the data. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -665,6 +759,29 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0001",
+ "PublicDescription": "Counts demand data reads that resulted in a snoop hit in another core's caches which forwarded the unmodified data to the requesting core. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by PMM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM accesses that are controlled by the close or distant SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.LOCAL_SOCKET_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x700C00001",
+ "PublicDescription": "Counts demand data reads that were supplied by PMM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM accesses that are controlled by the close or distant SNC Cluster. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by PMM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x703C00001",
+ "PublicDescription": "Counts demand data reads that were supplied by PMM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -675,6 +792,7 @@
"EventName": "OCR.DEMAND_DATA_RD.REMOTE_CACHE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1030000001",
+ "PublicDescription": "Counts demand data reads that were supplied by a cache on a remote socket where a snoop hit a modified line in another core's caches which forwarded the data. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -685,6 +803,18 @@
"EventName": "OCR.DEMAND_DATA_RD.REMOTE_CACHE.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x830000001",
+ "PublicDescription": "Counts demand data reads that were supplied by a cache on a remote socket where a snoop hit in another core's caches which forwarded the unmodified data to the requesting core. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by PMM attached to another socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.REMOTE_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x703000001",
+ "PublicDescription": "Counts demand data reads that were supplied by PMM attached to another socket. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -695,6 +825,7 @@
"EventName": "OCR.DEMAND_DATA_RD.SNC_CACHE.HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1008000001",
+ "PublicDescription": "Counts demand data reads that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -705,6 +836,18 @@
"EventName": "OCR.DEMAND_DATA_RD.SNC_CACHE.HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x808000001",
+ "PublicDescription": "Counts demand data reads that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F3FFC0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -715,6 +858,7 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit in the L3 or were snooped from another core's caches on the same socket. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -725,6 +869,7 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that resulted in a snoop hit a modified line in another core's caches which forwarded the data. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -735,6 +880,7 @@
"EventName": "OCR.DEMAND_RFO.SNC_CACHE.HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1008000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -745,6 +891,40 @@
"EventName": "OCR.DEMAND_RFO.SNC_CACHE.HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x808000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts data load hardware prefetch requests to the L1 data cache that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.HWPF_L1D.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10400",
+ "PublicDescription": "Counts data load hardware prefetch requests to the L1 data cache that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches (which bring data to L2) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.HWPF_L2.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10070",
+ "PublicDescription": "Counts hardware prefetches (which bring data to L2) that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches to the L3 only that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.HWPF_L3.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x12380",
+ "PublicDescription": "Counts hardware prefetches to the L3 only that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -755,6 +935,40 @@
"EventName": "OCR.HWPF_L3.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80082380",
+ "PublicDescription": "Counts hardware prefetches to the L3 only that hit in the L3 or were snooped from another core's caches on the same socket. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches to the L3 only that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline was homed in a remote socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.HWPF_L3.REMOTE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x90002380",
+ "PublicDescription": "Counts hardware prefetches to the L3 only that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline was homed in a remote socket. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts writebacks of modified cachelines and streaming stores that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.MODIFIED_WRITE.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10808",
+ "PublicDescription": "Counts writebacks of modified cachelines and streaming stores that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F3FFC4477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -765,6 +979,7 @@
"EventName": "OCR.READS_TO_CORE.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F003C4477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit in the L3 or were snooped from another core's caches on the same socket. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -775,6 +990,7 @@
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C4477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop hit a modified line in another core's caches which forwarded the data. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -785,6 +1001,7 @@
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C4477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop that hit in another core, which did not forward the data. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -795,6 +1012,29 @@
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C4477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop hit in another core's caches which forwarded the unmodified data to the requesting core. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM accesses that are controlled by the close or distant SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.LOCAL_SOCKET_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x700C04477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM accesses that are controlled by the close or distant SNC Cluster. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and were supplied by a remote socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F33004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and were supplied by a remote socket. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -805,6 +1045,7 @@
"EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1830004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop was sent and data was returned (Modified or Not Modified). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -815,6 +1056,7 @@
"EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1030004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop hit a modified line in another core's caches which forwarded the data. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -825,6 +1067,18 @@
"EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x830004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop hit in another core's caches which forwarded the unmodified data to the requesting core. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to another socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x703004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to another socket. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -835,6 +1089,7 @@
"EventName": "OCR.READS_TO_CORE.SNC_CACHE.HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1008004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -845,6 +1100,7 @@
"EventName": "OCR.READS_TO_CORE.SNC_CACHE.HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x808004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -855,6 +1111,7 @@
"EventName": "OCR.RFO_TO_CORE.L3_HIT_M",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F80040022",
+ "PublicDescription": "Counts demand reads for ownership (RFO), hardware prefetch RFOs (which bring data to L2), and software prefetches for exclusive ownership (PREFETCHW) that hit to a (M)odified cacheline in the L3 or snoop filter. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -865,6 +1122,7 @@
"EventName": "OCR.STREAMING_WR.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080800",
+ "PublicDescription": "Counts streaming stores that hit in the L3 or were snooped from another core's caches on the same socket. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
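
A note on using the OCR.* events defined above: perf normally resolves them by name (for example, perf stat -e OCR.DEMAND_DATA_RD.L3_HIT), with the JSON supplying the EventCode/UMask pair and the MSRValue that perf writes to the off-core response MSRs (0x1a6/0x1a7). The sketch below, which is illustrative and not part of this patch, opens the same event directly through perf_event_open(2). It assumes the conventional x86 raw encoding (event select in bits 7:0, umask in bits 15:8) and that attr.config1 carries the off-core response MSRValue, as perf does for raw events on these PMUs; verify both against your kernel rather than treating them as guaranteed ABI.

    /* Illustrative sketch: count OCR.DEMAND_DATA_RD.L3_HIT (EventCode 0x2A,
     * UMask 0x1, MSRValue 0x3F803C0001) for the current thread. */
    #include <linux/perf_event.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            struct perf_event_attr attr;
            uint64_t count;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_RAW;
            attr.config = 0x2a | (0x1 << 8);  /* EventCode | (UMask << 8), assumed raw layout */
            attr.config1 = 0x3F803C0001ULL;   /* off-core response MSRValue, assumed in config1 */
            attr.exclude_kernel = 1;

            fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
            if (fd < 0) {
                    perror("perf_event_open");
                    return 1;
            }
            /* ... run the workload of interest here ... */
            if (read(fd, &count, sizeof(count)) != sizeof(count))
                    return 1;
            printf("OCR.DEMAND_DATA_RD.L3_HIT: %llu\n", (unsigned long long)count);
            close(fd);
            return 0;
    }

In practice the named-event path via perf stat is preferable, since the pmu-events JSON keeps the EventCode/MSRValue pairing correct per microarchitecture.
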
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json
index bf68493d4509..793c486ffabe 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json
@@ -41,7 +41,7 @@
"EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x1",
- "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -52,7 +52,7 @@
"EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x11",
- "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss.",
+ "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -63,7 +63,7 @@
"EventName": "FRONTEND_RETIRED.ITLB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x14",
- "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -74,7 +74,7 @@
"EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x12",
- "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss.",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -85,7 +85,7 @@
"EventName": "FRONTEND_RETIRED.L2_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x13",
- "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss.",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -96,7 +96,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
"MSRIndex": "0x3F7",
"MSRValue": "0x600106",
- "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall.",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -107,7 +107,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
"MSRValue": "0x608006",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -118,7 +118,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
"MSRValue": "0x601006",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -129,7 +129,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
"MSRValue": "0x600206",
- "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -140,7 +140,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
"MSRIndex": "0x3F7",
"MSRValue": "0x610006",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -151,7 +151,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
"MSRValue": "0x100206",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -162,7 +162,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
"MSRValue": "0x602006",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -173,7 +173,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
"MSRValue": "0x600406",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -184,7 +184,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
"MSRValue": "0x620006",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -195,7 +195,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
"MSRValue": "0x604006",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -206,7 +206,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
"MSRValue": "0x600806",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -217,6 +217,7 @@
"EventName": "FRONTEND_RETIRED.MS_FLOWS",
"MSRIndex": "0x3F7",
"MSRValue": "0x8",
+ "PublicDescription": "FRONTEND_RETIRED.MS_FLOWS Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -227,7 +228,7 @@
"EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x15",
- "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
@@ -238,6 +239,7 @@
"EventName": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
"MSRIndex": "0x3F7",
"MSRValue": "0x17",
+ "PublicDescription": "FRONTEND_RETIRED.UNKNOWN_BRANCH Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x1"
},
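
The MSRValue fields in the FRONTEND_RETIRED.* entries above program MSR_PEBS_FRONTEND (0x3F7), and the latency events follow a regular pattern: LATENCY_GE_8 is 0x600806, LATENCY_GE_128 is 0x608006, LATENCY_GE_512 is 0x620006, so the threshold sits in a middle bit-field while the low byte selects the sub-event. The small decoder below is a sketch, not code from this patch; it assumes the field layout the Intel SDM documents for this MSR (event select in bits 7:0, IDQ bubble length in bits 19:8, IDQ bubble width in bits 22:20).

    /* Sketch: decode the MSR_PEBS_FRONTEND (0x3F7) values used by the
     * FRONTEND_RETIRED.LATENCY_GE_* events. Field layout assumed from the
     * Intel SDM: bits 7:0 event select, bits 19:8 IDQ bubble length,
     * bits 22:20 IDQ bubble width. */
    #include <stdint.h>
    #include <stdio.h>

    static void decode_pebs_frontend(uint32_t val)
    {
            unsigned int evtsel = val & 0xff;
            unsigned int bubble_len = (val >> 8) & 0xfff;
            unsigned int bubble_width = (val >> 20) & 0x7;

            printf("0x%06x: evtsel=0x%02x bubble_len>=%u bubble_width>=%u\n",
                   val, evtsel, bubble_len, bubble_width);
    }

    int main(void)
    {
            decode_pebs_frontend(0x600806); /* LATENCY_GE_8: length 8 */
            decode_pebs_frontend(0x608006); /* LATENCY_GE_128: length 128 */
            decode_pebs_frontend(0x100206); /* LATENCY_GE_2_BUBBLES_GE_1: length 2, width 1 */
            return 0;
    }

Run against the values in this file, every LATENCY_GE_N entry decodes to a bubble length of exactly N, which makes this a quick consistency check when editing these tables.
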
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/memory.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/memory.json
index 41d4120d4dae..5e6c1f05c981 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/memory.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/memory.json
@@ -169,17 +169,62 @@
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.STORE_SAMPLE",
- "PublicDescription": "Counts Retired memory accesses with at least 1 store operation. This PEBS event is the precisely-distributed (PDist) trigger covering all stores uops for sampling by the PEBS Store Latency Facility. The facility is described in Intel SDM Volume 3 section 19.9.8",
+ "PublicDescription": "Counts Retired memory accesses with at least 1 store operation. This PEBS event is the precisely-distributed (PDist) trigger covering all stores uops for sampling by the PEBS Store Latency Facility. The facility is described in Intel SDM Volume 3 section 19.9.8 Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x2"
},
{
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the local socket's L1, L2, or L3 caches.",
"Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the local socket's L1, L2, or L3 caches. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.SNC_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x708000004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000001",
+ "PublicDescription": "Counts demand data reads that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -190,6 +235,51 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00001",
+ "PublicDescription": "Counts demand data reads that were not supplied by the local socket's L1, L2, or L3 caches. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000001",
+ "PublicDescription": "Counts demand data reads that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to another socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x730000001",
+ "PublicDescription": "Counts demand data reads that were supplied by DRAM attached to another socket. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.SNC_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x708000001",
+ "PublicDescription": "Counts demand data reads that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -200,6 +290,29 @@
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F3FC00002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the local socket's L1, L2, or L3 caches. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.SNC_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x708000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -210,6 +323,7 @@
"EventName": "OCR.HWPF_L3.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x94002380",
+ "PublicDescription": "Counts hardware prefetches to the L3 only that missed the local socket's L1, L2, and L3 caches. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -220,6 +334,18 @@
"EventName": "OCR.HWPF_L3.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84002380",
+ "PublicDescription": "Counts hardware prefetches to the L3 only that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -230,6 +356,7 @@
"EventName": "OCR.READS_TO_CORE.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F3FC04477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -240,6 +367,7 @@
"EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F04C04477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -250,6 +378,62 @@
"EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL_SOCKET",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x70CC04477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that missed the L3 Cache and were supplied by the local socket (DRAM or PMM), whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM or DRAM accesses that are controlled by the close or distant SNC Cluster. It does not count misses to the L3 which go to Local CXL Type 2 Memory or Local Non DRAM. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts DRAM accesses that are controlled by the close or distant SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.LOCAL_SOCKET_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x70C004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts DRAM accesses that are controlled by the close or distant SNC Cluster. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to another socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x730004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to another socket. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM or PMM attached to another socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_MEMORY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x733004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM or PMM attached to another socket. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.SNC_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x708004477",
+ "PublicDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -260,6 +444,7 @@
"EventName": "OCR.STREAMING_WR.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x94000800",
+ "PublicDescription": "Counts streaming stores that missed the local socket's L1, L2, and L3 caches. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -270,6 +455,18 @@
"EventName": "OCR.STREAMING_WR.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000800",
+ "PublicDescription": "Counts streaming stores that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts Demand RFOs, ItoM's, PREFECTHW's, Hardware RFO Prefetches to the L1/L2 and Streaming stores that likely resulted in a store to Memory (DRAM or PMM)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.WRITE_ESTIMATE.MEMORY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xFBFF80822",
+ "PublicDescription": "Counts Demand RFOs, ItoM's, PREFECTHW's, Hardware RFO Prefetches to the L1/L2 and Streaming stores that likely resulted in a store to Memory (DRAM or PMM) Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -295,7 +492,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED",
- "PublicDescription": "Counts the number of times RTM abort was triggered.",
+ "PublicDescription": "Counts the number of times RTM abort was triggered. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x4"
},
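
One pattern worth noting in the OCR MSRValues above: the low 16 bits appear to select the request types (DEMAND_DATA_RD masks end in ...0001, DEMAND_RFO in ...0002, DEMAND_CODE_RD in ...0004, and READS_TO_CORE in ...4477, the union of several request bits), while the upper bits select the supplier and snoop response (any DRAM is 0x73C0..., local DRAM 0x1040..., SNC DRAM 0x7080...). The split below is inferred from the values in this patch rather than taken from an authoritative field map; the per-bit meanings live in the SDM's off-core response documentation.

    /* Sketch: split an OCR MSRValue into the request-type mask (low 16 bits)
     * and the supplier/snoop response mask (upper bits). The split is
     * inferred from the values in this patch; verify against the SDM. */
    #include <stdint.h>
    #include <stdio.h>

    static void split_ocr(const char *name, uint64_t msrval)
    {
            uint64_t request = msrval & 0xffff; /* assumed request-type bits */
            uint64_t response = msrval >> 16;   /* assumed supplier/snoop bits */

            printf("%-28s request=0x%04llx response=0x%llx\n", name,
                   (unsigned long long)request, (unsigned long long)response);
    }

    int main(void)
    {
            split_ocr("DEMAND_DATA_RD.DRAM", 0x73C000001ULL);
            split_ocr("DEMAND_RFO.LOCAL_DRAM", 0x104000002ULL);
            split_ocr("READS_TO_CORE.SNC_DRAM", 0x708004477ULL);
            return 0;
    }

Viewed this way, the memory.json and cache.json entries for a given request type differ only in the response half of the mask, which is why the same request appears in several files.
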
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/other.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/other.json
index 05d8f14956ee..21f49f609ed4 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/other.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/other.json
@@ -8,321 +8,28 @@
"UMask": "0x8"
},
{
- "BriefDescription": "Counts the cycles where the AMX (Advance Matrix Extension) unit is busy performing an operation.",
+ "BriefDescription": "HW_INTERRUPTS.MASKED",
"Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xb7",
- "EventName": "EXE.AMX_BUSY",
- "SampleAfterValue": "2000003",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_CODE_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x73C000004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x104000004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_CODE_RD.SNC_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x708000004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_DATA_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x73C000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x104000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by PMM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM accesses that are controlled by the close or distant SNC Cluster.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_DATA_RD.LOCAL_SOCKET_PMM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x700C00001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by PMM.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_DATA_RD.PMM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x703C00001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to another socket.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_DATA_RD.REMOTE_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x730000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by PMM attached to another socket.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_DATA_RD.REMOTE_PMM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x703000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_DATA_RD.SNC_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x708000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F3FFC0002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_RFO.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x73C000002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x104000002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.DEMAND_RFO.SNC_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x708000002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts data load hardware prefetch requests to the L1 data cache that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.HWPF_L1D.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10400",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts hardware prefetches (which bring data to L2) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.HWPF_L2.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10070",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts hardware prefetches to the L3 only that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.HWPF_L3.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x12380",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts hardware prefetches to the L3 only that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline was homed in a remote socket.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.HWPF_L3.REMOTE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x90002380",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts writebacks of modified cachelines and streaming stores that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.MODIFIED_WRITE.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10808",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.READS_TO_CORE.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F3FFC4477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.READS_TO_CORE.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x73C004477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.READS_TO_CORE.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x104004477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts DRAM accesses that are controlled by the close or distant SNC Cluster.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.READS_TO_CORE.LOCAL_SOCKET_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x70C004477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM accesses that are controlled by the close or distant SNC Cluster.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.READS_TO_CORE.LOCAL_SOCKET_PMM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x700C04477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and were supplied by a remote socket.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.READS_TO_CORE.REMOTE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F33004477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to another socket.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.READS_TO_CORE.REMOTE_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x730004477",
+ "EventCode": "0xcb",
+ "EventName": "HW_INTERRUPTS.MASKED",
"SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM or PMM attached to another socket.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.READS_TO_CORE.REMOTE_MEMORY",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x733004477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
+ "UMask": "0x2"
},
{
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to another socket.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.READS_TO_CORE.REMOTE_PMM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x703004477",
+ "BriefDescription": "HW_INTERRUPTS.PENDING_AND_MASKED",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcb",
+ "EventName": "HW_INTERRUPTS.PENDING_AND_MASKED",
"SampleAfterValue": "100003",
- "UMask": "0x1"
+ "UMask": "0x4"
},
{
- "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.READS_TO_CORE.SNC_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x708004477",
- "SampleAfterValue": "100003",
+ "BriefDescription": "Number of hardware interrupts received by the processor.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcb",
+ "EventName": "HW_INTERRUPTS.RECEIVED",
+ "PublicDescription": "Counts the number of hardware interruptions received by the processor.",
+ "SampleAfterValue": "203",
"UMask": "0x1"
},
{
@@ -332,70 +39,11 @@
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10800",
+ "PublicDescription": "Counts streaming stores that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts Demand RFOs, ItoM's, PREFECTHW's, Hardware RFO Prefetches to the L1/L2 and Streaming stores that likely resulted in a store to Memory (DRAM or PMM)",
- "Counter": "0,1,2,3",
- "EventCode": "0x2A,0x2B",
- "EventName": "OCR.WRITE_ESTIMATE.MEMORY",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0xFBFF80822",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xa5",
- "EventName": "RS.EMPTY",
- "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
- "SampleAfterValue": "1000003",
- "UMask": "0x7"
- },
- {
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
- "Counter": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "EdgeDetect": "1",
- "EventCode": "0xa5",
- "EventName": "RS.EMPTY_COUNT",
- "Invert": "1",
- "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
- "SampleAfterValue": "100003",
- "UMask": "0x7"
- },
- {
- "BriefDescription": "Cycles when Reservation Station (RS) is empty due to a resource in the back-end",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xa5",
- "EventName": "RS.EMPTY_RESOURCE",
- "SampleAfterValue": "1000003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "This event is deprecated. Refer to new event RS.EMPTY_COUNT",
- "Counter": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "Deprecated": "1",
- "EdgeDetect": "1",
- "EventCode": "0xa5",
- "EventName": "RS_EMPTY.COUNT",
- "Invert": "1",
- "SampleAfterValue": "100003",
- "UMask": "0x7"
- },
- {
- "BriefDescription": "This event is deprecated. Refer to new event RS.EMPTY",
- "Counter": "0,1,2,3,4,5,6,7",
- "Deprecated": "1",
- "EventCode": "0xa5",
- "EventName": "RS_EMPTY.CYCLES",
- "SampleAfterValue": "1000003",
- "UMask": "0x7"
- },
- {
"BriefDescription": "Cycles the uncore cannot take further requests",
"Counter": "0,1,2,3",
"CounterMask": "1",
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json
index 50cacfbbc7cf..1fa7957956df 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json
@@ -62,7 +62,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "PublicDescription": "Counts all branch instructions retired.",
+ "PublicDescription": "Counts all branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009"
},
{
@@ -70,7 +70,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND",
- "PublicDescription": "Counts conditional branch instructions retired.",
+ "PublicDescription": "Counts conditional branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x11"
},
@@ -79,7 +79,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_NTAKEN",
- "PublicDescription": "Counts not taken branch instructions retired.",
+ "PublicDescription": "Counts not taken branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x10"
},
@@ -88,7 +88,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND_TAKEN",
- "PublicDescription": "Counts taken conditional branch instructions retired.",
+ "PublicDescription": "Counts taken conditional branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x1"
},
@@ -97,7 +97,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "PublicDescription": "Counts far branch instructions retired.",
+ "PublicDescription": "Counts far branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x40"
},
@@ -106,7 +106,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT",
- "PublicDescription": "Counts near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
+ "PublicDescription": "Counts near indirect branch instructions retired excluding returns. TSX abort is an indirect branch. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x80"
},
@@ -115,7 +115,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
- "PublicDescription": "Counts both direct and indirect near call instructions retired.",
+ "PublicDescription": "Counts both direct and indirect near call instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x2"
},
@@ -124,7 +124,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "PublicDescription": "Counts return instructions retired.",
+ "PublicDescription": "Counts return instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x8"
},
@@ -133,7 +133,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Counts taken branch instructions retired.",
+ "PublicDescription": "Counts taken branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x20"
},
@@ -142,7 +142,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path. Available PDIST counters: 0",
"SampleAfterValue": "400009"
},
{
@@ -150,7 +150,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND",
- "PublicDescription": "Counts mispredicted conditional branch instructions retired.",
+ "PublicDescription": "Counts mispredicted conditional branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x11"
},
@@ -159,7 +159,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_NTAKEN",
- "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken.",
+ "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x10"
},
@@ -168,7 +168,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.COND_TAKEN",
- "PublicDescription": "Counts taken conditional mispredicted branch instructions retired.",
+ "PublicDescription": "Counts taken conditional mispredicted branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x1"
},
@@ -177,7 +177,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT",
- "PublicDescription": "Counts miss-predicted near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
+ "PublicDescription": "Counts miss-predicted near indirect branch instructions retired excluding returns. TSX abort is an indirect branch. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x80"
},
@@ -186,7 +186,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
- "PublicDescription": "Counts retired mispredicted indirect (near taken) CALL instructions, including both register and memory indirect.",
+ "PublicDescription": "Counts retired mispredicted indirect (near taken) CALL instructions, including both register and memory indirect. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x2"
},
@@ -195,7 +195,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken.",
+ "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken. Available PDIST counters: 0",
"SampleAfterValue": "400009",
"UMask": "0x20"
},
@@ -204,7 +204,7 @@
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RET",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "100007",
"UMask": "0x8"
},
@@ -368,6 +368,14 @@
"UMask": "0x4"
},
{
+ "BriefDescription": "Counts the cycles where the AMX (Advance Matrix Extension) unit is busy performing an operation.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb7",
+ "EventName": "EXE.AMX_BUSY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
"BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
@@ -452,7 +460,7 @@
"BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
- "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter. Available PDIST counters: 32",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
@@ -485,7 +493,7 @@
"BriefDescription": "Precise instruction retired with PEBS precise-distribution",
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.PREC_DIST",
- "PublicDescription": "A version of INST_RETIRED that allows for a precise distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR++) feature to fix bias in how retired instructions get sampled. Use on Fixed Counter 0.",
+ "PublicDescription": "A version of INST_RETIRED that allows for a precise distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR++) feature to fix bias in how retired instructions get sampled. Use on Fixed Counter 0. Available PDIST counters: 32",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
@@ -652,7 +660,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "LOAD_HIT_PREFETCH.SWPF",
- "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "PublicDescription": "Counts all software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -741,6 +749,56 @@
"UMask": "0x2"
},
{
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY_COUNT",
+ "Invert": "1",
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty due to a resource in the back-end",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY_RESOURCE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event RS.EMPTY_COUNT",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "Deprecated": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xa5",
+ "EventName": "RS_EMPTY.COUNT",
+ "Invert": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event RS.EMPTY",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Deprecated": "1",
+ "EventCode": "0xa5",
+ "EventName": "RS_EMPTY.CYCLES",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7"
+ },
+ {
"BriefDescription": "TMA slots where no uops were being issued due to lack of back-end resources.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
index b59fae4a887d..b6a28227a58a 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
@@ -1,28 +1,28 @@
[
{
"BriefDescription": "C1 residency percent per core",
- "MetricExpr": "cstate_core@c1\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c1\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C1_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C2 residency percent per package",
- "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per core",
- "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per package",
- "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency",
"ScaleUnit": "100%"
@@ -40,6 +40,18 @@
"ScaleUnit": "1per_instr"
},
{
+ "BriefDescription": "The average number of cores that are in cstate C0 as observed by the power control unit (PCU)",
+ "MetricExpr": "UNC_P_POWER_STATE_OCCUPANCY_CORES_C0 / UNC_P_CLOCKTICKS * #num_packages",
+ "MetricGroup": "cpu_cstate",
+ "MetricName": "cpu_cstate_c0"
+ },
+ {
+ "BriefDescription": "The average number of cores are in cstate C6 as observed by the power control unit (PCU)",
+ "MetricExpr": "UNC_P_POWER_STATE_OCCUPANCY_CORES_C6 / UNC_P_CLOCKTICKS * #num_packages",
+ "MetricGroup": "cpu_cstate",
+ "MetricName": "cpu_cstate_c6"
+ },
+ {
"BriefDescription": "CPU operating frequency (in GHz)",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC * #SYSTEM_TSC_FREQ / 1e9",
"MetricName": "cpu_operating_frequency",
@@ -91,6 +103,12 @@
"ScaleUnit": "1MB/s"
},
{
+ "BriefDescription": "Bandwidth of inbound IO reads that are initiated by end device controllers that are requesting memory from the CPU and miss the L3 cache",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_read_l3_miss",
+ "ScaleUnit": "1MB/s"
+ },
+ {
"BriefDescription": "Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the local CPU socket",
"MetricExpr": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_LOCAL * 64 / 1e6 / duration_time",
"MetricName": "io_bandwidth_read_local",
@@ -109,6 +127,12 @@
"ScaleUnit": "1MB/s"
},
{
+ "BriefDescription": "Bandwidth of inbound IO writes that are initiated by end device controllers that are writing memory to the CPU",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_MISS_ITOM + UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR) * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_write_l3_miss",
+ "ScaleUnit": "1MB/s"
+ },
+ {
"BriefDescription": "Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the local CPU socket",
"MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_ITOM_LOCAL + UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_LOCAL) * 64 / 1e6 / duration_time",
"MetricName": "io_bandwidth_write_local",
@@ -123,19 +147,19 @@
{
"BriefDescription": "Percentage of inbound full cacheline writes initiated by end device controllers that miss the L3 cache",
"MetricExpr": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM / UNC_CHA_TOR_INSERTS.IO_ITOM",
- "MetricName": "io_percent_of_inbound_full_writes_that_miss_l3",
+ "MetricName": "io_full_write_l3_miss",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Percentage of inbound partial cacheline writes initiated by end device controllers that miss the L3 cache",
"MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR + UNC_CHA_TOR_INSERTS.IO_MISS_RFO) / (UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR + UNC_CHA_TOR_INSERTS.IO_RFO)",
- "MetricName": "io_percent_of_inbound_partial_writes_that_miss_l3",
+ "MetricName": "io_partial_write_l3_miss",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Percentage of inbound reads initiated by end device controllers that miss the L3 cache",
"MetricExpr": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR / UNC_CHA_TOR_INSERTS.IO_PCIRDCUR",
- "MetricName": "io_percent_of_inbound_reads_that_miss_l3",
+ "MetricName": "io_read_l3_miss",
"ScaleUnit": "100%"
},
{
@@ -360,7 +384,7 @@
"ScaleUnit": "1per_instr"
},
{
- "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations",
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
"MetricExpr": "(UOPS_DISPATCHED.PORT_0 + UOPS_DISPATCHED.PORT_1 + UOPS_DISPATCHED.PORT_5_11 + UOPS_DISPATCHED.PORT_6) / (5 * tma_info_core_core_clks)",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_alu_op_utilization",
@@ -372,7 +396,7 @@
"MetricExpr": "EXE.AMX_BUSY / tma_info_core_core_clks",
"MetricGroup": "BvCB;Compute;HPC;Server;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_amx_busy",
- "MetricThreshold": "tma_amx_busy > 0.5 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_amx_busy > 0.5 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
@@ -380,12 +404,12 @@
"MetricExpr": "78 * ASSISTS.ANY / tma_info_thread_slots",
"MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
- "MetricThreshold": "tma_assists > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: ASSISTS.ANY",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of slots the CPU retired uops as a result of handing SSE to AVX* or AVX* to SSE transition Assists",
+ "BriefDescription": "This metric estimates fraction of slots the CPU retired uops as a result of handing SSE to AVX* or AVX* to SSE transition Assists.",
"MetricExpr": "63 * ASSISTS.SSE_AVX_MIX / tma_info_thread_slots",
"MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group",
"MetricName": "tma_avx_assists",
@@ -395,7 +419,7 @@
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound)",
"MetricGroup": "BvOB;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
@@ -411,12 +435,12 @@
"MetricName": "tma_bad_speculation",
"MetricThreshold": "tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
- "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches)",
+ "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
"MetricGroup": "BigFootprint;BvBC;Fed;Frontend;IcMiss;MemoryTLB",
"MetricName": "tma_bottleneck_big_code",
"MetricThreshold": "tma_bottleneck_big_code > 20"
@@ -430,39 +454,39 @@
"PublicDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. (A lower bound)"
},
{
+ "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
+ "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_amx_busy + tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * tma_amx_busy / (tma_amx_busy + tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_amx_busy + tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
+ "MetricGroup": "BvCB;Cor;tma_issueComp",
+ "MetricName": "tma_bottleneck_compute_bound_est",
+ "MetricThreshold": "tma_bottleneck_compute_bound_est > 20",
+ "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. Related metrics: "
+ },
+ {
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_fb_full / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_fb_full)))",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
"MetricGroup": "BvMB;Mem;MemoryBW;Offcore;tma_issueBW",
- "MetricName": "tma_bottleneck_cache_memory_bandwidth",
- "MetricThreshold": "tma_bottleneck_cache_memory_bandwidth > 20",
+ "MetricName": "tma_bottleneck_data_cache_memory_bandwidth",
+ "MetricThreshold": "tma_bottleneck_data_cache_memory_bandwidth > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_l1_latency_dependency / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_fb_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_lock_latency / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_fb_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_split_loads / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_fb_full)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_split_stores / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_store_latency / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)))",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_l1_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l1_latency_dependency / (tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_lock_latency / (tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_loads / (tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_stores / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)) + tma_memory_bound * (tma_store_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
"MetricGroup": "BvML;Mem;MemoryLat;Offcore;tma_issueLat",
- "MetricName": "tma_bottleneck_cache_memory_latency",
- "MetricThreshold": "tma_bottleneck_cache_memory_latency > 20",
+ "MetricName": "tma_bottleneck_data_cache_memory_latency",
+ "MetricThreshold": "tma_bottleneck_data_cache_memory_latency > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency"
},
{
- "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
- "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_serializing_operation + tma_amx_busy + tma_ports_utilization) + tma_core_bound * tma_amx_busy / (tma_divider + tma_serializing_operation + tma_amx_busy + tma_ports_utilization) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_serializing_operation + tma_amx_busy + tma_ports_utilization)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
- "MetricGroup": "BvCB;Cor;tma_issueComp",
- "MetricName": "tma_bottleneck_compute_bound_est",
- "MetricThreshold": "tma_bottleneck_compute_bound_est > 20",
- "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy"
- },
- {
"BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)",
- "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) - (1 - INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.MS\\,cmask\\=0x1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_mispredicts_resteers + tma_clears_resteers + tma_unknown_branches)) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_ms / (tma_mite + tma_dsb + tma_ms))) - tma_bottleneck_big_code",
+ "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - (1 - INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.MS\\,cmask\\=1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_ms)) - tma_bottleneck_big_code",
"MetricGroup": "BvFB;Fed;FetchBW;Frontend",
"MetricName": "tma_bottleneck_instruction_fetch_bw",
"MetricThreshold": "tma_bottleneck_instruction_fetch_bw > 20"
},
{
"BriefDescription": "Total pipeline cost of irregular execution (e.g",
- "MetricExpr": "100 * ((1 - INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.MS\\,cmask\\=0x1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_mispredicts_resteers + tma_clears_resteers + tma_unknown_branches)) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_ms / (tma_mite + tma_dsb + tma_ms)) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + RS.EMPTY_RESOURCE / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_serializing_operation + tma_amx_busy + tma_ports_utilization) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
+ "MetricExpr": "100 * ((1 - INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.MS\\,cmask\\=1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_ms) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + RS.EMPTY_RESOURCE / tma_info_thread_clks * tma_ports_utilized_0) / (tma_amx_busy + tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
"MetricGroup": "Bad;BvIO;Cor;Ret;tma_issueMS",
"MetricName": "tma_bottleneck_irregular_overhead",
"MetricThreshold": "tma_bottleneck_irregular_overhead > 10",
@@ -470,7 +494,7 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_fb_full)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_dtlb_store / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)))",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
"MetricGroup": "BvMT;Mem;MemoryTLB;Offcore;tma_issueTLB",
"MetricName": "tma_bottleneck_memory_data_tlbs",
"MetricThreshold": "tma_bottleneck_memory_data_tlbs > 20",
@@ -478,7 +502,7 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) * tma_remote_cache / (tma_local_mem + tma_remote_mem + tma_remote_cache) + tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * tma_false_sharing / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) * tma_remote_cache / (tma_local_mem + tma_remote_cache + tma_remote_mem) + tma_l3_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_cxl_mem_bound + tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
"MetricGroup": "BvMS;LockCont;Mem;Offcore;tma_issueSyncxn",
"MetricName": "tma_bottleneck_memory_synchronization",
"MetricThreshold": "tma_bottleneck_memory_synchronization > 10",
@@ -486,7 +510,7 @@
},
{
"BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
- "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Bad;BadSpec;BrMispredicts;BvMP;tma_issueBM",
"MetricName": "tma_bottleneck_mispredictions",
"MetricThreshold": "tma_bottleneck_mispredictions > 20",
@@ -494,14 +518,14 @@
},
{
"BriefDescription": "Total pipeline cost of remaining bottlenecks in the back-end",
- "MetricExpr": "100 - (tma_bottleneck_big_code + tma_bottleneck_instruction_fetch_bw + tma_bottleneck_mispredictions + tma_bottleneck_cache_memory_bandwidth + tma_bottleneck_cache_memory_latency + tma_bottleneck_memory_data_tlbs + tma_bottleneck_memory_synchronization + tma_bottleneck_compute_bound_est + tma_bottleneck_irregular_overhead + tma_bottleneck_branching_overhead + tma_bottleneck_useful_work)",
+ "MetricExpr": "100 - (tma_bottleneck_big_code + tma_bottleneck_instruction_fetch_bw + tma_bottleneck_mispredictions + tma_bottleneck_data_cache_memory_bandwidth + tma_bottleneck_data_cache_memory_latency + tma_bottleneck_memory_data_tlbs + tma_bottleneck_memory_synchronization + tma_bottleneck_compute_bound_est + tma_bottleneck_irregular_overhead + tma_bottleneck_branching_overhead + tma_bottleneck_useful_work)",
"MetricGroup": "BvOB;Cor;Offcore",
"MetricName": "tma_bottleneck_other_bottlenecks",
"MetricThreshold": "tma_bottleneck_other_bottlenecks > 20",
- "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls"
+ "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls."
},
{
- "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead",
+ "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.",
"MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + 2 * BR_INST_RETIRED.NEAR_CALL + INST_RETIRED.NOP) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
"MetricGroup": "BvUW;Ret",
"MetricName": "tma_bottleneck_useful_work",
@@ -510,7 +534,7 @@
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
"DefaultMetricgroupName": "TopdownL2",
- "MetricExpr": "topdown\\-br\\-mispredict / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "topdown\\-br\\-mispredict / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound)",
"MetricGroup": "BadSpec;BrMispredicts;BvMP;Default;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
@@ -523,24 +547,24 @@
"MetricExpr": "INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks + tma_unknown_branches",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_branch_resteers",
- "MetricThreshold": "tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_l3_hit_latency, tma_store_latency",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.1 power-performance optimized state (Faster wakeup time; Smaller power savings)",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.1 power-performance optimized state (Faster wakeup time; Smaller power savings).",
"MetricExpr": "CPU_CLK_UNHALTED.C01 / tma_info_thread_clks",
"MetricGroup": "C0Wait;TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_c01_wait",
- "MetricThreshold": "tma_c01_wait > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_c01_wait > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.2 power-performance optimized state (Slower wakeup time; Larger power savings)",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.2 power-performance optimized state (Slower wakeup time; Larger power savings).",
"MetricExpr": "CPU_CLK_UNHALTED.C02 / tma_info_thread_clks",
"MetricGroup": "C0Wait;TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_c02_wait",
- "MetricThreshold": "tma_c02_wait > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_c02_wait > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
{
@@ -548,7 +572,7 @@
"MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
"MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_cisc",
- "MetricThreshold": "tma_cisc > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources. Sample with: FRONTEND_RETIRED.MS_FLOWS",
"ScaleUnit": "100%"
},
@@ -557,24 +581,24 @@
"MetricExpr": "(1 - tma_branch_mispredicts / tma_bad_speculation) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
"MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
"MetricName": "tma_clears_resteers",
- "MetricThreshold": "tma_clears_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that hit in the L2 cache",
+ "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that hit in the L2 cache.",
"MetricExpr": "max(0, tma_icache_misses - tma_code_l2_miss)",
"MetricGroup": "FetchLat;IcMiss;Offcore;TopdownL4;tma_L4_group;tma_icache_misses_group",
"MetricName": "tma_code_l2_hit",
- "MetricThreshold": "tma_code_l2_hit > 0.05 & tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_l2_hit > 0.05 & (tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that miss in the L2 cache",
+ "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that miss in the L2 cache.",
"MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD / tma_info_thread_clks",
"MetricGroup": "FetchLat;IcMiss;Offcore;TopdownL4;tma_L4_group;tma_icache_misses_group",
"MetricName": "tma_code_l2_miss",
- "MetricThreshold": "tma_code_l2_miss > 0.05 & tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_l2_miss > 0.05 & (tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
@@ -582,7 +606,7 @@
"MetricExpr": "max(0, tma_itlb_misses - tma_code_stlb_miss)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group",
"MetricName": "tma_code_stlb_hit",
- "MetricThreshold": "tma_code_stlb_hit > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_hit > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
@@ -590,32 +614,32 @@
"MetricExpr": "ITLB_MISSES.WALK_ACTIVE / tma_info_thread_clks",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group",
"MetricName": "tma_code_stlb_miss",
- "MetricThreshold": "tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for (instruction) code accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for (instruction) code accesses.",
"MetricExpr": "tma_code_stlb_miss * ITLB_MISSES.WALK_COMPLETED_2M_4M / (ITLB_MISSES.WALK_COMPLETED_4K + ITLB_MISSES.WALK_COMPLETED_2M_4M)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group",
"MetricName": "tma_code_stlb_miss_2m",
- "MetricThreshold": "tma_code_stlb_miss_2m > 0.05 & tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss_2m > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for (instruction) code accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for (instruction) code accesses.",
"MetricExpr": "tma_code_stlb_miss * ITLB_MISSES.WALK_COMPLETED_4K / (ITLB_MISSES.WALK_COMPLETED_4K + ITLB_MISSES.WALK_COMPLETED_2M_4M)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group",
"MetricName": "tma_code_stlb_miss_4k",
- "MetricThreshold": "tma_code_stlb_miss_4k > 0.05 & tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss_4k > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
- "MetricExpr": "((81 * tma_info_system_core_frequency - 4.4 * tma_info_system_core_frequency) * (MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + (79 * tma_info_system_core_frequency - 4.4 * tma_info_system_core_frequency) * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "(76.6 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + 74.6 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
- "MetricThreshold": "tma_contested_accesses > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD, MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_bottleneck_memory_synchronization, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_bottleneck_memory_synchronization, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
@@ -626,24 +650,34 @@
"MetricName": "tma_core_bound",
"MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2;Default",
- "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations)",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates (based on idle latencies) how often the CPU was stalled on accesses to external CXL Memory by loads (e.g",
+ "MetricExpr": "(((1 - ((19 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + 10 * (MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS))) / (19 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + 10 * (MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + (25 * (MEM_LOAD_RETIRED.LOCAL_PMM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) if #has_pmem > 0 else 0) + 33 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) if #has_pmem > 0 else 0))) if #has_pmem > 0 else 1)) * (MEMORY_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks) if 1e6 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM + MEM_LOAD_RETIRED.LOCAL_PMM) > MEM_LOAD_RETIRED.L1_MISS else 0) if #has_pmem > 0 else 0)",
+ "MetricGroup": "MemoryBound;Server;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_cxl_mem_bound",
+ "MetricThreshold": "tma_cxl_mem_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric roughly estimates (based on idle latencies) how often the CPU was stalled on accesses to external CXL Memory by loads (e.g. 3D-Xpoint (Crystal Ridge, a.k.a. IXP) memory, PMM - Persistent Memory Module [from CLX to SPR] or any other CXL Type3 Memory [EMR onwards]).",
"ScaleUnit": "100%"
},
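The new tma_cxl_mem_bound expression is dense but has a simple shape: it splits the L3-miss stall fraction between DRAM and PMM/CXL memory using idle-latency weights (19 and 10 for remote/local DRAM sources, 25 and 33 for local/remote PMM) and reports the PMM/CXL share. A rough Python sketch, with shortened hypothetical event names standing in for the full counter names:

def cxl_mem_bound(ev, clks, has_pmem=True):
    # ev: dict of hypothetical short names for the events in the MetricExpr
    if not has_pmem:
        return 0.0
    fb = 1 + ev["FB_HIT"] / ev["L1_MISS"]  # fill-buffer correction factor
    dram = (19 * ev["REMOTE_DRAM"] * fb
            + 10 * (ev["LOCAL_DRAM"] + ev["REMOTE_FWD"] + ev["REMOTE_HITM"]) * fb)
    pmm = 25 * ev["LOCAL_PMM"] * fb + 33 * ev["REMOTE_PMM"] * fb
    if 1e6 * (ev["REMOTE_PMM"] + ev["LOCAL_PMM"]) <= ev["L1_MISS"]:
        return 0.0  # too few PMM loads for a meaningful attribution
    # PMM/CXL share of the L3-miss stall cycles
    return pmm / (dram + pmm) * ev["STALLS_L3_MISS"] / clks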
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
- "MetricExpr": "(79 * tma_info_system_core_frequency - 4.4 * tma_info_system_core_frequency) * (MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD + MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (1 - OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "74.6 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD + MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (1 - OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
- "MetricThreshold": "tma_data_sharing > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
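The coefficient change in tma_data_sharing is constant folding, not a retuning: the old expression scaled the same core-frequency term by two constants and subtracted, which collapses to a single coefficient:

(79 - 4.4) \cdot f_{\mathrm{core}} = 74.6 \cdot f_{\mathrm{core}}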
{
"BriefDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder",
- "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=0x1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=0x2@) / tma_info_core_core_clks / 2",
+ "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_core_clks / 2",
"MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group",
"MetricName": "tma_decoder0_alone",
- "MetricThreshold": "tma_decoder0_alone > 0.1 & tma_mite > 0.1 & tma_fetch_bandwidth > 0.2",
+ "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions",
"ScaleUnit": "100%"
},
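Two things change in tma_decoder0_alone: the cmask literals are normalized from hex to decimal (the event string means the same thing either way), and the expression itself is the usual cmask-difference trick: cycles with at least one decoder active minus cycles with at least two leaves the cycles where decoder-0 worked alone. A small sketch with hypothetical counts:

ge1_cycles = 1_200_000   # cpu@INST_DECODED.DECODERS,cmask=1@  (>= 1 decoder busy)
ge2_cycles = 900_000     # cpu@INST_DECODED.DECODERS,cmask=2@  (>= 2 decoders busy)
core_clks = 4_000_000    # tma_info_core_core_clks
# Mirrors the MetricExpr: (ge1 - ge2) / core_clks / 2
tma_decoder0_alone = (ge1_cycles - ge2_cycles) / core_clks / 2
print(f"{tma_decoder0_alone:.3f}")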
@@ -652,16 +686,16 @@
"MetricExpr": "ARITH.DIV_ACTIVE / tma_info_thread_clks",
"MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
- "MetricThreshold": "tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
- "MetricExpr": "MEMORY_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks",
+ "MetricExpr": "(MEMORY_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks - tma_cxl_mem_bound if #has_pmem > 0 else MEMORY_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks)",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_dram_bound",
- "MetricThreshold": "tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_MISS",
"ScaleUnit": "100%"
},
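tma_dram_bound now subtracts tma_cxl_mem_bound on #has_pmem systems, so DRAM-bound and CXL-mem-bound partition the same L3-miss stall cycles instead of double-counting them. A sketch of the new shape (hypothetical inputs):

def dram_bound(stalls_l3_miss, clks, cxl_share, has_pmem):
    # cxl_share: the tma_cxl_mem_bound value computed above
    frac = stalls_l3_miss / clks
    return frac - cxl_share if has_pmem else frac

print(dram_bound(1_000_000, 4_000_000, 0.05, has_pmem=True))  # 0.20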
@@ -671,7 +705,7 @@
"MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_dsb",
"MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2",
- "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
"ScaleUnit": "100%"
},
{
@@ -679,34 +713,34 @@
"MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_thread_clks",
"MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_dsb_switches",
- "MetricThreshold": "tma_dsb_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
- "MetricExpr": "min(7 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=0x1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - MEMORY_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
+ "MetricExpr": "min(7 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - MEMORY_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
- "MetricThreshold": "tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_store",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
- "MetricExpr": "(7 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=0x1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_core_clks",
+ "MetricExpr": "(7 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_core_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
- "MetricThreshold": "tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_load",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
- "MetricExpr": "(170 * tma_info_system_core_frequency * cpu@OCR.DEMAND_RFO.L3_MISS\\,offcore_rsp\\=0x103b800002@ + 81 * tma_info_system_core_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM) / tma_info_thread_clks",
+ "MetricExpr": "(170 * tma_info_system_core_frequency * OCR.DEMAND_RFO.L3_MISS@offcore_rsp\\=0x103b800002@ + 81 * tma_info_system_core_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM) / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
- "MetricThreshold": "tma_false_sharing > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
@@ -716,7 +750,7 @@
"MetricGroup": "BvMB;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
- "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
"ScaleUnit": "100%"
},
{
@@ -727,7 +761,7 @@
"MetricName": "tma_fetch_bandwidth",
"MetricThreshold": "tma_fetch_bandwidth > 0.2",
"MetricgroupNoGroup": "TopdownL2;Default",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1, FRONTEND_RETIRED.LATENCY_GE_1, FRONTEND_RETIRED.LATENCY_GE_2. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1;FRONTEND_RETIRED.LATENCY_GE_1;FRONTEND_RETIRED.LATENCY_GE_2. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
@@ -738,7 +772,7 @@
"MetricName": "tma_fetch_latency",
"MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL2;Default",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16, FRONTEND_RETIRED.LATENCY_GE_8",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
"ScaleUnit": "100%"
},
{
@@ -756,7 +790,7 @@
"MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fp_arith",
"MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
"ScaleUnit": "100%"
},
{
@@ -765,15 +799,15 @@
"MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group",
"MetricName": "tma_fp_assists",
"MetricThreshold": "tma_fp_assists > 0.1",
- "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals)",
+ "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals).",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of cycles where the Floating-Point Divider unit was active",
+ "BriefDescription": "This metric represents fraction of cycles where the Floating-Point Divider unit was active.",
"MetricExpr": "ARITH.FPDIV_ACTIVE / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_divider_group",
"MetricName": "tma_fp_divider",
- "MetricThreshold": "tma_fp_divider > 0.2 & tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_fp_divider > 0.2 & (tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
{
@@ -781,8 +815,8 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED2.SCALAR) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_scalar",
- "MetricThreshold": "tma_fp_scalar > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -790,8 +824,8 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.VECTOR + FP_ARITH_INST_RETIRED2.VECTOR) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_vector",
- "MetricThreshold": "tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -799,8 +833,8 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.128B_PACKED_HALF) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_128b",
- "MetricThreshold": "tma_fp_vector_128b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -808,8 +842,8 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.256B_PACKED_HALF) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_256b",
- "MetricThreshold": "tma_fp_vector_256b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -817,8 +851,8 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.512B_PACKED_HALF) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_512b",
- "MetricThreshold": "tma_fp_vector_512b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector_512b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -829,27 +863,27 @@
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions , where one uop can represent multiple contiguous instructions",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions",
"MetricExpr": "tma_light_operations * INST_RETIRED.MACRO_FUSED / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fused_instructions",
"MetricThreshold": "tma_fused_instructions > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions , where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
"DefaultMetricgroupName": "TopdownL2",
- "MetricExpr": "topdown\\-heavy\\-ops / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "topdown\\-heavy\\-ops / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound)",
"MetricGroup": "Default;Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_heavy_operations",
"MetricThreshold": "tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL2;Default",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+]). Sample with: UOPS_RETIRED.HEAVY",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+]). Sample with: UOPS_RETIRED.HEAVY",
"ScaleUnit": "100%"
},
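The dropped "+ 0 * slots" term in tma_heavy_operations was arithmetically inert; terms of that shape are typically kept only to pull an extra event into the metric's evaluation group, so removing it cannot change the computed value:

\frac{\mathrm{heavy\,ops}}{\mathrm{fe} + \mathrm{bad\,spec} + \mathrm{retiring} + \mathrm{be}} + 0 \cdot \mathrm{slots} \;=\; \frac{\mathrm{heavy\,ops}}{\mathrm{fe} + \mathrm{bad\,spec} + \mathrm{retiring} + \mathrm{be}}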
{
@@ -857,8 +891,8 @@
"MetricExpr": "ICACHE_DATA.STALLS / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
- "MetricThreshold": "tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS, FRONTEND_RETIRED.L1I_MISS",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
"ScaleUnit": "100%"
},
{
@@ -869,28 +903,28 @@
"PublicDescription": "Branch Misprediction Cost: Cycles representing fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_bottleneck_mispredictions, tma_branch_mispredicts, tma_mispredicts_resteers"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for conditional non-taken branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for conditional non-taken branches (lower number means higher occurrence rate).",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_NTAKEN",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_cond_ntaken",
"MetricThreshold": "tma_info_bad_spec_ipmisp_cond_ntaken < 200"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for conditional taken branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for conditional taken branches (lower number means higher occurrence rate).",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_TAKEN",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_cond_taken",
"MetricThreshold": "tma_info_bad_spec_ipmisp_cond_taken < 200"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.INDIRECT",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_indirect",
- "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1000"
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate).",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.RET",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_ret",
@@ -918,7 +952,7 @@
},
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck",
- "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_latency + tma_fetch_bandwidth)) * (tma_dsb / (tma_mite + tma_dsb + tma_ms)))",
+ "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_bandwidth + tma_fetch_latency)) * (tma_dsb / (tma_dsb + tma_mite + tma_ms)))",
"MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_bandwidth",
"MetricThreshold": "tma_info_botlnk_l2_dsb_bandwidth > 10",
@@ -926,7 +960,7 @@
},
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
- "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_mite / (tma_mite + tma_dsb + tma_ms))",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_mite + tma_ms))",
"MetricGroup": "DSBmiss;Fed;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_misses",
"MetricThreshold": "tma_info_botlnk_l2_dsb_misses > 10",
@@ -934,10 +968,11 @@
},
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
- "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
"MetricName": "tma_info_botlnk_l2_ic_misses",
- "MetricThreshold": "tma_info_botlnk_l2_ic_misses > 5"
+ "MetricThreshold": "tma_info_botlnk_l2_ic_misses > 5",
+ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: "
},
{
"BriefDescription": "Fraction of branches that are CALL or RET",
@@ -998,11 +1033,11 @@
"MetricExpr": "(FP_ARITH_DISPATCHED.PORT_0 + FP_ARITH_DISPATCHED.PORT_1 + FP_ARITH_DISPATCHED.PORT_5) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
- "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)"
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=0x1@",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "tma_info_core_ilp"
},
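tma_info_core_ilp divides a uop count by a cmask=1-filtered count of the same event, i.e. uops per cycle measured only over cycles in which something actually executed. With hypothetical counts:

uops_executed = 8_000_000      # UOPS_EXECUTED.THREAD
active_cycles = 2_500_000      # cpu@UOPS_EXECUTED.THREAD,cmask=1@
ilp = uops_executed / active_cycles
print(f"{ilp:.2f} uops per non-idle execution cycle")  # 3.20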
@@ -1015,20 +1050,20 @@
"PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp"
},
{
- "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details",
- "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=0x1\\,edge\\=0x1@",
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
"MetricGroup": "DSBmiss",
"MetricName": "tma_info_frontend_dsb_switch_cost"
},
{
"BriefDescription": "Average number of Uops issued by front-end when it issued something",
- "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=0x1@",
+ "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=1@",
"MetricGroup": "Fed;FetchBW",
"MetricName": "tma_info_frontend_fetch_upc"
},
{
"BriefDescription": "Average Latency for L1 instruction cache misses",
- "MetricExpr": "ICACHE_DATA.STALLS / cpu@ICACHE_DATA.STALLS\\,cmask\\=0x1\\,edge\\=0x1@",
+ "MetricExpr": "ICACHE_DATA.STALLS / cpu@ICACHE_DATA.STALLS\\,cmask\\=1\\,edge@",
"MetricGroup": "Fed;FetchLat;IcMiss",
"MetricName": "tma_info_frontend_icache_miss_latency"
},
@@ -1065,13 +1100,13 @@
},
{
"BriefDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection",
- "MetricExpr": "INT_MISC.UNKNOWN_BRANCH_CYCLES / cpu@INT_MISC.UNKNOWN_BRANCH_CYCLES\\,cmask\\=0x1\\,edge\\=0x1@",
+ "MetricExpr": "INT_MISC.UNKNOWN_BRANCH_CYCLES / cpu@INT_MISC.UNKNOWN_BRANCH_CYCLES\\,cmask\\=1\\,edge@",
"MetricGroup": "Fed",
"MetricName": "tma_info_frontend_unknown_branch_cost",
- "PublicDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection. See Unknown_Branches node"
+ "PublicDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection. See Unknown_Branches node."
},
{
- "BriefDescription": "Branch instructions per taken branch",
+ "BriefDescription": "Branch instructions per taken branch.",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;PGO",
"MetricName": "tma_info_inst_mix_bptkbranch"
@@ -1089,7 +1124,7 @@
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
"MetricThreshold": "tma_info_inst_mix_iparith < 10",
- "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW"
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
@@ -1097,7 +1132,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx128",
"MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
@@ -1105,7 +1140,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx256",
"MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate)",
@@ -1113,7 +1148,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx512",
"MetricThreshold": "tma_info_inst_mix_iparith_avx512 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
@@ -1121,7 +1156,7 @@
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_dp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Half-Precision instruction (lower number means higher occurrence rate)",
@@ -1129,7 +1164,7 @@
"MetricGroup": "Flops;FpScalar;InsType;Server",
"MetricName": "tma_info_inst_mix_iparith_scalar_hp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_hp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Half-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Half-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
@@ -1137,7 +1172,7 @@
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_sp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
@@ -1192,7 +1227,7 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
- "MetricThreshold": "tma_info_inst_mix_iptb < 6 * 2 + 1",
+ "MetricThreshold": "tma_info_inst_mix_iptb < 13",
"PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp"
},
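The threshold here (and the tma_info_thread_uptb one later in this hunk) is unchanged in value; the constants, apparently written in terms of the 6-wide pipeline, are simply pre-folded:

6 \times 2 + 1 = 13 \qquad\qquad 6 \times 1.5 = 9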
{
@@ -1329,7 +1364,7 @@
},
{
"BriefDescription": "Average Parallel L2 cache miss demand Loads",
- "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=0x1@",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@",
"MetricGroup": "Memory_BW;Offcore",
"MetricName": "tma_info_memory_latency_load_l2_mlp"
},
@@ -1354,19 +1389,19 @@
{
"BriefDescription": "Off-core accesses per kilo instruction for modified write requests",
"MetricExpr": "1e3 * OCR.MODIFIED_WRITE.ANY_RESPONSE / tma_info_inst_mix_instructions",
- "MetricGroup": "Offcore",
+ "MetricGroup": "Offcore;Server",
"MetricName": "tma_info_memory_mix_offcore_mwrite_any_pki"
},
{
"BriefDescription": "Off-core accesses per kilo instruction for reads-to-core requests (speculative; including in-core HW prefetches)",
"MetricExpr": "1e3 * OCR.READS_TO_CORE.ANY_RESPONSE / tma_info_inst_mix_instructions",
- "MetricGroup": "CacheHits;Offcore",
+ "MetricGroup": "CacheHits;Offcore;Server",
"MetricName": "tma_info_memory_mix_offcore_read_any_pki"
},
{
"BriefDescription": "L3 cache misses per kilo instruction for reads-to-core requests (speculative; including in-core HW prefetches)",
"MetricExpr": "1e3 * OCR.READS_TO_CORE.L3_MISS / tma_info_inst_mix_instructions",
- "MetricGroup": "Offcore",
+ "MetricGroup": "Offcore;Server",
"MetricName": "tma_info_memory_mix_offcore_read_l3m_pki"
},
{
@@ -1392,23 +1427,23 @@
{
"BriefDescription": "Average DRAM BW for Reads-to-Core (R2C) covering for memory attached to local- and remote-socket",
"MetricExpr": "64 * OCR.READS_TO_CORE.DRAM / 1e9 / tma_info_system_time",
- "MetricGroup": "HPC;Mem;MemoryBW;SoC",
+ "MetricGroup": "HPC;Mem;MemoryBW;Offcore;Server",
"MetricName": "tma_info_memory_soc_r2c_dram_bw",
- "PublicDescription": "Average DRAM BW for Reads-to-Core (R2C) covering for memory attached to local- and remote-socket. See R2C_Offcore_BW"
+ "PublicDescription": "Average DRAM BW for Reads-to-Core (R2C) covering for memory attached to local- and remote-socket. See R2C_Offcore_BW."
},
{
"BriefDescription": "Average L3-cache miss BW for Reads-to-Core (R2C)",
"MetricExpr": "64 * OCR.READS_TO_CORE.L3_MISS / 1e9 / tma_info_system_time",
- "MetricGroup": "HPC;Mem;MemoryBW;SoC",
+ "MetricGroup": "HPC;Mem;MemoryBW;Offcore;Server",
"MetricName": "tma_info_memory_soc_r2c_l3m_bw",
- "PublicDescription": "Average L3-cache miss BW for Reads-to-Core (R2C). This covering going to DRAM or other memory off-chip memory tears. See R2C_Offcore_BW"
+ "PublicDescription": "Average L3-cache miss BW for Reads-to-Core (R2C). This covering going to DRAM or other memory off-chip memory tears. See R2C_Offcore_BW."
},
{
"BriefDescription": "Average Off-core access BW for Reads-to-Core (R2C)",
"MetricExpr": "64 * OCR.READS_TO_CORE.ANY_RESPONSE / 1e9 / tma_info_system_time",
- "MetricGroup": "HPC;Mem;MemoryBW;SoC",
+ "MetricGroup": "HPC;Mem;MemoryBW;Offcore;Server",
"MetricName": "tma_info_memory_soc_r2c_offcore_bw",
- "PublicDescription": "Average Off-core access BW for Reads-to-Core (R2C). R2C account for demand or prefetch load/RFO/code access that fill data into the Core caches"
+ "PublicDescription": "Average Off-core access BW for Reads-to-Core (R2C). R2C account for demand or prefetch load/RFO/code access that fill data into the Core caches."
},
{
"BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
@@ -1436,8 +1471,8 @@
"MetricName": "tma_info_memory_tlb_store_stlb_mpki"
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per core",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=0x1@)",
+ "BriefDescription": "Mem;Backend;CacheHits",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute"
},
@@ -1458,18 +1493,18 @@
"MetricExpr": "INST_RETIRED.ANY / ASSISTS.ANY",
"MetricGroup": "MicroSeq;Pipeline;Ret;Retire",
"MetricName": "tma_info_pipeline_ipassist",
- "MetricThreshold": "tma_info_pipeline_ipassist < 100000",
+ "MetricThreshold": "tma_info_pipeline_ipassist < 100e3",
"PublicDescription": "Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)"
},
{
- "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired",
- "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=0x1@",
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
"MetricGroup": "Pipeline;Ret",
"MetricName": "tma_info_pipeline_retire"
},
{
"BriefDescription": "Estimated fraction of retirement-cycles dealing with repeat instructions",
- "MetricExpr": "INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=0x1@",
+ "MetricExpr": "INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
"MetricGroup": "MicroSeq;Pipeline;Ret",
"MetricName": "tma_info_pipeline_strings_cycles",
"MetricThreshold": "tma_info_pipeline_strings_cycles > 0.1"
@@ -1483,7 +1518,7 @@
},
{
"BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]",
- "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / tma_info_system_time",
+ "MetricExpr": "tma_info_system_turbo_utilization * msr@tsc@ / 1e9 / tma_info_system_time",
"MetricGroup": "Power;Summary",
"MetricName": "tma_info_system_core_frequency"
},
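"msr@tsc@" reads the timestamp counter through the msr PMU instead of relying on a bare TSC event alias; the arithmetic is unchanged. A sketch with hypothetical counts over a one-second window:

turbo_utilization = 1.25        # tma_info_system_turbo_utilization
tsc = 3_000_000_000             # msr@tsc@ over the window
seconds = 1.0                   # tma_info_system_time
core_ghz = turbo_utilization * tsc / 1e9 / seconds
print(f"{core_ghz:.2f} GHz")    # 3.75 GHz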
@@ -1495,16 +1530,28 @@
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
{
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for reads [GB / sec]",
+ "MetricExpr": "(64 * UNC_M_PMM_RPQ_INSERTS / 1e9 / tma_info_system_time if #has_pmem > 0 else 0)",
+ "MetricGroup": "MemOffcore;MemoryBW;Server;SoC",
+ "MetricName": "tma_info_system_cxl_mem_read_bw"
+ },
+ {
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for Writes [GB / sec]",
+ "MetricExpr": "(64 * UNC_M_PMM_WPQ_INSERTS / 1e9 / tma_info_system_time if #has_pmem > 0 else 0)",
+ "MetricGroup": "MemOffcore;MemoryBW;Server;SoC",
+ "MetricName": "tma_info_system_cxl_mem_write_bw"
+ },
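Both new bandwidth metrics use the usual cache-line accounting, gated on #has_pmem: every RPQ/WPQ insert corresponds to one 64-byte line moved. A sketch with hypothetical counts:

rpq_inserts = 2_000_000_000  # UNC_M_PMM_RPQ_INSERTS over the window
seconds = 1.0                # tma_info_system_time
read_bw_gbs = 64 * rpq_inserts / 1e9 / seconds
print(f"{read_bw_gbs:.0f} GB/s reads")  # 128 GB/s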
+ {
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
"MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / tma_info_system_time",
"MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW",
"MetricName": "tma_info_system_dram_bw_use",
- "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth, tma_sq_full"
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth, tma_sq_full"
},
{
"BriefDescription": "Giga Floating Point Operations Per Second",
@@ -1532,14 +1579,13 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
"MetricGroup": "Branches;OS",
"MetricName": "tma_info_system_ipfarbranch",
- "MetricThreshold": "tma_info_system_ipfarbranch < 1000000"
+ "MetricThreshold": "tma_info_system_ipfarbranch < 1e6"
},
{
"BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
"MetricGroup": "OS",
- "MetricName": "tma_info_system_kernel_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_system_kernel_cpi"
},
{
"BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
@@ -1550,7 +1596,7 @@
},
{
"BriefDescription": "Average latency of data read request to external DRAM memory [in nanoseconds]",
- "MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR) / cha_0@event\\=0x0@",
+ "MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR) / uncore_cha_0@event\\=0x1@",
"MetricGroup": "MemOffcore;MemoryLat;Server;SoC",
"MetricName": "tma_info_system_mem_dram_read_latency",
"PublicDescription": "Average latency of data read request to external DRAM memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches"
@@ -1560,18 +1606,24 @@
"MetricExpr": "UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH / UNC_CHA_CLOCKTICKS",
"MetricGroup": "LockCont;MemOffcore;Server;SoC",
"MetricName": "tma_info_system_mem_irq_duplicate_address",
- "MetricThreshold": "(tma_info_system_mem_irq_duplicate_address > 0.1)"
+ "MetricThreshold": "tma_info_system_mem_irq_duplicate_address > 0.1"
},
{
"BriefDescription": "Average number of parallel data read requests to external memory",
- "MetricExpr": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / cha@UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD\\,thresh\\=0x1@",
+ "MetricExpr": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD@thresh\\=1@",
"MetricGroup": "Mem;MemoryBW;SoC",
"MetricName": "tma_info_system_mem_parallel_reads",
"PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches"
},
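This metric is occupancy-based: total read occupancy divided by the number of cycles in which at least one read was outstanding (the thresh=1 filter) gives the average number of reads in flight. Hypothetical counts:

occupancy_sum = 5_600_000   # UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD (occupancy summed per cycle)
busy_cycles = 1_400_000     # same event filtered with thresh=1
parallel_reads = occupancy_sum / busy_cycles
print(f"{parallel_reads:.1f} reads in flight on average")  # 4.0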
{
+ "BriefDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]",
+ "MetricExpr": "(1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM) / uncore_cha_0@event\\=0x1@ if #has_pmem > 0 else 0)",
+ "MetricGroup": "MemOffcore;MemoryLat;Server;SoC",
+ "MetricName": "tma_info_system_mem_pmm_read_latency",
+ "PublicDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches"
+ },
+ {
"BriefDescription": "Average latency of data read request to external memory (in nanoseconds)",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / UNC_CHA_TOR_INSERTS.IA_MISS_DRD) / (tma_info_system_socket_clks / tma_info_system_time)",
"MetricGroup": "Mem;MemoryLat;SoC",
"MetricName": "tma_info_system_mem_read_latency",
@@ -1598,7 +1650,7 @@
},
{
"BriefDescription": "Socket actual clocks when any core is active on that socket",
- "MetricExpr": "cha_0@event\\=0x0@",
+ "MetricExpr": "uncore_cha_0@event\\=0x1@",
"MetricGroup": "SoC",
"MetricName": "tma_info_system_socket_clks"
},
@@ -1628,7 +1680,7 @@
"MetricName": "tma_info_system_upi_data_transmit_bw"
},
{
- "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active",
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Pipeline",
"MetricName": "tma_info_thread_clks"
@@ -1637,15 +1689,14 @@
"BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / tma_info_thread_ipc",
"MetricGroup": "Mem;Pipeline",
- "MetricName": "tma_info_thread_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_thread_cpi"
},
{
"BriefDescription": "The ratio of Executed- by Issued-Uops",
"MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
"MetricGroup": "Cor;Pipeline",
"MetricName": "tma_info_thread_execute_per_issue",
- "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage"
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
},
{
"BriefDescription": "Instructions Per Cycle (per Logical Processor)",
@@ -1655,13 +1706,13 @@
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
- "MetricExpr": "slots",
+ "MetricExpr": "TOPDOWN.SLOTS",
"MetricGroup": "TmaL1;tma_L1_group",
"MetricName": "tma_info_thread_slots"
},
{
"BriefDescription": "Fraction of Physical Core issue-slots utilized by this Logical Processor",
- "MetricExpr": "(tma_info_thread_slots / (slots / 2) if #SMT_on else 1)",
+ "MetricExpr": "(tma_info_thread_slots / (TOPDOWN.SLOTS / 2) if #SMT_on else 1)",
"MetricGroup": "SMT;TmaL1;tma_L1_group",
"MetricName": "tma_info_thread_slots_utilization"
},
@@ -1677,14 +1728,14 @@
"MetricExpr": "tma_retiring * tma_info_thread_slots / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
- "MetricThreshold": "tma_info_thread_uptb < 6 * 1.5"
+ "MetricThreshold": "tma_info_thread_uptb < 9"
},
{
- "BriefDescription": "This metric represents fraction of cycles where the Integer Divider unit was active",
+ "BriefDescription": "This metric represents fraction of cycles where the Integer Divider unit was active.",
"MetricExpr": "tma_divider - tma_fp_divider",
"MetricGroup": "TopdownL4;tma_L4_group;tma_divider_group",
"MetricName": "tma_int_divider",
- "MetricThreshold": "tma_int_divider > 0.2 & tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_int_divider > 0.2 & (tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
{
@@ -1693,7 +1744,7 @@
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_int_operations",
"MetricThreshold": "tma_int_operations > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents overall Integer (Int) select operations fraction the CPU has executed (retired). Vector/Matrix Int operations and shuffles are counted. Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain",
+ "PublicDescription": "This metric represents overall Integer (Int) select operations fraction the CPU has executed (retired). Vector/Matrix Int operations and shuffles are counted. Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain.",
"ScaleUnit": "100%"
},
{
@@ -1701,8 +1752,8 @@
"MetricExpr": "(INT_VEC_RETIRED.ADD_128 + INT_VEC_RETIRED.VNNI_128) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;IntVector;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group;tma_issue2P",
"MetricName": "tma_int_vector_128b",
- "MetricThreshold": "tma_int_vector_128b > 0.1 & tma_int_operations > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents 128-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_int_vector_128b > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents 128-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1710,8 +1761,8 @@
"MetricExpr": "(INT_VEC_RETIRED.ADD_256 + INT_VEC_RETIRED.MUL_256 + INT_VEC_RETIRED.VNNI_256) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;IntVector;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group;tma_issue2P",
"MetricName": "tma_int_vector_256b",
- "MetricThreshold": "tma_int_vector_256b > 0.1 & tma_int_operations > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD/MUL or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_int_vector_256b > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD/MUL or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1719,8 +1770,8 @@
"MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
- "MetricThreshold": "tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS, FRONTEND_RETIRED.ITLB_MISS",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
"ScaleUnit": "100%"
},
{
@@ -1728,17 +1779,17 @@
"MetricExpr": "max((EXE_ACTIVITY.BOUND_ON_LOADS - MEMORY_ACTIVITY.STALLS_L1D_MISS) / tma_info_thread_clks, 0)",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
"MetricName": "tma_l1_bound",
- "MetricThreshold": "tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 Data (L1D) cache. The L1D cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1D. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache",
+ "BriefDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache",
"MetricExpr": "min(2 * (MEM_INST_RETIRED.ALL_LOADS - MEM_LOAD_RETIRED.FB_HIT - MEM_LOAD_RETIRED.L1_MISS) * 20 / 100, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - MEMORY_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_l1_latency_dependency",
- "MetricThreshold": "tma_l1_latency_dependency > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache. The short latency of the L1D cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
+ "MetricThreshold": "tma_l1_latency_dependency > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache. The short latency of the L1D cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
"ScaleUnit": "100%"
},
{
@@ -1746,7 +1797,7 @@
"MetricExpr": "(MEMORY_ACTIVITY.STALLS_L1D_MISS - MEMORY_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks",
"MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
- "MetricThreshold": "tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT",
"ScaleUnit": "100%"
},
@@ -1755,7 +1806,7 @@
"MetricExpr": "4.4 * tma_info_system_core_frequency * MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_l2_bound_group",
"MetricName": "tma_l2_hit_latency",
- "MetricThreshold": "tma_l2_hit_latency > 0.05 & tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l2_hit_latency > 0.05 & (tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L2 cache under unloaded scenarios (possibly L2 latency limited). Avoiding L1 cache misses (i.e. L1 misses/L2 hits) will improve the latency. Sample with: MEM_LOAD_RETIRED.L2_HIT",
"ScaleUnit": "100%"
},
@@ -1764,17 +1815,17 @@
"MetricExpr": "(MEMORY_ACTIVITY.STALLS_L2_MISS - MEMORY_ACTIVITY.STALLS_L3_MISS) / tma_info_thread_clks",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
- "MetricThreshold": "tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
- "MetricExpr": "(37 * tma_info_system_core_frequency - 4.4 * tma_info_system_core_frequency) * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks",
+ "MetricExpr": "32.6 * tma_info_system_core_frequency * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
- "MetricThreshold": "tma_l3_hit_latency > 0.1 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT. Related metrics: tma_bottleneck_cache_memory_latency, tma_branch_resteers, tma_mem_latency, tma_store_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_bottleneck_data_cache_memory_latency, tma_mem_latency",
"ScaleUnit": "100%"
},
{
@@ -1782,19 +1833,19 @@
"MetricExpr": "DECODE.LCP / tma_info_thread_clks",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_lcp",
- "MetricThreshold": "tma_lcp > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation)",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
"DefaultMetricgroupName": "TopdownL2",
"MetricExpr": "max(0, tma_retiring - tma_heavy_operations)",
"MetricGroup": "Default;Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_light_operations",
"MetricThreshold": "tma_light_operations > 0.6",
"MetricgroupNoGroup": "TopdownL2;Default",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
"ScaleUnit": "100%"
},
{
@@ -1811,7 +1862,7 @@
"MetricExpr": "tma_dtlb_load - tma_load_stlb_miss",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
"MetricName": "tma_load_stlb_hit",
- "MetricThreshold": "tma_load_stlb_hit > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_hit > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
@@ -1819,48 +1870,49 @@
"MetricExpr": "DTLB_LOAD_MISSES.WALK_ACTIVE / tma_info_thread_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
"MetricName": "tma_load_stlb_miss",
- "MetricThreshold": "tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_1G / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_1g",
- "MetricThreshold": "tma_load_stlb_miss_1g > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_1g > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_2m",
- "MetricThreshold": "tma_load_stlb_miss_2m > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_2m > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_4K / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_4k",
- "MetricThreshold": "tma_load_stlb_miss_4k > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_4k > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory",
- "MetricExpr": "(109 * tma_info_system_core_frequency - 37 * tma_info_system_core_frequency) * MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "72 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "Server;TopdownL5;tma_L5_group;tma_mem_latency_group",
"MetricName": "tma_local_mem",
- "MetricThreshold": "tma_local_mem > 0.1 & tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_local_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. Sample with: MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(16 * max(0, MEM_INST_RETIRED.LOCK_LOADS - L2_RQSTS.ALL_RFO) + MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES * (10 * L2_RQSTS.RFO_HIT + min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO))) / tma_info_thread_clks",
"MetricGroup": "LockCont;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
- "MetricThreshold": "tma_lock_latency > 0.2 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
"ScaleUnit": "100%"
},
@@ -1876,20 +1928,20 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to memory bandwidth Allocation feature (RDT's memory bandwidth throttling)",
+ "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to memory bandwidth Allocation feature (RDT's memory bandwidth throttling).",
"MetricExpr": "INT_MISC.MBA_STALLS / tma_info_thread_clks",
"MetricGroup": "MemoryBW;Offcore;Server;TopdownL5;tma_L5_group;tma_mem_bandwidth_group",
"MetricName": "tma_mba_stalls",
- "MetricThreshold": "tma_mba_stalls > 0.1 & tma_mem_bandwidth > 0.2 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_mba_stalls > 0.1 & (tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
- "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=0x4@) / tma_info_thread_clks",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
- "MetricThreshold": "tma_mem_bandwidth > 0.2 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
"ScaleUnit": "100%"
},
{
@@ -1897,32 +1949,31 @@
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
"MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
- "MetricThreshold": "tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_bottleneck_cache_memory_latency, tma_l3_hit_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_bottleneck_data_cache_memory_latency, tma_l3_hit_latency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
"DefaultMetricgroupName": "TopdownL2",
- "MetricExpr": "topdown\\-mem\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "topdown\\-mem\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound)",
"MetricGroup": "Backend;Default;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_memory_bound",
"MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2;Default",
- "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two)",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to LFENCE Instructions",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to LFENCE Instructions.",
"MetricExpr": "13 * MISC2_RETIRED.LFENCE / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_memory_fence",
- "MetricThreshold": "tma_memory_fence > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_memory_fence > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations , uops for memory load or store accesses",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
"MetricExpr": "tma_light_operations * MEM_UOP_RETIRED.ANY / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_memory_operations",
@@ -1943,7 +1994,7 @@
"MetricExpr": "tma_branch_mispredicts / tma_bad_speculation * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
"MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
"MetricName": "tma_mispredicts_resteers",
- "MetricThreshold": "tma_mispredicts_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_bottleneck_mispredictions, tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost",
"ScaleUnit": "100%"
},
@@ -1957,17 +2008,17 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued , the Count Domain; [ADL+] cycles)",
+ "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles)",
"MetricExpr": "160 * ASSISTS.SSE_AVX_MIX / tma_info_thread_clks",
"MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group",
"MetricName": "tma_mixing_vectors",
"MetricThreshold": "tma_mixing_vectors > 0.05",
- "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued , the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
+ "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the Microcode Sequencer (MS) unit - see Microcode_Sequencer node for details",
- "MetricExpr": "max(IDQ.MS_CYCLES_ANY, cpu@UOPS_RETIRED.MS\\,cmask\\=0x1@ / (UOPS_RETIRED.SLOTS / UOPS_ISSUED.ANY)) / tma_info_core_core_clks / 2",
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the Microcode Sequencer (MS) unit - see Microcode_Sequencer node for details.",
+ "MetricExpr": "max(IDQ.MS_CYCLES_ANY, cpu@UOPS_RETIRED.MS\\,cmask\\=1@ / (UOPS_RETIRED.SLOTS / UOPS_ISSUED.ANY)) / tma_info_core_core_clks / 2.4",
"MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_ms",
"MetricThreshold": "tma_ms > 0.05 & tma_fetch_bandwidth > 0.2",
@@ -1975,10 +2026,10 @@
},
{
"BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS)",
- "MetricExpr": "3 * cpu@UOPS_RETIRED.MS\\,cmask\\=0x1\\,edge\\=0x1@ / (UOPS_RETIRED.SLOTS / UOPS_ISSUED.ANY) / tma_info_thread_clks",
+ "MetricExpr": "3 * cpu@UOPS_RETIRED.MS\\,cmask\\=1\\,edge@ / (UOPS_RETIRED.SLOTS / UOPS_ISSUED.ANY) / tma_info_thread_clks",
"MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
"MetricName": "tma_ms_switches",
- "MetricThreshold": "tma_ms_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: FRONTEND_RETIRED.MS_FLOWS. Related metrics: tma_bottleneck_irregular_overhead, tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
"ScaleUnit": "100%"
},
@@ -1988,7 +2039,7 @@
"MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_non_fused_branches",
"MetricThreshold": "tma_non_fused_branches > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused.",
"ScaleUnit": "100%"
},
{
@@ -1996,12 +2047,13 @@
"MetricExpr": "tma_light_operations * INST_RETIRED.NOP / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "BvBO;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_nop_instructions",
- "MetricThreshold": "tma_nop_instructions > 0.1 & tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_int_operations + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches))",
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_other_light_ops",
@@ -2010,19 +2062,19 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types)",
+ "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).",
"MetricExpr": "max(tma_branch_mispredicts * (1 - BR_MISP_RETIRED.ALL_BRANCHES / (INT_MISC.CLEARS_COUNT - MACHINE_CLEARS.COUNT)), 0.0001)",
"MetricGroup": "BrMispredicts;BvIO;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_other_mispredicts",
- "MetricThreshold": "tma_other_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering",
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.",
"MetricExpr": "max(tma_machine_clears * (1 - MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.COUNT), 0.0001)",
"MetricGroup": "BvIO;Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_other_nukes",
- "MetricThreshold": "tma_other_nukes > 0.05 & tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
@@ -2031,7 +2083,7 @@
"MetricGroup": "TopdownL5;tma_L5_group;tma_assists_group",
"MetricName": "tma_page_faults",
"MetricThreshold": "tma_page_faults > 0.05",
- "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Page Faults. A Page Fault may apply on first application access to a memory page. Note operating system handling of page faults accounts for the majority of its cost",
+ "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Page Faults. A Page Fault may apply on first application access to a memory page. Note operating system handling of page faults accounts for the majority of its cost.",
"ScaleUnit": "100%"
},
{
@@ -2040,7 +2092,7 @@
"MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_0",
"MetricThreshold": "tma_port_0 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -2049,7 +2101,7 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_1",
"MetricThreshold": "tma_port_1 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -2058,78 +2110,79 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_6",
"MetricThreshold": "tma_port_6 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "((tma_ports_utilized_0 * tma_info_thread_clks + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_3_PORTS_UTIL)) / tma_info_thread_clks if ARITH.DIV_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - EXE_ACTIVITY.BOUND_ON_LOADS else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_3_PORTS_UTIL) / tma_info_thread_clks)",
"MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_ports_utilization",
- "MetricThreshold": "tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricConstraint": "NO_THRESHOLD_AND_NMI",
"MetricExpr": "(EXE_ACTIVITY.EXE_BOUND_0_PORTS + max(RS.EMPTY_RESOURCE - RESOURCE_STALLS.SCOREBOARD, 0)) / tma_info_thread_clks * (CYCLE_ACTIVITY.STALLS_TOTAL - EXE_ACTIVITY.BOUND_ON_LOADS) / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
- "MetricThreshold": "tma_ports_utilized_0 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricConstraint": "NO_THRESHOLD_AND_NMI",
"MetricExpr": "EXE_ACTIVITY.1_PORTS_UTIL / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_1",
- "MetricThreshold": "tma_ports_utilized_1 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Sample with: EXE_ACTIVITY.1_PORTS_UTIL. Related metrics: tma_l1_bound",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "EXE_ACTIVITY.2_PORTS_UTIL / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_2",
- "MetricThreshold": "tma_ports_utilized_2 > 0.15 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Sample with: EXE_ACTIVITY.2_PORTS_UTIL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_6",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Sample with: EXE_ACTIVITY.2_PORTS_UTIL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_thread_clks",
"MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
- "MetricThreshold": "tma_ports_utilized_3m > 0.4 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues",
- "MetricExpr": "((170 * tma_info_system_core_frequency - 37 * tma_info_system_core_frequency) * MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM + (170 * tma_info_system_core_frequency - 37 * tma_info_system_core_frequency) * MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "(133 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM + 133 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "Offcore;Server;Snoop;TopdownL5;tma_L5_group;tma_issueSyncxn;tma_mem_latency_group",
"MetricName": "tma_remote_cache",
- "MetricThreshold": "tma_remote_cache > 0.05 & tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues. This is caused often due to non-optimal NUMA allocations. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM, MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_remote_cache > 0.05 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM_PS;MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD_PS. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory",
- "MetricExpr": "(190 * tma_info_system_core_frequency - 37 * tma_info_system_core_frequency) * MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "153 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "Server;Snoop;TopdownL5;tma_L5_group;tma_mem_latency_group",
"MetricName": "tma_remote_mem",
- "MetricThreshold": "tma_remote_mem > 0.1 & tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
+ "MetricThreshold": "tma_remote_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM_PS",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound)",
"MetricGroup": "BvUW;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
@@ -2142,7 +2195,7 @@
"MetricExpr": "RESOURCE_STALLS.SCOREBOARD / tma_info_thread_clks + tma_c02_wait",
"MetricGroup": "BvIO;PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
"MetricName": "tma_serializing_operation",
- "MetricThreshold": "tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
"ScaleUnit": "100%"
},
@@ -2151,17 +2204,16 @@
"MetricExpr": "tma_light_operations * INT_VEC_RETIRED.SHUFFLES / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "HPC;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_shuffles_256b",
- "MetricThreshold": "tma_shuffles_256b > 0.1 & tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring Shuffle operations of 256-bit vector size (FP or Integer). Shuffles may incur slow cross \"vector lane\" data transfers",
+ "MetricThreshold": "tma_shuffles_256b > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring Shuffle operations of 256-bit vector size (FP or Integer). Shuffles may incur slow cross \"vector lane\" data transfers.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "CPU_CLK_UNHALTED.PAUSE / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_slow_pause",
- "MetricThreshold": "tma_slow_pause > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. Sample with: CPU_CLK_UNHALTED.PAUSE_INST",
"ScaleUnit": "100%"
},
@@ -2171,7 +2223,7 @@
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_split_loads",
"MetricThreshold": "tma_split_loads > 0.3",
- "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS_PS",
"ScaleUnit": "100%"
},
{
@@ -2179,8 +2231,8 @@
"MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
"MetricName": "tma_split_stores",
- "MetricThreshold": "tma_split_stores > 0.2 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
"ScaleUnit": "100%"
},
{
@@ -2188,8 +2240,8 @@
"MetricExpr": "(XQ.FULL_CYCLES + L1D_PEND_MISS.L2_STALLS) / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
- "MetricThreshold": "tma_sq_full > 0.3 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
"ScaleUnit": "100%"
},
{
@@ -2197,8 +2249,8 @@
"MetricExpr": "EXE_ACTIVITY.BOUND_ON_STORES / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_store_bound",
- "MetricThreshold": "tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES_PS",
"ScaleUnit": "100%"
},
{
@@ -2206,8 +2258,8 @@
"MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
- "MetricThreshold": "tma_store_fwd_blk > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
"ScaleUnit": "100%"
},
{
@@ -2215,8 +2267,8 @@
"MetricExpr": "(MEM_STORE_RETIRED.L2_HIT * 10 * (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) + (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
"MetricGroup": "BvML;LockCont;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
- "MetricThreshold": "tma_store_latency > 0.1 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_branch_resteers, tma_fb_full, tma_l3_hit_latency, tma_lock_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
"ScaleUnit": "100%"
},
{
@@ -2233,7 +2285,7 @@
"MetricExpr": "tma_dtlb_store - tma_store_stlb_miss",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
"MetricName": "tma_store_stlb_hit",
- "MetricThreshold": "tma_store_stlb_hit > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_hit > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
@@ -2241,31 +2293,31 @@
"MetricExpr": "DTLB_STORE_MISSES.WALK_ACTIVE / tma_info_core_core_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
"MetricName": "tma_store_stlb_miss",
- "MetricThreshold": "tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_1G / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_1g",
- "MetricThreshold": "tma_store_stlb_miss_1g > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_1g > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_2m",
- "MetricThreshold": "tma_store_stlb_miss_2m > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_2m > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_4K / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_4k",
- "MetricThreshold": "tma_store_stlb_miss_4k > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_4k > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
@@ -2273,7 +2325,7 @@
"MetricExpr": "9 * OCR.STREAMING_WR.ANY_RESPONSE / tma_info_thread_clks",
"MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueSmSt;tma_store_bound_group",
"MetricName": "tma_streaming_stores",
- "MetricThreshold": "tma_streaming_stores > 0.2 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_streaming_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should Streaming stores be a bottleneck. Sample with: OCR.STREAMING_WR.ANY_RESPONSE. Related metrics: tma_fb_full",
"ScaleUnit": "100%"
},
@@ -2282,7 +2334,7 @@
"MetricExpr": "INT_MISC.UNKNOWN_BRANCH_CYCLES / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
- "MetricThreshold": "tma_unknown_branches > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: FRONTEND_RETIRED.UNKNOWN_BRANCH",
"ScaleUnit": "100%"
},
@@ -2291,8 +2343,8 @@
"MetricExpr": "tma_retiring * UOPS_EXECUTED.X87 / UOPS_EXECUTED.THREAD",
"MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
"MetricName": "tma_x87_use",
- "MetricThreshold": "tma_x87_use > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
"ScaleUnit": "100%"
},
{
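Before the next file: the tma_retiring change near the top of these hunks drops a "+ 0 * slots" term that is numerically a no-op, presumably there only to pull the slots event into the metric's event group. A sketch of what the expression computes, with illustrative counter values:

    # Illustrative values only; the four level-1 topdown counters
    # partition the pipeline slots, and tma_retiring is the retiring share.
    fe_bound, bad_spec, retiring, be_bound = 2.0e9, 5e8, 5.0e9, 2.5e9
    total_slots = fe_bound + bad_spec + retiring + be_bound
    print(retiring / total_slots)             # 0.5
    print(retiring / total_slots + 0 * 1e10)  # identical: 0*slots was a no-op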
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json
index a38db3e253f2..1bdda3c3ccbf 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json
@@ -312,6 +312,17 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "Distress signal asserted : DPT Remote",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xaf",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_NONLOCAL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Remote : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle received by this tile",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "Egress Blocking due to Ordering requirements : Down",
"Counter": "0,1,2,3",
"EventCode": "0xba",
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json
index aab082ff9402..dac7e6c50f31 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json
@@ -1902,6 +1902,18 @@
"Unit": "IIO"
},
{
+ "BriefDescription": "Posted requests sent by the integrated IO (IIO) controller to the Ubox, useful for counting message signaled interrupts (MSI).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8e",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.UBOX_POSTED",
+ "Experimental": "1",
+ "FCMask": "0x01",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
"BriefDescription": "ITC address map 1",
"Counter": "0,1,2,3",
"EventCode": "0x8f",
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-memory.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-memory.json
index aa06088dd26f..30044177ccf8 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-memory.json
@@ -2146,6 +2146,16 @@
"Unit": "MCHBM"
},
{
+ "BriefDescription": "ECC Correctable Errors",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x09",
+ "EventName": "UNC_MCHBM_ECC_CORRECTABLE_ERRORS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "ECC Correctable Errors. Counts the number of ECC errors detected and corrected by the iMC on this channel. This counter is only useful with ECC devices. This count will increment one time for each correction regardless of the number of bits corrected. The iMC can correct up to 4 bit errors in independent channel mode and 8 bit errors in lockstep mode.",
+ "Unit": "MCHBM"
+ },
+ {
"BriefDescription": "HBM Precharge All Commands",
"Counter": "0,1,2,3",
"EventCode": "0x44",
@@ -2760,6 +2770,98 @@
"Unit": "iMC"
},
{
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.HIGH",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of DRAM Refreshes Issued : Counts the number of refreshes issued.",
+ "UMask": "0x24",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.HIGH_ALL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.HIGH_PCH0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.HIGH_PCH1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.PANIC",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of DRAM Refreshes Issued : Counts the number of refreshes issued.",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.PANIC_ALL",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.PANIC_PCH0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.PANIC_PCH1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ECC Correctable Errors",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x09",
+ "EventName": "UNC_M_ECC_CORRECTABLE_ERRORS",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "ECC Correctable Errors : Counts the number of ECC errors detected and corrected by the iMC on this channel. This counter is only useful with ECC DRAM devices. This count will increment one time for each correction regardless of the number of bits corrected. The iMC can correct up to 4 bit errors in independent channel mode and 8 bit errors in lockstep mode.",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "IMC Clockticks at HCLK frequency",
"Counter": "0,1,2,3",
"EventCode": "0x01",
@@ -3028,6 +3130,28 @@
"Unit": "iMC"
},
{
+ "BriefDescription": "Throttle Cycles for Rank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Throttle Cycles for Rank 0 : Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1. : Thermal throttling is performed per DIMM. We support 3 DIMMs per channel. This ID allows us to filter by ID.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Throttle Cycles for Rank 0 : Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "Precharge due to read, write, underfill, or PGT.",
"Counter": "0,1,2,3",
"EventCode": "0x03",
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-power.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-power.json
index 9482ddaea4d1..71c35b165a3e 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-power.json
@@ -178,7 +178,6 @@
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY_CORES_C0",
- "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cores in C0 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"Unit": "PCU"
@@ -198,7 +197,6 @@
"Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY_CORES_C6",
- "Experimental": "1",
"PerPkg": "1",
"PublicDescription": "Number of cores in C6 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"Unit": "PCU"
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/cache.json b/tools/perf/pmu-events/arch/x86/sierraforest/cache.json
index 072df00aff92..de0e7661a52d 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/cache.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/cache.json
@@ -163,6 +163,14 @@
"UMask": "0x6"
},
{
+ "BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which hit in the LLC, no snoop was required. LLC provides the data. If the core has access to an L3 cache, an LLC hit refers to an L3 cache hit, otherwise it counts zeros.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS_LOAD.LLC_HIT_NOSNOOP",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
"BriefDescription": "Counts the number of unhalted cycles when the core is stalled due to a demand load miss which missed all the local caches. If the core has access to an L3 cache, an LLC miss refers to an L3 cache miss, otherwise it is an L2 cache miss.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x34",
@@ -179,6 +187,14 @@
"UMask": "0x80"
},
{
+ "BriefDescription": "Counts the total number of load ops retired that miss the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.ALL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xff"
+ },
+ {
"BriefDescription": "Counts the number of load ops retired that miss the L3 cache and hit in DRAM",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd3",
@@ -187,6 +203,31 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts the number of load ops retired that miss the L3 cache and hit in a Remote DRAM",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM_OR_NOFWD",
+ "PublicDescription": "Counts the number of load ops retired that miss the L3 cache and hit in a Remote DRAM, OR had a Remote snoop miss/no fwd and hit in the Local Dram",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of load ops retired that miss the L3 cache and hit in a Remote Cache and modified data was forwarded",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD_HITM",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of load ops retired that miss the L3 cache and hit in a Remote Cache and non-modified data was forwarded",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD_NONM",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
"BriefDescription": "Counts the number of load ops retired that hit the L1 data cache.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xd1",
@@ -467,12 +508,24 @@
"UMask": "0x6"
},
{
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "PublicDescription": "Counts demand data reads that have any type of response. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0001",
+ "PublicDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -483,6 +536,18 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0001",
+ "PublicDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -493,6 +558,7 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
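The OCR events added and annotated in this file are all driven by the offcore-response MSR pair. Decoding the MSRValue of OCR.DEMAND_DATA_RD.ANY_RESPONSE under the usual layout (request type in the low 16 bits, response selection above) as a sketch:

    msr_value = 0x10001  # OCR.DEMAND_DATA_RD.ANY_RESPONSE
    request = msr_value & 0xFFFF
    response = msr_value >> 16
    print(hex(request), hex(response))  # 0x1 = demand data read, 0x1 = any response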
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/frontend.json b/tools/perf/pmu-events/arch/x86/sierraforest/frontend.json
index fef5cba533bb..8a591e31d331 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/frontend.json
@@ -9,6 +9,54 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts the number of instructions retired that were tagged with having preceded with frontend bound behavior",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ALL",
+ "SampleAfterValue": "1000003"
+ },
+ {
+ "BriefDescription": "Counts the number of instruction retired that are tagged after a branch instruction causes bubbles/empty issue slots due to a baclear",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.BRANCH_DETECT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of instruction retired that are tagged after a branch instruction causes bubbles /empty issue slots due to a btclear",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.BRANCH_RESTEER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Counts the number of instructions retired that were tagged following an ms flow due to the bubble/wasted issue slot from exiting long ms flow",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.CISC",
+ "PublicDescription": "Counts the number of instructions retired that were tagged following an ms flow due to the bubble/wasted issue slot from exiting long ms flow",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of instructions retired that were tagged every cycle the decoder is unable to send 3 uops per cycle.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.DECODE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of instructions retired that were tagged because empty issue slots were seen before the uop due to icache miss",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ICACHE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20"
+ },
+ {
"BriefDescription": "Counts the number of instructions retired that were tagged because empty issue slots were seen before the uop due to ITLB miss",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
@@ -17,6 +65,22 @@
"UMask": "0x10"
},
{
+ "BriefDescription": "Counts the number of instruction retired tagged after a wasted issue slot if none of the previous events occurred",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.OTHER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Counts the number of instruction retired that are tagged after a branch instruction causes bubbles/empty issue slots due to a predecode wrong.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.PREDECODE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
"BriefDescription": "Counts every time the code stream enters into a new cache line by walking sequential from the previous line or being redirected by a jump.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x80",
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/memory.json b/tools/perf/pmu-events/arch/x86/sierraforest/memory.json
index 22d23077618e..dc850a179517 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/memory.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/memory.json
@@ -79,6 +79,29 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00001",
+ "PublicDescription": "Counts demand data reads that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to this socket.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "PublicDescription": "Counts demand data reads that were supplied by DRAM attached to this socket. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to another socket.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x730000001",
+ "PublicDescription": "Counts demand data reads that were supplied by DRAM attached to another socket. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -89,6 +112,7 @@
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
}
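All three demand-data-read OCR encodings in this file share request bit 0x1 and differ only in the supplier/snoop selection above bit 16; a quick check:

    encodings = {"L3_MISS": 0x3FBFC00001,
                 "LOCAL_DRAM": 0x184000001,
                 "REMOTE_DRAM": 0x730000001}
    for name, value in encodings.items():
        assert value & 0xFFFF == 0x1  # demand data read request bit
        print(f"{name}: response bits {value >> 16:#x}")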
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/other.json b/tools/perf/pmu-events/arch/x86/sierraforest/other.json
index 4c77dac8ec78..ea34103a8292 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/other.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/other.json
@@ -9,61 +9,14 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads that have any type of response.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to this socket.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to another socket.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_DATA_RD.REMOTE_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x730000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
"BriefDescription": "Counts streaming stores that have any type of response.",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xB7",
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10800",
+ "PublicDescription": "Counts streaming stores that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
- },
- {
- "BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0x75",
- "EventName": "SERIALIZATION.C01_MS_SCB",
- "SampleAfterValue": "200003",
- "UMask": "0x4"
}
]
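Nothing in other.json is actually lost: the four OCR events reappear in cache.json and memory.json above, and SERIALIZATION.C01_MS_SCB resurfaces in pipeline.json below. A sanity check one could script over the whole patch:

    removed_here = {"OCR.DEMAND_DATA_RD.ANY_RESPONSE",
                    "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
                    "OCR.DEMAND_DATA_RD.REMOTE_DRAM",
                    "OCR.DEMAND_RFO.ANY_RESPONSE",
                    "SERIALIZATION.C01_MS_SCB"}
    readded = {"OCR.DEMAND_DATA_RD.ANY_RESPONSE",  # -> cache.json
               "OCR.DEMAND_RFO.ANY_RESPONSE",      # -> cache.json
               "OCR.DEMAND_DATA_RD.LOCAL_DRAM",    # -> memory.json
               "OCR.DEMAND_DATA_RD.REMOTE_DRAM",   # -> memory.json
               "SERIALIZATION.C01_MS_SCB"}         # -> pipeline.json
    assert removed_here == readded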
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json b/tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json
index df2c7bb474a0..70af13143024 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json
@@ -11,6 +11,7 @@
{
"BriefDescription": "Counts the total number of branch instructions retired for all branch types.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Errata": "SRF6, SRF7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PublicDescription": "Counts the total number of instructions in which the instruction pointer (IP) of the processor is resteered due to a branch instruction and the branch instruction successfully retires. All branch type instructions are accounted for.",
@@ -19,6 +20,7 @@
{
"BriefDescription": "Counts the number of retired JCC (Jump on Conditional Code) branch instructions retired, includes both taken and not taken branches.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Errata": "SRF7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.COND",
"SampleAfterValue": "200003",
@@ -35,6 +37,7 @@
{
"BriefDescription": "Counts the number of far branch instructions retired, includes far jump, far call and return, and interrupt call and return.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Errata": "SRF7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"SampleAfterValue": "200003",
@@ -43,6 +46,7 @@
{
"BriefDescription": "Counts the number of near indirect JMP and near indirect CALL branch instructions retired.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Errata": "SRF7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT",
"SampleAfterValue": "200003",
@@ -51,6 +55,7 @@
{
"BriefDescription": "Counts the number of near indirect CALL branch instructions retired.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Errata": "SRF7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.INDIRECT_CALL",
"SampleAfterValue": "200003",
@@ -68,6 +73,7 @@
"BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.INDIRECT_CALL",
"Counter": "0,1,2,3,4,5,6,7",
"Deprecated": "1",
+ "Errata": "SRF7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.IND_CALL",
"SampleAfterValue": "200003",
@@ -76,6 +82,7 @@
{
"BriefDescription": "Counts the number of near CALL branch instructions retired.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Errata": "SRF6, SRF7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"SampleAfterValue": "200003",
@@ -92,6 +99,7 @@
{
"BriefDescription": "Counts the number of near taken branch instructions retired.",
"Counter": "0,1,2,3,4,5,6,7",
+ "Errata": "SRF7",
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"SampleAfterValue": "200003",
@@ -225,6 +233,7 @@
"BriefDescription": "Fixed Counter: Counts the number of instructions retired",
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
+ "PublicDescription": "Fixed Counter: Counts the number of instructions retired Available PDIST counters: 32",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
@@ -301,6 +310,14 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x75",
+ "EventName": "SERIALIZATION.C01_MS_SCB",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL_P]",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x73",
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/srf-metrics.json b/tools/perf/pmu-events/arch/x86/sierraforest/srf-metrics.json
index 83c86afd2960..ca2c55917e55 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/srf-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/srf-metrics.json
@@ -1,56 +1,56 @@
[
{
"BriefDescription": "C10 residency percent per package",
- "MetricExpr": "cstate_pkg@c10\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c10\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C10_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C1 residency percent per core",
- "MetricExpr": "cstate_core@c1\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c1\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C1_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C2 residency percent per package",
- "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per package",
- "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per core",
- "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per package",
- "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per core",
- "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C8 residency percent per package",
- "MetricExpr": "cstate_pkg@c8\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c8\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C8_Pkg_Residency",
"ScaleUnit": "100%"
@@ -62,6 +62,18 @@
"ScaleUnit": "1per_instr"
},
{
+ "BriefDescription": "The average number of cores that are in cstate C0 as observed by the power control unit (PCU)",
+ "MetricExpr": "UNC_P_POWER_STATE_OCCUPANCY_CORES_C0 / pcu_0@UNC_P_CLOCKTICKS@ * #num_packages",
+ "MetricGroup": "cpu_cstate",
+ "MetricName": "cpu_cstate_c0"
+ },
+ {
+ "BriefDescription": "The average number of cores that are in cstate C6 as observed by the power control unit (PCU)",
+ "MetricExpr": "UNC_P_POWER_STATE_OCCUPANCY_CORES_C6 / pcu_0@UNC_P_CLOCKTICKS@ * #num_packages",
+ "MetricGroup": "cpu_cstate",
+ "MetricName": "cpu_cstate_c6"
+ },
+ {
"BriefDescription": "CPU operating frequency (in GHz)",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC * #SYSTEM_TSC_FREQ / 1e9",
"MetricName": "cpu_operating_frequency",
@@ -113,6 +125,12 @@
"ScaleUnit": "1MB/s"
},
{
+ "BriefDescription": "Bandwidth of inbound IO reads that are initiated by end device controllers that are requesting memory from the CPU and miss the L3 cache",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_read_l3_miss",
+ "ScaleUnit": "1MB/s"
+ },
+ {
"BriefDescription": "Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the local CPU socket",
"MetricExpr": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR_LOCAL * 64 / 1e6 / duration_time",
"MetricName": "io_bandwidth_read_local",
@@ -131,6 +149,12 @@
"ScaleUnit": "1MB/s"
},
{
+ "BriefDescription": "Bandwidth of inbound IO writes that are initiated by end device controllers that are writing memory to the CPU",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_MISS_ITOM + UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR) * 64 / 1e6 / duration_time",
+ "MetricName": "io_bandwidth_write_l3_miss",
+ "ScaleUnit": "1MB/s"
+ },
+ {
"BriefDescription": "Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the local CPU socket",
"MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_ITOM_LOCAL + UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_LOCAL) * 64 / 1e6 / duration_time",
"MetricName": "io_bandwidth_write_local",
@@ -143,6 +167,30 @@
"ScaleUnit": "1MB/s"
},
{
+ "BriefDescription": "The percent of inbound full cache line writes initiated by IO that miss the L3 cache",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM / UNC_CHA_TOR_INSERTS.IO_ITOM",
+ "MetricName": "io_full_write_l3_miss",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Message Signaled Interrupts (MSI) per second sent by the integrated I/O traffic controller (IIO) to System Configuration Controller (Ubox)",
+ "MetricExpr": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.UBOX_POSTED / duration_time",
+ "MetricName": "io_msi",
+ "ScaleUnit": "1per_sec"
+ },
+ {
+ "BriefDescription": "The percent of inbound partial writes initiated by IO that miss the L3 cache",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR + UNC_CHA_TOR_INSERTS.IO_MISS_RFO) / (UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR + UNC_CHA_TOR_INSERTS.IO_RFO)",
+ "MetricName": "io_partial_write_l3_miss",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "The percent of inbound reads initiated by IO that miss the L3 cache",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR / UNC_CHA_TOR_INSERTS.IO_PCIRDCUR",
+ "MetricName": "io_read_l3_miss",
+ "ScaleUnit": "100%"
+ },
+ {
"BriefDescription": "Ratio of number of completed page walks (for 2 megabyte and 4 megabyte page sizes) caused by a code fetch to the total number of completed instructions",
"MetricExpr": "ITLB_MISSES.WALK_COMPLETED_2M_4M / INST_RETIRED.ANY",
"MetricName": "itlb_2nd_level_large_page_mpi",
@@ -288,15 +336,17 @@
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to certain allocation restrictions",
"MetricExpr": "tma_core_bound",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_allocation_restriction",
+ "MetricThreshold": "tma_allocation_restriction > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls",
"MetricExpr": "TOPDOWN_BE_BOUND.ALL_P / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL1;tma_L1_group",
+ "MetricGroup": "TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
+ "MetricThreshold": "tma_backend_bound > 0.1",
"MetricgroupNoGroup": "TopdownL1",
"PublicDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that uops must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count",
"ScaleUnit": "100%"
@@ -304,92 +354,104 @@
{
"BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear",
"MetricExpr": "TOPDOWN_BAD_SPECULATION.ALL_P / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL1;tma_L1_group",
+ "MetricGroup": "TopdownL1;tma_L1_group",
"MetricName": "tma_bad_speculation",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL1",
- "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ). Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ). Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend",
"MetricExpr": "TOPDOWN_FE_BOUND.BRANCH_DETECT / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_ifetch_latency_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_branch_detect",
- "PublicDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches",
+ "MetricThreshold": "tma_branch_detect > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
+ "PublicDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to branch mispredicts",
"MetricExpr": "TOPDOWN_BAD_SPECULATION.MISPREDICT / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL2;tma_L2_group;tma_bad_speculation_group",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
"MetricName": "tma_branch_mispredicts",
+ "MetricThreshold": "tma_branch_mispredicts > 0.05 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
"MetricExpr": "TOPDOWN_FE_BOUND.BRANCH_RESTEER / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_ifetch_latency_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_branch_resteer",
+ "MetricThreshold": "tma_branch_resteer > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to the microcode sequencer (MS)",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to the microcode sequencer (MS).",
"MetricExpr": "TOPDOWN_FE_BOUND.CISC / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_cisc",
+ "MetricThreshold": "tma_cisc > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of cycles due to backend bound stalls that are bounded by core restrictions and not attributed to an outstanding load or stores, or resource limitation",
"MetricExpr": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_core_bound",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.1",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to decode stalls",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to decode stalls.",
"MetricExpr": "TOPDOWN_FE_BOUND.DECODE / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_decode",
+ "MetricThreshold": "tma_decode > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear that does not require the use of microcode, classified as a fast nuke, due to memory ordering, memory disambiguation and memory renaming",
"MetricExpr": "TOPDOWN_BAD_SPECULATION.FASTNUKE / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_fast_nuke",
+ "MetricThreshold": "tma_fast_nuke > 0.05 & (tma_machine_clears > 0.05 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to frontend stalls",
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to frontend stalls.",
"MetricExpr": "TOPDOWN_FE_BOUND.ALL_P / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL1;tma_L1_group",
+ "MetricGroup": "TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to instruction cache misses",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to instruction cache misses.",
"MetricExpr": "TOPDOWN_FE_BOUND.ICACHE / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_ifetch_latency_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_icache_misses",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.",
"MetricExpr": "TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
"MetricName": "tma_ifetch_bandwidth",
+ "MetricThreshold": "tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend latency restrictions due to icache misses, itlb misses, branch detection, and resteer limitations",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend latency restrictions due to icache misses, itlb misses, branch detection, and resteer limitations.",
"MetricExpr": "TOPDOWN_FE_BOUND.FRONTEND_LATENCY / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
"MetricName": "tma_ifetch_latency",
+ "MetricThreshold": "tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%"
},
@@ -420,33 +482,28 @@
{
"BriefDescription": "Percentage of time that retirement is stalled due to a first level data TLB miss",
"MetricExpr": "100 * (LD_HEAD.DTLB_MISS_AT_RET + LD_HEAD.PGWALK_AT_RET) / CPU_CLK_UNHALTED.CORE",
- "MetricGroup": "Cycles",
- "MetricName": "tma_info_bottleneck_dtlb_miss_bound_cycles",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_bottleneck_%_dtlb_miss_bound_cycles"
},
{
"BriefDescription": "Percentage of time that allocation and retirement is stalled by the Frontend Cluster due to an Ifetch Miss, either Icache or ITLB Miss",
"MetricExpr": "100 * MEM_BOUND_STALLS_IFETCH.ALL / CPU_CLK_UNHALTED.CORE",
- "MetricGroup": "Cycles;Ifetch",
- "MetricName": "tma_info_bottleneck_ifetch_miss_bound_cycles",
- "PublicDescription": "Percentage of time that allocation and retirement is stalled by the Frontend Cluster due to an Ifetch Miss, either Icache or ITLB Miss. See Info.Ifetch_Bound",
- "ScaleUnit": "100%"
+ "MetricGroup": "Ifetch",
+ "MetricName": "tma_info_bottleneck_%_ifetch_miss_bound_cycles",
+ "PublicDescription": "Percentage of time that allocation and retirement is stalled by the Frontend Cluster due to an Ifetch Miss, either Icache or ITLB Miss. See Info.Ifetch_Bound"
},
{
"BriefDescription": "Percentage of time that retirement is stalled due to an L1 miss",
"MetricExpr": "100 * MEM_BOUND_STALLS_LOAD.ALL / CPU_CLK_UNHALTED.CORE",
- "MetricGroup": "Cycles;Load_Store_Miss",
- "MetricName": "tma_info_bottleneck_load_miss_bound_cycles",
- "PublicDescription": "Percentage of time that retirement is stalled due to an L1 miss. See Info.Load_Miss_Bound",
- "ScaleUnit": "100%"
+ "MetricGroup": "Load_Store_Miss",
+ "MetricName": "tma_info_bottleneck_%_load_miss_bound_cycles",
+ "PublicDescription": "Percentage of time that retirement is stalled due to an L1 miss. See Info.Load_Miss_Bound"
},
{
"BriefDescription": "Percentage of time that retirement is stalled by the Memory Cluster due to a pipeline stall",
"MetricExpr": "100 * LD_HEAD.ANY_AT_RET / CPU_CLK_UNHALTED.CORE",
- "MetricGroup": "Cycles;Mem_Exec",
- "MetricName": "tma_info_bottleneck_mem_exec_bound_cycles",
- "PublicDescription": "Percentage of time that retirement is stalled by the Memory Cluster due to a pipeline stall. See Info.Mem_Exec_Bound",
- "ScaleUnit": "100%"
+ "MetricGroup": "Mem_Exec",
+ "MetricName": "tma_info_bottleneck_%_mem_exec_bound_cycles",
+ "PublicDescription": "Percentage of time that retirement is stalled by the Memory Cluster due to a pipeline stall. See Info.Mem_Exec_Bound"
},
{
"BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
@@ -501,26 +558,22 @@
{
"BriefDescription": "Percentage of time that allocation is stalled due to load buffer full",
"MetricExpr": "100 * MEM_SCHEDULER_BLOCK.LD_BUF / CPU_CLK_UNHALTED.CORE",
- "MetricName": "tma_info_buffer_stalls_load_buffer_stall_cycles",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_buffer_stalls_%_load_buffer_stall_cycles"
},
{
"BriefDescription": "Percentage of time that allocation is stalled due to memory reservation stations full",
"MetricExpr": "100 * MEM_SCHEDULER_BLOCK.RSV / CPU_CLK_UNHALTED.CORE",
- "MetricName": "tma_info_buffer_stalls_mem_rsv_stall_cycles",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_buffer_stalls_%_mem_rsv_stall_cycles"
},
{
"BriefDescription": "Percentage of time that allocation is stalled due to store buffer full",
"MetricExpr": "100 * MEM_SCHEDULER_BLOCK.ST_BUF / CPU_CLK_UNHALTED.CORE",
- "MetricName": "tma_info_buffer_stalls_store_buffer_stall_cycles",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_buffer_stalls_%_store_buffer_stall_cycles"
},
{
"BriefDescription": "Cycles Per Instruction",
"MetricExpr": "CPU_CLK_UNHALTED.CORE / INST_RETIRED.ANY",
- "MetricName": "tma_info_core_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_core_cpi"
},
{
"BriefDescription": "Floating Point Operations Per Cycle",
@@ -541,28 +594,46 @@
{
"BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss hits in the L2",
"MetricExpr": "100 * MEM_BOUND_STALLS_IFETCH.L2_HIT / MEM_BOUND_STALLS_IFETCH.ALL",
- "MetricName": "tma_info_ifetch_miss_bound_ifetchmissbound_with_l2hit",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l2hit"
+ },
+ {
+ "BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss doesn't hit in the L2",
+ "MetricExpr": "100 * (MEM_BOUND_STALLS_IFETCH.LLC_HIT + MEM_BOUND_STALLS_IFETCH.LLC_MISS) / MEM_BOUND_STALLS_IFETCH.ALL",
+ "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l2miss"
},
{
"BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss hits in the L3",
"MetricExpr": "100 * MEM_BOUND_STALLS_IFETCH.LLC_HIT / MEM_BOUND_STALLS_IFETCH.ALL",
- "MetricName": "tma_info_ifetch_miss_bound_ifetchmissbound_with_l3hit",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l3hit"
+ },
+ {
+ "BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss subsequently misses in the L3",
+ "MetricExpr": "100 * MEM_BOUND_STALLS_IFETCH.LLC_MISS / MEM_BOUND_STALLS_IFETCH.ALL",
+ "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l3miss"
},
{
"BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that hit the L2",
"MetricExpr": "100 * MEM_BOUND_STALLS_LOAD.L2_HIT / MEM_BOUND_STALLS_LOAD.ALL",
"MetricGroup": "load_store_bound",
- "MetricName": "tma_info_load_miss_bound_loadmissbound_with_l2hit",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l2hit"
+ },
+ {
+ "BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that subsequently misses in the L2",
+ "MetricExpr": "100 * (MEM_BOUND_STALLS_LOAD.LLC_HIT + MEM_BOUND_STALLS_LOAD.LLC_MISS) / MEM_BOUND_STALLS_LOAD.ALL",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l2miss"
},
{
"BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that hit the L3",
"MetricExpr": "100 * MEM_BOUND_STALLS_LOAD.LLC_HIT / MEM_BOUND_STALLS_LOAD.ALL",
"MetricGroup": "load_store_bound",
- "MetricName": "tma_info_load_miss_bound_loadmissbound_with_l3hit",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l3hit"
+ },
+ {
+ "BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that subsequently misses the L3",
+ "MetricExpr": "100 * MEM_BOUND_STALLS_LOAD.LLC_MISS / MEM_BOUND_STALLS_LOAD.ALL",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l3miss"
},
{
"BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a pipeline block",
@@ -600,44 +671,37 @@
{
"BriefDescription": "Percentage of total non-speculative loads with an address aliasing block",
"MetricExpr": "100 * LD_BLOCKS.ADDRESS_ALIAS / MEM_UOPS_RETIRED.ALL_LOADS",
- "MetricName": "tma_info_mem_exec_blocks_loads_with_adressaliasing",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_mem_exec_blocks_%_loads_with_adressaliasing"
},
{
"BriefDescription": "Percentage of total non-speculative loads with a store forward or unknown store address block",
"MetricExpr": "100 * LD_BLOCKS.DATA_UNKNOWN / MEM_UOPS_RETIRED.ALL_LOADS",
- "MetricName": "tma_info_mem_exec_blocks_loads_with_storefwdblk",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_mem_exec_blocks_%_loads_with_storefwdblk"
},
{
"BriefDescription": "Percentage of Memory Execution Bound due to a first level data cache miss",
"MetricExpr": "100 * LD_HEAD.L1_MISS_AT_RET / LD_HEAD.ANY_AT_RET",
- "MetricName": "tma_info_mem_exec_bound_loadhead_with_l1miss",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_l1miss"
},
{
"BriefDescription": "Percentage of Memory Execution Bound due to other block cases, such as pipeline conflicts, fences, etc",
"MetricExpr": "100 * LD_HEAD.OTHER_AT_RET / LD_HEAD.ANY_AT_RET",
- "MetricName": "tma_info_mem_exec_bound_loadhead_with_otherpipelineblks",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_otherpipelineblks"
},
{
"BriefDescription": "Percentage of Memory Execution Bound due to a pagewalk",
"MetricExpr": "100 * LD_HEAD.PGWALK_AT_RET / LD_HEAD.ANY_AT_RET",
- "MetricName": "tma_info_mem_exec_bound_loadhead_with_pagewalk",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_pagewalk"
},
{
"BriefDescription": "Percentage of Memory Execution Bound due to a second level TLB miss",
"MetricExpr": "100 * LD_HEAD.DTLB_MISS_AT_RET / LD_HEAD.ANY_AT_RET",
- "MetricName": "tma_info_mem_exec_bound_loadhead_with_stlbhit",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_stlbhit"
},
{
"BriefDescription": "Percentage of Memory Execution Bound due to a store forward address match",
"MetricExpr": "100 * LD_HEAD.ST_ADDR_AT_RET / LD_HEAD.ANY_AT_RET",
- "MetricName": "tma_info_mem_exec_bound_loadhead_with_storefwding",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_storefwding"
},
{
"BriefDescription": "Instructions per Load",
@@ -667,12 +731,11 @@
{
"BriefDescription": "Percentage of time that the core is stalled due to a TPAUSE or UMWAIT instruction",
"MetricExpr": "100 * SERIALIZATION.C01_MS_SCB / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricName": "tma_info_serialization_tpause_cycles",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_serialization_%_tpause_cycles"
},
{
"BriefDescription": "Average CPU Utilization",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricName": "tma_info_system_cpu_utilization"
},
{
@@ -685,16 +748,19 @@
{
"BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.CORE_P:k / CPU_CLK_UNHALTED.CORE",
+ "MetricGroup": "Summary",
"MetricName": "tma_info_system_kernel_utilization"
},
{
"BriefDescription": "PerfMon Event Multiplexing accuracy indicator",
"MetricExpr": "CPU_CLK_UNHALTED.CORE_P / CPU_CLK_UNHALTED.CORE",
- "MetricName": "tma_info_system_mux"
+ "MetricName": "tma_info_system_mux",
+ "MetricThreshold": "tma_info_system_mux > 1.1 | tma_info_system_mux < 0.9"
},
{
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.CORE / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Power",
"MetricName": "tma_info_system_turbo_utilization"
},
{
@@ -718,90 +784,102 @@
"MetricName": "tma_info_uop_mix_x87_uop_ratio"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses.",
"MetricExpr": "TOPDOWN_FE_BOUND.ITLB_MISS / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_ifetch_latency_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation",
"MetricExpr": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL2;tma_L2_group;tma_bad_speculation_group",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
"MetricName": "tma_machine_clears",
+ "MetricThreshold": "tma_machine_clears > 0.05 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to memory reservation stalls in which a scheduler is not able to accept uops",
"MetricExpr": "TOPDOWN_BE_BOUND.MEM_SCHEDULER / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_mem_scheduler",
+ "MetricThreshold": "tma_mem_scheduler > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to IEC or FPC RAT stalls, which can be due to FIQ or IEC reservation stalls in which the integer, floating point or SIMD scheduler is not able to accept uops",
"MetricExpr": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_non_mem_scheduler",
+ "MetricThreshold": "tma_non_mem_scheduler > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear that requires the use of microcode (slow nuke)",
"MetricExpr": "TOPDOWN_BAD_SPECULATION.NUKE / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_nuke",
+ "MetricThreshold": "tma_nuke > 0.05 & (tma_machine_clears > 0.05 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to other common frontend stalls not categorized",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to other common frontend stalls not categorized.",
"MetricExpr": "TOPDOWN_FE_BOUND.OTHER / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_other_fb",
+ "MetricThreshold": "tma_other_fb > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to wrong predecodes",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to wrong predecodes.",
"MetricExpr": "TOPDOWN_FE_BOUND.PREDECODE / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_predecode",
+ "MetricThreshold": "tma_predecode > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the physical register file unable to accept an entry (marble stalls)",
"MetricExpr": "TOPDOWN_BE_BOUND.REGISTER / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_register",
+ "MetricThreshold": "tma_register > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the reorder buffer being full (ROB stalls)",
"MetricExpr": "TOPDOWN_BE_BOUND.REORDER_BUFFER / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_reorder_buffer",
+ "MetricThreshold": "tma_reorder_buffer > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to a resource limitation",
"MetricExpr": "tma_backend_bound - tma_core_bound",
- "MetricGroup": "Slots;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_resource_bound",
+ "MetricThreshold": "tma_resource_bound > 0.2 & tma_backend_bound > 0.1",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that result in retirement slots",
"MetricExpr": "TOPDOWN_RETIRING.ALL_P / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL1;tma_L1_group",
+ "MetricGroup": "TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
+ "MetricThreshold": "tma_retiring > 0.75",
"MetricgroupNoGroup": "TopdownL1",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to scoreboards from the instruction queue (IQ), jump execution unit (JEU), or microcode sequencer (MS)",
"MetricExpr": "TOPDOWN_BE_BOUND.SERIALIZATION / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_serialization",
+ "MetricThreshold": "tma_serialization > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
},
{
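
The recurring change in the metrics hunk above is the new "MetricThreshold" strings: each level-2/level-3 TMA node now fires only when its own share of slots is high and every parent clause in the hierarchy also holds. A minimal Python sketch of how one of these thresholds reads, with made-up metric values (the values and the helper name are illustrative, not part of the patch):

metrics = {
    "tma_backend_bound": 0.34,
    "tma_resource_bound": 0.25,
    "tma_mem_scheduler": 0.12,
}

def mem_scheduler_flagged(m):
    # Mirrors the threshold added above:
    # "tma_mem_scheduler > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)"
    return (m["tma_mem_scheduler"] > 0.1
            and m["tma_resource_bound"] > 0.2
            and m["tma_backend_bound"] > 0.1)

print(mem_scheduler_flagged(metrics))  # True for these sample values
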
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-cache.json b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-cache.json
index a779a1a73ea5..35b1763b0020 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-cache.json
@@ -10,6 +10,15 @@
"Unit": "CHACMS"
},
{
+ "BriefDescription": "UNC_CHACMS_DISTRESS_ASSERTED",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHACMS_DISTRESS_ASSERTED",
+ "PerPkg": "1",
+ "PortMask": "0x000",
+ "Unit": "CHACMS"
+ },
+ {
"BriefDescription": "Counts the number of cycles FAST trigger is received from the global FAST distress wire.",
"Counter": "0,1,2,3",
"EventCode": "0x34",
@@ -874,6 +883,38 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "Counts snoop filter capacity evictions for entries tracking exclusive lines in the core's cache. Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry. Does not count clean evictions such as when a core's cache replaces a tracked cacheline with a new cacheline.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3d",
+ "EventName": "UNC_CHA_SF_EVICTION.E_STATE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Filter Capacity Evictions : E state",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts snoop filter capacity evictions for entries tracking modified lines in the core's cache. Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry. Does not count clean evictions such as when a core's cache replaces a tracked cacheline with a new cacheline.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3d",
+ "EventName": "UNC_CHA_SF_EVICTION.M_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Filter Capacity Evictions : M state",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts snoop filter capacity evictions for entries tracking shared lines in the core's cache. Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry. Does not count clean evictions such as when a core's cache replaces a tracked cacheline with a new cacheline.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3d",
+ "EventName": "UNC_CHA_SF_EVICTION.S_STATE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Filter Capacity Evictions : S state",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "All TOR Inserts",
"Counter": "0,1,2,3",
"EventCode": "0x35",
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-interconnect.json
index 2ccbc8bca24e..251e5d20fefe 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-interconnect.json
@@ -839,12 +839,20 @@
"Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_I_MISC1.LOST_FWD",
- "Experimental": "1",
"PerPkg": "1",
"UMask": "0x10",
"Unit": "IRP"
},
{
+ "BriefDescription": "Misc Events - Set 1 : Received Invalid : Secondary received a transfer that did not have sufficient MESI state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1F",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_INVLD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
"BriefDescription": "Snoop Hit E/S responses",
"Counter": "0,1,2,3",
"EventCode": "0x12",
@@ -1562,21 +1570,56 @@
"Unit": "UPI"
},
{
- "BriefDescription": "Tx Flit Buffer Allocations : Number of allocations into the UPI Tx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
- "Counter": "0,1,2,3",
- "EventCode": "0x40",
- "EventName": "UNC_UPI_TxL_INSERTS",
+ "BriefDescription": "Message Received : Doorbell",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
"Experimental": "1",
"PerPkg": "1",
- "Unit": "UPI"
+ "UMask": "0x8",
+ "Unit": "UBOX"
},
{
- "BriefDescription": "Tx Flit Buffer Occupancy : Accumulates the number of flits in the TxQ. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Message Received : Interrupt",
+ "Counter": "0,1",
"EventCode": "0x42",
- "EventName": "UNC_UPI_TxL_OCCUPANCY",
+ "EventName": "UNC_U_EVENT_MSG.INT_PRIO",
"Experimental": "1",
"PerPkg": "1",
- "Unit": "UPI"
+ "PublicDescription": "Message Received : Interrupt : Interrupts",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : IPI",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.IPI_RCVD",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Message Received : IPI : Inter Processor Interrupts",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : MSI",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.MSI_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Message Received : MSI : Message Signaled Interrupts - interrupts sent by devices (including PCIe via IOxAPIC) (Socket Mode only)",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : VLW",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.VLW_RCVD",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Message Received : VLW : Virtual Logical Wire (legacy) message were received from Uncore.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
}
]
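
The new UNC_U_EVENT_MSG sub-events are all UBOX event 0x42 with disjoint umask bits, so they can in principle be OR-ed into a single raw encoding. A small sketch of that bit arithmetic (the PMU name in the printed string is a guess; actual uncore PMU naming varies by system and perf version):

# umask bits copied from the entries above.
UMASKS = {
    "VLW_RCVD": 0x1,
    "MSI_RCVD": 0x2,
    "IPI_RCVD": 0x4,
    "DOORBELL_RCVD": 0x8,
    "INT_PRIO": 0x10,
}

combined = 0
for bit in UMASKS.values():
    combined |= bit  # bits are disjoint, so this covers every message type

print(f"uncore_ubox/event=0x42,umask={combined:#x}/")  # umask=0x1f
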
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-io.json b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-io.json
index 886b99a971be..2ea2637df3fb 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-io.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-io.json
@@ -1121,8 +1121,9 @@
"Unit": "IIO"
},
{
- "BriefDescription": "Occupancy of outbound request queue : To device : Counts number of outbound requests/completions IIO is currently processing",
+ "BriefDescription": "This event is deprecated. [This event is alias to UNC_IIO_NUM_OUTSTANDING_REQ_FROM_CPU.TO_IO]",
"Counter": "2,3",
+ "Deprecated": "1",
"EventCode": "0xc5",
"EventName": "UNC_IIO_NUM_OUSTANDING_REQ_FROM_CPU.TO_IO",
"Experimental": "1",
@@ -1133,6 +1134,18 @@
"Unit": "IIO"
},
{
+ "BriefDescription": "Occupancy of outbound request queue : To device : Counts number of outbound requests/completions IIO is currently processing [This event is alias to UNC_IIO_NUM_OUSTANDING_REQ_FROM_CPU.TO_IO]",
+ "Counter": "2,3",
+ "EventCode": "0xc5",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_FROM_CPU.TO_IO",
+ "Experimental": "1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
"BriefDescription": "Passing data to be written",
"Counter": "0,1,2,3",
"EventCode": "0x88",
@@ -1301,6 +1314,17 @@
"Unit": "IIO"
},
{
+ "BriefDescription": "Posted requests sent by the integrated IO (IIO) controller to the Ubox, useful for counting message signaled interrupts (MSI).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8e",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.UBOX_POSTED",
+ "FCMask": "0x01",
+ "PerPkg": "1",
+ "PortMask": "0x0FF",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
"BriefDescription": "All 9 bits of Page Walk Tracker Occupancy",
"Counter": "0,1,2,3",
"EventCode": "0x42",
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-memory.json b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-memory.json
index ae9c62b32e92..a9fd7a34b24b 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-memory.json
@@ -60,6 +60,33 @@
"BriefDescription": "CAS count for SubChannel 0 regular reads",
"Counter": "0,1,2,3",
"EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT_SCH0.RD_NON_UNDERFILL",
+ "PerPkg": "1",
+ "UMask": "0xc3",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "CAS count for SubChannel 0 auto-precharge reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT_SCH0.RD_PRE_REG",
+ "PerPkg": "1",
+ "UMask": "0xc2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "CAS count for SubChannel 0 auto-precharge underfill reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT_SCH0.RD_PRE_UNDERFILL",
+ "PerPkg": "1",
+ "UMask": "0xc8",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "CAS count for SubChannel 0 regular reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
"EventName": "UNC_M_CAS_COUNT_SCH0.RD_REG",
"PerPkg": "1",
"UMask": "0xc1",
@@ -75,6 +102,15 @@
"Unit": "IMC"
},
{
+ "BriefDescription": "CAS count for SubChannel 0 underfill reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT_SCH0.RD_UNDERFILL_ALL",
+ "PerPkg": "1",
+ "UMask": "0xcc",
+ "Unit": "IMC"
+ },
+ {
"BriefDescription": "CAS count for SubChannel 0, all writes",
"Counter": "0,1,2,3",
"EventCode": "0x05",
@@ -125,6 +161,33 @@
"BriefDescription": "CAS count for SubChannel 1 regular reads",
"Counter": "0,1,2,3",
"EventCode": "0x06",
+ "EventName": "UNC_M_CAS_COUNT_SCH1.RD_NON_UNDERFILL",
+ "PerPkg": "1",
+ "UMask": "0xc3",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "CAS count for SubChannel 1 auto-precharge reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x06",
+ "EventName": "UNC_M_CAS_COUNT_SCH1.RD_PRE_REG",
+ "PerPkg": "1",
+ "UMask": "0xc2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "CAS count for SubChannel 1 auto-precharge underfill reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x06",
+ "EventName": "UNC_M_CAS_COUNT_SCH1.RD_PRE_UNDERFILL",
+ "PerPkg": "1",
+ "UMask": "0xc8",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "CAS count for SubChannel 1 regular reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x06",
"EventName": "UNC_M_CAS_COUNT_SCH1.RD_REG",
"PerPkg": "1",
"UMask": "0xc1",
@@ -140,6 +203,15 @@
"Unit": "IMC"
},
{
+ "BriefDescription": "CAS count for SubChannel 1 underfill reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x06",
+ "EventName": "UNC_M_CAS_COUNT_SCH1.RD_UNDERFILL_ALL",
+ "PerPkg": "1",
+ "UMask": "0xcc",
+ "Unit": "IMC"
+ },
+ {
"BriefDescription": "CAS count for SubChannel 1, all writes",
"Counter": "0,1,2,3",
"EventCode": "0x06",
@@ -189,13 +261,92 @@
"Unit": "IMC"
},
{
+ "BriefDescription": "# of cycles MR4 temp readings forced 2x refresh",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M_MR4_2XREF_CYCLES.SCH0_DIMM0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles MR4 temp readings forced 2x refresh",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M_MR4_2XREF_CYCLES.SCH0_DIMM1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles MR4 temp readings forced 2x refresh",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M_MR4_2XREF_CYCLES.SCH1_DIMM0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles MR4 temp readings forced 2x refresh",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M_MR4_2XREF_CYCLES.SCH1_DIMM1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles MR4 MRRs was triggered/running",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M_PDC_MR4ACTIVE_CYCLES.SCH0_DIMM0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles MR4 MRRs was triggered/running",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M_PDC_MR4ACTIVE_CYCLES.SCH0_DIMM1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles MR4 MRRs was triggered/running",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M_PDC_MR4ACTIVE_CYCLES.SCH1_DIMM0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles MR4 MRRs was triggered/running",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M_PDC_MR4ACTIVE_CYCLES.SCH1_DIMM1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IMC"
+ },
+ {
"BriefDescription": "# of cycles a given rank is in Power Down Mode",
"Counter": "0,1,2,3",
"EventCode": "0x47",
"EventName": "UNC_M_POWERDOWN_CYCLES.SCH0_RANK0",
"Experimental": "1",
"PerPkg": "1",
- "PublicDescription": "-",
"UMask": "0x1",
"Unit": "IMC"
},
@@ -206,7 +357,6 @@
"EventName": "UNC_M_POWERDOWN_CYCLES.SCH0_RANK1",
"Experimental": "1",
"PerPkg": "1",
- "PublicDescription": "-",
"UMask": "0x2",
"Unit": "IMC"
},
@@ -217,7 +367,6 @@
"EventName": "UNC_M_POWERDOWN_CYCLES.SCH0_RANK2",
"Experimental": "1",
"PerPkg": "1",
- "PublicDescription": "-",
"UMask": "0x4",
"Unit": "IMC"
},
@@ -228,7 +377,6 @@
"EventName": "UNC_M_POWERDOWN_CYCLES.SCH0_RANK3",
"Experimental": "1",
"PerPkg": "1",
- "PublicDescription": "-",
"UMask": "0x8",
"Unit": "IMC"
},
@@ -239,7 +387,6 @@
"EventName": "UNC_M_POWERDOWN_CYCLES.SCH1_RANK0",
"Experimental": "1",
"PerPkg": "1",
- "PublicDescription": "-",
"UMask": "0x10",
"Unit": "IMC"
},
@@ -250,7 +397,6 @@
"EventName": "UNC_M_POWERDOWN_CYCLES.SCH1_RANK1",
"Experimental": "1",
"PerPkg": "1",
- "PublicDescription": "-",
"UMask": "0x20",
"Unit": "IMC"
},
@@ -261,7 +407,6 @@
"EventName": "UNC_M_POWERDOWN_CYCLES.SCH1_RANK2",
"Experimental": "1",
"PerPkg": "1",
- "PublicDescription": "-",
"UMask": "0x40",
"Unit": "IMC"
},
@@ -272,7 +417,6 @@
"EventName": "UNC_M_POWERDOWN_CYCLES.SCH1_RANK3",
"Experimental": "1",
"PerPkg": "1",
- "PublicDescription": "-",
"UMask": "0x80",
"Unit": "IMC"
},
@@ -283,7 +427,66 @@
"EventName": "UNC_M_POWER_CHANNEL_PPD_CYCLES",
"Experimental": "1",
"PerPkg": "1",
- "PublicDescription": "-",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at Critical level on specified DIMM and throttle level is zero.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x89",
+ "EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES.SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at Critical level on specified DIMM and throttle level is zero.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x89",
+ "EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES.SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "UNC_M_POWER_THROTTLE_CYCLES.BW_SLOT0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.BW_SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "UNC_M_POWER_THROTTLE_CYCLES.BW_SLOT1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.BW_SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "MR4 temp reading is throttling",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.MR4BLKEN",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "RAPL is throttling",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RAPLBLK",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
"Unit": "IMC"
},
{
@@ -465,7 +668,6 @@
"EventName": "UNC_M_SELF_REFRESH.ENTER_SUCCESS",
"Experimental": "1",
"PerPkg": "1",
- "PublicDescription": "UNC_M_SELF_REFRESH.ENTER_SUCCESS",
"UMask": "0x2",
"Unit": "IMC"
},
@@ -476,11 +678,90 @@
"EventName": "UNC_M_SELF_REFRESH.ENTER_SUCCESS_CYCLES",
"Experimental": "1",
"PerPkg": "1",
- "PublicDescription": "-",
"UMask": "0x1",
"Unit": "IMC"
},
{
+ "BriefDescription": "# of cycles Throttling at Critical level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M_THROTTLE_CRIT_CYCLES.SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at Critical level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M_THROTTLE_CRIT_CYCLES.SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at High level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8d",
+ "EventName": "UNC_M_THROTTLE_HIGH_CYCLES.SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at High level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8d",
+ "EventName": "UNC_M_THROTTLE_HIGH_CYCLES.SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at Normal level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8b",
+ "EventName": "UNC_M_THROTTLE_LOW_CYCLES.SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at Normal level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8b",
+ "EventName": "UNC_M_THROTTLE_LOW_CYCLES.SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at Mid level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M_THROTTLE_MID_CYCLES.SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at Mid level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M_THROTTLE_MID_CYCLES.SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
"BriefDescription": "Write Pending Queue Allocations",
"Counter": "0,1,2,3",
"EventCode": "0x22",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
index 2a76dd01fb52..707bd8790ed2 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
@@ -1,49 +1,49 @@
[
{
"BriefDescription": "C2 residency percent per package",
- "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per core",
- "MetricExpr": "cstate_core@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per package",
- "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per core",
- "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per package",
- "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per core",
- "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per package",
- "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Pkg_Residency",
"ScaleUnit": "100%"
@@ -74,12 +74,13 @@
"MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_4k_aliasing",
- "MetricThreshold": "tma_4k_aliasing > 0.2 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound)",
+ "MetricThreshold": "tma_4k_aliasing > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations",
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5 + UOPS_DISPATCHED_PORT.PORT_6) / tma_info_thread_slots",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_alu_op_utilization",
@@ -91,7 +92,7 @@
"MetricExpr": "34 * (FP_ASSIST.ANY + OTHER_ASSISTS.ANY) / tma_info_thread_slots",
"MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
- "MetricThreshold": "tma_assists > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: OTHER_ASSISTS.ANY",
"ScaleUnit": "100%"
},
@@ -102,7 +103,7 @@
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
"ScaleUnit": "100%"
},
{
@@ -112,12 +113,13 @@
"MetricName": "tma_bad_speculation",
"MetricThreshold": "tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL1",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
- "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
"MetricGroup": "BigFootprint;BvBC;Fed;Frontend;IcMiss;MemoryTLB",
"MetricName": "tma_bottleneck_big_code",
"MetricThreshold": "tma_bottleneck_big_code > 20"
@@ -131,39 +133,44 @@
"PublicDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. (A lower bound)"
},
{
+ "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
+ "MetricGroup": "BvCB;Cor;tma_issueComp",
+ "MetricName": "tma_bottleneck_compute_bound_est",
+ "MetricThreshold": "tma_bottleneck_compute_bound_est > 20",
+ "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. Related metrics: "
+ },
+ {
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_fb_full / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
"MetricGroup": "BvMB;Mem;MemoryBW;Offcore;tma_issueBW",
- "MetricName": "tma_bottleneck_cache_memory_bandwidth",
- "MetricThreshold": "tma_bottleneck_cache_memory_bandwidth > 20",
+ "MetricName": "tma_bottleneck_data_cache_memory_bandwidth",
+ "MetricThreshold": "tma_bottleneck_data_cache_memory_bandwidth > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_l1_latency_dependency / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_lock_latency / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_split_loads / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_split_stores / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_dtlb_store)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_store_latency / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_dtlb_store)))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l1_latency_dependency / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_lock_latency / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_loads / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_stores / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)))",
"MetricGroup": "BvML;Mem;MemoryLat;Offcore;tma_issueLat",
- "MetricName": "tma_bottleneck_cache_memory_latency",
- "MetricThreshold": "tma_bottleneck_cache_memory_latency > 20",
+ "MetricName": "tma_bottleneck_data_cache_memory_latency",
+ "MetricThreshold": "tma_bottleneck_data_cache_memory_latency > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency"
},
{
- "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
- "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_serializing_operation + tma_ports_utilization) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_serializing_operation + tma_ports_utilization)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
- "MetricGroup": "BvCB;Cor;tma_issueComp",
- "MetricName": "tma_bottleneck_compute_bound_est",
- "MetricThreshold": "tma_bottleneck_compute_bound_est > 20",
- "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy"
- },
- {
"BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)",
- "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_mispredicts_resteers + tma_clears_resteers + tma_unknown_branches)) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches)) - tma_bottleneck_big_code",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_bottleneck_big_code",
"MetricGroup": "BvFB;Fed;FetchBW;Frontend",
"MetricName": "tma_bottleneck_instruction_fetch_bw",
"MetricThreshold": "tma_bottleneck_instruction_fetch_bw > 20"
},
{
"BriefDescription": "Total pipeline cost of irregular execution (e.g",
- "MetricExpr": "100 * (tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_mispredicts_resteers + tma_clears_resteers + tma_unknown_branches)) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + tma_core_bound * RS_EVENTS.EMPTY_CYCLES / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_serializing_operation + tma_ports_utilization) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + tma_core_bound * RS_EVENTS.EMPTY_CYCLES / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
"MetricGroup": "Bad;BvIO;Cor;Ret;tma_issueMS",
"MetricName": "tma_bottleneck_irregular_overhead",
"MetricThreshold": "tma_bottleneck_irregular_overhead > 10",
@@ -171,7 +178,8 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_dtlb_store / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_dtlb_store)))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)))",
"MetricGroup": "BvMT;Mem;MemoryTLB;Offcore;tma_issueTLB",
"MetricName": "tma_bottleneck_memory_data_tlbs",
"MetricThreshold": "tma_bottleneck_memory_data_tlbs > 20",
@@ -179,15 +187,17 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * tma_false_sharing / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_dtlb_store - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
"MetricGroup": "BvMS;LockCont;Mem;Offcore;tma_issueSyncxn",
"MetricName": "tma_bottleneck_memory_synchronization",
"MetricThreshold": "tma_bottleneck_memory_synchronization > 10",
- "PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears"
+ "PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache"
},
{
"BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
- "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Bad;BadSpec;BrMispredicts;BvMP;tma_issueBM",
"MetricName": "tma_bottleneck_mispredictions",
"MetricThreshold": "tma_bottleneck_mispredictions > 20",
@@ -195,14 +205,16 @@
},
{
"BriefDescription": "Total pipeline cost of remaining bottlenecks in the back-end",
- "MetricExpr": "100 - (tma_bottleneck_big_code + tma_bottleneck_instruction_fetch_bw + tma_bottleneck_mispredictions + tma_bottleneck_cache_memory_bandwidth + tma_bottleneck_cache_memory_latency + tma_bottleneck_memory_data_tlbs + tma_bottleneck_memory_synchronization + tma_bottleneck_compute_bound_est + tma_bottleneck_irregular_overhead + tma_bottleneck_branching_overhead + tma_bottleneck_useful_work)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 - (tma_bottleneck_big_code + tma_bottleneck_instruction_fetch_bw + tma_bottleneck_mispredictions + tma_bottleneck_data_cache_memory_bandwidth + tma_bottleneck_data_cache_memory_latency + tma_bottleneck_memory_data_tlbs + tma_bottleneck_memory_synchronization + tma_bottleneck_compute_bound_est + tma_bottleneck_irregular_overhead + tma_bottleneck_branching_overhead + tma_bottleneck_useful_work)",
"MetricGroup": "BvOB;Cor;Offcore",
"MetricName": "tma_bottleneck_other_bottlenecks",
"MetricThreshold": "tma_bottleneck_other_bottlenecks > 20",
- "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls"
+ "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls."
},
{
- "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead",
+ "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + 2 * BR_INST_RETIRED.NEAR_CALL + INST_RETIRED.NOP) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
"MetricGroup": "BvUW;Ret",
"MetricName": "tma_bottleneck_useful_work",
@@ -224,17 +236,18 @@
"MetricExpr": "INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks + tma_unknown_branches",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_branch_resteers",
- "MetricThreshold": "tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_l3_hit_latency, tma_store_latency",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
"MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_cisc",
- "MetricThreshold": "tma_cisc > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
- "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
"ScaleUnit": "100%"
},
{
@@ -242,7 +255,7 @@
"MetricExpr": "(1 - BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT)) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
"MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
"MetricName": "tma_clears_resteers",
- "MetricThreshold": "tma_clears_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
"ScaleUnit": "100%"
},
@@ -251,7 +264,7 @@
"MetricExpr": "max(0, tma_itlb_misses - tma_code_stlb_miss)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group",
"MetricName": "tma_code_stlb_hit",
- "MetricThreshold": "tma_code_stlb_hit > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_hit > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
@@ -259,33 +272,33 @@
"MetricExpr": "ITLB_MISSES.WALK_ACTIVE / tma_info_thread_clks",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group",
"MetricName": "tma_code_stlb_miss",
- "MetricThreshold": "tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for (instruction) code accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for (instruction) code accesses.",
"MetricExpr": "tma_code_stlb_miss * ITLB_MISSES.WALK_COMPLETED_2M_4M / (ITLB_MISSES.WALK_COMPLETED_4K + ITLB_MISSES.WALK_COMPLETED_2M_4M)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group",
"MetricName": "tma_code_stlb_miss_2m",
- "MetricThreshold": "tma_code_stlb_miss_2m > 0.05 & tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss_2m > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for (instruction) code accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for (instruction) code accesses.",
"MetricExpr": "tma_code_stlb_miss * ITLB_MISSES.WALK_COMPLETED_4K / (ITLB_MISSES.WALK_COMPLETED_4K + ITLB_MISSES.WALK_COMPLETED_2M_4M)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group",
"MetricName": "tma_code_stlb_miss_4k",
- "MetricThreshold": "tma_code_stlb_miss_4k > 0.05 & tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss_4k > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "((22 * tma_info_system_core_frequency - 3.5 * tma_info_system_core_frequency) * MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM + (20 * tma_info_system_core_frequency - 3.5 * tma_info_system_core_frequency) * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "(18.5 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM + 16.5 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
- "MetricThreshold": "tma_contested_accesses > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM, MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_bottleneck_memory_synchronization, tma_data_sharing, tma_false_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_bottleneck_memory_synchronization, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
@@ -296,25 +309,25 @@
"MetricName": "tma_core_bound",
"MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations)",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "(20 * tma_info_system_core_frequency - 3.5 * tma_info_system_core_frequency) * MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "16.5 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
- "MetricThreshold": "tma_data_sharing > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_false_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder",
- "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=0x1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=0x2@) / tma_info_core_core_clks / 2",
+ "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_core_clks / 2",
"MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group",
"MetricName": "tma_decoder0_alone",
- "MetricThreshold": "tma_decoder0_alone > 0.1 & tma_mite > 0.1 & tma_fetch_bandwidth > 0.2",
+ "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions",
"ScaleUnit": "100%"
},
@@ -323,7 +336,7 @@
"MetricExpr": "ARITH.DIVIDER_ACTIVE / tma_info_thread_clks",
"MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
- "MetricThreshold": "tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
"ScaleUnit": "100%"
},
@@ -333,7 +346,7 @@
"MetricExpr": "CYCLE_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks + (CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks - tma_l2_bound",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_dram_bound",
- "MetricThreshold": "tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_MISS",
"ScaleUnit": "100%"
},
@@ -343,7 +356,7 @@
"MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_dsb",
"MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2",
- "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
"ScaleUnit": "100%"
},
{
@@ -351,27 +364,27 @@
"MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_thread_clks",
"MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_dsb_switches",
- "MetricThreshold": "tma_dsb_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
"MetricConstraint": "NO_GROUP_EVENTS_NMI",
- "MetricExpr": "min(9 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=0x1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
+ "MetricExpr": "min(9 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
- "MetricThreshold": "tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_store",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
- "MetricExpr": "(9 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=0x1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_core_clks",
+ "MetricExpr": "(9 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_core_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
- "MetricThreshold": "tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_load",
"ScaleUnit": "100%"
},
{
@@ -380,18 +393,18 @@
"MetricExpr": "22 * tma_info_system_core_frequency * OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
- "MetricThreshold": "tma_false_sharing > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM, OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
"MetricConstraint": "NO_GROUP_EVENTS_NMI",
- "MetricExpr": "tma_info_memory_load_miss_real_latency * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=0x1@ / tma_info_thread_clks",
+ "MetricExpr": "tma_info_memory_load_miss_real_latency * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
- "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
"ScaleUnit": "100%"
},
{
@@ -401,7 +414,7 @@
"MetricName": "tma_fetch_bandwidth",
"MetricThreshold": "tma_fetch_bandwidth > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1, FRONTEND_RETIRED.LATENCY_GE_1, FRONTEND_RETIRED.LATENCY_GE_2. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1;FRONTEND_RETIRED.LATENCY_GE_1;FRONTEND_RETIRED.LATENCY_GE_2. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
@@ -411,7 +424,7 @@
"MetricName": "tma_fetch_latency",
"MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16, FRONTEND_RETIRED.LATENCY_GE_8",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
"ScaleUnit": "100%"
},
{
@@ -431,7 +444,7 @@
"MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fp_arith",
"MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
"ScaleUnit": "100%"
},
{
@@ -440,7 +453,7 @@
"MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group",
"MetricName": "tma_fp_assists",
"MetricThreshold": "tma_fp_assists > 0.1",
- "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals)",
+ "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals).",
"ScaleUnit": "100%"
},
{
@@ -448,18 +461,17 @@
"MetricExpr": "FP_ARITH_INST_RETIRED.SCALAR / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_scalar",
- "MetricThreshold": "tma_fp_scalar > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "FP_ARITH_INST_RETIRED.VECTOR / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_vector",
- "MetricThreshold": "tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -467,8 +479,8 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE) / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_128b",
- "MetricThreshold": "tma_fp_vector_128b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -476,8 +488,8 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_256b",
- "MetricThreshold": "tma_fp_vector_256b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -487,50 +499,51 @@
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions , where one uop can represent multiple contiguous instructions",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions",
"MetricExpr": "tma_light_operations * UOPS_RETIRED.MACRO_FUSED / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fused_instructions",
"MetricThreshold": "tma_fused_instructions > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions , where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
"MetricExpr": "(UOPS_RETIRED.RETIRE_SLOTS + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY) / tma_info_thread_slots",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_heavy_operations",
"MetricThreshold": "tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+])",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+])",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
- "MetricExpr": "(ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=0x1\\,edge\\=0x1@) / tma_info_thread_clks",
+ "MetricExpr": "(ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@) / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
- "MetricThreshold": "tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS, FRONTEND_RETIRED.L1I_MISS",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Branch Misprediction Cost: Cycles representing fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_bottleneck_mispredictions * tma_info_thread_slots / 4 / BR_MISP_RETIRED.ALL_BRANCHES / 100",
"MetricGroup": "Bad;BrMispredicts;tma_issueBM",
"MetricName": "tma_info_bad_spec_branch_misprediction_cost",
"PublicDescription": "Branch Misprediction Cost: Cycles representing fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_bottleneck_mispredictions, tma_branch_mispredicts, tma_mispredicts_resteers"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
"MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * BR_MISP_EXEC.INDIRECT)",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_indirect",
- "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1000"
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3"
},
{
"BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
@@ -555,7 +568,8 @@
},
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck",
- "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_latency + tma_fetch_bandwidth)) * (tma_dsb / (tma_mite + tma_dsb)))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_bandwidth + tma_fetch_latency)) * (tma_dsb / (tma_dsb + tma_mite)))",
"MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_bandwidth",
"MetricThreshold": "tma_info_botlnk_l2_dsb_bandwidth > 10",
@@ -564,7 +578,7 @@
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_mite / (tma_mite + tma_dsb))",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_mite))",
"MetricGroup": "DSBmiss;Fed;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_misses",
"MetricThreshold": "tma_info_botlnk_l2_dsb_misses > 10",
@@ -572,10 +586,12 @@
},
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
- "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
"MetricName": "tma_info_botlnk_l2_ic_misses",
- "MetricThreshold": "tma_info_botlnk_l2_ic_misses > 5"
+ "MetricThreshold": "tma_info_botlnk_l2_ic_misses > 5",
+ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: "
},
{
"BriefDescription": "Fraction of branches that are CALL or RET",
@@ -604,7 +620,7 @@
},
{
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
- "MetricExpr": "(CPU_CLK_UNHALTED.THREAD_ANY / 2 if #SMT_on else tma_info_thread_clks)",
+ "MetricExpr": "(CPU_CLK_UNHALTED.THREAD / 2 * (1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK) if #core_wide < 1 else (CPU_CLK_UNHALTED.THREAD_ANY / 2 if #SMT_on else tma_info_thread_clks))",
"MetricGroup": "SMT",
"MetricName": "tma_info_core_core_clks"
},
@@ -632,11 +648,11 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED.VECTOR) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
- "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)"
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=0x1@",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "tma_info_core_ilp"
},
@@ -649,20 +665,20 @@
"PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp"
},
{
- "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details",
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
"MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / DSB2MITE_SWITCHES.COUNT",
"MetricGroup": "DSBmiss",
"MetricName": "tma_info_frontend_dsb_switch_cost"
},
{
"BriefDescription": "Average number of Uops issued by front-end when it issued something",
- "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=0x1@",
+ "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=1@",
"MetricGroup": "Fed;FetchBW",
"MetricName": "tma_info_frontend_fetch_upc"
},
{
"BriefDescription": "Average Latency for L1 instruction cache misses",
- "MetricExpr": "ICACHE_16B.IFDATA_STALL / cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=0x1\\,edge\\=0x1@ + 2",
+ "MetricExpr": "ICACHE_16B.IFDATA_STALL / cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@ + 2",
"MetricGroup": "Fed;FetchLat;IcMiss",
"MetricName": "tma_info_frontend_icache_miss_latency"
},
@@ -698,7 +714,7 @@
"MetricName": "tma_info_frontend_tbpc"
},
{
- "BriefDescription": "Branch instructions per taken branch",
+ "BriefDescription": "Branch instructions per taken branch.",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;PGO",
"MetricName": "tma_info_inst_mix_bptkbranch"
@@ -712,12 +728,11 @@
},
{
"BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED.VECTOR)",
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
"MetricThreshold": "tma_info_inst_mix_iparith < 10",
- "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW"
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
@@ -725,7 +740,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx128",
"MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
@@ -733,7 +748,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx256",
"MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
@@ -741,7 +756,7 @@
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_dp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
@@ -749,7 +764,7 @@
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_sp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
@@ -799,7 +814,7 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
- "MetricThreshold": "tma_info_inst_mix_iptb < 4 * 2 + 1",
+ "MetricThreshold": "tma_info_inst_mix_iptb < 9",
"PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp"
},
{
@@ -974,8 +989,8 @@
"MetricName": "tma_info_memory_tlb_store_stlb_mpki"
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per core",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=0x1@)",
+ "BriefDescription": "Mem;Backend;CacheHits",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute"
},
@@ -992,22 +1007,28 @@
"MetricName": "tma_info_pipeline_fetch_mite"
},
{
+ "BriefDescription": "Average number of uops fetched from MS per cycle",
+ "MetricExpr": "IDQ.MS_UOPS / cpu@IDQ.MS_UOPS\\,cmask\\=1@",
+ "MetricGroup": "Fed;FetchLat;MicroSeq",
+ "MetricName": "tma_info_pipeline_fetch_ms"
+ },
+ {
"BriefDescription": "Instructions per a microcode Assist invocation",
"MetricExpr": "INST_RETIRED.ANY / (FP_ASSIST.ANY + OTHER_ASSISTS.ANY)",
"MetricGroup": "MicroSeq;Pipeline;Ret;Retire",
"MetricName": "tma_info_pipeline_ipassist",
- "MetricThreshold": "tma_info_pipeline_ipassist < 100000",
+ "MetricThreshold": "tma_info_pipeline_ipassist < 100e3",
"PublicDescription": "Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)"
},
{
- "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=0x1@",
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
"MetricGroup": "Pipeline;Ret",
"MetricName": "tma_info_pipeline_retire"
},
{
"BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]",
- "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / tma_info_system_time",
+ "MetricExpr": "tma_info_system_turbo_utilization * msr@tsc@ / 1e9 / tma_info_system_time",
"MetricGroup": "Power;Summary",
"MetricName": "tma_info_system_core_frequency"
},
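[Editor's note] tma_info_system_core_frequency now reads the timestamp counter through the msr PMU (msr@tsc@) instead of the bare TSC alias; the formula itself is unchanged: turbo utilization times TSC ticks, scaled to GHz and divided by wall time. A worked sketch with invented sample readings:

# Measured average core frequency in GHz, mirroring
#   tma_info_system_turbo_utilization * msr@tsc@ / 1e9 / tma_info_system_time
# All inputs below are hypothetical sample readings.
turbo_utilization = 1.25    # core clocks per reference clock
tsc_ticks = 6.0e9           # msr/tsc/ over the measurement window
wall_time_s = 2.0           # tma_info_system_time

core_frequency_ghz = turbo_utilization * tsc_ticks / 1e9 / wall_time_s
print(f"average core frequency: {core_frequency_ghz:.2f} GHz")  # -> 3.75 GHz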
@@ -1019,7 +1040,7 @@
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
@@ -1028,7 +1049,7 @@
"MetricExpr": "64 * (UNC_ARB_TRK_REQUESTS.ALL + UNC_ARB_COH_TRK_REQUESTS.ALL) / 1e6 / tma_info_system_time / 1e3",
"MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW",
"MetricName": "tma_info_system_dram_bw_use",
- "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth, tma_sq_full"
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth, tma_sq_full"
},
{
"BriefDescription": "Giga Floating Point Operations Per Second",
@@ -1043,14 +1064,13 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
"MetricGroup": "Branches;OS",
"MetricName": "tma_info_system_ipfarbranch",
- "MetricThreshold": "tma_info_system_ipfarbranch < 1000000"
+ "MetricThreshold": "tma_info_system_ipfarbranch < 1e6"
},
{
"BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
"MetricGroup": "OS",
- "MetricName": "tma_info_system_kernel_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_system_kernel_cpi"
},
{
"BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
@@ -1061,7 +1081,7 @@
},
{
"BriefDescription": "Average number of parallel data read requests to external memory",
- "MetricExpr": "UNC_ARB_TRK_OCCUPANCY.DATA_READ / UNC_ARB_TRK_OCCUPANCY.DATA_READ@cmask\\=0x1@",
+ "MetricExpr": "UNC_ARB_TRK_OCCUPANCY.DATA_READ / UNC_ARB_TRK_OCCUPANCY.DATA_READ@cmask\\=1@",
"MetricGroup": "Mem;MemoryBW;SoC",
"MetricName": "tma_info_system_mem_parallel_reads",
"PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches"
@@ -1112,7 +1132,7 @@
"MetricName": "tma_info_system_turbo_utilization"
},
{
- "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active",
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Pipeline",
"MetricName": "tma_info_thread_clks"
@@ -1121,15 +1141,14 @@
"BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / tma_info_thread_ipc",
"MetricGroup": "Mem;Pipeline",
- "MetricName": "tma_info_thread_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_thread_cpi"
},
{
"BriefDescription": "The ratio of Executed- by Issued-Uops",
"MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
"MetricGroup": "Cor;Pipeline",
"MetricName": "tma_info_thread_execute_per_issue",
- "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage"
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
},
{
"BriefDescription": "Instructions Per Cycle (per Logical Processor)",
@@ -1155,15 +1174,15 @@
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
- "MetricThreshold": "tma_info_thread_uptb < 4 * 1.5"
+ "MetricThreshold": "tma_info_thread_uptb < 6"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
"MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
- "MetricThreshold": "tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS, FRONTEND_RETIRED.ITLB_MISS",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
"ScaleUnit": "100%"
},
{
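[Editor's note] Many thresholds in this update, like tma_itlb_misses above, gain explicit parentheses: "a > x & b > y & c > z" becomes "a > x & (b > y & c > z)". Because "&" here is plain logical AND, the grouping is associative and the rewrite only spells out the nesting of the parent-node conditions. A quick equivalence check, assuming Python's "and" models the metric "&":

# The re-parenthesized thresholds are logically identical:
# AND is associative, so a & b & c == a & (b & c) for booleans.
from itertools import product

for a, b, c in product([False, True], repeat=3):
    assert (a and b and c) == (a and (b and c))
print("flat and nested threshold forms agree on all inputs")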
@@ -1171,26 +1190,27 @@
"MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_thread_clks, 0)",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
"MetricName": "tma_l1_bound",
- "MetricThreshold": "tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 Data (L1D) cache. The L1D cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1D. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache",
+ "BriefDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "min(2 * (MEM_INST_RETIRED.ALL_LOADS - MEM_LOAD_RETIRED.FB_HIT - MEM_LOAD_RETIRED.L1_MISS) * 20 / 100, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_l1_latency_dependency",
- "MetricThreshold": "tma_l1_latency_dependency > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache. The short latency of the L1D cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
+ "MetricThreshold": "tma_l1_latency_dependency > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache. The short latency of the L1D cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=0x1@) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks)",
+ "MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks)",
"MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
- "MetricThreshold": "tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT",
"ScaleUnit": "100%"
},
@@ -1199,7 +1219,7 @@
"MetricExpr": "3.5 * tma_info_system_core_frequency * MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_l2_bound_group",
"MetricName": "tma_l2_hit_latency",
- "MetricThreshold": "tma_l2_hit_latency > 0.05 & tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l2_hit_latency > 0.05 & (tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L2 cache under unloaded scenarios (possibly L2 latency limited). Avoiding L1 cache misses (i.e. L1 misses/L2 hits) will improve the latency. Sample with: MEM_LOAD_RETIRED.L2_HIT",
"ScaleUnit": "100%"
},
@@ -1208,17 +1228,17 @@
"MetricExpr": "(CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS) / tma_info_thread_clks",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
- "MetricThreshold": "tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
- "MetricExpr": "(10 * tma_info_system_core_frequency - 3.5 * tma_info_system_core_frequency) * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks",
+ "MetricExpr": "6.5 * tma_info_system_core_frequency * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
- "MetricThreshold": "tma_l3_hit_latency > 0.1 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT. Related metrics: tma_bottleneck_cache_memory_latency, tma_branch_resteers, tma_mem_latency, tma_store_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_bottleneck_data_cache_memory_latency, tma_mem_latency",
"ScaleUnit": "100%"
},
{
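[Editor's note] The tma_l3_hit_latency rewrite below folds "10 * f - 3.5 * f" into "6.5 * f", which is the same value; reading the constants as the ~10-clock L3 hit cost minus the ~3.5 clocks already charged to tma_l2_hit_latency is an inference from the sibling metric, not stated in the patch. A sketch of the folded expression with invented counts:

# tma_l3_hit_latency folds (10 * f - 3.5 * f) to 6.5 * f -- same value.
f = 3.75  # tma_info_system_core_frequency in GHz, hypothetical
assert 10 * f - 3.5 * f == 6.5 * f

# Full expression shape, with invented event counts:
l3_hit = 2_000_000     # MEM_LOAD_RETIRED.L3_HIT
fb_hit = 300_000       # MEM_LOAD_RETIRED.FB_HIT
l1_miss = 3_000_000    # MEM_LOAD_RETIRED.L1_MISS
clks = 9.0e9           # tma_info_thread_clks

stall_fraction = 6.5 * f * (l3_hit * (1 + fb_hit / l1_miss / 2)) / clks
print(f"estimated L3-hit stall fraction: {stall_fraction:.4f}")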
@@ -1226,22 +1246,23 @@
"MetricExpr": "DECODE.LCP / tma_info_thread_clks",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_lcp",
- "MetricThreshold": "tma_lcp > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation)",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
"MetricExpr": "tma_retiring - tma_heavy_operations",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_light_operations",
"MetricThreshold": "tma_light_operations > 0.6",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_2 + UOPS_DISPATCHED_PORT.PORT_3 + UOPS_DISPATCHED_PORT.PORT_7 - UOPS_DISPATCHED_PORT.PORT_4) / (2 * tma_info_core_core_clks)",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_load_op_utilization",
@@ -1255,7 +1276,7 @@
"MetricExpr": "tma_dtlb_load - tma_load_stlb_miss",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
"MetricName": "tma_load_stlb_hit",
- "MetricThreshold": "tma_load_stlb_hit > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_hit > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
@@ -1263,39 +1284,43 @@
"MetricExpr": "DTLB_LOAD_MISSES.WALK_ACTIVE / tma_info_thread_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
"MetricName": "tma_load_stlb_miss",
- "MetricThreshold": "tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data load accesses.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_1G / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_1g",
- "MetricThreshold": "tma_load_stlb_miss_1g > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_1g > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data load accesses.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_2m",
- "MetricThreshold": "tma_load_stlb_miss_2m > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_2m > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data load accesses.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_4K / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_4k",
- "MetricThreshold": "tma_load_stlb_miss_4k > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_4k > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(12 * max(0, MEM_INST_RETIRED.LOCK_LOADS - L2_RQSTS.ALL_RFO) + MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES * (9 * L2_RQSTS.RFO_HIT + min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO))) / tma_info_thread_clks",
"MetricGroup": "LockCont;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
- "MetricThreshold": "tma_lock_latency > 0.2 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
"ScaleUnit": "100%"
},
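[Editor's note] The tma_load_stlb_miss_{1g,2m,4k} nodes above split the STLB-miss cost by the page size the completed page walks resolved to; the three WALK_COMPLETED fractions sum to 1, so the children exactly partition the parent. A sketch with invented walk counts:

# Splitting tma_load_stlb_miss across page sizes, mirroring e.g.
#   tma_load_stlb_miss * WALK_COMPLETED_1G
#   / (WALK_COMPLETED_4K + WALK_COMPLETED_2M_4M + WALK_COMPLETED_1G)
# Walk counts below are hypothetical.
walks = {"4k": 70_000, "2m": 25_000, "1g": 5_000}
tma_load_stlb_miss = 0.08  # parent node's cycle fraction

total = sum(walks.values())
children = {size: tma_load_stlb_miss * n / total for size, n in walks.items()}

assert abs(sum(children.values()) - tma_load_stlb_miss) < 1e-12
for size, frac in children.items():
    print(f"tma_load_stlb_miss_{size}: {frac:.4f}")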
@@ -1307,16 +1332,16 @@
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_bottleneck_memory_synchronization, tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_bottleneck_memory_synchronization, tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
- "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=0x4@) / tma_info_thread_clks",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
- "MetricThreshold": "tma_mem_bandwidth > 0.2 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
"ScaleUnit": "100%"
},
{
@@ -1324,8 +1349,8 @@
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
"MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
- "MetricThreshold": "tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_bottleneck_cache_memory_latency, tma_l3_hit_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_bottleneck_data_cache_memory_latency, tma_l3_hit_latency",
"ScaleUnit": "100%"
},
{
@@ -1336,11 +1361,11 @@
"MetricName": "tma_memory_bound",
"MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two)",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations , uops for memory load or store accesses",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
"MetricExpr": "tma_light_operations * MEM_INST_RETIRED.ANY / INST_RETIRED.ANY",
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_memory_operations",
@@ -1349,7 +1374,6 @@
},
{
"BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_thread_slots",
"MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
"MetricName": "tma_microcode_sequencer",
@@ -1359,10 +1383,11 @@
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
"MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
"MetricName": "tma_mispredicts_resteers",
- "MetricThreshold": "tma_mispredicts_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_bottleneck_mispredictions, tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost",
"ScaleUnit": "100%"
},
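[Editor's note] tma_mispredicts_resteers above attributes the measured resteer cycles to mispredicts in proportion to their share of all clear events (mispredicts versus machine clears). A sketch of that proportional split with invented counts:

# Proportional attribution of resteer cycles, mirroring
#   BR_MISP_RETIRED.ALL_BRANCHES
#   / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT)
#   * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks
# Counter values below are invented.
br_mispredicts = 9_000_000
machine_clears = 1_000_000
clear_resteer_cycles = 5.0e8
clks = 1.0e10

share = br_mispredicts / (br_mispredicts + machine_clears)   # 0.9
tma_mispredicts_resteers = share * clear_resteer_cycles / clks
print(f"tma_mispredicts_resteers: {tma_mispredicts_resteers:.3f}")  # -> 0.045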
@@ -1376,12 +1401,12 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued , the Count Domain; [ADL+] cycles)",
+ "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles)",
"MetricExpr": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH / UOPS_ISSUED.ANY",
"MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group",
"MetricName": "tma_mixing_vectors",
"MetricThreshold": "tma_mixing_vectors > 0.05",
- "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued , the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
+ "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
"ScaleUnit": "100%"
},
{
@@ -1389,7 +1414,7 @@
"MetricExpr": "2 * IDQ.MS_SWITCHES / tma_info_thread_clks",
"MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
"MetricName": "tma_ms_switches",
- "MetricThreshold": "tma_ms_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_bottleneck_irregular_overhead, tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
"ScaleUnit": "100%"
},
@@ -1399,7 +1424,7 @@
"MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_non_fused_branches",
"MetricThreshold": "tma_non_fused_branches > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused.",
"ScaleUnit": "100%"
},
{
@@ -1407,12 +1432,13 @@
"MetricExpr": "tma_light_operations * INST_RETIRED.NOP / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "BvBO;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_nop_instructions",
- "MetricThreshold": "tma_nop_instructions > 0.1 & tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
+ "MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP_PS",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches))",
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_other_light_ops",
@@ -1421,19 +1447,21 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types)",
+ "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "max(tma_branch_mispredicts * (1 - BR_MISP_RETIRED.ALL_BRANCHES / (INT_MISC.CLEARS_COUNT - MACHINE_CLEARS.COUNT)), 0.0001)",
"MetricGroup": "BrMispredicts;BvIO;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_other_mispredicts",
- "MetricThreshold": "tma_other_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering",
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "max(tma_machine_clears * (1 - MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.COUNT), 0.0001)",
"MetricGroup": "BvIO;Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_other_nukes",
- "MetricThreshold": "tma_other_nukes > 0.05 & tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
@@ -1442,7 +1470,7 @@
"MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_0",
"MetricThreshold": "tma_port_0 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED_PORT.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1451,7 +1479,7 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_1",
"MetricThreshold": "tma_port_1 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1487,7 +1515,7 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_5",
"MetricThreshold": "tma_port_5 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1496,7 +1524,7 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_6",
"MetricThreshold": "tma_port_6 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1510,11 +1538,12 @@
},
{
"BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "((tma_ports_utilized_0 * tma_info_thread_clks + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / tma_info_thread_clks if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_thread_clks)",
"MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_ports_utilization",
- "MetricThreshold": "tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
"ScaleUnit": "100%"
},
{
@@ -1522,8 +1551,8 @@
"MetricExpr": "EXE_ACTIVITY.EXE_BOUND_0_PORTS / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
- "MetricThreshold": "tma_ports_utilized_0 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
"ScaleUnit": "100%"
},
{
@@ -1531,7 +1560,7 @@
"MetricExpr": "((UOPS_EXECUTED.CORE_CYCLES_GE_1 - UOPS_EXECUTED.CORE_CYCLES_GE_2) / 2 if #SMT_on else EXE_ACTIVITY.1_PORTS_UTIL) / tma_info_core_core_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_1",
- "MetricThreshold": "tma_ports_utilized_1 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Related metrics: tma_l1_bound",
"ScaleUnit": "100%"
},
@@ -1540,16 +1569,16 @@
"MetricExpr": "((UOPS_EXECUTED.CORE_CYCLES_GE_2 - UOPS_EXECUTED.CORE_CYCLES_GE_3) / 2 if #SMT_on else EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_core_core_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_2",
- "MetricThreshold": "tma_ports_utilized_2 > 0.15 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).",
"MetricExpr": "(UOPS_EXECUTED.CORE_CYCLES_GE_3 / 2 if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_3) / tma_info_core_core_clks",
"MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
- "MetricThreshold": "tma_ports_utilized_3m > 0.4 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
{
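[Editor's note] tma_ports_utilized_3m above shows a recurring SMT correction: the "/ 2 if #SMT_on" halves the UOPS_EXECUTED.CORE_CYCLES_GE_3 reading before dividing by core clocks. Reading this as both hardware threads observing the same core-level cycles, so the summed per-thread reading double-counts, is an inference of this sketch:

# Core-level cycle fraction from a per-thread counter, mirroring
#   (UOPS_EXECUTED.CORE_CYCLES_GE_3 / 2 if #SMT_on
#    else UOPS_EXECUTED.CORE_CYCLES_GE_3) / tma_info_core_core_clks
# Counter values are hypothetical; the /2 interpretation is an inference.
smt_on = True
core_cycles_ge_3 = 3.0e9   # summed per-thread reading
core_clks = 4.5e9          # tma_info_core_core_clks

cycles = core_cycles_ge_3 / 2 if smt_on else core_cycles_ge_3
tma_ports_utilized_3m = cycles / core_clks
print(f"tma_ports_utilized_3m: {tma_ports_utilized_3m:.2f}")  # -> 0.33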
@@ -1567,7 +1596,7 @@
"MetricExpr": "PARTIAL_RAT_STALLS.SCOREBOARD / tma_info_thread_clks",
"MetricGroup": "BvIO;PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
"MetricName": "tma_serializing_operation",
- "MetricThreshold": "tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: PARTIAL_RAT_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
"ScaleUnit": "100%"
},
@@ -1578,7 +1607,7 @@
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_split_loads",
"MetricThreshold": "tma_split_loads > 0.3",
- "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS_PS",
"ScaleUnit": "100%"
},
{
@@ -1586,8 +1615,8 @@
"MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
"MetricName": "tma_split_stores",
- "MetricThreshold": "tma_split_stores > 0.2 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES. Related metrics: tma_port_4",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
"ScaleUnit": "100%"
},
{
@@ -1595,8 +1624,8 @@
"MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_core_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
- "MetricThreshold": "tma_sq_full > 0.3 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
"ScaleUnit": "100%"
},
{
@@ -1604,8 +1633,8 @@
"MetricExpr": "EXE_ACTIVITY.BOUND_ON_STORES / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_store_bound",
- "MetricThreshold": "tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES_PS",
"ScaleUnit": "100%"
},
{
@@ -1613,8 +1642,8 @@
"MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
- "MetricThreshold": "tma_store_fwd_blk > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
"ScaleUnit": "100%"
},
{
@@ -1623,8 +1652,8 @@
"MetricExpr": "(L2_RQSTS.RFO_HIT * 9 * (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) + (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
"MetricGroup": "BvML;LockCont;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
- "MetricThreshold": "tma_store_latency > 0.1 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_branch_resteers, tma_fb_full, tma_l3_hit_latency, tma_lock_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
"ScaleUnit": "100%"
},
{
@@ -1640,7 +1669,7 @@
"MetricExpr": "tma_dtlb_store - tma_store_stlb_miss",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
"MetricName": "tma_store_stlb_hit",
- "MetricThreshold": "tma_store_stlb_hit > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_hit > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
@@ -1648,31 +1677,34 @@
"MetricExpr": "DTLB_STORE_MISSES.WALK_ACTIVE / tma_info_core_core_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
"MetricName": "tma_store_stlb_miss",
- "MetricThreshold": "tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data store accesses.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_1G / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_1g",
- "MetricThreshold": "tma_store_stlb_miss_1g > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_1g > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data store accesses.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_2m",
- "MetricThreshold": "tma_store_stlb_miss_2m > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_2m > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data store accesses.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_4K / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_4k",
- "MetricThreshold": "tma_store_stlb_miss_4k > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_4k > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
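The three page-size children above split the parent tma_store_stlb_miss estimate in proportion to completed page walks of each size, so the children always sum back to the parent. A minimal Python sketch of that attribution, using made-up counter values:

# Sketch: how tma_store_stlb_miss is apportioned across page sizes.
# Counter values are illustrative, not measured.
walk_completed = {"4k": 9000, "2m_4m": 800, "1g": 200}  # DTLB_STORE_MISSES.WALK_COMPLETED_*
tma_store_stlb_miss = 0.12  # parent: fraction of core clocks with an active store walk

total = sum(walk_completed.values())
children = {size: tma_store_stlb_miss * n / total for size, n in walk_completed.items()}
assert abs(sum(children.values()) - tma_store_stlb_miss) < 1e-12  # children partition the parent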
@@ -1680,7 +1712,7 @@
"MetricExpr": "9 * BACLEARS.ANY / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
- "MetricThreshold": "tma_unknown_branches > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: BACLEARS.ANY",
"ScaleUnit": "100%"
},
@@ -1689,8 +1721,8 @@
"MetricExpr": "tma_retiring * UOPS_EXECUTED.X87 / UOPS_EXECUTED.THREAD",
"MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
"MetricName": "tma_x87_use",
- "MetricThreshold": "tma_x87_use > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
"ScaleUnit": "100%"
},
{
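Because the & in a MetricThreshold is plain logical AND, the re-parenthesized forms in these hunks are truth-equivalent to the flat ones; the grouping only documents that each child threshold is gated by its parent's threshold up the TMA tree. A quick Python check under assumed metric values:

# Sketch: flat vs. nested MetricThreshold strings evaluate identically.
# Metric values are illustrative only.
m = {"tma_x87_use": 0.15, "tma_fp_arith": 0.3, "tma_light_operations": 0.7}

flat   = m["tma_x87_use"] > 0.1 and m["tma_fp_arith"] > 0.2 and m["tma_light_operations"] > 0.6
nested = m["tma_x87_use"] > 0.1 and (m["tma_fp_arith"] > 0.2 and m["tma_light_operations"] > 0.6)
assert flat == nested  # AND is associative; the nesting only documents the tree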
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/cache.json b/tools/perf/pmu-events/arch/x86/skylakex/cache.json
index 2ce070629c52..7aeeb5725630 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/cache.json
@@ -1,5 +1,79 @@
[
{
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IFWDFE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDFE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IFWDM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDM",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IHITFSE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IHITFSE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IHITI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IHITI",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SFWDFE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDFE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SFWDM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDM",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SHITFSE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SHITFSE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts number of cache lines that are dropped and not written back to L3 as they are deemed to be less likely to be reused shortly",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xFE",
+ "EventName": "IDI_MISC.WB_DOWNGRADE",
+ "PublicDescription": "Counts number of cache lines that are dropped and not written back to L3 as they are deemed to be less likely to be reused shortly.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts number of cache lines that are allocated and written back to L3 with the intention that they are more likely to be reused shortly",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xFE",
+ "EventName": "IDI_MISC.WB_UPGRADE",
+ "PublicDescription": "Counts number of cache lines that are allocated and written back to L3 with the intention that they are more likely to be reused shortly.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
"BriefDescription": "L1D data line replacements",
"Counter": "0,1,2,3",
"EventCode": "0x51",
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/other.json b/tools/perf/pmu-events/arch/x86/skylakex/other.json
index 44c820518e12..adf7b6bb5838 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/other.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/other.json
@@ -36,62 +36,6 @@
"UMask": "0x40"
},
{
- "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IFWDFE",
- "Counter": "0,1,2,3",
- "EventCode": "0xEF",
- "EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDFE",
- "SampleAfterValue": "2000003",
- "UMask": "0x20"
- },
- {
- "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IFWDM",
- "Counter": "0,1,2,3",
- "EventCode": "0xEF",
- "EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDM",
- "SampleAfterValue": "2000003",
- "UMask": "0x10"
- },
- {
- "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IHITFSE",
- "Counter": "0,1,2,3",
- "EventCode": "0xEF",
- "EventName": "CORE_SNOOP_RESPONSE.RSP_IHITFSE",
- "SampleAfterValue": "2000003",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IHITI",
- "Counter": "0,1,2,3",
- "EventCode": "0xEF",
- "EventName": "CORE_SNOOP_RESPONSE.RSP_IHITI",
- "SampleAfterValue": "2000003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SFWDFE",
- "Counter": "0,1,2,3",
- "EventCode": "0xEF",
- "EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDFE",
- "SampleAfterValue": "2000003",
- "UMask": "0x40"
- },
- {
- "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SFWDM",
- "Counter": "0,1,2,3",
- "EventCode": "0xEF",
- "EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDM",
- "SampleAfterValue": "2000003",
- "UMask": "0x8"
- },
- {
- "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SHITFSE",
- "Counter": "0,1,2,3",
- "EventCode": "0xEF",
- "EventName": "CORE_SNOOP_RESPONSE.RSP_SHITFSE",
- "SampleAfterValue": "2000003",
- "UMask": "0x4"
- },
- {
"BriefDescription": "Number of hardware interrupts received by the processor.",
"Counter": "0,1,2,3",
"EventCode": "0xCB",
@@ -101,24 +45,6 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts number of cache lines that are dropped and not written back to L3 as they are deemed to be less likely to be reused shortly",
- "Counter": "0,1,2,3",
- "EventCode": "0xFE",
- "EventName": "IDI_MISC.WB_DOWNGRADE",
- "PublicDescription": "Counts number of cache lines that are dropped and not written back to L3 as they are deemed to be less likely to be reused shortly.",
- "SampleAfterValue": "100003",
- "UMask": "0x4"
- },
- {
- "BriefDescription": "Counts number of cache lines that are allocated and written back to L3 with the intention that they are more likely to be reused shortly",
- "Counter": "0,1,2,3",
- "EventCode": "0xFE",
- "EventName": "IDI_MISC.WB_UPGRADE",
- "PublicDescription": "Counts number of cache lines that are allocated and written back to L3 with the intention that they are more likely to be reused shortly.",
- "SampleAfterValue": "100003",
- "UMask": "0x2"
- },
- {
"BriefDescription": "MEMORY_DISAMBIGUATION.HISTORY_RESET",
"Counter": "0,1,2,3",
"EventCode": "0x09",
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json b/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
index 3dd296ab4d78..9a1349527b66 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
@@ -542,7 +542,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x4C",
"EventName": "LOAD_HIT_PRE.SW_PF",
- "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "PublicDescription": "Counts all software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
index 2fe630cd4927..56c2caaafef4 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
@@ -1,49 +1,49 @@
[
{
"BriefDescription": "C2 residency percent per package",
- "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per core",
- "MetricExpr": "cstate_core@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per package",
- "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per core",
- "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per package",
- "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per core",
- "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per package",
- "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Pkg_Residency",
"ScaleUnit": "100%"
@@ -295,12 +295,13 @@
"MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_4k_aliasing",
- "MetricThreshold": "tma_4k_aliasing > 0.2 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound)",
+ "MetricThreshold": "tma_4k_aliasing > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations",
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5 + UOPS_DISPATCHED_PORT.PORT_6) / tma_info_thread_slots",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_alu_op_utilization",
@@ -312,7 +313,7 @@
"MetricExpr": "34 * (FP_ASSIST.ANY + OTHER_ASSISTS.ANY) / tma_info_thread_slots",
"MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
- "MetricThreshold": "tma_assists > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: OTHER_ASSISTS.ANY",
"ScaleUnit": "100%"
},
@@ -323,7 +324,7 @@
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
"ScaleUnit": "100%"
},
{
@@ -333,12 +334,13 @@
"MetricName": "tma_bad_speculation",
"MetricThreshold": "tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL1",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
- "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
"MetricGroup": "BigFootprint;BvBC;Fed;Frontend;IcMiss;MemoryTLB",
"MetricName": "tma_bottleneck_big_code",
"MetricThreshold": "tma_bottleneck_big_code > 20"
@@ -352,39 +354,44 @@
"PublicDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. (A lower bound)"
},
{
+ "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
+ "MetricGroup": "BvCB;Cor;tma_issueComp",
+ "MetricName": "tma_bottleneck_compute_bound_est",
+ "MetricThreshold": "tma_bottleneck_compute_bound_est > 20",
+ "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. Related metrics: "
+ },
+ {
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_fb_full / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
"MetricGroup": "BvMB;Mem;MemoryBW;Offcore;tma_issueBW",
- "MetricName": "tma_bottleneck_cache_memory_bandwidth",
- "MetricThreshold": "tma_bottleneck_cache_memory_bandwidth > 20",
+ "MetricName": "tma_bottleneck_data_cache_memory_bandwidth",
+ "MetricThreshold": "tma_bottleneck_data_cache_memory_bandwidth > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_l1_latency_dependency / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_lock_latency / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_split_loads / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_split_stores / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_dtlb_store)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_store_latency / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_dtlb_store)))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l1_latency_dependency / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_lock_latency / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_loads / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_stores / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)))",
"MetricGroup": "BvML;Mem;MemoryLat;Offcore;tma_issueLat",
- "MetricName": "tma_bottleneck_cache_memory_latency",
- "MetricThreshold": "tma_bottleneck_cache_memory_latency > 20",
+ "MetricName": "tma_bottleneck_data_cache_memory_latency",
+ "MetricThreshold": "tma_bottleneck_data_cache_memory_latency > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency"
},
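Long as they are, the two bottleneck expressions above repeat one pattern: walk down the TMA tree, weighting each step by a node's share among its siblings, and scale by 100. A reduced Python sketch of a single such term (the DRAM-bandwidth path), with illustrative node fractions:

# Sketch of the weighting pattern in the tma_bottleneck_* expressions:
# term = 100 * parent * (node / sum(node's siblings)) * (leaf / sum(leaf's siblings)).
tma_memory_bound = 0.35
l_bounds = {"dram": 0.10, "l1": 0.08, "l2": 0.03, "l3": 0.06, "store": 0.04}
mem_bandwidth, mem_latency = 0.06, 0.04  # children of dram_bound

dram_share = l_bounds["dram"] / sum(l_bounds.values())
bw_share = mem_bandwidth / (mem_bandwidth + mem_latency)
term = 100 * tma_memory_bound * dram_share * bw_share
# The full metric sums analogous terms for tma_sq_full and tma_fb_full.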
{
- "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
- "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_serializing_operation + tma_ports_utilization) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_serializing_operation + tma_ports_utilization)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
- "MetricGroup": "BvCB;Cor;tma_issueComp",
- "MetricName": "tma_bottleneck_compute_bound_est",
- "MetricThreshold": "tma_bottleneck_compute_bound_est > 20",
- "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy"
- },
- {
"BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)",
- "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_mispredicts_resteers + tma_clears_resteers + tma_unknown_branches)) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches)) - tma_bottleneck_big_code",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_bottleneck_big_code",
"MetricGroup": "BvFB;Fed;FetchBW;Frontend",
"MetricName": "tma_bottleneck_instruction_fetch_bw",
"MetricThreshold": "tma_bottleneck_instruction_fetch_bw > 20"
},
{
"BriefDescription": "Total pipeline cost of irregular execution (e.g",
- "MetricExpr": "100 * (tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_mispredicts_resteers + tma_clears_resteers + tma_unknown_branches)) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + tma_core_bound * RS_EVENTS.EMPTY_CYCLES / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_serializing_operation + tma_ports_utilization) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * (10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts)) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + tma_core_bound * RS_EVENTS.EMPTY_CYCLES / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
"MetricGroup": "Bad;BvIO;Cor;Ret;tma_issueMS",
"MetricName": "tma_bottleneck_irregular_overhead",
"MetricThreshold": "tma_bottleneck_irregular_overhead > 10",
@@ -392,7 +399,8 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_dtlb_store / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_dtlb_store)))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)))",
"MetricGroup": "BvMT;Mem;MemoryTLB;Offcore;tma_issueTLB",
"MetricName": "tma_bottleneck_memory_data_tlbs",
"MetricThreshold": "tma_bottleneck_memory_data_tlbs > 20",
@@ -400,7 +408,8 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) * tma_remote_cache / (tma_local_mem + tma_remote_mem + tma_remote_cache) + tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * tma_false_sharing / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_dtlb_store - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) * tma_remote_cache / (tma_local_mem + tma_remote_cache + tma_remote_mem) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
"MetricGroup": "BvMS;LockCont;Mem;Offcore;tma_issueSyncxn",
"MetricName": "tma_bottleneck_memory_synchronization",
"MetricThreshold": "tma_bottleneck_memory_synchronization > 10",
@@ -408,7 +417,8 @@
},
{
"BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
- "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Bad;BadSpec;BrMispredicts;BvMP;tma_issueBM",
"MetricName": "tma_bottleneck_mispredictions",
"MetricThreshold": "tma_bottleneck_mispredictions > 20",
@@ -416,14 +426,16 @@
},
{
"BriefDescription": "Total pipeline cost of remaining bottlenecks in the back-end",
- "MetricExpr": "100 - (tma_bottleneck_big_code + tma_bottleneck_instruction_fetch_bw + tma_bottleneck_mispredictions + tma_bottleneck_cache_memory_bandwidth + tma_bottleneck_cache_memory_latency + tma_bottleneck_memory_data_tlbs + tma_bottleneck_memory_synchronization + tma_bottleneck_compute_bound_est + tma_bottleneck_irregular_overhead + tma_bottleneck_branching_overhead + tma_bottleneck_useful_work)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 - (tma_bottleneck_big_code + tma_bottleneck_instruction_fetch_bw + tma_bottleneck_mispredictions + tma_bottleneck_data_cache_memory_bandwidth + tma_bottleneck_data_cache_memory_latency + tma_bottleneck_memory_data_tlbs + tma_bottleneck_memory_synchronization + tma_bottleneck_compute_bound_est + tma_bottleneck_irregular_overhead + tma_bottleneck_branching_overhead + tma_bottleneck_useful_work)",
"MetricGroup": "BvOB;Cor;Offcore",
"MetricName": "tma_bottleneck_other_bottlenecks",
"MetricThreshold": "tma_bottleneck_other_bottlenecks > 20",
- "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls"
+ "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls."
},
{
- "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead",
+ "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + 2 * BR_INST_RETIRED.NEAR_CALL + INST_RETIRED.NOP) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
"MetricGroup": "BvUW;Ret",
"MetricName": "tma_bottleneck_useful_work",
@@ -445,17 +457,18 @@
"MetricExpr": "INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks + tma_unknown_branches",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_branch_resteers",
- "MetricThreshold": "tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_l3_hit_latency, tma_store_latency",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
"MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_cisc",
- "MetricThreshold": "tma_cisc > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
- "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
"ScaleUnit": "100%"
},
{
@@ -463,7 +476,7 @@
"MetricExpr": "(1 - BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT)) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
"MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
"MetricName": "tma_clears_resteers",
- "MetricThreshold": "tma_clears_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
"ScaleUnit": "100%"
},
@@ -472,7 +485,7 @@
"MetricExpr": "max(0, tma_itlb_misses - tma_code_stlb_miss)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group",
"MetricName": "tma_code_stlb_hit",
- "MetricThreshold": "tma_code_stlb_hit > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_hit > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
@@ -480,33 +493,33 @@
"MetricExpr": "ITLB_MISSES.WALK_ACTIVE / tma_info_thread_clks",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group",
"MetricName": "tma_code_stlb_miss",
- "MetricThreshold": "tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for (instruction) code accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for (instruction) code accesses.",
"MetricExpr": "tma_code_stlb_miss * ITLB_MISSES.WALK_COMPLETED_2M_4M / (ITLB_MISSES.WALK_COMPLETED_4K + ITLB_MISSES.WALK_COMPLETED_2M_4M)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group",
"MetricName": "tma_code_stlb_miss_2m",
- "MetricThreshold": "tma_code_stlb_miss_2m > 0.05 & tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss_2m > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for (instruction) code accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for (instruction) code accesses.",
"MetricExpr": "tma_code_stlb_miss * ITLB_MISSES.WALK_COMPLETED_4K / (ITLB_MISSES.WALK_COMPLETED_4K + ITLB_MISSES.WALK_COMPLETED_2M_4M)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group",
"MetricName": "tma_code_stlb_miss_4k",
- "MetricThreshold": "tma_code_stlb_miss_4k > 0.05 & tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss_4k > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "((47.5 * tma_info_system_core_frequency - 3.5 * tma_info_system_core_frequency) * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE / (OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE + OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + (47.5 * tma_info_system_core_frequency - 3.5 * tma_info_system_core_frequency) * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "(44 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE / (OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE + OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + 44 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
- "MetricThreshold": "tma_contested_accesses > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM, MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_bottleneck_memory_synchronization, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_bottleneck_memory_synchronization, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
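The tma_contested_accesses change above is a constant fold: (47.5 - 3.5) * tma_info_system_core_frequency is exactly 44 * tma_info_system_core_frequency, so the new expression charges the same per-snoop cost in core cycles. Trivially checkable in Python with any assumed frequency ratio:

# Sketch: old and new latency constants agree.
f = 1.3  # tma_info_system_core_frequency, illustrative
assert abs((47.5 * f - 3.5 * f) - 44 * f) < 1e-9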
@@ -517,25 +530,25 @@
"MetricName": "tma_core_bound",
"MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations)",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "(47.5 * tma_info_system_core_frequency - 3.5 * tma_info_system_core_frequency) * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (1 - OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE / (OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE + OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "44 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (1 - OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE / (OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE + OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
- "MetricThreshold": "tma_data_sharing > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder",
- "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=0x1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=0x2@) / tma_info_core_core_clks / 2",
+ "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_core_clks / 2",
"MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group",
"MetricName": "tma_decoder0_alone",
- "MetricThreshold": "tma_decoder0_alone > 0.1 & tma_mite > 0.1 & tma_fetch_bandwidth > 0.2",
+ "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions",
"ScaleUnit": "100%"
},
@@ -544,7 +557,7 @@
"MetricExpr": "ARITH.DIVIDER_ACTIVE / tma_info_thread_clks",
"MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
- "MetricThreshold": "tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
"ScaleUnit": "100%"
},
@@ -554,7 +567,7 @@
"MetricExpr": "CYCLE_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks + (CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks - tma_l2_bound",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_dram_bound",
- "MetricThreshold": "tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_MISS",
"ScaleUnit": "100%"
},
@@ -564,7 +577,7 @@
"MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_dsb",
"MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2",
- "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
"ScaleUnit": "100%"
},
{
@@ -572,27 +585,27 @@
"MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_thread_clks",
"MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_dsb_switches",
- "MetricThreshold": "tma_dsb_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
"MetricConstraint": "NO_GROUP_EVENTS_NMI",
- "MetricExpr": "min(9 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=0x1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
+ "MetricExpr": "min(9 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
- "MetricThreshold": "tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_store",
"ScaleUnit": "100%"
},
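The MetricExpr change above only normalizes the modifier spelling (cmask=0x1 to cmask=1) inside a cpu@EVENT\,mod\=val@ term, where the backslashes protect ',' and '=' from the metric parser. A rough Python illustration of pulling such a term apart; the parsing rules here are simplified assumptions, not perf's actual grammar:

import re

term = r"cpu@DTLB_LOAD_MISSES.STLB_HIT\,cmask\=1@"

m = re.fullmatch(r"(\w+)@(.*)@", term)
pmu, body = m.group(1), m.group(2)
body = body.replace("\\,", ",").replace("\\=", "=")  # strip the escapes
event, *mods = body.split(",")
print(pmu, event, dict(mod.split("=") for mod in mods))
# -> cpu DTLB_LOAD_MISSES.STLB_HIT {'cmask': '1'}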
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
- "MetricExpr": "(9 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=0x1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_core_clks",
+ "MetricExpr": "(9 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_core_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
- "MetricThreshold": "tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_load",
"ScaleUnit": "100%"
},
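As a worked example of the tma_dtlb_store formula above: each STLB hit is charged a fixed 9-cycle penalty, page walks are charged their active cycles, and the sum is normalized to core clocks. All counts below are invented for illustration:

stlb_hit    = 2_000_000      # DTLB_STORE_MISSES.STLB_HIT, cmask=1 (hypothetical)
walk_active = 5_000_000      # DTLB_STORE_MISSES.WALK_ACTIVE cycles (hypothetical)
core_clks   = 400_000_000    # tma_info_core_core_clks (hypothetical)

tma_dtlb_store = (9 * stlb_hit + walk_active) / core_clks
print(f"{tma_dtlb_store:.4f}")  # 0.0575 -> above the 0.05 threshold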
{
@@ -601,18 +614,18 @@
"MetricExpr": "(110 * tma_info_system_core_frequency * (OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HITM + OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HITM) + 47.5 * tma_info_system_core_frequency * (OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE + OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE)) / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
- "MetricThreshold": "tma_false_sharing > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM, OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
"MetricConstraint": "NO_GROUP_EVENTS_NMI",
- "MetricExpr": "tma_info_memory_load_miss_real_latency * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=0x1@ / tma_info_thread_clks",
+ "MetricExpr": "tma_info_memory_load_miss_real_latency * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
- "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
"ScaleUnit": "100%"
},
{
@@ -622,7 +635,7 @@
"MetricName": "tma_fetch_bandwidth",
"MetricThreshold": "tma_fetch_bandwidth > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1, FRONTEND_RETIRED.LATENCY_GE_1, FRONTEND_RETIRED.LATENCY_GE_2. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1;FRONTEND_RETIRED.LATENCY_GE_1;FRONTEND_RETIRED.LATENCY_GE_2. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
@@ -632,7 +645,7 @@
"MetricName": "tma_fetch_latency",
"MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16, FRONTEND_RETIRED.LATENCY_GE_8",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
"ScaleUnit": "100%"
},
{
@@ -652,7 +665,7 @@
"MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fp_arith",
"MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
"ScaleUnit": "100%"
},
{
@@ -661,7 +674,7 @@
"MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group",
"MetricName": "tma_fp_assists",
"MetricThreshold": "tma_fp_assists > 0.1",
- "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals)",
+ "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals).",
"ScaleUnit": "100%"
},
{
@@ -669,17 +682,16 @@
"MetricExpr": "FP_ARITH_INST_RETIRED.SCALAR / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_scalar",
- "MetricThreshold": "tma_fp_scalar > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
"PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
- "MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xFC@ / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@ / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_vector",
- "MetricThreshold": "tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
"PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
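The 0xfc umask on FP_ARITH_INST_RETIRED above ORs together all packed sub-events while excluding the two scalar ones. A sketch decoding it, assuming the usual Skylake-server umask bit assignment (an assumption, not read from this file):

# Assumed FP_ARITH_INST_RETIRED umask bits (Skylake-server convention).
UMASK_BITS = {
    0x01: "SCALAR_DOUBLE",
    0x02: "SCALAR_SINGLE",
    0x04: "128B_PACKED_DOUBLE",
    0x08: "128B_PACKED_SINGLE",
    0x10: "256B_PACKED_DOUBLE",
    0x20: "256B_PACKED_SINGLE",
    0x40: "512B_PACKED_DOUBLE",
    0x80: "512B_PACKED_SINGLE",
}

umask = 0xFC
selected = [name for bit, name in UMASK_BITS.items() if umask & bit]
print(selected)  # every packed/vector sub-event; the two scalars are masked out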
@@ -688,7 +700,7 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE) / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_128b",
- "MetricThreshold": "tma_fp_vector_128b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
"PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -697,7 +709,7 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_256b",
- "MetricThreshold": "tma_fp_vector_256b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
"PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -706,7 +718,7 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_512b",
- "MetricThreshold": "tma_fp_vector_512b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_vector_512b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
"PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -717,50 +729,51 @@
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions , where one uop can represent multiple contiguous instructions",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions",
"MetricExpr": "tma_light_operations * UOPS_RETIRED.MACRO_FUSED / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fused_instructions",
"MetricThreshold": "tma_fused_instructions > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions , where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
"MetricExpr": "(UOPS_RETIRED.RETIRE_SLOTS + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY) / tma_info_thread_slots",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_heavy_operations",
"MetricThreshold": "tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+])",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+])",
"ScaleUnit": "100%"
},
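The tma_heavy_operations expression roughly counts retire slots beyond one per retired instruction: retire_slots + macro_fused - inst_retired leaves only the extra uops of multi-uop or microcoded instructions (macro-fused pairs retire as a single uop, hence the add-back). A numeric sketch with invented counters:

retire_slots = 1_000_000_000  # UOPS_RETIRED.RETIRE_SLOTS (hypothetical)
macro_fused  = 50_000_000     # UOPS_RETIRED.MACRO_FUSED (hypothetical)
inst_retired = 900_000_000    # INST_RETIRED.ANY (hypothetical)
slots        = 1_600_000_000  # tma_info_thread_slots (hypothetical)

print((retire_slots + macro_fused - inst_retired) / slots)  # 0.09375, just under 0.1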
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
- "MetricExpr": "(ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=0x1\\,edge\\=0x1@) / tma_info_thread_clks",
+ "MetricExpr": "(ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@) / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
- "MetricThreshold": "tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS, FRONTEND_RETIRED.L1I_MISS",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Branch Misprediction Cost: Cycles representing fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_bottleneck_mispredictions * tma_info_thread_slots / 4 / BR_MISP_RETIRED.ALL_BRANCHES / 100",
"MetricGroup": "Bad;BrMispredicts;tma_issueBM",
"MetricName": "tma_info_bad_spec_branch_misprediction_cost",
"PublicDescription": "Branch Misprediction Cost: Cycles representing fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_bottleneck_mispredictions, tma_branch_mispredicts, tma_mispredicts_resteers"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
"MetricExpr": "tma_info_inst_mix_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * BR_MISP_EXEC.INDIRECT)",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_indirect",
- "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1000"
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3"
},
{
"BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
@@ -785,7 +798,8 @@
},
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck",
- "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_latency + tma_fetch_bandwidth)) * (tma_dsb / (tma_mite + tma_dsb)))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_bandwidth + tma_fetch_latency)) * (tma_dsb / (tma_dsb + tma_mite)))",
"MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_bandwidth",
"MetricThreshold": "tma_info_botlnk_l2_dsb_bandwidth > 10",
@@ -794,7 +808,7 @@
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_mite / (tma_mite + tma_dsb))",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_mite))",
"MetricGroup": "DSBmiss;Fed;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_misses",
"MetricThreshold": "tma_info_botlnk_l2_dsb_misses > 10",
@@ -802,10 +816,12 @@
},
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
- "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
"MetricName": "tma_info_botlnk_l2_ic_misses",
- "MetricThreshold": "tma_info_botlnk_l2_ic_misses > 5"
+ "MetricThreshold": "tma_info_botlnk_l2_ic_misses > 5",
+ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: "
},
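These tma_info_botlnk_l2_* roll-ups weight a leaf node by its parent and normalize by the sum of the leaf's siblings; the patch merely reorders the sibling sums into canonical alphabetical order, which cannot change the result. A sketch of the ic_misses roll-up with invented level-3 values:

# Hypothetical TopdownL3 fractions under tma_fetch_latency.
leaves = {
    "tma_branch_resteers": 0.04,
    "tma_dsb_switches":    0.02,
    "tma_icache_misses":   0.06,
    "tma_itlb_misses":     0.01,
    "tma_lcp":             0.01,
    "tma_ms_switches":     0.02,
}
tma_fetch_latency = 0.20  # hypothetical

ic_share = leaves["tma_icache_misses"] / sum(leaves.values())
print(100 * tma_fetch_latency * ic_share)  # 7.5 -> above the > 5 threshold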
{
"BriefDescription": "Fraction of branches that are CALL or RET",
@@ -834,7 +850,7 @@
},
{
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
- "MetricExpr": "(CPU_CLK_UNHALTED.THREAD_ANY / 2 if #SMT_on else tma_info_thread_clks)",
+ "MetricExpr": "(CPU_CLK_UNHALTED.THREAD / 2 * (1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK) if #core_wide < 1 else (CPU_CLK_UNHALTED.THREAD_ANY / 2 if #SMT_on else tma_info_thread_clks))",
"MetricGroup": "SMT",
"MetricName": "tma_info_core_core_clks"
},
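The updated tma_info_core_core_clks expression adds a branch for non-core-wide (per-thread) collection: when #core_wide is false it estimates core clocks from one thread's clocks, scaled up by how often only one thread was active, instead of halving CPU_CLK_UNHALTED.THREAD_ANY. A direct Python transcription, with every input invented:

def core_clks(core_wide, smt_on, thread, one_thread_active, ref_xclk,
              thread_any, thread_clks):
    # Mirrors the metric's if/else chain; all inputs are raw counter values.
    if core_wide < 1:
        return thread / 2 * (1 + one_thread_active / ref_xclk)
    return thread_any / 2 if smt_on else thread_clks

# Hypothetical per-thread counts on an SMT2 core:
print(core_clks(core_wide=0, smt_on=True, thread=500e6,
                one_thread_active=40e6, ref_xclk=100e6,
                thread_any=0, thread_clks=500e6))  # 350e6 estimated core clocks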
@@ -859,14 +875,14 @@
},
{
"BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
- "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xFC@) / (2 * tma_info_core_core_clks)",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
- "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)"
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=0x1@",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "tma_info_core_ilp"
},
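The cmask=1 qualifier above restricts UOPS_EXECUTED.THREAD to cycles where its per-cycle count is at least one, so the ratio is uops per cycle that actually had execution. With invented counts:

uops_executed    = 1_200_000_000  # UOPS_EXECUTED.THREAD (hypothetical)
cycles_with_exec =   600_000_000  # same event with cmask=1 (hypothetical)
print(uops_executed / cycles_with_exec)  # ILP = 2.0 uops per busy cycle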
@@ -879,20 +895,20 @@
"PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp"
},
{
- "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details",
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
"MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / DSB2MITE_SWITCHES.COUNT",
"MetricGroup": "DSBmiss",
"MetricName": "tma_info_frontend_dsb_switch_cost"
},
{
"BriefDescription": "Average number of Uops issued by front-end when it issued something",
- "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=0x1@",
+ "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=1@",
"MetricGroup": "Fed;FetchBW",
"MetricName": "tma_info_frontend_fetch_upc"
},
{
"BriefDescription": "Average Latency for L1 instruction cache misses",
- "MetricExpr": "ICACHE_16B.IFDATA_STALL / cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=0x1\\,edge\\=0x1@ + 2",
+ "MetricExpr": "ICACHE_16B.IFDATA_STALL / cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@ + 2",
"MetricGroup": "Fed;FetchLat;IcMiss",
"MetricName": "tma_info_frontend_icache_miss_latency"
},
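In the expression above, cmask=1 combined with edge detection counts 0-to-busy transitions of the stall signal, i.e. the number of distinct miss episodes; dividing total stall cycles by episodes and adding a fixed 2-cycle cost yields an average per-miss latency. With invented counts:

stall_cycles = 30_000_000  # ICACHE_16B.IFDATA_STALL (hypothetical)
episodes     =  1_000_000  # same event, cmask=1,edge: rising edges (hypothetical)
print(stall_cycles / episodes + 2)  # ~32 cycles per L1I miss on average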
@@ -928,7 +944,7 @@
"MetricName": "tma_info_frontend_tbpc"
},
{
- "BriefDescription": "Branch instructions per taken branch",
+ "BriefDescription": "Branch instructions per taken branch.",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;PGO",
"MetricName": "tma_info_inst_mix_bptkbranch"
@@ -942,12 +958,11 @@
},
{
"BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
- "MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xFC@)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@)",
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
"MetricThreshold": "tma_info_inst_mix_iparith < 10",
- "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW"
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
@@ -955,7 +970,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx128",
"MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
@@ -963,7 +978,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx256",
"MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate)",
@@ -971,7 +986,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx512",
"MetricThreshold": "tma_info_inst_mix_iparith_avx512 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
@@ -979,7 +994,7 @@
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_dp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
@@ -987,7 +1002,7 @@
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_sp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
@@ -1037,7 +1052,7 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
- "MetricThreshold": "tma_info_inst_mix_iptb < 4 * 2 + 1",
+ "MetricThreshold": "tma_info_inst_mix_iptb < 9",
"PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp"
},
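The new IpTB threshold 9 is the old 4 * 2 + 1 with the constant folded; reading the formula's intent (an inference, not stated in the file), fewer than two 4-wide pipeline windows of instructions per taken branch, plus one, flags a taken-branch-dense mix that hazards fetch bandwidth. The same folding appears in tma_info_thread_uptb later in this file:

PIPELINE_WIDTH = 4  # allocation width assumed for this core family
print(PIPELINE_WIDTH * 2 + 1)        # 9: tma_info_inst_mix_iptb threshold
print(int(PIPELINE_WIDTH * 1.5))     # 6: tma_info_thread_uptb threshold below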
{
@@ -1224,8 +1239,8 @@
"MetricName": "tma_info_memory_tlb_store_stlb_mpki"
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per core",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=0x1@)",
+ "BriefDescription": "Mem;Backend;CacheHits",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute"
},
@@ -1242,22 +1257,28 @@
"MetricName": "tma_info_pipeline_fetch_mite"
},
{
+ "BriefDescription": "Average number of uops fetched from MS per cycle",
+ "MetricExpr": "IDQ.MS_UOPS / cpu@IDQ.MS_UOPS\\,cmask\\=1@",
+ "MetricGroup": "Fed;FetchLat;MicroSeq",
+ "MetricName": "tma_info_pipeline_fetch_ms"
+ },
+ {
"BriefDescription": "Instructions per a microcode Assist invocation",
"MetricExpr": "INST_RETIRED.ANY / (FP_ASSIST.ANY + OTHER_ASSISTS.ANY)",
"MetricGroup": "MicroSeq;Pipeline;Ret;Retire",
"MetricName": "tma_info_pipeline_ipassist",
- "MetricThreshold": "tma_info_pipeline_ipassist < 100000",
+ "MetricThreshold": "tma_info_pipeline_ipassist < 100e3",
"PublicDescription": "Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)"
},
{
- "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=0x1@",
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
"MetricGroup": "Pipeline;Ret",
"MetricName": "tma_info_pipeline_retire"
},
{
"BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]",
- "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / tma_info_system_time",
+ "MetricExpr": "tma_info_system_turbo_utilization * msr@tsc@ / 1e9 / tma_info_system_time",
"MetricGroup": "Power;Summary",
"MetricName": "tma_info_system_core_frequency"
},
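The change above swaps the symbolic TSC literal for the msr PMU's tsc event, but the arithmetic is unchanged: average core GHz is turbo utilization times TSC ticks, scaled to seconds. With invented numbers for a 5-second window on a 2.0 GHz TSC:

turbo_utilization = 1.2      # CLK_UNHALTED.THREAD / REF_TSC (hypothetical)
tsc_ticks         = 10.0e9   # msr@tsc@ over the window: 2.0 GHz * 5 s (hypothetical)
seconds           = 5.0      # tma_info_system_time (hypothetical)
print(turbo_utilization * tsc_ticks / 1e9 / seconds)  # 2.4 GHz average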
@@ -1269,7 +1290,7 @@
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
@@ -1278,7 +1299,7 @@
"MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / tma_info_system_time",
"MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW",
"MetricName": "tma_info_system_dram_bw_use",
- "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth, tma_sq_full"
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth, tma_sq_full"
},
{
"BriefDescription": "Giga Floating Point Operations Per Second",
@@ -1307,14 +1328,13 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
"MetricGroup": "Branches;OS",
"MetricName": "tma_info_system_ipfarbranch",
- "MetricThreshold": "tma_info_system_ipfarbranch < 1000000"
+ "MetricThreshold": "tma_info_system_ipfarbranch < 1e6"
},
{
"BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
"MetricGroup": "OS",
- "MetricName": "tma_info_system_kernel_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_system_kernel_cpi"
},
{
"BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
@@ -1332,7 +1352,7 @@
},
{
"BriefDescription": "Average number of parallel data read requests to external memory",
- "MetricExpr": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / cha@UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD\\,thresh\\=0x1@",
+ "MetricExpr": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD@thresh\\=1@",
"MetricGroup": "Mem;MemoryBW;SoC",
"MetricName": "tma_info_system_mem_parallel_reads",
"PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches"
@@ -1362,7 +1382,7 @@
"MetricExpr": "(CORE_POWER.LVL0_TURBO_LICENSE / 2 / tma_info_core_core_clks if #SMT_on else CORE_POWER.LVL0_TURBO_LICENSE / tma_info_core_core_clks)",
"MetricGroup": "Power",
"MetricName": "tma_info_system_power_license0_utilization",
- "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes"
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes."
},
{
"BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1",
@@ -1370,7 +1390,7 @@
"MetricGroup": "Power",
"MetricName": "tma_info_system_power_license1_utilization",
"MetricThreshold": "tma_info_system_power_license1_utilization > 0.5",
- "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions"
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions."
},
{
"BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX)",
@@ -1378,7 +1398,7 @@
"MetricGroup": "Power",
"MetricName": "tma_info_system_power_license2_utilization",
"MetricThreshold": "tma_info_system_power_license2_utilization > 0.5",
- "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX). This includes high current AVX 512-bit instructions"
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX). This includes high current AVX 512-bit instructions."
},
{
"BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
@@ -1412,7 +1432,7 @@
"MetricName": "tma_info_system_uncore_frequency"
},
{
- "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active",
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Pipeline",
"MetricName": "tma_info_thread_clks"
@@ -1421,15 +1441,14 @@
"BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / tma_info_thread_ipc",
"MetricGroup": "Mem;Pipeline",
- "MetricName": "tma_info_thread_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_thread_cpi"
},
{
"BriefDescription": "The ratio of Executed- by Issued-Uops",
"MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
"MetricGroup": "Cor;Pipeline",
"MetricName": "tma_info_thread_execute_per_issue",
- "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage"
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
},
{
"BriefDescription": "Instructions Per Cycle (per Logical Processor)",
@@ -1455,15 +1474,15 @@
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
- "MetricThreshold": "tma_info_thread_uptb < 4 * 1.5"
+ "MetricThreshold": "tma_info_thread_uptb < 6"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
"MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
- "MetricThreshold": "tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS, FRONTEND_RETIRED.ITLB_MISS",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
"ScaleUnit": "100%"
},
{
@@ -1471,26 +1490,27 @@
"MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_thread_clks, 0)",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
"MetricName": "tma_l1_bound",
- "MetricThreshold": "tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 Data (L1D) cache. The L1D cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1D. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache",
+ "BriefDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "min(2 * (MEM_INST_RETIRED.ALL_LOADS - MEM_LOAD_RETIRED.FB_HIT - MEM_LOAD_RETIRED.L1_MISS) * 20 / 100, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_l1_latency_dependency",
- "MetricThreshold": "tma_l1_latency_dependency > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache. The short latency of the L1D cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
+ "MetricThreshold": "tma_l1_latency_dependency > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache. The short latency of the L1D cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=0x1@) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks)",
+ "MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks)",
"MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
- "MetricThreshold": "tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT",
"ScaleUnit": "100%"
},
@@ -1499,7 +1519,7 @@
"MetricExpr": "3.5 * tma_info_system_core_frequency * MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_l2_bound_group",
"MetricName": "tma_l2_hit_latency",
- "MetricThreshold": "tma_l2_hit_latency > 0.05 & tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l2_hit_latency > 0.05 & (tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L2 cache under unloaded scenarios (possibly L2 latency limited). Avoiding L1 cache misses (i.e. L1 misses/L2 hits) will improve the latency. Sample with: MEM_LOAD_RETIRED.L2_HIT",
"ScaleUnit": "100%"
},
@@ -1508,17 +1528,17 @@
"MetricExpr": "(CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS) / tma_info_thread_clks",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
- "MetricThreshold": "tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
- "MetricExpr": "(20.5 * tma_info_system_core_frequency - 3.5 * tma_info_system_core_frequency) * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks",
+ "MetricExpr": "17 * tma_info_system_core_frequency * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
- "MetricThreshold": "tma_l3_hit_latency > 0.1 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT. Related metrics: tma_bottleneck_cache_memory_latency, tma_branch_resteers, tma_mem_latency, tma_store_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_bottleneck_data_cache_memory_latency, tma_mem_latency",
"ScaleUnit": "100%"
},
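The L3 hit latency rewrite folds (20.5 - 3.5) cycles-per-GHz into a single 17, i.e. it charges L3 hits only their latency above an L2 hit; the local-DRAM entry below gets the same treatment (80 - 20.5 = 59.5). A one-line sanity check of the folding:

f = 2.5  # hypothetical tma_info_system_core_frequency in GHz
assert 20.5 * f - 3.5 * f == 17 * f and 80 * f - 20.5 * f == 59.5 * f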
{
@@ -1526,22 +1546,23 @@
"MetricExpr": "DECODE.LCP / tma_info_thread_clks",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_lcp",
- "MetricThreshold": "tma_lcp > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation)",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
"MetricExpr": "tma_retiring - tma_heavy_operations",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_light_operations",
"MetricThreshold": "tma_light_operations > 0.6",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_2 + UOPS_DISPATCHED_PORT.PORT_3 + UOPS_DISPATCHED_PORT.PORT_7 - UOPS_DISPATCHED_PORT.PORT_4) / (2 * tma_info_core_core_clks)",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_load_op_utilization",
@@ -1555,7 +1576,7 @@
"MetricExpr": "tma_dtlb_load - tma_load_stlb_miss",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
"MetricName": "tma_load_stlb_hit",
- "MetricThreshold": "tma_load_stlb_hit > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_hit > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
@@ -1563,48 +1584,52 @@
"MetricExpr": "DTLB_LOAD_MISSES.WALK_ACTIVE / tma_info_thread_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
"MetricName": "tma_load_stlb_miss",
- "MetricThreshold": "tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data load accesses.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_1G / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_1g",
- "MetricThreshold": "tma_load_stlb_miss_1g > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_1g > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data load accesses.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_2m",
- "MetricThreshold": "tma_load_stlb_miss_2m > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_2m > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data load accesses.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_4K / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_4k",
- "MetricThreshold": "tma_load_stlb_miss_4k > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_4k > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory",
- "MetricExpr": "(80 * tma_info_system_core_frequency - 20.5 * tma_info_system_core_frequency) * MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "59.5 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "Server;TopdownL5;tma_L5_group;tma_mem_latency_group",
"MetricName": "tma_local_mem",
- "MetricThreshold": "tma_local_mem > 0.1 & tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_local_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. Sample with: MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(12 * max(0, MEM_INST_RETIRED.LOCK_LOADS - L2_RQSTS.ALL_RFO) + MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES * (11 * L2_RQSTS.RFO_HIT + min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO))) / tma_info_thread_clks",
"MetricGroup": "LockCont;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
- "MetricThreshold": "tma_lock_latency > 0.2 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
"ScaleUnit": "100%"
},
@@ -1621,11 +1646,11 @@
},
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
- "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=0x4@) / tma_info_thread_clks",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
- "MetricThreshold": "tma_mem_bandwidth > 0.2 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
"ScaleUnit": "100%"
},
{
@@ -1633,8 +1658,8 @@
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
"MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
- "MetricThreshold": "tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_bottleneck_cache_memory_latency, tma_l3_hit_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_bottleneck_data_cache_memory_latency, tma_l3_hit_latency",
"ScaleUnit": "100%"
},
{
@@ -1645,11 +1670,11 @@
"MetricName": "tma_memory_bound",
"MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two)",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations , uops for memory load or store accesses",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
"MetricExpr": "tma_light_operations * MEM_INST_RETIRED.ANY / INST_RETIRED.ANY",
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_memory_operations",
@@ -1658,7 +1683,6 @@
},
{
"BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_thread_slots",
"MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
"MetricName": "tma_microcode_sequencer",
@@ -1668,10 +1692,11 @@
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
"MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
"MetricName": "tma_mispredicts_resteers",
- "MetricThreshold": "tma_mispredicts_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_bottleneck_mispredictions, tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost",
"ScaleUnit": "100%"
},
@@ -1685,12 +1710,12 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued , the Count Domain; [ADL+] cycles)",
+ "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles)",
"MetricExpr": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH / UOPS_ISSUED.ANY",
"MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group",
"MetricName": "tma_mixing_vectors",
"MetricThreshold": "tma_mixing_vectors > 0.05",
- "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued , the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
+ "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
"ScaleUnit": "100%"
},
{
@@ -1698,7 +1723,7 @@
"MetricExpr": "2 * IDQ.MS_SWITCHES / tma_info_thread_clks",
"MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
"MetricName": "tma_ms_switches",
- "MetricThreshold": "tma_ms_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_bottleneck_irregular_overhead, tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
"ScaleUnit": "100%"
},
@@ -1708,7 +1733,7 @@
"MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_non_fused_branches",
"MetricThreshold": "tma_non_fused_branches > 0.1 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused.",
"ScaleUnit": "100%"
},
{
@@ -1716,12 +1741,13 @@
"MetricExpr": "tma_light_operations * INST_RETIRED.NOP / UOPS_RETIRED.RETIRE_SLOTS",
"MetricGroup": "BvBO;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_nop_instructions",
- "MetricThreshold": "tma_nop_instructions > 0.1 & tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
+ "MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP_PS",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches))",
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_other_light_ops",
@@ -1730,19 +1756,21 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types)",
+ "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "max(tma_branch_mispredicts * (1 - BR_MISP_RETIRED.ALL_BRANCHES / (INT_MISC.CLEARS_COUNT - MACHINE_CLEARS.COUNT)), 0.0001)",
"MetricGroup": "BrMispredicts;BvIO;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_other_mispredicts",
- "MetricThreshold": "tma_other_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering",
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "max(tma_machine_clears * (1 - MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.COUNT), 0.0001)",
"MetricGroup": "BvIO;Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_other_nukes",
- "MetricThreshold": "tma_other_nukes > 0.05 & tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
@@ -1751,7 +1779,7 @@
"MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_0",
"MetricThreshold": "tma_port_0 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED_PORT.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1760,7 +1788,7 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_1",
"MetricThreshold": "tma_port_1 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1796,7 +1824,7 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_5",
"MetricThreshold": "tma_port_5 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1805,7 +1833,7 @@
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_6",
"MetricThreshold": "tma_port_6 > 0.6",
- "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
{
@@ -1819,11 +1847,12 @@
},
{
"BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "((tma_ports_utilized_0 * tma_info_thread_clks + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / tma_info_thread_clks if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_thread_clks)",
"MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_ports_utilization",
- "MetricThreshold": "tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
"ScaleUnit": "100%"
},
{
@@ -1831,8 +1860,8 @@
"MetricExpr": "EXE_ACTIVITY.EXE_BOUND_0_PORTS / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
- "MetricThreshold": "tma_ports_utilized_0 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
"ScaleUnit": "100%"
},
{
@@ -1840,7 +1869,7 @@
"MetricExpr": "((UOPS_EXECUTED.CORE_CYCLES_GE_1 - UOPS_EXECUTED.CORE_CYCLES_GE_2) / 2 if #SMT_on else EXE_ACTIVITY.1_PORTS_UTIL) / tma_info_core_core_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_1",
- "MetricThreshold": "tma_ports_utilized_1 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Related metrics: tma_l1_bound",
"ScaleUnit": "100%"
},
@@ -1849,35 +1878,35 @@
"MetricExpr": "((UOPS_EXECUTED.CORE_CYCLES_GE_2 - UOPS_EXECUTED.CORE_CYCLES_GE_3) / 2 if #SMT_on else EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_core_core_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_2",
- "MetricThreshold": "tma_ports_utilized_2 > 0.15 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).",
"MetricExpr": "(UOPS_EXECUTED.CORE_CYCLES_GE_3 / 2 if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_3) / tma_info_core_core_clks",
"MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
- "MetricThreshold": "tma_ports_utilized_3m > 0.4 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues",
"MetricConstraint": "NO_GROUP_EVENTS_NMI",
- "MetricExpr": "((110 * tma_info_system_core_frequency - 20.5 * tma_info_system_core_frequency) * MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM + (110 * tma_info_system_core_frequency - 20.5 * tma_info_system_core_frequency) * MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "(89.5 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM + 89.5 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "Offcore;Server;Snoop;TopdownL5;tma_L5_group;tma_issueSyncxn;tma_mem_latency_group",
"MetricName": "tma_remote_cache",
- "MetricThreshold": "tma_remote_cache > 0.05 & tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues. This is caused often due to non-optimal NUMA allocations. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM, MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_remote_cache > 0.05 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM_PS;MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD_PS. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory",
- "MetricExpr": "(147.5 * tma_info_system_core_frequency - 20.5 * tma_info_system_core_frequency) * MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "127 * tma_info_system_core_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "Server;Snoop;TopdownL5;tma_L5_group;tma_mem_latency_group",
"MetricName": "tma_remote_mem",
- "MetricThreshold": "tma_remote_mem > 0.1 & tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
+ "MetricThreshold": "tma_remote_mem > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM_PS",
"ScaleUnit": "100%"
},
{
@@ -1895,7 +1924,7 @@
"MetricExpr": "PARTIAL_RAT_STALLS.SCOREBOARD / tma_info_thread_clks",
"MetricGroup": "BvIO;PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
"MetricName": "tma_serializing_operation",
- "MetricThreshold": "tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: PARTIAL_RAT_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
"ScaleUnit": "100%"
},
@@ -1906,7 +1935,7 @@
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_split_loads",
"MetricThreshold": "tma_split_loads > 0.3",
- "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS_PS",
"ScaleUnit": "100%"
},
{
@@ -1914,8 +1943,8 @@
"MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
"MetricName": "tma_split_stores",
- "MetricThreshold": "tma_split_stores > 0.2 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES. Related metrics: tma_port_4",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
"ScaleUnit": "100%"
},
{
@@ -1923,8 +1952,8 @@
"MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_core_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
- "MetricThreshold": "tma_sq_full > 0.3 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
"ScaleUnit": "100%"
},
{
@@ -1932,8 +1961,8 @@
"MetricExpr": "EXE_ACTIVITY.BOUND_ON_STORES / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_store_bound",
- "MetricThreshold": "tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES_PS",
"ScaleUnit": "100%"
},
{
@@ -1941,8 +1970,8 @@
"MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
- "MetricThreshold": "tma_store_fwd_blk > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
"ScaleUnit": "100%"
},
{
@@ -1951,8 +1980,8 @@
"MetricExpr": "(L2_RQSTS.RFO_HIT * 11 * (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) + (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
"MetricGroup": "BvML;LockCont;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
- "MetricThreshold": "tma_store_latency > 0.1 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_branch_resteers, tma_fb_full, tma_l3_hit_latency, tma_lock_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
"ScaleUnit": "100%"
},
{
@@ -1968,7 +1997,7 @@
"MetricExpr": "tma_dtlb_store - tma_store_stlb_miss",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
"MetricName": "tma_store_stlb_hit",
- "MetricThreshold": "tma_store_stlb_hit > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_hit > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
@@ -1976,31 +2005,34 @@
"MetricExpr": "DTLB_STORE_MISSES.WALK_ACTIVE / tma_info_core_core_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
"MetricName": "tma_store_stlb_miss",
- "MetricThreshold": "tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data store accesses.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_1G / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_1g",
- "MetricThreshold": "tma_store_stlb_miss_1g > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_1g > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data store accesses.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_2m",
- "MetricThreshold": "tma_store_stlb_miss_2m > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_2m > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data store accesses.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_4K / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_4k",
- "MetricThreshold": "tma_store_stlb_miss_4k > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_4k > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
@@ -2008,7 +2040,7 @@
"MetricExpr": "9 * BACLEARS.ANY / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
- "MetricThreshold": "tma_unknown_branches > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: BACLEARS.ANY",
"ScaleUnit": "100%"
},
@@ -2017,8 +2049,8 @@
"MetricExpr": "tma_retiring * UOPS_EXECUTED.X87 / UOPS_EXECUTED.THREAD",
"MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
"MetricName": "tma_x87_use",
- "MetricThreshold": "tma_x87_use > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
"ScaleUnit": "100%"
},
{
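The skylakex hunks above make two kinds of mechanical rewrite that are worth sanity-checking. The latency expressions for tma_local_mem, tma_remote_cache and tma_remote_mem replace "(A * tma_info_system_core_frequency - B * tma_info_system_core_frequency)" with the folded constant "(A - B) * tma_info_system_core_frequency", and the MetricThreshold strings only add explicit parentheses to chains of '&', which is associative whether read as logical or bitwise AND, so neither change affects computed values. A minimal standalone check of both claims follows; the constants are copied from the hunks, while the sample frequency ratio and the rest of the scaffolding are illustrative only and not part of the patch.

    # Sketch: verify the two value-preserving rewrites in the metric hunks above.

    # 1) Latency constants: "(A * f - B * f)" was folded to "(A - B) * f".
    for a, b, folded in [(80.0, 20.5, 59.5),    # tma_local_mem
                         (110.0, 20.5, 89.5),   # tma_remote_cache
                         (147.5, 20.5, 127.0)]: # tma_remote_mem
        f = 2.5  # arbitrary sample core-frequency ratio, exact in binary float
        assert a * f - b * f == (a - b) * f == folded * f

    # 2) MetricThreshold grouping: AND is associative, so
    #    "w & x & y & z" and "w & (x & (y & z))" are the same predicate.
    from itertools import product
    for w, x, y, z in product([False, True], repeat=4):
        assert (w and x and y and z) == (w and (x and (y and z)))
    print("metric rewrites are value-preserving")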
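Similarly, in the snowridgex cache.json hunks that follow, the OCR.* offcore-response events encode request-type and response bits in MSRValue. As a cross-check of the values shown for OCR.ALL_CODE_RD: the L3_HIT umbrella mask 0x1F803C0044 equals the OR of the five SNOOP_* sub-event masks plus one extra bit (bit 31). Reading that leftover bit as the snoop-not-sent/none response class is an assumption on my part; the patch itself does not name it.

    # Hedged sketch: mask values are copied from the MSRValue fields below;
    # the meaning attached to bit 31 is an assumption, not stated in the patch.
    snoop_variants = [
        0x10003C0044,  # OCR.ALL_CODE_RD.L3_HIT.SNOOP_HITM
        0x8003C0044,   # OCR.ALL_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD
        0x4003C0044,   # OCR.ALL_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD
        0x2003C0044,   # OCR.ALL_CODE_RD.L3_HIT.SNOOP_MISS
        0x1003C0044,   # OCR.ALL_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED
    ]
    combined = 0
    for mask in snoop_variants:
        combined |= mask
    assert combined == 0x1F003C0044
    # The umbrella event sets one additional response bit (bit 31),
    # presumably a snoop-none class with no standalone event in this file.
    assert 0x1F803C0044 == combined | (1 << 31)
    print("OCR.ALL_CODE_RD.L3_HIT mask composition checks out")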
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/cache.json b/tools/perf/pmu-events/arch/x86/snowridgex/cache.json
index 7882dca9d5e1..3410caf8a57a 100644
--- a/tools/perf/pmu-events/arch/x86/snowridgex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/cache.json
@@ -161,6 +161,7 @@
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
"PEBS": "1",
+ "PublicDescription": "Counts the number of load uops retired that hit in DRAM. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x80"
},
@@ -171,6 +172,7 @@
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.HITM",
"PEBS": "1",
+ "PublicDescription": "Counts the number of load uops retired that hit in the L3 cache, in which a snoop was required and modified data was forwarded from another core or module. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x20"
},
@@ -181,6 +183,7 @@
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
"PEBS": "1",
+ "PublicDescription": "Counts the number of load uops retired that hit in the L1 data cache. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x1"
},
@@ -191,6 +194,7 @@
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"PEBS": "1",
+ "PublicDescription": "Counts the number of load uops retired that miss in the L1 data cache. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x8"
},
@@ -201,6 +205,7 @@
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"PEBS": "1",
+ "PublicDescription": "Counts the number of load uops retired that hit in the L2 cache. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x2"
},
@@ -211,6 +216,7 @@
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"PEBS": "1",
+ "PublicDescription": "Counts the number of load uops retired that miss in the L2 cache. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x10"
},
@@ -221,6 +227,7 @@
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
"PEBS": "1",
+ "PublicDescription": "Counts the number of load uops retired that hit in the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x4"
},
@@ -231,7 +238,7 @@
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.ALL",
"PEBS": "1",
- "PublicDescription": "Counts the number of memory uops retired. A single uop that performs both a load AND a store will be counted as 1, not 2 (e.g. ADD [mem], CONST)",
+ "PublicDescription": "Counts the number of memory uops retired. A single uop that performs both a load AND a store will be counted as 1, not 2 (e.g. ADD [mem], CONST) Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x83"
},
@@ -242,7 +249,7 @@
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"PEBS": "1",
- "PublicDescription": "Counts the total number of load uops retired.",
+ "PublicDescription": "Counts the total number of load uops retired. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x81"
},
@@ -253,7 +260,7 @@
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"PEBS": "1",
- "PublicDescription": "Counts the total number of store uops retired.",
+ "PublicDescription": "Counts the total number of store uops retired. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x82"
},
@@ -264,6 +271,7 @@
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"PEBS": "1",
+ "PublicDescription": "Counts the number of load uops retired that performed one or more locks. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x21"
},
@@ -274,6 +282,7 @@
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.SPLIT",
"PEBS": "1",
+ "PublicDescription": "Counts the number of memory uops retired that were splits. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x43"
},
@@ -284,6 +293,7 @@
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"PEBS": "1",
+ "PublicDescription": "Counts the number of retired split load uops. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x41"
},
@@ -294,6 +304,7 @@
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"PEBS": "1",
+ "PublicDescription": "Counts the number of retired split store uops. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x42"
},
@@ -304,6 +315,7 @@
"EventName": "OCR.ALL_CODE_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F803C0044",
+ "PublicDescription": "Counts all code reads that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -314,6 +326,7 @@
"EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0044",
+ "PublicDescription": "Counts all code reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -324,6 +337,7 @@
"EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0044",
+ "PublicDescription": "Counts all code reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -334,6 +348,7 @@
"EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0044",
+ "PublicDescription": "Counts all code reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -344,6 +359,7 @@
"EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0044",
+ "PublicDescription": "Counts all code reads that were supplied by the L3 cache where a snoop was sent but the snoop missed. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -354,6 +370,18 @@
"EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0044",
+ "PublicDescription": "Counts all code reads that were supplied by the L3 cache where no snoop was needed to satisfy the request. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.COREWB_M.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3000000010000",
+ "PublicDescription": "Counts modified writebacks from L1 cache and L2 cache that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -364,6 +392,29 @@
"EventName": "OCR.COREWB_M.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3001F803C0000",
+ "PublicDescription": "Counts modified writebacks from L1 cache and L2 cache that were supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.COREWB_M.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8003000000000000",
+ "PublicDescription": "Counts modified writebacks from L1 cache and L2 cache that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency). Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -374,6 +425,7 @@
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F803C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -384,6 +436,7 @@
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -394,6 +447,7 @@
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -404,6 +458,7 @@
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -414,6 +469,7 @@
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent but the snoop missed. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -424,6 +480,18 @@
"EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where no snoop was needed to satisfy the request. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "PublicDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -434,6 +502,7 @@
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F803C0001",
+ "PublicDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -444,6 +513,7 @@
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0001",
+ "PublicDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -454,6 +524,7 @@
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0001",
+ "PublicDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -464,6 +535,7 @@
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0001",
+ "PublicDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -474,6 +546,7 @@
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0001",
+ "PublicDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent but the snoop missed. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -484,6 +557,30 @@
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0001",
+ "PublicDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where no snoop was needed to satisfy the request. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000001",
+ "PublicDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency). Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.ANY_RESPONSE",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "PublicDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.ANY_RESPONSE Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -495,6 +592,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F803C0001",
+ "PublicDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -506,6 +604,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0001",
+ "PublicDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HITM Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -517,6 +616,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0001",
+ "PublicDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HIT_NO_FWD Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -528,6 +628,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0001",
+ "PublicDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HIT_WITH_FWD Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -539,6 +640,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0001",
+ "PublicDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_MISS Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -550,6 +652,30 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0001",
+ "PublicDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_NOT_NEEDED Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.OUTSTANDING",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000001",
+ "PublicDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.OUTSTANDING Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -560,6 +686,7 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F803C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -570,6 +697,7 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -580,6 +708,7 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -590,6 +719,7 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -600,6 +730,7 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent but the snoop missed. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -610,6 +741,18 @@
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where no snoop was needed to satisfy the request. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -620,6 +763,18 @@
"EventName": "OCR.FULL_STREAMING_WR.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x801F803C0000",
+ "PublicDescription": "Counts streaming stores which modify a full 64 byte cacheline that were supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetches and software prefetches (except PREFETCHW and PFRFO) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10400",
+ "PublicDescription": "Counts L1 data cache hardware prefetches and software prefetches (except PREFETCHW and PFRFO) that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -630,6 +785,18 @@
"EventName": "OCR.HWPF_L1D_AND_SWPF.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0400",
+ "PublicDescription": "Counts L1 data cache hardware prefetches and software prefetches (except PREFETCHW and PFRFO) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10040",
+ "PublicDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -640,6 +807,7 @@
"EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F803C0040",
+ "PublicDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -650,6 +818,7 @@
"EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0040",
+ "PublicDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -660,6 +829,7 @@
"EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0040",
+ "PublicDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -670,6 +840,7 @@
"EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0040",
+ "PublicDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -680,6 +851,7 @@
"EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0040",
+ "PublicDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent but the snoop missed. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -690,6 +862,29 @@
"EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0040",
+ "PublicDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where no snoop was needed to satisfy the request. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000040",
+ "PublicDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency). Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10010",
+ "PublicDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -700,6 +895,7 @@
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F803C0010",
+ "PublicDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -710,6 +906,7 @@
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0010",
+ "PublicDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -720,6 +917,7 @@
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0010",
+ "PublicDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -730,6 +928,7 @@
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0010",
+ "PublicDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -740,6 +939,7 @@
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0010",
+ "PublicDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent but the snoop missed. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -750,6 +950,18 @@
"EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0010",
+ "PublicDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where no snoop was needed to satisfy the request. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10020",
+ "PublicDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -760,6 +972,7 @@
"EventName": "OCR.HWPF_L2_RFO.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F803C0020",
+ "PublicDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -770,6 +983,7 @@
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0020",
+ "PublicDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -780,6 +994,7 @@
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0020",
+ "PublicDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -790,6 +1005,7 @@
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0020",
+ "PublicDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -800,6 +1016,7 @@
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0020",
+ "PublicDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent but the snoop missed. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -810,6 +1027,29 @@
"EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0020",
+ "PublicDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where no snoop was needed to satisfy the request. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000020",
+ "PublicDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency). Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.L1WB_M.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000000010000",
+ "PublicDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -820,6 +1060,18 @@
"EventName": "OCR.L1WB_M.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1001F803C0000",
+ "PublicDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that were supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.L2WB_M.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000000010000",
+ "PublicDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -830,6 +1082,7 @@
"EventName": "OCR.L2WB_M.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2001F803C0000",
+ "PublicDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -840,6 +1093,18 @@
"EventName": "OCR.PARTIAL_STREAMING_WR.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x401F803C0000",
+ "PublicDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that were supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10477",
+ "PublicDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -850,6 +1115,7 @@
"EventName": "OCR.READS_TO_CORE.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F803C0477",
+ "PublicDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -860,6 +1126,7 @@
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0477",
+ "PublicDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -870,6 +1137,7 @@
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0477",
+ "PublicDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -880,6 +1148,7 @@
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0477",
+ "PublicDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -890,6 +1159,7 @@
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0477",
+ "PublicDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where a snoop was sent but the snoop missed. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -900,6 +1170,18 @@
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0477",
+ "PublicDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where no snoop was needed to satisfy the request. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000477",
+ "PublicDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -910,6 +1192,7 @@
"EventName": "OCR.STREAMING_WR.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1F803C0800",
+ "PublicDescription": "Counts streaming stores that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -920,6 +1203,7 @@
"EventName": "OCR.UC_RD.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x101F803C0000",
+ "PublicDescription": "Counts uncached memory reads that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -930,6 +1214,7 @@
"EventName": "OCR.UC_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1010003C0000",
+ "PublicDescription": "Counts uncached memory reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -940,6 +1225,7 @@
"EventName": "OCR.UC_RD.L3_HIT.SNOOP_HIT_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004003C0000",
+ "PublicDescription": "Counts uncached memory reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -950,6 +1236,7 @@
"EventName": "OCR.UC_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1008003C0000",
+ "PublicDescription": "Counts uncached memory reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -960,6 +1247,7 @@
"EventName": "OCR.UC_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1002003C0000",
+ "PublicDescription": "Counts uncached memory reads that were supplied by the L3 cache where a snoop was sent but the snoop missed. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -970,6 +1258,7 @@
"EventName": "OCR.UC_RD.L3_HIT.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1001003C0000",
+ "PublicDescription": "Counts uncached memory reads that were supplied by the L3 cache where no snoop was needed to satisfy the request. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -980,6 +1269,7 @@
"EventName": "OCR.UC_WR.L3_HIT",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x201F803C0000",
+ "PublicDescription": "Counts uncached memory writes that were supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
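
A note on the MSRValue encodings: every OCR.* entry above shares EventCode 0XB7 and differs only in its MSRValue, which packs the request type into the low 16 bits and the response/supplier/snoop selection into the upper bits, with bit 63 marking the OUTSTANDING (latency-counting) mode. The following is a minimal Python sketch of that split, offered for orientation only: the request-bit table is read directly off the ANY_RESPONSE entries in this patch, while the field boundaries follow the usual offcore-response convention and should be treated as assumptions, not something this patch defines.

# Hedged sketch: decode an OCR MSRValue from the snowridgex event files.
# Request bits come from the ANY_RESPONSE entries above; the field layout
# (low 16 bits = request type, bit 16 = ANY_RESPONSE, bit 63 = OUTSTANDING)
# is the conventional offcore-response split and is an assumption here.

REQUEST_BITS = {
    0x0001: "DEMAND_DATA_AND_L1PF_RD",
    0x0002: "DEMAND_RFO",
    0x0004: "DEMAND_CODE_RD",
    0x0010: "HWPF_L2_DATA_RD",
    0x0020: "HWPF_L2_RFO",
    0x0040: "HWPF_L2_CODE_RD",
    0x0400: "HWPF_L1D_AND_SWPF",
    0x0800: "STREAMING_WR",
}

def decode_ocr(msr_value: int) -> dict:
    """Split an OCR MSRValue into request and response components."""
    requests = [name for bit, name in REQUEST_BITS.items() if msr_value & bit]
    return {
        "requests": requests or ["<not in the table above>"],
        "any_response": bool(msr_value & (1 << 16)),
        "response_bits": hex((msr_value >> 16) & ((1 << 47) - 1)),
        "outstanding": bool(msr_value & (1 << 63)),
    }

if __name__ == "__main__":
    print(decode_ocr(0x1F803C0477))          # OCR.READS_TO_CORE.L3_HIT
    print(decode_ocr(0x8000000000000002))    # OCR.DEMAND_RFO.OUTSTANDING
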
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/floating-point.json b/tools/perf/pmu-events/arch/x86/snowridgex/floating-point.json
index 79a4beba4b78..f47d97dfe0d9 100644
--- a/tools/perf/pmu-events/arch/x86/snowridgex/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/floating-point.json
@@ -23,6 +23,7 @@
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.FPDIV",
"PEBS": "1",
+ "PublicDescription": "Counts the number of floating point divide uops retired (x87 and SSE, including x87 sqrt). Available PDIST counters: 0",
"SampleAfterValue": "2000003",
"UMask": "0x8"
}
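
Apart from the events moved between files, the series is mechanical: append an "Available PDIST counters: 0" note to each event's PublicDescription, creating the field where only a BriefDescription existed. A short, self-contained sketch of how one might audit that against a checked-out tree follows; the path is an assumption about where the tree lives, and the JSON layout (a top-level array of event objects) matches the hunks above.

# Hedged sketch: list events in one snowridgex JSON file that carry the
# "Available PDIST counters" note this series adds. The path below is an
# assumption -- adjust it to wherever the kernel tree is checked out.

import json
from pathlib import Path

EVENTS_JSON = Path("tools/perf/pmu-events/arch/x86/snowridgex/floating-point.json")

def pdist_events(path: Path):
    """Yield (EventName, counters) for entries mentioning PDIST counters."""
    for event in json.loads(path.read_text()):
        desc = event.get("PublicDescription", "")
        if "Available PDIST counters" in desc:
            yield event["EventName"], desc.rsplit(":", 1)[-1].strip()

if __name__ == "__main__":
    for name, counters in pdist_events(EVENTS_JSON):
        print(f"{name}: PDIST counters {counters}")
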
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/memory.json b/tools/perf/pmu-events/arch/x86/snowridgex/memory.json
index 34306ec24e9b..417cd78fc048 100644
--- a/tools/perf/pmu-events/arch/x86/snowridgex/memory.json
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/memory.json
@@ -13,6 +13,7 @@
"EventCode": "0x13",
"EventName": "MISALIGN_MEM_REF.LOAD_PAGE_SPLIT",
"PEBS": "1",
+ "PublicDescription": "Counts the number of misaligned load uops that are 4K page splits. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x2"
},
@@ -22,16 +23,29 @@
"EventCode": "0x13",
"EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT",
"PEBS": "1",
+ "PublicDescription": "Counts the number of misaligned store uops that are 4K page splits. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x4"
},
{
+ "BriefDescription": "Counts all code reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000044",
+ "PublicDescription": "Counts all code reads that were supplied by DRAM. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts all code reads that were not supplied by the L3 cache.",
"Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.ALL_CODE_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000044",
+ "PublicDescription": "Counts all code reads that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -42,6 +56,18 @@
"EventName": "OCR.ALL_CODE_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000044",
+ "PublicDescription": "Counts all code reads that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000044",
+ "PublicDescription": "Counts all code reads that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -52,6 +78,7 @@
"EventName": "OCR.COREWB_M.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3002184000000",
+ "PublicDescription": "Counts modified writebacks from L1 cache and L2 cache that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -62,6 +89,18 @@
"EventName": "OCR.COREWB_M.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3002184000000",
+ "PublicDescription": "Counts modified writebacks from L1 cache and L2 cache that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -72,6 +111,7 @@
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -82,6 +122,29 @@
"EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000004",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "PublicDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -92,6 +155,7 @@
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000001",
+ "PublicDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -102,6 +166,30 @@
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000001",
+ "PublicDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "PublicDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by DRAM. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.DRAM",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "PublicDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.DRAM Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -113,6 +201,7 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000001",
+ "PublicDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_MISS Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -124,6 +213,30 @@
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000001",
+ "PublicDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_MISS_LOCAL Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.LOCAL_DRAM",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "PublicDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.LOCAL_DRAM Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -134,6 +247,7 @@
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -144,6 +258,18 @@
"EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -154,6 +280,7 @@
"EventName": "OCR.FULL_STREAMING_WR.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x802184000000",
+ "PublicDescription": "Counts streaming stores which modify a full 64 byte cacheline that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -164,6 +291,18 @@
"EventName": "OCR.FULL_STREAMING_WR.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x802184000000",
+ "PublicDescription": "Counts streaming stores which modify a full 64 byte cacheline that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000040",
+ "PublicDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -174,6 +313,7 @@
"EventName": "OCR.HWPF_L2_CODE_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000040",
+ "PublicDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -184,6 +324,29 @@
"EventName": "OCR.HWPF_L2_CODE_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000040",
+ "PublicDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000040",
+ "PublicDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by DRAM. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000010",
+ "PublicDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -194,6 +357,7 @@
"EventName": "OCR.HWPF_L2_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000010",
+ "PublicDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -204,6 +368,29 @@
"EventName": "OCR.HWPF_L2_DATA_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000010",
+ "PublicDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000010",
+ "PublicDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by DRAM. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000020",
+ "PublicDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -214,6 +401,7 @@
"EventName": "OCR.HWPF_L2_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000020",
+ "PublicDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -224,6 +412,18 @@
"EventName": "OCR.HWPF_L2_RFO.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000020",
+ "PublicDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000020",
+ "PublicDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -234,6 +434,7 @@
"EventName": "OCR.L1WB_M.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1002184000000",
+ "PublicDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -244,6 +445,7 @@
"EventName": "OCR.L1WB_M.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1002184000000",
+ "PublicDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -254,6 +456,7 @@
"EventName": "OCR.L2WB_M.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2002184000000",
+ "PublicDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -264,6 +467,7 @@
"EventName": "OCR.L2WB_M.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2002184000000",
+ "PublicDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -274,6 +478,7 @@
"EventName": "OCR.OTHER.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184008000",
+ "PublicDescription": "Counts miscellaneous requests, such as I/O accesses, that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -284,6 +489,7 @@
"EventName": "OCR.OTHER.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184008000",
+ "PublicDescription": "Counts miscellaneous requests, such as I/O accesses, that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -294,6 +500,7 @@
"EventName": "OCR.PARTIAL_STREAMING_WR.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x402184000000",
+ "PublicDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -304,6 +511,7 @@
"EventName": "OCR.PARTIAL_STREAMING_WR.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x402184000000",
+ "PublicDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -314,6 +522,18 @@
"EventName": "OCR.PREFETCHES.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000470",
+ "PublicDescription": "Counts all hardware and software prefetches that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000477",
+ "PublicDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -324,6 +544,7 @@
"EventName": "OCR.READS_TO_CORE.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000477",
+ "PublicDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -334,6 +555,18 @@
"EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000477",
+ "PublicDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000477",
+ "PublicDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -344,6 +577,7 @@
"EventName": "OCR.STREAMING_WR.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000800",
+ "PublicDescription": "Counts streaming stores that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -354,6 +588,18 @@
"EventName": "OCR.STREAMING_WR.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2184000800",
+ "PublicDescription": "Counts streaming stores that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100184000000",
+ "PublicDescription": "Counts uncached memory reads that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -364,6 +610,7 @@
"EventName": "OCR.UC_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x102184000000",
+ "PublicDescription": "Counts uncached memory reads that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -374,6 +621,18 @@
"EventName": "OCR.UC_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x102184000000",
+ "PublicDescription": "Counts uncached memory reads that were not supplied by the L3 cache. Available PDIST counters: 0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100184000000",
+ "PublicDescription": "Counts uncached memory reads that were supplied by DRAM. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -384,6 +643,7 @@
"EventName": "OCR.UC_WR.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x202184000000",
+ "PublicDescription": "Counts uncached memory writes that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -394,6 +654,7 @@
"EventName": "OCR.UC_WR.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x202184000000",
+ "PublicDescription": "Counts uncached memory writes that were not supplied by the L3 cache. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
}
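
One reason the OUTSTANDING and ANY_RESPONSE forms travel in pairs: OUTSTANDING accumulates cycles while a request is in flight (the "XQ to XQ latency" wording above), so dividing it by the matching request count approximates the average offcore latency, and the DRAM forms divide the same way into a locality ratio. The sketch below shows both derived metrics with made-up placeholder counts; the event names are from this patch, but the interpretation is the conventional one for offcore-response events rather than anything the patch itself states.

# Hedged sketch: derived metrics from the OCR event pairs above. The counts
# are made-up placeholders standing in for values read back from a perf run
# of OCR.DEMAND_DATA_AND_L1PF_RD.{OUTSTANDING,ANY_RESPONSE,DRAM}.

def avg_offcore_latency(outstanding_cycles: int, requests: int) -> float:
    """Average cycles from request to response (XQ to XQ), per request."""
    return outstanding_cycles / requests if requests else 0.0

def dram_share(dram_responses: int, any_responses: int) -> float:
    """Fraction of requests whose response was supplied by DRAM."""
    return dram_responses / any_responses if any_responses else 0.0

if __name__ == "__main__":
    counts = {"OUTSTANDING": 1_200_000, "ANY_RESPONSE": 10_000, "DRAM": 1_500}
    print(f"avg offcore latency: "
          f"{avg_offcore_latency(counts['OUTSTANDING'], counts['ANY_RESPONSE']):.1f} cycles")
    print(f"DRAM share: {dram_share(counts['DRAM'], counts['ANY_RESPONSE']):.1%}")
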
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/other.json b/tools/perf/pmu-events/arch/x86/snowridgex/other.json
index 57613207f7ad..2cdc6b64f31d 100644
--- a/tools/perf/pmu-events/arch/x86/snowridgex/other.json
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/other.json
@@ -113,26 +113,7 @@
"EventName": "OCR.ALL_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10044",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all code reads that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.ALL_CODE_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000044",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all code reads that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.ALL_CODE_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000044",
+ "PublicDescription": "Counts all code reads that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -143,180 +124,7 @@
"EventName": "OCR.ALL_CODE_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
"MSRValue": "0x8000000000000044",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.COREWB_M.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3000000010000",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.COREWB_M.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "MSRValue": "0x8003000000000000",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_CODE_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000004",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "MSRValue": "0x8000000000000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.ANY_RESPONSE",
- "Counter": "0,1,2,3",
- "Deprecated": "1",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.DRAM",
- "Counter": "0,1,2,3",
- "Deprecated": "1",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_DATA_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.LOCAL_DRAM",
- "Counter": "0,1,2,3",
- "Deprecated": "1",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.OUTSTANDING",
- "Counter": "0,1,2,3",
- "Deprecated": "1",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_DATA_RD.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "MSRValue": "0x8000000000000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_RFO.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.DEMAND_RFO.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "MSRValue": "0x8000000000000002",
+ "PublicDescription": "Counts all code reads that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -327,146 +135,7 @@
"EventName": "OCR.FULL_STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800000010000",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetches and software prefetches (except PREFETCHW and PFRFO) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.HWPF_L1D_AND_SWPF.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10400",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.HWPF_L2_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10040",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.HWPF_L2_CODE_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000040",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.HWPF_L2_CODE_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000040",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.HWPF_L2_CODE_RD.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "MSRValue": "0x8000000000000040",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.HWPF_L2_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10010",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.HWPF_L2_DATA_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000010",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.HWPF_L2_DATA_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000010",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.HWPF_L2_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10020",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.HWPF_L2_RFO.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000020",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.HWPF_L2_RFO.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000020",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.HWPF_L2_RFO.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "MSRValue": "0x8000000000000020",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.L1WB_M.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000000010000",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.L2WB_M.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x2000000010000",
+ "PublicDescription": "Counts streaming stores which modify a full 64 byte cacheline that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -477,6 +146,7 @@
"EventName": "OCR.OTHER.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x18000",
+ "PublicDescription": "Counts miscellaneous requests, such as I/O accesses, that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -487,6 +157,7 @@
"EventName": "OCR.PARTIAL_STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400000010000",
+ "PublicDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -497,46 +168,7 @@
"EventName": "OCR.PREFETCHES.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10470",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.READS_TO_CORE.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.READS_TO_CORE.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.READS_TO_CORE.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000477",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.READS_TO_CORE.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "MSRValue": "0x8000000000000477",
+ "PublicDescription": "Counts all hardware and software prefetches that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -547,6 +179,7 @@
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10800",
+ "PublicDescription": "Counts streaming stores that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -557,26 +190,7 @@
"EventName": "OCR.UC_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100000010000",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts uncached memory reads that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.UC_RD.DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x100184000000",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts uncached memory reads that were supplied by DRAM.",
- "Counter": "0,1,2,3",
- "EventCode": "0XB7",
- "EventName": "OCR.UC_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x100184000000",
+ "PublicDescription": "Counts uncached memory reads that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -587,6 +201,7 @@
"EventName": "OCR.UC_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
"MSRValue": "0x8000100000000000",
+ "PublicDescription": "Counts uncached memory reads that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency). Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -597,6 +212,7 @@
"EventName": "OCR.UC_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200000010000",
+ "PublicDescription": "Counts uncached memory writes that have any type of response. Available PDIST counters: 0",
"SampleAfterValue": "100003",
"UMask": "0x1"
}
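
The OCR.* events above all program the same off-core response facility: EventCode 0xB7 selects the OCR event, and MSRValue is written to the OFFCORE_RSP MSRs named by MSRIndex (0x1a6/0x1a7). Judging purely from the values in this file, the low 16 bits select the request type (0x1 demand data/L1-prefetch reads, 0x2 demand RFOs, 0x4 demand code reads, 0x40 L2 HW-prefetch code reads, so ALL_CODE_RD is 0x44), bit 16 (0x10000) asks for any response, 0x184000000 carries the local-DRAM supplier bits, and bit 63 switches the event to counting outstanding-request cycles. A minimal decoding sketch under those assumptions follows; the SDM's OFFCORE_RSP documentation is the authoritative reference for the layout.

# Minimal sketch, assuming the bit layout inferred from the MSRValues in
# this file (not an authoritative decoder).
REQ_MASK      = 0xFFFF                # request-type bits (low 16)
ANY_RESPONSE  = 0x10000               # "any type of response" bit
DRAM_SUPPLIER = 0x184000000           # supplier bits seen on every *.DRAM event here
OUTSTANDING   = 0x8000000000000000    # bit 63: count outstanding-request cycles

def decode(msr_value):
    parts = ["request=0x%x" % (msr_value & REQ_MASK)]
    if msr_value & ANY_RESPONSE:
        parts.append("any_response")
    if msr_value & DRAM_SUPPLIER == DRAM_SUPPLIER:
        parts.append("dram")
    if msr_value & OUTSTANDING:
        parts.append("outstanding")
    return ", ".join(parts)

# OCR.ALL_CODE_RD (0x44 = demand code reads 0x4 | L2 HW-prefetch code reads 0x40):
print(decode(0x10044))             # request=0x44, any_response
print(decode(0x184000044))         # request=0x44, dram
print(decode(0x8000000000000044))  # request=0x44, outstanding
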
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/pipeline.json b/tools/perf/pmu-events/arch/x86/snowridgex/pipeline.json
index e4e7902c1162..0fc2e821b14a 100644
--- a/tools/perf/pmu-events/arch/x86/snowridgex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/pipeline.json
@@ -5,7 +5,7 @@
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PEBS": "1",
- "PublicDescription": "Counts the total number of instructions in which the instruction pointer (IP) of the processor is resteered due to a branch instruction and the branch instruction successfully retires. All branch type instructions are accounted for.",
+ "PublicDescription": "Counts the total number of instructions in which the instruction pointer (IP) of the processor is resteered due to a branch instruction and the branch instruction successfully retires. All branch type instructions are accounted for. Available PDIST counters: 0",
"SampleAfterValue": "200003"
},
{
@@ -14,6 +14,7 @@
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.CALL",
"PEBS": "1",
+ "PublicDescription": "Counts the number of near CALL branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0xf9"
},
@@ -23,6 +24,7 @@
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"PEBS": "1",
+ "PublicDescription": "Counts the number of far branch instructions retired, includes far jump, far call and return, and interrupt call and return. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0xbf"
},
@@ -32,6 +34,7 @@
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.IND_CALL",
"PEBS": "1",
+ "PublicDescription": "Counts the number of near indirect CALL branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0xfb"
},
@@ -41,6 +44,7 @@
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.JCC",
"PEBS": "1",
+ "PublicDescription": "Counts the number of retired JCC (Jump on Conditional Code) branch instructions retired, includes both taken and not taken branches. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x7e"
},
@@ -50,6 +54,7 @@
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.NON_RETURN_IND",
"PEBS": "1",
+ "PublicDescription": "Counts the number of near indirect JMP and near indirect CALL branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0xeb"
},
@@ -59,6 +64,7 @@
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.REL_CALL",
"PEBS": "1",
+ "PublicDescription": "Counts the number of near relative CALL branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0xfd"
},
@@ -68,6 +74,7 @@
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.RETURN",
"PEBS": "1",
+ "PublicDescription": "Counts the number of near RET branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0xf7"
},
@@ -77,6 +84,7 @@
"EventCode": "0xc4",
"EventName": "BR_INST_RETIRED.TAKEN_JCC",
"PEBS": "1",
+ "PublicDescription": "Counts the number of taken JCC (Jump on Conditional Code) branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0xfe"
},
@@ -86,7 +94,7 @@
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PEBS": "1",
- "PublicDescription": "Counts the total number of mispredicted branch instructions retired. All branch type instructions are accounted for. Prediction of the branch target address enables the processor to begin executing instructions before the non-speculative execution path is known. The branch prediction unit (BPU) predicts the target address based on the instruction pointer (IP) of the branch and on the execution path through which execution reached this IP. A branch misprediction occurs when the prediction is wrong, and results in discarding all instructions executed in the speculative path and re-fetching from the correct path.",
+ "PublicDescription": "Counts the total number of mispredicted branch instructions retired. All branch type instructions are accounted for. Prediction of the branch target address enables the processor to begin executing instructions before the non-speculative execution path is known. The branch prediction unit (BPU) predicts the target address based on the instruction pointer (IP) of the branch and on the execution path through which execution reached this IP. A branch misprediction occurs when the prediction is wrong, and results in discarding all instructions executed in the speculative path and re-fetching from the correct path. Available PDIST counters: 0",
"SampleAfterValue": "200003"
},
{
@@ -95,6 +103,7 @@
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.IND_CALL",
"PEBS": "1",
+ "PublicDescription": "Counts the number of mispredicted near indirect CALL branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0xfb"
},
@@ -104,6 +113,7 @@
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.JCC",
"PEBS": "1",
+ "PublicDescription": "Counts the number of mispredicted JCC (Jump on Conditional Code) branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x7e"
},
@@ -113,6 +123,7 @@
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
"PEBS": "1",
+ "PublicDescription": "Counts the number of mispredicted near indirect JMP and near indirect CALL branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0xeb"
},
@@ -122,6 +133,7 @@
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.RETURN",
"PEBS": "1",
+ "PublicDescription": "Counts the number of mispredicted near RET branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0xf7"
},
@@ -131,6 +143,7 @@
"EventCode": "0xc5",
"EventName": "BR_MISP_RETIRED.TAKEN_JCC",
"PEBS": "1",
+ "PublicDescription": "Counts the number of mispredicted taken JCC (Jump on Conditional Code) branch instructions retired. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0xfe"
},
@@ -206,7 +219,7 @@
"Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PEBS": "1",
- "PublicDescription": "Counts the total number of instructions that retired. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. This event continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0.",
+ "PublicDescription": "Counts the total number of instructions that retired. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. This event continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. Available PDIST counters: 0",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
@@ -216,7 +229,7 @@
"EventCode": "0xc0",
"EventName": "INST_RETIRED.ANY_P",
"PEBS": "1",
- "PublicDescription": "Counts the total number of instructions that retired. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. This event continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses a programmable general purpose performance counter.",
+ "PublicDescription": "Counts the total number of instructions that retired. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. This event continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses a programmable general purpose performance counter. Available PDIST counters: 0",
"SampleAfterValue": "2000003"
},
{
@@ -225,6 +238,7 @@
"EventCode": "0x03",
"EventName": "LD_BLOCKS.4K_ALIAS",
"PEBS": "1",
+ "PublicDescription": "Counts the number of retired loads that are blocked because it initially appears to be store forward blocked, but subsequently is shown not to be blocked based on 4K alias check. Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x4"
},
@@ -234,6 +248,7 @@
"EventCode": "0x03",
"EventName": "LD_BLOCKS.ALL",
"PEBS": "1",
+ "PublicDescription": "Counts the number of retired loads that are blocked for any of the following reasons: DTLB miss, address alias, store forward or data unknown (includes memory disambiguation blocks and ESP consuming load blocks). Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x10"
},
@@ -243,6 +258,7 @@
"EventCode": "0x03",
"EventName": "LD_BLOCKS.DATA_UNKNOWN",
"PEBS": "1",
+ "PublicDescription": "Counts the number of retired loads that are blocked because its address exactly matches an older store whose data is not ready. Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x1"
},
@@ -252,6 +268,7 @@
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PEBS": "1",
+ "PublicDescription": "Counts the number of retired loads that are blocked because its address partially overlapped with an older store. Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x2"
},
@@ -464,6 +481,7 @@
"EventCode": "0xc2",
"EventName": "TOPDOWN_RETIRING.ALL",
"PEBS": "1",
+ "PublicDescription": "Counts the total number of consumed retirement slots. Available PDIST counters: 0",
"SampleAfterValue": "1000003"
},
{
@@ -480,6 +498,7 @@
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.ALL",
"PEBS": "1",
+ "PublicDescription": "Counts the total number of uops retired. Available PDIST counters: 0",
"SampleAfterValue": "2000003"
},
{
@@ -488,6 +507,7 @@
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.IDIV",
"PEBS": "1",
+ "PublicDescription": "Counts the number of integer divide uops retired. Available PDIST counters: 0",
"SampleAfterValue": "2000003",
"UMask": "0x10"
},
@@ -497,7 +517,7 @@
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.MS",
"PEBS": "1",
- "PublicDescription": "Counts the number of uops that are from complex flows issued by the Microcode Sequencer (MS). This includes uops from flows due to complex instructions, faults, assists, and inserted flows.",
+ "PublicDescription": "Counts the number of uops that are from complex flows issued by the Microcode Sequencer (MS). This includes uops from flows due to complex instructions, faults, assists, and inserted flows. Available PDIST counters: 0",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
@@ -507,6 +527,7 @@
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.X87",
"PEBS": "1",
+ "PublicDescription": "Counts the number of x87 uops retired, includes those in MS flows. Available PDIST counters: 0",
"SampleAfterValue": "2000003",
"UMask": "0x2"
}
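
For the pipeline events above, the JSON fields map almost directly onto perf's raw cpu// event syntax. The sketch below is a hypothetical helper for illustration only, not the jevents generator in tools/perf/pmu-events, which handles many more fields; in practice perf resolves these events by EventName once the generated tables are compiled in, so the raw encoding is mostly useful for cross-checking.

# Rough sketch: map EventCode/UMask/SampleAfterValue onto perf's raw syntax.
def to_perf_term(evt):
    # Lower-case the hex values, since some files spell EventCode as "0XB7".
    terms = ["event=" + evt["EventCode"].lower()]
    if "UMask" in evt:
        terms.append("umask=" + evt["UMask"].lower())
    if "SampleAfterValue" in evt:
        terms.append("period=" + evt["SampleAfterValue"])
    return "cpu/" + ",".join(terms) + "/"

# BR_INST_RETIRED.CALL from the hunk above:
br_call = {
    "EventCode": "0xc4",
    "EventName": "BR_INST_RETIRED.CALL",
    "SampleAfterValue": "200003",
    "UMask": "0xf9",
}
print(to_perf_term(br_call))  # cpu/event=0xc4,umask=0xf9,period=200003/
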
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/snowridgex/virtual-memory.json
index f9a6caed8776..bf56d72bb4a7 100644
--- a/tools/perf/pmu-events/arch/x86/snowridgex/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/virtual-memory.json
@@ -242,6 +242,7 @@
"EventCode": "0x03",
"EventName": "LD_BLOCKS.DTLB_MISS",
"PEBS": "1",
+ "PublicDescription": "Counts the number of retired loads that are blocked due to a first level TLB miss. Available PDIST counters: 0",
"SampleAfterValue": "1000003",
"UMask": "0x8"
},
@@ -252,6 +253,7 @@
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS",
"PEBS": "1",
+ "PublicDescription": "Counts the number of memory uops retired that missed in the second level TLB. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x13"
},
@@ -262,6 +264,7 @@
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
"PEBS": "1",
+ "PublicDescription": "Counts the number of load uops retired that miss in the second Level TLB. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x11"
},
@@ -272,6 +275,7 @@
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_STORES",
"PEBS": "1",
+ "PublicDescription": "Counts the number of store uops retired that miss in the second level TLB. Available PDIST counters: 0",
"SampleAfterValue": "200003",
"UMask": "0x12"
}
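
A small consistency point in the hunk above: the umask of the combined MEM_UOPS_RETIRED.DTLB_MISS event (0x13) is the bitwise OR of its load (0x11) and store (0x12) sub-events, so a single counter covers both classes. A one-line check:

# Umasks from the virtual-memory.json hunk above.
DTLB_MISS_LOADS, DTLB_MISS_STORES, DTLB_MISS = 0x11, 0x12, 0x13
assert DTLB_MISS_LOADS | DTLB_MISS_STORES == DTLB_MISS
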
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json b/tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json
index 7ef1bac08463..b417e1db9e07 100644
--- a/tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json
@@ -497,7 +497,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x4c",
"EventName": "LOAD_HIT_PREFETCH.SWPF",
- "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "PublicDescription": "Counts all software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
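
The tgl-metrics.json changes below swap the literal TSC event for msr@tsc@ in the C-state residency metrics, but the arithmetic is unchanged: both terms are free-running counters, perf takes their deltas over the measurement window, and the 100% ScaleUnit turns the ratio into a percentage. A minimal sketch with made-up counter deltas:

# How a residency metric such as C6_Pkg_Residency evaluates; the deltas
# here are invented purely for illustration.
def residency_percent(cstate_delta, tsc_delta):
    return 100.0 * cstate_delta / tsc_delta

print(residency_percent(1_200_000_000, 3_000_000_000))  # 40.0
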
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json b/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json
index 8c0cd6e63a2a..908da985c594 100644
--- a/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json
@@ -1,63 +1,63 @@
[
{
"BriefDescription": "C10 residency percent per package",
- "MetricExpr": "cstate_pkg@c10\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c10\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C10_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C2 residency percent per package",
- "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per package",
- "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C3_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per core",
- "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per package",
- "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per core",
- "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_core@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per package",
- "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C7_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C8 residency percent per package",
- "MetricExpr": "cstate_pkg@c8\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c8\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C8_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C9 residency percent per package",
- "MetricExpr": "cstate_pkg@c9\\-residency@ / TSC",
+ "MetricExpr": "cstate_pkg@c9\\-residency@ / msr@tsc@",
"MetricGroup": "Power",
"MetricName": "C9_Pkg_Residency",
"ScaleUnit": "100%"
@@ -85,16 +85,15 @@
},
{
"BriefDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_4k_aliasing",
- "MetricThreshold": "tma_4k_aliasing > 0.2 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound)",
+ "MetricThreshold": "tma_4k_aliasing > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations",
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
"MetricExpr": "(UOPS_DISPATCHED.PORT_0 + UOPS_DISPATCHED.PORT_1 + UOPS_DISPATCHED.PORT_5 + UOPS_DISPATCHED.PORT_6) / (4 * tma_info_core_core_clks)",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_alu_op_utilization",
@@ -106,7 +105,7 @@
"MetricExpr": "34 * ASSISTS.ANY / tma_info_thread_slots",
"MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_assists",
- "MetricThreshold": "tma_assists > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
"PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: ASSISTS.ANY",
"ScaleUnit": "100%"
},
@@ -129,12 +128,13 @@
"MetricName": "tma_bad_speculation",
"MetricThreshold": "tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
- "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
"MetricGroup": "BigFootprint;BvBC;Fed;Frontend;IcMiss;MemoryTLB",
"MetricName": "tma_bottleneck_big_code",
"MetricThreshold": "tma_bottleneck_big_code > 20"
@@ -148,39 +148,44 @@
"PublicDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. (A lower bound)"
},
{
+ "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
+ "MetricGroup": "BvCB;Cor;tma_issueComp",
+ "MetricName": "tma_bottleneck_compute_bound_est",
+ "MetricThreshold": "tma_bottleneck_compute_bound_est > 20",
+ "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. Related metrics: "
+ },
+ {
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_fb_full / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
"MetricGroup": "BvMB;Mem;MemoryBW;Offcore;tma_issueBW",
- "MetricName": "tma_bottleneck_cache_memory_bandwidth",
- "MetricThreshold": "tma_bottleneck_cache_memory_bandwidth > 20",
+ "MetricName": "tma_bottleneck_data_cache_memory_bandwidth",
+ "MetricThreshold": "tma_bottleneck_data_cache_memory_bandwidth > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full"
},
{
"BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks",
- "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_l1_latency_dependency / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_lock_latency / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)) + tma_memory_bound * (tma_l1_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_split_loads / (tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_split_stores / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_store_latency / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l1_latency_dependency / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_lock_latency / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_loads / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_split_stores / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
"MetricGroup": "BvML;Mem;MemoryLat;Offcore;tma_issueLat",
- "MetricName": "tma_bottleneck_cache_memory_latency",
- "MetricThreshold": "tma_bottleneck_cache_memory_latency > 20",
+ "MetricName": "tma_bottleneck_data_cache_memory_latency",
+ "MetricThreshold": "tma_bottleneck_data_cache_memory_latency > 20",
"PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency"
},
{
- "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
- "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_serializing_operation + tma_ports_utilization) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_serializing_operation + tma_ports_utilization)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
- "MetricGroup": "BvCB;Cor;tma_issueComp",
- "MetricName": "tma_bottleneck_compute_bound_est",
- "MetricThreshold": "tma_bottleneck_compute_bound_est > 20",
- "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy"
- },
- {
"BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)",
- "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_mispredicts_resteers) / (tma_mispredicts_resteers + tma_clears_resteers + tma_unknown_branches)) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_ms / (tma_mite + tma_dsb + tma_lsd + tma_ms))) - tma_bottleneck_big_code",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_mispredicts_resteers) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_ms)) - tma_bottleneck_big_code",
"MetricGroup": "BvFB;Fed;FetchBW;Frontend",
"MetricName": "tma_bottleneck_instruction_fetch_bw",
"MetricThreshold": "tma_bottleneck_instruction_fetch_bw > 20"
},
{
"BriefDescription": "Total pipeline cost of irregular execution (e.g",
- "MetricExpr": "100 * (tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_mispredicts_resteers) / (tma_mispredicts_resteers + tma_clears_resteers + tma_unknown_branches)) / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_ms / (tma_mite + tma_dsb + tma_lsd + tma_ms)) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + tma_core_bound * RS_EVENTS.EMPTY_CYCLES / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_serializing_operation + tma_ports_utilization) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_mispredicts_resteers) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_ms) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + tma_core_bound * RS_EVENTS.EMPTY_CYCLES / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
"MetricGroup": "Bad;BvIO;Cor;Ret;tma_issueMS",
"MetricName": "tma_bottleneck_irregular_overhead",
"MetricThreshold": "tma_bottleneck_irregular_overhead > 10",
@@ -188,7 +193,8 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_dtlb_load + tma_store_fwd_blk + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_4k_aliasing + tma_fb_full)) + tma_memory_bound * (tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound)) * (tma_dtlb_store / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store)))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_l1_latency_dependency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
"MetricGroup": "BvMT;Mem;MemoryTLB;Offcore;tma_issueTLB",
"MetricName": "tma_bottleneck_memory_data_tlbs",
"MetricThreshold": "tma_bottleneck_memory_data_tlbs > 20",
@@ -196,15 +202,17 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)",
- "MetricExpr": "100 * (tma_memory_bound * (tma_l3_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound + tma_store_bound) * tma_false_sharing / (tma_store_latency + tma_false_sharing + tma_split_stores + tma_streaming_stores + tma_dtlb_store - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
"MetricGroup": "BvMS;LockCont;Mem;Offcore;tma_issueSyncxn",
"MetricName": "tma_bottleneck_memory_synchronization",
"MetricThreshold": "tma_bottleneck_memory_synchronization > 10",
- "PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears"
+ "PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache"
},
{
"BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
- "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Bad;BadSpec;BrMispredicts;BvMP;tma_issueBM",
"MetricName": "tma_bottleneck_mispredictions",
"MetricThreshold": "tma_bottleneck_mispredictions > 20",
@@ -212,21 +220,23 @@
},
{
"BriefDescription": "Total pipeline cost of remaining bottlenecks in the back-end",
- "MetricExpr": "100 - (tma_bottleneck_big_code + tma_bottleneck_instruction_fetch_bw + tma_bottleneck_mispredictions + tma_bottleneck_cache_memory_bandwidth + tma_bottleneck_cache_memory_latency + tma_bottleneck_memory_data_tlbs + tma_bottleneck_memory_synchronization + tma_bottleneck_compute_bound_est + tma_bottleneck_irregular_overhead + tma_bottleneck_branching_overhead + tma_bottleneck_useful_work)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 - (tma_bottleneck_big_code + tma_bottleneck_instruction_fetch_bw + tma_bottleneck_mispredictions + tma_bottleneck_data_cache_memory_bandwidth + tma_bottleneck_data_cache_memory_latency + tma_bottleneck_memory_data_tlbs + tma_bottleneck_memory_synchronization + tma_bottleneck_compute_bound_est + tma_bottleneck_irregular_overhead + tma_bottleneck_branching_overhead + tma_bottleneck_useful_work)",
"MetricGroup": "BvOB;Cor;Offcore",
"MetricName": "tma_bottleneck_other_bottlenecks",
"MetricThreshold": "tma_bottleneck_other_bottlenecks > 20",
- "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls"
+ "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls."
},
{
- "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead",
+ "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_retiring - (BR_INST_RETIRED.ALL_BRANCHES + 2 * BR_INST_RETIRED.NEAR_CALL + INST_RETIRED.NOP) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
"MetricGroup": "BvUW;Ret",
"MetricName": "tma_bottleneck_useful_work",
"MetricThreshold": "tma_bottleneck_useful_work > 20"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions.",
"MetricExpr": "tma_light_operations * BR_INST_RETIRED.ALL_BRANCHES / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_branch_instructions",
@@ -248,8 +258,8 @@
"MetricExpr": "INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks + tma_unknown_branches",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_branch_resteers",
- "MetricThreshold": "tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_l3_hit_latency, tma_store_latency",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
"ScaleUnit": "100%"
},
{
@@ -257,8 +267,8 @@
"MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
"MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
"MetricName": "tma_cisc",
- "MetricThreshold": "tma_cisc > 0.1 & tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
- "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
"ScaleUnit": "100%"
},
{
@@ -266,24 +276,24 @@
"MetricExpr": "(1 - BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT)) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
"MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
"MetricName": "tma_clears_resteers",
- "MetricThreshold": "tma_clears_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that hit in the L2 cache",
+ "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that hit in the L2 cache.",
"MetricExpr": "max(0, tma_icache_misses - tma_code_l2_miss)",
"MetricGroup": "FetchLat;IcMiss;Offcore;TopdownL4;tma_L4_group;tma_icache_misses_group",
"MetricName": "tma_code_l2_hit",
- "MetricThreshold": "tma_code_l2_hit > 0.05 & tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_l2_hit > 0.05 & (tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that miss in the L2 cache",
+ "BriefDescription": "This metric estimates fraction of cycles the CPU was stalled due to instruction cache misses that miss in the L2 cache.",
"MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD / tma_info_thread_clks",
"MetricGroup": "FetchLat;IcMiss;Offcore;TopdownL4;tma_L4_group;tma_icache_misses_group",
"MetricName": "tma_code_l2_miss",
- "MetricThreshold": "tma_code_l2_miss > 0.05 & tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_l2_miss > 0.05 & (tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
@@ -291,7 +301,7 @@
"MetricExpr": "max(0, tma_itlb_misses - tma_code_stlb_miss)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group",
"MetricName": "tma_code_stlb_hit",
- "MetricThreshold": "tma_code_stlb_hit > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_hit > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
@@ -299,33 +309,33 @@
"MetricExpr": "ITLB_MISSES.WALK_ACTIVE / tma_info_thread_clks",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL4;tma_L4_group;tma_itlb_misses_group",
"MetricName": "tma_code_stlb_miss",
- "MetricThreshold": "tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for (instruction) code accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for (instruction) code accesses.",
"MetricExpr": "tma_code_stlb_miss * ITLB_MISSES.WALK_COMPLETED_2M_4M / (ITLB_MISSES.WALK_COMPLETED_4K + ITLB_MISSES.WALK_COMPLETED_2M_4M)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group",
"MetricName": "tma_code_stlb_miss_2m",
- "MetricThreshold": "tma_code_stlb_miss_2m > 0.05 & tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss_2m > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for (instruction) code accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for (instruction) code accesses.",
"MetricExpr": "tma_code_stlb_miss * ITLB_MISSES.WALK_COMPLETED_4K / (ITLB_MISSES.WALK_COMPLETED_4K + ITLB_MISSES.WALK_COMPLETED_2M_4M)",
"MetricGroup": "FetchLat;MemoryTLB;TopdownL5;tma_L5_group;tma_code_stlb_miss_group",
"MetricName": "tma_code_stlb_miss_4k",
- "MetricThreshold": "tma_code_stlb_miss_4k > 0.05 & tma_code_stlb_miss > 0.05 & tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_code_stlb_miss_4k > 0.05 & (tma_code_stlb_miss > 0.05 & (tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)))",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "((54 * tma_info_system_core_frequency - 5 * tma_info_system_core_frequency) * (MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + (53 * tma_info_system_core_frequency - 5 * tma_info_system_core_frequency) * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "(49 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + 48 * tma_info_system_core_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
- "MetricThreshold": "tma_contested_accesses > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD, MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_bottleneck_memory_synchronization, tma_data_sharing, tma_false_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_bottleneck_memory_synchronization, tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
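(The hunk above also constant-folds the latency coefficients: "54 * f - 5 * f" becomes "49 * f" and "53 * f - 5 * f" becomes "48 * f", where f is tma_info_system_core_frequency. A minimal Python check of that algebra, assuming only that f is an arbitrary frequency value:

    import math

    # Illustrative check that the folded coefficients in the new MetricExpr
    # equal the old two-term form for any core frequency f (values made up).
    for f in (1.0, 2.4, 3.8):
        assert math.isclose(54 * f - 5 * f, 49 * f)
        assert math.isclose(53 * f - 5 * f, 48 * f)
    print("folded constants match the original expression")
)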
{
@@ -335,25 +345,25 @@
"MetricName": "tma_core_bound",
"MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations)",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "(53 * tma_info_system_core_frequency - 5 * tma_info_system_core_frequency) * (MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD + MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (1 - OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
+ "MetricExpr": "48 * tma_info_system_core_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD + MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (1 - OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
- "MetricThreshold": "tma_data_sharing > 0.05 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_false_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder",
- "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=0x1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=0x2@) / tma_info_core_core_clks / 2",
+ "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_core_clks / 2",
"MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group",
"MetricName": "tma_decoder0_alone",
- "MetricThreshold": "tma_decoder0_alone > 0.1 & tma_mite > 0.1 & tma_fetch_bandwidth > 0.2",
+ "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions",
"ScaleUnit": "100%"
},
@@ -362,7 +372,7 @@
"MetricExpr": "ARITH.DIVIDER_ACTIVE / tma_info_thread_clks",
"MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
- "MetricThreshold": "tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
"ScaleUnit": "100%"
},
@@ -372,7 +382,7 @@
"MetricExpr": "CYCLE_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks + (CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks - tma_l2_bound",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_dram_bound",
- "MetricThreshold": "tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_MISS",
"ScaleUnit": "100%"
},
@@ -382,7 +392,7 @@
"MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_dsb",
"MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2",
- "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
"ScaleUnit": "100%"
},
{
@@ -390,26 +400,26 @@
"MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_thread_clks",
"MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_dsb_switches",
- "MetricThreshold": "tma_dsb_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
- "MetricExpr": "min(7 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=0x1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
+ "MetricExpr": "min(7 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
"MetricName": "tma_dtlb_load",
- "MetricThreshold": "tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_store",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
- "MetricExpr": "(7 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=0x1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_core_clks",
+ "MetricExpr": "(7 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_core_clks",
"MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
"MetricName": "tma_dtlb_store",
- "MetricThreshold": "tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_bottleneck_memory_data_tlbs, tma_dtlb_load",
"ScaleUnit": "100%"
},
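(Most hunks in this patch only re-parenthesize MetricThreshold strings, turning "a > 0.1 & b > 0.2 & c > 0.2" into "a > 0.1 & (b > 0.2 & c > 0.2)". Since the threshold language's '&' is a logical AND, which is associative, the new grouping makes the parent/child nesting of the TMA tree explicit without changing the result. A rough sketch using Python's 'and' as a stand-in for '&' (this is not the perf expression parser):

    import itertools

    def old_form(a, b, c):
        return a and b and c      # left-associative: (a and b) and c

    def new_form(a, b, c):
        return a and (b and c)    # explicit parent/child nesting

    for combo in itertools.product([False, True], repeat=3):
        assert old_form(*combo) == new_form(*combo)
    print("re-parenthesized thresholds evaluate identically")
)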
{
@@ -417,8 +427,8 @@
"MetricExpr": "54 * tma_info_system_core_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_thread_clks",
"MetricGroup": "BvMS;DataSharing;LockCont;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
"MetricName": "tma_false_sharing",
- "MetricThreshold": "tma_false_sharing > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_machine_clears",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_bottleneck_memory_synchronization, tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
"ScaleUnit": "100%"
},
{
@@ -427,7 +437,7 @@
"MetricGroup": "BvMB;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
- "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
"ScaleUnit": "100%"
},
{
@@ -437,7 +447,7 @@
"MetricName": "tma_fetch_bandwidth",
"MetricThreshold": "tma_fetch_bandwidth > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1, FRONTEND_RETIRED.LATENCY_GE_1, FRONTEND_RETIRED.LATENCY_GE_2. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1;FRONTEND_RETIRED.LATENCY_GE_1;FRONTEND_RETIRED.LATENCY_GE_2. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
"ScaleUnit": "100%"
},
{
@@ -447,7 +457,7 @@
"MetricName": "tma_fetch_latency",
"MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16, FRONTEND_RETIRED.LATENCY_GE_8",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
"ScaleUnit": "100%"
},
{
@@ -465,7 +475,7 @@
"MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_fp_arith",
"MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
"ScaleUnit": "100%"
},
{
@@ -474,15 +484,15 @@
"MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group",
"MetricName": "tma_fp_assists",
"MetricThreshold": "tma_fp_assists > 0.1",
- "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals)",
+ "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals).",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of cycles where the Floating-Point Divider unit was active",
+ "BriefDescription": "This metric represents fraction of cycles where the Floating-Point Divider unit was active.",
"MetricExpr": "ARITH.FP_DIVIDER_ACTIVE / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_divider_group",
"MetricName": "tma_fp_divider",
- "MetricThreshold": "tma_fp_divider > 0.2 & tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_fp_divider > 0.2 & (tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
{
@@ -490,7 +500,7 @@
"MetricExpr": "FP_ARITH_INST_RETIRED.SCALAR / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_scalar",
- "MetricThreshold": "tma_fp_scalar > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
"PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -499,7 +509,7 @@
"MetricExpr": "FP_ARITH_INST_RETIRED.VECTOR / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
"MetricName": "tma_fp_vector",
- "MetricThreshold": "tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
"PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -508,7 +518,7 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_128b",
- "MetricThreshold": "tma_fp_vector_128b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
"PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -517,7 +527,7 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_256b",
- "MetricThreshold": "tma_fp_vector_256b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
"PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting prior to LNL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
@@ -526,7 +536,7 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
"MetricName": "tma_fp_vector_512b",
- "MetricThreshold": "tma_fp_vector_512b > 0.1 & tma_fp_vector > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_fp_vector_512b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
"PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
"ScaleUnit": "100%"
},
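(For the tma_fp_vector_* family above, each metric is the matching FP_ARITH_INST_RETIRED width counters divided by all retiring slots. A sketch with made-up event counts; the numbers are illustrative, not measurements:

    # Hypothetical counts; real values come from the events in MetricExpr.
    retiring_slots = 1_000_000     # stands in for tma_retiring * tma_info_thread_slots
    packed_double_128b = 20_000    # FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE
    packed_single_128b = 30_000    # FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE
    tma_fp_vector_128b = (packed_double_128b + packed_single_128b) / retiring_slots
    print(f"tma_fp_vector_128b ~ {tma_fp_vector_128b:.1%}")  # 5.0%, below the 0.1 threshold
)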
@@ -538,17 +548,17 @@
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.15",
"MetricgroupNoGroup": "TopdownL1;Default",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences",
- "MetricExpr": "tma_microcode_sequencer + tma_retiring * (UOPS_DECODED.DEC0 - cpu@UOPS_DECODED.DEC0\\,cmask\\=0x1@) / IDQ.MITE_UOPS",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
+ "MetricExpr": "tma_microcode_sequencer + tma_retiring * (UOPS_DECODED.DEC0 - cpu@UOPS_DECODED.DEC0\\,cmask\\=1@) / IDQ.MITE_UOPS",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_heavy_operations",
"MetricThreshold": "tma_heavy_operations > 0.1",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations , instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+])",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.([ICL+] Note this may overcount due to approximation using indirect events; [ADL+])",
"ScaleUnit": "100%"
},
{
@@ -556,8 +566,8 @@
"MetricExpr": "ICACHE_DATA.STALLS / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
- "MetricThreshold": "tma_icache_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS, FRONTEND_RETIRED.L1I_MISS",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
"ScaleUnit": "100%"
},
{
@@ -569,28 +579,28 @@
"PublicDescription": "Branch Misprediction Cost: Cycles representing fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_bottleneck_mispredictions, tma_branch_mispredicts, tma_mispredicts_resteers"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for conditional non-taken branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for conditional non-taken branches (lower number means higher occurrence rate).",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_NTAKEN",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_cond_ntaken",
"MetricThreshold": "tma_info_bad_spec_ipmisp_cond_ntaken < 200"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for conditional taken branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for conditional taken branches (lower number means higher occurrence rate).",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_TAKEN",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_cond_taken",
"MetricThreshold": "tma_info_bad_spec_ipmisp_cond_taken < 200"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.INDIRECT",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_indirect",
- "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1000"
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3"
},
{
- "BriefDescription": "Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per retired Mispredicts for return branches (lower number means higher occurrence rate).",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.RET",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_ret",
@@ -619,7 +629,8 @@
},
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck",
- "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_latency + tma_fetch_bandwidth)) * (tma_dsb / (tma_mite + tma_dsb + tma_lsd + tma_ms)))",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_bandwidth + tma_fetch_latency)) * (tma_dsb / (tma_dsb + tma_lsd + tma_mite + tma_ms)))",
"MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_bandwidth",
"MetricThreshold": "tma_info_botlnk_l2_dsb_bandwidth > 10",
@@ -628,7 +639,7 @@
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches) + tma_fetch_bandwidth * tma_mite / (tma_mite + tma_dsb + tma_lsd + tma_ms))",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_lsd + tma_mite + tma_ms))",
"MetricGroup": "DSBmiss;Fed;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_misses",
"MetricThreshold": "tma_info_botlnk_l2_dsb_misses > 10",
@@ -637,10 +648,11 @@
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
"MetricConstraint": "NO_GROUP_EVENTS",
- "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_icache_misses + tma_itlb_misses + tma_branch_resteers + tma_ms_switches + tma_lcp + tma_dsb_switches))",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
"MetricName": "tma_info_botlnk_l2_ic_misses",
- "MetricThreshold": "tma_info_botlnk_l2_ic_misses > 5"
+ "MetricThreshold": "tma_info_botlnk_l2_ic_misses > 5",
+ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: "
},
{
"BriefDescription": "Fraction of branches that are CALL or RET",
@@ -701,11 +713,11 @@
"MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR + FP_ARITH_INST_RETIRED.VECTOR) / (2 * tma_info_core_core_clks)",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "tma_info_core_fp_arith_utilization",
- "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)"
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=0x1@",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "tma_info_core_ilp"
},
@@ -718,20 +730,20 @@
"PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp"
},
{
- "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details",
- "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=0x1\\,edge\\=0x1@",
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
"MetricGroup": "DSBmiss",
"MetricName": "tma_info_frontend_dsb_switch_cost"
},
{
"BriefDescription": "Average number of Uops issued by front-end when it issued something",
- "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=0x1@",
+ "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=1@",
"MetricGroup": "Fed;FetchBW",
"MetricName": "tma_info_frontend_fetch_upc"
},
{
"BriefDescription": "Average Latency for L1 instruction cache misses",
- "MetricExpr": "ICACHE_16B.IFDATA_STALL / cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=0x1\\,edge\\=0x1@",
+ "MetricExpr": "ICACHE_16B.IFDATA_STALL / cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@",
"MetricGroup": "Fed;FetchLat;IcMiss",
"MetricName": "tma_info_frontend_icache_miss_latency"
},
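(The icache-miss-latency expression above divides total stall cycles by an edge-filtered count of the same event: with cmask=1,edge the PMU counts only cycles where the stall signal rises from 0 to 1, i.e. the number of distinct miss episodes. A toy illustration of that ratio on a made-up per-cycle stall trace, not real PMU data:

    stalled = [0, 1, 1, 1, 0, 0, 1, 1, 0, 1]  # hypothetical per-cycle stall flags
    total_stall_cycles = sum(stalled)
    # cmask=1,edge analogue: count rising edges of the stall signal
    episodes = sum(1 for prev, cur in zip([0] + stalled, stalled) if cur and not prev)
    print(f"{total_stall_cycles} cycles / {episodes} misses = "
          f"{total_stall_cycles / episodes:.2f} cycles per miss")  # 2.00
)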
@@ -773,7 +785,7 @@
"MetricName": "tma_info_frontend_tbpc"
},
{
- "BriefDescription": "Branch instructions per taken branch",
+ "BriefDescription": "Branch instructions per taken branch.",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;PGO",
"MetricName": "tma_info_inst_mix_bptkbranch"
@@ -791,7 +803,7 @@
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
"MetricThreshold": "tma_info_inst_mix_iparith < 10",
- "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW"
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
@@ -799,7 +811,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx128",
"MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
@@ -807,7 +819,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx256",
"MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate)",
@@ -815,7 +827,7 @@
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx512",
"MetricThreshold": "tma_info_inst_mix_iparith_avx512 < 10",
- "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
@@ -823,7 +835,7 @@
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_dp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
@@ -831,7 +843,7 @@
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_sp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10",
- "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting"
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting."
},
{
"BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
@@ -886,7 +898,7 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
- "MetricThreshold": "tma_info_inst_mix_iptb < 5 * 2 + 1",
+ "MetricThreshold": "tma_info_inst_mix_iptb < 11",
"PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp"
},
{
@@ -1011,7 +1023,7 @@
},
{
"BriefDescription": "Average Parallel L2 cache miss demand Loads",
- "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=0x1@",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@",
"MetricGroup": "Memory_BW;Offcore",
"MetricName": "tma_info_memory_latency_load_l2_mlp"
},
@@ -1073,8 +1085,8 @@
"MetricName": "tma_info_memory_tlb_store_stlb_mpki"
},
{
- "BriefDescription": "",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=0x1@)",
+ "BriefDescription": "Mem;Backend;CacheHits",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@)",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute"
},
@@ -1097,22 +1109,28 @@
"MetricName": "tma_info_pipeline_fetch_mite"
},
{
+ "BriefDescription": "Average number of uops fetched from MS per cycle",
+ "MetricExpr": "IDQ.MS_UOPS / cpu@IDQ.MS_UOPS\\,cmask\\=1@",
+ "MetricGroup": "Fed;FetchLat;MicroSeq",
+ "MetricName": "tma_info_pipeline_fetch_ms"
+ },
+ {
"BriefDescription": "Instructions per a microcode Assist invocation",
"MetricExpr": "INST_RETIRED.ANY / ASSISTS.ANY",
"MetricGroup": "MicroSeq;Pipeline;Ret;Retire",
"MetricName": "tma_info_pipeline_ipassist",
- "MetricThreshold": "tma_info_pipeline_ipassist < 100000",
+ "MetricThreshold": "tma_info_pipeline_ipassist < 100e3",
"PublicDescription": "Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)"
},
{
- "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired",
- "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=0x1@",
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
"MetricGroup": "Pipeline;Ret",
"MetricName": "tma_info_pipeline_retire"
},
{
"BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]",
- "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / tma_info_system_time",
+ "MetricExpr": "tma_info_system_turbo_utilization * msr@tsc@ / 1e9 / tma_info_system_time",
"MetricGroup": "Power;Summary",
"MetricName": "tma_info_system_core_frequency"
},
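(The core-frequency metric above changes only the TSC event spelling (TSC becomes msr@tsc@); the arithmetic is unchanged: turbo utilization times TSC ticks, scaled to GHz over the measurement interval. With made-up sample values:

    turbo_utilization = 1.2      # hypothetical CPU_CLK_UNHALTED.THREAD / REF_TSC ratio
    tsc_ticks = 24_000_000_000   # hypothetical msr@tsc@ delta over the interval
    elapsed_s = 10.0             # tma_info_system_time
    core_ghz = turbo_utilization * tsc_ticks / 1e9 / elapsed_s
    print(f"average core frequency ~ {core_ghz:.2f} GHz")  # 2.88
)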
@@ -1124,7 +1142,7 @@
},
{
"BriefDescription": "Average number of utilized CPUs",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "Summary",
"MetricName": "tma_info_system_cpus_utilized"
},
@@ -1133,7 +1151,7 @@
"MetricExpr": "64 * (UNC_ARB_TRK_REQUESTS.ALL + UNC_ARB_COH_TRK_REQUESTS.ALL) / 1e6 / tma_info_system_time / 1e3",
"MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW",
"MetricName": "tma_info_system_dram_bw_use",
- "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth, tma_sq_full"
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_mem_bandwidth, tma_sq_full"
},
{
"BriefDescription": "Giga Floating Point Operations Per Second",
@@ -1147,14 +1165,13 @@
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
"MetricGroup": "Branches;OS",
"MetricName": "tma_info_system_ipfarbranch",
- "MetricThreshold": "tma_info_system_ipfarbranch < 1000000"
+ "MetricThreshold": "tma_info_system_ipfarbranch < 1e6"
},
{
"BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
"MetricGroup": "OS",
- "MetricName": "tma_info_system_kernel_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_system_kernel_cpi"
},
{
"BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
@@ -1165,6 +1182,7 @@
},
{
"BriefDescription": "Average number of parallel data read requests to external memory",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "UNC_ARB_DAT_OCCUPANCY.RD / UNC_ARB_DAT_OCCUPANCY.RD@cmask\\=1@",
"MetricGroup": "Mem;MemoryBW;SoC",
"MetricName": "tma_info_system_mem_parallel_reads",
@@ -1195,7 +1213,7 @@
"MetricExpr": "CORE_POWER.LVL0_TURBO_LICENSE / tma_info_core_core_clks",
"MetricGroup": "Power",
"MetricName": "tma_info_system_power_license0_utilization",
- "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes"
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes."
},
{
"BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1",
@@ -1203,7 +1221,7 @@
"MetricGroup": "Power",
"MetricName": "tma_info_system_power_license1_utilization",
"MetricThreshold": "tma_info_system_power_license1_utilization > 0.5",
- "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions"
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions."
},
{
"BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX)",
@@ -1211,7 +1229,7 @@
"MetricGroup": "Power",
"MetricName": "tma_info_system_power_license2_utilization",
"MetricThreshold": "tma_info_system_power_license2_utilization > 0.5",
- "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX). This includes high current AVX 512-bit instructions"
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX). This includes high current AVX 512-bit instructions."
},
{
"BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
@@ -1239,7 +1257,7 @@
"MetricName": "tma_info_system_turbo_utilization"
},
{
- "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active",
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Pipeline",
"MetricName": "tma_info_thread_clks"
@@ -1248,15 +1266,14 @@
"BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / tma_info_thread_ipc",
"MetricGroup": "Mem;Pipeline",
- "MetricName": "tma_info_thread_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_thread_cpi"
},
{
"BriefDescription": "The ratio of Executed- by Issued-Uops",
"MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
"MetricGroup": "Cor;Pipeline",
"MetricName": "tma_info_thread_execute_per_issue",
- "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage"
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
},
{
"BriefDescription": "Instructions Per Cycle (per Logical Processor)",
@@ -1266,13 +1283,13 @@
},
{
"BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
- "MetricExpr": "slots",
+ "MetricExpr": "TOPDOWN.SLOTS",
"MetricGroup": "TmaL1;tma_L1_group",
"MetricName": "tma_info_thread_slots"
},
{
"BriefDescription": "Fraction of Physical Core issue-slots utilized by this Logical Processor",
- "MetricExpr": "(tma_info_thread_slots / (slots / 2) if #SMT_on else 1)",
+ "MetricExpr": "(tma_info_thread_slots / (TOPDOWN.SLOTS / 2) if #SMT_on else 1)",
"MetricGroup": "SMT;TmaL1;tma_L1_group",
"MetricName": "tma_info_thread_slots_utilization"
},
@@ -1288,14 +1305,14 @@
"MetricExpr": "tma_retiring * tma_info_thread_slots / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW",
"MetricName": "tma_info_thread_uptb",
- "MetricThreshold": "tma_info_thread_uptb < 5 * 1.5"
+ "MetricThreshold": "tma_info_thread_uptb < 7.5"
},
{
- "BriefDescription": "This metric represents fraction of cycles where the Integer Divider unit was active",
+ "BriefDescription": "This metric represents fraction of cycles where the Integer Divider unit was active.",
"MetricExpr": "tma_divider - tma_fp_divider",
"MetricGroup": "TopdownL4;tma_L4_group;tma_divider_group",
"MetricName": "tma_int_divider",
- "MetricThreshold": "tma_int_divider > 0.2 & tma_divider > 0.2 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_int_divider > 0.2 & (tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"ScaleUnit": "100%"
},
{
@@ -1303,8 +1320,8 @@
"MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
- "MetricThreshold": "tma_itlb_misses > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS, FRONTEND_RETIRED.ITLB_MISS",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
"ScaleUnit": "100%"
},
{
@@ -1312,17 +1329,17 @@
"MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_thread_clks, 0)",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
"MetricName": "tma_l1_bound",
- "MetricThreshold": "tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 Data (L1D) cache. The L1D cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1D. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache",
+ "BriefDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache",
"MetricExpr": "min(2 * (MEM_INST_RETIRED.ALL_LOADS - MEM_LOAD_RETIRED.FB_HIT - MEM_LOAD_RETIRED.L1_MISS) * 20 / 100, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_l1_latency_dependency",
- "MetricThreshold": "tma_l1_latency_dependency > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache. The short latency of the L1D cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
+ "MetricThreshold": "tma_l1_latency_dependency > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric ([SKL+] roughly; [LNL]) estimates fraction of cycles with demand load accesses that hit the L1D cache. The short latency of the L1D cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
"ScaleUnit": "100%"
},
{
@@ -1331,7 +1348,7 @@
"MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + L1D_PEND_MISS.FB_FULL_PERIODS) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks)",
"MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
- "MetricThreshold": "tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT",
"ScaleUnit": "100%"
},
@@ -1340,27 +1357,26 @@
"MetricExpr": "5 * tma_info_system_core_frequency * MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_l2_bound_group",
"MetricName": "tma_l2_hit_latency",
- "MetricThreshold": "tma_l2_hit_latency > 0.05 & tma_l2_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_l2_hit_latency > 0.05 & (tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L2 cache under unloaded scenarios (possibly L2 latency limited). Avoiding L1 cache misses (i.e. L1 misses/L2 hits) will improve the latency. Sample with: MEM_LOAD_RETIRED.L2_HIT",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS) / tma_info_thread_clks",
"MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
- "MetricThreshold": "tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
- "MetricExpr": "(22.5 * tma_info_system_core_frequency - 5 * tma_info_system_core_frequency) * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks",
+ "MetricExpr": "17.5 * tma_info_system_core_frequency * (MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2)) / tma_info_thread_clks",
"MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
"MetricName": "tma_l3_hit_latency",
- "MetricThreshold": "tma_l3_hit_latency > 0.1 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT. Related metrics: tma_bottleneck_cache_memory_latency, tma_branch_resteers, tma_mem_latency, tma_store_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_bottleneck_data_cache_memory_latency, tma_mem_latency",
"ScaleUnit": "100%"
},
{
@@ -1368,18 +1384,18 @@
"MetricExpr": "DECODE.LCP / tma_info_thread_clks",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_lcp",
- "MetricThreshold": "tma_lcp > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
- "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation)",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
"MetricExpr": "max(0, tma_retiring - tma_heavy_operations)",
"MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_light_operations",
"MetricThreshold": "tma_light_operations > 0.6",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations , instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events; [ADL+] .). Sample with: INST_RETIRED.PREC_DIST",
"ScaleUnit": "100%"
},
{
@@ -1396,7 +1412,7 @@
"MetricExpr": "tma_dtlb_load - tma_load_stlb_miss",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
"MetricName": "tma_load_stlb_hit",
- "MetricThreshold": "tma_load_stlb_hit > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_hit > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
@@ -1404,31 +1420,31 @@
"MetricExpr": "DTLB_LOAD_MISSES.WALK_ACTIVE / tma_info_thread_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
"MetricName": "tma_load_stlb_miss",
- "MetricThreshold": "tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_1G / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_1g",
- "MetricThreshold": "tma_load_stlb_miss_1g > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_1g > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_2m",
- "MetricThreshold": "tma_load_stlb_miss_2m > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_2m > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data load accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data load accesses.",
"MetricExpr": "tma_load_stlb_miss * DTLB_LOAD_MISSES.WALK_COMPLETED_4K / (DTLB_LOAD_MISSES.WALK_COMPLETED_4K + DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M + DTLB_LOAD_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_load_stlb_miss_group",
"MetricName": "tma_load_stlb_miss_4k",
- "MetricThreshold": "tma_load_stlb_miss_4k > 0.05 & tma_load_stlb_miss > 0.05 & tma_dtlb_load > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_load_stlb_miss_4k > 0.05 & (tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
@@ -1437,7 +1453,7 @@
"MetricExpr": "(16 * max(0, MEM_INST_RETIRED.LOCK_LOADS - L2_RQSTS.ALL_RFO) + MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES * (10 * L2_RQSTS.RFO_HIT + min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO))) / tma_info_thread_clks",
"MetricGroup": "LockCont;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
- "MetricThreshold": "tma_lock_latency > 0.2 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
"ScaleUnit": "100%"
},
@@ -1447,7 +1463,7 @@
"MetricGroup": "FetchBW;LSD;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_lsd",
"MetricThreshold": "tma_lsd > 0.15 & tma_fetch_bandwidth > 0.2",
- "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit. LSD typically does well sustaining Uop supply. However; in some rare cases; optimal uop-delivery could not be reached for small loops whose size (in terms of number of uops) does not suit well the LSD structure",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit. LSD typically does well sustaining Uop supply. However; in some rare cases; optimal uop-delivery could not be reached for small loops whose size (in terms of number of uops) does not suit well the LSD structure.",
"ScaleUnit": "100%"
},
{
@@ -1457,16 +1473,16 @@
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_bottleneck_memory_synchronization, tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_bottleneck_memory_synchronization, tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
- "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=0x4@) / tma_info_thread_clks",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
"MetricName": "tma_mem_bandwidth",
- "MetricThreshold": "tma_mem_bandwidth > 0.2 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_sq_full",
"ScaleUnit": "100%"
},
{
@@ -1474,8 +1490,8 @@
"MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_thread_clks - tma_mem_bandwidth",
"MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
"MetricName": "tma_mem_latency",
- "MetricThreshold": "tma_mem_latency > 0.1 & tma_dram_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_bottleneck_cache_memory_latency, tma_l3_hit_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_bottleneck_data_cache_memory_latency, tma_l3_hit_latency",
"ScaleUnit": "100%"
},
{
@@ -1485,11 +1501,11 @@
"MetricName": "tma_memory_bound",
"MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
- "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two)",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations , uops for memory load or store accesses",
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
"MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_light_operations * MEM_INST_RETIRED.ANY / INST_RETIRED.ANY",
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
@@ -1511,7 +1527,7 @@
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks",
"MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
"MetricName": "tma_mispredicts_resteers",
- "MetricThreshold": "tma_mispredicts_resteers > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_bottleneck_mispredictions, tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost",
"ScaleUnit": "100%"
},
@@ -1526,24 +1542,24 @@
},
{
"BriefDescription": "This metric represents fraction of cycles where (only) 4 uops were delivered by the MITE pipeline",
- "MetricExpr": "(cpu@IDQ.MITE_UOPS\\,cmask\\=0x4@ - cpu@IDQ.MITE_UOPS\\,cmask\\=0x5@) / tma_info_thread_clks",
+ "MetricExpr": "(cpu@IDQ.MITE_UOPS\\,cmask\\=4@ - cpu@IDQ.MITE_UOPS\\,cmask\\=5@) / tma_info_thread_clks",
"MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_mite_group",
"MetricName": "tma_mite_4wide",
- "MetricThreshold": "tma_mite_4wide > 0.05 & tma_mite > 0.1 & tma_fetch_bandwidth > 0.2",
+ "MetricThreshold": "tma_mite_4wide > 0.05 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued , the Count Domain; [ADL+] cycles)",
+ "BriefDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles)",
"MetricExpr": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH / UOPS_ISSUED.ANY",
"MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group",
"MetricName": "tma_mixing_vectors",
"MetricThreshold": "tma_mixing_vectors > 0.05",
- "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued , the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
+ "PublicDescription": "This metric estimates penalty in terms of percentage of([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the Microcode Sequencer (MS) unit - see Microcode_Sequencer node for details",
- "MetricExpr": "cpu@IDQ.MS_UOPS\\,cmask\\=0x1@ / tma_info_core_core_clks / 2",
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the Microcode Sequencer (MS) unit - see Microcode_Sequencer node for details.",
+ "MetricExpr": "cpu@IDQ.MS_UOPS\\,cmask\\=1@ / tma_info_core_core_clks / 3.3",
"MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_ms",
"MetricThreshold": "tma_ms > 0.05 & tma_fetch_bandwidth > 0.2",
@@ -1554,7 +1570,7 @@
"MetricExpr": "3 * IDQ.MS_SWITCHES / tma_info_thread_clks",
"MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
"MetricName": "tma_ms_switches",
- "MetricThreshold": "tma_ms_switches > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
"PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_bottleneck_irregular_overhead, tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
"ScaleUnit": "100%"
},
@@ -1563,7 +1579,7 @@
"MetricExpr": "tma_light_operations * INST_RETIRED.NOP / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "BvBO;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
"MetricName": "tma_nop_instructions",
- "MetricThreshold": "tma_nop_instructions > 0.1 & tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
+ "MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
"PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
"ScaleUnit": "100%"
},
@@ -1578,19 +1594,19 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types)",
+ "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).",
"MetricExpr": "max(tma_branch_mispredicts * (1 - BR_MISP_RETIRED.ALL_BRANCHES / (INT_MISC.CLEARS_COUNT - MACHINE_CLEARS.COUNT)), 0.0001)",
"MetricGroup": "BrMispredicts;BvIO;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
"MetricName": "tma_other_mispredicts",
- "MetricThreshold": "tma_other_mispredicts > 0.05 & tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering",
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.",
"MetricExpr": "max(tma_machine_clears * (1 - MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.COUNT), 0.0001)",
"MetricGroup": "BvIO;Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_other_nukes",
- "MetricThreshold": "tma_other_nukes > 0.05 & tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
@@ -1634,8 +1650,8 @@
"MetricExpr": "((tma_ports_utilized_0 * tma_info_thread_clks + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / tma_info_thread_clks if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_thread_clks)",
"MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_ports_utilization",
- "MetricThreshold": "tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
"ScaleUnit": "100%"
},
{
@@ -1643,8 +1659,8 @@
"MetricExpr": "EXE_ACTIVITY.EXE_BOUND_0_PORTS / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_0",
- "MetricThreshold": "tma_ports_utilized_0 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
"ScaleUnit": "100%"
},
{
@@ -1652,7 +1668,7 @@
"MetricExpr": "EXE_ACTIVITY.1_PORTS_UTIL / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_1",
- "MetricThreshold": "tma_ports_utilized_1 > 0.2 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Sample with: EXE_ACTIVITY.1_PORTS_UTIL. Related metrics: tma_l1_bound",
"ScaleUnit": "100%"
},
@@ -1661,7 +1677,7 @@
"MetricExpr": "EXE_ACTIVITY.2_PORTS_UTIL / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_2",
- "MetricThreshold": "tma_ports_utilized_2 > 0.15 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Sample with: EXE_ACTIVITY.2_PORTS_UTIL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
"ScaleUnit": "100%"
},
@@ -1670,14 +1686,14 @@
"MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_thread_clks",
"MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
- "MetricThreshold": "tma_ports_utilized_3m > 0.4 & tma_ports_utilization > 0.15 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * slots",
+ "MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound)",
"MetricGroup": "BvUW;Default;TmaL1;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
@@ -1690,7 +1706,7 @@
"MetricExpr": "RESOURCE_STALLS.SCOREBOARD / tma_info_thread_clks",
"MetricGroup": "BvIO;PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
"MetricName": "tma_serializing_operation",
- "MetricThreshold": "tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
"PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
"ScaleUnit": "100%"
},
@@ -1699,7 +1715,7 @@
"MetricExpr": "140 * MISC_RETIRED.PAUSE_INST / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group",
"MetricName": "tma_slow_pause",
- "MetricThreshold": "tma_slow_pause > 0.05 & tma_serializing_operation > 0.1 & tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. Sample with: MISC_RETIRED.PAUSE_INST",
"ScaleUnit": "100%"
},
@@ -1709,17 +1725,16 @@
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_split_loads",
"MetricThreshold": "tma_split_loads > 0.3",
- "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS_PS",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric represents rate of split store accesses",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
"MetricName": "tma_split_stores",
- "MetricThreshold": "tma_split_stores > 0.2 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
"ScaleUnit": "100%"
},
{
@@ -1727,8 +1742,8 @@
"MetricExpr": "L1D_PEND_MISS.L2_STALL / tma_info_thread_clks",
"MetricGroup": "BvMB;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
"MetricName": "tma_sq_full",
- "MetricThreshold": "tma_sq_full > 0.3 & tma_l3_bound > 0.05 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_bottleneck_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_bottleneck_data_cache_memory_bandwidth, tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth",
"ScaleUnit": "100%"
},
{
@@ -1736,18 +1751,17 @@
"MetricExpr": "EXE_ACTIVITY.BOUND_ON_STORES / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_store_bound",
- "MetricThreshold": "tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES_PS",
"ScaleUnit": "100%"
},
{
"BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
- "MetricThreshold": "tma_store_fwd_blk > 0.1 & tma_l1_bound > 0.1 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
"ScaleUnit": "100%"
},
{
@@ -1755,8 +1769,8 @@
"MetricExpr": "(L2_RQSTS.RFO_HIT * 10 * (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) + (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_thread_clks",
"MetricGroup": "BvML;LockCont;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
"MetricName": "tma_store_latency",
- "MetricThreshold": "tma_store_latency > 0.1 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
- "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_branch_resteers, tma_fb_full, tma_l3_hit_latency, tma_lock_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
"ScaleUnit": "100%"
},
{
@@ -1773,7 +1787,7 @@
"MetricExpr": "tma_dtlb_store - tma_store_stlb_miss",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
"MetricName": "tma_store_stlb_hit",
- "MetricThreshold": "tma_store_stlb_hit > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_hit > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
@@ -1781,31 +1795,31 @@
"MetricExpr": "DTLB_STORE_MISSES.WALK_ACTIVE / tma_info_core_core_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
"MetricName": "tma_store_stlb_miss",
- "MetricThreshold": "tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 1 GB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_1G / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_1g",
- "MetricThreshold": "tma_store_stlb_miss_1g > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_1g > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 2 or 4 MB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_2m",
- "MetricThreshold": "tma_store_stlb_miss_2m > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_2m > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data store accesses",
+ "BriefDescription": "This metric estimates the fraction of cycles to walk the memory paging structures to cache translation of 4 KB pages for data store accesses.",
"MetricExpr": "tma_store_stlb_miss * DTLB_STORE_MISSES.WALK_COMPLETED_4K / (DTLB_STORE_MISSES.WALK_COMPLETED_4K + DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M + DTLB_STORE_MISSES.WALK_COMPLETED_1G)",
"MetricGroup": "MemoryTLB;TopdownL6;tma_L6_group;tma_store_stlb_miss_group",
"MetricName": "tma_store_stlb_miss_4k",
- "MetricThreshold": "tma_store_stlb_miss_4k > 0.05 & tma_store_stlb_miss > 0.05 & tma_dtlb_store > 0.05 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_store_stlb_miss_4k > 0.05 & (tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))))",
"ScaleUnit": "100%"
},
{
@@ -1813,7 +1827,7 @@
"MetricExpr": "9 * OCR.STREAMING_WR.ANY_RESPONSE / tma_info_thread_clks",
"MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueSmSt;tma_store_bound_group",
"MetricName": "tma_streaming_stores",
- "MetricThreshold": "tma_streaming_stores > 0.2 & tma_store_bound > 0.2 & tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricThreshold": "tma_streaming_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
"PublicDescription": "This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should Streaming stores be a bottleneck. Sample with: OCR.STREAMING_WR.ANY_RESPONSE. Related metrics: tma_fb_full",
"ScaleUnit": "100%"
},
@@ -1822,7 +1836,7 @@
"MetricExpr": "10 * BACLEARS.ANY / tma_info_thread_clks",
"MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
- "MetricThreshold": "tma_unknown_branches > 0.05 & tma_branch_resteers > 0.05 & tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
"PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: BACLEARS.ANY",
"ScaleUnit": "100%"
},
@@ -1831,8 +1845,8 @@
"MetricExpr": "tma_retiring * UOPS_EXECUTED.X87 / UOPS_EXECUTED.THREAD",
"MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
"MetricName": "tma_x87_use",
- "MetricThreshold": "tma_x87_use > 0.1 & tma_fp_arith > 0.2 & tma_light_operations > 0.6",
- "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
"ScaleUnit": "100%"
},
{
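A note on the metric hunks above: the parentheses added to the MetricThreshold strings are semantically neutral, since "&" is plain logical AND in perf's metric-expression grammar and "a & (b & c)" evaluates the same as "a & b & c"; the rewrite merely matches the form the metric generator now emits. The cmask spellings are likewise equivalent ("cmask=4" and "cmask=0x4" name the same counter mask). The sketch below shows how a MetricExpr string such as "1 / tma_info_thread_ipc" can be resolved against counter values; it is a toy stand-in for perf's evaluator, the IPC expression is assumed for the example, and the counts are invented.

# Toy MetricExpr resolution, assuming every referenced name is present in
# the environment; perf's real parser handles much more (if/else, #SMT_on,
# event modifiers, and so on).
import re

def evaluate(expr, env):
    # Swap event/metric tokens (dots allowed) for their numeric values.
    py = re.sub(r"[A-Za-z_][A-Za-z0-9_.]*",
                lambda m: repr(env[m.group(0)]), expr)
    return eval(py)  # fine for a sketch; never eval untrusted input

counts = {"INST_RETIRED.ANY": 900_000_000,
          "CPU_CLK_UNHALTED.THREAD": 1_200_000_000}
ipc = evaluate("INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD", counts)
cpi = evaluate("1 / tma_info_thread_ipc", {"tma_info_thread_ipc": ipc})
print(f"IPC={ipc:.2f}  CPI={cpi:.2f}")
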
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-dp/cache.json b/tools/perf/pmu-events/arch/x86/westmereep-dp/cache.json
index 30845c7dbf08..f6f95f3ff301 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-dp/cache.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-dp/cache.json
@@ -120,6 +120,38 @@
"UMask": "0x2"
},
{
+ "BriefDescription": "L1I instruction fetch stall cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "L1I.CYCLES_STALLED",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "L1I instruction fetch hits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "L1I.HITS",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "L1I instruction fetch misses",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "L1I.MISSES",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "L1I Instruction fetches",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "L1I.READS",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x3"
+ },
+ {
"BriefDescription": "All L2 data requests",
"Counter": "0,1,2,3",
"EventCode": "0x26",
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-dp/other.json b/tools/perf/pmu-events/arch/x86/westmereep-dp/other.json
index bcf5bcf637c0..c0cf8bae8074 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-dp/other.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-dp/other.json
@@ -16,46 +16,6 @@
"UMask": "0x1"
},
{
- "BriefDescription": "L1I instruction fetch stall cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "L1I.CYCLES_STALLED",
- "SampleAfterValue": "2000000",
- "UMask": "0x4"
- },
- {
- "BriefDescription": "L1I instruction fetch hits",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "L1I.HITS",
- "SampleAfterValue": "2000000",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "L1I instruction fetch misses",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "L1I.MISSES",
- "SampleAfterValue": "2000000",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "L1I Instruction fetches",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "L1I.READS",
- "SampleAfterValue": "2000000",
- "UMask": "0x3"
- },
- {
- "BriefDescription": "Large ITLB hit",
- "Counter": "0,1,2,3",
- "EventCode": "0x82",
- "EventName": "LARGE_ITLB.HIT",
- "SampleAfterValue": "200000",
- "UMask": "0x1"
- },
- {
"BriefDescription": "Loads that partially overlap an earlier store",
"Counter": "0,1,2,3",
"EventCode": "0x3",
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-dp/virtual-memory.json b/tools/perf/pmu-events/arch/x86/westmereep-dp/virtual-memory.json
index 53d7f76325a3..84c920637b12 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-dp/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-dp/virtual-memory.json
@@ -153,6 +153,14 @@
"UMask": "0x20"
},
{
+ "BriefDescription": "Large ITLB hit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "LARGE_ITLB.HIT",
+ "SampleAfterValue": "200000",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Retired loads that miss the DTLB (Precise Event)",
"Counter": "0,1,2,3",
"EventCode": "0xCB",
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-sp/cache.json b/tools/perf/pmu-events/arch/x86/westmereep-sp/cache.json
index 90cb367f5798..0cd571472dca 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-sp/cache.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-sp/cache.json
@@ -120,6 +120,38 @@
"UMask": "0x2"
},
{
+ "BriefDescription": "L1I instruction fetch stall cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "L1I.CYCLES_STALLED",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "L1I instruction fetch hits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "L1I.HITS",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "L1I instruction fetch misses",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "L1I.MISSES",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "L1I Instruction fetches",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "L1I.READS",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x3"
+ },
+ {
"BriefDescription": "All L2 data requests",
"Counter": "0,1,2,3",
"EventCode": "0x26",
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-sp/other.json b/tools/perf/pmu-events/arch/x86/westmereep-sp/other.json
index bcf5bcf637c0..c0cf8bae8074 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-sp/other.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-sp/other.json
@@ -16,46 +16,6 @@
"UMask": "0x1"
},
{
- "BriefDescription": "L1I instruction fetch stall cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "L1I.CYCLES_STALLED",
- "SampleAfterValue": "2000000",
- "UMask": "0x4"
- },
- {
- "BriefDescription": "L1I instruction fetch hits",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "L1I.HITS",
- "SampleAfterValue": "2000000",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "L1I instruction fetch misses",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "L1I.MISSES",
- "SampleAfterValue": "2000000",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "L1I Instruction fetches",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "L1I.READS",
- "SampleAfterValue": "2000000",
- "UMask": "0x3"
- },
- {
- "BriefDescription": "Large ITLB hit",
- "Counter": "0,1,2,3",
- "EventCode": "0x82",
- "EventName": "LARGE_ITLB.HIT",
- "SampleAfterValue": "200000",
- "UMask": "0x1"
- },
- {
"BriefDescription": "Loads that partially overlap an earlier store",
"Counter": "0,1,2,3",
"EventCode": "0x3",
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-sp/virtual-memory.json b/tools/perf/pmu-events/arch/x86/westmereep-sp/virtual-memory.json
index e7affdf7f41b..a1b22c82a9bf 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-sp/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-sp/virtual-memory.json
@@ -129,6 +129,14 @@
"UMask": "0x20"
},
{
+ "BriefDescription": "Large ITLB hit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "LARGE_ITLB.HIT",
+ "SampleAfterValue": "200000",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Retired loads that miss the DTLB (Precise Event)",
"Counter": "0,1,2,3",
"EventCode": "0xCB",
diff --git a/tools/perf/pmu-events/arch/x86/westmereex/cache.json b/tools/perf/pmu-events/arch/x86/westmereex/cache.json
index 9f922370ee8b..2a677d10f688 100644
--- a/tools/perf/pmu-events/arch/x86/westmereex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/westmereex/cache.json
@@ -120,6 +120,38 @@
"UMask": "0x2"
},
{
+ "BriefDescription": "L1I instruction fetch stall cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "L1I.CYCLES_STALLED",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "L1I instruction fetch hits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "L1I.HITS",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "L1I instruction fetch misses",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "L1I.MISSES",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "L1I Instruction fetches",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "L1I.READS",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x3"
+ },
+ {
"BriefDescription": "All L2 data requests",
"Counter": "0,1,2,3",
"EventCode": "0x26",
diff --git a/tools/perf/pmu-events/arch/x86/westmereex/other.json b/tools/perf/pmu-events/arch/x86/westmereex/other.json
index bcf5bcf637c0..c0cf8bae8074 100644
--- a/tools/perf/pmu-events/arch/x86/westmereex/other.json
+++ b/tools/perf/pmu-events/arch/x86/westmereex/other.json
@@ -16,46 +16,6 @@
"UMask": "0x1"
},
{
- "BriefDescription": "L1I instruction fetch stall cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "L1I.CYCLES_STALLED",
- "SampleAfterValue": "2000000",
- "UMask": "0x4"
- },
- {
- "BriefDescription": "L1I instruction fetch hits",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "L1I.HITS",
- "SampleAfterValue": "2000000",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "L1I instruction fetch misses",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "L1I.MISSES",
- "SampleAfterValue": "2000000",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "L1I Instruction fetches",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "L1I.READS",
- "SampleAfterValue": "2000000",
- "UMask": "0x3"
- },
- {
- "BriefDescription": "Large ITLB hit",
- "Counter": "0,1,2,3",
- "EventCode": "0x82",
- "EventName": "LARGE_ITLB.HIT",
- "SampleAfterValue": "200000",
- "UMask": "0x1"
- },
- {
"BriefDescription": "Loads that partially overlap an earlier store",
"Counter": "0,1,2,3",
"EventCode": "0x3",
diff --git a/tools/perf/pmu-events/arch/x86/westmereex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/westmereex/virtual-memory.json
index 0c3501e6e5a3..1800c6ecbf80 100644
--- a/tools/perf/pmu-events/arch/x86/westmereex/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/westmereex/virtual-memory.json
@@ -153,6 +153,14 @@
"UMask": "0x20"
},
{
+ "BriefDescription": "Large ITLB hit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "LARGE_ITLB.HIT",
+ "SampleAfterValue": "200000",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Retired loads that miss the DTLB (Precise Event)",
"Counter": "0,1,2,3",
"EventCode": "0xCB",
diff --git a/tools/perf/pmu-events/empty-pmu-events.c b/tools/perf/pmu-events/empty-pmu-events.c
index 0cb7ba7912e8..76c395cf513c 100644
--- a/tools/perf/pmu-events/empty-pmu-events.c
+++ b/tools/perf/pmu-events/empty-pmu-events.c
@@ -19,198 +19,2753 @@ struct pmu_table_entry {
};
static const char *const big_c_string =
-/* offset=0 */ "tool\000"
-/* offset=5 */ "duration_time\000tool\000Wall clock interval time in nanoseconds\000config=1\000\00000\000\000"
-/* offset=78 */ "user_time\000tool\000User (non-kernel) time in nanoseconds\000config=2\000\00000\000\000"
-/* offset=145 */ "system_time\000tool\000System/kernel time in nanoseconds\000config=3\000\00000\000\000"
-/* offset=210 */ "has_pmem\000tool\0001 if persistent memory installed otherwise 0\000config=4\000\00000\000\000"
-/* offset=283 */ "num_cores\000tool\000Number of cores. A core consists of 1 or more thread, with each thread being associated with a logical Linux CPU\000config=5\000\00000\000\000"
-/* offset=425 */ "num_cpus\000tool\000Number of logical Linux CPUs. There may be multiple such CPUs on a core\000config=6\000\00000\000\000"
-/* offset=525 */ "num_cpus_online\000tool\000Number of online logical Linux CPUs. There may be multiple such CPUs on a core\000config=7\000\00000\000\000"
-/* offset=639 */ "num_dies\000tool\000Number of dies. Each die has 1 or more cores\000config=8\000\00000\000\000"
-/* offset=712 */ "num_packages\000tool\000Number of packages. Each package has 1 or more die\000config=9\000\00000\000\000"
-/* offset=795 */ "slots\000tool\000Number of functional units that in parallel can execute parts of an instruction\000config=0xa\000\00000\000\000"
-/* offset=902 */ "smt_on\000tool\0001 if simultaneous multithreading (aka hyperthreading) is enable otherwise 0\000config=0xb\000\00000\000\000"
-/* offset=1006 */ "system_tsc_freq\000tool\000The amount a Time Stamp Counter (TSC) increases per second\000config=0xc\000\00000\000\000"
-/* offset=1102 */ "default_core\000"
-/* offset=1115 */ "bp_l1_btb_correct\000branch\000L1 BTB Correction\000event=0x8a\000\00000\000\000"
-/* offset=1174 */ "bp_l2_btb_correct\000branch\000L2 BTB Correction\000event=0x8b\000\00000\000\000"
-/* offset=1233 */ "l3_cache_rd\000cache\000L3 cache access, read\000event=0x40\000\00000\000Attributable Level 3 cache access, read\000"
-/* offset=1328 */ "segment_reg_loads.any\000other\000Number of segment register loads\000event=6,period=200000,umask=0x80\000\00000\000\000"
-/* offset=1427 */ "dispatch_blocked.any\000other\000Memory cluster signals to block micro-op dispatch for any reason\000event=9,period=200000,umask=0x20\000\00000\000\000"
-/* offset=1557 */ "eist_trans\000other\000Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions\000event=0x3a,period=200000\000\00000\000\000"
-/* offset=1672 */ "hisi_sccl,ddrc\000"
-/* offset=1687 */ "uncore_hisi_ddrc.flux_wcmd\000uncore\000DDRC write commands\000event=2\000\00000\000DDRC write commands\000"
-/* offset=1773 */ "uncore_cbox\000"
-/* offset=1785 */ "unc_cbo_xsnp_response.miss_eviction\000uncore\000A cross-core snoop resulted from L3 Eviction which misses in some processor core\000event=0x22,umask=0x81\000\00000\000A cross-core snoop resulted from L3 Eviction which misses in some processor core\000"
-/* offset=2016 */ "event-hyphen\000uncore\000UNC_CBO_HYPHEN\000event=0xe0\000\00000\000UNC_CBO_HYPHEN\000"
-/* offset=2081 */ "event-two-hyph\000uncore\000UNC_CBO_TWO_HYPH\000event=0xc0\000\00000\000UNC_CBO_TWO_HYPH\000"
-/* offset=2152 */ "hisi_sccl,l3c\000"
-/* offset=2166 */ "uncore_hisi_l3c.rd_hit_cpipe\000uncore\000Total read hits\000event=7\000\00000\000Total read hits\000"
-/* offset=2246 */ "uncore_imc_free_running\000"
-/* offset=2270 */ "uncore_imc_free_running.cache_miss\000uncore\000Total cache misses\000event=0x12\000\00000\000Total cache misses\000"
-/* offset=2365 */ "uncore_imc\000"
-/* offset=2376 */ "uncore_imc.cache_hits\000uncore\000Total cache hits\000event=0x34\000\00000\000Total cache hits\000"
-/* offset=2454 */ "uncore_sys_ddr_pmu\000"
-/* offset=2473 */ "sys_ddr_pmu.write_cycles\000uncore\000ddr write-cycles event\000event=0x2b\000v8\00000\000\000"
-/* offset=2546 */ "uncore_sys_ccn_pmu\000"
-/* offset=2565 */ "sys_ccn_pmu.read_cycles\000uncore\000ccn read-cycles event\000config=0x2c\0000x01\00000\000\000"
-/* offset=2639 */ "uncore_sys_cmn_pmu\000"
-/* offset=2658 */ "sys_cmn_pmu.hnf_cache_miss\000uncore\000Counts total cache misses in first lookup result (high priority)\000eventid=1,type=5\000(434|436|43c|43a).*\00000\000\000"
-/* offset=2798 */ "CPI\000\0001 / IPC\000\000\000\000\000\000\000\00000"
-/* offset=2820 */ "IPC\000group1\000inst_retired.any / cpu_clk_unhalted.thread\000\000\000\000\000\000\000\00000"
-/* offset=2883 */ "Frontend_Bound_SMT\000\000idq_uops_not_delivered.core / (4 * (cpu_clk_unhalted.thread / 2 * (1 + cpu_clk_unhalted.one_thread_active / cpu_clk_unhalted.ref_xclk)))\000\000\000\000\000\000\000\00000"
-/* offset=3049 */ "dcache_miss_cpi\000\000l1d\\-loads\\-misses / inst_retired.any\000\000\000\000\000\000\000\00000"
-/* offset=3113 */ "icache_miss_cycles\000\000l1i\\-loads\\-misses / inst_retired.any\000\000\000\000\000\000\000\00000"
-/* offset=3180 */ "cache_miss_cycles\000group1\000dcache_miss_cpi + icache_miss_cycles\000\000\000\000\000\000\000\00000"
-/* offset=3251 */ "DCache_L2_All_Hits\000\000l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit\000\000\000\000\000\000\000\00000"
-/* offset=3345 */ "DCache_L2_All_Miss\000\000max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) + l2_rqsts.pf_miss + l2_rqsts.rfo_miss\000\000\000\000\000\000\000\00000"
-/* offset=3479 */ "DCache_L2_All\000\000DCache_L2_All_Hits + DCache_L2_All_Miss\000\000\000\000\000\000\000\00000"
-/* offset=3543 */ "DCache_L2_Hits\000\000d_ratio(DCache_L2_All_Hits, DCache_L2_All)\000\000\000\000\000\000\000\00000"
-/* offset=3611 */ "DCache_L2_Misses\000\000d_ratio(DCache_L2_All_Miss, DCache_L2_All)\000\000\000\000\000\000\000\00000"
-/* offset=3681 */ "M1\000\000ipc + M2\000\000\000\000\000\000\000\00000"
-/* offset=3703 */ "M2\000\000ipc + M1\000\000\000\000\000\000\000\00000"
-/* offset=3725 */ "M3\000\0001 / M3\000\000\000\000\000\000\000\00000"
-/* offset=3745 */ "L1D_Cache_Fill_BW\000\00064 * l1d.replacement / 1e9 / duration_time\000\000\000\000\000\000\000\00000"
+/* offset=0 */ "default_core\000"
+/* offset=13 */ "l1-dcache\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=99 */ "l1-dcache-load\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=190 */ "l1-dcache-load-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=286 */ "l1-dcache-load-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=387 */ "l1-dcache-load-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=482 */ "l1-dcache-load-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=580 */ "l1-dcache-load-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00000\000\000\000\000\000"
+/* offset=682 */ "l1-dcache-load-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=782 */ "l1-dcache-loads\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00000\000\000\000\000\000"
+/* offset=874 */ "l1-dcache-loads-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=971 */ "l1-dcache-loads-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=1073 */ "l1-dcache-loads-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=1169 */ "l1-dcache-loads-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=1268 */ "l1-dcache-loads-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=1371 */ "l1-dcache-loads-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=1472 */ "l1-dcache-read\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=1563 */ "l1-dcache-read-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=1659 */ "l1-dcache-read-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=1760 */ "l1-dcache-read-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=1855 */ "l1-dcache-read-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=1953 */ "l1-dcache-read-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=2055 */ "l1-dcache-read-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=2155 */ "l1-dcache-store\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=2252 */ "l1-dcache-store-refs\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=2354 */ "l1-dcache-store-reference\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=2461 */ "l1-dcache-store-ops\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=2562 */ "l1-dcache-store-access\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=2666 */ "l1-dcache-store-misses\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00000\000\000\000\000\000"
+/* offset=2770 */ "l1-dcache-store-miss\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000"
+/* offset=2872 */ "l1-dcache-stores\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00000\000\000\000\000\000"
+/* offset=2970 */ "l1-dcache-stores-refs\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=3073 */ "l1-dcache-stores-reference\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=3181 */ "l1-dcache-stores-ops\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=3283 */ "l1-dcache-stores-access\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=3388 */ "l1-dcache-stores-misses\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000"
+/* offset=3493 */ "l1-dcache-stores-miss\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000"
+/* offset=3596 */ "l1-dcache-write\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=3693 */ "l1-dcache-write-refs\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=3795 */ "l1-dcache-write-reference\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=3902 */ "l1-dcache-write-ops\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=4003 */ "l1-dcache-write-access\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=4107 */ "l1-dcache-write-misses\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000"
+/* offset=4211 */ "l1-dcache-write-miss\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000"
+/* offset=4313 */ "l1-dcache-prefetch\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=4416 */ "l1-dcache-prefetch-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=4524 */ "l1-dcache-prefetch-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=4637 */ "l1-dcache-prefetch-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=4744 */ "l1-dcache-prefetch-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=4854 */ "l1-dcache-prefetch-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00000\000\000\000\000\000"
+/* offset=4964 */ "l1-dcache-prefetch-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=5072 */ "l1-dcache-prefetches\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00000\000\000\000\000\000"
+/* offset=5177 */ "l1-dcache-prefetches-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=5287 */ "l1-dcache-prefetches-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=5402 */ "l1-dcache-prefetches-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=5511 */ "l1-dcache-prefetches-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=5623 */ "l1-dcache-prefetches-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=5735 */ "l1-dcache-prefetches-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=5845 */ "l1-dcache-speculative-read\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=5956 */ "l1-dcache-speculative-read-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=6072 */ "l1-dcache-speculative-read-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=6193 */ "l1-dcache-speculative-read-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=6308 */ "l1-dcache-speculative-read-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=6426 */ "l1-dcache-speculative-read-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=6544 */ "l1-dcache-speculative-read-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=6660 */ "l1-dcache-speculative-load\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=6771 */ "l1-dcache-speculative-load-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=6887 */ "l1-dcache-speculative-load-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=7008 */ "l1-dcache-speculative-load-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=7123 */ "l1-dcache-speculative-load-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=7241 */ "l1-dcache-speculative-load-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=7359 */ "l1-dcache-speculative-load-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=7475 */ "l1-dcache-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=7566 */ "l1-dcache-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=7662 */ "l1-dcache-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=7752 */ "l1-dcache-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=7845 */ "l1-dcache-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=7942 */ "l1-dcache-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=8037 */ "l1-d\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=8118 */ "l1-d-load\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=8204 */ "l1-d-load-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=8295 */ "l1-d-load-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=8391 */ "l1-d-load-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=8481 */ "l1-d-load-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=8574 */ "l1-d-load-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=8671 */ "l1-d-load-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=8766 */ "l1-d-loads\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=8853 */ "l1-d-loads-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=8945 */ "l1-d-loads-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=9042 */ "l1-d-loads-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=9133 */ "l1-d-loads-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=9227 */ "l1-d-loads-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=9325 */ "l1-d-loads-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=9421 */ "l1-d-read\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=9507 */ "l1-d-read-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=9598 */ "l1-d-read-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=9694 */ "l1-d-read-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=9784 */ "l1-d-read-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=9877 */ "l1-d-read-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=9974 */ "l1-d-read-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=10069 */ "l1-d-store\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=10161 */ "l1-d-store-refs\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=10258 */ "l1-d-store-reference\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=10360 */ "l1-d-store-ops\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=10456 */ "l1-d-store-access\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=10555 */ "l1-d-store-misses\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000"
+/* offset=10654 */ "l1-d-store-miss\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000"
+/* offset=10751 */ "l1-d-stores\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=10844 */ "l1-d-stores-refs\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=10942 */ "l1-d-stores-reference\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=11045 */ "l1-d-stores-ops\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=11142 */ "l1-d-stores-access\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=11242 */ "l1-d-stores-misses\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000"
+/* offset=11342 */ "l1-d-stores-miss\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000"
+/* offset=11440 */ "l1-d-write\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=11532 */ "l1-d-write-refs\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=11629 */ "l1-d-write-reference\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=11731 */ "l1-d-write-ops\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=11827 */ "l1-d-write-access\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=11926 */ "l1-d-write-misses\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000"
+/* offset=12025 */ "l1-d-write-miss\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000"
+/* offset=12122 */ "l1-d-prefetch\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=12220 */ "l1-d-prefetch-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=12323 */ "l1-d-prefetch-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=12431 */ "l1-d-prefetch-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=12533 */ "l1-d-prefetch-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=12638 */ "l1-d-prefetch-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=12743 */ "l1-d-prefetch-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=12846 */ "l1-d-prefetches\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=12946 */ "l1-d-prefetches-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=13051 */ "l1-d-prefetches-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=13161 */ "l1-d-prefetches-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=13265 */ "l1-d-prefetches-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=13372 */ "l1-d-prefetches-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=13479 */ "l1-d-prefetches-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=13584 */ "l1-d-speculative-read\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=13690 */ "l1-d-speculative-read-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=13801 */ "l1-d-speculative-read-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=13917 */ "l1-d-speculative-read-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=14027 */ "l1-d-speculative-read-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=14140 */ "l1-d-speculative-read-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=14253 */ "l1-d-speculative-read-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=14364 */ "l1-d-speculative-load\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=14470 */ "l1-d-speculative-load-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=14581 */ "l1-d-speculative-load-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=14697 */ "l1-d-speculative-load-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=14807 */ "l1-d-speculative-load-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=14920 */ "l1-d-speculative-load-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=15033 */ "l1-d-speculative-load-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=15144 */ "l1-d-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=15230 */ "l1-d-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=15321 */ "l1-d-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=15406 */ "l1-d-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=15494 */ "l1-d-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=15586 */ "l1-d-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=15676 */ "l1d\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=15756 */ "l1d-load\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=15841 */ "l1d-load-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=15931 */ "l1d-load-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=16026 */ "l1d-load-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=16115 */ "l1d-load-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=16207 */ "l1d-load-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=16303 */ "l1d-load-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=16397 */ "l1d-loads\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=16483 */ "l1d-loads-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=16574 */ "l1d-loads-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=16670 */ "l1d-loads-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=16760 */ "l1d-loads-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=16853 */ "l1d-loads-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=16950 */ "l1d-loads-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=17045 */ "l1d-read\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=17130 */ "l1d-read-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=17220 */ "l1d-read-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=17315 */ "l1d-read-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=17404 */ "l1d-read-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=17496 */ "l1d-read-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=17592 */ "l1d-read-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=17686 */ "l1d-store\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=17777 */ "l1d-store-refs\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=17873 */ "l1d-store-reference\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=17974 */ "l1d-store-ops\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=18069 */ "l1d-store-access\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=18167 */ "l1d-store-misses\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000"
+/* offset=18265 */ "l1d-store-miss\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000"
+/* offset=18361 */ "l1d-stores\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=18453 */ "l1d-stores-refs\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=18550 */ "l1d-stores-reference\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=18652 */ "l1d-stores-ops\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=18748 */ "l1d-stores-access\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=18847 */ "l1d-stores-misses\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000"
+/* offset=18946 */ "l1d-stores-miss\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000"
+/* offset=19043 */ "l1d-write\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=19134 */ "l1d-write-refs\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=19230 */ "l1d-write-reference\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=19331 */ "l1d-write-ops\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=19426 */ "l1d-write-access\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=19524 */ "l1d-write-misses\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000"
+/* offset=19622 */ "l1d-write-miss\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000"
+/* offset=19718 */ "l1d-prefetch\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=19815 */ "l1d-prefetch-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=19917 */ "l1d-prefetch-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=20024 */ "l1d-prefetch-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=20125 */ "l1d-prefetch-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=20229 */ "l1d-prefetch-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=20333 */ "l1d-prefetch-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=20435 */ "l1d-prefetches\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=20534 */ "l1d-prefetches-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=20638 */ "l1d-prefetches-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=20747 */ "l1d-prefetches-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=20850 */ "l1d-prefetches-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=20956 */ "l1d-prefetches-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=21062 */ "l1d-prefetches-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=21166 */ "l1d-speculative-read\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=21271 */ "l1d-speculative-read-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=21381 */ "l1d-speculative-read-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=21496 */ "l1d-speculative-read-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=21605 */ "l1d-speculative-read-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=21717 */ "l1d-speculative-read-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=21829 */ "l1d-speculative-read-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=21939 */ "l1d-speculative-load\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=22044 */ "l1d-speculative-load-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=22154 */ "l1d-speculative-load-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=22269 */ "l1d-speculative-load-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=22378 */ "l1d-speculative-load-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=22490 */ "l1d-speculative-load-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=22602 */ "l1d-speculative-load-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=22712 */ "l1d-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=22797 */ "l1d-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=22887 */ "l1d-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=22971 */ "l1d-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=23058 */ "l1d-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=23149 */ "l1d-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=23238 */ "l1-data\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=23322 */ "l1-data-load\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=23411 */ "l1-data-load-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=23505 */ "l1-data-load-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=23604 */ "l1-data-load-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=23697 */ "l1-data-load-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=23793 */ "l1-data-load-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=23893 */ "l1-data-load-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=23991 */ "l1-data-loads\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=24081 */ "l1-data-loads-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=24176 */ "l1-data-loads-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=24276 */ "l1-data-loads-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=24370 */ "l1-data-loads-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=24467 */ "l1-data-loads-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=24568 */ "l1-data-loads-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=24667 */ "l1-data-read\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=24756 */ "l1-data-read-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=24850 */ "l1-data-read-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=24949 */ "l1-data-read-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=25042 */ "l1-data-read-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=25138 */ "l1-data-read-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=25238 */ "l1-data-read-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=25336 */ "l1-data-store\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=25431 */ "l1-data-store-refs\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=25531 */ "l1-data-store-reference\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=25636 */ "l1-data-store-ops\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=25735 */ "l1-data-store-access\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=25837 */ "l1-data-store-misses\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000"
+/* offset=25939 */ "l1-data-store-miss\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000"
+/* offset=26039 */ "l1-data-stores\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=26135 */ "l1-data-stores-refs\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=26236 */ "l1-data-stores-reference\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=26342 */ "l1-data-stores-ops\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=26442 */ "l1-data-stores-access\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=26545 */ "l1-data-stores-misses\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000"
+/* offset=26648 */ "l1-data-stores-miss\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000"
+/* offset=26749 */ "l1-data-write\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=26844 */ "l1-data-write-refs\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=26944 */ "l1-data-write-reference\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=27049 */ "l1-data-write-ops\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=27148 */ "l1-data-write-access\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000"
+/* offset=27250 */ "l1-data-write-misses\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000"
+/* offset=27352 */ "l1-data-write-miss\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000"
+/* offset=27452 */ "l1-data-prefetch\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=27553 */ "l1-data-prefetch-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=27659 */ "l1-data-prefetch-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=27770 */ "l1-data-prefetch-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=27875 */ "l1-data-prefetch-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=27983 */ "l1-data-prefetch-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=28091 */ "l1-data-prefetch-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=28197 */ "l1-data-prefetches\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=28300 */ "l1-data-prefetches-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=28408 */ "l1-data-prefetches-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=28521 */ "l1-data-prefetches-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=28628 */ "l1-data-prefetches-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=28738 */ "l1-data-prefetches-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=28848 */ "l1-data-prefetches-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=28956 */ "l1-data-speculative-read\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=29065 */ "l1-data-speculative-read-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=29179 */ "l1-data-speculative-read-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=29298 */ "l1-data-speculative-read-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=29411 */ "l1-data-speculative-read-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=29527 */ "l1-data-speculative-read-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=29643 */ "l1-data-speculative-read-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=29757 */ "l1-data-speculative-load\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=29866 */ "l1-data-speculative-load-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=29980 */ "l1-data-speculative-load-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=30099 */ "l1-data-speculative-load-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=30212 */ "l1-data-speculative-load-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000"
+/* offset=30328 */ "l1-data-speculative-load-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=30444 */ "l1-data-speculative-load-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000"
+/* offset=30558 */ "l1-data-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=30647 */ "l1-data-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=30741 */ "l1-data-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=30829 */ "l1-data-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000"
+/* offset=30920 */ "l1-data-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=31015 */ "l1-data-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000"
+/* offset=31108 */ "l1-icache\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=31201 */ "l1-icache-load\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=31299 */ "l1-icache-load-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=31402 */ "l1-icache-load-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=31510 */ "l1-icache-load-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=31612 */ "l1-icache-load-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=31717 */ "l1-icache-load-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00000\000\000\000\000\000"
+/* offset=31826 */ "l1-icache-load-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=31933 */ "l1-icache-loads\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00000\000\000\000\000\000"
+/* offset=32032 */ "l1-icache-loads-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=32136 */ "l1-icache-loads-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=32245 */ "l1-icache-loads-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=32348 */ "l1-icache-loads-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=32454 */ "l1-icache-loads-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=32564 */ "l1-icache-loads-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=32672 */ "l1-icache-read\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=32770 */ "l1-icache-read-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=32873 */ "l1-icache-read-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=32981 */ "l1-icache-read-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=33083 */ "l1-icache-read-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=33188 */ "l1-icache-read-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=33297 */ "l1-icache-read-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=33404 */ "l1-icache-prefetch\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=33514 */ "l1-icache-prefetch-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=33629 */ "l1-icache-prefetch-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=33749 */ "l1-icache-prefetch-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=33863 */ "l1-icache-prefetch-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=33980 */ "l1-icache-prefetch-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00000\000\000\000\000\000"
+/* offset=34097 */ "l1-icache-prefetch-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=34212 */ "l1-icache-prefetches\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00000\000\000\000\000\000"
+/* offset=34324 */ "l1-icache-prefetches-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=34441 */ "l1-icache-prefetches-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=34563 */ "l1-icache-prefetches-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=34679 */ "l1-icache-prefetches-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=34798 */ "l1-icache-prefetches-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=34917 */ "l1-icache-prefetches-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=35034 */ "l1-icache-speculative-read\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=35152 */ "l1-icache-speculative-read-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=35275 */ "l1-icache-speculative-read-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=35403 */ "l1-icache-speculative-read-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=35525 */ "l1-icache-speculative-read-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=35650 */ "l1-icache-speculative-read-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=35775 */ "l1-icache-speculative-read-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=35898 */ "l1-icache-speculative-load\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=36016 */ "l1-icache-speculative-load-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=36139 */ "l1-icache-speculative-load-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=36267 */ "l1-icache-speculative-load-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=36389 */ "l1-icache-speculative-load-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=36514 */ "l1-icache-speculative-load-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=36639 */ "l1-icache-speculative-load-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=36762 */ "l1-icache-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=36860 */ "l1-icache-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=36963 */ "l1-icache-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=37060 */ "l1-icache-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=37160 */ "l1-icache-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=37264 */ "l1-icache-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=37366 */ "l1-i\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=37454 */ "l1-i-load\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=37547 */ "l1-i-load-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=37645 */ "l1-i-load-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=37748 */ "l1-i-load-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=37845 */ "l1-i-load-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=37945 */ "l1-i-load-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=38049 */ "l1-i-load-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=38151 */ "l1-i-loads\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=38245 */ "l1-i-loads-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=38344 */ "l1-i-loads-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=38448 */ "l1-i-loads-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=38546 */ "l1-i-loads-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=38647 */ "l1-i-loads-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=38752 */ "l1-i-loads-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=38855 */ "l1-i-read\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=38948 */ "l1-i-read-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=39046 */ "l1-i-read-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=39149 */ "l1-i-read-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=39246 */ "l1-i-read-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=39346 */ "l1-i-read-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=39450 */ "l1-i-read-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=39552 */ "l1-i-prefetch\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=39657 */ "l1-i-prefetch-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=39767 */ "l1-i-prefetch-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=39882 */ "l1-i-prefetch-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=39991 */ "l1-i-prefetch-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=40103 */ "l1-i-prefetch-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=40215 */ "l1-i-prefetch-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=40325 */ "l1-i-prefetches\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=40432 */ "l1-i-prefetches-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=40544 */ "l1-i-prefetches-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=40661 */ "l1-i-prefetches-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=40772 */ "l1-i-prefetches-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=40886 */ "l1-i-prefetches-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=41000 */ "l1-i-prefetches-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=41112 */ "l1-i-speculative-read\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=41225 */ "l1-i-speculative-read-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=41343 */ "l1-i-speculative-read-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=41466 */ "l1-i-speculative-read-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=41583 */ "l1-i-speculative-read-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=41703 */ "l1-i-speculative-read-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=41823 */ "l1-i-speculative-read-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=41941 */ "l1-i-speculative-load\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=42054 */ "l1-i-speculative-load-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=42172 */ "l1-i-speculative-load-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=42295 */ "l1-i-speculative-load-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=42412 */ "l1-i-speculative-load-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=42532 */ "l1-i-speculative-load-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=42652 */ "l1-i-speculative-load-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=42770 */ "l1-i-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=42863 */ "l1-i-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=42961 */ "l1-i-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=43053 */ "l1-i-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=43148 */ "l1-i-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=43247 */ "l1-i-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=43344 */ "l1i\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=43431 */ "l1i-load\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=43523 */ "l1i-load-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=43620 */ "l1i-load-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=43722 */ "l1i-load-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=43818 */ "l1i-load-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=43917 */ "l1i-load-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=44020 */ "l1i-load-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=44121 */ "l1i-loads\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=44214 */ "l1i-loads-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=44312 */ "l1i-loads-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=44415 */ "l1i-loads-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=44512 */ "l1i-loads-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=44612 */ "l1i-loads-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=44716 */ "l1i-loads-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=44818 */ "l1i-read\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=44910 */ "l1i-read-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=45007 */ "l1i-read-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=45109 */ "l1i-read-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=45205 */ "l1i-read-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=45304 */ "l1i-read-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=45407 */ "l1i-read-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=45508 */ "l1i-prefetch\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=45612 */ "l1i-prefetch-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=45721 */ "l1i-prefetch-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=45835 */ "l1i-prefetch-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=45943 */ "l1i-prefetch-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=46054 */ "l1i-prefetch-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=46165 */ "l1i-prefetch-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=46274 */ "l1i-prefetches\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=46380 */ "l1i-prefetches-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=46491 */ "l1i-prefetches-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=46607 */ "l1i-prefetches-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=46717 */ "l1i-prefetches-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=46830 */ "l1i-prefetches-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=46943 */ "l1i-prefetches-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=47054 */ "l1i-speculative-read\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=47166 */ "l1i-speculative-read-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=47283 */ "l1i-speculative-read-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=47405 */ "l1i-speculative-read-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=47521 */ "l1i-speculative-read-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=47640 */ "l1i-speculative-read-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=47759 */ "l1i-speculative-read-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=47876 */ "l1i-speculative-load\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=47988 */ "l1i-speculative-load-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=48105 */ "l1i-speculative-load-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=48227 */ "l1i-speculative-load-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=48343 */ "l1i-speculative-load-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=48462 */ "l1i-speculative-load-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=48581 */ "l1i-speculative-load-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=48698 */ "l1i-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=48790 */ "l1i-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=48887 */ "l1i-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=48978 */ "l1i-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=49072 */ "l1i-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=49170 */ "l1i-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=49266 */ "l1-instruction\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=49364 */ "l1-instruction-load\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=49467 */ "l1-instruction-load-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=49575 */ "l1-instruction-load-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=49688 */ "l1-instruction-load-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=49795 */ "l1-instruction-load-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=49905 */ "l1-instruction-load-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=50019 */ "l1-instruction-load-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=50131 */ "l1-instruction-loads\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=50235 */ "l1-instruction-loads-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=50344 */ "l1-instruction-loads-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=50458 */ "l1-instruction-loads-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=50566 */ "l1-instruction-loads-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=50677 */ "l1-instruction-loads-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=50792 */ "l1-instruction-loads-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=50905 */ "l1-instruction-read\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=51008 */ "l1-instruction-read-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=51116 */ "l1-instruction-read-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=51229 */ "l1-instruction-read-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=51336 */ "l1-instruction-read-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=51446 */ "l1-instruction-read-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=51560 */ "l1-instruction-read-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=51672 */ "l1-instruction-prefetch\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=51787 */ "l1-instruction-prefetch-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=51907 */ "l1-instruction-prefetch-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=52032 */ "l1-instruction-prefetch-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=52151 */ "l1-instruction-prefetch-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=52273 */ "l1-instruction-prefetch-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=52395 */ "l1-instruction-prefetch-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=52515 */ "l1-instruction-prefetches\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=52632 */ "l1-instruction-prefetches-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=52754 */ "l1-instruction-prefetches-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=52881 */ "l1-instruction-prefetches-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=53002 */ "l1-instruction-prefetches-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=53126 */ "l1-instruction-prefetches-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=53250 */ "l1-instruction-prefetches-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=53372 */ "l1-instruction-speculative-read\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=53495 */ "l1-instruction-speculative-read-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=53623 */ "l1-instruction-speculative-read-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=53756 */ "l1-instruction-speculative-read-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=53883 */ "l1-instruction-speculative-read-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=54013 */ "l1-instruction-speculative-read-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=54143 */ "l1-instruction-speculative-read-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=54271 */ "l1-instruction-speculative-load\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=54394 */ "l1-instruction-speculative-load-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=54522 */ "l1-instruction-speculative-load-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=54655 */ "l1-instruction-speculative-load-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=54782 */ "l1-instruction-speculative-load-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000"
+/* offset=54912 */ "l1-instruction-speculative-load-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=55042 */ "l1-instruction-speculative-load-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000"
+/* offset=55170 */ "l1-instruction-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=55273 */ "l1-instruction-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=55381 */ "l1-instruction-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=55483 */ "l1-instruction-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000"
+/* offset=55588 */ "l1-instruction-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=55697 */ "l1-instruction-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000"
+/* offset=55804 */ "llc\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=55882 */ "llc-load\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=55965 */ "llc-load-refs\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=56053 */ "llc-load-reference\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=56146 */ "llc-load-ops\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=56233 */ "llc-load-access\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=56323 */ "llc-load-misses\000legacy cache\000Last level cache read misses\000legacy-cache-config=0x10002\000\00000\000\000\000\000\000"
+/* offset=56417 */ "llc-load-miss\000legacy cache\000Last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000"
+/* offset=56509 */ "llc-loads\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00000\000\000\000\000\000"
+/* offset=56593 */ "llc-loads-refs\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=56682 */ "llc-loads-reference\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=56776 */ "llc-loads-ops\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=56864 */ "llc-loads-access\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=56955 */ "llc-loads-misses\000legacy cache\000Last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000"
+/* offset=57050 */ "llc-loads-miss\000legacy cache\000Last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000"
+/* offset=57143 */ "llc-read\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=57226 */ "llc-read-refs\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=57314 */ "llc-read-reference\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=57407 */ "llc-read-ops\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=57494 */ "llc-read-access\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=57584 */ "llc-read-misses\000legacy cache\000Last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000"
+/* offset=57678 */ "llc-read-miss\000legacy cache\000Last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000"
+/* offset=57770 */ "llc-store\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000"
+/* offset=57859 */ "llc-store-refs\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000"
+/* offset=57953 */ "llc-store-reference\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000"
+/* offset=58052 */ "llc-store-ops\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000"
+/* offset=58145 */ "llc-store-access\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000"
+/* offset=58241 */ "llc-store-misses\000legacy cache\000Last level cache write misses\000legacy-cache-config=0x10102\000\00000\000\000\000\000\000"
+/* offset=58337 */ "llc-store-miss\000legacy cache\000Last level cache write misses\000legacy-cache-config=0x10102\000\00010\000\000\000\000\000"
+/* offset=58431 */ "llc-stores\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00000\000\000\000\000\000"
+/* offset=58521 */ "llc-stores-refs\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000"
+/* offset=58616 */ "llc-stores-reference\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000"
+/* offset=58716 */ "llc-stores-ops\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000"
+/* offset=58810 */ "llc-stores-access\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000"
+/* offset=58907 */ "llc-stores-misses\000legacy cache\000Last level cache write misses\000legacy-cache-config=0x10102\000\00010\000\000\000\000\000"
+/* offset=59004 */ "llc-stores-miss\000legacy cache\000Last level cache write misses\000legacy-cache-config=0x10102\000\00010\000\000\000\000\000"
+/* offset=59099 */ "llc-write\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000"
+/* offset=59188 */ "llc-write-refs\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000"
+/* offset=59282 */ "llc-write-reference\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000"
+/* offset=59381 */ "llc-write-ops\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000"
+/* offset=59474 */ "llc-write-access\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000"
+/* offset=59570 */ "llc-write-misses\000legacy cache\000Last level cache write misses\000legacy-cache-config=0x10102\000\00010\000\000\000\000\000"
+/* offset=59666 */ "llc-write-miss\000legacy cache\000Last level cache write misses\000legacy-cache-config=0x10102\000\00010\000\000\000\000\000"
+/* offset=59760 */ "llc-prefetch\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=59855 */ "llc-prefetch-refs\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=59955 */ "llc-prefetch-reference\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=60060 */ "llc-prefetch-ops\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=60159 */ "llc-prefetch-access\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=60261 */ "llc-prefetch-misses\000legacy cache\000Last level cache prefetch misses\000legacy-cache-config=0x10202\000\00000\000\000\000\000\000"
+/* offset=60363 */ "llc-prefetch-miss\000legacy cache\000Last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000"
+/* offset=60463 */ "llc-prefetches\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00000\000\000\000\000\000"
+/* offset=60560 */ "llc-prefetches-refs\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=60662 */ "llc-prefetches-reference\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=60769 */ "llc-prefetches-ops\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=60870 */ "llc-prefetches-access\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=60974 */ "llc-prefetches-misses\000legacy cache\000Last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000"
+/* offset=61078 */ "llc-prefetches-miss\000legacy cache\000Last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000"
+/* offset=61180 */ "llc-speculative-read\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=61283 */ "llc-speculative-read-refs\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=61391 */ "llc-speculative-read-reference\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=61504 */ "llc-speculative-read-ops\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=61611 */ "llc-speculative-read-access\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=61721 */ "llc-speculative-read-misses\000legacy cache\000Last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000"
+/* offset=61831 */ "llc-speculative-read-miss\000legacy cache\000Last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000"
+/* offset=61939 */ "llc-speculative-load\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=62042 */ "llc-speculative-load-refs\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=62150 */ "llc-speculative-load-reference\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=62263 */ "llc-speculative-load-ops\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=62370 */ "llc-speculative-load-access\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=62480 */ "llc-speculative-load-misses\000legacy cache\000Last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000"
+/* offset=62590 */ "llc-speculative-load-miss\000legacy cache\000Last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000"
+/* offset=62698 */ "llc-refs\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=62781 */ "llc-reference\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=62869 */ "llc-ops\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=62951 */ "llc-access\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=63036 */ "llc-misses\000legacy cache\000Last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000"
+/* offset=63125 */ "llc-miss\000legacy cache\000Last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000"
+/* offset=63212 */ "l2\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=63309 */ "l2-load\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=63411 */ "l2-load-refs\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=63518 */ "l2-load-reference\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=63630 */ "l2-load-ops\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=63736 */ "l2-load-access\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=63845 */ "l2-load-misses\000legacy cache\000Level 2 (or higher) last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000"
+/* offset=63958 */ "l2-load-miss\000legacy cache\000Level 2 (or higher) last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000"
+/* offset=64069 */ "l2-loads\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=64172 */ "l2-loads-refs\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=64280 */ "l2-loads-reference\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=64393 */ "l2-loads-ops\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=64500 */ "l2-loads-access\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=64610 */ "l2-loads-misses\000legacy cache\000Level 2 (or higher) last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000"
+/* offset=64724 */ "l2-loads-miss\000legacy cache\000Level 2 (or higher) last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000"
+/* offset=64836 */ "l2-read\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=64938 */ "l2-read-refs\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=65045 */ "l2-read-reference\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=65157 */ "l2-read-ops\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=65263 */ "l2-read-access\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=65372 */ "l2-read-misses\000legacy cache\000Level 2 (or higher) last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000"
+/* offset=65485 */ "l2-read-miss\000legacy cache\000Level 2 (or higher) last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000"
+/* offset=65596 */ "l2-store\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000"
+/* offset=65704 */ "l2-store-refs\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000"
+/* offset=65817 */ "l2-store-reference\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000"
+/* offset=65935 */ "l2-store-ops\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000"
+/* offset=66047 */ "l2-store-access\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000"
+/* offset=66162 */ "l2-store-misses\000legacy cache\000Level 2 (or higher) last level cache write misses\000legacy-cache-config=0x10102\000\00010\000\000\000\000\000"
+/* offset=66277 */ "l2-store-miss\000legacy cache\000Level 2 (or higher) last level cache write misses\000legacy-cache-config=0x10102\000\00010\000\000\000\000\000"
+/* offset=66390 */ "l2-stores\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000"
+/* offset=66499 */ "l2-stores-refs\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000"
+/* offset=66613 */ "l2-stores-reference\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000"
+/* offset=66732 */ "l2-stores-ops\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000"
+/* offset=66845 */ "l2-stores-access\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000"
+/* offset=66961 */ "l2-stores-misses\000legacy cache\000Level 2 (or higher) last level cache write misses\000legacy-cache-config=0x10102\000\00010\000\000\000\000\000"
+/* offset=67077 */ "l2-stores-miss\000legacy cache\000Level 2 (or higher) last level cache write misses\000legacy-cache-config=0x10102\000\00010\000\000\000\000\000"
+/* offset=67191 */ "l2-write\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000"
+/* offset=67299 */ "l2-write-refs\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000"
+/* offset=67412 */ "l2-write-reference\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000"
+/* offset=67530 */ "l2-write-ops\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000"
+/* offset=67642 */ "l2-write-access\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000"
+/* offset=67757 */ "l2-write-misses\000legacy cache\000Level 2 (or higher) last level cache write misses\000legacy-cache-config=0x10102\000\00010\000\000\000\000\000"
+/* offset=67872 */ "l2-write-miss\000legacy cache\000Level 2 (or higher) last level cache write misses\000legacy-cache-config=0x10102\000\00010\000\000\000\000\000"
+/* offset=67985 */ "l2-prefetch\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=68099 */ "l2-prefetch-refs\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=68218 */ "l2-prefetch-reference\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=68342 */ "l2-prefetch-ops\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=68460 */ "l2-prefetch-access\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=68581 */ "l2-prefetch-misses\000legacy cache\000Level 2 (or higher) last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000"
+/* offset=68702 */ "l2-prefetch-miss\000legacy cache\000Level 2 (or higher) last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000"
+/* offset=68821 */ "l2-prefetches\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=68937 */ "l2-prefetches-refs\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=69058 */ "l2-prefetches-reference\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=69184 */ "l2-prefetches-ops\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=69304 */ "l2-prefetches-access\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=69427 */ "l2-prefetches-misses\000legacy cache\000Level 2 (or higher) last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000"
+/* offset=69550 */ "l2-prefetches-miss\000legacy cache\000Level 2 (or higher) last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000"
+/* offset=69671 */ "l2-speculative-read\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=69793 */ "l2-speculative-read-refs\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=69920 */ "l2-speculative-read-reference\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=70052 */ "l2-speculative-read-ops\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=70178 */ "l2-speculative-read-access\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=70307 */ "l2-speculative-read-misses\000legacy cache\000Level 2 (or higher) last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000"
+/* offset=70436 */ "l2-speculative-read-miss\000legacy cache\000Level 2 (or higher) last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000"
+/* offset=70563 */ "l2-speculative-load\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=70685 */ "l2-speculative-load-refs\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=70812 */ "l2-speculative-load-reference\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=70944 */ "l2-speculative-load-ops\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=71070 */ "l2-speculative-load-access\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000"
+/* offset=71199 */ "l2-speculative-load-misses\000legacy cache\000Level 2 (or higher) last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000"
+/* offset=71328 */ "l2-speculative-load-miss\000legacy cache\000Level 2 (or higher) last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000"
+/* offset=71455 */ "l2-refs\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=71557 */ "l2-reference\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=71664 */ "l2-ops\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=71765 */ "l2-access\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000"
+/* offset=71869 */ "l2-misses\000legacy cache\000Level 2 (or higher) last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000"
+/* offset=71977 */ "l2-miss\000legacy cache\000Level 2 (or higher) last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000"
+/* offset=72083 */ "dtlb\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=72154 */ "dtlb-load\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=72230 */ "dtlb-load-refs\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=72311 */ "dtlb-load-reference\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=72397 */ "dtlb-load-ops\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=72477 */ "dtlb-load-access\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=72560 */ "dtlb-load-misses\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00000\000\000\000\000\000"
+/* offset=72647 */ "dtlb-load-miss\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000"
+/* offset=72732 */ "dtlb-loads\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00000\000\000\000\000\000"
+/* offset=72809 */ "dtlb-loads-refs\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=72891 */ "dtlb-loads-reference\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=72978 */ "dtlb-loads-ops\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=73059 */ "dtlb-loads-access\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=73143 */ "dtlb-loads-misses\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000"
+/* offset=73231 */ "dtlb-loads-miss\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000"
+/* offset=73317 */ "dtlb-read\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=73393 */ "dtlb-read-refs\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=73474 */ "dtlb-read-reference\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=73560 */ "dtlb-read-ops\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=73640 */ "dtlb-read-access\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=73723 */ "dtlb-read-misses\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000"
+/* offset=73810 */ "dtlb-read-miss\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000"
+/* offset=73895 */ "dtlb-store\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=73977 */ "dtlb-store-refs\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=74064 */ "dtlb-store-reference\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=74156 */ "dtlb-store-ops\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=74242 */ "dtlb-store-access\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=74331 */ "dtlb-store-misses\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00000\000\000\000\000\000"
+/* offset=74420 */ "dtlb-store-miss\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000"
+/* offset=74507 */ "dtlb-stores\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00000\000\000\000\000\000"
+/* offset=74590 */ "dtlb-stores-refs\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=74678 */ "dtlb-stores-reference\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=74771 */ "dtlb-stores-ops\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=74858 */ "dtlb-stores-access\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=74948 */ "dtlb-stores-misses\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000"
+/* offset=75038 */ "dtlb-stores-miss\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000"
+/* offset=75126 */ "dtlb-write\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=75208 */ "dtlb-write-refs\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=75295 */ "dtlb-write-reference\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=75387 */ "dtlb-write-ops\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=75473 */ "dtlb-write-access\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=75562 */ "dtlb-write-misses\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000"
+/* offset=75651 */ "dtlb-write-miss\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000"
+/* offset=75738 */ "dtlb-prefetch\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=75826 */ "dtlb-prefetch-refs\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=75919 */ "dtlb-prefetch-reference\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=76017 */ "dtlb-prefetch-ops\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=76109 */ "dtlb-prefetch-access\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=76204 */ "dtlb-prefetch-misses\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00000\000\000\000\000\000"
+/* offset=76299 */ "dtlb-prefetch-miss\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000"
+/* offset=76392 */ "dtlb-prefetches\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00000\000\000\000\000\000"
+/* offset=76482 */ "dtlb-prefetches-refs\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=76577 */ "dtlb-prefetches-reference\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=76677 */ "dtlb-prefetches-ops\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=76771 */ "dtlb-prefetches-access\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=76868 */ "dtlb-prefetches-misses\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000"
+/* offset=76965 */ "dtlb-prefetches-miss\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000"
+/* offset=77060 */ "dtlb-speculative-read\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=77156 */ "dtlb-speculative-read-refs\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=77257 */ "dtlb-speculative-read-reference\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=77363 */ "dtlb-speculative-read-ops\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=77463 */ "dtlb-speculative-read-access\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=77566 */ "dtlb-speculative-read-misses\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000"
+/* offset=77669 */ "dtlb-speculative-read-miss\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000"
+/* offset=77770 */ "dtlb-speculative-load\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=77866 */ "dtlb-speculative-load-refs\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=77967 */ "dtlb-speculative-load-reference\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=78073 */ "dtlb-speculative-load-ops\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=78173 */ "dtlb-speculative-load-access\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=78276 */ "dtlb-speculative-load-misses\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000"
+/* offset=78379 */ "dtlb-speculative-load-miss\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000"
+/* offset=78480 */ "dtlb-refs\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=78556 */ "dtlb-reference\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=78637 */ "dtlb-ops\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=78712 */ "dtlb-access\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=78790 */ "dtlb-misses\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000"
+/* offset=78872 */ "dtlb-miss\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000"
+/* offset=78952 */ "d-tlb\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=79024 */ "d-tlb-load\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=79101 */ "d-tlb-load-refs\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=79183 */ "d-tlb-load-reference\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=79270 */ "d-tlb-load-ops\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=79351 */ "d-tlb-load-access\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=79435 */ "d-tlb-load-misses\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000"
+/* offset=79523 */ "d-tlb-load-miss\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000"
+/* offset=79609 */ "d-tlb-loads\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=79687 */ "d-tlb-loads-refs\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=79770 */ "d-tlb-loads-reference\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=79858 */ "d-tlb-loads-ops\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=79940 */ "d-tlb-loads-access\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=80025 */ "d-tlb-loads-misses\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000"
+/* offset=80114 */ "d-tlb-loads-miss\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000"
+/* offset=80201 */ "d-tlb-read\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=80278 */ "d-tlb-read-refs\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=80360 */ "d-tlb-read-reference\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=80447 */ "d-tlb-read-ops\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=80528 */ "d-tlb-read-access\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=80612 */ "d-tlb-read-misses\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000"
+/* offset=80700 */ "d-tlb-read-miss\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000"
+/* offset=80786 */ "d-tlb-store\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=80869 */ "d-tlb-store-refs\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=80957 */ "d-tlb-store-reference\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=81050 */ "d-tlb-store-ops\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=81137 */ "d-tlb-store-access\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=81227 */ "d-tlb-store-misses\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000"
+/* offset=81317 */ "d-tlb-store-miss\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000"
+/* offset=81405 */ "d-tlb-stores\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=81489 */ "d-tlb-stores-refs\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=81578 */ "d-tlb-stores-reference\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=81672 */ "d-tlb-stores-ops\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=81760 */ "d-tlb-stores-access\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=81851 */ "d-tlb-stores-misses\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000"
+/* offset=81942 */ "d-tlb-stores-miss\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000"
+/* offset=82031 */ "d-tlb-write\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=82114 */ "d-tlb-write-refs\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=82202 */ "d-tlb-write-reference\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=82295 */ "d-tlb-write-ops\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=82382 */ "d-tlb-write-access\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=82472 */ "d-tlb-write-misses\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000"
+/* offset=82562 */ "d-tlb-write-miss\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000"
+/* offset=82650 */ "d-tlb-prefetch\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=82739 */ "d-tlb-prefetch-refs\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=82833 */ "d-tlb-prefetch-reference\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=82932 */ "d-tlb-prefetch-ops\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=83025 */ "d-tlb-prefetch-access\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=83121 */ "d-tlb-prefetch-misses\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000"
+/* offset=83217 */ "d-tlb-prefetch-miss\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000"
+/* offset=83311 */ "d-tlb-prefetches\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=83402 */ "d-tlb-prefetches-refs\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=83498 */ "d-tlb-prefetches-reference\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=83599 */ "d-tlb-prefetches-ops\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=83694 */ "d-tlb-prefetches-access\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=83792 */ "d-tlb-prefetches-misses\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000"
+/* offset=83890 */ "d-tlb-prefetches-miss\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000"
+/* offset=83986 */ "d-tlb-speculative-read\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=84083 */ "d-tlb-speculative-read-refs\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=84185 */ "d-tlb-speculative-read-reference\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=84292 */ "d-tlb-speculative-read-ops\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=84393 */ "d-tlb-speculative-read-access\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=84497 */ "d-tlb-speculative-read-misses\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000"
+/* offset=84601 */ "d-tlb-speculative-read-miss\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000"
+/* offset=84703 */ "d-tlb-speculative-load\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=84800 */ "d-tlb-speculative-load-refs\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=84902 */ "d-tlb-speculative-load-reference\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=85009 */ "d-tlb-speculative-load-ops\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=85110 */ "d-tlb-speculative-load-access\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=85214 */ "d-tlb-speculative-load-misses\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000"
+/* offset=85318 */ "d-tlb-speculative-load-miss\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000"
+/* offset=85420 */ "d-tlb-refs\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=85497 */ "d-tlb-reference\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=85579 */ "d-tlb-ops\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=85655 */ "d-tlb-access\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=85734 */ "d-tlb-misses\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000"
+/* offset=85817 */ "d-tlb-miss\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000"
+/* offset=85898 */ "data-tlb\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=85973 */ "data-tlb-load\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=86053 */ "data-tlb-load-refs\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=86138 */ "data-tlb-load-reference\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=86228 */ "data-tlb-load-ops\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=86312 */ "data-tlb-load-access\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=86399 */ "data-tlb-load-misses\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000"
+/* offset=86490 */ "data-tlb-load-miss\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000"
+/* offset=86579 */ "data-tlb-loads\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=86660 */ "data-tlb-loads-refs\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=86746 */ "data-tlb-loads-reference\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=86837 */ "data-tlb-loads-ops\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=86922 */ "data-tlb-loads-access\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=87010 */ "data-tlb-loads-misses\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000"
+/* offset=87102 */ "data-tlb-loads-miss\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000"
+/* offset=87192 */ "data-tlb-read\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=87272 */ "data-tlb-read-refs\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=87357 */ "data-tlb-read-reference\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=87447 */ "data-tlb-read-ops\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=87531 */ "data-tlb-read-access\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=87618 */ "data-tlb-read-misses\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000"
+/* offset=87709 */ "data-tlb-read-miss\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000"
+/* offset=87798 */ "data-tlb-store\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=87884 */ "data-tlb-store-refs\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=87975 */ "data-tlb-store-reference\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=88071 */ "data-tlb-store-ops\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=88161 */ "data-tlb-store-access\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=88254 */ "data-tlb-store-misses\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000"
+/* offset=88347 */ "data-tlb-store-miss\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000"
+/* offset=88438 */ "data-tlb-stores\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=88525 */ "data-tlb-stores-refs\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=88617 */ "data-tlb-stores-reference\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=88714 */ "data-tlb-stores-ops\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=88805 */ "data-tlb-stores-access\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=88899 */ "data-tlb-stores-misses\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000"
+/* offset=88993 */ "data-tlb-stores-miss\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000"
+/* offset=89085 */ "data-tlb-write\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=89171 */ "data-tlb-write-refs\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=89262 */ "data-tlb-write-reference\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=89358 */ "data-tlb-write-ops\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=89448 */ "data-tlb-write-access\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000"
+/* offset=89541 */ "data-tlb-write-misses\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000"
+/* offset=89634 */ "data-tlb-write-miss\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000"
+/* offset=89725 */ "data-tlb-prefetch\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=89817 */ "data-tlb-prefetch-refs\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=89914 */ "data-tlb-prefetch-reference\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=90016 */ "data-tlb-prefetch-ops\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=90112 */ "data-tlb-prefetch-access\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=90211 */ "data-tlb-prefetch-misses\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000"
+/* offset=90310 */ "data-tlb-prefetch-miss\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000"
+/* offset=90407 */ "data-tlb-prefetches\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=90501 */ "data-tlb-prefetches-refs\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=90600 */ "data-tlb-prefetches-reference\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=90704 */ "data-tlb-prefetches-ops\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=90802 */ "data-tlb-prefetches-access\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=90903 */ "data-tlb-prefetches-misses\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000"
+/* offset=91004 */ "data-tlb-prefetches-miss\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000"
+/* offset=91103 */ "data-tlb-speculative-read\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=91203 */ "data-tlb-speculative-read-refs\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=91308 */ "data-tlb-speculative-read-reference\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=91418 */ "data-tlb-speculative-read-ops\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=91522 */ "data-tlb-speculative-read-access\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=91629 */ "data-tlb-speculative-read-misses\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000"
+/* offset=91736 */ "data-tlb-speculative-read-miss\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000"
+/* offset=91841 */ "data-tlb-speculative-load\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=91941 */ "data-tlb-speculative-load-refs\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=92046 */ "data-tlb-speculative-load-reference\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=92156 */ "data-tlb-speculative-load-ops\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=92260 */ "data-tlb-speculative-load-access\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000"
+/* offset=92367 */ "data-tlb-speculative-load-misses\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000"
+/* offset=92474 */ "data-tlb-speculative-load-miss\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000"
+/* offset=92579 */ "data-tlb-refs\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=92659 */ "data-tlb-reference\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=92744 */ "data-tlb-ops\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=92823 */ "data-tlb-access\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000"
+/* offset=92905 */ "data-tlb-misses\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000"
+/* offset=92991 */ "data-tlb-miss\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000"
+/* offset=93075 */ "itlb\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=93153 */ "itlb-load\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=93236 */ "itlb-load-refs\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=93324 */ "itlb-load-reference\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=93417 */ "itlb-load-ops\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=93504 */ "itlb-load-access\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=93594 */ "itlb-load-misses\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00000\000\000\000\000\000"
+/* offset=93688 */ "itlb-load-miss\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000"
+/* offset=93780 */ "itlb-loads\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00000\000\000\000\000\000"
+/* offset=93864 */ "itlb-loads-refs\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=93953 */ "itlb-loads-reference\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=94047 */ "itlb-loads-ops\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=94135 */ "itlb-loads-access\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=94226 */ "itlb-loads-misses\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000"
+/* offset=94321 */ "itlb-loads-miss\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000"
+/* offset=94414 */ "itlb-read\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=94497 */ "itlb-read-refs\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=94585 */ "itlb-read-reference\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=94678 */ "itlb-read-ops\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=94765 */ "itlb-read-access\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=94855 */ "itlb-read-misses\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000"
+/* offset=94949 */ "itlb-read-miss\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000"
+/* offset=95041 */ "itlb-refs\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=95124 */ "itlb-reference\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=95212 */ "itlb-ops\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=95294 */ "itlb-access\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=95379 */ "itlb-misses\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000"
+/* offset=95468 */ "itlb-miss\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000"
+/* offset=95555 */ "i-tlb\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=95634 */ "i-tlb-load\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=95718 */ "i-tlb-load-refs\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=95807 */ "i-tlb-load-reference\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=95901 */ "i-tlb-load-ops\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=95989 */ "i-tlb-load-access\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=96080 */ "i-tlb-load-misses\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000"
+/* offset=96175 */ "i-tlb-load-miss\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000"
+/* offset=96268 */ "i-tlb-loads\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=96353 */ "i-tlb-loads-refs\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=96443 */ "i-tlb-loads-reference\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=96538 */ "i-tlb-loads-ops\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=96627 */ "i-tlb-loads-access\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=96719 */ "i-tlb-loads-misses\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000"
+/* offset=96815 */ "i-tlb-loads-miss\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000"
+/* offset=96909 */ "i-tlb-read\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=96993 */ "i-tlb-read-refs\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=97082 */ "i-tlb-read-reference\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=97176 */ "i-tlb-read-ops\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=97264 */ "i-tlb-read-access\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=97355 */ "i-tlb-read-misses\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000"
+/* offset=97450 */ "i-tlb-read-miss\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000"
+/* offset=97543 */ "i-tlb-refs\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=97627 */ "i-tlb-reference\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=97716 */ "i-tlb-ops\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=97799 */ "i-tlb-access\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=97885 */ "i-tlb-misses\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000"
+/* offset=97975 */ "i-tlb-miss\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000"
+/* offset=98063 */ "instruction-tlb\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=98152 */ "instruction-tlb-load\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=98246 */ "instruction-tlb-load-refs\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=98345 */ "instruction-tlb-load-reference\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=98449 */ "instruction-tlb-load-ops\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=98547 */ "instruction-tlb-load-access\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=98648 */ "instruction-tlb-load-misses\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000"
+/* offset=98753 */ "instruction-tlb-load-miss\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000"
+/* offset=98856 */ "instruction-tlb-loads\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=98951 */ "instruction-tlb-loads-refs\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=99051 */ "instruction-tlb-loads-reference\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=99156 */ "instruction-tlb-loads-ops\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=99255 */ "instruction-tlb-loads-access\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=99357 */ "instruction-tlb-loads-misses\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000"
+/* offset=99463 */ "instruction-tlb-loads-miss\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000"
+/* offset=99567 */ "instruction-tlb-read\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=99661 */ "instruction-tlb-read-refs\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=99760 */ "instruction-tlb-read-reference\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=99864 */ "instruction-tlb-read-ops\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=99962 */ "instruction-tlb-read-access\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=100063 */ "instruction-tlb-read-misses\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000"
+/* offset=100168 */ "instruction-tlb-read-miss\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000"
+/* offset=100271 */ "instruction-tlb-refs\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=100365 */ "instruction-tlb-reference\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=100464 */ "instruction-tlb-ops\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=100557 */ "instruction-tlb-access\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000"
+/* offset=100653 */ "instruction-tlb-misses\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000"
+/* offset=100753 */ "instruction-tlb-miss\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000"
+/* offset=100851 */ "branch\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=100938 */ "branch-load\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=101030 */ "branch-load-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=101127 */ "branch-load-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=101229 */ "branch-load-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=101325 */ "branch-load-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=101424 */ "branch-load-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00000\000\000\000\000\000"
+/* offset=101527 */ "branch-load-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=101628 */ "branch-loads\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00000\000\000\000\000\000"
+/* offset=101721 */ "branch-loads-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=101819 */ "branch-loads-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=101922 */ "branch-loads-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=102019 */ "branch-loads-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=102119 */ "branch-loads-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=102223 */ "branch-loads-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=102325 */ "branch-read\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=102417 */ "branch-read-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=102514 */ "branch-read-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=102616 */ "branch-read-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=102712 */ "branch-read-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=102811 */ "branch-read-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=102914 */ "branch-read-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=103015 */ "branch-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=103107 */ "branch-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=103204 */ "branch-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=103295 */ "branch-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=103389 */ "branch-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=103485 */ "branches-load\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=103579 */ "branches-load-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=103678 */ "branches-load-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=103782 */ "branches-load-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=103880 */ "branches-load-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=103981 */ "branches-load-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=104086 */ "branches-load-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=104189 */ "branches-loads\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=104284 */ "branches-loads-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=104384 */ "branches-loads-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=104489 */ "branches-loads-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=104588 */ "branches-loads-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=104690 */ "branches-loads-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=104796 */ "branches-loads-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=104900 */ "branches-read\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=104994 */ "branches-read-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=105093 */ "branches-read-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=105197 */ "branches-read-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=105295 */ "branches-read-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=105396 */ "branches-read-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=105501 */ "branches-read-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=105604 */ "branches-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=105698 */ "branches-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=105797 */ "branches-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=105890 */ "branches-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=105986 */ "branches-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=106086 */ "branches-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=106184 */ "bpu\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=106268 */ "bpu-load\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=106357 */ "bpu-load-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=106451 */ "bpu-load-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=106550 */ "bpu-load-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=106643 */ "bpu-load-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=106739 */ "bpu-load-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=106839 */ "bpu-load-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=106937 */ "bpu-loads\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=107027 */ "bpu-loads-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=107122 */ "bpu-loads-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=107222 */ "bpu-loads-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=107316 */ "bpu-loads-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=107413 */ "bpu-loads-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=107514 */ "bpu-loads-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=107613 */ "bpu-read\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=107702 */ "bpu-read-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=107796 */ "bpu-read-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=107895 */ "bpu-read-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=107988 */ "bpu-read-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=108084 */ "bpu-read-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=108184 */ "bpu-read-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=108282 */ "bpu-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=108371 */ "bpu-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=108465 */ "bpu-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=108553 */ "bpu-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=108644 */ "bpu-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=108739 */ "bpu-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=108832 */ "btb\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=108916 */ "btb-load\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=109005 */ "btb-load-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=109099 */ "btb-load-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=109198 */ "btb-load-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=109291 */ "btb-load-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=109387 */ "btb-load-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=109487 */ "btb-load-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=109585 */ "btb-loads\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=109675 */ "btb-loads-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=109770 */ "btb-loads-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=109870 */ "btb-loads-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=109964 */ "btb-loads-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=110061 */ "btb-loads-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=110162 */ "btb-loads-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=110261 */ "btb-read\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=110350 */ "btb-read-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=110444 */ "btb-read-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=110543 */ "btb-read-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=110636 */ "btb-read-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=110732 */ "btb-read-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=110832 */ "btb-read-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=110930 */ "btb-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=111019 */ "btb-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=111113 */ "btb-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=111201 */ "btb-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=111292 */ "btb-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=111387 */ "btb-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=111480 */ "bpc\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=111564 */ "bpc-load\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=111653 */ "bpc-load-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=111747 */ "bpc-load-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=111846 */ "bpc-load-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=111939 */ "bpc-load-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=112035 */ "bpc-load-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=112135 */ "bpc-load-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=112233 */ "bpc-loads\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=112323 */ "bpc-loads-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=112418 */ "bpc-loads-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=112518 */ "bpc-loads-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=112612 */ "bpc-loads-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=112709 */ "bpc-loads-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=112810 */ "bpc-loads-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=112909 */ "bpc-read\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=112998 */ "bpc-read-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=113092 */ "bpc-read-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=113191 */ "bpc-read-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=113284 */ "bpc-read-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=113380 */ "bpc-read-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=113480 */ "bpc-read-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=113578 */ "bpc-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=113667 */ "bpc-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=113761 */ "bpc-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=113849 */ "bpc-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000"
+/* offset=113940 */ "bpc-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=114035 */ "bpc-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000"
+/* offset=114128 */ "node\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000"
+/* offset=114203 */ "node-load\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000"
+/* offset=114283 */ "node-load-refs\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000"
+/* offset=114368 */ "node-load-reference\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000"
+/* offset=114458 */ "node-load-ops\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000"
+/* offset=114542 */ "node-load-access\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000"
+/* offset=114629 */ "node-load-misses\000legacy cache\000Local memory read misses\000legacy-cache-config=0x10006\000\00000\000\000\000\000\000"
+/* offset=114720 */ "node-load-miss\000legacy cache\000Local memory read misses\000legacy-cache-config=0x10006\000\00010\000\000\000\000\000"
+/* offset=114809 */ "node-loads\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00000\000\000\000\000\000"
+/* offset=114890 */ "node-loads-refs\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000"
+/* offset=114976 */ "node-loads-reference\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000"
+/* offset=115067 */ "node-loads-ops\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000"
+/* offset=115152 */ "node-loads-access\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000"
+/* offset=115240 */ "node-loads-misses\000legacy cache\000Local memory read misses\000legacy-cache-config=0x10006\000\00010\000\000\000\000\000"
+/* offset=115332 */ "node-loads-miss\000legacy cache\000Local memory read misses\000legacy-cache-config=0x10006\000\00010\000\000\000\000\000"
+/* offset=115422 */ "node-read\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000"
+/* offset=115502 */ "node-read-refs\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000"
+/* offset=115587 */ "node-read-reference\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000"
+/* offset=115677 */ "node-read-ops\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000"
+/* offset=115761 */ "node-read-access\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000"
+/* offset=115848 */ "node-read-misses\000legacy cache\000Local memory read misses\000legacy-cache-config=0x10006\000\00010\000\000\000\000\000"
+/* offset=115939 */ "node-read-miss\000legacy cache\000Local memory read misses\000legacy-cache-config=0x10006\000\00010\000\000\000\000\000"
+/* offset=116028 */ "node-store\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00010\000\000\000\000\000"
+/* offset=116114 */ "node-store-refs\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00010\000\000\000\000\000"
+/* offset=116205 */ "node-store-reference\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00010\000\000\000\000\000"
+/* offset=116301 */ "node-store-ops\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00010\000\000\000\000\000"
+/* offset=116391 */ "node-store-access\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00010\000\000\000\000\000"
+/* offset=116484 */ "node-store-misses\000legacy cache\000Local memory write misses\000legacy-cache-config=0x10106\000\00000\000\000\000\000\000"
+/* offset=116577 */ "node-store-miss\000legacy cache\000Local memory write misses\000legacy-cache-config=0x10106\000\00010\000\000\000\000\000"
+/* offset=116668 */ "node-stores\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00000\000\000\000\000\000"
+/* offset=116755 */ "node-stores-refs\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00010\000\000\000\000\000"
+/* offset=116847 */ "node-stores-reference\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00010\000\000\000\000\000"
+/* offset=116944 */ "node-stores-ops\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00010\000\000\000\000\000"
+/* offset=117035 */ "node-stores-access\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00010\000\000\000\000\000"
+/* offset=117129 */ "node-stores-misses\000legacy cache\000Local memory write misses\000legacy-cache-config=0x10106\000\00010\000\000\000\000\000"
+/* offset=117223 */ "node-stores-miss\000legacy cache\000Local memory write misses\000legacy-cache-config=0x10106\000\00010\000\000\000\000\000"
+/* offset=117315 */ "node-write\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00010\000\000\000\000\000"
+/* offset=117401 */ "node-write-refs\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00010\000\000\000\000\000"
+/* offset=117492 */ "node-write-reference\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00010\000\000\000\000\000"
+/* offset=117588 */ "node-write-ops\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00010\000\000\000\000\000"
+/* offset=117678 */ "node-write-access\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00010\000\000\000\000\000"
+/* offset=117771 */ "node-write-misses\000legacy cache\000Local memory write misses\000legacy-cache-config=0x10106\000\00010\000\000\000\000\000"
+/* offset=117864 */ "node-write-miss\000legacy cache\000Local memory write misses\000legacy-cache-config=0x10106\000\00010\000\000\000\000\000"
+/* offset=117955 */ "node-prefetch\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000"
+/* offset=118047 */ "node-prefetch-refs\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000"
+/* offset=118144 */ "node-prefetch-reference\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000"
+/* offset=118246 */ "node-prefetch-ops\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000"
+/* offset=118342 */ "node-prefetch-access\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000"
+/* offset=118441 */ "node-prefetch-misses\000legacy cache\000Local memory prefetch misses\000legacy-cache-config=0x10206\000\00000\000\000\000\000\000"
+/* offset=118540 */ "node-prefetch-miss\000legacy cache\000Local memory prefetch misses\000legacy-cache-config=0x10206\000\00010\000\000\000\000\000"
+/* offset=118637 */ "node-prefetches\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00000\000\000\000\000\000"
+/* offset=118731 */ "node-prefetches-refs\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000"
+/* offset=118830 */ "node-prefetches-reference\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000"
+/* offset=118934 */ "node-prefetches-ops\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000"
+/* offset=119032 */ "node-prefetches-access\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000"
+/* offset=119133 */ "node-prefetches-misses\000legacy cache\000Local memory prefetch misses\000legacy-cache-config=0x10206\000\00010\000\000\000\000\000"
+/* offset=119234 */ "node-prefetches-miss\000legacy cache\000Local memory prefetch misses\000legacy-cache-config=0x10206\000\00010\000\000\000\000\000"
+/* offset=119333 */ "node-speculative-read\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000"
+/* offset=119433 */ "node-speculative-read-refs\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000"
+/* offset=119538 */ "node-speculative-read-reference\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000"
+/* offset=119648 */ "node-speculative-read-ops\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000"
+/* offset=119752 */ "node-speculative-read-access\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000"
+/* offset=119859 */ "node-speculative-read-misses\000legacy cache\000Local memory prefetch misses\000legacy-cache-config=0x10206\000\00010\000\000\000\000\000"
+/* offset=119966 */ "node-speculative-read-miss\000legacy cache\000Local memory prefetch misses\000legacy-cache-config=0x10206\000\00010\000\000\000\000\000"
+/* offset=120071 */ "node-speculative-load\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000"
+/* offset=120171 */ "node-speculative-load-refs\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000"
+/* offset=120276 */ "node-speculative-load-reference\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000"
+/* offset=120386 */ "node-speculative-load-ops\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000"
+/* offset=120490 */ "node-speculative-load-access\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000"
+/* offset=120597 */ "node-speculative-load-misses\000legacy cache\000Local memory prefetch misses\000legacy-cache-config=0x10206\000\00010\000\000\000\000\000"
+/* offset=120704 */ "node-speculative-load-miss\000legacy cache\000Local memory prefetch misses\000legacy-cache-config=0x10206\000\00010\000\000\000\000\000"
+/* offset=120809 */ "node-refs\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000"
+/* offset=120889 */ "node-reference\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000"
+/* offset=120974 */ "node-ops\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000"
+/* offset=121053 */ "node-access\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000"
+/* offset=121135 */ "node-misses\000legacy cache\000Local memory read misses\000legacy-cache-config=0x10006\000\00010\000\000\000\000\000"
+/* offset=121221 */ "node-miss\000legacy cache\000Local memory read misses\000legacy-cache-config=0x10006\000\00010\000\000\000\000\000"
+/* offset=121305 */ "cpu-cycles\000legacy hardware\000Total cycles. Be wary of what happens during CPU frequency scaling [This event is an alias of cycles]\000legacy-hardware-config=0\000\00000\000\000\000\000\000"
+/* offset=121467 */ "cycles\000legacy hardware\000Total cycles. Be wary of what happens during CPU frequency scaling [This event is an alias of cpu-cycles]\000legacy-hardware-config=0\000\00000\000\000\000\000\000"
+/* offset=121629 */ "instructions\000legacy hardware\000Retired instructions. Be careful, these can be affected by various issues, most notably hardware interrupt counts\000legacy-hardware-config=1\000\00000\000\000\000\000\000"
+/* offset=121805 */ "cache-references\000legacy hardware\000Cache accesses. Usually this indicates Last Level Cache accesses but this may vary depending on your CPU. This may include prefetches and coherency messages; again this depends on the design of your CPU\000legacy-hardware-config=2\000\00000\000\000\000\000\000"
+/* offset=122075 */ "cache-misses\000legacy hardware\000Cache misses. Usually this indicates Last Level Cache misses; this is intended to be used in conjunction with the PERF_COUNT_HW_CACHE_REFERENCES event to calculate cache miss rates\000legacy-hardware-config=3\000\00000\000\000\000\000\000"
+/* offset=122318 */ "branches\000legacy hardware\000Retired branch instructions [This event is an alias of branch-instructions]\000legacy-hardware-config=4\000\00000\000\000\000\000\000"
+/* offset=122452 */ "branch-instructions\000legacy hardware\000Retired branch instructions [This event is an alias of branches]\000legacy-hardware-config=4\000\00000\000\000\000\000\000"
+/* offset=122586 */ "branch-misses\000legacy hardware\000Mispredicted branch instructions\000legacy-hardware-config=5\000\00000\000\000\000\000\000"
+/* offset=122682 */ "bus-cycles\000legacy hardware\000Bus cycles, which can be different from total cycles\000legacy-hardware-config=6\000\00000\000\000\000\000\000"
+/* offset=122795 */ "stalled-cycles-frontend\000legacy hardware\000Stalled cycles during issue [This event is an alias of idle-cycles-frontend]\000legacy-hardware-config=7\000\00000\000\000\000\000\000"
+/* offset=122945 */ "idle-cycles-frontend\000legacy hardware\000Stalled cycles during issue [This event is an alias of stalled-cycles-fronted]\000legacy-hardware-config=7\000\00000\000\000\000\000\000"
+/* offset=123094 */ "stalled-cycles-backend\000legacy hardware\000Stalled cycles during retirement [This event is an alias of idle-cycles-backend]\000legacy-hardware-config=8\000\00000\000\000\000\000\000"
+/* offset=123247 */ "idle-cycles-backend\000legacy hardware\000Stalled cycles during retirement [This event is an alias of stalled-cycles-backend]\000legacy-hardware-config=8\000\00000\000\000\000\000\000"
+/* offset=123400 */ "ref-cycles\000legacy hardware\000Total cycles; not affected by CPU frequency scaling\000legacy-hardware-config=9\000\00000\000\000\000\000\000"
+/* offset=123512 */ "software\000"
+/* offset=123521 */ "cpu-clock\000software\000Per-CPU high-resolution timer based event\000config=0\000\000001e-6msec\000\000\000\000\000"
+/* offset=123607 */ "task-clock\000software\000Per-task high-resolution timer based event\000config=1\000\000001e-6msec\000\000\000\000\000"
+/* offset=123695 */ "faults\000software\000Number of page faults [This event is an alias of page-faults]\000config=2\000\00000\000\000\000\000\000"
+/* offset=123790 */ "page-faults\000software\000Number of page faults [This event is an alias of faults]\000config=2\000\00000\000\000\000\000\000"
+/* offset=123885 */ "context-switches\000software\000Number of context switches [This event is an alias of cs]\000config=3\000\00000\000\000\000\000\000"
+/* offset=123986 */ "cs\000software\000Number of context switches [This event is an alias of context-switches]\000config=3\000\00000\000\000\000\000\000"
+/* offset=124087 */ "cpu-migrations\000software\000Number of times a process has migrated to a new CPU [This event is an alias of migrations]\000config=4\000\00000\000\000\000\000\000"
+/* offset=124219 */ "migrations\000software\000Number of times a process has migrated to a new CPU [This event is an alias of cpu-migrations]\000config=4\000\00000\000\000\000\000\000"
+/* offset=124351 */ "minor-faults\000software\000Number of minor page faults. Minor faults don't require I/O to handle\000config=5\000\00000\000\000\000\000\000"
+/* offset=124460 */ "major-faults\000software\000Number of major page faults. Major faults require I/O to handle\000config=6\000\00000\000\000\000\000\000"
+/* offset=124563 */ "alignment-faults\000software\000Number of kernel handled memory alignment faults\000config=7\000\00000\000\000\000\000\000"
+/* offset=124655 */ "emulation-faults\000software\000Number of kernel handled unimplemented instruction faults handled through emulation\000config=8\000\00000\000\000\000\000\000"
+/* offset=124782 */ "dummy\000software\000A placeholder event that doesn't count anything\000config=9\000\00000\000\000\000\000\000"
+/* offset=124862 */ "bpf-output\000software\000An event used by BPF programs to write to the perf ring buffer\000config=0xa\000\00000\000\000\000\000\000"
+/* offset=124964 */ "cgroup-switches\000software\000Number of context switches to a task in a different cgroup\000config=0xb\000\00000\000\000\000\000\000"
+/* offset=125067 */ "tool\000"
+/* offset=125072 */ "duration_time\000tool\000Wall clock interval time in nanoseconds\000config=1\000\00000\000\000\000\000\000"
+/* offset=125148 */ "user_time\000tool\000User (non-kernel) time in nanoseconds\000config=2\000\00000\000\000\000\000\000"
+/* offset=125218 */ "system_time\000tool\000System/kernel time in nanoseconds\000config=3\000\00000\000\000\000\000\000"
+/* offset=125286 */ "has_pmem\000tool\0001 if persistent memory installed otherwise 0\000config=4\000\00000\000\000\000\000\000"
+/* offset=125362 */ "num_cores\000tool\000Number of cores. A core consists of 1 or more thread, with each thread being associated with a logical Linux CPU\000config=5\000\00000\000\000\000\000\000"
+/* offset=125507 */ "num_cpus\000tool\000Number of logical Linux CPUs. There may be multiple such CPUs on a core\000config=6\000\00000\000\000\000\000\000"
+/* offset=125610 */ "num_cpus_online\000tool\000Number of online logical Linux CPUs. There may be multiple such CPUs on a core\000config=7\000\00000\000\000\000\000\000"
+/* offset=125727 */ "num_dies\000tool\000Number of dies. Each die has 1 or more cores\000config=8\000\00000\000\000\000\000\000"
+/* offset=125803 */ "num_packages\000tool\000Number of packages. Each package has 1 or more die\000config=9\000\00000\000\000\000\000\000"
+/* offset=125889 */ "slots\000tool\000Number of functional units that in parallel can execute parts of an instruction\000config=0xa\000\00000\000\000\000\000\000"
+/* offset=125999 */ "smt_on\000tool\0001 if simultaneous multithreading (aka hyperthreading) is enable otherwise 0\000config=0xb\000\00000\000\000\000\000\000"
+/* offset=126106 */ "system_tsc_freq\000tool\000The amount a Time Stamp Counter (TSC) increases per second\000config=0xc\000\00000\000\000\000\000\000"
+/* offset=126205 */ "core_wide\000tool\0001 if not SMT, if SMT are events being gathered on all SMT threads 1 otherwise 0\000config=0xd\000\00000\000\000\000\000\000"
+/* offset=126319 */ "target_cpu\000tool\0001 if CPUs being analyzed, 0 if threads/processes\000config=0xe\000\00000\000\000\000\000\000"
+/* offset=126403 */ "bp_l1_btb_correct\000branch\000L1 BTB Correction\000event=0x8a\000\00000\000\000\000\000\000"
+/* offset=126465 */ "bp_l2_btb_correct\000branch\000L2 BTB Correction\000event=0x8b\000\00000\000\000\000\000\000"
+/* offset=126527 */ "l3_cache_rd\000cache\000L3 cache access, read\000event=0x40\000\00000\000\000\000\000Attributable Level 3 cache access, read\000"
+/* offset=126625 */ "segment_reg_loads.any\000other\000Number of segment register loads\000event=6,period=200000,umask=0x80\000\00000\000\000\000\000\000"
+/* offset=126727 */ "dispatch_blocked.any\000other\000Memory cluster signals to block micro-op dispatch for any reason\000event=9,period=200000,umask=0x20\000\00000\000\000\000\000\000"
+/* offset=126860 */ "eist_trans\000other\000Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions\000event=0x3a,period=200000\000\00000\000\000\000\000\000"
+/* offset=126978 */ "hisi_sccl,ddrc\000"
+/* offset=126993 */ "uncore_hisi_ddrc.flux_wcmd\000uncore\000DDRC write commands\000event=2\000\00000\000\000\000\000\000"
+/* offset=127063 */ "uncore_cbox\000"
+/* offset=127075 */ "unc_cbo_xsnp_response.miss_eviction\000uncore\000A cross-core snoop resulted from L3 Eviction which misses in some processor core\000event=0x22,umask=0x81\000\00000\000\000\000\000\000"
+/* offset=127229 */ "event-hyphen\000uncore\000UNC_CBO_HYPHEN\000event=0xe0\000\00000\000\000\000\000\000"
+/* offset=127283 */ "event-two-hyph\000uncore\000UNC_CBO_TWO_HYPH\000event=0xc0\000\00000\000\000\000\000\000"
+/* offset=127341 */ "hisi_sccl,l3c\000"
+/* offset=127355 */ "uncore_hisi_l3c.rd_hit_cpipe\000uncore\000Total read hits\000event=7\000\00000\000\000\000\000\000"
+/* offset=127423 */ "uncore_imc_free_running\000"
+/* offset=127447 */ "uncore_imc_free_running.cache_miss\000uncore\000Total cache misses\000event=0x12\000\00000\000\000\000\000\000"
+/* offset=127527 */ "uncore_imc\000"
+/* offset=127538 */ "uncore_imc.cache_hits\000uncore\000Total cache hits\000event=0x34\000\00000\000\000\000\000\000"
+/* offset=127603 */ "uncore_sys_ddr_pmu\000"
+/* offset=127622 */ "sys_ddr_pmu.write_cycles\000uncore\000ddr write-cycles event\000event=0x2b\000v8\00000\000\000\000\000\000"
+/* offset=127698 */ "uncore_sys_ccn_pmu\000"
+/* offset=127717 */ "sys_ccn_pmu.read_cycles\000uncore\000ccn read-cycles event\000config=0x2c\0000x01\00000\000\000\000\000\000"
+/* offset=127794 */ "uncore_sys_cmn_pmu\000"
+/* offset=127813 */ "sys_cmn_pmu.hnf_cache_miss\000uncore\000Counts total cache misses in first lookup result (high priority)\000eventid=1,type=5\000(434|436|43c|43a).*\00000\000\000\000\000\000"
+/* offset=127956 */ "CPUs_utilized\000Default\000(software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@) / (duration_time * 1e9)\000\000Average CPU utilization\000\0001CPUs\000\000\000\000011"
+/* offset=128142 */ "cs_per_second\000Default\000software@context\\-switches\\,name\\=context\\-switches@ * 1e9 / (software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@)\000\000Context switches per CPU second\000\0001cs/sec\000\000\000\000011"
+/* offset=128375 */ "migrations_per_second\000Default\000software@cpu\\-migrations\\,name\\=cpu\\-migrations@ * 1e9 / (software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@)\000\000Process migrations to a new CPU per CPU second\000\0001migrations/sec\000\000\000\000011"
+/* offset=128635 */ "page_faults_per_second\000Default\000software@page\\-faults\\,name\\=page\\-faults@ * 1e9 / (software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@)\000\000Page faults per CPU second\000\0001faults/sec\000\000\000\000011"
+/* offset=128866 */ "insn_per_cycle\000Default\000instructions / cpu\\-cycles\000insn_per_cycle < 1\000Instructions Per Cycle\000\0001instructions\000\000\000\000001"
+/* offset=128979 */ "stalled_cycles_per_instruction\000Default\000max(stalled\\-cycles\\-frontend, stalled\\-cycles\\-backend) / instructions\000\000Max front or backend stalls per instruction\000\000\000\000\000\000001"
+/* offset=129143 */ "frontend_cycles_idle\000Default\000stalled\\-cycles\\-frontend / cpu\\-cycles\000frontend_cycles_idle > 0.1\000Frontend stalls per cycle\000\000\000\000\000\000001"
+/* offset=129273 */ "backend_cycles_idle\000Default\000stalled\\-cycles\\-backend / cpu\\-cycles\000backend_cycles_idle > 0.2\000Backend stalls per cycle\000\000\000\000\000\000001"
+/* offset=129399 */ "cycles_frequency\000Default\000cpu\\-cycles / (software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@)\000\000Cycles per CPU second\000\0001GHz\000\000\000\000011"
+/* offset=129575 */ "branch_frequency\000Default\000branches / (software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@)\000\000Branches per CPU second\000\0001000M/sec\000\000\000\000011"
+/* offset=129755 */ "branch_miss_rate\000Default\000branch\\-misses / branches\000branch_miss_rate > 0.05\000Branch miss rate\000\000100%\000\000\000\000001"
+/* offset=129859 */ "l1d_miss_rate\000Default2\000L1\\-dcache\\-load\\-misses / L1\\-dcache\\-loads\000l1d_miss_rate > 0.05\000L1D miss rate\000\000100%\000\000\000\000001"
+/* offset=129975 */ "llc_miss_rate\000Default2\000LLC\\-load\\-misses / LLC\\-loads\000llc_miss_rate > 0.05\000LLC miss rate\000\000100%\000\000\000\000001"
+/* offset=130076 */ "l1i_miss_rate\000Default3\000L1\\-icache\\-load\\-misses / L1\\-icache\\-loads\000l1i_miss_rate > 0.05\000L1I miss rate\000\000100%\000\000\000\000001"
+/* offset=130191 */ "dtlb_miss_rate\000Default3\000dTLB\\-load\\-misses / dTLB\\-loads\000dtlb_miss_rate > 0.05\000dTLB miss rate\000\000100%\000\000\000\000001"
+/* offset=130297 */ "itlb_miss_rate\000Default3\000iTLB\\-load\\-misses / iTLB\\-loads\000itlb_miss_rate > 0.05\000iTLB miss rate\000\000100%\000\000\000\000001"
+/* offset=130403 */ "l1_prefetch_miss_rate\000Default4\000L1\\-dcache\\-prefetch\\-misses / L1\\-dcache\\-prefetches\000l1_prefetch_miss_rate > 0.05\000L1 prefetch miss rate\000\000100%\000\000\000\000001"
+/* offset=130551 */ "CPI\000\0001 / IPC\000\000\000\000\000\000\000\000000"
+/* offset=130574 */ "IPC\000group1\000inst_retired.any / cpu_clk_unhalted.thread\000\000\000\000\000\000\000\000000"
+/* offset=130638 */ "Frontend_Bound_SMT\000\000idq_uops_not_delivered.core / (4 * (cpu_clk_unhalted.thread / 2 * (1 + cpu_clk_unhalted.one_thread_active / cpu_clk_unhalted.ref_xclk)))\000\000\000\000\000\000\000\000000"
+/* offset=130805 */ "dcache_miss_cpi\000\000l1d\\-loads\\-misses / inst_retired.any\000\000\000\000\000\000\000\000000"
+/* offset=130870 */ "icache_miss_cycles\000\000l1i\\-loads\\-misses / inst_retired.any\000\000\000\000\000\000\000\000000"
+/* offset=130938 */ "cache_miss_cycles\000group1\000dcache_miss_cpi + icache_miss_cycles\000\000\000\000\000\000\000\000000"
+/* offset=131010 */ "DCache_L2_All_Hits\000\000l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit\000\000\000\000\000\000\000\000000"
+/* offset=131105 */ "DCache_L2_All_Miss\000\000max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) + l2_rqsts.pf_miss + l2_rqsts.rfo_miss\000\000\000\000\000\000\000\000000"
+/* offset=131240 */ "DCache_L2_All\000\000DCache_L2_All_Hits + DCache_L2_All_Miss\000\000\000\000\000\000\000\000000"
+/* offset=131305 */ "DCache_L2_Hits\000\000d_ratio(DCache_L2_All_Hits, DCache_L2_All)\000\000\000\000\000\000\000\000000"
+/* offset=131374 */ "DCache_L2_Misses\000\000d_ratio(DCache_L2_All_Miss, DCache_L2_All)\000\000\000\000\000\000\000\000000"
+/* offset=131445 */ "M1\000\000ipc + M2\000\000\000\000\000\000\000\000000"
+/* offset=131468 */ "M2\000\000ipc + M1\000\000\000\000\000\000\000\000000"
+/* offset=131491 */ "M3\000\0001 / M3\000\000\000\000\000\000\000\000000"
+/* offset=131512 */ "L1D_Cache_Fill_BW\000\00064 * l1d.replacement / 1e9 / duration_time\000\000\000\000\000\000\000\000000"
+;
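+/*
+ * Illustrative sketch, not part of the jevents.py output: each record in the
+ * string table above is a run of '\0'-separated fields.  From the data, the
+ * first five fields are the event name, topic, description, event encoding
+ * and a compat string; they are followed by two single-digit flags (the
+ * "00"/"10" runs -- the first digit marks deprecated alias spellings) and a
+ * few further, mostly empty, string fields such as the unit.  The metric
+ * records near the end of the table use a different field layout and are not
+ * decoded here.  Assuming a hypothetical table pointer "big_c_string", a
+ * decoder for the leading fields could look roughly like:
+ *
+ *	struct decoded_event {
+ *		const char *name, *topic, *desc, *event, *compat;
+ *		int deprecated, flag2;	// second digit: assumed per-package flag
+ *	};
+ *
+ *	static void decode_event(size_t offset, struct decoded_event *de)
+ *	{
+ *		const char *p = &big_c_string[offset];
+ *
+ *		de->name = *p ? p : NULL;
+ *		while (*p++)
+ *			;		// step past the name and its '\0'
+ *		de->topic = *p ? p : NULL;
+ *		while (*p++)
+ *			;
+ *		de->desc = *p ? p : NULL;
+ *		while (*p++)
+ *			;
+ *		de->event = *p ? p : NULL;
+ *		while (*p++)
+ *			;
+ *		de->compat = *p ? p : NULL;
+ *		while (*p++)
+ *			;
+ *		de->deprecated = *p++ - '0';	// flag digits are not '\0'-terminated
+ *		de->flag2 = *p++ - '0';
+ *	}
+ */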
+static const struct compact_pmu_event pmu_events__common_default_core[] = {
+{ 111480 }, /* bpc\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 113849 }, /* bpc-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 111564 }, /* bpc-load\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 111939 }, /* bpc-load-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 112135 }, /* bpc-load-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 112035 }, /* bpc-load-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 111846 }, /* bpc-load-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 111747 }, /* bpc-load-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 111653 }, /* bpc-load-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 112233 }, /* bpc-loads\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 112612 }, /* bpc-loads-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 112810 }, /* bpc-loads-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 112709 }, /* bpc-loads-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 112518 }, /* bpc-loads-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 112418 }, /* bpc-loads-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 112323 }, /* bpc-loads-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 114035 }, /* bpc-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 113940 }, /* bpc-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 113761 }, /* bpc-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 112909 }, /* bpc-read\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 113284 }, /* bpc-read-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 113480 }, /* bpc-read-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 113380 }, /* bpc-read-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 113191 }, /* bpc-read-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 113092 }, /* bpc-read-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 112998 }, /* bpc-read-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 113667 }, /* bpc-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 113578 }, /* bpc-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 106184 }, /* bpu\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 108553 }, /* bpu-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 106268 }, /* bpu-load\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 106643 }, /* bpu-load-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 106839 }, /* bpu-load-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 106739 }, /* bpu-load-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 106550 }, /* bpu-load-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 106451 }, /* bpu-load-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 106357 }, /* bpu-load-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 106937 }, /* bpu-loads\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 107316 }, /* bpu-loads-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 107514 }, /* bpu-loads-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 107413 }, /* bpu-loads-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 107222 }, /* bpu-loads-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 107122 }, /* bpu-loads-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 107027 }, /* bpu-loads-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 108739 }, /* bpu-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 108644 }, /* bpu-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 108465 }, /* bpu-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 107613 }, /* bpu-read\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 107988 }, /* bpu-read-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 108184 }, /* bpu-read-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 108084 }, /* bpu-read-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 107895 }, /* bpu-read-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 107796 }, /* bpu-read-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 107702 }, /* bpu-read-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 108371 }, /* bpu-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 108282 }, /* bpu-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 100851 }, /* branch\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 103295 }, /* branch-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 122452 }, /* branch-instructions\000legacy hardware\000Retired branch instructions [This event is an alias of branches]\000legacy-hardware-config=4\000\00000\000\000\000\000\000 */
+{ 100938 }, /* branch-load\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 101325 }, /* branch-load-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 101527 }, /* branch-load-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 101424 }, /* branch-load-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00000\000\000\000\000\000 */
+{ 101229 }, /* branch-load-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 101127 }, /* branch-load-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 101030 }, /* branch-load-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 101628 }, /* branch-loads\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00000\000\000\000\000\000 */
+{ 102019 }, /* branch-loads-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 102223 }, /* branch-loads-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 102119 }, /* branch-loads-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 101922 }, /* branch-loads-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 101819 }, /* branch-loads-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 101721 }, /* branch-loads-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 103389 }, /* branch-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 122586 }, /* branch-misses\000legacy hardware\000Mispredicted branch instructions\000legacy-hardware-config=5\000\00000\000\000\000\000\000 */
+{ 103204 }, /* branch-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 102325 }, /* branch-read\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 102712 }, /* branch-read-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 102914 }, /* branch-read-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 102811 }, /* branch-read-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 102616 }, /* branch-read-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 102514 }, /* branch-read-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 102417 }, /* branch-read-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 103107 }, /* branch-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 103015 }, /* branch-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 122318 }, /* branches\000legacy hardware\000Retired branch instructions [This event is an alias of branch-instructions]\000legacy-hardware-config=4\000\00000\000\000\000\000\000 */
+{ 105890 }, /* branches-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 103485 }, /* branches-load\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 103880 }, /* branches-load-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 104086 }, /* branches-load-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 103981 }, /* branches-load-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 103782 }, /* branches-load-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 103678 }, /* branches-load-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 103579 }, /* branches-load-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 104189 }, /* branches-loads\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 104588 }, /* branches-loads-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 104796 }, /* branches-loads-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 104690 }, /* branches-loads-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 104489 }, /* branches-loads-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 104384 }, /* branches-loads-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 104284 }, /* branches-loads-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 106086 }, /* branches-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 105986 }, /* branches-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 105797 }, /* branches-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 104900 }, /* branches-read\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 105295 }, /* branches-read-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 105501 }, /* branches-read-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 105396 }, /* branches-read-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 105197 }, /* branches-read-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 105093 }, /* branches-read-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 104994 }, /* branches-read-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 105698 }, /* branches-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 105604 }, /* branches-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 108832 }, /* btb\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 111201 }, /* btb-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 108916 }, /* btb-load\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 109291 }, /* btb-load-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 109487 }, /* btb-load-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 109387 }, /* btb-load-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 109198 }, /* btb-load-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 109099 }, /* btb-load-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 109005 }, /* btb-load-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 109585 }, /* btb-loads\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 109964 }, /* btb-loads-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 110162 }, /* btb-loads-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 110061 }, /* btb-loads-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 109870 }, /* btb-loads-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 109770 }, /* btb-loads-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 109675 }, /* btb-loads-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 111387 }, /* btb-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 111292 }, /* btb-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 111113 }, /* btb-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 110261 }, /* btb-read\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 110636 }, /* btb-read-access\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 110832 }, /* btb-read-miss\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 110732 }, /* btb-read-misses\000legacy cache\000Branch prediction unit read misses\000legacy-cache-config=0x10005\000\00010\000\000\000\000\000 */
+{ 110543 }, /* btb-read-ops\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 110444 }, /* btb-read-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 110350 }, /* btb-read-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 111019 }, /* btb-reference\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 110930 }, /* btb-refs\000legacy cache\000Branch prediction unit read accesses\000legacy-cache-config=5\000\00010\000\000\000\000\000 */
+{ 122682 }, /* bus-cycles\000legacy hardware\000Bus cycles, which can be different from total cycles\000legacy-hardware-config=6\000\00000\000\000\000\000\000 */
+{ 122075 }, /* cache-misses\000legacy hardware\000Cache misses. Usually this indicates Last Level Cache misses; this is intended to be used in conjunction with the PERF_COUNT_HW_CACHE_REFERENCES event to calculate cache miss rates\000legacy-hardware-config=3\000\00000\000\000\000\000\000 */
+{ 121805 }, /* cache-references\000legacy hardware\000Cache accesses. Usually this indicates Last Level Cache accesses but this may vary depending on your CPU. This may include prefetches and coherency messages; again this depends on the design of your CPU\000legacy-hardware-config=2\000\00000\000\000\000\000\000 */
+{ 121305 }, /* cpu-cycles\000legacy hardware\000Total cycles. Be wary of what happens during CPU frequency scaling [This event is an alias of cycles]\000legacy-hardware-config=0\000\00000\000\000\000\000\000 */
+{ 121467 }, /* cycles\000legacy hardware\000Total cycles. Be wary of what happens during CPU frequency scaling [This event is an alias of cpu-cycles]\000legacy-hardware-config=0\000\00000\000\000\000\000\000 */
+{ 78952 }, /* d-tlb\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 85655 }, /* d-tlb-access\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 79024 }, /* d-tlb-load\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 79351 }, /* d-tlb-load-access\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 79523 }, /* d-tlb-load-miss\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000 */
+{ 79435 }, /* d-tlb-load-misses\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000 */
+{ 79270 }, /* d-tlb-load-ops\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 79183 }, /* d-tlb-load-reference\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 79101 }, /* d-tlb-load-refs\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 79609 }, /* d-tlb-loads\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 79940 }, /* d-tlb-loads-access\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 80114 }, /* d-tlb-loads-miss\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000 */
+{ 80025 }, /* d-tlb-loads-misses\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000 */
+{ 79858 }, /* d-tlb-loads-ops\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 79770 }, /* d-tlb-loads-reference\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 79687 }, /* d-tlb-loads-refs\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 85817 }, /* d-tlb-miss\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000 */
+{ 85734 }, /* d-tlb-misses\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000 */
+{ 85579 }, /* d-tlb-ops\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 82650 }, /* d-tlb-prefetch\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 83025 }, /* d-tlb-prefetch-access\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 83217 }, /* d-tlb-prefetch-miss\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000 */
+{ 83121 }, /* d-tlb-prefetch-misses\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000 */
+{ 82932 }, /* d-tlb-prefetch-ops\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 82833 }, /* d-tlb-prefetch-reference\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 82739 }, /* d-tlb-prefetch-refs\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 83311 }, /* d-tlb-prefetches\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 83694 }, /* d-tlb-prefetches-access\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 83890 }, /* d-tlb-prefetches-miss\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000 */
+{ 83792 }, /* d-tlb-prefetches-misses\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000 */
+{ 83599 }, /* d-tlb-prefetches-ops\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 83498 }, /* d-tlb-prefetches-reference\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 83402 }, /* d-tlb-prefetches-refs\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 80201 }, /* d-tlb-read\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 80528 }, /* d-tlb-read-access\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 80700 }, /* d-tlb-read-miss\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000 */
+{ 80612 }, /* d-tlb-read-misses\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000 */
+{ 80447 }, /* d-tlb-read-ops\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 80360 }, /* d-tlb-read-reference\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 80278 }, /* d-tlb-read-refs\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 85497 }, /* d-tlb-reference\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 85420 }, /* d-tlb-refs\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 84703 }, /* d-tlb-speculative-load\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 85110 }, /* d-tlb-speculative-load-access\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 85318 }, /* d-tlb-speculative-load-miss\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000 */
+{ 85214 }, /* d-tlb-speculative-load-misses\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000 */
+{ 85009 }, /* d-tlb-speculative-load-ops\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 84902 }, /* d-tlb-speculative-load-reference\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 84800 }, /* d-tlb-speculative-load-refs\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 83986 }, /* d-tlb-speculative-read\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 84393 }, /* d-tlb-speculative-read-access\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 84601 }, /* d-tlb-speculative-read-miss\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000 */
+{ 84497 }, /* d-tlb-speculative-read-misses\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000 */
+{ 84292 }, /* d-tlb-speculative-read-ops\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 84185 }, /* d-tlb-speculative-read-reference\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 84083 }, /* d-tlb-speculative-read-refs\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 80786 }, /* d-tlb-store\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 81137 }, /* d-tlb-store-access\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 81317 }, /* d-tlb-store-miss\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000 */
+{ 81227 }, /* d-tlb-store-misses\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000 */
+{ 81050 }, /* d-tlb-store-ops\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 80957 }, /* d-tlb-store-reference\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 80869 }, /* d-tlb-store-refs\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 81405 }, /* d-tlb-stores\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 81760 }, /* d-tlb-stores-access\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 81942 }, /* d-tlb-stores-miss\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000 */
+{ 81851 }, /* d-tlb-stores-misses\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000 */
+{ 81672 }, /* d-tlb-stores-ops\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 81578 }, /* d-tlb-stores-reference\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 81489 }, /* d-tlb-stores-refs\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 82031 }, /* d-tlb-write\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 82382 }, /* d-tlb-write-access\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 82562 }, /* d-tlb-write-miss\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000 */
+{ 82472 }, /* d-tlb-write-misses\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000 */
+{ 82295 }, /* d-tlb-write-ops\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 82202 }, /* d-tlb-write-reference\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 82114 }, /* d-tlb-write-refs\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 85898 }, /* data-tlb\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 92823 }, /* data-tlb-access\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 85973 }, /* data-tlb-load\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 86312 }, /* data-tlb-load-access\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 86490 }, /* data-tlb-load-miss\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000 */
+{ 86399 }, /* data-tlb-load-misses\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000 */
+{ 86228 }, /* data-tlb-load-ops\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 86138 }, /* data-tlb-load-reference\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 86053 }, /* data-tlb-load-refs\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 86579 }, /* data-tlb-loads\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 86922 }, /* data-tlb-loads-access\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 87102 }, /* data-tlb-loads-miss\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000 */
+{ 87010 }, /* data-tlb-loads-misses\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000 */
+{ 86837 }, /* data-tlb-loads-ops\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 86746 }, /* data-tlb-loads-reference\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 86660 }, /* data-tlb-loads-refs\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 92991 }, /* data-tlb-miss\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000 */
+{ 92905 }, /* data-tlb-misses\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000 */
+{ 92744 }, /* data-tlb-ops\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 89725 }, /* data-tlb-prefetch\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 90112 }, /* data-tlb-prefetch-access\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 90310 }, /* data-tlb-prefetch-miss\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000 */
+{ 90211 }, /* data-tlb-prefetch-misses\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000 */
+{ 90016 }, /* data-tlb-prefetch-ops\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 89914 }, /* data-tlb-prefetch-reference\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 89817 }, /* data-tlb-prefetch-refs\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 90407 }, /* data-tlb-prefetches\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 90802 }, /* data-tlb-prefetches-access\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 91004 }, /* data-tlb-prefetches-miss\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000 */
+{ 90903 }, /* data-tlb-prefetches-misses\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000 */
+{ 90704 }, /* data-tlb-prefetches-ops\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 90600 }, /* data-tlb-prefetches-reference\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 90501 }, /* data-tlb-prefetches-refs\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 87192 }, /* data-tlb-read\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 87531 }, /* data-tlb-read-access\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 87709 }, /* data-tlb-read-miss\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000 */
+{ 87618 }, /* data-tlb-read-misses\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000 */
+{ 87447 }, /* data-tlb-read-ops\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 87357 }, /* data-tlb-read-reference\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 87272 }, /* data-tlb-read-refs\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 92659 }, /* data-tlb-reference\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 92579 }, /* data-tlb-refs\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 91841 }, /* data-tlb-speculative-load\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 92260 }, /* data-tlb-speculative-load-access\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 92474 }, /* data-tlb-speculative-load-miss\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000 */
+{ 92367 }, /* data-tlb-speculative-load-misses\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000 */
+{ 92156 }, /* data-tlb-speculative-load-ops\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 92046 }, /* data-tlb-speculative-load-reference\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 91941 }, /* data-tlb-speculative-load-refs\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 91103 }, /* data-tlb-speculative-read\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 91522 }, /* data-tlb-speculative-read-access\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 91736 }, /* data-tlb-speculative-read-miss\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000 */
+{ 91629 }, /* data-tlb-speculative-read-misses\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000 */
+{ 91418 }, /* data-tlb-speculative-read-ops\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 91308 }, /* data-tlb-speculative-read-reference\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 91203 }, /* data-tlb-speculative-read-refs\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 87798 }, /* data-tlb-store\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 88161 }, /* data-tlb-store-access\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 88347 }, /* data-tlb-store-miss\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000 */
+{ 88254 }, /* data-tlb-store-misses\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000 */
+{ 88071 }, /* data-tlb-store-ops\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 87975 }, /* data-tlb-store-reference\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 87884 }, /* data-tlb-store-refs\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 88438 }, /* data-tlb-stores\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 88805 }, /* data-tlb-stores-access\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 88993 }, /* data-tlb-stores-miss\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000 */
+{ 88899 }, /* data-tlb-stores-misses\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000 */
+{ 88714 }, /* data-tlb-stores-ops\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 88617 }, /* data-tlb-stores-reference\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 88525 }, /* data-tlb-stores-refs\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 89085 }, /* data-tlb-write\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 89448 }, /* data-tlb-write-access\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 89634 }, /* data-tlb-write-miss\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000 */
+{ 89541 }, /* data-tlb-write-misses\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000 */
+{ 89358 }, /* data-tlb-write-ops\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 89262 }, /* data-tlb-write-reference\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 89171 }, /* data-tlb-write-refs\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 72083 }, /* dtlb\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 78712 }, /* dtlb-access\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 72154 }, /* dtlb-load\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 72477 }, /* dtlb-load-access\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 72647 }, /* dtlb-load-miss\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000 */
+{ 72560 }, /* dtlb-load-misses\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00000\000\000\000\000\000 */
+{ 72397 }, /* dtlb-load-ops\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 72311 }, /* dtlb-load-reference\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 72230 }, /* dtlb-load-refs\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 72732 }, /* dtlb-loads\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00000\000\000\000\000\000 */
+{ 73059 }, /* dtlb-loads-access\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 73231 }, /* dtlb-loads-miss\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000 */
+{ 73143 }, /* dtlb-loads-misses\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000 */
+{ 72978 }, /* dtlb-loads-ops\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 72891 }, /* dtlb-loads-reference\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 72809 }, /* dtlb-loads-refs\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 78872 }, /* dtlb-miss\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000 */
+{ 78790 }, /* dtlb-misses\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000 */
+{ 78637 }, /* dtlb-ops\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 75738 }, /* dtlb-prefetch\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 76109 }, /* dtlb-prefetch-access\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 76299 }, /* dtlb-prefetch-miss\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000 */
+{ 76204 }, /* dtlb-prefetch-misses\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00000\000\000\000\000\000 */
+{ 76017 }, /* dtlb-prefetch-ops\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 75919 }, /* dtlb-prefetch-reference\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 75826 }, /* dtlb-prefetch-refs\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 76392 }, /* dtlb-prefetches\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00000\000\000\000\000\000 */
+{ 76771 }, /* dtlb-prefetches-access\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 76965 }, /* dtlb-prefetches-miss\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000 */
+{ 76868 }, /* dtlb-prefetches-misses\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000 */
+{ 76677 }, /* dtlb-prefetches-ops\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 76577 }, /* dtlb-prefetches-reference\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 76482 }, /* dtlb-prefetches-refs\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 73317 }, /* dtlb-read\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 73640 }, /* dtlb-read-access\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 73810 }, /* dtlb-read-miss\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000 */
+{ 73723 }, /* dtlb-read-misses\000legacy cache\000Data TLB read misses\000legacy-cache-config=0x10003\000\00010\000\000\000\000\000 */
+{ 73560 }, /* dtlb-read-ops\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 73474 }, /* dtlb-read-reference\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 73393 }, /* dtlb-read-refs\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 78556 }, /* dtlb-reference\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 78480 }, /* dtlb-refs\000legacy cache\000Data TLB read accesses\000legacy-cache-config=3\000\00010\000\000\000\000\000 */
+{ 77770 }, /* dtlb-speculative-load\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 78173 }, /* dtlb-speculative-load-access\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 78379 }, /* dtlb-speculative-load-miss\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000 */
+{ 78276 }, /* dtlb-speculative-load-misses\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000 */
+{ 78073 }, /* dtlb-speculative-load-ops\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 77967 }, /* dtlb-speculative-load-reference\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 77866 }, /* dtlb-speculative-load-refs\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 77060 }, /* dtlb-speculative-read\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 77463 }, /* dtlb-speculative-read-access\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 77669 }, /* dtlb-speculative-read-miss\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000 */
+{ 77566 }, /* dtlb-speculative-read-misses\000legacy cache\000Data TLB prefetch misses\000legacy-cache-config=0x10203\000\00010\000\000\000\000\000 */
+{ 77363 }, /* dtlb-speculative-read-ops\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 77257 }, /* dtlb-speculative-read-reference\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 77156 }, /* dtlb-speculative-read-refs\000legacy cache\000Data TLB prefetch accesses\000legacy-cache-config=0x203\000\00010\000\000\000\000\000 */
+{ 73895 }, /* dtlb-store\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 74242 }, /* dtlb-store-access\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 74420 }, /* dtlb-store-miss\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000 */
+{ 74331 }, /* dtlb-store-misses\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00000\000\000\000\000\000 */
+{ 74156 }, /* dtlb-store-ops\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 74064 }, /* dtlb-store-reference\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 73977 }, /* dtlb-store-refs\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 74507 }, /* dtlb-stores\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00000\000\000\000\000\000 */
+{ 74858 }, /* dtlb-stores-access\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 75038 }, /* dtlb-stores-miss\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000 */
+{ 74948 }, /* dtlb-stores-misses\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000 */
+{ 74771 }, /* dtlb-stores-ops\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 74678 }, /* dtlb-stores-reference\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 74590 }, /* dtlb-stores-refs\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 75126 }, /* dtlb-write\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 75473 }, /* dtlb-write-access\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 75651 }, /* dtlb-write-miss\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000 */
+{ 75562 }, /* dtlb-write-misses\000legacy cache\000Data TLB write misses\000legacy-cache-config=0x10103\000\00010\000\000\000\000\000 */
+{ 75387 }, /* dtlb-write-ops\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 75295 }, /* dtlb-write-reference\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 75208 }, /* dtlb-write-refs\000legacy cache\000Data TLB write accesses\000legacy-cache-config=0x103\000\00010\000\000\000\000\000 */
+{ 95555 }, /* i-tlb\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 97799 }, /* i-tlb-access\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 95634 }, /* i-tlb-load\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 95989 }, /* i-tlb-load-access\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 96175 }, /* i-tlb-load-miss\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000 */
+{ 96080 }, /* i-tlb-load-misses\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000 */
+{ 95901 }, /* i-tlb-load-ops\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 95807 }, /* i-tlb-load-reference\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 95718 }, /* i-tlb-load-refs\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 96268 }, /* i-tlb-loads\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 96627 }, /* i-tlb-loads-access\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 96815 }, /* i-tlb-loads-miss\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000 */
+{ 96719 }, /* i-tlb-loads-misses\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000 */
+{ 96538 }, /* i-tlb-loads-ops\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 96443 }, /* i-tlb-loads-reference\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 96353 }, /* i-tlb-loads-refs\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 97975 }, /* i-tlb-miss\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000 */
+{ 97885 }, /* i-tlb-misses\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000 */
+{ 97716 }, /* i-tlb-ops\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 96909 }, /* i-tlb-read\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 97264 }, /* i-tlb-read-access\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 97450 }, /* i-tlb-read-miss\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000 */
+{ 97355 }, /* i-tlb-read-misses\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000 */
+{ 97176 }, /* i-tlb-read-ops\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 97082 }, /* i-tlb-read-reference\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 96993 }, /* i-tlb-read-refs\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 97627 }, /* i-tlb-reference\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 97543 }, /* i-tlb-refs\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 123247 }, /* idle-cycles-backend\000legacy hardware\000Stalled cycles during retirement [This event is an alias of stalled-cycles-backend]\000legacy-hardware-config=8\000\00000\000\000\000\000\000 */
+{ 122945 }, /* idle-cycles-frontend\000legacy hardware\000Stalled cycles during issue [This event is an alias of stalled-cycles-frontend]\000legacy-hardware-config=7\000\00000\000\000\000\000\000 */
+{ 98063 }, /* instruction-tlb\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 100557 }, /* instruction-tlb-access\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 98152 }, /* instruction-tlb-load\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 98547 }, /* instruction-tlb-load-access\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 98753 }, /* instruction-tlb-load-miss\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000 */
+{ 98648 }, /* instruction-tlb-load-misses\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000 */
+{ 98449 }, /* instruction-tlb-load-ops\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 98345 }, /* instruction-tlb-load-reference\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 98246 }, /* instruction-tlb-load-refs\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 98856 }, /* instruction-tlb-loads\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 99255 }, /* instruction-tlb-loads-access\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 99463 }, /* instruction-tlb-loads-miss\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000 */
+{ 99357 }, /* instruction-tlb-loads-misses\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000 */
+{ 99156 }, /* instruction-tlb-loads-ops\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 99051 }, /* instruction-tlb-loads-reference\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 98951 }, /* instruction-tlb-loads-refs\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 100753 }, /* instruction-tlb-miss\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000 */
+{ 100653 }, /* instruction-tlb-misses\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000 */
+{ 100464 }, /* instruction-tlb-ops\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 99567 }, /* instruction-tlb-read\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 99962 }, /* instruction-tlb-read-access\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 100168 }, /* instruction-tlb-read-miss\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000 */
+{ 100063 }, /* instruction-tlb-read-misses\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000 */
+{ 99864 }, /* instruction-tlb-read-ops\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 99760 }, /* instruction-tlb-read-reference\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 99661 }, /* instruction-tlb-read-refs\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 100365 }, /* instruction-tlb-reference\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 100271 }, /* instruction-tlb-refs\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 121629 }, /* instructions\000legacy hardware\000Retired instructions. Be careful, these can be affected by various issues, most notably hardware interrupt counts\000legacy-hardware-config=1\000\00000\000\000\000\000\000 */
+{ 93075 }, /* itlb\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 95294 }, /* itlb-access\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 93153 }, /* itlb-load\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 93504 }, /* itlb-load-access\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 93688 }, /* itlb-load-miss\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000 */
+{ 93594 }, /* itlb-load-misses\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00000\000\000\000\000\000 */
+{ 93417 }, /* itlb-load-ops\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 93324 }, /* itlb-load-reference\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 93236 }, /* itlb-load-refs\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 93780 }, /* itlb-loads\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00000\000\000\000\000\000 */
+{ 94135 }, /* itlb-loads-access\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 94321 }, /* itlb-loads-miss\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000 */
+{ 94226 }, /* itlb-loads-misses\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000 */
+{ 94047 }, /* itlb-loads-ops\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 93953 }, /* itlb-loads-reference\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 93864 }, /* itlb-loads-refs\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 95468 }, /* itlb-miss\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000 */
+{ 95379 }, /* itlb-misses\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000 */
+{ 95212 }, /* itlb-ops\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 94414 }, /* itlb-read\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 94765 }, /* itlb-read-access\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 94949 }, /* itlb-read-miss\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000 */
+{ 94855 }, /* itlb-read-misses\000legacy cache\000Instruction TLB read misses\000legacy-cache-config=0x10004\000\00010\000\000\000\000\000 */
+{ 94678 }, /* itlb-read-ops\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 94585 }, /* itlb-read-reference\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 94497 }, /* itlb-read-refs\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 95124 }, /* itlb-reference\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 95041 }, /* itlb-refs\000legacy cache\000Instruction TLB read accesses\000legacy-cache-config=4\000\00010\000\000\000\000\000 */
+{ 8037 }, /* l1-d\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 15406 }, /* l1-d-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 8118 }, /* l1-d-load\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 8481 }, /* l1-d-load-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 8671 }, /* l1-d-load-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 8574 }, /* l1-d-load-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 8391 }, /* l1-d-load-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 8295 }, /* l1-d-load-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 8204 }, /* l1-d-load-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 8766 }, /* l1-d-loads\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 9133 }, /* l1-d-loads-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 9325 }, /* l1-d-loads-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 9227 }, /* l1-d-loads-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 9042 }, /* l1-d-loads-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 8945 }, /* l1-d-loads-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 8853 }, /* l1-d-loads-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 15586 }, /* l1-d-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 15494 }, /* l1-d-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 15321 }, /* l1-d-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 12122 }, /* l1-d-prefetch\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 12533 }, /* l1-d-prefetch-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 12743 }, /* l1-d-prefetch-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 12638 }, /* l1-d-prefetch-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 12431 }, /* l1-d-prefetch-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 12323 }, /* l1-d-prefetch-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 12220 }, /* l1-d-prefetch-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 12846 }, /* l1-d-prefetches\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 13265 }, /* l1-d-prefetches-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 13479 }, /* l1-d-prefetches-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 13372 }, /* l1-d-prefetches-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 13161 }, /* l1-d-prefetches-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 13051 }, /* l1-d-prefetches-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 12946 }, /* l1-d-prefetches-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 9421 }, /* l1-d-read\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 9784 }, /* l1-d-read-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 9974 }, /* l1-d-read-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 9877 }, /* l1-d-read-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 9694 }, /* l1-d-read-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 9598 }, /* l1-d-read-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 9507 }, /* l1-d-read-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 15230 }, /* l1-d-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 15144 }, /* l1-d-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 14364 }, /* l1-d-speculative-load\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 14807 }, /* l1-d-speculative-load-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 15033 }, /* l1-d-speculative-load-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 14920 }, /* l1-d-speculative-load-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 14697 }, /* l1-d-speculative-load-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 14581 }, /* l1-d-speculative-load-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 14470 }, /* l1-d-speculative-load-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 13584 }, /* l1-d-speculative-read\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 14027 }, /* l1-d-speculative-read-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 14253 }, /* l1-d-speculative-read-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 14140 }, /* l1-d-speculative-read-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 13917 }, /* l1-d-speculative-read-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 13801 }, /* l1-d-speculative-read-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 13690 }, /* l1-d-speculative-read-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 10069 }, /* l1-d-store\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 10456 }, /* l1-d-store-access\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 10654 }, /* l1-d-store-miss\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000 */
+{ 10555 }, /* l1-d-store-misses\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000 */
+{ 10360 }, /* l1-d-store-ops\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 10258 }, /* l1-d-store-reference\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 10161 }, /* l1-d-store-refs\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 10751 }, /* l1-d-stores\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 11142 }, /* l1-d-stores-access\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 11342 }, /* l1-d-stores-miss\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000 */
+{ 11242 }, /* l1-d-stores-misses\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000 */
+{ 11045 }, /* l1-d-stores-ops\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 10942 }, /* l1-d-stores-reference\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 10844 }, /* l1-d-stores-refs\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 11440 }, /* l1-d-write\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 11827 }, /* l1-d-write-access\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 12025 }, /* l1-d-write-miss\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000 */
+{ 11926 }, /* l1-d-write-misses\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000 */
+{ 11731 }, /* l1-d-write-ops\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 11629 }, /* l1-d-write-reference\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 11532 }, /* l1-d-write-refs\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 23238 }, /* l1-data\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 30829 }, /* l1-data-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 23322 }, /* l1-data-load\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 23697 }, /* l1-data-load-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 23893 }, /* l1-data-load-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 23793 }, /* l1-data-load-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 23604 }, /* l1-data-load-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 23505 }, /* l1-data-load-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 23411 }, /* l1-data-load-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 23991 }, /* l1-data-loads\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 24370 }, /* l1-data-loads-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 24568 }, /* l1-data-loads-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 24467 }, /* l1-data-loads-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 24276 }, /* l1-data-loads-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 24176 }, /* l1-data-loads-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 24081 }, /* l1-data-loads-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 31015 }, /* l1-data-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 30920 }, /* l1-data-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 30741 }, /* l1-data-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 27452 }, /* l1-data-prefetch\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 27875 }, /* l1-data-prefetch-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 28091 }, /* l1-data-prefetch-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 27983 }, /* l1-data-prefetch-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 27770 }, /* l1-data-prefetch-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 27659 }, /* l1-data-prefetch-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 27553 }, /* l1-data-prefetch-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 28197 }, /* l1-data-prefetches\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 28628 }, /* l1-data-prefetches-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 28848 }, /* l1-data-prefetches-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 28738 }, /* l1-data-prefetches-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 28521 }, /* l1-data-prefetches-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 28408 }, /* l1-data-prefetches-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 28300 }, /* l1-data-prefetches-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 24667 }, /* l1-data-read\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 25042 }, /* l1-data-read-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 25238 }, /* l1-data-read-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 25138 }, /* l1-data-read-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 24949 }, /* l1-data-read-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 24850 }, /* l1-data-read-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 24756 }, /* l1-data-read-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 30647 }, /* l1-data-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 30558 }, /* l1-data-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 29757 }, /* l1-data-speculative-load\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 30212 }, /* l1-data-speculative-load-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 30444 }, /* l1-data-speculative-load-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 30328 }, /* l1-data-speculative-load-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 30099 }, /* l1-data-speculative-load-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 29980 }, /* l1-data-speculative-load-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 29866 }, /* l1-data-speculative-load-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 28956 }, /* l1-data-speculative-read\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 29411 }, /* l1-data-speculative-read-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 29643 }, /* l1-data-speculative-read-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 29527 }, /* l1-data-speculative-read-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 29298 }, /* l1-data-speculative-read-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 29179 }, /* l1-data-speculative-read-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 29065 }, /* l1-data-speculative-read-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 25336 }, /* l1-data-store\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 25735 }, /* l1-data-store-access\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 25939 }, /* l1-data-store-miss\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000 */
+{ 25837 }, /* l1-data-store-misses\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000 */
+{ 25636 }, /* l1-data-store-ops\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 25531 }, /* l1-data-store-reference\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 25431 }, /* l1-data-store-refs\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 26039 }, /* l1-data-stores\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 26442 }, /* l1-data-stores-access\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 26648 }, /* l1-data-stores-miss\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000 */
+{ 26545 }, /* l1-data-stores-misses\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000 */
+{ 26342 }, /* l1-data-stores-ops\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 26236 }, /* l1-data-stores-reference\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 26135 }, /* l1-data-stores-refs\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 26749 }, /* l1-data-write\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 27148 }, /* l1-data-write-access\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 27352 }, /* l1-data-write-miss\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000 */
+{ 27250 }, /* l1-data-write-misses\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000 */
+{ 27049 }, /* l1-data-write-ops\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 26944 }, /* l1-data-write-reference\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 26844 }, /* l1-data-write-refs\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 13 }, /* l1-dcache\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 7752 }, /* l1-dcache-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 99 }, /* l1-dcache-load\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 482 }, /* l1-dcache-load-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 682 }, /* l1-dcache-load-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 580 }, /* l1-dcache-load-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00000\000\000\000\000\000 */
+{ 387 }, /* l1-dcache-load-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 286 }, /* l1-dcache-load-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 190 }, /* l1-dcache-load-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 782 }, /* l1-dcache-loads\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00000\000\000\000\000\000 */
+{ 1169 }, /* l1-dcache-loads-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 1371 }, /* l1-dcache-loads-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 1268 }, /* l1-dcache-loads-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 1073 }, /* l1-dcache-loads-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 971 }, /* l1-dcache-loads-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 874 }, /* l1-dcache-loads-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 7942 }, /* l1-dcache-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 7845 }, /* l1-dcache-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 7662 }, /* l1-dcache-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 4313 }, /* l1-dcache-prefetch\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 4744 }, /* l1-dcache-prefetch-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 4964 }, /* l1-dcache-prefetch-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 4854 }, /* l1-dcache-prefetch-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00000\000\000\000\000\000 */
+{ 4637 }, /* l1-dcache-prefetch-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 4524 }, /* l1-dcache-prefetch-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 4416 }, /* l1-dcache-prefetch-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 5072 }, /* l1-dcache-prefetches\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00000\000\000\000\000\000 */
+{ 5511 }, /* l1-dcache-prefetches-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 5735 }, /* l1-dcache-prefetches-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 5623 }, /* l1-dcache-prefetches-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 5402 }, /* l1-dcache-prefetches-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 5287 }, /* l1-dcache-prefetches-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 5177 }, /* l1-dcache-prefetches-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 1472 }, /* l1-dcache-read\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 1855 }, /* l1-dcache-read-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 2055 }, /* l1-dcache-read-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 1953 }, /* l1-dcache-read-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 1760 }, /* l1-dcache-read-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 1659 }, /* l1-dcache-read-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 1563 }, /* l1-dcache-read-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 7566 }, /* l1-dcache-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 7475 }, /* l1-dcache-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 6660 }, /* l1-dcache-speculative-load\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 7123 }, /* l1-dcache-speculative-load-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 7359 }, /* l1-dcache-speculative-load-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 7241 }, /* l1-dcache-speculative-load-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 7008 }, /* l1-dcache-speculative-load-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 6887 }, /* l1-dcache-speculative-load-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 6771 }, /* l1-dcache-speculative-load-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 5845 }, /* l1-dcache-speculative-read\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 6308 }, /* l1-dcache-speculative-read-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 6544 }, /* l1-dcache-speculative-read-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 6426 }, /* l1-dcache-speculative-read-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 6193 }, /* l1-dcache-speculative-read-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 6072 }, /* l1-dcache-speculative-read-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 5956 }, /* l1-dcache-speculative-read-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 2155 }, /* l1-dcache-store\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 2562 }, /* l1-dcache-store-access\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 2770 }, /* l1-dcache-store-miss\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000 */
+{ 2666 }, /* l1-dcache-store-misses\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00000\000\000\000\000\000 */
+{ 2461 }, /* l1-dcache-store-ops\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 2354 }, /* l1-dcache-store-reference\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 2252 }, /* l1-dcache-store-refs\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 2872 }, /* l1-dcache-stores\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00000\000\000\000\000\000 */
+{ 3283 }, /* l1-dcache-stores-access\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 3493 }, /* l1-dcache-stores-miss\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000 */
+{ 3388 }, /* l1-dcache-stores-misses\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000 */
+{ 3181 }, /* l1-dcache-stores-ops\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 3073 }, /* l1-dcache-stores-reference\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 2970 }, /* l1-dcache-stores-refs\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 3596 }, /* l1-dcache-write\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 4003 }, /* l1-dcache-write-access\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 4211 }, /* l1-dcache-write-miss\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000 */
+{ 4107 }, /* l1-dcache-write-misses\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000 */
+{ 3902 }, /* l1-dcache-write-ops\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 3795 }, /* l1-dcache-write-reference\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 3693 }, /* l1-dcache-write-refs\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 37366 }, /* l1-i\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 43053 }, /* l1-i-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 37454 }, /* l1-i-load\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 37845 }, /* l1-i-load-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 38049 }, /* l1-i-load-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 37945 }, /* l1-i-load-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 37748 }, /* l1-i-load-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 37645 }, /* l1-i-load-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 37547 }, /* l1-i-load-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 38151 }, /* l1-i-loads\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 38546 }, /* l1-i-loads-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 38752 }, /* l1-i-loads-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 38647 }, /* l1-i-loads-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 38448 }, /* l1-i-loads-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 38344 }, /* l1-i-loads-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 38245 }, /* l1-i-loads-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 43247 }, /* l1-i-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 43148 }, /* l1-i-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 42961 }, /* l1-i-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 39552 }, /* l1-i-prefetch\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 39991 }, /* l1-i-prefetch-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 40215 }, /* l1-i-prefetch-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 40103 }, /* l1-i-prefetch-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 39882 }, /* l1-i-prefetch-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 39767 }, /* l1-i-prefetch-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 39657 }, /* l1-i-prefetch-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 40325 }, /* l1-i-prefetches\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 40772 }, /* l1-i-prefetches-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 41000 }, /* l1-i-prefetches-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 40886 }, /* l1-i-prefetches-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 40661 }, /* l1-i-prefetches-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 40544 }, /* l1-i-prefetches-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 40432 }, /* l1-i-prefetches-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 38855 }, /* l1-i-read\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 39246 }, /* l1-i-read-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 39450 }, /* l1-i-read-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 39346 }, /* l1-i-read-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 39149 }, /* l1-i-read-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 39046 }, /* l1-i-read-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 38948 }, /* l1-i-read-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 42863 }, /* l1-i-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 42770 }, /* l1-i-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 41941 }, /* l1-i-speculative-load\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 42412 }, /* l1-i-speculative-load-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 42652 }, /* l1-i-speculative-load-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 42532 }, /* l1-i-speculative-load-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 42295 }, /* l1-i-speculative-load-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 42172 }, /* l1-i-speculative-load-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 42054 }, /* l1-i-speculative-load-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 41112 }, /* l1-i-speculative-read\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 41583 }, /* l1-i-speculative-read-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 41823 }, /* l1-i-speculative-read-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 41703 }, /* l1-i-speculative-read-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 41466 }, /* l1-i-speculative-read-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 41343 }, /* l1-i-speculative-read-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 41225 }, /* l1-i-speculative-read-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 31108 }, /* l1-icache\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 37060 }, /* l1-icache-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 31201 }, /* l1-icache-load\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 31612 }, /* l1-icache-load-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 31826 }, /* l1-icache-load-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 31717 }, /* l1-icache-load-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00000\000\000\000\000\000 */
+{ 31510 }, /* l1-icache-load-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 31402 }, /* l1-icache-load-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 31299 }, /* l1-icache-load-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 31933 }, /* l1-icache-loads\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00000\000\000\000\000\000 */
+{ 32348 }, /* l1-icache-loads-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 32564 }, /* l1-icache-loads-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 32454 }, /* l1-icache-loads-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 32245 }, /* l1-icache-loads-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 32136 }, /* l1-icache-loads-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 32032 }, /* l1-icache-loads-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 37264 }, /* l1-icache-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 37160 }, /* l1-icache-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 36963 }, /* l1-icache-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 33404 }, /* l1-icache-prefetch\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 33863 }, /* l1-icache-prefetch-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 34097 }, /* l1-icache-prefetch-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 33980 }, /* l1-icache-prefetch-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00000\000\000\000\000\000 */
+{ 33749 }, /* l1-icache-prefetch-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 33629 }, /* l1-icache-prefetch-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 33514 }, /* l1-icache-prefetch-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 34212 }, /* l1-icache-prefetches\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00000\000\000\000\000\000 */
+{ 34679 }, /* l1-icache-prefetches-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 34917 }, /* l1-icache-prefetches-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 34798 }, /* l1-icache-prefetches-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 34563 }, /* l1-icache-prefetches-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 34441 }, /* l1-icache-prefetches-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 34324 }, /* l1-icache-prefetches-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 32672 }, /* l1-icache-read\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 33083 }, /* l1-icache-read-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 33297 }, /* l1-icache-read-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 33188 }, /* l1-icache-read-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 32981 }, /* l1-icache-read-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 32873 }, /* l1-icache-read-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 32770 }, /* l1-icache-read-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 36860 }, /* l1-icache-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 36762 }, /* l1-icache-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 35898 }, /* l1-icache-speculative-load\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 36389 }, /* l1-icache-speculative-load-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 36639 }, /* l1-icache-speculative-load-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 36514 }, /* l1-icache-speculative-load-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 36267 }, /* l1-icache-speculative-load-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 36139 }, /* l1-icache-speculative-load-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 36016 }, /* l1-icache-speculative-load-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 35034 }, /* l1-icache-speculative-read\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 35525 }, /* l1-icache-speculative-read-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 35775 }, /* l1-icache-speculative-read-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 35650 }, /* l1-icache-speculative-read-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 35403 }, /* l1-icache-speculative-read-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 35275 }, /* l1-icache-speculative-read-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 35152 }, /* l1-icache-speculative-read-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 49266 }, /* l1-instruction\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 55483 }, /* l1-instruction-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 49364 }, /* l1-instruction-load\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 49795 }, /* l1-instruction-load-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 50019 }, /* l1-instruction-load-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 49905 }, /* l1-instruction-load-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 49688 }, /* l1-instruction-load-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 49575 }, /* l1-instruction-load-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 49467 }, /* l1-instruction-load-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 50131 }, /* l1-instruction-loads\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 50566 }, /* l1-instruction-loads-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 50792 }, /* l1-instruction-loads-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 50677 }, /* l1-instruction-loads-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 50458 }, /* l1-instruction-loads-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 50344 }, /* l1-instruction-loads-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 50235 }, /* l1-instruction-loads-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 55697 }, /* l1-instruction-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 55588 }, /* l1-instruction-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 55381 }, /* l1-instruction-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 51672 }, /* l1-instruction-prefetch\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 52151 }, /* l1-instruction-prefetch-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 52395 }, /* l1-instruction-prefetch-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 52273 }, /* l1-instruction-prefetch-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 52032 }, /* l1-instruction-prefetch-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 51907 }, /* l1-instruction-prefetch-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 51787 }, /* l1-instruction-prefetch-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 52515 }, /* l1-instruction-prefetches\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 53002 }, /* l1-instruction-prefetches-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 53250 }, /* l1-instruction-prefetches-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 53126 }, /* l1-instruction-prefetches-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 52881 }, /* l1-instruction-prefetches-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 52754 }, /* l1-instruction-prefetches-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 52632 }, /* l1-instruction-prefetches-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 50905 }, /* l1-instruction-read\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 51336 }, /* l1-instruction-read-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 51560 }, /* l1-instruction-read-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 51446 }, /* l1-instruction-read-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 51229 }, /* l1-instruction-read-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 51116 }, /* l1-instruction-read-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 51008 }, /* l1-instruction-read-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 55273 }, /* l1-instruction-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 55170 }, /* l1-instruction-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 54271 }, /* l1-instruction-speculative-load\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 54782 }, /* l1-instruction-speculative-load-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 55042 }, /* l1-instruction-speculative-load-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 54912 }, /* l1-instruction-speculative-load-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 54655 }, /* l1-instruction-speculative-load-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 54522 }, /* l1-instruction-speculative-load-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 54394 }, /* l1-instruction-speculative-load-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 53372 }, /* l1-instruction-speculative-read\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 53883 }, /* l1-instruction-speculative-read-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 54143 }, /* l1-instruction-speculative-read-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 54013 }, /* l1-instruction-speculative-read-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 53756 }, /* l1-instruction-speculative-read-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 53623 }, /* l1-instruction-speculative-read-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 53495 }, /* l1-instruction-speculative-read-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 15676 }, /* l1d\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 22971 }, /* l1d-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 15756 }, /* l1d-load\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 16115 }, /* l1d-load-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 16303 }, /* l1d-load-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 16207 }, /* l1d-load-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 16026 }, /* l1d-load-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 15931 }, /* l1d-load-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 15841 }, /* l1d-load-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 16397 }, /* l1d-loads\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 16760 }, /* l1d-loads-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 16950 }, /* l1d-loads-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 16853 }, /* l1d-loads-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 16670 }, /* l1d-loads-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 16574 }, /* l1d-loads-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 16483 }, /* l1d-loads-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 23149 }, /* l1d-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 23058 }, /* l1d-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 22887 }, /* l1d-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 19718 }, /* l1d-prefetch\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 20125 }, /* l1d-prefetch-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 20333 }, /* l1d-prefetch-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 20229 }, /* l1d-prefetch-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 20024 }, /* l1d-prefetch-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 19917 }, /* l1d-prefetch-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 19815 }, /* l1d-prefetch-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 20435 }, /* l1d-prefetches\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 20850 }, /* l1d-prefetches-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 21062 }, /* l1d-prefetches-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 20956 }, /* l1d-prefetches-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 20747 }, /* l1d-prefetches-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 20638 }, /* l1d-prefetches-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 20534 }, /* l1d-prefetches-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 17045 }, /* l1d-read\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 17404 }, /* l1d-read-access\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 17592 }, /* l1d-read-miss\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 17496 }, /* l1d-read-misses\000legacy cache\000Level 1 data cache read misses\000legacy-cache-config=0x10000\000\00010\000\000\000\000\000 */
+{ 17315 }, /* l1d-read-ops\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 17220 }, /* l1d-read-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 17130 }, /* l1d-read-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 22797 }, /* l1d-reference\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 22712 }, /* l1d-refs\000legacy cache\000Level 1 data cache read accesses\000legacy-cache-config=0\000\00010\000\000\000\000\000 */
+{ 21939 }, /* l1d-speculative-load\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 22378 }, /* l1d-speculative-load-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 22602 }, /* l1d-speculative-load-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 22490 }, /* l1d-speculative-load-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 22269 }, /* l1d-speculative-load-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 22154 }, /* l1d-speculative-load-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 22044 }, /* l1d-speculative-load-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 21166 }, /* l1d-speculative-read\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 21605 }, /* l1d-speculative-read-access\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 21829 }, /* l1d-speculative-read-miss\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 21717 }, /* l1d-speculative-read-misses\000legacy cache\000Level 1 data cache prefetch misses\000legacy-cache-config=0x10200\000\00010\000\000\000\000\000 */
+{ 21496 }, /* l1d-speculative-read-ops\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 21381 }, /* l1d-speculative-read-reference\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 21271 }, /* l1d-speculative-read-refs\000legacy cache\000Level 1 data cache prefetch accesses\000legacy-cache-config=0x200\000\00010\000\000\000\000\000 */
+{ 17686 }, /* l1d-store\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 18069 }, /* l1d-store-access\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 18265 }, /* l1d-store-miss\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000 */
+{ 18167 }, /* l1d-store-misses\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000 */
+{ 17974 }, /* l1d-store-ops\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 17873 }, /* l1d-store-reference\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 17777 }, /* l1d-store-refs\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 18361 }, /* l1d-stores\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 18748 }, /* l1d-stores-access\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 18946 }, /* l1d-stores-miss\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000 */
+{ 18847 }, /* l1d-stores-misses\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000 */
+{ 18652 }, /* l1d-stores-ops\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 18550 }, /* l1d-stores-reference\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 18453 }, /* l1d-stores-refs\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 19043 }, /* l1d-write\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 19426 }, /* l1d-write-access\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 19622 }, /* l1d-write-miss\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000 */
+{ 19524 }, /* l1d-write-misses\000legacy cache\000Level 1 data cache write misses\000legacy-cache-config=0x10100\000\00010\000\000\000\000\000 */
+{ 19331 }, /* l1d-write-ops\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 19230 }, /* l1d-write-reference\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 19134 }, /* l1d-write-refs\000legacy cache\000Level 1 data cache write accesses\000legacy-cache-config=0x100\000\00010\000\000\000\000\000 */
+{ 43344 }, /* l1i\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 48978 }, /* l1i-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 43431 }, /* l1i-load\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 43818 }, /* l1i-load-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 44020 }, /* l1i-load-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 43917 }, /* l1i-load-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 43722 }, /* l1i-load-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 43620 }, /* l1i-load-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 43523 }, /* l1i-load-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 44121 }, /* l1i-loads\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 44512 }, /* l1i-loads-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 44716 }, /* l1i-loads-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 44612 }, /* l1i-loads-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 44415 }, /* l1i-loads-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 44312 }, /* l1i-loads-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 44214 }, /* l1i-loads-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 49170 }, /* l1i-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 49072 }, /* l1i-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 48887 }, /* l1i-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 45508 }, /* l1i-prefetch\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 45943 }, /* l1i-prefetch-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 46165 }, /* l1i-prefetch-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 46054 }, /* l1i-prefetch-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 45835 }, /* l1i-prefetch-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 45721 }, /* l1i-prefetch-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 45612 }, /* l1i-prefetch-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 46274 }, /* l1i-prefetches\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 46717 }, /* l1i-prefetches-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 46943 }, /* l1i-prefetches-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 46830 }, /* l1i-prefetches-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 46607 }, /* l1i-prefetches-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 46491 }, /* l1i-prefetches-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 46380 }, /* l1i-prefetches-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 44818 }, /* l1i-read\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 45205 }, /* l1i-read-access\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 45407 }, /* l1i-read-miss\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 45304 }, /* l1i-read-misses\000legacy cache\000Level 1 instruction cache read misses\000legacy-cache-config=0x10001\000\00010\000\000\000\000\000 */
+{ 45109 }, /* l1i-read-ops\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 45007 }, /* l1i-read-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 44910 }, /* l1i-read-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 48790 }, /* l1i-reference\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 48698 }, /* l1i-refs\000legacy cache\000Level 1 instruction cache read accesses\000legacy-cache-config=1\000\00010\000\000\000\000\000 */
+{ 47876 }, /* l1i-speculative-load\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 48343 }, /* l1i-speculative-load-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 48581 }, /* l1i-speculative-load-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 48462 }, /* l1i-speculative-load-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 48227 }, /* l1i-speculative-load-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 48105 }, /* l1i-speculative-load-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 47988 }, /* l1i-speculative-load-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 47054 }, /* l1i-speculative-read\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 47521 }, /* l1i-speculative-read-access\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 47759 }, /* l1i-speculative-read-miss\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 47640 }, /* l1i-speculative-read-misses\000legacy cache\000Level 1 instruction cache prefetch misses\000legacy-cache-config=0x10201\000\00010\000\000\000\000\000 */
+{ 47405 }, /* l1i-speculative-read-ops\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 47283 }, /* l1i-speculative-read-reference\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 47166 }, /* l1i-speculative-read-refs\000legacy cache\000Level 1 instruction cache prefetch accesses\000legacy-cache-config=0x201\000\00010\000\000\000\000\000 */
+{ 63212 }, /* l2\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 71765 }, /* l2-access\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 63309 }, /* l2-load\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 63736 }, /* l2-load-access\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 63958 }, /* l2-load-miss\000legacy cache\000Level 2 (or higher) last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000 */
+{ 63845 }, /* l2-load-misses\000legacy cache\000Level 2 (or higher) last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000 */
+{ 63630 }, /* l2-load-ops\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 63518 }, /* l2-load-reference\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 63411 }, /* l2-load-refs\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 64069 }, /* l2-loads\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 64500 }, /* l2-loads-access\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 64724 }, /* l2-loads-miss\000legacy cache\000Level 2 (or higher) last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000 */
+{ 64610 }, /* l2-loads-misses\000legacy cache\000Level 2 (or higher) last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000 */
+{ 64393 }, /* l2-loads-ops\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 64280 }, /* l2-loads-reference\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 64172 }, /* l2-loads-refs\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 71977 }, /* l2-miss\000legacy cache\000Level 2 (or higher) last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000 */
+{ 71869 }, /* l2-misses\000legacy cache\000Level 2 (or higher) last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000 */
+{ 71664 }, /* l2-ops\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 67985 }, /* l2-prefetch\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 68460 }, /* l2-prefetch-access\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 68702 }, /* l2-prefetch-miss\000legacy cache\000Level 2 (or higher) last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000 */
+{ 68581 }, /* l2-prefetch-misses\000legacy cache\000Level 2 (or higher) last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000 */
+{ 68342 }, /* l2-prefetch-ops\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 68218 }, /* l2-prefetch-reference\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 68099 }, /* l2-prefetch-refs\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 68821 }, /* l2-prefetches\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 69304 }, /* l2-prefetches-access\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 69550 }, /* l2-prefetches-miss\000legacy cache\000Level 2 (or higher) last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000 */
+{ 69427 }, /* l2-prefetches-misses\000legacy cache\000Level 2 (or higher) last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000 */
+{ 69184 }, /* l2-prefetches-ops\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 69058 }, /* l2-prefetches-reference\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 68937 }, /* l2-prefetches-refs\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 64836 }, /* l2-read\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 65263 }, /* l2-read-access\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 65485 }, /* l2-read-miss\000legacy cache\000Level 2 (or higher) last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000 */
+{ 65372 }, /* l2-read-misses\000legacy cache\000Level 2 (or higher) last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000 */
+{ 65157 }, /* l2-read-ops\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 65045 }, /* l2-read-reference\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 64938 }, /* l2-read-refs\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 71557 }, /* l2-reference\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 71455 }, /* l2-refs\000legacy cache\000Level 2 (or higher) last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 70563 }, /* l2-speculative-load\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 71070 }, /* l2-speculative-load-access\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 71328 }, /* l2-speculative-load-miss\000legacy cache\000Level 2 (or higher) last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000 */
+{ 71199 }, /* l2-speculative-load-misses\000legacy cache\000Level 2 (or higher) last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000 */
+{ 70944 }, /* l2-speculative-load-ops\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 70812 }, /* l2-speculative-load-reference\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 70685 }, /* l2-speculative-load-refs\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 69671 }, /* l2-speculative-read\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 70178 }, /* l2-speculative-read-access\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 70436 }, /* l2-speculative-read-miss\000legacy cache\000Level 2 (or higher) last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000 */
+{ 70307 }, /* l2-speculative-read-misses\000legacy cache\000Level 2 (or higher) last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000 */
+{ 70052 }, /* l2-speculative-read-ops\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 69920 }, /* l2-speculative-read-reference\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 69793 }, /* l2-speculative-read-refs\000legacy cache\000Level 2 (or higher) last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 65596 }, /* l2-store\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000 */
+{ 66047 }, /* l2-store-access\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000 */
+{ 66277 }, /* l2-store-miss\000legacy cache\000Level 2 (or higher) last level cache write misses\000legacy-cache-config=0x10102\000\00010\000\000\000\000\000 */
+{ 66162 }, /* l2-store-misses\000legacy cache\000Level 2 (or higher) last level cache write misses\000legacy-cache-config=0x10102\000\00010\000\000\000\000\000 */
+{ 65935 }, /* l2-store-ops\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000 */
+{ 65817 }, /* l2-store-reference\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000 */
+{ 65704 }, /* l2-store-refs\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000 */
+{ 66390 }, /* l2-stores\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000 */
+{ 66845 }, /* l2-stores-access\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000 */
+{ 67077 }, /* l2-stores-miss\000legacy cache\000Level 2 (or higher) last level cache write misses\000legacy-cache-config=0x10102\000\00010\000\000\000\000\000 */
+{ 66961 }, /* l2-stores-misses\000legacy cache\000Level 2 (or higher) last level cache write misses\000legacy-cache-config=0x10102\000\00010\000\000\000\000\000 */
+{ 66732 }, /* l2-stores-ops\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000 */
+{ 66613 }, /* l2-stores-reference\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000 */
+{ 66499 }, /* l2-stores-refs\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000 */
+{ 67191 }, /* l2-write\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000 */
+{ 67642 }, /* l2-write-access\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000 */
+{ 67872 }, /* l2-write-miss\000legacy cache\000Level 2 (or higher) last level cache write misses\000legacy-cache-config=0x10102\000\00010\000\000\000\000\000 */
+{ 67757 }, /* l2-write-misses\000legacy cache\000Level 2 (or higher) last level cache write misses\000legacy-cache-config=0x10102\000\00010\000\000\000\000\000 */
+{ 67530 }, /* l2-write-ops\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000 */
+{ 67412 }, /* l2-write-reference\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000 */
+{ 67299 }, /* l2-write-refs\000legacy cache\000Level 2 (or higher) last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000 */
+{ 55804 }, /* llc\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 62951 }, /* llc-access\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 55882 }, /* llc-load\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 56233 }, /* llc-load-access\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 56417 }, /* llc-load-miss\000legacy cache\000Last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000 */
+{ 56323 }, /* llc-load-misses\000legacy cache\000Last level cache read misses\000legacy-cache-config=0x10002\000\00000\000\000\000\000\000 */
+{ 56146 }, /* llc-load-ops\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 56053 }, /* llc-load-reference\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 55965 }, /* llc-load-refs\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 56509 }, /* llc-loads\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00000\000\000\000\000\000 */
+{ 56864 }, /* llc-loads-access\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 57050 }, /* llc-loads-miss\000legacy cache\000Last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000 */
+{ 56955 }, /* llc-loads-misses\000legacy cache\000Last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000 */
+{ 56776 }, /* llc-loads-ops\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 56682 }, /* llc-loads-reference\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 56593 }, /* llc-loads-refs\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 63125 }, /* llc-miss\000legacy cache\000Last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000 */
+{ 63036 }, /* llc-misses\000legacy cache\000Last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000 */
+{ 62869 }, /* llc-ops\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 59760 }, /* llc-prefetch\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 60159 }, /* llc-prefetch-access\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 60363 }, /* llc-prefetch-miss\000legacy cache\000Last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000 */
+{ 60261 }, /* llc-prefetch-misses\000legacy cache\000Last level cache prefetch misses\000legacy-cache-config=0x10202\000\00000\000\000\000\000\000 */
+{ 60060 }, /* llc-prefetch-ops\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 59955 }, /* llc-prefetch-reference\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 59855 }, /* llc-prefetch-refs\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 60463 }, /* llc-prefetches\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00000\000\000\000\000\000 */
+{ 60870 }, /* llc-prefetches-access\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 61078 }, /* llc-prefetches-miss\000legacy cache\000Last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000 */
+{ 60974 }, /* llc-prefetches-misses\000legacy cache\000Last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000 */
+{ 60769 }, /* llc-prefetches-ops\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 60662 }, /* llc-prefetches-reference\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 60560 }, /* llc-prefetches-refs\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 57143 }, /* llc-read\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 57494 }, /* llc-read-access\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 57678 }, /* llc-read-miss\000legacy cache\000Last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000 */
+{ 57584 }, /* llc-read-misses\000legacy cache\000Last level cache read misses\000legacy-cache-config=0x10002\000\00010\000\000\000\000\000 */
+{ 57407 }, /* llc-read-ops\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 57314 }, /* llc-read-reference\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 57226 }, /* llc-read-refs\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 62781 }, /* llc-reference\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 62698 }, /* llc-refs\000legacy cache\000Last level cache read accesses\000legacy-cache-config=2\000\00010\000\000\000\000\000 */
+{ 61939 }, /* llc-speculative-load\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 62370 }, /* llc-speculative-load-access\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 62590 }, /* llc-speculative-load-miss\000legacy cache\000Last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000 */
+{ 62480 }, /* llc-speculative-load-misses\000legacy cache\000Last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000 */
+{ 62263 }, /* llc-speculative-load-ops\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 62150 }, /* llc-speculative-load-reference\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 62042 }, /* llc-speculative-load-refs\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 61180 }, /* llc-speculative-read\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 61611 }, /* llc-speculative-read-access\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 61831 }, /* llc-speculative-read-miss\000legacy cache\000Last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000 */
+{ 61721 }, /* llc-speculative-read-misses\000legacy cache\000Last level cache prefetch misses\000legacy-cache-config=0x10202\000\00010\000\000\000\000\000 */
+{ 61504 }, /* llc-speculative-read-ops\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 61391 }, /* llc-speculative-read-reference\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 61283 }, /* llc-speculative-read-refs\000legacy cache\000Last level cache prefetch accesses\000legacy-cache-config=0x202\000\00010\000\000\000\000\000 */
+{ 57770 }, /* llc-store\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000 */
+{ 58145 }, /* llc-store-access\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000 */
+{ 58337 }, /* llc-store-miss\000legacy cache\000Last level cache write misses\000legacy-cache-config=0x10102\000\00010\000\000\000\000\000 */
+{ 58241 }, /* llc-store-misses\000legacy cache\000Last level cache write misses\000legacy-cache-config=0x10102\000\00000\000\000\000\000\000 */
+{ 58052 }, /* llc-store-ops\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000 */
+{ 57953 }, /* llc-store-reference\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000 */
+{ 57859 }, /* llc-store-refs\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000 */
+{ 58431 }, /* llc-stores\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00000\000\000\000\000\000 */
+{ 58810 }, /* llc-stores-access\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000 */
+{ 59004 }, /* llc-stores-miss\000legacy cache\000Last level cache write misses\000legacy-cache-config=0x10102\000\00010\000\000\000\000\000 */
+{ 58907 }, /* llc-stores-misses\000legacy cache\000Last level cache write misses\000legacy-cache-config=0x10102\000\00010\000\000\000\000\000 */
+{ 58716 }, /* llc-stores-ops\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000 */
+{ 58616 }, /* llc-stores-reference\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000 */
+{ 58521 }, /* llc-stores-refs\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000 */
+{ 59099 }, /* llc-write\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000 */
+{ 59474 }, /* llc-write-access\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000 */
+{ 59666 }, /* llc-write-miss\000legacy cache\000Last level cache write misses\000legacy-cache-config=0x10102\000\00010\000\000\000\000\000 */
+{ 59570 }, /* llc-write-misses\000legacy cache\000Last level cache write misses\000legacy-cache-config=0x10102\000\00010\000\000\000\000\000 */
+{ 59381 }, /* llc-write-ops\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000 */
+{ 59282 }, /* llc-write-reference\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000 */
+{ 59188 }, /* llc-write-refs\000legacy cache\000Last level cache write accesses\000legacy-cache-config=0x102\000\00010\000\000\000\000\000 */
+{ 114128 }, /* node\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000 */
+{ 121053 }, /* node-access\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000 */
+{ 114203 }, /* node-load\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000 */
+{ 114542 }, /* node-load-access\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000 */
+{ 114720 }, /* node-load-miss\000legacy cache\000Local memory read misses\000legacy-cache-config=0x10006\000\00010\000\000\000\000\000 */
+{ 114629 }, /* node-load-misses\000legacy cache\000Local memory read misses\000legacy-cache-config=0x10006\000\00000\000\000\000\000\000 */
+{ 114458 }, /* node-load-ops\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000 */
+{ 114368 }, /* node-load-reference\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000 */
+{ 114283 }, /* node-load-refs\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000 */
+{ 114809 }, /* node-loads\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00000\000\000\000\000\000 */
+{ 115152 }, /* node-loads-access\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000 */
+{ 115332 }, /* node-loads-miss\000legacy cache\000Local memory read misses\000legacy-cache-config=0x10006\000\00010\000\000\000\000\000 */
+{ 115240 }, /* node-loads-misses\000legacy cache\000Local memory read misses\000legacy-cache-config=0x10006\000\00010\000\000\000\000\000 */
+{ 115067 }, /* node-loads-ops\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000 */
+{ 114976 }, /* node-loads-reference\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000 */
+{ 114890 }, /* node-loads-refs\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000 */
+{ 121221 }, /* node-miss\000legacy cache\000Local memory read misses\000legacy-cache-config=0x10006\000\00010\000\000\000\000\000 */
+{ 121135 }, /* node-misses\000legacy cache\000Local memory read misses\000legacy-cache-config=0x10006\000\00010\000\000\000\000\000 */
+{ 120974 }, /* node-ops\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000 */
+{ 117955 }, /* node-prefetch\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000 */
+{ 118342 }, /* node-prefetch-access\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000 */
+{ 118540 }, /* node-prefetch-miss\000legacy cache\000Local memory prefetch misses\000legacy-cache-config=0x10206\000\00010\000\000\000\000\000 */
+{ 118441 }, /* node-prefetch-misses\000legacy cache\000Local memory prefetch misses\000legacy-cache-config=0x10206\000\00000\000\000\000\000\000 */
+{ 118246 }, /* node-prefetch-ops\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000 */
+{ 118144 }, /* node-prefetch-reference\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000 */
+{ 118047 }, /* node-prefetch-refs\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000 */
+{ 118637 }, /* node-prefetches\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00000\000\000\000\000\000 */
+{ 119032 }, /* node-prefetches-access\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000 */
+{ 119234 }, /* node-prefetches-miss\000legacy cache\000Local memory prefetch misses\000legacy-cache-config=0x10206\000\00010\000\000\000\000\000 */
+{ 119133 }, /* node-prefetches-misses\000legacy cache\000Local memory prefetch misses\000legacy-cache-config=0x10206\000\00010\000\000\000\000\000 */
+{ 118934 }, /* node-prefetches-ops\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000 */
+{ 118830 }, /* node-prefetches-reference\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000 */
+{ 118731 }, /* node-prefetches-refs\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000 */
+{ 115422 }, /* node-read\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000 */
+{ 115761 }, /* node-read-access\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000 */
+{ 115939 }, /* node-read-miss\000legacy cache\000Local memory read misses\000legacy-cache-config=0x10006\000\00010\000\000\000\000\000 */
+{ 115848 }, /* node-read-misses\000legacy cache\000Local memory read misses\000legacy-cache-config=0x10006\000\00010\000\000\000\000\000 */
+{ 115677 }, /* node-read-ops\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000 */
+{ 115587 }, /* node-read-reference\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000 */
+{ 115502 }, /* node-read-refs\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000 */
+{ 120889 }, /* node-reference\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000 */
+{ 120809 }, /* node-refs\000legacy cache\000Local memory read accesses\000legacy-cache-config=6\000\00010\000\000\000\000\000 */
+{ 120071 }, /* node-speculative-load\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000 */
+{ 120490 }, /* node-speculative-load-access\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000 */
+{ 120704 }, /* node-speculative-load-miss\000legacy cache\000Local memory prefetch misses\000legacy-cache-config=0x10206\000\00010\000\000\000\000\000 */
+{ 120597 }, /* node-speculative-load-misses\000legacy cache\000Local memory prefetch misses\000legacy-cache-config=0x10206\000\00010\000\000\000\000\000 */
+{ 120386 }, /* node-speculative-load-ops\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000 */
+{ 120276 }, /* node-speculative-load-reference\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000 */
+{ 120171 }, /* node-speculative-load-refs\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000 */
+{ 119333 }, /* node-speculative-read\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000 */
+{ 119752 }, /* node-speculative-read-access\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000 */
+{ 119966 }, /* node-speculative-read-miss\000legacy cache\000Local memory prefetch misses\000legacy-cache-config=0x10206\000\00010\000\000\000\000\000 */
+{ 119859 }, /* node-speculative-read-misses\000legacy cache\000Local memory prefetch misses\000legacy-cache-config=0x10206\000\00010\000\000\000\000\000 */
+{ 119648 }, /* node-speculative-read-ops\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000 */
+{ 119538 }, /* node-speculative-read-reference\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000 */
+{ 119433 }, /* node-speculative-read-refs\000legacy cache\000Local memory prefetch accesses\000legacy-cache-config=0x206\000\00010\000\000\000\000\000 */
+{ 116028 }, /* node-store\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00010\000\000\000\000\000 */
+{ 116391 }, /* node-store-access\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00010\000\000\000\000\000 */
+{ 116577 }, /* node-store-miss\000legacy cache\000Local memory write misses\000legacy-cache-config=0x10106\000\00010\000\000\000\000\000 */
+{ 116484 }, /* node-store-misses\000legacy cache\000Local memory write misses\000legacy-cache-config=0x10106\000\00000\000\000\000\000\000 */
+{ 116301 }, /* node-store-ops\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00010\000\000\000\000\000 */
+{ 116205 }, /* node-store-reference\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00010\000\000\000\000\000 */
+{ 116114 }, /* node-store-refs\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00010\000\000\000\000\000 */
+{ 116668 }, /* node-stores\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00000\000\000\000\000\000 */
+{ 117035 }, /* node-stores-access\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00010\000\000\000\000\000 */
+{ 117223 }, /* node-stores-miss\000legacy cache\000Local memory write misses\000legacy-cache-config=0x10106\000\00010\000\000\000\000\000 */
+{ 117129 }, /* node-stores-misses\000legacy cache\000Local memory write misses\000legacy-cache-config=0x10106\000\00010\000\000\000\000\000 */
+{ 116944 }, /* node-stores-ops\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00010\000\000\000\000\000 */
+{ 116847 }, /* node-stores-reference\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00010\000\000\000\000\000 */
+{ 116755 }, /* node-stores-refs\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00010\000\000\000\000\000 */
+{ 117315 }, /* node-write\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00010\000\000\000\000\000 */
+{ 117678 }, /* node-write-access\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00010\000\000\000\000\000 */
+{ 117864 }, /* node-write-miss\000legacy cache\000Local memory write misses\000legacy-cache-config=0x10106\000\00010\000\000\000\000\000 */
+{ 117771 }, /* node-write-misses\000legacy cache\000Local memory write misses\000legacy-cache-config=0x10106\000\00010\000\000\000\000\000 */
+{ 117588 }, /* node-write-ops\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00010\000\000\000\000\000 */
+{ 117492 }, /* node-write-reference\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00010\000\000\000\000\000 */
+{ 117401 }, /* node-write-refs\000legacy cache\000Local memory write accesses\000legacy-cache-config=0x106\000\00010\000\000\000\000\000 */
+{ 123400 }, /* ref-cycles\000legacy hardware\000Total cycles; not affected by CPU frequency scaling\000legacy-hardware-config=9\000\00000\000\000\000\000\000 */
+{ 123094 }, /* stalled-cycles-backend\000legacy hardware\000Stalled cycles during retirement [This event is an alias of idle-cycles-backend]\000legacy-hardware-config=8\000\00000\000\000\000\000\000 */
+{ 122795 }, /* stalled-cycles-frontend\000legacy hardware\000Stalled cycles during issue [This event is an alias of idle-cycles-frontend]\000legacy-hardware-config=7\000\00000\000\000\000\000\000 */
+};
+static const struct compact_pmu_event pmu_events__common_software[] = {
+{ 124563 }, /* alignment-faults\000software\000Number of kernel handled memory alignment faults\000config=7\000\00000\000\000\000\000\000 */
+{ 124862 }, /* bpf-output\000software\000An event used by BPF programs to write to the perf ring buffer\000config=0xa\000\00000\000\000\000\000\000 */
+{ 124964 }, /* cgroup-switches\000software\000Number of context switches to a task in a different cgroup\000config=0xb\000\00000\000\000\000\000\000 */
+{ 123885 }, /* context-switches\000software\000Number of context switches [This event is an alias of cs]\000config=3\000\00000\000\000\000\000\000 */
+{ 123521 }, /* cpu-clock\000software\000Per-CPU high-resolution timer based event\000config=0\000\000001e-6msec\000\000\000\000\000 */
+{ 124087 }, /* cpu-migrations\000software\000Number of times a process has migrated to a new CPU [This event is an alias of migrations]\000config=4\000\00000\000\000\000\000\000 */
+{ 123986 }, /* cs\000software\000Number of context switches [This event is an alias of context-switches]\000config=3\000\00000\000\000\000\000\000 */
+{ 124782 }, /* dummy\000software\000A placeholder event that doesn't count anything\000config=9\000\00000\000\000\000\000\000 */
+{ 124655 }, /* emulation-faults\000software\000Number of kernel handled unimplemented instruction faults handled through emulation\000config=8\000\00000\000\000\000\000\000 */
+{ 123695 }, /* faults\000software\000Number of page faults [This event is an alias of page-faults]\000config=2\000\00000\000\000\000\000\000 */
+{ 124460 }, /* major-faults\000software\000Number of major page faults. Major faults require I/O to handle\000config=6\000\00000\000\000\000\000\000 */
+{ 124219 }, /* migrations\000software\000Number of times a process has migrated to a new CPU [This event is an alias of cpu-migrations]\000config=4\000\00000\000\000\000\000\000 */
+{ 124351 }, /* minor-faults\000software\000Number of minor page faults. Minor faults don't require I/O to handle\000config=5\000\00000\000\000\000\000\000 */
+{ 123790 }, /* page-faults\000software\000Number of page faults [This event is an alias of faults]\000config=2\000\00000\000\000\000\000\000 */
+{ 123607 }, /* task-clock\000software\000Per-task high-resolution timer based event\000config=1\000\000001e-6msec\000\000\000\000\000 */
+};
static const struct compact_pmu_event pmu_events__common_tool[] = {
-{ 5 }, /* duration_time\000tool\000Wall clock interval time in nanoseconds\000config=1\000\00000\000\000 */
-{ 210 }, /* has_pmem\000tool\0001 if persistent memory installed otherwise 0\000config=4\000\00000\000\000 */
-{ 283 }, /* num_cores\000tool\000Number of cores. A core consists of 1 or more thread, with each thread being associated with a logical Linux CPU\000config=5\000\00000\000\000 */
-{ 425 }, /* num_cpus\000tool\000Number of logical Linux CPUs. There may be multiple such CPUs on a core\000config=6\000\00000\000\000 */
-{ 525 }, /* num_cpus_online\000tool\000Number of online logical Linux CPUs. There may be multiple such CPUs on a core\000config=7\000\00000\000\000 */
-{ 639 }, /* num_dies\000tool\000Number of dies. Each die has 1 or more cores\000config=8\000\00000\000\000 */
-{ 712 }, /* num_packages\000tool\000Number of packages. Each package has 1 or more die\000config=9\000\00000\000\000 */
-{ 795 }, /* slots\000tool\000Number of functional units that in parallel can execute parts of an instruction\000config=0xa\000\00000\000\000 */
-{ 902 }, /* smt_on\000tool\0001 if simultaneous multithreading (aka hyperthreading) is enable otherwise 0\000config=0xb\000\00000\000\000 */
-{ 145 }, /* system_time\000tool\000System/kernel time in nanoseconds\000config=3\000\00000\000\000 */
-{ 1006 }, /* system_tsc_freq\000tool\000The amount a Time Stamp Counter (TSC) increases per second\000config=0xc\000\00000\000\000 */
-{ 78 }, /* user_time\000tool\000User (non-kernel) time in nanoseconds\000config=2\000\00000\000\000 */
+{ 126205 }, /* core_wide\000tool\0001 if not SMT, if SMT are events being gathered on all SMT threads 1 otherwise 0\000config=0xd\000\00000\000\000\000\000\000 */
+{ 125072 }, /* duration_time\000tool\000Wall clock interval time in nanoseconds\000config=1\000\00000\000\000\000\000\000 */
+{ 125286 }, /* has_pmem\000tool\0001 if persistent memory installed otherwise 0\000config=4\000\00000\000\000\000\000\000 */
+{ 125362 }, /* num_cores\000tool\000Number of cores. A core consists of 1 or more thread, with each thread being associated with a logical Linux CPU\000config=5\000\00000\000\000\000\000\000 */
+{ 125507 }, /* num_cpus\000tool\000Number of logical Linux CPUs. There may be multiple such CPUs on a core\000config=6\000\00000\000\000\000\000\000 */
+{ 125610 }, /* num_cpus_online\000tool\000Number of online logical Linux CPUs. There may be multiple such CPUs on a core\000config=7\000\00000\000\000\000\000\000 */
+{ 125727 }, /* num_dies\000tool\000Number of dies. Each die has 1 or more cores\000config=8\000\00000\000\000\000\000\000 */
+{ 125803 }, /* num_packages\000tool\000Number of packages. Each package has 1 or more die\000config=9\000\00000\000\000\000\000\000 */
+{ 125889 }, /* slots\000tool\000Number of functional units that in parallel can execute parts of an instruction\000config=0xa\000\00000\000\000\000\000\000 */
+{ 125999 }, /* smt_on\000tool\0001 if simultaneous multithreading (aka hyperthreading) is enable otherwise 0\000config=0xb\000\00000\000\000\000\000\000 */
+{ 125218 }, /* system_time\000tool\000System/kernel time in nanoseconds\000config=3\000\00000\000\000\000\000\000 */
+{ 126106 }, /* system_tsc_freq\000tool\000The amount a Time Stamp Counter (TSC) increases per second\000config=0xc\000\00000\000\000\000\000\000 */
+{ 126319 }, /* target_cpu\000tool\0001 if CPUs being analyzed, 0 if threads/processes\000config=0xe\000\00000\000\000\000\000\000 */
+{ 125148 }, /* user_time\000tool\000User (non-kernel) time in nanoseconds\000config=2\000\00000\000\000\000\000\000 */
};
-const struct pmu_table_entry pmu_events__common[] = {
+static const struct pmu_table_entry pmu_events__common[] = {
+{
+ .entries = pmu_events__common_default_core,
+ .num_entries = ARRAY_SIZE(pmu_events__common_default_core),
+ .pmu_name = { 0 /* default_core\000 */ },
+},
+{
+ .entries = pmu_events__common_software,
+ .num_entries = ARRAY_SIZE(pmu_events__common_software),
+ .pmu_name = { 123512 /* software\000 */ },
+},
{
.entries = pmu_events__common_tool,
.num_entries = ARRAY_SIZE(pmu_events__common_tool),
- .pmu_name = { 0 /* tool\000 */ },
+ .pmu_name = { 125067 /* tool\000 */ },
+},
+};
+
+static const struct compact_pmu_event pmu_metrics__common_default_core[] = {
+{ 127956 }, /* CPUs_utilized\000Default\000(software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@) / (duration_time * 1e9)\000\000Average CPU utilization\000\0001CPUs\000\000\000\000011 */
+{ 129273 }, /* backend_cycles_idle\000Default\000stalled\\-cycles\\-backend / cpu\\-cycles\000backend_cycles_idle > 0.2\000Backend stalls per cycle\000\000\000\000\000\000001 */
+{ 129575 }, /* branch_frequency\000Default\000branches / (software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@)\000\000Branches per CPU second\000\0001000M/sec\000\000\000\000011 */
+{ 129755 }, /* branch_miss_rate\000Default\000branch\\-misses / branches\000branch_miss_rate > 0.05\000Branch miss rate\000\000100%\000\000\000\000001 */
+{ 128142 }, /* cs_per_second\000Default\000software@context\\-switches\\,name\\=context\\-switches@ * 1e9 / (software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@)\000\000Context switches per CPU second\000\0001cs/sec\000\000\000\000011 */
+{ 129399 }, /* cycles_frequency\000Default\000cpu\\-cycles / (software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@)\000\000Cycles per CPU second\000\0001GHz\000\000\000\000011 */
+{ 130191 }, /* dtlb_miss_rate\000Default3\000dTLB\\-load\\-misses / dTLB\\-loads\000dtlb_miss_rate > 0.05\000dTLB miss rate\000\000100%\000\000\000\000001 */
+{ 129143 }, /* frontend_cycles_idle\000Default\000stalled\\-cycles\\-frontend / cpu\\-cycles\000frontend_cycles_idle > 0.1\000Frontend stalls per cycle\000\000\000\000\000\000001 */
+{ 128866 }, /* insn_per_cycle\000Default\000instructions / cpu\\-cycles\000insn_per_cycle < 1\000Instructions Per Cycle\000\0001instructions\000\000\000\000001 */
+{ 130297 }, /* itlb_miss_rate\000Default3\000iTLB\\-load\\-misses / iTLB\\-loads\000itlb_miss_rate > 0.05\000iTLB miss rate\000\000100%\000\000\000\000001 */
+{ 130403 }, /* l1_prefetch_miss_rate\000Default4\000L1\\-dcache\\-prefetch\\-misses / L1\\-dcache\\-prefetches\000l1_prefetch_miss_rate > 0.05\000L1 prefetch miss rate\000\000100%\000\000\000\000001 */
+{ 129859 }, /* l1d_miss_rate\000Default2\000L1\\-dcache\\-load\\-misses / L1\\-dcache\\-loads\000l1d_miss_rate > 0.05\000L1D miss rate\000\000100%\000\000\000\000001 */
+{ 130076 }, /* l1i_miss_rate\000Default3\000L1\\-icache\\-load\\-misses / L1\\-icache\\-loads\000l1i_miss_rate > 0.05\000L1I miss rate\000\000100%\000\000\000\000001 */
+{ 129975 }, /* llc_miss_rate\000Default2\000LLC\\-load\\-misses / LLC\\-loads\000llc_miss_rate > 0.05\000LLC miss rate\000\000100%\000\000\000\000001 */
+{ 128375 }, /* migrations_per_second\000Default\000software@cpu\\-migrations\\,name\\=cpu\\-migrations@ * 1e9 / (software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@)\000\000Process migrations to a new CPU per CPU second\000\0001migrations/sec\000\000\000\000011 */
+{ 128635 }, /* page_faults_per_second\000Default\000software@page\\-faults\\,name\\=page\\-faults@ * 1e9 / (software@cpu\\-clock\\,name\\=cpu\\-clock@ if #target_cpu else software@task\\-clock\\,name\\=task\\-clock@)\000\000Page faults per CPU second\000\0001faults/sec\000\000\000\000011 */
+{ 128979 }, /* stalled_cycles_per_instruction\000Default\000max(stalled\\-cycles\\-frontend, stalled\\-cycles\\-backend) / instructions\000\000Max front or backend stalls per instruction\000\000\000\000\000\000001 */
+
+};
+
+static const struct pmu_table_entry pmu_metrics__common[] = {
+{
+ .entries = pmu_metrics__common_default_core,
+ .num_entries = ARRAY_SIZE(pmu_metrics__common_default_core),
+ .pmu_name = { 0 /* default_core\000 */ },
},
};
static const struct compact_pmu_event pmu_events__test_soc_cpu_default_core[] = {
-{ 1115 }, /* bp_l1_btb_correct\000branch\000L1 BTB Correction\000event=0x8a\000\00000\000\000 */
-{ 1174 }, /* bp_l2_btb_correct\000branch\000L2 BTB Correction\000event=0x8b\000\00000\000\000 */
-{ 1427 }, /* dispatch_blocked.any\000other\000Memory cluster signals to block micro-op dispatch for any reason\000event=9,period=200000,umask=0x20\000\00000\000\000 */
-{ 1557 }, /* eist_trans\000other\000Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions\000event=0x3a,period=200000\000\00000\000\000 */
-{ 1233 }, /* l3_cache_rd\000cache\000L3 cache access, read\000event=0x40\000\00000\000Attributable Level 3 cache access, read\000 */
-{ 1328 }, /* segment_reg_loads.any\000other\000Number of segment register loads\000event=6,period=200000,umask=0x80\000\00000\000\000 */
+{ 126403 }, /* bp_l1_btb_correct\000branch\000L1 BTB Correction\000event=0x8a\000\00000\000\000\000\000\000 */
+{ 126465 }, /* bp_l2_btb_correct\000branch\000L2 BTB Correction\000event=0x8b\000\00000\000\000\000\000\000 */
+{ 126727 }, /* dispatch_blocked.any\000other\000Memory cluster signals to block micro-op dispatch for any reason\000event=9,period=200000,umask=0x20\000\00000\000\000\000\000\000 */
+{ 126860 }, /* eist_trans\000other\000Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions\000event=0x3a,period=200000\000\00000\000\000\000\000\000 */
+{ 126527 }, /* l3_cache_rd\000cache\000L3 cache access, read\000event=0x40\000\00000\000\000\000\000Attributable Level 3 cache access, read\000 */
+{ 126625 }, /* segment_reg_loads.any\000other\000Number of segment register loads\000event=6,period=200000,umask=0x80\000\00000\000\000\000\000\000 */
};
static const struct compact_pmu_event pmu_events__test_soc_cpu_hisi_sccl_ddrc[] = {
-{ 1687 }, /* uncore_hisi_ddrc.flux_wcmd\000uncore\000DDRC write commands\000event=2\000\00000\000DDRC write commands\000 */
+{ 126993 }, /* uncore_hisi_ddrc.flux_wcmd\000uncore\000DDRC write commands\000event=2\000\00000\000\000\000\000\000 */
};
static const struct compact_pmu_event pmu_events__test_soc_cpu_hisi_sccl_l3c[] = {
-{ 2166 }, /* uncore_hisi_l3c.rd_hit_cpipe\000uncore\000Total read hits\000event=7\000\00000\000Total read hits\000 */
+{ 127355 }, /* uncore_hisi_l3c.rd_hit_cpipe\000uncore\000Total read hits\000event=7\000\00000\000\000\000\000\000 */
};
static const struct compact_pmu_event pmu_events__test_soc_cpu_uncore_cbox[] = {
-{ 2016 }, /* event-hyphen\000uncore\000UNC_CBO_HYPHEN\000event=0xe0\000\00000\000UNC_CBO_HYPHEN\000 */
-{ 2081 }, /* event-two-hyph\000uncore\000UNC_CBO_TWO_HYPH\000event=0xc0\000\00000\000UNC_CBO_TWO_HYPH\000 */
-{ 1785 }, /* unc_cbo_xsnp_response.miss_eviction\000uncore\000A cross-core snoop resulted from L3 Eviction which misses in some processor core\000event=0x22,umask=0x81\000\00000\000A cross-core snoop resulted from L3 Eviction which misses in some processor core\000 */
+{ 127229 }, /* event-hyphen\000uncore\000UNC_CBO_HYPHEN\000event=0xe0\000\00000\000\000\000\000\000 */
+{ 127283 }, /* event-two-hyph\000uncore\000UNC_CBO_TWO_HYPH\000event=0xc0\000\00000\000\000\000\000\000 */
+{ 127075 }, /* unc_cbo_xsnp_response.miss_eviction\000uncore\000A cross-core snoop resulted from L3 Eviction which misses in some processor core\000event=0x22,umask=0x81\000\00000\000\000\000\000\000 */
};
static const struct compact_pmu_event pmu_events__test_soc_cpu_uncore_imc[] = {
-{ 2376 }, /* uncore_imc.cache_hits\000uncore\000Total cache hits\000event=0x34\000\00000\000Total cache hits\000 */
+{ 127538 }, /* uncore_imc.cache_hits\000uncore\000Total cache hits\000event=0x34\000\00000\000\000\000\000\000 */
};
static const struct compact_pmu_event pmu_events__test_soc_cpu_uncore_imc_free_running[] = {
-{ 2270 }, /* uncore_imc_free_running.cache_miss\000uncore\000Total cache misses\000event=0x12\000\00000\000Total cache misses\000 */
+{ 127447 }, /* uncore_imc_free_running.cache_miss\000uncore\000Total cache misses\000event=0x12\000\00000\000\000\000\000\000 */
};
-const struct pmu_table_entry pmu_events__test_soc_cpu[] = {
+static const struct pmu_table_entry pmu_events__test_soc_cpu[] = {
{
.entries = pmu_events__test_soc_cpu_default_core,
.num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_default_core),
- .pmu_name = { 1102 /* default_core\000 */ },
+ .pmu_name = { 0 /* default_core\000 */ },
},
{
.entries = pmu_events__test_soc_cpu_hisi_sccl_ddrc,
.num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_hisi_sccl_ddrc),
- .pmu_name = { 1672 /* hisi_sccl,ddrc\000 */ },
+ .pmu_name = { 126978 /* hisi_sccl,ddrc\000 */ },
},
{
.entries = pmu_events__test_soc_cpu_hisi_sccl_l3c,
.num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_hisi_sccl_l3c),
- .pmu_name = { 2152 /* hisi_sccl,l3c\000 */ },
+ .pmu_name = { 127341 /* hisi_sccl,l3c\000 */ },
},
{
.entries = pmu_events__test_soc_cpu_uncore_cbox,
.num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_uncore_cbox),
- .pmu_name = { 1773 /* uncore_cbox\000 */ },
+ .pmu_name = { 127063 /* uncore_cbox\000 */ },
},
{
.entries = pmu_events__test_soc_cpu_uncore_imc,
.num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_uncore_imc),
- .pmu_name = { 2365 /* uncore_imc\000 */ },
+ .pmu_name = { 127527 /* uncore_imc\000 */ },
},
{
.entries = pmu_events__test_soc_cpu_uncore_imc_free_running,
.num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_uncore_imc_free_running),
- .pmu_name = { 2246 /* uncore_imc_free_running\000 */ },
+ .pmu_name = { 127423 /* uncore_imc_free_running\000 */ },
},
};
static const struct compact_pmu_event pmu_metrics__test_soc_cpu_default_core[] = {
-{ 2798 }, /* CPI\000\0001 / IPC\000\000\000\000\000\000\000\00000 */
-{ 3479 }, /* DCache_L2_All\000\000DCache_L2_All_Hits + DCache_L2_All_Miss\000\000\000\000\000\000\000\00000 */
-{ 3251 }, /* DCache_L2_All_Hits\000\000l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit\000\000\000\000\000\000\000\00000 */
-{ 3345 }, /* DCache_L2_All_Miss\000\000max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) + l2_rqsts.pf_miss + l2_rqsts.rfo_miss\000\000\000\000\000\000\000\00000 */
-{ 3543 }, /* DCache_L2_Hits\000\000d_ratio(DCache_L2_All_Hits, DCache_L2_All)\000\000\000\000\000\000\000\00000 */
-{ 3611 }, /* DCache_L2_Misses\000\000d_ratio(DCache_L2_All_Miss, DCache_L2_All)\000\000\000\000\000\000\000\00000 */
-{ 2883 }, /* Frontend_Bound_SMT\000\000idq_uops_not_delivered.core / (4 * (cpu_clk_unhalted.thread / 2 * (1 + cpu_clk_unhalted.one_thread_active / cpu_clk_unhalted.ref_xclk)))\000\000\000\000\000\000\000\00000 */
-{ 2820 }, /* IPC\000group1\000inst_retired.any / cpu_clk_unhalted.thread\000\000\000\000\000\000\000\00000 */
-{ 3745 }, /* L1D_Cache_Fill_BW\000\00064 * l1d.replacement / 1e9 / duration_time\000\000\000\000\000\000\000\00000 */
-{ 3681 }, /* M1\000\000ipc + M2\000\000\000\000\000\000\000\00000 */
-{ 3703 }, /* M2\000\000ipc + M1\000\000\000\000\000\000\000\00000 */
-{ 3725 }, /* M3\000\0001 / M3\000\000\000\000\000\000\000\00000 */
-{ 3180 }, /* cache_miss_cycles\000group1\000dcache_miss_cpi + icache_miss_cycles\000\000\000\000\000\000\000\00000 */
-{ 3049 }, /* dcache_miss_cpi\000\000l1d\\-loads\\-misses / inst_retired.any\000\000\000\000\000\000\000\00000 */
-{ 3113 }, /* icache_miss_cycles\000\000l1i\\-loads\\-misses / inst_retired.any\000\000\000\000\000\000\000\00000 */
+{ 130551 }, /* CPI\000\0001 / IPC\000\000\000\000\000\000\000\000000 */
+{ 131240 }, /* DCache_L2_All\000\000DCache_L2_All_Hits + DCache_L2_All_Miss\000\000\000\000\000\000\000\000000 */
+{ 131010 }, /* DCache_L2_All_Hits\000\000l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit\000\000\000\000\000\000\000\000000 */
+{ 131105 }, /* DCache_L2_All_Miss\000\000max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) + l2_rqsts.pf_miss + l2_rqsts.rfo_miss\000\000\000\000\000\000\000\000000 */
+{ 131305 }, /* DCache_L2_Hits\000\000d_ratio(DCache_L2_All_Hits, DCache_L2_All)\000\000\000\000\000\000\000\000000 */
+{ 131374 }, /* DCache_L2_Misses\000\000d_ratio(DCache_L2_All_Miss, DCache_L2_All)\000\000\000\000\000\000\000\000000 */
+{ 130638 }, /* Frontend_Bound_SMT\000\000idq_uops_not_delivered.core / (4 * (cpu_clk_unhalted.thread / 2 * (1 + cpu_clk_unhalted.one_thread_active / cpu_clk_unhalted.ref_xclk)))\000\000\000\000\000\000\000\000000 */
+{ 130574 }, /* IPC\000group1\000inst_retired.any / cpu_clk_unhalted.thread\000\000\000\000\000\000\000\000000 */
+{ 131512 }, /* L1D_Cache_Fill_BW\000\00064 * l1d.replacement / 1e9 / duration_time\000\000\000\000\000\000\000\000000 */
+{ 131445 }, /* M1\000\000ipc + M2\000\000\000\000\000\000\000\000000 */
+{ 131468 }, /* M2\000\000ipc + M1\000\000\000\000\000\000\000\000000 */
+{ 131491 }, /* M3\000\0001 / M3\000\000\000\000\000\000\000\000000 */
+{ 130938 }, /* cache_miss_cycles\000group1\000dcache_miss_cpi + icache_miss_cycles\000\000\000\000\000\000\000\000000 */
+{ 130805 }, /* dcache_miss_cpi\000\000l1d\\-loads\\-misses / inst_retired.any\000\000\000\000\000\000\000\000000 */
+{ 130870 }, /* icache_miss_cycles\000\000l1i\\-loads\\-misses / inst_retired.any\000\000\000\000\000\000\000\000000 */
};
-const struct pmu_table_entry pmu_metrics__test_soc_cpu[] = {
+static const struct pmu_table_entry pmu_metrics__test_soc_cpu[] = {
{
.entries = pmu_metrics__test_soc_cpu_default_core,
.num_entries = ARRAY_SIZE(pmu_metrics__test_soc_cpu_default_core),
- .pmu_name = { 1102 /* default_core\000 */ },
+ .pmu_name = { 0 /* default_core\000 */ },
},
};
static const struct compact_pmu_event pmu_events__test_soc_sys_uncore_sys_ccn_pmu[] = {
-{ 2565 }, /* sys_ccn_pmu.read_cycles\000uncore\000ccn read-cycles event\000config=0x2c\0000x01\00000\000\000 */
+{ 127717 }, /* sys_ccn_pmu.read_cycles\000uncore\000ccn read-cycles event\000config=0x2c\0000x01\00000\000\000\000\000\000 */
};
static const struct compact_pmu_event pmu_events__test_soc_sys_uncore_sys_cmn_pmu[] = {
-{ 2658 }, /* sys_cmn_pmu.hnf_cache_miss\000uncore\000Counts total cache misses in first lookup result (high priority)\000eventid=1,type=5\000(434|436|43c|43a).*\00000\000\000 */
+{ 127813 }, /* sys_cmn_pmu.hnf_cache_miss\000uncore\000Counts total cache misses in first lookup result (high priority)\000eventid=1,type=5\000(434|436|43c|43a).*\00000\000\000\000\000\000 */
};
static const struct compact_pmu_event pmu_events__test_soc_sys_uncore_sys_ddr_pmu[] = {
-{ 2473 }, /* sys_ddr_pmu.write_cycles\000uncore\000ddr write-cycles event\000event=0x2b\000v8\00000\000\000 */
+{ 127622 }, /* sys_ddr_pmu.write_cycles\000uncore\000ddr write-cycles event\000event=0x2b\000v8\00000\000\000\000\000\000 */
};
-const struct pmu_table_entry pmu_events__test_soc_sys[] = {
+static const struct pmu_table_entry pmu_events__test_soc_sys[] = {
{
.entries = pmu_events__test_soc_sys_uncore_sys_ccn_pmu,
.num_entries = ARRAY_SIZE(pmu_events__test_soc_sys_uncore_sys_ccn_pmu),
- .pmu_name = { 2546 /* uncore_sys_ccn_pmu\000 */ },
+ .pmu_name = { 127698 /* uncore_sys_ccn_pmu\000 */ },
},
{
.entries = pmu_events__test_soc_sys_uncore_sys_cmn_pmu,
.num_entries = ARRAY_SIZE(pmu_events__test_soc_sys_uncore_sys_cmn_pmu),
- .pmu_name = { 2639 /* uncore_sys_cmn_pmu\000 */ },
+ .pmu_name = { 127794 /* uncore_sys_cmn_pmu\000 */ },
},
{
.entries = pmu_events__test_soc_sys_uncore_sys_ddr_pmu,
.num_entries = ARRAY_SIZE(pmu_events__test_soc_sys_uncore_sys_ddr_pmu),
- .pmu_name = { 2454 /* uncore_sys_ddr_pmu\000 */ },
+ .pmu_name = { 127603 /* uncore_sys_ddr_pmu\000 */ },
},
};
@@ -246,7 +2801,7 @@ struct pmu_events_map {
* Global table mapping each known CPU for the architecture to its
* table of PMU events.
*/
-const struct pmu_events_map pmu_events_map[] = {
+static const struct pmu_events_map pmu_events_map[] = {
{
.arch = "common",
.cpuid = "common",
@@ -254,7 +2809,10 @@ const struct pmu_events_map pmu_events_map[] = {
.pmus = pmu_events__common,
.num_pmus = ARRAY_SIZE(pmu_events__common),
},
- .metric_table = {},
+ .metric_table = {
+ .pmus = pmu_metrics__common,
+ .num_pmus = ARRAY_SIZE(pmu_metrics__common),
+ },
},
{
.arch = "testarch",
@@ -316,6 +2874,12 @@ static void decompress_event(int offset, struct pmu_event *pe)
p++;
pe->unit = (*p == '\0' ? NULL : p);
while (*p++);
+ pe->retirement_latency_mean = (*p == '\0' ? NULL : p);
+ while (*p++);
+ pe->retirement_latency_min = (*p == '\0' ? NULL : p);
+ while (*p++);
+ pe->retirement_latency_max = (*p == '\0' ? NULL : p);
+ while (*p++);
pe->long_desc = (*p == '\0' ? NULL : p);
}
@@ -346,6 +2910,8 @@ static void decompress_metric(int offset, struct pmu_metric *pm)
pm->aggr_mode = *p - '0';
p++;
pm->event_grouping = *p - '0';
+ p++;
+ pm->default_show_events = *p - '0';
}
static int pmu_events_table__for_each_event_pmu(const struct pmu_events_table *table,
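[Editorial note, not part of the patch: each event above is stored in
big_c_string as a single nul-separated record, and decompress_event()
recovers the fields by walking the '\0' terminators in the order fixed by
_json_event_attributes in jevents.py. This patch appends the three
retirement_latency_* fields just before long_desc, and gives
decompress_metric() a trailing default_show_events digit. A minimal Python
sketch of the event decoding, fed with the duration_time record from the
table above ("\000" in the generated comments is the octal escape for '\0'):]

# Hypothetical decoder mirroring the generated decompress_event(): string
# fields are '\0'-terminated, while deprecated/perpkg are single '0'/'1'
# digits with no terminator of their own.
def decompress_event_py(record: str) -> dict:
    fields = {}
    pos = 0

    def string_field(name):
        nonlocal pos
        end = record.index('\0', pos)
        fields[name] = record[pos:end] or None  # empty decodes to NULL
        pos = end + 1

    def digit_field(name):
        nonlocal pos
        fields[name] = record[pos] != '0'
        pos += 1

    for f in ('name', 'topic', 'desc', 'event', 'compat'):
        string_field(f)
    digit_field('deprecated')
    digit_field('perpkg')
    # The fields added by this patch sit between unit and long_desc:
    for f in ('unit', 'retirement_latency_mean', 'retirement_latency_min',
              'retirement_latency_max', 'long_desc'):
        string_field(f)
    return fields

rec = ("duration_time\0tool\0Wall clock interval time in nanoseconds\0"
       "config=1\0\0" "00" "\0\0\0\0\0")
print(decompress_event_py(rec))  # name/topic/desc/event set, the rest None/False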
@@ -417,6 +2983,8 @@ int pmu_events_table__for_each_event(const struct pmu_events_table *table,
pmu_event_iter_fn fn,
void *data)
{
+ if (!table)
+ return 0;
for (size_t i = 0; i < table->num_pmus; i++) {
const struct pmu_table_entry *table_pmu = &table->pmus[i];
const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
@@ -438,12 +3006,14 @@ int pmu_events_table__find_event(const struct pmu_events_table *table,
pmu_event_iter_fn fn,
void *data)
{
+ if (!table)
+ return PMU_EVENTS__NOT_FOUND;
for (size_t i = 0; i < table->num_pmus; i++) {
const struct pmu_table_entry *table_pmu = &table->pmus[i];
const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
int ret;
- if (!perf_pmu__name_wildcard_match(pmu, pmu_name))
+ if (pmu && !perf_pmu__name_wildcard_match(pmu, pmu_name))
continue;
ret = pmu_events_table__find_event_pmu(table, table_pmu, name, fn, data);
@@ -458,6 +3028,8 @@ size_t pmu_events_table__num_events(const struct pmu_events_table *table,
{
size_t count = 0;
+ if (!table)
+ return 0;
for (size_t i = 0; i < table->num_pmus; i++) {
const struct pmu_table_entry *table_pmu = &table->pmus[i];
const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
@@ -489,10 +3061,55 @@ static int pmu_metrics_table__for_each_metric_pmu(const struct pmu_metrics_table
return 0;
}
+static int pmu_metrics_table__find_metric_pmu(const struct pmu_metrics_table *table,
+ const struct pmu_table_entry *pmu,
+ const char *metric,
+ pmu_metric_iter_fn fn,
+ void *data)
+{
+ struct pmu_metric pm = {
+ .pmu = &big_c_string[pmu->pmu_name.offset],
+ };
+ int low = 0, high = pmu->num_entries - 1;
+
+ while (low <= high) {
+ int cmp, mid = (low + high) / 2;
+
+ decompress_metric(pmu->entries[mid].offset, &pm);
+
+ if (!pm.metric_name && !metric)
+ goto do_call;
+
+ if (!pm.metric_name && metric) {
+ low = mid + 1;
+ continue;
+ }
+ if (pm.metric_name && !metric) {
+ high = mid - 1;
+ continue;
+ }
+
+ cmp = strcmp(pm.metric_name, metric);
+ if (cmp < 0) {
+ low = mid + 1;
+ continue;
+ }
+ if (cmp > 0) {
+ high = mid - 1;
+ continue;
+ }
+ do_call:
+ return fn ? fn(&pm, table, data) : 0;
+ }
+ return PMU_METRICS__NOT_FOUND;
+}
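[Editorial sketch: the binary search above assumes jevents emits each PMU's
records sorted with a missing (NULL) metric name ordering before any named
metric — the !pm.metric_name/!metric branches encode exactly that. The same
comparator in Python, with None modelling a NULL name:]

def find_metric_index(names, metric):
    lo, hi = 0, len(names) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        name = names[mid]
        if name == metric:                 # both None, or equal strings
            return mid                     # the C code calls fn(&pm, ...) here
        if name is None or (metric is not None and name < metric):
            lo = mid + 1                   # NULL sorts first
        else:
            hi = mid - 1
    return -1                              # PMU_METRICS__NOT_FOUND

assert find_metric_index([None, "CPI", "IPC"], "IPC") == 2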
+
int pmu_metrics_table__for_each_metric(const struct pmu_metrics_table *table,
pmu_metric_iter_fn fn,
void *data)
{
+ if (!table)
+ return 0;
for (size_t i = 0; i < table->num_pmus; i++) {
int ret = pmu_metrics_table__for_each_metric_pmu(table, &table->pmus[i],
fn, data);
@@ -503,6 +3120,29 @@ int pmu_metrics_table__for_each_metric(const struct pmu_metrics_table *table,
return 0;
}
+int pmu_metrics_table__find_metric(const struct pmu_metrics_table *table,
+ struct perf_pmu *pmu,
+ const char *metric,
+ pmu_metric_iter_fn fn,
+ void *data)
+{
+ if (!table)
+ return 0;
+ for (size_t i = 0; i < table->num_pmus; i++) {
+ const struct pmu_table_entry *table_pmu = &table->pmus[i];
+ const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
+ int ret;
+
+ if (pmu && !perf_pmu__name_wildcard_match(pmu, pmu_name))
+ continue;
+
+ ret = pmu_metrics_table__find_metric_pmu(table, table_pmu, metric, fn, data);
+ if (ret != PMU_METRICS__NOT_FOUND)
+ return ret;
+ }
+ return PMU_METRICS__NOT_FOUND;
+}
+
static const struct pmu_events_map *map_for_cpu(struct perf_cpu cpu)
{
static struct {
@@ -562,8 +3202,20 @@ static const struct pmu_events_map *map_for_pmu(struct perf_pmu *pmu)
{
struct perf_cpu cpu = {-1};
- if (pmu)
+ if (pmu) {
+ for (size_t i = 0; i < ARRAY_SIZE(pmu_events__common); i++) {
+ const char *pmu_name = &big_c_string[pmu_events__common[i].pmu_name.offset];
+
+ if (!strcmp(pmu_name, pmu->name)) {
+ const struct pmu_events_map *map = &pmu_events_map[0];
+
+ while (strcmp("common", map->arch))
+ map++;
+ return map;
+ }
+ }
cpu = perf_cpu_map__min(pmu->cpus);
+ }
return map_for_cpu(cpu);
}
@@ -587,6 +3239,22 @@ const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu)
return NULL;
}
+const struct pmu_events_table *perf_pmu__default_core_events_table(void)
+{
+ int i = 0;
+
+ for (;;) {
+ const struct pmu_events_map *map = &pmu_events_map[i++];
+
+ if (!map->arch)
+ break;
+
+ if (!strcmp(map->cpuid, "common"))
+ return &map->event_table;
+ }
+ return NULL;
+}
+
const struct pmu_metrics_table *pmu_metrics_table__find(void)
{
struct perf_cpu cpu = {-1};
@@ -595,6 +3263,22 @@ const struct pmu_metrics_table *pmu_metrics_table__find(void)
return map ? &map->metric_table : NULL;
}
+const struct pmu_metrics_table *pmu_metrics_table__default(void)
+{
+ int i = 0;
+
+ for (;;) {
+ const struct pmu_events_map *map = &pmu_events_map[i++];
+
+ if (!map->arch)
+ break;
+
+ if (!strcmp(map->cpuid, "common"))
+ return &map->metric_table;
+ }
+ return NULL;
+}
+
const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
{
for (const struct pmu_events_map *tables = &pmu_events_map[0];
diff --git a/tools/perf/pmu-events/jevents.py b/tools/perf/pmu-events/jevents.py
index 7499a35bfadd..3a1bcdcdc685 100755
--- a/tools/perf/pmu-events/jevents.py
+++ b/tools/perf/pmu-events/jevents.py
@@ -47,6 +47,9 @@ _json_event_attributes = [
'event',
# Short things in alphabetical order.
'compat', 'deprecated', 'perpkg', 'unit',
+ # Retirement latency, currently specific to Intel Granite Rapids.
+ 'retirement_latency_mean', 'retirement_latency_min',
+ 'retirement_latency_max',
# Longer things (the last won't be iterated over during decompress).
'long_desc'
]
@@ -55,10 +58,12 @@ _json_event_attributes = [
_json_metric_attributes = [
'metric_name', 'metric_group', 'metric_expr', 'metric_threshold',
'desc', 'long_desc', 'unit', 'compat', 'metricgroup_no_group',
- 'default_metricgroup_name', 'aggr_mode', 'event_grouping'
+ 'default_metricgroup_name', 'aggr_mode', 'event_grouping',
+ 'default_show_events'
]
# Attributes that are bools or enum int values, encoded as '0', '1',...
-_json_enum_attributes = ['aggr_mode', 'deprecated', 'event_grouping', 'perpkg']
+_json_enum_attributes = ['aggr_mode', 'deprecated', 'event_grouping', 'perpkg',
+ 'default_show_events']
def removesuffix(s: str, suffix: str) -> str:
"""Remove the suffix from a string
@@ -232,6 +237,7 @@ class JsonEvent:
'NO_GROUP_EVENTS_NMI': '2',
'NO_NMI_WATCHDOG': '2',
'NO_GROUP_EVENTS_SMT': '3',
+ 'NO_THRESHOLD_AND_NMI': '4',
}
return metric_constraint_to_enum[metric_constraint]
@@ -292,6 +298,7 @@ class JsonEvent:
'cpu_atom': 'cpu_atom',
'ali_drw': 'ali_drw',
'arm_cmn': 'arm_cmn',
+ 'software': 'software',
'tool': 'tool',
}
return table[unit] if unit in table else f'uncore_{unit.lower()}'
@@ -320,6 +327,8 @@ class JsonEvent:
eventcode |= int(jd['ExtSel']) << 8
configcode = int(jd['ConfigCode'], 0) if 'ConfigCode' in jd else None
eventidcode = int(jd['EventidCode'], 0) if 'EventidCode' in jd else None
+ legacy_hw_config = int(jd['LegacyConfigCode'], 0) if 'LegacyConfigCode' in jd else None
+ legacy_cache_config = int(jd['LegacyCacheCode'], 0) if 'LegacyCacheCode' in jd else None
self.name = jd['EventName'].lower() if 'EventName' in jd else None
self.topic = ''
self.compat = jd.get('Compat')
@@ -341,11 +350,15 @@ class JsonEvent:
self.perpkg = jd.get('PerPkg')
self.aggr_mode = convert_aggr_mode(jd.get('AggregationMode'))
self.deprecated = jd.get('Deprecated')
+ self.retirement_latency_mean = jd.get('RetirementLatencyMean')
+ self.retirement_latency_min = jd.get('RetirementLatencyMin')
+ self.retirement_latency_max = jd.get('RetirementLatencyMax')
self.metric_name = jd.get('MetricName')
self.metric_group = jd.get('MetricGroup')
self.metricgroup_no_group = jd.get('MetricgroupNoGroup')
self.default_metricgroup_name = jd.get('DefaultMetricgroupName')
self.event_grouping = convert_metric_constraint(jd.get('MetricConstraint'))
+ self.default_show_events = jd.get('DefaultShowEvents')
self.metric_expr = None
if 'MetricExpr' in jd:
self.metric_expr = metric.ParsePerfJson(jd['MetricExpr']).Simplify()
@@ -362,6 +375,10 @@ class JsonEvent:
event = f'config={llx(configcode)}'
elif eventidcode is not None:
event = f'eventid={llx(eventidcode)}'
+ elif legacy_hw_config is not None:
+ event = f'legacy-hardware-config={llx(legacy_hw_config)}'
+ elif legacy_cache_config is not None:
+ event = f'legacy-cache-config={llx(legacy_cache_config)}'
else:
event = f'event={llx(eventcode)}'
event_fields = [
@@ -391,6 +408,9 @@ class JsonEvent:
self.desc += extra_desc
if self.long_desc and extra_desc:
self.long_desc += extra_desc
+ if self.desc and self.long_desc and self.desc == self.long_desc:
+ # Avoid duplicated descriptions.
+ self.long_desc = None
if arch_std:
if arch_std.lower() in _arch_std_events:
event = _arch_std_events[arch_std.lower()].event
@@ -481,7 +501,8 @@ def add_events_table_entries(item: os.DirEntry, topic: str) -> None:
for e in read_json_events(item.path, topic):
if e.name:
_pending_events.append(e)
- if e.metric_name:
+ if e.metric_name and not any(e.metric_name == x.metric_name and
+ e.pmu == x.pmu for x in _pending_metrics):
_pending_metrics.append(e)
@@ -533,7 +554,7 @@ def print_pending_events() -> None:
_args.output_file.write(f"""
}};
-const struct pmu_table_entry {_pending_events_tblname}[] = {{
+static const struct pmu_table_entry {_pending_events_tblname}[] = {{
""")
for (pmu, tbl_pmu) in sorted(pmus):
pmu_name = f"{pmu}\\000"
@@ -588,7 +609,7 @@ def print_pending_metrics() -> None:
_args.output_file.write(f"""
}};
-const struct pmu_table_entry {_pending_metrics_tblname}[] = {{
+static const struct pmu_table_entry {_pending_metrics_tblname}[] = {{
""")
for (pmu, tbl_pmu) in sorted(pmus):
pmu_name = f"{pmu}\\000"
@@ -620,7 +641,7 @@ def preprocess_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
if not item.is_file() or not item.name.endswith('.json'):
return
- if item.name == 'metricgroups.json':
+ if item.name.endswith('metricgroups.json'):
metricgroup_descriptions = json.load(open(item.path))
for mgroup in metricgroup_descriptions:
assert len(mgroup) > 1, parents
@@ -673,7 +694,7 @@ def process_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
# Ignore other directories. If the file name does not have a .json
# extension, ignore it. It could be a readme.txt for instance.
- if not item.is_file() or not item.name.endswith('.json') or item.name == 'metricgroups.json':
+ if not item.is_file() or not item.name.endswith('.json') or item.name.endswith('metricgroups.json'):
return
add_events_table_entries(item, get_topic(item.name))
@@ -713,7 +734,7 @@ struct pmu_events_map {
* Global table mapping each known CPU for the architecture to its
* table of PMU events.
*/
-const struct pmu_events_map pmu_events_map[] = {
+static const struct pmu_events_map pmu_events_map[] = {
""")
for arch in archs:
if arch == 'test':
@@ -738,7 +759,10 @@ const struct pmu_events_map pmu_events_map[] = {
\t\t.pmus = pmu_events__common,
\t\t.num_pmus = ARRAY_SIZE(pmu_events__common),
\t},
-\t.metric_table = {},
+\t.metric_table = {
+\t\t.pmus = pmu_metrics__common,
+\t\t.num_pmus = ARRAY_SIZE(pmu_metrics__common),
+\t},
},
""")
else:
@@ -940,6 +964,8 @@ int pmu_events_table__for_each_event(const struct pmu_events_table *table,
pmu_event_iter_fn fn,
void *data)
{
+ if (!table)
+ return 0;
for (size_t i = 0; i < table->num_pmus; i++) {
const struct pmu_table_entry *table_pmu = &table->pmus[i];
const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
@@ -961,12 +987,14 @@ int pmu_events_table__find_event(const struct pmu_events_table *table,
pmu_event_iter_fn fn,
void *data)
{
+ if (!table)
+ return PMU_EVENTS__NOT_FOUND;
for (size_t i = 0; i < table->num_pmus; i++) {
const struct pmu_table_entry *table_pmu = &table->pmus[i];
const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
int ret;
- if (!perf_pmu__name_wildcard_match(pmu, pmu_name))
+ if (pmu && !perf_pmu__name_wildcard_match(pmu, pmu_name))
continue;
ret = pmu_events_table__find_event_pmu(table, table_pmu, name, fn, data);
@@ -981,6 +1009,8 @@ size_t pmu_events_table__num_events(const struct pmu_events_table *table,
{
size_t count = 0;
+ if (!table)
+ return 0;
for (size_t i = 0; i < table->num_pmus; i++) {
const struct pmu_table_entry *table_pmu = &table->pmus[i];
const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
@@ -1012,10 +1042,55 @@ static int pmu_metrics_table__for_each_metric_pmu(const struct pmu_metrics_table
return 0;
}
+static int pmu_metrics_table__find_metric_pmu(const struct pmu_metrics_table *table,
+ const struct pmu_table_entry *pmu,
+ const char *metric,
+ pmu_metric_iter_fn fn,
+ void *data)
+{
+ struct pmu_metric pm = {
+ .pmu = &big_c_string[pmu->pmu_name.offset],
+ };
+ int low = 0, high = pmu->num_entries - 1;
+
+ while (low <= high) {
+ int cmp, mid = (low + high) / 2;
+
+ decompress_metric(pmu->entries[mid].offset, &pm);
+
+ if (!pm.metric_name && !metric)
+ goto do_call;
+
+ if (!pm.metric_name && metric) {
+ low = mid + 1;
+ continue;
+ }
+ if (pm.metric_name && !metric) {
+ high = mid - 1;
+ continue;
+ }
+
+ cmp = strcmp(pm.metric_name, metric);
+ if (cmp < 0) {
+ low = mid + 1;
+ continue;
+ }
+ if (cmp > 0) {
+ high = mid - 1;
+ continue;
+ }
+ do_call:
+ return fn ? fn(&pm, table, data) : 0;
+ }
+ return PMU_METRICS__NOT_FOUND;
+}
+
int pmu_metrics_table__for_each_metric(const struct pmu_metrics_table *table,
pmu_metric_iter_fn fn,
void *data)
{
+ if (!table)
+ return 0;
for (size_t i = 0; i < table->num_pmus; i++) {
int ret = pmu_metrics_table__for_each_metric_pmu(table, &table->pmus[i],
fn, data);
@@ -1026,6 +1101,29 @@ int pmu_metrics_table__for_each_metric(const struct pmu_metrics_table *table,
return 0;
}
+int pmu_metrics_table__find_metric(const struct pmu_metrics_table *table,
+ struct perf_pmu *pmu,
+ const char *metric,
+ pmu_metric_iter_fn fn,
+ void *data)
+{
+ if (!table)
+ return 0;
+ for (size_t i = 0; i < table->num_pmus; i++) {
+ const struct pmu_table_entry *table_pmu = &table->pmus[i];
+ const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
+ int ret;
+
+ if (pmu && !perf_pmu__name_wildcard_match(pmu, pmu_name))
+ continue;
+
+ ret = pmu_metrics_table__find_metric_pmu(table, table_pmu, metric, fn, data);
+ if (ret != PMU_METRICS__NOT_FOUND)
+ return ret;
+ }
+ return PMU_METRICS__NOT_FOUND;
+}
+
static const struct pmu_events_map *map_for_cpu(struct perf_cpu cpu)
{
static struct {
@@ -1085,8 +1183,20 @@ static const struct pmu_events_map *map_for_pmu(struct perf_pmu *pmu)
{
struct perf_cpu cpu = {-1};
- if (pmu)
+ if (pmu) {
+ for (size_t i = 0; i < ARRAY_SIZE(pmu_events__common); i++) {
+ const char *pmu_name = &big_c_string[pmu_events__common[i].pmu_name.offset];
+
+ if (!strcmp(pmu_name, pmu->name)) {
+ const struct pmu_events_map *map = &pmu_events_map[0];
+
+ while (strcmp("common", map->arch))
+ map++;
+ return map;
+ }
+ }
cpu = perf_cpu_map__min(pmu->cpus);
+ }
return map_for_cpu(cpu);
}
@@ -1110,6 +1220,22 @@ const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu)
return NULL;
}
+const struct pmu_events_table *perf_pmu__default_core_events_table(void)
+{
+ int i = 0;
+
+ for (;;) {
+ const struct pmu_events_map *map = &pmu_events_map[i++];
+
+ if (!map->arch)
+ break;
+
+ if (!strcmp(map->cpuid, "common"))
+ return &map->event_table;
+ }
+ return NULL;
+}
+
const struct pmu_metrics_table *pmu_metrics_table__find(void)
{
struct perf_cpu cpu = {-1};
@@ -1118,6 +1244,22 @@ const struct pmu_metrics_table *pmu_metrics_table__find(void)
return map ? &map->metric_table : NULL;
}
+const struct pmu_metrics_table *pmu_metrics_table__default(void)
+{
+ int i = 0;
+
+ for (;;) {
+ const struct pmu_events_map *map = &pmu_events_map[i++];
+
+ if (!map->arch)
+ break;
+
+ if (!strcmp(map->cpuid, "common"))
+ return &map->metric_table;
+ }
+ return NULL;
+}
+
const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
{
for (const struct pmu_events_map *tables = &pmu_events_map[0];
diff --git a/tools/perf/pmu-events/make_legacy_cache.py b/tools/perf/pmu-events/make_legacy_cache.py
new file mode 100755
index 000000000000..28a1ff804f86
--- /dev/null
+++ b/tools/perf/pmu-events/make_legacy_cache.py
@@ -0,0 +1,129 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+import json
+
+hw_cache_id = [
+ (0, # PERF_COUNT_HW_CACHE_L1D
+ ["L1-dcache", "l1-d", "l1d", "L1-data",],
+ [0, 1, 2,], # read, write, prefetch
+ "Level 1 data cache",
+ ),
+ (1, # PERF_COUNT_HW_CACHE_L1I
+ ["L1-icache", "l1-i", "l1i", "L1-instruction",],
+ [0, 2,], # read, prefetch
+ "Level 1 instruction cache",
+ ),
+ (2, # PERF_COUNT_HW_CACHE_LL
+ ["LLC", "L2"],
+ [0, 1, 2,], # read, write, prefetch
+ "Last level cache",
+ ),
+ (3, # PERF_COUNT_HW_CACHE_DTLB
+ ["dTLB", "d-tlb", "Data-TLB",],
+ [0, 1, 2,], # read, write, prefetch
+ "Data TLB",
+ ),
+ (4, # PERF_COUNT_HW_CACHE_ITLB
+ ["iTLB", "i-tlb", "Instruction-TLB",],
+ [0,], # read
+ "Instruction TLB",
+ ),
+ (5, # PERF_COUNT_HW_CACHE_BPU
+ ["branch", "branches", "bpu", "btb", "bpc",],
+ [0,], # read
+ "Branch prediction unit",
+ ),
+ (6, # PERF_COUNT_HW_CACHE_NODE
+ ["node",],
+ [0, 1, 2,], # read, write, prefetch
+ "Local memory",
+ ),
+]
+
+hw_cache_op = [
+ (0, # PERF_COUNT_HW_CACHE_OP_READ
+ ["load", "loads", "read",],
+ "read"),
+ (1, # PERF_COUNT_HW_CACHE_OP_WRITE
+ ["store", "stores", "write",],
+ "write"),
+ (2, # PERF_COUNT_HW_CACHE_OP_PREFETCH
+ ["prefetch", "prefetches", "speculative-read", "speculative-load",],
+ "prefetch"),
+]
+
+hw_cache_result = [
+ (0, # PERF_COUNT_HW_CACHE_RESULT_ACCESS
+ ["refs", "Reference", "ops", "access",],
+ "accesses"),
+ (1, # PERF_COUNT_HW_CACHE_RESULT_MISS
+ ["misses", "miss",],
+ "misses"),
+]
+
+events = []
+def add_event(name: str,
+ cache_id: int, cache_op: int, cache_result: int,
+ desc: str,
+ deprecated: bool) -> None:
+ # Avoid conflicts with PERF_TYPE_HARDWARE events which are higher priority.
+ if name in ["branch-misses", "branches"]:
+ return
+
+ # Tweak and deprecate L2 named events.
+ if name.startswith("L2"):
+ desc = desc.replace("Last level cache", "Level 2 (or higher) last level cache")
+ deprecated = True
+
+ event = {
+ "EventName": name,
+ "BriefDescription": desc,
+ "LegacyCacheCode": f"0x{cache_id | (cache_op << 8) | (cache_result << 16):06x}",
+ }
+
+ # Deprecate events whose names start with "L2", as they are actively
+ # confusing: on many machines "L2" actually means the L3 cache.
+ if deprecated:
+ event["Deprecated"] = "1"
+ events.append(event)
+
+for (cache_id, names, ops, cache_desc) in hw_cache_id:
+ for name in names:
+ add_event(name,
+ cache_id,
+ 0, # PERF_COUNT_HW_CACHE_OP_READ
+ 0, # PERF_COUNT_HW_CACHE_RESULT_ACCESS
+ f"{cache_desc} read accesses.",
+ deprecated=True)
+
+ for (op, op_names, op_desc) in hw_cache_op:
+ if op not in ops:
+ continue
+ for op_name in op_names:
+ deprecated = (names[0] != name or op_names[1] != op_name)
+ add_event(f"{name}-{op_name}",
+ cache_id,
+ op,
+ 0, # PERF_COUNT_HW_CACHE_RESULT_ACCESS
+ f"{cache_desc} {op_desc} accesses.",
+ deprecated)
+
+ for (result, result_names, result_desc) in hw_cache_result:
+ for result_name in result_names:
+ deprecated = ((names[0] != name or op_names[0] != op_name) or
+ (result == 0) or (result_names[0] != result_name))
+ add_event(f"{name}-{op_name}-{result_name}",
+ cache_id, op, result,
+ f"{cache_desc} {op_desc} {result_desc}.",
+ deprecated)
+
+ for (result, result_names, result_desc) in hw_cache_result:
+ for result_name in result_names:
+ add_event(f"{name}-{result_name}",
+ cache_id,
+ 0, # PERF_COUNT_HW_CACHE_OP_READ
+ result,
+ f"{cache_desc} read {result_desc}.",
+ deprecated=True)
+
+print(json.dumps(events, indent=2))
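[Worked example: LegacyCacheCode packs cache_id | (op << 8) | (result << 16).
For node-store-misses that is NODE=6, OP_WRITE=1, RESULT_MISS=1, matching the
legacy-cache-config=0x10106 seen in the generated table at the top of this
patch:]

NODE, OP_WRITE, RESULT_MISS = 6, 1, 1  # PERF_COUNT_HW_CACHE_* values above
config = NODE | (OP_WRITE << 8) | (RESULT_MISS << 16)
assert config == 0x10106
print(f"0x{config:06x}")  # the JSON carries 0x010106; jevents emits 0x10106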
diff --git a/tools/perf/pmu-events/metric.py b/tools/perf/pmu-events/metric.py
index 92acd89ed97a..dd8fd06940e6 100644
--- a/tools/perf/pmu-events/metric.py
+++ b/tools/perf/pmu-events/metric.py
@@ -4,8 +4,14 @@ import ast
import decimal
import json
import re
+from enum import Enum
from typing import Dict, List, Optional, Set, Tuple, Union
+class MetricConstraint(Enum):
+ GROUPED_EVENTS = 0
+ NO_GROUP_EVENTS = 1
+ NO_GROUP_EVENTS_NMI = 2
+ NO_GROUP_EVENTS_SMT = 3
class Expression:
"""Abstract base class of elements in a metric expression."""
@@ -423,14 +429,16 @@ class Metric:
groups: Set[str]
expr: Expression
scale_unit: str
- constraint: bool
+ constraint: MetricConstraint
+ threshold: Optional[Expression]
def __init__(self,
name: str,
description: str,
expr: Expression,
scale_unit: str,
- constraint: bool = False):
+ constraint: MetricConstraint = MetricConstraint.GROUPED_EVENTS,
+ threshold: Optional[Expression] = None):
self.name = name
self.description = description
self.expr = expr.Simplify()
@@ -441,6 +449,7 @@ class Metric:
else:
self.scale_unit = f'1{scale_unit}'
self.constraint = constraint
+ self.threshold = threshold
self.groups = set()
def __lt__(self, other):
@@ -449,7 +458,8 @@ class Metric:
def AddToMetricGroup(self, group):
"""Callback used when being added to a MetricGroup."""
- self.groups.add(group.name)
+ if group.name:
+ self.groups.add(group.name)
def Flatten(self) -> Set['Metric']:
"""Return a leaf metric."""
@@ -464,20 +474,15 @@ class Metric:
'MetricExpr': self.expr.ToPerfJson(),
'ScaleUnit': self.scale_unit
}
- if self.constraint:
- result['MetricConstraint'] = 'NO_NMI_WATCHDOG'
+ if self.constraint != MetricConstraint.GROUPED_EVENTS:
+ result['MetricConstraint'] = self.constraint.name
+ if self.threshold:
+ result['MetricThreshold'] = self.threshold.ToPerfJson()
return result
-
-class _MetricJsonEncoder(json.JSONEncoder):
- """Special handling for Metric objects."""
-
- def default(self, o):
- if isinstance(o, Metric):
- return o.ToPerfJson()
- return json.JSONEncoder.default(self, o)
-
+ def ToMetricGroupDescriptions(self, root: bool = True) -> Dict[str, str]:
+ return {}
class MetricGroup:
"""A group of metrics.
@@ -487,12 +492,16 @@ class MetricGroup:
which can facilitate arrangements similar to trees.
"""
- def __init__(self, name: str, metric_list: List[Union[Metric,
- 'MetricGroup']]):
+ def __init__(self, name: str,
+ metric_list: List[Union[Optional[Metric], Optional['MetricGroup']]],
+ description: Optional[str] = None):
self.name = name
- self.metric_list = metric_list
+ self.metric_list = []
+ self.description = description
for metric in metric_list:
- metric.AddToMetricGroup(self)
+ if metric:
+ self.metric_list.append(metric)
+ metric.AddToMetricGroup(self)
def AddToMetricGroup(self, group):
"""Callback used when a MetricGroup is added into another."""
@@ -507,11 +516,36 @@ class MetricGroup:
return result
- def ToPerfJson(self) -> str:
- return json.dumps(sorted(self.Flatten()), indent=2, cls=_MetricJsonEncoder)
+ def ToPerfJson(self) -> List[Dict[str, str]]:
+ result = []
+ for x in sorted(self.Flatten()):
+ result.append(x.ToPerfJson())
+ return result
+
+ def ToMetricGroupDescriptions(self, root: bool = True) -> Dict[str, str]:
+ result = {self.name: self.description} if self.description else {}
+ for x in self.metric_list:
+ result.update(x.ToMetricGroupDescriptions(False))
+ return result
def __str__(self) -> str:
- return self.ToPerfJson()
+ return str(self.ToPerfJson())
+
+
+def JsonEncodeMetric(x: MetricGroup):
+ class MetricJsonEncoder(json.JSONEncoder):
+ """Special handling for Metric objects."""
+
+ def default(self, o):
+ if isinstance(o, Metric) or isinstance(o, MetricGroup):
+ return o.ToPerfJson()
+ return json.JSONEncoder.default(self, o)
+
+ return json.dumps(x, indent=2, cls=MetricJsonEncoder)
+
+
+def JsonEncodeMetricGroupDescriptions(x: MetricGroup):
+ return json.dumps(x.ToMetricGroupDescriptions(), indent=2)
class _RewriteIfExpToSelect(ast.NodeTransformer):
@@ -551,12 +585,18 @@ def ParsePerfJson(orig: str) -> Expression:
r'Event(r"\1")', py)
# If it started with a # it should have been a literal, rather than an event name
py = re.sub(r'#Event\(r"([^"]*)"\)', r'Literal("#\1")', py)
+ # Fix events wrongly broken at a ','
+ while True:
+ prev_py = py
+ py = re.sub(r'Event\(r"([^"]*)"\),Event\(r"([^"]*)"\)', r'Event(r"\1,\2")', py)
+ if py == prev_py:
+ break
# Convert accidentally converted hex constants ("0Event(r"xDEADBEEF)"") back to a constant,
# but keep it wrapped in Event(), otherwise Python drops the 0x prefix and it gets interpreted as
# a double by the Bison parser
py = re.sub(r'0Event\(r"[xX]([0-9a-fA-F]*)"\)', r'Event("0x\1")', py)
# Convert accidentally converted scientific notation constants back
- py = re.sub(r'([0-9]+)Event\(r"(e[0-9]+)"\)', r'\1\2', py)
+ py = re.sub(r'([0-9]+)Event\(r"(e[0-9]*)"\)', r'\1\2', py)
# Convert all the known keywords back from events to just the keyword
keywords = ['if', 'else', 'min', 'max', 'd_ratio', 'source_count', 'has_event', 'strcmp_cpuid_str']
for kw in keywords:
@@ -569,7 +609,6 @@ def ParsePerfJson(orig: str) -> Expression:
parsed = ast.fix_missing_locations(parsed)
return _Constify(eval(compile(parsed, orig, 'eval')))
-
def RewriteMetricsInTermsOfOthers(metrics: List[Tuple[str, str, Expression]]
)-> Dict[Tuple[str, str], Expression]:
"""Shorten metrics by rewriting in terms of others.
diff --git a/tools/perf/pmu-events/metric_test.py b/tools/perf/pmu-events/metric_test.py
index ee22ff43ddd7..8acfe4652b55 100755
--- a/tools/perf/pmu-events/metric_test.py
+++ b/tools/perf/pmu-events/metric_test.py
@@ -61,6 +61,10 @@ class TestMetricExpressions(unittest.TestCase):
after = before
self.assertEqual(ParsePerfJson(before).ToPerfJson(), after)
+ before = r'a + 3e-12 + b'
+ after = before
+ self.assertEqual(ParsePerfJson(before).ToPerfJson(), after)
+
def test_IfElseTests(self):
# if-else needs rewriting to Select and back.
before = r'Event1 if #smt_on else Event2'
diff --git a/tools/perf/pmu-events/pmu-events.h b/tools/perf/pmu-events/pmu-events.h
index 675562e6f770..d3b24014c6ff 100644
--- a/tools/perf/pmu-events/pmu-events.h
+++ b/tools/perf/pmu-events/pmu-events.h
@@ -25,15 +25,21 @@ enum metric_event_groups {
*/
MetricNoGroupEvents = 1,
/**
- * @MetricNoGroupEventsNmi: Don't group events for the metric if the NMI
- * watchdog is enabled.
+ * @MetricNoGroupEventsNmi:
+ * Don't group events for the metric if the NMI watchdog is enabled.
*/
MetricNoGroupEventsNmi = 2,
/**
- * @MetricNoGroupEventsSmt: Don't group events for the metric if SMT is
- * enabled.
+ * @MetricNoGroupEventsSmt:
+ * Don't group events for the metric if SMT is enabled.
*/
MetricNoGroupEventsSmt = 3,
+ /**
+ * @MetricNoGroupEventsThresholdAndNmi:
+ * Don't group events for the metric thresholds and if the NMI watchdog
+ * is enabled.
+ */
+ MetricNoGroupEventsThresholdAndNmi = 4,
};
/*
* Describe each PMU event. Each CPU has a table of PMU events.
@@ -47,6 +53,9 @@ struct pmu_event {
const char *long_desc;
const char *pmu;
const char *unit;
+ const char *retirement_latency_mean;
+ const char *retirement_latency_min;
+ const char *retirement_latency_max;
bool perpkg;
bool deprecated;
};
@@ -65,12 +74,14 @@ struct pmu_metric {
const char *default_metricgroup_name;
enum aggr_mode_class aggr_mode;
enum metric_event_groups event_grouping;
+ bool default_show_events;
};
struct pmu_events_table;
struct pmu_metrics_table;
#define PMU_EVENTS__NOT_FOUND -1000
+#define PMU_METRICS__NOT_FOUND -1000
typedef int (*pmu_event_iter_fn)(const struct pmu_event *pe,
const struct pmu_events_table *table,
@@ -85,11 +96,11 @@ int pmu_events_table__for_each_event(const struct pmu_events_table *table,
pmu_event_iter_fn fn,
void *data);
/*
- * Search for table and entry matching with pmu__name_match. Each matching event
- * has fn called on it. 0 implies to success/continue the search while non-zero
- * means to terminate. The special value PMU_EVENTS__NOT_FOUND is used to
- * indicate no event was found in one of the tables which doesn't terminate the
- * search of all tables.
+ * Search for a table and entry matching with perf_pmu__name_wildcard_match,
+ * or any table if pmu is NULL. fn is called on each matching event; returning
+ * 0 means success and continues the search, while non-zero terminates it. The
+ * special value PMU_EVENTS__NOT_FOUND indicates that no event was found in one
+ * of the tables, which doesn't terminate the search of all tables.
*/
int pmu_events_table__find_event(const struct pmu_events_table *table,
struct perf_pmu *pmu,
@@ -101,9 +112,23 @@ size_t pmu_events_table__num_events(const struct pmu_events_table *table,
int pmu_metrics_table__for_each_metric(const struct pmu_metrics_table *table, pmu_metric_iter_fn fn,
void *data);
+/*
+ * Search for a table and entry matching with perf_pmu__name_wildcard_match,
+ * or any table if pmu is NULL. fn is called on each matching metric; returning
+ * 0 means success and continues the search, while non-zero terminates it. The
+ * special value PMU_METRICS__NOT_FOUND indicates that no metric was found in
+ * one of the tables, which doesn't terminate the search of all tables.
+ */
+int pmu_metrics_table__find_metric(const struct pmu_metrics_table *table,
+ struct perf_pmu *pmu,
+ const char *metric,
+ pmu_metric_iter_fn fn,
+ void *data);
const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu);
+const struct pmu_events_table *perf_pmu__default_core_events_table(void);
const struct pmu_metrics_table *pmu_metrics_table__find(void);
+const struct pmu_metrics_table *pmu_metrics_table__default(void);
const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid);
const struct pmu_metrics_table *find_core_metrics_table(const char *arch, const char *cpuid);
int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data);
diff --git a/tools/perf/python/counting.py b/tools/perf/python/counting.py
new file mode 100755
index 000000000000..02121d2bb11d
--- /dev/null
+++ b/tools/perf/python/counting.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+# -*- python -*-
+# -*- coding: utf-8 -*-
+
+import argparse
+import perf
+
+def main(event: str):
+ evlist = perf.parse_events(event)
+
+ for evsel in evlist:
+ evsel.read_format = perf.FORMAT_TOTAL_TIME_ENABLED | perf.FORMAT_TOTAL_TIME_RUNNING
+
+ evlist.open()
+ evlist.enable()
+
+ count = 100000
+ while count > 0:
+ count -= 1
+
+ evlist.disable()
+
+ for evsel in evlist:
+ for cpu in evsel.cpus():
+ for thread in evsel.threads():
+ counts = evsel.read(cpu, thread)
+ print(f"For {evsel} val: {counts.val} enable: {counts.ena} run: {counts.run}")
+
+ evlist.close()
+
+if __name__ == '__main__':
+ ap = argparse.ArgumentParser()
+ ap.add_argument('-e', '--event', help="Events to open", default="cpu-clock,task-clock")
+ args = ap.parse_args()
+ main(args.event)
diff --git a/tools/perf/python/ilist.py b/tools/perf/python/ilist.py
new file mode 100755
index 000000000000..0d757ddb4795
--- /dev/null
+++ b/tools/perf/python/ilist.py
@@ -0,0 +1,515 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+"""Interactive perf list."""
+
+from abc import ABC, abstractmethod
+import argparse
+from dataclasses import dataclass
+import math
+from typing import Any, Dict, Optional, Tuple
+import perf
+from textual import on
+from textual.app import App, ComposeResult
+from textual.binding import Binding
+from textual.containers import Horizontal, HorizontalGroup, Vertical, VerticalScroll
+from textual.css.query import NoMatches
+from textual.command import SearchIcon
+from textual.screen import ModalScreen
+from textual.widgets import Button, Footer, Header, Input, Label, Sparkline, Static, Tree
+from textual.widgets.tree import TreeNode
+
+
+def get_info(info: Dict[str, str], key: str):
+ return (info[key] + "\n") if key in info else ""
+
+
+class TreeValue(ABC):
+ """Abstraction for the data of value in the tree."""
+
+ @abstractmethod
+ def name(self) -> str:
+ pass
+
+ @abstractmethod
+ def description(self) -> str:
+ pass
+
+ @abstractmethod
+ def matches(self, query: str) -> bool:
+ pass
+
+ @abstractmethod
+ def parse(self) -> perf.evlist:
+ pass
+
+ @abstractmethod
+ def value(self, evlist: perf.evlist, evsel: perf.evsel, cpu: int, thread: int) -> float:
+ pass
+
+
+@dataclass
+class Metric(TreeValue):
+ """A metric in the tree."""
+ metric_name: str
+ metric_pmu: str
+
+ def name(self) -> str:
+ return self.metric_name
+
+ def description(self) -> str:
+ """Find and format metric description."""
+ for metric in perf.metrics():
+ if metric["MetricName"] != self.metric_name:
+ continue
+ if self.metric_pmu and metric["PMU"] != self.metric_pmu:
+ continue
+ desc = get_info(metric, "BriefDescription")
+ desc += get_info(metric, "PublicDescription")
+ desc += get_info(metric, "MetricExpr")
+ desc += get_info(metric, "MetricThreshold")
+ return desc
+ return "description"
+
+ def matches(self, query: str) -> bool:
+ return query in self.metric_name
+
+ def parse(self) -> perf.evlist:
+ return perf.parse_metrics(self.metric_name, self.metric_pmu)
+
+ def value(self, evlist: perf.evlist, evsel: perf.evsel, cpu: int, thread: int) -> float:
+ try:
+ val = evlist.compute_metric(self.metric_name, cpu, thread)
+ return 0 if math.isnan(val) else val
+ except Exception:
+ # Be tolerant of failures to compute metrics on particular CPUs/threads.
+ return 0
+
+
+@dataclass
+class PmuEvent(TreeValue):
+ """A PMU and event within the tree."""
+ pmu: str
+ event: str
+
+ def name(self) -> str:
+ if self.event.startswith(self.pmu) or ':' in self.event:
+ return self.event
+ else:
+ return f"{self.pmu}/{self.event}/"
+
+ def description(self) -> str:
+ """Find and format event description for {pmu}/{event}/."""
+ for p in perf.pmus():
+ if p.name() != self.pmu:
+ continue
+ for info in p.events():
+ if "name" not in info or info["name"] != self.event:
+ continue
+
+ desc = get_info(info, "topic")
+ desc += get_info(info, "event_type_desc")
+ desc += get_info(info, "desc")
+ desc += get_info(info, "long_desc")
+ desc += get_info(info, "encoding_desc")
+ return desc
+ return "description"
+
+ def matches(self, query: str) -> bool:
+ return query in self.pmu or query in self.event
+
+ def parse(self) -> perf.evlist:
+ return perf.parse_events(self.name())
+
+ def value(self, evlist: perf.evlist, evsel: perf.evsel, cpu: int, thread: int) -> float:
+ return evsel.read(cpu, thread).val
+
+
+class ErrorScreen(ModalScreen[bool]):
+ """Pop up dialog for errors."""
+
+ CSS = """
+ ErrorScreen {
+ align: center middle;
+ }
+ """
+
+ def __init__(self, error: str):
+ self.error = error
+ super().__init__()
+
+ def compose(self) -> ComposeResult:
+ yield Button(f"Error: {self.error}", variant="primary", id="error")
+
+ def on_button_pressed(self, event: Button.Pressed) -> None:
+ self.dismiss(True)
+
+
+class SearchScreen(ModalScreen[str]):
+ """Pop up dialog for search."""
+
+ CSS = """
+ SearchScreen Horizontal {
+ align: center middle;
+ margin-top: 1;
+ }
+ SearchScreen Input {
+ width: 1fr;
+ }
+ """
+
+ def compose(self) -> ComposeResult:
+ yield Horizontal(SearchIcon(), Input(placeholder="Event name"))
+
+ def on_input_submitted(self, event: Input.Submitted) -> None:
+ """Handle the user pressing Enter in the input field."""
+ self.dismiss(event.value)
+
+
+class Counter(HorizontalGroup):
+ """Two labels for a CPU and its counter value."""
+
+ CSS = """
+ Label {
+ gutter: 1;
+ }
+ """
+
+ def __init__(self, cpu: int) -> None:
+ self.cpu = cpu
+ super().__init__()
+
+ def compose(self) -> ComposeResult:
+ label = f"cpu{self.cpu}" if self.cpu >= 0 else "total"
+ yield Label(label + " ")
+ yield Label("0", id=f"counter_{label}")
+
+
+class CounterSparkline(HorizontalGroup):
+ """A Sparkline for a performance counter."""
+
+ def __init__(self, cpu: int) -> None:
+ self.cpu = cpu
+ super().__init__()
+
+ def compose(self) -> ComposeResult:
+ label = f"cpu{self.cpu}" if self.cpu >= 0 else "total"
+ yield Label(label)
+ yield Sparkline([], summary_function=max, id=f"sparkline_{label}")
+
+
+class IListApp(App):
+ TITLE = "Interactive Perf List"
+
+ BINDINGS = [
+ Binding(key="s", action="search", description="Search",
+ tooltip="Search events and PMUs"),
+ Binding(key="n", action="next", description="Next",
+ tooltip="Next search result or item"),
+ Binding(key="p", action="prev", description="Previous",
+ tooltip="Previous search result or item"),
+ Binding(key="c", action="collapse", description="Collapse",
+ tooltip="Collapse the current PMU"),
+ Binding(key="^q", action="quit", description="Quit",
+ tooltip="Quit the app"),
+ ]
+
+ CSS = """
+ /* Make the 'total' sparkline a different color. */
+ #sparkline_total > .sparkline--min-color {
+ color: $accent;
+ }
+ #sparkline_total > .sparkline--max-color {
+ color: $accent 30%;
+ }
+ /*
+ * Make the active_search initially not displayed with the text in
+ * the middle of the line.
+ */
+ #active_search {
+ display: none;
+ width: 100%;
+ text-align: center;
+ }
+ """
+
+ def __init__(self, interval: float) -> None:
+ self.interval = interval
+ self.evlist = None
+ self.selected: Optional[TreeValue] = None
+ self.search_results: list[TreeNode[TreeValue]] = []
+ self.cur_search_result: TreeNode[TreeValue] | None = None
+ super().__init__()
+
+ def expand_and_select(self, node: TreeNode[Any]) -> None:
+ """Expand select a node in the tree."""
+ if node.parent:
+ node.parent.expand()
+ if node.parent.parent:
+ node.parent.parent.expand()
+ node.expand()
+ node.tree.select_node(node)
+ node.tree.scroll_to_node(node)
+
+ def set_searched_tree_node(self, previous: bool) -> None:
+ """Set the cur_search_result node to either the next or previous."""
+ l = len(self.search_results)
+
+ if l < 1:
+ tree: Tree[TreeValue] = self.query_one("#root", Tree)
+ if previous:
+ tree.action_cursor_up()
+ else:
+ tree.action_cursor_down()
+ return
+
+ if self.cur_search_result:
+ idx = self.search_results.index(self.cur_search_result)
+ if previous:
+ idx = idx - 1 if idx > 0 else l - 1
+ else:
+ idx = idx + 1 if idx < l - 1 else 0
+ else:
+ idx = l - 1 if previous else 0
+
+ node = self.search_results[idx]
+ if node == self.cur_search_result:
+ return
+
+ self.cur_search_result = node
+ self.expand_and_select(node)
+
+ def action_search(self) -> None:
+ """Search was chosen."""
+ def set_initial_focus(event: str | None) -> None:
+ """Sets the focus after the SearchScreen is dismissed."""
+
+ search_label = self.query_one("#active_search", Label)
+ search_label.display = True if event else False
+ if not event:
+ return
+ event = event.lower()
+ search_label.update(f'Searching for events matching "{event}"')
+
+ tree: Tree[str] = self.query_one("#root", Tree)
+
+ def find_search_results(event: str, node: TreeNode[str],
+ cursor_seen: bool = False,
+ match_after_cursor: Optional[TreeNode[str]] = None
+ ) -> Tuple[bool, Optional[TreeNode[str]]]:
+ """Find nodes that match the search remembering the one after the cursor."""
+ if not cursor_seen and node == tree.cursor_node:
+ cursor_seen = True
+ if node.data and node.data.matches(event):
+ if cursor_seen and not match_after_cursor:
+ match_after_cursor = node
+ self.search_results.append(node)
+
+ if node.children:
+ for child in node.children:
+ (cursor_seen, match_after_cursor) = \
+ find_search_results(event, child, cursor_seen, match_after_cursor)
+ return (cursor_seen, match_after_cursor)
+
+ self.search_results.clear()
+ (_, self.cur_search_result) = find_search_results(event, tree.root)
+ if len(self.search_results) < 1:
+ self.push_screen(ErrorScreen(f"Failed to find pmu/event or metric {event}"))
+ search_label.display = False
+ elif self.cur_search_result:
+ self.expand_and_select(self.cur_search_result)
+ else:
+ self.set_searched_tree_node(previous=False)
+
+ self.push_screen(SearchScreen(), set_initial_focus)
+
+ def action_next(self) -> None:
+ """Next was chosen."""
+ self.set_searched_tree_node(previous=False)
+
+ def action_prev(self) -> None:
+ """Previous was chosen."""
+ self.set_searched_tree_node(previous=True)
+
+ def action_collapse(self) -> None:
+ """Collapse the part of the tree currently on."""
+ tree: Tree[str] = self.query_one("#root", Tree)
+ node = tree.cursor_node
+ if node and node.parent:
+ node.parent.collapse_all()
+ node.tree.scroll_to_node(node.parent)
+
+ def update_counts(self) -> None:
+ """Called every interval to update counts."""
+ if not self.selected or not self.evlist:
+ return
+
+ def update_count(cpu: int, count: int):
+ # Update the raw count display.
+ counter: Label = self.query(f"#counter_cpu{cpu}" if cpu >= 0 else "#counter_total")
+ if not counter:
+ return
+ counter = counter.first(Label)
+ counter.update(str(count))
+
+ # Update the sparkline.
+ line: Sparkline = self.query(f"#sparkline_cpu{cpu}" if cpu >= 0 else "#sparkline_total")
+ if not line:
+ return
+ line = line.first(Sparkline)
+ # If there are more data points than the width, drop the oldest.
+ if len(line.data) > line.size.width:
+ line.data.pop(0)
+ line.data.append(count)
+ line.mutate_reactive(Sparkline.data)
+
+ # Update the total and each CPU counts, assume there's just 1 evsel.
+ total = 0
+ self.evlist.disable()
+ for evsel in self.evlist:
+ for cpu in evsel.cpus():
+ aggr = 0
+ for thread in evsel.threads():
+ aggr += self.selected.value(self.evlist, evsel, cpu, thread)
+ update_count(cpu, aggr)
+ total += aggr
+ update_count(-1, total)
+ self.evlist.enable()
+
+ def on_mount(self) -> None:
+ """When App starts set up periodic event updating."""
+ self.update_counts()
+ self.set_interval(self.interval, self.update_counts)
+
+ def set_selected(self, value: TreeValue) -> None:
+ """Updates the event/description and starts the counters."""
+ try:
+ label_name = self.query_one("#event_name", Label)
+ event_description = self.query_one("#event_description", Static)
+ lines = self.query_one("#lines")
+ except NoMatches:
+ # A race with rendering; ignore the update as we can't
+ # mount the assumed output widgets.
+ return
+
+ self.selected = value
+
+ # Remove previous event information.
+ if self.evlist:
+ self.evlist.disable()
+ self.evlist.close()
+ old_lines = self.query(CounterSparkline)
+ for line in old_lines:
+ line.remove()
+ old_counters = self.query(Counter)
+ for counter in old_counters:
+ counter.remove()
+
+ # Update event/metric text and description.
+ label_name.update(value.name())
+ event_description.update(value.description())
+
+ # Open the event.
+ try:
+ self.evlist = value.parse()
+ if self.evlist:
+ self.evlist.open()
+ self.evlist.enable()
+ except:
+ self.evlist = None
+
+ if not self.evlist:
+ self.push_screen(ErrorScreen(f"Failed to open {value.name()}"))
+ return
+
+ # Add spark lines for all the CPUs. Note, must be done after
+ # open so that the evlist CPUs have been computed by propagate
+ # maps.
+ line = CounterSparkline(cpu=-1)
+ lines.mount(line)
+ for cpu in self.evlist.all_cpus():
+ line = CounterSparkline(cpu)
+ lines.mount(line)
+ line = Counter(cpu=-1)
+ lines.mount(line)
+ for cpu in self.evlist.all_cpus():
+ line = Counter(cpu)
+ lines.mount(line)
+
+ def compose(self) -> ComposeResult:
+ """Draws the app."""
+ def metric_event_tree() -> Tree:
+ """Create tree of PMUs and metricgroups with events or metrics under."""
+ tree: Tree[TreeValue] = Tree("Root", id="root")
+ pmus = tree.root.add("PMUs")
+ for pmu in perf.pmus():
+ pmu_name = pmu.name().lower()
+ pmu_node = pmus.add(pmu_name)
+ try:
+ for event in sorted(pmu.events(), key=lambda x: x["name"]):
+ if "deprecated" in event:
+ continue
+ if "name" in event:
+ e = event["name"].lower()
+ if "alias" in event:
+ pmu_node.add_leaf(f'{e} ({event["alias"]})',
+ data=PmuEvent(pmu_name, e))
+ else:
+ pmu_node.add_leaf(e, data=PmuEvent(pmu_name, e))
+ except:
+ # Reading events may fail with EPERM, ignore.
+ pass
+ metrics = tree.root.add("Metrics")
+ groups = set()
+ for metric in perf.metrics():
+ groups.update(metric["MetricGroup"])
+
+ def add_metrics_to_tree(node: TreeNode[TreeValue], parent: str, pmu: str = None):
+ for metric in sorted(perf.metrics(), key=lambda x: x["MetricName"]):
+ metric_pmu = metric.get('PMU')
+ if pmu and metric_pmu and metric_pmu != pmu:
+ continue
+ if parent in metric["MetricGroup"]:
+ name = metric["MetricName"]
+ display_name = name
+ if metric_pmu:
+ display_name += f" ({metric_pmu})"
+ node.add_leaf(display_name, data=Metric(name, metric_pmu))
+ child_group_name = f'{name}_group'
+ if child_group_name in groups:
+ display_child_group_name = child_group_name
+ if metric_pmu:
+ display_child_group_name += f" ({metric_pmu})"
+ add_metrics_to_tree(node.add(display_child_group_name),
+ child_group_name,
+ metric_pmu)
+
+ for group in sorted(groups):
+ if group.endswith('_group'):
+ continue
+ add_metrics_to_tree(metrics.add(group), group)
+
+ tree.root.expand()
+ return tree
+
+ yield Header(id="header")
+ yield Horizontal(Vertical(metric_event_tree(), id="events"),
+ Vertical(Label("event name", id="event_name"),
+ Static("description", markup=False, id="event_description"),
+ ))
+ yield Label(id="active_search")
+ yield VerticalScroll(id="lines")
+ yield Footer(id="footer")
+
+ @on(Tree.NodeSelected)
+ def on_tree_node_selected(self, event: Tree.NodeSelected[TreeValue]) -> None:
+ """Called when a tree node is selected, selecting the event."""
+ if event.node.data:
+ self.set_selected(event.node.data)
+
+
+if __name__ == "__main__":
+ ap = argparse.ArgumentParser()
+ ap.add_argument('-I', '--interval', help="Counter update interval in seconds", default=0.1)
+ args = ap.parse_args()
+ app = IListApp(float(args.interval))
+ app.run()
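ilist.py keys the whole TUI off the TreeValue abstraction: Metric and
PmuEvent differ only in how they describe, parse and read themselves. A
hypothetical third implementation (illustrative only, not part of the patch)
wrapping one literal event string would be a sketch like:

    class FixedEvent(TreeValue):
        """Illustrative only: wrap one fixed event string."""
        def __init__(self, event: str):
            self.event = event

        def name(self) -> str:
            return self.event

        def description(self) -> str:
            return f"Fixed event {self.event}"

        def matches(self, query: str) -> bool:
            return query in self.event

        def parse(self) -> perf.evlist:
            return perf.parse_events(self.event)

        def value(self, evlist, evsel, cpu: int, thread: int) -> float:
            return evsel.read(cpu, thread).val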
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Build b/tools/perf/scripts/perl/Perf-Trace-Util/Build
index 9b0e5a8b5070..01a1a0ed51ae 100644
--- a/tools/perf/scripts/perl/Perf-Trace-Util/Build
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/Build
@@ -2,7 +2,7 @@ perf-util-y += Context.o
CFLAGS_Context.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-bad-function-cast -Wno-declaration-after-statement -Wno-switch-enum
CFLAGS_Context.o += -Wno-unused-parameter -Wno-nested-externs -Wno-undef
-CFLAGS_Context.o += -Wno-switch-default -Wno-shadow
+CFLAGS_Context.o += -Wno-switch-default -Wno-shadow -Wno-thread-safety-analysis
ifeq ($(CC_NO_CLANG), 1)
CFLAGS_Context.o += -Wno-unused-command-line-argument
diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
index 121cf61ba1b3..e0b2e7268ef6 100755
--- a/tools/perf/scripts/python/exported-sql-viewer.py
+++ b/tools/perf/scripts/python/exported-sql-viewer.py
@@ -680,7 +680,10 @@ class CallGraphModelBase(TreeModel):
s = value.replace("%", "\\%")
s = s.replace("_", "\\_")
# Translate * and ? into SQL LIKE pattern characters % and _
- trans = string.maketrans("*?", "%_")
+ if sys.version_info[0] == 3:
+ trans = str.maketrans("*?", "%_")
+ else:
+ trans = string.maketrans("*?", "%_")
match = " LIKE '" + str(s).translate(trans) + "'"
else:
match = " GLOB '" + str(value) + "'"
diff --git a/tools/perf/scripts/python/flamegraph.py b/tools/perf/scripts/python/flamegraph.py
index cf7ce8229a6c..ad735990c5be 100755
--- a/tools/perf/scripts/python/flamegraph.py
+++ b/tools/perf/scripts/python/flamegraph.py
@@ -18,7 +18,6 @@
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
-from __future__ import print_function
import argparse
import hashlib
import io
@@ -26,9 +25,10 @@ import json
import os
import subprocess
import sys
+from typing import Dict, Optional, Union
import urllib.request
-minimal_html = """<head>
+MINIMAL_HTML = """<head>
<link rel="stylesheet" type="text/css" href="https://cdn.jsdelivr.net/npm/d3-flame-graph@4.1.3/dist/d3-flamegraph.css">
</head>
<body>
@@ -50,20 +50,20 @@ minimal_html = """<head>
# pylint: disable=too-few-public-methods
class Node:
- def __init__(self, name, libtype):
+ def __init__(self, name: str, libtype: str):
self.name = name
# "root" | "kernel" | ""
# "" indicates user space
self.libtype = libtype
- self.value = 0
- self.children = []
+ self.value: int = 0
+ self.children: list[Node] = []
- def to_json(self):
+ def to_json(self) -> Dict[str, Union[str, int, list[Dict]]]:
return {
"n": self.name,
"l": self.libtype,
"v": self.value,
- "c": self.children
+ "c": [x.to_json() for x in self.children]
}
@@ -73,7 +73,7 @@ class FlameGraphCLI:
self.stack = Node("all", "root")
@staticmethod
- def get_libtype_from_dso(dso):
+ def get_libtype_from_dso(dso: Optional[str]) -> str:
"""
when kernel-debuginfo is installed,
dso points to /usr/lib/debug/lib/modules/*/vmlinux
@@ -84,7 +84,7 @@ class FlameGraphCLI:
return ""
@staticmethod
- def find_or_create_node(node, name, libtype):
+ def find_or_create_node(node: Node, name: str, libtype: str) -> Node:
for child in node.children:
if child.name == name:
return child
@@ -93,7 +93,12 @@ class FlameGraphCLI:
node.children.append(child)
return child
- def process_event(self, event):
+ def process_event(self, event) -> None:
+ # ignore events where the event name does not match
+ # the one specified by the user
+ if self.args.event_name and event.get("ev_name") != self.args.event_name:
+ return
+
pid = event.get("sample", {}).get("pid", 0)
# event["dso"] sometimes contains /usr/lib/debug/lib/modules/*/vmlinux
# for user-space processes; let's use pid for kernel or user-space distinction
@@ -101,7 +106,7 @@ class FlameGraphCLI:
comm = event["comm"]
libtype = "kernel"
else:
- comm = "{} ({})".format(event["comm"], pid)
+ comm = f"{event['comm']} ({pid})"
libtype = ""
node = self.find_or_create_node(self.stack, comm, libtype)
@@ -116,20 +121,30 @@ class FlameGraphCLI:
node = self.find_or_create_node(node, name, libtype)
node.value += 1
- def get_report_header(self):
+ def get_report_header(self) -> str:
if self.args.input == "-":
# when this script is invoked with "perf script flamegraph",
# no perf.data is created and we cannot read the header of it
return ""
try:
- output = subprocess.check_output(["perf", "report", "--header-only"])
- return output.decode("utf-8")
+ # if the file name other than perf.data is given,
+ # we read the header of that file
+ if self.args.input:
+ output = subprocess.check_output(["perf", "report", "--header-only",
+ "-i", self.args.input])
+ else:
+ output = subprocess.check_output(["perf", "report", "--header-only"])
+
+ result = output.decode("utf-8")
+ if self.args.event_name:
+ result += "\nFocused event: " + self.args.event_name
+ return result
except Exception as err: # pylint: disable=broad-except
- print("Error reading report header: {}".format(err), file=sys.stderr)
+ print(f"Error reading report header: {err}", file=sys.stderr)
return ""
- def trace_end(self):
+ def trace_end(self) -> None:
stacks_json = json.dumps(self.stack, default=lambda x: x.to_json())
if self.args.format == "html":
@@ -153,7 +168,8 @@ graph template (--template PATH) or use another output format (--format
FORMAT).""",
file=sys.stderr)
if self.args.input == "-":
- print("""Not attempting to download Flame Graph template as script command line
+ print(
+"""Not attempting to download Flame Graph template as script command line
input is disabled due to using live mode. If you want to download the
template retry without live mode. For example, use 'perf record -a -g
-F 99 sleep 60' and 'perf script report flamegraph'. Alternatively,
@@ -162,37 +178,40 @@ https://cdn.jsdelivr.net/npm/d3-flame-graph@4.1.3/dist/templates/d3-flamegraph-b
and place it at:
/usr/share/d3-flame-graph/d3-flamegraph-base.html""",
file=sys.stderr)
- quit()
+ sys.exit(1)
s = None
- while s != "y" and s != "n":
- s = input("Do you wish to download a template from cdn.jsdelivr.net? (this warning can be suppressed with --allow-download) [yn] ").lower()
+ while s not in ["y", "n"]:
+ s = input("Do you wish to download a template from cdn.jsdelivr.net? " +
+ "(this warning can be suppressed with --allow-download) [yn] "
+ ).lower()
if s == "n":
- quit()
- template = "https://cdn.jsdelivr.net/npm/d3-flame-graph@4.1.3/dist/templates/d3-flamegraph-base.html"
+ sys.exit(1)
+ template = ("https://cdn.jsdelivr.net/npm/d3-flame-graph@4.1.3/dist/templates/"
+ "d3-flamegraph-base.html")
template_md5sum = "143e0d06ba69b8370b9848dcd6ae3f36"
try:
- with urllib.request.urlopen(template) as template:
+ with urllib.request.urlopen(template) as url_template:
output_str = "".join([
- l.decode("utf-8") for l in template.readlines()
+ l.decode("utf-8") for l in url_template.readlines()
])
except Exception as err:
print(f"Error reading template {template}: {err}\n"
"a minimal flame graph will be generated", file=sys.stderr)
- output_str = minimal_html
+ output_str = MINIMAL_HTML
template_md5sum = None
if template_md5sum:
download_md5sum = hashlib.md5(output_str.encode("utf-8")).hexdigest()
if download_md5sum != template_md5sum:
s = None
- while s != "y" and s != "n":
+ while s not in ["y", "n"]:
s = input(f"""Unexpected template md5sum.
{download_md5sum} != {template_md5sum}, for:
{output_str}
continue?[yn] """).lower()
if s == "n":
- quit()
+ sys.exit(1)
output_str = output_str.replace("/** @options_json **/", options_json)
output_str = output_str.replace("/** @flamegraph_json **/", stacks_json)
@@ -206,12 +225,12 @@ continue?[yn] """).lower()
with io.open(sys.stdout.fileno(), "w", encoding="utf-8", closefd=False) as out:
out.write(output_str)
else:
- print("dumping data to {}".format(output_fn))
+ print(f"dumping data to {output_fn}")
try:
with io.open(output_fn, "w", encoding="utf-8") as out:
out.write(output_str)
except IOError as err:
- print("Error writing output file: {}".format(err), file=sys.stderr)
+ print(f"Error writing output file: {err}", file=sys.stderr)
sys.exit(1)
@@ -235,6 +254,11 @@ if __name__ == "__main__":
default=False,
action="store_true",
help="allow unprompted downloading of HTML template")
+ parser.add_argument("-e", "--event",
+ default="",
+ dest="event_name",
+ type=str,
+ help="specify the event to generate flamegraph for")
cli_args = parser.parse_args()
cli = FlameGraphCLI(cli_args)
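The new -e/--event option makes flamegraph.py single-event aware: a sample is
dropped unless its "ev_name" matches the requested name. The filter reduces
to this predicate (a standalone sketch, field names as in the patch):

    def want(event: dict, event_name: str) -> bool:
        return not event_name or event.get("ev_name") == event_name

    assert want({"ev_name": "cycles"}, "")          # no filter given
    assert want({"ev_name": "cycles"}, "cycles")
    assert not want({"ev_name": "instructions"}, "cycles")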
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
index 934f32090553..c2a67ce45941 100644
--- a/tools/perf/tests/Build
+++ b/tools/perf/tests/Build
@@ -20,7 +20,6 @@ perf-test-y += hists_link.o
perf-test-y += hists_filter.o
perf-test-y += hists_output.o
perf-test-y += hists_cumulate.o
-perf-test-y += python-use.o
perf-test-y += bp_signal.o
perf-test-y += bp_signal_overflow.o
perf-test-y += bp_account.o
@@ -56,6 +55,7 @@ perf-test-y += genelf.o
perf-test-y += api-io.o
perf-test-y += demangle-java-test.o
perf-test-y += demangle-ocaml-test.o
+perf-test-y += demangle-rust-v0-test.o
perf-test-y += pfm.o
perf-test-y += parse-metric.o
perf-test-y += pe-file-parsing.o
@@ -68,12 +68,13 @@ perf-test-y += symbols.o
perf-test-y += util.o
perf-test-y += hwmon_pmu.o
perf-test-y += tool_pmu.o
+perf-test-y += subcmd-help.o
+perf-test-y += kallsyms-split.o
ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc))
perf-test-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
endif
-CFLAGS_python-use.o += -DPYTHONPATH="BUILD_STR($(OUTPUT)python)" -DPYTHON="BUILD_STR($(PYTHON_WORD))"
CFLAGS_dwarf-unwind.o += -fno-optimize-sibling-calls
perf-test-y += workloads/
@@ -88,7 +89,7 @@ endif
$(OUTPUT)%.shellcheck_log: %
$(call rule_mkdir)
- $(Q)$(call echo-cmd,test)shellcheck -a -S warning "$<" > $@ || (cat $@ && rm $@ && false)
+ $(Q)$(call echo-cmd,test)$(SHELLCHECK) "$<" > $@ || (cat $@ && rm $@ && false)
perf-test-y += $(SHELL_TEST_LOGS)
diff --git a/tools/perf/tests/backward-ring-buffer.c b/tools/perf/tests/backward-ring-buffer.c
index 79a980b1e786..c5e7999f2817 100644
--- a/tools/perf/tests/backward-ring-buffer.c
+++ b/tools/perf/tests/backward-ring-buffer.c
@@ -91,7 +91,6 @@ static int test__backward_ring_buffer(struct test_suite *test __maybe_unused, in
struct parse_events_error parse_error;
struct record_opts opts = {
.target = {
- .uid = UINT_MAX,
.uses_mmap = true,
},
.freq = 0,
diff --git a/tools/perf/tests/bp_account.c b/tools/perf/tests/bp_account.c
index 4cb7d486b5c1..047433c977bc 100644
--- a/tools/perf/tests/bp_account.c
+++ b/tools/perf/tests/bp_account.c
@@ -104,6 +104,7 @@ static int bp_accounting(int wp_cnt, int share)
fd_wp = wp_event((void *)&the_var, &attr_new);
TEST_ASSERT_VAL("failed to create max wp\n", fd_wp != -1);
pr_debug("wp max created\n");
+ close(fd_wp);
}
for (i = 0; i < wp_cnt; i++)
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 14d30a5053be..bd6ffa8e4578 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -4,8 +4,12 @@
*
* Builtin regression testing command: ever growing number of sanity tests
*/
+#include <ctype.h>
#include <fcntl.h>
#include <errno.h>
+#ifdef HAVE_BACKTRACE_SUPPORT
+#include <execinfo.h>
+#endif
#include <poll.h>
#include <unistd.h>
#include <setjmp.h>
@@ -80,7 +84,6 @@ static struct test_suite *generic_tests[] = {
&suite__syscall_openat_tp_fields,
#endif
&suite__hists_link,
- &suite__python_use,
&suite__bp_signal,
&suite__bp_signal_overflow,
&suite__bp_accounting,
@@ -123,9 +126,10 @@ static struct test_suite *generic_tests[] = {
&suite__jit_write_elf,
&suite__pfm,
&suite__api_io,
- &suite__maps__merge_in,
+ &suite__maps,
&suite__demangle_java,
&suite__demangle_ocaml,
+ &suite__demangle_rust,
&suite__parse_metric,
&suite__pe_file_parsing,
&suite__expand_cgroup_events,
@@ -135,6 +139,8 @@ static struct test_suite *generic_tests[] = {
&suite__event_groups,
&suite__symbols,
&suite__util,
+ &suite__subcmd_help,
+ &suite__kallsyms_split,
NULL,
};
@@ -146,6 +152,7 @@ static struct test_workload *workloads[] = {
&workload__brstack,
&workload__datasym,
&workload__landlock,
+ &workload__traploop,
};
#define workloads__for_each(workload) \
@@ -154,6 +161,71 @@ static struct test_workload *workloads[] = {
#define test_suite__for_each_test_case(suite, idx) \
for (idx = 0; (suite)->test_cases && (suite)->test_cases[idx].name != NULL; idx++)
+static void close_parent_fds(void)
+{
+ DIR *dir = opendir("/proc/self/fd");
+ struct dirent *ent;
+
+ while ((ent = readdir(dir))) {
+ char *end;
+ long fd;
+
+ if (ent->d_type != DT_LNK)
+ continue;
+
+ if (!isdigit(ent->d_name[0]))
+ continue;
+
+ fd = strtol(ent->d_name, &end, 10);
+ if (*end)
+ continue;
+
+ if (fd <= 3 || fd == dirfd(dir))
+ continue;
+
+ close(fd);
+ }
+ closedir(dir);
+}
+
+static void check_leaks(void)
+{
+ DIR *dir = opendir("/proc/self/fd");
+ struct dirent *ent;
+ int leaks = 0;
+
+ while ((ent = readdir(dir))) {
+ char path[PATH_MAX];
+ char *end;
+ long fd;
+ ssize_t len;
+
+ if (ent->d_type != DT_LNK)
+ continue;
+
+ if (!isdigit(ent->d_name[0]))
+ continue;
+
+ fd = strtol(ent->d_name, &end, 10);
+ if (*end)
+ continue;
+
+ if (fd <= 3 || fd == dirfd(dir))
+ continue;
+
+ leaks++;
+ len = readlinkat(dirfd(dir), ent->d_name, path, sizeof(path));
+ if (len > 0 && (size_t)len < sizeof(path))
+ path[len] = '\0';
+ else
+ strncpy(path, ent->d_name, sizeof(path));
+ pr_err("Leak of file descriptor %s that opened: '%s'\n", ent->d_name, path);
+ }
+ closedir(dir);
+ if (leaks)
+ abort();
+}
+
static int test_suite__num_test_cases(const struct test_suite *t)
{
int num;
@@ -230,6 +302,16 @@ static jmp_buf run_test_jmp_buf;
static void child_test_sig_handler(int sig)
{
+#ifdef HAVE_BACKTRACE_SUPPORT
+ void *stackdump[32];
+ size_t stackdump_size;
+#endif
+
+ fprintf(stderr, "\n---- unexpected signal (%d) ----\n", sig);
+#ifdef HAVE_BACKTRACE_SUPPORT
+ stackdump_size = backtrace(stackdump, ARRAY_SIZE(stackdump));
+ __dump_stack(stderr, stackdump, stackdump_size);
+#endif
siglongjmp(run_test_jmp_buf, sig);
}
@@ -241,9 +323,11 @@ static int run_test_child(struct child_process *process)
struct child_test *child = container_of(process, struct child_test, process);
int err;
+ close_parent_fds();
+
err = sigsetjmp(run_test_jmp_buf, 1);
if (err) {
- fprintf(stderr, "\n---- unexpected signal (%d) ----\n", err);
+ /* Received signal. */
err = err > 0 ? -err : -1;
goto err_out;
}
@@ -256,6 +340,7 @@ static int run_test_child(struct child_process *process)
err = test_function(child->test, child->test_case_num)(child->test, child->test_case_num);
pr_debug("---- end(%d) ----\n", err);
+ check_leaks();
err_out:
fflush(NULL);
for (size_t i = 0; i < ARRAY_SIZE(signals); i++)
@@ -525,6 +610,7 @@ static int __cmd_test(struct test_suite **suites, int argc, const char *argv[],
for (struct test_suite **t = suites; *t; t++, curr_suite++) {
int curr_test_case;
+ bool suite_matched = false;
if (!perf_test__matches(test_description(*t, -1), curr_suite, argc, argv)) {
/*
@@ -542,6 +628,8 @@ static int __cmd_test(struct test_suite **suites, int argc, const char *argv[],
}
if (skip)
continue;
+ } else {
+ suite_matched = true;
}
if (intlist__find(skiplist, curr_suite + 1)) {
@@ -553,10 +641,10 @@ static int __cmd_test(struct test_suite **suites, int argc, const char *argv[],
for (unsigned int run = 0; run < runs_per_test; run++) {
test_suite__for_each_test_case(*t, curr_test_case) {
- if (!perf_test__matches(test_description(*t, curr_test_case),
+ if (!suite_matched &&
+ !perf_test__matches(test_description(*t, curr_test_case),
curr_suite, argc, argv))
continue;
-
err = start_test(*t, curr_suite, curr_test_case,
&child_tests[child_test_num++],
width, pass);
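The close_parent_fds()/check_leaks() pair brackets each child test: inherited
descriptors are closed up front, and anything above the stdio range still
open after the test is reported and aborts. The same /proc/self/fd scan,
sketched in Python:

    import os

    def leaked_fds() -> dict[int, str]:
        """Descriptors above the stdio range and what they point at."""
        leaks = {}
        for name in os.listdir("/proc/self/fd"):
            fd = int(name)
            if fd <= 3:  # stdio, as in the C check above
                continue
            try:
                leaks[fd] = os.readlink(f"/proc/self/fd/{fd}")
            except OSError:
                continue  # fd belonged to the scan itself, now closed
        return leaks

    print(leaked_fds())  # expected: {} in a clean process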
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index cf6edbe697b2..5927d1ea20e2 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -2,6 +2,7 @@
#include <errno.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
+#include <linux/rbtree.h>
#include <linux/types.h>
#include <inttypes.h>
#include <stdlib.h>
@@ -39,11 +40,69 @@
#define BUFSZ 1024
#define READLEN 128
-struct state {
- u64 done[1024];
- size_t done_cnt;
+struct tested_section {
+ struct rb_node rb_node;
+ u64 addr;
+ char *path;
};
+static bool tested_code_insert_or_exists(const char *path, u64 addr,
+ struct rb_root *tested_sections)
+{
+ struct rb_node **node = &tested_sections->rb_node;
+ struct rb_node *parent = NULL;
+ struct tested_section *data;
+
+ while (*node) {
+ int cmp;
+
+ parent = *node;
+ data = rb_entry(*node, struct tested_section, rb_node);
+ cmp = strcmp(path, data->path);
+ if (!cmp) {
+ if (addr < data->addr)
+ cmp = -1;
+ else if (addr > data->addr)
+ cmp = 1;
+ else
+ return true; /* already tested */
+ }
+
+ if (cmp < 0)
+ node = &(*node)->rb_left;
+ else
+ node = &(*node)->rb_right;
+ }
+
+ data = zalloc(sizeof(*data));
+ if (!data)
+ return true;
+
+ data->addr = addr;
+ data->path = strdup(path);
+ if (!data->path) {
+ free(data);
+ return true;
+ }
+ rb_link_node(&data->rb_node, parent, node);
+ rb_insert_color(&data->rb_node, tested_sections);
+ return false;
+}
+
+static void tested_sections__free(struct rb_root *root)
+{
+ while (!RB_EMPTY_ROOT(root)) {
+ struct rb_node *node = rb_first(root);
+ struct tested_section *ts = rb_entry(node,
+ struct tested_section,
+ rb_node);
+
+ rb_erase(node, root);
+ free(ts->path);
+ free(ts);
+ }
+}
+
static size_t read_objdump_chunk(const char **line, unsigned char **buf,
size_t *buf_len)
{
@@ -316,13 +375,15 @@ static void dump_buf(unsigned char *buf, size_t len)
}
static int read_object_code(u64 addr, size_t len, u8 cpumode,
- struct thread *thread, struct state *state)
+ struct thread *thread,
+ struct rb_root *tested_sections)
{
struct addr_location al;
unsigned char buf1[BUFSZ] = {0};
unsigned char buf2[BUFSZ] = {0};
size_t ret_len;
u64 objdump_addr;
+ u64 skip_addr;
const char *objdump_name;
char decomp_name[KMOD_DECOMP_LEN];
bool decomp = false;
@@ -350,6 +411,18 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
goto out;
}
+ /*
+ * Don't retest the same addresses. objdump struggles with kcore - try
+ * each map only once even if the address is different.
+ */
+ skip_addr = dso__is_kcore(dso) ? map__start(al.map) : al.addr;
+ if (tested_code_insert_or_exists(dso__long_name(dso), skip_addr,
+ tested_sections)) {
+ pr_debug("Already tested %s @ %#"PRIx64" - skipping\n",
+ dso__long_name(dso), skip_addr);
+ goto out;
+ }
+
pr_debug("On file address is: %#"PRIx64"\n", al.addr);
if (len > BUFSZ)
@@ -387,24 +460,6 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
goto out;
}
- /* objdump struggles with kcore - try each map only once */
- if (dso__is_kcore(dso)) {
- size_t d;
-
- for (d = 0; d < state->done_cnt; d++) {
- if (state->done[d] == map__start(al.map)) {
- pr_debug("kcore map tested already");
- pr_debug(" - skipping\n");
- goto out;
- }
- }
- if (state->done_cnt >= ARRAY_SIZE(state->done)) {
- pr_debug("Too many kcore maps - skipping\n");
- goto out;
- }
- state->done[state->done_cnt++] = map__start(al.map);
- }
-
objdump_name = dso__long_name(dso);
if (dso__needs_decompress(dso)) {
if (dso__decompress_kmodule_path(dso, objdump_name,
@@ -471,9 +526,9 @@ out:
return err;
}
-static int process_sample_event(struct machine *machine,
- struct evlist *evlist,
- union perf_event *event, struct state *state)
+static int process_sample_event(struct machine *machine, struct evlist *evlist,
+ union perf_event *event,
+ struct rb_root *tested_sections)
{
struct perf_sample sample;
struct thread *thread;
@@ -494,7 +549,8 @@ static int process_sample_event(struct machine *machine,
goto out;
}
- ret = read_object_code(sample.ip, READLEN, sample.cpumode, thread, state);
+ ret = read_object_code(sample.ip, READLEN, sample.cpumode, thread,
+ tested_sections);
thread__put(thread);
out:
perf_sample__exit(&sample);
@@ -502,10 +558,11 @@ out:
}
static int process_event(struct machine *machine, struct evlist *evlist,
- union perf_event *event, struct state *state)
+ union perf_event *event, struct rb_root *tested_sections)
{
if (event->header.type == PERF_RECORD_SAMPLE)
- return process_sample_event(machine, evlist, event, state);
+ return process_sample_event(machine, evlist, event,
+ tested_sections);
if (event->header.type == PERF_RECORD_THROTTLE ||
event->header.type == PERF_RECORD_UNTHROTTLE)
@@ -525,7 +582,7 @@ static int process_event(struct machine *machine, struct evlist *evlist,
}
static int process_events(struct machine *machine, struct evlist *evlist,
- struct state *state)
+ struct rb_root *tested_sections)
{
union perf_event *event;
struct mmap *md;
@@ -537,7 +594,7 @@ static int process_events(struct machine *machine, struct evlist *evlist,
continue;
while ((event = perf_mmap__read_event(&md->core)) != NULL) {
- ret = process_event(machine, evlist, event, state);
+ ret = process_event(machine, evlist, event, tested_sections);
perf_mmap__consume(&md->core);
if (ret < 0)
return ret;
@@ -637,9 +694,7 @@ static int do_test_code_reading(bool try_kcore)
.uses_mmap = true,
},
};
- struct state state = {
- .done_cnt = 0,
- };
+ struct rb_root tested_sections = RB_ROOT;
struct perf_thread_map *threads = NULL;
struct perf_cpu_map *cpus = NULL;
struct evlist *evlist = NULL;
@@ -649,13 +704,14 @@ static int do_test_code_reading(bool try_kcore)
struct map *map;
bool have_vmlinux, have_kcore;
struct dso *dso;
- const char *events[] = { "cycles", "cycles:u", "cpu-clock", "cpu-clock:u", NULL };
+ const char *events[] = { "cpu-cycles", "cpu-cycles:u", "cpu-clock", "cpu-clock:u", NULL };
int evidx = 0;
+ struct perf_env host_env;
pid = getpid();
- machine = machine__new_host();
- machine->env = &perf_env;
+ perf_env__init(&host_env);
+ machine = machine__new_host(&host_env);
ret = machine__create_kernel_maps(machine);
if (ret < 0) {
@@ -749,13 +805,6 @@ static int do_test_code_reading(bool try_kcore)
pr_debug("perf_evlist__open() failed!\n%s\n", errbuf);
}
- /*
- * Both cpus and threads are now owned by evlist
- * and will be freed by following perf_evlist__set_maps
- * call. Getting reference to keep them alive.
- */
- perf_cpu_map__get(cpus);
- perf_thread_map__get(threads);
perf_evlist__set_maps(&evlist->core, NULL, NULL);
evlist__delete(evlist);
evlist = NULL;
@@ -779,7 +828,7 @@ static int do_test_code_reading(bool try_kcore)
evlist__disable(evlist);
- ret = process_events(machine, evlist, &state);
+ ret = process_events(machine, evlist, &tested_sections);
if (ret < 0)
goto out_put;
@@ -798,6 +847,8 @@ out_err:
perf_cpu_map__put(cpus);
perf_thread_map__put(threads);
machine__delete(machine);
+ perf_env__exit(&host_env);
+ tested_sections__free(&tested_sections);
return err;
}
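The rb-tree above replaces the fixed 1024-entry array of kcore map starts
with a set keyed on (path, addr), so any object is skipped once tested and
kcore maps are still tried only once per map start. Conceptually, the
bookkeeping is this (Python sketch):

    tested: set[tuple[str, int]] = set()

    def insert_or_exists(path: str, addr: int) -> bool:
        """True if (path, addr) was already tested, else record it."""
        if (path, addr) in tested:
            return True
        tested.add((path, addr))
        return False

    assert not insert_or_exists("/proc/kcore", 0xffff0000)
    assert insert_or_exists("/proc/kcore", 0xffff0000)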
diff --git a/tools/perf/tests/demangle-java-test.c b/tools/perf/tests/demangle-java-test.c
index 93c94408bdc8..0fb3e5a4a0ed 100644
--- a/tools/perf/tests/demangle-java-test.c
+++ b/tools/perf/tests/demangle-java-test.c
@@ -3,10 +3,9 @@
#include <stdlib.h>
#include <stdio.h>
#include <linux/kernel.h>
-#include "tests.h"
-#include "session.h"
#include "debug.h"
-#include "demangle-java.h"
+#include "symbol.h"
+#include "tests.h"
static int test__demangle_java(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
@@ -18,19 +17,24 @@ static int test__demangle_java(struct test_suite *test __maybe_unused, int subte
const char *mangled, *demangled;
} test_cases[] = {
{ "Ljava/lang/StringLatin1;equals([B[B)Z",
- "boolean java.lang.StringLatin1.equals(byte[], byte[])" },
+ "java.lang.StringLatin1.equals(byte[], byte[])" },
{ "Ljava/util/zip/ZipUtils;CENSIZ([BI)J",
- "long java.util.zip.ZipUtils.CENSIZ(byte[], int)" },
+ "java.util.zip.ZipUtils.CENSIZ(byte[], int)" },
{ "Ljava/util/regex/Pattern$BmpCharProperty;match(Ljava/util/regex/Matcher;ILjava/lang/CharSequence;)Z",
- "boolean java.util.regex.Pattern$BmpCharProperty.match(java.util.regex.Matcher, int, java.lang.CharSequence)" },
+ "java.util.regex.Pattern$BmpCharProperty.match(java.util.regex.Matcher, int, java.lang.CharSequence)" },
{ "Ljava/lang/AbstractStringBuilder;appendChars(Ljava/lang/String;II)V",
- "void java.lang.AbstractStringBuilder.appendChars(java.lang.String, int, int)" },
+ "java.lang.AbstractStringBuilder.appendChars(java.lang.String, int, int)" },
{ "Ljava/lang/Object;<init>()V",
- "void java.lang.Object<init>()" },
+ "java.lang.Object<init>()" },
};
for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
- buf = java_demangle_sym(test_cases[i].mangled, 0);
+ buf = dso__demangle_sym(/*dso=*/NULL, /*kmodule=*/0, test_cases[i].mangled);
+ if (!buf) {
+ pr_debug("FAILED to demangle: \"%s\"\n \"%s\"\n", test_cases[i].mangled,
+ test_cases[i].demangled);
+ continue;
+ }
if (strcmp(buf, test_cases[i].demangled)) {
pr_debug("FAILED: %s: %s != %s\n", test_cases[i].mangled,
buf, test_cases[i].demangled);
diff --git a/tools/perf/tests/demangle-ocaml-test.c b/tools/perf/tests/demangle-ocaml-test.c
index 90a4285e2ad5..612c788b7e0d 100644
--- a/tools/perf/tests/demangle-ocaml-test.c
+++ b/tools/perf/tests/demangle-ocaml-test.c
@@ -2,10 +2,9 @@
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
-#include "tests.h"
-#include "session.h"
#include "debug.h"
-#include "demangle-ocaml.h"
+#include "symbol.h"
+#include "tests.h"
static int test__demangle_ocaml(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
@@ -27,7 +26,7 @@ static int test__demangle_ocaml(struct test_suite *test __maybe_unused, int subt
};
for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
- buf = ocaml_demangle_sym(test_cases[i].mangled);
+ buf = dso__demangle_sym(/*dso=*/NULL, /*kmodule=*/0, test_cases[i].mangled);
if ((buf == NULL && test_cases[i].demangled != NULL)
|| (buf != NULL && test_cases[i].demangled == NULL)
|| (buf != NULL && strcmp(buf, test_cases[i].demangled))) {
diff --git a/tools/perf/tests/demangle-rust-v0-test.c b/tools/perf/tests/demangle-rust-v0-test.c
new file mode 100644
index 000000000000..904f966c65d7
--- /dev/null
+++ b/tools/perf/tests/demangle-rust-v0-test.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+#include "tests.h"
+#include "debug.h"
+#include "symbol.h"
+#include <linux/kernel.h>
+#include <stdlib.h>
+#include <string.h>
+
+static int test__demangle_rust(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ int ret = TEST_OK;
+ char *buf = NULL;
+ size_t i;
+
+ struct {
+ const char *mangled, *demangled;
+ } test_cases[] = {
+ { "_RNvMsr_NtCs3ssYzQotkvD_3std4pathNtB5_7PathBuf3newCs15kBYyAo9fc_7mycrate",
+ "<std::path::PathBuf>::new" },
+ { "_RNvCs15kBYyAo9fc_7mycrate7example",
+ "mycrate::example" },
+ { "_RNvMs_Cs4Cv8Wi1oAIB_7mycrateNtB4_7Example3foo",
+ "<mycrate::Example>::foo" },
+ { "_RNvXCs15kBYyAo9fc_7mycrateNtB2_7ExampleNtB2_5Trait3foo",
+ "<mycrate::Example as mycrate::Trait>::foo" },
+ { "_RNvMCs7qp2U7fqm6G_7mycrateNtB2_7Example3foo",
+ "<mycrate::Example>::foo" },
+ { "_RNvMs_Cs7qp2U7fqm6G_7mycrateNtB4_7Example3bar",
+ "<mycrate::Example>::bar" },
+ { "_RNvYNtCs15kBYyAo9fc_7mycrate7ExampleNtB4_5Trait7exampleB4_",
+ "<mycrate::Example as mycrate::Trait>::example" },
+ { "_RNCNvCsgStHSCytQ6I_7mycrate4main0B3_",
+ "mycrate::main::{closure#0}" },
+ { "_RNCNvCsgStHSCytQ6I_7mycrate4mains_0B3_",
+ "mycrate::main::{closure#1}" },
+ { "_RINvCsgStHSCytQ6I_7mycrate7examplelKj1_EB2_",
+ "mycrate::example::<i32, 1>" },
+ { "_RINvCs7qp2U7fqm6G_7mycrate7exampleFG0_RL1_hRL0_tEuEB2_",
+ "mycrate::example::<for<'a, 'b> fn(&'a u8, &'b u16)>",
+ },
+ { "_RINvCs7qp2U7fqm6G_7mycrate7exampleKy12345678_EB2_",
+ "mycrate::example::<305419896>" },
+ { "_RNvNvMCsd9PVOYlP1UU_7mycrateINtB4_7ExamplepKpE3foo14EXAMPLE_STATIC",
+ "<mycrate::Example<_, _>>::foo::EXAMPLE_STATIC",
+ },
+ { "_RINvCs7qp2U7fqm6G_7mycrate7exampleAtj8_EB2_",
+ "mycrate::example::<[u16; 8]>" },
+ { "_RINvCs7qp2U7fqm6G_7mycrate7exampleNtB2_7ExampleBw_EB2_",
+ "mycrate::example::<mycrate::Example, mycrate::Example>" },
+ { "_RINvMsY_NtCseXNvpPnDBDp_3std4pathNtB6_4Path3neweECs7qp2U7fqm6G_7mycrate",
+ "<std::path::Path>::new::<str>" },
+ { "_RNvNvNvCs7qp2U7fqm6G_7mycrate7EXAMPLE7___getit5___KEY",
+ "mycrate::EXAMPLE::__getit::__KEY" },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
+ buf = dso__demangle_sym(/*dso=*/NULL, /*kmodule=*/0, test_cases[i].mangled);
+ if (!buf) {
+ pr_debug("FAILED to demangle: \"%s\"\n \"%s\"\n", test_cases[i].mangled,
+ test_cases[i].demangled);
+ continue;
+ }
+ if (strcmp(buf, test_cases[i].demangled)) {
+ pr_debug("FAILED: %s: %s != %s\n", test_cases[i].mangled,
+ buf, test_cases[i].demangled);
+ ret = TEST_FAIL;
+ }
+ free(buf);
+ }
+
+ return ret;
+}
+
+DEFINE_SUITE("Demangle Rust", demangle_rust);
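All of the mangled inputs above use Rust's v0 mangling scheme, recognizable
by its "_R" prefix (the legacy scheme produced C++-style "_ZN..." names). A
trivial check of that property over two cases from the table:

    cases = [
        "_RNvCs15kBYyAo9fc_7mycrate7example",
        "_RNCNvCsgStHSCytQ6I_7mycrate4main0B3_",
    ]
    assert all(s.startswith("_R") for s in cases)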
diff --git a/tools/perf/tests/dlfilter-test.c b/tools/perf/tests/dlfilter-test.c
index 54f59d1246bc..80a1c941138d 100644
--- a/tools/perf/tests/dlfilter-test.c
+++ b/tools/perf/tests/dlfilter-test.c
@@ -319,11 +319,12 @@ static int run_perf_script(struct test_data *td)
static int test__dlfilter_test(struct test_data *td)
{
+ struct perf_env host_env;
u64 sample_type = TEST_SAMPLE_TYPE;
pid_t pid = 12345;
pid_t tid = 12346;
u64 id = 99;
- int err;
+ int err = TEST_OK;
if (get_dlfilters_path(td->name, td->dlfilters, PATH_MAX))
return test_result("dlfilters not found", TEST_SKIP);
@@ -352,38 +353,42 @@ static int test__dlfilter_test(struct test_data *td)
return test_result("Failed to find program symbols", TEST_FAIL);
pr_debug("Creating new host machine structure\n");
- td->machine = machine__new_host();
- td->machine->env = &perf_env;
+ perf_env__init(&host_env);
+ td->machine = machine__new_host(&host_env);
td->fd = creat(td->perf_data_file_name, 0644);
if (td->fd < 0)
return test_result("Failed to create test perf.data file", TEST_FAIL);
err = perf_header__write_pipe(td->fd);
- if (err < 0)
- return test_result("perf_header__write_pipe() failed", TEST_FAIL);
-
+ if (err < 0) {
+ err = test_result("perf_header__write_pipe() failed", TEST_FAIL);
+ goto out;
+ }
err = write_attr(td, sample_type, &id);
- if (err)
- return test_result("perf_event__synthesize_attr() failed", TEST_FAIL);
-
- if (write_comm(td->fd, pid, tid, "test-prog"))
- return TEST_FAIL;
-
- if (write_mmap(td->fd, pid, tid, MAP_START, 0x10000, 0, td->prog_file_name))
- return TEST_FAIL;
-
- if (write_sample(td, sample_type, id, pid, tid) != TEST_OK)
- return TEST_FAIL;
-
+ if (err) {
+ err = test_result("perf_event__synthesize_attr() failed", TEST_FAIL);
+ goto out;
+ }
+ if (write_comm(td->fd, pid, tid, "test-prog")) {
+ err = TEST_FAIL;
+ goto out;
+ }
+ if (write_mmap(td->fd, pid, tid, MAP_START, 0x10000, 0, td->prog_file_name)) {
+ err = TEST_FAIL;
+ goto out;
+ }
+ if (write_sample(td, sample_type, id, pid, tid) != TEST_OK) {
+ err = TEST_FAIL;
+ goto out;
+ }
if (verbose > 1)
system_cmd("%s script -i %s -D", td->perf, td->perf_data_file_name);
- err = run_perf_script(td);
- if (err)
- return TEST_FAIL;
-
- return TEST_OK;
+ err = run_perf_script(td) ? TEST_FAIL : TEST_OK;
+out:
+ perf_env__exit(&host_env);
+ return err;
}
static void unlink_path(const char *path)
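The rewrite above funnels every failure through a single out: label so that
perf_env__exit() runs on all paths; the Python analogue of that goto-cleanup
idiom is try/finally (sketch with an illustrative Env stand-in):

    class Env:
        def close(self):
            print("env cleaned up")

    def run_test(fail: bool) -> str:
        env = Env()
        try:
            if fail:
                return "FAIL"
            return "OK"
        finally:
            env.close()  # runs on every return path, like goto out

    print(run_test(False), run_test(True))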
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
index 4803ab2d97ba..9ed78d00fb87 100644
--- a/tools/perf/tests/dwarf-unwind.c
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -7,6 +7,7 @@
#include <unistd.h>
#include "tests.h"
#include "debug.h"
+#include "env.h"
#include "machine.h"
#include "event.h"
#include "../util/unwind.h"
@@ -15,7 +16,6 @@
#include "symbol.h"
#include "thread.h"
#include "callchain.h"
-#include "util/synthetic-events.h"
/* For bsearch. We try to unwind functions in shared object. */
#include <stdlib.h>
@@ -37,24 +37,6 @@
#define NO_TAIL_CALL_BARRIER __asm__ __volatile__("" : : : "memory");
#endif
-static int mmap_handler(const struct perf_tool *tool __maybe_unused,
- union perf_event *event,
- struct perf_sample *sample,
- struct machine *machine)
-{
- return machine__process_mmap2_event(machine, event, sample);
-}
-
-static int init_live_machine(struct machine *machine)
-{
- union perf_event event;
- pid_t pid = getpid();
-
- memset(&event, 0, sizeof(event));
- return perf_event__synthesize_mmap_events(NULL, &event, pid, pid,
- mmap_handler, machine, true);
-}
-
/*
* We need to keep these functions global, despite the
* fact that they are used only locally in this object,
@@ -199,33 +181,31 @@ NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_1(struct thread *th
noinline int test__dwarf_unwind(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
+ struct perf_env host_env;
struct machine *machine;
struct thread *thread;
int err = -1;
+ pid_t pid = getpid();
+
+ callchain_param.record_mode = CALLCHAIN_DWARF;
+ dwarf_callchain_users = true;
- machine = machine__new_host();
+ perf_env__init(&host_env);
+ machine = machine__new_live(&host_env, /*kernel_maps=*/true, pid);
if (!machine) {
pr_err("Could not get machine\n");
- return -1;
+ goto out;
}
if (machine__create_kernel_maps(machine)) {
pr_err("Failed to create kernel maps\n");
- return -1;
- }
-
- callchain_param.record_mode = CALLCHAIN_DWARF;
- dwarf_callchain_users = true;
-
- if (init_live_machine(machine)) {
- pr_err("Could not init machine\n");
goto out;
}
if (verbose > 1)
machine__fprintf(machine, stderr);
- thread = machine__find_thread(machine, getpid(), getpid());
+ thread = machine__find_thread(machine, pid, pid);
if (!thread) {
pr_err("Could not get thread\n");
goto out;
@@ -236,6 +216,7 @@ noinline int test__dwarf_unwind(struct test_suite *test __maybe_unused,
out:
machine__delete(machine);
+ perf_env__exit(&host_env);
return err;
}
diff --git a/tools/perf/tests/event-times.c b/tools/perf/tests/event-times.c
index deefe5003bfc..ae3b98bb42cf 100644
--- a/tools/perf/tests/event-times.c
+++ b/tools/perf/tests/event-times.c
@@ -17,9 +17,7 @@
static int attach__enable_on_exec(struct evlist *evlist)
{
struct evsel *evsel = evlist__last(evlist);
- struct target target = {
- .uid = UINT_MAX,
- };
+ struct target target = {};
const char *argv[] = { "true", NULL, };
char sbuf[STRERR_BUFSIZE];
int err;
@@ -64,7 +62,7 @@ static int attach__current_disabled(struct evlist *evlist)
pr_debug("attaching to current thread as disabled\n");
- threads = thread_map__new(-1, getpid(), UINT_MAX);
+ threads = thread_map__new_by_tid(getpid());
if (threads == NULL) {
pr_debug("thread_map__new\n");
return -1;
@@ -90,7 +88,7 @@ static int attach__current_enabled(struct evlist *evlist)
pr_debug("attaching to current thread as enabled\n");
- threads = thread_map__new(-1, getpid(), UINT_MAX);
+ threads = thread_map__new_by_tid(getpid());
if (threads == NULL) {
pr_debug("failed to call thread_map__new\n");
return -1;
diff --git a/tools/perf/tests/event_update.c b/tools/perf/tests/event_update.c
index 9301fde11366..cb9e6de2e033 100644
--- a/tools/perf/tests/event_update.c
+++ b/tools/perf/tests/event_update.c
@@ -109,8 +109,8 @@ static int test__event_update(struct test_suite *test __maybe_unused, int subtes
TEST_ASSERT_VAL("failed to synthesize attr update name",
!perf_event__synthesize_event_update_name(&tmp.tool, evsel, process_event_name));
- perf_cpu_map__put(evsel->core.own_cpus);
- evsel->core.own_cpus = perf_cpu_map__new("1,2,3");
+ perf_cpu_map__put(evsel->core.pmu_cpus);
+ evsel->core.pmu_cpus = perf_cpu_map__new("1,2,3");
TEST_ASSERT_VAL("failed to synthesize attr update cpus",
!perf_event__synthesize_event_update_cpus(&tmp.tool, evsel, process_event_cpus));
diff --git a/tools/perf/tests/expand-cgroup.c b/tools/perf/tests/expand-cgroup.c
index 31966ff856f8..c7b32a220ca1 100644
--- a/tools/perf/tests/expand-cgroup.c
+++ b/tools/perf/tests/expand-cgroup.c
@@ -13,8 +13,7 @@
#include <stdlib.h>
#include <string.h>
-static int test_expand_events(struct evlist *evlist,
- struct rblist *metric_events)
+static int test_expand_events(struct evlist *evlist)
{
int i, ret = TEST_FAIL;
int nr_events;
@@ -47,7 +46,7 @@ static int test_expand_events(struct evlist *evlist,
was_group_event = evsel__is_group_event(evlist__first(evlist));
nr_members = evlist__first(evlist)->core.nr_members;
- ret = evlist__expand_cgroup(evlist, cgrp_str, metric_events, false);
+ ret = evlist__expand_cgroup(evlist, cgrp_str, false);
if (ret < 0) {
pr_debug("failed to expand events for cgroups\n");
goto out;
@@ -100,13 +99,11 @@ out: for (i = 0; i < nr_events; i++)
static int expand_default_events(void)
{
int ret;
- struct rblist metric_events;
struct evlist *evlist = evlist__new_default();
TEST_ASSERT_VAL("failed to get evlist", evlist);
- rblist__init(&metric_events);
- ret = test_expand_events(evlist, &metric_events);
+ ret = test_expand_events(evlist);
evlist__delete(evlist);
return ret;
}
@@ -115,7 +112,6 @@ static int expand_group_events(void)
{
int ret;
struct evlist *evlist;
- struct rblist metric_events;
struct parse_events_error err;
const char event_str[] = "{cycles,instructions}";
@@ -132,8 +128,7 @@ static int expand_group_events(void)
goto out;
}
- rblist__init(&metric_events);
- ret = test_expand_events(evlist, &metric_events);
+ ret = test_expand_events(evlist);
out:
parse_events_error__exit(&err);
evlist__delete(evlist);
@@ -144,7 +139,6 @@ static int expand_libpfm_events(void)
{
int ret;
struct evlist *evlist;
- struct rblist metric_events;
const char event_str[] = "CYCLES";
struct option opt = {
.value = &evlist,
@@ -166,8 +160,7 @@ static int expand_libpfm_events(void)
goto out;
}
- rblist__init(&metric_events);
- ret = test_expand_events(evlist, &metric_events);
+ ret = test_expand_events(evlist);
out:
evlist__delete(evlist);
return ret;
@@ -177,25 +170,22 @@ static int expand_metric_events(void)
{
int ret;
struct evlist *evlist;
- struct rblist metric_events;
const char metric_str[] = "CPI";
const struct pmu_metrics_table *pme_test;
evlist = evlist__new();
TEST_ASSERT_VAL("failed to get evlist", evlist);
- rblist__init(&metric_events);
pme_test = find_core_metrics_table("testarch", "testcpu");
- ret = metricgroup__parse_groups_test(evlist, pme_test, metric_str, &metric_events);
+ ret = metricgroup__parse_groups_test(evlist, pme_test, metric_str);
if (ret < 0) {
pr_debug("failed to parse '%s' metric\n", metric_str);
goto out;
}
- ret = test_expand_events(evlist, &metric_events);
+ ret = test_expand_events(evlist);
out:
- metricgroup__rblist_exit(&metric_events);
evlist__delete(evlist);
return ret;
}
diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c
index 1e0f5a310fd5..3eb9ef8d7ec6 100644
--- a/tools/perf/tests/hists_cumulate.c
+++ b/tools/perf/tests/hists_cumulate.c
@@ -295,7 +295,7 @@ static int test1(struct evsel *evsel, struct machine *machine)
symbol_conf.cumulate_callchain = false;
evsel__reset_sample_bit(evsel, CALLCHAIN);
- setup_sorting(NULL);
+ setup_sorting(/*evlist=*/NULL, machine->env);
callchain_register_param(&callchain_param);
err = add_hist_entries(hists, machine);
@@ -442,7 +442,7 @@ static int test2(struct evsel *evsel, struct machine *machine)
symbol_conf.cumulate_callchain = false;
evsel__set_sample_bit(evsel, CALLCHAIN);
- setup_sorting(NULL);
+ setup_sorting(/*evlist=*/NULL, machine->env);
callchain_register_param(&callchain_param);
err = add_hist_entries(hists, machine);
@@ -500,7 +500,7 @@ static int test3(struct evsel *evsel, struct machine *machine)
symbol_conf.cumulate_callchain = true;
evsel__reset_sample_bit(evsel, CALLCHAIN);
- setup_sorting(NULL);
+ setup_sorting(/*evlist=*/NULL, machine->env);
callchain_register_param(&callchain_param);
err = add_hist_entries(hists, machine);
@@ -684,7 +684,7 @@ static int test4(struct evsel *evsel, struct machine *machine)
symbol_conf.cumulate_callchain = true;
evsel__set_sample_bit(evsel, CALLCHAIN);
- setup_sorting(NULL);
+ setup_sorting(/*evlist=*/NULL, machine->env);
callchain_param = callchain_param_default;
callchain_register_param(&callchain_param);
diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c
index 4b2e4f2fbe48..1cebd20cc91c 100644
--- a/tools/perf/tests/hists_filter.c
+++ b/tools/perf/tests/hists_filter.c
@@ -131,10 +131,6 @@ static int test__hists_filter(struct test_suite *test __maybe_unused, int subtes
goto out;
err = TEST_FAIL;
- /* default sort order (comm,dso,sym) will be used */
- if (setup_sorting(NULL) < 0)
- goto out;
-
machines__init(&machines);
/* setup threads/dso/map/symbols also */
@@ -145,6 +141,10 @@ static int test__hists_filter(struct test_suite *test __maybe_unused, int subtes
if (verbose > 1)
machine__fprintf(machine, stderr);
+ /* default sort order (comm,dso,sym) will be used */
+ if (setup_sorting(evlist, machine->env) < 0)
+ goto out;
+
/* process sample events */
err = add_hist_entries(evlist, machine);
if (err < 0)
diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c
index 5b6f1e883466..996f5f0b3bd1 100644
--- a/tools/perf/tests/hists_link.c
+++ b/tools/perf/tests/hists_link.c
@@ -303,10 +303,6 @@ static int test__hists_link(struct test_suite *test __maybe_unused, int subtest
goto out;
err = TEST_FAIL;
- /* default sort order (comm,dso,sym) will be used */
- if (setup_sorting(NULL) < 0)
- goto out;
-
machines__init(&machines);
/* setup threads/dso/map/symbols also */
@@ -317,6 +313,10 @@ static int test__hists_link(struct test_suite *test __maybe_unused, int subtest
if (verbose > 1)
machine__fprintf(machine, stderr);
+ /* default sort order (comm,dso,sym) will be used */
+ if (setup_sorting(evlist, machine->env) < 0)
+ goto out;
+
/* process sample events */
err = add_hist_entries(evlist, machine);
if (err < 0)
diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c
index 33b5cc8352a7..ee5ec8bda60e 100644
--- a/tools/perf/tests/hists_output.c
+++ b/tools/perf/tests/hists_output.c
@@ -146,7 +146,7 @@ static int test1(struct evsel *evsel, struct machine *machine)
field_order = NULL;
sort_order = NULL; /* equivalent to sort_order = "comm,dso,sym" */
- setup_sorting(NULL);
+ setup_sorting(/*evlist=*/NULL, machine->env);
/*
* expected output:
@@ -248,7 +248,7 @@ static int test2(struct evsel *evsel, struct machine *machine)
field_order = "overhead,cpu";
sort_order = "pid";
- setup_sorting(NULL);
+ setup_sorting(/*evlist=*/NULL, machine->env);
/*
* expected output:
@@ -304,7 +304,7 @@ static int test3(struct evsel *evsel, struct machine *machine)
field_order = "comm,overhead,dso";
sort_order = NULL;
- setup_sorting(NULL);
+ setup_sorting(/*evlist=*/NULL, machine->env);
/*
* expected output:
@@ -378,7 +378,7 @@ static int test4(struct evsel *evsel, struct machine *machine)
field_order = "dso,sym,comm,overhead,dso";
sort_order = "sym";
- setup_sorting(NULL);
+ setup_sorting(/*evlist=*/NULL, machine->env);
/*
* expected output:
@@ -480,7 +480,7 @@ static int test5(struct evsel *evsel, struct machine *machine)
field_order = "cpu,pid,comm,dso,sym";
sort_order = "dso,pid";
- setup_sorting(NULL);
+ setup_sorting(/*evlist=*/NULL, machine->env);
/*
* expected output:
diff --git a/tools/perf/tests/hwmon_pmu.c b/tools/perf/tests/hwmon_pmu.c
index 0837aca1cdfa..4aa4aac94f09 100644
--- a/tools/perf/tests/hwmon_pmu.c
+++ b/tools/perf/tests/hwmon_pmu.c
@@ -4,6 +4,7 @@
#include "hwmon_pmu.h"
#include "parse-events.h"
#include "tests.h"
+#include <errno.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <linux/compiler.h>
@@ -93,9 +94,10 @@ static struct perf_pmu *test_pmu_get(char *dir, size_t sz)
pr_err("Failed to mkdir hwmon directory\n");
goto err_out;
}
- hwmon_dirfd = openat(test_dirfd, "hwmon1234", O_DIRECTORY);
+ strncat(dir, "/hwmon1234", sz - strlen(dir));
+ hwmon_dirfd = open(dir, O_PATH|O_DIRECTORY);
if (hwmon_dirfd < 0) {
- pr_err("Failed to open test hwmon directory \"%s/hwmon1234\"\n", dir);
+ pr_err("Failed to open test hwmon directory \"%s\"\n", dir);
goto err_out;
}
file = openat(hwmon_dirfd, "name", O_WRONLY | O_CREAT, 0600);
@@ -130,18 +132,18 @@ static struct perf_pmu *test_pmu_get(char *dir, size_t sz)
}
/* Make the PMU reading the files created above. */
- hwm = perf_pmus__add_test_hwmon_pmu(hwmon_dirfd, "hwmon1234", test_hwmon_name);
+ hwm = perf_pmus__add_test_hwmon_pmu(dir, "hwmon1234", test_hwmon_name);
if (!hwm)
pr_err("Test hwmon creation failed\n");
err_out:
if (!hwm) {
test_pmu_put(dir, hwm);
- if (hwmon_dirfd >= 0)
- close(hwmon_dirfd);
}
if (test_dirfd >= 0)
close(test_dirfd);
+ if (hwmon_dirfd >= 0)
+ close(hwmon_dirfd);
return hwm;
}
diff --git a/tools/perf/tests/kallsyms-split.c b/tools/perf/tests/kallsyms-split.c
new file mode 100644
index 000000000000..bbbc66957e5d
--- /dev/null
+++ b/tools/perf/tests/kallsyms-split.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/compiler.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include "util/dso.h"
+#include "util/map.h"
+#include "util/symbol.h"
+#include "util/debug.h"
+#include "util/machine.h"
+#include "tests.h"
+
+/*
+ * This test checks that a bad symbol in a module doesn't split the kallsyms maps.
+ * The main_symbol[1-3] symbols should belong to the main [kernel.kallsyms] map even
+ * if the bad_symbol from the module is found in the middle.
+ */
+static char root_template[] = "/tmp/perf-test.XXXXXX";
+static char *root_dir;
+
+static const char proc_version[] = "Linux version X.Y.Z (just for perf test)\n";
+static const char proc_modules[] = "module 4096 1 - Live 0xffffffffcd000000\n";
+static const char proc_kallsyms[] =
+ "ffffffffab200000 T _stext\n"
+ "ffffffffab200010 T good_symbol\n"
+ "ffffffffab200020 t bad_symbol\n"
+ "ffffffffab200030 t main_symbol1\n"
+ "ffffffffab200040 t main_symbol2\n"
+ "ffffffffab200050 t main_symbol3\n"
+ "ffffffffab200060 T _etext\n"
+ "ffffffffcd000000 T start_module\t[module]\n"
+ "ffffffffab200020 u bad_symbol\t[module]\n"
+ "ffffffffcd000040 T end_module\t[module]\n";
+
+static struct {
+ const char *name;
+ const char *contents;
+ long len;
+} proc_files[] = {
+ { "version", proc_version, sizeof(proc_version) - 1 },
+ { "modules", proc_modules, sizeof(proc_modules) - 1 },
+ { "kallsyms", proc_kallsyms, sizeof(proc_kallsyms) - 1 },
+};
+
+static void remove_proc_dir(int sig __maybe_unused)
+{
+ char buf[128];
+
+ if (root_dir == NULL)
+ return;
+
+ for (unsigned i = 0; i < ARRAY_SIZE(proc_files); i++) {
+ scnprintf(buf, sizeof(buf), "%s/proc/%s", root_dir, proc_files[i].name);
+ remove(buf);
+ }
+
+ scnprintf(buf, sizeof(buf), "%s/proc", root_dir);
+ rmdir(buf);
+
+ rmdir(root_dir);
+ root_dir = NULL;
+}
+
+static int create_proc_dir(void)
+{
+ char buf[128];
+
+ root_dir = mkdtemp(root_template);
+ if (root_dir == NULL)
+ return -1;
+
+ scnprintf(buf, sizeof(buf), "%s/proc", root_dir);
+ if (mkdir(buf, 0700) < 0)
+ goto err;
+
+ for (unsigned i = 0; i < ARRAY_SIZE(proc_files); i++) {
+ int fd, len;
+
+ scnprintf(buf, sizeof(buf), "%s/proc/%s", root_dir, proc_files[i].name);
+ fd = open(buf, O_RDWR | O_CREAT, 0600);
+ if (fd < 0)
+ goto err;
+
+ len = write(fd, proc_files[i].contents, proc_files[i].len);
+ close(fd);
+ if (len != proc_files[i].len)
+ goto err;
+ }
+ return 0;
+
+err:
+ remove_proc_dir(0);
+ return -1;
+}
+
+static int test__kallsyms_split(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ struct machine m;
+ struct map *map = NULL;
+ int ret = TEST_FAIL;
+
+ pr_debug("try to create fake root directory\n");
+ if (create_proc_dir() < 0) {
+ pr_debug("SKIP: cannot create a fake root directory\n");
+ return TEST_SKIP;
+ }
+
+ signal(SIGINT, remove_proc_dir);
+ signal(SIGPIPE, remove_proc_dir);
+ signal(SIGSEGV, remove_proc_dir);
+ signal(SIGTERM, remove_proc_dir);
+
+ pr_debug("create kernel maps from the fake root directory\n");
+ machine__init(&m, root_dir, HOST_KERNEL_ID);
+ if (machine__create_kernel_maps(&m) < 0) {
+ pr_debug("FAIL: failed to create kernel maps\n");
+ goto out;
+ }
+
+ /* force to use /proc/kallsyms */
+ symbol_conf.ignore_vmlinux = true;
+ symbol_conf.ignore_vmlinux_buildid = true;
+ symbol_conf.allow_aliases = true;
+
+ if (map__load(machine__kernel_map(&m)) < 0) {
+ pr_debug("FAIL: failed to load kallsyms\n");
+ goto out;
+ }
+
+ pr_debug("kernel map loaded - check symbol and map\n");
+ if (maps__nr_maps(machine__kernel_maps(&m)) != 2) {
+ pr_debug("FAIL: it should have the kernel and a module, but has %u maps\n",
+ maps__nr_maps(machine__kernel_maps(&m)));
+ goto out;
+ }
+
+ if (machine__find_kernel_symbol_by_name(&m, "main_symbol3", &map) == NULL) {
+ pr_debug("FAIL: failed to find a symbol\n");
+ goto out;
+ }
+
+ if (!RC_CHK_EQUAL(map, machine__kernel_map(&m))) {
+ pr_debug("FAIL: the symbol is not in the kernel map\n");
+ goto out;
+ }
+ ret = TEST_OK;
+
+out:
+ remove_proc_dir(0);
+ machine__exit(&m);
+ return ret;
+}
+
+DEFINE_SUITE("split kallsyms", kallsyms_split);
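The fixture above carries the whole point of the test: the module entry bad_symbol sits at 0xffffffffab200020, inside the kernel's [_stext, _etext) range, while the module itself loads at 0xffffffffcd000000. A kallsyms loader that trusted the module annotation would split the kernel map around that address; the test instead asserts exactly two maps (kernel plus module) and that main_symbol3 resolves into the kernel map. An illustrative range check, not code from the tree:

    /* A symbol claimed by a module but whose address falls inside the
     * kernel text range must not start a new map.
     */
    static bool in_kernel_text(unsigned long long addr,
                               unsigned long long stext,
                               unsigned long long etext)
    {
            return addr >= stext && addr < etext;
    }
    /* in_kernel_text(0xffffffffab200020ULL,   // bad_symbol
     *                0xffffffffab200000ULL,   // _stext
     *                0xffffffffab200060ULL);  // _etext -> true
     */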
diff --git a/tools/perf/tests/keep-tracking.c b/tools/perf/tests/keep-tracking.c
index 5a3b2bed07f3..729cc9cc1cb7 100644
--- a/tools/perf/tests/keep-tracking.c
+++ b/tools/perf/tests/keep-tracking.c
@@ -78,7 +78,7 @@ static int test__keep_tracking(struct test_suite *test __maybe_unused, int subte
int found, err = -1;
const char *comm;
- threads = thread_map__new(-1, getpid(), UINT_MAX);
+ threads = thread_map__new_by_tid(getpid());
CHECK_NOT_NULL__(threads);
cpus = perf_cpu_map__new_online_cpus();
@@ -90,7 +90,7 @@ static int test__keep_tracking(struct test_suite *test __maybe_unused, int subte
perf_evlist__set_maps(&evlist->core, cpus, threads);
CHECK__(parse_event(evlist, "dummy:u"));
- CHECK__(parse_event(evlist, "cycles:u"));
+ CHECK__(parse_event(evlist, "cpu-cycles:u"));
evlist__config(evlist, &opts, NULL);
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index 0ee94caf9ec1..6641701e4828 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -73,25 +73,24 @@ make_extra_tests := EXTRA_TESTS=1
make_jevents_all := JEVENTS_ARCH=all
make_no_bpf_skel := BUILD_BPF_SKEL=0
make_gen_vmlinux_h := GEN_VMLINUX_H=1
-make_no_libperl := NO_LIBPERL=1
+make_libperl := LIBPERL=1
make_no_libpython := NO_LIBPYTHON=1
-make_no_scripts := NO_LIBPYTHON=1 NO_LIBPERL=1
+make_no_scripts := NO_LIBPYTHON=1
make_no_slang := NO_SLANG=1
make_no_gtk2 := NO_GTK2=1
make_no_ui := NO_SLANG=1 NO_GTK2=1
make_no_demangle := NO_DEMANGLE=1
make_no_libelf := NO_LIBELF=1
+make_no_libdw := NO_LIBDW=1
make_libunwind := LIBUNWIND=1
make_no_libdw_dwarf_unwind := NO_LIBDW_DWARF_UNWIND=1
make_no_backtrace := NO_BACKTRACE=1
make_no_libcapstone := NO_CAPSTONE=1
make_no_libnuma := NO_LIBNUMA=1
make_no_libbionic := NO_LIBBIONIC=1
-make_no_auxtrace := NO_AUXTRACE=1
make_no_libbpf := NO_LIBBPF=1
make_libbpf_dynamic := LIBBPF_DYNAMIC=1
make_no_libbpf_DEBUG := NO_LIBBPF=1 DEBUG=1
-make_no_libcrypto := NO_LIBCRYPTO=1
make_no_libllvm := NO_LIBLLVM=1
make_with_babeltrace:= LIBBABELTRACE=1
make_with_coresight := CORESIGHT=1
@@ -118,11 +117,11 @@ make_install_prefix_slash := install prefix=/tmp/krava/
make_static := LDFLAGS=-static NO_PERF_READ_VDSO32=1 NO_PERF_READ_VDSOX32=1 NO_JVMTI=1 NO_LIBTRACEEVENT=1 NO_LIBELF=1
# all the NO_* variable combined
-make_minimal := NO_LIBPERL=1 NO_LIBPYTHON=1 NO_GTK2=1
+make_minimal := NO_LIBPYTHON=1 NO_GTK2=1
make_minimal += NO_DEMANGLE=1 NO_LIBELF=1 NO_BACKTRACE=1
-make_minimal += NO_LIBNUMA=1 NO_LIBBIONIC=1
-make_minimal += NO_LIBDW_DWARF_UNWIND=1 NO_AUXTRACE=1 NO_LIBBPF=1
-make_minimal += NO_LIBCRYPTO=1 NO_SDT=1 NO_JVMTI=1 NO_LIBZSTD=1
+make_minimal += NO_LIBNUMA=1 NO_LIBBIONIC=1 NO_LIBDW=1
+make_minimal += NO_LIBDW_DWARF_UNWIND=1 NO_LIBBPF=1
+make_minimal += NO_SDT=1 NO_JVMTI=1 NO_LIBZSTD=1
make_minimal += NO_LIBCAP=1 NO_CAPSTONE=1
# $(run) contains all available tests
@@ -143,7 +142,7 @@ run += make_extra_tests
run += make_jevents_all
run += make_no_bpf_skel
run += make_gen_vmlinux_h
-run += make_no_libperl
+run += make_libperl
run += make_no_libpython
run += make_no_scripts
run += make_no_slang
@@ -151,16 +150,15 @@ run += make_no_gtk2
run += make_no_ui
run += make_no_demangle
run += make_no_libelf
+run += make_no_libdw
run += make_libunwind
run += make_no_libdw_dwarf_unwind
run += make_no_backtrace
run += make_no_libcapstone
run += make_no_libnuma
run += make_no_libbionic
-run += make_no_auxtrace
run += make_no_libbpf
run += make_no_libbpf_DEBUG
-run += make_no_libcrypto
run += make_no_libllvm
run += make_no_sdt
run += make_no_syscall_tbl
diff --git a/tools/perf/tests/maps.c b/tools/perf/tests/maps.c
index 4f1f9385ea9c..2c05d62f40dc 100644
--- a/tools/perf/tests/maps.c
+++ b/tools/perf/tests/maps.c
@@ -162,4 +162,84 @@ static int test__maps__merge_in(struct test_suite *t __maybe_unused, int subtest
return TEST_OK;
}
-DEFINE_SUITE("maps__merge_in", maps__merge_in);
+static int test__maps__fixup_overlap_and_insert(struct test_suite *t __maybe_unused,
+ int subtest __maybe_unused)
+{
+ struct map_def initial_maps[] = {
+ { "target_map", 1000, 2000 },
+ { "next_map", 3000, 4000 },
+ };
+ struct map_def insert_split = { "split_map", 1400, 1600 };
+ struct map_def expected_after_split[] = {
+ { "target_map", 1000, 1400 },
+ { "split_map", 1400, 1600 },
+ { "target_map", 1600, 2000 },
+ { "next_map", 3000, 4000 },
+ };
+
+ struct map_def insert_eclipse = { "eclipse_map", 2500, 4500 };
+ struct map_def expected_final[] = {
+ { "target_map", 1000, 1400 },
+ { "split_map", 1400, 1600 },
+ { "target_map", 1600, 2000 },
+ { "eclipse_map", 2500, 4500 },
+ /* "next_map" (3000-4000) is removed */
+ };
+
+ struct map *map_split, *map_eclipse;
+ int ret;
+ unsigned int i;
+ struct maps *maps = maps__new(NULL);
+
+ TEST_ASSERT_VAL("failed to create maps", maps);
+
+ for (i = 0; i < ARRAY_SIZE(initial_maps); i++) {
+ struct map *map = dso__new_map(initial_maps[i].name);
+
+ TEST_ASSERT_VAL("failed to create map", map);
+ map__set_start(map, initial_maps[i].start);
+ map__set_end(map, initial_maps[i].end);
+ TEST_ASSERT_VAL("failed to insert map", maps__insert(maps, map) == 0);
+ map__put(map);
+ }
+
+ // Check splitting.
+ map_split = dso__new_map(insert_split.name);
+ TEST_ASSERT_VAL("failed to create split map", map_split);
+ map__set_start(map_split, insert_split.start);
+ map__set_end(map_split, insert_split.end);
+
+ ret = maps__fixup_overlap_and_insert(maps, map_split);
+ TEST_ASSERT_VAL("failed to fixup and insert split map", !ret);
+
+ map__zput(map_split);
+ ret = check_maps(expected_after_split, ARRAY_SIZE(expected_after_split), maps);
+ TEST_ASSERT_VAL("split check failed", !ret);
+
+ // Check covering one map entirely with another.
+ map_eclipse = dso__new_map(insert_eclipse.name);
+ TEST_ASSERT_VAL("failed to create eclipse map", map_eclipse);
+ map__set_start(map_eclipse, insert_eclipse.start);
+ map__set_end(map_eclipse, insert_eclipse.end);
+
+ ret = maps__fixup_overlap_and_insert(maps, map_eclipse);
+ TEST_ASSERT_VAL("failed to fixup and insert eclipse map", !ret);
+
+ map__zput(map_eclipse);
+ ret = check_maps(expected_final, ARRAY_SIZE(expected_final), maps);
+ TEST_ASSERT_VAL("eclipse check failed", !ret);
+
+ maps__zput(maps);
+ return TEST_OK;
+}
+
+static struct test_case tests__maps[] = {
+ TEST_CASE("Test merge_in interface", maps__merge_in),
+ TEST_CASE("Test fix up overlap interface", maps__fixup_overlap_and_insert),
+ { .name = NULL, }
+};
+
+struct test_suite suite__maps = {
+ .desc = "Maps - per process mmaps abstraction",
+ .test_cases = tests__maps,
+};
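The new test case reuses struct map_def and check_maps() from the merge_in test earlier in maps.c, which this hunk does not show. For reading the expected_* arrays, the struct presumably has this shape, inferred from the initialisers above:

    /* Assumed definition; the real one lives near the top of
     * tools/perf/tests/maps.c.
     */
    struct map_def {
            const char *name;
            u64 start;
            u64 end;
    };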
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c
index bd2106628b34..3313c236104e 100644
--- a/tools/perf/tests/mmap-basic.c
+++ b/tools/perf/tests/mmap-basic.c
@@ -1,15 +1,18 @@
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
+#include <fcntl.h>
#include <inttypes.h>
#include <stdlib.h>
#include <perf/cpumap.h>
+#include "cpumap.h"
#include "debug.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "tests.h"
+#include "util/affinity.h"
#include "util/mmap.h"
#include "util/sample.h"
#include <linux/err.h>
@@ -46,7 +49,7 @@ static int test__basic_mmap(struct test_suite *test __maybe_unused, int subtest
char sbuf[STRERR_BUFSIZE];
struct mmap *md;
- threads = thread_map__new(-1, getpid(), UINT_MAX);
+ threads = thread_map__new_by_tid(getpid());
if (threads == NULL) {
pr_debug("thread_map__new\n");
return -1;
@@ -172,99 +175,199 @@ out_free_threads:
return err;
}
-static int test_stat_user_read(int event)
-{
- struct perf_counts_values counts = { .val = 0 };
- struct perf_thread_map *threads;
- struct perf_evsel *evsel;
- struct perf_event_mmap_page *pc;
- struct perf_event_attr attr = {
- .type = PERF_TYPE_HARDWARE,
- .config = event,
-#ifdef __aarch64__
- .config1 = 0x2, /* Request user access */
-#endif
- };
- int err, i, ret = TEST_FAIL;
- bool opened = false, mapped = false;
+enum user_read_state {
+ USER_READ_ENABLED,
+ USER_READ_DISABLED,
+ USER_READ_UNKNOWN,
+};
- threads = perf_thread_map__new_dummy();
- TEST_ASSERT_VAL("failed to create threads", threads);
+static enum user_read_state set_user_read(struct perf_pmu *pmu, enum user_read_state enabled)
+{
+ char buf[2] = {0, '\n'};
+ ssize_t len;
+ int events_fd, rdpmc_fd;
+ enum user_read_state old_user_read = USER_READ_UNKNOWN;
+
+ if (enabled == USER_READ_UNKNOWN)
+ return USER_READ_UNKNOWN;
+
+ events_fd = perf_pmu__event_source_devices_fd();
+ if (events_fd < 0)
+ return USER_READ_UNKNOWN;
+
+ rdpmc_fd = perf_pmu__pathname_fd(events_fd, pmu->name, "rdpmc", O_RDWR);
+ if (rdpmc_fd < 0) {
+ close(events_fd);
+ return USER_READ_UNKNOWN;
+ }
- perf_thread_map__set_pid(threads, 0, 0);
+ len = read(rdpmc_fd, buf, sizeof(buf));
+ if (len != sizeof(buf))
+ pr_debug("%s read failed\n", __func__);
- evsel = perf_evsel__new(&attr);
- TEST_ASSERT_VAL("failed to create evsel", evsel);
+ // Note: on Intel hybrid, disabling rdpmc on one PMU implicitly disables it on
+ // all the core PMUs.
+ old_user_read = (buf[0] == '1') ? USER_READ_ENABLED : USER_READ_DISABLED;
- err = perf_evsel__open(evsel, NULL, threads);
- if (err) {
- pr_err("failed to open evsel: %s\n", strerror(-err));
- ret = TEST_SKIP;
- goto out;
+ if (enabled != old_user_read) {
+ buf[0] = (enabled == USER_READ_ENABLED) ? '1' : '0';
+ len = write(rdpmc_fd, buf, sizeof(buf));
+ if (len != sizeof(buf))
+ pr_debug("%s write failed\n", __func__);
}
- opened = true;
+ close(rdpmc_fd);
+ close(events_fd);
+ return old_user_read;
+}
- err = perf_evsel__mmap(evsel, 0);
- if (err) {
- pr_err("failed to mmap evsel: %s\n", strerror(-err));
- goto out;
+static int test_stat_user_read(u64 event, enum user_read_state enabled)
+{
+ struct perf_pmu *pmu = NULL;
+ struct perf_thread_map *threads = perf_thread_map__new_dummy();
+ int ret = TEST_OK;
+
+ pr_err("User space counter reading %" PRIu64 "\n", event);
+ if (!threads) {
+ pr_err("User space counter reading [Failed to create threads]\n");
+ return TEST_FAIL;
}
- mapped = true;
+ perf_thread_map__set_pid(threads, 0, 0);
- pc = perf_evsel__mmap_base(evsel, 0, 0);
- if (!pc) {
- pr_err("failed to get mmapped address\n");
- goto out;
- }
+ while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
+ enum user_read_state saved_user_read_state = set_user_read(pmu, enabled);
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = perf_pmus__supports_extended_type()
+ ? event | ((u64)pmu->type << PERF_PMU_TYPE_SHIFT)
+ : event,
+#ifdef __aarch64__
+ .config1 = 0x2, /* Request user access */
+#endif
+ };
+ struct perf_evsel *evsel = NULL;
+ int err;
+ struct perf_event_mmap_page *pc;
+ bool mapped = false, opened = false, rdpmc_supported;
+ struct perf_counts_values counts = { .val = 0 };
+
+ pr_debug("User space counter reading for PMU %s\n", pmu->name);
+ /*
+ * Restrict scheduling to only use the rdpmc on the CPUs the
+ * event can be on. If the test doesn't run on the CPU of the
+ * event then the event will be disabled and the pc->index test
+ * will fail.
+ */
+ if (pmu->cpus != NULL)
+ cpu_map__set_affinity(pmu->cpus);
+
+ /* Make the evsel. */
+ evsel = perf_evsel__new(&attr);
+ if (!evsel) {
+ pr_err("User space counter reading for PMU %s [Failed to allocate evsel]\n",
+ pmu->name);
+ ret = TEST_FAIL;
+ goto cleanup;
+ }
- if (!pc->cap_user_rdpmc || !pc->index) {
- pr_err("userspace counter access not %s\n",
- !pc->cap_user_rdpmc ? "supported" : "enabled");
- ret = TEST_SKIP;
- goto out;
- }
- if (pc->pmc_width < 32) {
- pr_err("userspace counter width not set (%d)\n", pc->pmc_width);
- goto out;
- }
+ err = perf_evsel__open(evsel, NULL, threads);
+ if (err) {
+ pr_err("User space counter reading for PMU %s [Failed to open evsel]\n",
+ pmu->name);
+ ret = TEST_SKIP;
+ goto cleanup;
+ }
+ opened = true;
+ err = perf_evsel__mmap(evsel, 0);
+ if (err) {
+ pr_err("User space counter reading for PMU %s [Failed to mmap evsel]\n",
+ pmu->name);
+ ret = TEST_FAIL;
+ goto cleanup;
+ }
+ mapped = true;
+
+ pc = perf_evsel__mmap_base(evsel, 0, 0);
+ if (!pc) {
+ pr_err("User space counter reading for PMU %s [Failed to get mmaped address]\n",
+ pmu->name);
+ ret = TEST_FAIL;
+ goto cleanup;
+ }
- perf_evsel__read(evsel, 0, 0, &counts);
- if (counts.val == 0) {
- pr_err("failed to read value for evsel\n");
- goto out;
- }
+ if (saved_user_read_state == USER_READ_UNKNOWN)
+ rdpmc_supported = pc->cap_user_rdpmc && pc->index;
+ else
+ rdpmc_supported = (enabled == USER_READ_ENABLED);
- for (i = 0; i < 5; i++) {
- volatile int count = 0x10000 << i;
- __u64 start, end, last = 0;
+ if (rdpmc_supported && (!pc->cap_user_rdpmc || !pc->index)) {
+ pr_err("User space counter reading for PMU %s [Failed unexpected supported counter access %d %d]\n",
+ pmu->name, pc->cap_user_rdpmc, pc->index);
+ ret = TEST_FAIL;
+ goto cleanup;
+ }
- pr_debug("\tloop = %u, ", count);
+ if (!rdpmc_supported && pc->cap_user_rdpmc) {
+ pr_err("User space counter reading for PMU %s [Failed unexpected unsupported counter access %d]\n",
+ pmu->name, pc->cap_user_rdpmc);
+ ret = TEST_FAIL;
+ goto cleanup;
+ }
+
+ if (rdpmc_supported && pc->pmc_width < 32) {
+ pr_err("User space counter reading for PMU %s [Failed width not set %d]\n",
+ pmu->name, pc->pmc_width);
+ ret = TEST_FAIL;
+ goto cleanup;
+ }
perf_evsel__read(evsel, 0, 0, &counts);
- start = counts.val;
+ if (rdpmc_supported && counts.val == 0) {
+ pr_err("User space counter reading for PMU %s [Failed read]\n", pmu->name);
+ ret = TEST_FAIL;
+ goto cleanup;
+ }
- while (count--) ;
+ for (int i = 0; i < 5; i++) {
+ volatile int count = 0x10000 << i;
+ __u64 start, end, last = 0;
- perf_evsel__read(evsel, 0, 0, &counts);
- end = counts.val;
+ pr_debug("\tloop = %u, ", count);
- if ((end - start) < last) {
- pr_err("invalid counter data: end=%llu start=%llu last= %llu\n",
- end, start, last);
- goto out;
- }
- last = end - start;
- pr_debug("count = %llu\n", end - start);
- }
- ret = TEST_OK;
+ perf_evsel__read(evsel, 0, 0, &counts);
+ start = counts.val;
+
+ while (count--) ;
-out:
- if (mapped)
- perf_evsel__munmap(evsel);
- if (opened)
- perf_evsel__close(evsel);
- perf_evsel__delete(evsel);
+ perf_evsel__read(evsel, 0, 0, &counts);
+ end = counts.val;
+ if ((end - start) < last) {
+ pr_err("User space counter reading for PMU %s [Failed invalid counter data: end=%llu start=%llu last= %llu]\n",
+ pmu->name, end, start, last);
+ ret = TEST_FAIL;
+ goto cleanup;
+ }
+ last = end - start;
+ pr_debug("count = %llu\n", last);
+ }
+ pr_debug("User space counter reading for PMU %s [Success]\n", pmu->name);
+cleanup:
+ if (mapped)
+ perf_evsel__munmap(evsel);
+ if (opened)
+ perf_evsel__close(evsel);
+ perf_evsel__delete(evsel);
+
+ /* If the affinity was changed, then put it back to all CPUs. */
+ if (pmu->cpus != NULL) {
+ struct perf_cpu_map *cpus = cpu_map__online();
+
+ cpu_map__set_affinity(cpus);
+ perf_cpu_map__put(cpus);
+ }
+ set_user_read(pmu, saved_user_read_state);
+ }
perf_thread_map__put(threads);
return ret;
}
@@ -272,20 +375,32 @@ out:
static int test__mmap_user_read_instr(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
- return test_stat_user_read(PERF_COUNT_HW_INSTRUCTIONS);
+ return test_stat_user_read(PERF_COUNT_HW_INSTRUCTIONS, USER_READ_ENABLED);
}
static int test__mmap_user_read_cycles(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
- return test_stat_user_read(PERF_COUNT_HW_CPU_CYCLES);
+ return test_stat_user_read(PERF_COUNT_HW_CPU_CYCLES, USER_READ_ENABLED);
+}
+
+static int test__mmap_user_read_instr_disabled(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ return test_stat_user_read(PERF_COUNT_HW_INSTRUCTIONS, USER_READ_DISABLED);
+}
+
+static int test__mmap_user_read_cycles_disabled(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ return test_stat_user_read(PERF_COUNT_HW_CPU_CYCLES, USER_READ_DISABLED);
}
static struct test_case tests__basic_mmap[] = {
TEST_CASE_REASON("Read samples using the mmap interface",
basic_mmap,
"permissions"),
- TEST_CASE_REASON("User space counter reading of instructions",
+ TEST_CASE_REASON_EXCLUSIVE("User space counter reading of instructions",
mmap_user_read_instr,
#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
(defined(__riscv) && __riscv_xlen == 64)
@@ -294,7 +409,7 @@ static struct test_case tests__basic_mmap[] = {
"unsupported"
#endif
),
- TEST_CASE_REASON("User space counter reading of cycles",
+ TEST_CASE_REASON_EXCLUSIVE("User space counter reading of cycles",
mmap_user_read_cycles,
#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
(defined(__riscv) && __riscv_xlen == 64)
@@ -303,6 +418,24 @@ static struct test_case tests__basic_mmap[] = {
"unsupported"
#endif
),
+ TEST_CASE_REASON_EXCLUSIVE("User space counter disabling instructions",
+ mmap_user_read_instr_disabled,
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
+ (defined(__riscv) && __riscv_xlen == 64)
+ "permissions"
+#else
+ "unsupported"
+#endif
+ ),
+ TEST_CASE_REASON_EXCLUSIVE("User space counter disabling cycles",
+ mmap_user_read_cycles_disabled,
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
+ (defined(__riscv) && __riscv_xlen == 64)
+ "permissions"
+#else
+ "unsupported"
+#endif
+ ),
{ .name = NULL, }
};
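set_user_read() above flips the per-PMU rdpmc attribute through the perf_pmu__event_source_devices_fd()/perf_pmu__pathname_fd() helpers, i.e. the sysfs file /sys/bus/event_source/devices/<pmu>/rdpmc ('1' permits user-space counter reads, '0' forbids them), saving the old state so it can be restored after each PMU's run. A standalone sketch of reading that knob, with the path spelled out rather than resolved through the helpers:

    /* Sketch: read the rdpmc state for one PMU directly from sysfs. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    static int read_rdpmc(const char *pmu_name)
    {
            char path[256], val = '?';
            int fd;

            snprintf(path, sizeof(path),
                     "/sys/bus/event_source/devices/%s/rdpmc", pmu_name);
            fd = open(path, O_RDONLY);
            if (fd < 0)
                    return -1;
            if (read(fd, &val, 1) != 1)
                    val = '?';
            close(fd);
            return val == '?' ? -1 : val - '0';
    }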
diff --git a/tools/perf/tests/mmap-thread-lookup.c b/tools/perf/tests/mmap-thread-lookup.c
index 446a3615d720..0c5619c6e6e9 100644
--- a/tools/perf/tests/mmap-thread-lookup.c
+++ b/tools/perf/tests/mmap-thread-lookup.c
@@ -8,6 +8,7 @@
#include <stdlib.h>
#include <stdio.h>
#include "debug.h"
+#include "env.h"
#include "event.h"
#include "tests.h"
#include "machine.h"
@@ -155,6 +156,7 @@ static int synth_process(struct machine *machine)
static int mmap_events(synth_cb synth)
{
+ struct perf_env host_env;
struct machine *machine;
int err, i;
@@ -167,7 +169,8 @@ static int mmap_events(synth_cb synth)
*/
TEST_ASSERT_VAL("failed to create threads", !threads_create());
- machine = machine__new_host();
+ perf_env__init(&host_env);
+ machine = machine__new_host(&host_env);
dump_trace = verbose > 1 ? 1 : 0;
@@ -209,6 +212,7 @@ static int mmap_events(synth_cb synth)
}
machine__delete(machine);
+ perf_env__exit(&host_env);
return err;
}
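machine__new_host() now takes a caller-owned perf_env, so the test gains a matching perf_env__init()/perf_env__exit() pair. The lifetime rule the hunk follows, shown in isolation:

    /* The env must outlive the machine that references it. */
    struct perf_env host_env;
    struct machine *machine;

    perf_env__init(&host_env);
    machine = machine__new_host(&host_env);
    /* ... run the test against machine ... */
    machine__delete(machine);
    perf_env__exit(&host_env);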
diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c
index fb114118c876..3644d6f52c07 100644
--- a/tools/perf/tests/openat-syscall-all-cpus.c
+++ b/tools/perf/tests/openat-syscall-all-cpus.c
@@ -28,7 +28,7 @@ static int test__openat_syscall_event_on_all_cpus(struct test_suite *test __mayb
struct evsel *evsel;
unsigned int nr_openat_calls = 111, i;
cpu_set_t cpu_set;
- struct perf_thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
+ struct perf_thread_map *threads = thread_map__new_by_tid(getpid());
char sbuf[STRERR_BUFSIZE];
char errbuf[BUFSIZ];
diff --git a/tools/perf/tests/openat-syscall-tp-fields.c b/tools/perf/tests/openat-syscall-tp-fields.c
index 0ef4ba7c1571..2a139d2781a8 100644
--- a/tools/perf/tests/openat-syscall-tp-fields.c
+++ b/tools/perf/tests/openat-syscall-tp-fields.c
@@ -28,7 +28,6 @@ static int test__syscall_openat_tp_fields(struct test_suite *test __maybe_unused
{
struct record_opts opts = {
.target = {
- .uid = UINT_MAX,
.uses_mmap = true,
},
.no_buffering = true,
diff --git a/tools/perf/tests/openat-syscall.c b/tools/perf/tests/openat-syscall.c
index 131b62271bfa..b54cbe5f1808 100644
--- a/tools/perf/tests/openat-syscall.c
+++ b/tools/perf/tests/openat-syscall.c
@@ -20,7 +20,7 @@ static int test__openat_syscall_event(struct test_suite *test __maybe_unused,
int err = TEST_FAIL, fd;
struct evsel *evsel;
unsigned int nr_openat_calls = 111, i;
- struct perf_thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
+ struct perf_thread_map *threads = thread_map__new_by_tid(getpid());
char sbuf[STRERR_BUFSIZE];
char errbuf[BUFSIZ];
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 5ec2e5607987..128d21dc389f 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
#include "parse-events.h"
#include "evsel.h"
+#include "evsel_fprintf.h"
#include "evlist.h"
#include <api/fs/fs.h>
#include "tests.h"
#include "debug.h"
#include "pmu.h"
#include "pmus.h"
+#include "strbuf.h"
#include <dirent.h>
#include <errno.h>
#include "fncache.h"
@@ -20,38 +22,61 @@
#define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \
PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)
-static int num_core_entries(void)
+static bool check_evlist(const char *test, int line, bool cond, struct evlist *evlist)
{
- /*
- * If the kernel supports extended type, expect events to be
- * opened once for each core PMU type. Otherwise fall back to the legacy
- * behavior of opening only one event even though there are multiple
- * PMUs
- */
- if (perf_pmus__supports_extended_type())
- return perf_pmus__num_core_pmus();
+ struct strbuf sb = STRBUF_INIT;
- return 1;
+ if (cond)
+ return true;
+
+ evlist__format_evsels(evlist, &sb, 2048);
+ pr_debug("FAILED %s:%d: %s\nFor evlist: %s\n", __FILE__, line, test, sb.buf);
+ strbuf_release(&sb);
+ return false;
}
+#define TEST_ASSERT_EVLIST(test, cond, evlist) \
+ if (!check_evlist(test, __LINE__, cond, evlist)) \
+ return TEST_FAIL
-static bool test_config(const struct evsel *evsel, __u64 expected_config)
+static bool check_evsel(const char *test, int line, bool cond, struct evsel *evsel)
{
- __u32 type = evsel->core.attr.type;
- __u64 config = evsel->core.attr.config;
+ struct perf_attr_details details = { .verbose = true, };
- if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE) {
- /*
- * HARDWARE and HW_CACHE events encode the PMU's extended type
- * in the top 32-bits. Mask in order to ignore.
- */
- config &= PERF_HW_EVENT_MASK;
+ if (cond)
+ return true;
+
+ pr_debug("FAILED %s:%d: %s\nFor evsel: ", __FILE__, line, test);
+ evsel__fprintf(evsel, &details, debug_file());
+ return false;
+}
+#define TEST_ASSERT_EVSEL(test, cond, evsel) \
+ if (!check_evsel(test, __LINE__, cond, evsel)) \
+ return TEST_FAIL
+
+static int num_core_entries(struct evlist *evlist)
+{
+ /*
+ * Returns the number of core PMUs if the evlist has more than one core
+ * PMU, otherwise returns 1. The number of core PMUs is needed because
+ * wildcarding can open an event for each core PMU. If the events were
+ * opened with a specified PMU then wildcarding won't happen.
+ */
+ struct perf_pmu *core_pmu = NULL;
+ struct evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (!evsel->pmu->is_core)
+ continue;
+ if (core_pmu != evsel->pmu && core_pmu != NULL)
+ return perf_pmus__num_core_pmus();
+ core_pmu = evsel->pmu;
}
- return config == expected_config;
+ return 1;
}
-static bool test_perf_config(const struct perf_evsel *evsel, __u64 expected_config)
+static bool test_hw_config(const struct evsel *evsel, __u64 expected_config)
{
- return (evsel->attr.config & PERF_HW_EVENT_MASK) == expected_config;
+ return (evsel->core.attr.config & PERF_HW_EVENT_MASK) == expected_config;
}
#if defined(__s390x__)
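check_evlist() and check_evsel() above exist so that a failed assertion prints the offending evlist (via evlist__format_evsels()) or evsel (via evsel__fprintf()) instead of only the condition text. At a call site the macros read as in the conversions that follow:

    /* On failure: log file:line and the message, dump the evlist or
     * evsel, and return TEST_FAIL from the enclosing test function.
     */
    TEST_ASSERT_EVLIST("wrong number of entries",
                       1 == evlist->core.nr_entries, evlist);
    TEST_ASSERT_EVSEL("wrong type",
                      PERF_TYPE_TRACEPOINT == evsel->core.attr.type, evsel);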
@@ -84,12 +109,12 @@ static int test__checkevent_tracepoint(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
- TEST_ASSERT_VAL("wrong number of groups", 0 == evlist__nr_groups(evlist));
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong sample_type",
- PERF_TP_SAMPLE_TYPE == evsel->core.attr.sample_type);
- TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->core.attr.sample_period);
+ TEST_ASSERT_EVLIST("wrong number of entries", 1 == evlist->core.nr_entries, evlist);
+ TEST_ASSERT_EVLIST("wrong number of groups", 0 == evlist__nr_groups(evlist), evlist);
+ TEST_ASSERT_EVSEL("wrong type", PERF_TYPE_TRACEPOINT == evsel->core.attr.type, evsel);
+ TEST_ASSERT_EVSEL("wrong sample_type",
+ PERF_TP_SAMPLE_TYPE == evsel->core.attr.sample_type, evsel);
+ TEST_ASSERT_EVSEL("wrong sample_period", 1 == evsel->core.attr.sample_period, evsel);
return TEST_OK;
}
@@ -97,34 +122,38 @@ static int test__checkevent_tracepoint_multi(struct evlist *evlist)
{
struct evsel *evsel;
- TEST_ASSERT_VAL("wrong number of entries", evlist->core.nr_entries > 1);
- TEST_ASSERT_VAL("wrong number of groups", 0 == evlist__nr_groups(evlist));
+ TEST_ASSERT_EVLIST("wrong number of entries", evlist->core.nr_entries > 1, evlist);
+ TEST_ASSERT_EVLIST("wrong number of groups", 0 == evlist__nr_groups(evlist), evlist);
evlist__for_each_entry(evlist, evsel) {
- TEST_ASSERT_VAL("wrong type",
- PERF_TYPE_TRACEPOINT == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong sample_type",
- PERF_TP_SAMPLE_TYPE == evsel->core.attr.sample_type);
- TEST_ASSERT_VAL("wrong sample_period",
- 1 == evsel->core.attr.sample_period);
+ TEST_ASSERT_EVSEL("wrong type",
+ PERF_TYPE_TRACEPOINT == evsel->core.attr.type,
+ evsel);
+ TEST_ASSERT_EVSEL("wrong sample_type",
+ PERF_TP_SAMPLE_TYPE == evsel->core.attr.sample_type,
+ evsel);
+ TEST_ASSERT_EVSEL("wrong sample_period",
+ 1 == evsel->core.attr.sample_period,
+ evsel);
}
return TEST_OK;
}
static int test__checkevent_raw(struct evlist *evlist)
{
- struct perf_evsel *evsel;
+ struct evsel *evsel;
bool raw_type_match = false;
- TEST_ASSERT_VAL("wrong number of entries", 0 != evlist->core.nr_entries);
+ TEST_ASSERT_EVLIST("wrong number of entries", 0 != evlist->core.nr_entries, evlist);
- perf_evlist__for_each_evsel(&evlist->core, evsel) {
+ evlist__for_each_entry(evlist, evsel) {
struct perf_pmu *pmu __maybe_unused = NULL;
bool type_matched = false;
- TEST_ASSERT_VAL("wrong config", test_perf_config(evsel, 0x1a));
- TEST_ASSERT_VAL("event not parsed as raw type",
- evsel->attr.type == PERF_TYPE_RAW);
+ TEST_ASSERT_EVSEL("wrong config", test_hw_config(evsel, 0x1a), evsel);
+ TEST_ASSERT_EVSEL("event not parsed as raw type",
+ evsel->core.attr.type == PERF_TYPE_RAW,
+ evsel);
#if defined(__aarch64__)
/*
* Arm doesn't have a real raw type PMU in sysfs, so raw events
@@ -135,15 +164,15 @@ static int test__checkevent_raw(struct evlist *evlist)
type_matched = raw_type_match = true;
#else
while ((pmu = perf_pmus__scan(pmu)) != NULL) {
- if (pmu->type == evsel->attr.type) {
- TEST_ASSERT_VAL("PMU type expected once", !type_matched);
+ if (pmu->type == evsel->core.attr.type) {
+ TEST_ASSERT_EVSEL("PMU type expected once", !type_matched, evsel);
type_matched = true;
if (pmu->type == PERF_TYPE_RAW)
raw_type_match = true;
}
}
#endif
- TEST_ASSERT_VAL("No PMU found for type", type_matched);
+ TEST_ASSERT_EVSEL("No PMU found for type", type_matched, evsel);
}
TEST_ASSERT_VAL("Raw PMU not matched", raw_type_match);
return TEST_OK;
@@ -153,62 +182,44 @@ static int test__checkevent_numeric(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
- TEST_ASSERT_VAL("wrong type", 1 == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, 1));
+ TEST_ASSERT_EVLIST("wrong number of entries", 1 == evlist->core.nr_entries, evlist);
+ TEST_ASSERT_EVSEL("wrong type", 1 == evsel->core.attr.type, evsel);
+ TEST_ASSERT_EVSEL("wrong config", 1 == evsel->core.attr.config, evsel);
return TEST_OK;
}
-static int assert_hw(struct perf_evsel *evsel, enum perf_hw_id id, const char *name)
-{
- struct perf_pmu *pmu;
-
- if (evsel->attr.type == PERF_TYPE_HARDWARE) {
- TEST_ASSERT_VAL("wrong config", test_perf_config(evsel, id));
- return 0;
- }
- pmu = perf_pmus__find_by_type(evsel->attr.type);
-
- TEST_ASSERT_VAL("unexpected PMU type", pmu);
- TEST_ASSERT_VAL("PMU missing event", perf_pmu__have_event(pmu, name));
- return 0;
-}
-
static int test__checkevent_symbolic_name(struct evlist *evlist)
{
- struct perf_evsel *evsel;
-
- TEST_ASSERT_VAL("wrong number of entries", 0 != evlist->core.nr_entries);
+ struct evsel *evsel;
- perf_evlist__for_each_evsel(&evlist->core, evsel) {
- int ret = assert_hw(evsel, PERF_COUNT_HW_INSTRUCTIONS, "instructions");
+ TEST_ASSERT_EVLIST("wrong number of entries", 0 != evlist->core.nr_entries, evlist);
- if (ret)
- return ret;
+ evlist__for_each_entry(evlist, evsel) {
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS),
+ evsel);
}
-
return TEST_OK;
}
static int test__checkevent_symbolic_name_config(struct evlist *evlist)
{
- struct perf_evsel *evsel;
-
- TEST_ASSERT_VAL("wrong number of entries", 0 != evlist->core.nr_entries);
+ struct evsel *evsel;
- perf_evlist__for_each_evsel(&evlist->core, evsel) {
- int ret = assert_hw(evsel, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+ TEST_ASSERT_EVLIST("wrong number of entries", 0 != evlist->core.nr_entries, evlist);
- if (ret)
- return ret;
+ evlist__for_each_entry(evlist, evsel) {
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_CPU_CYCLES),
+ evsel);
/*
* The period value gets configured within evlist__config,
* while this test executes only parse events method.
*/
- TEST_ASSERT_VAL("wrong period", 0 == evsel->attr.sample_period);
- TEST_ASSERT_VAL("wrong config1", 0 == evsel->attr.config1);
- TEST_ASSERT_VAL("wrong config2", 1 == evsel->attr.config2);
+ TEST_ASSERT_EVSEL("wrong period", 0 == evsel->core.attr.sample_period, evsel);
+ TEST_ASSERT_EVSEL("wrong config1", 0 == evsel->core.attr.config1, evsel);
+ TEST_ASSERT_EVSEL("wrong config2", 1 == evsel->core.attr.config2, evsel);
}
return TEST_OK;
}
@@ -217,21 +228,21 @@ static int test__checkevent_symbolic_alias(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_SW_PAGE_FAULTS));
+ TEST_ASSERT_EVLIST("wrong number of entries", 1 == evlist->core.nr_entries, evlist);
+ TEST_ASSERT_EVSEL("wrong type/config", evsel__match(evsel, SOFTWARE, SW_PAGE_FAULTS),
+ evsel);
return TEST_OK;
}
static int test__checkevent_genhw(struct evlist *evlist)
{
- struct perf_evsel *evsel;
+ struct evsel *evsel;
- TEST_ASSERT_VAL("wrong number of entries", 0 != evlist->core.nr_entries);
+ TEST_ASSERT_EVLIST("wrong number of entries", 0 != evlist->core.nr_entries, evlist);
- perf_evlist__for_each_entry(&evlist->core, evsel) {
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->attr.type);
- TEST_ASSERT_VAL("wrong config", test_perf_config(evsel, 1 << 16));
+ evlist__for_each_entry(evlist, evsel) {
+ TEST_ASSERT_EVSEL("wrong type", PERF_TYPE_HW_CACHE == evsel->core.attr.type, evsel);
+ TEST_ASSERT_EVSEL("wrong config", test_hw_config(evsel, 1 << 16), evsel);
}
return TEST_OK;
}
@@ -240,13 +251,13 @@ static int test__checkevent_breakpoint(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, 0));
- TEST_ASSERT_VAL("wrong bp_type", (HW_BREAKPOINT_R | HW_BREAKPOINT_W) ==
- evsel->core.attr.bp_type);
- TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_4 ==
- evsel->core.attr.bp_len);
+ TEST_ASSERT_EVLIST("wrong number of entries", 1 == evlist->core.nr_entries, evlist);
+ TEST_ASSERT_EVSEL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type, evsel);
+ TEST_ASSERT_EVSEL("wrong config", 0 == evsel->core.attr.config, evsel);
+ TEST_ASSERT_EVSEL("wrong bp_type",
+ (HW_BREAKPOINT_R | HW_BREAKPOINT_W) == evsel->core.attr.bp_type,
+ evsel);
+ TEST_ASSERT_EVSEL("wrong bp_len", HW_BREAKPOINT_LEN_4 == evsel->core.attr.bp_len, evsel);
return TEST_OK;
}
@@ -254,12 +265,12 @@ static int test__checkevent_breakpoint_x(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, 0));
- TEST_ASSERT_VAL("wrong bp_type",
- HW_BREAKPOINT_X == evsel->core.attr.bp_type);
- TEST_ASSERT_VAL("wrong bp_len", default_breakpoint_len() == evsel->core.attr.bp_len);
+ TEST_ASSERT_EVLIST("wrong number of entries", 1 == evlist->core.nr_entries, evlist);
+ TEST_ASSERT_EVSEL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type, evsel);
+ TEST_ASSERT_EVSEL("wrong config", 0 == evsel->core.attr.config, evsel);
+ TEST_ASSERT_EVSEL("wrong bp_type", HW_BREAKPOINT_X == evsel->core.attr.bp_type, evsel);
+ TEST_ASSERT_EVSEL("wrong bp_len", default_breakpoint_len() == evsel->core.attr.bp_len,
+ evsel);
return TEST_OK;
}
@@ -267,14 +278,11 @@ static int test__checkevent_breakpoint_r(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
- TEST_ASSERT_VAL("wrong type",
- PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, 0));
- TEST_ASSERT_VAL("wrong bp_type",
- HW_BREAKPOINT_R == evsel->core.attr.bp_type);
- TEST_ASSERT_VAL("wrong bp_len",
- HW_BREAKPOINT_LEN_4 == evsel->core.attr.bp_len);
+ TEST_ASSERT_EVLIST("wrong number of entries", 1 == evlist->core.nr_entries, evlist);
+ TEST_ASSERT_EVSEL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type, evsel);
+ TEST_ASSERT_EVSEL("wrong config", 0 == evsel->core.attr.config, evsel);
+ TEST_ASSERT_EVSEL("wrong bp_type", HW_BREAKPOINT_R == evsel->core.attr.bp_type, evsel);
+ TEST_ASSERT_EVSEL("wrong bp_len", HW_BREAKPOINT_LEN_4 == evsel->core.attr.bp_len, evsel);
return TEST_OK;
}
@@ -282,14 +290,11 @@ static int test__checkevent_breakpoint_w(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
- TEST_ASSERT_VAL("wrong type",
- PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, 0));
- TEST_ASSERT_VAL("wrong bp_type",
- HW_BREAKPOINT_W == evsel->core.attr.bp_type);
- TEST_ASSERT_VAL("wrong bp_len",
- HW_BREAKPOINT_LEN_4 == evsel->core.attr.bp_len);
+ TEST_ASSERT_EVLIST("wrong number of entries", 1 == evlist->core.nr_entries, evlist);
+ TEST_ASSERT_EVSEL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type, evsel);
+ TEST_ASSERT_EVSEL("wrong config", 0 == evsel->core.attr.config, evsel);
+ TEST_ASSERT_EVSEL("wrong bp_type", HW_BREAKPOINT_W == evsel->core.attr.bp_type, evsel);
+ TEST_ASSERT_EVSEL("wrong bp_len", HW_BREAKPOINT_LEN_4 == evsel->core.attr.bp_len, evsel);
return TEST_OK;
}
@@ -297,14 +302,13 @@ static int test__checkevent_breakpoint_rw(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
- TEST_ASSERT_VAL("wrong type",
- PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, 0));
- TEST_ASSERT_VAL("wrong bp_type",
- (HW_BREAKPOINT_R|HW_BREAKPOINT_W) == evsel->core.attr.bp_type);
- TEST_ASSERT_VAL("wrong bp_len",
- HW_BREAKPOINT_LEN_4 == evsel->core.attr.bp_len);
+ TEST_ASSERT_EVLIST("wrong number of entries", 1 == evlist->core.nr_entries, evlist);
+ TEST_ASSERT_EVSEL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type, evsel);
+ TEST_ASSERT_EVSEL("wrong config", 0 == evsel->core.attr.config, evsel);
+ TEST_ASSERT_EVSEL("wrong bp_type",
+ (HW_BREAKPOINT_R|HW_BREAKPOINT_W) == evsel->core.attr.bp_type,
+ evsel);
+ TEST_ASSERT_EVSEL("wrong bp_len", HW_BREAKPOINT_LEN_4 == evsel->core.attr.bp_len, evsel);
return TEST_OK;
}
@@ -312,10 +316,11 @@ static int test__checkevent_tracepoint_modifier(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_EVLIST("wrong number of entries", 1 == evlist->core.nr_entries, evlist);
+ TEST_ASSERT_EVSEL("wrong exclude_user", evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
return test__checkevent_tracepoint(evlist);
}
@@ -323,15 +328,15 @@ static int test__checkevent_tracepoint_modifier(struct evlist *evlist)
static int
test__checkevent_tracepoint_multi_modifier(struct evlist *evlist)
{
- struct perf_evsel *evsel;
+ struct evsel *evsel;
- TEST_ASSERT_VAL("wrong number of entries", evlist->core.nr_entries > 1);
+ TEST_ASSERT_EVLIST("wrong number of entries", evlist->core.nr_entries > 1, evlist);
- perf_evlist__for_each_entry(&evlist->core, evsel) {
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+ evlist__for_each_entry(evlist, evsel) {
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
}
return test__checkevent_tracepoint_multi(evlist);
@@ -339,64 +344,77 @@ test__checkevent_tracepoint_multi_modifier(struct evlist *evlist)
static int test__checkevent_raw_modifier(struct evlist *evlist)
{
- struct perf_evsel *evsel;
+ struct evsel *evsel;
+
+ TEST_ASSERT_EVLIST("wrong number of entries", 1 == evlist->core.nr_entries, evlist);
- perf_evlist__for_each_entry(&evlist->core, evsel) {
- TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
- TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
+ evlist__for_each_entry(evlist, evsel) {
+ TEST_ASSERT_EVSEL("wrong exclude_user", evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", evsel->core.attr.precise_ip, evsel);
}
return test__checkevent_raw(evlist);
}
static int test__checkevent_numeric_modifier(struct evlist *evlist)
{
- struct perf_evsel *evsel;
+ struct evsel *evsel;
+
+ TEST_ASSERT_EVLIST("wrong number of entries", 1 == evlist->core.nr_entries, evlist);
- perf_evlist__for_each_entry(&evlist->core, evsel) {
- TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
- TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
+ evlist__for_each_entry(evlist, evsel) {
+ TEST_ASSERT_EVSEL("wrong exclude_user", evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", !evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", evsel->core.attr.precise_ip, evsel);
}
return test__checkevent_numeric(evlist);
}
static int test__checkevent_symbolic_name_modifier(struct evlist *evlist)
{
- struct perf_evsel *evsel;
+ struct evsel *evsel;
- TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == num_core_entries());
+ TEST_ASSERT_EVLIST("wrong number of entries",
+ evlist->core.nr_entries == num_core_entries(evlist),
+ evlist);
- perf_evlist__for_each_entry(&evlist->core, evsel) {
- TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+ evlist__for_each_entry(evlist, evsel) {
+ TEST_ASSERT_EVSEL("wrong exclude_user", evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", !evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
}
return test__checkevent_symbolic_name(evlist);
}
static int test__checkevent_exclude_host_modifier(struct evlist *evlist)
{
- struct perf_evsel *evsel;
+ struct evsel *evsel;
+
+ TEST_ASSERT_EVLIST("wrong number of entries",
+ evlist->core.nr_entries == num_core_entries(evlist),
+ evlist);
- perf_evlist__for_each_entry(&evlist->core, evsel) {
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
+ evlist__for_each_entry(evlist, evsel) {
+ TEST_ASSERT_EVSEL("wrong exclude guest", !evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", evsel->core.attr.exclude_host, evsel);
}
return test__checkevent_symbolic_name(evlist);
}
static int test__checkevent_exclude_guest_modifier(struct evlist *evlist)
{
- struct perf_evsel *evsel;
+ struct evsel *evsel;
+
+ TEST_ASSERT_EVLIST("wrong number of entries",
+ evlist->core.nr_entries == num_core_entries(evlist),
+ evlist);
- perf_evlist__for_each_entry(&evlist->core, evsel) {
- TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
+ evlist__for_each_entry(evlist, evsel) {
+ TEST_ASSERT_EVSEL("wrong exclude guest", evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", !evsel->core.attr.exclude_host, evsel);
}
return test__checkevent_symbolic_name(evlist);
}
@@ -405,23 +423,28 @@ static int test__checkevent_symbolic_alias_modifier(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_EVLIST("wrong number of entries", 1 == evlist->core.nr_entries, evlist);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
return test__checkevent_symbolic_alias(evlist);
}
static int test__checkevent_genhw_modifier(struct evlist *evlist)
{
- struct perf_evsel *evsel;
+ struct evsel *evsel;
+
+ TEST_ASSERT_EVLIST("wrong number of entries",
+ evlist->core.nr_entries == num_core_entries(evlist),
+ evlist);
- perf_evlist__for_each_entry(&evlist->core, evsel) {
- TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
- TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
+ evlist__for_each_entry(evlist, evsel) {
+ TEST_ASSERT_EVSEL("wrong exclude_user", evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", evsel->core.attr.precise_ip, evsel);
}
return test__checkevent_genhw(evlist);
}
@@ -430,13 +453,17 @@ static int test__checkevent_exclude_idle_modifier(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong exclude idle", evsel->core.attr.exclude_idle);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_EVLIST("wrong number of entries",
+ evlist->core.nr_entries == num_core_entries(evlist),
+ evlist);
+
+ TEST_ASSERT_EVSEL("wrong exclude idle", evsel->core.attr.exclude_idle, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", !evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", !evsel->core.attr.exclude_host, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", !evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
return test__checkevent_symbolic_name(evlist);
}
@@ -445,13 +472,17 @@ static int test__checkevent_exclude_idle_modifier_1(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong exclude idle", evsel->core.attr.exclude_idle);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_EVLIST("wrong number of entries",
+ evlist->core.nr_entries == num_core_entries(evlist),
+ evlist);
+
+ TEST_ASSERT_EVSEL("wrong exclude idle", evsel->core.attr.exclude_idle, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", !evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", evsel->core.attr.exclude_host, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
return test__checkevent_symbolic_name(evlist);
}
@@ -461,11 +492,11 @@ static int test__checkevent_breakpoint_modifier(struct evlist *evlist)
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong name", evsel__name_is(evsel, "mem:0:u"));
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong name", evsel__name_is(evsel, "mem:0:u"), evsel);
return test__checkevent_breakpoint(evlist);
}
@@ -474,11 +505,11 @@ static int test__checkevent_breakpoint_x_modifier(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong name", evsel__name_is(evsel, "mem:0:x:k"));
+ TEST_ASSERT_EVSEL("wrong exclude_user", evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong name", evsel__name_is(evsel, "mem:0:x:k"), evsel);
return test__checkevent_breakpoint_x(evlist);
}
@@ -487,11 +518,11 @@ static int test__checkevent_breakpoint_r_modifier(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong name", evsel__name_is(evsel, "mem:0:r:hp"));
+ TEST_ASSERT_EVSEL("wrong exclude_user", evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", !evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong name", evsel__name_is(evsel, "mem:0:r:hp"), evsel);
return test__checkevent_breakpoint_r(evlist);
}
@@ -500,11 +531,11 @@ static int test__checkevent_breakpoint_w_modifier(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong name", evsel__name_is(evsel, "mem:0:w:up"));
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong name", evsel__name_is(evsel, "mem:0:w:up"), evsel);
return test__checkevent_breakpoint_w(evlist);
}
@@ -513,11 +544,11 @@ static int test__checkevent_breakpoint_rw_modifier(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong name", evsel__name_is(evsel, "mem:0:rw:kp"));
+ TEST_ASSERT_EVSEL("wrong exclude_user", evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong name", evsel__name_is(evsel, "mem:0:rw:kp"), evsel);
return test__checkevent_breakpoint_rw(evlist);
}
@@ -526,11 +557,11 @@ static int test__checkevent_breakpoint_modifier_name(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong name", evsel__name_is(evsel, "breakpoint"));
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong name", evsel__name_is(evsel, "breakpoint"), evsel);
return test__checkevent_breakpoint(evlist);
}
@@ -539,11 +570,11 @@ static int test__checkevent_breakpoint_x_modifier_name(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong name", evsel__name_is(evsel, "breakpoint"));
+ TEST_ASSERT_EVSEL("wrong exclude_user", evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong name", evsel__name_is(evsel, "breakpoint"), evsel);
return test__checkevent_breakpoint_x(evlist);
}
@@ -552,11 +583,11 @@ static int test__checkevent_breakpoint_r_modifier_name(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong name", evsel__name_is(evsel, "breakpoint"));
+ TEST_ASSERT_EVSEL("wrong exclude_user", evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", !evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong name", evsel__name_is(evsel, "breakpoint"), evsel);
return test__checkevent_breakpoint_r(evlist);
}
@@ -565,11 +596,11 @@ static int test__checkevent_breakpoint_w_modifier_name(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong name", evsel__name_is(evsel, "breakpoint"));
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong name", evsel__name_is(evsel, "breakpoint"), evsel);
return test__checkevent_breakpoint_w(evlist);
}
@@ -578,11 +609,11 @@ static int test__checkevent_breakpoint_rw_modifier_name(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong name", evsel__name_is(evsel, "breakpoint"));
+ TEST_ASSERT_EVSEL("wrong exclude_user", evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong name", evsel__name_is(evsel, "breakpoint"), evsel);
return test__checkevent_breakpoint_rw(evlist);
}
@@ -591,15 +622,15 @@ static int test__checkevent_breakpoint_2_events(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+ TEST_ASSERT_EVSEL("wrong number of entries", 2 == evlist->core.nr_entries, evsel);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong name", evsel__name_is(evsel, "breakpoint1"));
+ TEST_ASSERT_EVSEL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type, evsel);
+ TEST_ASSERT_EVSEL("wrong name", evsel__name_is(evsel, "breakpoint1"), evsel);
evsel = evsel__next(evsel);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong name", evsel__name_is(evsel, "breakpoint2"));
+ TEST_ASSERT_EVSEL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type, evsel);
+ TEST_ASSERT_EVSEL("wrong name", evsel__name_is(evsel, "breakpoint2"), evsel);
return TEST_OK;
}
@@ -608,18 +639,20 @@ static int test__checkevent_pmu(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
-
- TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, 10));
- TEST_ASSERT_VAL("wrong config1", 1 == evsel->core.attr.config1);
- TEST_ASSERT_VAL("wrong config2", 3 == evsel->core.attr.config2);
- TEST_ASSERT_VAL("wrong config3", 0 == evsel->core.attr.config3);
+ struct perf_pmu *core_pmu = perf_pmus__find_core_pmu();
+
+ TEST_ASSERT_EVSEL("wrong number of entries", 1 == evlist->core.nr_entries, evsel);
+ TEST_ASSERT_EVSEL("wrong type", core_pmu->type == evsel->core.attr.type, evsel);
+ TEST_ASSERT_EVSEL("wrong config", test_hw_config(evsel, 10), evsel);
+ TEST_ASSERT_EVSEL("wrong config1", 1 == evsel->core.attr.config1, evsel);
+ TEST_ASSERT_EVSEL("wrong config2", 3 == evsel->core.attr.config2, evsel);
+ TEST_ASSERT_EVSEL("wrong config3", 0 == evsel->core.attr.config3, evsel);
+ TEST_ASSERT_EVSEL("wrong config4", 0 == evsel->core.attr.config4, evsel);
/*
* The period value gets configured within evlist__config,
* while this test executes only the parse events method.
*/
- TEST_ASSERT_VAL("wrong period", 0 == evsel->core.attr.sample_period);
+ TEST_ASSERT_EVSEL("wrong period", 0 == evsel->core.attr.sample_period, evsel);
return TEST_OK;
}
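
test__checkevent_pmu now resolves the default core PMU at runtime instead of hard-coding PERF_TYPE_RAW, so the type check also holds on hybrid systems where core PMUs carry dynamically assigned types. The idiom, isolated from the test body above:

    struct perf_pmu *core_pmu = perf_pmus__find_core_pmu();

    /* Compare against the resolved core PMU type, not PERF_TYPE_RAW. */
    if (core_pmu->type != evsel->core.attr.type)
            return TEST_FAIL;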
@@ -628,40 +661,41 @@ static int test__checkevent_list(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong number of entries", 3 <= evlist->core.nr_entries);
+ TEST_ASSERT_EVSEL("wrong number of entries", 3 <= evlist->core.nr_entries, evsel);
/* r1 */
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT != evsel->core.attr.type);
+ TEST_ASSERT_EVSEL("wrong type", PERF_TYPE_TRACEPOINT != evsel->core.attr.type, evsel);
while (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
- TEST_ASSERT_VAL("wrong config", test_config(evsel, 1));
- TEST_ASSERT_VAL("wrong config1", 0 == evsel->core.attr.config1);
- TEST_ASSERT_VAL("wrong config2", 0 == evsel->core.attr.config2);
- TEST_ASSERT_VAL("wrong config3", 0 == evsel->core.attr.config3);
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_EVSEL("wrong config", 1 == evsel->core.attr.config, evsel);
+ TEST_ASSERT_EVSEL("wrong config1", 0 == evsel->core.attr.config1, evsel);
+ TEST_ASSERT_EVSEL("wrong config2", 0 == evsel->core.attr.config2, evsel);
+ TEST_ASSERT_EVSEL("wrong config3", 0 == evsel->core.attr.config3, evsel);
+ TEST_ASSERT_EVSEL("wrong config4", 0 == evsel->core.attr.config4, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", !evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
evsel = evsel__next(evsel);
}
/* syscalls:sys_enter_openat:k */
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong sample_type",
- PERF_TP_SAMPLE_TYPE == evsel->core.attr.sample_type);
- TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->core.attr.sample_period);
- TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_EVSEL("wrong type", PERF_TYPE_TRACEPOINT == evsel->core.attr.type, evsel);
+ TEST_ASSERT_EVSEL("wrong sample_type", PERF_TP_SAMPLE_TYPE == evsel->core.attr.sample_type,
+ evsel);
+ TEST_ASSERT_EVSEL("wrong sample_period", 1 == evsel->core.attr.sample_period, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
/* 1:1:hp */
evsel = evsel__next(evsel);
- TEST_ASSERT_VAL("wrong type", 1 == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, 1));
- TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip);
+ TEST_ASSERT_EVSEL("wrong type", 1 == evsel->core.attr.type, evsel);
+ TEST_ASSERT_EVSEL("wrong config", 1 == evsel->core.attr.config, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", !evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", evsel->core.attr.precise_ip, evsel);
return TEST_OK;
}
@@ -669,19 +703,22 @@ static int test__checkevent_list(struct evlist *evlist)
static int test__checkevent_pmu_name(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
+ struct perf_pmu *core_pmu = perf_pmus__find_core_pmu();
+ char buf[256];
- /* cpu/config=1,name=krava/u */
- TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, 1));
- TEST_ASSERT_VAL("wrong name", evsel__name_is(evsel, "krava"));
+ /* default_core/config=1,name=krava/u */
+ TEST_ASSERT_EVLIST("wrong number of entries", 2 == evlist->core.nr_entries, evlist);
+ TEST_ASSERT_EVSEL("wrong type", core_pmu->type == evsel->core.attr.type, evsel);
+ TEST_ASSERT_EVSEL("wrong config", 1 == evsel->core.attr.config, evsel);
+ TEST_ASSERT_EVSEL("wrong name", evsel__name_is(evsel, "krava"), evsel);
- /* cpu/config=2/u" */
+ /* default_core/config=2/u" */
evsel = evsel__next(evsel);
- TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, 2));
- TEST_ASSERT_VAL("wrong name", evsel__name_is(evsel, "cpu/config=2/u"));
+ TEST_ASSERT_EVSEL("wrong number of entries", 2 == evlist->core.nr_entries, evsel);
+ TEST_ASSERT_EVSEL("wrong type", core_pmu->type == evsel->core.attr.type, evsel);
+ TEST_ASSERT_EVSEL("wrong config", 2 == evsel->core.attr.config, evsel);
+ snprintf(buf, sizeof(buf), "%s/config=2/u", core_pmu->name);
+ TEST_ASSERT_EVSEL("wrong name", evsel__name_is(evsel, buf), evsel);
return TEST_OK;
}
@@ -689,50 +726,55 @@ static int test__checkevent_pmu_name(struct evlist *evlist)
static int test__checkevent_pmu_partial_time_callgraph(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
+ struct perf_pmu *core_pmu = perf_pmus__find_core_pmu();
- /* cpu/config=1,call-graph=fp,time,period=100000/ */
- TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, 1));
+ /* default_core/config=1,call-graph=fp,time,period=100000/ */
+ TEST_ASSERT_EVLIST("wrong number of entries", 2 == evlist->core.nr_entries, evlist);
+ TEST_ASSERT_EVSEL("wrong type", core_pmu->type == evsel->core.attr.type, evsel);
+ TEST_ASSERT_EVSEL("wrong config", 1 == evsel->core.attr.config, evsel);
/*
* The period, time and callgraph values get configured within evlist__config,
* while this test executes only the parse events method.
*/
- TEST_ASSERT_VAL("wrong period", 0 == evsel->core.attr.sample_period);
- TEST_ASSERT_VAL("wrong callgraph", !evsel__has_callchain(evsel));
- TEST_ASSERT_VAL("wrong time", !(PERF_SAMPLE_TIME & evsel->core.attr.sample_type));
+ TEST_ASSERT_EVSEL("wrong period", 0 == evsel->core.attr.sample_period, evsel);
+ TEST_ASSERT_EVSEL("wrong callgraph", !evsel__has_callchain(evsel), evsel);
+ TEST_ASSERT_EVSEL("wrong time", !(PERF_SAMPLE_TIME & evsel->core.attr.sample_type), evsel);
- /* cpu/config=2,call-graph=no,time=0,period=2000/ */
+ /* default_core/config=2,call-graph=no,time=0,period=2000/ */
evsel = evsel__next(evsel);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, 2));
+ TEST_ASSERT_EVSEL("wrong type", core_pmu->type == evsel->core.attr.type, evsel);
+ TEST_ASSERT_EVSEL("wrong config", 2 == evsel->core.attr.config, evsel);
/*
* The period, time and callgraph values get configured within evlist__config,
* while this test executes only the parse events method.
*/
- TEST_ASSERT_VAL("wrong period", 0 == evsel->core.attr.sample_period);
- TEST_ASSERT_VAL("wrong callgraph", !evsel__has_callchain(evsel));
- TEST_ASSERT_VAL("wrong time", !(PERF_SAMPLE_TIME & evsel->core.attr.sample_type));
+ TEST_ASSERT_EVSEL("wrong period", 0 == evsel->core.attr.sample_period, evsel);
+ TEST_ASSERT_EVSEL("wrong callgraph", !evsel__has_callchain(evsel), evsel);
+ TEST_ASSERT_EVSEL("wrong time", !(PERF_SAMPLE_TIME & evsel->core.attr.sample_type), evsel);
return TEST_OK;
}
static int test__checkevent_pmu_events(struct evlist *evlist)
{
- struct evsel *evsel = evlist__first(evlist);
+ struct evsel *evsel;
+ struct perf_pmu *core_pmu = perf_pmus__find_core_pmu();
- TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type ||
- strcmp(evsel->pmu->name, "cpu"));
- TEST_ASSERT_VAL("wrong exclude_user",
- !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel",
- evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned);
- TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.exclusive);
+ TEST_ASSERT_EVLIST("wrong number of entries", 1 <= evlist->core.nr_entries, evlist);
+ evlist__for_each_entry(evlist, evsel) {
+ TEST_ASSERT_EVSEL("wrong type",
+ core_pmu->type == evsel->core.attr.type ||
+ !strncmp(evsel__name(evsel), evsel->pmu->name,
+ strlen(evsel->pmu->name)),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong pinned", !evsel->core.attr.pinned, evsel);
+ TEST_ASSERT_EVSEL("wrong exclusive", !evsel->core.attr.exclusive, evsel);
+ }
return TEST_OK;
}
@@ -745,30 +787,26 @@ static int test__checkevent_pmu_events_mix(struct evlist *evlist)
* The wild card event will be opened at least once, but it may be
* opened on each core PMU.
*/
- TEST_ASSERT_VAL("wrong number of entries", evlist->core.nr_entries >= 2);
+ TEST_ASSERT_EVLIST("wrong number of entries", evlist->core.nr_entries >= 2, evlist);
for (int i = 0; i < evlist->core.nr_entries - 1; i++) {
evsel = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
/* pmu-event:u */
- TEST_ASSERT_VAL("wrong exclude_user",
- !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel",
- evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned);
- TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.exclusive);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong pinned", !evsel->core.attr.pinned, evsel);
+ TEST_ASSERT_EVSEL("wrong exclusive", !evsel->core.attr.exclusive, evsel);
}
- /* cpu/pmu-event/u*/
+ /* default_core/pmu-event/u */
evsel = evsel__next(evsel);
- TEST_ASSERT_VAL("wrong type", evsel__find_pmu(evsel)->is_core);
- TEST_ASSERT_VAL("wrong exclude_user",
- !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel",
- evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned);
- TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.pinned);
+ TEST_ASSERT_EVSEL("wrong type", evsel__find_pmu(evsel)->is_core, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong pinned", !evsel->core.attr.pinned, evsel);
+ TEST_ASSERT_EVSEL("wrong exclusive", !evsel->core.attr.pinned, evsel);
return TEST_OK;
}
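
The next hunk extends test__checkterms_simple() with a config4 term, parsed as PARSE_EVENTS__TERM_TYPE_CONFIG4. Assuming config4 follows the same term syntax as config1 through config3 (the parser change itself is outside this diff), the tail of the string under test would read:

    /* Hypothetical fragment; config3=4, config4=5 and umask=1 match the
     * values the assertions check. */
    const char *tail = "config3=4,config4=5,umask=1";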
@@ -813,6 +851,15 @@ static int test__checkterms_simple(struct parse_events_terms *terms)
TEST_ASSERT_VAL("wrong val", term->val.num == 4);
TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "config3"));
+ /* config4=5 */
+ term = list_entry(term->list.next, struct parse_events_term, list);
+ TEST_ASSERT_VAL("wrong type term",
+ term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG4);
+ TEST_ASSERT_VAL("wrong type val",
+ term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
+ TEST_ASSERT_VAL("wrong val", term->val.num == 5);
+ TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "config4"));
+
/* umask=1 */
term = list_entry(term->list.next, struct parse_events_term, list);
TEST_ASSERT_VAL("wrong type term",
@@ -855,48 +902,45 @@ static int test__checkterms_simple(struct parse_events_terms *terms)
static int test__group1(struct evlist *evlist)
{
- struct evsel *evsel, *leader;
-
- TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == (num_core_entries() * 2));
- TEST_ASSERT_VAL("wrong number of groups",
- evlist__nr_groups(evlist) == num_core_entries());
+ struct evsel *evsel = NULL, *leader;
- for (int i = 0; i < num_core_entries(); i++) {
- int ret;
+ TEST_ASSERT_EVLIST("wrong number of entries",
+ evlist->core.nr_entries == (num_core_entries(evlist) * 2),
+ evlist);
+ TEST_ASSERT_EVLIST("wrong number of groups",
+ evlist__nr_groups(evlist) == num_core_entries(evlist),
+ evlist);
+ for (int i = 0; i < num_core_entries(evlist); i++) {
/* instructions:k */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_INSTRUCTIONS, "instructions");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
- TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
- TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
- TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", !evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", !evsel->core.attr.exclude_host, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong leader", evsel__is_group_leader(evsel), evsel);
+ TEST_ASSERT_EVSEL("wrong core.nr_members", evsel->core.nr_members == 2, evsel);
+ TEST_ASSERT_EVSEL("wrong group_idx", evsel__group_idx(evsel) == 0, evsel);
+ TEST_ASSERT_EVSEL("wrong sample_read", !evsel->sample_read, evsel);
/* cycles:upp */
evsel = evsel__next(evsel);
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip == 2);
- TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
- TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
- TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+ TEST_ASSERT_EVSEL("unexpected event", evsel__match(evsel, HARDWARE, HW_CPU_CYCLES),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", !evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", !evsel->core.attr.exclude_host, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", evsel->core.attr.precise_ip == 2, evsel);
+ TEST_ASSERT_EVSEL("wrong leader", evsel__has_leader(evsel, leader), evsel);
+ TEST_ASSERT_EVSEL("wrong group_idx", evsel__group_idx(evsel) == 1, evsel);
+ TEST_ASSERT_EVSEL("wrong sample_read", !evsel->sample_read, evsel);
}
return TEST_OK;
}
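
The assert_hw() helper calls are folded into evsel__match(), which pastes the PERF_TYPE_ and PERF_COUNT_ prefixes onto its arguments. Ignoring the extended-type encoding that hybrid systems add, a rough sketch of the check such a macro performs:

    /* Sketch only; real perf also matches PMU-extended type encodings. */
    #define evsel__match(evsel, t, c)                               \
            ((evsel)->core.attr.type == PERF_TYPE_##t &&            \
             (evsel)->core.attr.config == PERF_COUNT_##c)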
@@ -905,61 +949,66 @@ static int test__group2(struct evlist *evlist)
{
struct evsel *evsel, *leader = NULL;
- TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == (2 * num_core_entries() + 1));
+ TEST_ASSERT_EVLIST("wrong number of entries",
+ evlist->core.nr_entries == (2 * num_core_entries(evlist) + 1),
+ evlist);
/*
* TODO: Currently the software event won't be grouped with the hardware
* event except for 1 PMU.
*/
- TEST_ASSERT_VAL("wrong number of groups", 1 == evlist__nr_groups(evlist));
+ TEST_ASSERT_EVLIST("wrong number of groups", 1 == evlist__nr_groups(evlist), evlist);
evlist__for_each_entry(evlist, evsel) {
- int ret;
-
- if (evsel->core.attr.type == PERF_TYPE_SOFTWARE) {
+ if (evsel__match(evsel, SOFTWARE, SW_PAGE_FAULTS)) {
/* faults + :ku modifier */
leader = evsel;
- TEST_ASSERT_VAL("wrong config",
- test_config(evsel, PERF_COUNT_SW_PAGE_FAULTS));
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
- TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
- TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
- TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user,
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel,
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", !evsel->core.attr.exclude_guest,
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", !evsel->core.attr.exclude_host,
+ evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong leader", evsel__is_group_leader(evsel), evsel);
+ TEST_ASSERT_EVSEL("wrong core.nr_members", evsel->core.nr_members == 2,
+ evsel);
+ TEST_ASSERT_EVSEL("wrong group_idx", evsel__group_idx(evsel) == 0, evsel);
+ TEST_ASSERT_EVSEL("wrong sample_read", !evsel->sample_read, evsel);
continue;
}
- if (evsel->core.attr.type == PERF_TYPE_HARDWARE &&
- test_config(evsel, PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) {
+ if (evsel__match(evsel, HARDWARE, HW_BRANCH_INSTRUCTIONS)) {
/* branches + :u modifier */
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- if (evsel__has_leader(evsel, leader))
- TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
- TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user,
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", evsel->core.attr.exclude_kernel,
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", !evsel->core.attr.exclude_guest,
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", !evsel->core.attr.exclude_host,
+ evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ if (evsel__has_leader(evsel, leader)) {
+ TEST_ASSERT_EVSEL("wrong group_idx", evsel__group_idx(evsel) == 1,
+ evsel);
+ }
+ TEST_ASSERT_EVSEL("wrong sample_read", !evsel->sample_read, evsel);
continue;
}
/* cycles:k */
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
- TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+ TEST_ASSERT_EVSEL("unexpected event", evsel__match(evsel, HARDWARE, HW_CPU_CYCLES),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", !evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", !evsel->core.attr.exclude_host, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong leader", evsel__is_group_leader(evsel), evsel);
+ TEST_ASSERT_EVSEL("wrong sample_read", !evsel->sample_read, evsel);
}
return TEST_OK;
}
@@ -967,155 +1016,172 @@ static int test__group2(struct evlist *evlist)
static int test__group3(struct evlist *evlist __maybe_unused)
{
struct evsel *evsel, *group1_leader = NULL, *group2_leader = NULL;
- int ret;
- TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == (3 * perf_pmus__num_core_pmus() + 2));
+ TEST_ASSERT_EVLIST("wrong number of entries",
+ evlist->core.nr_entries == (3 * perf_pmus__num_core_pmus() + 2),
+ evlist);
/*
* Currently the software event won't be grouped with the hardware event
* except for 1 PMU. This means there are always just 2 groups
* regardless of the number of core PMUs.
*/
- TEST_ASSERT_VAL("wrong number of groups", 2 == evlist__nr_groups(evlist));
+ TEST_ASSERT_EVLIST("wrong number of groups", 2 == evlist__nr_groups(evlist), evlist);
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
/* group1 syscalls:sys_enter_openat:H */
group1_leader = evsel;
- TEST_ASSERT_VAL("wrong sample_type",
- evsel->core.attr.sample_type == PERF_TP_SAMPLE_TYPE);
- TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->core.attr.sample_period);
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
- TEST_ASSERT_VAL("wrong group name", !strcmp(evsel->group_name, "group1"));
- TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
- TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
- TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+ TEST_ASSERT_EVSEL("wrong sample_type",
+ evsel->core.attr.sample_type == PERF_TP_SAMPLE_TYPE,
+ evsel);
+ TEST_ASSERT_EVSEL("wrong sample_period",
+ 1 == evsel->core.attr.sample_period,
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user,
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel,
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", !evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", evsel->core.attr.exclude_guest,
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", !evsel->core.attr.exclude_host,
+ evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong leader", evsel__is_group_leader(evsel), evsel);
+ TEST_ASSERT_EVSEL("wrong group name", !strcmp(evsel->group_name, "group1"),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong core.nr_members", evsel->core.nr_members == 2,
+ evsel);
+ TEST_ASSERT_EVSEL("wrong group_idx", evsel__group_idx(evsel) == 0, evsel);
+ TEST_ASSERT_EVSEL("wrong sample_read", !evsel->sample_read, evsel);
continue;
}
- if (evsel->core.attr.type == PERF_TYPE_HARDWARE &&
- test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)) {
+ if (evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
if (evsel->core.attr.exclude_user) {
/* group1 cycles:kppp */
- TEST_ASSERT_VAL("wrong exclude_user",
- evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel",
- !evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest",
- !evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host",
- !evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip",
- evsel->core.attr.precise_ip == 3);
+ TEST_ASSERT_EVSEL("wrong exclude_user",
+ evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel",
+ !evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv,
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest",
+ !evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host",
+ !evsel->core.attr.exclude_host, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip",
+ evsel->core.attr.precise_ip == 3, evsel);
if (evsel__has_leader(evsel, group1_leader)) {
- TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
- TEST_ASSERT_VAL("wrong group_idx",
- evsel__group_idx(evsel) == 1);
+ TEST_ASSERT_EVSEL("wrong group name", !evsel->group_name,
+ evsel);
+ TEST_ASSERT_EVSEL("wrong group_idx",
+ evsel__group_idx(evsel) == 1,
+ evsel);
}
- TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+ TEST_ASSERT_EVSEL("wrong sample_read", !evsel->sample_read, evsel);
} else {
/* group2 cycles + G modifier */
group2_leader = evsel;
- TEST_ASSERT_VAL("wrong exclude_kernel",
- !evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv",
- !evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest",
- !evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host",
- evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
+ TEST_ASSERT_EVSEL("wrong exclude_kernel",
+ !evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv",
+ !evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest",
+ !evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host",
+ evsel->core.attr.exclude_host, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip,
+ evsel);
+ TEST_ASSERT_EVSEL("wrong leader", evsel__is_group_leader(evsel),
+ evsel);
if (evsel->core.nr_members == 2) {
- TEST_ASSERT_VAL("wrong group_idx",
- evsel__group_idx(evsel) == 0);
+ TEST_ASSERT_EVSEL("wrong group_idx",
+ evsel__group_idx(evsel) == 0,
+ evsel);
}
- TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+ TEST_ASSERT_EVSEL("wrong sample_read", !evsel->sample_read, evsel);
}
continue;
}
if (evsel->core.attr.type == 1) {
/* group2 1:3 + G modifier */
- TEST_ASSERT_VAL("wrong config", test_config(evsel, 3));
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- if (evsel__has_leader(evsel, group2_leader))
- TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
- TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+ TEST_ASSERT_EVSEL("wrong config", 3 == evsel->core.attr.config, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user,
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel,
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", !evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", !evsel->core.attr.exclude_guest,
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", evsel->core.attr.exclude_host,
+ evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ if (evsel__has_leader(evsel, group2_leader)) {
+ TEST_ASSERT_EVSEL("wrong group_idx", evsel__group_idx(evsel) == 1,
+ evsel);
+ }
+ TEST_ASSERT_EVSEL("wrong sample_read", !evsel->sample_read, evsel);
continue;
}
/* instructions:u */
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_INSTRUCTIONS, "instructions");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
- TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", !evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", !evsel->core.attr.exclude_host, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong leader", evsel__is_group_leader(evsel), evsel);
+ TEST_ASSERT_EVSEL("wrong sample_read", !evsel->sample_read, evsel);
}
return TEST_OK;
}
static int test__group4(struct evlist *evlist __maybe_unused)
{
- struct evsel *evsel, *leader;
-
- TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == (num_core_entries() * 2));
- TEST_ASSERT_VAL("wrong number of groups",
- num_core_entries() == evlist__nr_groups(evlist));
+ struct evsel *evsel = NULL, *leader;
- for (int i = 0; i < num_core_entries(); i++) {
- int ret;
+ TEST_ASSERT_EVLIST("wrong number of entries",
+ evlist->core.nr_entries == (num_core_entries(evlist) * 2),
+ evlist);
+ TEST_ASSERT_EVLIST("wrong number of groups",
+ num_core_entries(evlist) == evlist__nr_groups(evlist),
+ evlist);
+ for (int i = 0; i < num_core_entries(evlist); i++) {
/* cycles:u + p */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip == 1);
- TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
- TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
- TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
- TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
- TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_CPU_CYCLES),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", !evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", !evsel->core.attr.exclude_host, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", evsel->core.attr.precise_ip == 1, evsel);
+ TEST_ASSERT_EVSEL("wrong group name", !evsel->group_name, evsel);
+ TEST_ASSERT_EVSEL("wrong leader", evsel__is_group_leader(evsel), evsel);
+ TEST_ASSERT_EVSEL("wrong core.nr_members", evsel->core.nr_members == 2, evsel);
+ TEST_ASSERT_EVSEL("wrong group_idx", evsel__group_idx(evsel) == 0, evsel);
+ TEST_ASSERT_EVSEL("wrong sample_read", !evsel->sample_read, evsel);
/* instructions:kp + p */
evsel = evsel__next(evsel);
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_INSTRUCTIONS, "instructions");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip == 2);
- TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
- TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
- TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", !evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", !evsel->core.attr.exclude_host, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", evsel->core.attr.precise_ip == 2, evsel);
+ TEST_ASSERT_EVSEL("wrong leader", evsel__has_leader(evsel, leader), evsel);
+ TEST_ASSERT_EVSEL("wrong group_idx", evsel__group_idx(evsel) == 1, evsel);
+ TEST_ASSERT_EVSEL("wrong sample_read", !evsel->sample_read, evsel);
}
return TEST_OK;
}
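
num_core_entries() gains an evlist argument throughout these hunks, suggesting the count is now derived from the PMUs the parsed events actually landed on rather than from a global PMU scan. A hypothetical reworking under that assumption (it relies on events from one PMU sitting adjacently, which wildcard expansion produces):

    static int num_core_entries(struct evlist *evlist)
    {
            struct perf_pmu *last = NULL;
            struct evsel *evsel;
            int nr = 0;

            evlist__for_each_entry(evlist, evsel) {
                    struct perf_pmu *pmu = evsel__find_pmu(evsel);

                    /* Count each core PMU once, as its first event goes by. */
                    if (pmu && pmu->is_core && pmu != last) {
                            nr++;
                            last = pmu;
                    }
            }
            return nr;
    }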
@@ -1123,96 +1189,92 @@ static int test__group4(struct evlist *evlist __maybe_unused)
static int test__group5(struct evlist *evlist __maybe_unused)
{
struct evsel *evsel = NULL, *leader;
- int ret;
- TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == (5 * num_core_entries()));
- TEST_ASSERT_VAL("wrong number of groups",
- evlist__nr_groups(evlist) == (2 * num_core_entries()));
+ TEST_ASSERT_EVLIST("wrong number of entries",
+ evlist->core.nr_entries == (5 * num_core_entries(evlist)),
+ evlist);
+ TEST_ASSERT_EVLIST("wrong number of groups",
+ evlist__nr_groups(evlist) == (2 * num_core_entries(evlist)),
+ evlist);
- for (int i = 0; i < num_core_entries(); i++) {
+ for (int i = 0; i < num_core_entries(evlist); i++) {
/* cycles + G */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
- TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
- TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
- TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
- TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_CPU_CYCLES),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", !evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", !evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", evsel->core.attr.exclude_host, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong group name", !evsel->group_name, evsel);
+ TEST_ASSERT_EVSEL("wrong leader", evsel__is_group_leader(evsel), evsel);
+ TEST_ASSERT_EVSEL("wrong core.nr_members", evsel->core.nr_members == 2, evsel);
+ TEST_ASSERT_EVSEL("wrong group_idx", evsel__group_idx(evsel) == 0, evsel);
+ TEST_ASSERT_EVSEL("wrong sample_read", !evsel->sample_read, evsel);
/* instructions + G */
evsel = evsel__next(evsel);
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_INSTRUCTIONS, "instructions");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
- TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
- TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", !evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", !evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", evsel->core.attr.exclude_host, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong leader", evsel__has_leader(evsel, leader), evsel);
+ TEST_ASSERT_EVSEL("wrong group_idx", evsel__group_idx(evsel) == 1, evsel);
+ TEST_ASSERT_EVSEL("wrong sample_read", !evsel->sample_read, evsel);
}
- for (int i = 0; i < num_core_entries(); i++) {
+ for (int i = 0; i < num_core_entries(evlist); i++) {
/* cycles:G */
evsel = leader = evsel__next(evsel);
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
- TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
- TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
- TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
- TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_CPU_CYCLES),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", !evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", !evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", evsel->core.attr.exclude_host, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong group name", !evsel->group_name, evsel);
+ TEST_ASSERT_EVSEL("wrong leader", evsel__is_group_leader(evsel), evsel);
+ TEST_ASSERT_EVSEL("wrong core.nr_members", evsel->core.nr_members == 2, evsel);
+ TEST_ASSERT_EVSEL("wrong group_idx", evsel__group_idx(evsel) == 0, evsel);
+ TEST_ASSERT_EVSEL("wrong sample_read", !evsel->sample_read, evsel);
/* instructions:G */
evsel = evsel__next(evsel);
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_INSTRUCTIONS, "instructions");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
- TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", !evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", !evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", evsel->core.attr.exclude_host, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong leader", evsel__has_leader(evsel, leader), evsel);
+ TEST_ASSERT_EVSEL("wrong group_idx", evsel__group_idx(evsel) == 1, evsel);
}
- for (int i = 0; i < num_core_entries(); i++) {
+ for (int i = 0; i < num_core_entries(evlist); i++) {
/* cycles */
evsel = evsel__next(evsel);
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_CPU_CYCLES),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", !evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", !evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", !evsel->core.attr.exclude_host, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong leader", evsel__is_group_leader(evsel), evsel);
}
return TEST_OK;
}
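
The test__group_gh* functions that follow encode the guest/host modifier semantics. The mapping below is inferred from the assertions themselves rather than stated anywhere in this diff:

    /* Expected perf_event_attr exclusion bits per modifier (inferred). */
    static const struct {
            const char *mod;
            int exclude_guest, exclude_host;
    } gh_expect[] = {
            { "G", 0, 1 },  /* count guest side only: exclude the host */
            { "H", 1, 0 },  /* count host side only: exclude the guest */
    };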
@@ -1221,45 +1283,43 @@ static int test__group_gh1(struct evlist *evlist)
{
struct evsel *evsel = NULL, *leader;
- TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == (2 * num_core_entries()));
- TEST_ASSERT_VAL("wrong number of groups",
- evlist__nr_groups(evlist) == num_core_entries());
-
- for (int i = 0; i < num_core_entries(); i++) {
- int ret;
+ TEST_ASSERT_EVLIST("wrong number of entries",
+ evlist->core.nr_entries == (2 * num_core_entries(evlist)),
+ evlist);
+ TEST_ASSERT_EVLIST("wrong number of groups",
+ evlist__nr_groups(evlist) == num_core_entries(evlist),
+ evlist);
+ for (int i = 0; i < num_core_entries(evlist); i++) {
/* cycles + :H group modifier */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
- TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
- TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
- TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_CPU_CYCLES),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", !evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", !evsel->core.attr.exclude_host, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong group name", !evsel->group_name, evsel);
+ TEST_ASSERT_EVSEL("wrong leader", evsel__is_group_leader(evsel), evsel);
+ TEST_ASSERT_EVSEL("wrong core.nr_members", evsel->core.nr_members == 2, evsel);
+ TEST_ASSERT_EVSEL("wrong group_idx", evsel__group_idx(evsel) == 0, evsel);
/* cache-misses:G + :H group modifier */
evsel = evsel__next(evsel);
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_CACHE_MISSES, "cache-misses");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
- TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_CACHE_MISSES),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", !evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", !evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", !evsel->core.attr.exclude_host, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong leader", evsel__has_leader(evsel, leader), evsel);
+ TEST_ASSERT_EVSEL("wrong group_idx", evsel__group_idx(evsel) == 1, evsel);
}
return TEST_OK;
}
@@ -1268,45 +1328,43 @@ static int test__group_gh2(struct evlist *evlist)
{
struct evsel *evsel = NULL, *leader;
- TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == (2 * num_core_entries()));
- TEST_ASSERT_VAL("wrong number of groups",
- evlist__nr_groups(evlist) == num_core_entries());
-
- for (int i = 0; i < num_core_entries(); i++) {
- int ret;
+ TEST_ASSERT_EVLIST("wrong number of entries",
+ evlist->core.nr_entries == (2 * num_core_entries(evlist)),
+ evlist);
+ TEST_ASSERT_EVLIST("wrong number of groups",
+ evlist__nr_groups(evlist) == num_core_entries(evlist),
+ evlist);
+ for (int i = 0; i < num_core_entries(evlist); i++) {
/* cycles + :G group modifier */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
- TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
- TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
- TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_CPU_CYCLES),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", !evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", !evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", evsel->core.attr.exclude_host, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong group name", !evsel->group_name, evsel);
+ TEST_ASSERT_EVSEL("wrong leader", evsel__is_group_leader(evsel), evsel);
+ TEST_ASSERT_EVSEL("wrong core.nr_members", evsel->core.nr_members == 2, evsel);
+ TEST_ASSERT_EVSEL("wrong group_idx", evsel__group_idx(evsel) == 0, evsel);
/* cache-misses:H + :G group modifier */
evsel = evsel__next(evsel);
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_CACHE_MISSES, "cache-misses");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
- TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_CACHE_MISSES),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", !evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", !evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", !evsel->core.attr.exclude_host, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong leader", evsel__has_leader(evsel, leader), evsel);
+ TEST_ASSERT_EVSEL("wrong group_idx", evsel__group_idx(evsel) == 1, evsel);
}
return TEST_OK;
}
@@ -1315,45 +1373,43 @@ static int test__group_gh3(struct evlist *evlist)
{
struct evsel *evsel = NULL, *leader;
- TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == (2 * num_core_entries()));
- TEST_ASSERT_VAL("wrong number of groups",
- evlist__nr_groups(evlist) == num_core_entries());
-
- for (int i = 0; i < num_core_entries(); i++) {
- int ret;
+ TEST_ASSERT_EVLIST("wrong number of entries",
+ evlist->core.nr_entries == (2 * num_core_entries(evlist)),
+ evlist);
+ TEST_ASSERT_EVLIST("wrong number of groups",
+ evlist__nr_groups(evlist) == num_core_entries(evlist),
+ evlist);
+ for (int i = 0; i < num_core_entries(evlist); i++) {
/* cycles:G + :u group modifier */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
- TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
- TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
- TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_CPU_CYCLES),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", !evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", evsel->core.attr.exclude_host, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong group name", !evsel->group_name, evsel);
+ TEST_ASSERT_EVSEL("wrong leader", evsel__is_group_leader(evsel), evsel);
+ TEST_ASSERT_EVSEL("wrong core.nr_members", evsel->core.nr_members == 2, evsel);
+ TEST_ASSERT_EVSEL("wrong group_idx", evsel__group_idx(evsel) == 0, evsel);
/* cache-misses:H + :u group modifier */
evsel = evsel__next(evsel);
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_CACHE_MISSES, "cache-misses");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
- TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_CACHE_MISSES),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", !evsel->core.attr.exclude_host, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong leader", evsel__has_leader(evsel, leader), evsel);
+ TEST_ASSERT_EVSEL("wrong group_idx", evsel__group_idx(evsel) == 1, evsel);
}
return TEST_OK;
}
@@ -1362,45 +1418,43 @@ static int test__group_gh4(struct evlist *evlist)
{
struct evsel *evsel = NULL, *leader;
- TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == (2 * num_core_entries()));
- TEST_ASSERT_VAL("wrong number of groups",
- evlist__nr_groups(evlist) == num_core_entries());
-
- for (int i = 0; i < num_core_entries(); i++) {
- int ret;
+ TEST_ASSERT_EVLIST("wrong number of entries",
+ evlist->core.nr_entries == (2 * num_core_entries(evlist)),
+ evlist);
+ TEST_ASSERT_EVLIST("wrong number of groups",
+ evlist__nr_groups(evlist) == num_core_entries(evlist),
+ evlist);
+ for (int i = 0; i < num_core_entries(evlist); i++) {
/* cycles:G + :uG group modifier */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
- TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
- TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
- TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_CPU_CYCLES),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", !evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", evsel->core.attr.exclude_host, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong group name", !evsel->group_name, evsel);
+ TEST_ASSERT_EVSEL("wrong leader", evsel__is_group_leader(evsel), evsel);
+ TEST_ASSERT_EVSEL("wrong core.nr_members", evsel->core.nr_members == 2, evsel);
+ TEST_ASSERT_EVSEL("wrong group_idx", evsel__group_idx(evsel) == 0, evsel);
/* cache-misses:H + :uG group modifier */
evsel = evsel__next(evsel);
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_CACHE_MISSES, "cache-misses");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
- TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_CACHE_MISSES),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", !evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", !evsel->core.attr.exclude_host, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong leader", evsel__has_leader(evsel, leader), evsel);
+ TEST_ASSERT_EVSEL("wrong group_idx", evsel__group_idx(evsel) == 1, evsel);
}
return TEST_OK;
}
@@ -1409,58 +1463,54 @@ static int test__leader_sample1(struct evlist *evlist)
{
struct evsel *evsel = NULL, *leader;
- TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == (3 * num_core_entries()));
-
- for (int i = 0; i < num_core_entries(); i++) {
- int ret;
+ TEST_ASSERT_EVLIST("wrong number of entries",
+ evlist->core.nr_entries == (3 * num_core_entries(evlist)),
+ evlist);
+ for (int i = 0; i < num_core_entries(evlist); i++) {
/* cycles - sampling group leader */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
- TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
- TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read);
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_CPU_CYCLES),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", !evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", !evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", !evsel->core.attr.exclude_host, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong group name", !evsel->group_name, evsel);
+ TEST_ASSERT_EVSEL("wrong leader", evsel__has_leader(evsel, leader), evsel);
+ TEST_ASSERT_EVSEL("wrong sample_read", evsel->sample_read, evsel);
/* cache-misses - not sampling */
evsel = evsel__next(evsel);
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_CACHE_MISSES, "cache-misses");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
- TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read);
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_CACHE_MISSES),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", !evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", !evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", !evsel->core.attr.exclude_host, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong leader", evsel__has_leader(evsel, leader), evsel);
+ TEST_ASSERT_EVSEL("wrong sample_read", evsel->sample_read, evsel);
/* branch-misses - not sampling */
evsel = evsel__next(evsel);
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_BRANCH_MISSES, "branch-misses");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
- TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
- TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read);
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", !evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", !evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", !evsel->core.attr.exclude_host, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong group name", !evsel->group_name, evsel);
+ TEST_ASSERT_EVSEL("wrong leader", evsel__has_leader(evsel, leader), evsel);
+ TEST_ASSERT_EVSEL("wrong sample_read", evsel->sample_read, evsel);
}
return TEST_OK;
}
@@ -1469,43 +1519,40 @@ static int test__leader_sample2(struct evlist *evlist __maybe_unused)
{
struct evsel *evsel = NULL, *leader;
- TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == (2 * num_core_entries()));
-
- for (int i = 0; i < num_core_entries(); i++) {
- int ret;
+ TEST_ASSERT_EVLIST("wrong number of entries",
+ evlist->core.nr_entries == (2 * num_core_entries(evlist)),
+ evlist);
+ for (int i = 0; i < num_core_entries(evlist); i++) {
/* instructions - sampling group leader */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_INSTRUCTIONS, "instructions");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
- TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
- TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read);
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", !evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", !evsel->core.attr.exclude_host, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong group name", !evsel->group_name, evsel);
+ TEST_ASSERT_EVSEL("wrong leader", evsel__has_leader(evsel, leader), evsel);
+ TEST_ASSERT_EVSEL("wrong sample_read", evsel->sample_read, evsel);
/* branch-misses - not sampling */
evsel = evsel__next(evsel);
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_BRANCH_MISSES, "branch-misses");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
- TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
- TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
- TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read);
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude guest", !evsel->core.attr.exclude_guest, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude host", !evsel->core.attr.exclude_host, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong group name", !evsel->group_name, evsel);
+ TEST_ASSERT_EVSEL("wrong leader", evsel__has_leader(evsel, leader), evsel);
+ TEST_ASSERT_EVSEL("wrong sample_read", evsel->sample_read, evsel);
}
return TEST_OK;
}
@@ -1514,16 +1561,17 @@ static int test__checkevent_pinned_modifier(struct evlist *evlist)
{
struct evsel *evsel = NULL;
- TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == num_core_entries());
+ TEST_ASSERT_EVLIST("wrong number of entries",
+ evlist->core.nr_entries == num_core_entries(evlist),
+ evlist);
- for (int i = 0; i < num_core_entries(); i++) {
+ for (int i = 0; i < num_core_entries(evlist); i++) {
evsel = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong pinned", evsel->core.attr.pinned);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong pinned", evsel->core.attr.pinned, evsel);
}
return test__checkevent_symbolic_name(evlist);
}
@@ -1532,39 +1580,35 @@ static int test__pinned_group(struct evlist *evlist)
{
struct evsel *evsel = NULL, *leader;
- TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == (3 * num_core_entries()));
-
- for (int i = 0; i < num_core_entries(); i++) {
- int ret;
+ TEST_ASSERT_EVLIST("wrong number of entries",
+ evlist->core.nr_entries == (3 * num_core_entries(evlist)),
+ evlist);
+ for (int i = 0; i < num_core_entries(evlist); i++) {
/* cycles - group leader */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
- TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_CPU_CYCLES),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong group name", !evsel->group_name, evsel);
+ TEST_ASSERT_EVSEL("wrong leader", evsel__has_leader(evsel, leader), evsel);
/* TODO: The group modifier is not copied to the split group leader. */
if (perf_pmus__num_core_pmus() == 1)
- TEST_ASSERT_VAL("wrong pinned", evsel->core.attr.pinned);
+ TEST_ASSERT_EVSEL("wrong pinned", evsel->core.attr.pinned, evsel);
/* cache-misses - cannot be pinned, but will go on with the leader */
evsel = evsel__next(evsel);
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_CACHE_MISSES, "cache-misses");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned);
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_CACHE_MISSES),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong pinned", !evsel->core.attr.pinned, evsel);
/* branch-misses - ditto */
evsel = evsel__next(evsel);
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_BRANCH_MISSES, "branch-misses");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned);
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong pinned", !evsel->core.attr.pinned, evsel);
}
return TEST_OK;
}
@@ -1573,11 +1617,14 @@ static int test__checkevent_exclusive_modifier(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong exclusive", evsel->core.attr.exclusive);
+ TEST_ASSERT_EVLIST("wrong number of entries",
+ evlist->core.nr_entries == num_core_entries(evlist),
+ evlist);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", evsel->core.attr.precise_ip, evsel);
+ TEST_ASSERT_EVSEL("wrong exclusive", evsel->core.attr.exclusive, evsel);
return test__checkevent_symbolic_name(evlist);
}
@@ -1586,39 +1633,35 @@ static int test__exclusive_group(struct evlist *evlist)
{
struct evsel *evsel = NULL, *leader;
- TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == 3 * num_core_entries());
-
- for (int i = 0; i < num_core_entries(); i++) {
- int ret;
+ TEST_ASSERT_EVLIST("wrong number of entries",
+ evlist->core.nr_entries == 3 * num_core_entries(evlist),
+ evlist);
+ for (int i = 0; i < num_core_entries(evlist); i++) {
/* cycles - group leader */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
- TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_CPU_CYCLES),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong group name", !evsel->group_name, evsel);
+ TEST_ASSERT_EVSEL("wrong leader", evsel__has_leader(evsel, leader), evsel);
/* TODO: The group modifier is not copied to the split group leader. */
if (perf_pmus__num_core_pmus() == 1)
- TEST_ASSERT_VAL("wrong exclusive", evsel->core.attr.exclusive);
+ TEST_ASSERT_EVSEL("wrong exclusive", evsel->core.attr.exclusive, evsel);
/* cache-misses - cannot be exclusive, but will go on with the leader */
evsel = evsel__next(evsel);
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_CACHE_MISSES, "cache-misses");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.exclusive);
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_CACHE_MISSES),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclusive", !evsel->core.attr.exclusive, evsel);
/* branch-misses - ditto */
evsel = evsel__next(evsel);
- ret = assert_hw(&evsel->core, PERF_COUNT_HW_BRANCH_MISSES, "branch-misses");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.exclusive);
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES),
+ evsel);
+ TEST_ASSERT_EVSEL("wrong exclusive", !evsel->core.attr.exclusive, evsel);
}
return TEST_OK;
}
@@ -1626,13 +1669,13 @@ static int test__checkevent_breakpoint_len(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, 0));
- TEST_ASSERT_VAL("wrong bp_type", (HW_BREAKPOINT_R | HW_BREAKPOINT_W) ==
- evsel->core.attr.bp_type);
- TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_1 ==
- evsel->core.attr.bp_len);
+ TEST_ASSERT_EVLIST("wrong number of entries", 1 == evlist->core.nr_entries, evlist);
+ TEST_ASSERT_EVSEL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type, evsel);
+ TEST_ASSERT_EVSEL("wrong config", 0 == evsel->core.attr.config, evsel);
+ TEST_ASSERT_EVSEL("wrong bp_type",
+ (HW_BREAKPOINT_R | HW_BREAKPOINT_W) == evsel->core.attr.bp_type,
+ evsel);
+ TEST_ASSERT_EVSEL("wrong bp_len", HW_BREAKPOINT_LEN_1 == evsel->core.attr.bp_len, evsel);
return TEST_OK;
}
@@ -1641,13 +1684,11 @@ static int test__checkevent_breakpoint_len_w(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, 0));
- TEST_ASSERT_VAL("wrong bp_type", HW_BREAKPOINT_W ==
- evsel->core.attr.bp_type);
- TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_2 ==
- evsel->core.attr.bp_len);
+ TEST_ASSERT_EVLIST("wrong number of entries", 1 == evlist->core.nr_entries, evlist);
+ TEST_ASSERT_EVSEL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type, evsel);
+ TEST_ASSERT_EVSEL("wrong config", 0 == evsel->core.attr.config, evsel);
+ TEST_ASSERT_EVSEL("wrong bp_type", HW_BREAKPOINT_W == evsel->core.attr.bp_type, evsel);
+ TEST_ASSERT_EVSEL("wrong bp_len", HW_BREAKPOINT_LEN_2 == evsel->core.attr.bp_len, evsel);
return TEST_OK;
}
@@ -1657,10 +1698,11 @@ test__checkevent_breakpoint_len_rw_modifier(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_EVLIST("wrong number of entries", 1 == evlist->core.nr_entries, evlist);
+ TEST_ASSERT_EVSEL("wrong exclude_user", !evsel->core.attr.exclude_user, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", evsel->core.attr.exclude_kernel, evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_hv", evsel->core.attr.exclude_hv, evsel);
+ TEST_ASSERT_EVSEL("wrong precise_ip", !evsel->core.attr.precise_ip, evsel);
return test__checkevent_breakpoint_rw(evlist);
}
@@ -1669,10 +1711,10 @@ static int test__checkevent_precise_max_modifier(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong number of entries",
- evlist->core.nr_entries == 1 + num_core_entries());
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_SW_TASK_CLOCK));
+ TEST_ASSERT_EVLIST("wrong number of entries",
+ evlist->core.nr_entries == 1 + num_core_entries(evlist),
+ evlist);
+ TEST_ASSERT_EVSEL("wrong type/config", evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK), evsel);
return TEST_OK;
}
@@ -1680,7 +1722,10 @@ static int test__checkevent_config_symbol(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong name setting", evsel__name_is(evsel, "insn"));
+ TEST_ASSERT_EVLIST("wrong number of entries",
+ evlist->core.nr_entries == num_core_entries(evlist),
+ evlist);
+ TEST_ASSERT_EVSEL("wrong name setting", evsel__name_is(evsel, "insn"), evsel);
return TEST_OK;
}
@@ -1688,7 +1733,8 @@ static int test__checkevent_config_raw(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong name setting", evsel__name_is(evsel, "rawpmu"));
+ TEST_ASSERT_EVLIST("wrong number of entries", 1 == evlist->core.nr_entries, evlist);
+ TEST_ASSERT_EVSEL("wrong name setting", evsel__name_is(evsel, "rawpmu"), evsel);
return TEST_OK;
}
@@ -1696,7 +1742,8 @@ static int test__checkevent_config_num(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong name setting", evsel__name_is(evsel, "numpmu"));
+ TEST_ASSERT_EVLIST("wrong number of entries", 1 == evlist->core.nr_entries, evlist);
+ TEST_ASSERT_EVSEL("wrong name setting", evsel__name_is(evsel, "numpmu"), evsel);
return TEST_OK;
}
@@ -1704,18 +1751,16 @@ static int test__checkevent_config_cache(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong name setting", evsel__name_is(evsel, "cachepmu"));
+ TEST_ASSERT_EVLIST("wrong number of entries",
+ evlist->core.nr_entries == num_core_entries(evlist),
+ evlist);
+ TEST_ASSERT_EVSEL("wrong name setting", evsel__name_is(evsel, "cachepmu"), evsel);
return test__checkevent_genhw(evlist);
}
-static bool test__pmu_cpu_valid(void)
+static bool test__pmu_default_core_event_valid(void)
{
- return !!perf_pmus__find("cpu");
-}
-
-static bool test__pmu_cpu_event_valid(void)
-{
- struct perf_pmu *pmu = perf_pmus__find("cpu");
+ struct perf_pmu *pmu = perf_pmus__find_core_pmu();
if (!pmu)
return false;
@@ -1732,7 +1777,56 @@ static int test__intel_pt(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong name setting", evsel__name_is(evsel, "intel_pt//u"));
+ TEST_ASSERT_EVLIST("wrong number of entries", 1 == evlist->core.nr_entries, evlist);
+ TEST_ASSERT_EVSEL("wrong name setting", evsel__name_is(evsel, "intel_pt//u"), evsel);
+ return TEST_OK;
+}
+
+static bool test__acr_valid(void)
+{
+ struct perf_pmu *pmu = NULL;
+
+ while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
+ if (perf_pmu__has_format(pmu, "acr_mask"))
+ return true;
+ }
+
+ return false;
+}
+
+static int test__ratio_to_prev(struct evlist *evlist)
+{
+ struct evsel *evsel;
+
+ TEST_ASSERT_VAL("wrong number of entries", 2 * perf_pmus__num_core_pmus() == evlist->core.nr_entries);
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (!perf_pmu__has_format(evsel->pmu, "acr_mask"))
+ return TEST_OK;
+
+ if (evsel == evlist__first(evlist)) {
+ TEST_ASSERT_VAL("wrong config2", 0 == evsel->core.attr.config2);
+ TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
+ TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
+ TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_CPU_CYCLES),
+ evsel);
+ } else {
+ TEST_ASSERT_VAL("wrong config2", 0 == evsel->core.attr.config2);
+ TEST_ASSERT_VAL("wrong leader", !evsel__is_group_leader(evsel));
+ TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 0);
+ TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
+ TEST_ASSERT_EVSEL("unexpected event",
+ evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS),
+ evsel);
+ }
+ /*
+ * The period value gets configured within evlist__config,
+ * while this test only exercises the parse-events path.
+ */
+ TEST_ASSERT_VAL("wrong period", 0 == evsel->core.attr.sample_period);
+ }
return TEST_OK;
}
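
test__acr_valid() gates the new case on some core PMU exposing an "acr_mask" format, and test__ratio_to_prev() then checks that "{cycles,instructions/period=200000,ratio-to-prev=2.0/}" parses into a two-member group per core PMU, with config2 and sample_period still zero because evlist__config() applies them later. A minimal sketch of driving the same syntax through the parser directly (error handling trimmed; assumes a PMU with the acr_mask format is present):

#include <errno.h>
#include "util/evlist.h"
#include "util/parse-events.h"

static int try_ratio_to_prev(void)
{
	struct evlist *evlist = evlist__new();
	struct parse_events_error err;
	int ret;

	if (!evlist)
		return -ENOMEM;
	parse_events_error__init(&err);
	ret = parse_events(evlist,
			   "{cycles,instructions/period=200000,ratio-to-prev=2.0/}",
			   &err);
	parse_events_error__exit(&err);
	evlist__delete(evlist);
	return ret;	/* 0 on success, as asserted by test__ratio_to_prev() */
}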
@@ -1740,9 +1834,13 @@ static int test__checkevent_complex_name(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong complex name parsing",
- evsel__name_is(evsel,
- "COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks"));
+ TEST_ASSERT_EVLIST("wrong number of entries",
+ evlist->core.nr_entries == num_core_entries(evlist),
+ evlist);
+ TEST_ASSERT_EVSEL("wrong complex name parsing",
+ evsel__name_is(evsel,
+ "COMPLEX_CYCLES_NAME:orig=cpu-cycles,desc=chip-clock-ticks"),
+ evsel);
return TEST_OK;
}
@@ -1750,57 +1848,57 @@ static int test__checkevent_raw_pmu(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, 0x1a));
+ TEST_ASSERT_EVLIST("wrong number of entries", 1 == evlist->core.nr_entries, evlist);
+ TEST_ASSERT_EVSEL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type, evsel);
+ TEST_ASSERT_EVSEL("wrong config", 0x1a == evsel->core.attr.config, evsel);
return TEST_OK;
}
static int test__sym_event_slash(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- int ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_EVLIST("wrong number of entries",
+ evlist->core.nr_entries == num_core_entries(evlist),
+ evlist);
+ TEST_ASSERT_EVSEL("unexpected event", evsel__match(evsel, HARDWARE, HW_CPU_CYCLES), evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_kernel", evsel->core.attr.exclude_kernel, evsel);
return TEST_OK;
}
static int test__sym_event_dc(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- int ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
+ TEST_ASSERT_EVLIST("wrong number of entries",
+ evlist->core.nr_entries == num_core_entries(evlist),
+ evlist);
+ TEST_ASSERT_EVSEL("unexpected event", evsel__match(evsel, HARDWARE, HW_CPU_CYCLES), evsel);
+ TEST_ASSERT_EVSEL("wrong exclude_user", evsel->core.attr.exclude_user, evsel);
return TEST_OK;
}
static int test__term_equal_term(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- int ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
- if (ret)
- return ret;
-
- TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "name") == 0);
+ TEST_ASSERT_EVLIST("wrong number of entries",
+ evlist->core.nr_entries == num_core_entries(evlist),
+ evlist);
+ TEST_ASSERT_EVSEL("unexpected event", evsel__match(evsel, HARDWARE, HW_CPU_CYCLES), evsel);
+ TEST_ASSERT_EVSEL("wrong name setting", strcmp(evsel->name, "name") == 0, evsel);
return TEST_OK;
}
static int test__term_equal_legacy(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
- int ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
-
- if (ret)
- return ret;
- TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "l1d") == 0);
+ TEST_ASSERT_EVLIST("wrong number of entries",
+ evlist->core.nr_entries == num_core_entries(evlist),
+ evlist);
+ TEST_ASSERT_EVSEL("unexpected event", evsel__match(evsel, HARDWARE, HW_CPU_CYCLES), evsel);
+ TEST_ASSERT_EVSEL("wrong name setting", strcmp(evsel->name, "l1d") == 0, evsel);
return TEST_OK;
}
@@ -1891,7 +1989,7 @@ static const struct evlist_test test__events[] = {
/* 4 */
},
{
- .name = "cycles/period=100000,config2/",
+ .name = "cpu-cycles/period=100000,config2/",
.check = test__checkevent_symbolic_name_config,
/* 5 */
},
@@ -2006,27 +2104,27 @@ static const struct evlist_test test__events[] = {
/* 7 */
},
{
- .name = "{instructions:k,cycles:upp}",
+ .name = "{instructions:k,cpu-cycles:upp}",
.check = test__group1,
/* 8 */
},
{
- .name = "{faults:k,branches}:u,cycles:k",
+ .name = "{faults:k,branches}:u,cpu-cycles:k",
.check = test__group2,
/* 9 */
},
{
- .name = "group1{syscalls:sys_enter_openat:H,cycles:kppp},group2{cycles,1:3}:G,instructions:u",
+ .name = "group1{syscalls:sys_enter_openat:H,cpu-cycles:kppp},group2{cpu-cycles,1:3}:G,instructions:u",
.check = test__group3,
/* 0 */
},
{
- .name = "{cycles:u,instructions:kp}:p",
+ .name = "{cpu-cycles:u,instructions:kp}:p",
.check = test__group4,
/* 1 */
},
{
- .name = "{cycles,instructions}:G,{cycles:G,instructions:G},cycles",
+ .name = "{cpu-cycles,instructions}:G,{cpu-cycles:G,instructions:G},cpu-cycles",
.check = test__group5,
/* 2 */
},
@@ -2036,27 +2134,27 @@ static const struct evlist_test test__events[] = {
/* 3 */
},
{
- .name = "{cycles,cache-misses:G}:H",
+ .name = "{cpu-cycles,cache-misses:G}:H",
.check = test__group_gh1,
/* 4 */
},
{
- .name = "{cycles,cache-misses:H}:G",
+ .name = "{cpu-cycles,cache-misses:H}:G",
.check = test__group_gh2,
/* 5 */
},
{
- .name = "{cycles:G,cache-misses:H}:u",
+ .name = "{cpu-cycles:G,cache-misses:H}:u",
.check = test__group_gh3,
/* 6 */
},
{
- .name = "{cycles:G,cache-misses:H}:uG",
+ .name = "{cpu-cycles:G,cache-misses:H}:uG",
.check = test__group_gh4,
/* 7 */
},
{
- .name = "{cycles,cache-misses,branch-misses}:S",
+ .name = "{cpu-cycles,cache-misses,branch-misses}:S",
.check = test__leader_sample1,
/* 8 */
},
@@ -2071,7 +2169,7 @@ static const struct evlist_test test__events[] = {
/* 0 */
},
{
- .name = "{cycles,cache-misses,branch-misses}:D",
+ .name = "{cpu-cycles,cache-misses,branch-misses}:D",
.check = test__pinned_group,
/* 1 */
},
@@ -2109,7 +2207,7 @@ static const struct evlist_test test__events[] = {
/* 6 */
},
{
- .name = "task-clock:P,cycles",
+ .name = "task-clock:P,cpu-cycles",
.check = test__checkevent_precise_max_modifier,
/* 7 */
},
@@ -2140,17 +2238,17 @@ static const struct evlist_test test__events[] = {
/* 2 */
},
{
- .name = "cycles/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks'/Duk",
+ .name = "cpu-cycles/name='COMPLEX_CYCLES_NAME:orig=cpu-cycles,desc=chip-clock-ticks'/Duk",
.check = test__checkevent_complex_name,
/* 3 */
},
{
- .name = "cycles//u",
+ .name = "cpu-cycles//u",
.check = test__sym_event_slash,
/* 4 */
},
{
- .name = "cycles:k",
+ .name = "cpu-cycles:k",
.check = test__sym_event_dc,
/* 5 */
},
@@ -2160,17 +2258,17 @@ static const struct evlist_test test__events[] = {
/* 6 */
},
{
- .name = "{cycles,cache-misses,branch-misses}:e",
+ .name = "{cpu-cycles,cache-misses,branch-misses}:e",
.check = test__exclusive_group,
/* 7 */
},
{
- .name = "cycles/name=name/",
+ .name = "cpu-cycles/name=name/",
.check = test__term_equal_term,
/* 8 */
},
{
- .name = "cycles/name=l1d/",
+ .name = "cpu-cycles/name=l1d/",
.check = test__term_equal_legacy,
/* 9 */
},
@@ -2249,30 +2347,34 @@ static const struct evlist_test test__events[] = {
.check = test__checkevent_tracepoint,
/* 4 */
},
+ {
+ .name = "{cycles,instructions/period=200000,ratio-to-prev=2.0/}",
+ .valid = test__acr_valid,
+ .check = test__ratio_to_prev,
+ /* 5 */
+ },
};
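
The new entry reuses the table's .valid/.check convention: .valid lets a case opt out on machines that cannot support it, and .check validates the parsed evlist. A hedged sketch of how such a table is typically driven (the real runner, test_events() below, additionally rewrites "default_core" placeholders first):

static int run_table(const struct evlist_test *events, int cnt)
{
	int ret = TEST_OK;

	for (int i = 0; i < cnt; i++) {
		const struct evlist_test *e = &events[i];

		/* Skip entries the current machine cannot support. */
		if (e->valid && !e->valid()) {
			pr_debug("skipping test %d '%s'\n", i, e->name);
			continue;
		}
		ret = combine_test_results(ret, test_event(e));
	}
	return ret;
}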
static const struct evlist_test test__events_pmu[] = {
{
- .name = "cpu/config=10,config1=1,config2=3,period=1000/u",
- .valid = test__pmu_cpu_valid,
+ .name = "default_core/config=10,config1=1,config2=3,period=1000/u",
.check = test__checkevent_pmu,
/* 0 */
},
{
- .name = "cpu/config=1,name=krava/u,cpu/config=2/u",
- .valid = test__pmu_cpu_valid,
+ .name = "default_core/config=1,name=krava/u,default_core/config=2/u",
.check = test__checkevent_pmu_name,
/* 1 */
},
{
- .name = "cpu/config=1,call-graph=fp,time,period=100000/,cpu/config=2,call-graph=no,time=0,period=2000/",
- .valid = test__pmu_cpu_valid,
+ .name = "default_core/config=1,call-graph=fp,time,period=100000/,default_core/config=2,call-graph=no,time=0,period=2000/",
.check = test__checkevent_pmu_partial_time_callgraph,
/* 2 */
},
{
- .name = "cpu/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks',period=0x1,event=0x2/ukp",
- .valid = test__pmu_cpu_event_valid,
+ .name = "default_core/name='COMPLEX_CYCLES_NAME:orig=cpu-cycles,desc=chip-clock-ticks',period=0x1,event=0x2/ukp",
+ .valid = test__pmu_default_core_event_valid,
.check = test__checkevent_complex_name,
/* 3 */
},
@@ -2287,158 +2389,132 @@ static const struct evlist_test test__events_pmu[] = {
/* 5 */
},
{
- .name = "cpu/L1-dcache-load-miss/",
- .valid = test__pmu_cpu_valid,
+ .name = "default_core/L1-dcache-load-miss/",
.check = test__checkevent_genhw,
/* 6 */
},
{
- .name = "cpu/L1-dcache-load-miss/kp",
- .valid = test__pmu_cpu_valid,
+ .name = "default_core/L1-dcache-load-miss/kp",
.check = test__checkevent_genhw_modifier,
/* 7 */
},
{
- .name = "cpu/L1-dcache-misses,name=cachepmu/",
- .valid = test__pmu_cpu_valid,
+ .name = "default_core/L1-dcache-misses,name=cachepmu/",
.check = test__checkevent_config_cache,
/* 8 */
},
{
- .name = "cpu/instructions/",
- .valid = test__pmu_cpu_valid,
+ .name = "default_core/instructions/",
.check = test__checkevent_symbolic_name,
/* 9 */
},
{
- .name = "cpu/cycles,period=100000,config2/",
- .valid = test__pmu_cpu_valid,
+ .name = "default_core/cycles,period=100000,config2/",
.check = test__checkevent_symbolic_name_config,
/* 0 */
},
{
- .name = "cpu/instructions/h",
- .valid = test__pmu_cpu_valid,
+ .name = "default_core/instructions/h",
.check = test__checkevent_symbolic_name_modifier,
/* 1 */
},
{
- .name = "cpu/instructions/G",
- .valid = test__pmu_cpu_valid,
+ .name = "default_core/instructions/G",
.check = test__checkevent_exclude_host_modifier,
/* 2 */
},
{
- .name = "cpu/instructions/H",
- .valid = test__pmu_cpu_valid,
+ .name = "default_core/instructions/H",
.check = test__checkevent_exclude_guest_modifier,
/* 3 */
},
{
- .name = "{cpu/instructions/k,cpu/cycles/upp}",
- .valid = test__pmu_cpu_valid,
+ .name = "{default_core/instructions/k,default_core/cycles/upp}",
.check = test__group1,
/* 4 */
},
{
- .name = "{cpu/cycles/u,cpu/instructions/kp}:p",
- .valid = test__pmu_cpu_valid,
+ .name = "{default_core/cycles/u,default_core/instructions/kp}:p",
.check = test__group4,
/* 5 */
},
{
- .name = "{cpu/cycles/,cpu/cache-misses/G}:H",
- .valid = test__pmu_cpu_valid,
+ .name = "{default_core/cycles/,default_core/cache-misses/G}:H",
.check = test__group_gh1,
/* 6 */
},
{
- .name = "{cpu/cycles/,cpu/cache-misses/H}:G",
- .valid = test__pmu_cpu_valid,
+ .name = "{default_core/cycles/,default_core/cache-misses/H}:G",
.check = test__group_gh2,
/* 7 */
},
{
- .name = "{cpu/cycles/G,cpu/cache-misses/H}:u",
- .valid = test__pmu_cpu_valid,
+ .name = "{default_core/cycles/G,default_core/cache-misses/H}:u",
.check = test__group_gh3,
/* 8 */
},
{
- .name = "{cpu/cycles/G,cpu/cache-misses/H}:uG",
- .valid = test__pmu_cpu_valid,
+ .name = "{default_core/cycles/G,default_core/cache-misses/H}:uG",
.check = test__group_gh4,
/* 9 */
},
{
- .name = "{cpu/cycles/,cpu/cache-misses/,cpu/branch-misses/}:S",
- .valid = test__pmu_cpu_valid,
+ .name = "{default_core/cycles/,default_core/cache-misses/,default_core/branch-misses/}:S",
.check = test__leader_sample1,
/* 0 */
},
{
- .name = "{cpu/instructions/,cpu/branch-misses/}:Su",
- .valid = test__pmu_cpu_valid,
+ .name = "{default_core/instructions/,default_core/branch-misses/}:Su",
.check = test__leader_sample2,
/* 1 */
},
{
- .name = "cpu/instructions/uDp",
- .valid = test__pmu_cpu_valid,
+ .name = "default_core/instructions/uDp",
.check = test__checkevent_pinned_modifier,
/* 2 */
},
{
- .name = "{cpu/cycles/,cpu/cache-misses/,cpu/branch-misses/}:D",
- .valid = test__pmu_cpu_valid,
+ .name = "{default_core/cycles/,default_core/cache-misses/,default_core/branch-misses/}:D",
.check = test__pinned_group,
/* 3 */
},
{
- .name = "cpu/instructions/I",
- .valid = test__pmu_cpu_valid,
+ .name = "default_core/instructions/I",
.check = test__checkevent_exclude_idle_modifier,
/* 4 */
},
{
- .name = "cpu/instructions/kIG",
- .valid = test__pmu_cpu_valid,
+ .name = "default_core/instructions/kIG",
.check = test__checkevent_exclude_idle_modifier_1,
/* 5 */
},
{
- .name = "cpu/cycles/u",
- .valid = test__pmu_cpu_valid,
+ .name = "default_core/cycles/u",
.check = test__sym_event_slash,
/* 6 */
},
{
- .name = "cpu/cycles/k",
- .valid = test__pmu_cpu_valid,
+ .name = "default_core/cycles/k",
.check = test__sym_event_dc,
/* 7 */
},
{
- .name = "cpu/instructions/uep",
- .valid = test__pmu_cpu_valid,
+ .name = "default_core/instructions/uep",
.check = test__checkevent_exclusive_modifier,
/* 8 */
},
{
- .name = "{cpu/cycles/,cpu/cache-misses/,cpu/branch-misses/}:e",
- .valid = test__pmu_cpu_valid,
+ .name = "{default_core/cycles/,default_core/cache-misses/,default_core/branch-misses/}:e",
.check = test__exclusive_group,
/* 9 */
},
{
- .name = "cpu/cycles,name=name/",
- .valid = test__pmu_cpu_valid,
+ .name = "default_core/cycles,name=name/",
.check = test__term_equal_term,
/* 0 */
},
{
- .name = "cpu/cycles,name=l1d/",
- .valid = test__pmu_cpu_valid,
+ .name = "default_core/cycles,name=l1d/",
.check = test__term_equal_legacy,
/* 1 */
},
@@ -2451,7 +2527,7 @@ struct terms_test {
static const struct terms_test test__terms[] = {
[0] = {
- .str = "config=10,config1,config2=3,config3=4,umask=1,read,r0xead",
+ .str = "config=10,config1,config2=3,config3=4,config4=5,umask=1,read,r0xead",
.check = test__checkterms_simple,
},
};
@@ -2528,15 +2604,30 @@ static int combine_test_results(int existing, int latest)
static int test_events(const struct evlist_test *events, int cnt)
{
int ret = TEST_OK;
+ struct perf_pmu *core_pmu = perf_pmus__find_core_pmu();
for (int i = 0; i < cnt; i++) {
- const struct evlist_test *e = &events[i];
+ struct evlist_test e = events[i];
int test_ret;
+ const char *pos = e.name;
+ char buf[1024], *buf_pos = buf, *end;
+
+ while ((end = strstr(pos, "default_core"))) {
+ size_t len = end - pos;
+
+ strncpy(buf_pos, pos, len);
+ pos = end + 12;
+ buf_pos += len;
+ strcpy(buf_pos, core_pmu->name);
+ buf_pos += strlen(core_pmu->name);
+ }
+ strcpy(buf_pos, pos);
- pr_debug("running test %d '%s'\n", i, e->name);
- test_ret = test_event(e);
+ e.name = buf;
+ pr_debug("running test %d '%s'\n", i, e.name);
+ test_ret = test_event(&e);
if (test_ret != TEST_OK) {
- pr_debug("Event test failure: test %d '%s'", i, e->name);
+ pr_debug("Event test failure: test %d '%s'", i, e.name);
ret = combine_test_results(ret, test_ret);
}
}
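
The loop above expands every "default_core" placeholder in a test name to the machine's first core PMU (for example "cpu", or "cpu_atom" on hybrid systems) before running it, which is what lets the test__events_pmu[] table above drop its per-entry test__pmu_cpu_valid gates. An equivalent standalone helper, sketched with explicit bounds checking (the patch's fixed 1024-byte buffer is assumed sufficient for the table's names):

#include <stdio.h>
#include <string.h>

static void expand_default_core(const char *tmpl, const char *pmu_name,
				char *out, size_t out_sz)
{
	const char *pos = tmpl, *end;
	size_t used = 0;

	while ((end = strstr(pos, "default_core")) != NULL) {
		int n = snprintf(out + used, out_sz - used, "%.*s%s",
				 (int)(end - pos), pos, pmu_name);

		if (n < 0 || (size_t)n >= out_sz - used)
			return;	/* would truncate: stop rather than overflow */
		used += n;
		pos = end + strlen("default_core");
	}
	snprintf(out + used, out_sz - used, "%s", pos);
}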
@@ -2556,7 +2647,7 @@ static int test_term(const struct terms_test *t)
parse_events_terms__init(&terms);
- ret = parse_events_terms(&terms, t->str, /*input=*/ NULL);
+ ret = parse_events_terms(&terms, t->str);
if (ret) {
pr_debug("failed to parse terms '%s', err %d\n",
t->str , ret);
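
test_term() now calls the two-argument parse_events_terms(), matching the config4=5 addition to test__terms[] above. A minimal round-trip sketch under those assumptions:

#include "util/parse-events.h"

static int try_terms(void)
{
	struct parse_events_terms terms;
	int ret;

	parse_events_terms__init(&terms);
	ret = parse_events_terms(&terms,
		"config=10,config1,config2=3,config3=4,config4=5,umask=1,read,r0xead");
	parse_events_terms__exit(&terms);
	return ret;
}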
@@ -2777,8 +2868,9 @@ static int test__checkevent_pmu_events_alias(struct evlist *evlist)
struct evsel *evsel1 = evlist__first(evlist);
struct evsel *evsel2 = evlist__last(evlist);
- TEST_ASSERT_VAL("wrong type", evsel1->core.attr.type == evsel2->core.attr.type);
- TEST_ASSERT_VAL("wrong config", evsel1->core.attr.config == evsel2->core.attr.config);
+ TEST_ASSERT_EVSEL("wrong type", evsel1->core.attr.type == evsel2->core.attr.type, evsel1);
+ TEST_ASSERT_EVSEL("wrong config", evsel1->core.attr.config == evsel2->core.attr.config,
+ evsel1);
return TEST_OK;
}
diff --git a/tools/perf/tests/parse-metric.c b/tools/perf/tests/parse-metric.c
index 2c28fb50dc24..6bbc209a5c6a 100644
--- a/tools/perf/tests/parse-metric.c
+++ b/tools/perf/tests/parse-metric.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
+#include <errno.h>
#include <string.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
@@ -40,20 +41,17 @@ static void load_runtime_stat(struct evlist *evlist, struct value *vals)
count = find_value(evsel->name, vals);
evsel->supported = true;
evsel->stats->aggr->counts.val = count;
- if (evsel__name_is(evsel, "duration_time"))
- update_stats(&walltime_nsecs_stats, count);
}
}
-static double compute_single(struct rblist *metric_events, struct evlist *evlist,
- const char *name)
+static double compute_single(struct evlist *evlist, const char *name)
{
struct metric_expr *mexp;
struct metric_event *me;
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
- me = metricgroup__lookup(metric_events, evsel, false);
+ me = metricgroup__lookup(&evlist->metric_events, evsel, false);
if (me != NULL) {
list_for_each_entry (mexp, &me->head, nd) {
if (strcmp(mexp->metric_name, name))
@@ -69,9 +67,6 @@ static int __compute_metric(const char *name, struct value *vals,
const char *name1, double *ratio1,
const char *name2, double *ratio2)
{
- struct rblist metric_events = {
- .nr_entries = 0,
- };
const struct pmu_metrics_table *pme_test;
struct perf_cpu_map *cpus;
struct evlist *evlist;
@@ -95,8 +90,7 @@ static int __compute_metric(const char *name, struct value *vals,
/* Parse the metric into metric_events list. */
pme_test = find_core_metrics_table("testarch", "testcpu");
- err = metricgroup__parse_groups_test(evlist, pme_test, name,
- &metric_events);
+ err = metricgroup__parse_groups_test(evlist, pme_test, name);
if (err)
goto out;
@@ -109,13 +103,12 @@ static int __compute_metric(const char *name, struct value *vals,
/* And execute the metric */
if (name1 && ratio1)
- *ratio1 = compute_single(&metric_events, evlist, name1);
+ *ratio1 = compute_single(evlist, name1);
if (name2 && ratio2)
- *ratio2 = compute_single(&metric_events, evlist, name2);
+ *ratio2 = compute_single(evlist, name2);
out:
/* ... cleanup. */
- metricgroup__rblist_exit(&metric_events);
evlist__free_stats(evlist);
perf_cpu_map__put(cpus);
evlist__delete(evlist);
diff --git a/tools/perf/tests/pe-file-parsing.c b/tools/perf/tests/pe-file-parsing.c
index fff58b220c07..30c7da79e109 100644
--- a/tools/perf/tests/pe-file-parsing.c
+++ b/tools/perf/tests/pe-file-parsing.c
@@ -24,7 +24,7 @@ static int run_dir(const char *d)
{
char filename[PATH_MAX];
char debugfile[PATH_MAX];
- struct build_id bid;
+ struct build_id bid = { .size = 0, };
char debuglink[PATH_MAX];
char expect_build_id[] = {
0x5a, 0x0f, 0xd8, 0x82, 0xb5, 0x30, 0x84, 0x22,
diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c
index 0958c7c8995f..efbd9cd60c63 100644
--- a/tools/perf/tests/perf-record.c
+++ b/tools/perf/tests/perf-record.c
@@ -13,15 +13,19 @@
#include "tests.h"
#include "util/mmap.h"
#include "util/sample.h"
+#include "util/cpumap.h"
static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp)
{
- int i, cpu = -1, nrcpus = 1024;
+ int i, cpu = -1;
+ int nrcpus = cpu__max_cpu().cpu;
+ size_t size = CPU_ALLOC_SIZE(nrcpus);
+
realloc:
- CPU_ZERO(maskp);
+ CPU_ZERO_S(size, maskp);
- if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) {
- if (errno == EINVAL && nrcpus < (1024 << 8)) {
+ if (sched_getaffinity(pid, size, maskp) == -1) {
+ if (errno == EINVAL && nrcpus < (cpu__max_cpu().cpu << 8)) {
nrcpus = nrcpus << 2;
goto realloc;
}
@@ -30,11 +34,11 @@ realloc:
}
for (i = 0; i < nrcpus; i++) {
- if (CPU_ISSET(i, maskp)) {
+ if (CPU_ISSET_S(i, size, maskp)) {
if (cpu == -1)
cpu = i;
else
- CPU_CLR(i, maskp);
+ CPU_CLR_S(i, size, maskp);
}
}
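
The rewrite replaces the fixed cpu_set_t (capped at 1024 CPUs) with glibc's dynamically sized mask API, sized from cpu__max_cpu(), so hosts with more possible CPUs than fit in a static cpu_set_t work too. The pattern in isolation (a sketch; in perf, nrcpus would come from cpu__max_cpu().cpu):

#define _GNU_SOURCE
#include <sched.h>
#include <sys/types.h>

static int count_allowed_cpus(pid_t pid, int nrcpus)
{
	cpu_set_t *mask = CPU_ALLOC(nrcpus);
	size_t size = CPU_ALLOC_SIZE(nrcpus);
	int count = -1;

	if (!mask)
		return -1;
	CPU_ZERO_S(size, mask);
	if (sched_getaffinity(pid, size, mask) == 0)
		count = CPU_COUNT_S(size, mask);	/* CPUs we may run on */
	CPU_FREE(mask);
	return count;
}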
@@ -45,14 +49,14 @@ static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest
{
struct record_opts opts = {
.target = {
- .uid = UINT_MAX,
.uses_mmap = true,
},
.no_buffering = true,
.mmap_pages = 256,
};
- cpu_set_t cpu_mask;
- size_t cpu_mask_size = sizeof(cpu_mask);
+ int nrcpus = cpu__max_cpu().cpu;
+ cpu_set_t *cpu_mask;
+ size_t cpu_mask_size;
struct evlist *evlist = evlist__new_dummy();
struct evsel *evsel;
struct perf_sample sample;
@@ -70,12 +74,22 @@ static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest
int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
char sbuf[STRERR_BUFSIZE];
+ cpu_mask = CPU_ALLOC(nrcpus);
+ if (!cpu_mask) {
+ pr_debug("failed to create cpumask\n");
+ goto out;
+ }
+
+ cpu_mask_size = CPU_ALLOC_SIZE(nrcpus);
+ CPU_ZERO_S(cpu_mask_size, cpu_mask);
+
perf_sample__init(&sample, /*all=*/false);
if (evlist == NULL) /* Fallback for kernels lacking PERF_COUNT_SW_DUMMY */
evlist = evlist__new_default();
if (evlist == NULL) {
pr_debug("Not enough memory to create evlist\n");
+ CPU_FREE(cpu_mask);
goto out;
}
@@ -112,10 +126,11 @@ static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest
evsel__set_sample_bit(evsel, TIME);
evlist__config(evlist, &opts, NULL);
- err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
+ err = sched__get_first_possible_cpu(evlist->workload.pid, cpu_mask);
if (err < 0) {
pr_debug("sched__get_first_possible_cpu: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
+ evlist__cancel_workload(evlist);
goto out_delete_evlist;
}
@@ -124,9 +139,10 @@ static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest
/*
* So that we can check perf_sample.cpu on all the samples.
*/
- if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
+ if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, cpu_mask) < 0) {
pr_debug("sched_setaffinity: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
+ evlist__cancel_workload(evlist);
goto out_delete_evlist;
}
@@ -138,6 +154,7 @@ static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest
if (err < 0) {
pr_debug("perf_evlist__open: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
+ evlist__cancel_workload(evlist);
goto out_delete_evlist;
}
@@ -150,6 +167,7 @@ static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest
if (err < 0) {
pr_debug("evlist__mmap: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
+ evlist__cancel_workload(evlist);
goto out_delete_evlist;
}
@@ -329,6 +347,7 @@ found_exit:
++errs;
}
out_delete_evlist:
+ CPU_FREE(cpu_mask);
evlist__delete(evlist);
out:
perf_sample__exit(&sample);
diff --git a/tools/perf/tests/perf-targz-src-pkg b/tools/perf/tests/perf-targz-src-pkg
index b3075c168cb2..52a90e6bd8af 100755
--- a/tools/perf/tests/perf-targz-src-pkg
+++ b/tools/perf/tests/perf-targz-src-pkg
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
# Test one of the main kernel Makefile targets to generate a perf sources tarball
# suitable for building outside the full kernel sources.
diff --git a/tools/perf/tests/perf-time-to-tsc.c b/tools/perf/tests/perf-time-to-tsc.c
index d3e40fa5482c..cca41bd37ae3 100644
--- a/tools/perf/tests/perf-time-to-tsc.c
+++ b/tools/perf/tests/perf-time-to-tsc.c
@@ -90,7 +90,7 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
struct mmap *md;
- threads = thread_map__new(-1, getpid(), UINT_MAX);
+ threads = thread_map__new_by_tid(getpid());
CHECK_NOT_NULL__(threads);
cpus = perf_cpu_map__new_online_cpus();
@@ -101,11 +101,11 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
perf_evlist__set_maps(&evlist->core, cpus, threads);
- CHECK__(parse_event(evlist, "cycles:u"));
+ CHECK__(parse_event(evlist, "cpu-cycles:u"));
evlist__config(evlist, &opts, NULL);
- /* For hybrid "cycles:u", it creates two events */
+ /* For hybrid "cpu-cycles:u", it creates two events */
evlist__for_each_entry(evlist, evsel) {
evsel->core.attr.comm = 1;
evsel->core.attr.disabled = 1;
diff --git a/tools/perf/tests/pfm.c b/tools/perf/tests/pfm.c
index 2e38dfa34b6c..fca4a86452df 100644
--- a/tools/perf/tests/pfm.c
+++ b/tools/perf/tests/pfm.c
@@ -4,6 +4,7 @@
*
* Copyright 2020 Google LLC.
*/
+#include <errno.h>
#include "tests.h"
#include "util/debug.h"
#include "util/evlist.h"
diff --git a/tools/perf/tests/pmu-events.c b/tools/perf/tests/pmu-events.c
index db004d26fcb0..a99716862168 100644
--- a/tools/perf/tests/pmu-events.c
+++ b/tools/perf/tests/pmu-events.c
@@ -22,10 +22,6 @@ struct perf_pmu_test_event {
/* used for matching against events from generated pmu-events.c */
struct pmu_event event;
- /* used for matching against event aliases */
- /* extra events for aliases */
- const char *alias_str;
-
/*
* Note: For when PublicDescription does not exist in the JSON, we
* will have no long_desc in pmu_event.long_desc, but long_desc may
@@ -38,7 +34,9 @@ struct perf_pmu_test_event {
};
struct perf_pmu_test_pmu {
- struct perf_pmu pmu;
+ const char *pmu_name;
+ bool pmu_is_uncore;
+ const char *pmu_id;
struct perf_pmu_test_event const *aliases[10];
};
@@ -50,8 +48,6 @@ static const struct perf_pmu_test_event bp_l1_btb_correct = {
.desc = "L1 BTB Correction",
.topic = "branch",
},
- .alias_str = "event=0x8a",
- .alias_long_desc = "L1 BTB Correction",
};
static const struct perf_pmu_test_event bp_l2_btb_correct = {
@@ -62,8 +58,6 @@ static const struct perf_pmu_test_event bp_l2_btb_correct = {
.desc = "L2 BTB Correction",
.topic = "branch",
},
- .alias_str = "event=0x8b",
- .alias_long_desc = "L2 BTB Correction",
};
static const struct perf_pmu_test_event segment_reg_loads_any = {
@@ -74,8 +68,6 @@ static const struct perf_pmu_test_event segment_reg_loads_any = {
.desc = "Number of segment register loads",
.topic = "other",
},
- .alias_str = "event=0x6,period=0x30d40,umask=0x80",
- .alias_long_desc = "Number of segment register loads",
};
static const struct perf_pmu_test_event dispatch_blocked_any = {
@@ -86,8 +78,6 @@ static const struct perf_pmu_test_event dispatch_blocked_any = {
.desc = "Memory cluster signals to block micro-op dispatch for any reason",
.topic = "other",
},
- .alias_str = "event=0x9,period=0x30d40,umask=0x20",
- .alias_long_desc = "Memory cluster signals to block micro-op dispatch for any reason",
};
static const struct perf_pmu_test_event eist_trans = {
@@ -98,8 +88,6 @@ static const struct perf_pmu_test_event eist_trans = {
.desc = "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions",
.topic = "other",
},
- .alias_str = "event=0x3a,period=0x30d40",
- .alias_long_desc = "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions",
};
static const struct perf_pmu_test_event l3_cache_rd = {
@@ -111,7 +99,6 @@ static const struct perf_pmu_test_event l3_cache_rd = {
.long_desc = "Attributable Level 3 cache access, read",
.topic = "cache",
},
- .alias_str = "event=0x40",
.alias_long_desc = "Attributable Level 3 cache access, read",
};
@@ -131,11 +118,8 @@ static const struct perf_pmu_test_event uncore_hisi_ddrc_flux_wcmd = {
.event = "event=2",
.desc = "DDRC write commands",
.topic = "uncore",
- .long_desc = "DDRC write commands",
.pmu = "hisi_sccl,ddrc",
},
- .alias_str = "event=0x2",
- .alias_long_desc = "DDRC write commands",
.matching_pmu = "hisi_sccl1_ddrc2",
};
@@ -145,11 +129,8 @@ static const struct perf_pmu_test_event unc_cbo_xsnp_response_miss_eviction = {
.event = "event=0x22,umask=0x81",
.desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core",
.topic = "uncore",
- .long_desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core",
.pmu = "uncore_cbox",
},
- .alias_str = "event=0x22,umask=0x81",
- .alias_long_desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core",
.matching_pmu = "uncore_cbox_0",
};
@@ -159,11 +140,8 @@ static const struct perf_pmu_test_event uncore_hyphen = {
.event = "event=0xe0",
.desc = "UNC_CBO_HYPHEN",
.topic = "uncore",
- .long_desc = "UNC_CBO_HYPHEN",
.pmu = "uncore_cbox",
},
- .alias_str = "event=0xe0",
- .alias_long_desc = "UNC_CBO_HYPHEN",
.matching_pmu = "uncore_cbox_0",
};
@@ -173,11 +151,8 @@ static const struct perf_pmu_test_event uncore_two_hyph = {
.event = "event=0xc0",
.desc = "UNC_CBO_TWO_HYPH",
.topic = "uncore",
- .long_desc = "UNC_CBO_TWO_HYPH",
.pmu = "uncore_cbox",
},
- .alias_str = "event=0xc0",
- .alias_long_desc = "UNC_CBO_TWO_HYPH",
.matching_pmu = "uncore_cbox_0",
};
@@ -187,11 +162,8 @@ static const struct perf_pmu_test_event uncore_hisi_l3c_rd_hit_cpipe = {
.event = "event=7",
.desc = "Total read hits",
.topic = "uncore",
- .long_desc = "Total read hits",
.pmu = "hisi_sccl,l3c",
},
- .alias_str = "event=0x7",
- .alias_long_desc = "Total read hits",
.matching_pmu = "hisi_sccl3_l3c7",
};
@@ -201,11 +173,8 @@ static const struct perf_pmu_test_event uncore_imc_free_running_cache_miss = {
.event = "event=0x12",
.desc = "Total cache misses",
.topic = "uncore",
- .long_desc = "Total cache misses",
.pmu = "uncore_imc_free_running",
},
- .alias_str = "event=0x12",
- .alias_long_desc = "Total cache misses",
.matching_pmu = "uncore_imc_free_running_0",
};
@@ -215,11 +184,8 @@ static const struct perf_pmu_test_event uncore_imc_cache_hits = {
.event = "event=0x34",
.desc = "Total cache hits",
.topic = "uncore",
- .long_desc = "Total cache hits",
.pmu = "uncore_imc",
},
- .alias_str = "event=0x34",
- .alias_long_desc = "Total cache hits",
.matching_pmu = "uncore_imc_0",
};
@@ -243,8 +209,6 @@ static const struct perf_pmu_test_event sys_ddr_pmu_write_cycles = {
.pmu = "uncore_sys_ddr_pmu",
.compat = "v8",
},
- .alias_str = "event=0x2b",
- .alias_long_desc = "ddr write-cycles event",
.matching_pmu = "uncore_sys_ddr_pmu0",
};
@@ -257,8 +221,6 @@ static const struct perf_pmu_test_event sys_ccn_pmu_read_cycles = {
.pmu = "uncore_sys_ccn_pmu",
.compat = "0x01",
},
- .alias_str = "config=0x2c",
- .alias_long_desc = "ccn read-cycles event",
.matching_pmu = "uncore_sys_ccn_pmu4",
};
@@ -271,8 +233,6 @@ static const struct perf_pmu_test_event sys_cmn_pmu_hnf_cache_miss = {
.pmu = "uncore_sys_cmn_pmu",
.compat = "(434|436|43c|43a).*",
},
- .alias_str = "eventid=0x1,type=0x5",
- .alias_long_desc = "Counts total cache misses in first lookup result (high priority)",
.matching_pmu = "uncore_sys_cmn_pmu0",
};
@@ -394,9 +354,9 @@ static int compare_alias_to_test_event(struct pmu_event_info *alias,
return -1;
}
- if (!is_same(alias->str, test_event->alias_str)) {
+ if (!is_same(alias->str, test_event->event.event)) {
pr_debug("testing aliases PMU %s: mismatched str, %s vs %s\n",
- pmu_name, alias->str, test_event->alias_str);
+ pmu_name, alias->str, test_event->event.event);
return -1;
}
@@ -553,11 +513,10 @@ static int __test_core_pmu_event_aliases(const char *pmu_name, int *count)
if (!pmu)
return -1;
- INIT_LIST_HEAD(&pmu->format);
- INIT_LIST_HEAD(&pmu->aliases);
- INIT_LIST_HEAD(&pmu->caps);
- INIT_LIST_HEAD(&pmu->list);
- pmu->name = strdup(pmu_name);
+ if (perf_pmu__init(pmu, PERF_PMU_TYPE_FAKE, pmu_name) != 0) {
+ perf_pmu__delete(pmu);
+ return -1;
+ }
pmu->is_core = true;
pmu->events_table = table;
@@ -594,14 +553,30 @@ static int __test_uncore_pmu_event_aliases(struct perf_pmu_test_pmu *test_pmu)
{
int alias_count = 0, to_match_count = 0, matched_count = 0;
struct perf_pmu_test_event const **table;
- struct perf_pmu *pmu = &test_pmu->pmu;
- const char *pmu_name = pmu->name;
+ struct perf_pmu *pmu;
const struct pmu_events_table *events_table;
int res = 0;
events_table = find_core_events_table("testarch", "testcpu");
if (!events_table)
return -1;
+
+ pmu = zalloc(sizeof(*pmu));
+ if (!pmu)
+ return -1;
+
+ if (perf_pmu__init(pmu, PERF_PMU_TYPE_FAKE, test_pmu->pmu_name) != 0) {
+ perf_pmu__delete(pmu);
+ return -1;
+ }
+ pmu->is_uncore = test_pmu->pmu_is_uncore;
+ if (test_pmu->pmu_id) {
+ pmu->id = strdup(test_pmu->pmu_id);
+ if (!pmu->id) {
+ perf_pmu__delete(pmu);
+ return -1;
+ }
+ }
pmu->events_table = events_table;
pmu_add_cpu_aliases_table(pmu, events_table);
pmu->cpu_aliases_added = true;
@@ -617,7 +592,8 @@ static int __test_uncore_pmu_event_aliases(struct perf_pmu_test_pmu *test_pmu)
if (alias_count != to_match_count) {
pr_debug("testing aliases uncore PMU %s: mismatch expected aliases (%d) vs found (%d)\n",
- pmu_name, to_match_count, alias_count);
+ pmu->name, to_match_count, alias_count);
+ perf_pmu__delete(pmu);
return -1;
}
@@ -630,9 +606,10 @@ static int __test_uncore_pmu_event_aliases(struct perf_pmu_test_pmu *test_pmu)
.count = &matched_count,
};
- if (strcmp(pmu_name, test_event.matching_pmu)) {
+ if (strcmp(pmu->name, test_event.matching_pmu)) {
pr_debug("testing aliases uncore PMU %s: mismatched matching_pmu, %s vs %s\n",
- pmu_name, test_event.matching_pmu, pmu_name);
+ pmu->name, test_event.matching_pmu, pmu->name);
+ perf_pmu__delete(pmu);
return -1;
}
@@ -641,34 +618,32 @@ static int __test_uncore_pmu_event_aliases(struct perf_pmu_test_pmu *test_pmu)
if (err) {
res = err;
pr_debug("testing aliases uncore PMU %s: could not match alias %s\n",
- pmu_name, event->name);
+ pmu->name, event->name);
+ perf_pmu__delete(pmu);
return -1;
}
}
if (alias_count != matched_count) {
pr_debug("testing aliases uncore PMU %s: mismatch found aliases (%d) vs matched (%d)\n",
- pmu_name, matched_count, alias_count);
+ pmu->name, matched_count, alias_count);
res = -1;
}
+ perf_pmu__delete(pmu);
return res;
}
static struct perf_pmu_test_pmu test_pmus[] = {
{
- .pmu = {
- .name = "hisi_sccl1_ddrc2",
- .is_uncore = 1,
- },
+ .pmu_name = "hisi_sccl1_ddrc2",
+ .pmu_is_uncore = 1,
.aliases = {
&uncore_hisi_ddrc_flux_wcmd,
},
},
{
- .pmu = {
- .name = "uncore_cbox_0",
- .is_uncore = 1,
- },
+ .pmu_name = "uncore_cbox_0",
+ .pmu_is_uncore = 1,
.aliases = {
&unc_cbo_xsnp_response_miss_eviction,
&uncore_hyphen,
@@ -676,88 +651,70 @@ static struct perf_pmu_test_pmu test_pmus[] = {
},
},
{
- .pmu = {
- .name = "hisi_sccl3_l3c7",
- .is_uncore = 1,
- },
+ .pmu_name = "hisi_sccl3_l3c7",
+ .pmu_is_uncore = 1,
.aliases = {
&uncore_hisi_l3c_rd_hit_cpipe,
},
},
{
- .pmu = {
- .name = "uncore_imc_free_running_0",
- .is_uncore = 1,
- },
+ .pmu_name = "uncore_imc_free_running_0",
+ .pmu_is_uncore = 1,
.aliases = {
&uncore_imc_free_running_cache_miss,
},
},
{
- .pmu = {
- .name = "uncore_imc_0",
- .is_uncore = 1,
- },
+ .pmu_name = "uncore_imc_0",
+ .pmu_is_uncore = 1,
.aliases = {
&uncore_imc_cache_hits,
},
},
{
- .pmu = {
- .name = "uncore_sys_ddr_pmu0",
- .is_uncore = 1,
- .id = "v8",
- },
+ .pmu_name = "uncore_sys_ddr_pmu0",
+ .pmu_is_uncore = 1,
+ .pmu_id = "v8",
.aliases = {
&sys_ddr_pmu_write_cycles,
},
},
{
- .pmu = {
- .name = "uncore_sys_ccn_pmu4",
- .is_uncore = 1,
- .id = "0x01",
- },
+ .pmu_name = "uncore_sys_ccn_pmu4",
+ .pmu_is_uncore = 1,
+ .pmu_id = "0x01",
.aliases = {
&sys_ccn_pmu_read_cycles,
},
},
{
- .pmu = {
- .name = (char *)"uncore_sys_cmn_pmu0",
- .is_uncore = 1,
- .id = (char *)"43401",
- },
+ .pmu_name = "uncore_sys_cmn_pmu0",
+ .pmu_is_uncore = 1,
+ .pmu_id = "43401",
.aliases = {
&sys_cmn_pmu_hnf_cache_miss,
},
},
{
- .pmu = {
- .name = (char *)"uncore_sys_cmn_pmu0",
- .is_uncore = 1,
- .id = (char *)"43602",
- },
+ .pmu_name = "uncore_sys_cmn_pmu0",
+ .pmu_is_uncore = 1,
+ .pmu_id = "43602",
.aliases = {
&sys_cmn_pmu_hnf_cache_miss,
},
},
{
- .pmu = {
- .name = (char *)"uncore_sys_cmn_pmu0",
- .is_uncore = 1,
- .id = (char *)"43c03",
- },
+ .pmu_name = "uncore_sys_cmn_pmu0",
+ .pmu_is_uncore = 1,
+ .pmu_id = "43c03",
.aliases = {
&sys_cmn_pmu_hnf_cache_miss,
},
},
{
- .pmu = {
- .name = (char *)"uncore_sys_cmn_pmu0",
- .is_uncore = 1,
- .id = (char *)"43a01",
- },
+ .pmu_name = "uncore_sys_cmn_pmu0",
+ .pmu_is_uncore = 1,
+ .pmu_id = "43a01",
.aliases = {
&sys_cmn_pmu_hnf_cache_miss,
},
@@ -796,10 +753,6 @@ static int test__aliases(struct test_suite *test __maybe_unused,
for (i = 0; i < ARRAY_SIZE(test_pmus); i++) {
int res;
- INIT_LIST_HEAD(&test_pmus[i].pmu.format);
- INIT_LIST_HEAD(&test_pmus[i].pmu.aliases);
- INIT_LIST_HEAD(&test_pmus[i].pmu.caps);
-
res = __test_uncore_pmu_event_aliases(&test_pmus[i]);
if (res)
return res;
@@ -873,9 +826,6 @@ static int test__parsing_callback(const struct pmu_metric *pm,
struct evlist *evlist;
struct perf_cpu_map *cpus;
struct evsel *evsel;
- struct rblist metric_events = {
- .nr_entries = 0,
- };
int err = 0;
if (!pm->metric_expr)
@@ -900,7 +850,7 @@ static int test__parsing_callback(const struct pmu_metric *pm,
perf_evlist__set_maps(&evlist->core, cpus, NULL);
- err = metricgroup__parse_groups_test(evlist, table, pm->metric_name, &metric_events);
+ err = metricgroup__parse_groups_test(evlist, table, pm->metric_name);
if (err) {
if (!strcmp(pm->metric_name, "M1") || !strcmp(pm->metric_name, "M2") ||
!strcmp(pm->metric_name, "M3")) {
@@ -922,12 +872,10 @@ static int test__parsing_callback(const struct pmu_metric *pm,
evlist__alloc_aggr_stats(evlist, 1);
evlist__for_each_entry(evlist, evsel) {
evsel->stats->aggr->counts.val = k;
- if (evsel__name_is(evsel, "duration_time"))
- update_stats(&walltime_nsecs_stats, k);
k++;
}
evlist__for_each_entry(evlist, evsel) {
- struct metric_event *me = metricgroup__lookup(&metric_events, evsel, false);
+ struct metric_event *me = metricgroup__lookup(&evlist->metric_events, evsel, false);
if (me != NULL) {
struct metric_expr *mexp;
@@ -949,7 +897,6 @@ out_err:
pr_debug("Broken metric %s\n", pm->metric_name);
/* ... cleanup. */
- metricgroup__rblist_exit(&metric_events);
evlist__free_stats(evlist);
perf_cpu_map__put(cpus);
evlist__delete(evlist);
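
Taken together, the pmu-events.c hunks replace statically embedded struct perf_pmu instances (each needing hand-rolled INIT_LIST_HEAD() calls and never freed) with small descriptors that are materialized at test time. A condensed sketch of the lifecycle the hunks converge on, using only calls that appear in the diff; the PMU name here is an arbitrary example:

        static int check_fake_uncore_pmu(void)
        {
                struct perf_pmu *pmu = zalloc(sizeof(*pmu));

                if (!pmu)
                        return -1;
                /* perf_pmu__init() now does the list setup and name strdup()
                 * the old code open-coded at each call site. */
                if (perf_pmu__init(pmu, PERF_PMU_TYPE_FAKE, "uncore_cbox_0") != 0) {
                        perf_pmu__delete(pmu);
                        return -1;
                }
                pmu->is_uncore = true;
                /* ... run the alias comparisons ... */
                perf_pmu__delete(pmu);  /* one teardown on every exit path */
                return 0;
        }
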
diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c
index 4a9f8e090cf4..cbded2c6faa4 100644
--- a/tools/perf/tests/pmu.c
+++ b/tools/perf/tests/pmu.c
@@ -169,8 +169,7 @@ static int test__pmu_format(struct test_suite *test __maybe_unused, int subtest
parse_events_terms__init(&terms);
if (parse_events_terms(&terms,
"krava01=15,krava02=170,krava03=1,krava11=27,krava12=1,"
- "krava13=2,krava21=119,krava22=11,krava23=2",
- NULL)) {
+ "krava13=2,krava21=119,krava22=11,krava23=2")) {
pr_err("Term parsing failed\n");
goto err_out;
}
diff --git a/tools/perf/tests/python-use.c b/tools/perf/tests/python-use.c
deleted file mode 100644
index 0ebc22ac8d5b..000000000000
--- a/tools/perf/tests/python-use.c
+++ /dev/null
@@ -1,27 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Just test if we can load the python binding.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <linux/compiler.h>
-#include "tests.h"
-#include "util/debug.h"
-
-static int test__python_use(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
-{
- char *cmd;
- int ret;
-
- if (asprintf(&cmd, "echo \"import sys ; sys.path.insert(0, '%s'); import perf\" | %s %s",
- PYTHONPATH, PYTHON, verbose > 0 ? "" : "2> /dev/null") < 0)
- return -1;
-
- pr_debug("python usage test: \"%s\"\n", cmd);
- ret = system(cmd) ? -1 : 0;
- free(cmd);
- return ret;
-}
-
-DEFINE_SUITE("'import perf' in python", python_use);
diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c
index 72411580f869..a7327c942ca2 100644
--- a/tools/perf/tests/sample-parsing.c
+++ b/tools/perf/tests/sample-parsing.c
@@ -152,6 +152,12 @@ static bool samples_same(struct perf_sample *s1,
if (type & PERF_SAMPLE_WEIGHT)
COMP(weight);
+ if (type & PERF_SAMPLE_WEIGHT_STRUCT) {
+ COMP(weight);
+ COMP(ins_lat);
+ COMP(weight3);
+ }
+
if (type & PERF_SAMPLE_DATA_SRC)
COMP(data_src);
@@ -269,6 +275,8 @@ static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
.cgroup = 114,
.data_page_size = 115,
.code_page_size = 116,
+ .ins_lat = 117,
+ .weight3 = 118,
.aux_sample = {
.size = sizeof(aux_data),
.data = (void *)aux_data,
@@ -439,6 +447,12 @@ static int test__sample_parsing(struct test_suite *test __maybe_unused, int subt
if (err)
return err;
}
+ sample_type = (PERF_SAMPLE_MAX - 1) & ~PERF_SAMPLE_WEIGHT_STRUCT;
+ for (i = 0; i < ARRAY_SIZE(rf); i++) {
+ err = do_test(sample_type, sample_regs, rf[i]);
+ if (err)
+ return err;
+ }
return 0;
}
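
Background for the new comparisons: PERF_SAMPLE_WEIGHT_STRUCT shares its sample slot with PERF_SAMPLE_WEIGHT (the two are mutually exclusive, which is why the test also adds a pass with WEIGHT_STRUCT masked out) but splits the 64-bit value into sub-fields. A sketch of the UAPI union this relies on, per the little-endian branch of linux/perf_event.h; mapping the tool's weight/ins_lat/weight3 names onto var1/var2/var3 is our annotation:

        #include <linux/types.h>

        union perf_sample_weight {
                __u64 full;               /* PERF_SAMPLE_WEIGHT: one 64-bit value */
                struct {                  /* PERF_SAMPLE_WEIGHT_STRUCT */
                        __u32 var1_dw;    /* perf tool's sample->weight  */
                        __u16 var2_w;     /* perf tool's sample->ins_lat */
                        __u16 var3_w;     /* perf tool's sample->weight3 */
                };
        };
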
diff --git a/tools/perf/tests/sdt.c b/tools/perf/tests/sdt.c
index 919712899251..93baee2eae42 100644
--- a/tools/perf/tests/sdt.c
+++ b/tools/perf/tests/sdt.c
@@ -28,7 +28,7 @@ static int target_function(void)
static int build_id_cache__add_file(const char *filename)
{
char sbuild_id[SBUILD_ID_SIZE];
- struct build_id bid;
+ struct build_id bid = { .size = 0, };
int err;
err = filename__read_build_id(filename, &bid);
@@ -37,7 +37,7 @@ static int build_id_cache__add_file(const char *filename)
return err;
}
- build_id__sprintf(&bid, sbuild_id);
+ build_id__snprintf(&bid, sbuild_id, sizeof(sbuild_id));
err = build_id_cache__add_s(sbuild_id, filename, NULL, false, false);
if (err < 0)
pr_debug("Failed to add build id cache of %s\n", filename);
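
The sdt.c hunks pair a zero-initialized build_id (so an unread ID cannot expose stale stack bytes) with the bounded build_id__snprintf(). A minimal sketch of the resulting calling convention, using only identifiers visible in the hunks:

        char sbuild_id[SBUILD_ID_SIZE];
        struct build_id bid = { .size = 0, };  /* zero size: nothing read yet */

        if (filename__read_build_id(filename, &bid) < 0)
                return -1;
        /* Bounded formatter: never writes past sbuild_id, unlike *__sprintf(). */
        build_id__snprintf(&bid, sbuild_id, sizeof(sbuild_id));
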
diff --git a/tools/perf/tests/shell/amd-ibs-swfilt.sh b/tools/perf/tests/shell/amd-ibs-swfilt.sh
new file mode 100755
index 000000000000..e7f66df05c4b
--- /dev/null
+++ b/tools/perf/tests/shell/amd-ibs-swfilt.sh
@@ -0,0 +1,92 @@
+#!/bin/bash
+# AMD IBS software filtering
+
+ParanoidAndNotRoot() {
+ [ "$(id -u)" != 0 ] && [ "$(cat /proc/sys/kernel/perf_event_paranoid)" -gt $1 ]
+}
+
+echo "check availability of IBS swfilt"
+
+# check if IBS PMU is available
+if [ ! -d /sys/bus/event_source/devices/ibs_op ]; then
+ echo "[SKIP] IBS PMU does not exist"
+ exit 2
+fi
+
+# check if IBS PMU has swfilt format
+if [ ! -f /sys/bus/event_source/devices/ibs_op/format/swfilt ]; then
+ echo "[SKIP] IBS PMU does not have swfilt"
+ exit 2
+fi
+
+echo "run perf record with modifier and swfilt"
+err=0
+
+# setting a privilege modifier without swfilt should fail
+perf record -B -e ibs_op//u -o /dev/null true 2> /dev/null
+if [ $? -eq 0 ]; then
+ echo "[FAIL] IBS PMU should not accept exclude_kernel"
+ exit 1
+fi
+
+# setting it with swfilt should be fine
+perf record -B -e ibs_op/swfilt/u -o /dev/null true
+if [ $? -ne 0 ]; then
+ echo "[FAIL] IBS op PMU cannot handle swfilt for exclude_kernel"
+ exit 1
+fi
+
+if ! ParanoidAndNotRoot 1
+then
+ # setting it with swfilt=1 should be fine
+ perf record -B -e ibs_op/swfilt=1/k -o /dev/null true
+ if [ $? -ne 0 ]; then
+ echo "[FAIL] IBS op PMU cannot handle swfilt for exclude_user"
+ exit 1
+ fi
+else
+ echo "[SKIP] not root and perf_event_paranoid too high for exclude_user"
+ err=2
+fi
+
+# check ibs_fetch PMU as well
+perf record -B -e ibs_fetch/swfilt/u -o /dev/null true
+if [ $? -ne 0 ]; then
+ echo "[FAIL] IBS fetch PMU cannot handle swfilt for exclude_kernel"
+ exit 1
+fi
+
+# check system wide recording
+if ! ParanoidAndNotRoot 0
+then
+ perf record -aB --synth=no -e ibs_op/swfilt/k -o /dev/null true
+ if [ $? -ne 0 ]; then
+ echo "[FAIL] IBS op PMU cannot handle swfilt in system-wide mode"
+ exit 1
+ fi
+else
+ echo "[SKIP] not root and perf_event_paranoid too high for system-wide/exclude_user"
+ err=2
+fi
+
+echo "check number of samples with swfilt"
+
+kernel_sample=$(perf record -e ibs_op/swfilt/u -o- true | perf script -i- -F misc | grep -c ^K)
+if [ ${kernel_sample} -ne 0 ]; then
+ echo "[FAIL] unexpected kernel samples: " ${kernel_sample}
+ exit 1
+fi
+
+if ! ParanoidAndNotRoot 1
+then
+ user_sample=$(perf record -e ibs_fetch/swfilt/k -o- true | perf script -i- -F misc | grep -c ^U)
+ if [ ${user_sample} -ne 0 ]; then
+ echo "[FAIL] unexpected user samples: " ${user_sample}
+ exit 1
+ fi
+else
+ echo "[SKIP] not root and perf_event_paranoid too high for exclude_user"
+ err=2
+fi
+
+exit $err
diff --git a/tools/perf/tests/shell/annotate.sh b/tools/perf/tests/shell/annotate.sh
index 16a1ccd06089..689de58e9238 100755
--- a/tools/perf/tests/shell/annotate.sh
+++ b/tools/perf/tests/shell/annotate.sh
@@ -53,21 +53,22 @@ test_basic() {
# Generate the annotated output file
if [ "x${mode}" == "xBasic" ]
then
- perf annotate --no-demangle -i "${perfdata}" --stdio 2> /dev/null > "${perfout}"
+ perf annotate --no-demangle -i "${perfdata}" --stdio --percent-limit 10 2> /dev/null > "${perfout}"
else
- perf annotate --no-demangle -i - --stdio 2> /dev/null < "${perfdata}" > "${perfout}"
+ perf annotate --no-demangle -i - --stdio --percent-limit 10 2> /dev/null < "${perfdata}" > "${perfout}"
fi
# check if it has the target symbol
- if ! head -250 "${perfout}" | grep -q "${testsym}"
+ if ! grep -q "${testsym}" "${perfout}"
then
echo "${mode} annotate [Failed: missing target symbol]"
+ cat "${perfout}"
err=1
return
fi
# check if it has the disassembly lines
- if ! head -250 "${perfout}" | grep -q "${disasm_regex}"
+ if ! grep -q "${disasm_regex}" "${perfout}"
then
echo "${mode} annotate [Failed: missing disasm output from default disassembler]"
err=1
@@ -92,11 +93,11 @@ test_basic() {
# check one more with external objdump tool (forced by --objdump option)
if [ "x${mode}" == "xBasic" ]
then
- perf annotate --no-demangle -i "${perfdata}" --objdump=objdump 2> /dev/null > "${perfout}"
+ perf annotate --no-demangle -i "${perfdata}" --percent-limit 10 --objdump=objdump 2> /dev/null > "${perfout}"
else
- perf annotate --no-demangle -i - "${testsym}" 2> /dev/null < "${perfdata}" > "${perfout}"
+ perf annotate --no-demangle -i - "${testsym}" --percent-limit 10 --objdump=objdump 2> /dev/null < "${perfdata}" > "${perfout}"
fi
- if ! head -250 "${perfout}" | grep -q -m 3 "${disasm_regex}"
+ if ! grep -q -m 3 "${disasm_regex}" "${perfout}"
then
echo "${mode} annotate [Failed: missing disasm output from non default disassembler (using --objdump)]"
err=1
diff --git a/tools/perf/tests/shell/attr/test-stat-default b/tools/perf/tests/shell/attr/test-stat-default
index e47fb4944679..8dd27c1fb661 100644
--- a/tools/perf/tests/shell/attr/test-stat-default
+++ b/tools/perf/tests/shell/attr/test-stat-default
@@ -227,3 +227,10 @@ fd=28
type=4
config=270
optional=1
+
+# PERF_TYPE_RAW / INT_MISC.UOP_DROPPING
+[event29:base-stat]
+fd=29
+type=4
+config=4269
+optional=1
\ No newline at end of file
diff --git a/tools/perf/tests/shell/attr/test-stat-detailed-1 b/tools/perf/tests/shell/attr/test-stat-detailed-1
index 3d500d3e0c5c..12a2ebf4e64a 100644
--- a/tools/perf/tests/shell/attr/test-stat-detailed-1
+++ b/tools/perf/tests/shell/attr/test-stat-detailed-1
@@ -269,3 +269,10 @@ fd=32
type=3
config=65538
optional=1
+
+# PERF_TYPE_RAW / INT_MISC.UOP_DROPPING
+[event33:base-stat]
+fd=33
+type=4
+config=4269
+optional=1
\ No newline at end of file
diff --git a/tools/perf/tests/shell/attr/test-stat-detailed-2 b/tools/perf/tests/shell/attr/test-stat-detailed-2
index 01777a63752f..66ea25b7d38f 100644
--- a/tools/perf/tests/shell/attr/test-stat-detailed-2
+++ b/tools/perf/tests/shell/attr/test-stat-detailed-2
@@ -329,3 +329,10 @@ fd=38
type=3
config=65540
optional=1
+
+# PERF_TYPE_RAW / INT_MISC.UOP_DROPPING
+[event39:base-stat]
+fd=39
+type=4
+config=4269
+optional=1
\ No newline at end of file
diff --git a/tools/perf/tests/shell/attr/test-stat-detailed-3 b/tools/perf/tests/shell/attr/test-stat-detailed-3
index 8400abd7e1e4..4a27bbfb9f87 100644
--- a/tools/perf/tests/shell/attr/test-stat-detailed-3
+++ b/tools/perf/tests/shell/attr/test-stat-detailed-3
@@ -349,3 +349,10 @@ fd=40
type=3
config=66048
optional=1
+
+# PERF_TYPE_RAW / INT_MISC.UOP_DROPPING
+[event41:base-stat]
+fd=41
+type=4
+config=4269
+optional=1
\ No newline at end of file
diff --git a/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh b/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh
index 8226449ac5c3..f74aab5c5d7f 100755
--- a/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh
+++ b/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh
@@ -13,11 +13,12 @@
# they must be skipped.
#
-# include working environment
-. ../common/init.sh
-
+DIR_PATH="$(dirname $0)"
TEST_RESULT=0
+# include working environment
+. "$DIR_PATH/../common/init.sh"
+
# skip if not supported
BLACKFUNC_LIST=`head -n 5 /sys/kernel/debug/kprobes/blacklist 2> /dev/null | cut -f2`
if [ -z "$BLACKFUNC_LIST" ]; then
@@ -53,7 +54,8 @@ for BLACKFUNC in $BLACKFUNC_LIST; do
PERF_EXIT_CODE=$?
# check for bad DWARF polluting the result
- ../common/check_all_patterns_found.pl "$REGEX_MISSING_DECL_LINE" >/dev/null < $LOGS_DIR/adding_blacklisted.err
+ "$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "$REGEX_MISSING_DECL_LINE" >/dev/null < $LOGS_DIR/adding_blacklisted.err
if [ $? -eq 0 ]; then
SKIP_DWARF=1
@@ -73,7 +75,11 @@ for BLACKFUNC in $BLACKFUNC_LIST; do
fi
fi
else
- ../common/check_all_lines_matched.pl "$REGEX_SKIP_MESSAGE" "$REGEX_NOT_FOUND_MESSAGE" "$REGEX_ERROR_MESSAGE" "$REGEX_SCOPE_FAIL" "$REGEX_INVALID_ARGUMENT" "$REGEX_SYMBOL_FAIL" "$REGEX_OUT_SECTION" < $LOGS_DIR/adding_blacklisted.err
+ "$DIR_PATH/../common/check_all_lines_matched.pl" \
+ "$REGEX_SKIP_MESSAGE" "$REGEX_NOT_FOUND_MESSAGE" \
+ "$REGEX_ERROR_MESSAGE" "$REGEX_SCOPE_FAIL" \
+ "$REGEX_INVALID_ARGUMENT" "$REGEX_SYMBOL_FAIL" \
+ "$REGEX_OUT_SECTION" < $LOGS_DIR/adding_blacklisted.err
CHECK_EXIT_CODE=$?
SKIP_DWARF=0
@@ -94,7 +100,9 @@ fi
$CMD_PERF list probe:\* > $LOGS_DIR/adding_blacklisted_list.log
PERF_EXIT_CODE=$?
-../common/check_all_lines_matched.pl "$RE_LINE_EMPTY" "List of pre-defined events" "Metric Groups:" < $LOGS_DIR/adding_blacklisted_list.log
+"$DIR_PATH/../common/check_all_lines_matched.pl" \
+ "$RE_LINE_EMPTY" "List of pre-defined events" "Metric Groups:" \
+ < $LOGS_DIR/adding_blacklisted_list.log
CHECK_EXIT_CODE=$?
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "listing blacklisted probe (should NOT be listed)"
diff --git a/tools/perf/tests/shell/base_probe/test_adding_kernel.sh b/tools/perf/tests/shell/base_probe/test_adding_kernel.sh
index df288cf90cd6..555a825d55f2 100755
--- a/tools/perf/tests/shell/base_probe/test_adding_kernel.sh
+++ b/tools/perf/tests/shell/base_probe/test_adding_kernel.sh
@@ -13,13 +13,14 @@
# and removing.
#
-# include working environment
-. ../common/init.sh
-
+DIR_PATH="$(dirname $0)"
TEST_RESULT=0
+# include working environment
+. "$DIR_PATH/../common/init.sh"
+
# shellcheck source=lib/probe_vfs_getname.sh
-. "$(dirname "$0")/../lib/probe_vfs_getname.sh"
+. "$DIR_PATH/../lib/probe_vfs_getname.sh"
TEST_PROBE=${TEST_PROBE:-"inode_permission"}
@@ -44,7 +45,9 @@ for opt in "" "-a" "--add"; do
$CMD_PERF probe $opt $TEST_PROBE 2> $LOGS_DIR/adding_kernel_add$opt.err
PERF_EXIT_CODE=$?
- ../common/check_all_patterns_found.pl "Added new events?:" "probe:$TEST_PROBE" "on $TEST_PROBE" < $LOGS_DIR/adding_kernel_add$opt.err
+ "$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "Added new events?:" "probe:$TEST_PROBE" "on $TEST_PROBE" \
+ < $LOGS_DIR/adding_kernel_add$opt.err
CHECK_EXIT_CODE=$?
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "adding probe $TEST_PROBE :: $opt"
@@ -58,7 +61,10 @@ done
$CMD_PERF list probe:\* > $LOGS_DIR/adding_kernel_list.log
PERF_EXIT_CODE=$?
-../common/check_all_lines_matched.pl "$RE_LINE_EMPTY" "List of pre-defined events" "probe:${TEST_PROBE}(?:_\d+)?\s+\[Tracepoint event\]" "Metric Groups:" < $LOGS_DIR/adding_kernel_list.log
+"$DIR_PATH/../common/check_all_lines_matched.pl" \
+ "$RE_LINE_EMPTY" "List of pre-defined events" \
+ "probe:${TEST_PROBE}(?:_\d+)?\s+\[Tracepoint event\]" \
+ "Metric Groups:" < $LOGS_DIR/adding_kernel_list.log
CHECK_EXIT_CODE=$?
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "listing added probe :: perf list"
@@ -71,7 +77,9 @@ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "listing added probe :: perf list
$CMD_PERF probe -l > $LOGS_DIR/adding_kernel_list-l.log
PERF_EXIT_CODE=$?
-../common/check_all_patterns_found.pl "\s*probe:${TEST_PROBE}(?:_\d+)?\s+\(on ${TEST_PROBE}(?:[:\+]$RE_NUMBER_HEX)?@.+\)" < $LOGS_DIR/adding_kernel_list-l.log
+"$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "\s*probe:${TEST_PROBE}(?:_\d+)?\s+\(on ${TEST_PROBE}(?:[:\+]$RE_NUMBER_HEX)?@.+\)" \
+ < $LOGS_DIR/adding_kernel_list-l.log
CHECK_EXIT_CODE=$?
if [ $NO_DEBUGINFO ] ; then
@@ -93,9 +101,13 @@ REGEX_STAT_VALUES="\s*\d+\s+probe:$TEST_PROBE"
# the value should be greater than 1
REGEX_STAT_VALUE_NONZERO="\s*[1-9][0-9]*\s+probe:$TEST_PROBE"
REGEX_STAT_TIME="\s*$RE_NUMBER\s+seconds (?:time elapsed|user|sys)"
-../common/check_all_lines_matched.pl "$REGEX_STAT_HEADER" "$REGEX_STAT_VALUES" "$REGEX_STAT_TIME" "$RE_LINE_COMMENT" "$RE_LINE_EMPTY" < $LOGS_DIR/adding_kernel_using_probe.log
+"$DIR_PATH/../common/check_all_lines_matched.pl" \
+ "$REGEX_STAT_HEADER" "$REGEX_STAT_VALUES" "$REGEX_STAT_TIME" \
+ "$RE_LINE_COMMENT" "$RE_LINE_EMPTY" < $LOGS_DIR/adding_kernel_using_probe.log
CHECK_EXIT_CODE=$?
-../common/check_all_patterns_found.pl "$REGEX_STAT_HEADER" "$REGEX_STAT_VALUE_NONZERO" "$REGEX_STAT_TIME" < $LOGS_DIR/adding_kernel_using_probe.log
+"$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "$REGEX_STAT_HEADER" "$REGEX_STAT_VALUE_NONZERO" "$REGEX_STAT_TIME" \
+ < $LOGS_DIR/adding_kernel_using_probe.log
(( CHECK_EXIT_CODE += $? ))
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "using added probe"
@@ -108,7 +120,8 @@ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "using added probe"
$CMD_PERF probe -d $TEST_PROBE\* 2> $LOGS_DIR/adding_kernel_removing.err
PERF_EXIT_CODE=$?
-../common/check_all_lines_matched.pl "Removed event: probe:$TEST_PROBE" < $LOGS_DIR/adding_kernel_removing.err
+"$DIR_PATH/../common/check_all_lines_matched.pl" \
+ "Removed event: probe:$TEST_PROBE" < $LOGS_DIR/adding_kernel_removing.err
CHECK_EXIT_CODE=$?
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "deleting added probe"
@@ -121,7 +134,9 @@ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "deleting added probe"
$CMD_PERF list probe:\* > $LOGS_DIR/adding_kernel_list_removed.log
PERF_EXIT_CODE=$?
-../common/check_all_lines_matched.pl "$RE_LINE_EMPTY" "List of pre-defined events" "Metric Groups:" < $LOGS_DIR/adding_kernel_list_removed.log
+"$DIR_PATH/../common/check_all_lines_matched.pl" \
+ "$RE_LINE_EMPTY" "List of pre-defined events" "Metric Groups:" \
+ < $LOGS_DIR/adding_kernel_list_removed.log
CHECK_EXIT_CODE=$?
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "listing removed probe (should NOT be listed)"
@@ -135,7 +150,9 @@ $CMD_PERF probe -n --add $TEST_PROBE 2> $LOGS_DIR/adding_kernel_dryrun.err
PERF_EXIT_CODE=$?
# check for the output (should be the same as usual)
-../common/check_all_patterns_found.pl "Added new events?:" "probe:$TEST_PROBE" "on $TEST_PROBE" < $LOGS_DIR/adding_kernel_dryrun.err
+"$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "Added new events?:" "probe:$TEST_PROBE" "on $TEST_PROBE" \
+ < $LOGS_DIR/adding_kernel_dryrun.err
CHECK_EXIT_CODE=$?
# check that no probe was added in real
@@ -152,7 +169,9 @@ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "dry run :: adding probe"
$CMD_PERF probe --add $TEST_PROBE 2> $LOGS_DIR/adding_kernel_forceadd_01.err
PERF_EXIT_CODE=$?
-../common/check_all_patterns_found.pl "Added new events?:" "probe:$TEST_PROBE" "on $TEST_PROBE" < $LOGS_DIR/adding_kernel_forceadd_01.err
+"$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "Added new events?:" "probe:$TEST_PROBE" "on $TEST_PROBE" \
+ < $LOGS_DIR/adding_kernel_forceadd_01.err
CHECK_EXIT_CODE=$?
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "force-adding probes :: first probe adding"
@@ -162,7 +181,9 @@ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "force-adding probes :: first pro
! $CMD_PERF probe --add $TEST_PROBE 2> $LOGS_DIR/adding_kernel_forceadd_02.err
PERF_EXIT_CODE=$?
-../common/check_all_patterns_found.pl "Error: event \"$TEST_PROBE\" already exists." "Error: Failed to add events." < $LOGS_DIR/adding_kernel_forceadd_02.err
+"$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "Error: event \"$TEST_PROBE\" already exists." \
+ "Error: Failed to add events." < $LOGS_DIR/adding_kernel_forceadd_02.err
CHECK_EXIT_CODE=$?
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "force-adding probes :: second probe adding (without force)"
@@ -173,7 +194,9 @@ NO_OF_PROBES=`$CMD_PERF probe -l $TEST_PROBE| wc -l`
$CMD_PERF probe --force --add $TEST_PROBE 2> $LOGS_DIR/adding_kernel_forceadd_03.err
PERF_EXIT_CODE=$?
-../common/check_all_patterns_found.pl "Added new events?:" "probe:${TEST_PROBE}_${NO_OF_PROBES}" "on $TEST_PROBE" < $LOGS_DIR/adding_kernel_forceadd_03.err
+"$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "Added new events?:" "probe:${TEST_PROBE}_${NO_OF_PROBES}" \
+ "on $TEST_PROBE" < $LOGS_DIR/adding_kernel_forceadd_03.err
CHECK_EXIT_CODE=$?
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "force-adding probes :: second probe adding (with force)"
@@ -187,7 +210,9 @@ $CMD_PERF stat -e probe:$TEST_PROBE -e probe:${TEST_PROBE}_${NO_OF_PROBES} -x';'
PERF_EXIT_CODE=$?
REGEX_LINE="$RE_NUMBER;+probe:${TEST_PROBE}_?(?:$NO_OF_PROBES)?;$RE_NUMBER;$RE_NUMBER"
-../common/check_all_lines_matched.pl "$REGEX_LINE" "$RE_LINE_EMPTY" "$RE_LINE_COMMENT" < $LOGS_DIR/adding_kernel_using_two.log
+"$DIR_PATH/../common/check_all_lines_matched.pl" \
+ "$REGEX_LINE" "$RE_LINE_EMPTY" "$RE_LINE_COMMENT" \
+ < $LOGS_DIR/adding_kernel_using_two.log
CHECK_EXIT_CODE=$?
VALUE_1=`grep "$TEST_PROBE;" $LOGS_DIR/adding_kernel_using_two.log | awk -F';' '{print $1}'`
@@ -205,7 +230,9 @@ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "using doubled probe"
$CMD_PERF probe --del \* 2> $LOGS_DIR/adding_kernel_removing_wildcard.err
PERF_EXIT_CODE=$?
-../common/check_all_patterns_found.pl "Removed event: probe:$TEST_PROBE" "Removed event: probe:${TEST_PROBE}_1" < $LOGS_DIR/adding_kernel_removing_wildcard.err
+"$DIR_PATH/../common/check_all_lines_matched.pl" \
+ "Removed event: probe:$TEST_PROBE" \
+ "Removed event: probe:${TEST_PROBE}_1" < $LOGS_DIR/adding_kernel_removing_wildcard.err
CHECK_EXIT_CODE=$?
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "removing multiple probes"
@@ -217,7 +244,9 @@ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "removing multiple probes"
$CMD_PERF probe -nf --max-probes=512 -a 'vfs_* $params' 2> $LOGS_DIR/adding_kernel_adding_wildcard.err
PERF_EXIT_CODE=$?
-../common/check_all_patterns_found.pl "probe:vfs_mknod" "probe:vfs_create" "probe:vfs_rmdir" "probe:vfs_link" "probe:vfs_write" < $LOGS_DIR/adding_kernel_adding_wildcard.err
+"$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "probe:vfs_mknod" "probe:vfs_create" "probe:vfs_rmdir" \
+ "probe:vfs_link" "probe:vfs_write" < $LOGS_DIR/adding_kernel_adding_wildcard.err
CHECK_EXIT_CODE=$?
if [ $NO_DEBUGINFO ] ; then
@@ -240,13 +269,22 @@ test $PERF_EXIT_CODE -ne 139 -a $PERF_EXIT_CODE -ne 0
PERF_EXIT_CODE=$?
# check that the error message is reasonable
-../common/check_all_patterns_found.pl "Failed to find" "somenonexistingrandomstuffwhichisalsoprettylongorevenlongertoexceed64" < $LOGS_DIR/adding_kernel_nonexisting.err
+"$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "Failed to find" \
+ "somenonexistingrandomstuffwhichisalsoprettylongorevenlongertoexceed64" \
+ < $LOGS_DIR/adding_kernel_nonexisting.err
CHECK_EXIT_CODE=$?
-../common/check_all_patterns_found.pl "in this function|at this address" "Error" "Failed to add events" < $LOGS_DIR/adding_kernel_nonexisting.err
+"$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "in this function|at this address" "Error" "Failed to add events" \
+ < $LOGS_DIR/adding_kernel_nonexisting.err
(( CHECK_EXIT_CODE += $? ))
-../common/check_all_lines_matched.pl "Failed to find" "Error" "Probe point .+ not found" "optimized out" "Use.+\-\-range option to show.+location range" < $LOGS_DIR/adding_kernel_nonexisting.err
+"$DIR_PATH/../common/check_all_lines_matched.pl" \
+ "Failed to find" "Error" "Probe point .+ not found" "optimized out" \
+ "Use.+\-\-range option to show.+location range" \
+ < $LOGS_DIR/adding_kernel_nonexisting.err
(( CHECK_EXIT_CODE += $? ))
-../common/check_no_patterns_found.pl "$RE_SEGFAULT" < $LOGS_DIR/adding_kernel_nonexisting.err
+"$DIR_PATH/../common/check_no_patterns_found.pl" \
+ "$RE_SEGFAULT" < $LOGS_DIR/adding_kernel_nonexisting.err
(( CHECK_EXIT_CODE += $? ))
if [ $NO_DEBUGINFO ]; then
@@ -264,7 +302,10 @@ fi
$CMD_PERF probe --add "$TEST_PROBE%return \$retval" 2> $LOGS_DIR/adding_kernel_func_retval_add.err
PERF_EXIT_CODE=$?
-../common/check_all_patterns_found.pl "Added new events?:" "probe:$TEST_PROBE" "on $TEST_PROBE%return with \\\$retval" < $LOGS_DIR/adding_kernel_func_retval_add.err
+"$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "Added new events?:" "probe:$TEST_PROBE" \
+ "on $TEST_PROBE%return with \\\$retval" \
+ < $LOGS_DIR/adding_kernel_func_retval_add.err
CHECK_EXIT_CODE=$?
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "function with retval :: add"
@@ -274,7 +315,9 @@ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "function with retval :: add"
$CMD_PERF record -e probe:$TEST_PROBE\* -o $CURRENT_TEST_DIR/perf.data -- cat /proc/cpuinfo > /dev/null 2> $LOGS_DIR/adding_kernel_func_retval_record.err
PERF_EXIT_CODE=$?
-../common/check_all_patterns_found.pl "$RE_LINE_RECORD1" "$RE_LINE_RECORD2" < $LOGS_DIR/adding_kernel_func_retval_record.err
+"$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "$RE_LINE_RECORD1" "$RE_LINE_RECORD2" \
+ < $LOGS_DIR/adding_kernel_func_retval_record.err
CHECK_EXIT_CODE=$?
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "function with retval :: record"
@@ -285,9 +328,11 @@ $CMD_PERF script -i $CURRENT_TEST_DIR/perf.data > $LOGS_DIR/adding_kernel_func_r
PERF_EXIT_CODE=$?
REGEX_SCRIPT_LINE="\s*cat\s+$RE_NUMBER\s+\[$RE_NUMBER\]\s+$RE_NUMBER:\s+probe:$TEST_PROBE\w*:\s+\($RE_NUMBER_HEX\s+<\-\s+$RE_NUMBER_HEX\)\s+arg1=$RE_NUMBER_HEX"
-../common/check_all_lines_matched.pl "$REGEX_SCRIPT_LINE" < $LOGS_DIR/adding_kernel_func_retval_script.log
+"$DIR_PATH/../common/check_all_lines_matched.pl" \
+ "$REGEX_SCRIPT_LINE" < $LOGS_DIR/adding_kernel_func_retval_script.log
CHECK_EXIT_CODE=$?
-../common/check_all_patterns_found.pl "$REGEX_SCRIPT_LINE" < $LOGS_DIR/adding_kernel_func_retval_script.log
+"$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "$REGEX_SCRIPT_LINE" < $LOGS_DIR/adding_kernel_func_retval_script.log
(( CHECK_EXIT_CODE += $? ))
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "function argument probing :: script"
diff --git a/tools/perf/tests/shell/base_probe/test_basic.sh b/tools/perf/tests/shell/base_probe/test_basic.sh
index 9d8b5afbeddd..162838ddc974 100755
--- a/tools/perf/tests/shell/base_probe/test_basic.sh
+++ b/tools/perf/tests/shell/base_probe/test_basic.sh
@@ -12,11 +12,12 @@
# This test tests basic functionality of perf probe command.
#
-# include working environment
-. ../common/init.sh
-
+DIR_PATH="$(dirname $0)"
TEST_RESULT=0
+# include working environment
+. "$DIR_PATH/../common/init.sh"
+
if ! check_kprobes_available; then
print_overall_skipped
exit 2
@@ -30,15 +31,25 @@ if [ "$PARAM_GENERAL_HELP_TEXT_CHECK" = "y" ]; then
$CMD_PERF probe --help > $LOGS_DIR/basic_helpmsg.log 2> $LOGS_DIR/basic_helpmsg.err
PERF_EXIT_CODE=$?
- ../common/check_all_patterns_found.pl "PERF-PROBE" "NAME" "SYNOPSIS" "DESCRIPTION" "OPTIONS" "PROBE\s+SYNTAX" "PROBE\s+ARGUMENT" "LINE\s+SYNTAX" < $LOGS_DIR/basic_helpmsg.log
+ "$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "PERF-PROBE" "NAME" "SYNOPSIS" "DESCRIPTION" "OPTIONS" \
+ "PROBE\s+SYNTAX" "PROBE\s+ARGUMENT" "LINE\s+SYNTAX" \
+ < $LOGS_DIR/basic_helpmsg.log
CHECK_EXIT_CODE=$?
- ../common/check_all_patterns_found.pl "LAZY\s+MATCHING" "FILTER\s+PATTERN" "EXAMPLES" "SEE\s+ALSO" < $LOGS_DIR/basic_helpmsg.log
+ "$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "LAZY\s+MATCHING" "FILTER\s+PATTERN" "EXAMPLES" "SEE\s+ALSO" \
+ < $LOGS_DIR/basic_helpmsg.log
(( CHECK_EXIT_CODE += $? ))
- ../common/check_all_patterns_found.pl "vmlinux" "module=" "source=" "verbose" "quiet" "add=" "del=" "list.*EVENT" "line=" "vars=" "externs" < $LOGS_DIR/basic_helpmsg.log
+ "$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "vmlinux" "module=" "source=" "verbose" "quiet" "add=" "del=" \
+ "list.*EVENT" "line=" "vars=" "externs" < $LOGS_DIR/basic_helpmsg.log
(( CHECK_EXIT_CODE += $? ))
- ../common/check_all_patterns_found.pl "no-inlines" "funcs.*FILTER" "filter=FILTER" "force" "dry-run" "max-probes" "exec=" "demangle-kernel" < $LOGS_DIR/basic_helpmsg.log
+ "$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "no-inlines" "funcs.*FILTER" "filter=FILTER" "force" "dry-run" \
+ "max-probes" "exec=" "demangle-kernel" < $LOGS_DIR/basic_helpmsg.log
(( CHECK_EXIT_CODE += $? ))
- ../common/check_no_patterns_found.pl "No manual entry for" < $LOGS_DIR/basic_helpmsg.err
+ "$DIR_PATH/../common/check_no_patterns_found.pl" \
+ "No manual entry for" < $LOGS_DIR/basic_helpmsg.err
(( CHECK_EXIT_CODE += $? ))
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "help message"
@@ -53,7 +64,9 @@ fi
# without any args perf-probe should print usage
$CMD_PERF probe 2> $LOGS_DIR/basic_usage.log > /dev/null
-../common/check_all_patterns_found.pl "[Uu]sage" "perf probe" "verbose" "quiet" "add" "del" "force" "line" "vars" "externs" "range" < $LOGS_DIR/basic_usage.log
+"$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "[Uu]sage" "perf probe" "verbose" "quiet" "add" "del" "force" \
+ "line" "vars" "externs" "range" < $LOGS_DIR/basic_usage.log
CHECK_EXIT_CODE=$?
print_results 0 $CHECK_EXIT_CODE "usage message"
diff --git a/tools/perf/tests/shell/base_probe/test_invalid_options.sh b/tools/perf/tests/shell/base_probe/test_invalid_options.sh
index 92f7254eb32a..44a3ae014bfa 100755
--- a/tools/perf/tests/shell/base_probe/test_invalid_options.sh
+++ b/tools/perf/tests/shell/base_probe/test_invalid_options.sh
@@ -12,11 +12,12 @@
# This test checks whether the invalid and incompatible options are reported
#
-# include working environment
-. ../common/init.sh
-
+DIR_PATH="$(dirname $0)"
TEST_RESULT=0
+# include working environment
+. "$DIR_PATH/../common/init.sh"
+
if ! check_kprobes_available; then
print_overall_skipped
exit 2
@@ -33,7 +34,9 @@ for opt in '-a' '-d' '-L' '-V'; do
! $CMD_PERF probe $opt 2> $LOGS_DIR/invalid_options_missing_argument$opt.err
PERF_EXIT_CODE=$?
- ../common/check_all_patterns_found.pl "Error: switch .* requires a value" < $LOGS_DIR/invalid_options_missing_argument$opt.err
+ "$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "Error: switch .* requires a value" \
+ < $LOGS_DIR/invalid_options_missing_argument$opt.err
CHECK_EXIT_CODE=$?
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "missing argument for $opt"
@@ -66,7 +69,8 @@ for opt in '-a xxx -d xxx' '-a xxx -L foo' '-a xxx -V foo' '-a xxx -l' '-a xxx -
! $CMD_PERF probe $opt > /dev/null 2> $LOGS_DIR/aux.log
PERF_EXIT_CODE=$?
- ../common/check_all_patterns_found.pl "Error: switch .+ cannot be used with switch .+" < $LOGS_DIR/aux.log
+ "$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "Error: switch .+ cannot be used with switch .+" < $LOGS_DIR/aux.log
CHECK_EXIT_CODE=$?
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "mutually exclusive options :: $opt"
diff --git a/tools/perf/tests/shell/base_probe/test_line_semantics.sh b/tools/perf/tests/shell/base_probe/test_line_semantics.sh
index 20435b6bf6bc..576442d87a44 100755
--- a/tools/perf/tests/shell/base_probe/test_line_semantics.sh
+++ b/tools/perf/tests/shell/base_probe/test_line_semantics.sh
@@ -13,11 +13,12 @@
# arguments are properly reported.
#
-# include working environment
-. ../common/init.sh
-
+DIR_PATH="$(dirname $0)"
TEST_RESULT=0
+# include working environment
+. "$DIR_PATH/../common/init.sh"
+
if ! check_kprobes_available; then
print_overall_skipped
exit 2
diff --git a/tools/perf/tests/shell/base_report/setup.sh b/tools/perf/tests/shell/base_report/setup.sh
index 8634e7e0dda6..bb49b0fabb11 100755
--- a/tools/perf/tests/shell/base_report/setup.sh
+++ b/tools/perf/tests/shell/base_report/setup.sh
@@ -12,8 +12,10 @@
#
#
+DIR_PATH="$(dirname $0)"
+
# include working environment
-. ../common/init.sh
+. "$DIR_PATH/../common/init.sh"
TEST_RESULT=0
@@ -24,7 +26,8 @@ SW_EVENT="cpu-clock"
$CMD_PERF record -asdg -e $SW_EVENT -o $CURRENT_TEST_DIR/perf.data -- $CMD_LONGER_SLEEP 2> $LOGS_DIR/setup.log
PERF_EXIT_CODE=$?
-../common/check_all_patterns_found.pl "$RE_LINE_RECORD1" "$RE_LINE_RECORD2" < $LOGS_DIR/setup.log
+"$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "$RE_LINE_RECORD1" "$RE_LINE_RECORD2" < $LOGS_DIR/setup.log
CHECK_EXIT_CODE=$?
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "prepare the perf.data file"
@@ -38,7 +41,8 @@ echo ==================
cat $LOGS_DIR/setup-latency.log
echo ==================
-../common/check_all_patterns_found.pl "$RE_LINE_RECORD1" "$RE_LINE_RECORD2" < $LOGS_DIR/setup-latency.log
+"$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "$RE_LINE_RECORD1" "$RE_LINE_RECORD2" < $LOGS_DIR/setup-latency.log
CHECK_EXIT_CODE=$?
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "prepare the perf.data.1 file"
diff --git a/tools/perf/tests/shell/base_report/test_basic.sh b/tools/perf/tests/shell/base_report/test_basic.sh
index adfd8713b8f8..0dfe7e5fd1ca 100755
--- a/tools/perf/tests/shell/base_report/test_basic.sh
+++ b/tools/perf/tests/shell/base_report/test_basic.sh
@@ -12,11 +12,12 @@
#
#
-# include working environment
-. ../common/init.sh
-
+DIR_PATH="$(dirname $0)"
TEST_RESULT=0
+# include working environment
+. "$DIR_PATH/../common/init.sh"
+
### help message
@@ -25,19 +26,37 @@ if [ "$PARAM_GENERAL_HELP_TEXT_CHECK" = "y" ]; then
$CMD_PERF report --help > $LOGS_DIR/basic_helpmsg.log 2> $LOGS_DIR/basic_helpmsg.err
PERF_EXIT_CODE=$?
- ../common/check_all_patterns_found.pl "PERF-REPORT" "NAME" "SYNOPSIS" "DESCRIPTION" "OPTIONS" "OVERHEAD\s+CALCULATION" "SEE ALSO" < $LOGS_DIR/basic_helpmsg.log
+ "$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "PERF-REPORT" "NAME" "SYNOPSIS" "DESCRIPTION" "OPTIONS" \
+ "OVERHEAD\s+CALCULATION" "SEE ALSO" < $LOGS_DIR/basic_helpmsg.log
CHECK_EXIT_CODE=$?
- ../common/check_all_patterns_found.pl "input" "verbose" "show-nr-samples" "show-cpu-utilization" "threads" "comms" "pid" "tid" "dsos" "symbols" "symbol-filter" < $LOGS_DIR/basic_helpmsg.log
+ "$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "input" "verbose" "show-nr-samples" "show-cpu-utilization" \
+ "threads" "comms" "pid" "tid" "dsos" "symbols" "symbol-filter" \
+ < $LOGS_DIR/basic_helpmsg.log
(( CHECK_EXIT_CODE += $? ))
- ../common/check_all_patterns_found.pl "hide-unresolved" "sort" "fields" "parent" "exclude-other" "column-widths" "field-separator" "dump-raw-trace" "children" < $LOGS_DIR/basic_helpmsg.log
+ "$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "hide-unresolved" "sort" "fields" "parent" "exclude-other" \
+ "column-widths" "field-separator" "dump-raw-trace" "children" \
+ < $LOGS_DIR/basic_helpmsg.log
(( CHECK_EXIT_CODE += $? ))
- ../common/check_all_patterns_found.pl "call-graph" "max-stack" "inverted" "ignore-callees" "pretty" "stdio" "tui" "gtk" "vmlinux" "kallsyms" "modules" < $LOGS_DIR/basic_helpmsg.log
+ "$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "call-graph" "max-stack" "inverted" "ignore-callees" "pretty" \
+ "stdio" "tui" "gtk" "vmlinux" "kallsyms" "modules" \
+ < $LOGS_DIR/basic_helpmsg.log
(( CHECK_EXIT_CODE += $? ))
- ../common/check_all_patterns_found.pl "force" "symfs" "cpu" "disassembler-style" "source" "asm-raw" "show-total-period" "show-info" "branch-stack" "group" < $LOGS_DIR/basic_helpmsg.log
+ "$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "force" "symfs" "cpu" "disassembler-style" "source" "asm-raw" \
+ "show-total-period" "show-info" "branch-stack" "group" \
+ < $LOGS_DIR/basic_helpmsg.log
(( CHECK_EXIT_CODE += $? ))
- ../common/check_all_patterns_found.pl "branch-history" "objdump" "demangle" "percent-limit" "percentage" "header" "itrace" "full-source-path" "show-ref-call-graph" < $LOGS_DIR/basic_helpmsg.log
+ "$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "branch-history" "objdump" "demangle" "percent-limit" "percentage" \
+ "header" "itrace" "full-source-path" "show-ref-call-graph" \
+ < $LOGS_DIR/basic_helpmsg.log
(( CHECK_EXIT_CODE += $? ))
- ../common/check_no_patterns_found.pl "No manual entry for" < $LOGS_DIR/basic_helpmsg.err
+ "$DIR_PATH/../common/check_no_patterns_found.pl" \
+ "No manual entry for" < $LOGS_DIR/basic_helpmsg.err
(( CHECK_EXIT_CODE += $? ))
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "help message"
@@ -57,9 +76,12 @@ REGEX_LOST_SAMPLES_INFO="#\s*Total Lost Samples:\s+$RE_NUMBER"
REGEX_SAMPLES_INFO="#\s*Samples:\s+(?:$RE_NUMBER)\w?\s+of\s+event\s+'$RE_EVENT_ANY'"
REGEX_LINES_HEADER="#\s*Children\s+Self\s+Command\s+Shared Object\s+Symbol"
REGEX_LINES="\s*$RE_NUMBER%\s+$RE_NUMBER%\s+\S+\s+\[kernel\.(?:vmlinux)|(?:kallsyms)\]\s+\[[k\.]\]\s+\w+"
-../common/check_all_patterns_found.pl "$REGEX_LOST_SAMPLES_INFO" "$REGEX_SAMPLES_INFO" "$REGEX_LINES_HEADER" "$REGEX_LINES" < $LOGS_DIR/basic_basic.log
+"$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "$REGEX_LOST_SAMPLES_INFO" "$REGEX_SAMPLES_INFO" \
+ "$REGEX_LINES_HEADER" "$REGEX_LINES" < $LOGS_DIR/basic_basic.log
CHECK_EXIT_CODE=$?
-../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/basic_basic.err
+"$DIR_PATH/../common/check_errors_whitelisted.pl" \
+ "$DIR_PATH/stderr-whitelist.txt" < $LOGS_DIR/basic_basic.err
(( CHECK_EXIT_CODE += $? ))
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "basic execution"
@@ -74,9 +96,11 @@ PERF_EXIT_CODE=$?
REGEX_LINES_HEADER="#\s*Children\s+Self\s+Samples\s+Command\s+Shared Object\s+Symbol"
REGEX_LINES="\s*$RE_NUMBER%\s+$RE_NUMBER%\s+$RE_NUMBER\s+\S+\s+\[kernel\.(?:vmlinux)|(?:kallsyms)\]\s+\[[k\.]\]\s+\w+"
-../common/check_all_patterns_found.pl "$REGEX_LINES_HEADER" "$REGEX_LINES" < $LOGS_DIR/basic_nrsamples.log
+"$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "$REGEX_LINES_HEADER" "$REGEX_LINES" < $LOGS_DIR/basic_nrsamples.log
CHECK_EXIT_CODE=$?
-../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/basic_nrsamples.err
+"$DIR_PATH/../common/check_errors_whitelisted.pl" \
+ "$DIR_PATH/stderr-whitelist.txt" < $LOGS_DIR/basic_nrsamples.err
(( CHECK_EXIT_CODE += $? ))
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "number of samples"
@@ -98,7 +122,10 @@ REGEX_LINE_CPUS_ONLINE="#\s+nrcpus online\s*:\s*$MY_CPUS_ONLINE"
REGEX_LINE_CPUS_AVAIL="#\s+nrcpus avail\s*:\s*$MY_CPUS_AVAILABLE"
# disable precise check for "nrcpus avail" in BASIC runmode
test $PERFTOOL_TESTSUITE_RUNMODE -lt $RUNMODE_STANDARD && REGEX_LINE_CPUS_AVAIL="#\s+nrcpus avail\s*:\s*$RE_NUMBER"
-../common/check_all_patterns_found.pl "$REGEX_LINE_TIMESTAMP" "$REGEX_LINE_HOSTNAME" "$REGEX_LINE_KERNEL" "$REGEX_LINE_PERF" "$REGEX_LINE_ARCH" "$REGEX_LINE_CPUS_ONLINE" "$REGEX_LINE_CPUS_AVAIL" < $LOGS_DIR/basic_header.log
+"$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "$REGEX_LINE_TIMESTAMP" "$REGEX_LINE_HOSTNAME" "$REGEX_LINE_KERNEL" \
+ "$REGEX_LINE_PERF" "$REGEX_LINE_ARCH" "$REGEX_LINE_CPUS_ONLINE" \
+ "$REGEX_LINE_CPUS_AVAIL" < $LOGS_DIR/basic_header.log
CHECK_EXIT_CODE=$?
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "header"
@@ -129,9 +156,11 @@ PERF_EXIT_CODE=$?
REGEX_LINES_HEADER="#\s*Children\s+Self\s+sys\s+usr\s+Command\s+Shared Object\s+Symbol"
REGEX_LINES="\s*$RE_NUMBER%\s+$RE_NUMBER%\s+$RE_NUMBER%\s+$RE_NUMBER%\s+\S+\s+\[kernel\.(?:vmlinux)|(?:kallsyms)\]\s+\[[k\.]\]\s+\w+"
-../common/check_all_patterns_found.pl "$REGEX_LINES_HEADER" "$REGEX_LINES" < $LOGS_DIR/basic_cpuut.log
+"$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "$REGEX_LINES_HEADER" "$REGEX_LINES" < $LOGS_DIR/basic_cpuut.log
CHECK_EXIT_CODE=$?
-../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/basic_cpuut.err
+"$DIR_PATH/../common/check_errors_whitelisted.pl" \
+ "$DIR_PATH/stderr-whitelist.txt" < $LOGS_DIR/basic_cpuut.err
(( CHECK_EXIT_CODE += $? ))
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "show CPU utilization"
@@ -144,9 +173,11 @@ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "show CPU utilization"
$CMD_PERF report --stdio -i $CURRENT_TEST_DIR/perf.data --pid=1 > $LOGS_DIR/basic_pid.log 2> $LOGS_DIR/basic_pid.err
PERF_EXIT_CODE=$?
-grep -P -v '^#' $LOGS_DIR/basic_pid.log | grep -P '\s+[\d\.]+%' | ../common/check_all_lines_matched.pl "systemd|init"
+grep -P -v '^#' $LOGS_DIR/basic_pid.log | grep -P '\s+[\d\.]+%' | \
+ "$DIR_PATH/../common/check_all_lines_matched.pl" "systemd|init"
CHECK_EXIT_CODE=$?
-../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/basic_pid.err
+"$DIR_PATH/../common/check_errors_whitelisted.pl" \
+ "$DIR_PATH/stderr-whitelist.txt" < $LOGS_DIR/basic_pid.err
(( CHECK_EXIT_CODE += $? ))
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "pid"
@@ -159,9 +190,11 @@ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "pid"
$CMD_PERF report --stdio -i $CURRENT_TEST_DIR/perf.data --symbols=dummynonexistingsymbol > $LOGS_DIR/basic_symbols.log 2> $LOGS_DIR/basic_symbols.err
PERF_EXIT_CODE=$?
-../common/check_all_lines_matched.pl "$RE_LINE_EMPTY" "$RE_LINE_COMMENT" < $LOGS_DIR/basic_symbols.log
+"$DIR_PATH/../common/check_all_lines_matched.pl" \
+ "$RE_LINE_EMPTY" "$RE_LINE_COMMENT" < $LOGS_DIR/basic_symbols.log
CHECK_EXIT_CODE=$?
-../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/basic_symbols.err
+"$DIR_PATH/../common/check_errors_whitelisted.pl" \
+ "$DIR_PATH/stderr-whitelist.txt" < $LOGS_DIR/basic_symbols.err
(( CHECK_EXIT_CODE += $? ))
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "non-existing symbol"
@@ -174,9 +207,11 @@ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "non-existing symbol"
$CMD_PERF report --stdio -i $CURRENT_TEST_DIR/perf.data --symbol-filter=map > $LOGS_DIR/basic_symbolfilter.log 2> $LOGS_DIR/basic_symbolfilter.err
PERF_EXIT_CODE=$?
-grep -P -v '^#' $LOGS_DIR/basic_symbolfilter.log | grep -P '\s+[\d\.]+%' | ../common/check_all_lines_matched.pl "\[[k\.]\]\s+.*map"
+grep -P -v '^#' $LOGS_DIR/basic_symbolfilter.log | grep -P '\s+[\d\.]+%' | \
+ "$DIR_PATH/../common/check_all_lines_matched.pl" "\[[k\.]\]\s+.*map"
CHECK_EXIT_CODE=$?
-../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/basic_symbolfilter.err
+"$DIR_PATH/../common/check_errors_whitelisted.pl" \
+ "$DIR_PATH/stderr-whitelist.txt" < $LOGS_DIR/basic_symbolfilter.err
(( CHECK_EXIT_CODE += $? ))
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "symbol filter"
@@ -189,7 +224,8 @@ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "symbol filter"
$CMD_PERF report -i $CURRENT_TEST_DIR/perf.data.1 --stdio --header-only > $LOGS_DIR/latency_header.log
PERF_EXIT_CODE=$?
-../common/check_all_patterns_found.pl ", context_switch = 1, " < $LOGS_DIR/latency_header.log
+"$DIR_PATH/../common/check_all_patterns_found.pl" \
+ ", context_switch = 1, " < $LOGS_DIR/latency_header.log
CHECK_EXIT_CODE=$?
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "latency header"
@@ -200,9 +236,11 @@ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "latency header"
$CMD_PERF report --stdio -i $CURRENT_TEST_DIR/perf.data.1 > $LOGS_DIR/latency_default.log 2> $LOGS_DIR/latency_default.err
PERF_EXIT_CODE=$?
-../common/check_all_patterns_found.pl "# Overhead Latency Command" < $LOGS_DIR/latency_default.log
+"$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "# Overhead Latency Command" < $LOGS_DIR/latency_default.log
CHECK_EXIT_CODE=$?
-../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/latency_default.err
+"$DIR_PATH/../common/check_errors_whitelisted.pl" \
+ "stderr-whitelist.txt" < $LOGS_DIR/latency_default.err
(( CHECK_EXIT_CODE += $? ))
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "default report for latency profile"
@@ -213,9 +251,11 @@ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "default report for latency profi
$CMD_PERF report --latency --stdio -i $CURRENT_TEST_DIR/perf.data.1 > $LOGS_DIR/latency_latency.log 2> $LOGS_DIR/latency_latency.err
PERF_EXIT_CODE=$?
-../common/check_all_patterns_found.pl "# Latency Overhead Command" < $LOGS_DIR/latency_latency.log
+"$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "# Latency Overhead Command" < $LOGS_DIR/latency_latency.log
CHECK_EXIT_CODE=$?
-../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/latency_latency.err
+"$DIR_PATH/../common/check_errors_whitelisted.pl" \
+ "stderr-whitelist.txt" < $LOGS_DIR/latency_latency.err
(( CHECK_EXIT_CODE += $? ))
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "latency report for latency profile"
@@ -226,9 +266,12 @@ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "latency report for latency profi
$CMD_PERF report --hierarchy --sort latency,parallelism,comm,symbol --parallelism=1,2 --stdio -i $CURRENT_TEST_DIR/perf.data.1 > $LOGS_DIR/parallelism_hierarchy.log 2> $LOGS_DIR/parallelism_hierarchy.err
PERF_EXIT_CODE=$?
-../common/check_all_patterns_found.pl "# Latency Parallelism / Command / Symbol" < $LOGS_DIR/parallelism_hierarchy.log
+"$DIR_PATH/../common/check_all_patterns_found.pl" \
+ "# Latency Parallelism / Command / Symbol" \
+ < $LOGS_DIR/parallelism_hierarchy.log
CHECK_EXIT_CODE=$?
-../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/parallelism_hierarchy.err
+"$DIR_PATH/../common/check_errors_whitelisted.pl" \
+ "stderr-whitelist.txt" < $LOGS_DIR/parallelism_hierarchy.err
(( CHECK_EXIT_CODE += $? ))
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "parallelism histogram"
diff --git a/tools/perf/tests/shell/buildid.sh b/tools/perf/tests/shell/buildid.sh
index 3383ca3399d4..102808cca9db 100755
--- a/tools/perf/tests/shell/buildid.sh
+++ b/tools/perf/tests/shell/buildid.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# build id cache operations
# SPDX-License-Identifier: GPL-2.0
@@ -36,16 +36,69 @@ if [ ${run_pe} -eq 1 ]; then
unset WAYLAND_DISPLAY
fi
-ex_md5=$(mktemp /tmp/perf.ex.MD5.XXX)
-ex_sha1=$(mktemp /tmp/perf.ex.SHA1.XXX)
+build_id_dir=
+ex_source=$(mktemp /tmp/perf_buildid_test.ex.XXX.c)
+ex_md5=$(mktemp /tmp/perf_buildid_test.ex.MD5.XXX)
+ex_sha1=$(mktemp /tmp/perf_buildid_test.ex.SHA1.XXX)
ex_pe=$(dirname $0)/../pe-file.exe
+data=$(mktemp /tmp/perf_buildid_test.data.XXX)
+log_out=$(mktemp /tmp/perf_buildid_test.log.out.XXX)
+log_err=$(mktemp /tmp/perf_buildid_test.log.err.XXX)
-echo 'int main(void) { return 0; }' | cc -Wl,--build-id=sha1 -o ${ex_sha1} -x c -
-echo 'int main(void) { return 0; }' | cc -Wl,--build-id=md5 -o ${ex_md5} -x c -
+cleanup() {
+ rm -f ${ex_source} ${ex_md5} ${ex_sha1} ${data} ${log_out} ${log_err}
+ if [ ${run_pe} -eq 1 ]; then
+ rm -r ${wineprefix}
+ fi
+ if [ -d ${build_id_dir} ]; then
+ rm -rf ${build_id_dir}
+ fi
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+# Test program based on the noploop workload.
+cat <<EOF > ${ex_source}
+#include <stdlib.h>
+#include <signal.h>
+#include <unistd.h>
+static volatile sig_atomic_t done;
+
+static void sighandler(int sig)
+{
+ (void)sig;
+ done = 1;
+}
+
+int main(int argc, const char **argv)
+{
+ int sec = 1;
+
+ if (argc > 1)
+ sec = atoi(argv[1]);
+
+ signal(SIGINT, sighandler);
+ signal(SIGALRM, sighandler);
+ alarm(sec);
+
+ while (!done)
+ continue;
+
+ return 0;
+}
+EOF
+cc -Wl,--build-id=sha1 -o ${ex_sha1} ${ex_source}
+cc -Wl,--build-id=md5 -o ${ex_md5} ${ex_source}
echo "test binaries: ${ex_sha1} ${ex_md5} ${ex_pe}"
-check()
+get_build_id()
{
case $1 in
*.exe)
@@ -64,6 +117,15 @@ check()
id=`readelf -n ${1} 2>/dev/null | grep 'Build ID' | awk '{print $3}'`
;;
esac
+ echo ${id}
+}
+
+check()
+{
+ file=$1
+ perf_data=$2
+
+ id=$(get_build_id $file)
echo "build id: ${id}"
id_file=${id#??}
@@ -76,45 +138,53 @@ check()
exit 1
fi
- file=${build_id_dir}/.build-id/$id_dir/`readlink ${link}`/elf
- echo "file: ${file}"
+ cached_file=${build_id_dir}/.build-id/$id_dir/`readlink ${link}`/elf
+ echo "file: ${cached_file}"
# Check for file permission of original file
# in case of pe-file.exe file
echo $1 | grep ".exe"
if [ $? -eq 0 ]; then
- if [ -x $1 ] && [ ! -x $file ]; then
- echo "failed: file ${file} executable does not exist"
+ if [ -x $1 ] && [ ! -x $cached_file ]; then
+ echo "failed: file ${cached_file} executable does not exist"
exit 1
fi
- if [ ! -x $file ] && [ ! -e $file ]; then
- echo "failed: file ${file} does not exist"
+ if [ ! -x $cached_file ] && [ ! -e $cached_file ]; then
+ echo "failed: file ${cached_file} does not exist"
exit 1
fi
- elif [ ! -x $file ]; then
- echo "failed: file ${file} does not exist"
+ elif [ ! -x $cached_file ]; then
+ echo "failed: file ${cached_file} does not exist"
exit 1
fi
- diff ${file} ${1}
+ diff ${cached_file} ${1}
if [ $? -ne 0 ]; then
- echo "failed: ${file} do not match"
+ echo "failed: ${cached_file} do not match"
exit 1
fi
- ${perf} buildid-cache -l | grep ${id}
+ ${perf} buildid-cache -l | grep -q ${id}
if [ $? -ne 0 ]; then
echo "failed: ${id} is not reported by \"perf buildid-cache -l\""
exit 1
fi
+ if [ -n "${perf_data}" ]; then
+ ${perf} buildid-list -i ${perf_data} | grep -q ${id}
+ if [ $? -ne 0 ]; then
+ echo "failed: ${id} is not reported by \"perf buildid-list -i ${perf_data}\""
+ exit 1
+ fi
+ fi
+
echo "OK for ${1}"
}
test_add()
{
- build_id_dir=$(mktemp -d /tmp/perf.debug.XXX)
+ build_id_dir=$(mktemp -d /tmp/perf_buildid_test.debug.XXX)
perf="perf --buildid-dir ${build_id_dir}"
${perf} buildid-cache -v -a ${1}
@@ -128,12 +198,88 @@ test_add()
rm -rf ${build_id_dir}
}
+test_remove()
+{
+ build_id_dir=$(mktemp -d /tmp/perf_buildid_test.debug.XXX)
+ perf="perf --buildid-dir ${build_id_dir}"
+
+ ${perf} buildid-cache -v -a ${1}
+ if [ $? -ne 0 ]; then
+ echo "failed: add ${1} to build id cache"
+ exit 1
+ fi
+
+ id=$(get_build_id ${1})
+ if ! ${perf} buildid-cache -l | grep -q ${id}; then
+ echo "failed: ${id} not in cache"
+ exit 1
+ fi
+
+ ${perf} buildid-cache -v -r ${1}
+ if [ $? -ne 0 ]; then
+ echo "failed: remove ${id} from build id cache"
+ exit 1
+ fi
+
+ if ${perf} buildid-cache -l | grep -q ${id}; then
+ echo "failed: ${id} still in cache after remove"
+ exit 1
+ fi
+
+ echo "remove: OK"
+ rm -rf ${build_id_dir}
+}
+
+test_purge()
+{
+ build_id_dir=$(mktemp -d /tmp/perf_buildid_test.debug.XXX)
+ perf="perf --buildid-dir ${build_id_dir}"
+
+ id1=$(get_build_id ${ex_sha1})
+ ${perf} buildid-cache -v -a ${ex_sha1}
+ if ! ${perf} buildid-cache -l | grep -q ${id1}; then
+ echo "failed: ${id1} not in cache"
+ exit 1
+ fi
+
+ id2=$(get_build_id ${ex_md5})
+ ${perf} buildid-cache -v -a ${ex_md5}
+ if ! ${perf} buildid-cache -l | grep -q ${id2}; then
+ echo "failed: ${id2} not in cache"
+ exit 1
+ fi
+
+ # Purge by path
+ ${perf} buildid-cache -v -p ${ex_sha1}
+ if [ $? -ne 0 ]; then
+ echo "failed: purge build id cache of ${ex_sha1}"
+ exit 1
+ fi
+
+ ${perf} buildid-cache -v -p ${ex_md5}
+ if [ $? -ne 0 ]; then
+ echo "failed: purge build id cache of ${ex_md5}"
+ exit 1
+ fi
+
+ # Verify both are gone
+ if ${perf} buildid-cache -l | grep -q ${id1}; then
+ echo "failed: ${id1} still in cache after purge"
+ exit 1
+ fi
+
+ if ${perf} buildid-cache -l | grep -q ${id2}; then
+ echo "failed: ${id2} still in cache after purge"
+ exit 1
+ fi
+
+ echo "purge: OK"
+ rm -rf ${build_id_dir}
+}
+
test_record()
{
- data=$(mktemp /tmp/perf.data.XXX)
- build_id_dir=$(mktemp -d /tmp/perf.debug.XXX)
- log_out=$(mktemp /tmp/perf.log.out.XXX)
- log_err=$(mktemp /tmp/perf.log.err.XXX)
+ build_id_dir=$(mktemp -d /tmp/perf_buildid_test.debug.XXX)
perf="perf --buildid-dir ${build_id_dir}"
echo "running: perf record $*"
@@ -145,7 +291,7 @@ test_record()
fi
args="$*"
- check ${args##* }
+ check ${args##* } ${data}
rm -f ${log_out} ${log_err}
rm -rf ${build_id_dir}
@@ -166,10 +312,15 @@ if [ ${run_pe} -eq 1 ]; then
test_record wine ${ex_pe}
fi
-# cleanup
-rm ${ex_sha1} ${ex_md5}
-if [ ${run_pe} -eq 1 ]; then
- rm -r ${wineprefix}
+# remove binaries manually via perf buildid-cache -r
+test_remove ${ex_sha1}
+test_remove ${ex_md5}
+if [ ${add_pe} -eq 1 ]; then
+ test_remove ${ex_pe}
fi
+# purge binaries manually via perf buildid-cache -p
+test_purge
+
+cleanup
exit 0
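
For reference, the cache layout that check() walks is keyed on the build id itself: the first two hex digits name a directory under .build-id and the remainder names the link inside it. A minimal sketch of that mapping, with an illustrative cache root and id (neither is taken from the test):

    #!/bin/bash
    # Sketch: derive the expected perf build-id cache link for an id.
    build_id_dir=/tmp/perf.debug.example
    id=abcdef0123456789abcdef0123456789abcdef01
    id_file=${id#??}            # everything after the first two hex digits
    id_dir=${id%"$id_file"}     # the first two hex digits, e.g. "ab"
    echo "link: ${build_id_dir}/.build-id/${id_dir}/${id_file}"
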
diff --git a/tools/perf/tests/shell/c2c.sh b/tools/perf/tests/shell/c2c.sh
new file mode 100755
index 000000000000..2471d44595c3
--- /dev/null
+++ b/tools/perf/tests/shell/c2c.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+# perf c2c tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+perfdata=$(mktemp /tmp/__perf_c2c_test.perf.data.XXXXX)
+
+cleanup() {
+ rm -f "${perfdata}"
+ rm -f "${perfdata}".old
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+check_c2c_support() {
+ # Check if perf c2c record works.
+ if ! perf c2c record -o "${perfdata}" -- true > /dev/null 2>&1 ; then
+ return 1
+ fi
+ return 0
+}
+
+test_c2c_record_report() {
+ echo "c2c record and report test"
+ if ! check_c2c_support ; then
+ echo "c2c record and report test [Skipped: perf c2c record failed (possibly missing hardware support)]"
+ err=2
+ return
+ fi
+
+ # Run a workload that does some memory operations.
+ if ! perf c2c record -o "${perfdata}" -- perf test -w datasym 1 > /dev/null 2>&1 ; then
+ echo "c2c record and report test [Skipped: perf c2c record failed during workload]"
+ return
+ fi
+
+ if ! perf c2c report -i "${perfdata}" --stdio > /dev/null 2>&1 ; then
+ echo "c2c record and report test [Failed: report failed]"
+ err=1
+ return
+ fi
+
+ if ! perf c2c report -i "${perfdata}" -N > /dev/null 2>&1 ; then
+ echo "c2c record and report test [Failed: report -N failed]"
+ err=1
+ return
+ fi
+
+ echo "c2c record and report test [Success]"
+}
+
+test_c2c_record_report
+cleanup
+exit $err
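
The new script follows the exit-code convention shared by the perf shell tests: 0 reports a pass, 1 a failure, 2 a skip. A minimal sketch of the capability-probe pattern it uses, assuming only that perf c2c record accepts -o and a trailing command:

    #!/bin/bash
    # 0 = pass, 1 = fail, 2 = skip (e.g. missing hardware support).
    err=0
    if ! perf c2c record -o /dev/null -- true > /dev/null 2>&1; then
        echo "skipping: perf c2c record not supported here"
        err=2
    fi
    exit $err
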
diff --git a/tools/perf/tests/shell/common/init.sh b/tools/perf/tests/shell/common/init.sh
index 26c7525651e0..cbfc78bec974 100644
--- a/tools/perf/tests/shell/common/init.sh
+++ b/tools/perf/tests/shell/common/init.sh
@@ -11,8 +11,8 @@
#
-. ../common/settings.sh
-. ../common/patterns.sh
+. "$(dirname $0)/../common/settings.sh"
+. "$(dirname $0)/../common/patterns.sh"
THIS_TEST_NAME=`basename $0 .sh`
diff --git a/tools/perf/tests/shell/coresight/asm_pure_loop.sh b/tools/perf/tests/shell/coresight/asm_pure_loop.sh
index c63bc8c73e26..0301904b9637 100755
--- a/tools/perf/tests/shell/coresight/asm_pure_loop.sh
+++ b/tools/perf/tests/shell/coresight/asm_pure_loop.sh
@@ -1,4 +1,4 @@
-#!/bin/sh -e
+#!/bin/bash -e
# CoreSight / ASM Pure Loop (exclusive)
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/coresight/memcpy_thread/memcpy_thread.c b/tools/perf/tests/shell/coresight/memcpy_thread/memcpy_thread.c
index 5f886cd09e6b..7e879217be30 100644
--- a/tools/perf/tests/shell/coresight/memcpy_thread/memcpy_thread.c
+++ b/tools/perf/tests/shell/coresight/memcpy_thread/memcpy_thread.c
@@ -27,6 +27,8 @@ static void *thrfn(void *arg)
}
for (i = 0; i < len; i++)
memcpy(dst, src, a->size * 1024);
+
+ return NULL;
}
static pthread_t new_thr(void *(*fn) (void *arg), void *arg)
diff --git a/tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh b/tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh
index 8e29630957c8..1f765d69acc3 100755
--- a/tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh
+++ b/tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh
@@ -1,4 +1,4 @@
-#!/bin/sh -e
+#!/bin/bash -e
# CoreSight / Memcpy 16k 10 Threads (exclusive)
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/coresight/thread_loop/thread_loop.c b/tools/perf/tests/shell/coresight/thread_loop/thread_loop.c
index e05a559253ca..86f3f548b006 100644
--- a/tools/perf/tests/shell/coresight/thread_loop/thread_loop.c
+++ b/tools/perf/tests/shell/coresight/thread_loop/thread_loop.c
@@ -34,8 +34,8 @@ static void *thrfn(void *arg)
}
asm volatile(
"loop:\n"
- "add %[i], %[i], #1\n"
- "cmp %[i], %[len]\n"
+ "add %w[i], %w[i], #1\n"
+ "cmp %w[i], %w[len]\n"
"blt loop\n"
: /* out */
: /* in */ [i] "r" (i), [len] "r" (len)
diff --git a/tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh b/tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh
index 0c4c82a1c8e1..7f43a93a2ac2 100755
--- a/tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh
+++ b/tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh
@@ -1,4 +1,4 @@
-#!/bin/sh -e
+#!/bin/bash -e
# CoreSight / Thread Loop 10 Threads - Check TID (exclusive)
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh b/tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh
index d3aea9fc6ced..a94d2079ed06 100755
--- a/tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh
+++ b/tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh
@@ -1,4 +1,4 @@
-#!/bin/sh -e
+#!/bin/bash -e
# CoreSight / Thread Loop 2 Threads - Check TID (exclusive)
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/coresight/unroll_loop_thread/unroll_loop_thread.c b/tools/perf/tests/shell/coresight/unroll_loop_thread/unroll_loop_thread.c
index 0fc7bf1a25af..8f4e1c985ca3 100644
--- a/tools/perf/tests/shell/coresight/unroll_loop_thread/unroll_loop_thread.c
+++ b/tools/perf/tests/shell/coresight/unroll_loop_thread/unroll_loop_thread.c
@@ -20,7 +20,7 @@ static void *thrfn(void *arg)
for (i = 0; i < 10000; i++) {
asm volatile (
// force an unroll of this add instruction so we can test long runs of code
-#define SNIP1 "add %[in], %[in], #1\n"
+#define SNIP1 "add %w[in], %w[in], #1\n"
// 10
#define SNIP2 SNIP1 SNIP1 SNIP1 SNIP1 SNIP1 SNIP1 SNIP1 SNIP1 SNIP1 SNIP1
// 100
@@ -36,6 +36,8 @@ static void *thrfn(void *arg)
: /* clobber */
);
}
+
+ return NULL;
}
static pthread_t new_thr(void *(*fn) (void *arg), void *arg)
diff --git a/tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh b/tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh
index 7429d3a2ae43..cb3e97a0a89f 100755
--- a/tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh
+++ b/tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh
@@ -1,4 +1,4 @@
-#!/bin/sh -e
+#!/bin/bash -e
# CoreSight / Unroll Loop Thread 10 (exclusive)
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/diff.sh b/tools/perf/tests/shell/diff.sh
index e05a5dc49479..fe05fdebcab5 100755
--- a/tools/perf/tests/shell/diff.sh
+++ b/tools/perf/tests/shell/diff.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf diff tests
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/drm_pmu.sh b/tools/perf/tests/shell/drm_pmu.sh
new file mode 100755
index 000000000000..e629fe0e8463
--- /dev/null
+++ b/tools/perf/tests/shell/drm_pmu.sh
@@ -0,0 +1,78 @@
+#!/bin/bash
+# DRM PMU
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+output=$(mktemp /tmp/perf.drm_pmu.XXXXXX.txt)
+
+cleanup() {
+ rm -f "${output}"
+
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+# Array to store file descriptors and device names
+declare -A device_fds
+
+# Open all devices and store file descriptors. Opening the device will create a
+# /proc/$$/fdinfo file containing the DRM statistics.
+fd_count=3 # Start with file descriptor 3
+for device in /dev/dri/*
+do
+ if [[ ! -c "$device" ]]
+ then
+ continue
+ fi
+ major=$(stat -c "%Hr" "$device")
+ if [[ "$major" != 226 ]]
+ then
+ continue
+ fi
+ echo "Opening $device"
+ eval "exec $fd_count<\"$device\""
+ echo "fdinfo for: $device (FD: $fd_count)"
+ cat "/proc/$$/fdinfo/$fd_count"
+ echo
+ device_fds["$device"]="$fd_count"
+ fd_count=$((fd_count + 1))
+done
+
+if [[ ${#device_fds[@]} -eq 0 ]]
+then
+ echo "No DRM devices found [Skip]"
+ cleanup
+ exit 2
+fi
+
+# For each DRM event
+err=0
+for p in $(perf list --raw-dump drm-)
+do
+ echo -n "Testing perf stat of $p. "
+ perf stat -e "$p" --pid=$$ true > "$output" 2>&1
+ if ! grep -q "$p" "$output"
+ then
+ echo "Missing DRM event in: [Failed]"
+ cat "$output"
+ err=1
+ else
+ echo "[OK]"
+ fi
+done
+
+# Close all file descriptors
+for fd in "${device_fds[@]}"; do
+ eval "exec $fd<&-"
+done
+
+# Finished
+cleanup
+exit $err
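
The fd bookkeeping above tracks descriptor numbers by hand and opens them through eval. As a side note, bash 4.1 and later can allocate descriptors automatically with the {var}<file redirection, which avoids the eval entirely; a sketch under that assumption:

    #!/bin/bash
    # Sketch: automatic fd allocation instead of a manual fd_count (bash >= 4.1).
    declare -A device_fds
    for device in /dev/dri/*; do
        [ -c "$device" ] || continue
        exec {fd}<"$device"          # bash picks a free descriptor, stores it in $fd
        device_fds["$device"]=$fd
    done
    for fd in "${device_fds[@]}"; do
        exec {fd}<&-                 # close each allocated descriptor
    done
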
diff --git a/tools/perf/tests/shell/evlist.sh b/tools/perf/tests/shell/evlist.sh
new file mode 100755
index 000000000000..140f099e75c1
--- /dev/null
+++ b/tools/perf/tests/shell/evlist.sh
@@ -0,0 +1,79 @@
+#!/bin/bash
+# perf evlist tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+
+cleanup() {
+ rm -f "${perfdata}"
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+test_evlist_simple() {
+ echo "Simple evlist test"
+ if ! perf record -e cycles -o "${perfdata}" true 2> /dev/null
+ then
+ echo "Simple evlist [Failed record]"
+ err=1
+ return
+ fi
+ if ! perf evlist -i "${perfdata}" | grep -q "cycles"
+ then
+ echo "Simple evlist [Failed to list event]"
+ err=1
+ return
+ fi
+ echo "Simple evlist test [Success]"
+}
+
+test_evlist_group() {
+ echo "Group evlist test"
+ if ! perf record -e "{cycles,instructions}" -o "${perfdata}" true 2> /dev/null
+ then
+ echo "Group evlist [Skipped event group recording failed]"
+ return
+ fi
+
+ if ! perf evlist -i "${perfdata}" -g | grep -q "{.*cycles.*,.*instructions.*}"
+ then
+ echo "Group evlist [Failed to list event group]"
+ err=1
+ return
+ fi
+ echo "Group evlist test [Success]"
+}
+
+test_evlist_verbose() {
+ echo "Event configuration evlist test"
+ if ! perf record -e cycles -o "${perfdata}" true 2> /dev/null
+ then
+ echo "Event configuration evlist [Failed record]"
+ err=1
+ return
+ fi
+
+ if ! perf evlist -i "${perfdata}" -v | grep -q "config:"
+ then
+ echo "Event configuration evlist [Failed to list verbose info]"
+ err=1
+ return
+ fi
+ echo "Event configuration evlist test [Success]"
+}
+
+test_evlist_simple
+test_evlist_group
+test_evlist_verbose
+
+cleanup
+exit $err
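
The group test depends on perf's brace syntax, which schedules all group members on the PMU together so their counts stay comparable. The same syntax works with other perf subcommands, for example:

    #!/bin/bash
    # cycles and instructions are counted as a single group;
    # perf evlist -g renders such a group as roughly "{cycles, instructions}".
    perf stat -e '{cycles,instructions}' -- true
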
diff --git a/tools/perf/tests/shell/ftrace.sh b/tools/perf/tests/shell/ftrace.sh
index c243731d2fbf..7f8aafcbb761 100755
--- a/tools/perf/tests/shell/ftrace.sh
+++ b/tools/perf/tests/shell/ftrace.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf ftrace tests
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/header.sh b/tools/perf/tests/shell/header.sh
new file mode 100755
index 000000000000..e1628ac0a614
--- /dev/null
+++ b/tools/perf/tests/shell/header.sh
@@ -0,0 +1,74 @@
+#!/bin/bash
+# perf header tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+perfdata=$(mktemp /tmp/__perf_test_header.perf.data.XXXXX)
+script_output=$(mktemp /tmp/__perf_test_header.perf.data.XXXXX.script)
+
+cleanup() {
+ rm -f "${perfdata}"
+ rm -f "${perfdata}".old
+ rm -f "${script_output}"
+
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+check_header_output() {
+ declare -a fields=(
+ "captured"
+ "hostname"
+ "os release"
+ "arch"
+ "cpuid"
+ "nrcpus"
+ "event"
+ "cmdline"
+ "perf version"
+ "sibling (cores|dies|threads)"
+ "sibling threads"
+ "total memory"
+ )
+ for i in "${fields[@]}"
+ do
+ if ! grep -q -E "$i" "${script_output}"
+ then
+ echo "Failed to find expected $i in output"
+ err=1
+ fi
+ done
+}
+
+test_file() {
+ echo "Test perf header file"
+
+ perf record -o "${perfdata}" -- perf test -w noploop
+ perf report --header-only -I -i "${perfdata}" > "${script_output}"
+ check_header_output
+
+ echo "Test perf header file [Done]"
+}
+
+test_pipe() {
+ echo "Test perf header pipe"
+
+ perf record -o - -- perf test -w noploop | perf report --header-only -I -i - > "${script_output}"
+ check_header_output
+
+ echo "Test perf header pipe [Done]"
+}
+
+test_file
+test_pipe
+
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/jitdump-python.sh b/tools/perf/tests/shell/jitdump-python.sh
new file mode 100755
index 000000000000..ae86203b14a2
--- /dev/null
+++ b/tools/perf/tests/shell/jitdump-python.sh
@@ -0,0 +1,81 @@
+#!/bin/bash
+# python profiling with jitdump
+# SPDX-License-Identifier: GPL-2.0
+
+SHELLDIR=$(dirname $0)
+# shellcheck source=lib/setup_python.sh
+. "${SHELLDIR}"/lib/setup_python.sh
+
+OUTPUT=$(${PYTHON} -Xperf_jit -c 'import os, sys; print(os.getpid(), sys.is_stack_trampoline_active())' 2> /dev/null)
+PID=${OUTPUT% *}
+HAS_PERF_JIT=${OUTPUT#* }
+
+rm -f /tmp/jit-${PID}.dump 2> /dev/null
+if [ "${HAS_PERF_JIT}" != "True" ]; then
+ echo "SKIP: python JIT dump is not available"
+ exit 2
+fi
+
+PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXXX)
+
+cleanup() {
+ echo "Cleaning up files..."
+ rm -f ${PERF_DATA} ${PERF_DATA}.jit /tmp/jit-${PID}.dump /tmp/jitted-${PID}-*.so 2> /dev/null
+
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected termination"
+ cleanup
+ exit 1
+}
+
+trap trap_cleanup EXIT TERM INT
+
+echo "Run python with -Xperf_jit"
+cat <<EOF | perf record -k 1 -g --call-graph dwarf -o "${PERF_DATA}" \
+ -- ${PYTHON} -Xperf_jit
+def foo(n):
+ result = 0
+ for _ in range(n):
+ result += 1
+ return result
+
+def bar(n):
+ foo(n)
+
+def baz(n):
+ bar(n)
+
+if __name__ == "__main__":
+ baz(1000000)
+EOF
+
+# extract PID of the target process from the data
+_PID=$(perf report -i "${PERF_DATA}" -F pid -q -g none | cut -d: -f1 -s)
+PID=$(echo -n $_PID) # remove newlines
+
+echo "Generate JIT-ed DSOs using perf inject"
+DEBUGINFOD_URLS='' perf inject -i "${PERF_DATA}" -j -o "${PERF_DATA}.jit"
+
+echo "Add JIT-ed DSOs to the build-ID cache"
+for F in /tmp/jitted-${PID}-*.so; do
+ perf buildid-cache -a "${F}"
+done
+
+echo "Check the symbol containing the function/module name"
+NUM=$(perf report -i "${PERF_DATA}.jit" -s sym | grep -cE 'py::(foo|bar|baz):<stdin>')
+
+echo "Found ${NUM} matching lines"
+
+echo "Remove JIT-ed DSOs from the build-ID cache"
+for F in /tmp/jitted-${PID}-*.so; do
+ perf buildid-cache -r "${F}"
+done
+
+cleanup
+
+if [ "${NUM}" -eq 0 ]; then
+ exit 1
+fi
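
Condensed, the jitdump round trip this test exercises is record, inject, then report. A sketch of the same flow run by hand, assuming a CPython new enough to support -Xperf_jit (3.12+); the script name is a placeholder, and -k selects the clock id that lets perf inject correlate samples with the jit dump:

    #!/bin/bash
    perf record -k 1 -o perf.data -- python3 -Xperf_jit myscript.py
    perf inject -i perf.data -j -o perf.data.jit   # folds /tmp/jit-<pid>.dump into the data
    perf report -i perf.data.jit -s sym            # jitted frames resolve as py::<function>
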
diff --git a/tools/perf/tests/shell/kallsyms.sh b/tools/perf/tests/shell/kallsyms.sh
new file mode 100755
index 000000000000..d0eb99753f47
--- /dev/null
+++ b/tools/perf/tests/shell/kallsyms.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+# perf kallsyms tests
+# SPDX-License-Identifier: GPL-2.0
+
+err=0
+
+test_kallsyms() {
+ echo "Basic perf kallsyms test"
+
+ # Check if /proc/kallsyms is readable
+ if [ ! -r /proc/kallsyms ]; then
+ echo "Basic perf kallsyms test [Skipped: /proc/kallsyms not readable]"
+ err=2
+ return
+ fi
+
+ # Use a symbol that is definitely a function and present in all kernels, e.g. schedule
+ symbol="schedule"
+
+ # Run perf kallsyms
+ # It prints "address symbol_name"
+ output=$(perf kallsyms $symbol 2>&1)
+ ret=$?
+
+ if [ $ret -ne 0 ] || [ -z "$output" ]; then
+ # If empty or failed, it might be due to permissions (kptr_restrict)
+ # Check if we can grep the symbol from /proc/kallsyms directly
+ if grep -q "$symbol" /proc/kallsyms 2>/dev/null; then
+ # If it's in /proc/kallsyms but perf kallsyms returned empty/error,
+ # it likely means perf couldn't parse it or access it correctly (e.g. kptr_restrict=2).
+ echo "Basic perf kallsyms test [Skipped: $symbol found in /proc/kallsyms but perf kallsyms failed (output: '$output')]"
+ err=2
+ return
+ else
+ echo "Basic perf kallsyms test [Skipped: $symbol not found in /proc/kallsyms]"
+ err=2
+ return
+ fi
+ fi
+
+ if echo "$output" | grep -q "not found"; then
+ echo "Basic perf kallsyms test [Failed: output '$output' does not contain $symbol]"
+ err=1
+ return
+ fi
+
+ if perf kallsyms ErlingHaaland | grep -vq "not found"; then
+ echo "Basic perf kallsyms test [Failed: ErlingHaaland found in the output]"
+ err=1
+ return
+ fi
+ echo "Basic perf kallsyms test [Success]"
+}
+
+test_kallsyms
+exit $err
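
As the comments note, an empty perf kallsyms result for a symbol that does exist in /proc/kallsyms usually points at kernel.kptr_restrict. A quick manual check, using the standard sysctl path (the interpretation is a simplification of the kernel's rules):

    #!/bin/bash
    # 0: addresses visible, 1: hidden from unprivileged readers, 2: hidden from everyone.
    restrict=$(cat /proc/sys/kernel/kptr_restrict)
    echo "kptr_restrict=${restrict}"
    if [ "${restrict}" -ge 1 ] && [ "$(id -u)" != 0 ]; then
        echo "kernel addresses are likely shown as zeros for this user"
    fi
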
diff --git a/tools/perf/tests/shell/kvm.sh b/tools/perf/tests/shell/kvm.sh
new file mode 100755
index 000000000000..2fafde1a29cc
--- /dev/null
+++ b/tools/perf/tests/shell/kvm.sh
@@ -0,0 +1,154 @@
+#!/bin/bash
+# perf kvm tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+perfdata=$(mktemp /tmp/__perf_kvm_test.perf.data.XXXXX)
+qemu_pid_file=$(mktemp /tmp/__perf_kvm_test.qemu.pid.XXXXX)
+
+cleanup() {
+ rm -f "${perfdata}"
+ if [ -f "${qemu_pid_file}" ]; then
+ if [ -s "${qemu_pid_file}" ]; then
+ qemu_pid=$(cat "${qemu_pid_file}")
+ if [ -n "${qemu_pid}" ]; then
+ kill "${qemu_pid}" 2>/dev/null || true
+ fi
+ fi
+ rm -f "${qemu_pid_file}"
+ fi
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+skip() {
+ echo "Skip: $1"
+ cleanup
+ exit 2
+}
+
+test_kvm_stat() {
+ echo "Testing perf kvm stat"
+
+ echo "Recording kvm events for pid ${qemu_pid}..."
+ if ! perf kvm stat record -p "${qemu_pid}" -o "${perfdata}" sleep 1; then
+ echo "Failed to record kvm events"
+ err=1
+ return
+ fi
+
+ echo "Reporting kvm events..."
+ if ! perf kvm -i "${perfdata}" stat report 2>&1 | grep -q "VM-EXIT"; then
+ echo "Failed to find VM-EXIT in report"
+ perf kvm -i "${perfdata}" stat report 2>&1
+ err=1
+ return
+ fi
+
+ echo "perf kvm stat test [Success]"
+}
+
+test_kvm_record_report() {
+ echo "Testing perf kvm record/report"
+
+ echo "Recording kvm profile for pid ${qemu_pid}..."
+ # Use --host to avoid needing guest symbols/mounts for this simple test
+ # We just want to verify the command runs and produces data
+ # We run in background and kill it because 'perf kvm record' appends options
+ # after the command, which breaks 'sleep' (e.g. it gets '-e cycles').
+ perf kvm --host record -p "${qemu_pid}" -o "${perfdata}" &
+ rec_pid=$!
+ sleep 1
+ kill -INT "${rec_pid}"
+ wait "${rec_pid}" || true
+
+ echo "Reporting kvm profile..."
+ # Check for some standard output from report
+ if ! perf kvm -i "${perfdata}" report --stdio 2>&1 | grep -q "Event count"; then
+ echo "Failed to report kvm profile"
+ perf kvm -i "${perfdata}" report --stdio 2>&1
+ err=1
+ return
+ fi
+
+ echo "perf kvm record/report test [Success]"
+}
+
+test_kvm_buildid_list() {
+ echo "Testing perf kvm buildid-list"
+
+ # We reuse the perf.data from the previous record test
+ if ! perf kvm --host -i "${perfdata}" buildid-list 2>&1 | grep -q "."; then
+ echo "Failed to list buildids"
+ perf kvm --host -i "${perfdata}" buildid-list 2>&1
+ err=1
+ return
+ fi
+
+ echo "perf kvm buildid-list test [Success]"
+}
+
+setup_qemu() {
+ # Find qemu
+ if [ "$(uname -m)" = "x86_64" ]; then
+ qemu="qemu-system-x86_64"
+ elif [ "$(uname -m)" = "aarch64" ]; then
+ qemu="qemu-system-aarch64"
+ elif [ "$(uname -m)" = "s390x" ]; then
+ qemu="qemu-system-s390x"
+ elif [ "$(uname -m)" = "ppc64le" ]; then
+ qemu="qemu-system-ppc64"
+ else
+ qemu="qemu-system-$(uname -m)"
+ fi
+
+ if ! command -v "$qemu" > /dev/null; then
+ skip "$qemu not found"
+ fi
+
+ if [ ! -r /dev/kvm ] || [ ! -w /dev/kvm ]; then
+ skip "/dev/kvm not accessible"
+ fi
+
+ if ! perf kvm stat record -a sleep 0.01 >/dev/null 2>&1; then
+ skip "No permission to record kvm events"
+ fi
+
+ echo "Starting $qemu..."
+ # Start qemu in background, detached, with pidfile
+ # We use -display none -daemonize and a monitor to keep it alive/controllable if needed
+ # We don't need a real kernel, just KVM active.
+ if ! $qemu -enable-kvm -display none -daemonize -pidfile "${qemu_pid_file}" -monitor none; then
+ echo "Failed to start qemu"
+ err=1
+ return
+ fi
+
+ # Wait a bit for qemu to start
+ sleep 1
+ qemu_pid=$(cat "${qemu_pid_file}")
+
+ if ! kill -0 "${qemu_pid}" 2>/dev/null; then
+ echo "Qemu process failed to stay alive"
+ err=1
+ return
+ fi
+}
+
+setup_qemu
+if [ $err -eq 0 ]; then
+ test_kvm_stat
+ test_kvm_record_report
+ test_kvm_buildid_list
+fi
+
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/lib/perf_has_symbol.sh b/tools/perf/tests/shell/lib/perf_has_symbol.sh
index 561c93b75d77..0b35cce0b13d 100644
--- a/tools/perf/tests/shell/lib/perf_has_symbol.sh
+++ b/tools/perf/tests/shell/lib/perf_has_symbol.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
perf_has_symbol()
diff --git a/tools/perf/tests/shell/lib/perf_json_output_lint.py b/tools/perf/tests/shell/lib/perf_json_output_lint.py
index 9e772a89ce38..dafbde56cc76 100644
--- a/tools/perf/tests/shell/lib/perf_json_output_lint.py
+++ b/tools/perf/tests/shell/lib/perf_json_output_lint.py
@@ -43,9 +43,12 @@ def isint(num):
def is_counter_value(num):
return isfloat(num) or num == '<not counted>' or num == '<not supported>'
+def is_metric_value(num):
+ return isfloat(num) or num == 'none'
+
def check_json_output(expected_items):
checks = {
- 'aggregate-number': lambda x: isfloat(x),
+ 'counters': lambda x: isfloat(x),
'core': lambda x: True,
'counter-value': lambda x: is_counter_value(x),
'cgroup': lambda x: True,
@@ -57,7 +60,7 @@ def check_json_output(expected_items):
'event-runtime': lambda x: isfloat(x),
'interval': lambda x: isfloat(x),
'metric-unit': lambda x: True,
- 'metric-value': lambda x: isfloat(x),
+ 'metric-value': lambda x: is_metric_value(x),
'metric-threshold': lambda x: x in ['unknown', 'good', 'less good', 'nearly bad', 'bad'],
'metricgroup': lambda x: True,
'node': lambda x: True,
@@ -65,8 +68,6 @@ def check_json_output(expected_items):
'socket': lambda x: True,
'thread': lambda x: True,
'unit': lambda x: True,
- 'insn per cycle': lambda x: isfloat(x),
- 'GHz': lambda x: True, # FIXME: it seems unintended for --metric-only
}
input = '[\n' + ','.join(Lines) + '\n]'
for item in json.loads(input):
@@ -75,7 +76,7 @@ def check_json_output(expected_items):
if count not in expected_items and count >= 1 and count <= 7 and 'metric-value' in item:
# Events that generate >1 metric may have isolated metric
# values and possibly other prefixes like interval, core,
- # aggregate-number, or event-runtime/pcnt-running from multiplexing.
+ # counters, or event-runtime/pcnt-running from multiplexing.
pass
elif count not in expected_items and count >= 1 and count <= 5 and 'metricgroup' in item:
pass
@@ -88,6 +89,8 @@ def check_json_output(expected_items):
f' in \'{item}\'')
for key, value in item.items():
if key not in checks:
+ if args.metric_only:
+ continue
raise RuntimeError(f'Unexpected key: key={key} value={value}')
if not checks[key](value):
raise RuntimeError(f'Check failed for: key={key} value={value}')
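
For context, the linter reads one JSON object per line as produced by perf stat -j, which writes to stderr by default. An illustrative object using keys from the checks table above; the values are made up, not captured output:

    #!/bin/bash
    # Peek at the first JSON line perf stat -j emits.
    perf stat -j -e cycles -- true 2>&1 | head -1
    # Illustrative shape (values invented):
    #   {"counter-value" : "123456.0", "unit" : "", "event" : "cycles"}
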
diff --git a/tools/perf/tests/shell/lib/perf_metric_validation.py b/tools/perf/tests/shell/lib/perf_metric_validation.py
index 0b94216c9c46..dea8ef1977bf 100644
--- a/tools/perf/tests/shell/lib/perf_metric_validation.py
+++ b/tools/perf/tests/shell/lib/perf_metric_validation.py
@@ -35,7 +35,8 @@ class TestError:
class Validator:
- def __init__(self, rulefname, reportfname='', t=5, debug=False, datafname='', fullrulefname='', workload='true', metrics=''):
+ def __init__(self, rulefname, reportfname='', t=5, debug=False, datafname='', fullrulefname='',
+ workload='true', metrics='', cputype='cpu'):
self.rulefname = rulefname
self.reportfname = reportfname
self.rules = None
@@ -43,6 +44,7 @@ class Validator:
self.metrics = self.__set_metrics(metrics)
self.skiplist = set()
self.tolerance = t
+ self.cputype = cputype
self.workloads = [x for x in workload.split(",") if x]
self.wlidx = 0 # idx of current workloads
@@ -377,7 +379,7 @@ class Validator:
def _run_perf(self, metric, workload: str):
tool = 'perf'
- command = [tool, 'stat', '-j', '-M', f"{metric}", "-a"]
+ command = [tool, 'stat', '--cputype', self.cputype, '-j', '-M', f"{metric}", "-a"]
wl = workload.split()
command.extend(wl)
print(" ".join(command))
@@ -443,6 +445,8 @@ class Validator:
if 'MetricName' not in m:
print("Warning: no metric name")
continue
+ if 'Unit' in m and m['Unit'] != self.cputype:
+ continue
name = m['MetricName'].lower()
self.metrics.add(name)
if 'ScaleUnit' in m and (m['ScaleUnit'] == '1%' or m['ScaleUnit'] == '100%'):
@@ -578,6 +582,8 @@ def main() -> None:
parser.add_argument(
"-wl", help="Workload to run while data collection", default="true")
parser.add_argument("-m", help="Metric list to validate", default="")
+ parser.add_argument("-cputype", help="Only test metrics for the given CPU/PMU type",
+ default="cpu")
args = parser.parse_args()
outpath = Path(args.output_dir)
reportf = Path.joinpath(outpath, 'perf_report.json')
@@ -586,7 +592,7 @@ def main() -> None:
validator = Validator(args.rule, reportf, debug=args.debug,
datafname=datafile, fullrulefname=fullrule, workload=args.wl,
- metrics=args.m)
+ metrics=args.m, cputype=args.cputype)
ret = validator.test()
return ret
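
With the new option a hybrid machine can be validated one PMU at a time. A hypothetical invocation follows; the -rule and -output_dir spellings are assumptions (only -cputype, -wl and -m are visible above), and the internal command is the one _run_perf issues per metric:

    #!/bin/bash
    # Hypothetical: restrict validation to the cpu_core PMU.
    python3 perf_metric_validation.py -rule rules.json -output_dir /tmp/out -cputype cpu_core
    # Internally, for each metric M, the validator runs:
    #   perf stat --cputype cpu_core -j -M M -a true
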
diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
index 5c33ec7a5a63..88cd0e26d5f6 100644
--- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
perf probe -l 2>&1 | grep -q probe:vfs_getname
@@ -13,14 +13,28 @@ cleanup_probe_vfs_getname() {
add_probe_vfs_getname() {
add_probe_verbose=$1
if [ $had_vfs_getname -eq 1 ] ; then
- result_filename_re="[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*"
- line=$(perf probe -L getname_flags 2>&1 | grep -E "$result_filename_re" | sed -r "s/$result_filename_re/\1/")
+ result_initname_re="[[:space:]]+([[:digit:]]+)[[:space:]]+initname.*"
+ line=$(perf probe -L getname_flags 2>&1 | grep -E "$result_initname_re" | sed -r "s/$result_initname_re/\1/")
+
+ # Search the old regular expressions so that this will
+ # pass on older kernels as well.
+ if [ -z "$line" ] ; then
+ result_filename_re="[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*"
+ line=$(perf probe -L getname_flags 2>&1 | grep -E "$result_filename_re" | sed -r "s/$result_filename_re/\1/")
+ fi
+
if [ -z "$line" ] ; then
result_aname_re="[[:space:]]+([[:digit:]]+)[[:space:]]+result->aname = NULL;"
line=$(perf probe -L getname_flags 2>&1 | grep -E "$result_aname_re" | sed -r "s/$result_aname_re/\1/")
fi
+
+ if [ -z "$line" ] ; then
+ echo "Could not find probeable line"
+ return 2
+ fi
+
perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
- perf probe $add_probe_verbose "vfs_getname=getname_flags:${line} pathname=filename:ustring"
+ perf probe $add_probe_verbose "vfs_getname=getname_flags:${line} pathname=filename:ustring" || return 1
fi
}
diff --git a/tools/perf/tests/shell/lib/setup_python.sh b/tools/perf/tests/shell/lib/setup_python.sh
index c2fce1793538..a58e5536f2ed 100644
--- a/tools/perf/tests/shell/lib/setup_python.sh
+++ b/tools/perf/tests/shell/lib/setup_python.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
if [ "x$PYTHON" = "x" ]
diff --git a/tools/perf/tests/shell/lib/stat_output.sh b/tools/perf/tests/shell/lib/stat_output.sh
index 4d4aac547f01..3c36e80fe422 100644
--- a/tools/perf/tests/shell/lib/stat_output.sh
+++ b/tools/perf/tests/shell/lib/stat_output.sh
@@ -151,7 +151,12 @@ check_per_socket()
check_metric_only()
{
echo -n "Checking $1 output: metric only "
- perf stat --metric-only $2 -e instructions,cycles true
+ if [ "$(uname -m)" = "s390x" ] && ! grep '^facilities' /proc/cpuinfo | grep -qw 67
+ then
+ echo "[Skip] CPU-measurement counter facility not installed"
+ return
+ fi
+ perf stat --metric-only $2 -M page_faults_per_second true
commachecker --metric-only
echo "[Success]"
}
diff --git a/tools/perf/tests/shell/lib/waiting.sh b/tools/perf/tests/shell/lib/waiting.sh
index bdd5a7c71591..3a152892e077 100644
--- a/tools/perf/tests/shell/lib/waiting.sh
+++ b/tools/perf/tests/shell/lib/waiting.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
tenths=date\ +%s%1N
diff --git a/tools/perf/tests/shell/list.sh b/tools/perf/tests/shell/list.sh
index 76a9846cff22..0c04b3159cef 100755
--- a/tools/perf/tests/shell/list.sh
+++ b/tools/perf/tests/shell/list.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf list tests
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/lock_contention.sh b/tools/perf/tests/shell/lock_contention.sh
index 30d195d4c62f..6dd90519f45c 100755
--- a/tools/perf/tests/shell/lock_contention.sh
+++ b/tools/perf/tests/shell/lock_contention.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# kernel lock contention analysis test
# SPDX-License-Identifier: GPL-2.0
@@ -7,18 +7,24 @@ set -e
err=0
perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
result=$(mktemp /tmp/__perf_test.result.XXXXX)
+errout=$(mktemp /tmp/__perf_test.errout.XXXXX)
cleanup() {
rm -f ${perfdata}
rm -f ${result}
- trap - EXIT TERM INT
+ rm -f ${errout}
+ trap - EXIT TERM INT ERR
}
trap_cleanup() {
+ if (( $? == 139 )); then # SIGSEGV (128 + 11)
+ err=1
+ fi
+ echo "Unexpected signal in ${FUNCNAME[1]}"
cleanup
exit ${err}
}
-trap trap_cleanup EXIT TERM INT
+trap trap_cleanup EXIT TERM INT ERR
check() {
if [ "$(id -u)" != 0 ]; then
@@ -44,7 +50,7 @@ check() {
test_record()
{
echo "Testing perf lock record and perf lock contention"
- perf lock record -o ${perfdata} -- perf bench sched messaging > /dev/null 2>&1
+ perf lock record -o ${perfdata} -- perf bench sched messaging -p > /dev/null 2>&1
# the output goes to the stderr and we expect only 1 output (-E 1)
perf lock contention -i ${perfdata} -E 1 -q 2> ${result}
if [ "$(cat "${result}" | wc -l)" != "1" ]; then
@@ -64,7 +70,7 @@ test_bpf()
fi
# the perf lock contention output goes to the stderr
- perf lock con -a -b -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result}
+ perf lock con -a -b -E 1 -q -- perf bench sched messaging -p > /dev/null 2> ${result}
if [ "$(cat "${result}" | wc -l)" != "1" ]; then
echo "[Fail] BPF result count is not 1:" "$(cat "${result}" | wc -l)"
err=1
@@ -75,10 +81,12 @@ test_bpf()
test_record_concurrent()
{
echo "Testing perf lock record and perf lock contention at the same time"
- perf lock record -o- -- perf bench sched messaging 2> /dev/null | \
+ perf lock record -o- -- perf bench sched messaging -p 2> ${errout} | \
perf lock contention -i- -E 1 -q 2> ${result}
if [ "$(cat "${result}" | wc -l)" != "1" ]; then
echo "[Fail] Recorded result count is not 1:" "$(cat "${result}" | wc -l)"
+ cat ${errout}
+ cat ${result}
err=1
exit
fi
@@ -99,7 +107,7 @@ test_aggr_task()
fi
# the perf lock contention output goes to the stderr
- perf lock con -a -b -t -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result}
+ perf lock con -a -b -t -E 1 -q -- perf bench sched messaging -p > /dev/null 2> ${result}
if [ "$(cat "${result}" | wc -l)" != "1" ]; then
echo "[Fail] BPF result count is not 1:" "$(cat "${result}" | wc -l)"
err=1
@@ -122,7 +130,7 @@ test_aggr_addr()
fi
# the perf lock contention output goes to the stderr
- perf lock con -a -b -l -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result}
+ perf lock con -a -b -l -E 1 -q -- perf bench sched messaging -p > /dev/null 2> ${result}
if [ "$(cat "${result}" | wc -l)" != "1" ]; then
echo "[Fail] BPF result count is not 1:" "$(cat "${result}" | wc -l)"
err=1
@@ -140,7 +148,7 @@ test_aggr_cgroup()
fi
# the perf lock contention output goes to the stderr
- perf lock con -a -b -g -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result}
+ perf lock con -a -b --lock-cgroup -E 1 -q -- perf bench sched messaging -p > /dev/null 2> ${result}
if [ "$(cat "${result}" | wc -l)" != "1" ]; then
echo "[Fail] BPF result count is not 1:" "$(cat "${result}" | wc -l)"
err=1
@@ -162,7 +170,7 @@ test_type_filter()
return
fi
- perf lock con -a -b -Y spinlock -q -- perf bench sched messaging > /dev/null 2> ${result}
+ perf lock con -a -b -Y spinlock -q -- perf bench sched messaging -p > /dev/null 2> ${result}
if [ "$(grep -c -v spinlock "${result}")" != "0" ]; then
echo "[Fail] BPF result should not have non-spinlocks:" "$(cat "${result}")"
err=1
@@ -194,7 +202,7 @@ test_lock_filter()
return
fi
- perf lock con -a -b -L tasklist_lock -q -- perf bench sched messaging > /dev/null 2> ${result}
+ perf lock con -a -b -L tasklist_lock -q -- perf bench sched messaging -p > /dev/null 2> ${result}
if [ "$(grep -c -v "${test_lock_filter_type}" "${result}")" != "0" ]; then
echo "[Fail] BPF result should not have non-${test_lock_filter_type} locks:" "$(cat "${result}")"
err=1
@@ -222,7 +230,7 @@ test_stack_filter()
return
fi
- perf lock con -a -b -S unix_stream -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result}
+ perf lock con -a -b -S unix_stream -E 1 -q -- perf bench sched messaging -p > /dev/null 2> ${result}
if [ "$(cat "${result}" | wc -l)" != "1" ]; then
echo "[Fail] BPF result should have a lock from unix_stream:" "$(cat "${result}")"
err=1
@@ -250,7 +258,7 @@ test_aggr_task_stack_filter()
return
fi
- perf lock con -a -b -t -S unix_stream -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result}
+ perf lock con -a -b -t -S unix_stream -E 1 -q -- perf bench sched messaging -p > /dev/null 2> ${result}
if [ "$(cat "${result}" | wc -l)" != "1" ]; then
echo "[Fail] BPF result should have a task from unix_stream:" "$(cat "${result}")"
err=1
@@ -266,7 +274,7 @@ test_cgroup_filter()
return
fi
- perf lock con -a -b -g -E 1 -F wait_total -q -- perf bench sched messaging > /dev/null 2> ${result}
+ perf lock con -a -b --lock-cgroup -E 1 -F wait_total -q -- perf bench sched messaging -p > /dev/null 2> ${result}
if [ "$(cat "${result}" | wc -l)" != "1" ]; then
echo "[Fail] BPF result should have a cgroup result:" "$(cat "${result}")"
err=1
@@ -274,7 +282,7 @@ test_cgroup_filter()
fi
cgroup=$(cat "${result}" | awk '{ print $3 }')
- perf lock con -a -b -g -E 1 -G "${cgroup}" -q -- perf bench sched messaging > /dev/null 2> ${result}
+ perf lock con -a -b --lock-cgroup -E 1 -G "${cgroup}" -q -- perf bench sched messaging -p > /dev/null 2> ${result}
if [ "$(cat "${result}" | wc -l)" != "1" ]; then
echo "[Fail] BPF result should have a result with cgroup filter:" "$(cat "${cgroup}")"
err=1
@@ -309,7 +317,7 @@ test_csv_output()
fi
# the perf lock contention output goes to the stderr
- perf lock con -a -b -E 1 -x , --output ${result} -- perf bench sched messaging > /dev/null 2>&1
+ perf lock con -a -b -E 1 -x , --output ${result} -- perf bench sched messaging -p > /dev/null 2>&1
output=$(grep -v "^#" ${result} | tr -d -c , | wc -c)
if [ "${header}" != "${output}" ]; then
echo "[Fail] BPF result does not match the number of commas: ${header} != ${output}"
@@ -333,4 +341,5 @@ test_aggr_task_stack_filter
test_cgroup_filter
test_csv_output
+cleanup
exit ${err}
diff --git a/tools/perf/tests/shell/perf-report-hierarchy.sh b/tools/perf/tests/shell/perf-report-hierarchy.sh
new file mode 100755
index 000000000000..e3c6f9a24f33
--- /dev/null
+++ b/tools/perf/tests/shell/perf-report-hierarchy.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+# perf report --hierarchy
+# SPDX-License-Identifier: GPL-2.0
+# Arnaldo Carvalho de Melo <acme@redhat.com>
+
+set -e
+
+temp_dir=$(mktemp -d /tmp/perf-test-report.XXXXXXXXXX)
+
+cleanup()
+{
+ trap - EXIT TERM INT
+ sane=$(echo "${temp_dir}" | cut -b 1-21)
+ if [ "${sane}" = "/tmp/perf-test-report" ] ; then
+ echo "--- Cleaning up ---"
+ rm -rf "${temp_dir:?}/"*
+ rmdir "${temp_dir}"
+ fi
+}
+
+trap_cleanup()
+{
+ cleanup
+ exit 1
+}
+
+trap trap_cleanup EXIT TERM INT
+
+test_report_hierarchy()
+{
+ echo "perf report --hierarchy"
+
+ perf_data="${temp_dir}/perf-report-hierarchy-perf.data"
+ perf record -o "${perf_data}" uname
+ perf report --hierarchy -i "${perf_data}" > /dev/null
+ echo "perf report --hierarchy test [Success]"
+}
+
+test_report_hierarchy
+
+cleanup
+
+exit 0
diff --git a/tools/perf/tests/shell/probe_vfs_getname.sh b/tools/perf/tests/shell/probe_vfs_getname.sh
index c51a32931af6..5fe5682c28ce 100755
--- a/tools/perf/tests/shell/probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/probe_vfs_getname.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# Add vfs_getname probe to get syscall args filenames (exclusive)
# SPDX-License-Identifier: GPL-2.0
@@ -13,7 +13,13 @@ skip_if_no_perf_probe || exit 2
# shellcheck source=lib/probe_vfs_getname.sh
. "$(dirname $0)"/lib/probe_vfs_getname.sh
-add_probe_vfs_getname || skip_if_no_debuginfo
+add_probe_vfs_getname
err=$?
+
+if [ $err -eq 1 ] ; then
+ skip_if_no_debuginfo
+ err=$?
+fi
+
cleanup_probe_vfs_getname
exit $err
diff --git a/tools/perf/tests/shell/python-use.sh b/tools/perf/tests/shell/python-use.sh
new file mode 100755
index 000000000000..fd2ee5390060
--- /dev/null
+++ b/tools/perf/tests/shell/python-use.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+# 'import perf' in python
+# SPDX-License-Identifier: GPL-2.0
+# Just test if we can load the python binding.
+set -e
+
+shelldir=$(dirname "$0")
+# shellcheck source=lib/setup_python.sh
+. "${shelldir}"/lib/setup_python.sh
+
+MODULE_DIR=$(dirname "$(which perf)")/python
+
+if [ -d "$MODULE_DIR" ]
+then
+ CMD=$(cat <<EOF
+import sys
+sys.path.insert(0, '$MODULE_DIR')
+import perf
+print('success!')
+EOF
+ )
+else
+ CMD=$(cat <<EOF
+import perf
+print('success!')
+EOF
+ )
+fi
+
+echo -e "Testing 'import perf' with:\n$CMD"
+
+if ! echo "$CMD" | $PYTHON | grep -q "success!"
+then
+ exit 1
+fi
+exit 0
diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
index c4bab5b5cc59..ab99bef556bf 100755
--- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# probe libc's inet_pton & backtrace it with ping (exclusive)
# Installs a probe on libc's inet_pton function, that will use uprobes,
@@ -18,12 +18,13 @@
libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1/g')
nm -Dg $libc 2>/dev/null | grep -F -q inet_pton || exit 254
-event_pattern='probe_libc:inet_pton(\_[[:digit:]]+)?'
+event_pattern='probe_libc:inet_pton(_[[:digit:]]+)?'
add_libc_inet_pton_event() {
event_name=$(perf probe -f -x $libc -a inet_pton 2>&1 | tail -n +2 | head -n -5 | \
- grep -P -o "$event_pattern(?=[[:space:]]\(on inet_pton in $libc\))")
+ awk -v ep="$event_pattern" -v l="$libc" '$0 ~ ep && $0 ~ \
+ ("\\(on inet_pton in " l "\\)") {print $1}')
if [ $? -ne 0 ] || [ -z "$event_name" ] ; then
printf "FAIL: could not add event\n"
diff --git a/tools/perf/tests/shell/record+script_probe_vfs_getname.sh b/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
index fd5b10d46915..002f7037f182 100755
--- a/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# Use vfs_getname probe to get syscall args filenames (exclusive)
# Uses the 'perf test shell' library to add probe:vfs_getname to the system
@@ -35,8 +35,14 @@ perf_script_filenames() {
grep -E " +touch +[0-9]+ +\[[0-9]+\] +[0-9]+\.[0-9]+: +probe:vfs_getname[_0-9]*: +\([[:xdigit:]]+\) +pathname=\"${file}\""
}
-add_probe_vfs_getname || skip_if_no_debuginfo
+add_probe_vfs_getname
err=$?
+
+if [ $err -eq 1 ] ; then
+ skip_if_no_debuginfo
+ err=$?
+fi
+
if [ $err -ne 0 ] ; then
exit $err
fi
diff --git a/tools/perf/tests/shell/record+zstd_comp_decomp.sh b/tools/perf/tests/shell/record+zstd_comp_decomp.sh
index 8929046e9057..f6b82223834e 100755
--- a/tools/perf/tests/shell/record+zstd_comp_decomp.sh
+++ b/tools/perf/tests/shell/record+zstd_comp_decomp.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# Zstd perf.data compression/decompression
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/record.sh b/tools/perf/tests/shell/record.sh
index ba8d873d3ca7..0f5841c479e7 100755
--- a/tools/perf/tests/shell/record.sh
+++ b/tools/perf/tests/shell/record.sh
@@ -12,8 +12,10 @@ shelldir=$(dirname "$0")
. "${shelldir}"/lib/perf_has_symbol.sh
testsym="test_loop"
+testsym2="brstack"
skip_test_missing_symbol ${testsym}
+skip_test_missing_symbol ${testsym2}
err=0
perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
@@ -34,13 +36,15 @@ default_fd_limit=$(ulimit -Sn)
min_fd_limit=$(($(getconf _NPROCESSORS_ONLN) * 16))
cleanup() {
- rm -rf "${perfdata}"
- rm -rf "${perfdata}".old
+ rm -f "${perfdata}"
+ rm -f "${perfdata}".old
+ rm -f "${script_output}"
trap - EXIT TERM INT
}
trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
cleanup
exit 1
}
@@ -229,6 +233,31 @@ test_cgroup() {
echo "Cgroup sampling test [Success]"
}
+test_uid() {
+ echo "Uid sampling test"
+ if ! perf record -aB --synth=no --uid "$(id -u)" -o "${perfdata}" ${testprog} \
+ > "${script_output}" 2>&1
+ then
+ if grep -q "libbpf.*EPERM" "${script_output}"
+ then
+ echo "Uid sampling [Skipped permissions]"
+ return
+ else
+ echo "Uid sampling [Failed to record]"
+ err=1
+ # cat "${script_output}"
+ return
+ fi
+ fi
+ if ! perf report -i "${perfdata}" -q | grep -q "${testsym}"
+ then
+ echo "Uid sampling [Failed missing output]"
+ err=1
+ return
+ fi
+ echo "Uid sampling test [Success]"
+}
+
test_leader_sampling() {
echo "Basic leader sampling test"
if ! perf record -o "${perfdata}" -e "{cycles,cycles}:Su" -- \
@@ -238,22 +267,43 @@ test_leader_sampling() {
err=1
return
fi
+ perf script -i "${perfdata}" | grep brstack > $script_output
+ # Check if the two instruction counts are equal in each record.
+ # However, the throttling code doesn't consider event grouping. During throttling, only the
+ # leader is stopped, causing the slave's counts significantly higher. To temporarily solve this,
+ # let's set the tolerance rate to 80%.
+ # TODO: Revert the code for tolerance once the throttling mechanism is fixed.
index=0
- perf script -i "${perfdata}" > $script_output
+ valid_counts=0
+ invalid_counts=0
+ tolerance_rate=0.8
while IFS= read -r line
do
- # Check if the two instruction counts are equal in each record
cycles=$(echo $line | awk '{for(i=1;i<=NF;i++) if($i=="cycles:") print $(i-1)}')
if [ $(($index%2)) -ne 0 ] && [ ${cycles}x != ${prev_cycles}x ]
then
- echo "Leader sampling [Failed inconsistent cycles count]"
- err=1
- return
+ invalid_counts=$(($invalid_counts+1))
+ else
+ valid_counts=$(($valid_counts+1))
fi
index=$(($index+1))
prev_cycles=$cycles
- done < $script_output
- echo "Basic leader sampling test [Success]"
+ done < "${script_output}"
+ total_counts=$(bc <<< "$invalid_counts+$valid_counts")
+ if (( $(bc <<< "$total_counts <= 0") ))
+ then
+ echo "Leader sampling [No sample generated]"
+ err=1
+ return
+ fi
+ isok=$(bc <<< "scale=2; if (($invalid_counts/$total_counts) < (1-$tolerance_rate)) { 0 } else { 1 };")
+ if [ $isok -eq 1 ]
+ then
+ echo "Leader sampling [Failed inconsistent cycles count]"
+ err=1
+ else
+ echo "Basic leader sampling test [Success]"
+ fi
}
test_topdown_leader_sampling() {
@@ -311,6 +361,72 @@ test_precise_max() {
fi
}
+test_callgraph() {
+ echo "Callgraph test"
+
+ case $(uname -m)
+ in s390x)
+ cmd_flags="--call-graph dwarf -e cpu-clock";;
+ *)
+ cmd_flags="-g";;
+ esac
+
+ if ! perf record -o "${perfdata}" $cmd_flags perf test -w brstack
+ then
+ echo "Callgraph test [Failed missing output]"
+ err=1
+ return
+ fi
+
+ if ! perf report -i "${perfdata}" 2>&1 | grep "${testsym2}"
+ then
+ echo "Callgraph test [Failed missing symbol]"
+ err=1
+ return
+ fi
+
+ echo "Callgraph test [Success]"
+}
+
+test_ratio_to_prev() {
+ echo "ratio-to-prev test"
+ if ! perf record -o /dev/null -e "{instructions, cycles/period=100000,ratio-to-prev=0.5/}" \
+ true 2> /dev/null
+ then
+ echo "ratio-to-prev [Skipped not supported]"
+ return
+ fi
+ if ! perf record -o /dev/null -e "instructions, cycles/period=100000,ratio-to-prev=0.5/" \
+ true |& grep -q 'Invalid use of ratio-to-prev term without preceding element in group'
+ then
+ echo "ratio-to-prev test [Failed elements must be in same group]"
+ err=1
+ return
+ fi
+ if ! perf record -o /dev/null -e "{instructions,dummy,cycles/period=100000,ratio-to-prev=0.5/}" \
+ true |& grep -q 'must have same PMU'
+ then
+ echo "ratio-to-prev test [Failed elements must have same PMU]"
+ err=1
+ return
+ fi
+ if ! perf record -o /dev/null -e "{instructions,cycles/ratio-to-prev=0.5/}" \
+ true |& grep -q 'Event period term or count (-c) must be set when using ratio-to-prev term.'
+ then
+ echo "ratio-to-prev test [Failed period must be set]"
+ err=1
+ return
+ fi
+ if ! perf record -o /dev/null -e "{cycles/ratio-to-prev=0.5/}" \
+ true |& grep -q 'Invalid use of ratio-to-prev term without preceding element in group'
+ then
+ echo "ratio-to-prev test [Failed need 2+ events]"
+ err=1
+ return
+ fi
+ echo "Basic ratio-to-prev record test [Success]"
+}
+
# raise the limit of file descriptors to minimum
if [[ $default_fd_limit -lt $min_fd_limit ]]; then
ulimit -Sn $min_fd_limit
@@ -322,9 +438,12 @@ test_system_wide
test_workload
test_branch_counter
test_cgroup
+test_uid
test_leader_sampling
test_topdown_leader_sampling
test_precise_max
+test_callgraph
+test_ratio_to_prev
# restore the default value
ulimit -Sn $default_fd_limit
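
The leader-sampling check above leans on bc because the tolerance math needs floating-point division; bc returns a 0/1 flag that the shell then tests. The idiom in isolation, with stand-in counts (the polarity here is the intuitive one, 1 meaning within tolerance):

    #!/bin/bash
    invalid_counts=5; valid_counts=95; tolerance_rate=0.8
    total_counts=$((invalid_counts + valid_counts))
    isok=$(bc <<< "scale=2; if (($invalid_counts/$total_counts) < (1-$tolerance_rate)) { 1 } else { 0 };")
    [ "$isok" -eq 1 ] && echo "within tolerance" || echo "too many mismatches"
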
diff --git a/tools/perf/tests/shell/record_bpf_filter.sh b/tools/perf/tests/shell/record_bpf_filter.sh
index 4d6c3c1b7fb9..383574cb3bd3 100755
--- a/tools/perf/tests/shell/record_bpf_filter.sh
+++ b/tools/perf/tests/shell/record_bpf_filter.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf record sample filtering (by BPF) tests
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/record_lbr.sh b/tools/perf/tests/shell/record_lbr.sh
index 8d750ee631f8..78a02e90ece1 100755
--- a/tools/perf/tests/shell/record_lbr.sh
+++ b/tools/perf/tests/shell/record_lbr.sh
@@ -4,7 +4,12 @@
set -e
-if [ ! -f /sys/devices/cpu/caps/branches ] && [ ! -f /sys/devices/cpu_core/caps/branches ]
+ParanoidAndNotRoot() {
+ [ "$(id -u)" != 0 ] && [ "$(cat /proc/sys/kernel/perf_event_paranoid)" -gt $1 ]
+}
+
+if [ ! -f /sys/bus/event_source/devices/cpu/caps/branches ] &&
+ [ ! -f /sys/bus/event_source/devices/cpu_core/caps/branches ]
then
echo "Skip: only x86 CPUs support LBR"
exit 2
@@ -22,6 +27,7 @@ cleanup() {
}
trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
cleanup
exit 1
}
@@ -93,7 +99,7 @@ lbr_test() {
return
fi
- zero_nr=$(echo "$out" | grep -c 'branch stack: nr:0' || true)
+ zero_nr=$(echo "$out" | grep -A3 'branch stack: nr:0' | grep thread | grep -cv swapper || true)
r=$(($zero_nr * 100 / $bs_nr))
if [ $r -gt $threshold ]; then
echo "$test [Failed empty br stack ratio exceed $threshold%: $r%]"
@@ -122,8 +128,11 @@ lbr_test "-j ind_call" "any indirect call" 2
lbr_test "-j ind_jmp" "any indirect jump" 100
lbr_test "-j call" "direct calls" 2
lbr_test "-j ind_call,u" "any indirect user call" 100
-lbr_test "-a -b" "system wide any branch" 2
-lbr_test "-a -j any_call" "system wide any call" 2
+if ! ParanoidAndNotRoot 1
+then
+ lbr_test "-a -b" "system wide any branch" 2
+ lbr_test "-a -j any_call" "system wide any call" 2
+fi
# Parallel
parallel_lbr_test "-b" "parallel any branch" 100 &
@@ -140,10 +149,16 @@ parallel_lbr_test "-j call" "parallel direct calls" 100 &
pid6=$!
parallel_lbr_test "-j ind_call,u" "parallel any indirect user call" 100 &
pid7=$!
-parallel_lbr_test "-a -b" "parallel system wide any branch" 100 &
-pid8=$!
-parallel_lbr_test "-a -j any_call" "parallel system wide any call" 100 &
-pid9=$!
+if ParanoidAndNotRoot 1
+then
+ pid8=
+ pid9=
+else
+ parallel_lbr_test "-a -b" "parallel system wide any branch" 100 &
+ pid8=$!
+ parallel_lbr_test "-a -j any_call" "parallel system wide any call" 100 &
+ pid9=$!
+fi
for pid in $pid1 $pid2 $pid3 $pid4 $pid5 $pid6 $pid7 $pid8 $pid9
do
diff --git a/tools/perf/tests/shell/record_offcpu.sh b/tools/perf/tests/shell/record_offcpu.sh
index 678947fe69ee..860a2d6f4b75 100755
--- a/tools/perf/tests/shell/record_offcpu.sh
+++ b/tools/perf/tests/shell/record_offcpu.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf record offcpu profiling tests (exclusive)
# SPDX-License-Identifier: GPL-2.0
@@ -7,6 +7,9 @@ set -e
err=0
perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+ts=$(printf "%u" $((~0 << 32))) # OFF_CPU_TIMESTAMP
+dummy_timestamp=${ts%???} # remove the last 3 digits to match perf script
+
cleanup() {
rm -f ${perfdata}
rm -f ${perfdata}.old
@@ -19,6 +22,9 @@ trap_cleanup() {
}
trap trap_cleanup EXIT TERM INT
+test_above_thresh="Threshold test (above threshold)"
+test_below_thresh="Threshold test (below threshold)"
+
test_offcpu_priv() {
echo "Checking off-cpu privilege"
@@ -88,6 +94,63 @@ test_offcpu_child() {
echo "Child task off-cpu test [Success]"
}
+# task blocks longer than the --off-cpu-thresh, perf should collect a direct sample
+test_offcpu_above_thresh() {
+ echo "${test_above_thresh}"
+
+ # collect direct off-cpu samples for tasks blocked for more than 999ms
+ if ! perf record -e dummy --off-cpu --off-cpu-thresh 999 -o ${perfdata} -- sleep 1 2> /dev/null
+ then
+ echo "${test_above_thresh} [Failed record]"
+ err=1
+ return
+ fi
+ # direct sample's timestamp should be lower than the dummy_timestamp of the at-the-end sample
+ # check if a direct sample exists
+ if ! perf script --time "0, ${dummy_timestamp}" -i ${perfdata} -F event | grep -q "offcpu-time"
+ then
+ echo "${test_above_thresh} [Failed missing direct samples]"
+ err=1
+ return
+ fi
+ # there should only be one direct sample, and its period should be higher than off-cpu-thresh
+ if ! perf script --time "0, ${dummy_timestamp}" -i ${perfdata} -F period | \
+ awk '{ if (int($1) > 999000000) exit 0; else exit 1; }'
+ then
+ echo "${test_above_thresh} [Failed off-cpu time too short]"
+ err=1
+ return
+ fi
+ echo "${test_above_thresh} [Success]"
+}
+
+# task blocks shorter than the --off-cpu-thresh, perf should collect an at-the-end sample
+test_offcpu_below_thresh() {
+ echo "${test_below_thresh}"
+
+ # use a 1.2s threshold, longer than the 1s sleep, so only an at-the-end sample is expected
+ if ! perf record -e dummy --off-cpu --off-cpu-thresh 1200 -o ${perfdata} -- sleep 1 2> /dev/null
+ then
+ echo "${test_below_thresh} [Failed record]"
+ err=1
+ return
+ fi
+ # see if there's an at-the-end sample
+ if ! perf script --time "${dummy_timestamp}," -i ${perfdata} -F event | grep -q 'offcpu-time'
+ then
+ echo "${test_below_thresh} [Failed at-the-end samples cannot be found]"
+ err=1
+ return
+ fi
+ # plus there shouldn't be any direct samples
+ if perf script --time "0, ${dummy_timestamp}" -i ${perfdata} -F event | grep -q 'offcpu-time'
+ then
+ echo "${test_below_thresh} [Failed direct samples are found when they shouldn't be]"
+ err=1
+ return
+ fi
+ echo "${test_below_thresh} [Success]"
+}
test_offcpu_priv
@@ -99,5 +162,13 @@ if [ $err = 0 ]; then
test_offcpu_child
fi
+if [ $err = 0 ]; then
+ test_offcpu_above_thresh
+fi
+
+if [ $err = 0 ]; then
+ test_offcpu_below_thresh
+fi
+
cleanup
exit $err
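
The timestamp split used by both threshold tests works because at-the-end samples are stamped with OFF_CPU_TIMESTAMP, whose upper 32 bits are all ones and which no real clock value reaches, while direct samples carry real timestamps; dropping the last three digits matches perf script's microsecond formatting. Spelled out:

    #!/bin/bash
    # OFF_CPU_TIMESTAMP: upper 32 bits all ones (nanoseconds).
    ts=$(printf "%u" $((~0 << 32)))   # 18446744069414584320 on a 64-bit shell
    echo "marker (ns): ${ts}"
    echo "marker (us, as perf script prints): ${ts%???}"
    # direct samples:     perf script --time "0, ${ts%???}" ...
    # at-the-end samples: perf script --time "${ts%???}," ...
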
diff --git a/tools/perf/tests/shell/record_sideband.sh b/tools/perf/tests/shell/record_sideband.sh
index ac70ac27d590..2182551873be 100755
--- a/tools/perf/tests/shell/record_sideband.sh
+++ b/tools/perf/tests/shell/record_sideband.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf record sideband tests
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/record_weak_term.sh b/tools/perf/tests/shell/record_weak_term.sh
new file mode 100755
index 000000000000..811b00ffb47a
--- /dev/null
+++ b/tools/perf/tests/shell/record_weak_term.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+# record weak terms
+# SPDX-License-Identifier: GPL-2.0
+# Test that command line options override weak terms from sysfs or inbuilt json.
+set -e
+
+shelldir=$(dirname "$0")
+# shellcheck source=lib/setup_python.sh
+. "${shelldir}"/lib/setup_python.sh
+
+# Find the first event with a specified period, such as
+# "cpu_core/event=0x24,period=200003,umask=0xff/"
+event=$(perf list --json | $PYTHON -c '
+import json, sys
+for e in json.load(sys.stdin):
+ if "EventName" not in e or "/modifier" in e["EventName"]:
+ continue
+ if "Encoding" in e and "period=" in e["Encoding"]:
+ print(e["EventName"])
+ break
+')
+if [[ "$event" = "" ]]
+then
+ echo "Skip: No sysfs/json events with inbuilt period."
+ exit 2
+fi
+
+echo "Testing that for $event the period is overridden with 1000"
+perf list --detail "$event"
+if ! perf record -c 1000 -vv -e "$event" -o /dev/null true 2>&1 | \
+ grep -q -F '{ sample_period, sample_freq } 1000'
+then
+ echo "Fail: Unexpected verbose output and sample period"
+ exit 1
+fi
+echo "Success"
+exit 0
diff --git a/tools/perf/tests/shell/sched.sh b/tools/perf/tests/shell/sched.sh
new file mode 100755
index 000000000000..b9b81eaf856e
--- /dev/null
+++ b/tools/perf/tests/shell/sched.sh
@@ -0,0 +1,116 @@
+#!/bin/bash
+# perf sched tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+if [ "$(id -u)" != 0 ]; then
+ echo "[Skip] No root permission"
+ exit 2
+fi
+
+err=0
+perfdata=$(mktemp /tmp/__perf_test_sched.perf.data.XXXXX)
+PID1=0
+PID2=0
+
+cleanup() {
+ rm -f "${perfdata}"
+ rm -f "${perfdata}".old
+
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+start_noploops() {
+ # Start two noploop workloads on CPU0 to trigger scheduling.
+ perf test -w noploop 10 &
+ PID1=$!
+ taskset -pc 0 $PID1
+ perf test -w noploop 10 &
+ PID2=$!
+ taskset -pc 0 $PID2
+
+ if ! grep -q 'Cpus_allowed_list:\s*0$' "/proc/$PID1/status"
+ then
+ echo "Sched [Error taskset did not work for the 1st noploop ($PID1)]"
+ grep Cpus_allowed /proc/$PID1/status
+ err=1
+ fi
+
+ if ! grep -q 'Cpus_allowed_list:\s*0$' "/proc/$PID2/status"
+ then
+ echo "Sched [Error taskset did not work for the 2nd noploop ($PID2)]"
+ grep Cpus_allowed /proc/$PID2/status
+ err=1
+ fi
+}
+
+cleanup_noploops() {
+ kill "$PID1" "$PID2"
+}
+
+test_sched_record() {
+ echo "Sched record"
+
+ start_noploops
+
+ perf sched record --no-inherit -o "${perfdata}" sleep 1
+
+ cleanup_noploops
+}
+
+test_sched_latency() {
+ echo "Sched latency"
+
+ if ! perf sched latency -i "${perfdata}" | grep -q perf-noploop
+ then
+ echo "Sched latency [Failed missing output]"
+ err=1
+ fi
+}
+
+test_sched_script() {
+ echo "Sched script"
+
+ if ! perf sched script -i "${perfdata}" | grep -q perf-noploop
+ then
+ echo "Sched script [Failed missing output]"
+ err=1
+ fi
+}
+
+test_sched_map() {
+ echo "Sched map"
+
+ if ! perf sched map -i "${perfdata}" | grep -q perf-noploop
+ then
+ echo "Sched map [Failed missing output]"
+ err=1
+ fi
+}
+
+test_sched_timehist() {
+ echo "Sched timehist"
+
+ if ! perf sched timehist -i "${perfdata}" | grep -q perf-noploop
+ then
+ echo "Sched timehist [Failed missing output]"
+ err=1
+ fi
+}
+
+test_sched_record
+test_sched_latency
+test_sched_script
+test_sched_map
+test_sched_timehist
+
+cleanup
+exit $err
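A rough manual equivalent of the setup exercised above, assuming a CPU0 that
accepts pinned tasks: two spinners forced onto one CPU make the scheduler
interleave them, which the latency report then shows per task:

  perf test -w noploop 10 & taskset -pc 0 $!
  perf test -w noploop 10 & taskset -pc 0 $!
  perf sched record --no-inherit -o perf.data sleep 1
  perf sched latency -i perf.data | grep perf-noploop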
diff --git a/tools/perf/tests/shell/script.sh b/tools/perf/tests/shell/script.sh
index d3e2958d2242..7007f1cdf761 100755
--- a/tools/perf/tests/shell/script.sh
+++ b/tools/perf/tests/shell/script.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf script tests
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/script_dlfilter.sh b/tools/perf/tests/shell/script_dlfilter.sh
new file mode 100755
index 000000000000..45c97d4a7d5f
--- /dev/null
+++ b/tools/perf/tests/shell/script_dlfilter.sh
@@ -0,0 +1,107 @@
+#!/bin/bash
+# perf script --dlfilter tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+shelldir=$(dirname "$0")
+# shellcheck source=lib/setup_python.sh
+. "${shelldir}"/lib/setup_python.sh
+
+# skip if there's no compiler
+if ! [ -x "$(command -v cc)" ]; then
+ echo "failed: no compiler, install gcc"
+ exit 2
+fi
+
+err=0
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+dlfilter_c=$(mktemp /tmp/__perf_test.dlfilter.test.c.XXXXX)
+dlfilter_so=$(mktemp /tmp/__perf_test.dlfilter.so.XXXXX)
+
+cleanup() {
+ rm -f "${perfdata}"
+ rm -f "${dlfilter_c}"
+ rm -f "${dlfilter_so}"
+ rm -f "${dlfilter_so}.o"
+
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+cat <<EOF > "${dlfilter_c}"
+#include <perf/perf_dlfilter.h>
+#include <string.h>
+#include <stdio.h>
+
+struct perf_dlfilter_fns perf_dlfilter_fns;
+
+int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
+{
+ const struct perf_dlfilter_al *al;
+
+ if (!sample->ip)
+ return 0;
+
+ al = perf_dlfilter_fns.resolve_ip(ctx);
+ if (!al || !al->sym || strcmp(al->sym, "test_loop"))
+ return 1;
+
+ return 0;
+}
+EOF
+
+test_dlfilter() {
+ echo "Basic --dlfilter test"
+ # Generate perf.data file
+ if ! perf record -o "${perfdata}" perf test -w thloop 1 2> /dev/null
+ then
+ echo "Basic --dlfilter test [Failed record]"
+ err=1
+ return
+ fi
+
+ # Build the dlfilter
+ if ! cc -c -I tools/perf/include -fpic -x c "${dlfilter_c}" -o "${dlfilter_so}.o"
+ then
+ echo "Basic --dlfilter test [Failed to build dlfilter object]"
+ err=1
+ return
+ fi
+
+ if ! cc -shared -o "${dlfilter_so}" "${dlfilter_so}.o"
+ then
+ echo "Basic --dlfilter test [Failed to link dlfilter shared object]"
+ err=1
+ return
+ fi
+
+ # Check that the output contains "test_loop" and nothing else
+ if ! perf script -i "${perfdata}" --dlfilter "${dlfilter_so}" | grep -q "test_loop"
+ then
+ echo "Basic --dlfilter test [Failed missing output]"
+ err=1
+ return
+ fi
+
+ # The filter should filter out everything except test_loop, so ensure no other symbols are present
+ # This is a simple check; we could be more rigorous
+ if perf script -i "${perfdata}" --dlfilter "${dlfilter_so}" | grep -v "test_loop" | grep -q "perf"
+ then
+ echo "Basic --dlfilter test [Failed filtering]"
+ err=1
+ return
+ fi
+
+ echo "Basic --dlfilter test [Success]"
+}
+
+test_dlfilter
+cleanup
+exit $err
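For out-of-tree use, the same filter can be built and applied directly
(sketch; the include path assumes a kernel source checkout):

  cc -shared -fpic -I tools/perf/include -o filter.so filter.c
  perf script -i perf.data --dlfilter ./filter.so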
diff --git a/tools/perf/tests/shell/stat+csv_output.sh b/tools/perf/tests/shell/stat+csv_output.sh
index 7a6f6e177402..cd6fff597091 100755
--- a/tools/perf/tests/shell/stat+csv_output.sh
+++ b/tools/perf/tests/shell/stat+csv_output.sh
@@ -44,7 +44,7 @@ function commachecker()
;; "--per-die") exp=8
;; "--per-cluster") exp=8
;; "--per-cache") exp=8
- ;; "--metric-only") exp=2
+ ;; "--metric-only") exp=1
esac
while read line
diff --git a/tools/perf/tests/shell/stat+csv_summary.sh b/tools/perf/tests/shell/stat+csv_summary.sh
index 323123ff4d19..9a4353db3825 100755
--- a/tools/perf/tests/shell/stat+csv_summary.sh
+++ b/tools/perf/tests/shell/stat+csv_summary.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf stat csv summary test
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/stat+event_uniquifying.sh b/tools/perf/tests/shell/stat+event_uniquifying.sh
new file mode 100755
index 000000000000..b5dec6b6da36
--- /dev/null
+++ b/tools/perf/tests/shell/stat+event_uniquifying.sh
@@ -0,0 +1,66 @@
+#!/bin/bash
+# perf stat events uniquifying
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+stat_output=$(mktemp /tmp/__perf_test.stat_output.XXXXX)
+
+cleanup() {
+ rm -f "${stat_output}"
+
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+test_event_uniquifying() {
+ echo "Uniquification of PMU sysfs events test"
+
+ # Read events from perf list with and without -v. With -v the duplicate PMUs
+ # aren't deduplicated. Note, json events are listed by perf list without a
+ # PMU.
+ read -ra pmu_events <<< "$(perf list --raw pmu)"
+ read -ra pmu_v_events <<< "$(perf list -v --raw pmu)"
+ # For all non-deduplicated events.
+ for pmu_v_event in "${pmu_v_events[@]}"; do
+ # If the event matches one of the deduplicated events then it mustn't
+ # be an event with duplicate PMUs, so continue the outer loop.
+ for pmu_event in "${pmu_events[@]}"; do
+ if [[ "$pmu_v_event" == "$pmu_event" ]]; then
+ continue 2
+ fi
+ done
+ # Strip the suffix from the non-deduplicated event's PMU.
+ event=$(echo "$pmu_v_event" | sed -E 's/_[0-9]+//')
+ for pmu_event in "${pmu_events[@]}"; do
+ if [[ "$event" == "$pmu_event" ]]; then
+ echo "Testing event ${event} is uniquified to ${pmu_v_event}"
+ if ! perf stat -e "$event" -A -o ${stat_output} -- true; then
+ echo "Error running perf stat for event '$event' [Skip]"
+ if [ $err = 0 ]; then
+ err=2
+ fi
+ continue
+ fi
+ # Ensure the non-deduplicated event appears in the output.
+ if ! grep -q "${pmu_v_event}" "${stat_output}"; then
+ echo "Uniquification of PMU sysfs events test [Failed]"
+ cat "${stat_output}"
+ err=1
+ fi
+ break
+ fi
+ done
+ done
+}
+
+test_event_uniquifying
+cleanup
+exit $err
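The suffix stripping above relies on how perf uniquifies duplicated PMUs:
multiple instances of one sysfs PMU get numeric suffixes (e.g. uncore_imc_0,
uncore_imc_1) that plain perf list folds into a single name. A quick way to
eyeball both views (sketch):

  diff <(perf list --raw pmu | tr ' ' '\n') \
       <(perf list -v --raw pmu | tr ' ' '\n') | head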
diff --git a/tools/perf/tests/shell/stat+json_output.sh b/tools/perf/tests/shell/stat+json_output.sh
index a4f257ea839e..85d1ad7186c6 100755
--- a/tools/perf/tests/shell/stat+json_output.sh
+++ b/tools/perf/tests/shell/stat+json_output.sh
@@ -176,7 +176,12 @@ check_per_socket()
check_metric_only()
{
echo -n "Checking json output: metric only "
- perf stat -j --metric-only -e instructions,cycles -o "${stat_output}" true
+ if [ "$(uname -m)" = "s390x" ] && ! grep '^facilities' /proc/cpuinfo | grep -qw 67
+ then
+ echo "[Skip] CPU-measurement counter facility not installed"
+ return
+ fi
+ perf stat -j --metric-only -M page_faults_per_second -o "${stat_output}" true
$PYTHON $pythonchecker --metric-only --file "${stat_output}"
echo "[Success]"
}
diff --git a/tools/perf/tests/shell/stat+shadow_stat.sh b/tools/perf/tests/shell/stat+shadow_stat.sh
index 0c7d79a230ea..cabbbf17c662 100755
--- a/tools/perf/tests/shell/stat+shadow_stat.sh
+++ b/tools/perf/tests/shell/stat+shadow_stat.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf stat metrics (shadow stat) test
# SPDX-License-Identifier: GPL-2.0
@@ -14,7 +14,7 @@ perf stat -a -e cycles sleep 1 2>&1 | grep -e cpu_core && exit 2
test_global_aggr()
{
- perf stat -a --no-big-num -e cycles,instructions sleep 1 2>&1 | \
+ perf stat -a --no-big-num -M insn_per_cycle sleep 1 2>&1 | \
grep -e cycles -e instructions | \
while read num evt _ ipc rest
do
@@ -53,7 +53,7 @@ test_global_aggr()
test_no_aggr()
{
- perf stat -a -A --no-big-num -e cycles,instructions sleep 1 2>&1 | \
+ perf stat -a -A --no-big-num -M insn_per_cycle sleep 1 2>&1 | \
grep ^CPU | \
while read cpu num evt _ ipc rest
do
diff --git a/tools/perf/tests/shell/stat+std_output.sh b/tools/perf/tests/shell/stat+std_output.sh
index 6fee67693ba7..9c4b92ecf448 100755
--- a/tools/perf/tests/shell/stat+std_output.sh
+++ b/tools/perf/tests/shell/stat+std_output.sh
@@ -12,8 +12,8 @@ set -e
stat_output=$(mktemp /tmp/__perf_test.stat_output.std.XXXXX)
event_name=(cpu-clock task-clock context-switches cpu-migrations page-faults stalled-cycles-frontend stalled-cycles-backend cycles instructions branches branch-misses)
-event_metric=("CPUs utilized" "CPUs utilized" "/sec" "/sec" "/sec" "frontend cycles idle" "backend cycles idle" "GHz" "insn per cycle" "/sec" "of all branches")
-skip_metric=("stalled cycles per insn" "tma_" "retiring" "frontend_bound" "bad_speculation" "backend_bound" "TopdownL1" "percent of slots")
+event_metric=("CPUs_utilized" "CPUs_utilized" "cs/sec" "migrations/sec" "faults/sec" "frontend_cycles_idle" "backend_cycles_idle" "GHz" "insn_per_cycle" "/sec" "branch_miss_rate")
+skip_metric=("tma_" "TopdownL1")
cleanup() {
rm -f "${stat_output}"
@@ -90,7 +90,11 @@ function commachecker()
}
done < "${stat_output}"
- [ $metric_only -eq 1 ] && exit 1
+ if [ $metric_only -ne 1 ]
+ then
+ echo "Missing metric only output in:"
+ cat "${stat_output}"
+ fi
return 0
}
diff --git a/tools/perf/tests/shell/stat.sh b/tools/perf/tests/shell/stat.sh
index 8a100a7f2dc1..0b2f0f88ca16 100755
--- a/tools/perf/tests/shell/stat.sh
+++ b/tools/perf/tests/shell/stat.sh
@@ -16,9 +16,46 @@ test_default_stat() {
echo "Basic stat command test [Success]"
}
+test_null_stat() {
+ echo "Null stat command test"
+ if ! perf stat --null true 2>&1 | grep -E -q "Performance counter stats for 'true':"
+ then
+ echo "Null stat command test [Failed]"
+ err=1
+ return
+ fi
+ echo "Null stat command test [Success]"
+}
+
+find_offline_cpu() {
+ for i in $(seq 1 4096)
+ do
+ if [[ ! -f /sys/devices/system/cpu/cpu$i/online || \
+ $(cat /sys/devices/system/cpu/cpu$i/online) == "0" ]]
+ then
+ echo $i
+ return
+ fi
+ done
+ echo "Failed to find offline CPU"
+ exit 1
+}
+
+test_offline_cpu_stat() {
+ cpu=$(find_offline_cpu)
+ echo "Offline CPU stat command test (cpu $cpu)"
+ if ! perf stat "-C$cpu" -e cycles true 2>&1 | grep -E -q "No supported events found."
+ then
+ echo "Offline CPU stat command test [Failed]"
+ err=1
+ return
+ fi
+ echo "Offline CPU stat command test [Success]"
+}
+
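The sysfs scan in find_offline_cpu can be reproduced by hand (assuming the
standard hotplug layout; cpu0 typically has no online file):

  for c in /sys/devices/system/cpu/cpu[0-9]*/online; do
    [ -e "$c" ] && [ "$(cat "$c")" = 0 ] && echo "offline: ${c%/online}"
  done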
test_stat_record_report() {
echo "stat record and report test"
- if ! perf stat record -o - true | perf stat report -i - 2>&1 | \
+ if ! perf stat record -e task-clock -o - true | perf stat report -i - 2>&1 | \
grep -E -q "Performance counter stats for 'pipe':"
then
echo "stat record and report test [Failed]"
@@ -30,7 +67,7 @@ test_stat_record_report() {
test_stat_record_script() {
echo "stat record and script test"
- if ! perf stat record -o - true | perf script -i - 2>&1 | \
+ if ! perf stat record -e task-clock -o - true | perf script -i - 2>&1 | \
grep -E -q "CPU[[:space:]]+THREAD[[:space:]]+VAL[[:space:]]+ENA[[:space:]]+RUN[[:space:]]+TIME[[:space:]]+EVENT"
then
echo "stat record and script test [Failed]"
@@ -196,7 +233,7 @@ test_hybrid() {
fi
# Run default Perf stat
- cycles_events=$(perf stat -- true 2>&1 | grep -E "/cycles/[uH]*| cycles[:uH]* " -c)
+ cycles_events=$(perf stat -a -- sleep 0.1 2>&1 | grep -E "/cpu-cycles/[uH]*| cpu-cycles[:uH]* " -c)
# The expectation is that default output will have a cycles events on each
# hybrid PMU. In situations with no cycles PMU events, like virtualized, this
@@ -212,6 +249,8 @@ test_hybrid() {
}
test_default_stat
+test_null_stat
+test_offline_cpu_stat
test_stat_record_report
test_stat_record_script
test_stat_repeat_weak_groups
diff --git a/tools/perf/tests/shell/stat_all_metricgroups.sh b/tools/perf/tests/shell/stat_all_metricgroups.sh
index c6d61a4ac3e7..1400880ec01f 100755
--- a/tools/perf/tests/shell/stat_all_metricgroups.sh
+++ b/tools/perf/tests/shell/stat_all_metricgroups.sh
@@ -37,6 +37,9 @@ do
then
err=2 # Skip
fi
+ elif [[ "$m" == @(Default2|Default3|Default4) ]]
+ then
+ echo "Ignoring failures in $m that may contain unsupported legacy events"
else
echo "Metric group $m failed"
echo $result
diff --git a/tools/perf/tests/shell/stat_all_metrics.sh b/tools/perf/tests/shell/stat_all_metrics.sh
index ee817c66da06..3dabb39c7cc8 100755
--- a/tools/perf/tests/shell/stat_all_metrics.sh
+++ b/tools/perf/tests/shell/stat_all_metrics.sh
@@ -7,86 +7,108 @@ ParanoidAndNotRoot()
[ "$(id -u)" != 0 ] && [ "$(cat /proc/sys/kernel/perf_event_paranoid)" -gt $1 ]
}
+test_prog="sleep 0.01"
system_wide_flag="-a"
if ParanoidAndNotRoot 0
then
system_wide_flag=""
+ test_prog="perf test -w noploop"
fi
err=0
for m in $(perf list --raw-dump metrics); do
echo "Testing $m"
- result=$(perf stat -M "$m" $system_wide_flag -- sleep 0.01 2>&1)
+ result=$(perf stat -M "$m" $system_wide_flag -- $test_prog 2>&1)
result_err=$?
- if [[ $result_err -gt 0 ]]
+ if [[ $result_err -eq 0 && "$result" =~ ${m:0:50} ]]
then
- if [[ "$result" =~ "Cannot resolve IDs for" ]]
+ # No error result and metric shown.
+ continue
+ fi
+ if [[ "$result" =~ "Cannot resolve IDs for" || "$result" =~ "No supported events found" ]]
+ then
+ if [[ $(perf list --raw-dump $m) == "Default"* ]]
then
- echo "Metric contains missing events"
+ echo "[Ignored $m] failed but as a Default metric this can be expected"
echo $result
- err=1 # Fail
continue
- elif [[ "$result" =~ \
- "Access to performance monitoring and observability operations is limited" ]]
+ fi
+ echo "[Failed $m] Metric contains missing events"
+ echo $result
+ err=1 # Fail
+ continue
+ elif [[ "$result" =~ \
+ "Access to performance monitoring and observability operations is limited" ]]
+ then
+ echo "[Skipped $m] Permission failure"
+ echo $result
+ if [[ $err -eq 0 ]]
then
- echo "Permission failure"
- echo $result
- if [[ $err -eq 0 ]]
- then
- err=2 # Skip
- fi
- continue
- elif [[ "$result" =~ "in per-thread mode, enable system wide" ]]
+ err=2 # Skip
+ fi
+ continue
+ elif [[ "$result" =~ "in per-thread mode, enable system wide" ]]
+ then
+ echo "[Skipped $m] Permissions - need system wide mode"
+ echo $result
+ if [[ $err -eq 0 ]]
then
- echo "Permissions - need system wide mode"
- echo $result
- if [[ $err -eq 0 ]]
- then
- err=2 # Skip
- fi
- continue
- elif [[ "$result" =~ "<not supported>" ]]
+ err=2 # Skip
+ fi
+ continue
+ elif [[ "$result" =~ "<not supported>" ]]
+ then
+ if [[ $(perf list --raw-dump $m) == "Default"* ]]
then
- echo "Not supported events"
+ echo "[Ignored $m] failed but as a Default metric this can be expected"
echo $result
- if [[ $err -eq 0 ]]
- then
- err=2 # Skip
- fi
continue
- elif [[ "$result" =~ "FP_ARITH" || "$result" =~ "AMX" ]]
+ fi
+ echo "[Skipped $m] Not supported events"
+ echo $result
+ if [[ $err -eq 0 ]]
then
- echo "FP issues"
- echo $result
- if [[ $err -eq 0 ]]
- then
- err=2 # Skip
- fi
- continue
- elif [[ "$result" =~ "PMM" ]]
+ err=2 # Skip
+ fi
+ continue
+ elif [[ "$result" =~ "<not counted>" ]]
+ then
+ echo "[Skipped $m] Not counted events"
+ echo $result
+ if [[ $err -eq 0 ]]
then
- echo "Optane memory issues"
- echo $result
- if [[ $err -eq 0 ]]
- then
- err=2 # Skip
- fi
- continue
+ err=2 # Skip
fi
- fi
-
- if [[ "$result" =~ ${m:0:50} ]]
+ continue
+ elif [[ "$result" =~ "FP_ARITH" || "$result" =~ "AMX" ]]
+ then
+ echo "[Skipped $m] FP issues"
+ echo $result
+ if [[ $err -eq 0 ]]
+ then
+ err=2 # Skip
+ fi
+ continue
+ elif [[ "$result" =~ "PMM" ]]
then
+ echo "[Skipped $m] Optane memory issues"
+ echo $result
+ if [[ $err -eq 0 ]]
+ then
+ err=2 # Skip
+ fi
continue
fi
# Failed, possibly the workload was too small so retry with something longer.
result=$(perf stat -M "$m" $system_wide_flag -- perf bench internals synthesize 2>&1)
- if [[ "$result" =~ ${m:0:50} ]]
+ result_err=$?
+ if [[ $result_err -eq 0 && "$result" =~ ${m:0:50} ]]
then
+ # No error result and metric shown.
continue
fi
- echo "Metric '$m' not printed in:"
+ echo "[Failed $m] has non-zero error '$result_err' or not printed in:"
echo "$result"
err=1
done
diff --git a/tools/perf/tests/shell/stat_all_pfm.sh b/tools/perf/tests/shell/stat_all_pfm.sh
index 4d004f777a6e..c08c186af2c4 100755
--- a/tools/perf/tests/shell/stat_all_pfm.sh
+++ b/tools/perf/tests/shell/stat_all_pfm.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf all libpfm4 events test
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/stat_bpf_counters.sh b/tools/perf/tests/shell/stat_bpf_counters.sh
index 95d2ad5d17c6..f43e28a136d3 100755
--- a/tools/perf/tests/shell/stat_bpf_counters.sh
+++ b/tools/perf/tests/shell/stat_bpf_counters.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf stat --bpf-counters test (exclusive)
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh b/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
index 2ec69060c42f..ff2e06c408bc 100755
--- a/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
+++ b/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf stat --bpf-counters --for-each-cgroup test
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/stat_metrics_values.sh b/tools/perf/tests/shell/stat_metrics_values.sh
index 279f19c5919a..30566f0b5427 100755
--- a/tools/perf/tests/shell/stat_metrics_values.sh
+++ b/tools/perf/tests/shell/stat_metrics_values.sh
@@ -16,11 +16,16 @@ workload="perf bench futex hash -r 2 -s"
# Add -debug, save data file and full rule file
echo "Launch python validation script $pythonvalidator"
echo "Output will be stored in: $tmpdir"
-$PYTHON $pythonvalidator -rule $rulefile -output_dir $tmpdir -wl "${workload}"
-ret=$?
-rm -rf $tmpdir
-if [ $ret -ne 0 ]; then
- echo "Metric validation return with erros. Please check metrics reported with errors."
-fi
+for cputype in /sys/bus/event_source/devices/cpu_*; do
+ cputype=$(basename "$cputype")
+ echo "Testing metrics for: $cputype"
+ $PYTHON $pythonvalidator -rule $rulefile -output_dir $tmpdir -wl "${workload}" \
+ -cputype "${cputype}"
+ ret=$?
+ rm -rf $tmpdir
+ if [ $ret -ne 0 ]; then
+ echo "Metric validation return with errors. Please check metrics reported with errors."
+ fi
+done
exit $ret
diff --git a/tools/perf/tests/shell/test_arm_callgraph_fp.sh b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
index 9caa36130175..9172dd68a81d 100755
--- a/tools/perf/tests/shell/test_arm_callgraph_fp.sh
+++ b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# Check Arm64 callgraphs are complete in fp mode
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/test_arm_coresight.sh b/tools/perf/tests/shell/test_arm_coresight.sh
index 573af9235b72..1c750b67d141 100755
--- a/tools/perf/tests/shell/test_arm_coresight.sh
+++ b/tools/perf/tests/shell/test_arm_coresight.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# Check Arm CoreSight trace data recording and synthesized samples (exclusive)
# Uses the 'perf record' to record trace data with Arm CoreSight sinks;
diff --git a/tools/perf/tests/shell/test_arm_coresight_disasm.sh b/tools/perf/tests/shell/test_arm_coresight_disasm.sh
index be2d26303f94..0dfb4fadf531 100755
--- a/tools/perf/tests/shell/test_arm_coresight_disasm.sh
+++ b/tools/perf/tests/shell/test_arm_coresight_disasm.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# Check Arm CoreSight disassembly script completes without errors (exclusive)
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/test_arm_spe.sh b/tools/perf/tests/shell/test_arm_spe.sh
index a69aab70dd8a..bb76ea88aa14 100755
--- a/tools/perf/tests/shell/test_arm_spe.sh
+++ b/tools/perf/tests/shell/test_arm_spe.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# Check Arm SPE trace data recording and synthesized samples (exclusive)
# Uses the 'perf record' to record trace data of Arm SPE events;
diff --git a/tools/perf/tests/shell/test_arm_spe_fork.sh b/tools/perf/tests/shell/test_arm_spe_fork.sh
index 8efeef9fb956..5bcca51c03ac 100755
--- a/tools/perf/tests/shell/test_arm_spe_fork.sh
+++ b/tools/perf/tests/shell/test_arm_spe_fork.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# Check Arm SPE doesn't hang when there are forks
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/test_bpf_metadata.sh b/tools/perf/tests/shell/test_bpf_metadata.sh
new file mode 100755
index 000000000000..be67d56e0f09
--- /dev/null
+++ b/tools/perf/tests/shell/test_bpf_metadata.sh
@@ -0,0 +1,76 @@
+#!/bin/bash
+# BPF metadata collection test
+#
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+
+cleanup() {
+ rm -f "${perfdata}"
+ rm -f "${perfdata}".old
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+test_bpf_metadata() {
+ echo "Checking BPF metadata collection"
+
+ if ! perf check -q feature libbpf-strings ; then
+ echo "Basic BPF metadata test [skipping - not supported]"
+ err=0
+ return
+ fi
+
+ # This is a basic invocation of perf record
+ # that invokes the perf_sample_filter BPF program.
+ if ! perf record -e task-clock --filter 'ip > 0' \
+ -o "${perfdata}" sleep 1 2> /dev/null
+ then
+ echo "Basic BPF metadata test [Failed record]"
+ err=1
+ return
+ fi
+
+ # The BPF programs that ship with "perf" all have the following
+ # variable defined at compile time:
+ #
+ # const char bpf_metadata_perf_version[] SEC(".rodata") = <...>;
+ #
+ # This invocation looks for a PERF_RECORD_BPF_METADATA event,
+ # and checks that its content contains the string given by
+ # "perf version".
+ VERS=$(perf version | awk '{print $NF}')
+ if ! perf script --show-bpf-events -i "${perfdata}" | awk '
+ /PERF_RECORD_BPF_METADATA.*perf_sample_filter/ {
+ header = 1;
+ }
+ /^ *entry/ {
+ if (header) { header = 0; entry = 1; }
+ }
+ $0 !~ /^ *entry/ {
+ entry = 0;
+ }
+ /perf_version/ {
+ if (entry) print $NF;
+ }
+ ' | grep -qF "$VERS"
+ then
+ echo "Basic BPF metadata test [Failed invalid output]"
+ err=1
+ return
+ fi
+ echo "Basic BPF metadata test [Success]"
+}
+
+test_bpf_metadata
+
+cleanup
+exit $err
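The same event stream can be inspected by hand (sketch, assuming a perf
built with BPF filter support):

  perf record -e task-clock --filter 'ip > 0' -o perf.data true
  perf script --show-bpf-events -i perf.data | grep -A3 PERF_RECORD_BPF_METADATA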
diff --git a/tools/perf/tests/shell/test_brstack.sh b/tools/perf/tests/shell/test_brstack.sh
index e01df7581393..85233d435be6 100755
--- a/tools/perf/tests/shell/test_brstack.sh
+++ b/tools/perf/tests/shell/test_brstack.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# Check branch stack sampling
# SPDX-License-Identifier: GPL-2.0
@@ -17,37 +17,116 @@ fi
skip_test_missing_symbol brstack_bench
+err=0
TMPDIR=$(mktemp -d /tmp/__perf_test.program.XXXXX)
TESTPROG="perf test -w brstack"
cleanup() {
rm -rf $TMPDIR
+ trap - EXIT TERM INT
}
-trap cleanup EXIT TERM INT
+trap_cleanup() {
+ set +e
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+is_arm64() {
+ [ "$(uname -m)" = "aarch64" ];
+}
+
+check_branches() {
+ if ! tr -s ' ' '\n' < "$TMPDIR/perf.script" | grep -E -m1 -q "$1"; then
+ echo "Branches missing $1"
+ err=1
+ fi
+}
test_user_branches() {
echo "Testing user branch stack sampling"
- perf record -o $TMPDIR/perf.data --branch-filter any,save_type,u -- ${TESTPROG} > /dev/null 2>&1
- perf script -i $TMPDIR/perf.data --fields brstacksym | tr -s ' ' '\n' > $TMPDIR/perf.script
+ perf record -o "$TMPDIR/perf.data" --branch-filter any,save_type,u -- ${TESTPROG} > "$TMPDIR/record.txt" 2>&1
+ perf script -i "$TMPDIR/perf.data" --fields brstacksym > "$TMPDIR/perf.script"
# example of branch entries:
# brstack_foo+0x14/brstack_bar+0x40/P/-/-/0/CALL
- set -x
- grep -E -m1 "^brstack_bench\+[^ ]*/brstack_foo\+[^ ]*/IND_CALL/.*$" $TMPDIR/perf.script
- grep -E -m1 "^brstack_foo\+[^ ]*/brstack_bar\+[^ ]*/CALL/.*$" $TMPDIR/perf.script
- grep -E -m1 "^brstack_bench\+[^ ]*/brstack_foo\+[^ ]*/CALL/.*$" $TMPDIR/perf.script
- grep -E -m1 "^brstack_bench\+[^ ]*/brstack_bar\+[^ ]*/CALL/.*$" $TMPDIR/perf.script
- grep -E -m1 "^brstack_bar\+[^ ]*/brstack_foo\+[^ ]*/RET/.*$" $TMPDIR/perf.script
- grep -E -m1 "^brstack_foo\+[^ ]*/brstack_bench\+[^ ]*/RET/.*$" $TMPDIR/perf.script
- grep -E -m1 "^brstack_bench\+[^ ]*/brstack_bench\+[^ ]*/COND/.*$" $TMPDIR/perf.script
- grep -E -m1 "^brstack\+[^ ]*/brstack\+[^ ]*/UNCOND/.*$" $TMPDIR/perf.script
- set +x
-
+ expected=(
+ "^brstack_bench\+[^ ]*/brstack_foo\+[^ ]*/IND_CALL/.*$"
+ "^brstack_foo\+[^ ]*/brstack_bar\+[^ ]*/CALL/.*$"
+ "^brstack_bench\+[^ ]*/brstack_foo\+[^ ]*/CALL/.*$"
+ "^brstack_bench\+[^ ]*/brstack_bar\+[^ ]*/CALL/.*$"
+ "^brstack_bar\+[^ ]*/brstack_foo\+[^ ]*/RET/.*$"
+ "^brstack_foo\+[^ ]*/brstack_bench\+[^ ]*/RET/.*$"
+ "^brstack_bench\+[^ ]*/brstack_bench\+[^ ]*/COND/.*$"
+ "^brstack\+[^ ]*/brstack\+[^ ]*/UNCOND/.*$"
+ )
+ for x in "${expected[@]}"
+ do
+ check_branches "$x"
+ done
+
+ # Dump addresses only this time
+ perf script -i "$TMPDIR/perf.data" --fields brstack | \
+ tr ' ' '\n' > "$TMPDIR/perf.script"
+
+ # There should be no kernel addresses with the u option, in either
+ # source or target addresses.
+ if grep -E -m1 "0x[89a-f][0-9a-f]{15}" $TMPDIR/perf.script; then
+ echo "ERROR: Kernel address found in user mode"
+ err=1
+ fi
# some branch types are still not being tested:
- # IND COND_CALL COND_RET SYSCALL SYSRET IRQ SERROR NO_TX
+ # IND COND_CALL COND_RET SYSRET SERROR NO_TX
+}
+
+test_trap_eret_branches() {
+ echo "Testing trap & eret branches"
+ if ! is_arm64; then
+ echo "skip: not arm64"
+ else
+ perf record -o $TMPDIR/perf.data --branch-filter any,save_type,u,k -- \
+ perf test -w traploop 1000
+ perf script -i $TMPDIR/perf.data --fields brstacksym | \
+ tr ' ' '\n' > $TMPDIR/perf.script
+
+ # BRBINF<n>.TYPE == TRAP are mapped to PERF_BR_IRQ by the BRBE driver
+ check_branches "^trap_bench\+[^ ]+/[^ ]/IRQ/"
+ check_branches "^[^ ]+/trap_bench\+[^ ]+/ERET/"
+ fi
+}
+
+test_kernel_branches() {
+ echo "Testing that k option only includes kernel source addresses"
+
+ if ! perf record --branch-filter any,k -o- -- true > /dev/null; then
+ echo "skip: not enough privileges"
+ else
+ perf record -o $TMPDIR/perf.data --branch-filter any,k -- \
+ perf bench syscall basic --loop 1000
+ perf script -i $TMPDIR/perf.data --fields brstack | \
+ tr ' ' '\n' > $TMPDIR/perf.script
+
+ # Example of branch entries:
+ # "0xffffffff93bda241/0xffffffff93bda20f/M/-/-/..."
+ # Source addresses come first and target address can be either
+ # userspace or kernel even with k option, as long as the source
+ # is in kernel.
+
+ # Look for source addresses with the top bit set
+ if ! grep -E -m1 "^0x[89a-f][0-9a-f]{15}" $TMPDIR/perf.script; then
+ echo "ERROR: Kernel branches missing"
+ err=1
+ fi
+ # Ensure there are no source addresses without the top bit set
+ if grep -E -m1 "^0x[0-7][0-9a-f]{0,15}" $TMPDIR/perf.script; then
+ echo "ERROR: User branches found with kernel filter"
+ err=1
+ fi
+ fi
}
# first argument <arg0> is the argument passed to "--branch-stack <arg0>,save_type,u"
@@ -57,26 +136,67 @@ test_filter() {
test_filter_expect=$2
echo "Testing branch stack filtering permutation ($test_filter_filter,$test_filter_expect)"
-
- perf record -o $TMPDIR/perf.data --branch-filter $test_filter_filter,save_type,u -- ${TESTPROG} > /dev/null 2>&1
- perf script -i $TMPDIR/perf.data --fields brstack | tr -s ' ' '\n' | grep '.' > $TMPDIR/perf.script
+ perf record -o "$TMPDIR/perf.data" --branch-filter "$test_filter_filter,save_type,u" -- ${TESTPROG} > "$TMPDIR/record.txt" 2>&1
+ perf script -i "$TMPDIR/perf.data" --fields brstack > "$TMPDIR/perf.script"
# fail if we find any branch type that doesn't match any of the expected ones
# also consider UNKNOWN branch types (-)
- if grep -E -vm1 "^[^ ]*/($test_filter_expect|-|( *))/.*$" $TMPDIR/perf.script; then
- return 1
+ if [ ! -s "$TMPDIR/perf.script" ]
+ then
+ echo "Empty script output"
+ err=1
+ return
+ fi
+ # Look for lines not matching test_filter_expect; grep finding nothing
+ # is not an error here (hence the || true)
+ tr -s ' ' '\n' < "$TMPDIR/perf.script" | grep '.' | \
+ grep -E -vm1 "^[^ ]*/($test_filter_expect|-|( *))/.*$" \
+ > "$TMPDIR/perf.script-filtered" || true
+ if [ -s "$TMPDIR/perf.script-filtered" ]
+ then
+ echo "Unexpected branch filter in script output"
+ cat "$TMPDIR/perf.script"
+ err=1
+ return
fi
}
+test_syscall() {
+ echo "Testing syscalls"
+ # skip if perf doesn't have enough privileges
+ if ! perf record --branch-filter any,k -o- -- true > /dev/null; then
+ echo "skip: not enough privileges"
+ else
+ perf record -o $TMPDIR/perf.data --branch-filter \
+ any_call,save_type,u,k -c 10000 -- \
+ perf bench syscall basic --loop 1000
+ perf script -i $TMPDIR/perf.data --fields brstacksym | \
+ tr ' ' '\n' > $TMPDIR/perf.script
+
+ check_branches "getppid[^ ]*/SYSCALL/"
+ fi
+}
set -e
test_user_branches
+test_syscall
+test_kernel_branches
+test_trap_eret_branches
+
+any_call="CALL|IND_CALL|COND_CALL|SYSCALL|IRQ"
-test_filter "any_call" "CALL|IND_CALL|COND_CALL|SYSCALL|IRQ"
+if is_arm64; then
+ any_call="$any_call|FAULT_DATA|FAULT_INST"
+fi
+
+test_filter "any_call" "$any_call"
test_filter "call" "CALL|SYSCALL"
test_filter "cond" "COND"
test_filter "any_ret" "RET|COND_RET|SYSRET|ERET"
test_filter "call,cond" "CALL|SYSCALL|COND"
-test_filter "any_call,cond" "CALL|IND_CALL|COND_CALL|IRQ|SYSCALL|COND"
-test_filter "cond,any_call,any_ret" "COND|CALL|IND_CALL|COND_CALL|SYSCALL|IRQ|RET|COND_RET|SYSRET|ERET"
+test_filter "any_call,cond" "$any_call|COND"
+test_filter "any_call,cond,any_ret" "$any_call|COND|RET|COND_RET"
+
+cleanup
+exit $err
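The brstacksym entries being matched follow the from/to/.../type layout from
the example comment above; a quick way to tally just the branch types seen
(sketch):

  perf record --branch-filter any,save_type,u -o perf.data -- perf test -w brstack
  perf script -i perf.data --fields brstacksym | tr -s ' ' '\n' | \
    awk -F/ 'NF {print $NF}' | sort | uniq -c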
diff --git a/tools/perf/tests/shell/test_data_symbol.sh b/tools/perf/tests/shell/test_data_symbol.sh
index bbe8277496ae..d61b5659a46d 100755
--- a/tools/perf/tests/shell/test_data_symbol.sh
+++ b/tools/perf/tests/shell/test_data_symbol.sh
@@ -54,11 +54,34 @@ trap cleanup_files exit term int
echo "Recording workload..."
-# perf mem/c2c internally uses IBS PMU on AMD CPU which doesn't support
-# user/kernel filtering and per-process monitoring, spin program on
-# specific CPU and test in per-CPU mode.
is_amd=$(grep -E -c 'vendor_id.*AuthenticAMD' /proc/cpuinfo)
if (($is_amd >= 1)); then
+ mem_events="$(perf mem record -v -e list 2>&1)"
+ if ! [[ "$mem_events" =~ ^mem\-ldst.*ibs_op/(.*)/.*available ]]; then
+ echo "ERROR: mem-ldst event is not matching"
+ exit 1
+ fi
+
+ # --ldlat on AMD:
+ # o Zen4 and earlier uarch does not support ldlat
+ # o Even on supported platforms, it's disabled (--ldlat=0) by default.
+ ldlat=${BASH_REMATCH[1]}
+ if [[ -n $ldlat ]]; then
+ if ! [[ "$ldlat" =~ ldlat=0 ]]; then
+ echo "ERROR: ldlat not initialized to 0?"
+ exit 1
+ fi
+
+ mem_events="$(perf mem record -v --ldlat=150 -e list 2>&1)"
+ if ! [[ "$mem_events" =~ ^mem-ldst.*ibs_op/ldlat=150/.*available ]]; then
+ echo "ERROR: --ldlat not honored?"
+ exit 1
+ fi
+ fi
+
+ # perf mem/c2c internally uses IBS PMU on AMD CPU which doesn't
+ # support user/kernel filtering and per-process monitoring on older
+ # kernels, spin program on specific CPU and test in per-CPU mode.
perf mem record -vvv -o ${PERF_DATA} -C 0 -- taskset -c 0 $TEST_PROGRAM 2>"${ERR_FILE}"
else
perf mem record -vvv --all-user -o ${PERF_DATA} -- $TEST_PROGRAM 2>"${ERR_FILE}"
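The ldlat capture above leans on bash's [[ =~ ]] filling BASH_REMATCH; a
minimal illustration with a made-up line in the same shape as the
"-e list" output:

  line="mem-ldst : ibs_op/ldlat=0/ available"    # hypothetical
  if [[ "$line" =~ ibs_op/(.*)/ ]]; then
    echo "captured term: ${BASH_REMATCH[1]}"     # -> ldlat=0
  fi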
diff --git a/tools/perf/tests/shell/test_event_open_fallback.sh b/tools/perf/tests/shell/test_event_open_fallback.sh
new file mode 100755
index 000000000000..9420a7557c13
--- /dev/null
+++ b/tools/perf/tests/shell/test_event_open_fallback.sh
@@ -0,0 +1,71 @@
+#!/bin/bash
+# Perf event open fallback test
+# SPDX-License-Identifier: GPL-2.0
+
+skip_cnt=0
+ok_cnt=0
+err_cnt=0
+
+perf_record()
+{
+ perf record -o /dev/null "$@" -- true 1>/dev/null 2>&1
+}
+
+test_decrease_precise_ip()
+{
+ echo "Decrease precise ip test"
+
+ perf list pmu | grep -q 'cycles' || return 2
+
+ if ! perf_record -e cycles; then
+ return 2
+ fi
+
+ # It should reduce precision level down to 0 if needed.
+ if ! perf_record -e cycles:P; then
+ return 1
+ fi
+ return 0
+}
+
+test_decrease_precise_ip_complicated()
+{
+ echo "Decrease precise ip test (complicated case)"
+
+ perf list pmu | grep -q 'mem-loads-aux' || return 2
+
+ if ! perf_record -e '{mem-loads-aux:S,mem-loads:PS}'; then
+ return 1
+ fi
+ return 0
+}
+
+count_result()
+{
+ if [ "$1" -eq 2 ] ; then
+ skip_cnt=$((skip_cnt + 1))
+ return
+ fi
+ if [ "$1" -eq 0 ] ; then
+ ok_cnt=$((ok_cnt + 1))
+ return
+ fi
+ err_cnt=$((err_cnt + 1))
+}
+
+ret=0
+test_decrease_precise_ip || ret=$? ; count_result $ret ; ret=0
+test_decrease_precise_ip_complicated || ret=$? ; count_result $ret ; ret=0
+
+if [ ${err_cnt} -gt 0 ] ; then
+ exit 1
+fi
+
+if [ ${ok_cnt} -gt 0 ] ; then
+ exit 0
+fi
+
+# Skip
+exit 2
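The fallback itself can be watched in the verbose attr dump (sketch; exact
field formatting varies across perf versions):

  # precise_ip is lowered until perf_event_open() accepts the attribute.
  perf record -vv -e cycles:P -o /dev/null true 2>&1 | grep -m1 precise_ip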
diff --git a/tools/perf/tests/shell/test_intel_pt.sh b/tools/perf/tests/shell/test_intel_pt.sh
index f3a9a040bacc..8ee761f03c38 100755
--- a/tools/perf/tests/shell/test_intel_pt.sh
+++ b/tools/perf/tests/shell/test_intel_pt.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# Miscellaneous Intel PT testing (exclusive)
# SPDX-License-Identifier: GPL-2.0
@@ -288,6 +288,11 @@ test_jitdump()
jitdump_incl_dir="${script_dir}/../../util"
jitdump_h="${jitdump_incl_dir}/jitdump.h"
+ if ! perf check feature -q libelf ; then
+ echo "SKIP: libelf is needed for jitdump"
+ return 2
+ fi
+
if [ ! -e "${jitdump_h}" ] ; then
echo "SKIP: Include file jitdump.h not found"
return 2
diff --git a/tools/perf/tests/shell/timechart.sh b/tools/perf/tests/shell/timechart.sh
new file mode 100755
index 000000000000..b14b3472c284
--- /dev/null
+++ b/tools/perf/tests/shell/timechart.sh
@@ -0,0 +1,67 @@
+#!/bin/bash
+# perf timechart tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+perfdata=$(mktemp /tmp/__perf_timechart_test.perf.data.XXXXX)
+output=$(mktemp /tmp/__perf_timechart_test.output.XXXXX.svg)
+
+cleanup() {
+ rm -f "${perfdata}"
+ rm -f "${output}"
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+test_timechart() {
+ echo "Basic perf timechart test"
+
+ # Try to record timechart data.
+ # perf timechart record uses system-wide recording and specific tracepoints.
+ # If it fails (e.g. permissions, missing tracepoints), skip the test.
+ if ! perf timechart record -o "${perfdata}" true > /dev/null 2>&1; then
+ echo "Basic perf timechart test [Skipped: perf timechart record failed (permissions/events?)]"
+ return
+ fi
+
+ # Generate the timechart
+ if ! perf timechart -i "${perfdata}" -o "${output}" > /dev/null 2>&1; then
+ echo "Basic perf timechart test [Failed: perf timechart command failed]"
+ err=1
+ return
+ fi
+
+ # Check if output file exists and is not empty
+ if [ ! -s "${output}" ]; then
+ echo "Basic perf timechart test [Failed: output file is empty or missing]"
+ err=1
+ return
+ fi
+
+ # Check if it looks like an SVG
+ if ! grep -q "svg" "${output}"; then
+ echo "Basic perf timechart test [Failed: output doesn't look like SVG]"
+ err=1
+ return
+ fi
+
+ echo "Basic perf timechart test [Success]"
+}
+
+if ! perf check feature -q libtraceevent ; then
+ echo "perf timechart is not supported. Skip."
+ cleanup
+ exit 2
+fi
+
+test_timechart
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/top.sh b/tools/perf/tests/shell/top.sh
new file mode 100755
index 000000000000..768ebcf7a89c
--- /dev/null
+++ b/tools/perf/tests/shell/top.sh
@@ -0,0 +1,74 @@
+#!/bin/bash
+# perf top tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+log_file=$(mktemp /tmp/perf.top.log.XXXXX)
+
+cleanup() {
+ rm -f "${log_file}"
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+test_basic_perf_top() {
+ echo "Basic perf top test"
+
+ # Start a workload that spins to generate samples
+ # thloop runs for the specified number of seconds
+ perf test -w thloop 20 &
+ PID=$!
+
+ # Allow it to start
+ sleep 0.1
+
+ # Run perf top for 5 seconds, monitoring that PID
+ # Use --stdio to avoid TUI and redirect output
+ # Use -d 1 to avoid flooding output
+ # Use -e cpu-clock to ensure we get samples
+ # Use sleep to keep stdin open but silent, preventing EOF loop or interactive spam
+ if ! sleep 10 | timeout 5s perf top --stdio -d 1 -e cpu-clock -p $PID > "${log_file}" 2>&1; then
+ retval=$?
+ if [ $retval -ne 124 ] && [ $retval -ne 0 ]; then
+ echo "Basic perf top test [Failed: perf top failed to start or run (ret=$retval)]"
+ head -n 50 "${log_file}"
+ kill $PID
+ wait $PID 2>/dev/null || true
+ err=1
+ return
+ fi
+ fi
+
+ kill $PID
+ wait $PID 2>/dev/null || true
+
+ # Check for some sample data (percentage)
+ if ! grep -E -q "[0-9]+\.[0-9]+%" "${log_file}"; then
+ echo "Basic perf top test [Failed: no sample percentage found]"
+ head -n 50 "${log_file}"
+ err=1
+ return
+ fi
+
+ # Check for the symbol
+ if ! grep -q "test_loop" "${log_file}"; then
+ echo "Basic perf top test [Failed: test_loop symbol not found]"
+ head -n 50 "${log_file}"
+ err=1
+ return
+ fi
+
+ echo "Basic perf top test [Success]"
+}
+
+test_basic_perf_top
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
index 60fccb62c540..7a0b1145d0cd 100755
--- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# Check open filename arg using perf trace + vfs_getname (exclusive)
# Uses the 'perf test shell' library to add probe:vfs_getname to the system
@@ -25,9 +25,14 @@ trace_open_vfs_getname() {
grep -E " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch/[0-9]+ open(at)?\((dfd: +CWD, +)?filename: +\"?${file}\"?, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
}
-
-add_probe_vfs_getname || skip_if_no_debuginfo
+add_probe_vfs_getname
err=$?
+
+if [ $err -eq 1 ] ; then
+ skip_if_no_debuginfo
+ err=$?
+fi
+
if [ $err -ne 0 ] ; then
exit $err
fi
diff --git a/tools/perf/tests/shell/trace_btf_enum.sh b/tools/perf/tests/shell/trace_btf_enum.sh
index f0b49f7fb57d..03e9f680a4a6 100755
--- a/tools/perf/tests/shell/trace_btf_enum.sh
+++ b/tools/perf/tests/shell/trace_btf_enum.sh
@@ -1,12 +1,11 @@
-#!/bin/sh
+#!/bin/bash
# perf trace enum augmentation tests
# SPDX-License-Identifier: GPL-2.0
err=0
-set -e
syscall="landlock_add_rule"
-non_syscall="timer:hrtimer_setup,timer:hrtimer_start"
+non_syscall="timer:hrtimer_start"
TESTPROG="perf test -w landlock"
@@ -17,13 +16,21 @@ skip_if_no_perf_trace || exit 2
check_vmlinux() {
echo "Checking if vmlinux exists"
- if ! ls /sys/kernel/btf/vmlinux 1>/dev/null 2>&1
+ if [ ! -f /sys/kernel/btf/vmlinux ]
then
echo "trace+enum test [Skipped missing vmlinux BTF support]"
err=2
fi
}
+check_permissions() {
+ if perf trace -e $syscall $TESTPROG 2>&1 | grep -q "Operation not permitted"
+ then
+ echo "trace+enum test [Skipped permissions]"
+ err=2
+ fi
+}
+
trace_landlock() {
echo "Tracing syscall ${syscall}"
@@ -34,27 +41,32 @@ trace_landlock() {
return
fi
- if perf trace -e $syscall $TESTPROG 2>&1 | \
- grep -q -E ".*landlock_add_rule\(ruleset_fd: 11, rule_type: (LANDLOCK_RULE_PATH_BENEATH|LANDLOCK_RULE_NET_PORT), rule_attr: 0x[a-f0-9]+, flags: 45\) = -1.*"
+ output="$(perf trace -e $syscall $TESTPROG 2>&1)"
+ if echo "$output" | grep -q -E ".*landlock_add_rule\(ruleset_fd: 11, rule_type: (LANDLOCK_RULE_PATH_BENEATH|LANDLOCK_RULE_NET_PORT), rule_attr: 0x[a-f0-9]+, flags: 45\) = -1.*"
then
err=0
else
+ printf "[syscall failure] Failed to trace syscall $syscall, output:\n$output\n"
err=1
fi
}
trace_non_syscall() {
- echo "Tracing non-syscall tracepoint ${non-syscall}"
- if perf trace -e $non_syscall --max-events=1 2>&1 | \
- grep -q -E '.*timer:hrtimer_.*\(.*mode: HRTIMER_MODE_.*\)$'
+ echo "Tracing non-syscall tracepoint ${non_syscall}"
+ output="$(perf trace -e $non_syscall --max-events=1 2>&1)"
+ if echo "$output" | grep -q -E '.*timer:hrtimer_.*\(.*mode: HRTIMER_MODE_.*\)$'
then
err=0
else
+ printf "[tracepoint failure] Failed to trace tracepoint $non_syscall, output:\n$output\n"
err=1
fi
}
check_vmlinux
+if [ $err = 0 ]; then
+ check_permissions
+fi
if [ $err = 0 ]; then
trace_landlock
diff --git a/tools/perf/tests/shell/trace_btf_general.sh b/tools/perf/tests/shell/trace_btf_general.sh
index a25d8744695e..ef2da806be6b 100755
--- a/tools/perf/tests/shell/trace_btf_general.sh
+++ b/tools/perf/tests/shell/trace_btf_general.sh
@@ -3,7 +3,6 @@
# SPDX-License-Identifier: GPL-2.0
err=0
-set -e
# shellcheck source=lib/probe.sh
. "$(dirname $0)"/lib/probe.sh
@@ -28,10 +27,10 @@ check_vmlinux() {
trace_test_string() {
echo "Testing perf trace's string augmentation"
- if ! perf trace -e renameat* --max-events=1 -- mv ${file1} ${file2} 2>&1 | \
- grep -q -E "^mv/[0-9]+ renameat(2)?\(.*, \"${file1}\", .*, \"${file2}\", .*\) += +[0-9]+$"
+ output="$(perf trace --sort-events -e renameat* --max-events=1 -- mv ${file1} ${file2} 2>&1)"
+ if ! echo "$output" | grep -q -E "^mv/[0-9]+ renameat(2)?\(.*, \"${file1}\", .*, \"${file2}\", .*\) += +[0-9]+$"
then
- echo "String augmentation test failed"
+ printf "String augmentation test failed, output:\n$output\n"
err=1
fi
}
@@ -39,20 +38,20 @@ trace_test_string() {
trace_test_buffer() {
echo "Testing perf trace's buffer augmentation"
# echo will insert a newline (\10) at the end of the buffer
- if ! perf trace -e write --max-events=1 -- echo "${buffer}" 2>&1 | \
- grep -q -E "^echo/[0-9]+ write\([0-9]+, ${buffer}.*, [0-9]+\) += +[0-9]+$"
+ output="$(perf trace --sort-events -e write --max-events=1 -- echo "${buffer}" 2>&1)"
+ if ! echo "$output" | grep -q -E "^echo/[0-9]+ write\([0-9]+, ${buffer}.*, [0-9]+\) += +[0-9]+$"
then
- echo "Buffer augmentation test failed"
+ printf "Buffer augmentation test failed, output:\n$output\n"
err=1
fi
}
trace_test_struct_btf() {
echo "Testing perf trace's struct augmentation"
- if ! perf trace -e clock_nanosleep --force-btf --max-events=1 -- sleep 1 2>&1 | \
- grep -q -E "^sleep/[0-9]+ clock_nanosleep\(0, 0, \{1,\}, 0x[0-9a-f]+\) += +[0-9]+$"
+ output="$(perf trace --sort-events -e clock_nanosleep --force-btf --max-events=1 -- sleep 1 2>&1)"
+ if ! echo "$output" | grep -q -E "^sleep/[0-9]+ clock_nanosleep\(0, 0, \{1,.*\}, 0x[0-9a-f]+\) += +[0-9]+$"
then
- echo "BTF struct augmentation test failed"
+ printf "BTF struct augmentation test failed, output:\n$output\n"
err=1
fi
}
diff --git a/tools/perf/tests/shell/trace_exit_race.sh b/tools/perf/tests/shell/trace_exit_race.sh
index 1e247693e756..db300cde94fb 100755
--- a/tools/perf/tests/shell/trace_exit_race.sh
+++ b/tools/perf/tests/shell/trace_exit_race.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf trace exit race
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/trace_record_replay.sh b/tools/perf/tests/shell/trace_record_replay.sh
index 6b4ed863c1ef..88d30a03dcec 100755
--- a/tools/perf/tests/shell/trace_record_replay.sh
+++ b/tools/perf/tests/shell/trace_record_replay.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf trace record and replay
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/tests/shell/trace_summary.sh b/tools/perf/tests/shell/trace_summary.sh
new file mode 100755
index 000000000000..22e2651d5919
--- /dev/null
+++ b/tools/perf/tests/shell/trace_summary.sh
@@ -0,0 +1,77 @@
+#!/bin/bash
+# perf trace summary (exclusive)
+# SPDX-License-Identifier: GPL-2.0
+
+# Check that perf trace works with various summary mode
+
+# shellcheck source=lib/probe.sh
+. "$(dirname $0)"/lib/probe.sh
+
+skip_if_no_perf_trace || exit 2
+[ "$(id -u)" = 0 ] || exit 2
+
+OUTPUT=$(mktemp /tmp/perf_trace_test.XXXXX)
+
+test_perf_trace() {
+ args=$1
+ workload="true"
+ search="^\s*(open|read|close).*[0-9]+%$"
+
+ echo "testing: perf trace ${args} -- ${workload}"
+ perf trace ${args} -- ${workload} >${OUTPUT} 2>&1
+ if [ $? -ne 0 ]; then
+ echo "Error: perf trace ${args} failed unexpectedly"
+ cat ${OUTPUT}
+ rm -f ${OUTPUT}
+ exit 1
+ fi
+
+ count=$(grep -E -c -m 3 "${search}" ${OUTPUT})
+ if [ "${count}" != "3" ]; then
+ echo "Error: cannot find enough pattern ${search} in the output"
+ cat ${OUTPUT}
+ rm -f ${OUTPUT}
+ exit 1
+ fi
+}
+
+# summary only for a process
+test_perf_trace "-s"
+
+# normal output with summary at the end
+test_perf_trace "-S"
+
+# summary only with an explicit summary mode
+test_perf_trace "-s --summary-mode=thread"
+
+# summary with normal output - total summary mode
+test_perf_trace "-S --summary-mode=total"
+
+# summary only for system wide - per-thread summary
+test_perf_trace "-as --summary-mode=thread --no-bpf-summary"
+
+# summary only for system wide - total summary mode
+test_perf_trace "-as --summary-mode=total --no-bpf-summary"
+
+if ! perf check feature -q bpf; then
+ echo "Skip --bpf-summary tests as perf built without libbpf"
+ rm -f ${OUTPUT}
+ exit 2
+fi
+
+# summary only for system wide - per-thread summary with BPF
+test_perf_trace "-as --summary-mode=thread --bpf-summary"
+
+# summary only for system wide - total summary mode with BPF
+test_perf_trace "-as --summary-mode=total --bpf-summary"
+
+# summary with normal output for system wide - total summary mode with BPF
+test_perf_trace "-aS --summary-mode=total --bpf-summary"
+
+# summary only for system wide - cgroup summary mode with BPF
+test_perf_trace "-as --summary-mode=cgroup --bpf-summary"
+
+# summary with normal output for system wide - cgroup summary mode with BPF
+test_perf_trace "-aS --summary-mode=cgroup --bpf-summary"
+
+rm -f ${OUTPUT}
diff --git a/tools/perf/tests/subcmd-help.c b/tools/perf/tests/subcmd-help.c
new file mode 100644
index 000000000000..2280b4c0e5e7
--- /dev/null
+++ b/tools/perf/tests/subcmd-help.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "tests.h"
+#include <linux/compiler.h>
+#include <subcmd/help.h>
+
+static int test__load_cmdnames(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ struct cmdnames cmds = {};
+
+ add_cmdname(&cmds, "aaa", 3);
+ add_cmdname(&cmds, "foo", 3);
+ add_cmdname(&cmds, "xyz", 3);
+
+ TEST_ASSERT_VAL("cannot find cmd", is_in_cmdlist(&cmds, "aaa") == 1);
+ TEST_ASSERT_VAL("wrong cmd", is_in_cmdlist(&cmds, "bar") == 0);
+ TEST_ASSERT_VAL("case sensitive", is_in_cmdlist(&cmds, "XYZ") == 0);
+
+ clean_cmdnames(&cmds);
+ return TEST_OK;
+}
+
+static int test__uniq_cmdnames(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ struct cmdnames cmds = {};
+
+ /* uniq() assumes it's sorted */
+ add_cmdname(&cmds, "aaa", 3);
+ add_cmdname(&cmds, "aaa", 3);
+ add_cmdname(&cmds, "bbb", 3);
+
+ TEST_ASSERT_VAL("invalid original size", cmds.cnt == 3);
+ /* uniquify command names (to remove second 'aaa') */
+ uniq(&cmds);
+ TEST_ASSERT_VAL("invalid final size", cmds.cnt == 2);
+
+ TEST_ASSERT_VAL("cannot find cmd", is_in_cmdlist(&cmds, "aaa") == 1);
+ TEST_ASSERT_VAL("cannot find cmd", is_in_cmdlist(&cmds, "bbb") == 1);
+ TEST_ASSERT_VAL("wrong cmd", is_in_cmdlist(&cmds, "ccc") == 0);
+
+ clean_cmdnames(&cmds);
+ return TEST_OK;
+}
+
+static int test__exclude_cmdnames(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ struct cmdnames cmds1 = {};
+ struct cmdnames cmds2 = {};
+
+ add_cmdname(&cmds1, "aaa", 3);
+ add_cmdname(&cmds1, "bbb", 3);
+ add_cmdname(&cmds1, "ccc", 3);
+ add_cmdname(&cmds1, "ddd", 3);
+ add_cmdname(&cmds1, "eee", 3);
+ add_cmdname(&cmds1, "fff", 3);
+ add_cmdname(&cmds1, "ggg", 3);
+ add_cmdname(&cmds1, "hhh", 3);
+ add_cmdname(&cmds1, "iii", 3);
+ add_cmdname(&cmds1, "jjj", 3);
+
+ add_cmdname(&cmds2, "bbb", 3);
+ add_cmdname(&cmds2, "eee", 3);
+ add_cmdname(&cmds2, "jjj", 3);
+
+ TEST_ASSERT_VAL("invalid original size", cmds1.cnt == 10);
+ TEST_ASSERT_VAL("invalid original size", cmds2.cnt == 3);
+
+ /* remove duplicate command names in cmds1 */
+ exclude_cmds(&cmds1, &cmds2);
+
+ TEST_ASSERT_VAL("invalid excluded size", cmds1.cnt == 7);
+ TEST_ASSERT_VAL("invalid excluded size", cmds2.cnt == 3);
+
+ /* excluded commands should not belong to cmds1 */
+ TEST_ASSERT_VAL("cannot find cmd", is_in_cmdlist(&cmds1, "aaa") == 1);
+ TEST_ASSERT_VAL("wrong cmd", is_in_cmdlist(&cmds1, "bbb") == 0);
+ TEST_ASSERT_VAL("cannot find cmd", is_in_cmdlist(&cmds1, "ccc") == 1);
+ TEST_ASSERT_VAL("cannot find cmd", is_in_cmdlist(&cmds1, "ddd") == 1);
+ TEST_ASSERT_VAL("wrong cmd", is_in_cmdlist(&cmds1, "eee") == 0);
+ TEST_ASSERT_VAL("cannot find cmd", is_in_cmdlist(&cmds1, "fff") == 1);
+ TEST_ASSERT_VAL("cannot find cmd", is_in_cmdlist(&cmds1, "ggg") == 1);
+ TEST_ASSERT_VAL("cannot find cmd", is_in_cmdlist(&cmds1, "hhh") == 1);
+ TEST_ASSERT_VAL("cannot find cmd", is_in_cmdlist(&cmds1, "iii") == 1);
+ TEST_ASSERT_VAL("wrong cmd", is_in_cmdlist(&cmds1, "jjj") == 0);
+
+ /* they should be only in cmds2 */
+ TEST_ASSERT_VAL("cannot find cmd", is_in_cmdlist(&cmds2, "bbb") == 1);
+ TEST_ASSERT_VAL("cannot find cmd", is_in_cmdlist(&cmds2, "eee") == 1);
+ TEST_ASSERT_VAL("cannot find cmd", is_in_cmdlist(&cmds2, "jjj") == 1);
+
+ clean_cmdnames(&cmds1);
+ clean_cmdnames(&cmds2);
+ return TEST_OK;
+}
+
+static struct test_case tests__subcmd_help[] = {
+ TEST_CASE("Load subcmd names", load_cmdnames),
+ TEST_CASE("Uniquify subcmd names", uniq_cmdnames),
+ TEST_CASE("Exclude duplicate subcmd names", exclude_cmdnames),
+ { .name = NULL, }
+};
+
+struct test_suite suite__subcmd_help = {
+ .desc = "libsubcmd help tests",
+ .test_cases = tests__subcmd_help,
+};
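These cases run under the normal suite runner, matched by description
substring, e.g.:

  perf test -v 'libsubcmd help'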
diff --git a/tools/perf/tests/switch-tracking.c b/tools/perf/tests/switch-tracking.c
index 8df3f9d9ffd2..15791fcb76b2 100644
--- a/tools/perf/tests/switch-tracking.c
+++ b/tools/perf/tests/switch-tracking.c
@@ -264,7 +264,7 @@ static int compar(const void *a, const void *b)
const struct event_node *nodeb = b;
s64 cmp = nodea->event_time - nodeb->event_time;
- return cmp;
+ return cmp < 0 ? -1 : (cmp > 0 ? 1 : 0);
}
static int process_events(struct evlist *evlist,
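The clamp above matters because compar() returns int: truncating the s64
difference can zero out or sign-flip large deltas and confuse qsort(). A
quick illustration of the truncation (bash arithmetic is 64-bit, so mask
explicitly):

  delta=$((1 << 32))                              # two event times 2^32 ns apart
  echo "low 32 bits: $(( delta & 0xffffffff ))"   # -> 0, i.e. wrongly "equal"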
@@ -332,7 +332,7 @@ out_free_nodes:
static int test__switch_tracking(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
const char *sched_switch = "sched:sched_switch";
- const char *cycles = "cycles:u";
+ const char *cycles = "cpu-cycles:u";
struct switch_tracking switch_tracking = { .tids = NULL, };
struct record_opts opts = {
.mmap_pages = UINT_MAX,
@@ -351,7 +351,7 @@ static int test__switch_tracking(struct test_suite *test __maybe_unused, int sub
const char *comm;
int err = -1;
- threads = thread_map__new(-1, getpid(), UINT_MAX);
+ threads = thread_map__new_by_tid(getpid());
if (!threads) {
pr_debug("thread_map__new failed!\n");
goto out_err;
diff --git a/tools/perf/tests/symbols.c b/tools/perf/tests/symbols.c
index ee20a366f32f..f4ffe5804f40 100644
--- a/tools/perf/tests/symbols.c
+++ b/tools/perf/tests/symbols.c
@@ -5,6 +5,7 @@
#include <limits.h>
#include "debug.h"
#include "dso.h"
+#include "env.h"
#include "machine.h"
#include "thread.h"
#include "symbol.h"
@@ -13,15 +14,18 @@
#include "tests.h"
struct test_info {
+ struct perf_env host_env;
struct machine *machine;
struct thread *thread;
};
static int init_test_info(struct test_info *ti)
{
- ti->machine = machine__new_host();
+ perf_env__init(&ti->host_env);
+ ti->machine = machine__new_host(&ti->host_env);
if (!ti->machine) {
pr_debug("machine__new_host() failed!\n");
+ perf_env__exit(&ti->host_env);
return TEST_FAIL;
}
@@ -29,6 +33,7 @@ static int init_test_info(struct test_info *ti)
ti->thread = machine__findnew_thread(ti->machine, 100, 100);
if (!ti->thread) {
pr_debug("machine__findnew_thread() failed!\n");
+ perf_env__exit(&ti->host_env);
return TEST_FAIL;
}
@@ -39,6 +44,7 @@ static void exit_test_info(struct test_info *ti)
{
thread__put(ti->thread);
machine__delete(ti->machine);
+ perf_env__exit(&ti->host_env);
}
struct dso_map {
@@ -96,8 +102,8 @@ static int create_map(struct test_info *ti, char *filename, struct map **map_p)
dso__put(dso);
/* Create a dummy map at 0x100000 */
- *map_p = map__new(ti->machine, 0x100000, 0xffffffff, 0, NULL,
- PROT_EXEC, 0, NULL, filename, ti->thread);
+ *map_p = map__new(ti->machine, 0x100000, 0xffffffff, 0, &dso_id_empty,
+ PROT_EXEC, /*flags=*/0, filename, ti->thread);
if (!*map_p) {
pr_debug("Failed to create map!");
return TEST_FAIL;
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index 8e328bbd509d..4053ff2813bb 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -46,7 +46,6 @@ static int test__task_exit(struct test_suite *test __maybe_unused, int subtest _
struct evsel *evsel;
struct evlist *evlist;
struct target target = {
- .uid = UINT_MAX,
.uses_mmap = true,
};
const char *argv[] = { "true", NULL };
diff --git a/tools/perf/tests/tests-scripts.c b/tools/perf/tests/tests-scripts.c
index 1d5759d08141..f18c4cd337c8 100644
--- a/tools/perf/tests/tests-scripts.c
+++ b/tools/perf/tests/tests-scripts.c
@@ -85,7 +85,7 @@ static char *shell_test__description(int dir_fd, const char *name)
if (io.fd < 0)
return NULL;
- /* Skip first line - should be #!/bin/sh Shebang */
+ /* Skip first line - should be #!/bin/bash Shebang */
if (io__get_char(&io) != '#')
goto err_out;
if (io__get_char(&io) != '!')
@@ -260,6 +260,7 @@ static void append_scripts_in_dir(int dir_fd,
continue; /* Skip scripts that have a separate driver. */
fd = openat(dir_fd, ent->d_name, O_PATH);
append_scripts_in_dir(fd, result, result_sz);
+ close(fd);
}
for (i = 0; i < n_dirs; i++) /* Clean up */
zfree(&entlist[i]);
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index 8aea344536b8..cb67ddbd0375 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -3,6 +3,7 @@
#define TESTS_H
#include <stdbool.h>
+#include "util/debug.h"
enum {
TEST_OK = 0,
@@ -71,6 +72,15 @@ struct test_suite {
.exclusive = true, \
}
+#define TEST_CASE_REASON_EXCLUSIVE(description, _name, _reason) \
+ { \
+ .name = #_name, \
+ .desc = description, \
+ .run_case = test__##_name, \
+ .skip_reason = _reason, \
+ .exclusive = true, \
+ }
+
#define DEFINE_SUITE(description, _name) \
struct test_case tests__##_name[] = { \
TEST_CASE(description, _name), \
@@ -110,7 +120,6 @@ DECLARE_SUITE(dso_data_cache);
DECLARE_SUITE(dso_data_reopen);
DECLARE_SUITE(parse_events);
DECLARE_SUITE(hists_link);
-DECLARE_SUITE(python_use);
DECLARE_SUITE(bp_signal);
DECLARE_SUITE(bp_signal_overflow);
DECLARE_SUITE(bp_accounting);
@@ -151,12 +160,13 @@ DECLARE_SUITE(bitmap_print);
DECLARE_SUITE(perf_hooks);
DECLARE_SUITE(unit_number__scnprint);
DECLARE_SUITE(mem2node);
-DECLARE_SUITE(maps__merge_in);
+DECLARE_SUITE(maps);
DECLARE_SUITE(time_utils);
DECLARE_SUITE(jit_write_elf);
DECLARE_SUITE(api_io);
DECLARE_SUITE(demangle_java);
DECLARE_SUITE(demangle_ocaml);
+DECLARE_SUITE(demangle_rust);
DECLARE_SUITE(pfm);
DECLARE_SUITE(parse_metric);
DECLARE_SUITE(pe_file_parsing);
@@ -167,6 +177,8 @@ DECLARE_SUITE(sigtrap);
DECLARE_SUITE(event_groups);
DECLARE_SUITE(symbols);
DECLARE_SUITE(util);
+DECLARE_SUITE(subcmd_help);
+DECLARE_SUITE(kallsyms_split);
/*
* PowerPC and S390 do not support creation of instruction breakpoints using the
@@ -227,6 +239,7 @@ DECLARE_WORKLOAD(sqrtloop);
DECLARE_WORKLOAD(brstack);
DECLARE_WORKLOAD(datasym);
DECLARE_WORKLOAD(landlock);
+DECLARE_WORKLOAD(traploop);
extern const char *dso_to_test;
extern const char *test_objdump_path;
diff --git a/tools/perf/tests/thread-map.c b/tools/perf/tests/thread-map.c
index 1fe521466bf4..54209592168d 100644
--- a/tools/perf/tests/thread-map.c
+++ b/tools/perf/tests/thread-map.c
@@ -115,7 +115,7 @@ static int test__thread_map_remove(struct test_suite *test __maybe_unused, int s
TEST_ASSERT_VAL("failed to allocate map string",
asprintf(&str, "%d,%d", getpid(), getppid()) >= 0);
- threads = thread_map__new_str(str, NULL, 0, false);
+ threads = thread_map__new_str(str, /*tid=*/NULL, /*all_threads=*/false);
free(str);
TEST_ASSERT_VAL("failed to allocate thread_map",
diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c
index a8cb5ba898ab..ec01150d208d 100644
--- a/tools/perf/tests/topology.c
+++ b/tools/perf/tests/topology.c
@@ -43,6 +43,7 @@ static int session_write_header(char *path)
session->evlist = evlist__new_default();
TEST_ASSERT_VAL("can't get evlist", session->evlist);
+ session->evlist->session = session;
perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY);
perf_header__set_feat(&session->header, HEADER_NRCPUS);
@@ -69,9 +70,11 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map)
int i;
struct aggr_cpu_id id;
struct perf_cpu cpu;
+ struct perf_env *env;
session = perf_session__new(&data, NULL);
TEST_ASSERT_VAL("can't get session", !IS_ERR(session));
+ env = perf_session__env(session);
cpu__setup_cpunode_map();
/* On platforms with large numbers of CPUs process_cpu_topology()
@@ -95,9 +98,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map)
* condition is true (see do_core_id_test in header.c). So always
* run this test on those platforms.
*/
- if (!session->header.env.cpu
- && strncmp(session->header.env.arch, "s390", 4)
- && strncmp(session->header.env.arch, "aarch64", 7))
+ if (!env->cpu && strncmp(env->arch, "s390", 4) && strncmp(env->arch, "aarch64", 7))
return TEST_SKIP;
/*
@@ -106,20 +107,20 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map)
* physical_package_id will be set to -1. Hence skip this
* test if physical_package_id returns -1 for cpu from perf_cpu_map.
*/
- if (!strncmp(session->header.env.arch, "ppc64le", 7)) {
+ if (!strncmp(env->arch, "ppc64le", 7)) {
if (cpu__get_socket_id(perf_cpu_map__cpu(map, 0)) == -1)
return TEST_SKIP;
}
- TEST_ASSERT_VAL("Session header CPU map not set", session->header.env.cpu);
+ TEST_ASSERT_VAL("Session header CPU map not set", env->cpu);
- for (i = 0; i < session->header.env.nr_cpus_avail; i++) {
+ for (i = 0; i < env->nr_cpus_avail; i++) {
cpu.cpu = i;
if (!perf_cpu_map__has(map, cpu))
continue;
pr_debug("CPU %d, core %d, socket %d\n", i,
- session->header.env.cpu[i].core_id,
- session->header.env.cpu[i].socket_id);
+ env->cpu[i].core_id,
+ env->cpu[i].socket_id);
}
// Test that CPU ID contains socket, die, core and CPU
@@ -129,13 +130,12 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map)
cpu.cpu == id.cpu.cpu);
TEST_ASSERT_VAL("Cpu map - Core ID doesn't match",
- session->header.env.cpu[cpu.cpu].core_id == id.core);
+ env->cpu[cpu.cpu].core_id == id.core);
TEST_ASSERT_VAL("Cpu map - Socket ID doesn't match",
- session->header.env.cpu[cpu.cpu].socket_id ==
- id.socket);
+ env->cpu[cpu.cpu].socket_id == id.socket);
TEST_ASSERT_VAL("Cpu map - Die ID doesn't match",
- session->header.env.cpu[cpu.cpu].die_id == id.die);
+ env->cpu[cpu.cpu].die_id == id.die);
TEST_ASSERT_VAL("Cpu map - Node ID is set", id.node == -1);
TEST_ASSERT_VAL("Cpu map - Thread IDX is set", id.thread_idx == -1);
}
@@ -144,14 +144,13 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map)
perf_cpu_map__for_each_cpu(cpu, i, map) {
id = aggr_cpu_id__core(cpu, NULL);
TEST_ASSERT_VAL("Core map - Core ID doesn't match",
- session->header.env.cpu[cpu.cpu].core_id == id.core);
+ env->cpu[cpu.cpu].core_id == id.core);
TEST_ASSERT_VAL("Core map - Socket ID doesn't match",
- session->header.env.cpu[cpu.cpu].socket_id ==
- id.socket);
+ env->cpu[cpu.cpu].socket_id == id.socket);
TEST_ASSERT_VAL("Core map - Die ID doesn't match",
- session->header.env.cpu[cpu.cpu].die_id == id.die);
+ env->cpu[cpu.cpu].die_id == id.die);
TEST_ASSERT_VAL("Core map - Node ID is set", id.node == -1);
TEST_ASSERT_VAL("Core map - Thread IDX is set", id.thread_idx == -1);
}
@@ -160,11 +159,10 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map)
perf_cpu_map__for_each_cpu(cpu, i, map) {
id = aggr_cpu_id__die(cpu, NULL);
TEST_ASSERT_VAL("Die map - Socket ID doesn't match",
- session->header.env.cpu[cpu.cpu].socket_id ==
- id.socket);
+ env->cpu[cpu.cpu].socket_id == id.socket);
TEST_ASSERT_VAL("Die map - Die ID doesn't match",
- session->header.env.cpu[cpu.cpu].die_id == id.die);
+ env->cpu[cpu.cpu].die_id == id.die);
TEST_ASSERT_VAL("Die map - Node ID is set", id.node == -1);
TEST_ASSERT_VAL("Die map - Core is set", id.core == -1);
@@ -176,8 +174,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map)
perf_cpu_map__for_each_cpu(cpu, i, map) {
id = aggr_cpu_id__socket(cpu, NULL);
TEST_ASSERT_VAL("Socket map - Socket ID doesn't match",
- session->header.env.cpu[cpu.cpu].socket_id ==
- id.socket);
+ env->cpu[cpu.cpu].socket_id == id.socket);
TEST_ASSERT_VAL("Socket map - Node ID is set", id.node == -1);
TEST_ASSERT_VAL("Socket map - Die ID is set", id.die == -1);
diff --git a/tools/perf/tests/util.c b/tools/perf/tests/util.c
index 6366db5cbf8c..b273d287e164 100644
--- a/tools/perf/tests/util.c
+++ b/tools/perf/tests/util.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include "tests.h"
#include "util/debug.h"
+#include "util/sha1.h"
#include <linux/compiler.h>
#include <stdlib.h>
@@ -16,6 +17,48 @@ static int test_strreplace(char needle, const char *haystack,
return ret == 0;
}
+#define MAX_LEN 512
+
+/* Test sha1() for all lengths from 0 to MAX_LEN inclusively. */
+static int test_sha1(void)
+{
+ u8 data[MAX_LEN];
+ size_t digests_size = (MAX_LEN + 1) * SHA1_DIGEST_SIZE;
+ u8 *digests;
+ u8 digest_of_digests[SHA1_DIGEST_SIZE];
+ /*
+ * The correctness of this value was verified by running this test with
+ * sha1() replaced by OpenSSL's SHA1().
+ */
+ static const u8 expected_digest_of_digests[SHA1_DIGEST_SIZE] = {
+ 0x74, 0xcd, 0x4c, 0xb9, 0xd8, 0xa6, 0xd5, 0x95, 0x22, 0x8b,
+ 0x7e, 0xd6, 0x8b, 0x7e, 0x46, 0x95, 0x31, 0x9b, 0xa2, 0x43,
+ };
+ size_t i;
+
+ digests = malloc(digests_size);
+ TEST_ASSERT_VAL("failed to allocate digests", digests != NULL);
+
+ /* Generate MAX_LEN bytes of data. */
+ for (i = 0; i < MAX_LEN; i++)
+ data[i] = i;
+
+ /* Calculate a SHA-1 for each length 0 through MAX_LEN inclusively. */
+ for (i = 0; i <= MAX_LEN; i++)
+ sha1(data, i, &digests[i * SHA1_DIGEST_SIZE]);
+
+ /* Calculate digest of all digests calculated above. */
+ sha1(digests, digests_size, digest_of_digests);
+
+ free(digests);
+
+ /* Check for the expected result. */
+ TEST_ASSERT_VAL("wrong output from sha1()",
+ memcmp(digest_of_digests, expected_digest_of_digests,
+ SHA1_DIGEST_SIZE) == 0);
+ return 0;
+}
+
static int test__util(struct test_suite *t __maybe_unused, int subtest __maybe_unused)
{
TEST_ASSERT_VAL("empty string", test_strreplace(' ', "", "123", ""));
@@ -25,7 +68,7 @@ static int test__util(struct test_suite *t __maybe_unused, int subtest __maybe_u
TEST_ASSERT_VAL("replace long", test_strreplace('a', "abcabc", "longlong",
"longlongbclonglongbc"));
- return 0;
+ return test_sha1();
}
DEFINE_SUITE("util", util);
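[Editor's note] The digest-of-digests trick above lets a single expected constant cover every input length from 0 to MAX_LEN. As the in-code comment says, the constant was verified against OpenSSL's SHA1(); a minimal standalone sketch of that cross-check, assuming libcrypto is available (SHA_DIGEST_LENGTH is OpenSSL's name for the 20-byte SHA-1 size, and SHA1() is deprecated-but-working in OpenSSL 3):

        #include <openssl/sha.h>
        #include <stdio.h>

        #define MAX_LEN 512

        int main(void)
        {
                unsigned char data[MAX_LEN];
                unsigned char digests[(MAX_LEN + 1) * SHA_DIGEST_LENGTH];
                unsigned char final[SHA_DIGEST_LENGTH];

                for (int i = 0; i < MAX_LEN; i++)
                        data[i] = i;

                /* One digest per input length 0..MAX_LEN, as in test_sha1(). */
                for (int i = 0; i <= MAX_LEN; i++)
                        SHA1(data, i, &digests[i * SHA_DIGEST_LENGTH]);

                /* Digest of all digests, as in the test above. */
                SHA1(digests, sizeof(digests), final);

                for (int i = 0; i < SHA_DIGEST_LENGTH; i++)
                        printf("%02x", final[i]);
                printf("\n"); /* should start 74cd4cb9... if sha1() agrees */
                return 0;
        }

Build with `cc sha1-check.c -lcrypto` and compare the output against expected_digest_of_digests.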
diff --git a/tools/perf/tests/workloads/Build b/tools/perf/tests/workloads/Build
index 5af17206f04d..fb1012cc4fc3 100644
--- a/tools/perf/tests/workloads/Build
+++ b/tools/perf/tests/workloads/Build
@@ -7,8 +7,10 @@ perf-test-y += sqrtloop.o
perf-test-y += brstack.o
perf-test-y += datasym.o
perf-test-y += landlock.o
+perf-test-y += traploop.o
CFLAGS_sqrtloop.o = -g -O0 -fno-inline -U_FORTIFY_SOURCE
CFLAGS_leafloop.o = -g -O0 -fno-inline -fno-omit-frame-pointer -U_FORTIFY_SOURCE
CFLAGS_brstack.o = -g -O0 -fno-inline -U_FORTIFY_SOURCE
CFLAGS_datasym.o = -g -O0 -fno-inline -U_FORTIFY_SOURCE
+CFLAGS_traploop.o = -g -O0 -fno-inline -U_FORTIFY_SOURCE
diff --git a/tools/perf/tests/workloads/noploop.c b/tools/perf/tests/workloads/noploop.c
index 940ea5910a84..656e472e6188 100644
--- a/tools/perf/tests/workloads/noploop.c
+++ b/tools/perf/tests/workloads/noploop.c
@@ -1,4 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <pthread.h>
#include <stdlib.h>
#include <signal.h>
#include <unistd.h>
@@ -16,6 +17,7 @@ static int noploop(int argc, const char **argv)
{
int sec = 1;
+ pthread_setname_np(pthread_self(), "perf-noploop");
if (argc > 0)
sec = atoi(argv[0]);
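[Editor's note] With the thread named, the workload shows up with the comm "perf-noploop" in recorded samples (task comms are truncated to 15 characters, which this name fits), so tests can match the workload by name rather than tracking its PID.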
diff --git a/tools/perf/tests/workloads/thloop.c b/tools/perf/tests/workloads/thloop.c
index 457b29f91c3e..bd8168f883fb 100644
--- a/tools/perf/tests/workloads/thloop.c
+++ b/tools/perf/tests/workloads/thloop.c
@@ -31,21 +31,52 @@ static void *thfunc(void *arg)
static int thloop(int argc, const char **argv)
{
- int sec = 1;
- pthread_t th;
+ int nt = 2, sec = 1, err = 1;
+ pthread_t *thread_list = NULL;
if (argc > 0)
sec = atoi(argv[0]);
+ if (sec <= 0) {
+ fprintf(stderr, "Error: seconds (%d) must be >= 1\n", sec);
+ return 1;
+ }
+
+ if (argc > 1)
+ nt = atoi(argv[1]);
+
+ if (nt <= 0) {
+ fprintf(stderr, "Error: thread count (%d) must be >= 1\n", nt);
+ return 1;
+ }
+
signal(SIGINT, sighandler);
signal(SIGALRM, sighandler);
- alarm(sec);
- pthread_create(&th, NULL, thfunc, test_loop);
- test_loop();
- pthread_join(th, NULL);
+ thread_list = calloc(nt, sizeof(pthread_t));
+ if (thread_list == NULL) {
+ fprintf(stderr, "Error: malloc failed for %d threads\n", nt);
+ goto out;
+ }
+ for (int i = 1; i < nt; i++) {
+ int ret = pthread_create(&thread_list[i], NULL, thfunc, test_loop);
- return 0;
+ if (ret) {
+ fprintf(stderr, "Error: failed to create thread %d\n", i);
+ done = 1; // Ensure started threads terminate.
+ goto out;
+ }
+ }
+ alarm(sec);
+ test_loop();
+ err = 0;
+out:
+ for (int i = 1; i < nt; i++) {
+ if (thread_list && thread_list[i])
+ pthread_join(thread_list[i], /*retval=*/NULL);
+ }
+ free(thread_list);
+ return err;
}
DEFINE_WORKLOAD(thloop);
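[Editor's note] With the changes above, thloop takes an optional second argument for the thread count; the main thread counts as one worker, which is why both loops start at index 1. A usage sketch, assuming the usual `perf test -w` workload runner:

        # run the busy loop for 3 seconds across 4 threads
        $ perf test -w thloop 3 4

        # profile the multi-threaded loop
        $ perf record -- perf test -w thloop 3 4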
diff --git a/tools/perf/tests/workloads/traploop.c b/tools/perf/tests/workloads/traploop.c
new file mode 100644
index 000000000000..68dec399a735
--- /dev/null
+++ b/tools/perf/tests/workloads/traploop.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdlib.h>
+#include "../tests.h"
+
+#define BENCH_RUNS 999999
+
+#ifdef __aarch64__
+static void trap_bench(void)
+{
+ unsigned long val;
+
+ asm("mrs %0, ID_AA64ISAR0_EL1" : "=r" (val)); /* TRAP + ERET */
+}
+#else
+static void trap_bench(void) { }
+#endif
+
+static int traploop(int argc, const char **argv)
+{
+ int num_loops = BENCH_RUNS;
+
+ if (argc > 0)
+ num_loops = atoi(argv[0]);
+
+ for (int i = 0; i < num_loops; i++)
+ trap_bench();
+
+ return 0;
+}
+
+DEFINE_WORKLOAD(traploop);
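[Editor's note] On arm64, reading ID_AA64ISAR0_EL1 from user space traps into the kernel, which emulates the register access and returns via ERET, so each iteration exercises a full user-to-kernel round trip; on other architectures trap_bench() is an empty stub and the loop measures nothing. A usage sketch, assuming the same workload runner:

        $ perf stat -- perf test -w traploop 999999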
diff --git a/tools/perf/trace/beauty/Build b/tools/perf/trace/beauty/Build
index f50ebdc445b8..561590ee8cda 100644
--- a/tools/perf/trace/beauty/Build
+++ b/tools/perf/trace/beauty/Build
@@ -31,6 +31,6 @@ endif
$(OUTPUT)%.shellcheck_log: %
$(call rule_mkdir)
- $(Q)$(call echo-cmd,test)shellcheck -s bash -a -S warning "$<" > $@ || (cat $@ && rm $@ && false)
+ $(Q)$(call echo-cmd,test)$(SHELLCHECK) "$<" > $@ || (cat $@ && rm $@ && false)
perf-y += $(SHELL_TEST_LOGS)
diff --git a/tools/perf/trace/beauty/include/linux/socket.h b/tools/perf/trace/beauty/include/linux/socket.h
index c3322eb3d686..77d7c59f5d8b 100644
--- a/tools/perf/trace/beauty/include/linux/socket.h
+++ b/tools/perf/trace/beauty/include/linux/socket.h
@@ -34,10 +34,7 @@ typedef __kernel_sa_family_t sa_family_t;
struct sockaddr {
sa_family_t sa_family; /* address family, AF_xxx */
- union {
- char sa_data_min[14]; /* Minimum 14 bytes of protocol address */
- DECLARE_FLEX_ARRAY(char, sa_data);
- };
+ char sa_data[14]; /* 14 bytes of protocol address */
};
struct linger {
@@ -168,7 +165,7 @@ static inline struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr
return __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg);
}
-static inline size_t msg_data_left(struct msghdr *msg)
+static inline size_t msg_data_left(const struct msghdr *msg)
{
return iov_iter_count(&msg->msg_iter);
}
diff --git a/tools/perf/trace/beauty/include/uapi/linux/fcntl.h b/tools/perf/trace/beauty/include/uapi/linux/fcntl.h
index a15ac2fa4b20..3741ea1b73d8 100644
--- a/tools/perf/trace/beauty/include/uapi/linux/fcntl.h
+++ b/tools/perf/trace/beauty/include/uapi/linux/fcntl.h
@@ -90,10 +90,29 @@
#define DN_ATTRIB 0x00000020 /* File changed attributes */
#define DN_MULTISHOT 0x80000000 /* Don't remove notifier */
+/* Reserved kernel ranges [-100], [-10000, -40000]. */
#define AT_FDCWD -100 /* Special value for dirfd used to
indicate openat should use the
current working directory. */
+/*
+ * The concept of process and threads in userland and the kernel is a confusing
+ * one - within the kernel every thread is a 'task' with its own individual PID,
+ * however from userland's point of view threads are grouped by a single PID,
+ * which is that of the 'thread group leader', typically the first thread
+ * spawned.
+ *
+ * To cut the Gordian knot, for internal kernel usage, we use
+ * PIDFD_SELF_THREAD to refer to the current thread (or task from a kernel
+ * perspective), and PIDFD_SELF_THREAD_GROUP to refer to the current thread
+ * group leader...
+ */
+#define PIDFD_SELF_THREAD -10000 /* Current thread. */
+#define PIDFD_SELF_THREAD_GROUP -10001 /* Current thread group leader. */
+
+#define FD_PIDFS_ROOT -10002 /* Root of the pidfs filesystem */
+#define FD_NSFS_ROOT -10003 /* Root of the nsfs filesystem */
+#define FD_INVALID -10009 /* Invalid file descriptor: -10000 - EBADF = -10009 */
/* Generic flags for the *at(2) family of syscalls. */
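[Editor's note] These sentinels let a caller pass a constant where a pidfd is normally expected, skipping pidfd_open() entirely. A minimal sketch, assuming the kernel accepts PIDFD_SELF_THREAD in pidfd_send_signal(2) and a libc recent enough to define SYS_pidfd_send_signal; signal 0 only validates the target, so nothing is actually delivered:

        #include <sys/syscall.h>
        #include <unistd.h>

        #define PIDFD_SELF_THREAD -10000 /* from the header above */

        int main(void)
        {
                /* "Signal" the calling thread without ever opening a pidfd. */
                return syscall(SYS_pidfd_send_signal, PIDFD_SELF_THREAD,
                               /*sig=*/0, /*info=*/(void *)0, /*flags=*/0) ? 1 : 0;
        }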
diff --git a/tools/perf/trace/beauty/include/uapi/linux/fs.h b/tools/perf/trace/beauty/include/uapi/linux/fs.h
index e762e1af650c..beb4c2d1e41c 100644
--- a/tools/perf/trace/beauty/include/uapi/linux/fs.h
+++ b/tools/perf/trace/beauty/include/uapi/linux/fs.h
@@ -60,6 +60,17 @@
#define RENAME_EXCHANGE (1 << 1) /* Exchange source and dest */
#define RENAME_WHITEOUT (1 << 2) /* Whiteout source */
+/*
+ * The root inode of procfs is guaranteed to always have the same inode number.
+ * For programs that make heavy use of procfs, verifying that the root is a
+ * real procfs root and using openat2(RESOLVE_{NO_{XDEV,MAGICLINKS},BENEATH})
+ * will allow you to make sure you are never tricked into operating on the
+ * wrong procfs file.
+ */
+enum procfs_ino {
+ PROCFS_ROOT_INO = 1,
+};
+
struct file_clone_range {
__s64 src_fd;
__u64 src_offset;
@@ -91,6 +102,63 @@ struct fs_sysfs_path {
__u8 name[128];
};
+/* Protection info capability flags */
+#define LBMD_PI_CAP_INTEGRITY (1 << 0)
+#define LBMD_PI_CAP_REFTAG (1 << 1)
+
+/* Checksum types for Protection Information */
+#define LBMD_PI_CSUM_NONE 0
+#define LBMD_PI_CSUM_IP 1
+#define LBMD_PI_CSUM_CRC16_T10DIF 2
+#define LBMD_PI_CSUM_CRC64_NVME 4
+
+/* sizeof first published struct */
+#define LBMD_SIZE_VER0 16
+
+/*
+ * Logical block metadata capability descriptor
+ * If the device does not support metadata, all the fields will be zero.
+ * Applications must check lbmd_flags to determine whether metadata is
+ * supported or not.
+ */
+struct logical_block_metadata_cap {
+ /* Bitmask of logical block metadata capability flags */
+ __u32 lbmd_flags;
+ /*
+ * The amount of data described by each unit of logical block
+ * metadata
+ */
+ __u16 lbmd_interval;
+ /*
+ * Size in bytes of the logical block metadata associated with each
+ * interval
+ */
+ __u8 lbmd_size;
+ /*
+ * Size in bytes of the opaque block tag associated with each
+ * interval
+ */
+ __u8 lbmd_opaque_size;
+ /*
+ * Offset in bytes of the opaque block tag within the logical block
+ * metadata
+ */
+ __u8 lbmd_opaque_offset;
+ /* Size in bytes of the T10 PI tuple associated with each interval */
+ __u8 lbmd_pi_size;
+ /* Offset in bytes of T10 PI tuple within the logical block metadata */
+ __u8 lbmd_pi_offset;
+ /* T10 PI guard tag type */
+ __u8 lbmd_guard_tag_type;
+ /* Size in bytes of the T10 PI application tag */
+ __u8 lbmd_app_tag_size;
+ /* Size in bytes of the T10 PI reference tag */
+ __u8 lbmd_ref_tag_size;
+ /* Size in bytes of the T10 PI storage tag */
+ __u8 lbmd_storage_tag_size;
+ __u8 pad;
+};
+
/* extent-same (dedupe) ioctls; these MUST match the btrfs ioctl definitions */
#define FILE_DEDUPE_RANGE_SAME 0
#define FILE_DEDUPE_RANGE_DIFFERS 1
@@ -149,6 +217,24 @@ struct fsxattr {
};
/*
+ * Variable size structure for file_[sg]et_attr().
+ *
+ * Note: this is an alternative to 'struct file_kattr'/'struct fsxattr'.
+ * As this structure is passed to/from userspace with its size, this can
+ * be versioned based on the size.
+ */
+struct file_attr {
+ __u64 fa_xflags; /* xflags field value (get/set) */
+ __u32 fa_extsize; /* extsize field value (get/set)*/
+ __u32 fa_nextents; /* nextents field value (get) */
+ __u32 fa_projid; /* project identifier (get/set) */
+ __u32 fa_cowextsize; /* CoW extsize field value (get/set) */
+};
+
+#define FILE_ATTR_SIZE_VER0 24
+#define FILE_ATTR_SIZE_LATEST FILE_ATTR_SIZE_VER0
+
+/*
* Flags for the fsx_xflags field
*/
#define FS_XFLAG_REALTIME 0x00000001 /* data in realtime volume */
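[Editor's note] FILE_ATTR_SIZE_VER0 (24) is simply the size of the v0 layout above, one __u64 plus four __u32s; a quick sanity check of that arithmetic, using a hypothetical mirror struct so it compiles against any headers:

        #include <assert.h>
        #include <linux/types.h>

        struct file_attr_v0 { /* mirrors the layout above */
                __u64 fa_xflags;
                __u32 fa_extsize;
                __u32 fa_nextents;
                __u32 fa_projid;
                __u32 fa_cowextsize;
        };
        static_assert(sizeof(struct file_attr_v0) == 24, "FILE_ATTR_SIZE_VER0");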
@@ -247,6 +333,8 @@ struct fsxattr {
* also /sys/kernel/debug/ for filesystems with debugfs exports
*/
#define FS_IOC_GETFSSYSFSPATH _IOR(0x15, 1, struct fs_sysfs_path)
+/* Get logical block metadata capability details */
+#define FS_IOC_GETLBMD_CAP _IOWR(0x15, 2, struct logical_block_metadata_cap)
/*
* Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS)
@@ -342,10 +430,13 @@ typedef int __bitwise __kernel_rwf_t;
/* buffered IO that drops the cache after reading or writing data */
#define RWF_DONTCACHE ((__force __kernel_rwf_t)0x00000080)
+/* prevent pipe and socket writes from raising SIGPIPE */
+#define RWF_NOSIGNAL ((__force __kernel_rwf_t)0x00000100)
+
/* mask of flags supported by the kernel */
#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\
RWF_APPEND | RWF_NOAPPEND | RWF_ATOMIC |\
- RWF_DONTCACHE)
+ RWF_DONTCACHE | RWF_NOSIGNAL)
#define PROCFS_IOCTL_MAGIC 'f'
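[Editor's note] RWF_NOSIGNAL gives writes the semantics that send(2) gets from MSG_NOSIGNAL: a write to a broken pipe or socket fails with EPIPE instead of raising SIGPIPE. A hedged sketch; on kernels without the flag, pwritev2() is rejected with EOPNOTSUPP since the flag falls outside RWF_SUPPORTED:

        #define _GNU_SOURCE
        #include <sys/uio.h>

        #ifndef RWF_NOSIGNAL
        #define RWF_NOSIGNAL 0x00000100 /* from the header above */
        #endif

        /* Write without risking SIGPIPE; offset -1 keeps the current position. */
        static ssize_t write_nosignal(int fd, const void *buf, size_t len)
        {
                struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };

                return pwritev2(fd, &iov, 1, -1, RWF_NOSIGNAL);
        }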
@@ -361,6 +452,7 @@ typedef int __bitwise __kernel_rwf_t;
#define PAGE_IS_PFNZERO (1 << 5)
#define PAGE_IS_HUGE (1 << 6)
#define PAGE_IS_SOFT_DIRTY (1 << 7)
+#define PAGE_IS_GUARD (1 << 8)
/*
* struct page_region - Page region with flags
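[Editor's note] FS_IOC_GETLBMD_CAP, defined above, is queried with a plain ioctl on a block device (or a file residing on one). A sketch, assuming installed headers new enough to carry the ioctl and struct; the NVMe device path is a hypothetical example:

        #include <fcntl.h>
        #include <stdio.h>
        #include <sys/ioctl.h>
        #include <linux/fs.h>

        int main(void)
        {
                struct logical_block_metadata_cap cap = { 0 };
                int fd = open("/dev/nvme0n1", O_RDONLY); /* hypothetical */

                if (fd < 0 || ioctl(fd, FS_IOC_GETLBMD_CAP, &cap) < 0) {
                        perror("FS_IOC_GETLBMD_CAP");
                        return 1;
                }
                /* All-zero output means no metadata; check lbmd_flags first. */
                printf("flags=%#x interval=%u md size=%u PI size=%u\n",
                       cap.lbmd_flags, cap.lbmd_interval, cap.lbmd_size,
                       cap.lbmd_pi_size);
                return 0;
        }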
diff --git a/tools/perf/trace/beauty/include/uapi/linux/prctl.h b/tools/perf/trace/beauty/include/uapi/linux/prctl.h
index 15c18ef4eb11..51c4e8c82b1e 100644
--- a/tools/perf/trace/beauty/include/uapi/linux/prctl.h
+++ b/tools/perf/trace/beauty/include/uapi/linux/prctl.h
@@ -177,7 +177,17 @@ struct prctl_mm_map {
#define PR_GET_TID_ADDRESS 40
+/*
+ * Flags for PR_SET_THP_DISABLE are only applicable when disabling. Bit 0
+ * is reserved, so PR_GET_THP_DISABLE can return "1 | flags", to effectively
+ * return "1" when no flags were specified for PR_SET_THP_DISABLE.
+ */
#define PR_SET_THP_DISABLE 41
+/*
+ * Don't disable THPs when explicitly advised (e.g., MADV_HUGEPAGE /
+ * VM_HUGEPAGE, MADV_COLLAPSE).
+ */
+# define PR_THP_DISABLE_EXCEPT_ADVISED (1 << 1)
#define PR_GET_THP_DISABLE 42
/*
@@ -244,6 +254,8 @@ struct prctl_mm_map {
# define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
/* Unused; kept only for source compatibility */
# define PR_MTE_TCF_SHIFT 1
+/* MTE tag check store only */
+# define PR_MTE_STORE_ONLY (1UL << 19)
/* RISC-V pointer masking tag length */
# define PR_PMLEN_SHIFT 24
# define PR_PMLEN_MASK (0x7fUL << PR_PMLEN_SHIFT)
@@ -255,7 +267,12 @@ struct prctl_mm_map {
/* Dispatch syscalls to a userspace handler */
#define PR_SET_SYSCALL_USER_DISPATCH 59
# define PR_SYS_DISPATCH_OFF 0
-# define PR_SYS_DISPATCH_ON 1
+/* Enable dispatch except for the specified range */
+# define PR_SYS_DISPATCH_EXCLUSIVE_ON 1
+/* Enable dispatch for the specified range */
+# define PR_SYS_DISPATCH_INCLUSIVE_ON 2
+/* Legacy name for backwards compatibility */
+# define PR_SYS_DISPATCH_ON PR_SYS_DISPATCH_EXCLUSIVE_ON
/* The control values for the user space selector when dispatch is enabled */
# define SYSCALL_DISPATCH_FILTER_ALLOW 0
# define SYSCALL_DISPATCH_FILTER_BLOCK 1
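[Editor's note] The rename makes the range semantics explicit: EXCLUSIVE_ON dispatches every syscall except those issued from the given range (the classic libc carve-out), while the new INCLUSIVE_ON dispatches only those made from inside it. A sketch of the classic setup, with the new constant aliased in case the installed headers predate it:

        #include <sys/prctl.h>

        #ifndef PR_SYS_DISPATCH_EXCLUSIVE_ON
        #define PR_SYS_DISPATCH_EXCLUSIVE_ON PR_SYS_DISPATCH_ON /* old name, same value */
        #endif

        static char sud_selector = SYSCALL_DISPATCH_FILTER_ALLOW;

        /* Trap syscalls with SIGSYS, except those made from [start, start+len). */
        static int enable_sud(unsigned long start, unsigned long len)
        {
                return prctl(PR_SET_SYSCALL_USER_DISPATCH,
                             PR_SYS_DISPATCH_EXCLUSIVE_ON, start, len,
                             &sud_selector);
        }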
@@ -364,4 +381,9 @@ struct prctl_mm_map {
# define PR_TIMER_CREATE_RESTORE_IDS_ON 1
# define PR_TIMER_CREATE_RESTORE_IDS_GET 2
+/* FUTEX hash management */
+#define PR_FUTEX_HASH 78
+# define PR_FUTEX_HASH_SET_SLOTS 1
+# define PR_FUTEX_HASH_GET_SLOTS 2
+
#endif /* _LINUX_PRCTL_H */
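[Editor's note] A sketch of resizing the process-private futex hash table via the new prctl. The slot count is assumed to go in the third argument with the fourth reserved as zero, and PR_FUTEX_HASH_GET_SLOTS is assumed to report the current count in the return value; treat both as assumptions if the final ABI differs:

        #include <stdio.h>
        #include <sys/prctl.h>

        #ifndef PR_FUTEX_HASH
        #define PR_FUTEX_HASH            78
        #define PR_FUTEX_HASH_SET_SLOTS  1
        #define PR_FUTEX_HASH_GET_SLOTS  2
        #endif

        int main(void)
        {
                /* Ask for a 16-slot private futex hash. */
                if (prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_SET_SLOTS, 16, 0) < 0)
                        perror("PR_FUTEX_HASH_SET_SLOTS");

                printf("futex hash slots: %d\n",
                       (int)prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_GET_SLOTS));
                return 0;
        }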
diff --git a/tools/perf/trace/beauty/include/uapi/linux/stat.h b/tools/perf/trace/beauty/include/uapi/linux/stat.h
index f78ee3670dd5..1686861aae20 100644
--- a/tools/perf/trace/beauty/include/uapi/linux/stat.h
+++ b/tools/perf/trace/beauty/include/uapi/linux/stat.h
@@ -182,8 +182,12 @@ struct statx {
/* File offset alignment for direct I/O reads */
__u32 stx_dio_read_offset_align;
- /* 0xb8 */
- __u64 __spare3[9]; /* Spare space for future expansion */
+ /* Optimised max atomic write unit in bytes */
+ __u32 stx_atomic_write_unit_max_opt;
+ __u32 __spare2[1];
+
+ /* 0xc0 */
+ __u64 __spare3[8]; /* Spare space for future expansion */
/* 0x100 */
};
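[Editor's note] The new field slots into the spare space: __spare3 shrinks from 9 to 8 u64s, with the 0xb8 slot now split between the new __u32 and one remaining __u32 spare, so the struct still ends at 0x100. A read sketch, assuming STATX_WRITE_ATOMIC is the mask gating the atomic-write fields and that the installed headers carry the new member:

        #define _GNU_SOURCE
        #include <fcntl.h>
        #include <stdio.h>
        #include <sys/stat.h>

        #ifndef STATX_WRITE_ATOMIC
        #define STATX_WRITE_ATOMIC 0x00010000U /* assumed mask value */
        #endif

        int main(int argc, char **argv)
        {
                struct statx stx;

                if (statx(AT_FDCWD, argc > 1 ? argv[1] : ".", 0,
                          STATX_WRITE_ATOMIC, &stx) == 0)
                        printf("optimised atomic write unit: %u bytes\n",
                               stx.stx_atomic_write_unit_max_opt);
                return 0;
        }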
diff --git a/tools/perf/trace/beauty/include/uapi/linux/vhost.h b/tools/perf/trace/beauty/include/uapi/linux/vhost.h
index b95dd84eef2d..c57674a6aa0d 100644
--- a/tools/perf/trace/beauty/include/uapi/linux/vhost.h
+++ b/tools/perf/trace/beauty/include/uapi/linux/vhost.h
@@ -28,10 +28,10 @@
/* Set current process as the (exclusive) owner of this file descriptor. This
* must be called before any other vhost command. Further calls to
- * VHOST_OWNER_SET fail until VHOST_OWNER_RESET is called. */
+ * VHOST_SET_OWNER fail until VHOST_RESET_OWNER is called. */
#define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01)
/* Give up ownership, and reset the device to default values.
- * Allows subsequent call to VHOST_OWNER_SET to succeed. */
+ * Allows subsequent call to VHOST_SET_OWNER to succeed. */
#define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02)
/* Set up/modify memory layout */
@@ -235,4 +235,39 @@
*/
#define VHOST_VDPA_GET_VRING_SIZE _IOWR(VHOST_VIRTIO, 0x82, \
struct vhost_vring_state)
+
+/* Extended features manipulation */
+#define VHOST_GET_FEATURES_ARRAY _IOR(VHOST_VIRTIO, 0x83, \
+ struct vhost_features_array)
+#define VHOST_SET_FEATURES_ARRAY _IOW(VHOST_VIRTIO, 0x83, \
+ struct vhost_features_array)
+
+/* fork_owner values for vhost */
+#define VHOST_FORK_OWNER_KTHREAD 0
+#define VHOST_FORK_OWNER_TASK 1
+
+/**
+ * VHOST_SET_FORK_FROM_OWNER - Set the fork_owner flag for the vhost device,
+ * This ioctl must be called before VHOST_SET_OWNER.
+ * Only available when CONFIG_VHOST_ENABLE_FORK_OWNER_CONTROL=y
+ *
+ * @param fork_owner: An 8-bit value that determines the vhost thread mode
+ *
+ * When fork_owner is set to VHOST_FORK_OWNER_TASK (default value):
+ * - Vhost will create vhost worker as tasks forked from the owner,
+ * inheriting all of the owner's attributes.
+ *
+ * When fork_owner is set to VHOST_FORK_OWNER_KTHREAD:
+ * - Vhost will create vhost workers as kernel threads.
+ */
+#define VHOST_SET_FORK_FROM_OWNER _IOW(VHOST_VIRTIO, 0x84, __u8)
+
+/**
+ * VHOST_GET_FORK_OWNER - Get the current fork_owner flag for the vhost device.
+ * Only available when CONFIG_VHOST_ENABLE_FORK_OWNER_CONTROL=y
+ *
+ * @return: An 8-bit value indicating the current thread mode.
+ */
+#define VHOST_GET_FORK_FROM_OWNER _IOR(VHOST_VIRTIO, 0x85, __u8)
+
#endif
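[Editor's note] A sketch of switching a vhost device back to kernel-thread workers. The device node is a hypothetical example, and per the comments above the ioctl only exists with CONFIG_VHOST_ENABLE_FORK_OWNER_CONTROL=y and must precede VHOST_SET_OWNER:

        #include <fcntl.h>
        #include <stdio.h>
        #include <sys/ioctl.h>
        #include <linux/vhost.h>

        int main(void)
        {
                __u8 mode = VHOST_FORK_OWNER_KTHREAD;
                int fd = open("/dev/vhost-net", O_RDWR); /* hypothetical */

                /* Must happen before VHOST_SET_OWNER, per the comment above. */
                if (fd < 0 || ioctl(fd, VHOST_SET_FORK_FROM_OWNER, &mode) < 0)
                        perror("VHOST_SET_FORK_FROM_OWNER");
                return 0;
        }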
diff --git a/tools/perf/ui/Build b/tools/perf/ui/Build
index d2ecd9290600..6005f813c9e3 100644
--- a/tools/perf/ui/Build
+++ b/tools/perf/ui/Build
@@ -8,5 +8,6 @@ perf-ui-y += stdio/hist.o
CFLAGS_setup.o += -DLIBDIR="BUILD_STR($(LIBDIR))"
perf-ui-$(CONFIG_SLANG) += browser.o
+perf-ui-$(CONFIG_SLANG) += keysyms.o
perf-ui-$(CONFIG_SLANG) += browsers/
perf-ui-$(CONFIG_SLANG) += tui/
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index 19503e838738..dc88427b4ae5 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -233,6 +233,14 @@ int ui_browser__warning(struct ui_browser *browser, int timeout,
return key;
}
+int ui_browser__warn_unhandled_hotkey(struct ui_browser *browser, int key, int timeout, const char *help)
+{
+ char kname[32];
+
+ key_name(key, kname, sizeof(kname));
+ return ui_browser__warning(browser, timeout, "\n'%s' key not associated%s!\n", kname, help ?: "");
+}
+
int ui_browser__help_window(struct ui_browser *browser, const char *text)
{
int key;
@@ -451,6 +459,8 @@ int ui_browser__run(struct ui_browser *browser, int delay_secs)
goto out;
if (browser->horiz_scroll != 0)
--browser->horiz_scroll;
+ else
+ goto out;
break;
case K_PGDN:
case ' ':
diff --git a/tools/perf/ui/browser.h b/tools/perf/ui/browser.h
index 6e98d5f8f71c..9d4404f9b87f 100644
--- a/tools/perf/ui/browser.h
+++ b/tools/perf/ui/browser.h
@@ -66,12 +66,13 @@ void __ui_browser__vline(struct ui_browser *browser, unsigned int column,
int ui_browser__warning(struct ui_browser *browser, int timeout,
const char *format, ...);
+int ui_browser__warn_unhandled_hotkey(struct ui_browser *browser, int key, int timeout, const char *help);
int ui_browser__help_window(struct ui_browser *browser, const char *text);
bool ui_browser__dialog_yesno(struct ui_browser *browser, const char *text);
int ui_browser__input_window(const char *title, const char *text, char *input,
const char *exit_msg, int delay_sec);
-struct perf_env;
-int tui__header_window(struct perf_env *env);
+struct perf_session;
+int tui__header_window(struct perf_session *session);
void ui_browser__argv_seek(struct ui_browser *browser, off_t offset, int whence);
unsigned int ui_browser__argv_refresh(struct ui_browser *browser);
diff --git a/tools/perf/ui/browsers/annotate-data.c b/tools/perf/ui/browsers/annotate-data.c
index cd562a8822b7..aa8c89fe2e82 100644
--- a/tools/perf/ui/browsers/annotate-data.c
+++ b/tools/perf/ui/browsers/annotate-data.c
@@ -558,6 +558,7 @@ static int annotated_data_browser__run(struct annotated_data_browser *browser,
case CTRL('c'):
goto out;
default:
+ ui_browser__warn_unhandled_hotkey(&browser->b, key, delay_secs, ", use 'h'/F1 to see actions");
continue;
}
}
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 135d6ce88fb3..36aca8d6d003 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -4,7 +4,9 @@
#include "../ui.h"
#include "../../util/annotate.h"
#include "../../util/debug.h"
+#include "../../util/debuginfo.h"
#include "../../util/dso.h"
+#include "../../util/hashmap.h"
#include "../../util/hist.h"
#include "../../util/sort.h"
#include "../../util/map.h"
@@ -12,7 +14,9 @@
#include "../../util/symbol.h"
#include "../../util/evsel.h"
#include "../../util/evlist.h"
+#include "../../util/thread.h"
#include <inttypes.h>
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
@@ -27,10 +31,31 @@ struct annotate_browser {
struct rb_node *curr_hot;
struct annotation_line *selection;
struct arch *arch;
+ /*
+ * perf top can delete a hist_entry at any time. Callers should
+ * ensure it stays valid for as long as the browser uses it.
+ */
+ struct hist_entry *he;
+ struct debuginfo *dbg;
+ struct evsel *evsel;
+ struct hashmap *type_hash;
bool searching_backwards;
char search_bf[128];
};
+/* A copy of target hist_entry for perf top. */
+static struct hist_entry annotate_he;
+
+static size_t type_hash(long key, void *ctx __maybe_unused)
+{
+ return key;
+}
+
+static bool type_equal(long key1, long key2, void *ctx __maybe_unused)
+{
+ return key1 == key2;
+}
+
static inline struct annotation *browser__annotation(struct ui_browser *browser)
{
struct map_symbol *ms = browser->priv;
@@ -107,12 +132,21 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
.printf = annotate_browser__printf,
.write_graph = annotate_browser__write_graph,
};
+ struct annotation_print_data apd = {
+ .he = ab->he,
+ .arch = ab->arch,
+ .evsel = ab->evsel,
+ .dbg = ab->dbg,
+ };
/* The scroll bar isn't being used */
if (!browser->navkeypressed)
ops.width += 1;
- annotation_line__write(al, notes, &ops);
+ if (!IS_ERR_OR_NULL(ab->type_hash))
+ apd.type_hash = ab->type_hash;
+
+ annotation_line__write(al, notes, &ops, &apd);
if (ops.current_entry)
ab->selection = al;
@@ -345,6 +379,23 @@ static void annotate_browser__calc_percent(struct annotate_browser *browser,
browser->curr_hot = rb_last(&browser->entries);
}
+static struct annotation_line *annotate_browser__find_new_asm_line(
+ struct annotate_browser *browser,
+ int idx_asm)
+{
+ struct annotation_line *al;
+ struct list_head *head = browser->b.entries;
+
+ /* find an annotation line in the new list with the same idx_asm */
+ list_for_each_entry(al, head, node) {
+ if (al->idx_asm == idx_asm)
+ return al;
+ }
+
+ /* There are no asm lines */
+ return NULL;
+}
+
static struct annotation_line *annotate_browser__find_next_asm_line(
struct annotate_browser *browser,
struct annotation_line *al)
@@ -368,7 +419,31 @@ static struct annotation_line *annotate_browser__find_next_asm_line(
return NULL;
}
-static bool annotate_browser__toggle_source(struct annotate_browser *browser)
+static bool annotation__has_source(struct annotation *notes)
+{
+ struct annotation_line *al;
+ bool found_asm = false;
+
+ /* Skip the first non-asm lines, which are present regardless of source. */
+ list_for_each_entry(al, &notes->src->source, node) {
+ if (al->offset >= 0) {
+ found_asm = true;
+ break;
+ }
+ }
+
+ if (found_asm) {
+ /* After assembly lines, any line without offset means source. */
+ list_for_each_entry_continue(al, &notes->src->source, node) {
+ if (al->offset == -1)
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool annotate_browser__toggle_source(struct annotate_browser *browser,
+ struct evsel *evsel)
{
struct annotation *notes = browser__annotation(&browser->b);
struct annotation_line *al;
@@ -377,6 +452,39 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser)
browser->b.seek(&browser->b, offset, SEEK_CUR);
al = list_entry(browser->b.top, struct annotation_line, node);
+ if (!annotate_opts.annotate_src)
+ annotate_opts.annotate_src = true;
+
+ /*
+ * It's about to get source code annotation for the first time.
+ * Drop the existing annotation_lines and get the new one with source.
+ * And then move to the original line at the same asm index.
+ */
+ if (annotate_opts.hide_src_code && !notes->src->tried_source) {
+ struct map_symbol *ms = browser->b.priv;
+ int orig_idx_asm = al->idx_asm;
+
+ /* annotate again with source code info */
+ annotate_opts.hide_src_code = false;
+ annotated_source__purge(notes->src);
+ symbol__annotate2(ms, evsel, &browser->arch);
+ annotate_opts.hide_src_code = true;
+
+ /* should be after annotated_source__purge() */
+ notes->src->tried_source = true;
+
+ if (!annotation__has_source(notes))
+ ui__warning("Annotation has no source code.");
+
+ browser->b.entries = &notes->src->source;
+ al = annotate_browser__find_new_asm_line(browser, orig_idx_asm);
+ if (unlikely(al == NULL)) {
+ al = list_first_entry(&notes->src->source,
+ struct annotation_line, node);
+ }
+ browser->b.seek(&browser->b, al->idx_asm, SEEK_SET);
+ }
+
if (annotate_opts.hide_src_code) {
if (al->idx_asm < offset)
offset = al->idx;
@@ -406,6 +514,9 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser)
browser->b.index = al->idx_asm;
}
+ if (annotate_opts.hide_src_code_on_title)
+ annotate_opts.hide_src_code_on_title = false;
+
return true;
}
@@ -438,9 +549,24 @@ static void ui_browser__init_asm_mode(struct ui_browser *browser)
static int sym_title(struct symbol *sym, struct map *map, char *title,
size_t sz, int percent_type)
{
- return snprintf(title, sz, "%s %s [Percent: %s]", sym->name,
+ return snprintf(title, sz, "%s %s [Percent: %s] %s", sym->name,
dso__long_name(map__dso(map)),
- percent_type_str(percent_type));
+ percent_type_str(percent_type),
+ annotate_opts.code_with_type ? "[Type]" : "");
+}
+
+static void annotate_browser__show_function_title(struct annotate_browser *browser)
+{
+ struct ui_browser *b = &browser->b;
+ struct map_symbol *ms = b->priv;
+ struct symbol *sym = ms->sym;
+ char title[SYM_TITLE_MAX_SIZE];
+
+ sym_title(sym, ms->map, title, sizeof(title), annotate_opts.percent_type);
+
+ ui_browser__gotorc_title(b, 0, 0);
+ ui_browser__set_color(b, HE_COLORSET_ROOT);
+ ui_browser__write_nstring(b, title, b->width + 1);
}
/*
@@ -459,7 +585,6 @@ static bool annotate_browser__callq(struct annotate_browser *browser,
struct map_symbol *ms = browser->b.priv, target_ms;
struct disasm_line *dl = disasm_line(browser->selection);
struct annotation *notes;
- char title[SYM_TITLE_MAX_SIZE];
if (!dl->ops.target.sym) {
ui_helpline__puts("The called function was not found.");
@@ -480,9 +605,14 @@ static bool annotate_browser__callq(struct annotate_browser *browser,
target_ms.map = ms->map;
target_ms.sym = dl->ops.target.sym;
annotation__unlock(notes);
- symbol__tui_annotate(&target_ms, evsel, hbt);
- sym_title(ms->sym, ms->map, title, sizeof(title), annotate_opts.percent_type);
- ui_browser__show_title(&browser->b, title);
+ __hist_entry__tui_annotate(browser->he, &target_ms, evsel, hbt, NO_ADDR);
+
+ /*
+ * The annotate_browser above changed the title with the target function
+ * and now it's back to the original function. Refresh the header line
+ * for the original function again.
+ */
+ annotate_browser__show_function_title(browser);
return true;
}
@@ -654,20 +784,12 @@ bool annotate_browser__continue_search_reverse(struct annotate_browser *browser,
return __annotate_browser__search_reverse(browser);
}
-static int annotate_browser__show(struct ui_browser *browser, char *title, const char *help)
+static int annotate_browser__show(struct annotate_browser *browser, char *title, const char *help)
{
- struct map_symbol *ms = browser->priv;
- struct symbol *sym = ms->sym;
- char symbol_dso[SYM_TITLE_MAX_SIZE];
-
- if (ui_browser__show(browser, title, help) < 0)
+ if (ui_browser__show(&browser->b, title, help) < 0)
return -1;
- sym_title(sym, ms->map, symbol_dso, sizeof(symbol_dso), annotate_opts.percent_type);
-
- ui_browser__gotorc_title(browser, 0, 0);
- ui_browser__set_color(browser, HE_COLORSET_ROOT);
- ui_browser__write_nstring(browser, symbol_dso, browser->width + 1);
+ annotate_browser__show_function_title(browser);
return 0;
}
@@ -704,6 +826,54 @@ switch_percent_type(struct annotation_options *opts, bool base)
}
}
+static int annotate__scnprintf_title(struct hists *hists, char *bf, size_t size)
+{
+ int printed = hists__scnprintf_title(hists, bf, size);
+
+ if (!annotate_opts.hide_src_code_on_title) {
+ printed += scnprintf(bf + printed, size - printed, " [source: %s]",
+ annotate_opts.hide_src_code ? "OFF" : "On");
+ }
+
+ return printed;
+}
+
+static void annotate_browser__debuginfo_warning(struct annotate_browser *browser)
+{
+ struct map_symbol *ms = browser->b.priv;
+ struct dso *dso = map__dso(ms->map);
+
+ if (browser->dbg == NULL && annotate_opts.code_with_type &&
+ !dso__debuginfo_warned(dso)) {
+ ui__warning("DWARF debuginfo not found.\n\n"
+ "Data-type in this DSO will not be displayed.\n"
+ "Please make sure to have debug information.");
+ dso__set_debuginfo_warned(dso);
+ }
+}
+
+static s64 annotate_browser__curr_hot_offset(struct annotate_browser *browser)
+{
+ struct annotation_line *al = NULL;
+
+ if (browser->curr_hot)
+ al = rb_entry(browser->curr_hot, struct annotation_line, rb_node);
+
+ return al ? al->offset : 0;
+}
+
+static void annotate_browser__symbol_annotate_error(struct annotate_browser *browser, int err)
+{
+ struct map_symbol *ms = browser->b.priv;
+ struct symbol *sym = ms->sym;
+ struct dso *dso = map__dso(ms->map);
+ char msg[BUFSIZ];
+
+ dso__set_annotate_warned(dso);
+ symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
+ ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
+}
+
static int annotate_browser__run(struct annotate_browser *browser,
struct evsel *evsel,
struct hist_browser_timer *hbt)
@@ -719,12 +889,17 @@ static int annotate_browser__run(struct annotate_browser *browser,
char title[256];
int key;
- hists__scnprintf_title(hists, title, sizeof(title));
- if (annotate_browser__show(&browser->b, title, help) < 0)
+ annotate__scnprintf_title(hists, title, sizeof(title));
+ if (annotate_browser__show(browser, title, help) < 0)
return -1;
annotate_browser__calc_percent(browser, evsel);
+ if (browser->selection != NULL) {
+ browser->curr_hot = &browser->selection->rb_node;
+ browser->b.use_navkeypressed = false;
+ }
+
if (browser->curr_hot) {
annotate_browser__set_rb_top(browser, browser->curr_hot);
browser->b.navkeypressed = false;
@@ -734,6 +909,8 @@ static int annotate_browser__run(struct annotate_browser *browser,
annotation_br_cntr_abbr_list(&br_cntr_text, evsel, false);
+ annotate_browser__debuginfo_warning(browser);
+
while (1) {
key = ui_browser__run(&browser->b, delay_secs);
@@ -755,8 +932,8 @@ static int annotate_browser__run(struct annotate_browser *browser,
if (delay_secs != 0) {
symbol__annotate_decay_histogram(sym, evsel);
- hists__scnprintf_title(hists, title, sizeof(title));
- annotate_browser__show(&browser->b, title, help);
+ annotate__scnprintf_title(hists, title, sizeof(title));
+ annotate_browser__show(browser, title, help);
}
continue;
case K_TAB:
@@ -802,11 +979,12 @@ static int annotate_browser__run(struct annotate_browser *browser,
"b Toggle percent base [period/hits]\n"
"B Branch counter abbr list (Optional)\n"
"? Search string backwards\n"
- "f Toggle showing offsets to full address\n");
+ "f Toggle showing offsets to full address\n"
+ "T Toggle data type display\n");
continue;
case 'r':
script_browse(NULL, NULL);
- annotate_browser__show(&browser->b, title, help);
+ annotate_browser__show(browser, title, help);
continue;
case 'k':
annotate_opts.show_linenr = !annotate_opts.show_linenr;
@@ -817,10 +995,24 @@ static int annotate_browser__run(struct annotate_browser *browser,
case 'H':
nd = browser->curr_hot;
break;
- case 's':
- if (annotate_browser__toggle_source(browser))
+ case 's': {
+ struct annotation_line *al = NULL;
+ s64 offset = annotate_browser__curr_hot_offset(browser);
+
+ if (annotate_browser__toggle_source(browser, evsel))
ui_helpline__puts(help);
+
+ /* Update the annotation browser's rb_tree, and reset the nd */
+ annotate_browser__calc_percent(browser, evsel);
+ /* Try to find the same asm line as before */
+ al = annotated_source__get_line(notes->src, offset);
+ browser->curr_hot = al ? &al->rb_node : NULL;
+ nd = browser->curr_hot;
+
+ annotate__scnprintf_title(hists, title, sizeof(title));
+ annotate_browser__show(browser, title, help);
continue;
+ }
case 'o':
annotate_opts.use_offset = !annotate_opts.use_offset;
annotation__update_column_widths(notes);
@@ -884,7 +1076,7 @@ show_sup_ins:
continue;
}
case 'P':
- map_symbol__annotation_dump(ms, evsel);
+ map_symbol__annotation_dump(ms, evsel, browser->he);
continue;
case 't':
if (symbol_conf.show_total_period) {
@@ -906,8 +1098,8 @@ show_sup_ins:
case 'p':
case 'b':
switch_percent_type(&annotate_opts, key == 'b');
- hists__scnprintf_title(hists, title, sizeof(title));
- annotate_browser__show(&browser->b, title, help);
+ annotate__scnprintf_title(hists, title, sizeof(title));
+ annotate_browser__show(browser, title, help);
continue;
case 'B':
if (br_cntr_text)
@@ -920,6 +1112,17 @@ show_sup_ins:
case 'f':
annotation__toggle_full_addr(notes, ms);
continue;
+ case 'T':
+ annotate_opts.code_with_type ^= 1;
+ if (browser->dbg == NULL)
+ browser->dbg = dso__debuginfo(map__dso(ms->map));
+ if (browser->type_hash == NULL) {
+ browser->type_hash = hashmap__new(type_hash, type_equal,
+ /*ctx=*/NULL);
+ }
+ annotate_browser__show(browser, title, help);
+ annotate_browser__debuginfo_warning(browser);
+ continue;
case K_LEFT:
case '<':
case '>':
@@ -928,6 +1131,7 @@ show_sup_ins:
case CTRL('c'):
goto out;
default:
+ ui_browser__warn_unhandled_hotkey(&browser->b, key, delay_secs, ", use 'h'/F1 to see actions");
continue;
}
@@ -940,25 +1144,20 @@ out:
return key;
}
-int map_symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
- struct hist_browser_timer *hbt)
-{
- return symbol__tui_annotate(ms, evsel, hbt);
-}
-
int hist_entry__tui_annotate(struct hist_entry *he, struct evsel *evsel,
- struct hist_browser_timer *hbt)
+ struct hist_browser_timer *hbt, u64 al_addr)
{
/* reset abort key so that it can get Ctrl-C as a key */
SLang_reset_tty();
SLang_init_tty(0, 0, 0);
SLtty_set_suspend_state(true);
- return map_symbol__tui_annotate(&he->ms, evsel, hbt);
+ return __hist_entry__tui_annotate(he, &he->ms, evsel, hbt, al_addr);
}
-int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
- struct hist_browser_timer *hbt)
+int __hist_entry__tui_annotate(struct hist_entry *he, struct map_symbol *ms,
+ struct evsel *evsel,
+ struct hist_browser_timer *hbt, u64 al_addr)
{
struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
@@ -972,6 +1171,8 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
.priv = ms,
.use_navkeypressed = true,
},
+ .he = he,
+ .evsel = evsel,
};
struct dso *dso;
int ret = -1, err;
@@ -987,16 +1188,40 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
if (not_annotated || !sym->annotate2) {
err = symbol__annotate2(ms, evsel, &browser.arch);
if (err) {
- char msg[BUFSIZ];
- dso__set_annotate_warned(dso);
- symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
- ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
+ annotate_browser__symbol_annotate_error(&browser, err);
return -1;
}
+
+ if (!annotate_opts.hide_src_code) {
+ notes->src->tried_source = true;
+ if (!annotation__has_source(notes))
+ ui__warning("Annotation has no source code.");
+ }
+ } else {
+ err = evsel__get_arch(evsel, &browser.arch);
+ if (err) {
+ annotate_browser__symbol_annotate_error(&browser, err);
+ return -1;
+ }
+ }
+
+ /* Copy necessary information when it's called from perf top */
+ if (hbt != NULL && he != &annotate_he) {
+ annotate_he.hists = he->hists;
+ annotate_he.thread = thread__get(he->thread);
+ annotate_he.cpumode = he->cpumode;
+ map_symbol__copy(&annotate_he.ms, ms);
+
+ browser.he = &annotate_he;
}
ui_helpline__push("Press ESC to exit");
+ if (annotate_opts.code_with_type) {
+ browser.dbg = dso__debuginfo(dso);
+ browser.type_hash = hashmap__new(type_hash, type_equal, /*ctx=*/NULL);
+ }
+
browser.b.width = notes->src->widths.max_line_len;
browser.b.nr_entries = notes->src->nr_entries;
browser.b.entries = &notes->src->source;
@@ -1005,10 +1230,40 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
if (annotate_opts.hide_src_code)
ui_browser__init_asm_mode(&browser.b);
+ /*
+ * If al_addr is set, a specific line was intentionally selected,
+ * rather than one chosen from the percentages calculated by event
+ * sampling. In that case convey it to the browser selection, which
+ * is otherwise left empty.
+ */
+ if (al_addr != NO_ADDR) {
+ struct annotation_line *al = annotated_source__get_line(notes->src,
+ al_addr - sym->start);
+
+ browser.selection = al;
+ }
+
ret = annotate_browser__run(&browser, evsel, hbt);
- if(not_annotated)
+ debuginfo__delete(browser.dbg);
+
+ if (!IS_ERR_OR_NULL(browser.type_hash)) {
+ struct hashmap_entry *cur;
+ size_t bkt;
+
+ hashmap__for_each_entry(browser.type_hash, cur, bkt)
+ zfree(&cur->pvalue);
+ hashmap__free(browser.type_hash);
+ }
+
+ if (not_annotated && !notes->src->tried_source)
annotated_source__purge(notes->src);
+ if (hbt != NULL && he != &annotate_he) {
+ thread__zput(annotate_he.thread);
+ map_symbol__exit(&annotate_he.ms);
+ }
+
return ret;
}
diff --git a/tools/perf/ui/browsers/header.c b/tools/perf/ui/browsers/header.c
index 57e6e4332f74..5b5ca32e3eef 100644
--- a/tools/perf/ui/browsers/header.c
+++ b/tools/perf/ui/browsers/header.c
@@ -69,6 +69,7 @@ static int list_menu__run(struct ui_browser *menu)
key = -1;
break;
default:
+ ui_browser__warn_unhandled_hotkey(menu, key, 0, ", use 'h'/'?'/F1 to see actions");
continue;
}
@@ -92,16 +93,14 @@ static int ui__list_menu(int argc, char * const argv[])
return list_menu__run(&menu);
}
-int tui__header_window(struct perf_env *env)
+int tui__header_window(struct perf_session *session)
{
int i, argc = 0;
char **argv;
- struct perf_session *session;
char *ptr, *pos;
size_t size;
FILE *fp = open_memstream(&ptr, &size);
- session = container_of(env, struct perf_session, header.env);
perf_header__fprintf_info(session, fp, true);
fclose(fp);
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 35c10509b797..08fecbe28a52 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -1266,6 +1266,16 @@ hist_browser__hpp_color_##_type(struct perf_hpp_fmt *fmt, \
_fmttype); \
}
+#define __HPP_COLOR_MEM_STAT_FN(_name, _type) \
+static int \
+hist_browser__hpp_color_mem_stat_##_name(struct perf_hpp_fmt *fmt, \
+ struct perf_hpp *hpp, \
+ struct hist_entry *he) \
+{ \
+ return hpp__fmt_mem_stat(fmt, hpp, he, PERF_MEM_STAT_##_type, \
+ " %5.1f%%", __hpp__slsmg_color_printf);\
+}
+
__HPP_COLOR_PERCENT_FN(overhead, period, PERF_HPP_FMT_TYPE__PERCENT)
__HPP_COLOR_PERCENT_FN(latency, latency, PERF_HPP_FMT_TYPE__LATENCY)
__HPP_COLOR_PERCENT_FN(overhead_sys, period_sys, PERF_HPP_FMT_TYPE__PERCENT)
@@ -1274,9 +1284,15 @@ __HPP_COLOR_PERCENT_FN(overhead_guest_sys, period_guest_sys, PERF_HPP_FMT_TYPE__
__HPP_COLOR_PERCENT_FN(overhead_guest_us, period_guest_us, PERF_HPP_FMT_TYPE__PERCENT)
__HPP_COLOR_ACC_PERCENT_FN(overhead_acc, period, PERF_HPP_FMT_TYPE__PERCENT)
__HPP_COLOR_ACC_PERCENT_FN(latency_acc, latency, PERF_HPP_FMT_TYPE__LATENCY)
+__HPP_COLOR_MEM_STAT_FN(op, OP)
+__HPP_COLOR_MEM_STAT_FN(cache, CACHE)
+__HPP_COLOR_MEM_STAT_FN(memory, MEMORY)
+__HPP_COLOR_MEM_STAT_FN(snoop, SNOOP)
+__HPP_COLOR_MEM_STAT_FN(dtlb, DTLB)
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_COLOR_ACC_PERCENT_FN
+#undef __HPP_COLOR_MEM_STAT_FN
void hist_browser__init_hpp(void)
{
@@ -1296,6 +1312,16 @@ void hist_browser__init_hpp(void)
hist_browser__hpp_color_overhead_acc;
perf_hpp__format[PERF_HPP__LATENCY_ACC].color =
hist_browser__hpp_color_latency_acc;
+ perf_hpp__format[PERF_HPP__MEM_STAT_OP].color =
+ hist_browser__hpp_color_mem_stat_op;
+ perf_hpp__format[PERF_HPP__MEM_STAT_CACHE].color =
+ hist_browser__hpp_color_mem_stat_cache;
+ perf_hpp__format[PERF_HPP__MEM_STAT_MEMORY].color =
+ hist_browser__hpp_color_mem_stat_memory;
+ perf_hpp__format[PERF_HPP__MEM_STAT_SNOOP].color =
+ hist_browser__hpp_color_mem_stat_snoop;
+ perf_hpp__format[PERF_HPP__MEM_STAT_DTLB].color =
+ hist_browser__hpp_color_mem_stat_dtlb;
res_sample_init();
}
@@ -1686,7 +1712,8 @@ hists_browser__scnprintf_headers(struct hist_browser *browser, char *buf,
return ret;
}
-static int hists_browser__scnprintf_hierarchy_headers(struct hist_browser *browser, char *buf, size_t size)
+static int hists_browser__scnprintf_hierarchy_headers(struct hist_browser *browser,
+ char *buf, size_t size, int line)
{
struct hists *hists = browser->hists;
struct perf_hpp dummy_hpp = {
@@ -1712,7 +1739,7 @@ static int hists_browser__scnprintf_hierarchy_headers(struct hist_browser *brows
if (column++ < browser->b.horiz_scroll)
continue;
- ret = fmt->header(fmt, &dummy_hpp, hists, 0, NULL);
+ ret = fmt->header(fmt, &dummy_hpp, hists, line, NULL);
if (advance_hpp_check(&dummy_hpp, ret))
break;
@@ -1723,6 +1750,9 @@ static int hists_browser__scnprintf_hierarchy_headers(struct hist_browser *brows
first_node = false;
}
+ if (line < hists->hpp_list->nr_header_lines - 1)
+ return ret;
+
if (!first_node) {
ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, "%*s",
indent * HIERARCHY_INDENT, "");
@@ -1753,7 +1783,7 @@ static int hists_browser__scnprintf_hierarchy_headers(struct hist_browser *brows
}
first_col = false;
- ret = fmt->header(fmt, &dummy_hpp, hists, 0, NULL);
+ ret = fmt->header(fmt, &dummy_hpp, hists, line, NULL);
dummy_hpp.buf[ret] = '\0';
start = strim(dummy_hpp.buf);
@@ -1772,14 +1802,18 @@ static int hists_browser__scnprintf_hierarchy_headers(struct hist_browser *brows
static void hists_browser__hierarchy_headers(struct hist_browser *browser)
{
+ struct perf_hpp_list *hpp_list = browser->hists->hpp_list;
char headers[1024];
+ int line;
- hists_browser__scnprintf_hierarchy_headers(browser, headers,
- sizeof(headers));
+ for (line = 0; line < hpp_list->nr_header_lines; line++) {
+ hists_browser__scnprintf_hierarchy_headers(browser, headers,
+ sizeof(headers), line);
- ui_browser__gotorc_title(&browser->b, 0, 0);
- ui_browser__set_color(&browser->b, HE_COLORSET_ROOT);
- ui_browser__write_nstring(&browser->b, headers, browser->b.width + 1);
+ ui_browser__gotorc_title(&browser->b, line, 0);
+ ui_browser__set_color(&browser->b, HE_COLORSET_ROOT);
+ ui_browser__write_nstring(&browser->b, headers, browser->b.width + 1);
+ }
}
static void hists_browser__headers(struct hist_browser *browser)
@@ -2422,7 +2456,6 @@ close_file_and_continue:
struct popup_action {
unsigned long time;
struct thread *thread;
- struct evsel *evsel;
int (*fn)(struct hist_browser *browser, struct popup_action *act);
struct map_symbol ms;
int socket;
@@ -2451,8 +2484,8 @@ do_annotate(struct hist_browser *browser, struct popup_action *act)
else
evsel = hists_to_evsel(browser->hists);
- err = map_symbol__tui_annotate(&act->ms, evsel, browser->hbt);
he = hist_browser__selected_entry(browser);
+ err = __hist_entry__tui_annotate(he, &act->ms, evsel, browser->hbt, NO_ADDR);
/*
* offer option to annotate the other branch source or target
* (if they exists) when returning from annotate
@@ -2489,8 +2522,7 @@ static struct symbol *symbol__new_unresolved(u64 addr, struct map *map)
}
static int
-add_annotate_opt(struct hist_browser *browser __maybe_unused,
- struct popup_action *act, char **optstr,
+add_annotate_opt(struct popup_action *act, char **optstr,
struct map_symbol *ms,
u64 addr)
{
@@ -2514,18 +2546,17 @@ add_annotate_opt(struct hist_browser *browser __maybe_unused,
}
static int
-do_annotate_type(struct hist_browser *browser, struct popup_action *act)
+do_annotate_type(struct hist_browser *browser, struct popup_action *act __maybe_unused)
{
struct hist_entry *he = browser->he_selection;
- hist_entry__annotate_data_tui(he, act->evsel, browser->hbt);
+ hist_entry__annotate_data_tui(he, hists_to_evsel(browser->hists), browser->hbt);
ui_browser__handle_resize(&browser->b);
return 0;
}
static int
-add_annotate_type_opt(struct hist_browser *browser,
- struct popup_action *act, char **optstr,
+add_annotate_type_opt(struct popup_action *act, char **optstr,
struct hist_entry *he)
{
if (he == NULL || he->mem_type == NULL || he->mem_type->histograms == NULL)
@@ -2534,7 +2565,6 @@ add_annotate_type_opt(struct hist_browser *browser,
if (asprintf(optstr, "Annotate type %s", he->mem_type->self.type_name) < 0)
return 0;
- act->evsel = hists_to_evsel(browser->hists);
act->fn = do_annotate_type;
return 1;
}
@@ -2695,7 +2725,7 @@ add_map_opt(struct hist_browser *browser,
}
static int
-do_run_script(struct hist_browser *browser __maybe_unused,
+do_run_script(struct hist_browser *browser,
struct popup_action *act)
{
char *script_opt;
@@ -2734,27 +2764,26 @@ do_run_script(struct hist_browser *browser __maybe_unused,
n += snprintf(script_opt + n, len - n, " --time %s,%s", start, end);
}
- script_browse(script_opt, act->evsel);
+ script_browse(script_opt, hists_to_evsel(browser->hists));
free(script_opt);
return 0;
}
static int
-do_res_sample_script(struct hist_browser *browser __maybe_unused,
+do_res_sample_script(struct hist_browser *browser,
struct popup_action *act)
{
struct hist_entry *he;
he = hist_browser__selected_entry(browser);
- res_sample_browse(he->res_samples, he->num_res, act->evsel, act->rstype);
+ res_sample_browse(he->res_samples, he->num_res, hists_to_evsel(browser->hists), act->rstype);
return 0;
}
static int
-add_script_opt_2(struct hist_browser *browser __maybe_unused,
- struct popup_action *act, char **optstr,
+add_script_opt_2(struct popup_action *act, char **optstr,
struct thread *thread, struct symbol *sym,
- struct evsel *evsel, const char *tstr)
+ const char *tstr)
{
if (thread) {
@@ -2772,7 +2801,6 @@ add_script_opt_2(struct hist_browser *browser __maybe_unused,
act->thread = thread;
act->ms.sym = sym;
- act->evsel = evsel;
act->fn = do_run_script;
return 1;
}
@@ -2780,13 +2808,12 @@ add_script_opt_2(struct hist_browser *browser __maybe_unused,
static int
add_script_opt(struct hist_browser *browser,
struct popup_action *act, char **optstr,
- struct thread *thread, struct symbol *sym,
- struct evsel *evsel)
+ struct thread *thread, struct symbol *sym)
{
int n, j;
struct hist_entry *he;
- n = add_script_opt_2(browser, act, optstr, thread, sym, evsel, "");
+ n = add_script_opt_2(act, optstr, thread, sym, "");
he = hist_browser__selected_entry(browser);
if (sort_order && strstr(sort_order, "time")) {
@@ -2800,8 +2827,7 @@ add_script_opt(struct hist_browser *browser,
j += sprintf(tstr + j, "-");
timestamp__scnprintf_usec(he->time + symbol_conf.time_quantum,
tstr + j, sizeof tstr - j);
- n += add_script_opt_2(browser, act, optstr, thread, sym,
- evsel, tstr);
+ n += add_script_opt_2(act, optstr, thread, sym, tstr);
act->time = he->time;
}
return n;
@@ -2811,7 +2837,6 @@ static int
add_res_sample_opt(struct hist_browser *browser __maybe_unused,
struct popup_action *act, char **optstr,
struct res_sample *res_sample,
- struct evsel *evsel,
enum rstype type)
{
if (!res_sample)
@@ -2823,7 +2848,6 @@ add_res_sample_opt(struct hist_browser *browser __maybe_unused,
return 0;
act->fn = do_res_sample_script;
- act->evsel = evsel;
act->rstype = type;
return 1;
}
@@ -3209,7 +3233,7 @@ do_hotkey: // key came straight from options ui__popup_menu()
case 'i':
/* env->arch is NULL for live-mode (i.e. perf top) */
if (env->arch)
- tui__header_window(env);
+ tui__header_window(evsel__session(evsel));
continue;
case 'F':
symbol_conf.filter_relative ^= 1;
@@ -3274,10 +3298,10 @@ do_hotkey: // key came straight from options ui__popup_menu()
/*
* No need to set actions->dso here since
* it's just to remove the current filter.
- * Ditto for thread below.
*/
do_zoom_dso(browser, actions);
} else if (top == &browser->hists->thread_filter) {
+ actions->thread = thread;
do_zoom_thread(browser, actions);
} else if (top == &browser->hists->socket_filter) {
do_zoom_socket(browser, actions);
@@ -3308,6 +3332,8 @@ do_hotkey: // key came straight from options ui__popup_menu()
/* Fall thru */
default:
helpline = "Press '?' for help on key bindings";
+ ui_browser__warn_unhandled_hotkey(&browser->b, key, delay_secs,
+ ", use 'h'/'?'/F1 to see actions");
continue;
}
@@ -3322,27 +3348,23 @@ do_hotkey: // key came straight from options ui__popup_menu()
if (bi == NULL)
goto skip_annotation;
- nr_options += add_annotate_opt(browser,
- &actions[nr_options],
+ nr_options += add_annotate_opt(&actions[nr_options],
&options[nr_options],
&bi->from.ms,
bi->from.al_addr);
if (bi->to.ms.sym != bi->from.ms.sym)
- nr_options += add_annotate_opt(browser,
- &actions[nr_options],
+ nr_options += add_annotate_opt(&actions[nr_options],
&options[nr_options],
&bi->to.ms,
bi->to.al_addr);
} else if (browser->he_selection) {
- nr_options += add_annotate_opt(browser,
- &actions[nr_options],
+ nr_options += add_annotate_opt(&actions[nr_options],
&options[nr_options],
browser->selection,
browser->he_selection->ip);
}
skip_annotation:
- nr_options += add_annotate_type_opt(browser,
- &actions[nr_options],
+ nr_options += add_annotate_type_opt(&actions[nr_options],
&options[nr_options],
browser->he_selection);
nr_options += add_thread_opt(browser, &actions[nr_options],
@@ -3366,7 +3388,7 @@ skip_annotation:
nr_options += add_script_opt(browser,
&actions[nr_options],
&options[nr_options],
- thread, NULL, evsel);
+ thread, NULL);
}
/*
* Note that browser->selection != NULL
@@ -3381,24 +3403,23 @@ skip_annotation:
nr_options += add_script_opt(browser,
&actions[nr_options],
&options[nr_options],
- NULL, browser->selection->sym,
- evsel);
+ NULL, browser->selection->sym);
}
}
nr_options += add_script_opt(browser, &actions[nr_options],
- &options[nr_options], NULL, NULL, evsel);
+ &options[nr_options], NULL, NULL);
nr_options += add_res_sample_opt(browser, &actions[nr_options],
&options[nr_options],
hist_browser__selected_res_sample(browser),
- evsel, A_NORMAL);
+ A_NORMAL);
nr_options += add_res_sample_opt(browser, &actions[nr_options],
&options[nr_options],
hist_browser__selected_res_sample(browser),
- evsel, A_ASM);
+ A_ASM);
nr_options += add_res_sample_opt(browser, &actions[nr_options],
&options[nr_options],
hist_browser__selected_res_sample(browser),
- evsel, A_SOURCE);
+ A_SOURCE);
nr_options += add_switch_opt(browser, &actions[nr_options],
&options[nr_options]);
skip_scripting:
@@ -3568,6 +3589,7 @@ browse_hists:
case CTRL('c'):
goto out;
default:
+ ui_browser__warn_unhandled_hotkey(&menu->b, key, delay_secs, NULL);
continue;
}
}
@@ -3693,7 +3715,7 @@ int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel,
struct popup_action action;
char *br_cntr_text = NULL;
static const char help[] =
- " q Quit \n"
+ " q/ESC Quit \n"
" B Branch counter abbr list (Optional)\n";
browser = hist_browser__new(hists);
@@ -3720,6 +3742,7 @@ int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel,
switch (key) {
case 'q':
+ case K_ESC:
goto out;
case '?':
ui_browser__help_window(&browser->b, help);
@@ -3746,7 +3769,9 @@ int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel,
}
continue;
default:
- break;
+ ui_browser__warn_unhandled_hotkey(&browser->b, key, 0,
+ ", use '?' to see actions");
+ continue;
}
}
diff --git a/tools/perf/ui/browsers/map.c b/tools/perf/ui/browsers/map.c
index fba55175a935..c61ba3174a24 100644
--- a/tools/perf/ui/browsers/map.c
+++ b/tools/perf/ui/browsers/map.c
@@ -88,8 +88,10 @@ static int map_browser__run(struct map_browser *browser)
case '/':
if (verbose > 0)
map_browser__search(browser);
+ /* fall thru */
default:
- break;
+ ui_browser__warn_unhandled_hotkey(&browser->b, key, 0, NULL);
+ continue;
case K_LEFT:
case K_ESC:
case 'q':
diff --git a/tools/perf/ui/browsers/scripts.c b/tools/perf/ui/browsers/scripts.c
index 2d04ece833aa..1e8c2c2f952d 100644
--- a/tools/perf/ui/browsers/scripts.c
+++ b/tools/perf/ui/browsers/scripts.c
@@ -94,7 +94,7 @@ static int check_ev_match(int dir_fd, const char *scriptname, struct perf_sessio
FILE *fp;
{
- char filename[FILENAME_MAX + 5];
+ char filename[NAME_MAX + 5];
int fd;
scnprintf(filename, sizeof(filename), "bin/%s-record", scriptname);
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index ae3b7fe1dadc..e58327595d37 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
#include <inttypes.h>
#include <math.h>
#include <stdlib.h>
@@ -11,6 +12,8 @@
#include "../util/sort.h"
#include "../util/evsel.h"
#include "../util/evlist.h"
+#include "../util/mem-events.h"
+#include "../util/string2.h"
#include "../util/thread.h"
#include "../util/util.h"
@@ -150,6 +153,48 @@ int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmtype);
}
+int hpp__fmt_mem_stat(struct perf_hpp_fmt *fmt __maybe_unused, struct perf_hpp *hpp,
+ struct hist_entry *he, enum mem_stat_type mst,
+ const char *fmtstr, hpp_snprint_fn print_fn)
+{
+ struct hists *hists = he->hists;
+ int mem_stat_idx = -1;
+ char *buf = hpp->buf;
+ size_t size = hpp->size;
+ u64 total = 0;
+ int ret = 0;
+
+ for (int i = 0; i < hists->nr_mem_stats; i++) {
+ if (hists->mem_stat_types[i] == mst) {
+ mem_stat_idx = i;
+ break;
+ }
+ }
+ assert(mem_stat_idx != -1);
+
+ for (int i = 0; i < MEM_STAT_LEN; i++)
+ total += hists->mem_stat_total[mem_stat_idx].entries[i];
+ assert(total != 0);
+
+ for (int i = 0; i < MEM_STAT_LEN; i++) {
+ u64 val = he->mem_stat[mem_stat_idx].entries[i];
+
+ if (hists->mem_stat_total[mem_stat_idx].entries[i] == 0)
+ continue;
+
+ ret += hpp__call_print_fn(hpp, print_fn, fmtstr, 100.0 * val / total);
+ }
+
+ /*
+	 * Restore the original buf and size as that is where the caller
+	 * expects the result to be saved.
+ */
+ hpp->buf = buf;
+ hpp->size = size;
+
+ return ret;
+}
+
static int field_cmp(u64 field_a, u64 field_b)
{
if (field_a > field_b)
@@ -294,6 +339,37 @@ static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
return ret;
}
+static bool perf_hpp__is_mem_stat_entry(struct perf_hpp_fmt *fmt);
+
+static enum mem_stat_type hpp__mem_stat_type(struct perf_hpp_fmt *fmt)
+{
+ if (!perf_hpp__is_mem_stat_entry(fmt))
+ return -1;
+
+ switch (fmt->idx) {
+ case PERF_HPP__MEM_STAT_OP:
+ return PERF_MEM_STAT_OP;
+ case PERF_HPP__MEM_STAT_CACHE:
+ return PERF_MEM_STAT_CACHE;
+ case PERF_HPP__MEM_STAT_MEMORY:
+ return PERF_MEM_STAT_MEMORY;
+ case PERF_HPP__MEM_STAT_SNOOP:
+ return PERF_MEM_STAT_SNOOP;
+ case PERF_HPP__MEM_STAT_DTLB:
+ return PERF_MEM_STAT_DTLB;
+ default:
+ break;
+ }
+ pr_debug("Should not reach here\n");
+ return -1;
+}
+
+static int64_t hpp__sort_mem_stat(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *a, struct hist_entry *b)
+{
+ return a->stat.period - b->stat.period;
+}
+
static int hpp__width_fn(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp __maybe_unused,
struct hists *hists)
@@ -321,11 +397,78 @@ static int hpp__width_fn(struct perf_hpp_fmt *fmt,
}
static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
- struct hists *hists, int line __maybe_unused,
+ struct hists *hists, int line,
int *span __maybe_unused)
{
int len = hpp__width_fn(fmt, hpp, hists);
- return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
+ const char *hdr = "";
+
+ if (line == hists->hpp_list->nr_header_lines - 1)
+ hdr = fmt->name;
+
+ return scnprintf(hpp->buf, hpp->size, "%*s", len, hdr);
+}
+
+static int hpp__header_mem_stat_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hists *hists, int line,
+ int *span __maybe_unused)
+{
+ char *buf = hpp->buf;
+ int ret = 0;
+ int len;
+ enum mem_stat_type mst = hpp__mem_stat_type(fmt);
+ int mem_stat_idx = -1;
+
+ for (int i = 0; i < hists->nr_mem_stats; i++) {
+ if (hists->mem_stat_types[i] == mst) {
+ mem_stat_idx = i;
+ break;
+ }
+ }
+ assert(mem_stat_idx != -1);
+
+ if (line == 0) {
+ int left, right;
+
+ len = 0;
+		/* update fmt->len for actually used columns only */
+ for (int i = 0; i < MEM_STAT_LEN; i++) {
+ if (hists->mem_stat_total[mem_stat_idx].entries[i])
+ len += MEM_STAT_PRINT_LEN;
+ }
+ fmt->len = len;
+
+ /* print header directly if single column only */
+ if (len == MEM_STAT_PRINT_LEN)
+ return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
+
+ left = (len - strlen(fmt->name)) / 2 - 1;
+ right = len - left - strlen(fmt->name) - 2;
+
+ if (left < 0)
+ left = 0;
+ if (right < 0)
+ right = 0;
+
+ return scnprintf(hpp->buf, hpp->size, "%.*s %s %.*s",
+ left, graph_dotted_line, fmt->name, right, graph_dotted_line);
+ }
+
+ len = hpp->size;
+ for (int i = 0; i < MEM_STAT_LEN; i++) {
+ int printed;
+
+ if (hists->mem_stat_total[mem_stat_idx].entries[i] == 0)
+ continue;
+
+ printed = scnprintf(buf, len, "%*s", MEM_STAT_PRINT_LEN,
+ mem_stat_name(mst, i));
+ ret += printed;
+ buf += printed;
+ len -= printed;
+ }
+ return ret;
}
int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
@@ -453,6 +596,23 @@ static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
return __hpp__sort(a, b, he_get_##_field); \
}
+#define __HPP_COLOR_MEM_STAT_FN(_name, _type) \
+static int hpp__color_mem_stat_##_name(struct perf_hpp_fmt *fmt, \
+ struct perf_hpp *hpp, \
+ struct hist_entry *he) \
+{ \
+ return hpp__fmt_mem_stat(fmt, hpp, he, PERF_MEM_STAT_##_type, \
+ " %5.1f%%", hpp_color_scnprintf); \
+}
+
+#define __HPP_ENTRY_MEM_STAT_FN(_name, _type) \
+static int hpp__entry_mem_stat_##_name(struct perf_hpp_fmt *fmt, \
+ struct perf_hpp *hpp, \
+ struct hist_entry *he) \
+{ \
+ return hpp__fmt_mem_stat(fmt, hpp, he, PERF_MEM_STAT_##_type, \
+ " %5.1f%%", hpp_entry_scnprintf); \
+}
#define HPP_PERCENT_FNS(_type, _field, _fmttype) \
__HPP_COLOR_PERCENT_FN(_type, _field, _fmttype) \
@@ -472,6 +632,10 @@ __HPP_SORT_RAW_FN(_type, _field)
__HPP_ENTRY_AVERAGE_FN(_type, _field) \
__HPP_SORT_AVERAGE_FN(_type, _field)
+#define HPP_MEM_STAT_FNS(_name, _type) \
+__HPP_COLOR_MEM_STAT_FN(_name, _type) \
+__HPP_ENTRY_MEM_STAT_FN(_name, _type)
+
HPP_PERCENT_FNS(overhead, period, PERF_HPP_FMT_TYPE__PERCENT)
HPP_PERCENT_FNS(latency, latency, PERF_HPP_FMT_TYPE__LATENCY)
HPP_PERCENT_FNS(overhead_sys, period_sys, PERF_HPP_FMT_TYPE__PERCENT)
@@ -488,6 +652,12 @@ HPP_AVERAGE_FNS(weight1, weight1)
HPP_AVERAGE_FNS(weight2, weight2)
HPP_AVERAGE_FNS(weight3, weight3)
+HPP_MEM_STAT_FNS(op, OP)
+HPP_MEM_STAT_FNS(cache, CACHE)
+HPP_MEM_STAT_FNS(memory, MEMORY)
+HPP_MEM_STAT_FNS(snoop, SNOOP)
+HPP_MEM_STAT_FNS(dtlb, DTLB)
+
static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *a __maybe_unused,
struct hist_entry *b __maybe_unused)
@@ -495,6 +665,11 @@ static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
return 0;
}
+static bool perf_hpp__is_mem_stat_entry(struct perf_hpp_fmt *fmt)
+{
+ return fmt->sort == hpp__sort_mem_stat;
+}
+
static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
{
return a->header == hpp__header_fn;
@@ -508,6 +683,14 @@ static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
return a->idx == b->idx;
}
+static bool hpp__equal_mem_stat(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
+{
+ if (!perf_hpp__is_mem_stat_entry(a) || !perf_hpp__is_mem_stat_entry(b))
+ return false;
+
+ return a->entry == b->entry;
+}
+
#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx) \
{ \
.name = _name, \
@@ -549,6 +732,20 @@ static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
.equal = hpp__equal, \
}
+#define HPP__MEM_STAT_PRINT_FNS(_name, _fn, _type) \
+ { \
+ .name = _name, \
+ .header = hpp__header_mem_stat_fn, \
+ .width = hpp__width_fn, \
+ .color = hpp__color_mem_stat_ ## _fn, \
+ .entry = hpp__entry_mem_stat_ ## _fn, \
+ .cmp = hpp__nop_cmp, \
+ .collapse = hpp__nop_cmp, \
+ .sort = hpp__sort_mem_stat, \
+ .idx = PERF_HPP__MEM_STAT_ ## _type, \
+ .equal = hpp__equal_mem_stat, \
+ }
+
struct perf_hpp_fmt perf_hpp__format[] = {
HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
HPP__COLOR_PRINT_FNS("Latency", latency, LATENCY),
@@ -563,6 +760,11 @@ struct perf_hpp_fmt perf_hpp__format[] = {
HPP__PRINT_FNS("Weight1", weight1, WEIGHT1),
HPP__PRINT_FNS("Weight2", weight2, WEIGHT2),
HPP__PRINT_FNS("Weight3", weight3, WEIGHT3),
+ HPP__MEM_STAT_PRINT_FNS("Mem Op", op, OP),
+ HPP__MEM_STAT_PRINT_FNS("Cache", cache, CACHE),
+ HPP__MEM_STAT_PRINT_FNS("Memory", memory, MEMORY),
+ HPP__MEM_STAT_PRINT_FNS("Snoop", snoop, SNOOP),
+ HPP__MEM_STAT_PRINT_FNS("D-TLB", dtlb, DTLB),
};
struct perf_hpp_list perf_hpp_list = {
@@ -574,11 +776,13 @@ struct perf_hpp_list perf_hpp_list = {
#undef HPP__COLOR_PRINT_FNS
#undef HPP__COLOR_ACC_PRINT_FNS
#undef HPP__PRINT_FNS
+#undef HPP__MEM_STAT_PRINT_FNS
#undef HPP_PERCENT_FNS
#undef HPP_PERCENT_ACC_FNS
#undef HPP_RAW_FNS
#undef HPP_AVERAGE_FNS
+#undef HPP_MEM_STAT_FNS
#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
@@ -588,6 +792,9 @@ struct perf_hpp_list perf_hpp_list = {
#undef __HPP_ENTRY_ACC_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN
#undef __HPP_ENTRY_AVERAGE_FN
+#undef __HPP_COLOR_MEM_STAT_FN
+#undef __HPP_ENTRY_MEM_STAT_FN
+
#undef __HPP_SORT_FN
#undef __HPP_SORT_ACC_FN
#undef __HPP_SORT_RAW_FN
@@ -696,12 +903,14 @@ void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
static void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
list_del_init(&format->list);
+ list_del_init(&format->sort_list);
fmt_free(format);
}
-void perf_hpp__cancel_cumulate(void)
+void perf_hpp__cancel_cumulate(struct evlist *evlist)
{
struct perf_hpp_fmt *fmt, *acc, *ovh, *acc_lat, *tmp;
+ struct evsel *evsel;
if (is_strict_order(field_order))
return;
@@ -719,11 +928,29 @@ void perf_hpp__cancel_cumulate(void)
if (fmt_equal(ovh, fmt))
fmt->name = "Overhead";
}
+
+ evlist__for_each_entry(evlist, evsel) {
+ struct hists *hists = evsel__hists(evsel);
+ struct perf_hpp_list_node *node;
+
+ list_for_each_entry(node, &hists->hpp_formats, list) {
+ perf_hpp_list__for_each_format_safe(&node->hpp, fmt, tmp) {
+ if (fmt_equal(acc, fmt) || fmt_equal(acc_lat, fmt)) {
+ perf_hpp__column_unregister(fmt);
+ continue;
+ }
+
+ if (fmt_equal(ovh, fmt))
+ fmt->name = "Overhead";
+ }
+ }
+ }
}
-void perf_hpp__cancel_latency(void)
+void perf_hpp__cancel_latency(struct evlist *evlist)
{
struct perf_hpp_fmt *fmt, *lat, *acc, *tmp;
+ struct evsel *evsel;
if (is_strict_order(field_order))
return;
@@ -737,6 +964,18 @@ void perf_hpp__cancel_latency(void)
if (fmt_equal(lat, fmt) || fmt_equal(acc, fmt))
perf_hpp__column_unregister(fmt);
}
+
+ evlist__for_each_entry(evlist, evsel) {
+ struct hists *hists = evsel__hists(evsel);
+ struct perf_hpp_list_node *node;
+
+ list_for_each_entry(node, &hists->hpp_formats, list) {
+ perf_hpp_list__for_each_format_safe(&node->hpp, fmt, tmp) {
+ if (fmt_equal(lat, fmt) || fmt_equal(acc, fmt))
+ perf_hpp__column_unregister(fmt);
+ }
+ }
+ }
}
void perf_hpp__setup_output_field(struct perf_hpp_list *list)
@@ -787,18 +1026,12 @@ void perf_hpp__reset_output_field(struct perf_hpp_list *list)
struct perf_hpp_fmt *fmt, *tmp;
/* reset output fields */
- perf_hpp_list__for_each_format_safe(list, fmt, tmp) {
- list_del_init(&fmt->list);
- list_del_init(&fmt->sort_list);
- fmt_free(fmt);
- }
+ perf_hpp_list__for_each_format_safe(list, fmt, tmp)
+ perf_hpp__column_unregister(fmt);
/* reset sort keys */
- perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) {
- list_del_init(&fmt->list);
- list_del_init(&fmt->sort_list);
- fmt_free(fmt);
- }
+ perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp)
+ perf_hpp__column_unregister(fmt);
}
/*
@@ -886,6 +1119,14 @@ void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
fmt->len = 8;
break;
+ case PERF_HPP__MEM_STAT_OP:
+ case PERF_HPP__MEM_STAT_CACHE:
+ case PERF_HPP__MEM_STAT_MEMORY:
+ case PERF_HPP__MEM_STAT_SNOOP:
+ case PERF_HPP__MEM_STAT_DTLB:
+ fmt->len = MEM_STAT_LEN * MEM_STAT_PRINT_LEN;
+ break;
+
default:
break;
}
@@ -991,3 +1232,42 @@ int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
return 0;
}
+
+int perf_hpp__alloc_mem_stats(struct perf_hpp_list *list, struct evlist *evlist)
+{
+ struct perf_hpp_fmt *fmt;
+ struct evsel *evsel;
+ enum mem_stat_type mst[16];
+ unsigned nr_mem_stats = 0;
+
+ perf_hpp_list__for_each_format(list, fmt) {
+ if (!perf_hpp__is_mem_stat_entry(fmt))
+ continue;
+
+ assert(nr_mem_stats < ARRAY_SIZE(mst));
+ mst[nr_mem_stats++] = hpp__mem_stat_type(fmt);
+ }
+
+ if (nr_mem_stats == 0)
+ return 0;
+
+ list->nr_header_lines = 2;
+
+ evlist__for_each_entry(evlist, evsel) {
+ struct hists *hists = evsel__hists(evsel);
+
+ hists->mem_stat_types = calloc(nr_mem_stats,
+ sizeof(*hists->mem_stat_types));
+ if (hists->mem_stat_types == NULL)
+ return -ENOMEM;
+
+ hists->mem_stat_total = calloc(nr_mem_stats,
+ sizeof(*hists->mem_stat_total));
+ if (hists->mem_stat_total == NULL)
+ return -ENOMEM;
+
+ memcpy(hists->mem_stat_types, mst, nr_mem_stats * sizeof(*mst));
+ hists->nr_mem_stats = nr_mem_stats;
+ }
+ return 0;
+}
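
For reference, the per-cell percentage math in hpp__fmt_mem_stat() above reduces to the following standalone sketch: the grand total is summed over every bucket of the statistic, each entry is printed as its share of that total, and buckets whose column total is zero are suppressed. The bucket count and sample values here are made up for illustration:

#include <stdio.h>

#define NR_BUCKETS 4    /* stands in for MEM_STAT_LEN */

static void print_mem_stat_row(const unsigned long long *entry,
                               const unsigned long long *col_total)
{
    unsigned long long total = 0;

    for (int i = 0; i < NR_BUCKETS; i++)
        total += col_total[i];  /* grand total, asserted != 0 upstream */

    for (int i = 0; i < NR_BUCKETS; i++) {
        if (col_total[i] == 0)  /* unused column: skip, like the patch */
            continue;
        printf(" %5.1f%%", 100.0 * entry[i] / total);
    }
    putchar('\n');
}

int main(void)
{
    unsigned long long totals[NR_BUCKETS] = { 40, 0, 10, 50 };
    unsigned long long row[NR_BUCKETS]    = { 20, 0,  5, 25 };

    print_mem_stat_row(row, totals);    /* "  20.0%   5.0%  25.0%" */
    return 0;
}
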
diff --git a/tools/perf/ui/keysyms.c b/tools/perf/ui/keysyms.c
new file mode 100644
index 000000000000..b64564b07f2f
--- /dev/null
+++ b/tools/perf/ui/keysyms.c
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include "keysyms.h"
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+
+const char *key_name(int key, char *bf, size_t size)
+{
+ if (isprint(key)) {
+ scnprintf(bf, size, "%c", key);
+ } else if (key < 32) {
+ scnprintf(bf, size, "Ctrl+%c", key + '@');
+ } else {
+ const char *name = NULL;
+
+ switch (key) {
+ case K_DOWN: name = "Down"; break;
+ case K_END: name = "End"; break;
+ case K_ENTER: name = "Enter"; break;
+ case K_ESC: name = "ESC"; break;
+ case K_F1: name = "F1"; break;
+ case K_HOME: name = "Home"; break;
+ case K_LEFT: name = "Left"; break;
+ case K_PGDN: name = "PgDown"; break;
+ case K_PGUP: name = "PgUp"; break;
+ case K_RIGHT: name = "Right"; break;
+ case K_TAB: name = "Tab"; break;
+ case K_UNTAB: name = "Untab"; break;
+ case K_UP: name = "Up"; break;
+ case K_BKSPC: name = "Backspace"; break;
+ case K_DEL: name = "Del"; break;
+ default:
+ if (key >= SL_KEY_F(1) && key <= SL_KEY_F(63))
+ scnprintf(bf, size, "F%d", key - SL_KEY_F(0));
+ else
+ scnprintf(bf, size, "Unknown (%d)", key);
+ }
+
+ if (name)
+ scnprintf(bf, size, "%s", name);
+ }
+
+ return bf;
+}
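
The Ctrl+ branch above is pure ASCII arithmetic: control codes 1..26 sit exactly 64 positions below 'A'..'Z', so adding '@' (64) recovers the letter. A standalone sketch of the naming rule, with isprint() approximated by an explicit ASCII range:

#include <stdio.h>

static void name_key(int key, char *bf, int size)
{
    if (key >= 32 && key < 127)         /* printable ASCII */
        snprintf(bf, size, "%c", key);
    else if (key > 0 && key < 32)       /* control code */
        snprintf(bf, size, "Ctrl+%c", key + '@');
    else
        snprintf(bf, size, "Unknown (%d)", key);
}

int main(void)
{
    char bf[32];

    name_key(1, bf, sizeof(bf));        /* 1 + '@' == 'A' -> "Ctrl+A" */
    puts(bf);
    name_key('q', bf, sizeof(bf));      /* -> "q" */
    puts(bf);
    return 0;
}
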
diff --git a/tools/perf/ui/keysyms.h b/tools/perf/ui/keysyms.h
index 04cc4e5c031f..969060edc362 100644
--- a/tools/perf/ui/keysyms.h
+++ b/tools/perf/ui/keysyms.h
@@ -27,4 +27,6 @@
#define K_SWITCH_INPUT_DATA -4
#define K_RELOAD -5
+const char *key_name(int key, char *bf, size_t size);
+
#endif /* _PERF_KEYSYMS_H_ */
diff --git a/tools/perf/ui/libslang.h b/tools/perf/ui/libslang.h
index 1dff3020e9d5..6722561e0458 100644
--- a/tools/perf/ui/libslang.h
+++ b/tools/perf/ui/libslang.h
@@ -15,11 +15,7 @@
#define ENABLE_SLFUTURE_CONST 1
#define ENABLE_SLFUTURE_VOID 1
-#ifdef HAVE_SLANG_INCLUDE_SUBDIR
-#include <slang/slang.h>
-#else
#include <slang.h>
-#endif
#define SL_KEY_UNTAB 0x1000
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index 7ac4b98e28bc..8c4c8925df2c 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -643,45 +643,58 @@ static int hists__fprintf_hierarchy_headers(struct hists *hists,
unsigned header_width = 0;
struct perf_hpp_fmt *fmt;
struct perf_hpp_list_node *fmt_node;
+ struct perf_hpp_list *hpp_list = hists->hpp_list;
const char *sep = symbol_conf.field_sep;
indent = hists->nr_hpp_node;
- /* preserve max indent depth for column headers */
- print_hierarchy_indent(sep, indent, " ", fp);
-
/* the first hpp_list_node is for overhead columns */
fmt_node = list_first_entry(&hists->hpp_formats,
struct perf_hpp_list_node, list);
- perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
- fmt->header(fmt, hpp, hists, 0, NULL);
- fprintf(fp, "%s%s", hpp->buf, sep ?: " ");
- }
+ for (int line = 0; line < hpp_list->nr_header_lines; line++) {
+ /* first # is displayed one level up */
+ if (line)
+ fprintf(fp, "# ");
- /* combine sort headers with ' / ' */
- first_node = true;
- list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
- if (!first_node)
- header_width += fprintf(fp, " / ");
- first_node = false;
+ /* preserve max indent depth for column headers */
+ print_hierarchy_indent(sep, indent, " ", fp);
- first_col = true;
perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
- if (perf_hpp__should_skip(fmt, hists))
- continue;
+ fmt->header(fmt, hpp, hists, line, NULL);
+ fprintf(fp, "%s%s", hpp->buf, sep ?: " ");
+ }
- if (!first_col)
- header_width += fprintf(fp, "+");
- first_col = false;
+ if (line < hpp_list->nr_header_lines - 1)
+ goto next_line;
+
+ /* combine sort headers with ' / ' */
+ first_node = true;
+ list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
+ if (!first_node)
+ header_width += fprintf(fp, " / ");
+ first_node = false;
- fmt->header(fmt, hpp, hists, 0, NULL);
+ first_col = true;
+ perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
+ if (perf_hpp__should_skip(fmt, hists))
+ continue;
- header_width += fprintf(fp, "%s", strim(hpp->buf));
+ if (!first_col)
+ header_width += fprintf(fp, "+");
+ first_col = false;
+
+ fmt->header(fmt, hpp, hists, line, NULL);
+
+ header_width += fprintf(fp, "%s", strim(hpp->buf));
+ }
}
+
+next_line:
+ fprintf(fp, "\n");
}
- fprintf(fp, "\n# ");
+ fprintf(fp, "# ");
/* preserve max indent depth for initial dots */
print_hierarchy_indent(sep, indent, dots, fp);
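
The extra header line printed by this loop comes from callbacks like hpp__header_mem_stat_fn() earlier in the patch, which centers the group name over its sub-columns between runs of dots. A standalone sketch of that centering arithmetic (the local dot string stands in for graph_dotted_line):

#include <stdio.h>
#include <string.h>

static void print_centered(const char *name, int width)
{
    static const char dots[] = "................................";
    int left = (width - (int)strlen(name)) / 2 - 1;
    int right = width - left - (int)strlen(name) - 2;

    if (left < 0)
        left = 0;
    if (right < 0)
        right = 0;

    /* "%.*s" clips the dot run to exactly the padding width */
    printf("%.*s %s %.*s\n", left, dots, name, right, dots);
}

int main(void)
{
    print_centered("Cache", 21);    /* -> "....... Cache ......." */
    return 0;
}
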
diff --git a/tools/perf/ui/tui/setup.c b/tools/perf/ui/tui/setup.c
index 16c6eff4d241..022534eed68c 100644
--- a/tools/perf/ui/tui/setup.c
+++ b/tools/perf/ui/tui/setup.c
@@ -108,7 +108,7 @@ static void ui__signal_backtrace(int sig)
printf("-------- backtrace --------\n");
size = backtrace(stackdump, ARRAY_SIZE(stackdump));
- backtrace_symbols_fd(stackdump, size, STDOUT_FILENO);
+ __dump_stack(stdout, stackdump, size);
exit(0);
}
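
The replaced call was the classic glibc backtrace pattern; __dump_stack() is a perf helper (introduced elsewhere in this series) that renders the same frames through a FILE * instead of a raw file descriptor. For comparison, a minimal standalone version of the old pattern (demo only: stdio inside a signal handler is not async-signal-safe, which is part of the motivation for funneling output through one helper):

#include <execinfo.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void crash_handler(int sig)
{
    void *stackdump[32];
    int size = backtrace(stackdump, 32);

    printf("-------- backtrace (signal %d) --------\n", sig);
    fflush(stdout);     /* drain stdio before writing to the raw fd */
    backtrace_symbols_fd(stackdump, size, STDOUT_FILENO);
    _exit(0);
}

int main(void)
{
    signal(SIGSEGV, crash_handler);
    raise(SIGSEGV);     /* trigger the handler on purpose */
    return 0;
}
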
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 946bce6628f3..1c2a43e1dc68 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -2,18 +2,19 @@ include $(srctree)/tools/scripts/Makefile.include
include $(srctree)/tools/scripts/utilities.mak
perf-util-y += arm64-frame-pointer-unwind-support.o
+perf-util-y += addr2line.o
perf-util-y += addr_location.o
perf-util-y += annotate.o
perf-util-y += block-info.o
perf-util-y += block-range.o
perf-util-y += build-id.o
perf-util-y += cacheline.o
+perf-util-y += capstone.o
perf-util-y += config.o
perf-util-y += copyfile.o
perf-util-y += ctype.o
perf-util-y += db-export.o
perf-util-y += disasm.o
-perf-util-y += disasm_bpf.o
perf-util-y += env.o
perf-util-y += event.o
perf-util-y += evlist.o
@@ -23,8 +24,9 @@ perf-util-y += evsel_fprintf.o
perf-util-y += perf_event_attr_fprintf.o
perf-util-y += evswitch.o
perf-util-y += find_bit.o
-perf-util-y += get_current_dir_name.o
perf-util-y += levenshtein.o
+perf-util-$(CONFIG_LIBBFD) += libbfd.o
+perf-util-y += llvm.o
perf-util-y += mmap.o
perf-util-y += memswap.o
perf-util-y += parse-events.o
@@ -41,6 +43,7 @@ perf-util-y += rbtree.o
perf-util-y += libstring.o
perf-util-y += bitmap.o
perf-util-y += hweight.o
+perf-util-y += sha1.o
perf-util-y += smt.o
perf-util-y += strbuf.o
perf-util-y += string.o
@@ -84,8 +87,10 @@ perf-util-y += pmu.o
perf-util-y += pmus.o
perf-util-y += pmu-flex.o
perf-util-y += pmu-bison.o
+perf-util-y += drm_pmu.o
perf-util-y += hwmon_pmu.o
perf-util-y += tool_pmu.o
+perf-util-y += tp_pmu.o
perf-util-y += svghelper.o
perf-util-y += trace-event-info.o
perf-util-y += trace-event-scripting.o
@@ -124,21 +129,22 @@ perf-util-y += iostat.o
perf-util-y += stream.o
perf-util-y += kvm-stat.o
perf-util-y += lock-contention.o
-perf-util-$(CONFIG_AUXTRACE) += auxtrace.o
+perf-util-y += auxtrace.o
perf-util-y += intel-pt-decoder/
-perf-util-$(CONFIG_AUXTRACE) += intel-pt.o
-perf-util-$(CONFIG_AUXTRACE) += intel-bts.o
-perf-util-$(CONFIG_AUXTRACE) += arm-spe.o
-perf-util-$(CONFIG_AUXTRACE) += arm-spe-decoder/
-perf-util-$(CONFIG_AUXTRACE) += hisi-ptt.o
-perf-util-$(CONFIG_AUXTRACE) += hisi-ptt-decoder/
-perf-util-$(CONFIG_AUXTRACE) += s390-cpumsf.o
+perf-util-y += intel-pt.o
+perf-util-y += intel-bts.o
+perf-util-y += arm-spe.o
+perf-util-y += arm-spe-decoder/
+perf-util-y += hisi-ptt.o
+perf-util-y += hisi-ptt-decoder/
+perf-util-y += s390-cpumsf.o
+perf-util-y += powerpc-vpadtl.o
ifdef CONFIG_LIBOPENCSD
-perf-util-$(CONFIG_AUXTRACE) += cs-etm.o
-perf-util-$(CONFIG_AUXTRACE) += cs-etm-decoder/
+perf-util-y += cs-etm.o
+perf-util-y += cs-etm-decoder/
endif
-perf-util-$(CONFIG_AUXTRACE) += cs-etm-base.o
+perf-util-y += cs-etm-base.o
perf-util-y += parse-branch-options.o
perf-util-y += dump-insn.o
@@ -161,7 +167,7 @@ perf-util-y += clockid.o
perf-util-y += list_sort.o
perf-util-y += mutex.o
perf-util-y += sharded_mutex.o
-perf-util-$(CONFIG_X86_64) += intel-tpebs.o
+perf-util-y += intel-tpebs.o
perf-util-$(CONFIG_LIBBPF) += bpf_map.o
perf-util-$(CONFIG_PERF_BPF_SKEL) += bpf_counter.o
@@ -173,6 +179,11 @@ perf-util-$(CONFIG_PERF_BPF_SKEL) += bpf-filter-flex.o
perf-util-$(CONFIG_PERF_BPF_SKEL) += bpf-filter-bison.o
perf-util-$(CONFIG_PERF_BPF_SKEL) += btf.o
+ifeq ($(CONFIG_TRACE),y)
+ perf-util-$(CONFIG_PERF_BPF_SKEL) += bpf-trace-summary.o
+ perf-util-$(CONFIG_PERF_BPF_SKEL) += bpf_trace_augment.o
+endif
+
ifeq ($(CONFIG_LIBTRACEEVENT),y)
perf-util-$(CONFIG_PERF_BPF_SKEL) += bpf_lock_contention.o
endif
@@ -237,9 +248,12 @@ perf-util-y += cap.o
perf-util-$(CONFIG_CXX_DEMANGLE) += demangle-cxx.o
perf-util-y += demangle-ocaml.o
perf-util-y += demangle-java.o
-perf-util-y += demangle-rust.o
+perf-util-y += demangle-rust-v0.o
perf-util-$(CONFIG_LIBLLVM) += llvm-c-helpers.o
+CFLAGS_demangle-rust-v0.o += -Wno-shadow -Wno-declaration-after-statement \
+ -Wno-switch-default -Wno-switch-enum -Wno-missing-field-initializers
+
ifdef CONFIG_JITDUMP
perf-util-$(CONFIG_LIBELF) += jitdump.o
perf-util-$(CONFIG_LIBELF) += genelf.o
@@ -414,7 +428,7 @@ endif
$(OUTPUT)%.shellcheck_log: %
$(call rule_mkdir)
- $(Q)$(call echo-cmd,test)shellcheck -a -S warning "$<" > $@ || (cat $@ && rm $@ && false)
+ $(Q)$(call echo-cmd,test)$(SHELLCHECK) "$<" > $@ || (cat $@ && rm $@ && false)
perf-util-y += $(SHELL_TEST_LOGS)
diff --git a/tools/perf/util/addr2line.c b/tools/perf/util/addr2line.c
new file mode 100644
index 000000000000..f2d94a3272d7
--- /dev/null
+++ b/tools/perf/util/addr2line.c
@@ -0,0 +1,439 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "addr2line.h"
+#include "debug.h"
+#include "dso.h"
+#include "string2.h"
+#include "srcline.h"
+#include "symbol.h"
+#include "symbol_conf.h"
+
+#include <api/io.h>
+#include <linux/zalloc.h>
+#include <subcmd/run-command.h>
+
+#include <inttypes.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define MAX_INLINE_NEST 1024
+
+/* If addr2line doesn't return data for 1 second then time out. */
+int addr2line_timeout_ms = 1 * 1000;
+
+static int filename_split(char *filename, unsigned int *line_nr)
+{
+ char *sep;
+
+ sep = strchr(filename, '\n');
+ if (sep)
+ *sep = '\0';
+
+ if (!strcmp(filename, "??:0"))
+ return 0;
+
+ sep = strchr(filename, ':');
+ if (sep) {
+ *sep++ = '\0';
+ *line_nr = strtoul(sep, NULL, 0);
+ return 1;
+ }
+ pr_debug("addr2line missing ':' in filename split\n");
+ return 0;
+}
+
+static void addr2line_subprocess_cleanup(struct child_process *a2l)
+{
+ if (a2l->pid != -1) {
+ kill(a2l->pid, SIGKILL);
+ finish_command(a2l); /* ignore result, we don't care */
+ a2l->pid = -1;
+ close(a2l->in);
+ close(a2l->out);
+ }
+
+ free(a2l);
+}
+
+static struct child_process *addr2line_subprocess_init(const char *addr2line_path,
+ const char *binary_path)
+{
+ const char *argv[] = {
+ addr2line_path ?: "addr2line",
+ "-e", binary_path,
+ "-a", "-i", "-f", NULL
+ };
+ struct child_process *a2l = zalloc(sizeof(*a2l));
+ int start_command_status = 0;
+
+ if (a2l == NULL) {
+ pr_err("Failed to allocate memory for addr2line");
+ return NULL;
+ }
+
+ a2l->pid = -1;
+ a2l->in = -1;
+ a2l->out = -1;
+ a2l->no_stderr = 1;
+
+ a2l->argv = argv;
+ start_command_status = start_command(a2l);
+ a2l->argv = NULL; /* it's not used after start_command; avoid dangling pointers */
+
+ if (start_command_status != 0) {
+ pr_warning("could not start addr2line (%s) for %s: start_command return code %d\n",
+ addr2line_path, binary_path, start_command_status);
+ addr2line_subprocess_cleanup(a2l);
+ return NULL;
+ }
+
+ return a2l;
+}
+
+enum a2l_style {
+ BROKEN,
+ GNU_BINUTILS,
+ LLVM,
+};
+
+static enum a2l_style addr2line_configure(struct child_process *a2l, const char *dso_name)
+{
+ static bool cached;
+ static enum a2l_style style;
+
+ if (!cached) {
+ char buf[128];
+ struct io io;
+ int ch;
+ int lines;
+
+ if (write(a2l->in, ",\n", 2) != 2)
+ return BROKEN;
+
+ io__init(&io, a2l->out, buf, sizeof(buf));
+ ch = io__get_char(&io);
+ if (ch == ',') {
+ style = LLVM;
+ cached = true;
+ lines = 1;
+ pr_debug3("Detected LLVM addr2line style\n");
+ } else if (ch == '0') {
+ style = GNU_BINUTILS;
+ cached = true;
+ lines = 3;
+ pr_debug3("Detected binutils addr2line style\n");
+ } else {
+ if (!symbol_conf.disable_add2line_warn) {
+ char *output = NULL;
+ size_t output_len;
+
+ io__getline(&io, &output, &output_len);
+ pr_warning("%s %s: addr2line configuration failed\n",
+ __func__, dso_name);
+ pr_warning("\t%c%s", ch, output);
+ }
+ pr_debug("Unknown/broken addr2line style\n");
+ return BROKEN;
+ }
+ while (lines) {
+ ch = io__get_char(&io);
+ if (ch <= 0)
+ break;
+ if (ch == '\n')
+ lines--;
+ }
+ /* Ignore SIGPIPE in the event addr2line exits. */
+ signal(SIGPIPE, SIG_IGN);
+ }
+ return style;
+}
+
+static int read_addr2line_record(struct io *io,
+ enum a2l_style style,
+ const char *dso_name,
+ u64 addr,
+ bool first,
+ char **function,
+ char **filename,
+ unsigned int *line_nr)
+{
+ /*
+ * Returns:
+ * -1 ==> error
+ * 0 ==> sentinel (or other ill-formed) record read
+ * 1 ==> a genuine record read
+ */
+ char *line = NULL;
+ size_t line_len = 0;
+ unsigned int dummy_line_nr = 0;
+ int ret = -1;
+
+ if (function != NULL)
+ zfree(function);
+
+ if (filename != NULL)
+ zfree(filename);
+
+ if (line_nr != NULL)
+ *line_nr = 0;
+
+ /*
+ * Read the first line. Without an error this will be:
+ * - for the first line an address like 0x1234,
+ * - the binutils sentinel 0x0000000000000000,
+	 * - for llvm-addr2line the sentinel ',' character,
+ * - the function name line for an inlined function.
+ */
+ if (io__getline(io, &line, &line_len) < 0 || !line_len)
+ goto error;
+
+ pr_debug3("%s %s: addr2line read address for sentinel: %s", __func__, dso_name, line);
+ if (style == LLVM && line_len == 2 && line[0] == ',') {
+ /* Found the llvm-addr2line sentinel character. */
+ zfree(&line);
+ return 0;
+ } else if (style == GNU_BINUTILS && (!first || addr != 0)) {
+ int zero_count = 0, non_zero_count = 0;
+ /*
+		 * Check for the binutils sentinel, ignoring it when the
+		 * requested address is 0.
+ */
+
+ /* A given address should always start 0x. */
+ if (line_len >= 2 || line[0] != '0' || line[1] != 'x') {
+ for (size_t i = 2; i < line_len; i++) {
+ if (line[i] == '0')
+ zero_count++;
+ else if (line[i] != '\n')
+ non_zero_count++;
+ }
+ if (!non_zero_count) {
+ int ch;
+
+ if (first && !zero_count) {
+ /* Line was erroneous just '0x'. */
+ goto error;
+ }
+ /*
+ * Line was 0x0..0, the sentinel for binutils. Remove
+ * the function and filename lines.
+ */
+ zfree(&line);
+ do {
+ ch = io__get_char(io);
+ } while (ch > 0 && ch != '\n');
+ do {
+ ch = io__get_char(io);
+ } while (ch > 0 && ch != '\n');
+ return 0;
+ }
+ }
+ }
+ /* Read the second function name line (if inline data then this is the first line). */
+ if (first && (io__getline(io, &line, &line_len) < 0 || !line_len))
+ goto error;
+
+ pr_debug3("%s %s: addr2line read line: %s", __func__, dso_name, line);
+ if (function != NULL)
+ *function = strdup(strim(line));
+
+ zfree(&line);
+ line_len = 0;
+
+ /* Read the third filename and line number line. */
+ if (io__getline(io, &line, &line_len) < 0 || !line_len)
+ goto error;
+
+ pr_debug3("%s %s: addr2line filename:number : %s", __func__, dso_name, line);
+ if (filename_split(line, line_nr == NULL ? &dummy_line_nr : line_nr) == 0 &&
+ style == GNU_BINUTILS) {
+ ret = 0;
+ goto error;
+ }
+
+ if (filename != NULL)
+ *filename = strdup(line);
+
+ zfree(&line);
+ line_len = 0;
+
+ return 1;
+
+error:
+ free(line);
+ if (function != NULL)
+ zfree(function);
+ if (filename != NULL)
+ zfree(filename);
+ return ret;
+}
+
+static int inline_list__append_record(struct dso *dso,
+ struct inline_node *node,
+ struct symbol *sym,
+ const char *function,
+ const char *filename,
+ unsigned int line_nr)
+{
+ struct symbol *inline_sym = new_inline_sym(dso, sym, function);
+
+ return inline_list__append(inline_sym, srcline_from_fileline(filename, line_nr), node);
+}
+
+int cmd__addr2line(const char *dso_name, u64 addr,
+ char **file, unsigned int *line_nr,
+ struct dso *dso,
+ bool unwind_inlines,
+ struct inline_node *node,
+ struct symbol *sym __maybe_unused)
+{
+ struct child_process *a2l = dso__a2l(dso);
+ char *record_function = NULL;
+ char *record_filename = NULL;
+ unsigned int record_line_nr = 0;
+ int record_status = -1;
+ int ret = 0;
+ size_t inline_count = 0;
+ int len;
+ char buf[128];
+ ssize_t written;
+ struct io io = { .eof = false };
+ enum a2l_style a2l_style;
+
+ if (!a2l) {
+ if (!filename__has_section(dso_name, ".debug_line"))
+ goto out;
+
+ dso__set_a2l(dso,
+ addr2line_subprocess_init(symbol_conf.addr2line_path, dso_name));
+ a2l = dso__a2l(dso);
+ }
+
+ if (a2l == NULL) {
+ if (!symbol_conf.disable_add2line_warn)
+ pr_warning("%s %s: addr2line_subprocess_init failed\n", __func__, dso_name);
+ goto out;
+ }
+ a2l_style = addr2line_configure(a2l, dso_name);
+ if (a2l_style == BROKEN)
+ goto out;
+
+ /*
+ * Send our request and then *deliberately* send something that can't be
+ * interpreted as a valid address to ask addr2line about (namely,
+ * ","). This causes addr2line to first write out the answer to our
+ * request, in an unbounded/unknown number of records, and then to write
+ * out the lines "0x0...0", "??" and "??:0", for GNU binutils, or ","
+ * for llvm-addr2line, so that we can detect when it has finished giving
+ * us anything useful.
+ */
+ len = snprintf(buf, sizeof(buf), "%016"PRIx64"\n,\n", addr);
+ written = len > 0 ? write(a2l->in, buf, len) : -1;
+ if (written != len) {
+ if (!symbol_conf.disable_add2line_warn)
+ pr_warning("%s %s: could not send request\n", __func__, dso_name);
+ goto out;
+ }
+ io__init(&io, a2l->out, buf, sizeof(buf));
+ io.timeout_ms = addr2line_timeout_ms;
+ switch (read_addr2line_record(&io, a2l_style, dso_name, addr, /*first=*/true,
+ &record_function, &record_filename, &record_line_nr)) {
+ case -1:
+ if (!symbol_conf.disable_add2line_warn)
+ pr_warning("%s %s: could not read first record\n", __func__, dso_name);
+ goto out;
+ case 0:
+ /*
+ * The first record was invalid, so return failure, but first
+ * read another record, since we sent a sentinel ',' for the
+		 * sake of detecting the last inlined function. Treat this as the
+		 * first of a record, as the ',' generates a new start with GNU
+		 * binutils; also force a non-zero address as we're no longer
+ * reading that record.
+ */
+ switch (read_addr2line_record(&io, a2l_style, dso_name,
+ /*addr=*/1, /*first=*/true,
+ NULL, NULL, NULL)) {
+ case -1:
+ if (!symbol_conf.disable_add2line_warn)
+ pr_warning("%s %s: could not read sentinel record\n",
+ __func__, dso_name);
+ break;
+ case 0:
+ /* The sentinel as expected. */
+ break;
+ default:
+ if (!symbol_conf.disable_add2line_warn)
+ pr_warning("%s %s: unexpected record instead of sentinel",
+ __func__, dso_name);
+ break;
+ }
+ goto out;
+ default:
+ /* First record as expected. */
+ break;
+ }
+
+ if (file) {
+ *file = strdup(record_filename);
+ ret = 1;
+ }
+ if (line_nr)
+ *line_nr = record_line_nr;
+
+ if (unwind_inlines) {
+ if (node && inline_list__append_record(dso, node, sym,
+ record_function,
+ record_filename,
+ record_line_nr)) {
+ ret = 0;
+ goto out;
+ }
+ }
+
+ /*
+ * We have to read the records even if we don't care about the inline
+	 * info. This isn't the first record, so force the address to
+	 * non-zero as we're reading records beyond the first.
+ */
+ while ((record_status = read_addr2line_record(&io,
+ a2l_style,
+ dso_name,
+ /*addr=*/1,
+ /*first=*/false,
+ &record_function,
+ &record_filename,
+ &record_line_nr)) == 1) {
+ if (unwind_inlines && node && inline_count++ < MAX_INLINE_NEST) {
+ if (inline_list__append_record(dso, node, sym,
+ record_function,
+ record_filename,
+ record_line_nr)) {
+ ret = 0;
+ goto out;
+ }
+ ret = 1; /* found at least one inline frame */
+ }
+ }
+
+out:
+ free(record_function);
+ free(record_filename);
+ if (io.eof) {
+ dso__set_a2l(dso, NULL);
+ addr2line_subprocess_cleanup(a2l);
+ }
+ return ret;
+}
+
+void dso__free_a2l(struct dso *dso)
+{
+ struct child_process *a2l = dso__a2l(dso);
+
+ if (!a2l)
+ return;
+
+ addr2line_subprocess_cleanup(a2l);
+
+ dso__set_a2l(dso, NULL);
+}
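
The request/sentinel protocol documented inside cmd__addr2line() can be exercised end to end with a rough standalone driver (error handling trimmed, and raw line matching used where the real code detects the GNU/LLVM output style once and then parses structured records):

#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
    int to_child[2], from_child[2];
    uint64_t addr = 0x1234;
    char line[4096];
    FILE *out;

    if (argc < 2)
        return 1;
    pipe(to_child);
    pipe(from_child);

    if (fork() == 0) {          /* child: become addr2line */
        dup2(to_child[0], STDIN_FILENO);
        dup2(from_child[1], STDOUT_FILENO);
        execlp("addr2line", "addr2line", "-e", argv[1],
               "-a", "-i", "-f", (char *)NULL);
        _exit(127);
    }
    close(to_child[0]);
    close(from_child[1]);

    /* the request plus the deliberately bogus "," that forces a sentinel */
    dprintf(to_child[1], "%016" PRIx64 "\n,\n", addr);

    out = fdopen(from_child[0], "r");
    while (fgets(line, sizeof(line), out)) {
        /* binutils answers the "," with an all-zero address record,
         * llvm-addr2line echoes the "," itself */
        if (!strncmp(line, "0x0000000000000000", 18) || line[0] == ',')
            break;
        fputs(line, stdout);
    }
    return 0;
}
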
diff --git a/tools/perf/util/addr2line.h b/tools/perf/util/addr2line.h
new file mode 100644
index 000000000000..d35a47ba8dab
--- /dev/null
+++ b/tools/perf/util/addr2line.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_ADDR2LINE_H
+#define __PERF_ADDR2LINE_H
+
+#include <linux/types.h>
+
+struct dso;
+struct inline_node;
+struct symbol;
+
+extern int addr2line_timeout_ms;
+
+int cmd__addr2line(const char *dso_name, u64 addr,
+ char **file, unsigned int *line_nr,
+ struct dso *dso,
+ bool unwind_inlines,
+ struct inline_node *node,
+ struct symbol *sym);
+
+#endif /* __PERF_ADDR2LINE_H */
diff --git a/tools/perf/util/affinity.c b/tools/perf/util/affinity.c
index 38dc4524b7e8..4fe851334296 100644
--- a/tools/perf/util/affinity.c
+++ b/tools/perf/util/affinity.c
@@ -5,6 +5,7 @@
#include <stdlib.h>
#include <linux/bitmap.h>
#include <linux/zalloc.h>
+#include <perf/cpumap.h>
#include "perf.h"
#include "cpumap.h"
#include "affinity.h"
@@ -83,3 +84,20 @@ void affinity__cleanup(struct affinity *a)
if (a != NULL)
__affinity__cleanup(a);
}
+
+void cpu_map__set_affinity(const struct perf_cpu_map *cpumap)
+{
+ int cpu_set_size = get_cpu_set_size();
+ unsigned long *cpuset = bitmap_zalloc(cpu_set_size * 8);
+ struct perf_cpu cpu;
+ int idx;
+
+ if (!cpuset)
+ return;
+
+ perf_cpu_map__for_each_cpu_skip_any(cpu, idx, cpumap)
+ __set_bit(cpu.cpu, cpuset);
+
+ sched_setaffinity(0, cpu_set_size, (cpu_set_t *)cpuset);
+ zfree(&cpuset);
+}
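
cpu_map__set_affinity() is the perf_cpu_map flavor of the standard sched_setaffinity(2) call; it builds the mask with bitmap_zalloc() because get_cpu_set_size() scales with the machine, whereas a fixed cpu_set_t tops out at 1024 CPUs. The fixed-size version, for comparison:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
    cpu_set_t set;

    CPU_ZERO(&set);
    CPU_SET(0, &set);   /* allow CPU 0 only */

    /* pid 0 means the calling thread */
    if (sched_setaffinity(0, sizeof(set), &set))
        perror("sched_setaffinity");
    else
        printf("now restricted to CPU 0\n");
    return 0;
}
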
diff --git a/tools/perf/util/affinity.h b/tools/perf/util/affinity.h
index 0ad6a18ef20c..7341194b2298 100644
--- a/tools/perf/util/affinity.h
+++ b/tools/perf/util/affinity.h
@@ -4,6 +4,7 @@
#include <stdbool.h>
+struct perf_cpu_map;
struct affinity {
unsigned long *orig_cpus;
unsigned long *sched_cpus;
@@ -13,5 +14,6 @@ struct affinity {
void affinity__cleanup(struct affinity *a);
void affinity__set(struct affinity *a, int cpu);
int affinity__setup(struct affinity *a);
+void cpu_map__set_affinity(const struct perf_cpu_map *cpumap);
#endif // PERF_AFFINITY_H
diff --git a/tools/perf/util/amd-sample-raw.c b/tools/perf/util/amd-sample-raw.c
index 456ce64ad822..b084dee76b1a 100644
--- a/tools/perf/util/amd-sample-raw.c
+++ b/tools/perf/util/amd-sample-raw.c
@@ -19,6 +19,8 @@
static u32 cpu_family, cpu_model, ibs_fetch_type, ibs_op_type;
static bool zen4_ibs_extensions;
+static bool ldlat_cap;
+static bool dtlb_pgsize_cap;
static void pr_ibs_fetch_ctl(union ibs_fetch_ctl reg)
{
@@ -78,14 +80,20 @@ static void pr_ic_ibs_extd_ctl(union ic_ibs_extd_ctl reg)
static void pr_ibs_op_ctl(union ibs_op_ctl reg)
{
char l3_miss_only[sizeof(" L3MissOnly _")] = "";
+ char ldlat[sizeof(" LdLatThrsh __ LdLatEn _")] = "";
if (zen4_ibs_extensions)
snprintf(l3_miss_only, sizeof(l3_miss_only), " L3MissOnly %d", reg.l3_miss_only);
- printf("ibs_op_ctl:\t%016llx MaxCnt %9d%s En %d Val %d CntCtl %d=%s CurCnt %9d\n",
+ if (ldlat_cap) {
+ snprintf(ldlat, sizeof(ldlat), " LdLatThrsh %2d LdLatEn %d",
+ reg.ldlat_thrsh, reg.ldlat_en);
+ }
+
+ printf("ibs_op_ctl:\t%016llx MaxCnt %9d%s En %d Val %d CntCtl %d=%s CurCnt %9d%s\n",
reg.val, ((reg.opmaxcnt_ext << 16) | reg.opmaxcnt) << 4, l3_miss_only,
reg.op_en, reg.op_val, reg.cnt_ctl,
- reg.cnt_ctl ? "uOps" : "cycles", reg.opcurcnt);
+ reg.cnt_ctl ? "uOps" : "cycles", reg.opcurcnt, ldlat);
}
static void pr_ibs_op_data(union ibs_op_data reg)
@@ -154,9 +162,20 @@ static void pr_ibs_op_data2(union ibs_op_data2 reg)
static void pr_ibs_op_data3(union ibs_op_data3 reg)
{
- char l2_miss_str[sizeof(" L2Miss _")] = "";
- char op_mem_width_str[sizeof(" OpMemWidth _____ bytes")] = "";
+ static const char * const dc_page_sizes[] = {
+ " 4K",
+ " 2M",
+ " 1G",
+ " ??",
+ };
char op_dc_miss_open_mem_reqs_str[sizeof(" OpDcMissOpenMemReqs __")] = "";
+ char dc_l1_l2tlb_miss_str[sizeof(" DcL1TlbMiss _ DcL2TlbMiss _")] = "";
+ char dc_l1tlb_hit_str[sizeof(" DcL1TlbHit2M _ DcL1TlbHit1G _")] = "";
+ char op_mem_width_str[sizeof(" OpMemWidth _____ bytes")] = "";
+ char dc_l2tlb_hit_2m_str[sizeof(" DcL2TlbHit2M _")] = "";
+ char dc_l2tlb_hit_1g_str[sizeof(" DcL2TlbHit1G _")] = "";
+ char dc_page_size_str[sizeof(" DcPageSize ____")] = "";
+ char l2_miss_str[sizeof(" L2Miss _")] = "";
/*
* Erratum #1293
@@ -172,16 +191,40 @@ static void pr_ibs_op_data3(union ibs_op_data3 reg)
snprintf(op_mem_width_str, sizeof(op_mem_width_str),
" OpMemWidth %2d bytes", 1 << (reg.op_mem_width - 1));
- printf("ibs_op_data3:\t%016llx LdOp %d StOp %d DcL1TlbMiss %d DcL2TlbMiss %d "
- "DcL1TlbHit2M %d DcL1TlbHit1G %d DcL2TlbHit2M %d DcMiss %d DcMisAcc %d "
- "DcWcMemAcc %d DcUcMemAcc %d DcLockedOp %d DcMissNoMabAlloc %d DcLinAddrValid %d "
- "DcPhyAddrValid %d DcL2TlbHit1G %d%s SwPf %d%s%s DcMissLat %5d TlbRefillLat %5d\n",
- reg.val, reg.ld_op, reg.st_op, reg.dc_l1tlb_miss, reg.dc_l2tlb_miss,
- reg.dc_l1tlb_hit_2m, reg.dc_l1tlb_hit_1g, reg.dc_l2tlb_hit_2m, reg.dc_miss,
- reg.dc_mis_acc, reg.dc_wc_mem_acc, reg.dc_uc_mem_acc, reg.dc_locked_op,
- reg.dc_miss_no_mab_alloc, reg.dc_lin_addr_valid, reg.dc_phy_addr_valid,
- reg.dc_l2_tlb_hit_1g, l2_miss_str, reg.sw_pf, op_mem_width_str,
- op_dc_miss_open_mem_reqs_str, reg.dc_miss_lat, reg.tlb_refill_lat);
+ if (dtlb_pgsize_cap) {
+ if (reg.dc_phy_addr_valid) {
+ int idx = (reg.dc_l1tlb_hit_1g << 1) | reg.dc_l1tlb_hit_2m;
+
+ snprintf(dc_l1_l2tlb_miss_str, sizeof(dc_l1_l2tlb_miss_str),
+ " DcL1TlbMiss %d DcL2TlbMiss %d",
+ reg.dc_l1tlb_miss, reg.dc_l2tlb_miss);
+ snprintf(dc_page_size_str, sizeof(dc_page_size_str),
+ " DcPageSize %4s", dc_page_sizes[idx]);
+ }
+ } else {
+ snprintf(dc_l1_l2tlb_miss_str, sizeof(dc_l1_l2tlb_miss_str),
+ " DcL1TlbMiss %d DcL2TlbMiss %d",
+ reg.dc_l1tlb_miss, reg.dc_l2tlb_miss);
+ snprintf(dc_l1tlb_hit_str, sizeof(dc_l1tlb_hit_str),
+ " DcL1TlbHit2M %d DcL1TlbHit1G %d",
+ reg.dc_l1tlb_hit_2m, reg.dc_l1tlb_hit_1g);
+ snprintf(dc_l2tlb_hit_2m_str, sizeof(dc_l2tlb_hit_2m_str),
+ " DcL2TlbHit2M %d", reg.dc_l2tlb_hit_2m);
+ snprintf(dc_l2tlb_hit_1g_str, sizeof(dc_l2tlb_hit_1g_str),
+ " DcL2TlbHit1G %d", reg.dc_l2_tlb_hit_1g);
+ }
+
+ printf("ibs_op_data3:\t%016llx LdOp %d StOp %d%s%s%s DcMiss %d DcMisAcc %d "
+ "DcWcMemAcc %d DcUcMemAcc %d DcLockedOp %d DcMissNoMabAlloc %d "
+ "DcLinAddrValid %d DcPhyAddrValid %d%s%s SwPf %d%s%s "
+ "DcMissLat %5d TlbRefillLat %5d\n",
+ reg.val, reg.ld_op, reg.st_op, dc_l1_l2tlb_miss_str,
+ dtlb_pgsize_cap ? dc_page_size_str : dc_l1tlb_hit_str,
+ dc_l2tlb_hit_2m_str, reg.dc_miss, reg.dc_mis_acc, reg.dc_wc_mem_acc,
+ reg.dc_uc_mem_acc, reg.dc_locked_op, reg.dc_miss_no_mab_alloc,
+ reg.dc_lin_addr_valid, reg.dc_phy_addr_valid, dc_l2tlb_hit_1g_str,
+ l2_miss_str, reg.sw_pf, op_mem_width_str, op_dc_miss_open_mem_reqs_str,
+ reg.dc_miss_lat, reg.tlb_refill_lat);
}
/*
@@ -311,7 +354,7 @@ static void parse_cpuid(struct perf_env *env)
*/
bool evlist__has_amd_ibs(struct evlist *evlist)
{
- struct perf_env *env = evlist->env;
+ struct perf_env *env = perf_session__env(evlist->session);
int ret, nr_pmu_mappings = perf_env__nr_pmu_mappings(env);
const char *pmu_mapping = perf_env__pmu_mappings(env);
char name[sizeof("ibs_fetch")];
@@ -331,6 +374,12 @@ bool evlist__has_amd_ibs(struct evlist *evlist)
if (perf_env__find_pmu_cap(env, "ibs_op", "zen4_ibs_extensions"))
zen4_ibs_extensions = 1;
+ if (perf_env__find_pmu_cap(env, "ibs_op", "ldlat"))
+ ldlat_cap = 1;
+
+ if (perf_env__find_pmu_cap(env, "ibs_op", "dtlb_pgsize"))
+ dtlb_pgsize_cap = 1;
+
if (ibs_fetch_type || ibs_op_type) {
if (!cpu_family)
parse_cpuid(env);
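
The DcPageSize decoding above packs the two L1 TLB large-page hit bits into a table index: neither bit set means a 4K page, DcL1TlbHit2M selects 2M, DcL1TlbHit1G selects 1G, and both bits set is not a valid encoding. Exhaustively:

#include <stdio.h>

static const char * const dc_page_sizes[] = { " 4K", " 2M", " 1G", " ??" };

int main(void)
{
    for (int hit_1g = 0; hit_1g <= 1; hit_1g++) {
        for (int hit_2m = 0; hit_2m <= 1; hit_2m++) {
            int idx = (hit_1g << 1) | hit_2m;

            printf("DcL1TlbHit1G=%d DcL1TlbHit2M=%d -> %s\n",
                   hit_1g, hit_2m, dc_page_sizes[idx]);
        }
    }
    return 0;
}
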
diff --git a/tools/perf/util/annotate-data.c b/tools/perf/util/annotate-data.c
index 1ef2edbc71d9..07cf9c334be0 100644
--- a/tools/perf/util/annotate-data.c
+++ b/tools/perf/util/annotate-data.c
@@ -4,7 +4,7 @@
*
* Written by Namhyung Kim <namhyung@kernel.org>
*/
-
+#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
@@ -58,6 +58,10 @@ void pr_debug_type_name(Dwarf_Die *die, enum type_state_kind kind)
case TSR_KIND_CONST:
pr_info(" constant\n");
return;
+ case TSR_KIND_PERCPU_POINTER:
+ pr_info(" percpu pointer");
+ /* it also prints the type info */
+ break;
case TSR_KIND_POINTER:
pr_info(" pointer");
/* it also prints the type info */
@@ -573,25 +577,35 @@ struct type_state_stack *find_stack_state(struct type_state *state,
}
void set_stack_state(struct type_state_stack *stack, int offset, u8 kind,
- Dwarf_Die *type_die)
+ Dwarf_Die *type_die, int ptr_offset)
{
int tag;
Dwarf_Word size;
- if (dwarf_aggregate_size(type_die, &size) < 0)
+ if (kind == TSR_KIND_POINTER) {
+ /* TODO: arch-dependent pointer size */
+ size = sizeof(void *);
+	} else if (dwarf_aggregate_size(type_die, &size) < 0)
size = 0;
- tag = dwarf_tag(type_die);
-
stack->type = *type_die;
stack->size = size;
stack->offset = offset;
+ stack->ptr_offset = ptr_offset;
stack->kind = kind;
+ if (kind == TSR_KIND_POINTER) {
+ stack->compound = false;
+ return;
+ }
+
+ tag = dwarf_tag(type_die);
+
switch (tag) {
case DW_TAG_structure_type:
case DW_TAG_union_type:
- stack->compound = (kind != TSR_KIND_POINTER);
+ stack->compound = (kind != TSR_KIND_PERCPU_POINTER);
break;
default:
stack->compound = false;
@@ -601,18 +615,19 @@ void set_stack_state(struct type_state_stack *stack, int offset, u8 kind,
struct type_state_stack *findnew_stack_state(struct type_state *state,
int offset, u8 kind,
- Dwarf_Die *type_die)
+ Dwarf_Die *type_die,
+ int ptr_offset)
{
struct type_state_stack *stack = find_stack_state(state, offset);
if (stack) {
- set_stack_state(stack, offset, kind, type_die);
+ set_stack_state(stack, offset, kind, type_die, ptr_offset);
return stack;
}
stack = malloc(sizeof(*stack));
if (stack) {
- set_stack_state(stack, offset, kind, type_die);
+ set_stack_state(stack, offset, kind, type_die, ptr_offset);
list_add(&stack->list, &state->stack_vars);
}
return stack;
@@ -868,6 +883,11 @@ static void update_var_state(struct type_state *state, struct data_loc_info *dlo
int offset = var->offset;
struct type_state_stack *stack;
+ /* If the reg location holds the pointer value, dereference the type */
+ if (!var->is_reg_var_addr && is_pointer_type(&mem_die) &&
+ __die_get_real_type(&mem_die, &mem_die) == NULL)
+ continue;
+
if (var->reg != DWARF_REG_FB)
offset -= fb_offset;
@@ -877,7 +897,7 @@ static void update_var_state(struct type_state *state, struct data_loc_info *dlo
continue;
findnew_stack_state(state, offset, TSR_KIND_TYPE,
- &mem_die);
+ &mem_die, /*ptr_offset=*/0);
if (var->reg == state->stack_reg) {
pr_debug_dtp("var [%"PRIx64"] %#x(reg%d)",
@@ -887,24 +907,45 @@ static void update_var_state(struct type_state *state, struct data_loc_info *dlo
insn_offset, -offset);
}
pr_debug_type_name(&mem_die, TSR_KIND_TYPE);
- } else if (has_reg_type(state, var->reg) && var->offset == 0) {
+ } else if (has_reg_type(state, var->reg)) {
struct type_state_reg *reg;
Dwarf_Die orig_type;
reg = &state->regs[var->reg];
if (reg->ok && reg->kind == TSR_KIND_TYPE &&
- !is_better_type(&reg->type, &mem_die))
+ (!is_better_type(&reg->type, &mem_die) || var->is_reg_var_addr))
continue;
- orig_type = reg->type;
+ /* Handle address registers with TSR_KIND_POINTER */
+ if (var->is_reg_var_addr) {
+ if (reg->ok && reg->kind == TSR_KIND_POINTER &&
+ !is_better_type(&reg->type, &mem_die))
+ continue;
+ reg->offset = -var->offset;
+ reg->type = mem_die;
+ reg->kind = TSR_KIND_POINTER;
+ reg->ok = true;
+
+ pr_debug_dtp("var [%"PRIx64"] reg%d addr offset %x",
+ insn_offset, var->reg, var->offset);
+ pr_debug_type_name(&mem_die, TSR_KIND_POINTER);
+ continue;
+ }
+
+ orig_type = reg->type;
+ /*
+			 * reg value + var->offset is the beginning of the struct;
+			 * reg->offset is the offset within it that the reg points to.
+ */
+ reg->offset = -var->offset;
reg->type = mem_die;
reg->kind = TSR_KIND_TYPE;
reg->ok = true;
- pr_debug_dtp("var [%"PRIx64"] reg%d",
- insn_offset, var->reg);
+ pr_debug_dtp("var [%"PRIx64"] reg%d offset %x",
+ insn_offset, var->reg, var->offset);
pr_debug_type_name(&mem_die, TSR_KIND_TYPE);
/*
@@ -1092,7 +1133,7 @@ again:
if (__die_get_real_type(&state->regs[reg].type, type_die) == NULL)
return PERF_TMR_NO_POINTER;
- dloc->type_offset = dloc->op->offset;
+ dloc->type_offset = dloc->op->offset + state->regs[reg].offset;
if (dwarf_tag(type_die) == DW_TAG_typedef)
die_get_real_type(type_die, &sized_type);
@@ -1108,6 +1149,30 @@ again:
}
if (state->regs[reg].kind == TSR_KIND_POINTER) {
+ struct strbuf sb;
+
+ strbuf_init(&sb, 32);
+ die_get_typename_from_type(&state->regs[reg].type, &sb);
+ pr_debug_dtp("(ptr->%s)", sb.buf);
+ strbuf_release(&sb);
+
+ /*
+ * Register holds a pointer (address) to the target variable.
+ * The type is the type of the variable it points to.
+ */
+ *type_die = state->regs[reg].type;
+
+ dloc->type_offset = dloc->op->offset + state->regs[reg].offset;
+
+ /* Get the size of the actual type */
+ if (dwarf_aggregate_size(type_die, &size) < 0 ||
+ (unsigned)dloc->type_offset >= size)
+ return PERF_TMR_BAD_OFFSET;
+
+ return PERF_TMR_OK;
+ }
+
+ if (state->regs[reg].kind == TSR_KIND_PERCPU_POINTER) {
pr_debug_dtp("percpu ptr");
/*
diff --git a/tools/perf/util/annotate-data.h b/tools/perf/util/annotate-data.h
index 541fee1a5f0a..869307c7f130 100644
--- a/tools/perf/util/annotate-data.h
+++ b/tools/perf/util/annotate-data.h
@@ -34,6 +34,7 @@ enum type_state_kind {
TSR_KIND_TYPE,
TSR_KIND_PERCPU_BASE,
TSR_KIND_CONST,
+ TSR_KIND_PERCPU_POINTER,
TSR_KIND_POINTER,
TSR_KIND_CANARY,
};
@@ -173,6 +174,12 @@ extern struct annotated_data_stat ann_data_stat;
struct type_state_reg {
Dwarf_Die type;
u32 imm_value;
+ /*
+ * The offset within the struct that the register points to.
+ * A value of 0 means the register points to the beginning.
+ * type_offset = op->offset + reg->offset
+ */
+ s32 offset;
bool ok;
bool caller_saved;
u8 kind;
@@ -184,17 +191,22 @@ struct type_state_stack {
struct list_head list;
Dwarf_Die type;
int offset;
+ /* pointer offset, saves tsr->offset on the stack state */
+ int ptr_offset;
int size;
bool compound;
u8 kind;
};
-/* FIXME: This should be arch-dependent */
-#ifdef __powerpc__
+/*
+ * Maximum number of registers tracked in type_state.
+ *
+ * This limit must cover all supported architectures, since perf
+ * may analyze perf.data files generated on systems with a different
+ * register set. Use 32 as a safe upper bound instead of relying on
+ * build-arch specific values.
+ */
#define TYPE_STATE_MAX_REGS 32
-#else
-#define TYPE_STATE_MAX_REGS 16
-#endif
/*
* State table to maintain type info in each register and stack location.
@@ -237,9 +249,10 @@ int annotated_data_type__get_member_name(struct annotated_data_type *adt,
bool has_reg_type(struct type_state *state, int reg);
struct type_state_stack *findnew_stack_state(struct type_state *state,
int offset, u8 kind,
- Dwarf_Die *type_die);
+ Dwarf_Die *type_die,
+ int ptr_offset);
void set_stack_state(struct type_state_stack *stack, int offset, u8 kind,
- Dwarf_Die *type_die);
+ Dwarf_Die *type_die, int ptr_offset);
struct type_state_stack *find_stack_state(struct type_state *state,
int offset);
bool get_global_var_type(Dwarf_Die *cu_die, struct data_loc_info *dloc,
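
The new reg->offset field records where inside the struct a register points when DWARF places the variable at reg + var->offset, which is why the lookup above computes dloc->type_offset = dloc->op->offset + state->regs[reg].offset. A made-up struct makes the arithmetic concrete (all names here are illustrative only):

#include <stddef.h>
#include <stdio.h>

struct sample {
    long a;     /* offset 0 */
    long b;     /* offset 8 */
    long c[4];  /* offset 16 */
};

int main(void)
{
    struct sample s;
    /* DWARF: the struct lives at reg + var_offset; a register holding
     * &s.b therefore has var_offset == -8 from the struct base */
    char *reg = (char *)&s.b;
    int reg_offset = 8;     /* reg->offset = -var->offset = -(-8) */
    int op_offset = 16;     /* the instruction accesses 16(%reg) */

    /* the access lands at struct offset op_offset + reg_offset == 24 */
    printf("accessed offset %d, c[1] sits at %zu\n",
           op_offset + reg_offset, offsetof(struct sample, c) + sizeof(long));
    (void)reg;
    return 0;
}
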
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 1e59b9e5339d..cc7764455faf 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -765,14 +765,16 @@ __hist_entry__get_data_type(struct hist_entry *he, struct arch *arch,
struct debuginfo *dbg, struct disasm_line *dl,
int *type_offset);
-struct annotation_print_data {
- struct hist_entry *he;
- struct evsel *evsel;
- struct arch *arch;
- struct debuginfo *dbg;
- u64 start;
- int addr_fmt_width;
-};
+static bool needs_type_info(struct annotated_data_type *data_type)
+{
+ if (data_type == NULL || data_type == NO_TYPE)
+ return false;
+
+ if (verbose)
+ return true;
+
+ return (data_type != &stackop_type) && (data_type != &canary_type);
+}
static int
annotation_line__print(struct annotation_line *al, struct annotation_print_data *apd,
@@ -845,7 +847,7 @@ annotation_line__print(struct annotation_line *al, struct annotation_print_data
printf(" : ");
- disasm_line__print(dl, apd->start, apd->addr_fmt_width);
+ disasm_line__print(dl, notes->src->start, apd->addr_fmt_width);
if (opts->code_with_type && apd->dbg) {
struct annotated_data_type *data_type;
@@ -853,7 +855,7 @@ annotation_line__print(struct annotation_line *al, struct annotation_print_data
data_type = __hist_entry__get_data_type(apd->he, apd->arch,
apd->dbg, dl, &offset);
- if (data_type && data_type != NO_TYPE) {
+ if (needs_type_info(data_type)) {
char buf[4096];
printf("\t\t# data-type: %s",
@@ -978,7 +980,7 @@ void symbol__calc_percent(struct symbol *sym, struct evsel *evsel)
annotation__calc_percent(notes, evsel, symbol__size(sym));
}
-static int evsel__get_arch(struct evsel *evsel, struct arch **parch)
+int evsel__get_arch(struct evsel *evsel, struct arch **parch)
{
struct perf_env *env = evsel__env(evsel);
const char *arch_name = perf_env__arch(env);
@@ -1013,14 +1015,13 @@ int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
struct annotate_args args = {
- .evsel = evsel,
.options = &annotate_opts,
};
struct arch *arch = NULL;
int err, nr;
err = evsel__get_arch(evsel, &arch);
- if (err < 0)
+ if (err)
return err;
if (parch)
@@ -1230,7 +1231,6 @@ int hist_entry__annotate_printf(struct hist_entry *he, struct evsel *evsel)
struct annotation_print_data apd = {
.he = he,
.evsel = evsel,
- .start = map__rip_2objdump(map, sym->start),
};
int printed = 2, queue_len = 0;
int more = 0;
@@ -1267,9 +1267,9 @@ int hist_entry__annotate_printf(struct hist_entry *he, struct evsel *evsel)
symbol__annotate_hits(sym, evsel);
apd.addr_fmt_width = annotated_source__addr_fmt_width(&notes->src->source,
- apd.start);
+ notes->src->start);
evsel__get_arch(evsel, &apd.arch);
- apd.dbg = debuginfo__new(filename);
+ apd.dbg = dso__debuginfo(dso);
list_for_each_entry(pos, &notes->src->source, node) {
int err;
@@ -1357,7 +1357,8 @@ static void FILE__write_graph(void *fp, int graph)
fputs(s, fp);
}
-static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp)
+static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp,
+ struct annotation_print_data *apd)
{
struct annotation *notes = symbol__annotation(sym);
struct annotation_write_ops wops = {
@@ -1371,24 +1372,37 @@ static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp)
};
struct annotation_line *al;
+ if (annotate_opts.code_with_type) {
+ evsel__get_arch(apd->evsel, &apd->arch);
+ apd->dbg = dso__debuginfo(map__dso(apd->he->ms.map));
+ }
+
list_for_each_entry(al, &notes->src->source, node) {
if (annotation_line__filter(al))
continue;
- annotation_line__write(al, notes, &wops);
+ annotation_line__write(al, notes, &wops, apd);
fputc('\n', fp);
wops.first_line = false;
}
+ if (annotate_opts.code_with_type)
+ debuginfo__delete(apd->dbg);
+
return 0;
}
-int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel)
+int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel,
+ struct hist_entry *he)
{
const char *ev_name = evsel__name(evsel);
char buf[1024];
char *filename;
int err = -1;
FILE *fp;
+ struct annotation_print_data apd = {
+ .he = he,
+ .evsel = evsel,
+ };
if (asprintf(&filename, "%s.annotation", ms->sym->name) < 0)
return -1;
@@ -1404,7 +1418,7 @@ int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel)
fprintf(fp, "%s() %s\nEvent: %s\n\n",
ms->sym->name, dso__long_name(map__dso(ms->map)), ev_name);
- symbol__annotate_fprintf2(ms->sym, fp);
+ symbol__annotate_fprintf2(ms->sym, fp, &apd);
fclose(fp);
err = 0;
@@ -1451,6 +1465,7 @@ void annotated_source__purge(struct annotated_source *as)
list_del_init(&al->node);
disasm_line__free(disasm_line(al));
}
+ as->tried_source = false;
}
static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
@@ -1655,6 +1670,10 @@ int hist_entry__tty_annotate2(struct hist_entry *he, struct evsel *evsel)
struct symbol *sym = ms->sym;
struct rb_root source_line = RB_ROOT;
struct hists *hists = evsel__hists(evsel);
+ struct annotation_print_data apd = {
+ .he = he,
+ .evsel = evsel,
+ };
char buf[1024];
int err;
@@ -1677,7 +1696,7 @@ int hist_entry__tty_annotate2(struct hist_entry *he, struct evsel *evsel)
hists__scnprintf_title(hists, buf, sizeof(buf));
fprintf(stdout, "%s, [percent: %s]\n%s() %s\n",
buf, percent_type_str(annotate_opts.percent_type), sym->name, dso__long_name(dso));
- symbol__annotate_fprintf2(sym, stdout);
+ symbol__annotate_fprintf2(sym, stdout, &apd);
annotated_source__purge(symbol__annotation(sym)->src);
@@ -1742,7 +1761,7 @@ static double annotation_line__max_percent(struct annotation_line *al,
return percent_max;
}
-static void disasm_line__write(struct disasm_line *dl, struct annotation *notes,
+static int disasm_line__write(struct disasm_line *dl, struct annotation *notes,
void *obj, char *bf, size_t size,
void (*obj__printf)(void *obj, const char *fmt, ...),
void (*obj__write_graph)(void *obj, int graph))
@@ -1770,8 +1789,8 @@ call_like:
obj__printf(obj, " ");
}
- disasm_line__scnprintf(dl, bf, size, !annotate_opts.use_offset,
- notes->src->widths.max_ins_name);
+ return disasm_line__scnprintf(dl, bf, size, !annotate_opts.use_offset,
+ notes->src->widths.max_ins_name) + 2;
}
static void ipc_coverage_string(char *bf, int size, struct annotation *notes)
@@ -1934,24 +1953,82 @@ err:
return -ENOMEM;
}
-static void __annotation_line__write(struct annotation_line *al, struct annotation *notes,
- bool first_line, bool current_entry, bool change_color, int width,
- void *obj, unsigned int percent_type,
- int (*obj__set_color)(void *obj, int color),
- void (*obj__set_percent_color)(void *obj, double percent, bool current),
- int (*obj__set_jumps_percent_color)(void *obj, int nr, bool current),
- void (*obj__printf)(void *obj, const char *fmt, ...),
- void (*obj__write_graph)(void *obj, int graph))
+struct type_hash_entry {
+ struct annotated_data_type *type;
+ int offset;
+};
+static int disasm_line__snprint_type_info(struct disasm_line *dl,
+ char *buf, int len,
+ struct annotation_print_data *apd)
{
- double percent_max = annotation_line__max_percent(al, percent_type);
- int pcnt_width = annotation__pcnt_width(notes),
- cycles_width = annotation__cycles_width(notes);
+ struct annotated_data_type *data_type = NULL;
+ struct type_hash_entry *entry = NULL;
+ char member[256];
+ int offset = 0;
+ int printed;
+
+ scnprintf(buf, len, " ");
+
+ if (!annotate_opts.code_with_type || apd->dbg == NULL)
+ return 1;
+
+ if (apd->type_hash) {
+ hashmap__find(apd->type_hash, dl->al.offset, &entry);
+ if (entry != NULL) {
+ data_type = entry->type;
+ offset = entry->offset;
+ }
+ }
+
+ if (data_type == NULL)
+ data_type = __hist_entry__get_data_type(apd->he, apd->arch, apd->dbg, dl, &offset);
+
+ if (apd->type_hash && entry == NULL) {
+ entry = malloc(sizeof(*entry));
+ if (entry != NULL) {
+ entry->type = data_type;
+ entry->offset = offset;
+ hashmap__add(apd->type_hash, dl->al.offset, entry);
+ }
+ }
+
+ if (!needs_type_info(data_type))
+ return 1;
+
+ printed = scnprintf(buf, len, "\t\t# data-type: %s", data_type->self.type_name);
+
+ if (data_type != &stackop_type && data_type != &canary_type && len > printed)
+ printed += scnprintf(buf + printed, len - printed, " +%#x", offset);
+
+ if (annotated_data_type__get_member_name(data_type, member, sizeof(member), offset) &&
+ len > printed) {
+ printed += scnprintf(buf + printed, len - printed, " (%s)", member);
+ }
+ return printed;
+}
+
+void annotation_line__write(struct annotation_line *al, struct annotation *notes,
+ const struct annotation_write_ops *wops,
+ struct annotation_print_data *apd)
+{
+ bool current_entry = wops->current_entry;
+ bool change_color = wops->change_color;
+ double percent_max = annotation_line__max_percent(al, annotate_opts.percent_type);
+ int width = wops->width;
+ int pcnt_width = annotation__pcnt_width(notes);
+ int cycles_width = annotation__cycles_width(notes);
bool show_title = false;
char bf[256];
int printed;
-
- if (first_line && (al->offset == -1 || percent_max == 0.0)) {
+ void *obj = wops->obj;
+ int (*obj__set_color)(void *obj, int color) = wops->set_color;
+ void (*obj__set_percent_color)(void *obj, double percent, bool current) = wops->set_percent_color;
+ int (*obj__set_jumps_percent_color)(void *obj, int nr, bool current) = wops->set_jumps_percent_color;
+ void (*obj__printf)(void *obj, const char *fmt, ...) = wops->printf;
+ void (*obj__write_graph)(void *obj, int graph) = wops->write_graph;
+
+ if (wops->first_line && (al->offset == -1 || percent_max == 0.0)) {
if (notes->branch && al->cycles) {
if (al->cycles->ipc == 0.0 && al->cycles->avg == 0)
show_title = true;
@@ -1965,7 +2042,8 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
for (i = 0; i < al->data_nr; i++) {
double percent;
- percent = annotation_data__percent(&al->data[i], percent_type);
+ percent = annotation_data__percent(&al->data[i],
+ annotate_opts.percent_type);
obj__set_percent_color(obj, percent, current_entry);
if (symbol_conf.show_total_period) {
@@ -1988,6 +2066,7 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
symbol_conf.show_nr_samples ? "Samples" : "Percent");
}
}
+ width -= pcnt_width;
if (notes->branch) {
if (al->cycles && al->cycles->ipc)
@@ -2051,11 +2130,13 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
obj__printf(obj, "%*s", ANNOTATION__AVG_IPC_WIDTH, bf);
}
}
+ width -= cycles_width;
obj__printf(obj, " ");
+ width -= 1;
if (!*al->line)
- obj__printf(obj, "%-*s", width - pcnt_width - cycles_width, " ");
+ obj__printf(obj, "%-*s", width, " ");
else if (al->offset == -1) {
if (al->line_nr && annotate_opts.show_linenr)
printed = scnprintf(bf, sizeof(bf), "%-*d ",
@@ -2064,7 +2145,8 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
printed = scnprintf(bf, sizeof(bf), "%-*s ",
notes->src->widths.addr, " ");
obj__printf(obj, bf);
- obj__printf(obj, "%-*s", width - printed - pcnt_width - cycles_width + 1, al->line);
+ width -= printed;
+ obj__printf(obj, "%-*s", width, al->line);
} else {
u64 addr = al->offset;
int color = -1;
@@ -2107,22 +2189,18 @@ print_addr:
if (change_color)
obj__set_color(obj, color);
- disasm_line__write(disasm_line(al), notes, obj, bf, sizeof(bf), obj__printf, obj__write_graph);
+ width -= printed;
- obj__printf(obj, "%-*s", width - pcnt_width - cycles_width - 3 - printed, bf);
- }
+ printed = disasm_line__write(disasm_line(al), notes, obj, bf, sizeof(bf),
+ obj__printf, obj__write_graph);
-}
+ obj__printf(obj, "%s", bf);
+ width -= printed;
+
+ disasm_line__snprint_type_info(disasm_line(al), bf, sizeof(bf), apd);
+ obj__printf(obj, "%-*s", width, bf);
+ }
-void annotation_line__write(struct annotation_line *al, struct annotation *notes,
- struct annotation_write_ops *wops)
-{
- __annotation_line__write(al, notes, wops->first_line, wops->current_entry,
- wops->change_color, wops->width, wops->obj,
- annotate_opts.percent_type,
- wops->set_color, wops->set_percent_color,
- wops->set_jumps_percent_color, wops->printf,
- wops->write_graph);
}
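annotation_line__write() now decrements the remaining width after each column is printed instead of recomputing one large subtraction at every call site, so the final field simply pads to whatever space is left. A minimal sketch of that pattern (names and column widths illustrative):

#include <stdio.h>

static void print_row(int width, const char *pcnt, const char *insn)
{
	int printed;

	printed = printf("%8s", pcnt);	/* percent column */
	width -= printed;

	printed = printf(" ");		/* separator */
	width -= printed;

	printed = printf("%s", insn);	/* instruction text */
	width -= printed;

	/* pad whatever remains of the row */
	printf("%-*s\n", width > 0 ? width : 0, "");
}

int main(void)
{
	print_row(40, "12.34", "mov  %rax,%rbx");
	return 0;
}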
int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
@@ -2280,6 +2358,8 @@ void annotation_options__init(void)
opt->annotate_src = true;
opt->offset_level = ANNOTATION__OFFSET_JUMP_TARGETS;
opt->percent_type = PERCENT_PERIOD_LOCAL;
+ opt->hide_src_code = true;
+ opt->hide_src_code_on_title = true;
}
void annotation_options__exit(void)
@@ -2618,6 +2698,20 @@ static bool is_stack_canary(struct arch *arch, struct annotated_op_loc *loc)
return false;
}
+/**
+ * Returns true if the instruction has a memory operand but does not
+ * perform a load/store
+ */
+static bool is_address_gen_insn(struct arch *arch, struct disasm_line *dl)
+{
+ if (arch__is(arch, "x86")) {
+ if (!strncmp(dl->ins.name, "lea", 3))
+ return true;
+ }
+
+ return false;
+}
+
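A C analogue of the lea case (illustrative, not from the patch): taking an element's address compiles to a lea-style address computation with no load or store, so there is no memory access to attribute a data type to.

/*
 * C analogue of is_address_gen_insn(): computing &arr[i] generates an
 * address (typically via lea) without touching memory.
 */
int *element_addr(int *arr, long i)
{
	return &arr[i];	/* typically: lea (%rdi,%rsi,4), %rax */
}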
static struct disasm_line *
annotation__prev_asm_line(struct annotation *notes, struct disasm_line *curr)
{
@@ -2726,6 +2820,12 @@ __hist_entry__get_data_type(struct hist_entry *he, struct arch *arch,
return &stackop_type;
}
+ if (is_address_gen_insn(arch, dl)) {
+ istat->bad++;
+ ann_data_stat.no_mem_ops++;
+ return NO_TYPE;
+ }
+
for_each_insn_op_loc(&loc, i, op_loc) {
struct data_loc_info dloc = {
.arch = arch,
@@ -2826,7 +2926,7 @@ struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he)
di_cache.dso = dso__get(map__dso(ms->map));
debuginfo__delete(di_cache.dbg);
- di_cache.dbg = debuginfo__new(dso__long_name(di_cache.dso));
+ di_cache.dbg = dso__debuginfo(di_cache.dso);
}
if (di_cache.dbg == NULL) {
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index 0e6e3f60a897..d4990bff29a7 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -44,6 +44,7 @@ enum perf_disassembler {
struct annotation_options {
bool hide_src_code,
+ hide_src_code_on_title,
use_offset,
jump_arrows,
print_lines,
@@ -198,8 +199,20 @@ struct annotation_write_ops {
void (*write_graph)(void *obj, int graph);
};
+struct annotation_print_data {
+ struct hist_entry *he;
+ struct evsel *evsel;
+ struct arch *arch;
+ struct debuginfo *dbg;
+ /* save data type info keyed by al->offset */
+ struct hashmap *type_hash;
+ /* It'll be set in hist_entry__annotate_printf() */
+ int addr_fmt_width;
+};
+
void annotation_line__write(struct annotation_line *al, struct annotation *notes,
- struct annotation_write_ops *ops);
+ const struct annotation_write_ops *ops,
+ struct annotation_print_data *apd);
int __annotation__scnprintf_samples_period(struct annotation *notes,
char *bf, size_t size,
@@ -293,6 +306,7 @@ struct annotated_source {
int nr_entries;
int nr_asm_entries;
int max_jump_sources;
+ bool tried_source;
u64 start;
struct {
u8 addr;
@@ -461,7 +475,8 @@ void symbol__annotate_zero_histogram(struct symbol *sym, struct evsel *evsel);
void symbol__annotate_decay_histogram(struct symbol *sym, struct evsel *evsel);
void annotated_source__purge(struct annotated_source *as);
-int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel);
+int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel,
+ struct hist_entry *he);
bool ui__has_annotation(void);
@@ -469,18 +484,6 @@ int hist_entry__annotate_printf(struct hist_entry *he, struct evsel *evsel);
int hist_entry__tty_annotate(struct hist_entry *he, struct evsel *evsel);
int hist_entry__tty_annotate2(struct hist_entry *he, struct evsel *evsel);
-#ifdef HAVE_SLANG_SUPPORT
-int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
- struct hist_browser_timer *hbt);
-#else
-static inline int symbol__tui_annotate(struct map_symbol *ms __maybe_unused,
- struct evsel *evsel __maybe_unused,
- struct hist_browser_timer *hbt __maybe_unused)
-{
- return 0;
-}
-#endif
-
void annotation_options__init(void);
void annotation_options__exit(void);
@@ -582,4 +585,6 @@ void debuginfo_cache__delete(void);
int annotation_br_cntr_entry(char **str, int br_cntr_nr, u64 *br_cntr,
int num_aggr, struct evsel *evsel);
int annotation_br_cntr_abbr_list(char **str, struct evsel *evsel, bool header);
+
+int evsel__get_arch(struct evsel *evsel, struct arch **parch);
#endif /* __PERF_ANNOTATE_H */
diff --git a/tools/perf/util/arm-spe-decoder/Build b/tools/perf/util/arm-spe-decoder/Build
index 960062b3cb9e..ab500e0efe24 100644
--- a/tools/perf/util/arm-spe-decoder/Build
+++ b/tools/perf/util/arm-spe-decoder/Build
@@ -1 +1 @@
-perf-util-$(CONFIG_AUXTRACE) += arm-spe-pkt-decoder.o arm-spe-decoder.o
+perf-util-y += arm-spe-pkt-decoder.o arm-spe-decoder.o
diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
index 688fe6d75244..9e02b2bdd117 100644
--- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
+++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
@@ -200,13 +200,61 @@ static int arm_spe_read_record(struct arm_spe_decoder *decoder)
decoder->record.op |= ARM_SPE_OP_ST;
else
decoder->record.op |= ARM_SPE_OP_LD;
- if (SPE_OP_PKT_IS_LDST_SVE(payload))
- decoder->record.op |= ARM_SPE_OP_SVE_LDST;
+
+ if (SPE_OP_PKT_LDST_SUBCLASS_GP_REG(payload)) {
+ decoder->record.op |= ARM_SPE_OP_GP_REG;
+ } else if (SPE_OP_PKT_LDST_SUBCLASS_SIMD_FP(payload)) {
+ decoder->record.op |= ARM_SPE_OP_SIMD_FP;
+ } else if (SPE_OP_PKT_LDST_SUBCLASS_UNSPEC_REG(payload)) {
+ decoder->record.op |= ARM_SPE_OP_UNSPEC_REG;
+ } else if (SPE_OP_PKT_LDST_SUBCLASS_NV_SYSREG(payload)) {
+ decoder->record.op |= ARM_SPE_OP_NV_SYSREG;
+ } else if (SPE_OP_PKT_LDST_SUBCLASS_MTE_TAG(payload)) {
+ decoder->record.op |= ARM_SPE_OP_MTE_TAG;
+ } else if (SPE_OP_PKT_LDST_SUBCLASS_EXTENDED(payload)) {
+ if (payload & SPE_OP_PKT_AR)
+ decoder->record.op |= ARM_SPE_OP_AR;
+ if (payload & SPE_OP_PKT_EXCL)
+ decoder->record.op |= ARM_SPE_OP_EXCL;
+ if (payload & SPE_OP_PKT_AT)
+ decoder->record.op |= ARM_SPE_OP_ATOMIC;
+ } else if (SPE_OP_PKT_LDST_SUBCLASS_SVE_SME_REG(payload)) {
+ decoder->record.op |= ARM_SPE_OP_SVE;
+ if (payload & SPE_OP_PKT_SVE_PRED)
+ decoder->record.op |= ARM_SPE_OP_PRED;
+ if (payload & SPE_OP_PKT_SVE_SG)
+ decoder->record.op |= ARM_SPE_OP_SG;
+ } else if (SPE_OP_PKT_LDST_SUBCLASS_MEMCPY(payload)) {
+ decoder->record.op |= ARM_SPE_OP_MEMCPY;
+ } else if (SPE_OP_PKT_LDST_SUBCLASS_MEMSET(payload)) {
+ decoder->record.op |= ARM_SPE_OP_MEMSET;
+ } else if (SPE_OP_PKT_LDST_SUBCLASS_GCS(payload)) {
+ decoder->record.op |= ARM_SPE_OP_GCS;
+ if (payload & SPE_OP_PKT_GCS_COMM)
+ decoder->record.op |= ARM_SPE_OP_COMM;
+ }
+
break;
case SPE_OP_PKT_HDR_CLASS_OTHER:
decoder->record.op |= ARM_SPE_OP_OTHER;
- if (SPE_OP_PKT_IS_OTHER_SVE_OP(payload))
- decoder->record.op |= ARM_SPE_OP_SVE_OTHER;
+ if (SPE_OP_PKT_OTHER_SUBCLASS_SVE(payload)) {
+ decoder->record.op |= ARM_SPE_OP_SVE | ARM_SPE_OP_DP;
+ if (payload & SPE_OP_PKT_OTHER_FP)
+ decoder->record.op |= ARM_SPE_OP_FP;
+ if (payload & SPE_OP_PKT_SVE_PRED)
+ decoder->record.op |= ARM_SPE_OP_PRED;
+ } else if (SPE_OP_PKT_OTHER_SUBCLASS_SME(payload)) {
+ decoder->record.op |= ARM_SPE_OP_SME;
+ if (payload & SPE_OP_PKT_OTHER_FP)
+ decoder->record.op |= ARM_SPE_OP_FP;
+ } else if (SPE_OP_PKT_OTHER_SUBCLASS_OTHER(payload)) {
+ if (payload & SPE_OP_PKT_OTHER_ASE)
+ decoder->record.op |= ARM_SPE_OP_ASE;
+ if (payload & SPE_OP_PKT_OTHER_FP)
+ decoder->record.op |= ARM_SPE_OP_FP;
+ if (payload & SPE_OP_PKT_COND)
+ decoder->record.op |= ARM_SPE_OP_COND;
+ }
break;
case SPE_OP_PKT_HDR_CLASS_BR_ERET:
decoder->record.op |= ARM_SPE_OP_BRANCH_ERET;
@@ -229,42 +277,7 @@ static int arm_spe_read_record(struct arm_spe_decoder *decoder)
}
break;
case ARM_SPE_EVENTS:
- if (payload & BIT(EV_L1D_REFILL))
- decoder->record.type |= ARM_SPE_L1D_MISS;
-
- if (payload & BIT(EV_L1D_ACCESS))
- decoder->record.type |= ARM_SPE_L1D_ACCESS;
-
- if (payload & BIT(EV_TLB_WALK))
- decoder->record.type |= ARM_SPE_TLB_MISS;
-
- if (payload & BIT(EV_TLB_ACCESS))
- decoder->record.type |= ARM_SPE_TLB_ACCESS;
-
- if (payload & BIT(EV_LLC_MISS))
- decoder->record.type |= ARM_SPE_LLC_MISS;
-
- if (payload & BIT(EV_LLC_ACCESS))
- decoder->record.type |= ARM_SPE_LLC_ACCESS;
-
- if (payload & BIT(EV_REMOTE_ACCESS))
- decoder->record.type |= ARM_SPE_REMOTE_ACCESS;
-
- if (payload & BIT(EV_MISPRED))
- decoder->record.type |= ARM_SPE_BRANCH_MISS;
-
- if (payload & BIT(EV_NOT_TAKEN))
- decoder->record.type |= ARM_SPE_BRANCH_NOT_TAKEN;
-
- if (payload & BIT(EV_TRANSACTIONAL))
- decoder->record.type |= ARM_SPE_IN_TXN;
-
- if (payload & BIT(EV_PARTIAL_PREDICATE))
- decoder->record.type |= ARM_SPE_SVE_PARTIAL_PRED;
-
- if (payload & BIT(EV_EMPTY_PREDICATE))
- decoder->record.type |= ARM_SPE_SVE_EMPTY_PRED;
-
+ decoder->record.type = payload;
break;
case ARM_SPE_DATA_SOURCE:
decoder->record.source = payload;
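The per-bit translation can be dropped because the ARM_SPE_* sample-type flags are redefined (in the header change below) as the raw Events-packet bits, so the payload already is the type bitmask. A sketch of the equivalence (bit position illustrative):

#define BIT(n)			(1ULL << (n))
#define EV_L1D_REFILL		3			/* illustrative bit position */
#define ARM_SPE_L1D_MISS	BIT(EV_L1D_REFILL)	/* same bit, by definition */

static unsigned long long decode_type(unsigned long long payload)
{
	/* (payload & ARM_SPE_L1D_MISS) already means "L1D miss". */
	return payload;
}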
diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
index 5d232188643b..3310e05122f0 100644
--- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
+++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
@@ -13,53 +13,65 @@
#include "arm-spe-pkt-decoder.h"
-enum arm_spe_sample_type {
- ARM_SPE_L1D_ACCESS = 1 << 0,
- ARM_SPE_L1D_MISS = 1 << 1,
- ARM_SPE_LLC_ACCESS = 1 << 2,
- ARM_SPE_LLC_MISS = 1 << 3,
- ARM_SPE_TLB_ACCESS = 1 << 4,
- ARM_SPE_TLB_MISS = 1 << 5,
- ARM_SPE_BRANCH_MISS = 1 << 6,
- ARM_SPE_REMOTE_ACCESS = 1 << 7,
- ARM_SPE_SVE_PARTIAL_PRED = 1 << 8,
- ARM_SPE_SVE_EMPTY_PRED = 1 << 9,
- ARM_SPE_BRANCH_NOT_TAKEN = 1 << 10,
- ARM_SPE_IN_TXN = 1 << 11,
-};
+#define ARM_SPE_L1D_ACCESS BIT(EV_L1D_ACCESS)
+#define ARM_SPE_L1D_MISS BIT(EV_L1D_REFILL)
+#define ARM_SPE_LLC_ACCESS BIT(EV_LLC_ACCESS)
+#define ARM_SPE_LLC_MISS BIT(EV_LLC_MISS)
+#define ARM_SPE_TLB_ACCESS BIT(EV_TLB_ACCESS)
+#define ARM_SPE_TLB_MISS BIT(EV_TLB_WALK)
+#define ARM_SPE_BRANCH_MISS BIT(EV_MISPRED)
+#define ARM_SPE_BRANCH_NOT_TAKEN BIT(EV_NOT_TAKEN)
+#define ARM_SPE_REMOTE_ACCESS BIT(EV_REMOTE_ACCESS)
+#define ARM_SPE_SVE_PARTIAL_PRED BIT(EV_PARTIAL_PREDICATE)
+#define ARM_SPE_SVE_EMPTY_PRED BIT(EV_EMPTY_PREDICATE)
+#define ARM_SPE_IN_TXN BIT(EV_TRANSACTIONAL)
+#define ARM_SPE_L2D_ACCESS BIT(EV_L2D_ACCESS)
+#define ARM_SPE_L2D_MISS BIT(EV_L2D_MISS)
+#define ARM_SPE_RECENTLY_FETCHED BIT(EV_RECENTLY_FETCHED)
+#define ARM_SPE_DATA_SNOOPED BIT(EV_DATA_SNOOPED)
+#define ARM_SPE_HITM BIT(EV_CACHE_DATA_MODIFIED)
enum arm_spe_op_type {
/* First level operation type */
ARM_SPE_OP_OTHER = 1 << 0,
ARM_SPE_OP_LDST = 1 << 1,
ARM_SPE_OP_BRANCH_ERET = 1 << 2,
+};
+
+enum arm_spe_2nd_op_ldst {
+ ARM_SPE_OP_GP_REG = 1 << 8,
+ ARM_SPE_OP_UNSPEC_REG = 1 << 9,
+ ARM_SPE_OP_NV_SYSREG = 1 << 10,
+ ARM_SPE_OP_SIMD_FP = 1 << 11,
+ ARM_SPE_OP_SVE = 1 << 12,
+ ARM_SPE_OP_MTE_TAG = 1 << 13,
+ ARM_SPE_OP_MEMCPY = 1 << 14,
+ ARM_SPE_OP_MEMSET = 1 << 15,
+ ARM_SPE_OP_GCS = 1 << 16,
+ ARM_SPE_OP_SME = 1 << 17,
+ ARM_SPE_OP_ASE = 1 << 18,
+
+ /* Assisted information for memory / SIMD */
+ ARM_SPE_OP_LD = 1 << 20,
+ ARM_SPE_OP_ST = 1 << 21,
+ ARM_SPE_OP_ATOMIC = 1 << 22,
+ ARM_SPE_OP_EXCL = 1 << 23,
+ ARM_SPE_OP_AR = 1 << 24,
+ ARM_SPE_OP_DP = 1 << 25, /* Data processing */
+ ARM_SPE_OP_PRED = 1 << 26, /* Predicated */
+ ARM_SPE_OP_SG = 1 << 27, /* Gather/Scatter */
+ ARM_SPE_OP_COMM = 1 << 28, /* Common */
+ ARM_SPE_OP_FP = 1 << 29, /* Floating-point */
+ ARM_SPE_OP_COND = 1 << 30, /* Conditional */
+};
- /* Second level operation type for OTHER */
- ARM_SPE_OP_SVE_OTHER = 1 << 16,
- ARM_SPE_OP_SVE_FP = 1 << 17,
- ARM_SPE_OP_SVE_PRED_OTHER = 1 << 18,
-
- /* Second level operation type for LDST */
- ARM_SPE_OP_LD = 1 << 16,
- ARM_SPE_OP_ST = 1 << 17,
- ARM_SPE_OP_ATOMIC = 1 << 18,
- ARM_SPE_OP_EXCL = 1 << 19,
- ARM_SPE_OP_AR = 1 << 20,
- ARM_SPE_OP_SIMD_FP = 1 << 21,
- ARM_SPE_OP_GP_REG = 1 << 22,
- ARM_SPE_OP_UNSPEC_REG = 1 << 23,
- ARM_SPE_OP_NV_SYSREG = 1 << 24,
- ARM_SPE_OP_SVE_LDST = 1 << 25,
- ARM_SPE_OP_SVE_PRED_LDST = 1 << 26,
- ARM_SPE_OP_SVE_SG = 1 << 27,
-
- /* Second level operation type for BRANCH_ERET */
- ARM_SPE_OP_BR_COND = 1 << 16,
- ARM_SPE_OP_BR_INDIRECT = 1 << 17,
- ARM_SPE_OP_BR_GCS = 1 << 18,
- ARM_SPE_OP_BR_CR_BL = 1 << 19,
- ARM_SPE_OP_BR_CR_RET = 1 << 20,
- ARM_SPE_OP_BR_CR_NON_BL_RET = 1 << 21,
+enum arm_spe_2nd_op_branch {
+ ARM_SPE_OP_BR_COND = 1 << 8,
+ ARM_SPE_OP_BR_INDIRECT = 1 << 9,
+ ARM_SPE_OP_BR_GCS = 1 << 10,
+ ARM_SPE_OP_BR_CR_BL = 1 << 11,
+ ARM_SPE_OP_BR_CR_RET = 1 << 12,
+ ARM_SPE_OP_BR_CR_NON_BL_RET = 1 << 13,
};
enum arm_spe_common_data_source {
@@ -82,8 +94,25 @@ enum arm_spe_ampereone_data_source {
ARM_SPE_AMPEREONE_L2D = 0x9,
};
+enum arm_spe_hisi_hip_data_source {
+ ARM_SPE_HISI_HIP_PEER_CPU = 0,
+ ARM_SPE_HISI_HIP_PEER_CPU_HITM = 1,
+ ARM_SPE_HISI_HIP_L3 = 2,
+ ARM_SPE_HISI_HIP_L3_HITM = 3,
+ ARM_SPE_HISI_HIP_PEER_CLUSTER = 4,
+ ARM_SPE_HISI_HIP_PEER_CLUSTER_HITM = 5,
+ ARM_SPE_HISI_HIP_REMOTE_SOCKET = 6,
+ ARM_SPE_HISI_HIP_REMOTE_SOCKET_HITM = 7,
+ ARM_SPE_HISI_HIP_LOCAL_MEM = 8,
+ ARM_SPE_HISI_HIP_REMOTE_MEM = 9,
+ ARM_SPE_HISI_HIP_NC_DEV = 13,
+ ARM_SPE_HISI_HIP_L2 = 16,
+ ARM_SPE_HISI_HIP_L2_HITM = 17,
+ ARM_SPE_HISI_HIP_L1 = 18,
+};
+
struct arm_spe_record {
- enum arm_spe_sample_type type;
+ u64 type;
int err;
u32 op;
u32 latency;
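With op split into a first-level class enum and second-level modifier enums, one record composes bits from both; for example, an SVE gather load decoded above carries ARM_SPE_OP_LDST plus the LD, SVE and SG modifiers. A sketch using the bit values from the enums above:

#include <stdbool.h>

#define ARM_SPE_OP_LDST	(1u << 1)
#define ARM_SPE_OP_SVE	(1u << 12)
#define ARM_SPE_OP_LD	(1u << 20)
#define ARM_SPE_OP_SG	(1u << 27)

static bool is_sve_gather_load(unsigned int op)
{
	const unsigned int mask = ARM_SPE_OP_LDST | ARM_SPE_OP_SVE |
				  ARM_SPE_OP_LD | ARM_SPE_OP_SG;

	/* all four bits must be set: LDST class + SVE/LD/SG modifiers */
	return (op & mask) == mask;
}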
diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-pkt-decoder.c b/tools/perf/util/arm-spe-decoder/arm-spe-pkt-decoder.c
index 13cadb2f1cea..5769ba2f4140 100644
--- a/tools/perf/util/arm-spe-decoder/arm-spe-pkt-decoder.c
+++ b/tools/perf/util/arm-spe-decoder/arm-spe-pkt-decoder.c
@@ -314,6 +314,20 @@ static int arm_spe_pkt_desc_event(const struct arm_spe_pkt *packet,
arm_spe_pkt_out_string(&err, &buf, &buf_len, " SVE-PARTIAL-PRED");
if (payload & BIT(EV_EMPTY_PREDICATE))
arm_spe_pkt_out_string(&err, &buf, &buf_len, " SVE-EMPTY-PRED");
+ if (payload & BIT(EV_L2D_ACCESS))
+ arm_spe_pkt_out_string(&err, &buf, &buf_len, " L2D-ACCESS");
+ if (payload & BIT(EV_L2D_MISS))
+ arm_spe_pkt_out_string(&err, &buf, &buf_len, " L2D-MISS");
+ if (payload & BIT(EV_CACHE_DATA_MODIFIED))
+ arm_spe_pkt_out_string(&err, &buf, &buf_len, " HITM");
+ if (payload & BIT(EV_RECENTLY_FETCHED))
+ arm_spe_pkt_out_string(&err, &buf, &buf_len, " LFB");
+ if (payload & BIT(EV_DATA_SNOOPED))
+ arm_spe_pkt_out_string(&err, &buf, &buf_len, " SNOOPED");
+ if (payload & BIT(EV_STREAMING_SVE_MODE))
+ arm_spe_pkt_out_string(&err, &buf, &buf_len, " STREAMING-SVE");
+ if (payload & BIT(EV_SMCU))
+ arm_spe_pkt_out_string(&err, &buf, &buf_len, " SMCU");
return err;
}
@@ -326,7 +340,7 @@ static int arm_spe_pkt_desc_op_type(const struct arm_spe_pkt *packet,
switch (packet->index) {
case SPE_OP_PKT_HDR_CLASS_OTHER:
- if (SPE_OP_PKT_IS_OTHER_SVE_OP(payload)) {
+ if (SPE_OP_PKT_OTHER_SUBCLASS_SVE(payload)) {
arm_spe_pkt_out_string(&err, &buf, &buf_len, "SVE-OTHER");
/* SVE effective vector length */
@@ -337,8 +351,21 @@ static int arm_spe_pkt_desc_op_type(const struct arm_spe_pkt *packet,
arm_spe_pkt_out_string(&err, &buf, &buf_len, " FP");
if (payload & SPE_OP_PKT_SVE_PRED)
arm_spe_pkt_out_string(&err, &buf, &buf_len, " PRED");
- } else {
+ } else if (SPE_OP_PKT_OTHER_SUBCLASS_SME(payload)) {
+ arm_spe_pkt_out_string(&err, &buf, &buf_len, "SME-OTHER");
+
+ /* SME effective vector length or tile size */
+ arm_spe_pkt_out_string(&err, &buf, &buf_len, " ETS %d",
+ SPE_OP_PKG_SME_ETS(payload));
+
+ if (payload & SPE_OP_PKT_OTHER_FP)
+ arm_spe_pkt_out_string(&err, &buf, &buf_len, " FP");
+ } else if (SPE_OP_PKT_OTHER_SUBCLASS_OTHER(payload)) {
arm_spe_pkt_out_string(&err, &buf, &buf_len, "OTHER");
+ if (payload & SPE_OP_PKT_OTHER_ASE)
+ arm_spe_pkt_out_string(&err, &buf, &buf_len, " ASE");
+ if (payload & SPE_OP_PKT_OTHER_FP)
+ arm_spe_pkt_out_string(&err, &buf, &buf_len, " FP");
arm_spe_pkt_out_string(&err, &buf, &buf_len, " %s",
payload & SPE_OP_PKT_COND ?
"COND-SELECT" : "INSN-OTHER");
@@ -348,42 +375,30 @@ static int arm_spe_pkt_desc_op_type(const struct arm_spe_pkt *packet,
arm_spe_pkt_out_string(&err, &buf, &buf_len,
payload & 0x1 ? "ST" : "LD");
- if (SPE_OP_PKT_IS_LDST_ATOMIC(payload)) {
+ if (SPE_OP_PKT_LDST_SUBCLASS_EXTENDED(payload)) {
if (payload & SPE_OP_PKT_AT)
arm_spe_pkt_out_string(&err, &buf, &buf_len, " AT");
if (payload & SPE_OP_PKT_EXCL)
arm_spe_pkt_out_string(&err, &buf, &buf_len, " EXCL");
if (payload & SPE_OP_PKT_AR)
arm_spe_pkt_out_string(&err, &buf, &buf_len, " AR");
- }
-
- switch (SPE_OP_PKT_LDST_SUBCLASS_GET(payload)) {
- case SPE_OP_PKT_LDST_SUBCLASS_SIMD_FP:
+ } else if (SPE_OP_PKT_LDST_SUBCLASS_SIMD_FP(payload)) {
arm_spe_pkt_out_string(&err, &buf, &buf_len, " SIMD-FP");
- break;
- case SPE_OP_PKT_LDST_SUBCLASS_GP_REG:
+ } else if (SPE_OP_PKT_LDST_SUBCLASS_GP_REG(payload)) {
arm_spe_pkt_out_string(&err, &buf, &buf_len, " GP-REG");
- break;
- case SPE_OP_PKT_LDST_SUBCLASS_UNSPEC_REG:
+ } else if (SPE_OP_PKT_LDST_SUBCLASS_UNSPEC_REG(payload)) {
arm_spe_pkt_out_string(&err, &buf, &buf_len, " UNSPEC-REG");
- break;
- case SPE_OP_PKT_LDST_SUBCLASS_NV_SYSREG:
+ } else if (SPE_OP_PKT_LDST_SUBCLASS_NV_SYSREG(payload)) {
arm_spe_pkt_out_string(&err, &buf, &buf_len, " NV-SYSREG");
- break;
- case SPE_OP_PKT_LDST_SUBCLASS_MTE_TAG:
+ } else if (SPE_OP_PKT_LDST_SUBCLASS_MTE_TAG(payload)) {
arm_spe_pkt_out_string(&err, &buf, &buf_len, " MTE-TAG");
- break;
- case SPE_OP_PKT_LDST_SUBCLASS_MEMCPY:
+ } else if (SPE_OP_PKT_LDST_SUBCLASS_MEMCPY(payload)) {
arm_spe_pkt_out_string(&err, &buf, &buf_len, " MEMCPY");
- break;
- case SPE_OP_PKT_LDST_SUBCLASS_MEMSET:
+ } else if (SPE_OP_PKT_LDST_SUBCLASS_MEMSET(payload)) {
arm_spe_pkt_out_string(&err, &buf, &buf_len, " MEMSET");
- break;
- default:
- break;
- }
+ } else if (SPE_OP_PKT_LDST_SUBCLASS_SVE_SME_REG(payload)) {
+ arm_spe_pkt_out_string(&err, &buf, &buf_len, " SVE-SME-REG");
- if (SPE_OP_PKT_IS_LDST_SVE(payload)) {
/* SVE effective vector length */
arm_spe_pkt_out_string(&err, &buf, &buf_len, " EVLEN %d",
SPE_OP_PKG_SVE_EVL(payload));
@@ -392,6 +407,10 @@ static int arm_spe_pkt_desc_op_type(const struct arm_spe_pkt *packet,
arm_spe_pkt_out_string(&err, &buf, &buf_len, " PRED");
if (payload & SPE_OP_PKT_SVE_SG)
arm_spe_pkt_out_string(&err, &buf, &buf_len, " SG");
+ } else if (SPE_OP_PKT_LDST_SUBCLASS_GCS(payload)) {
+ arm_spe_pkt_out_string(&err, &buf, &buf_len, " GCS");
+ if (payload & SPE_OP_PKT_GCS_COMM)
+ arm_spe_pkt_out_string(&err, &buf, &buf_len, " COMM");
}
break;
case SPE_OP_PKT_HDR_CLASS_BR_ERET:
diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-pkt-decoder.h b/tools/perf/util/arm-spe-decoder/arm-spe-pkt-decoder.h
index 2cdf9f6da268..adf4cde320aa 100644
--- a/tools/perf/util/arm-spe-decoder/arm-spe-pkt-decoder.h
+++ b/tools/perf/util/arm-spe-decoder/arm-spe-pkt-decoder.h
@@ -108,6 +108,13 @@ enum arm_spe_events {
EV_TRANSACTIONAL = 16,
EV_PARTIAL_PREDICATE = 17,
EV_EMPTY_PREDICATE = 18,
+ EV_L2D_ACCESS = 19,
+ EV_L2D_MISS = 20,
+ EV_CACHE_DATA_MODIFIED = 21,
+ EV_RECENTLY_FETCHED = 22,
+ EV_DATA_SNOOPED = 23,
+ EV_STREAMING_SVE_MODE = 24,
+ EV_SMCU = 25,
};
/* Operation packet header */
@@ -116,25 +123,39 @@ enum arm_spe_events {
#define SPE_OP_PKT_HDR_CLASS_LD_ST_ATOMIC 0x1
#define SPE_OP_PKT_HDR_CLASS_BR_ERET 0x2
-#define SPE_OP_PKT_IS_OTHER_SVE_OP(v) (((v) & (BIT(7) | BIT(3) | BIT(0))) == 0x8)
+#define SPE_OP_PKT_OTHER_SUBCLASS_OTHER(v) (((v) & GENMASK_ULL(7, 3)) == 0x0)
+#define SPE_OP_PKT_OTHER_SUBCLASS_SVE(v) (((v) & (BIT(7) | BIT(3) | BIT(0))) == 0x8)
+#define SPE_OP_PKT_OTHER_SUBCLASS_SME(v) (((v) & (BIT(7) | BIT(3) | BIT(0))) == 0x88)
-#define SPE_OP_PKT_LDST_SUBCLASS_GET(v) ((v) & GENMASK_ULL(7, 1))
-#define SPE_OP_PKT_LDST_SUBCLASS_GP_REG 0x0
-#define SPE_OP_PKT_LDST_SUBCLASS_SIMD_FP 0x4
-#define SPE_OP_PKT_LDST_SUBCLASS_UNSPEC_REG 0x10
-#define SPE_OP_PKT_LDST_SUBCLASS_NV_SYSREG 0x30
-#define SPE_OP_PKT_LDST_SUBCLASS_MTE_TAG 0x14
-#define SPE_OP_PKT_LDST_SUBCLASS_MEMCPY 0x20
-#define SPE_OP_PKT_LDST_SUBCLASS_MEMSET 0x25
+#define SPE_OP_PKT_OTHER_ASE BIT(2)
+#define SPE_OP_PKT_OTHER_FP BIT(1)
-#define SPE_OP_PKT_IS_LDST_ATOMIC(v) (((v) & (GENMASK_ULL(7, 5) | BIT(1))) == 0x2)
+/*
+ * SME effective vector length or tile size (ETS) is stored in byte 0
+ * bits [6:4,2]; the length is rounded up to a power of two and use 128
+ * as one step, so ETS calculation is:
+ *
+ * 128 * (2 ^ bits [6:4,2]) = 32 << (bits [6:4,2])
+ */
+#define SPE_OP_PKG_SME_ETS(v) (128 << (FIELD_GET(GENMASK_ULL(6, 4), (v)) << 1 | \
+ (FIELD_GET(BIT(2), (v)))))
+
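A worked expansion of SPE_OP_PKG_SME_ETS() for one hypothetical payload: bits [6:4] = 0b010 and bit [2] = 1 give an exponent of (2 << 1) | 1 = 5, hence ETS = 128 << 5 = 4096.

#include <assert.h>

static unsigned int sme_ets(unsigned long long v)
{
	unsigned int hi = (v >> 4) & 0x7;	/* FIELD_GET(GENMASK(6, 4), v) */
	unsigned int lo = (v >> 2) & 0x1;	/* FIELD_GET(BIT(2), v)        */

	return 128u << ((hi << 1) | lo);
}

int main(void)
{
	assert(sme_ets(0x24) == 4096);	/* 0x24 = 0b0010'0100 */
	return 0;
}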
+#define SPE_OP_PKT_LDST_SUBCLASS_GP_REG(v) (((v) & GENMASK_ULL(7, 1)) == 0x0)
+#define SPE_OP_PKT_LDST_SUBCLASS_SIMD_FP(v) (((v) & GENMASK_ULL(7, 1)) == 0x4)
+#define SPE_OP_PKT_LDST_SUBCLASS_UNSPEC_REG(v) (((v) & GENMASK_ULL(7, 1)) == 0x10)
+#define SPE_OP_PKT_LDST_SUBCLASS_NV_SYSREG(v) (((v) & GENMASK_ULL(7, 1)) == 0x30)
+#define SPE_OP_PKT_LDST_SUBCLASS_MTE_TAG(v) (((v) & GENMASK_ULL(7, 1)) == 0x14)
+#define SPE_OP_PKT_LDST_SUBCLASS_MEMCPY(v) (((v) & GENMASK_ULL(7, 1)) == 0x20)
+#define SPE_OP_PKT_LDST_SUBCLASS_MEMSET(v) (((v) & GENMASK_ULL(7, 0)) == 0x25)
+
+#define SPE_OP_PKT_LDST_SUBCLASS_EXTENDED(v) (((v) & (GENMASK_ULL(7, 5) | BIT(1))) == 0x2)
#define SPE_OP_PKT_AR BIT(4)
#define SPE_OP_PKT_EXCL BIT(3)
#define SPE_OP_PKT_AT BIT(2)
#define SPE_OP_PKT_ST BIT(0)
-#define SPE_OP_PKT_IS_LDST_SVE(v) (((v) & (BIT(3) | BIT(1))) == 0x8)
+#define SPE_OP_PKT_LDST_SUBCLASS_SVE_SME_REG(v) (((v) & (BIT(3) | BIT(1))) == 0x8)
#define SPE_OP_PKT_SVE_SG BIT(7)
/*
@@ -148,6 +169,10 @@ enum arm_spe_events {
#define SPE_OP_PKT_SVE_PRED BIT(2)
#define SPE_OP_PKT_SVE_FP BIT(1)
+#define SPE_OP_PKT_LDST_SUBCLASS_GCS(v) (((v) & (GENMASK_ULL(7, 3) | BIT(1))) == 0x40)
+
+#define SPE_OP_PKT_GCS_COMM BIT(2)
+
#define SPE_OP_PKT_CR_MASK GENMASK_ULL(4, 3)
#define SPE_OP_PKT_CR_BL(v) (FIELD_GET(SPE_OP_PKT_CR_MASK, (v)) == 1)
#define SPE_OP_PKT_CR_RET(v) (FIELD_GET(SPE_OP_PKT_CR_MASK, (v)) == 2)
diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
index 2a9775649cc2..dc19e72258f3 100644
--- a/tools/perf/util/arm-spe.c
+++ b/tools/perf/util/arm-spe.c
@@ -39,6 +39,23 @@
#define is_ldst_op(op) (!!((op) & ARM_SPE_OP_LDST))
+#define is_simd_op(op) (!!((op) & (ARM_SPE_OP_SIMD_FP | ARM_SPE_OP_SVE | \
+ ARM_SPE_OP_SME | ARM_SPE_OP_ASE)))
+
+#define is_mem_op(op) (is_ldst_op(op) || is_simd_op(op))
+
+#define ARM_SPE_CACHE_EVENT(lvl) \
+ (ARM_SPE_##lvl##_ACCESS | ARM_SPE_##lvl##_MISS)
+
+#define arm_spe_is_cache_level(type, lvl) \
+ ((type) & ARM_SPE_CACHE_EVENT(lvl))
+
+#define arm_spe_is_cache_hit(type, lvl) \
+ (((type) & ARM_SPE_CACHE_EVENT(lvl)) == ARM_SPE_##lvl##_ACCESS)
+
+#define arm_spe_is_cache_miss(type, lvl) \
+ ((type) & ARM_SPE_##lvl##_MISS)
+
struct arm_spe {
struct auxtrace auxtrace;
struct auxtrace_queues queues;
@@ -62,7 +79,6 @@ struct arm_spe {
u8 sample_remote_access;
u8 sample_memory;
u8 sample_instructions;
- u64 instructions_sample_period;
u64 l1d_miss_id;
u64 l1d_access_id;
@@ -101,7 +117,7 @@ struct arm_spe_queue {
u64 time;
u64 timestamp;
struct thread *thread;
- u64 period_instructions;
+ u64 sample_count;
u32 flags;
struct branch_stack *last_branch;
};
@@ -228,7 +244,6 @@ static struct arm_spe_queue *arm_spe__alloc_queue(struct arm_spe *spe,
speq->pid = -1;
speq->tid = -1;
speq->cpu = -1;
- speq->period_instructions = 0;
/* params set */
params.get_trace = arm_spe_get_trace;
@@ -305,15 +320,28 @@ static int arm_spe_set_tid(struct arm_spe_queue *speq, pid_t tid)
return 0;
}
-static u64 *arm_spe__get_metadata_by_cpu(struct arm_spe *spe, u64 cpu)
+static u64 *arm_spe__get_metadata_by_cpu(struct arm_spe *spe, int cpu)
{
u64 i;
if (!spe->metadata)
return NULL;
+ /* CPU ID is -1 for per-thread mode */
+ if (cpu < 0) {
+ /*
+ * On a heterogeneous system, the CPU ID of -1 means we
+ * cannot confirm that the data source packet is supported.
+ */
+ if (!spe->is_homogeneous)
+ return NULL;
+
+ /* On a homogeneous system, simply use CPU0's metadata */
+ return spe->metadata[0];
+ }
+
for (i = 0; i < spe->metadata_nr_cpu; i++)
- if (spe->metadata[i][ARM_SPE_CPU] == cpu)
+ if (spe->metadata[i][ARM_SPE_CPU] == (u64)cpu)
return spe->metadata[i];
return NULL;
@@ -323,10 +351,7 @@ static struct simd_flags arm_spe__synth_simd_flags(const struct arm_spe_record *
{
struct simd_flags simd_flags = {};
- if ((record->op & ARM_SPE_OP_LDST) && (record->op & ARM_SPE_OP_SVE_LDST))
- simd_flags.arch |= SIMD_OP_FLAGS_ARCH_SVE;
-
- if ((record->op & ARM_SPE_OP_OTHER) && (record->op & ARM_SPE_OP_SVE_OTHER))
+ if (record->op & ARM_SPE_OP_SVE)
simd_flags.arch |= SIMD_OP_FLAGS_ARCH_SVE;
if (record->type & ARM_SPE_SVE_PARTIAL_PRED)
@@ -352,7 +377,7 @@ static void arm_spe_prep_sample(struct arm_spe *spe,
sample->cpumode = arm_spe_cpumode(spe, sample->ip);
sample->pid = speq->pid;
sample->tid = speq->tid;
- sample->period = 1;
+ sample->period = spe->synth_opts.period;
sample->cpu = speq->cpu;
sample->simd_flags = arm_spe__synth_simd_flags(record);
@@ -471,7 +496,8 @@ arm_spe_deliver_synth_event(struct arm_spe *spe,
}
static int arm_spe__synth_mem_sample(struct arm_spe_queue *speq,
- u64 spe_events_id, u64 data_src)
+ u64 spe_events_id,
+ union perf_mem_data_src data_src)
{
struct arm_spe *spe = speq->spe;
struct arm_spe_record *record = &speq->decoder->record;
@@ -486,7 +512,7 @@ static int arm_spe__synth_mem_sample(struct arm_spe_queue *speq,
sample.stream_id = spe_events_id;
sample.addr = record->virt_addr;
sample.phys_addr = record->phys_addr;
- sample.data_src = data_src;
+ sample.data_src = data_src.val;
sample.weight = record->latency;
ret = arm_spe_deliver_synth_event(spe, speq, event, &sample);
@@ -519,7 +545,8 @@ static int arm_spe__synth_branch_sample(struct arm_spe_queue *speq,
}
static int arm_spe__synth_instruction_sample(struct arm_spe_queue *speq,
- u64 spe_events_id, u64 data_src)
+ u64 spe_events_id,
+ union perf_mem_data_src data_src)
{
struct arm_spe *spe = speq->spe;
struct arm_spe_record *record = &speq->decoder->record;
@@ -527,14 +554,6 @@ static int arm_spe__synth_instruction_sample(struct arm_spe_queue *speq,
struct perf_sample sample;
int ret;
- /*
- * Handles perf instruction sampling period.
- */
- speq->period_instructions++;
- if (speq->period_instructions < spe->instructions_sample_period)
- return 0;
- speq->period_instructions = 0;
-
perf_sample__init(&sample, /*all=*/true);
arm_spe_prep_sample(spe, speq, event, &sample);
@@ -542,8 +561,7 @@ static int arm_spe__synth_instruction_sample(struct arm_spe_queue *speq,
sample.stream_id = spe_events_id;
sample.addr = record->to_ip;
sample.phys_addr = record->phys_addr;
- sample.data_src = data_src;
- sample.period = spe->instructions_sample_period;
+ sample.data_src = data_src.val;
sample.weight = record->latency;
sample.flags = speq->flags;
sample.branch_stack = speq->last_branch;
@@ -554,15 +572,21 @@ static int arm_spe__synth_instruction_sample(struct arm_spe_queue *speq,
}
static const struct midr_range common_ds_encoding_cpus[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A720AE),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A725),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_X4),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X925),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
{},
};
@@ -571,6 +595,11 @@ static const struct midr_range ampereone_ds_encoding_cpus[] = {
{},
};
+static const struct midr_range hisi_hip_ds_encoding_cpus[] = {
+ MIDR_ALL_VERSIONS(MIDR_HISI_HIP12),
+ {},
+};
+
static void arm_spe__sample_flags(struct arm_spe_queue *speq)
{
const struct arm_spe_record *record = &speq->decoder->record;
@@ -665,8 +694,8 @@ static void arm_spe__synth_data_source_common(const struct arm_spe_record *recor
* socket
*/
case ARM_SPE_COMMON_DS_REMOTE:
- data_src->mem_lvl = PERF_MEM_LVL_REM_CCE1;
- data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
+ data_src->mem_lvl = PERF_MEM_LVL_NA;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_NA;
data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
break;
@@ -718,35 +747,217 @@ static void arm_spe__synth_data_source_ampereone(const struct arm_spe_record *re
arm_spe__synth_data_source_common(&common_record, data_src);
}
+static void arm_spe__synth_data_source_hisi_hip(const struct arm_spe_record *record,
+ union perf_mem_data_src *data_src)
+{
+ /* Use common synthesis method to handle store operations */
+ if (record->op & ARM_SPE_OP_ST) {
+ arm_spe__synth_data_source_common(record, data_src);
+ return;
+ }
+
+ switch (record->source) {
+ case ARM_SPE_HISI_HIP_PEER_CPU:
+ data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
+ data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
+ break;
+ case ARM_SPE_HISI_HIP_PEER_CPU_HITM:
+ data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
+ data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
+ data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
+ break;
+ case ARM_SPE_HISI_HIP_L3:
+ data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
+ data_src->mem_snoop = PERF_MEM_SNOOP_HIT;
+ break;
+ case ARM_SPE_HISI_HIP_L3_HITM:
+ data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
+ data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
+ break;
+ case ARM_SPE_HISI_HIP_PEER_CLUSTER:
+ data_src->mem_lvl = PERF_MEM_LVL_REM_CCE1 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
+ data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
+ break;
+ case ARM_SPE_HISI_HIP_PEER_CLUSTER_HITM:
+ data_src->mem_lvl = PERF_MEM_LVL_REM_CCE1 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
+ data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
+ data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
+ break;
+ case ARM_SPE_HISI_HIP_REMOTE_SOCKET:
+ data_src->mem_lvl = PERF_MEM_LVL_REM_CCE2;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
+ data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
+ data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
+ break;
+ case ARM_SPE_HISI_HIP_REMOTE_SOCKET_HITM:
+ data_src->mem_lvl = PERF_MEM_LVL_REM_CCE2;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
+ data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
+ data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
+ data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
+ break;
+ case ARM_SPE_HISI_HIP_LOCAL_MEM:
+ data_src->mem_lvl = PERF_MEM_LVL_LOC_RAM | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_RAM;
+ data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
+ break;
+ case ARM_SPE_HISI_HIP_REMOTE_MEM:
+ data_src->mem_lvl = PERF_MEM_LVL_REM_RAM1 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_RAM;
+ data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
+ break;
+ case ARM_SPE_HISI_HIP_NC_DEV:
+ data_src->mem_lvl = PERF_MEM_LVL_IO | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_IO;
+ data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
+ break;
+ case ARM_SPE_HISI_HIP_L2:
+ data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
+ data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
+ break;
+ case ARM_SPE_HISI_HIP_L2_HITM:
+ data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
+ data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
+ break;
+ case ARM_SPE_HISI_HIP_L1:
+ data_src->mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
+ data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
+ break;
+ default:
+ break;
+ }
+}
+
static const struct data_source_handle data_source_handles[] = {
DS(common_ds_encoding_cpus, data_source_common),
DS(ampereone_ds_encoding_cpus, data_source_ampereone),
+ DS(hisi_hip_ds_encoding_cpus, data_source_hisi_hip),
};
-static void arm_spe__synth_memory_level(const struct arm_spe_record *record,
- union perf_mem_data_src *data_src)
+static void arm_spe__synth_ld_memory_level(const struct arm_spe_record *record,
+ union perf_mem_data_src *data_src)
{
- if (record->type & (ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS)) {
- data_src->mem_lvl = PERF_MEM_LVL_L3;
+ /*
+ * To find a cache hit, search in ascending order from the lower level
+ * caches to the higher level caches. This reflects the best scenario
+ * for a cache hit.
+ */
+ if (arm_spe_is_cache_hit(record->type, L1D)) {
+ data_src->mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
+ } else if (record->type & ARM_SPE_RECENTLY_FETCHED) {
+ data_src->mem_lvl = PERF_MEM_LVL_LFB | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_LFB;
+ } else if (arm_spe_is_cache_hit(record->type, L2D)) {
+ data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
+ } else if (arm_spe_is_cache_hit(record->type, LLC)) {
+ data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
+ /*
+ * To find a cache miss, search in descending order from the higher
+ * level cache to the lower level cache. This represents the worst
+ * scenario for a cache miss.
+ */
+ } else if (arm_spe_is_cache_miss(record->type, LLC)) {
+ data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_MISS;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
+ } else if (arm_spe_is_cache_miss(record->type, L2D)) {
+ data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_MISS;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
+ } else if (arm_spe_is_cache_miss(record->type, L1D)) {
+ data_src->mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
+ }
+}
- if (record->type & ARM_SPE_LLC_MISS)
- data_src->mem_lvl |= PERF_MEM_LVL_MISS;
- else
- data_src->mem_lvl |= PERF_MEM_LVL_HIT;
- } else if (record->type & (ARM_SPE_L1D_ACCESS | ARM_SPE_L1D_MISS)) {
+static void arm_spe__synth_st_memory_level(const struct arm_spe_record *record,
+ union perf_mem_data_src *data_src)
+{
+ /* Record the highest cache level info for a store operation. */
+ if (arm_spe_is_cache_level(record->type, LLC)) {
+ data_src->mem_lvl = PERF_MEM_LVL_L3;
+ data_src->mem_lvl |= arm_spe_is_cache_miss(record->type, LLC) ?
+ PERF_MEM_LVL_MISS : PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
+ } else if (arm_spe_is_cache_level(record->type, L2D)) {
+ data_src->mem_lvl = PERF_MEM_LVL_L2;
+ data_src->mem_lvl |= arm_spe_is_cache_miss(record->type, L2D) ?
+ PERF_MEM_LVL_MISS : PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
+ } else if (arm_spe_is_cache_level(record->type, L1D)) {
data_src->mem_lvl = PERF_MEM_LVL_L1;
+ data_src->mem_lvl |= arm_spe_is_cache_miss(record->type, L1D) ?
+ PERF_MEM_LVL_MISS : PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
+ }
+}
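A compact sketch of the load-path ordering above (bit positions illustrative, L2 omitted for brevity): a record flagged only L1D-ACCESS resolves to an L1 hit, while one flagged L1D-REFILL plus LLC-MISS resolves to an LLC miss, because misses are matched from the highest level down.

#define BIT(n)		(1ULL << (n))
#define L1D_ACCESS	BIT(2)	/* illustrative bit positions */
#define L1D_MISS	BIT(3)
#define LLC_ACCESS	BIT(8)
#define LLC_MISS	BIT(9)

/* Level a load is attributed to: 1 or 3, 0 if unknown (L2 omitted). */
static int ld_level(unsigned long long type)
{
	if ((type & (L1D_ACCESS | L1D_MISS)) == L1D_ACCESS)
		return 1;	/* best case: L1 hit     */
	if ((type & (LLC_ACCESS | LLC_MISS)) == LLC_ACCESS)
		return 3;	/* LLC hit               */
	if (type & LLC_MISS)
		return 3;	/* worst case: LLC miss  */
	if (type & L1D_MISS)
		return 1;	/* fell back to L1 miss  */
	return 0;
}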
- if (record->type & ARM_SPE_L1D_MISS)
- data_src->mem_lvl |= PERF_MEM_LVL_MISS;
- else
- data_src->mem_lvl |= PERF_MEM_LVL_HIT;
+static void arm_spe__synth_memory_level(struct arm_spe_queue *speq,
+ const struct arm_spe_record *record,
+ union perf_mem_data_src *data_src)
+{
+ struct arm_spe *spe = speq->spe;
+
+ /*
+ * The data source packet contains more detailed cache level info
+ * for peer snooping, so respect the memory level if it has already
+ * been set by data source parsing.
+ */
+ if (!data_src->mem_lvl) {
+ if (data_src->mem_op == PERF_MEM_OP_LOAD)
+ arm_spe__synth_ld_memory_level(record, data_src);
+ if (data_src->mem_op == PERF_MEM_OP_STORE)
+ arm_spe__synth_st_memory_level(record, data_src);
+ }
+
+ if (!data_src->mem_lvl) {
+ data_src->mem_lvl = PERF_MEM_LVL_NA;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_NA;
+ }
+
+ /*
+ * If 'mem_snoop' has already been set by the data source packet,
+ * skip setting it here.
+ */
+ if (!data_src->mem_snoop) {
+ if (record->type & ARM_SPE_DATA_SNOOPED) {
+ if (record->type & ARM_SPE_HITM)
+ data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
+ else
+ data_src->mem_snoop = PERF_MEM_SNOOP_HIT;
+ } else {
+ u64 *metadata =
+ arm_spe__get_metadata_by_cpu(spe, speq->cpu);
+
+ /*
+ * Set NA ("Not available") mode if no meta data or the
+ * SNOOPED event is not supported.
+ */
+ if (!metadata ||
+ !(metadata[ARM_SPE_CAP_EVENT_FILTER] & ARM_SPE_DATA_SNOOPED))
+ data_src->mem_snoop = PERF_MEM_SNOOP_NA;
+ else
+ data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
+ }
}
- if (record->type & ARM_SPE_REMOTE_ACCESS)
- data_src->mem_lvl |= PERF_MEM_LVL_REM_CCE1;
+ if (!data_src->mem_remote) {
+ if (record->type & ARM_SPE_REMOTE_ACCESS)
+ data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
+ }
}
-static bool arm_spe__synth_ds(struct arm_spe_queue *speq,
+static void arm_spe__synth_ds(struct arm_spe_queue *speq,
const struct arm_spe_record *record,
union perf_mem_data_src *data_src)
{
@@ -760,59 +971,43 @@ static bool arm_spe__synth_ds(struct arm_spe_queue *speq,
const char *cpuid;
pr_warning_once("Old SPE metadata, re-record to improve decode accuracy\n");
- cpuid = perf_env__cpuid(spe->session->evlist->env);
+ cpuid = perf_env__cpuid(perf_session__env(spe->session));
midr = strtol(cpuid, NULL, 16);
} else {
- /* CPU ID is -1 for per-thread mode */
- if (speq->cpu < 0) {
- /*
- * On the heterogeneous system, due to CPU ID is -1,
- * cannot confirm the data source packet is supported.
- */
- if (!spe->is_homogeneous)
- return false;
-
- /* In homogeneous system, simply use CPU0's metadata */
- if (spe->metadata)
- metadata = spe->metadata[0];
- } else {
- metadata = arm_spe__get_metadata_by_cpu(spe, speq->cpu);
- }
-
+ metadata = arm_spe__get_metadata_by_cpu(spe, speq->cpu);
if (!metadata)
- return false;
+ return;
midr = metadata[ARM_SPE_CPU_MIDR];
}
for (i = 0; i < ARRAY_SIZE(data_source_handles); i++) {
if (is_midr_in_range_list(midr, data_source_handles[i].midr_ranges)) {
- data_source_handles[i].ds_synth(record, data_src);
- return true;
+ return data_source_handles[i].ds_synth(record, data_src);
}
}
- return false;
+ return;
}
-static u64 arm_spe__synth_data_source(struct arm_spe_queue *speq,
- const struct arm_spe_record *record)
+static union perf_mem_data_src
+arm_spe__synth_data_source(struct arm_spe_queue *speq,
+ const struct arm_spe_record *record)
{
- union perf_mem_data_src data_src = { .mem_op = PERF_MEM_OP_NA };
+ union perf_mem_data_src data_src = {};
- /* Only synthesize data source for LDST operations */
- if (!is_ldst_op(record->op))
- return 0;
+ if (!is_mem_op(record->op))
+ return data_src;
if (record->op & ARM_SPE_OP_LD)
data_src.mem_op = PERF_MEM_OP_LOAD;
else if (record->op & ARM_SPE_OP_ST)
data_src.mem_op = PERF_MEM_OP_STORE;
else
- return 0;
+ data_src.mem_op = PERF_MEM_OP_NA;
- if (!arm_spe__synth_ds(speq, record, &data_src))
- arm_spe__synth_memory_level(record, &data_src);
+ arm_spe__synth_ds(speq, record, &data_src);
+ arm_spe__synth_memory_level(speq, record, &data_src);
if (record->type & (ARM_SPE_TLB_ACCESS | ARM_SPE_TLB_MISS)) {
data_src.mem_dtlb = PERF_MEM_TLB_WK;
@@ -823,16 +1018,24 @@ static u64 arm_spe__synth_data_source(struct arm_spe_queue *speq,
data_src.mem_dtlb |= PERF_MEM_TLB_HIT;
}
- return data_src.val;
+ return data_src;
}
static int arm_spe_sample(struct arm_spe_queue *speq)
{
const struct arm_spe_record *record = &speq->decoder->record;
struct arm_spe *spe = speq->spe;
- u64 data_src;
+ union perf_mem_data_src data_src;
int err;
+ /*
+ * Discard all samples until the period is reached.
+ */
+ speq->sample_count++;
+ if (speq->sample_count < spe->synth_opts.period)
+ return 0;
+ speq->sample_count = 0;
+
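A sketch of this downsampling: with --itrace=i100, every 100th decoded SPE record produces a sample and the counter restarts (names illustrative):

struct queue { unsigned long long sample_count; };

static int should_sample(struct queue *q, unsigned long long period)
{
	if (++q->sample_count < period)
		return 0;	/* discard this record */
	q->sample_count = 0;	/* emit, start a new window */
	return 1;
}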
arm_spe__sample_flags(speq);
data_src = arm_spe__synth_data_source(speq, record);
@@ -902,11 +1105,7 @@ static int arm_spe_sample(struct arm_spe_queue *speq)
return err;
}
- /*
- * When data_src is zero it means the record is not a memory operation,
- * skip to synthesize memory sample for this case.
- */
- if (spe->sample_memory && is_ldst_op(record->op)) {
+ if (spe->sample_memory && is_mem_op(record->op)) {
err = arm_spe__synth_mem_sample(speq, spe->memory_id, data_src);
if (err)
return err;
@@ -1436,6 +1635,7 @@ static const char * const metadata_per_cpu_fmts[] = {
[ARM_SPE_CPU_MIDR] = " MIDR :0x%"PRIx64"\n",
[ARM_SPE_CPU_PMU_TYPE] = " PMU Type :%"PRId64"\n",
[ARM_SPE_CAP_MIN_IVAL] = " Min Interval :%"PRId64"\n",
+ [ARM_SPE_CAP_EVENT_FILTER] = " Event Filter :0x%"PRIx64"\n",
};
static void arm_spe_print_info(struct arm_spe *spe, __u64 *arr)
@@ -1532,12 +1732,10 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
attr.exclude_guest = evsel->core.attr.exclude_guest;
attr.sample_id_all = evsel->core.attr.sample_id_all;
attr.read_format = evsel->core.attr.read_format;
+ attr.sample_period = spe->synth_opts.period;
/* create new id val to be a fixed offset from evsel id */
- id = evsel->core.id[0] + 1000000000;
-
- if (!id)
- id = 1;
+ id = auxtrace_synth_id_range_start(evsel);
if (spe->synth_opts.flc) {
spe->sample_flc = true;
@@ -1648,25 +1846,15 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
}
if (spe->synth_opts.instructions) {
- if (spe->synth_opts.period_type != PERF_ITRACE_PERIOD_INSTRUCTIONS) {
- pr_warning("Only instruction-based sampling period is currently supported by Arm SPE.\n");
- goto synth_instructions_out;
- }
- if (spe->synth_opts.period > 1)
- pr_warning("Arm SPE has a hardware-based sample period.\n"
- "Additional instruction events will be discarded by --itrace\n");
-
spe->sample_instructions = true;
attr.config = PERF_COUNT_HW_INSTRUCTIONS;
- attr.sample_period = spe->synth_opts.period;
- spe->instructions_sample_period = attr.sample_period;
+
err = perf_session__deliver_synth_attr_event(session, &attr, id);
if (err)
return err;
spe->instructions_id = id;
arm_spe_set_event_name(evlist, id, "instructions");
}
-synth_instructions_out:
return 0;
}
@@ -1775,10 +1963,23 @@ int arm_spe_process_auxtrace_info(union perf_event *event,
if (dump_trace)
return 0;
- if (session->itrace_synth_opts && session->itrace_synth_opts->set)
+ if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
spe->synth_opts = *session->itrace_synth_opts;
- else
+ } else {
itrace_synth_opts__set_default(&spe->synth_opts, false);
+ /* Default nanoseconds period not supported */
+ spe->synth_opts.period_type = PERF_ITRACE_PERIOD_INSTRUCTIONS;
+ spe->synth_opts.period = 1;
+ }
+
+ if (spe->synth_opts.period_type != PERF_ITRACE_PERIOD_INSTRUCTIONS) {
+ ui__error("You must only use i (instructions) --itrace period with Arm SPE. e.g --itrace=i1i\n");
+ err = -EINVAL;
+ goto err_free_queues;
+ }
+ if (spe->synth_opts.period > 1)
+ ui__warning("Arm SPE has a hardware-based sampling period.\n\n"
+ "--itrace periods > 1i downsample by an interval of n SPE samples rather than n instructions.\n");
err = arm_spe_synth_events(spe, session);
if (err)
diff --git a/tools/perf/util/arm-spe.h b/tools/perf/util/arm-spe.h
index 390679a4af2f..3966df1856d8 100644
--- a/tools/perf/util/arm-spe.h
+++ b/tools/perf/util/arm-spe.h
@@ -47,6 +47,8 @@ enum {
ARM_SPE_CPU_PMU_TYPE,
/* Minimal interval */
ARM_SPE_CAP_MIN_IVAL,
+ /* Event filter */
+ ARM_SPE_CAP_EVENT_FILTER,
ARM_SPE_CPU_PRIV_MAX,
};
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 03211c2623de..a224687ffbc1 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -55,12 +55,29 @@
#include "hisi-ptt.h"
#include "s390-cpumsf.h"
#include "util/mmap.h"
+#include "powerpc-vpadtl.h"
#include <linux/ctype.h>
#include "symbol/kallsyms.h"
#include <internal/lib.h>
#include "util/sample.h"
+#define AUXTRACE_SYNTH_EVENT_ID_OFFSET 1000000000ULL
+
+/*
+ * Event IDs are allocated sequentially, so a big offset from any
+ * existing ID will reach an unused range.
+ */
+u64 auxtrace_synth_id_range_start(struct evsel *evsel)
+{
+ u64 id = evsel->core.id[0] + AUXTRACE_SYNTH_EVENT_ID_OFFSET;
+
+ if (!id)
+ id = 1;
+
+ return id;
+}
+
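A self-contained usage sketch (values illustrative): if the first real event ID is 5, synthesized IDs start at 1000000005 and callers hand out consecutive IDs from there:

#include <stdio.h>

typedef unsigned long long u64;

#define AUXTRACE_SYNTH_EVENT_ID_OFFSET 1000000000ULL

static u64 synth_id_range_start(u64 first_real_id)
{
	u64 id = first_real_id + AUXTRACE_SYNTH_EVENT_ID_OFFSET;

	return id ? id : 1;	/* 0 is reserved, skip it on wrap */
}

int main(void)
{
	u64 id = synth_id_range_start(5);

	printf("l1d-miss id   = %llu\n", id++);	/* 1000000005 */
	printf("l1d-access id = %llu\n", id++);	/* 1000000006 */
	return 0;
}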
/*
* Make a group from 'leader' to 'last', requiring that the events were not
* already grouped to a different leader.
@@ -185,10 +202,7 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
if (per_cpu) {
mp->cpu = perf_cpu_map__cpu(evlist->core.all_cpus, idx);
- if (evlist->core.threads)
- mp->tid = perf_thread_map__pid(evlist->core.threads, 0);
- else
- mp->tid = -1;
+ mp->tid = perf_thread_map__pid(evlist->core.threads, 0);
} else {
mp->cpu.cpu = -1;
mp->tid = perf_thread_map__pid(evlist->core.threads, idx);
@@ -1365,7 +1379,8 @@ static void unleader_auxtrace(struct perf_session *session)
}
}
-int perf_event__process_auxtrace_info(struct perf_session *session,
+int perf_event__process_auxtrace_info(const struct perf_tool *tool __maybe_unused,
+ struct perf_session *session,
union perf_event *event)
{
enum auxtrace_type type = event->auxtrace_info.type;
@@ -1393,6 +1408,9 @@ int perf_event__process_auxtrace_info(struct perf_session *session,
case PERF_AUXTRACE_HISI_PTT:
err = hisi_ptt_process_auxtrace_info(event, session);
break;
+ case PERF_AUXTRACE_VPA_DTL:
+ err = powerpc_vpadtl_process_auxtrace_info(event, session);
+ break;
case PERF_AUXTRACE_UNKNOWN:
default:
return -EINVAL;
@@ -1406,7 +1424,8 @@ int perf_event__process_auxtrace_info(struct perf_session *session,
return 0;
}
-s64 perf_event__process_auxtrace(struct perf_session *session,
+s64 perf_event__process_auxtrace(const struct perf_tool *tool __maybe_unused,
+ struct perf_session *session,
union perf_event *event)
{
s64 err;
@@ -1803,7 +1822,8 @@ void events_stats__auxtrace_error_warn(const struct events_stats *stats)
}
}
-int perf_event__process_auxtrace_error(struct perf_session *session,
+int perf_event__process_auxtrace_error(const struct perf_tool *tool __maybe_unused,
+ struct perf_session *session,
union perf_event *event)
{
if (auxtrace__dont_decode(session))
@@ -1890,7 +1910,7 @@ int __weak compat_auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail)
}
static int __auxtrace_mmap__read(struct mmap *map,
- struct auxtrace_record *itr,
+ struct auxtrace_record *itr, struct perf_env *env,
const struct perf_tool *tool, process_auxtrace_t fn,
bool snapshot, size_t snapshot_size)
{
@@ -1900,7 +1920,7 @@ static int __auxtrace_mmap__read(struct mmap *map,
size_t size, head_off, old_off, len1, len2, padding;
union perf_event ev;
void *data1, *data2;
- int kernel_is_64_bit = perf_env__kernel_is_64_bit(evsel__env(NULL));
+ int kernel_is_64_bit = perf_env__kernel_is_64_bit(env);
head = auxtrace_mmap__read_head(mm, kernel_is_64_bit);
@@ -2002,17 +2022,18 @@ static int __auxtrace_mmap__read(struct mmap *map,
}
int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
- const struct perf_tool *tool, process_auxtrace_t fn)
+ struct perf_env *env, const struct perf_tool *tool,
+ process_auxtrace_t fn)
{
- return __auxtrace_mmap__read(map, itr, tool, fn, false, 0);
+ return __auxtrace_mmap__read(map, itr, env, tool, fn, false, 0);
}
int auxtrace_mmap__read_snapshot(struct mmap *map,
- struct auxtrace_record *itr,
+ struct auxtrace_record *itr, struct perf_env *env,
const struct perf_tool *tool, process_auxtrace_t fn,
size_t snapshot_size)
{
- return __auxtrace_mmap__read(map, itr, tool, fn, true, snapshot_size);
+ return __auxtrace_mmap__read(map, itr, env, tool, fn, true, snapshot_size);
}
/**
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index b0db84d27b25..6947f3f284c0 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -8,21 +8,17 @@
#define __PERF_AUXTRACE_H
#include <sys/types.h>
-#include <errno.h>
-#include <stdbool.h>
-#include <stddef.h>
#include <stdio.h> // FILE
-#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/types.h>
-#include <perf/cpumap.h>
-#include <asm/bitsperlong.h>
#include <asm/barrier.h>
+#include <perf/cpumap.h>
union perf_event;
struct perf_session;
struct evlist;
struct evsel;
+struct perf_env;
struct perf_tool;
struct mmap;
struct perf_sample;
@@ -49,6 +45,7 @@ enum auxtrace_type {
PERF_AUXTRACE_ARM_SPE,
PERF_AUXTRACE_S390_CPUMSF,
PERF_AUXTRACE_HISI_PTT,
+ PERF_AUXTRACE_VPA_DTL,
};
enum itrace_period_type {
@@ -457,8 +454,6 @@ struct addr_filters {
struct auxtrace_cache;
-#ifdef HAVE_AUXTRACE_SUPPORT
-
u64 compat_auxtrace_mmap__read_head(struct auxtrace_mmap *mm);
int compat_auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail);
@@ -512,10 +507,11 @@ typedef int (*process_auxtrace_t)(const struct perf_tool *tool,
size_t len1, void *data2, size_t len2);
int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
- const struct perf_tool *tool, process_auxtrace_t fn);
+ struct perf_env *env, const struct perf_tool *tool,
+ process_auxtrace_t fn);
int auxtrace_mmap__read_snapshot(struct mmap *map,
- struct auxtrace_record *itr,
+ struct auxtrace_record *itr, struct perf_env *env,
const struct perf_tool *tool, process_auxtrace_t fn,
size_t snapshot_size);
@@ -612,11 +608,14 @@ void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int
int code, int cpu, pid_t pid, pid_t tid, u64 ip,
const char *msg, u64 timestamp);
-int perf_event__process_auxtrace_info(struct perf_session *session,
+int perf_event__process_auxtrace_info(const struct perf_tool *tool,
+ struct perf_session *session,
union perf_event *event);
-s64 perf_event__process_auxtrace(struct perf_session *session,
+s64 perf_event__process_auxtrace(const struct perf_tool *tool,
+ struct perf_session *session,
union perf_event *event);
-int perf_event__process_auxtrace_error(struct perf_session *session,
+int perf_event__process_auxtrace_error(const struct perf_tool *tool,
+ struct perf_session *session,
union perf_event *event);
int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
const char *str, int unset);
@@ -645,6 +644,7 @@ void auxtrace__free_events(struct perf_session *session);
void auxtrace__free(struct perf_session *session);
bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
struct evsel *evsel);
+u64 auxtrace_synth_id_range_start(struct evsel *evsel);
#define ITRACE_HELP \
" i[period]: synthesize instructions events\n" \
@@ -699,212 +699,4 @@ void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts)
opts->range_num = 0;
}
-#else
-#include "debug.h"
-
-static inline struct auxtrace_record *
-auxtrace_record__init(struct evlist *evlist __maybe_unused,
- int *err)
-{
- *err = 0;
- return NULL;
-}
-
-static inline
-void auxtrace_record__free(struct auxtrace_record *itr __maybe_unused)
-{
-}
-
-static inline
-int auxtrace_record__options(struct auxtrace_record *itr __maybe_unused,
- struct evlist *evlist __maybe_unused,
- struct record_opts *opts __maybe_unused)
-{
- return 0;
-}
-
-static inline
-int perf_event__process_auxtrace_info(struct perf_session *session __maybe_unused,
- union perf_event *event __maybe_unused)
-{
- return 0;
-}
-
-static inline
-s64 perf_event__process_auxtrace(struct perf_session *session __maybe_unused,
- union perf_event *event __maybe_unused)
-{
- return 0;
-}
-
-static inline
-int perf_event__process_auxtrace_error(struct perf_session *session __maybe_unused,
- union perf_event *event __maybe_unused)
-{
- return 0;
-}
-
-static inline
-void perf_session__auxtrace_error_inc(struct perf_session *session
- __maybe_unused,
- union perf_event *event
- __maybe_unused)
-{
-}
-
-static inline
-void events_stats__auxtrace_error_warn(const struct events_stats *stats
- __maybe_unused)
-{
-}
-
-static inline
-int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts __maybe_unused,
- const char *str __maybe_unused, int unset __maybe_unused)
-{
- pr_err("AUX area tracing not supported\n");
- return -EINVAL;
-}
-
-static inline
-int itrace_parse_synth_opts(const struct option *opt __maybe_unused,
- const char *str __maybe_unused,
- int unset __maybe_unused)
-{
- pr_err("AUX area tracing not supported\n");
- return -EINVAL;
-}
-
-static inline
-int auxtrace_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
- struct record_opts *opts __maybe_unused,
- const char *str)
-{
- if (!str)
- return 0;
- pr_err("AUX area tracing not supported\n");
- return -EINVAL;
-}
-
-static inline
-int auxtrace_parse_sample_options(struct auxtrace_record *itr __maybe_unused,
- struct evlist *evlist __maybe_unused,
- struct record_opts *opts __maybe_unused,
- const char *str)
-{
- if (!str)
- return 0;
- pr_err("AUX area tracing not supported\n");
- return -EINVAL;
-}
-
-static inline
-int auxtrace_parse_aux_action(struct evlist *evlist __maybe_unused)
-{
- pr_err("AUX area tracing not supported\n");
- return -EINVAL;
-}
-
-static inline
-int auxtrace__process_event(struct perf_session *session __maybe_unused,
- union perf_event *event __maybe_unused,
- struct perf_sample *sample __maybe_unused,
- const struct perf_tool *tool __maybe_unused)
-{
- return 0;
-}
-
-static inline
-void auxtrace__dump_auxtrace_sample(struct perf_session *session __maybe_unused,
- struct perf_sample *sample __maybe_unused)
-{
-}
-
-static inline
-int auxtrace__flush_events(struct perf_session *session __maybe_unused,
- const struct perf_tool *tool __maybe_unused)
-{
- return 0;
-}
-
-static inline
-void auxtrace__free_events(struct perf_session *session __maybe_unused)
-{
-}
-
-static inline
-void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache __maybe_unused)
-{
-}
-
-static inline
-void auxtrace__free(struct perf_session *session __maybe_unused)
-{
-}
-
-static inline
-int auxtrace_index__write(int fd __maybe_unused,
- struct list_head *head __maybe_unused)
-{
- return -EINVAL;
-}
-
-static inline
-int auxtrace_index__process(int fd __maybe_unused,
- u64 size __maybe_unused,
- struct perf_session *session __maybe_unused,
- bool needs_swap __maybe_unused)
-{
- return -EINVAL;
-}
-
-static inline
-void auxtrace_index__free(struct list_head *head __maybe_unused)
-{
-}
-
-static inline
-bool auxtrace__evsel_is_auxtrace(struct perf_session *session __maybe_unused,
- struct evsel *evsel __maybe_unused)
-{
- return false;
-}
-
-static inline
-int auxtrace_parse_filters(struct evlist *evlist __maybe_unused)
-{
- return 0;
-}
-
-int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
- struct auxtrace_mmap_params *mp,
- void *userpg, int fd);
-void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
-void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
- off_t auxtrace_offset,
- unsigned int auxtrace_pages,
- bool auxtrace_overwrite);
-void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
- struct evlist *evlist,
- struct evsel *evsel, int idx);
-
-#define ITRACE_HELP ""
-
-static inline
-void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts
- __maybe_unused,
- struct perf_time_interval *ptime_range
- __maybe_unused,
- int range_num __maybe_unused)
-{
-}
-
-static inline
-void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts
- __maybe_unused)
-{
-}
-
-#endif
-
#endif
diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
index c81444059ad0..2298cd396c42 100644
--- a/tools/perf/util/bpf-event.c
+++ b/tools/perf/util/bpf-event.c
@@ -1,13 +1,21 @@
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
#include <stdlib.h>
+#include <string.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
+#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
+#include <linux/perf_event.h>
#include <linux/string.h>
+#include <linux/zalloc.h>
#include <internal/lib.h>
+#include <perf/event.h>
#include <symbol/kallsyms.h>
#include "bpf-event.h"
#include "bpf-utils.h"
@@ -151,6 +159,362 @@ static int synthesize_bpf_prog_name(char *buf, int size,
return name_len;
}
+#ifdef HAVE_LIBBPF_STRINGS_SUPPORT
+
+#define BPF_METADATA_PREFIX "bpf_metadata_"
+#define BPF_METADATA_PREFIX_LEN (sizeof(BPF_METADATA_PREFIX) - 1)
+
+static bool name_has_bpf_metadata_prefix(const char **s)
+{
+ if (strncmp(*s, BPF_METADATA_PREFIX, BPF_METADATA_PREFIX_LEN) != 0)
+ return false;
+ *s += BPF_METADATA_PREFIX_LEN;
+ return true;
+}
+
+struct bpf_metadata_map {
+ struct btf *btf;
+ const struct btf_type *datasec;
+ void *rodata;
+ size_t rodata_size;
+ unsigned int num_vars;
+};
+
+static int bpf_metadata_read_map_data(__u32 map_id, struct bpf_metadata_map *map)
+{
+ int map_fd;
+ struct bpf_map_info map_info;
+ __u32 map_info_len;
+ int key;
+ struct btf *btf;
+ const struct btf_type *datasec;
+ struct btf_var_secinfo *vsi;
+ unsigned int vlen, vars;
+ void *rodata;
+
+ map_fd = bpf_map_get_fd_by_id(map_id);
+ if (map_fd < 0)
+ return -1;
+
+ memset(&map_info, 0, sizeof(map_info));
+ map_info_len = sizeof(map_info);
+ if (bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len) < 0)
+ goto out_close;
+
+ /* If it's not an .rodata map, don't bother. */
+ if (map_info.type != BPF_MAP_TYPE_ARRAY ||
+ map_info.key_size != sizeof(int) ||
+ map_info.max_entries != 1 ||
+ !map_info.btf_value_type_id ||
+ !strstr(map_info.name, ".rodata")) {
+ goto out_close;
+ }
+
+ btf = btf__load_from_kernel_by_id(map_info.btf_id);
+ if (!btf)
+ goto out_close;
+ datasec = btf__type_by_id(btf, map_info.btf_value_type_id);
+ if (!btf_is_datasec(datasec))
+ goto out_free_btf;
+
+ /*
+ * If there aren't any variables with the "bpf_metadata_" prefix,
+ * don't bother.
+ */
+ vlen = btf_vlen(datasec);
+ vsi = btf_var_secinfos(datasec);
+ vars = 0;
+ for (unsigned int i = 0; i < vlen; i++, vsi++) {
+ const struct btf_type *t_var = btf__type_by_id(btf, vsi->type);
+ const char *name = btf__name_by_offset(btf, t_var->name_off);
+
+ if (name_has_bpf_metadata_prefix(&name))
+ vars++;
+ }
+ if (vars == 0)
+ goto out_free_btf;
+
+ rodata = zalloc(map_info.value_size);
+ if (!rodata)
+ goto out_free_btf;
+ key = 0;
+ if (bpf_map_lookup_elem(map_fd, &key, rodata)) {
+ free(rodata);
+ goto out_free_btf;
+ }
+ close(map_fd);
+
+ map->btf = btf;
+ map->datasec = datasec;
+ map->rodata = rodata;
+ map->rodata_size = map_info.value_size;
+ map->num_vars = vars;
+ return 0;
+
+out_free_btf:
+ btf__free(btf);
+out_close:
+ close(map_fd);
+ return -1;
+}
+
+struct format_btf_ctx {
+ char *buf;
+ size_t buf_size;
+ size_t buf_idx;
+};
+
+static void format_btf_cb(void *arg, const char *fmt, va_list ap)
+{
+ int n;
+ struct format_btf_ctx *ctx = (struct format_btf_ctx *)arg;
+
+ n = vsnprintf(ctx->buf + ctx->buf_idx, ctx->buf_size - ctx->buf_idx,
+ fmt, ap);
+ ctx->buf_idx += n;
+ if (ctx->buf_idx >= ctx->buf_size)
+ ctx->buf_idx = ctx->buf_size;
+}
+
+static void format_btf_variable(struct btf *btf, char *buf, size_t buf_size,
+ const struct btf_type *t, const void *btf_data)
+{
+ struct format_btf_ctx ctx = {
+ .buf = buf,
+ .buf_idx = 0,
+ .buf_size = buf_size,
+ };
+ const struct btf_dump_type_data_opts opts = {
+ .sz = sizeof(struct btf_dump_type_data_opts),
+ .skip_names = 1,
+ .compact = 1,
+ .emit_strings = 1,
+ };
+ struct btf_dump *d;
+ size_t btf_size;
+
+ d = btf_dump__new(btf, format_btf_cb, &ctx, NULL);
+ btf_size = btf__resolve_size(btf, t->type);
+ btf_dump__dump_type_data(d, t->type, btf_data, btf_size, &opts);
+ btf_dump__free(d);
+}
+
+static void bpf_metadata_fill_event(struct bpf_metadata_map *map,
+ struct perf_record_bpf_metadata *bpf_metadata_event)
+{
+ struct btf_var_secinfo *vsi;
+ unsigned int i, vlen;
+
+ memset(bpf_metadata_event->prog_name, 0, BPF_PROG_NAME_LEN);
+ vlen = btf_vlen(map->datasec);
+ vsi = btf_var_secinfos(map->datasec);
+
+ for (i = 0; i < vlen; i++, vsi++) {
+ const struct btf_type *t_var = btf__type_by_id(map->btf,
+ vsi->type);
+ const char *name = btf__name_by_offset(map->btf,
+ t_var->name_off);
+ const __u64 nr_entries = bpf_metadata_event->nr_entries;
+ struct perf_record_bpf_metadata_entry *entry;
+
+ if (!name_has_bpf_metadata_prefix(&name))
+ continue;
+
+ if (nr_entries >= (__u64)map->num_vars)
+ break;
+
+ entry = &bpf_metadata_event->entries[nr_entries];
+ memset(entry, 0, sizeof(*entry));
+ snprintf(entry->key, BPF_METADATA_KEY_LEN, "%s", name);
+ format_btf_variable(map->btf, entry->value,
+ BPF_METADATA_VALUE_LEN, t_var,
+ map->rodata + vsi->offset);
+ bpf_metadata_event->nr_entries++;
+ }
+}
+
+static void bpf_metadata_free_map_data(struct bpf_metadata_map *map)
+{
+ btf__free(map->btf);
+ free(map->rodata);
+}
+
+static struct bpf_metadata *bpf_metadata_alloc(__u32 nr_prog_tags,
+ __u32 nr_variables)
+{
+ struct bpf_metadata *metadata;
+ size_t event_size;
+
+ metadata = zalloc(sizeof(struct bpf_metadata));
+ if (!metadata)
+ return NULL;
+
+ metadata->prog_names = zalloc(nr_prog_tags * sizeof(char *));
+ if (!metadata->prog_names) {
+ bpf_metadata_free(metadata);
+ return NULL;
+ }
+ for (__u32 prog_index = 0; prog_index < nr_prog_tags; prog_index++) {
+ metadata->prog_names[prog_index] = zalloc(BPF_PROG_NAME_LEN);
+ if (!metadata->prog_names[prog_index]) {
+ bpf_metadata_free(metadata);
+ return NULL;
+ }
+ metadata->nr_prog_names++;
+ }
+
+ event_size = sizeof(metadata->event->bpf_metadata) +
+ nr_variables * sizeof(metadata->event->bpf_metadata.entries[0]);
+ metadata->event = zalloc(event_size);
+ if (!metadata->event) {
+ bpf_metadata_free(metadata);
+ return NULL;
+ }
+ metadata->event->bpf_metadata = (struct perf_record_bpf_metadata) {
+ .header = {
+ .type = PERF_RECORD_BPF_METADATA,
+ .size = event_size,
+ },
+ .nr_entries = 0,
+ };
+
+ return metadata;
+}
+
+static struct bpf_metadata *bpf_metadata_create(struct bpf_prog_info *info)
+{
+ struct bpf_metadata *metadata;
+ const __u32 *map_ids = (__u32 *)(uintptr_t)info->map_ids;
+
+ for (__u32 map_index = 0; map_index < info->nr_map_ids; map_index++) {
+ struct bpf_metadata_map map;
+
+ if (bpf_metadata_read_map_data(map_ids[map_index], &map) != 0)
+ continue;
+
+ metadata = bpf_metadata_alloc(info->nr_prog_tags, map.num_vars);
+ if (!metadata)
+ continue;
+
+ bpf_metadata_fill_event(&map, &metadata->event->bpf_metadata);
+
+ for (__u32 index = 0; index < info->nr_prog_tags; index++) {
+ synthesize_bpf_prog_name(metadata->prog_names[index],
+ BPF_PROG_NAME_LEN, info,
+ map.btf, index);
+ }
+
+ bpf_metadata_free_map_data(&map);
+
+ return metadata;
+ }
+
+ return NULL;
+}
+
+static int synthesize_perf_record_bpf_metadata(const struct bpf_metadata *metadata,
+ const struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ const size_t event_size = metadata->event->header.size;
+ union perf_event *event;
+ int err = 0;
+
+ event = zalloc(event_size + machine->id_hdr_size);
+ if (!event)
+ return -1;
+ memcpy(event, metadata->event, event_size);
+ memset((void *)event + event->header.size, 0, machine->id_hdr_size);
+ event->header.size += machine->id_hdr_size;
+ for (__u32 index = 0; index < metadata->nr_prog_names; index++) {
+ memcpy(event->bpf_metadata.prog_name,
+ metadata->prog_names[index], BPF_PROG_NAME_LEN);
+ err = perf_tool__process_synth_event(tool, event, machine,
+ process);
+ if (err != 0)
+ break;
+ }
+
+ free(event);
+ return err;
+}
+
+void bpf_metadata_free(struct bpf_metadata *metadata)
+{
+ if (metadata == NULL)
+ return;
+ for (__u32 index = 0; index < metadata->nr_prog_names; index++)
+ free(metadata->prog_names[index]);
+ free(metadata->prog_names);
+ free(metadata->event);
+ free(metadata);
+}
+
+#else /* HAVE_LIBBPF_STRINGS_SUPPORT */
+
+static struct bpf_metadata *bpf_metadata_create(struct bpf_prog_info *info __maybe_unused)
+{
+ return NULL;
+}
+
+static int synthesize_perf_record_bpf_metadata(const struct bpf_metadata *metadata __maybe_unused,
+ const struct perf_tool *tool __maybe_unused,
+ perf_event__handler_t process __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ return 0;
+}
+
+void bpf_metadata_free(struct bpf_metadata *metadata __maybe_unused)
+{
+}
+
+#endif /* HAVE_LIBBPF_STRINGS_SUPPORT */
+
+struct bpf_metadata_final_ctx {
+ const struct perf_tool *tool;
+ perf_event__handler_t process;
+ struct machine *machine;
+};
+
+static void synthesize_final_bpf_metadata_cb(struct bpf_prog_info_node *node,
+ void *data)
+{
+ struct bpf_metadata_final_ctx *ctx = (struct bpf_metadata_final_ctx *)data;
+ struct bpf_metadata *metadata = node->metadata;
+ int err;
+
+ if (metadata == NULL)
+ return;
+ err = synthesize_perf_record_bpf_metadata(metadata, ctx->tool,
+ ctx->process, ctx->machine);
+ if (err != 0) {
+ const char *prog_name = metadata->prog_names[0];
+
+ if (prog_name != NULL)
+ pr_warning("Couldn't synthesize final BPF metadata for %s.\n", prog_name);
+ else
+ pr_warning("Couldn't synthesize final BPF metadata.\n");
+ }
+ bpf_metadata_free(metadata);
+ node->metadata = NULL;
+}
+
+void perf_event__synthesize_final_bpf_metadata(struct perf_session *session,
+ perf_event__handler_t process)
+{
+ struct perf_env *env = &session->header.env;
+ struct bpf_metadata_final_ctx ctx = {
+ .tool = session->tool,
+ .process = process,
+ .machine = &session->machines.host,
+ };
+
+ perf_env__iterate_bpf_prog_info(env, synthesize_final_bpf_metadata_cb,
+ &ctx);
+}
+
/*
* Synthesize PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for one bpf
* program. One PERF_RECORD_BPF_EVENT is generated for the program. And
@@ -173,6 +537,7 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
const struct perf_tool *tool = session->tool;
struct bpf_prog_info_node *info_node;
struct perf_bpil *info_linear;
+ struct bpf_metadata *metadata;
struct bpf_prog_info *info;
struct btf *btf = NULL;
struct perf_env *env;
@@ -184,7 +549,7 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
* for perf-record and perf-report use header.env;
* otherwise, use global perf_env.
*/
- env = session->data ? &session->header.env : &perf_env;
+ env = perf_session__env(session);
arrays = 1UL << PERF_BPIL_JITED_KSYMS;
arrays |= 1UL << PERF_BPIL_JITED_FUNC_LENS;
@@ -193,6 +558,7 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
arrays |= 1UL << PERF_BPIL_JITED_INSNS;
arrays |= 1UL << PERF_BPIL_LINE_INFO;
arrays |= 1UL << PERF_BPIL_JITED_LINE_INFO;
+ arrays |= 1UL << PERF_BPIL_MAP_IDS;
info_linear = get_bpf_prog_info_linear(fd, arrays);
if (IS_ERR_OR_NULL(info_linear)) {
@@ -289,10 +655,17 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
}
info_node->info_linear = info_linear;
+ info_node->metadata = NULL;
if (!perf_env__insert_bpf_prog_info(env, info_node)) {
- free(info_linear);
+ /*
+ * Insert failed, likely because of a duplicate event
+		 * made by the sideband thread. Skip synthesizing the
+		 * metadata.
+ */
free(info_node);
+ goto out;
}
+ /* info_linear is now owned by info_node and shouldn't be freed below. */
info_linear = NULL;
/*
@@ -301,6 +674,15 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
*/
err = perf_tool__process_synth_event(tool, event,
machine, process);
+
+ /* Synthesize PERF_RECORD_BPF_METADATA */
+ metadata = bpf_metadata_create(info);
+ if (metadata != NULL) {
+ err = synthesize_perf_record_bpf_metadata(metadata,
+ tool, process,
+ machine);
+ bpf_metadata_free(metadata);
+ }
}
out:
@@ -451,18 +833,18 @@ int perf_event__synthesize_bpf_events(struct perf_session *session,
return err;
}
-static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
+static int perf_env__add_bpf_info(struct perf_env *env, u32 id)
{
struct bpf_prog_info_node *info_node;
struct perf_bpil *info_linear;
struct btf *btf = NULL;
u64 arrays;
u32 btf_id;
- int fd;
+ int fd, err = 0;
fd = bpf_prog_get_fd_by_id(id);
if (fd < 0)
- return;
+ return -EINVAL;
arrays = 1UL << PERF_BPIL_JITED_KSYMS;
arrays |= 1UL << PERF_BPIL_JITED_FUNC_LENS;
@@ -471,10 +853,12 @@ static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
arrays |= 1UL << PERF_BPIL_JITED_INSNS;
arrays |= 1UL << PERF_BPIL_LINE_INFO;
arrays |= 1UL << PERF_BPIL_JITED_LINE_INFO;
+ arrays |= 1UL << PERF_BPIL_MAP_IDS;
info_linear = get_bpf_prog_info_linear(fd, arrays);
if (IS_ERR_OR_NULL(info_linear)) {
pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
+ err = PTR_ERR(info_linear);
goto out;
}
@@ -483,39 +867,48 @@ static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
info_node = malloc(sizeof(struct bpf_prog_info_node));
if (info_node) {
info_node->info_linear = info_linear;
+ info_node->metadata = bpf_metadata_create(&info_linear->info);
if (!perf_env__insert_bpf_prog_info(env, info_node)) {
+ pr_debug("%s: duplicate add bpf info request for id %u\n",
+ __func__, btf_id);
free(info_linear);
free(info_node);
+ goto out;
}
- } else
+ } else {
free(info_linear);
+ err = -ENOMEM;
+ goto out;
+ }
if (btf_id == 0)
goto out;
btf = btf__load_from_kernel_by_id(btf_id);
- if (libbpf_get_error(btf)) {
- pr_debug("%s: failed to get BTF of id %u, aborting\n",
- __func__, btf_id);
- goto out;
+ if (!btf) {
+ err = -errno;
+ pr_debug("%s: failed to get BTF of id %u %d\n", __func__, btf_id, err);
+ } else {
+ perf_env__fetch_btf(env, btf_id, btf);
}
- perf_env__fetch_btf(env, btf_id, btf);
out:
btf__free(btf);
close(fd);
+ return err;
}
static int bpf_event__sb_cb(union perf_event *event, void *data)
{
struct perf_env *env = data;
+ int ret = 0;
if (event->header.type != PERF_RECORD_BPF_EVENT)
return -1;
switch (event->bpf.type) {
case PERF_BPF_EVENT_PROG_LOAD:
- perf_env__add_bpf_info(env, event->bpf.id);
+ ret = perf_env__add_bpf_info(env, event->bpf.id);
case PERF_BPF_EVENT_PROG_UNLOAD:
/*
@@ -529,7 +922,7 @@ static int bpf_event__sb_cb(union perf_event *event, void *data)
break;
}
- return 0;
+ return ret;
}
int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env)
diff --git a/tools/perf/util/bpf-event.h b/tools/perf/util/bpf-event.h
index e2f0420905f5..60d2c6637af5 100644
--- a/tools/perf/util/bpf-event.h
+++ b/tools/perf/util/bpf-event.h
@@ -17,8 +17,15 @@ struct record_opts;
struct evlist;
struct target;
+struct bpf_metadata {
+ union perf_event *event;
+ char **prog_names;
+ __u64 nr_prog_names;
+};
+
struct bpf_prog_info_node {
struct perf_bpil *info_linear;
+ struct bpf_metadata *metadata;
struct rb_node rb_node;
};
@@ -36,6 +43,7 @@ int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env);
void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
struct perf_env *env,
FILE *fp);
+void bpf_metadata_free(struct bpf_metadata *metadata);
#else
static inline int machine__process_bpf(struct machine *machine __maybe_unused,
union perf_event *event __maybe_unused,
@@ -56,5 +64,10 @@ static inline void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info _
{
}
+
+static inline void bpf_metadata_free(struct bpf_metadata *metadata __maybe_unused)
+{
+
+}
#endif // HAVE_LIBBPF_SUPPORT
#endif
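
For context on the metadata path added to bpf-event.c above, a minimal standalone sketch of the same .rodata lookup by map id (error handling trimmed; the function name is illustrative, not part of the patch):

	#include <bpf/bpf.h>
	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>

	/* Read the single value of a one-entry .rodata array map, by map id. */
	static void *read_rodata_by_id(__u32 map_id, __u32 *value_size)
	{
		struct bpf_map_info info = {};
		__u32 len = sizeof(info);
		int key = 0, fd = bpf_map_get_fd_by_id(map_id);
		void *buf = NULL;

		if (fd < 0)
			return NULL;
		if (bpf_obj_get_info_by_fd(fd, &info, &len) == 0 &&
		    strstr(info.name, ".rodata") && info.max_entries == 1) {
			buf = calloc(1, info.value_size);
			if (buf && bpf_map_lookup_elem(fd, &key, buf) != 0) {
				free(buf);
				buf = NULL;
			} else if (buf) {
				*value_size = info.value_size;
			}
		}
		close(fd);
		return buf;
	}
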
diff --git a/tools/perf/util/bpf-filter.c b/tools/perf/util/bpf-filter.c
index a4fdf6911ec1..1a2e7b388d57 100644
--- a/tools/perf/util/bpf-filter.c
+++ b/tools/perf/util/bpf-filter.c
@@ -52,9 +52,11 @@
#include <internal/xyarray.h>
#include <perf/threadmap.h>
+#include "util/cap.h"
#include "util/debug.h"
#include "util/evsel.h"
#include "util/target.h"
+#include "util/bpf-utils.h"
#include "util/bpf-filter.h"
#include <util/bpf-filter-flex.h>
@@ -449,7 +451,13 @@ int perf_bpf_filter__prepare(struct evsel *evsel, struct target *target)
struct bpf_program *prog;
struct bpf_link *link;
struct perf_bpf_filter_entry *entry;
- bool needs_idx_hash = !target__has_cpu(target) && !target->uid_str;
+ bool needs_idx_hash = !target__has_cpu(target);
+#if LIBBPF_CURRENT_VERSION_GEQ(1, 7)
+ DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts,
+ .dont_enable = true);
+#else
+ DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
+#endif
entry = calloc(MAX_FILTERS, sizeof(*entry));
if (entry == NULL)
@@ -521,7 +529,8 @@ int perf_bpf_filter__prepare(struct evsel *evsel, struct target *target)
prog = skel->progs.perf_sample_filter;
for (x = 0; x < xyarray__max_x(evsel->core.fd); x++) {
for (y = 0; y < xyarray__max_y(evsel->core.fd); y++) {
- link = bpf_program__attach_perf_event(prog, FD(evsel, x, y));
+ link = bpf_program__attach_perf_event_opts(prog, FD(evsel, x, y),
+ &pe_opts);
if (IS_ERR(link)) {
pr_err("Failed to attach perf sample-filter program\n");
ret = PTR_ERR(link);
@@ -618,11 +627,38 @@ struct perf_bpf_filter_expr *perf_bpf_filter_expr__new(enum perf_bpf_filter_term
return expr;
}
+static bool check_bpf_filter_capable(void)
+{
+ bool used_root;
+
+ if (perf_cap__capable(CAP_BPF, &used_root))
+ return true;
+
+ if (!used_root) {
+ /* Check if root already pinned the filter programs and maps */
+ int fd = get_pinned_fd("filters");
+
+ if (fd >= 0) {
+ close(fd);
+ return true;
+ }
+ }
+
+ pr_err("Error: BPF filter only works for %s!\n"
+ "\tPlease run 'perf record --setup-filter pin' as root first.\n",
+ used_root ? "root" : "users with the CAP_BPF capability");
+
+ return false;
+}
+
int perf_bpf_filter__parse(struct list_head *expr_head, const char *str)
{
YY_BUFFER_STATE buffer;
int ret;
+ if (!check_bpf_filter_capable())
+ return -EPERM;
+
buffer = perf_bpf_filter__scan_string(str);
ret = perf_bpf_filter_parse(expr_head);
diff --git a/tools/perf/util/bpf-filter.h b/tools/perf/util/bpf-filter.h
index 916ed7770b73..818c554b91b2 100644
--- a/tools/perf/util/bpf-filter.h
+++ b/tools/perf/util/bpf-filter.h
@@ -5,6 +5,7 @@
#include <linux/list.h>
#include "bpf_skel/sample-filter.h"
+#include "util/debug.h"
struct perf_bpf_filter_expr {
struct list_head list;
@@ -35,9 +36,13 @@ int perf_bpf_filter__unpin(void);
#else /* !HAVE_BPF_SKEL */
+#include <errno.h>
+
static inline int perf_bpf_filter__parse(struct list_head *expr_head __maybe_unused,
const char *str __maybe_unused)
{
+ pr_err("Error: BPF filter is requested but perf is not built with BPF.\n"
+ "\tPlease make sure to build with libbpf and BPF skeleton.\n");
return -EOPNOTSUPP;
}
static inline int perf_bpf_filter__prepare(struct evsel *evsel __maybe_unused,
diff --git a/tools/perf/util/bpf-trace-summary.c b/tools/perf/util/bpf-trace-summary.c
new file mode 100644
index 000000000000..cf6e1e4402d5
--- /dev/null
+++ b/tools/perf/util/bpf-trace-summary.c
@@ -0,0 +1,465 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <errno.h>
+#include <inttypes.h>
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "dwarf-regs.h" /* for EM_HOST */
+#include "syscalltbl.h"
+#include "util/cgroup.h"
+#include "util/hashmap.h"
+#include "util/trace.h"
+#include "util/util.h"
+#include <bpf/bpf.h>
+#include <linux/rbtree.h>
+#include <linux/time64.h>
+#include <tools/libc_compat.h> /* reallocarray */
+
+#include "bpf_skel/syscall_summary.h"
+#include "bpf_skel/syscall_summary.skel.h"
+
+
+static struct syscall_summary_bpf *skel;
+static struct rb_root cgroups = RB_ROOT;
+
+int trace_prepare_bpf_summary(enum trace_summary_mode mode)
+{
+ skel = syscall_summary_bpf__open();
+ if (skel == NULL) {
+ fprintf(stderr, "failed to open syscall summary bpf skeleton\n");
+ return -1;
+ }
+
+ if (mode == SUMMARY__BY_THREAD)
+ skel->rodata->aggr_mode = SYSCALL_AGGR_THREAD;
+ else if (mode == SUMMARY__BY_CGROUP)
+ skel->rodata->aggr_mode = SYSCALL_AGGR_CGROUP;
+ else
+ skel->rodata->aggr_mode = SYSCALL_AGGR_CPU;
+
+ if (cgroup_is_v2("perf_event") > 0)
+ skel->rodata->use_cgroup_v2 = 1;
+
+ if (syscall_summary_bpf__load(skel) < 0) {
+ fprintf(stderr, "failed to load syscall summary bpf skeleton\n");
+ return -1;
+ }
+
+ if (syscall_summary_bpf__attach(skel) < 0) {
+ fprintf(stderr, "failed to attach syscall summary bpf skeleton\n");
+ return -1;
+ }
+
+ if (mode == SUMMARY__BY_CGROUP)
+ read_all_cgroups(&cgroups);
+
+ return 0;
+}
+
+void trace_start_bpf_summary(void)
+{
+ skel->bss->enabled = 1;
+}
+
+void trace_end_bpf_summary(void)
+{
+ skel->bss->enabled = 0;
+}
+
+struct syscall_node {
+ int syscall_nr;
+ struct syscall_stats stats;
+};
+
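+/*
+ * Relative stddev in percent, from the running sums kept by the BPF
+ * program:
+ *
+ *   mean     = total_time / count
+ *   variance = (squared_sum - total_time^2 / count) / (count - 1)
+ *   result   = 100 * sqrt(variance / count) / mean
+ *
+ * The variance uses the sum-of-squares identity; the extra 1/count
+ * under the sqrt reports the stddev of the mean rather than of a
+ * single sample.
+ */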
+static double rel_stddev(struct syscall_stats *stat)
+{
+ double variance, average;
+
+ if (stat->count < 2)
+ return 0;
+
+ average = (double)stat->total_time / stat->count;
+
+ variance = stat->squared_sum;
+ variance -= (stat->total_time * stat->total_time) / stat->count;
+ variance /= stat->count - 1;
+
+ return 100 * sqrt(variance / stat->count) / average;
+}
+
+/*
+ * The syscall_data maintains syscall stats ordered by total time.
+ * It supports different summary modes like per-thread or global.
+ *
+ * For per-thread stats, it uses a two-level data structure:
+ * syscall_data is keyed by TID and has an array of nodes, each of
+ * which represents one syscall for the thread.
+ *
+ * For global stats, it's technically still two-level, but per-CPU
+ * analysis is not needed, so it's keyed by the syscall number to
+ * combine stats from different CPUs. And syscall_data always has a
+ * single syscall_node, so it effectively works as a flat hierarchy.
+ *
+ * For per-cgroup stats, it uses the same two-level structure as
+ * per-thread: syscall_data is keyed by cgroup ID and has an array of
+ * nodes, each of which represents one syscall for the cgroup.
+ */
+struct syscall_data {
+ u64 key; /* tid if AGGR_THREAD, syscall-nr if AGGR_CPU, cgroup if AGGR_CGROUP */
+ int nr_events;
+ int nr_nodes;
+ u64 total_time;
+ struct syscall_node *nodes;
+};
+
+static int datacmp(const void *a, const void *b)
+{
+ const struct syscall_data * const *sa = a;
+ const struct syscall_data * const *sb = b;
+
+ return (*sa)->total_time > (*sb)->total_time ? -1 : 1;
+}
+
+static int nodecmp(const void *a, const void *b)
+{
+ const struct syscall_node *na = a;
+ const struct syscall_node *nb = b;
+
+ return na->stats.total_time > nb->stats.total_time ? -1 : 1;
+}
+
+static size_t sc_node_hash(long key, void *ctx __maybe_unused)
+{
+ return key;
+}
+
+static bool sc_node_equal(long key1, long key2, void *ctx __maybe_unused)
+{
+ return key1 == key2;
+}
+
+static int print_common_stats(struct syscall_data *data, int max_summary, FILE *fp)
+{
+ int printed = 0;
+
+ if (max_summary == 0 || max_summary > data->nr_nodes)
+ max_summary = data->nr_nodes;
+
+ for (int i = 0; i < max_summary; i++) {
+ struct syscall_node *node = &data->nodes[i];
+ struct syscall_stats *stat = &node->stats;
+ double total = (double)(stat->total_time) / NSEC_PER_MSEC;
+ double min = (double)(stat->min_time) / NSEC_PER_MSEC;
+ double max = (double)(stat->max_time) / NSEC_PER_MSEC;
+ double avg = total / stat->count;
+ const char *name;
+
+ /* TODO: support other ABIs */
+ name = syscalltbl__name(EM_HOST, node->syscall_nr);
+ if (name)
+ printed += fprintf(fp, " %-15s", name);
+ else
+ printed += fprintf(fp, " syscall:%-7d", node->syscall_nr);
+
+ printed += fprintf(fp, " %8u %6u %9.3f %9.3f %9.3f %9.3f %9.2f%%\n",
+ stat->count, stat->error, total, min, avg, max,
+ rel_stddev(stat));
+ }
+ return printed;
+}
+
+static int update_thread_stats(struct hashmap *hash, struct syscall_key *map_key,
+ struct syscall_stats *map_data)
+{
+ struct syscall_data *data;
+ struct syscall_node *nodes;
+
+ if (!hashmap__find(hash, map_key->cpu_or_tid, &data)) {
+ data = zalloc(sizeof(*data));
+ if (data == NULL)
+ return -ENOMEM;
+
+ data->key = map_key->cpu_or_tid;
+ if (hashmap__add(hash, data->key, data) < 0) {
+ free(data);
+ return -ENOMEM;
+ }
+ }
+
+ /* update thread total stats */
+ data->nr_events += map_data->count;
+ data->total_time += map_data->total_time;
+
+ nodes = reallocarray(data->nodes, data->nr_nodes + 1, sizeof(*nodes));
+ if (nodes == NULL)
+ return -ENOMEM;
+
+ data->nodes = nodes;
+ nodes = &data->nodes[data->nr_nodes++];
+ nodes->syscall_nr = map_key->nr;
+
+ /* each thread has an entry for each syscall, just use the stat */
+ memcpy(&nodes->stats, map_data, sizeof(*map_data));
+ return 0;
+}
+
+static int print_thread_stat(struct syscall_data *data, int max_summary, FILE *fp)
+{
+ int printed = 0;
+
+ qsort(data->nodes, data->nr_nodes, sizeof(*data->nodes), nodecmp);
+
+ printed += fprintf(fp, " thread (%d), ", (int)data->key);
+ printed += fprintf(fp, "%d events\n\n", data->nr_events);
+
+ printed += fprintf(fp, " syscall calls errors total min avg max stddev\n");
+ printed += fprintf(fp, " (msec) (msec) (msec) (msec) (%%)\n");
+ printed += fprintf(fp, " --------------- -------- ------ -------- --------- --------- --------- ------\n");
+
+ printed += print_common_stats(data, max_summary, fp);
+ printed += fprintf(fp, "\n\n");
+
+ return printed;
+}
+
+static int print_thread_stats(struct syscall_data **data, int nr_data, int max_summary, FILE *fp)
+{
+ int printed = 0;
+
+ for (int i = 0; i < nr_data; i++)
+ printed += print_thread_stat(data[i], max_summary, fp);
+
+ return printed;
+}
+
+static int update_total_stats(struct hashmap *hash, struct syscall_key *map_key,
+ struct syscall_stats *map_data)
+{
+ struct syscall_data *data;
+ struct syscall_stats *stat;
+
+ if (!hashmap__find(hash, map_key->nr, &data)) {
+ data = zalloc(sizeof(*data));
+ if (data == NULL)
+ return -ENOMEM;
+
+ data->nodes = zalloc(sizeof(*data->nodes));
+ if (data->nodes == NULL) {
+ free(data);
+ return -ENOMEM;
+ }
+
+ data->nr_nodes = 1;
+ data->key = map_key->nr;
+ data->nodes->syscall_nr = data->key;
+
+ if (hashmap__add(hash, data->key, data) < 0) {
+ free(data->nodes);
+ free(data);
+ return -ENOMEM;
+ }
+ }
+
+ /* update total stats for this syscall */
+ data->nr_events += map_data->count;
+ data->total_time += map_data->total_time;
+
+ /* This is sum of the same syscall from different CPUs */
+ stat = &data->nodes->stats;
+
+ stat->total_time += map_data->total_time;
+ stat->squared_sum += map_data->squared_sum;
+ stat->count += map_data->count;
+ stat->error += map_data->error;
+
+ if (stat->max_time < map_data->max_time)
+ stat->max_time = map_data->max_time;
+ if (stat->min_time > map_data->min_time || stat->min_time == 0)
+ stat->min_time = map_data->min_time;
+
+ return 0;
+}
+
+static int print_total_stats(struct syscall_data **data, int nr_data, int max_summary, FILE *fp)
+{
+ int printed = 0;
+ int nr_events = 0;
+
+ for (int i = 0; i < nr_data; i++)
+ nr_events += data[i]->nr_events;
+
+ printed += fprintf(fp, " total, %d events\n\n", nr_events);
+
+ printed += fprintf(fp, " syscall calls errors total min avg max stddev\n");
+ printed += fprintf(fp, " (msec) (msec) (msec) (msec) (%%)\n");
+ printed += fprintf(fp, " --------------- -------- ------ -------- --------- --------- --------- ------\n");
+
+ if (max_summary == 0 || max_summary > nr_data)
+ max_summary = nr_data;
+
+ for (int i = 0; i < max_summary; i++)
+ printed += print_common_stats(data[i], max_summary, fp);
+
+ printed += fprintf(fp, "\n\n");
+ return printed;
+}
+
+static int update_cgroup_stats(struct hashmap *hash, struct syscall_key *map_key,
+ struct syscall_stats *map_data)
+{
+ struct syscall_data *data;
+ struct syscall_node *nodes;
+
+ if (!hashmap__find(hash, map_key->cgroup, &data)) {
+ data = zalloc(sizeof(*data));
+ if (data == NULL)
+ return -ENOMEM;
+
+ data->key = map_key->cgroup;
+ if (hashmap__add(hash, data->key, data) < 0) {
+ free(data);
+ return -ENOMEM;
+ }
+ }
+
+	/* update cgroup total stats */
+ data->nr_events += map_data->count;
+ data->total_time += map_data->total_time;
+
+ nodes = reallocarray(data->nodes, data->nr_nodes + 1, sizeof(*nodes));
+ if (nodes == NULL)
+ return -ENOMEM;
+
+ data->nodes = nodes;
+ nodes = &data->nodes[data->nr_nodes++];
+ nodes->syscall_nr = map_key->nr;
+
+	/* each cgroup has an entry for each syscall, just use the stat */
+ memcpy(&nodes->stats, map_data, sizeof(*map_data));
+ return 0;
+}
+
+static int print_cgroup_stat(struct syscall_data *data, int max_summary, FILE *fp)
+{
+ int printed = 0;
+ struct cgroup *cgrp = __cgroup__find(&cgroups, data->key);
+
+ qsort(data->nodes, data->nr_nodes, sizeof(*data->nodes), nodecmp);
+
+ if (cgrp)
+ printed += fprintf(fp, " cgroup %s,", cgrp->name);
+ else
+ printed += fprintf(fp, " cgroup id:%lu,", (unsigned long)data->key);
+
+ printed += fprintf(fp, " %d events\n\n", data->nr_events);
+
+ printed += fprintf(fp, " syscall calls errors total min avg max stddev\n");
+ printed += fprintf(fp, " (msec) (msec) (msec) (msec) (%%)\n");
+ printed += fprintf(fp, " --------------- -------- ------ -------- --------- --------- --------- ------\n");
+
+ printed += print_common_stats(data, max_summary, fp);
+ printed += fprintf(fp, "\n\n");
+
+ return printed;
+}
+
+static int print_cgroup_stats(struct syscall_data **data, int nr_data, int max_summary, FILE *fp)
+{
+ int printed = 0;
+
+ for (int i = 0; i < nr_data; i++)
+ printed += print_cgroup_stat(data[i], max_summary, fp);
+
+ return printed;
+}
+
+int trace_print_bpf_summary(FILE *fp, int max_summary)
+{
+ struct bpf_map *map = skel->maps.syscall_stats_map;
+ struct syscall_key *prev_key, key;
+ struct syscall_data **data = NULL;
+ struct hashmap schash;
+ struct hashmap_entry *entry;
+ int nr_data = 0;
+ int printed = 0;
+ int i;
+ size_t bkt;
+
+ hashmap__init(&schash, sc_node_hash, sc_node_equal, /*ctx=*/NULL);
+
+ printed = fprintf(fp, "\n Summary of events:\n\n");
+
+ /* get stats from the bpf map */
+ prev_key = NULL;
+ while (!bpf_map__get_next_key(map, prev_key, &key, sizeof(key))) {
+ struct syscall_stats stat;
+
+ if (!bpf_map__lookup_elem(map, &key, sizeof(key), &stat, sizeof(stat), 0)) {
+ switch (skel->rodata->aggr_mode) {
+ case SYSCALL_AGGR_THREAD:
+ update_thread_stats(&schash, &key, &stat);
+ break;
+ case SYSCALL_AGGR_CPU:
+ update_total_stats(&schash, &key, &stat);
+ break;
+ case SYSCALL_AGGR_CGROUP:
+ update_cgroup_stats(&schash, &key, &stat);
+ break;
+ default:
+ break;
+ }
+ }
+
+ prev_key = &key;
+ }
+
+ nr_data = hashmap__size(&schash);
+ data = calloc(nr_data, sizeof(*data));
+ if (data == NULL)
+ goto out;
+
+ i = 0;
+ hashmap__for_each_entry(&schash, entry, bkt)
+ data[i++] = entry->pvalue;
+
+ qsort(data, nr_data, sizeof(*data), datacmp);
+
+ switch (skel->rodata->aggr_mode) {
+ case SYSCALL_AGGR_THREAD:
+ printed += print_thread_stats(data, nr_data, max_summary, fp);
+ break;
+ case SYSCALL_AGGR_CPU:
+ printed += print_total_stats(data, nr_data, max_summary, fp);
+ break;
+ case SYSCALL_AGGR_CGROUP:
+ printed += print_cgroup_stats(data, nr_data, max_summary, fp);
+ break;
+ default:
+ break;
+ }
+
+ for (i = 0; i < nr_data && data; i++) {
+ free(data[i]->nodes);
+ free(data[i]);
+ }
+ free(data);
+
+out:
+ hashmap__clear(&schash);
+ return printed;
+}
+
+void trace_cleanup_bpf_summary(void)
+{
+ if (!RB_EMPTY_ROOT(&cgroups)) {
+ struct cgroup *cgrp, *tmp;
+
+ rbtree_postorder_for_each_entry_safe(cgrp, tmp, &cgroups, node)
+ cgroup__put(cgrp);
+
+ cgroups = RB_ROOT;
+ }
+
+ syscall_summary_bpf__destroy(skel);
+}
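
The summary code above aggregates into the tools-tree generic hashmap (util/hashmap.h) before sorting. A small standalone sketch of that aggregate-then-walk pattern, with invented names and a trivial count as the value:

	#include <linux/compiler.h>
	#include <stdio.h>
	#include "util/hashmap.h"

	static size_t id_hash(long key, void *ctx __maybe_unused)
	{
		return key;
	}

	static bool id_equal(long k1, long k2, void *ctx __maybe_unused)
	{
		return k1 == k2;
	}

	/* Count how often each id occurs, then walk all buckets. */
	static void count_ids(const long *ids, int n)
	{
		struct hashmap map;
		struct hashmap_entry *entry;
		size_t bkt;

		hashmap__init(&map, id_hash, id_equal, /*ctx=*/NULL);
		for (int i = 0; i < n; i++) {
			long count = 0;

			hashmap__find(&map, ids[i], &count);
			hashmap__set(&map, ids[i], count + 1,
				     /*old_key=*/NULL, /*old_value=*/NULL);
		}
		hashmap__for_each_entry(&map, entry, bkt)
			printf("id %ld: %ld\n", entry->key, entry->value);
		hashmap__clear(&map);
	}
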
diff --git a/tools/perf/util/bpf-utils.c b/tools/perf/util/bpf-utils.c
index 80b1d2b3729b..5a66dc8594aa 100644
--- a/tools/perf/util/bpf-utils.c
+++ b/tools/perf/util/bpf-utils.c
@@ -20,7 +20,7 @@ struct bpil_array_desc {
*/
};
-static struct bpil_array_desc bpil_array_desc[] = {
+static const struct bpil_array_desc bpil_array_desc[] = {
[PERF_BPIL_JITED_INSNS] = {
offsetof(struct bpf_prog_info, jited_prog_insns),
offsetof(struct bpf_prog_info, jited_prog_len),
@@ -115,7 +115,7 @@ get_bpf_prog_info_linear(int fd, __u64 arrays)
__u32 info_len = sizeof(info);
__u32 data_len = 0;
int i, err;
- void *ptr;
+ __u8 *ptr;
if (arrays >> PERF_BPIL_LAST_ARRAY)
return ERR_PTR(-EINVAL);
@@ -126,15 +126,15 @@ get_bpf_prog_info_linear(int fd, __u64 arrays)
pr_debug("can't get prog info: %s", strerror(errno));
return ERR_PTR(-EFAULT);
}
+ if (info.type >= __MAX_BPF_PROG_TYPE)
+ pr_debug("%s:%d: unexpected program type %u\n", __func__, __LINE__, info.type);
/* step 2: calculate total size of all arrays */
for (i = PERF_BPIL_FIRST_ARRAY; i < PERF_BPIL_LAST_ARRAY; ++i) {
+ const struct bpil_array_desc *desc = &bpil_array_desc[i];
bool include_array = (arrays & (1UL << i)) > 0;
- struct bpil_array_desc *desc;
__u32 count, size;
- desc = bpil_array_desc + i;
-
/* kernel is too old to support this field */
if (info_len < desc->array_offset + sizeof(__u32) ||
info_len < desc->count_offset + sizeof(__u32) ||
@@ -163,19 +163,20 @@ get_bpf_prog_info_linear(int fd, __u64 arrays)
ptr = info_linear->data;
for (i = PERF_BPIL_FIRST_ARRAY; i < PERF_BPIL_LAST_ARRAY; ++i) {
- struct bpil_array_desc *desc;
+ const struct bpil_array_desc *desc = &bpil_array_desc[i];
__u32 count, size;
if ((arrays & (1UL << i)) == 0)
continue;
- desc = bpil_array_desc + i;
count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
bpf_prog_info_set_offset_u32(&info_linear->info,
desc->count_offset, count);
bpf_prog_info_set_offset_u32(&info_linear->info,
desc->size_offset, size);
+ assert(ptr >= info_linear->data);
+ assert(ptr < &info_linear->data[data_len]);
bpf_prog_info_set_offset_u64(&info_linear->info,
desc->array_offset,
ptr_to_u64(ptr));
@@ -189,27 +190,45 @@ get_bpf_prog_info_linear(int fd, __u64 arrays)
free(info_linear);
return ERR_PTR(-EFAULT);
}
+ if (info_linear->info.type >= __MAX_BPF_PROG_TYPE) {
+ pr_debug("%s:%d: unexpected program type %u\n",
+ __func__, __LINE__, info_linear->info.type);
+ }
/* step 6: verify the data */
+ ptr = info_linear->data;
for (i = PERF_BPIL_FIRST_ARRAY; i < PERF_BPIL_LAST_ARRAY; ++i) {
- struct bpil_array_desc *desc;
- __u32 v1, v2;
+ const struct bpil_array_desc *desc = &bpil_array_desc[i];
+ __u32 count1, count2, size1, size2;
+ __u64 ptr2;
if ((arrays & (1UL << i)) == 0)
continue;
- desc = bpil_array_desc + i;
- v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
- v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
+ count1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
+ count2 = bpf_prog_info_read_offset_u32(&info_linear->info,
desc->count_offset);
- if (v1 != v2)
- pr_warning("%s: mismatch in element count\n", __func__);
+ if (count1 != count2) {
+ pr_warning("%s: mismatch in element count %u vs %u\n", __func__, count1, count2);
+ free(info_linear);
+ return ERR_PTR(-ERANGE);
+ }
- v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
- v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
+ size1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
+ size2 = bpf_prog_info_read_offset_u32(&info_linear->info,
desc->size_offset);
- if (v1 != v2)
- pr_warning("%s: mismatch in rec size\n", __func__);
+ if (size1 != size2) {
+ pr_warning("%s: mismatch in rec size %u vs %u\n", __func__, size1, size2);
+ free(info_linear);
+ return ERR_PTR(-ERANGE);
+ }
+ ptr2 = bpf_prog_info_read_offset_u64(&info_linear->info, desc->array_offset);
+ if (ptr_to_u64(ptr) != ptr2) {
+ pr_warning("%s: mismatch in array %p vs %llx\n", __func__, ptr, ptr2);
+ free(info_linear);
+ return ERR_PTR(-ERANGE);
+ }
+ ptr += roundup(count1 * size1, sizeof(__u64));
}
/* step 7: update info_len and data_len */
@@ -224,13 +243,12 @@ void bpil_addr_to_offs(struct perf_bpil *info_linear)
int i;
for (i = PERF_BPIL_FIRST_ARRAY; i < PERF_BPIL_LAST_ARRAY; ++i) {
- struct bpil_array_desc *desc;
+ const struct bpil_array_desc *desc = &bpil_array_desc[i];
__u64 addr, offs;
if ((info_linear->arrays & (1UL << i)) == 0)
continue;
- desc = bpil_array_desc + i;
addr = bpf_prog_info_read_offset_u64(&info_linear->info,
desc->array_offset);
offs = addr - ptr_to_u64(info_linear->data);
@@ -244,13 +262,12 @@ void bpil_offs_to_addr(struct perf_bpil *info_linear)
int i;
for (i = PERF_BPIL_FIRST_ARRAY; i < PERF_BPIL_LAST_ARRAY; ++i) {
- struct bpil_array_desc *desc;
+ const struct bpil_array_desc *desc = &bpil_array_desc[i];
__u64 addr, offs;
if ((info_linear->arrays & (1UL << i)) == 0)
continue;
- desc = bpil_array_desc + i;
offs = bpf_prog_info_read_offset_u64(&info_linear->info,
desc->array_offset);
addr = offs + ptr_to_u64(info_linear->data);
diff --git a/tools/perf/util/bpf-utils.h b/tools/perf/util/bpf-utils.h
index 86a5055cdfad..a8bc1a232968 100644
--- a/tools/perf/util/bpf-utils.h
+++ b/tools/perf/util/bpf-utils.h
@@ -8,6 +8,16 @@
#ifdef HAVE_LIBBPF_SUPPORT
#include <bpf/libbpf.h>
+#include <bpf/libbpf_version.h>
+
+#define LIBBPF_CURRENT_VERSION_GEQ(major, minor) \
+ (LIBBPF_MAJOR_VERSION > (major) || \
+ (LIBBPF_MAJOR_VERSION == (major) && LIBBPF_MINOR_VERSION >= (minor)))
+
+#if LIBBPF_CURRENT_VERSION_GEQ(1, 7)
+// libbpf 1.7+ support the btf_dump_type_data_opts.emit_strings option.
+#define HAVE_LIBBPF_STRINGS_SUPPORT 1
+#endif
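+
+/*
+ * Callers gate optional libbpf features on this at compile time, e.g.
+ * (sketch) the btf_dump emit_strings option used for BPF metadata:
+ *
+ *	#if LIBBPF_CURRENT_VERSION_GEQ(1, 7)
+ *		opts.emit_strings = 1;
+ *	#endif
+ */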
/*
* Get bpf_prog_info in continuous memory
diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c
index 73fcafbffc6a..a5882b582205 100644
--- a/tools/perf/util/bpf_counter.c
+++ b/tools/perf/util/bpf_counter.c
@@ -6,10 +6,14 @@
#include <limits.h>
#include <unistd.h>
#include <sys/file.h>
+#include <sys/resource.h>
#include <sys/time.h>
#include <linux/err.h>
+#include <linux/list.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>
+#include <bpf/bpf.h>
+#include <bpf/btf.h>
#include <perf/bpf_perf.h>
#include "bpf_counter.h"
@@ -28,13 +32,67 @@
#include "bpf_skel/bperf_leader.skel.h"
#include "bpf_skel/bperf_follower.skel.h"
+struct bpf_counter {
+ void *skel;
+ struct list_head list;
+};
+
#define ATTR_MAP_SIZE 16
-static inline void *u64_to_ptr(__u64 ptr)
+static void *u64_to_ptr(__u64 ptr)
{
return (void *)(unsigned long)ptr;
}
+
+void set_max_rlimit(void)
+{
+ struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
+
+ setrlimit(RLIMIT_MEMLOCK, &rinf);
+}
+
+static __u32 bpf_link_get_id(int fd)
+{
+ struct bpf_link_info link_info = { .id = 0, };
+ __u32 link_info_len = sizeof(link_info);
+
+ bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
+ return link_info.id;
+}
+
+static __u32 bpf_link_get_prog_id(int fd)
+{
+ struct bpf_link_info link_info = { .id = 0, };
+ __u32 link_info_len = sizeof(link_info);
+
+ bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
+ return link_info.prog_id;
+}
+
+static __u32 bpf_map_get_id(int fd)
+{
+ struct bpf_map_info map_info = { .id = 0, };
+ __u32 map_info_len = sizeof(map_info);
+
+ bpf_obj_get_info_by_fd(fd, &map_info, &map_info_len);
+ return map_info.id;
+}
+
+/* trigger the leader program on a cpu */
+int bperf_trigger_reading(int prog_fd, int cpu)
+{
+ DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .ctx_in = NULL,
+ .ctx_size_in = 0,
+ .flags = BPF_F_TEST_RUN_ON_CPU,
+ .cpu = cpu,
+ .retval = 0,
+ );
+
+ return bpf_prog_test_run_opts(prog_fd, &opts);
+}
+
static struct bpf_counter *bpf_counter_alloc(void)
{
struct bpf_counter *counter;
@@ -278,6 +336,7 @@ static int bpf_program_profiler__install_pe(struct evsel *evsel, int cpu_map_idx
{
struct bpf_prog_profiler_bpf *skel;
struct bpf_counter *counter;
+ int cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx).cpu;
int ret;
list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
@@ -285,7 +344,7 @@ static int bpf_program_profiler__install_pe(struct evsel *evsel, int cpu_map_idx
assert(skel != NULL);
ret = bpf_map_update_elem(bpf_map__fd(skel->maps.events),
- &cpu_map_idx, &fd, BPF_ANY);
+ &cpu, &fd, BPF_ANY);
if (ret)
return ret;
}
@@ -393,7 +452,6 @@ static int bperf_check_target(struct evsel *evsel,
return 0;
}
-static struct perf_cpu_map *all_cpu_map;
static __u32 filter_entry_cnt;
static int bperf_reload_leader_program(struct evsel *evsel, int attr_map_fd,
@@ -402,6 +460,7 @@ static int bperf_reload_leader_program(struct evsel *evsel, int attr_map_fd,
struct bperf_leader_bpf *skel = bperf_leader_bpf__open();
int link_fd, diff_map_fd, err;
struct bpf_link *link = NULL;
+ struct perf_thread_map *threads;
if (!skel) {
pr_err("Failed to open leader skeleton\n");
@@ -437,7 +496,11 @@ static int bperf_reload_leader_program(struct evsel *evsel, int attr_map_fd,
* following evsel__open_per_cpu call
*/
evsel->leader_skel = skel;
- evsel__open_per_cpu(evsel, all_cpu_map, -1);
+ assert(!perf_cpu_map__has_any_cpu_or_is_empty(evsel->core.cpus));
+ /* Always open system wide. */
+ threads = thread_map__new_by_tid(-1);
+ evsel__open(evsel, evsel->core.cpus, threads);
+ perf_thread_map__put(threads);
out:
bperf_leader_bpf__destroy(skel);
@@ -475,12 +538,6 @@ static int bperf__load(struct evsel *evsel, struct target *target)
if (bperf_check_target(evsel, target, &filter_type, &filter_entry_cnt))
return -1;
- if (!all_cpu_map) {
- all_cpu_map = perf_cpu_map__new_online_cpus();
- if (!all_cpu_map)
- return -1;
- }
-
evsel->bperf_leader_prog_fd = -1;
evsel->bperf_leader_link_fd = -1;
@@ -598,9 +655,10 @@ out:
static int bperf__install_pe(struct evsel *evsel, int cpu_map_idx, int fd)
{
struct bperf_leader_bpf *skel = evsel->leader_skel;
+ int cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx).cpu;
return bpf_map_update_elem(bpf_map__fd(skel->maps.events),
- &cpu_map_idx, &fd, BPF_ANY);
+ &cpu, &fd, BPF_ANY);
}
/*
@@ -609,13 +667,12 @@ static int bperf__install_pe(struct evsel *evsel, int cpu_map_idx, int fd)
*/
static int bperf_sync_counters(struct evsel *evsel)
{
- int num_cpu, i, cpu;
+ struct perf_cpu cpu;
+ int idx;
+
+ perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus)
+ bperf_trigger_reading(evsel->bperf_leader_prog_fd, cpu.cpu);
- num_cpu = perf_cpu_map__nr(all_cpu_map);
- for (i = 0; i < num_cpu; i++) {
- cpu = perf_cpu_map__cpu(all_cpu_map, i).cpu;
- bperf_trigger_reading(evsel->bperf_leader_prog_fd, cpu);
- }
return 0;
}
@@ -785,7 +842,7 @@ struct bpf_counter_ops bperf_ops = {
extern struct bpf_counter_ops bperf_cgrp_ops;
-static inline bool bpf_counter_skip(struct evsel *evsel)
+static bool bpf_counter_skip(struct evsel *evsel)
{
return evsel->bpf_counter_ops == NULL;
}
diff --git a/tools/perf/util/bpf_counter.h b/tools/perf/util/bpf_counter.h
index c6d21c07b14c..658d8e7d507e 100644
--- a/tools/perf/util/bpf_counter.h
+++ b/tools/perf/util/bpf_counter.h
@@ -2,18 +2,10 @@
#ifndef __PERF_BPF_COUNTER_H
#define __PERF_BPF_COUNTER_H 1
-#include <linux/list.h>
-#include <sys/resource.h>
-
-#ifdef HAVE_LIBBPF_SUPPORT
-#include <bpf/bpf.h>
-#include <bpf/btf.h>
-#include <bpf/libbpf.h>
-#endif
-
struct evsel;
struct target;
-struct bpf_counter;
+
+#ifdef HAVE_BPF_SKEL
typedef int (*bpf_counter_evsel_op)(struct evsel *evsel);
typedef int (*bpf_counter_evsel_target_op)(struct evsel *evsel,
@@ -22,6 +14,7 @@ typedef int (*bpf_counter_evsel_install_pe_op)(struct evsel *evsel,
int cpu_map_idx,
int fd);
+/* Shared ops between bpf_counter, bpf_counter_cgroup, etc. */
struct bpf_counter_ops {
bpf_counter_evsel_target_op load;
bpf_counter_evsel_op enable;
@@ -31,13 +24,6 @@ struct bpf_counter_ops {
bpf_counter_evsel_install_pe_op install_pe;
};
-struct bpf_counter {
- void *skel;
- struct list_head list;
-};
-
-#ifdef HAVE_BPF_SKEL
-
int bpf_counter__load(struct evsel *evsel, struct target *target);
int bpf_counter__enable(struct evsel *evsel);
int bpf_counter__disable(struct evsel *evsel);
@@ -45,6 +31,9 @@ int bpf_counter__read(struct evsel *evsel);
void bpf_counter__destroy(struct evsel *evsel);
int bpf_counter__install_pe(struct evsel *evsel, int cpu_map_idx, int fd);
+int bperf_trigger_reading(int prog_fd, int cpu);
+void set_max_rlimit(void);
+
#else /* HAVE_BPF_SKEL */
#include <linux/err.h>
@@ -83,55 +72,4 @@ static inline int bpf_counter__install_pe(struct evsel *evsel __maybe_unused,
#endif /* HAVE_BPF_SKEL */
-static inline void set_max_rlimit(void)
-{
- struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
-
- setrlimit(RLIMIT_MEMLOCK, &rinf);
-}
-
-#ifdef HAVE_BPF_SKEL
-
-static inline __u32 bpf_link_get_id(int fd)
-{
- struct bpf_link_info link_info = { .id = 0, };
- __u32 link_info_len = sizeof(link_info);
-
- bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
- return link_info.id;
-}
-
-static inline __u32 bpf_link_get_prog_id(int fd)
-{
- struct bpf_link_info link_info = { .id = 0, };
- __u32 link_info_len = sizeof(link_info);
-
- bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
- return link_info.prog_id;
-}
-
-static inline __u32 bpf_map_get_id(int fd)
-{
- struct bpf_map_info map_info = { .id = 0, };
- __u32 map_info_len = sizeof(map_info);
-
- bpf_obj_get_info_by_fd(fd, &map_info, &map_info_len);
- return map_info.id;
-}
-
-/* trigger the leader program on a cpu */
-static inline int bperf_trigger_reading(int prog_fd, int cpu)
-{
- DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
- .ctx_in = NULL,
- .ctx_size_in = 0,
- .flags = BPF_F_TEST_RUN_ON_CPU,
- .cpu = cpu,
- .retval = 0,
- );
-
- return bpf_prog_test_run_opts(prog_fd, &opts);
-}
-#endif /* HAVE_BPF_SKEL */
-
#endif /* __PERF_BPF_COUNTER_H */
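
A note on set_max_rlimit(), now out-of-line above: on kernels before 5.11, BPF program and map memory was charged against RLIMIT_MEMLOCK rather than memcg, so callers lift the limit before loading a skeleton. A typical call site (sketch; the skeleton name is illustrative):

	set_max_rlimit();	/* lift RLIMIT_MEMLOCK for pre-5.11 BPF accounting */
	skel = bperf_leader_bpf__open_and_load();
	if (!skel)
		return -1;
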
diff --git a/tools/perf/util/bpf_counter_cgroup.c b/tools/perf/util/bpf_counter_cgroup.c
index 6ff42619de12..17d7196c6589 100644
--- a/tools/perf/util/bpf_counter_cgroup.c
+++ b/tools/perf/util/bpf_counter_cgroup.c
@@ -4,6 +4,7 @@
/* Copyright (c) 2021 Google */
#include <assert.h>
+#include <errno.h>
#include <limits.h>
#include <unistd.h>
#include <sys/file.h>
@@ -13,6 +14,7 @@
#include <linux/zalloc.h>
#include <linux/perf_event.h>
#include <api/fs/fs.h>
+#include <bpf/bpf.h>
#include <perf/bpf_perf.h>
#include "affinity.h"
@@ -26,6 +28,7 @@
#include "cpumap.h"
#include "thread_map.h"
+#include "bpf_skel/bperf_cgroup.h"
#include "bpf_skel/bperf_cgroup.skel.h"
static struct perf_event_attr cgrp_switch_attr = {
@@ -41,6 +44,55 @@ static struct bperf_cgroup_bpf *skel;
#define FD(evt, cpu) (*(int *)xyarray__entry(evt->core.fd, cpu, 0))
+static void setup_rodata(struct bperf_cgroup_bpf *sk, int evlist_size)
+{
+ int map_size, total_cpus = cpu__max_cpu().cpu;
+
+ sk->rodata->num_cpus = total_cpus;
+ sk->rodata->num_events = evlist_size / nr_cgroups;
+
+ if (cgroup_is_v2("perf_event") > 0)
+ sk->rodata->use_cgroup_v2 = 1;
+
+ BUG_ON(evlist_size % nr_cgroups != 0);
+
+ /* we need one copy of events per cpu for reading */
+ map_size = total_cpus * evlist_size / nr_cgroups;
+ bpf_map__set_max_entries(sk->maps.events, map_size);
+ bpf_map__set_max_entries(sk->maps.cgrp_idx, nr_cgroups);
+ /* previous result is saved in a per-cpu array */
+ map_size = evlist_size / nr_cgroups;
+ bpf_map__set_max_entries(sk->maps.prev_readings, map_size);
+ /* cgroup result needs all events (per-cpu) */
+ map_size = evlist_size;
+ bpf_map__set_max_entries(sk->maps.cgrp_readings, map_size);
+}
+
+static void test_max_events_program_load(void)
+{
+#ifndef NDEBUG
+ /*
+	 * Test that the program passes the verifier with the maximum
+	 * number of events. If this test fails, perf unfortunately needs
+	 * recompiling with a lower BPERF_CGROUP__MAX_EVENTS to avoid BPF
+	 * verifier issues.
+ */
+ int err, max_events = BPERF_CGROUP__MAX_EVENTS * nr_cgroups;
+ struct bperf_cgroup_bpf *test_skel = bperf_cgroup_bpf__open();
+
+ if (!test_skel) {
+ pr_err("Failed to open cgroup skeleton\n");
+ return;
+ }
+ setup_rodata(test_skel, max_events);
+ err = bperf_cgroup_bpf__load(test_skel);
+ if (err) {
+ pr_err("Failed to load cgroup skeleton with max events %d.\n",
+ BPERF_CGROUP__MAX_EVENTS);
+ }
+ bperf_cgroup_bpf__destroy(test_skel);
+#endif
+}
+
static int bperf_load_program(struct evlist *evlist)
{
struct bpf_link *link;
@@ -49,35 +101,18 @@ static int bperf_load_program(struct evlist *evlist)
int i, j;
struct perf_cpu cpu;
int total_cpus = cpu__max_cpu().cpu;
- int map_size, map_fd;
- int prog_fd, err;
+ int map_fd, prog_fd, err;
+
+ set_max_rlimit();
+
+ test_max_events_program_load();
skel = bperf_cgroup_bpf__open();
if (!skel) {
pr_err("Failed to open cgroup skeleton\n");
return -1;
}
-
- skel->rodata->num_cpus = total_cpus;
- skel->rodata->num_events = evlist->core.nr_entries / nr_cgroups;
-
- if (cgroup_is_v2("perf_event") > 0)
- skel->rodata->use_cgroup_v2 = 1;
-
- BUG_ON(evlist->core.nr_entries % nr_cgroups != 0);
-
- /* we need one copy of events per cpu for reading */
- map_size = total_cpus * evlist->core.nr_entries / nr_cgroups;
- bpf_map__set_max_entries(skel->maps.events, map_size);
- bpf_map__set_max_entries(skel->maps.cgrp_idx, nr_cgroups);
- /* previous result is saved in a per-cpu array */
- map_size = evlist->core.nr_entries / nr_cgroups;
- bpf_map__set_max_entries(skel->maps.prev_readings, map_size);
- /* cgroup result needs all events (per-cpu) */
- map_size = evlist->core.nr_entries;
- bpf_map__set_max_entries(skel->maps.cgrp_readings, map_size);
-
- set_max_rlimit();
+ setup_rodata(skel, evlist->core.nr_entries);
err = bperf_cgroup_bpf__load(skel);
if (err) {
@@ -185,7 +220,8 @@ static int bperf_cgrp__load(struct evsel *evsel,
}
static int bperf_cgrp__install_pe(struct evsel *evsel __maybe_unused,
- int cpu __maybe_unused, int fd __maybe_unused)
+ int cpu_map_idx __maybe_unused,
+ int fd __maybe_unused)
{
/* nothing to do */
return 0;
diff --git a/tools/perf/util/bpf_ftrace.c b/tools/perf/util/bpf_ftrace.c
index 7324668cc83e..c456d24efa30 100644
--- a/tools/perf/util/bpf_ftrace.c
+++ b/tools/perf/util/bpf_ftrace.c
@@ -1,8 +1,10 @@
-#include <stdio.h>
+#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
+#include <stdio.h>
#include <stdlib.h>
+#include <bpf/bpf.h>
#include <linux/err.h>
#include "util/ftrace.h"
@@ -21,15 +23,26 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
{
int fd, err;
int i, ncpus = 1, ntasks = 1;
- struct filter_entry *func;
+ struct filter_entry *func = NULL;
- if (!list_is_singular(&ftrace->filters)) {
- pr_err("ERROR: %s target function(s).\n",
- list_empty(&ftrace->filters) ? "No" : "Too many");
- return -1;
- }
+ if (!list_empty(&ftrace->filters)) {
+ if (!list_is_singular(&ftrace->filters)) {
+ pr_err("ERROR: Too many target functions.\n");
+ return -1;
+ }
+ func = list_first_entry(&ftrace->filters, struct filter_entry, list);
+ } else {
+ int count = 0;
+ struct list_head *pos;
- func = list_first_entry(&ftrace->filters, struct filter_entry, list);
+ list_for_each(pos, &ftrace->event_pair)
+ count++;
+
+ if (count != 2) {
+ pr_err("ERROR: Needs two target events.\n");
+ return -1;
+ }
+ }
skel = func_latency_bpf__open();
if (!skel) {
@@ -93,20 +106,44 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
skel->bss->min = INT64_MAX;
- skel->links.func_begin = bpf_program__attach_kprobe(skel->progs.func_begin,
- false, func->name);
- if (IS_ERR(skel->links.func_begin)) {
- pr_err("Failed to attach fentry program\n");
- err = PTR_ERR(skel->links.func_begin);
- goto out;
- }
+ if (func) {
+ skel->links.func_begin = bpf_program__attach_kprobe(skel->progs.func_begin,
+ false, func->name);
+ if (IS_ERR(skel->links.func_begin)) {
+ pr_err("Failed to attach fentry program\n");
+ err = PTR_ERR(skel->links.func_begin);
+ goto out;
+ }
- skel->links.func_end = bpf_program__attach_kprobe(skel->progs.func_end,
- true, func->name);
- if (IS_ERR(skel->links.func_end)) {
- pr_err("Failed to attach fexit program\n");
- err = PTR_ERR(skel->links.func_end);
- goto out;
+ skel->links.func_end = bpf_program__attach_kprobe(skel->progs.func_end,
+ true, func->name);
+ if (IS_ERR(skel->links.func_end)) {
+ pr_err("Failed to attach fexit program\n");
+ err = PTR_ERR(skel->links.func_end);
+ goto out;
+ }
+ } else {
+ struct filter_entry *event;
+
+ event = list_first_entry(&ftrace->event_pair, struct filter_entry, list);
+
+ skel->links.event_begin = bpf_program__attach_raw_tracepoint(skel->progs.event_begin,
+ event->name);
+ if (IS_ERR(skel->links.event_begin)) {
+ pr_err("Failed to attach first tracepoint program\n");
+ err = PTR_ERR(skel->links.event_begin);
+ goto out;
+ }
+
+ event = list_next_entry(event, list);
+
+ skel->links.event_end = bpf_program__attach_raw_tracepoint(skel->progs.event_end,
+ event->name);
+ if (IS_ERR(skel->links.event_end)) {
+ pr_err("Failed to attach second tracepoint program\n");
+ err = PTR_ERR(skel->links.event_end);
+ goto out;
+ }
}
/* XXX: we don't actually use this fd - just for poll() */
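
The event-pair path added above attaches one program per raw tracepoint; a condensed sketch of that attach-and-unwind shape (tracepoint names are illustrative):

	struct bpf_link *begin, *end;

	begin = bpf_program__attach_raw_tracepoint(skel->progs.event_begin,
						   "sched_switch");
	if (IS_ERR(begin))
		return PTR_ERR(begin);

	end = bpf_program__attach_raw_tracepoint(skel->progs.event_end,
						 "sched_wakeup");
	if (IS_ERR(end)) {
		bpf_link__destroy(begin);
		return PTR_ERR(end);
	}
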
diff --git a/tools/perf/util/bpf_lock_contention.c b/tools/perf/util/bpf_lock_contention.c
index 5af8f6d1bc95..7b5671f13c53 100644
--- a/tools/perf/util/bpf_lock_contention.c
+++ b/tools/perf/util/bpf_lock_contention.c
@@ -12,6 +12,7 @@
#include "util/lock-contention.h"
#include <linux/zalloc.h>
#include <linux/string.h>
+#include <api/fs/fs.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <inttypes.h>
@@ -35,28 +36,26 @@ static bool slab_cache_equal(long key1, long key2, void *ctx __maybe_unused)
static void check_slab_cache_iter(struct lock_contention *con)
{
- struct btf *btf = btf__load_vmlinux_btf();
s32 ret;
hashmap__init(&slab_hash, slab_cache_hash, slab_cache_equal, /*ctx=*/NULL);
- if (btf == NULL) {
+ con->btf = btf__load_vmlinux_btf();
+ if (con->btf == NULL) {
pr_debug("BTF loading failed: %s\n", strerror(errno));
return;
}
- ret = btf__find_by_name_kind(btf, "bpf_iter__kmem_cache", BTF_KIND_STRUCT);
+ ret = btf__find_by_name_kind(con->btf, "bpf_iter__kmem_cache", BTF_KIND_STRUCT);
if (ret < 0) {
bpf_program__set_autoload(skel->progs.slab_cache_iter, false);
pr_debug("slab cache iterator is not available: %d\n", ret);
- goto out;
+ return;
}
has_slab_iter = true;
bpf_map__set_max_entries(skel->maps.slab_caches, con->map_nr_entries);
-out:
- btf__free(btf);
}
static void run_slab_cache_iter(void)
@@ -109,6 +108,75 @@ static void exit_slab_cache_iter(void)
hashmap__clear(&slab_hash);
}
+static void init_numa_data(struct lock_contention *con)
+{
+ struct symbol *sym;
+ struct map *kmap;
+ char *buf = NULL, *p;
+ size_t len;
+ long last = -1;
+ int ret;
+
+ /*
+ * 'struct zone' is embedded in 'struct pglist_data' as an array.
+	 * As the (fake) vmlinux.h may not carry full information about
+	 * struct zone, get its actual size from BTF.
+ */
+ ret = btf__find_by_name_kind(con->btf, "zone", BTF_KIND_STRUCT);
+ if (ret < 0) {
+ pr_debug("cannot get type of struct zone: %d\n", ret);
+ return;
+ }
+
+ ret = btf__resolve_size(con->btf, ret);
+ if (ret < 0) {
+ pr_debug("cannot get size of struct zone: %d\n", ret);
+ return;
+ }
+ skel->rodata->sizeof_zone = ret;
+
+ /* UMA system doesn't have 'node_data[]' - just use contig_page_data. */
+ sym = machine__find_kernel_symbol_by_name(con->machine,
+ "contig_page_data",
+ &kmap);
+ if (sym) {
+ skel->rodata->contig_page_data_addr = map__unmap_ip(kmap, sym->start);
+ map__put(kmap);
+ return;
+ }
+
+ /*
+	 * 'node_data' is an array of pointers to struct pglist_data.
+	 * The BPF program needs to follow the pointer for each node to
+	 * get the address of struct pglist_data and its zones.
+ */
+ sym = machine__find_kernel_symbol_by_name(con->machine,
+ "node_data",
+ &kmap);
+ if (sym == NULL)
+ return;
+
+ skel->rodata->node_data_addr = map__unmap_ip(kmap, sym->start);
+ map__put(kmap);
+
+ /* get the number of online nodes using the last node number + 1 */
+ ret = sysfs__read_str("devices/system/node/online", &buf, &len);
+ if (ret < 0) {
+ pr_debug("failed to read online node: %d\n", ret);
+ return;
+ }
+
+ p = buf;
+ while (p && *p) {
+ last = strtol(p, &p, 0);
+
+ if (p && (*p == ',' || *p == '-' || *p == '\n'))
+ p++;
+ }
+ skel->rodata->nr_nodes = last + 1;
+ free(buf);
+}
+
int lock_contention_prepare(struct lock_contention *con)
{
int i, fd;
@@ -116,6 +184,9 @@ int lock_contention_prepare(struct lock_contention *con)
struct evlist *evlist = con->evlist;
struct target *target = con->target;
+ /* make sure it loads the kernel map before lookup */
+ map__load(machine__kernel_map(con->machine));
+
skel = lock_contention_bpf__open();
if (!skel) {
pr_err("Failed to open lock-contention BPF skeleton\n");
@@ -193,6 +264,27 @@ int lock_contention_prepare(struct lock_contention *con)
skel->rodata->has_addr = 1;
}
+ /* resolve lock name in delays */
+ if (con->nr_delays) {
+ struct symbol *sym;
+ struct map *kmap;
+
+ for (i = 0; i < con->nr_delays; i++) {
+ sym = machine__find_kernel_symbol_by_name(con->machine,
+ con->delays[i].sym,
+ &kmap);
+ if (sym == NULL) {
+ pr_warning("ignore unknown symbol: %s\n",
+ con->delays[i].sym);
+ continue;
+ }
+
+ con->delays[i].addr = map__unmap_ip(kmap, sym->start);
+ }
+ skel->rodata->lock_delay = 1;
+ bpf_map__set_max_entries(skel->maps.lock_delays, con->nr_delays);
+ }
+
bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
bpf_map__set_max_entries(skel->maps.type_filter, ntypes);
@@ -218,6 +310,8 @@ int lock_contention_prepare(struct lock_contention *con)
bpf_map__set_max_entries(skel->maps.slab_filter, nslabs);
+ init_numa_data(con);
+
if (lock_contention_bpf__load(skel) < 0) {
pr_err("Failed to load lock-contention BPF skeleton\n");
return -1;
@@ -282,6 +376,13 @@ int lock_contention_prepare(struct lock_contention *con)
bpf_map_update_elem(fd, &con->filters->cgrps[i], &val, BPF_ANY);
}
+ if (con->nr_delays) {
+ fd = bpf_map__fd(skel->maps.lock_delays);
+
+ for (i = 0; i < con->nr_delays; i++)
+ bpf_map_update_elem(fd, &con->delays[i].addr, &con->delays[i].time, BPF_ANY);
+ }
+
if (con->aggr_mode == LOCK_AGGR_CGROUP)
read_all_cgroups(&con->cgroups);
@@ -505,6 +606,11 @@ static const char *lock_contention_get_name(struct lock_contention *con,
return "rq_lock";
}
+ if (!bpf_map_lookup_elem(lock_fd, &key->lock_addr_or_cgroup, &flags)) {
+ if (flags == LOCK_CLASS_ZONE_LOCK)
+ return "zone_lock";
+ }
+
/* look up slab_hash for dynamic locks in a slab object */
if (hashmap__find(&slab_hash, flags & LCB_F_SLAB_ID_MASK, &slab_data)) {
snprintf(name_buf, sizeof(name_buf), "&%s", slab_data->name);
@@ -646,9 +752,6 @@ int lock_contention_read(struct lock_contention *con)
bpf_prog_test_run_opts(prog_fd, &opts);
}
- /* make sure it loads the kernel map */
- maps__load_first(machine->kmaps);
-
prev_key = NULL;
while (!bpf_map_get_next_key(fd, prev_key, &key)) {
s64 ls_key;
@@ -743,6 +846,7 @@ int lock_contention_finish(struct lock_contention *con)
}
exit_slab_cache_iter();
+ btf__free(con->btf);
return 0;
}
diff --git a/tools/perf/util/bpf_map.c b/tools/perf/util/bpf_map.c
index 578f27d2d6b4..442f91b4e8e1 100644
--- a/tools/perf/util/bpf_map.c
+++ b/tools/perf/util/bpf_map.c
@@ -5,6 +5,7 @@
#include <bpf/libbpf.h>
#include <linux/err.h>
#include <linux/kernel.h>
+#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <unistd.h>
diff --git a/tools/perf/util/bpf_off_cpu.c b/tools/perf/util/bpf_off_cpu.c
index 4269b41d1771..88e0660c4bff 100644
--- a/tools/perf/util/bpf_off_cpu.c
+++ b/tools/perf/util/bpf_off_cpu.c
@@ -13,6 +13,9 @@
#include "util/cgroup.h"
#include "util/strlist.h"
#include <bpf/bpf.h>
+#include <bpf/btf.h>
+#include <internal/xyarray.h>
+#include <linux/time64.h>
#include "bpf_skel/off_cpu.skel.h"
@@ -36,34 +39,25 @@ union off_cpu_data {
u64 array[1024 / sizeof(u64)];
};
+u64 off_cpu_raw[MAX_STACKS + 5];
+
static int off_cpu_config(struct evlist *evlist)
{
+ char off_cpu_event[64];
struct evsel *evsel;
- struct perf_event_attr attr = {
- .type = PERF_TYPE_SOFTWARE,
- .config = PERF_COUNT_SW_BPF_OUTPUT,
- .size = sizeof(attr), /* to capture ABI version */
- };
- char *evname = strdup(OFFCPU_EVENT);
- if (evname == NULL)
- return -ENOMEM;
-
- evsel = evsel__new(&attr);
- if (!evsel) {
- free(evname);
- return -ENOMEM;
+ scnprintf(off_cpu_event, sizeof(off_cpu_event), "bpf-output/name=%s/", OFFCPU_EVENT);
+ if (parse_event(evlist, off_cpu_event)) {
+ pr_err("Failed to open off-cpu event\n");
+ return -1;
}
- evsel->core.attr.freq = 1;
- evsel->core.attr.sample_period = 1;
- /* off-cpu analysis depends on stack trace */
- evsel->core.attr.sample_type = PERF_SAMPLE_CALLCHAIN;
-
- evlist__add(evlist, evsel);
-
- free(evsel->name);
- evsel->name = evname;
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel__is_offcpu_event(evsel)) {
+ evsel->core.system_wide = true;
+ break;
+ }
+ }
return 0;
}
@@ -71,6 +65,9 @@ static int off_cpu_config(struct evlist *evlist)
static void off_cpu_start(void *arg)
{
struct evlist *evlist = arg;
+ struct evsel *evsel;
+ struct perf_cpu pcpu;
+ int i;
/* update task filter for the given workload */
if (skel->rodata->has_task && skel->rodata->uses_tgid &&
@@ -84,6 +81,26 @@ static void off_cpu_start(void *arg)
bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
}
+ /* update BPF perf_event map */
+ evsel = evlist__find_evsel_by_str(evlist, OFFCPU_EVENT);
+ if (evsel == NULL) {
+ pr_err("%s evsel not found\n", OFFCPU_EVENT);
+ return;
+ }
+
+ perf_cpu_map__for_each_cpu(pcpu, i, evsel->core.cpus) {
+ int err;
+ int cpu_nr = pcpu.cpu;
+
+ err = bpf_map__update_elem(skel->maps.offcpu_output, &cpu_nr, sizeof(int),
+ xyarray__entry(evsel->core.fd, cpu_nr, 0),
+ sizeof(int), BPF_ANY);
+ if (err) {
+ pr_err("Failed to update perf event map for direct off-cpu dumping\n");
+ return;
+ }
+ }
+
skel->bss->enabled = 1;
}
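For context: offcpu_output is a BPF_MAP_TYPE_PERF_EVENT_ARRAY (declared in off_cpu.bpf.c below), and bpf_perf_event_output() with BPF_F_CURRENT_CPU indexes such a map by the CPU the program runs on. That is why the loop above must seed every CPU slot with that CPU's event file descriptor before the program is enabled; in short (illustrative note, not part of the patch):

/*
 * userspace:  map[cpu] = perf event fd for that cpu (one slot per CPU)
 * BPF side:   bpf_perf_event_output(ctx, &offcpu_output,
 *                                   BPF_F_CURRENT_CPU, data, size);
 * the helper fails if the current CPU's slot was never populated.
 */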
@@ -277,6 +294,8 @@ int off_cpu_prepare(struct evlist *evlist, struct target *target,
}
}
+ skel->bss->offcpu_thresh_ns = opts->off_cpu_thresh_ns;
+
err = off_cpu_bpf__attach(skel);
if (err) {
pr_err("Failed to attach off-cpu BPF skeleton\n");
@@ -300,6 +319,7 @@ int off_cpu_write(struct perf_session *session)
{
int bytes = 0, size;
int fd, stack;
+ u32 raw_size;
u64 sample_type, val, sid = 0;
struct evsel *evsel;
struct perf_data_file *file = &session->data->file;
@@ -339,46 +359,54 @@ int off_cpu_write(struct perf_session *session)
while (!bpf_map_get_next_key(fd, &prev, &key)) {
int n = 1; /* start from perf_event_header */
- int ip_pos = -1;
bpf_map_lookup_elem(fd, &key, &val);
+ /* zero-fill some of the fields; they will be overwritten from raw_data when parsing */
if (sample_type & PERF_SAMPLE_IDENTIFIER)
data.array[n++] = sid;
- if (sample_type & PERF_SAMPLE_IP) {
- ip_pos = n;
+ if (sample_type & PERF_SAMPLE_IP)
data.array[n++] = 0; /* will be updated */
- }
if (sample_type & PERF_SAMPLE_TID)
- data.array[n++] = (u64)key.pid << 32 | key.tgid;
+ data.array[n++] = 0;
if (sample_type & PERF_SAMPLE_TIME)
data.array[n++] = tstamp;
- if (sample_type & PERF_SAMPLE_ID)
- data.array[n++] = sid;
if (sample_type & PERF_SAMPLE_CPU)
data.array[n++] = 0;
if (sample_type & PERF_SAMPLE_PERIOD)
- data.array[n++] = val;
- if (sample_type & PERF_SAMPLE_CALLCHAIN) {
- int len = 0;
-
- /* data.array[n] is callchain->nr (updated later) */
- data.array[n + 1] = PERF_CONTEXT_USER;
- data.array[n + 2] = 0;
-
- bpf_map_lookup_elem(stack, &key.stack_id, &data.array[n + 2]);
- while (data.array[n + 2 + len])
+ data.array[n++] = 0;
+ if (sample_type & PERF_SAMPLE_RAW) {
+ /*
+ * [ size ][ data ]
+ * [ data ]
+ * [ data ]
+ * [ data ]
+ * [ data ][ empty]
+ */
+ int len = 0, i = 0;
+ void *raw_data = (void *)data.array + n * sizeof(u64);
+
+ off_cpu_raw[i++] = (u64)key.pid << 32 | key.tgid;
+ off_cpu_raw[i++] = val;
+
+ /* off_cpu_raw[i] is callchain->nr (updated later) */
+ off_cpu_raw[i + 1] = PERF_CONTEXT_USER;
+ off_cpu_raw[i + 2] = 0;
+
+ bpf_map_lookup_elem(stack, &key.stack_id, &off_cpu_raw[i + 2]);
+ while (off_cpu_raw[i + 2 + len])
len++;
- /* update length of callchain */
- data.array[n] = len + 1;
+ off_cpu_raw[i] = len + 1;
+ i += len + 2;
+
+ off_cpu_raw[i++] = key.cgroup_id;
- /* update sample ip with the first callchain entry */
- if (ip_pos >= 0)
- data.array[ip_pos] = data.array[n + 2];
+ raw_size = i * sizeof(u64) + sizeof(u32); /* 4 bytes for alignment */
+ memcpy(raw_data, &raw_size, sizeof(raw_size));
+ memcpy(raw_data + sizeof(u32), off_cpu_raw, i * sizeof(u64));
- /* calculate sample callchain data array length */
- n += len + 2;
+ n += i + 1;
}
if (sample_type & PERF_SAMPLE_CGROUP)
data.array[n++] = key.cgroup_id;
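The raw-area arithmetic above is easiest to see with a concrete count. A layout sketch for i == 3 data words (illustrative):

/*
 *   byte 0         4                        28      32
 *        [ size=28 ][ 3 x u64 of raw data    ][ pad ]
 *
 * raw_size = 3 * 8 + 4 = 28 covers the data plus 4 trailing pad bytes,
 * so the u32 header and the payload together span (i + 1) * 8 bytes,
 * matching the 'n += i + 1' advance in data.array[] above.
 */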
diff --git a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
index e4352881e3fa..2a6e61864ee0 100644
--- a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
+++ b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
@@ -7,7 +7,6 @@
*/
#include "vmlinux.h"
-#include "../trace_augment.h"
#include <bpf/bpf_helpers.h>
#include <linux/limits.h>
@@ -27,6 +26,8 @@
#define MAX_CPUS 4096
+#define TRACE_AUG_MAX_BUF 32 /* for buffer augmentation in perf trace */
+
/* bpf-output associated map */
struct __augmented_syscalls__ {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
@@ -44,7 +45,7 @@ struct syscalls_sys_enter {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__type(key, __u32);
__type(value, __u32);
- __uint(max_entries, 512);
+ __uint(max_entries, 1024);
} syscalls_sys_enter SEC(".maps");
/*
@@ -56,7 +57,7 @@ struct syscalls_sys_exit {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__type(key, __u32);
__type(value, __u32);
- __uint(max_entries, 512);
+ __uint(max_entries, 1024);
} syscalls_sys_exit SEC(".maps");
struct syscall_enter_args {
diff --git a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
index 57cab7647a9a..c2298a2decc9 100644
--- a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
+++ b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
@@ -1,14 +1,12 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2021 Facebook
// Copyright (c) 2021 Google
+#include "bperf_cgroup.h"
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
-#define MAX_LEVELS 10 // max cgroup hierarchy level: arbitrary
-#define MAX_EVENTS 32 // max events per cgroup: arbitrary
-
// NOTE: many of the maps and global data will be modified before loading
// from the userspace (perf tool) using the skeleton helpers.
@@ -97,7 +95,7 @@ static inline int get_cgroup_v1_idx(__u32 *cgrps, int size)
cgrp = BPF_CORE_READ(p, cgroups, subsys[perf_subsys_id], cgroup);
level = BPF_CORE_READ(cgrp, level);
- for (cnt = 0; i < MAX_LEVELS; i++) {
+ for (cnt = 0; i < BPERF_CGROUP__MAX_LEVELS; i++) {
__u64 cgrp_id;
if (i > level)
@@ -123,7 +121,7 @@ static inline int get_cgroup_v2_idx(__u32 *cgrps, int size)
__u32 *elem;
int cnt;
- for (cnt = 0; i < MAX_LEVELS; i++) {
+ for (cnt = 0; i < BPERF_CGROUP__MAX_LEVELS; i++) {
__u64 cgrp_id = bpf_get_current_ancestor_cgroup_id(i);
if (cgrp_id == 0)
@@ -148,17 +146,17 @@ static int bperf_cgroup_count(void)
register int c = 0;
struct bpf_perf_event_value val, delta, *prev_val, *cgrp_val;
__u32 cpu = bpf_get_smp_processor_id();
- __u32 cgrp_idx[MAX_LEVELS];
+ __u32 cgrp_idx[BPERF_CGROUP__MAX_LEVELS];
int cgrp_cnt;
__u32 key, cgrp;
long err;
if (use_cgroup_v2)
- cgrp_cnt = get_cgroup_v2_idx(cgrp_idx, MAX_LEVELS);
+ cgrp_cnt = get_cgroup_v2_idx(cgrp_idx, BPERF_CGROUP__MAX_LEVELS);
else
- cgrp_cnt = get_cgroup_v1_idx(cgrp_idx, MAX_LEVELS);
+ cgrp_cnt = get_cgroup_v1_idx(cgrp_idx, BPERF_CGROUP__MAX_LEVELS);
- for ( ; idx < MAX_EVENTS; idx++) {
+ for ( ; idx < BPERF_CGROUP__MAX_EVENTS; idx++) {
if (idx == num_events)
break;
@@ -186,7 +184,7 @@ static int bperf_cgroup_count(void)
delta.enabled = val.enabled - prev_val->enabled;
delta.running = val.running - prev_val->running;
- for (c = 0; c < MAX_LEVELS; c++) {
+ for (c = 0; c < BPERF_CGROUP__MAX_LEVELS; c++) {
if (c == cgrp_cnt)
break;
diff --git a/tools/perf/util/bpf_skel/bperf_cgroup.h b/tools/perf/util/bpf_skel/bperf_cgroup.h
new file mode 100644
index 000000000000..3fb84b19d39a
--- /dev/null
+++ b/tools/perf/util/bpf_skel/bperf_cgroup.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Data structures shared between BPF and tools. */
+#ifndef __BPERF_CGROUP_H
+#define __BPERF_CGROUP_H
+
+// These constants impact the code size of bperf_cgroup.bpf.c, which may result
+// in BPF verifier issues. They are exposed to control the size and also to
+// disable BPF counters when the number of user events is too large.
+
+// max cgroup hierarchy level: arbitrary
+#define BPERF_CGROUP__MAX_LEVELS 10
+// max events per cgroup: arbitrary
+#define BPERF_CGROUP__MAX_EVENTS 128
+
+#endif /* __BPERF_CGROUP_H */
diff --git a/tools/perf/util/bpf_skel/func_latency.bpf.c b/tools/perf/util/bpf_skel/func_latency.bpf.c
index e731a79a753a..621e2022c8bc 100644
--- a/tools/perf/util/bpf_skel/func_latency.bpf.c
+++ b/tools/perf/util/bpf_skel/func_latency.bpf.c
@@ -52,34 +52,89 @@ const volatile unsigned int min_latency;
const volatile unsigned int max_latency;
const volatile unsigned int bucket_num = NUM_BUCKET;
-SEC("kprobe/func")
-int BPF_PROG(func_begin)
+static bool can_record(void)
{
- __u64 key, now;
-
- if (!enabled)
- return 0;
-
- key = bpf_get_current_pid_tgid();
-
if (has_cpu) {
__u32 cpu = bpf_get_smp_processor_id();
__u8 *ok;
ok = bpf_map_lookup_elem(&cpu_filter, &cpu);
if (!ok)
- return 0;
+ return false;
}
if (has_task) {
- __u32 pid = key & 0xffffffff;
+ __u32 pid = bpf_get_current_pid_tgid();
__u8 *ok;
ok = bpf_map_lookup_elem(&task_filter, &pid);
if (!ok)
- return 0;
+ return false;
}
+ return true;
+}
+
+static void update_latency(__s64 delta)
+{
+ __u64 val = delta;
+ __u32 key = 0;
+ __u64 *hist;
+ __u64 cmp_base = use_nsec ? 1 : 1000;
+
+ if (delta < 0)
+ return;
+ if (bucket_range != 0) {
+ val = delta / cmp_base;
+
+ if (min_latency > 0) {
+ if (val > min_latency)
+ val -= min_latency;
+ else
+ goto do_lookup;
+ }
+
+ // Less than 1 unit (us or ns), or, in the future, less than
+ // the min latency desired.
+ if (val > 0) { // 1st entry: [ 1 unit .. bucket_range units )
+ key = val / bucket_range + 1;
+ if (key >= bucket_num)
+ key = bucket_num - 1;
+ }
+
+ goto do_lookup;
+ }
+ // calculate index using delta
+ for (key = 0; key < (bucket_num - 1); key++) {
+ if (delta < (cmp_base << key))
+ break;
+ }
+
+do_lookup:
+ hist = bpf_map_lookup_elem(&latency, &key);
+ if (!hist)
+ return;
+
+ __sync_fetch_and_add(hist, 1);
+
+ __sync_fetch_and_add(&total, delta); // always in nsec
+ __sync_fetch_and_add(&count, 1);
+
+ if (delta > max)
+ max = delta;
+ if (delta < min)
+ min = delta;
+}
+
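When bucket_range is zero, update_latency() falls back to power-of-two buckets: key k covers [cmp_base << (k - 1), cmp_base << k), with bucket 0 holding anything below cmp_base and the last bucket catching the rest. A standalone sketch of that index computation (illustrative only):

static unsigned int log2_bucket(long long delta, unsigned long long cmp_base,
				unsigned int bucket_num)
{
	unsigned int key;

	for (key = 0; key < bucket_num - 1; key++) {
		if (delta < (long long)(cmp_base << key))
			break;
	}
	/* e.g. delta = 1500ns, cmp_base = 1000 (usec unit) -> key 1 */
	return key;
}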
+SEC("kprobe/func")
+int BPF_PROG(func_begin)
+{
+ __u64 key, now;
+
+ if (!enabled || !can_record())
+ return 0;
+
+ key = bpf_get_current_pid_tgid();
now = bpf_ktime_get_ns();
// overwrite timestamp for nested functions
@@ -92,7 +147,6 @@ int BPF_PROG(func_end)
{
__u64 tid;
__u64 *start;
- __u64 cmp_base = use_nsec ? 1 : 1000;
if (!enabled)
return 0;
@@ -101,56 +155,44 @@ int BPF_PROG(func_end)
start = bpf_map_lookup_elem(&functime, &tid);
if (start) {
- __s64 delta = bpf_ktime_get_ns() - *start;
- __u64 val = delta;
- __u32 key = 0;
- __u64 *hist;
-
+ update_latency(bpf_ktime_get_ns() - *start);
bpf_map_delete_elem(&functime, &tid);
+ }
- if (delta < 0)
- return 0;
+ return 0;
+}
- if (bucket_range != 0) {
- val = delta / cmp_base;
+SEC("raw_tp")
+int BPF_PROG(event_begin)
+{
+ __u64 key, now;
- if (min_latency > 0) {
- if (val > min_latency)
- val -= min_latency;
- else
- goto do_lookup;
- }
+ if (!enabled || !can_record())
+ return 0;
- // Less than 1 unit (ms or ns), or, in the future,
- // than the min latency desired.
- if (val > 0) { // 1st entry: [ 1 unit .. bucket_range units )
- key = val / bucket_range + 1;
- if (key >= bucket_num)
- key = bucket_num - 1;
- }
+ key = bpf_get_current_pid_tgid();
+ now = bpf_ktime_get_ns();
- goto do_lookup;
- }
- // calculate index using delta
- for (key = 0; key < (bucket_num - 1); key++) {
- if (delta < (cmp_base << key))
- break;
- }
+ // overwrite timestamp for nested events
+ bpf_map_update_elem(&functime, &key, &now, BPF_ANY);
+ return 0;
+}
-do_lookup:
- hist = bpf_map_lookup_elem(&latency, &key);
- if (!hist)
- return 0;
+SEC("raw_tp")
+int BPF_PROG(event_end)
+{
+ __u64 tid;
+ __u64 *start;
- __sync_fetch_and_add(hist, 1);
+ if (!enabled)
+ return 0;
- __sync_fetch_and_add(&total, delta); // always in nsec
- __sync_fetch_and_add(&count, 1);
+ tid = bpf_get_current_pid_tgid();
- if (delta > max)
- max = delta;
- if (delta < min)
- min = delta;
+ start = bpf_map_lookup_elem(&functime, &tid);
+ if (start) {
+ update_latency(bpf_ktime_get_ns() - *start);
+ bpf_map_delete_elem(&functime, &tid);
}
return 0;
diff --git a/tools/perf/util/bpf_skel/kwork_top.bpf.c b/tools/perf/util/bpf_skel/kwork_top.bpf.c
index 73e32e063030..6673386302e2 100644
--- a/tools/perf/util/bpf_skel/kwork_top.bpf.c
+++ b/tools/perf/util/bpf_skel/kwork_top.bpf.c
@@ -18,9 +18,7 @@ enum kwork_class_type {
};
#define MAX_ENTRIES 102400
-#ifndef MAX_NR_CPUS
#define MAX_NR_CPUS 4096
-#endif
#define PF_KTHREAD 0x00200000
#define MAX_COMMAND_LEN 16
diff --git a/tools/perf/util/bpf_skel/lock_contention.bpf.c b/tools/perf/util/bpf_skel/lock_contention.bpf.c
index 69be7a4234e0..96e7d853b9ed 100644
--- a/tools/perf/util/bpf_skel/lock_contention.bpf.c
+++ b/tools/perf/util/bpf_skel/lock_contention.bpf.c
@@ -11,6 +11,12 @@
/* for collect_lock_syms(). 4096 was rejected by the verifier */
#define MAX_CPUS 1024
+/* for collect_zone_lock(). It should be larger than the actual number of zones. */
+#define MAX_ZONES 10
+
+/* for do_lock_delay(). Arbitrarily set to 1 million. */
+#define MAX_LOOP (1U << 20)
+
/* lock contention flags from include/trace/events/lock.h */
#define LCB_F_SPIN (1U << 0)
#define LCB_F_READ (1U << 1)
@@ -146,6 +152,13 @@ struct {
__uint(max_entries, 1);
} slab_caches SEC(".maps");
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(__u64));
+ __uint(value_size, sizeof(__u64));
+ __uint(max_entries, 1);
+} lock_delays SEC(".maps");
+
struct rw_semaphore___old {
struct task_struct *owner;
} __attribute__((preserve_access_index));
@@ -176,6 +189,7 @@ const volatile int stack_skip;
const volatile int lock_owner;
const volatile int use_cgroup_v2;
const volatile int max_stack;
+const volatile int lock_delay;
/* determine the key of lock stat */
const volatile int aggr_mode;
@@ -384,6 +398,35 @@ static inline __u32 check_lock_type(__u64 lock, __u32 flags)
return 0;
}
+static inline long delay_callback(__u64 idx, void *arg)
+{
+ __u64 target = *(__u64 *)arg;
+
+ if (target <= bpf_ktime_get_ns())
+ return 1;
+
+ /* just to kill time */
+ (void)bpf_get_prandom_u32();
+
+ return 0;
+}
+
+static inline void do_lock_delay(__u64 duration)
+{
+ __u64 target = bpf_ktime_get_ns() + duration;
+
+ bpf_loop(MAX_LOOP, delay_callback, &target, /*flags=*/0);
+}
+
+static inline void check_lock_delay(__u64 lock)
+{
+ __u64 *delay;
+
+ delay = bpf_map_lookup_elem(&lock_delays, &lock);
+ if (delay)
+ do_lock_delay(*delay);
+}
+
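BPF programs cannot sleep here, so the delay is a bounded busy-wait: bpf_loop() runs delay_callback() until it returns 1, i.e. until the monotonic clock passes the target. A userspace analog of the same pattern (illustrative only):

#include <stdint.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static void spin_delay(uint64_t duration_ns)
{
	uint64_t target = now_ns() + duration_ns;

	while (now_ns() < target)
		;	/* just kill time, like the bpf_loop() callback */
}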
static inline struct tstamp_data *get_tstamp_elem(__u32 flags)
{
__u32 pid;
@@ -793,6 +836,9 @@ found:
update_contention_data(data, duration, 1);
out:
+ if (lock_delay)
+ check_lock_delay(pelem->lock);
+
pelem->lock = 0;
if (need_delete)
bpf_map_delete_elem(&tstamp, &pid);
@@ -801,6 +847,11 @@ out:
extern struct rq runqueues __ksym;
+const volatile __u64 contig_page_data_addr;
+const volatile __u64 node_data_addr;
+const volatile int nr_nodes;
+const volatile int sizeof_zone;
+
struct rq___old {
raw_spinlock_t lock;
} __attribute__((preserve_access_index));
@@ -809,6 +860,59 @@ struct rq___new {
raw_spinlock_t __lock;
} __attribute__((preserve_access_index));
+static void collect_zone_lock(void)
+{
+ __u64 nr_zones, zone_off;
+ __u64 lock_addr, lock_off;
+ __u32 lock_flag = LOCK_CLASS_ZONE_LOCK;
+
+ zone_off = offsetof(struct pglist_data, node_zones);
+ lock_off = offsetof(struct zone, lock);
+
+ if (contig_page_data_addr) {
+ struct pglist_data *contig_page_data;
+
+ contig_page_data = (void *)(long)contig_page_data_addr;
+ nr_zones = BPF_CORE_READ(contig_page_data, nr_zones);
+
+ for (int i = 0; i < MAX_ZONES; i++) {
+ __u64 zone_addr;
+
+ if (i >= nr_zones)
+ break;
+
+ zone_addr = contig_page_data_addr + (sizeof_zone * i) + zone_off;
+ lock_addr = zone_addr + lock_off;
+
+ bpf_map_update_elem(&lock_syms, &lock_addr, &lock_flag, BPF_ANY);
+ }
+ } else if (nr_nodes > 0) {
+ struct pglist_data **node_data = (void *)(long)node_data_addr;
+
+ for (int i = 0; i < nr_nodes; i++) {
+ struct pglist_data *pgdat = NULL;
+ int err;
+
+ err = bpf_core_read(&pgdat, sizeof(pgdat), &node_data[i]);
+ if (err < 0 || pgdat == NULL)
+ break;
+
+ nr_zones = BPF_CORE_READ(pgdat, nr_zones);
+ for (int k = 0; k < MAX_ZONES; k++) {
+ __u64 zone_addr;
+
+ if (k >= nr_zones)
+ break;
+
+ zone_addr = (__u64)(void *)pgdat + (sizeof_zone * k) + zone_off;
+ lock_addr = zone_addr + lock_off;
+
+ bpf_map_update_elem(&lock_syms, &lock_addr, &lock_flag, BPF_ANY);
+ }
+ }
+ }
+}
+
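The address arithmetic in collect_zone_lock() mirrors the kernel layout: the zones are embedded back-to-back inside each pglist_data. Spelled out for node 0, zone 2 (illustrative):

/*
 *   pgdat     = node_data[0];                 (pointer chased via BPF)
 *   zone_addr = (u64)pgdat
 *             + offsetof(struct pglist_data, node_zones)
 *             + sizeof_zone * 2;
 *   lock_addr = zone_addr + offsetof(struct zone, lock);
 *
 * sizeof_zone must come from vmlinux BTF because the minimal
 * 'struct zone' in the fake vmlinux.h only declares the 'lock' member.
 */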
SEC("raw_tp/bpf_test_finish")
int BPF_PROG(collect_lock_syms)
{
@@ -830,6 +934,9 @@ int BPF_PROG(collect_lock_syms)
lock_flag = LOCK_CLASS_RQLOCK;
bpf_map_update_elem(&lock_syms, &lock_addr, &lock_flag, BPF_ANY);
}
+
+ collect_zone_lock();
+
return 0;
}
diff --git a/tools/perf/util/bpf_skel/lock_data.h b/tools/perf/util/bpf_skel/lock_data.h
index 15f5743bd409..28c5e5aced7f 100644
--- a/tools/perf/util/bpf_skel/lock_data.h
+++ b/tools/perf/util/bpf_skel/lock_data.h
@@ -67,6 +67,7 @@ enum lock_aggr_mode {
enum lock_class_sym {
LOCK_CLASS_NONE,
LOCK_CLASS_RQLOCK,
+ LOCK_CLASS_ZONE_LOCK,
};
struct slab_cache_data {
diff --git a/tools/perf/util/bpf_skel/off_cpu.bpf.c b/tools/perf/util/bpf_skel/off_cpu.bpf.c
index c152116df72f..72763bb8d1de 100644
--- a/tools/perf/util/bpf_skel/off_cpu.bpf.c
+++ b/tools/perf/util/bpf_skel/off_cpu.bpf.c
@@ -18,10 +18,19 @@
#define MAX_STACKS 32
#define MAX_ENTRIES 102400
+#define MAX_CPUS 4096
+#define MAX_OFFCPU_LEN 37
+
+// We have a 'struct stack' in vmlinux.h when building with GEN_VMLINUX_H=1
+struct __stack {
+ u64 array[MAX_STACKS];
+};
+
struct tstamp_data {
__u32 stack_id;
__u32 state;
__u64 timestamp;
+ struct __stack stack;
};
struct offcpu_key {
@@ -39,6 +48,24 @@ struct {
__uint(max_entries, MAX_ENTRIES);
} stacks SEC(".maps");
+struct offcpu_data {
+ u64 array[MAX_OFFCPU_LEN];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+ __uint(max_entries, MAX_CPUS);
+} offcpu_output SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(struct offcpu_data));
+ __uint(max_entries, 1);
+} offcpu_payload SEC(".maps");
+
struct {
__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
@@ -97,6 +124,8 @@ const volatile bool uses_cgroup_v1 = false;
int perf_subsys_id = -1;
+__u64 offcpu_thresh_ns;
+
/*
* Old kernel used to call it task_struct->state and now it's '__state'.
* Use BPF CO-RE "ignored suffix rule" to deal with it like below:
@@ -183,6 +212,47 @@ static inline int can_record(struct task_struct *t, int state)
return 1;
}
+static inline int copy_stack(struct __stack *from, struct offcpu_data *to, int n)
+{
+ int len = 0;
+
+ for (int i = 0; i < MAX_STACKS && from->array[i]; ++i, ++len)
+ to->array[n + 2 + i] = from->array[i];
+
+ return len;
+}
+
+/**
+ * off_cpu_dump - dump off-cpu samples to ring buffer
+ * @data: payload for dumping off-cpu samples
+ * @key: off-cpu data
+ * @stack: stack trace of the task before being scheduled out
+ *
+ * If the off-cpu time threshold is reached, acquire the tid, period, callchain, and
+ * cgroup id of the task, and dump it as a raw sample to the perf ring buffer
+ */
+static int off_cpu_dump(void *ctx, struct offcpu_data *data, struct offcpu_key *key,
+ struct __stack *stack, __u64 delta)
+{
+ int n = 0, len = 0;
+
+ data->array[n++] = (u64)key->tgid << 32 | key->pid;
+ data->array[n++] = delta;
+
+ /* data->array[n] is callchain->nr (updated later) */
+ data->array[n + 1] = PERF_CONTEXT_USER;
+ data->array[n + 2] = 0;
+ len = copy_stack(stack, data, n);
+
+ /* update length of callchain */
+ data->array[n] = len + 1;
+ n += len + 2;
+
+ data->array[n++] = key->cgroup_id;
+
+ return bpf_perf_event_output(ctx, &offcpu_output, BPF_F_CURRENT_CPU, data, n * sizeof(u64));
+}
+
static int off_cpu_stat(u64 *ctx, struct task_struct *prev,
struct task_struct *next, int state)
{
@@ -207,6 +277,16 @@ static int off_cpu_stat(u64 *ctx, struct task_struct *prev,
pelem->state = state;
pelem->stack_id = stack_id;
+ /*
+ * If stacks are successfully collected by bpf_get_stackid(), collect them once more
+ * in task_storage for direct off-cpu sample dumping
+ */
+ if (stack_id > 0 && bpf_get_stack(ctx, &pelem->stack, MAX_STACKS * sizeof(u64), BPF_F_USER_STACK)) {
+ /*
+ * This empty if block is used to avoid a 'result unused' warning from bpf_get_stack().
+ * If the collection fails, continue with the logic for the next task.
+ */
+ }
next:
pelem = bpf_task_storage_get(&tstamp, next, NULL, 0);
@@ -221,11 +301,19 @@ next:
__u64 delta = ts - pelem->timestamp;
__u64 *total;
- total = bpf_map_lookup_elem(&off_cpu, &key);
- if (total)
- *total += delta;
- else
- bpf_map_update_elem(&off_cpu, &key, &delta, BPF_ANY);
+ if (delta >= offcpu_thresh_ns) {
+ int zero = 0;
+ struct offcpu_data *data = bpf_map_lookup_elem(&offcpu_payload, &zero);
+
+ if (data)
+ off_cpu_dump(ctx, data, &key, &pelem->stack, delta);
+ } else {
+ total = bpf_map_lookup_elem(&off_cpu, &key);
+ if (total)
+ *total += delta;
+ else
+ bpf_map_update_elem(&off_cpu, &key, &delta, BPF_ANY);
+ }
/* prevent the timestamp from being reused later */
pelem->timestamp = 0;
diff --git a/tools/perf/util/bpf_skel/perf_version.h b/tools/perf/util/bpf_skel/perf_version.h
new file mode 100644
index 000000000000..1ed5b2e59bf5
--- /dev/null
+++ b/tools/perf/util/bpf_skel/perf_version.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef __PERF_VERSION_H__
+#define __PERF_VERSION_H__
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+/*
+ * This is used by tests/shell/record_bpf_metadata.sh
+ * to verify that BPF metadata generation works.
+ *
+ * PERF_VERSION is defined by a build rule at compile time.
+ */
+const char bpf_metadata_perf_version[] SEC(".rodata") = PERF_VERSION;
+
+#endif /* __PERF_VERSION_H__ */
diff --git a/tools/perf/util/bpf_skel/sample_filter.bpf.c b/tools/perf/util/bpf_skel/sample_filter.bpf.c
index b195e6efeb8b..e5666d4c1722 100644
--- a/tools/perf/util/bpf_skel/sample_filter.bpf.c
+++ b/tools/perf/util/bpf_skel/sample_filter.bpf.c
@@ -164,7 +164,7 @@ static inline __u64 perf_get_sample(struct bpf_perf_event_data_kern *kctx,
if (entry->part == 8) {
union perf_mem_data_src___new *data = (void *)&kctx->data->data_src;
- if (bpf_core_field_exists(data->mem_hops))
+ if (__builtin_preserve_field_info(data->mem_hops, BPF_FIELD_EXISTS))
return data->mem_hops;
return 0;
diff --git a/tools/perf/util/bpf_skel/syscall_summary.bpf.c b/tools/perf/util/bpf_skel/syscall_summary.bpf.c
new file mode 100644
index 000000000000..1bcd066a5199
--- /dev/null
+++ b/tools/perf/util/bpf_skel/syscall_summary.bpf.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Trace raw_syscalls tracepoints to collect system call statistics.
+ */
+
+#include "vmlinux.h"
+#include "syscall_summary.h"
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+/* This is to calculate a delta between sys-enter and sys-exit for each thread */
+struct syscall_trace {
+ int nr; /* syscall number is only available at sys-enter */
+ int unused;
+ u64 timestamp;
+};
+
+#define MAX_ENTRIES (128 * 1024)
+
+struct syscall_trace_map {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, int); /* tid */
+ __type(value, struct syscall_trace);
+ __uint(max_entries, MAX_ENTRIES);
+} syscall_trace_map SEC(".maps");
+
+struct syscall_stats_map {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, struct syscall_key);
+ __type(value, struct syscall_stats);
+ __uint(max_entries, MAX_ENTRIES);
+} syscall_stats_map SEC(".maps");
+
+int enabled; /* controlled from userspace */
+
+const volatile enum syscall_aggr_mode aggr_mode;
+const volatile int use_cgroup_v2;
+
+int perf_subsys_id = -1;
+
+static inline __u64 get_current_cgroup_id(void)
+{
+ struct task_struct *task;
+ struct cgroup *cgrp;
+
+ if (use_cgroup_v2)
+ return bpf_get_current_cgroup_id();
+
+ task = bpf_get_current_task_btf();
+
+ if (perf_subsys_id == -1) {
+#if __has_builtin(__builtin_preserve_enum_value)
+ perf_subsys_id = bpf_core_enum_value(enum cgroup_subsys_id,
+ perf_event_cgrp_id);
+#else
+ perf_subsys_id = perf_event_cgrp_id;
+#endif
+ }
+
+ cgrp = BPF_CORE_READ(task, cgroups, subsys[perf_subsys_id], cgroup);
+ return BPF_CORE_READ(cgrp, kn, id);
+}
+
+static void update_stats(int cpu_or_tid, u64 cgroup_id, int nr, s64 duration,
+ long ret)
+{
+ struct syscall_key key = {
+ .cpu_or_tid = cpu_or_tid,
+ .cgroup = cgroup_id,
+ .nr = nr,
+ };
+ struct syscall_stats *stats;
+
+ stats = bpf_map_lookup_elem(&syscall_stats_map, &key);
+ if (stats == NULL) {
+ struct syscall_stats zero = {};
+
+ bpf_map_update_elem(&syscall_stats_map, &key, &zero, BPF_NOEXIST);
+ stats = bpf_map_lookup_elem(&syscall_stats_map, &key);
+ if (stats == NULL)
+ return;
+ }
+
+ __sync_fetch_and_add(&stats->count, 1);
+ if (ret < 0)
+ __sync_fetch_and_add(&stats->error, 1);
+
+ if (duration > 0) {
+ __sync_fetch_and_add(&stats->total_time, duration);
+ __sync_fetch_and_add(&stats->squared_sum, duration * duration);
+ if (stats->max_time < duration)
+ stats->max_time = duration;
+ if (stats->min_time > duration || stats->min_time == 0)
+ stats->min_time = duration;
+ }
+}
+
+SEC("tp_btf/sys_enter")
+int sys_enter(u64 *ctx)
+{
+ int tid;
+ struct syscall_trace st;
+
+ if (!enabled)
+ return 0;
+
+ st.nr = ctx[1]; /* syscall number */
+ st.unused = 0;
+ st.timestamp = bpf_ktime_get_ns();
+
+ tid = bpf_get_current_pid_tgid();
+ bpf_map_update_elem(&syscall_trace_map, &tid, &st, BPF_ANY);
+
+ return 0;
+}
+
+SEC("tp_btf/sys_exit")
+int sys_exit(u64 *ctx)
+{
+ int tid;
+ int key = 0;
+ u64 cgroup = 0;
+ long ret = ctx[1]; /* return value of the syscall */
+ struct syscall_trace *st;
+ s64 delta;
+
+ if (!enabled)
+ return 0;
+
+ tid = bpf_get_current_pid_tgid();
+ st = bpf_map_lookup_elem(&syscall_trace_map, &tid);
+ if (st == NULL)
+ return 0;
+
+ if (aggr_mode == SYSCALL_AGGR_THREAD)
+ key = tid;
+ else if (aggr_mode == SYSCALL_AGGR_CGROUP)
+ cgroup = get_current_cgroup_id();
+ else
+ key = bpf_get_smp_processor_id();
+
+ delta = bpf_ktime_get_ns() - st->timestamp;
+ update_stats(key, cgroup, st->nr, delta, ret);
+
+ bpf_map_delete_elem(&syscall_trace_map, &tid);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
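On the userspace side, the per-key counters kept by update_stats() are enough to recover the mean and the standard deviation. A consumer sketch, assuming the struct syscall_stats layout from syscall_summary.h below (illustrative, not part of the patch):

#include <math.h>

static double stats_mean(const struct syscall_stats *s)
{
	return s->count ? (double)s->total_time / s->count : 0.0;
}

static double stats_stddev(const struct syscall_stats *s)
{
	double mean, var;

	if (s->count == 0)
		return 0.0;
	mean = stats_mean(s);
	/* E[x^2] - E[x]^2, from the running sums kept in BPF */
	var = (double)s->squared_sum / s->count - mean * mean;
	return var > 0.0 ? sqrt(var) : 0.0;
}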
diff --git a/tools/perf/util/bpf_skel/syscall_summary.h b/tools/perf/util/bpf_skel/syscall_summary.h
new file mode 100644
index 000000000000..72ccccb45925
--- /dev/null
+++ b/tools/perf/util/bpf_skel/syscall_summary.h
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Data structures shared between BPF and tools. */
+#ifndef UTIL_BPF_SKEL_SYSCALL_SUMMARY_H
+#define UTIL_BPF_SKEL_SYSCALL_SUMMARY_H
+
+enum syscall_aggr_mode {
+ SYSCALL_AGGR_THREAD,
+ SYSCALL_AGGR_CPU,
+ SYSCALL_AGGR_CGROUP,
+};
+
+struct syscall_key {
+ u64 cgroup;
+ int cpu_or_tid;
+ int nr;
+};
+
+struct syscall_stats {
+ u64 total_time;
+ u64 squared_sum;
+ u64 max_time;
+ u64 min_time;
+ u32 count;
+ u32 error;
+};
+
+#endif /* UTIL_BPF_SKEL_SYSCALL_SUMMARY_H */
diff --git a/tools/perf/util/bpf_skel/vmlinux/vmlinux.h b/tools/perf/util/bpf_skel/vmlinux/vmlinux.h
index 7b81d3173917..a59ce912be18 100644
--- a/tools/perf/util/bpf_skel/vmlinux/vmlinux.h
+++ b/tools/perf/util/bpf_skel/vmlinux/vmlinux.h
@@ -203,4 +203,13 @@ struct bpf_iter__kmem_cache {
struct kmem_cache *s;
} __attribute__((preserve_access_index));
+struct zone {
+ spinlock_t lock;
+} __attribute__((preserve_access_index));
+
+struct pglist_data {
+ struct zone node_zones[6]; /* large enough for all possible configs */
+ int nr_zones;
+} __attribute__((preserve_access_index));
+
#endif // __VMLINUX_H
diff --git a/tools/perf/util/bpf_trace_augment.c b/tools/perf/util/bpf_trace_augment.c
new file mode 100644
index 000000000000..56ed17534caa
--- /dev/null
+++ b/tools/perf/util/bpf_trace_augment.c
@@ -0,0 +1,143 @@
+#include <bpf/libbpf.h>
+#include <internal/xyarray.h>
+
+#include "util/debug.h"
+#include "util/evlist.h"
+#include "util/trace_augment.h"
+
+#include "bpf_skel/augmented_raw_syscalls.skel.h"
+
+static struct augmented_raw_syscalls_bpf *skel;
+static struct evsel *bpf_output;
+
+int augmented_syscalls__prepare(void)
+{
+ struct bpf_program *prog;
+ char buf[128];
+ int err;
+
+ skel = augmented_raw_syscalls_bpf__open();
+ if (!skel) {
+ pr_debug("Failed to open augmented syscalls BPF skeleton\n");
+ return -errno;
+ }
+
+ /*
+ * Disable attaching the BPF programs except for sys_enter and
+ * sys_exit, which tail call into the others as necessary.
+ */
+ bpf_object__for_each_program(prog, skel->obj) {
+ if (prog != skel->progs.sys_enter && prog != skel->progs.sys_exit)
+ bpf_program__set_autoattach(prog, /*autoattach=*/false);
+ }
+
+ err = augmented_raw_syscalls_bpf__load(skel);
+ if (err < 0) {
+ libbpf_strerror(err, buf, sizeof(buf));
+ pr_debug("Failed to load augmented syscalls BPF skeleton: %s\n", buf);
+ return err;
+ }
+
+ augmented_raw_syscalls_bpf__attach(skel);
+ return 0;
+}
+
+int augmented_syscalls__create_bpf_output(struct evlist *evlist)
+{
+ int err = parse_event(evlist, "bpf-output/no-inherit=1,name=__augmented_syscalls__/");
+
+ if (err) {
+ pr_err("ERROR: Setup BPF output event failed: %d\n", err);
+ return err;
+ }
+
+ bpf_output = evlist__last(evlist);
+ assert(evsel__name_is(bpf_output, "__augmented_syscalls__"));
+
+ return 0;
+}
+
+void augmented_syscalls__setup_bpf_output(void)
+{
+ struct perf_cpu cpu;
+ int i;
+
+ if (bpf_output == NULL)
+ return;
+
+ /*
+ * Set up the __augmented_syscalls__ BPF map to hold, for each
+ * CPU, the bpf-output event's file descriptor.
+ */
+ perf_cpu_map__for_each_cpu(cpu, i, bpf_output->core.cpus) {
+ int mycpu = cpu.cpu;
+
+ bpf_map__update_elem(skel->maps.__augmented_syscalls__,
+ &mycpu, sizeof(mycpu),
+ xyarray__entry(bpf_output->core.fd,
+ mycpu, 0),
+ sizeof(__u32), BPF_ANY);
+ }
+}
+
+int augmented_syscalls__set_filter_pids(unsigned int nr, pid_t *pids)
+{
+ bool value = true;
+ int err = 0;
+
+ if (skel == NULL)
+ return 0;
+
+ for (size_t i = 0; i < nr; ++i) {
+ err = bpf_map__update_elem(skel->maps.pids_filtered, &pids[i],
+ sizeof(*pids), &value, sizeof(value),
+ BPF_ANY);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+int augmented_syscalls__get_map_fds(int *enter_fd, int *exit_fd, int *beauty_fd)
+{
+ if (skel == NULL)
+ return -1;
+
+ *enter_fd = bpf_map__fd(skel->maps.syscalls_sys_enter);
+ *exit_fd = bpf_map__fd(skel->maps.syscalls_sys_exit);
+ *beauty_fd = bpf_map__fd(skel->maps.beauty_map_enter);
+
+ if (*enter_fd < 0 || *exit_fd < 0 || *beauty_fd < 0) {
+ pr_err("Error: failed to get syscall or beauty map fd\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+struct bpf_program *augmented_syscalls__unaugmented(void)
+{
+ return skel->progs.syscall_unaugmented;
+}
+
+struct bpf_program *augmented_syscalls__find_by_title(const char *name)
+{
+ struct bpf_program *pos;
+ const char *sec_name;
+
+ if (skel->obj == NULL)
+ return NULL;
+
+ bpf_object__for_each_program(pos, skel->obj) {
+ sec_name = bpf_program__section_name(pos);
+ if (sec_name && !strcmp(sec_name, name))
+ return pos;
+ }
+
+ return NULL;
+}
+
+void augmented_syscalls__cleanup(void)
+{
+ augmented_raw_syscalls_bpf__destroy(skel);
+}
diff --git a/tools/perf/util/branch.c b/tools/perf/util/branch.c
index ab760e267d41..3712be067464 100644
--- a/tools/perf/util/branch.c
+++ b/tools/perf/util/branch.c
@@ -46,7 +46,7 @@ const char *branch_new_type_name(int new_type)
"FAULT_DATA",
"FAULT_INST",
/*
- * TODO: This switch should happen on 'session->header.env.arch'
+ * TODO: This switch should happen on 'perf_session__env(session)->arch'
* instead, because an arm64 platform perf recording could be
* opened for analysis on other platforms as well.
*/
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index e763e8d99a43..fdb35133fde4 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -42,10 +42,20 @@
static bool no_buildid_cache;
+static int mark_dso_hit_callback(struct callchain_cursor_node *node, void *data __maybe_unused)
+{
+ struct map *map = node->ms.map;
+
+ if (map)
+ dso__set_hit(map__dso(map));
+
+ return 0;
+}
+
int build_id__mark_dso_hit(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
- struct evsel *evsel __maybe_unused,
+ struct evsel *evsel,
struct machine *machine)
{
struct addr_location al;
@@ -63,31 +73,36 @@ int build_id__mark_dso_hit(const struct perf_tool *tool __maybe_unused,
dso__set_hit(map__dso(al.map));
addr_location__exit(&al);
+
+ sample__for_each_callchain_node(thread, evsel, sample, PERF_MAX_STACK_DEPTH,
+ /*symbols=*/false, mark_dso_hit_callback, /*data=*/NULL);
+
thread__put(thread);
return 0;
}
-int build_id__sprintf(const struct build_id *build_id, char *bf)
+int build_id__snprintf(const struct build_id *build_id, char *bf, size_t bf_size)
{
- char *bid = bf;
- const u8 *raw = build_id->data;
- size_t i;
+ size_t offs = 0;
- bf[0] = 0x0;
-
- for (i = 0; i < build_id->size; ++i) {
- sprintf(bid, "%02x", *raw);
- ++raw;
- bid += 2;
+ if (build_id->size == 0) {
+ /* Ensure bf is always \0 terminated. */
+ if (bf_size > 0)
+ bf[0] = '\0';
+ return 0;
}
- return (bid - bf) + 1;
+ for (size_t i = 0; i < build_id->size && offs < bf_size; ++i)
+ offs += snprintf(bf + offs, bf_size - offs, "%02x", build_id->data[i]);
+
+ return offs;
}
-int sysfs__sprintf_build_id(const char *root_dir, char *sbuild_id)
+int sysfs__snprintf_build_id(const char *root_dir, char *sbuild_id, size_t sbuild_id_size)
{
char notes[PATH_MAX];
- struct build_id bid;
+ struct build_id bid = { .size = 0, };
int ret;
if (!root_dir)
@@ -99,19 +114,19 @@ int sysfs__sprintf_build_id(const char *root_dir, char *sbuild_id)
if (ret < 0)
return ret;
- return build_id__sprintf(&bid, sbuild_id);
+ return build_id__snprintf(&bid, sbuild_id, sbuild_id_size);
}
-int filename__sprintf_build_id(const char *pathname, char *sbuild_id)
+int filename__snprintf_build_id(const char *pathname, char *sbuild_id, size_t sbuild_id_size)
{
- struct build_id bid;
+ struct build_id bid = { .size = 0, };
int ret;
ret = filename__read_build_id(pathname, &bid);
if (ret < 0)
return ret;
- return build_id__sprintf(&bid, sbuild_id);
+ return build_id__snprintf(&bid, sbuild_id, sbuild_id_size);
}
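The renamed helpers now take an explicit buffer size, so callers can no longer overflow the hex string. A minimal call sketch, assuming a populated struct build_id bid (illustrative):

char sbuild_id[SBUILD_ID_SIZE];	/* 2 hex chars per data byte + NUL */

/* writes at most sizeof(sbuild_id) - 1 chars and NUL-terminates */
build_id__snprintf(&bid, sbuild_id, sizeof(sbuild_id));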
/* asnprintf consolidates asprintf and snprintf */
@@ -212,9 +227,9 @@ static bool build_id_cache__valid_id(char *sbuild_id)
return false;
if (!strcmp(pathname, DSO__NAME_KALLSYMS))
- ret = sysfs__sprintf_build_id("/", real_sbuild_id);
+ ret = sysfs__snprintf_build_id("/", real_sbuild_id, sizeof(real_sbuild_id));
else if (pathname[0] == '/')
- ret = filename__sprintf_build_id(pathname, real_sbuild_id);
+ ret = filename__snprintf_build_id(pathname, real_sbuild_id, sizeof(real_sbuild_id));
else
ret = -EINVAL; /* Should we support other special DSO cache? */
if (ret >= 0)
@@ -243,7 +258,7 @@ char *__dso__build_id_filename(const struct dso *dso, char *bf, size_t size,
if (!dso__has_build_id(dso))
return NULL;
- build_id__sprintf(dso__bid_const(dso), sbuild_id);
+ build_id__snprintf(dso__bid(dso), sbuild_id, sizeof(sbuild_id));
linkname = build_id_cache__linkname(sbuild_id, NULL, 0);
if (!linkname)
return NULL;
@@ -326,7 +341,7 @@ static int machine__write_buildid_table_cb(struct dso *dso, void *data)
}
in_kernel = dso__kernel(dso) || is_kernel_module(name, PERF_RECORD_MISC_CPUMODE_UNKNOWN);
- return write_buildid(name, name_len, dso__bid(dso), args->machine->pid,
+ return write_buildid(name, name_len, &dso__id(dso)->build_id, args->machine->pid,
in_kernel ? args->kmisc : args->umisc, args->fd);
}
@@ -769,7 +784,7 @@ static int build_id_cache__add_b(const struct build_id *bid,
{
char sbuild_id[SBUILD_ID_SIZE];
- build_id__sprintf(bid, sbuild_id);
+ build_id__snprintf(bid, sbuild_id, sizeof(sbuild_id));
return __build_id_cache__add_s(sbuild_id, name, nsi, is_kallsyms,
is_vdso, proper_name, root_dir);
@@ -841,7 +856,7 @@ static int filename__read_build_id_ns(const char *filename,
static bool dso__build_id_mismatch(struct dso *dso, const char *name)
{
- struct build_id bid;
+ struct build_id bid = { .size = 0, };
bool ret = false;
mutex_lock(dso__lock(dso));
@@ -864,7 +879,7 @@ static int dso__cache_build_id(struct dso *dso, struct machine *machine,
char *allocated_name = NULL;
int ret = 0;
- if (!dso__has_build_id(dso))
+ if (!dso__has_build_id(dso) || !dso__hit(dso))
return 0;
if (dso__is_kcore(dso)) {
@@ -951,7 +966,10 @@ bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
void build_id__init(struct build_id *bid, const u8 *data, size_t size)
{
- WARN_ON(size > BUILD_ID_SIZE);
+ if (size > BUILD_ID_SIZE) {
+ pr_debug("Truncating build_id size from %zd\n", size);
+ size = BUILD_ID_SIZE;
+ }
memcpy(bid->data, data, size);
bid->size = size;
}
diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h
index a212497bfdb0..47e621cebe1b 100644
--- a/tools/perf/util/build-id.h
+++ b/tools/perf/util/build-id.h
@@ -13,7 +13,7 @@
struct build_id {
u8 data[BUILD_ID_SIZE];
- size_t size;
+ u8 size;
};
struct dso;
@@ -21,10 +21,10 @@ struct feat_fd;
struct nsinfo;
void build_id__init(struct build_id *bid, const u8 *data, size_t size);
-int build_id__sprintf(const struct build_id *build_id, char *bf);
+int build_id__snprintf(const struct build_id *build_id, char *bf, size_t bf_size);
bool build_id__is_defined(const struct build_id *bid);
-int sysfs__sprintf_build_id(const char *root_dir, char *sbuild_id);
-int filename__sprintf_build_id(const char *pathname, char *sbuild_id);
+int sysfs__snprintf_build_id(const char *root_dir, char *sbuild_id, size_t sbuild_id_size);
+int filename__snprintf_build_id(const char *pathname, char *sbuild_id, size_t sbuild_id_size);
char *build_id_cache__kallsyms_path(const char *sbuild_id, char *bf,
size_t size);
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index d7b7eef740b9..428e5350d7a2 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -275,9 +275,13 @@ int parse_callchain_record(const char *arg, struct callchain_param *param)
if (tok) {
unsigned long size;
- size = strtoul(tok, &name, 0);
- if (size < (unsigned) sysctl__max_stack())
- param->max_stack = size;
+ if (!strncmp(tok, "defer", sizeof("defer"))) {
+ param->defer = true;
+ } else {
+ size = strtoul(tok, &name, 0);
+ if (size < (unsigned) sysctl__max_stack())
+ param->max_stack = size;
+ }
}
break;
@@ -314,6 +318,12 @@ int parse_callchain_record(const char *arg, struct callchain_param *param)
} while (0);
free(buf);
+
+ if (param->defer && param->record_mode != CALLCHAIN_FP) {
+ pr_err("callchain: deferred callchain only works with FP\n");
+ return -EINVAL;
+ }
+
return ret;
}
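Inferred from the parser above (not from documentation): appending "defer" to a frame-pointer callchain spec enables the deferred user callchain mode, and the check below rejects it for any other record mode. A call sketch:

struct callchain_param param = { 0 };

if (parse_callchain_record("fp,defer", &param) == 0) {
	/* param.record_mode == CALLCHAIN_FP && param.defer == true */
}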
@@ -1828,3 +1838,38 @@ int sample__for_each_callchain_node(struct thread *thread, struct evsel *evsel,
}
return 0;
}
+
+/*
+ * This function merges an earlier sample (@sample_orig) that was waiting
+ * for its deferred user callchain with the matching callchain record
+ * (@sample_callchain) which is delivered now. The @sample_orig->callchain
+ * should be released after use if ->deferred_callchain is set.
+ */
+int sample__merge_deferred_callchain(struct perf_sample *sample_orig,
+ struct perf_sample *sample_callchain)
+{
+ u64 nr_orig = sample_orig->callchain->nr - 1;
+ u64 nr_deferred = sample_callchain->callchain->nr;
+ struct ip_callchain *callchain;
+
+ if (sample_orig->callchain->nr < 2) {
+ sample_orig->deferred_callchain = false;
+ return -EINVAL;
+ }
+
+ callchain = calloc(1 + nr_orig + nr_deferred, sizeof(u64));
+ if (callchain == NULL) {
+ sample_orig->deferred_callchain = false;
+ return -ENOMEM;
+ }
+
+ callchain->nr = nr_orig + nr_deferred;
+ /* copy original including PERF_CONTEXT_USER_DEFERRED (but not the cookie) */
+ memcpy(callchain->ips, sample_orig->callchain->ips, nr_orig * sizeof(u64));
+ /* copy deferred user callchains */
+ memcpy(&callchain->ips[nr_orig], sample_callchain->callchain->ips,
+ nr_deferred * sizeof(u64));
+
+ sample_orig->callchain = callchain;
+ return 0;
+}
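A worked example of the merge (illustrative):

/*
 * original sample callchain (nr = 4):
 *   [ K1, K2, PERF_CONTEXT_USER_DEFERRED, cookie ]
 * deferred callchain record (nr = 2):
 *   [ U1, U2 ]
 *
 * nr_orig = 4 - 1 = 3 drops the trailing cookie, so the merged chain is
 *   [ K1, K2, PERF_CONTEXT_USER_DEFERRED, U1, U2 ]   (nr = 5)
 */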
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 86ed9e4d04f9..2a52af8c80ac 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -98,6 +98,7 @@ extern bool dwarf_callchain_users;
struct callchain_param {
bool enabled;
+ bool defer;
enum perf_call_graph_mode record_mode;
u32 dump_size;
enum chain_mode mode;
@@ -317,4 +318,7 @@ int sample__for_each_callchain_node(struct thread *thread, struct evsel *evsel,
struct perf_sample *sample, int max_stack,
bool symbols, callchain_iter_fn cb, void *data);
+int sample__merge_deferred_callchain(struct perf_sample *sample_orig,
+ struct perf_sample *sample_callchain);
+
#endif /* __PERF_CALLCHAIN_H */
diff --git a/tools/perf/util/cap.c b/tools/perf/util/cap.c
index 69d9a2bcd40b..24a0ea7e6d97 100644
--- a/tools/perf/util/cap.c
+++ b/tools/perf/util/cap.c
@@ -7,7 +7,6 @@
#include "debug.h"
#include <errno.h>
#include <string.h>
-#include <linux/capability.h>
#include <sys/syscall.h>
#include <unistd.h>
diff --git a/tools/perf/util/cap.h b/tools/perf/util/cap.h
index 0c6a1ff55f07..c1b8ac033ccc 100644
--- a/tools/perf/util/cap.h
+++ b/tools/perf/util/cap.h
@@ -3,6 +3,7 @@
#define __PERF_CAP_H
#include <stdbool.h>
+#include <linux/capability.h>
/* For older systems */
#ifndef CAP_SYSLOG
@@ -13,6 +14,10 @@
#define CAP_PERFMON 38
#endif
+#ifndef CAP_BPF
+#define CAP_BPF 39
+#endif
+
/* Query if a capability is supported, used_root is set if the fallback root check was used. */
bool perf_cap__capable(int cap, bool *used_root);
diff --git a/tools/perf/util/capstone.c b/tools/perf/util/capstone.c
new file mode 100644
index 000000000000..be5fd44b1f9d
--- /dev/null
+++ b/tools/perf/util/capstone.c
@@ -0,0 +1,471 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "capstone.h"
+#include "annotate.h"
+#include "addr_location.h"
+#include "debug.h"
+#include "disasm.h"
+#include "dso.h"
+#include "machine.h"
+#include "map.h"
+#include "namespaces.h"
+#include "print_insn.h"
+#include "symbol.h"
+#include "thread.h"
+#include <errno.h>
+#include <fcntl.h>
+#include <string.h>
+
+#ifdef HAVE_LIBCAPSTONE_SUPPORT
+#include <capstone/capstone.h>
+#endif
+
+#ifdef HAVE_LIBCAPSTONE_SUPPORT
+static int capstone_init(struct machine *machine, csh *cs_handle, bool is64,
+ bool disassembler_style)
+{
+ cs_arch arch;
+ cs_mode mode;
+
+ if (machine__is(machine, "x86_64") && is64) {
+ arch = CS_ARCH_X86;
+ mode = CS_MODE_64;
+ } else if (machine__normalized_is(machine, "x86")) {
+ arch = CS_ARCH_X86;
+ mode = CS_MODE_32;
+ } else if (machine__normalized_is(machine, "arm64")) {
+ arch = CS_ARCH_ARM64;
+ mode = CS_MODE_ARM;
+ } else if (machine__normalized_is(machine, "arm")) {
+ arch = CS_ARCH_ARM;
+ mode = CS_MODE_ARM + CS_MODE_V8;
+ } else if (machine__normalized_is(machine, "s390")) {
+ arch = CS_ARCH_SYSZ;
+ mode = CS_MODE_BIG_ENDIAN;
+ } else {
+ return -1;
+ }
+
+ if (cs_open(arch, mode, cs_handle) != CS_ERR_OK) {
+ pr_warning_once("cs_open failed\n");
+ return -1;
+ }
+
+ if (machine__normalized_is(machine, "x86")) {
+ /*
+ * When capstone_init() is used from symbol__disassemble(),
+ * setting CS_OPT_SYNTAX_ATT depends on whether the
+ * disassembler_style option is set via annotation args.
+ */
+ if (disassembler_style)
+ cs_option(*cs_handle, CS_OPT_SYNTAX, CS_OPT_SYNTAX_ATT);
+ /*
+ * Resolving address operands to symbols is implemented
+ * on x86 by investigating instruction details.
+ */
+ cs_option(*cs_handle, CS_OPT_DETAIL, CS_OPT_ON);
+ }
+
+ return 0;
+}
+#endif
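A minimal, self-contained libcapstone sketch of the open/disassemble/free cycle that capstone_init() and its callers perform (illustrative; error handling reduced to early returns):

#include <inttypes.h>
#include <stdio.h>
#include <capstone/capstone.h>

static void disasm_one(const uint8_t *code, size_t size, uint64_t ip)
{
	csh handle;
	cs_insn *insn;

	if (cs_open(CS_ARCH_X86, CS_MODE_64, &handle) != CS_ERR_OK)
		return;
	if (cs_disasm(handle, code, size, ip, /*count=*/1, &insn) > 0) {
		printf("%#" PRIx64 ": %s %s\n", insn[0].address,
		       insn[0].mnemonic, insn[0].op_str);
		cs_free(insn, 1);
	}
	cs_close(&handle);
}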
+
+#ifdef HAVE_LIBCAPSTONE_SUPPORT
+static size_t print_insn_x86(struct thread *thread, u8 cpumode, cs_insn *insn,
+ int print_opts, FILE *fp)
+{
+ struct addr_location al;
+ size_t printed = 0;
+
+ if (insn->detail && insn->detail->x86.op_count == 1) {
+ cs_x86_op *op = &insn->detail->x86.operands[0];
+
+ addr_location__init(&al);
+ if (op->type == X86_OP_IMM &&
+ thread__find_symbol(thread, cpumode, op->imm, &al)) {
+ printed += fprintf(fp, "%s ", insn[0].mnemonic);
+ printed += symbol__fprintf_symname_offs(al.sym, &al, fp);
+ if (print_opts & PRINT_INSN_IMM_HEX)
+ printed += fprintf(fp, " [%#" PRIx64 "]", op->imm);
+ addr_location__exit(&al);
+ return printed;
+ }
+ addr_location__exit(&al);
+ }
+
+ printed += fprintf(fp, "%s %s", insn[0].mnemonic, insn[0].op_str);
+ return printed;
+}
+#endif
+
+ssize_t capstone__fprintf_insn_asm(struct machine *machine __maybe_unused,
+ struct thread *thread __maybe_unused,
+ u8 cpumode __maybe_unused, bool is64bit __maybe_unused,
+ const uint8_t *code __maybe_unused,
+ size_t code_size __maybe_unused,
+ uint64_t ip __maybe_unused, int *lenp __maybe_unused,
+ int print_opts __maybe_unused, FILE *fp __maybe_unused)
+{
+#ifdef HAVE_LIBCAPSTONE_SUPPORT
+ size_t printed;
+ cs_insn *insn;
+ csh cs_handle;
+ size_t count;
+ int ret;
+
+ /* TODO: Try to initialize capstone only once, but that needs a proper place. */
+ ret = capstone_init(machine, &cs_handle, is64bit, true);
+ if (ret < 0)
+ return ret;
+
+ count = cs_disasm(cs_handle, code, code_size, ip, 1, &insn);
+ if (count > 0) {
+ if (machine__normalized_is(machine, "x86"))
+ printed = print_insn_x86(thread, cpumode, &insn[0], print_opts, fp);
+ else
+ printed = fprintf(fp, "%s %s", insn[0].mnemonic, insn[0].op_str);
+ if (lenp)
+ *lenp = insn->size;
+ cs_free(insn, count);
+ } else {
+ printed = -1;
+ }
+
+ cs_close(&cs_handle);
+ return printed;
+#else
+ return -1;
+#endif
+}
+
+#ifdef HAVE_LIBCAPSTONE_SUPPORT
+static void print_capstone_detail(cs_insn *insn, char *buf, size_t len,
+ struct annotate_args *args, u64 addr)
+{
+ int i;
+ struct map *map = args->ms.map;
+ struct symbol *sym;
+
+ /* TODO: support more architectures */
+ if (!arch__is(args->arch, "x86"))
+ return;
+
+ if (insn->detail == NULL)
+ return;
+
+ for (i = 0; i < insn->detail->x86.op_count; i++) {
+ cs_x86_op *op = &insn->detail->x86.operands[i];
+ u64 orig_addr;
+
+ if (op->type != X86_OP_MEM)
+ continue;
+
+ /* only print RIP-based global symbols for now */
+ if (op->mem.base != X86_REG_RIP)
+ continue;
+
+ /* get the target address */
+ orig_addr = addr + insn->size + op->mem.disp;
+ addr = map__objdump_2mem(map, orig_addr);
+
+ if (dso__kernel(map__dso(map))) {
+ /*
+ * The kernel maps can be split into sections, let's
+ * find the map first and then search for the symbol.
+ */
+ map = maps__find(map__kmaps(map), addr);
+ if (map == NULL)
+ continue;
+ }
+
+ /* convert it to map-relative address for search */
+ addr = map__map_ip(map, addr);
+
+ sym = map__find_symbol(map, addr);
+ if (sym == NULL)
+ continue;
+
+ if (addr == sym->start) {
+ scnprintf(buf, len, "\t# %"PRIx64" <%s>",
+ orig_addr, sym->name);
+ } else {
+ scnprintf(buf, len, "\t# %"PRIx64" <%s+%#"PRIx64">",
+ orig_addr, sym->name, addr - sym->start);
+ }
+ break;
+ }
+}
+#endif
+
+#ifdef HAVE_LIBCAPSTONE_SUPPORT
+struct find_file_offset_data {
+ u64 ip;
+ u64 offset;
+};
+
+/* This will be called for each PHDR in an ELF binary */
+static int find_file_offset(u64 start, u64 len, u64 pgoff, void *arg)
+{
+ struct find_file_offset_data *data = arg;
+
+ if (start <= data->ip && data->ip < start + len) {
+ data->offset = pgoff + data->ip - start;
+ return 1;
+ }
+ return 0;
+}
+#endif
+
+int symbol__disassemble_capstone(const char *filename __maybe_unused,
+ struct symbol *sym __maybe_unused,
+ struct annotate_args *args __maybe_unused)
+{
+#ifdef HAVE_LIBCAPSTONE_SUPPORT
+ struct annotation *notes = symbol__annotation(sym);
+ struct map *map = args->ms.map;
+ struct dso *dso = map__dso(map);
+ u64 start = map__rip_2objdump(map, sym->start);
+ u64 offset;
+ int i, count, free_count;
+ bool is_64bit = false;
+ bool needs_cs_close = false;
+ /* Malloc-ed buffer containing instructions read from disk. */
+ u8 *code_buf = NULL;
+ /* Pointer to code to be disassembled. */
+ const u8 *buf;
+ u64 buf_len;
+ csh handle;
+ cs_insn *insn = NULL;
+ char disasm_buf[512];
+ struct disasm_line *dl;
+ bool disassembler_style = false;
+
+ if (args->options->objdump_path)
+ return -1;
+
+ buf = dso__read_symbol(dso, filename, map, sym,
+ &code_buf, &buf_len, &is_64bit);
+ if (buf == NULL)
+ return errno;
+
+ /* add the function address and name */
+ scnprintf(disasm_buf, sizeof(disasm_buf), "%#"PRIx64" <%s>:",
+ start, sym->name);
+
+ args->offset = -1;
+ args->line = disasm_buf;
+ args->line_nr = 0;
+ args->fileloc = NULL;
+ args->ms.sym = sym;
+
+ dl = disasm_line__new(args);
+ if (dl == NULL)
+ goto err;
+
+ annotation_line__add(&dl->al, &notes->src->source);
+
+ if (!args->options->disassembler_style ||
+ !strcmp(args->options->disassembler_style, "att"))
+ disassembler_style = true;
+
+ if (capstone_init(maps__machine(args->ms.maps), &handle, is_64bit, disassembler_style) < 0)
+ goto err;
+
+ needs_cs_close = true;
+
+ free_count = count = cs_disasm(handle, buf, buf_len, start, buf_len, &insn);
+ for (i = 0, offset = 0; i < count; i++) {
+ int printed;
+
+ printed = scnprintf(disasm_buf, sizeof(disasm_buf),
+ " %-7s %s",
+ insn[i].mnemonic, insn[i].op_str);
+ print_capstone_detail(&insn[i], disasm_buf + printed,
+ sizeof(disasm_buf) - printed, args,
+ start + offset);
+
+ args->offset = offset;
+ args->line = disasm_buf;
+
+ dl = disasm_line__new(args);
+ if (dl == NULL)
+ goto err;
+
+ annotation_line__add(&dl->al, &notes->src->source);
+
+ offset += insn[i].size;
+ }
+
+ /* It failed in the middle: probably due to unknown instructions */
+ if (offset != buf_len) {
+ struct list_head *list = &notes->src->source;
+
+ /* Discard all lines and fallback to objdump */
+ while (!list_empty(list)) {
+ dl = list_first_entry(list, struct disasm_line, al.node);
+
+ list_del_init(&dl->al.node);
+ disasm_line__free(dl);
+ }
+ count = -1;
+ }
+
+out:
+ if (needs_cs_close) {
+ cs_close(&handle);
+ if (free_count > 0)
+ cs_free(insn, free_count);
+ }
+ free(code_buf);
+ return count < 0 ? count : 0;
+
+err:
+ if (needs_cs_close) {
+ struct disasm_line *tmp;
+
+ /*
+ * It probably failed in the middle of the above loop.
+ * Release any resources it might have added.
+ */
+ list_for_each_entry_safe(dl, tmp, &notes->src->source, al.node) {
+ list_del(&dl->al.node);
+ disasm_line__free(dl);
+ }
+ }
+ count = -1;
+ goto out;
+#else
+ return -1;
+#endif
+}
+
+int symbol__disassemble_capstone_powerpc(const char *filename __maybe_unused,
+ struct symbol *sym __maybe_unused,
+ struct annotate_args *args __maybe_unused)
+{
+#ifdef HAVE_LIBCAPSTONE_SUPPORT
+ struct annotation *notes = symbol__annotation(sym);
+ struct map *map = args->ms.map;
+ struct dso *dso = map__dso(map);
+ struct nscookie nsc;
+ u64 start = map__rip_2objdump(map, sym->start);
+ u64 end = map__rip_2objdump(map, sym->end);
+ u64 len = end - start;
+ u64 offset;
+ int i, fd, count;
+ bool is_64bit = false;
+ bool needs_cs_close = false;
+ u8 *buf = NULL;
+ struct find_file_offset_data data = {
+ .ip = start,
+ };
+ csh handle;
+ char disasm_buf[512];
+ struct disasm_line *dl;
+ u32 *line;
+ bool disassembler_style = false;
+
+ if (args->options->objdump_path)
+ return -1;
+
+ nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);
+ fd = open(filename, O_RDONLY);
+ nsinfo__mountns_exit(&nsc);
+ if (fd < 0)
+ return -1;
+
+ if (file__read_maps(fd, /*exe=*/true, find_file_offset, &data,
+ &is_64bit) == 0)
+ goto err;
+
+ if (!args->options->disassembler_style ||
+ !strcmp(args->options->disassembler_style, "att"))
+ disassembler_style = true;
+
+ if (capstone_init(maps__machine(args->ms.maps), &handle, is_64bit, disassembler_style) < 0)
+ goto err;
+
+ needs_cs_close = true;
+
+ buf = malloc(len);
+ if (buf == NULL)
+ goto err;
+
+ count = pread(fd, buf, len, data.offset);
+ close(fd);
+ fd = -1;
+
+ if ((u64)count != len)
+ goto err;
+
+ line = (u32 *)buf;
+
+ /* add the function address and name */
+ scnprintf(disasm_buf, sizeof(disasm_buf), "%#"PRIx64" <%s>:",
+ start, sym->name);
+
+ args->offset = -1;
+ args->line = disasm_buf;
+ args->line_nr = 0;
+ args->fileloc = NULL;
+ args->ms.sym = sym;
+
+ dl = disasm_line__new(args);
+ if (dl == NULL)
+ goto err;
+
+ annotation_line__add(&dl->al, &notes->src->source);
+
+ /*
+ * TODO: enable disassm for powerpc
+ * count = cs_disasm(handle, buf, len, start, len, &insn);
+ *
+ * For now, only the binary code is saved in the disassembled lines,
+ * to be used by the "type" and "typeoff" sort keys. Each raw code
+ * is a 32-bit instruction, so use "len/4" to get the number of
+ * entries.
+ */
+ count = len/4;
+
+ for (i = 0, offset = 0; i < count; i++) {
+ args->offset = offset;
+ sprintf(args->line, "%x", line[i]);
+
+ dl = disasm_line__new(args);
+ if (dl == NULL)
+ break;
+
+ annotation_line__add(&dl->al, &notes->src->source);
+
+ offset += 4;
+ }
+
+ /* It failed in the middle */
+ if (offset != len) {
+ struct list_head *list = &notes->src->source;
+
+ /* Discard all lines and fallback to objdump */
+ while (!list_empty(list)) {
+ dl = list_first_entry(list, struct disasm_line, al.node);
+
+ list_del_init(&dl->al.node);
+ disasm_line__free(dl);
+ }
+ count = -1;
+ }
+
+out:
+ if (needs_cs_close)
+ cs_close(&handle);
+ free(buf);
+ return count < 0 ? count : 0;
+
+err:
+ if (fd >= 0)
+ close(fd);
+ count = -1;
+ goto out;
+#else
+ return -1;
+#endif
+}
diff --git a/tools/perf/util/capstone.h b/tools/perf/util/capstone.h
new file mode 100644
index 000000000000..0f030ea034b6
--- /dev/null
+++ b/tools/perf/util/capstone.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_CAPSTONE_H
+#define __PERF_CAPSTONE_H
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <linux/types.h>
+
+struct annotate_args;
+struct machine;
+struct symbol;
+struct thread;
+
+ssize_t capstone__fprintf_insn_asm(struct machine *machine, struct thread *thread, u8 cpumode,
+ bool is64bit, const uint8_t *code, size_t code_size,
+ uint64_t ip, int *lenp, int print_opts, FILE *fp);
+int symbol__disassemble_capstone(const char *filename, struct symbol *sym,
+ struct annotate_args *args);
+int symbol__disassemble_capstone_powerpc(const char *filename, struct symbol *sym,
+ struct annotate_args *args);
+
+#endif /* __PERF_CAPSTONE_H */
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
index fbcc0626f9ce..040eb75f0804 100644
--- a/tools/perf/util/cgroup.c
+++ b/tools/perf/util/cgroup.c
@@ -10,6 +10,7 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/statfs.h>
+#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
@@ -413,8 +414,7 @@ static bool has_pattern_string(const char *str)
return !!strpbrk(str, "{}[]()|*+?^$");
}
-int evlist__expand_cgroup(struct evlist *evlist, const char *str,
- struct rblist *metric_events, bool open_cgroup)
+int evlist__expand_cgroup(struct evlist *evlist, const char *str, bool open_cgroup)
{
struct evlist *orig_list, *tmp_list;
struct evsel *pos, *evsel, *leader;
@@ -440,12 +440,8 @@ int evlist__expand_cgroup(struct evlist *evlist, const char *str,
evlist__splice_list_tail(orig_list, &evlist->core.entries);
evlist->core.nr_entries = 0;
- if (metric_events) {
- orig_metric_events = *metric_events;
- rblist__init(metric_events);
- } else {
- rblist__init(&orig_metric_events);
- }
+ orig_metric_events = evlist->metric_events;
+ metricgroup__rblist_init(&evlist->metric_events);
if (has_pattern_string(str))
prefix_len = match_cgroups(str);
@@ -490,12 +486,10 @@ int evlist__expand_cgroup(struct evlist *evlist, const char *str,
cgroup__put(cgrp);
nr_cgroups++;
- if (metric_events) {
- if (metricgroup__copy_metric_events(tmp_list, cgrp,
- metric_events,
- &orig_metric_events) < 0)
- goto out_err;
- }
+ if (metricgroup__copy_metric_events(tmp_list, cgrp,
+ &evlist->metric_events,
+ &orig_metric_events) < 0)
+ goto out_err;
evlist__splice_list_tail(evlist, &tmp_list->core.entries);
tmp_list->core.nr_entries = 0;
@@ -512,7 +506,7 @@ int evlist__expand_cgroup(struct evlist *evlist, const char *str,
out_err:
evlist__delete(orig_list);
evlist__delete(tmp_list);
- rblist__exit(&orig_metric_events);
+ metricgroup__rblist_exit(&orig_metric_events);
release_cgroup_list();
return ret;
diff --git a/tools/perf/util/cgroup.h b/tools/perf/util/cgroup.h
index de8882d6e8d3..7b1bda22878c 100644
--- a/tools/perf/util/cgroup.h
+++ b/tools/perf/util/cgroup.h
@@ -28,8 +28,7 @@ struct rblist;
struct cgroup *cgroup__new(const char *name, bool do_open);
struct cgroup *evlist__findnew_cgroup(struct evlist *evlist, const char *name);
-int evlist__expand_cgroup(struct evlist *evlist, const char *cgroups,
- struct rblist *metric_events, bool open_cgroup);
+int evlist__expand_cgroup(struct evlist *evlist, const char *cgroups, bool open_cgroup);
void evlist__set_default_cgroup(struct evlist *evlist, struct cgroup *cgroup);
diff --git a/tools/perf/util/comm.c b/tools/perf/util/comm.c
index 8aa456d7c2cd..9880247a2c33 100644
--- a/tools/perf/util/comm.c
+++ b/tools/perf/util/comm.c
@@ -24,6 +24,7 @@ static struct comm_strs {
static void comm_strs__remove_if_last(struct comm_str *cs);
static void comm_strs__init(void)
+ NO_THREAD_SAFETY_ANALYSIS /* Inherently single threaded due to pthread_once. */
{
init_rwsem(&_comm_strs.lock);
_comm_strs.capacity = 16;
@@ -119,6 +120,7 @@ static void comm_strs__remove_if_last(struct comm_str *cs)
}
static struct comm_str *__comm_strs__find(struct comm_strs *comm_strs, const char *str)
+ SHARED_LOCKS_REQUIRED(comm_strs->lock)
{
struct comm_str **result;
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index ae72b66b6ded..e0219bc6330a 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -19,7 +19,7 @@
#include "util/hist.h" /* perf_hist_config */
#include "util/stat.h" /* perf_stat__set_big_num */
#include "util/evsel.h" /* evsel__hw_names, evsel__use_bpf_counters */
-#include "util/srcline.h" /* addr2line_timeout_ms */
+#include "util/addr2line.h" /* addr2line_timeout_ms */
#include "build-id.h"
#include "debug.h"
#include "config.h"
@@ -37,6 +37,8 @@
#define METRIC_ONLY_LEN 20
+static struct stats walltime_nsecs_stats;
+
struct perf_stat_config stat_config = {
.aggr_mode = AGGR_GLOBAL,
.aggr_level = MAX_CACHE_LVL + 1,
@@ -45,7 +47,6 @@ struct perf_stat_config stat_config = {
.run_count = 1,
.metric_only_len = METRIC_ONLY_LEN,
.walltime_nsecs_stats = &walltime_nsecs_stats,
- .ru_stats = &ru_stats,
.big_num = true,
.ctl_fd = -1,
.ctl_fd_ack = -1,
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 89570397a4b3..a80845038a5e 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -684,16 +684,21 @@ size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size)
unsigned char *bitmap;
struct perf_cpu c, last_cpu = perf_cpu_map__max(map);
- if (buf == NULL)
+ if (buf == NULL || size == 0)
return 0;
+ if (last_cpu.cpu < 0) {
+ buf[0] = '\0';
+ return 0;
+ }
+
bitmap = zalloc(last_cpu.cpu / 8 + 1);
if (bitmap == NULL) {
buf[0] = '\0';
return 0;
}
- perf_cpu_map__for_each_cpu(c, idx, map)
+ perf_cpu_map__for_each_cpu_skip_any(c, idx, map)
bitmap[c.cpu / 8] |= 1 << (c.cpu % 8);
for (int cpu = last_cpu.cpu / 4 * 4; cpu >= 0; cpu -= 4) {
diff --git a/tools/perf/util/cs-etm-decoder/Build b/tools/perf/util/cs-etm-decoder/Build
index 056d665f7f88..27550db2aa4c 100644
--- a/tools/perf/util/cs-etm-decoder/Build
+++ b/tools/perf/util/cs-etm-decoder/Build
@@ -1 +1 @@
-perf-util-$(CONFIG_AUXTRACE) += cs-etm-decoder.o
+perf-util-y += cs-etm-decoder.o
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
index b85a8837bddc..3050fe212666 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -588,6 +588,7 @@ static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
const ocsd_generic_trace_elem *elem)
{
ocsd_datapath_resp_t resp = OCSD_RESP_CONT;
+ ocsd_gen_trc_elem_t type;
struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;
struct cs_etm_queue *etmq = decoder->data;
struct cs_etm_packet_queue *packet_queue;
@@ -597,52 +598,29 @@ static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
if (!packet_queue)
return OCSD_RESP_FATAL_SYS_ERR;
- switch (elem->elem_type) {
- case OCSD_GEN_TRC_ELEM_UNKNOWN:
- break;
- case OCSD_GEN_TRC_ELEM_EO_TRACE:
- case OCSD_GEN_TRC_ELEM_NO_SYNC:
- case OCSD_GEN_TRC_ELEM_TRACE_ON:
+ type = elem->elem_type;
+
+ if (type == OCSD_GEN_TRC_ELEM_EO_TRACE ||
+ type == OCSD_GEN_TRC_ELEM_NO_SYNC ||
+ type == OCSD_GEN_TRC_ELEM_TRACE_ON)
resp = cs_etm_decoder__buffer_discontinuity(etmq, packet_queue,
trace_chan_id);
- break;
- case OCSD_GEN_TRC_ELEM_INSTR_RANGE:
+ else if (type == OCSD_GEN_TRC_ELEM_INSTR_RANGE)
resp = cs_etm_decoder__buffer_range(etmq, packet_queue, elem,
trace_chan_id);
- break;
- case OCSD_GEN_TRC_ELEM_EXCEPTION:
+ else if (type == OCSD_GEN_TRC_ELEM_EXCEPTION)
resp = cs_etm_decoder__buffer_exception(etmq, packet_queue, elem,
trace_chan_id);
- break;
- case OCSD_GEN_TRC_ELEM_EXCEPTION_RET:
+ else if (type == OCSD_GEN_TRC_ELEM_EXCEPTION_RET)
resp = cs_etm_decoder__buffer_exception_ret(etmq, packet_queue,
trace_chan_id);
- break;
- case OCSD_GEN_TRC_ELEM_TIMESTAMP:
+ else if (type == OCSD_GEN_TRC_ELEM_TIMESTAMP)
resp = cs_etm_decoder__do_hard_timestamp(etmq, elem,
trace_chan_id,
indx);
- break;
- case OCSD_GEN_TRC_ELEM_PE_CONTEXT:
+ else if (type == OCSD_GEN_TRC_ELEM_PE_CONTEXT)
resp = cs_etm_decoder__set_tid(etmq, packet_queue,
elem, trace_chan_id);
- break;
- /* Unused packet types */
- case OCSD_GEN_TRC_ELEM_I_RANGE_NOPATH:
- case OCSD_GEN_TRC_ELEM_ADDR_NACC:
- case OCSD_GEN_TRC_ELEM_CYCLE_COUNT:
- case OCSD_GEN_TRC_ELEM_ADDR_UNKNOWN:
- case OCSD_GEN_TRC_ELEM_EVENT:
- case OCSD_GEN_TRC_ELEM_SWTRACE:
- case OCSD_GEN_TRC_ELEM_CUSTOM:
- case OCSD_GEN_TRC_ELEM_SYNC_MARKER:
- case OCSD_GEN_TRC_ELEM_MEMTRANS:
-#if (OCSD_VER_NUM >= 0x010400)
- case OCSD_GEN_TRC_ELEM_INSTRUMENTATION:
-#endif
- default:
- break;
- }
return resp;
}
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 30f4bb3e7fa3..25d56e0f1c07 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -777,7 +777,7 @@ static void cs_etm__packet_dump(const char *pkt_string, void *data)
char queue_nr[64];
if (verbose)
- snprintf(queue_nr, sizeof(queue_nr), "Qnr:%d; ", etmq->queue_nr);
+ snprintf(queue_nr, sizeof(queue_nr), "Qnr:%u; ", etmq->queue_nr);
else
queue_nr[0] = '\0';
@@ -1726,10 +1726,7 @@ static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
attr.read_format = evsel->core.attr.read_format;
/* create new id val to be a fixed offset from evsel id */
- id = evsel->core.id[0] + 1000000000;
-
- if (!id)
- id = 1;
+ id = auxtrace_synth_id_range_start(evsel);
if (etm->synth_opts.branches) {
attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
index 5e7ff09fbc95..3d2e437e1354 100644
--- a/tools/perf/util/data-convert-bt.c
+++ b/tools/perf/util/data-convert-bt.c
@@ -1338,14 +1338,14 @@ static void cleanup_events(struct perf_session *session)
static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
{
struct ctf_stream **stream;
- struct perf_header *ph = &session->header;
+ struct perf_env *env = perf_session__env(session);
int ncpus;
/*
* Try to get the number of cpus used in the data file,
* if not present fallback to the MAX_CPUS.
*/
- ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;
+ ncpus = env->nr_cpus_avail ?: MAX_CPUS;
stream = zalloc(sizeof(*stream) * ncpus);
if (!stream) {
@@ -1371,7 +1371,7 @@ static void free_streams(struct ctf_writer *cw)
static int ctf_writer__setup_env(struct ctf_writer *cw,
struct perf_session *session)
{
- struct perf_header *header = &session->header;
+ struct perf_env *env = perf_session__env(session);
struct bt_ctf_writer *writer = cw->writer;
#define ADD(__n, __v) \
@@ -1380,11 +1380,11 @@ do { \
return -1; \
} while (0)
- ADD("host", header->env.hostname);
+ ADD("host", env->hostname);
ADD("sysname", "Linux");
- ADD("release", header->env.os_release);
- ADD("version", header->env.version);
- ADD("machine", header->env.arch);
+ ADD("release", env->os_release);
+ ADD("version", env->version);
+ ADD("machine", env->arch);
ADD("domain", "kernel");
ADD("tracer_name", "perf");
@@ -1401,7 +1401,7 @@ static int ctf_writer__setup_clock(struct ctf_writer *cw,
int64_t offset = 0;
if (tod) {
- struct perf_env *env = &session->header.env;
+ struct perf_env *env = perf_session__env(session);
if (!env->clock.enabled) {
pr_err("Can't provide --tod time, missing clock data. "
diff --git a/tools/perf/util/data-convert-json.c b/tools/perf/util/data-convert-json.c
index d9f805bf6fb0..9dc1e184cf3c 100644
--- a/tools/perf/util/data-convert-json.c
+++ b/tools/perf/util/data-convert-json.c
@@ -257,7 +257,8 @@ static int process_sample_event(const struct perf_tool *tool,
static void output_headers(struct perf_session *session, struct convert_json *c)
{
struct stat st;
- struct perf_header *header = &session->header;
+ const struct perf_header *header = &session->header;
+ const struct perf_env *env = perf_session__env(session);
int ret;
int fd = perf_data__fd(session->data);
int i;
@@ -280,32 +281,32 @@ static void output_headers(struct perf_session *session, struct convert_json *c)
output_json_key_format(out, true, 2, "data-size", "%" PRIu64, header->data_size);
output_json_key_format(out, true, 2, "feat-offset", "%" PRIu64, header->feat_offset);
- output_json_key_string(out, true, 2, "hostname", header->env.hostname);
- output_json_key_string(out, true, 2, "os-release", header->env.os_release);
- output_json_key_string(out, true, 2, "arch", header->env.arch);
+ output_json_key_string(out, true, 2, "hostname", env->hostname);
+ output_json_key_string(out, true, 2, "os-release", env->os_release);
+ output_json_key_string(out, true, 2, "arch", env->arch);
- if (header->env.cpu_desc)
- output_json_key_string(out, true, 2, "cpu-desc", header->env.cpu_desc);
+ if (env->cpu_desc)
+ output_json_key_string(out, true, 2, "cpu-desc", env->cpu_desc);
- output_json_key_string(out, true, 2, "cpuid", header->env.cpuid);
- output_json_key_format(out, true, 2, "nrcpus-online", "%u", header->env.nr_cpus_online);
- output_json_key_format(out, true, 2, "nrcpus-avail", "%u", header->env.nr_cpus_avail);
+ output_json_key_string(out, true, 2, "cpuid", env->cpuid);
+ output_json_key_format(out, true, 2, "nrcpus-online", "%u", env->nr_cpus_online);
+ output_json_key_format(out, true, 2, "nrcpus-avail", "%u", env->nr_cpus_avail);
- if (header->env.clock.enabled) {
+ if (env->clock.enabled) {
output_json_key_format(out, true, 2, "clockid",
- "%u", header->env.clock.clockid);
+ "%u", env->clock.clockid);
output_json_key_format(out, true, 2, "clock-time",
- "%" PRIu64, header->env.clock.clockid_ns);
+ "%" PRIu64, env->clock.clockid_ns);
output_json_key_format(out, true, 2, "real-time",
- "%" PRIu64, header->env.clock.tod_ns);
+ "%" PRIu64, env->clock.tod_ns);
}
- output_json_key_string(out, true, 2, "perf-version", header->env.version);
+ output_json_key_string(out, true, 2, "perf-version", env->version);
output_json_key_format(out, true, 2, "cmdline", "[");
- for (i = 0; i < header->env.nr_cmdline; i++) {
+ for (i = 0; i < env->nr_cmdline; i++) {
output_json_delimiters(out, i != 0, 3);
- output_json_string(c->out, header->env.cmdline_argv[i]);
+ output_json_string(c->out, env->cmdline_argv[i]);
}
output_json_format(out, false, 2, "]");
}
@@ -376,8 +377,7 @@ int bt_convert__perf2json(const char *input_name, const char *output_name,
fprintf(stderr, "Error creating perf session!\n");
goto err_fclose;
}
-
- if (symbol__init(&session->header.env) < 0) {
+ if (symbol__init(perf_session__env(session)) < 0) {
fprintf(stderr, "Symbol init error!\n");
goto err_session_delete;
}
diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c
index 50f916374d87..8f52e8cefcf3 100644
--- a/tools/perf/util/db-export.c
+++ b/tools/perf/util/db-export.c
@@ -181,7 +181,7 @@ static int db_ids_from_al(struct db_export *dbe, struct addr_location *al,
if (al->map) {
struct dso *dso = map__dso(al->map);
- err = db_export__dso(dbe, dso, maps__machine(al->maps));
+ err = db_export__dso(dbe, dso, maps__machine(thread__maps(al->thread)));
if (err)
return err;
*dso_db_id = dso__db_id(dso);
@@ -256,6 +256,7 @@ static struct call_path *call_path_from_sample(struct db_export *dbe,
al.map = map__get(node->ms.map);
al.maps = maps__get(thread__maps(thread));
al.addr = node->ip;
+ al.thread = thread__get(thread);
if (al.map && !al.sym)
al.sym = dso__find_symbol(map__dso(al.map), al.addr);
@@ -358,14 +359,18 @@ int db_export__sample(struct db_export *dbe, union perf_event *event,
};
struct thread *main_thread;
struct comm *comm = NULL;
- struct machine *machine;
+ struct machine *machine = NULL;
int err;
+ if (thread__maps(thread))
+ machine = maps__machine(thread__maps(thread));
+ if (!machine)
+ return -1;
+
err = db_export__evsel(dbe, evsel);
if (err)
return err;
- machine = maps__machine(al->maps);
err = db_export__machine(dbe, machine);
if (err)
return err;
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index f9ef7d045c92..1dfa4d0eec4d 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -14,11 +14,19 @@
#ifdef HAVE_BACKTRACE_SUPPORT
#include <execinfo.h>
#endif
+#include "addr_location.h"
#include "color.h"
-#include "event.h"
#include "debug.h"
+#include "env.h"
+#include "event.h"
+#include "machine.h"
+#include "map.h"
#include "print_binary.h"
+#include "srcline.h"
+#include "symbol.h"
+#include "synthetic-events.h"
#include "target.h"
+#include "thread.h"
#include "trace-event.h"
#include "ui/helpline.h"
#include "ui/ui.h"
@@ -298,21 +306,66 @@ void perf_debug_setup(void)
libapi_set_print(pr_warning_wrapper, pr_warning_wrapper, pr_debug_wrapper);
}
+void __dump_stack(FILE *file, void **stackdump, size_t stackdump_size)
+{
+ /* TODO: async safety. printf, malloc, etc. aren't safe inside a signal handler. */
+ pid_t pid = getpid();
+ struct machine *machine;
+ struct thread *thread = NULL;
+ struct perf_env host_env;
+
+ perf_env__init(&host_env);
+ machine = machine__new_live(&host_env, /*kernel_maps=*/false, pid);
+
+ if (machine)
+ thread = machine__find_thread(machine, pid, pid);
+
+#ifdef HAVE_BACKTRACE_SUPPORT
+ if (!machine || !thread) {
+ /*
+ * Backtrace functions are async signal safe. Fall back on them
+ * if machine/thread creation fails.
+ */
+ backtrace_symbols_fd(stackdump, stackdump_size, fileno(file));
+ machine__delete(machine);
+ perf_env__exit(&host_env);
+ return;
+ }
+#endif
+
+ for (size_t i = 0; i < stackdump_size; i++) {
+ struct addr_location al;
+ u64 addr = (u64)(uintptr_t)stackdump[i];
+ bool printed = false;
+
+ addr_location__init(&al);
+ if (thread && thread__find_map(thread, PERF_RECORD_MISC_USER, addr, &al)) {
+ al.sym = map__find_symbol(al.map, al.addr);
+ if (al.sym) {
+				fprintf(file, " #%zu %p in %s ", i, stackdump[i], al.sym->name);
+ printed = true;
+ }
+ }
+ if (!printed)
+			fprintf(file, " #%zu %p ", i, stackdump[i]);
+
+ map__fprintf_srcline(al.map, al.addr, "", file);
+ fprintf(file, "\n");
+ addr_location__exit(&al);
+ }
+ thread__put(thread);
+ machine__delete(machine);
+ perf_env__exit(&host_env);
+}
+
/* Obtain a backtrace and print it to stdout. */
#ifdef HAVE_BACKTRACE_SUPPORT
void dump_stack(void)
{
- void *array[16];
- size_t size = backtrace(array, ARRAY_SIZE(array));
- char **strings = backtrace_symbols(array, size);
- size_t i;
-
- printf("Obtained %zd stack frames.\n", size);
-
- for (i = 0; i < size; i++)
- printf("%s\n", strings[i]);
+ void *stackdump[32];
+ size_t size = backtrace(stackdump, ARRAY_SIZE(stackdump));
- free(strings);
+ __dump_stack(stdout, stackdump, size);
}
#else
void dump_stack(void) {}
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index a4026d1fd6a3..6b737e195ce1 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -85,6 +85,7 @@ void debug_set_display_time(bool set);
void perf_debug_setup(void);
int perf_quiet_option(void);
+void __dump_stack(FILE *file, void **stackdump, size_t stackdump_size);
void dump_stack(void);
void sighandler_dump_stack(int sig);
diff --git a/tools/perf/util/debuginfo.c b/tools/perf/util/debuginfo.c
index b5deea7cbdf2..4a559b3e8cdc 100644
--- a/tools/perf/util/debuginfo.c
+++ b/tools/perf/util/debuginfo.c
@@ -103,15 +103,19 @@ struct debuginfo *debuginfo__new(const char *path)
char buf[PATH_MAX], nil = '\0';
struct dso *dso;
struct debuginfo *dinfo = NULL;
- struct build_id bid;
+	struct build_id bid = { .size = 0 };
/* Try to open distro debuginfo files */
dso = dso__new(path);
if (!dso)
goto out;
- /* Set the build id for DSO_BINARY_TYPE__BUILDID_DEBUGINFO */
- if (is_regular_file(path) && filename__read_build_id(path, &bid) > 0)
+ /*
+ * Set the build id for DSO_BINARY_TYPE__BUILDID_DEBUGINFO. Don't block
+	 * in case the path isn't for a regular file.
+ */
+ assert(!dso__has_build_id(dso));
+ if (filename__read_build_id(path, &bid) > 0)
dso__set_build_id(dso, &bid);
for (type = distro_dwarf_types;
diff --git a/tools/perf/util/demangle-cxx.h b/tools/perf/util/demangle-cxx.h
index 26b5b66c0b4e..9359937a881a 100644
--- a/tools/perf/util/demangle-cxx.h
+++ b/tools/perf/util/demangle-cxx.h
@@ -2,6 +2,8 @@
#ifndef __PERF_DEMANGLE_CXX
#define __PERF_DEMANGLE_CXX 1
+#include <stdbool.h>
+
#ifdef __cplusplus
extern "C" {
#endif
diff --git a/tools/perf/util/demangle-rust-v0.c b/tools/perf/util/demangle-rust-v0.c
new file mode 100644
index 000000000000..19924d85407d
--- /dev/null
+++ b/tools/perf/util/demangle-rust-v0.c
@@ -0,0 +1,2042 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+// The contents of this file come from the Rust rustc-demangle library, hosted
+// in the <https://github.com/rust-lang/rustc-demangle> repository, licensed
+// under "Apache-2.0 OR MIT". For copyright details, see
+// <https://github.com/rust-lang/rustc-demangle/blob/main/README.md>.
+// Please note that the file should be kept as close as possible to upstream.
+
+// Code for demangling Rust symbols. This code is mostly
+// a line-by-line translation of the Rust code in `rustc-demangle`.
+
+// You can find the latest version of this code at https://github.com/rust-lang/rustc-demangle
+
+#include <stdint.h>
+#include <stddef.h>
+#include <string.h>
+#include <stdbool.h>
+#include <sys/param.h>
+#include <stdio.h>
+
+#include "demangle-rust-v0.h"
+
+#if defined(__GNUC__) || defined(__clang__)
+#define NODISCARD __attribute__((warn_unused_result))
+#else
+#define NODISCARD
+#endif
+
+#define MAX_DEPTH 500
+
+typedef enum {
+ DemangleOk,
+ DemangleInvalid,
+ DemangleRecursed,
+ DemangleBug,
+} demangle_status;
+
+struct demangle_v0 {
+ const char *mangled;
+ size_t mangled_len;
+};
+
+struct demangle_legacy {
+ const char *mangled;
+ size_t mangled_len;
+ size_t elements;
+};
+
+// private version of memrchr to avoid _GNU_SOURCE
+static void *demangle_memrchr(const void *s, int c, size_t n) {
+ const uint8_t *s_ = s;
+ for (; n != 0; n--) {
+ if (s_[n-1] == c) {
+ return (void*)&s_[n-1];
+ }
+ }
+ return NULL;
+}
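+// Example (editor's sketch, not upstream): demangle_memrchr("a_b_c", '_', 5)
+// returns a pointer to index 3, the last '_', since it scans from the end
+// (unlike memchr).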
+
+
+static bool unicode_iscontrol(uint32_t ch) {
+ // this is *technically* a unicode table, but
+ // some unicode properties are simpler than you might think
+ return ch < 0x20 || (ch >= 0x7f && ch < 0xa0);
+}
+
+// "good enough" tables, the only consequence is that when printing
+// *constant strings*, some characters are printed as `\u{abcd}` rather than themselves.
+//
+// I'm leaving these here to allow easily replacing them with actual
+// tables if desired.
+static bool unicode_isprint(uint32_t ch) {
+ if (ch < 0x20) {
+ return false;
+ }
+ if (ch < 0x7f) {
+ return true;
+ }
+ return false;
+}
+
+static bool unicode_isgraphemextend(uint32_t ch) {
+ (void)ch;
+ return false;
+}
+
+static bool str_isascii(const char *s, size_t s_len) {
+ for (size_t i = 0; i < s_len; i++) {
+ if (s[i] & 0x80) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+typedef enum {
+ PunycodeOk,
+ PunycodeError
+} punycode_status;
+
+struct parser {
+ // the parser assumes that `sym` has a safe "terminating byte". It might be NUL,
+ // but it might also be something else if a symbol is "truncated".
+ const char *sym;
+ size_t sym_len;
+ size_t next;
+ uint32_t depth;
+};
+
+struct printer {
+    demangle_status status; // if status == DemangleOk the parser is valid
+ struct parser parser;
+ char *out; // NULL for no output [in which case out_len is not decremented]
+ size_t out_len;
+ uint32_t bound_lifetime_depth;
+ bool alternate;
+};
+
+static NODISCARD overflow_status printer_print_path(struct printer *printer, bool in_value);
+static NODISCARD overflow_status printer_print_type(struct printer *printer);
+static NODISCARD overflow_status printer_print_const(struct printer *printer, bool in_value);
+
+static NODISCARD demangle_status try_parse_path(struct parser *parser) {
+ struct printer printer = {
+ DemangleOk,
+ *parser,
+ NULL,
+ SIZE_MAX,
+ 0,
+ false
+ };
+ overflow_status ignore = printer_print_path(&printer, false); // can't fail since no output
+ (void)ignore;
+ *parser = printer.parser;
+ return printer.status;
+}
+
+NODISCARD static demangle_status rust_demangle_v0_demangle(const char *s, size_t s_len, struct demangle_v0 *res, const char **rest) {
+ if (s_len > strlen(s)) {
+ // s_len only exists to shorten the string, this is not a buffer API
+ return DemangleInvalid;
+ }
+
+ const char *inner;
+ size_t inner_len;
+ if (s_len >= 2 && !strncmp(s, "_R", strlen("_R"))) {
+ inner = s+2;
+ inner_len = s_len - 2;
+ } else if (s_len >= 1 && !strncmp(s, "R", strlen("R"))) {
+ // On Windows, dbghelp strips leading underscores, so we accept "R..."
+ // form too.
+ inner = s+1;
+ inner_len = s_len - 1;
+ } else if (s_len >= 3 && !strncmp(s, "__R", strlen("__R"))) {
+ // On OSX, symbols are prefixed with an extra _
+ inner = s+3;
+ inner_len = s_len - 3;
+ } else {
+ return DemangleInvalid;
+ }
+
+ // Paths always start with uppercase characters.
+ if (*inner < 'A' || *inner > 'Z') {
+ return DemangleInvalid;
+ }
+
+ if (!str_isascii(inner, inner_len)) {
+ return DemangleInvalid;
+ }
+
+ struct parser parser = { inner, inner_len, 0, 0 };
+
+ demangle_status status = try_parse_path(&parser);
+ if (status != DemangleOk) return status;
+ char next = parser.sym[parser.next];
+
+ // Instantiating crate (paths always start with uppercase characters).
+ if (parser.next < parser.sym_len && next >= 'A' && next <= 'Z') {
+ status = try_parse_path(&parser);
+ if (status != DemangleOk) return status;
+ }
+
+ res->mangled = inner;
+ res->mangled_len = inner_len;
+ if (rest) {
+ *rest = parser.sym + parser.next;
+ }
+
+ return DemangleOk;
+}
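+// Example (editor's sketch): the v0 mangling RFC's sample symbol
+// "_RNvC6_123foo3bar" is accepted here (inner = "NvC6_123foo3bar": crate
+// "123foo", then item "bar") and later displays as "123foo::bar".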
+
+// This might require `len` to be up to 3 bytes larger than the real output length in the case of UTF-8
+NODISCARD static overflow_status rust_demangle_v0_display_demangle(struct demangle_v0 res, char *out, size_t len, bool alternate) {
+ struct printer printer = {
+ DemangleOk,
+ {
+ res.mangled,
+ res.mangled_len,
+ 0,
+ 0
+ },
+ out,
+ len,
+ 0,
+ alternate
+ };
+ if (printer_print_path(&printer, true) == OverflowOverflow) {
+ return OverflowOverflow;
+ }
+ if (printer.out_len < OVERFLOW_MARGIN) {
+ return OverflowOverflow;
+ }
+ *printer.out = '\0';
+ return OverflowOk;
+}
+
+static size_t code_to_utf8(unsigned char *buffer, uint32_t code)
+{
+ if (code <= 0x7F) {
+ buffer[0] = code;
+ return 1;
+ }
+ if (code <= 0x7FF) {
+ buffer[0] = 0xC0 | (code >> 6); /* 110xxxxx */
+ buffer[1] = 0x80 | (code & 0x3F); /* 10xxxxxx */
+ return 2;
+ }
+ if (code <= 0xFFFF) {
+ buffer[0] = 0xE0 | (code >> 12); /* 1110xxxx */
+ buffer[1] = 0x80 | ((code >> 6) & 0x3F); /* 10xxxxxx */
+ buffer[2] = 0x80 | (code & 0x3F); /* 10xxxxxx */
+ return 3;
+ }
+ if (code <= 0x10FFFF) {
+ buffer[0] = 0xF0 | (code >> 18); /* 11110xxx */
+ buffer[1] = 0x80 | ((code >> 12) & 0x3F); /* 10xxxxxx */
+ buffer[2] = 0x80 | ((code >> 6) & 0x3F); /* 10xxxxxx */
+ buffer[3] = 0x80 | (code & 0x3F); /* 10xxxxxx */
+ return 4;
+ }
+ return 0;
+}
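+// Examples (editor's sketch): code_to_utf8(buf, 0xe9) stores {0xc3, 0xa9}
+// ("é") and returns 2; code_to_utf8(buf, 0x1f600) stores
+// {0xf0, 0x9f, 0x98, 0x80} and returns 4; code points above 0x10FFFF give 0.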
+
+
+// return the byte length of the char at s, or SIZE_MAX if invalid. s must have 4 readable bytes
+static NODISCARD size_t utf8_next_char(uint8_t *s, uint32_t *ch) {
+ uint8_t byte = *s;
+ // UTF8-1 = %x00-7F
+ // UTF8-2 = %xC2-DF UTF8-tail
+ // UTF8-3 = %xE0 %xA0-BF UTF8-tail / %xE1-EC 2( UTF8-tail ) /
+ // %xED %x80-9F UTF8-tail / %xEE-EF 2( UTF8-tail )
+ // UTF8-4 = %xF0 %x90-BF 2( UTF8-tail ) / %xF1-F3 3( UTF8-tail ) /
+ // %xF4 %x80-8F 2( UTF8-tail )
+ if (byte < 0x80) {
+ *ch = byte;
+ return 1;
+ } else if (byte < 0xc2) {
+ return SIZE_MAX;
+ } else if (byte < 0xe0) {
+ if (s[1] >= 0x80 && s[1] < 0xc0) {
+ *ch = ((byte&0x1f)<<6) + (s[1] & 0x3f);
+ return 2;
+ }
+ return SIZE_MAX;
+    } else if (byte < 0xf0) {
+ if (!(s[1] >= 0x80 && s[1] < 0xc0) || !(s[2] >= 0x80 && s[2] < 0xc0)) {
+ return SIZE_MAX; // basic validation
+ }
+ if (byte == 0xe0 && s[1] < 0xa0) {
+            return SIZE_MAX; // overlong
+ }
+ if (byte == 0xed && s[1] >= 0xa0) {
+ return SIZE_MAX; // surrogate
+ }
+ *ch = ((byte&0x0f)<<12) + ((s[1] & 0x3f)<<6) + (s[2] & 0x3f);
+ return 3;
+ } else if (byte < 0xf5) {
+ if (!(s[1] >= 0x80 && s[1] < 0xc0) || !(s[2] >= 0x80 && s[2] < 0xc0) || !(s[3] >= 0x80 && s[3] < 0xc0)) {
+ return SIZE_MAX; // basic validation
+ }
+ if (byte == 0xf0 && s[1] < 0x90) {
+            return SIZE_MAX; // overlong
+ }
+ if (byte == 0xf4 && s[1] >= 0x90) {
+ return SIZE_MAX; // over max
+ }
+ *ch = ((byte&0x07)<<18) + ((s[1] & 0x3f)<<12) + ((s[2] & 0x3f)<<6) + (s[3]&0x3f);
+ return 4;
+ } else {
+ return SIZE_MAX;
+ }
+}
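+// Examples (editor's sketch): for s = {0xc3, 0xa9, ...} this sets *ch = 0xe9
+// and returns 2; an overlong sequence such as {0xe0, 0x80, ...} or a
+// surrogate such as {0xed, 0xa0, ...} returns SIZE_MAX.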
+
+static NODISCARD bool validate_char(uint32_t n) {
+ return ((n ^ 0xd800) - 0x800) < 0x110000 - 0x800;
+}
+
+#define SMALL_PUNYCODE_LEN 128
+
+static NODISCARD punycode_status punycode_decode(const char *start, size_t ascii_len, const char *punycode_start, size_t punycode_len, uint32_t (*out_)[SMALL_PUNYCODE_LEN], size_t *out_len) {
+ uint32_t *out = *out_;
+
+ if (punycode_len == 0) {
+ return PunycodeError;
+ }
+
+ if (ascii_len > SMALL_PUNYCODE_LEN) {
+ return PunycodeError;
+ }
+ for (size_t i = 0; i < ascii_len; i++) {
+ out[i] = start[i];
+ }
+ size_t len = ascii_len;
+
+ size_t base = 36, t_min = 1, t_max = 26, skew = 38, damp = 700, bias = 72, i = 0, n = 0x80;
+ for (;;) {
+ size_t delta = 0, w = 1, k = 0;
+ for (;;) {
+ k += base;
+ size_t biased = k < bias ? 0 : k - bias;
+ size_t t = MIN(MAX(biased, t_min), t_max);
+ size_t d;
+ if (punycode_len == 0) {
+ return PunycodeError;
+ }
+ char nx = *punycode_start++;
+ punycode_len--;
+ if ('a' <= nx && nx <= 'z') {
+ d = nx - 'a';
+ } else if ('0' <= nx && nx <= '9') {
+ d = 26 + (nx - '0');
+ } else {
+ return PunycodeError;
+ }
+ if (w == 0 || d > SIZE_MAX / w || d*w > SIZE_MAX - delta) {
+ return PunycodeError;
+ }
+ delta += d * w;
+ if (d < t) {
+ break;
+ }
+ if (base < t || w == 0 || (base - t) > SIZE_MAX / w) {
+ return PunycodeError;
+ }
+ w *= (base - t);
+ }
+
+ len += 1;
+ if (i > SIZE_MAX - delta) {
+ return PunycodeError;
+ }
+ i += delta;
+ if (n > SIZE_MAX - i / len) {
+ return PunycodeError;
+ }
+ n += i / len;
+ i %= len;
+
+ // char validation
+ if (n > UINT32_MAX || !validate_char((uint32_t)n)) {
+ return PunycodeError;
+ }
+
+ // insert new character
+ if (len > SMALL_PUNYCODE_LEN) {
+ return PunycodeError;
+ }
+ memmove(out + i + 1, out + i, (len - i - 1) * sizeof(uint32_t));
+ out[i] = (uint32_t)n;
+
+ // start i index at incremented position
+ i++;
+
+ // If there are no more deltas, decoding is complete.
+ if (punycode_len == 0) {
+ *out_len = len;
+ return PunycodeOk;
+ }
+
+ // Perform bias adaptation.
+ delta /= damp;
+ damp = 2;
+
+ delta += delta / len;
+ k = 0;
+ while (delta > ((base - t_min) * t_max) / 2) {
+ delta /= base - t_min;
+ k += base;
+ }
+ bias = k + ((base - t_min + 1) * delta) / (delta + skew);
+ }
+}
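+// Example (editor's sketch, using the classic punycode test vector): decoding
+// the ASCII run "Mnchen" with the delta string "3ya" reinserts the 'ü' and
+// yields the seven code points of "München".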
+
+struct ident {
+ const char *ascii_start;
+ size_t ascii_len;
+ const char *punycode_start;
+ size_t punycode_len;
+};
+
+static NODISCARD overflow_status display_ident(const char *ascii_start, size_t ascii_len, const char *punycode_start, size_t punycode_len, uint8_t *out, size_t *out_len) {
+ uint32_t outbuf[SMALL_PUNYCODE_LEN];
+
+ size_t wide_len;
+ size_t out_buflen = *out_len;
+
+ if (punycode_len == 0) {
+ if (ascii_len > out_buflen) {
+ return OverflowOverflow;
+ }
+ memcpy(out, ascii_start, ascii_len);
+ *out_len = ascii_len;
+ } else if (punycode_decode(ascii_start, ascii_len, punycode_start, punycode_len, &outbuf, &wide_len) == PunycodeOk) {
+ size_t narrow_len = 0;
+ for (size_t i = 0; i < wide_len; i++) {
+ if (out_buflen - narrow_len < 4) {
+ return OverflowOverflow;
+ }
+ unsigned char *pos = &out[narrow_len];
+ narrow_len += code_to_utf8(pos, outbuf[i]);
+ }
+ *out_len = narrow_len;
+ } else {
+ size_t narrow_len = 0;
+ if (out_buflen < strlen("punycode{")) {
+ return OverflowOverflow;
+ }
+ memcpy(out, "punycode{", strlen("punycode{"));
+ narrow_len = strlen("punycode{");
+ if (ascii_len > 0) {
+ if (out_buflen - narrow_len < ascii_len || out_buflen - narrow_len - ascii_len < 1) {
+ return OverflowOverflow;
+ }
+ memcpy(out + narrow_len, ascii_start, ascii_len);
+ narrow_len += ascii_len;
+ out[narrow_len] = '-';
+ narrow_len++;
+ }
+ if (out_buflen - narrow_len < punycode_len || out_buflen - narrow_len - punycode_len < 1) {
+ return OverflowOverflow;
+ }
+ memcpy(out + narrow_len, punycode_start, punycode_len);
+ narrow_len += punycode_len;
+ out[narrow_len] = '}';
+ narrow_len++;
+ *out_len = narrow_len;
+ }
+
+ return OverflowOk;
+}
+
+static NODISCARD bool try_parse_uint(const char *buf, size_t len, uint64_t *result) {
+ size_t cur = 0;
+ for(;cur < len && buf[cur] == '0';cur++);
+ uint64_t result_val = 0;
+ if (len - cur > 16) return false;
+ for(;cur < len;cur++) {
+ char c = buf[cur];
+ result_val <<= 4;
+ if ('0' <= c && c <= '9') {
+ result_val += c - '0';
+ } else if ('a' <= c && c <= 'f') {
+ result_val += 10 + (c - 'a');
+ } else {
+ return false;
+ }
+ }
+ *result = result_val;
+ return true;
+}
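+// Examples (editor's sketch): try_parse_uint("ff", 2, &v) sets v = 255;
+// leading zeros are skipped first, and more than 16 significant hex digits
+// (too big for a uint64_t) makes it return false.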
+
+static NODISCARD bool dinibble2int(const char *buf, uint8_t *result) {
+ uint8_t result_val = 0;
+ for (int i = 0; i < 2; i++) {
+ char c = buf[i];
+ result_val <<= 4;
+ if ('0' <= c && c <= '9') {
+ result_val += c - '0';
+ } else if ('a' <= c && c <= 'f') {
+ result_val += 10 + (c - 'a');
+ } else {
+ return false;
+ }
+ }
+ *result = result_val;
+ return true;
+}
+
+
+typedef enum {
+ NtsOk = 0,
+ NtsOverflow = 1,
+ NtsInvalid = 2
+} nibbles_to_string_status;
+
+// '\u{10ffff}', +margin
+#define ESCAPED_SIZE 12
+
+static NODISCARD size_t char_to_string(uint32_t ch, uint8_t quote, bool first, char (*buf)[ESCAPED_SIZE]) {
+ // encode the character
+ char *escaped_buf = *buf;
+ escaped_buf[0] = '\\';
+ size_t escaped_len = 2;
+ switch (ch) {
+ case '\0':
+ escaped_buf[1] = '0';
+ break;
+ case '\t':
+ escaped_buf[1] = 't';
+ break;
+ case '\r':
+ escaped_buf[1] = 'r';
+ break;
+ case '\n':
+ escaped_buf[1] = 'n';
+ break;
+ case '\\':
+ escaped_buf[1] = '\\';
+ break;
+ default:
+ if (ch == quote) {
+ escaped_buf[1] = ch;
+ } else if (!unicode_isprint(ch) || (first && unicode_isgraphemextend(ch))) {
+ int hexlen = snprintf(escaped_buf, ESCAPED_SIZE, "\\u{%x}", (unsigned int)ch);
+ if (hexlen < 0) {
+ return 0; // (snprintf shouldn't fail!)
+ }
+ escaped_len = hexlen;
+ } else {
+ // printable character
+ escaped_buf[0] = ch;
+ escaped_len = 1;
+ }
+ break;
+ }
+
+ return escaped_len;
+}
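+// Examples (editor's sketch): '\n' escapes to the two bytes '\\' 'n'; the
+// non-printable U+007F escapes to "\u{7f}"; a plain 'a' is copied through
+// as a single byte.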
+
+// convert nibbles to a single/double-quoted string
+static NODISCARD nibbles_to_string_status nibbles_to_string(const char *buf, size_t len, uint8_t *out, size_t *out_len) {
+ uint8_t quote = '"';
+ bool first = true;
+
+ if ((len % 2) != 0) {
+ return NtsInvalid; // odd number of nibbles
+ }
+
+ size_t cur_out_len = 0;
+
+ // write starting quote
+ if (out != NULL) {
+ cur_out_len = *out_len;
+ if (cur_out_len == 0) {
+ return NtsOverflow;
+ }
+ *out++ = quote;
+ cur_out_len--;
+ }
+
+ uint8_t conv_buf[4] = {0};
+ size_t conv_buf_len = 0;
+ while (len > 1 || conv_buf_len > 0) {
+ while (len > 1 && conv_buf_len < sizeof(conv_buf)) {
+ if (!dinibble2int(buf, &conv_buf[conv_buf_len])) {
+ return NtsInvalid;
+ }
+ conv_buf_len++;
+ buf += 2;
+ len -= 2;
+ }
+
+ // conv_buf is full here if possible, process 1 UTF-8 character
+ uint32_t ch = 0;
+ size_t consumed = utf8_next_char(conv_buf, &ch);
+ if (consumed > conv_buf_len) {
+            // either SIZE_MAX (invalid UTF-8) or the character claims more
+            // bytes than remain in the input buffer; invalid in both cases
+ return NtsInvalid;
+ }
+
+ // "consume" the character
+ memmove(conv_buf, conv_buf+consumed, conv_buf_len-consumed);
+ conv_buf_len -= consumed;
+
+ char escaped_buf[ESCAPED_SIZE];
+ size_t escaped_len = char_to_string(ch, '"', first, &escaped_buf);
+ if (out != NULL) {
+ if (cur_out_len < escaped_len) {
+ return NtsOverflow;
+ }
+ memcpy(out, escaped_buf, escaped_len);
+ out += escaped_len;
+ cur_out_len -= escaped_len;
+ }
+ first = false;
+ }
+
+ // write ending quote
+ if (out != NULL) {
+ if (cur_out_len == 0) {
+ return NtsOverflow;
+ }
+ *out++ = quote;
+ cur_out_len--;
+ *out_len -= cur_out_len; // subtract remaining space to get used space
+ }
+
+ return NtsOk;
+}
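+// Example (editor's sketch): the nibbles "68695f" decode to the bytes "hi_"
+// and are emitted as "hi_" wrapped in double quotes (5 bytes total); an odd
+// nibble count yields NtsInvalid.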
+
+static const char* basic_type(uint8_t tag) {
+ switch(tag) {
+ case 'b':
+ return "bool";
+ case 'c':
+ return "char";
+ case 'e':
+ return "str";
+ case 'u':
+ return "()";
+ case 'a':
+ return "i8";
+ case 's':
+ return "i16";
+ case 'l':
+ return "i32";
+ case 'x':
+ return "i64";
+ case 'n':
+ return "i128";
+ case 'i':
+ return "isize";
+ case 'h':
+ return "u8";
+ case 't':
+ return "u16";
+ case 'm':
+ return "u32";
+ case 'y':
+ return "u64";
+ case 'o':
+ return "u128";
+ case 'j':
+ return "usize";
+ case 'f':
+ return "f32";
+ case 'd':
+ return "f64";
+ case 'z':
+ return "!";
+ case 'p':
+ return "_";
+ case 'v':
+ return "...";
+ default:
+ return NULL;
+ }
+}
+
+static NODISCARD demangle_status parser_push_depth(struct parser *parser) {
+ parser->depth++;
+ if (parser->depth > MAX_DEPTH) {
+ return DemangleRecursed;
+ } else {
+ return DemangleOk;
+ }
+}
+
+static demangle_status parser_pop_depth(struct parser *parser) {
+ parser->depth--;
+ return DemangleOk;
+}
+
+static uint8_t parser_peek(struct parser const *parser) {
+ if (parser->next == parser->sym_len) {
+ return 0; // add a "pseudo nul terminator" to avoid peeking past the end of a symbol
+ } else {
+ return parser->sym[parser->next];
+ }
+}
+
+static bool parser_eat(struct parser *parser, uint8_t ch) {
+ if (parser_peek(parser) == ch) {
+ if (ch != 0) { // safety: make sure we don't skip past the NUL terminator
+ parser->next++;
+ }
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static uint8_t parser_next(struct parser *parser) {
+ // don't advance after end of input, and return an imaginary NUL terminator
+ if (parser->next == parser->sym_len) {
+ return 0;
+ } else {
+ return parser->sym[parser->next++];
+ }
+}
+
+static NODISCARD demangle_status parser_ch(struct parser *parser, uint8_t *next) {
+ // don't advance after end of input
+ if (parser->next == parser->sym_len) {
+ return DemangleInvalid;
+ } else {
+ *next = parser->sym[parser->next++];
+ return DemangleOk;
+ }
+}
+
+struct buf {
+ const char *start;
+ size_t len;
+};
+
+static NODISCARD demangle_status parser_hex_nibbles(struct parser *parser, struct buf *buf) {
+ size_t start = parser->next;
+ for (;;) {
+ uint8_t ch = parser_next(parser);
+ if (ch == '_') {
+ break;
+ }
+ if (!(('0' <= ch && ch <= '9') || ('a' <= ch && ch <= 'f'))) {
+ return DemangleInvalid;
+ }
+ }
+ buf->start = parser->sym + start;
+ buf->len = parser->next - start - 1; // skip final _
+ return DemangleOk;
+}
+
+static NODISCARD demangle_status parser_digit_10(struct parser *parser, uint8_t *out) {
+ uint8_t ch = parser_peek(parser);
+ if ('0' <= ch && ch <= '9') {
+ *out = ch - '0';
+ parser->next++;
+ return DemangleOk;
+ } else {
+ return DemangleInvalid;
+ }
+}
+
+static NODISCARD demangle_status parser_digit_62(struct parser *parser, uint64_t *out) {
+ uint8_t ch = parser_peek(parser);
+ if ('0' <= ch && ch <= '9') {
+ *out = ch - '0';
+ parser->next++;
+ return DemangleOk;
+ } else if ('a' <= ch && ch <= 'z') {
+ *out = 10 + (ch - 'a');
+ parser->next++;
+ return DemangleOk;
+ } else if ('A' <= ch && ch <= 'Z') {
+ *out = 10 + 26 + (ch - 'A');
+ parser->next++;
+ return DemangleOk;
+ } else {
+ return DemangleInvalid;
+ }
+}
+
+static NODISCARD demangle_status parser_integer_62(struct parser *parser, uint64_t *out) {
+ if (parser_eat(parser, '_')) {
+ *out = 0;
+ return DemangleOk;
+ }
+
+ uint64_t x = 0;
+ demangle_status status;
+ while (!parser_eat(parser, '_')) {
+ uint64_t d;
+ if ((status = parser_digit_62(parser, &d)) != DemangleOk) {
+ return status;
+ }
+ if (x > UINT64_MAX / 62) {
+ return DemangleInvalid;
+ }
+ x *= 62;
+ if (x > UINT64_MAX - d) {
+ return DemangleInvalid;
+ }
+ x += d;
+ }
+ if (x == UINT64_MAX) {
+ return DemangleInvalid;
+ }
+ *out = x + 1;
+ return DemangleOk;
+}
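+// Examples (editor's sketch of the v0 base-62 scheme): "_" parses to 0,
+// "0_" to 1, "a_" to 11 and "Z_" to 62; i.e. the encoded value plus one,
+// with a bare "_" reserved for zero.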
+
+static NODISCARD demangle_status parser_opt_integer_62(struct parser *parser, uint8_t tag, uint64_t *out) {
+ if (!parser_eat(parser, tag)) {
+ *out = 0;
+ return DemangleOk;
+ }
+
+ demangle_status status;
+ if ((status = parser_integer_62(parser, out)) != DemangleOk) {
+ return status;
+ }
+ if (*out == UINT64_MAX) {
+ return DemangleInvalid;
+ }
+ *out = *out + 1;
+ return DemangleOk;
+}
+
+static NODISCARD demangle_status parser_disambiguator(struct parser *parser, uint64_t *out) {
+ return parser_opt_integer_62(parser, 's', out);
+}
+
+typedef uint8_t parser_namespace_type;
+
+static NODISCARD demangle_status parser_namespace(struct parser *parser, parser_namespace_type *out) {
+ uint8_t next = parser_next(parser);
+ if ('A' <= next && next <= 'Z') {
+ *out = next;
+ return DemangleOk;
+ } else if ('a' <= next && next <= 'z') {
+ *out = 0;
+ return DemangleOk;
+ } else {
+ return DemangleInvalid;
+ }
+}
+
+static NODISCARD demangle_status parser_backref(struct parser *parser, struct parser *out) {
+ size_t start = parser->next;
+ if (start == 0) {
+ return DemangleBug;
+ }
+ size_t s_start = start - 1;
+ uint64_t i;
+ demangle_status status = parser_integer_62(parser, &i);
+ if (status != DemangleOk) {
+ return status;
+ }
+ if (i >= s_start) {
+ return DemangleInvalid;
+ }
+ struct parser res = {
+ .sym = parser->sym,
+ .sym_len = parser->sym_len,
+ .next = (size_t)i,
+ .depth = parser->depth
+ };
+ status = parser_push_depth(&res);
+ if (status != DemangleOk) {
+ return status;
+ }
+ *out = res;
+ return DemangleOk;
+}
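+// Example (editor's sketch): after a 'B' tag, "5_" parses to 6, so parsing
+// resumes at byte offset 6 of the mangled body (which must lie strictly
+// before the backref); repeated components are thus encoded once and
+// referenced by position.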
+
+static NODISCARD demangle_status parser_ident(struct parser *parser, struct ident *out) {
+ bool is_punycode = parser_eat(parser, 'u');
+ size_t len;
+ uint8_t d;
+ demangle_status status = parser_digit_10(parser, &d);
+ len = d;
+ if (status != DemangleOk) {
+ return status;
+ }
+ if (len) {
+ for (;;) {
+ status = parser_digit_10(parser, &d);
+ if (status != DemangleOk) {
+ break;
+ }
+ if (len > SIZE_MAX / 10) {
+ return DemangleInvalid;
+ }
+ len *= 10;
+ if (len > SIZE_MAX - d) {
+ return DemangleInvalid;
+ }
+ len += d;
+ }
+ }
+
+ // Skip past the optional `_` separator.
+ parser_eat(parser, '_');
+
+ size_t start = parser->next;
+ if (parser->sym_len - parser->next < len) {
+ return DemangleInvalid;
+ }
+ parser->next += len;
+
+ const char *ident = &parser->sym[start];
+
+ if (is_punycode) {
+ const char *underscore = demangle_memrchr(ident, '_', (size_t)len);
+ if (underscore == NULL) {
+ *out = (struct ident){
+ .ascii_start="",
+ .ascii_len=0,
+ .punycode_start=ident,
+ .punycode_len=len
+ };
+ } else {
+ size_t ascii_len = underscore - ident;
+ // ascii_len <= len - 1 since `_` is in the first len bytes
+ size_t punycode_len = len - 1 - ascii_len;
+ *out = (struct ident){
+ .ascii_start=ident,
+ .ascii_len=ascii_len,
+ .punycode_start=underscore + 1,
+ .punycode_len=punycode_len
+ };
+ }
+ if (out->punycode_len == 0) {
+ return DemangleInvalid;
+ }
+ return DemangleOk;
+ } else {
+ *out = (struct ident) {
+ .ascii_start=ident,
+ .ascii_len=(size_t)len,
+ .punycode_start="",
+ .punycode_len=0,
+ };
+ return DemangleOk;
+ }
+}
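+// Examples (editor's sketch): "3foo" parses to the ASCII ident "foo";
+// "u10Mnchen_3ya" takes 10 bytes and splits at the last '_' into ASCII
+// "Mnchen" plus punycode deltas "3ya", later displayed as "München".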
+
+#define INVALID_SYNTAX "{invalid syntax}"
+
+static const char *demangle_error_message(demangle_status status) {
+ switch (status) {
+ case DemangleInvalid:
+ return INVALID_SYNTAX;
+ case DemangleBug:
+ return "{bug}";
+ case DemangleRecursed:
+ return "{recursion limit reached}";
+ default:
+ return "{unknown error}";
+ }
+}
+
+#define PRINT(print_fn) \
+ do { \
+ if ((print_fn) == OverflowOverflow) { \
+ return OverflowOverflow; \
+ } \
+ } while(0)
+
+#define PRINT_CH(printer, s) PRINT(printer_print_ch((printer), (s)))
+#define PRINT_STR(printer, s) PRINT(printer_print_str((printer), (s)))
+#define PRINT_U64(printer, s) PRINT(printer_print_u64((printer), (s)))
+#define PRINT_IDENT(printer, s) PRINT(printer_print_ident((printer), (s)))
+
+#define INVALID(printer) \
+ do { \
+ PRINT_STR((printer), INVALID_SYNTAX); \
+ (printer)->status = DemangleInvalid; \
+ return OverflowOk; \
+ } while(0)
+
+#define PARSE(printer, method, ...) \
+ do { \
+ if ((printer)->status != DemangleOk) { \
+ PRINT_STR((printer), "?"); \
+ return OverflowOk; \
+ } else { \
+ demangle_status _parse_status = method(&(printer)->parser, ## __VA_ARGS__); \
+ if (_parse_status != DemangleOk) { \
+ PRINT_STR((printer), demangle_error_message(_parse_status)); \
+ (printer)->status = _parse_status; \
+ return OverflowOk; \
+ } \
+ } \
+ } while(0)
+
+#define PRINT_SEP_LIST(printer, body, sep) \
+ do { \
+ size_t _sep_list_i; \
+ PRINT_SEP_LIST_COUNT(printer, _sep_list_i, body, sep); \
+ } while(0)
+
+#define PRINT_SEP_LIST_COUNT(printer, count, body, sep) \
+ do { \
+ count = 0; \
+ while ((printer)->status == DemangleOk && !printer_eat((printer), 'E')) { \
+ if (count > 0) { PRINT_STR(printer, sep); } \
+ body; \
+ count++; \
+ } \
+ } while(0)
+
+static bool printer_eat(struct printer *printer, uint8_t b) {
+ if (printer->status != DemangleOk) {
+ return false;
+ }
+
+ return parser_eat(&printer->parser, b);
+}
+
+static void printer_pop_depth(struct printer *printer) {
+ if (printer->status == DemangleOk) {
+ parser_pop_depth(&printer->parser);
+ }
+}
+
+static NODISCARD overflow_status printer_print_buf(struct printer *printer, const char *start, size_t len) {
+ if (printer->out == NULL) {
+ return OverflowOk;
+ }
+ if (printer->out_len < len) {
+ return OverflowOverflow;
+ }
+
+ memcpy(printer->out, start, len);
+ printer->out += len;
+ printer->out_len -= len;
+ return OverflowOk;
+}
+
+static NODISCARD overflow_status printer_print_str(struct printer *printer, const char *buf) {
+ return printer_print_buf(printer, buf, strlen(buf));
+}
+
+static NODISCARD overflow_status printer_print_ch(struct printer *printer, char ch) {
+ return printer_print_buf(printer, &ch, 1);
+}
+
+static NODISCARD overflow_status printer_print_u64(struct printer *printer, uint64_t n) {
+ char buf[32] = {0};
+ sprintf(buf, "%llu", (unsigned long long)n); // printing uint64 uses 21 < 32 chars
+ return printer_print_str(printer, buf);
+}
+
+static NODISCARD overflow_status printer_print_ident(struct printer *printer, struct ident *ident) {
+ if (printer->out == NULL) {
+ return OverflowOk;
+ }
+
+ size_t out_len = printer->out_len;
+ overflow_status status;
+ if ((status = display_ident(ident->ascii_start, ident->ascii_len, ident->punycode_start, ident->punycode_len, (uint8_t*)printer->out, &out_len)) != OverflowOk) {
+ return status;
+ }
+ printer->out += out_len;
+ printer->out_len -= out_len;
+ return OverflowOk;
+}
+
+typedef overflow_status (*printer_fn)(struct printer *printer);
+typedef overflow_status (*backref_fn)(struct printer *printer, bool *arg);
+
+static NODISCARD overflow_status printer_print_backref(struct printer *printer, backref_fn func, bool *arg) {
+ struct parser backref;
+ PARSE(printer, parser_backref, &backref);
+
+ if (printer->out == NULL) {
+ return OverflowOk;
+ }
+
+ struct parser orig_parser = printer->parser;
+    demangle_status orig_status = printer->status; // FIXME: not sure this is needed; mirrors matching on Ok in the Rust original
+ printer->parser = backref;
+ printer->status = DemangleOk;
+ overflow_status status = func(printer, arg);
+ printer->parser = orig_parser;
+ printer->status = orig_status;
+
+ return status;
+}
+
+static NODISCARD overflow_status printer_print_lifetime_from_index(struct printer *printer, uint64_t lt) {
+ // Bound lifetimes aren't tracked when skipping printing.
+ if (printer->out == NULL) {
+ return OverflowOk;
+ }
+
+ PRINT_STR(printer, "'");
+ if (lt == 0) {
+ PRINT_STR(printer, "_");
+ return OverflowOk;
+ }
+
+ if (printer->bound_lifetime_depth < lt) {
+ INVALID(printer);
+ } else {
+ uint64_t depth = printer->bound_lifetime_depth - lt;
+ if (depth < 26) {
+ PRINT_CH(printer, 'a' + depth);
+ } else {
+ PRINT_STR(printer, "_");
+ PRINT_U64(printer, depth);
+ }
+
+ return OverflowOk;
+ }
+}
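+// Example (editor's sketch): with bound_lifetime_depth == 2, index 1 prints
+// as 'b and index 2 as 'a, while index 0 always prints the erased
+// lifetime '_.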
+
+static NODISCARD overflow_status printer_in_binder(struct printer *printer, printer_fn func) {
+ uint64_t bound_lifetimes;
+ PARSE(printer, parser_opt_integer_62, 'G', &bound_lifetimes);
+
+ // Don't track bound lifetimes when skipping printing.
+ if (printer->out == NULL) {
+ return func(printer);
+ }
+
+ if (bound_lifetimes > 0) {
+ PRINT_STR(printer, "for<");
+ for (uint64_t i = 0; i < bound_lifetimes; i++) {
+ if (i > 0) {
+ PRINT_STR(printer, ", ");
+ }
+ printer->bound_lifetime_depth++;
+ PRINT(printer_print_lifetime_from_index(printer, 1));
+ }
+ PRINT_STR(printer, "> ");
+ }
+
+ overflow_status r = func(printer);
+ printer->bound_lifetime_depth -= bound_lifetimes;
+
+ return r;
+}
+
+static NODISCARD overflow_status printer_print_generic_arg(struct printer *printer) {
+ if (printer_eat(printer, 'L')) {
+ uint64_t lt;
+ PARSE(printer, parser_integer_62, &lt);
+ return printer_print_lifetime_from_index(printer, lt);
+ } else if (printer_eat(printer, 'K')) {
+ return printer_print_const(printer, false);
+ } else {
+ return printer_print_type(printer);
+ }
+}
+
+static NODISCARD overflow_status printer_print_generic_args(struct printer *printer) {
+ PRINT_STR(printer, "<");
+ PRINT_SEP_LIST(printer, PRINT(printer_print_generic_arg(printer)), ", ");
+ PRINT_STR(printer, ">");
+ return OverflowOk;
+}
+
+static NODISCARD overflow_status printer_print_path_out_of_value(struct printer *printer, bool *_arg) {
+ (void)_arg;
+ return printer_print_path(printer, false);
+}
+
+static NODISCARD overflow_status printer_print_path_in_value(struct printer *printer, bool *_arg) {
+ (void)_arg;
+ return printer_print_path(printer, true);
+}
+
+static NODISCARD overflow_status printer_print_path(struct printer *printer, bool in_value) {
+ PARSE(printer, parser_push_depth);
+ uint8_t tag;
+ PARSE(printer, parser_ch, &tag);
+
+ overflow_status st;
+ uint64_t dis;
+ struct ident name;
+ parser_namespace_type ns;
+ char *orig_out;
+
+ switch(tag) {
+ case 'C':
+ PARSE(printer, parser_disambiguator, &dis);
+ PARSE(printer, parser_ident, &name);
+
+ PRINT_IDENT(printer, &name);
+
+ if (printer->out != NULL && !printer->alternate && dis != 0) {
+ PRINT_STR(printer, "[");
+ char buf[24] = {0};
+ sprintf(buf, "%llx", (unsigned long long)dis);
+ PRINT_STR(printer, buf);
+ PRINT_STR(printer, "]");
+ }
+ break;
+ case 'N':
+ PARSE(printer, parser_namespace, &ns);
+ if ((st = printer_print_path(printer, in_value)) != OverflowOk) {
+ return st;
+ }
+
+ // HACK(eddyb) if the parser is already marked as having errored,
+ // `parse!` below will print a `?` without its preceding `::`
+ // (because printing the `::` is skipped in certain conditions,
+ // i.e. a lowercase namespace with an empty identifier),
+ // so in order to get `::?`, the `::` has to be printed here.
+ if (printer->status != DemangleOk) {
+ PRINT_STR(printer, "::");
+ }
+
+ PARSE(printer, parser_disambiguator, &dis);
+ PARSE(printer, parser_ident, &name);
+ // Special namespace, like closures and shims
+ if (ns) {
+ PRINT_STR(printer, "::{");
+ if (ns == 'C') {
+ PRINT_STR(printer, "closure");
+ } else if (ns == 'S') {
+ PRINT_STR(printer, "shim");
+ } else {
+ PRINT_CH(printer, ns);
+ }
+ if (name.ascii_len != 0 || name.punycode_len != 0) {
+ PRINT_STR(printer, ":");
+ PRINT_IDENT(printer, &name);
+ }
+ PRINT_STR(printer, "#");
+ PRINT_U64(printer, dis);
+ PRINT_STR(printer, "}");
+ } else {
+ // Implementation-specific/unspecified namespaces
+ if (name.ascii_len != 0 || name.punycode_len != 0) {
+ PRINT_STR(printer, "::");
+ PRINT_IDENT(printer, &name);
+ }
+ }
+ break;
+ case 'M':
+ case 'X':
+        // for impls, ignore the impl's own path
+ PARSE(printer, parser_disambiguator, &dis);
+ orig_out = printer->out;
+ printer->out = NULL;
+ PRINT(printer_print_path(printer, false));
+ printer->out = orig_out;
+
+        /* fallthrough */
+ case 'Y':
+ PRINT_STR(printer, "<");
+ PRINT(printer_print_type(printer));
+ if (tag != 'M') {
+ PRINT_STR(printer, " as ");
+ PRINT(printer_print_path(printer, false));
+ }
+ PRINT_STR(printer, ">");
+ break;
+ case 'I':
+ PRINT(printer_print_path(printer, in_value));
+ if (in_value) {
+ PRINT_STR(printer, "::");
+ }
+ PRINT(printer_print_generic_args(printer));
+ break;
+ case 'B':
+ PRINT(printer_print_backref(printer, in_value ? printer_print_path_in_value : printer_print_path_out_of_value, NULL));
+ break;
+ default:
+ INVALID(printer);
+ break;
+ }
+
+ printer_pop_depth(printer);
+ return OverflowOk;
+}
+
+static NODISCARD overflow_status printer_print_const_uint(struct printer *printer, uint8_t tag) {
+ struct buf hex;
+ PARSE(printer, parser_hex_nibbles, &hex);
+
+ uint64_t val;
+ if (try_parse_uint(hex.start, hex.len, &val)) {
+ PRINT_U64(printer, val);
+ } else {
+ PRINT_STR(printer, "0x");
+ PRINT(printer_print_buf(printer, hex.start, hex.len));
+ }
+
+ if (printer->out != NULL && !printer->alternate) {
+ const char *ty = basic_type(tag);
+ if (/* safety */ ty != NULL) {
+ PRINT_STR(printer, ty);
+ }
+ }
+
+ return OverflowOk;
+}
+
+static NODISCARD overflow_status printer_print_const_str_literal(struct printer *printer) {
+ struct buf hex;
+ PARSE(printer, parser_hex_nibbles, &hex);
+
+ size_t out_len = SIZE_MAX;
+ nibbles_to_string_status nts_status = nibbles_to_string(hex.start, hex.len, NULL, &out_len);
+ switch (nts_status) {
+ case NtsOk:
+ if (printer->out != NULL) {
+ out_len = printer->out_len;
+ nts_status = nibbles_to_string(hex.start, hex.len, (uint8_t*)printer->out, &out_len);
+ if (nts_status != NtsOk) {
+ return OverflowOverflow;
+ }
+ printer->out += out_len;
+ printer->out_len -= out_len;
+ }
+ return OverflowOk;
+ case NtsOverflow:
+ // technically if there is a string of size `SIZE_MAX/6` whose escaped version overflows
+ // SIZE_MAX but has an invalid char, this will be a "fake" overflow. In practice,
+ // that is not going to happen and a fuzzer will not generate strings of this length.
+ return OverflowOverflow;
+ case NtsInvalid:
+ default:
+ INVALID(printer);
+ }
+}
+
+static NODISCARD overflow_status printer_print_const_struct(struct printer *printer) {
+ uint64_t dis;
+ struct ident name;
+ PARSE(printer, parser_disambiguator, &dis);
+ PARSE(printer, parser_ident, &name);
+ PRINT_IDENT(printer, &name);
+ PRINT_STR(printer, ": ");
+ return printer_print_const(printer, true);
+}
+
+static NODISCARD overflow_status printer_print_const_out_of_value(struct printer *printer, bool *_arg) {
+ (void)_arg;
+ return printer_print_const(printer, false);
+}
+
+static NODISCARD overflow_status printer_print_const_in_value(struct printer *printer, bool *_arg) {
+ (void)_arg;
+ return printer_print_const(printer, true);
+}
+
+static NODISCARD overflow_status printer_print_const(struct printer *printer, bool in_value) {
+ uint8_t tag;
+
+ PARSE(printer, parser_ch, &tag);
+ PARSE(printer, parser_push_depth);
+
+ struct buf hex;
+ uint64_t val;
+ size_t count;
+
+ bool opened_brace = false;
+#define OPEN_BRACE_IF_OUTSIDE_EXPR \
+ do { if (!in_value) { \
+ opened_brace = true; \
+ PRINT_STR(printer, "{"); \
+ } } while(0)
+
+ switch(tag) {
+ case 'p':
+ PRINT_STR(printer, "_");
+ break;
+ // Primitive leaves with hex-encoded values (see `basic_type`).
+ case 'a':
+ case 's':
+ case 'l':
+ case 'x':
+ case 'n':
+ case 'i':
+ if (printer_eat(printer, 'n')) {
+ PRINT_STR(printer, "-");
+ }
+ /* fallthrough */
+ case 'h':
+ case 't':
+ case 'm':
+ case 'y':
+ case 'o':
+ case 'j':
+ PRINT(printer_print_const_uint(printer, tag));
+ break;
+ case 'b':
+ PARSE(printer, parser_hex_nibbles, &hex);
+ if (try_parse_uint(hex.start, hex.len, &val)) {
+ if (val == 0) {
+ PRINT_STR(printer, "false");
+ } else if (val == 1) {
+ PRINT_STR(printer, "true");
+ } else {
+ INVALID(printer);
+ }
+ } else {
+ INVALID(printer);
+ }
+ break;
+ case 'c':
+ PARSE(printer, parser_hex_nibbles, &hex);
+ if (try_parse_uint(hex.start, hex.len, &val)
+ && val < UINT32_MAX
+ && validate_char((uint32_t)val))
+ {
+ char escaped_buf[ESCAPED_SIZE];
+ size_t escaped_size = char_to_string((uint32_t)val, '\'', true, &escaped_buf);
+
+ PRINT_STR(printer, "'");
+ PRINT(printer_print_buf(printer, escaped_buf, escaped_size));
+ PRINT_STR(printer, "'");
+ } else {
+ INVALID(printer);
+ }
+ break;
+ case 'e':
+ OPEN_BRACE_IF_OUTSIDE_EXPR;
+ PRINT_STR(printer, "*");
+ PRINT(printer_print_const_str_literal(printer));
+ break;
+ case 'R':
+ case 'Q':
+ if (tag == 'R' && printer_eat(printer, 'e')) {
+ PRINT(printer_print_const_str_literal(printer));
+ } else {
+ OPEN_BRACE_IF_OUTSIDE_EXPR;
+ PRINT_STR(printer, "&");
+ if (tag != 'R') {
+ PRINT_STR(printer, "mut ");
+ }
+ PRINT(printer_print_const(printer, true));
+ }
+ break;
+ case 'A':
+ OPEN_BRACE_IF_OUTSIDE_EXPR;
+ PRINT_STR(printer, "[");
+ PRINT_SEP_LIST(printer, PRINT(printer_print_const(printer, true)), ", ");
+ PRINT_STR(printer, "]");
+ break;
+ case 'T':
+ OPEN_BRACE_IF_OUTSIDE_EXPR;
+ PRINT_STR(printer, "(");
+ PRINT_SEP_LIST_COUNT(printer, count, PRINT(printer_print_const(printer, true)), ", ");
+ if (count == 1) {
+ PRINT_STR(printer, ",");
+ }
+ PRINT_STR(printer, ")");
+ break;
+ case 'V':
+ OPEN_BRACE_IF_OUTSIDE_EXPR;
+ PRINT(printer_print_path(printer, true));
+ PARSE(printer, parser_ch, &tag);
+ switch(tag) {
+ case 'U':
+ break;
+ case 'T':
+ PRINT_STR(printer, "(");
+ PRINT_SEP_LIST(printer, PRINT(printer_print_const(printer, true)), ", ");
+ PRINT_STR(printer, ")");
+ break;
+ case 'S':
+ PRINT_STR(printer, " { ");
+ PRINT_SEP_LIST(printer, PRINT(printer_print_const_struct(printer)), ", ");
+ PRINT_STR(printer, " }");
+ break;
+ default:
+ INVALID(printer);
+ }
+ break;
+ case 'B':
+ PRINT(printer_print_backref(printer, in_value ? printer_print_const_in_value : printer_print_const_out_of_value, NULL));
+ break;
+ default:
+ INVALID(printer);
+ }
+#undef OPEN_BRACE_IF_OUTSIDE_EXPR
+
+ if (opened_brace) {
+ PRINT_STR(printer, "}");
+ }
+ printer_pop_depth(printer);
+
+ return OverflowOk;
+}
+
+/// A trait in a trait object may have some "existential projections"
+/// (i.e. associated type bindings) after it, which should be printed
+/// in the `<...>` of the trait, e.g. `dyn Trait<T, U, Assoc=X>`.
+/// To this end, this method will keep the `<...>` of an 'I' path
+/// open, by omitting the `>`, and set `*open` to true in that case.
+static NODISCARD overflow_status printer_print_maybe_open_generics(struct printer *printer, bool *open) {
+ if (printer_eat(printer, 'B')) {
+ // NOTE(eddyb) the closure may not run if printing is being skipped,
+ // but in that case the returned boolean doesn't matter.
+ *open = false;
+ return printer_print_backref(printer, printer_print_maybe_open_generics, open);
+ } else if(printer_eat(printer, 'I')) {
+ PRINT(printer_print_path(printer, false));
+ PRINT_STR(printer, "<");
+ PRINT_SEP_LIST(printer, PRINT(printer_print_generic_arg(printer)), ", ");
+ *open = true;
+ return OverflowOk;
+ } else {
+ PRINT(printer_print_path(printer, false));
+ *open = false;
+ return OverflowOk;
+ }
+}
+
+static NODISCARD overflow_status printer_print_dyn_trait(struct printer *printer) {
+ bool open;
+ PRINT(printer_print_maybe_open_generics(printer, &open));
+
+ while (printer_eat(printer, 'p')) {
+ if (!open) {
+ PRINT_STR(printer, "<");
+ open = true;
+ } else {
+ PRINT_STR(printer, ", ");
+ }
+
+ struct ident name;
+ PARSE(printer, parser_ident, &name);
+
+ PRINT_IDENT(printer, &name);
+ PRINT_STR(printer, " = ");
+ PRINT(printer_print_type(printer));
+ }
+
+ if (open) {
+ PRINT_STR(printer, ">");
+ }
+
+ return OverflowOk;
+}
+
+static NODISCARD overflow_status printer_print_object_bounds(struct printer *printer) {
+ PRINT_SEP_LIST(printer, PRINT(printer_print_dyn_trait(printer)), " + ");
+ return OverflowOk;
+}
+
+static NODISCARD overflow_status printer_print_function_type(struct printer *printer) {
+ bool is_unsafe = printer_eat(printer, 'U');
+ const char *abi;
+ size_t abi_len;
+ if (printer_eat(printer, 'K')) {
+ if (printer_eat(printer, 'C')) {
+ abi = "C";
+ abi_len = 1;
+ } else {
+ struct ident abi_ident;
+ PARSE(printer, parser_ident, &abi_ident);
+ if (abi_ident.ascii_len == 0 || abi_ident.punycode_len != 0) {
+ INVALID(printer);
+ }
+ abi = abi_ident.ascii_start;
+ abi_len = abi_ident.ascii_len;
+ }
+ } else {
+ abi = NULL;
+ abi_len = 0;
+ }
+
+ if (is_unsafe) {
+ PRINT_STR(printer, "unsafe ");
+ }
+
+ if (abi != NULL) {
+ PRINT_STR(printer, "extern \"");
+
+ // replace _ with -
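+		// e.g. a mangled "C_unwind" ABI string is printed as `extern "C-unwind"`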
+ while (abi_len > 0) {
+ const char *minus = memchr(abi, '_', abi_len);
+ if (minus == NULL) {
+ PRINT(printer_print_buf(printer, (const char*)abi, abi_len));
+ break;
+ } else {
+ size_t space_to_minus = minus - abi;
+ PRINT(printer_print_buf(printer, (const char*)abi, space_to_minus));
+ PRINT_STR(printer, "-");
+ abi = minus + 1;
+ abi_len -= (space_to_minus + 1);
+ }
+ }
+
+ PRINT_STR(printer, "\" ");
+ }
+
+ PRINT_STR(printer, "fn(");
+ PRINT_SEP_LIST(printer, PRINT(printer_print_type(printer)), ", ");
+ PRINT_STR(printer, ")");
+
+ if (printer_eat(printer, 'u')) {
+ // Skip printing the return type if it's 'u', i.e. `()`.
+ } else {
+ PRINT_STR(printer, " -> ");
+ PRINT(printer_print_type(printer));
+ }
+
+ return OverflowOk;
+}
+
+static NODISCARD overflow_status printer_print_type_backref(struct printer *printer, bool *_arg) {
+ (void)_arg;
+ return printer_print_type(printer);
+}
+
+static NODISCARD overflow_status printer_print_type(struct printer *printer) {
+ uint8_t tag;
+ PARSE(printer, parser_ch, &tag);
+
+ const char *basic_ty = basic_type(tag);
+ if (basic_ty) {
+ return printer_print_str(printer, basic_ty);
+ }
+
+ uint64_t count;
+ uint64_t lt;
+
+ PARSE(printer, parser_push_depth);
+ switch (tag) {
+ case 'R':
+ case 'Q':
+ PRINT_STR(printer, "&");
+ if (printer_eat(printer, 'L')) {
+ PARSE(printer, parser_integer_62, &lt);
+ if (lt != 0) {
+ PRINT(printer_print_lifetime_from_index(printer, lt));
+ PRINT_STR(printer, " ");
+ }
+ }
+ if (tag != 'R') {
+ PRINT_STR(printer, "mut ");
+ }
+ PRINT(printer_print_type(printer));
+ break;
+ case 'P':
+ case 'O':
+ PRINT_STR(printer, "*");
+ if (tag != 'P') {
+ PRINT_STR(printer, "mut ");
+ } else {
+ PRINT_STR(printer, "const ");
+ }
+ PRINT(printer_print_type(printer));
+ break;
+ case 'A':
+ case 'S':
+ PRINT_STR(printer, "[");
+ PRINT(printer_print_type(printer));
+ if (tag == 'A') {
+ PRINT_STR(printer, "; ");
+ PRINT(printer_print_const(printer, true));
+ }
+ PRINT_STR(printer, "]");
+ break;
+ case 'T':
+ PRINT_STR(printer, "(");
+ PRINT_SEP_LIST_COUNT(printer, count, PRINT(printer_print_type(printer)), ", ");
+ if (count == 1) {
+ PRINT_STR(printer, ",");
+ }
+ PRINT_STR(printer, ")");
+ break;
+ case 'F':
+ PRINT(printer_in_binder(printer, printer_print_function_type));
+ break;
+ case 'D':
+ PRINT_STR(printer, "dyn ");
+ PRINT(printer_in_binder(printer, printer_print_object_bounds));
+
+ if (!printer_eat(printer, 'L')) {
+ INVALID(printer);
+ }
+ PARSE(printer, parser_integer_62, &lt);
+
+ if (lt != 0) {
+ PRINT_STR(printer, " + ");
+ PRINT(printer_print_lifetime_from_index(printer, lt));
+ }
+ break;
+ case 'B':
+ PRINT(printer_print_backref(printer, printer_print_type_backref, NULL));
+ break;
+ default:
+ // Go back to the tag, so `print_path` also sees it.
+ if (printer->status == DemangleOk && /* safety */ printer->parser.next > 0) {
+ printer->parser.next--;
+ }
+ PRINT(printer_print_path(printer, false));
+ }
+
+ printer_pop_depth(printer);
+ return OverflowOk;
+}
+
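+// Legacy (pre-v0) mangled symbols look like `_ZN3foo3barE`: length-prefixed
+// path elements between `_ZN` (or `ZN`/`__ZN`) and a trailing `E`, so e.g.
+// `_ZN3foo3barE` demangles to `foo::bar`.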
+NODISCARD static demangle_status rust_demangle_legacy_demangle(const char *s, size_t s_len, struct demangle_legacy *res, const char **rest)
+{
+ if (s_len > strlen(s)) {
+		// s_len only exists to shorten the string; this is not a buffer API
+ return DemangleInvalid;
+ }
+
+ const char *inner;
+ size_t inner_len;
+ if (s_len >= 3 && !strncmp(s, "_ZN", 3)) {
+ inner = s + 3;
+ inner_len = s_len - 3;
+ } else if (s_len >= 2 && !strncmp(s, "ZN", 2)) {
+ // On Windows, dbghelp strips leading underscores, so we accept "ZN...E"
+ // form too.
+ inner = s + 2;
+ inner_len = s_len - 2;
+ } else if (s_len >= 4 && !strncmp(s, "__ZN", 4)) {
+ // On OSX, symbols are prefixed with an extra _
+ inner = s + 4;
+ inner_len = s_len - 4;
+ } else {
+ return DemangleInvalid;
+ }
+
+ if (!str_isascii(inner, inner_len)) {
+ return DemangleInvalid;
+ }
+
+ size_t elements = 0;
+ const char *chars = inner;
+ size_t chars_len = inner_len;
+ if (chars_len == 0) {
+ return DemangleInvalid;
+ }
+ char c;
+ while ((c = *chars) != 'E') {
+ // Decode an identifier element's length
+ if (c < '0' || c > '9') {
+ return DemangleInvalid;
+ }
+ size_t len = 0;
+ while (c >= '0' && c <= '9') {
+ size_t d = c - '0';
+ if (len > SIZE_MAX / 10) {
+ return DemangleInvalid;
+ }
+ len *= 10;
+ if (len > SIZE_MAX - d) {
+ return DemangleInvalid;
+ }
+ len += d;
+
+ chars++;
+ chars_len--;
+ if (chars_len == 0) {
+ return DemangleInvalid;
+ }
+ c = *chars;
+ }
+
+ // Advance by the length
+ if (chars_len <= len) {
+ return DemangleInvalid;
+ }
+ chars += len;
+ chars_len -= len;
+ elements++;
+ }
+ *res = (struct demangle_legacy) { inner, inner_len, elements };
+ *rest = chars + 1;
+ return DemangleOk;
+}
+
+static bool is_rust_hash(const char *s, size_t len) {
+ if (len == 0 || s[0] != 'h') {
+ return false;
+ }
+
+ for (size_t i = 1; i < len; i++) {
+ if (!((s[i] >= '0' && s[i] <= '9') || (s[i] >= 'a' && s[i] <= 'f') || (s[i] >= 'A' && s[i] <= 'F'))) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+NODISCARD static overflow_status rust_demangle_legacy_display_demangle(struct demangle_legacy res, char *out, size_t len, bool alternate)
+{
+ struct printer printer = {
+ // not actually using the parser part of the printer, just keeping it to share the format functions
+ DemangleOk,
+ { NULL },
+ out,
+ len,
+ 0,
+ alternate
+ };
+ const char *inner = res.mangled;
+ for (size_t element = 0; element < res.elements; element++) {
+ size_t i = 0;
+ const char *rest;
+ for (rest = inner; rest < res.mangled + res.mangled_len && *rest >= '0' && *rest <= '9'; rest++) {
+ i *= 10;
+ i += *rest - '0';
+ }
+ if ((size_t)(res.mangled + res.mangled_len - rest) < i) {
+			// safety: we shouldn't reach this point if the input string was validated; bail out.
+			// safety: we know rest <= res.mangled + res.mangled_len from the for-loop above
+ break;
+ }
+
+ size_t len = i;
+ inner = rest + len;
+
+ // From here on, inner contains a pointer to the next element, rest[:len] to the current one
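+		// In alternate ({:#}) mode, a trailing hash element (e.g. `h1a2b3c4d5e6f7a8b`)
+		// is dropped from the output.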
+ if (alternate && element + 1 == res.elements && is_rust_hash(rest, i)) {
+ break;
+ }
+ if (element != 0) {
+ PRINT_STR(&printer, "::");
+ }
+
+ if (len >= 2 && !strncmp(rest, "_$", 2)) {
+ rest++;
+ len--;
+ }
+
+ while (len > 0) {
+ if (rest[0] == '.') {
+ if (len >= 2 && rest[1] == '.') {
+ PRINT_STR(&printer, "::");
+ rest += 2;
+ len -= 2;
+ } else {
+ PRINT_STR(&printer, ".");
+ rest += 1;
+ len -= 1;
+ }
+ } else if (rest[0] == '$') {
+ const char *escape = memchr(rest + 1, '$', len - 1);
+ if (escape == NULL) {
+ break;
+ }
+ const char *escape_start = rest + 1;
+ size_t escape_len = escape - (rest + 1);
+
+ size_t next_len = len - (escape + 1 - rest);
+ const char *next_rest = escape + 1;
+
+ char ch;
+ if ((escape_len == 2 && escape_start[0] == 'S' && escape_start[1] == 'P')) {
+ ch = '@';
+ } else if ((escape_len == 2 && escape_start[0] == 'B' && escape_start[1] == 'P')) {
+ ch = '*';
+ } else if ((escape_len == 2 && escape_start[0] == 'R' && escape_start[1] == 'F')) {
+ ch = '&';
+ } else if ((escape_len == 2 && escape_start[0] == 'L' && escape_start[1] == 'T')) {
+ ch = '<';
+ } else if ((escape_len == 2 && escape_start[0] == 'G' && escape_start[1] == 'T')) {
+ ch = '>';
+ } else if ((escape_len == 2 && escape_start[0] == 'L' && escape_start[1] == 'P')) {
+ ch = '(';
+ } else if ((escape_len == 2 && escape_start[0] == 'R' && escape_start[1] == 'P')) {
+ ch = ')';
+ } else if ((escape_len == 1 && escape_start[0] == 'C')) {
+ ch = ',';
+ } else {
+ if (escape_len > 1 && escape_start[0] == 'u') {
+ escape_start++;
+ escape_len--;
+ uint64_t val;
+ if (try_parse_uint(escape_start, escape_len, &val)
+ && val < UINT32_MAX
+ && validate_char((uint32_t)val))
+ {
+ if (!unicode_iscontrol(val)) {
+ uint8_t wchr[4];
+ size_t wchr_len = code_to_utf8(wchr, (uint32_t)val);
+ PRINT(printer_print_buf(&printer, (const char*)wchr, wchr_len));
+ len = next_len;
+ rest = next_rest;
+ continue;
+ }
+ }
+ }
+ break; // print the rest of this element raw
+ }
+ PRINT_CH(&printer, ch);
+ len = next_len;
+ rest = next_rest;
+ } else {
+ size_t j = 0;
+ for (;j < len && rest[j] != '$' && rest[j] != '.';j++);
+ if (j == len) {
+ break;
+ }
+ PRINT(printer_print_buf(&printer, rest, j));
+ rest += j;
+ len -= j;
+ }
+ }
+ PRINT(printer_print_buf(&printer, rest, len));
+ }
+
+ if (printer.out_len < OVERFLOW_MARGIN) {
+ return OverflowOverflow;
+ }
+ *printer.out = '\0';
+ return OverflowOk;
+}
+
+static bool is_symbol_like(const char *s, size_t len) {
+	// rust-demangle's definition of symbol-like: printable ASCII excluding space (0x21-0x7e); anything else is not
+ for (size_t i = 0; i < len; i++) {
+ char ch = s[i];
+ if (!(ch >= 0x21 && ch <= 0x7e)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void rust_demangle_demangle(const char *s, struct demangle *res)
+{
+ // During ThinLTO LLVM may import and rename internal symbols, so strip out
+ // those endings first as they're one of the last manglings applied to symbol
+ // names.
+ const char *llvm = ".llvm.";
+ const char *found_llvm = strstr(s, llvm);
+ size_t s_len = strlen(s);
+ if (found_llvm) {
+ const char *all_hex_ptr = found_llvm + strlen(".llvm.");
+ bool all_hex = true;
+ for (;*all_hex_ptr;all_hex_ptr++) {
+ if (!(('0' <= *all_hex_ptr && *all_hex_ptr <= '9') ||
+ ('A' <= *all_hex_ptr && *all_hex_ptr <= 'F') ||
+ *all_hex_ptr == '@')) {
+ all_hex = false;
+ break;
+ }
+ }
+
+ if (all_hex) {
+ s_len = found_llvm - s;
+ }
+ }
+
+ const char *suffix;
+ struct demangle_legacy legacy;
+ demangle_status st = rust_demangle_legacy_demangle(s, s_len, &legacy, &suffix);
+ if (st == DemangleOk) {
+ *res = (struct demangle) {
+ .style=DemangleStyleLegacy,
+ .mangled=legacy.mangled,
+ .mangled_len=legacy.mangled_len,
+ .elements=legacy.elements,
+ .original=s,
+ .original_len=s_len,
+ .suffix=suffix,
+ .suffix_len=s_len - (suffix - s),
+ };
+ } else {
+ struct demangle_v0 v0;
+ st = rust_demangle_v0_demangle(s, s_len, &v0, &suffix);
+ if (st == DemangleOk) {
+ *res = (struct demangle) {
+ .style=DemangleStyleV0,
+ .mangled=v0.mangled,
+ .mangled_len=v0.mangled_len,
+ .elements=0,
+ .original=s,
+ .original_len=s_len,
+ .suffix=suffix,
+ .suffix_len=s_len - (suffix - s),
+ };
+ } else {
+ *res = (struct demangle) {
+ .style=DemangleStyleUnknown,
+ .mangled=NULL,
+ .mangled_len=0,
+ .elements=0,
+ .original=s,
+ .original_len=s_len,
+ .suffix=s,
+ .suffix_len=0,
+ };
+ }
+ }
+
+	// Some output formats, such as LLVM IR, add extra period-delimited words. See if
+	// we are in that case and keep the trailing words if so.
+ if (res->suffix_len) {
+ if (res->suffix[0] == '.' && is_symbol_like(res->suffix, res->suffix_len)) {
+ // Keep the suffix
+ } else {
+ // Reset the suffix and invalidate the demangling
+ res->style = DemangleStyleUnknown;
+ res->suffix_len = 0;
+ }
+ }
+}
+
+bool rust_demangle_is_known(struct demangle *res) {
+ return res->style != DemangleStyleUnknown;
+}
+
+overflow_status rust_demangle_display_demangle(struct demangle const *res, char *out, size_t len, bool alternate) {
+ size_t original_len = res->original_len;
+ size_t out_len;
+ switch (res->style) {
+ case DemangleStyleUnknown:
+		if (len < original_len) {
+			return OverflowOverflow;
+		}
+		memcpy(out, res->original, original_len);
+		out += original_len;
+		len -= original_len;
+		break;
+ case DemangleStyleLegacy: {
+ struct demangle_legacy legacy = {
+ res->mangled,
+ res->mangled_len,
+ res->elements
+ };
+ if (rust_demangle_legacy_display_demangle(legacy, out, len, alternate) == OverflowOverflow) {
+ return OverflowOverflow;
+ }
+ out_len = strlen(out);
+ out += out_len;
+ len -= out_len;
+ break;
+ }
+ case DemangleStyleV0: {
+ struct demangle_v0 v0 = {
+ res->mangled,
+ res->mangled_len
+ };
+ if (rust_demangle_v0_display_demangle(v0, out, len, alternate) == OverflowOverflow) {
+ return OverflowOverflow;
+ }
+ out_len = strlen(out);
+ out += out_len;
+ len -= out_len;
+ break;
+ }
+ }
+ size_t suffix_len = res->suffix_len;
+ if (len < suffix_len || len - suffix_len < OVERFLOW_MARGIN) {
+ return OverflowOverflow;
+ }
+ memcpy(out, res->suffix, suffix_len);
+ out[suffix_len] = 0;
+ return OverflowOk;
+}
diff --git a/tools/perf/util/demangle-rust-v0.h b/tools/perf/util/demangle-rust-v0.h
new file mode 100644
index 000000000000..d0092818610a
--- /dev/null
+++ b/tools/perf/util/demangle-rust-v0.h
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+// The contents of this file come from the Rust rustc-demangle library, hosted
+// in the <https://github.com/rust-lang/rustc-demangle> repository, licensed
+// under "Apache-2.0 OR MIT". For copyright details, see
+// <https://github.com/rust-lang/rustc-demangle/blob/main/README.md>.
+// Please note that the file should be kept as close as possible to upstream.
+
+#ifndef _H_DEMANGLE_V0_H
+#define _H_DEMANGLE_V0_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdbool.h>
+#include <stddef.h>
+
+#if defined(__GNUC__) || defined(__clang__)
+#define DEMANGLE_NODISCARD __attribute__((warn_unused_result))
+#else
+#define DEMANGLE_NODISCARD
+#endif
+
+typedef enum {
+ OverflowOk,
+ OverflowOverflow
+} overflow_status;
+
+enum demangle_style {
+ DemangleStyleUnknown = 0,
+ DemangleStyleLegacy,
+ DemangleStyleV0,
+};
+
+// Not using a union here to make the struct easier to copy-paste if needed.
+struct demangle {
+ enum demangle_style style;
+ // points to the "mangled" part of the name,
+ // not including `ZN` or `R` prefixes.
+ const char *mangled;
+ size_t mangled_len;
+	// In DemangleStyleLegacy, this is the number of path elements
+ size_t elements;
+	// While it's called "original", it will not contain `.llvm.9D1C9369@@16`-style
+	// suffixes, since those are to be ignored.
+ const char *original;
+ size_t original_len;
+	// Contains the part after the mangled name that is to be output,
+	// such as the `.exit.i.i` suffixes LLVM sometimes adds.
+ const char *suffix;
+ size_t suffix_len;
+};
+
+// If the output would leave fewer than OVERFLOW_MARGIN free bytes in the output
+// buffer, the demangler returns `OverflowOverflow` even if the output technically fits.
+#define OVERFLOW_MARGIN 4
+
+/// Demangle a C string that refers to a Rust symbol and put the intermediate demangling result in `res`.
+/// Beware that `res` contains references into `s`. If `s` is modified (or freed) before calling
+/// `rust_demangle_display_demangle`, the behavior is undefined.
+///
+/// Use `rust_demangle_display_demangle` to convert it to an actual string.
+void rust_demangle_demangle(const char *s, struct demangle *res);
+
+/// Write the string in a `struct demangle` into a buffer.
+///
+/// Return `OverflowOk` if the output buffer was sufficiently big, `OverflowOverflow` if it wasn't.
+/// This function is `O(n)` in the combined length of the input and *output* [$], but the demangled
+/// form of a symbol can be exponentially[$$] large, so it is recommended to put a sane bound
+/// (`rust-demangle` uses 1,000,000 bytes) on `len`.
+///
+/// If `alternate` is true, the less verbose alternate formatting (Rust `{:#}`) is used, which does
+/// not show symbol hashes and the types of constant ints.
+///
+/// [$] It's `O(n * MAX_DEPTH)`, but `MAX_DEPTH` is a constant 300 and therefore it's `O(n)`
+/// [$$] Technically, bounded by `O(n^MAX_DEPTH)`, but this is practically exponential.
+DEMANGLE_NODISCARD overflow_status rust_demangle_display_demangle(struct demangle const *res, char *out, size_t len, bool alternate);
+
+/// Returns true if `res` refers to a known valid Rust demangling style, false if it's an unknown style.
+bool rust_demangle_is_known(struct demangle *res);
+
+#undef DEMANGLE_NODISCARD
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
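
A minimal sketch of the intended calling sequence for the API above, assuming
a caller that prints to stdout; the wrapper name, buffer size and printf are
illustrative assumptions, not part of the patch:

	#include <stdbool.h>
	#include <stdio.h>
	#include "demangle-rust-v0.h"

	/* Illustrative helper; any sane output bound works (see notes on `len`). */
	static void print_demangled(const char *mangled)
	{
		struct demangle dem;
		char out[1024];

		rust_demangle_demangle(mangled, &dem);
		if (!rust_demangle_is_known(&dem))
			return; /* unknown style: fall back to other demanglers */

		if (rust_demangle_display_demangle(&dem, out, sizeof(out),
						   /*alternate=*/true) == OverflowOk)
			printf("%s\n", out);
	}
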
diff --git a/tools/perf/util/demangle-rust.c b/tools/perf/util/demangle-rust.c
deleted file mode 100644
index a659fc69f73a..000000000000
--- a/tools/perf/util/demangle-rust.c
+++ /dev/null
@@ -1,269 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <string.h>
-#include "debug.h"
-
-#include "demangle-rust.h"
-
-/*
- * Mangled Rust symbols look like this:
- *
- * _$LT$std..sys..fd..FileDesc$u20$as$u20$core..ops..Drop$GT$::drop::hc68340e1baa4987a
- *
- * The original symbol is:
- *
- * <std::sys::fd::FileDesc as core::ops::Drop>::drop
- *
- * The last component of the path is a 64-bit hash in lowercase hex, prefixed
- * with "h". Rust does not have a global namespace between crates, an illusion
- * which Rust maintains by using the hash to distinguish things that would
- * otherwise have the same symbol.
- *
- * Any path component not starting with a XID_Start character is prefixed with
- * "_".
- *
- * The following escape sequences are used:
- *
- * "," => $C$
- * "@" => $SP$
- * "*" => $BP$
- * "&" => $RF$
- * "<" => $LT$
- * ">" => $GT$
- * "(" => $LP$
- * ")" => $RP$
- * " " => $u20$
- * "'" => $u27$
- * "[" => $u5b$
- * "]" => $u5d$
- * "~" => $u7e$
- *
- * A double ".." means "::" and a single "." means "-".
- *
- * The only characters allowed in the mangled symbol are a-zA-Z0-9 and _.:$
- */
-
-static const char *hash_prefix = "::h";
-static const size_t hash_prefix_len = 3;
-static const size_t hash_len = 16;
-
-static bool is_prefixed_hash(const char *start);
-static bool looks_like_rust(const char *sym, size_t len);
-static bool unescape(const char **in, char **out, const char *seq, char value);
-
-/*
- * INPUT:
- * sym: symbol that has been through BFD-demangling
- *
- * This function looks for the following indicators:
- *
- * 1. The hash must consist of "h" followed by 16 lowercase hex digits.
- *
- * 2. As a sanity check, the hash must use between 5 and 15 of the 16 possible
- * hex digits. This is true of 99.9998% of hashes so once in your life you
- * may see a false negative. The point is to notice path components that
- * could be Rust hashes but are probably not, like "haaaaaaaaaaaaaaaa". In
- * this case a false positive (non-Rust symbol has an important path
- * component removed because it looks like a Rust hash) is worse than a
- * false negative (the rare Rust symbol is not demangled) so this sets the
- * balance in favor of false negatives.
- *
- * 3. There must be no characters other than a-zA-Z0-9 and _.:$
- *
- * 4. There must be no unrecognized $-sign sequences.
- *
- * 5. There must be no sequence of three or more dots in a row ("...").
- */
-bool
-rust_is_mangled(const char *sym)
-{
- size_t len, len_without_hash;
-
- if (!sym)
- return false;
-
- len = strlen(sym);
- if (len <= hash_prefix_len + hash_len)
- /* Not long enough to contain "::h" + hash + something else */
- return false;
-
- len_without_hash = len - (hash_prefix_len + hash_len);
- if (!is_prefixed_hash(sym + len_without_hash))
- return false;
-
- return looks_like_rust(sym, len_without_hash);
-}
-
-/*
- * A hash is the prefix "::h" followed by 16 lowercase hex digits. The hex
- * digits must comprise between 5 and 15 (inclusive) distinct digits.
- */
-static bool is_prefixed_hash(const char *str)
-{
- const char *end;
- bool seen[16];
- size_t i;
- int count;
-
- if (strncmp(str, hash_prefix, hash_prefix_len))
- return false;
- str += hash_prefix_len;
-
- memset(seen, false, sizeof(seen));
- for (end = str + hash_len; str < end; str++)
- if (*str >= '0' && *str <= '9')
- seen[*str - '0'] = true;
- else if (*str >= 'a' && *str <= 'f')
- seen[*str - 'a' + 10] = true;
- else
- return false;
-
- /* Count how many distinct digits seen */
- count = 0;
- for (i = 0; i < 16; i++)
- if (seen[i])
- count++;
-
- return count >= 5 && count <= 15;
-}
-
-static bool looks_like_rust(const char *str, size_t len)
-{
- const char *end = str + len;
-
- while (str < end)
- switch (*str) {
- case '$':
- if (!strncmp(str, "$C$", 3))
- str += 3;
- else if (!strncmp(str, "$SP$", 4)
- || !strncmp(str, "$BP$", 4)
- || !strncmp(str, "$RF$", 4)
- || !strncmp(str, "$LT$", 4)
- || !strncmp(str, "$GT$", 4)
- || !strncmp(str, "$LP$", 4)
- || !strncmp(str, "$RP$", 4))
- str += 4;
- else if (!strncmp(str, "$u20$", 5)
- || !strncmp(str, "$u27$", 5)
- || !strncmp(str, "$u5b$", 5)
- || !strncmp(str, "$u5d$", 5)
- || !strncmp(str, "$u7e$", 5))
- str += 5;
- else
- return false;
- break;
- case '.':
- /* Do not allow three or more consecutive dots */
- if (!strncmp(str, "...", 3))
- return false;
- /* Fall through */
- case 'a' ... 'z':
- case 'A' ... 'Z':
- case '0' ... '9':
- case '_':
- case ':':
- str++;
- break;
- default:
- return false;
- }
-
- return true;
-}
-
-/*
- * INPUT:
- * sym: symbol for which rust_is_mangled(sym) returns true
- *
- * The input is demangled in-place because the mangled name is always longer
- * than the demangled one.
- */
-void
-rust_demangle_sym(char *sym)
-{
- const char *in;
- char *out;
- const char *end;
-
- if (!sym)
- return;
-
- in = sym;
- out = sym;
- end = sym + strlen(sym) - (hash_prefix_len + hash_len);
-
- while (in < end)
- switch (*in) {
- case '$':
- if (!(unescape(&in, &out, "$C$", ',')
- || unescape(&in, &out, "$SP$", '@')
- || unescape(&in, &out, "$BP$", '*')
- || unescape(&in, &out, "$RF$", '&')
- || unescape(&in, &out, "$LT$", '<')
- || unescape(&in, &out, "$GT$", '>')
- || unescape(&in, &out, "$LP$", '(')
- || unescape(&in, &out, "$RP$", ')')
- || unescape(&in, &out, "$u20$", ' ')
- || unescape(&in, &out, "$u27$", '\'')
- || unescape(&in, &out, "$u5b$", '[')
- || unescape(&in, &out, "$u5d$", ']')
- || unescape(&in, &out, "$u7e$", '~'))) {
- pr_err("demangle-rust: unexpected escape sequence");
- goto done;
- }
- break;
- case '_':
- /*
- * If this is the start of a path component and the next
- * character is an escape sequence, ignore the
- * underscore. The mangler inserts an underscore to make
- * sure the path component begins with a XID_Start
- * character.
- */
- if ((in == sym || in[-1] == ':') && in[1] == '$')
- in++;
- else
- *out++ = *in++;
- break;
- case '.':
- if (in[1] == '.') {
- /* ".." becomes "::" */
- *out++ = ':';
- *out++ = ':';
- in += 2;
- } else {
- /* "." becomes "-" */
- *out++ = '-';
- in++;
- }
- break;
- case 'a' ... 'z':
- case 'A' ... 'Z':
- case '0' ... '9':
- case ':':
- *out++ = *in++;
- break;
- default:
- pr_err("demangle-rust: unexpected character '%c' in symbol\n",
- *in);
- goto done;
- }
-
-done:
- *out = '\0';
-}
-
-static bool unescape(const char **in, char **out, const char *seq, char value)
-{
- size_t len = strlen(seq);
-
- if (strncmp(*in, seq, len))
- return false;
-
- **out = value;
-
- *in += len;
- *out += 1;
-
- return true;
-}
diff --git a/tools/perf/util/demangle-rust.h b/tools/perf/util/demangle-rust.h
deleted file mode 100644
index 2fca618b1aa5..000000000000
--- a/tools/perf/util/demangle-rust.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __PERF_DEMANGLE_RUST
-#define __PERF_DEMANGLE_RUST 1
-
-bool rust_is_mangled(const char *str);
-void rust_demangle_sym(char *str);
-
-#endif /* __PERF_DEMANGLE_RUST */
diff --git a/tools/perf/util/disasm.c b/tools/perf/util/disasm.c
index 8f0eb56c6fc6..50b9433f3f8e 100644
--- a/tools/perf/util/disasm.c
+++ b/tools/perf/util/disasm.c
@@ -14,13 +14,15 @@
#include "annotate.h"
#include "annotate-data.h"
#include "build-id.h"
+#include "capstone.h"
#include "debug.h"
#include "disasm.h"
-#include "disasm_bpf.h"
#include "dso.h"
#include "dwarf-regs.h"
#include "env.h"
#include "evsel.h"
+#include "libbfd.h"
+#include "llvm.h"
#include "map.h"
#include "maps.h"
#include "namespaces.h"
@@ -49,7 +51,6 @@ static int call__scnprintf(struct ins *ins, char *bf, size_t size,
static void ins__sort(struct arch *arch);
static int disasm_line__parse(char *line, const char **namep, char **rawp);
static int disasm_line__parse_powerpc(struct disasm_line *dl, struct annotate_args *args);
-static char *expand_tabs(char *line, char **storage, size_t *storage_len);
static __attribute__((constructor)) void symbol__init_regexpr(void)
{
@@ -246,8 +247,8 @@ static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->raw);
}
-int ins__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops, int max_ins_name)
+static int ins__scnprintf(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops, int max_ins_name)
{
if (ins->ops->scnprintf)
return ins->ops->scnprintf(ins, bf, size, ops, max_ins_name);
@@ -390,13 +391,16 @@ static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_s
* skip over possible up to 2 operands to get to address, e.g.:
* tbnz w0, #26, ffff0000083cd190 <security_file_permission+0xd0>
*/
- if (c++ != NULL) {
+ if (c != NULL) {
+ c++;
ops->target.addr = strtoull(c, NULL, 16);
if (!ops->target.addr) {
c = strchr(c, ',');
c = validate_comma(c, ops);
- if (c++ != NULL)
+ if (c != NULL) {
+ c++;
ops->target.addr = strtoull(c, NULL, 16);
+ }
}
} else {
ops->target.addr = strtoull(ops->raw, NULL, 16);
@@ -824,7 +828,7 @@ static struct ins_ops ret_ops = {
.scnprintf = ins__raw_scnprintf,
};
-bool ins__is_nop(const struct ins *ins)
+static bool ins__is_nop(const struct ins *ins)
{
return ins->ops == &nop_ops;
}
@@ -1218,7 +1222,7 @@ int symbol__strerror_disassemble(struct map_symbol *ms, int errnum, char *buf, s
char *build_id_msg = NULL;
if (dso__has_build_id(dso)) {
- build_id__sprintf(dso__bid(dso), bf + 15);
+ build_id__snprintf(dso__bid(dso), bf + 15, sizeof(bf) - 15);
build_id_msg = bf;
}
scnprintf(buf, buflen,
@@ -1330,420 +1334,6 @@ fallback:
return 0;
}
-#ifdef HAVE_LIBCAPSTONE_SUPPORT
-#include <capstone/capstone.h>
-
-int capstone_init(struct machine *machine, csh *cs_handle, bool is64, bool disassembler_style);
-
-static int open_capstone_handle(struct annotate_args *args, bool is_64bit,
- csh *handle)
-{
- struct annotation_options *opt = args->options;
- cs_mode mode = is_64bit ? CS_MODE_64 : CS_MODE_32;
-
- /* TODO: support more architectures */
- if (!arch__is(args->arch, "x86"))
- return -1;
-
- if (cs_open(CS_ARCH_X86, mode, handle) != CS_ERR_OK)
- return -1;
-
- if (!opt->disassembler_style ||
- !strcmp(opt->disassembler_style, "att"))
- cs_option(*handle, CS_OPT_SYNTAX, CS_OPT_SYNTAX_ATT);
-
- /*
- * Resolving address operands to symbols is implemented
- * on x86 by investigating instruction details.
- */
- cs_option(*handle, CS_OPT_DETAIL, CS_OPT_ON);
-
- return 0;
-}
-#endif
-
-#if defined(HAVE_LIBCAPSTONE_SUPPORT) || defined(HAVE_LIBLLVM_SUPPORT)
-struct find_file_offset_data {
- u64 ip;
- u64 offset;
-};
-
-/* This will be called for each PHDR in an ELF binary */
-static int find_file_offset(u64 start, u64 len, u64 pgoff, void *arg)
-{
- struct find_file_offset_data *data = arg;
-
- if (start <= data->ip && data->ip < start + len) {
- data->offset = pgoff + data->ip - start;
- return 1;
- }
- return 0;
-}
-
-static u8 *
-read_symbol(const char *filename, struct map *map, struct symbol *sym,
- u64 *len, bool *is_64bit)
-{
- struct dso *dso = map__dso(map);
- struct nscookie nsc;
- u64 start = map__rip_2objdump(map, sym->start);
- u64 end = map__rip_2objdump(map, sym->end);
- int fd, count;
- u8 *buf = NULL;
- struct find_file_offset_data data = {
- .ip = start,
- };
-
- *is_64bit = false;
-
- nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);
- fd = open(filename, O_RDONLY);
- nsinfo__mountns_exit(&nsc);
- if (fd < 0)
- return NULL;
-
- if (file__read_maps(fd, /*exe=*/true, find_file_offset, &data,
- is_64bit) == 0)
- goto err;
-
- *len = end - start;
- buf = malloc(*len);
- if (buf == NULL)
- goto err;
-
- count = pread(fd, buf, *len, data.offset);
- close(fd);
- fd = -1;
-
- if ((u64)count != *len)
- goto err;
-
- return buf;
-
-err:
- if (fd >= 0)
- close(fd);
- free(buf);
- return NULL;
-}
-#endif
-
-#if !defined(HAVE_LIBCAPSTONE_SUPPORT) || !defined(HAVE_LIBLLVM_SUPPORT)
-static void symbol__disassembler_missing(const char *disassembler, const char *filename,
- struct symbol *sym)
-{
- pr_debug("The %s disassembler isn't linked in for %s in %s\n",
- disassembler, sym->name, filename);
-}
-#endif
-
-#ifdef HAVE_LIBCAPSTONE_SUPPORT
-static void print_capstone_detail(cs_insn *insn, char *buf, size_t len,
- struct annotate_args *args, u64 addr)
-{
- int i;
- struct map *map = args->ms.map;
- struct symbol *sym;
-
- /* TODO: support more architectures */
- if (!arch__is(args->arch, "x86"))
- return;
-
- if (insn->detail == NULL)
- return;
-
- for (i = 0; i < insn->detail->x86.op_count; i++) {
- cs_x86_op *op = &insn->detail->x86.operands[i];
- u64 orig_addr;
-
- if (op->type != X86_OP_MEM)
- continue;
-
- /* only print RIP-based global symbols for now */
- if (op->mem.base != X86_REG_RIP)
- continue;
-
- /* get the target address */
- orig_addr = addr + insn->size + op->mem.disp;
- addr = map__objdump_2mem(map, orig_addr);
-
- if (dso__kernel(map__dso(map))) {
- /*
- * The kernel maps can be splitted into sections,
- * let's find the map first and the search the symbol.
- */
- map = maps__find(map__kmaps(map), addr);
- if (map == NULL)
- continue;
- }
-
- /* convert it to map-relative address for search */
- addr = map__map_ip(map, addr);
-
- sym = map__find_symbol(map, addr);
- if (sym == NULL)
- continue;
-
- if (addr == sym->start) {
- scnprintf(buf, len, "\t# %"PRIx64" <%s>",
- orig_addr, sym->name);
- } else {
- scnprintf(buf, len, "\t# %"PRIx64" <%s+%#"PRIx64">",
- orig_addr, sym->name, addr - sym->start);
- }
- break;
- }
-}
-
-static int symbol__disassemble_capstone_powerpc(char *filename, struct symbol *sym,
- struct annotate_args *args)
-{
- struct annotation *notes = symbol__annotation(sym);
- struct map *map = args->ms.map;
- struct dso *dso = map__dso(map);
- struct nscookie nsc;
- u64 start = map__rip_2objdump(map, sym->start);
- u64 end = map__rip_2objdump(map, sym->end);
- u64 len = end - start;
- u64 offset;
- int i, fd, count;
- bool is_64bit = false;
- bool needs_cs_close = false;
- u8 *buf = NULL;
- struct find_file_offset_data data = {
- .ip = start,
- };
- csh handle;
- char disasm_buf[512];
- struct disasm_line *dl;
- u32 *line;
- bool disassembler_style = false;
-
- if (args->options->objdump_path)
- return -1;
-
- nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);
- fd = open(filename, O_RDONLY);
- nsinfo__mountns_exit(&nsc);
- if (fd < 0)
- return -1;
-
- if (file__read_maps(fd, /*exe=*/true, find_file_offset, &data,
- &is_64bit) == 0)
- goto err;
-
- if (!args->options->disassembler_style ||
- !strcmp(args->options->disassembler_style, "att"))
- disassembler_style = true;
-
- if (capstone_init(maps__machine(args->ms.maps), &handle, is_64bit, disassembler_style) < 0)
- goto err;
-
- needs_cs_close = true;
-
- buf = malloc(len);
- if (buf == NULL)
- goto err;
-
- count = pread(fd, buf, len, data.offset);
- close(fd);
- fd = -1;
-
- if ((u64)count != len)
- goto err;
-
- line = (u32 *)buf;
-
- /* add the function address and name */
- scnprintf(disasm_buf, sizeof(disasm_buf), "%#"PRIx64" <%s>:",
- start, sym->name);
-
- args->offset = -1;
- args->line = disasm_buf;
- args->line_nr = 0;
- args->fileloc = NULL;
- args->ms.sym = sym;
-
- dl = disasm_line__new(args);
- if (dl == NULL)
- goto err;
-
- annotation_line__add(&dl->al, &notes->src->source);
-
- /*
- * TODO: enable disassm for powerpc
- * count = cs_disasm(handle, buf, len, start, len, &insn);
- *
- * For now, only binary code is saved in disassembled line
- * to be used in "type" and "typeoff" sort keys. Each raw code
- * is 32 bit instruction. So use "len/4" to get the number of
- * entries.
- */
- count = len/4;
-
- for (i = 0, offset = 0; i < count; i++) {
- args->offset = offset;
- sprintf(args->line, "%x", line[i]);
-
- dl = disasm_line__new(args);
- if (dl == NULL)
- break;
-
- annotation_line__add(&dl->al, &notes->src->source);
-
- offset += 4;
- }
-
- /* It failed in the middle */
- if (offset != len) {
- struct list_head *list = &notes->src->source;
-
- /* Discard all lines and fallback to objdump */
- while (!list_empty(list)) {
- dl = list_first_entry(list, struct disasm_line, al.node);
-
- list_del_init(&dl->al.node);
- disasm_line__free(dl);
- }
- count = -1;
- }
-
-out:
- if (needs_cs_close)
- cs_close(&handle);
- free(buf);
- return count < 0 ? count : 0;
-
-err:
- if (fd >= 0)
- close(fd);
- count = -1;
- goto out;
-}
-
-static int symbol__disassemble_capstone(char *filename, struct symbol *sym,
- struct annotate_args *args)
-{
- struct annotation *notes = symbol__annotation(sym);
- struct map *map = args->ms.map;
- u64 start = map__rip_2objdump(map, sym->start);
- u64 len;
- u64 offset;
- int i, count, free_count;
- bool is_64bit = false;
- bool needs_cs_close = false;
- u8 *buf = NULL;
- csh handle;
- cs_insn *insn = NULL;
- char disasm_buf[512];
- struct disasm_line *dl;
-
- if (args->options->objdump_path)
- return -1;
-
- buf = read_symbol(filename, map, sym, &len, &is_64bit);
- if (buf == NULL)
- return -1;
-
- /* add the function address and name */
- scnprintf(disasm_buf, sizeof(disasm_buf), "%#"PRIx64" <%s>:",
- start, sym->name);
-
- args->offset = -1;
- args->line = disasm_buf;
- args->line_nr = 0;
- args->fileloc = NULL;
- args->ms.sym = sym;
-
- dl = disasm_line__new(args);
- if (dl == NULL)
- goto err;
-
- annotation_line__add(&dl->al, &notes->src->source);
-
- if (open_capstone_handle(args, is_64bit, &handle) < 0)
- goto err;
-
- needs_cs_close = true;
-
- free_count = count = cs_disasm(handle, buf, len, start, len, &insn);
- for (i = 0, offset = 0; i < count; i++) {
- int printed;
-
- printed = scnprintf(disasm_buf, sizeof(disasm_buf),
- " %-7s %s",
- insn[i].mnemonic, insn[i].op_str);
- print_capstone_detail(&insn[i], disasm_buf + printed,
- sizeof(disasm_buf) - printed, args,
- start + offset);
-
- args->offset = offset;
- args->line = disasm_buf;
-
- dl = disasm_line__new(args);
- if (dl == NULL)
- goto err;
-
- annotation_line__add(&dl->al, &notes->src->source);
-
- offset += insn[i].size;
- }
-
- /* It failed in the middle: probably due to unknown instructions */
- if (offset != len) {
- struct list_head *list = &notes->src->source;
-
- /* Discard all lines and fallback to objdump */
- while (!list_empty(list)) {
- dl = list_first_entry(list, struct disasm_line, al.node);
-
- list_del_init(&dl->al.node);
- disasm_line__free(dl);
- }
- count = -1;
- }
-
-out:
- if (needs_cs_close) {
- cs_close(&handle);
- if (free_count > 0)
- cs_free(insn, free_count);
- }
- free(buf);
- return count < 0 ? count : 0;
-
-err:
- if (needs_cs_close) {
- struct disasm_line *tmp;
-
- /*
- * It probably failed in the middle of the above loop.
- * Release any resources it might add.
- */
- list_for_each_entry_safe(dl, tmp, &notes->src->source, al.node) {
- list_del(&dl->al.node);
- disasm_line__free(dl);
- }
- }
- count = -1;
- goto out;
-}
-#else // HAVE_LIBCAPSTONE_SUPPORT
-static int symbol__disassemble_capstone(char *filename, struct symbol *sym,
- struct annotate_args *args __maybe_unused)
-{
- symbol__disassembler_missing("capstone", filename, sym);
- return -1;
-}
-
-static int symbol__disassemble_capstone_powerpc(char *filename, struct symbol *sym,
- struct annotate_args *args __maybe_unused)
-{
- symbol__disassembler_missing("capstone powerpc", filename, sym);
- return -1;
-}
-#endif // HAVE_LIBCAPSTONE_SUPPORT
-
static int symbol__disassemble_raw(char *filename, struct symbol *sym,
struct annotate_args *args)
{
@@ -1830,201 +1420,12 @@ err:
goto out;
}
-#ifdef HAVE_LIBLLVM_SUPPORT
-#include <llvm-c/Disassembler.h>
-#include <llvm-c/Target.h>
-#include "util/llvm-c-helpers.h"
-
-struct symbol_lookup_storage {
- u64 branch_addr;
- u64 pcrel_load_addr;
-};
-
-/*
- * Whenever LLVM wants to resolve an address into a symbol, it calls this
- * callback. We don't ever actually _return_ anything (in particular, because
- * it puts quotation marks around what we return), but we use this as a hint
- * that there is a branch or PC-relative address in the expression that we
- * should add some textual annotation for after the instruction. The caller
- * will use this information to add the actual annotation.
- */
-static const char *
-symbol_lookup_callback(void *disinfo, uint64_t value,
- uint64_t *ref_type,
- uint64_t address __maybe_unused,
- const char **ref __maybe_unused)
-{
- struct symbol_lookup_storage *storage = disinfo;
-
- if (*ref_type == LLVMDisassembler_ReferenceType_In_Branch)
- storage->branch_addr = value;
- else if (*ref_type == LLVMDisassembler_ReferenceType_In_PCrel_Load)
- storage->pcrel_load_addr = value;
- *ref_type = LLVMDisassembler_ReferenceType_InOut_None;
- return NULL;
-}
-
-static int symbol__disassemble_llvm(char *filename, struct symbol *sym,
- struct annotate_args *args)
-{
- struct annotation *notes = symbol__annotation(sym);
- struct map *map = args->ms.map;
- struct dso *dso = map__dso(map);
- u64 start = map__rip_2objdump(map, sym->start);
- u8 *buf;
- u64 len;
- u64 pc;
- bool is_64bit;
- char triplet[64];
- char disasm_buf[2048];
- size_t disasm_len;
- struct disasm_line *dl;
- LLVMDisasmContextRef disasm = NULL;
- struct symbol_lookup_storage storage;
- char *line_storage = NULL;
- size_t line_storage_len = 0;
- int ret = -1;
-
- if (args->options->objdump_path)
- return -1;
-
- LLVMInitializeAllTargetInfos();
- LLVMInitializeAllTargetMCs();
- LLVMInitializeAllDisassemblers();
-
- buf = read_symbol(filename, map, sym, &len, &is_64bit);
- if (buf == NULL)
- return -1;
-
- if (arch__is(args->arch, "x86")) {
- if (is_64bit)
- scnprintf(triplet, sizeof(triplet), "x86_64-pc-linux");
- else
- scnprintf(triplet, sizeof(triplet), "i686-pc-linux");
- } else {
- scnprintf(triplet, sizeof(triplet), "%s-linux-gnu",
- args->arch->name);
- }
-
- disasm = LLVMCreateDisasm(triplet, &storage, 0, NULL,
- symbol_lookup_callback);
- if (disasm == NULL)
- goto err;
-
- if (args->options->disassembler_style &&
- !strcmp(args->options->disassembler_style, "intel"))
- LLVMSetDisasmOptions(disasm,
- LLVMDisassembler_Option_AsmPrinterVariant);
-
- /*
- * This needs to be set after AsmPrinterVariant, due to a bug in LLVM;
- * setting AsmPrinterVariant makes a new instruction printer, making it
- * forget about the PrintImmHex flag (which is applied before if both
- * are given to the same call).
- */
- LLVMSetDisasmOptions(disasm, LLVMDisassembler_Option_PrintImmHex);
-
- /* add the function address and name */
- scnprintf(disasm_buf, sizeof(disasm_buf), "%#"PRIx64" <%s>:",
- start, sym->name);
-
- args->offset = -1;
- args->line = disasm_buf;
- args->line_nr = 0;
- args->fileloc = NULL;
- args->ms.sym = sym;
-
- dl = disasm_line__new(args);
- if (dl == NULL)
- goto err;
-
- annotation_line__add(&dl->al, &notes->src->source);
-
- pc = start;
- for (u64 offset = 0; offset < len; ) {
- unsigned int ins_len;
-
- storage.branch_addr = 0;
- storage.pcrel_load_addr = 0;
-
- ins_len = LLVMDisasmInstruction(disasm, buf + offset,
- len - offset, pc,
- disasm_buf, sizeof(disasm_buf));
- if (ins_len == 0)
- goto err;
- disasm_len = strlen(disasm_buf);
-
- if (storage.branch_addr != 0) {
- char *name = llvm_name_for_code(dso, filename,
- storage.branch_addr);
- if (name != NULL) {
- disasm_len += scnprintf(disasm_buf + disasm_len,
- sizeof(disasm_buf) -
- disasm_len,
- " <%s>", name);
- free(name);
- }
- }
- if (storage.pcrel_load_addr != 0) {
- char *name = llvm_name_for_data(dso, filename,
- storage.pcrel_load_addr);
- disasm_len += scnprintf(disasm_buf + disasm_len,
- sizeof(disasm_buf) - disasm_len,
- " # %#"PRIx64,
- storage.pcrel_load_addr);
- if (name) {
- disasm_len += scnprintf(disasm_buf + disasm_len,
- sizeof(disasm_buf) -
- disasm_len,
- " <%s>", name);
- free(name);
- }
- }
-
- args->offset = offset;
- args->line = expand_tabs(disasm_buf, &line_storage,
- &line_storage_len);
- args->line_nr = 0;
- args->fileloc = NULL;
- args->ms.sym = sym;
-
- llvm_addr2line(filename, pc, &args->fileloc,
- (unsigned int *)&args->line_nr, false, NULL);
-
- dl = disasm_line__new(args);
- if (dl == NULL)
- goto err;
-
- annotation_line__add(&dl->al, &notes->src->source);
-
- free(args->fileloc);
- pc += ins_len;
- offset += ins_len;
- }
-
- ret = 0;
-
-err:
- LLVMDisasmDispose(disasm);
- free(buf);
- free(line_storage);
- return ret;
-}
-#else // HAVE_LIBLLVM_SUPPORT
-static int symbol__disassemble_llvm(char *filename, struct symbol *sym,
- struct annotate_args *args __maybe_unused)
-{
- symbol__disassembler_missing("LLVM", filename, sym);
- return -1;
-}
-#endif // HAVE_LIBLLVM_SUPPORT
-
/*
* Possibly create a new version of line with tabs expanded. Returns the
* existing or new line, storage is updated if a new line is allocated. If
* allocation fails then NULL is returned.
*/
-static char *expand_tabs(char *line, char **storage, size_t *storage_len)
+char *expand_tabs(char *line, char **storage, size_t *storage_len)
{
size_t i, src, dst, len, new_storage_len, num_tabs;
char *new_line;
@@ -2079,6 +1480,23 @@ static char *expand_tabs(char *line, char **storage, size_t *storage_len)
return new_line;
}
+static int symbol__disassemble_bpf_image(struct symbol *sym, struct annotate_args *args)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct disasm_line *dl;
+
+ args->offset = -1;
+ args->line = strdup("to be implemented");
+ args->line_nr = 0;
+ args->fileloc = NULL;
+ dl = disasm_line__new(args);
+ if (dl)
+ annotation_line__add(&dl->al, &notes->src->source);
+
+ zfree(&args->line);
+ return 0;
+}
+
static int symbol__disassemble_objdump(const char *filename, struct symbol *sym,
struct annotate_args *args)
{
@@ -2103,6 +1521,12 @@ static int symbol__disassemble_objdump(const char *filename, struct symbol *sym,
struct child_process objdump_process;
int err;
+ if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_PROG_INFO)
+ return symbol__disassemble_bpf_libbfd(sym, args);
+
+ if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_IMAGE)
+ return symbol__disassemble_bpf_image(sym, args);
+
err = asprintf(&command,
"%s %s%s --start-address=0x%016" PRIx64
" --stop-address=0x%016" PRIx64
@@ -2237,11 +1661,7 @@ int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
pr_debug("annotating [%p] %30s : [%p] %30s\n", dso, dso__long_name(dso), sym, sym->name);
- if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_PROG_INFO) {
- return symbol__disassemble_bpf(sym, args);
- } else if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_IMAGE) {
- return symbol__disassemble_bpf_image(sym, args);
- } else if (dso__binary_type(dso) == DSO_BINARY_TYPE__NOT_FOUND) {
+ if (dso__binary_type(dso) == DSO_BINARY_TYPE__NOT_FOUND) {
return SYMBOL_ANNOTATE_ERRNO__COULDNT_DETERMINE_FILE_TYPE;
} else if (dso__is_kcore(dso)) {
kce.addr = map__rip_2objdump(map, sym->start);
@@ -2284,6 +1704,13 @@ int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
}
}
+ /* FIXME: LLVM and CAPSTONE should support source code */
+ if (options->annotate_src && !options->hide_src_code) {
+ err = symbol__disassemble_objdump(symfs_filename, sym, args);
+ if (err == 0)
+ goto out_remove_tmp;
+ }
+
err = -1;
for (u8 i = 0; i < ARRAY_SIZE(options->disassemblers) && err != 0; i++) {
enum perf_disassembler dis = options->disassemblers[i];
diff --git a/tools/perf/util/disasm.h b/tools/perf/util/disasm.h
index c135db2416b5..d2cb555e4a3b 100644
--- a/tools/perf/util/disasm.h
+++ b/tools/perf/util/disasm.h
@@ -98,7 +98,6 @@ struct ins_ops {
struct annotate_args {
struct arch *arch;
struct map_symbol ms;
- struct evsel *evsel;
struct annotation_options *options;
s64 offset;
char *line;
@@ -110,13 +109,10 @@ struct arch *arch__find(const char *name);
bool arch__is(struct arch *arch, const char *name);
struct ins_ops *ins__find(struct arch *arch, const char *name, struct disasm_line *dl);
-int ins__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops, int max_ins_name);
bool ins__is_call(const struct ins *ins);
bool ins__is_jump(const struct ins *ins);
bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2);
-bool ins__is_nop(const struct ins *ins);
bool ins__is_ret(const struct ins *ins);
bool ins__is_lock(const struct ins *ins);
@@ -128,4 +124,6 @@ int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size,
int symbol__disassemble(struct symbol *sym, struct annotate_args *args);
+char *expand_tabs(char *line, char **storage, size_t *storage_len);
+
#endif /* __PERF_UTIL_DISASM_H */
diff --git a/tools/perf/util/disasm_bpf.c b/tools/perf/util/disasm_bpf.c
deleted file mode 100644
index 1fee71c79b62..000000000000
--- a/tools/perf/util/disasm_bpf.c
+++ /dev/null
@@ -1,195 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-
-#include "util/annotate.h"
-#include "util/disasm_bpf.h"
-#include "util/symbol.h"
-#include <linux/zalloc.h>
-#include <string.h>
-
-#if defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
-#define PACKAGE "perf"
-#include <bfd.h>
-#include <bpf/bpf.h>
-#include <bpf/btf.h>
-#include <bpf/libbpf.h>
-#include <dis-asm.h>
-#include <errno.h>
-#include <linux/btf.h>
-#include <tools/dis-asm-compat.h>
-
-#include "util/bpf-event.h"
-#include "util/bpf-utils.h"
-#include "util/debug.h"
-#include "util/dso.h"
-#include "util/map.h"
-#include "util/env.h"
-#include "util/util.h"
-
-int symbol__disassemble_bpf(struct symbol *sym, struct annotate_args *args)
-{
- struct annotation *notes = symbol__annotation(sym);
- struct bpf_prog_linfo *prog_linfo = NULL;
- struct bpf_prog_info_node *info_node;
- int len = sym->end - sym->start;
- disassembler_ftype disassemble;
- struct map *map = args->ms.map;
- struct perf_bpil *info_linear;
- struct disassemble_info info;
- struct dso *dso = map__dso(map);
- int pc = 0, count, sub_id;
- struct btf *btf = NULL;
- char tpath[PATH_MAX];
- size_t buf_size;
- int nr_skip = 0;
- char *buf;
- bfd *bfdf;
- int ret;
- FILE *s;
-
- if (dso__binary_type(dso) != DSO_BINARY_TYPE__BPF_PROG_INFO)
- return SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE;
-
- pr_debug("%s: handling sym %s addr %" PRIx64 " len %" PRIx64 "\n", __func__,
- sym->name, sym->start, sym->end - sym->start);
-
- memset(tpath, 0, sizeof(tpath));
- perf_exe(tpath, sizeof(tpath));
-
- bfdf = bfd_openr(tpath, NULL);
- if (bfdf == NULL)
- abort();
-
- if (!bfd_check_format(bfdf, bfd_object))
- abort();
-
- s = open_memstream(&buf, &buf_size);
- if (!s) {
- ret = errno;
- goto out;
- }
- init_disassemble_info_compat(&info, s,
- (fprintf_ftype) fprintf,
- fprintf_styled);
- info.arch = bfd_get_arch(bfdf);
- info.mach = bfd_get_mach(bfdf);
-
- info_node = perf_env__find_bpf_prog_info(dso__bpf_prog(dso)->env,
- dso__bpf_prog(dso)->id);
- if (!info_node) {
- ret = SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF;
- goto out;
- }
- info_linear = info_node->info_linear;
- sub_id = dso__bpf_prog(dso)->sub_id;
-
- info.buffer = (void *)(uintptr_t)(info_linear->info.jited_prog_insns);
- info.buffer_length = info_linear->info.jited_prog_len;
-
- if (info_linear->info.nr_line_info)
- prog_linfo = bpf_prog_linfo__new(&info_linear->info);
-
- if (info_linear->info.btf_id) {
- struct btf_node *node;
-
- node = perf_env__find_btf(dso__bpf_prog(dso)->env,
- info_linear->info.btf_id);
- if (node)
- btf = btf__new((__u8 *)(node->data),
- node->data_size);
- }
-
- disassemble_init_for_target(&info);
-
-#ifdef DISASM_FOUR_ARGS_SIGNATURE
- disassemble = disassembler(info.arch,
- bfd_big_endian(bfdf),
- info.mach,
- bfdf);
-#else
- disassemble = disassembler(bfdf);
-#endif
- if (disassemble == NULL)
- abort();
-
- fflush(s);
- do {
- const struct bpf_line_info *linfo = NULL;
- struct disasm_line *dl;
- size_t prev_buf_size;
- const char *srcline;
- u64 addr;
-
- addr = pc + ((u64 *)(uintptr_t)(info_linear->info.jited_ksyms))[sub_id];
- count = disassemble(pc, &info);
-
- if (prog_linfo)
- linfo = bpf_prog_linfo__lfind_addr_func(prog_linfo,
- addr, sub_id,
- nr_skip);
-
- if (linfo && btf) {
- srcline = btf__name_by_offset(btf, linfo->line_off);
- nr_skip++;
- } else
- srcline = NULL;
-
- fprintf(s, "\n");
- prev_buf_size = buf_size;
- fflush(s);
-
- if (!annotate_opts.hide_src_code && srcline) {
- args->offset = -1;
- args->line = strdup(srcline);
- args->line_nr = 0;
- args->fileloc = NULL;
- args->ms.sym = sym;
- dl = disasm_line__new(args);
- if (dl) {
- annotation_line__add(&dl->al,
- &notes->src->source);
- }
- }
-
- args->offset = pc;
- args->line = buf + prev_buf_size;
- args->line_nr = 0;
- args->fileloc = NULL;
- args->ms.sym = sym;
- dl = disasm_line__new(args);
- if (dl)
- annotation_line__add(&dl->al, &notes->src->source);
-
- pc += count;
- } while (count > 0 && pc < len);
-
- ret = 0;
-out:
- free(prog_linfo);
- btf__free(btf);
- fclose(s);
- bfd_close(bfdf);
- return ret;
-}
-#else // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
-int symbol__disassemble_bpf(struct symbol *sym __maybe_unused, struct annotate_args *args __maybe_unused)
-{
- return SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF;
-}
-#endif // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
-
-int symbol__disassemble_bpf_image(struct symbol *sym, struct annotate_args *args)
-{
- struct annotation *notes = symbol__annotation(sym);
- struct disasm_line *dl;
-
- args->offset = -1;
- args->line = strdup("to be implemented");
- args->line_nr = 0;
- args->fileloc = NULL;
- dl = disasm_line__new(args);
- if (dl)
- annotation_line__add(&dl->al, &notes->src->source);
-
- zfree(&args->line);
- return 0;
-}
diff --git a/tools/perf/util/disasm_bpf.h b/tools/perf/util/disasm_bpf.h
deleted file mode 100644
index 2ecb19545388..000000000000
--- a/tools/perf/util/disasm_bpf.h
+++ /dev/null
@@ -1,12 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-
-#ifndef __PERF_DISASM_BPF_H
-#define __PERF_DISASM_BPF_H
-
-struct symbol;
-struct annotate_args;
-
-int symbol__disassemble_bpf(struct symbol *sym, struct annotate_args *args);
-int symbol__disassemble_bpf_image(struct symbol *sym, struct annotate_args *args);
-
-#endif /* __PERF_DISASM_BPF_H */
diff --git a/tools/perf/util/dlfilter.c b/tools/perf/util/dlfilter.c
index ddacef881af2..c0afcbd954f8 100644
--- a/tools/perf/util/dlfilter.c
+++ b/tools/perf/util/dlfilter.c
@@ -513,6 +513,7 @@ int dlfilter__do_filter_event(struct dlfilter *d,
d->d_addr_al = &d_addr_al;
d_sample.size = sizeof(d_sample);
+ d_sample.p_stage_cyc = sample->weight3;
d_ip_al.size = 0; /* To indicate d_ip_al is not initialized */
d_addr_al.size = 0; /* To indicate d_addr_al is not initialized */
@@ -526,7 +527,6 @@ int dlfilter__do_filter_event(struct dlfilter *d,
ASSIGN(period);
ASSIGN(weight);
ASSIGN(ins_lat);
- ASSIGN(p_stage_cyc);
ASSIGN(transaction);
ASSIGN(insn_cnt);
ASSIGN(cyc_cnt);
diff --git a/tools/perf/util/drm_pmu.c b/tools/perf/util/drm_pmu.c
new file mode 100644
index 000000000000..b48a375e4584
--- /dev/null
+++ b/tools/perf/util/drm_pmu.c
@@ -0,0 +1,689 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+#include "drm_pmu.h"
+#include "counts.h"
+#include "cpumap.h"
+#include "debug.h"
+#include "evsel.h"
+#include "pmu.h"
+#include <perf/threadmap.h>
+#include <api/fs/fs.h>
+#include <api/io.h>
+#include <ctype.h>
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <linux/unistd.h>
+#include <linux/kcmp.h>
+#include <linux/zalloc.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/sysmacros.h>
+#include <sys/types.h>
+
+enum drm_pmu_unit {
+ DRM_PMU_UNIT_BYTES,
+ DRM_PMU_UNIT_CAPACITY,
+ DRM_PMU_UNIT_CYCLES,
+ DRM_PMU_UNIT_HZ,
+ DRM_PMU_UNIT_NS,
+
+ DRM_PMU_UNIT_MAX,
+};
+
+struct drm_pmu_event {
+ const char *name;
+ const char *desc;
+ enum drm_pmu_unit unit;
+};
+
+struct drm_pmu {
+ struct perf_pmu pmu;
+ struct drm_pmu_event *events;
+ int num_events;
+};
+
+static const char * const drm_pmu_unit_strs[DRM_PMU_UNIT_MAX] = {
+ "bytes",
+ "capacity",
+ "cycles",
+ "hz",
+ "ns",
+};
+
+static const char * const drm_pmu_scale_unit_strs[DRM_PMU_UNIT_MAX] = {
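+/* "<scale><unit>" strings in the form perf's scale/unit parsing expects; the scale is always 1 here. */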
+ "1bytes",
+ "1capacity",
+ "1cycles",
+ "1hz",
+ "1ns",
+};
+
+bool perf_pmu__is_drm(const struct perf_pmu *pmu)
+{
+ return pmu && pmu->type >= PERF_PMU_TYPE_DRM_START &&
+ pmu->type <= PERF_PMU_TYPE_DRM_END;
+}
+
+bool evsel__is_drm(const struct evsel *evsel)
+{
+ return perf_pmu__is_drm(evsel->pmu);
+}
+
+static struct drm_pmu *add_drm_pmu(struct list_head *pmus, char *line, size_t line_len)
+{
+ struct drm_pmu *drm;
+ struct perf_pmu *pmu;
+ const char *name;
+ __u32 max_drm_pmu_type = 0, type;
+ int i = 12;
+
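+	/*
+	 * `line` is a "drm-driver: <name>" fdinfo line; the bytes just before
+	 * <name> are overwritten so that `name` becomes "drm_<name>" in place.
+	 */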
+ if (line[line_len - 1] == '\n')
+ line[line_len - 1] = '\0';
+ while (isspace(line[i]))
+ i++;
+
+ line[--i] = '_';
+ line[--i] = 'm';
+ line[--i] = 'r';
+ line[--i] = 'd';
+ name = &line[i];
+
+ list_for_each_entry(pmu, pmus, list) {
+ if (!perf_pmu__is_drm(pmu))
+ continue;
+ if (pmu->type > max_drm_pmu_type)
+ max_drm_pmu_type = pmu->type;
+ if (!strcmp(pmu->name, name)) {
+ /* PMU already exists. */
+ return NULL;
+ }
+ }
+
+ if (max_drm_pmu_type != 0)
+ type = max_drm_pmu_type + 1;
+ else
+ type = PERF_PMU_TYPE_DRM_START;
+
+	if (type > PERF_PMU_TYPE_DRM_END) {
+		/* Nothing allocated yet, so nothing to free on this path. */
+		pr_err("Unable to encode DRM PMU type for %s\n", name);
+		return NULL;
+	}
+
+ drm = zalloc(sizeof(*drm));
+ if (!drm)
+ return NULL;
+
+ if (perf_pmu__init(&drm->pmu, type, name) != 0) {
+ perf_pmu__delete(&drm->pmu);
+ return NULL;
+ }
+
+ drm->pmu.cpus = perf_cpu_map__new_int(0);
+ if (!drm->pmu.cpus) {
+ perf_pmu__delete(&drm->pmu);
+ return NULL;
+ }
+ return drm;
+}
+
+static bool starts_with(const char *str, const char *prefix)
+{
+ return !strncmp(prefix, str, strlen(prefix));
+}
+
+static int add_event(struct drm_pmu_event **events, int *num_events,
+ const char *line, enum drm_pmu_unit unit, const char *desc)
+{
+ const char *colon = strchr(line, ':');
+ struct drm_pmu_event *tmp;
+
+ if (!colon)
+ return -EINVAL;
+
+ tmp = reallocarray(*events, *num_events + 1, sizeof(struct drm_pmu_event));
+	if (!tmp)
+		return -ENOMEM;
+	/* Publish the grown array first so *events cannot dangle after realloc. */
+	*events = tmp;
+	tmp[*num_events].unit = unit;
+	tmp[*num_events].desc = desc;
+	tmp[*num_events].name = strndup(line, colon - line);
+	if (!tmp[*num_events].name)
+		return -ENOMEM;
+	(*num_events)++;
+ return 0;
+}
+
+static int read_drm_pmus_cb(void *args, int fdinfo_dir_fd, const char *fd_name)
+{
+ struct list_head *pmus = args;
+ char buf[640];
+ struct io io;
+ char *line = NULL;
+ size_t line_len;
+ struct drm_pmu *drm = NULL;
+ struct drm_pmu_event *events = NULL;
+ int num_events = 0;
+
+ io__init(&io, openat(fdinfo_dir_fd, fd_name, O_RDONLY), buf, sizeof(buf));
+ if (io.fd == -1) {
+ /* Failed to open file, ignore. */
+ return 0;
+ }
+
+ while (io__getline(&io, &line, &line_len) > 0) {
+ if (starts_with(line, "drm-driver:")) {
+ drm = add_drm_pmu(pmus, line, line_len);
+ if (!drm)
+ break;
+ continue;
+ }
+ /*
+ * Note the string matching below is alphabetical, with more
+ * specific matches appearing before less specific.
+ */
+ if (starts_with(line, "drm-active-")) {
+ add_event(&events, &num_events, line, DRM_PMU_UNIT_BYTES,
+ "Total memory active in one or more engines");
+ continue;
+ }
+ if (starts_with(line, "drm-cycles-")) {
+ add_event(&events, &num_events, line, DRM_PMU_UNIT_CYCLES,
+ "Busy cycles");
+ continue;
+ }
+ if (starts_with(line, "drm-engine-capacity-")) {
+ add_event(&events, &num_events, line, DRM_PMU_UNIT_CAPACITY,
+ "Engine capacity");
+ continue;
+ }
+ if (starts_with(line, "drm-engine-")) {
+ add_event(&events, &num_events, line, DRM_PMU_UNIT_NS,
+ "Utilization in ns");
+ continue;
+ }
+ if (starts_with(line, "drm-maxfreq-")) {
+ add_event(&events, &num_events, line, DRM_PMU_UNIT_HZ,
+ "Maximum frequency");
+ continue;
+ }
+ if (starts_with(line, "drm-purgeable-")) {
+ add_event(&events, &num_events, line, DRM_PMU_UNIT_BYTES,
+ "Size of resident and purgeable memory buffers");
+ continue;
+ }
+ if (starts_with(line, "drm-resident-")) {
+ add_event(&events, &num_events, line, DRM_PMU_UNIT_BYTES,
+ "Size of resident memory buffers");
+ continue;
+ }
+ if (starts_with(line, "drm-shared-")) {
+ add_event(&events, &num_events, line, DRM_PMU_UNIT_BYTES,
+ "Size of shared memory buffers");
+ continue;
+ }
+ if (starts_with(line, "drm-total-cycles-")) {
+ add_event(&events, &num_events, line, DRM_PMU_UNIT_CYCLES,
+ "Total busy cycles");
+ continue;
+ }
+ if (starts_with(line, "drm-total-")) {
+ add_event(&events, &num_events, line, DRM_PMU_UNIT_BYTES,
+ "Size of shared and private memory");
+ continue;
+ }
+ if (verbose > 1 && starts_with(line, "drm-") &&
+ !starts_with(line, "drm-client-id:") &&
+ !starts_with(line, "drm-pdev:"))
+ pr_debug("Unhandled DRM PMU fdinfo line match '%s'\n", line);
+ }
+ if (drm) {
+ drm->events = events;
+ drm->num_events = num_events;
+ list_add_tail(&drm->pmu.list, pmus);
+ }
+ free(line);
+ if (io.fd != -1)
+ close(io.fd);
+ return 0;
+}
+
+void drm_pmu__exit(struct perf_pmu *pmu)
+{
+ struct drm_pmu *drm = container_of(pmu, struct drm_pmu, pmu);
+
+ for (int i = 0; i < drm->num_events; i++)
+ free((char *)drm->events[i].name);
+ free(drm->events);
+}
+
+bool drm_pmu__have_event(const struct perf_pmu *pmu, const char *name)
+{
+ struct drm_pmu *drm = container_of(pmu, struct drm_pmu, pmu);
+
+ if (!starts_with(name, "drm-"))
+ return false;
+
+ for (int i = 0; i < drm->num_events; i++) {
+ if (!strcasecmp(drm->events[i].name, name))
+ return true;
+ }
+ return false;
+}
+
+int drm_pmu__for_each_event(const struct perf_pmu *pmu, void *state, pmu_event_callback cb)
+{
+ struct drm_pmu *drm = container_of(pmu, struct drm_pmu, pmu);
+
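+ /* An event's config encoding is simply its index in the events array. */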
+ for (int i = 0; i < drm->num_events; i++) {
+ char encoding_buf[128];
+ struct pmu_event_info info = {
+ .pmu = pmu,
+ .name = drm->events[i].name,
+ .alias = NULL,
+ .scale_unit = drm_pmu_scale_unit_strs[drm->events[i].unit],
+ .desc = drm->events[i].desc,
+ .long_desc = NULL,
+ .encoding_desc = encoding_buf,
+ .topic = "drm",
+ .pmu_name = pmu->name,
+ .event_type_desc = "DRM event",
+ };
+ int ret;
+
+ snprintf(encoding_buf, sizeof(encoding_buf), "%s/config=0x%x/", pmu->name, i);
+
+ ret = cb(state, &info);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+size_t drm_pmu__num_events(const struct perf_pmu *pmu)
+{
+ const struct drm_pmu *drm = container_of(pmu, struct drm_pmu, pmu);
+
+ return drm->num_events;
+}
+
+static int drm_pmu__index_for_event(const struct drm_pmu *drm, const char *name)
+{
+ for (int i = 0; i < drm->num_events; i++) {
+ if (!strcmp(drm->events[i].name, name))
+ return i;
+ }
+ return -1;
+}
+
+static int drm_pmu__config_term(const struct drm_pmu *drm,
+ struct perf_event_attr *attr,
+ struct parse_events_term *term,
+ struct parse_events_error *err)
+{
+ if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER) {
+ int i = drm_pmu__index_for_event(drm, term->config);
+
+ if (i >= 0) {
+ attr->config = i;
+ return 0;
+ }
+ }
+ if (err) {
+ char *err_str;
+
+ parse_events_error__handle(err, term->err_val,
+ asprintf(&err_str,
+ "unexpected drm event term (%s) %s",
+ parse_events__term_type_str(term->type_term),
+ term->config) < 0
+ ? strdup("unexpected drm event term")
+ : err_str,
+ NULL);
+ }
+ return -EINVAL;
+}
+
+int drm_pmu__config_terms(const struct perf_pmu *pmu,
+ struct perf_event_attr *attr,
+ struct parse_events_terms *terms,
+ struct parse_events_error *err)
+{
+ struct drm_pmu *drm = container_of(pmu, struct drm_pmu, pmu);
+ struct parse_events_term *term;
+
+ list_for_each_entry(term, &terms->terms, list) {
+ if (drm_pmu__config_term(drm, attr, term, err))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int drm_pmu__check_alias(const struct perf_pmu *pmu, struct parse_events_terms *terms,
+ struct perf_pmu_info *info, struct parse_events_error *err)
+{
+ struct drm_pmu *drm = container_of(pmu, struct drm_pmu, pmu);
+ struct parse_events_term *term =
+ list_first_entry(&terms->terms, struct parse_events_term, list);
+
+ if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER) {
+ int i = drm_pmu__index_for_event(drm, term->config);
+
+ if (i >= 0) {
+ info->unit = drm_pmu_unit_strs[drm->events[i].unit];
+ info->scale = 1;
+ return 0;
+ }
+ }
+ if (err) {
+ char *err_str;
+
+ parse_events_error__handle(err, term->err_val,
+ asprintf(&err_str,
+ "unexpected drm event term (%s) %s",
+ parse_events__term_type_str(term->type_term),
+ term->config) < 0
+ ? strdup("unexpected drm event term")
+ : err_str,
+ NULL);
+ }
+ return -EINVAL;
+}
+
+struct minor_info {
+ unsigned int *minors;
+ int minors_num, minors_len;
+};
+
+static int for_each_drm_fdinfo_in_dir(int (*cb)(void *args, int fdinfo_dir_fd, const char *fd_name),
+ void *args, int proc_dir, const char *pid_name,
+ struct minor_info *minors)
+{
+ char buf[256];
+ DIR *fd_dir;
+ struct dirent *fd_entry;
+ int fd_dir_fd, fdinfo_dir_fd = -1;
+
+ scnprintf(buf, sizeof(buf), "%s/fd", pid_name);
+ fd_dir_fd = openat(proc_dir, buf, O_DIRECTORY);
+ if (fd_dir_fd == -1)
+ return 0; /* Presumably lost race to open. */
+ fd_dir = fdopendir(fd_dir_fd);
+ if (!fd_dir) {
+ close(fd_dir_fd);
+ return -ENOMEM;
+ }
+ while ((fd_entry = readdir(fd_dir)) != NULL) {
+ struct stat stat;
+ unsigned int minor;
+ bool is_dup = false;
+ int ret;
+
+ if (fd_entry->d_type != DT_LNK)
+ continue;
+
+ if (fstatat(fd_dir_fd, fd_entry->d_name, &stat, 0) != 0)
+ continue;
+
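+ /* DRM devices use character device major number 226. */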
+ if ((stat.st_mode & S_IFMT) != S_IFCHR || major(stat.st_rdev) != 226)
+ continue;
+
+ minor = minor(stat.st_rdev);
+ for (int i = 0; i < minors->minors_num; i++) {
+ if (minor == minors->minors[i]) {
+ is_dup = true;
+ break;
+ }
+ }
+ if (is_dup)
+ continue;
+
+ if (minors->minors_num == minors->minors_len) {
+ unsigned int *tmp = reallocarray(minors->minors, minors->minors_len + 4,
+ sizeof(unsigned int));
+
+ if (!tmp)
+ continue; /* Out of memory: skip this fd rather than overflow. */
+ minors->minors = tmp;
+ minors->minors_len += 4;
+ }
+ minors->minors[minors->minors_num++] = minor;
+ if (fdinfo_dir_fd == -1) {
+ /* Open fdinfo dir if we have a DRM fd. */
+ scnprintf(buf, sizeof(buf), "%s/fdinfo", pid_name);
+ fdinfo_dir_fd = openat(proc_dir, buf, O_DIRECTORY);
+ if (fdinfo_dir_fd == -1)
+ continue;
+ }
+ ret = cb(args, fdinfo_dir_fd, fd_entry->d_name);
+ if (ret)
+ goto close_fdinfo;
+ }
+
+close_fdinfo:
+ if (fdinfo_dir_fd != -1)
+ close(fdinfo_dir_fd);
+ closedir(fd_dir);
+ return 0;
+}
+
+static int for_each_drm_fdinfo(bool skip_all_duplicates,
+ int (*cb)(void *args, int fdinfo_dir_fd, const char *fd_name),
+ void *args)
+{
+ DIR *proc_dir;
+ struct dirent *proc_entry;
+ int ret = 0;
+ /*
+ * minors maintains an array of DRM minor device numbers seen for a pid,
+ * or for all pids if skip_all_duplicates is true, so that duplicates
+ * are ignored.
+ */
+ struct minor_info minors = {
+ .minors = NULL,
+ .minors_num = 0,
+ .minors_len = 0,
+ };
+
+ proc_dir = opendir(procfs__mountpoint());
+ if (!proc_dir)
+ return 0;
+
+ /* Walk through the /proc directory. */
+ while ((proc_entry = readdir(proc_dir)) != NULL) {
+ if (proc_entry->d_type != DT_DIR ||
+ !isdigit(proc_entry->d_name[0]))
+ continue;
+ if (!skip_all_duplicates) {
+ /* Reset the seen minor numbers for each pid. */
+ minors.minors_num = 0;
+ }
+ ret = for_each_drm_fdinfo_in_dir(cb, args,
+ dirfd(proc_dir), proc_entry->d_name,
+ &minors);
+ if (ret)
+ break;
+ }
+ free(minors.minors);
+ closedir(proc_dir);
+ return ret;
+}
+
+int perf_pmus__read_drm_pmus(struct list_head *pmus)
+{
+ return for_each_drm_fdinfo(/*skip_all_duplicates=*/true, read_drm_pmus_cb, pmus);
+}
+
+int evsel__drm_pmu_open(struct evsel *evsel,
+ struct perf_thread_map *threads,
+ int start_cpu_map_idx, int end_cpu_map_idx)
+{
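+ /* Nothing to open: DRM counters are read from procfs fdinfo files. */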
+ (void)evsel;
+ (void)threads;
+ (void)start_cpu_map_idx;
+ (void)end_cpu_map_idx;
+ return 0;
+}
+
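+/*
+ * Parse a "<count> <unit>" fdinfo value and scale the count to base units.
+ * For example (illustrative): "4096 KiB" with DRM_PMU_UNIT_BYTES yields
+ * 4194304, and "300 MHz" with DRM_PMU_UNIT_HZ yields 300000000.
+ */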
+static uint64_t read_count_and_apply_unit(const char *count_and_unit, enum drm_pmu_unit unit)
+{
+ char *unit_ptr = NULL;
+ uint64_t count = strtoul(count_and_unit, &unit_ptr, 10);
+
+ if (!unit_ptr)
+ return 0;
+
+ while (isblank(*unit_ptr))
+ unit_ptr++;
+
+ switch (unit) {
+ case DRM_PMU_UNIT_BYTES:
+ if (*unit_ptr == '\0')
+ assert(count == 0); /* Generally undocumented, happens for 0. */
+ else if (!strcmp(unit_ptr, "KiB"))
+ count *= 1024;
+ else if (!strcmp(unit_ptr, "MiB"))
+ count *= 1024 * 1024;
+ else
+ pr_err("Unexpected bytes unit '%s'\n", unit_ptr);
+ break;
+ case DRM_PMU_UNIT_CAPACITY:
+ /* No units expected. */
+ break;
+ case DRM_PMU_UNIT_CYCLES:
+ /* No units expected. */
+ break;
+ case DRM_PMU_UNIT_HZ:
+ if (!strcmp(unit_ptr, "Hz"))
+ count *= 1;
+ else if (!strcmp(unit_ptr, "KHz"))
+ count *= 1000;
+ else if (!strcmp(unit_ptr, "MHz"))
+ count *= 1000000;
+ else
+ pr_err("Unexpected hz unit '%s'\n", unit_ptr);
+ break;
+ case DRM_PMU_UNIT_NS:
+ /* Only unit ns expected. */
+ break;
+ case DRM_PMU_UNIT_MAX:
+ default:
+ break;
+ }
+ return count;
+}
+
+static uint64_t read_drm_event(int fdinfo_dir_fd, const char *fd_name,
+ const char *match, enum drm_pmu_unit unit)
+{
+ char buf[640];
+ struct io io;
+ char *line = NULL;
+ size_t line_len;
+ uint64_t count = 0;
+
+ io__init(&io, openat(fdinfo_dir_fd, fd_name, O_RDONLY), buf, sizeof(buf));
+ if (io.fd == -1) {
+ /* Failed to open file, ignore. */
+ return 0;
+ }
+ while (io__getline(&io, &line, &line_len) > 0) {
+ size_t i = strlen(match);
+
+ if (strncmp(line, match, i))
+ continue;
+ if (line[i] != ':')
+ continue;
+ while (isblank(line[++i]))
+ ;
+ if (line[line_len - 1] == '\n')
+ line[line_len - 1] = '\0';
+ count = read_count_and_apply_unit(&line[i], unit);
+ break;
+ }
+ free(line);
+ close(io.fd);
+ return count;
+}
+
+struct read_drm_event_cb_args {
+ const char *match;
+ uint64_t count;
+ enum drm_pmu_unit unit;
+};
+
+static int read_drm_event_cb(void *vargs, int fdinfo_dir_fd, const char *fd_name)
+{
+ struct read_drm_event_cb_args *args = vargs;
+
+ args->count += read_drm_event(fdinfo_dir_fd, fd_name, args->match, args->unit);
+ return 0;
+}
+
+static uint64_t drm_pmu__read_system_wide(struct drm_pmu *drm, struct evsel *evsel)
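+/*
+ * Sum the matching fdinfo counter over every DRM fd, either across all of
+ * /proc (system wide) or, further below, for a single pid.
+ */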
+{
+ struct read_drm_event_cb_args args = {
+ .count = 0,
+ .match = drm->events[evsel->core.attr.config].name,
+ .unit = drm->events[evsel->core.attr.config].unit,
+ };
+
+ for_each_drm_fdinfo(/*skip_all_duplicates=*/false, read_drm_event_cb, &args);
+ return args.count;
+}
+
+static uint64_t drm_pmu__read_for_pid(struct drm_pmu *drm, struct evsel *evsel, int pid)
+{
+ struct read_drm_event_cb_args args = {
+ .count = 0,
+ .match = drm->events[evsel->core.attr.config].name,
+ .unit = drm->events[evsel->core.attr.config].unit,
+ };
+ struct minor_info minors = {
+ .minors = NULL,
+ .minors_num = 0,
+ .minors_len = 0,
+ };
+ int proc_dir = open(procfs__mountpoint(), O_DIRECTORY);
+ char pid_name[12];
+ int ret;
+
+ if (proc_dir < 0)
+ return 0;
+
+ snprintf(pid_name, sizeof(pid_name), "%d", pid);
+ ret = for_each_drm_fdinfo_in_dir(read_drm_event_cb, &args, proc_dir, pid_name, &minors);
+ free(minors.minors);
+ close(proc_dir);
+ return ret == 0 ? args.count : 0;
+}
+
+int evsel__drm_pmu_read(struct evsel *evsel, int cpu_map_idx, int thread)
+{
+ struct drm_pmu *drm = container_of(evsel->pmu, struct drm_pmu, pmu);
+ struct perf_counts_values *count, *old_count = NULL;
+ int pid = perf_thread_map__pid(evsel->core.threads, thread);
+ uint64_t counter;
+
+ if (pid != -1)
+ counter = drm_pmu__read_for_pid(drm, evsel, pid);
+ else
+ counter = drm_pmu__read_system_wide(drm, evsel);
+
+ if (evsel->prev_raw_counts)
+ old_count = perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
+
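+ /*
+ * No kernel event backs a DRM counter, so synthesize the enabled and
+ * running times by counting reads.
+ */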
+ count = perf_counts(evsel->counts, cpu_map_idx, thread);
+ if (old_count) {
+ count->val = old_count->val + counter;
+ count->run = old_count->run + 1;
+ count->ena = old_count->ena + 1;
+ } else {
+ count->val = counter;
+ count->run++;
+ count->ena++;
+ }
+ return 0;
+}
diff --git a/tools/perf/util/drm_pmu.h b/tools/perf/util/drm_pmu.h
new file mode 100644
index 000000000000..e7f366fca8a4
--- /dev/null
+++ b/tools/perf/util/drm_pmu.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+#ifndef __DRM_PMU_H
+#define __DRM_PMU_H
+/*
+ * Linux DRM clients expose information through usage stats as documented in
+ * Documentation/gpu/drm-usage-stats.rst (available online at
+ * https://docs.kernel.org/gpu/drm-usage-stats.html). This is a tool-level
+ * PMU, implemented entirely within perf rather than in the kernel, that
+ * exposes that information as perf events.
+ */
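+
+/*
+ * Example usage (PMU and event names depend on which DRM drivers are
+ * present, so the names below are hypothetical):
+ *
+ *   $ perf stat -e drm_i915/drm-engine-render/ -p <pid>
+ */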
+
+#include "pmu.h"
+#include <stdbool.h>
+
+struct list_head;
+struct perf_thread_map;
+
+void drm_pmu__exit(struct perf_pmu *pmu);
+bool drm_pmu__have_event(const struct perf_pmu *pmu, const char *name);
+int drm_pmu__for_each_event(const struct perf_pmu *pmu, void *state, pmu_event_callback cb);
+size_t drm_pmu__num_events(const struct perf_pmu *pmu);
+int drm_pmu__config_terms(const struct perf_pmu *pmu,
+ struct perf_event_attr *attr,
+ struct parse_events_terms *terms,
+ struct parse_events_error *err);
+int drm_pmu__check_alias(const struct perf_pmu *pmu, struct parse_events_terms *terms,
+ struct perf_pmu_info *info, struct parse_events_error *err);
+
+bool perf_pmu__is_drm(const struct perf_pmu *pmu);
+bool evsel__is_drm(const struct evsel *evsel);
+
+int perf_pmus__read_drm_pmus(struct list_head *pmus);
+
+int evsel__drm_pmu_open(struct evsel *evsel,
+ struct perf_thread_map *threads,
+ int start_cpu_map_idx, int end_cpu_map_idx);
+int evsel__drm_pmu_read(struct evsel *evsel, int cpu_map_idx, int thread);
+
+#endif /* __DRM_PMU_H */
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 8619b6eea62d..344e689567ee 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -217,7 +217,7 @@ int dso__read_binary_type_filename(const struct dso *dso,
break;
}
- build_id__sprintf(dso__bid_const(dso), build_id_hex);
+ build_id__snprintf(dso__bid(dso), build_id_hex, sizeof(build_id_hex));
len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
snprintf(filename + len, size - len, "%.2s/%s.debug",
build_id_hex, build_id_hex + 2);
@@ -1349,6 +1349,16 @@ struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
return dso;
}
+static void __dso__set_long_name_id(struct dso *dso, const char *name, bool name_allocated)
+{
+ if (dso__long_name_allocated(dso))
+ free((char *)dso__long_name(dso));
+
+ RC_CHK_ACCESS(dso)->long_name = name;
+ RC_CHK_ACCESS(dso)->long_name_len = strlen(name);
+ dso__set_long_name_allocated(dso, name_allocated);
+}
+
static void dso__set_long_name_id(struct dso *dso, const char *name, bool name_allocated)
{
struct dsos *dsos = dso__dsos(dso);
@@ -1362,81 +1372,86 @@ static void dso__set_long_name_id(struct dso *dso, const char *name, bool name_a
* renaming the dso.
*/
down_write(&dsos->lock);
- }
-
- if (dso__long_name_allocated(dso))
- free((char *)dso__long_name(dso));
-
- RC_CHK_ACCESS(dso)->long_name = name;
- RC_CHK_ACCESS(dso)->long_name_len = strlen(name);
- dso__set_long_name_allocated(dso, name_allocated);
-
- if (dsos) {
+ __dso__set_long_name_id(dso, name, name_allocated);
dsos->sorted = false;
up_write(&dsos->lock);
+ } else {
+ __dso__set_long_name_id(dso, name, name_allocated);
}
}
static int __dso_id__cmp(const struct dso_id *a, const struct dso_id *b)
{
- if (a->maj > b->maj) return -1;
- if (a->maj < b->maj) return 1;
-
- if (a->min > b->min) return -1;
- if (a->min < b->min) return 1;
+ if (a->mmap2_valid && b->mmap2_valid) {
+ if (a->maj > b->maj) return -1;
+ if (a->maj < b->maj) return 1;
- if (a->ino > b->ino) return -1;
- if (a->ino < b->ino) return 1;
+ if (a->min > b->min) return -1;
+ if (a->min < b->min) return 1;
- /*
- * Synthesized MMAP events have zero ino_generation, avoid comparing
- * them with MMAP events with actual ino_generation.
- *
- * I found it harmful because the mismatch resulted in a new
- * dso that did not have a build ID whereas the original dso did have a
- * build ID. The build ID was essential because the object was not found
- * otherwise. - Adrian
- */
- if (a->ino_generation && b->ino_generation) {
+ if (a->ino > b->ino) return -1;
+ if (a->ino < b->ino) return 1;
+ }
+ if (a->mmap2_ino_generation_valid && b->mmap2_ino_generation_valid) {
if (a->ino_generation > b->ino_generation) return -1;
if (a->ino_generation < b->ino_generation) return 1;
}
-
+ if (build_id__is_defined(&a->build_id) && build_id__is_defined(&b->build_id)) {
+ if (a->build_id.size != b->build_id.size)
+ return a->build_id.size < b->build_id.size ? -1 : 1;
+ return memcmp(a->build_id.data, b->build_id.data, a->build_id.size);
+ }
return 0;
}
-bool dso_id__empty(const struct dso_id *id)
-{
- if (!id)
- return true;
-
- return !id->maj && !id->min && !id->ino && !id->ino_generation;
-}
+const struct dso_id dso_id_empty = {
+ {
+ .maj = 0,
+ .min = 0,
+ .ino = 0,
+ .ino_generation = 0,
+ },
+ .mmap2_valid = false,
+ .mmap2_ino_generation_valid = false,
+ .build_id = {
+ .size = 0,
+ },
+};
-void __dso__inject_id(struct dso *dso, const struct dso_id *id)
+void __dso__improve_id(struct dso *dso, const struct dso_id *id)
{
struct dsos *dsos = dso__dsos(dso);
struct dso_id *dso_id = dso__id(dso);
+ bool changed = false;
/* dsos write lock held by caller. */
- dso_id->maj = id->maj;
- dso_id->min = id->min;
- dso_id->ino = id->ino;
- dso_id->ino_generation = id->ino_generation;
-
- if (dsos)
+ if (id->mmap2_valid && !dso_id->mmap2_valid) {
+ dso_id->maj = id->maj;
+ dso_id->min = id->min;
+ dso_id->ino = id->ino;
+ dso_id->mmap2_valid = true;
+ changed = true;
+ }
+ if (id->mmap2_ino_generation_valid && !dso_id->mmap2_ino_generation_valid) {
+ dso_id->ino_generation = id->ino_generation;
+ dso_id->mmap2_ino_generation_valid = true;
+ changed = true;
+ }
+ if (build_id__is_defined(&id->build_id) && !build_id__is_defined(&dso_id->build_id)) {
+ dso_id->build_id = id->build_id;
+ changed = true;
+ }
+ if (changed && dsos)
dsos->sorted = false;
}
int dso_id__cmp(const struct dso_id *a, const struct dso_id *b)
{
- /*
- * The second is always dso->id, so zeroes if not set, assume passing
- * NULL for a means a zeroed id
- */
- if (dso_id__empty(a) || dso_id__empty(b))
+ if (a == &dso_id_empty || b == &dso_id_empty) {
+ /* There is no valid data to compare, so treat the IDs as matching. */
return 0;
+ }
return __dso_id__cmp(a, b);
}
@@ -1451,6 +1466,16 @@ void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
dso__set_long_name_id(dso, name, name_allocated);
}
+static void __dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
+{
+ if (dso__short_name_allocated(dso))
+ free((char *)dso__short_name(dso));
+
+ RC_CHK_ACCESS(dso)->short_name = name;
+ RC_CHK_ACCESS(dso)->short_name_len = strlen(name);
+ dso__set_short_name_allocated(dso, name_allocated);
+}
+
void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
{
struct dsos *dsos = dso__dsos(dso);
@@ -1464,17 +1489,11 @@ void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
* renaming the dso.
*/
down_write(&dsos->lock);
- }
- if (dso__short_name_allocated(dso))
- free((char *)dso__short_name(dso));
-
- RC_CHK_ACCESS(dso)->short_name = name;
- RC_CHK_ACCESS(dso)->short_name_len = strlen(name);
- dso__set_short_name_allocated(dso, name_allocated);
-
- if (dsos) {
+ __dso__set_short_name(dso, name, name_allocated);
dsos->sorted = false;
up_write(&dsos->lock);
+ } else {
+ __dso__set_short_name(dso, name, name_allocated);
}
}
@@ -1533,7 +1552,6 @@ struct dso *dso__new_id(const char *name, const struct dso_id *id)
dso->loaded = 0;
dso->rel = 0;
dso->sorted_by_name = 0;
- dso->has_build_id = 0;
dso->has_srcline = 1;
dso->a2l_fails = 1;
dso->kernel = DSO_SPACE__USER;
@@ -1605,6 +1623,10 @@ struct dso *dso__get(struct dso *dso)
void dso__put(struct dso *dso)
{
+#ifdef REFCNT_CHECKING
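+ /*
+ * If the open data file holds the only other reference, close it so
+ * the dso can be deleted.
+ */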
+ if (dso && dso__data(dso) && refcount_read(&RC_CHK_ACCESS(dso)->refcnt) == 2)
+ dso__data_close(dso);
+#endif
if (dso && refcount_dec_and_test(&RC_CHK_ACCESS(dso)->refcnt))
dso__delete(dso);
else
@@ -1638,15 +1660,14 @@ int dso__swap_init(struct dso *dso, unsigned char eidata)
return 0;
}
-void dso__set_build_id(struct dso *dso, struct build_id *bid)
+void dso__set_build_id(struct dso *dso, const struct build_id *bid)
{
- RC_CHK_ACCESS(dso)->bid = *bid;
- RC_CHK_ACCESS(dso)->has_build_id = 1;
+ dso__id(dso)->build_id = *bid;
}
-bool dso__build_id_equal(const struct dso *dso, struct build_id *bid)
+bool dso__build_id_equal(const struct dso *dso, const struct build_id *bid)
{
- const struct build_id *dso_bid = dso__bid_const(dso);
+ const struct build_id *dso_bid = dso__bid(dso);
if (dso_bid->size > bid->size && dso_bid->size == BUILD_ID_SIZE) {
/*
@@ -1665,18 +1686,20 @@ bool dso__build_id_equal(const struct dso *dso, struct build_id *bid)
void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
{
char path[PATH_MAX];
+ struct build_id bid = { .size = 0, };
if (machine__is_default_guest(machine))
return;
sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
- if (sysfs__read_build_id(path, dso__bid(dso)) == 0)
- dso__set_has_build_id(dso);
+ sysfs__read_build_id(path, &bid);
+ dso__set_build_id(dso, &bid);
}
int dso__kernel_module_get_build_id(struct dso *dso,
const char *root_dir)
{
char filename[PATH_MAX];
+ struct build_id bid = { .size = 0, };
/*
* kernel module short names are of the form "[module]" and
* we need just "module" here.
@@ -1687,9 +1710,8 @@ int dso__kernel_module_get_build_id(struct dso *dso,
"%s/sys/module/%.*s/notes/.note.gnu.build-id",
root_dir, (int)strlen(name) - 1, name);
- if (sysfs__read_build_id(filename, dso__bid(dso)) == 0)
- dso__set_has_build_id(dso);
-
+ sysfs__read_build_id(filename, &bid);
+ dso__set_build_id(dso, &bid);
return 0;
}
@@ -1697,7 +1719,7 @@ static size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
{
char sbuild_id[SBUILD_ID_SIZE];
- build_id__sprintf(dso__bid(dso), sbuild_id);
+ build_id__snprintf(dso__bid(dso), sbuild_id, sizeof(sbuild_id));
return fprintf(fp, "%s", sbuild_id);
}
@@ -1776,3 +1798,115 @@ bool is_perf_pid_map_name(const char *dso_name)
return perf_pid_map_tid(dso_name, &tid);
}
+
+struct find_file_offset_data {
+ u64 ip;
+ u64 offset;
+};
+
+/* This will be called for each PHDR in an ELF binary */
+static int find_file_offset(u64 start, u64 len, u64 pgoff, void *arg)
+{
+ struct find_file_offset_data *data = arg;
+
+ if (start <= data->ip && data->ip < start + len) {
+ data->offset = pgoff + data->ip - start;
+ return 1;
+ }
+ return 0;
+}
+
+static const u8 *__dso__read_symbol(struct dso *dso, const char *symfs_filename,
+ u64 start, size_t len,
+ u8 **out_buf, u64 *out_buf_len, bool *is_64bit)
+{
+ struct nscookie nsc;
+ int fd;
+ ssize_t count;
+ struct find_file_offset_data data = {
+ .ip = start,
+ };
+ u8 *code_buf = NULL;
+ int saved_errno;
+
+ nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);
+ fd = open(symfs_filename, O_RDONLY);
+ saved_errno = errno;
+ nsinfo__mountns_exit(&nsc);
+ if (fd < 0) {
+ errno = saved_errno;
+ return NULL;
+ }
+ if (file__read_maps(fd, /*exe=*/true, find_file_offset, &data, is_64bit) <= 0) {
+ close(fd);
+ errno = ENOENT;
+ return NULL;
+ }
+ code_buf = malloc(len);
+ if (code_buf == NULL) {
+ close(fd);
+ errno = ENOMEM;
+ return NULL;
+ }
+ count = pread(fd, code_buf, len, data.offset);
+ saved_errno = errno;
+ close(fd);
+ if ((u64)count != len) {
+ free(code_buf);
+ errno = saved_errno;
+ return NULL;
+ }
+ *out_buf = code_buf;
+ *out_buf_len = len;
+ return code_buf;
+}
+
+/*
+ * Read a symbol into memory for disassembly by a library like capstone or
+ * libLLVM. If memory is allocated, *out_buf holds it.
+ */
+const u8 *dso__read_symbol(struct dso *dso, const char *symfs_filename,
+ const struct map *map, const struct symbol *sym,
+ u8 **out_buf, u64 *out_buf_len, bool *is_64bit)
+{
+ u64 start = map__rip_2objdump(map, sym->start);
+ u64 end = map__rip_2objdump(map, sym->end);
+ size_t len = end - start;
+
+ *out_buf = NULL;
+ *out_buf_len = 0;
+ *is_64bit = false;
+
+ if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_IMAGE) {
+ /*
+ * Note, there is fallback BPF image disassembly in the objdump
+ * version but it currently does nothing.
+ */
+ errno = EOPNOTSUPP;
+ return NULL;
+ }
+ if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_PROG_INFO) {
+#ifdef HAVE_LIBBPF_SUPPORT
+ struct bpf_prog_info_node *info_node;
+ struct perf_bpil *info_linear;
+
+ *is_64bit = sizeof(void *) == sizeof(u64);
+ info_node = perf_env__find_bpf_prog_info(dso__bpf_prog(dso)->env,
+ dso__bpf_prog(dso)->id);
+ if (!info_node) {
+ errno = SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF;
+ return NULL;
+ }
+ info_linear = info_node->info_linear;
+ assert(len <= info_linear->info.jited_prog_len);
+ *out_buf_len = len;
+ return (const u8 *)(uintptr_t)(info_linear->info.jited_prog_insns);
+#else
+ pr_debug("No BPF program disassembly support\n");
+ errno = EOPNOTSUPP;
+ return NULL;
+#endif
+ }
+ return __dso__read_symbol(dso, symfs_filename, start, len,
+ out_buf, out_buf_len, is_64bit);
+}
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index c87564471f9b..f8ccb9816b89 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -10,6 +10,7 @@
#include <stdio.h>
#include <linux/bitops.h>
#include "build-id.h"
+#include "debuginfo.h"
#include "mutex.h"
#include <internal/rc_check.h>
@@ -185,14 +186,33 @@ enum dso_load_errno {
#define DSO__DATA_CACHE_SIZE 4096
#define DSO__DATA_CACHE_MASK ~(DSO__DATA_CACHE_SIZE - 1)
-/*
- * Data about backing storage DSO, comes from PERF_RECORD_MMAP2 meta events
+/**
+ * struct dso_id
+ *
+ * Data about backing storage DSO, comes from PERF_RECORD_MMAP2 meta events,
+ * reading from /proc/pid/maps or synthesis of build_ids from DSOs. Possibly
+ * incomplete at any particular use.
*/
struct dso_id {
- u32 maj;
- u32 min;
- u64 ino;
- u64 ino_generation;
+ /* Data related to the mmap2 event or read from /proc/pid/maps. */
+ struct {
+ u32 maj;
+ u32 min;
+ u64 ino;
+ u64 ino_generation;
+ };
+ /** @mmap2_valid: Are the maj, min and ino fields valid? */
+ bool mmap2_valid;
+ /**
+ * @mmap2_ino_generation_valid: Is the ino_generation valid? Generally
+ * false for /proc/pid/maps mmap event.
+ */
+ bool mmap2_ino_generation_valid;
+ /**
+ * @build_id: A possibly populated build_id. build_id__is_defined checks
+ * whether it is populated.
+ */
+ struct build_id build_id;
};
struct dso_cache {
@@ -243,7 +263,6 @@ DECLARE_RC_STRUCT(dso) {
u64 addr;
struct symbol *symbol;
} last_find_result;
- struct build_id bid;
u64 text_offset;
u64 text_end;
const char *short_name;
@@ -276,12 +295,12 @@ DECLARE_RC_STRUCT(dso) {
enum dso_swap_type needs_swap:2;
bool is_kmod:1;
u8 adjust_symbols:1;
- u8 has_build_id:1;
u8 header_build_id:1;
u8 has_srcline:1;
u8 hit:1;
u8 annotate_warned:1;
u8 auxtrace_warned:1;
+ u8 debuginfo_warned:1;
u8 short_name_allocated:1;
u8 long_name_allocated:1;
u8 is_64_bit:1;
@@ -292,6 +311,9 @@ DECLARE_RC_STRUCT(dso) {
};
extern struct mutex _dso__data_open_lock;
+extern const struct dso_id dso_id_empty;
+
+int dso_id__cmp(const struct dso_id *a, const struct dso_id *b);
/* dso__for_each_symbol - iterate over the symbols of given type
*
@@ -342,6 +364,16 @@ static inline void dso__set_annotate_warned(struct dso *dso)
RC_CHK_ACCESS(dso)->annotate_warned = 1;
}
+static inline bool dso__debuginfo_warned(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->debuginfo_warned;
+}
+
+static inline void dso__set_debuginfo_warned(struct dso *dso)
+{
+ RC_CHK_ACCESS(dso)->debuginfo_warned = 1;
+}
+
static inline bool dso__auxtrace_warned(const struct dso *dso)
{
return RC_CHK_ACCESS(dso)->auxtrace_warned;
@@ -362,31 +394,11 @@ static inline void dso__set_auxtrace_cache(struct dso *dso, struct auxtrace_cach
RC_CHK_ACCESS(dso)->auxtrace_cache = cache;
}
-static inline struct build_id *dso__bid(struct dso *dso)
-{
- return &RC_CHK_ACCESS(dso)->bid;
-}
-
-static inline const struct build_id *dso__bid_const(const struct dso *dso)
-{
- return &RC_CHK_ACCESS(dso)->bid;
-}
-
static inline struct dso_bpf_prog *dso__bpf_prog(struct dso *dso)
{
return &RC_CHK_ACCESS(dso)->bpf_prog;
}
-static inline bool dso__has_build_id(const struct dso *dso)
-{
- return RC_CHK_ACCESS(dso)->has_build_id;
-}
-
-static inline void dso__set_has_build_id(struct dso *dso)
-{
- RC_CHK_ACCESS(dso)->has_build_id = true;
-}
-
static inline bool dso__has_srcline(const struct dso *dso)
{
return RC_CHK_ACCESS(dso)->has_srcline;
@@ -462,6 +474,16 @@ static inline const struct dso_id *dso__id_const(const struct dso *dso)
return &RC_CHK_ACCESS(dso)->id;
}
+static inline const struct build_id *dso__bid(const struct dso *dso)
+{
+ return &dso__id_const(dso)->build_id;
+}
+
+static inline bool dso__has_build_id(const struct dso *dso)
+{
+ return build_id__is_defined(dso__bid(dso));
+}
+
static inline struct rb_root_cached *dso__inlined_nodes(struct dso *dso)
{
return &RC_CHK_ACCESS(dso)->inlined_nodes;
@@ -699,9 +721,6 @@ static inline void dso__set_text_offset(struct dso *dso, u64 val)
RC_CHK_ACCESS(dso)->text_offset = val;
}
-int dso_id__cmp(const struct dso_id *a, const struct dso_id *b);
-bool dso_id__empty(const struct dso_id *id);
-
struct dso *dso__new_id(const char *name, const struct dso_id *id);
struct dso *dso__new(const char *name);
void dso__delete(struct dso *dso);
@@ -709,7 +728,7 @@ void dso__delete(struct dso *dso);
int dso__cmp_id(struct dso *a, struct dso *b);
void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated);
void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated);
-void __dso__inject_id(struct dso *dso, const struct dso_id *id);
+void __dso__improve_id(struct dso *dso, const struct dso_id *id);
int dso__name_len(const struct dso *dso);
@@ -739,8 +758,8 @@ void dso__sort_by_name(struct dso *dso);
int dso__swap_init(struct dso *dso, unsigned char eidata);
-void dso__set_build_id(struct dso *dso, struct build_id *bid);
-bool dso__build_id_equal(const struct dso *dso, struct build_id *bid);
+void dso__set_build_id(struct dso *dso, const struct build_id *bid);
+bool dso__build_id_equal(const struct dso *dso, const struct build_id *bid);
void dso__read_running_kernel_build_id(struct dso *dso,
struct machine *machine);
int dso__kernel_module_get_build_id(struct dso *dso, const char *root_dir);
@@ -896,4 +915,17 @@ u64 dso__findnew_global_type(struct dso *dso, u64 addr, u64 offset);
bool perf_pid_map_tid(const char *dso_name, int *tid);
bool is_perf_pid_map_name(const char *dso_name);
+/*
+ * In the future, we may get debuginfo using build-ID (w/o path).
+ * This helper is added to make that conversion smooth.
+ */
+static inline struct debuginfo *dso__debuginfo(struct dso *dso)
+{
+ return debuginfo__new(dso__long_name(dso));
+}
+
+const u8 *dso__read_symbol(struct dso *dso, const char *symfs_filename,
+ const struct map *map, const struct symbol *sym,
+ u8 **out_buf, u64 *out_buf_len, bool *is_64bit);
+
#endif /* __PERF_DSO */
diff --git a/tools/perf/util/dsos.c b/tools/perf/util/dsos.c
index e0998e2a7c4e..0a7645c7fae7 100644
--- a/tools/perf/util/dsos.c
+++ b/tools/perf/util/dsos.c
@@ -72,6 +72,7 @@ static int dsos__read_build_ids_cb(struct dso *dso, void *data)
{
struct dsos__read_build_ids_cb_args *args = data;
struct nscookie nsc;
+ struct build_id bid = { .size = 0, };
if (args->with_hits && !dso__hit(dso) && !dso__is_vdso(dso))
return 0;
@@ -80,15 +81,15 @@ static int dsos__read_build_ids_cb(struct dso *dso, void *data)
return 0;
}
nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);
- if (filename__read_build_id(dso__long_name(dso), dso__bid(dso)) > 0) {
+ if (filename__read_build_id(dso__long_name(dso), &bid) > 0) {
+ dso__set_build_id(dso, &bid);
args->have_build_id = true;
- dso__set_has_build_id(dso);
} else if (errno == ENOENT && dso__nsinfo(dso)) {
char *new_name = dso__filename_with_chroot(dso, dso__long_name(dso));
- if (new_name && filename__read_build_id(new_name, dso__bid(dso)) > 0) {
+ if (new_name && filename__read_build_id(new_name, &bid) > 0) {
+ dso__set_build_id(dso, &bid);
args->have_build_id = true;
- dso__set_has_build_id(dso);
}
free(new_name);
}
@@ -157,6 +158,7 @@ static struct dso *__dsos__find_by_longname_id(struct dsos *dsos,
const char *name,
const struct dso_id *id,
bool write_locked)
+ SHARED_LOCKS_REQUIRED(dsos->lock)
{
struct dsos__key key = {
.long_name = name,
@@ -262,6 +264,7 @@ static int dsos__find_id_cb(struct dso *dso, void *data)
static struct dso *__dsos__find_id(struct dsos *dsos, const char *name, const struct dso_id *id,
bool cmp_short, bool write_locked)
+ SHARED_LOCKS_REQUIRED(dsos->lock)
{
struct dso *res;
@@ -284,7 +287,7 @@ struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
struct dso *res;
down_read(&dsos->lock);
- res = __dsos__find_id(dsos, name, NULL, cmp_short, /*write_locked=*/false);
+ res = __dsos__find_id(dsos, name, &dso_id_empty, cmp_short, /*write_locked=*/false);
up_read(&dsos->lock);
return res;
}
@@ -338,11 +341,12 @@ static struct dso *__dsos__addnew_id(struct dsos *dsos, const char *name, const
}
static struct dso *__dsos__findnew_id(struct dsos *dsos, const char *name, const struct dso_id *id)
+ SHARED_LOCKS_REQUIRED(dsos->lock)
{
struct dso *dso = __dsos__find_id(dsos, name, id, false, /*write_locked=*/true);
- if (dso && dso_id__empty(dso__id(dso)) && !dso_id__empty(id))
- __dso__inject_id(dso, id);
+ if (dso)
+ __dso__improve_id(dso, id);
return dso ? dso : __dsos__addnew_id(dsos, name, id);
}
@@ -370,7 +374,7 @@ static int dsos__fprintf_buildid_cb(struct dso *dso, void *data)
if (args->skip && args->skip(dso, args->parm))
return 0;
- build_id__sprintf(dso__bid(dso), sbuild_id);
+ build_id__snprintf(dso__bid(dso), sbuild_id, sizeof(sbuild_id));
args->ret += fprintf(args->fp, "%-40s %s\n", sbuild_id, dso__long_name(dso));
return 0;
}
@@ -433,7 +437,8 @@ struct dso *dsos__findnew_module_dso(struct dsos *dsos,
down_write(&dsos->lock);
- dso = __dsos__find_id(dsos, m->name, NULL, /*cmp_short=*/true, /*write_locked=*/true);
+ dso = __dsos__find_id(dsos, m->name, &dso_id_empty, /*cmp_short=*/true,
+ /*write_locked=*/true);
if (dso) {
up_write(&dsos->lock);
return dso;
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index 559c953ca172..9267af204c7d 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -1388,18 +1388,19 @@ struct find_var_data {
#define DWARF_OP_DIRECT_REGS 32
static bool match_var_offset(Dwarf_Die *die_mem, struct find_var_data *data,
- u64 addr_offset, u64 addr_type, bool is_pointer)
+ s64 addr_offset, s64 addr_type, bool is_pointer)
{
Dwarf_Die type_die;
Dwarf_Word size;
+ s64 offset = addr_offset - addr_type;
- if (addr_offset == addr_type) {
+ if (offset == 0) {
/* Update offset relative to the start of the variable */
data->offset = 0;
return true;
}
- if (addr_offset < addr_type)
+ if (offset < 0)
return false;
if (die_get_real_type(die_mem, &type_die) == NULL)
@@ -1414,14 +1415,42 @@ static bool match_var_offset(Dwarf_Die *die_mem, struct find_var_data *data,
if (dwarf_aggregate_size(&type_die, &size) < 0)
return false;
- if (addr_offset >= addr_type + size)
+ if ((u64)offset >= size)
return false;
/* Update offset relative to the start of the variable */
- data->offset = addr_offset - addr_type;
+ data->offset = offset;
return true;
}
+/**
+ * is_breg_access_indirect - Check if breg based access implies type
+ * dereference
+ * @ops: DWARF operations array
+ * @nops: Number of operations in @ops
+ *
+ * Returns true if the DWARF expression evaluates to the variable's
+ * value, so the memory access on that register needs type dereference.
+ * Returns false if the expression evaluates to the variable's address.
+ * This is called after check_allowed_ops.
+ */
+static bool is_breg_access_indirect(Dwarf_Op *ops, size_t nops)
+{
+ /* only the base register */
+ if (nops == 1)
+ return false;
+
+ if (nops == 2 && ops[1].atom == DW_OP_stack_value)
+ return true;
+
+ if (nops == 3 && (ops[1].atom == DW_OP_deref ||
+ ops[1].atom == DW_OP_deref_size) &&
+ ops[2].atom == DW_OP_stack_value)
+ return false;
+ /* Unreachable: check_allowed_ops() permits only the forms above. */
+ return false;
+}
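+
+/*
+ * For example (illustrative): "DW_OP_breg6 -8" computes the variable's
+ * address (reg6 - 8), so it is not indirect, while "DW_OP_breg6 -8;
+ * DW_OP_stack_value" makes that computed value the variable's value itself,
+ * the indirect case.
+ */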
+
/* Only checks direct child DIEs in the given scope. */
static int __die_find_var_reg_cb(Dwarf_Die *die_mem, void *arg)
{
@@ -1450,7 +1479,7 @@ static int __die_find_var_reg_cb(Dwarf_Die *die_mem, void *arg)
if (data->is_fbreg && ops->atom == DW_OP_fbreg &&
check_allowed_ops(ops, nops) &&
match_var_offset(die_mem, data, data->offset, ops->number,
- /*is_pointer=*/false))
+ is_breg_access_indirect(ops, nops)))
return DIE_FIND_CB_END;
/* Only match with a simple case */
@@ -1462,11 +1491,11 @@ static int __die_find_var_reg_cb(Dwarf_Die *die_mem, void *arg)
/*is_pointer=*/true))
return DIE_FIND_CB_END;
- /* Local variables accessed by a register + offset */
+ /* Variables accessed by a register + offset */
if (ops->atom == (DW_OP_breg0 + data->reg) &&
check_allowed_ops(ops, nops) &&
match_var_offset(die_mem, data, data->offset, ops->number,
- /*is_pointer=*/false))
+ is_breg_access_indirect(ops, nops)))
return DIE_FIND_CB_END;
} else {
/* pointer variables saved in a register 32 or above */
@@ -1476,11 +1505,11 @@ static int __die_find_var_reg_cb(Dwarf_Die *die_mem, void *arg)
/*is_pointer=*/true))
return DIE_FIND_CB_END;
- /* Local variables accessed by a register + offset */
+ /* Variables accessed by a register + offset */
if (ops->atom == DW_OP_bregx && data->reg == ops->number &&
check_allowed_ops(ops, nops) &&
match_var_offset(die_mem, data, data->offset, ops->number2,
- /*is_poitner=*/false))
+ is_breg_access_indirect(ops, nops)))
return DIE_FIND_CB_END;
}
}
@@ -1598,13 +1627,22 @@ static int __die_collect_vars_cb(Dwarf_Die *die_mem, void *arg)
if (!check_allowed_ops(ops, nops))
return DIE_FIND_CB_SIBLING;
- if (die_get_real_type(die_mem, &type_die) == NULL)
+ if (__die_get_real_type(die_mem, &type_die) == NULL)
return DIE_FIND_CB_SIBLING;
vt = malloc(sizeof(*vt));
if (vt == NULL)
return DIE_FIND_CB_END;
+ /* Usually a register holds the value of a variable */
+ vt->is_reg_var_addr = false;
+
+ if (((ops->atom >= DW_OP_breg0 && ops->atom <= DW_OP_breg31) ||
+ ops->atom == DW_OP_bregx || ops->atom == DW_OP_fbreg) &&
+ !is_breg_access_indirect(ops, nops))
+ /* The register contains an address of the variable. */
+ vt->is_reg_var_addr = true;
+
vt->die_off = dwarf_dieoffset(&type_die);
vt->addr = start;
vt->reg = reg_from_dwarf_op(ops);
@@ -1920,6 +1958,7 @@ struct find_scope_data {
static int __die_find_scope_cb(Dwarf_Die *die_mem, void *arg)
{
struct find_scope_data *data = arg;
+ int tag = dwarf_tag(die_mem);
if (dwarf_haspc(die_mem, data->pc)) {
Dwarf_Die *tmp;
@@ -1933,6 +1972,14 @@ static int __die_find_scope_cb(Dwarf_Die *die_mem, void *arg)
data->nr++;
return DIE_FIND_CB_CHILD;
}
+
+ /*
+ * If the DIE doesn't have the PC, we still need to check its children
+ * and siblings if it's a container like a namespace.
+ */
+ if (tag == DW_TAG_namespace)
+ return DIE_FIND_CB_CONTINUE;
+
return DIE_FIND_CB_SIBLING;
}
diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h
index 892c8c5c23fc..cd481ec9c5a1 100644
--- a/tools/perf/util/dwarf-aux.h
+++ b/tools/perf/util/dwarf-aux.h
@@ -148,6 +148,8 @@ struct die_var_type {
u64 addr;
int reg;
int offset;
+ /* Whether the register holds an address of the variable */
+ bool is_reg_var_addr;
};
/* Return type info of a member at offset */
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 36411749e007..f1626d2032cd 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -3,8 +3,10 @@
#include "debug.h"
#include "env.h"
#include "util/header.h"
-#include "linux/compiler.h"
+#include "util/rwsem.h"
+#include <linux/compiler.h>
#include <linux/ctype.h>
+#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include "cgroup.h"
@@ -17,8 +19,6 @@
#include "strbuf.h"
#include "trace/beauty/beauty.h"
-struct perf_env perf_env;
-
#ifdef HAVE_LIBBPF_SUPPORT
#include "bpf-event.h"
#include "bpf-utils.h"
@@ -89,6 +89,20 @@ out:
return node;
}
+void perf_env__iterate_bpf_prog_info(struct perf_env *env,
+ void (*cb)(struct bpf_prog_info_node *node,
+ void *data),
+ void *data)
+{
+ struct rb_node *first;
+
+ down_read(&env->bpf_progs.lock);
+ first = rb_first(&env->bpf_progs.infos);
+ for (struct rb_node *node = first; node != NULL; node = rb_next(node))
+ (*cb)(rb_entry(node, struct bpf_prog_info_node, rb_node), data);
+ up_read(&env->bpf_progs.lock);
+}
+
bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
bool ret;
@@ -174,6 +188,7 @@ static void perf_env__purge_bpf(struct perf_env *env)
next = rb_next(&node->rb_node);
rb_erase(&node->rb_node, root);
zfree(&node->info_linear);
+ bpf_metadata_free(node->metadata);
free(node);
}
@@ -254,6 +269,7 @@ void perf_env__exit(struct perf_env *env)
void perf_env__init(struct perf_env *env)
{
+ memset(env, 0, sizeof(*env));
#ifdef HAVE_LIBBPF_SUPPORT
env->bpf_progs.infos = RB_ROOT;
env->bpf_progs.btfs = RB_ROOT;
@@ -416,6 +432,116 @@ static int perf_env__read_nr_cpus_avail(struct perf_env *env)
return env->nr_cpus_avail ? 0 : -ENOENT;
}
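+/*
+ * Gather a core PMU's sysfs caps as "name=value" strings (for example,
+ * illustratively, "branches=32"). The branch related caps are also decoded
+ * into their dedicated fields.
+ */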
+static int __perf_env__read_core_pmu_caps(const struct perf_pmu *pmu,
+ int *nr_caps, char ***caps,
+ unsigned int *max_branches,
+ unsigned int *br_cntr_nr,
+ unsigned int *br_cntr_width)
+{
+ struct perf_pmu_caps *pcaps = NULL;
+ char *ptr, **tmp;
+ int ret = 0;
+
+ *nr_caps = 0;
+ *caps = NULL;
+
+ if (!pmu->nr_caps)
+ return 0;
+
+ *caps = calloc(pmu->nr_caps, sizeof(char *));
+ if (!*caps)
+ return -ENOMEM;
+
+ tmp = *caps;
+ list_for_each_entry(pcaps, &pmu->caps, list) {
+ if (asprintf(&ptr, "%s=%s", pcaps->name, pcaps->value) < 0) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ *tmp++ = ptr;
+
+ if (!strcmp(pcaps->name, "branches"))
+ *max_branches = atoi(pcaps->value);
+ else if (!strcmp(pcaps->name, "branch_counter_nr"))
+ *br_cntr_nr = atoi(pcaps->value);
+ else if (!strcmp(pcaps->name, "branch_counter_width"))
+ *br_cntr_width = atoi(pcaps->value);
+ }
+ *nr_caps = pmu->nr_caps;
+ return 0;
+error:
+ while (tmp-- != *caps)
+ zfree(tmp);
+ zfree(caps);
+ *nr_caps = 0;
+ return ret;
+}
+
+int perf_env__read_core_pmu_caps(struct perf_env *env)
+{
+ struct pmu_caps *pmu_caps;
+ struct perf_pmu *pmu = NULL;
+ int nr_pmu, i = 0, j;
+ int ret;
+
+ nr_pmu = perf_pmus__num_core_pmus();
+
+ if (!nr_pmu)
+ return -ENODEV;
+
+ if (nr_pmu == 1) {
+ pmu = perf_pmus__find_core_pmu();
+ if (!pmu)
+ return -ENODEV;
+ ret = perf_pmu__caps_parse(pmu);
+ if (ret < 0)
+ return ret;
+ return __perf_env__read_core_pmu_caps(pmu, &env->nr_cpu_pmu_caps,
+ &env->cpu_pmu_caps,
+ &env->max_branches,
+ &env->br_cntr_nr,
+ &env->br_cntr_width);
+ }
+
+ pmu_caps = calloc(nr_pmu, sizeof(*pmu_caps));
+ if (!pmu_caps)
+ return -ENOMEM;
+
+ while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
+ if (perf_pmu__caps_parse(pmu) <= 0)
+ continue;
+ ret = __perf_env__read_core_pmu_caps(pmu, &pmu_caps[i].nr_caps,
+ &pmu_caps[i].caps,
+ &pmu_caps[i].max_branches,
+ &pmu_caps[i].br_cntr_nr,
+ &pmu_caps[i].br_cntr_width);
+ if (ret)
+ goto error;
+
+ pmu_caps[i].pmu_name = strdup(pmu->name);
+ if (!pmu_caps[i].pmu_name) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ i++;
+ }
+
+ env->nr_pmus_with_caps = nr_pmu;
+ env->pmu_caps = pmu_caps;
+
+ return 0;
+error:
+ for (i = 0; i < nr_pmu; i++) {
+ for (j = 0; j < pmu_caps[i].nr_caps; j++)
+ zfree(&pmu_caps[i].caps[j]);
+ zfree(&pmu_caps[i].caps);
+ zfree(&pmu_caps[i].pmu_name);
+ }
+ zfree(&pmu_caps);
+ return ret;
+}
+
const char *perf_env__raw_arch(struct perf_env *env)
{
return env && !perf_env__read_arch(env) ? env->arch : "unknown";
@@ -676,3 +802,25 @@ bool x86__is_amd_cpu(void)
return is_amd;
}
+
+bool perf_env__is_x86_intel_cpu(struct perf_env *env)
+{
+ static int is_intel; /* 0: Uninitialized, 1: Yes, -1: No */
+
+ if (is_intel == 0)
+ is_intel = env->cpuid && strstarts(env->cpuid, "GenuineIntel") ? 1 : -1;
+
+ return is_intel == 1;
+}
+
+bool x86__is_intel_cpu(void)
+{
+ struct perf_env env = { .total_mem = 0, };
+ bool is_intel;
+
+ perf_env__cpuid(&env);
+ is_intel = perf_env__is_x86_intel_cpu(&env);
+ perf_env__exit(&env);
+
+ return is_intel;
+}
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index d90e343cf1fa..9977b85523a8 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -150,8 +150,7 @@ enum perf_compress_type {
struct bpf_prog_info_node;
struct btf_node;
-extern struct perf_env perf_env;
-
+int perf_env__read_core_pmu_caps(struct perf_env *env);
void perf_env__exit(struct perf_env *env);
int perf_env__kernel_is_64_bit(struct perf_env *env);
@@ -174,16 +173,22 @@ const char *perf_env__raw_arch(struct perf_env *env);
int perf_env__nr_cpus_avail(struct perf_env *env);
void perf_env__init(struct perf_env *env);
+#ifdef HAVE_LIBBPF_SUPPORT
bool __perf_env__insert_bpf_prog_info(struct perf_env *env,
struct bpf_prog_info_node *info_node);
bool perf_env__insert_bpf_prog_info(struct perf_env *env,
struct bpf_prog_info_node *info_node);
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
__u32 prog_id);
+void perf_env__iterate_bpf_prog_info(struct perf_env *env,
+ void (*cb)(struct bpf_prog_info_node *node,
+ void *data),
+ void *data);
bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id);
+#endif // HAVE_LIBBPF_SUPPORT
int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu);
char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
@@ -196,5 +201,7 @@ void perf_env__find_br_cntr_info(struct perf_env *env,
bool x86__is_amd_cpu(void);
bool perf_env__is_x86_amd_cpu(struct perf_env *env);
+bool x86__is_intel_cpu(void);
+bool perf_env__is_x86_intel_cpu(struct perf_env *env);
#endif /* __PERF_ENV_H */
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index c23b77f8f854..4c92cc1a952c 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -1,9 +1,12 @@
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
+#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <perf/cpumap.h>
+#include <perf/event.h>
+#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
@@ -58,6 +61,7 @@ static const char *perf_event__names[] = {
[PERF_RECORD_CGROUP] = "CGROUP",
[PERF_RECORD_TEXT_POKE] = "TEXT_POKE",
[PERF_RECORD_AUX_OUTPUT_HW_ID] = "AUX_OUTPUT_HW_ID",
+ [PERF_RECORD_CALLCHAIN_DEFERRED] = "CALLCHAIN_DEFERRED",
[PERF_RECORD_HEADER_ATTR] = "ATTR",
[PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
[PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
@@ -77,6 +81,8 @@ static const char *perf_event__names[] = {
[PERF_RECORD_HEADER_FEATURE] = "FEATURE",
[PERF_RECORD_COMPRESSED] = "COMPRESSED",
[PERF_RECORD_FINISHED_INIT] = "FINISHED_INIT",
+ [PERF_RECORD_COMPRESSED2] = "COMPRESSED2",
+ [PERF_RECORD_BPF_METADATA] = "BPF_METADATA",
};
const char *perf_event__name(unsigned int id)
@@ -329,7 +335,7 @@ size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
build_id__init(&bid, event->mmap2.build_id,
event->mmap2.build_id_size);
- build_id__sprintf(&bid, sbuild_id);
+ build_id__snprintf(&bid, sbuild_id, sizeof(sbuild_id));
return fprintf(fp, " %d/%d: [%#" PRI_lx64 "(%#" PRI_lx64 ") @ %#" PRI_lx64
" <%s>]: %c%c%c%c %s\n",
@@ -448,12 +454,13 @@ int perf_event__exit_del_thread(const struct perf_tool *tool __maybe_unused,
size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
{
- return fprintf(fp, " offset: %#"PRI_lx64" size: %#"PRI_lx64" flags: %#"PRI_lx64" [%s%s%s]\n",
+ return fprintf(fp, " offset: %#"PRI_lx64" size: %#"PRI_lx64" flags: %#"PRI_lx64" [%s%s%s%s]\n",
event->aux.aux_offset, event->aux.aux_size,
event->aux.flags,
event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "",
- event->aux.flags & PERF_AUX_FLAG_PARTIAL ? "P" : "");
+ event->aux.flags & PERF_AUX_FLAG_PARTIAL ? "P" : "",
+ event->aux.flags & PERF_AUX_FLAG_COLLISION ? "C" : "");
}
size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
@@ -503,6 +510,20 @@ size_t perf_event__fprintf_bpf(union perf_event *event, FILE *fp)
event->bpf.type, event->bpf.flags, event->bpf.id);
}
+size_t perf_event__fprintf_bpf_metadata(union perf_event *event, FILE *fp)
+{
+ struct perf_record_bpf_metadata *metadata = &event->bpf_metadata;
+ size_t ret;
+
+ ret = fprintf(fp, " prog %s\n", metadata->prog_name);
+ for (__u32 i = 0; i < metadata->nr_entries; i++) {
+ ret += fprintf(fp, " entry %d: %20s = %s\n", i,
+ metadata->entries[i].key,
+ metadata->entries[i].value);
+ }
+ return ret;
+}
+
static int text_poke_printer(enum binary_printer_ops op, unsigned int val,
void *extra, FILE *fp)
{
@@ -600,6 +621,9 @@ size_t perf_event__fprintf(union perf_event *event, struct machine *machine, FIL
case PERF_RECORD_AUX_OUTPUT_HW_ID:
ret += perf_event__fprintf_aux_output_hw_id(event, fp);
break;
+ case PERF_RECORD_BPF_METADATA:
+ ret += perf_event__fprintf_bpf_metadata(event, fp);
+ break;
default:
ret += fprintf(fp, "\n");
}
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 664bf39567ce..64c63b59d617 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -117,6 +117,7 @@ enum perf_synth_id {
PERF_SYNTH_INTEL_PSB,
PERF_SYNTH_INTEL_EVT,
PERF_SYNTH_INTEL_IFLAG_CHG,
+ PERF_SYNTH_POWERPC_VPA_DTL,
};
/*
@@ -254,6 +255,25 @@ struct perf_synth_intel_iflag_chg {
u64 branch_ip; /* If via_branch */
};
+/*
+ * The powerpc VPA DTL entries have the following format.
+ */
+struct powerpc_vpadtl_entry {
+ u8 dispatch_reason;
+ u8 preempt_reason;
+ u16 processor_id;
+ u32 enqueue_to_dispatch_time;
+ u32 ready_to_enqueue_time;
+ u32 waiting_to_ready_time;
+ u64 timebase;
+ u64 fault_addr;
+ u64 srr0;
+ u64 srr1;
+};
+
+extern const char *dispatch_reasons[11];
+extern const char *preempt_reasons[10];
+
static inline void *perf_synth__raw_data(void *p)
{
return p + 4;
@@ -370,6 +390,7 @@ size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_cgroup(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_ksymbol(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_bpf(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_bpf_metadata(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_text_poke(union perf_event *event, struct machine *machine,FILE *fp);
size_t perf_event__fprintf(union perf_event *event, struct machine *machine, FILE *fp);
@@ -390,11 +411,6 @@ extern unsigned int proc_map_timeout;
#define PAGE_SIZE_NAME_LEN 32
char *get_page_size_name(u64 size, char *str);
-void arch_perf_parse_sample_weight(struct perf_sample *data, const __u64 *array, u64 type);
-void arch_perf_synthesize_sample_weight(const struct perf_sample *data, __u64 *array, u64 type);
-const char *arch_perf_header_entry(const char *se_header);
-int arch_support_sort_key(const char *sort_key);
-
static inline bool perf_event_header__cpumode_is_guest(u8 cpumode)
{
return cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index c1a04141aed0..03674d2cbd01 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -35,6 +35,8 @@
#include "util/util.h"
#include "util/env.h"
#include "util/intel-tpebs.h"
+#include "util/metricgroup.h"
+#include "util/strbuf.h"
#include <signal.h>
#include <unistd.h>
#include <sched.h>
@@ -82,6 +84,8 @@ void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
evlist->ctl_fd.ack = -1;
evlist->ctl_fd.pos = -1;
evlist->nr_br_cntr = -1;
+ metricgroup__rblist_init(&evlist->metric_events);
+ INIT_LIST_HEAD(&evlist->deferred_samples);
}
struct evlist *evlist__new(void)
@@ -98,16 +102,24 @@ struct evlist *evlist__new_default(void)
{
struct evlist *evlist = evlist__new();
bool can_profile_kernel;
- int err;
+ struct perf_pmu *pmu = NULL;
if (!evlist)
return NULL;
can_profile_kernel = perf_event_paranoid_check(1);
- err = parse_event(evlist, can_profile_kernel ? "cycles:P" : "cycles:Pu");
- if (err) {
- evlist__delete(evlist);
- return NULL;
+
+ while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
+ char buf[256];
+ int err;
+
+ snprintf(buf, sizeof(buf), "%s/cycles/%s", pmu->name,
+ can_profile_kernel ? "P" : "Pu");
+ err = parse_event(evlist, buf);
+ if (err) {
+ evlist__delete(evlist);
+ return NULL;
+ }
}
if (evlist->core.nr_entries > 1) {
@@ -172,6 +184,7 @@ static void evlist__purge(struct evlist *evlist)
void evlist__exit(struct evlist *evlist)
{
+ metricgroup__rblist_exit(&evlist->metric_events);
event_enable_timer__exit(&evlist->eet);
zfree(&evlist->mmap);
zfree(&evlist->overwrite_mmap);
@@ -183,7 +196,6 @@ void evlist__delete(struct evlist *evlist)
if (evlist == NULL)
return;
- tpebs_delete();
evlist__free_stats(evlist);
evlist__munmap(evlist);
evlist__close(evlist);
@@ -1006,8 +1018,7 @@ int evlist__create_maps(struct evlist *evlist, struct target *target)
* per-thread data. thread_map__new_str will call
* thread_map__new_all_cpus to enumerate all threads.
*/
- threads = thread_map__new_str(target->pid, target->tid, target->uid,
- all_threads);
+ threads = thread_map__new_str(target->pid, target->tid, all_threads);
if (!threads)
return -1;
@@ -2468,23 +2479,36 @@ struct evsel *evlist__find_evsel(struct evlist *evlist, int idx)
return NULL;
}
-int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf)
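+/*
+ * Format the evsel names into sb, wrapping the members of an event group in
+ * braces, e.g. (illustrative) "cycles,{instructions,branches}". The output
+ * is truncated with "..." once max_length is exceeded.
+ */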
+void evlist__format_evsels(struct evlist *evlist, struct strbuf *sb, size_t max_length)
{
- struct evsel *evsel;
- int printed = 0;
+ struct evsel *evsel, *leader = NULL;
+ bool first = true;
evlist__for_each_entry(evlist, evsel) {
+ struct evsel *new_leader = evsel__leader(evsel);
+
if (evsel__is_dummy_event(evsel))
continue;
- if (size > (strlen(evsel__name(evsel)) + (printed ? 2 : 1))) {
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "," : "", evsel__name(evsel));
- } else {
- printed += scnprintf(bf + printed, size - printed, "%s...", printed ? "," : "");
- break;
+
+ if (leader != new_leader && leader && leader->core.nr_members > 1)
+ strbuf_addch(sb, '}');
+
+ if (!first)
+ strbuf_addch(sb, ',');
+
+ if (sb->len > max_length) {
+ strbuf_addstr(sb, "...");
+ return;
}
- }
+ if (leader != new_leader && new_leader->core.nr_members > 1)
+ strbuf_addch(sb, '{');
- return printed;
+ strbuf_addstr(sb, evsel__name(evsel));
+ first = false;
+ leader = new_leader;
+ }
+ if (leader && leader->core.nr_members > 1)
+ strbuf_addch(sb, '}');
}
void evlist__check_mem_load_aux(struct evlist *evlist)
@@ -2534,52 +2558,61 @@ void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_lis
return;
evlist__for_each_entry(evlist, pos) {
- struct perf_cpu_map *intersect, *to_test, *online = cpu_map__online();
- const struct perf_pmu *pmu = evsel__find_pmu(pos);
+ evsel__warn_user_requested_cpus(pos, user_requested_cpus);
+ }
+ perf_cpu_map__put(user_requested_cpus);
+}
- to_test = pmu && pmu->is_core ? pmu->cpus : online;
- intersect = perf_cpu_map__intersect(to_test, user_requested_cpus);
- if (!perf_cpu_map__equal(intersect, user_requested_cpus)) {
- char buf[128];
+/* Should uniquify be disabled for the evlist? */
+static bool evlist__disable_uniquify(const struct evlist *evlist)
+{
+ struct evsel *counter;
+ struct perf_pmu *last_pmu = NULL;
+ bool first = true;
- cpu_map__snprint(to_test, buf, sizeof(buf));
- pr_warning("WARNING: A requested CPU in '%s' is not supported by PMU '%s' (CPUs %s) for event '%s'\n",
- cpu_list, pmu ? pmu->name : "cpu", buf, evsel__name(pos));
+ evlist__for_each_entry(evlist, counter) {
+ /* If PMUs vary then uniquify can be useful. */
+ if (!first && counter->pmu != last_pmu)
+ return false;
+ first = false;
+ if (counter->pmu) {
+ /* Allow uniquify for uncore PMUs. */
+ if (!counter->pmu->is_core)
+ return false;
+ /* Keep hybrid event names uniquified for clarity. */
+ if (perf_pmus__num_core_pmus() > 1)
+ return false;
}
- perf_cpu_map__put(intersect);
- perf_cpu_map__put(online);
+ last_pmu = counter->pmu;
}
- perf_cpu_map__put(user_requested_cpus);
+ return true;
}
-void evlist__uniquify_name(struct evlist *evlist)
+static bool evlist__set_needs_uniquify(struct evlist *evlist, const struct perf_stat_config *config)
{
- char *new_name, empty_attributes[2] = ":", *attributes;
- struct evsel *pos;
-
- if (perf_pmus__num_core_pmus() == 1)
- return;
+ struct evsel *counter;
+ bool needs_uniquify = false;
- evlist__for_each_entry(evlist, pos) {
- if (!evsel__is_hybrid(pos))
- continue;
+ if (evlist__disable_uniquify(evlist)) {
+ evlist__for_each_entry(evlist, counter)
+ counter->uniquified_name = true;
+ return false;
+ }
- if (strchr(pos->name, '/'))
- continue;
+ evlist__for_each_entry(evlist, counter) {
+ if (evsel__set_needs_uniquify(counter, config))
+ needs_uniquify = true;
+ }
+ return needs_uniquify;
+}
- attributes = strchr(pos->name, ':');
- if (attributes)
- *attributes = '\0';
- else
- attributes = empty_attributes;
+void evlist__uniquify_evsel_names(struct evlist *evlist, const struct perf_stat_config *config)
+{
+ if (evlist__set_needs_uniquify(evlist, config)) {
+ struct evsel *pos;
- if (asprintf(&new_name, "%s/%s/%s", pos->pmu ? pos->pmu->name : "",
- pos->name, attributes + 1)) {
- free(pos->name);
- pos->name = new_name;
- } else {
- *attributes = ':';
- }
+ evlist__for_each_entry(evlist, pos)
+ evsel__uniquify_counter(pos);
}
}
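A compact sketch of the disable rule above, assuming a reduced pmu struct: uniquification is skipped only when every counter sits on the same, single core PMU.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct pmu { const char *name; bool is_core; };

static bool disable_uniquify(const struct pmu **pmus, size_t n, int num_core_pmus)
{
	const struct pmu *last = NULL;

	for (size_t i = 0; i < n; i++) {
		if (i && pmus[i] != last)
			return false;		/* mixed PMUs: keep uniquify */
		if (pmus[i]) {
			if (!pmus[i]->is_core)
				return false;	/* uncore: keep uniquify */
			if (num_core_pmus > 1)
				return false;	/* hybrid: keep uniquify */
		}
		last = pmus[i];
	}
	return true;
}

int main(void)
{
	struct pmu core = { "cpu", true }, uncore = { "uncore_imc", false };
	const struct pmu *same[] = { &core, &core };
	const struct pmu *mixed[] = { &core, &uncore };

	printf("%d %d\n", disable_uniquify(same, 2, 1),
	       disable_uniquify(mixed, 2, 1));	/* prints: 1 0 */
	return 0;
}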
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index edcbf1c10e92..911834ae7c2a 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -12,6 +12,7 @@
#include <perf/evlist.h>
#include "events_stats.h"
#include "evsel.h"
+#include "rblist.h"
#include <pthread.h>
#include <signal.h>
#include <unistd.h>
@@ -19,7 +20,9 @@
struct pollfd;
struct thread_map;
struct perf_cpu_map;
+struct perf_stat_config;
struct record_opts;
+struct strbuf;
struct target;
/*
@@ -68,7 +71,7 @@ struct evlist {
struct mmap *overwrite_mmap;
struct evsel *selected;
struct events_stats stats;
- struct perf_env *env;
+ struct perf_session *session;
void (*trace_event_sample_raw)(struct evlist *evlist,
union perf_event *event,
struct perf_sample *sample);
@@ -84,6 +87,13 @@ struct evlist {
int pos; /* index at evlist core object to check signals */
} ctl_fd;
struct event_enable_timer *eet;
+ /**
+ * @metric_events: A list of struct metric_event which each have a list
+ * of struct metric_expr.
+ */
+ struct rblist metric_events;
+ /* samples with deferred_callchain would wait here. */
+ struct list_head deferred_samples;
};
struct evsel_str_handler {
@@ -103,6 +113,7 @@ void evlist__add(struct evlist *evlist, struct evsel *entry);
void evlist__remove(struct evlist *evlist, struct evsel *evsel);
int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs);
+int arch_evlist__add_required_events(struct list_head *list);
int evlist__add_dummy(struct evlist *evlist);
struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide);
@@ -430,10 +441,10 @@ int event_enable_timer__process(struct event_enable_timer *eet);
struct evsel *evlist__find_evsel(struct evlist *evlist, int idx);
-int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf);
+void evlist__format_evsels(struct evlist *evlist, struct strbuf *sb, size_t max_length);
void evlist__check_mem_load_aux(struct evlist *evlist);
void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_list);
-void evlist__uniquify_name(struct evlist *evlist);
+void evlist__uniquify_evsel_names(struct evlist *evlist, const struct perf_stat_config *config);
bool evlist__has_bpf_output(struct evlist *evlist);
bool evlist__needs_bpf_sb_event(struct evlist *evlist);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 3c030da2e477..9cd706f62793 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -48,6 +48,7 @@
#include "record.h"
#include "debug.h"
#include "trace-event.h"
+#include "session.h"
#include "stat.h"
#include "string2.h"
#include "memswap.h"
@@ -56,8 +57,10 @@
#include "off_cpu.h"
#include "pmu.h"
#include "pmus.h"
+#include "drm_pmu.h"
#include "hwmon_pmu.h"
#include "tool_pmu.h"
+#include "tp_pmu.h"
#include "rlimit.h"
#include "../perf-sys.h"
#include "util/parse-branch-options.h"
@@ -399,11 +402,11 @@ void evsel__init(struct evsel *evsel,
evsel->sample_size = __evsel__sample_size(attr->sample_type);
evsel__calc_id_pos(evsel);
evsel->cmdline_group_boundary = false;
- evsel->metric_events = NULL;
evsel->per_pkg_mask = NULL;
evsel->collect_stat = false;
evsel->group_pmu_name = NULL;
evsel->skippable = false;
+ evsel->supported = true;
evsel->alternate_hw_config = PERF_COUNT_HW_MAX;
evsel->script_output_type = -1; // FIXME: OUTPUT_TYPE_UNSET, see builtin-script.c
}
@@ -487,7 +490,7 @@ struct evsel *evsel__clone(struct evsel *dest, struct evsel *orig)
return NULL;
evsel->core.cpus = perf_cpu_map__get(orig->core.cpus);
- evsel->core.own_cpus = perf_cpu_map__get(orig->core.own_cpus);
+ evsel->core.pmu_cpus = perf_cpu_map__get(orig->core.pmu_cpus);
evsel->core.threads = perf_thread_map__get(orig->core.threads);
evsel->core.nr_members = orig->core.nr_members;
evsel->core.system_wide = orig->core.system_wide;
@@ -535,6 +538,7 @@ struct evsel *evsel__clone(struct evsel *dest, struct evsel *orig)
#endif
evsel->handler = orig->handler;
evsel->core.leader = orig->core.leader;
+ evsel->metric_leader = orig->metric_leader;
evsel->max_events = orig->max_events;
zfree(&evsel->unit);
@@ -552,11 +556,11 @@ struct evsel *evsel__clone(struct evsel *dest, struct evsel *orig)
evsel->exclude_GH = orig->exclude_GH;
evsel->sample_read = orig->sample_read;
- evsel->auto_merge_stats = orig->auto_merge_stats;
evsel->collect_stat = orig->collect_stat;
evsel->weak_group = orig->weak_group;
evsel->use_config_name = orig->use_config_name;
evsel->pmu = orig->pmu;
+ evsel->first_wildcard_match = orig->first_wildcard_match;
if (evsel__copy_config_terms(evsel, orig) < 0)
goto out_err;
@@ -570,24 +574,6 @@ out_err:
return NULL;
}
-static int trace_event__id(const char *sys, const char *name)
-{
- char *tp_dir = get_events_file(sys);
- char path[PATH_MAX];
- int id, err;
-
- if (!tp_dir)
- return -1;
-
- scnprintf(path, PATH_MAX, "%s/%s/id", tp_dir, name);
- put_events_file(tp_dir);
- err = filename__read_int(path, &id);
- if (err)
- return err;
-
- return id;
-}
-
/*
* Returns pointer with encoded error via <linux/err.h> interface.
*/
@@ -621,7 +607,7 @@ struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx, bool
event_attr_init(&attr);
if (format) {
- id = trace_event__id(sys, name);
+ id = tp_pmu__id(sys, name);
if (id < 0) {
err = id;
goto out_free;
@@ -1080,6 +1066,9 @@ static void __evsel__config_callchain(struct evsel *evsel, struct record_opts *o
pr_info("Disabling user space callchains for function trace event.\n");
attr->exclude_callchain_user = 1;
}
+
+ if (param->defer && !attr->exclude_callchain_user)
+ attr->defer_callchain = 1;
}
void evsel__config_callchain(struct evsel *evsel, struct record_opts *opts,
@@ -1106,6 +1095,71 @@ static void evsel__reset_callgraph(struct evsel *evsel, struct callchain_param *
}
}
+static void evsel__apply_ratio_to_prev(struct evsel *evsel,
+ struct perf_event_attr *attr,
+ struct record_opts *opts,
+ const char *buf)
+{
+ struct perf_event_attr *prev_attr = NULL;
+ struct evsel *evsel_prev = NULL;
+ u64 type = evsel->core.attr.sample_type;
+ u64 prev_type = 0;
+ double rtp;
+
+ rtp = strtod(buf, NULL);
+ if (rtp <= 0) {
+ pr_err("Invalid ratio-to-prev value %lf\n", rtp);
+ return;
+ }
+ if (evsel == evsel__leader(evsel)) {
+ pr_err("Invalid use of ratio-to-prev term without preceding element in group\n");
+ return;
+ }
+ if (!evsel->pmu->is_core) {
+ pr_err("Event using ratio-to-prev term must have a core PMU\n");
+ return;
+ }
+
+ evsel_prev = evsel__prev(evsel);
+ if (!evsel_prev) {
+ pr_err("Previous event does not exist.\n");
+ return;
+ }
+
+ if (evsel_prev->pmu->type != evsel->pmu->type) {
+ pr_err("Compared events (\"%s\", \"%s\") must have same PMU\n",
+ evsel->name, evsel_prev->name);
+ return;
+ }
+
+ prev_attr = &evsel_prev->core.attr;
+ prev_type = evsel_prev->core.attr.sample_type;
+
+ if (!(prev_type & PERF_SAMPLE_PERIOD)) {
+ attr->sample_period = prev_attr->sample_period * rtp;
+ attr->freq = 0;
+ evsel__reset_sample_bit(evsel, PERIOD);
+ } else if (!(type & PERF_SAMPLE_PERIOD)) {
+ prev_attr->sample_period = attr->sample_period / rtp;
+ prev_attr->freq = 0;
+ evsel__reset_sample_bit(evsel_prev, PERIOD);
+ } else {
+ if (opts->user_interval != ULLONG_MAX) {
+ prev_attr->sample_period = opts->user_interval;
+ attr->sample_period = prev_attr->sample_period * rtp;
+ prev_attr->freq = 0;
+ attr->freq = 0;
+ evsel__reset_sample_bit(evsel_prev, PERIOD);
+ evsel__reset_sample_bit(evsel, PERIOD);
+ } else {
+ pr_err("Event period term or count (-c) must be set when using ratio-to-prev term.\n");
+ return;
+ }
+ }
+
+ arch_evsel__apply_ratio_to_prev(evsel, attr);
+}
+
static void evsel__apply_config_terms(struct evsel *evsel,
struct record_opts *opts, bool track)
{
@@ -1119,6 +1173,7 @@ static void evsel__apply_config_terms(struct evsel *evsel,
u32 dump_size = 0;
int max_stack = 0;
const char *callgraph_buf = NULL;
+ const char *rtp_buf = NULL;
list_for_each_entry(term, config_terms, list) {
switch (term->type) {
@@ -1189,6 +1244,9 @@ static void evsel__apply_config_terms(struct evsel *evsel,
break;
case EVSEL__CONFIG_TERM_CFG_CHG:
break;
+ case EVSEL__CONFIG_TERM_RATIO_TO_PREV:
+ rtp_buf = term->val.str;
+ break;
default:
break;
}
@@ -1240,6 +1298,8 @@ static void evsel__apply_config_terms(struct evsel *evsel,
evsel__config_callchain(evsel, opts, &param);
}
}
+ if (rtp_buf)
+ evsel__apply_ratio_to_prev(evsel, attr, opts, rtp_buf);
}
struct evsel_config_term *__evsel__get_config_term(struct evsel *evsel, enum evsel_term_type type)
@@ -1264,6 +1324,11 @@ void __weak arch__post_evsel_config(struct evsel *evsel __maybe_unused,
{
}
+void __weak arch_evsel__apply_ratio_to_prev(struct evsel *evsel __maybe_unused,
+ struct perf_event_attr *attr __maybe_unused)
+{
+}
+
static void evsel__set_default_freq_period(struct record_opts *opts,
struct perf_event_attr *attr)
{
@@ -1275,9 +1340,10 @@ static void evsel__set_default_freq_period(struct record_opts *opts,
}
}
-static bool evsel__is_offcpu_event(struct evsel *evsel)
+bool evsel__is_offcpu_event(struct evsel *evsel)
{
- return evsel__is_bpf_output(evsel) && evsel__name_is(evsel, OFFCPU_EVENT);
+ return evsel__is_bpf_output(evsel) && evsel__name_is(evsel, OFFCPU_EVENT) &&
+ evsel->core.attr.sample_type & PERF_SAMPLE_RAW;
}
/*
@@ -1425,7 +1491,7 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
evsel__set_sample_bit(evsel, CPU);
}
- if (opts->sample_address)
+ if (opts->sample_data_src)
evsel__set_sample_bit(evsel, DATA_SRC);
if (opts->sample_phys_addr)
@@ -1440,14 +1506,16 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
attr->branch_sample_type = opts->branch_stack;
}
- if (opts->sample_weight)
+ if (opts->sample_weight || evsel->retire_lat) {
arch_evsel__set_sample_weight(evsel);
-
+ evsel->retire_lat = false;
+ }
attr->task = track;
attr->mmap = track;
attr->mmap2 = track && !perf_missing_features.mmap2;
attr->comm = track;
attr->build_id = track && opts->build_id;
+ attr->defer_output = track && callchain && callchain->defer;
/*
* ksymbol is tracked separately with text poke because it needs to be
@@ -1524,7 +1592,7 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
attr->exclude_user = 1;
}
- if (evsel->core.own_cpus || evsel->unit)
+ if (evsel->core.pmu_cpus || evsel->unit)
evsel->core.attr.read_format |= PERF_FORMAT_ID;
/*
@@ -1554,8 +1622,10 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
if (evsel__is_dummy_event(evsel))
evsel__reset_sample_bit(evsel, BRANCH_STACK);
- if (evsel__is_offcpu_event(evsel))
+ if (evsel__is_offcpu_event(evsel)) {
evsel->core.attr.sample_type &= OFFCPU_SAMPLE_TYPES;
+ attr->inherit = 0;
+ }
arch__post_evsel_config(evsel, attr);
}
@@ -1652,10 +1722,21 @@ static void evsel__free_config_terms(struct evsel *evsel)
free_config_terms(&evsel->config_terms);
}
+static void (*evsel__priv_destructor)(void *priv);
+
+void evsel__set_priv_destructor(void (*destructor)(void *priv))
+{
+ assert(evsel__priv_destructor == NULL);
+
+ evsel__priv_destructor = destructor;
+}
+
void evsel__exit(struct evsel *evsel)
{
assert(list_empty(&evsel->core.node));
assert(evsel->evlist == NULL);
+ if (evsel__is_retire_lat(evsel))
+ evsel__tpebs_close(evsel);
bpf_counter__destroy(evsel);
perf_bpf_filter__destroy(evsel);
evsel__free_counts(evsel);
@@ -1663,9 +1744,7 @@ void evsel__exit(struct evsel *evsel)
perf_evsel__free_id(&evsel->core);
evsel__free_config_terms(evsel);
cgroup__put(evsel->cgrp);
- perf_cpu_map__put(evsel->core.cpus);
- perf_cpu_map__put(evsel->core.own_cpus);
- perf_thread_map__put(evsel->core.threads);
+ perf_evsel__exit(&evsel->core);
zfree(&evsel->group_name);
zfree(&evsel->name);
#ifdef HAVE_LIBTRACEEVENT
@@ -1679,7 +1758,8 @@ void evsel__exit(struct evsel *evsel)
evsel__zero_per_pkg(evsel);
hashmap__free(evsel->per_pkg_mask);
evsel->per_pkg_mask = NULL;
- zfree(&evsel->metric_events);
+ if (evsel__priv_destructor)
+ evsel__priv_destructor(evsel->priv);
perf_evsel__object.fini(evsel);
if (evsel__tool_event(evsel) == TOOL_PMU__EVENT_SYSTEM_TIME ||
evsel__tool_event(evsel) == TOOL_PMU__EVENT_USER_TIME)
@@ -1718,11 +1798,6 @@ static int evsel__read_one(struct evsel *evsel, int cpu_map_idx, int thread)
return perf_evsel__read(&evsel->core, cpu_map_idx, thread, count);
}
-static int evsel__read_retire_lat(struct evsel *evsel, int cpu_map_idx, int thread)
-{
- return tpebs_set_evsel(evsel, cpu_map_idx, thread);
-}
-
static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread,
u64 val, u64 ena, u64 run, u64 lost)
{
@@ -1730,8 +1805,8 @@ static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread,
count = perf_counts(counter->counts, cpu_map_idx, thread);
- if (counter->retire_lat) {
- evsel__read_retire_lat(counter, cpu_map_idx, thread);
+ if (evsel__is_retire_lat(counter)) {
+ evsel__tpebs_read(counter, cpu_map_idx, thread);
perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, true);
return;
}
@@ -1868,16 +1943,19 @@ bool __evsel__match(const struct evsel *evsel, u32 type, u64 config)
u32 e_type = evsel->core.attr.type;
u64 e_config = evsel->core.attr.config;
- if (e_type != type) {
- return type == PERF_TYPE_HARDWARE && evsel->pmu && evsel->pmu->is_core &&
- evsel->alternate_hw_config == config;
- }
-
- if ((type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE) &&
- perf_pmus__supports_extended_type())
+ if (e_type == type && e_config == config)
+ return true;
+ if (type != PERF_TYPE_HARDWARE && type != PERF_TYPE_HW_CACHE)
+ return false;
+ if ((e_type == PERF_TYPE_HARDWARE || e_type == PERF_TYPE_HW_CACHE) &&
+ perf_pmus__supports_extended_type())
e_config &= PERF_HW_EVENT_MASK;
-
- return e_config == config;
+ if (e_type == type && e_config == config)
+ return true;
+ if (type == PERF_TYPE_HARDWARE && evsel->pmu && evsel->pmu->is_core &&
+ evsel->alternate_hw_config == config)
+ return true;
+ return false;
}
int evsel__read_counter(struct evsel *evsel, int cpu_map_idx, int thread)
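A sketch of the masking step in the rewritten __evsel__match(): on systems with extended types, the PMU id lives in the high 32 bits of attr.config for legacy hardware events, so comparisons strip it first. The constants mirror the uapi definitions:

#include <stdio.h>
#include <inttypes.h>

#define PERF_HW_EVENT_MASK	0xffffffffULL
#define PERF_PMU_TYPE_SHIFT	32

int main(void)
{
	/* Hypothetical: PMU id 42 encoded above a legacy config of 0x3. */
	uint64_t config = ((uint64_t)42 << PERF_PMU_TYPE_SHIFT) | 0x3;

	printf("raw=0x%" PRIx64 " masked=0x%" PRIx64 "\n",
	       config, config & PERF_HW_EVENT_MASK);
	return 0;
}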
@@ -1888,8 +1966,11 @@ int evsel__read_counter(struct evsel *evsel, int cpu_map_idx, int thread)
if (evsel__is_hwmon(evsel))
return evsel__hwmon_pmu_read(evsel, cpu_map_idx, thread);
+ if (evsel__is_drm(evsel))
+ return evsel__drm_pmu_read(evsel, cpu_map_idx, thread);
+
if (evsel__is_retire_lat(evsel))
- return evsel__read_retire_lat(evsel, cpu_map_idx, thread);
+ return evsel__tpebs_read(evsel, cpu_map_idx, thread);
if (evsel->core.attr.read_format & PERF_FORMAT_GROUP)
return evsel__read_group(evsel, cpu_map_idx, thread);
@@ -1943,7 +2024,7 @@ static int get_group_fd(struct evsel *evsel, int cpu_map_idx, int thread)
struct evsel *leader = evsel__leader(evsel);
int fd;
- if (evsel__is_group_leader(evsel))
+ if (!evsel->supported || evsel__is_group_leader(evsel))
return -1;
/*
@@ -1957,7 +2038,7 @@ static int get_group_fd(struct evsel *evsel, int cpu_map_idx, int thread)
return -1;
fd = FD(leader, cpu_map_idx, thread);
- BUG_ON(fd == -1 && !leader->skippable);
+ BUG_ON(fd == -1 && leader->supported);
/*
* When the leader has been skipped, return -2 to distinguish from no
@@ -2123,6 +2204,10 @@ static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
static void evsel__disable_missing_features(struct evsel *evsel)
{
+ if (perf_missing_features.defer_callchain && evsel->core.attr.defer_callchain)
+ evsel->core.attr.defer_callchain = 0;
+ if (perf_missing_features.defer_callchain && evsel->core.attr.defer_output)
+ evsel->core.attr.defer_output = 0;
if (perf_missing_features.inherit_sample_read && evsel->core.attr.inherit &&
(evsel->core.attr.sample_type & PERF_SAMPLE_READ))
evsel->core.attr.inherit = 0;
@@ -2397,8 +2482,15 @@ static bool evsel__detect_missing_features(struct evsel *evsel, struct perf_cpu
/* Please add new feature detection here. */
+ attr.defer_callchain = true;
+ if (has_attr_feature(&attr, /*flags=*/0))
+ goto found;
+ perf_missing_features.defer_callchain = true;
+ pr_debug2("switching off deferred callchain support\n");
+ attr.defer_callchain = false;
+
attr.inherit = true;
- attr.sample_type = PERF_SAMPLE_READ;
+ attr.sample_type = PERF_SAMPLE_READ | PERF_SAMPLE_TID;
if (has_attr_feature(&attr, /*flags=*/0))
goto found;
perf_missing_features.inherit_sample_read = true;
@@ -2508,6 +2600,10 @@ found:
errno = old_errno;
check:
+ if ((evsel->core.attr.defer_callchain || evsel->core.attr.defer_output) &&
+ perf_missing_features.defer_callchain)
+ return true;
+
if (evsel->core.attr.inherit &&
(evsel->core.attr.sample_type & PERF_SAMPLE_READ) &&
perf_missing_features.inherit_sample_read)
@@ -2575,12 +2671,14 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
enum rlimit_action set_rlimit = NO_CHANGE;
struct perf_cpu cpu;
- if (evsel__is_retire_lat(evsel))
- return tpebs_start(evsel->evlist);
+ if (evsel__is_retire_lat(evsel)) {
+ err = evsel__tpebs_open(evsel);
+ goto out;
+ }
err = __evsel__prepare_open(evsel, cpus, threads);
if (err)
- return err;
+ goto out;
if (cpus == NULL)
cpus = empty_cpu_map;
@@ -2600,14 +2698,22 @@ fallback_missing_features:
display_attr(&evsel->core.attr);
if (evsel__is_tool(evsel)) {
- return evsel__tool_pmu_open(evsel, threads,
+ err = evsel__tool_pmu_open(evsel, threads,
+ start_cpu_map_idx,
+ end_cpu_map_idx);
+ goto out;
+ }
+ if (evsel__is_hwmon(evsel)) {
+ err = evsel__hwmon_pmu_open(evsel, threads,
start_cpu_map_idx,
end_cpu_map_idx);
+ goto out;
}
- if (evsel__is_hwmon(evsel)) {
- return evsel__hwmon_pmu_open(evsel, threads,
- start_cpu_map_idx,
- end_cpu_map_idx);
+ if (evsel__is_drm(evsel)) {
+ err = evsel__drm_pmu_open(evsel, threads,
+ start_cpu_map_idx,
+ end_cpu_map_idx);
+ goto out;
}
for (idx = start_cpu_map_idx; idx < end_cpu_map_idx; idx++) {
@@ -2686,7 +2792,8 @@ retry_open:
}
}
- return 0;
+ err = 0;
+ goto out;
try_fallback:
if (evsel__ignore_missing_thread(evsel, perf_cpu_map__nr(cpus),
@@ -2725,6 +2832,9 @@ out_close:
thread = nthreads;
} while (--idx >= 0);
errno = old_errno;
+out:
+ if (err)
+ evsel->supported = false;
return err;
}
@@ -2737,22 +2847,37 @@ int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
void evsel__close(struct evsel *evsel)
{
if (evsel__is_retire_lat(evsel))
- tpebs_delete();
+ evsel__tpebs_close(evsel);
perf_evsel__close(&evsel->core);
perf_evsel__free_id(&evsel->core);
}
-int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu_map_idx)
+int evsel__open_per_cpu_and_thread(struct evsel *evsel,
+ struct perf_cpu_map *cpus, int cpu_map_idx,
+ struct perf_thread_map *threads)
{
if (cpu_map_idx == -1)
- return evsel__open_cpu(evsel, cpus, NULL, 0, perf_cpu_map__nr(cpus));
+ return evsel__open_cpu(evsel, cpus, threads, 0, perf_cpu_map__nr(cpus));
+
+ return evsel__open_cpu(evsel, cpus, threads, cpu_map_idx, cpu_map_idx + 1);
+}
+
+int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu_map_idx)
+{
+ struct perf_thread_map *threads = thread_map__new_by_tid(-1);
+ int ret = evsel__open_per_cpu_and_thread(evsel, cpus, cpu_map_idx, threads);
- return evsel__open_cpu(evsel, cpus, NULL, cpu_map_idx, cpu_map_idx + 1);
+ perf_thread_map__put(threads);
+ return ret;
}
int evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads)
{
- return evsel__open(evsel, NULL, threads);
+ struct perf_cpu_map *cpus = perf_cpu_map__new_any_cpu();
+ int ret = evsel__open_per_cpu_and_thread(evsel, cpus, -1, threads);
+
+ perf_cpu_map__put(cpus);
+ return ret;
}
static int perf_evsel__parse_id_sample(const struct evsel *evsel,
@@ -2845,11 +2970,18 @@ perf_event__check_size(union perf_event *event, unsigned int sample_size)
return 0;
}
-void __weak arch_perf_parse_sample_weight(struct perf_sample *data,
- const __u64 *array,
- u64 type __maybe_unused)
+static void perf_parse_sample_weight(struct perf_sample *data, const __u64 *array, u64 type)
{
- data->weight = *array;
+ union perf_sample_weight weight;
+
+ weight.full = *array;
+ if (type & PERF_SAMPLE_WEIGHT_STRUCT) {
+ data->weight = weight.var1_dw;
+ data->ins_lat = weight.var2_w;
+ data->weight3 = weight.var3_w;
+ } else {
+ data->weight = weight.full;
+ }
}
u64 evsel__bitfield_swap_branch_flags(u64 value)
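A standalone sketch of the weight decode above, mirroring the little-endian layout of union perf_sample_weight (the uapi union also has a big-endian variant): with PERF_SAMPLE_WEIGHT_STRUCT the u64 splits into a 32-bit weight and two 16-bit latency fields.

#include <stdio.h>
#include <stdint.h>

union sample_weight {
	uint64_t full;
	struct {
		uint32_t var1_dw;	/* weight */
		uint16_t var2_w;	/* instruction latency */
		uint16_t var3_w;	/* e.g. retirement latency */
	};
};

int main(void)
{
	union sample_weight w = { .full = 0x0004000300000002ULL };

	printf("weight=%u ins_lat=%u weight3=%u\n",
	       w.var1_dw, w.var2_w, w.var3_w);	/* 2 3 4 on little-endian */
	return 0;
}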
@@ -2923,6 +3055,35 @@ static inline bool evsel__has_branch_counters(const struct evsel *evsel)
return false;
}
+static int __set_offcpu_sample(struct perf_sample *data)
+{
+ u64 *array = data->raw_data;
+ u32 max_size = data->raw_size, *p32;
+ const void *endp = (void *)array + max_size;
+
+ if (array == NULL)
+ return -EFAULT;
+
+ OVERFLOW_CHECK_u64(array);
+ p32 = (void *)array++;
+ data->pid = p32[0];
+ data->tid = p32[1];
+
+ OVERFLOW_CHECK_u64(array);
+ data->period = *array++;
+
+ OVERFLOW_CHECK_u64(array);
+ data->callchain = (struct ip_callchain *)array++;
+ OVERFLOW_CHECK(array, data->callchain->nr * sizeof(u64), max_size);
+ data->ip = data->callchain->ips[1];
+ array += data->callchain->nr;
+
+ OVERFLOW_CHECK_u64(array);
+ data->cgroup = *array;
+
+ return 0;
+}
+
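A synthetic walk of the raw off-cpu sample layout parsed by __set_offcpu_sample() above: a pid/tid u32 pair, a u64 period, a callchain (count plus entries), then a cgroup id. The buffer contents are made up; overflow checks are omitted.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t raw[] = {
		((uint64_t)124 << 32) | 123,	/* tid=124, pid=123 (little-endian) */
		5000000,			/* period */
		2, 0xdeadULL, 0xbeefULL,	/* callchain: nr=2, then ips */
		42,				/* cgroup id */
	};
	const uint64_t *p = raw;
	uint32_t pid = ((const uint32_t *)p)[0], tid = ((const uint32_t *)p)[1];

	p++;
	uint64_t period = *p++;
	uint64_t nr = *p++;
	const uint64_t *ips = p;

	p += nr;
	uint64_t cgroup = *p;

	/* The real code takes ips[1] as the sample ip. */
	printf("pid=%u tid=%u period=%llu ip=%#llx cgroup=%llu\n",
	       pid, tid, (unsigned long long)period,
	       (unsigned long long)ips[1], (unsigned long long)cgroup);
	return 0;
}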
int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
struct perf_sample *data)
{
@@ -2948,6 +3109,20 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
data->data_src = PERF_MEM_DATA_SRC_NONE;
data->vcpu = -1;
+ if (event->header.type == PERF_RECORD_CALLCHAIN_DEFERRED) {
+ const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);
+
+ data->callchain = (struct ip_callchain *)&event->callchain_deferred.nr;
+ if (data->callchain->nr > max_callchain_nr)
+ return -EFAULT;
+
+ data->deferred_cookie = event->callchain_deferred.cookie;
+
+ if (evsel->core.attr.sample_id_all)
+ perf_evsel__parse_id_sample(evsel, event, data);
+ return 0;
+ }
+
if (event->header.type != PERF_RECORD_SAMPLE) {
if (!evsel->core.attr.sample_id_all)
return 0;
@@ -3072,12 +3247,25 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
if (type & PERF_SAMPLE_CALLCHAIN) {
const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);
+ u64 callchain_nr;
OVERFLOW_CHECK_u64(array);
data->callchain = (struct ip_callchain *)array++;
- if (data->callchain->nr > max_callchain_nr)
+ callchain_nr = data->callchain->nr;
+ if (callchain_nr > max_callchain_nr)
return -EFAULT;
- sz = data->callchain->nr * sizeof(u64);
+ sz = callchain_nr * sizeof(u64);
+ /*
+ * Save the cookie for the deferred user callchain. The last 2
+ * entries in the callchain should be the context marker and the
+ * cookie. The cookie will be used to match PERF_RECORD_
+ * CALLCHAIN_DEFERRED later.
+ */
+ if (evsel->core.attr.defer_callchain && callchain_nr >= 2 &&
+ data->callchain->ips[callchain_nr - 2] == PERF_CONTEXT_USER_DEFERRED) {
+ data->deferred_cookie = data->callchain->ips[callchain_nr - 1];
+ data->deferred_callchain = true;
+ }
OVERFLOW_CHECK(array, sz, max_size);
array = (void *)array + sz;
}
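A sketch of the cookie scan above: when the next-to-last callchain entry is the PERF_CONTEXT_USER_DEFERRED marker, the final entry is a cookie used to pair this sample with a later deferred-callchain record. The marker value below is a placeholder, not the uapi constant.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define CONTEXT_USER_DEFERRED ((uint64_t)-640)	/* placeholder value */

int main(void)
{
	uint64_t ips[] = { 0xffffffff81000000ULL, CONTEXT_USER_DEFERRED, 0x1234 };
	uint64_t nr = 3, cookie = 0;
	bool deferred = false;

	if (nr >= 2 && ips[nr - 2] == CONTEXT_USER_DEFERRED) {
		cookie = ips[nr - 1];
		deferred = true;
	}
	printf("deferred=%d cookie=%#llx\n", deferred, (unsigned long long)cookie);
	return 0;
}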
@@ -3206,7 +3394,7 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
if (type & PERF_SAMPLE_WEIGHT_TYPE) {
OVERFLOW_CHECK_u64(array);
- arch_perf_parse_sample_weight(data, array, type);
+ perf_parse_sample_weight(data, array, type);
array++;
}
@@ -3277,6 +3465,9 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
array = (void *)array + sz;
}
+ if (evsel__is_offcpu_event(evsel))
+ return __set_offcpu_sample(data);
+
return 0;
}
@@ -3505,7 +3696,7 @@ bool evsel__fallback(struct evsel *evsel, struct target *target, int err,
/* If event has exclude user then don't exclude kernel. */
if (evsel->core.attr.exclude_user)
- return false;
+ goto no_fallback;
/* Is there already the separator in the name. */
if (strchr(name, '/') ||
@@ -3513,7 +3704,7 @@ bool evsel__fallback(struct evsel *evsel, struct target *target, int err,
sep = "";
if (asprintf(&new_name, "%s%su", name, sep) < 0)
- return false;
+ goto no_fallback;
free(evsel->name);
evsel->name = new_name;
@@ -3536,17 +3727,19 @@ bool evsel__fallback(struct evsel *evsel, struct target *target, int err,
sep = "";
if (asprintf(&new_name, "%s%sH", name, sep) < 0)
- return false;
+ goto no_fallback;
free(evsel->name);
evsel->name = new_name;
/* Apple M1 requires exclude_guest */
- scnprintf(msg, msgsize, "trying to fall back to excluding guest samples");
+ scnprintf(msg, msgsize, "Trying to fall back to excluding guest samples");
evsel->core.attr.exclude_guest = 1;
return true;
}
-
+no_fallback:
+ scnprintf(msg, msgsize, "No fallback found for '%s' for error %d",
+ evsel__name(evsel), err);
return false;
}
@@ -3659,6 +3852,7 @@ static int dump_perf_event_processes(char *msg, size_t size)
}
int __weak arch_evsel__open_strerror(struct evsel *evsel __maybe_unused,
+ int err __maybe_unused,
char *msg __maybe_unused,
size_t size __maybe_unused)
{
@@ -3668,6 +3862,7 @@ int __weak arch_evsel__open_strerror(struct evsel *evsel __maybe_unused,
int evsel__open_strerror(struct evsel *evsel, struct target *target,
int err, char *msg, size_t size)
{
+ struct perf_pmu *pmu;
char sbuf[STRERR_BUFSIZE];
int printed = 0, enforced = 0;
int ret;
@@ -3752,6 +3947,10 @@ int evsel__open_strerror(struct evsel *evsel, struct target *target,
return scnprintf(msg, size, "%s",
"No hardware sampling interrupt available.\n");
#endif
+ if (!target__has_cpu(target))
+ return scnprintf(msg, size,
+ "Unsupported event (%s) in per-thread mode, enable system wide with '-a'.",
+ evsel__name(evsel));
break;
case EBUSY:
if (find_process("oprofiled"))
@@ -3779,7 +3978,8 @@ int evsel__open_strerror(struct evsel *evsel, struct target *target,
return scnprintf(msg, size, "The 'aux_action' feature is not supported, update the kernel.");
if (perf_missing_features.aux_output)
return scnprintf(msg, size, "The 'aux_output' feature is not supported, update the kernel.");
- if (!target__has_cpu(target))
+ pmu = evsel__find_pmu(evsel);
+ if (!pmu->is_core && !target__has_cpu(target))
return scnprintf(msg, size,
"Invalid event (%s) in per-thread mode, enable system wide with '-a'.",
evsel__name(evsel));
@@ -3792,7 +3992,7 @@ int evsel__open_strerror(struct evsel *evsel, struct target *target,
break;
}
- ret = arch_evsel__open_strerror(evsel, msg, size);
+ ret = arch_evsel__open_strerror(evsel, err, msg, size);
if (ret)
return ret;
@@ -3802,11 +4002,16 @@ int evsel__open_strerror(struct evsel *evsel, struct target *target,
err, str_error_r(err, sbuf, sizeof(sbuf)), evsel__name(evsel));
}
+struct perf_session *evsel__session(struct evsel *evsel)
+{
+ return evsel && evsel->evlist ? evsel->evlist->session : NULL;
+}
+
struct perf_env *evsel__env(struct evsel *evsel)
{
- if (evsel && evsel->evlist && evsel->evlist->env)
- return evsel->evlist->env;
- return &perf_env;
+ struct perf_session *session = evsel__session(evsel);
+
+ return session ? perf_session__env(session) : NULL;
}
static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist)
@@ -3816,6 +4021,9 @@ static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist)
if (evsel__is_retire_lat(evsel))
return 0;
+ if (perf_pmu__kind(evsel->pmu) != PERF_PMU_KIND_PE)
+ return 0;
+
for (cpu_map_idx = 0; cpu_map_idx < xyarray__max_x(evsel->core.fd); cpu_map_idx++) {
for (thread = 0; thread < xyarray__max_y(evsel->core.fd);
thread++) {
@@ -3869,6 +4077,8 @@ bool evsel__is_hybrid(const struct evsel *evsel)
struct evsel *evsel__leader(const struct evsel *evsel)
{
+ if (evsel->core.leader == NULL)
+ return NULL;
return container_of(evsel->core.leader, struct evsel, core);
}
@@ -3917,3 +4127,152 @@ void evsel__remove_from_group(struct evsel *evsel, struct evsel *leader)
leader->core.nr_members--;
}
}
+
+bool evsel__set_needs_uniquify(struct evsel *counter, const struct perf_stat_config *config)
+{
+ struct evsel *evsel;
+
+ if (counter->needs_uniquify) {
+ /* Already set. */
+ return true;
+ }
+
+ if (counter->use_config_name || counter->is_libpfm_event) {
+ /* Original name will be used. */
+ return false;
+ }
+
+ if (!config->hybrid_merge && evsel__is_hybrid(counter)) {
+ /* Unique hybrid counters necessary. */
+ counter->needs_uniquify = true;
+ return true;
+ }
+
+ if (counter->core.attr.type < PERF_TYPE_MAX && counter->core.attr.type != PERF_TYPE_RAW) {
+ /* Legacy event, don't uniquify. */
+ return false;
+ }
+
+ if (counter->pmu && counter->pmu->is_core &&
+ counter->alternate_hw_config != PERF_COUNT_HW_MAX) {
+ /* A sysfs or json event replacing a legacy event, don't uniquify. */
+ return false;
+ }
+
+ if (config->aggr_mode == AGGR_NONE) {
+ /* Always unique with no aggregation. */
+ counter->needs_uniquify = true;
+ return true;
+ }
+
+ if (counter->first_wildcard_match != NULL) {
+ /*
+ * If stats are merged then only the first_wildcard_match is
+ * displayed, there is no need to uniquify this evsel as the
+ * name won't be shown.
+ */
+ return false;
+ }
+
+ /*
+ * Do other non-merged events in the evlist have the same name? If so
+ * uniquify is necessary.
+ */
+ evlist__for_each_entry(counter->evlist, evsel) {
+ if (evsel == counter || evsel->first_wildcard_match || evsel->pmu == counter->pmu)
+ continue;
+
+ if (evsel__name_is(counter, evsel__name(evsel))) {
+ counter->needs_uniquify = true;
+ return true;
+ }
+ }
+ return false;
+}
+
+void evsel__uniquify_counter(struct evsel *counter)
+{
+ const char *name, *pmu_name, *config;
+ char *new_name;
+ int len, ret;
+
+ /* No uniquification necessary. */
+ if (!counter->needs_uniquify)
+ return;
+
+ /* The evsel was already uniquified. */
+ if (counter->uniquified_name)
+ return;
+
+ /* Avoid checking to uniquify twice. */
+ counter->uniquified_name = true;
+
+ name = evsel__name(counter);
+ config = strchr(name, '/');
+ pmu_name = counter->pmu->name;
+
+ /* Already prefixed by the PMU name? */
+ len = pmu_name_len_no_suffix(pmu_name);
+
+ if (!strncmp(name, pmu_name, len)) {
+ /*
+ * If the PMU name is there, then there is no sense in not
+ * having a slash. Do this for robustness.
+ */
+ if (config == NULL)
+ config = name - 1;
+
+ ret = asprintf(&new_name, "%s/%s", pmu_name, config + 1);
+ } else if (config) {
+ len = config - name;
+ if (config[1] == '/') {
+ /* case: event// */
+ ret = asprintf(&new_name, "%s/%.*s/%s", pmu_name, len, name, config + 2);
+ } else {
+ /* case: event/.../ */
+ ret = asprintf(&new_name, "%s/%.*s,%s", pmu_name, len, name, config + 1);
+ }
+ } else {
+ config = strchr(name, ':');
+ if (config) {
+ /* case: event:.. */
+ len = config - name;
+
+ ret = asprintf(&new_name, "%s/%.*s/%s", pmu_name, len, name, config + 1);
+ } else {
+ /* case: event */
+ ret = asprintf(&new_name, "%s/%s/", pmu_name, name);
+ }
+ }
+ if (ret > 0) {
+ free(counter->name);
+ counter->name = new_name;
+ } else {
+ /* ENOMEM from asprintf. */
+ counter->uniquified_name = false;
+ }
+}
+
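A simplified sketch of the renaming cases in evsel__uniquify_counter(), assuming the PMU name is not already a prefix of the event name and ignoring the event// special case handled above:

#define _GNU_SOURCE		/* for asprintf() */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *uniquify(const char *pmu, const char *name)
{
	const char *cfg = strchr(name, '/');
	char *out = NULL;
	int ret;

	if (cfg)		/* event/.../  ->  pmu/event,.../ */
		ret = asprintf(&out, "%s/%.*s,%s", pmu, (int)(cfg - name), name, cfg + 1);
	else if ((cfg = strchr(name, ':')))	/* event:mod  ->  pmu/event/mod */
		ret = asprintf(&out, "%s/%.*s/%s", pmu, (int)(cfg - name), name, cfg + 1);
	else			/* bare event  ->  pmu/event/ */
		ret = asprintf(&out, "%s/%s/", pmu, name);
	return ret > 0 ? out : NULL;
}

int main(void)
{
	printf("%s\n", uniquify("uncore_imc_0", "cas_count_read/config=1/"));
	printf("%s\n", uniquify("cpu", "cycles:k"));
	printf("%s\n", uniquify("cpu", "cycles"));
	return 0;
}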
+void evsel__warn_user_requested_cpus(struct evsel *evsel, struct perf_cpu_map *user_requested_cpus)
+{
+ struct perf_cpu_map *intersect, *online = NULL;
+ const struct perf_pmu *pmu = evsel__find_pmu(evsel);
+
+ if (pmu && pmu->is_core) {
+ intersect = perf_cpu_map__intersect(pmu->cpus, user_requested_cpus);
+ } else {
+ online = cpu_map__online();
+ intersect = perf_cpu_map__intersect(online, user_requested_cpus);
+ }
+ if (!perf_cpu_map__equal(intersect, user_requested_cpus)) {
+ char buf1[128];
+ char buf2[128];
+
+ cpu_map__snprint(user_requested_cpus, buf1, sizeof(buf1));
+ cpu_map__snprint(online ?: pmu->cpus, buf2, sizeof(buf2));
+ pr_warning("WARNING: A requested CPU in '%s' is not supported by PMU '%s' (CPUs %s) for event '%s'\n",
+ buf1, pmu ? pmu->name : "cpu", buf2, evsel__name(evsel));
+ }
+ perf_cpu_map__put(intersect);
+ perf_cpu_map__put(online);
+}
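The containment check above, sketched with plain bitmasks standing in for perf_cpu_map: a warning fires when the requested set is not fully inside the set the PMU (or the online mask) supports.

#include <stdio.h>

int main(void)
{
	unsigned supported = 0x0f;	/* CPUs 0-3 */
	unsigned requested = 0x18;	/* CPUs 3-4 */
	unsigned intersect = supported & requested;

	if (intersect != requested)
		printf("WARNING: requested CPUs 0x%x not all supported (0x%x)\n",
		       requested, supported);
	return 0;
}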
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index aae431d63d64..a08130ff2e47 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -16,6 +16,7 @@
struct bpf_object;
struct cgroup;
struct perf_counts;
+struct perf_stat_config;
struct perf_stat_evsel;
union perf_event;
struct bpf_counter_ops;
@@ -69,6 +70,11 @@ struct evsel {
const char *unit;
struct cgroup *cgrp;
const char *metric_id;
+ /*
+ * This points to the first evsel with the same name, intended to store the
+ * aggregated counts in aggregation mode.
+ */
+ struct evsel *first_wildcard_match;
/* parse modifier helper */
int exclude_GH;
int sample_read;
@@ -77,13 +83,13 @@ struct evsel {
bool percore;
bool precise_max;
bool is_libpfm_event;
- bool auto_merge_stats;
bool collect_stat;
bool weak_group;
bool bpf_counter;
bool use_config_name;
bool skippable;
bool retire_lat;
+ bool dont_regroup;
int bpf_fd;
struct bpf_object *bpf_obj;
struct list_head config_terms;
@@ -94,7 +100,6 @@ struct evsel {
* metric fields are similar, but needs more care as they can have
* references to other metric (evsel).
*/
- struct evsel **metric_events;
struct evsel *metric_leader;
void *handler;
@@ -114,11 +119,10 @@ struct evsel {
bool ignore_missing_thread;
bool forced_leader;
bool cmdline_group_boundary;
- bool merged_stat;
bool reset_group;
- bool errored;
bool needs_auxtrace_mmap;
bool default_metricgroup; /* A member of the Default metricgroup */
+ bool default_show_events; /* If a default group member, show the event */
bool needs_uniquify;
struct hashmap *per_pkg_mask;
int err;
@@ -177,6 +181,12 @@ struct evsel {
/* For tool events */
/* Beginning time subtracted when the counter is read. */
union {
+ /* Defaults for retirement latency events. */
+ struct _retirement_latency {
+ double mean;
+ double min;
+ double max;
+ } retirement_latency;
/* duration_time is a single global time. */
__u64 start_time;
/*
@@ -211,6 +221,7 @@ struct perf_missing_features {
bool branch_counters;
bool aux_action;
bool inherit_sample_read;
+ bool defer_callchain;
};
extern struct perf_missing_features perf_missing_features;
@@ -270,6 +281,8 @@ void evsel__init(struct evsel *evsel, struct perf_event_attr *attr, int idx);
void evsel__exit(struct evsel *evsel);
void evsel__delete(struct evsel *evsel);
+void evsel__set_priv_destructor(void (*destructor)(void *priv));
+
struct callchain_param;
void evsel__config(struct evsel *evsel, struct record_opts *opts,
@@ -329,7 +342,8 @@ void evsel__set_sample_id(struct evsel *evsel, bool use_sample_identifier);
void arch_evsel__set_sample_weight(struct evsel *evsel);
void arch__post_evsel_config(struct evsel *evsel, struct perf_event_attr *attr);
-int arch_evsel__open_strerror(struct evsel *evsel, char *msg, size_t size);
+int arch_evsel__open_strerror(struct evsel *evsel, int err, char *msg, size_t size);
+void arch_evsel__apply_ratio_to_prev(struct evsel *evsel, struct perf_event_attr *attr);
int evsel__set_filter(struct evsel *evsel, const char *filter);
int evsel__append_tp_filter(struct evsel *evsel, const char *filter);
@@ -339,6 +353,9 @@ int evsel__enable(struct evsel *evsel);
int evsel__disable(struct evsel *evsel);
int evsel__disable_cpu(struct evsel *evsel, int cpu_map_idx);
+int evsel__open_per_cpu_and_thread(struct evsel *evsel,
+ struct perf_cpu_map *cpus, int cpu_map_idx,
+ struct perf_thread_map *threads);
int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu_map_idx);
int evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads);
int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
@@ -527,6 +544,7 @@ static inline bool evsel__is_dummy_event(struct evsel *evsel)
(evsel->core.attr.config == PERF_COUNT_SW_DUMMY);
}
+struct perf_session *evsel__session(struct evsel *evsel);
struct perf_env *evsel__env(struct evsel *evsel);
int evsel__store_ids(struct evsel *evsel, struct evlist *evlist);
@@ -542,6 +560,9 @@ void evsel__remove_from_group(struct evsel *evsel, struct evsel *leader);
bool arch_evsel__must_be_in_group(const struct evsel *evsel);
+bool evsel__set_needs_uniquify(struct evsel *counter, const struct perf_stat_config *config);
+void evsel__uniquify_counter(struct evsel *counter);
+
/*
* Macro to swap the bit-field position and size.
* Used when,
@@ -557,4 +578,8 @@ u64 evsel__bitfield_swap_branch_flags(u64 value);
void evsel__set_config_if_unset(struct perf_pmu *pmu, struct evsel *evsel,
const char *config_name, u64 val);
+bool evsel__is_offcpu_event(struct evsel *evsel);
+
+void evsel__warn_user_requested_cpus(struct evsel *evsel, struct perf_cpu_map *user_requested_cpus);
+
#endif /* __PERF_EVSEL_H */
diff --git a/tools/perf/util/evsel_config.h b/tools/perf/util/evsel_config.h
index af52a1516d0b..bcd3a978f0c4 100644
--- a/tools/perf/util/evsel_config.h
+++ b/tools/perf/util/evsel_config.h
@@ -28,6 +28,7 @@ enum evsel_term_type {
EVSEL__CONFIG_TERM_AUX_ACTION,
EVSEL__CONFIG_TERM_AUX_SAMPLE_SIZE,
EVSEL__CONFIG_TERM_CFG_CHG,
+ EVSEL__CONFIG_TERM_RATIO_TO_PREV,
};
struct evsel_config_term {
@@ -48,6 +49,7 @@ struct evsel_config_term {
u32 aux_sample_size;
u64 cfg_chg;
char *str;
+ int cpu;
} val;
bool weak;
};
diff --git a/tools/perf/util/evsel_fprintf.c b/tools/perf/util/evsel_fprintf.c
index 103984b29b1e..10f1a03c2860 100644
--- a/tools/perf/util/evsel_fprintf.c
+++ b/tools/perf/util/evsel_fprintf.c
@@ -168,7 +168,10 @@ int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
node_al.addr = addr;
node_al.map = map__get(map);
- if (print_symoffset) {
+ if (sample->deferred_callchain &&
+ sample->deferred_cookie == node->ip) {
+ printed += fprintf(fp, "(cookie)");
+ } else if (print_symoffset) {
printed += __symbol__fprintf_symname_offs(sym, &node_al,
print_unknown_as_addr,
true, fp);
diff --git a/tools/perf/util/evswitch.c b/tools/perf/util/evswitch.c
index 40cb56a9347d..d4c06a3f825a 100644
--- a/tools/perf/util/evswitch.c
+++ b/tools/perf/util/evswitch.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2019, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+#include <errno.h>
#include "evswitch.h"
#include "evlist.h"
diff --git a/tools/perf/util/expr.c b/tools/perf/util/expr.c
index 6413537442aa..465fe2e9bbbe 100644
--- a/tools/perf/util/expr.c
+++ b/tools/perf/util/expr.c
@@ -166,8 +166,12 @@ int expr__add_id_val_source_count(struct expr_parse_ctx *ctx, const char *id,
data_ptr->kind = EXPR_ID_DATA__VALUE;
ret = hashmap__set(ctx->ids, id, data_ptr, &old_key, &old_data);
- if (ret)
+ if (ret) {
free(data_ptr);
+ } else if (old_data) {
+ data_ptr->val.val += old_data->val.val;
+ data_ptr->val.source_count += old_data->val.source_count;
+ }
free(old_key);
free(old_data);
return ret;
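A sketch of the accumulate-on-repeat behavior this hunk adds: re-inserting an existing id folds its value and source count into the new entry instead of dropping them.

#include <stdio.h>

struct id_data { double val; int source_count; };

static void add_id_val(struct id_data *cur, const struct id_data *old)
{
	if (old) {			/* a previous entry existed: accumulate */
		cur->val += old->val;
		cur->source_count += old->source_count;
	}
}

int main(void)
{
	struct id_data old = { .val = 3.0, .source_count = 1 };
	struct id_data cur = { .val = 4.0, .source_count = 1 };

	add_id_val(&cur, &old);
	printf("val=%.1f sources=%d\n", cur.val, cur.source_count);	/* 7.0 2 */
	return 0;
}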
@@ -397,14 +401,12 @@ double expr__get_literal(const char *literal, const struct expr_scanner_ctx *ctx
if (ev != TOOL_PMU__EVENT_NONE) {
u64 count;
- if (tool_pmu__read_event(ev, &count))
+ if (tool_pmu__read_event(ev, /*evsel=*/NULL,
+ ctx->system_wide, ctx->user_requested_cpu_list,
+ &count))
result = count;
else
pr_err("Failure to read '%s'", literal);
-
- } else if (!strcmp("#core_wide", literal)) {
- result = core_wide(ctx->system_wide, ctx->user_requested_cpu_list)
- ? 1.0 : 0.0;
} else {
pr_err("Unrecognized literal '%s'", literal);
}
diff --git a/tools/perf/util/fncache.c b/tools/perf/util/fncache.c
index 6225cbc52310..bf9559c55c63 100644
--- a/tools/perf/util/fncache.c
+++ b/tools/perf/util/fncache.c
@@ -1,53 +1,58 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Manage a cache of file names' existence */
+#include <pthread.h>
#include <stdlib.h>
-#include <unistd.h>
#include <string.h>
-#include <linux/list.h>
+#include <unistd.h>
+#include <linux/compiler.h>
#include "fncache.h"
+#include "hashmap.h"
-struct fncache {
- struct hlist_node nd;
- bool res;
- char name[];
-};
+static struct hashmap *fncache;
-#define FNHSIZE 61
+static size_t fncache__hash(long key, void *ctx __maybe_unused)
+{
+ return str_hash((const char *)key);
+}
-static struct hlist_head fncache_hash[FNHSIZE];
+static bool fncache__equal(long key1, long key2, void *ctx __maybe_unused)
+{
+ return strcmp((const char *)key1, (const char *)key2) == 0;
+}
-unsigned shash(const unsigned char *s)
+static void fncache__init(void)
{
- unsigned h = 0;
- while (*s)
- h = 65599 * h + *s++;
- return h ^ (h >> 16);
+ fncache = hashmap__new(fncache__hash, fncache__equal, /*ctx=*/NULL);
+}
+
+static struct hashmap *fncache__get(void)
+{
+ static pthread_once_t fncache_once = PTHREAD_ONCE_INIT;
+
+ pthread_once(&fncache_once, fncache__init);
+
+ return fncache;
}
static bool lookup_fncache(const char *name, bool *res)
{
- int h = shash((const unsigned char *)name) % FNHSIZE;
- struct fncache *n;
-
- hlist_for_each_entry(n, &fncache_hash[h], nd) {
- if (!strcmp(n->name, name)) {
- *res = n->res;
- return true;
- }
- }
- return false;
+ long val;
+
+ if (!hashmap__find(fncache__get(), name, &val))
+ return false;
+
+ *res = (val != 0);
+ return true;
}
static void update_fncache(const char *name, bool res)
{
- struct fncache *n = malloc(sizeof(struct fncache) + strlen(name) + 1);
- int h = shash((const unsigned char *)name) % FNHSIZE;
-
- if (!n)
- return;
- strcpy(n->name, name);
- n->res = res;
- hlist_add_head(&n->nd, &fncache_hash[h]);
+ char *old_key = NULL, *key = strdup(name);
+
+ if (key) {
+ hashmap__set(fncache__get(), key, res, &old_key, /*old_value*/NULL);
+ free(old_key);
+ }
}
/* No LRU, only use when bounded in some other way. */
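A minimal sketch of the lazy, process-wide initialization pattern used here: pthread_once() runs the constructor exactly once even under concurrent first callers. The cache itself is stubbed out; link with -lpthread.

#include <pthread.h>
#include <stdio.h>

static int *cache;
static int storage;

static void cache_init(void)
{
	storage = 0;
	cache = &storage;
}

static int *cache_get(void)
{
	static pthread_once_t once = PTHREAD_ONCE_INIT;

	pthread_once(&once, cache_init);
	return cache;
}

int main(void)
{
	*cache_get() += 1;
	printf("%d\n", *cache_get());	/* 1 */
	return 0;
}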
diff --git a/tools/perf/util/fncache.h b/tools/perf/util/fncache.h
index fe020beaefb1..b6a0f209493e 100644
--- a/tools/perf/util/fncache.h
+++ b/tools/perf/util/fncache.h
@@ -1,7 +1,6 @@
#ifndef _FCACHE_H
#define _FCACHE_H 1
-unsigned shash(const unsigned char *s);
bool file_available(const char *name);
#endif
diff --git a/tools/perf/util/ftrace.h b/tools/perf/util/ftrace.h
index a9bc47da83a5..950f2efafad2 100644
--- a/tools/perf/util/ftrace.h
+++ b/tools/perf/util/ftrace.h
@@ -17,6 +17,7 @@ struct perf_ftrace {
struct list_head notrace;
struct list_head graph_funcs;
struct list_head nograph_funcs;
+ struct list_head event_pair;
struct hashmap *profile_hash;
unsigned long percpu_buffer_size;
bool inherit;
@@ -29,6 +30,10 @@ struct perf_ftrace {
int graph_depth;
int func_stack_trace;
int func_irq_info;
+ int graph_args;
+ int graph_retval;
+ int graph_retval_hex;
+ int graph_retaddr;
int graph_nosleep_time;
int graph_noirqs;
int graph_verbose;
diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c
index cdce7f173d00..a1cd5196f4ec 100644
--- a/tools/perf/util/genelf.c
+++ b/tools/perf/util/genelf.c
@@ -12,15 +12,14 @@
#include <libelf.h>
#include <string.h>
#include <stdlib.h>
-#include <unistd.h>
#include <inttypes.h>
-#include <fcntl.h>
#include <err.h>
#ifdef HAVE_LIBDW_SUPPORT
#include <dwarf.h>
#endif
#include "genelf.h"
+#include "sha1.h"
#include "../util/jitdump.h"
#include <linux/compiler.h>
@@ -28,25 +27,6 @@
#define NT_GNU_BUILD_ID 3
#endif
-#define BUILD_ID_URANDOM /* different uuid for each run */
-
-#ifdef HAVE_LIBCRYPTO_SUPPORT
-
-#define BUILD_ID_MD5
-#undef BUILD_ID_SHA /* does not seem to work well when linked with Java */
-#undef BUILD_ID_URANDOM /* different uuid for each run */
-
-#ifdef BUILD_ID_SHA
-#include <openssl/sha.h>
-#endif
-
-#ifdef BUILD_ID_MD5
-#include <openssl/evp.h>
-#include <openssl/md5.h>
-#endif
-#endif
-
-
typedef struct {
unsigned int namesz; /* Size of entry's owner string */
unsigned int descsz; /* Size of the note descriptor */
@@ -71,7 +51,7 @@ static char shd_string_table[] = {
static struct buildid_note {
Elf_Note desc; /* descsz: size of build-id, must be multiple of 4 */
char name[4]; /* GNU\0 */
- char build_id[20];
+ u8 build_id[SHA1_DIGEST_SIZE];
} bnote;
static Elf_Sym symtab[]={
@@ -92,65 +72,6 @@ static Elf_Sym symtab[]={
}
};
-#ifdef BUILD_ID_URANDOM
-static void
-gen_build_id(struct buildid_note *note,
- unsigned long load_addr __maybe_unused,
- const void *code __maybe_unused,
- size_t csize __maybe_unused)
-{
- int fd;
- size_t sz = sizeof(note->build_id);
- ssize_t sret;
-
- fd = open("/dev/urandom", O_RDONLY);
- if (fd == -1)
- err(1, "cannot access /dev/urandom for buildid");
-
- sret = read(fd, note->build_id, sz);
-
- close(fd);
-
- if (sret != (ssize_t)sz)
- memset(note->build_id, 0, sz);
-}
-#endif
-
-#ifdef BUILD_ID_SHA
-static void
-gen_build_id(struct buildid_note *note,
- unsigned long load_addr __maybe_unused,
- const void *code,
- size_t csize)
-{
- if (sizeof(note->build_id) < SHA_DIGEST_LENGTH)
- errx(1, "build_id too small for SHA1");
-
- SHA1(code, csize, (unsigned char *)note->build_id);
-}
-#endif
-
-#ifdef BUILD_ID_MD5
-static void
-gen_build_id(struct buildid_note *note, unsigned long load_addr, const void *code, size_t csize)
-{
- EVP_MD_CTX *mdctx;
-
- if (sizeof(note->build_id) < 16)
- errx(1, "build_id too small for MD5");
-
- mdctx = EVP_MD_CTX_new();
- if (!mdctx)
- errx(2, "failed to create EVP_MD_CTX");
-
- EVP_DigestInit_ex(mdctx, EVP_md5(), NULL);
- EVP_DigestUpdate(mdctx, &load_addr, sizeof(load_addr));
- EVP_DigestUpdate(mdctx, code, csize);
- EVP_DigestFinal_ex(mdctx, (unsigned char *)note->build_id, NULL);
- EVP_MD_CTX_free(mdctx);
-}
-#endif
-
static int
jit_add_eh_frame_info(Elf *e, void* unwinding, uint64_t unwinding_header_size,
uint64_t unwinding_size, uint64_t base_offset)
@@ -239,7 +160,7 @@ jit_add_eh_frame_info(Elf *e, void* unwinding, uint64_t unwinding_header_size,
* csize: the code size in bytes
*/
int
-jit_write_elf(int fd, uint64_t load_addr, const char *sym,
+jit_write_elf(int fd, uint64_t load_addr __maybe_unused, const char *sym,
const void *code, int csize,
void *debug __maybe_unused, int nr_debug_entries __maybe_unused,
void *unwinding, uint64_t unwinding_header_size, uint64_t unwinding_size)
@@ -252,6 +173,8 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
Elf_Shdr *shdr;
uint64_t eh_frame_base_offset;
char *strsym = NULL;
+ void *build_id_data = NULL, *tmp;
+ int build_id_data_len;
int symlen;
int retval = -1;
@@ -330,6 +253,14 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
shdr->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
shdr->sh_entsize = 0;
+ build_id_data = malloc(csize);
+ if (build_id_data == NULL) {
+ warnx("cannot allocate build-id data");
+ goto error;
+ }
+ memcpy(build_id_data, code, csize);
+ build_id_data_len = csize;
+
/*
* Setup .eh_frame_hdr and .eh_frame
*/
@@ -413,6 +344,15 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
shdr->sh_entsize = sizeof(Elf_Sym);
shdr->sh_link = unwinding ? 6 : 4; /* index of .strtab section */
+ tmp = realloc(build_id_data, build_id_data_len + sizeof(symtab));
+ if (tmp == NULL) {
+ warnx("cannot allocate build-id data");
+ goto error;
+ }
+ memcpy(tmp + build_id_data_len, symtab, sizeof(symtab));
+ build_id_data = tmp;
+ build_id_data_len += sizeof(symtab);
+
/*
* setup symbols string table
* 2 = 1 for 0 in 1st entry, 1 for the 0 at end of symbol for 2nd entry
@@ -455,6 +395,15 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
shdr->sh_flags = 0;
shdr->sh_entsize = 0;
+ tmp = realloc(build_id_data, build_id_data_len + symlen);
+ if (tmp == NULL) {
+ warnx("cannot allocate build-id data");
+ goto error;
+ }
+ memcpy(tmp + build_id_data_len, strsym, symlen);
+ build_id_data = tmp;
+ build_id_data_len += symlen;
+
/*
* setup build-id section
*/
@@ -473,7 +422,7 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
/*
* build-id generation
*/
- gen_build_id(&bnote, load_addr, code, csize);
+ sha1(build_id_data, build_id_data_len, bnote.build_id);
bnote.desc.namesz = sizeof(bnote.name); /* must include 0 termination */
bnote.desc.descsz = sizeof(bnote.build_id);
bnote.desc.type = NT_GNU_BUILD_ID;
@@ -518,7 +467,7 @@ error:
(void)elf_end(e);
free(strsym);
-
+ free(build_id_data);
return retval;
}
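A sketch of the deterministic build-id construction that replaces the old /dev/urandom id: the code, symtab and string table are concatenated into one growing buffer and hashed. FNV-1a below is a placeholder for the SHA-1 the real code uses.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

static uint64_t fnv1a(const void *data, size_t len)
{
	const unsigned char *p = data;
	uint64_t h = 0xcbf29ce484222325ULL;

	while (len--) {
		h ^= *p++;
		h *= 0x100000001b3ULL;
	}
	return h;
}

int main(void)
{
	const char code[] = "\x90\x90\xc3", strtab[] = "\0main\0";
	size_t len = 0;
	char *buf = NULL, *tmp;

	tmp = realloc(buf, len + sizeof(code));
	if (!tmp)
		return 1;
	memcpy(tmp + len, code, sizeof(code));
	buf = tmp;
	len += sizeof(code);

	tmp = realloc(buf, len + sizeof(strtab));
	if (!tmp) {
		free(buf);
		return 1;
	}
	memcpy(tmp + len, strtab, sizeof(strtab));
	buf = tmp;
	len += sizeof(strtab);

	printf("build-id (placeholder hash): %016llx\n",
	       (unsigned long long)fnv1a(buf, len));
	free(buf);
	return 0;
}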
diff --git a/tools/perf/util/get_current_dir_name.c b/tools/perf/util/get_current_dir_name.c
deleted file mode 100644
index e68935e9ac8c..000000000000
--- a/tools/perf/util/get_current_dir_name.c
+++ /dev/null
@@ -1,18 +0,0 @@
-// SPDX-License-Identifier: LGPL-2.1
-// Copyright (C) 2018, 2019 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
-//
-#ifndef HAVE_GET_CURRENT_DIR_NAME
-#include "get_current_dir_name.h"
-#include <limits.h>
-#include <string.h>
-#include <unistd.h>
-
-/* Android's 'bionic' library, for one, doesn't have this */
-
-char *get_current_dir_name(void)
-{
- char pwd[PATH_MAX];
-
- return getcwd(pwd, sizeof(pwd)) == NULL ? NULL : strdup(pwd);
-}
-#endif // HAVE_GET_CURRENT_DIR_NAME
diff --git a/tools/perf/util/get_current_dir_name.h b/tools/perf/util/get_current_dir_name.h
deleted file mode 100644
index 69f7d5537d32..000000000000
--- a/tools/perf/util/get_current_dir_name.h
+++ /dev/null
@@ -1,8 +0,0 @@
-// SPDX-License-Identifier: LGPL-2.1
-// Copyright (C) 2018, 2019 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
-//
-#ifndef __PERF_GET_CURRENT_DIR_NAME_H
-#ifndef HAVE_GET_CURRENT_DIR_NAME
-char *get_current_dir_name(void);
-#endif // HAVE_GET_CURRENT_DIR_NAME
-#endif // __PERF_GET_CURRENT_DIR_NAME_H
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index e3cdc3b7b4ab..f5cad377c99e 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -335,7 +335,6 @@ static int write_build_id(struct feat_fd *ff,
pr_debug("failed to write buildid table\n");
return err;
}
- perf_session__cache_build_ids(session);
return 0;
}
@@ -557,6 +556,7 @@ static int write_event_desc(struct feat_fd *ff,
static int write_cmdline(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
{
+ struct perf_env *env = &ff->ph->env;
char pbuf[MAXPATHLEN], *buf;
int i, ret, n;
@@ -564,7 +564,7 @@ static int write_cmdline(struct feat_fd *ff,
buf = perf_exe(pbuf, MAXPATHLEN);
/* account for binary path */
- n = perf_env.nr_cmdline + 1;
+ n = env->nr_cmdline + 1;
ret = do_write(ff, &n, sizeof(n));
if (ret < 0)
@@ -574,8 +574,8 @@ static int write_cmdline(struct feat_fd *ff,
if (ret < 0)
return ret;
- for (i = 0 ; i < perf_env.nr_cmdline; i++) {
- ret = do_write_string(ff, perf_env.cmdline_argv[i]);
+ for (i = 0 ; i < env->nr_cmdline; i++) {
+ ret = do_write_string(ff, env->cmdline_argv[i]);
if (ret < 0)
return ret;
}
@@ -586,6 +586,7 @@ static int write_cmdline(struct feat_fd *ff,
static int write_cpu_topology(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
{
+ struct perf_env *env = &ff->ph->env;
struct cpu_topology *tp;
u32 i;
int ret, j;
@@ -613,17 +614,17 @@ static int write_cpu_topology(struct feat_fd *ff,
break;
}
- ret = perf_env__read_cpu_topology_map(&perf_env);
+ ret = perf_env__read_cpu_topology_map(env);
if (ret < 0)
goto done;
- for (j = 0; j < perf_env.nr_cpus_avail; j++) {
- ret = do_write(ff, &perf_env.cpu[j].core_id,
- sizeof(perf_env.cpu[j].core_id));
+ for (j = 0; j < env->nr_cpus_avail; j++) {
+ ret = do_write(ff, &env->cpu[j].core_id,
+ sizeof(env->cpu[j].core_id));
if (ret < 0)
return ret;
- ret = do_write(ff, &perf_env.cpu[j].socket_id,
- sizeof(perf_env.cpu[j].socket_id));
+ ret = do_write(ff, &env->cpu[j].socket_id,
+ sizeof(env->cpu[j].socket_id));
if (ret < 0)
return ret;
}
@@ -641,9 +642,9 @@ static int write_cpu_topology(struct feat_fd *ff,
goto done;
}
- for (j = 0; j < perf_env.nr_cpus_avail; j++) {
- ret = do_write(ff, &perf_env.cpu[j].die_id,
- sizeof(perf_env.cpu[j].die_id));
+ for (j = 0; j < env->nr_cpus_avail; j++) {
+ ret = do_write(ff, &env->cpu[j].die_id,
+ sizeof(env->cpu[j].die_id));
if (ret < 0)
return ret;
}
@@ -1016,13 +1017,13 @@ static int write_bpf_prog_info(struct feat_fd *ff,
struct perf_env *env = &ff->ph->env;
struct rb_root *root;
struct rb_node *next;
- int ret;
+ int ret = 0;
down_read(&env->bpf_progs.lock);
ret = do_write(ff, &env->bpf_progs.infos_cnt,
sizeof(env->bpf_progs.infos_cnt));
- if (ret < 0)
+ if (ret < 0 || env->bpf_progs.infos_cnt == 0)
goto out;
root = &env->bpf_progs.infos;
@@ -1058,14 +1059,14 @@ static int write_bpf_btf(struct feat_fd *ff,
struct perf_env *env = &ff->ph->env;
struct rb_root *root;
struct rb_node *next;
- int ret;
+ int ret = 0;
down_read(&env->bpf_progs.lock);
ret = do_write(ff, &env->bpf_progs.btfs_cnt,
sizeof(env->bpf_progs.btfs_cnt));
- if (ret < 0)
+ if (ret < 0 || env->bpf_progs.btfs_cnt == 0)
goto out;
root = &env->bpf_progs.btfs;
@@ -1553,7 +1554,7 @@ static int __write_pmu_caps(struct feat_fd *ff, struct perf_pmu *pmu,
static int write_cpu_pmu_caps(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
{
- struct perf_pmu *cpu_pmu = perf_pmus__find("cpu");
+ struct perf_pmu *cpu_pmu = perf_pmus__find_core_pmu();
int ret;
if (!cpu_pmu)
@@ -1814,6 +1815,9 @@ static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
root = &env->bpf_progs.infos;
next = rb_first(root);
+ if (!next)
+ printf("# bpf_prog_info empty\n");
+
while (next) {
struct bpf_prog_info_node *node;
@@ -1838,6 +1842,9 @@ static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
root = &env->bpf_progs.btfs;
next = rb_first(root);
+ if (!next)
+ printf("# btf info empty\n");
+
while (next) {
struct btf_node *node;
@@ -2111,17 +2118,18 @@ static void print_cpu_pmu_caps(struct feat_fd *ff, FILE *fp)
static void print_pmu_caps(struct feat_fd *ff, FILE *fp)
{
+ struct perf_env *env = &ff->ph->env;
struct pmu_caps *pmu_caps;
- for (int i = 0; i < ff->ph->env.nr_pmus_with_caps; i++) {
- pmu_caps = &ff->ph->env.pmu_caps[i];
+ for (int i = 0; i < env->nr_pmus_with_caps; i++) {
+ pmu_caps = &env->pmu_caps[i];
__print_pmu_caps(fp, pmu_caps->nr_caps, pmu_caps->caps,
pmu_caps->pmu_name);
}
- if (strcmp(perf_env__arch(&ff->ph->env), "x86") == 0 &&
- perf_env__has_pmu_mapping(&ff->ph->env, "ibs_op")) {
- char *max_precise = perf_env__find_pmu_cap(&ff->ph->env, "cpu", "max_precise");
+ if (strcmp(perf_env__arch(env), "x86") == 0 &&
+ perf_env__has_pmu_mapping(env, "ibs_op")) {
+ char *max_precise = perf_env__find_pmu_cap(env, "cpu", "max_precise");
if (max_precise != NULL && atoi(max_precise) == 0)
fprintf(fp, "# AMD systems uses ibs_op// PMU for some precise events, e.g.: cycles:p, see the 'perf list' man page for further details.\n");
@@ -2130,18 +2138,19 @@ static void print_pmu_caps(struct feat_fd *ff, FILE *fp)
static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
{
+ struct perf_env *env = &ff->ph->env;
const char *delimiter = "# pmu mappings: ";
char *str, *tmp;
u32 pmu_num;
u32 type;
- pmu_num = ff->ph->env.nr_pmu_mappings;
+ pmu_num = env->nr_pmu_mappings;
if (!pmu_num) {
fprintf(fp, "# pmu mappings: not available\n");
return;
}
- str = ff->ph->env.pmu_mappings;
+ str = env->pmu_mappings;
while (pmu_num) {
type = strtoul(str, &tmp, 0);
@@ -2223,17 +2232,18 @@ static void memory_node__fprintf(struct memory_node *n,
static void print_mem_topology(struct feat_fd *ff, FILE *fp)
{
+ struct perf_env *env = &ff->ph->env;
struct memory_node *nodes;
int i, nr;
- nodes = ff->ph->env.memory_nodes;
- nr = ff->ph->env.nr_memory_nodes;
+ nodes = env->memory_nodes;
+ nr = env->nr_memory_nodes;
fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n",
- nr, ff->ph->env.memory_bsize);
+ nr, env->memory_bsize);
for (i = 0; i < nr; i++) {
- memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp);
+ memory_node__fprintf(&nodes[i], env->memory_bsize, fp);
}
}
@@ -2291,7 +2301,7 @@ static int __event_process_build_id(struct perf_record_header_build_id *bev,
free(m.name);
}
- build_id__sprintf(dso__bid(dso), sbuild_id);
+ build_id__snprintf(dso__bid(dso), sbuild_id, sizeof(sbuild_id));
pr_debug("build id event received for %s: %s [%zu]\n",
dso__long_name(dso), sbuild_id, size);
dso__put(dso);
@@ -2431,6 +2441,7 @@ static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
{
+ struct perf_env *env = &ff->ph->env;
int ret;
u32 nr_cpus_avail, nr_cpus_online;
@@ -2441,20 +2452,21 @@ static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
ret = do_read_u32(ff, &nr_cpus_online);
if (ret)
return ret;
- ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail;
- ff->ph->env.nr_cpus_online = (int)nr_cpus_online;
+ env->nr_cpus_avail = (int)nr_cpus_avail;
+ env->nr_cpus_online = (int)nr_cpus_online;
return 0;
}
static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
{
+ struct perf_env *env = &ff->ph->env;
u64 total_mem;
int ret;
ret = do_read_u64(ff, &total_mem);
if (ret)
return -1;
- ff->ph->env.total_mem = (unsigned long long)total_mem;
+ env->total_mem = (unsigned long long)total_mem;
return 0;
}
@@ -2515,13 +2527,14 @@ process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
{
+ struct perf_env *env = &ff->ph->env;
char *str, *cmdline = NULL, **argv = NULL;
u32 nr, i, len = 0;
if (do_read_u32(ff, &nr))
return -1;
- ff->ph->env.nr_cmdline = nr;
+ env->nr_cmdline = nr;
cmdline = zalloc(ff->size + nr + 1);
if (!cmdline)
@@ -2541,8 +2554,8 @@ static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
len += strlen(str) + 1;
free(str);
}
- ff->ph->env.cmdline = cmdline;
- ff->ph->env.cmdline_argv = (const char **) argv;
+ env->cmdline = cmdline;
+ env->cmdline_argv = (const char **) argv;
return 0;
error:
@@ -2556,19 +2569,18 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
u32 nr, i;
char *str = NULL;
struct strbuf sb;
- int cpu_nr = ff->ph->env.nr_cpus_avail;
+ struct perf_env *env = &ff->ph->env;
+ int cpu_nr = env->nr_cpus_avail;
u64 size = 0;
- struct perf_header *ph = ff->ph;
- bool do_core_id_test = true;
- ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
- if (!ph->env.cpu)
+ env->cpu = calloc(cpu_nr, sizeof(*env->cpu));
+ if (!env->cpu)
return -1;
if (do_read_u32(ff, &nr))
goto free_cpu;
- ph->env.nr_sibling_cores = nr;
+ env->nr_sibling_cores = nr;
size += sizeof(u32);
if (strbuf_init(&sb, 128) < 0)
goto free_cpu;
@@ -2584,12 +2596,12 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
size += string_size(str);
zfree(&str);
}
- ph->env.sibling_cores = strbuf_detach(&sb, NULL);
+ env->sibling_cores = strbuf_detach(&sb, NULL);
if (do_read_u32(ff, &nr))
return -1;
- ph->env.nr_sibling_threads = nr;
+ env->nr_sibling_threads = nr;
size += sizeof(u32);
for (i = 0; i < nr; i++) {
@@ -2603,43 +2615,28 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
size += string_size(str);
zfree(&str);
}
- ph->env.sibling_threads = strbuf_detach(&sb, NULL);
+ env->sibling_threads = strbuf_detach(&sb, NULL);
/*
* The header may be from old perf,
* which doesn't include core id and socket id information.
*/
if (ff->size <= size) {
- zfree(&ph->env.cpu);
+ zfree(&env->cpu);
return 0;
}
- /* On s390 the socket_id number is not related to the numbers of cpus.
- * The socket_id number might be higher than the numbers of cpus.
- * This depends on the configuration.
- * AArch64 is the same.
- */
- if (ph->env.arch && (!strncmp(ph->env.arch, "s390", 4)
- || !strncmp(ph->env.arch, "aarch64", 7)))
- do_core_id_test = false;
-
for (i = 0; i < (u32)cpu_nr; i++) {
if (do_read_u32(ff, &nr))
goto free_cpu;
- ph->env.cpu[i].core_id = nr;
+ env->cpu[i].core_id = nr;
size += sizeof(u32);
if (do_read_u32(ff, &nr))
goto free_cpu;
- if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) {
- pr_debug("socket_id number is too big."
- "You may need to upgrade the perf tool.\n");
- goto free_cpu;
- }
-
- ph->env.cpu[i].socket_id = nr;
+ env->cpu[i].socket_id = nr;
size += sizeof(u32);
}
@@ -2653,7 +2650,7 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
if (do_read_u32(ff, &nr))
return -1;
- ph->env.nr_sibling_dies = nr;
+ env->nr_sibling_dies = nr;
size += sizeof(u32);
for (i = 0; i < nr; i++) {
@@ -2667,13 +2664,13 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
size += string_size(str);
zfree(&str);
}
- ph->env.sibling_dies = strbuf_detach(&sb, NULL);
+ env->sibling_dies = strbuf_detach(&sb, NULL);
for (i = 0; i < (u32)cpu_nr; i++) {
if (do_read_u32(ff, &nr))
goto free_cpu;
- ph->env.cpu[i].die_id = nr;
+ env->cpu[i].die_id = nr;
}
return 0;
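
For reference, a sketch of the on-disk CPU_TOPOLOGY layout that process_cpu_topology() walks above; the field names are read off the do_read_u32()/do_read_string() sequence, not a normative spec:

/*
 * u32  nr_sibling_cores;    followed by that many strings
 * u32  nr_sibling_threads;  followed by that many strings
 * -- only present when ff->size leaves room (newer perf): --
 * struct { u32 core_id; u32 socket_id; } cpu[nr_cpus_avail];
 * -- optional die section, again guarded by the remaining size: --
 * u32  nr_sibling_dies;     followed by that many strings
 * u32  die_id[nr_cpus_avail];
 */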
@@ -2682,12 +2679,13 @@ error:
strbuf_release(&sb);
zfree(&str);
free_cpu:
- zfree(&ph->env.cpu);
+ zfree(&env->cpu);
return -1;
}
static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
{
+ struct perf_env *env = &ff->ph->env;
struct numa_node *nodes, *n;
u32 nr, i;
char *str;
@@ -2722,8 +2720,8 @@ static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
if (!n->map)
goto error;
}
- ff->ph->env.nr_numa_nodes = nr;
- ff->ph->env.numa_nodes = nodes;
+ env->nr_numa_nodes = nr;
+ env->numa_nodes = nodes;
return 0;
error:
@@ -2733,6 +2731,7 @@ error:
static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
{
+ struct perf_env *env = &ff->ph->env;
char *name;
u32 pmu_num;
u32 type;
@@ -2746,7 +2745,7 @@ static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
return 0;
}
- ff->ph->env.nr_pmu_mappings = pmu_num;
+ env->nr_pmu_mappings = pmu_num;
if (strbuf_init(&sb, 128) < 0)
return -1;
@@ -2765,14 +2764,14 @@ static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
goto error;
if (!strcmp(name, "msr"))
- ff->ph->env.msr_pmu_type = type;
+ env->msr_pmu_type = type;
free(name);
pmu_num--;
}
/* AMD may set it by evlist__has_amd_ibs() from perf_session__new() */
- free(ff->ph->env.pmu_mappings);
- ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
+ free(env->pmu_mappings);
+ env->pmu_mappings = strbuf_detach(&sb, NULL);
return 0;
error:
@@ -2782,6 +2781,7 @@ error:
static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
{
+ struct perf_env *env = &ff->ph->env;
size_t ret = -1;
u32 i, nr, nr_groups;
struct perf_session *session;
@@ -2795,7 +2795,7 @@ static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
if (do_read_u32(ff, &nr_groups))
return -1;
- ff->ph->env.nr_groups = nr_groups;
+ env->nr_groups = nr_groups;
if (!nr_groups) {
pr_debug("group desc not available\n");
return 0;
@@ -2879,6 +2879,7 @@ static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused)
static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
{
+ struct perf_env *env = &ff->ph->env;
struct cpu_cache_level *caches;
u32 cnt, i, version;
@@ -2919,8 +2920,8 @@ static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
#undef _R
}
- ff->ph->env.caches = caches;
- ff->ph->env.caches_cnt = cnt;
+ env->caches = caches;
+ env->caches_cnt = cnt;
return 0;
out_free_caches:
for (i = 0; i < cnt; i++) {
@@ -2956,6 +2957,7 @@ static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
static int process_mem_topology(struct feat_fd *ff,
void *data __maybe_unused)
{
+ struct perf_env *env = &ff->ph->env;
struct memory_node *nodes;
u64 version, i, nr, bsize;
int ret = -1;
@@ -2994,9 +2996,9 @@ static int process_mem_topology(struct feat_fd *ff,
nodes[i] = n;
}
- ff->ph->env.memory_bsize = bsize;
- ff->ph->env.memory_nodes = nodes;
- ff->ph->env.nr_memory_nodes = nr;
+ env->memory_bsize = bsize;
+ env->memory_nodes = nodes;
+ env->nr_memory_nodes = nr;
ret = 0;
out:
@@ -3008,7 +3010,9 @@ out:
static int process_clockid(struct feat_fd *ff,
void *data __maybe_unused)
{
- if (do_read_u64(ff, &ff->ph->env.clock.clockid_res_ns))
+ struct perf_env *env = &ff->ph->env;
+
+ if (do_read_u64(ff, &env->clock.clockid_res_ns))
return -1;
return 0;
@@ -3017,6 +3021,7 @@ static int process_clockid(struct feat_fd *ff,
static int process_clock_data(struct feat_fd *ff,
void *_data __maybe_unused)
{
+ struct perf_env *env = &ff->ph->env;
u32 data32;
u64 data64;
@@ -3031,26 +3036,27 @@ static int process_clock_data(struct feat_fd *ff,
if (do_read_u32(ff, &data32))
return -1;
- ff->ph->env.clock.clockid = data32;
+ env->clock.clockid = data32;
/* TOD ref time */
if (do_read_u64(ff, &data64))
return -1;
- ff->ph->env.clock.tod_ns = data64;
+ env->clock.tod_ns = data64;
/* clockid ref time */
if (do_read_u64(ff, &data64))
return -1;
- ff->ph->env.clock.clockid_ns = data64;
- ff->ph->env.clock.enabled = true;
+ env->clock.clockid_ns = data64;
+ env->clock.enabled = true;
return 0;
}
static int process_hybrid_topology(struct feat_fd *ff,
void *data __maybe_unused)
{
+ struct perf_env *env = &ff->ph->env;
struct hybrid_node *nodes, *n;
u32 nr, i;
@@ -3074,8 +3080,8 @@ static int process_hybrid_topology(struct feat_fd *ff,
goto error;
}
- ff->ph->env.nr_hybrid_nodes = nr;
- ff->ph->env.hybrid_nodes = nodes;
+ env->nr_hybrid_nodes = nr;
+ env->hybrid_nodes = nodes;
return 0;
error:
@@ -3161,6 +3167,7 @@ static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
/* after reading from file, translate offset to address */
bpil_offs_to_addr(info_linear);
info_node->info_linear = info_linear;
+ info_node->metadata = NULL;
if (!__perf_env__insert_bpf_prog_info(env, info_node)) {
free(info_linear);
free(info_node);
@@ -3227,19 +3234,21 @@ out:
static int process_compressed(struct feat_fd *ff,
void *data __maybe_unused)
{
- if (do_read_u32(ff, &(ff->ph->env.comp_ver)))
+ struct perf_env *env = &ff->ph->env;
+
+ if (do_read_u32(ff, &(env->comp_ver)))
return -1;
- if (do_read_u32(ff, &(ff->ph->env.comp_type)))
+ if (do_read_u32(ff, &(env->comp_type)))
return -1;
- if (do_read_u32(ff, &(ff->ph->env.comp_level)))
+ if (do_read_u32(ff, &(env->comp_level)))
return -1;
- if (do_read_u32(ff, &(ff->ph->env.comp_ratio)))
+ if (do_read_u32(ff, &(env->comp_ratio)))
return -1;
- if (do_read_u32(ff, &(ff->ph->env.comp_mmap_len)))
+ if (do_read_u32(ff, &(env->comp_mmap_len)))
return -1;
return 0;
@@ -3311,19 +3320,21 @@ error:
static int process_cpu_pmu_caps(struct feat_fd *ff,
void *data __maybe_unused)
{
- int ret = __process_pmu_caps(ff, &ff->ph->env.nr_cpu_pmu_caps,
- &ff->ph->env.cpu_pmu_caps,
- &ff->ph->env.max_branches,
- &ff->ph->env.br_cntr_nr,
- &ff->ph->env.br_cntr_width);
+ struct perf_env *env = &ff->ph->env;
+ int ret = __process_pmu_caps(ff, &env->nr_cpu_pmu_caps,
+ &env->cpu_pmu_caps,
+ &env->max_branches,
+ &env->br_cntr_nr,
+ &env->br_cntr_width);
- if (!ret && !ff->ph->env.cpu_pmu_caps)
+ if (!ret && !env->cpu_pmu_caps)
pr_debug("cpu pmu capabilities not available\n");
return ret;
}
static int process_pmu_caps(struct feat_fd *ff, void *data __maybe_unused)
{
+ struct perf_env *env = &ff->ph->env;
struct pmu_caps *pmu_caps;
u32 nr_pmu, i;
int ret;
@@ -3361,8 +3372,8 @@ static int process_pmu_caps(struct feat_fd *ff, void *data __maybe_unused)
}
}
- ff->ph->env.nr_pmus_with_caps = nr_pmu;
- ff->ph->env.pmu_caps = pmu_caps;
+ env->nr_pmus_with_caps = nr_pmu;
+ env->pmu_caps = pmu_caps;
return 0;
err:
@@ -3660,6 +3671,7 @@ static int perf_session__do_write_header(struct perf_session *session,
struct perf_header *header = &session->header;
struct evsel *evsel;
struct feat_fd ff = {
+ .ph = header,
.fd = fd,
};
u64 attr_offset = sizeof(f_header), attr_size = 0;
@@ -4228,7 +4240,7 @@ int perf_session__read_header(struct perf_session *session)
if (session->evlist == NULL)
return -ENOMEM;
- session->evlist->env = &header->env;
+ session->evlist->session = session;
session->machines.host.env = &header->env;
/*
@@ -4341,12 +4353,12 @@ out_delete_evlist:
int perf_event__process_feature(struct perf_session *session,
union perf_event *event)
{
- const struct perf_tool *tool = session->tool;
struct feat_fd ff = { .fd = 0 };
struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event;
int type = fe->header.type;
u64 feat = fe->feat_id;
int ret = 0;
+ bool print = dump_trace;
if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
pr_warning("invalid record type %d in pipe-mode\n", type);
@@ -4357,28 +4369,35 @@ int perf_event__process_feature(struct perf_session *session,
return -1;
}
- if (!feat_ops[feat].process)
- return 0;
-
ff.buf = (void *)fe->data;
ff.size = event->header.size - sizeof(*fe);
ff.ph = &session->header;
- if (feat_ops[feat].process(&ff, NULL)) {
+ if (feat_ops[feat].process && feat_ops[feat].process(&ff, NULL)) {
ret = -1;
goto out;
}
- if (!feat_ops[feat].print || !tool->show_feat_hdr)
- goto out;
+ if (session->tool->show_feat_hdr) {
+ if (!feat_ops[feat].full_only ||
+ session->tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
+ print = true;
+ } else {
+ fprintf(stdout, "# %s info available, use -I to display\n",
+ feat_ops[feat].name);
+ }
+ }
- if (!feat_ops[feat].full_only ||
- tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
- feat_ops[feat].print(&ff, stdout);
- } else {
- fprintf(stdout, "# %s info available, use -I to display\n",
- feat_ops[feat].name);
+ if (dump_trace)
+ printf(", ");
+
+ if (print) {
+ if (feat_ops[feat].print)
+ feat_ops[feat].print(&ff, stdout);
+ else
+ printf("# %s", feat_ops[feat].name);
}
+
out:
free_event_desc(ff.events);
return ret;
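
The rework above replaces several early returns with one decision; the resulting policy, sketched as a table:

/*
 * feat_ops[feat].process  runs whenever present, even if nothing prints
 * dump_trace              forces printing, with ", " separators
 * show_feat_hdr           prints, unless the feature is full_only and
 *                         SHOW_FEAT_HEADER_FULL_INFO (-I) was not given;
 *                         then only the "use -I to display" hint is shown
 * missing .print callback falls back to printing "# <feature name>"
 */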
@@ -4420,6 +4439,11 @@ size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
return ret;
}
+size_t perf_event__fprintf_attr(union perf_event *event, FILE *fp)
+{
+ return perf_event_attr__fprintf(fp, &event->attr.attr, __desc_attr__fprintf, NULL);
+}
+
int perf_event__process_attr(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct evlist **pevlist)
@@ -4429,6 +4453,9 @@ int perf_event__process_attr(const struct perf_tool *tool __maybe_unused,
struct evsel *evsel;
struct evlist *evlist = *pevlist;
+ if (dump_trace)
+ perf_event__fprintf_attr(event, stdout);
+
if (evlist == NULL) {
*pevlist = evlist = evlist__new();
if (evlist == NULL)
@@ -4495,8 +4522,8 @@ int perf_event__process_event_update(const struct perf_tool *tool __maybe_unused
case PERF_EVENT_UPDATE__CPUS:
map = cpu_map__new_data(&ev->cpus.cpus);
if (map) {
- perf_cpu_map__put(evsel->core.own_cpus);
- evsel->core.own_cpus = map;
+ perf_cpu_map__put(evsel->core.pmu_cpus);
+ evsel->core.pmu_cpus = map;
} else
pr_err("failed to get event_update cpus\n");
default:
@@ -4507,7 +4534,8 @@ int perf_event__process_event_update(const struct perf_tool *tool __maybe_unused
}
#ifdef HAVE_LIBTRACEEVENT
-int perf_event__process_tracing_data(struct perf_session *session,
+int perf_event__process_tracing_data(const struct perf_tool *tool __maybe_unused,
+ struct perf_session *session,
union perf_event *event)
{
ssize_t size_read, padding, size = event->tracing_data.size;
@@ -4555,7 +4583,8 @@ int perf_event__process_tracing_data(struct perf_session *session,
}
#endif
-int perf_event__process_build_id(struct perf_session *session,
+int perf_event__process_build_id(const struct perf_tool *tool __maybe_unused,
+ struct perf_session *session,
union perf_event *event)
{
__event_process_build_id(&event->build_id,
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 5201af6305f4..c058021c3150 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -175,12 +175,15 @@ int perf_event__process_attr(const struct perf_tool *tool, union perf_event *eve
int perf_event__process_event_update(const struct perf_tool *tool,
union perf_event *event,
struct evlist **pevlist);
+size_t perf_event__fprintf_attr(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp);
#ifdef HAVE_LIBTRACEEVENT
-int perf_event__process_tracing_data(struct perf_session *session,
+int perf_event__process_tracing_data(const struct perf_tool *tool,
+ struct perf_session *session,
union perf_event *event);
#endif
-int perf_event__process_build_id(struct perf_session *session,
+int perf_event__process_build_id(const struct perf_tool *tool,
+ struct perf_session *session,
union perf_event *event);
bool is_perf_magic(u64 magic);
diff --git a/tools/perf/util/hisi-ptt-decoder/Build b/tools/perf/util/hisi-ptt-decoder/Build
index 3298f7b7e308..2ee0eb731656 100644
--- a/tools/perf/util/hisi-ptt-decoder/Build
+++ b/tools/perf/util/hisi-ptt-decoder/Build
@@ -1 +1 @@
-perf-util-$(CONFIG_AUXTRACE) += hisi-ptt-pkt-decoder.o
+perf-util-y += hisi-ptt-pkt-decoder.o
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index d65228c11412..ef4b569f7df4 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -336,6 +336,69 @@ static void he_stat__decay(struct he_stat *he_stat)
he_stat->latency = (he_stat->latency * 7) / 8;
}
+static int hists__update_mem_stat(struct hists *hists, struct hist_entry *he,
+ struct mem_info *mi, u64 period)
+{
+ if (hists->nr_mem_stats == 0)
+ return 0;
+
+ if (he->mem_stat == NULL) {
+ he->mem_stat = calloc(hists->nr_mem_stats, sizeof(*he->mem_stat));
+ if (he->mem_stat == NULL)
+ return -1;
+ }
+
+ for (int i = 0; i < hists->nr_mem_stats; i++) {
+ int idx = mem_stat_index(hists->mem_stat_types[i],
+ mem_info__const_data_src(mi)->val);
+
+ assert(0 <= idx && idx < MEM_STAT_LEN);
+ he->mem_stat[i].entries[idx] += period;
+ hists->mem_stat_total[i].entries[idx] += period;
+ }
+ return 0;
+}
+
+static void hists__add_mem_stat(struct hists *hists, struct hist_entry *dst,
+ struct hist_entry *src)
+{
+ if (hists->nr_mem_stats == 0)
+ return;
+
+ for (int i = 0; i < hists->nr_mem_stats; i++) {
+ for (int k = 0; k < MEM_STAT_LEN; k++)
+ dst->mem_stat[i].entries[k] += src->mem_stat[i].entries[k];
+ }
+}
+
+static int hists__clone_mem_stat(struct hists *hists, struct hist_entry *dst,
+ struct hist_entry *src)
+{
+ if (hists->nr_mem_stats == 0)
+ return 0;
+
+ dst->mem_stat = calloc(hists->nr_mem_stats, sizeof(*dst->mem_stat));
+ if (dst->mem_stat == NULL)
+ return -1;
+
+ for (int i = 0; i < hists->nr_mem_stats; i++) {
+ for (int k = 0; k < MEM_STAT_LEN; k++)
+ dst->mem_stat[i].entries[k] = src->mem_stat[i].entries[k];
+ }
+ return 0;
+}
+
+static void hists__decay_mem_stat(struct hists *hists, struct hist_entry *he)
+{
+ if (hists->nr_mem_stats == 0)
+ return;
+
+ for (int i = 0; i < hists->nr_mem_stats; i++) {
+ for (int k = 0; k < MEM_STAT_LEN; k++)
+ he->mem_stat[i].entries[k] = (he->mem_stat[i].entries[k] * 7) / 8;
+ }
+}
+
static void hists__delete_entry(struct hists *hists, struct hist_entry *he);
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
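
A sketch of the accounting these helpers implement, assuming mem_stat_index() and the mem_stat_type values come from mem-events.h as used above:

/*
 * For each enabled stat type, the sample's PERF_SAMPLE_DATA_SRC value
 * selects one of the MEM_STAT_LEN columns, and the sample period is
 * added both to the hist entry and to the hists-wide total:
 *
 *   int idx = mem_stat_index(hists->mem_stat_types[i],
 *                            mem_info__const_data_src(mi)->val);
 *   he->mem_stat[i].entries[idx]          += period;
 *   hists->mem_stat_total[i].entries[idx] += period;
 *
 * Collapsing and decay then treat each column like any other he_stat
 * field (add on merge, multiply by 7/8 on decay).
 */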
@@ -350,6 +413,7 @@ static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
if (symbol_conf.cumulate_callchain)
he_stat__decay(he->stat_acc);
decay_callchain(he->callchain);
+ hists__decay_mem_stat(hists, he);
if (!he->depth) {
u64 period_diff = prev_period - he->stat.period;
@@ -544,10 +608,8 @@ err_infos:
map_symbol__exit(&he->branch_info->to.ms);
zfree(&he->branch_info);
}
- if (he->mem_info) {
- map_symbol__exit(&mem_info__iaddr(he->mem_info)->ms);
- map_symbol__exit(&mem_info__daddr(he->mem_info)->ms);
- }
+ if (he->mem_info)
+ mem_info__zput(he->mem_info);
err:
map_symbol__exit(&he->ms);
zfree(&he->stat_acc);
@@ -693,6 +755,10 @@ out:
he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
if (symbol_conf.cumulate_callchain)
he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
+ if (hists__update_mem_stat(hists, he, entry->mem_info, period) < 0) {
+ hist_entry__delete(he);
+ return NULL;
+ }
return he;
}
@@ -761,7 +827,7 @@ __hists__add_entry(struct hists *hists,
.period = sample->period,
.weight1 = sample->weight,
.weight2 = sample->ins_lat,
- .weight3 = sample->p_stage_cyc,
+ .weight3 = sample->weight3,
.latency = al->latency,
},
.parent = sym_parent,
@@ -778,7 +844,7 @@ __hists__add_entry(struct hists *hists,
.time = hist_time(sample->time),
.weight = sample->weight,
.ins_lat = sample->ins_lat,
- .p_stage_cyc = sample->p_stage_cyc,
+ .weight3 = sample->weight3,
.simd_flags = sample->simd_flags,
}, *he = hists__findnew_entry(hists, &entry, al, sample_self);
@@ -1423,6 +1489,7 @@ void hist_entry__delete(struct hist_entry *he)
free_callchain(he->callchain);
zfree(&he->trace_output);
zfree(&he->raw_data);
+ zfree(&he->mem_stat);
ops->free(he);
}
@@ -1572,6 +1639,7 @@ static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
cmp = hist_entry__collapse_hierarchy(hpp_list, iter, he);
if (!cmp) {
he_stat__add_stat(&iter->stat, &he->stat);
+ hists__add_mem_stat(hists, iter, he);
return iter;
}
@@ -1613,6 +1681,11 @@ static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
new->srcfile = NULL;
}
+ if (hists__clone_mem_stat(hists, new, he) < 0) {
+ hist_entry__delete(new);
+ return NULL;
+ }
+
rb_link_node(&new->rb_node_in, parent, p);
rb_insert_color_cached(&new->rb_node_in, root, leftmost);
return new;
@@ -1695,6 +1768,7 @@ static int hists__collapse_insert_entry(struct hists *hists,
he_stat__add_stat(&iter->stat, &he->stat);
if (symbol_conf.cumulate_callchain)
he_stat__add_stat(iter->stat_acc, he->stat_acc);
+ hists__add_mem_stat(hists, iter, he);
if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
struct callchain_cursor *cursor = get_tls_callchain_cursor();
@@ -2978,6 +3052,8 @@ static void hists_evsel__exit(struct evsel *evsel)
struct perf_hpp_list_node *node, *tmp;
hists__delete_all_entries(hists);
+ zfree(&hists->mem_stat_types);
+ zfree(&hists->mem_stat_total);
list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 317d06cca8b8..1d5ea632ca4e 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -9,6 +9,7 @@
#include "events_stats.h"
#include "evsel.h"
#include "map_symbol.h"
+#include "mem-events.h"
#include "mutex.h"
#include "sample.h"
#include "spark.h"
@@ -41,6 +42,7 @@ enum hist_column {
HISTC_TIME,
HISTC_DSO,
HISTC_THREAD,
+ HISTC_TGID,
HISTC_COMM,
HISTC_CGROUP_ID,
HISTC_CGROUP,
@@ -100,6 +102,13 @@ enum hist_column {
struct thread;
struct dso;
+#define MEM_STAT_LEN 8
+
+struct he_mem_stat {
+ /* meaning of entries depends on enum mem_stat_type */
+ u64 entries[MEM_STAT_LEN];
+};
+
struct hists {
struct rb_root_cached entries_in_array[2];
struct rb_root_cached *entries_in;
@@ -125,6 +134,9 @@ struct hists {
struct perf_hpp_list *hpp_list;
struct list_head hpp_formats;
int nr_hpp_node;
+ int nr_mem_stats;
+ enum mem_stat_type *mem_stat_types;
+ struct he_mem_stat *mem_stat_total;
};
#define hists__has(__h, __f) (__h)->hpp_list->__f
@@ -232,6 +244,7 @@ struct hist_entry {
} pairs;
struct he_stat stat;
struct he_stat *stat_acc;
+ struct he_mem_stat *mem_stat;
struct map_symbol ms;
struct thread *thread;
struct comm *comm;
@@ -242,7 +255,8 @@ struct hist_entry {
u64 code_page_size;
u64 weight;
u64 ins_lat;
- u64 p_stage_cyc;
+ /** @weight3: On x86 holds retire_lat, on powerpc holds p_stage_cyc. */
+ u64 weight3;
s32 socket;
s32 cpu;
int parallelism;
@@ -576,18 +590,25 @@ enum {
PERF_HPP__WEIGHT1,
PERF_HPP__WEIGHT2,
PERF_HPP__WEIGHT3,
+ PERF_HPP__MEM_STAT_OP,
+ PERF_HPP__MEM_STAT_CACHE,
+ PERF_HPP__MEM_STAT_MEMORY,
+ PERF_HPP__MEM_STAT_SNOOP,
+ PERF_HPP__MEM_STAT_DTLB,
PERF_HPP__MAX_INDEX
};
void perf_hpp__init(void);
-void perf_hpp__cancel_cumulate(void);
-void perf_hpp__cancel_latency(void);
+void perf_hpp__cancel_cumulate(struct evlist *evlist);
+void perf_hpp__cancel_latency(struct evlist *evlist);
void perf_hpp__setup_output_field(struct perf_hpp_list *list);
void perf_hpp__reset_output_field(struct perf_hpp_list *list);
void perf_hpp__append_sort_keys(struct perf_hpp_list *list);
int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
struct evlist *evlist);
+int perf_hpp__alloc_mem_stats(struct perf_hpp_list *list,
+ struct evlist *evlist);
bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format);
@@ -643,6 +664,9 @@ int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he, hpp_field_fn get_field,
const char *fmtstr, hpp_snprint_fn print_fn,
enum perf_hpp_fmt_type fmtype);
+int hpp__fmt_mem_stat(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he, enum mem_stat_type mst,
+ const char *fmtstr, hpp_snprint_fn print_fn);
static inline void advance_hpp(struct perf_hpp *hpp, int inc)
{
@@ -685,15 +709,18 @@ struct block_hist {
struct hist_entry he;
};
+#define NO_ADDR 0
+
#ifdef HAVE_SLANG_SUPPORT
#include "../ui/keysyms.h"
void attr_to_script(char *buf, struct perf_event_attr *attr);
-int map_symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
- struct hist_browser_timer *hbt);
+int __hist_entry__tui_annotate(struct hist_entry *he, struct map_symbol *ms,
+ struct evsel *evsel,
+ struct hist_browser_timer *hbt, u64 al_addr);
int hist_entry__tui_annotate(struct hist_entry *he, struct evsel *evsel,
- struct hist_browser_timer *hbt);
+ struct hist_browser_timer *hbt, u64 al_addr);
int evlist__tui_browse_hists(struct evlist *evlist, const char *help, struct hist_browser_timer *hbt,
float min_pcnt, struct perf_env *env, bool warn_lost_event);
@@ -718,16 +745,19 @@ int evlist__tui_browse_hists(struct evlist *evlist __maybe_unused,
{
return 0;
}
-static inline int map_symbol__tui_annotate(struct map_symbol *ms __maybe_unused,
- struct evsel *evsel __maybe_unused,
- struct hist_browser_timer *hbt __maybe_unused)
+static inline int __hist_entry__tui_annotate(struct hist_entry *he __maybe_unused,
+ struct map_symbol *ms __maybe_unused,
+ struct evsel *evsel __maybe_unused,
+ struct hist_browser_timer *hbt __maybe_unused,
+ u64 al_addr __maybe_unused)
{
return 0;
}
static inline int hist_entry__tui_annotate(struct hist_entry *he __maybe_unused,
struct evsel *evsel __maybe_unused,
- struct hist_browser_timer *hbt __maybe_unused)
+ struct hist_browser_timer *hbt __maybe_unused,
+ u64 al_addr __maybe_unused)
{
return 0;
}
diff --git a/tools/perf/util/hwmon_pmu.c b/tools/perf/util/hwmon_pmu.c
index 3cce77fc8004..279d6b1a47f0 100644
--- a/tools/perf/util/hwmon_pmu.c
+++ b/tools/perf/util/hwmon_pmu.c
@@ -104,7 +104,7 @@ static const char *const hwmon_units[HWMON_TYPE_MAX] = {
struct hwmon_pmu {
struct perf_pmu pmu;
struct hashmap events;
- int hwmon_dir_fd;
+ char *hwmon_dir;
};
/**
@@ -245,7 +245,7 @@ static int hwmon_pmu__read_events(struct hwmon_pmu *pmu)
return 0;
/* Use openat so that the directory contents are refreshed. */
- io_dir__init(&dir, openat(pmu->hwmon_dir_fd, ".", O_CLOEXEC | O_DIRECTORY | O_RDONLY));
+ io_dir__init(&dir, open(pmu->hwmon_dir, O_CLOEXEC | O_DIRECTORY | O_RDONLY));
if (dir.dirfd < 0)
return -ENOENT;
@@ -283,7 +283,7 @@ static int hwmon_pmu__read_events(struct hwmon_pmu *pmu)
__set_bit(item, alarm ? value->alarm_items : value->items);
if (item == HWMON_ITEM_LABEL) {
char buf[128];
- int fd = openat(pmu->hwmon_dir_fd, ent->d_name, O_RDONLY);
+ int fd = openat(dir.dirfd, ent->d_name, O_RDONLY);
ssize_t read_len;
if (fd < 0)
@@ -342,46 +342,52 @@ err_out:
return err;
}
-struct perf_pmu *hwmon_pmu__new(struct list_head *pmus, int hwmon_dir, const char *sysfs_name, const char *name)
+struct perf_pmu *hwmon_pmu__new(struct list_head *pmus, const char *hwmon_dir,
+ const char *sysfs_name, const char *name)
{
- char buf[32];
+ char buf[64];
struct hwmon_pmu *hwm;
+ __u32 type = PERF_PMU_TYPE_HWMON_START + strtoul(sysfs_name + 5, NULL, 10);
+
+ if (type > PERF_PMU_TYPE_HWMON_END) {
+ pr_err("Unable to encode hwmon type from %s in valid PMU type\n", sysfs_name);
+ return NULL;
+ }
+
+ snprintf(buf, sizeof(buf), "hwmon_%s", name);
+ fix_name(buf + 6);
hwm = zalloc(sizeof(*hwm));
if (!hwm)
return NULL;
- hwm->hwmon_dir_fd = hwmon_dir;
- hwm->pmu.type = PERF_PMU_TYPE_HWMON_START + strtoul(sysfs_name + 5, NULL, 10);
- if (hwm->pmu.type > PERF_PMU_TYPE_HWMON_END) {
- pr_err("Unable to encode hwmon type from %s in valid PMU type\n", sysfs_name);
- goto err_out;
+ if (perf_pmu__init(&hwm->pmu, type, buf) != 0) {
+ perf_pmu__delete(&hwm->pmu);
+ return NULL;
+ }
+
+ hwm->hwmon_dir = strdup(hwmon_dir);
+ if (!hwm->hwmon_dir) {
+ perf_pmu__delete(&hwm->pmu);
+ return NULL;
}
- snprintf(buf, sizeof(buf), "hwmon_%s", name);
- fix_name(buf + 6);
- hwm->pmu.name = strdup(buf);
- if (!hwm->pmu.name)
- goto err_out;
hwm->pmu.alias_name = strdup(sysfs_name);
- if (!hwm->pmu.alias_name)
- goto err_out;
- hwm->pmu.cpus = perf_cpu_map__new("0");
- if (!hwm->pmu.cpus)
- goto err_out;
+ if (!hwm->pmu.alias_name) {
+ perf_pmu__delete(&hwm->pmu);
+ return NULL;
+ }
+ hwm->pmu.cpus = perf_cpu_map__new_int(0);
+ if (!hwm->pmu.cpus) {
+ perf_pmu__delete(&hwm->pmu);
+ return NULL;
+ }
INIT_LIST_HEAD(&hwm->pmu.format);
- INIT_LIST_HEAD(&hwm->pmu.aliases);
INIT_LIST_HEAD(&hwm->pmu.caps);
hashmap__init(&hwm->events, hwmon_pmu__event_hashmap_hash,
hwmon_pmu__event_hashmap_equal, /*ctx=*/NULL);
list_add_tail(&hwm->pmu.list, pmus);
return &hwm->pmu;
-err_out:
- free((char *)hwm->pmu.name);
- free(hwm->pmu.alias_name);
- free(hwm);
- close(hwmon_dir);
- return NULL;
}
void hwmon_pmu__exit(struct perf_pmu *pmu)
@@ -398,7 +404,7 @@ void hwmon_pmu__exit(struct perf_pmu *pmu)
free(value);
}
hashmap__clear(&hwm->events);
- close(hwm->hwmon_dir_fd);
+ zfree(&hwm->hwmon_dir);
}
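
Holding a path instead of a cached descriptor means every user takes a short-lived O_DIRECTORY fd, so a re-probed hwmon device is re-read rather than served stale; the pattern, sketched (the file name is illustrative):

int dir = open(hwm->hwmon_dir, O_CLOEXEC | O_DIRECTORY | O_RDONLY);

if (dir >= 0) {
	int fd = openat(dir, "temp1_input", O_RDONLY); /* hypothetical */

	/* ... read and use the value ... */
	if (fd >= 0)
		close(fd);
	close(dir);
}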
static size_t hwmon_pmu__describe_items(struct hwmon_pmu *hwm, char *out_buf, size_t out_buf_len,
@@ -408,6 +414,10 @@ static size_t hwmon_pmu__describe_items(struct hwmon_pmu *hwm, char *out_buf, si
size_t bit;
char buf[64];
size_t len = 0;
+ int dir = open(hwm->hwmon_dir, O_CLOEXEC | O_DIRECTORY | O_RDONLY);
+
+ if (dir < 0)
+ return 0;
for_each_set_bit(bit, items, HWMON_ITEM__MAX) {
int fd;
@@ -420,7 +430,7 @@ static size_t hwmon_pmu__describe_items(struct hwmon_pmu *hwm, char *out_buf, si
key.num,
hwmon_item_strs[bit],
is_alarm ? "_alarm" : "");
- fd = openat(hwm->hwmon_dir_fd, buf, O_RDONLY);
+ fd = openat(dir, buf, O_RDONLY);
if (fd > 0) {
ssize_t read_len = read(fd, buf, sizeof(buf));
@@ -442,6 +452,7 @@ static size_t hwmon_pmu__describe_items(struct hwmon_pmu *hwm, char *out_buf, si
close(fd);
}
}
+ close(dir);
return len;
}
@@ -711,6 +722,7 @@ int perf_pmus__read_hwmon_pmus(struct list_head *pmus)
size_t line_len;
int hwmon_dir, name_fd;
struct io io;
+ char buf2[128];
if (class_hwmon_ent->d_type != DT_LNK)
continue;
@@ -729,12 +741,12 @@ int perf_pmus__read_hwmon_pmus(struct list_head *pmus)
close(hwmon_dir);
continue;
}
- io__init(&io, name_fd, buf, sizeof(buf));
- io__getline(&io, &line, &line_len);
- if (line_len > 0 && line[line_len - 1] == '\n')
+ io__init(&io, name_fd, buf2, sizeof(buf2));
+ if (io__getline(&io, &line, &line_len) > 0 && line[line_len - 1] == '\n')
line[line_len - 1] = '\0';
- hwmon_pmu__new(pmus, hwmon_dir, class_hwmon_ent->d_name, line);
+ hwmon_pmu__new(pmus, buf, class_hwmon_ent->d_name, line);
close(name_fd);
+ close(hwmon_dir);
}
free(line);
close(class_hwmon_dir.dirfd);
@@ -752,6 +764,10 @@ int evsel__hwmon_pmu_open(struct evsel *evsel,
.type_and_num = evsel->core.attr.config,
};
int idx = 0, thread = 0, nthreads, err = 0;
+ int dir = open(hwm->hwmon_dir, O_CLOEXEC | O_DIRECTORY | O_RDONLY);
+
+ if (dir < 0)
+ return -errno;
nthreads = perf_thread_map__nr(threads);
for (idx = start_cpu_map_idx; idx < end_cpu_map_idx; idx++) {
@@ -762,7 +778,7 @@ int evsel__hwmon_pmu_open(struct evsel *evsel,
snprintf(buf, sizeof(buf), "%s%d_input",
hwmon_type_strs[key.type], key.num);
- fd = openat(hwm->hwmon_dir_fd, buf, O_RDONLY);
+ fd = openat(dir, buf, O_RDONLY);
FD(evsel, idx, thread) = fd;
if (fd < 0) {
err = -errno;
@@ -770,6 +786,7 @@ int evsel__hwmon_pmu_open(struct evsel *evsel,
}
}
}
+ close(dir);
return 0;
out_close:
if (err)
@@ -783,6 +800,7 @@ out_close:
}
thread = nthreads;
} while (--idx >= 0);
+ close(dir);
return err;
}
diff --git a/tools/perf/util/hwmon_pmu.h b/tools/perf/util/hwmon_pmu.h
index b3329774d2b2..d1e403c8b70b 100644
--- a/tools/perf/util/hwmon_pmu.h
+++ b/tools/perf/util/hwmon_pmu.h
@@ -37,7 +37,7 @@ enum hwmon_type {
/**
* enum hwmon_item:
*
- * Similar to enum hwmon_type but describes the item part of a a sysfs filename.
+ * Similar to enum hwmon_type but describes the item part of a sysfs filename.
*
* This enum is exposed for testing.
*/
@@ -135,14 +135,14 @@ bool parse_hwmon_filename(const char *filename,
* hwmon_pmu__new() - Allocate and construct a hwmon PMU.
*
* @pmus: The list of PMUs to be added to.
- * @hwmon_dir: An O_DIRECTORY file descriptor for a hwmon directory.
+ * @hwmon_dir: The path to a hwmon directory.
* @sysfs_name: Name of the hwmon sysfs directory like hwmon0.
* @name: The contents of the "name" file in the hwmon directory.
*
* Exposed for testing. Regular construction should happen via
* perf_pmus__read_hwmon_pmus.
*/
-struct perf_pmu *hwmon_pmu__new(struct list_head *pmus, int hwmon_dir,
+struct perf_pmu *hwmon_pmu__new(struct list_head *pmus, const char *hwmon_dir,
const char *sysfs_name, const char *name);
void hwmon_pmu__exit(struct perf_pmu *pmu);
diff --git a/tools/perf/util/include/linux/linkage.h b/tools/perf/util/include/linux/linkage.h
index 178b00205fe6..34e2fdfe7300 100644
--- a/tools/perf/util/include/linux/linkage.h
+++ b/tools/perf/util/include/linux/linkage.h
@@ -120,7 +120,7 @@
#endif
// In the kernel sources (include/linux/cfi_types.h), this has a different
-// definition when CONFIG_CFI_CLANG is used, for tools/ just use the !clang
+// definition when CONFIG_CFI is used; for tools/ just use the !cfi
// definition:
#ifndef SYM_TYPED_START
#define SYM_TYPED_START(name, linkage, align...) \
@@ -132,4 +132,8 @@
SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
#endif
+#ifndef SYM_PIC_ALIAS
+#define SYM_PIC_ALIAS(sym) SYM_ALIAS(__pi_ ## sym, sym, SYM_T_FUNC, SYM_L_GLOBAL)
+#endif
+
#endif /* PERF_LINUX_LINKAGE_H_ */
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
index 3625c6224750..382255393fb3 100644
--- a/tools/perf/util/intel-bts.c
+++ b/tools/perf/util/intel-bts.c
@@ -777,9 +777,7 @@ static int intel_bts_synth_events(struct intel_bts *bts,
attr.sample_id_all = evsel->core.attr.sample_id_all;
attr.read_format = evsel->core.attr.read_format;
- id = evsel->core.id[0] + 1000000000;
- if (!id)
- id = 1;
+ id = auxtrace_synth_id_range_start(evsel);
if (bts->synth_opts.branches) {
attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
diff --git a/tools/perf/util/intel-pt-decoder/Build b/tools/perf/util/intel-pt-decoder/Build
index 5b8f0149167d..8fd7e4330044 100644
--- a/tools/perf/util/intel-pt-decoder/Build
+++ b/tools/perf/util/intel-pt-decoder/Build
@@ -1,4 +1,4 @@
-perf-util-$(CONFIG_AUXTRACE) += intel-pt-pkt-decoder.o intel-pt-insn-decoder.o intel-pt-log.o intel-pt-decoder.o
+perf-util-y += intel-pt-pkt-decoder.o intel-pt-insn-decoder.o intel-pt-log.o intel-pt-decoder.o
inat_tables_script = $(srctree)/tools/arch/x86/tools/gen-insn-attr-x86.awk
inat_tables_maps = $(srctree)/tools/arch/x86/lib/x86-opcode-map.txt
@@ -7,11 +7,7 @@ $(OUTPUT)util/intel-pt-decoder/inat-tables.c: $(inat_tables_script) $(inat_table
$(call rule_mkdir)
@$(call echo-cmd,gen)$(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@ || rm -f $@
-ifeq ($(SRCARCH),x86)
- perf-util-y += inat.o insn.o
-else
- perf-util-$(CONFIG_AUXTRACE) += inat.o insn.o
-endif
+perf-util-y += inat.o insn.o
$(OUTPUT)util/intel-pt-decoder/inat.o: $(srctree)/tools/arch/x86/lib/inat.c $(OUTPUT)util/intel-pt-decoder/inat-tables.c
$(call rule_mkdir)
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
index 8fabddc1c0da..72c7a4e15d61 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
@@ -32,7 +32,7 @@ static void intel_pt_insn_decoder(struct insn *insn,
intel_pt_insn->rel = 0;
intel_pt_insn->emulated_ptwrite = false;
- if (insn_is_avx(insn)) {
+ if (insn_is_avx_or_xop(insn)) {
intel_pt_insn->op = INTEL_PT_OP_OTHER;
intel_pt_insn->branch = INTEL_PT_BR_NO_BRANCH;
intel_pt_insn->length = insn->length;
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 4e8a9b172fbc..fc9eec8b54b8 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -127,6 +127,7 @@ struct intel_pt {
bool single_pebs;
bool sample_pebs;
+ int pebs_data_src_fmt;
struct evsel *pebs_evsel;
u64 evt_sample_type;
@@ -175,6 +176,7 @@ enum switch_state {
struct intel_pt_pebs_event {
struct evsel *evsel;
u64 id;
+ int data_src_fmt;
};
struct intel_pt_queue {
@@ -2272,7 +2274,146 @@ static void intel_pt_add_lbrs(struct branch_stack *br_stack,
}
}
-static int intel_pt_do_synth_pebs_sample(struct intel_pt_queue *ptq, struct evsel *evsel, u64 id)
+#define P(a, b) PERF_MEM_S(a, b)
+#define OP_LH (P(OP, LOAD) | P(LVL, HIT))
+#define LEVEL(x) P(LVLNUM, x)
+#define REM P(REMOTE, REMOTE)
+#define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))
+
+#define PERF_PEBS_DATA_SOURCE_GRT_MAX 0x10
+#define PERF_PEBS_DATA_SOURCE_GRT_MASK (PERF_PEBS_DATA_SOURCE_GRT_MAX - 1)
+
+/* Based on kernel __intel_pmu_pebs_data_source_grt() and pebs_data_source */
+static const u64 pebs_data_source_grt[PERF_PEBS_DATA_SOURCE_GRT_MAX] = {
+ P(OP, LOAD) | P(LVL, MISS) | LEVEL(L3) | P(SNOOP, NA), /* L3 miss|SNP N/A */
+ OP_LH | P(LVL, L1) | LEVEL(L1) | P(SNOOP, NONE), /* L1 hit|SNP None */
+ OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE), /* LFB/MAB hit|SNP None */
+ OP_LH | P(LVL, L2) | LEVEL(L2) | P(SNOOP, NONE), /* L2 hit|SNP None */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, NONE), /* L3 hit|SNP None */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT), /* L3 hit|SNP Hit */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM), /* L3 hit|SNP HitM */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM), /* L3 hit|SNP HitM */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOPX, FWD), /* L3 hit|SNP Fwd */
+ OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HITM), /* Remote L3 hit|SNP HitM */
+ OP_LH | P(LVL, LOC_RAM) | LEVEL(RAM) | P(SNOOP, HIT), /* RAM hit|SNP Hit */
+ OP_LH | P(LVL, REM_RAM1) | REM | LEVEL(L3) | P(SNOOP, HIT), /* Remote L3 hit|SNP Hit */
+ OP_LH | P(LVL, LOC_RAM) | LEVEL(RAM) | SNOOP_NONE_MISS, /* RAM hit|SNP None or Miss */
+ OP_LH | P(LVL, REM_RAM1) | LEVEL(RAM) | REM | SNOOP_NONE_MISS, /* Remote RAM hit|SNP None or Miss */
+ OP_LH | P(LVL, IO) | LEVEL(NA) | P(SNOOP, NONE), /* I/O hit|SNP None */
+ OP_LH | P(LVL, UNC) | LEVEL(NA) | P(SNOOP, NONE), /* Uncached hit|SNP None */
+};
+
+/* Based on kernel __intel_pmu_pebs_data_source_cmt() and pebs_data_source */
+static const u64 pebs_data_source_cmt[PERF_PEBS_DATA_SOURCE_GRT_MAX] = {
+ P(OP, LOAD) | P(LVL, MISS) | LEVEL(L3) | P(SNOOP, NA), /* L3 miss|SNP N/A */
+ OP_LH | P(LVL, L1) | LEVEL(L1) | P(SNOOP, NONE), /* L1 hit|SNP None */
+ OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE), /* LFB/MAB hit|SNP None */
+ OP_LH | P(LVL, L2) | LEVEL(L2) | P(SNOOP, NONE), /* L2 hit|SNP None */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, NONE), /* L3 hit|SNP None */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, MISS), /* L3 hit|SNP Hit */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT), /* L3 hit|SNP HitM */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOPX, FWD), /* L3 hit|SNP HitM */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM), /* L3 hit|SNP Fwd */
+ OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HITM), /* Remote L3 hit|SNP HitM */
+ OP_LH | P(LVL, LOC_RAM) | LEVEL(RAM) | P(SNOOP, NONE), /* RAM hit|SNP Hit */
+ OP_LH | LEVEL(RAM) | REM | P(SNOOP, NONE), /* Remote L3 hit|SNP Hit */
+ OP_LH | LEVEL(RAM) | REM | P(SNOOPX, FWD), /* RAM hit|SNP None or Miss */
+ OP_LH | LEVEL(RAM) | REM | P(SNOOP, HITM), /* Remote RAM hit|SNP None or Miss */
+ OP_LH | P(LVL, IO) | LEVEL(NA) | P(SNOOP, NONE), /* I/O hit|SNP None */
+ OP_LH | P(LVL, UNC) | LEVEL(NA) | P(SNOOP, NONE), /* Uncached hit|SNP None */
+};
+
+/* Based on kernel pebs_set_tlb_lock() */
+static inline void pebs_set_tlb_lock(u64 *val, bool tlb, bool lock)
+{
+ /*
+ * TLB access
+ * 0 = did not miss 2nd level TLB
+ * 1 = missed 2nd level TLB
+ */
+ if (tlb)
+ *val |= P(TLB, MISS) | P(TLB, L2);
+ else
+ *val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2);
+
+ /* locked prefix */
+ if (lock)
+ *val |= P(LOCK, LOCKED);
+}
+
+/* Based on kernel __grt_latency_data() */
+static u64 intel_pt_grt_latency_data(u8 dse, bool tlb, bool lock, bool blk,
+ const u64 *pebs_data_source)
+{
+ u64 val;
+
+ dse &= PERF_PEBS_DATA_SOURCE_GRT_MASK;
+ val = pebs_data_source[dse];
+
+ pebs_set_tlb_lock(&val, tlb, lock);
+
+ if (blk)
+ val |= P(BLK, DATA);
+ else
+ val |= P(BLK, NA);
+
+ return val;
+}
+
+/* Default value for data source */
+#define PERF_MEM_NA (PERF_MEM_S(OP, NA) |\
+ PERF_MEM_S(LVL, NA) |\
+ PERF_MEM_S(SNOOP, NA) |\
+ PERF_MEM_S(LOCK, NA) |\
+ PERF_MEM_S(TLB, NA) |\
+ PERF_MEM_S(LVLNUM, NA))
+
+enum DATA_SRC_FORMAT {
+ DATA_SRC_FORMAT_ERR = -1,
+ DATA_SRC_FORMAT_NA = 0,
+ DATA_SRC_FORMAT_GRT = 1,
+ DATA_SRC_FORMAT_CMT = 2,
+};
+
+/* Based on kernel grt_latency_data() and cmt_latency_data */
+static u64 intel_pt_get_data_src(u64 mem_aux_info, int data_src_fmt)
+{
+ switch (data_src_fmt) {
+ case DATA_SRC_FORMAT_GRT: {
+ union {
+ u64 val;
+ struct {
+ unsigned int dse:4;
+ unsigned int locked:1;
+ unsigned int stlb_miss:1;
+ unsigned int fwd_blk:1;
+ unsigned int reserved:25;
+ };
+ } x = {.val = mem_aux_info};
+ return intel_pt_grt_latency_data(x.dse, x.stlb_miss, x.locked, x.fwd_blk,
+ pebs_data_source_grt);
+ }
+ case DATA_SRC_FORMAT_CMT: {
+ union {
+ u64 val;
+ struct {
+ unsigned int dse:5;
+ unsigned int locked:1;
+ unsigned int stlb_miss:1;
+ unsigned int fwd_blk:1;
+ unsigned int reserved:24;
+ };
+ } x = {.val = mem_aux_info};
+ return intel_pt_grt_latency_data(x.dse, x.stlb_miss, x.locked, x.fwd_blk,
+ pebs_data_source_cmt);
+ }
+ default:
+ return PERF_MEM_NA;
+ }
+}
+
+static int intel_pt_do_synth_pebs_sample(struct intel_pt_queue *ptq, struct evsel *evsel,
+ u64 id, int data_src_fmt)
{
const struct intel_pt_blk_items *items = &ptq->state->items;
struct perf_sample sample;
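
A worked decode under DATA_SRC_FORMAT_GRT (the input value is illustrative):

/*
 * mem_aux_info = 0x21 unpacks to dse = 1, locked = 0, stlb_miss = 1,
 * fwd_blk = 0, so intel_pt_get_data_src(0x21, DATA_SRC_FORMAT_GRT)
 * returns pebs_data_source_grt[1] (L1 hit, SNP None) plus the bits
 * added by pebs_set_tlb_lock() and the BLK default:
 *
 *   OP_LH | P(LVL, L1) | LEVEL(L1) | P(SNOOP, NONE)
 *        | P(TLB, MISS) | P(TLB, L2) | P(BLK, NA)
 */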
@@ -2393,6 +2534,18 @@ static int intel_pt_do_synth_pebs_sample(struct intel_pt_queue *ptq, struct evse
}
}
+ if (sample_type & PERF_SAMPLE_DATA_SRC) {
+ if (items->has_mem_aux_info && data_src_fmt) {
+ if (data_src_fmt < 0) {
+ pr_err("Intel PT missing data_src info\n");
+ return -1;
+ }
+ sample.data_src = intel_pt_get_data_src(items->mem_aux_info, data_src_fmt);
+ } else {
+ sample.data_src = PERF_MEM_NA;
+ }
+ }
+
if (sample_type & PERF_SAMPLE_TRANSACTION && items->has_tsx_aux_info) {
u64 ax = items->has_rax ? items->rax : 0;
/* Refer kernel's intel_hsw_transaction() */
@@ -2413,9 +2566,10 @@ static int intel_pt_synth_single_pebs_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
struct evsel *evsel = pt->pebs_evsel;
+ int data_src_fmt = pt->pebs_data_src_fmt;
u64 id = evsel->core.id[0];
- return intel_pt_do_synth_pebs_sample(ptq, evsel, id);
+ return intel_pt_do_synth_pebs_sample(ptq, evsel, id, data_src_fmt);
}
static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
@@ -2440,7 +2594,7 @@ static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
hw_id);
return intel_pt_synth_single_pebs_sample(ptq);
}
- err = intel_pt_do_synth_pebs_sample(ptq, pe->evsel, pe->id);
+ err = intel_pt_do_synth_pebs_sample(ptq, pe->evsel, pe->id, pe->data_src_fmt);
if (err)
return err;
}
@@ -3407,6 +3561,49 @@ static int intel_pt_process_itrace_start(struct intel_pt *pt,
event->itrace_start.tid);
}
+/*
+ * Events with data_src are identified by L1_Hit_Indication;
+ * refer to https://github.com/intel/perfmon
+ */
+static int intel_pt_data_src_fmt(struct intel_pt *pt, struct evsel *evsel)
+{
+ struct perf_env *env = pt->machine->env;
+ int fmt = DATA_SRC_FORMAT_NA;
+
+ if (!env->cpuid)
+ return DATA_SRC_FORMAT_ERR;
+
+ /*
+ * PEBS-via-PT is only supported on E-core non-hybrid. Of those only
+ * Gracemont and Crestmont have data_src. Check for:
+ * Alderlake N (Gracemont)
+ * Sierra Forest (Crestmont)
+ * Grand Ridge (Crestmont)
+ */
+
+ if (!strncmp(env->cpuid, "GenuineIntel,6,190,", 19))
+ fmt = DATA_SRC_FORMAT_GRT;
+
+ if (!strncmp(env->cpuid, "GenuineIntel,6,175,", 19) ||
+ !strncmp(env->cpuid, "GenuineIntel,6,182,", 19))
+ fmt = DATA_SRC_FORMAT_CMT;
+
+ if (fmt == DATA_SRC_FORMAT_NA)
+ return fmt;
+
+ /*
+ * The only events with data_src are:
+ * mem-loads event=0xd0,umask=0x5
+ * mem-stores event=0xd0,umask=0x6
+ */
+ if (evsel->core.attr.type == PERF_TYPE_RAW &&
+ ((evsel->core.attr.config & 0xffff) == 0x5d0 ||
+ (evsel->core.attr.config & 0xffff) == 0x6d0))
+ return fmt;
+
+ return DATA_SRC_FORMAT_NA;
+}
+
static int intel_pt_process_aux_output_hw_id(struct intel_pt *pt,
union perf_event *event,
struct perf_sample *sample)
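
The 19-byte strncmp() works because, on x86, perf_env's cpuid string is assembled by get_cpuid() as "vendor,family,model,stepping":

/*
 * e.g. "GenuineIntel,6,190,0" for Alderlake N: the first 19 bytes
 * ("GenuineIntel,6,190,") pin vendor, family and model while the
 * stepping is deliberately ignored.
 */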
@@ -3427,6 +3624,7 @@ static int intel_pt_process_aux_output_hw_id(struct intel_pt *pt,
ptq->pebs[hw_id].evsel = evsel;
ptq->pebs[hw_id].id = sample->id;
+ ptq->pebs[hw_id].data_src_fmt = intel_pt_data_src_fmt(pt, evsel);
return 0;
}
@@ -3789,9 +3987,7 @@ static int intel_pt_synth_events(struct intel_pt *pt,
attr.sample_id_all = evsel->core.attr.sample_id_all;
attr.read_format = evsel->core.attr.read_format;
- id = evsel->core.id[0] + 1000000000;
- if (!id)
- id = 1;
+ id = auxtrace_synth_id_range_start(evsel);
if (pt->synth_opts.branches) {
attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
@@ -3976,6 +4172,7 @@ static void intel_pt_setup_pebs_events(struct intel_pt *pt)
}
pt->single_pebs = true;
pt->sample_pebs = true;
+ pt->pebs_data_src_fmt = intel_pt_data_src_fmt(pt, evsel);
pt->pebs_evsel = evsel;
}
}
diff --git a/tools/perf/util/intel-tpebs.c b/tools/perf/util/intel-tpebs.c
index 2c421b475b3b..3c958d738ca6 100644
--- a/tools/perf/util/intel-tpebs.c
+++ b/tools/perf/util/intel-tpebs.c
@@ -3,7 +3,7 @@
* intel_tpebs.c: Intel TPEBS support
*/
-
+#include <api/fs/fs.h>
#include <sys/param.h>
#include <subcmd/run-command.h>
#include <thread.h>
@@ -12,110 +12,175 @@
#include <linux/zalloc.h>
#include <linux/err.h>
#include "sample.h"
+#include "counts.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
+#include "mutex.h"
#include "session.h"
+#include "stat.h"
#include "tool.h"
#include "cpumap.h"
#include "metricgroup.h"
+#include "stat.h"
#include <sys/stat.h>
#include <sys/file.h>
+#include <errno.h>
#include <poll.h>
#include <math.h>
#define PERF_DATA "-"
bool tpebs_recording;
-static pid_t tpebs_pid = -1;
-static size_t tpebs_event_size;
+enum tpebs_mode tpebs_mode;
static LIST_HEAD(tpebs_results);
static pthread_t tpebs_reader_thread;
-static struct child_process *tpebs_cmd;
+static struct child_process tpebs_cmd;
+static int control_fd[2], ack_fd[2];
+static struct mutex tpebs_mtx;
struct tpebs_retire_lat {
struct list_head nd;
- /* Event name */
- const char *name;
- /* Event name with the TPEBS modifier R */
- const char *tpebs_name;
- /* Count of retire_latency values found in sample data */
- size_t count;
- /* Sum of all the retire_latency values in sample data */
- int sum;
- /* Average of retire_latency, val = sum / count */
- double val;
+ /** @evsel: The evsel that opened the retire_lat event. */
+ struct evsel *evsel;
+ /** @event: Event passed to perf record. */
+ char *event;
+ /** @stats: Recorded retirement latency stats. */
+ struct stats stats;
+ /** @last: Last retirement latency read. */
+ uint64_t last;
+ /** @started: Has the event been sent to perf record? */
+ bool started;
};
-static int get_perf_record_args(const char **record_argv, char buf[],
- const char *cpumap_buf)
+static void tpebs_mtx_init(void)
+{
+ mutex_init(&tpebs_mtx);
+}
+
+static struct mutex *tpebs_mtx_get(void)
{
- struct tpebs_retire_lat *e;
- int i = 0;
+ static pthread_once_t tpebs_mtx_once = PTHREAD_ONCE_INIT;
+
+ pthread_once(&tpebs_mtx_once, tpebs_mtx_init);
+ return &tpebs_mtx;
+}
- pr_debug("tpebs: Prepare perf record for retire_latency\n");
+static struct tpebs_retire_lat *tpebs_retire_lat__find(struct evsel *evsel)
+ EXCLUSIVE_LOCKS_REQUIRED(tpebs_mtx_get());
+
+static int evsel__tpebs_start_perf_record(struct evsel *evsel)
+{
+ const char **record_argv;
+ int tpebs_event_size = 0, i = 0, ret;
+ char control_fd_buf[32];
+ char cpumap_buf[50];
+ struct tpebs_retire_lat *t;
+
+ list_for_each_entry(t, &tpebs_results, nd)
+ tpebs_event_size++;
+
+ record_argv = malloc((10 + 2 * tpebs_event_size) * sizeof(*record_argv));
+ if (!record_argv)
+ return -ENOMEM;
record_argv[i++] = "perf";
record_argv[i++] = "record";
record_argv[i++] = "-W";
record_argv[i++] = "--synth=no";
- record_argv[i++] = buf;
- if (!cpumap_buf) {
- pr_err("tpebs: Require cpumap list to run sampling\n");
- return -ECANCELED;
- }
- /* Use -C when cpumap_buf is not "-1" */
- if (strcmp(cpumap_buf, "-1")) {
+ scnprintf(control_fd_buf, sizeof(control_fd_buf), "--control=fd:%d,%d",
+ control_fd[0], ack_fd[1]);
+ record_argv[i++] = control_fd_buf;
+
+ record_argv[i++] = "-o";
+ record_argv[i++] = PERF_DATA;
+
+ if (!perf_cpu_map__is_any_cpu_or_is_empty(evsel->evlist->core.user_requested_cpus)) {
+ cpu_map__snprint(evsel->evlist->core.user_requested_cpus, cpumap_buf,
+ sizeof(cpumap_buf));
record_argv[i++] = "-C";
record_argv[i++] = cpumap_buf;
}
- list_for_each_entry(e, &tpebs_results, nd) {
+ list_for_each_entry(t, &tpebs_results, nd) {
record_argv[i++] = "-e";
- record_argv[i++] = e->name;
+ record_argv[i++] = t->event;
}
+ record_argv[i++] = NULL;
+ assert(i == 10 + 2 * tpebs_event_size || i == 8 + 2 * tpebs_event_size);
+ /* Note: no workload is given, so system-wide is implied. */
+
+ assert(tpebs_cmd.pid == 0);
+ tpebs_cmd.argv = record_argv;
+ tpebs_cmd.out = -1;
+ ret = start_command(&tpebs_cmd);
+ zfree(&tpebs_cmd.argv);
+ list_for_each_entry(t, &tpebs_results, nd)
+ t->started = true;
- record_argv[i++] = "-o";
- record_argv[i++] = PERF_DATA;
-
- return 0;
+ return ret;
}
-static int prepare_run_command(const char **argv)
+static bool is_child_pid(pid_t parent, pid_t child)
{
- tpebs_cmd = zalloc(sizeof(struct child_process));
- if (!tpebs_cmd)
- return -ENOMEM;
- tpebs_cmd->argv = argv;
- tpebs_cmd->out = -1;
- return 0;
+ if (parent < 0 || child < 0)
+ return false;
+
+ while (true) {
+ char path[PATH_MAX];
+ char line[256];
+ FILE *fp;
+
+new_child:
+ if (parent == child)
+ return true;
+
+ if (child <= 0)
+ return false;
+
+ scnprintf(path, sizeof(path), "%s/%d/status", procfs__mountpoint(), child);
+ fp = fopen(path, "r");
+ if (!fp) {
+ /* Presumably the process went away. Assume not a child. */
+ return false;
+ }
+ while (fgets(line, sizeof(line), fp) != NULL) {
+ if (strncmp(line, "PPid:", 5) == 0) {
+ fclose(fp);
+ if (sscanf(line + 5, "%d", &child) != 1) {
+ /* Unexpected error parsing. */
+ return false;
+ }
+ goto new_child;
+ }
+ }
+ /* Unexpected EOF. */
+ fclose(fp);
+ return false;
+ }
}
-static int start_perf_record(int control_fd[], int ack_fd[],
- const char *cpumap_buf)
+static bool should_ignore_sample(const struct perf_sample *sample, const struct tpebs_retire_lat *t)
{
- const char **record_argv;
- int ret;
- char buf[32];
+ pid_t workload_pid, sample_pid = sample->pid;
- scnprintf(buf, sizeof(buf), "--control=fd:%d,%d", control_fd[0], ack_fd[1]);
+ /*
+ * During evlist__purge the evlist is removed before evsel__exit
+ * calls evsel__tpebs_close and takes tpebs_mtx. Avoid a segfault
+ * by ignoring samples in this case.
+ */
+ if (t->evsel->evlist == NULL)
+ return true;
- record_argv = calloc(12 + 2 * tpebs_event_size, sizeof(char *));
- if (!record_argv)
- return -ENOMEM;
+ workload_pid = t->evsel->evlist->workload.pid;
+ if (workload_pid < 0 || workload_pid == sample_pid)
+ return false;
- ret = get_perf_record_args(record_argv, buf, cpumap_buf);
- if (ret)
- goto out;
+ if (!t->evsel->core.attr.inherit)
+ return true;
- ret = prepare_run_command(record_argv);
- if (ret)
- goto out;
- ret = start_command(tpebs_cmd);
-out:
- free(record_argv);
- return ret;
+ return !is_child_pid(workload_pid, sample_pid);
}
static int process_sample_event(const struct perf_tool *tool __maybe_unused,
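
should_ignore_sample() keeps samples from the workload itself and, when inherit is set, from its descendants; is_child_pid() establishes descent by chasing PPid: lines in /proc. A hypothetical walk:

/*
 * sample_pid 1234 has "PPid: 1200", 1200 has "PPid: 1100", and 1100 is
 * workload_pid, so is_child_pid(1100, 1234) is true and the sample is
 * kept. A chain that reaches pid 1 (or 0) without hitting workload_pid
 * returns false and the sample is ignored.
 */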
@@ -124,30 +189,36 @@ static int process_sample_event(const struct perf_tool *tool __maybe_unused,
struct evsel *evsel,
struct machine *machine __maybe_unused)
{
- int ret = 0;
- const char *evname;
struct tpebs_retire_lat *t;
- evname = evsel__name(evsel);
-
+ mutex_lock(tpebs_mtx_get());
+ if (tpebs_cmd.pid == 0) {
+ /* Record has terminated. */
+ mutex_unlock(tpebs_mtx_get());
+ return 0;
+ }
+ t = tpebs_retire_lat__find(evsel);
+ if (!t) {
+ mutex_unlock(tpebs_mtx_get());
+ return -EINVAL;
+ }
+ if (should_ignore_sample(sample, t)) {
+ mutex_unlock(tpebs_mtx_get());
+ return 0;
+ }
/*
* Need to handle per-core results? We assume the average retire
* latency value will be used. Save the number of samples and the sum
* of the retire latency values for each event.
*/
- list_for_each_entry(t, &tpebs_results, nd) {
- if (!strcmp(evname, t->name)) {
- t->count += 1;
- t->sum += sample->retire_lat;
- t->val = (double) t->sum / t->count;
- break;
- }
- }
-
- return ret;
+ t->last = sample->weight3;
+ update_stats(&t->stats, sample->weight3);
+ mutex_unlock(tpebs_mtx_get());
+ return 0;
}
-static int process_feature_event(struct perf_session *session,
+static int process_feature_event(const struct perf_tool *tool __maybe_unused,
+ struct perf_session *session,
union perf_event *event)
{
if (event->feat.feat_id < HEADER_LAST_FEATURE)
@@ -155,14 +226,13 @@ static int process_feature_event(struct perf_session *session,
return 0;
}
-static void *__sample_reader(void *arg)
+static void *__sample_reader(void *arg __maybe_unused)
{
- struct child_process *child = arg;
struct perf_session *session;
struct perf_data data = {
.mode = PERF_DATA_MODE_READ,
.path = PERF_DATA,
- .file.fd = child->out,
+ .file.fd = tpebs_cmd.out,
};
struct perf_tool tool;
@@ -180,94 +250,277 @@ static void *__sample_reader(void *arg)
return NULL;
}
+static int tpebs_send_record_cmd(const char *msg) EXCLUSIVE_LOCKS_REQUIRED(tpebs_mtx_get())
+{
+ struct pollfd pollfd = { .events = POLLIN, };
+ int ret, len, retries = 0;
+ char ack_buf[8];
+
+ /* Check if the command exited before the send, done with the lock held. */
+ if (tpebs_cmd.pid == 0)
+ return 0;
+
+ /*
+ * Let go of the lock while sending/receiving as blocking can starve the
+ * sample reading thread.
+ */
+ mutex_unlock(tpebs_mtx_get());
+
+ /* Send perf record command. */
+ len = strlen(msg);
+ ret = write(control_fd[1], msg, len);
+ if (ret != len) {
+ pr_err("perf record control write control message '%s' failed\n", msg);
+ ret = -EPIPE;
+ goto out;
+ }
+
+ if (!strcmp(msg, EVLIST_CTL_CMD_STOP_TAG)) {
+ ret = 0;
+ goto out;
+ }
+
+ /* Wait for an ack. */
+ pollfd.fd = ack_fd[0];
+
+ /*
+ * We need this poll to ensure the ack_fd pipe will not hang
+ * when perf record fails for any reason. The timeout (500ms per
+ * poll, retried up to 6 times) is an empirical selection.
+ */
+again:
+ if (!poll(&pollfd, 1, 500)) {
+ if (check_if_command_finished(&tpebs_cmd)) {
+ ret = 0;
+ goto out;
+ }
+
+ if (retries++ < 6)
+ goto again;
+ pr_err("tpebs failed: perf record ack timeout for '%s'\n", msg);
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ if (!(pollfd.revents & POLLIN)) {
+ if (check_if_command_finished(&tpebs_cmd)) {
+ ret = 0;
+ goto out;
+ }
+
+ pr_err("tpebs failed: did not received an ack for '%s'\n", msg);
+ ret = -EPIPE;
+ goto out;
+ }
+
+ ret = read(ack_fd[0], ack_buf, sizeof(ack_buf));
+ if (ret > 0)
+ ret = strcmp(ack_buf, EVLIST_CTL_CMD_ACK_TAG);
+ else
+ pr_err("tpebs: perf record control ack failed\n");
+out:
+ /* Re-take lock as expected by caller. */
+ mutex_lock(tpebs_mtx_get());
+ return ret;
+}
+
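
A sketch of the handshake this helper speaks with `perf record --control=fd:...`, assuming the EVLIST_CTL_CMD_* tags from tools/perf/util/evlist.h ("enable", "stop", and the "ack\n" reply):

/* Hypothetical round trip (error handling elided): */
write(control_fd[1], EVLIST_CTL_CMD_ENABLE_TAG,
      strlen(EVLIST_CTL_CMD_ENABLE_TAG));	/* send "enable"       */
poll(&pollfd, 1, 500);				/* wait, with retries  */
read(ack_fd[0], ack_buf, sizeof(ack_buf));	/* expect "ack\n" back */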
/*
* tpebs_stop - stop the sample data read thread and the perf record process.
*/
-static int tpebs_stop(void)
+static int tpebs_stop(void) EXCLUSIVE_LOCKS_REQUIRED(tpebs_mtx_get())
{
int ret = 0;
/* Like tpebs_start, we should only run tpebs_end once. */
- if (tpebs_pid != -1) {
- kill(tpebs_cmd->pid, SIGTERM);
- tpebs_pid = -1;
+ if (tpebs_cmd.pid != 0) {
+ tpebs_send_record_cmd(EVLIST_CTL_CMD_STOP_TAG);
+ tpebs_cmd.pid = 0;
+ mutex_unlock(tpebs_mtx_get());
pthread_join(tpebs_reader_thread, NULL);
- close(tpebs_cmd->out);
- ret = finish_command(tpebs_cmd);
+ mutex_lock(tpebs_mtx_get());
+ close(control_fd[0]);
+ close(control_fd[1]);
+ close(ack_fd[0]);
+ close(ack_fd[1]);
+ close(tpebs_cmd.out);
+ ret = finish_command(&tpebs_cmd);
+ tpebs_cmd.pid = 0;
if (ret == -ERR_RUN_COMMAND_WAITPID_SIGNAL)
ret = 0;
}
return ret;
}
-/*
- * tpebs_start - start tpebs execution.
- * @evsel_list: retire_latency evsels in this list will be selected and sampled
- * to get the average retire_latency value.
- *
- * This function will be called from evlist level later when evlist__open() is
- * called consistently.
+/**
+ * evsel__tpebs_event() - Create string event encoding to pass to `perf record`.
*/
-int tpebs_start(struct evlist *evsel_list)
+static int evsel__tpebs_event(struct evsel *evsel, char **event)
{
- int ret = 0;
- struct evsel *evsel;
- char cpumap_buf[50];
+ char *name, *modifier;
+ int ret;
+
+ name = strdup(evsel->name);
+ if (!name)
+ return -ENOMEM;
+
+ modifier = strrchr(name, 'R');
+ if (!modifier) {
+ ret = -EINVAL;
+ goto out;
+ }
+ *modifier = 'p';
+ modifier = strchr(name, ':');
+ if (!modifier)
+ modifier = strrchr(name, '/');
+ if (!modifier) {
+ ret = -EINVAL;
+ goto out;
+ }
+ *modifier = '\0';
+ if (asprintf(event, "%s/name=tpebs_event_%p/%s", name, evsel, modifier + 1) > 0)
+ ret = 0;
+ else
+ ret = -ENOMEM;
+out:
+ if (ret)
+ pr_err("Tpebs event modifier broken '%s'\n", evsel->name);
+ free(name);
+ return ret;
+}
+
+static struct tpebs_retire_lat *tpebs_retire_lat__new(struct evsel *evsel)
+{
+ struct tpebs_retire_lat *result = zalloc(sizeof(*result));
+ int ret;
+
+ if (!result)
+ return NULL;
+
+ ret = evsel__tpebs_event(evsel, &result->event);
+ if (ret) {
+ free(result);
+ return NULL;
+ }
+ result->evsel = evsel;
+ return result;
+}
+
+static void tpebs_retire_lat__delete(struct tpebs_retire_lat *r)
+{
+ zfree(&r->event);
+ free(r);
+}
+
+static struct tpebs_retire_lat *tpebs_retire_lat__find(struct evsel *evsel)
+{
+ struct tpebs_retire_lat *t;
+ unsigned long num;
+ const char *evsel_name;
/*
- * We should only run tpebs_start when tpebs_recording is enabled.
- * And we should only run it once with all the required events.
+	 * Evsels from the caller's evlist match directly on the retirement
+	 * latency event. Events read back from `perf record` instead carry a
+	 * "tpebs_event_" name prefix that encodes the evsel pointer.
*/
- if (tpebs_pid != -1 || !tpebs_recording)
+ if (evsel__is_retire_lat(evsel)) {
+ list_for_each_entry(t, &tpebs_results, nd) {
+ if (t->evsel == evsel)
+ return t;
+ }
+ return NULL;
+ }
+ evsel_name = strstr(evsel->name, "tpebs_event_");
+ if (!evsel_name) {
+		/* Unexpected: perf record output shouldn't contain other events. */
+ return NULL;
+ }
+ errno = 0;
+ num = strtoull(evsel_name + 12, NULL, 16);
+ if (errno) {
+ pr_err("Bad evsel for tpebs find '%s'\n", evsel->name);
+ return NULL;
+ }
+ list_for_each_entry(t, &tpebs_results, nd) {
+ if ((unsigned long)t->evsel == num)
+ return t;
+ }
+ return NULL;
+}
+
+/**
+ * evsel__tpebs_prepare - create tpebs data structures ready for opening.
+ * @evsel: retire_latency evsel, all evsels on its list will be prepared.
+ */
+static int evsel__tpebs_prepare(struct evsel *evsel)
+{
+ struct evsel *pos;
+ struct tpebs_retire_lat *tpebs_event;
+
+ mutex_lock(tpebs_mtx_get());
+ tpebs_event = tpebs_retire_lat__find(evsel);
+ if (tpebs_event) {
+ /* evsel, or an identically named one, was already prepared. */
+ mutex_unlock(tpebs_mtx_get());
return 0;
+ }
+ tpebs_event = tpebs_retire_lat__new(evsel);
+ if (!tpebs_event) {
+ mutex_unlock(tpebs_mtx_get());
+ return -ENOMEM;
+ }
+ list_add_tail(&tpebs_event->nd, &tpebs_results);
+ mutex_unlock(tpebs_mtx_get());
- cpu_map__snprint(evsel_list->core.user_requested_cpus, cpumap_buf, sizeof(cpumap_buf));
/*
- * Prepare perf record for sampling event retire_latency before fork and
- * prepare workload
+	 * Eagerly prepare all other evsels on the list so that, by the time of
+	 * the open, they are all known.
*/
- evlist__for_each_entry(evsel_list, evsel) {
- int i;
- char *name;
- struct tpebs_retire_lat *new;
+ evlist__for_each_entry(evsel->evlist, pos) {
+ int ret;
- if (!evsel->retire_lat)
+ if (pos == evsel || !pos->retire_lat)
continue;
- pr_debug("tpebs: Retire_latency of event %s is required\n", evsel->name);
- for (i = strlen(evsel->name) - 1; i > 0; i--) {
- if (evsel->name[i] == 'R')
- break;
- }
- if (i <= 0 || evsel->name[i] != 'R') {
- ret = -1;
- goto err;
- }
+ ret = evsel__tpebs_prepare(pos);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
- name = strdup(evsel->name);
- if (!name) {
- ret = -ENOMEM;
- goto err;
- }
- name[i] = 'p';
+/**
+ * evsel__tpebs_open - starts tpebs execution.
+ * @evsel: retire_latency evsel, all evsels on its list will be selected. Each
+ * evsel is sampled to get the average retire_latency value.
+ */
+int evsel__tpebs_open(struct evsel *evsel)
+{
+ int ret;
+ bool tpebs_empty;
- new = zalloc(sizeof(*new));
- if (!new) {
- ret = -1;
- zfree(&name);
- goto err;
- }
- new->name = name;
- new->tpebs_name = evsel->name;
- list_add_tail(&new->nd, &tpebs_results);
- tpebs_event_size += 1;
+ /* We should only run tpebs_start when tpebs_recording is enabled. */
+ if (!tpebs_recording)
+ return 0;
+ /* Only start the events once. */
+ if (tpebs_cmd.pid != 0) {
+ struct tpebs_retire_lat *t;
+ bool valid;
+
+ mutex_lock(tpebs_mtx_get());
+ t = tpebs_retire_lat__find(evsel);
+ valid = t && t->started;
+ mutex_unlock(tpebs_mtx_get());
+ /* May fail as the event wasn't started. */
+ return valid ? 0 : -EBUSY;
}
- if (tpebs_event_size > 0) {
- struct pollfd pollfd = { .events = POLLIN, };
- int control_fd[2], ack_fd[2], len;
- char ack_buf[8];
+ ret = evsel__tpebs_prepare(evsel);
+ if (ret)
+ return ret;
+ mutex_lock(tpebs_mtx_get());
+ tpebs_empty = list_empty(&tpebs_results);
+ if (!tpebs_empty) {
 		/* Create control and ack fds for --control. */
if (pipe(control_fd) < 0) {
pr_err("tpebs: Failed to create control fifo");
@@ -280,153 +533,131 @@ int tpebs_start(struct evlist *evsel_list)
goto out;
}
- ret = start_perf_record(control_fd, ack_fd, cpumap_buf);
+ ret = evsel__tpebs_start_perf_record(evsel);
if (ret)
goto out;
- tpebs_pid = tpebs_cmd->pid;
- if (pthread_create(&tpebs_reader_thread, NULL, __sample_reader, tpebs_cmd)) {
- kill(tpebs_cmd->pid, SIGTERM);
- close(tpebs_cmd->out);
- pr_err("Could not create thread to process sample data.\n");
- ret = -1;
- goto out;
- }
- /* Wait for perf record initialization.*/
- len = strlen(EVLIST_CTL_CMD_ENABLE_TAG);
- ret = write(control_fd[1], EVLIST_CTL_CMD_ENABLE_TAG, len);
- if (ret != len) {
- pr_err("perf record control write control message failed\n");
- goto out;
- }
-
- /* wait for an ack */
- pollfd.fd = ack_fd[0];
-
- /*
- * We need this poll to ensure the ack_fd PIPE will not hang
- * when perf record failed for any reason. The timeout value
- * 3000ms is an empirical selection.
- */
- if (!poll(&pollfd, 1, 3000)) {
- pr_err("tpebs failed: perf record ack timeout\n");
- ret = -1;
- goto out;
- }
- if (!(pollfd.revents & POLLIN)) {
- pr_err("tpebs failed: did not received an ack\n");
+ if (pthread_create(&tpebs_reader_thread, /*attr=*/NULL, __sample_reader,
+ /*arg=*/NULL)) {
+ kill(tpebs_cmd.pid, SIGTERM);
+ close(tpebs_cmd.out);
+ pr_err("Could not create thread to process sample data.\n");
ret = -1;
goto out;
}
-
- ret = read(ack_fd[0], ack_buf, sizeof(ack_buf));
- if (ret > 0)
- ret = strcmp(ack_buf, EVLIST_CTL_CMD_ACK_TAG);
- else {
- pr_err("tpebs: perf record control ack failed\n");
- goto out;
- }
+ ret = tpebs_send_record_cmd(EVLIST_CTL_CMD_ENABLE_TAG);
+ }
out:
- close(control_fd[0]);
- close(control_fd[1]);
- close(ack_fd[0]);
- close(ack_fd[1]);
+ if (ret) {
+ struct tpebs_retire_lat *t = tpebs_retire_lat__find(evsel);
+
+ list_del_init(&t->nd);
+ tpebs_retire_lat__delete(t);
}
-err:
- if (ret)
- tpebs_delete();
+ mutex_unlock(tpebs_mtx_get());
return ret;
}
-
-int tpebs_set_evsel(struct evsel *evsel, int cpu_map_idx, int thread)
+int evsel__tpebs_read(struct evsel *evsel, int cpu_map_idx, int thread)
{
- __u64 val;
- bool found = false;
+ struct perf_counts_values *count, *old_count = NULL;
struct tpebs_retire_lat *t;
- struct perf_counts_values *count;
+ uint64_t val;
+ int ret;
- /* Non reitre_latency evsel should never enter this function. */
- if (!evsel__is_retire_lat(evsel))
- return -1;
+ /* Only set retire_latency value to the first CPU and thread. */
+ if (cpu_map_idx != 0 || thread != 0)
+ return 0;
+
+ if (evsel->prev_raw_counts)
+ old_count = perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
- /*
- * Need to stop the forked record to ensure get sampled data from the
- * PIPE to process and get non-zero retire_lat value for hybrid.
- */
- tpebs_stop();
count = perf_counts(evsel->counts, cpu_map_idx, thread);
- list_for_each_entry(t, &tpebs_results, nd) {
- if (t->tpebs_name == evsel->name ||
- (evsel->metric_id && !strcmp(t->tpebs_name, evsel->metric_id))) {
- found = true;
+ mutex_lock(tpebs_mtx_get());
+ t = tpebs_retire_lat__find(evsel);
+ /*
+ * If reading the first tpebs result, send a ping to the record
+ * process. Allow the sample reader a chance to read by releasing and
+ * reacquiring the lock.
+ */
+ if (t && &t->nd == tpebs_results.next) {
+ ret = tpebs_send_record_cmd(EVLIST_CTL_CMD_PING_TAG);
+ mutex_unlock(tpebs_mtx_get());
+ if (ret)
+ return ret;
+ mutex_lock(tpebs_mtx_get());
+ }
+ if (t == NULL || t->stats.n == 0) {
+ /* No sample data, use default. */
+ if (tpebs_recording) {
+			pr_warning_once(
+				"Using precomputed retirement latency data as no samples were recorded\n");
+ }
+ val = 0;
+ switch (tpebs_mode) {
+ case TPEBS_MODE__MIN:
+ val = rint(evsel->retirement_latency.min);
+ break;
+ case TPEBS_MODE__MAX:
+ val = rint(evsel->retirement_latency.max);
+ break;
+ default:
+ case TPEBS_MODE__LAST:
+ case TPEBS_MODE__MEAN:
+ val = rint(evsel->retirement_latency.mean);
+ break;
+ }
+ } else {
+ switch (tpebs_mode) {
+ case TPEBS_MODE__MIN:
+ val = t->stats.min;
+ break;
+ case TPEBS_MODE__MAX:
+ val = t->stats.max;
+ break;
+ case TPEBS_MODE__LAST:
+ val = t->last;
+ break;
+ default:
+ case TPEBS_MODE__MEAN:
+ val = rint(t->stats.mean);
break;
}
}
-
- /* Set ena and run to non-zero */
- count->ena = count->run = 1;
- count->lost = 0;
-
- if (!found) {
- /*
- * Set default value or 0 when retire_latency for this event is
- * not found from sampling data (record_tpebs not set or 0
- * sample recorded).
- */
- count->val = 0;
- return 0;
+ mutex_unlock(tpebs_mtx_get());
+
+ if (old_count) {
+ count->val = old_count->val + val;
+ count->run = old_count->run + 1;
+ count->ena = old_count->ena + 1;
+ } else {
+ count->val = val;
+ count->run++;
+ count->ena++;
}
-
- /*
- * Only set retire_latency value to the first CPU and thread.
- */
- if (cpu_map_idx == 0 && thread == 0)
- val = rint(t->val);
- else
- val = 0;
-
- count->val = val;
return 0;
}
-static void tpebs_retire_lat__delete(struct tpebs_retire_lat *r)
-{
- zfree(&r->name);
- free(r);
-}
-
-
-/*
- * tpebs_delete - delete tpebs related data and stop the created thread and
- * process by calling tpebs_stop().
+/**
+ * evsel__tpebs_close() - delete tpebs related data. If this is the last event,
+ * stop the reader thread and the forked process by calling tpebs_stop().
*
- * This function is called from evlist_delete() and also from builtin-stat
- * stat_handle_error(). If tpebs_start() is called from places other then perf
- * stat, need to ensure tpebs_delete() is also called to safely free mem and
- * close the data read thread and the forked perf record process.
- *
- * This function is also called in evsel__close() to be symmetric with
- * tpebs_start() being called in evsel__open(). We will update this call site
- * when move tpebs_start() to evlist level.
+ * This function is called in evsel__close() to be symmetric with
+ * evsel__tpebs_open() being called in evsel__open().
*/
-void tpebs_delete(void)
+void evsel__tpebs_close(struct evsel *evsel)
{
- struct tpebs_retire_lat *r, *rtmp;
-
- if (tpebs_pid == -1)
- return;
-
- tpebs_stop();
+ struct tpebs_retire_lat *t;
- list_for_each_entry_safe(r, rtmp, &tpebs_results, nd) {
- list_del_init(&r->nd);
- tpebs_retire_lat__delete(r);
- }
+ mutex_lock(tpebs_mtx_get());
+ t = tpebs_retire_lat__find(evsel);
+ if (t) {
+ list_del_init(&t->nd);
+ tpebs_retire_lat__delete(t);
- if (tpebs_cmd) {
- free(tpebs_cmd);
- tpebs_cmd = NULL;
+ if (list_empty(&tpebs_results))
+ tpebs_stop();
}
+ mutex_unlock(tpebs_mtx_get());
}
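
A minimal, self-contained sketch of the ack'd-command pattern that tpebs_send_record_cmd() drives over the --control fifos. The helper name and the "ack\n" tag here are illustrative assumptions standing in for the EVLIST_CTL_CMD_* tags; the retry arithmetic mirrors the patch (500ms per poll, up to 7 attempts).

#include <errno.h>
#include <poll.h>
#include <string.h>
#include <unistd.h>

/* Send a command string and wait for a short ack, bounded so a dead
 * peer cannot hang the caller. */
static int send_with_ack(int cmd_fd, int ack_fd, const char *msg)
{
	struct pollfd pfd = { .fd = ack_fd, .events = POLLIN, };
	char ack[8] = { 0 };
	ssize_t len = strlen(msg);
	int retries = 0;

	if (write(cmd_fd, msg, len) != len)
		return -EPIPE;
again:
	/* 500ms per attempt, up to 7 attempts: ~3.5s worst case. */
	if (!poll(&pfd, 1, 500)) {
		if (retries++ < 6)
			goto again;
		return -ETIMEDOUT;
	}
	if (!(pfd.revents & POLLIN))
		return -EPIPE;
	if (read(ack_fd, ack, sizeof(ack) - 1) <= 0)
		return -EPIPE;
	return strncmp(ack, "ack\n", 4) == 0 ? 0 : -EINVAL;
}
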
diff --git a/tools/perf/util/intel-tpebs.h b/tools/perf/util/intel-tpebs.h
index 766b3fbd79f1..9475e2e6ea74 100644
--- a/tools/perf/util/intel-tpebs.h
+++ b/tools/perf/util/intel-tpebs.h
@@ -2,34 +2,24 @@
/*
  * intel_tpebs.h: Intel TPEBS support
*/
-#ifndef INCLUDE__PERF_INTEL_TPEBS_H__
-#define INCLUDE__PERF_INTEL_TPEBS_H__
+#ifndef __INTEL_TPEBS_H
+#define __INTEL_TPEBS_H
-#include "stat.h"
-#include "evsel.h"
+struct evlist;
+struct evsel;
-#ifdef HAVE_ARCH_X86_64_SUPPORT
+enum tpebs_mode {
+ TPEBS_MODE__MEAN,
+ TPEBS_MODE__MIN,
+ TPEBS_MODE__MAX,
+ TPEBS_MODE__LAST,
+};
extern bool tpebs_recording;
-int tpebs_start(struct evlist *evsel_list);
-void tpebs_delete(void);
-int tpebs_set_evsel(struct evsel *evsel, int cpu_map_idx, int thread);
+extern enum tpebs_mode tpebs_mode;
-#else
+int evsel__tpebs_open(struct evsel *evsel);
+void evsel__tpebs_close(struct evsel *evsel);
+int evsel__tpebs_read(struct evsel *evsel, int cpu_map_idx, int thread);
-static inline int tpebs_start(struct evlist *evsel_list __maybe_unused)
-{
- return 0;
-}
-
-static inline void tpebs_delete(void) {};
-
-static inline int tpebs_set_evsel(struct evsel *evsel __maybe_unused,
- int cpu_map_idx __maybe_unused,
- int thread __maybe_unused)
-{
- return 0;
-}
-
-#endif
-#endif
+#endif /* __INTEL_TPEBS_H */
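
For reference, a hypothetical caller of the per-evsel API declared above; the helper and its error handling are assumptions, not code from this patch. Open forks `perf record` on first use, read pulls the sampled latency, and close stops the record process once the last event is gone.

static int read_retire_lat(struct evsel *evsel)
{
	int err = evsel__tpebs_open(evsel);	/* no-op unless tpebs_recording */

	if (err)
		return err;
	err = evsel__tpebs_read(evsel, /*cpu_map_idx=*/0, /*thread=*/0);
	evsel__tpebs_close(evsel);
	return err;
}
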
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index 624964f01b5f..f00814e37de9 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -14,9 +14,9 @@
#include <sys/mman.h>
#include <linux/stringify.h>
-#include "build-id.h"
#include "event.h"
#include "debug.h"
+#include "dso.h"
#include "evlist.h"
#include "namespaces.h"
#include "symbol.h"
@@ -233,7 +233,8 @@ jit_open(struct jit_buf_desc *jd, const char *name)
/*
* keep dirname for generating files and mmap records
*/
- strcpy(jd->dir, name);
+ strncpy(jd->dir, name, PATH_MAX);
+ jd->dir[PATH_MAX - 1] = '\0';
dirname(jd->dir);
free(buf);
@@ -531,9 +532,24 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
/*
* mark dso as use to generate buildid in the header
*/
- if (!ret)
- build_id__mark_dso_hit(tool, event, &sample, NULL, jd->machine);
-
+ if (!ret) {
+ struct dso_id dso_id = {
+ {
+ .maj = event->mmap2.maj,
+ .min = event->mmap2.min,
+ .ino = event->mmap2.ino,
+ .ino_generation = event->mmap2.ino_generation,
+ },
+ .mmap2_valid = true,
+ .mmap2_ino_generation_valid = true,
+ };
+ struct dso *dso = machine__findnew_dso_id(jd->machine, filename, &dso_id);
+
+ if (dso)
+ dso__set_hit(dso);
+
+ dso__put(dso);
+ }
out:
perf_sample__exit(&sample);
free(event);
diff --git a/tools/perf/util/kvm-stat.h b/tools/perf/util/kvm-stat.h
index 4249542544bb..a356b839c2ee 100644
--- a/tools/perf/util/kvm-stat.h
+++ b/tools/perf/util/kvm-stat.h
@@ -10,6 +10,7 @@
#include "symbol.h"
#include "record.h"
+#include <errno.h>
#include <stdlib.h>
#include <linux/zalloc.h>
@@ -190,5 +191,15 @@ static inline struct kvm_info *kvm_info__new(void)
#define kvm_info__zput(ki) do { } while (0)
#endif /* HAVE_KVM_STAT_SUPPORT */
+#define STRDUP_FAIL_EXIT(s) \
+ ({ char *_p; \
+ _p = strdup(s); \
+ if (!_p) { \
+ ret = -ENOMEM; \
+ goto EXIT; \
+ } \
+ _p; \
+ })
+
extern int kvm_add_default_arch_event(int *argc, const char **argv);
#endif /* __PERF_KVM_STAT_H */
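
STRDUP_FAIL_EXIT expands references to a local `ret` and an `EXIT` label, so both must exist in the caller's scope. A hypothetical user (not from this patch) looks like:

static int copy_event_name(const char *name)
{
	int ret = 0;
	char *copy = STRDUP_FAIL_EXIT(name);	/* jumps to EXIT on ENOMEM */

	/* ... use copy ... */
	free(copy);
EXIT:
	return ret;
}
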
diff --git a/tools/perf/util/libbfd.c b/tools/perf/util/libbfd.c
new file mode 100644
index 000000000000..cc0c474cbfaa
--- /dev/null
+++ b/tools/perf/util/libbfd.c
@@ -0,0 +1,643 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "libbfd.h"
+#include "annotate.h"
+#include "bpf-event.h"
+#include "bpf-utils.h"
+#include "debug.h"
+#include "dso.h"
+#include "env.h"
+#include "map.h"
+#include "srcline.h"
+#include "symbol.h"
+#include "symbol_conf.h"
+#include "util.h"
+#include <tools/dis-asm-compat.h>
+#ifdef HAVE_LIBBPF_SUPPORT
+#include <bpf/bpf.h>
+#include <bpf/btf.h>
+#endif
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#define PACKAGE "perf"
+#include <bfd.h>
+
+/*
+ * Implement addr2line using libbfd.
+ */
+struct a2l_data {
+ const char *input;
+ u64 addr;
+
+ bool found;
+ const char *filename;
+ const char *funcname;
+ unsigned int line;
+
+ bfd *abfd;
+ asymbol **syms;
+};
+
+static bool perf_bfd_lock(void *bfd_mutex)
+{
+ mutex_lock(bfd_mutex);
+ return true;
+}
+
+static bool perf_bfd_unlock(void *bfd_mutex)
+{
+ mutex_unlock(bfd_mutex);
+ return true;
+}
+
+static void perf_bfd_init(void)
+{
+ static struct mutex bfd_mutex;
+
+ mutex_init_recursive(&bfd_mutex);
+
+ if (bfd_init() != BFD_INIT_MAGIC) {
+ pr_err("Error initializing libbfd\n");
+ return;
+ }
+ if (!bfd_thread_init(perf_bfd_lock, perf_bfd_unlock, &bfd_mutex))
+ pr_err("Error initializing libbfd threading\n");
+}
+
+static void ensure_bfd_init(void)
+{
+ static pthread_once_t bfd_init_once = PTHREAD_ONCE_INIT;
+
+ pthread_once(&bfd_init_once, perf_bfd_init);
+}
+
+static int bfd_error(const char *string)
+{
+ const char *errmsg;
+
+ errmsg = bfd_errmsg(bfd_get_error());
+ fflush(stdout);
+
+ if (string)
+ pr_debug("%s: %s\n", string, errmsg);
+ else
+ pr_debug("%s\n", errmsg);
+
+ return -1;
+}
+
+static int slurp_symtab(bfd *abfd, struct a2l_data *a2l)
+{
+ long storage;
+ long symcount;
+ asymbol **syms;
+ bfd_boolean dynamic = FALSE;
+
+ if ((bfd_get_file_flags(abfd) & HAS_SYMS) == 0)
+ return bfd_error(bfd_get_filename(abfd));
+
+ storage = bfd_get_symtab_upper_bound(abfd);
+ if (storage == 0L) {
+ storage = bfd_get_dynamic_symtab_upper_bound(abfd);
+ dynamic = TRUE;
+ }
+ if (storage < 0L)
+ return bfd_error(bfd_get_filename(abfd));
+
+	syms = malloc(storage);
+	if (!syms)
+		return -1;
+ if (dynamic)
+ symcount = bfd_canonicalize_dynamic_symtab(abfd, syms);
+ else
+ symcount = bfd_canonicalize_symtab(abfd, syms);
+
+ if (symcount < 0) {
+ free(syms);
+ return bfd_error(bfd_get_filename(abfd));
+ }
+
+ a2l->syms = syms;
+ return 0;
+}
+
+static void find_address_in_section(bfd *abfd, asection *section, void *data)
+{
+ bfd_vma pc, vma;
+ bfd_size_type size;
+ struct a2l_data *a2l = data;
+ flagword flags;
+
+ if (a2l->found)
+ return;
+
+#ifdef bfd_get_section_flags
+ flags = bfd_get_section_flags(abfd, section);
+#else
+ flags = bfd_section_flags(section);
+#endif
+ if ((flags & SEC_ALLOC) == 0)
+ return;
+
+ pc = a2l->addr;
+#ifdef bfd_get_section_vma
+ vma = bfd_get_section_vma(abfd, section);
+#else
+ vma = bfd_section_vma(section);
+#endif
+#ifdef bfd_get_section_size
+ size = bfd_get_section_size(section);
+#else
+ size = bfd_section_size(section);
+#endif
+
+ if (pc < vma || pc >= vma + size)
+ return;
+
+ a2l->found = bfd_find_nearest_line(abfd, section, a2l->syms, pc - vma,
+ &a2l->filename, &a2l->funcname,
+ &a2l->line);
+
+ if (a2l->filename && !strlen(a2l->filename))
+ a2l->filename = NULL;
+}
+
+static struct a2l_data *addr2line_init(const char *path)
+{
+ bfd *abfd;
+ struct a2l_data *a2l = NULL;
+
+ ensure_bfd_init();
+ abfd = bfd_openr(path, NULL);
+ if (abfd == NULL)
+ return NULL;
+
+ if (!bfd_check_format(abfd, bfd_object))
+ goto out;
+
+ a2l = zalloc(sizeof(*a2l));
+ if (a2l == NULL)
+ goto out;
+
+ a2l->abfd = abfd;
+ a2l->input = strdup(path);
+ if (a2l->input == NULL)
+ goto out;
+
+ if (slurp_symtab(abfd, a2l))
+ goto out;
+
+ return a2l;
+
+out:
+ if (a2l) {
+ zfree((char **)&a2l->input);
+ free(a2l);
+ }
+ bfd_close(abfd);
+ return NULL;
+}
+
+static void addr2line_cleanup(struct a2l_data *a2l)
+{
+ if (a2l->abfd)
+ bfd_close(a2l->abfd);
+ zfree((char **)&a2l->input);
+ zfree(&a2l->syms);
+ free(a2l);
+}
+
+static int inline_list__append_dso_a2l(struct dso *dso,
+ struct inline_node *node,
+ struct symbol *sym)
+{
+ struct a2l_data *a2l = dso__a2l(dso);
+ struct symbol *inline_sym = new_inline_sym(dso, sym, a2l->funcname);
+ char *srcline = NULL;
+
+ if (a2l->filename)
+ srcline = srcline_from_fileline(a2l->filename, a2l->line);
+
+ return inline_list__append(inline_sym, srcline, node);
+}
+
+int libbfd__addr2line(const char *dso_name, u64 addr,
+ char **file, unsigned int *line, struct dso *dso,
+ bool unwind_inlines, struct inline_node *node,
+ struct symbol *sym)
+{
+ int ret = 0;
+ struct a2l_data *a2l = dso__a2l(dso);
+
+ if (!a2l) {
+ a2l = addr2line_init(dso_name);
+ dso__set_a2l(dso, a2l);
+ }
+
+ if (a2l == NULL) {
+ if (!symbol_conf.disable_add2line_warn)
+ pr_warning("addr2line_init failed for %s\n", dso_name);
+ return 0;
+ }
+
+ a2l->addr = addr;
+ a2l->found = false;
+
+ bfd_map_over_sections(a2l->abfd, find_address_in_section, a2l);
+
+ if (!a2l->found)
+ return 0;
+
+ if (unwind_inlines) {
+ int cnt = 0;
+
+ if (node && inline_list__append_dso_a2l(dso, node, sym))
+ return 0;
+
+ while (bfd_find_inliner_info(a2l->abfd, &a2l->filename,
+ &a2l->funcname, &a2l->line) &&
+ cnt++ < MAX_INLINE_NEST) {
+
+ if (a2l->filename && !strlen(a2l->filename))
+ a2l->filename = NULL;
+
+ if (node != NULL) {
+ if (inline_list__append_dso_a2l(dso, node, sym))
+ return 0;
+ // found at least one inline frame
+ ret = 1;
+ }
+ }
+ }
+
+ if (file) {
+ *file = a2l->filename ? strdup(a2l->filename) : NULL;
+ ret = *file ? 1 : 0;
+ }
+
+ if (line)
+ *line = a2l->line;
+
+ return ret;
+}
+
+void dso__free_a2l_libbfd(struct dso *dso)
+{
+ struct a2l_data *a2l = dso__a2l(dso);
+
+ if (!a2l)
+ return;
+
+ addr2line_cleanup(a2l);
+
+ dso__set_a2l(dso, NULL);
+}
+
+static int bfd_symbols__cmpvalue(const void *a, const void *b)
+{
+ const asymbol *as = *(const asymbol **)a, *bs = *(const asymbol **)b;
+
+ if (bfd_asymbol_value(as) != bfd_asymbol_value(bs))
+ return bfd_asymbol_value(as) - bfd_asymbol_value(bs);
+
+ return bfd_asymbol_name(as)[0] - bfd_asymbol_name(bs)[0];
+}
+
+static int bfd2elf_binding(asymbol *symbol)
+{
+ if (symbol->flags & BSF_WEAK)
+ return STB_WEAK;
+ if (symbol->flags & BSF_GLOBAL)
+ return STB_GLOBAL;
+ if (symbol->flags & BSF_LOCAL)
+ return STB_LOCAL;
+ return -1;
+}
+
+int dso__load_bfd_symbols(struct dso *dso, const char *debugfile)
+{
+ int err = -1;
+ long symbols_size, symbols_count, i;
+ asection *section;
+ asymbol **symbols, *sym;
+ struct symbol *symbol;
+ bfd *abfd;
+ u64 start, len;
+
+ ensure_bfd_init();
+ abfd = bfd_openr(debugfile, NULL);
+ if (!abfd)
+ return -1;
+
+ if (!bfd_check_format(abfd, bfd_object)) {
+ pr_debug2("%s: cannot read %s bfd file.\n", __func__,
+ dso__long_name(dso));
+ goto out_close;
+ }
+
+ if (bfd_get_flavour(abfd) == bfd_target_elf_flavour)
+ goto out_close;
+
+ symbols_size = bfd_get_symtab_upper_bound(abfd);
+ if (symbols_size == 0) {
+ bfd_close(abfd);
+ return 0;
+ }
+
+ if (symbols_size < 0)
+ goto out_close;
+
+ symbols = malloc(symbols_size);
+ if (!symbols)
+ goto out_close;
+
+ symbols_count = bfd_canonicalize_symtab(abfd, symbols);
+ if (symbols_count < 0)
+ goto out_free;
+
+ section = bfd_get_section_by_name(abfd, ".text");
+ if (section) {
+ for (i = 0; i < symbols_count; ++i) {
+ if (!strcmp(bfd_asymbol_name(symbols[i]), "__ImageBase") ||
+ !strcmp(bfd_asymbol_name(symbols[i]), "__image_base__"))
+ break;
+ }
+ if (i < symbols_count) {
+ /* PE symbols can only have 4 bytes, so use .text high bits */
+ u64 text_offset = (section->vma - (u32)section->vma)
+ + (u32)bfd_asymbol_value(symbols[i]);
+ dso__set_text_offset(dso, text_offset);
+ dso__set_text_end(dso, (section->vma - text_offset) + section->size);
+ } else {
+ dso__set_text_offset(dso, section->vma - section->filepos);
+ dso__set_text_end(dso, section->filepos + section->size);
+ }
+ }
+
+ qsort(symbols, symbols_count, sizeof(asymbol *), bfd_symbols__cmpvalue);
+
+#ifdef bfd_get_section
+#define bfd_asymbol_section bfd_get_section
+#endif
+ for (i = 0; i < symbols_count; ++i) {
+ sym = symbols[i];
+ section = bfd_asymbol_section(sym);
+ if (bfd2elf_binding(sym) < 0)
+ continue;
+
+ while (i + 1 < symbols_count &&
+ bfd_asymbol_section(symbols[i + 1]) == section &&
+ bfd2elf_binding(symbols[i + 1]) < 0)
+ i++;
+
+ if (i + 1 < symbols_count &&
+ bfd_asymbol_section(symbols[i + 1]) == section)
+ len = symbols[i + 1]->value - sym->value;
+ else
+ len = section->size - sym->value;
+
+ start = bfd_asymbol_value(sym) - dso__text_offset(dso);
+ symbol = symbol__new(start, len, bfd2elf_binding(sym), STT_FUNC,
+ bfd_asymbol_name(sym));
+ if (!symbol)
+ goto out_free;
+
+ symbols__insert(dso__symbols(dso), symbol);
+ }
+#ifdef bfd_get_section
+#undef bfd_asymbol_section
+#endif
+
+ symbols__fixup_end(dso__symbols(dso), false);
+ symbols__fixup_duplicate(dso__symbols(dso));
+ dso__set_adjust_symbols(dso, true);
+
+ err = 0;
+out_free:
+ free(symbols);
+out_close:
+ bfd_close(abfd);
+ return err;
+}
+
+int libbfd__read_build_id(const char *filename, struct build_id *bid)
+{
+ size_t size = sizeof(bid->data);
+ int err = -1, fd;
+ bfd *abfd;
+
+ if (!filename)
+ return -EFAULT;
+ if (!is_regular_file(filename))
+ return -EWOULDBLOCK;
+
+ fd = open(filename, O_RDONLY);
+ if (fd < 0)
+ return -1;
+
+ ensure_bfd_init();
+ abfd = bfd_fdopenr(filename, /*target=*/NULL, fd);
+ if (!abfd)
+ return -1;
+
+ if (!bfd_check_format(abfd, bfd_object)) {
+ pr_debug2("%s: cannot read %s bfd file.\n", __func__, filename);
+ goto out_close;
+ }
+
+ if (!abfd->build_id || abfd->build_id->size > size)
+ goto out_close;
+
+ memcpy(bid->data, abfd->build_id->data, abfd->build_id->size);
+ memset(bid->data + abfd->build_id->size, 0, size - abfd->build_id->size);
+ err = bid->size = abfd->build_id->size;
+
+out_close:
+ bfd_close(abfd);
+ return err;
+}
+
+int libbfd_filename__read_debuglink(const char *filename, char *debuglink,
+ size_t size)
+{
+ int err = -1;
+ asection *section;
+ bfd *abfd;
+
+ ensure_bfd_init();
+ abfd = bfd_openr(filename, NULL);
+ if (!abfd)
+ return -1;
+
+ if (!bfd_check_format(abfd, bfd_object)) {
+ pr_debug2("%s: cannot read %s bfd file.\n", __func__, filename);
+ goto out_close;
+ }
+
+ section = bfd_get_section_by_name(abfd, ".gnu_debuglink");
+ if (!section)
+ goto out_close;
+
+ if (section->size > size)
+ goto out_close;
+
+ if (!bfd_get_section_contents(abfd, section, debuglink, 0,
+ section->size))
+ goto out_close;
+
+ err = 0;
+
+out_close:
+ bfd_close(abfd);
+ return err;
+}
+
+int symbol__disassemble_bpf_libbfd(struct symbol *sym __maybe_unused,
+ struct annotate_args *args __maybe_unused)
+{
+#ifdef HAVE_LIBBPF_SUPPORT
+ struct annotation *notes = symbol__annotation(sym);
+ struct bpf_prog_linfo *prog_linfo = NULL;
+ struct bpf_prog_info_node *info_node;
+ int len = sym->end - sym->start;
+ disassembler_ftype disassemble;
+ struct map *map = args->ms.map;
+ struct perf_bpil *info_linear;
+ struct disassemble_info info;
+ struct dso *dso = map__dso(map);
+ int pc = 0, count, sub_id;
+ struct btf *btf = NULL;
+ char tpath[PATH_MAX];
+ size_t buf_size;
+ int nr_skip = 0;
+ char *buf;
+ bfd *bfdf;
+ int ret;
+ FILE *s;
+
+ if (dso__binary_type(dso) != DSO_BINARY_TYPE__BPF_PROG_INFO)
+ return SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE;
+
+ pr_debug("%s: handling sym %s addr %" PRIx64 " len %" PRIx64 "\n", __func__,
+ sym->name, sym->start, sym->end - sym->start);
+
+ memset(tpath, 0, sizeof(tpath));
+ perf_exe(tpath, sizeof(tpath));
+
+ ensure_bfd_init();
+ bfdf = bfd_openr(tpath, NULL);
+ if (bfdf == NULL)
+ abort();
+
+ if (!bfd_check_format(bfdf, bfd_object))
+ abort();
+
+ s = open_memstream(&buf, &buf_size);
+ if (!s) {
+ ret = errno;
+ goto out;
+ }
+ init_disassemble_info_compat(&info, s,
+ (fprintf_ftype) fprintf,
+ fprintf_styled);
+ info.arch = bfd_get_arch(bfdf);
+ info.mach = bfd_get_mach(bfdf);
+
+ info_node = perf_env__find_bpf_prog_info(dso__bpf_prog(dso)->env,
+ dso__bpf_prog(dso)->id);
+ if (!info_node) {
+ ret = SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF;
+ goto out;
+ }
+ info_linear = info_node->info_linear;
+ sub_id = dso__bpf_prog(dso)->sub_id;
+
+ info.buffer = (void *)(uintptr_t)(info_linear->info.jited_prog_insns);
+ info.buffer_length = info_linear->info.jited_prog_len;
+
+ if (info_linear->info.nr_line_info)
+ prog_linfo = bpf_prog_linfo__new(&info_linear->info);
+
+ if (info_linear->info.btf_id) {
+ struct btf_node *node;
+
+ node = perf_env__find_btf(dso__bpf_prog(dso)->env,
+ info_linear->info.btf_id);
+ if (node)
+ btf = btf__new((__u8 *)(node->data),
+ node->data_size);
+ }
+
+ disassemble_init_for_target(&info);
+
+#ifdef DISASM_FOUR_ARGS_SIGNATURE
+ disassemble = disassembler(info.arch,
+ bfd_big_endian(bfdf),
+ info.mach,
+ bfdf);
+#else
+ disassemble = disassembler(bfdf);
+#endif
+ if (disassemble == NULL)
+ abort();
+
+ fflush(s);
+ do {
+ const struct bpf_line_info *linfo = NULL;
+ struct disasm_line *dl;
+ size_t prev_buf_size;
+ const char *srcline;
+ u64 addr;
+
+ addr = pc + ((u64 *)(uintptr_t)(info_linear->info.jited_ksyms))[sub_id];
+ count = disassemble(pc, &info);
+
+ if (prog_linfo)
+ linfo = bpf_prog_linfo__lfind_addr_func(prog_linfo,
+ addr, sub_id,
+ nr_skip);
+
+ if (linfo && btf) {
+ srcline = btf__name_by_offset(btf, linfo->line_off);
+ nr_skip++;
+ } else
+ srcline = NULL;
+
+ fprintf(s, "\n");
+ prev_buf_size = buf_size;
+ fflush(s);
+
+ if (!annotate_opts.hide_src_code && srcline) {
+ args->offset = -1;
+ args->line = strdup(srcline);
+ args->line_nr = 0;
+ args->fileloc = NULL;
+ args->ms.sym = sym;
+ dl = disasm_line__new(args);
+ if (dl) {
+ annotation_line__add(&dl->al,
+ &notes->src->source);
+ }
+ }
+
+ args->offset = pc;
+ args->line = buf + prev_buf_size;
+ args->line_nr = 0;
+ args->fileloc = NULL;
+ args->ms.sym = sym;
+ dl = disasm_line__new(args);
+ if (dl)
+ annotation_line__add(&dl->al, &notes->src->source);
+
+ pc += count;
+ } while (count > 0 && pc < len);
+
+ ret = 0;
+out:
+ free(prog_linfo);
+ btf__free(btf);
+ fclose(s);
+ bfd_close(bfdf);
+ return ret;
+#else
+ return SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF;
+#endif
+}
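
As a usage sketch of the relocated addr2line entry point (the dso handle, path and address are assumptions): the first call builds and caches the bfd state on the dso, and dso__free_a2l_libbfd() releases it.

char *file = NULL;
unsigned int line = 0;

if (libbfd__addr2line("/usr/bin/perf", /*addr=*/0x4242, &file, &line, dso,
		      /*unwind_inlines=*/false, /*node=*/NULL, /*sym=*/NULL) > 0)
	pr_debug("resolved to %s:%u\n", file, line);
free(file);
dso__free_a2l_libbfd(dso);	/* drops the cached struct a2l_data */
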
diff --git a/tools/perf/util/libbfd.h b/tools/perf/util/libbfd.h
new file mode 100644
index 000000000000..953886f3d62f
--- /dev/null
+++ b/tools/perf/util/libbfd.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_LIBBFD_H
+#define __PERF_LIBBFD_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <stdbool.h>
+#include <stddef.h>
+
+struct annotate_args;
+struct build_id;
+struct dso;
+struct inline_node;
+struct symbol;
+
+#ifdef HAVE_LIBBFD_SUPPORT
+int libbfd__addr2line(const char *dso_name, u64 addr,
+ char **file, unsigned int *line, struct dso *dso,
+ bool unwind_inlines, struct inline_node *node,
+ struct symbol *sym);
+
+
+void dso__free_a2l_libbfd(struct dso *dso);
+
+int symbol__disassemble_libbfd(const char *filename, struct symbol *sym,
+ struct annotate_args *args);
+
+int libbfd__read_build_id(const char *filename, struct build_id *bid);
+
+int libbfd_filename__read_debuglink(const char *filename, char *debuglink, size_t size);
+
+int symbol__disassemble_bpf_libbfd(struct symbol *sym, struct annotate_args *args);
+
+#else // !defined(HAVE_LIBBFD_SUPPORT)
+#include "annotate.h"
+
+static inline int libbfd__addr2line(const char *dso_name __always_unused,
+ u64 addr __always_unused,
+ char **file __always_unused,
+ unsigned int *line __always_unused,
+ struct dso *dso __always_unused,
+ bool unwind_inlines __always_unused,
+ struct inline_node *node __always_unused,
+ struct symbol *sym __always_unused)
+{
+ return -1;
+}
+
+
+static inline void dso__free_a2l_libbfd(struct dso *dso __always_unused)
+{
+}
+
+static inline int symbol__disassemble_libbfd(const char *filename __always_unused,
+ struct symbol *sym __always_unused,
+ struct annotate_args *args __always_unused)
+{
+ return -1;
+}
+
+static inline int libbfd__read_build_id(const char *filename __always_unused,
+ struct build_id *bid __always_unused)
+{
+ return -1;
+}
+
+static inline int libbfd_filename__read_debuglink(const char *filename __always_unused,
+ char *debuglink __always_unused,
+ size_t size __always_unused)
+{
+ return -1;
+}
+
+static inline int symbol__disassemble_bpf_libbfd(struct symbol *sym __always_unused,
+ struct annotate_args *args __always_unused)
+{
+ return SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF;
+}
+
+#endif // defined(HAVE_LIBBFD_SUPPORT)
+
+#endif /* __PERF_LIBBFD_H */
diff --git a/tools/perf/util/llvm.c b/tools/perf/util/llvm.c
new file mode 100644
index 000000000000..2ebf1f5f65bf
--- /dev/null
+++ b/tools/perf/util/llvm.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "llvm.h"
+#include "annotate.h"
+#include "debug.h"
+#include "dso.h"
+#include "map.h"
+#include "namespaces.h"
+#include "srcline.h"
+#include "symbol.h"
+#include <errno.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <linux/zalloc.h>
+
+#ifdef HAVE_LIBLLVM_SUPPORT
+#include "llvm-c-helpers.h"
+#include <llvm-c/Disassembler.h>
+#include <llvm-c/Target.h>
+#endif
+
+#ifdef HAVE_LIBLLVM_SUPPORT
+static void free_llvm_inline_frames(struct llvm_a2l_frame *inline_frames,
+ int num_frames)
+{
+ if (inline_frames != NULL) {
+ for (int i = 0; i < num_frames; ++i) {
+ zfree(&inline_frames[i].filename);
+ zfree(&inline_frames[i].funcname);
+ }
+ zfree(&inline_frames);
+ }
+}
+#endif
+
+int llvm__addr2line(const char *dso_name __maybe_unused, u64 addr __maybe_unused,
+ char **file __maybe_unused, unsigned int *line __maybe_unused,
+ struct dso *dso __maybe_unused, bool unwind_inlines __maybe_unused,
+ struct inline_node *node __maybe_unused, struct symbol *sym __maybe_unused)
+{
+#ifdef HAVE_LIBLLVM_SUPPORT
+ struct llvm_a2l_frame *inline_frames = NULL;
+ int num_frames = llvm_addr2line(dso_name, addr, file, line,
+ node && unwind_inlines, &inline_frames);
+
+ if (num_frames == 0 || !inline_frames) {
+ /* Error, or we didn't want inlines. */
+ return num_frames;
+ }
+
+ for (int i = 0; i < num_frames; ++i) {
+ struct symbol *inline_sym =
+ new_inline_sym(dso, sym, inline_frames[i].funcname);
+ char *srcline = NULL;
+
+ if (inline_frames[i].filename) {
+ srcline =
+ srcline_from_fileline(inline_frames[i].filename,
+ inline_frames[i].line);
+ }
+ if (inline_list__append(inline_sym, srcline, node) != 0) {
+ free_llvm_inline_frames(inline_frames, num_frames);
+ return 0;
+ }
+ }
+ free_llvm_inline_frames(inline_frames, num_frames);
+
+ return num_frames;
+#else
+ return -1;
+#endif
+}
+
+#ifdef HAVE_LIBLLVM_SUPPORT
+static void init_llvm(void)
+{
+ static bool init;
+
+ if (!init) {
+ LLVMInitializeAllTargetInfos();
+ LLVMInitializeAllTargetMCs();
+ LLVMInitializeAllDisassemblers();
+ init = true;
+ }
+}
+
+/*
+ * Whenever LLVM wants to resolve an address into a symbol, it calls this
+ * callback. We don't ever actually _return_ anything (in particular, because
+ * it puts quotation marks around what we return), but we use this as a hint
+ * that there is a branch or PC-relative address in the expression that we
+ * should add some textual annotation for after the instruction. The caller
+ * will use this information to add the actual annotation.
+ */
+struct symbol_lookup_storage {
+ u64 branch_addr;
+ u64 pcrel_load_addr;
+};
+
+static const char *
+symbol_lookup_callback(void *disinfo, uint64_t value,
+ uint64_t *ref_type,
+ uint64_t address __maybe_unused,
+ const char **ref __maybe_unused)
+{
+ struct symbol_lookup_storage *storage = disinfo;
+
+ if (*ref_type == LLVMDisassembler_ReferenceType_In_Branch)
+ storage->branch_addr = value;
+ else if (*ref_type == LLVMDisassembler_ReferenceType_In_PCrel_Load)
+ storage->pcrel_load_addr = value;
+ *ref_type = LLVMDisassembler_ReferenceType_InOut_None;
+ return NULL;
+}
+#endif
+
+int symbol__disassemble_llvm(const char *filename, struct symbol *sym,
+ struct annotate_args *args __maybe_unused)
+{
+#ifdef HAVE_LIBLLVM_SUPPORT
+ struct annotation *notes = symbol__annotation(sym);
+ struct map *map = args->ms.map;
+ struct dso *dso = map__dso(map);
+ u64 start = map__rip_2objdump(map, sym->start);
+ /* Malloc-ed buffer containing instructions read from disk. */
+ u8 *code_buf = NULL;
+ /* Pointer to code to be disassembled. */
+ const u8 *buf;
+ u64 buf_len;
+ u64 pc;
+ bool is_64bit;
+ char disasm_buf[2048];
+ size_t disasm_len;
+ struct disasm_line *dl;
+ LLVMDisasmContextRef disasm = NULL;
+ struct symbol_lookup_storage storage;
+ char *line_storage = NULL;
+ size_t line_storage_len = 0;
+ int ret = -1;
+
+ if (args->options->objdump_path)
+ return -1;
+
+ buf = dso__read_symbol(dso, filename, map, sym,
+ &code_buf, &buf_len, &is_64bit);
+ if (buf == NULL)
+ return errno;
+
+ init_llvm();
+ if (arch__is(args->arch, "x86")) {
+ const char *triplet = is_64bit ? "x86_64-pc-linux" : "i686-pc-linux";
+
+ disasm = LLVMCreateDisasm(triplet, &storage, /*tag_type=*/0,
+ /*get_op_info=*/NULL, symbol_lookup_callback);
+ } else {
+ char triplet[64];
+
+ scnprintf(triplet, sizeof(triplet), "%s-linux-gnu",
+ args->arch->name);
+ disasm = LLVMCreateDisasm(triplet, &storage, /*tag_type=*/0,
+ /*get_op_info=*/NULL, symbol_lookup_callback);
+ }
+
+ if (disasm == NULL)
+ goto err;
+
+ if (args->options->disassembler_style &&
+ !strcmp(args->options->disassembler_style, "intel"))
+ LLVMSetDisasmOptions(disasm,
+ LLVMDisassembler_Option_AsmPrinterVariant);
+
+ /*
+ * This needs to be set after AsmPrinterVariant, due to a bug in LLVM;
+ * setting AsmPrinterVariant makes a new instruction printer, making it
+ * forget about the PrintImmHex flag (which is applied before if both
+ * are given to the same call).
+ */
+ LLVMSetDisasmOptions(disasm, LLVMDisassembler_Option_PrintImmHex);
+
+ /* add the function address and name */
+ scnprintf(disasm_buf, sizeof(disasm_buf), "%#"PRIx64" <%s>:",
+ start, sym->name);
+
+ args->offset = -1;
+ args->line = disasm_buf;
+ args->line_nr = 0;
+ args->fileloc = NULL;
+ args->ms.sym = sym;
+
+ dl = disasm_line__new(args);
+ if (dl == NULL)
+ goto err;
+
+ annotation_line__add(&dl->al, &notes->src->source);
+
+ pc = start;
+ for (u64 offset = 0; offset < buf_len; ) {
+ unsigned int ins_len;
+
+ storage.branch_addr = 0;
+ storage.pcrel_load_addr = 0;
+
+ /*
+ * LLVM's API has the code be disassembled as non-const, cast
+ * here as we may be disassembling from mapped read-only memory.
+ */
+ ins_len = LLVMDisasmInstruction(disasm, (u8 *)(buf + offset),
+ buf_len - offset, pc,
+ disasm_buf, sizeof(disasm_buf));
+ if (ins_len == 0)
+ goto err;
+ disasm_len = strlen(disasm_buf);
+
+ if (storage.branch_addr != 0) {
+ char *name = llvm_name_for_code(dso, filename,
+ storage.branch_addr);
+ if (name != NULL) {
+ disasm_len += scnprintf(disasm_buf + disasm_len,
+ sizeof(disasm_buf) -
+ disasm_len,
+ " <%s>", name);
+ free(name);
+ }
+ }
+ if (storage.pcrel_load_addr != 0) {
+ char *name = llvm_name_for_data(dso, filename,
+ storage.pcrel_load_addr);
+ disasm_len += scnprintf(disasm_buf + disasm_len,
+ sizeof(disasm_buf) - disasm_len,
+ " # %#"PRIx64,
+ storage.pcrel_load_addr);
+ if (name) {
+ disasm_len += scnprintf(disasm_buf + disasm_len,
+ sizeof(disasm_buf) -
+ disasm_len,
+ " <%s>", name);
+ free(name);
+ }
+ }
+
+ args->offset = offset;
+ args->line = expand_tabs(disasm_buf, &line_storage,
+ &line_storage_len);
+ args->line_nr = 0;
+ args->fileloc = NULL;
+ args->ms.sym = sym;
+
+ llvm_addr2line(filename, pc, &args->fileloc,
+ (unsigned int *)&args->line_nr, false, NULL);
+
+ dl = disasm_line__new(args);
+ if (dl == NULL)
+ goto err;
+
+ annotation_line__add(&dl->al, &notes->src->source);
+
+ free(args->fileloc);
+ pc += ins_len;
+ offset += ins_len;
+ }
+
+ ret = 0;
+
+err:
+ LLVMDisasmDispose(disasm);
+ free(code_buf);
+ free(line_storage);
+ return ret;
+#else // HAVE_LIBLLVM_SUPPORT
+ pr_debug("The LLVM disassembler isn't linked in for %s in %s\n",
+ sym->name, filename);
+ return -1;
+#endif
+}
diff --git a/tools/perf/util/llvm.h b/tools/perf/util/llvm.h
new file mode 100644
index 000000000000..57f6bafb24bb
--- /dev/null
+++ b/tools/perf/util/llvm.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_LLVM_H
+#define __PERF_LLVM_H
+
+#include <stdbool.h>
+#include <linux/types.h>
+
+struct annotate_args;
+struct dso;
+struct inline_node;
+struct symbol;
+
+int llvm__addr2line(const char *dso_name, u64 addr,
+ char **file, unsigned int *line, struct dso *dso,
+ bool unwind_inlines, struct inline_node *node,
+ struct symbol *sym);
+
+int symbol__disassemble_llvm(const char *filename, struct symbol *sym,
+ struct annotate_args *args);
+
+#endif /* __PERF_LLVM_H */
diff --git a/tools/perf/util/lock-contention.h b/tools/perf/util/lock-contention.h
index b5d916aa49df..59c94190b092 100644
--- a/tools/perf/util/lock-contention.h
+++ b/tools/perf/util/lock-contention.h
@@ -18,6 +18,12 @@ struct lock_filter {
char **slabs;
};
+struct lock_delay {
+ char *sym;
+ unsigned long addr;
+ unsigned long time;
+};
+
struct lock_stat {
struct hlist_node hash_entry;
struct rb_node rb; /* used for sorting */
@@ -140,14 +146,17 @@ struct lock_contention {
struct machine *machine;
struct hlist_head *result;
struct lock_filter *filters;
+ struct lock_delay *delays;
struct lock_contention_fails fails;
struct rb_root cgroups;
+ void *btf;
unsigned long map_nr_entries;
int max_stack;
int stack_skip;
int aggr_mode;
int owner;
int nr_filtered;
+ int nr_delays;
bool save_callstack;
};
diff --git a/tools/perf/util/lzma.c b/tools/perf/util/lzma.c
index bbcd2ffcf4bd..c355757ed391 100644
--- a/tools/perf/util/lzma.c
+++ b/tools/perf/util/lzma.c
@@ -120,7 +120,7 @@ bool lzma_is_compressed(const char *input)
ssize_t rc;
if (fd < 0)
- return -1;
+ return false;
rc = read(fd, buf, sizeof(buf));
close(fd);
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 2531b373f2cf..841b711d970e 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -20,6 +20,7 @@
#include "path.h"
#include "srcline.h"
#include "symbol.h"
+#include "synthetic-events.h"
#include "sort.h"
#include "strlist.h"
#include "target.h"
@@ -128,28 +129,62 @@ out:
return 0;
}
-struct machine *machine__new_host(void)
+static struct machine *__machine__new_host(struct perf_env *host_env, bool kernel_maps)
{
struct machine *machine = malloc(sizeof(*machine));
- if (machine != NULL) {
- machine__init(machine, "", HOST_KERNEL_ID);
+ if (!machine)
+ return NULL;
- if (machine__create_kernel_maps(machine) < 0)
- goto out_delete;
+ machine__init(machine, "", HOST_KERNEL_ID);
- machine->env = &perf_env;
+ if (kernel_maps && machine__create_kernel_maps(machine) < 0) {
+ free(machine);
+ return NULL;
}
+ machine->env = host_env;
+ return machine;
+}
+
+struct machine *machine__new_host(struct perf_env *host_env)
+{
+ return __machine__new_host(host_env, /*kernel_maps=*/true);
+}
+
+static int mmap_handler(const struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ return machine__process_mmap2_event(machine, event, sample);
+}
+
+static int machine__init_live(struct machine *machine, pid_t pid)
+{
+ union perf_event event;
+ memset(&event, 0, sizeof(event));
+ return perf_event__synthesize_mmap_events(NULL, &event, pid, pid,
+ mmap_handler, machine, true);
+}
+
+struct machine *machine__new_live(struct perf_env *host_env, bool kernel_maps, pid_t pid)
+{
+ struct machine *machine = __machine__new_host(host_env, kernel_maps);
+
+ if (!machine)
+ return NULL;
+
+ if (machine__init_live(machine, pid)) {
+ machine__delete(machine);
+ return NULL;
+ }
return machine;
-out_delete:
- free(machine);
- return NULL;
}
-struct machine *machine__new_kallsyms(void)
+struct machine *machine__new_kallsyms(struct perf_env *host_env)
{
- struct machine *machine = machine__new_host();
+ struct machine *machine = machine__new_host(host_env);
/*
* FIXME:
* 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
@@ -1696,21 +1731,21 @@ int machine__process_mmap2_event(struct machine *machine,
{
struct thread *thread;
struct map *map;
- struct dso_id dso_id = {
- .maj = event->mmap2.maj,
- .min = event->mmap2.min,
- .ino = event->mmap2.ino,
- .ino_generation = event->mmap2.ino_generation,
- };
- struct build_id __bid, *bid = NULL;
+ struct dso_id dso_id = dso_id_empty;
int ret = 0;
if (dump_trace)
perf_event__fprintf_mmap2(event, stdout);
if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
- bid = &__bid;
- build_id__init(bid, event->mmap2.build_id, event->mmap2.build_id_size);
+ build_id__init(&dso_id.build_id, event->mmap2.build_id, event->mmap2.build_id_size);
+ } else {
+ dso_id.maj = event->mmap2.maj;
+ dso_id.min = event->mmap2.min;
+ dso_id.ino = event->mmap2.ino;
+ dso_id.ino_generation = event->mmap2.ino_generation;
+ dso_id.mmap2_valid = true;
+ dso_id.mmap2_ino_generation_valid = true;
}
if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
@@ -1722,7 +1757,7 @@ int machine__process_mmap2_event(struct machine *machine,
};
strlcpy(xm.name, event->mmap2.filename, KMAP_NAME_LEN);
- ret = machine__process_kernel_mmap_event(machine, &xm, bid);
+ ret = machine__process_kernel_mmap_event(machine, &xm, &dso_id.build_id);
if (ret < 0)
goto out_problem;
return 0;
@@ -1736,7 +1771,7 @@ int machine__process_mmap2_event(struct machine *machine,
map = map__new(machine, event->mmap2.start,
event->mmap2.len, event->mmap2.pgoff,
&dso_id, event->mmap2.prot,
- event->mmap2.flags, bid,
+ event->mmap2.flags,
event->mmap2.filename, thread);
if (map == NULL)
@@ -1794,8 +1829,8 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
prot = PROT_EXEC;
map = map__new(machine, event->mmap.start,
- event->mmap.len, event->mmap.pgoff,
- NULL, prot, 0, NULL, event->mmap.filename, thread);
+ event->mmap.len, event->mmap.pgoff,
+ &dso_id_empty, prot, /*flags=*/0, event->mmap.filename, thread);
if (map == NULL)
goto out_problem_map;
@@ -1976,7 +2011,7 @@ static void ip__resolve_ams(struct thread *thread,
* Thus, we have to try consecutively until we find a match
* or else, the symbol is unknown
*/
- thread__find_cpumode_addr_location(thread, ip, &al);
+ thread__find_cpumode_addr_location(thread, ip, /*symbols=*/true, &al);
ams->addr = ip;
ams->al_addr = al.addr;
@@ -2078,7 +2113,7 @@ static int add_callchain_ip(struct thread *thread,
al.sym = NULL;
al.srcline = NULL;
if (!cpumode) {
- thread__find_cpumode_addr_location(thread, ip, &al);
+ thread__find_cpumode_addr_location(thread, ip, symbols, &al);
} else {
if (ip >= PERF_CONTEXT_MAX) {
switch (ip) {
@@ -2089,6 +2124,7 @@ static int add_callchain_ip(struct thread *thread,
*cpumode = PERF_RECORD_MISC_KERNEL;
break;
case PERF_CONTEXT_USER:
+ case PERF_CONTEXT_USER_DEFERRED:
*cpumode = PERF_RECORD_MISC_USER;
break;
default:
@@ -2106,6 +2142,8 @@ static int add_callchain_ip(struct thread *thread,
}
if (symbols)
thread__find_symbol(thread, *cpumode, ip, &al);
+ else
+ thread__find_map(thread, *cpumode, ip, &al);
}
if (al.sym != NULL) {
@@ -3155,7 +3193,7 @@ struct dso *machine__findnew_dso_id(struct machine *machine, const char *filenam
struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
{
- return machine__findnew_dso_id(machine, filename, NULL);
+ return machine__findnew_dso_id(machine, filename, &dso_id_empty);
}
char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
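
A hypothetical use of the new constructor (the host_env and pid are assumptions): machine__new_live() builds a host machine and then synthesizes mmap events for an already-running process so its maps can be resolved immediately.

struct machine *machine = machine__new_live(&host_env, /*kernel_maps=*/true,
					    getpid());

if (machine) {
	/* ... thread/map lookups against the live process ... */
	machine__delete(machine);
}
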
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index b56abec84fed..22a42c5825fa 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -169,8 +169,9 @@ struct thread *machine__findnew_guest_code(struct machine *machine, pid_t pid);
void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size);
void machines__set_comm_exec(struct machines *machines, bool comm_exec);
-struct machine *machine__new_host(void);
-struct machine *machine__new_kallsyms(void);
+struct machine *machine__new_host(struct perf_env *host_env);
+struct machine *machine__new_kallsyms(struct perf_env *host_env);
+struct machine *machine__new_live(struct perf_env *host_env, bool kernel_maps, pid_t pid);
int machine__init(struct machine *machine, const char *root_dir, pid_t pid);
void machine__exit(struct machine *machine);
void machine__delete_threads(struct machine *machine);
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index d729438b7d65..41cdddc987ee 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -120,8 +120,8 @@ static void map__init(struct map *map, u64 start, u64 end, u64 pgoff,
}
struct map *map__new(struct machine *machine, u64 start, u64 len,
- u64 pgoff, struct dso_id *id,
- u32 prot, u32 flags, struct build_id *bid,
+ u64 pgoff, const struct dso_id *id,
+ u32 prot, u32 flags,
char *filename, struct thread *thread)
{
struct map *result;
@@ -132,7 +132,7 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
map = zalloc(sizeof(*map));
if (ADD_RC_CHK(result, map)) {
char newfilename[PATH_MAX];
- struct dso *dso, *header_bid_dso;
+ struct dso *dso;
int anon, no_dso, vdso, android;
android = is_android_lib(filename);
@@ -189,16 +189,15 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
dso__set_nsinfo(dso, nsi);
mutex_unlock(dso__lock(dso));
- if (build_id__is_defined(bid)) {
- dso__set_build_id(dso, bid);
- } else {
+ if (!build_id__is_defined(&id->build_id)) {
/*
* If the mmap event had no build ID, search for an existing dso from the
* build ID header by name. Otherwise only the dso loaded at the time of
* reading the header will have the build ID set and all future mmaps will
* have it missing.
*/
- header_bid_dso = dsos__find(&machine->dsos, filename, false);
+ struct dso *header_bid_dso = dsos__find(&machine->dsos, filename, false);
+
if (header_bid_dso && dso__header_build_id(header_bid_dso)) {
dso__set_build_id(dso, dso__bid(header_bid_dso));
dso__set_header_build_id(dso, 1);
@@ -354,7 +353,7 @@ int map__load(struct map *map)
if (dso__has_build_id(dso)) {
char sbuild_id[SBUILD_ID_SIZE];
- build_id__sprintf(dso__bid(dso), sbuild_id);
+ build_id__snprintf(dso__bid(dso), sbuild_id, sizeof(sbuild_id));
pr_debug("%s with build id %s not found", name, sbuild_id);
} else
pr_debug("Failed to open %s", name);
@@ -514,6 +513,8 @@ void srccode_state_free(struct srccode_state *state)
state->line = 0;
}
+static const struct kmap *__map__const_kmap(const struct map *map);
+
/**
* map__rip_2objdump - convert symbol start address to objdump address.
* @map: memory map
@@ -525,9 +526,9 @@ void srccode_state_free(struct srccode_state *state)
*
* Return: Address suitable for passing to "objdump --start-address="
*/
-u64 map__rip_2objdump(struct map *map, u64 rip)
+u64 map__rip_2objdump(const struct map *map, u64 rip)
{
- struct kmap *kmap = __map__kmap(map);
+ const struct kmap *kmap = __map__const_kmap(map);
const struct dso *dso = map__dso(map);
/*
@@ -570,7 +571,7 @@ u64 map__rip_2objdump(struct map *map, u64 rip)
*
* Return: Memory address.
*/
-u64 map__objdump_2mem(struct map *map, u64 ip)
+u64 map__objdump_2mem(const struct map *map, u64 ip)
{
const struct dso *dso = map__dso(map);
@@ -587,7 +588,7 @@ u64 map__objdump_2mem(struct map *map, u64 ip)
}
/* convert objdump address to relative address. (To be removed) */
-u64 map__objdump_2rip(struct map *map, u64 ip)
+u64 map__objdump_2rip(const struct map *map, u64 ip)
{
const struct dso *dso = map__dso(map);
@@ -619,6 +620,15 @@ struct kmap *__map__kmap(struct map *map)
return (struct kmap *)(&RC_CHK_ACCESS(map)[1]);
}
+static const struct kmap *__map__const_kmap(const struct map *map)
+{
+ const struct dso *dso = map__dso(map);
+
+ if (!dso || !dso__kernel(dso))
+ return NULL;
+ return (struct kmap *)(&RC_CHK_ACCESS(map)[1]);
+}
+
struct kmap *map__kmap(struct map *map)
{
struct kmap *kmap = __map__kmap(map);
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 4262f5a143be..979b3e11b9bc 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -133,13 +133,13 @@ static inline u64 map__unmap_ip(const struct map *map, u64 ip_or_rip)
}
/* rip/ip <-> addr suitable for passing to `objdump --start-address=` */
-u64 map__rip_2objdump(struct map *map, u64 rip);
+u64 map__rip_2objdump(const struct map *map, u64 rip);
/* objdump address -> memory address */
-u64 map__objdump_2mem(struct map *map, u64 ip);
+u64 map__objdump_2mem(const struct map *map, u64 ip);
/* objdump address -> rip */
-u64 map__objdump_2rip(struct map *map, u64 ip);
+u64 map__objdump_2rip(const struct map *map, u64 ip);
struct symbol;
struct thread;
@@ -173,11 +173,10 @@ struct thread;
__map__for_each_symbol_by_name(map, sym_name, (pos), idx)
struct dso_id;
-struct build_id;
struct map *map__new(struct machine *machine, u64 start, u64 len,
- u64 pgoff, struct dso_id *id, u32 prot, u32 flags,
- struct build_id *bid, char *filename, struct thread *thread);
+ u64 pgoff, const struct dso_id *id, u32 prot, u32 flags,
+ char *filename, struct thread *thread);
struct map *map__new2(u64 start, struct dso *dso);
void map__delete(struct map *map);
struct map *map__clone(struct map *map);
diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c
index 0b40d901675e..c321d4f4d846 100644
--- a/tools/perf/util/maps.c
+++ b/tools/perf/util/maps.c
@@ -477,6 +477,7 @@ static int __maps__insert(struct maps *maps, struct map *new)
}
/* Insert the value at the end. */
maps_by_address[nr_maps] = map__get(new);
+ map__set_kmap_maps(new, maps);
if (maps_by_name)
maps_by_name[nr_maps] = map__get(new);
@@ -502,8 +503,6 @@ static int __maps__insert(struct maps *maps, struct map *new)
if (map__end(new) < map__start(new))
RC_CHK_ACCESS(maps)->ends_broken = true;
- map__set_kmap_maps(new, maps);
-
return 0;
}
@@ -891,6 +890,7 @@ static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
if (before) {
map__put(maps_by_address[i]);
maps_by_address[i] = before;
+ map__set_kmap_maps(before, maps);
if (maps_by_name) {
map__put(maps_by_name[ni]);
@@ -918,6 +918,7 @@ static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
*/
map__put(maps_by_address[i]);
maps_by_address[i] = map__get(new);
+ map__set_kmap_maps(new, maps);
if (maps_by_name) {
map__put(maps_by_name[ni]);
@@ -930,8 +931,9 @@ static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
return err;
} else {
struct map *next = NULL;
+ unsigned int nr_maps = maps__nr_maps(maps);
- if (i + 1 < maps__nr_maps(maps))
+ if (i + 1 < nr_maps)
next = maps_by_address[i + 1];
if (!next || map__start(next) >= map__end(new)) {
@@ -942,18 +944,34 @@ static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
*/
map__put(maps_by_address[i]);
maps_by_address[i] = map__get(new);
+ map__set_kmap_maps(new, maps);
if (maps_by_name) {
map__put(maps_by_name[ni]);
maps_by_name[ni] = map__get(new);
}
- map__set_kmap_maps(new, maps);
-
check_invariants(maps);
return err;
}
- __maps__remove(maps, pos);
+ /*
+ * pos fully covers the previous mapping so remove
+ * it. The following is an inlined version of
+ * maps__remove that reuses the already computed
+ * indices.
+ */
+ map__put(maps_by_address[i]);
+ memmove(&maps_by_address[i],
+ &maps_by_address[i + 1],
+ (nr_maps - i - 1) * sizeof(*maps_by_address));
+
+ if (maps_by_name) {
+ map__put(maps_by_name[ni]);
+ memmove(&maps_by_name[ni],
+ &maps_by_name[ni + 1],
+ (nr_maps - ni - 1) * sizeof(*maps_by_name));
+ }
+ --RC_CHK_ACCESS(maps)->nr_maps;
check_invariants(maps);
/*
* Maps are ordered but no need to increase `i` as the
@@ -1019,6 +1037,7 @@ int maps__copy_from(struct maps *dest, struct maps *parent)
err = unwind__prepare_access(dest, new, NULL);
if (!err) {
dest_maps_by_address[i] = new;
+ map__set_kmap_maps(new, dest);
if (dest_maps_by_name)
dest_maps_by_name[i] = map__get(new);
RC_CHK_ACCESS(dest)->nr_maps = i + 1;
@@ -1082,10 +1101,13 @@ struct map *maps__find(struct maps *maps, u64 ip)
while (!done) {
down_read(maps__lock(maps));
if (maps__maps_by_address_sorted(maps)) {
- struct map **mapp =
- bsearch(&ip, maps__maps_by_address(maps), maps__nr_maps(maps),
- sizeof(*mapp), map__addr_cmp);
+ struct map **mapp = NULL;
+ struct map **maps_by_address = maps__maps_by_address(maps);
+ unsigned int nr_maps = maps__nr_maps(maps);
+ if (maps_by_address && nr_maps)
+ mapp = bsearch(&ip, maps_by_address, nr_maps, sizeof(*mapp),
+ map__addr_cmp);
if (mapp)
result = map__get(*mapp);
done = true;
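
The open-coded removal in __maps__fixup_overlap_and_insert() is the standard ordered-array deletion shape; generically (with put() standing in for map__put() and no reference-count checking):

/* Remove owned element i from an ordered array of *n pointers. */
static void remove_at(void **arr, unsigned int *n, unsigned int i,
		      void (*put)(void *))
{
	put(arr[i]);	/* drop the owned reference */
	memmove(&arr[i], &arr[i + 1], (*n - i - 1) * sizeof(*arr));
	(*n)--;
}
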
diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
index 884d9aebce91..0b49fce251fc 100644
--- a/tools/perf/util/mem-events.c
+++ b/tools/perf/util/mem-events.c
@@ -680,7 +680,10 @@ do { \
if (lvl & P(LVL, LFB)) stats->ld_fbhit++;
if (lvl & P(LVL, L1 )) stats->ld_l1hit++;
if (lvl & P(LVL, L2)) {
- stats->ld_l2hit++;
+ if (snoop & P(SNOOP, HITM))
+ HITM_INC(lcl_hitm);
+ else
+ stats->ld_l2hit++;
if (snoopx & P(SNOOPX, PEER))
PEER_INC(lcl_peer);
@@ -799,3 +802,181 @@ void c2c_add_stats(struct c2c_stats *stats, struct c2c_stats *add)
stats->nomap += add->nomap;
stats->noparse += add->noparse;
}
+
+/*
+ * It returns an index in hist_entry->mem_stat array for the given val which
+ * represents a data-src based on the mem_stat_type.
+ */
+int mem_stat_index(const enum mem_stat_type mst, const u64 val)
+{
+ union perf_mem_data_src src = {
+ .val = val,
+ };
+
+ switch (mst) {
+ case PERF_MEM_STAT_OP:
+ switch (src.mem_op) {
+ case PERF_MEM_OP_LOAD:
+ return MEM_STAT_OP_LOAD;
+ case PERF_MEM_OP_STORE:
+ return MEM_STAT_OP_STORE;
+ case PERF_MEM_OP_LOAD | PERF_MEM_OP_STORE:
+ return MEM_STAT_OP_LDST;
+ default:
+ if (src.mem_op & PERF_MEM_OP_PFETCH)
+ return MEM_STAT_OP_PFETCH;
+ if (src.mem_op & PERF_MEM_OP_EXEC)
+ return MEM_STAT_OP_EXEC;
+ return MEM_STAT_OP_OTHER;
+ }
+ case PERF_MEM_STAT_CACHE:
+ switch (src.mem_lvl_num) {
+ case PERF_MEM_LVLNUM_L1:
+ return MEM_STAT_CACHE_L1;
+ case PERF_MEM_LVLNUM_L2:
+ return MEM_STAT_CACHE_L2;
+ case PERF_MEM_LVLNUM_L3:
+ return MEM_STAT_CACHE_L3;
+ case PERF_MEM_LVLNUM_L4:
+ return MEM_STAT_CACHE_L4;
+ case PERF_MEM_LVLNUM_LFB:
+ return MEM_STAT_CACHE_L1_BUF;
+ case PERF_MEM_LVLNUM_L2_MHB:
+ return MEM_STAT_CACHE_L2_BUF;
+ default:
+ return MEM_STAT_CACHE_OTHER;
+ }
+ case PERF_MEM_STAT_MEMORY:
+ switch (src.mem_lvl_num) {
+ case PERF_MEM_LVLNUM_MSC:
+ return MEM_STAT_MEMORY_MSC;
+ case PERF_MEM_LVLNUM_RAM:
+ return MEM_STAT_MEMORY_RAM;
+ case PERF_MEM_LVLNUM_UNC:
+ return MEM_STAT_MEMORY_UNC;
+ case PERF_MEM_LVLNUM_CXL:
+ return MEM_STAT_MEMORY_CXL;
+ case PERF_MEM_LVLNUM_IO:
+ return MEM_STAT_MEMORY_IO;
+ case PERF_MEM_LVLNUM_PMEM:
+ return MEM_STAT_MEMORY_PMEM;
+ default:
+ return MEM_STAT_MEMORY_OTHER;
+ }
+ case PERF_MEM_STAT_SNOOP:
+ switch (src.mem_snoop) {
+ case PERF_MEM_SNOOP_HIT:
+ return MEM_STAT_SNOOP_HIT;
+ case PERF_MEM_SNOOP_HITM:
+ return MEM_STAT_SNOOP_HITM;
+ case PERF_MEM_SNOOP_MISS:
+ return MEM_STAT_SNOOP_MISS;
+ default:
+ return MEM_STAT_SNOOP_OTHER;
+ }
+ case PERF_MEM_STAT_DTLB:
+ switch (src.mem_dtlb) {
+ case PERF_MEM_TLB_L1 | PERF_MEM_TLB_HIT:
+ return MEM_STAT_DTLB_L1_HIT;
+ case PERF_MEM_TLB_L2 | PERF_MEM_TLB_HIT:
+ return MEM_STAT_DTLB_L2_HIT;
+ case PERF_MEM_TLB_L1 | PERF_MEM_TLB_L2 | PERF_MEM_TLB_HIT:
+ return MEM_STAT_DTLB_ANY_HIT;
+ default:
+ if (src.mem_dtlb & PERF_MEM_TLB_MISS)
+ return MEM_STAT_DTLB_MISS;
+ return MEM_STAT_DTLB_OTHER;
+ }
+ default:
+ break;
+ }
+ return -1;
+}
+
+/* To align output, the returned string must be shorter than MEM_STAT_PRINT_LEN */
+const char *mem_stat_name(const enum mem_stat_type mst, const int idx)
+{
+ switch (mst) {
+ case PERF_MEM_STAT_OP:
+ switch (idx) {
+ case MEM_STAT_OP_LOAD:
+ return "Load";
+ case MEM_STAT_OP_STORE:
+ return "Store";
+ case MEM_STAT_OP_LDST:
+ return "Ld+St";
+ case MEM_STAT_OP_PFETCH:
+ return "Pfetch";
+ case MEM_STAT_OP_EXEC:
+ return "Exec";
+ case MEM_STAT_OP_OTHER:
+ default:
+ return "Other";
+ }
+ case PERF_MEM_STAT_CACHE:
+ switch (idx) {
+ case MEM_STAT_CACHE_L1:
+ return "L1";
+ case MEM_STAT_CACHE_L2:
+ return "L2";
+ case MEM_STAT_CACHE_L3:
+ return "L3";
+ case MEM_STAT_CACHE_L4:
+ return "L4";
+ case MEM_STAT_CACHE_L1_BUF:
+ return "L1-buf";
+ case MEM_STAT_CACHE_L2_BUF:
+ return "L2-buf";
+ case MEM_STAT_CACHE_OTHER:
+ default:
+ return "Other";
+ }
+ case PERF_MEM_STAT_MEMORY:
+ switch (idx) {
+ case MEM_STAT_MEMORY_RAM:
+ return "RAM";
+ case MEM_STAT_MEMORY_MSC:
+ return "MSC";
+ case MEM_STAT_MEMORY_UNC:
+ return "Uncach";
+ case MEM_STAT_MEMORY_CXL:
+ return "CXL";
+ case MEM_STAT_MEMORY_IO:
+ return "IO";
+ case MEM_STAT_MEMORY_PMEM:
+ return "PMEM";
+ case MEM_STAT_MEMORY_OTHER:
+ default:
+ return "Other";
+ }
+ case PERF_MEM_STAT_SNOOP:
+ switch (idx) {
+ case MEM_STAT_SNOOP_HIT:
+ return "Hit";
+ case MEM_STAT_SNOOP_HITM:
+ return "HitM";
+ case MEM_STAT_SNOOP_MISS:
+ return "Miss";
+ case MEM_STAT_SNOOP_OTHER:
+ default:
+ return "Other";
+ }
+ case PERF_MEM_STAT_DTLB:
+ switch (idx) {
+ case MEM_STAT_DTLB_L1_HIT:
+ return "L1-Hit";
+ case MEM_STAT_DTLB_L2_HIT:
+ return "L2-Hit";
+ case MEM_STAT_DTLB_ANY_HIT:
+ return "L?-Hit";
+ case MEM_STAT_DTLB_MISS:
+ return "Miss";
+ case MEM_STAT_DTLB_OTHER:
+ default:
+ return "Other";
+ }
+ default:
+ break;
+ }
+ return "N/A";
+}
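
A sketch of the two helpers used together, assuming a perf_sample-style data_src value is at hand (the counts array and the sample variable are illustrative, not part of this patch):

	u64 counts[MEM_STAT_OP_OTHER + 1] = { 0 };
	int idx = mem_stat_index(PERF_MEM_STAT_OP, sample->data_src);

	if (idx >= 0)
		counts[idx]++;	/* bucket the access by operation type */

	for (int i = 0; i <= MEM_STAT_OP_OTHER; i++)
		printf("%-*s %" PRIu64 "\n", MEM_STAT_PRINT_LEN,
		       mem_stat_name(PERF_MEM_STAT_OP, i), counts[i]);
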
diff --git a/tools/perf/util/mem-events.h b/tools/perf/util/mem-events.h
index a5c19d39ee37..5b98076904b0 100644
--- a/tools/perf/util/mem-events.h
+++ b/tools/perf/util/mem-events.h
@@ -89,4 +89,61 @@ struct hist_entry;
int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi);
void c2c_add_stats(struct c2c_stats *stats, struct c2c_stats *add);
+enum mem_stat_type {
+ PERF_MEM_STAT_OP,
+ PERF_MEM_STAT_CACHE,
+ PERF_MEM_STAT_MEMORY,
+ PERF_MEM_STAT_SNOOP,
+ PERF_MEM_STAT_DTLB,
+};
+
+#define MEM_STAT_PRINT_LEN 7 /* 1 space + 5 digits + 1 percent sign */
+
+enum mem_stat_op {
+ MEM_STAT_OP_LOAD,
+ MEM_STAT_OP_STORE,
+ MEM_STAT_OP_LDST,
+ MEM_STAT_OP_PFETCH,
+ MEM_STAT_OP_EXEC,
+ MEM_STAT_OP_OTHER,
+};
+
+enum mem_stat_cache {
+ MEM_STAT_CACHE_L1,
+ MEM_STAT_CACHE_L2,
+ MEM_STAT_CACHE_L3,
+ MEM_STAT_CACHE_L4,
+ MEM_STAT_CACHE_L1_BUF,
+ MEM_STAT_CACHE_L2_BUF,
+ MEM_STAT_CACHE_OTHER,
+};
+
+enum mem_stat_memory {
+ MEM_STAT_MEMORY_RAM,
+ MEM_STAT_MEMORY_MSC,
+ MEM_STAT_MEMORY_UNC,
+ MEM_STAT_MEMORY_CXL,
+ MEM_STAT_MEMORY_IO,
+ MEM_STAT_MEMORY_PMEM,
+ MEM_STAT_MEMORY_OTHER,
+};
+
+enum mem_stat_snoop {
+ MEM_STAT_SNOOP_HIT,
+ MEM_STAT_SNOOP_HITM,
+ MEM_STAT_SNOOP_MISS,
+ MEM_STAT_SNOOP_OTHER,
+};
+
+enum mem_stat_dtlb {
+ MEM_STAT_DTLB_L1_HIT,
+ MEM_STAT_DTLB_L2_HIT,
+ MEM_STAT_DTLB_ANY_HIT,
+ MEM_STAT_DTLB_MISS,
+ MEM_STAT_DTLB_OTHER,
+};
+
+int mem_stat_index(const enum mem_stat_type mst, const u64 data_src);
+const char *mem_stat_name(const enum mem_stat_type mst, const int idx);
+
#endif /* __PERF_MEM_EVENTS_H */
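
Because each enum above is dense from zero and ends in an *_OTHER catch-all, the last member plus one can size per-type arrays directly. A sketch (the nr_mem_stats table is hypothetical, not part of the header):

	static const int nr_mem_stats[] = {
		[PERF_MEM_STAT_OP]     = MEM_STAT_OP_OTHER + 1,
		[PERF_MEM_STAT_CACHE]  = MEM_STAT_CACHE_OTHER + 1,
		[PERF_MEM_STAT_MEMORY] = MEM_STAT_MEMORY_OTHER + 1,
		[PERF_MEM_STAT_SNOOP]  = MEM_STAT_SNOOP_OTHER + 1,
		[PERF_MEM_STAT_DTLB]   = MEM_STAT_DTLB_OTHER + 1,
	};
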
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 46920ebadfd1..25c75fdbfc52 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -103,7 +103,7 @@ static void metric_event_delete(struct rblist *rblist __maybe_unused,
free(me);
}
-static void metricgroup__rblist_init(struct rblist *metric_events)
+void metricgroup__rblist_init(struct rblist *metric_events)
{
rblist__init(metric_events);
metric_events->node_cmp = metric_event_cmp;
@@ -152,6 +152,8 @@ struct metric {
* Should events of the metric be grouped?
*/
bool group_events;
+ /** Show events even if in the Default metric group. */
+ bool default_show_events;
/**
* Parsed events for the metric. Optional as events may be taken from a
* different metric whose group contains all the IDs necessary for this
@@ -179,7 +181,7 @@ static void metric__watchdog_constraint_hint(const char *name, bool foot)
" echo 1 > /proc/sys/kernel/nmi_watchdog\n");
}
-static bool metric__group_events(const struct pmu_metric *pm)
+static bool metric__group_events(const struct pmu_metric *pm, bool metric_no_threshold)
{
switch (pm->event_grouping) {
case MetricNoGroupEvents:
@@ -191,6 +193,13 @@ static bool metric__group_events(const struct pmu_metric *pm)
return false;
case MetricNoGroupEventsSmt:
return !smt_on();
+ case MetricNoGroupEventsThresholdAndNmi:
+ if (metric_no_threshold)
+ return true;
+ if (!sysctl__nmi_watchdog_enabled())
+ return true;
+ metric__watchdog_constraint_hint(pm->metric_name, /*foot=*/false);
+ return false;
case MetricGroupEvents:
default:
return true;
@@ -212,6 +221,7 @@ static void metric__free(struct metric *m)
static struct metric *metric__new(const struct pmu_metric *pm,
const char *modifier,
bool metric_no_group,
+ bool metric_no_threshold,
int runtime,
const char *user_requested_cpu_list,
bool system_wide)
@@ -246,7 +256,8 @@ static struct metric *metric__new(const struct pmu_metric *pm,
}
m->pctx->sctx.runtime = runtime;
m->pctx->sctx.system_wide = system_wide;
- m->group_events = !metric_no_group && metric__group_events(pm);
+ m->group_events = !metric_no_group && metric__group_events(pm, metric_no_threshold);
+ m->default_show_events = pm->default_show_events;
m->metric_refs = NULL;
m->evlist = NULL;
@@ -353,7 +364,7 @@ static int setup_metric_events(const char *pmu, struct hashmap *ids,
return 0;
}
-static bool match_metric(const char *metric_or_groups, const char *sought)
+static bool match_metric_or_groups(const char *metric_or_groups, const char *sought)
{
int len;
char *m;
@@ -369,117 +380,19 @@ static bool match_metric(const char *metric_or_groups, const char *sought)
(metric_or_groups[len] == 0 || metric_or_groups[len] == ';'))
return true;
m = strchr(metric_or_groups, ';');
- return m && match_metric(m + 1, sought);
+ return m && match_metric_or_groups(m + 1, sought);
}
-static bool match_pm_metric(const struct pmu_metric *pm, const char *pmu, const char *metric)
+static bool match_pm_metric_or_groups(const struct pmu_metric *pm, const char *pmu,
+ const char *metric_or_groups)
{
const char *pm_pmu = pm->pmu ?: "cpu";
if (strcmp(pmu, "all") && strcmp(pm_pmu, pmu))
return false;
- return match_metric(pm->metric_group, metric) ||
- match_metric(pm->metric_name, metric);
-}
-
-/** struct mep - RB-tree node for building printing information. */
-struct mep {
- /** nd - RB-tree element. */
- struct rb_node nd;
- /** @metric_group: Owned metric group name, separated others with ';'. */
- char *metric_group;
- const char *metric_name;
- const char *metric_desc;
- const char *metric_long_desc;
- const char *metric_expr;
- const char *metric_threshold;
- const char *metric_unit;
-};
-
-static int mep_cmp(struct rb_node *rb_node, const void *entry)
-{
- struct mep *a = container_of(rb_node, struct mep, nd);
- struct mep *b = (struct mep *)entry;
- int ret;
-
- ret = strcmp(a->metric_group, b->metric_group);
- if (ret)
- return ret;
-
- return strcmp(a->metric_name, b->metric_name);
-}
-
-static struct rb_node *mep_new(struct rblist *rl __maybe_unused, const void *entry)
-{
- struct mep *me = malloc(sizeof(struct mep));
-
- if (!me)
- return NULL;
-
- memcpy(me, entry, sizeof(struct mep));
- return &me->nd;
-}
-
-static void mep_delete(struct rblist *rl __maybe_unused,
- struct rb_node *nd)
-{
- struct mep *me = container_of(nd, struct mep, nd);
-
- zfree(&me->metric_group);
- free(me);
-}
-
-static struct mep *mep_lookup(struct rblist *groups, const char *metric_group,
- const char *metric_name)
-{
- struct rb_node *nd;
- struct mep me = {
- .metric_group = strdup(metric_group),
- .metric_name = metric_name,
- };
- nd = rblist__find(groups, &me);
- if (nd) {
- free(me.metric_group);
- return container_of(nd, struct mep, nd);
- }
- rblist__add_node(groups, &me);
- nd = rblist__find(groups, &me);
- if (nd)
- return container_of(nd, struct mep, nd);
- return NULL;
-}
-
-static int metricgroup__add_to_mep_groups(const struct pmu_metric *pm,
- struct rblist *groups)
-{
- const char *g;
- char *omg, *mg;
-
- mg = strdup(pm->metric_group ?: pm->metric_name);
- if (!mg)
- return -ENOMEM;
- omg = mg;
- while ((g = strsep(&mg, ";")) != NULL) {
- struct mep *me;
-
- g = skip_spaces(g);
- if (strlen(g))
- me = mep_lookup(groups, g, pm->metric_name);
- else
- me = mep_lookup(groups, pm->metric_name, pm->metric_name);
-
- if (me) {
- me->metric_desc = pm->desc;
- me->metric_long_desc = pm->long_desc;
- me->metric_expr = pm->metric_expr;
- me->metric_threshold = pm->metric_threshold;
- me->metric_unit = pm->unit;
- }
- }
- free(omg);
-
- return 0;
+ return match_metric_or_groups(pm->metric_group, metric_or_groups) ||
+ match_metric_or_groups(pm->metric_name, metric_or_groups);
}
struct metricgroup_iter_data {
@@ -507,53 +420,30 @@ static int metricgroup__sys_event_iter(const struct pmu_metric *pm,
return 0;
}
-static int metricgroup__add_to_mep_groups_callback(const struct pmu_metric *pm,
- const struct pmu_metrics_table *table __maybe_unused,
- void *vdata)
+int metricgroup__for_each_metric(const struct pmu_metrics_table *table, pmu_metric_iter_fn fn,
+ void *data)
{
- struct rblist *groups = vdata;
+ struct metricgroup_iter_data sys_data = {
+ .fn = fn,
+ .data = data,
+ };
+ const struct pmu_metrics_table *tables[2] = {
+ table,
+ pmu_metrics_table__default(),
+ };
- return metricgroup__add_to_mep_groups(pm, groups);
-}
+ for (size_t i = 0; i < ARRAY_SIZE(tables); i++) {
+ int ret;
-void metricgroup__print(const struct print_callbacks *print_cb, void *print_state)
-{
- struct rblist groups;
- const struct pmu_metrics_table *table;
- struct rb_node *node, *next;
-
- rblist__init(&groups);
- groups.node_new = mep_new;
- groups.node_cmp = mep_cmp;
- groups.node_delete = mep_delete;
- table = pmu_metrics_table__find();
- if (table) {
- pmu_metrics_table__for_each_metric(table,
- metricgroup__add_to_mep_groups_callback,
- &groups);
- }
- {
- struct metricgroup_iter_data data = {
- .fn = metricgroup__add_to_mep_groups_callback,
- .data = &groups,
- };
- pmu_for_each_sys_metric(metricgroup__sys_event_iter, &data);
- }
+ if (!tables[i])
+ continue;
- for (node = rb_first_cached(&groups.entries); node; node = next) {
- struct mep *me = container_of(node, struct mep, nd);
-
- print_cb->print_metric(print_state,
- me->metric_group,
- me->metric_name,
- me->metric_desc,
- me->metric_long_desc,
- me->metric_expr,
- me->metric_threshold,
- me->metric_unit);
- next = rb_next(node);
- rblist__remove_node(&groups, node);
+ ret = pmu_metrics_table__for_each_metric(tables[i], fn, data);
+ if (ret)
+ return ret;
}
+
+ return pmu_for_each_sys_metric(metricgroup__sys_event_iter, &sys_data);
}
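
The consolidated iterator walks the architecture table, the default table and then the sysfs metrics through a single callback. A sketch of a caller that counts every visible metric (count_cb is hypothetical, but its signature follows the pmu_metric_iter_fn callbacks used throughout this file):

	static int count_cb(const struct pmu_metric *pm __maybe_unused,
			    const struct pmu_metrics_table *table __maybe_unused,
			    void *vdata)
	{
		(*(int *)vdata)++;
		return 0;	/* a non-zero return stops the iteration */
	}

	int nr_metrics = 0;

	metricgroup__for_each_metric(pmu_metrics_table__find(), count_cb, &nr_metrics);
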
static const char *code_characters = ",-=@";
@@ -802,11 +692,6 @@ struct metricgroup_add_iter_data {
const struct pmu_metrics_table *table;
};
-static bool metricgroup__find_metric(const char *pmu,
- const char *metric,
- const struct pmu_metrics_table *table,
- struct pmu_metric *pm);
-
static int add_metric(struct list_head *metric_list,
const struct pmu_metric *pm,
const char *modifier,
@@ -818,6 +703,16 @@ static int add_metric(struct list_head *metric_list,
const struct visited_metric *visited,
const struct pmu_metrics_table *table);
+static int metricgroup__find_metric_callback(const struct pmu_metric *pm,
+ const struct pmu_metrics_table *table __maybe_unused,
+ void *vdata)
+{
+ struct pmu_metric *copied_pm = vdata;
+
+ memcpy(copied_pm, pm, sizeof(*pm));
+ return 0;
+}
+
/**
* resolve_metric - Locate metrics within the root metric and recursively add
* references to them.
@@ -838,7 +733,7 @@ static int add_metric(struct list_head *metric_list,
* architecture perf is running upon.
*/
static int resolve_metric(struct list_head *metric_list,
- const char *pmu,
+ struct perf_pmu *pmu,
const char *modifier,
bool metric_no_group,
bool metric_no_threshold,
@@ -868,7 +763,9 @@ static int resolve_metric(struct list_head *metric_list,
hashmap__for_each_entry(root_metric->pctx->ids, cur, bkt) {
struct pmu_metric pm;
- if (metricgroup__find_metric(pmu, cur->pkey, table, &pm)) {
+ if (pmu_metrics_table__find_metric(table, pmu, cur->pkey,
+ metricgroup__find_metric_callback,
+ &pm) != PMU_METRICS__NOT_FOUND) {
pending = realloc(pending,
(pending_cnt + 1) * sizeof(struct to_resolve));
if (!pending)
@@ -953,8 +850,8 @@ static int __add_metric(struct list_head *metric_list,
* This metric is the root of a tree and may reference other
* metrics that are added recursively.
*/
- root_metric = metric__new(pm, modifier, metric_no_group, runtime,
- user_requested_cpu_list, system_wide);
+ root_metric = metric__new(pm, modifier, metric_no_group, metric_no_threshold,
+ runtime, user_requested_cpu_list, system_wide);
if (!root_metric)
return -ENOMEM;
@@ -1019,7 +916,12 @@ static int __add_metric(struct list_head *metric_list,
}
if (!ret) {
/* Resolve referenced metrics. */
- const char *pmu = pm->pmu ?: "cpu";
+ struct perf_pmu *pmu;
+
+ if (pm->pmu && pm->pmu[0] != '\0')
+ pmu = perf_pmus__find(pm->pmu);
+ else
+ pmu = perf_pmus__scan_core(/*pmu=*/ NULL);
ret = resolve_metric(metric_list, pmu, modifier, metric_no_group,
metric_no_threshold, user_requested_cpu_list,
@@ -1036,44 +938,6 @@ static int __add_metric(struct list_head *metric_list,
return ret;
}
-struct metricgroup__find_metric_data {
- const char *pmu;
- const char *metric;
- struct pmu_metric *pm;
-};
-
-static int metricgroup__find_metric_callback(const struct pmu_metric *pm,
- const struct pmu_metrics_table *table __maybe_unused,
- void *vdata)
-{
- struct metricgroup__find_metric_data *data = vdata;
- const char *pm_pmu = pm->pmu ?: "cpu";
-
- if (strcmp(data->pmu, "all") && strcmp(pm_pmu, data->pmu))
- return 0;
-
- if (!match_metric(pm->metric_name, data->metric))
- return 0;
-
- memcpy(data->pm, pm, sizeof(*pm));
- return 1;
-}
-
-static bool metricgroup__find_metric(const char *pmu,
- const char *metric,
- const struct pmu_metrics_table *table,
- struct pmu_metric *pm)
-{
- struct metricgroup__find_metric_data data = {
- .pmu = pmu,
- .metric = metric,
- .pm = pm,
- };
-
- return pmu_metrics_table__for_each_metric(table, metricgroup__find_metric_callback, &data)
- ? true : false;
-}
-
static int add_metric(struct list_head *metric_list,
const struct pmu_metric *pm,
const char *modifier,
@@ -1112,29 +976,6 @@ static int add_metric(struct list_head *metric_list,
return ret;
}
-static int metricgroup__add_metric_sys_event_iter(const struct pmu_metric *pm,
- const struct pmu_metrics_table *table __maybe_unused,
- void *data)
-{
- struct metricgroup_add_iter_data *d = data;
- int ret;
-
- if (!match_pm_metric(pm, d->pmu, d->metric_name))
- return 0;
-
- ret = add_metric(d->metric_list, pm, d->modifier, d->metric_no_group,
- d->metric_no_threshold, d->user_requested_cpu_list,
- d->system_wide, d->root_metric, d->visited, d->table);
- if (ret)
- goto out;
-
- *(d->has_match) = true;
-
-out:
- *(d->ret) = ret;
- return ret;
-}
-
/**
* metric_list_cmp - list_sort comparator that sorts metrics with more events to
* the front. tool events are excluded from the count.
@@ -1200,9 +1041,9 @@ static int metricgroup__add_metric_callback(const struct pmu_metric *pm,
struct metricgroup__add_metric_data *data = vdata;
int ret = 0;
- if (pm->metric_expr && match_pm_metric(pm, data->pmu, data->metric_name)) {
+ if (pm->metric_expr && match_pm_metric_or_groups(pm, data->pmu, data->metric_name)) {
bool metric_no_group = data->metric_no_group ||
- match_metric(pm->metricgroup_no_group, data->metric_name);
+ match_metric_or_groups(pm->metricgroup_no_group, data->metric_name);
data->has_match = true;
ret = add_metric(data->list, pm, data->modifier, metric_no_group,
@@ -1238,55 +1079,26 @@ static int metricgroup__add_metric(const char *pmu, const char *metric_name, con
{
LIST_HEAD(list);
int ret;
- bool has_match = false;
-
- {
- struct metricgroup__add_metric_data data = {
- .list = &list,
- .pmu = pmu,
- .metric_name = metric_name,
- .modifier = modifier,
- .metric_no_group = metric_no_group,
- .metric_no_threshold = metric_no_threshold,
- .user_requested_cpu_list = user_requested_cpu_list,
- .system_wide = system_wide,
- .has_match = false,
- };
- /*
- * Iterate over all metrics seeing if metric matches either the
- * name or group. When it does add the metric to the list.
- */
- ret = pmu_metrics_table__for_each_metric(table, metricgroup__add_metric_callback,
- &data);
- if (ret)
- goto out;
+ struct metricgroup__add_metric_data data = {
+ .list = &list,
+ .pmu = pmu,
+ .metric_name = metric_name,
+ .modifier = modifier,
+ .metric_no_group = metric_no_group,
+ .metric_no_threshold = metric_no_threshold,
+ .user_requested_cpu_list = user_requested_cpu_list,
+ .system_wide = system_wide,
+ .has_match = false,
+ };
- has_match = data.has_match;
- }
- {
- struct metricgroup_iter_data data = {
- .fn = metricgroup__add_metric_sys_event_iter,
- .data = (void *) &(struct metricgroup_add_iter_data) {
- .metric_list = &list,
- .pmu = pmu,
- .metric_name = metric_name,
- .modifier = modifier,
- .metric_no_group = metric_no_group,
- .user_requested_cpu_list = user_requested_cpu_list,
- .system_wide = system_wide,
- .has_match = &has_match,
- .ret = &ret,
- .table = table,
- },
- };
-
- pmu_for_each_sys_metric(metricgroup__sys_event_iter, &data);
- }
- /* End of pmu events. */
- if (!has_match)
+ /*
+ * Iterate over all metrics, checking whether each matches either the
+ * name or the group. When one does, add the metric to the list.
+ */
+ ret = metricgroup__for_each_metric(table, metricgroup__add_metric_callback, &data);
+ if (!ret && !data.has_match)
ret = -EINVAL;
-out:
/*
* add to metric_list so that they can be released
* even if it's failed
@@ -1522,6 +1334,51 @@ err_out:
return ret;
}
+/* How many times will a given evsel be used in a set of metrics? */
+static int count_uses(struct list_head *metric_list, struct evsel *evsel)
+{
+ const char *metric_id = evsel__metric_id(evsel);
+ struct metric *m;
+ int uses = 0;
+
+ list_for_each_entry(m, metric_list, nd) {
+ if (hashmap__find(m->pctx->ids, metric_id, NULL))
+ uses++;
+ }
+ return uses;
+}
+
+/*
+ * Select the evsel that stat-display will use to trigger shadow/metric
+ * printing. Pick the least shared non-tool evsel, so that each metric tends
+ * to be paired with a hardware counter that is specific to it.
+ */
+static struct evsel *pick_display_evsel(struct list_head *metric_list,
+ struct evsel **metric_events)
+{
+ struct evsel *selected = metric_events[0];
+ size_t selected_uses;
+ bool selected_is_tool;
+
+ if (!selected)
+ return NULL;
+
+ selected_uses = count_uses(metric_list, selected);
+ selected_is_tool = evsel__is_tool(selected);
+ for (int i = 1; metric_events[i]; i++) {
+ struct evsel *candidate = metric_events[i];
+ size_t candidate_uses = count_uses(metric_list, candidate);
+
+ if ((selected_is_tool && !evsel__is_tool(candidate)) ||
+ (candidate_uses < selected_uses)) {
+ selected = candidate;
+ selected_uses = candidate_uses;
+ selected_is_tool = evsel__is_tool(selected);
+ }
+ }
+ return selected;
+}
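
A worked illustration of the selection rule, assuming two metrics that share one event:

	/*
	 * With metric A = {cycles, instructions} and B = {cycles, cache-misses},
	 * count_uses() gives cycles a use count of 2 and the other events 1, so
	 * pick_display_evsel() chooses instructions for A and cache-misses for
	 * B, pairing each metric's output with a counter unique to it.
	 */
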
+
static int parse_groups(struct evlist *perf_evlist,
const char *pmu, const char *str,
bool metric_no_group,
@@ -1530,7 +1387,6 @@ static int parse_groups(struct evlist *perf_evlist,
const char *user_requested_cpu_list,
bool system_wide,
bool fake_pmu,
- struct rblist *metric_events_list,
const struct pmu_metrics_table *table)
{
struct evlist *combined_evlist = NULL;
@@ -1540,8 +1396,6 @@ static int parse_groups(struct evlist *perf_evlist,
bool is_default = !strcmp(str, "Default");
int ret;
- if (metric_events_list->nr_entries == 0)
- metricgroup__rblist_init(metric_events_list);
ret = metricgroup__add_metric_list(pmu, str, metric_no_group, metric_no_threshold,
user_requested_cpu_list,
system_wide, &metric_list, table);
@@ -1632,7 +1486,9 @@ static int parse_groups(struct evlist *perf_evlist,
goto out;
}
- me = metricgroup__lookup(metric_events_list, metric_events[0], true);
+ me = metricgroup__lookup(&perf_evlist->metric_events,
+ pick_display_evsel(&metric_list, metric_events),
+ /*create=*/true);
expr = malloc(sizeof(struct metric_expr));
if (!expr) {
@@ -1656,9 +1512,20 @@ static int parse_groups(struct evlist *perf_evlist,
if (!expr->metric_name) {
ret = -ENOMEM;
+ free(expr);
free(metric_events);
goto out;
}
+ if (m->default_show_events) {
+ struct evsel *pos;
+
+ for (int i = 0; metric_events[i]; i++)
+ metric_events[i]->default_show_events = true;
+ evlist__for_each_entry(metric_evlist, pos) {
+ if (pos->metric_leader && pos->metric_leader->default_show_events)
+ pos->default_show_events = true;
+ }
+ }
expr->metric_threshold = m->metric_threshold;
expr->metric_unit = m->metric_unit;
expr->metric_events = metric_events;
@@ -1692,8 +1559,7 @@ int metricgroup__parse_groups(struct evlist *perf_evlist,
bool metric_no_threshold,
const char *user_requested_cpu_list,
bool system_wide,
- bool hardware_aware_grouping,
- struct rblist *metric_events)
+ bool hardware_aware_grouping)
{
const struct pmu_metrics_table *table = pmu_metrics_table__find();
@@ -1704,13 +1570,12 @@ int metricgroup__parse_groups(struct evlist *perf_evlist,
return parse_groups(perf_evlist, pmu, str, metric_no_group, metric_no_merge,
metric_no_threshold, user_requested_cpu_list, system_wide,
- /*fake_pmu=*/false, metric_events, table);
+ /*fake_pmu=*/false, table);
}
int metricgroup__parse_groups_test(struct evlist *evlist,
const struct pmu_metrics_table *table,
- const char *str,
- struct rblist *metric_events)
+ const char *str)
{
return parse_groups(evlist, "all", str,
/*metric_no_group=*/false,
@@ -1718,35 +1583,41 @@ int metricgroup__parse_groups_test(struct evlist *evlist,
/*metric_no_threshold=*/false,
/*user_requested_cpu_list=*/NULL,
/*system_wide=*/false,
- /*fake_pmu=*/true, metric_events, table);
+ /*fake_pmu=*/true, table);
}
struct metricgroup__has_metric_data {
const char *pmu;
- const char *metric;
+ const char *metric_or_groups;
};
-static int metricgroup__has_metric_callback(const struct pmu_metric *pm,
- const struct pmu_metrics_table *table __maybe_unused,
- void *vdata)
+static int metricgroup__has_metric_or_groups_callback(const struct pmu_metric *pm,
+ const struct pmu_metrics_table *table
+ __maybe_unused,
+ void *vdata)
{
struct metricgroup__has_metric_data *data = vdata;
- return match_pm_metric(pm, data->pmu, data->metric) ? 1 : 0;
+ return match_pm_metric_or_groups(pm, data->pmu, data->metric_or_groups) ? 1 : 0;
}
-bool metricgroup__has_metric(const char *pmu, const char *metric)
+bool metricgroup__has_metric_or_groups(const char *pmu, const char *metric_or_groups)
{
- const struct pmu_metrics_table *table = pmu_metrics_table__find();
+ const struct pmu_metrics_table *tables[2] = {
+ pmu_metrics_table__find(),
+ pmu_metrics_table__default(),
+ };
struct metricgroup__has_metric_data data = {
.pmu = pmu,
- .metric = metric,
+ .metric_or_groups = metric_or_groups,
};
- if (!table)
- return false;
-
- return pmu_metrics_table__for_each_metric(table, metricgroup__has_metric_callback, &data)
- ? true : false;
+ for (size_t i = 0; i < ARRAY_SIZE(tables); i++) {
+ if (pmu_metrics_table__for_each_metric(tables[i],
+ metricgroup__has_metric_or_groups_callback,
+ &data))
+ return true;
+ }
+ return false;
}
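
Callers can use the renamed predicate to test a metric name and a metric group name alike; a minimal sketch, using "all" to match any PMU as the function's own comparison allows:

	if (metricgroup__has_metric_or_groups("all", "TopdownL1"))
		pr_debug("TopdownL1 is available as a metric or metric group\n");
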
static int metricgroup__topdown_max_level_callback(const struct pmu_metric *pm,
@@ -1800,13 +1671,14 @@ int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
evsel = evlist__find_evsel(evlist, old_me->evsel->core.idx);
if (!evsel)
return -EINVAL;
- new_me = metricgroup__lookup(new_metric_events, evsel, true);
+ new_me = metricgroup__lookup(new_metric_events, evsel, /*create=*/true);
if (!new_me)
return -ENOMEM;
pr_debug("copying metric event for cgroup '%s': %s (idx=%d)\n",
cgrp ? cgrp->name : "root", evsel->name, evsel->core.idx);
+ new_me->is_default = old_me->is_default;
list_for_each_entry(old_expr, &old_me->head, nd) {
new_expr = malloc(sizeof(*new_expr));
if (!new_expr)
@@ -1820,6 +1692,7 @@ int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
new_expr->metric_unit = old_expr->metric_unit;
new_expr->runtime = old_expr->runtime;
+ new_expr->default_metricgroup_name = old_expr->default_metricgroup_name;
if (old_expr->metric_refs) {
/* calculate number of metric_events */
diff --git a/tools/perf/util/metricgroup.h b/tools/perf/util/metricgroup.h
index 779f6ede1b51..4be6bfc13c46 100644
--- a/tools/perf/util/metricgroup.h
+++ b/tools/perf/util/metricgroup.h
@@ -16,7 +16,7 @@ struct cgroup;
/**
* A node in a rblist keyed by the evsel. The global rblist of metric events
- * generally exists in perf_stat_config. The evsel is looked up in the rblist
+ * generally exists in evlist. The evsel is looked up in the rblist
* yielding a list of metric_expr.
*/
struct metric_event {
@@ -77,17 +77,17 @@ int metricgroup__parse_groups(struct evlist *perf_evlist,
bool metric_no_threshold,
const char *user_requested_cpu_list,
bool system_wide,
- bool hardware_aware_grouping,
- struct rblist *metric_events);
+ bool hardware_aware_grouping);
int metricgroup__parse_groups_test(struct evlist *evlist,
const struct pmu_metrics_table *table,
- const char *str,
- struct rblist *metric_events);
+ const char *str);
-void metricgroup__print(const struct print_callbacks *print_cb, void *print_state);
-bool metricgroup__has_metric(const char *pmu, const char *metric);
+int metricgroup__for_each_metric(const struct pmu_metrics_table *table, pmu_metric_iter_fn fn,
+ void *data);
+bool metricgroup__has_metric_or_groups(const char *pmu, const char *metric_or_groups);
unsigned int metricgroups__topdown_max_level(void);
int arch_get_runtimeparam(const struct pmu_metric *pm);
+void metricgroup__rblist_init(struct rblist *metric_events);
void metricgroup__rblist_exit(struct rblist *metric_events);
int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index a34726219af3..b69f926d314b 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -7,6 +7,7 @@
*/
#include <sys/mman.h>
+#include <errno.h>
#include <inttypes.h>
#include <asm/bug.h>
#include <linux/zalloc.h>
diff --git a/tools/perf/util/mutex.c b/tools/perf/util/mutex.c
index bca7f0717f35..7aa1f3f55a7d 100644
--- a/tools/perf/util/mutex.c
+++ b/tools/perf/util/mutex.c
@@ -17,7 +17,7 @@ static void check_err(const char *fn, int err)
#define CHECK_ERR(err) check_err(__func__, err)
-static void __mutex_init(struct mutex *mtx, bool pshared)
+static void __mutex_init(struct mutex *mtx, bool pshared, bool recursive)
{
pthread_mutexattr_t attr;
@@ -27,21 +27,27 @@ static void __mutex_init(struct mutex *mtx, bool pshared)
/* In normal builds enable error checking, such as recursive usage. */
CHECK_ERR(pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK));
#endif
+ if (recursive)
+ CHECK_ERR(pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE));
if (pshared)
CHECK_ERR(pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));
-
CHECK_ERR(pthread_mutex_init(&mtx->lock, &attr));
CHECK_ERR(pthread_mutexattr_destroy(&attr));
}
void mutex_init(struct mutex *mtx)
{
- __mutex_init(mtx, /*pshared=*/false);
+ __mutex_init(mtx, /*pshared=*/false, /*recursive=*/false);
}
void mutex_init_pshared(struct mutex *mtx)
{
- __mutex_init(mtx, /*pshared=*/true);
+ __mutex_init(mtx, /*pshared=*/true, /*recursive=*/false);
+}
+
+void mutex_init_recursive(struct mutex *mtx)
+{
+ __mutex_init(mtx, /*pshared=*/false, /*recursive=*/true);
}
void mutex_destroy(struct mutex *mtx)
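
A sketch of what the new initializer permits: the same thread may re-acquire the lock, which the default error-checking mutex would report as a deadlock:

	static struct mutex m;

	mutex_init_recursive(&m);
	mutex_lock(&m);
	mutex_lock(&m);		/* same thread: nests instead of failing */
	mutex_unlock(&m);
	mutex_unlock(&m);	/* ownership released once the count hits zero */
	mutex_destroy(&m);
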
diff --git a/tools/perf/util/mutex.h b/tools/perf/util/mutex.h
index 62d258c71ded..70232d8d094f 100644
--- a/tools/perf/util/mutex.h
+++ b/tools/perf/util/mutex.h
@@ -43,6 +43,12 @@
#define EXCLUSIVE_LOCK_FUNCTION(...) __attribute__((exclusive_lock_function(__VA_ARGS__)))
/*
+ * Documents functions that acquire a shared (reader) lock in the body of a
+ * function, and do not release it.
+ */
+#define SHARED_LOCK_FUNCTION(...) __attribute__((shared_lock_function(__VA_ARGS__)))
+
+/*
* Documents functions that expect a lock to be held on entry to the function,
* and release it in the body of the function.
*/
@@ -55,6 +61,9 @@
/* Documents a function that expects a mutex to be held prior to entry. */
#define EXCLUSIVE_LOCKS_REQUIRED(...) __attribute__((exclusive_locks_required(__VA_ARGS__)))
+/* Documents a function that expects a shared (reader) lock to be held prior to entry. */
+#define SHARED_LOCKS_REQUIRED(...) __attribute__((shared_locks_required(__VA_ARGS__)))
+
/* Turns off thread safety checking within the body of a particular function. */
#define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))
@@ -66,9 +75,11 @@
#define LOCKS_EXCLUDED(...)
#define LOCK_RETURNED(x)
#define EXCLUSIVE_LOCK_FUNCTION(...)
+#define SHARED_LOCK_FUNCTION(...)
#define UNLOCK_FUNCTION(...)
#define EXCLUSIVE_TRYLOCK_FUNCTION(...)
#define EXCLUSIVE_LOCKS_REQUIRED(...)
+#define SHARED_LOCKS_REQUIRED(...)
#define NO_THREAD_SAFETY_ANALYSIS
#endif
@@ -93,6 +104,8 @@ void mutex_init(struct mutex *mtx);
* process-private attribute.
*/
void mutex_init_pshared(struct mutex *mtx);
+/* Initializes a mutex that may be recursively held on the same thread. */
+void mutex_init_recursive(struct mutex *mtx);
void mutex_destroy(struct mutex *mtx);
void mutex_lock(struct mutex *mtx) EXCLUSIVE_LOCK_FUNCTION(*mtx);
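
A sketch of the reader-side annotations in use, checked by clang's -Wthread-safety when the attributes are enabled (the rwlock type and helpers here are hypothetical):

	struct rwlock;	/* any type the analysis treats as a capability */

	void rd_lock(struct rwlock *l) SHARED_LOCK_FUNCTION(*l);
	void rd_unlock(struct rwlock *l) UNLOCK_FUNCTION(*l);
	int  peek(struct rwlock *l) SHARED_LOCKS_REQUIRED(*l);
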
diff --git a/tools/perf/util/namespaces.c b/tools/perf/util/namespaces.c
index 68f5de2d79c7..01502570b32d 100644
--- a/tools/perf/util/namespaces.c
+++ b/tools/perf/util/namespaces.c
@@ -6,7 +6,6 @@
#include "namespaces.h"
#include "event.h"
-#include "get_current_dir_name.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
@@ -293,14 +292,14 @@ void nsinfo__mountns_enter(struct nsinfo *nsi,
if (!nsi || !nsinfo__need_setns(nsi))
return;
- if (snprintf(curpath, PATH_MAX, "/proc/self/ns/mnt") >= PATH_MAX)
+ if (!getcwd(curpath, sizeof(curpath)))
return;
- oldcwd = get_current_dir_name();
+ oldcwd = strdup(curpath);
if (!oldcwd)
return;
- oldns = open(curpath, O_RDONLY);
+ oldns = open("/proc/self/ns/mnt", O_RDONLY);
if (oldns < 0)
goto errout;
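
The replacement saves the working directory with the portable getcwd(3)-plus-strdup(3) pair instead of the GNU-specific get_current_dir_name(), and opens the constant /proc/self/ns/mnt path directly rather than formatting it through snprintf(). A standalone sketch of the save-cwd idiom:

#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static char *save_cwd(void)
{
	char buf[PATH_MAX];

	/* getcwd() fails with ERANGE if the path exceeds the buffer. */
	if (!getcwd(buf, sizeof(buf)))
		return NULL;
	return strdup(buf);
}
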
diff --git a/tools/perf/util/off_cpu.h b/tools/perf/util/off_cpu.h
index 2dd67c60f211..64bf763ddf50 100644
--- a/tools/perf/util/off_cpu.h
+++ b/tools/perf/util/off_cpu.h
@@ -13,9 +13,10 @@ struct record_opts;
#define OFFCPU_SAMPLE_TYPES (PERF_SAMPLE_IDENTIFIER | PERF_SAMPLE_IP | \
PERF_SAMPLE_TID | PERF_SAMPLE_TIME | \
PERF_SAMPLE_ID | PERF_SAMPLE_CPU | \
- PERF_SAMPLE_PERIOD | PERF_SAMPLE_CALLCHAIN | \
+ PERF_SAMPLE_PERIOD | PERF_SAMPLE_RAW | \
PERF_SAMPLE_CGROUP)
+#define OFFCPU_THRESH 500000000ULL /* 500ms, expressed in nanoseconds */
#ifdef HAVE_BPF_SKEL
int off_cpu_prepare(struct evlist *evlist, struct target *target,
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 5152fd5a6ead..17c1c36a7bf9 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -7,6 +7,7 @@
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/param.h>
+#include "cpumap.h"
#include "term.h"
#include "env.h"
#include "evlist.h"
@@ -16,20 +17,23 @@
#include "string2.h"
#include "strbuf.h"
#include "debug.h"
-#include <api/fs/tracing_path.h>
-#include <api/io_dir.h>
#include <perf/cpumap.h>
#include <util/parse-events-bison.h>
#include <util/parse-events-flex.h>
#include "pmu.h"
#include "pmus.h"
+#include "tp_pmu.h"
#include "asm/bug.h"
+#include "ui/ui.h"
#include "util/parse-branch-options.h"
#include "util/evsel_config.h"
#include "util/event.h"
#include "util/bpf-filter.h"
+#include "util/stat.h"
+#include "util/tool_pmu.h"
#include "util/util.h"
#include "tracepoint.h"
+#include <api/fs/tracing_path.h>
#define MAX_NAME_LEN 100
@@ -37,121 +41,23 @@ static int get_config_terms(const struct parse_events_terms *head_config,
struct list_head *head_terms);
static int parse_events_terms__copy(const struct parse_events_terms *src,
struct parse_events_terms *dest);
-
-const struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
- [PERF_COUNT_HW_CPU_CYCLES] = {
- .symbol = "cpu-cycles",
- .alias = "cycles",
- },
- [PERF_COUNT_HW_INSTRUCTIONS] = {
- .symbol = "instructions",
- .alias = "",
- },
- [PERF_COUNT_HW_CACHE_REFERENCES] = {
- .symbol = "cache-references",
- .alias = "",
- },
- [PERF_COUNT_HW_CACHE_MISSES] = {
- .symbol = "cache-misses",
- .alias = "",
- },
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
- .symbol = "branch-instructions",
- .alias = "branches",
- },
- [PERF_COUNT_HW_BRANCH_MISSES] = {
- .symbol = "branch-misses",
- .alias = "",
- },
- [PERF_COUNT_HW_BUS_CYCLES] = {
- .symbol = "bus-cycles",
- .alias = "",
- },
- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
- .symbol = "stalled-cycles-frontend",
- .alias = "idle-cycles-frontend",
- },
- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
- .symbol = "stalled-cycles-backend",
- .alias = "idle-cycles-backend",
- },
- [PERF_COUNT_HW_REF_CPU_CYCLES] = {
- .symbol = "ref-cycles",
- .alias = "",
- },
-};
-
-const struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
- [PERF_COUNT_SW_CPU_CLOCK] = {
- .symbol = "cpu-clock",
- .alias = "",
- },
- [PERF_COUNT_SW_TASK_CLOCK] = {
- .symbol = "task-clock",
- .alias = "",
- },
- [PERF_COUNT_SW_PAGE_FAULTS] = {
- .symbol = "page-faults",
- .alias = "faults",
- },
- [PERF_COUNT_SW_CONTEXT_SWITCHES] = {
- .symbol = "context-switches",
- .alias = "cs",
- },
- [PERF_COUNT_SW_CPU_MIGRATIONS] = {
- .symbol = "cpu-migrations",
- .alias = "migrations",
- },
- [PERF_COUNT_SW_PAGE_FAULTS_MIN] = {
- .symbol = "minor-faults",
- .alias = "",
- },
- [PERF_COUNT_SW_PAGE_FAULTS_MAJ] = {
- .symbol = "major-faults",
- .alias = "",
- },
- [PERF_COUNT_SW_ALIGNMENT_FAULTS] = {
- .symbol = "alignment-faults",
- .alias = "",
- },
- [PERF_COUNT_SW_EMULATION_FAULTS] = {
- .symbol = "emulation-faults",
- .alias = "",
- },
- [PERF_COUNT_SW_DUMMY] = {
- .symbol = "dummy",
- .alias = "",
- },
- [PERF_COUNT_SW_BPF_OUTPUT] = {
- .symbol = "bpf-output",
- .alias = "",
- },
- [PERF_COUNT_SW_CGROUP_SWITCHES] = {
- .symbol = "cgroup-switches",
- .alias = "",
- },
+static int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb);
+
+static const char *const event_types[] = {
+ [PERF_TYPE_HARDWARE] = "hardware",
+ [PERF_TYPE_SOFTWARE] = "software",
+ [PERF_TYPE_TRACEPOINT] = "tracepoint",
+ [PERF_TYPE_HW_CACHE] = "hardware-cache",
+ [PERF_TYPE_RAW] = "raw",
+ [PERF_TYPE_BREAKPOINT] = "breakpoint",
};
-const char *event_type(int type)
+const char *event_type(size_t type)
{
- switch (type) {
- case PERF_TYPE_HARDWARE:
- return "hardware";
-
- case PERF_TYPE_SOFTWARE:
- return "software";
-
- case PERF_TYPE_TRACEPOINT:
- return "tracepoint";
-
- case PERF_TYPE_HW_CACHE:
- return "hardware-cache";
+ if (type >= PERF_TYPE_MAX)
+ return "unknown";
- default:
- break;
- }
-
- return "unknown";
+ return event_types[type];
}
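
The rewrite trades the switch for a designated-initializer table plus a single bounds check. Note the table has to stay dense: any index below PERF_TYPE_MAX left out of the initializer would be returned as NULL. The shape of the pattern, standalone:

	static const char *const names[] = { "zero", "one", "two" };

	const char *name_of(size_t i)
	{
		/* every slot below the bound must be initialized */
		return i < sizeof(names) / sizeof(names[0]) ? names[i] : "unknown";
	}
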
static char *get_config_str(const struct parse_events_terms *head_terms,
@@ -179,6 +85,48 @@ static char *get_config_name(const struct parse_events_terms *head_terms)
return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME);
}
+static struct perf_cpu_map *get_config_cpu(const struct parse_events_terms *head_terms,
+ bool fake_pmu)
+{
+ struct parse_events_term *term;
+ struct perf_cpu_map *cpus = NULL;
+
+ if (!head_terms)
+ return NULL;
+
+ list_for_each_entry(term, &head_terms->terms, list) {
+ struct perf_cpu_map *term_cpus;
+
+ if (term->type_term != PARSE_EVENTS__TERM_TYPE_CPU)
+ continue;
+
+ if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
+ term_cpus = perf_cpu_map__new_int(term->val.num);
+ } else {
+ struct perf_pmu *pmu = perf_pmus__find(term->val.str);
+
+ if (pmu) {
+ term_cpus = pmu->is_core && perf_cpu_map__is_empty(pmu->cpus)
+ ? cpu_map__online()
+ : perf_cpu_map__get(pmu->cpus);
+ } else {
+ term_cpus = perf_cpu_map__new(term->val.str);
+ if (!term_cpus && fake_pmu) {
+ /*
+ * Assume the PMU string makes sense on a different
+ * machine and fake a value with all online CPUs.
+ */
+ term_cpus = cpu_map__online();
+ }
+ }
+ }
+ perf_cpu_map__merge(&cpus, term_cpus);
+ perf_cpu_map__put(term_cpus);
+ }
+
+ return cpus;
+}
+
/**
* fix_raw - For each raw term see if there is an event (aka alias) in pmu that
* matches the raw's string value. If the string value matches an
@@ -228,49 +176,104 @@ __add_event(struct list_head *list, int *idx,
struct perf_event_attr *attr,
bool init_attr,
const char *name, const char *metric_id, struct perf_pmu *pmu,
- struct list_head *config_terms, bool auto_merge_stats,
- struct perf_cpu_map *cpu_list, u64 alternate_hw_config)
+ struct list_head *config_terms, struct evsel *first_wildcard_match,
+ struct perf_cpu_map *user_cpus, u64 alternate_hw_config)
{
struct evsel *evsel;
- struct perf_cpu_map *cpus = perf_cpu_map__is_empty(cpu_list) && pmu ? pmu->cpus : cpu_list;
+ bool is_pmu_core;
+ struct perf_cpu_map *cpus, *pmu_cpus;
+ bool has_user_cpus = !perf_cpu_map__is_empty(user_cpus);
- cpus = perf_cpu_map__get(cpus);
- if (pmu)
- perf_pmu__warn_invalid_formats(pmu);
+ /*
+ * Ensure the first_wildcard_match's PMU matches that of the new event
+ * being added. Otherwise try to match with another event further down
+ * the evlist.
+ */
+ if (first_wildcard_match) {
+ struct evsel *pos = list_prev_entry(first_wildcard_match, core.node);
+
+ first_wildcard_match = NULL;
+ list_for_each_entry_continue(pos, list, core.node) {
+ if (perf_pmu__name_no_suffix_match(pos->pmu, pmu->name)) {
+ first_wildcard_match = pos;
+ break;
+ }
+ if (pos->pmu->is_core && (!pmu || pmu->is_core)) {
+ first_wildcard_match = pos;
+ break;
+ }
+ }
+ }
- if (pmu && (attr->type == PERF_TYPE_RAW || attr->type >= PERF_TYPE_MAX)) {
- perf_pmu__warn_invalid_config(pmu, attr->config, name,
- PERF_PMU_FORMAT_VALUE_CONFIG, "config");
- perf_pmu__warn_invalid_config(pmu, attr->config1, name,
- PERF_PMU_FORMAT_VALUE_CONFIG1, "config1");
- perf_pmu__warn_invalid_config(pmu, attr->config2, name,
- PERF_PMU_FORMAT_VALUE_CONFIG2, "config2");
- perf_pmu__warn_invalid_config(pmu, attr->config3, name,
- PERF_PMU_FORMAT_VALUE_CONFIG3, "config3");
+ if (pmu) {
+ perf_pmu__warn_invalid_formats(pmu);
+ if (attr->type == PERF_TYPE_RAW || attr->type >= PERF_TYPE_MAX) {
+ perf_pmu__warn_invalid_config(pmu, attr->config, name,
+ PERF_PMU_FORMAT_VALUE_CONFIG, "config");
+ perf_pmu__warn_invalid_config(pmu, attr->config1, name,
+ PERF_PMU_FORMAT_VALUE_CONFIG1, "config1");
+ perf_pmu__warn_invalid_config(pmu, attr->config2, name,
+ PERF_PMU_FORMAT_VALUE_CONFIG2, "config2");
+ perf_pmu__warn_invalid_config(pmu, attr->config3, name,
+ PERF_PMU_FORMAT_VALUE_CONFIG3, "config3");
+ perf_pmu__warn_invalid_config(pmu, attr->config4, name,
+ PERF_PMU_FORMAT_VALUE_CONFIG4, "config4");
+ }
}
+ /*
+ * If a PMU wasn't given, such as for legacy events, look one up now
+ * that the invalid-config warnings above can no longer fire for it.
+ */
+ if (!pmu)
+ pmu = perf_pmus__find_by_attr(attr);
+
+ if (pmu) {
+ is_pmu_core = pmu->is_core;
+ pmu_cpus = perf_cpu_map__get(pmu->cpus);
+ if (perf_cpu_map__is_empty(pmu_cpus)) {
+ if (perf_pmu__is_tool(pmu))
+ pmu_cpus = tool_pmu__cpus(attr);
+ else
+ pmu_cpus = cpu_map__online();
+ }
+ } else {
+ is_pmu_core = (attr->type == PERF_TYPE_HARDWARE ||
+ attr->type == PERF_TYPE_HW_CACHE);
+ pmu_cpus = is_pmu_core ? cpu_map__online() : NULL;
+ }
+
+ if (has_user_cpus)
+ cpus = perf_cpu_map__get(user_cpus);
+ else
+ cpus = perf_cpu_map__get(pmu_cpus);
+
if (init_attr)
event_attr_init(attr);
evsel = evsel__new_idx(attr, *idx);
- if (!evsel) {
- perf_cpu_map__put(cpus);
- return NULL;
+ if (!evsel)
+ goto out_err;
+
+ if (name) {
+ evsel->name = strdup(name);
+ if (!evsel->name)
+ goto out_err;
+ }
+
+ if (metric_id) {
+ evsel->metric_id = strdup(metric_id);
+ if (!evsel->metric_id)
+ goto out_err;
}
(*idx)++;
evsel->core.cpus = cpus;
- evsel->core.own_cpus = perf_cpu_map__get(cpus);
+ evsel->core.pmu_cpus = pmu_cpus;
evsel->core.requires_cpu = pmu ? pmu->is_uncore : false;
- evsel->core.is_pmu_core = pmu ? pmu->is_core : false;
- evsel->auto_merge_stats = auto_merge_stats;
+ evsel->core.is_pmu_core = is_pmu_core;
evsel->pmu = pmu;
evsel->alternate_hw_config = alternate_hw_config;
-
- if (name)
- evsel->name = strdup(name);
-
- if (metric_id)
- evsel->metric_id = strdup(metric_id);
+ evsel->first_wildcard_match = first_wildcard_match;
if (config_terms)
list_splice_init(config_terms, &evsel->config_terms);
@@ -278,7 +281,17 @@ __add_event(struct list_head *list, int *idx,
if (list)
list_add_tail(&evsel->core.node, list);
+ if (has_user_cpus)
+ evsel__warn_user_requested_cpus(evsel, user_cpus);
+
return evsel;
+out_err:
+ perf_cpu_map__put(cpus);
+ perf_cpu_map__put(pmu_cpus);
+ zfree(&evsel->name);
+ zfree(&evsel->metric_id);
+ free(evsel);
+ return NULL;
}
struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
@@ -287,7 +300,7 @@ struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
{
return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name,
metric_id, pmu, /*config_terms=*/NULL,
- /*auto_merge_stats=*/false, /*cpu_list=*/NULL,
+ /*first_wildcard_match=*/NULL, /*cpu_list=*/NULL,
/*alternate_hw_config=*/PERF_COUNT_HW_MAX);
}
@@ -298,7 +311,7 @@ static int add_event(struct list_head *list, int *idx,
{
return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id,
/*pmu=*/NULL, config_terms,
- /*auto_merge_stats=*/false, /*cpu_list=*/NULL,
+ /*first_wildcard_match=*/NULL, /*cpu_list=*/NULL,
alternate_hw_config) ? 0 : -ENOMEM;
}
@@ -331,13 +344,13 @@ static int parse_aliases(const char *str, const char *const names[][EVSEL__MAX_A
typedef int config_term_func_t(struct perf_event_attr *attr,
struct parse_events_term *term,
- struct parse_events_error *err);
+ struct parse_events_state *parse_state);
static int config_term_common(struct perf_event_attr *attr,
struct parse_events_term *term,
- struct parse_events_error *err);
+ struct parse_events_state *parse_state);
static int config_attr(struct perf_event_attr *attr,
const struct parse_events_terms *head,
- struct parse_events_error *err,
+ struct parse_events_state *parse_state,
config_term_func_t config_term);
/**
@@ -423,72 +436,7 @@ bool parse_events__filter_pmu(const struct parse_events_state *parse_state,
static int parse_events_add_pmu(struct parse_events_state *parse_state,
struct list_head *list, struct perf_pmu *pmu,
const struct parse_events_terms *const_parsed_terms,
- bool auto_merge_stats, u64 alternate_hw_config);
-
-int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
- struct parse_events_state *parse_state,
- struct parse_events_terms *parsed_terms)
-{
- struct perf_pmu *pmu = NULL;
- bool found_supported = false;
- const char *config_name = get_config_name(parsed_terms);
- const char *metric_id = get_config_metric_id(parsed_terms);
-
- while ((pmu = perf_pmus__scan(pmu)) != NULL) {
- LIST_HEAD(config_terms);
- struct perf_event_attr attr;
- int ret;
-
- if (parse_events__filter_pmu(parse_state, pmu))
- continue;
-
- if (perf_pmu__have_event(pmu, name)) {
- /*
- * The PMU has the event so add as not a legacy cache
- * event.
- */
- ret = parse_events_add_pmu(parse_state, list, pmu,
- parsed_terms,
- perf_pmu__auto_merge_stats(pmu),
- /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
- if (ret)
- return ret;
- continue;
- }
-
- if (!pmu->is_core) {
- /* Legacy cache events are only supported by core PMUs. */
- continue;
- }
-
- memset(&attr, 0, sizeof(attr));
- attr.type = PERF_TYPE_HW_CACHE;
-
- ret = parse_events__decode_legacy_cache(name, pmu->type, &attr.config);
- if (ret)
- return ret;
-
- found_supported = true;
-
- if (parsed_terms) {
- if (config_attr(&attr, parsed_terms, parse_state->error,
- config_term_common))
- return -EINVAL;
-
- if (get_config_terms(parsed_terms, &config_terms))
- return -ENOMEM;
- }
-
- if (__add_event(list, idx, &attr, /*init_attr*/true, config_name ?: name,
- metric_id, pmu, &config_terms, /*auto_merge_stats=*/false,
- /*cpu_list=*/NULL,
- /*alternate_hw_config=*/PERF_COUNT_HW_MAX) == NULL)
- return -ENOMEM;
-
- free_config_terms(&config_terms);
- }
- return found_supported ? 0 : -EINVAL;
-}
+ struct evsel *first_wildcard_match);
static void tracepoint_error(struct parse_events_error *e, int err,
const char *sys, const char *name, int column)
@@ -548,105 +496,82 @@ static int add_tracepoint(struct parse_events_state *parse_state,
return 0;
}
-static int add_tracepoint_multi_event(struct parse_events_state *parse_state,
- struct list_head *list,
- const char *sys_name, const char *evt_name,
- struct parse_events_error *err,
- struct parse_events_terms *head_config, YYLTYPE *loc)
-{
- char *evt_path;
- struct io_dirent64 *evt_ent;
- struct io_dir evt_dir;
- int ret = 0, found = 0;
-
- evt_path = get_events_file(sys_name);
- if (!evt_path) {
- tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
- return -1;
- }
- io_dir__init(&evt_dir, open(evt_path, O_CLOEXEC | O_DIRECTORY | O_RDONLY));
- if (evt_dir.dirfd < 0) {
- put_events_file(evt_path);
- tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
- return -1;
- }
+struct add_tracepoint_multi_args {
+ struct parse_events_state *parse_state;
+ struct list_head *list;
+ const char *sys_glob;
+ const char *evt_glob;
+ struct parse_events_error *err;
+ struct parse_events_terms *head_config;
+ YYLTYPE *loc;
+ int found;
+};
- while (!ret && (evt_ent = io_dir__readdir(&evt_dir))) {
- if (!strcmp(evt_ent->d_name, ".")
- || !strcmp(evt_ent->d_name, "..")
- || !strcmp(evt_ent->d_name, "enable")
- || !strcmp(evt_ent->d_name, "filter"))
- continue;
+static int add_tracepoint_multi_event_cb(void *state, const char *sys_name, const char *evt_name)
+{
+ struct add_tracepoint_multi_args *args = state;
+ int ret;
- if (!strglobmatch(evt_ent->d_name, evt_name))
- continue;
+ if (!strglobmatch(evt_name, args->evt_glob))
+ return 0;
- found++;
+ args->found++;
+ ret = add_tracepoint(args->parse_state, args->list, sys_name, evt_name,
+ args->err, args->head_config, args->loc);
- ret = add_tracepoint(parse_state, list, sys_name, evt_ent->d_name,
- err, head_config, loc);
- }
+ return ret;
+}
- if (!found) {
- tracepoint_error(err, ENOENT, sys_name, evt_name, loc->first_column);
- ret = -1;
+static int add_tracepoint_multi_event(struct add_tracepoint_multi_args *args, const char *sys_name)
+{
+ if (strpbrk(args->evt_glob, "*?") == NULL) {
+ /* Not a glob. */
+ args->found++;
+ return add_tracepoint(args->parse_state, args->list, sys_name, args->evt_glob,
+ args->err, args->head_config, args->loc);
}
- put_events_file(evt_path);
- close(evt_dir.dirfd);
- return ret;
+ return tp_pmu__for_each_tp_event(sys_name, args, add_tracepoint_multi_event_cb);
}
-static int add_tracepoint_event(struct parse_events_state *parse_state,
- struct list_head *list,
- const char *sys_name, const char *evt_name,
- struct parse_events_error *err,
- struct parse_events_terms *head_config, YYLTYPE *loc)
+static int add_tracepoint_multi_sys_cb(void *state, const char *sys_name)
{
- return strpbrk(evt_name, "*?") ?
- add_tracepoint_multi_event(parse_state, list, sys_name, evt_name,
- err, head_config, loc) :
- add_tracepoint(parse_state, list, sys_name, evt_name,
- err, head_config, loc);
+ struct add_tracepoint_multi_args *args = state;
+
+ if (!strglobmatch(sys_name, args->sys_glob))
+ return 0;
+
+ return add_tracepoint_multi_event(args, sys_name);
}
static int add_tracepoint_multi_sys(struct parse_events_state *parse_state,
struct list_head *list,
- const char *sys_name, const char *evt_name,
+ const char *sys_glob, const char *evt_glob,
struct parse_events_error *err,
struct parse_events_terms *head_config, YYLTYPE *loc)
{
- struct io_dirent64 *events_ent;
- struct io_dir events_dir;
- int ret = 0;
- char *events_dir_path = get_tracing_file("events");
+ struct add_tracepoint_multi_args args = {
+ .parse_state = parse_state,
+ .list = list,
+ .sys_glob = sys_glob,
+ .evt_glob = evt_glob,
+ .err = err,
+ .head_config = head_config,
+ .loc = loc,
+ .found = 0,
+ };
+ int ret;
- if (!events_dir_path) {
- tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
- return -1;
- }
- io_dir__init(&events_dir, open(events_dir_path, O_CLOEXEC | O_DIRECTORY | O_RDONLY));
- put_events_file(events_dir_path);
- if (events_dir.dirfd < 0) {
- tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
- return -1;
+ if (strpbrk(sys_glob, "*?") == NULL) {
+ /* Not a glob. */
+ ret = add_tracepoint_multi_event(&args, sys_glob);
+ } else {
+ ret = tp_pmu__for_each_tp_sys(&args, add_tracepoint_multi_sys_cb);
}
-
- while (!ret && (events_ent = io_dir__readdir(&events_dir))) {
- if (!strcmp(events_ent->d_name, ".")
- || !strcmp(events_ent->d_name, "..")
- || !strcmp(events_ent->d_name, "enable")
- || !strcmp(events_ent->d_name, "header_event")
- || !strcmp(events_ent->d_name, "header_page"))
- continue;
-
- if (!strglobmatch(events_ent->d_name, sys_name))
- continue;
-
- ret = add_tracepoint_event(parse_state, list, events_ent->d_name,
- evt_name, err, head_config, loc);
+ if (args.found == 0) {
+ tracepoint_error(err, ENOENT, sys_glob, evt_glob, loc->first_column);
+ return -ENOENT;
}
- close(events_dir.dirfd);
return ret;
}
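
Both the system and event components now take the same path: strpbrk() decides whether a component is a glob, and only then is the tp_pmu iterator walked with a strglobmatch() filter; a literal name skips the directory scan entirely. The predicate, standalone:

#include <string.h>

/* A component is a glob only if it contains '*' or '?'. */
static int is_glob(const char *s)
{
	return strpbrk(s, "*?") != NULL;
}
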
@@ -739,8 +664,7 @@ int parse_events_add_breakpoint(struct parse_events_state *parse_state,
attr.sample_period = 1;
if (head_config) {
- if (config_attr(&attr, head_config, parse_state->error,
- config_term_common))
+ if (config_attr(&attr, head_config, parse_state, config_term_common))
return -EINVAL;
if (get_config_terms(head_config, &config_terms))
@@ -783,6 +707,7 @@ const char *parse_events__term_type_str(enum parse_events__term_type term_type)
[PARSE_EVENTS__TERM_TYPE_CONFIG1] = "config1",
[PARSE_EVENTS__TERM_TYPE_CONFIG2] = "config2",
[PARSE_EVENTS__TERM_TYPE_CONFIG3] = "config3",
+ [PARSE_EVENTS__TERM_TYPE_CONFIG4] = "config4",
[PARSE_EVENTS__TERM_TYPE_NAME] = "name",
[PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD] = "period",
[PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ] = "freq",
@@ -803,8 +728,10 @@ const char *parse_events__term_type_str(enum parse_events__term_type term_type)
[PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE] = "aux-sample-size",
[PARSE_EVENTS__TERM_TYPE_METRIC_ID] = "metric-id",
[PARSE_EVENTS__TERM_TYPE_RAW] = "raw",
- [PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE] = "legacy-cache",
- [PARSE_EVENTS__TERM_TYPE_HARDWARE] = "hardware",
+ [PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG] = "legacy-hardware-config",
+ [PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG] = "legacy-cache-config",
+ [PARSE_EVENTS__TERM_TYPE_CPU] = "cpu",
+ [PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV] = "ratio-to-prev",
};
if ((unsigned int)term_type >= __PARSE_EVENTS__TERM_TYPE_NR)
return "unknown term";
@@ -830,10 +757,12 @@ config_term_avail(enum parse_events__term_type term_type, struct parse_events_er
case PARSE_EVENTS__TERM_TYPE_CONFIG1:
case PARSE_EVENTS__TERM_TYPE_CONFIG2:
case PARSE_EVENTS__TERM_TYPE_CONFIG3:
+ case PARSE_EVENTS__TERM_TYPE_CONFIG4:
case PARSE_EVENTS__TERM_TYPE_NAME:
case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
case PARSE_EVENTS__TERM_TYPE_PERCORE:
+ case PARSE_EVENTS__TERM_TYPE_CPU:
return true;
case PARSE_EVENTS__TERM_TYPE_USER:
case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
@@ -852,8 +781,9 @@ config_term_avail(enum parse_events__term_type term_type, struct parse_events_er
case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
case PARSE_EVENTS__TERM_TYPE_RAW:
- case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
- case PARSE_EVENTS__TERM_TYPE_HARDWARE:
+ case PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV:
+ case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG:
+ case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG:
default:
if (!err)
return false;
@@ -873,12 +803,12 @@ void parse_events__shrink_config_terms(void)
static int config_term_common(struct perf_event_attr *attr,
struct parse_events_term *term,
- struct parse_events_error *err)
+ struct parse_events_state *parse_state)
{
-#define CHECK_TYPE_VAL(type) \
-do { \
- if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type)) \
- return -EINVAL; \
+#define CHECK_TYPE_VAL(type) \
+do { \
+ if (check_type_val(term, parse_state->error, PARSE_EVENTS__TERM_TYPE_ ## type)) \
+ return -EINVAL; \
} while (0)
switch (term->type_term) {
@@ -898,6 +828,10 @@ do { \
CHECK_TYPE_VAL(NUM);
attr->config3 = term->val.num;
break;
+ case PARSE_EVENTS__TERM_TYPE_CONFIG4:
+ CHECK_TYPE_VAL(NUM);
+ attr->config4 = term->val.num;
+ break;
case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
CHECK_TYPE_VAL(NUM);
break;
@@ -909,7 +843,7 @@ do { \
if (strcmp(term->val.str, "no") &&
parse_branch_str(term->val.str,
&attr->branch_sample_type)) {
- parse_events_error__handle(err, term->err_val,
+ parse_events_error__handle(parse_state->error, term->err_val,
strdup("invalid branch sample type"),
NULL);
return -EINVAL;
@@ -918,7 +852,7 @@ do { \
case PARSE_EVENTS__TERM_TYPE_TIME:
CHECK_TYPE_VAL(NUM);
if (term->val.num > 1) {
- parse_events_error__handle(err, term->err_val,
+ parse_events_error__handle(parse_state->error, term->err_val,
strdup("expected 0 or 1"),
NULL);
return -EINVAL;
@@ -960,7 +894,7 @@ do { \
case PARSE_EVENTS__TERM_TYPE_PERCORE:
CHECK_TYPE_VAL(NUM);
if ((unsigned int)term->val.num > 1) {
- parse_events_error__handle(err, term->err_val,
+ parse_events_error__handle(parse_state->error, term->err_val,
strdup("expected 0 or 1"),
NULL);
return -EINVAL;
@@ -975,18 +909,59 @@ do { \
case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
CHECK_TYPE_VAL(NUM);
if (term->val.num > UINT_MAX) {
- parse_events_error__handle(err, term->err_val,
+ parse_events_error__handle(parse_state->error, term->err_val,
strdup("too big"),
NULL);
return -EINVAL;
}
break;
+ case PARSE_EVENTS__TERM_TYPE_CPU: {
+ struct perf_cpu_map *map;
+
+ if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
+ if (term->val.num >= (u64)cpu__max_present_cpu().cpu) {
+ parse_events_error__handle(parse_state->error, term->err_val,
+ strdup("too big"),
+ /*help=*/NULL);
+ return -EINVAL;
+ }
+ break;
+ }
+ assert(term->type_val == PARSE_EVENTS__TERM_TYPE_STR);
+ if (perf_pmus__find(term->val.str) != NULL)
+ break;
+
+ map = perf_cpu_map__new(term->val.str);
+ if (!map && !parse_state->fake_pmu) {
+ parse_events_error__handle(parse_state->error, term->err_val,
+ strdup("not a valid PMU or CPU number"),
+ /*help=*/NULL);
+ return -EINVAL;
+ }
+ perf_cpu_map__put(map);
+ break;
+ }
+ case PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV:
+ CHECK_TYPE_VAL(STR);
+ if (strtod(term->val.str, NULL) <= 0) {
+ parse_events_error__handle(parse_state->error, term->err_val,
+ strdup("zero or negative"),
+ NULL);
+ return -EINVAL;
+ }
+ if (errno == ERANGE) {
+ parse_events_error__handle(parse_state->error, term->err_val,
+ strdup("too big"),
+ NULL);
+ return -EINVAL;
+ }
+ break;
case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
case PARSE_EVENTS__TERM_TYPE_USER:
- case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
- case PARSE_EVENTS__TERM_TYPE_HARDWARE:
+ case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG:
+ case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG:
default:
- parse_events_error__handle(err, term->err_term,
+ parse_events_error__handle(parse_state->error, term->err_term,
strdup(parse_events__term_type_str(term->type_term)),
parse_events_formats_error_string(NULL));
return -EINVAL;
@@ -1001,67 +976,72 @@ do { \
* if an invalid config term is provided for legacy events
* (for example, instructions/badterm/...), which is confusing.
*/
- if (!config_term_avail(term->type_term, err))
+ if (!config_term_avail(term->type_term, parse_state->error))
return -EINVAL;
return 0;
#undef CHECK_TYPE_VAL
}
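
The ratio-to-prev branch above consults errno after strtod(3). As a standalone reference, the conventional conversion resets errno before the call, since strtod() sets it on range error but never clears it:

#include <errno.h>
#include <stdlib.h>

static int parse_positive_double(const char *s, double *out)
{
	char *end;

	errno = 0;			/* strtod() sets errno only on error */
	*out = strtod(s, &end);
	if (end == s || errno == ERANGE)
		return -1;
	return *out > 0 ? 0 : -1;
}
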
-static int config_term_pmu(struct perf_event_attr *attr,
- struct parse_events_term *term,
- struct parse_events_error *err)
+static bool check_pmu_is_core(__u32 type, const struct parse_events_term *term,
+ struct parse_events_error *err)
{
- if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE) {
- struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);
+ struct perf_pmu *pmu = NULL;
- if (!pmu) {
- char *err_str;
+ /* Avoid loading all PMUs with perf_pmus__find_by_type, just scan the core ones. */
+ while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
+ if (pmu->type == type)
+ return true;
+ }
+ parse_events_error__handle(err, term->err_val,
+ strdup("needs a core PMU"),
+ NULL);
+ return false;
+}
- if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
- parse_events_error__handle(err, term->err_term,
- err_str, /*help=*/NULL);
+static int config_term_pmu(struct perf_event_attr *attr,
+ struct parse_events_term *term,
+ struct parse_events_state *parse_state)
+{
+ if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG) {
+ if (check_type_val(term, parse_state->error, PARSE_EVENTS__TERM_TYPE_NUM))
+ return -EINVAL;
+ if (term->val.num >= PERF_COUNT_HW_MAX) {
+ parse_events_error__handle(parse_state->error, term->err_val,
+ strdup("too big"),
+ NULL);
return -EINVAL;
}
- /*
- * Rewrite the PMU event to a legacy cache one unless the PMU
- * doesn't support legacy cache events or the event is present
- * within the PMU.
- */
- if (perf_pmu__supports_legacy_cache(pmu) &&
- !perf_pmu__have_event(pmu, term->config)) {
- attr->type = PERF_TYPE_HW_CACHE;
- return parse_events__decode_legacy_cache(term->config, pmu->type,
- &attr->config);
- } else {
- term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
- term->no_value = true;
- }
+ if (!check_pmu_is_core(attr->type, term, parse_state->error))
+ return -EINVAL;
+ attr->config = term->val.num;
+ if (perf_pmus__supports_extended_type())
+ attr->config |= (__u64)attr->type << PERF_PMU_TYPE_SHIFT;
+ attr->type = PERF_TYPE_HARDWARE;
+ return 0;
}
- if (term->type_term == PARSE_EVENTS__TERM_TYPE_HARDWARE) {
- struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);
+ if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG) {
+ int cache_type, cache_op, cache_result;
- if (!pmu) {
- char *err_str;
-
- if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
- parse_events_error__handle(err, term->err_term,
- err_str, /*help=*/NULL);
+ if (check_type_val(term, parse_state->error, PARSE_EVENTS__TERM_TYPE_NUM))
+ return -EINVAL;
+ cache_type = term->val.num & 0xFF;
+ cache_op = (term->val.num >> 8) & 0xFF;
+ cache_result = (term->val.num >> 16) & 0xFF;
+ if ((term->val.num & ~0xFFFFFF) ||
+ cache_type >= PERF_COUNT_HW_CACHE_MAX ||
+ cache_op >= PERF_COUNT_HW_CACHE_OP_MAX ||
+ cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) {
+ parse_events_error__handle(parse_state->error, term->err_val,
+ strdup("too big"),
+ NULL);
return -EINVAL;
}
- /*
- * If the PMU has a sysfs or json event prefer it over
- * legacy. ARM requires this.
- */
- if (perf_pmu__have_event(pmu, term->config)) {
- term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
- term->no_value = true;
- term->alternate_hw_config = true;
- } else {
- attr->type = PERF_TYPE_HARDWARE;
- attr->config = term->val.num;
- if (perf_pmus__supports_extended_type())
- attr->config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;
- }
+ if (!check_pmu_is_core(attr->type, term, parse_state->error))
+ return -EINVAL;
+ attr->config = term->val.num;
+ if (perf_pmus__supports_extended_type())
+ attr->config |= (__u64)attr->type << PERF_PMU_TYPE_SHIFT;
+ attr->type = PERF_TYPE_HW_CACHE;
return 0;
}
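The bit layout unpacked above matches the perf ABI for PERF_TYPE_HW_CACHE events, with the cache id, op and result each occupying one byte; a sketch of composing one:

	/* L1-dcache read misses, built from the three byte-wide fields: */
	__u64 config = PERF_COUNT_HW_CACHE_L1D |
		       (PERF_COUNT_HW_CACHE_OP_READ << 8) |
		       (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);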
if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER ||
@@ -1072,12 +1052,12 @@ static int config_term_pmu(struct perf_event_attr *attr,
*/
return 0;
}
- return config_term_common(attr, term, err);
+ return config_term_common(attr, term, parse_state);
}
static int config_term_tracepoint(struct perf_event_attr *attr,
struct parse_events_term *term,
- struct parse_events_error *err)
+ struct parse_events_state *parse_state)
{
switch (term->type_term) {
case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
@@ -1091,12 +1071,15 @@ static int config_term_tracepoint(struct perf_event_attr *attr,
case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
- return config_term_common(attr, term, err);
+ return config_term_common(attr, term, parse_state);
case PARSE_EVENTS__TERM_TYPE_USER:
case PARSE_EVENTS__TERM_TYPE_CONFIG:
case PARSE_EVENTS__TERM_TYPE_CONFIG1:
case PARSE_EVENTS__TERM_TYPE_CONFIG2:
case PARSE_EVENTS__TERM_TYPE_CONFIG3:
+ case PARSE_EVENTS__TERM_TYPE_CONFIG4:
+ case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG:
+ case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG:
case PARSE_EVENTS__TERM_TYPE_NAME:
case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
@@ -1106,15 +1089,13 @@ static int config_term_tracepoint(struct perf_event_attr *attr,
case PARSE_EVENTS__TERM_TYPE_PERCORE:
case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
case PARSE_EVENTS__TERM_TYPE_RAW:
- case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
- case PARSE_EVENTS__TERM_TYPE_HARDWARE:
+ case PARSE_EVENTS__TERM_TYPE_CPU:
+ case PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV:
default:
- if (err) {
- parse_events_error__handle(err, term->err_term,
+ parse_events_error__handle(parse_state->error, term->err_term,
strdup(parse_events__term_type_str(term->type_term)),
strdup("valid terms: call-graph,stack-size\n")
);
- }
return -EINVAL;
}
@@ -1123,13 +1104,13 @@ static int config_term_tracepoint(struct perf_event_attr *attr,
static int config_attr(struct perf_event_attr *attr,
const struct parse_events_terms *head,
- struct parse_events_error *err,
+ struct parse_events_state *parse_state,
config_term_func_t config_term)
{
struct parse_events_term *term;
list_for_each_entry(term, &head->terms, list)
- if (config_term(attr, term, err))
+ if (config_term(attr, term, parse_state))
return -EINVAL;
return 0;
@@ -1232,16 +1213,21 @@ do { \
ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
term->val.num, term->weak);
break;
+ case PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV:
+ ADD_CONFIG_TERM_STR(RATIO_TO_PREV, term->val.str, term->weak);
+ break;
case PARSE_EVENTS__TERM_TYPE_USER:
case PARSE_EVENTS__TERM_TYPE_CONFIG:
case PARSE_EVENTS__TERM_TYPE_CONFIG1:
case PARSE_EVENTS__TERM_TYPE_CONFIG2:
case PARSE_EVENTS__TERM_TYPE_CONFIG3:
+ case PARSE_EVENTS__TERM_TYPE_CONFIG4:
+ case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG:
+ case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG:
case PARSE_EVENTS__TERM_TYPE_NAME:
case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
case PARSE_EVENTS__TERM_TYPE_RAW:
- case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
- case PARSE_EVENTS__TERM_TYPE_HARDWARE:
+ case PARSE_EVENTS__TERM_TYPE_CPU:
default:
break;
}
@@ -1274,6 +1260,9 @@ static int get_config_chgs(struct perf_pmu *pmu, struct parse_events_terms *head
case PARSE_EVENTS__TERM_TYPE_CONFIG1:
case PARSE_EVENTS__TERM_TYPE_CONFIG2:
case PARSE_EVENTS__TERM_TYPE_CONFIG3:
+ case PARSE_EVENTS__TERM_TYPE_CONFIG4:
+ case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG:
+ case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG:
case PARSE_EVENTS__TERM_TYPE_NAME:
case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
@@ -1294,8 +1283,8 @@ static int get_config_chgs(struct perf_pmu *pmu, struct parse_events_terms *head
case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
case PARSE_EVENTS__TERM_TYPE_RAW:
- case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
- case PARSE_EVENTS__TERM_TYPE_HARDWARE:
+ case PARSE_EVENTS__TERM_TYPE_CPU:
+ case PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV:
default:
break;
}
@@ -1319,27 +1308,24 @@ int parse_events_add_tracepoint(struct parse_events_state *parse_state,
if (head_config) {
struct perf_event_attr attr;
- if (config_attr(&attr, head_config, err,
- config_term_tracepoint))
+ if (config_attr(&attr, head_config, parse_state, config_term_tracepoint))
return -EINVAL;
}
- if (strpbrk(sys, "*?"))
- return add_tracepoint_multi_sys(parse_state, list, sys, event,
- err, head_config, loc);
- else
- return add_tracepoint_event(parse_state, list, sys, event,
- err, head_config, loc);
+ return add_tracepoint_multi_sys(parse_state, list, sys, event,
+ err, head_config, loc);
}
static int __parse_events_add_numeric(struct parse_events_state *parse_state,
struct list_head *list,
struct perf_pmu *pmu, u32 type, u32 extended_type,
- u64 config, const struct parse_events_terms *head_config)
+ u64 config, const struct parse_events_terms *head_config,
+ struct evsel *first_wildcard_match)
{
struct perf_event_attr attr;
LIST_HEAD(config_terms);
const char *name, *metric_id;
+ struct perf_cpu_map *cpus;
int ret;
memset(&attr, 0, sizeof(attr));
@@ -1351,8 +1337,7 @@ static int __parse_events_add_numeric(struct parse_events_state *parse_state,
}
if (head_config) {
- if (config_attr(&attr, head_config, parse_state->error,
- config_term_common))
+ if (config_attr(&attr, head_config, parse_state, config_term_common))
return -EINVAL;
if (get_config_terms(head_config, &config_terms))
@@ -1361,10 +1346,11 @@ static int __parse_events_add_numeric(struct parse_events_state *parse_state,
name = get_config_name(head_config);
metric_id = get_config_metric_id(head_config);
+ cpus = get_config_cpu(head_config, parse_state->fake_pmu);
ret = __add_event(list, &parse_state->idx, &attr, /*init_attr*/true, name,
- metric_id, pmu, &config_terms, /*auto_merge_stats=*/false,
- /*cpu_list=*/NULL, /*alternate_hw_config=*/PERF_COUNT_HW_MAX
- ) == NULL ? -ENOMEM : 0;
+ metric_id, pmu, &config_terms, first_wildcard_match,
+ cpus, /*alternate_hw_config=*/PERF_COUNT_HW_MAX) ? 0 : -ENOMEM;
+ perf_cpu_map__put(cpus);
free_config_terms(&config_terms);
return ret;
}
@@ -1380,6 +1366,7 @@ int parse_events_add_numeric(struct parse_events_state *parse_state,
/* Wildcards on numeric values are only supported by core PMUs. */
if (wildcard && perf_pmus__supports_extended_type()) {
+ struct evsel *first_wildcard_match = NULL;
while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
int ret;
@@ -1389,15 +1376,20 @@ int parse_events_add_numeric(struct parse_events_state *parse_state,
ret = __parse_events_add_numeric(parse_state, list, pmu,
type, pmu->type,
- config, head_config);
+ config, head_config,
+ first_wildcard_match);
if (ret)
return ret;
+ if (first_wildcard_match == NULL)
+ first_wildcard_match =
+ container_of(list->prev, struct evsel, core.node);
}
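The container_of() above recovers the evsel just appended at the tail of list; it is equivalent to the list_entry() idiom:

	/* list->prev is the last node in a list_head-based list. */
	struct evsel *last = list_entry(list->prev, struct evsel, core.node);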
if (found_supported)
return 0;
}
return __parse_events_add_numeric(parse_state, list, perf_pmus__find_by_type(type),
- type, /*extended_type=*/0, config, head_config);
+ type, /*extended_type=*/0, config, head_config,
+ /*first_wildcard_match=*/NULL);
}
static bool config_term_percore(struct list_head *config_terms)
@@ -1415,8 +1407,9 @@ static bool config_term_percore(struct list_head *config_terms)
static int parse_events_add_pmu(struct parse_events_state *parse_state,
struct list_head *list, struct perf_pmu *pmu,
const struct parse_events_terms *const_parsed_terms,
- bool auto_merge_stats, u64 alternate_hw_config)
+ struct evsel *first_wildcard_match)
{
+ u64 alternate_hw_config = PERF_COUNT_HW_MAX;
struct perf_event_attr attr;
struct perf_pmu_info info;
struct evsel *evsel;
@@ -1424,6 +1417,7 @@ static int parse_events_add_pmu(struct parse_events_state *parse_state,
LIST_HEAD(config_terms);
struct parse_events_terms parsed_terms;
bool alias_rewrote_terms = false;
+ struct perf_cpu_map *term_cpu = NULL;
if (verbose > 1) {
struct strbuf sb;
@@ -1451,7 +1445,7 @@ static int parse_events_add_pmu(struct parse_events_state *parse_state,
evsel = __add_event(list, &parse_state->idx, &attr,
/*init_attr=*/true, /*name=*/NULL,
/*metric_id=*/NULL, pmu,
- /*config_terms=*/NULL, auto_merge_stats,
+ /*config_terms=*/NULL, first_wildcard_match,
/*cpu_list=*/NULL, alternate_hw_config);
return evsel ? 0 : -ENOMEM;
}
@@ -1466,7 +1460,7 @@ static int parse_events_add_pmu(struct parse_events_state *parse_state,
fix_raw(&parsed_terms, pmu);
/* Configure attr/terms with a known PMU, this will set hardcoded terms. */
- if (config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
+ if (config_attr(&attr, &parsed_terms, parse_state, config_term_pmu)) {
parse_events_terms__exit(&parsed_terms);
return -EINVAL;
}
@@ -1490,7 +1484,7 @@ static int parse_events_add_pmu(struct parse_events_state *parse_state,
/* Configure attr/terms again if an alias was expanded. */
if (alias_rewrote_terms &&
- config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
+ config_attr(&attr, &parsed_terms, parse_state, config_term_pmu)) {
parse_events_terms__exit(&parsed_terms);
return -EINVAL;
}
@@ -1518,11 +1512,12 @@ static int parse_events_add_pmu(struct parse_events_state *parse_state,
return -EINVAL;
}
+ term_cpu = get_config_cpu(&parsed_terms, parse_state->fake_pmu);
evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true,
get_config_name(&parsed_terms),
get_config_metric_id(&parsed_terms), pmu,
- &config_terms, auto_merge_stats, /*cpu_list=*/NULL,
- alternate_hw_config);
+ &config_terms, first_wildcard_match, term_cpu, alternate_hw_config);
+ perf_cpu_map__put(term_cpu);
if (!evsel) {
parse_events_terms__exit(&parsed_terms);
return -ENOMEM;
@@ -1539,11 +1534,15 @@ static int parse_events_add_pmu(struct parse_events_state *parse_state,
evsel->scale = info.scale;
evsel->per_pkg = info.per_pkg;
evsel->snapshot = info.snapshot;
+ evsel->retirement_latency.mean = info.retirement_latency_mean;
+ evsel->retirement_latency.min = info.retirement_latency_min;
+ evsel->retirement_latency.max = info.retirement_latency_max;
+
return 0;
}
int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
- const char *event_name, u64 hw_config,
+ const char *event_name,
const struct parse_events_terms *const_parsed_terms,
struct list_head **listp, void *loc_)
{
@@ -1554,6 +1553,7 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
int ok = 0;
const char *config;
struct parse_events_terms parsed_terms;
+ struct evsel *first_wildcard_match = NULL;
*listp = NULL;
@@ -1585,8 +1585,7 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
INIT_LIST_HEAD(list);
- while ((pmu = perf_pmus__scan(pmu)) != NULL) {
- bool auto_merge_stats;
+ while ((pmu = perf_pmus__scan_for_event(pmu, event_name)) != NULL) {
if (parse_events__filter_pmu(parse_state, pmu))
continue;
@@ -1594,9 +1593,8 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
if (!perf_pmu__have_event(pmu, event_name))
continue;
- auto_merge_stats = perf_pmu__auto_merge_stats(pmu);
if (!parse_events_add_pmu(parse_state, list, pmu,
- &parsed_terms, auto_merge_stats, hw_config)) {
+ &parsed_terms, first_wildcard_match)) {
struct strbuf sb;
strbuf_init(&sb, /*hint=*/ 0);
@@ -1605,11 +1603,13 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
strbuf_release(&sb);
ok++;
}
+ if (first_wildcard_match == NULL)
+ first_wildcard_match = container_of(list->prev, struct evsel, core.node);
}
if (parse_state->fake_pmu) {
if (!parse_events_add_pmu(parse_state, list, perf_pmus__fake_pmu(), &parsed_terms,
- /*auto_merge_stats=*/true, hw_config)) {
+ first_wildcard_match)) {
struct strbuf sb;
strbuf_init(&sb, /*hint=*/ 0);
@@ -1640,6 +1640,7 @@ int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state
struct perf_pmu *pmu;
int ok = 0;
char *help;
+ struct evsel *first_wildcard_match = NULL;
*listp = malloc(sizeof(**listp));
if (!*listp)
@@ -1650,32 +1651,32 @@ int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state
/* Attempt to add to list assuming event_or_pmu is a PMU name. */
pmu = perf_pmus__find(event_or_pmu);
if (pmu && !parse_events_add_pmu(parse_state, *listp, pmu, const_parsed_terms,
- /*auto_merge_stats=*/false,
- /*alternate_hw_config=*/PERF_COUNT_HW_MAX))
+ first_wildcard_match))
return 0;
if (parse_state->fake_pmu) {
if (!parse_events_add_pmu(parse_state, *listp, perf_pmus__fake_pmu(),
const_parsed_terms,
- /*auto_merge_stats=*/false,
- /*alternate_hw_config=*/PERF_COUNT_HW_MAX))
+ first_wildcard_match))
return 0;
}
pmu = NULL;
/* Failed to add, try wildcard expansion of event_or_pmu as a PMU name. */
- while ((pmu = perf_pmus__scan(pmu)) != NULL) {
- if (!parse_events__filter_pmu(parse_state, pmu) &&
- perf_pmu__wildcard_match(pmu, event_or_pmu)) {
- bool auto_merge_stats = perf_pmu__auto_merge_stats(pmu);
-
- if (!parse_events_add_pmu(parse_state, *listp, pmu,
- const_parsed_terms,
- auto_merge_stats,
- /*alternate_hw_config=*/PERF_COUNT_HW_MAX)) {
- ok++;
- parse_state->wild_card_pmus = true;
- }
+ while ((pmu = perf_pmus__scan_matching_wildcard(pmu, event_or_pmu)) != NULL) {
+
+ if (parse_events__filter_pmu(parse_state, pmu))
+ continue;
+
+ if (!parse_events_add_pmu(parse_state, *listp, pmu,
+ const_parsed_terms,
+ first_wildcard_match)) {
+ ok++;
+ parse_state->wild_card_pmus = true;
+ }
+ if (first_wildcard_match == NULL) {
+ first_wildcard_match =
+ container_of((*listp)->prev, struct evsel, core.node);
}
}
if (ok)
@@ -1683,7 +1684,7 @@ int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state
/* Failure to add, assume event_or_pmu is an event name. */
zfree(listp);
- if (!parse_events_multi_pmu_add(parse_state, event_or_pmu, PERF_COUNT_HW_MAX,
+ if (!parse_events_multi_pmu_add(parse_state, event_or_pmu,
const_parsed_terms, listp, loc))
return 0;
@@ -1733,13 +1734,11 @@ static int parse_events__modifier_list(struct parse_events_state *parse_state,
int eH = group ? evsel->core.attr.exclude_host : 0;
int eG = group ? evsel->core.attr.exclude_guest : 0;
int exclude = eu | ek | eh;
- int exclude_GH = group ? evsel->exclude_GH : 0;
+ int exclude_GH = eG | eH;
if (mod.user) {
if (!exclude)
exclude = eu = ek = eh = 1;
- if (!exclude_GH && !perf_guest && exclude_GH_default)
- eG = 1;
eu = 0;
}
if (mod.kernel) {
@@ -1762,6 +1761,13 @@ static int parse_events__modifier_list(struct parse_events_state *parse_state,
exclude_GH = eG = eH = 1;
eH = 0;
}
+ if (!exclude_GH && exclude_GH_default) {
+ if (perf_host)
+ eG = 1;
+ else if (perf_guest)
+ eH = 1;
+ }
+
evsel->core.attr.exclude_user = eu;
evsel->core.attr.exclude_kernel = ek;
evsel->core.attr.exclude_hv = eh;
@@ -1812,6 +1818,8 @@ static int parse_events__modifier_list(struct parse_events_state *parse_state,
evsel->bpf_counter = true;
if (mod.retire_lat)
evsel->retire_lat = true;
+ if (mod.dont_regroup)
+ evsel->dont_regroup = true;
}
return 0;
}
@@ -1849,7 +1857,6 @@ int parse_events__set_default_name(struct list_head *list, char *name)
}
static int parse_events__scanner(const char *str,
- FILE *input,
struct parse_events_state *parse_state)
{
YY_BUFFER_STATE buffer;
@@ -1860,10 +1867,7 @@ static int parse_events__scanner(const char *str,
if (ret)
return ret;
- if (str)
- buffer = parse_events__scan_string(str, scanner);
- else
- parse_events_set_in(input, scanner);
+ buffer = parse_events__scan_string(str, scanner);
#ifdef PARSER_DEBUG
parse_events_debug = 1;
@@ -1871,10 +1875,8 @@ static int parse_events__scanner(const char *str,
#endif
ret = parse_events_parse(parse_state, scanner);
- if (str) {
- parse_events__flush_buffer(buffer, scanner);
- parse_events__delete_buffer(buffer, scanner);
- }
+ parse_events__flush_buffer(buffer, scanner);
+ parse_events__delete_buffer(buffer, scanner);
parse_events_lex_destroy(scanner);
return ret;
}
@@ -1882,7 +1884,7 @@ static int parse_events__scanner(const char *str,
/*
* parse event config string, return a list of event terms.
*/
-int parse_events_terms(struct parse_events_terms *terms, const char *str, FILE *input)
+int parse_events_terms(struct parse_events_terms *terms, const char *str)
{
struct parse_events_state parse_state = {
.terms = NULL,
@@ -1890,7 +1892,7 @@ int parse_events_terms(struct parse_events_terms *terms, const char *str, FILE *
};
int ret;
- ret = parse_events__scanner(str, input, &parse_state);
+ ret = parse_events__scanner(str, &parse_state);
if (!ret)
list_splice(&parse_state.terms->terms, &terms->terms);
@@ -1987,14 +1989,18 @@ static int evlist__cmp(void *_fg_idx, const struct list_head *l, const struct li
* event's index is used. An index may be forced for events that
* must be in the same group, namely Intel topdown events.
*/
- if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)) {
+ if (lhs->dont_regroup) {
+ lhs_sort_idx = lhs_core->idx;
+ } else if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)) {
lhs_sort_idx = *force_grouped_idx;
} else {
bool lhs_has_group = lhs_core->leader != lhs_core || lhs_core->nr_members > 1;
lhs_sort_idx = lhs_has_group ? lhs_core->leader->idx : lhs_core->idx;
}
- if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)) {
+ if (rhs->dont_regroup) {
+ rhs_sort_idx = rhs_core->idx;
+ } else if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)) {
rhs_sort_idx = *force_grouped_idx;
} else {
bool rhs_has_group = rhs_core->leader != rhs_core || rhs_core->nr_members > 1;
@@ -2032,6 +2038,11 @@ static int evlist__cmp(void *_fg_idx, const struct list_head *l, const struct li
return arch_evlist__cmp(lhs, rhs);
}
+int __weak arch_evlist__add_required_events(struct list_head *list __always_unused)
+{
+ return 0;
+}
+
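A sketch of how an architecture would override the weak stub above; the helper name is hypothetical (per the comment below, x86's override appends a slots event when topdown events are present):

	int arch_evlist__add_required_events(struct list_head *list)
	{
		return topdown_insert_slots_event(list); /* hypothetical helper */
	}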
static int parse_events__sort_events_and_fix_groups(struct list_head *list)
{
int idx = 0, force_grouped_idx = -1;
@@ -2043,6 +2054,11 @@ static int parse_events__sort_events_and_fix_groups(struct list_head *list)
struct evsel *force_grouped_leader = NULL;
bool last_event_was_forced_leader = false;
+ /* On x86, topdown metrics events require a slots event. */
+ ret = arch_evlist__add_required_events(list);
+ if (ret)
+ return ret;
+
/*
* Compute index to insert ungrouped events at. Place them where the
* first ungrouped event appears.
@@ -2082,10 +2098,10 @@ static int parse_events__sort_events_and_fix_groups(struct list_head *list)
*/
idx = 0;
list_for_each_entry(pos, list, core.node) {
- const struct evsel *pos_leader = evsel__leader(pos);
+ struct evsel *pos_leader = evsel__leader(pos);
const char *pos_pmu_name = pos->group_pmu_name;
const char *cur_leader_pmu_name;
- bool pos_force_grouped = force_grouped_idx != -1 &&
+ bool pos_force_grouped = force_grouped_idx != -1 && !pos->dont_regroup &&
arch_evsel__must_be_in_group(pos);
/* Reset index and nr_members. */
@@ -2098,13 +2114,12 @@ static int parse_events__sort_events_and_fix_groups(struct list_head *list)
* Set the group leader respecting the given groupings and that
* groups can't span PMUs.
*/
- if (!cur_leader) {
- cur_leader = pos;
- cur_leaders_grp = &pos->core;
+ if (!cur_leader || pos->dont_regroup) {
+ cur_leader = pos->dont_regroup ? pos_leader : pos;
+ cur_leaders_grp = &cur_leader->core;
if (pos_force_grouped)
force_grouped_leader = pos;
}
-
cur_leader_pmu_name = cur_leader->group_pmu_name;
if (strcmp(cur_leader_pmu_name, pos_pmu_name)) {
/* PMU changed so the group/leader must change. */
@@ -2185,7 +2200,7 @@ int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filte
};
int ret, ret2;
- ret = parse_events__scanner(str, /*input=*/ NULL, &parse_state);
+ ret = parse_events__scanner(str, &parse_state);
if (!ret && list_empty(&parse_state.list)) {
WARN_ONCE(true, "WARNING: event parser found nothing\n");
@@ -2196,14 +2211,23 @@ int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filte
if (ret2 < 0)
return ret;
- if (ret2 && warn_if_reordered && !parse_state.wild_card_pmus)
- pr_warning("WARNING: events were regrouped to match PMUs\n");
-
/*
* Add list to the evlist even with errors to allow callers to clean up.
*/
evlist__splice_list_tail(evlist, &parse_state.list);
+ if (ret2 && warn_if_reordered && !parse_state.wild_card_pmus) {
+ pr_warning("WARNING: events were regrouped to match PMUs\n");
+
+ if (verbose > 0) {
+ struct strbuf sb = STRBUF_INIT;
+
+ evlist__uniquify_evsel_names(evlist, &stat_config);
+ evlist__format_evsels(evlist, &sb, 2048);
+ pr_debug("evlist after sorting/fixing: '%s'\n", sb.buf);
+ strbuf_release(&sb);
+ }
+ }
if (!ret) {
struct evsel *last;
@@ -2228,6 +2252,8 @@ int parse_event(struct evlist *evlist, const char *str)
parse_events_error__init(&err);
ret = parse_events(evlist, str, &err);
+ if (ret && verbose > 0)
+ parse_events_error__print(&err, str);
parse_events_error__exit(&err);
return ret;
}
@@ -2456,12 +2482,17 @@ foreach_evsel_in_last_glob(struct evlist *evlist,
return 0;
}
+/* Will a tracepoint filter work for str or should a BPF filter be used? */
+static bool is_possible_tp_filter(const char *str)
+{
+ return strstr(str, "uid") == NULL;
+}
+
static int set_filter(struct evsel *evsel, const void *arg)
{
const char *str = arg;
- bool found = false;
int nr_addr_filters = 0;
- struct perf_pmu *pmu = NULL;
+ struct perf_pmu *pmu;
if (evsel == NULL) {
fprintf(stderr,
@@ -2469,7 +2500,7 @@ static int set_filter(struct evsel *evsel, const void *arg)
return -1;
}
- if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
+ if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT && is_possible_tp_filter(str)) {
if (evsel__append_tp_filter(evsel, str) < 0) {
fprintf(stderr,
"not enough memory to hold filter string\n");
@@ -2479,16 +2510,11 @@ static int set_filter(struct evsel *evsel, const void *arg)
return 0;
}
- while ((pmu = perf_pmus__scan(pmu)) != NULL)
- if (pmu->type == evsel->core.attr.type) {
- found = true;
- break;
- }
-
- if (found)
+ pmu = evsel__find_pmu(evsel);
+ if (pmu) {
perf_pmu__scan_file(pmu, "nr_addr_filters",
"%d", &nr_addr_filters);
-
+ }
if (!nr_addr_filters)
return perf_bpf_filter__parse(&evsel->bpf_filters, str);
@@ -2510,6 +2536,30 @@ int parse_filter(const struct option *opt, const char *str,
(const void *)str);
}
+int parse_uid_filter(struct evlist *evlist, uid_t uid)
+{
+ struct option opt = {
+ .value = &evlist,
+ };
+ char buf[128];
+ int ret;
+
+ snprintf(buf, sizeof(buf), "uid == %d", uid);
+ ret = parse_filter(&opt, buf, /*unset=*/0);
+ if (ret) {
+ if (use_browser >= 1) {
+ /*
+ * Use ui__warning so a pop up appears above the
+ * underlying BPF error message.
+ */
+ ui__warning("Failed to add UID filtering that uses BPF filtering.\n");
+ } else {
+ fprintf(stderr, "Failed to add UID filtering that uses BPF filtering.\n");
+ }
+ }
+ return ret;
+}
+
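A usage sketch for the new helper (hypothetical caller), restricting an evlist to events of the current user:

	if (parse_uid_filter(evlist, getuid()))
		pr_err("failed to set up uid filtering\n");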
static int add_exclude_perf_filter(struct evsel *evsel,
const void *arg __maybe_unused)
{
@@ -2700,7 +2750,7 @@ void parse_events_terms__delete(struct parse_events_terms *terms)
free(terms);
}
-int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb)
+static int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb)
{
struct parse_events_term *term;
bool first = true;
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index e176a34ab088..3577ab213730 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -9,8 +9,8 @@
#include <stdbool.h>
#include <linux/types.h>
#include <linux/perf_event.h>
-#include <stdio.h>
#include <string.h>
+#include <sys/types.h>
struct evsel;
struct evlist;
@@ -20,7 +20,7 @@ struct option;
struct perf_pmu;
struct strbuf;
-const char *event_type(int type);
+const char *event_type(size_t type);
/* Arguments encoded in opt->value. */
struct parse_events_option_args {
@@ -45,6 +45,7 @@ static inline int parse_events(struct evlist *evlist, const char *str,
int parse_event(struct evlist *evlist, const char *str);
int parse_filter(const struct option *opt, const char *str, int unset);
+int parse_uid_filter(struct evlist *evlist, uid_t uid);
int exclude_perf(const struct option *opt, const char *arg, int unset);
enum parse_events__term_val_type {
@@ -58,6 +59,7 @@ enum parse_events__term_type {
PARSE_EVENTS__TERM_TYPE_CONFIG1,
PARSE_EVENTS__TERM_TYPE_CONFIG2,
PARSE_EVENTS__TERM_TYPE_CONFIG3,
+ PARSE_EVENTS__TERM_TYPE_CONFIG4,
PARSE_EVENTS__TERM_TYPE_NAME,
PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD,
PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ,
@@ -78,9 +80,11 @@ enum parse_events__term_type {
PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE,
PARSE_EVENTS__TERM_TYPE_METRIC_ID,
PARSE_EVENTS__TERM_TYPE_RAW,
- PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE,
- PARSE_EVENTS__TERM_TYPE_HARDWARE,
-#define __PARSE_EVENTS__TERM_TYPE_NR (PARSE_EVENTS__TERM_TYPE_HARDWARE + 1)
+ PARSE_EVENTS__TERM_TYPE_CPU,
+ PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV,
+ PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG,
+ PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG,
+#define __PARSE_EVENTS__TERM_TYPE_NR (PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG + 1)
};
struct parse_events_term {
@@ -128,12 +132,6 @@ struct parse_events_term {
* value is assumed to be 1. An event name also has no value.
*/
bool no_value;
- /**
- * @alternate_hw_config: config is the event name but num is an
- * alternate PERF_TYPE_HARDWARE config value which is often nice for the
- * sake of quick matching.
- */
- bool alternate_hw_config;
};
struct parse_events_error {
@@ -195,8 +193,7 @@ void parse_events_term__delete(struct parse_events_term *term);
void parse_events_terms__delete(struct parse_events_terms *terms);
void parse_events_terms__init(struct parse_events_terms *terms);
void parse_events_terms__exit(struct parse_events_terms *terms);
-int parse_events_terms(struct parse_events_terms *terms, const char *str, FILE *input);
-int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb);
+int parse_events_terms(struct parse_events_terms *terms, const char *str);
struct parse_events_modifier {
u8 precise; /* Number of repeated 'p' for precision. */
@@ -213,6 +210,7 @@ struct parse_events_modifier {
bool guest : 1; /* 'G' */
bool host : 1; /* 'H' */
bool retire_lat : 1; /* 'R' */
+ bool dont_regroup : 1; /* 'X' */
};
int parse_events__modifier_event(struct parse_events_state *parse_state, void *loc,
@@ -230,9 +228,6 @@ int parse_events_add_numeric(struct parse_events_state *parse_state,
u32 type, u64 config,
const struct parse_events_terms *head_config,
bool wildcard);
-int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
- struct parse_events_state *parse_state,
- struct parse_events_terms *parsed_terms);
int parse_events__decode_legacy_cache(const char *name, int pmu_type, __u64 *config);
int parse_events_add_breakpoint(struct parse_events_state *parse_state,
struct list_head *list,
@@ -244,7 +239,7 @@ struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
struct perf_pmu *pmu);
int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
- const char *event_name, u64 hw_config,
+ const char *event_name,
const struct parse_events_terms *const_parsed_terms,
struct list_head **listp, void *loc);
@@ -260,8 +255,6 @@ struct event_symbol {
const char *symbol;
const char *alias;
};
-extern const struct event_symbol event_symbols_hw[];
-extern const struct event_symbol event_symbols_sw[];
char *parse_events_formats_error_string(char *additional_terms);
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 7ed86e3e34e3..251ce4321878 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -5,16 +5,14 @@
%option stack
%option bison-locations
%option yylineno
-%option reject
+%option noyywrap
%{
#include <errno.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
#include "parse-events.h"
#include "parse-events-bison.h"
-#include "evsel.h"
char *parse_events_get_text(yyscan_t yyscanner);
YYSTYPE *parse_events_get_lval(yyscan_t yyscanner);
@@ -75,11 +73,6 @@ static int quoted_str(yyscan_t scanner, int token)
return token;
}
-static int lc_str(yyscan_t scanner, const struct parse_events_state *state)
-{
- return str(scanner, state->match_legacy_cache_terms ? PE_LEGACY_CACHE : PE_NAME);
-}
-
/*
* This function is called when the parser gets two kinds of input:
*
@@ -117,14 +110,6 @@ do { \
yyless(0); \
} while (0)
-static int sym(yyscan_t scanner, int type, int config)
-{
- YYSTYPE *yylval = parse_events_get_lval(scanner);
-
- yylval->num = (type << 16) + config;
- return type == PERF_TYPE_HARDWARE ? PE_VALUE_SYM_HW : PE_VALUE_SYM_SW;
-}
-
static int term(yyscan_t scanner, enum parse_events__term_type type)
{
YYSTYPE *yylval = parse_events_get_lval(scanner);
@@ -133,16 +118,6 @@ static int term(yyscan_t scanner, enum parse_events__term_type type)
return PE_TERM;
}
-static int hw_term(yyscan_t scanner, int config)
-{
- YYSTYPE *yylval = parse_events_get_lval(scanner);
- char *text = parse_events_get_text(scanner);
-
- yylval->hardware_term.str = strdup(text);
- yylval->hardware_term.num = PERF_TYPE_HARDWARE + config;
- return PE_TERM_HW;
-}
-
static void modifiers_error(struct parse_events_state *parse_state, yyscan_t scanner,
int pos, char mod_char, const char *mod_name)
{
@@ -206,6 +181,7 @@ static int modifiers(struct parse_events_state *parse_state, yyscan_t scanner)
CASE('e', exclusive);
CASE('b', bpf);
CASE('R', retire_lat);
+ CASE('X', dont_regroup);
default:
return PE_ERROR;
}
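A usage sketch for the new 'X' modifier wired up above (the event string is an illustrative assumption): it sets evsel->dont_regroup so the sorting pass leaves the grouping as written:

	parse_event(evlist, "{cycles,instructions}:X"); /* keep group as-is */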
@@ -222,10 +198,6 @@ do { \
yycolumn += yyleng; \
} while (0);
-#define USER_REJECT \
- yycolumn -= yyleng; \
- REJECT
-
%}
%x mem
@@ -251,13 +223,11 @@ term_name {name_start}[a-zA-Z0-9_*?.\[\]!\-:]*
quoted_name [\']{name_start}[a-zA-Z0-9_*?.\[\]!\-:,\.=]*[\']
drv_cfg_term [a-zA-Z0-9_\.]+(=[a-zA-Z0-9_*?\.:]+)?
/*
- * If you add a modifier you need to update check_modifier().
+ * If you add a modifier you need to update modifiers().
* Also, the letters in modifier_event must not be in modifier_bp.
*/
-modifier_event [ukhpPGHSDIWebR]{1,16}
+modifier_event [ukhpPGHSDIWebRX]{1,17}
modifier_bp [rwx]{1,3}
-lc_type (L1-dcache|l1-d|l1d|L1-data|L1-icache|l1-i|l1i|L1-instruction|LLC|L2|dTLB|d-tlb|Data-TLB|iTLB|i-tlb|Instruction-TLB|branch|branches|bpu|btb|bpc|node)
-lc_op_result (load|loads|read|store|stores|write|prefetch|prefetches|speculative-read|speculative-load|refs|Reference|ops|access|misses|miss)
digit [0-9]
non_digit [^0-9]
@@ -317,6 +287,7 @@ config { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG); }
config1 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG1); }
config2 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG2); }
config3 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG3); }
+config4 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG4); }
name { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NAME); }
period { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD); }
freq { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ); }
@@ -335,23 +306,14 @@ aux-output { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT); }
aux-action { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_AUX_ACTION); }
aux-sample-size { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE); }
metric-id { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_METRIC_ID); }
-cpu-cycles|cycles { return hw_term(yyscanner, PERF_COUNT_HW_CPU_CYCLES); }
-stalled-cycles-frontend|idle-cycles-frontend { return hw_term(yyscanner, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND); }
-stalled-cycles-backend|idle-cycles-backend { return hw_term(yyscanner, PERF_COUNT_HW_STALLED_CYCLES_BACKEND); }
-instructions { return hw_term(yyscanner, PERF_COUNT_HW_INSTRUCTIONS); }
-cache-references { return hw_term(yyscanner, PERF_COUNT_HW_CACHE_REFERENCES); }
-cache-misses { return hw_term(yyscanner, PERF_COUNT_HW_CACHE_MISSES); }
-branch-instructions|branches { return hw_term(yyscanner, PERF_COUNT_HW_BRANCH_INSTRUCTIONS); }
-branch-misses { return hw_term(yyscanner, PERF_COUNT_HW_BRANCH_MISSES); }
-bus-cycles { return hw_term(yyscanner, PERF_COUNT_HW_BUS_CYCLES); }
-ref-cycles { return hw_term(yyscanner, PERF_COUNT_HW_REF_CPU_CYCLES); }
+cpu { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CPU); }
+ratio-to-prev { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV); }
+legacy-hardware-config { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG); }
+legacy-cache-config { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG); }
r{num_raw_hex} { return str(yyscanner, PE_RAW); }
r0x{num_raw_hex} { return str(yyscanner, PE_RAW); }
, { return ','; }
"/" { BEGIN(INITIAL); return '/'; }
-{lc_type} { return lc_str(yyscanner, _parse_state); }
-{lc_type}-{lc_op_result} { return lc_str(yyscanner, _parse_state); }
-{lc_type}-{lc_op_result}-{lc_op_result} { return lc_str(yyscanner, _parse_state); }
{num_dec} { return value(_parse_state, yyscanner, 10); }
{num_hex} { return value(_parse_state, yyscanner, 16); }
{term_name} { return str(yyscanner, PE_NAME); }
@@ -390,32 +352,6 @@ r0x{num_raw_hex} { return str(yyscanner, PE_RAW); }
<<EOF>> { BEGIN(INITIAL); }
}
-cpu-cycles|cycles { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES); }
-stalled-cycles-frontend|idle-cycles-frontend { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND); }
-stalled-cycles-backend|idle-cycles-backend { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_BACKEND); }
-instructions { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS); }
-cache-references { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_REFERENCES); }
-cache-misses { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_MISSES); }
-branch-instructions|branches { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_INSTRUCTIONS); }
-branch-misses { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_MISSES); }
-bus-cycles { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_BUS_CYCLES); }
-ref-cycles { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_REF_CPU_CYCLES); }
-cpu-clock { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK); }
-task-clock { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_TASK_CLOCK); }
-page-faults|faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS); }
-minor-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MIN); }
-major-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MAJ); }
-context-switches|cs { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CONTEXT_SWITCHES); }
-cpu-migrations|migrations { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_MIGRATIONS); }
-alignment-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS); }
-emulation-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); }
-dummy { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_DUMMY); }
-bpf-output { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_BPF_OUTPUT); }
-cgroup-switches { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CGROUP_SWITCHES); }
-
-{lc_type} { return str(yyscanner, PE_LEGACY_CACHE); }
-{lc_type}-{lc_op_result} { return str(yyscanner, PE_LEGACY_CACHE); }
-{lc_type}-{lc_op_result}-{lc_op_result} { return str(yyscanner, PE_LEGACY_CACHE); }
mem: { BEGIN(mem); return PE_PREFIX_MEM; }
r{num_raw_hex} { return str(yyscanner, PE_RAW); }
{num_dec} { return value(_parse_state, yyscanner, 10); }
@@ -434,8 +370,3 @@ r{num_raw_hex} { return str(yyscanner, PE_RAW); }
. { }
%%
-
-int parse_events_wrap(void *scanner __maybe_unused)
-{
- return 1;
-}
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index f888cbb076d6..c194de5ec1ec 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -55,24 +55,18 @@ static void free_list_evsel(struct list_head* list_evsel)
%}
%token PE_START_EVENTS PE_START_TERMS
-%token PE_VALUE PE_VALUE_SYM_HW PE_VALUE_SYM_SW PE_TERM
+%token PE_VALUE PE_TERM
%token PE_EVENT_NAME
%token PE_RAW PE_NAME
%token PE_MODIFIER_EVENT PE_MODIFIER_BP PE_BP_COLON PE_BP_SLASH
-%token PE_LEGACY_CACHE
%token PE_PREFIX_MEM
%token PE_ERROR
%token PE_DRV_CFG_TERM
-%token PE_TERM_HW
%type <num> PE_VALUE
-%type <num> PE_VALUE_SYM_HW
-%type <num> PE_VALUE_SYM_SW
%type <mod> PE_MODIFIER_EVENT
%type <term_type> PE_TERM
-%type <num> value_sym
%type <str> PE_RAW
%type <str> PE_NAME
-%type <str> PE_LEGACY_CACHE
%type <str> PE_MODIFIER_BP
%type <str> PE_EVENT_NAME
%type <str> PE_DRV_CFG_TERM
@@ -85,8 +79,6 @@ static void free_list_evsel(struct list_head* list_evsel)
%type <list_terms> opt_pmu_config
%destructor { parse_events_terms__delete ($$); } <list_terms>
%type <list_evsel> event_pmu
-%type <list_evsel> event_legacy_symbol
-%type <list_evsel> event_legacy_cache
%type <list_evsel> event_legacy_mem
%type <list_evsel> event_legacy_tracepoint
%type <list_evsel> event_legacy_numeric
@@ -102,8 +94,6 @@ static void free_list_evsel(struct list_head* list_evsel)
%destructor { free_list_evsel ($$); } <list_evsel>
%type <tracepoint_name> tracepoint_name
%destructor { free ($$.sys); free ($$.event); } <tracepoint_name>
-%type <hardware_term> PE_TERM_HW
-%destructor { free ($$.str); } <hardware_term>
%union
{
@@ -118,10 +108,6 @@ static void free_list_evsel(struct list_head* list_evsel)
char *sys;
char *event;
} tracepoint_name;
- struct hardware_term {
- char *str;
- u64 num;
- } hardware_term;
}
%%
@@ -264,8 +250,6 @@ PE_EVENT_NAME event_def
event_def
event_def: event_pmu |
- event_legacy_symbol |
- event_legacy_cache sep_dc |
event_legacy_mem sep_dc |
event_legacy_tracepoint sep_dc |
event_legacy_numeric sep_dc |
@@ -290,7 +274,7 @@ PE_NAME sep_dc
struct list_head *list;
int err;
- err = parse_events_multi_pmu_add(_parse_state, $1, PERF_COUNT_HW_MAX, NULL, &list, &@1);
+ err = parse_events_multi_pmu_add(_parse_state, $1, /*const_parsed_terms*/NULL, &list, &@1);
if (err < 0) {
struct parse_events_state *parse_state = _parse_state;
struct parse_events_error *error = parse_state->error;
@@ -306,71 +290,6 @@ PE_NAME sep_dc
$$ = list;
}
-value_sym:
-PE_VALUE_SYM_HW
-|
-PE_VALUE_SYM_SW
-
-event_legacy_symbol:
-value_sym '/' event_config '/'
-{
- struct list_head *list;
- int type = $1 >> 16;
- int config = $1 & 255;
- int err;
- bool wildcard = (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE);
-
- list = alloc_list();
- if (!list)
- YYNOMEM;
- err = parse_events_add_numeric(_parse_state, list, type, config, $3, wildcard);
- parse_events_terms__delete($3);
- if (err) {
- free_list_evsel(list);
- PE_ABORT(err);
- }
- $$ = list;
-}
-|
-value_sym sep_slash_slash_dc
-{
- struct list_head *list;
- int type = $1 >> 16;
- int config = $1 & 255;
- bool wildcard = (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE);
- int err;
-
- list = alloc_list();
- if (!list)
- YYNOMEM;
- err = parse_events_add_numeric(_parse_state, list, type, config, /*head_config=*/NULL, wildcard);
- if (err)
- PE_ABORT(err);
- $$ = list;
-}
-
-event_legacy_cache:
-PE_LEGACY_CACHE opt_event_config
-{
- struct parse_events_state *parse_state = _parse_state;
- struct list_head *list;
- int err;
-
- list = alloc_list();
- if (!list)
- YYNOMEM;
-
- err = parse_events_add_cache(list, &parse_state->idx, $1, parse_state, $2);
-
- parse_events_terms__delete($2);
- free($1);
- if (err) {
- free_list_evsel(list);
- PE_ABORT(err);
- }
- $$ = list;
-}
-
event_legacy_mem:
PE_PREFIX_MEM PE_VALUE PE_BP_SLASH PE_VALUE PE_BP_COLON PE_MODIFIER_BP opt_event_config
{
@@ -589,12 +508,7 @@ event_term
$$ = head;
}
-name_or_raw: PE_RAW | PE_NAME | PE_LEGACY_CACHE
-|
-PE_TERM_HW
-{
- $$ = $1.str;
-}
+name_or_raw: PE_RAW | PE_NAME
event_term:
PE_RAW
@@ -636,19 +550,6 @@ name_or_raw '=' PE_VALUE
$$ = term;
}
|
-PE_LEGACY_CACHE
-{
- struct parse_events_term *term;
- int err = parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE,
- $1, /*num=*/1, /*novalue=*/true, &@1, /*loc_val=*/NULL);
-
- if (err) {
- free($1);
- PE_ABORT(err);
- }
- $$ = term;
-}
-|
PE_NAME
{
struct parse_events_term *term;
@@ -662,20 +563,6 @@ PE_NAME
$$ = term;
}
|
-PE_TERM_HW
-{
- struct parse_events_term *term;
- int err = parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_HARDWARE,
- $1.str, $1.num & 255, /*novalue=*/false,
- &@1, /*loc_val=*/NULL);
-
- if (err) {
- free($1.str);
- PE_ABORT(err);
- }
- $$ = term;
-}
-|
PE_TERM '=' name_or_raw
{
struct parse_events_term *term;
@@ -744,8 +631,6 @@ PE_DRV_CFG_TERM
sep_dc: ':' |
-sep_slash_slash_dc: '/' '/' | ':' |
-
%%
void parse_events_error(YYLTYPE *loc, void *_parse_state,
diff --git a/tools/perf/util/perf_api_probe.c b/tools/perf/util/perf_api_probe.c
index 1de3b69cdf4a..6ecf38314f01 100644
--- a/tools/perf/util/perf_api_probe.c
+++ b/tools/perf/util/perf_api_probe.c
@@ -59,10 +59,10 @@ out_delete:
static bool perf_probe_api(setup_probe_fn_t fn)
{
- const char *try[] = {"cycles:u", "instructions:u", "cpu-clock:u", NULL};
+ struct perf_pmu *pmu;
struct perf_cpu_map *cpus;
struct perf_cpu cpu;
- int ret, i = 0;
+ int ret = 0;
cpus = perf_cpu_map__new_online_cpus();
if (!cpus)
@@ -70,12 +70,23 @@ static bool perf_probe_api(setup_probe_fn_t fn)
cpu = perf_cpu_map__cpu(cpus, 0);
perf_cpu_map__put(cpus);
- do {
- ret = perf_do_probe_api(fn, cpu, try[i++]);
- if (!ret)
- return true;
- } while (ret == -EAGAIN && try[i]);
-
+ ret = perf_do_probe_api(fn, cpu, "software/cpu-clock/u");
+ if (!ret)
+ return true;
+
+ pmu = perf_pmus__scan_core(/*pmu=*/NULL);
+ if (pmu) {
+ const char *try[] = {"cycles", "instructions", NULL};
+ char buf[256];
+ int i = 0;
+
+ while (ret == -EAGAIN && try[i]) {
+ snprintf(buf, sizeof(buf), "%s/%s/u", pmu->name, try[i++]);
+ ret = perf_do_probe_api(fn, cpu, buf);
+ if (!ret)
+ return true;
+ }
+ }
return false;
}
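The rework above probes a fixed software event first and then falls back to per-PMU legacy names, formatted in pmu/event/modifier syntax; a sketch of the strings tried, assuming a core PMU named "cpu":

	"software/cpu-clock/u"   /* always tried first */
	"cpu/cycles/u"           /* then per core PMU... */
	"cpu/instructions/u"     /* ...while probing returns -EAGAIN */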
diff --git a/tools/perf/util/perf_event_attr_fprintf.c b/tools/perf/util/perf_event_attr_fprintf.c
index 66b666d9ce64..741c3d657a8b 100644
--- a/tools/perf/util/perf_event_attr_fprintf.c
+++ b/tools/perf/util/perf_event_attr_fprintf.c
@@ -343,6 +343,8 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
PRINT_ATTRf(inherit_thread, p_unsigned);
PRINT_ATTRf(remove_on_exec, p_unsigned);
PRINT_ATTRf(sigtrap, p_unsigned);
+ PRINT_ATTRf(defer_callchain, p_unsigned);
+ PRINT_ATTRf(defer_output, p_unsigned);
PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned, false);
PRINT_ATTRf(bp_type, p_unsigned);
diff --git a/tools/perf/util/pfm.c b/tools/perf/util/pfm.c
index 0dacc133ed39..d9043f4afbe7 100644
--- a/tools/perf/util/pfm.c
+++ b/tools/perf/util/pfm.c
@@ -15,6 +15,7 @@
#include "util/strbuf.h"
#include "util/thread_map.h"
+#include <errno.h>
#include <string.h>
#include <linux/kernel.h>
#include <perfmon/pfmlib_perf_event.h>
@@ -47,10 +48,6 @@ int parse_libpfm_events_option(const struct option *opt, const char *str,
p_orig = p = strdup(str);
if (!p)
return -1;
- /*
- * force loading of the PMU list
- */
- perf_pmus__scan(NULL);
for (q = p; strsep(&p, ",{}"); q = p) {
sep = p ? str + (p - p_orig - 1) : "";
@@ -234,6 +231,7 @@ print_libpfm_event(const struct print_callbacks *print_cb, void *print_state,
if (is_libpfm_event_supported(name, cpus, threads)) {
print_cb->print_event(print_state, topic, pinfo->name,
+ /*pmu_type=*/PERF_TYPE_RAW,
name, info->equiv,
/*scale_unit=*/NULL,
/*deprecated=*/NULL, "PFM event",
@@ -269,6 +267,7 @@ print_libpfm_event(const struct print_callbacks *print_cb, void *print_state,
print_cb->print_event(print_state,
topic,
pinfo->name,
+ /*pmu_type=*/PERF_TYPE_RAW,
name, /*alias=*/NULL,
/*scale_unit=*/NULL,
/*deprecated=*/NULL, "PFM event",
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index b7ebac5ab1d1..956ea273c2c7 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -20,13 +20,16 @@
#include "debug.h"
#include "evsel.h"
#include "pmu.h"
+#include "drm_pmu.h"
#include "hwmon_pmu.h"
#include "pmus.h"
#include "tool_pmu.h"
+#include "tp_pmu.h"
#include <util/pmu-bison.h>
#include <util/pmu-flex.h>
#include "parse-events.h"
#include "print-events.h"
+#include "hashmap.h"
#include "header.h"
#include "string2.h"
#include "strbuf.h"
@@ -64,10 +67,13 @@ struct perf_pmu_alias {
* json events.
*/
char *topic;
- /** @terms: Owned list of the original parsed parameters. */
- struct parse_events_terms terms;
- /** @list: List element of struct perf_pmu aliases. */
- struct list_head list;
+ /** @terms: Owned copy of the event terms. */
+ char *terms;
+ /**
+ * @legacy_terms: If the event aliases a legacy event, holds a copy
+ * ofthe legacy event string.
+ */
+ char *legacy_terms;
/**
* @pmu_name: The name copied from the json struct pmu_event. This can
* differ from the PMU name as it won't have suffixes.
@@ -77,6 +83,12 @@ struct perf_pmu_alias {
char unit[UNIT_MAX_LEN+1];
/** @scale: Value to scale read counter values by. */
double scale;
+ /** @retirement_latency_mean: Value to be given for unsampled retirement latency mean. */
+ double retirement_latency_mean;
+ /** @retirement_latency_min: Value to be given for unsampled retirement latency min. */
+ double retirement_latency_min;
+ /** @retirement_latency_max: Value to be given for unsampled retirement latency max. */
+ double retirement_latency_max;
/**
* @per_pkg: Does the file
* <sysfs>/bus/event_source/devices/<pmu_name>/events/<name>.per-pkg or
@@ -94,6 +106,12 @@ struct perf_pmu_alias {
* default.
*/
bool deprecated;
+ /**
+ * @legacy_deprecated_checked: Legacy events may not be supported by the
+ * PMU and need to be checked. If they aren't supported they are marked
+ * deprecated.
+ */
+ bool legacy_deprecated_checked;
/** @from_sysfs: Was the alias from sysfs or a json event? */
bool from_sysfs;
/** @info_loaded: Have the scale, unit and other values been read from disk? */
@@ -257,7 +275,7 @@ static int pmu_format(struct perf_pmu *pmu, int dirfd, const char *name, bool ea
return 0;
}
-int perf_pmu__convert_scale(const char *scale, char **end, double *sval)
+static int parse_double(const char *scale, char **end, double *sval)
{
char *lc;
int ret = 0;
@@ -294,6 +312,11 @@ out:
return ret;
}
+int perf_pmu__convert_scale(const char *scale, char **end, double *sval)
+{
+ return parse_double(scale, end, sval);
+}
+
static int perf_pmu__parse_scale(struct perf_pmu *pmu, struct perf_pmu_alias *alias)
{
struct stat st;
@@ -407,25 +430,34 @@ static void perf_pmu__parse_snapshot(struct perf_pmu *pmu, struct perf_pmu_alias
}
/* Delete an alias entry. */
-static void perf_pmu_free_alias(struct perf_pmu_alias *newalias)
+static void perf_pmu_free_alias(struct perf_pmu_alias *alias)
{
- zfree(&newalias->name);
- zfree(&newalias->desc);
- zfree(&newalias->long_desc);
- zfree(&newalias->topic);
- zfree(&newalias->pmu_name);
- parse_events_terms__exit(&newalias->terms);
- free(newalias);
+ if (!alias)
+ return;
+
+ zfree(&alias->name);
+ zfree(&alias->desc);
+ zfree(&alias->long_desc);
+ zfree(&alias->topic);
+ zfree(&alias->pmu_name);
+ zfree(&alias->terms);
+ zfree(&alias->legacy_terms);
+ free(alias);
}
static void perf_pmu__del_aliases(struct perf_pmu *pmu)
{
- struct perf_pmu_alias *alias, *tmp;
+ struct hashmap_entry *entry;
+ size_t bkt;
- list_for_each_entry_safe(alias, tmp, &pmu->aliases, list) {
- list_del(&alias->list);
- perf_pmu_free_alias(alias);
- }
+ if (!pmu->aliases)
+ return;
+
+ hashmap__for_each_entry(pmu->aliases, entry, bkt)
+ perf_pmu_free_alias(entry->pvalue);
+
+ hashmap__free(pmu->aliases);
+ pmu->aliases = NULL;
}
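A sketch of the ownership model used with the alias hashmap: keys are alias names, values are owned perf_pmu_alias pointers, and insertion displaces any previous entry, which the caller must free, mirroring the hashmap__set() call later in this patch:

	struct perf_pmu_alias *old = NULL;

	hashmap__set(pmu->aliases, alias->name, alias, /*old_key=*/NULL, &old);
	perf_pmu_free_alias(old); /* free any alias this one replaced */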
static struct perf_pmu_alias *perf_pmu__find_alias(struct perf_pmu *pmu,
@@ -433,35 +465,37 @@ static struct perf_pmu_alias *perf_pmu__find_alias(struct perf_pmu *pmu,
bool load)
{
struct perf_pmu_alias *alias;
+ bool has_sysfs_event;
+ char event_file_name[NAME_MAX + 8];
- if (load && !pmu->sysfs_aliases_loaded) {
- bool has_sysfs_event;
- char event_file_name[FILENAME_MAX + 8];
+ if (hashmap__find(pmu->aliases, name, &alias))
+ return alias;
- /*
- * Test if alias/event 'name' exists in the PMU's sysfs/events
- * directory. If not skip parsing the sysfs aliases. Sysfs event
- * name must be all lower or all upper case.
- */
- scnprintf(event_file_name, sizeof(event_file_name), "events/%s", name);
- for (size_t i = 7, n = 7 + strlen(name); i < n; i++)
- event_file_name[i] = tolower(event_file_name[i]);
+ if (!load || pmu->sysfs_aliases_loaded)
+ return NULL;
- has_sysfs_event = perf_pmu__file_exists(pmu, event_file_name);
- if (!has_sysfs_event) {
- for (size_t i = 7, n = 7 + strlen(name); i < n; i++)
- event_file_name[i] = toupper(event_file_name[i]);
+ /*
+ * Test if alias/event 'name' exists in the PMU's sysfs/events
+ * directory. If not, skip parsing the sysfs aliases. Sysfs event
+ * name must be all lower or all upper case.
+ */
+ scnprintf(event_file_name, sizeof(event_file_name), "events/%s", name);
+ for (size_t i = 7, n = 7 + strlen(name); i < n; i++)
+ event_file_name[i] = tolower(event_file_name[i]);
- has_sysfs_event = perf_pmu__file_exists(pmu, event_file_name);
- }
- if (has_sysfs_event)
- pmu_aliases_parse(pmu);
+ has_sysfs_event = perf_pmu__file_exists(pmu, event_file_name);
+ if (!has_sysfs_event) {
+ for (size_t i = 7, n = 7 + strlen(name); i < n; i++)
+ event_file_name[i] = toupper(event_file_name[i]);
+ has_sysfs_event = perf_pmu__file_exists(pmu, event_file_name);
}
- list_for_each_entry(alias, &pmu->aliases, list) {
- if (!strcasecmp(alias->name, name))
+ if (has_sysfs_event) {
+ pmu_aliases_parse(pmu);
+ if (hashmap__find(pmu->aliases, name, &alias))
return alias;
}
+
return NULL;
}
@@ -500,6 +534,7 @@ static void read_alias_info(struct perf_pmu *pmu, struct perf_pmu_alias *alias)
struct update_alias_data {
struct perf_pmu *pmu;
struct perf_pmu_alias *alias;
+ bool legacy;
};
static int update_alias(const struct pmu_event *pe,
@@ -515,8 +550,13 @@ static int update_alias(const struct pmu_event *pe,
assign_str(pe->name, "topic", &data->alias->topic, pe->topic);
data->alias->per_pkg = pe->perpkg;
if (pe->event) {
- parse_events_terms__exit(&data->alias->terms);
- ret = parse_events_terms(&data->alias->terms, pe->event, /*input=*/NULL);
+ if (data->legacy) {
+ zfree(&data->alias->legacy_terms);
+ data->alias->legacy_terms = strdup(pe->event);
+ } else {
+ zfree(&data->alias->terms);
+ data->alias->terms = strdup(pe->event);
+ }
}
if (!ret && pe->unit) {
char *unit;
@@ -525,15 +565,27 @@ static int update_alias(const struct pmu_event *pe,
if (!ret)
snprintf(data->alias->unit, sizeof(data->alias->unit), "%s", unit);
}
+ if (!ret && pe->retirement_latency_mean) {
+ ret = parse_double(pe->retirement_latency_mean, NULL,
+ &data->alias->retirement_latency_mean);
+ }
+ if (!ret && pe->retirement_latency_min) {
+ ret = parse_double(pe->retirement_latency_min, NULL,
+ &data->alias->retirement_latency_min);
+ }
+ if (!ret && pe->retirement_latency_max) {
+ ret = parse_double(pe->retirement_latency_max, NULL,
+ &data->alias->retirement_latency_max);
+ }
return ret;
}
static int perf_pmu__new_alias(struct perf_pmu *pmu, const char *name,
- const char *desc, const char *val, FILE *val_fd,
+ const char *desc, const char *val, int val_fd,
const struct pmu_event *pe, enum event_source src)
{
- struct perf_pmu_alias *alias;
- int ret;
+ struct perf_pmu_alias *alias, *old_alias;
+ int ret = 0;
const char *long_desc = NULL, *topic = NULL, *unit = NULL, *pmu_name = NULL;
bool deprecated = false, perpkg = false;
@@ -556,24 +608,49 @@ static int perf_pmu__new_alias(struct perf_pmu *pmu, const char *name,
if (!alias)
return -ENOMEM;
- parse_events_terms__init(&alias->terms);
alias->scale = 1.0;
alias->unit[0] = '\0';
alias->per_pkg = perpkg;
alias->snapshot = false;
alias->deprecated = deprecated;
+ alias->retirement_latency_mean = 0.0;
+ alias->retirement_latency_min = 0.0;
+ alias->retirement_latency_max = 0.0;
- ret = parse_events_terms(&alias->terms, val, val_fd);
- if (ret) {
- pr_err("Cannot parse alias %s: %d\n", val, ret);
- free(alias);
- return ret;
+ if (!ret && pe && pe->retirement_latency_mean) {
+ ret = parse_double(pe->retirement_latency_mean, NULL,
+ &alias->retirement_latency_mean);
+ }
+ if (!ret && pe && pe->retirement_latency_min) {
+ ret = parse_double(pe->retirement_latency_min, NULL,
+ &alias->retirement_latency_min);
+ }
+ if (!ret && pe && pe->retirement_latency_max) {
+ ret = parse_double(pe->retirement_latency_max, NULL,
+ &alias->retirement_latency_max);
}
+ if (ret)
+ return ret;
+ if (val_fd < 0) {
+ alias->terms = strdup(val);
+ } else {
+ char buf[256];
+ struct io io;
+ size_t line_len;
+
+ io__init(&io, val_fd, buf, sizeof(buf));
+ ret = io__getline(&io, &alias->terms, &line_len) < 0 ? -errno : 0;
+ if (ret) {
+ pr_err("Failed to read alias %s\n", name);
+ return ret;
+ }
+ if (line_len >= 1 && alias->terms[line_len - 1] == '\n')
+ alias->terms[line_len - 1] = '\0';
+ }
alias->name = strdup(name);
alias->desc = desc ? strdup(desc) : NULL;
- alias->long_desc = long_desc ? strdup(long_desc) :
- desc ? strdup(desc) : NULL;
+ alias->long_desc = long_desc ? strdup(long_desc) : NULL;
alias->topic = topic ? strdup(topic) : NULL;
alias->pmu_name = pmu_name ? strdup(pmu_name) : NULL;
if (unit) {
@@ -587,15 +664,29 @@ static int perf_pmu__new_alias(struct perf_pmu *pmu, const char *name,
default:
case EVENT_SRC_SYSFS:
alias->from_sysfs = true;
- if (pmu->events_table) {
+ if (pmu->events_table || pmu->is_core) {
/* Update an event from sysfs with json data. */
struct update_alias_data data = {
.pmu = pmu,
.alias = alias,
+ .legacy = false,
};
- if (pmu_events_table__find_event(pmu->events_table, pmu, name,
- update_alias, &data) == 0)
+ if ((pmu_events_table__find_event(pmu->events_table, pmu, name,
+ update_alias, &data) == 0)) {
+ /*
+ * Override sysfs encodings with json encodings
+ * specific to the cpuid.
+ */
pmu->cpu_common_json_aliases++;
+ }
+ if (pmu->is_core) {
+ /* Add in legacy encodings. */
+ data.legacy = true;
+ if (pmu_events_table__find_event(
+ perf_pmu__default_core_events_table(),
+ pmu, name, update_alias, &data) == 0)
+ pmu->cpu_common_json_aliases++;
+ }
}
pmu->sysfs_aliases++;
break;
@@ -607,7 +698,8 @@ static int perf_pmu__new_alias(struct perf_pmu *pmu, const char *name,
break;
}
- list_add_tail(&alias->list, &pmu->aliases);
+ hashmap__set(pmu->aliases, alias->name, alias, /*old_key=*/ NULL, &old_alias);
+ perf_pmu_free_alias(old_alias);
return 0;
}
@@ -642,7 +734,6 @@ static int __pmu_aliases_parse(struct perf_pmu *pmu, int events_dir_fd)
while ((evt_ent = io_dir__readdir(&event_dir))) {
char *name = evt_ent->d_name;
int fd;
- FILE *file;
if (!strcmp(name, ".") || !strcmp(name, ".."))
continue;
@@ -658,17 +749,12 @@ static int __pmu_aliases_parse(struct perf_pmu *pmu, int events_dir_fd)
pr_debug("Cannot open %s\n", name);
continue;
}
- file = fdopen(fd, "r");
- if (!file) {
- close(fd);
- continue;
- }
if (perf_pmu__new_alias(pmu, name, /*desc=*/ NULL,
- /*val=*/ NULL, file, /*pe=*/ NULL,
+ /*val=*/ NULL, fd, /*pe=*/ NULL,
EVENT_SRC_SYSFS) < 0)
pr_debug("Cannot set up %s\n", name);
- fclose(file);
+ close(fd);
}
pmu->sysfs_aliases_loaded = true;
@@ -701,7 +787,7 @@ static int pmu_aliases_parse(struct perf_pmu *pmu)
static int pmu_aliases_parse_eager(struct perf_pmu *pmu, int sysfs_fd)
{
- char path[FILENAME_MAX + 7];
+ char path[NAME_MAX + 8];
int ret, events_dir_fd;
scnprintf(path, sizeof(path), "%s/events", pmu->name);
@@ -715,29 +801,29 @@ static int pmu_aliases_parse_eager(struct perf_pmu *pmu, int sysfs_fd)
return ret;
}
-static int pmu_alias_terms(struct perf_pmu_alias *alias, int err_loc, struct list_head *terms)
+static int pmu_alias_terms(struct perf_pmu_alias *alias, struct list_head *terms)
{
- struct parse_events_term *term, *cloned;
- struct parse_events_terms clone_terms;
-
- parse_events_terms__init(&clone_terms);
- list_for_each_entry(term, &alias->terms.terms, list) {
- int ret = parse_events_term__clone(&cloned, term);
+ struct parse_events_terms alias_terms;
+ struct parse_events_term *term;
+ int ret;
- if (ret) {
- parse_events_terms__exit(&clone_terms);
- return ret;
- }
+ parse_events_terms__init(&alias_terms);
+ ret = parse_events_terms(&alias_terms, alias->terms);
+ if (ret) {
+ pr_err("Cannot parse '%s' terms '%s': %d\n",
+ alias->name, alias->terms, ret);
+ parse_events_terms__exit(&alias_terms);
+ return ret;
+ }
+ list_for_each_entry(term, &alias_terms.terms, list) {
/*
* Weak terms don't override command line options,
* which we don't want for implicit terms in aliases.
*/
- cloned->weak = true;
- cloned->err_term = cloned->err_val = err_loc;
- list_add_tail(&cloned->list, &clone_terms.terms);
+ term->weak = true;
}
- list_splice_init(&clone_terms.terms, terms);
- parse_events_terms__exit(&clone_terms);
+ list_splice_init(&alias_terms.terms, terms);
+ parse_events_terms__exit(&alias_terms);
return 0;
}
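/*
 * Illustrative example (hypothetical alias and values): if an alias
 * "mem_load" expands to "event=0xcd,period=200003", its terms are marked
 * weak above, so in "pmu/mem_load,period=100000/" the user's explicit
 * period=100000 overrides the alias's period.
 */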
@@ -993,7 +1079,7 @@ static int pmu_add_cpu_aliases_map_callback(const struct pmu_event *pe,
{
struct perf_pmu *pmu = vdata;
- perf_pmu__new_alias(pmu, pe->name, pe->desc, pe->event, /*val_fd=*/ NULL,
+ perf_pmu__new_alias(pmu, pe->name, pe->desc, pe->event, /*val_fd=*/ -1,
pe, EVENT_SRC_CPU_JSON);
return 0;
}
@@ -1009,13 +1095,16 @@ void pmu_add_cpu_aliases_table(struct perf_pmu *pmu, const struct pmu_events_tab
static void pmu_add_cpu_aliases(struct perf_pmu *pmu)
{
- if (!pmu->events_table)
+ if (!pmu->events_table && !pmu->is_core)
return;
if (pmu->cpu_aliases_added)
return;
pmu_add_cpu_aliases_table(pmu, pmu->events_table);
+ if (pmu->is_core)
+ pmu_add_cpu_aliases_table(pmu, perf_pmu__default_core_events_table());
+
pmu->cpu_aliases_added = true;
}
@@ -1042,7 +1131,7 @@ static int pmu_add_sys_aliases_iter_fn(const struct pmu_event *pe,
pe->name,
pe->desc,
pe->event,
- /*val_fd=*/ NULL,
+ /*val_fd=*/ -1,
pe,
EVENT_SRC_SYS_JSON);
}
@@ -1095,43 +1184,107 @@ perf_pmu__arch_init(struct perf_pmu *pmu)
pmu->mem_events = perf_mem_events;
}
+/* Variant of str_hash that does tolower on each character. */
+static size_t aliases__hash(long key, void *ctx __maybe_unused)
+{
+ const char *s = (const char *)key;
+ size_t h = 0;
+
+ while (*s) {
+ h = h * 31 + tolower(*s);
+ s++;
+ }
+ return h;
+}
+
+static bool aliases__equal(long key1, long key2, void *ctx __maybe_unused)
+{
+ return strcasecmp((const char *)key1, (const char *)key2) == 0;
+}
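+/*
+ * Illustrative note: together these callbacks make pmu->aliases lookups
+ * case-insensitive, e.g. aliases__hash("CYCLES") equals
+ * aliases__hash("cycles") and aliases__equal() matches both spellings.
+ */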
+
+int perf_pmu__init(struct perf_pmu *pmu, __u32 type, const char *name)
+{
+ pmu->type = type;
+ INIT_LIST_HEAD(&pmu->format);
+ INIT_LIST_HEAD(&pmu->caps);
+
+ pmu->name = strdup(name);
+ if (!pmu->name)
+ return -ENOMEM;
+
+ pmu->aliases = hashmap__new(aliases__hash, aliases__equal, /*ctx=*/ NULL);
+ if (!pmu->aliases)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static __u32 wellknown_pmu_type(const char *pmu_name)
+{
+ struct {
+ const char *pmu_name;
+ __u32 type;
+ } wellknown_pmus[] = {
+ {
+ "software",
+ PERF_TYPE_SOFTWARE
+ },
+ {
+ "tracepoint",
+ PERF_TYPE_TRACEPOINT
+ },
+ {
+ "breakpoint",
+ PERF_TYPE_BREAKPOINT
+ },
+ };
+ for (size_t i = 0; i < ARRAY_SIZE(wellknown_pmus); i++) {
+ if (!strcmp(wellknown_pmus[i].pmu_name, pmu_name))
+ return wellknown_pmus[i].type;
+ }
+ return PERF_TYPE_MAX;
+}
+
struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char *name,
bool eager_load)
{
struct perf_pmu *pmu;
- __u32 type;
pmu = zalloc(sizeof(*pmu));
if (!pmu)
return NULL;
- pmu->name = strdup(name);
- if (!pmu->name)
- goto err;
+ if (perf_pmu__init(pmu, PERF_PMU_TYPE_FAKE, name) != 0) {
+ perf_pmu__delete(pmu);
+ return NULL;
+ }
/*
* Read type early to fail fast if a lookup name isn't a PMU. Ensure
* that type value is successfully assigned (return 1).
*/
- if (perf_pmu__scan_file_at(pmu, dirfd, "type", "%u", &type) != 1)
- goto err;
-
- INIT_LIST_HEAD(&pmu->format);
- INIT_LIST_HEAD(&pmu->aliases);
- INIT_LIST_HEAD(&pmu->caps);
+ if (perf_pmu__scan_file_at(pmu, dirfd, "type", "%u", &pmu->type) != 1) {
+ /* Double check the PMU's name isn't wellknown. */
+ pmu->type = wellknown_pmu_type(name);
+ if (pmu->type == PERF_TYPE_MAX) {
+ perf_pmu__delete(pmu);
+ return NULL;
+ }
+ }
/*
* The pmu data we store & need consists of the pmu
* type value and format definitions. Load both right
* now.
*/
- if (pmu_format(pmu, dirfd, name, eager_load))
- goto err;
+ if (pmu_format(pmu, dirfd, name, eager_load)) {
+ perf_pmu__delete(pmu);
+ return NULL;
+ }
pmu->is_core = is_pmu_core(name);
pmu->cpus = pmu_cpumask(dirfd, name, pmu->is_core);
- pmu->type = type;
pmu->is_uncore = pmu_is_uncore(dirfd, name);
if (pmu->is_uncore)
pmu->id = pmu_id(name);
@@ -1153,10 +1306,6 @@ struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char
pmu_aliases_parse_eager(pmu, dirfd);
return pmu;
-err:
- zfree(&pmu->name);
- free(pmu);
- return NULL;
}
/* Creates the PMU when sysfs scanning fails. */
@@ -1178,7 +1327,7 @@ struct perf_pmu *perf_pmu__create_placeholder_core_pmu(struct list_head *core_pm
pmu->cpus = cpu_map__online();
INIT_LIST_HEAD(&pmu->format);
- INIT_LIST_HEAD(&pmu->aliases);
+ pmu->aliases = hashmap__new(aliases__hash, aliases__equal, /*ctx=*/ NULL);
INIT_LIST_HEAD(&pmu->caps);
list_add_tail(&pmu->list, core_pmus);
return pmu;
@@ -1427,9 +1576,41 @@ static int pmu_config_term(const struct perf_pmu *pmu,
assert(term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
pmu_format_value(bits, term->val.num, &attr->config3, zero);
break;
+ case PARSE_EVENTS__TERM_TYPE_CONFIG4:
+ assert(term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
+ pmu_format_value(bits, term->val.num, &attr->config4, zero);
+ break;
+ case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG:
+ assert(term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
+ assert(term->val.num < PERF_COUNT_HW_MAX);
+ assert(pmu->is_core);
+ attr->config = term->val.num;
+ if (perf_pmus__supports_extended_type())
+ attr->config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;
+ attr->type = PERF_TYPE_HARDWARE;
+ break;
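+ /*
+ * Note: legacy-cache-config packs the legacy cache triple as
+ * type | (op << 8) | (result << 16), the perf ABI encoding for
+ * PERF_TYPE_HW_CACHE. For example, an L1D read miss would be
+ * PERF_COUNT_HW_CACHE_L1D | (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+ * (PERF_COUNT_HW_CACHE_RESULT_MISS << 16).
+ */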
+ case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG: {
+#ifndef NDEBUG
+ int cache_type = term->val.num & 0xFF;
+ int cache_op = (term->val.num >> 8) & 0xFF;
+ int cache_result = (term->val.num >> 16) & 0xFF;
+
+ assert(cache_type < PERF_COUNT_HW_CACHE_MAX);
+ assert(cache_op < PERF_COUNT_HW_CACHE_OP_MAX);
+ assert(cache_result < PERF_COUNT_HW_CACHE_RESULT_MAX);
+#endif
+ assert(term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
+ assert((term->val.num & ~0xFFFFFF) == 0);
+ assert(pmu->is_core);
+ attr->config = term->val.num;
+ if (perf_pmus__supports_extended_type())
+ attr->config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;
+ attr->type = PERF_TYPE_HW_CACHE;
+ break;
+ }
case PARSE_EVENTS__TERM_TYPE_USER: /* Not hardcoded. */
return -EINVAL;
- case PARSE_EVENTS__TERM_TYPE_NAME ... PARSE_EVENTS__TERM_TYPE_HARDWARE:
+ case PARSE_EVENTS__TERM_TYPE_NAME ... PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV:
/* Skip non-config terms. */
break;
default:
@@ -1474,6 +1655,9 @@ static int pmu_config_term(const struct perf_pmu *pmu,
case PERF_PMU_FORMAT_VALUE_CONFIG3:
vp = &attr->config3;
break;
+ case PERF_PMU_FORMAT_VALUE_CONFIG4:
+ vp = &attr->config4;
+ break;
default:
return -EINVAL;
}
@@ -1546,6 +1730,8 @@ int perf_pmu__config_terms(const struct perf_pmu *pmu,
if (perf_pmu__is_hwmon(pmu))
return hwmon_pmu__config_terms(pmu, attr, terms, err);
+ if (perf_pmu__is_drm(pmu))
+ return drm_pmu__config_terms(pmu, attr, terms, err);
list_for_each_entry(term, &terms->terms, list) {
if (pmu_config_term(pmu, attr, term, terms, zero, apply_hardcoded, err))
@@ -1603,10 +1789,14 @@ static struct perf_pmu_alias *pmu_find_alias(struct perf_pmu *pmu,
return alias;
/* Alias doesn't exist, try to get it from the json events. */
- if (pmu->events_table &&
- pmu_events_table__find_event(pmu->events_table, pmu, name,
- pmu_add_cpu_aliases_map_callback,
- pmu) == 0) {
+ if ((pmu_events_table__find_event(pmu->events_table, pmu, name,
+ pmu_add_cpu_aliases_map_callback,
+ pmu) == 0) ||
+ (pmu->is_core &&
+ pmu_events_table__find_event(perf_pmu__default_core_events_table(),
+ pmu, name,
+ pmu_add_cpu_aliases_map_callback,
+ pmu) == 0)) {
alias = perf_pmu__find_alias(pmu, name, /*load=*/ false);
}
return alias;
@@ -1656,6 +1846,24 @@ static int check_info_data(struct perf_pmu *pmu,
return 0;
}
+static int perf_pmu__parse_terms_to_attr(struct perf_pmu *pmu, const char *terms_str,
+ struct perf_event_attr *attr)
+{
+ struct parse_events_terms terms;
+ int ret;
+
+ parse_events_terms__init(&terms);
+ ret = parse_events_terms(&terms, terms_str);
+ if (ret) {
+ pr_debug("Failed to parse terms '%s': %d\n", terms_str, ret);
+ parse_events_terms__exit(&terms);
+ return ret;
+ }
+ ret = perf_pmu__config(pmu, attr, &terms, /*apply_hardcoded=*/true, /*err=*/NULL);
+ parse_events_terms__exit(&terms);
+ return ret;
+}
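+/*
+ * Illustrative usage: for a core PMU, parsing the saved legacy encoding
+ * "legacy-hardware-config=0" should yield attr.type == PERF_TYPE_HARDWARE
+ * with attr.config holding PERF_COUNT_HW_CPU_CYCLES (0), plus the
+ * extended PMU type bits where supported.
+ */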
+
/*
* Find alias in the terms list and replace it with the terms
* defined for the alias
@@ -1678,11 +1886,18 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct parse_events_terms *head_
info->unit = NULL;
info->scale = 0.0;
info->snapshot = false;
+ info->retirement_latency_mean = 0.0;
+ info->retirement_latency_min = 0.0;
+ info->retirement_latency_max = 0.0;
if (perf_pmu__is_hwmon(pmu)) {
ret = hwmon_pmu__check_alias(head_terms, info, err);
goto out;
}
+ if (perf_pmu__is_drm(pmu)) {
+ ret = drm_pmu__check_alias(pmu, head_terms, info, err);
+ goto out;
+ }
/* Fake PMU doesn't rewrite terms. */
if (perf_pmu__is_fake(pmu))
@@ -1692,10 +1907,10 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct parse_events_terms *head_
alias = pmu_find_alias(pmu, term);
if (!alias)
continue;
- ret = pmu_alias_terms(alias, term->err_term, &term->list);
+ ret = pmu_alias_terms(alias, &term->list);
if (ret) {
parse_events_error__handle(err, term->err_term,
- strdup("Failure to duplicate terms"),
+ strdup("Failed to parse terms"),
NULL);
return ret;
}
@@ -1705,11 +1920,26 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct parse_events_terms *head_
if (ret)
return ret;
+ if (alias->legacy_terms) {
+ struct perf_event_attr attr = {.config = 0,};
+
+ ret = perf_pmu__parse_terms_to_attr(pmu, alias->legacy_terms, &attr);
+ if (ret) {
+ parse_events_error__handle(err, term->err_term,
+ strdup("Error evaluating legacy terms"),
+ NULL);
+ return ret;
+ }
+ if (attr.type == PERF_TYPE_HARDWARE)
+ *alternate_hw_config = attr.config & PERF_HW_EVENT_MASK;
+ }
+
if (alias->per_pkg)
info->per_pkg = true;
- if (term->alternate_hw_config)
- *alternate_hw_config = term->val.num;
+ info->retirement_latency_mean = alias->retirement_latency_mean;
+ info->retirement_latency_min = alias->retirement_latency_min;
+ info->retirement_latency_max = alias->retirement_latency_max;
list_del_init(&term->list);
parse_events_term__delete(term);
@@ -1787,6 +2017,9 @@ int perf_pmu__for_each_format(struct perf_pmu *pmu, void *state, pmu_format_call
"config1=0..0xffffffffffffffff",
"config2=0..0xffffffffffffffff",
"config3=0..0xffffffffffffffff",
+ "config4=0..0xffffffffffffffff",
+ "legacy-hardware-config=0..9,",
+ "legacy-cache-config=0..0xffffff,",
"name=string",
"period=number",
"freq=number",
@@ -1804,16 +2037,18 @@ int perf_pmu__for_each_format(struct perf_pmu *pmu, void *state, pmu_format_call
"aux-output",
"aux-action=(pause|resume|start-paused)",
"aux-sample-size=number",
+ "cpu=number",
+ "ratio-to-prev=string",
};
struct perf_pmu_format *format;
int ret;
/*
* max-events and driver-config are missing above as are the internal
- * types user, metric-id, raw, legacy cache and hardware. Assert against
- * the enum parse_events__term_type so they are kept in sync.
+ * types user, metric-id, and raw. Assert against the enum
+ * parse_events__term_type so they are kept in sync.
*/
- _Static_assert(ARRAY_SIZE(terms) == __PARSE_EVENTS__TERM_TYPE_NR - 6,
+ _Static_assert(ARRAY_SIZE(terms) == __PARSE_EVENTS__TERM_TYPE_NR - 4,
"perf_pmu__for_each_format()'s terms must be kept in sync with enum parse_events__term_type");
list_for_each_entry(format, &pmu->format, list) {
perf_pmu_format__load(pmu, format);
@@ -1858,32 +2093,49 @@ bool perf_pmu__have_event(struct perf_pmu *pmu, const char *name)
return false;
if (perf_pmu__is_tool(pmu) && tool_pmu__skip_event(name))
return false;
+ if (perf_pmu__is_tracepoint(pmu))
+ return tp_pmu__have_event(pmu, name);
if (perf_pmu__is_hwmon(pmu))
return hwmon_pmu__have_event(pmu, name);
+ if (perf_pmu__is_drm(pmu))
+ return drm_pmu__have_event(pmu, name);
if (perf_pmu__find_alias(pmu, name, /*load=*/ true) != NULL)
return true;
- if (pmu->cpu_aliases_added || !pmu->events_table)
+ if (pmu->cpu_aliases_added || (!pmu->events_table && !pmu->is_core))
return false;
- return pmu_events_table__find_event(pmu->events_table, pmu, name, NULL, NULL) == 0;
+ if (pmu_events_table__find_event(pmu->events_table, pmu, name, NULL, NULL) == 0)
+ return true;
+ return pmu->is_core &&
+ pmu_events_table__find_event(perf_pmu__default_core_events_table(),
+ pmu, name, NULL, NULL) == 0;
}
size_t perf_pmu__num_events(struct perf_pmu *pmu)
{
size_t nr;
+ if (perf_pmu__is_tracepoint(pmu))
+ return tp_pmu__num_events(pmu);
if (perf_pmu__is_hwmon(pmu))
return hwmon_pmu__num_events(pmu);
+ if (perf_pmu__is_drm(pmu))
+ return drm_pmu__num_events(pmu);
pmu_aliases_parse(pmu);
nr = pmu->sysfs_aliases + pmu->sys_json_aliases;
- if (pmu->cpu_aliases_added)
- nr += pmu->cpu_json_aliases;
- else if (pmu->events_table)
- nr += pmu_events_table__num_events(pmu->events_table, pmu) -
- pmu->cpu_common_json_aliases;
- else
+ if (pmu->cpu_aliases_added) {
+ nr += pmu->cpu_json_aliases;
+ } else if (pmu->events_table || pmu->is_core) {
+ nr += pmu_events_table__num_events(pmu->events_table, pmu);
+ if (pmu->is_core) {
+ nr += pmu_events_table__num_events(
+ perf_pmu__default_core_events_table(), pmu);
+ }
+ nr -= pmu->cpu_common_json_aliases;
+ } else {
assert(pmu->cpu_json_aliases == 0 && pmu->cpu_common_json_aliases == 0);
+ }
if (perf_pmu__is_tool(pmu))
nr -= tool_pmu__num_skip_events();
@@ -1901,18 +2153,37 @@ static int sub_non_neg(int a, int b)
static char *format_alias(char *buf, int len, const struct perf_pmu *pmu,
const struct perf_pmu_alias *alias, bool skip_duplicate_pmus)
{
+ struct parse_events_terms terms;
struct parse_events_term *term;
+ int ret, used;
size_t pmu_name_len = pmu_deduped_name_len(pmu, pmu->name,
skip_duplicate_pmus);
- int used = snprintf(buf, len, "%.*s/%s", (int)pmu_name_len, pmu->name, alias->name);
- list_for_each_entry(term, &alias->terms.terms, list) {
+ /* Parameterized events (terms containing "=?") are shown without their terms. */
+ if (strstr(alias->terms, "=?")) {
+ snprintf(buf, len, "%.*s/%s/", (int)pmu_name_len, pmu->name, alias->name);
+ return buf;
+ }
+
+ parse_events_terms__init(&terms);
+ ret = parse_events_terms(&terms, alias->terms);
+ if (ret) {
+ pr_err("Failure to parse '%s' terms '%s': %d\n",
+ alias->name, alias->terms, ret);
+ parse_events_terms__exit(&terms);
+ snprintf(buf, len, "%.*s/%s/", (int)pmu_name_len, pmu->name, alias->name);
+ return buf;
+ }
+ used = snprintf(buf, len, "%.*s/%s", (int)pmu_name_len, pmu->name, alias->name);
+
+ list_for_each_entry(term, &terms.terms, list) {
if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR)
used += snprintf(buf + used, sub_non_neg(len, used),
",%s=%s", term->config,
term->val.str);
}
-
+ parse_events_terms__exit(&terms);
if (sub_non_neg(len, used) > 0) {
buf[used] = '/';
used++;
@@ -1926,25 +2197,65 @@ static char *format_alias(char *buf, int len, const struct perf_pmu *pmu,
return buf;
}
+static bool perf_pmu_alias__check_deprecated(struct perf_pmu *pmu, struct perf_pmu_alias *alias)
+{
+ struct perf_event_attr attr = {.config = 0,};
+ const char *check_terms;
+ bool has_legacy_config;
+
+ if (alias->legacy_deprecated_checked)
+ return alias->deprecated;
+
+ alias->legacy_deprecated_checked = true;
+ if (alias->deprecated)
+ return true;
+
+ check_terms = alias->terms;
+ has_legacy_config =
+ strstr(check_terms, "legacy-hardware-config=") != NULL ||
+ strstr(check_terms, "legacy-cache-config=") != NULL;
+ if (!has_legacy_config && alias->legacy_terms) {
+ check_terms = alias->legacy_terms;
+ has_legacy_config =
+ strstr(check_terms, "legacy-hardware-config=") != NULL ||
+ strstr(check_terms, "legacy-cache-config=") != NULL;
+ }
+ if (!has_legacy_config)
+ return false;
+
+ if (perf_pmu__parse_terms_to_attr(pmu, check_terms, &attr) != 0) {
+ /* Parsing failed, set as deprecated. */
+ alias->deprecated = true;
+ } else if (attr.type < PERF_TYPE_MAX) {
+ /* Flag unsupported legacy events as deprecated. */
+ alias->deprecated = !is_event_supported(attr.type, attr.config);
+ }
+ return alias->deprecated;
+}
+
int perf_pmu__for_each_event(struct perf_pmu *pmu, bool skip_duplicate_pmus,
void *state, pmu_event_callback cb)
{
char buf[1024];
- struct perf_pmu_alias *event;
struct pmu_event_info info = {
.pmu = pmu,
.event_type_desc = "Kernel PMU event",
};
int ret = 0;
- struct strbuf sb;
+ struct hashmap_entry *entry;
+ size_t bkt;
+ if (perf_pmu__is_tracepoint(pmu))
+ return tp_pmu__for_each_event(pmu, state, cb);
if (perf_pmu__is_hwmon(pmu))
return hwmon_pmu__for_each_event(pmu, state, cb);
+ if (perf_pmu__is_drm(pmu))
+ return drm_pmu__for_each_event(pmu, state, cb);
- strbuf_init(&sb, /*hint=*/ 0);
pmu_aliases_parse(pmu);
pmu_add_cpu_aliases(pmu);
- list_for_each_entry(event, &pmu->aliases, list) {
+ hashmap__for_each_entry(pmu->aliases, entry, bkt) {
+ struct perf_pmu_alias *event = entry->pvalue;
size_t buf_used, pmu_name_len;
if (perf_pmu__is_tool(pmu) && tool_pmu__skip_event(event->name))
@@ -1975,16 +2286,14 @@ int perf_pmu__for_each_event(struct perf_pmu *pmu, bool skip_duplicate_pmus,
info.desc = event->desc;
info.long_desc = event->long_desc;
info.encoding_desc = buf + buf_used;
- parse_events_terms__to_strbuf(&event->terms, &sb);
buf_used += snprintf(buf + buf_used, sizeof(buf) - buf_used,
- "%.*s/%s/", (int)pmu_name_len, info.pmu_name, sb.buf) + 1;
+ "%.*s/%s/", (int)pmu_name_len, info.pmu_name, event->terms) + 1;
+ info.str = event->terms;
info.topic = event->topic;
- info.str = sb.buf;
- info.deprecated = event->deprecated;
+ info.deprecated = perf_pmu_alias__check_deprecated(pmu, event);
ret = cb(state, &info);
if (ret)
goto out;
- strbuf_setlen(&sb, /*len=*/ 0);
}
if (pmu->selectable) {
info.name = buf;
@@ -2000,7 +2309,6 @@ int perf_pmu__for_each_event(struct perf_pmu *pmu, bool skip_duplicate_pmus,
ret = cb(state, &info);
}
out:
- strbuf_release(&sb);
return ret;
}
@@ -2052,6 +2360,9 @@ static bool perf_pmu___name_match(const struct perf_pmu *pmu, const char *to_mat
for (size_t i = 0; i < ARRAY_SIZE(names); i++) {
const char *name = names[i];
+ if (!name)
+ continue;
+
if (wildcard && perf_pmu__match_wildcard_uncore(name, to_match))
return true;
if (!wildcard && perf_pmu__match_ignoring_suffix_uncore(name, to_match))
@@ -2211,6 +2522,17 @@ static void perf_pmu__del_caps(struct perf_pmu *pmu)
}
}
+struct perf_pmu_caps *perf_pmu__get_cap(struct perf_pmu *pmu, const char *name)
+{
+ struct perf_pmu_caps *caps;
+
+ list_for_each_entry(caps, &pmu->caps, list) {
+ if (!strcmp(caps->name, name))
+ return caps;
+ }
+ return NULL;
+}
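+/*
+ * Illustrative usage, assuming caps were loaded by perf_pmu__caps_parse()
+ * and the PMU exposes a "branches" capability in sysfs:
+ *
+ *	struct perf_pmu_caps *caps = perf_pmu__get_cap(pmu, "branches");
+ *
+ *	if (caps)
+ *		pr_debug("branches: %s\n", caps->value);
+ */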
+
/*
* Reading/parsing the given pmu capabilities, which should be located at:
* /sys/bus/event_source/devices/<dev>/caps as sysfs group attributes.
@@ -2401,8 +2723,13 @@ int perf_pmu__pathname_fd(int dirfd, const char *pmu_name, const char *filename,
void perf_pmu__delete(struct perf_pmu *pmu)
{
+ if (!pmu)
+ return;
+
if (perf_pmu__is_hwmon(pmu))
hwmon_pmu__exit(pmu);
+ else if (perf_pmu__is_drm(pmu))
+ drm_pmu__exit(pmu);
perf_pmu__del_formats(&pmu->format);
perf_pmu__del_aliases(pmu);
@@ -2418,18 +2745,18 @@ void perf_pmu__delete(struct perf_pmu *pmu)
const char *perf_pmu__name_from_config(struct perf_pmu *pmu, u64 config)
{
- struct perf_pmu_alias *event;
+ struct hashmap_entry *entry;
+ size_t bkt;
if (!pmu)
return NULL;
pmu_aliases_parse(pmu);
pmu_add_cpu_aliases(pmu);
- list_for_each_entry(event, &pmu->aliases, list) {
+ hashmap__for_each_entry(pmu->aliases, entry, bkt) {
+ struct perf_pmu_alias *event = entry->pvalue;
struct perf_event_attr attr = {.config = 0,};
-
- int ret = perf_pmu__config(pmu, &attr, &event->terms, /*apply_hardcoded=*/true,
- /*err=*/NULL);
+ int ret = perf_pmu__parse_terms_to_attr(pmu, event->terms, &attr);
if (ret == 0 && config == attr.config)
return event->name;
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index b93014cc3670..8f11bfe8ed6d 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -14,6 +14,7 @@
#include "mem-events.h"
struct evsel_config_term;
+struct hashmap;
struct perf_cpu_map;
struct print_callbacks;
@@ -22,6 +23,7 @@ enum {
PERF_PMU_FORMAT_VALUE_CONFIG1,
PERF_PMU_FORMAT_VALUE_CONFIG2,
PERF_PMU_FORMAT_VALUE_CONFIG3,
+ PERF_PMU_FORMAT_VALUE_CONFIG4,
PERF_PMU_FORMAT_VALUE_CONFIG_END,
};
@@ -36,9 +38,24 @@ struct perf_pmu_caps {
struct list_head list;
};
+enum pmu_kind {
+ /* A perf event syscall PMU. */
+ PERF_PMU_KIND_PE,
+ /* A perf tool provided DRM PMU. */
+ PERF_PMU_KIND_DRM,
+ /* A perf tool provided HWMON PMU. */
+ PERF_PMU_KIND_HWMON,
+ /* Perf tool provided PMU for tool events like time. */
+ PERF_PMU_KIND_TOOL,
+ /* A testing PMU kind. */
+ PERF_PMU_KIND_FAKE
+};
+
enum {
PERF_PMU_TYPE_PE_START = 0,
- PERF_PMU_TYPE_PE_END = 0xFFFEFFFF,
+ PERF_PMU_TYPE_PE_END = 0xFFFDFFFF,
+ PERF_PMU_TYPE_DRM_START = 0xFFFE0000,
+ PERF_PMU_TYPE_DRM_END = 0xFFFEFFFF,
PERF_PMU_TYPE_HWMON_START = 0xFFFF0000,
PERF_PMU_TYPE_HWMON_END = 0xFFFFFFFD,
PERF_PMU_TYPE_TOOL = 0xFFFFFFFE,
@@ -125,7 +142,7 @@ struct perf_pmu {
* event read from <sysfs>/bus/event_source/devices/<name>/events/ or
* from json events in pmu-events.c.
*/
- struct list_head aliases;
+ struct hashmap *aliases;
/**
* @events_table: The events table for json events in pmu-events.c.
*/
@@ -194,6 +211,9 @@ struct perf_pmu {
struct perf_pmu_info {
const char *unit;
double scale;
+ double retirement_latency_mean;
+ double retirement_latency_min;
+ double retirement_latency_max;
bool per_pkg;
bool snapshot;
};
@@ -274,6 +294,8 @@ bool pmu_uncore_identifier_match(const char *compat, const char *id);
int perf_pmu__convert_scale(const char *scale, char **end, double *sval);
+struct perf_pmu_caps *perf_pmu__get_cap(struct perf_pmu *pmu, const char *name);
+
int perf_pmu__caps_parse(struct perf_pmu *pmu);
void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
@@ -289,13 +311,32 @@ int perf_pmu__pathname_scnprintf(char *buf, size_t size,
int perf_pmu__event_source_devices_fd(void);
int perf_pmu__pathname_fd(int dirfd, const char *pmu_name, const char *filename, int flags);
+int perf_pmu__init(struct perf_pmu *pmu, __u32 type, const char *name);
struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char *lookup_name,
bool eager_load);
struct perf_pmu *perf_pmu__create_placeholder_core_pmu(struct list_head *core_pmus);
void perf_pmu__delete(struct perf_pmu *pmu);
-struct perf_pmu *perf_pmus__find_core_pmu(void);
const char *perf_pmu__name_from_config(struct perf_pmu *pmu, u64 config);
bool perf_pmu__is_fake(const struct perf_pmu *pmu);
+static inline enum pmu_kind perf_pmu__kind(const struct perf_pmu *pmu)
+{
+ __u32 type;
+
+ if (!pmu)
+ return PERF_PMU_KIND_PE;
+
+ type = pmu->type;
+ if (type <= PERF_PMU_TYPE_PE_END)
+ return PERF_PMU_KIND_PE;
+ if (type <= PERF_PMU_TYPE_DRM_END)
+ return PERF_PMU_KIND_DRM;
+ if (type <= PERF_PMU_TYPE_HWMON_END)
+ return PERF_PMU_KIND_HWMON;
+ if (type == PERF_PMU_TYPE_TOOL)
+ return PERF_PMU_KIND_TOOL;
+ return PERF_PMU_KIND_FAKE;
+}
+
#endif /* __PMU_H */
diff --git a/tools/perf/util/pmus.c b/tools/perf/util/pmus.c
index b99292de7669..98be2eb8f1f0 100644
--- a/tools/perf/util/pmus.c
+++ b/tools/perf/util/pmus.c
@@ -12,6 +12,7 @@
#include <unistd.h>
#include "cpumap.h"
#include "debug.h"
+#include "drm_pmu.h"
#include "evsel.h"
#include "pmus.h"
#include "pmu.h"
@@ -19,6 +20,7 @@
#include "tool_pmu.h"
#include "print-events.h"
#include "strbuf.h"
+#include "string2.h"
/*
 * core_pmus: A PMU belongs to core_pmus if its name is "cpu" or its sysfs
@@ -42,16 +44,19 @@ enum perf_tool_pmu_type {
PERF_TOOL_PMU_TYPE_PE_OTHER,
PERF_TOOL_PMU_TYPE_TOOL,
PERF_TOOL_PMU_TYPE_HWMON,
+ PERF_TOOL_PMU_TYPE_DRM,
#define PERF_TOOL_PMU_TYPE_PE_CORE_MASK (1 << PERF_TOOL_PMU_TYPE_PE_CORE)
#define PERF_TOOL_PMU_TYPE_PE_OTHER_MASK (1 << PERF_TOOL_PMU_TYPE_PE_OTHER)
#define PERF_TOOL_PMU_TYPE_TOOL_MASK (1 << PERF_TOOL_PMU_TYPE_TOOL)
#define PERF_TOOL_PMU_TYPE_HWMON_MASK (1 << PERF_TOOL_PMU_TYPE_HWMON)
+#define PERF_TOOL_PMU_TYPE_DRM_MASK (1 << PERF_TOOL_PMU_TYPE_DRM)
#define PERF_TOOL_PMU_TYPE_ALL_MASK (PERF_TOOL_PMU_TYPE_PE_CORE_MASK | \
PERF_TOOL_PMU_TYPE_PE_OTHER_MASK | \
PERF_TOOL_PMU_TYPE_TOOL_MASK | \
- PERF_TOOL_PMU_TYPE_HWMON_MASK)
+ PERF_TOOL_PMU_TYPE_HWMON_MASK | \
+ PERF_TOOL_PMU_TYPE_DRM_MASK)
};
static unsigned int read_pmu_types;
@@ -172,6 +177,8 @@ struct perf_pmu *perf_pmus__find(const char *name)
/* Looking up an individual perf event PMU failed, check if a tool PMU should be read. */
if (!strncmp(name, "hwmon_", 6))
to_read_pmus |= PERF_TOOL_PMU_TYPE_HWMON_MASK;
+ else if (!strncmp(name, "drm_", 4))
+ to_read_pmus |= PERF_TOOL_PMU_TYPE_DRM_MASK;
else if (!strcmp(name, "tool"))
to_read_pmus |= PERF_TOOL_PMU_TYPE_TOOL_MASK;
@@ -272,6 +279,10 @@ skip_pe_pmus:
(read_pmu_types & PERF_TOOL_PMU_TYPE_HWMON_MASK) == 0)
perf_pmus__read_hwmon_pmus(&other_pmus);
+ if ((to_read_types & PERF_TOOL_PMU_TYPE_DRM_MASK) != 0 &&
+ (read_pmu_types & PERF_TOOL_PMU_TYPE_DRM_MASK) == 0)
+ perf_pmus__read_drm_pmus(&other_pmus);
+
list_sort(NULL, &other_pmus, pmus_cmp);
read_pmu_types |= to_read_types;
@@ -304,6 +315,8 @@ struct perf_pmu *perf_pmus__find_by_type(unsigned int type)
if (type >= PERF_PMU_TYPE_PE_START && type <= PERF_PMU_TYPE_PE_END) {
to_read_pmus = PERF_TOOL_PMU_TYPE_PE_CORE_MASK |
PERF_TOOL_PMU_TYPE_PE_OTHER_MASK;
+ } else if (type >= PERF_PMU_TYPE_DRM_START && type <= PERF_PMU_TYPE_DRM_END) {
+ to_read_pmus = PERF_TOOL_PMU_TYPE_DRM_MASK;
} else if (type >= PERF_PMU_TYPE_HWMON_START && type <= PERF_PMU_TYPE_HWMON_END) {
to_read_pmus = PERF_TOOL_PMU_TYPE_HWMON_MASK;
} else {
@@ -350,6 +363,92 @@ struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu)
return NULL;
}
+struct perf_pmu *perf_pmus__scan_for_event(struct perf_pmu *pmu, const char *event)
+{
+ bool use_core_pmus = !pmu || pmu->is_core;
+
+ if (!pmu) {
+ /* Hwmon filename values that aren't used. */
+ enum hwmon_type type;
+ int number;
+ /*
+ * Core PMUs, other sysfs PMUs and the tool PMU can take all event
+ * types or aren't worth optimizing for.
+ */
+ unsigned int to_read_pmus = PERF_TOOL_PMU_TYPE_PE_CORE_MASK |
+ PERF_TOOL_PMU_TYPE_PE_OTHER_MASK |
+ PERF_TOOL_PMU_TYPE_TOOL_MASK;
+
+ /* Could the event be a hwmon event? */
+ if (parse_hwmon_filename(event, &type, &number, /*item=*/NULL, /*alarm=*/NULL))
+ to_read_pmus |= PERF_TOOL_PMU_TYPE_HWMON_MASK;
+
+ /* Could the event be a DRM event? */
+ if (strlen(event) > 4 && strncmp("drm-", event, 4) == 0)
+ to_read_pmus |= PERF_TOOL_PMU_TYPE_DRM_MASK;
+
+ pmu_read_sysfs(to_read_pmus);
+ pmu = list_prepare_entry(pmu, &core_pmus, list);
+ }
+ if (use_core_pmus) {
+ list_for_each_entry_continue(pmu, &core_pmus, list)
+ return pmu;
+
+ pmu = NULL;
+ pmu = list_prepare_entry(pmu, &other_pmus, list);
+ }
+ list_for_each_entry_continue(pmu, &other_pmus, list)
+ return pmu;
+ return NULL;
+}
+
+struct perf_pmu *perf_pmus__scan_matching_wildcard(struct perf_pmu *pmu, const char *wildcard)
+{
+ bool use_core_pmus = !pmu || pmu->is_core;
+
+ if (!pmu) {
+ /*
+ * Core PMUs, other sysfs PMUs and the tool PMU can have any name or
+ * aren't worth optimizing for.
+ */
+ unsigned int to_read_pmus = PERF_TOOL_PMU_TYPE_PE_CORE_MASK |
+ PERF_TOOL_PMU_TYPE_PE_OTHER_MASK |
+ PERF_TOOL_PMU_TYPE_TOOL_MASK;
+
+ /*
+ * Hwmon PMUs have an alias from a sysfs name like hwmon0,
+ * hwmon1, etc. or have a name of hwmon_<name>. They therefore
+ * can only have a wildcard match if the wildcard begins with
+ * "hwmon". Similarly drm PMUs must start "drm_", avoid reading
+ * such events unless the PMU could match.
+ */
+ if (strisglob(wildcard)) {
+ to_read_pmus |= PERF_TOOL_PMU_TYPE_HWMON_MASK |
+ PERF_TOOL_PMU_TYPE_DRM_MASK;
+ } else if (strlen(wildcard) >= 4 && strncmp("drm_", wildcard, 4) == 0) {
+ to_read_pmus |= PERF_TOOL_PMU_TYPE_DRM_MASK;
+ } else if (strlen(wildcard) >= 5 && strncmp("hwmon", wildcard, 5) == 0) {
+ to_read_pmus |= PERF_TOOL_PMU_TYPE_HWMON_MASK;
+ }
+
+ pmu_read_sysfs(to_read_pmus);
+ pmu = list_prepare_entry(pmu, &core_pmus, list);
+ }
+ if (use_core_pmus) {
+ list_for_each_entry_continue(pmu, &core_pmus, list) {
+ if (perf_pmu__wildcard_match(pmu, wildcard))
+ return pmu;
+ }
+ pmu = NULL;
+ pmu = list_prepare_entry(pmu, &other_pmus, list);
+ }
+ list_for_each_entry_continue(pmu, &other_pmus, list) {
+ if (perf_pmu__wildcard_match(pmu, wildcard))
+ return pmu;
+ }
+ return NULL;
+}
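+/*
+ * Illustrative usage: iterate every PMU whose name matches a glob, e.g.
+ * all uncore PMUs:
+ *
+ *	struct perf_pmu *pmu = NULL;
+ *
+ *	while ((pmu = perf_pmus__scan_matching_wildcard(pmu, "uncore_*")))
+ *		pr_debug("matched %s\n", pmu->name);
+ */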
+
static struct perf_pmu *perf_pmus__scan_skip_duplicates(struct perf_pmu *pmu)
{
bool use_core_pmus = !pmu || pmu->is_core;
@@ -546,6 +645,7 @@ void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *p
print_cb->print_event(print_state,
aliases[j].topic,
aliases[j].pmu_name,
+ aliases[j].pmu->type,
aliases[j].name,
aliases[j].alias,
aliases[j].scale_unit,
@@ -650,6 +750,7 @@ void perf_pmus__print_raw_pmu_events(const struct print_callbacks *print_cb, voi
print_cb->print_event(print_state,
/*topic=*/NULL,
/*pmu_name=*/NULL,
+ pmu->type,
format_args.short_string.buf,
/*event_alias=*/NULL,
/*scale_unit=*/NULL,
@@ -715,27 +816,39 @@ bool perf_pmus__supports_extended_type(void)
return perf_pmus__do_support_extended_type;
}
+struct perf_pmu *perf_pmus__find_by_attr(const struct perf_event_attr *attr)
+{
+ struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);
+ u32 type = attr->type;
+ bool legacy_core_type = type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE;
+
+ if (!pmu && legacy_core_type && perf_pmus__supports_extended_type()) {
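+ /*
+ * With extended types, a legacy hardware/cache event carries its PMU
+ * in config's upper bits (PERF_PMU_TYPE_SHIFT is 32), e.g.
+ * config = ((__u64)pmu_type << 32) | PERF_COUNT_HW_CPU_CYCLES.
+ */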
+ type = attr->config >> PERF_PMU_TYPE_SHIFT;
+
+ pmu = perf_pmus__find_by_type(type);
+ }
+ if (!pmu && (legacy_core_type || type == PERF_TYPE_RAW)) {
+ /*
+ * For legacy events, if there was no extended type info then
+ * assume the PMU is the first core PMU.
+ *
+ * On architectures like ARM there is no sysfs PMU with type
+ * PERF_TYPE_RAW, assume the RAW events are going to be handled
+ * by the first core PMU.
+ */
+ pmu = perf_pmus__find_core_pmu();
+ }
+ return pmu;
+}
+
struct perf_pmu *evsel__find_pmu(const struct evsel *evsel)
{
struct perf_pmu *pmu = evsel->pmu;
- bool legacy_core_type;
if (pmu)
return pmu;
- pmu = perf_pmus__find_by_type(evsel->core.attr.type);
- legacy_core_type =
- evsel->core.attr.type == PERF_TYPE_HARDWARE ||
- evsel->core.attr.type == PERF_TYPE_HW_CACHE;
- if (!pmu && legacy_core_type) {
- if (perf_pmus__supports_extended_type()) {
- u32 type = evsel->core.attr.config >> PERF_PMU_TYPE_SHIFT;
-
- pmu = perf_pmus__find_by_type(type);
- } else {
- pmu = perf_pmus__find_core_pmu();
- }
- }
+ pmu = perf_pmus__find_by_attr(&evsel->core.attr);
((struct evsel *)evsel)->pmu = pmu;
return pmu;
}
@@ -755,7 +868,7 @@ struct perf_pmu *perf_pmus__add_test_pmu(int test_sysfs_dirfd, const char *name)
return perf_pmu__lookup(&other_pmus, test_sysfs_dirfd, name, /*eager_load=*/true);
}
-struct perf_pmu *perf_pmus__add_test_hwmon_pmu(int hwmon_dir,
+struct perf_pmu *perf_pmus__add_test_hwmon_pmu(const char *hwmon_dir,
const char *sysfs_name,
const char *name)
{
diff --git a/tools/perf/util/pmus.h b/tools/perf/util/pmus.h
index 8def20e615ad..7cb36863711a 100644
--- a/tools/perf/util/pmus.h
+++ b/tools/perf/util/pmus.h
@@ -5,6 +5,7 @@
#include <stdbool.h>
#include <stddef.h>
+struct perf_event_attr;
struct perf_pmu;
struct print_callbacks;
@@ -16,9 +17,12 @@ void perf_pmus__destroy(void);
struct perf_pmu *perf_pmus__find(const char *name);
struct perf_pmu *perf_pmus__find_by_type(unsigned int type);
+struct perf_pmu *perf_pmus__find_by_attr(const struct perf_event_attr *attr);
struct perf_pmu *perf_pmus__scan(struct perf_pmu *pmu);
struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu);
+struct perf_pmu *perf_pmus__scan_for_event(struct perf_pmu *pmu, const char *event);
+struct perf_pmu *perf_pmus__scan_matching_wildcard(struct perf_pmu *pmu, const char *wildcard);
const struct perf_pmu *perf_pmus__pmu_for_pmu_filter(const char *str);
@@ -29,9 +33,10 @@ int perf_pmus__num_core_pmus(void);
bool perf_pmus__supports_extended_type(void);
struct perf_pmu *perf_pmus__add_test_pmu(int test_sysfs_dirfd, const char *name);
-struct perf_pmu *perf_pmus__add_test_hwmon_pmu(int hwmon_dir,
+struct perf_pmu *perf_pmus__add_test_hwmon_pmu(const char *hwmon_dir,
const char *sysfs_name,
const char *name);
struct perf_pmu *perf_pmus__fake_pmu(void);
+struct perf_pmu *perf_pmus__find_core_pmu(void);
#endif /* __PMUS_H */
diff --git a/tools/perf/util/powerpc-vpadtl.c b/tools/perf/util/powerpc-vpadtl.c
new file mode 100644
index 000000000000..d1c3396f182f
--- /dev/null
+++ b/tools/perf/util/powerpc-vpadtl.c
@@ -0,0 +1,733 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VPA DTL PMU support
+ */
+
+#include <linux/string.h>
+#include <errno.h>
+#include <inttypes.h>
+#include "color.h"
+#include "evlist.h"
+#include "session.h"
+#include "auxtrace.h"
+#include "data.h"
+#include "machine.h"
+#include "debug.h"
+#include "powerpc-vpadtl.h"
+#include "sample.h"
+#include "tool.h"
+
+/*
+ * Structure to save the auxtrace queue
+ */
+struct powerpc_vpadtl {
+ struct auxtrace auxtrace;
+ struct auxtrace_queues queues;
+ struct auxtrace_heap heap;
+ u32 auxtrace_type;
+ struct perf_session *session;
+ struct machine *machine;
+ u32 pmu_type;
+ u64 sample_id;
+};
+
+struct boottb_freq {
+ u64 boot_tb;
+ u64 tb_freq;
+ u64 timebase;
+ u64 padded[3];
+};
+
+struct powerpc_vpadtl_queue {
+ struct powerpc_vpadtl *vpa;
+ unsigned int queue_nr;
+ struct auxtrace_buffer *buffer;
+ struct thread *thread;
+ bool on_heap;
+ struct powerpc_vpadtl_entry *dtl;
+ u64 timestamp;
+ unsigned long pkt_len;
+ unsigned long buf_len;
+ u64 boot_tb;
+ u64 tb_freq;
+ unsigned int tb_buffer;
+ unsigned int size;
+ bool done;
+ pid_t pid;
+ pid_t tid;
+ int cpu;
+};
+
+const char *dispatch_reasons[11] = {
+ "external_interrupt",
+ "firmware_internal_event",
+ "H_PROD",
+ "decrementer_interrupt",
+ "system_reset",
+ "firmware_internal_event",
+ "conferred_cycles",
+ "time_slice",
+ "virtual_memory_page_fault",
+ "expropriated_adjunct",
+ "priv_doorbell"};
+
+const char *preempt_reasons[10] = {
+ "unused",
+ "firmware_internal_event",
+ "H_CEDE",
+ "H_CONFER",
+ "time_slice",
+ "migration_hibernation_page_fault",
+ "virtual_memory_page_fault",
+ "H_CONFER_ADJUNCT",
+ "hcall_adjunct",
+ "HDEC_adjunct"};
+
+#define dtl_entry_size sizeof(struct powerpc_vpadtl_entry)
+
+/*
+ * Function to dump the dispatch trace data when perf report
+ * is invoked with -D
+ */
+static void powerpc_vpadtl_dump(struct powerpc_vpadtl *vpa __maybe_unused,
+ unsigned char *buf, size_t len)
+{
+ struct powerpc_vpadtl_entry *dtl;
+ int pkt_len, pos = 0;
+ const char *color = PERF_COLOR_BLUE;
+
+ color_fprintf(stdout, color,
+ ". ... VPA DTL PMU data: size %zu bytes, entries is %zu\n",
+ len, len/dtl_entry_size);
+
+ if (len % dtl_entry_size)
+ len = len - (len % dtl_entry_size);
+
+ while (len) {
+ pkt_len = dtl_entry_size;
+ printf(".");
+ color_fprintf(stdout, color, " %08x: ", pos);
+ dtl = (struct powerpc_vpadtl_entry *)buf;
+ if (dtl->timebase != 0) {
+ printf("dispatch_reason:%s, preempt_reason:%s, "
+ "enqueue_to_dispatch_time:%d, ready_to_enqueue_time:%d, "
+ "waiting_to_ready_time:%d\n",
+ dispatch_reasons[dtl->dispatch_reason],
+ preempt_reasons[dtl->preempt_reason],
+ be32_to_cpu(dtl->enqueue_to_dispatch_time),
+ be32_to_cpu(dtl->ready_to_enqueue_time),
+ be32_to_cpu(dtl->waiting_to_ready_time));
+ } else {
+ struct boottb_freq *boot_tb = (struct boottb_freq *)buf;
+
+ printf("boot_tb: %" PRIu64 ", tb_freq: %" PRIu64 "\n",
+ boot_tb->boot_tb, boot_tb->tb_freq);
+ }
+
+ pos += pkt_len;
+ buf += pkt_len;
+ len -= pkt_len;
+ }
+}
+
+static unsigned long long powerpc_vpadtl_timestamp(struct powerpc_vpadtl_queue *vpaq)
+{
+ struct powerpc_vpadtl_entry *record = vpaq->dtl;
+ unsigned long long timestamp = 0;
+ unsigned long long boot_tb;
+ unsigned long long diff;
+ double result, div;
+ double boot_freq;
+ /*
+ * Formula used to get a timestamp that can be correlated with
+ * other perf events:
+ * ((timebase from DTL entry - boot timebase) / frequency) * 1000000000
+ */
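+ /*
+ * Worked example (illustrative values only): with boot_tb = 1000000,
+ * tb_freq = 512000000 and a DTL timebase of 1513000000, diff is
+ * 1512000000, diff / tb_freq is 2.953125 and the returned timestamp
+ * is 2953125000 ns.
+ */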
+ if (record->timebase) {
+ boot_tb = vpaq->boot_tb;
+ boot_freq = vpaq->tb_freq;
+ diff = be64_to_cpu(record->timebase) - boot_tb;
+ div = diff / boot_freq;
+ result = div;
+ result = result * 1000000000;
+ timestamp = result;
+ }
+
+ return timestamp;
+}
+
+static struct powerpc_vpadtl *session_to_vpa(struct perf_session *session)
+{
+ return container_of(session->auxtrace, struct powerpc_vpadtl, auxtrace);
+}
+
+static void powerpc_vpadtl_dump_event(struct powerpc_vpadtl *vpa, unsigned char *buf,
+ size_t len)
+{
+ printf(".\n");
+ powerpc_vpadtl_dump(vpa, buf, len);
+}
+
+/*
+ * Generate perf sample for each entry in the dispatch trace log.
+ * - sample ip is picked from srr0 field of powerpc_vpadtl_entry
+ * - sample cpu is logical cpu.
+ * - cpumode is set to PERF_RECORD_MISC_KERNEL
+ * - Additionally save the details in raw_data of sample. This
+ * is to print the relevant fields in perf_sample__fprintf_synth()
+ * when called from builtin-script
+ */
+static int powerpc_vpadtl_sample(struct powerpc_vpadtl_entry *record,
+ struct powerpc_vpadtl *vpa, u64 save, int cpu)
+{
+ struct perf_sample sample;
+ union perf_event event;
+
+ sample.ip = be64_to_cpu(record->srr0);
+ sample.period = 1;
+ sample.cpu = cpu;
+ sample.id = vpa->sample_id;
+ sample.callchain = NULL;
+ sample.branch_stack = NULL;
+ memset(&event, 0, sizeof(event));
+ sample.cpumode = PERF_RECORD_MISC_KERNEL;
+ sample.time = save;
+ sample.raw_data = record;
+ sample.raw_size = sizeof(*record);
+ event.sample.header.type = PERF_RECORD_SAMPLE;
+ event.sample.header.misc = sample.cpumode;
+ event.sample.header.size = sizeof(struct perf_event_header);
+
+ if (perf_session__deliver_synth_event(vpa->session, &event, &sample)) {
+ pr_debug("Failed to create sample for dtl entry\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int powerpc_vpadtl_get_buffer(struct powerpc_vpadtl_queue *vpaq)
+{
+ struct auxtrace_buffer *buffer = vpaq->buffer;
+ struct auxtrace_queues *queues = &vpaq->vpa->queues;
+ struct auxtrace_queue *queue;
+
+ queue = &queues->queue_array[vpaq->queue_nr];
+ buffer = auxtrace_buffer__next(queue, buffer);
+
+ if (!buffer)
+ return 0;
+
+ vpaq->buffer = buffer;
+ vpaq->size = buffer->size;
+
+ /* If the aux_buffer doesn't have data associated, try to load it */
+ if (!buffer->data) {
+ /* get the file desc associated with the perf data file */
+ int fd = perf_data__fd(vpaq->vpa->session->data);
+
+ buffer->data = auxtrace_buffer__get_data(buffer, fd);
+ if (!buffer->data)
+ return -ENOMEM;
+ }
+
+ vpaq->buf_len = buffer->size;
+
+ if (buffer->size % dtl_entry_size)
+ vpaq->buf_len = buffer->size - (buffer->size % dtl_entry_size);
+
+ if (vpaq->tb_buffer != buffer->buffer_nr) {
+ vpaq->pkt_len = 0;
+ vpaq->tb_buffer = 0;
+ }
+
+ return 1;
+}
+
+/*
+ * The first entry in each queue for the VPA DTL PMU holds the boot
+ * timebase and frequency, which are needed to compute timestamps that
+ * can be correlated with other events. Save boot_tb and tb_freq as part
+ * of powerpc_vpadtl_queue. The very next entry is the actual trace data
+ * to be returned.
+ */
+static int powerpc_vpadtl_decode(struct powerpc_vpadtl_queue *vpaq)
+{
+ int ret;
+ char *buf;
+ struct boottb_freq *boottb;
+
+ ret = powerpc_vpadtl_get_buffer(vpaq);
+ if (ret <= 0)
+ return ret;
+
+ boottb = (struct boottb_freq *)vpaq->buffer->data;
+ if (boottb->timebase == 0) {
+ vpaq->boot_tb = boottb->boot_tb;
+ vpaq->tb_freq = boottb->tb_freq;
+ vpaq->pkt_len += dtl_entry_size;
+ }
+
+ buf = vpaq->buffer->data;
+ buf += vpaq->pkt_len;
+ vpaq->dtl = (struct powerpc_vpadtl_entry *)buf;
+
+ vpaq->tb_buffer = vpaq->buffer->buffer_nr;
+ vpaq->buffer = NULL;
+ vpaq->buf_len = 0;
+
+ return 1;
+}
+
+static int powerpc_vpadtl_decode_all(struct powerpc_vpadtl_queue *vpaq)
+{
+ int ret;
+ unsigned char *buf;
+
+ if (!vpaq->buf_len || vpaq->pkt_len == vpaq->size) {
+ ret = powerpc_vpadtl_get_buffer(vpaq);
+ if (ret <= 0)
+ return ret;
+ }
+
+ if (vpaq->buffer) {
+ buf = vpaq->buffer->data;
+ buf += vpaq->pkt_len;
+ vpaq->dtl = (struct powerpc_vpadtl_entry *)buf;
+ if ((long long)be64_to_cpu(vpaq->dtl->timebase) <= 0) {
+ if (vpaq->pkt_len != dtl_entry_size && vpaq->buf_len) {
+ vpaq->pkt_len += dtl_entry_size;
+ vpaq->buf_len -= dtl_entry_size;
+ }
+ return -1;
+ }
+ vpaq->pkt_len += dtl_entry_size;
+ vpaq->buf_len -= dtl_entry_size;
+ } else {
+ return 0;
+ }
+
+ return 1;
+}
+
+static int powerpc_vpadtl_run_decoder(struct powerpc_vpadtl_queue *vpaq, u64 *timestamp)
+{
+ struct powerpc_vpadtl *vpa = vpaq->vpa;
+ struct powerpc_vpadtl_entry *record;
+ int ret;
+ unsigned long long vpaq_timestamp;
+
+ while (1) {
+ ret = powerpc_vpadtl_decode_all(vpaq);
+ if (!ret) {
+ pr_debug("All data in the queue has been processed.\n");
+ return 1;
+ }
+
+ /*
+ * An error was detected while decoding the VPA PMU trace. Continue
+ * to the next trace data to find more DTL entries.
+ */
+ if (ret < 0)
+ continue;
+
+ record = vpaq->dtl;
+
+ vpaq_timestamp = powerpc_vpadtl_timestamp(vpaq);
+
+ /* Update timestamp for the last record */
+ if (vpaq_timestamp > vpaq->timestamp)
+ vpaq->timestamp = vpaq_timestamp;
+
+ /*
+ * If the timestamp of the queue is later than the timestamp of the
+ * incoming perf event, bail out so the perf event can be processed
+ * first.
+ */
+ if (vpaq->timestamp >= *timestamp) {
+ *timestamp = vpaq->timestamp;
+ vpaq->pkt_len -= dtl_entry_size;
+ vpaq->buf_len += dtl_entry_size;
+ return 0;
+ }
+
+ ret = powerpc_vpadtl_sample(record, vpa, vpaq_timestamp, vpaq->cpu);
+ if (ret)
+ continue;
+ }
+ return 0;
+}
+
+/*
+ * For each PERF_RECORD_XX record, compare the timestamp of the perf
+ * record with the timestamp of the top element in the auxtrace heap.
+ * Process the auxtrace queue if the heap element's timestamp is lower
+ * than the perf record's timestamp.
+ *
+ * Update the timestamp of the auxtrace heap with the timestamp of the
+ * last processed entry from the auxtrace buffer.
+ */
+static int powerpc_vpadtl_process_queues(struct powerpc_vpadtl *vpa, u64 timestamp)
+{
+ unsigned int queue_nr;
+ u64 ts;
+ int ret;
+
+ while (1) {
+ struct auxtrace_queue *queue;
+ struct powerpc_vpadtl_queue *vpaq;
+
+ if (!vpa->heap.heap_cnt)
+ return 0;
+
+ if (vpa->heap.heap_array[0].ordinal >= timestamp)
+ return 0;
+
+ queue_nr = vpa->heap.heap_array[0].queue_nr;
+ queue = &vpa->queues.queue_array[queue_nr];
+ vpaq = queue->priv;
+
+ auxtrace_heap__pop(&vpa->heap);
+
+ if (vpa->heap.heap_cnt) {
+ ts = vpa->heap.heap_array[0].ordinal + 1;
+ if (ts > timestamp)
+ ts = timestamp;
+ } else {
+ ts = timestamp;
+ }
+
+ ret = powerpc_vpadtl_run_decoder(vpaq, &ts);
+ if (ret < 0) {
+ auxtrace_heap__add(&vpa->heap, queue_nr, ts);
+ return ret;
+ }
+
+ if (!ret) {
+ ret = auxtrace_heap__add(&vpa->heap, queue_nr, ts);
+ if (ret < 0)
+ return ret;
+ } else {
+ vpaq->on_heap = false;
+ }
+ }
+ return 0;
+}
+
+static struct powerpc_vpadtl_queue *powerpc_vpadtl__alloc_queue(struct powerpc_vpadtl *vpa,
+ unsigned int queue_nr)
+{
+ struct powerpc_vpadtl_queue *vpaq;
+
+ vpaq = zalloc(sizeof(*vpaq));
+ if (!vpaq)
+ return NULL;
+
+ vpaq->vpa = vpa;
+ vpaq->queue_nr = queue_nr;
+
+ return vpaq;
+}
+
+/*
+ * When the Dispatch Trace Log data is collected along with other events
+ * like sched tracepoint events, it needs to be correlated and presented
+ * interleaved with those events. Perf events can be collected in parallel
+ * across the CPUs.
+ *
+ * An auxtrace_queue is created for each CPU. Data within each queue is in
+ * increasing order of timestamp. Allocate and set up the auxtrace queues here.
+ * All auxtrace queues are maintained in an auxtrace heap ordered by
+ * timestamp, so the lowest timestamp (the entries to be processed first) is
+ * always on top of the heap.
+ *
+ * To add a queue to the auxtrace heap, fetch the timestamp from the first
+ * DTL entry of that queue.
+ */
+static int powerpc_vpadtl__setup_queue(struct powerpc_vpadtl *vpa,
+ struct auxtrace_queue *queue,
+ unsigned int queue_nr)
+{
+ struct powerpc_vpadtl_queue *vpaq = queue->priv;
+
+ if (list_empty(&queue->head) || vpaq)
+ return 0;
+
+ vpaq = powerpc_vpadtl__alloc_queue(vpa, queue_nr);
+ if (!vpaq)
+ return -ENOMEM;
+
+ queue->priv = vpaq;
+
+ if (queue->cpu != -1)
+ vpaq->cpu = queue->cpu;
+
+ if (!vpaq->on_heap) {
+ int ret;
+retry:
+ ret = powerpc_vpadtl_decode(vpaq);
+ if (!ret)
+ return 0;
+
+ if (ret < 0)
+ goto retry;
+
+ vpaq->timestamp = powerpc_vpadtl_timestamp(vpaq);
+
+ ret = auxtrace_heap__add(&vpa->heap, queue_nr, vpaq->timestamp);
+ if (ret)
+ return ret;
+ vpaq->on_heap = true;
+ }
+
+ return 0;
+}
+
+static int powerpc_vpadtl__setup_queues(struct powerpc_vpadtl *vpa)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < vpa->queues.nr_queues; i++) {
+ ret = powerpc_vpadtl__setup_queue(vpa, &vpa->queues.queue_array[i], i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int powerpc_vpadtl__update_queues(struct powerpc_vpadtl *vpa)
+{
+ if (vpa->queues.new_data) {
+ vpa->queues.new_data = false;
+ return powerpc_vpadtl__setup_queues(vpa);
+ }
+
+ return 0;
+}
+
+static int powerpc_vpadtl_process_event(struct perf_session *session,
+ union perf_event *event __maybe_unused,
+ struct perf_sample *sample,
+ const struct perf_tool *tool)
+{
+ struct powerpc_vpadtl *vpa = session_to_vpa(session);
+ int err = 0;
+
+ if (dump_trace)
+ return 0;
+
+ if (!tool->ordered_events) {
+ pr_err("VPA requires ordered events\n");
+ return -EINVAL;
+ }
+
+ if (sample->time) {
+ err = powerpc_vpadtl__update_queues(vpa);
+ if (err)
+ return err;
+
+ err = powerpc_vpadtl_process_queues(vpa, sample->time);
+ }
+
+ return err;
+}
+
+/*
+ * Process PERF_RECORD_AUXTRACE records
+ */
+static int powerpc_vpadtl_process_auxtrace_event(struct perf_session *session,
+ union perf_event *event,
+ const struct perf_tool *tool __maybe_unused)
+{
+ struct powerpc_vpadtl *vpa = session_to_vpa(session);
+ struct auxtrace_buffer *buffer;
+ int fd = perf_data__fd(session->data);
+ off_t data_offset;
+ int err;
+
+ if (!dump_trace)
+ return 0;
+
+ if (perf_data__is_pipe(session->data)) {
+ data_offset = 0;
+ } else {
+ data_offset = lseek(fd, 0, SEEK_CUR);
+ if (data_offset == -1)
+ return -errno;
+ }
+
+ err = auxtrace_queues__add_event(&vpa->queues, session, event,
+ data_offset, &buffer);
+
+ if (err)
+ return err;
+
+ /* Dump here now that we have copied a piped trace out of the pipe */
+ if (auxtrace_buffer__get_data(buffer, fd)) {
+ powerpc_vpadtl_dump_event(vpa, buffer->data, buffer->size);
+ auxtrace_buffer__put_data(buffer);
+ }
+
+ return 0;
+}
+
+static int powerpc_vpadtl_flush(struct perf_session *session __maybe_unused,
+ const struct perf_tool *tool __maybe_unused)
+{
+ return 0;
+}
+
+static void powerpc_vpadtl_free_events(struct perf_session *session)
+{
+ struct powerpc_vpadtl *vpa = session_to_vpa(session);
+ struct auxtrace_queues *queues = &vpa->queues;
+
+ for (unsigned int i = 0; i < queues->nr_queues; i++)
+ zfree(&queues->queue_array[i].priv);
+
+ auxtrace_queues__free(queues);
+}
+
+static void powerpc_vpadtl_free(struct perf_session *session)
+{
+ struct powerpc_vpadtl *vpa = session_to_vpa(session);
+
+ auxtrace_heap__free(&vpa->heap);
+ powerpc_vpadtl_free_events(session);
+ session->auxtrace = NULL;
+ free(vpa);
+}
+
+static const char * const powerpc_vpadtl_info_fmts[] = {
+ [POWERPC_VPADTL_TYPE] = " PMU Type %"PRId64"\n",
+};
+
+static void powerpc_vpadtl_print_info(__u64 *arr)
+{
+ if (!dump_trace)
+ return;
+
+ fprintf(stdout, powerpc_vpadtl_info_fmts[POWERPC_VPADTL_TYPE], arr[POWERPC_VPADTL_TYPE]);
+}
+
+static void set_event_name(struct evlist *evlist, u64 id,
+ const char *name)
+{
+ struct evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel->core.id && evsel->core.id[0] == id) {
+ if (evsel->name)
+ zfree(&evsel->name);
+ evsel->name = strdup(name);
+ break;
+ }
+ }
+}
+
+static int
+powerpc_vpadtl_synth_events(struct powerpc_vpadtl *vpa, struct perf_session *session)
+{
+ struct evlist *evlist = session->evlist;
+ struct evsel *evsel;
+ struct perf_event_attr attr;
+ bool found = false;
+ u64 id;
+ int err;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (strstarts(evsel->name, "vpa_dtl")) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ pr_debug("No selected events with VPA trace data\n");
+ return 0;
+ }
+
+ memset(&attr, 0, sizeof(struct perf_event_attr));
+ attr.size = sizeof(struct perf_event_attr);
+ attr.sample_type = evsel->core.attr.sample_type;
+ attr.sample_id_all = evsel->core.attr.sample_id_all;
+ attr.type = PERF_TYPE_SYNTH;
+ attr.config = PERF_SYNTH_POWERPC_VPA_DTL;
+
+ /* create new id val to be a fixed offset from evsel id */
+ id = auxtrace_synth_id_range_start(evsel);
+
+ err = perf_session__deliver_synth_attr_event(session, &attr, id);
+ if (err)
+ return err;
+
+ vpa->sample_id = id;
+ set_event_name(evlist, id, "vpa-dtl");
+
+ return 0;
+}
+
+/*
+ * Process the PERF_RECORD_AUXTRACE_INFO records and set up
+ * the infrastructure to process auxtrace events. PERF_RECORD_AUXTRACE_INFO
+ * is processed first since it is of type perf_user_event_type.
+ * Initialise the aux buffer queues using auxtrace_queues__init();
+ * an auxtrace_queue is created for each CPU.
+ */
+int powerpc_vpadtl_process_auxtrace_info(union perf_event *event,
+ struct perf_session *session)
+{
+ struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
+ size_t min_sz = sizeof(u64) * POWERPC_VPADTL_TYPE;
+ struct powerpc_vpadtl *vpa;
+ int err;
+
+ if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) +
+ min_sz)
+ return -EINVAL;
+
+ vpa = zalloc(sizeof(struct powerpc_vpadtl));
+ if (!vpa)
+ return -ENOMEM;
+
+ err = auxtrace_queues__init(&vpa->queues);
+ if (err)
+ goto err_free;
+
+ vpa->session = session;
+ vpa->machine = &session->machines.host; /* No kvm support */
+ vpa->auxtrace_type = auxtrace_info->type;
+ vpa->pmu_type = auxtrace_info->priv[POWERPC_VPADTL_TYPE];
+
+ vpa->auxtrace.process_event = powerpc_vpadtl_process_event;
+ vpa->auxtrace.process_auxtrace_event = powerpc_vpadtl_process_auxtrace_event;
+ vpa->auxtrace.flush_events = powerpc_vpadtl_flush;
+ vpa->auxtrace.free_events = powerpc_vpadtl_free_events;
+ vpa->auxtrace.free = powerpc_vpadtl_free;
+ session->auxtrace = &vpa->auxtrace;
+
+ powerpc_vpadtl_print_info(&auxtrace_info->priv[0]);
+
+ if (dump_trace)
+ return 0;
+
+ err = powerpc_vpadtl_synth_events(vpa, session);
+ if (err)
+ goto err_free_queues;
+
+ err = auxtrace_queues__process_index(&vpa->queues, session);
+ if (err)
+ goto err_free_queues;
+
+ return 0;
+
+err_free_queues:
+ auxtrace_queues__free(&vpa->queues);
+ session->auxtrace = NULL;
+
+err_free:
+ free(vpa);
+ return err;
+}
diff --git a/tools/perf/util/powerpc-vpadtl.h b/tools/perf/util/powerpc-vpadtl.h
new file mode 100644
index 000000000000..ca809660b9bb
--- /dev/null
+++ b/tools/perf/util/powerpc-vpadtl.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VPA DTL PMU Support
+ */
+
+#ifndef INCLUDE__PERF_POWERPC_VPADTL_H__
+#define INCLUDE__PERF_POWERPC_VPADTL_H__
+
+enum {
+ POWERPC_VPADTL_TYPE,
+ VPADTL_AUXTRACE_PRIV_MAX,
+};
+
+#define VPADTL_AUXTRACE_PRIV_SIZE (VPADTL_AUXTRACE_PRIV_MAX * sizeof(u64))
+
+union perf_event;
+struct perf_session;
+struct perf_pmu;
+
+int powerpc_vpadtl_process_auxtrace_info(union perf_event *event,
+ struct perf_session *session);
+
+#endif
diff --git a/tools/perf/util/print-events.c b/tools/perf/util/print-events.c
index a786cbfb0ff5..8f3ed83853a9 100644
--- a/tools/perf/util/print-events.c
+++ b/tools/perf/util/print-events.c
@@ -44,97 +44,6 @@ static const char * const event_type_descriptors[] = {
"Hardware breakpoint",
};
-/*
- * Print the events from <debugfs_mount_point>/tracing/events
- */
-void print_tracepoint_events(const struct print_callbacks *print_cb __maybe_unused, void *print_state __maybe_unused)
-{
- char *events_path = get_tracing_file("events");
- int events_fd = open(events_path, O_PATH);
- struct dirent **sys_namelist = NULL;
- int sys_items;
-
- if (events_fd < 0) {
- pr_err("Error: failed to open tracing events directory\n");
- pr_err("%s: %s\n", events_path, strerror(errno));
- return;
- }
- put_tracing_file(events_path);
-
- sys_items = tracing_events__scandir_alphasort(&sys_namelist);
-
- for (int i = 0; i < sys_items; i++) {
- struct dirent *sys_dirent = sys_namelist[i];
- struct dirent **evt_namelist = NULL;
- int dir_fd;
- int evt_items;
-
- if (sys_dirent->d_type != DT_DIR ||
- !strcmp(sys_dirent->d_name, ".") ||
- !strcmp(sys_dirent->d_name, ".."))
- goto next_sys;
-
- dir_fd = openat(events_fd, sys_dirent->d_name, O_PATH);
- if (dir_fd < 0)
- goto next_sys;
-
- evt_items = scandirat(events_fd, sys_dirent->d_name, &evt_namelist, NULL, alphasort);
- for (int j = 0; j < evt_items; j++) {
- /*
- * Buffer sized at twice the max filename length + 1
- * separator + 1 \0 terminator.
- */
- char buf[NAME_MAX * 2 + 2];
- /* 16 possible hex digits and 22 other characters and \0. */
- char encoding[16 + 22];
- struct dirent *evt_dirent = evt_namelist[j];
- struct io id;
- __u64 config;
-
- if (evt_dirent->d_type != DT_DIR ||
- !strcmp(evt_dirent->d_name, ".") ||
- !strcmp(evt_dirent->d_name, ".."))
- goto next_evt;
-
- snprintf(buf, sizeof(buf), "%s/id", evt_dirent->d_name);
- io__init(&id, openat(dir_fd, buf, O_RDONLY), buf, sizeof(buf));
-
- if (id.fd < 0)
- goto next_evt;
-
- if (io__get_dec(&id, &config) < 0) {
- close(id.fd);
- goto next_evt;
- }
- close(id.fd);
-
- snprintf(buf, sizeof(buf), "%s:%s",
- sys_dirent->d_name, evt_dirent->d_name);
- snprintf(encoding, sizeof(encoding), "tracepoint/config=0x%llx/", config);
- print_cb->print_event(print_state,
- /*topic=*/NULL,
- /*pmu_name=*/NULL, /* really "tracepoint" */
- /*event_name=*/buf,
- /*event_alias=*/NULL,
- /*scale_unit=*/NULL,
- /*deprecated=*/false,
- "Tracepoint event",
- /*desc=*/NULL,
- /*long_desc=*/NULL,
- encoding);
-next_evt:
- free(evt_namelist[j]);
- }
- close(dir_fd);
- free(evt_namelist);
-next_sys:
- free(sys_namelist[i]);
- }
-
- free(sys_namelist);
- close(events_fd);
-}
-
void print_sdt_events(const struct print_callbacks *print_cb, void *print_state)
{
struct strlist *bidlist, *sdtlist;
@@ -212,6 +121,7 @@ void print_sdt_events(const struct print_callbacks *print_cb, void *print_state)
print_cb->print_event(print_state,
/*topic=*/NULL,
/*pmu_name=*/NULL,
+ PERF_TYPE_TRACEPOINT,
evt_name ?: sdt_name->s,
/*event_alias=*/NULL,
/*deprecated=*/false,
@@ -268,6 +178,7 @@ bool is_event_supported(u8 type, u64 config)
ret = evsel__open(evsel, NULL, tmap) >= 0;
}
+ evsel__close(evsel);
evsel__delete(evsel);
}
@@ -275,109 +186,137 @@ bool is_event_supported(u8 type, u64 config)
return ret;
}
-int print_hwcache_events(const struct print_callbacks *print_cb, void *print_state)
+/** struct mep - RB-tree node for building printing information. */
+struct mep {
+ /** @nd: RB-tree element. */
+ struct rb_node nd;
+ /** @metric_group: Owned metric group name; groups are separated by ';'. */
+ char *metric_group;
+ const char *metric_name;
+ const char *metric_desc;
+ const char *metric_long_desc;
+ const char *metric_expr;
+ const char *metric_threshold;
+ const char *metric_unit;
+ const char *pmu_name;
+};
+
+static int mep_cmp(struct rb_node *rb_node, const void *entry)
{
- struct perf_pmu *pmu = NULL;
- const char *event_type_descriptor = event_type_descriptors[PERF_TYPE_HW_CACHE];
+ struct mep *a = container_of(rb_node, struct mep, nd);
+ struct mep *b = (struct mep *)entry;
+ int ret;
- /*
- * Only print core PMUs, skipping uncore for performance and
- * PERF_TYPE_SOFTWARE that can succeed in opening legacy cache evenst.
- */
- while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
- if (pmu->is_uncore || pmu->type == PERF_TYPE_SOFTWARE)
- continue;
+ ret = strcmp(a->metric_group, b->metric_group);
+ if (ret)
+ return ret;
- for (int type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
- for (int op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
- /* skip invalid cache type */
- if (!evsel__is_cache_op_valid(type, op))
- continue;
-
- for (int res = 0; res < PERF_COUNT_HW_CACHE_RESULT_MAX; res++) {
- char name[64];
- char alias_name[128];
- __u64 config;
- int ret;
-
- __evsel__hw_cache_type_op_res_name(type, op, res,
- name, sizeof(name));
-
- ret = parse_events__decode_legacy_cache(name, pmu->type,
- &config);
- if (ret || !is_event_supported(PERF_TYPE_HW_CACHE, config))
- continue;
- snprintf(alias_name, sizeof(alias_name), "%s/%s/",
- pmu->name, name);
- print_cb->print_event(print_state,
- "cache",
- pmu->name,
- name,
- alias_name,
- /*scale_unit=*/NULL,
- /*deprecated=*/false,
- event_type_descriptor,
- /*desc=*/NULL,
- /*long_desc=*/NULL,
- /*encoding_desc=*/NULL);
- }
- }
- }
- }
- return 0;
+ return strcmp(a->metric_name, b->metric_name);
}
-void print_symbol_events(const struct print_callbacks *print_cb, void *print_state,
- unsigned int type, const struct event_symbol *syms,
- unsigned int max)
+static struct rb_node *mep_new(struct rblist *rl __maybe_unused, const void *entry)
{
- struct strlist *evt_name_list = strlist__new(NULL, NULL);
- struct str_node *nd;
+ struct mep *me = malloc(sizeof(struct mep));
- if (!evt_name_list) {
- pr_debug("Failed to allocate new strlist for symbol events\n");
- return;
- }
- for (unsigned int i = 0; i < max; i++) {
- /*
- * New attr.config still not supported here, the latest
- * example was PERF_COUNT_SW_CGROUP_SWITCHES
- */
- if (syms[i].symbol == NULL)
- continue;
+ if (!me)
+ return NULL;
- if (!is_event_supported(type, i))
- continue;
+ memcpy(me, entry, sizeof(struct mep));
+ return &me->nd;
+}
- if (strlen(syms[i].alias)) {
- char name[MAX_NAME_LEN];
+static void mep_delete(struct rblist *rl __maybe_unused,
+ struct rb_node *nd)
+{
+ struct mep *me = container_of(nd, struct mep, nd);
- snprintf(name, MAX_NAME_LEN, "%s OR %s", syms[i].symbol, syms[i].alias);
- strlist__add(evt_name_list, name);
- } else
- strlist__add(evt_name_list, syms[i].symbol);
- }
+ zfree(&me->metric_group);
+ free(me);
+}
- strlist__for_each_entry(nd, evt_name_list) {
- char *alias = strstr(nd->s, " OR ");
+static struct mep *mep_lookup(struct rblist *groups, const char *metric_group,
+ const char *metric_name)
+{
+ struct rb_node *nd;
+ struct mep me = {
+ .metric_group = strdup(metric_group),
+ .metric_name = metric_name,
+ };
+ nd = rblist__find(groups, &me);
+ if (nd) {
+ free(me.metric_group);
+ return container_of(nd, struct mep, nd);
+ }
+ rblist__add_node(groups, &me);
+ nd = rblist__find(groups, &me);
+ if (nd)
+ return container_of(nd, struct mep, nd);
+ return NULL;
+}
- if (alias) {
- *alias = '\0';
- alias += 4;
+static int metricgroup__add_to_mep_groups_callback(const struct pmu_metric *pm,
+ const struct pmu_metrics_table *table __maybe_unused,
+ void *vdata)
+{
+ struct rblist *groups = vdata;
+ const char *g;
+ char *omg, *mg;
+
+ mg = strdup(pm->metric_group ?: pm->metric_name);
+ if (!mg)
+ return -ENOMEM;
+ omg = mg;
+ while ((g = strsep(&mg, ";")) != NULL) {
+ struct mep *me;
+
+ g = skip_spaces(g);
+ if (strlen(g))
+ me = mep_lookup(groups, g, pm->metric_name);
+ else
+ me = mep_lookup(groups, pm->metric_name, pm->metric_name);
+
+ if (me) {
+ me->metric_desc = pm->desc;
+ me->metric_long_desc = pm->long_desc;
+ me->metric_expr = pm->metric_expr;
+ me->metric_threshold = pm->metric_threshold;
+ me->metric_unit = pm->unit;
+ me->pmu_name = pm->pmu;
}
- print_cb->print_event(print_state,
- /*topic=*/NULL,
- /*pmu_name=*/NULL,
- nd->s,
- alias,
- /*scale_unit=*/NULL,
- /*deprecated=*/false,
- event_type_descriptors[type],
- /*desc=*/NULL,
- /*long_desc=*/NULL,
- /*encoding_desc=*/NULL);
}
- strlist__delete(evt_name_list);
+ free(omg);
+
+ return 0;
+}
+
+void metricgroup__print(const struct print_callbacks *print_cb, void *print_state)
+{
+ struct rblist groups;
+ struct rb_node *node, *next;
+ const struct pmu_metrics_table *table = pmu_metrics_table__find();
+
+ rblist__init(&groups);
+ groups.node_new = mep_new;
+ groups.node_cmp = mep_cmp;
+ groups.node_delete = mep_delete;
+
+ metricgroup__for_each_metric(table, metricgroup__add_to_mep_groups_callback, &groups);
+
+ for (node = rb_first_cached(&groups.entries); node; node = next) {
+ struct mep *me = container_of(node, struct mep, nd);
+
+ print_cb->print_metric(print_state,
+ me->metric_group,
+ me->metric_name,
+ me->metric_desc,
+ me->metric_long_desc,
+ me->metric_expr,
+ me->metric_threshold,
+ me->metric_unit,
+ me->pmu_name);
+ next = rb_next(node);
+ rblist__remove_node(&groups, node);
+ }
}
/*
@@ -385,18 +324,12 @@ void print_symbol_events(const struct print_callbacks *print_cb, void *print_sta
*/
void print_events(const struct print_callbacks *print_cb, void *print_state)
{
- print_symbol_events(print_cb, print_state, PERF_TYPE_HARDWARE,
- event_symbols_hw, PERF_COUNT_HW_MAX);
- print_symbol_events(print_cb, print_state, PERF_TYPE_SOFTWARE,
- event_symbols_sw, PERF_COUNT_SW_MAX);
-
- print_hwcache_events(print_cb, print_state);
-
perf_pmus__print_pmu_events(print_cb, print_state);
print_cb->print_event(print_state,
/*topic=*/NULL,
/*pmu_name=*/NULL,
+ PERF_TYPE_RAW,
"rNNN",
/*event_alias=*/NULL,
/*scale_unit=*/NULL,
@@ -411,6 +344,7 @@ void print_events(const struct print_callbacks *print_cb, void *print_state)
print_cb->print_event(print_state,
/*topic=*/NULL,
/*pmu_name=*/NULL,
+ PERF_TYPE_BREAKPOINT,
"mem:<addr>[/len][:access]",
/*scale_unit=*/NULL,
/*event_alias=*/NULL,
@@ -420,8 +354,6 @@ void print_events(const struct print_callbacks *print_cb, void *print_state)
/*long_desc=*/NULL,
/*encoding_desc=*/NULL);
- print_tracepoint_events(print_cb, print_state);
-
print_sdt_events(print_cb, print_state);
metricgroup__print(print_cb, print_state);
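The new metricgroup__print() deduplicates metrics with an rblist keyed on (metric_group, metric_name), falling back to the metric name when a metric belongs to no group. A rough Python equivalent of that grouping, using the metrics() hook the python module gains later in this patch (a sketch: it assumes the perf module has been built via `make -C tools/perf` and is importable):

    import collections
    import perf

    groups = collections.defaultdict(set)
    for m in perf.metrics():
        name = m.get("MetricName", "")
        # An empty MetricGroup list falls back to the metric's own
        # name, mirroring mep_lookup() above.
        for g in m.get("MetricGroup") or [name]:
            groups[g].add(name)

    for group in sorted(groups):
        print(group + ":", ", ".join(sorted(groups[group])))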
diff --git a/tools/perf/util/print-events.h b/tools/perf/util/print-events.h
index 445efa1636c1..eabba5d4a1fd 100644
--- a/tools/perf/util/print-events.h
+++ b/tools/perf/util/print-events.h
@@ -12,7 +12,7 @@ struct print_callbacks {
void (*print_start)(void *print_state);
void (*print_end)(void *print_state);
void (*print_event)(void *print_state, const char *topic,
- const char *pmu_name,
+ const char *pmu_name, u32 pmu_type,
const char *event_name, const char *event_alias,
const char *scale_unit,
bool deprecated, const char *event_type_desc,
@@ -25,18 +25,15 @@ struct print_callbacks {
const char *long_desc,
const char *expr,
const char *threshold,
- const char *unit);
+ const char *unit,
+ const char *pmu_name);
bool (*skip_duplicate_pmus)(void *print_state);
};
/** Print all events, the default when no options are specified. */
void print_events(const struct print_callbacks *print_cb, void *print_state);
-int print_hwcache_events(const struct print_callbacks *print_cb, void *print_state);
void print_sdt_events(const struct print_callbacks *print_cb, void *print_state);
-void print_symbol_events(const struct print_callbacks *print_cb, void *print_state,
- unsigned int type, const struct event_symbol *syms,
- unsigned int max);
-void print_tracepoint_events(const struct print_callbacks *print_cb, void *print_state);
+void metricgroup__print(const struct print_callbacks *print_cb, void *print_state);
bool is_event_supported(u8 type, u64 config);
#endif /* __PERF_PRINT_EVENTS_H */
diff --git a/tools/perf/util/print_insn.c b/tools/perf/util/print_insn.c
index a33a7726422d..02e6fbb8ca04 100644
--- a/tools/perf/util/print_insn.c
+++ b/tools/perf/util/print_insn.c
@@ -7,6 +7,7 @@
#include <inttypes.h>
#include <string.h>
#include <stdbool.h>
+#include "capstone.h"
#include "debug.h"
#include "sample.h"
#include "symbol.h"
@@ -29,84 +30,6 @@ size_t sample__fprintf_insn_raw(struct perf_sample *sample, FILE *fp)
return printed;
}
-#ifdef HAVE_LIBCAPSTONE_SUPPORT
-#include <capstone/capstone.h>
-
-int capstone_init(struct machine *machine, csh *cs_handle, bool is64, bool disassembler_style);
-
-int capstone_init(struct machine *machine, csh *cs_handle, bool is64, bool disassembler_style)
-{
- cs_arch arch;
- cs_mode mode;
-
- if (machine__is(machine, "x86_64") && is64) {
- arch = CS_ARCH_X86;
- mode = CS_MODE_64;
- } else if (machine__normalized_is(machine, "x86")) {
- arch = CS_ARCH_X86;
- mode = CS_MODE_32;
- } else if (machine__normalized_is(machine, "arm64")) {
- arch = CS_ARCH_ARM64;
- mode = CS_MODE_ARM;
- } else if (machine__normalized_is(machine, "arm")) {
- arch = CS_ARCH_ARM;
- mode = CS_MODE_ARM + CS_MODE_V8;
- } else if (machine__normalized_is(machine, "s390")) {
- arch = CS_ARCH_SYSZ;
- mode = CS_MODE_BIG_ENDIAN;
- } else {
- return -1;
- }
-
- if (cs_open(arch, mode, cs_handle) != CS_ERR_OK) {
- pr_warning_once("cs_open failed\n");
- return -1;
- }
-
- if (machine__normalized_is(machine, "x86")) {
- /*
- * In case of using capstone_init while symbol__disassemble
- * setting CS_OPT_SYNTAX_ATT depends if disassembler_style opts
- * is set via annotation args
- */
- if (disassembler_style)
- cs_option(*cs_handle, CS_OPT_SYNTAX, CS_OPT_SYNTAX_ATT);
- /*
- * Resolving address operands to symbols is implemented
- * on x86 by investigating instruction details.
- */
- cs_option(*cs_handle, CS_OPT_DETAIL, CS_OPT_ON);
- }
-
- return 0;
-}
-
-static size_t print_insn_x86(struct thread *thread, u8 cpumode, cs_insn *insn,
- int print_opts, FILE *fp)
-{
- struct addr_location al;
- size_t printed = 0;
-
- if (insn->detail && insn->detail->x86.op_count == 1) {
- cs_x86_op *op = &insn->detail->x86.operands[0];
-
- addr_location__init(&al);
- if (op->type == X86_OP_IMM &&
- thread__find_symbol(thread, cpumode, op->imm, &al)) {
- printed += fprintf(fp, "%s ", insn[0].mnemonic);
- printed += symbol__fprintf_symname_offs(al.sym, &al, fp);
- if (print_opts & PRINT_INSN_IMM_HEX)
- printed += fprintf(fp, " [%#" PRIx64 "]", op->imm);
- addr_location__exit(&al);
- return printed;
- }
- addr_location__exit(&al);
- }
-
- printed += fprintf(fp, "%s %s", insn[0].mnemonic, insn[0].op_str);
- return printed;
-}
-
static bool is64bitip(struct machine *machine, struct addr_location *al)
{
const struct dso *dso = al->map ? map__dso(al->map) : NULL;
@@ -123,32 +46,8 @@ ssize_t fprintf_insn_asm(struct machine *machine, struct thread *thread, u8 cpum
bool is64bit, const uint8_t *code, size_t code_size,
uint64_t ip, int *lenp, int print_opts, FILE *fp)
{
- size_t printed;
- cs_insn *insn;
- csh cs_handle;
- size_t count;
- int ret;
-
- /* TODO: Try to initiate capstone only once but need a proper place. */
- ret = capstone_init(machine, &cs_handle, is64bit, true);
- if (ret < 0)
- return ret;
-
- count = cs_disasm(cs_handle, code, code_size, ip, 1, &insn);
- if (count > 0) {
- if (machine__normalized_is(machine, "x86"))
- printed = print_insn_x86(thread, cpumode, &insn[0], print_opts, fp);
- else
- printed = fprintf(fp, "%s %s", insn[0].mnemonic, insn[0].op_str);
- if (lenp)
- *lenp = insn->size;
- cs_free(insn, count);
- } else {
- printed = -1;
- }
-
- cs_close(&cs_handle);
- return printed;
+ return capstone__fprintf_insn_asm(machine, thread, cpumode, is64bit, code, code_size,
+ ip, lenp, print_opts, fp);
}
size_t sample__fprintf_insn_asm(struct perf_sample *sample, struct thread *thread,
@@ -166,13 +65,3 @@ size_t sample__fprintf_insn_asm(struct perf_sample *sample, struct thread *threa
return printed;
}
-#else
-size_t sample__fprintf_insn_asm(struct perf_sample *sample __maybe_unused,
- struct thread *thread __maybe_unused,
- struct machine *machine __maybe_unused,
- FILE *fp __maybe_unused,
- struct addr_location *al __maybe_unused)
-{
- return 0;
-}
-#endif
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 307ad6242a4e..710e4620923e 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -75,12 +75,14 @@ int e_snprintf(char *str, size_t size, const char *format, ...)
}
static struct machine *host_machine;
+static struct perf_env host_env;
/* Initialize symbol maps and path of vmlinux/modules */
int init_probe_symbol_maps(bool user_only)
{
int ret;
+ perf_env__init(&host_env);
symbol_conf.allow_aliases = true;
ret = symbol__init(NULL);
if (ret < 0) {
@@ -94,7 +96,7 @@ int init_probe_symbol_maps(bool user_only)
if (symbol_conf.vmlinux_name)
pr_debug("Use vmlinux: %s\n", symbol_conf.vmlinux_name);
- host_machine = machine__new_host();
+ host_machine = machine__new_host(&host_env);
if (!host_machine) {
pr_debug("machine__new_host() failed.\n");
symbol__exit();
@@ -111,6 +113,7 @@ void exit_probe_symbol_maps(void)
machine__delete(host_machine);
host_machine = NULL;
symbol__exit();
+ perf_env__exit(&host_env);
}
static struct ref_reloc_sym *kernel_get_ref_reloc_sym(struct map **pmap)
@@ -502,7 +505,7 @@ static struct debuginfo *open_from_debuginfod(struct dso *dso, struct nsinfo *ns
if (!c)
return NULL;
- build_id__sprintf(dso__bid(dso), sbuild_id);
+ build_id__snprintf(dso__bid(dso), sbuild_id, sizeof(sbuild_id));
fd = debuginfod_find_debuginfo(c, (const unsigned char *)sbuild_id,
0, &path);
if (fd >= 0)
@@ -1063,7 +1066,6 @@ static int sprint_line_description(char *sbuf, size_t size, struct line_range *l
static int __show_line_range(struct line_range *lr, const char *module,
bool user)
{
- struct build_id bid;
int l = 1;
struct int_node *ln;
struct debuginfo *dinfo;
@@ -1088,8 +1090,10 @@ static int __show_line_range(struct line_range *lr, const char *module,
ret = -ENOENT;
}
if (dinfo->build_id) {
+ struct build_id bid;
+
build_id__init(&bid, dinfo->build_id, BUILD_ID_SIZE);
- build_id__sprintf(&bid, sbuild_id);
+ build_id__snprintf(&bid, sbuild_id, sizeof(sbuild_id));
}
debuginfo__delete(dinfo);
if (ret == 0 || ret == -ENOENT) {
@@ -2415,6 +2419,7 @@ void clear_perf_probe_event(struct perf_probe_event *pev)
}
pev->nargs = 0;
zfree(&pev->args);
+ nsinfo__zput(pev->nsi);
}
#define strdup_or_goto(str, label) \
@@ -3763,12 +3768,11 @@ void cleanup_perf_probe_events(struct perf_probe_event *pevs, int npevs)
/* Loop 3: cleanup and free trace events */
for (i = 0; i < npevs; i++) {
pev = &pevs[i];
- for (j = 0; j < pevs[i].ntevs; j++)
- clear_probe_trace_event(&pevs[i].tevs[j]);
- zfree(&pevs[i].tevs);
- pevs[i].ntevs = 0;
- nsinfo__zput(pev->nsi);
- clear_perf_probe_event(&pevs[i]);
+ for (j = 0; j < pev->ntevs; j++)
+ clear_probe_trace_event(&pev->tevs[j]);
+ zfree(&pev->tevs);
+ pev->ntevs = 0;
+ clear_perf_probe_event(pev);
}
}
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
index ec8ac242fedb..5069fb61f48c 100644
--- a/tools/perf/util/probe-file.c
+++ b/tools/perf/util/probe-file.c
@@ -448,10 +448,10 @@ static int probe_cache__open(struct probe_cache *pcache, const char *target,
if (!target || !strcmp(target, DSO__NAME_KALLSYMS)) {
target = DSO__NAME_KALLSYMS;
is_kallsyms = true;
- ret = sysfs__sprintf_build_id("/", sbuildid);
+ ret = sysfs__snprintf_build_id("/", sbuildid, sizeof(sbuildid));
} else {
nsinfo__mountns_enter(nsi, &nsc);
- ret = filename__sprintf_build_id(target, sbuildid);
+ ret = filename__snprintf_build_id(target, sbuildid, sizeof(sbuildid));
nsinfo__mountns_exit(&nsc);
}
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 3cc7c40f5097..5ffd97ee4898 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -848,7 +848,6 @@ static int probe_point_lazy_walker(const char *fname, int lineno,
/* Find probe points from lazy pattern */
static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf)
{
- struct build_id bid;
char sbuild_id[SBUILD_ID_SIZE] = "";
int ret = 0;
char *fpath;
@@ -858,8 +857,10 @@ static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf)
comp_dir = cu_get_comp_dir(&pf->cu_die);
if (pf->dbg->build_id) {
+ struct build_id bid;
+
build_id__init(&bid, pf->dbg->build_id, BUILD_ID_SIZE);
- build_id__sprintf(&bid, sbuild_id);
+ build_id__snprintf(&bid, sbuild_id, sizeof(sbuild_id));
}
ret = find_source_path(pf->fname, sbuild_id, comp_dir, &fpath);
if (ret < 0) {
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index f3c05da25b4a..cc1019d29a5d 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -10,14 +10,18 @@
#endif
#include <perf/mmap.h>
#include "callchain.h"
+#include "counts.h"
#include "evlist.h"
#include "evsel.h"
#include "event.h"
+#include "expr.h"
#include "print_binary.h"
#include "record.h"
#include "strbuf.h"
#include "thread_map.h"
+#include "tp_pmu.h"
#include "trace-event.h"
+#include "metricgroup.h"
#include "mmap.h"
#include "util/sample.h"
#include <internal/lib.h>
@@ -335,7 +339,6 @@ tracepoint_field(const struct pyrf_event *pe, struct tep_format_field *field)
static PyObject*
get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
{
- const char *str = _PyUnicode_AsString(PyObject_Str(attr_name));
struct evsel *evsel = pevent->evsel;
struct tep_event *tp_format = evsel__tp_format(evsel);
struct tep_format_field *field;
@@ -343,7 +346,18 @@ get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
if (IS_ERR_OR_NULL(tp_format))
return NULL;
+ PyObject *obj = PyObject_Str(attr_name);
+ if (obj == NULL)
+ return NULL;
+
+ const char *str = PyUnicode_AsUTF8(obj);
+ if (str == NULL) {
+ Py_DECREF(obj);
+ return NULL;
+ }
+
field = tep_find_any_field(tp_format, str);
+ Py_DECREF(obj);
return field ? tracepoint_field(pevent, field) : NULL;
}
#endif /* HAVE_LIBTRACEEVENT */
@@ -473,13 +487,19 @@ static PyObject *pyrf_event__new(const union perf_event *event)
if ((event->header.type < PERF_RECORD_MMAP ||
event->header.type > PERF_RECORD_SAMPLE) &&
!(event->header.type == PERF_RECORD_SWITCH ||
- event->header.type == PERF_RECORD_SWITCH_CPU_WIDE))
+ event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)) {
+ PyErr_Format(PyExc_TypeError, "Unexpected header type %u",
+ event->header.type);
return NULL;
+ }
// FIXME this better be dynamic or we need to parse everything
// before calling perf_mmap__consume(), including tracepoint fields.
- if (sizeof(pevent->event) < event->header.size)
+ if (sizeof(pevent->event) < event->header.size) {
+ PyErr_Format(PyExc_TypeError, "Unexpected event size: %zd < %u",
+ sizeof(pevent->event), event->header.size);
return NULL;
+ }
ptype = pyrf_event__type[event->header.type];
pevent = PyObject_New(struct pyrf_event, ptype);
@@ -527,8 +547,10 @@ static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i)
{
struct pyrf_cpu_map *pcpus = (void *)obj;
- if (i >= perf_cpu_map__nr(pcpus->cpus))
+ if (i >= perf_cpu_map__nr(pcpus->cpus)) {
+ PyErr_SetString(PyExc_IndexError, "Index out of range");
return NULL;
+ }
return Py_BuildValue("i", perf_cpu_map__cpu(pcpus->cpus, i).cpu);
}
@@ -566,14 +588,14 @@ struct pyrf_thread_map {
static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
PyObject *args, PyObject *kwargs)
{
- static char *kwlist[] = { "pid", "tid", "uid", NULL };
- int pid = -1, tid = -1, uid = UINT_MAX;
+ static char *kwlist[] = { "pid", "tid", NULL };
+ int pid = -1, tid = -1;
- if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iii",
- kwlist, &pid, &tid, &uid))
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii",
+ kwlist, &pid, &tid))
return -1;
- pthreads->threads = thread_map__new(pid, tid, uid);
+ pthreads->threads = thread_map__new(pid, tid);
if (pthreads->threads == NULL)
return -1;
return 0;
@@ -596,8 +618,10 @@ static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i)
{
struct pyrf_thread_map *pthreads = (void *)obj;
- if (i >= perf_thread_map__nr(pthreads->threads))
+ if (i >= perf_thread_map__nr(pthreads->threads)) {
+ PyErr_SetString(PyExc_IndexError, "Index out of range");
return NULL;
+ }
return Py_BuildValue("i", perf_thread_map__pid(pthreads->threads, i));
}
@@ -626,6 +650,295 @@ static int pyrf_thread_map__setup_types(void)
return PyType_Ready(&pyrf_thread_map__type);
}
+/**
+ * A Python wrapper for a perf_pmu; the PMUs themselves are globally owned by the pmus.c code.
+ */
+struct pyrf_pmu {
+ PyObject_HEAD
+
+ struct perf_pmu *pmu;
+};
+
+static void pyrf_pmu__delete(struct pyrf_pmu *ppmu)
+{
+ Py_TYPE(ppmu)->tp_free((PyObject *)ppmu);
+}
+
+static PyObject *pyrf_pmu__name(PyObject *self)
+{
+ struct pyrf_pmu *ppmu = (void *)self;
+
+ return PyUnicode_FromString(ppmu->pmu->name);
+}
+
+static bool add_to_dict(PyObject *dict, const char *key, const char *value)
+{
+ PyObject *pkey, *pvalue;
+ bool ret;
+
+ if (value == NULL)
+ return true;
+
+ pkey = PyUnicode_FromString(key);
+ pvalue = PyUnicode_FromString(value);
+
+ ret = pkey && pvalue && PyDict_SetItem(dict, pkey, pvalue) == 0;
+ Py_XDECREF(pkey);
+ Py_XDECREF(pvalue);
+ return ret;
+}
+
+static int pyrf_pmu__events_cb(void *state, struct pmu_event_info *info)
+{
+ PyObject *py_list = state;
+ PyObject *dict = PyDict_New();
+
+ if (!dict)
+ return -ENOMEM;
+
+ if (!add_to_dict(dict, "name", info->name) ||
+ !add_to_dict(dict, "alias", info->alias) ||
+ !add_to_dict(dict, "scale_unit", info->scale_unit) ||
+ !add_to_dict(dict, "desc", info->desc) ||
+ !add_to_dict(dict, "long_desc", info->long_desc) ||
+ !add_to_dict(dict, "encoding_desc", info->encoding_desc) ||
+ !add_to_dict(dict, "topic", info->topic) ||
+ !add_to_dict(dict, "event_type_desc", info->event_type_desc) ||
+ !add_to_dict(dict, "str", info->str) ||
+ !add_to_dict(dict, "deprecated", info->deprecated ? "deprecated" : NULL) ||
+ PyList_Append(py_list, dict) != 0) {
+ Py_DECREF(dict);
+ return -ENOMEM;
+ }
+ Py_DECREF(dict);
+ return 0;
+}
+
+static PyObject *pyrf_pmu__events(PyObject *self)
+{
+ struct pyrf_pmu *ppmu = (void *)self;
+ PyObject *py_list = PyList_New(0);
+ int ret;
+
+ if (!py_list)
+ return NULL;
+
+ ret = perf_pmu__for_each_event(ppmu->pmu,
+ /*skip_duplicate_pmus=*/false,
+ py_list,
+ pyrf_pmu__events_cb);
+ if (ret) {
+ Py_DECREF(py_list);
+ errno = -ret;
+ PyErr_SetFromErrno(PyExc_OSError);
+ return NULL;
+ }
+ return py_list;
+}
+
+static PyObject *pyrf_pmu__repr(PyObject *self)
+{
+ struct pyrf_pmu *ppmu = (void *)self;
+
+ return PyUnicode_FromFormat("pmu(%s)", ppmu->pmu->name);
+}
+
+static const char pyrf_pmu__doc[] = PyDoc_STR("perf Performance Monitoring Unit (PMU) object.");
+
+static PyMethodDef pyrf_pmu__methods[] = {
+ {
+ .ml_name = "events",
+ .ml_meth = (PyCFunction)pyrf_pmu__events,
+ .ml_flags = METH_NOARGS,
+ .ml_doc = PyDoc_STR("Returns a sequence of events encoded as a dictionaries.")
+ },
+ {
+ .ml_name = "name",
+ .ml_meth = (PyCFunction)pyrf_pmu__name,
+ .ml_flags = METH_NOARGS,
+ .ml_doc = PyDoc_STR("Name of the PMU including suffixes.")
+ },
+ { .ml_name = NULL, }
+};
+
+/** The python type for a perf.pmu. */
+static PyTypeObject pyrf_pmu__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.pmu",
+ .tp_basicsize = sizeof(struct pyrf_pmu),
+ .tp_dealloc = (destructor)pyrf_pmu__delete,
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_doc = pyrf_pmu__doc,
+ .tp_methods = pyrf_pmu__methods,
+ .tp_str = pyrf_pmu__name,
+ .tp_repr = pyrf_pmu__repr,
+};
+
+static int pyrf_pmu__setup_types(void)
+{
+ pyrf_pmu__type.tp_new = PyType_GenericNew;
+ return PyType_Ready(&pyrf_pmu__type);
+}
+
+/** A python iterator for pmus that has no equivalent in the C code. */
+struct pyrf_pmu_iterator {
+ PyObject_HEAD
+ struct perf_pmu *pmu;
+};
+
+static void pyrf_pmu_iterator__dealloc(struct pyrf_pmu_iterator *self)
+{
+ Py_TYPE(self)->tp_free((PyObject *) self);
+}
+
+static PyObject *pyrf_pmu_iterator__new(PyTypeObject *type, PyObject *args __maybe_unused,
+ PyObject *kwds __maybe_unused)
+{
+ struct pyrf_pmu_iterator *itr = (void *)type->tp_alloc(type, 0);
+
+ if (itr != NULL)
+ itr->pmu = perf_pmus__scan(/*pmu=*/NULL);
+
+ return (PyObject *) itr;
+}
+
+static PyObject *pyrf_pmu_iterator__iter(PyObject *self)
+{
+ Py_INCREF(self);
+ return self;
+}
+
+static PyObject *pyrf_pmu_iterator__iternext(PyObject *self)
+{
+ struct pyrf_pmu_iterator *itr = (void *)self;
+ struct pyrf_pmu *ppmu;
+
+ if (itr->pmu == NULL) {
+ PyErr_SetNone(PyExc_StopIteration);
+ return NULL;
+ }
+ // Create object to return.
+ ppmu = PyObject_New(struct pyrf_pmu, &pyrf_pmu__type);
+ if (ppmu) {
+ ppmu->pmu = itr->pmu;
+ // Advance iterator.
+ itr->pmu = perf_pmus__scan(itr->pmu);
+ }
+ return (PyObject *)ppmu;
+}
+
+/** The python type for the PMU iterator. */
+static PyTypeObject pyrf_pmu_iterator__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "pmus.iterator",
+ .tp_doc = "Iterator for the pmus string sequence.",
+ .tp_basicsize = sizeof(struct pyrf_pmu_iterator),
+ .tp_itemsize = 0,
+ .tp_flags = Py_TPFLAGS_DEFAULT,
+ .tp_new = pyrf_pmu_iterator__new,
+ .tp_dealloc = (destructor) pyrf_pmu_iterator__dealloc,
+ .tp_iter = pyrf_pmu_iterator__iter,
+ .tp_iternext = pyrf_pmu_iterator__iternext,
+};
+
+static int pyrf_pmu_iterator__setup_types(void)
+{
+ return PyType_Ready(&pyrf_pmu_iterator__type);
+}
+
+static PyObject *pyrf__pmus(PyObject *self, PyObject *args)
+{
+ // Calling the class creates an instance of the iterator.
+ return PyObject_CallObject((PyObject *) &pyrf_pmu_iterator__type, /*args=*/NULL);
+}
+
+struct pyrf_counts_values {
+ PyObject_HEAD
+
+ struct perf_counts_values values;
+};
+
+static const char pyrf_counts_values__doc[] = PyDoc_STR("perf counts values object.");
+
+static void pyrf_counts_values__delete(struct pyrf_counts_values *pcounts_values)
+{
+ Py_TYPE(pcounts_values)->tp_free((PyObject *)pcounts_values);
+}
+
+#define counts_values_member_def(member, ptype, help) \
+ { #member, ptype, \
+ offsetof(struct pyrf_counts_values, values.member), \
+ 0, help }
+
+static PyMemberDef pyrf_counts_values_members[] = {
+ counts_values_member_def(val, T_ULONG, "Value of event"),
+ counts_values_member_def(ena, T_ULONG, "Time for which enabled"),
+ counts_values_member_def(run, T_ULONG, "Time for which running"),
+ counts_values_member_def(id, T_ULONG, "Unique ID for an event"),
+ counts_values_member_def(lost, T_ULONG, "Num of lost samples"),
+ { .name = NULL, },
+};
+
+static PyObject *pyrf_counts_values_get_values(struct pyrf_counts_values *self, void *closure)
+{
+ PyObject *vals = PyList_New(5);
+
+ if (!vals)
+ return NULL;
+ for (int i = 0; i < 5; i++)
+ PyList_SetItem(vals, i, PyLong_FromLong(self->values.values[i]));
+
+ return vals;
+}
+
+static int pyrf_counts_values_set_values(struct pyrf_counts_values *self, PyObject *list,
+ void *closure)
+{
+ Py_ssize_t size;
+ PyObject *item = NULL;
+
+ if (!PyList_Check(list)) {
+ PyErr_SetString(PyExc_TypeError, "Value assigned must be a list");
+ return -1;
+ }
+
+ size = PyList_Size(list);
+ if (size > 5) {
+ PyErr_SetString(PyExc_ValueError, "List can hold at most 5 values");
+ return -1;
+ }
+ for (Py_ssize_t i = 0; i < size; i++) {
+ item = PyList_GetItem(list, i);
+ if (!PyLong_Check(item)) {
+ PyErr_SetString(PyExc_TypeError, "List members should be numbers");
+ return -1;
+ }
+ self->values.values[i] = PyLong_AsLong(item);
+ }
+
+ return 0;
+}
+
+static PyGetSetDef pyrf_counts_values_getset[] = {
+ {"values", (getter)pyrf_counts_values_get_values, (setter)pyrf_counts_values_set_values,
+ "Name field", NULL},
+ { .name = NULL, },
+};
+
+static PyTypeObject pyrf_counts_values__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.counts_values",
+ .tp_basicsize = sizeof(struct pyrf_counts_values),
+ .tp_dealloc = (destructor)pyrf_counts_values__delete,
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_doc = pyrf_counts_values__doc,
+ .tp_members = pyrf_counts_values_members,
+ .tp_getset = pyrf_counts_values_getset,
+};
+
+static int pyrf_counts_values__setup_types(void)
+{
+ pyrf_counts_values__type.tp_new = PyType_GenericNew;
+ return PyType_Ready(&pyrf_counts_values__type);
+}
+
struct pyrf_evsel {
PyObject_HEAD
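With the wrapper and iterator types above, scripts can enumerate PMUs lazily without copying any pmus.c state. A minimal usage sketch (assuming the built perf python module is on PYTHONPATH; note each event dictionary only carries keys whose values were non-NULL):

    import perf

    for pmu in perf.pmus():
        print(pmu.name())
        for event in pmu.events():
            desc = event.get("desc") or event.get("event_type_desc", "")
            print("  %-40s %s" % (event.get("name", "?"), desc))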
@@ -781,15 +1094,104 @@ static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
return Py_None;
}
+static PyObject *pyrf_evsel__cpus(struct pyrf_evsel *pevsel)
+{
+ struct pyrf_cpu_map *pcpu_map = PyObject_New(struct pyrf_cpu_map, &pyrf_cpu_map__type);
+
+ if (pcpu_map)
+ pcpu_map->cpus = perf_cpu_map__get(pevsel->evsel.core.cpus);
+
+ return (PyObject *)pcpu_map;
+}
+
+static PyObject *pyrf_evsel__threads(struct pyrf_evsel *pevsel)
+{
+ struct pyrf_thread_map *pthread_map =
+ PyObject_New(struct pyrf_thread_map, &pyrf_thread_map__type);
+
+ if (pthread_map)
+ pthread_map->threads = perf_thread_map__get(pevsel->evsel.core.threads);
+
+ return (PyObject *)pthread_map;
+}
+
+/*
+ * Ensure evsel's counts and prev_raw_counts are allocated; the latter is
+ * used by tool PMUs to compute the cumulative count as expected by
+ * stat's process_counter_values.
+ */
+static int evsel__ensure_counts(struct evsel *evsel)
+{
+ int nthreads, ncpus;
+
+ if (evsel->counts != NULL)
+ return 0;
+
+ nthreads = perf_thread_map__nr(evsel->core.threads);
+ ncpus = perf_cpu_map__nr(evsel->core.cpus);
+
+ evsel->counts = perf_counts__new(ncpus, nthreads);
+ if (evsel->counts == NULL)
+ return -ENOMEM;
+
+ evsel->prev_raw_counts = perf_counts__new(ncpus, nthreads);
+ if (evsel->prev_raw_counts == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static PyObject *pyrf_evsel__read(struct pyrf_evsel *pevsel,
+ PyObject *args, PyObject *kwargs)
+{
+ struct evsel *evsel = &pevsel->evsel;
+ int cpu = 0, cpu_idx, thread = 0, thread_idx;
+ struct perf_counts_values *old_count, *new_count;
+ struct pyrf_counts_values *count_values;
+
+ if (!PyArg_ParseTuple(args, "ii", &cpu, &thread))
+ return NULL;
+
+ cpu_idx = perf_cpu_map__idx(evsel->core.cpus, (struct perf_cpu){.cpu = cpu});
+ if (cpu_idx < 0) {
+ PyErr_Format(PyExc_TypeError, "CPU %d is not part of evsel's CPUs", cpu);
+ return NULL;
+ }
+ thread_idx = perf_thread_map__idx(evsel->core.threads, thread);
+ if (thread_idx < 0) {
+ PyErr_Format(PyExc_TypeError, "Thread %d is not part of evsel's threads",
+ thread);
+ return NULL;
+ }
+
+ if (evsel__ensure_counts(evsel))
+ return PyErr_NoMemory();
+
+ /* Allocate the result only once the arguments are validated. */
+ count_values = PyObject_New(struct pyrf_counts_values, &pyrf_counts_values__type);
+ if (!count_values)
+ return NULL;
+
+ /* Set up pointers to the old and newly read counter values. */
+ old_count = perf_counts(evsel->prev_raw_counts, cpu_idx, thread_idx);
+ new_count = perf_counts(evsel->counts, cpu_idx, thread_idx);
+ /* Update the value in evsel->counts. */
+ evsel__read_counter(evsel, cpu_idx, thread_idx);
+ /* Copy the value and turn it into the delta from old_count. */
+ count_values->values = *new_count;
+ count_values->values.val -= old_count->val;
+ count_values->values.ena -= old_count->ena;
+ count_values->values.run -= old_count->run;
+ /* Save the new count over the old_count for the next read. */
+ *old_count = *new_count;
+ return (PyObject *)count_values;
+}
+
static PyObject *pyrf_evsel__str(PyObject *self)
{
struct pyrf_evsel *pevsel = (void *)self;
struct evsel *evsel = &pevsel->evsel;
- if (!evsel->pmu)
- return PyUnicode_FromFormat("evsel(%s)", evsel__name(evsel));
-
- return PyUnicode_FromFormat("evsel(%s/%s/)", evsel->pmu->name, evsel__name(evsel));
+ return PyUnicode_FromFormat("evsel(%s/%s/)", evsel__pmu_name(evsel), evsel__name(evsel));
}
static PyMethodDef pyrf_evsel__methods[] = {
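evsel.read() returns the delta since the previous read of the same (cpu, thread), with prev_raw_counts holding the last snapshot, so repeated reads behave like interval counting in perf stat. A usage sketch (hypothetical setup: it assumes the module's evsel constructor, a perf.cpu_map() taking a CPU-list string, and the TYPE_*/COUNT_* constants the module exports):

    import time
    import perf

    cpus = perf.cpu_map("0")                # just CPU 0
    threads = perf.thread_map(-1)           # system-wide dummy entry
    evsel = perf.evsel(type=perf.TYPE_HARDWARE,
                       config=perf.COUNT_HW_CPU_CYCLES)
    evsel.open(cpus=cpus, threads=threads)
    for _ in range(3):
        time.sleep(1)
        counts = evsel.read(0, -1)          # cpu 0, thread -1
        print(counts.val, counts.ena, counts.run)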
@@ -799,6 +1201,24 @@ static PyMethodDef pyrf_evsel__methods[] = {
.ml_flags = METH_VARARGS | METH_KEYWORDS,
.ml_doc = PyDoc_STR("open the event selector file descriptor table.")
},
+ {
+ .ml_name = "cpus",
+ .ml_meth = (PyCFunction)pyrf_evsel__cpus,
+ .ml_flags = METH_NOARGS,
+ .ml_doc = PyDoc_STR("CPUs the event is to be used with.")
+ },
+ {
+ .ml_name = "threads",
+ .ml_meth = (PyCFunction)pyrf_evsel__threads,
+ .ml_flags = METH_NOARGS,
+ .ml_doc = PyDoc_STR("threads the event is to be used with.")
+ },
+ {
+ .ml_name = "read",
+ .ml_meth = (PyCFunction)pyrf_evsel__read,
+ .ml_flags = METH_VARARGS | METH_KEYWORDS,
+ .ml_doc = PyDoc_STR("read counters")
+ },
{ .ml_name = NULL, }
};
@@ -884,6 +1304,178 @@ static PyObject *pyrf_evlist__all_cpus(struct pyrf_evlist *pevlist)
return (PyObject *)pcpu_map;
}
+static PyObject *pyrf_evlist__metrics(struct pyrf_evlist *pevlist)
+{
+ PyObject *list = PyList_New(/*len=*/0);
+ struct rb_node *node;
+
+ if (!list)
+ return NULL;
+
+ for (node = rb_first_cached(&pevlist->evlist.metric_events.entries); node;
+ node = rb_next(node)) {
+ struct metric_event *me = container_of(node, struct metric_event, nd);
+ struct list_head *pos;
+
+ list_for_each(pos, &me->head) {
+ struct metric_expr *expr = container_of(pos, struct metric_expr, nd);
+ PyObject *str = PyUnicode_FromString(expr->metric_name);
+
+ if (!str || PyList_Append(list, str) != 0) {
+ Py_DECREF(list);
+ return NULL;
+ }
+ Py_DECREF(str);
+ }
+ }
+ return list;
+}
+
+static int prepare_metric(const struct metric_expr *mexp,
+ const struct evsel *evsel,
+ struct expr_parse_ctx *pctx,
+ int cpu_idx, int thread_idx)
+{
+ struct evsel * const *metric_events = mexp->metric_events;
+ struct metric_ref *metric_refs = mexp->metric_refs;
+
+ for (int i = 0; metric_events[i]; i++) {
+ struct evsel *cur = metric_events[i];
+ double val, ena, run;
+ int ret, source_count = 0;
+ struct perf_counts_values *old_count, *new_count;
+ char *n = strdup(evsel__metric_id(cur));
+
+ if (!n)
+ return -ENOMEM;
+
+ /*
+ * If there are multiple uncore PMUs and we're not reading the
+ * leader's stats, determine the stats for the appropriate
+ * uncore PMU.
+ */
+ if (evsel && evsel->metric_leader &&
+ evsel->pmu != evsel->metric_leader->pmu &&
+ cur->pmu == evsel->metric_leader->pmu) {
+ struct evsel *pos;
+
+ evlist__for_each_entry(evsel->evlist, pos) {
+ if (pos->pmu != evsel->pmu)
+ continue;
+ if (pos->metric_leader != cur)
+ continue;
+ cur = pos;
+ source_count = 1;
+ break;
+ }
+ }
+
+ if (source_count == 0)
+ source_count = evsel__source_count(cur);
+
+ ret = evsel__ensure_counts(cur);
+ if (ret)
+ return ret;
+
+ /* Set up pointers to the old and newly read counter values. */
+ old_count = perf_counts(cur->prev_raw_counts, cpu_idx, thread_idx);
+ new_count = perf_counts(cur->counts, cpu_idx, thread_idx);
+ /* Update the value in cur->counts. */
+ evsel__read_counter(cur, cpu_idx, thread_idx);
+
+ val = new_count->val - old_count->val;
+ ena = new_count->ena - old_count->ena;
+ run = new_count->run - old_count->run;
+
+ if (ena != run && run != 0)
+ val = val * ena / run;
+ ret = expr__add_id_val_source_count(pctx, n, val, source_count);
+ if (ret)
+ return ret;
+ }
+
+ for (int i = 0; metric_refs && metric_refs[i].metric_name; i++) {
+ int ret = expr__add_ref(pctx, &metric_refs[i]);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static PyObject *pyrf_evlist__compute_metric(struct pyrf_evlist *pevlist,
+ PyObject *args, PyObject *kwargs)
+{
+ int ret, cpu = 0, cpu_idx = 0, thread = 0, thread_idx = 0;
+ const char *metric;
+ struct rb_node *node;
+ struct metric_expr *mexp = NULL;
+ struct expr_parse_ctx *pctx;
+ double result = 0;
+ struct evsel *metric_evsel = NULL;
+
+ if (!PyArg_ParseTuple(args, "sii", &metric, &cpu, &thread))
+ return NULL;
+
+ for (node = rb_first_cached(&pevlist->evlist.metric_events.entries);
+ mexp == NULL && node;
+ node = rb_next(node)) {
+ struct metric_event *me = container_of(node, struct metric_event, nd);
+ struct list_head *pos;
+
+ list_for_each(pos, &me->head) {
+ struct metric_expr *e = container_of(pos, struct metric_expr, nd);
+ struct evsel *pos2;
+
+ if (strcmp(e->metric_name, metric))
+ continue;
+
+ if (e->metric_events[0] == NULL)
+ continue;
+
+ evlist__for_each_entry(&pevlist->evlist, pos2) {
+ if (pos2->metric_leader != e->metric_events[0])
+ continue;
+ cpu_idx = perf_cpu_map__idx(pos2->core.cpus,
+ (struct perf_cpu){.cpu = cpu});
+ if (cpu_idx < 0)
+ continue;
+
+ thread_idx = perf_thread_map__idx(pos2->core.threads, thread);
+ if (thread_idx < 0)
+ continue;
+ metric_evsel = pos2;
+ mexp = e;
+ goto done;
+ }
+ }
+ }
+done:
+ if (!mexp) {
+ PyErr_Format(PyExc_TypeError, "Unknown metric '%s' for CPU '%d' and thread '%d'",
+ metric, cpu, thread);
+ return NULL;
+ }
+
+ pctx = expr__ctx_new();
+ if (!pctx)
+ return PyErr_NoMemory();
+
+ ret = prepare_metric(mexp, metric_evsel, pctx, cpu_idx, thread_idx);
+ if (ret) {
+ expr__ctx_free(pctx);
+ errno = -ret;
+ PyErr_SetFromErrno(PyExc_OSError);
+ return NULL;
+ }
+ if (expr__parse(&result, pctx, mexp->metric_expr))
+ result = 0.0;
+
+ expr__ctx_free(pctx);
+ return PyFloat_FromDouble(result);
+}
+
static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
PyObject *args, PyObject *kwargs)
{
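prepare_metric() applies the usual multiplexing correction before handing counts to the expression parser: when an event was scheduled for only part of its enabled time, the raw delta is scaled by ena/run. The same arithmetic in Python (illustrative only, reusing the counts_values fields defined earlier):

    def scaled_delta(new, old):
        val = new.val - old.val
        ena = new.ena - old.ena
        run = new.run - old.run
        if run != 0 and ena != run:
            # Multiplexed: extrapolate to the full enabled time.
            val = val * ena / run
        return val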
@@ -1000,8 +1592,10 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
return NULL;
md = get_md(evlist, cpu);
- if (!md)
+ if (!md) {
+ PyErr_Format(PyExc_TypeError, "Unknown CPU '%d'", cpu);
return NULL;
+ }
if (perf_mmap__read_init(&md->core) < 0)
goto end;
@@ -1054,6 +1648,16 @@ static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist,
return Py_None;
}
+static PyObject *pyrf_evlist__close(struct pyrf_evlist *pevlist)
+{
+ struct evlist *evlist = &pevlist->evlist;
+
+ evlist__close(evlist);
+
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
static PyObject *pyrf_evlist__config(struct pyrf_evlist *pevlist)
{
struct record_opts opts = {
@@ -1101,6 +1705,18 @@ static PyMethodDef pyrf_evlist__methods[] = {
.ml_doc = PyDoc_STR("CPU map union of all evsel CPU maps.")
},
{
+ .ml_name = "metrics",
+ .ml_meth = (PyCFunction)pyrf_evlist__metrics,
+ .ml_flags = METH_NOARGS,
+ .ml_doc = PyDoc_STR("List of metric names within the evlist.")
+ },
+ {
+ .ml_name = "compute_metric",
+ .ml_meth = (PyCFunction)pyrf_evlist__compute_metric,
+ .ml_flags = METH_VARARGS | METH_KEYWORDS,
+ .ml_doc = PyDoc_STR("compute metric for given name, cpu and thread")
+ },
+ {
.ml_name = "mmap",
.ml_meth = (PyCFunction)pyrf_evlist__mmap,
.ml_flags = METH_VARARGS | METH_KEYWORDS,
@@ -1113,6 +1729,12 @@ static PyMethodDef pyrf_evlist__methods[] = {
.ml_doc = PyDoc_STR("open the file descriptors.")
},
{
+ .ml_name = "close",
+ .ml_meth = (PyCFunction)pyrf_evlist__close,
+ .ml_flags = METH_NOARGS,
+ .ml_doc = PyDoc_STR("close the file descriptors.")
+ },
+ {
.ml_name = "poll",
.ml_meth = (PyCFunction)pyrf_evlist__poll,
.ml_flags = METH_VARARGS | METH_KEYWORDS,
@@ -1321,10 +1943,6 @@ static const struct perf_constant perf__constants[] = {
static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel,
PyObject *args, PyObject *kwargs)
{
-#ifndef HAVE_LIBTRACEEVENT
- return NULL;
-#else
- struct tep_event *tp_format;
static char *kwlist[] = { "sys", "name", NULL };
char *sys = NULL;
char *name = NULL;
@@ -1333,12 +1951,7 @@ static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel,
&sys, &name))
return NULL;
- tp_format = trace_event__tp_format(sys, name);
- if (IS_ERR(tp_format))
- return PyLong_FromLong(-1);
-
- return PyLong_FromLong(tp_format->id);
-#endif // HAVE_LIBTRACEEVENT
+ return PyLong_FromLong(tp_pmu__id(sys, name));
}
static PyObject *pyrf_evsel__from_evsel(struct evsel *evsel)
@@ -1357,10 +1970,37 @@ static PyObject *pyrf_evsel__from_evsel(struct evsel *evsel)
return (PyObject *)pevsel;
}
+static int evlist__pos(struct evlist *evlist, struct evsel *evsel)
+{
+ struct evsel *pos;
+ int idx = 0;
+
+ evlist__for_each_entry(evlist, pos) {
+ if (evsel == pos)
+ return idx;
+ idx++;
+ }
+ return -1;
+}
+
+static struct evsel *evlist__at(struct evlist *evlist, int idx)
+{
+ struct evsel *pos;
+ int idx2 = 0;
+
+ evlist__for_each_entry(evlist, pos) {
+ if (idx == idx2)
+ return pos;
+ idx2++;
+ }
+ return NULL;
+}
+
static PyObject *pyrf_evlist__from_evlist(struct evlist *evlist)
{
struct pyrf_evlist *pevlist = PyObject_New(struct pyrf_evlist, &pyrf_evlist__type);
struct evsel *pos;
+ struct rb_node *node;
if (!pevlist)
return NULL;
@@ -1372,6 +2012,50 @@ static PyObject *pyrf_evlist__from_evlist(struct evlist *evlist)
evlist__add(&pevlist->evlist, &pevsel->evsel);
}
+ evlist__for_each_entry(&pevlist->evlist, pos) {
+ struct evsel *leader = evsel__leader(pos);
+
+ if (pos != leader) {
+ int idx = evlist__pos(evlist, leader);
+
+ if (idx >= 0)
+ evsel__set_leader(pos, evlist__at(&pevlist->evlist, idx));
+ else if (leader == NULL)
+ evsel__set_leader(pos, pos);
+ }
+
+ leader = pos->metric_leader;
+
+ if (pos != leader) {
+ int idx = evlist__pos(evlist, leader);
+
+ if (idx >= 0)
+ pos->metric_leader = evlist__at(&pevlist->evlist, idx);
+ else if (leader == NULL)
+ pos->metric_leader = pos;
+ }
+ }
+ metricgroup__copy_metric_events(&pevlist->evlist, /*cgrp=*/NULL,
+ &pevlist->evlist.metric_events,
+ &evlist->metric_events);
+ for (node = rb_first_cached(&pevlist->evlist.metric_events.entries); node;
+ node = rb_next(node)) {
+ struct metric_event *me = container_of(node, struct metric_event, nd);
+ struct list_head *mpos;
+ int idx = evlist__pos(evlist, me->evsel);
+
+ if (idx >= 0)
+ me->evsel = evlist__at(&pevlist->evlist, idx);
+ list_for_each(mpos, &me->head) {
+ struct metric_expr *e = container_of(mpos, struct metric_expr, nd);
+
+ for (int j = 0; e->metric_events[j]; j++) {
+ idx = evlist__pos(evlist, e->metric_events[j]);
+ if (idx >= 0)
+ e->metric_events[j] = evlist__at(&pevlist->evlist, idx);
+ }
+ }
+ }
return (PyObject *)pevlist;
}
@@ -1403,8 +2087,128 @@ static PyObject *pyrf__parse_events(PyObject *self, PyObject *args)
return result;
}
+static PyObject *pyrf__parse_metrics(PyObject *self, PyObject *args)
+{
+ const char *input, *pmu = NULL;
+ struct evlist evlist = {};
+ PyObject *result;
+ PyObject *pcpus = NULL, *pthreads = NULL;
+ struct perf_cpu_map *cpus;
+ struct perf_thread_map *threads;
+ int ret;
+
+ if (!PyArg_ParseTuple(args, "s|sOO", &input, &pmu, &pcpus, &pthreads))
+ return NULL;
+
+ threads = pthreads ? ((struct pyrf_thread_map *)pthreads)->threads : NULL;
+ cpus = pcpus ? ((struct pyrf_cpu_map *)pcpus)->cpus : NULL;
+
+ evlist__init(&evlist, cpus, threads);
+ ret = metricgroup__parse_groups(&evlist, pmu ?: "all", input,
+ /*metric_no_group=*/ false,
+ /*metric_no_merge=*/ false,
+ /*metric_no_threshold=*/ true,
+ /*user_requested_cpu_list=*/ NULL,
+ /*system_wide=*/true,
+ /*hardware_aware_grouping=*/ false);
+ if (ret) {
+ errno = -ret;
+ PyErr_SetFromErrno(PyExc_OSError);
+ return NULL;
+ }
+ result = pyrf_evlist__from_evlist(&evlist);
+ evlist__exit(&evlist);
+ return result;
+}
+
+static PyObject *pyrf__metrics_groups(const struct pmu_metric *pm)
+{
+ PyObject *groups = PyList_New(/*len=*/0);
+ const char *mg = pm->metric_group;
+
+ if (!groups)
+ return NULL;
+
+ while (mg) {
+ PyObject *val = NULL;
+ const char *sep = strchr(mg, ';');
+ size_t len = sep ? (size_t)(sep - mg) : strlen(mg);
+
+ if (len > 0) {
+ val = PyUnicode_FromStringAndSize(mg, len);
+ if (val)
+ PyList_Append(groups, val);
+
+ Py_XDECREF(val);
+ }
+ mg = sep ? sep + 1 : NULL;
+ }
+ return groups;
+}
+
+static int pyrf__metrics_cb(const struct pmu_metric *pm,
+ const struct pmu_metrics_table *table __maybe_unused,
+ void *vdata)
+{
+ PyObject *py_list = vdata;
+ PyObject *dict = PyDict_New();
+ PyObject *key = dict ? PyUnicode_FromString("MetricGroup") : NULL;
+ PyObject *value = key ? pyrf__metrics_groups(pm) : NULL;
+
+ if (!value || PyDict_SetItem(dict, key, value) != 0) {
+ Py_XDECREF(key);
+ Py_XDECREF(value);
+ Py_XDECREF(dict);
+ return -ENOMEM;
+ }
+
+ if (!add_to_dict(dict, "MetricName", pm->metric_name) ||
+ !add_to_dict(dict, "PMU", pm->pmu) ||
+ !add_to_dict(dict, "MetricExpr", pm->metric_expr) ||
+ !add_to_dict(dict, "MetricThreshold", pm->metric_threshold) ||
+ !add_to_dict(dict, "ScaleUnit", pm->unit) ||
+ !add_to_dict(dict, "Compat", pm->compat) ||
+ !add_to_dict(dict, "BriefDescription", pm->desc) ||
+ !add_to_dict(dict, "PublicDescription", pm->long_desc) ||
+ PyList_Append(py_list, dict) != 0) {
+ Py_DECREF(dict);
+ return -ENOMEM;
+ }
+ Py_DECREF(dict);
+ return 0;
+}
+
+static PyObject *pyrf__metrics(PyObject *self, PyObject *args)
+{
+ const struct pmu_metrics_table *table = pmu_metrics_table__find();
+ PyObject *list = PyList_New(/*len=*/0);
+ int ret;
+
+ if (!list)
+ return NULL;
+
+ ret = pmu_metrics_table__for_each_metric(table, pyrf__metrics_cb, list);
+ if (!ret)
+ ret = pmu_for_each_sys_metric(pyrf__metrics_cb, list);
+
+ if (ret) {
+ Py_DECREF(list);
+ errno = -ret;
+ PyErr_SetFromErrno(PyExc_OSError);
+ return NULL;
+ }
+ return list;
+}
+
static PyMethodDef perf__methods[] = {
{
+ .ml_name = "metrics",
+ .ml_meth = (PyCFunction) pyrf__metrics,
+ .ml_flags = METH_NOARGS,
+ .ml_doc = PyDoc_STR(
+ "Returns a list of metrics represented as string values in dictionaries.")
+ },
+ {
.ml_name = "tracepoint",
.ml_meth = (PyCFunction) pyrf__tracepoint,
.ml_flags = METH_VARARGS | METH_KEYWORDS,
@@ -1416,6 +2220,19 @@ static PyMethodDef perf__methods[] = {
.ml_flags = METH_VARARGS,
.ml_doc = PyDoc_STR("Parse a string of events and return an evlist.")
},
+ {
+ .ml_name = "parse_metrics",
+ .ml_meth = (PyCFunction) pyrf__parse_metrics,
+ .ml_flags = METH_VARARGS,
+ .ml_doc = PyDoc_STR(
+ "Parse a string of metrics or metric groups and return an evlist.")
+ },
+ {
+ .ml_name = "pmus",
+ .ml_meth = (PyCFunction) pyrf__pmus,
+ .ml_flags = METH_NOARGS,
+ .ml_doc = PyDoc_STR("Returns a sequence of pmus.")
+ },
{ .ml_name = NULL, }
};
@@ -1442,7 +2259,10 @@ PyMODINIT_FUNC PyInit_perf(void)
pyrf_evlist__setup_types() < 0 ||
pyrf_evsel__setup_types() < 0 ||
pyrf_thread_map__setup_types() < 0 ||
- pyrf_cpu_map__setup_types() < 0)
+ pyrf_cpu_map__setup_types() < 0 ||
+ pyrf_pmu_iterator__setup_types() < 0 ||
+ pyrf_pmu__setup_types() < 0 ||
+ pyrf_counts_values__setup_types() < 0)
return module;
/* The page_size is placed in util object. */
@@ -1487,6 +2307,9 @@ PyMODINIT_FUNC PyInit_perf(void)
Py_INCREF(&pyrf_cpu_map__type);
PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type);
+ Py_INCREF(&pyrf_counts_values__type);
+ PyModule_AddObject(module, "counts_values", (PyObject *)&pyrf_counts_values__type);
+
dict = PyModule_GetDict(module);
if (dict == NULL)
goto error;
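Taken together, the python additions give scripts an end-to-end metric workflow: parse a metric into an evlist, open it, let the workload run, then evaluate the expression per cpu and thread. A sketch (it assumes a metric named "IPC" exists in the PMU tables for the running machine, and that cpu 0/thread -1 are part of the evlist's maps):

    import time
    import perf

    evlist = perf.parse_metrics("IPC")
    evlist.open()
    time.sleep(1)
    for name in evlist.metrics():
        print(name, evlist.compute_metric(name, 0, -1))
    evlist.close()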
diff --git a/tools/perf/util/record.h b/tools/perf/util/record.h
index a6566134e09e..ea3a6c4657ee 100644
--- a/tools/perf/util/record.h
+++ b/tools/perf/util/record.h
@@ -28,6 +28,7 @@ struct record_opts {
bool sample_time_set;
bool sample_cpu;
bool sample_identifier;
+ bool sample_data_src;
bool period;
bool period_set;
bool running_time;
@@ -79,6 +80,7 @@ struct record_opts {
int synth;
int threads_spec;
const char *threads_user_spec;
+ u64 off_cpu_thresh_ns;
};
extern const char * const *record_usage;
diff --git a/tools/perf/util/rwsem.c b/tools/perf/util/rwsem.c
index 5109167f27f7..9d26832398db 100644
--- a/tools/perf/util/rwsem.c
+++ b/tools/perf/util/rwsem.c
@@ -27,6 +27,7 @@ int exit_rwsem(struct rw_semaphore *sem)
}
int down_read(struct rw_semaphore *sem)
+ NO_THREAD_SAFETY_ANALYSIS
{
#if RWS_ERRORCHECK
mutex_lock(&sem->mtx);
@@ -37,6 +38,7 @@ int down_read(struct rw_semaphore *sem)
}
int up_read(struct rw_semaphore *sem)
+ NO_THREAD_SAFETY_ANALYSIS
{
#if RWS_ERRORCHECK
mutex_unlock(&sem->mtx);
@@ -47,6 +49,7 @@ int up_read(struct rw_semaphore *sem)
}
int down_write(struct rw_semaphore *sem)
+ NO_THREAD_SAFETY_ANALYSIS
{
#if RWS_ERRORCHECK
mutex_lock(&sem->mtx);
@@ -57,6 +60,7 @@ int down_write(struct rw_semaphore *sem)
}
int up_write(struct rw_semaphore *sem)
+ NO_THREAD_SAFETY_ANALYSIS
{
#if RWS_ERRORCHECK
mutex_unlock(&sem->mtx);
diff --git a/tools/perf/util/rwsem.h b/tools/perf/util/rwsem.h
index ef5cbc31d967..b102d8143181 100644
--- a/tools/perf/util/rwsem.h
+++ b/tools/perf/util/rwsem.h
@@ -10,7 +10,7 @@
*/
#define RWS_ERRORCHECK 0
-struct rw_semaphore {
+struct LOCKABLE rw_semaphore {
#if RWS_ERRORCHECK
struct mutex mtx;
#else
@@ -21,10 +21,10 @@ struct rw_semaphore {
int init_rwsem(struct rw_semaphore *sem);
int exit_rwsem(struct rw_semaphore *sem);
-int down_read(struct rw_semaphore *sem);
-int up_read(struct rw_semaphore *sem);
+int down_read(struct rw_semaphore *sem) SHARED_LOCK_FUNCTION(sem);
+int up_read(struct rw_semaphore *sem) UNLOCK_FUNCTION(sem);
-int down_write(struct rw_semaphore *sem);
-int up_write(struct rw_semaphore *sem);
+int down_write(struct rw_semaphore *sem) EXCLUSIVE_LOCK_FUNCTION(sem);
+int up_write(struct rw_semaphore *sem) UNLOCK_FUNCTION(sem);
#endif /* _PERF_RWSEM_H */
diff --git a/tools/perf/util/s390-cpumsf.c b/tools/perf/util/s390-cpumsf.c
index 0ce52f0280b8..c17dbe232c54 100644
--- a/tools/perf/util/s390-cpumsf.c
+++ b/tools/perf/util/s390-cpumsf.c
@@ -1142,7 +1142,7 @@ int s390_cpumsf_process_auxtrace_info(union perf_event *event,
sf->machine = &session->machines.host; /* No kvm support */
sf->auxtrace_type = auxtrace_info->type;
sf->pmu_type = PERF_TYPE_RAW;
- sf->machine_type = s390_cpumsf_get_type(session->evlist->env->cpuid);
+ sf->machine_type = s390_cpumsf_get_type(perf_session__env(session)->cpuid);
sf->auxtrace.process_event = s390_cpumsf_process_event;
sf->auxtrace.process_auxtrace_event = s390_cpumsf_process_auxtrace_event;
diff --git a/tools/perf/util/s390-sample-raw.c b/tools/perf/util/s390-sample-raw.c
index 335217bb532b..c6ae0ae8d86a 100644
--- a/tools/perf/util/s390-sample-raw.c
+++ b/tools/perf/util/s390-sample-raw.c
@@ -19,12 +19,14 @@
#include <sys/stat.h>
#include <linux/compiler.h>
+#include <linux/err.h>
#include <asm/byteorder.h>
#include "debug.h"
#include "session.h"
#include "evlist.h"
#include "color.h"
+#include "hashmap.h"
#include "sample-raw.h"
#include "s390-cpumcf-kernel.h"
#include "util/pmu.h"
@@ -132,8 +134,8 @@ static int get_counterset_start(int setnr)
}
struct get_counter_name_data {
- int wanted;
- char *result;
+ long wanted;
+ const char *result;
};
static int get_counter_name_callback(void *vdata, struct pmu_event_info *info)
@@ -151,12 +153,22 @@ static int get_counter_name_callback(void *vdata, struct pmu_event_info *info)
rc = sscanf(event_str, "event=%x", &event_nr);
if (rc == 1 && event_nr == data->wanted) {
- data->result = strdup(info->name);
+ data->result = info->name;
return 1; /* Terminate the search. */
}
return 0;
}
+static size_t get_counter_name_hash_fn(long key, void *ctx __maybe_unused)
+{
+ return key;
+}
+
+static bool get_counter_name_hashmap_equal_fn(long key1, long key2, void *ctx __maybe_unused)
+{
+ return key1 == key2;
+}
+
/* Scan the PMU and extract the logical name of a counter from the event. Input
 * is the counter set and counter number within the set. Construct the event
 * number and use this as key. If they match, return the name of this counter.
@@ -164,17 +176,50 @@ static int get_counter_name_callback(void *vdata, struct pmu_event_info *info)
*/
static char *get_counter_name(int set, int nr, struct perf_pmu *pmu)
{
+ static struct hashmap *cache;
+ static struct perf_pmu *cache_pmu;
+ long cache_key = get_counterset_start(set) + nr;
struct get_counter_name_data data = {
- .wanted = get_counterset_start(set) + nr,
+ .wanted = cache_key,
.result = NULL,
};
+ char *result = NULL;
if (!pmu)
return NULL;
+ if (cache_pmu == pmu && hashmap__find(cache, cache_key, &result))
+ return strdup(result);
+
perf_pmu__for_each_event(pmu, /*skip_duplicate_pmus=*/ true,
&data, get_counter_name_callback);
- return data.result;
+
+ result = strdup(data.result ?: "<unknown>");
+
+ if (cache_pmu == NULL) {
+ struct hashmap *tmp = hashmap__new(get_counter_name_hash_fn,
+ get_counter_name_hashmap_equal_fn,
+ /*ctx=*/NULL);
+
+ if (!IS_ERR(tmp)) {
+ cache = tmp;
+ cache_pmu = pmu;
+ }
+ }
+
+ if (cache_pmu == pmu && result) {
+ char *old_value = NULL, *new_value = strdup(result);
+
+ if (new_value) {
+ hashmap__set(cache, cache_key, new_value, /*old_key=*/NULL, &old_value);
+ /*
+ * Free in case of a race, but resizing would be broken
+ * in that case.
+ */
+ free(old_value);
+ }
+ }
+ return result;
}
static void s390_cpumcfdg_dump(struct perf_pmu *pmu, struct perf_sample *sample)
diff --git a/tools/perf/util/sample-raw.c b/tools/perf/util/sample-raw.c
index f3f6bd9d290e..bcf442574d6e 100644
--- a/tools/perf/util/sample-raw.c
+++ b/tools/perf/util/sample-raw.c
@@ -6,15 +6,16 @@
#include "env.h"
#include "header.h"
#include "sample-raw.h"
+#include "session.h"
/*
* Check platform the perf data file was created on and perform platform
* specific interpretation.
*/
-void evlist__init_trace_event_sample_raw(struct evlist *evlist)
+void evlist__init_trace_event_sample_raw(struct evlist *evlist, struct perf_env *env)
{
- const char *arch_pf = perf_env__arch(evlist->env);
- const char *cpuid = perf_env__cpuid(evlist->env);
+ const char *arch_pf = perf_env__arch(env);
+ const char *cpuid = perf_env__cpuid(env);
if (arch_pf && !strcmp("s390", arch_pf))
evlist->trace_event_sample_raw = evlist__s390_sample_raw;
diff --git a/tools/perf/util/sample-raw.h b/tools/perf/util/sample-raw.h
index ea01c5811503..896e9a87e373 100644
--- a/tools/perf/util/sample-raw.h
+++ b/tools/perf/util/sample-raw.h
@@ -11,5 +11,5 @@ void evlist__s390_sample_raw(struct evlist *evlist, union perf_event *event,
bool evlist__has_amd_ibs(struct evlist *evlist);
void evlist__amd_sample_raw(struct evlist *evlist, union perf_event *event,
struct perf_sample *sample);
-void evlist__init_trace_event_sample_raw(struct evlist *evlist);
+void evlist__init_trace_event_sample_raw(struct evlist *evlist, struct perf_env *env);
#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/sample.h b/tools/perf/util/sample.h
index 0e96240052e9..a8307b20a9ea 100644
--- a/tools/perf/util/sample.h
+++ b/tools/perf/util/sample.h
@@ -104,11 +104,11 @@ struct perf_sample {
u8 cpumode;
u16 misc;
u16 ins_lat;
- union {
- u16 p_stage_cyc;
- u16 retire_lat;
- };
+ /** @weight3: On x86 holds retire_lat, on powerpc holds p_stage_cyc. */
+ u16 weight3;
bool no_hw_idx; /* No hw_idx collected in branch_stack */
+ bool deferred_callchain; /* Has deferred user callchains */
+ u64 deferred_cookie;
char insn[MAX_INSN];
void *raw_data;
struct ip_callchain *callchain;
diff --git a/tools/perf/util/scripting-engines/Build b/tools/perf/util/scripting-engines/Build
index 2282fe3772f3..24f087b0cd11 100644
--- a/tools/perf/util/scripting-engines/Build
+++ b/tools/perf/util/scripting-engines/Build
@@ -3,7 +3,7 @@ ifeq ($(CONFIG_LIBTRACEEVENT),y)
endif
perf-util-$(CONFIG_LIBPYTHON) += trace-event-python.o
-CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-nested-externs -Wno-undef -Wno-switch-default -Wno-bad-function-cast -Wno-declaration-after-statement -Wno-switch-enum
+CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-nested-externs -Wno-undef -Wno-switch-default -Wno-bad-function-cast -Wno-declaration-after-statement -Wno-switch-enum -Wno-thread-safety-analysis
# -Wno-declaration-after-statement: The python headers have mixed code with declarations (decls after asserts, for instance)
CFLAGS_trace-event-python.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-deprecated-declarations -Wno-switch-enum -Wno-declaration-after-statement
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 520729e78965..6655c0bbe0d8 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -780,14 +780,13 @@ static void set_sym_in_dict(PyObject *dict, struct addr_location *al,
const char *sym_field, const char *symoff_field,
const char *map_pgoff)
{
- char sbuild_id[SBUILD_ID_SIZE];
-
if (al->map) {
+ char sbuild_id[SBUILD_ID_SIZE];
struct dso *dso = map__dso(al->map);
pydict_set_item_string_decref(dict, dso_field,
_PyUnicode_FromString(dso__name(dso)));
- build_id__sprintf(dso__bid(dso), sbuild_id);
+ build_id__snprintf(dso__bid(dso), sbuild_id, sizeof(sbuild_id));
pydict_set_item_string_decref(dict, dso_bid_field,
_PyUnicode_FromString(sbuild_id));
pydict_set_item_string_decref(dict, dso_map_start,
@@ -1238,7 +1237,7 @@ static int python_export_dso(struct db_export *dbe, struct dso *dso,
char sbuild_id[SBUILD_ID_SIZE];
PyObject *t;
- build_id__sprintf(dso__bid(dso), sbuild_id);
+ build_id__snprintf(dso__bid(dso), sbuild_id, sizeof(sbuild_id));
t = tuple_new(5);
@@ -1306,7 +1305,7 @@ static void python_export_sample_table(struct db_export *dbe,
tuple_set_d64(t, 0, es->db_id);
tuple_set_d64(t, 1, es->evsel->db_id);
- tuple_set_d64(t, 2, maps__machine(es->al->maps)->db_id);
+ tuple_set_d64(t, 2, maps__machine(thread__maps(es->al->thread))->db_id);
tuple_set_d64(t, 3, thread__db_id(es->al->thread));
tuple_set_d64(t, 4, es->comm_db_id);
tuple_set_d64(t, 5, es->dso_db_id);
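Note: the build_id__sprintf() to build_id__snprintf() conversion threads the destination size through every call, so the hex digest can never overrun the stack buffer. A freestanding sketch of that bounded-formatting contract (bid_snprintf() is a hypothetical stand-in, not the perf helper):

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in for build_id__snprintf(): hex-format up to
 * len raw bytes, never writing past size, always NUL-terminating. */
static int bid_snprintf(char *bf, size_t size, const unsigned char *data, size_t len)
{
	size_t off = 0;

	if (size)
		bf[0] = '\0';
	for (size_t i = 0; i < len && off + 3 <= size; i++)
		off += snprintf(bf + off, size - off, "%02x", data[i]);
	return (int)off;
}

int main(void)
{
	const unsigned char bid[] = { 0xde, 0xad, 0xbe, 0xef };
	char sbuild_id[2 * sizeof(bid) + 1];

	bid_snprintf(sbuild_id, sizeof(sbuild_id), bid, sizeof(bid));
	puts(sbuild_id);	/* prints: deadbeef */
	return 0;
}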
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 60fb9997ea0d..4236503c8f6c 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -12,6 +12,7 @@
#include <sys/types.h>
#include <sys/mman.h>
#include <perf/cpumap.h>
+#include <perf/event.h>
#include "map_symbol.h"
#include "branch.h"
@@ -137,7 +138,8 @@ static int ordered_events__deliver_event(struct ordered_events *oe,
struct perf_session *__perf_session__new(struct perf_data *data,
struct perf_tool *tool,
- bool trace_event_repipe)
+ bool trace_event_repipe,
+ struct perf_env *host_env)
{
int ret = -ENOMEM;
struct perf_session *session = zalloc(sizeof(*session));
@@ -176,7 +178,7 @@ struct perf_session *__perf_session__new(struct perf_data *data,
perf_session__set_comm_exec(session);
}
- evlist__init_trace_event_sample_raw(session->evlist);
+ evlist__init_trace_event_sample_raw(session->evlist, &session->header.env);
/* Open the directory data. */
if (data->is_dir) {
@@ -190,8 +192,11 @@ struct perf_session *__perf_session__new(struct perf_data *data,
symbol_conf.kallsyms_name = perf_data__kallsyms_name(data);
}
} else {
- session->machines.host.env = &perf_env;
+ assert(host_env != NULL);
+ session->machines.host.env = host_env;
}
+ if (session->evlist)
+ session->evlist->session = session;
session->machines.host.single_address_space =
perf_env__single_address_space(session->machines.host.env);
@@ -715,6 +720,7 @@ static perf_event__swap_op perf_event__swap_ops[] = {
[PERF_RECORD_CGROUP] = perf_event__cgroup_swap,
[PERF_RECORD_TEXT_POKE] = perf_event__text_poke_swap,
[PERF_RECORD_AUX_OUTPUT_HW_ID] = perf_event__all64_swap,
+ [PERF_RECORD_CALLCHAIN_DEFERRED] = perf_event__all64_swap,
[PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
@@ -849,6 +855,9 @@ static void callchain__printf(struct evsel *evsel,
for (i = 0; i < callchain->nr; i++)
printf("..... %2d: %016" PRIx64 "\n",
i, callchain->ips[i]);
+
+ if (sample->deferred_callchain)
+ printf("...... (deferred)\n");
}
static void branch_stack__printf(struct perf_sample *sample,
@@ -1094,7 +1103,7 @@ static void dump_sample(struct evsel *evsel, union perf_event *event,
printf("... weight: %" PRIu64 "", sample->weight);
if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) {
printf(",0x%"PRIx16"", sample->ins_lat);
- printf(",0x%"PRIx16"", sample->p_stage_cyc);
+ printf(",0x%"PRIx16"", sample->weight3);
}
printf("\n");
}
@@ -1118,6 +1127,19 @@ static void dump_sample(struct evsel *evsel, union perf_event *event,
sample_read__printf(sample, evsel->core.attr.read_format);
}
+static void dump_deferred_callchain(struct evsel *evsel, union perf_event *event,
+ struct perf_sample *sample)
+{
+ if (!dump_trace)
+ return;
+
+ printf("(IP, 0x%x): %d/%d: %#" PRIx64 "\n",
+ event->header.misc, sample->pid, sample->tid, sample->deferred_cookie);
+
+ if (evsel__has_callchain(evsel))
+ callchain__printf(evsel, sample);
+}
+
static void dump_read(struct evsel *evsel, union perf_event *event)
{
struct perf_record_read *read_event = &event->read;
@@ -1263,6 +1285,106 @@ static int evlist__deliver_sample(struct evlist *evlist, const struct perf_tool
per_thread);
}
+/*
+ * Samples with deferred callchains should wait for the next matching
+ * PERF_RECORD_CALLCHAIN_DEFERRED entry. Keep the events in a list and
+ * deliver them once the matching callchains arrive.
+ */
+struct deferred_event {
+ struct list_head list;
+ union perf_event *event;
+};
+
+/*
+ * This is called when a deferred callchain record comes up. Find all matching
+ * samples, merge the callchains and process them.
+ */
+static int evlist__deliver_deferred_callchain(struct evlist *evlist,
+ const struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct deferred_event *de, *tmp;
+ struct evsel *evsel;
+ int ret = 0;
+
+ if (!tool->merge_deferred_callchains) {
+ evsel = evlist__id2evsel(evlist, sample->id);
+ return tool->callchain_deferred(tool, event, sample,
+ evsel, machine);
+ }
+
+ list_for_each_entry_safe(de, tmp, &evlist->deferred_samples, list) {
+ struct perf_sample orig_sample;
+
+ ret = evlist__parse_sample(evlist, de->event, &orig_sample);
+ if (ret < 0) {
+ pr_err("failed to parse original sample\n");
+ break;
+ }
+
+ if (sample->tid != orig_sample.tid)
+ continue;
+
+ if (event->callchain_deferred.cookie == orig_sample.deferred_cookie)
+ sample__merge_deferred_callchain(&orig_sample, sample);
+ else
+ orig_sample.deferred_callchain = false;
+
+ evsel = evlist__id2evsel(evlist, orig_sample.id);
+ ret = evlist__deliver_sample(evlist, tool, de->event,
+ &orig_sample, evsel, machine);
+
+ if (orig_sample.deferred_callchain)
+ free(orig_sample.callchain);
+
+ list_del(&de->list);
+ free(de->event);
+ free(de);
+
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+/*
+ * This is called at the end of the data processing for the session. Flush the
+ * remaining samples as there is no hope of matching their deferred callchains.
+ */
+static int session__flush_deferred_samples(struct perf_session *session,
+ const struct perf_tool *tool)
+{
+ struct evlist *evlist = session->evlist;
+ struct machine *machine = &session->machines.host;
+ struct deferred_event *de, *tmp;
+ struct evsel *evsel;
+ int ret = 0;
+
+ list_for_each_entry_safe(de, tmp, &evlist->deferred_samples, list) {
+ struct perf_sample sample;
+
+ ret = evlist__parse_sample(evlist, de->event, &sample);
+ if (ret < 0) {
+ pr_err("failed to parse original sample\n");
+ break;
+ }
+
+ evsel = evlist__id2evsel(evlist, sample.id);
+ ret = evlist__deliver_sample(evlist, tool, de->event,
+ &sample, evsel, machine);
+
+ list_del(&de->list);
+ free(de->event);
+ free(de);
+
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
static int machines__deliver_event(struct machines *machines,
struct evlist *evlist,
union perf_event *event,
@@ -1291,6 +1413,22 @@ static int machines__deliver_event(struct machines *machines,
return 0;
}
dump_sample(evsel, event, sample, perf_env__arch(machine->env));
+ if (sample->deferred_callchain && tool->merge_deferred_callchains) {
+ struct deferred_event *de = malloc(sizeof(*de));
+ size_t sz = event->header.size;
+
+ if (de == NULL)
+ return -ENOMEM;
+
+ de->event = malloc(sz);
+ if (de->event == NULL) {
+ free(de);
+ return -ENOMEM;
+ }
+ memcpy(de->event, event, sz);
+ list_add_tail(&de->list, &evlist->deferred_samples);
+ return 0;
+ }
return evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
case PERF_RECORD_MMAP:
return tool->mmap(tool, event, sample, machine);
@@ -1348,6 +1486,10 @@ static int machines__deliver_event(struct machines *machines,
return tool->text_poke(tool, event, sample, machine);
case PERF_RECORD_AUX_OUTPUT_HW_ID:
return tool->aux_output_hw_id(tool, event, sample, machine);
+ case PERF_RECORD_CALLCHAIN_DEFERRED:
+ dump_deferred_callchain(evsel, event, sample);
+ return evlist__deliver_deferred_callchain(evlist, tool, event,
+ sample, machine);
default:
++evlist->stats.nr_unknown_events;
return -1;
@@ -1397,10 +1539,12 @@ static s64 perf_session__process_user_event(struct perf_session *session,
const struct perf_tool *tool = session->tool;
struct perf_sample sample;
int fd = perf_data__fd(session->data);
- int err;
+ s64 err;
perf_sample__init(&sample, /*all=*/true);
- if (event->header.type != PERF_RECORD_COMPRESSED || perf_tool__compressed_is_stub(tool))
+ if ((event->header.type != PERF_RECORD_COMPRESSED &&
+ event->header.type != PERF_RECORD_COMPRESSED2) ||
+ perf_tool__compressed_is_stub(tool))
dump_event(session->evlist, event, file_offset, &sample, file_path);
/* These events are processed right away */
@@ -1430,19 +1574,19 @@ static s64 perf_session__process_user_event(struct perf_session *session,
*/
if (!perf_data__is_pipe(session->data))
lseek(fd, file_offset, SEEK_SET);
- err = tool->tracing_data(session, event);
+ err = tool->tracing_data(tool, session, event);
break;
case PERF_RECORD_HEADER_BUILD_ID:
- err = tool->build_id(session, event);
+ err = tool->build_id(tool, session, event);
break;
case PERF_RECORD_FINISHED_ROUND:
err = tool->finished_round(tool, event, oe);
break;
case PERF_RECORD_ID_INDEX:
- err = tool->id_index(session, event);
+ err = tool->id_index(tool, session, event);
break;
case PERF_RECORD_AUXTRACE_INFO:
- err = tool->auxtrace_info(session, event);
+ err = tool->auxtrace_info(tool, session, event);
break;
case PERF_RECORD_AUXTRACE:
/*
@@ -1452,41 +1596,45 @@ static s64 perf_session__process_user_event(struct perf_session *session,
*/
if (!perf_data__is_pipe(session->data))
lseek(fd, file_offset + event->header.size, SEEK_SET);
- err = tool->auxtrace(session, event);
+ err = tool->auxtrace(tool, session, event);
break;
case PERF_RECORD_AUXTRACE_ERROR:
perf_session__auxtrace_error_inc(session, event);
- err = tool->auxtrace_error(session, event);
+ err = tool->auxtrace_error(tool, session, event);
break;
case PERF_RECORD_THREAD_MAP:
- err = tool->thread_map(session, event);
+ err = tool->thread_map(tool, session, event);
break;
case PERF_RECORD_CPU_MAP:
- err = tool->cpu_map(session, event);
+ err = tool->cpu_map(tool, session, event);
break;
case PERF_RECORD_STAT_CONFIG:
- err = tool->stat_config(session, event);
+ err = tool->stat_config(tool, session, event);
break;
case PERF_RECORD_STAT:
- err = tool->stat(session, event);
+ err = tool->stat(tool, session, event);
break;
case PERF_RECORD_STAT_ROUND:
- err = tool->stat_round(session, event);
+ err = tool->stat_round(tool, session, event);
break;
case PERF_RECORD_TIME_CONV:
session->time_conv = event->time_conv;
- err = tool->time_conv(session, event);
+ err = tool->time_conv(tool, session, event);
break;
case PERF_RECORD_HEADER_FEATURE:
- err = tool->feature(session, event);
+ err = tool->feature(tool, session, event);
break;
case PERF_RECORD_COMPRESSED:
- err = tool->compressed(session, event, file_offset, file_path);
+ case PERF_RECORD_COMPRESSED2:
+ err = tool->compressed(tool, session, event, file_offset, file_path);
if (err)
dump_event(session->evlist, event, file_offset, &sample, file_path);
break;
case PERF_RECORD_FINISHED_INIT:
- err = tool->finished_init(session, event);
+ err = tool->finished_init(tool, session, event);
+ break;
+ case PERF_RECORD_BPF_METADATA:
+ err = tool->bpf_metadata(tool, session, event);
break;
default:
err = -EINVAL;
@@ -1639,8 +1787,17 @@ static s64 perf_session__process_event(struct perf_session *session,
if (session->header.needs_swap)
event_swap(event, evlist__sample_id_all(evlist));
- if (event->header.type >= PERF_RECORD_HEADER_MAX)
- return -EINVAL;
+ if (event->header.type >= PERF_RECORD_HEADER_MAX) {
+ /* perf does not support unaligned events, so stop here. */
+ if (event->header.size % sizeof(u64))
+ return -EINVAL;
+
+ /* This perf is outdated and does not support the latest event type. */
+ ui__warning("Unsupported header type %u, please consider updating perf.\n",
+ event->header.type);
+ /* Skip the unsupported event by returning its size. */
+ return event->header.size;
+ }
events_stats__inc(&evlist->stats, event->header.type);
@@ -1923,6 +2080,9 @@ done:
err = ordered_events__flush(oe, OE_FLUSH__FINAL);
if (err)
goto out_err;
+ err = session__flush_deferred_samples(session, tool);
+ if (err)
+ goto out_err;
err = auxtrace__flush_events(session, tool);
if (err)
goto out_err;
@@ -2269,6 +2429,9 @@ static int __perf_session__process_events(struct perf_session *session)
err = auxtrace__flush_events(session, tool);
if (err)
goto out_err;
+ err = session__flush_deferred_samples(session, tool);
+ if (err)
+ goto out_err;
err = perf_session__flush_thread_stacks(session);
out_err:
ui_progress__finish();
@@ -2389,6 +2552,10 @@ static int __perf_session__process_dir_events(struct perf_session *session)
if (ret)
goto out_err;
+ ret = session__flush_deferred_samples(session, tool);
+ if (ret)
+ goto out_err;
+
ret = perf_session__flush_thread_stacks(session);
out_err:
ui_progress__finish();
@@ -2542,7 +2709,7 @@ int perf_session__cpu_bitmap(struct perf_session *session,
{
int i, err = -1;
struct perf_cpu_map *map;
- int nr_cpus = min(session->header.env.nr_cpus_avail, MAX_NR_CPUS);
+ int nr_cpus = min(perf_session__env(session)->nr_cpus_avail, MAX_NR_CPUS);
struct perf_cpu cpu;
for (i = 0; i < PERF_TYPE_MAX; ++i) {
@@ -2627,7 +2794,8 @@ static int perf_session__set_guest_cpu(struct perf_session *session, pid_t pid,
return 0;
}
-int perf_event__process_id_index(struct perf_session *session,
+int perf_event__process_id_index(const struct perf_tool *tool __maybe_unused,
+ struct perf_session *session,
union perf_event *event)
{
struct evlist *evlist = session->evlist;
@@ -2731,3 +2899,8 @@ int perf_session__dsos_hit_all(struct perf_session *session)
return 0;
}
+
+struct perf_env *perf_session__env(struct perf_session *session)
+{
+ return &session->header.env;
+}
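Note: the deferred-callchain flow above parks a copy of each flagged sample on evlist->deferred_samples, merges it when a PERF_RECORD_CALLCHAIN_DEFERRED record with the matching tid/cookie arrives, and flushes leftovers at end of session. A self-contained sketch of that park/match/flush shape, using a plain singly linked list instead of the kernel list_head (all names here are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct deferred {	/* parked sample awaiting its callchain */
	uint64_t cookie;
	uint32_t tid;
	struct deferred *next;
};

static struct deferred *pending;

static void park(uint32_t tid, uint64_t cookie)
{
	struct deferred *de = malloc(sizeof(*de));

	if (!de)
		return;
	de->tid = tid;
	de->cookie = cookie;
	de->next = pending;
	pending = de;
}

/* Deliver (here: print) every parked sample whose tid/cookie match. */
static void deliver_matching(uint32_t tid, uint64_t cookie)
{
	struct deferred **pp = &pending;

	while (*pp) {
		struct deferred *de = *pp;

		if (de->tid == tid && de->cookie == cookie) {
			printf("merged sample tid=%u cookie=%llu\n",
			       de->tid, (unsigned long long)de->cookie);
			*pp = de->next;	/* unlink, then free */
			free(de);
		} else {
			pp = &de->next;
		}
	}
}

static void flush_all(void)	/* end of session: no hope of a match */
{
	while (pending) {
		struct deferred *de = pending;

		pending = de->next;
		printf("flushed sample tid=%u (no callchain)\n", de->tid);
		free(de);
	}
}

int main(void)
{
	park(100, 7);
	park(200, 9);
	deliver_matching(100, 7);	/* merges the first sample */
	flush_all();			/* drains tid 200 */
	return 0;
}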
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index db1c120a9e67..22d3ff877e83 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -107,12 +107,13 @@ struct perf_tool;
struct perf_session *__perf_session__new(struct perf_data *data,
struct perf_tool *tool,
- bool trace_event_repipe);
+ bool trace_event_repipe,
+ struct perf_env *host_env);
static inline struct perf_session *perf_session__new(struct perf_data *data,
struct perf_tool *tool)
{
- return __perf_session__new(data, tool, /*trace_event_repipe=*/false);
+ return __perf_session__new(data, tool, /*trace_event_repipe=*/false, /*host_env=*/NULL);
}
void perf_session__delete(struct perf_session *session);
@@ -201,11 +202,14 @@ int perf_session__deliver_synth_attr_event(struct perf_session *session,
int perf_session__dsos_hit_all(struct perf_session *session);
-int perf_event__process_id_index(struct perf_session *session,
+int perf_event__process_id_index(const struct perf_tool *tool,
+ struct perf_session *session,
union perf_event *event);
int perf_event__process_finished_round(const struct perf_tool *tool,
union perf_event *event,
struct ordered_events *oe);
+struct perf_env *perf_session__env(struct perf_session *session);
+
#endif /* __PERF_SESSION_H */
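Note: perf_session__env() is deliberately trivial; it hides the fact that the env currently lives in session->header.env so call sites survive a later relocation of that field. The shape of the indirection, reduced to a toy (types illustrative):

#include <stdio.h>

struct env { int nr_cpus_avail; };
struct header { struct env env; };
struct session { struct header header; };

/* Accessor: callers no longer depend on env living inside header. */
static struct env *session_env(struct session *s)
{
	return &s->header.env;
}

int main(void)
{
	struct session s = { .header = { .env = { .nr_cpus_avail = 8 } } };

	printf("cpus: %d\n", session_env(&s)->nr_cpus_avail);
	return 0;
}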
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
index dd289d15acfd..b65b1792ca05 100644
--- a/tools/perf/util/setup.py
+++ b/tools/perf/util/setup.py
@@ -1,6 +1,7 @@
from os import getenv, path
from subprocess import Popen, PIPE
from re import sub
+import shlex
cc = getenv("CC")
assert cc, "Environment variable CC not set"
@@ -22,8 +23,17 @@ assert srctree, "Environment variable srctree, for the Linux sources, not set"
src_feature_tests = f'{srctree}/tools/build/feature'
def clang_has_option(option):
- cc_output = Popen([cc, cc_options + option, path.join(src_feature_tests, "test-hello.c") ], stderr=PIPE).stderr.readlines()
- return [o for o in cc_output if ((b"unknown argument" in o) or (b"is not supported" in o) or (b"unknown warning option" in o))] == [ ]
+ error_substrings = (
+ b"unknown argument",
+ b"is not supported",
+ b"unknown warning option"
+ )
+ cmd = shlex.split(f"{cc} {cc_options} {option}") + [
+ "-o", "/dev/null",
+ path.join(src_feature_tests, "test-hello.c")
+ ]
+ cc_output = Popen(cmd, stderr=PIPE).stderr.readlines()
+ return not any(any(error in line for error in error_substrings) for line in cc_output)
if cc_is_clang:
from sysconfig import get_config_vars
diff --git a/tools/perf/util/sha1.c b/tools/perf/util/sha1.c
new file mode 100644
index 000000000000..7032fa4ff3fd
--- /dev/null
+++ b/tools/perf/util/sha1.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SHA-1 message digest algorithm
+ *
+ * Copyright 2025 Google LLC
+ */
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/unaligned.h>
+#include <string.h>
+
+#include "sha1.h"
+
+#define SHA1_BLOCK_SIZE 64
+
+static const u32 sha1_K[4] = { 0x5A827999, 0x6ED9EBA1, 0x8F1BBCDC, 0xCA62C1D6 };
+
+#define SHA1_ROUND(i, a, b, c, d, e) \
+ do { \
+ if ((i) >= 16) \
+ w[i] = rol32(w[(i) - 16] ^ w[(i) - 14] ^ w[(i) - 8] ^ \
+ w[(i) - 3], \
+ 1); \
+ e += w[i] + rol32(a, 5) + sha1_K[(i) / 20]; \
+ if ((i) < 20) \
+ e += (b & (c ^ d)) ^ d; \
+ else if ((i) < 40 || (i) >= 60) \
+ e += b ^ c ^ d; \
+ else \
+ e += (c & d) ^ (b & (c ^ d)); \
+ b = rol32(b, 30); \
+ /* The new (a, b, c, d, e) is the old (e, a, b, c, d). */ \
+ } while (0)
+
+#define SHA1_5ROUNDS(i) \
+ do { \
+ SHA1_ROUND((i) + 0, a, b, c, d, e); \
+ SHA1_ROUND((i) + 1, e, a, b, c, d); \
+ SHA1_ROUND((i) + 2, d, e, a, b, c); \
+ SHA1_ROUND((i) + 3, c, d, e, a, b); \
+ SHA1_ROUND((i) + 4, b, c, d, e, a); \
+ } while (0)
+
+#define SHA1_20ROUNDS(i) \
+ do { \
+ SHA1_5ROUNDS((i) + 0); \
+ SHA1_5ROUNDS((i) + 5); \
+ SHA1_5ROUNDS((i) + 10); \
+ SHA1_5ROUNDS((i) + 15); \
+ } while (0)
+
+static void sha1_blocks(u32 h[5], const u8 *data, size_t nblocks)
+{
+ while (nblocks--) {
+ u32 a = h[0];
+ u32 b = h[1];
+ u32 c = h[2];
+ u32 d = h[3];
+ u32 e = h[4];
+ u32 w[80];
+
+ for (int i = 0; i < 16; i++)
+ w[i] = get_unaligned_be32(&data[i * 4]);
+ SHA1_20ROUNDS(0);
+ SHA1_20ROUNDS(20);
+ SHA1_20ROUNDS(40);
+ SHA1_20ROUNDS(60);
+
+ h[0] += a;
+ h[1] += b;
+ h[2] += c;
+ h[3] += d;
+ h[4] += e;
+ data += SHA1_BLOCK_SIZE;
+ }
+}
+
+/* Calculate the SHA-1 message digest of the given data. */
+void sha1(const void *data, size_t len, u8 out[SHA1_DIGEST_SIZE])
+{
+ u32 h[5] = { 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476,
+ 0xC3D2E1F0 };
+ u8 final_data[2 * SHA1_BLOCK_SIZE] = { 0 };
+ size_t final_len = len % SHA1_BLOCK_SIZE;
+
+ sha1_blocks(h, data, len / SHA1_BLOCK_SIZE);
+
+ memcpy(final_data, data + len - final_len, final_len);
+ final_data[final_len] = 0x80;
+ final_len = round_up(final_len + 9, SHA1_BLOCK_SIZE);
+ put_unaligned_be64((u64)len * 8, &final_data[final_len - 8]);
+
+ sha1_blocks(h, final_data, final_len / SHA1_BLOCK_SIZE);
+
+ for (int i = 0; i < 5; i++)
+ put_unaligned_be32(h[i], &out[i * 4]);
+}
diff --git a/tools/perf/util/sha1.h b/tools/perf/util/sha1.h
new file mode 100644
index 000000000000..e92c9966e1d5
--- /dev/null
+++ b/tools/perf/util/sha1.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#include <linux/types.h>
+
+#define SHA1_DIGEST_SIZE 20
+
+void sha1(const void *data, size_t len, u8 out[SHA1_DIGEST_SIZE]);
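Note: assuming the object is linked against the new sha1.c, the API is a single call; the known test vector SHA-1("abc") must come out as a9993e364706816aba3e25717850c26c9cd0d89d. A usage sketch (the local prototype mirrors sha1.h, with u8 spelled as unsigned char):

#include <stdio.h>
#include <string.h>

#define SHA1_DIGEST_SIZE 20

/* Prototype as declared in the new sha1.h. */
void sha1(const void *data, size_t len, unsigned char out[SHA1_DIGEST_SIZE]);

int main(void)
{
	unsigned char out[SHA1_DIGEST_SIZE];

	/* Known test vector: SHA-1("abc") = a9993e36...9cd0d89d */
	sha1("abc", strlen("abc"), out);
	for (int i = 0; i < SHA1_DIGEST_SIZE; i++)
		printf("%02x", out[i]);
	putchar('\n');
	return 0;
}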
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index c51049087e4e..f3a565b0e230 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -141,6 +141,43 @@ struct sort_entry sort_thread = {
.se_width_idx = HISTC_THREAD,
};
+/* --sort tgid */
+
+static int64_t
+sort__tgid_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return thread__pid(right->thread) - thread__pid(left->thread);
+}
+
+static int hist_entry__tgid_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ int tgid = thread__pid(he->thread);
+ const char *comm = NULL;
+
+ /* display comm of the thread-group leader */
+ if (thread__pid(he->thread) == thread__tid(he->thread)) {
+ comm = thread__comm_str(he->thread);
+ } else {
+ struct maps *maps = thread__maps(he->thread);
+ struct thread *leader = machine__find_thread(maps__machine(maps),
+ tgid, tgid);
+ if (leader) {
+ comm = thread__comm_str(leader);
+ thread__put(leader);
+ }
+ }
+ width = max(7U, width) - 8;
+ return repsep_snprintf(bf, size, "%7d:%-*.*s", tgid, width, width, comm ?: "");
+}
+
+struct sort_entry sort_tgid = {
+ .se_header = " Tgid:Command",
+ .se_cmp = sort__tgid_cmp,
+ .se_snprintf = hist_entry__tgid_snprintf,
+ .se_width_idx = HISTC_TGID,
+};
+
/* --sort simd */
static int64_t
@@ -1709,22 +1746,27 @@ sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
if (rc)
return rc;
/*
- * Addresses with no major/minor numbers are assumed to be
+ * Addresses with no major/minor numbers or build ID are assumed to be
* anonymous in userspace. Sort those on pid then address.
*
* The kernel and non-zero major/minor mapped areas are
* assumed to be unity mapped. Sort those on address.
*/
+ if (left->cpumode != PERF_RECORD_MISC_KERNEL && (map__flags(l_map) & MAP_SHARED) == 0) {
+ const struct dso_id *dso_id = dso__id_const(l_dso);
- if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
- (!(map__flags(l_map) & MAP_SHARED)) && !dso__id(l_dso)->maj && !dso__id(l_dso)->min &&
- !dso__id(l_dso)->ino && !dso__id(l_dso)->ino_generation) {
- /* userspace anonymous */
+ if (!dso_id->mmap2_valid)
+ dso_id = dso__id_const(r_dso);
- if (thread__pid(left->thread) > thread__pid(right->thread))
- return -1;
- if (thread__pid(left->thread) < thread__pid(right->thread))
- return 1;
+ if (!build_id__is_defined(&dso_id->build_id) &&
+ (!dso_id->mmap2_valid || (dso_id->maj == 0 && dso_id->min == 0))) {
+ /* userspace anonymous */
+
+ if (thread__pid(left->thread) > thread__pid(right->thread))
+ return -1;
+ if (thread__pid(left->thread) < thread__pid(right->thread))
+ return 1;
+ }
}
addr:
@@ -1749,6 +1791,7 @@ static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
if (he->mem_info) {
struct map *map = mem_info__daddr(he->mem_info)->ms.map;
struct dso *dso = map ? map__dso(map) : NULL;
+ const struct dso_id *dso_id = dso ? dso__id_const(dso) : &dso_id_empty;
addr = cl_address(mem_info__daddr(he->mem_info)->al_addr, chk_double_cl);
ms = &mem_info__daddr(he->mem_info)->ms;
@@ -1757,8 +1800,7 @@ static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
map && !(map__prot(map) & PROT_EXEC) &&
(map__flags(map) & MAP_SHARED) &&
- (dso__id(dso)->maj || dso__id(dso)->min || dso__id(dso)->ino ||
- dso__id(dso)->ino_generation))
+ (!dso_id->mmap2_valid || (dso_id->maj == 0 && dso_id->min == 0)))
level = 's';
else if (!map)
level = 'X';
@@ -1842,21 +1884,20 @@ struct sort_entry sort_global_ins_lat = {
static int64_t
sort__p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right)
{
- return left->p_stage_cyc - right->p_stage_cyc;
+ return left->weight3 - right->weight3;
}
static int hist_entry__global_p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
- return repsep_snprintf(bf, size, "%-*u", width,
- he->p_stage_cyc * he->stat.nr_events);
+ return repsep_snprintf(bf, size, "%-*u", width, he->weight3 * he->stat.nr_events);
}
static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
- return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc);
+ return repsep_snprintf(bf, size, "%-*u", width, he->weight3);
}
struct sort_entry sort_local_p_stage_cyc = {
@@ -2489,25 +2530,51 @@ struct sort_dimension {
int taken;
};
-int __weak arch_support_sort_key(const char *sort_key __maybe_unused)
+static int arch_support_sort_key(const char *sort_key, struct perf_env *env)
{
+ const char *arch = perf_env__arch(env);
+
+ if (!strcmp("x86", arch) || !strcmp("powerpc", arch)) {
+ if (!strcmp(sort_key, "p_stage_cyc"))
+ return 1;
+ if (!strcmp(sort_key, "local_p_stage_cyc"))
+ return 1;
+ }
return 0;
}
-const char * __weak arch_perf_header_entry(const char *se_header)
-{
+static const char *arch_perf_header_entry(const char *se_header, struct perf_env *env)
+{
+ const char *arch = perf_env__arch(env);
+
+ if (!strcmp("x86", arch)) {
+ if (!strcmp(se_header, "Local Pipeline Stage Cycle"))
+ return "Local Retire Latency";
+ else if (!strcmp(se_header, "Pipeline Stage Cycle"))
+ return "Retire Latency";
+ } else if (!strcmp("powerpc", arch)) {
+ if (!strcmp(se_header, "Local INSTR Latency"))
+ return "Finish Cyc";
+ else if (!strcmp(se_header, "INSTR Latency"))
+ return "Global Finish_cyc";
+ else if (!strcmp(se_header, "Local Pipeline Stage Cycle"))
+ return "Dispatch Cyc";
+ else if (!strcmp(se_header, "Pipeline Stage Cycle"))
+ return "Global Dispatch_cyc";
+ }
return se_header;
}
-static void sort_dimension_add_dynamic_header(struct sort_dimension *sd)
+static void sort_dimension_add_dynamic_header(struct sort_dimension *sd, struct perf_env *env)
{
- sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header);
+ sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header, env);
}
#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
static struct sort_dimension common_sort_dimensions[] = {
DIM(SORT_PID, "pid", sort_thread),
+ DIM(SORT_TGID, "tgid", sort_tgid),
DIM(SORT_COMM, "comm", sort_comm),
DIM(SORT_DSO, "dso", sort_dso),
DIM(SORT_SYM, "symbol", sort_sym),
@@ -2598,9 +2665,11 @@ struct hpp_dimension {
struct perf_hpp_fmt *fmt;
int taken;
int was_taken;
+ int mem_mode;
};
#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
+#define DIM_MEM(d, n) { .name = n, .fmt = &perf_hpp__format[d], .mem_mode = 1, }
static struct hpp_dimension hpp_sort_dimensions[] = {
DIM(PERF_HPP__OVERHEAD, "overhead"),
@@ -2620,8 +2689,15 @@ static struct hpp_dimension hpp_sort_dimensions[] = {
DIM(PERF_HPP__WEIGHT2, "ins_lat"),
DIM(PERF_HPP__WEIGHT3, "retire_lat"),
DIM(PERF_HPP__WEIGHT3, "p_stage_cyc"),
+ /* used for output only when SORT_MODE__MEM */
+ DIM_MEM(PERF_HPP__MEM_STAT_OP, "op"),
+ DIM_MEM(PERF_HPP__MEM_STAT_CACHE, "cache"),
+ DIM_MEM(PERF_HPP__MEM_STAT_MEMORY, "memory"),
+ DIM_MEM(PERF_HPP__MEM_STAT_SNOOP, "snoop"),
+ DIM_MEM(PERF_HPP__MEM_STAT_DTLB, "dtlb"),
};
+#undef DIM_MEM
#undef DIM
struct hpp_sort_entry {
@@ -2641,18 +2717,22 @@ void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
}
static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
- struct hists *hists, int line __maybe_unused,
+ struct hists *hists, int line,
int *span __maybe_unused)
{
struct hpp_sort_entry *hse;
size_t len = fmt->user_len;
+ const char *hdr = "";
+
+ if (line == hists->hpp_list->nr_header_lines - 1)
+ hdr = fmt->name;
hse = container_of(fmt, struct hpp_sort_entry, hpp);
if (!len)
len = hists__col_len(hists, hse->se->se_width_idx);
- return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
+ return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, hdr);
}
static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
@@ -2884,9 +2964,10 @@ static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
}
static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
- struct perf_hpp_list *list)
+ struct perf_hpp_list *list,
+ int level)
{
- struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
+ struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
if (hse == NULL)
return -1;
@@ -3495,12 +3576,13 @@ static int __hpp_dimension__add(struct hpp_dimension *hd,
}
static int __sort_dimension__add_output(struct perf_hpp_list *list,
- struct sort_dimension *sd)
+ struct sort_dimension *sd,
+ int level)
{
if (sd->taken)
return 0;
- if (__sort_dimension__add_hpp_output(sd, list) < 0)
+ if (__sort_dimension__add_hpp_output(sd, list, level) < 0)
return -1;
sd->taken = 1;
@@ -3508,14 +3590,15 @@ static int __sort_dimension__add_output(struct perf_hpp_list *list,
}
static int __hpp_dimension__add_output(struct perf_hpp_list *list,
- struct hpp_dimension *hd)
+ struct hpp_dimension *hd,
+ int level)
{
struct perf_hpp_fmt *fmt;
if (hd->taken)
return 0;
- fmt = __hpp_dimension__alloc_hpp(hd, 0);
+ fmt = __hpp_dimension__alloc_hpp(hd, level);
if (!fmt)
return -1;
@@ -3532,11 +3615,11 @@ int hpp_dimension__add_output(unsigned col, bool implicit)
hd = &hpp_sort_dimensions[col];
if (implicit && !hd->was_taken)
return 0;
- return __hpp_dimension__add_output(&perf_hpp_list, hd);
+ return __hpp_dimension__add_output(&perf_hpp_list, hd, /*level=*/0);
}
int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
- struct evlist *evlist,
+ struct evlist *evlist, struct perf_env *env,
int level)
{
unsigned int i, j;
@@ -3549,7 +3632,7 @@ int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
*/
for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) {
if (!strcmp(arch_specific_sort_keys[j], tok) &&
- !arch_support_sort_key(tok)) {
+ !arch_support_sort_key(tok, env)) {
return 0;
}
}
@@ -3562,7 +3645,7 @@ int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) {
if (sd->name && !strcmp(dynamic_headers[j], sd->name))
- sort_dimension_add_dynamic_header(sd);
+ sort_dimension_add_dynamic_header(sd, env);
}
if (sd->entry == &sort_parent && parent_pattern) {
@@ -3601,15 +3684,6 @@ int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
return __sort_dimension__add(sd, list, level);
}
- for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
- struct hpp_dimension *hd = &hpp_sort_dimensions[i];
-
- if (strncasecmp(tok, hd->name, strlen(tok)))
- continue;
-
- return __hpp_dimension__add(hd, list, level);
- }
-
for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
struct sort_dimension *sd = &bstack_sort_dimensions[i];
@@ -3651,6 +3725,15 @@ int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
return 0;
}
+ for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
+ struct hpp_dimension *hd = &hpp_sort_dimensions[i];
+
+ if (strncasecmp(tok, hd->name, strlen(tok)))
+ continue;
+
+ return __hpp_dimension__add(hd, list, level);
+ }
+
if (!add_dynamic_entry(evlist, tok, level))
return 0;
@@ -3658,13 +3741,13 @@ int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
}
/* This should match with sort_dimension__add() above */
-static bool is_hpp_sort_key(const char *key)
+static bool is_hpp_sort_key(const char *key, struct perf_env *env)
{
unsigned i;
for (i = 0; i < ARRAY_SIZE(arch_specific_sort_keys); i++) {
if (!strcmp(arch_specific_sort_keys[i], key) &&
- !arch_support_sort_key(key)) {
+ !arch_support_sort_key(key, env)) {
return false;
}
}
@@ -3686,7 +3769,7 @@ static bool is_hpp_sort_key(const char *key)
}
static int setup_sort_list(struct perf_hpp_list *list, char *str,
- struct evlist *evlist)
+ struct evlist *evlist, struct perf_env *env)
{
char *tmp, *tok;
int ret = 0;
@@ -3715,7 +3798,7 @@ static int setup_sort_list(struct perf_hpp_list *list, char *str,
}
if (*tok) {
- if (is_hpp_sort_key(tok)) {
+ if (is_hpp_sort_key(tok, env)) {
/* keep output (hpp) sort keys in the same level */
if (prev_was_hpp) {
bool next_same = (level == next_level);
@@ -3728,7 +3811,7 @@ static int setup_sort_list(struct perf_hpp_list *list, char *str,
prev_was_hpp = false;
}
- ret = sort_dimension__add(list, tok, evlist, level);
+ ret = sort_dimension__add(list, tok, evlist, env, level);
if (ret == -EINVAL) {
if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
@@ -3857,7 +3940,7 @@ static char *setup_overhead(char *keys)
return keys;
}
-static int __setup_sorting(struct evlist *evlist)
+static int __setup_sorting(struct evlist *evlist, struct perf_env *env)
{
char *str;
const char *sort_keys;
@@ -3897,7 +3980,7 @@ static int __setup_sorting(struct evlist *evlist)
}
}
- ret = setup_sort_list(&perf_hpp_list, str, evlist);
+ ret = setup_sort_list(&perf_hpp_list, str, evlist, env);
free(str);
return ret;
@@ -4000,7 +4083,7 @@ void sort__setup_elide(FILE *output)
}
}
-int output_field_add(struct perf_hpp_list *list, const char *tok)
+int output_field_add(struct perf_hpp_list *list, const char *tok, int *level)
{
unsigned int i;
@@ -4013,16 +4096,25 @@ int output_field_add(struct perf_hpp_list *list, const char *tok)
if (!strcasecmp(tok, "weight"))
ui__warning("--fields weight shows the average value unlike in the --sort key.\n");
- return __hpp_dimension__add_output(list, hd);
+ if (hd->mem_mode && sort__mode != SORT_MODE__MEMORY)
+ continue;
+
+ return __hpp_dimension__add_output(list, hd, *level);
}
+ /*
+ * A non-output field will increase level so that it can be in a
+ * different hierarchy.
+ */
+ (*level)++;
+
for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
struct sort_dimension *sd = &common_sort_dimensions[i];
if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
continue;
- return __sort_dimension__add_output(list, sd);
+ return __sort_dimension__add_output(list, sd, *level);
}
for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
@@ -4034,7 +4126,7 @@ int output_field_add(struct perf_hpp_list *list, const char *tok)
if (sort__mode != SORT_MODE__BRANCH)
return -EINVAL;
- return __sort_dimension__add_output(list, sd);
+ return __sort_dimension__add_output(list, sd, *level);
}
for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
@@ -4046,7 +4138,7 @@ int output_field_add(struct perf_hpp_list *list, const char *tok)
if (sort__mode != SORT_MODE__MEMORY)
return -EINVAL;
- return __sort_dimension__add_output(list, sd);
+ return __sort_dimension__add_output(list, sd, *level);
}
return -ESRCH;
@@ -4056,10 +4148,11 @@ static int setup_output_list(struct perf_hpp_list *list, char *str)
{
char *tmp, *tok;
int ret = 0;
+ int level = 0;
for (tok = strtok_r(str, ", ", &tmp);
tok; tok = strtok_r(NULL, ", ", &tmp)) {
- ret = output_field_add(list, tok);
+ ret = output_field_add(list, tok, &level);
if (ret == -EINVAL) {
ui__error("Invalid --fields key: `%s'", tok);
break;
@@ -4123,16 +4216,16 @@ out:
return ret;
}
-int setup_sorting(struct evlist *evlist)
+int setup_sorting(struct evlist *evlist, struct perf_env *env)
{
int err;
- err = __setup_sorting(evlist);
+ err = __setup_sorting(evlist, env);
if (err < 0)
return err;
if (parent_pattern != default_parent_pattern) {
- err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
+ err = sort_dimension__add(&perf_hpp_list, "parent", evlist, env, -1);
if (err < 0)
return err;
}
@@ -4149,6 +4242,10 @@ int setup_sorting(struct evlist *evlist)
if (err < 0)
return err;
+ err = perf_hpp__alloc_mem_stats(&perf_hpp_list, evlist);
+ if (err < 0)
+ return err;
+
/* copy sort keys to output fields */
perf_hpp__setup_output_field(&perf_hpp_list);
/* and then copy output fields to sort keys */
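Note: dropping the __weak arch hooks in favor of perf_env lookups means sort-key support and header names now follow the arch recorded in the data file, not the arch perf was built for, so cross-arch report works. A toy version of that env-keyed dispatch (function name illustrative):

#include <stdio.h>
#include <string.h>

/* Illustrative: answer "is p_stage_cyc meaningful?" from the data
 * file's arch string instead of a compile-time __weak override. */
static int supports_p_stage_cyc(const char *arch)
{
	return !strcmp(arch, "x86") || !strcmp(arch, "powerpc");
}

int main(void)
{
	const char *archs[] = { "x86", "powerpc", "arm64" };

	for (int i = 0; i < 3; i++)
		printf("%-8s p_stage_cyc: %s\n", archs[i],
		       supports_p_stage_cyc(archs[i]) ? "yes" : "no");
	return 0;
}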
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 180d36a2bea3..d7787958e06b 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -6,6 +6,7 @@
#include "hist.h"
struct option;
+struct perf_env;
extern regex_t parent_regex;
extern const char *sort_order;
@@ -73,6 +74,7 @@ enum sort_type {
SORT_SYM_OFFSET,
SORT_ANNOTATE_DATA_TYPE_CACHELINE,
SORT_PARALLELISM,
+ SORT_TGID,
/* branch stack specific sort keys */
__SORT_BRANCH_STACK,
@@ -129,7 +131,7 @@ extern struct sort_entry sort_thread;
struct evlist;
struct tep_handle;
-int setup_sorting(struct evlist *evlist);
+int setup_sorting(struct evlist *evlist, struct perf_env *env);
int setup_output_field(void);
void reset_output_field(void);
void sort__setup_elide(FILE *fp);
@@ -144,9 +146,9 @@ bool is_strict_order(const char *order);
int hpp_dimension__add_output(unsigned col, bool implicit);
void reset_dimensions(void);
int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
- struct evlist *evlist,
+ struct evlist *evlist, struct perf_env *env,
int level);
-int output_field_add(struct perf_hpp_list *list, const char *tok);
+int output_field_add(struct perf_hpp_list *list, const char *tok, int *level);
int64_t
sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right);
int64_t
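Note: output_field_add() now takes an int *level: output-only (hpp) fields reuse the current level, while any other field bumps it so later keys open a new hierarchy level. A toy of that level bookkeeping (names illustrative):

#include <stdio.h>

/* Illustrative: an output-only field stays on the current level; any
 * other (sort) field increments *level and lands on the new one. */
static void add_field(const char *tok, int is_output_field, int *level)
{
	if (!is_output_field)
		(*level)++;
	printf("%-8s -> level %d\n", tok, *level);
}

int main(void)
{
	int level = 0;

	add_field("overhead", 1, &level);	/* hpp output field: level 0 */
	add_field("comm", 0, &level);		/* sort key: level 1 */
	add_field("dso", 0, &level);		/* sort key: level 2 */
	return 0;
}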
diff --git a/tools/perf/util/spark.c b/tools/perf/util/spark.c
index 70272a8b81a6..65ca253cc22e 100644
--- a/tools/perf/util/spark.c
+++ b/tools/perf/util/spark.c
@@ -1,9 +1,7 @@
-#include <stdio.h>
-#include <limits.h>
-#include <string.h>
-#include <stdlib.h>
+// SPDX-License-Identifier: GPL-2.0
#include "spark.h"
-#include "stat.h"
+#include <limits.h>
+#include <linux/kernel.h>
#define SPARK_SHIFT 8
diff --git a/tools/perf/util/spark.h b/tools/perf/util/spark.h
index 25402d7d7a64..78597c38ef35 100644
--- a/tools/perf/util/spark.h
+++ b/tools/perf/util/spark.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef SPARK_H
#define SPARK_H 1
diff --git a/tools/perf/util/srccode.c b/tools/perf/util/srccode.c
index 476e99896d5e..0f4907843ac1 100644
--- a/tools/perf/util/srccode.c
+++ b/tools/perf/util/srccode.c
@@ -16,7 +16,7 @@
#include "srccode.h"
#include "debug.h"
#include <internal/lib.h> // page_size
-#include "fncache.h"
+#include "hashmap.h"
#define MAXSRCCACHE (32*1024*1024)
#define MAXSRCFILES 64
@@ -92,7 +92,7 @@ static struct srcfile *find_srcfile(char *fn)
struct srcfile *h;
int fd;
unsigned long sz;
- unsigned hval = shash((unsigned char *)fn) % SRC_HTAB_SZ;
+ size_t hval = str_hash(fn) % SRC_HTAB_SZ;
hlist_for_each_entry (h, &srcfile_htab[hval], hash_nd) {
if (!strcmp(fn, h->fn)) {
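Note: find_srcfile() keeps the same hash-then-chain lookup; only the bucket index now comes from the shared str_hash() helper instead of the local shash(). A toy of the bucket math (toy_hash() below is a djb2 stand-in, not str_hash()):

#include <stdio.h>
#include <stddef.h>

#define HTAB_SZ 64

/* Stand-in string hash (djb2); perf uses str_hash() from hashmap.h. */
static size_t toy_hash(const char *s)
{
	size_t h = 5381;

	while (*s)
		h = h * 33 + (unsigned char)*s++;
	return h;
}

int main(void)
{
	const char *fn = "util/srccode.c";
	size_t bucket = toy_hash(fn) % HTAB_SZ;

	/* A real table chains entries per bucket and strcmp()s on probe. */
	printf("%s -> bucket %zu\n", fn, bucket);
	return 0;
}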
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index f32d0d4f4bc9..27c0966611ab 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -1,32 +1,15 @@
// SPDX-License-Identifier: GPL-2.0
-#include <inttypes.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/types.h>
-
-#include <linux/compiler.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/zalloc.h>
-
-#include <api/io.h>
-
-#include "util/dso.h"
-#include "util/debug.h"
-#include "util/callchain.h"
-#include "util/symbol_conf.h"
-#ifdef HAVE_LIBLLVM_SUPPORT
-#include "util/llvm-c-helpers.h"
-#endif
#include "srcline.h"
-#include "string2.h"
+#include "addr2line.h"
+#include "dso.h"
+#include "callchain.h"
+#include "libbfd.h"
+#include "llvm.h"
#include "symbol.h"
-#include "subcmd/run-command.h"
-/* If addr2line doesn't return data for 1 second then timeout. */
-int addr2line_timeout_ms = 1 * 1000;
+#include <inttypes.h>
+#include <string.h>
+
bool srcline_full_filename;
char *srcline__unknown = (char *)"??:0";
@@ -49,8 +32,7 @@ static const char *srcline_dso_name(struct dso *dso)
return dso_name;
}
-static int inline_list__append(struct symbol *symbol, char *srcline,
- struct inline_node *node)
+int inline_list__append(struct symbol *symbol, char *srcline, struct inline_node *node)
{
struct inline_list *ilist;
@@ -77,7 +59,7 @@ static const char *gnu_basename(const char *path)
return base ? base + 1 : path;
}
-static char *srcline_from_fileline(const char *file, unsigned int line)
+char *srcline_from_fileline(const char *file, unsigned int line)
{
char *srcline;
@@ -93,9 +75,9 @@ static char *srcline_from_fileline(const char *file, unsigned int line)
return srcline;
}
-static struct symbol *new_inline_sym(struct dso *dso,
- struct symbol *base_sym,
- const char *funcname)
+struct symbol *new_inline_sym(struct dso *dso,
+ struct symbol *base_sym,
+ const char *funcname)
{
struct symbol *inline_sym;
char *demangled = NULL;
@@ -132,722 +114,23 @@ static struct symbol *new_inline_sym(struct dso *dso,
return inline_sym;
}
-#define MAX_INLINE_NEST 1024
-
-#ifdef HAVE_LIBLLVM_SUPPORT
-
-static void free_llvm_inline_frames(struct llvm_a2l_frame *inline_frames,
- int num_frames)
-{
- if (inline_frames != NULL) {
- for (int i = 0; i < num_frames; ++i) {
- zfree(&inline_frames[i].filename);
- zfree(&inline_frames[i].funcname);
- }
- zfree(&inline_frames);
- }
-}
-
-static int addr2line(const char *dso_name, u64 addr,
- char **file, unsigned int *line, struct dso *dso,
- bool unwind_inlines, struct inline_node *node,
+static int addr2line(const char *dso_name, u64 addr, char **file, unsigned int *line_nr,
+ struct dso *dso, bool unwind_inlines, struct inline_node *node,
struct symbol *sym)
{
- struct llvm_a2l_frame *inline_frames = NULL;
- int num_frames = llvm_addr2line(dso_name, addr, file, line,
- node && unwind_inlines, &inline_frames);
-
- if (num_frames == 0 || !inline_frames) {
- /* Error, or we didn't want inlines. */
- return num_frames;
- }
-
- for (int i = 0; i < num_frames; ++i) {
- struct symbol *inline_sym =
- new_inline_sym(dso, sym, inline_frames[i].funcname);
- char *srcline = NULL;
-
- if (inline_frames[i].filename) {
- srcline =
- srcline_from_fileline(inline_frames[i].filename,
- inline_frames[i].line);
- }
- if (inline_list__append(inline_sym, srcline, node) != 0) {
- free_llvm_inline_frames(inline_frames, num_frames);
- return 0;
- }
- }
- free_llvm_inline_frames(inline_frames, num_frames);
-
- return num_frames;
-}
-
-void dso__free_a2l(struct dso *dso __maybe_unused)
-{
- /* Nothing to free. */
-}
-
-#elif defined(HAVE_LIBBFD_SUPPORT)
-
-/*
- * Implement addr2line using libbfd.
- */
-#define PACKAGE "perf"
-#include <bfd.h>
-
-struct a2l_data {
- const char *input;
- u64 addr;
-
- bool found;
- const char *filename;
- const char *funcname;
- unsigned line;
-
- bfd *abfd;
- asymbol **syms;
-};
-
-static int bfd_error(const char *string)
-{
- const char *errmsg;
-
- errmsg = bfd_errmsg(bfd_get_error());
- fflush(stdout);
-
- if (string)
- pr_debug("%s: %s\n", string, errmsg);
- else
- pr_debug("%s\n", errmsg);
-
- return -1;
-}
-
-static int slurp_symtab(bfd *abfd, struct a2l_data *a2l)
-{
- long storage;
- long symcount;
- asymbol **syms;
- bfd_boolean dynamic = FALSE;
-
- if ((bfd_get_file_flags(abfd) & HAS_SYMS) == 0)
- return bfd_error(bfd_get_filename(abfd));
-
- storage = bfd_get_symtab_upper_bound(abfd);
- if (storage == 0L) {
- storage = bfd_get_dynamic_symtab_upper_bound(abfd);
- dynamic = TRUE;
- }
- if (storage < 0L)
- return bfd_error(bfd_get_filename(abfd));
-
- syms = malloc(storage);
- if (dynamic)
- symcount = bfd_canonicalize_dynamic_symtab(abfd, syms);
- else
- symcount = bfd_canonicalize_symtab(abfd, syms);
-
- if (symcount < 0) {
- free(syms);
- return bfd_error(bfd_get_filename(abfd));
- }
-
- a2l->syms = syms;
- return 0;
-}
-
-static void find_address_in_section(bfd *abfd, asection *section, void *data)
-{
- bfd_vma pc, vma;
- bfd_size_type size;
- struct a2l_data *a2l = data;
- flagword flags;
-
- if (a2l->found)
- return;
+ int ret;
-#ifdef bfd_get_section_flags
- flags = bfd_get_section_flags(abfd, section);
-#else
- flags = bfd_section_flags(section);
-#endif
- if ((flags & SEC_ALLOC) == 0)
- return;
-
- pc = a2l->addr;
-#ifdef bfd_get_section_vma
- vma = bfd_get_section_vma(abfd, section);
-#else
- vma = bfd_section_vma(section);
-#endif
-#ifdef bfd_get_section_size
- size = bfd_get_section_size(section);
-#else
- size = bfd_section_size(section);
-#endif
-
- if (pc < vma || pc >= vma + size)
- return;
+ ret = llvm__addr2line(dso_name, addr, file, line_nr, dso, unwind_inlines, node, sym);
+ if (ret > 0)
+ return ret;
- a2l->found = bfd_find_nearest_line(abfd, section, a2l->syms, pc - vma,
- &a2l->filename, &a2l->funcname,
- &a2l->line);
-
- if (a2l->filename && !strlen(a2l->filename))
- a2l->filename = NULL;
-}
-
-static struct a2l_data *addr2line_init(const char *path)
-{
- bfd *abfd;
- struct a2l_data *a2l = NULL;
-
- abfd = bfd_openr(path, NULL);
- if (abfd == NULL)
- return NULL;
+ ret = libbfd__addr2line(dso_name, addr, file, line_nr, dso, unwind_inlines, node, sym);
+ if (ret > 0)
+ return ret;
- if (!bfd_check_format(abfd, bfd_object))
- goto out;
-
- a2l = zalloc(sizeof(*a2l));
- if (a2l == NULL)
- goto out;
-
- a2l->abfd = abfd;
- a2l->input = strdup(path);
- if (a2l->input == NULL)
- goto out;
-
- if (slurp_symtab(abfd, a2l))
- goto out;
-
- return a2l;
-
-out:
- if (a2l) {
- zfree((char **)&a2l->input);
- free(a2l);
- }
- bfd_close(abfd);
- return NULL;
+ return cmd__addr2line(dso_name, addr, file, line_nr, dso, unwind_inlines, node, sym);
}
-static void addr2line_cleanup(struct a2l_data *a2l)
-{
- if (a2l->abfd)
- bfd_close(a2l->abfd);
- zfree((char **)&a2l->input);
- zfree(&a2l->syms);
- free(a2l);
-}
-
-static int inline_list__append_dso_a2l(struct dso *dso,
- struct inline_node *node,
- struct symbol *sym)
-{
- struct a2l_data *a2l = dso__a2l(dso);
- struct symbol *inline_sym = new_inline_sym(dso, sym, a2l->funcname);
- char *srcline = NULL;
-
- if (a2l->filename)
- srcline = srcline_from_fileline(a2l->filename, a2l->line);
-
- return inline_list__append(inline_sym, srcline, node);
-}
-
-static int addr2line(const char *dso_name, u64 addr,
- char **file, unsigned int *line, struct dso *dso,
- bool unwind_inlines, struct inline_node *node,
- struct symbol *sym)
-{
- int ret = 0;
- struct a2l_data *a2l = dso__a2l(dso);
-
- if (!a2l) {
- a2l = addr2line_init(dso_name);
- dso__set_a2l(dso, a2l);
- }
-
- if (a2l == NULL) {
- if (!symbol_conf.disable_add2line_warn)
- pr_warning("addr2line_init failed for %s\n", dso_name);
- return 0;
- }
-
- a2l->addr = addr;
- a2l->found = false;
-
- bfd_map_over_sections(a2l->abfd, find_address_in_section, a2l);
-
- if (!a2l->found)
- return 0;
-
- if (unwind_inlines) {
- int cnt = 0;
-
- if (node && inline_list__append_dso_a2l(dso, node, sym))
- return 0;
-
- while (bfd_find_inliner_info(a2l->abfd, &a2l->filename,
- &a2l->funcname, &a2l->line) &&
- cnt++ < MAX_INLINE_NEST) {
-
- if (a2l->filename && !strlen(a2l->filename))
- a2l->filename = NULL;
-
- if (node != NULL) {
- if (inline_list__append_dso_a2l(dso, node, sym))
- return 0;
- // found at least one inline frame
- ret = 1;
- }
- }
- }
-
- if (file) {
- *file = a2l->filename ? strdup(a2l->filename) : NULL;
- ret = *file ? 1 : 0;
- }
-
- if (line)
- *line = a2l->line;
-
- return ret;
-}
-
-void dso__free_a2l(struct dso *dso)
-{
- struct a2l_data *a2l = dso__a2l(dso);
-
- if (!a2l)
- return;
-
- addr2line_cleanup(a2l);
-
- dso__set_a2l(dso, NULL);
-}
-
-#else /* HAVE_LIBBFD_SUPPORT */
-
-static int filename_split(char *filename, unsigned int *line_nr)
-{
- char *sep;
-
- sep = strchr(filename, '\n');
- if (sep)
- *sep = '\0';
-
- if (!strcmp(filename, "??:0"))
- return 0;
-
- sep = strchr(filename, ':');
- if (sep) {
- *sep++ = '\0';
- *line_nr = strtoul(sep, NULL, 0);
- return 1;
- }
- pr_debug("addr2line missing ':' in filename split\n");
- return 0;
-}
-
-static void addr2line_subprocess_cleanup(struct child_process *a2l)
-{
- if (a2l->pid != -1) {
- kill(a2l->pid, SIGKILL);
- finish_command(a2l); /* ignore result, we don't care */
- a2l->pid = -1;
- close(a2l->in);
- close(a2l->out);
- }
-
- free(a2l);
-}
-
-static struct child_process *addr2line_subprocess_init(const char *addr2line_path,
- const char *binary_path)
-{
- const char *argv[] = {
- addr2line_path ?: "addr2line",
- "-e", binary_path,
- "-a", "-i", "-f", NULL
- };
- struct child_process *a2l = zalloc(sizeof(*a2l));
- int start_command_status = 0;
-
- if (a2l == NULL) {
- pr_err("Failed to allocate memory for addr2line");
- return NULL;
- }
-
- a2l->pid = -1;
- a2l->in = -1;
- a2l->out = -1;
- a2l->no_stderr = 1;
-
- a2l->argv = argv;
- start_command_status = start_command(a2l);
- a2l->argv = NULL; /* it's not used after start_command; avoid dangling pointers */
-
- if (start_command_status != 0) {
- pr_warning("could not start addr2line (%s) for %s: start_command return code %d\n",
- addr2line_path, binary_path, start_command_status);
- addr2line_subprocess_cleanup(a2l);
- return NULL;
- }
-
- return a2l;
-}
-
-enum a2l_style {
- BROKEN,
- GNU_BINUTILS,
- LLVM,
-};
-
-static enum a2l_style addr2line_configure(struct child_process *a2l, const char *dso_name)
-{
- static bool cached;
- static enum a2l_style style;
-
- if (!cached) {
- char buf[128];
- struct io io;
- int ch;
- int lines;
-
- if (write(a2l->in, ",\n", 2) != 2)
- return BROKEN;
-
- io__init(&io, a2l->out, buf, sizeof(buf));
- ch = io__get_char(&io);
- if (ch == ',') {
- style = LLVM;
- cached = true;
- lines = 1;
- pr_debug("Detected LLVM addr2line style\n");
- } else if (ch == '0') {
- style = GNU_BINUTILS;
- cached = true;
- lines = 3;
- pr_debug("Detected binutils addr2line style\n");
- } else {
- if (!symbol_conf.disable_add2line_warn) {
- char *output = NULL;
- size_t output_len;
-
- io__getline(&io, &output, &output_len);
- pr_warning("%s %s: addr2line configuration failed\n",
- __func__, dso_name);
- pr_warning("\t%c%s", ch, output);
- }
- pr_debug("Unknown/broken addr2line style\n");
- return BROKEN;
- }
- while (lines) {
- ch = io__get_char(&io);
- if (ch <= 0)
- break;
- if (ch == '\n')
- lines--;
- }
- /* Ignore SIGPIPE in the event addr2line exits. */
- signal(SIGPIPE, SIG_IGN);
- }
- return style;
-}
-
-static int read_addr2line_record(struct io *io,
- enum a2l_style style,
- const char *dso_name,
- u64 addr,
- bool first,
- char **function,
- char **filename,
- unsigned int *line_nr)
-{
- /*
- * Returns:
- * -1 ==> error
- * 0 ==> sentinel (or other ill-formed) record read
- * 1 ==> a genuine record read
- */
- char *line = NULL;
- size_t line_len = 0;
- unsigned int dummy_line_nr = 0;
- int ret = -1;
-
- if (function != NULL)
- zfree(function);
-
- if (filename != NULL)
- zfree(filename);
-
- if (line_nr != NULL)
- *line_nr = 0;
-
- /*
- * Read the first line. Without an error this will be:
- * - for the first line an address like 0x1234,
- * - the binutils sentinel 0x0000000000000000,
- * - the llvm-addr2line the sentinel ',' character,
- * - the function name line for an inlined function.
- */
- if (io__getline(io, &line, &line_len) < 0 || !line_len)
- goto error;
-
- pr_debug("%s %s: addr2line read address for sentinel: %s", __func__, dso_name, line);
- if (style == LLVM && line_len == 2 && line[0] == ',') {
- /* Found the llvm-addr2line sentinel character. */
- zfree(&line);
- return 0;
- } else if (style == GNU_BINUTILS && (!first || addr != 0)) {
- int zero_count = 0, non_zero_count = 0;
- /*
- * Check for binutils sentinel ignoring it for the case the
- * requested address is 0.
- */
-
- /* A given address should always start 0x. */
- if (line_len >= 2 || line[0] != '0' || line[1] != 'x') {
- for (size_t i = 2; i < line_len; i++) {
- if (line[i] == '0')
- zero_count++;
- else if (line[i] != '\n')
- non_zero_count++;
- }
- if (!non_zero_count) {
- int ch;
-
- if (first && !zero_count) {
- /* Line was erroneous just '0x'. */
- goto error;
- }
- /*
- * Line was 0x0..0, the sentinel for binutils. Remove
- * the function and filename lines.
- */
- zfree(&line);
- do {
- ch = io__get_char(io);
- } while (ch > 0 && ch != '\n');
- do {
- ch = io__get_char(io);
- } while (ch > 0 && ch != '\n');
- return 0;
- }
- }
- }
- /* Read the second function name line (if inline data then this is the first line). */
- if (first && (io__getline(io, &line, &line_len) < 0 || !line_len))
- goto error;
-
- pr_debug("%s %s: addr2line read line: %s", __func__, dso_name, line);
- if (function != NULL)
- *function = strdup(strim(line));
-
- zfree(&line);
- line_len = 0;
-
- /* Read the third filename and line number line. */
- if (io__getline(io, &line, &line_len) < 0 || !line_len)
- goto error;
-
- pr_debug("%s %s: addr2line filename:number : %s", __func__, dso_name, line);
- if (filename_split(line, line_nr == NULL ? &dummy_line_nr : line_nr) == 0 &&
- style == GNU_BINUTILS) {
- ret = 0;
- goto error;
- }
-
- if (filename != NULL)
- *filename = strdup(line);
-
- zfree(&line);
- line_len = 0;
-
- return 1;
-
-error:
- free(line);
- if (function != NULL)
- zfree(function);
- if (filename != NULL)
- zfree(filename);
- return ret;
-}
-
-static int inline_list__append_record(struct dso *dso,
- struct inline_node *node,
- struct symbol *sym,
- const char *function,
- const char *filename,
- unsigned int line_nr)
-{
- struct symbol *inline_sym = new_inline_sym(dso, sym, function);
-
- return inline_list__append(inline_sym, srcline_from_fileline(filename, line_nr), node);
-}
-
-static int addr2line(const char *dso_name, u64 addr,
- char **file, unsigned int *line_nr,
- struct dso *dso,
- bool unwind_inlines,
- struct inline_node *node,
- struct symbol *sym __maybe_unused)
-{
- struct child_process *a2l = dso__a2l(dso);
- char *record_function = NULL;
- char *record_filename = NULL;
- unsigned int record_line_nr = 0;
- int record_status = -1;
- int ret = 0;
- size_t inline_count = 0;
- int len;
- char buf[128];
- ssize_t written;
- struct io io = { .eof = false };
- enum a2l_style a2l_style;
-
- if (!a2l) {
- if (!filename__has_section(dso_name, ".debug_line"))
- goto out;
-
- dso__set_a2l(dso,
- addr2line_subprocess_init(symbol_conf.addr2line_path, dso_name));
- a2l = dso__a2l(dso);
- }
-
- if (a2l == NULL) {
- if (!symbol_conf.disable_add2line_warn)
- pr_warning("%s %s: addr2line_subprocess_init failed\n", __func__, dso_name);
- goto out;
- }
- a2l_style = addr2line_configure(a2l, dso_name);
- if (a2l_style == BROKEN)
- goto out;
-
- /*
- * Send our request and then *deliberately* send something that can't be
- * interpreted as a valid address to ask addr2line about (namely,
- * ","). This causes addr2line to first write out the answer to our
- * request, in an unbounded/unknown number of records, and then to write
- * out the lines "0x0...0", "??" and "??:0", for GNU binutils, or ","
- * for llvm-addr2line, so that we can detect when it has finished giving
- * us anything useful.
- */
- len = snprintf(buf, sizeof(buf), "%016"PRIx64"\n,\n", addr);
- written = len > 0 ? write(a2l->in, buf, len) : -1;
- if (written != len) {
- if (!symbol_conf.disable_add2line_warn)
- pr_warning("%s %s: could not send request\n", __func__, dso_name);
- goto out;
- }
- io__init(&io, a2l->out, buf, sizeof(buf));
- io.timeout_ms = addr2line_timeout_ms;
- switch (read_addr2line_record(&io, a2l_style, dso_name, addr, /*first=*/true,
- &record_function, &record_filename, &record_line_nr)) {
- case -1:
- if (!symbol_conf.disable_add2line_warn)
- pr_warning("%s %s: could not read first record\n", __func__, dso_name);
- goto out;
- case 0:
- /*
- * The first record was invalid, so return failure, but first
- * read another record, since we sent a sentinel ',' for the
- * sake of detected the last inlined function. Treat this as the
- * first of a record as the ',' generates a new start with GNU
- * binutils, also force a non-zero address as we're no longer
- * reading that record.
- */
- switch (read_addr2line_record(&io, a2l_style, dso_name,
- /*addr=*/1, /*first=*/true,
- NULL, NULL, NULL)) {
- case -1:
- if (!symbol_conf.disable_add2line_warn)
- pr_warning("%s %s: could not read sentinel record\n",
- __func__, dso_name);
- break;
- case 0:
- /* The sentinel as expected. */
- break;
- default:
- if (!symbol_conf.disable_add2line_warn)
- pr_warning("%s %s: unexpected record instead of sentinel",
- __func__, dso_name);
- break;
- }
- goto out;
- default:
- /* First record as expected. */
- break;
- }
-
- if (file) {
- *file = strdup(record_filename);
- ret = 1;
- }
- if (line_nr)
- *line_nr = record_line_nr;
-
- if (unwind_inlines) {
- if (node && inline_list__append_record(dso, node, sym,
- record_function,
- record_filename,
- record_line_nr)) {
- ret = 0;
- goto out;
- }
- }
-
- /*
- * We have to read the records even if we don't care about the inline
- * info. This isn't the first record and force the address to non-zero
- * as we're reading records beyond the first.
- */
- while ((record_status = read_addr2line_record(&io,
- a2l_style,
- dso_name,
- /*addr=*/1,
- /*first=*/false,
- &record_function,
- &record_filename,
- &record_line_nr)) == 1) {
- if (unwind_inlines && node && inline_count++ < MAX_INLINE_NEST) {
- if (inline_list__append_record(dso, node, sym,
- record_function,
- record_filename,
- record_line_nr)) {
- ret = 0;
- goto out;
- }
- ret = 1; /* found at least one inline frame */
- }
- }
-
-out:
- free(record_function);
- free(record_filename);
- if (io.eof) {
- dso__set_a2l(dso, NULL);
- addr2line_subprocess_cleanup(a2l);
- }
- return ret;
-}
-
-void dso__free_a2l(struct dso *dso)
-{
- struct child_process *a2l = dso__a2l(dso);
-
- if (!a2l)
- return;
-
- addr2line_subprocess_cleanup(a2l);
-
- dso__set_a2l(dso, NULL);
-}
-
-#endif /* HAVE_LIBBFD_SUPPORT */
-
static struct inline_node *addr2inlines(const char *dso_name, u64 addr,
struct dso *dso, struct symbol *sym)
{
@@ -862,7 +145,9 @@ static struct inline_node *addr2inlines(const char *dso_name, u64 addr,
INIT_LIST_HEAD(&node->val);
node->addr = addr;
- addr2line(dso_name, addr, NULL, NULL, dso, true, node, sym);
+ addr2line(dso_name, addr, /*file=*/NULL, /*line_nr=*/NULL, dso,
+ /*unwind_inlines=*/true, node, sym);
+
return node;
}
@@ -889,7 +174,7 @@ char *__get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
goto out_err;
if (!addr2line(dso_name, addr, &file, &line, dso,
- unwind_inlines, NULL, sym))
+ unwind_inlines, /*node=*/NULL, sym))
goto out_err;
srcline = srcline_from_fileline(file, line);
@@ -935,7 +220,8 @@ char *get_srcline_split(struct dso *dso, u64 addr, unsigned *line)
if (dso_name == NULL)
goto out_err;
- if (!addr2line(dso_name, addr, &file, line, dso, true, NULL, NULL))
+ if (!addr2line(dso_name, addr, &file, line, dso, /*unwind_inlines=*/true,
+ /*node=*/NULL, /*sym=*/NULL))
goto out_err;
dso__set_a2l_fails(dso, 0);
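
The request protocol above writes a hexadecimal address followed by a sentinel ',' line so the reader can tell where one batch of inline records ends. For reference, a minimal standalone sketch of the same idea — not perf's implementation: it runs one GNU binutils addr2line per request instead of keeping a pipe open, and assumes addr2line is on PATH:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Resolve one address with function names (-f) and inlined frames (-i). */
static int resolve(const char *dso_path, uint64_t addr)
{
	char cmd[512], line[1024];
	FILE *fp;

	snprintf(cmd, sizeof(cmd), "addr2line -f -i -e %s 0x%016" PRIx64,
		 dso_path, addr);
	fp = popen(cmd, "r");
	if (!fp)
		return -1;
	/* Output alternates "function" and "file:line"; "??" / "??:0" mean unknown. */
	while (fgets(line, sizeof(line), fp))
		fputs(line, stdout);
	return pclose(fp);
}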
diff --git a/tools/perf/util/srcline.h b/tools/perf/util/srcline.h
index 75010d39ea28..c36f573cd339 100644
--- a/tools/perf/util/srcline.h
+++ b/tools/perf/util/srcline.h
@@ -9,7 +9,6 @@
struct dso;
struct symbol;
-extern int addr2line_timeout_ms;
extern bool srcline_full_filename;
char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
bool show_sym, bool show_addr, u64 ip);
@@ -29,6 +28,8 @@ void srcline__tree_delete(struct rb_root_cached *tree);
extern char *srcline__unknown;
#define SRCLINE_UNKNOWN srcline__unknown
+#define MAX_INLINE_NEST 1024
+
struct inline_list {
struct symbol *symbol;
char *srcline;
@@ -55,4 +56,10 @@ struct inline_node *inlines__tree_find(struct rb_root_cached *tree, u64 addr);
/* delete all nodes within the tree of inline_node s */
void inlines__tree_delete(struct rb_root_cached *tree);
+int inline_list__append(struct symbol *symbol, char *srcline, struct inline_node *node);
+char *srcline_from_fileline(const char *file, unsigned int line);
+struct symbol *new_inline_sym(struct dso *dso,
+ struct symbol *base_sym,
+ const char *funcname);
+
#endif /* PERF_SRCLINE_H */
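
srcline.h now exports srcline_from_fileline() and related helpers for reuse. Its observable behavior amounts to formatting "file:line" into a fresh allocation; a hedged sketch (srcline_sketch below is a stand-in name, the real helper lives in srcline.c):

#define _GNU_SOURCE	/* for asprintf() */
#include <stdio.h>
#include <stdlib.h>

static char *srcline_sketch(const char *file, unsigned int line)
{
	char *srcline = NULL;

	if (!file || asprintf(&srcline, "%s:%u", file, line) < 0)
		return NULL;	/* caller falls back to SRCLINE_UNKNOWN */
	return srcline;
}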
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index e852ac0d9847..6d02f84c5691 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -50,15 +50,15 @@ static int aggr_header_lens[] = {
};
static const char *aggr_header_csv[] = {
- [AGGR_CORE] = "core,cpus,",
- [AGGR_CACHE] = "cache,cpus,",
- [AGGR_CLUSTER] = "cluster,cpus,",
- [AGGR_DIE] = "die,cpus,",
- [AGGR_SOCKET] = "socket,cpus,",
- [AGGR_NONE] = "cpu,",
- [AGGR_THREAD] = "comm-pid,",
- [AGGR_NODE] = "node,",
- [AGGR_GLOBAL] = ""
+ [AGGR_CORE] = "core,ctrs,",
+ [AGGR_CACHE] = "cache,ctrs,",
+ [AGGR_CLUSTER] = "cluster,ctrs,",
+ [AGGR_DIE] = "die,ctrs,",
+ [AGGR_SOCKET] = "socket,ctrs,",
+ [AGGR_NONE] = "cpu,",
+ [AGGR_THREAD] = "comm-pid,",
+ [AGGR_NODE] = "node,",
+ [AGGR_GLOBAL] = ""
};
static const char *aggr_header_std[] = {
@@ -304,7 +304,7 @@ static void print_aggr_id_std(struct perf_stat_config *config,
return;
}
- fprintf(output, "%-*s %*d ", aggr_header_lens[idx], buf, 4, aggr_nr);
+ fprintf(output, "%-*s %*d ", aggr_header_lens[idx], buf, /*strlen("ctrs")*/ 4, aggr_nr);
}
static void print_aggr_id_csv(struct perf_stat_config *config,
@@ -366,27 +366,27 @@ static void print_aggr_id_json(struct perf_stat_config *config, struct outstate
{
switch (config->aggr_mode) {
case AGGR_CORE:
- json_out(os, "\"core\" : \"S%d-D%d-C%d\", \"aggregate-number\" : %d",
+ json_out(os, "\"core\" : \"S%d-D%d-C%d\", \"counters\" : %d",
id.socket, id.die, id.core, aggr_nr);
break;
case AGGR_CACHE:
- json_out(os, "\"cache\" : \"S%d-D%d-L%d-ID%d\", \"aggregate-number\" : %d",
+ json_out(os, "\"cache\" : \"S%d-D%d-L%d-ID%d\", \"counters\" : %d",
id.socket, id.die, id.cache_lvl, id.cache, aggr_nr);
break;
case AGGR_CLUSTER:
- json_out(os, "\"cluster\" : \"S%d-D%d-CLS%d\", \"aggregate-number\" : %d",
+ json_out(os, "\"cluster\" : \"S%d-D%d-CLS%d\", \"counters\" : %d",
id.socket, id.die, id.cluster, aggr_nr);
break;
case AGGR_DIE:
- json_out(os, "\"die\" : \"S%d-D%d\", \"aggregate-number\" : %d",
+ json_out(os, "\"die\" : \"S%d-D%d\", \"counters\" : %d",
id.socket, id.die, aggr_nr);
break;
case AGGR_SOCKET:
- json_out(os, "\"socket\" : \"S%d\", \"aggregate-number\" : %d",
+ json_out(os, "\"socket\" : \"S%d\", \"counters\" : %d",
id.socket, aggr_nr);
break;
case AGGR_NODE:
- json_out(os, "\"node\" : \"N%d\", \"aggregate-number\" : %d",
+ json_out(os, "\"node\" : \"N%d\", \"counters\" : %d",
id.node, aggr_nr);
break;
case AGGR_NONE:
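
Reading the format strings above, an AGGR_CORE record in the JSON output now carries, for example:

    "core" : "S0-D1-C2", "counters" : 4

that is, the former "aggregate-number" key is renamed to "counters", matching the CSV header change from "cpus" to "ctrs".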
@@ -439,9 +439,9 @@ static inline void __new_line_std_csv(struct perf_stat_config *config,
aggr_printout(config, os, os->evsel, os->id, os->aggr_nr);
}
-static inline void __new_line_std(struct outstate *os)
+static inline void __new_line_std(struct perf_stat_config *config, struct outstate *os)
{
- fprintf(os->fh, " ");
+ fprintf(os->fh, "%*s", COUNTS_LEN + EVNAME_LEN + config->unit_width + 2, "");
}
static void do_new_line_std(struct perf_stat_config *config,
@@ -450,7 +450,7 @@ static void do_new_line_std(struct perf_stat_config *config,
__new_line_std_csv(config, os);
if (config->aggr_mode == AGGR_NONE)
fprintf(os->fh, " ");
- __new_line_std(os);
+ __new_line_std(config, os);
}
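
The rewritten __new_line_std() relies on fprintf's "%*s" with an empty string, which emits exactly 'width' spaces. A self-contained illustration (the 20/25 values standing in for COUNTS_LEN and EVNAME_LEN are assumptions, not taken from the header):

#include <stdio.h>

int main(void)
{
	int unit_width = 10;
	int width = 20 + 25 + unit_width + 2;	/* COUNTS_LEN + EVNAME_LEN + unit_width + 2 */

	fprintf(stdout, "%*s", width, "");	/* exactly 'width' spaces, no newline */
	fprintf(stdout, "^ metric continues here\n");
	return 0;
}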
static void print_metric_std(struct perf_stat_config *config,
@@ -583,36 +583,13 @@ static void print_metricgroup_header_std(struct perf_stat_config *config,
int n;
if (!metricgroup_name) {
- __new_line_std(os);
+ __new_line_std(config, os);
return;
}
n = fprintf(config->output, " %*s", EVNAME_LEN, metricgroup_name);
- fprintf(config->output, "%*s", MGROUP_LEN - n - 1, "");
-}
-
-/* Filter out some columns that don't work well in metrics only mode */
-
-static bool valid_only_metric(const char *unit)
-{
- if (!unit)
- return false;
- if (strstr(unit, "/sec") ||
- strstr(unit, "CPUs utilized"))
- return false;
- return true;
-}
-
-static const char *fixunit(char *buf, struct evsel *evsel,
- const char *unit)
-{
- if (!strncmp(unit, "of all", 6)) {
- snprintf(buf, 1024, "%s %s", evsel__name(evsel),
- unit);
- return buf;
- }
- return unit;
+ fprintf(config->output, "%*s", MGROUP_LEN + config->unit_width + 2 - n, "");
}
static void print_metric_only(struct perf_stat_config *config,
@@ -621,13 +598,12 @@ static void print_metric_only(struct perf_stat_config *config,
{
struct outstate *os = ctx;
FILE *out = os->fh;
- char buf[1024], str[1024];
+ char str[1024];
unsigned mlen = config->metric_only_len;
const char *color = metric_threshold_classify__color(thresh);
- if (!valid_only_metric(unit))
- return;
- unit = fixunit(buf, os->evsel, unit);
+ if (!unit)
+ unit = "";
if (mlen < strlen(unit))
mlen = strlen(unit) + 1;
@@ -643,16 +619,15 @@ static void print_metric_only_csv(struct perf_stat_config *config __maybe_unused
void *ctx,
enum metric_threshold_classify thresh __maybe_unused,
const char *fmt,
- const char *unit, double val)
+ const char *unit __maybe_unused, double val)
{
struct outstate *os = ctx;
FILE *out = os->fh;
char buf[64], *vals, *ends;
- char tbuf[1024];
- if (!valid_only_metric(unit))
+ if (!unit)
return;
- unit = fixunit(tbuf, os->evsel, unit);
+
snprintf(buf, sizeof(buf), fmt ?: "", val);
ends = vals = skip_spaces(buf);
while (isdigit(*ends) || *ends == '.')
@@ -670,13 +645,9 @@ static void print_metric_only_json(struct perf_stat_config *config __maybe_unuse
{
struct outstate *os = ctx;
char buf[64], *ends;
- char tbuf[1024];
const char *vals;
- if (!valid_only_metric(unit))
- return;
- unit = fixunit(tbuf, os->evsel, unit);
- if (!unit[0])
+ if (!unit || !unit[0])
return;
snprintf(buf, sizeof(buf), fmt ?: "", val);
vals = ends = skip_spaces(buf);
@@ -695,7 +666,6 @@ static void print_metric_header(struct perf_stat_config *config,
const char *unit, double val __maybe_unused)
{
struct outstate *os = ctx;
- char tbuf[1024];
/* In case of iostat, print metric header for first root port only */
if (config->iostat_run &&
@@ -705,9 +675,8 @@ static void print_metric_header(struct perf_stat_config *config,
if (os->evsel->cgrp != os->cgrp)
return;
- if (!valid_only_metric(unit))
+ if (!unit)
return;
- unit = fixunit(tbuf, os->evsel, unit);
if (config->json_output)
return;
@@ -798,40 +767,28 @@ static void abs_printout(struct perf_stat_config *config,
print_cgroup(config, os, evsel->cgrp);
}
-static bool is_mixed_hw_group(struct evsel *counter)
-{
- struct evlist *evlist = counter->evlist;
- u32 pmu_type = counter->core.attr.type;
- struct evsel *pos;
-
- if (counter->core.nr_members < 2)
- return false;
-
- evlist__for_each_entry(evlist, pos) {
- /* software events can be part of any hardware group */
- if (pos->core.attr.type == PERF_TYPE_SOFTWARE)
- continue;
- if (pmu_type == PERF_TYPE_SOFTWARE) {
- pmu_type = pos->core.attr.type;
- continue;
- }
- if (pmu_type != pos->core.attr.type)
- return true;
- }
-
- return false;
-}
-
-static bool evlist__has_hybrid(struct evlist *evlist)
+static bool evlist__has_hybrid_pmus(struct evlist *evlist)
{
struct evsel *evsel;
+ struct perf_pmu *last_core_pmu = NULL;
if (perf_pmus__num_core_pmus() == 1)
return false;
evlist__for_each_entry(evlist, evsel) {
- if (evsel->core.is_pmu_core)
+ if (evsel->core.is_pmu_core) {
+ struct perf_pmu *pmu = evsel__find_pmu(evsel);
+
+ if (pmu == last_core_pmu)
+ continue;
+
+ if (last_core_pmu == NULL) {
+ last_core_pmu = pmu;
+ continue;
+ }
+ /* A distinct core PMU. */
return true;
+ }
}
return false;
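
The renamed helper answers one question: do the core evsels in the list reference more than one distinct PMU? Stripped of the perf types, the logic is a classic first/other scan; a sketch over a plain pointer array, not the perf API:

#include <stdbool.h>
#include <stddef.h>

static bool has_two_distinct_pmus(void *const *core_pmus, size_t n)
{
	void *first = NULL;

	for (size_t i = 0; i < n; i++) {
		if (!core_pmus[i])
			continue;		/* skip non-core entries */
		if (!first)
			first = core_pmus[i];
		else if (core_pmus[i] != first)
			return true;		/* a second, distinct core PMU */
	}
	return false;
}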
@@ -872,10 +829,8 @@ static void printout(struct perf_stat_config *config, struct outstate *os,
ok = false;
if (counter->supported) {
- if (!evlist__has_hybrid(counter->evlist)) {
+ if (!evlist__has_hybrid_pmus(counter->evlist)) {
config->print_free_counters_hint = 1;
- if (is_mixed_hw_group(counter))
- config->print_mixed_hw_group_error = 1;
}
}
}
@@ -886,7 +841,7 @@ static void printout(struct perf_stat_config *config, struct outstate *os,
out.ctx = os;
out.force_header = false;
- if (!config->metric_only && !counter->default_metricgroup) {
+ if (!config->metric_only && (!counter->default_metricgroup || counter->default_show_events)) {
abs_printout(config, os, os->id, os->aggr_nr, counter, uval, ok);
print_noise(config, os, counter, noise, /*before_metric=*/true);
@@ -894,7 +849,7 @@ static void printout(struct perf_stat_config *config, struct outstate *os,
}
if (ok) {
- if (!config->metric_only && counter->default_metricgroup) {
+ if (!config->metric_only && counter->default_metricgroup && !counter->default_show_events) {
void *from = NULL;
aggr_printout(config, os, os->evsel, os->id, os->aggr_nr);
@@ -913,12 +868,11 @@ static void printout(struct perf_stat_config *config, struct outstate *os,
print_noise(config, os, counter, noise, /*before_metric=*/true);
print_running(config, os, run, ena, /*before_metric=*/true);
from = perf_stat__print_shadow_stats_metricgroup(config, counter, aggr_idx,
- &num, from, &out,
- &config->metric_events);
+ &num, from, &out);
} while (from != NULL);
- } else
- perf_stat__print_shadow_stats(config, counter, uval, aggr_idx,
- &out, &config->metric_events);
+ } else {
+ perf_stat__print_shadow_stats(config, counter, aggr_idx, &out);
+ }
} else {
pm(config, os, METRIC_THRESHOLD_UNKNOWN, /*format=*/NULL, /*unit=*/NULL, /*val=*/0);
}
@@ -929,61 +883,6 @@ static void printout(struct perf_stat_config *config, struct outstate *os,
}
}
-static void evsel__uniquify_counter(struct evsel *counter)
-{
- const char *name, *pmu_name;
- char *new_name, *config;
- int ret;
-
- /* No uniquification necessary. */
- if (!counter->needs_uniquify)
- return;
-
- /* The evsel was already uniquified. */
- if (counter->uniquified_name)
- return;
-
- /* Avoid checking to uniquify twice. */
- counter->uniquified_name = true;
-
- name = evsel__name(counter);
- pmu_name = counter->pmu->name;
- /* Already prefixed by the PMU name. */
- if (!strncmp(name, pmu_name, strlen(pmu_name)))
- return;
-
- config = strchr(name, '/');
- if (config) {
- int len = config - name;
-
- if (config[1] == '/') {
- /* case: event// */
- ret = asprintf(&new_name, "%s/%.*s/%s", pmu_name, len, name, config + 2);
- } else {
- /* case: event/.../ */
- ret = asprintf(&new_name, "%s/%.*s,%s", pmu_name, len, name, config + 1);
- }
- } else {
- config = strchr(name, ':');
- if (config) {
- /* case: event:.. */
- int len = config - name;
-
- ret = asprintf(&new_name, "%s/%.*s/%s", pmu_name, len, name, config + 1);
- } else {
- /* case: event */
- ret = asprintf(&new_name, "%s/%s/", pmu_name, name);
- }
- }
- if (ret > 0) {
- free(counter->name);
- counter->name = new_name;
- } else {
- /* ENOMEM from asprintf. */
- counter->uniquified_name = false;
- }
-}
-
/**
* should_skip_zero_counter() - Check if the event should print 0 values.
* @config: The perf stat configuration (including aggregation mode).
@@ -1014,6 +913,9 @@ static bool should_skip_zero_counter(struct perf_stat_config *config,
if (verbose == 0 && counter->skippable && !counter->supported)
return true;
+ /* In metric-only mode counts aren't displayed, but the metric still needs to be computed. */
+ if (config->metric_only)
+ return false;
/*
* Skip value 0 when enabling --per-thread globally,
* otherwise it will have too many 0 output.
@@ -1022,8 +924,16 @@ static bool should_skip_zero_counter(struct perf_stat_config *config,
return true;
/*
- * Many tool events are only gathered on the first index, skip other
- * zero values.
+ * In per-thread mode the aggr_map and aggr_get_id functions may be
+ * NULL, assume all 0 values should be output in that case.
+ */
+ if (!config->aggr_map || !config->aggr_get_id)
+ return false;
+
+ /*
+ * Tool events may be gathered on all logical CPUs (for example
+ * system_time), but many, such as num_cores, only use the first
+ * index. Don't skip values for the first index.
*/
if (evsel__is_tool(counter)) {
struct aggr_cpu_id own_id =
@@ -1031,15 +941,12 @@ static bool should_skip_zero_counter(struct perf_stat_config *config,
return !aggr_cpu_id__equal(id, &own_id);
}
-
/*
- * Skip value 0 when it's an uncore event and the given aggr id
- * does not belong to the PMU cpumask.
+ * Skip value 0 when the counter's cpumask doesn't match the given aggr
+ * id.
*/
- if (!counter->pmu || !counter->pmu->is_uncore)
- return false;
- perf_cpu_map__for_each_cpu(cpu, idx, counter->pmu->cpus) {
+ perf_cpu_map__for_each_cpu(cpu, idx, counter->core.cpus) {
struct aggr_cpu_id own_id = config->aggr_get_id(config, cpu);
if (aggr_cpu_id__equal(id, &own_id))
@@ -1066,16 +973,21 @@ static void print_counter_aggrdata(struct perf_stat_config *config,
os->evsel = counter;
/* Skip already merged uncore/hybrid events */
- if (counter->merged_stat)
- return;
-
- evsel__uniquify_counter(counter);
+ if (config->aggr_mode != AGGR_NONE) {
+ if (evsel__is_hybrid(counter)) {
+ if (config->hybrid_merge && counter->first_wildcard_match != NULL)
+ return;
+ } else {
+ if (counter->first_wildcard_match != NULL)
+ return;
+ }
+ }
val = aggr->counts.val;
ena = aggr->counts.ena;
run = aggr->counts.run;
- if (perf_stat__skip_metric_event(counter, &config->metric_events, ena, run))
+ if (perf_stat__skip_metric_event(counter, ena, run))
return;
if (val == 0 && should_skip_zero_counter(config, counter, &id))
@@ -1334,10 +1246,7 @@ static void print_metric_headers(struct perf_stat_config *config,
os.evsel = counter;
- perf_stat__print_shadow_stats(config, counter, 0,
- 0,
- &out,
- &config->metric_events);
+ perf_stat__print_shadow_stats(config, counter, /*aggr_idx=*/0, &out);
}
if (!config->json_output)
@@ -1376,7 +1285,7 @@ static void print_header_interval_std(struct perf_stat_config *config,
case AGGR_CLUSTER:
case AGGR_CACHE:
case AGGR_CORE:
- fprintf(output, "#%*s %-*s cpus",
+ fprintf(output, "#%*s %-*s ctrs",
INTERVAL_LEN - 1, "time",
aggr_header_lens[config->aggr_mode],
aggr_header_std[config->aggr_mode]);
@@ -1575,11 +1484,6 @@ static void print_footer(struct perf_stat_config *config)
" echo 0 > /proc/sys/kernel/nmi_watchdog\n"
" perf stat ...\n"
" echo 1 > /proc/sys/kernel/nmi_watchdog\n");
-
- if (config->print_mixed_hw_group_error)
- fprintf(output,
- "The events in group usually have to be from "
- "the same PMU. Try reorganizing the group.\n");
}
static void print_percore(struct perf_stat_config *config,
@@ -1650,96 +1554,6 @@ static void print_cgroup_counter(struct perf_stat_config *config, struct evlist
print_metric_end(config, os);
}
-/* Should uniquify be disabled for the evlist? */
-static bool evlist__disable_uniquify(const struct evlist *evlist)
-{
- struct evsel *counter;
- struct perf_pmu *last_pmu = NULL;
- bool first = true;
-
- evlist__for_each_entry(evlist, counter) {
- /* If PMUs vary then uniquify can be useful. */
- if (!first && counter->pmu != last_pmu)
- return false;
- first = false;
- if (counter->pmu) {
- /* Allow uniquify for uncore PMUs. */
- if (!counter->pmu->is_core)
- return false;
- /* Keep hybrid event names uniquified for clarity. */
- if (perf_pmus__num_core_pmus() > 1)
- return false;
- }
- }
- return true;
-}
-
-static void evsel__set_needs_uniquify(struct evsel *counter, const struct perf_stat_config *config)
-{
- struct evsel *evsel;
-
- if (counter->merged_stat) {
- /* Counter won't be shown. */
- return;
- }
-
- if (counter->use_config_name || counter->is_libpfm_event) {
- /* Original name will be used. */
- return;
- }
-
- if (!config->hybrid_merge && evsel__is_hybrid(counter)) {
- /* Unique hybrid counters necessary. */
- counter->needs_uniquify = true;
- return;
- }
-
- if (counter->core.attr.type < PERF_TYPE_MAX && counter->core.attr.type != PERF_TYPE_RAW) {
- /* Legacy event, don't uniquify. */
- return;
- }
-
- if (counter->pmu && counter->pmu->is_core &&
- counter->alternate_hw_config != PERF_COUNT_HW_MAX) {
- /* A sysfs or json event replacing a legacy event, don't uniquify. */
- return;
- }
-
- if (config->aggr_mode == AGGR_NONE) {
- /* Always unique with no aggregation. */
- counter->needs_uniquify = true;
- return;
- }
-
- /*
- * Do other non-merged events in the evlist have the same name? If so
- * uniquify is necessary.
- */
- evlist__for_each_entry(counter->evlist, evsel) {
- if (evsel == counter || evsel->merged_stat)
- continue;
-
- if (evsel__name_is(counter, evsel__name(evsel))) {
- counter->needs_uniquify = true;
- return;
- }
- }
-}
-
-static void evlist__set_needs_uniquify(struct evlist *evlist, const struct perf_stat_config *config)
-{
- struct evsel *counter;
-
- if (evlist__disable_uniquify(evlist)) {
- evlist__for_each_entry(evlist, counter)
- counter->uniquified_name = true;
- return;
- }
-
- evlist__for_each_entry(evlist, counter)
- evsel__set_needs_uniquify(counter, config);
-}
-
void evlist__print_counters(struct evlist *evlist, struct perf_stat_config *config,
struct target *_target, struct timespec *ts,
int argc, const char **argv)
@@ -1751,7 +1565,7 @@ void evlist__print_counters(struct evlist *evlist, struct perf_stat_config *conf
.first = true,
};
- evlist__set_needs_uniquify(evlist, config);
+ evlist__uniquify_evsel_names(evlist, config);
if (config->iostat_run)
evlist->selected = evlist__first(evlist);
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index d83bda5824d2..9c83f7d96caa 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
#include <math.h>
#include <stdio.h>
#include "evsel.h"
@@ -17,361 +18,32 @@
#include "util/hashmap.h"
#include "tool_pmu.h"
-struct stats walltime_nsecs_stats;
-struct rusage_stats ru_stats;
-
-enum {
- CTX_BIT_USER = 1 << 0,
- CTX_BIT_KERNEL = 1 << 1,
- CTX_BIT_HV = 1 << 2,
- CTX_BIT_HOST = 1 << 3,
- CTX_BIT_IDLE = 1 << 4,
- CTX_BIT_MAX = 1 << 5,
-};
-
-enum stat_type {
- STAT_NONE = 0,
- STAT_NSECS,
- STAT_CYCLES,
- STAT_INSTRUCTIONS,
- STAT_STALLED_CYCLES_FRONT,
- STAT_STALLED_CYCLES_BACK,
- STAT_BRANCHES,
- STAT_BRANCH_MISS,
- STAT_CACHE_REFS,
- STAT_CACHE_MISSES,
- STAT_L1_DCACHE,
- STAT_L1_ICACHE,
- STAT_LL_CACHE,
- STAT_ITLB_CACHE,
- STAT_DTLB_CACHE,
- STAT_L1D_MISS,
- STAT_L1I_MISS,
- STAT_LL_MISS,
- STAT_DTLB_MISS,
- STAT_ITLB_MISS,
- STAT_MAX
-};
-
-static int evsel_context(const struct evsel *evsel)
+static bool tool_pmu__is_time_event(const struct perf_stat_config *config,
+ const struct evsel *evsel, int *tool_aggr_idx)
{
- int ctx = 0;
-
- if (evsel->core.attr.exclude_kernel)
- ctx |= CTX_BIT_KERNEL;
- if (evsel->core.attr.exclude_user)
- ctx |= CTX_BIT_USER;
- if (evsel->core.attr.exclude_hv)
- ctx |= CTX_BIT_HV;
- if (evsel->core.attr.exclude_host)
- ctx |= CTX_BIT_HOST;
- if (evsel->core.attr.exclude_idle)
- ctx |= CTX_BIT_IDLE;
-
- return ctx;
-}
-
-void perf_stat__reset_shadow_stats(void)
-{
- memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
- memset(&ru_stats, 0, sizeof(ru_stats));
-}
-
-static enum stat_type evsel__stat_type(struct evsel *evsel)
-{
- /* Fake perf_hw_cache_op_id values for use with evsel__match. */
- u64 PERF_COUNT_hw_cache_l1d_miss = PERF_COUNT_HW_CACHE_L1D |
- ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
- ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16);
- u64 PERF_COUNT_hw_cache_l1i_miss = PERF_COUNT_HW_CACHE_L1I |
- ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
- ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16);
- u64 PERF_COUNT_hw_cache_ll_miss = PERF_COUNT_HW_CACHE_LL |
- ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
- ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16);
- u64 PERF_COUNT_hw_cache_dtlb_miss = PERF_COUNT_HW_CACHE_DTLB |
- ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
- ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16);
- u64 PERF_COUNT_hw_cache_itlb_miss = PERF_COUNT_HW_CACHE_ITLB |
- ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
- ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16);
-
- if (evsel__is_clock(evsel))
- return STAT_NSECS;
- else if (evsel__match(evsel, HARDWARE, HW_CPU_CYCLES))
- return STAT_CYCLES;
- else if (evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS))
- return STAT_INSTRUCTIONS;
- else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
- return STAT_STALLED_CYCLES_FRONT;
- else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND))
- return STAT_STALLED_CYCLES_BACK;
- else if (evsel__match(evsel, HARDWARE, HW_BRANCH_INSTRUCTIONS))
- return STAT_BRANCHES;
- else if (evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES))
- return STAT_BRANCH_MISS;
- else if (evsel__match(evsel, HARDWARE, HW_CACHE_REFERENCES))
- return STAT_CACHE_REFS;
- else if (evsel__match(evsel, HARDWARE, HW_CACHE_MISSES))
- return STAT_CACHE_MISSES;
- else if (evsel__match(evsel, HW_CACHE, HW_CACHE_L1D))
- return STAT_L1_DCACHE;
- else if (evsel__match(evsel, HW_CACHE, HW_CACHE_L1I))
- return STAT_L1_ICACHE;
- else if (evsel__match(evsel, HW_CACHE, HW_CACHE_LL))
- return STAT_LL_CACHE;
- else if (evsel__match(evsel, HW_CACHE, HW_CACHE_DTLB))
- return STAT_DTLB_CACHE;
- else if (evsel__match(evsel, HW_CACHE, HW_CACHE_ITLB))
- return STAT_ITLB_CACHE;
- else if (evsel__match(evsel, HW_CACHE, hw_cache_l1d_miss))
- return STAT_L1D_MISS;
- else if (evsel__match(evsel, HW_CACHE, hw_cache_l1i_miss))
- return STAT_L1I_MISS;
- else if (evsel__match(evsel, HW_CACHE, hw_cache_ll_miss))
- return STAT_LL_MISS;
- else if (evsel__match(evsel, HW_CACHE, hw_cache_dtlb_miss))
- return STAT_DTLB_MISS;
- else if (evsel__match(evsel, HW_CACHE, hw_cache_itlb_miss))
- return STAT_ITLB_MISS;
- return STAT_NONE;
-}
-
-static enum metric_threshold_classify get_ratio_thresh(const double ratios[3], double val)
-{
- assert(ratios[0] > ratios[1]);
- assert(ratios[1] > ratios[2]);
-
- return val > ratios[1]
- ? (val > ratios[0] ? METRIC_THRESHOLD_BAD : METRIC_THRESHOLD_NEARLY_BAD)
- : (val > ratios[2] ? METRIC_THRESHOLD_LESS_GOOD : METRIC_THRESHOLD_GOOD);
-}
-
-static double find_stat(const struct evsel *evsel, int aggr_idx, enum stat_type type)
-{
- struct evsel *cur;
- int evsel_ctx = evsel_context(evsel);
- struct perf_pmu *evsel_pmu = evsel__find_pmu(evsel);
-
- evlist__for_each_entry(evsel->evlist, cur) {
- struct perf_stat_aggr *aggr;
-
- /* Ignore the evsel that is being searched from. */
- if (evsel == cur)
- continue;
-
- /* Ignore evsels that are part of different groups. */
- if (evsel->core.leader->nr_members > 1 &&
- evsel->core.leader != cur->core.leader)
- continue;
- /* Ignore evsels with mismatched modifiers. */
- if (evsel_ctx != evsel_context(cur))
- continue;
- /* Ignore if not the cgroup we're looking for. */
- if (evsel->cgrp != cur->cgrp)
- continue;
- /* Ignore if not the stat we're looking for. */
- if (type != evsel__stat_type(cur))
- continue;
-
- /*
- * Except the SW CLOCK events,
- * ignore if not the PMU we're looking for.
- */
- if ((type != STAT_NSECS) && (evsel_pmu != evsel__find_pmu(cur)))
- continue;
-
- aggr = &cur->stats->aggr[aggr_idx];
- if (type == STAT_NSECS)
- return aggr->counts.val;
- return aggr->counts.val * cur->scale;
- }
- return 0.0;
-}
-
-static void print_ratio(struct perf_stat_config *config,
- const struct evsel *evsel, int aggr_idx,
- double numerator, struct perf_stat_output_ctx *out,
- enum stat_type denominator_type,
- const double thresh_ratios[3], const char *_unit)
-{
- double denominator = find_stat(evsel, aggr_idx, denominator_type);
- double ratio = 0;
- enum metric_threshold_classify thresh = METRIC_THRESHOLD_UNKNOWN;
- const char *fmt = NULL;
- const char *unit = NULL;
-
- if (numerator && denominator) {
- ratio = numerator / denominator * 100.0;
- thresh = get_ratio_thresh(thresh_ratios, ratio);
- fmt = "%7.2f%%";
- unit = _unit;
- }
- out->print_metric(config, out->ctx, thresh, fmt, unit, ratio);
-}
-
-static void print_stalled_cycles_front(struct perf_stat_config *config,
- const struct evsel *evsel,
- int aggr_idx, double stalled,
- struct perf_stat_output_ctx *out)
-{
- const double thresh_ratios[3] = {50.0, 30.0, 10.0};
-
- print_ratio(config, evsel, aggr_idx, stalled, out, STAT_CYCLES, thresh_ratios,
- "frontend cycles idle");
-}
-
-static void print_stalled_cycles_back(struct perf_stat_config *config,
- const struct evsel *evsel,
- int aggr_idx, double stalled,
- struct perf_stat_output_ctx *out)
-{
- const double thresh_ratios[3] = {75.0, 50.0, 20.0};
-
- print_ratio(config, evsel, aggr_idx, stalled, out, STAT_CYCLES, thresh_ratios,
- "backend cycles idle");
-}
-
-static void print_branch_miss(struct perf_stat_config *config,
- const struct evsel *evsel,
- int aggr_idx, double misses,
- struct perf_stat_output_ctx *out)
-{
- const double thresh_ratios[3] = {20.0, 10.0, 5.0};
-
- print_ratio(config, evsel, aggr_idx, misses, out, STAT_BRANCHES, thresh_ratios,
- "of all branches");
-}
-
-static void print_l1d_miss(struct perf_stat_config *config,
- const struct evsel *evsel,
- int aggr_idx, double misses,
- struct perf_stat_output_ctx *out)
-{
- const double thresh_ratios[3] = {20.0, 10.0, 5.0};
-
- print_ratio(config, evsel, aggr_idx, misses, out, STAT_L1_DCACHE, thresh_ratios,
- "of all L1-dcache accesses");
-}
-
-static void print_l1i_miss(struct perf_stat_config *config,
- const struct evsel *evsel,
- int aggr_idx, double misses,
- struct perf_stat_output_ctx *out)
-{
- const double thresh_ratios[3] = {20.0, 10.0, 5.0};
-
- print_ratio(config, evsel, aggr_idx, misses, out, STAT_L1_ICACHE, thresh_ratios,
- "of all L1-icache accesses");
-}
-
-static void print_ll_miss(struct perf_stat_config *config,
- const struct evsel *evsel,
- int aggr_idx, double misses,
- struct perf_stat_output_ctx *out)
-{
- const double thresh_ratios[3] = {20.0, 10.0, 5.0};
+ enum tool_pmu_event event = evsel__tool_event(evsel);
+ int aggr_idx;
- print_ratio(config, evsel, aggr_idx, misses, out, STAT_LL_CACHE, thresh_ratios,
- "of all LL-cache accesses");
-}
-
-static void print_dtlb_miss(struct perf_stat_config *config,
- const struct evsel *evsel,
- int aggr_idx, double misses,
- struct perf_stat_output_ctx *out)
-{
- const double thresh_ratios[3] = {20.0, 10.0, 5.0};
-
- print_ratio(config, evsel, aggr_idx, misses, out, STAT_DTLB_CACHE, thresh_ratios,
- "of all dTLB cache accesses");
-}
-
-static void print_itlb_miss(struct perf_stat_config *config,
- const struct evsel *evsel,
- int aggr_idx, double misses,
- struct perf_stat_output_ctx *out)
-{
- const double thresh_ratios[3] = {20.0, 10.0, 5.0};
-
- print_ratio(config, evsel, aggr_idx, misses, out, STAT_ITLB_CACHE, thresh_ratios,
- "of all iTLB cache accesses");
-}
-
-static void print_cache_miss(struct perf_stat_config *config,
- const struct evsel *evsel,
- int aggr_idx, double misses,
- struct perf_stat_output_ctx *out)
-{
- const double thresh_ratios[3] = {20.0, 10.0, 5.0};
-
- print_ratio(config, evsel, aggr_idx, misses, out, STAT_CACHE_REFS, thresh_ratios,
- "of all cache refs");
-}
-
-static void print_instructions(struct perf_stat_config *config,
- const struct evsel *evsel,
- int aggr_idx, double instructions,
- struct perf_stat_output_ctx *out)
-{
- print_metric_t print_metric = out->print_metric;
- void *ctxp = out->ctx;
- double cycles = find_stat(evsel, aggr_idx, STAT_CYCLES);
- double max_stalled = max(find_stat(evsel, aggr_idx, STAT_STALLED_CYCLES_FRONT),
- find_stat(evsel, aggr_idx, STAT_STALLED_CYCLES_BACK));
-
- if (cycles) {
- print_metric(config, ctxp, METRIC_THRESHOLD_UNKNOWN, "%7.2f ",
- "insn per cycle", instructions / cycles);
- } else {
- print_metric(config, ctxp, METRIC_THRESHOLD_UNKNOWN, /*fmt=*/NULL,
- "insn per cycle", 0);
- }
- if (max_stalled && instructions) {
- if (out->new_line)
- out->new_line(config, ctxp);
- print_metric(config, ctxp, METRIC_THRESHOLD_UNKNOWN, "%7.2f ",
- "stalled cycles per insn", max_stalled / instructions);
- }
-}
-
-static void print_cycles(struct perf_stat_config *config,
- const struct evsel *evsel,
- int aggr_idx, double cycles,
- struct perf_stat_output_ctx *out)
-{
- double nsecs = find_stat(evsel, aggr_idx, STAT_NSECS);
-
- if (cycles && nsecs) {
- double ratio = cycles / nsecs;
-
- out->print_metric(config, out->ctx, METRIC_THRESHOLD_UNKNOWN, "%8.3f",
- "GHz", ratio);
- } else {
- out->print_metric(config, out->ctx, METRIC_THRESHOLD_UNKNOWN, /*fmt=*/NULL,
- "GHz", 0);
- }
-}
-
-static void print_nsecs(struct perf_stat_config *config,
- const struct evsel *evsel,
- int aggr_idx __maybe_unused, double nsecs,
- struct perf_stat_output_ctx *out)
-{
- print_metric_t print_metric = out->print_metric;
- void *ctxp = out->ctx;
- double wall_time = avg_stats(&walltime_nsecs_stats);
+ if (event != TOOL_PMU__EVENT_DURATION_TIME &&
+ event != TOOL_PMU__EVENT_USER_TIME &&
+ event != TOOL_PMU__EVENT_SYSTEM_TIME)
+ return false;
- if (wall_time) {
- print_metric(config, ctxp, METRIC_THRESHOLD_UNKNOWN, "%8.3f", "CPUs utilized",
- nsecs / (wall_time * evsel->scale));
- } else {
- print_metric(config, ctxp, METRIC_THRESHOLD_UNKNOWN, /*fmt=*/NULL,
- "CPUs utilized", 0);
+ if (config) {
+ cpu_aggr_map__for_each_idx(aggr_idx, config->aggr_map) {
+ if (config->aggr_map->map[aggr_idx].cpu.cpu == 0) {
+ *tool_aggr_idx = aggr_idx;
+ return true;
+ }
+ }
+ pr_debug("Unexpected CPU0 missing in aggregation for tool event.\n");
}
+ *tool_aggr_idx = 0; /* Assume the first aggregation index works. */
+ return true;
}
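
Tool time events (duration_time, user_time, system_time) count once, on CPU0, so their stats live in whichever aggregation bucket contains CPU0. Flattened to an int array of per-bucket first CPUs — an assumed shape, not the cpu_aggr_map layout — the scan above is:

static int find_cpu0_bucket(const int *bucket_cpu, int nr_buckets)
{
	for (int i = 0; i < nr_buckets; i++) {
		if (bucket_cpu[i] == 0)
			return i;	/* the bucket that aggregates CPU0 */
	}
	return 0;	/* fall back to the first bucket, as the code does */
}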
-static int prepare_metric(const struct metric_expr *mexp,
+static int prepare_metric(struct perf_stat_config *config,
+ const struct metric_expr *mexp,
const struct evsel *evsel,
struct expr_parse_ctx *pctx,
int aggr_idx)
@@ -381,91 +53,51 @@ static int prepare_metric(const struct metric_expr *mexp,
int i;
for (i = 0; metric_events[i]; i++) {
+ int source_count = 0, tool_aggr_idx;
+ bool is_tool_time =
+ tool_pmu__is_time_event(config, metric_events[i], &tool_aggr_idx);
+ struct perf_stat_evsel *ps = metric_events[i]->stats;
+ struct perf_stat_aggr *aggr;
char *n;
double val;
- int source_count = 0;
- if (evsel__is_tool(metric_events[i])) {
- struct stats *stats;
- double scale;
-
- switch (evsel__tool_event(metric_events[i])) {
- case TOOL_PMU__EVENT_DURATION_TIME:
- stats = &walltime_nsecs_stats;
- scale = 1e-9;
- break;
- case TOOL_PMU__EVENT_USER_TIME:
- stats = &ru_stats.ru_utime_usec_stat;
- scale = 1e-6;
- break;
- case TOOL_PMU__EVENT_SYSTEM_TIME:
- stats = &ru_stats.ru_stime_usec_stat;
- scale = 1e-6;
+ /*
+ * If there are multiple uncore PMUs and we're not reading the
+ * leader's stats, determine the stats for the appropriate
+ * uncore PMU.
+ */
+ if (evsel && evsel->metric_leader &&
+ evsel->pmu != evsel->metric_leader->pmu &&
+ mexp->metric_events[i]->pmu == evsel->metric_leader->pmu) {
+ struct evsel *pos;
+
+ evlist__for_each_entry(evsel->evlist, pos) {
+ if (pos->pmu != evsel->pmu)
+ continue;
+ if (pos->metric_leader != mexp->metric_events[i])
+ continue;
+ ps = pos->stats;
+ source_count = 1;
break;
- case TOOL_PMU__EVENT_NONE:
- pr_err("Invalid tool event 'none'");
- abort();
- case TOOL_PMU__EVENT_MAX:
- pr_err("Invalid tool event 'max'");
- abort();
- case TOOL_PMU__EVENT_HAS_PMEM:
- case TOOL_PMU__EVENT_NUM_CORES:
- case TOOL_PMU__EVENT_NUM_CPUS:
- case TOOL_PMU__EVENT_NUM_CPUS_ONLINE:
- case TOOL_PMU__EVENT_NUM_DIES:
- case TOOL_PMU__EVENT_NUM_PACKAGES:
- case TOOL_PMU__EVENT_SLOTS:
- case TOOL_PMU__EVENT_SMT_ON:
- case TOOL_PMU__EVENT_SYSTEM_TSC_FREQ:
- default:
- pr_err("Unexpected tool event '%s'", evsel__name(metric_events[i]));
- abort();
}
- val = avg_stats(stats) * scale;
- source_count = 1;
- } else {
- struct perf_stat_evsel *ps = metric_events[i]->stats;
- struct perf_stat_aggr *aggr;
-
+ }
+ /* Time events count on CPU0; use the aggregation index that covers it. */
+ aggr = &ps->aggr[is_tool_time ? tool_aggr_idx : aggr_idx];
+ if (!aggr || !metric_events[i]->supported) {
/*
- * If there are multiple uncore PMUs and we're not
- * reading the leader's stats, determine the stats for
- * the appropriate uncore PMU.
+ * Not supported events will have a count of 0, which
+ * can be confusing in a metric. Explicitly set the
+ * value to NAN. Not counted events (enable time of 0)
+ * are read as 0.
*/
- if (evsel && evsel->metric_leader &&
- evsel->pmu != evsel->metric_leader->pmu &&
- mexp->metric_events[i]->pmu == evsel->metric_leader->pmu) {
- struct evsel *pos;
-
- evlist__for_each_entry(evsel->evlist, pos) {
- if (pos->pmu != evsel->pmu)
- continue;
- if (pos->metric_leader != mexp->metric_events[i])
- continue;
- ps = pos->stats;
- source_count = 1;
- break;
- }
- }
- aggr = &ps->aggr[aggr_idx];
- if (!aggr)
- break;
-
- if (!metric_events[i]->supported) {
- /*
- * Not supported events will have a count of 0,
- * which can be confusing in a
- * metric. Explicitly set the value to NAN. Not
- * counted events (enable time of 0) are read as
- * 0.
- */
- val = NAN;
- source_count = 0;
- } else {
- val = aggr->counts.val;
- if (!source_count)
- source_count = evsel__source_count(metric_events[i]);
- }
+ val = NAN;
+ source_count = 0;
+ } else {
+ val = aggr->counts.val;
+ if (is_tool_time)
+ val *= 1e-9; /* Convert time event nanoseconds to seconds. */
+ if (!source_count)
+ source_count = evsel__source_count(metric_events[i]);
}
n = strdup(evsel__metric_id(metric_events[i]));
if (!n)
@@ -511,7 +143,7 @@ static void generic_metric(struct perf_stat_config *config,
pctx->sctx.user_requested_cpu_list = strdup(config->user_requested_cpu_list);
pctx->sctx.runtime = runtime;
pctx->sctx.system_wide = config->system_wide;
- i = prepare_metric(mexp, evsel, pctx, aggr_idx);
+ i = prepare_metric(config, mexp, evsel, pctx, aggr_idx);
if (i < 0) {
expr__ctx_free(pctx);
return;
@@ -572,7 +204,7 @@ double test_generic_metric(struct metric_expr *mexp, int aggr_idx)
if (!pctx)
return NAN;
- if (prepare_metric(mexp, /*evsel=*/NULL, pctx, aggr_idx) < 0)
+ if (prepare_metric(/*config=*/NULL, mexp, /*evsel=*/NULL, pctx, aggr_idx) < 0)
goto out;
if (expr__parse(&ratio, pctx, mexp->metric_expr))
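
The prepare_metric() change above reports unsupported events as NAN rather than 0, so a metric consuming them reads as unknown instead of producing a plausible-looking ratio. The effect in plain C99:

#include <math.h>
#include <stdio.h>

int main(void)
{
	double cycles = NAN;			/* unsupported event */
	double instructions = 1e9;
	double ipc = instructions / cycles;	/* NAN propagates through the expression */

	printf("ipc = %f (isnan=%d)\n", ipc, isnan(ipc));
	return 0;
}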
@@ -601,11 +233,9 @@ static void perf_stat__print_metricgroup_header(struct perf_stat_config *config,
* event. Only align with other metics from
* different metric events.
*/
- if (last_name && !strcmp(last_name, name)) {
- if (!need_full_name || last_pmu != evsel->pmu) {
- out->print_metricgroup_header(config, ctxp, NULL);
- return;
- }
+ if (last_name && !strcmp(last_name, name) && last_pmu == evsel->pmu) {
+ out->print_metricgroup_header(config, ctxp, NULL);
+ return;
}
if (need_full_name && evsel->pmu)
@@ -635,14 +265,14 @@ void *perf_stat__print_shadow_stats_metricgroup(struct perf_stat_config *config,
int aggr_idx,
int *num,
void *from,
- struct perf_stat_output_ctx *out,
- struct rblist *metric_events)
+ struct perf_stat_output_ctx *out)
{
struct metric_event *me;
struct metric_expr *mexp = from;
void *ctxp = out->ctx;
bool header_printed = false;
const char *name = NULL;
+ struct rblist *metric_events = &evsel->evlist->metric_events;
me = metricgroup__lookup(metric_events, evsel, false);
if (me == NULL)
@@ -665,7 +295,7 @@ void *perf_stat__print_shadow_stats_metricgroup(struct perf_stat_config *config,
if (strcmp(name, mexp->default_metricgroup_name))
return (void *)mexp;
/* Only print the name of the metricgroup once */
- if (!header_printed) {
+ if (!header_printed && !evsel->default_show_events) {
header_printed = true;
perf_stat__print_metricgroup_header(config, evsel, ctxp,
name, out);
@@ -682,60 +312,18 @@ void *perf_stat__print_shadow_stats_metricgroup(struct perf_stat_config *config,
void perf_stat__print_shadow_stats(struct perf_stat_config *config,
struct evsel *evsel,
- double avg, int aggr_idx,
- struct perf_stat_output_ctx *out,
- struct rblist *metric_events)
+ int aggr_idx,
+ struct perf_stat_output_ctx *out)
{
- typedef void (*stat_print_function_t)(struct perf_stat_config *config,
- const struct evsel *evsel,
- int aggr_idx, double misses,
- struct perf_stat_output_ctx *out);
- static const stat_print_function_t stat_print_function[STAT_MAX] = {
- [STAT_INSTRUCTIONS] = print_instructions,
- [STAT_BRANCH_MISS] = print_branch_miss,
- [STAT_L1D_MISS] = print_l1d_miss,
- [STAT_L1I_MISS] = print_l1i_miss,
- [STAT_DTLB_MISS] = print_dtlb_miss,
- [STAT_ITLB_MISS] = print_itlb_miss,
- [STAT_LL_MISS] = print_ll_miss,
- [STAT_CACHE_MISSES] = print_cache_miss,
- [STAT_STALLED_CYCLES_FRONT] = print_stalled_cycles_front,
- [STAT_STALLED_CYCLES_BACK] = print_stalled_cycles_back,
- [STAT_CYCLES] = print_cycles,
- [STAT_NSECS] = print_nsecs,
- };
print_metric_t print_metric = out->print_metric;
void *ctxp = out->ctx;
- int num = 1;
+ int num = 0;
- if (config->iostat_run) {
+ if (config->iostat_run)
iostat_print_metric(config, evsel, out);
- } else {
- stat_print_function_t fn = stat_print_function[evsel__stat_type(evsel)];
-
- if (fn)
- fn(config, evsel, aggr_idx, avg, out);
- else {
- double nsecs = find_stat(evsel, aggr_idx, STAT_NSECS);
-
- if (nsecs) {
- char unit = ' ';
- char unit_buf[10] = "/sec";
- double ratio = convert_unit_double(1000000000.0 * avg / nsecs,
- &unit);
-
- if (unit != ' ')
- snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
- print_metric(config, ctxp, METRIC_THRESHOLD_UNKNOWN, "%8.3f",
- unit_buf, ratio);
- } else {
- num = 0;
- }
- }
- }
perf_stat__print_shadow_stats_metricgroup(config, evsel, aggr_idx,
- &num, NULL, out, metric_events);
+ &num, NULL, out);
if (num == 0) {
print_metric(config, ctxp, METRIC_THRESHOLD_UNKNOWN,
@@ -748,7 +336,6 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
* if it's not running or not the metric event.
*/
bool perf_stat__skip_metric_event(struct evsel *evsel,
- struct rblist *metric_events,
u64 ena, u64 run)
{
if (!evsel->default_metricgroup)
@@ -757,5 +344,5 @@ bool perf_stat__skip_metric_event(struct evsel *evsel,
if (!ena || !run)
return true;
- return !metricgroup__lookup(metric_events, evsel, false);
+ return !metricgroup__lookup(&evsel->evlist->metric_events, evsel, false);
}
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 1f7abd8754c7..976a06e63252 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -526,7 +526,7 @@ static int evsel__merge_aggr_counters(struct evsel *evsel, struct evsel *alias)
struct perf_counts_values *aggr_counts_a = &ps_a->aggr[i].counts;
struct perf_counts_values *aggr_counts_b = &ps_b->aggr[i].counts;
- /* NB: don't increase aggr.nr for aliases */
+ ps_a->aggr[i].nr += ps_b->aggr[i].nr;
aggr_counts_a->val += aggr_counts_b->val;
aggr_counts_a->ena += aggr_counts_b->ena;
@@ -535,35 +535,6 @@ static int evsel__merge_aggr_counters(struct evsel *evsel, struct evsel *alias)
return 0;
}
-/*
- * Events should have the same name, scale, unit, cgroup but on different core
- * PMUs or on different but matching uncore PMUs.
- */
-static bool evsel__is_alias(struct evsel *evsel_a, struct evsel *evsel_b)
-{
- if (strcmp(evsel__name(evsel_a), evsel__name(evsel_b)))
- return false;
-
- if (evsel_a->scale != evsel_b->scale)
- return false;
-
- if (evsel_a->cgrp != evsel_b->cgrp)
- return false;
-
- if (strcmp(evsel_a->unit, evsel_b->unit))
- return false;
-
- if (evsel__is_clock(evsel_a) != evsel__is_clock(evsel_b))
- return false;
-
- if (evsel_a->pmu == evsel_b->pmu || evsel_a->pmu == NULL || evsel_b->pmu == NULL)
- return false;
-
- if (evsel_a->pmu->is_core)
- return evsel_b->pmu->is_core;
-
- return perf_pmu__name_no_suffix_match(evsel_a->pmu, evsel_b->pmu->name);
-}
static void evsel__merge_aliases(struct evsel *evsel)
{
@@ -572,10 +543,9 @@ static void evsel__merge_aliases(struct evsel *evsel)
alias = list_prepare_entry(evsel, &(evlist->core.entries), core.node);
list_for_each_entry_continue(alias, &evlist->core.entries, core.node) {
- /* Merge the same events on different PMUs. */
- if (evsel__is_alias(evsel, alias)) {
+ if (alias->first_wildcard_match == evsel) {
+ /* Merge the same events on different PMUs. */
evsel__merge_aggr_counters(evsel, alias);
- alias->merged_stat = true;
}
}
}
@@ -588,11 +558,7 @@ static bool evsel__should_merge_hybrid(const struct evsel *evsel,
static void evsel__merge_stats(struct evsel *evsel, struct perf_stat_config *config)
{
- /* this evsel is already merged */
- if (evsel->merged_stat)
- return;
-
- if (evsel->auto_merge_stats || evsel__should_merge_hybrid(evsel, config))
+ if (!evsel->pmu || !evsel->pmu->is_core || evsel__should_merge_hybrid(evsel, config))
evsel__merge_aliases(evsel);
}
@@ -679,7 +645,8 @@ void perf_stat_process_percore(struct perf_stat_config *config, struct evlist *e
evsel__process_percore(evsel);
}
-int perf_event__process_stat_event(struct perf_session *session,
+int perf_event__process_stat_event(const struct perf_tool *tool __maybe_unused,
+ struct perf_session *session,
union perf_event *event)
{
struct perf_counts_values count, *ptr;
@@ -750,61 +717,3 @@ size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
return ret;
}
-
-int create_perf_stat_counter(struct evsel *evsel,
- struct perf_stat_config *config,
- struct target *target,
- int cpu_map_idx)
-{
- struct perf_event_attr *attr = &evsel->core.attr;
- struct evsel *leader = evsel__leader(evsel);
-
- attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
- PERF_FORMAT_TOTAL_TIME_RUNNING;
-
- /*
- * The event is part of non trivial group, let's enable
- * the group read (for leader) and ID retrieval for all
- * members.
- */
- if (leader->core.nr_members > 1)
- attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;
-
- attr->inherit = !config->no_inherit && list_empty(&evsel->bpf_counter_list);
-
- /*
- * Some events get initialized with sample_(period/type) set,
- * like tracepoints. Clear it up for counting.
- */
- attr->sample_period = 0;
-
- if (config->identifier)
- attr->sample_type = PERF_SAMPLE_IDENTIFIER;
-
- if (config->all_user) {
- attr->exclude_kernel = 1;
- attr->exclude_user = 0;
- }
-
- if (config->all_kernel) {
- attr->exclude_kernel = 0;
- attr->exclude_user = 1;
- }
-
- /*
- * Disabling all counters initially, they will be enabled
- * either manually by us or by kernel via enable_on_exec
- * set later.
- */
- if (evsel__is_group_leader(evsel)) {
- attr->disabled = 1;
-
- if (target__enable_on_exec(target))
- attr->enable_on_exec = 1;
- }
-
- if (target__has_cpu(target) && !target__has_per_thread(target))
- return evsel__open_per_cpu(evsel, evsel__cpus(evsel), cpu_map_idx);
-
- return evsel__open_per_thread(evsel, evsel->core.threads);
-}
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index 2fda9acd7374..f986911c9296 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -7,7 +7,6 @@
#include <sys/types.h>
#include <sys/resource.h>
#include "cpumap.h"
-#include "rblist.h"
#include "counts.h"
struct perf_cpu_map;
@@ -57,11 +56,6 @@ enum aggr_mode {
AGGR_MAX
};
-struct rusage_stats {
- struct stats ru_utime_usec_stat;
- struct stats ru_stime_usec_stat;
-};
-
typedef struct aggr_cpu_id (*aggr_get_id_t)(struct perf_stat_config *config, struct perf_cpu cpu);
struct perf_stat_config {
@@ -100,16 +94,13 @@ struct perf_stat_config {
int times;
int run_count;
int print_free_counters_hint;
- int print_mixed_hw_group_error;
const char *csv_sep;
struct stats *walltime_nsecs_stats;
struct rusage ru_data;
- struct rusage_stats *ru_stats;
struct cpu_aggr_map *aggr_map;
aggr_get_id_t aggr_get_id;
struct cpu_aggr_map *cpus_aggr_map;
u64 *walltime_run;
- struct rblist metric_events;
int ctl_fd;
int ctl_fd_ack;
bool ctl_fd_close;
@@ -135,26 +126,9 @@ static inline void init_stats(struct stats *stats)
stats->max = 0;
}
-static inline void init_rusage_stats(struct rusage_stats *ru_stats) {
- init_stats(&ru_stats->ru_utime_usec_stat);
- init_stats(&ru_stats->ru_stime_usec_stat);
-}
-
-static inline void update_rusage_stats(struct rusage_stats *ru_stats, struct rusage* rusage) {
- const u64 us_to_ns = 1000;
- const u64 s_to_ns = 1000000000;
- update_stats(&ru_stats->ru_utime_usec_stat,
- (rusage->ru_utime.tv_usec * us_to_ns + rusage->ru_utime.tv_sec * s_to_ns));
- update_stats(&ru_stats->ru_stime_usec_stat,
- (rusage->ru_stime.tv_usec * us_to_ns + rusage->ru_stime.tv_sec * s_to_ns));
-}
-
struct evsel;
struct evlist;
-extern struct stats walltime_nsecs_stats;
-extern struct rusage_stats ru_stats;
-
enum metric_threshold_classify {
METRIC_THRESHOLD_UNKNOWN,
METRIC_THRESHOLD_BAD,
@@ -187,19 +161,15 @@ struct perf_stat_output_ctx {
void perf_stat__print_shadow_stats(struct perf_stat_config *config,
struct evsel *evsel,
- double avg, int aggr_idx,
- struct perf_stat_output_ctx *out,
- struct rblist *metric_events);
-bool perf_stat__skip_metric_event(struct evsel *evsel,
- struct rblist *metric_events,
- u64 ena, u64 run);
+ int aggr_idx,
+ struct perf_stat_output_ctx *out);
+bool perf_stat__skip_metric_event(struct evsel *evsel, u64 ena, u64 run);
void *perf_stat__print_shadow_stats_metricgroup(struct perf_stat_config *config,
struct evsel *evsel,
int aggr_idx,
int *num,
void *from,
- struct perf_stat_output_ctx *out,
- struct rblist *metric_events);
+ struct perf_stat_output_ctx *out);
int evlist__alloc_stats(struct perf_stat_config *config,
struct evlist *evlist, bool alloc_raw);
@@ -223,17 +193,14 @@ union perf_event;
struct perf_session;
struct target;
-int perf_event__process_stat_event(struct perf_session *session,
+int perf_event__process_stat_event(const struct perf_tool *tool,
+ struct perf_session *session,
union perf_event *event);
size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp);
-int create_perf_stat_counter(struct evsel *evsel,
- struct perf_stat_config *config,
- struct target *target,
- int cpu_map_idx);
void evlist__print_counters(struct evlist *evlist, struct perf_stat_config *config,
struct target *_target, struct timespec *ts, int argc, const char **argv);
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index fbf6d0f73af9..957143fbf8a0 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -9,14 +9,11 @@
#include "compress.h"
#include "dso.h"
+#include "libbfd.h"
#include "map.h"
#include "maps.h"
#include "symbol.h"
#include "symsrc.h"
-#include "demangle-cxx.h"
-#include "demangle-ocaml.h"
-#include "demangle-java.h"
-#include "demangle-rust.h"
#include "machine.h"
#include "vdso.h"
#include "debug.h"
@@ -28,18 +25,6 @@
#include <symbol/kallsyms.h>
#include <internal/lib.h>
-#ifdef HAVE_LIBBFD_SUPPORT
-#define PACKAGE 'perf'
-#include <bfd.h>
-#endif
-
-#if defined(HAVE_LIBBFD_SUPPORT) || defined(HAVE_CPLUS_DEMANGLE_SUPPORT)
-#ifndef DMGL_PARAMS
-#define DMGL_PARAMS (1 << 0) /* Include function args */
-#define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */
-#endif
-#endif
-
#ifndef EM_AARCH64
#define EM_AARCH64 183 /* ARM 64 bit */
#endif
@@ -279,62 +264,6 @@ static int elf_read_program_header(Elf *elf, u64 vaddr, GElf_Phdr *phdr)
return -1;
}
-static bool want_demangle(bool is_kernel_sym)
-{
- return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
-}
-
-/*
- * Demangle C++ function signature, typically replaced by demangle-cxx.cpp
- * version.
- */
-#ifndef HAVE_CXA_DEMANGLE_SUPPORT
-char *cxx_demangle_sym(const char *str __maybe_unused, bool params __maybe_unused,
- bool modifiers __maybe_unused)
-{
-#ifdef HAVE_LIBBFD_SUPPORT
- int flags = (params ? DMGL_PARAMS : 0) | (modifiers ? DMGL_ANSI : 0);
-
- return bfd_demangle(NULL, str, flags);
-#elif defined(HAVE_CPLUS_DEMANGLE_SUPPORT)
- int flags = (params ? DMGL_PARAMS : 0) | (modifiers ? DMGL_ANSI : 0);
-
- return cplus_demangle(str, flags);
-#else
- return NULL;
-#endif
-}
-#endif /* !HAVE_CXA_DEMANGLE_SUPPORT */
-
-static char *demangle_sym(struct dso *dso, int kmodule, const char *elf_name)
-{
- char *demangled = NULL;
-
- /*
- * We need to figure out if the object was created from C++ sources
- * DWARF DW_compile_unit has this, but we don't always have access
- * to it...
- */
- if (!want_demangle(dso__kernel(dso) || kmodule))
- return demangled;
-
- demangled = cxx_demangle_sym(elf_name, verbose > 0, verbose > 0);
- if (demangled == NULL) {
- demangled = ocaml_demangle_sym(elf_name);
- if (demangled == NULL) {
- demangled = java_demangle_sym(elf_name, JAVA_DEMANGLE_NORET);
- }
- }
- else if (rust_is_mangled(demangled))
- /*
- * Input to Rust demangling is the BFD-demangled
- * name which it Rust-demangles in place.
- */
- rust_demangle_sym(demangled);
-
- return demangled;
-}
-
struct rel_info {
u32 nr_entries;
u32 *sorted;
@@ -620,7 +549,7 @@ static bool get_plt_got_name(GElf_Shdr *shdr, size_t i,
/* Get the associated symbol */
gelf_getsym(di->dynsym_data, vr->sym_idx, &sym);
sym_name = elf_sym__name(&sym, di->dynstr_data);
- demangled = demangle_sym(di->dso, 0, sym_name);
+ demangled = dso__demangle_sym(di->dso, /*kmodule=*/0, sym_name);
if (demangled != NULL)
sym_name = demangled;
@@ -818,7 +747,7 @@ int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss)
gelf_getsym(syms, get_rel_symidx(&ri, idx), &sym);
elf_name = elf_sym__name(&sym, symstrs);
- demangled = demangle_sym(dso, 0, elf_name);
+ demangled = dso__demangle_sym(dso, /*kmodule=*/0, elf_name);
if (demangled)
elf_name = demangled;
if (*elf_name)
@@ -847,11 +776,6 @@ out_elf_end:
return 0;
}
-char *dso__demangle_sym(struct dso *dso, int kmodule, const char *elf_name)
-{
- return demangle_sym(dso, kmodule, elf_name);
-}
-
/*
* Align offset to 4 bytes as needed for note name and descriptor data.
*/
@@ -936,43 +860,16 @@ out:
return err;
}
-#ifdef HAVE_LIBBFD_BUILDID_SUPPORT
-
-static int read_build_id(const char *filename, struct build_id *bid)
-{
- size_t size = sizeof(bid->data);
- int err = -1;
- bfd *abfd;
-
- abfd = bfd_openr(filename, NULL);
- if (!abfd)
- return -1;
-
- if (!bfd_check_format(abfd, bfd_object)) {
- pr_debug2("%s: cannot read %s bfd file.\n", __func__, filename);
- goto out_close;
- }
-
- if (!abfd->build_id || abfd->build_id->size > size)
- goto out_close;
-
- memcpy(bid->data, abfd->build_id->data, abfd->build_id->size);
- memset(bid->data + abfd->build_id->size, 0, size - abfd->build_id->size);
- err = bid->size = abfd->build_id->size;
-
-out_close:
- bfd_close(abfd);
- return err;
-}
-
-#else // HAVE_LIBBFD_BUILDID_SUPPORT
-
static int read_build_id(const char *filename, struct build_id *bid)
{
size_t size = sizeof(bid->data);
- int fd, err = -1;
+ int fd, err;
Elf *elf;
+ err = libbfd__read_build_id(filename, bid);
+ if (err >= 0)
+ goto out;
+
if (size < BUILD_ID_SIZE)
goto out;
@@ -997,8 +894,6 @@ out:
return err;
}
-#endif // HAVE_LIBBFD_BUILDID_SUPPORT
-
int filename__read_build_id(const char *filename, struct build_id *bid)
{
struct kmod_path m = { .name = NULL, };
@@ -1007,6 +902,8 @@ int filename__read_build_id(const char *filename, struct build_id *bid)
if (!filename)
return -EFAULT;
+ if (!is_regular_file(filename))
+ return -EWOULDBLOCK;
err = kmod_path__parse(&m, filename);
if (err)
@@ -1082,44 +979,6 @@ out:
return err;
}
-#ifdef HAVE_LIBBFD_SUPPORT
-
-int filename__read_debuglink(const char *filename, char *debuglink,
- size_t size)
-{
- int err = -1;
- asection *section;
- bfd *abfd;
-
- abfd = bfd_openr(filename, NULL);
- if (!abfd)
- return -1;
-
- if (!bfd_check_format(abfd, bfd_object)) {
- pr_debug2("%s: cannot read %s bfd file.\n", __func__, filename);
- goto out_close;
- }
-
- section = bfd_get_section_by_name(abfd, ".gnu_debuglink");
- if (!section)
- goto out_close;
-
- if (section->size > size)
- goto out_close;
-
- if (!bfd_get_section_contents(abfd, section, debuglink, 0,
- section->size))
- goto out_close;
-
- err = 0;
-
-out_close:
- bfd_close(abfd);
- return err;
-}
-
-#else
-
int filename__read_debuglink(const char *filename, char *debuglink,
size_t size)
{
@@ -1131,6 +990,10 @@ int filename__read_debuglink(const char *filename, char *debuglink,
Elf_Scn *sec;
Elf_Kind ek;
+ err = libbfd_filename__read_debuglink(filename, debuglink, size);
+ if (err >= 0)
+ goto out;
+
fd = open(filename, O_RDONLY);
if (fd < 0)
goto out;
@@ -1172,8 +1035,6 @@ out:
return err;
}
-#endif
-
bool symsrc__possibly_runtime(struct symsrc *ss)
{
return ss->dynsym || ss->opdsec;
@@ -1586,8 +1447,11 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
map__set_mapping_type(curr_map, MAPPING_TYPE__IDENTITY);
}
dso__set_symtab_type(curr_dso, dso__symtab_type(dso));
- if (maps__insert(kmaps, curr_map))
+ if (maps__insert(kmaps, curr_map)) {
+ dso__put(curr_dso);
+ map__put(curr_map);
return -1;
+ }
dsos__add(&maps__machine(kmaps)->dsos, curr_dso);
dso__set_loaded(curr_dso);
dso__put(*curr_dsop);
@@ -1733,6 +1597,12 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss,
continue;
}
+ /* Reject RISCV ELF "mapping symbols" */
+ if (ehdr.e_machine == EM_RISCV) {
+ if (elf_name[0] == '$' && strchr("dx", elf_name[1]))
+ continue;
+ }
+
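
RISC-V mapping symbols ($d for data, $x for instructions, per the RISC-V psABI) annotate regions of the text section and are not callable symbols. The filter above, pulled out as a predicate — with an extra guard against a bare "$", since strchr() would otherwise match the terminating NUL:

#include <stdbool.h>
#include <string.h>

static bool riscv_is_mapping_symbol(const char *name)
{
	return name[0] == '$' && name[1] != '\0' &&
	       strchr("dx", name[1]) != NULL;
}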
if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) {
u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr;
u64 *opd = opddata->d_buf + offset;
@@ -1840,7 +1710,7 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss,
}
}
- demangled = demangle_sym(dso, kmodule, elf_name);
+ demangled = dso__demangle_sym(dso, kmodule, elf_name);
if (demangled != NULL)
elf_name = demangled;
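
The demangling helpers now route through dso__demangle_sym() declared in libbfd.h. The libbfd fallback that moved out of this file reduces, in isolation, to a bfd_demangle() call; a sketch assuming binutils' <bfd.h> and -lbfd at link time:

#define PACKAGE "perf"		/* bfd.h refuses to compile without PACKAGE */
#include <bfd.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	/* DMGL_PARAMS (1 << 0): argument lists; DMGL_ANSI (1 << 1): qualifiers. */
	char *out = argc > 1 ? bfd_demangle(NULL, argv[1], (1 << 0) | (1 << 1))
			     : NULL;

	puts(out ? out : "not a mangled name");
	free(out);
	return 0;
}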
diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c
index c6f369b5d893..c6b17c14a2e9 100644
--- a/tools/perf/util/symbol-minimal.c
+++ b/tools/perf/util/symbol-minimal.c
@@ -4,7 +4,6 @@
#include <errno.h>
#include <unistd.h>
-#include <stdio.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
@@ -43,7 +42,7 @@ static int read_build_id(void *note_data, size_t note_len, struct build_id *bid,
void *ptr;
ptr = note_data;
- while (ptr < (note_data + note_len)) {
+ while ((ptr + sizeof(*nhdr)) < (note_data + note_len)) {
const char *name;
size_t namesz, descsz;
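
The loop bound above now reserves room for a full note header before dereferencing it. A standalone walker over a PT_NOTE buffer — 64-bit note layout and glibc's <elf.h> assumed, byte-swapping omitted — shows the same discipline applied to the name and descriptor as well:

#include <elf.h>
#include <stddef.h>
#include <string.h>

/* Returns the NT_GNU_BUILD_ID descriptor size copied into out, or -1. */
static int find_build_id(const void *data, size_t len, void *out, size_t out_len)
{
	const char *p = data, *end = p + len;

	while (p + sizeof(Elf64_Nhdr) < end) {
		const Elf64_Nhdr *n = (const void *)p;
		size_t namesz = (n->n_namesz + 3) & ~3UL;	/* 4-byte aligned */
		size_t descsz = (n->n_descsz + 3) & ~3UL;
		const char *name = p + sizeof(*n);
		const char *desc = name + namesz;

		if (desc + descsz > end)
			return -1;	/* truncated or corrupt note */
		if (n->n_type == NT_GNU_BUILD_ID && n->n_namesz == 4 &&
		    !memcmp(name, "GNU", 4) && n->n_descsz <= out_len) {
			memcpy(out, desc, n->n_descsz);
			return (int)n->n_descsz;
		}
		p = desc + descsz;
	}
	return -1;
}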
@@ -88,139 +87,118 @@ int filename__read_debuglink(const char *filename __maybe_unused,
*/
int filename__read_build_id(const char *filename, struct build_id *bid)
{
- FILE *fp;
- int ret = -1;
- bool need_swap = false;
- u8 e_ident[EI_NIDENT];
- size_t buf_size;
- void *buf;
- int i;
+ int fd, ret = -1;
+ bool need_swap = false, elf32;
+ union {
+ struct {
+ Elf32_Ehdr ehdr32;
+ Elf32_Phdr *phdr32;
+ };
+ struct {
+ Elf64_Ehdr ehdr64;
+ Elf64_Phdr *phdr64;
+ };
+ } hdrs;
+ void *phdr, *buf = NULL;
+ ssize_t phdr_size, ehdr_size, buf_size = 0;
+
+ if (!filename)
+ return -EFAULT;
+ if (!is_regular_file(filename))
+ return -EWOULDBLOCK;
- fp = fopen(filename, "r");
- if (fp == NULL)
+ fd = open(filename, O_RDONLY);
+ if (fd < 0)
return -1;
- if (fread(e_ident, sizeof(e_ident), 1, fp) != 1)
+ if (read(fd, hdrs.ehdr32.e_ident, EI_NIDENT) != EI_NIDENT)
goto out;
- if (memcmp(e_ident, ELFMAG, SELFMAG) ||
- e_ident[EI_VERSION] != EV_CURRENT)
+ if (memcmp(hdrs.ehdr32.e_ident, ELFMAG, SELFMAG) ||
+ hdrs.ehdr32.e_ident[EI_VERSION] != EV_CURRENT)
goto out;
- need_swap = check_need_swap(e_ident[EI_DATA]);
-
- /* for simplicity */
- fseek(fp, 0, SEEK_SET);
+ need_swap = check_need_swap(hdrs.ehdr32.e_ident[EI_DATA]);
+ elf32 = hdrs.ehdr32.e_ident[EI_CLASS] == ELFCLASS32;
+ ehdr_size = (elf32 ? sizeof(hdrs.ehdr32) : sizeof(hdrs.ehdr64)) - EI_NIDENT;
- if (e_ident[EI_CLASS] == ELFCLASS32) {
- Elf32_Ehdr ehdr;
- Elf32_Phdr *phdr;
-
- if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1)
- goto out;
+ if (read(fd,
+ (elf32 ? (void *)&hdrs.ehdr32 : (void *)&hdrs.ehdr64) + EI_NIDENT,
+ ehdr_size) != ehdr_size)
+ goto out;
- if (need_swap) {
- ehdr.e_phoff = bswap_32(ehdr.e_phoff);
- ehdr.e_phentsize = bswap_16(ehdr.e_phentsize);
- ehdr.e_phnum = bswap_16(ehdr.e_phnum);
+ if (need_swap) {
+ if (elf32) {
+ hdrs.ehdr32.e_phoff = bswap_32(hdrs.ehdr32.e_phoff);
+ hdrs.ehdr32.e_phentsize = bswap_16(hdrs.ehdr32.e_phentsize);
+ hdrs.ehdr32.e_phnum = bswap_16(hdrs.ehdr32.e_phnum);
+ } else {
+ hdrs.ehdr64.e_phoff = bswap_64(hdrs.ehdr64.e_phoff);
+ hdrs.ehdr64.e_phentsize = bswap_16(hdrs.ehdr64.e_phentsize);
+ hdrs.ehdr64.e_phnum = bswap_16(hdrs.ehdr64.e_phnum);
}
+ }
+ if ((elf32 && hdrs.ehdr32.e_phentsize != sizeof(Elf32_Phdr)) ||
+ (!elf32 && hdrs.ehdr64.e_phentsize != sizeof(Elf64_Phdr)))
+ goto out;
- buf_size = ehdr.e_phentsize * ehdr.e_phnum;
- buf = malloc(buf_size);
- if (buf == NULL)
- goto out;
-
- fseek(fp, ehdr.e_phoff, SEEK_SET);
- if (fread(buf, buf_size, 1, fp) != 1)
- goto out_free;
-
- for (i = 0, phdr = buf; i < ehdr.e_phnum; i++, phdr++) {
- void *tmp;
- long offset;
-
- if (need_swap) {
- phdr->p_type = bswap_32(phdr->p_type);
- phdr->p_offset = bswap_32(phdr->p_offset);
- phdr->p_filesz = bswap_32(phdr->p_filesz);
- }
-
- if (phdr->p_type != PT_NOTE)
- continue;
-
- buf_size = phdr->p_filesz;
- offset = phdr->p_offset;
- tmp = realloc(buf, buf_size);
- if (tmp == NULL)
- goto out_free;
+ phdr_size = elf32 ? sizeof(Elf32_Phdr) * hdrs.ehdr32.e_phnum
+ : sizeof(Elf64_Phdr) * hdrs.ehdr64.e_phnum;
+ phdr = malloc(phdr_size);
+ if (phdr == NULL)
+ goto out;
- buf = tmp;
- fseek(fp, offset, SEEK_SET);
- if (fread(buf, buf_size, 1, fp) != 1)
- goto out_free;
+ lseek(fd, elf32 ? hdrs.ehdr32.e_phoff : hdrs.ehdr64.e_phoff, SEEK_SET);
+ if (read(fd, phdr, phdr_size) != phdr_size)
+ goto out_free;
- ret = read_build_id(buf, buf_size, bid, need_swap);
- if (ret == 0) {
- ret = bid->size;
- break;
- }
- }
- } else {
- Elf64_Ehdr ehdr;
- Elf64_Phdr *phdr;
+ if (elf32)
+ hdrs.phdr32 = phdr;
+ else
+ hdrs.phdr64 = phdr;
- if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1)
- goto out;
+ for (int i = 0; i < (elf32 ? hdrs.ehdr32.e_phnum : hdrs.ehdr64.e_phnum); i++) {
+ ssize_t p_filesz;
if (need_swap) {
- ehdr.e_phoff = bswap_64(ehdr.e_phoff);
- ehdr.e_phentsize = bswap_16(ehdr.e_phentsize);
- ehdr.e_phnum = bswap_16(ehdr.e_phnum);
+ if (elf32) {
+ hdrs.phdr32[i].p_type = bswap_32(hdrs.phdr32[i].p_type);
+ hdrs.phdr32[i].p_offset = bswap_32(hdrs.phdr32[i].p_offset);
+ hdrs.phdr32[i].p_filesz = bswap_32(hdrs.phdr32[i].p_filesz);
+ } else {
+ hdrs.phdr64[i].p_type = bswap_32(hdrs.phdr64[i].p_type);
+ hdrs.phdr64[i].p_offset = bswap_64(hdrs.phdr64[i].p_offset);
+ hdrs.phdr64[i].p_filesz = bswap_64(hdrs.phdr64[i].p_filesz);
+ }
}
+ if ((elf32 ? hdrs.phdr32[i].p_type : hdrs.phdr64[i].p_type) != PT_NOTE)
+ continue;
- buf_size = ehdr.e_phentsize * ehdr.e_phnum;
- buf = malloc(buf_size);
- if (buf == NULL)
- goto out;
-
- fseek(fp, ehdr.e_phoff, SEEK_SET);
- if (fread(buf, buf_size, 1, fp) != 1)
- goto out_free;
-
- for (i = 0, phdr = buf; i < ehdr.e_phnum; i++, phdr++) {
+ p_filesz = elf32 ? hdrs.phdr32[i].p_filesz : hdrs.phdr64[i].p_filesz;
+ if (p_filesz > buf_size) {
void *tmp;
- long offset;
- if (need_swap) {
- phdr->p_type = bswap_32(phdr->p_type);
- phdr->p_offset = bswap_64(phdr->p_offset);
- phdr->p_filesz = bswap_64(phdr->p_filesz);
- }
-
- if (phdr->p_type != PT_NOTE)
- continue;
-
- buf_size = phdr->p_filesz;
- offset = phdr->p_offset;
+ buf_size = p_filesz;
tmp = realloc(buf, buf_size);
if (tmp == NULL)
goto out_free;
-
buf = tmp;
- fseek(fp, offset, SEEK_SET);
- if (fread(buf, buf_size, 1, fp) != 1)
- goto out_free;
+ }
+ lseek(fd, elf32 ? hdrs.phdr32[i].p_offset : hdrs.phdr64[i].p_offset, SEEK_SET);
+ if (read(fd, buf, p_filesz) != p_filesz)
+ goto out_free;
- ret = read_build_id(buf, buf_size, bid, need_swap);
- if (ret == 0) {
- ret = bid->size;
- break;
- }
+ ret = read_build_id(buf, p_filesz, bid, need_swap);
+ if (ret == 0) {
+ ret = bid->size;
+ break;
}
}
out_free:
free(buf);
+ free(phdr);
out:
- fclose(fp);
+ close(fd);
return ret;
}
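
(For reference, the note walking that read_build_id() performs on each PT_NOTE payload can be sketched standalone as below. This is a simplified, same-endianness version: NT_GNU_BUILD_ID, the "GNU" owner name and the 4-byte field alignment come from the ELF gABI, and Elf64_Nhdr has the same layout as Elf32_Nhdr, so one type covers both classes.)

#include <elf.h>
#include <stddef.h>
#include <string.h>

/* Sketch: scan a PT_NOTE payload for the GNU build-id, copy it to
 * 'out' and return its length, or -1 if absent. */
static int note_buf__build_id(const void *data, size_t len,
			      unsigned char *out, size_t out_len)
{
	const void *p = data, *end = data + len;

	while (p + sizeof(Elf64_Nhdr) <= end) {
		const Elf64_Nhdr *nhdr = p;
		size_t namesz = (nhdr->n_namesz + 3) & ~3ul; /* 4-byte aligned */
		size_t descsz = (nhdr->n_descsz + 3) & ~3ul;
		const char *name = p + sizeof(*nhdr);

		p = name + namesz + descsz;
		if (p > end)
			break;
		if (nhdr->n_type == NT_GNU_BUILD_ID &&
		    nhdr->n_namesz == sizeof("GNU") &&
		    !memcmp(name, "GNU", sizeof("GNU"))) {
			size_t sz = nhdr->n_descsz < out_len ? nhdr->n_descsz
							     : out_len;

			memcpy(out, name + namesz, sz);
			return (int)sz;
		}
	}
	return -1;
}
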
@@ -343,7 +321,7 @@ int dso__load_sym(struct dso *dso, struct map *map __maybe_unused,
struct symsrc *runtime_ss __maybe_unused,
int kmodule __maybe_unused)
{
- struct build_id bid;
+ struct build_id bid = { .size = 0, };
int ret;
ret = fd__is_64_bit(ss->fd);
@@ -381,13 +359,6 @@ void symbol__elf_init(void)
{
}
-char *dso__demangle_sym(struct dso *dso __maybe_unused,
- int kmodule __maybe_unused,
- const char *elf_name __maybe_unused)
-{
- return NULL;
-}
-
bool filename__has_section(const char *filename __maybe_unused, const char *sec __maybe_unused)
{
return false;
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 11540219481b..814f960fa8f8 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -19,6 +19,11 @@
#include "build-id.h"
#include "cap.h"
#include "cpumap.h"
+#include "debug.h"
+#include "demangle-cxx.h"
+#include "demangle-java.h"
+#include "demangle-ocaml.h"
+#include "demangle-rust-v0.h"
#include "dso.h"
#include "util.h" // lsdir()
#include "debug.h"
@@ -36,6 +41,7 @@
#include "header.h"
#include "path.h"
#include <linux/ctype.h>
+#include <linux/log2.h>
#include <linux/zalloc.h>
#include <elf.h>
@@ -98,10 +104,21 @@ static enum dso_binary_type binary_type_symtab[] = {
#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
-static bool symbol_type__filter(char symbol_type)
+static bool symbol_type__filter(char __symbol_type)
{
- symbol_type = toupper(symbol_type);
- return symbol_type == 'T' || symbol_type == 'W' || symbol_type == 'D' || symbol_type == 'B';
+ // Since 'U' == undefined and 'u' == unique global symbol, we can't use toupper there
+ // 'N' is for debugging symbols, 'n' is a non-data, non-code, non-debug read-only section.
+ // According to 'man nm'.
+ // 'N' first seen in:
+ // ffffffff9b35d130 N __pfx__RNCINvNtNtNtCsbDUBuN8AbD4_4core4iter8adapters3map12map_try_foldjNtCs6vVzKs5jPr6_12drm_panic_qr7VersionuINtNtNtBa_3ops12control_flow11ControlFlowB10_ENcB10_0NCINvNvNtNtNtB8_6traits8iterator8Iterator4find5checkB10_NCNvMB12_B10_13from_segments0E0E0B12_
+ // a seemingly Rust mangled name
+ // Ditto for '1':
+ // root@x1:~# grep ' 1 ' /proc/kallsyms
+ // ffffffffb098bc00 1 __pfx__RNCINvNtNtNtCsfwaGRd4cjqE_4core4iter8adapters3map12map_try_foldjNtCskFudTml27HW_12drm_panic_qr7VersionuINtNtNtBa_3ops12control_flow11ControlFlowB10_ENcB10_0NCINvNvNtNtNtB8_6traits8iterator8Iterator4find5checkB10_NCNvMB12_B10_13from_segments0E0E0B12_
+ // ffffffffb098bc10 1 _RNCINvNtNtNtCsfwaGRd4cjqE_4core4iter8adapters3map12map_try_foldjNtCskFudTml27HW_12drm_panic_qr7VersionuINtNtNtBa_3ops12control_flow11ControlFlowB10_ENcB10_0NCINvNvNtNtNtB8_6traits8iterator8Iterator4find5checkB10_NCNvMB12_B10_13from_segments0E0E0B12_
+ char symbol_type = toupper(__symbol_type);
+ return symbol_type == 'T' || symbol_type == 'W' || symbol_type == 'D' || symbol_type == 'B' ||
+ __symbol_type == 'u' || __symbol_type == 'l' || __symbol_type == 'N' || __symbol_type == '1';
}
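
For illustration, how the widened filter treats nm(1)-style type characters:

	symbol_type__filter('T');	/* true: global text */
	symbol_type__filter('w');	/* true: toupper() gives 'W' (weak) */
	symbol_type__filter('u');	/* true: unique global, matched as-is */
	symbol_type__filter('1');	/* true: seen on Rust __pfx_ symbols */
	symbol_type__filter('U');	/* false: undefined */
	symbol_type__filter('a');	/* false: absolute */
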
static int prefix_underscores_count(const char *str)
@@ -623,7 +640,7 @@ void dso__sort_by_name(struct dso *dso)
{
mutex_lock(dso__lock(dso));
if (!dso__sorted_by_name(dso)) {
- size_t len;
+ size_t len = 0;
dso__set_symbol_names(dso, symbols__sort_by_name(dso__symbols(dso), &len));
if (dso__symbol_names(dso)) {
@@ -938,7 +955,8 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
pos->end -= delta;
}
- if (count == 0) {
+ if (map__start(initial_map) <= (pos->start + delta) &&
+ (pos->start + delta) < map__end(initial_map)) {
map__zput(curr_map);
curr_map = map__get(initial_map);
goto add_symbol;
@@ -947,11 +965,11 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST)
snprintf(dso_name, sizeof(dso_name),
"[guest.kernel].%d",
- kernel_range++);
+ kernel_range);
else
snprintf(dso_name, sizeof(dso_name),
"[kernel].%d",
- kernel_range++);
+ kernel_range);
ndso = dso__new(dso_name);
map__zput(curr_map);
@@ -959,6 +977,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
return -1;
dso__set_kernel(ndso, dso__kernel(dso));
+ dso__set_loaded(ndso);
curr_map = map__new2(pos->start, ndso);
if (curr_map == NULL) {
@@ -972,6 +991,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
dso__put(ndso);
return -1;
}
+ dso__put(ndso);
++kernel_range;
} else if (delta) {
/* Kernel was relocated at boot time */
@@ -1414,6 +1434,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
goto out_err;
}
}
+ map__zput(new_node->map);
free(new_node);
}
@@ -1575,137 +1596,6 @@ out_failure:
return -1;
}
-#ifdef HAVE_LIBBFD_SUPPORT
-#define PACKAGE 'perf'
-#include <bfd.h>
-
-static int bfd_symbols__cmpvalue(const void *a, const void *b)
-{
- const asymbol *as = *(const asymbol **)a, *bs = *(const asymbol **)b;
-
- if (bfd_asymbol_value(as) != bfd_asymbol_value(bs))
- return bfd_asymbol_value(as) - bfd_asymbol_value(bs);
-
- return bfd_asymbol_name(as)[0] - bfd_asymbol_name(bs)[0];
-}
-
-static int bfd2elf_binding(asymbol *symbol)
-{
- if (symbol->flags & BSF_WEAK)
- return STB_WEAK;
- if (symbol->flags & BSF_GLOBAL)
- return STB_GLOBAL;
- if (symbol->flags & BSF_LOCAL)
- return STB_LOCAL;
- return -1;
-}
-
-int dso__load_bfd_symbols(struct dso *dso, const char *debugfile)
-{
- int err = -1;
- long symbols_size, symbols_count, i;
- asection *section;
- asymbol **symbols, *sym;
- struct symbol *symbol;
- bfd *abfd;
- u64 start, len;
-
- abfd = bfd_openr(debugfile, NULL);
- if (!abfd)
- return -1;
-
- if (!bfd_check_format(abfd, bfd_object)) {
- pr_debug2("%s: cannot read %s bfd file.\n", __func__,
- dso__long_name(dso));
- goto out_close;
- }
-
- if (bfd_get_flavour(abfd) == bfd_target_elf_flavour)
- goto out_close;
-
- symbols_size = bfd_get_symtab_upper_bound(abfd);
- if (symbols_size == 0) {
- bfd_close(abfd);
- return 0;
- }
-
- if (symbols_size < 0)
- goto out_close;
-
- symbols = malloc(symbols_size);
- if (!symbols)
- goto out_close;
-
- symbols_count = bfd_canonicalize_symtab(abfd, symbols);
- if (symbols_count < 0)
- goto out_free;
-
- section = bfd_get_section_by_name(abfd, ".text");
- if (section) {
- for (i = 0; i < symbols_count; ++i) {
- if (!strcmp(bfd_asymbol_name(symbols[i]), "__ImageBase") ||
- !strcmp(bfd_asymbol_name(symbols[i]), "__image_base__"))
- break;
- }
- if (i < symbols_count) {
- /* PE symbols can only have 4 bytes, so use .text high bits */
- u64 text_offset = (section->vma - (u32)section->vma)
- + (u32)bfd_asymbol_value(symbols[i]);
- dso__set_text_offset(dso, text_offset);
- dso__set_text_end(dso, (section->vma - text_offset) + section->size);
- } else {
- dso__set_text_offset(dso, section->vma - section->filepos);
- dso__set_text_end(dso, section->filepos + section->size);
- }
- }
-
- qsort(symbols, symbols_count, sizeof(asymbol *), bfd_symbols__cmpvalue);
-
-#ifdef bfd_get_section
-#define bfd_asymbol_section bfd_get_section
-#endif
- for (i = 0; i < symbols_count; ++i) {
- sym = symbols[i];
- section = bfd_asymbol_section(sym);
- if (bfd2elf_binding(sym) < 0)
- continue;
-
- while (i + 1 < symbols_count &&
- bfd_asymbol_section(symbols[i + 1]) == section &&
- bfd2elf_binding(symbols[i + 1]) < 0)
- i++;
-
- if (i + 1 < symbols_count &&
- bfd_asymbol_section(symbols[i + 1]) == section)
- len = symbols[i + 1]->value - sym->value;
- else
- len = section->size - sym->value;
-
- start = bfd_asymbol_value(sym) - dso__text_offset(dso);
- symbol = symbol__new(start, len, bfd2elf_binding(sym), STT_FUNC,
- bfd_asymbol_name(sym));
- if (!symbol)
- goto out_free;
-
- symbols__insert(dso__symbols(dso), symbol);
- }
-#ifdef bfd_get_section
-#undef bfd_asymbol_section
-#endif
-
- symbols__fixup_end(dso__symbols(dso), false);
- symbols__fixup_duplicate(dso__symbols(dso));
- dso__set_adjust_symbols(dso, true);
-
- err = 0;
-out_free:
- free(symbols);
-out_close:
- bfd_close(abfd);
- return err;
-}
-#endif
-
static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
enum dso_binary_type type)
{
@@ -1804,7 +1694,6 @@ int dso__load(struct dso *dso, struct map *map)
struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
bool kmod;
bool perfmap;
- struct build_id bid;
struct nscookie nsc;
char newmapname[PATH_MAX];
const char *map_path = dso__long_name(dso);
@@ -1861,10 +1750,11 @@ int dso__load(struct dso *dso, struct map *map)
/*
* Read the build id if possible. This is required for
- * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
+ * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work.
*/
- if (!dso__has_build_id(dso) &&
- is_regular_file(dso__long_name(dso))) {
+ if (!dso__has_build_id(dso)) {
+ struct build_id bid = { .size = 0, };
+
__symbol__join_symfs(name, PATH_MAX, dso__long_name(dso));
if (filename__read_build_id(name, &bid) > 0)
dso__set_build_id(dso, &bid);
@@ -2113,10 +2003,11 @@ static bool filename__readable(const char *file)
static char *dso__find_kallsyms(struct dso *dso, struct map *map)
{
- struct build_id bid;
+ struct build_id bid = { .size = 0, };
char sbuild_id[SBUILD_ID_SIZE];
bool is_host = false;
char path[PATH_MAX];
+ struct maps *kmaps = map__kmaps(map);
if (!dso__has_build_id(dso)) {
/*
@@ -2143,7 +2034,7 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map)
goto proc_kallsyms;
}
- build_id__sprintf(dso__bid(dso), sbuild_id);
+ build_id__snprintf(dso__bid(dso), sbuild_id, sizeof(sbuild_id));
/* Find kallsyms in build-id cache with kcore */
scnprintf(path, sizeof(path), "%s/%s/%s",
@@ -2153,8 +2044,13 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map)
return strdup(path);
/* Use current /proc/kallsyms if possible */
- if (is_host) {
proc_kallsyms:
+ if (kmaps) {
+ struct machine *machine = maps__machine(kmaps);
+
+ scnprintf(path, sizeof(path), "%s/proc/kallsyms", machine->root_dir);
+ return strdup(path);
+ } else if (is_host) {
return strdup("/proc/kallsyms");
}
@@ -2646,3 +2542,79 @@ int symbol__validate_sym_arguments(void)
}
return 0;
}
+
+static bool want_demangle(bool is_kernel_sym)
+{
+ return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
+}
+
+/*
+ * Demangle C++ function signature, typically replaced by demangle-cxx.cpp
+ * version.
+ */
+#ifndef HAVE_CXA_DEMANGLE_SUPPORT
+char *cxx_demangle_sym(const char *str __maybe_unused, bool params __maybe_unused,
+ bool modifiers __maybe_unused)
+{
+#ifdef HAVE_LIBBFD_SUPPORT
+ int flags = (params ? DMGL_PARAMS : 0) | (modifiers ? DMGL_ANSI : 0);
+
+ return bfd_demangle(NULL, str, flags);
+#elif defined(HAVE_CPLUS_DEMANGLE_SUPPORT)
+ int flags = (params ? DMGL_PARAMS : 0) | (modifiers ? DMGL_ANSI : 0);
+
+ return cplus_demangle(str, flags);
+#else
+ return NULL;
+#endif
+}
+#endif /* !HAVE_CXA_DEMANGLE_SUPPORT */
+
+char *dso__demangle_sym(struct dso *dso, int kmodule, const char *elf_name)
+{
+ struct demangle rust_demangle = {
+ .style = DemangleStyleUnknown,
+ };
+ char *demangled = NULL;
+
+ /*
+ * We need to figure out if the object was created from C++ sources
+ * DWARF DW_compile_unit has this, but we don't always have access
+ * to it...
+ */
+ if (!want_demangle((dso && dso__kernel(dso)) || kmodule))
+ return demangled;
+
+ rust_demangle_demangle(elf_name, &rust_demangle);
+ if (rust_demangle_is_known(&rust_demangle)) {
+ /* A rust mangled name. */
+ if (rust_demangle.mangled_len == 0)
+ return demangled;
+
+ for (size_t buf_len = roundup_pow_of_two(rust_demangle.mangled_len * 2);
+ buf_len < 1024 * 1024; buf_len *= 2) {
+ char *tmp = realloc(demangled, buf_len);
+
+ if (!tmp) {
+ /* Failure to grow output buffer, return what is there. */
+ return demangled;
+ }
+ demangled = tmp;
+ if (rust_demangle_display_demangle(&rust_demangle, demangled, buf_len,
+ /*alternate=*/true) == OverflowOk)
+ return demangled;
+ }
+ /* Buffer exceeded sensible bounds, return what is there. */
+ return demangled;
+ }
+
+ demangled = cxx_demangle_sym(elf_name, verbose > 0, verbose > 0);
+ if (demangled)
+ return demangled;
+
+ demangled = ocaml_demangle_sym(elf_name);
+ if (demangled)
+ return demangled;
+
+ return java_demangle_sym(elf_name, JAVA_DEMANGLE_NORET);
+}
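
A hypothetical caller, showing the fallback order (Rust v0, then C++, then OCaml, then Java) and that the result, when non-NULL, is heap allocated and owned by the caller:

	char *pretty = dso__demangle_sym(dso, /*kmodule=*/0, sym->name);

	if (pretty) {
		puts(pretty);
		free(pretty);
	} else {
		puts(sym->name);	/* not mangled, print as-is */
	}
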
diff --git a/tools/perf/util/symbol_conf.h b/tools/perf/util/symbol_conf.h
index cd9aa82c7d5a..7a80d2c14d9b 100644
--- a/tools/perf/util/symbol_conf.h
+++ b/tools/perf/util/symbol_conf.h
@@ -43,7 +43,7 @@ struct symbol_conf {
report_individual_block,
inline_name,
disable_add2line_warn,
- buildid_mmap2,
+ no_buildid_mmap2,
guest_code,
lazy_load_kernel_maps,
keep_exited_threads,
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index 2fc4d0537840..2ba9fa25e00a 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -368,11 +368,11 @@ static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event,
struct machine *machine,
bool is_kernel)
{
- struct build_id bid;
+ struct build_id bid = { .size = 0, };
struct nsinfo *nsi;
struct nscookie nc;
struct dso *dso = NULL;
- struct dso_id id;
+ struct dso_id dso_id = dso_id_empty;
int rc;
if (is_kernel) {
@@ -380,12 +380,18 @@ static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event,
goto out;
}
- id.maj = event->maj;
- id.min = event->min;
- id.ino = event->ino;
- id.ino_generation = event->ino_generation;
+ if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
+ build_id__init(&dso_id.build_id, event->build_id, event->build_id_size);
+ } else {
+ dso_id.maj = event->maj;
+ dso_id.min = event->min;
+ dso_id.ino = event->ino;
+ dso_id.ino_generation = event->ino_generation;
+ dso_id.mmap2_valid = true;
+ dso_id.mmap2_ino_generation_valid = true;
+ }
- dso = dsos__findnew_id(&machine->dsos, event->filename, &id);
+ dso = dsos__findnew_id(&machine->dsos, event->filename, &dso_id);
if (dso && dso__has_build_id(dso)) {
bid = *dso__bid(dso);
rc = 0;
@@ -526,7 +532,7 @@ out:
event->mmap2.pid = tgid;
event->mmap2.tid = pid;
- if (symbol_conf.buildid_mmap2)
+ if (!symbol_conf.no_buildid_mmap2)
perf_record_mmap2__read_build_id(&event->mmap2, machine, false);
if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
@@ -684,7 +690,7 @@ static int perf_event__synthesize_modules_maps_cb(struct map *map, void *data)
return 0;
dso = map__dso(map);
- if (symbol_conf.buildid_mmap2) {
+ if (!symbol_conf.no_buildid_mmap2) {
size = PERF_ALIGN(dso__long_name_len(dso) + 1, sizeof(u64));
event->mmap2.header.type = PERF_RECORD_MMAP2;
event->mmap2.header.size = (sizeof(event->mmap2) -
@@ -728,9 +734,9 @@ int perf_event__synthesize_modules(const struct perf_tool *tool, perf_event__han
.process = process,
.machine = machine,
};
- size_t size = symbol_conf.buildid_mmap2
- ? sizeof(args.event->mmap2)
- : sizeof(args.event->mmap);
+ size_t size = symbol_conf.no_buildid_mmap2
+ ? sizeof(args.event->mmap)
+ : sizeof(args.event->mmap2);
args.event = zalloc(size + machine->id_hdr_size);
if (args.event == NULL) {
@@ -1118,8 +1124,8 @@ static int __perf_event__synthesize_kernel_mmap(const struct perf_tool *tool,
struct machine *machine)
{
union perf_event *event;
- size_t size = symbol_conf.buildid_mmap2 ?
- sizeof(event->mmap2) : sizeof(event->mmap);
+ size_t size = symbol_conf.no_buildid_mmap2 ?
+ sizeof(event->mmap) : sizeof(event->mmap2);
struct map *map = machine__kernel_map(machine);
struct kmap *kmap;
int err;
@@ -1153,7 +1159,7 @@ static int __perf_event__synthesize_kernel_mmap(const struct perf_tool *tool,
event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
}
- if (symbol_conf.buildid_mmap2) {
+ if (!symbol_conf.no_buildid_mmap2) {
size = snprintf(event->mmap2.filename, sizeof(event->mmap2.filename),
"%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
size = PERF_ALIGN(size, sizeof(u64));
@@ -1567,10 +1573,16 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
return result;
}
-void __weak arch_perf_synthesize_sample_weight(const struct perf_sample *data,
+static void perf_synthesize_sample_weight(const struct perf_sample *data,
__u64 *array, u64 type __maybe_unused)
{
*array = data->weight;
+
+ if (type & PERF_SAMPLE_WEIGHT_STRUCT) {
+ *array &= 0xffffffff;
+ *array |= ((u64)data->ins_lat << 32);
+ *array |= ((u64)data->weight3 << 48);
+ }
}
static __u64 *copy_read_group_values(__u64 *array, __u64 read_format,
@@ -1730,7 +1742,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_fo
}
if (type & PERF_SAMPLE_WEIGHT_TYPE) {
- arch_perf_synthesize_sample_weight(sample, array, type);
+ perf_synthesize_sample_weight(sample, array, type);
array++;
}
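
For reference, PERF_SAMPLE_WEIGHT_STRUCT packs three fields into the one u64 written above; a sketch of the matching decode on a little-endian host (field widths per union perf_sample_weight in the perf_event uapi):

	__u64 packed  = *array;
	__u32 weight  = packed & 0xffffffff;		/* var1_dw */
	__u16 ins_lat = (packed >> 32) & 0xffff;	/* var2_w */
	__u16 weight3 = (packed >> 48) & 0xffff;	/* var3_w */
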
@@ -2045,7 +2057,7 @@ int perf_event__synthesize_event_update_name(const struct perf_tool *tool, struc
int perf_event__synthesize_event_update_cpus(const struct perf_tool *tool, struct evsel *evsel,
perf_event__handler_t process)
{
- struct synthesize_cpu_map_data syn_data = { .map = evsel->core.own_cpus };
+ struct synthesize_cpu_map_data syn_data = { .map = evsel->core.pmu_cpus };
struct perf_record_event_update *ev;
int err;
@@ -2126,7 +2138,7 @@ int perf_event__synthesize_extra_attr(const struct perf_tool *tool, struct evlis
}
}
- if (evsel->core.own_cpus) {
+ if (evsel->core.pmu_cpus) {
err = perf_event__synthesize_event_update_cpus(tool, evsel, process);
if (err < 0) {
pr_err("Couldn't synthesize evsel cpus.\n");
@@ -2248,7 +2260,9 @@ int perf_event__synthesize_build_id(const struct perf_tool *tool,
memset(&ev, 0, len);
- ev.build_id.size = min(bid->size, sizeof(ev.build_id.build_id));
+ ev.build_id.size = bid->size;
+ if (ev.build_id.size > sizeof(ev.build_id.build_id))
+ ev.build_id.size = sizeof(ev.build_id.build_id);
memcpy(ev.build_id.build_id, bid->data, ev.build_id.size);
ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
ev.build_id.header.misc = misc | PERF_RECORD_MISC_BUILD_ID_SIZE;
@@ -2308,7 +2322,9 @@ int perf_event__synthesize_mmap2_build_id(const struct perf_tool *tool,
ev.mmap2.len = len;
ev.mmap2.pgoff = pgoff;
- ev.mmap2.build_id_size = min(bid->size, sizeof(ev.mmap2.build_id));
+ ev.mmap2.build_id_size = bid->size;
+ if (ev.mmap2.build_id_size > sizeof(ev.mmap2.build_id))
+ ev.mmap2.build_id_size = sizeof(ev.mmap2.build_id);
memcpy(ev.mmap2.build_id, bid->data, ev.mmap2.build_id_size);
ev.mmap2.prot = prot;
diff --git a/tools/perf/util/synthetic-events.h b/tools/perf/util/synthetic-events.h
index b9c936b5cfeb..f8588b6cf11a 100644
--- a/tools/perf/util/synthetic-events.h
+++ b/tools/perf/util/synthetic-events.h
@@ -92,6 +92,8 @@ int perf_event__synthesize_threads(const struct perf_tool *tool, perf_event__han
int perf_event__synthesize_tracing_data(const struct perf_tool *tool, int fd, struct evlist *evlist, perf_event__handler_t process);
int perf_event__synth_time_conv(const struct perf_event_mmap_page *pc, const struct perf_tool *tool, perf_event__handler_t process, struct machine *machine);
pid_t perf_event__synthesize_comm(const struct perf_tool *tool, union perf_event *event, pid_t pid, perf_event__handler_t process, struct machine *machine);
+void perf_event__synthesize_final_bpf_metadata(struct perf_session *session,
+ perf_event__handler_t process);
int perf_tool__process_synth_event(const struct perf_tool *tool, union perf_event *event, struct machine *machine, perf_event__handler_t process);
@@ -105,24 +107,9 @@ int machine__synthesize_threads(struct machine *machine, struct target *target,
struct perf_thread_map *threads, bool needs_mmap, bool data_mmap,
unsigned int nr_threads_synthesize);
-#ifdef HAVE_AUXTRACE_SUPPORT
int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr, const struct perf_tool *tool,
struct perf_session *session, perf_event__handler_t process);
-#else // HAVE_AUXTRACE_SUPPORT
-
-#include <errno.h>
-
-static inline int
-perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr __maybe_unused,
- const struct perf_tool *tool __maybe_unused,
- struct perf_session *session __maybe_unused,
- perf_event__handler_t process __maybe_unused)
-{
- return -EINVAL;
-}
-#endif // HAVE_AUXTRACE_SUPPORT
-
#ifdef HAVE_LIBBPF_SUPPORT
int perf_event__synthesize_bpf_events(struct perf_session *session, perf_event__handler_t process,
struct machine *machine, struct record_opts *opts);
diff --git a/tools/perf/util/target.c b/tools/perf/util/target.c
index 0f383418e3df..8cf71bea295a 100644
--- a/tools/perf/util/target.c
+++ b/tools/perf/util/target.c
@@ -28,20 +28,6 @@ enum target_errno target__validate(struct target *target)
ret = TARGET_ERRNO__PID_OVERRIDE_CPU;
}
- /* UID and PID are mutually exclusive */
- if (target->tid && target->uid_str) {
- target->uid_str = NULL;
- if (ret == TARGET_ERRNO__SUCCESS)
- ret = TARGET_ERRNO__PID_OVERRIDE_UID;
- }
-
- /* UID and CPU are mutually exclusive */
- if (target->uid_str && target->cpu_list) {
- target->cpu_list = NULL;
- if (ret == TARGET_ERRNO__SUCCESS)
- ret = TARGET_ERRNO__UID_OVERRIDE_CPU;
- }
-
/* PID and SYSTEM are mutually exclusive */
if (target->tid && target->system_wide) {
target->system_wide = false;
@@ -49,13 +35,6 @@ enum target_errno target__validate(struct target *target)
ret = TARGET_ERRNO__PID_OVERRIDE_SYSTEM;
}
- /* UID and SYSTEM are mutually exclusive */
- if (target->uid_str && target->system_wide) {
- target->system_wide = false;
- if (ret == TARGET_ERRNO__SUCCESS)
- ret = TARGET_ERRNO__UID_OVERRIDE_SYSTEM;
- }
-
/* BPF and CPU are mutually exclusive */
if (target->bpf_str && target->cpu_list) {
target->cpu_list = NULL;
@@ -70,13 +49,6 @@ enum target_errno target__validate(struct target *target)
ret = TARGET_ERRNO__BPF_OVERRIDE_PID;
}
- /* BPF and UID are mutually exclusive */
- if (target->bpf_str && target->uid_str) {
- target->uid_str = NULL;
- if (ret == TARGET_ERRNO__SUCCESS)
- ret = TARGET_ERRNO__BPF_OVERRIDE_UID;
- }
-
/* BPF and THREADS are mutually exclusive */
if (target->bpf_str && target->per_thread) {
target->per_thread = false;
@@ -94,15 +66,13 @@ enum target_errno target__validate(struct target *target)
return ret;
}
-enum target_errno target__parse_uid(struct target *target)
+uid_t parse_uid(const char *str)
{
struct passwd pwd, *result;
char buf[1024];
- const char *str = target->uid_str;
- target->uid = UINT_MAX;
if (str == NULL)
- return TARGET_ERRNO__SUCCESS;
+ return UINT_MAX;
/* Try user name first */
getpwnam_r(str, &pwd, buf, sizeof(buf), &result);
@@ -115,16 +85,15 @@ enum target_errno target__parse_uid(struct target *target)
int uid = strtol(str, &endptr, 10);
if (*endptr != '\0')
- return TARGET_ERRNO__INVALID_UID;
+ return UINT_MAX;
getpwuid_r(uid, &pwd, buf, sizeof(buf), &result);
if (result == NULL)
- return TARGET_ERRNO__USER_NOT_FOUND;
+ return UINT_MAX;
}
- target->uid = result->pw_uid;
- return TARGET_ERRNO__SUCCESS;
+ return result->pw_uid;
}
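
A hypothetical call site, showing the new convention: parse_uid() returns UINT_MAX on any failure instead of the old TARGET_ERRNO__INVALID_UID/USER_NOT_FOUND values:

	uid_t uid = parse_uid(optarg);	/* optarg: a --uid style argument */

	if (uid == UINT_MAX) {
		fprintf(stderr, "invalid or unknown user '%s'\n", optarg);
		return -1;
	}
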
/*
@@ -132,20 +101,14 @@ enum target_errno target__parse_uid(struct target *target)
*/
static const char *target__error_str[] = {
"PID/TID switch overriding CPU",
- "PID/TID switch overriding UID",
- "UID switch overriding CPU",
"PID/TID switch overriding SYSTEM",
- "UID switch overriding SYSTEM",
"SYSTEM/CPU switch overriding PER-THREAD",
"BPF switch overriding CPU",
"BPF switch overriding PID/TID",
- "BPF switch overriding UID",
"BPF switch overriding THREAD",
- "Invalid User: %s",
- "Problems obtaining information for user %s",
};
-int target__strerror(struct target *target, int errnum,
+int target__strerror(struct target *target __maybe_unused, int errnum,
char *buf, size_t buflen)
{
int idx;
@@ -170,11 +133,6 @@ int target__strerror(struct target *target, int errnum,
snprintf(buf, buflen, "%s", msg);
break;
- case TARGET_ERRNO__INVALID_UID:
- case TARGET_ERRNO__USER_NOT_FOUND:
- snprintf(buf, buflen, msg, target->uid_str);
- break;
-
default:
/* cannot reach here */
break;
diff --git a/tools/perf/util/target.h b/tools/perf/util/target.h
index 2ee2cc30340f..84ebb9c940c6 100644
--- a/tools/perf/util/target.h
+++ b/tools/perf/util/target.h
@@ -9,9 +9,7 @@ struct target {
const char *pid;
const char *tid;
const char *cpu_list;
- const char *uid_str;
const char *bpf_str;
- uid_t uid;
bool system_wide;
bool uses_mmap;
bool default_per_cpu;
@@ -36,31 +34,24 @@ enum target_errno {
/* for target__validate() */
TARGET_ERRNO__PID_OVERRIDE_CPU = __TARGET_ERRNO__START,
- TARGET_ERRNO__PID_OVERRIDE_UID,
- TARGET_ERRNO__UID_OVERRIDE_CPU,
TARGET_ERRNO__PID_OVERRIDE_SYSTEM,
- TARGET_ERRNO__UID_OVERRIDE_SYSTEM,
TARGET_ERRNO__SYSTEM_OVERRIDE_THREAD,
TARGET_ERRNO__BPF_OVERRIDE_CPU,
TARGET_ERRNO__BPF_OVERRIDE_PID,
- TARGET_ERRNO__BPF_OVERRIDE_UID,
TARGET_ERRNO__BPF_OVERRIDE_THREAD,
- /* for target__parse_uid() */
- TARGET_ERRNO__INVALID_UID,
- TARGET_ERRNO__USER_NOT_FOUND,
-
__TARGET_ERRNO__END,
};
enum target_errno target__validate(struct target *target);
-enum target_errno target__parse_uid(struct target *target);
+
+uid_t parse_uid(const char *str);
int target__strerror(struct target *target, int errnum, char *buf, size_t buflen);
static inline bool target__has_task(struct target *target)
{
- return target->tid || target->pid || target->uid_str;
+ return target->tid || target->pid;
}
static inline bool target__has_cpu(struct target *target)
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 89585f53c1d5..aa9c58bbf9d3 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -41,6 +41,7 @@ int thread__init_maps(struct thread *thread, struct machine *machine)
}
struct thread *thread__new(pid_t pid, pid_t tid)
+ NO_THREAD_SAFETY_ANALYSIS /* Allocation/creation is inherently single threaded. */
{
RC_STRUCT(thread) *_thread = zalloc(sizeof(*_thread));
struct thread *thread;
@@ -200,7 +201,8 @@ int thread__set_namespaces(struct thread *thread, u64 timestamp,
return ret;
}
-struct comm *thread__comm(struct thread *thread)
+static struct comm *__thread__comm(struct thread *thread)
+ SHARED_LOCKS_REQUIRED(thread__comm_lock(thread))
{
if (list_empty(thread__comm_list(thread)))
return NULL;
@@ -208,16 +210,30 @@ struct comm *thread__comm(struct thread *thread)
return list_first_entry(thread__comm_list(thread), struct comm, list);
}
+struct comm *thread__comm(struct thread *thread)
+{
+ struct comm *res = NULL;
+
+ down_read(thread__comm_lock(thread));
+ res = __thread__comm(thread);
+ up_read(thread__comm_lock(thread));
+ return res;
+}
+
struct comm *thread__exec_comm(struct thread *thread)
{
struct comm *comm, *last = NULL, *second_last = NULL;
+ down_read(thread__comm_lock(thread));
list_for_each_entry(comm, thread__comm_list(thread), list) {
- if (comm->exec)
+ if (comm->exec) {
+ up_read(thread__comm_lock(thread));
return comm;
+ }
second_last = last;
last = comm;
}
+ up_read(thread__comm_lock(thread));
/*
* 'last' with no start time might be the parent's comm of a synthesized
@@ -233,8 +249,9 @@ struct comm *thread__exec_comm(struct thread *thread)
static int ____thread__set_comm(struct thread *thread, const char *str,
u64 timestamp, bool exec)
+ EXCLUSIVE_LOCKS_REQUIRED(thread__comm_lock(thread))
{
- struct comm *new, *curr = thread__comm(thread);
+ struct comm *new, *curr = __thread__comm(thread);
/* Override the default :tid entry */
if (!thread__comm_set(thread)) {
@@ -285,8 +302,9 @@ int thread__set_comm_from_proc(struct thread *thread)
}
static const char *__thread__comm_str(struct thread *thread)
+ SHARED_LOCKS_REQUIRED(thread__comm_lock(thread))
{
- const struct comm *comm = thread__comm(thread);
+ const struct comm *comm = __thread__comm(thread);
if (!comm)
return NULL;
@@ -410,7 +428,7 @@ int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bo
}
void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
- struct addr_location *al)
+ bool symbols, struct addr_location *al)
{
size_t i;
const u8 cpumodes[] = {
@@ -421,7 +439,11 @@ void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
};
for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
- thread__find_symbol(thread, cpumodes[i], addr, al);
+ if (symbols)
+ thread__find_symbol(thread, cpumodes[i], addr, al);
+ else
+ thread__find_map(thread, cpumodes[i], addr, al);
+
if (al->map)
break;
}
@@ -471,6 +493,7 @@ uint16_t thread__e_machine(struct thread *thread, struct machine *machine)
if (parent) {
e_machine = thread__e_machine(parent, machine);
+ thread__put(parent);
thread__set_e_machine(thread, e_machine);
return e_machine;
}
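
A hypothetical caller of the extended thread__find_cpumode_addr_location(), passing symbols=false when only the map is wanted and the symbol-table walk can be skipped:

	struct addr_location al;

	addr_location__init(&al);
	thread__find_cpumode_addr_location(thread, addr, /*symbols=*/false, &al);
	if (al.map)
		pr_debug("%#" PRIx64 " is in %s\n", addr,
			 dso__long_name(map__dso(al.map)));
	addr_location__exit(&al);
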
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index cd574a896418..310eaea344bb 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -126,7 +126,7 @@ struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
u64 addr, struct addr_location *al);
void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
- struct addr_location *al);
+ bool symbols, struct addr_location *al);
int thread__memcpy(struct thread *thread, struct machine *machine,
void *buf, u64 ip, int len, bool *is64bit);
@@ -236,14 +236,15 @@ static inline struct rw_semaphore *thread__namespaces_lock(struct thread *thread
return &RC_CHK_ACCESS(thread)->namespaces_lock;
}
-static inline struct list_head *thread__comm_list(struct thread *thread)
+static inline struct rw_semaphore *thread__comm_lock(struct thread *thread)
{
- return &RC_CHK_ACCESS(thread)->comm_list;
+ return &RC_CHK_ACCESS(thread)->comm_lock;
}
-static inline struct rw_semaphore *thread__comm_lock(struct thread *thread)
+static inline struct list_head *thread__comm_list(struct thread *thread)
+ SHARED_LOCKS_REQUIRED(thread__comm_lock(thread))
{
- return &RC_CHK_ACCESS(thread)->comm_lock;
+ return &RC_CHK_ACCESS(thread)->comm_list;
}
static inline u64 thread__db_id(const struct thread *thread)
diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
index b5f12390c355..ca193c1374ed 100644
--- a/tools/perf/util/thread_map.c
+++ b/tools/perf/util/thread_map.c
@@ -72,7 +72,7 @@ struct perf_thread_map *thread_map__new_by_tid(pid_t tid)
return threads;
}
-static struct perf_thread_map *__thread_map__new_all_cpus(uid_t uid)
+static struct perf_thread_map *thread_map__new_all_cpus(void)
{
DIR *proc;
int max_threads = 32, items, i;
@@ -98,15 +98,6 @@ static struct perf_thread_map *__thread_map__new_all_cpus(uid_t uid)
if (*end) /* only interested in proper numerical dirents */
continue;
- snprintf(path, sizeof(path), "/proc/%s", dirent->d_name);
-
- if (uid != UINT_MAX) {
- struct stat st;
-
- if (stat(path, &st) != 0 || st.st_uid != uid)
- continue;
- }
-
snprintf(path, sizeof(path), "/proc/%d/task", pid);
items = scandir(path, &namelist, filter, NULL);
if (items <= 0) {
@@ -157,24 +148,11 @@ out_free_namelist:
goto out_closedir;
}
-struct perf_thread_map *thread_map__new_all_cpus(void)
-{
- return __thread_map__new_all_cpus(UINT_MAX);
-}
-
-struct perf_thread_map *thread_map__new_by_uid(uid_t uid)
-{
- return __thread_map__new_all_cpus(uid);
-}
-
-struct perf_thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid)
+struct perf_thread_map *thread_map__new(pid_t pid, pid_t tid)
{
if (pid != -1)
return thread_map__new_by_pid(pid);
- if (tid == -1 && uid != UINT_MAX)
- return thread_map__new_by_uid(uid);
-
return thread_map__new_by_tid(tid);
}
@@ -289,15 +267,11 @@ out_free_threads:
goto out;
}
-struct perf_thread_map *thread_map__new_str(const char *pid, const char *tid,
- uid_t uid, bool all_threads)
+struct perf_thread_map *thread_map__new_str(const char *pid, const char *tid, bool all_threads)
{
if (pid)
return thread_map__new_by_pid_str(pid);
- if (!tid && uid != UINT_MAX)
- return thread_map__new_by_uid(uid);
-
if (all_threads)
return thread_map__new_all_cpus();
diff --git a/tools/perf/util/thread_map.h b/tools/perf/util/thread_map.h
index 00ec05fc1656..fc16d87f32fb 100644
--- a/tools/perf/util/thread_map.h
+++ b/tools/perf/util/thread_map.h
@@ -11,13 +11,11 @@ struct perf_record_thread_map;
struct perf_thread_map *thread_map__new_dummy(void);
struct perf_thread_map *thread_map__new_by_pid(pid_t pid);
struct perf_thread_map *thread_map__new_by_tid(pid_t tid);
-struct perf_thread_map *thread_map__new_by_uid(uid_t uid);
-struct perf_thread_map *thread_map__new_all_cpus(void);
-struct perf_thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid);
+struct perf_thread_map *thread_map__new(pid_t pid, pid_t tid);
struct perf_thread_map *thread_map__new_event(struct perf_record_thread_map *event);
struct perf_thread_map *thread_map__new_str(const char *pid,
- const char *tid, uid_t uid, bool all_threads);
+ const char *tid, bool all_threads);
struct perf_thread_map *thread_map__new_by_tid_str(const char *tid_str);
diff --git a/tools/perf/util/tool.c b/tools/perf/util/tool.c
index 3b7f390f26eb..27ba5849c74a 100644
--- a/tools/perf/util/tool.c
+++ b/tools/perf/util/tool.c
@@ -1,23 +1,27 @@
// SPDX-License-Identifier: GPL-2.0
#include "data.h"
#include "debug.h"
+#include "event.h"
#include "header.h"
#include "session.h"
#include "stat.h"
#include "tool.h"
#include "tsc.h"
+#include <linux/compiler.h>
#include <sys/mman.h>
+#include <stddef.h>
#include <unistd.h>
#ifdef HAVE_ZSTD_SUPPORT
-static int perf_session__process_compressed_event(struct perf_session *session,
+static int perf_session__process_compressed_event(const struct perf_tool *tool __maybe_unused,
+ struct perf_session *session,
union perf_event *event, u64 file_offset,
const char *file_path)
{
void *src;
size_t decomp_size, src_size;
u64 decomp_last_rem = 0;
- size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
+ size_t mmap_len, decomp_len = perf_session__env(session)->comp_mmap_len;
struct decomp *decomp, *decomp_last = session->active_decomp->decomp_last;
if (decomp_last) {
@@ -43,8 +47,15 @@ static int perf_session__process_compressed_event(struct perf_session *session,
decomp->size = decomp_last_rem;
}
- src = (void *)event + sizeof(struct perf_record_compressed);
- src_size = event->pack.header.size - sizeof(struct perf_record_compressed);
+ if (event->header.type == PERF_RECORD_COMPRESSED) {
+ src = (void *)event + sizeof(struct perf_record_compressed);
+ src_size = event->pack.header.size - sizeof(struct perf_record_compressed);
+ } else if (event->header.type == PERF_RECORD_COMPRESSED2) {
+ src = (void *)event + sizeof(struct perf_record_compressed2);
+ src_size = event->pack2.data_size;
+ } else {
+ return -1;
+ }
decomp_size = zstd_decompress_stream(session->active_decomp->zstd_decomp, src, src_size,
&(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
@@ -69,10 +80,9 @@ static int perf_session__process_compressed_event(struct perf_session *session,
}
#endif
-static int process_event_synth_tracing_data_stub(struct perf_session *session
- __maybe_unused,
- union perf_event *event
- __maybe_unused)
+static int process_event_synth_tracing_data_stub(const struct perf_tool *tool __maybe_unused,
+ struct perf_session *session __maybe_unused,
+ union perf_event *event __maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
@@ -80,8 +90,7 @@ static int process_event_synth_tracing_data_stub(struct perf_session *session
static int process_event_synth_attr_stub(const struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
- struct evlist **pevlist
- __maybe_unused)
+ struct evlist **pevlist __maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
@@ -89,8 +98,7 @@ static int process_event_synth_attr_stub(const struct perf_tool *tool __maybe_un
static int process_event_synth_event_update_stub(const struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
- struct evlist **pevlist
- __maybe_unused)
+ struct evlist **pevlist __maybe_unused)
{
if (dump_trace)
perf_event__fprintf_event_update(event, stdout);
@@ -141,7 +149,8 @@ static int skipn(int fd, off_t n)
return 0;
}
-static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused,
+static s64 process_event_auxtrace_stub(const struct perf_tool *tool __maybe_unused,
+ struct perf_session *session __maybe_unused,
union perf_event *event)
{
dump_printf(": unhandled!\n");
@@ -150,7 +159,8 @@ static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unus
return event->auxtrace.size;
}
-static int process_event_op2_stub(struct perf_session *session __maybe_unused,
+static int process_event_op2_stub(const struct perf_tool *tool __maybe_unused,
+ struct perf_session *session __maybe_unused,
union perf_event *event __maybe_unused)
{
dump_printf(": unhandled!\n");
@@ -159,7 +169,8 @@ static int process_event_op2_stub(struct perf_session *session __maybe_unused,
static
-int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
+int process_event_thread_map_stub(const struct perf_tool *tool __maybe_unused,
+ struct perf_session *session __maybe_unused,
union perf_event *event __maybe_unused)
{
if (dump_trace)
@@ -170,7 +181,8 @@ int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
}
static
-int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
+int process_event_cpu_map_stub(const struct perf_tool *tool __maybe_unused,
+ struct perf_session *session __maybe_unused,
union perf_event *event __maybe_unused)
{
if (dump_trace)
@@ -181,7 +193,8 @@ int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
}
static
-int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
+int process_event_stat_config_stub(const struct perf_tool *tool __maybe_unused,
+ struct perf_session *session __maybe_unused,
union perf_event *event __maybe_unused)
{
if (dump_trace)
@@ -191,7 +204,8 @@ int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
return 0;
}
-static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
+static int process_stat_stub(const struct perf_tool *tool __maybe_unused,
+ struct perf_session *perf_session __maybe_unused,
union perf_event *event)
{
if (dump_trace)
@@ -201,7 +215,8 @@ static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
return 0;
}
-static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
+static int process_stat_round_stub(const struct perf_tool *tool __maybe_unused,
+ struct perf_session *perf_session __maybe_unused,
union perf_event *event)
{
if (dump_trace)
@@ -211,7 +226,8 @@ static int process_stat_round_stub(struct perf_session *perf_session __maybe_unu
return 0;
}
-static int process_event_time_conv_stub(struct perf_session *perf_session __maybe_unused,
+static int process_event_time_conv_stub(const struct perf_tool *tool __maybe_unused,
+ struct perf_session *perf_session __maybe_unused,
union perf_event *event)
{
if (dump_trace)
@@ -221,7 +237,8 @@ static int process_event_time_conv_stub(struct perf_session *perf_session __mayb
return 0;
}
-static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
+static int perf_session__process_compressed_event_stub(const struct perf_tool *tool __maybe_unused,
+ struct perf_session *session __maybe_unused,
union perf_event *event __maybe_unused,
u64 file_offset __maybe_unused,
const char *file_path __maybe_unused)
@@ -230,6 +247,17 @@ static int perf_session__process_compressed_event_stub(struct perf_session *sess
return 0;
}
+static int perf_event__process_bpf_metadata_stub(const struct perf_tool *tool __maybe_unused,
+ struct perf_session *perf_session __maybe_unused,
+ union perf_event *event)
+{
+ if (dump_trace)
+ perf_event__fprintf_bpf_metadata(event, stdout);
+
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
void perf_tool__init(struct perf_tool *tool, bool ordered_events)
{
tool->ordered_events = ordered_events;
@@ -238,6 +266,7 @@ void perf_tool__init(struct perf_tool *tool, bool ordered_events)
tool->cgroup_events = false;
tool->no_warn = false;
tool->show_feat_hdr = SHOW_FEAT_NO_HEADER;
+ tool->merge_deferred_callchains = true;
tool->sample = process_event_sample_stub;
tool->mmap = process_event_stub;
@@ -259,6 +288,7 @@ void perf_tool__init(struct perf_tool *tool, bool ordered_events)
tool->read = process_event_sample_stub;
tool->throttle = process_event_stub;
tool->unthrottle = process_event_stub;
+ tool->callchain_deferred = process_event_sample_stub;
tool->attr = process_event_synth_attr_stub;
tool->event_update = process_event_synth_event_update_stub;
tool->tracing_data = process_event_synth_tracing_data_stub;
@@ -286,9 +316,184 @@ void perf_tool__init(struct perf_tool *tool, bool ordered_events)
tool->compressed = perf_session__process_compressed_event_stub;
#endif
tool->finished_init = process_event_op2_stub;
+ tool->bpf_metadata = perf_event__process_bpf_metadata_stub;
}
bool perf_tool__compressed_is_stub(const struct perf_tool *tool)
{
return tool->compressed == perf_session__process_compressed_event_stub;
}
+
+#define CREATE_DELEGATE_SAMPLE(name) \
+ static int delegate_ ## name(const struct perf_tool *tool, \
+ union perf_event *event, \
+ struct perf_sample *sample, \
+ struct evsel *evsel, \
+ struct machine *machine) \
+ { \
+ struct delegate_tool *del_tool = container_of(tool, struct delegate_tool, tool); \
+ struct perf_tool *delegate = del_tool->delegate; \
+ return delegate->name(delegate, event, sample, evsel, machine); \
+ }
+CREATE_DELEGATE_SAMPLE(read);
+CREATE_DELEGATE_SAMPLE(sample);
+CREATE_DELEGATE_SAMPLE(callchain_deferred);
+
+#define CREATE_DELEGATE_ATTR(name) \
+ static int delegate_ ## name(const struct perf_tool *tool, \
+ union perf_event *event, \
+ struct evlist **pevlist) \
+ { \
+ struct delegate_tool *del_tool = container_of(tool, struct delegate_tool, tool); \
+ struct perf_tool *delegate = del_tool->delegate; \
+ return delegate->name(delegate, event, pevlist); \
+ }
+CREATE_DELEGATE_ATTR(attr);
+CREATE_DELEGATE_ATTR(event_update);
+
+#define CREATE_DELEGATE_OE(name) \
+ static int delegate_ ## name(const struct perf_tool *tool, \
+ union perf_event *event, \
+ struct ordered_events *oe) \
+ { \
+ struct delegate_tool *del_tool = container_of(tool, struct delegate_tool, tool); \
+ struct perf_tool *delegate = del_tool->delegate; \
+ return delegate->name(delegate, event, oe); \
+ }
+CREATE_DELEGATE_OE(finished_round);
+
+#define CREATE_DELEGATE_OP(name) \
+ static int delegate_ ## name(const struct perf_tool *tool, \
+ union perf_event *event, \
+ struct perf_sample *sample, \
+ struct machine *machine) \
+ { \
+ struct delegate_tool *del_tool = container_of(tool, struct delegate_tool, tool); \
+ struct perf_tool *delegate = del_tool->delegate; \
+ return delegate->name(delegate, event, sample, machine); \
+ }
+CREATE_DELEGATE_OP(aux);
+CREATE_DELEGATE_OP(aux_output_hw_id);
+CREATE_DELEGATE_OP(bpf);
+CREATE_DELEGATE_OP(cgroup);
+CREATE_DELEGATE_OP(comm);
+CREATE_DELEGATE_OP(context_switch);
+CREATE_DELEGATE_OP(exit);
+CREATE_DELEGATE_OP(fork);
+CREATE_DELEGATE_OP(itrace_start);
+CREATE_DELEGATE_OP(ksymbol);
+CREATE_DELEGATE_OP(lost);
+CREATE_DELEGATE_OP(lost_samples);
+CREATE_DELEGATE_OP(mmap);
+CREATE_DELEGATE_OP(mmap2);
+CREATE_DELEGATE_OP(namespaces);
+CREATE_DELEGATE_OP(text_poke);
+CREATE_DELEGATE_OP(throttle);
+CREATE_DELEGATE_OP(unthrottle);
+
+#define CREATE_DELEGATE_OP2(name) \
+ static int delegate_ ## name(const struct perf_tool *tool, \
+ struct perf_session *session, \
+ union perf_event *event) \
+ { \
+ struct delegate_tool *del_tool = container_of(tool, struct delegate_tool, tool); \
+ struct perf_tool *delegate = del_tool->delegate; \
+ return delegate->name(delegate, session, event); \
+ }
+CREATE_DELEGATE_OP2(auxtrace_error);
+CREATE_DELEGATE_OP2(auxtrace_info);
+CREATE_DELEGATE_OP2(bpf_metadata);
+CREATE_DELEGATE_OP2(build_id);
+CREATE_DELEGATE_OP2(cpu_map);
+CREATE_DELEGATE_OP2(feature);
+CREATE_DELEGATE_OP2(finished_init);
+CREATE_DELEGATE_OP2(id_index);
+CREATE_DELEGATE_OP2(stat);
+CREATE_DELEGATE_OP2(stat_config);
+CREATE_DELEGATE_OP2(stat_round);
+CREATE_DELEGATE_OP2(thread_map);
+CREATE_DELEGATE_OP2(time_conv);
+CREATE_DELEGATE_OP2(tracing_data);
+
+#define CREATE_DELEGATE_OP3(name) \
+ static s64 delegate_ ## name(const struct perf_tool *tool, \
+ struct perf_session *session, \
+ union perf_event *event) \
+ { \
+ struct delegate_tool *del_tool = container_of(tool, struct delegate_tool, tool); \
+ struct perf_tool *delegate = del_tool->delegate; \
+ return delegate->name(delegate, session, event); \
+ }
+CREATE_DELEGATE_OP3(auxtrace);
+
+#define CREATE_DELEGATE_OP4(name) \
+ static int delegate_ ## name(const struct perf_tool *tool, \
+ struct perf_session *session, \
+ union perf_event *event, \
+ u64 data, \
+ const char *str) \
+ { \
+ struct delegate_tool *del_tool = container_of(tool, struct delegate_tool, tool); \
+ struct perf_tool *delegate = del_tool->delegate; \
+ return delegate->name(delegate, session, event, data, str); \
+ }
+CREATE_DELEGATE_OP4(compressed);
+
+void delegate_tool__init(struct delegate_tool *tool, struct perf_tool *delegate)
+{
+ tool->delegate = delegate;
+
+ tool->tool.ordered_events = delegate->ordered_events;
+ tool->tool.ordering_requires_timestamps = delegate->ordering_requires_timestamps;
+ tool->tool.namespace_events = delegate->namespace_events;
+ tool->tool.cgroup_events = delegate->cgroup_events;
+ tool->tool.no_warn = delegate->no_warn;
+ tool->tool.show_feat_hdr = delegate->show_feat_hdr;
+ tool->tool.merge_deferred_callchains = delegate->merge_deferred_callchains;
+
+ tool->tool.sample = delegate_sample;
+ tool->tool.read = delegate_read;
+
+ tool->tool.mmap = delegate_mmap;
+ tool->tool.mmap2 = delegate_mmap2;
+ tool->tool.comm = delegate_comm;
+ tool->tool.namespaces = delegate_namespaces;
+ tool->tool.cgroup = delegate_cgroup;
+ tool->tool.fork = delegate_fork;
+ tool->tool.exit = delegate_exit;
+ tool->tool.lost = delegate_lost;
+ tool->tool.lost_samples = delegate_lost_samples;
+ tool->tool.aux = delegate_aux;
+ tool->tool.itrace_start = delegate_itrace_start;
+ tool->tool.aux_output_hw_id = delegate_aux_output_hw_id;
+ tool->tool.context_switch = delegate_context_switch;
+ tool->tool.throttle = delegate_throttle;
+ tool->tool.unthrottle = delegate_unthrottle;
+ tool->tool.ksymbol = delegate_ksymbol;
+ tool->tool.bpf = delegate_bpf;
+ tool->tool.text_poke = delegate_text_poke;
+ tool->tool.callchain_deferred = delegate_callchain_deferred;
+
+ tool->tool.attr = delegate_attr;
+ tool->tool.event_update = delegate_event_update;
+
+ tool->tool.tracing_data = delegate_tracing_data;
+
+ tool->tool.finished_round = delegate_finished_round;
+
+ tool->tool.build_id = delegate_build_id;
+ tool->tool.id_index = delegate_id_index;
+ tool->tool.auxtrace_info = delegate_auxtrace_info;
+ tool->tool.auxtrace_error = delegate_auxtrace_error;
+ tool->tool.time_conv = delegate_time_conv;
+ tool->tool.thread_map = delegate_thread_map;
+ tool->tool.cpu_map = delegate_cpu_map;
+ tool->tool.stat_config = delegate_stat_config;
+ tool->tool.stat = delegate_stat;
+ tool->tool.stat_round = delegate_stat_round;
+ tool->tool.feature = delegate_feature;
+ tool->tool.finished_init = delegate_finished_init;
+ tool->tool.bpf_metadata = delegate_bpf_metadata;
+ tool->tool.compressed = delegate_compressed;
+ tool->tool.auxtrace = delegate_auxtrace;
+}
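
A hypothetical use of the delegate machinery, e.g. interposing on one callback of an existing tool ('rec->tool' and 'my_mmap2_hook' are made-up names here):

	struct delegate_tool dt;

	delegate_tool__init(&dt, &rec->tool);
	dt.tool.mmap2 = my_mmap2_hook;	/* override just this one */
	/* &dt.tool can now be passed wherever a struct perf_tool is
	 * expected; every other callback forwards to &rec->tool via
	 * container_of() in the delegate_* stubs above. */
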
diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h
index db1c7642b0d1..e96b69d25a5b 100644
--- a/tools/perf/util/tool.h
+++ b/tools/perf/util/tool.h
@@ -26,10 +26,12 @@ typedef int (*event_attr_op)(const struct perf_tool *tool,
union perf_event *event,
struct evlist **pevlist);
-typedef int (*event_op2)(struct perf_session *session, union perf_event *event);
-typedef s64 (*event_op3)(struct perf_session *session, union perf_event *event);
-typedef int (*event_op4)(struct perf_session *session, union perf_event *event, u64 data,
- const char *str);
+typedef int (*event_op2)(const struct perf_tool *tool, struct perf_session *session,
+ union perf_event *event);
+typedef s64 (*event_op3)(const struct perf_tool *tool, struct perf_session *session,
+ union perf_event *event);
+typedef int (*event_op4)(const struct perf_tool *tool, struct perf_session *session,
+ union perf_event *event, u64 data, const char *str);
typedef int (*event_oe)(const struct perf_tool *tool, union perf_event *event,
struct ordered_events *oe);
@@ -42,7 +44,8 @@ enum show_feature_header {
struct perf_tool {
event_sample sample,
- read;
+ read,
+ callchain_deferred;
event_op mmap,
mmap2,
comm,
@@ -77,7 +80,8 @@ struct perf_tool {
stat,
stat_round,
feature,
- finished_init;
+ finished_init,
+ bpf_metadata;
event_op4 compressed;
event_op3 auxtrace;
bool ordered_events;
@@ -86,6 +90,7 @@ struct perf_tool {
bool cgroup_events;
bool no_warn;
bool dont_split_sample_group;
+ bool merge_deferred_callchains;
enum show_feature_header show_feat_hdr;
};
@@ -99,4 +104,13 @@ int process_event_sample_stub(const struct perf_tool *tool,
struct evsel *evsel,
struct machine *machine);
+struct delegate_tool {
+ /** @tool: The actual tool that calls the delegate. */
+ struct perf_tool tool;
+ /** @delegate: The tool that is delegated to. */
+ struct perf_tool *delegate;
+};
+
+void delegate_tool__init(struct delegate_tool *tool, struct perf_tool *delegate);
+
#endif /* __PERF_TOOL_H */
diff --git a/tools/perf/util/tool_pmu.c b/tools/perf/util/tool_pmu.c
index 97b327d1ce4a..37c4eae0bef1 100644
--- a/tools/perf/util/tool_pmu.c
+++ b/tools/perf/util/tool_pmu.c
@@ -2,16 +2,19 @@
#include "cgroup.h"
#include "counts.h"
#include "cputopo.h"
+#include "debug.h"
#include "evsel.h"
#include "pmu.h"
#include "print-events.h"
#include "smt.h"
+#include "stat.h"
#include "time-utils.h"
#include "tool_pmu.h"
#include "tsc.h"
#include <api/fs/fs.h>
#include <api/io.h>
#include <internal/threadmap.h>
+#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <fcntl.h>
#include <strings.h>
@@ -30,6 +33,8 @@ static const char *const tool_pmu__event_names[TOOL_PMU__EVENT_MAX] = {
"slots",
"smt_on",
"system_tsc_freq",
+ "core_wide",
+ "target_cpu",
};
bool tool_pmu__skip_event(const char *name __maybe_unused)
@@ -106,6 +111,23 @@ const char *evsel__tool_pmu_event_name(const struct evsel *evsel)
return tool_pmu__event_to_str(evsel->core.attr.config);
}
+struct perf_cpu_map *tool_pmu__cpus(struct perf_event_attr *attr)
+{
+ static struct perf_cpu_map *cpu0_map;
+ enum tool_pmu_event event = (enum tool_pmu_event)attr->config;
+
+ if (event <= TOOL_PMU__EVENT_NONE || event >= TOOL_PMU__EVENT_MAX) {
+ pr_err("Invalid tool PMU event config %llx\n", attr->config);
+ return NULL;
+ }
+ if (event == TOOL_PMU__EVENT_USER_TIME || event == TOOL_PMU__EVENT_SYSTEM_TIME)
+ return cpu_map__online();
+
+ if (!cpu0_map)
+ cpu0_map = perf_cpu_map__new_int(0);
+ return perf_cpu_map__get(cpu0_map);
+}
+
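
A hypothetical caller, noting the reference counting: tool_pmu__cpus() hands back a reference that the caller must drop:

	struct perf_cpu_map *cpus = tool_pmu__cpus(&evsel->core.attr);

	if (cpus) {
		pr_debug("tool event runs on %d cpu(s)\n",
			 perf_cpu_map__nr(cpus));
		perf_cpu_map__put(cpus);
	}
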
static bool read_until_char(struct io *io, char e)
{
int c;
@@ -239,9 +261,6 @@ int evsel__tool_pmu_open(struct evsel *evsel,
nthreads = perf_thread_map__nr(threads);
for (idx = start_cpu_map_idx; idx < end_cpu_map_idx; idx++) {
for (thread = 0; thread < nthreads; thread++) {
- if (thread >= nthreads)
- break;
-
if (!evsel->cgrp && !evsel->core.system_wide)
pid = perf_thread_map__pid(threads, thread);
@@ -332,7 +351,11 @@ static bool has_pmem(void)
return has_pmem;
}
-bool tool_pmu__read_event(enum tool_pmu_event ev, u64 *result)
+bool tool_pmu__read_event(enum tool_pmu_event ev,
+ struct evsel *evsel,
+ bool system_wide,
+ const char *user_requested_cpu_list,
+ u64 *result)
{
const struct cpu_topology *topology;
@@ -347,18 +370,60 @@ bool tool_pmu__read_event(enum tool_pmu_event ev, u64 *result)
return true;
case TOOL_PMU__EVENT_NUM_CPUS:
- *result = cpu__max_present_cpu().cpu;
+ if (!evsel || perf_cpu_map__is_empty(evsel->core.cpus)) {
+ /* No evsel to be specific to. */
+ *result = cpu__max_present_cpu().cpu;
+ } else if (!perf_cpu_map__has_any_cpu(evsel->core.cpus)) {
+ /* Evsel just has specific CPUs. */
+ *result = perf_cpu_map__nr(evsel->core.cpus);
+ } else {
+ /*
+ * "Any CPU" event that can be scheduled on any CPU in
+ * the PMU's cpumask. The PMU cpumask should be saved in
+ * pmu_cpus. If not present fall back to max.
+ */
+ if (!perf_cpu_map__is_empty(evsel->core.pmu_cpus))
+ *result = perf_cpu_map__nr(evsel->core.pmu_cpus);
+ else
+ *result = cpu__max_present_cpu().cpu;
+ }
return true;
case TOOL_PMU__EVENT_NUM_CPUS_ONLINE: {
struct perf_cpu_map *online = cpu_map__online();
- if (online) {
+ if (!online)
+ return false;
+
+ if (!evsel || perf_cpu_map__is_empty(evsel->core.cpus)) {
+ /* No evsel to be specific to. */
*result = perf_cpu_map__nr(online);
- perf_cpu_map__put(online);
- return true;
+ } else if (!perf_cpu_map__has_any_cpu(evsel->core.cpus)) {
+ /* Evsel just has specific CPUs. */
+ struct perf_cpu_map *tmp =
+ perf_cpu_map__intersect(online, evsel->core.cpus);
+
+ *result = perf_cpu_map__nr(tmp);
+ perf_cpu_map__put(tmp);
+ } else {
+ /*
+ * "Any CPU" event that can be scheduled on any CPU in
+ * the PMU's cpumask. The PMU cpumask should be saved in
+ * pmu_cpus, if not present then just the online cpu
+ * mask.
+ */
+ if (!perf_cpu_map__is_empty(evsel->core.pmu_cpus)) {
+ struct perf_cpu_map *tmp =
+ perf_cpu_map__intersect(online, evsel->core.pmu_cpus);
+
+ *result = perf_cpu_map__nr(tmp);
+ perf_cpu_map__put(tmp);
+ } else {
+ *result = perf_cpu_map__nr(online);
+ }
}
- return false;
+ perf_cpu_map__put(online);
+ return true;
}
case TOOL_PMU__EVENT_NUM_DIES:
topology = online_topology();
@@ -382,6 +447,14 @@ bool tool_pmu__read_event(enum tool_pmu_event ev, u64 *result)
*result = arch_get_tsc_freq();
return true;
+ case TOOL_PMU__EVENT_CORE_WIDE:
+ *result = core_wide(system_wide, user_requested_cpu_list) ? 1 : 0;
+ return true;
+
+ case TOOL_PMU__EVENT_TARGET_CPU:
+ *result = system_wide || (user_requested_cpu_list != NULL) ? 1 : 0;
+ return true;
+
case TOOL_PMU__EVENT_NONE:
case TOOL_PMU__EVENT_DURATION_TIME:
case TOOL_PMU__EVENT_USER_TIME:
@@ -392,16 +465,39 @@ bool tool_pmu__read_event(enum tool_pmu_event ev, u64 *result)
}
}
+static void perf_counts__update(struct perf_counts_values *count,
+ const struct perf_counts_values *old_count,
+ bool raw, u64 val)
+{
+ /*
+ * The values of enabled and running must make a ratio of 100%. The
+ * exact values don't matter as long as they are non-zero to avoid
+ * issues with evsel__count_has_error.
+ */
+ if (old_count) {
+ count->val = raw ? val : old_count->val + val;
+ count->run = old_count->run + 1;
+ count->ena = old_count->ena + 1;
+ count->lost = old_count->lost;
+ } else {
+ count->val = val;
+ count->run++;
+ count->ena++;
+ count->lost = 0;
+ }
+}
+
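(Editorial aside: perf scales a counter by its enabled/running ratio, roughly val * ena / run, so keeping ena equal to run, as perf_counts__update() does, leaves tool-event values unscaled. A sketch of that scaling, under the stated assumption:)

	/* Illustrative only: mirrors the usual enabled/running adjustment. */
	static u64 scaled_count(const struct perf_counts_values *c)
	{
		if (!c->run)
			return 0;
		return (u64)((double)c->val * c->ena / c->run);
	}
	/* With ena == run, scaled_count(c) == c->val. */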
int evsel__tool_pmu_read(struct evsel *evsel, int cpu_map_idx, int thread)
{
__u64 *start_time, cur_time, delta_start;
- u64 val;
- int fd, err = 0;
+ int err = 0;
struct perf_counts_values *count, *old_count = NULL;
bool adjust = false;
enum tool_pmu_event ev = evsel__tool_event(evsel);
count = perf_counts(evsel->counts, cpu_map_idx, thread);
+ if (evsel->prev_raw_counts)
+ old_count = perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
switch (ev) {
case TOOL_PMU__EVENT_HAS_PMEM:
@@ -412,26 +508,23 @@ int evsel__tool_pmu_read(struct evsel *evsel, int cpu_map_idx, int thread)
case TOOL_PMU__EVENT_NUM_PACKAGES:
case TOOL_PMU__EVENT_SLOTS:
case TOOL_PMU__EVENT_SMT_ON:
- case TOOL_PMU__EVENT_SYSTEM_TSC_FREQ:
- if (evsel->prev_raw_counts)
- old_count = perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
- val = 0;
+ case TOOL_PMU__EVENT_CORE_WIDE:
+ case TOOL_PMU__EVENT_TARGET_CPU:
+ case TOOL_PMU__EVENT_SYSTEM_TSC_FREQ: {
+ u64 val = 0;
+
if (cpu_map_idx == 0 && thread == 0) {
- if (!tool_pmu__read_event(ev, &val)) {
+ if (!tool_pmu__read_event(ev, evsel,
+ stat_config.system_wide,
+ stat_config.user_requested_cpu_list,
+ &val)) {
count->lost++;
val = 0;
}
}
- if (old_count) {
- count->val = old_count->val + val;
- count->run = old_count->run + 1;
- count->ena = old_count->ena + 1;
- } else {
- count->val = val;
- count->run++;
- count->ena++;
- }
+ perf_counts__update(count, old_count, /*raw=*/false, val);
return 0;
+ }
case TOOL_PMU__EVENT_DURATION_TIME:
/*
* Pretend duration_time is only on the first CPU and thread, or
@@ -447,9 +540,9 @@ int evsel__tool_pmu_read(struct evsel *evsel, int cpu_map_idx, int thread)
case TOOL_PMU__EVENT_USER_TIME:
case TOOL_PMU__EVENT_SYSTEM_TIME: {
bool system = evsel__tool_event(evsel) == TOOL_PMU__EVENT_SYSTEM_TIME;
+ int fd = FD(evsel, cpu_map_idx, thread);
start_time = xyarray__entry(evsel->start_times, cpu_map_idx, thread);
- fd = FD(evsel, cpu_map_idx, thread);
lseek(fd, SEEK_SET, 0);
if (evsel->pid_stat) {
/* The event exists solely on 1 CPU. */
@@ -483,11 +576,9 @@ int evsel__tool_pmu_read(struct evsel *evsel, int cpu_map_idx, int thread)
if (adjust) {
__u64 ticks_per_sec = sysconf(_SC_CLK_TCK);
- delta_start *= 1000000000 / ticks_per_sec;
+ delta_start *= 1e9 / ticks_per_sec;
}
- count->val = delta_start;
- count->ena = count->run = delta_start;
- count->lost = 0;
+ perf_counts__update(count, old_count, /*raw=*/true, delta_start);
return 0;
}
@@ -496,19 +587,12 @@ struct perf_pmu *tool_pmu__new(void)
struct perf_pmu *tool = zalloc(sizeof(struct perf_pmu));
if (!tool)
- goto out;
- tool->name = strdup("tool");
- if (!tool->name) {
- zfree(&tool);
- goto out;
- }
+ return NULL;
- tool->type = PERF_PMU_TYPE_TOOL;
- INIT_LIST_HEAD(&tool->aliases);
- INIT_LIST_HEAD(&tool->caps);
- INIT_LIST_HEAD(&tool->format);
+ if (perf_pmu__init(tool, PERF_PMU_TYPE_TOOL, "tool") != 0) {
+ perf_pmu__delete(tool);
+ return NULL;
+ }
tool->events_table = find_core_events_table("common", "common");
-
-out:
return tool;
}
diff --git a/tools/perf/util/tool_pmu.h b/tools/perf/util/tool_pmu.h
index c6ad1dd90a56..ea343d1983d3 100644
--- a/tools/perf/util/tool_pmu.h
+++ b/tools/perf/util/tool_pmu.h
@@ -22,6 +22,8 @@ enum tool_pmu_event {
TOOL_PMU__EVENT_SLOTS,
TOOL_PMU__EVENT_SMT_ON,
TOOL_PMU__EVENT_SYSTEM_TSC_FREQ,
+ TOOL_PMU__EVENT_CORE_WIDE,
+ TOOL_PMU__EVENT_TARGET_CPU,
TOOL_PMU__EVENT_MAX,
};
@@ -34,11 +36,17 @@ enum tool_pmu_event tool_pmu__str_to_event(const char *str);
bool tool_pmu__skip_event(const char *name);
int tool_pmu__num_skip_events(void);
-bool tool_pmu__read_event(enum tool_pmu_event ev, u64 *result);
+bool tool_pmu__read_event(enum tool_pmu_event ev,
+ struct evsel *evsel,
+ bool system_wide,
+ const char *user_requested_cpu_list,
+ u64 *result);
+
u64 tool_pmu__cpu_slots_per_cycle(void);
bool perf_pmu__is_tool(const struct perf_pmu *pmu);
+struct perf_cpu_map *tool_pmu__cpus(struct perf_event_attr *attr);
bool evsel__is_tool(const struct evsel *evsel);
enum tool_pmu_event evsel__tool_event(const struct evsel *evsel);
diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c
index 4db3d1bd686c..b06e10a116bb 100644
--- a/tools/perf/util/top.c
+++ b/tools/perf/util/top.c
@@ -88,9 +88,9 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
else if (target->tid)
ret += SNPRINTF(bf + ret, size - ret, " (target_tid: %s",
target->tid);
- else if (target->uid_str != NULL)
+ else if (top->uid_str != NULL)
ret += SNPRINTF(bf + ret, size - ret, " (uid: %s",
- target->uid_str);
+ top->uid_str);
else
ret += SNPRINTF(bf + ret, size - ret, " (all");
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
index 4c5588dbb131..04ff926846be 100644
--- a/tools/perf/util/top.h
+++ b/tools/perf/util/top.h
@@ -48,6 +48,7 @@ struct perf_top {
const char *sym_filter;
float min_percent;
unsigned int nr_threads_synthesize;
+ const char *uid_str;
struct {
struct ordered_events *in;
diff --git a/tools/perf/util/tp_pmu.c b/tools/perf/util/tp_pmu.c
new file mode 100644
index 000000000000..eddb9807131a
--- /dev/null
+++ b/tools/perf/util/tp_pmu.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+#include "tp_pmu.h"
+#include "pmus.h"
+#include <api/fs/fs.h>
+#include <api/fs/tracing_path.h>
+#include <api/io_dir.h>
+#include <linux/kernel.h>
+#include <errno.h>
+#include <string.h>
+
+int tp_pmu__id(const char *sys, const char *name)
+{
+ char *tp_dir = get_events_file(sys);
+ char path[PATH_MAX];
+ int id, err;
+
+ if (!tp_dir)
+ return -1;
+
+ scnprintf(path, PATH_MAX, "%s/%s/id", tp_dir, name);
+ put_events_file(tp_dir);
+ err = filename__read_int(path, &id);
+ if (err)
+ return err;
+
+ return id;
+}
+
+
+int tp_pmu__for_each_tp_event(const char *sys, void *state, tp_event_callback cb)
+{
+ char *evt_path;
+ struct io_dirent64 *evt_ent;
+ struct io_dir evt_dir;
+ int ret = 0;
+
+ evt_path = get_events_file(sys);
+ if (!evt_path)
+ return -errno;
+
+ io_dir__init(&evt_dir, open(evt_path, O_CLOEXEC | O_DIRECTORY | O_RDONLY));
+ if (evt_dir.dirfd < 0) {
+ ret = -errno;
+ put_events_file(evt_path);
+ return ret;
+ }
+ put_events_file(evt_path);
+
+ while (!ret && (evt_ent = io_dir__readdir(&evt_dir))) {
+ if (!strcmp(evt_ent->d_name, ".")
+ || !strcmp(evt_ent->d_name, "..")
+ || !strcmp(evt_ent->d_name, "enable")
+ || !strcmp(evt_ent->d_name, "filter"))
+ continue;
+
+ ret = cb(state, sys, evt_ent->d_name);
+ if (ret)
+ break;
+ }
+ close(evt_dir.dirfd);
+ return ret;
+}
+
+int tp_pmu__for_each_tp_sys(void *state, tp_sys_callback cb)
+{
+ struct io_dirent64 *events_ent;
+ struct io_dir events_dir;
+ int ret = 0;
+ char *events_dir_path = get_tracing_file("events");
+
+ if (!events_dir_path)
+ return -errno;
+
+ io_dir__init(&events_dir, open(events_dir_path, O_CLOEXEC | O_DIRECTORY | O_RDONLY));
+ if (events_dir.dirfd < 0) {
+ ret = -errno;
+ put_events_file(events_dir_path);
+ return ret;
+ }
+ put_events_file(events_dir_path);
+
+ while (!ret && (events_ent = io_dir__readdir(&events_dir))) {
+ if (!strcmp(events_ent->d_name, ".") ||
+ !strcmp(events_ent->d_name, "..") ||
+ !strcmp(events_ent->d_name, "enable") ||
+ !strcmp(events_ent->d_name, "header_event") ||
+ !strcmp(events_ent->d_name, "header_page"))
+ continue;
+
+ ret = cb(state, events_ent->d_name);
+ }
+ close(events_dir.dirfd);
+ return ret;
+}
+
+bool perf_pmu__is_tracepoint(const struct perf_pmu *pmu)
+{
+ return pmu->type == PERF_TYPE_TRACEPOINT;
+}
+
+struct for_each_event_args {
+ void *state;
+ pmu_event_callback cb;
+ const struct perf_pmu *pmu;
+};
+
+static int for_each_event_cb(void *state, const char *sys_name, const char *evt_name)
+{
+ struct for_each_event_args *args = state;
+ char name[2 * FILENAME_MAX + 2];
+ /* 16 possible hex digits and 22 other characters and \0. */
+ char encoding[16 + 22];
+ char *format = NULL;
+ size_t format_size;
+ struct pmu_event_info info = {
+ .pmu = args->pmu,
+ .pmu_name = args->pmu->name,
+ .event_type_desc = "Tracepoint event",
+ };
+ char *tp_dir = get_events_file(sys_name);
+ char path[PATH_MAX];
+ int id, err;
+
+ if (!tp_dir)
+ return -1;
+
+ scnprintf(path, sizeof(path), "%s/%s/id", tp_dir, evt_name);
+ err = filename__read_int(path, &id);
+ if (err == 0) {
+ snprintf(encoding, sizeof(encoding), "tracepoint/config=0x%x/", id);
+ info.encoding_desc = encoding;
+ }
+
+ scnprintf(path, sizeof(path), "%s/%s/format", tp_dir, evt_name);
+ put_events_file(tp_dir);
+ err = filename__read_str(path, &format, &format_size);
+ if (err == 0) {
+ info.long_desc = format;
+ for (size_t i = 0 ; i < format_size; i++) {
+ /* Swap tabs to spaces due to some rendering issues. */
+ if (format[i] == '\t')
+ format[i] = ' ';
+ }
+ }
+ snprintf(name, sizeof(name), "%s:%s", sys_name, evt_name);
+ info.name = name;
+ err = args->cb(args->state, &info);
+ free(format);
+ return err;
+}
+
+static int for_each_event_sys_cb(void *state, const char *sys_name)
+{
+ return tp_pmu__for_each_tp_event(sys_name, state, for_each_event_cb);
+}
+
+int tp_pmu__for_each_event(struct perf_pmu *pmu, void *state, pmu_event_callback cb)
+{
+ struct for_each_event_args args = {
+ .state = state,
+ .cb = cb,
+ .pmu = pmu,
+ };
+
+ return tp_pmu__for_each_tp_sys(&args, for_each_event_sys_cb);
+}
+
+static int num_events_cb(void *state, const char *sys_name __maybe_unused,
+ const char *evt_name __maybe_unused)
+{
+ size_t *count = state;
+
+ (*count)++;
+ return 0;
+}
+
+static int num_events_sys_cb(void *state, const char *sys_name)
+{
+ return tp_pmu__for_each_tp_event(sys_name, state, num_events_cb);
+}
+
+size_t tp_pmu__num_events(struct perf_pmu *pmu __maybe_unused)
+{
+ size_t count = 0;
+
+ tp_pmu__for_each_tp_sys(&count, num_events_sys_cb);
+ return count;
+}
+
+bool tp_pmu__have_event(struct perf_pmu *pmu __maybe_unused, const char *name)
+{
+ char *dup_name, *colon;
+ int id;
+
+ colon = strchr(name, ':');
+ if (colon == NULL)
+ return false;
+
+ dup_name = strdup(name);
+ if (!dup_name)
+ return false;
+
+ colon = dup_name + (colon - name);
+ *colon = '\0';
+ id = tp_pmu__id(dup_name, colon + 1);
+ free(dup_name);
+ return id >= 0;
+}
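(Editorial aside, not part of the patch: a minimal sketch of driving the new iterator with a callback. The "sched" subsystem name is only an illustrative choice; the callback signature matches tp_event_callback from tp_pmu.h.)

	#include <stdio.h>
	#include "tp_pmu.h"

	static int count_cb(void *state, const char *sys_name, const char *evt_name)
	{
		int *n = state;

		printf("%s:%s\n", sys_name, evt_name);
		(*n)++;
		return 0;	/* a non-zero return stops the iteration */
	}

	int main(void)
	{
		int n = 0;

		if (tp_pmu__for_each_tp_event("sched", &n, count_cb) < 0)
			return 1;
		printf("%d tracepoints\n", n);
		return 0;
	}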
diff --git a/tools/perf/util/tp_pmu.h b/tools/perf/util/tp_pmu.h
new file mode 100644
index 000000000000..30456bd6943d
--- /dev/null
+++ b/tools/perf/util/tp_pmu.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+#ifndef __TP_PMU_H
+#define __TP_PMU_H
+
+#include "pmu.h"
+
+typedef int (*tp_sys_callback)(void *state, const char *sys_name);
+typedef int (*tp_event_callback)(void *state, const char *sys_name, const char *evt_name);
+
+int tp_pmu__id(const char *sys, const char *name);
+int tp_pmu__for_each_tp_event(const char *sys, void *state, tp_event_callback cb);
+int tp_pmu__for_each_tp_sys(void *state, tp_sys_callback cb);
+
+bool perf_pmu__is_tracepoint(const struct perf_pmu *pmu);
+int tp_pmu__for_each_event(struct perf_pmu *pmu, void *state, pmu_event_callback cb);
+size_t tp_pmu__num_events(struct perf_pmu *pmu);
+bool tp_pmu__have_event(struct perf_pmu *pmu, const char *name);
+
+#endif /* __TP_PMU_H */
diff --git a/tools/perf/util/trace.h b/tools/perf/util/trace.h
new file mode 100644
index 000000000000..fbbcfe6f44fe
--- /dev/null
+++ b/tools/perf/util/trace.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef UTIL_TRACE_H
+#define UTIL_TRACE_H
+
+#include <stdio.h> /* for FILE */
+
+enum trace_summary_mode {
+ SUMMARY__NONE = 0,
+ SUMMARY__BY_TOTAL,
+ SUMMARY__BY_THREAD,
+ SUMMARY__BY_CGROUP,
+};
+
+#ifdef HAVE_BPF_SKEL
+
+int trace_prepare_bpf_summary(enum trace_summary_mode mode);
+void trace_start_bpf_summary(void);
+void trace_end_bpf_summary(void);
+int trace_print_bpf_summary(FILE *fp, int max_summary);
+void trace_cleanup_bpf_summary(void);
+
+#else /* !HAVE_BPF_SKEL */
+
+static inline int trace_prepare_bpf_summary(enum trace_summary_mode mode __maybe_unused)
+{
+ return -1;
+}
+static inline void trace_start_bpf_summary(void) {}
+static inline void trace_end_bpf_summary(void) {}
+static inline int trace_print_bpf_summary(FILE *fp __maybe_unused, int max_summary __maybe_unused)
+{
+ return 0;
+}
+static inline void trace_cleanup_bpf_summary(void) {}
+
+#endif /* HAVE_BPF_SKEL */
+
+#endif /* UTIL_TRACE_H */
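(Editorial aside: the !HAVE_BPF_SKEL stubs above let callers run unconditionally, with the prepare step reporting failure. A hedged usage sketch; the surrounding workload and error handling are assumed:)

	if (trace_prepare_bpf_summary(SUMMARY__BY_THREAD) < 0) {
		fprintf(stderr, "BPF summary unavailable\n");	/* stubs return -1 */
	} else {
		trace_start_bpf_summary();
		/* ... run and trace the workload ... */
		trace_end_bpf_summary();
		trace_print_bpf_summary(stdout, /*max_summary=*/10);
		trace_cleanup_bpf_summary();
	}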
diff --git a/tools/perf/util/trace_augment.h b/tools/perf/util/trace_augment.h
index 57a3e5045937..4f729bc67753 100644
--- a/tools/perf/util/trace_augment.h
+++ b/tools/perf/util/trace_augment.h
@@ -1,6 +1,66 @@
#ifndef TRACE_AUGMENT_H
#define TRACE_AUGMENT_H
-#define TRACE_AUG_MAX_BUF 32 /* for buffer augmentation in perf trace */
+#include <linux/compiler.h>
+
+struct bpf_program;
+struct evlist;
+
+#ifdef HAVE_BPF_SKEL
+
+int augmented_syscalls__prepare(void);
+int augmented_syscalls__create_bpf_output(struct evlist *evlist);
+void augmented_syscalls__setup_bpf_output(void);
+int augmented_syscalls__set_filter_pids(unsigned int nr, pid_t *pids);
+int augmented_syscalls__get_map_fds(int *enter_fd, int *exit_fd, int *beauty_fd);
+struct bpf_program *augmented_syscalls__find_by_title(const char *name);
+struct bpf_program *augmented_syscalls__unaugmented(void);
+void augmented_syscalls__cleanup(void);
+
+#else /* !HAVE_BPF_SKEL */
+
+static inline int augmented_syscalls__prepare(void)
+{
+ return -1;
+}
+
+static inline int augmented_syscalls__create_bpf_output(struct evlist *evlist __maybe_unused)
+{
+ return -1;
+}
+
+static inline void augmented_syscalls__setup_bpf_output(void)
+{
+}
+
+static inline int augmented_syscalls__set_filter_pids(unsigned int nr __maybe_unused,
+ pid_t *pids __maybe_unused)
+{
+ return 0;
+}
+
+static inline int augmented_syscalls__get_map_fds(int *enter_fd __maybe_unused,
+ int *exit_fd __maybe_unused,
+ int *beauty_fd __maybe_unused)
+{
+ return -1;
+}
+
+static inline struct bpf_program *
+augmented_syscalls__find_by_title(const char *name __maybe_unused)
+{
+ return NULL;
+}
+
+static inline struct bpf_program *augmented_syscalls__unaugmented(void)
+{
+ return NULL;
+}
+
+static inline void augmented_syscalls__cleanup(void)
+{
+}
+
+#endif /* HAVE_BPF_SKEL */
#endif
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 793d11832694..ae70fb56a057 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -84,8 +84,11 @@ static int __report_module(struct addr_location *al, u64 ip,
char filename[PATH_MAX];
__symbol__join_symfs(filename, sizeof(filename), dso__long_name(dso));
- mod = dwfl_report_elf(ui->dwfl, dso__short_name(dso), filename, -1,
- base, false);
+ /* Don't hang up on device files like /dev/dri/renderD128. */
+ if (is_regular_file(filename)) {
+ mod = dwfl_report_elf(ui->dwfl, dso__short_name(dso), filename, -1,
+ base, false);
+ }
}
if (!mod) {
char filename[PATH_MAX];
diff --git a/tools/perf/util/zlib.c b/tools/perf/util/zlib.c
index 78d2297c1b67..1f7c06523059 100644
--- a/tools/perf/util/zlib.c
+++ b/tools/perf/util/zlib.c
@@ -88,7 +88,7 @@ bool gzip_is_compressed(const char *input)
ssize_t rc;
if (fd < 0)
- return -1;
+ return false;
rc = read(fd, buf, sizeof(buf));
close(fd);
diff --git a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
index 9741e7503591..de93067a5da3 100644
--- a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
+++ b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
@@ -995,7 +995,7 @@ static acpi_status osl_list_customized_tables(char *directory)
{
void *table_dir;
u32 instance;
- char temp_name[ACPI_NAMESEG_SIZE];
+ char temp_name[ACPI_NAMESEG_SIZE] ACPI_NONSTRING;
char *filename;
acpi_status status = AE_OK;
@@ -1312,7 +1312,7 @@ osl_get_customized_table(char *pathname,
{
void *table_dir;
u32 current_instance = 0;
- char temp_name[ACPI_NAMESEG_SIZE];
+ char temp_name[ACPI_NAMESEG_SIZE] ACPI_NONSTRING;
char table_filename[PATH_MAX];
char *filename;
acpi_status status;
diff --git a/tools/power/acpi/tools/acpidump/apdump.c b/tools/power/acpi/tools/acpidump/apdump.c
index bf30143efbdc..7a6223aa703c 100644
--- a/tools/power/acpi/tools/acpidump/apdump.c
+++ b/tools/power/acpi/tools/acpidump/apdump.c
@@ -86,9 +86,10 @@ u8 ap_is_valid_checksum(struct acpi_table_header *table)
if (ACPI_FAILURE(status)) {
fprintf(stderr, "%4.4s: Warning: wrong checksum in table\n",
table->signature);
+ return (FALSE);
}
- return (AE_OK);
+ return (TRUE);
}
/******************************************************************************
diff --git a/tools/power/acpi/tools/acpidump/apfiles.c b/tools/power/acpi/tools/acpidump/apfiles.c
index 75db0091e275..d6b8a201480b 100644
--- a/tools/power/acpi/tools/acpidump/apfiles.c
+++ b/tools/power/acpi/tools/acpidump/apfiles.c
@@ -103,7 +103,7 @@ int ap_open_output_file(char *pathname)
int ap_write_to_binary_file(struct acpi_table_header *table, u32 instance)
{
- char filename[ACPI_NAMESEG_SIZE + 16] ACPI_NONSTRING;
+ char filename[ACPI_NAMESEG_SIZE + 16];
char instance_str[16];
ACPI_FILE file;
acpi_size actual;
diff --git a/tools/power/acpi/tools/pfrut/pfrut.c b/tools/power/acpi/tools/pfrut/pfrut.c
index 44a9ecbd91e8..4d9b0177c312 100644
--- a/tools/power/acpi/tools/pfrut/pfrut.c
+++ b/tools/power/acpi/tools/pfrut/pfrut.c
@@ -222,6 +222,7 @@ int main(int argc, char *argv[])
fd_update_log = open("/dev/acpi_pfr_telemetry0", O_RDWR);
if (fd_update_log < 0) {
printf("PFRT device not supported - Quit...\n");
+ close(fd_update);
return 1;
}
@@ -265,7 +266,8 @@ int main(int argc, char *argv[])
printf("chunk2_size:%d\n", data_info.chunk2_size);
printf("rollover_cnt:%d\n", data_info.rollover_cnt);
printf("reset_cnt:%d\n", data_info.reset_cnt);
-
+ close(fd_update);
+ close(fd_update_log);
return 0;
}
@@ -358,6 +360,7 @@ int main(int argc, char *argv[])
if (ret == -1) {
perror("Failed to load capsule file");
+ munmap(addr_map_capsule, st.st_size);
close(fd_capsule);
close(fd_update);
close(fd_update_log);
@@ -420,7 +423,7 @@ int main(int argc, char *argv[])
if (p_mmap == MAP_FAILED) {
perror("mmap error.");
close(fd_update_log);
-
+ free(log_buf);
return 1;
}
diff --git a/tools/power/cpupower/.gitignore b/tools/power/cpupower/.gitignore
index 5113d5a7aee0..7677329c42a6 100644
--- a/tools/power/cpupower/.gitignore
+++ b/tools/power/cpupower/.gitignore
@@ -27,6 +27,3 @@ debug/i386/intel_gsic
debug/i386/powernow-k8-decode
debug/x86_64/centrino-decode
debug/x86_64/powernow-k8-decode
-
-# Clang's compilation database file
-compile_commands.json
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index be8dfac14076..a1df9196dc45 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -37,9 +37,7 @@ NLS ?= true
# cpufreq-bench benchmarking tool
CPUFREQ_BENCH ?= true
-# Do not build libraries, but build the code in statically
-# Libraries are still built, otherwise the Makefile code would
-# be rather ugly.
+# Build the code, including libraries, statically.
export STATIC ?= false
# Prefix to the directories we're installing to
@@ -73,6 +71,7 @@ sbindir ?= /usr/sbin
mandir ?= /usr/man
libdir ?= /usr/lib
libexecdir ?= /usr/libexec
+unitdir ?= /usr/lib/systemd/system
includedir ?= /usr/include
localedir ?= /usr/share/locale
docdir ?= /usr/share/doc/packages/cpupower
@@ -206,14 +205,25 @@ $(OUTPUT)lib/%.o: $(LIB_SRC) $(LIB_HEADERS)
$(ECHO) " CC " $@
$(QUIET) $(CC) $(CFLAGS) -fPIC -o $@ -c lib/$*.c
-$(OUTPUT)libcpupower.so.$(LIB_VER): $(LIB_OBJS)
+ifeq ($(strip $(STATIC)),true)
+LIBCPUPOWER := libcpupower.a
+else
+LIBCPUPOWER := libcpupower.so.$(LIB_VER)
+endif
+
+$(OUTPUT)$(LIBCPUPOWER): $(LIB_OBJS)
+ifeq ($(strip $(STATIC)),true)
+ $(ECHO) " AR " $@
+ $(QUIET) $(AR) rcs $@ $(LIB_OBJS)
+else
$(ECHO) " LD " $@
$(QUIET) $(CC) -shared $(CFLAGS) $(LDFLAGS) -o $@ \
-Wl,-soname,libcpupower.so.$(LIB_MAJ) $(LIB_OBJS)
@ln -sf $(@F) $(OUTPUT)libcpupower.so
@ln -sf $(@F) $(OUTPUT)libcpupower.so.$(LIB_MAJ)
+endif
-libcpupower: $(OUTPUT)libcpupower.so.$(LIB_VER)
+libcpupower: $(OUTPUT)$(LIBCPUPOWER)
# Let all .o files depend on its .c file and all headers
# Might be worth to put this into utils/Makefile at some point of time
@@ -223,7 +233,7 @@ $(OUTPUT)%.o: %.c
$(ECHO) " CC " $@
$(QUIET) $(CC) $(CFLAGS) -I./lib -I ./utils -o $@ -c $*.c
-$(OUTPUT)cpupower: $(UTIL_OBJS) $(OUTPUT)libcpupower.so.$(LIB_VER)
+$(OUTPUT)cpupower: $(UTIL_OBJS) $(OUTPUT)$(LIBCPUPOWER)
$(ECHO) " CC " $@
ifeq ($(strip $(STATIC)),true)
$(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lrt -lpci -L$(OUTPUT) -o $@
@@ -268,7 +278,7 @@ update-po: $(OUTPUT)po/$(PACKAGE).pot
done;
endif
-compile-bench: $(OUTPUT)libcpupower.so.$(LIB_VER)
+compile-bench: $(OUTPUT)$(LIBCPUPOWER)
@V=$(V) confdir=$(confdir) $(MAKE) -C bench O=$(OUTPUT)
# we compile into subdirectories. if the target directory is not the
@@ -286,6 +296,7 @@ clean:
-find $(OUTPUT) \( -not -type d \) -and \( -name '*~' -o -name '*.[oas]' \) -type f -print \
| xargs rm -f
-rm -f $(OUTPUT)cpupower
+ -rm -f $(OUTPUT)libcpupower.a
-rm -f $(OUTPUT)libcpupower.so*
-rm -rf $(OUTPUT)po/*.gmo
-rm -rf $(OUTPUT)po/*.pot
@@ -294,7 +305,11 @@ clean:
install-lib: libcpupower
$(INSTALL) -d $(DESTDIR)${libdir}
+ifeq ($(strip $(STATIC)),true)
+ $(CP) $(OUTPUT)libcpupower.a $(DESTDIR)${libdir}/
+else
$(CP) $(OUTPUT)libcpupower.so* $(DESTDIR)${libdir}/
+endif
$(INSTALL) -d $(DESTDIR)${includedir}
$(INSTALL_DATA) lib/cpufreq.h $(DESTDIR)${includedir}/cpufreq.h
$(INSTALL_DATA) lib/cpuidle.h $(DESTDIR)${includedir}/cpuidle.h
@@ -309,9 +324,9 @@ install-tools: $(OUTPUT)cpupower
$(INSTALL_DATA) cpupower-service.conf '$(DESTDIR)${confdir}'
$(INSTALL) -d $(DESTDIR)${libexecdir}
$(INSTALL_PROGRAM) cpupower.sh '$(DESTDIR)${libexecdir}/cpupower'
- $(INSTALL) -d $(DESTDIR)${libdir}/systemd/system
- sed 's|___CDIR___|${confdir}|; s|___LDIR___|${libexecdir}|' cpupower.service.in > '$(DESTDIR)${libdir}/systemd/system/cpupower.service'
- $(SETPERM_DATA) '$(DESTDIR)${libdir}/systemd/system/cpupower.service'
+ $(INSTALL) -d $(DESTDIR)${unitdir}
+ sed 's|___CDIR___|${confdir}|; s|___LDIR___|${libexecdir}|' cpupower.service.in > '$(DESTDIR)${unitdir}/cpupower.service'
+ $(SETPERM_DATA) '$(DESTDIR)${unitdir}/cpupower.service'
install-man:
$(INSTALL_DATA) -D man/cpupower.1 $(DESTDIR)${mandir}/man1/cpupower.1
@@ -335,11 +350,7 @@ install-bench: compile-bench
@#DESTDIR must be set from outside to survive
@sbindir=$(sbindir) bindir=$(bindir) docdir=$(docdir) confdir=$(confdir) $(MAKE) -C bench O=$(OUTPUT) install
-ifeq ($(strip $(STATIC)),true)
-install: all install-tools install-man $(INSTALL_NLS) $(INSTALL_BENCH)
-else
install: all install-lib install-tools install-man $(INSTALL_NLS) $(INSTALL_BENCH)
-endif
uninstall:
- rm -f $(DESTDIR)${libdir}/libcpupower.*
@@ -348,7 +359,7 @@ uninstall:
- rm -f $(DESTDIR)${bindir}/utils/cpupower
- rm -f $(DESTDIR)${confdir}cpupower-service.conf
- rm -f $(DESTDIR)${libexecdir}/cpupower
- - rm -f $(DESTDIR)${libdir}/systemd/system/cpupower.service
+ - rm -f $(DESTDIR)${unitdir}/cpupower.service
- rm -f $(DESTDIR)${mandir}/man1/cpupower.1
- rm -f $(DESTDIR)${mandir}/man1/cpupower-frequency-set.1
- rm -f $(DESTDIR)${mandir}/man1/cpupower-frequency-info.1
diff --git a/tools/power/cpupower/bindings/python/Makefile b/tools/power/cpupower/bindings/python/Makefile
index 81db39a03efb..4527cd732b42 100644
--- a/tools/power/cpupower/bindings/python/Makefile
+++ b/tools/power/cpupower/bindings/python/Makefile
@@ -4,20 +4,22 @@
# This Makefile expects you have already run `make install-lib` in the lib
# directory for the bindings to be created.
-CC := gcc
+CC ?= gcc
+# CFLAGS ?=
+LDFLAGS ?= -lcpupower
HAVE_SWIG := $(shell if which swig >/dev/null 2>&1; then echo 1; else echo 0; fi)
HAVE_PYCONFIG := $(shell if which python-config >/dev/null 2>&1; then echo 1; else echo 0; fi)
-PY_INCLUDE = $(firstword $(shell python-config --includes))
-INSTALL_DIR = $(shell python3 -c "import site; print(site.getsitepackages()[0])")
+PY_INCLUDE ?= $(firstword $(shell python-config --includes))
+INSTALL_DIR ?= $(shell python3 -c "import site; print(site.getsitepackages()[0])")
all: _raw_pylibcpupower.so
_raw_pylibcpupower.so: raw_pylibcpupower_wrap.o
- $(CC) -shared -lcpupower raw_pylibcpupower_wrap.o -o _raw_pylibcpupower.so
+ $(CC) -shared $(LDFLAGS) raw_pylibcpupower_wrap.o -o _raw_pylibcpupower.so
raw_pylibcpupower_wrap.o: raw_pylibcpupower_wrap.c
- $(CC) -fPIC -c raw_pylibcpupower_wrap.c $(PY_INCLUDE)
+ $(CC) $(CFLAGS) $(PY_INCLUDE) -fPIC -c raw_pylibcpupower_wrap.c
raw_pylibcpupower_wrap.c: raw_pylibcpupower.swg
ifeq ($(HAVE_SWIG),0)
diff --git a/tools/power/cpupower/lib/cpuidle.c b/tools/power/cpupower/lib/cpuidle.c
index 0ecac009273c..f2c1139adf71 100644
--- a/tools/power/cpupower/lib/cpuidle.c
+++ b/tools/power/cpupower/lib/cpuidle.c
@@ -233,6 +233,7 @@ int cpuidle_state_disable(unsigned int cpu,
{
char value[SYSFS_PATH_MAX];
int bytes_written;
+ int len;
if (cpuidle_state_count(cpu) <= idlestate)
return -1;
@@ -241,10 +242,10 @@ int cpuidle_state_disable(unsigned int cpu,
idlestate_value_files[IDLESTATE_DISABLE]))
return -2;
- snprintf(value, SYSFS_PATH_MAX, "%u", disable);
+ len = snprintf(value, SYSFS_PATH_MAX, "%u", disable);
bytes_written = cpuidle_state_write_file(cpu, idlestate, "disable",
- value, sizeof(disable));
+ value, len);
if (bytes_written)
return 0;
return -3;
diff --git a/tools/power/cpupower/lib/cpupower.c b/tools/power/cpupower/lib/cpupower.c
index ce8dfb8e46ab..d7f7ec6f151c 100644
--- a/tools/power/cpupower/lib/cpupower.c
+++ b/tools/power/cpupower/lib/cpupower.c
@@ -56,7 +56,7 @@ unsigned int cpupower_write_sysfs(const char *path, char *buf, size_t buflen)
if (numwritten < 1) {
perror(path);
close(fd);
- return -1;
+ return 0;
}
close(fd);
diff --git a/tools/power/cpupower/man/cpupower-set.1 b/tools/power/cpupower/man/cpupower-set.1
index 500653ef98c7..8ac82b6f9189 100644
--- a/tools/power/cpupower/man/cpupower-set.1
+++ b/tools/power/cpupower/man/cpupower-set.1
@@ -81,10 +81,11 @@ Refer to the AMD P-State kernel documentation for further information.
.RE
.PP
-\-\-turbo\-boost, \-t
+\-\-turbo\-boost, \-\-boost, \-t
.RS 4
-This option is used to enable or disable the turbo boost feature on
-supported Intel and AMD processors.
+This option is used to enable or disable the boost feature on
+supported Intel and AMD processors, as well as other systems with boost support.
+(The --boost option is an alias for the --turbo-boost option.)
This option takes as parameter either \fB1\fP to enable, or \fB0\fP to disable the feature.
diff --git a/tools/power/cpupower/utils/cpufreq-info.c b/tools/power/cpupower/utils/cpufreq-info.c
index fc750e127404..7d3732f5f2f6 100644
--- a/tools/power/cpupower/utils/cpufreq-info.c
+++ b/tools/power/cpupower/utils/cpufreq-info.c
@@ -128,7 +128,7 @@ static int get_boost_mode_x86(unsigned int cpu)
/* ToDo: Make this more global */
unsigned long pstates[MAX_HW_PSTATES] = {0,};
- ret = cpufreq_has_boost_support(cpu, &support, &active, &b_states);
+ ret = cpufreq_has_x86_boost_support(cpu, &support, &active, &b_states);
if (ret) {
printf(_("Error while evaluating Boost Capabilities"
" on CPU %d -- are you root?\n"), cpu);
@@ -204,6 +204,18 @@ static int get_boost_mode_x86(unsigned int cpu)
return 0;
}
+static int get_boost_mode_generic(unsigned int cpu)
+{
+ bool active;
+
+ if (!cpufreq_has_generic_boost_support(&active)) {
+ printf(_(" boost state support:\n"));
+ printf(_(" Active: %s\n"), active ? _("yes") : _("no"));
+ }
+
+ return 0;
+}
+
/* --boost / -b */
static int get_boost_mode(unsigned int cpu)
@@ -214,6 +226,8 @@ static int get_boost_mode(unsigned int cpu)
cpupower_cpu_info.vendor == X86_VENDOR_HYGON ||
cpupower_cpu_info.vendor == X86_VENDOR_INTEL)
return get_boost_mode_x86(cpu);
+ else
+ get_boost_mode_generic(cpu);
freqs = cpufreq_get_boost_frequencies(cpu);
if (freqs) {
diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c
index 0677b58374ab..c2117e5650dd 100644
--- a/tools/power/cpupower/utils/cpupower-set.c
+++ b/tools/power/cpupower/utils/cpupower-set.c
@@ -21,6 +21,7 @@ static struct option set_opts[] = {
{"epp", required_argument, NULL, 'e'},
{"amd-pstate-mode", required_argument, NULL, 'm'},
{"turbo-boost", required_argument, NULL, 't'},
+ {"boost", required_argument, NULL, 't'},
{ },
};
@@ -62,8 +63,8 @@ int cmd_set(int argc, char **argv)
params.params = 0;
/* parameter parsing */
- while ((ret = getopt_long(argc, argv, "b:e:m:",
- set_opts, NULL)) != -1) {
+ while ((ret = getopt_long(argc, argv, "b:e:m:t:",
+ set_opts, NULL)) != -1) {
switch (ret) {
case 'b':
if (params.perf_bias)
diff --git a/tools/power/cpupower/utils/helpers/helpers.h b/tools/power/cpupower/utils/helpers/helpers.h
index 95749b8ee475..82ea62bdf5a2 100644
--- a/tools/power/cpupower/utils/helpers/helpers.h
+++ b/tools/power/cpupower/utils/helpers/helpers.h
@@ -103,6 +103,9 @@ extern struct cpupower_cpu_info cpupower_cpu_info;
/* cpuid and cpuinfo helpers **************************/
+int cpufreq_has_generic_boost_support(bool *active);
+int cpupower_set_turbo_boost(int turbo_boost);
+
/* X86 ONLY ****************************************/
#if defined(__i386__) || defined(__x86_64__)
@@ -118,7 +121,6 @@ extern unsigned long long msr_intel_get_turbo_ratio(unsigned int cpu);
extern int cpupower_set_epp(unsigned int cpu, char *epp);
extern int cpupower_set_amd_pstate_mode(char *mode);
-extern int cpupower_set_turbo_boost(int turbo_boost);
/* Read/Write msr ****************************/
@@ -139,8 +141,8 @@ extern int decode_pstates(unsigned int cpu, int boost_states,
/* AMD HW pstate decoding **************************/
-extern int cpufreq_has_boost_support(unsigned int cpu, int *support,
- int *active, int * states);
+int cpufreq_has_x86_boost_support(unsigned int cpu, int *support,
+ int *active, int *states);
/* AMD P-State stuff **************************/
bool cpupower_amd_pstate_enabled(void);
@@ -181,13 +183,11 @@ static inline int cpupower_set_epp(unsigned int cpu, char *epp)
{ return -1; };
static inline int cpupower_set_amd_pstate_mode(char *mode)
{ return -1; };
-static inline int cpupower_set_turbo_boost(int turbo_boost)
-{ return -1; };
/* Read/Write msr ****************************/
-static inline int cpufreq_has_boost_support(unsigned int cpu, int *support,
- int *active, int * states)
+static inline int cpufreq_has_x86_boost_support(unsigned int cpu, int *support,
+ int *active, int *states)
{ return -1; }
static inline bool cpupower_amd_pstate_enabled(void)
diff --git a/tools/power/cpupower/utils/helpers/misc.c b/tools/power/cpupower/utils/helpers/misc.c
index 76e461ff4f74..166dc1e470ea 100644
--- a/tools/power/cpupower/utils/helpers/misc.c
+++ b/tools/power/cpupower/utils/helpers/misc.c
@@ -8,15 +8,14 @@
#include "helpers/helpers.h"
#include "helpers/sysfs.h"
#include "cpufreq.h"
+#include "cpupower_intern.h"
#if defined(__i386__) || defined(__x86_64__)
-#include "cpupower_intern.h"
-
#define MSR_AMD_HWCR 0xc0010015
-int cpufreq_has_boost_support(unsigned int cpu, int *support, int *active,
- int *states)
+int cpufreq_has_x86_boost_support(unsigned int cpu, int *support, int *active,
+ int *states)
{
int ret;
unsigned long long val;
@@ -124,24 +123,6 @@ int cpupower_set_amd_pstate_mode(char *mode)
return 0;
}
-int cpupower_set_turbo_boost(int turbo_boost)
-{
- char path[SYSFS_PATH_MAX];
- char linebuf[2] = {};
-
- snprintf(path, sizeof(path), PATH_TO_CPU "cpufreq/boost");
-
- if (!is_valid_path(path))
- return -1;
-
- snprintf(linebuf, sizeof(linebuf), "%d", turbo_boost);
-
- if (cpupower_write_sysfs(path, linebuf, 2) <= 0)
- return -1;
-
- return 0;
-}
-
bool cpupower_amd_pstate_enabled(void)
{
char *driver = cpufreq_get_driver(0);
@@ -160,6 +141,39 @@ bool cpupower_amd_pstate_enabled(void)
#endif /* #if defined(__i386__) || defined(__x86_64__) */
+int cpufreq_has_generic_boost_support(bool *active)
+{
+ char path[SYSFS_PATH_MAX];
+ char linebuf[2] = {};
+ unsigned long val;
+ char *endp;
+
+ snprintf(path, sizeof(path), PATH_TO_CPU "cpufreq/boost");
+
+ if (!is_valid_path(path))
+ return -EACCES;
+
+ if (cpupower_read_sysfs(path, linebuf, 2) <= 0)
+ return -EINVAL;
+
+ val = strtoul(linebuf, &endp, 0);
+ if (endp == linebuf || errno == ERANGE)
+ return -EINVAL;
+
+ switch (val) {
+ case 0:
+ *active = false;
+ break;
+ case 1:
+ *active = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
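(Editorial aside: a short usage sketch of the new helper; the printf() reporting is illustrative and the error codes follow the -EACCES/-EINVAL returns above.)

	bool active = false;
	int ret = cpufreq_has_generic_boost_support(&active);

	if (ret == 0)
		printf("boost: %s\n", active ? "enabled" : "disabled");
	else
		printf("no generic boost interface (error %d)\n", ret);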
/* get_cpustate
*
* Gather the information of all online CPUs into bitmask struct
@@ -259,3 +273,21 @@ void print_speed(unsigned long speed, int no_rounding)
}
}
}
+
+int cpupower_set_turbo_boost(int turbo_boost)
+{
+ char path[SYSFS_PATH_MAX];
+ char linebuf[2] = {};
+
+ snprintf(path, sizeof(path), PATH_TO_CPU "cpufreq/boost");
+
+ if (!is_valid_path(path))
+ return -1;
+
+ snprintf(linebuf, sizeof(linebuf), "%d", turbo_boost);
+
+ if (cpupower_write_sysfs(path, linebuf, 2) <= 0)
+ return -1;
+
+ return 0;
+}
diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
index ad493157f826..e8b3841d5c0f 100644
--- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
+++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
@@ -121,10 +121,8 @@ void print_header(int topology_depth)
switch (topology_depth) {
case TOPOLOGY_DEPTH_PKG:
printf(" PKG|");
- break;
case TOPOLOGY_DEPTH_CORE:
printf("CORE|");
- break;
case TOPOLOGY_DEPTH_CPU:
printf(" CPU|");
break;
@@ -167,10 +165,8 @@ void print_results(int topology_depth, int cpu)
switch (topology_depth) {
case TOPOLOGY_DEPTH_PKG:
printf("%4d|", cpu_top.core_info[cpu].pkg);
- break;
case TOPOLOGY_DEPTH_CORE:
printf("%4d|", cpu_top.core_info[cpu].core);
- break;
case TOPOLOGY_DEPTH_CPU:
printf("%4d|", cpu_top.core_info[cpu].cpu);
break;
diff --git a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
index 73b6b10cbdd2..5ae02c3d5b64 100644
--- a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
+++ b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
@@ -240,9 +240,9 @@ static int mperf_stop(void)
int cpu;
for (cpu = 0; cpu < cpu_count; cpu++) {
- mperf_measure_stats(cpu);
- mperf_get_tsc(&tsc_at_measure_end[cpu]);
clock_gettime(CLOCK_REALTIME, &time_end[cpu]);
+ mperf_get_tsc(&tsc_at_measure_end[cpu]);
+ mperf_measure_stats(cpu);
}
return 0;
diff --git a/tools/power/x86/amd_pstate_tracer/amd_pstate_trace.py b/tools/power/x86/amd_pstate_tracer/amd_pstate_trace.py
index feb9f9421c7b..875b086550d1 100755
--- a/tools/power/x86/amd_pstate_tracer/amd_pstate_trace.py
+++ b/tools/power/x86/amd_pstate_tracer/amd_pstate_trace.py
@@ -11,7 +11,7 @@ Prerequisites:
gnuplot 5.0 or higher
gnuplot-py 1.8 or higher
(Most of the distributions have these required packages. They may be called
- gnuplot-py, phython-gnuplot or phython3-gnuplot, gnuplot-nox, ... )
+ gnuplot-py, python-gnuplot or python3-gnuplot, gnuplot-nox, ... )
Kernel config for Linux trace is enabled
diff --git a/tools/power/x86/intel-speed-select/isst-config.c b/tools/power/x86/intel-speed-select/isst-config.c
index 0ce251b8d466..558138eea75e 100644
--- a/tools/power/x86/intel-speed-select/isst-config.c
+++ b/tools/power/x86/intel-speed-select/isst-config.c
@@ -16,7 +16,7 @@ struct process_cmd_struct {
int arg;
};
-static const char *version_str = "v1.23";
+static const char *version_str = "v1.24";
static const int supported_api_ver = 3;
static struct isst_if_platform_info isst_platform_info;
diff --git a/tools/power/x86/intel-speed-select/isst-core-tpmi.c b/tools/power/x86/intel-speed-select/isst-core-tpmi.c
index 4f389e1c0525..ebaad0dc8ca6 100644
--- a/tools/power/x86/intel-speed-select/isst-core-tpmi.c
+++ b/tools/power/x86/intel-speed-select/isst-core-tpmi.c
@@ -452,13 +452,16 @@ static int tpmi_get_pbf_info(struct isst_id *id, int level,
return _pbf_get_coremask_info(id, level, pbf_info);
}
+#define FEATURE_ENABLE_WAIT_US 1000
+#define FEATURE_ENABLE_RETRIES 5
+
static int tpmi_set_pbf_fact_status(struct isst_id *id, int pbf, int enable)
{
struct isst_pkg_ctdp pkg_dev;
struct isst_pkg_ctdp_level_info ctdp_level;
int current_level;
struct isst_perf_feature_control info;
- int ret;
+ int ret, i;
ret = isst_get_ctdp_levels(id, &pkg_dev);
if (ret)
@@ -503,6 +506,30 @@ static int tpmi_set_pbf_fact_status(struct isst_id *id, int pbf, int enable)
if (ret == -1)
return ret;
+ for (i = 0; i < FEATURE_ENABLE_RETRIES; ++i) {
+
+ usleep(FEATURE_ENABLE_WAIT_US);
+
+ /* Check status */
+ ret = isst_get_ctdp_control(id, current_level, &ctdp_level);
+ if (ret)
+ return ret;
+
+ debug_printf("pbf_enabled:%d fact_enabled:%d\n",
+ ctdp_level.pbf_enabled, ctdp_level.fact_enabled);
+
+ if (pbf) {
+ if (ctdp_level.pbf_enabled == enable)
+ break;
+ } else {
+ if (ctdp_level.fact_enabled == enable)
+ break;
+ }
+ }
+
+ if (i == FEATURE_ENABLE_RETRIES)
+ return -1;
+
return 0;
}
@@ -513,6 +540,7 @@ static int tpmi_get_fact_info(struct isst_id *id, int level, int fact_bucket,
int i, j;
int ret;
+ memset(&info, 0, sizeof(info));
info.socket_id = id->pkg;
info.power_domain_id = id->punit;
info.level = level;
@@ -659,7 +687,8 @@ static int tpmi_pm_qos_config(struct isst_id *id, int enable_clos,
int priority_type)
{
struct isst_core_power info;
- int i, ret, saved_punit;
+ int cp_state = 0, cp_cap = 0;
+ int i, j, ret, saved_punit;
info.get_set = 1;
info.socket_id = id->pkg;
@@ -679,6 +708,19 @@ static int tpmi_pm_qos_config(struct isst_id *id, int enable_clos,
id->punit = saved_punit;
return ret;
}
+ /* Get status */
+ for (j = 0; j < FEATURE_ENABLE_RETRIES; ++j) {
+ usleep(FEATURE_ENABLE_WAIT_US);
+ ret = tpmi_read_pm_config(id, &cp_state, &cp_cap);
+ debug_printf("ret:%d cp_state:%d enable_clos:%d\n", ret,
+ cp_state, enable_clos);
+ if (ret || cp_state == enable_clos)
+ break;
+ }
+ if (j == FEATURE_ENABLE_RETRIES) {
+ id->punit = saved_punit;
+ return -1;
+ }
}
}
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
index b74ed916057e..1551fcdbfd8a 100644
--- a/tools/power/x86/turbostat/turbostat.8
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -47,10 +47,11 @@ name as necessary to disambiguate it from others is necessary. Note that option
MSRs are read as 64-bits, u32 truncates the displayed value to 32-bits.
default: u64
- format: {\fBraw\fP | \fBdelta\fP | \fBpercent\fP}
+ format: {\fBraw\fP | \fBdelta\fP | \fBpercent\fP | \fBaverage\fP}
'raw' shows the MSR contents in hex.
'delta' shows the difference in values during the measurement interval.
'percent' shows the delta as a percentage of the cycles elapsed.
+ 'average' is similar to 'raw', but the value is also averaged for node/package summaries (or when using -S).
default: delta
name: "name_string"
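(Editorial aside: an illustrative invocation of the new 'average' format via --add, in the style of the examples later in this page; the MSR number 0x611, the "package" scope and the column name are hypothetical choices, not taken from the patch.)
.nf
sudo ./turbostat --quiet --show CPU --add msr0x611,u64,package,average,PkgENRG sleep 1
.fi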
@@ -100,7 +101,7 @@ The column name "all" can be used to enable all disabled-by-default built-in cou
.PP
\fB--show column\fP show only the specified built-in columns. May be invoked multiple times, or with a comma-separated list of column names.
.PP
-\fB--show CATEGORY --hide CATEGORY\fP Show and hide also accept a single CATEGORY of columns: "all", "topology", "idle", "frequency", "power", "cpuidle", "hwidle", "swidle", "other". "idle" (enabled by default), includes "hwidle" and "idle_pct". "cpuidle" (default disabled) includes cpuidle software invocation counters. "swidle" includes "cpuidle" plus "idle_pct". "hwidle" includes only hardware based idle residency counters. Older versions of turbostat used the term "sysfs" for what is now "swidle".
+\fB--show CATEGORY --hide CATEGORY\fP Show and hide also accept a comma-separated-list of CATEGORIES of columns: "all", "topology", "idle", "frequency", "power", "cpuidle", "hwidle", "swidle", "cache", "llc", "other". "idle" (enabled by default), includes "hwidle" and "pct_idle". "cpuidle" (default disabled) includes cpuidle software invocation counters. "swidle" includes "cpuidle" plus "pct_idle". "hwidle" includes only hardware based idle residency counters. Older versions of turbostat used the term "sysfs" for what is now "swidle".
.PP
\fB--Dump\fP displays the raw counter values.
.PP
@@ -158,6 +159,10 @@ The system configuration dump (if --quiet is not used) is followed by statistics
.PP
\fBSMI\fP The number of System Management Interrupts serviced CPU during the measurement interval. While this counter is actually per-CPU, SMI are triggered on all processors, so the number should be the same for all CPUs.
.PP
+\fBLLCkRPS\fP Last Level Cache Thousands of References Per Second. For CPUs with an L3 LLC, this is the number of references that CPU made to the L3 (and the number of misses that CPU made to it's L2). For CPUs with an L2 LLC, this is the number of references to the L2 (and the number of misses to the CPU's L1). The system summary row shows the sum for all CPUs. In both cases, the value displayed is the actual value divided by 1000 in the interest of usually fitting into 8 columns.
+.PP
+\fBLLC%hit\fP Last Level Cache Hit Rate %. Hit Rate Percent = 100.0 * (References - Misses)/References. The system summary row shows the weighted average for all CPUs (100.0 * (Sum_References - Sum_Misses)/Sum_References).
+.PP
\fBC1, C2, C3...\fP The number times Linux requested the C1, C2, C3 idle state during the measurement interval. The system summary line shows the sum for all CPUs. These are C-state names as exported in /sys/devices/system/cpu/cpu*/cpuidle/state*/name. While their names are generic, their attributes are processor specific. They the system description section of output shows what MWAIT sub-states they are mapped to on each system. These counters are in the "cpuidle" group, which is disabled, by default.
.PP
\fBC1+, C2+, C3+...\fP The idle governor idle state misprediction statistics. Inidcates the number times Linux requested the C1, C2, C3 idle state during the measurement interval, but should have requested a deeper idle state (if it exists and enabled). These statistics come from the /sys/devices/system/cpu/cpu*/cpuidle/state*/below file. These counters are in the "cpuidle" group, which is disabled, by default.
@@ -186,6 +191,14 @@ The system configuration dump (if --quiet is not used) is followed by statistics
.PP
\fBSAMAMHz\fP Instantaneous snapshot of what sysfs presents at the end of the measurement interval. From /sys/class/drm/card0/gt/gt1/rps_act_freq_mhz or /sys/class/drm/card0/device/tile0/gtN/freq0/act_freq depending on the graphics driver being used.
.PP
+\fBTotl%C0\fP Weighted percentage of time that CPUs are busy. If N CPUs are busy during an interval, the percentage is N * 100%.
+.PP
+\fBAny%C0\fP Percentage of time that at least one CPU is busy.
+.PP
+\fBGFX%C0\fP Percentage of time that at least one GFX compute engine is busy.
+.PP
+\fBCPUGFX%\fP Percentage of time that at least one CPU is busy at the same time as at least one Graphics compute enginer is busy.
+.PP
\fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states. These numbers are from hardware residency counters.
.PP
\fBPkgWatt\fP Watts consumed by the whole package.
@@ -204,8 +217,8 @@ The system configuration dump (if --quiet is not used) is followed by statistics
.PP
\fBUncMHz\fP per-package uncore MHz, instantaneous sample.
.PP
-\fBUMHz1.0\fP per-package uncore MHz for domain=1 and fabric_cluster=0, instantaneous sample. System summary is the average of all packages.
-Intel Granite Rapids systems use domains 0-2 for CPUs, and 3-4 for IO, with cluster always 0.
+\fBUMHz1.0\fP per-package uncore MHz for pm_domain=1 and fabric_cluster=0, instantaneous sample. System summary is the average of all packages.
+Intel Granite Rapids systems use pm_domains 0-2 for CPUs, and 3-4 for IO, with cluster always 0.
For the "--show" and "--hide" options, use "UncMHz" to operate on all UMHz*.* as a group.
.SH TOO MUCH INFORMATION EXAMPLE
By default, turbostat dumps all possible information -- a system configuration header, followed by columns for all counters.
@@ -401,25 +414,24 @@ CPU pCPU%c1 CPU%c1
.fi
.SH ADD PERF COUNTER EXAMPLE #2 (using virtual cpu device)
-Here we run on hybrid, Raptor Lake platform.
-We limit turbostat to show output for just cpu0 (pcore) and cpu12 (ecore).
+Here we run on hybrid, Meteor Lake platform.
+We limit turbostat to show output for just cpu0 (pcore) and cpu4 (ecore).
We add a counter showing number of L3 cache misses, using virtual "cpu" device,
labeling it with the column header, "VCMISS".
We add a counter showing number of L3 cache misses, using virtual "cpu_core" device,
-labeling it with the column header, "PCMISS". This will fail on ecore cpu12.
+labeling it with the column header, "PCMISS". This will fail on ecore cpu4.
We add a counter showing number of L3 cache misses, using virtual "cpu_atom" device,
labeling it with the column header, "ECMISS". This will fail on pcore cpu0.
We display it only once, after the conclusion of 0.1 second sleep.
.nf
-sudo ./turbostat --quiet --cpu 0,12 --show CPU --add perf/cpu/cache-misses,cpu,delta,raw,VCMISS --add perf/cpu_core/cache-misses,cpu,delta,raw,PCMISS --add perf/cpu_atom/cache-misses,cpu,delta,raw,ECMISS sleep .1
+sudo ./turbostat --quiet --cpu 0,4 --show CPU --add perf/cpu/cache-misses,cpu,delta,VCMISS --add perf/cpu_core/cache-misses,cpu,delta,PCMISS --add perf/cpu_atom/cache-misses,cpu,delta,ECMISS sleep 5
turbostat: added_perf_counters_init_: perf/cpu_atom/cache-misses: failed to open counter on cpu0
-turbostat: added_perf_counters_init_: perf/cpu_core/cache-misses: failed to open counter on cpu12
-0.104630 sec
-CPU ECMISS PCMISS VCMISS
-- 0x0000000000000000 0x0000000000000000 0x0000000000000000
-0 0x0000000000000000 0x0000000000007951 0x0000000000007796
-12 0x000000000001137a 0x0000000000000000 0x0000000000011392
-
+turbostat: added_perf_counters_init_: perf/cpu_core/cache-misses: failed to open counter on cpu4
+5.001207 sec
+CPU ECMISS PCMISS VCMISS
+- 41586506 46291219 87877749
+4 83173012 0 83173040
+0 0 92582439 92582458
.fi
.SH ADD PMT COUNTER EXAMPLE
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 0170d3cc6819..5ad45c2ac5bd 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -67,6 +67,7 @@
#include <stdbool.h>
#include <assert.h>
#include <linux/kernel.h>
+#include <limits.h>
#define UNUSED(x) (void)(x)
@@ -141,6 +142,7 @@ struct msr_counter {
#define FLAGS_SHOW (1 << 1)
#define SYSFS_PERCPU (1 << 1)
};
+static int use_android_msr_path;
struct msr_counter bic[] = {
{ 0x0, "usec", NULL, 0, 0, 0, NULL, 0 },
@@ -194,6 +196,7 @@ struct msr_counter bic[] = {
{ 0x0, "APIC", NULL, 0, 0, 0, NULL, 0 },
{ 0x0, "X2APIC", NULL, 0, 0, 0, NULL, 0 },
{ 0x0, "Die", NULL, 0, 0, 0, NULL, 0 },
+ { 0x0, "L3", NULL, 0, 0, 0, NULL, 0 },
{ 0x0, "GFXAMHz", NULL, 0, 0, 0, NULL, 0 },
{ 0x0, "IPC", NULL, 0, 0, 0, NULL, 0 },
{ 0x0, "CoreThr", NULL, 0, 0, 0, NULL, 0 },
@@ -207,93 +210,250 @@ struct msr_counter bic[] = {
{ 0x0, "NMI", NULL, 0, 0, 0, NULL, 0 },
{ 0x0, "CPU%c1e", NULL, 0, 0, 0, NULL, 0 },
{ 0x0, "pct_idle", NULL, 0, 0, 0, NULL, 0 },
+ { 0x0, "LLCkRPS", NULL, 0, 0, 0, NULL, 0 },
+ { 0x0, "LLC%hit", NULL, 0, 0, 0, NULL, 0 },
};
-#define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter))
-#define BIC_USEC (1ULL << 0)
-#define BIC_TOD (1ULL << 1)
-#define BIC_Package (1ULL << 2)
-#define BIC_Node (1ULL << 3)
-#define BIC_Avg_MHz (1ULL << 4)
-#define BIC_Busy (1ULL << 5)
-#define BIC_Bzy_MHz (1ULL << 6)
-#define BIC_TSC_MHz (1ULL << 7)
-#define BIC_IRQ (1ULL << 8)
-#define BIC_SMI (1ULL << 9)
-#define BIC_cpuidle (1ULL << 10)
-#define BIC_CPU_c1 (1ULL << 11)
-#define BIC_CPU_c3 (1ULL << 12)
-#define BIC_CPU_c6 (1ULL << 13)
-#define BIC_CPU_c7 (1ULL << 14)
-#define BIC_ThreadC (1ULL << 15)
-#define BIC_CoreTmp (1ULL << 16)
-#define BIC_CoreCnt (1ULL << 17)
-#define BIC_PkgTmp (1ULL << 18)
-#define BIC_GFX_rc6 (1ULL << 19)
-#define BIC_GFXMHz (1ULL << 20)
-#define BIC_Pkgpc2 (1ULL << 21)
-#define BIC_Pkgpc3 (1ULL << 22)
-#define BIC_Pkgpc6 (1ULL << 23)
-#define BIC_Pkgpc7 (1ULL << 24)
-#define BIC_Pkgpc8 (1ULL << 25)
-#define BIC_Pkgpc9 (1ULL << 26)
-#define BIC_Pkgpc10 (1ULL << 27)
-#define BIC_CPU_LPI (1ULL << 28)
-#define BIC_SYS_LPI (1ULL << 29)
-#define BIC_PkgWatt (1ULL << 30)
-#define BIC_CorWatt (1ULL << 31)
-#define BIC_GFXWatt (1ULL << 32)
-#define BIC_PkgCnt (1ULL << 33)
-#define BIC_RAMWatt (1ULL << 34)
-#define BIC_PKG__ (1ULL << 35)
-#define BIC_RAM__ (1ULL << 36)
-#define BIC_Pkg_J (1ULL << 37)
-#define BIC_Cor_J (1ULL << 38)
-#define BIC_GFX_J (1ULL << 39)
-#define BIC_RAM_J (1ULL << 40)
-#define BIC_Mod_c6 (1ULL << 41)
-#define BIC_Totl_c0 (1ULL << 42)
-#define BIC_Any_c0 (1ULL << 43)
-#define BIC_GFX_c0 (1ULL << 44)
-#define BIC_CPUGFX (1ULL << 45)
-#define BIC_Core (1ULL << 46)
-#define BIC_CPU (1ULL << 47)
-#define BIC_APIC (1ULL << 48)
-#define BIC_X2APIC (1ULL << 49)
-#define BIC_Die (1ULL << 50)
-#define BIC_GFXACTMHz (1ULL << 51)
-#define BIC_IPC (1ULL << 52)
-#define BIC_CORE_THROT_CNT (1ULL << 53)
-#define BIC_UNCORE_MHZ (1ULL << 54)
-#define BIC_SAM_mc6 (1ULL << 55)
-#define BIC_SAMMHz (1ULL << 56)
-#define BIC_SAMACTMHz (1ULL << 57)
-#define BIC_Diec6 (1ULL << 58)
-#define BIC_SysWatt (1ULL << 59)
-#define BIC_Sys_J (1ULL << 60)
-#define BIC_NMI (1ULL << 61)
-#define BIC_CPU_c1e (1ULL << 62)
-#define BIC_pct_idle (1ULL << 63)
-
-#define BIC_GROUP_TOPOLOGY (BIC_Package | BIC_Node | BIC_CoreCnt | BIC_PkgCnt | BIC_Core | BIC_CPU | BIC_Die)
-#define BIC_GROUP_THERMAL_PWR (BIC_CoreTmp | BIC_PkgTmp | BIC_PkgWatt | BIC_CorWatt | BIC_GFXWatt | BIC_RAMWatt | BIC_PKG__ | BIC_RAM__ | BIC_SysWatt)
-#define BIC_GROUP_FREQUENCY (BIC_Avg_MHz | BIC_Busy | BIC_Bzy_MHz | BIC_TSC_MHz | BIC_GFXMHz | BIC_GFXACTMHz | BIC_SAMMHz | BIC_SAMACTMHz | BIC_UNCORE_MHZ)
-#define BIC_GROUP_HW_IDLE (BIC_Busy | BIC_CPU_c1 | BIC_CPU_c3 | BIC_CPU_c6 | BIC_CPU_c7 | BIC_GFX_rc6 | BIC_Pkgpc2 | BIC_Pkgpc3 | BIC_Pkgpc6 | BIC_Pkgpc7 | BIC_Pkgpc8 | BIC_Pkgpc9 | BIC_Pkgpc10 | BIC_CPU_LPI | BIC_SYS_LPI | BIC_Mod_c6 | BIC_Totl_c0 | BIC_Any_c0 | BIC_GFX_c0 | BIC_CPUGFX | BIC_SAM_mc6 | BIC_Diec6)
-#define BIC_GROUP_SW_IDLE (BIC_Busy | BIC_cpuidle | BIC_pct_idle )
-#define BIC_GROUP_IDLE (BIC_GROUP_HW_IDLE | BIC_pct_idle)
-#define BIC_OTHER (BIC_IRQ | BIC_NMI | BIC_SMI | BIC_ThreadC | BIC_CoreTmp | BIC_IPC)
-
-#define BIC_DISABLED_BY_DEFAULT (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC | BIC_cpuidle)
-
-unsigned long long bic_enabled = (0xFFFFFFFFFFFFFFFFULL & ~BIC_DISABLED_BY_DEFAULT);
-unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_cpuidle | BIC_pct_idle | BIC_APIC | BIC_X2APIC;
-
-#define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME)
-#define DO_BIC_READ(COUNTER_NAME) (bic_present & COUNTER_NAME)
-#define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME)
-#define BIC_PRESENT(COUNTER_BIT) (bic_present |= COUNTER_BIT)
-#define BIC_NOT_PRESENT(COUNTER_BIT) (bic_present &= ~COUNTER_BIT)
-#define BIC_IS_ENABLED(COUNTER_BIT) (bic_enabled & COUNTER_BIT)
+/* n.b. bic_names must match the order in bic[], above */
+enum bic_names {
+ BIC_USEC,
+ BIC_TOD,
+ BIC_Package,
+ BIC_Node,
+ BIC_Avg_MHz,
+ BIC_Busy,
+ BIC_Bzy_MHz,
+ BIC_TSC_MHz,
+ BIC_IRQ,
+ BIC_SMI,
+ BIC_cpuidle,
+ BIC_CPU_c1,
+ BIC_CPU_c3,
+ BIC_CPU_c6,
+ BIC_CPU_c7,
+ BIC_ThreadC,
+ BIC_CoreTmp,
+ BIC_CoreCnt,
+ BIC_PkgTmp,
+ BIC_GFX_rc6,
+ BIC_GFXMHz,
+ BIC_Pkgpc2,
+ BIC_Pkgpc3,
+ BIC_Pkgpc6,
+ BIC_Pkgpc7,
+ BIC_Pkgpc8,
+ BIC_Pkgpc9,
+ BIC_Pkgpc10,
+ BIC_CPU_LPI,
+ BIC_SYS_LPI,
+ BIC_PkgWatt,
+ BIC_CorWatt,
+ BIC_GFXWatt,
+ BIC_PkgCnt,
+ BIC_RAMWatt,
+ BIC_PKG__,
+ BIC_RAM__,
+ BIC_Pkg_J,
+ BIC_Cor_J,
+ BIC_GFX_J,
+ BIC_RAM_J,
+ BIC_Mod_c6,
+ BIC_Totl_c0,
+ BIC_Any_c0,
+ BIC_GFX_c0,
+ BIC_CPUGFX,
+ BIC_Core,
+ BIC_CPU,
+ BIC_APIC,
+ BIC_X2APIC,
+ BIC_Die,
+ BIC_L3,
+ BIC_GFXACTMHz,
+ BIC_IPC,
+ BIC_CORE_THROT_CNT,
+ BIC_UNCORE_MHZ,
+ BIC_SAM_mc6,
+ BIC_SAMMHz,
+ BIC_SAMACTMHz,
+ BIC_Diec6,
+ BIC_SysWatt,
+ BIC_Sys_J,
+ BIC_NMI,
+ BIC_CPU_c1e,
+ BIC_pct_idle,
+ BIC_LLC_RPS,
+ BIC_LLC_HIT,
+ MAX_BIC
+};
+
+void print_bic_set(char *s, cpu_set_t *set)
+{
+ int i;
+
+ assert(MAX_BIC < CPU_SETSIZE);
+
+ printf("%s:", s);
+
+ for (i = 0; i <= MAX_BIC; ++i) {
+
+ if (CPU_ISSET(i, set)) {
+ assert(i < MAX_BIC);
+ printf(" %s", bic[i].name);
+ }
+ }
+ putchar('\n');
+}
+
+static cpu_set_t bic_group_topology;
+static cpu_set_t bic_group_thermal_pwr;
+static cpu_set_t bic_group_frequency;
+static cpu_set_t bic_group_hw_idle;
+static cpu_set_t bic_group_sw_idle;
+static cpu_set_t bic_group_idle;
+static cpu_set_t bic_group_cache;
+static cpu_set_t bic_group_other;
+static cpu_set_t bic_group_disabled_by_default;
+static cpu_set_t bic_enabled;
+static cpu_set_t bic_present;
+
+/* modify */
+#define BIC_INIT(set) CPU_ZERO(set)
+
+#define SET_BIC(COUNTER_NUMBER, set) CPU_SET(COUNTER_NUMBER, set)
+#define CLR_BIC(COUNTER_NUMBER, set) CPU_CLR(COUNTER_NUMBER, set)
+
+#define BIC_PRESENT(COUNTER_NUMBER) SET_BIC(COUNTER_NUMBER, &bic_present)
+#define BIC_NOT_PRESENT(COUNTER_NUMBER) CPU_CLR(COUNTER_NUMBER, &bic_present)
+
+/* test */
+#define BIC_IS_ENABLED(COUNTER_NUMBER) CPU_ISSET(COUNTER_NUMBER, &bic_enabled)
+#define DO_BIC_READ(COUNTER_NUMBER) CPU_ISSET(COUNTER_NUMBER, &bic_present)
+#define DO_BIC(COUNTER_NUMBER) (CPU_ISSET(COUNTER_NUMBER, &bic_enabled) && CPU_ISSET(COUNTER_NUMBER, &bic_present))
+
+static void bic_set_all(cpu_set_t *set)
+{
+ int i;
+
+ assert(MAX_BIC < CPU_SETSIZE);
+
+ for (i = 0; i < MAX_BIC; ++i)
+ SET_BIC(i, set);
+}
+
+/*
+ * bic_clear_bits()
+ * clear all the bits from "clr" in "dst"
+ */
+static void bic_clear_bits(cpu_set_t *dst, cpu_set_t *clr)
+{
+ int i;
+
+ assert(MAX_BIC < CPU_SETSIZE);
+
+ for (i = 0; i < MAX_BIC; ++i)
+ if (CPU_ISSET(i, clr))
+ CLR_BIC(i, dst);
+}
+
+static void bic_groups_init(void)
+{
+ BIC_INIT(&bic_group_topology);
+ SET_BIC(BIC_Package, &bic_group_topology);
+ SET_BIC(BIC_Node, &bic_group_topology);
+ SET_BIC(BIC_CoreCnt, &bic_group_topology);
+ SET_BIC(BIC_PkgCnt, &bic_group_topology);
+ SET_BIC(BIC_Core, &bic_group_topology);
+ SET_BIC(BIC_CPU, &bic_group_topology);
+ SET_BIC(BIC_Die, &bic_group_topology);
+ SET_BIC(BIC_L3, &bic_group_topology);
+
+ BIC_INIT(&bic_group_thermal_pwr);
+ SET_BIC(BIC_CoreTmp, &bic_group_thermal_pwr);
+ SET_BIC(BIC_PkgTmp, &bic_group_thermal_pwr);
+ SET_BIC(BIC_PkgWatt, &bic_group_thermal_pwr);
+ SET_BIC(BIC_CorWatt, &bic_group_thermal_pwr);
+ SET_BIC(BIC_GFXWatt, &bic_group_thermal_pwr);
+ SET_BIC(BIC_RAMWatt, &bic_group_thermal_pwr);
+ SET_BIC(BIC_PKG__, &bic_group_thermal_pwr);
+ SET_BIC(BIC_RAM__, &bic_group_thermal_pwr);
+ SET_BIC(BIC_SysWatt, &bic_group_thermal_pwr);
+
+ BIC_INIT(&bic_group_frequency);
+ SET_BIC(BIC_Avg_MHz, &bic_group_frequency);
+ SET_BIC(BIC_Busy, &bic_group_frequency);
+ SET_BIC(BIC_Bzy_MHz, &bic_group_frequency);
+ SET_BIC(BIC_TSC_MHz, &bic_group_frequency);
+ SET_BIC(BIC_GFXMHz, &bic_group_frequency);
+ SET_BIC(BIC_GFXACTMHz, &bic_group_frequency);
+ SET_BIC(BIC_SAMMHz, &bic_group_frequency);
+ SET_BIC(BIC_SAMACTMHz, &bic_group_frequency);
+ SET_BIC(BIC_UNCORE_MHZ, &bic_group_frequency);
+
+ BIC_INIT(&bic_group_hw_idle);
+ SET_BIC(BIC_Busy, &bic_group_hw_idle);
+ SET_BIC(BIC_CPU_c1, &bic_group_hw_idle);
+ SET_BIC(BIC_CPU_c3, &bic_group_hw_idle);
+ SET_BIC(BIC_CPU_c6, &bic_group_hw_idle);
+ SET_BIC(BIC_CPU_c7, &bic_group_hw_idle);
+ SET_BIC(BIC_GFX_rc6, &bic_group_hw_idle);
+ SET_BIC(BIC_Pkgpc2, &bic_group_hw_idle);
+ SET_BIC(BIC_Pkgpc3, &bic_group_hw_idle);
+ SET_BIC(BIC_Pkgpc6, &bic_group_hw_idle);
+ SET_BIC(BIC_Pkgpc7, &bic_group_hw_idle);
+ SET_BIC(BIC_Pkgpc8, &bic_group_hw_idle);
+ SET_BIC(BIC_Pkgpc9, &bic_group_hw_idle);
+ SET_BIC(BIC_Pkgpc10, &bic_group_hw_idle);
+ SET_BIC(BIC_CPU_LPI, &bic_group_hw_idle);
+ SET_BIC(BIC_SYS_LPI, &bic_group_hw_idle);
+ SET_BIC(BIC_Mod_c6, &bic_group_hw_idle);
+ SET_BIC(BIC_Totl_c0, &bic_group_hw_idle);
+ SET_BIC(BIC_Any_c0, &bic_group_hw_idle);
+ SET_BIC(BIC_GFX_c0, &bic_group_hw_idle);
+ SET_BIC(BIC_CPUGFX, &bic_group_hw_idle);
+ SET_BIC(BIC_SAM_mc6, &bic_group_hw_idle);
+ SET_BIC(BIC_Diec6, &bic_group_hw_idle);
+
+ BIC_INIT(&bic_group_sw_idle);
+ SET_BIC(BIC_Busy, &bic_group_sw_idle);
+ SET_BIC(BIC_cpuidle, &bic_group_sw_idle);
+ SET_BIC(BIC_pct_idle, &bic_group_sw_idle);
+
+ BIC_INIT(&bic_group_idle);
+
+ CPU_OR(&bic_group_idle, &bic_group_idle, &bic_group_hw_idle);
+ SET_BIC(BIC_pct_idle, &bic_group_idle);
+
+ BIC_INIT(&bic_group_cache);
+ SET_BIC(BIC_LLC_RPS, &bic_group_cache);
+ SET_BIC(BIC_LLC_HIT, &bic_group_cache);
+
+ BIC_INIT(&bic_group_other);
+ SET_BIC(BIC_IRQ, &bic_group_other);
+ SET_BIC(BIC_NMI, &bic_group_other);
+ SET_BIC(BIC_SMI, &bic_group_other);
+ SET_BIC(BIC_ThreadC, &bic_group_other);
+ SET_BIC(BIC_CoreTmp, &bic_group_other);
+ SET_BIC(BIC_IPC, &bic_group_other);
+
+ BIC_INIT(&bic_group_disabled_by_default);
+ SET_BIC(BIC_USEC, &bic_group_disabled_by_default);
+ SET_BIC(BIC_TOD, &bic_group_disabled_by_default);
+ SET_BIC(BIC_cpuidle, &bic_group_disabled_by_default);
+ SET_BIC(BIC_APIC, &bic_group_disabled_by_default);
+ SET_BIC(BIC_X2APIC, &bic_group_disabled_by_default);
+
+ BIC_INIT(&bic_enabled);
+ bic_set_all(&bic_enabled);
+ bic_clear_bits(&bic_enabled, &bic_group_disabled_by_default);
+
+ BIC_INIT(&bic_present);
+ SET_BIC(BIC_USEC, &bic_present);
+ SET_BIC(BIC_TOD, &bic_present);
+ SET_BIC(BIC_cpuidle, &bic_present);
+ SET_BIC(BIC_APIC, &bic_present);
+ SET_BIC(BIC_X2APIC, &bic_present);
+ SET_BIC(BIC_pct_idle, &bic_present);
+}
/*
* MSR_PKG_CST_CONFIG_CONTROL decoding for pkg_cstate_limit:
@@ -317,12 +477,11 @@ unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_cpuidle | BIC_pct_idle
#define PCL_10 14 /* PC10 */
#define PCLUNL 15 /* Unlimited */
-struct amperf_group_fd;
-
char *proc_stat = "/proc/stat";
FILE *outf;
int *fd_percpu;
int *fd_instr_count_percpu;
+int *fd_llc_percpu;
struct timeval interval_tv = { 5, 0 };
struct timespec interval_ts = { 5, 0 };
@@ -333,11 +492,12 @@ unsigned int quiet;
unsigned int shown;
unsigned int sums_need_wide_columns;
unsigned int rapl_joules;
+unsigned int valid_rapl_msrs;
unsigned int summary_only;
unsigned int list_header_only;
unsigned int dump_only;
unsigned int force_load;
-unsigned int has_aperf;
+unsigned int cpuid_has_aperf_mperf;
unsigned int has_aperf_access;
unsigned int has_epb;
unsigned int has_turbo;
@@ -403,8 +563,7 @@ static struct gfx_sysfs_info gfx_info[GFX_MAX];
int get_msr(int cpu, off_t offset, unsigned long long *msr);
int add_counter(unsigned int msr_num, char *path, char *name,
- unsigned int width, enum counter_scope scope,
- enum counter_type type, enum counter_format format, int flags, int package_num);
+ unsigned int width, enum counter_scope scope, enum counter_type type, enum counter_format format, int flags, int package_num);
/* Model specific support Start */
@@ -429,7 +588,7 @@ struct platform_features {
bool has_cst_prewake_bit; /* Cstate prewake bit in MSR_IA32_POWER_CTL */
int trl_msrs; /* MSR_TURBO_RATIO_LIMIT/LIMIT1/LIMIT2/SECONDARY, Atom TRL MSRs */
int plr_msrs; /* MSR_CORE/GFX/RING_PERF_LIMIT_REASONS */
- int rapl_msrs; /* RAPL PKG/DRAM/CORE/GFX MSRs, AMD RAPL MSRs */
+ int plat_rapl_msrs; /* RAPL PKG/DRAM/CORE/GFX MSRs, AMD RAPL MSRs */
bool has_per_core_rapl; /* Indicates cores energy collection is per-core, not per-package. AMD specific for now */
bool has_rapl_divisor; /* Divisor for Energy unit raw value from MSR_RAPL_POWER_UNIT */
bool has_fixed_rapl_unit; /* Fixed Energy Unit used for DRAM RAPL Domain */
@@ -539,7 +698,7 @@ enum rapl_msrs {
#define RAPL_PKG_ALL (RAPL_PKG | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO)
#define RAPL_DRAM_ALL (RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_DRAM_POWER_INFO)
#define RAPL_CORE_ALL (RAPL_CORE | RAPL_CORE_POLICY)
-#define RAPL_GFX_ALL (RAPL_GFX | RAPL_GFX_POLIGY)
+#define RAPL_GFX_ALL (RAPL_GFX | RAPL_GFX_POLICY)
#define RAPL_AMD_F17H (RAPL_AMD_PWR_UNIT | RAPL_AMD_CORE_ENERGY_STAT | RAPL_AMD_PKG_ENERGY_STAT)
@@ -584,7 +743,7 @@ static const struct platform_features snb_features = {
.cst_limit = CST_LIMIT_SNB,
.has_irtl_msrs = 1,
.trl_msrs = TRL_BASE,
- .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO,
+ .plat_rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO,
};
static const struct platform_features snx_features = {
@@ -596,7 +755,7 @@ static const struct platform_features snx_features = {
.cst_limit = CST_LIMIT_SNB,
.has_irtl_msrs = 1,
.trl_msrs = TRL_BASE,
- .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM_ALL,
+ .plat_rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM_ALL,
};
static const struct platform_features ivb_features = {
@@ -609,7 +768,7 @@ static const struct platform_features ivb_features = {
.cst_limit = CST_LIMIT_SNB,
.has_irtl_msrs = 1,
.trl_msrs = TRL_BASE,
- .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO,
+ .plat_rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO,
};
static const struct platform_features ivx_features = {
@@ -621,7 +780,7 @@ static const struct platform_features ivx_features = {
.cst_limit = CST_LIMIT_SNB,
.has_irtl_msrs = 1,
.trl_msrs = TRL_BASE | TRL_LIMIT1,
- .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM_ALL,
+ .plat_rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM_ALL,
};
static const struct platform_features hsw_features = {
@@ -635,7 +794,7 @@ static const struct platform_features hsw_features = {
.has_irtl_msrs = 1,
.trl_msrs = TRL_BASE,
.plr_msrs = PLR_CORE | PLR_GFX | PLR_RING,
- .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO,
+ .plat_rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO,
};
static const struct platform_features hsx_features = {
@@ -649,7 +808,7 @@ static const struct platform_features hsx_features = {
.has_irtl_msrs = 1,
.trl_msrs = TRL_BASE | TRL_LIMIT1 | TRL_LIMIT2,
.plr_msrs = PLR_CORE | PLR_RING,
- .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL,
+ .plat_rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL,
.has_fixed_rapl_unit = 1,
};
@@ -664,7 +823,7 @@ static const struct platform_features hswl_features = {
.has_irtl_msrs = 1,
.trl_msrs = TRL_BASE,
.plr_msrs = PLR_CORE | PLR_GFX | PLR_RING,
- .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO,
+ .plat_rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO,
};
static const struct platform_features hswg_features = {
@@ -678,7 +837,7 @@ static const struct platform_features hswg_features = {
.has_irtl_msrs = 1,
.trl_msrs = TRL_BASE,
.plr_msrs = PLR_CORE | PLR_GFX | PLR_RING,
- .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO,
+ .plat_rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO,
};
static const struct platform_features bdw_features = {
@@ -691,7 +850,7 @@ static const struct platform_features bdw_features = {
.cst_limit = CST_LIMIT_HSW,
.has_irtl_msrs = 1,
.trl_msrs = TRL_BASE,
- .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO,
+ .plat_rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO,
};
static const struct platform_features bdwg_features = {
@@ -704,7 +863,7 @@ static const struct platform_features bdwg_features = {
.cst_limit = CST_LIMIT_HSW,
.has_irtl_msrs = 1,
.trl_msrs = TRL_BASE,
- .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO,
+ .plat_rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO,
};
static const struct platform_features bdx_features = {
@@ -718,7 +877,7 @@ static const struct platform_features bdx_features = {
.has_irtl_msrs = 1,
.has_cst_auto_convension = 1,
.trl_msrs = TRL_BASE,
- .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL,
+ .plat_rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL,
.has_fixed_rapl_unit = 1,
};
@@ -735,7 +894,7 @@ static const struct platform_features skl_features = {
.has_ext_cst_msrs = 1,
.trl_msrs = TRL_BASE,
.tcc_offset_bits = 6,
- .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX | RAPL_PSYS,
+ .plat_rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX | RAPL_PSYS,
.enable_tsc_tweak = 1,
};
@@ -752,7 +911,7 @@ static const struct platform_features cnl_features = {
.has_ext_cst_msrs = 1,
.trl_msrs = TRL_BASE,
.tcc_offset_bits = 6,
- .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX | RAPL_PSYS,
+ .plat_rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX | RAPL_PSYS,
.enable_tsc_tweak = 1,
};
@@ -770,7 +929,7 @@ static const struct platform_features adl_features = {
.has_ext_cst_msrs = cnl_features.has_ext_cst_msrs,
.trl_msrs = cnl_features.trl_msrs,
.tcc_offset_bits = cnl_features.tcc_offset_bits,
- .rapl_msrs = cnl_features.rapl_msrs,
+ .plat_rapl_msrs = cnl_features.plat_rapl_msrs,
.enable_tsc_tweak = cnl_features.enable_tsc_tweak,
};
@@ -788,7 +947,7 @@ static const struct platform_features lnl_features = {
.has_ext_cst_msrs = adl_features.has_ext_cst_msrs,
.trl_msrs = adl_features.trl_msrs,
.tcc_offset_bits = adl_features.tcc_offset_bits,
- .rapl_msrs = adl_features.rapl_msrs,
+ .plat_rapl_msrs = adl_features.plat_rapl_msrs,
.enable_tsc_tweak = adl_features.enable_tsc_tweak,
};
@@ -803,7 +962,7 @@ static const struct platform_features skx_features = {
.has_irtl_msrs = 1,
.has_cst_auto_convension = 1,
.trl_msrs = TRL_BASE | TRL_CORECOUNT,
- .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL,
+ .plat_rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL,
.has_fixed_rapl_unit = 1,
};
@@ -819,7 +978,7 @@ static const struct platform_features icx_features = {
.has_irtl_msrs = 1,
.has_cst_prewake_bit = 1,
.trl_msrs = TRL_BASE | TRL_CORECOUNT,
- .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL | RAPL_PSYS,
+ .plat_rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL | RAPL_PSYS,
.has_fixed_rapl_unit = 1,
};
@@ -836,7 +995,25 @@ static const struct platform_features spr_features = {
.has_cst_prewake_bit = 1,
.has_fixed_rapl_psys_unit = 1,
.trl_msrs = TRL_BASE | TRL_CORECOUNT,
- .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL | RAPL_PSYS,
+ .plat_rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL | RAPL_PSYS,
+};
+
+static const struct platform_features dmr_features = {
+ .has_msr_misc_feature_control = spr_features.has_msr_misc_feature_control,
+ .has_msr_misc_pwr_mgmt = spr_features.has_msr_misc_pwr_mgmt,
+ .has_nhm_msrs = spr_features.has_nhm_msrs,
+ .bclk_freq = spr_features.bclk_freq,
+ .supported_cstates = spr_features.supported_cstates,
+ .cst_limit = spr_features.cst_limit,
+ .has_msr_core_c1_res = spr_features.has_msr_core_c1_res,
+ .has_cst_prewake_bit = spr_features.has_cst_prewake_bit,
+ .has_fixed_rapl_psys_unit = spr_features.has_fixed_rapl_psys_unit,
+ .trl_msrs = spr_features.trl_msrs,
+ .has_msr_module_c6_res_ms = 1, /* DMR has Dual-Core-Module and MC6 MSR */
+ .plat_rapl_msrs = 0, /* DMR does not have RAPL MSRs */
+ .plr_msrs = 0, /* DMR does not have PLR MSRs */
+ .has_irtl_msrs = 0, /* DMR does not have IRTL MSRs */
+ .has_config_tdp = 0, /* DMR does not have CTDP MSRs */
};
static const struct platform_features srf_features = {
@@ -852,7 +1029,7 @@ static const struct platform_features srf_features = {
.has_irtl_msrs = 1,
.has_cst_prewake_bit = 1,
.trl_msrs = TRL_BASE | TRL_CORECOUNT,
- .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL | RAPL_PSYS,
+ .plat_rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL | RAPL_PSYS,
};
static const struct platform_features grr_features = {
@@ -868,7 +1045,7 @@ static const struct platform_features grr_features = {
.has_irtl_msrs = 1,
.has_cst_prewake_bit = 1,
.trl_msrs = TRL_BASE | TRL_CORECOUNT,
- .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL | RAPL_PSYS,
+ .plat_rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL | RAPL_PSYS,
};
static const struct platform_features slv_features = {
@@ -881,7 +1058,7 @@ static const struct platform_features slv_features = {
.has_msr_c6_demotion_policy_config = 1,
.has_msr_atom_pkg_c6_residency = 1,
.trl_msrs = TRL_ATOM,
- .rapl_msrs = RAPL_PKG | RAPL_CORE,
+ .plat_rapl_msrs = RAPL_PKG | RAPL_CORE,
.has_rapl_divisor = 1,
.rapl_quirk_tdp = 30,
};
@@ -894,7 +1071,7 @@ static const struct platform_features slvd_features = {
.cst_limit = CST_LIMIT_SLV,
.has_msr_atom_pkg_c6_residency = 1,
.trl_msrs = TRL_BASE,
- .rapl_msrs = RAPL_PKG | RAPL_CORE,
+ .plat_rapl_msrs = RAPL_PKG | RAPL_CORE,
.rapl_quirk_tdp = 30,
};
@@ -915,7 +1092,7 @@ static const struct platform_features gmt_features = {
.cst_limit = CST_LIMIT_GMT,
.has_irtl_msrs = 1,
.trl_msrs = TRL_BASE | TRL_CORECOUNT,
- .rapl_msrs = RAPL_PKG | RAPL_PKG_POWER_INFO,
+ .plat_rapl_msrs = RAPL_PKG | RAPL_PKG_POWER_INFO,
};
static const struct platform_features gmtd_features = {
@@ -928,7 +1105,7 @@ static const struct platform_features gmtd_features = {
.has_irtl_msrs = 1,
.has_msr_core_c1_res = 1,
.trl_msrs = TRL_BASE | TRL_CORECOUNT,
- .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL | RAPL_CORE_ENERGY_STATUS,
+ .plat_rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL | RAPL_CORE_ENERGY_STATUS,
};
static const struct platform_features gmtp_features = {
@@ -940,7 +1117,7 @@ static const struct platform_features gmtp_features = {
.cst_limit = CST_LIMIT_GMT,
.has_irtl_msrs = 1,
.trl_msrs = TRL_BASE,
- .rapl_msrs = RAPL_PKG | RAPL_PKG_POWER_INFO,
+ .plat_rapl_msrs = RAPL_PKG | RAPL_PKG_POWER_INFO,
};
static const struct platform_features tmt_features = {
@@ -951,7 +1128,7 @@ static const struct platform_features tmt_features = {
.cst_limit = CST_LIMIT_GMT,
.has_irtl_msrs = 1,
.trl_msrs = TRL_BASE,
- .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX,
+ .plat_rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX,
.enable_tsc_tweak = 1,
};
@@ -963,7 +1140,7 @@ static const struct platform_features tmtd_features = {
.cst_limit = CST_LIMIT_GMT,
.has_irtl_msrs = 1,
.trl_msrs = TRL_BASE | TRL_CORECOUNT,
- .rapl_msrs = RAPL_PKG_ALL,
+ .plat_rapl_msrs = RAPL_PKG_ALL,
};
static const struct platform_features knl_features = {
@@ -975,7 +1152,7 @@ static const struct platform_features knl_features = {
.cst_limit = CST_LIMIT_KNL,
.has_msr_knl_core_c6_residency = 1,
.trl_msrs = TRL_KNL,
- .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL,
+ .plat_rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL,
.has_fixed_rapl_unit = 1,
.need_perf_multiplier = 1,
};
@@ -984,7 +1161,7 @@ static const struct platform_features default_features = {
};
static const struct platform_features amd_features_with_rapl = {
- .rapl_msrs = RAPL_AMD_F17H,
+ .plat_rapl_msrs = RAPL_AMD_F17H,
.has_per_core_rapl = 1,
.rapl_quirk_tdp = 280, /* This is the max stock TDP of HEDT/Server Fam17h+ chips */
};
@@ -1028,12 +1205,14 @@ static const struct platform_data turbostat_pdata[] = {
{ INTEL_EMERALDRAPIDS_X, &spr_features },
{ INTEL_GRANITERAPIDS_X, &spr_features },
{ INTEL_GRANITERAPIDS_D, &spr_features },
+ { INTEL_DIAMONDRAPIDS_X, &dmr_features },
{ INTEL_LAKEFIELD, &cnl_features },
{ INTEL_ALDERLAKE, &adl_features },
{ INTEL_ALDERLAKE_L, &adl_features },
{ INTEL_RAPTORLAKE, &adl_features },
{ INTEL_RAPTORLAKE_P, &adl_features },
{ INTEL_RAPTORLAKE_S, &adl_features },
+ { INTEL_BARTLETTLAKE, &adl_features },
{ INTEL_METEORLAKE, &adl_features },
{ INTEL_METEORLAKE_L, &adl_features },
{ INTEL_ARROWLAKE_H, &adl_features },
@@ -1041,6 +1220,9 @@ static const struct platform_data turbostat_pdata[] = {
{ INTEL_ARROWLAKE, &adl_features },
{ INTEL_LUNARLAKE_M, &lnl_features },
{ INTEL_PANTHERLAKE_L, &lnl_features },
+ { INTEL_NOVALAKE, &lnl_features },
+ { INTEL_NOVALAKE_L, &lnl_features },
+ { INTEL_WILDCATLAKE_L, &lnl_features },
{ INTEL_ATOM_SILVERMONT, &slv_features },
{ INTEL_ATOM_SILVERMONT_D, &slvd_features },
{ INTEL_ATOM_AIRMONT, &amt_features },
@@ -1072,7 +1254,6 @@ void probe_platform_features(unsigned int family, unsigned int model)
{
int i;
-
if (authentic_amd || hygon_genuine) {
/* fallback to default features on unsupported models */
force_load++;
@@ -1106,8 +1287,7 @@ end:
if (platform)
return;
- fprintf(stderr, "Unsupported platform detected.\n"
- "\tSee RUN THE LATEST VERSION on turbostat(8)\n");
+ fprintf(stderr, "Unsupported platform detected.\n\tSee RUN THE LATEST VERSION on turbostat(8)\n");
exit(1);
}
@@ -1186,7 +1366,7 @@ struct rapl_counter_arch_info {
int msr_shift; /* Positive mean shift right, negative mean shift left */
double *platform_rapl_msr_scale; /* Scale applied to values read by MSR (platform dependent, filled at runtime) */
unsigned int rci_index; /* Maps data from perf counters to global variables */
- unsigned long long bic;
+ unsigned int bic_number;
double compat_scale; /* Some counters require constant scaling to be in the same range as other, similar ones */
unsigned long long flags;
};
@@ -1201,7 +1381,33 @@ static const struct rapl_counter_arch_info rapl_counter_arch_infos[] = {
.msr_shift = 0,
.platform_rapl_msr_scale = &rapl_energy_units,
.rci_index = RAPL_RCI_INDEX_ENERGY_PKG,
- .bic = BIC_PkgWatt | BIC_Pkg_J,
+ .bic_number = BIC_PkgWatt,
+ .compat_scale = 1.0,
+ .flags = RAPL_COUNTER_FLAG_USE_MSR_SUM,
+ },
+ {
+ .feature_mask = RAPL_PKG,
+ .perf_subsys = "power",
+ .perf_name = "energy-pkg",
+ .msr = MSR_PKG_ENERGY_STATUS,
+ .msr_mask = 0xFFFFFFFFFFFFFFFF,
+ .msr_shift = 0,
+ .platform_rapl_msr_scale = &rapl_energy_units,
+ .rci_index = RAPL_RCI_INDEX_ENERGY_PKG,
+ .bic_number = BIC_Pkg_J,
+ .compat_scale = 1.0,
+ .flags = RAPL_COUNTER_FLAG_USE_MSR_SUM,
+ },
+ {
+ .feature_mask = RAPL_AMD_F17H,
+ .perf_subsys = "power",
+ .perf_name = "energy-pkg",
+ .msr = MSR_PKG_ENERGY_STAT,
+ .msr_mask = 0xFFFFFFFFFFFFFFFF,
+ .msr_shift = 0,
+ .platform_rapl_msr_scale = &rapl_energy_units,
+ .rci_index = RAPL_RCI_INDEX_ENERGY_PKG,
+ .bic_number = BIC_PkgWatt,
.compat_scale = 1.0,
.flags = RAPL_COUNTER_FLAG_USE_MSR_SUM,
},
@@ -1214,7 +1420,7 @@ static const struct rapl_counter_arch_info rapl_counter_arch_infos[] = {
.msr_shift = 0,
.platform_rapl_msr_scale = &rapl_energy_units,
.rci_index = RAPL_RCI_INDEX_ENERGY_PKG,
- .bic = BIC_PkgWatt | BIC_Pkg_J,
+ .bic_number = BIC_Pkg_J,
.compat_scale = 1.0,
.flags = RAPL_COUNTER_FLAG_USE_MSR_SUM,
},
@@ -1227,7 +1433,33 @@ static const struct rapl_counter_arch_info rapl_counter_arch_infos[] = {
.msr_shift = 0,
.platform_rapl_msr_scale = &rapl_energy_units,
.rci_index = RAPL_RCI_INDEX_ENERGY_CORES,
- .bic = BIC_CorWatt | BIC_Cor_J,
+ .bic_number = BIC_CorWatt,
+ .compat_scale = 1.0,
+ .flags = RAPL_COUNTER_FLAG_USE_MSR_SUM,
+ },
+ {
+ .feature_mask = RAPL_CORE_ENERGY_STATUS,
+ .perf_subsys = "power",
+ .perf_name = "energy-cores",
+ .msr = MSR_PP0_ENERGY_STATUS,
+ .msr_mask = 0xFFFFFFFFFFFFFFFF,
+ .msr_shift = 0,
+ .platform_rapl_msr_scale = &rapl_energy_units,
+ .rci_index = RAPL_RCI_INDEX_ENERGY_CORES,
+ .bic_number = BIC_Cor_J,
+ .compat_scale = 1.0,
+ .flags = RAPL_COUNTER_FLAG_USE_MSR_SUM,
+ },
+ {
+ .feature_mask = RAPL_DRAM,
+ .perf_subsys = "power",
+ .perf_name = "energy-ram",
+ .msr = MSR_DRAM_ENERGY_STATUS,
+ .msr_mask = 0xFFFFFFFFFFFFFFFF,
+ .msr_shift = 0,
+ .platform_rapl_msr_scale = &rapl_dram_energy_units,
+ .rci_index = RAPL_RCI_INDEX_DRAM,
+ .bic_number = BIC_RAMWatt,
.compat_scale = 1.0,
.flags = RAPL_COUNTER_FLAG_USE_MSR_SUM,
},
@@ -1240,7 +1472,7 @@ static const struct rapl_counter_arch_info rapl_counter_arch_infos[] = {
.msr_shift = 0,
.platform_rapl_msr_scale = &rapl_dram_energy_units,
.rci_index = RAPL_RCI_INDEX_DRAM,
- .bic = BIC_RAMWatt | BIC_RAM_J,
+ .bic_number = BIC_RAM_J,
.compat_scale = 1.0,
.flags = RAPL_COUNTER_FLAG_USE_MSR_SUM,
},
@@ -1253,7 +1485,20 @@ static const struct rapl_counter_arch_info rapl_counter_arch_infos[] = {
.msr_shift = 0,
.platform_rapl_msr_scale = &rapl_energy_units,
.rci_index = RAPL_RCI_INDEX_GFX,
- .bic = BIC_GFXWatt | BIC_GFX_J,
+ .bic_number = BIC_GFXWatt,
+ .compat_scale = 1.0,
+ .flags = RAPL_COUNTER_FLAG_USE_MSR_SUM,
+ },
+ {
+ .feature_mask = RAPL_GFX,
+ .perf_subsys = "power",
+ .perf_name = "energy-gpu",
+ .msr = MSR_PP1_ENERGY_STATUS,
+ .msr_mask = 0xFFFFFFFFFFFFFFFF,
+ .msr_shift = 0,
+ .platform_rapl_msr_scale = &rapl_energy_units,
+ .rci_index = RAPL_RCI_INDEX_GFX,
+ .bic_number = BIC_GFX_J,
.compat_scale = 1.0,
.flags = RAPL_COUNTER_FLAG_USE_MSR_SUM,
},
@@ -1266,7 +1511,7 @@ static const struct rapl_counter_arch_info rapl_counter_arch_infos[] = {
.msr_shift = 0,
.platform_rapl_msr_scale = &rapl_time_units,
.rci_index = RAPL_RCI_INDEX_PKG_PERF_STATUS,
- .bic = BIC_PKG__,
+ .bic_number = BIC_PKG__,
.compat_scale = 100.0,
.flags = RAPL_COUNTER_FLAG_USE_MSR_SUM,
},
@@ -1279,7 +1524,7 @@ static const struct rapl_counter_arch_info rapl_counter_arch_infos[] = {
.msr_shift = 0,
.platform_rapl_msr_scale = &rapl_time_units,
.rci_index = RAPL_RCI_INDEX_DRAM_PERF_STATUS,
- .bic = BIC_RAM__,
+ .bic_number = BIC_RAM__,
.compat_scale = 100.0,
.flags = RAPL_COUNTER_FLAG_USE_MSR_SUM,
},
@@ -1292,7 +1537,20 @@ static const struct rapl_counter_arch_info rapl_counter_arch_infos[] = {
.msr_shift = 0,
.platform_rapl_msr_scale = &rapl_energy_units,
.rci_index = RAPL_RCI_INDEX_CORE_ENERGY,
- .bic = BIC_CorWatt | BIC_Cor_J,
+ .bic_number = BIC_CorWatt,
+ .compat_scale = 1.0,
+ .flags = 0,
+ },
+ {
+ .feature_mask = RAPL_AMD_F17H,
+ .perf_subsys = NULL,
+ .perf_name = NULL,
+ .msr = MSR_CORE_ENERGY_STAT,
+ .msr_mask = 0xFFFFFFFF,
+ .msr_shift = 0,
+ .platform_rapl_msr_scale = &rapl_energy_units,
+ .rci_index = RAPL_RCI_INDEX_CORE_ENERGY,
+ .bic_number = BIC_Cor_J,
.compat_scale = 1.0,
.flags = 0,
},
@@ -1305,7 +1563,20 @@ static const struct rapl_counter_arch_info rapl_counter_arch_infos[] = {
.msr_shift = 0,
.platform_rapl_msr_scale = &rapl_psys_energy_units,
.rci_index = RAPL_RCI_INDEX_ENERGY_PLATFORM,
- .bic = BIC_SysWatt | BIC_Sys_J,
+ .bic_number = BIC_SysWatt,
+ .compat_scale = 1.0,
+ .flags = RAPL_COUNTER_FLAG_PLATFORM_COUNTER | RAPL_COUNTER_FLAG_USE_MSR_SUM,
+ },
+ {
+ .feature_mask = RAPL_PSYS,
+ .perf_subsys = "power",
+ .perf_name = "energy-psys",
+ .msr = MSR_PLATFORM_ENERGY_STATUS,
+ .msr_mask = 0x00000000FFFFFFFF,
+ .msr_shift = 0,
+ .platform_rapl_msr_scale = &rapl_psys_energy_units,
+ .rci_index = RAPL_RCI_INDEX_ENERGY_PLATFORM,
+ .bic_number = BIC_Sys_J,
.compat_scale = 1.0,
.flags = RAPL_COUNTER_FLAG_PLATFORM_COUNTER | RAPL_COUNTER_FLAG_USE_MSR_SUM,
},
@@ -1354,7 +1625,7 @@ struct cstate_counter_arch_info {
const char *perf_name;
unsigned long long msr;
unsigned int rci_index; /* Maps data from perf counters to global variables */
- unsigned long long bic;
+ unsigned int bic_number;
unsigned long long flags;
int pkg_cstate_limit;
};
@@ -1366,7 +1637,7 @@ static struct cstate_counter_arch_info ccstate_counter_arch_infos[] = {
.perf_name = "c1-residency",
.msr = MSR_CORE_C1_RES,
.rci_index = CCSTATE_RCI_INDEX_C1_RESIDENCY,
- .bic = BIC_CPU_c1,
+ .bic_number = BIC_CPU_c1,
.flags = CSTATE_COUNTER_FLAG_COLLECT_PER_THREAD,
.pkg_cstate_limit = 0,
},
@@ -1376,7 +1647,7 @@ static struct cstate_counter_arch_info ccstate_counter_arch_infos[] = {
.perf_name = "c3-residency",
.msr = MSR_CORE_C3_RESIDENCY,
.rci_index = CCSTATE_RCI_INDEX_C3_RESIDENCY,
- .bic = BIC_CPU_c3,
+ .bic_number = BIC_CPU_c3,
.flags = CSTATE_COUNTER_FLAG_COLLECT_PER_CORE | CSTATE_COUNTER_FLAG_SOFT_C1_DEPENDENCY,
.pkg_cstate_limit = 0,
},
@@ -1386,7 +1657,7 @@ static struct cstate_counter_arch_info ccstate_counter_arch_infos[] = {
.perf_name = "c6-residency",
.msr = MSR_CORE_C6_RESIDENCY,
.rci_index = CCSTATE_RCI_INDEX_C6_RESIDENCY,
- .bic = BIC_CPU_c6,
+ .bic_number = BIC_CPU_c6,
.flags = CSTATE_COUNTER_FLAG_COLLECT_PER_CORE | CSTATE_COUNTER_FLAG_SOFT_C1_DEPENDENCY,
.pkg_cstate_limit = 0,
},
@@ -1396,7 +1667,7 @@ static struct cstate_counter_arch_info ccstate_counter_arch_infos[] = {
.perf_name = "c7-residency",
.msr = MSR_CORE_C7_RESIDENCY,
.rci_index = CCSTATE_RCI_INDEX_C7_RESIDENCY,
- .bic = BIC_CPU_c7,
+ .bic_number = BIC_CPU_c7,
.flags = CSTATE_COUNTER_FLAG_COLLECT_PER_CORE | CSTATE_COUNTER_FLAG_SOFT_C1_DEPENDENCY,
.pkg_cstate_limit = 0,
},
@@ -1406,7 +1677,7 @@ static struct cstate_counter_arch_info ccstate_counter_arch_infos[] = {
.perf_name = "c2-residency",
.msr = MSR_PKG_C2_RESIDENCY,
.rci_index = PCSTATE_RCI_INDEX_C2_RESIDENCY,
- .bic = BIC_Pkgpc2,
+ .bic_number = BIC_Pkgpc2,
.flags = 0,
.pkg_cstate_limit = PCL__2,
},
@@ -1416,7 +1687,7 @@ static struct cstate_counter_arch_info ccstate_counter_arch_infos[] = {
.perf_name = "c3-residency",
.msr = MSR_PKG_C3_RESIDENCY,
.rci_index = PCSTATE_RCI_INDEX_C3_RESIDENCY,
- .bic = BIC_Pkgpc3,
+ .bic_number = BIC_Pkgpc3,
.flags = 0,
.pkg_cstate_limit = PCL__3,
},
@@ -1426,7 +1697,7 @@ static struct cstate_counter_arch_info ccstate_counter_arch_infos[] = {
.perf_name = "c6-residency",
.msr = MSR_PKG_C6_RESIDENCY,
.rci_index = PCSTATE_RCI_INDEX_C6_RESIDENCY,
- .bic = BIC_Pkgpc6,
+ .bic_number = BIC_Pkgpc6,
.flags = 0,
.pkg_cstate_limit = PCL__6,
},
@@ -1436,7 +1707,7 @@ static struct cstate_counter_arch_info ccstate_counter_arch_infos[] = {
.perf_name = "c7-residency",
.msr = MSR_PKG_C7_RESIDENCY,
.rci_index = PCSTATE_RCI_INDEX_C7_RESIDENCY,
- .bic = BIC_Pkgpc7,
+ .bic_number = BIC_Pkgpc7,
.flags = 0,
.pkg_cstate_limit = PCL__7,
},
@@ -1446,7 +1717,7 @@ static struct cstate_counter_arch_info ccstate_counter_arch_infos[] = {
.perf_name = "c8-residency",
.msr = MSR_PKG_C8_RESIDENCY,
.rci_index = PCSTATE_RCI_INDEX_C8_RESIDENCY,
- .bic = BIC_Pkgpc8,
+ .bic_number = BIC_Pkgpc8,
.flags = 0,
.pkg_cstate_limit = PCL__8,
},
@@ -1456,7 +1727,7 @@ static struct cstate_counter_arch_info ccstate_counter_arch_infos[] = {
.perf_name = "c9-residency",
.msr = MSR_PKG_C9_RESIDENCY,
.rci_index = PCSTATE_RCI_INDEX_C9_RESIDENCY,
- .bic = BIC_Pkgpc9,
+ .bic_number = BIC_Pkgpc9,
.flags = 0,
.pkg_cstate_limit = PCL__9,
},
@@ -1466,7 +1737,7 @@ static struct cstate_counter_arch_info ccstate_counter_arch_infos[] = {
.perf_name = "c10-residency",
.msr = MSR_PKG_C10_RESIDENCY,
.rci_index = PCSTATE_RCI_INDEX_C10_RESIDENCY,
- .bic = BIC_Pkgpc10,
+ .bic_number = BIC_Pkgpc10,
.flags = 0,
.pkg_cstate_limit = PCL_10,
},
@@ -1631,7 +1902,7 @@ int pmt_telemdir_sort(const struct dirent **a, const struct dirent **b)
sscanf((*a)->d_name, "telem%u", &aidx);
sscanf((*b)->d_name, "telem%u", &bidx);
- return aidx >= bidx;
+ return (aidx > bidx) ? 1 : (aidx < bidx) ? -1 : 0;
}
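The one-line fix above matters because scandir()/qsort() comparators must return a three-way result (negative, zero, or positive); the old `aidx >= bidx` returned only 0 or 1, so the sort could mis-order entries. A small self-contained illustration of the contract (the values and idiom here are generic, not taken from the patch):

    #include <stdio.h>
    #include <stdlib.h>

    /* three-way comparator, same contract as the fixed pmt_telemdir_sort() */
    static int cmp_int(const void *a, const void *b)
    {
        int x = *(const int *)a, y = *(const int *)b;

        return (x > y) - (x < y);   /* yields -1, 0, or +1 */
    }

    int main(void)
    {
        int telem_idx[] = { 3, 1, 2 };

        qsort(telem_idx, 3, sizeof(telem_idx[0]), cmp_int);
        printf("%d %d %d\n", telem_idx[0], telem_idx[1], telem_idx[2]);   /* 1 2 3 */
        return 0;
    }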
const struct dirent *pmt_diriter_next(struct pmt_diriter_t *iter)
@@ -1732,6 +2003,10 @@ void pmt_counter_resize(struct pmt_counter *pcounter, unsigned int new_size)
pmt_counter_resize_(pcounter, new_size);
}
+struct llc_stats {
+ unsigned long long references;
+ unsigned long long misses;
+};
struct thread_data {
struct timeval tv_begin;
struct timeval tv_end;
@@ -1744,6 +2019,7 @@ struct thread_data {
unsigned long long irq_count;
unsigned long long nmi_count;
unsigned int smi_count;
+ struct llc_stats llc;
unsigned int cpu_id;
unsigned int apic_id;
unsigned int x2apic_id;
@@ -1822,8 +2098,6 @@ struct pkg_data {
((node_no) * topo.cores_per_node) + \
(core_no))
-#define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no)
-
/*
* The accumulated sum of MSR is defined as a monotonic
* increasing MSR, it will be accumulated periodically,
@@ -1861,7 +2135,7 @@ off_t idx_to_offset(int idx)
switch (idx) {
case IDX_PKG_ENERGY:
- if (platform->rapl_msrs & RAPL_AMD_F17H)
+ if (valid_rapl_msrs & RAPL_AMD_F17H)
offset = MSR_PKG_ENERGY_STAT;
else
offset = MSR_PKG_ENERGY_STATUS;
@@ -1927,19 +2201,19 @@ int idx_valid(int idx)
{
switch (idx) {
case IDX_PKG_ENERGY:
- return platform->rapl_msrs & (RAPL_PKG | RAPL_AMD_F17H);
+ return valid_rapl_msrs & (RAPL_PKG | RAPL_AMD_F17H);
case IDX_DRAM_ENERGY:
- return platform->rapl_msrs & RAPL_DRAM;
+ return valid_rapl_msrs & RAPL_DRAM;
case IDX_PP0_ENERGY:
- return platform->rapl_msrs & RAPL_CORE_ENERGY_STATUS;
+ return valid_rapl_msrs & RAPL_CORE_ENERGY_STATUS;
case IDX_PP1_ENERGY:
- return platform->rapl_msrs & RAPL_GFX;
+ return valid_rapl_msrs & RAPL_GFX;
case IDX_PKG_PERF:
- return platform->rapl_msrs & RAPL_PKG_PERF_STATUS;
+ return valid_rapl_msrs & RAPL_PKG_PERF_STATUS;
case IDX_DRAM_PERF:
- return platform->rapl_msrs & RAPL_DRAM_PERF_STATUS;
+ return valid_rapl_msrs & RAPL_DRAM_PERF_STATUS;
case IDX_PSYS_ENERGY:
- return platform->rapl_msrs & RAPL_PSYS;
+ return valid_rapl_msrs & RAPL_PSYS;
default:
return 0;
}
@@ -2018,6 +2292,7 @@ struct platform_counters {
struct cpu_topology {
int physical_package_id;
int die_id;
+ int l3_id;
int logical_cpu_id;
int physical_node_id;
int logical_node_id; /* 0-based count within the package */
@@ -2039,6 +2314,7 @@ struct topo_params {
int max_core_id;
int max_package_id;
int max_die_id;
+ int max_l3_id;
int max_node_num;
int nodes_per_pkg;
int cores_per_node;
@@ -2072,6 +2348,8 @@ int cpu_is_not_allowed(int cpu)
* skip non-present cpus
*/
+#define PER_THREAD_PARAMS struct thread_data *t, struct core_data *c, struct pkg_data *p
+
int for_all_cpus(int (func) (struct thread_data *, struct core_data *, struct pkg_data *),
struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base)
{
@@ -2085,16 +2363,15 @@ int for_all_cpus(int (func) (struct thread_data *, struct core_data *, struct pk
for (thread_no = 0; thread_no < topo.threads_per_core; ++thread_no) {
struct thread_data *t;
struct core_data *c;
- struct pkg_data *p;
+
t = GET_THREAD(thread_base, thread_no, core_no, node_no, pkg_no);
if (cpu_is_not_allowed(t->cpu_id))
continue;
c = GET_CORE(core_base, core_no, node_no, pkg_no);
- p = GET_PKG(pkg_base, pkg_no);
- retval |= func(t, c, p);
+ retval |= func(t, c, &pkg_base[pkg_no]);
}
}
}
@@ -2102,23 +2379,19 @@ int for_all_cpus(int (func) (struct thread_data *, struct core_data *, struct pk
return retval;
}
-int is_cpu_first_thread_in_core(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+int is_cpu_first_thread_in_core(struct thread_data *t, struct core_data *c)
{
- UNUSED(p);
-
return ((int)t->cpu_id == c->base_cpu || c->base_cpu < 0);
}
-int is_cpu_first_core_in_package(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+int is_cpu_first_core_in_package(struct thread_data *t, struct pkg_data *p)
{
- UNUSED(c);
-
return ((int)t->cpu_id == p->base_cpu || p->base_cpu < 0);
}
int is_cpu_first_thread_in_package(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
- return is_cpu_first_thread_in_core(t, c, p) && is_cpu_first_core_in_package(t, c, p);
+ return is_cpu_first_thread_in_core(t, c) && is_cpu_first_core_in_package(t, p);
}
int cpu_migrate(int cpu)
@@ -2140,13 +2413,11 @@ int get_msr_fd(int cpu)
if (fd)
return fd;
-
- sprintf(pathname, "/dev/cpu/%d/msr", cpu);
+ sprintf(pathname, use_android_msr_path ? "/dev/msr%d" : "/dev/cpu/%d/msr", cpu);
fd = open(pathname, O_RDONLY);
if (fd < 0)
- err(-1, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, "
- "or run with --no-msr, or run as root", pathname);
-
+ err(-1, "%s open failed, try chown or chmod +r %s, "
+ "or run with --no-msr, or run as root", pathname, use_android_msr_path ? "/dev/msr*" : "/dev/cpu/*/msr");
fd_percpu[cpu] = fd;
return fd;
@@ -2154,14 +2425,24 @@ int get_msr_fd(int cpu)
static void bic_disable_msr_access(void)
{
- const unsigned long bic_msrs = BIC_Mod_c6 | BIC_CoreTmp |
- BIC_Totl_c0 | BIC_Any_c0 | BIC_GFX_c0 | BIC_CPUGFX | BIC_PkgTmp;
-
- bic_enabled &= ~bic_msrs;
+ CLR_BIC(BIC_Mod_c6, &bic_enabled);
+ CLR_BIC(BIC_CoreTmp, &bic_enabled);
+ CLR_BIC(BIC_Totl_c0, &bic_enabled);
+ CLR_BIC(BIC_Any_c0, &bic_enabled);
+ CLR_BIC(BIC_GFX_c0, &bic_enabled);
+ CLR_BIC(BIC_CPUGFX, &bic_enabled);
+ CLR_BIC(BIC_PkgTmp, &bic_enabled);
free_sys_msr_counters();
}
+static void bic_disable_perf_access(void)
+{
+ CLR_BIC(BIC_IPC, &bic_enabled);
+ CLR_BIC(BIC_LLC_RPS, &bic_enabled);
+ CLR_BIC(BIC_LLC_HIT, &bic_enabled);
+}
+
static long perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu, int group_fd, unsigned long flags)
{
assert(!no_perf);
@@ -2215,32 +2496,52 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr)
return 0;
}
-int probe_rapl_msr(int cpu, off_t offset, int index)
+int add_msr_counter(int cpu, off_t offset)
{
ssize_t retval;
unsigned long long value;
- assert(!no_msr);
+ if (no_msr)
+ return -1;
+
+ if (!offset)
+ return -1;
retval = pread(get_msr_fd(cpu), &value, sizeof(value), offset);
/* if the read failed, the probe fails */
if (retval != sizeof(value))
- return 1;
+ return -1;
+
+ if (value == 0)
+ return 0;
+
+ return 1;
+}
+
+int add_rapl_msr_counter(int cpu, const struct rapl_counter_arch_info *cai)
+{
+ int ret;
+
+ if (!(valid_rapl_msrs & cai->feature_mask))
+ return -1;
+
+ ret = add_msr_counter(cpu, cai->msr);
+ if (ret < 0)
+ return -1;
- /* If an Energy Status Counter MSR returns 0, the probe fails */
- switch (index) {
+ switch (cai->rci_index) {
case RAPL_RCI_INDEX_ENERGY_PKG:
case RAPL_RCI_INDEX_ENERGY_CORES:
case RAPL_RCI_INDEX_DRAM:
case RAPL_RCI_INDEX_GFX:
case RAPL_RCI_INDEX_ENERGY_PLATFORM:
- if (value == 0)
+ if (ret == 0)
return 1;
}
/* PKG,DRAM_PERF_STATUS MSRs, can return any value */
- return 0;
+ return 1;
}
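With the refactor above, add_rapl_msr_counter() returns -1 when the counter cannot be used (feature masked out of valid_rapl_msrs, --no-msr, a zero MSR offset, or an unreadable MSR) and a non-negative value when it can. A hypothetical caller-side sketch of that convention (probe_one_rapl_counter() is illustrative, not part of the patch):

    /* hypothetical helper illustrating the return convention above */
    static int probe_one_rapl_counter(int cpu, const struct rapl_counter_arch_info *cai)
    {
        if (add_rapl_msr_counter(cpu, cai) < 0)
            return 0;   /* masked off, --no-msr, or MSR unreadable: skip */

        return 1;       /* MSR probed OK: safe to read via get_msr() */
    }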
/* Convert CPU ID to domain ID for given added perf counter. */
@@ -2265,6 +2566,8 @@ char *deferred_add_names[MAX_DEFERRED];
char *deferred_skip_names[MAX_DEFERRED];
int deferred_add_index;
int deferred_skip_index;
+unsigned int deferred_add_consumed;
+unsigned int deferred_skip_consumed;
/*
* HIDE_LIST - hide this list of counters, show the rest [default]
@@ -2327,8 +2630,7 @@ void help(void)
" degrees Celsius\n"
" -h, --help\n"
" print this help message\n"
- " -v, --version\n"
- " print version information\n\nFor more help, run \"man turbostat\"\n");
+ " -v, --version\n\t\tprint version information\n\nFor more help, run \"man turbostat\"\n");
}
/*
@@ -2336,10 +2638,9 @@ void help(void)
* for all the strings in comma separate name_list,
* set the approprate bit in return value.
*/
-unsigned long long bic_lookup(char *name_list, enum show_hide_mode mode)
+void bic_lookup(cpu_set_t *ret_set, char *name_list, enum show_hide_mode mode)
{
unsigned int i;
- unsigned long long retval = 0;
while (name_list) {
char *comma;
@@ -2351,46 +2652,49 @@ unsigned long long bic_lookup(char *name_list, enum show_hide_mode mode)
for (i = 0; i < MAX_BIC; ++i) {
if (!strcmp(name_list, bic[i].name)) {
- retval |= (1ULL << i);
+ SET_BIC(i, ret_set);
break;
}
if (!strcmp(name_list, "all")) {
- retval |= ~0;
+ bic_set_all(ret_set);
break;
} else if (!strcmp(name_list, "topology")) {
- retval |= BIC_GROUP_TOPOLOGY;
+ CPU_OR(ret_set, ret_set, &bic_group_topology);
break;
} else if (!strcmp(name_list, "power")) {
- retval |= BIC_GROUP_THERMAL_PWR;
+ CPU_OR(ret_set, ret_set, &bic_group_thermal_pwr);
break;
} else if (!strcmp(name_list, "idle")) {
- retval |= BIC_GROUP_IDLE;
+ CPU_OR(ret_set, ret_set, &bic_group_idle);
+ break;
+ } else if (!strcmp(name_list, "cache")) {
+ CPU_OR(ret_set, ret_set, &bic_group_cache);
+ break;
+ } else if (!strcmp(name_list, "llc")) {
+ CPU_OR(ret_set, ret_set, &bic_group_cache);
break;
} else if (!strcmp(name_list, "swidle")) {
- retval |= BIC_GROUP_SW_IDLE;
+ CPU_OR(ret_set, ret_set, &bic_group_sw_idle);
break;
} else if (!strcmp(name_list, "sysfs")) { /* legacy compatibility */
- retval |= BIC_GROUP_SW_IDLE;
+ CPU_OR(ret_set, ret_set, &bic_group_sw_idle);
break;
} else if (!strcmp(name_list, "hwidle")) {
- retval |= BIC_GROUP_HW_IDLE;
+ CPU_OR(ret_set, ret_set, &bic_group_hw_idle);
break;
} else if (!strcmp(name_list, "frequency")) {
- retval |= BIC_GROUP_FREQUENCY;
+ CPU_OR(ret_set, ret_set, &bic_group_frequency);
break;
} else if (!strcmp(name_list, "other")) {
- retval |= BIC_OTHER;
+ CPU_OR(ret_set, ret_set, &bic_group_other);
break;
}
-
}
if (i == MAX_BIC) {
- fprintf(stderr, "deferred %s\n", name_list);
if (mode == SHOW_LIST) {
deferred_add_names[deferred_add_index++] = name_list;
if (deferred_add_index >= MAX_DEFERRED) {
- fprintf(stderr, "More than max %d un-recognized --add options '%s'\n",
- MAX_DEFERRED, name_list);
+ fprintf(stderr, "More than max %d un-recognized --add options '%s'\n", MAX_DEFERRED, name_list);
help();
exit(1);
}
@@ -2399,8 +2703,7 @@ unsigned long long bic_lookup(char *name_list, enum show_hide_mode mode)
if (debug)
fprintf(stderr, "deferred \"%s\"\n", name_list);
if (deferred_skip_index >= MAX_DEFERRED) {
- fprintf(stderr, "More than max %d un-recognized --skip options '%s'\n",
- MAX_DEFERRED, name_list);
+ fprintf(stderr, "More than max %d un-recognized --skip options '%s'\n", MAX_DEFERRED, name_list);
help();
exit(1);
}
@@ -2412,7 +2715,47 @@ unsigned long long bic_lookup(char *name_list, enum show_hide_mode mode)
name_list++;
}
- return retval;
+}
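With bic_lookup() now filling a caller-supplied cpu_set_t, a typical invocation ORs group names and individual column names into one selection. A usage sketch, assuming the usual comma-separated list handling shown above (the list string must be writable, since parsing walks it in place):

    cpu_set_t show_set;
    char list[] = "topology,llc,IRQ";   /* hypothetical user selection */

    BIC_INIT(&show_set);
    bic_lookup(&show_set, list, SHOW_LIST);
    /* show_set now contains the topology group, the cache/LLC group,
     * and the individual IRQ column. */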
+
+/*
+ * print_name()
+ * Print the column header name for a raw 64-bit counter in 16 columns (at least 8 chars plus a tab).
+ * Otherwise, allow the name + tab to fit within an 8-column tab-stop.
+ * In both cases, left-justified, just like other turbostat columns,
+ * to allow the column values to consume the tab.
+ *
+ * Yes, 32-bit counters can overflow 8-columns, and
+ * 64-bit counters can overflow 16-columns, but that is uncommon.
+ */
+static inline int print_name(int width, int *printed, char *delim, char *name, enum counter_type type, enum counter_format format)
+{
+ UNUSED(type);
+
+ if (format == FORMAT_RAW && width >= 64)
+ return (sprintf(outp, "%s%-8s", (*printed++ ? delim : ""), name));
+ else
+ return (sprintf(outp, "%s%s", (*printed++ ? delim : ""), name));
+}
+
+static inline int print_hex_value(int width, int *printed, char *delim, unsigned long long value)
+{
+ if (width <= 32)
+ return (sprintf(outp, "%s%08x", (*printed++ ? delim : ""), (unsigned int)value));
+ else
+ return (sprintf(outp, "%s%016llx", (*printed++ ? delim : ""), value));
+}
+
+static inline int print_decimal_value(int width, int *printed, char *delim, unsigned long long value)
+{
+ if (width <= 32)
+ return (sprintf(outp, "%s%u", ((*printed)++ ? delim : ""), (unsigned int)value));
+ else
+ return (sprintf(outp, "%s%-8lld", ((*printed)++ ? delim : ""), value));
+}
+
+static inline int print_float_value(int *printed, char *delim, double value)
+{
+ return (sprintf(outp, "%s%.2f", ((*printed)++ ? delim : ""), value));
}
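All four helpers print at the global outp cursor and return the byte count, so call sites advance outp themselves; the int *printed parameter makes the delimiter appear only from the second column on. The call-site pattern, mirroring the loops in print_header() below (the column names here are made up for illustration):

    int printed = 0;

    outp += print_name(32, &printed, "\t", "IRQ", COUNTER_ITEMS, FORMAT_DELTA);  /* no leading tab */
    outp += print_name(64, &printed, "\t", "CNT64", COUNTER_ITEMS, FORMAT_RAW);  /* "\tCNT64   " */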
void print_header(char *delim)
@@ -2430,6 +2773,8 @@ void print_header(char *delim)
outp += sprintf(outp, "%sPackage", (printed++ ? delim : ""));
if (DO_BIC(BIC_Die))
outp += sprintf(outp, "%sDie", (printed++ ? delim : ""));
+ if (DO_BIC(BIC_L3))
+ outp += sprintf(outp, "%sL3", (printed++ ? delim : ""));
if (DO_BIC(BIC_Node))
outp += sprintf(outp, "%sNode", (printed++ ? delim : ""));
if (DO_BIC(BIC_Core))
@@ -2468,50 +2813,28 @@ void print_header(char *delim)
if (DO_BIC(BIC_SMI))
outp += sprintf(outp, "%sSMI", (printed++ ? delim : ""));
- for (mp = sys.tp; mp; mp = mp->next) {
+ if (DO_BIC(BIC_LLC_RPS))
+ outp += sprintf(outp, "%sLLCkRPS", (printed++ ? delim : ""));
- if (mp->format == FORMAT_RAW) {
- if (mp->width == 64)
- outp += sprintf(outp, "%s%18.18s", (printed++ ? delim : ""), mp->name);
- else
- outp += sprintf(outp, "%s%10.10s", (printed++ ? delim : ""), mp->name);
- } else {
- if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns)
- outp += sprintf(outp, "%s%8s", (printed++ ? delim : ""), mp->name);
- else
- outp += sprintf(outp, "%s%s", (printed++ ? delim : ""), mp->name);
- }
- }
+ if (DO_BIC(BIC_LLC_HIT))
+ outp += sprintf(outp, "%sLLC%%hit", (printed++ ? delim : ""));
- for (pp = sys.perf_tp; pp; pp = pp->next) {
+ for (mp = sys.tp; mp; mp = mp->next)
+ outp += print_name(mp->width, &printed, delim, mp->name, mp->type, mp->format);
- if (pp->format == FORMAT_RAW) {
- if (pp->width == 64)
- outp += sprintf(outp, "%s%18.18s", (printed++ ? delim : ""), pp->name);
- else
- outp += sprintf(outp, "%s%10.10s", (printed++ ? delim : ""), pp->name);
- } else {
- if ((pp->type == COUNTER_ITEMS) && sums_need_wide_columns)
- outp += sprintf(outp, "%s%8s", (printed++ ? delim : ""), pp->name);
- else
- outp += sprintf(outp, "%s%s", (printed++ ? delim : ""), pp->name);
- }
- }
+ for (pp = sys.perf_tp; pp; pp = pp->next)
+ outp += print_name(pp->width, &printed, delim, pp->name, pp->type, pp->format);
ppmt = sys.pmt_tp;
while (ppmt) {
switch (ppmt->type) {
case PMT_TYPE_RAW:
- if (pmt_counter_get_width(ppmt) <= 32)
- outp += sprintf(outp, "%s%10.10s", (printed++ ? delim : ""), ppmt->name);
- else
- outp += sprintf(outp, "%s%18.18s", (printed++ ? delim : ""), ppmt->name);
-
+ outp += print_name(pmt_counter_get_width(ppmt), &printed, delim, ppmt->name, COUNTER_ITEMS, ppmt->format);
break;
case PMT_TYPE_XTAL_TIME:
case PMT_TYPE_TCORE_CLOCK:
- outp += sprintf(outp, "%s%s", (printed++ ? delim : ""), ppmt->name);
+ outp += print_name(32, &printed, delim, ppmt->name, COUNTER_ITEMS, ppmt->format);
break;
}
@@ -2536,63 +2859,36 @@ void print_header(char *delim)
if (DO_BIC(BIC_CORE_THROT_CNT))
outp += sprintf(outp, "%sCoreThr", (printed++ ? delim : ""));
- if (platform->rapl_msrs && !rapl_joules) {
+ if (valid_rapl_msrs && !rapl_joules) {
if (DO_BIC(BIC_CorWatt) && platform->has_per_core_rapl)
outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : ""));
- } else if (platform->rapl_msrs && rapl_joules) {
+ } else if (valid_rapl_msrs && rapl_joules) {
if (DO_BIC(BIC_Cor_J) && platform->has_per_core_rapl)
outp += sprintf(outp, "%sCor_J", (printed++ ? delim : ""));
}
- for (mp = sys.cp; mp; mp = mp->next) {
- if (mp->format == FORMAT_RAW) {
- if (mp->width == 64)
- outp += sprintf(outp, "%s%18.18s", delim, mp->name);
- else
- outp += sprintf(outp, "%s%10.10s", delim, mp->name);
- } else {
- if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns)
- outp += sprintf(outp, "%s%8s", delim, mp->name);
- else
- outp += sprintf(outp, "%s%s", delim, mp->name);
- }
- }
-
- for (pp = sys.perf_cp; pp; pp = pp->next) {
+ for (mp = sys.cp; mp; mp = mp->next)
+ outp += print_name(mp->width, &printed, delim, mp->name, mp->type, mp->format);
- if (pp->format == FORMAT_RAW) {
- if (pp->width == 64)
- outp += sprintf(outp, "%s%18.18s", (printed++ ? delim : ""), pp->name);
- else
- outp += sprintf(outp, "%s%10.10s", (printed++ ? delim : ""), pp->name);
- } else {
- if ((pp->type == COUNTER_ITEMS) && sums_need_wide_columns)
- outp += sprintf(outp, "%s%8s", (printed++ ? delim : ""), pp->name);
- else
- outp += sprintf(outp, "%s%s", (printed++ ? delim : ""), pp->name);
- }
- }
+ for (pp = sys.perf_cp; pp; pp = pp->next)
+ outp += print_name(pp->width, &printed, delim, pp->name, pp->type, pp->format);
ppmt = sys.pmt_cp;
while (ppmt) {
switch (ppmt->type) {
case PMT_TYPE_RAW:
- if (pmt_counter_get_width(ppmt) <= 32)
- outp += sprintf(outp, "%s%10.10s", (printed++ ? delim : ""), ppmt->name);
- else
- outp += sprintf(outp, "%s%18.18s", (printed++ ? delim : ""), ppmt->name);
+ outp += print_name(pmt_counter_get_width(ppmt), &printed, delim, ppmt->name, COUNTER_ITEMS, ppmt->format);
break;
case PMT_TYPE_XTAL_TIME:
case PMT_TYPE_TCORE_CLOCK:
- outp += sprintf(outp, "%s%s", (printed++ ? delim : ""), ppmt->name);
+ outp += print_name(32, &printed, delim, ppmt->name, COUNTER_ITEMS, ppmt->format);
break;
}
ppmt = ppmt->next;
}
-
if (DO_BIC(BIC_PkgTmp))
outp += sprintf(outp, "%sPkgTmp", (printed++ ? delim : ""));
@@ -2644,7 +2940,7 @@ void print_header(char *delim)
if (DO_BIC(BIC_SYS_LPI))
outp += sprintf(outp, "%sSYS%%LPI", (printed++ ? delim : ""));
- if (platform->rapl_msrs && !rapl_joules) {
+ if (!rapl_joules) {
if (DO_BIC(BIC_PkgWatt))
outp += sprintf(outp, "%sPkgWatt", (printed++ ? delim : ""));
if (DO_BIC(BIC_CorWatt) && !platform->has_per_core_rapl)
@@ -2657,7 +2953,7 @@ void print_header(char *delim)
outp += sprintf(outp, "%sPKG_%%", (printed++ ? delim : ""));
if (DO_BIC(BIC_RAM__))
outp += sprintf(outp, "%sRAM_%%", (printed++ ? delim : ""));
- } else if (platform->rapl_msrs && rapl_joules) {
+ } else {
if (DO_BIC(BIC_Pkg_J))
outp += sprintf(outp, "%sPkg_J", (printed++ ? delim : ""));
if (DO_BIC(BIC_Cor_J) && !platform->has_per_core_rapl)
@@ -2674,51 +2970,22 @@ void print_header(char *delim)
if (DO_BIC(BIC_UNCORE_MHZ))
outp += sprintf(outp, "%sUncMHz", (printed++ ? delim : ""));
- for (mp = sys.pp; mp; mp = mp->next) {
- if (mp->format == FORMAT_RAW) {
- if (mp->width == 64)
- outp += sprintf(outp, "%s%18.18s", delim, mp->name);
- else if (mp->width == 32)
- outp += sprintf(outp, "%s%10.10s", delim, mp->name);
- else
- outp += sprintf(outp, "%s%7.7s", delim, mp->name);
- } else {
- if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns)
- outp += sprintf(outp, "%s%8s", delim, mp->name);
- else
- outp += sprintf(outp, "%s%7.7s", delim, mp->name);
- }
- }
+ for (mp = sys.pp; mp; mp = mp->next)
+ outp += print_name(mp->width, &printed, delim, mp->name, mp->type, mp->format);
- for (pp = sys.perf_pp; pp; pp = pp->next) {
-
- if (pp->format == FORMAT_RAW) {
- if (pp->width == 64)
- outp += sprintf(outp, "%s%18.18s", (printed++ ? delim : ""), pp->name);
- else
- outp += sprintf(outp, "%s%10.10s", (printed++ ? delim : ""), pp->name);
- } else {
- if ((pp->type == COUNTER_ITEMS) && sums_need_wide_columns)
- outp += sprintf(outp, "%s%8s", (printed++ ? delim : ""), pp->name);
- else
- outp += sprintf(outp, "%s%s", (printed++ ? delim : ""), pp->name);
- }
- }
+ for (pp = sys.perf_pp; pp; pp = pp->next)
+ outp += print_name(pp->width, &printed, delim, pp->name, pp->type, pp->format);
ppmt = sys.pmt_pp;
while (ppmt) {
switch (ppmt->type) {
case PMT_TYPE_RAW:
- if (pmt_counter_get_width(ppmt) <= 32)
- outp += sprintf(outp, "%s%10.10s", (printed++ ? delim : ""), ppmt->name);
- else
- outp += sprintf(outp, "%s%18.18s", (printed++ ? delim : ""), ppmt->name);
-
+ outp += print_name(pmt_counter_get_width(ppmt), &printed, delim, ppmt->name, COUNTER_ITEMS, ppmt->format);
break;
case PMT_TYPE_XTAL_TIME:
case PMT_TYPE_TCORE_CLOCK:
- outp += sprintf(outp, "%s%s", (printed++ ? delim : ""), ppmt->name);
+ outp += print_name(32, &printed, delim, ppmt->name, COUNTER_ITEMS, ppmt->format);
break;
}
@@ -2733,7 +3000,26 @@ void print_header(char *delim)
outp += sprintf(outp, "\n");
}
-int dump_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+/*
+ * pct()
+ *
+ * If the absolute value is < 1.1, return the ratio as a percentage;
+ * otherwise, return NaN.
+ *
+ * The return value is appropriate for printing percentages with %f,
+ * while flagging some obviously erroneous values.
+ */
+double pct(double d)
+{
+ double abs = fabs(d);
+
+ if (abs < 1.10)
+ return (100.0 * d);
+ return nan("");
+}
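pct() converts a 0..1-ish ratio into a percentage and deliberately poisons anything outside +/-110%, so wild counter glitches print as "nan" instead of a plausible-looking bogus number. A standalone demonstration (the trimmed copy of pct() is only for this example; link with -lm):

    #include <math.h>   /* fabs(), nan() */
    #include <stdio.h>

    static double pct(double d)   /* trimmed copy of the function above */
    {
        return fabs(d) < 1.10 ? 100.0 * d : nan("");
    }

    int main(void)
    {
        printf("%.2f\n", pct(0.25));    /* 25.00 */
        printf("%.2f\n", pct(-0.05));   /* -5.00: small negative jitter still prints */
        printf("%.2f\n", pct(1.50));    /* nan: more than 110% busy is flagged */
        return 0;
    }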
+
+int dump_counters(PER_THREAD_PARAMS)
{
int i;
struct msr_counter *mp;
@@ -2758,14 +3044,16 @@ int dump_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p
if (DO_BIC(BIC_SMI))
outp += sprintf(outp, "SMI: %d\n", t->smi_count);
+ outp += sprintf(outp, "LLC refs: %lld", t->llc.references);
+ outp += sprintf(outp, "LLC miss: %lld", t->llc.misses);
+ outp += sprintf(outp, "LLC Hit%%: %.2f", pct((t->llc.references - t->llc.misses) / t->llc.references));
+
for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
- outp +=
- sprintf(outp, "tADDED [%d] %8s msr0x%x: %08llX %s\n", i, mp->name, mp->msr_num,
- t->counter[i], mp->sp->path);
+ outp += sprintf(outp, "tADDED [%d] %8s msr0x%x: %08llX %s\n", i, mp->name, mp->msr_num, t->counter[i], mp->sp->path);
}
}
- if (c && is_cpu_first_thread_in_core(t, c, p)) {
+ if (c && is_cpu_first_thread_in_core(t, c)) {
outp += sprintf(outp, "core: %d\n", c->core_id);
outp += sprintf(outp, "c3: %016llX\n", c->c3);
outp += sprintf(outp, "c6: %016llX\n", c->c6);
@@ -2780,14 +3068,12 @@ int dump_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p
outp += sprintf(outp, "Joules: %0llX (scale: %lf)\n", energy_value, energy_scale);
for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
- outp +=
- sprintf(outp, "cADDED [%d] %8s msr0x%x: %08llX %s\n", i, mp->name, mp->msr_num,
- c->counter[i], mp->sp->path);
+ outp += sprintf(outp, "cADDED [%d] %8s msr0x%x: %08llX %s\n", i, mp->name, mp->msr_num, c->counter[i], mp->sp->path);
}
outp += sprintf(outp, "mc6_us: %016llX\n", c->mc6_us);
}
- if (p && is_cpu_first_core_in_package(t, c, p)) {
+ if (p && is_cpu_first_core_in_package(t, p)) {
outp += sprintf(outp, "package: %d\n", p->package_id);
outp += sprintf(outp, "Weighted cores: %016llX\n", p->pkg_wtd_core_c0);
@@ -2817,9 +3103,7 @@ int dump_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p
outp += sprintf(outp, "PTM: %dC\n", p->pkg_temp_c);
for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
- outp +=
- sprintf(outp, "pADDED [%d] %8s msr0x%x: %08llX %s\n", i, mp->name, mp->msr_num,
- p->counter[i], mp->sp->path);
+ outp += sprintf(outp, "pADDED [%d] %8s msr0x%x: %08llX %s\n", i, mp->name, mp->msr_num, p->counter[i], mp->sp->path);
}
}
@@ -2845,16 +3129,37 @@ double rapl_counter_get_value(const struct rapl_counter *c, enum rapl_unit desir
return scaled;
}
+void get_perf_llc_stats(int cpu, struct llc_stats *llc)
+{
+ struct read_format {
+ unsigned long long num_read;
+ struct llc_stats llc;
+ } r;
+ const ssize_t expected_read_size = sizeof(r);
+ ssize_t actual_read_size;
+
+ actual_read_size = read(fd_llc_percpu[cpu], &r, expected_read_size);
+
+ if (actual_read_size == -1)
+ err(-1, "%s(cpu%d): read of fd %d for %zd bytes failed", __func__, cpu, fd_llc_percpu[cpu], expected_read_size);
+
+ if (actual_read_size != expected_read_size)
+ warn("%s: short read of perf data (req %zd act %zd)", __func__, expected_read_size, actual_read_size);
+
+ llc->references = r.llc.references;
+ llc->misses = r.llc.misses;
+}
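The struct read_format above matches a perf event group read: with PERF_FORMAT_GROUP, read() returns the number of events followed by one u64 per event, here references then misses. One plausible way fd_llc_percpu[cpu] could be opened to produce that layout — a sketch under that assumption, not necessarily how the patch sets it up:

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* open a two-event LLC group on one CPU; read() on the returned
     * leader fd yields { nr, references, misses } as in read_format */
    static int open_llc_group(int cpu)
    {
        struct perf_event_attr attr;
        int leader, member;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CACHE_REFERENCES;
        attr.read_format = PERF_FORMAT_GROUP;

        leader = syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
        if (leader < 0)
            return -1;

        attr.config = PERF_COUNT_HW_CACHE_MISSES;
        member = syscall(__NR_perf_event_open, &attr, -1, cpu, leader, 0);
        if (member < 0) {
            close(leader);
            return -1;
        }

        return leader;
    }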
+
/*
* column formatting convention & formats
*/
-int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+int format_counters(PER_THREAD_PARAMS)
{
static int count;
struct platform_counters *pplat_cnt = NULL;
double interval_float, tsc;
- char *fmt8;
+ char *fmt8 = "%s%.2f";
+
int i;
struct msr_counter *mp;
struct perf_counter_info *pp;
@@ -2868,11 +3173,11 @@ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data
}
/* if showing only 1st thread in core and this isn't one, bail out */
- if (show_core_only && !is_cpu_first_thread_in_core(t, c, p))
+ if (show_core_only && !is_cpu_first_thread_in_core(t, c))
return 0;
/* if showing only 1st thread in pkg and this isn't one, bail out */
- if (show_pkg_only && !is_cpu_first_core_in_package(t, c, p))
+ if (show_pkg_only && !is_cpu_first_core_in_package(t, p))
return 0;
/*if not summary line and --cpu is used */
@@ -2901,6 +3206,8 @@ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data
outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
if (DO_BIC(BIC_Die))
outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+ if (DO_BIC(BIC_L3))
+ outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
if (DO_BIC(BIC_Node))
outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
if (DO_BIC(BIC_Core))
@@ -2924,10 +3231,15 @@ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data
else
outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
}
+ if (DO_BIC(BIC_L3)) {
+ if (c)
+ outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), cpus[t->cpu_id].l3_id);
+ else
+ outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+ }
if (DO_BIC(BIC_Node)) {
if (t)
- outp += sprintf(outp, "%s%d",
- (printed++ ? delim : ""), cpus[t->cpu_id].physical_node_id);
+ outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), cpus[t->cpu_id].physical_node_id);
else
outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
}
@@ -2949,15 +3261,13 @@ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data
outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), 1.0 / units * t->aperf / interval_float);
if (DO_BIC(BIC_Busy))
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->mperf / tsc);
+ outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), pct(t->mperf / tsc));
if (DO_BIC(BIC_Bzy_MHz)) {
if (has_base_hz)
- outp +=
- sprintf(outp, "%s%.0f", (printed++ ? delim : ""), base_hz / units * t->aperf / t->mperf);
+ outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), base_hz / units * t->aperf / t->mperf);
else
- outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""),
- tsc / units * t->aperf / t->mperf / interval_float);
+ outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), tsc / units * t->aperf / t->mperf / interval_float);
}
if (DO_BIC(BIC_TSC_MHz))
@@ -2986,96 +3296,81 @@ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data
if (DO_BIC(BIC_SMI))
outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->smi_count);
- /* Added counters */
+ /* LLC Stats */
+ if (DO_BIC(BIC_LLC_RPS) || DO_BIC(BIC_LLC_HIT)) {
+ if (DO_BIC(BIC_LLC_RPS))
+ outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), t->llc.references / interval_float / 1000);
+
+ if (DO_BIC(BIC_LLC_HIT))
+ outp += sprintf(outp, fmt8, (printed++ ? delim : ""), pct((double)(t->llc.references - t->llc.misses) / t->llc.references));
+ }
+
+ /* Added Thread Counters */
for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
- if (mp->format == FORMAT_RAW) {
- if (mp->width == 32)
- outp +=
- sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int)t->counter[i]);
- else
- outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), t->counter[i]);
- } else if (mp->format == FORMAT_DELTA) {
- if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns)
- outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), t->counter[i]);
- else
- outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), t->counter[i]);
- } else if (mp->format == FORMAT_PERCENT) {
+ if (mp->format == FORMAT_RAW)
+ outp += print_hex_value(mp->width, &printed, delim, t->counter[i]);
+ else if (mp->format == FORMAT_DELTA || mp->format == FORMAT_AVERAGE)
+ outp += print_decimal_value(mp->width, &printed, delim, t->counter[i]);
+ else if (mp->format == FORMAT_PERCENT) {
if (mp->type == COUNTER_USEC)
- outp +=
- sprintf(outp, "%s%.2f", (printed++ ? delim : ""),
- t->counter[i] / interval_float / 10000);
+ outp += print_float_value(&printed, delim, t->counter[i] / interval_float / 10000);
else
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->counter[i] / tsc);
+ outp += print_float_value(&printed, delim, pct(t->counter[i] / tsc));
}
}
- /* Added perf counters */
+ /* Added perf Thread Counters */
for (i = 0, pp = sys.perf_tp; pp; ++i, pp = pp->next) {
- if (pp->format == FORMAT_RAW) {
- if (pp->width == 32)
- outp +=
- sprintf(outp, "%s0x%08x", (printed++ ? delim : ""),
- (unsigned int)t->perf_counter[i]);
- else
- outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), t->perf_counter[i]);
- } else if (pp->format == FORMAT_DELTA) {
- if ((pp->type == COUNTER_ITEMS) && sums_need_wide_columns)
- outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), t->perf_counter[i]);
- else
- outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), t->perf_counter[i]);
- } else if (pp->format == FORMAT_PERCENT) {
+ if (pp->format == FORMAT_RAW)
+ outp += print_hex_value(pp->width, &printed, delim, t->perf_counter[i]);
+ else if (pp->format == FORMAT_DELTA || pp->format == FORMAT_AVERAGE)
+ outp += print_decimal_value(pp->width, &printed, delim, t->perf_counter[i]);
+ else if (pp->format == FORMAT_PERCENT) {
if (pp->type == COUNTER_USEC)
- outp +=
- sprintf(outp, "%s%.2f", (printed++ ? delim : ""),
- t->perf_counter[i] / interval_float / 10000);
+ outp += print_float_value(&printed, delim, t->perf_counter[i] / interval_float / 10000);
else
- outp +=
- sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->perf_counter[i] / tsc);
+ outp += print_float_value(&printed, delim, pct(t->perf_counter[i] / tsc));
}
}
+ /* Added PMT Thread Counters */
for (i = 0, ppmt = sys.pmt_tp; ppmt; i++, ppmt = ppmt->next) {
const unsigned long value_raw = t->pmt_counter[i];
double value_converted;
switch (ppmt->type) {
case PMT_TYPE_RAW:
- if (pmt_counter_get_width(ppmt) <= 32)
- outp += sprintf(outp, "%s0x%08x", (printed++ ? delim : ""),
- (unsigned int)t->pmt_counter[i]);
- else
- outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), t->pmt_counter[i]);
-
+ outp += print_hex_value(pmt_counter_get_width(ppmt), &printed, delim, t->pmt_counter[i]);
break;
case PMT_TYPE_XTAL_TIME:
- value_converted = 100.0 * value_raw / crystal_hz / interval_float;
+ value_converted = pct(value_raw / crystal_hz / interval_float);
outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), value_converted);
break;
case PMT_TYPE_TCORE_CLOCK:
- value_converted = 100.0 * value_raw / tcore_clock_freq_hz / interval_float;
+ value_converted = pct(value_raw / tcore_clock_freq_hz / interval_float);
outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), value_converted);
}
}
/* C1 */
if (DO_BIC(BIC_CPU_c1))
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->c1 / tsc);
+ outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), pct(t->c1 / tsc));
/* print per-core data only for 1st thread in core */
- if (!is_cpu_first_thread_in_core(t, c, p))
+ if (!is_cpu_first_thread_in_core(t, c))
goto done;
if (DO_BIC(BIC_CPU_c3))
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c3 / tsc);
+ outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), pct(c->c3 / tsc));
if (DO_BIC(BIC_CPU_c6))
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c6 / tsc);
+ outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), pct(c->c6 / tsc));
if (DO_BIC(BIC_CPU_c7))
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c7 / tsc);
+ outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), pct(c->c7 / tsc));
/* Mod%c6 */
if (DO_BIC(BIC_Mod_c6))
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->mc6_us / tsc);
+ outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), pct(c->mc6_us / tsc));
if (DO_BIC(BIC_CoreTmp))
outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), c->core_temp_c);
@@ -3084,77 +3379,53 @@ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data
if (DO_BIC(BIC_CORE_THROT_CNT))
outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), c->core_throt_cnt);
+ /* Added Core Counters */
for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
- if (mp->format == FORMAT_RAW) {
- if (mp->width == 32)
- outp +=
- sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int)c->counter[i]);
- else
- outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), c->counter[i]);
- } else if (mp->format == FORMAT_DELTA) {
- if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns)
- outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), c->counter[i]);
- else
- outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), c->counter[i]);
- } else if (mp->format == FORMAT_PERCENT) {
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->counter[i] / tsc);
- }
+ if (mp->format == FORMAT_RAW)
+ outp += print_hex_value(mp->width, &printed, delim, c->counter[i]);
+ else if (mp->format == FORMAT_DELTA || mp->format == FORMAT_AVERAGE)
+ outp += print_decimal_value(mp->width, &printed, delim, c->counter[i]);
+ else if (mp->format == FORMAT_PERCENT)
+ outp += print_float_value(&printed, delim, pct(c->counter[i] / tsc));
}
+ /* Added perf Core Counters */
for (i = 0, pp = sys.perf_cp; pp; i++, pp = pp->next) {
- if (pp->format == FORMAT_RAW) {
- if (pp->width == 32)
- outp +=
- sprintf(outp, "%s0x%08x", (printed++ ? delim : ""),
- (unsigned int)c->perf_counter[i]);
- else
- outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), c->perf_counter[i]);
- } else if (pp->format == FORMAT_DELTA) {
- if ((pp->type == COUNTER_ITEMS) && sums_need_wide_columns)
- outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), c->perf_counter[i]);
- else
- outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), c->perf_counter[i]);
- } else if (pp->format == FORMAT_PERCENT) {
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->perf_counter[i] / tsc);
- }
+ if (pp->format == FORMAT_RAW)
+ outp += print_hex_value(pp->width, &printed, delim, c->perf_counter[i]);
+ else if (pp->format == FORMAT_DELTA || pp->format == FORMAT_AVERAGE)
+ outp += print_decimal_value(pp->width, &printed, delim, c->perf_counter[i]);
+ else if (pp->format == FORMAT_PERCENT)
+ outp += print_float_value(&printed, delim, pct(c->perf_counter[i] / tsc));
}
+ /* Added PMT Core Counters */
for (i = 0, ppmt = sys.pmt_cp; ppmt; i++, ppmt = ppmt->next) {
const unsigned long value_raw = c->pmt_counter[i];
double value_converted;
switch (ppmt->type) {
case PMT_TYPE_RAW:
- if (pmt_counter_get_width(ppmt) <= 32)
- outp += sprintf(outp, "%s0x%08x", (printed++ ? delim : ""),
- (unsigned int)c->pmt_counter[i]);
- else
- outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), c->pmt_counter[i]);
-
+ outp += print_hex_value(pmt_counter_get_width(ppmt), &printed, delim, c->pmt_counter[i]);
break;
case PMT_TYPE_XTAL_TIME:
- value_converted = 100.0 * value_raw / crystal_hz / interval_float;
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), value_converted);
+ value_converted = pct((double)value_raw / crystal_hz / interval_float);
+ outp += print_float_value(&printed, delim, value_converted);
break;
case PMT_TYPE_TCORE_CLOCK:
- value_converted = 100.0 * value_raw / tcore_clock_freq_hz / interval_float;
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), value_converted);
+ value_converted = pct((double)value_raw / tcore_clock_freq_hz / interval_float);
+ outp += print_float_value(&printed, delim, value_converted);
}
}
- fmt8 = "%s%.2f";
-
if (DO_BIC(BIC_CorWatt) && platform->has_per_core_rapl)
- outp +=
- sprintf(outp, fmt8, (printed++ ? delim : ""),
- rapl_counter_get_value(&c->core_energy, RAPL_UNIT_WATTS, interval_float));
+ outp += sprintf(outp, fmt8, (printed++ ? delim : ""), rapl_counter_get_value(&c->core_energy, RAPL_UNIT_WATTS, interval_float));
if (DO_BIC(BIC_Cor_J) && platform->has_per_core_rapl)
- outp += sprintf(outp, fmt8, (printed++ ? delim : ""),
- rapl_counter_get_value(&c->core_energy, RAPL_UNIT_JOULES, interval_float));
+ outp += sprintf(outp, fmt8, (printed++ ? delim : ""), rapl_counter_get_value(&c->core_energy, RAPL_UNIT_JOULES, interval_float));
/* print per-package data only for 1st core in package */
- if (!is_cpu_first_core_in_package(t, c, p))
+ if (!is_cpu_first_core_in_package(t, p))
goto done;
/* PkgTmp */
@@ -3166,8 +3437,7 @@ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data
if (p->gfx_rc6_ms == -1) { /* detect GFX counter reset */
outp += sprintf(outp, "%s**.**", (printed++ ? delim : ""));
} else {
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""),
- p->gfx_rc6_ms / 10.0 / interval_float);
+ outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), p->gfx_rc6_ms / 10.0 / interval_float);
}
}
@@ -3184,8 +3454,7 @@ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data
if (p->sam_mc6_ms == -1) { /* detect GFX counter reset */
outp += sprintf(outp, "%s**.**", (printed++ ? delim : ""));
} else {
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""),
- p->sam_mc6_ms / 10.0 / interval_float);
+ outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), p->sam_mc6_ms / 10.0 / interval_float);
}
}
@@ -3199,150 +3468,112 @@ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data
/* Totl%C0, Any%C0 GFX%C0 CPUGFX% */
if (DO_BIC(BIC_Totl_c0))
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_wtd_core_c0 / tsc);
+ outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100 * p->pkg_wtd_core_c0 / tsc); /* can exceed 100% */
if (DO_BIC(BIC_Any_c0))
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_any_core_c0 / tsc);
+ outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), pct(p->pkg_any_core_c0 / tsc));
if (DO_BIC(BIC_GFX_c0))
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_any_gfxe_c0 / tsc);
+ outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), pct(p->pkg_any_gfxe_c0 / tsc));
if (DO_BIC(BIC_CPUGFX))
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_both_core_gfxe_c0 / tsc);
+ outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), pct(p->pkg_both_core_gfxe_c0 / tsc));
if (DO_BIC(BIC_Pkgpc2))
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc2 / tsc);
+ outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), pct(p->pc2 / tsc));
if (DO_BIC(BIC_Pkgpc3))
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc3 / tsc);
+ outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), pct(p->pc3 / tsc));
if (DO_BIC(BIC_Pkgpc6))
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc6 / tsc);
+ outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), pct(p->pc6 / tsc));
if (DO_BIC(BIC_Pkgpc7))
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc7 / tsc);
+ outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), pct(p->pc7 / tsc));
if (DO_BIC(BIC_Pkgpc8))
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc8 / tsc);
+ outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), pct(p->pc8 / tsc));
if (DO_BIC(BIC_Pkgpc9))
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc9 / tsc);
+ outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), pct(p->pc9 / tsc));
if (DO_BIC(BIC_Pkgpc10))
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc10 / tsc);
+ outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), pct(p->pc10 / tsc));
if (DO_BIC(BIC_Diec6))
- outp +=
- sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->die_c6 / crystal_hz / interval_float);
+ outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), pct(p->die_c6 / crystal_hz / interval_float));
if (DO_BIC(BIC_CPU_LPI)) {
if (p->cpu_lpi >= 0)
- outp +=
- sprintf(outp, "%s%.2f", (printed++ ? delim : ""),
- 100.0 * p->cpu_lpi / 1000000.0 / interval_float);
+ outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), pct(p->cpu_lpi / 1000000.0 / interval_float));
else
outp += sprintf(outp, "%s(neg)", (printed++ ? delim : ""));
}
if (DO_BIC(BIC_SYS_LPI)) {
if (p->sys_lpi >= 0)
- outp +=
- sprintf(outp, "%s%.2f", (printed++ ? delim : ""),
- 100.0 * p->sys_lpi / 1000000.0 / interval_float);
+ outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), pct(p->sys_lpi / 1000000.0 / interval_float));
else
outp += sprintf(outp, "%s(neg)", (printed++ ? delim : ""));
}
if (DO_BIC(BIC_PkgWatt))
- outp +=
- sprintf(outp, fmt8, (printed++ ? delim : ""),
- rapl_counter_get_value(&p->energy_pkg, RAPL_UNIT_WATTS, interval_float));
+ outp += sprintf(outp, fmt8, (printed++ ? delim : ""), rapl_counter_get_value(&p->energy_pkg, RAPL_UNIT_WATTS, interval_float));
if (DO_BIC(BIC_CorWatt) && !platform->has_per_core_rapl)
- outp +=
- sprintf(outp, fmt8, (printed++ ? delim : ""),
- rapl_counter_get_value(&p->energy_cores, RAPL_UNIT_WATTS, interval_float));
+ outp += sprintf(outp, fmt8, (printed++ ? delim : ""), rapl_counter_get_value(&p->energy_cores, RAPL_UNIT_WATTS, interval_float));
if (DO_BIC(BIC_GFXWatt))
- outp +=
- sprintf(outp, fmt8, (printed++ ? delim : ""),
- rapl_counter_get_value(&p->energy_gfx, RAPL_UNIT_WATTS, interval_float));
+ outp += sprintf(outp, fmt8, (printed++ ? delim : ""), rapl_counter_get_value(&p->energy_gfx, RAPL_UNIT_WATTS, interval_float));
if (DO_BIC(BIC_RAMWatt))
- outp +=
- sprintf(outp, fmt8, (printed++ ? delim : ""),
- rapl_counter_get_value(&p->energy_dram, RAPL_UNIT_WATTS, interval_float));
+ outp += sprintf(outp, fmt8, (printed++ ? delim : ""), rapl_counter_get_value(&p->energy_dram, RAPL_UNIT_WATTS, interval_float));
if (DO_BIC(BIC_Pkg_J))
- outp += sprintf(outp, fmt8, (printed++ ? delim : ""),
- rapl_counter_get_value(&p->energy_pkg, RAPL_UNIT_JOULES, interval_float));
+ outp += sprintf(outp, fmt8, (printed++ ? delim : ""), rapl_counter_get_value(&p->energy_pkg, RAPL_UNIT_JOULES, interval_float));
if (DO_BIC(BIC_Cor_J) && !platform->has_per_core_rapl)
- outp += sprintf(outp, fmt8, (printed++ ? delim : ""),
- rapl_counter_get_value(&p->energy_cores, RAPL_UNIT_JOULES, interval_float));
+ outp += sprintf(outp, fmt8, (printed++ ? delim : ""), rapl_counter_get_value(&p->energy_cores, RAPL_UNIT_JOULES, interval_float));
if (DO_BIC(BIC_GFX_J))
- outp += sprintf(outp, fmt8, (printed++ ? delim : ""),
- rapl_counter_get_value(&p->energy_gfx, RAPL_UNIT_JOULES, interval_float));
+ outp += sprintf(outp, fmt8, (printed++ ? delim : ""), rapl_counter_get_value(&p->energy_gfx, RAPL_UNIT_JOULES, interval_float));
if (DO_BIC(BIC_RAM_J))
- outp += sprintf(outp, fmt8, (printed++ ? delim : ""),
- rapl_counter_get_value(&p->energy_dram, RAPL_UNIT_JOULES, interval_float));
+ outp += sprintf(outp, fmt8, (printed++ ? delim : ""), rapl_counter_get_value(&p->energy_dram, RAPL_UNIT_JOULES, interval_float));
if (DO_BIC(BIC_PKG__))
outp +=
- sprintf(outp, fmt8, (printed++ ? delim : ""),
- rapl_counter_get_value(&p->rapl_pkg_perf_status, RAPL_UNIT_WATTS, interval_float));
+ sprintf(outp, fmt8, (printed++ ? delim : ""), rapl_counter_get_value(&p->rapl_pkg_perf_status, RAPL_UNIT_WATTS, interval_float));
if (DO_BIC(BIC_RAM__))
outp +=
- sprintf(outp, fmt8, (printed++ ? delim : ""),
- rapl_counter_get_value(&p->rapl_dram_perf_status, RAPL_UNIT_WATTS, interval_float));
+ sprintf(outp, fmt8, (printed++ ? delim : ""), rapl_counter_get_value(&p->rapl_dram_perf_status, RAPL_UNIT_WATTS, interval_float));
/* UncMHz */
if (DO_BIC(BIC_UNCORE_MHZ))
outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->uncore_mhz);
+ /* Added Package Counters */
for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
- if (mp->format == FORMAT_RAW) {
- if (mp->width == 32)
- outp +=
- sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int)p->counter[i]);
- else
- outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), p->counter[i]);
- } else if (mp->format == FORMAT_DELTA) {
- if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns)
- outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), p->counter[i]);
- else
- outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), p->counter[i]);
- } else if (mp->format == FORMAT_PERCENT) {
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->counter[i] / tsc);
- } else if (mp->type == COUNTER_K2M)
+ if (mp->format == FORMAT_RAW)
+ outp += print_hex_value(mp->width, &printed, delim, p->counter[i]);
+ else if (mp->type == COUNTER_K2M)
outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), (unsigned int)p->counter[i] / 1000);
+ else if (mp->format == FORMAT_DELTA || mp->format == FORMAT_AVERAGE)
+ outp += print_decimal_value(mp->width, &printed, delim, p->counter[i]);
+ else if (mp->format == FORMAT_PERCENT)
+ outp += print_float_value(&printed, delim, pct(p->counter[i] / tsc));
}
+ /* Added perf Package Counters */
for (i = 0, pp = sys.perf_pp; pp; i++, pp = pp->next) {
- if (pp->format == FORMAT_RAW) {
- if (pp->width == 32)
- outp +=
- sprintf(outp, "%s0x%08x", (printed++ ? delim : ""),
- (unsigned int)p->perf_counter[i]);
- else
- outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), p->perf_counter[i]);
- } else if (pp->format == FORMAT_DELTA) {
- if ((pp->type == COUNTER_ITEMS) && sums_need_wide_columns)
- outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), p->perf_counter[i]);
- else
- outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), p->perf_counter[i]);
- } else if (pp->format == FORMAT_PERCENT) {
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->perf_counter[i] / tsc);
- } else if (pp->type == COUNTER_K2M) {
- outp +=
- sprintf(outp, "%s%d", (printed++ ? delim : ""), (unsigned int)p->perf_counter[i] / 1000);
- }
+ if (pp->format == FORMAT_RAW)
+ outp += print_hex_value(pp->width, &printed, delim, p->perf_counter[i]);
+ else if (pp->type == COUNTER_K2M)
+ outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), (unsigned int)p->perf_counter[i] / 1000);
+ else if (pp->format == FORMAT_DELTA || pp->format == FORMAT_AVERAGE)
+ outp += print_decimal_value(pp->width, &printed, delim, p->perf_counter[i]);
+ else if (pp->format == FORMAT_PERCENT)
+ outp += print_float_value(&printed, delim, pct(p->perf_counter[i] / tsc));
}
+ /* Added PMT Package Counters */
for (i = 0, ppmt = sys.pmt_pp; ppmt; i++, ppmt = ppmt->next) {
const unsigned long value_raw = p->pmt_counter[i];
double value_converted;
switch (ppmt->type) {
case PMT_TYPE_RAW:
- if (pmt_counter_get_width(ppmt) <= 32)
- outp += sprintf(outp, "%s0x%08x", (printed++ ? delim : ""),
- (unsigned int)p->pmt_counter[i]);
- else
- outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), p->pmt_counter[i]);
-
+ outp += print_hex_value(pmt_counter_get_width(ppmt), &printed, delim, p->pmt_counter[i]);
break;
case PMT_TYPE_XTAL_TIME:
- value_converted = 100.0 * value_raw / crystal_hz / interval_float;
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), value_converted);
+ value_converted = pct((double)value_raw / crystal_hz / interval_float);
+ outp += print_float_value(&printed, delim, value_converted);
break;
case PMT_TYPE_TCORE_CLOCK:
- value_converted = 100.0 * value_raw / tcore_clock_freq_hz / interval_float;
- outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), value_converted);
+ value_converted = pct((double)value_raw / tcore_clock_freq_hz / interval_float);
+ outp += print_float_value(&printed, delim, value_converted);
}
}
@@ -3382,7 +3613,7 @@ void flush_output_stderr(void)
outp = output_buffer;
}
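The PER_THREAD_PARAMS macro used in the converted signatures below is defined in an earlier hunk. Since every converted function previously took the same three pointers, it is presumably:

	/* assumed definition, consistent with every call site in this patch */
	#define PER_THREAD_PARAMS struct thread_data *t, struct core_data *c, struct pkg_data *p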
-void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+void format_all_counters(PER_THREAD_PARAMS)
{
static int count;
@@ -3457,8 +3688,7 @@ int delta_package(struct pkg_data *new, struct pkg_data *old)
old->energy_gfx.raw_value = new->energy_gfx.raw_value - old->energy_gfx.raw_value;
old->energy_dram.raw_value = new->energy_dram.raw_value - old->energy_dram.raw_value;
old->rapl_pkg_perf_status.raw_value = new->rapl_pkg_perf_status.raw_value - old->rapl_pkg_perf_status.raw_value;
- old->rapl_dram_perf_status.raw_value =
- new->rapl_dram_perf_status.raw_value - old->rapl_dram_perf_status.raw_value;
+ old->rapl_dram_perf_status.raw_value = new->rapl_dram_perf_status.raw_value - old->rapl_dram_perf_status.raw_value;
for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
if (mp->format == FORMAT_RAW)
@@ -3505,7 +3735,7 @@ void delta_core(struct core_data *new, struct core_data *old)
DELTA_WRAP32(new->core_energy.raw_value, old->core_energy.raw_value);
for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
- if (mp->format == FORMAT_RAW)
+ if (mp->format == FORMAT_RAW || mp->format == FORMAT_AVERAGE)
old->counter[i] = new->counter[i];
else
old->counter[i] = new->counter[i] - old->counter[i];
@@ -3565,8 +3795,7 @@ int delta_thread(struct thread_data *new, struct thread_data *old, struct core_d
/* check for TSC < 1 Mcycles over interval */
if (old->tsc < (1000 * 1000))
errx(-3, "Insanely slow TSC rate, TSC stops in idle?\n"
- "You can disable all c-states by booting with \"idle=poll\"\n"
- "or just the deep ones with \"processor.max_cstate=1\"");
+ "You can disable all c-states by booting with \"idle=poll\"\n" "or just the deep ones with \"processor.max_cstate=1\"");
old->c1 = new->c1 - old->c1;
@@ -3595,8 +3824,7 @@ int delta_thread(struct thread_data *new, struct thread_data *old, struct core_d
old->c1 = 0;
else {
/* normal case, derive c1 */
- old->c1 = (old->tsc * tsc_tweak) - old->mperf - core_delta->c3
- - core_delta->c6 - core_delta->c7;
+ old->c1 = (old->tsc * tsc_tweak) - old->mperf - core_delta->c3 - core_delta->c6 - core_delta->c7;
}
}
@@ -3618,8 +3846,14 @@ int delta_thread(struct thread_data *new, struct thread_data *old, struct core_d
if (DO_BIC(BIC_SMI))
old->smi_count = new->smi_count - old->smi_count;
+ if (DO_BIC(BIC_LLC_RPS) || DO_BIC(BIC_LLC_HIT))
+ old->llc.references = new->llc.references - old->llc.references;
+
+ if (DO_BIC(BIC_LLC_HIT))
+ old->llc.misses = new->llc.misses - old->llc.misses;
+
for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
- if (mp->format == FORMAT_RAW)
+ if (mp->format == FORMAT_RAW || mp->format == FORMAT_AVERAGE)
old->counter[i] = new->counter[i];
else
old->counter[i] = new->counter[i] - old->counter[i];
@@ -3642,20 +3876,19 @@ int delta_thread(struct thread_data *new, struct thread_data *old, struct core_d
return 0;
}
-int delta_cpu(struct thread_data *t, struct core_data *c,
- struct pkg_data *p, struct thread_data *t2, struct core_data *c2, struct pkg_data *p2)
+int delta_cpu(struct thread_data *t, struct core_data *c, struct pkg_data *p, struct thread_data *t2, struct core_data *c2, struct pkg_data *p2)
{
int retval = 0;
/* calculate core delta only for 1st thread in core */
- if (is_cpu_first_thread_in_core(t, c, p))
+ if (is_cpu_first_thread_in_core(t, c))
delta_core(c, c2);
/* always calculate thread delta */
retval = delta_thread(t, t2, c2); /* c2 is core delta */
/* calculate package delta only for 1st core in package */
- if (is_cpu_first_core_in_package(t, c, p))
+ if (is_cpu_first_core_in_package(t, p))
retval |= delta_package(p, p2);
return retval;
@@ -3673,7 +3906,7 @@ void rapl_counter_clear(struct rapl_counter *c)
c->unit = RAPL_UNIT_INVALID;
}
-void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+void clear_counters(PER_THREAD_PARAMS)
{
int i;
struct msr_counter *mp;
@@ -3696,6 +3929,9 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
t->nmi_count = 0;
t->smi_count = 0;
+ t->llc.references = 0;
+ t->llc.misses = 0;
+
c->c3 = 0;
c->c6 = 0;
c->c7 = 0;
@@ -3704,6 +3940,9 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
rapl_counter_clear(&c->core_energy);
c->core_throt_cnt = 0;
p->pkg_wtd_core_c0 = 0;
p->pkg_any_core_c0 = 0;
p->pkg_any_gfxe_c0 = 0;
@@ -3770,7 +4009,7 @@ void rapl_counter_accumulate(struct rapl_counter *dst, const struct rapl_counter
dst->raw_value += src->raw_value;
}
-int sum_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+int sum_counters(PER_THREAD_PARAMS)
{
int i;
struct msr_counter *mp;
@@ -3801,6 +4040,9 @@ int sum_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
average.threads.nmi_count += t->nmi_count;
average.threads.smi_count += t->smi_count;
+ average.threads.llc.references += t->llc.references;
+ average.threads.llc.misses += t->llc.misses;
+
for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
if (mp->format == FORMAT_RAW)
continue;
@@ -3818,7 +4060,7 @@ int sum_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
}
/* sum per-core values only for 1st thread in core */
- if (!is_cpu_first_thread_in_core(t, c, p))
+ if (!is_cpu_first_thread_in_core(t, c))
return 0;
average.cores.c3 += c->c3;
@@ -3848,7 +4090,7 @@ int sum_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
}
/* sum per-pkg values only for 1st core in pkg */
- if (!is_cpu_first_core_in_package(t, c, p))
+ if (!is_cpu_first_core_in_package(t, p))
return 0;
if (DO_BIC(BIC_Totl_c0))
@@ -3918,7 +4160,7 @@ int sum_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
* sum the counters for all cpus in the system
* compute the weighted average
*/
-void compute_average(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+void compute_average(PER_THREAD_PARAMS)
{
int i;
struct msr_counter *mp;
@@ -3943,7 +4185,6 @@ void compute_average(struct thread_data *t, struct core_data *c, struct pkg_data
if (average.threads.nmi_count > 9999999)
sums_need_wide_columns = 1;
-
average.cores.c3 /= topo.allowed_cores;
average.cores.c6 /= topo.allowed_cores;
average.cores.c7 /= topo.allowed_cores;
@@ -4115,8 +4356,7 @@ unsigned long long get_legacy_uncore_mhz(int package)
*/
for (die = 0; die <= topo.max_die_id; ++die) {
- sprintf(path, "/sys/devices/system/cpu/intel_uncore_frequency/package_%02d_die_%02d/current_freq_khz",
- package, die);
+ sprintf(path, "/sys/devices/system/cpu/intel_uncore_frequency/package_%02d_die_%02d/current_freq_khz", package, die);
if (access(path, R_OK) == 0)
return (snapshot_sysfs_counter(path) / 1000);
@@ -4227,11 +4467,6 @@ int get_core_throt_cnt(int cpu, unsigned long long *cnt)
return 0;
}
-struct amperf_group_fd {
- int aperf; /* Also the group descriptor */
- int mperf;
-};
-
static int read_perf_counter_info(const char *const path, const char *const parse_format, void *value_ptr)
{
int fdmt;
@@ -4431,8 +4666,7 @@ int get_rapl_counters(int cpu, unsigned int domain, struct core_data *c, struct
const ssize_t actual_read_size = read(rci->fd_perf, &perf_data[0], sizeof(perf_data));
if (actual_read_size != expected_read_size)
- err(-1, "%s: failed to read perf_data (%zu %zu)", __func__, expected_read_size,
- actual_read_size);
+ err(-1, "%s: failed to read perf_data (%zu %zu)", __func__, expected_read_size, actual_read_size);
}
for (unsigned int i = 0, pi = 1; i < NUM_RAPL_COUNTERS; ++i) {
@@ -4502,7 +4736,7 @@ char *find_sysfs_path_by_id(struct sysfs_path *sp, int id)
return NULL;
}
-int get_cstate_counters(unsigned int cpu, struct thread_data *t, struct core_data *c, struct pkg_data *p)
+int get_cstate_counters(unsigned int cpu, PER_THREAD_PARAMS)
{
/*
* Overcommit memory a little bit here,
@@ -4670,8 +4904,7 @@ int get_smi_aperf_mperf(unsigned int cpu, struct thread_data *t)
const ssize_t actual_read_size = read(mci->fd_perf, &perf_data[0], sizeof(perf_data));
if (actual_read_size != expected_read_size)
- err(-1, "%s: failed to read perf_data (%zu %zu)", __func__, expected_read_size,
- actual_read_size);
+ err(-1, "%s: failed to read perf_data (%zu %zu)", __func__, expected_read_size, actual_read_size);
}
for (unsigned int i = 0, pi = 1; i < NUM_MSR_COUNTERS; ++i) {
@@ -4766,12 +4999,43 @@ unsigned long pmt_read_counter(struct pmt_counter *ppmt, unsigned int domain_id)
return (value & value_mask) >> value_shift;
}
+/* Rapl domain enumeration helpers */
+static inline int get_rapl_num_domains(void)
+{
+ int num_packages = topo.max_package_id + 1;
+ int num_cores_per_package;
+ int num_cores;
+
+ if (!platform->has_per_core_rapl)
+ return num_packages;
+
+ num_cores_per_package = topo.max_core_id + 1;
+ num_cores = num_cores_per_package * num_packages;
+
+ return num_cores;
+}
+
+static inline int get_rapl_domain_id(int cpu)
+{
+ int nr_cores_per_package = topo.max_core_id + 1;
+ int rapl_core_id;
+
+ if (!platform->has_per_core_rapl)
+ return cpus[cpu].physical_package_id;
+
+ /* Compute the system-wide unique core-id for @cpu */
+ rapl_core_id = cpus[cpu].physical_core_id;
+ rapl_core_id += cpus[cpu].physical_package_id * nr_cores_per_package;
+
+ return rapl_core_id;
+}
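A worked example of the mapping, using a hypothetical topology:

	/*
	 * Two packages, topo.max_core_id = 7 (eight core slots per package):
	 *   per-core RAPL:  get_rapl_num_domains() = 8 * 2 = 16 domains;
	 *                   cpu on package 1 with physical_core_id 3 -> 3 + 1 * 8 = domain 11
	 *   package RAPL:   the same cpu -> domain physical_package_id = 1
	 */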
+
/*
* get_counters(...)
* migrate to cpu
* acquire and record local counters for that cpu
*/
-int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+int get_counters(PER_THREAD_PARAMS)
{
int cpu = t->cpu_id;
unsigned long long msr;
@@ -4794,6 +5058,9 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
get_smi_aperf_mperf(cpu, t);
+ if (DO_BIC(BIC_LLC_RPS) || DO_BIC(BIC_LLC_HIT))
+ get_perf_llc_stats(cpu, &t->llc);
+
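The t->llc accumulator filled here is a new field of struct thread_data, added in an earlier hunk; judging from the delta/sum/clear code above, it is simply a pair of raw perf counts:

	/* assumed field added to struct thread_data (definition not in this hunk) */
	struct {
		unsigned long long references;
		unsigned long long misses;
	} llc;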
if (DO_BIC(BIC_IPC))
if (read(get_instr_count_fd(cpu), &t->instr_count, sizeof(long long)) != sizeof(long long))
return -4;
@@ -4817,11 +5084,11 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
t->pmt_counter[i] = pmt_read_counter(pp, t->cpu_id);
/* collect core counters only for 1st thread in core */
- if (!is_cpu_first_thread_in_core(t, c, p))
+ if (!is_cpu_first_thread_in_core(t, c))
goto done;
if (platform->has_per_core_rapl) {
- status = get_rapl_counters(cpu, c->core_id, c, p);
+ status = get_rapl_counters(cpu, get_rapl_domain_id(cpu), c, p);
if (status != 0)
return status;
}
@@ -4861,7 +5128,7 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
c->pmt_counter[i] = pmt_read_counter(pp, c->core_id);
/* collect package counters only for 1st core in package */
- if (!is_cpu_first_core_in_package(t, c, p))
+ if (!is_cpu_first_core_in_package(t, p))
goto done;
if (DO_BIC(BIC_Totl_c0)) {
@@ -4887,7 +5154,7 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
p->sys_lpi = cpuidle_cur_sys_lpi_us;
if (!platform->has_per_core_rapl) {
- status = get_rapl_counters(cpu, p->package_id, c, p);
+ status = get_rapl_counters(cpu, get_rapl_domain_id(cpu), c, p);
if (status != 0)
return status;
}
@@ -4950,48 +5217,39 @@ char *pkg_cstate_limit_strings[] = { "unknown", "reserved", "pc0", "pc1", "pc2",
"pc3", "pc4", "pc6", "pc6n", "pc6r", "pc7", "pc7s", "pc8", "pc9", "pc10", "unlimited"
};
-int nhm_pkg_cstate_limits[16] =
- { PCL__0, PCL__1, PCL__3, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV,
+int nhm_pkg_cstate_limits[16] = { PCL__0, PCL__1, PCL__3, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV,
PCLRSV, PCLRSV
};
-int snb_pkg_cstate_limits[16] =
- { PCL__0, PCL__2, PCL_6N, PCL_6R, PCL__7, PCL_7S, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV,
+int snb_pkg_cstate_limits[16] = { PCL__0, PCL__2, PCL_6N, PCL_6R, PCL__7, PCL_7S, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV,
PCLRSV, PCLRSV
};
-int hsw_pkg_cstate_limits[16] =
- { PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV,
+int hsw_pkg_cstate_limits[16] = { PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV,
PCLRSV, PCLRSV
};
-int slv_pkg_cstate_limits[16] =
- { PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV,
+int slv_pkg_cstate_limits[16] = { PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV,
PCL__6, PCL__7
};
-int amt_pkg_cstate_limits[16] =
- { PCLUNL, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV,
+int amt_pkg_cstate_limits[16] = { PCLUNL, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV,
PCLRSV, PCLRSV
};
-int phi_pkg_cstate_limits[16] =
- { PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV,
+int phi_pkg_cstate_limits[16] = { PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV,
PCLRSV, PCLRSV
};
-int glm_pkg_cstate_limits[16] =
- { PCLUNL, PCL__1, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9, PCL_10, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV,
+int glm_pkg_cstate_limits[16] = { PCLUNL, PCL__1, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9, PCL_10, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV,
PCLRSV, PCLRSV
};
-int skx_pkg_cstate_limits[16] =
- { PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV,
+int skx_pkg_cstate_limits[16] = { PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV,
PCLRSV, PCLRSV
};
-int icx_pkg_cstate_limits[16] =
- { PCL__0, PCL__2, PCL__6, PCL__6, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV,
+int icx_pkg_cstate_limits[16] = { PCL__0, PCL__2, PCL__6, PCL__6, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV,
PCLRSV, PCLRSV
};
@@ -5066,8 +5324,7 @@ static void dump_power_ctl(void)
return;
get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr);
- fprintf(outf, "cpu%d: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n",
- base_cpu, msr, msr & 0x2 ? "EN" : "DIS");
+ fprintf(outf, "cpu%d: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n", base_cpu, msr, msr & 0x2 ? "EN" : "DIS");
/* C-state Pre-wake Disable (CSTATE_PREWAKE_DISABLE) */
if (platform->has_cst_prewake_bit)
@@ -5160,8 +5417,7 @@ static void dump_turbo_ratio_limits(int trl_msr_offset)
ratio = (msr >> shift) & 0xFF;
group_size = (core_counts >> shift) & 0xFF;
if (ratio)
- fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
- ratio, bclk, ratio * bclk, group_size);
+ fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n", ratio, bclk, ratio * bclk, group_size);
}
return;
@@ -5259,9 +5515,7 @@ static void dump_knl_turbo_ratio_limits(void)
for (i = buckets_no - 1; i >= 0; i--)
if (i > 0 ? ratio[i] != ratio[i - 1] : 1)
- fprintf(outf,
- "%d * %.1f = %.1f MHz max turbo %d active cores\n",
- ratio[i], bclk, ratio[i] * bclk, cores[i]);
+ fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n", ratio[i], bclk, ratio[i] * bclk, cores[i]);
}
static void dump_cst_cfg(void)
@@ -5346,43 +5600,37 @@ void print_irtl(void)
if (platform->supported_cstates & PC3) {
get_msr(base_cpu, MSR_PKGC3_IRTL, &msr);
fprintf(outf, "cpu%d: MSR_PKGC3_IRTL: 0x%08llx (", base_cpu, msr);
- fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
- (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
+ fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
}
if (platform->supported_cstates & PC6) {
get_msr(base_cpu, MSR_PKGC6_IRTL, &msr);
fprintf(outf, "cpu%d: MSR_PKGC6_IRTL: 0x%08llx (", base_cpu, msr);
- fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
- (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
+ fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
}
if (platform->supported_cstates & PC7) {
get_msr(base_cpu, MSR_PKGC7_IRTL, &msr);
fprintf(outf, "cpu%d: MSR_PKGC7_IRTL: 0x%08llx (", base_cpu, msr);
- fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
- (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
+ fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
}
if (platform->supported_cstates & PC8) {
get_msr(base_cpu, MSR_PKGC8_IRTL, &msr);
fprintf(outf, "cpu%d: MSR_PKGC8_IRTL: 0x%08llx (", base_cpu, msr);
- fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
- (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
+ fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
}
if (platform->supported_cstates & PC9) {
get_msr(base_cpu, MSR_PKGC9_IRTL, &msr);
fprintf(outf, "cpu%d: MSR_PKGC9_IRTL: 0x%08llx (", base_cpu, msr);
- fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
- (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
+ fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
}
if (platform->supported_cstates & PC10) {
get_msr(base_cpu, MSR_PKGC10_IRTL, &msr);
fprintf(outf, "cpu%d: MSR_PKGC10_IRTL: 0x%08llx (", base_cpu, msr);
- fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
- (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
+ fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
}
}
@@ -5416,6 +5664,20 @@ void free_fd_instr_count_percpu(void)
fd_instr_count_percpu = NULL;
}
+void free_fd_llc_percpu(void)
+{
+ if (!fd_llc_percpu)
+ return;
+
+ for (int i = 0; i < topo.max_cpu_num + 1; ++i) {
+ if (fd_llc_percpu[i] != 0)
+ close(fd_llc_percpu[i]);
+ }
+
+ free(fd_llc_percpu);
+ fd_llc_percpu = NULL;
+}
+
void free_fd_cstate(void)
{
if (!ccstate_counter_info)
@@ -5540,6 +5802,7 @@ void free_all_buffers(void)
free_fd_percpu();
free_fd_instr_count_percpu();
+ free_fd_llc_percpu();
free_fd_msr();
free_fd_rapl_percpu();
free_fd_cstate();
@@ -5599,6 +5862,11 @@ int get_die_id(int cpu)
return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/die_id", cpu);
}
+int get_l3_id(int cpu)
+{
+ return parse_int_file("/sys/devices/system/cpu/cpu%d/cache/index3/id", cpu);
+}
+
int get_core_id(int cpu)
{
return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
@@ -5787,7 +6055,6 @@ int for_all_cpus_2(int (func) (struct thread_data *, struct core_data *,
for (thread_no = 0; thread_no < topo.threads_per_core; ++thread_no) {
struct thread_data *t, *t2;
struct core_data *c, *c2;
- struct pkg_data *p, *p2;
t = GET_THREAD(thread_base, thread_no, core_no, node_no, pkg_no);
@@ -5799,10 +6066,7 @@ int for_all_cpus_2(int (func) (struct thread_data *, struct core_data *,
c = GET_CORE(core_base, core_no, node_no, pkg_no);
c2 = GET_CORE(core_base2, core_no, node_no, pkg_no);
- p = GET_PKG(pkg_base, pkg_no);
- p2 = GET_PKG(pkg_base2, pkg_no);
-
- retval |= func(t, c, p, t2, c2, p2);
+ retval |= func(t, c, &pkg_base[pkg_no], t2, c2, &pkg_base2[pkg_no]);
}
}
}
@@ -5885,6 +6149,7 @@ void linux_perf_init(void);
void msr_perf_init(void);
void rapl_perf_init(void);
void cstate_perf_init(void);
+void perf_llc_init(void);
void added_perf_counters_init(void);
void pmt_init(void);
@@ -5896,10 +6161,10 @@ void re_initialize(void)
msr_perf_init();
rapl_perf_init();
cstate_perf_init();
+ perf_llc_init();
added_perf_counters_init();
pmt_init();
- fprintf(outf, "turbostat: re-initialized with num_cpus %d, allowed_cpus %d\n", topo.num_cpus,
- topo.allowed_cpus);
+ fprintf(outf, "turbostat: re-initialized with num_cpus %d, allowed_cpus %d\n", topo.num_cpus, topo.allowed_cpus);
}
void set_max_cpu_num(void)
@@ -6260,7 +6525,7 @@ int get_msr_sum(int cpu, off_t offset, unsigned long long *msr)
timer_t timerid;
/* Timer callback, update the sum of MSRs periodically. */
-static int update_msr_sum(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+static int update_msr_sum(PER_THREAD_PARAMS)
{
int i, ret;
int cpu = t->cpu_id;
@@ -6345,6 +6610,7 @@ release_timer:
timer_delete(timerid);
release_msr:
free(per_cpu_msr_sum);
+ per_cpu_msr_sum = NULL;
}
/*
@@ -6469,18 +6735,43 @@ restart:
}
}
-void check_dev_msr()
+int probe_dev_msr(void)
{
struct stat sb;
char pathname[32];
- if (no_msr)
- return;
+ sprintf(pathname, "/dev/msr%d", base_cpu);
+ return !stat(pathname, &sb);
+}
+
+int probe_dev_cpu_msr(void)
+{
+ struct stat sb;
+ char pathname[32];
sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
- if (stat(pathname, &sb))
- if (system("/sbin/modprobe msr > /dev/null 2>&1"))
- no_msr = 1;
+ return !stat(pathname, &sb);
+}
+
+int probe_msr_driver(void)
+{
+ if (probe_dev_msr()) {
+ use_android_msr_path = 1;
+ return 1;
+ }
+ return probe_dev_cpu_msr();
+}
+
+void check_msr_driver(void)
+{
+ if (probe_msr_driver())
+ return;
+
+ if (system("/sbin/modprobe msr > /dev/null 2>&1"))
+ no_msr = 1;
+
+ if (!probe_msr_driver())
+ no_msr = 1;
}
/*
@@ -6495,8 +6786,16 @@ int check_for_cap_sys_rawio(void)
int ret = 0;
caps = cap_get_proc();
- if (caps == NULL)
+ if (caps == NULL) {
+ /*
+ * CONFIG_MULTIUSER=n kernels have no cap_get_proc()
+ * Allow them to continue and attempt to access MSRs
+ */
+ if (errno == ENOSYS)
+ return 0;
+
return 1;
+ }
if (cap_get_flag(caps, CAP_SYS_RAWIO, CAP_EFFECTIVE, &cap_flag_value)) {
ret = 1;
@@ -6527,7 +6826,7 @@ void check_msr_permission(void)
failed += check_for_cap_sys_rawio();
/* test file permissions */
- sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
+ sprintf(pathname, use_android_msr_path ? "/dev/msr%d" : "/dev/cpu/%d/msr", base_cpu);
if (euidaccess(pathname, R_OK)) {
failed++;
}
@@ -6656,10 +6955,10 @@ static void probe_intel_uncore_frequency_legacy(void)
int k, l;
char path_base[128];
- sprintf(path_base, "/sys/devices/system/cpu/intel_uncore_frequency/package_%02d_die_%02d", i,
- j);
+ sprintf(path_base, "/sys/devices/system/cpu/intel_uncore_frequency/package_%02d_die_%02d", i, j);
- if (access(path_base, R_OK))
+ sprintf(path, "%s/current_freq_khz", path_base);
+ if (access(path, R_OK))
continue;
BIC_PRESENT(BIC_UNCORE_MHZ);
@@ -6737,8 +7036,9 @@ static void probe_intel_uncore_frequency_cluster(void)
* This allows "--show/--hide UncMHz" to be effective for
* the clustered MHz counters, as a group.
*/
- if BIC_IS_ENABLED(BIC_UNCORE_MHZ)
- add_counter(0, path, name_buf, 0, SCOPE_PACKAGE, COUNTER_K2M, FORMAT_AVERAGE, 0, package_id);
+ if (BIC_IS_ENABLED(BIC_UNCORE_MHZ))
+ add_counter(0, path, name_buf, 0, SCOPE_PACKAGE, COUNTER_K2M, FORMAT_AVERAGE, 0, package_id);
if (quiet)
continue;
@@ -6747,8 +7047,7 @@ static void probe_intel_uncore_frequency_cluster(void)
k = read_sysfs_int(path);
sprintf(path, "%s/max_freq_khz", path_base);
l = read_sysfs_int(path);
- fprintf(outf, "Uncore Frequency package%d domain%d cluster%d: %d - %d MHz ", package_id, domain_id,
- cluster_id, k / 1000, l / 1000);
+ fprintf(outf, "Uncore Frequency package%d domain%d cluster%d: %d - %d MHz ", package_id, domain_id, cluster_id, k / 1000, l / 1000);
sprintf(path, "%s/initial_min_freq_khz", path_base);
k = read_sysfs_int(path);
@@ -6985,7 +7284,7 @@ static void dump_sysfs_pstate_config(void)
* print_epb()
* Decode the ENERGY_PERF_BIAS MSR
*/
-int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+int print_epb(PER_THREAD_PARAMS)
{
char *epb_string;
int cpu, epb;
@@ -7034,7 +7333,7 @@ int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p)
* print_hwp()
* Decode the MSR_HWP_CAPABILITIES
*/
-int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+int print_hwp(PER_THREAD_PARAMS)
{
unsigned long long msr;
int cpu;
@@ -7075,8 +7374,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
"(high %d guar %d eff %d low %d)\n",
cpu, msr,
(unsigned int)HWP_HIGHEST_PERF(msr),
- (unsigned int)HWP_GUARANTEED_PERF(msr),
- (unsigned int)HWP_MOSTEFFICIENT_PERF(msr), (unsigned int)HWP_LOWEST_PERF(msr));
+ (unsigned int)HWP_GUARANTEED_PERF(msr), (unsigned int)HWP_MOSTEFFICIENT_PERF(msr), (unsigned int)HWP_LOWEST_PERF(msr));
if (get_msr(cpu, MSR_HWP_REQUEST, &msr))
return 0;
@@ -7087,8 +7385,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
(unsigned int)(((msr) >> 0) & 0xff),
(unsigned int)(((msr) >> 8) & 0xff),
(unsigned int)(((msr) >> 16) & 0xff),
- (unsigned int)(((msr) >> 24) & 0xff),
- (unsigned int)(((msr) >> 32) & 0xff3), (unsigned int)(((msr) >> 42) & 0x1));
+ (unsigned int)(((msr) >> 24) & 0xff), (unsigned int)(((msr) >> 32) & 0xff3), (unsigned int)(((msr) >> 42) & 0x1));
if (has_hwp_pkg) {
if (get_msr(cpu, MSR_HWP_REQUEST_PKG, &msr))
@@ -7099,23 +7396,20 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
cpu, msr,
(unsigned int)(((msr) >> 0) & 0xff),
(unsigned int)(((msr) >> 8) & 0xff),
- (unsigned int)(((msr) >> 16) & 0xff),
- (unsigned int)(((msr) >> 24) & 0xff), (unsigned int)(((msr) >> 32) & 0xff3));
+ (unsigned int)(((msr) >> 16) & 0xff), (unsigned int)(((msr) >> 24) & 0xff), (unsigned int)(((msr) >> 32) & 0xff3));
}
if (has_hwp_notify) {
if (get_msr(cpu, MSR_HWP_INTERRUPT, &msr))
return 0;
fprintf(outf, "cpu%d: MSR_HWP_INTERRUPT: 0x%08llx "
- "(%s_Guaranteed_Perf_Change, %s_Excursion_Min)\n",
- cpu, msr, ((msr) & 0x1) ? "EN" : "Dis", ((msr) & 0x2) ? "EN" : "Dis");
+ "(%s_Guaranteed_Perf_Change, %s_Excursion_Min)\n", cpu, msr, ((msr) & 0x1) ? "EN" : "Dis", ((msr) & 0x2) ? "EN" : "Dis");
}
if (get_msr(cpu, MSR_HWP_STATUS, &msr))
return 0;
fprintf(outf, "cpu%d: MSR_HWP_STATUS: 0x%08llx "
- "(%sGuaranteed_Perf_Change, %sExcursion_Min)\n",
- cpu, msr, ((msr) & 0x1) ? "" : "No-", ((msr) & 0x4) ? "" : "No-");
+ "(%sGuaranteed_Perf_Change, %sExcursion_Min)\n", cpu, msr, ((msr) & 0x1) ? "" : "No-", ((msr) & 0x4) ? "" : "No-");
return 0;
}
@@ -7123,7 +7417,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
/*
* print_perf_limit()
*/
-int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+int print_perf_limit(PER_THREAD_PARAMS)
{
unsigned long long msr;
int cpu;
@@ -7160,8 +7454,7 @@ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data
(msr & 1 << 6) ? "VR-Therm, " : "",
(msr & 1 << 5) ? "Auto-HWP, " : "",
(msr & 1 << 4) ? "Graphics, " : "",
- (msr & 1 << 2) ? "bit2, " : "",
- (msr & 1 << 1) ? "ThermStatus, " : "", (msr & 1 << 0) ? "PROCHOT, " : "");
+ (msr & 1 << 2) ? "bit2, " : "", (msr & 1 << 1) ? "ThermStatus, " : "", (msr & 1 << 0) ? "PROCHOT, " : "");
fprintf(outf, " (Logged: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
(msr & 1 << 31) ? "bit31, " : "",
(msr & 1 << 30) ? "bit30, " : "",
@@ -7174,8 +7467,7 @@ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data
(msr & 1 << 22) ? "VR-Therm, " : "",
(msr & 1 << 21) ? "Auto-HWP, " : "",
(msr & 1 << 20) ? "Graphics, " : "",
- (msr & 1 << 18) ? "bit18, " : "",
- (msr & 1 << 17) ? "ThermStatus, " : "", (msr & 1 << 16) ? "PROCHOT, " : "");
+ (msr & 1 << 18) ? "bit18, " : "", (msr & 1 << 17) ? "ThermStatus, " : "", (msr & 1 << 16) ? "PROCHOT, " : "");
}
if (platform->plr_msrs & PLR_GFX) {
@@ -7187,16 +7479,14 @@ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data
(msr & 1 << 4) ? "Graphics, " : "",
(msr & 1 << 6) ? "VR-Therm, " : "",
(msr & 1 << 8) ? "Amps, " : "",
- (msr & 1 << 9) ? "GFXPwr, " : "",
- (msr & 1 << 10) ? "PkgPwrL1, " : "", (msr & 1 << 11) ? "PkgPwrL2, " : "");
+ (msr & 1 << 9) ? "GFXPwr, " : "", (msr & 1 << 10) ? "PkgPwrL1, " : "", (msr & 1 << 11) ? "PkgPwrL2, " : "");
fprintf(outf, " (Logged: %s%s%s%s%s%s%s%s)\n",
(msr & 1 << 16) ? "PROCHOT, " : "",
(msr & 1 << 17) ? "ThermStatus, " : "",
(msr & 1 << 20) ? "Graphics, " : "",
(msr & 1 << 22) ? "VR-Therm, " : "",
(msr & 1 << 24) ? "Amps, " : "",
- (msr & 1 << 25) ? "GFXPwr, " : "",
- (msr & 1 << 26) ? "PkgPwrL1, " : "", (msr & 1 << 27) ? "PkgPwrL2, " : "");
+ (msr & 1 << 25) ? "GFXPwr, " : "", (msr & 1 << 26) ? "PkgPwrL1, " : "", (msr & 1 << 27) ? "PkgPwrL2, " : "");
}
if (platform->plr_msrs & PLR_RING) {
get_msr(cpu, MSR_RING_PERF_LIMIT_REASONS, &msr);
@@ -7205,14 +7495,12 @@ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data
(msr & 1 << 0) ? "PROCHOT, " : "",
(msr & 1 << 1) ? "ThermStatus, " : "",
(msr & 1 << 6) ? "VR-Therm, " : "",
- (msr & 1 << 8) ? "Amps, " : "",
- (msr & 1 << 10) ? "PkgPwrL1, " : "", (msr & 1 << 11) ? "PkgPwrL2, " : "");
+ (msr & 1 << 8) ? "Amps, " : "", (msr & 1 << 10) ? "PkgPwrL1, " : "", (msr & 1 << 11) ? "PkgPwrL2, " : "");
fprintf(outf, " (Logged: %s%s%s%s%s%s)\n",
(msr & 1 << 16) ? "PROCHOT, " : "",
(msr & 1 << 17) ? "ThermStatus, " : "",
(msr & 1 << 22) ? "VR-Therm, " : "",
- (msr & 1 << 24) ? "Amps, " : "",
- (msr & 1 << 26) ? "PkgPwrL1, " : "", (msr & 1 << 27) ? "PkgPwrL2, " : "");
+ (msr & 1 << 24) ? "Amps, " : "", (msr & 1 << 26) ? "PkgPwrL1, " : "", (msr & 1 << 27) ? "PkgPwrL2, " : "");
}
return 0;
}
@@ -7232,7 +7520,7 @@ double get_tdp_intel(void)
{
unsigned long long msr;
- if (platform->rapl_msrs & RAPL_PKG_POWER_INFO)
+ if (valid_rapl_msrs & RAPL_PKG_POWER_INFO)
if (!get_msr(base_cpu, MSR_PKG_POWER_INFO, &msr))
return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
return get_quirk_tdp();
@@ -7248,18 +7536,28 @@ void rapl_probe_intel(void)
unsigned long long msr;
unsigned int time_unit;
double tdp;
- const unsigned long long bic_watt_bits = BIC_SysWatt | BIC_PkgWatt | BIC_CorWatt | BIC_RAMWatt | BIC_GFXWatt;
- const unsigned long long bic_joules_bits = BIC_Sys_J | BIC_Pkg_J | BIC_Cor_J | BIC_RAM_J | BIC_GFX_J;
- if (rapl_joules)
- bic_enabled &= ~bic_watt_bits;
- else
- bic_enabled &= ~bic_joules_bits;
+ if (rapl_joules) {
+ CLR_BIC(BIC_SysWatt, &bic_enabled);
+ CLR_BIC(BIC_PkgWatt, &bic_enabled);
+ CLR_BIC(BIC_CorWatt, &bic_enabled);
+ CLR_BIC(BIC_RAMWatt, &bic_enabled);
+ CLR_BIC(BIC_GFXWatt, &bic_enabled);
+ } else {
+ CLR_BIC(BIC_Sys_J, &bic_enabled);
+ CLR_BIC(BIC_Pkg_J, &bic_enabled);
+ CLR_BIC(BIC_Cor_J, &bic_enabled);
+ CLR_BIC(BIC_RAM_J, &bic_enabled);
+ CLR_BIC(BIC_GFX_J, &bic_enabled);
+ }
- if (!(platform->rapl_msrs & RAPL_PKG_PERF_STATUS))
- bic_enabled &= ~BIC_PKG__;
- if (!(platform->rapl_msrs & RAPL_DRAM_PERF_STATUS))
- bic_enabled &= ~BIC_RAM__;
+ if (!valid_rapl_msrs || no_msr)
+ return;
+
+ if (!(valid_rapl_msrs & RAPL_PKG_PERF_STATUS))
+ CLR_BIC(BIC_PKG__, &bic_enabled);
+ if (!(valid_rapl_msrs & RAPL_DRAM_PERF_STATUS))
+ CLR_BIC(BIC_RAM__, &bic_enabled);
/* units on package 0, verify later other packages match */
if (get_msr(base_cpu, MSR_RAPL_POWER_UNIT, &msr))
@@ -7298,13 +7596,17 @@ void rapl_probe_amd(void)
{
unsigned long long msr;
double tdp;
- const unsigned long long bic_watt_bits = BIC_PkgWatt | BIC_CorWatt;
- const unsigned long long bic_joules_bits = BIC_Pkg_J | BIC_Cor_J;
- if (rapl_joules)
- bic_enabled &= ~bic_watt_bits;
- else
- bic_enabled &= ~bic_joules_bits;
+ if (rapl_joules) {
+ CLR_BIC(BIC_SysWatt, &bic_enabled);
+ CLR_BIC(BIC_CorWatt, &bic_enabled);
+ } else {
+ CLR_BIC(BIC_Pkg_J, &bic_enabled);
+ CLR_BIC(BIC_Cor_J, &bic_enabled);
+ }
+
+ if (!valid_rapl_msrs || no_msr)
+ return;
if (get_msr(base_cpu, MSR_RAPL_PWR_UNIT, &msr))
return;
@@ -7326,13 +7628,164 @@ void print_power_limit_msr(int cpu, unsigned long long msr, char *label)
cpu, label,
((msr >> 15) & 1) ? "EN" : "DIS",
((msr >> 0) & 0x7FFF) * rapl_power_units,
- (1.0 + (((msr >> 22) & 0x3) / 4.0)) * (1 << ((msr >> 17) & 0x1F)) * rapl_time_units,
- (((msr >> 16) & 1) ? "EN" : "DIS"));
+ (1.0 + (((msr >> 22) & 0x3) / 4.0)) * (1 << ((msr >> 17) & 0x1F)) * rapl_time_units, (((msr >> 16) & 1) ? "EN" : "DIS"));
return;
}
-int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+static int fread_int(char *path, int *val)
+{
+ FILE *filep;
+ int ret;
+
+ filep = fopen(path, "r");
+ if (!filep)
+ return -1;
+
+ ret = fscanf(filep, "%d", val);
+ fclose(filep);
+ return ret;
+}
+
+static int fread_ull(char *path, unsigned long long *val)
+{
+ FILE *filep;
+ int ret;
+
+ filep = fopen(path, "r");
+ if (!filep)
+ return -1;
+
+ ret = fscanf(filep, "%llu", val);
+ fclose(filep);
+ return ret;
+}
+
+static int fread_str(char *path, char *buf, int size)
+{
+ FILE *filep;
+ int ret;
+ char *cp;
+
+ filep = fopen(path, "r");
+ if (!filep)
+ return -1;
+
+ ret = fread(buf, 1, size - 1, filep);
+ fclose(filep);
+
+ /* NUL-terminate before strchr(): fread() does not */
+ buf[ret] = '\0';
+
+ /* replace '\n' with '\0' */
+ cp = strchr(buf, '\n');
+ if (cp != NULL)
+ *cp = '\0';
+
+ return ret;
+}
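A minimal usage sketch for these helpers; the sysfs path is illustrative of the powercap layout walked below:

	int enabled;

	/* fscanf()-style return: 1 item converted on success */
	if (fread_int("/sys/class/powercap/intel-rapl:0/enabled", &enabled) > 0)
		fprintf(outf, "intel-rapl:0 is %sabled\n", enabled ? "en" : "dis");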
+
+#define PATH_RAPL_SYSFS "/sys/class/powercap"
+
+static int dump_one_domain(char *domain_path)
+{
+ char path[PATH_MAX];
+ char str[PATH_MAX];
+ unsigned long long val;
+ int constraint;
+ int enable;
+ int ret;
+
+ snprintf(path, PATH_MAX, "%s/name", domain_path);
+ ret = fread_str(path, str, PATH_MAX);
+ if (ret <= 0)
+ return -1;
+
+ fprintf(outf, "%s: %s", domain_path + strlen(PATH_RAPL_SYSFS) + 1, str);
+
+ snprintf(path, PATH_MAX, "%s/enabled", domain_path);
+ ret = fread_int(path, &enable);
+ if (ret <= 0)
+ return -1;
+
+ if (!enable) {
+ fputs(" disabled\n", outf);
+ return 0;
+ }
+
+ for (constraint = 0;; constraint++) {
+ snprintf(path, PATH_MAX, "%s/constraint_%d_time_window_us", domain_path, constraint);
+ ret = fread_ull(path, &val);
+ if (ret <= 0)
+ break;
+
+ if (val > 1000000)
+ fprintf(outf, " %0.1fs", (double)val / 1000000);
+ else if (val > 1000)
+ fprintf(outf, " %0.1fms", (double)val / 1000);
+ else
+ fprintf(outf, " %0.1fus", (double)val);
+
+ snprintf(path, PATH_MAX, "%s/constraint_%d_power_limit_uw", domain_path, constraint);
+ ret = fread_ull(path, &val);
+ if (ret > 0 && val)
+ fprintf(outf, ":%lluW", val / 1000000);
+
+ snprintf(path, PATH_MAX, "%s/constraint_%d_max_power_uw", domain_path, constraint);
+ ret = fread_ull(path, &val);
+ if (ret > 0 && val)
+ fprintf(outf, ",max:%lluW", val / 1000000);
+ }
+ fputc('\n', outf);
+
+ return 0;
+}
+
+static int print_rapl_sysfs(void)
+{
+ DIR *dir, *cdir;
+ struct dirent *entry, *centry;
+ char path[PATH_MAX];
+ char str[PATH_MAX];
+
+ if ((dir = opendir(PATH_RAPL_SYSFS)) == NULL) {
+ warn("open %s failed", PATH_RAPL_SYSFS);
+ return 1;
+ }
+
+ while ((entry = readdir(dir)) != NULL) {
+ if (strlen(entry->d_name) > 100)
+ continue;
+
+ if (strncmp(entry->d_name, "intel-rapl", strlen("intel-rapl")))
+ continue;
+
+ snprintf(path, PATH_MAX, "%s/%s/name", PATH_RAPL_SYSFS, entry->d_name);
+
+ /* Parse top level domains first, including package and psys */
+ if (fread_str(path, str, PATH_MAX) <= 0)
+ continue;
+
+ if (strncmp(str, "package", strlen("package")) && strncmp(str, "psys", strlen("psys")))
+ continue;
+
+ snprintf(path, PATH_MAX, "%s/%s", PATH_RAPL_SYSFS, entry->d_name);
+ if ((cdir = opendir(path)) == NULL) {
+ perror("opendir() error");
+ closedir(dir);
+ return 1;
+ }
+
+ dump_one_domain(path);
+
+ while ((centry = readdir(cdir)) != NULL) {
+ if (strncmp(centry->d_name, "intel-rapl", strlen("intel-rapl")))
+ continue;
+ snprintf(path, PATH_MAX, "%s/%s/%s", PATH_RAPL_SYSFS, entry->d_name, centry->d_name);
+ dump_one_domain(path);
+ }
+ closedir(cdir);
+ }
+
+ closedir(dir);
+ return 0;
+}
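For a package domain with two constraints, the resulting dump looks roughly like this (values invented for illustration):

	intel-rapl:0: package-0 28.0s:125W,max:125W 2.4ms:170W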
+
+int print_rapl(PER_THREAD_PARAMS)
{
unsigned long long msr;
const char *msr_name;
@@ -7341,7 +7794,7 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
UNUSED(c);
UNUSED(p);
- if (!platform->rapl_msrs)
+ if (!valid_rapl_msrs)
return 0;
/* RAPL counters are per package, so print only for 1st thread/package */
@@ -7354,7 +7807,7 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
return -1;
}
- if (platform->rapl_msrs & RAPL_AMD_F17H) {
+ if (valid_rapl_msrs & RAPL_AMD_F17H) {
msr_name = "MSR_RAPL_PWR_UNIT";
if (get_msr(cpu, MSR_RAPL_PWR_UNIT, &msr))
return -1;
@@ -7367,7 +7820,7 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
fprintf(outf, "cpu%d: %s: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr_name, msr,
rapl_power_units, rapl_energy_units, rapl_time_units);
- if (platform->rapl_msrs & RAPL_PKG_POWER_INFO) {
+ if (valid_rapl_msrs & RAPL_PKG_POWER_INFO) {
if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr))
return -5;
@@ -7376,25 +7829,22 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
cpu, msr,
((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units,
- ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
- ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
+ ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units, ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
}
- if (platform->rapl_msrs & RAPL_PKG) {
+ if (valid_rapl_msrs & RAPL_PKG) {
if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr))
return -9;
- fprintf(outf, "cpu%d: MSR_PKG_POWER_LIMIT: 0x%08llx (%slocked)\n",
- cpu, msr, (msr >> 63) & 1 ? "" : "UN");
+ fprintf(outf, "cpu%d: MSR_PKG_POWER_LIMIT: 0x%08llx (%slocked)\n", cpu, msr, (msr >> 63) & 1 ? "" : "UN");
print_power_limit_msr(cpu, msr, "PKG Limit #1");
fprintf(outf, "cpu%d: PKG Limit #2: %sabled (%0.3f Watts, %f* sec, clamp %sabled)\n",
cpu,
((msr >> 47) & 1) ? "EN" : "DIS",
((msr >> 32) & 0x7FFF) * rapl_power_units,
- (1.0 + (((msr >> 54) & 0x3) / 4.0)) * (1 << ((msr >> 49) & 0x1F)) * rapl_time_units,
- ((msr >> 48) & 1) ? "EN" : "DIS");
+ (1.0 + (((msr >> 54) & 0x3) / 4.0)) * (1 << ((msr >> 49) & 0x1F)) * rapl_time_units, ((msr >> 48) & 1) ? "EN" : "DIS");
if (get_msr(cpu, MSR_VR_CURRENT_CONFIG, &msr))
return -9;
@@ -7404,7 +7854,7 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
cpu, ((msr >> 0) & 0x1FFF) * rapl_power_units, (msr >> 31) & 1 ? "" : "UN");
}
- if (platform->rapl_msrs & RAPL_DRAM_POWER_INFO) {
+ if (valid_rapl_msrs & RAPL_DRAM_POWER_INFO) {
if (get_msr(cpu, MSR_DRAM_POWER_INFO, &msr))
return -6;
@@ -7412,31 +7862,28 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
cpu, msr,
((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units,
- ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
- ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
+ ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units, ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
}
- if (platform->rapl_msrs & RAPL_DRAM) {
+ if (valid_rapl_msrs & RAPL_DRAM) {
if (get_msr(cpu, MSR_DRAM_POWER_LIMIT, &msr))
return -9;
- fprintf(outf, "cpu%d: MSR_DRAM_POWER_LIMIT: 0x%08llx (%slocked)\n",
- cpu, msr, (msr >> 31) & 1 ? "" : "UN");
+ fprintf(outf, "cpu%d: MSR_DRAM_POWER_LIMIT: 0x%08llx (%slocked)\n", cpu, msr, (msr >> 31) & 1 ? "" : "UN");
print_power_limit_msr(cpu, msr, "DRAM Limit");
}
- if (platform->rapl_msrs & RAPL_CORE_POLICY) {
+ if (valid_rapl_msrs & RAPL_CORE_POLICY) {
if (get_msr(cpu, MSR_PP0_POLICY, &msr))
return -7;
fprintf(outf, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF);
}
- if (platform->rapl_msrs & RAPL_CORE_POWER_LIMIT) {
+ if (valid_rapl_msrs & RAPL_CORE_POWER_LIMIT) {
if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr))
return -9;
- fprintf(outf, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n",
- cpu, msr, (msr >> 31) & 1 ? "" : "UN");
+ fprintf(outf, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n", cpu, msr, (msr >> 31) & 1 ? "" : "UN");
print_power_limit_msr(cpu, msr, "Cores Limit");
}
- if (platform->rapl_msrs & RAPL_GFX) {
+ if (valid_rapl_msrs & RAPL_GFX) {
if (get_msr(cpu, MSR_PP1_POLICY, &msr))
return -8;
@@ -7444,22 +7891,57 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
if (get_msr(cpu, MSR_PP1_POWER_LIMIT, &msr))
return -9;
- fprintf(outf, "cpu%d: MSR_PP1_POWER_LIMIT: 0x%08llx (%slocked)\n",
- cpu, msr, (msr >> 31) & 1 ? "" : "UN");
+ fprintf(outf, "cpu%d: MSR_PP1_POWER_LIMIT: 0x%08llx (%slocked)\n", cpu, msr, (msr >> 31) & 1 ? "" : "UN");
print_power_limit_msr(cpu, msr, "GFX Limit");
}
return 0;
}
/*
+ * probe_rapl_msrs
+ *
+ * initialize global valid_rapl_msrs to platform->plat_rapl_msrs
+ * only if PKG_ENERGY counter is enumerated and reads non-zero
+ */
+void probe_rapl_msrs(void)
+{
+ int ret;
+ off_t offset;
+ unsigned long long msr_value;
+
+ if (no_msr)
+ return;
+
+ if ((platform->plat_rapl_msrs & (RAPL_PKG | RAPL_AMD_F17H)) == 0)
+ return;
+
+ offset = idx_to_offset(IDX_PKG_ENERGY);
+ if (offset < 0)
+ return;
+
+ ret = get_msr(base_cpu, offset, &msr_value);
+ if (ret) {
+ if (debug)
+ fprintf(outf, "Can not read RAPL_PKG_ENERGY MSR(0x%llx)\n", (unsigned long long)offset);
+ return;
+ }
+ if (msr_value == 0) {
+ if (debug)
+ fprintf(outf, "RAPL_PKG_ENERGY MSR(0x%llx) == ZERO: disabling all RAPL MSRs\n", (unsigned long long)offset);
+ return;
+ }
+
+ valid_rapl_msrs = platform->plat_rapl_msrs; /* success */
+}
+
+/*
* probe_rapl()
*
* sets rapl_power_units, rapl_energy_units, rapl_time_units
*/
void probe_rapl(void)
{
- if (!platform->rapl_msrs || no_msr)
- return;
+ probe_rapl_msrs();
if (genuine_intel)
rapl_probe_intel();
@@ -7469,6 +7951,11 @@ void probe_rapl(void)
if (quiet)
return;
+ print_rapl_sysfs();
+
+ if (!valid_rapl_msrs || no_msr)
+ return;
+
for_all_cpus(print_rapl, ODD_COUNTERS);
}
@@ -7484,7 +7971,7 @@ void probe_rapl(void)
* below this value, including the Digital Thermal Sensor (DTS),
* Package Thermal Management Sensor (PTM), and thermal event thresholds.
*/
-int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+int set_temperature_target(PER_THREAD_PARAMS)
{
unsigned long long msr;
unsigned int tcc_default, tcc_offset;
@@ -7552,7 +8039,7 @@ guess:
return 0;
}
-int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+int print_thermal(PER_THREAD_PARAMS)
{
unsigned long long msr;
unsigned int dts, dts2;
@@ -7570,7 +8057,7 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
cpu = t->cpu_id;
/* DTS is per-core, no need to print for each thread */
- if (!is_cpu_first_thread_in_core(t, c, p))
+ if (!is_cpu_first_thread_in_core(t, c))
return 0;
if (cpu_migrate(cpu)) {
@@ -7578,7 +8065,7 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
return -1;
}
- if (do_ptm && is_cpu_first_core_in_package(t, c, p)) {
+ if (do_ptm && is_cpu_first_core_in_package(t, p)) {
if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
return 0;
@@ -7590,8 +8077,7 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
dts = (msr >> 16) & 0x7F;
dts2 = (msr >> 8) & 0x7F;
- fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
- cpu, msr, tj_max - dts, tj_max - dts2);
+ fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", cpu, msr, tj_max - dts, tj_max - dts2);
}
if (do_dts && debug) {
@@ -7602,16 +8088,14 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
dts = (msr >> 16) & 0x7F;
resolution = (msr >> 27) & 0xF;
- fprintf(outf, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n",
- cpu, msr, tj_max - dts, resolution);
+ fprintf(outf, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n", cpu, msr, tj_max - dts, resolution);
if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr))
return 0;
dts = (msr >> 16) & 0x7F;
dts2 = (msr >> 8) & 0x7F;
- fprintf(outf, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
- cpu, msr, tj_max - dts, tj_max - dts2);
+ fprintf(outf, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", cpu, msr, tj_max - dts, tj_max - dts2);
}
return 0;
@@ -7632,7 +8116,7 @@ void probe_thermal(void)
for_all_cpus(print_thermal, ODD_COUNTERS);
}
-int get_cpu_type(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+int get_cpu_type(PER_THREAD_PARAMS)
{
unsigned int eax, ebx, ecx, edx;
@@ -7685,8 +8169,7 @@ void decode_misc_enable_msr(void)
msr & MSR_IA32_MISC_ENABLE_TM1 ? "" : "No-",
msr & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP ? "" : "No-",
msr & MSR_IA32_MISC_ENABLE_MWAIT ? "" : "No-",
- msr & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE ? "No-" : "",
- msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ? "No-" : "");
+ msr & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE ? "No-" : "", msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ? "No-" : "");
}
void decode_misc_feature_control(void)
@@ -7725,8 +8208,7 @@ void decode_misc_pwr_mgmt_msr(void)
if (!get_msr(base_cpu, MSR_MISC_PWR_MGMT, &msr))
fprintf(outf, "cpu%d: MSR_MISC_PWR_MGMT: 0x%08llx (%sable-EIST_Coordination %sable-EPB %sable-OOB)\n",
- base_cpu, msr,
- msr & (1 << 0) ? "DIS" : "EN", msr & (1 << 1) ? "EN" : "DIS", msr & (1 << 8) ? "EN" : "DIS");
+ base_cpu, msr, msr & (1 << 0) ? "DIS" : "EN", msr & (1 << 1) ? "EN" : "DIS", msr & (1 << 8) ? "EN" : "DIS");
}
/*
@@ -7779,66 +8261,60 @@ void print_dev_latency(void)
close(fd);
}
-static int has_instr_count_access(void)
+static int has_perf_instr_count_access(void)
{
int fd;
- int has_access;
if (no_perf)
return 0;
fd = open_perf_counter(base_cpu, PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS, -1, 0);
- has_access = fd != -1;
-
if (fd != -1)
close(fd);
- if (!has_access)
+ if (fd == -1)
warnx("Failed to access %s. Some of the counters may not be available\n"
- "\tRun as root to enable them or use %s to disable the access explicitly",
- "instructions retired perf counter", "--no-perf");
+ "\tRun as root to enable them or use %s to disable the access explicitly", "perf instructions retired counter",
+ "'--hide IPC' or '--no-perf'");
- return has_access;
+ return (fd != -1);
}
-int add_rapl_perf_counter_(int cpu, struct rapl_counter_info_t *rci, const struct rapl_counter_arch_info *cai,
- double *scale_, enum rapl_unit *unit_)
+int add_rapl_perf_counter(int cpu, struct rapl_counter_info_t *rci, const struct rapl_counter_arch_info *cai, double *scale_, enum rapl_unit *unit_)
{
+ int ret = -1;
+
if (no_perf)
return -1;
+ if (!cai->perf_name)
+ return -1;
+
const double scale = read_perf_scale(cai->perf_subsys, cai->perf_name);
if (scale == 0.0)
- return -1;
+ goto end;
const enum rapl_unit unit = read_perf_rapl_unit(cai->perf_subsys, cai->perf_name);
if (unit == RAPL_UNIT_INVALID)
- return -1;
+ goto end;
const unsigned int rapl_type = read_perf_type(cai->perf_subsys);
const unsigned int rapl_energy_pkg_config = read_perf_config(cai->perf_subsys, cai->perf_name);
- const int fd_counter =
- open_perf_counter(cpu, rapl_type, rapl_energy_pkg_config, rci->fd_perf, PERF_FORMAT_GROUP);
- if (fd_counter == -1)
- return -1;
+ ret = open_perf_counter(cpu, rapl_type, rapl_energy_pkg_config, rci->fd_perf, PERF_FORMAT_GROUP);
+ if (ret == -1)
+ goto end;
/* If it's the first counter opened, make it a group descriptor */
if (rci->fd_perf == -1)
- rci->fd_perf = fd_counter;
+ rci->fd_perf = ret;
*scale_ = scale;
*unit_ = unit;
- return fd_counter;
-}
-
-int add_rapl_perf_counter(int cpu, struct rapl_counter_info_t *rci, const struct rapl_counter_arch_info *cai,
- double *scale, enum rapl_unit *unit)
-{
- int ret = add_rapl_perf_counter_(cpu, rci, cai, scale, unit);
+end:
if (debug >= 2)
fprintf(stderr, "%s: %d (cpu: %d)\n", __func__, ret, cpu);
@@ -7854,16 +8330,21 @@ void linux_perf_init(void)
if (access("/proc/sys/kernel/perf_event_paranoid", F_OK))
return;
- if (BIC_IS_ENABLED(BIC_IPC) && has_aperf) {
+ if (BIC_IS_ENABLED(BIC_IPC) && cpuid_has_aperf_mperf) {
fd_instr_count_percpu = calloc(topo.max_cpu_num + 1, sizeof(int));
if (fd_instr_count_percpu == NULL)
err(-1, "calloc fd_instr_count_percpu");
}
+ if (BIC_IS_ENABLED(BIC_LLC_RPS)) {
+ fd_llc_percpu = calloc(topo.max_cpu_num + 1, sizeof(int));
+ if (fd_llc_percpu == NULL)
+ err(-1, "calloc fd_llc_percpu");
+ }
}
void rapl_perf_init(void)
{
- const unsigned int num_domains = (platform->has_per_core_rapl ? topo.max_core_id : topo.max_package_id) + 1;
+ const unsigned int num_domains = get_rapl_num_domains();
bool *domain_visited = calloc(num_domains, sizeof(bool));
rapl_counter_info_perdomain = calloc(num_domains, sizeof(*rapl_counter_info_perdomain));
@@ -7896,6 +8377,9 @@ void rapl_perf_init(void)
enum rapl_unit unit;
unsigned int next_domain;
+ if (!BIC_IS_ENABLED(cai->bic_number))
+ continue;
+
memset(domain_visited, 0, num_domains * sizeof(*domain_visited));
for (int cpu = 0; cpu < topo.max_cpu_num + 1; ++cpu) {
@@ -7904,8 +8388,7 @@ void rapl_perf_init(void)
continue;
/* Skip already seen and handled RAPL domains */
- next_domain =
- platform->has_per_core_rapl ? cpus[cpu].physical_core_id : cpus[cpu].physical_package_id;
+ next_domain = get_rapl_domain_id(cpu);
assert(next_domain < num_domains);
@@ -7919,27 +8402,37 @@ void rapl_perf_init(void)
struct rapl_counter_info_t *rci = &rapl_counter_info_perdomain[next_domain];
- /* Check if the counter is enabled and accessible */
- if (BIC_IS_ENABLED(cai->bic) && (platform->rapl_msrs & cai->feature_mask)) {
+ /*
+ * rapl_counter_arch_infos[] can have multiple entries describing the same
+ * counter, to accommodate differences between platforms/vendors.
+ * E.g. rapl_counter_arch_infos[0] and rapl_counter_arch_infos[1] share the
+ * same perf_subsys and perf_name, but use different MSR addresses:
+ * rapl_counter_arch_infos[0] is for Intel and rapl_counter_arch_infos[1]
+ * is for AMD.
+ * In this case, multiple rapl_counter_arch_infos[] entries may be probed
+ * merely because their perf/msr descriptions are duplicates that are valid.
+ *
+ * Thus a check is needed to avoid re-probing the same counters.
+ */
+ if (rci->source[cai->rci_index] != COUNTER_SOURCE_NONE)
+ break;
- /* Use perf API for this counter */
- if (!no_perf && cai->perf_name
- && add_rapl_perf_counter(cpu, rci, cai, &scale, &unit) != -1) {
- rci->source[cai->rci_index] = COUNTER_SOURCE_PERF;
- rci->scale[cai->rci_index] = scale * cai->compat_scale;
- rci->unit[cai->rci_index] = unit;
- rci->flags[cai->rci_index] = cai->flags;
-
- /* Use MSR for this counter */
- } else if (!no_msr && cai->msr && probe_rapl_msr(cpu, cai->msr, cai->rci_index) == 0) {
- rci->source[cai->rci_index] = COUNTER_SOURCE_MSR;
- rci->msr[cai->rci_index] = cai->msr;
- rci->msr_mask[cai->rci_index] = cai->msr_mask;
- rci->msr_shift[cai->rci_index] = cai->msr_shift;
- rci->unit[cai->rci_index] = RAPL_UNIT_JOULES;
- rci->scale[cai->rci_index] = *cai->platform_rapl_msr_scale * cai->compat_scale;
- rci->flags[cai->rci_index] = cai->flags;
- }
+ /* Use perf API for this counter */
+ if (add_rapl_perf_counter(cpu, rci, cai, &scale, &unit) != -1) {
+ rci->source[cai->rci_index] = COUNTER_SOURCE_PERF;
+ rci->scale[cai->rci_index] = scale * cai->compat_scale;
+ rci->unit[cai->rci_index] = unit;
+ rci->flags[cai->rci_index] = cai->flags;
+
+ /* Use MSR for this counter */
+ } else if (add_rapl_msr_counter(cpu, cai) >= 0) {
+ rci->source[cai->rci_index] = COUNTER_SOURCE_MSR;
+ rci->msr[cai->rci_index] = cai->msr;
+ rci->msr_mask[cai->rci_index] = cai->msr_mask;
+ rci->msr_shift[cai->rci_index] = cai->msr_shift;
+ rci->unit[cai->rci_index] = RAPL_UNIT_JOULES;
+ rci->scale[cai->rci_index] = *cai->platform_rapl_msr_scale * cai->compat_scale;
+ rci->flags[cai->rci_index] = cai->flags;
}
if (rci->source[cai->rci_index] != COUNTER_SOURCE_NONE)
@@ -7948,7 +8441,7 @@ void rapl_perf_init(void)
/* If any CPU has access to the counter, make it present */
if (has_counter)
- BIC_PRESENT(cai->bic);
+ BIC_PRESENT(cai->bic_number);
}
free(domain_visited);
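The perf branch above depends on event metadata that the kernel exports through sysfs. A sketch of how a RAPL event's scale can be fetched, following the event_source sysfs layout; rapl_scale() is an illustrative helper, not the read_perf_scale() used in this patch:

    #include <stdio.h>

    /* e.g. subsys "power", event "energy-pkg" -> scale in Joules per count */
    static double rapl_scale(const char *subsys, const char *event)
    {
            char path[128];
            double scale = 0.0;
            FILE *fp;

            snprintf(path, sizeof(path),
                     "/sys/bus/event_source/devices/%s/events/%s.scale",
                     subsys, event);
            fp = fopen(path, "r");
            if (!fp)
                    return 0.0;
            if (fscanf(fp, "%lf", &scale) != 1)
                    scale = 0.0;
            fclose(fp);
            return scale;
    }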
@@ -7957,7 +8450,7 @@ void rapl_perf_init(void)
/* Assumes msr_counter_info is populated */
static int has_amperf_access(void)
{
- return msr_counter_arch_infos[MSR_ARCH_INFO_APERF_INDEX].present &&
+ return cpuid_has_aperf_mperf && msr_counter_arch_infos[MSR_ARCH_INFO_APERF_INDEX].present &&
msr_counter_arch_infos[MSR_ARCH_INFO_MPERF_INDEX].present;
}
@@ -7972,65 +8465,63 @@ int *get_cstate_perf_group_fd(struct cstate_counter_info_t *cci, const char *gro
return NULL;
}
-int add_cstate_perf_counter_(int cpu, struct cstate_counter_info_t *cci, const struct cstate_counter_arch_info *cai)
+int add_cstate_perf_counter(int cpu, struct cstate_counter_info_t *cci, const struct cstate_counter_arch_info *cai)
{
+ int ret = -1;
+
if (no_perf)
return -1;
+ if (!cai->perf_name)
+ return -1;
+
int *pfd_group = get_cstate_perf_group_fd(cci, cai->perf_subsys);
if (pfd_group == NULL)
- return -1;
+ goto end;
const unsigned int type = read_perf_type(cai->perf_subsys);
const unsigned int config = read_perf_config(cai->perf_subsys, cai->perf_name);
- const int fd_counter = open_perf_counter(cpu, type, config, *pfd_group, PERF_FORMAT_GROUP);
+ ret = open_perf_counter(cpu, type, config, *pfd_group, PERF_FORMAT_GROUP);
- if (fd_counter == -1)
- return -1;
+ if (ret == -1)
+ goto end;
/* If it's the first counter opened, make it a group descriptor */
if (*pfd_group == -1)
- *pfd_group = fd_counter;
-
- return fd_counter;
-}
-
-int add_cstate_perf_counter(int cpu, struct cstate_counter_info_t *cci, const struct cstate_counter_arch_info *cai)
-{
- int ret = add_cstate_perf_counter_(cpu, cci, cai);
+ *pfd_group = ret;
+end:
if (debug >= 2)
fprintf(stderr, "%s: %d (cpu: %d)\n", __func__, ret, cpu);
return ret;
}
-int add_msr_perf_counter_(int cpu, struct msr_counter_info_t *cci, const struct msr_counter_arch_info *cai)
+int add_msr_perf_counter(int cpu, struct msr_counter_info_t *cci, const struct msr_counter_arch_info *cai)
{
+ int ret = -1;
+
if (no_perf)
return -1;
+ if (!cai->perf_name)
+ return -1;
+
const unsigned int type = read_perf_type(cai->perf_subsys);
const unsigned int config = read_perf_config(cai->perf_subsys, cai->perf_name);
- const int fd_counter = open_perf_counter(cpu, type, config, cci->fd_perf, PERF_FORMAT_GROUP);
+ ret = open_perf_counter(cpu, type, config, cci->fd_perf, PERF_FORMAT_GROUP);
- if (fd_counter == -1)
- return -1;
+ if (ret == -1)
+ goto end;
/* If it's the first counter opened, make it a group descriptor */
if (cci->fd_perf == -1)
- cci->fd_perf = fd_counter;
-
- return fd_counter;
-}
-
-int add_msr_perf_counter(int cpu, struct msr_counter_info_t *cci, const struct msr_counter_arch_info *cai)
-{
- int ret = add_msr_perf_counter_(cpu, cci, cai);
+ cci->fd_perf = ret;
+end:
if (debug)
fprintf(stderr, "%s: %s/%s: %d (cpu: %d)\n", __func__, cai->perf_subsys, cai->perf_name, ret, cpu);
@@ -8064,12 +8555,12 @@ void msr_perf_init_(void)
if (cai->needed) {
/* Use perf API for this counter */
- if (!no_perf && cai->perf_name && add_msr_perf_counter(cpu, cci, cai) != -1) {
+ if (add_msr_perf_counter(cpu, cci, cai) != -1) {
cci->source[cai->rci_index] = COUNTER_SOURCE_PERF;
cai->present = true;
/* Use MSR for this counter */
- } else if (!no_msr && cai->msr && probe_rapl_msr(cpu, cai->msr, cai->rci_index) == 0) {
+ } else if (add_msr_counter(cpu, cai->msr) >= 0) {
cci->source[cai->rci_index] = COUNTER_SOURCE_MSR;
cci->msr[cai->rci_index] = cai->msr;
cci->msr_mask[cai->rci_index] = cai->msr_mask;
@@ -8171,19 +8662,18 @@ void cstate_perf_init_(bool soft_c1)
if (!per_core && pkg_visited[pkg_id])
continue;
- const bool counter_needed = BIC_IS_ENABLED(cai->bic) ||
+ const bool counter_needed = BIC_IS_ENABLED(cai->bic_number) ||
(soft_c1 && (cai->flags & CSTATE_COUNTER_FLAG_SOFT_C1_DEPENDENCY));
const bool counter_supported = (platform->supported_cstates & cai->feature_mask);
if (counter_needed && counter_supported) {
/* Use perf API for this counter */
- if (!no_perf && cai->perf_name && add_cstate_perf_counter(cpu, cci, cai) != -1) {
+ if (add_cstate_perf_counter(cpu, cci, cai) != -1) {
cci->source[cai->rci_index] = COUNTER_SOURCE_PERF;
/* Use MSR for this counter */
- } else if (!no_msr && cai->msr && pkg_cstate_limit >= cai->pkg_cstate_limit
- && probe_rapl_msr(cpu, cai->msr, cai->rci_index) == 0) {
+ } else if (pkg_cstate_limit >= cai->pkg_cstate_limit && add_msr_counter(cpu, cai->msr) >= 0) {
cci->source[cai->rci_index] = COUNTER_SOURCE_MSR;
cci->msr[cai->rci_index] = cai->msr;
}
@@ -8198,7 +8688,7 @@ void cstate_perf_init_(bool soft_c1)
/* If any CPU has access to the counter, make it present */
if (has_counter)
- BIC_PRESENT(cai->bic);
+ BIC_PRESENT(cai->bic_number);
}
free(cores_visited);
@@ -8301,8 +8791,7 @@ void process_cpuid()
hygon_genuine = 1;
if (!quiet)
- fprintf(outf, "CPUID(0): %.4s%.4s%.4s 0x%x CPUID levels\n",
- (char *)&ebx, (char *)&edx, (char *)&ecx, max_level);
+ fprintf(outf, "CPUID(0): %.4s%.4s%.4s 0x%x CPUID levels\n", (char *)&ebx, (char *)&edx, (char *)&ecx, max_level);
__cpuid(1, fms, ebx, ecx, edx);
family = (fms >> 8) & 0xf;
@@ -8331,8 +8820,7 @@ void process_cpuid()
__cpuid(0x80000000, max_extended_level, ebx, ecx, edx);
if (!quiet) {
- fprintf(outf, "CPUID(1): family:model:stepping 0x%x:%x:%x (%d:%d:%d)",
- family, model, stepping, family, model, stepping);
+ fprintf(outf, "CPUID(1): family:model:stepping 0x%x:%x:%x (%d:%d:%d)", family, model, stepping, family, model, stepping);
if (ucode_patch_valid)
fprintf(outf, " microcode 0x%x", (unsigned int)((ucode_patch >> 32) & 0xFFFFFFFF));
fputc('\n', outf);
@@ -8346,8 +8834,7 @@ void process_cpuid()
ecx_flags & (1 << 8) ? "TM2" : "-",
edx_flags & (1 << 4) ? "TSC" : "-",
edx_flags & (1 << 5) ? "MSR" : "-",
- edx_flags & (1 << 22) ? "ACPI-TM" : "-",
- edx_flags & (1 << 28) ? "HT" : "-", edx_flags & (1 << 29) ? "TM" : "-");
+ edx_flags & (1 << 22) ? "ACPI-TM" : "-", edx_flags & (1 << 28) ? "HT" : "-", edx_flags & (1 << 29) ? "TM" : "-");
}
probe_platform_features(family, model);
@@ -8371,7 +8858,7 @@ void process_cpuid()
*/
__cpuid(0x6, eax, ebx, ecx, edx);
- has_aperf = ecx & (1 << 0);
+ cpuid_has_aperf_mperf = ecx & (1 << 0);
do_dts = eax & (1 << 0);
if (do_dts)
BIC_PRESENT(BIC_CoreTmp);
@@ -8389,14 +8876,13 @@ void process_cpuid()
if (!quiet)
fprintf(outf, "CPUID(6): %sAPERF, %sTURBO, %sDTS, %sPTM, %sHWP, "
"%sHWPnotify, %sHWPwindow, %sHWPepp, %sHWPpkg, %sEPB\n",
- has_aperf ? "" : "No-",
+ cpuid_has_aperf_mperf ? "" : "No-",
has_turbo ? "" : "No-",
do_dts ? "" : "No-",
do_ptm ? "" : "No-",
has_hwp ? "" : "No-",
has_hwp_notify ? "" : "No-",
- has_hwp_activity_window ? "" : "No-",
- has_hwp_epp ? "" : "No-", has_hwp_pkg ? "" : "No-", has_epb ? "" : "No-");
+ has_hwp_activity_window ? "" : "No-", has_hwp_epp ? "" : "No-", has_hwp_pkg ? "" : "No-", has_epb ? "" : "No-");
if (!quiet)
decode_misc_enable_msr();
@@ -8430,8 +8916,7 @@ void process_cpuid()
if (ebx_tsc != 0) {
if (!quiet && (ebx != 0))
- fprintf(outf, "CPUID(0x15): eax_crystal: %d ebx_tsc: %d ecx_crystal_hz: %d\n",
- eax_crystal, ebx_tsc, crystal_hz);
+ fprintf(outf, "CPUID(0x15): eax_crystal: %d ebx_tsc: %d ecx_crystal_hz: %d\n", eax_crystal, ebx_tsc, crystal_hz);
if (crystal_hz == 0)
crystal_hz = platform->crystal_freq;
@@ -8463,11 +8948,10 @@ void process_cpuid()
tsc_tweak = base_hz / tsc_hz;
if (!quiet)
- fprintf(outf, "CPUID(0x16): base_mhz: %d max_mhz: %d bus_mhz: %d\n",
- base_mhz, max_mhz, bus_mhz);
+ fprintf(outf, "CPUID(0x16): base_mhz: %d max_mhz: %d bus_mhz: %d\n", base_mhz, max_mhz, bus_mhz);
}
- if (has_aperf)
+ if (cpuid_has_aperf_mperf)
aperf_mperf_multiplier = platform->need_perf_multiplier ? 1024 : 1;
BIC_PRESENT(BIC_IRQ);
@@ -8519,6 +9003,62 @@ void probe_pm_features(void)
decode_misc_feature_control();
}
+/* perf_llc_probe
+ *
+ * return 1 on success, else 0
+ */
+int has_perf_llc_access(void)
+{
+ int fd;
+
+ if (no_perf)
+ return 0;
+
+ fd = open_perf_counter(base_cpu, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_REFERENCES, -1, PERF_FORMAT_GROUP);
+ if (fd != -1)
+ close(fd);
+
+ if (fd == -1)
+ warnx("Failed to access %s. Some of the counters may not be available\n"
+ "\tRun as root to enable them or use %s to disable the access explicitly", "perf LLC counters", "'--hide LLC' or '--no-perf'");
+
+ return (fd != -1);
+}
+
+void perf_llc_init(void)
+{
+ int cpu;
+ int retval;
+
+ if (no_perf)
+ return;
+ if (!(BIC_IS_ENABLED(BIC_LLC_RPS) && BIC_IS_ENABLED(BIC_LLC_HIT)))
+ return;
+
+ for (cpu = 0; cpu <= topo.max_cpu_num; ++cpu) {
+
+ if (cpu_is_not_allowed(cpu))
+ continue;
+
+ assert(fd_llc_percpu != 0);
+ fd_llc_percpu[cpu] = open_perf_counter(cpu, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_REFERENCES, -1, PERF_FORMAT_GROUP);
+ if (fd_llc_percpu[cpu] == -1) {
+ warnx("%s: perf REFS: failed to open counter on cpu%d", __func__, cpu);
+ free_fd_llc_percpu();
+ return;
+ }
+ assert(fd_llc_percpu != 0);
+ retval = open_perf_counter(cpu, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_MISSES, fd_llc_percpu[cpu], PERF_FORMAT_GROUP);
+ if (retval == -1) {
+ warnx("%s: perf MISS: failed to open counter on cpu%d", __func__, cpu);
+ free_fd_llc_percpu();
+ return;
+ }
+ }
+ BIC_PRESENT(BIC_LLC_RPS);
+ BIC_PRESENT(BIC_LLC_HIT);
+}
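Once perf_llc_init() has both events in one group, a single read() on the group leader returns both counts. A sketch of consuming that buffer, assuming read_format is exactly PERF_FORMAT_GROUP (no time-enabled/running or ID fields); the struct and helper names are illustrative:

    #include <stdint.h>
    #include <unistd.h>

    struct llc_group_read {
            uint64_t nr;            /* number of counters in the group: 2 */
            uint64_t references;    /* PERF_COUNT_HW_CACHE_REFERENCES */
            uint64_t misses;        /* PERF_COUNT_HW_CACHE_MISSES */
    };

    static double llc_hit_ratio(int fd_group_leader)
    {
            struct llc_group_read r;

            if (read(fd_group_leader, &r, sizeof(r)) != sizeof(r) || r.nr != 2)
                    return -1.0;
            if (r.references == 0)
                    return 0.0;
            return (double)(r.references - r.misses) / (double)r.references;
    }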
+
/*
* in /dev/cpu/ return success for names that are numbers
 * i.e. filter out ".", "..", "microcode".
@@ -8694,6 +9234,11 @@ void topology_probe(bool startup)
if (cpus[i].die_id > topo.max_die_id)
topo.max_die_id = cpus[i].die_id;
+ /* get l3 information */
+ cpus[i].l3_id = get_l3_id(i);
+ if (cpus[i].l3_id > topo.max_l3_id)
+ topo.max_l3_id = cpus[i].l3_id;
+
/* get numa node information */
cpus[i].physical_node_id = get_physical_node_id(&cpus[i]);
if (cpus[i].physical_node_id > topo.max_node_num)
@@ -8726,6 +9271,9 @@ void topology_probe(bool startup)
if (!summary_only && topo.num_die > 1)
BIC_PRESENT(BIC_Die);
+ if (!summary_only && topo.max_l3_id > 0)
+ BIC_PRESENT(BIC_L3);
+
topo.num_packages = max_package_id + 1;
if (debug > 1)
fprintf(outf, "max_package_id %d, sizing for %d packages\n", max_package_id, topo.num_packages);
@@ -8749,8 +9297,8 @@ void topology_probe(bool startup)
if (cpu_is_not_present(i))
continue;
fprintf(outf,
- "cpu %d pkg %d die %d node %d lnode %d core %d thread %d\n",
- i, cpus[i].physical_package_id, cpus[i].die_id,
+ "cpu %d pkg %d die %d l3 %d node %d lnode %d core %d thread %d\n",
+ i, cpus[i].physical_package_id, cpus[i].die_id, cpus[i].l3_id,
cpus[i].physical_node_id, cpus[i].logical_node_id, cpus[i].physical_core_id, cpus[i].thread_id);
}
@@ -8805,7 +9353,6 @@ void init_counter(struct thread_data *thread_base, struct core_data *core_base,
int thread_id = cpus[cpu_id].thread_id;
struct thread_data *t;
struct core_data *c;
- struct pkg_data *p;
/* Workaround for systems where physical_node_id==-1
* and logical_node_id==(-1 - topo.num_cpus)
@@ -8815,18 +9362,18 @@ void init_counter(struct thread_data *thread_base, struct core_data *core_base,
t = GET_THREAD(thread_base, thread_id, core_id, node_id, pkg_id);
c = GET_CORE(core_base, core_id, node_id, pkg_id);
- p = GET_PKG(pkg_base, pkg_id);
t->cpu_id = cpu_id;
if (!cpu_is_not_allowed(cpu_id)) {
+
if (c->base_cpu < 0)
c->base_cpu = t->cpu_id;
- if (p->base_cpu < 0)
- p->base_cpu = t->cpu_id;
+ if (pkg_base[pkg_id].base_cpu < 0)
+ pkg_base[pkg_id].base_cpu = t->cpu_id;
}
c->core_id = core_id;
- p->package_id = pkg_id;
+ pkg_base[pkg_id].package_id = pkg_id;
}
int initialize_counters(int cpu_id)
@@ -8866,7 +9413,7 @@ void allocate_irq_buffers(void)
err(-1, "calloc %d NMI", topo.max_cpu_num + 1);
}
-int update_topo(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+int update_topo(PER_THREAD_PARAMS)
{
topo.allowed_cpus++;
if ((int)t->cpu_id == c->base_cpu)
@@ -8924,7 +9471,7 @@ bool has_added_counters(void)
void check_msr_access(void)
{
- check_dev_msr();
+ check_msr_driver();
check_msr_permission();
if (no_msr)
@@ -8933,8 +9480,16 @@ void check_msr_access(void)
void check_perf_access(void)
{
- if (no_perf || !BIC_IS_ENABLED(BIC_IPC) || !has_instr_count_access())
- bic_enabled &= ~BIC_IPC;
+ if (BIC_IS_ENABLED(BIC_IPC))
+ if (!has_perf_instr_count_access())
+ no_perf = 1;
+
+ if (BIC_IS_ENABLED(BIC_LLC_RPS) || BIC_IS_ENABLED(BIC_LLC_HIT))
+ if (!has_perf_llc_access())
+ no_perf = 1;
+
+ if (no_perf)
+ bic_disable_perf_access();
}
bool perf_has_hybrid_devices(void)
@@ -9044,22 +9599,20 @@ int added_perf_counters_init_(struct perf_counter_info *pinfo)
perf_device = "cpu_atom";
break;
- default: /* Don't change, we will probably fail and report a problem soon. */
+ default: /* Don't change, we will probably fail and report a problem soon. */
break;
}
}
perf_type = read_perf_type(perf_device);
if (perf_type == (unsigned int)-1) {
- warnx("%s: perf/%s/%s: failed to read %s",
- __func__, perf_device, pinfo->event, "type");
+ warnx("%s: perf/%s/%s: failed to read %s", __func__, perf_device, pinfo->event, "type");
continue;
}
perf_config = read_perf_config(perf_device, pinfo->event);
if (perf_config == (unsigned int)-1) {
- warnx("%s: perf/%s/%s: failed to read %s",
- __func__, perf_device, pinfo->event, "config");
+ warnx("%s: perf/%s/%s: failed to read %s", __func__, perf_device, pinfo->event, "config");
continue;
}
@@ -9070,8 +9623,7 @@ int added_perf_counters_init_(struct perf_counter_info *pinfo)
fd_perf = open_perf_counter(cpu, perf_type, perf_config, -1, 0);
if (fd_perf == -1) {
- warnx("%s: perf/%s/%s: failed to open counter on cpu%d",
- __func__, perf_device, pinfo->event, cpu);
+ warnx("%s: perf/%s/%s: failed to open counter on cpu%d", __func__, perf_device, pinfo->event, cpu);
continue;
}
@@ -9080,8 +9632,7 @@ int added_perf_counters_init_(struct perf_counter_info *pinfo)
pinfo->scale = perf_scale;
if (debug)
- fprintf(stderr, "Add perf/%s/%s cpu%d: %d\n",
- perf_device, pinfo->event, cpu, pinfo->fd_perf_per_domain[next_domain]);
+ fprintf(stderr, "Add perf/%s/%s cpu%d: %d\n", perf_device, pinfo->event, cpu, pinfo->fd_perf_per_domain[next_domain]);
}
pinfo = pinfo->next;
@@ -9154,7 +9705,7 @@ struct pmt_mmio *pmt_mmio_open(unsigned int target_guid)
return NULL;
}
- for ( ; entry != NULL; entry = pmt_diriter_next(&pmt_iter)) {
+ for (; entry != NULL; entry = pmt_diriter_next(&pmt_iter)) {
if (fstatat(dirfd(pmt_iter.dir), entry->d_name, &st, 0) == -1)
break;
@@ -9395,8 +9946,7 @@ int pmt_add_counter(unsigned int guid, unsigned int seq, const char *name, enum
}
if (conflict) {
- fprintf(stderr, "%s: conflicting parameters for the PMT counter with the same name %s\n",
- __func__, name);
+ fprintf(stderr, "%s: conflicting parameters for the PMT counter with the same name %s\n", __func__, name);
exit(1);
}
@@ -9439,8 +9989,7 @@ void pmt_init(void)
 * CWF with newer firmware might require a PMT_TYPE_XTAL_TIME instead of PMT_TYPE_TCORE_CLOCK.
*/
pmt_add_counter(PMT_CWF_MC1E_GUID, seq, "CPU%c1e", PMT_TYPE_TCORE_CLOCK,
- PMT_COUNTER_CWF_MC1E_LSB, PMT_COUNTER_CWF_MC1E_MSB, offset, SCOPE_CPU,
- FORMAT_DELTA, cpu_num, PMT_OPEN_TRY);
+ PMT_COUNTER_CWF_MC1E_LSB, PMT_COUNTER_CWF_MC1E_MSB, offset, SCOPE_CPU, FORMAT_DELTA, cpu_num, PMT_OPEN_TRY);
/*
* Rather complex logic for each time we go to the next loop iteration,
@@ -9490,6 +10039,7 @@ void turbostat_init()
linux_perf_init();
rapl_perf_init();
cstate_perf_init();
+ perf_llc_init();
added_perf_counters_init();
pmt_init();
@@ -9504,8 +10054,8 @@ void turbostat_init()
* disable more BICs, since it can't be reported accurately.
*/
if (platform->enable_tsc_tweak && !has_base_hz) {
- bic_enabled &= ~BIC_Busy;
- bic_enabled &= ~BIC_Bzy_MHz;
+ CLR_BIC(BIC_Busy, &bic_enabled);
+ CLR_BIC(BIC_Bzy_MHz, &bic_enabled);
}
}
@@ -9563,6 +10113,7 @@ int fork_it(char **argv)
timersub(&tv_odd, &tv_even, &tv_delta);
if (for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS))
fprintf(outf, "%s: Counter reset detected\n", progname);
+ delta_platform(&platform_counters_odd, &platform_counters_even);
compute_average(EVEN_COUNTERS);
format_all_counters(EVEN_COUNTERS);
@@ -9594,7 +10145,7 @@ int get_and_dump_counters(void)
void print_version()
{
- fprintf(outf, "turbostat version 2025.04.06 - Len Brown <lenb@kernel.org>\n");
+ fprintf(outf, "turbostat version 2025.12.02 - Len Brown <lenb@kernel.org>\n");
}
#define COMMAND_LINE_SIZE 2048
@@ -9634,8 +10185,7 @@ struct msr_counter *find_msrp_by_name(struct msr_counter *head, char *name)
}
int add_counter(unsigned int msr_num, char *path, char *name,
- unsigned int width, enum counter_scope scope,
- enum counter_type type, enum counter_format format, int flags, int id)
+ unsigned int width, enum counter_scope scope, enum counter_type type, enum counter_format format, int flags, int id)
{
struct msr_counter *msrp;
@@ -9744,9 +10294,7 @@ int add_counter(unsigned int msr_num, char *path, char *name,
struct perf_counter_info *make_perf_counter_info(const char *perf_device,
const char *perf_event,
const char *name,
- unsigned int width,
- enum counter_scope scope,
- enum counter_type type, enum counter_format format)
+ unsigned int width, enum counter_scope scope, enum counter_type type, enum counter_format format)
{
struct perf_counter_info *pinfo;
@@ -9821,8 +10369,7 @@ int add_perf_counter(const char *perf_device, const char *perf_event, const char
// FIXME: we might not have debug here yet
if (debug)
- fprintf(stderr, "%s: %s/%s, name: %s, scope%d\n",
- __func__, pinfo->device, pinfo->event, pinfo->name, pinfo->scope);
+ fprintf(stderr, "%s: %s/%s, name: %s, scope%d\n", __func__, pinfo->device, pinfo->event, pinfo->name, pinfo->scope);
return 0;
}
@@ -9891,6 +10438,10 @@ void parse_add_command_msr(char *add_command)
format = FORMAT_RAW;
goto next;
}
+ if (!strncmp(add_command, "average", strlen("average"))) {
+ format = FORMAT_AVERAGE;
+ goto next;
+ }
if (!strncmp(add_command, "delta", strlen("delta"))) {
format = FORMAT_DELTA;
goto next;
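With FORMAT_AVERAGE accepted here, an added counter can be reported as an average instead of a delta. Since parse_add_command_msr() matches each comma-separated token independently, keyword order is flexible; an illustrative invocation (the MSR choice and counter name are arbitrary):

    # turbostat --add msr0x19c,core,average,ThermStatus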
@@ -9987,8 +10538,7 @@ int pmt_parse_from_path(const char *target_path, unsigned int *out_guid, unsigne
pmt_diriter_init(&pmt_iter);
- for (dirname = pmt_diriter_begin(&pmt_iter, SYSFS_TELEM_PATH); dirname != NULL;
- dirname = pmt_diriter_next(&pmt_iter)) {
+ for (dirname = pmt_diriter_begin(&pmt_iter, SYSFS_TELEM_PATH); dirname != NULL; dirname = pmt_diriter_next(&pmt_iter)) {
fd_telem_dir = openat(dirfd(pmt_iter.dir), dirname->d_name, O_RDONLY | O_DIRECTORY);
if (fd_telem_dir == -1)
@@ -10000,8 +10550,7 @@ int pmt_parse_from_path(const char *target_path, unsigned int *out_guid, unsigne
}
if (fstat(fd_telem_dir, &stat) == -1) {
- fprintf(stderr, "%s: Failed to stat %s directory: %s", __func__,
- dirname->d_name, strerror(errno));
+ fprintf(stderr, "%s: Failed to stat %s directory: %s", __func__, dirname->d_name, strerror(errno));
continue;
}
@@ -10050,7 +10599,7 @@ void parse_add_command_pmt(char *add_command)
unsigned int lsb;
unsigned int msb;
unsigned int guid;
- unsigned int seq = 0; /* By default, pick first file in a sequence with a given GUID. */
+ unsigned int seq = 0; /* By default, pick first file in a sequence with a given GUID. */
unsigned int domain_id;
enum counter_scope scope = 0;
enum pmt_datatype type = PMT_TYPE_RAW;
@@ -10097,8 +10646,7 @@ void parse_add_command_pmt(char *add_command)
}
if (!has_scope) {
- printf("%s: invalid value for scope. Expected cpu%%u, core%%u or package%%u.\n",
- __func__);
+ printf("%s: invalid value for scope. Expected cpu%%u, core%%u or package%%u.\n", __func__);
exit(1);
}
@@ -10163,13 +10711,18 @@ next:
has_format = true;
}
+ if (strcmp("average", format_name) == 0) {
+ format = FORMAT_AVERAGE;
+ has_format = true;
+ }
+
if (strcmp("delta", format_name) == 0) {
format = FORMAT_DELTA;
has_format = true;
}
if (!has_format) {
- fprintf(stderr, "%s: Invalid format %s. Expected raw or delta\n", __func__, format_name);
+ fprintf(stderr, "%s: Invalid format %s. Expected raw, average or delta\n", __func__, format_name);
exit(1);
}
}
@@ -10259,8 +10812,10 @@ int is_deferred_add(char *name)
int i;
for (i = 0; i < deferred_add_index; ++i)
- if (!strcmp(name, deferred_add_names[i]))
+ if (!strcmp(name, deferred_add_names[i])) {
+ deferred_add_consumed |= (1 << i);
return 1;
+ }
return 0;
}
@@ -10269,11 +10824,34 @@ int is_deferred_skip(char *name)
int i;
for (i = 0; i < deferred_skip_index; ++i)
- if (!strcmp(name, deferred_skip_names[i]))
+ if (!strcmp(name, deferred_skip_names[i])) {
+ deferred_skip_consumed |= (1 << i);
return 1;
+ }
return 0;
}
+void verify_deferred_consumed(void)
+{
+ int i;
+ int fail = 0;
+
+ for (i = 0; i < deferred_add_index; ++i) {
+ if (!(deferred_add_consumed & (1 << i))) {
+ warnx("Counter '%s' cannot be added.", deferred_add_names[i]);
+ fail++;
+ }
+ }
+ for (i = 0; i < deferred_skip_index; ++i) {
+ if (!(deferred_skip_consumed & (1 << i))) {
+ warnx("Counter '%s' cannot be skipped.", deferred_skip_names[i]);
+ fail++;
+ }
+ }
+ if (fail)
+ exit(-EINVAL);
+}
+
void probe_cpuidle_residency(void)
{
char path[64];
@@ -10283,9 +10861,6 @@ void probe_cpuidle_residency(void)
int min_state = 1024, max_state = 0;
char *sp;
- if (!DO_BIC(BIC_pct_idle))
- return;
-
for (state = 10; state >= 0; --state) {
sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name", base_cpu, state);
@@ -10314,7 +10889,7 @@ void probe_cpuidle_residency(void)
if (is_deferred_skip(name_buf))
continue;
- add_counter(0, path, name_buf, 64, SCOPE_CPU, COUNTER_USEC, FORMAT_PERCENT, SYSFS_PERCPU, 0);
+ add_counter(0, path, name_buf, 32, SCOPE_CPU, COUNTER_USEC, FORMAT_PERCENT, SYSFS_PERCPU, 0);
if (state > max_state)
max_state = state;
@@ -10498,22 +11073,29 @@ void cmdline(int argc, char **argv)
no_perf = 1;
break;
case 'e':
- /* --enable specified counter */
- bic_enabled = bic_enabled | bic_lookup(optarg, SHOW_LIST);
+ /* --enable specified counter, without clearing the existing list */
+ bic_lookup(&bic_enabled, optarg, SHOW_LIST);
break;
case 'f':
force_load++;
break;
case 'd':
debug++;
- ENABLE_BIC(BIC_DISABLED_BY_DEFAULT);
+ bic_set_all(&bic_enabled);
break;
case 'H':
/*
* --hide: do not show those specified
* multiple invocations simply clear more bits in enabled mask
*/
- bic_enabled &= ~bic_lookup(optarg, HIDE_LIST);
+ {
+ cpu_set_t bic_group_hide;
+
+ BIC_INIT(&bic_group_hide);
+
+ bic_lookup(&bic_group_hide, optarg, HIDE_LIST);
+ bic_clear_bits(&bic_enabled, &bic_group_hide);
+ }
break;
case 'h':
default:
@@ -10537,7 +11119,7 @@ void cmdline(int argc, char **argv)
rapl_joules++;
break;
case 'l':
- ENABLE_BIC(BIC_DISABLED_BY_DEFAULT);
+ bic_set_all(&bic_enabled);
list_header_only++;
quiet++;
break;
@@ -10574,9 +11156,8 @@ void cmdline(int argc, char **argv)
* subsequent invocations can add to it.
*/
if (shown == 0)
- bic_enabled = bic_lookup(optarg, SHOW_LIST);
- else
- bic_enabled |= bic_lookup(optarg, SHOW_LIST);
+ BIC_INIT(&bic_enabled);
+ bic_lookup(&bic_enabled, optarg, SHOW_LIST);
shown = 1;
break;
case 'S':
@@ -10613,6 +11194,8 @@ int main(int argc, char **argv)
{
int fd, ret;
+ bic_groups_init();
+
fd = open("/sys/fs/cgroup/cgroup.procs", O_WRONLY);
if (fd < 0)
goto skip_cgroup_setting;
@@ -10635,6 +11218,8 @@ skip_cgroup_setting:
probe_cpuidle_residency();
probe_cpuidle_counts();
+ verify_deferred_consumed();
+
if (!getuid())
set_rlimit();
diff --git a/tools/power/x86/x86_energy_perf_policy/Makefile b/tools/power/x86/x86_energy_perf_policy/Makefile
index 666b325a62a2..d18284667400 100644
--- a/tools/power/x86/x86_energy_perf_policy/Makefile
+++ b/tools/power/x86/x86_energy_perf_policy/Makefile
@@ -1,8 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
CC = $(CROSS_COMPILE)gcc
-BUILD_OUTPUT := $(CURDIR)
+BUILD_OUTPUT := $(CURDIR)
PREFIX := /usr
DESTDIR :=
+DAY := $(shell date +%Y.%m.%d)
+SNAPSHOT = x86_energy_perf_policy-$(DAY)
+
+
ifeq ("$(origin O)", "command line")
BUILD_OUTPUT := $(O)
@@ -27,3 +31,26 @@ install : x86_energy_perf_policy
install -d $(DESTDIR)$(PREFIX)/share/man/man8
install -m 644 x86_energy_perf_policy.8 $(DESTDIR)$(PREFIX)/share/man/man8
+snapshot: x86_energy_perf_policy
+ @rm -rf $(SNAPSHOT)
+ @mkdir $(SNAPSHOT)
+ @cp x86_energy_perf_policy Makefile x86_energy_perf_policy.c x86_energy_perf_policy.8 $(SNAPSHOT)
+
+ @sed -e 's/^#include <linux\/bits.h>/#include "bits.h"/' -e 's/u64/unsigned long long/' ../../../../arch/x86/include/asm/msr-index.h > $(SNAPSHOT)/msr-index.h
+ @echo '#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))' >> $(SNAPSHOT)/msr-index.h
+ @echo "#define BIT(x) (1 << (x))" > $(SNAPSHOT)/bits.h
+ @echo "#define BIT_ULL(nr) (1ULL << (nr))" >> $(SNAPSHOT)/bits.h
+ @echo "#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (sizeof(long) * 8 - 1 - (h))))" >> $(SNAPSHOT)/bits.h
+ @echo "#define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (sizeof(long long) * 8 - 1 - (h))))" >> $(SNAPSHOT)/bits.h
+
+ @echo '#define BUILD_BUG_ON(cond) do { enum { compile_time_check ## __COUNTER__ = 1/(!(cond)) }; } while (0)' > $(SNAPSHOT)/build_bug.h
+ @echo '#define __must_be_array(arr) 0' >> $(SNAPSHOT)/build_bug.h
+
+ @echo PWD=. > $(SNAPSHOT)/Makefile
+ @echo "CFLAGS += -DMSRHEADER='\"msr-index.h\"'" >> $(SNAPSHOT)/Makefile
+ @echo "CFLAGS += -DBUILD_BUG_HEADER='\"build_bug.h\"'" >> $(SNAPSHOT)/Makefile
+ @sed -e's/.*MSRHEADER.*//' Makefile >> $(SNAPSHOT)/Makefile
+
+ @rm -f $(SNAPSHOT).tar.gz
+ tar cvzf $(SNAPSHOT).tar.gz $(SNAPSHOT)
+
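Usage note: `make snapshot` builds the tool, stages the sources together with the generated stand-in copies of msr-index.h, bits.h and build_bug.h (so the result compiles outside a kernel tree), and emits a tarball named x86_energy_perf_policy-YYYY.MM.DD.tar.gz after the current date:

    $ make snapshot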
diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8 b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
index 78c6361898b1..0aa981c18e56 100644
--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
@@ -2,7 +2,7 @@
.\" Distributed under the GPL, Copyleft 1994.
.TH X86_ENERGY_PERF_POLICY 8
.SH NAME
-x86_energy_perf_policy \- Manage Energy vs. Performance Policy via x86 Model Specific Registers
+x86_energy_perf_policy \- Manage Energy vs. Performance Policy
.SH SYNOPSIS
.B x86_energy_perf_policy
.RB "[ options ] [ scope ] [field \ value]"
@@ -19,9 +19,14 @@ x86_energy_perf_policy \- Manage Energy vs. Performance Policy via x86 Model Spe
.SH DESCRIPTION
\fBx86_energy_perf_policy\fP
displays and updates energy-performance policy settings specific to
-Intel Architecture Processors. Settings are accessed via Model Specific Register (MSR)
-updates, no matter if the Linux cpufreq sub-system is enabled or not.
+Intel Architecture Processors. It summarizes settings available
+in standard Linux interfaces (e.g. cpufreq),
+and also decodes the underlying Model Specific Registers (MSRs).
+While \fBx86_energy_perf_policy\fP can manage energy-performance policy
+using only MSR access, it prefers standard
+Linux kernel interfaces, when they are available.
+.SH BACKGROUND
Policy in MSR_IA32_ENERGY_PERF_BIAS (EPB)
may affect a wide range of hardware decisions,
such as how aggressively the hardware enters and exits CPU idle states (C-states)
@@ -200,7 +205,9 @@ runs only as root.
.SH FILES
.ta
.nf
-/dev/cpu/*/msr
+EPB: /sys/devices/system/cpu/cpu*/power/energy_perf_bias
+EPP: /sys/devices/system/cpu/cpu*/cpufreq/energy_performance_preference
+MSR: /dev/cpu/*/msr
.fi
.SH "SEE ALSO"
.nf
diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
index ebda9c366b2b..ac37132207a4 100644
--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
@@ -4,7 +4,7 @@
* policy preference bias on recent X86 processors.
*/
/*
- * Copyright (c) 2010 - 2017 Intel Corporation.
+ * Copyright (c) 2010 - 2025 Intel Corporation.
* Len Brown <len.brown@intel.com>
*/
@@ -62,6 +62,7 @@ unsigned char turbo_update_value;
unsigned char update_hwp_epp;
unsigned char update_hwp_min;
unsigned char update_hwp_max;
+unsigned char hwp_limits_done_via_sysfs;
unsigned char update_hwp_desired;
unsigned char update_hwp_window;
unsigned char update_hwp_use_pkg;
@@ -94,6 +95,8 @@ unsigned int bdx_highest_ratio;
#define PATH_TO_CPU "/sys/devices/system/cpu/"
#define SYSFS_PATH_MAX 255
+static int use_android_msr_path;
+
/*
* maintain compatibility with original implementation, but don't document it:
*/
@@ -369,7 +372,7 @@ void validate_cpu_selected_set(void)
for (cpu = 0; cpu <= max_cpu_num; ++cpu) {
if (CPU_ISSET_S(cpu, cpu_setsize, cpu_selected_set))
if (!CPU_ISSET_S(cpu, cpu_setsize, cpu_present_set))
- errx(1, "Requested cpu% is not present", cpu);
+ errx(1, "Requested cpu%d is not present", cpu);
}
}
@@ -517,7 +520,7 @@ void for_packages(unsigned long long pkg_set, int (func)(int))
void print_version(void)
{
- printf("x86_energy_perf_policy 17.05.11 (C) Len Brown <len.brown@intel.com>\n");
+ printf("x86_energy_perf_policy 2025.11.22 Len Brown <lenb@kernel.org>\n");
}
void cmdline(int argc, char **argv)
@@ -630,7 +633,7 @@ void cmdline(int argc, char **argv)
*/
FILE *fopen_or_die(const char *path, const char *mode)
{
- FILE *filep = fopen(path, "r");
+ FILE *filep = fopen(path, mode);
if (!filep)
err(1, "%s: open failed", path);
@@ -644,7 +647,7 @@ void err_on_hypervisor(void)
char *buffer;
/* On VMs /proc/cpuinfo contains a "flags" entry for hypervisor */
- cpuinfo = fopen_or_die("/proc/cpuinfo", "ro");
+ cpuinfo = fopen_or_die("/proc/cpuinfo", "r");
buffer = malloc(4096);
if (!buffer) {
@@ -659,6 +662,11 @@ void err_on_hypervisor(void)
}
flags = strstr(buffer, "flags");
+ if (!flags) {
+ fclose(cpuinfo);
+ free(buffer);
+ err(1, "Failed to find 'flags' in /proc/cpuinfo");
+ }
rewind(cpuinfo);
fseek(cpuinfo, flags - buffer, SEEK_SET);
if (!fgets(buffer, 4096, cpuinfo)) {
@@ -683,10 +691,12 @@ int get_msr(int cpu, int offset, unsigned long long *msr)
char pathname[32];
int fd;
- sprintf(pathname, "/dev/cpu/%d/msr", cpu);
+ sprintf(pathname, use_android_msr_path ? "/dev/msr%d" : "/dev/cpu/%d/msr", cpu);
fd = open(pathname, O_RDONLY);
if (fd < 0)
- err(-1, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname);
+ err(-1, "%s open failed, try chown or chmod +r %s, or run as root",
+ pathname, use_android_msr_path ? "/dev/msr*" : "/dev/cpu/*/msr");
+
retval = pread(fd, msr, sizeof(*msr), offset);
if (retval != sizeof(*msr)) {
@@ -707,10 +717,11 @@ int put_msr(int cpu, int offset, unsigned long long new_msr)
int retval;
int fd;
- sprintf(pathname, "/dev/cpu/%d/msr", cpu);
+ sprintf(pathname, use_android_msr_path ? "/dev/msr%d" : "/dev/cpu/%d/msr", cpu);
fd = open(pathname, O_RDWR);
if (fd < 0)
- err(-1, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname);
+ err(-1, "%s open failed, try chown or chmod +r %s, or run as root",
+ pathname, use_android_msr_path ? "/dev/msr*" : "/dev/cpu/*/msr");
retval = pwrite(fd, &new_msr, sizeof(new_msr), offset);
if (retval != sizeof(new_msr))
@@ -809,7 +820,7 @@ void print_hwp_request_pkg(int pkg, struct msr_hwp_request *h, char *str)
h->hwp_min, h->hwp_max, h->hwp_desired, h->hwp_epp,
h->hwp_window, h->hwp_window & 0x7F, (h->hwp_window >> 7) & 0x7);
}
-void read_hwp_request(int cpu, struct msr_hwp_request *hwp_req, unsigned int msr_offset)
+void read_hwp_request_msr(int cpu, struct msr_hwp_request *hwp_req, unsigned int msr_offset)
{
unsigned long long msr;
@@ -823,7 +834,7 @@ void read_hwp_request(int cpu, struct msr_hwp_request *hwp_req, unsigned int msr
hwp_req->hwp_use_pkg = (((msr) >> 42) & 0x1);
}
-void write_hwp_request(int cpu, struct msr_hwp_request *hwp_req, unsigned int msr_offset)
+void write_hwp_request_msr(int cpu, struct msr_hwp_request *hwp_req, unsigned int msr_offset)
{
unsigned long long msr = 0;
@@ -843,7 +854,7 @@ void write_hwp_request(int cpu, struct msr_hwp_request *hwp_req, unsigned int ms
put_msr(cpu, msr_offset, msr);
}
-static int get_epb(int cpu)
+static int get_epb_sysfs(int cpu)
{
char path[SYSFS_PATH_MAX];
char linebuf[3];
@@ -865,7 +876,7 @@ static int get_epb(int cpu)
return (int)val;
}
-static int set_epb(int cpu, int val)
+static int set_epb_sysfs(int cpu, int val)
{
char path[SYSFS_PATH_MAX];
char linebuf[3];
@@ -895,14 +906,14 @@ int print_cpu_msrs(int cpu)
struct msr_hwp_cap cap;
int epb;
- epb = get_epb(cpu);
+ epb = get_epb_sysfs(cpu);
if (epb >= 0)
printf("cpu%d: EPB %u\n", cpu, (unsigned int) epb);
if (!has_hwp)
return 0;
- read_hwp_request(cpu, &req, MSR_HWP_REQUEST);
+ read_hwp_request_msr(cpu, &req, MSR_HWP_REQUEST);
print_hwp_request(cpu, &req, "");
read_hwp_cap(cpu, &cap, MSR_HWP_CAPABILITIES);
@@ -919,7 +930,7 @@ int print_pkg_msrs(int pkg)
if (!has_hwp)
return 0;
- read_hwp_request(first_cpu_in_pkg[pkg], &req, MSR_HWP_REQUEST_PKG);
+ read_hwp_request_msr(first_cpu_in_pkg[pkg], &req, MSR_HWP_REQUEST_PKG);
print_hwp_request_pkg(pkg, &req, "");
if (has_hwp_notify) {
@@ -951,8 +962,10 @@ int ratio_2_sysfs_khz(int ratio)
}
/*
 * If HWP is enabled and cpufreq sysfs attributes are present,
- * then update sysfs, so that it will not become
- * stale when we write to MSRs.
+ * then update via sysfs. The intel_pstate driver may modify (clip)
+ * this request, say, when HWP_CAP is outside of PLATFORM_INFO limits,
+ * and the driver-chosen value takes precedence.
+ *
* (intel_pstate's max_perf_pct and min_perf_pct will follow cpufreq,
* so we don't have to touch that.)
*/
@@ -1007,6 +1020,8 @@ int update_sysfs(int cpu)
if (update_hwp_max)
update_cpufreq_scaling_freq(1, cpu, req_update.hwp_max);
+ hwp_limits_done_via_sysfs = 1;
+
return 0;
}
@@ -1074,21 +1089,21 @@ int check_hwp_request_v_hwp_capabilities(int cpu, struct msr_hwp_request *req, s
return 0;
}
-int update_hwp_request(int cpu)
+int update_hwp_request_msr(int cpu)
{
struct msr_hwp_request req;
struct msr_hwp_cap cap;
int msr_offset = MSR_HWP_REQUEST;
- read_hwp_request(cpu, &req, msr_offset);
+ read_hwp_request_msr(cpu, &req, msr_offset);
if (debug)
print_hwp_request(cpu, &req, "old: ");
- if (update_hwp_min)
+ if (update_hwp_min && !hwp_limits_done_via_sysfs)
req.hwp_min = req_update.hwp_min;
- if (update_hwp_max)
+ if (update_hwp_max && !hwp_limits_done_via_sysfs)
req.hwp_max = req_update.hwp_max;
if (update_hwp_desired)
@@ -1111,15 +1126,15 @@ int update_hwp_request(int cpu)
verify_hwp_req_self_consistency(cpu, &req);
- write_hwp_request(cpu, &req, msr_offset);
+ write_hwp_request_msr(cpu, &req, msr_offset);
if (debug) {
- read_hwp_request(cpu, &req, msr_offset);
+ read_hwp_request_msr(cpu, &req, msr_offset);
print_hwp_request(cpu, &req, "new: ");
}
return 0;
}
-int update_hwp_request_pkg(int pkg)
+int update_hwp_request_pkg_msr(int pkg)
{
struct msr_hwp_request req;
struct msr_hwp_cap cap;
@@ -1127,7 +1142,7 @@ int update_hwp_request_pkg(int pkg)
int msr_offset = MSR_HWP_REQUEST_PKG;
- read_hwp_request(cpu, &req, msr_offset);
+ read_hwp_request_msr(cpu, &req, msr_offset);
if (debug)
print_hwp_request_pkg(pkg, &req, "old: ");
@@ -1155,10 +1170,10 @@ int update_hwp_request_pkg(int pkg)
verify_hwp_req_self_consistency(cpu, &req);
- write_hwp_request(cpu, &req, msr_offset);
+ write_hwp_request_msr(cpu, &req, msr_offset);
if (debug) {
- read_hwp_request(cpu, &req, msr_offset);
+ read_hwp_request_msr(cpu, &req, msr_offset);
print_hwp_request_pkg(pkg, &req, "new: ");
}
return 0;
@@ -1166,30 +1181,39 @@ int update_hwp_request_pkg(int pkg)
int enable_hwp_on_cpu(int cpu)
{
- unsigned long long msr;
+ unsigned long long old_msr, new_msr;
+
+ get_msr(cpu, MSR_PM_ENABLE, &old_msr);
+
+ if (old_msr & 1)
+ return 0; /* already enabled */
- get_msr(cpu, MSR_PM_ENABLE, &msr);
- put_msr(cpu, MSR_PM_ENABLE, 1);
+ new_msr = old_msr | 1;
+ put_msr(cpu, MSR_PM_ENABLE, new_msr);
if (verbose)
- printf("cpu%d: MSR_PM_ENABLE old: %d new: %d\n", cpu, (unsigned int) msr, 1);
+ printf("cpu%d: MSR_PM_ENABLE old: %llX new: %llX\n", cpu, old_msr, new_msr);
return 0;
}
-int update_cpu_msrs(int cpu)
+int update_cpu_epb_sysfs(int cpu)
{
- unsigned long long msr;
int epb;
- if (update_epb) {
- epb = get_epb(cpu);
- set_epb(cpu, new_epb);
+ epb = get_epb_sysfs(cpu);
+ set_epb_sysfs(cpu, new_epb);
- if (verbose)
- printf("cpu%d: ENERGY_PERF_BIAS old: %d new: %d\n",
- cpu, epb, (unsigned int) new_epb);
- }
+ if (verbose)
+ printf("cpu%d: ENERGY_PERF_BIAS old: %d new: %d\n",
+ cpu, epb, (unsigned int) new_epb);
+
+ return 0;
+}
+
+int update_cpu_msrs(int cpu)
+{
+ unsigned long long msr;
if (update_turbo) {
int turbo_is_present_and_disabled;
@@ -1224,7 +1248,7 @@ int update_cpu_msrs(int cpu)
if (!hwp_update_enabled())
return 0;
- update_hwp_request(cpu);
+ update_hwp_request_msr(cpu);
return 0;
}
@@ -1312,6 +1336,17 @@ void for_all_cpus_in_set(size_t set_size, cpu_set_t *cpu_set, int (func)(int))
if (CPU_ISSET_S(cpu_num, set_size, cpu_set))
func(cpu_num);
}
+int for_all_cpus_in_set_and(size_t set_size, cpu_set_t *cpu_set, int (func)(int))
+{
+ int cpu_num;
+ int retval = 1;
+
+ for (cpu_num = 0; cpu_num <= max_cpu_num; ++cpu_num)
+ if (CPU_ISSET_S(cpu_num, set_size, cpu_set))
+ retval &= func(cpu_num);
+
+ return retval;
+}
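Unlike the void for_all_cpus_in_set(), this variant AND-reduces the per-CPU return values: it yields 1 only if func returned 1 on every CPU in the set (and trivially 1 for an empty set). A sketch of the intended call pattern, mirroring how verify_hwp_is_enabled() below uses it; all_selected_cpus_have_hwp() is an illustrative wrapper:

    /* 1 iff HWP is enabled on every CPU the user selected */
    static int all_selected_cpus_have_hwp(void)
    {
            return for_all_cpus_in_set_and(cpu_setsize, cpu_selected_set,
                                           is_hwp_enabled_on_cpu);
    }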
void init_data_structures(void)
{
@@ -1326,21 +1361,38 @@ void init_data_structures(void)
for_all_proc_cpus(mark_cpu_present);
}
-/* clear has_hwp if it is not enable (or being enabled) */
+int is_hwp_enabled_on_cpu(int cpu_num)
+{
+ unsigned long long msr;
+ int retval;
+
+ /* MSR_PM_ENABLE[1] == 1 if HWP is enabled and MSRs visible */
+ get_msr(cpu_num, MSR_PM_ENABLE, &msr);
+ retval = (msr & 1);
+
+ if (verbose)
+ fprintf(stderr, "cpu%d: %sHWP\n", cpu_num, retval ? "" : "No-");
+
+ return retval;
+}
+/*
+ * verify_hwp_is_enabled()
+ *
+ * Clear has_hwp if the HWP feature is absent, or if any CPU in the
+ * selected set does not have HWP enabled.
+ */
void verify_hwp_is_enabled(void)
{
- unsigned long long msr;
+ int retval;
if (!has_hwp) /* set in early_cpuid() */
return;
- /* MSR_PM_ENABLE[1] == 1 if HWP is enabled and MSRs visible */
- get_msr(base_cpu, MSR_PM_ENABLE, &msr);
- if ((msr & 1) == 0) {
+ retval = for_all_cpus_in_set_and(cpu_setsize, cpu_selected_set, is_hwp_enabled_on_cpu);
+
+ if (retval == 0) {
fprintf(stderr, "HWP can be enabled using '--hwp-enable'\n");
has_hwp = 0;
- return;
}
}
@@ -1379,16 +1431,32 @@ void set_base_cpu(void)
err(-ENODEV, "No valid cpus found");
}
+static void probe_android_msr_path(void)
+{
+ struct stat sb;
+ char test_path[32];
+
+ sprintf(test_path, "/dev/msr%d", base_cpu);
+ if (stat(test_path, &sb) == 0)
+ use_android_msr_path = 1;
+}
void probe_dev_msr(void)
{
struct stat sb;
char pathname[32];
- sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
- if (stat(pathname, &sb))
- if (system("/sbin/modprobe msr > /dev/null 2>&1"))
- err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
+ probe_android_msr_path();
+
+ sprintf(pathname, use_android_msr_path ? "/dev/msr%d" : "/dev/cpu/%d/msr", base_cpu);
+ if (stat(pathname, &sb)) {
+ if (system("/sbin/modprobe msr > /dev/null 2>&1")) {
+ if (use_android_msr_path)
+ err(-5, "no /dev/msr0, Try \"# modprobe msr\" ");
+ else
+ err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
+ }
+ }
}
static void get_cpuid_or_exit(unsigned int leaf,
@@ -1505,6 +1573,7 @@ void parse_cpuid(void)
int main(int argc, char **argv)
{
set_base_cpu();
+
probe_dev_msr();
init_data_structures();
@@ -1551,10 +1620,13 @@ int main(int argc, char **argv)
/* update CPU set */
if (cpu_selected_set) {
+ if (update_epb)
+ for_all_cpus_in_set(cpu_setsize, cpu_selected_set, update_cpu_epb_sysfs);
for_all_cpus_in_set(cpu_setsize, cpu_selected_set, update_sysfs);
for_all_cpus_in_set(cpu_setsize, cpu_selected_set, update_cpu_msrs);
+
} else if (pkg_selected_set)
- for_packages(pkg_selected_set, update_hwp_request_pkg);
+ for_packages(pkg_selected_set, update_hwp_request_pkg_msr);
return 0;
}
diff --git a/tools/sched/dl_bw_dump.py b/tools/sched/dl_bw_dump.py
new file mode 100644
index 000000000000..aae4e42b1769
--- /dev/null
+++ b/tools/sched/dl_bw_dump.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env drgn
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2025 Juri Lelli <juri.lelli@redhat.com>
+# Copyright (C) 2025 Red Hat, Inc.
+
+desc = """
+This is a drgn script to show dl_rq bandwidth accounting information. For more
+info on drgn, visit https://github.com/osandov/drgn.
+
+Only online CPUs are reported.
+"""
+
+import os
+import argparse
+
+import drgn
+from drgn import FaultError
+from drgn.helpers.common import *
+from drgn.helpers.linux import *
+
+def print_dl_bws_info():
+
+ print("Retrieving dl_rq bandwidth accounting information:")
+
+ runqueues = prog['runqueues']
+
+ for cpu_id in for_each_possible_cpu(prog):
+ try:
+ rq = per_cpu(runqueues, cpu_id)
+
+ if rq.online == 0:
+ continue
+
+ dl_rq = rq.dl
+
+ print(f" From CPU: {cpu_id}")
+
+ # Access and print relevant fields from struct dl_rq
+ print(f" running_bw : {dl_rq.running_bw}")
+ print(f" this_bw : {dl_rq.this_bw}")
+ print(f" extra_bw : {dl_rq.extra_bw}")
+ print(f" max_bw : {dl_rq.max_bw}")
+ print(f" bw_ratio : {dl_rq.bw_ratio}")
+
+ except drgn.FaultError as fe:
+ print(f" (CPU {cpu_id}: Fault accessing kernel memory: {fe})")
+ except AttributeError as ae:
+ print(f" (CPU {cpu_id}: Missing attribute for root_domain (kernel struct change?): {ae})")
+ except Exception as e:
+ print(f" (CPU {cpu_id}: An unexpected error occurred: {e})")
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description=desc,
+ formatter_class=argparse.RawTextHelpFormatter)
+ args = parser.parse_args()
+
+ print_dl_bws_info()
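Usage note: as a drgn script this runs against the live kernel (or a vmcore) and needs debug info for the running kernel; reading /proc/kcore requires root. With the shebang above it can be invoked directly, e.g. `sudo ./dl_bw_dump.py`.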
diff --git a/tools/sched/root_domains_dump.py b/tools/sched/root_domains_dump.py
new file mode 100644
index 000000000000..56dc91f017b2
--- /dev/null
+++ b/tools/sched/root_domains_dump.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env drgn
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2025 Juri Lelli <juri.lelli@redhat.com>
+# Copyright (C) 2025 Red Hat, Inc.
+
+desc = """
+This is a drgn script to show the current root domains configuration. For more
+info on drgn, visit https://github.com/osandov/drgn.
+
+Root domains are only printed once, as multiple CPUs might be attached to the
+same root domain.
+"""
+
+import os
+import argparse
+
+import drgn
+from drgn import FaultError
+from drgn.helpers.common import *
+from drgn.helpers.linux import *
+
+def print_root_domains_info():
+
+ # To store unique root domains found
+ seen_root_domains = set()
+
+ print("Retrieving (unique) Root Domain Information:")
+
+ runqueues = prog['runqueues']
+ def_root_domain = prog['def_root_domain']
+
+ for cpu_id in for_each_possible_cpu(prog):
+ try:
+ rq = per_cpu(runqueues, cpu_id)
+
+ root_domain = rq.rd
+
+ # Check if we've already processed this root domain to avoid duplicates
+ # Use the memory address of the root_domain as a unique identifier
+ root_domain_cast = int(root_domain)
+ if root_domain_cast in seen_root_domains:
+ continue
+ seen_root_domains.add(root_domain_cast)
+
+ if root_domain_cast == int(def_root_domain.address_):
+ print(f"\n--- Root Domain @ def_root_domain ---")
+ else:
+ print(f"\n--- Root Domain @ 0x{root_domain_cast:x} ---")
+
+ print(f" From CPU: {cpu_id}") # This CPU belongs to this root domain
+
+ # Access and print relevant fields from struct root_domain
+ print(f" Span : {cpumask_to_cpulist(root_domain.span[0])}")
+ print(f" Online : {cpumask_to_cpulist(root_domain.span[0])}")
+
+ except drgn.FaultError as fe:
+ print(f" (CPU {cpu_id}: Fault accessing kernel memory: {fe})")
+ except AttributeError as ae:
+ print(f" (CPU {cpu_id}: Missing attribute for root_domain (kernel struct change?): {ae})")
+ except Exception as e:
+ print(f" (CPU {cpu_id}: An unexpected error occurred: {e})")
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description=desc,
+ formatter_class=argparse.RawTextHelpFormatter)
+ args = parser.parse_args()
+
+ print_root_domains_info()
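The same invocation applies here (`sudo ./root_domains_dump.py`). Deduplication keys on the root_domain pointer value, so each domain is printed once regardless of how many CPUs share it.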
diff --git a/tools/sched_ext/Makefile b/tools/sched_ext/Makefile
index d68780e2e03d..e4bda2474060 100644
--- a/tools/sched_ext/Makefile
+++ b/tools/sched_ext/Makefile
@@ -133,6 +133,7 @@ $(MAKE_DIRS):
$(call msg,MKDIR,,$@)
$(Q)mkdir -p $@
+ifneq ($(CROSS_COMPILE),)
$(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
$(APIDIR)/linux/bpf.h \
| $(OBJ_DIR)/libbpf
@@ -141,6 +142,7 @@ $(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
EXTRA_CFLAGS='-g -O0 -fPIC' \
LDFLAGS="$(LDFLAGS)" \
DESTDIR=$(OUTPUT_DIR) prefix= all install_headers
+endif
$(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
$(APIDIR)/linux/bpf.h \
@@ -187,7 +189,7 @@ $(INCLUDE_DIR)/%.bpf.skel.h: $(SCXOBJ_DIR)/%.bpf.o $(INCLUDE_DIR)/vmlinux.h $(BP
SCX_COMMON_DEPS := include/scx/common.h include/scx/user_exit_info.h | $(BINDIR)
-c-sched-targets = scx_simple scx_qmap scx_central scx_flatcg
+c-sched-targets = scx_simple scx_cpu0 scx_qmap scx_central scx_flatcg
$(addprefix $(BINDIR)/,$(c-sched-targets)): \
$(BINDIR)/%: \
diff --git a/tools/sched_ext/include/scx/bpf_arena_common.bpf.h b/tools/sched_ext/include/scx/bpf_arena_common.bpf.h
new file mode 100644
index 000000000000..4366fb3c91ce
--- /dev/null
+++ b/tools/sched_ext/include/scx/bpf_arena_common.bpf.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#pragma once
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE __PAGE_SIZE
+/*
+ * for older kernels try sizeof(struct genradix_node)
+ * or flexible:
+ * static inline long __bpf_page_size(void) {
+ * return bpf_core_enum_value(enum page_size_enum___l, __PAGE_SIZE___l) ?: sizeof(struct genradix_node);
+ * }
+ * but generated code is not great.
+ */
+#endif
+
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) && !defined(BPF_ARENA_FORCE_ASM)
+#define __arena __attribute__((address_space(1)))
+#define __arena_global __attribute__((address_space(1)))
+#define cast_kern(ptr) /* nop for bpf prog. emitted by LLVM */
+#define cast_user(ptr) /* nop for bpf prog. emitted by LLVM */
+#else
+
+/* emit instruction:
+ * rX = rX .off = BPF_ADDR_SPACE_CAST .imm32 = (dst_as << 16) | src_as
+ *
+ * This is a workaround for LLVM compiler versions without
+ * __BPF_FEATURE_ADDR_SPACE_CAST that do not automatically cast between arena
+ * pointers and native kernel/userspace ones. In this case we explicitly do so
+ * with cast_kern() and cast_user(). E.g., in the Linux kernel tree,
+ * tools/testing/selftests/bpf includes tests that use these macros to implement
+ * linked lists and hashtables backed by arena memory. In sched_ext, we use
+ * cast_kern() and cast_user() for compatibility with older LLVM toolchains.
+ */
+#ifndef bpf_addr_space_cast
+#define bpf_addr_space_cast(var, dst_as, src_as)\
+ asm volatile(".byte 0xBF; \
+ .ifc %[reg], r0; \
+ .byte 0x00; \
+ .endif; \
+ .ifc %[reg], r1; \
+ .byte 0x11; \
+ .endif; \
+ .ifc %[reg], r2; \
+ .byte 0x22; \
+ .endif; \
+ .ifc %[reg], r3; \
+ .byte 0x33; \
+ .endif; \
+ .ifc %[reg], r4; \
+ .byte 0x44; \
+ .endif; \
+ .ifc %[reg], r5; \
+ .byte 0x55; \
+ .endif; \
+ .ifc %[reg], r6; \
+ .byte 0x66; \
+ .endif; \
+ .ifc %[reg], r7; \
+ .byte 0x77; \
+ .endif; \
+ .ifc %[reg], r8; \
+ .byte 0x88; \
+ .endif; \
+ .ifc %[reg], r9; \
+ .byte 0x99; \
+ .endif; \
+ .short %[off]; \
+ .long %[as]" \
+ : [reg]"+r"(var) \
+ : [off]"i"(BPF_ADDR_SPACE_CAST) \
+ , [as]"i"((dst_as << 16) | src_as));
+#endif
+
+#define __arena
+#define __arena_global SEC(".addr_space.1")
+#define cast_kern(ptr) bpf_addr_space_cast(ptr, 0, 1)
+#define cast_user(ptr) bpf_addr_space_cast(ptr, 1, 0)
+#endif
+
+void __arena* bpf_arena_alloc_pages(void *map, void __arena *addr, __u32 page_cnt,
+ int node_id, __u64 flags) __ksym __weak;
+void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt) __ksym __weak;
+
+/*
+ * Note that cond_break can only be portably used in the body of a breakable
+ * construct, whereas can_loop can be used anywhere.
+ */
+#ifdef TEST
+#define can_loop true
+#define __cond_break(expr) expr
+#else
+#ifdef __BPF_FEATURE_MAY_GOTO
+#define can_loop \
+ ({ __label__ l_break, l_continue; \
+ bool ret = true; \
+ asm volatile goto("may_goto %l[l_break]" \
+ :::: l_break); \
+ goto l_continue; \
+ l_break: ret = false; \
+ l_continue:; \
+ ret; \
+ })
+
+#define __cond_break(expr) \
+ ({ __label__ l_break, l_continue; \
+ asm volatile goto("may_goto %l[l_break]" \
+ :::: l_break); \
+ goto l_continue; \
+ l_break: expr; \
+ l_continue:; \
+ })
+#else
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define can_loop \
+ ({ __label__ l_break, l_continue; \
+ bool ret = true; \
+ asm volatile goto("1:.byte 0xe5; \
+ .byte 0; \
+ .long ((%l[l_break] - 1b - 8) / 8) & 0xffff; \
+ .short 0" \
+ :::: l_break); \
+ goto l_continue; \
+ l_break: ret = false; \
+ l_continue:; \
+ ret; \
+ })
+
+#define __cond_break(expr) \
+ ({ __label__ l_break, l_continue; \
+ asm volatile goto("1:.byte 0xe5; \
+ .byte 0; \
+ .long ((%l[l_break] - 1b - 8) / 8) & 0xffff; \
+ .short 0" \
+ :::: l_break); \
+ goto l_continue; \
+ l_break: expr; \
+ l_continue:; \
+ })
+#else
+#define can_loop \
+ ({ __label__ l_break, l_continue; \
+ bool ret = true; \
+ asm volatile goto("1:.byte 0xe5; \
+ .byte 0; \
+ .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16; \
+ .short 0" \
+ :::: l_break); \
+ goto l_continue; \
+ l_break: ret = false; \
+ l_continue:; \
+ ret; \
+ })
+
+#define __cond_break(expr) \
+ ({ __label__ l_break, l_continue; \
+ asm volatile goto("1:.byte 0xe5; \
+ .byte 0; \
+ .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16; \
+ .short 0" \
+ :::: l_break); \
+ goto l_continue; \
+ l_break: expr; \
+ l_continue:; \
+ })
+#endif /* __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ */
+#endif /* __BPF_FEATURE_MAY_GOTO */
+#endif /* TEST */
+
+#define cond_break __cond_break(break)
+#define cond_break_label(label) __cond_break(goto label)
+
+
+void bpf_preempt_disable(void) __weak __ksym;
+void bpf_preempt_enable(void) __weak __ksym;
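For orientation, not part of the patch: a minimal sketch of how the arena
helpers and cond_break above combine in a BPF program. It assumes
<scx/common.bpf.h> is included; the map layout, scan_arena(), and the
4096-byte page size are illustrative.

    /* one small arena map; arena maps must be BPF_F_MMAPABLE */
    struct {
            __uint(type, BPF_MAP_TYPE_ARENA);
            __uint(map_flags, BPF_F_MMAPABLE);
            __uint(max_entries, 16);        /* pages */
    } arena SEC(".maps");

    static int scan_arena(void)
    {
            /* allocate one page from the arena; -1 means any NUMA node */
            u64 __arena *slot = bpf_arena_alloc_pages(&arena, NULL, 1, -1, 0);
            int i;

            if (!slot)
                    return -ENOMEM;
            cast_kern(slot);        /* nop when LLVM emits the cast itself */
            for (i = 0; i < 4096 / 8; i++) {
                    slot[i] = i;
                    cond_break;     /* lets the verifier bound the loop */
            }
            bpf_arena_free_pages(&arena, slot, 1);
            return 0;
    }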
diff --git a/tools/sched_ext/include/scx/bpf_arena_common.h b/tools/sched_ext/include/scx/bpf_arena_common.h
new file mode 100644
index 000000000000..10141db0b59d
--- /dev/null
+++ b/tools/sched_ext/include/scx/bpf_arena_common.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#pragma once
+
+#ifndef arena_container_of
+#define arena_container_of(ptr, type, member) \
+ ({ \
+ void __arena *__mptr = (void __arena *)(ptr); \
+ ((type *)(__mptr - offsetof(type, member))); \
+ })
+#endif
+
+/* Provide the definition of PAGE_SIZE. */
+#include <sys/user.h>
+
+#define __arena
+#define __arg_arena
+#define cast_kern(ptr) /* nop for user space */
+#define cast_user(ptr) /* nop for user space */
+char __attribute__((weak)) arena[1];
+
+#ifndef offsetof
+#define offsetof(type, member) ((unsigned long)&((type *)0)->member)
+#endif
+
+static inline void __arena* bpf_arena_alloc_pages(void *map, void *addr, __u32 page_cnt,
+ int node_id, __u64 flags)
+{
+ return NULL;
+}
+static inline void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt)
+{
+}
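A hedged aside: arena_container_of() mirrors the kernel's container_of()
for arena pointers. The struct and val_ptr below are illustrative only.

    struct node { long key; long val; };

    /* given a pointer to the ->val member, recover the enclosing node */
    static struct node *node_of(long __arena *val_ptr)
    {
            return arena_container_of(val_ptr, struct node, val);
    }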
diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h
index d4e21558e982..821d5791bd42 100644
--- a/tools/sched_ext/include/scx/common.bpf.h
+++ b/tools/sched_ext/include/scx/common.bpf.h
@@ -24,14 +24,26 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <asm-generic/errno.h>
-#include "user_exit_info.h"
+#include "user_exit_info.bpf.h"
#include "enum_defs.autogen.h"
+#define PF_IDLE 0x00000002 /* I am an IDLE thread */
+#define PF_IO_WORKER 0x00000010 /* Task is an IO worker */
#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
+#define PF_KCOMPACTD 0x00010000 /* I am kcompactd */
+#define PF_KSWAPD 0x00020000 /* I am kswapd */
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_EXITING 0x00000004
#define CLOCK_MONOTONIC 1
+#ifndef NR_CPUS
+#define NR_CPUS 1024
+#endif
+
+#ifndef NUMA_NO_NODE
+#define NUMA_NO_NODE (-1)
+#endif
+
extern int LINUX_KERNEL_VERSION __kconfig;
extern const char CONFIG_CC_VERSION_TEXT[64] __kconfig __weak;
extern const char CONFIG_LOCALVERSION[64] __kconfig __weak;
@@ -48,21 +60,15 @@ static inline void ___vmlinux_h_sanity_check___(void)
s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) __ksym;
s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, bool *is_idle) __ksym;
-s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
- const struct cpumask *cpus_allowed, u64 flags) __ksym __weak;
-void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
-void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
+s32 __scx_bpf_select_cpu_and(struct task_struct *p, const struct cpumask *cpus_allowed,
+ struct scx_bpf_select_cpu_and_args *args) __ksym __weak;
+bool __scx_bpf_dsq_insert_vtime(struct task_struct *p, struct scx_bpf_dsq_insert_vtime_args *args) __ksym __weak;
u32 scx_bpf_dispatch_nr_slots(void) __ksym;
void scx_bpf_dispatch_cancel(void) __ksym;
-bool scx_bpf_dsq_move_to_local(u64 dsq_id) __ksym __weak;
-void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
-void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
-bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
-bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
-u32 scx_bpf_reenqueue_local(void) __ksym;
void scx_bpf_kick_cpu(s32 cpu, u64 flags) __ksym;
s32 scx_bpf_dsq_nr_queued(u64 dsq_id) __ksym;
void scx_bpf_destroy_dsq(u64 dsq_id) __ksym;
+struct task_struct *scx_bpf_dsq_peek(u64 dsq_id) __ksym __weak;
int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id, u64 flags) __ksym __weak;
struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it) __ksym __weak;
void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it) __ksym __weak;
@@ -91,7 +97,8 @@ s32 scx_bpf_pick_any_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym;
bool scx_bpf_task_running(const struct task_struct *p) __ksym;
s32 scx_bpf_task_cpu(const struct task_struct *p) __ksym;
struct rq *scx_bpf_cpu_rq(s32 cpu) __ksym;
-struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) __ksym __weak;
+struct rq *scx_bpf_locked_rq(void) __ksym;
+struct task_struct *scx_bpf_cpu_curr(s32 cpu) __ksym __weak;
u64 scx_bpf_now(void) __ksym __weak;
void scx_bpf_events(struct scx_event_stats *events, size_t events__sz) __ksym __weak;
@@ -107,6 +114,9 @@ void scx_bpf_events(struct scx_event_stats *events, size_t events__sz) __ksym __
static inline __attribute__((format(printf, 1, 2)))
void ___scx_bpf_bstr_format_checker(const char *fmt, ...) {}
+#define SCX_STRINGIFY(x) #x
+#define SCX_TOSTRING(x) SCX_STRINGIFY(x)
+
/*
* Helper macro for initializing the fmt and variadic argument inputs to both
* bstr exit kfuncs. Callers to this function should use ___fmt and ___param to
@@ -141,13 +151,15 @@ void ___scx_bpf_bstr_format_checker(const char *fmt, ...) {}
* scx_bpf_error() wraps the scx_bpf_error_bstr() kfunc with variadic arguments
* instead of an array of u64. Invoking this macro will cause the scheduler to
* exit in an erroneous state, with diagnostic information being passed to the
- * user.
+ * user. It appends the file and line number to aid debugging.
*/
#define scx_bpf_error(fmt, args...) \
({ \
- scx_bpf_bstr_preamble(fmt, args) \
+ scx_bpf_bstr_preamble( \
+ __FILE__ ":" SCX_TOSTRING(__LINE__) ": " fmt, ##args) \
scx_bpf_error_bstr(___fmt, ___param, sizeof(___param)); \
- ___scx_bpf_bstr_format_checker(fmt, ##args); \
+ ___scx_bpf_bstr_format_checker( \
+ __FILE__ ":" SCX_TOSTRING(__LINE__) ": " fmt, ##args); \
})
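Illustration, with hypothetical file and line values: an invocation such as
scx_bpf_error("cpu %d out of range", cpu) is now reported as
"scx_qmap.bpf.c:123: cpu 4 out of range" instead of just the bare message.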
/*
@@ -229,6 +241,7 @@ BPF_PROG(name, ##args)
* be a pointer to the area. Use `MEMBER_VPTR(*ptr, .member)` instead of
* `MEMBER_VPTR(ptr, ->member)`.
*/
+#ifndef MEMBER_VPTR
#define MEMBER_VPTR(base, member) (typeof((base) member) *) \
({ \
u64 __base = (u64)&(base); \
@@ -245,6 +258,7 @@ BPF_PROG(name, ##args)
[max]"i"(sizeof(base) - sizeof((base) member))); \
__addr; \
})
+#endif /* MEMBER_VPTR */
/**
* ARRAY_ELEM_PTR - Obtain the verified pointer to an array element
@@ -260,6 +274,7 @@ BPF_PROG(name, ##args)
* size of the array to compute the max, which will result in rejection by
* the verifier.
*/
+#ifndef ARRAY_ELEM_PTR
#define ARRAY_ELEM_PTR(arr, i, n) (typeof(arr[i]) *) \
({ \
u64 __base = (u64)arr; \
@@ -274,7 +289,7 @@ BPF_PROG(name, ##args)
[max]"r"(sizeof(arr[0]) * ((n) - 1))); \
__addr; \
})
-
+#endif /* ARRAY_ELEM_PTR */
/*
* BPF declarations and helpers
@@ -438,8 +453,27 @@ static __always_inline const struct cpumask *cast_mask(struct bpf_cpumask *mask)
*/
static inline bool is_migration_disabled(const struct task_struct *p)
{
- if (bpf_core_field_exists(p->migration_disabled))
- return p->migration_disabled;
+ /*
+	 * Testing p->migration_disabled in BPF code is tricky because
+	 * migration is _always_ disabled while BPF code runs: the prolog
+	 * (__bpf_prog_enter) and epilog (__bpf_prog_exit) of BPF program
+	 * execution disable and re-enable migration of the current task,
+	 * respectively. So, the _current_ task of a sched_ext op is always
+	 * migration-disabled. Moreover, p->migration_disabled can be two or
+	 * greater when a sched_ext op's BPF code (e.g., ops.tick) runs in
+	 * the middle of another BPF program's execution.
+	 *
+	 * Therefore, the _current_ task should be considered
+	 * migration-disabled only when its migration_disabled count is
+	 * greater than one. In other words, p->migration_disabled == 1 is
+	 * ambiguous, so we check whether @p is the current task.
+ */
+ if (bpf_core_field_exists(p->migration_disabled)) {
+ if (p->migration_disabled == 1)
+ return bpf_get_current_task_btf() != p;
+ else
+ return p->migration_disabled;
+ }
return false;
}
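A hedged usage sketch, not from the patch: how a scheduler might act on
this helper. SHARED_DSQ and the enqueue split are illustrative.

    void BPF_STRUCT_OPS(mig_enqueue, struct task_struct *p, u64 enq_flags)
    {
            /* a migration-disabled task must stay on its current CPU */
            if (is_migration_disabled(p)) {
                    scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL,
                                       enq_flags);
                    return;
            }
            scx_bpf_dsq_insert(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);
    }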
@@ -476,7 +510,7 @@ static inline s64 time_delta(u64 after, u64 before)
*/
static inline bool time_after(u64 a, u64 b)
{
- return (s64)(b - a) < 0;
+ return (s64)(b - a) < 0;
}
/**
@@ -500,7 +534,7 @@ static inline bool time_before(u64 a, u64 b)
*/
static inline bool time_after_eq(u64 a, u64 b)
{
- return (s64)(a - b) >= 0;
+ return (s64)(a - b) >= 0;
}
/**
@@ -547,9 +581,15 @@ static inline bool time_in_range_open(u64 a, u64 b, u64 c)
*/
/* useful compiler attributes */
+#ifndef likely
#define likely(x) __builtin_expect(!!(x), 1)
+#endif
+#ifndef unlikely
#define unlikely(x) __builtin_expect(!!(x), 0)
+#endif
+#ifndef __maybe_unused
#define __maybe_unused __attribute__((__unused__))
+#endif
/*
* READ/WRITE_ONCE() are from kernel (include/asm-generic/rwonce.h). They
@@ -633,6 +673,26 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
})
/*
+ * __calc_avg - Calculate an exponentially weighted moving average (EWMA) of
+ * the @old and @new values. @decay controls how much weight @old retains;
+ * the new sample contributes 1/2^@decay. With a larger @decay value, the
+ * moving average changes slowly and exhibits fewer fluctuations.
+ */
+#define __calc_avg(old, new, decay) ({ \
+ typeof(decay) thr = 1 << (decay); \
+ typeof(old) ret; \
+ if (((old) < thr) || ((new) < thr)) { \
+ if (((old) == 1) && ((new) == 0)) \
+ ret = 0; \
+ else \
+ ret = ((old) - ((old) >> 1)) + ((new) >> 1); \
+ } else { \
+ ret = ((old) - ((old) >> (decay))) + ((new) >> (decay)); \
+ } \
+ ret; \
+})
+
+/*
* log2_u32 - Compute the base 2 logarithm of a 32-bit exponential value.
* @v: The value for which we're computing the base 2 logarithm.
*/
@@ -663,6 +723,25 @@ static inline u32 log2_u64(u64 v)
}
/*
+ * __sqrt_u64 - Calculate the integer square root of @x using Newton's method,
+ * capped at eight iterations (very large inputs may only be approximated).
+ */
+static inline u64 __sqrt_u64(u64 x)
+{
+ if (x == 0 || x == 1)
+ return x;
+
+ u64 r = ((1ULL << 32) > x) ? x : (1ULL << 32);
+
+ for (int i = 0; i < 8; ++i) {
+ u64 q = x / r;
+ if (r <= q)
+ break;
+ r = (r + q) >> 1;
+ }
+ return r;
+}
+
+/*
* Return a value proportionally scaled to the task's weight.
*/
static inline u64 scale_by_task_weight(const struct task_struct *p, u64 value)
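Worked example with illustrative values: with decay = 3 the new sample gets
1/8 weight, and the eight Newton iterations suffice for small inputs.

    u64 avg = 800, sample = 1600, root;

    avg = __calc_avg(avg, sample, 3);   /* 800 - (800 >> 3) + (1600 >> 3) = 900 */
    root = __sqrt_u64(144);             /* converges to 12 */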
diff --git a/tools/sched_ext/include/scx/common.h b/tools/sched_ext/include/scx/common.h
index 1dc76bd84296..b3c6372bcf81 100644
--- a/tools/sched_ext/include/scx/common.h
+++ b/tools/sched_ext/include/scx/common.h
@@ -75,8 +75,9 @@ typedef int64_t s64;
#include "enums.h"
/* not available when building kernel tools/sched_ext */
-#if __has_include(<lib/sdt_task.h>)
-#include <lib/sdt_task.h>
+#if __has_include(<lib/sdt_task_defs.h>)
+#include "bpf_arena_common.h"
+#include <lib/sdt_task_defs.h>
#endif
#endif /* __SCHED_EXT_COMMON_H */
diff --git a/tools/sched_ext/include/scx/compat.bpf.h b/tools/sched_ext/include/scx/compat.bpf.h
index 9252e1a00556..f2969c3061a7 100644
--- a/tools/sched_ext/include/scx/compat.bpf.h
+++ b/tools/sched_ext/include/scx/compat.bpf.h
@@ -16,114 +16,92 @@
})
/* v6.12: 819513666966 ("sched_ext: Add cgroup support") */
-#define __COMPAT_scx_bpf_task_cgroup(p) \
- (bpf_ksym_exists(scx_bpf_task_cgroup) ? \
- scx_bpf_task_cgroup((p)) : NULL)
+struct cgroup *scx_bpf_task_cgroup___new(struct task_struct *p) __ksym __weak;
+
+#define scx_bpf_task_cgroup(p) \
+ (bpf_ksym_exists(scx_bpf_task_cgroup___new) ? \
+ scx_bpf_task_cgroup___new((p)) : NULL)
/*
* v6.13: The verb `dispatch` was too overloaded and confusing. kfuncs are
* renamed to unload the verb.
*
- * Build error is triggered if old names are used. New binaries work with both
- * new and old names. The compat macros will be removed on v6.15 release.
- *
* scx_bpf_dispatch_from_dsq() and friends were added during v6.12 by
* 4c30f5ce4f7a ("sched_ext: Implement scx_bpf_dispatch[_vtime]_from_dsq()").
- * Preserve __COMPAT macros until v6.15.
*/
-void scx_bpf_dispatch___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
-void scx_bpf_dispatch_vtime___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
-bool scx_bpf_consume___compat(u64 dsq_id) __ksym __weak;
-void scx_bpf_dispatch_from_dsq_set_slice___compat(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
-void scx_bpf_dispatch_from_dsq_set_vtime___compat(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
-bool scx_bpf_dispatch_from_dsq___compat(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
-bool scx_bpf_dispatch_vtime_from_dsq___compat(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
-
-#define scx_bpf_dsq_insert(p, dsq_id, slice, enq_flags) \
- (bpf_ksym_exists(scx_bpf_dsq_insert) ? \
- scx_bpf_dsq_insert((p), (dsq_id), (slice), (enq_flags)) : \
- scx_bpf_dispatch___compat((p), (dsq_id), (slice), (enq_flags)))
-
-#define scx_bpf_dsq_insert_vtime(p, dsq_id, slice, vtime, enq_flags) \
- (bpf_ksym_exists(scx_bpf_dsq_insert_vtime) ? \
- scx_bpf_dsq_insert_vtime((p), (dsq_id), (slice), (vtime), (enq_flags)) : \
- scx_bpf_dispatch_vtime___compat((p), (dsq_id), (slice), (vtime), (enq_flags)))
+bool scx_bpf_dsq_move_to_local___new(u64 dsq_id) __ksym __weak;
+void scx_bpf_dsq_move_set_slice___new(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
+void scx_bpf_dsq_move_set_vtime___new(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
+bool scx_bpf_dsq_move___new(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
+bool scx_bpf_dsq_move_vtime___new(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
+
+bool scx_bpf_consume___old(u64 dsq_id) __ksym __weak;
+void scx_bpf_dispatch_from_dsq_set_slice___old(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
+void scx_bpf_dispatch_from_dsq_set_vtime___old(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
+bool scx_bpf_dispatch_from_dsq___old(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
+bool scx_bpf_dispatch_vtime_from_dsq___old(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
#define scx_bpf_dsq_move_to_local(dsq_id) \
- (bpf_ksym_exists(scx_bpf_dsq_move_to_local) ? \
- scx_bpf_dsq_move_to_local((dsq_id)) : \
- scx_bpf_consume___compat((dsq_id)))
-
-#define __COMPAT_scx_bpf_dsq_move_set_slice(it__iter, slice) \
- (bpf_ksym_exists(scx_bpf_dsq_move_set_slice) ? \
- scx_bpf_dsq_move_set_slice((it__iter), (slice)) : \
- (bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_slice___compat) ? \
- scx_bpf_dispatch_from_dsq_set_slice___compat((it__iter), (slice)) : \
+ (bpf_ksym_exists(scx_bpf_dsq_move_to_local___new) ? \
+ scx_bpf_dsq_move_to_local___new((dsq_id)) : \
+ scx_bpf_consume___old((dsq_id)))
+
+#define scx_bpf_dsq_move_set_slice(it__iter, slice) \
+ (bpf_ksym_exists(scx_bpf_dsq_move_set_slice___new) ? \
+ scx_bpf_dsq_move_set_slice___new((it__iter), (slice)) : \
+ (bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_slice___old) ? \
+ scx_bpf_dispatch_from_dsq_set_slice___old((it__iter), (slice)) : \
(void)0))
-#define __COMPAT_scx_bpf_dsq_move_set_vtime(it__iter, vtime) \
- (bpf_ksym_exists(scx_bpf_dsq_move_set_vtime) ? \
- scx_bpf_dsq_move_set_vtime((it__iter), (vtime)) : \
- (bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_vtime___compat) ? \
- scx_bpf_dispatch_from_dsq_set_vtime___compat((it__iter), (vtime)) : \
- (void) 0))
-
-#define __COMPAT_scx_bpf_dsq_move(it__iter, p, dsq_id, enq_flags) \
- (bpf_ksym_exists(scx_bpf_dsq_move) ? \
- scx_bpf_dsq_move((it__iter), (p), (dsq_id), (enq_flags)) : \
- (bpf_ksym_exists(scx_bpf_dispatch_from_dsq___compat) ? \
- scx_bpf_dispatch_from_dsq___compat((it__iter), (p), (dsq_id), (enq_flags)) : \
- false))
+#define scx_bpf_dsq_move_set_vtime(it__iter, vtime) \
+ (bpf_ksym_exists(scx_bpf_dsq_move_set_vtime___new) ? \
+ scx_bpf_dsq_move_set_vtime___new((it__iter), (vtime)) : \
+ (bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_vtime___old) ? \
+ scx_bpf_dispatch_from_dsq_set_vtime___old((it__iter), (vtime)) : \
+ (void)0))
-#define __COMPAT_scx_bpf_dsq_move_vtime(it__iter, p, dsq_id, enq_flags) \
- (bpf_ksym_exists(scx_bpf_dsq_move_vtime) ? \
- scx_bpf_dsq_move_vtime((it__iter), (p), (dsq_id), (enq_flags)) : \
- (bpf_ksym_exists(scx_bpf_dispatch_vtime_from_dsq___compat) ? \
- scx_bpf_dispatch_vtime_from_dsq___compat((it__iter), (p), (dsq_id), (enq_flags)) : \
+#define scx_bpf_dsq_move(it__iter, p, dsq_id, enq_flags) \
+ (bpf_ksym_exists(scx_bpf_dsq_move___new) ? \
+ scx_bpf_dsq_move___new((it__iter), (p), (dsq_id), (enq_flags)) : \
+ (bpf_ksym_exists(scx_bpf_dispatch_from_dsq___old) ? \
+ scx_bpf_dispatch_from_dsq___old((it__iter), (p), (dsq_id), (enq_flags)) : \
false))
-#define scx_bpf_dispatch(p, dsq_id, slice, enq_flags) \
- _Static_assert(false, "scx_bpf_dispatch() renamed to scx_bpf_dsq_insert()")
-
-#define scx_bpf_dispatch_vtime(p, dsq_id, slice, vtime, enq_flags) \
- _Static_assert(false, "scx_bpf_dispatch_vtime() renamed to scx_bpf_dsq_insert_vtime()")
-
-#define scx_bpf_consume(dsq_id) ({ \
- _Static_assert(false, "scx_bpf_consume() renamed to scx_bpf_dsq_move_to_local()"); \
- false; \
-})
-
-#define scx_bpf_dispatch_from_dsq_set_slice(it__iter, slice) \
- _Static_assert(false, "scx_bpf_dispatch_from_dsq_set_slice() renamed to scx_bpf_dsq_move_set_slice()")
-
-#define scx_bpf_dispatch_from_dsq_set_vtime(it__iter, vtime) \
- _Static_assert(false, "scx_bpf_dispatch_from_dsq_set_vtime() renamed to scx_bpf_dsq_move_set_vtime()")
-
-#define scx_bpf_dispatch_from_dsq(it__iter, p, dsq_id, enq_flags) ({ \
- _Static_assert(false, "scx_bpf_dispatch_from_dsq() renamed to scx_bpf_dsq_move()"); \
- false; \
-})
-
-#define scx_bpf_dispatch_vtime_from_dsq(it__iter, p, dsq_id, enq_flags) ({ \
- _Static_assert(false, "scx_bpf_dispatch_vtime_from_dsq() renamed to scx_bpf_dsq_move_vtime()"); \
- false; \
-})
-
-#define __COMPAT_scx_bpf_dispatch_from_dsq_set_slice(it__iter, slice) \
- _Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq_set_slice() renamed to __COMPAT_scx_bpf_dsq_move_set_slice()")
+#define scx_bpf_dsq_move_vtime(it__iter, p, dsq_id, enq_flags) \
+ (bpf_ksym_exists(scx_bpf_dsq_move_vtime___new) ? \
+ scx_bpf_dsq_move_vtime___new((it__iter), (p), (dsq_id), (enq_flags)) : \
+ (bpf_ksym_exists(scx_bpf_dispatch_vtime_from_dsq___old) ? \
+ scx_bpf_dispatch_vtime_from_dsq___old((it__iter), (p), (dsq_id), (enq_flags)) : \
+ false))
-#define __COMPAT_scx_bpf_dispatch_from_dsq_set_vtime(it__iter, vtime) \
- _Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq_set_vtime() renamed to __COMPAT_scx_bpf_dsq_move_set_vtime()")
+/*
+ * v6.15: 950ad93df2fc ("bpf: add kfunc for populating cpumask bits")
+ *
+ * The compat macro will be dropped with the v6.19 release.
+ */
+int bpf_cpumask_populate(struct cpumask *dst, void *src, size_t src__sz) __ksym __weak;
-#define __COMPAT_scx_bpf_dispatch_from_dsq(it__iter, p, dsq_id, enq_flags) ({ \
- _Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq() renamed to __COMPAT_scx_bpf_dsq_move()"); \
- false; \
-})
+#define __COMPAT_bpf_cpumask_populate(cpumask, src, size__sz) \
+ (bpf_ksym_exists(bpf_cpumask_populate) ? \
+ (bpf_cpumask_populate(cpumask, src, size__sz)) : -EOPNOTSUPP)
-#define __COMPAT_scx_bpf_dispatch_vtime_from_dsq(it__iter, p, dsq_id, enq_flags) ({ \
- _Static_assert(false, "__COMPAT_scx_bpf_dispatch_vtime_from_dsq() renamed to __COMPAT_scx_bpf_dsq_move_vtime()"); \
- false; \
-})
+/*
+ * v6.19: Introduce lockless peek API for user DSQs.
+ *
+ * Preserve the following macro until v6.21.
+ */
+static inline struct task_struct *__COMPAT_scx_bpf_dsq_peek(u64 dsq_id)
+{
+ struct task_struct *p = NULL;
+ struct bpf_iter_scx_dsq it;
+
+ if (bpf_ksym_exists(scx_bpf_dsq_peek))
+ return scx_bpf_dsq_peek(dsq_id);
+ if (!bpf_iter_scx_dsq_new(&it, dsq_id, 0))
+ p = bpf_iter_scx_dsq_next(&it);
+ bpf_iter_scx_dsq_destroy(&it);
+ return p;
+}
/**
* __COMPAT_is_enq_cpu_selected - Test if SCX_ENQ_CPU_SELECTED is on
@@ -226,6 +204,178 @@ static inline bool __COMPAT_is_enq_cpu_selected(u64 enq_flags)
scx_bpf_pick_any_cpu(cpus_allowed, flags))
/*
+ * v6.18: Add a helper to retrieve the current task running on a CPU.
+ *
+ * Keep this helper available until v6.20 for compatibility.
+ */
+static inline struct task_struct *__COMPAT_scx_bpf_cpu_curr(int cpu)
+{
+ struct rq *rq;
+
+ if (bpf_ksym_exists(scx_bpf_cpu_curr))
+ return scx_bpf_cpu_curr(cpu);
+
+ rq = scx_bpf_cpu_rq(cpu);
+
+ return rq ? rq->curr : NULL;
+}
+
+/*
+ * v6.19: To work around the BPF maximum parameter limit, the following kfuncs are
+ * replaced with variants that pack scalar arguments in a struct. Wrappers are
+ * provided to maintain source compatibility.
+ *
+ * v6.13: scx_bpf_dsq_insert_vtime() renaming is also handled here. See the
+ * block on dispatch renaming above for more details.
+ *
+ * The kernel will carry the compat variants until v6.23 to maintain binary
+ * compatibility. After v6.23 release, remove the compat handling and move the
+ * wrappers to common.bpf.h.
+ */
+s32 scx_bpf_select_cpu_and___compat(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
+ const struct cpumask *cpus_allowed, u64 flags) __ksym __weak;
+void scx_bpf_dispatch_vtime___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
+void scx_bpf_dsq_insert_vtime___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
+
+/**
+ * scx_bpf_select_cpu_and - Pick an idle CPU usable by task @p
+ * @p: task_struct to select a CPU for
+ * @prev_cpu: CPU @p was on previously
+ * @wake_flags: %SCX_WAKE_* flags
+ * @cpus_allowed: cpumask of allowed CPUs
+ * @flags: %SCX_PICK_IDLE* flags
+ *
+ * Inline wrapper that packs scalar arguments into a struct and calls
+ * __scx_bpf_select_cpu_and(). See __scx_bpf_select_cpu_and() for details.
+ */
+static inline s32
+scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
+ const struct cpumask *cpus_allowed, u64 flags)
+{
+ if (bpf_core_type_exists(struct scx_bpf_select_cpu_and_args)) {
+ struct scx_bpf_select_cpu_and_args args = {
+ .prev_cpu = prev_cpu,
+ .wake_flags = wake_flags,
+ .flags = flags,
+ };
+
+ return __scx_bpf_select_cpu_and(p, cpus_allowed, &args);
+ } else {
+ return scx_bpf_select_cpu_and___compat(p, prev_cpu, wake_flags,
+ cpus_allowed, flags);
+ }
+}
+
+/**
+ * scx_bpf_dsq_insert_vtime - Insert a task into the vtime priority queue of a DSQ
+ * @p: task_struct to insert
+ * @dsq_id: DSQ to insert into
+ * @slice: duration @p can run for in nsecs, 0 to keep the current value
+ * @vtime: @p's ordering inside the vtime-sorted queue of the target DSQ
+ * @enq_flags: SCX_ENQ_*
+ *
+ * Inline wrapper that packs scalar arguments into a struct and calls
+ * __scx_bpf_dsq_insert_vtime(). See __scx_bpf_dsq_insert_vtime() for details.
+ */
+static inline bool
+scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime,
+ u64 enq_flags)
+{
+ if (bpf_core_type_exists(struct scx_bpf_dsq_insert_vtime_args)) {
+ struct scx_bpf_dsq_insert_vtime_args args = {
+ .dsq_id = dsq_id,
+ .slice = slice,
+ .vtime = vtime,
+ .enq_flags = enq_flags,
+ };
+
+ return __scx_bpf_dsq_insert_vtime(p, &args);
+ } else if (bpf_ksym_exists(scx_bpf_dsq_insert_vtime___compat)) {
+ scx_bpf_dsq_insert_vtime___compat(p, dsq_id, slice, vtime,
+ enq_flags);
+ return true;
+ } else {
+ scx_bpf_dispatch_vtime___compat(p, dsq_id, slice, vtime,
+ enq_flags);
+ return true;
+ }
+}
+
+/*
+ * v6.19: scx_bpf_dsq_insert() now returns bool instead of void. Move
+ * scx_bpf_dsq_insert() decl to common.bpf.h and drop compat helper after v6.22.
+ * The extra ___compat suffix is to work around libbpf not ignoring ___SUFFIX on
+ * the kernel side. The entire suffix can be dropped later.
+ *
+ * v6.13: scx_bpf_dsq_insert() renaming is also handled here. See the block on
+ * dispatch renaming above for more details.
+ */
+bool scx_bpf_dsq_insert___v2___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
+void scx_bpf_dsq_insert___v1(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
+void scx_bpf_dispatch___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
+
+static inline bool
+scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags)
+{
+ if (bpf_ksym_exists(scx_bpf_dsq_insert___v2___compat)) {
+ return scx_bpf_dsq_insert___v2___compat(p, dsq_id, slice, enq_flags);
+ } else if (bpf_ksym_exists(scx_bpf_dsq_insert___v1)) {
+ scx_bpf_dsq_insert___v1(p, dsq_id, slice, enq_flags);
+ return true;
+ } else {
+ scx_bpf_dispatch___compat(p, dsq_id, slice, enq_flags);
+ return true;
+ }
+}
+
+/*
+ * v6.19: scx_bpf_task_set_slice() and scx_bpf_task_set_dsq_vtime() added for
+ * sub-sched authority checks. Drop the wrappers and move the decls to
+ * common.bpf.h after v6.22.
+ */
+bool scx_bpf_task_set_slice___new(struct task_struct *p, u64 slice) __ksym __weak;
+bool scx_bpf_task_set_dsq_vtime___new(struct task_struct *p, u64 vtime) __ksym __weak;
+
+static inline void scx_bpf_task_set_slice(struct task_struct *p, u64 slice)
+{
+ if (bpf_ksym_exists(scx_bpf_task_set_slice___new))
+ scx_bpf_task_set_slice___new(p, slice);
+ else
+ p->scx.slice = slice;
+}
+
+static inline void scx_bpf_task_set_dsq_vtime(struct task_struct *p, u64 vtime)
+{
+ if (bpf_ksym_exists(scx_bpf_task_set_dsq_vtime___new))
+ scx_bpf_task_set_dsq_vtime___new(p, vtime);
+ else
+ p->scx.dsq_vtime = vtime;
+}
+
+/*
+ * v6.19: The new void variant can be called from anywhere while the older v1
+ * variant can only be called from ops.cpu_release(). The double ___ suffixes on
+ * the v2 variant need to be removed once libbpf is updated to ignore the ___
+ * suffix on the kernel side. Drop the wrapper and move the decl to common.bpf.h after
+ * v6.22.
+ */
+u32 scx_bpf_reenqueue_local___v1(void) __ksym __weak;
+void scx_bpf_reenqueue_local___v2___compat(void) __ksym __weak;
+
+static inline bool __COMPAT_scx_bpf_reenqueue_local_from_anywhere(void)
+{
+ return bpf_ksym_exists(scx_bpf_reenqueue_local___v2___compat);
+}
+
+static inline void scx_bpf_reenqueue_local(void)
+{
+ if (__COMPAT_scx_bpf_reenqueue_local_from_anywhere())
+ scx_bpf_reenqueue_local___v2___compat();
+ else
+ scx_bpf_reenqueue_local___v1();
+}
+
+/*
* Define sched_ext_ops. This may be expanded to define multiple variants for
* backward compatibility. See compat.h::SCX_OPS_LOAD/ATTACH().
*/
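A hedged sketch of how a scheduler consumes the wrappers above; MY_DSQ, the
ops names, and the vtime source are illustrative. Note the bool return is
only meaningful on v6.19+ kernels since the compat paths always report
success.

    #define MY_DSQ 0

    void BPF_STRUCT_OPS(compat_enqueue, struct task_struct *p, u64 enq_flags)
    {
            if (!scx_bpf_dsq_insert_vtime(p, MY_DSQ, SCX_SLICE_DFL,
                                          p->scx.dsq_vtime, enq_flags))
                    scx_bpf_error("vtime insert failed");
    }

    s32 BPF_STRUCT_OPS(compat_select_cpu, struct task_struct *p, s32 prev_cpu,
                       u64 wake_flags)
    {
            /* falls back to the pre-v6.19 kfunc on older kernels */
            s32 cpu = scx_bpf_select_cpu_and(p, prev_cpu, wake_flags,
                                             p->cpus_ptr, 0);

            return cpu >= 0 ? cpu : prev_cpu;
    }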
diff --git a/tools/sched_ext/include/scx/compat.h b/tools/sched_ext/include/scx/compat.h
index 35c67c5174ac..8b4897fc8b99 100644
--- a/tools/sched_ext/include/scx/compat.h
+++ b/tools/sched_ext/include/scx/compat.h
@@ -151,6 +151,10 @@ static inline long scx_hotplug_seq(void)
*
* ec7e3b0463e1 ("implement-ops") in https://github.com/sched-ext/sched_ext is
* the current minimum required kernel version.
+ *
+ * COMPAT:
+ * - v6.17: ops.cgroup_set_bandwidth()
+ * - v6.19: ops.cgroup_set_idle()
*/
#define SCX_OPS_OPEN(__ops_name, __scx_name) ({ \
struct __scx_name *__skel; \
@@ -162,6 +166,16 @@ static inline long scx_hotplug_seq(void)
SCX_BUG_ON(!__skel, "Could not open " #__scx_name); \
__skel->struct_ops.__ops_name->hotplug_seq = scx_hotplug_seq(); \
SCX_ENUM_INIT(__skel); \
+ if (__skel->struct_ops.__ops_name->cgroup_set_bandwidth && \
+ !__COMPAT_struct_has_field("sched_ext_ops", "cgroup_set_bandwidth")) { \
+ fprintf(stderr, "WARNING: kernel doesn't support ops.cgroup_set_bandwidth()\n"); \
+ __skel->struct_ops.__ops_name->cgroup_set_bandwidth = NULL; \
+ } \
+ if (__skel->struct_ops.__ops_name->cgroup_set_idle && \
+ !__COMPAT_struct_has_field("sched_ext_ops", "cgroup_set_idle")) { \
+ fprintf(stderr, "WARNING: kernel doesn't support ops.cgroup_set_idle()\n"); \
+ __skel->struct_ops.__ops_name->cgroup_set_idle = NULL; \
+ } \
__skel; \
})
diff --git a/tools/sched_ext/include/scx/user_exit_info.bpf.h b/tools/sched_ext/include/scx/user_exit_info.bpf.h
new file mode 100644
index 000000000000..e7ac6611a990
--- /dev/null
+++ b/tools/sched_ext/include/scx/user_exit_info.bpf.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Define struct user_exit_info which is shared between BPF and userspace parts
+ * to communicate exit status and other information.
+ *
+ * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
+ * Copyright (c) 2022 David Vernet <dvernet@meta.com>
+ */
+
+#ifndef __USER_EXIT_INFO_BPF_H
+#define __USER_EXIT_INFO_BPF_H
+
+#ifndef LSP
+#include "vmlinux.h"
+#endif
+#include <bpf/bpf_core_read.h>
+
+#include "user_exit_info_common.h"
+
+#define UEI_DEFINE(__name) \
+ char RESIZABLE_ARRAY(data, __name##_dump); \
+ const volatile u32 __name##_dump_len; \
+ struct user_exit_info __name SEC(".data")
+
+#define UEI_RECORD(__uei_name, __ei) ({ \
+ bpf_probe_read_kernel_str(__uei_name.reason, \
+ sizeof(__uei_name.reason), (__ei)->reason); \
+ bpf_probe_read_kernel_str(__uei_name.msg, \
+ sizeof(__uei_name.msg), (__ei)->msg); \
+ bpf_probe_read_kernel_str(__uei_name##_dump, \
+ __uei_name##_dump_len, (__ei)->dump); \
+ if (bpf_core_field_exists((__ei)->exit_code)) \
+ __uei_name.exit_code = (__ei)->exit_code; \
+ /* use __sync to force memory barrier */ \
+ __sync_val_compare_and_swap(&__uei_name.kind, __uei_name.kind, \
+ (__ei)->kind); \
+})
+
+#endif /* __USER_EXIT_INFO_BPF_H */
diff --git a/tools/sched_ext/include/scx/user_exit_info.h b/tools/sched_ext/include/scx/user_exit_info.h
index 66f856640ee7..399697fa372f 100644
--- a/tools/sched_ext/include/scx/user_exit_info.h
+++ b/tools/sched_ext/include/scx/user_exit_info.h
@@ -10,55 +10,11 @@
#ifndef __USER_EXIT_INFO_H
#define __USER_EXIT_INFO_H
-#ifdef LSP
-#define __bpf__
-#include "../vmlinux.h"
-#endif
-
-enum uei_sizes {
- UEI_REASON_LEN = 128,
- UEI_MSG_LEN = 1024,
- UEI_DUMP_DFL_LEN = 32768,
-};
-
-struct user_exit_info {
- int kind;
- s64 exit_code;
- char reason[UEI_REASON_LEN];
- char msg[UEI_MSG_LEN];
-};
-
-#ifdef __bpf__
-
-#ifndef LSP
-#include "vmlinux.h"
-#endif
-#include <bpf/bpf_core_read.h>
-
-#define UEI_DEFINE(__name) \
- char RESIZABLE_ARRAY(data, __name##_dump); \
- const volatile u32 __name##_dump_len; \
- struct user_exit_info __name SEC(".data")
-
-#define UEI_RECORD(__uei_name, __ei) ({ \
- bpf_probe_read_kernel_str(__uei_name.reason, \
- sizeof(__uei_name.reason), (__ei)->reason); \
- bpf_probe_read_kernel_str(__uei_name.msg, \
- sizeof(__uei_name.msg), (__ei)->msg); \
- bpf_probe_read_kernel_str(__uei_name##_dump, \
- __uei_name##_dump_len, (__ei)->dump); \
- if (bpf_core_field_exists((__ei)->exit_code)) \
- __uei_name.exit_code = (__ei)->exit_code; \
- /* use __sync to force memory barrier */ \
- __sync_val_compare_and_swap(&__uei_name.kind, __uei_name.kind, \
- (__ei)->kind); \
-})
-
-#else /* !__bpf__ */
-
#include <stdio.h>
#include <stdbool.h>
+#include "user_exit_info_common.h"
+
/* no need to call the following explicitly if SCX_OPS_LOAD() is used */
#define UEI_SET_SIZE(__skel, __ops_name, __uei_name) ({ \
u32 __len = (__skel)->struct_ops.__ops_name->exit_dump_len ?: UEI_DUMP_DFL_LEN; \
@@ -114,5 +70,4 @@ enum uei_ecode_mask {
#define UEI_ECODE_RESTART(__ecode) (UEI_ECODE_SYS_ACT((__ecode)) == SCX_ECODE_ACT_RESTART)
-#endif /* __bpf__ */
#endif /* __USER_EXIT_INFO_H */
diff --git a/tools/sched_ext/include/scx/user_exit_info_common.h b/tools/sched_ext/include/scx/user_exit_info_common.h
new file mode 100644
index 000000000000..2d0981aedd89
--- /dev/null
+++ b/tools/sched_ext/include/scx/user_exit_info_common.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Define struct user_exit_info which is shared between BPF and userspace parts
+ * to communicate exit status and other information.
+ *
+ * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
+ * Copyright (c) 2022 David Vernet <dvernet@meta.com>
+ */
+#ifndef __USER_EXIT_INFO_COMMON_H
+#define __USER_EXIT_INFO_COMMON_H
+
+#ifdef LSP
+#include "../vmlinux.h"
+#endif
+
+enum uei_sizes {
+ UEI_REASON_LEN = 128,
+ UEI_MSG_LEN = 1024,
+ UEI_DUMP_DFL_LEN = 32768,
+};
+
+struct user_exit_info {
+ int kind;
+ s64 exit_code;
+ char reason[UEI_REASON_LEN];
+ char msg[UEI_MSG_LEN];
+};
+
+#endif /* __USER_EXIT_INFO_COMMON_H */
diff --git a/tools/sched_ext/scx_central.bpf.c b/tools/sched_ext/scx_central.bpf.c
index 50bc1737c167..55df8b798865 100644
--- a/tools/sched_ext/scx_central.bpf.c
+++ b/tools/sched_ext/scx_central.bpf.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * A central FIFO sched_ext scheduler which demonstrates the followings:
+ * A central FIFO sched_ext scheduler which demonstrates the following:
*
* a. Making all scheduling decisions from one CPU:
*
diff --git a/tools/sched_ext/scx_central.c b/tools/sched_ext/scx_central.c
index 6ba6e610eeaa..55931a4cd71c 100644
--- a/tools/sched_ext/scx_central.c
+++ b/tools/sched_ext/scx_central.c
@@ -61,6 +61,7 @@ restart:
skel->rodata->nr_cpu_ids = libbpf_num_possible_cpus();
skel->rodata->slice_ns = __COMPAT_ENUM_OR_ZERO("scx_public_consts", "SCX_SLICE_DFL");
+ assert(skel->rodata->nr_cpu_ids > 0);
assert(skel->rodata->nr_cpu_ids <= INT32_MAX);
while ((opt = getopt(argc, argv, "s:c:pvh")) != -1) {
diff --git a/tools/sched_ext/scx_cpu0.bpf.c b/tools/sched_ext/scx_cpu0.bpf.c
new file mode 100644
index 000000000000..6326ce598c8e
--- /dev/null
+++ b/tools/sched_ext/scx_cpu0.bpf.c
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A CPU0 scheduler.
+ *
+ * This scheduler queues all tasks to a shared DSQ and only dispatches them on
+ * CPU0 in FIFO order. This is useful for testing bypass behavior when many
+ * tasks are concentrated on a single CPU. If bypass-mode load balancing
+ * doesn't work, bypass mode can trigger task hangs or RCU stalls, as the
+ * queue is long and there's only one CPU working on it.
+ *
+ * It also demonstrates:
+ *
+ * - Statistics tracking how many tasks are queued to local and CPU0 DSQs.
+ * - Termination notification for userspace.
+ *
+ * Copyright (c) 2025 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2025 Tejun Heo <tj@kernel.org>
+ */
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+const volatile u32 nr_cpus = 32; /* !0 for veristat, set during init */
+
+UEI_DEFINE(uei);
+
+/*
+ * We create a custom DSQ with ID 0 that we dispatch to and consume from on
+ * CPU0.
+ */
+#define DSQ_CPU0 0
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(u64));
+ __uint(max_entries, 2); /* [local, cpu0] */
+} stats SEC(".maps");
+
+static void stat_inc(u32 idx)
+{
+ u64 *cnt_p = bpf_map_lookup_elem(&stats, &idx);
+ if (cnt_p)
+ (*cnt_p)++;
+}
+
+s32 BPF_STRUCT_OPS(cpu0_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags)
+{
+ return 0;
+}
+
+void BPF_STRUCT_OPS(cpu0_enqueue, struct task_struct *p, u64 enq_flags)
+{
+ /*
+	 * select_cpu() always picks CPU0, so if @p is being enqueued on a
+	 * different CPU, it must be unable to run on CPU0. Queue it on
+	 * whichever CPU it's currently on.
+ */
+ if (scx_bpf_task_cpu(p) != 0) {
+ stat_inc(0); /* count local queueing */
+ scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
+ return;
+ }
+
+ stat_inc(1); /* count cpu0 queueing */
+ scx_bpf_dsq_insert(p, DSQ_CPU0, SCX_SLICE_DFL, enq_flags);
+}
+
+void BPF_STRUCT_OPS(cpu0_dispatch, s32 cpu, struct task_struct *prev)
+{
+ if (cpu == 0)
+ scx_bpf_dsq_move_to_local(DSQ_CPU0);
+}
+
+s32 BPF_STRUCT_OPS_SLEEPABLE(cpu0_init)
+{
+ return scx_bpf_create_dsq(DSQ_CPU0, -1);
+}
+
+void BPF_STRUCT_OPS(cpu0_exit, struct scx_exit_info *ei)
+{
+ UEI_RECORD(uei, ei);
+}
+
+SCX_OPS_DEFINE(cpu0_ops,
+ .select_cpu = (void *)cpu0_select_cpu,
+ .enqueue = (void *)cpu0_enqueue,
+ .dispatch = (void *)cpu0_dispatch,
+ .init = (void *)cpu0_init,
+ .exit = (void *)cpu0_exit,
+ .name = "cpu0");
diff --git a/tools/sched_ext/scx_cpu0.c b/tools/sched_ext/scx_cpu0.c
new file mode 100644
index 000000000000..1e4fa4ab8da9
--- /dev/null
+++ b/tools/sched_ext/scx_cpu0.c
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2025 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2025 Tejun Heo <tj@kernel.org>
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <signal.h>
+#include <assert.h>
+#include <libgen.h>
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include "scx_cpu0.bpf.skel.h"
+
+const char help_fmt[] =
+"A cpu0 sched_ext scheduler.\n"
+"\n"
+"See the top-level comment in .bpf.c for more details.\n"
+"\n"
+"Usage: %s [-v]\n"
+"\n"
+" -v Print libbpf debug messages\n"
+" -h Display this help and exit\n";
+
+static bool verbose;
+static volatile int exit_req;
+
+static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va_list args)
+{
+ if (level == LIBBPF_DEBUG && !verbose)
+ return 0;
+ return vfprintf(stderr, format, args);
+}
+
+static void sigint_handler(int sig)
+{
+ exit_req = 1;
+}
+
+static void read_stats(struct scx_cpu0 *skel, __u64 *stats)
+{
+ int nr_cpus = libbpf_num_possible_cpus();
+ assert(nr_cpus > 0);
+ __u64 cnts[2][nr_cpus];
+ __u32 idx;
+
+ memset(stats, 0, sizeof(stats[0]) * 2);
+
+ for (idx = 0; idx < 2; idx++) {
+ int ret, cpu;
+
+ ret = bpf_map_lookup_elem(bpf_map__fd(skel->maps.stats),
+ &idx, cnts[idx]);
+ if (ret < 0)
+ continue;
+ for (cpu = 0; cpu < nr_cpus; cpu++)
+ stats[idx] += cnts[idx][cpu];
+ }
+}
+
+int main(int argc, char **argv)
+{
+ struct scx_cpu0 *skel;
+ struct bpf_link *link;
+ __u32 opt;
+ __u64 ecode;
+
+ libbpf_set_print(libbpf_print_fn);
+ signal(SIGINT, sigint_handler);
+ signal(SIGTERM, sigint_handler);
+restart:
+ skel = SCX_OPS_OPEN(cpu0_ops, scx_cpu0);
+
+ skel->rodata->nr_cpus = libbpf_num_possible_cpus();
+
+ while ((opt = getopt(argc, argv, "vh")) != -1) {
+ switch (opt) {
+ case 'v':
+ verbose = true;
+ break;
+ default:
+ fprintf(stderr, help_fmt, basename(argv[0]));
+ return opt != 'h';
+ }
+ }
+
+ SCX_OPS_LOAD(skel, cpu0_ops, scx_cpu0, uei);
+ link = SCX_OPS_ATTACH(skel, cpu0_ops, scx_cpu0);
+
+ while (!exit_req && !UEI_EXITED(skel, uei)) {
+ __u64 stats[2];
+
+ read_stats(skel, stats);
+ printf("local=%llu cpu0=%llu\n", stats[0], stats[1]);
+ fflush(stdout);
+ sleep(1);
+ }
+
+ bpf_link__destroy(link);
+ ecode = UEI_REPORT(skel, uei);
+ scx_cpu0__destroy(skel);
+
+ if (UEI_ECODE_RESTART(ecode))
+ goto restart;
+ return 0;
+}
diff --git a/tools/sched_ext/scx_flatcg.bpf.c b/tools/sched_ext/scx_flatcg.bpf.c
index fdc7170639e6..43126858b8e4 100644
--- a/tools/sched_ext/scx_flatcg.bpf.c
+++ b/tools/sched_ext/scx_flatcg.bpf.c
@@ -382,7 +382,7 @@ void BPF_STRUCT_OPS(fcg_enqueue, struct task_struct *p, u64 enq_flags)
return;
}
- cgrp = __COMPAT_scx_bpf_task_cgroup(p);
+ cgrp = scx_bpf_task_cgroup(p);
cgc = find_cgrp_ctx(cgrp);
if (!cgc)
goto out_release;
@@ -508,7 +508,7 @@ void BPF_STRUCT_OPS(fcg_runnable, struct task_struct *p, u64 enq_flags)
{
struct cgroup *cgrp;
- cgrp = __COMPAT_scx_bpf_task_cgroup(p);
+ cgrp = scx_bpf_task_cgroup(p);
update_active_weight_sums(cgrp, true);
bpf_cgroup_release(cgrp);
}
@@ -521,7 +521,7 @@ void BPF_STRUCT_OPS(fcg_running, struct task_struct *p)
if (fifo_sched)
return;
- cgrp = __COMPAT_scx_bpf_task_cgroup(p);
+ cgrp = scx_bpf_task_cgroup(p);
cgc = find_cgrp_ctx(cgrp);
if (cgc) {
/*
@@ -564,7 +564,7 @@ void BPF_STRUCT_OPS(fcg_stopping, struct task_struct *p, bool runnable)
if (!taskc->bypassed_at)
return;
- cgrp = __COMPAT_scx_bpf_task_cgroup(p);
+ cgrp = scx_bpf_task_cgroup(p);
cgc = find_cgrp_ctx(cgrp);
if (cgc) {
__sync_fetch_and_add(&cgc->cvtime_delta,
@@ -578,7 +578,7 @@ void BPF_STRUCT_OPS(fcg_quiescent, struct task_struct *p, u64 deq_flags)
{
struct cgroup *cgrp;
- cgrp = __COMPAT_scx_bpf_task_cgroup(p);
+ cgrp = scx_bpf_task_cgroup(p);
update_active_weight_sums(cgrp, false);
bpf_cgroup_release(cgrp);
}
@@ -950,5 +950,5 @@ SCX_OPS_DEFINE(flatcg_ops,
.cgroup_move = (void *)fcg_cgroup_move,
.init = (void *)fcg_init,
.exit = (void *)fcg_exit,
- .flags = SCX_OPS_ENQ_EXITING,
+ .flags = SCX_OPS_HAS_CGROUP_WEIGHT | SCX_OPS_ENQ_EXITING,
.name = "flatcg");
diff --git a/tools/sched_ext/scx_flatcg.c b/tools/sched_ext/scx_flatcg.c
index 6dd423eeb4ff..cd85eb401179 100644
--- a/tools/sched_ext/scx_flatcg.c
+++ b/tools/sched_ext/scx_flatcg.c
@@ -6,6 +6,7 @@
*/
#include <stdio.h>
#include <signal.h>
+#include <assert.h>
#include <unistd.h>
#include <libgen.h>
#include <limits.h>
@@ -137,6 +138,7 @@ restart:
skel = SCX_OPS_OPEN(flatcg_ops, scx_flatcg);
skel->rodata->nr_cpus = libbpf_num_possible_cpus();
+ assert(skel->rodata->nr_cpus > 0);
skel->rodata->cgrp_slice_ns = __COMPAT_ENUM_OR_ZERO("scx_public_consts", "SCX_SLICE_DFL");
while ((opt = getopt(argc, argv, "s:i:dfvh")) != -1) {
diff --git a/tools/sched_ext/scx_qmap.bpf.c b/tools/sched_ext/scx_qmap.bpf.c
index c3cd9a17d48e..df21fad0c438 100644
--- a/tools/sched_ext/scx_qmap.bpf.c
+++ b/tools/sched_ext/scx_qmap.bpf.c
@@ -39,7 +39,8 @@ const volatile u32 stall_kernel_nth;
const volatile u32 dsp_inf_loop_after;
const volatile u32 dsp_batch;
const volatile bool highpri_boosting;
-const volatile bool print_shared_dsq;
+const volatile bool print_dsqs_and_events;
+const volatile bool print_msgs;
const volatile s32 disallow_tgid;
const volatile bool suppress_dump;
@@ -56,7 +57,8 @@ struct qmap {
queue1 SEC(".maps"),
queue2 SEC(".maps"),
queue3 SEC(".maps"),
- queue4 SEC(".maps");
+ queue4 SEC(".maps"),
+ dump_store SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
@@ -200,6 +202,9 @@ void BPF_STRUCT_OPS(qmap_enqueue, struct task_struct *p, u64 enq_flags)
void *ring;
s32 cpu;
+ if (enq_flags & SCX_ENQ_REENQ)
+ __sync_fetch_and_add(&nr_reenqueued, 1);
+
if (p->flags & PF_KTHREAD) {
if (stall_kernel_nth && !(++kernel_cnt % stall_kernel_nth))
return;
@@ -318,12 +323,9 @@ static bool dispatch_highpri(bool from_timer)
if (tctx->highpri) {
/* exercise the set_*() and vtime interface too */
- __COMPAT_scx_bpf_dsq_move_set_slice(
- BPF_FOR_EACH_ITER, slice_ns * 2);
- __COMPAT_scx_bpf_dsq_move_set_vtime(
- BPF_FOR_EACH_ITER, highpri_seq++);
- __COMPAT_scx_bpf_dsq_move_vtime(
- BPF_FOR_EACH_ITER, p, HIGHPRI_DSQ, 0);
+ scx_bpf_dsq_move_set_slice(BPF_FOR_EACH_ITER, slice_ns * 2);
+ scx_bpf_dsq_move_set_vtime(BPF_FOR_EACH_ITER, highpri_seq++);
+ scx_bpf_dsq_move_vtime(BPF_FOR_EACH_ITER, p, HIGHPRI_DSQ, 0);
}
}
@@ -340,9 +342,8 @@ static bool dispatch_highpri(bool from_timer)
else
cpu = scx_bpf_pick_any_cpu(p->cpus_ptr, 0);
- if (__COMPAT_scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p,
- SCX_DSQ_LOCAL_ON | cpu,
- SCX_ENQ_PREEMPT)) {
+ if (scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p, SCX_DSQ_LOCAL_ON | cpu,
+ SCX_ENQ_PREEMPT)) {
if (cpu == this_cpu) {
dispatched = true;
__sync_fetch_and_add(&nr_expedited_local, 1);
@@ -531,20 +532,35 @@ bool BPF_STRUCT_OPS(qmap_core_sched_before,
return task_qdist(a) > task_qdist(b);
}
-void BPF_STRUCT_OPS(qmap_cpu_release, s32 cpu, struct scx_cpu_release_args *args)
+SEC("tp_btf/sched_switch")
+int BPF_PROG(qmap_sched_switch, bool preempt, struct task_struct *prev,
+ struct task_struct *next, unsigned long prev_state)
{
- u32 cnt;
+ if (!__COMPAT_scx_bpf_reenqueue_local_from_anywhere())
+ return 0;
/*
- * Called when @cpu is taken by a higher priority scheduling class. This
- * makes @cpu no longer available for executing sched_ext tasks. As we
- * don't want the tasks in @cpu's local dsq to sit there until @cpu
- * becomes available again, re-enqueue them into the global dsq. See
- * %SCX_ENQ_REENQ handling in qmap_enqueue().
+	 * If a CPU is taken by a higher priority scheduling class, it is no
+	 * longer available for executing sched_ext tasks. As we don't want the
+	 * tasks in that CPU's local dsq to sit there until it becomes available
+	 * again, re-enqueue them into the global dsq. See %SCX_ENQ_REENQ
+ * handling in qmap_enqueue().
*/
- cnt = scx_bpf_reenqueue_local();
- if (cnt)
- __sync_fetch_and_add(&nr_reenqueued, cnt);
+ switch (next->policy) {
+ case 1: /* SCHED_FIFO */
+ case 2: /* SCHED_RR */
+ case 6: /* SCHED_DEADLINE */
+ scx_bpf_reenqueue_local();
+ }
+
+ return 0;
+}
+
+void BPF_STRUCT_OPS(qmap_cpu_release, s32 cpu, struct scx_cpu_release_args *args)
+{
+ /* see qmap_sched_switch() to learn how to do this on newer kernels */
+ if (!__COMPAT_scx_bpf_reenqueue_local_from_anywhere())
+ scx_bpf_reenqueue_local();
}
s32 BPF_STRUCT_OPS(qmap_init_task, struct task_struct *p,
@@ -578,11 +594,26 @@ void BPF_STRUCT_OPS(qmap_dump, struct scx_dump_ctx *dctx)
return;
scx_bpf_dump("QMAP FIFO[%d]:", i);
+
+ /*
+		 * A dump can be invoked at any time and there is no way to
+		 * iterate the FIFO non-destructively. Pop entries into
+		 * dump_store, then push them back afterwards. If racing
+		 * against new enqueues, ordering can get mixed up.
+ */
bpf_repeat(4096) {
if (bpf_map_pop_elem(fifo, &pid))
break;
+ bpf_map_push_elem(&dump_store, &pid, 0);
scx_bpf_dump(" %d", pid);
}
+
+ bpf_repeat(4096) {
+ if (bpf_map_pop_elem(&dump_store, &pid))
+ break;
+ bpf_map_push_elem(fifo, &pid, 0);
+ }
+
scx_bpf_dump("\n");
}
}
@@ -615,6 +646,29 @@ void BPF_STRUCT_OPS(qmap_dump_task, struct scx_dump_ctx *dctx, struct task_struc
taskc->force_local, taskc->core_sched_seq);
}
+s32 BPF_STRUCT_OPS(qmap_cgroup_init, struct cgroup *cgrp, struct scx_cgroup_init_args *args)
+{
+ if (print_msgs)
+ bpf_printk("CGRP INIT %llu weight=%u period=%lu quota=%ld burst=%lu",
+ cgrp->kn->id, args->weight, args->bw_period_us,
+ args->bw_quota_us, args->bw_burst_us);
+ return 0;
+}
+
+void BPF_STRUCT_OPS(qmap_cgroup_set_weight, struct cgroup *cgrp, u32 weight)
+{
+ if (print_msgs)
+ bpf_printk("CGRP SET %llu weight=%u", cgrp->kn->id, weight);
+}
+
+void BPF_STRUCT_OPS(qmap_cgroup_set_bandwidth, struct cgroup *cgrp,
+ u64 period_us, u64 quota_us, u64 burst_us)
+{
+ if (print_msgs)
+ bpf_printk("CGRP SET %llu period=%lu quota=%ld burst=%lu",
+ cgrp->kn->id, period_us, quota_us, burst_us);
+}
+
/*
* Print out the online and possible CPU map using bpf_printk() as a
* demonstration of using the cpumask kfuncs and ops.cpu_on/offline().
@@ -656,16 +710,20 @@ static void print_cpus(void)
void BPF_STRUCT_OPS(qmap_cpu_online, s32 cpu)
{
- bpf_printk("CPU %d coming online", cpu);
- /* @cpu is already online at this point */
- print_cpus();
+ if (print_msgs) {
+ bpf_printk("CPU %d coming online", cpu);
+ /* @cpu is already online at this point */
+ print_cpus();
+ }
}
void BPF_STRUCT_OPS(qmap_cpu_offline, s32 cpu)
{
- bpf_printk("CPU %d going offline", cpu);
- /* @cpu is still online at this point */
- print_cpus();
+ if (print_msgs) {
+ bpf_printk("CPU %d going offline", cpu);
+ /* @cpu is still online at this point */
+ print_cpus();
+ }
}
struct monitor_timer {
@@ -763,35 +821,36 @@ static void dump_shared_dsq(void)
static int monitor_timerfn(void *map, int *key, struct bpf_timer *timer)
{
- struct scx_event_stats events;
-
bpf_rcu_read_lock();
dispatch_highpri(true);
bpf_rcu_read_unlock();
monitor_cpuperf();
- if (print_shared_dsq)
+ if (print_dsqs_and_events) {
+ struct scx_event_stats events;
+
dump_shared_dsq();
- __COMPAT_scx_bpf_events(&events, sizeof(events));
-
- bpf_printk("%35s: %lld", "SCX_EV_SELECT_CPU_FALLBACK",
- scx_read_event(&events, SCX_EV_SELECT_CPU_FALLBACK));
- bpf_printk("%35s: %lld", "SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE",
- scx_read_event(&events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE));
- bpf_printk("%35s: %lld", "SCX_EV_DISPATCH_KEEP_LAST",
- scx_read_event(&events, SCX_EV_DISPATCH_KEEP_LAST));
- bpf_printk("%35s: %lld", "SCX_EV_ENQ_SKIP_EXITING",
- scx_read_event(&events, SCX_EV_ENQ_SKIP_EXITING));
- bpf_printk("%35s: %lld", "SCX_EV_REFILL_SLICE_DFL",
- scx_read_event(&events, SCX_EV_REFILL_SLICE_DFL));
- bpf_printk("%35s: %lld", "SCX_EV_BYPASS_DURATION",
- scx_read_event(&events, SCX_EV_BYPASS_DURATION));
- bpf_printk("%35s: %lld", "SCX_EV_BYPASS_DISPATCH",
- scx_read_event(&events, SCX_EV_BYPASS_DISPATCH));
- bpf_printk("%35s: %lld", "SCX_EV_BYPASS_ACTIVATE",
- scx_read_event(&events, SCX_EV_BYPASS_ACTIVATE));
+ __COMPAT_scx_bpf_events(&events, sizeof(events));
+
+ bpf_printk("%35s: %lld", "SCX_EV_SELECT_CPU_FALLBACK",
+ scx_read_event(&events, SCX_EV_SELECT_CPU_FALLBACK));
+ bpf_printk("%35s: %lld", "SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE",
+ scx_read_event(&events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE));
+ bpf_printk("%35s: %lld", "SCX_EV_DISPATCH_KEEP_LAST",
+ scx_read_event(&events, SCX_EV_DISPATCH_KEEP_LAST));
+ bpf_printk("%35s: %lld", "SCX_EV_ENQ_SKIP_EXITING",
+ scx_read_event(&events, SCX_EV_ENQ_SKIP_EXITING));
+ bpf_printk("%35s: %lld", "SCX_EV_REFILL_SLICE_DFL",
+ scx_read_event(&events, SCX_EV_REFILL_SLICE_DFL));
+ bpf_printk("%35s: %lld", "SCX_EV_BYPASS_DURATION",
+ scx_read_event(&events, SCX_EV_BYPASS_DURATION));
+ bpf_printk("%35s: %lld", "SCX_EV_BYPASS_DISPATCH",
+ scx_read_event(&events, SCX_EV_BYPASS_DISPATCH));
+ bpf_printk("%35s: %lld", "SCX_EV_BYPASS_ACTIVATE",
+ scx_read_event(&events, SCX_EV_BYPASS_ACTIVATE));
+ }
bpf_timer_start(timer, ONE_SEC_IN_NS, 0);
return 0;
@@ -803,7 +862,8 @@ s32 BPF_STRUCT_OPS_SLEEPABLE(qmap_init)
struct bpf_timer *timer;
s32 ret;
- print_cpus();
+ if (print_msgs)
+ print_cpus();
ret = scx_bpf_create_dsq(SHARED_DSQ, -1);
if (ret)
@@ -840,6 +900,9 @@ SCX_OPS_DEFINE(qmap_ops,
.dump = (void *)qmap_dump,
.dump_cpu = (void *)qmap_dump_cpu,
.dump_task = (void *)qmap_dump_task,
+ .cgroup_init = (void *)qmap_cgroup_init,
+ .cgroup_set_weight = (void *)qmap_cgroup_set_weight,
+ .cgroup_set_bandwidth = (void *)qmap_cgroup_set_bandwidth,
.cpu_online = (void *)qmap_cpu_online,
.cpu_offline = (void *)qmap_cpu_offline,
.init = (void *)qmap_init,
diff --git a/tools/sched_ext/scx_qmap.c b/tools/sched_ext/scx_qmap.c
index c4912ab2e76f..ef701d45ba43 100644
--- a/tools/sched_ext/scx_qmap.c
+++ b/tools/sched_ext/scx_qmap.c
@@ -20,7 +20,7 @@ const char help_fmt[] =
"See the top-level comment in .bpf.c for more details.\n"
"\n"
"Usage: %s [-s SLICE_US] [-e COUNT] [-t COUNT] [-T COUNT] [-l COUNT] [-b COUNT]\n"
-" [-P] [-d PID] [-D LEN] [-p] [-v]\n"
+" [-P] [-M] [-d PID] [-D LEN] [-p] [-v]\n"
"\n"
" -s SLICE_US Override slice duration\n"
" -e COUNT Trigger scx_bpf_error() after COUNT enqueues\n"
@@ -28,7 +28,8 @@ const char help_fmt[] =
" -T COUNT Stall every COUNT'th kernel thread\n"
" -l COUNT Trigger dispatch infinite looping after COUNT dispatches\n"
" -b COUNT Dispatch upto COUNT tasks together\n"
-" -P Print out DSQ content to trace_pipe every second, use with -b\n"
+" -P Print out DSQ content and event counters to trace_pipe every second\n"
+" -M Print out debug messages to trace_pipe\n"
" -H Boost nice -20 tasks in SHARED_DSQ, use with -b\n"
" -d PID Disallow a process from switching into SCHED_EXT (-1 for self)\n"
" -D LEN Set scx_exit_info.dump buffer length\n"
@@ -66,7 +67,7 @@ int main(int argc, char **argv)
skel->rodata->slice_ns = __COMPAT_ENUM_OR_ZERO("scx_public_consts", "SCX_SLICE_DFL");
- while ((opt = getopt(argc, argv, "s:e:t:T:l:b:PHd:D:Spvh")) != -1) {
+ while ((opt = getopt(argc, argv, "s:e:t:T:l:b:PMHd:D:Spvh")) != -1) {
switch (opt) {
case 's':
skel->rodata->slice_ns = strtoull(optarg, NULL, 0) * 1000;
@@ -87,7 +88,10 @@ int main(int argc, char **argv)
skel->rodata->dsp_batch = strtoul(optarg, NULL, 0);
break;
case 'P':
- skel->rodata->print_shared_dsq = true;
+ skel->rodata->print_dsqs_and_events = true;
+ break;
+ case 'M':
+ skel->rodata->print_msgs = true;
break;
case 'H':
skel->rodata->highpri_boosting = true;
diff --git a/tools/sched_ext/scx_simple.c b/tools/sched_ext/scx_simple.c
index 76d83199545c..06d4b13bf76b 100644
--- a/tools/sched_ext/scx_simple.c
+++ b/tools/sched_ext/scx_simple.c
@@ -7,6 +7,7 @@
#include <stdio.h>
#include <unistd.h>
#include <signal.h>
+#include <assert.h>
#include <libgen.h>
#include <bpf/bpf.h>
#include <scx/common.h>
@@ -41,6 +42,7 @@ static void sigint_handler(int simple)
static void read_stats(struct scx_simple *skel, __u64 *stats)
{
int nr_cpus = libbpf_num_possible_cpus();
+ assert(nr_cpus > 0);
__u64 cnts[2][nr_cpus];
__u32 idx;
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index 5158250988ce..ded48263dd5e 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -101,7 +101,9 @@ else ifneq ($(CROSS_COMPILE),)
# Allow userspace to override CLANG_CROSS_FLAGS to specify their own
# sysroots and flags or to avoid the GCC call in pure Clang builds.
ifeq ($(CLANG_CROSS_FLAGS),)
-CLANG_CROSS_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%))
+CLANG_TARGET := $(notdir $(CROSS_COMPILE:%-=%))
+CLANG_TARGET := $(subst s390-linux,s390x-linux,$(CLANG_TARGET))
+CLANG_CROSS_FLAGS := --target=$(CLANG_TARGET)
GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)gcc 2>/dev/null))
ifneq ($(GCC_TOOLCHAIN_DIR),)
CLANG_CROSS_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE))
diff --git a/tools/scripts/syscall.tbl b/tools/scripts/syscall.tbl
index 580b4e246aec..d1ae5e92c615 100644
--- a/tools/scripts/syscall.tbl
+++ b/tools/scripts/syscall.tbl
@@ -408,3 +408,5 @@
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
467 common open_tree_attr sys_open_tree_attr
+468 common file_getattr sys_file_getattr
+469 common file_setattr sys_file_setattr
diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild
index 387f3df8b988..0e151d0572d1 100644
--- a/tools/testing/cxl/Kbuild
+++ b/tools/testing/cxl/Kbuild
@@ -4,23 +4,19 @@ ldflags-y += --wrap=is_acpi_device_node
ldflags-y += --wrap=acpi_evaluate_integer
ldflags-y += --wrap=acpi_pci_find_root
ldflags-y += --wrap=nvdimm_bus_register
-ldflags-y += --wrap=devm_cxl_port_enumerate_dports
-ldflags-y += --wrap=devm_cxl_setup_hdm
-ldflags-y += --wrap=devm_cxl_add_passthrough_decoder
-ldflags-y += --wrap=devm_cxl_enumerate_decoders
ldflags-y += --wrap=cxl_await_media_ready
-ldflags-y += --wrap=cxl_hdm_decode_init
-ldflags-y += --wrap=cxl_dvsec_rr_decode
ldflags-y += --wrap=devm_cxl_add_rch_dport
-ldflags-y += --wrap=cxl_rcd_component_reg_phys
ldflags-y += --wrap=cxl_endpoint_parse_cdat
ldflags-y += --wrap=cxl_dport_init_ras_reporting
+ldflags-y += --wrap=devm_cxl_endpoint_decoders_setup
+ldflags-y += --wrap=hmat_get_extended_linear_cache_size
DRIVERS := ../../../drivers
CXL_SRC := $(DRIVERS)/cxl
CXL_CORE_SRC := $(DRIVERS)/cxl/core
ccflags-y := -I$(srctree)/drivers/cxl/
ccflags-y += -D__mock=__weak
+ccflags-y += -DCXL_TEST_ENABLE=1
ccflags-y += -DTRACE_INCLUDE_PATH=$(CXL_CORE_SRC) -I$(srctree)/drivers/cxl/core/
obj-m += cxl_acpi.o
@@ -62,11 +58,11 @@ cxl_core-y += $(CXL_CORE_SRC)/hdm.o
cxl_core-y += $(CXL_CORE_SRC)/pmu.o
cxl_core-y += $(CXL_CORE_SRC)/cdat.o
cxl_core-y += $(CXL_CORE_SRC)/ras.o
-cxl_core-y += $(CXL_CORE_SRC)/acpi.o
cxl_core-$(CONFIG_TRACING) += $(CXL_CORE_SRC)/trace.o
cxl_core-$(CONFIG_CXL_REGION) += $(CXL_CORE_SRC)/region.o
cxl_core-$(CONFIG_CXL_MCE) += $(CXL_CORE_SRC)/mce.o
cxl_core-$(CONFIG_CXL_FEATURES) += $(CXL_CORE_SRC)/features.o
+cxl_core-$(CONFIG_CXL_EDAC_MEM_FEATURES) += $(CXL_CORE_SRC)/edac.o
cxl_core-y += config_check.o
cxl_core-y += cxl_core_test.o
cxl_core-y += cxl_core_exports.o
diff --git a/tools/testing/cxl/config_check.c b/tools/testing/cxl/config_check.c
index 0902c5d6e410..a80bc2c062fe 100644
--- a/tools/testing/cxl/config_check.c
+++ b/tools/testing/cxl/config_check.c
@@ -14,4 +14,5 @@ void check(void)
BUILD_BUG_ON(!IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST));
BUILD_BUG_ON(!IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST));
BUILD_BUG_ON(!IS_ENABLED(CONFIG_DEBUG_FS));
+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_MEMORY_HOTPLUG));
}
diff --git a/tools/testing/cxl/cxl_core_exports.c b/tools/testing/cxl/cxl_core_exports.c
index f088792a8925..6754de35598d 100644
--- a/tools/testing/cxl/cxl_core_exports.c
+++ b/tools/testing/cxl/cxl_core_exports.c
@@ -2,6 +2,28 @@
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include "cxl.h"
+#include "exports.h"
/* Exporting of cxl_core symbols that are only used by cxl_test */
EXPORT_SYMBOL_NS_GPL(cxl_num_decoders_committed, "CXL");
+
+cxl_add_dport_by_dev_fn _devm_cxl_add_dport_by_dev =
+ __devm_cxl_add_dport_by_dev;
+EXPORT_SYMBOL_NS_GPL(_devm_cxl_add_dport_by_dev, "CXL");
+
+struct cxl_dport *devm_cxl_add_dport_by_dev(struct cxl_port *port,
+ struct device *dport_dev)
+{
+ return _devm_cxl_add_dport_by_dev(port, dport_dev);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport_by_dev, "CXL");
+
+cxl_switch_decoders_setup_fn _devm_cxl_switch_port_decoders_setup =
+ __devm_cxl_switch_port_decoders_setup;
+EXPORT_SYMBOL_NS_GPL(_devm_cxl_switch_port_decoders_setup, "CXL");
+
+int devm_cxl_switch_port_decoders_setup(struct cxl_port *port)
+{
+ return _devm_cxl_switch_port_decoders_setup(port);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_switch_port_decoders_setup, "CXL");
diff --git a/tools/testing/cxl/exports.h b/tools/testing/cxl/exports.h
new file mode 100644
index 000000000000..7ebee7c0bd67
--- /dev/null
+++ b/tools/testing/cxl/exports.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef __MOCK_CXL_EXPORTS_H_
+#define __MOCK_CXL_EXPORTS_H_
+
+typedef struct cxl_dport *(*cxl_add_dport_by_dev_fn)(struct cxl_port *port,
+ struct device *dport_dev);
+extern cxl_add_dport_by_dev_fn _devm_cxl_add_dport_by_dev;
+
+typedef int(*cxl_switch_decoders_setup_fn)(struct cxl_port *port);
+extern cxl_switch_decoders_setup_fn _devm_cxl_switch_port_decoders_setup;
+
+#endif
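These pointers default to the real __devm_cxl_* implementations and are repointed when the mock module registers its ops (see mock.c further down). A minimal sketch of the redirection pattern, with invented names standing in for the CXL types:

	/* Redirection pattern in miniature; names are illustrative only */
	typedef int (*setup_fn)(int port_id);

	static int real_setup(int port_id) { return 0; }
	static int mock_setup(int port_id) { return 1; }

	static setup_fn active_setup = real_setup;	/* default: real path */

	static void register_mock(void)   { active_setup = mock_setup; }
	static void unregister_mock(void) { active_setup = real_setup; }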
diff --git a/tools/testing/cxl/test/Kbuild b/tools/testing/cxl/test/Kbuild
index 6b1927897856..af50972c8b6d 100644
--- a/tools/testing/cxl/test/Kbuild
+++ b/tools/testing/cxl/test/Kbuild
@@ -4,6 +4,7 @@ ccflags-y := -I$(srctree)/drivers/cxl/ -I$(srctree)/drivers/cxl/core
obj-m += cxl_test.o
obj-m += cxl_mock.o
obj-m += cxl_mock_mem.o
+obj-m += cxl_translate.o
cxl_test-y := cxl.o
cxl_mock-y := mock.o
diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
index 1c3336095923..81e2aef3627a 100644
--- a/tools/testing/cxl/test/cxl.c
+++ b/tools/testing/cxl/test/cxl.c
@@ -2,6 +2,7 @@
// Copyright(c) 2021 Intel Corporation. All rights reserved.
#include <linux/platform_device.h>
+#include <linux/memory_hotplug.h>
#include <linux/genalloc.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -14,6 +15,7 @@
#include "mock.h"
static int interleave_arithmetic;
+static bool extended_linear_cache;
#define FAKE_QTG_ID 42
@@ -25,6 +27,9 @@ static int interleave_arithmetic;
#define NR_CXL_PORT_DECODERS 8
#define NR_BRIDGES (NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST + NR_CXL_RCH)
+#define MOCK_AUTO_REGION_SIZE_DEFAULT SZ_512M
+static int mock_auto_region_size = MOCK_AUTO_REGION_SIZE_DEFAULT;
+
static struct platform_device *cxl_acpi;
static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES];
#define NR_MULTI_ROOT (NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS)
@@ -209,7 +214,7 @@ static struct {
},
.interleave_ways = 0,
.granularity = 4,
- .restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
+ .restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
.qtg_id = FAKE_QTG_ID,
.window_size = SZ_256M * 4UL,
@@ -224,7 +229,7 @@ static struct {
},
.interleave_ways = 1,
.granularity = 4,
- .restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
+ .restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
.qtg_id = FAKE_QTG_ID,
.window_size = SZ_256M * 8UL,
@@ -239,7 +244,7 @@ static struct {
},
.interleave_ways = 0,
.granularity = 4,
- .restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
+ .restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
ACPI_CEDT_CFMWS_RESTRICT_PMEM,
.qtg_id = FAKE_QTG_ID,
.window_size = SZ_256M * 4UL,
@@ -254,7 +259,7 @@ static struct {
},
.interleave_ways = 1,
.granularity = 4,
- .restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
+ .restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
ACPI_CEDT_CFMWS_RESTRICT_PMEM,
.qtg_id = FAKE_QTG_ID,
.window_size = SZ_256M * 8UL,
@@ -269,7 +274,7 @@ static struct {
},
.interleave_ways = 0,
.granularity = 4,
- .restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
+ .restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
ACPI_CEDT_CFMWS_RESTRICT_PMEM,
.qtg_id = FAKE_QTG_ID,
.window_size = SZ_256M * 4UL,
@@ -284,7 +289,7 @@ static struct {
},
.interleave_ways = 0,
.granularity = 4,
- .restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
+ .restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
.qtg_id = FAKE_QTG_ID,
.window_size = SZ_256M,
@@ -301,7 +306,7 @@ static struct {
.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
.interleave_ways = 0,
.granularity = 4,
- .restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
+ .restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
ACPI_CEDT_CFMWS_RESTRICT_PMEM,
.qtg_id = FAKE_QTG_ID,
.window_size = SZ_256M * 8UL,
@@ -317,7 +322,7 @@ static struct {
.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
.interleave_ways = 1,
.granularity = 0,
- .restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
+ .restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
ACPI_CEDT_CFMWS_RESTRICT_PMEM,
.qtg_id = FAKE_QTG_ID,
.window_size = SZ_256M * 8UL,
@@ -333,7 +338,7 @@ static struct {
.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
.interleave_ways = 8,
.granularity = 1,
- .restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
+ .restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
ACPI_CEDT_CFMWS_RESTRICT_PMEM,
.qtg_id = FAKE_QTG_ID,
.window_size = SZ_512M * 6UL,
@@ -425,6 +430,22 @@ static struct cxl_mock_res *alloc_mock_res(resource_size_t size, int align)
return res;
}
+/* Only update CFMWS0 as this is used by the auto region. */
+static void cfmws_elc_update(struct acpi_cedt_cfmws *window, int index)
+{
+ if (!extended_linear_cache)
+ return;
+
+ if (index != 0)
+ return;
+
+ /*
+	 * The window size should be twice the CXL region size, where half is
+	 * DRAM and half is CXL.
+ */
+ window->window_size = mock_auto_region_size * 2;
+}
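With the default mock region size the arithmetic works out as below (a worked example, not code from the patch):

	/*
	 * MOCK_AUTO_REGION_SIZE_DEFAULT = SZ_512M, so for CFMWS0:
	 *   window_size = 512M * 2 = 1G
	 *   = 512M of DRAM acting as cache + 512M of CXL-backed capacity
	 */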
+
static int populate_cedt(void)
{
struct cxl_mock_res *res;
@@ -449,6 +470,7 @@ static int populate_cedt(void)
for (i = cfmws_start; i <= cfmws_end; i++) {
struct acpi_cedt_cfmws *window = mock_cfmws[i];
+ cfmws_elc_update(window, i);
res = alloc_mock_res(window->window_size, SZ_256M);
if (!res)
return -ENOMEM;
@@ -590,6 +612,25 @@ mock_acpi_evaluate_integer(acpi_handle handle, acpi_string pathname,
return AE_OK;
}
+static int
+mock_hmat_get_extended_linear_cache_size(struct resource *backing_res,
+ int nid, resource_size_t *cache_size)
+{
+ struct acpi_cedt_cfmws *window = mock_cfmws[0];
+ struct resource cfmws0_res =
+ DEFINE_RES_MEM(window->base_hpa, window->window_size);
+
+ if (!extended_linear_cache ||
+ !resource_contains(&cfmws0_res, backing_res)) {
+ return hmat_get_extended_linear_cache_size(backing_res,
+ nid, cache_size);
+ }
+
+ *cache_size = mock_auto_region_size;
+
+ return 0;
+}
+
static struct pci_bus mock_pci_bus[NR_BRIDGES];
static struct acpi_pci_root mock_pci_root[ARRAY_SIZE(mock_pci_bus)] = {
[0] = {
@@ -642,15 +683,8 @@ static struct cxl_hdm *mock_cxl_setup_hdm(struct cxl_port *port,
return cxlhdm;
}
-static int mock_cxl_add_passthrough_decoder(struct cxl_port *port)
-{
- dev_err(&port->dev, "unexpected passthrough decoder for cxl_test\n");
- return -EOPNOTSUPP;
-}
-
-
struct target_map_ctx {
- int *target_map;
+ u32 *target_map;
int index;
int target_count;
};
@@ -744,7 +778,6 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
struct cxl_endpoint_decoder *cxled;
struct cxl_switch_decoder *cxlsd;
struct cxl_port *port, *iter;
- const int size = SZ_512M;
struct cxl_memdev *cxlmd;
struct cxl_dport *dport;
struct device *dev;
@@ -787,9 +820,11 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
}
base = window->base_hpa;
+ if (extended_linear_cache)
+ base += mock_auto_region_size;
cxld->hpa_range = (struct range) {
.start = base,
- .end = base + size - 1,
+ .end = base + mock_auto_region_size - 1,
};
cxld->interleave_ways = 2;
@@ -798,7 +833,8 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
cxld->flags = CXL_DECODER_F_ENABLE;
cxled->state = CXL_DECODER_STATE_AUTO;
port->commit_end = cxld->id;
- devm_cxl_dpa_reserve(cxled, 0, size / cxld->interleave_ways, 0);
+ devm_cxl_dpa_reserve(cxled, 0,
+ mock_auto_region_size / cxld->interleave_ways, 0);
cxld->commit = mock_decoder_commit;
cxld->reset = mock_decoder_reset;
@@ -817,15 +853,21 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
*/
if (WARN_ON(!dev))
continue;
+
cxlsd = to_cxl_switch_decoder(dev);
if (i == 0) {
/* put cxl_mem.4 second in the decode order */
- if (pdev->id == 4)
+ if (pdev->id == 4) {
cxlsd->target[1] = dport;
- else
+ cxld->target_map[1] = dport->port_id;
+ } else {
cxlsd->target[0] = dport;
- } else
+ cxld->target_map[0] = dport->port_id;
+ }
+ } else {
cxlsd->target[0] = dport;
+ cxld->target_map[0] = dport->port_id;
+ }
cxld = &cxlsd->cxld;
cxld->target_type = CXL_DECODER_HOSTONLYMEM;
cxld->flags = CXL_DECODER_F_ENABLE;
@@ -841,7 +883,7 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
cxld->interleave_granularity = 4096;
cxld->hpa_range = (struct range) {
.start = base,
- .end = base + size - 1,
+ .end = base + mock_auto_region_size - 1,
};
put_device(dev);
}
@@ -862,9 +904,7 @@ static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
target_count = NR_CXL_SWITCH_PORTS;
for (i = 0; i < NR_CXL_PORT_DECODERS; i++) {
- int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
struct target_map_ctx ctx = {
- .target_map = target_map,
.target_count = target_count,
};
struct cxl_decoder *cxld;
@@ -893,6 +933,8 @@ static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
cxld = &cxled->cxld;
}
+ ctx.target_map = cxld->target_map;
+
mock_init_hdm_decoder(cxld);
if (target_count) {
@@ -904,7 +946,7 @@ static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
}
}
- rc = cxl_decoder_add_locked(cxld, target_map);
+ rc = cxl_decoder_add_locked(cxld);
if (rc) {
put_device(&cxld->dev);
dev_err(&port->dev, "Failed to add decoder\n");
@@ -920,10 +962,42 @@ static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
return 0;
}
-static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
+static int __mock_cxl_decoders_setup(struct cxl_port *port)
+{
+ struct cxl_hdm *cxlhdm;
+
+ cxlhdm = mock_cxl_setup_hdm(port, NULL);
+ if (IS_ERR(cxlhdm)) {
+ if (PTR_ERR(cxlhdm) != -ENODEV)
+ dev_err(&port->dev, "Failed to map HDM decoder capability\n");
+ return PTR_ERR(cxlhdm);
+ }
+
+ return mock_cxl_enumerate_decoders(cxlhdm, NULL);
+}
+
+static int mock_cxl_switch_port_decoders_setup(struct cxl_port *port)
+{
+ if (is_cxl_root(port) || is_cxl_endpoint(port))
+ return -EOPNOTSUPP;
+
+ return __mock_cxl_decoders_setup(port);
+}
+
+static int mock_cxl_endpoint_decoders_setup(struct cxl_port *port)
+{
+ if (!is_cxl_endpoint(port))
+ return -EOPNOTSUPP;
+
+ return __mock_cxl_decoders_setup(port);
+}
+
+static int get_port_array(struct cxl_port *port,
+ struct platform_device ***port_array,
+ int *port_array_size)
{
struct platform_device **array;
- int i, array_size;
+ int array_size;
if (port->depth == 1) {
if (is_multi_bridge(port->uport_dev)) {
@@ -957,9 +1031,24 @@ static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
return -ENXIO;
}
+ *port_array = array;
+ *port_array_size = array_size;
+
+ return 0;
+}
+
+static struct cxl_dport *mock_cxl_add_dport_by_dev(struct cxl_port *port,
+ struct device *dport_dev)
+{
+ struct platform_device **array;
+ int rc, i, array_size;
+
+ rc = get_port_array(port, &array, &array_size);
+ if (rc)
+ return ERR_PTR(rc);
+
for (i = 0; i < array_size; i++) {
struct platform_device *pdev = array[i];
- struct cxl_dport *dport;
if (pdev->dev.parent != port->uport_dev) {
dev_dbg(&port->dev, "%s: mismatch parent %s\n",
@@ -968,14 +1057,14 @@ static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
continue;
}
- dport = devm_cxl_add_dport(port, &pdev->dev, pdev->id,
- CXL_RESOURCE_NONE);
+ if (&pdev->dev != dport_dev)
+ continue;
- if (IS_ERR(dport))
- return PTR_ERR(dport);
+ return devm_cxl_add_dport(port, &pdev->dev, pdev->id,
+ CXL_RESOURCE_NONE);
}
- return 0;
+ return ERR_PTR(-ENODEV);
}
/*
@@ -1034,11 +1123,12 @@ static struct cxl_mock_ops cxl_mock_ops = {
.acpi_table_parse_cedt = mock_acpi_table_parse_cedt,
.acpi_evaluate_integer = mock_acpi_evaluate_integer,
.acpi_pci_find_root = mock_acpi_pci_find_root,
- .devm_cxl_port_enumerate_dports = mock_cxl_port_enumerate_dports,
- .devm_cxl_setup_hdm = mock_cxl_setup_hdm,
- .devm_cxl_add_passthrough_decoder = mock_cxl_add_passthrough_decoder,
- .devm_cxl_enumerate_decoders = mock_cxl_enumerate_decoders,
+ .devm_cxl_switch_port_decoders_setup = mock_cxl_switch_port_decoders_setup,
+ .devm_cxl_endpoint_decoders_setup = mock_cxl_endpoint_decoders_setup,
.cxl_endpoint_parse_cdat = mock_cxl_endpoint_parse_cdat,
+ .devm_cxl_add_dport_by_dev = mock_cxl_add_dport_by_dev,
+ .hmat_get_extended_linear_cache_size =
+ mock_hmat_get_extended_linear_cache_size,
.list = LIST_HEAD_INIT(cxl_mock_ops.list),
};
@@ -1328,6 +1418,7 @@ err_mem:
static __init int cxl_test_init(void)
{
int rc, i;
+ struct range mappable;
cxl_acpi_test();
cxl_core_test();
@@ -1342,8 +1433,11 @@ static __init int cxl_test_init(void)
rc = -ENOMEM;
goto err_gen_pool_create;
}
+ mappable = mhp_get_pluggable_range(true);
- rc = gen_pool_add(cxl_mock_pool, iomem_resource.end + 1 - SZ_64G,
+ rc = gen_pool_add(cxl_mock_pool,
+ min(iomem_resource.end + 1 - SZ_64G,
+ mappable.end + 1 - SZ_64G),
SZ_64G, NUMA_NO_NODE);
if (rc)
goto err_gen_pool_add;
@@ -1524,8 +1618,11 @@ static __exit void cxl_test_exit(void)
module_param(interleave_arithmetic, int, 0444);
MODULE_PARM_DESC(interleave_arithmetic, "Modulo:0, XOR:1");
+module_param(extended_linear_cache, bool, 0444);
+MODULE_PARM_DESC(extended_linear_cache, "Enable extended linear cache support");
module_init(cxl_test_init);
module_exit(cxl_test_exit);
MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("cxl_test: setup module");
MODULE_IMPORT_NS("ACPI");
MODULE_IMPORT_NS("CXL");
diff --git a/tools/testing/cxl/test/cxl_translate.c b/tools/testing/cxl/test/cxl_translate.c
new file mode 100644
index 000000000000..2200ae21795c
--- /dev/null
+++ b/tools/testing/cxl/test/cxl_translate.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright(c) 2025 Intel Corporation. All rights reserved.
+
+/* Preface all log entries with "cxl_translate" */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <cxlmem.h>
+#include <cxl.h>
+
+/* Maximum number of test vectors and entry length */
+#define MAX_TABLE_ENTRIES 128
+#define MAX_ENTRY_LEN 128
+
+/* Expected number of parameters in each test vector */
+#define EXPECTED_PARAMS 7
+
+/* Module parameters for test vectors */
+static char *table[MAX_TABLE_ENTRIES];
+static int table_num;
+
+/* Interleave Arithmetic */
+#define MODULO_MATH 0
+#define XOR_MATH 1
+
+/*
+ * XOR mapping configuration
+ * The test data sets all use the same set of xormaps. When additional
+ * data sets arrive for validation, this static setup will need to
+ * be changed to accept xormaps as additional parameters.
+ */
+struct cxl_cxims_data *cximsd;
+static u64 xormaps[] = {
+ 0x2020900,
+ 0x4041200,
+ 0x1010400,
+ 0x800,
+};
+
+static int nr_maps = ARRAY_SIZE(xormaps);
+
+#define HBIW_TO_NR_MAPS_SIZE (CXL_DECODER_MAX_INTERLEAVE + 1)
+static const int hbiw_to_nr_maps[HBIW_TO_NR_MAPS_SIZE] = {
+ [1] = 0, [2] = 1, [3] = 0, [4] = 2, [6] = 1, [8] = 3, [12] = 2, [16] = 4
+};
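The folding performed by cxl_do_xormap_calc() is internal to the CXL core; as a rough sketch of the CXIMS-style math it implements (an assumption for illustration, not taken from this patch), each map selects a set of HPA bits whose parity yields one host-bridge select bit:

	/* Illustrative only: parity of the HPA bits selected by one xormap */
	static inline u64 xormap_parity(u64 hpa, u64 map)
	{
		return hweight64(hpa & map) & 1;	/* in-kernel popcount */
	}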
+
+/**
+ * to_hpa - calculate an HPA offset from a DPA offset and position
+ *
+ * @dpa_offset: device physical address offset
+ * @pos: device's position in the interleave
+ * @r_eiw: region encoded interleave ways
+ * @r_eig: region encoded interleave granularity
+ * @hb_ways: host bridge interleave ways
+ * @math: interleave arithmetic (MODULO_MATH or XOR_MATH)
+ *
+ * Returns: host physical address offset
+ */
+static u64 to_hpa(u64 dpa_offset, int pos, u8 r_eiw, u16 r_eig, u8 hb_ways,
+ u8 math)
+{
+ u64 hpa_offset;
+
+ /* Calculate base HPA offset from DPA and position */
+ hpa_offset = cxl_calculate_hpa_offset(dpa_offset, pos, r_eiw, r_eig);
+
+ if (math == XOR_MATH) {
+ cximsd->nr_maps = hbiw_to_nr_maps[hb_ways];
+ if (cximsd->nr_maps)
+ return cxl_do_xormap_calc(cximsd, hpa_offset, hb_ways);
+ }
+ return hpa_offset;
+}
+
+/**
+ * to_dpa - translate an HPA offset to DPA offset
+ *
+ * @hpa_offset: host physical address offset
+ * @r_eiw: region encoded interleave ways
+ * @r_eig: region encoded interleave granularity
+ * @hb_ways: host bridge interleave ways
+ * @math: interleave arithmetic (MODULO_MATH or XOR_MATH)
+ *
+ * Returns: device physical address offset
+ */
+static u64 to_dpa(u64 hpa_offset, u8 r_eiw, u16 r_eig, u8 hb_ways, u8 math)
+{
+ u64 offset = hpa_offset;
+
+ if (math == XOR_MATH) {
+ cximsd->nr_maps = hbiw_to_nr_maps[hb_ways];
+ if (cximsd->nr_maps)
+ offset =
+ cxl_do_xormap_calc(cximsd, hpa_offset, hb_ways);
+ }
+ return cxl_calculate_dpa_offset(offset, r_eiw, r_eig);
+}
+
+/**
+ * to_pos - extract an interleave position from an HPA offset
+ *
+ * @hpa_offset: host physical address offset
+ * @r_eiw: region encoded interleave ways
+ * @r_eig: region encoded interleave granularity
+ * @hb_ways: host bridge interleave ways
+ * @math: interleave arithmetic (MODULO_MATH or XOR_MATH)
+ *
+ * Returns: device's position in the region interleave
+ */
+static u64 to_pos(u64 hpa_offset, u8 r_eiw, u16 r_eig, u8 hb_ways, u8 math)
+{
+ u64 offset = hpa_offset;
+
+ /* Reverse XOR mapping if specified */
+ if (math == XOR_MATH)
+ offset = cxl_do_xormap_calc(cximsd, hpa_offset, hb_ways);
+
+ return cxl_calculate_position(offset, r_eiw, r_eig);
+}
+
+/**
+ * run_translation_test - execute forward and reverse translations
+ *
+ * @dpa: device physical address
+ * @pos: expected position in region interleave
+ * @r_eiw: region encoded interleave ways
+ * @r_eig: region encoded interleave granularity
+ * @hb_ways: host bridge interleave ways
+ * @math: interleave arithmetic (MODULO_MATH or XOR_MATH)
+ * @expect_spa: expected system physical address
+ *
+ * Returns: 0 on success, -1 on failure
+ */
+static int run_translation_test(u64 dpa, int pos, u8 r_eiw, u16 r_eig,
+ u8 hb_ways, int math, u64 expect_hpa)
+{
+ u64 translated_spa, reverse_dpa;
+ int reverse_pos;
+
+ /* Test Device to Host translation: DPA + POS -> SPA */
+ translated_spa = to_hpa(dpa, pos, r_eiw, r_eig, hb_ways, math);
+ if (translated_spa != expect_hpa) {
+ pr_err("Device to host failed: expected HPA %llu, got %llu\n",
+ expect_hpa, translated_spa);
+ return -1;
+ }
+
+ /* Test Host to Device DPA translation: SPA -> DPA */
+ reverse_dpa = to_dpa(translated_spa, r_eiw, r_eig, hb_ways, math);
+ if (reverse_dpa != dpa) {
+ pr_err("Host to Device DPA failed: expected %llu, got %llu\n",
+ dpa, reverse_dpa);
+ return -1;
+ }
+
+ /* Test Host to Device Position translation: SPA -> POS */
+ reverse_pos = to_pos(translated_spa, r_eiw, r_eig, hb_ways, math);
+ if (reverse_pos != pos) {
+ pr_err("Position lookup failed: expected %d, got %d\n", pos,
+ reverse_pos);
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * parse_test_vector - parse a single test vector string
+ *
+ * @entry: test vector string to parse
+ * @dpa: device physical address
+ * @pos: expected position in region interleave
+ * @r_eiw: region encoded interleave ways
+ * @r_eig: region encoded interleave granularity
+ * @hb_ways: host bridge interleave ways
+ * @math: interleave arithmetic (MODULO_MATH or XOR_MATH)
+ * @expect_hpa: expected host physical address
+ *
+ * Returns: 0 on success, negative error code on failure
+ */
+static int parse_test_vector(const char *entry, u64 *dpa, int *pos, u8 *r_eiw,
+ u16 *r_eig, u8 *hb_ways, int *math,
+ u64 *expect_hpa)
+{
+ unsigned int tmp_r_eiw, tmp_r_eig, tmp_hb_ways;
+ int parsed;
+
+ parsed = sscanf(entry, "%llu %d %u %u %u %d %llu", dpa, pos, &tmp_r_eiw,
+ &tmp_r_eig, &tmp_hb_ways, math, expect_hpa);
+
+ if (parsed != EXPECTED_PARAMS) {
+ pr_err("Parse error: expected %d parameters, got %d in '%s'\n",
+ EXPECTED_PARAMS, parsed, entry);
+ return -EINVAL;
+ }
+ if (tmp_r_eiw > U8_MAX || tmp_r_eig > U16_MAX || tmp_hb_ways > U8_MAX) {
+ pr_err("Parameter overflow in entry: '%s'\n", entry);
+ return -ERANGE;
+ }
+ if (*math != MODULO_MATH && *math != XOR_MATH) {
+ pr_err("Invalid math type %d in entry: '%s'\n", *math, entry);
+ return -EINVAL;
+ }
+ *r_eiw = tmp_r_eiw;
+ *r_eig = tmp_r_eig;
+ *hb_ways = tmp_hb_ways;
+
+ return 0;
+}
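As a concrete illustration (values invented for this note, not a validated topology), the vector "0 0 1 0 2 0 0" would parse as:

	/*
	 *   dpa = 0, pos = 0, r_eiw = 1 (2-way), r_eig = 0 (256B granularity),
	 *   hb_ways = 2, math = 0 (MODULO_MATH), expect_hpa = 0
	 */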
+
+/*
+ * setup_xor_mapping - Initialize XOR mapping data structure
+ *
+ * The test data sets all use the same HBIG so we can use one set
+ * of xormaps, and set the number to apply based on HBIW before
+ * calling cxl_do_xormap_calc().
+ *
+ * When additional data sets arrive for validation with different
+ * HBIGs, this static setup will need to be updated.
+ *
+ * Returns: 0 on success, negative error code on failure
+ */
+static int setup_xor_mapping(void)
+{
+ if (nr_maps <= 0)
+ return -EINVAL;
+
+ cximsd = kzalloc(struct_size(cximsd, xormaps, nr_maps), GFP_KERNEL);
+ if (!cximsd)
+ return -ENOMEM;
+
+ memcpy(cximsd->xormaps, xormaps, nr_maps * sizeof(*cximsd->xormaps));
+ cximsd->nr_maps = nr_maps;
+
+ return 0;
+}
+
+static int test_random_params(void)
+{
+ u8 valid_eiws[] = { 0, 1, 2, 3, 4, 8, 9, 10 };
+ u16 valid_eigs[] = { 0, 1, 2, 3, 4, 5, 6 };
+ int i, ways, pos, reverse_pos;
+ u64 dpa, hpa, reverse_dpa;
+ int iterations = 10000;
+ int failures = 0;
+
+ for (i = 0; i < iterations; i++) {
+ /* Generate valid random parameters for eiw, eig, pos, dpa */
+ u8 eiw = valid_eiws[get_random_u32() % ARRAY_SIZE(valid_eiws)];
+ u16 eig = valid_eigs[get_random_u32() % ARRAY_SIZE(valid_eigs)];
+
+ eiw_to_ways(eiw, &ways);
+ pos = get_random_u32() % ways;
+ dpa = get_random_u64() >> 12;
+
+ hpa = cxl_calculate_hpa_offset(dpa, pos, eiw, eig);
+ reverse_dpa = cxl_calculate_dpa_offset(hpa, eiw, eig);
+ reverse_pos = cxl_calculate_position(hpa, eiw, eig);
+
+ if (reverse_dpa != dpa || reverse_pos != pos) {
+ pr_err("test random iter %d FAIL hpa=%llu, dpa=%llu reverse_dpa=%llu, pos=%d reverse_pos=%d eiw=%u eig=%u\n",
+ i, hpa, dpa, reverse_dpa, pos, reverse_pos, eiw,
+ eig);
+
+ if (failures++ > 10) {
+ pr_err("test random too many failures, stop\n");
+ break;
+ }
+ }
+ }
+ pr_info("..... test random: PASS %d FAIL %d\n", i - failures, failures);
+
+ if (failures)
+ return -EINVAL;
+
+ return 0;
+}
+
+struct param_test {
+ u8 eiw;
+ u16 eig;
+ int pos;
+ bool expect; /* true: expect pass, false: expect fail */
+ const char *desc;
+};
+
+static struct param_test param_tests[] = {
+ { 0x0, 0, 0, true, "1-way, min eig=0, pos=0" },
+ { 0x0, 3, 0, true, "1-way, mid eig=3, pos=0" },
+ { 0x0, 6, 0, true, "1-way, max eig=6, pos=0" },
+ { 0x1, 0, 0, true, "2-way, eig=0, pos=0" },
+ { 0x1, 3, 1, true, "2-way, eig=3, max pos=1" },
+ { 0x1, 6, 1, true, "2-way, eig=6, max pos=1" },
+ { 0x2, 0, 0, true, "4-way, eig=0, pos=0" },
+ { 0x2, 3, 3, true, "4-way, eig=3, max pos=3" },
+ { 0x2, 6, 3, true, "4-way, eig=6, max pos=3" },
+ { 0x3, 0, 0, true, "8-way, eig=0, pos=0" },
+ { 0x3, 3, 7, true, "8-way, eig=3, max pos=7" },
+ { 0x3, 6, 7, true, "8-way, eig=6, max pos=7" },
+ { 0x4, 0, 0, true, "16-way, eig=0, pos=0" },
+ { 0x4, 3, 15, true, "16-way, eig=3, max pos=15" },
+ { 0x4, 6, 15, true, "16-way, eig=6, max pos=15" },
+ { 0x8, 0, 0, true, "3-way, eig=0, pos=0" },
+ { 0x8, 3, 2, true, "3-way, eig=3, max pos=2" },
+ { 0x8, 6, 2, true, "3-way, eig=6, max pos=2" },
+ { 0x9, 0, 0, true, "6-way, eig=0, pos=0" },
+ { 0x9, 3, 5, true, "6-way, eig=3, max pos=5" },
+ { 0x9, 6, 5, true, "6-way, eig=6, max pos=5" },
+ { 0xA, 0, 0, true, "12-way, eig=0, pos=0" },
+ { 0xA, 3, 11, true, "12-way, eig=3, max pos=11" },
+ { 0xA, 6, 11, true, "12-way, eig=6, max pos=11" },
+ { 0x5, 0, 0, false, "invalid eiw=5" },
+ { 0x7, 0, 0, false, "invalid eiw=7" },
+ { 0xB, 0, 0, false, "invalid eiw=0xB" },
+ { 0xFF, 0, 0, false, "invalid eiw=0xFF" },
+ { 0x1, 7, 0, false, "invalid eig=7 (out of range)" },
+ { 0x2, 0x10, 0, false, "invalid eig=0x10" },
+ { 0x3, 0xFFFF, 0, false, "invalid eig=0xFFFF" },
+ { 0x1, 0, -1, false, "pos < 0" },
+ { 0x1, 0, 2, false, "2-way, pos=2 (>= ways)" },
+ { 0x2, 0, 4, false, "4-way, pos=4 (>= ways)" },
+ { 0x3, 0, 8, false, "8-way, pos=8 (>= ways)" },
+ { 0x4, 0, 16, false, "16-way, pos=16 (>= ways)" },
+ { 0x8, 0, 3, false, "3-way, pos=3 (>= ways)" },
+ { 0x9, 0, 6, false, "6-way, pos=6 (>= ways)" },
+ { 0xA, 0, 12, false, "12-way, pos=12 (>= ways)" },
+};
+
+static int test_cxl_validate_translation_params(void)
+{
+ int i, rc, failures = 0;
+ bool valid;
+
+ for (i = 0; i < ARRAY_SIZE(param_tests); i++) {
+ struct param_test *t = &param_tests[i];
+
+ rc = cxl_validate_translation_params(t->eiw, t->eig, t->pos);
+ valid = (rc == 0);
+
+ if (valid != t->expect) {
+ pr_err("test params failed: %s\n", t->desc);
+ failures++;
+ }
+ }
+ pr_info("..... test params: PASS %d FAIL %d\n", i - failures, failures);
+
+ if (failures)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * cxl_translate_init
+ *
+ * Run the internal validation tests when no params are passed.
+ * Otherwise, parse the parameters (test vectors), and kick off
+ * the translation test.
+ *
+ * Returns: 0 on success, negative error code on failure
+ */
+static int __init cxl_translate_init(void)
+{
+ int rc, i;
+
+ /* If no tables are passed, validate module params only */
+ if (table_num == 0) {
+ pr_info("Internal validation test start...\n");
+ rc = test_cxl_validate_translation_params();
+ if (rc)
+ return rc;
+
+ rc = test_random_params();
+ if (rc)
+ return rc;
+
+ pr_info("Internal validation test completed successfully\n");
+
+ return 0;
+ }
+
+ pr_info("CXL translate test module loaded with %d test vectors\n",
+ table_num);
+
+ rc = setup_xor_mapping();
+ if (rc)
+ return rc;
+
+ /* Process each test vector */
+ for (i = 0; i < table_num; i++) {
+ u64 dpa, expect_spa;
+ int pos, math;
+ u8 r_eiw, hb_ways;
+ u16 r_eig;
+
+ pr_debug("Processing test vector %d: '%s'\n", i, table[i]);
+
+ /* Parse the test vector */
+ rc = parse_test_vector(table[i], &dpa, &pos, &r_eiw, &r_eig,
+ &hb_ways, &math, &expect_spa);
+ if (rc) {
+ pr_err("CXL Translate Test %d: FAIL\n"
+ " Failed to parse test vector '%s'\n",
+ i, table[i]);
+ continue;
+ }
+ /* Run the translation test */
+ rc = run_translation_test(dpa, pos, r_eiw, r_eig, hb_ways, math,
+ expect_spa);
+ if (rc) {
+ pr_err("CXL Translate Test %d: FAIL\n"
+ " dpa=%llu pos=%d r_eiw=%u r_eig=%u hb_ways=%u math=%s expect_spa=%llu\n",
+ i, dpa, pos, r_eiw, r_eig, hb_ways,
+ (math == XOR_MATH) ? "XOR" : "MODULO",
+ expect_spa);
+ } else {
+ pr_info("CXL Translate Test %d: PASS\n", i);
+ }
+ }
+
+ kfree(cximsd);
+ pr_info("CXL translate test completed\n");
+
+ return 0;
+}
+
+static void __exit cxl_translate_exit(void)
+{
+ pr_info("CXL translate test module unloaded\n");
+}
+
+module_param_array(table, charp, &table_num, 0444);
+MODULE_PARM_DESC(table, "Test vectors as space-separated decimal strings");
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("cxl_test: cxl address translation test module");
+MODULE_IMPORT_NS("CXL");
+
+module_init(cxl_translate_init);
+module_exit(cxl_translate_exit);
diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
index bf9caa908f89..176dcde570cd 100644
--- a/tools/testing/cxl/test/mem.c
+++ b/tools/testing/cxl/test/mem.c
@@ -250,22 +250,21 @@ static void mes_add_event(struct mock_event_store *mes,
 * Vary the number of events returned to simulate events occurring while the
* logs are being read.
*/
-static int ret_limit = 0;
+static atomic_t event_counter = ATOMIC_INIT(0);
static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
{
struct cxl_get_event_payload *pl;
struct mock_event_log *log;
- u16 nr_overflow;
+ int ret_limit;
u8 log_type;
int i;
if (cmd->size_in != sizeof(log_type))
return -EINVAL;
- ret_limit = (ret_limit + 1) % CXL_TEST_EVENT_RET_MAX;
- if (!ret_limit)
- ret_limit = 1;
+ /* Vary return limit from 1 to CXL_TEST_EVENT_RET_MAX */
+ ret_limit = (atomic_inc_return(&event_counter) % CXL_TEST_EVENT_RET_MAX) + 1;
if (cmd->size_out < struct_size(pl, records, ret_limit))
return -EINVAL;
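The result is a lock-free cycle through every legal return limit. If CXL_TEST_EVENT_RET_MAX were 3 (a value chosen only for illustration), successive calls would see:

	/*
	 * atomic_inc_return(): 1, 2, 3, 4, 5, 6, ...
	 * ret_limit:           2, 3, 1, 2, 3, 1, ... (always in [1, RET_MAX])
	 */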
@@ -299,7 +298,7 @@ static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
u64 ns;
pl->flags |= CXL_GET_EVENT_FLAG_OVERFLOW;
- pl->overflow_err_count = cpu_to_le16(nr_overflow);
+ pl->overflow_err_count = cpu_to_le16(log->nr_overflow);
ns = ktime_get_real_ns();
ns -= 5000000000; /* 5s ago */
pl->first_overflow_timestamp = cpu_to_le64(ns);
@@ -1828,27 +1827,10 @@ static ssize_t fw_buf_checksum_show(struct device *dev,
{
struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
u8 hash[SHA256_DIGEST_SIZE];
- unsigned char *hstr, *hptr;
- struct sha256_state sctx;
- ssize_t written = 0;
- int i;
-
- sha256_init(&sctx);
- sha256_update(&sctx, mdata->fw, mdata->fw_size);
- sha256_final(&sctx, hash);
-
- hstr = kzalloc((SHA256_DIGEST_SIZE * 2) + 1, GFP_KERNEL);
- if (!hstr)
- return -ENOMEM;
-
- hptr = hstr;
- for (i = 0; i < SHA256_DIGEST_SIZE; i++)
- hptr += sprintf(hptr, "%02x", hash[i]);
- written = sysfs_emit(buf, "%s\n", hstr);
+ sha256(mdata->fw, mdata->fw_size, hash);
- kfree(hstr);
- return written;
+ return sysfs_emit(buf, "%*phN\n", SHA256_DIGEST_SIZE, hash);
}
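The %*phN extension of the kernel's vsnprintf() prints a buffer as bare hex digits, with the field width giving the byte count (capped at 64); a minimal sketch:

	u8 digest[4] = { 0xde, 0xad, 0xbe, 0xef };
	char out[16];

	/* prints "deadbeef": no separators, width argument = byte count */
	snprintf(out, sizeof(out), "%*phN", 4, digest);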
static DEVICE_ATTR_RO(fw_buf_checksum);
@@ -1909,4 +1891,5 @@ static struct platform_driver cxl_mock_mem_driver = {
module_platform_driver(cxl_mock_mem_driver);
MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("cxl_test: mem device mock module");
MODULE_IMPORT_NS("CXL");
diff --git a/tools/testing/cxl/test/mock.c b/tools/testing/cxl/test/mock.c
index af2594e4f35d..44bce80ef3ff 100644
--- a/tools/testing/cxl/test/mock.c
+++ b/tools/testing/cxl/test/mock.c
@@ -10,12 +10,21 @@
#include <cxlmem.h>
#include <cxlpci.h>
#include "mock.h"
+#include "../exports.h"
static LIST_HEAD(mock);
+static struct cxl_dport *
+redirect_devm_cxl_add_dport_by_dev(struct cxl_port *port,
+ struct device *dport_dev);
+static int redirect_devm_cxl_switch_port_decoders_setup(struct cxl_port *port);
+
void register_cxl_mock_ops(struct cxl_mock_ops *ops)
{
list_add_rcu(&ops->list, &mock);
+ _devm_cxl_add_dport_by_dev = redirect_devm_cxl_add_dport_by_dev;
+ _devm_cxl_switch_port_decoders_setup =
+ redirect_devm_cxl_switch_port_decoders_setup;
}
EXPORT_SYMBOL_GPL(register_cxl_mock_ops);
@@ -23,6 +32,9 @@ DEFINE_STATIC_SRCU(cxl_mock_srcu);
void unregister_cxl_mock_ops(struct cxl_mock_ops *ops)
{
+ _devm_cxl_switch_port_decoders_setup =
+ __devm_cxl_switch_port_decoders_setup;
+ _devm_cxl_add_dport_by_dev = __devm_cxl_add_dport_by_dev;
list_del_rcu(&ops->list);
synchronize_srcu(&cxl_mock_srcu);
}
@@ -99,6 +111,26 @@ acpi_status __wrap_acpi_evaluate_integer(acpi_handle handle,
}
EXPORT_SYMBOL(__wrap_acpi_evaluate_integer);
+int __wrap_hmat_get_extended_linear_cache_size(struct resource *backing_res,
+ int nid,
+ resource_size_t *cache_size)
+{
+ int index, rc;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+
+ if (ops)
+ rc = ops->hmat_get_extended_linear_cache_size(backing_res, nid,
+ cache_size);
+ else
+ rc = hmat_get_extended_linear_cache_size(backing_res, nid,
+ cache_size);
+
+ put_cxl_mock_ops(index);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(__wrap_hmat_get_extended_linear_cache_size);
+
struct acpi_pci_root *__wrap_acpi_pci_find_root(acpi_handle handle)
{
int index;
@@ -131,70 +163,34 @@ __wrap_nvdimm_bus_register(struct device *dev,
}
EXPORT_SYMBOL_GPL(__wrap_nvdimm_bus_register);
-struct cxl_hdm *__wrap_devm_cxl_setup_hdm(struct cxl_port *port,
- struct cxl_endpoint_dvsec_info *info)
-
-{
- int index;
- struct cxl_hdm *cxlhdm;
- struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
-
- if (ops && ops->is_mock_port(port->uport_dev))
- cxlhdm = ops->devm_cxl_setup_hdm(port, info);
- else
- cxlhdm = devm_cxl_setup_hdm(port, info);
- put_cxl_mock_ops(index);
-
- return cxlhdm;
-}
-EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_setup_hdm, "CXL");
-
-int __wrap_devm_cxl_add_passthrough_decoder(struct cxl_port *port)
+int redirect_devm_cxl_switch_port_decoders_setup(struct cxl_port *port)
{
int rc, index;
struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
if (ops && ops->is_mock_port(port->uport_dev))
- rc = ops->devm_cxl_add_passthrough_decoder(port);
+ rc = ops->devm_cxl_switch_port_decoders_setup(port);
else
- rc = devm_cxl_add_passthrough_decoder(port);
+ rc = __devm_cxl_switch_port_decoders_setup(port);
put_cxl_mock_ops(index);
return rc;
}
-EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_add_passthrough_decoder, "CXL");
-int __wrap_devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
- struct cxl_endpoint_dvsec_info *info)
+int __wrap_devm_cxl_endpoint_decoders_setup(struct cxl_port *port)
{
int rc, index;
- struct cxl_port *port = cxlhdm->port;
struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
if (ops && ops->is_mock_port(port->uport_dev))
- rc = ops->devm_cxl_enumerate_decoders(cxlhdm, info);
+ rc = ops->devm_cxl_endpoint_decoders_setup(port);
else
- rc = devm_cxl_enumerate_decoders(cxlhdm, info);
+ rc = devm_cxl_endpoint_decoders_setup(port);
put_cxl_mock_ops(index);
return rc;
}
-EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_enumerate_decoders, "CXL");
-
-int __wrap_devm_cxl_port_enumerate_dports(struct cxl_port *port)
-{
- int rc, index;
- struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
-
- if (ops && ops->is_mock_port(port->uport_dev))
- rc = ops->devm_cxl_port_enumerate_dports(port);
- else
- rc = devm_cxl_port_enumerate_dports(port);
- put_cxl_mock_ops(index);
-
- return rc;
-}
-EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_port_enumerate_dports, "CXL");
+EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_endpoint_decoders_setup, "CXL");
int __wrap_cxl_await_media_ready(struct cxl_dev_state *cxlds)
{
@@ -211,39 +207,6 @@ int __wrap_cxl_await_media_ready(struct cxl_dev_state *cxlds)
}
EXPORT_SYMBOL_NS_GPL(__wrap_cxl_await_media_ready, "CXL");
-int __wrap_cxl_hdm_decode_init(struct cxl_dev_state *cxlds,
- struct cxl_hdm *cxlhdm,
- struct cxl_endpoint_dvsec_info *info)
-{
- int rc = 0, index;
- struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
-
- if (ops && ops->is_mock_dev(cxlds->dev))
- rc = 0;
- else
- rc = cxl_hdm_decode_init(cxlds, cxlhdm, info);
- put_cxl_mock_ops(index);
-
- return rc;
-}
-EXPORT_SYMBOL_NS_GPL(__wrap_cxl_hdm_decode_init, "CXL");
-
-int __wrap_cxl_dvsec_rr_decode(struct cxl_dev_state *cxlds,
- struct cxl_endpoint_dvsec_info *info)
-{
- int rc = 0, index;
- struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
-
- if (ops && ops->is_mock_dev(cxlds->dev))
- rc = 0;
- else
- rc = cxl_dvsec_rr_decode(cxlds, info);
- put_cxl_mock_ops(index);
-
- return rc;
-}
-EXPORT_SYMBOL_NS_GPL(__wrap_cxl_dvsec_rr_decode, "CXL");
-
struct cxl_dport *__wrap_devm_cxl_add_rch_dport(struct cxl_port *port,
struct device *dport_dev,
int port_id,
@@ -268,23 +231,6 @@ struct cxl_dport *__wrap_devm_cxl_add_rch_dport(struct cxl_port *port,
}
EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_add_rch_dport, "CXL");
-resource_size_t __wrap_cxl_rcd_component_reg_phys(struct device *dev,
- struct cxl_dport *dport)
-{
- int index;
- resource_size_t component_reg_phys;
- struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
-
- if (ops && ops->is_mock_port(dev))
- component_reg_phys = CXL_RESOURCE_NONE;
- else
- component_reg_phys = cxl_rcd_component_reg_phys(dev, dport);
- put_cxl_mock_ops(index);
-
- return component_reg_phys;
-}
-EXPORT_SYMBOL_NS_GPL(__wrap_cxl_rcd_component_reg_phys, "CXL");
-
void __wrap_cxl_endpoint_parse_cdat(struct cxl_port *port)
{
int index;
@@ -311,6 +257,23 @@ void __wrap_cxl_dport_init_ras_reporting(struct cxl_dport *dport, struct device
}
EXPORT_SYMBOL_NS_GPL(__wrap_cxl_dport_init_ras_reporting, "CXL");
+struct cxl_dport *redirect_devm_cxl_add_dport_by_dev(struct cxl_port *port,
+ struct device *dport_dev)
+{
+ int index;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+ struct cxl_dport *dport;
+
+ if (ops && ops->is_mock_port(port->uport_dev))
+ dport = ops->devm_cxl_add_dport_by_dev(port, dport_dev);
+ else
+ dport = __devm_cxl_add_dport_by_dev(port, dport_dev);
+ put_cxl_mock_ops(index);
+
+ return dport;
+}
+
MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("cxl_test: emulation module");
MODULE_IMPORT_NS("ACPI");
MODULE_IMPORT_NS("CXL");
diff --git a/tools/testing/cxl/test/mock.h b/tools/testing/cxl/test/mock.h
index d1b0271d2822..2684b89c8aa2 100644
--- a/tools/testing/cxl/test/mock.h
+++ b/tools/testing/cxl/test/mock.h
@@ -19,13 +19,14 @@ struct cxl_mock_ops {
bool (*is_mock_bus)(struct pci_bus *bus);
bool (*is_mock_port)(struct device *dev);
bool (*is_mock_dev)(struct device *dev);
- int (*devm_cxl_port_enumerate_dports)(struct cxl_port *port);
- struct cxl_hdm *(*devm_cxl_setup_hdm)(
- struct cxl_port *port, struct cxl_endpoint_dvsec_info *info);
- int (*devm_cxl_add_passthrough_decoder)(struct cxl_port *port);
- int (*devm_cxl_enumerate_decoders)(
- struct cxl_hdm *hdm, struct cxl_endpoint_dvsec_info *info);
+ int (*devm_cxl_switch_port_decoders_setup)(struct cxl_port *port);
+ int (*devm_cxl_endpoint_decoders_setup)(struct cxl_port *port);
void (*cxl_endpoint_parse_cdat)(struct cxl_port *port);
+ struct cxl_dport *(*devm_cxl_add_dport_by_dev)(struct cxl_port *port,
+ struct device *dport_dev);
+ int (*hmat_get_extended_linear_cache_size)(struct resource *backing_res,
+ int nid,
+ resource_size_t *cache_size);
};
void register_cxl_mock_ops(struct cxl_mock_ops *ops);
diff --git a/tools/testing/ktest/config-bisect.pl b/tools/testing/ktest/config-bisect.pl
index 6fd864935319..bee7cb34a289 100755
--- a/tools/testing/ktest/config-bisect.pl
+++ b/tools/testing/ktest/config-bisect.pl
@@ -741,9 +741,9 @@ if ($start) {
die "Can not find file $bad\n";
}
if ($val eq "good") {
- run_command "cp $output_config $good" or die "failed to copy $config to $good\n";
+ run_command "cp $output_config $good" or die "failed to copy $output_config to $good\n";
} elsif ($val eq "bad") {
- run_command "cp $output_config $bad" or die "failed to copy $config to $bad\n";
+ run_command "cp $output_config $bad" or die "failed to copy $output_config to $bad\n";
}
}
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index a5f7fdd0c1fb..001c4df9f7df 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -21,6 +21,8 @@ my %opt;
my %repeat_tests;
my %repeats;
my %evals;
+my @command_vars;
+my %command_tmp_vars;
#default opts
my %default = (
@@ -216,6 +218,7 @@ my $patchcheck_type;
my $patchcheck_start;
my $patchcheck_cherry;
my $patchcheck_end;
+my $patchcheck_skip;
my $build_time;
my $install_time;
@@ -380,6 +383,7 @@ my %option_map = (
"PATCHCHECK_START" => \$patchcheck_start,
"PATCHCHECK_CHERRY" => \$patchcheck_cherry,
"PATCHCHECK_END" => \$patchcheck_end,
+ "PATCHCHECK_SKIP" => \$patchcheck_skip,
);
# Options may be used by other options, record them.
@@ -900,14 +904,22 @@ sub set_eval {
}
sub set_variable {
- my ($lvalue, $rvalue) = @_;
+ my ($lvalue, $rvalue, $command) = @_;
+ # Command line variables override all others
+ if (defined($command_tmp_vars{$lvalue})) {
+ return;
+ }
if ($rvalue =~ /^\s*$/) {
delete $variable{$lvalue};
} else {
$rvalue = process_variables($rvalue);
$variable{$lvalue} = $rvalue;
}
+
+ if (defined($command)) {
+ $command_tmp_vars{$lvalue} = 1;
+ }
}
sub process_compare {
@@ -1286,6 +1298,19 @@ sub read_config {
$test_case = __read_config $config, \$test_num;
+ foreach my $val (@command_vars) {
+ chomp $val;
+ my %command_overrides;
+ if ($val =~ m/^\s*([A-Z_\[\]\d]+)\s*=\s*(.*?)\s*$/) {
+ my $lvalue = $1;
+ my $rvalue = $2;
+
+ set_value($lvalue, $rvalue, 1, \%command_overrides, "COMMAND LINE");
+ } else {
+ die "Invalid option definition '$val'\n";
+ }
+ }
+
# make sure we have all mandatory configs
get_mandatory_configs;
@@ -1371,7 +1396,10 @@ sub __eval_option {
# If a variable contains itself, use the default var
if (($var eq $name) && defined($opt{$var})) {
$o = $opt{$var};
- $retval = "$retval$o";
+ # Only append if the default doesn't contain itself
+ if ($o !~ m/\$\{$var\}/) {
+ $retval = "$retval$o";
+ }
} elsif (defined($opt{$o})) {
$o = $opt{$o};
$retval = "$retval$o";
@@ -3511,11 +3539,37 @@ sub patchcheck {
@list = reverse @list;
}
+ my %skip_list;
+ my $will_skip = 0;
+
+ if (defined($patchcheck_skip)) {
+ foreach my $s (split /\s+/, $patchcheck_skip) {
+ $s = `git log --pretty=oneline $s~1..$s`;
+ $s =~ s/^(\S+).*/$1/;
+ chomp $s;
+ $skip_list{$s} = 1;
+ $will_skip++;
+ }
+ }
+
doprint("Going to test the following commits:\n");
foreach my $l (@list) {
+ my $sha1 = $l;
+ $sha1 =~ s/^([[:xdigit:]]+).*/$1/;
+ next if (defined($skip_list{$sha1}));
doprint "$l\n";
}
+ if ($will_skip) {
+ doprint("\nSkipping the following commits:\n");
+ foreach my $l (@list) {
+ my $sha1 = $l;
+ $sha1 =~ s/^([[:xdigit:]]+).*/$1/;
+ next if (!defined($skip_list{$sha1}));
+ doprint "$l\n";
+ }
+ }
+
my $save_clean = $noclean;
my %ignored_warnings;
@@ -3530,6 +3584,11 @@ sub patchcheck {
my $sha1 = $item;
$sha1 =~ s/^([[:xdigit:]]+).*/$1/;
+ if (defined($skip_list{$sha1})) {
+ doprint "\nSkipping \"$item\"\n\n";
+ next;
+ }
+
doprint "\nProcessing commit \"$item\"\n\n";
run_command "git checkout $sha1" or
@@ -4242,8 +4301,55 @@ sub cancel_test {
die "\nCaught Sig Int, test interrupted: $!\n"
}
-$#ARGV < 1 or die "ktest.pl version: $VERSION\n usage: ktest.pl [config-file]\n";
+sub die_usage {
+ die << "EOF"
+ktest.pl version: $VERSION
+ usage: ktest.pl [options] [config-file]
+ [options]:
+ -D value: Where value can act as an option override.
+ -D BUILD_NOCLEAN=1
+ Sets global BUILD_NOCLEAN to 1
+ -D TEST_TYPE[2]=build
+ Sets TEST_TYPE of test 2 to "build"
+
+ It can also override all temp variables.
+ -D USE_TEMP_DIR:=1
+			Will override any "USE_TEMP_DIR :=" assignment
+			found in the config file
+
+EOF
+;
+}
+
+while ( $#ARGV >= 0 ) {
+ if ( $ARGV[0] eq "-D" ) {
+ shift;
+ die_usage if ($#ARGV < 1);
+ my $val = shift;
+
+ if ($val =~ m/(.*?):=(.*)$/) {
+ set_variable($1, $2, 1);
+ } else {
+ $command_vars[$#command_vars + 1] = $val;
+ }
+
+ } elsif ( $ARGV[0] =~ m/^-D(.*)/) {
+ my $val = $1;
+ shift;
+
+ if ($val =~ m/(.*?):=(.*)$/) {
+ set_variable($1, $2, 1);
+ } else {
+ $command_vars[$#command_vars + 1] = $val;
+ }
+ } elsif ( $ARGV[0] eq "-h" ) {
+ die_usage;
+ } else {
+ last;
+ }
+}
+$#ARGV < 1 or die_usage;
if ($#ARGV == 0) {
$ktest_config = $ARGV[0];
if (! -f $ktest_config) {
@@ -4466,6 +4572,10 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
doprint "RUNNING TEST $i of $opt{NUM_TESTS}$name with option $test_type $run_type$installme\n\n";
+ # Always show which build directory and output directory is being used
+ doprint "BUILD_DIR=$builddir\n";
+ doprint "OUTPUT_DIR=$outputdir\n\n";
+
if (defined($pre_test)) {
my $ret = run_command $pre_test;
if (!$ret && defined($pre_test_die) &&
diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf
index f43477a9b857..9c4c449a8f3e 100644
--- a/tools/testing/ktest/sample.conf
+++ b/tools/testing/ktest/sample.conf
@@ -1017,6 +1017,8 @@
# Note, PATCHCHECK_CHERRY requires PATCHCHECK_END to be defined.
# (default 0)
#
+# PATCHCHECK_SKIP is an optional list of commit SHAs to skip during testing
+#
# PATCHCHECK_TYPE is required and is the type of test to run:
# build, boot, test.
#
diff --git a/tools/testing/kunit/configs/all_tests.config b/tools/testing/kunit/configs/all_tests.config
index e70c502a16df..422e186cf3cf 100644
--- a/tools/testing/kunit/configs/all_tests.config
+++ b/tools/testing/kunit/configs/all_tests.config
@@ -10,7 +10,6 @@ CONFIG_KUNIT_EXAMPLE_TEST=y
CONFIG_KUNIT_ALL_TESTS=y
CONFIG_FORTIFY_SOURCE=y
-CONFIG_INIT_STACK_ALL_PATTERN=y
CONFIG_IIO=y
diff --git a/tools/testing/kunit/configs/arch_uml.config b/tools/testing/kunit/configs/arch_uml.config
index 54ad8972681a..28edf816aa70 100644
--- a/tools/testing/kunit/configs/arch_uml.config
+++ b/tools/testing/kunit/configs/arch_uml.config
@@ -1,8 +1,7 @@
# Config options which are added to UML builds by default
-# Enable virtio/pci, as a lot of tests require it.
-CONFIG_VIRTIO_UML=y
-CONFIG_UML_PCI_OVER_VIRTIO=y
+# Enable pci, as a lot of tests require it.
+CONFIG_KUNIT_UML_PCI=y
# Enable FORTIFY_SOURCE for wider checking.
CONFIG_FORTIFY_SOURCE=y
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index 7f9ae55fd6d5..cd99c1956331 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -228,7 +228,7 @@ def parse_tests(request: KunitParseRequest, metadata: kunit_json.Metadata, input
fake_test.counts.passed = 1
output: Iterable[str] = input_data
- if request.raw_output == 'all':
+ if request.raw_output == 'all' or request.raw_output == 'full':
pass
elif request.raw_output == 'kunit':
output = kunit_parser.extract_tap_lines(output)
@@ -425,7 +425,7 @@ def add_parse_opts(parser: argparse.ArgumentParser) -> None:
parser.add_argument('--raw_output', help='If set don\'t parse output from kernel. '
'By default, filters to just KUnit output. Use '
'--raw_output=all to show everything',
- type=str, nargs='?', const='all', default=None, choices=['all', 'kunit'])
+ type=str, nargs='?', const='all', default=None, choices=['all', 'full', 'kunit'])
parser.add_argument('--json',
nargs='?',
help='Prints parsed test results as JSON to stdout or a file if '
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
index c176487356e6..333cd3a4a56b 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -352,9 +352,9 @@ def parse_test_plan(lines: LineStream, test: Test) -> bool:
lines.pop()
return True
-TEST_RESULT = re.compile(r'^\s*(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$')
+TEST_RESULT = re.compile(r'^\s*(ok|not ok) ([0-9]+) ?(- )?([^#]*)( # .*)?$')
-TEST_RESULT_SKIP = re.compile(r'^\s*(ok|not ok) ([0-9]+) (- )?(.*) # SKIP(.*)$')
+TEST_RESULT_SKIP = re.compile(r'^\s*(ok|not ok) ([0-9]+) ?(- )?(.*) # SKIP ?(.*)$')
def peek_test_name_match(lines: LineStream, test: Test) -> bool:
"""
@@ -379,6 +379,8 @@ def peek_test_name_match(lines: LineStream, test: Test) -> bool:
if not match:
return False
name = match.group(4)
+ if not name:
+ return False
return name == test.name
def parse_test_result(lines: LineStream, test: Test,
@@ -416,7 +418,7 @@ def parse_test_result(lines: LineStream, test: Test,
# Set name of test object
if skip_match:
- test.name = skip_match.group(4)
+ test.name = skip_match.group(4) or skip_match.group(5)
else:
test.name = match.group(4)
diff --git a/tools/testing/kunit/qemu_configs/mips.py b/tools/testing/kunit/qemu_configs/mips.py
new file mode 100644
index 000000000000..8899ac157b30
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/mips.py
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='mips',
+ kconfig='''
+CONFIG_32BIT=y
+CONFIG_CPU_BIG_ENDIAN=y
+CONFIG_MIPS_MALTA=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
+''',
+ qemu_arch='mips',
+ kernel_path='vmlinuz',
+ kernel_command_line='console=ttyS0',
+ extra_qemu_params=['-M', 'malta'])
diff --git a/tools/testing/kunit/qemu_configs/mips64.py b/tools/testing/kunit/qemu_configs/mips64.py
new file mode 100644
index 000000000000..1478aed05b94
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/mips64.py
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='mips',
+ kconfig='''
+CONFIG_CPU_MIPS64_R2=y
+CONFIG_64BIT=y
+CONFIG_CPU_BIG_ENDIAN=y
+CONFIG_MIPS_MALTA=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
+''',
+ qemu_arch='mips64',
+ kernel_path='vmlinuz',
+ kernel_command_line='console=ttyS0',
+ extra_qemu_params=['-M', 'malta', '-cpu', '5KEc'])
diff --git a/tools/testing/kunit/qemu_configs/mips64el.py b/tools/testing/kunit/qemu_configs/mips64el.py
new file mode 100644
index 000000000000..300c711d7a82
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/mips64el.py
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='mips',
+ kconfig='''
+CONFIG_CPU_MIPS64_R2=y
+CONFIG_64BIT=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_MIPS_MALTA=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
+''',
+ qemu_arch='mips64el',
+ kernel_path='vmlinuz',
+ kernel_command_line='console=ttyS0',
+ extra_qemu_params=['-M', 'malta', '-cpu', '5KEc'])
diff --git a/tools/testing/kunit/qemu_configs/mipsel.py b/tools/testing/kunit/qemu_configs/mipsel.py
new file mode 100644
index 000000000000..3d3543315b45
--- /dev/null
+++ b/tools/testing/kunit/qemu_configs/mipsel.py
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+
+from ..qemu_config import QemuArchParams
+
+QEMU_ARCH = QemuArchParams(linux_arch='mips',
+ kconfig='''
+CONFIG_32BIT=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_MIPS_MALTA=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
+''',
+ qemu_arch='mipsel',
+ kernel_path='vmlinuz',
+ kernel_command_line='console=ttyS0',
+ extra_qemu_params=['-M', 'malta'])
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-kselftest.log b/tools/testing/kunit/test_data/test_is_test_passed-kselftest.log
index 65d3f27feaf2..30d9ef18bcec 100644
--- a/tools/testing/kunit/test_data/test_is_test_passed-kselftest.log
+++ b/tools/testing/kunit/test_data/test_is_test_passed-kselftest.log
@@ -1,5 +1,5 @@
TAP version 13
-1..2
+1..3
# selftests: membarrier: membarrier_test_single_thread
# TAP version 13
# 1..2
@@ -12,3 +12,4 @@ ok 1 selftests: membarrier: membarrier_test_single_thread
# ok 1 sys_membarrier available
# ok 2 sys membarrier invalid command test: command = -1, flags = 0, errno = 22. Failed as expected
ok 2 selftests: membarrier: membarrier_test_multi_thread
+ok 3 # SKIP selftests: membarrier: membarrier_test_multi_thread
diff --git a/tools/testing/nvdimm/pmem-dax.c b/tools/testing/nvdimm/pmem-dax.c
index c1ec099a3b1d..05e763aab104 100644
--- a/tools/testing/nvdimm/pmem-dax.c
+++ b/tools/testing/nvdimm/pmem-dax.c
@@ -10,7 +10,7 @@
long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
long nr_pages, enum dax_access_mode mode, void **kaddr,
- pfn_t *pfn)
+ unsigned long *pfn)
{
resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
@@ -29,7 +29,7 @@ long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
*kaddr = pmem->virt_addr + offset;
page = vmalloc_to_page(pmem->virt_addr + offset);
if (pfn)
- *pfn = page_to_pfn_t(page);
+ *pfn = page_to_pfn(page);
pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
__func__, pmem, pgoff, page_to_pfn(page));
@@ -39,7 +39,7 @@ long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
if (kaddr)
*kaddr = pmem->virt_addr + offset;
if (pfn)
- *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
+ *pfn = PHYS_PFN(pmem->phys_addr + offset);
/*
* If badblocks are present, limit known good range to the
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
index e4313726fae3..f7e7bfe9bb85 100644
--- a/tools/testing/nvdimm/test/iomap.c
+++ b/tools/testing/nvdimm/test/iomap.c
@@ -8,7 +8,6 @@
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/pfn_t.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/mm.h>
@@ -135,16 +134,6 @@ void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
}
EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages);
-pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
-{
- struct nfit_test_resource *nfit_res = get_nfit_res(addr);
-
- if (nfit_res)
- flags &= ~PFN_MAP;
- return phys_to_pfn_t(addr, flags);
-}
-EXPORT_SYMBOL(__wrap_phys_to_pfn_t);
-
void *__wrap_memremap(resource_size_t offset, size_t size,
unsigned long flags)
{
diff --git a/tools/testing/nvdimm/test/ndtest.c b/tools/testing/nvdimm/test/ndtest.c
index 68a064ce598c..8e3b6be53839 100644
--- a/tools/testing/nvdimm/test/ndtest.c
+++ b/tools/testing/nvdimm/test/ndtest.c
@@ -850,11 +850,22 @@ static int ndtest_probe(struct platform_device *pdev)
p->dcr_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
sizeof(dma_addr_t), GFP_KERNEL);
+ if (!p->dcr_dma) {
+ rc = -ENOMEM;
+ goto err;
+ }
p->label_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
sizeof(dma_addr_t), GFP_KERNEL);
+ if (!p->label_dma) {
+ rc = -ENOMEM;
+ goto err;
+ }
p->dimm_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
sizeof(dma_addr_t), GFP_KERNEL);
-
+ if (!p->dimm_dma) {
+ rc = -ENOMEM;
+ goto err;
+ }
rc = ndtest_nvdimm_init(p);
if (rc)
goto err;
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index cfd4378e2129..f87e9f251d13 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -670,6 +670,7 @@ static int nfit_test_search_spa(struct nvdimm_bus *bus,
.addr = spa->spa,
.region = NULL,
};
+ struct nfit_mem *nfit_mem;
u64 dpa;
ret = device_for_each_child(&bus->dev, &ctx,
@@ -687,8 +688,12 @@ static int nfit_test_search_spa(struct nvdimm_bus *bus,
*/
nd_mapping = &nd_region->mapping[nd_region->ndr_mappings - 1];
nvdimm = nd_mapping->nvdimm;
+ nfit_mem = nvdimm_provider_data(nvdimm);
+ if (!nfit_mem)
+ return -EINVAL;
- spa->devices[0].nfit_device_handle = handle[nvdimm->id];
+ spa->devices[0].nfit_device_handle =
+ __to_nfit_memdev(nfit_mem)->device_handle;
spa->num_nvdimms = 1;
spa->devices[0].dpa = dpa;
diff --git a/tools/testing/nvdimm/test/nfit_test.h b/tools/testing/nvdimm/test/nfit_test.h
index b00583d1eace..b9047fb8ea4a 100644
--- a/tools/testing/nvdimm/test/nfit_test.h
+++ b/tools/testing/nvdimm/test/nfit_test.h
@@ -212,7 +212,6 @@ void __iomem *__wrap_devm_ioremap(struct device *dev,
void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
size_t size, unsigned long flags);
void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
-pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags);
void *__wrap_memremap(resource_size_t offset, size_t size,
unsigned long flags);
void __wrap_devm_memunmap(struct device *dev, void *addr);
diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
index 84b8c3c92c79..2f830ff8396c 100644
--- a/tools/testing/radix-tree/idr-test.c
+++ b/tools/testing/radix-tree/idr-test.c
@@ -499,19 +499,17 @@ void ida_check_random(void)
goto repeat;
}
-void ida_simple_get_remove_test(void)
+void ida_alloc_free_test(void)
{
DEFINE_IDA(ida);
unsigned long i;
- for (i = 0; i < 10000; i++) {
- assert(ida_simple_get(&ida, 0, 20000, GFP_KERNEL) == i);
- }
- assert(ida_simple_get(&ida, 5, 30, GFP_KERNEL) < 0);
+ for (i = 0; i < 10000; i++)
+ assert(ida_alloc_max(&ida, 20000, GFP_KERNEL) == i);
+ assert(ida_alloc_range(&ida, 5, 30, GFP_KERNEL) < 0);
- for (i = 0; i < 10000; i++) {
- ida_simple_remove(&ida, i);
- }
+ for (i = 0; i < 10000; i++)
+ ida_free(&ida, i);
assert(ida_is_empty(&ida));
ida_destroy(&ida);
@@ -524,7 +522,7 @@ void user_ida_checks(void)
ida_check_nomem();
ida_check_conv_user();
ida_check_random();
- ida_simple_get_remove_test();
+ ida_alloc_free_test();
radix_tree_cpu_dead(1);
}
diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
index 2c0b38301253..5c1b18e3ed21 100644
--- a/tools/testing/radix-tree/maple.c
+++ b/tools/testing/radix-tree/maple.c
@@ -8,14 +8,6 @@
* difficult to handle in kernel tests.
*/
-#define CONFIG_DEBUG_MAPLE_TREE
-#define CONFIG_MAPLE_SEARCH
-#define MAPLE_32BIT (MAPLE_NODE_SLOTS > 31)
-#include "test.h"
-#include <stdlib.h>
-#include <time.h>
-#include <linux/init.h>
-
#define module_init(x)
#define module_exit(x)
#define MODULE_AUTHOR(x)
@@ -23,7 +15,9 @@
#define MODULE_LICENSE(x)
#define dump_stack() assert(0)
-#include "../../../lib/maple_tree.c"
+#include "test.h"
+
+#include "../shared/maple-shim.c"
#include "../../../lib/test_maple_tree.c"
#define RCU_RANGE_COUNT 1000
@@ -63,430 +57,6 @@ struct rcu_reader_struct {
struct rcu_test_struct2 *test;
};
-static int get_alloc_node_count(struct ma_state *mas)
-{
- int count = 1;
- struct maple_alloc *node = mas->alloc;
-
- if (!node || ((unsigned long)node & 0x1))
- return 0;
- while (node->node_count) {
- count += node->node_count;
- node = node->slot[0];
- }
- return count;
-}
-
-static void check_mas_alloc_node_count(struct ma_state *mas)
-{
- mas_node_count_gfp(mas, MAPLE_ALLOC_SLOTS + 1, GFP_KERNEL);
- mas_node_count_gfp(mas, MAPLE_ALLOC_SLOTS + 3, GFP_KERNEL);
- MT_BUG_ON(mas->tree, get_alloc_node_count(mas) != mas->alloc->total);
- mas_destroy(mas);
-}
-
-/*
- * check_new_node() - Check the creation of new nodes and error path
- * verification.
- */
-static noinline void __init check_new_node(struct maple_tree *mt)
-{
-
- struct maple_node *mn, *mn2, *mn3;
- struct maple_alloc *smn;
- struct maple_node *nodes[100];
- int i, j, total;
-
- MA_STATE(mas, mt, 0, 0);
-
- check_mas_alloc_node_count(&mas);
-
- /* Try allocating 3 nodes */
- mtree_lock(mt);
- mt_set_non_kernel(0);
- /* request 3 nodes to be allocated. */
- mas_node_count(&mas, 3);
- /* Allocation request of 3. */
- MT_BUG_ON(mt, mas_alloc_req(&mas) != 3);
- /* Allocate failed. */
- MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM));
- MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
-
- MT_BUG_ON(mt, mas_allocated(&mas) != 3);
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, not_empty(mn));
- MT_BUG_ON(mt, mn == NULL);
- MT_BUG_ON(mt, mas.alloc == NULL);
- MT_BUG_ON(mt, mas.alloc->slot[0] == NULL);
- mas_push_node(&mas, mn);
- mas_reset(&mas);
- mas_destroy(&mas);
- mtree_unlock(mt);
-
-
- /* Try allocating 1 node, then 2 more */
- mtree_lock(mt);
- /* Set allocation request to 1. */
- mas_set_alloc_req(&mas, 1);
- /* Check Allocation request of 1. */
- MT_BUG_ON(mt, mas_alloc_req(&mas) != 1);
- mas_set_err(&mas, -ENOMEM);
- /* Validate allocation request. */
- MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
- /* Eat the requested node. */
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, not_empty(mn));
- MT_BUG_ON(mt, mn == NULL);
- MT_BUG_ON(mt, mn->slot[0] != NULL);
- MT_BUG_ON(mt, mn->slot[1] != NULL);
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
-
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- mas.status = ma_start;
- mas_destroy(&mas);
- /* Allocate 3 nodes, will fail. */
- mas_node_count(&mas, 3);
- /* Drop the lock and allocate 3 nodes. */
- mas_nomem(&mas, GFP_KERNEL);
- /* Ensure 3 are allocated. */
- MT_BUG_ON(mt, mas_allocated(&mas) != 3);
- /* Allocation request of 0. */
- MT_BUG_ON(mt, mas_alloc_req(&mas) != 0);
-
- MT_BUG_ON(mt, mas.alloc == NULL);
- MT_BUG_ON(mt, mas.alloc->slot[0] == NULL);
- MT_BUG_ON(mt, mas.alloc->slot[1] == NULL);
- /* Ensure we counted 3. */
- MT_BUG_ON(mt, mas_allocated(&mas) != 3);
- /* Free. */
- mas_reset(&mas);
- mas_destroy(&mas);
-
- /* Set allocation request to 1. */
- mas_set_alloc_req(&mas, 1);
- MT_BUG_ON(mt, mas_alloc_req(&mas) != 1);
- mas_set_err(&mas, -ENOMEM);
- /* Validate allocation request. */
- MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
- MT_BUG_ON(mt, mas_allocated(&mas) != 1);
- /* Check the node is only one node. */
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, not_empty(mn));
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
- MT_BUG_ON(mt, mn == NULL);
- MT_BUG_ON(mt, mn->slot[0] != NULL);
- MT_BUG_ON(mt, mn->slot[1] != NULL);
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
- mas_push_node(&mas, mn);
- MT_BUG_ON(mt, mas_allocated(&mas) != 1);
- MT_BUG_ON(mt, mas.alloc->node_count);
-
- mas_set_alloc_req(&mas, 2); /* request 2 more. */
- MT_BUG_ON(mt, mas_alloc_req(&mas) != 2);
- mas_set_err(&mas, -ENOMEM);
- MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
- MT_BUG_ON(mt, mas_allocated(&mas) != 3);
- MT_BUG_ON(mt, mas.alloc == NULL);
- MT_BUG_ON(mt, mas.alloc->slot[0] == NULL);
- MT_BUG_ON(mt, mas.alloc->slot[1] == NULL);
- for (i = 2; i >= 0; i--) {
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, mas_allocated(&mas) != i);
- MT_BUG_ON(mt, !mn);
- MT_BUG_ON(mt, not_empty(mn));
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- }
-
- total = 64;
- mas_set_alloc_req(&mas, total); /* request 2 more. */
- MT_BUG_ON(mt, mas_alloc_req(&mas) != total);
- mas_set_err(&mas, -ENOMEM);
- MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
- for (i = total; i > 0; i--) {
- unsigned int e = 0; /* expected node_count */
-
- if (!MAPLE_32BIT) {
- if (i >= 35)
- e = i - 34;
- else if (i >= 5)
- e = i - 4;
- else if (i >= 2)
- e = i - 1;
- } else {
- if (i >= 4)
- e = i - 3;
- else if (i >= 1)
- e = i - 1;
- else
- e = 0;
- }
-
- MT_BUG_ON(mt, mas.alloc->node_count != e);
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, not_empty(mn));
- MT_BUG_ON(mt, mas_allocated(&mas) != i - 1);
- MT_BUG_ON(mt, !mn);
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- }
-
- total = 100;
- for (i = 1; i < total; i++) {
- mas_set_alloc_req(&mas, i);
- mas_set_err(&mas, -ENOMEM);
- MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
- for (j = i; j > 0; j--) {
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, mas_allocated(&mas) != j - 1);
- MT_BUG_ON(mt, !mn);
- MT_BUG_ON(mt, not_empty(mn));
- mas_push_node(&mas, mn);
- MT_BUG_ON(mt, mas_allocated(&mas) != j);
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, not_empty(mn));
- MT_BUG_ON(mt, mas_allocated(&mas) != j - 1);
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- }
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
-
- mas_set_alloc_req(&mas, i);
- mas_set_err(&mas, -ENOMEM);
- MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
- for (j = 0; j <= i/2; j++) {
- MT_BUG_ON(mt, mas_allocated(&mas) != i - j);
- nodes[j] = mas_pop_node(&mas);
- MT_BUG_ON(mt, mas_allocated(&mas) != i - j - 1);
- }
-
- while (j) {
- j--;
- mas_push_node(&mas, nodes[j]);
- MT_BUG_ON(mt, mas_allocated(&mas) != i - j);
- }
- MT_BUG_ON(mt, mas_allocated(&mas) != i);
- for (j = 0; j <= i/2; j++) {
- MT_BUG_ON(mt, mas_allocated(&mas) != i - j);
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, not_empty(mn));
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- MT_BUG_ON(mt, mas_allocated(&mas) != i - j - 1);
- }
- mas_reset(&mas);
- MT_BUG_ON(mt, mas_nomem(&mas, GFP_KERNEL));
- mas_destroy(&mas);
-
- }
-
- /* Set allocation request. */
- total = 500;
- mas_node_count(&mas, total);
- /* Drop the lock and allocate the nodes. */
- mas_nomem(&mas, GFP_KERNEL);
- MT_BUG_ON(mt, !mas.alloc);
- i = 1;
- smn = mas.alloc;
- while (i < total) {
- for (j = 0; j < MAPLE_ALLOC_SLOTS; j++) {
- i++;
- MT_BUG_ON(mt, !smn->slot[j]);
- if (i == total)
- break;
- }
- smn = smn->slot[0]; /* next. */
- }
- MT_BUG_ON(mt, mas_allocated(&mas) != total);
- mas_reset(&mas);
- mas_destroy(&mas); /* Free. */
-
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
- for (i = 1; i < 128; i++) {
- mas_node_count(&mas, i); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- MT_BUG_ON(mt, mas_allocated(&mas) != i); /* check request filled */
- for (j = i; j > 0; j--) { /*Free the requests */
- mn = mas_pop_node(&mas); /* get the next node. */
- MT_BUG_ON(mt, mn == NULL);
- MT_BUG_ON(mt, not_empty(mn));
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- }
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
- }
-
- for (i = 1; i < MAPLE_NODE_MASK + 1; i++) {
- MA_STATE(mas2, mt, 0, 0);
- mas_node_count(&mas, i); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- MT_BUG_ON(mt, mas_allocated(&mas) != i); /* check request filled */
- for (j = 1; j <= i; j++) { /* Move the allocations to mas2 */
- mn = mas_pop_node(&mas); /* get the next node. */
- MT_BUG_ON(mt, mn == NULL);
- MT_BUG_ON(mt, not_empty(mn));
- mas_push_node(&mas2, mn);
- MT_BUG_ON(mt, mas_allocated(&mas2) != j);
- }
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
- MT_BUG_ON(mt, mas_allocated(&mas2) != i);
-
- for (j = i; j > 0; j--) { /*Free the requests */
- MT_BUG_ON(mt, mas_allocated(&mas2) != j);
- mn = mas_pop_node(&mas2); /* get the next node. */
- MT_BUG_ON(mt, mn == NULL);
- MT_BUG_ON(mt, not_empty(mn));
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- }
- MT_BUG_ON(mt, mas_allocated(&mas2) != 0);
- }
-
-
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
- mas_node_count(&mas, MAPLE_ALLOC_SLOTS + 1); /* Request */
- MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM));
- MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
- MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1);
- MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS);
-
- mn = mas_pop_node(&mas); /* get the next node. */
- MT_BUG_ON(mt, mn == NULL);
- MT_BUG_ON(mt, not_empty(mn));
- MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS);
- MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 1);
-
- mas_push_node(&mas, mn);
- MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1);
- MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS);
-
- /* Check the limit of pop/push/pop */
- mas_node_count(&mas, MAPLE_ALLOC_SLOTS + 2); /* Request */
- MT_BUG_ON(mt, mas_alloc_req(&mas) != 1);
- MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM));
- MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
- MT_BUG_ON(mt, mas_alloc_req(&mas));
- MT_BUG_ON(mt, mas.alloc->node_count != 1);
- MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 2);
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, not_empty(mn));
- MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1);
- MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS);
- mas_push_node(&mas, mn);
- MT_BUG_ON(mt, mas.alloc->node_count != 1);
- MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 2);
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, not_empty(mn));
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- for (i = 1; i <= MAPLE_ALLOC_SLOTS + 1; i++) {
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, not_empty(mn));
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- }
- MT_BUG_ON(mt, mas_allocated(&mas) != 0);
-
-
- for (i = 3; i < MAPLE_NODE_MASK * 3; i++) {
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, i); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- mn = mas_pop_node(&mas); /* get the next node. */
- mas_push_node(&mas, mn); /* put it back */
- mas_destroy(&mas);
-
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, i); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- mn = mas_pop_node(&mas); /* get the next node. */
- mn2 = mas_pop_node(&mas); /* get the next node. */
- mas_push_node(&mas, mn); /* put them back */
- mas_push_node(&mas, mn2);
- mas_destroy(&mas);
-
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, i); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- mn = mas_pop_node(&mas); /* get the next node. */
- mn2 = mas_pop_node(&mas); /* get the next node. */
- mn3 = mas_pop_node(&mas); /* get the next node. */
- mas_push_node(&mas, mn); /* put them back */
- mas_push_node(&mas, mn2);
- mas_push_node(&mas, mn3);
- mas_destroy(&mas);
-
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, i); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- mn = mas_pop_node(&mas); /* get the next node. */
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- mas_destroy(&mas);
-
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, i); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- mn = mas_pop_node(&mas); /* get the next node. */
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- mn = mas_pop_node(&mas); /* get the next node. */
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- mn = mas_pop_node(&mas); /* get the next node. */
- mn->parent = ma_parent_ptr(mn);
- ma_free_rcu(mn);
- mas_destroy(&mas);
- }
-
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, 5); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- MT_BUG_ON(mt, mas_allocated(&mas) != 5);
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, 10); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- mas.status = ma_start;
- MT_BUG_ON(mt, mas_allocated(&mas) != 10);
- mas_destroy(&mas);
-
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, MAPLE_ALLOC_SLOTS - 1); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS - 1);
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, 10 + MAPLE_ALLOC_SLOTS - 1); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- mas.status = ma_start;
- MT_BUG_ON(mt, mas_allocated(&mas) != 10 + MAPLE_ALLOC_SLOTS - 1);
- mas_destroy(&mas);
-
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, MAPLE_ALLOC_SLOTS + 1); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1);
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, MAPLE_ALLOC_SLOTS * 2 + 2); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- mas.status = ma_start;
- MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS * 2 + 2);
- mas_destroy(&mas);
-
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, MAPLE_ALLOC_SLOTS * 2 + 1); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS * 2 + 1);
- mas.node = MA_ERROR(-ENOMEM);
- mas_node_count(&mas, MAPLE_ALLOC_SLOTS * 3 + 2); /* Request */
- mas_nomem(&mas, GFP_KERNEL); /* Fill request */
- mas.status = ma_start;
- MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS * 3 + 2);
- mas_destroy(&mas);
-
- mtree_unlock(mt);
-}
-
/*
* Check erasing including RCU.
*/
@@ -35062,7 +34632,7 @@ void run_check_rcu_slowread(struct maple_tree *mt, struct rcu_test_struct *vals)
int i;
void *(*function)(void *);
- pthread_t readers[20];
+ pthread_t readers[30];
unsigned int index = vals->index;
mt_set_in_rcu(mt);
@@ -35080,14 +34650,14 @@ void run_check_rcu_slowread(struct maple_tree *mt, struct rcu_test_struct *vals)
}
}
- usleep(5); /* small yield to ensure all threads are at least started. */
+ usleep(3); /* small yield to ensure all threads are at least started. */
while (index <= vals->last) {
mtree_store(mt, index,
(index % 2 ? vals->entry2 : vals->entry3),
GFP_KERNEL);
index++;
- usleep(5);
+ usleep(2);
}
while (i--)
@@ -35098,6 +34668,7 @@ void run_check_rcu_slowread(struct maple_tree *mt, struct rcu_test_struct *vals)
MT_BUG_ON(mt, !vals->seen_entry3);
MT_BUG_ON(mt, !vals->seen_both);
}
+
static noinline void __init check_rcu_simulated(struct maple_tree *mt)
{
unsigned long i, nr_entries = 1000;
@@ -35454,17 +35025,6 @@ static void check_dfs_preorder(struct maple_tree *mt)
MT_BUG_ON(mt, count != e);
mtree_destroy(mt);
- mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
- mas_reset(&mas);
- mt_zero_nr_tallocated();
- mt_set_non_kernel(200);
- mas_expected_entries(&mas, max);
- for (count = 0; count <= max; count++) {
- mas.index = mas.last = count;
- mas_store(&mas, xa_mk_value(count));
- MT_BUG_ON(mt, mas_is_err(&mas));
- }
- mas_destroy(&mas);
rcu_barrier();
/*
* pr_info(" ->seq test of 0-%lu %luK in %d active (%d total)\n",
@@ -35523,6 +35083,18 @@ static unsigned char get_vacant_height(struct ma_wr_state *wr_mas, void *entry)
return vacant_height;
}
+static int mas_allocated(struct ma_state *mas)
+{
+ int total = 0;
+
+ if (mas->alloc)
+ total++;
+
+ if (mas->sheaf)
+ total += kmem_cache_sheaf_size(mas->sheaf);
+
+ return total;
+}
/* Preallocation testing */
static noinline void __init check_prealloc(struct maple_tree *mt)
{
@@ -35541,7 +35113,10 @@ static noinline void __init check_prealloc(struct maple_tree *mt)
/* Spanning store */
mas_set_range(&mas, 470, 500);
- MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+
+ mas_wr_preallocate(&wr_mas, ptr);
+ MT_BUG_ON(mt, mas.store_type != wr_spanning_store);
+ MT_BUG_ON(mt, mas_is_err(&mas));
allocated = mas_allocated(&mas);
height = mas_mt_height(&mas);
vacant_height = get_vacant_height(&wr_mas, ptr);
@@ -35551,6 +35126,7 @@ static noinline void __init check_prealloc(struct maple_tree *mt)
allocated = mas_allocated(&mas);
MT_BUG_ON(mt, allocated != 0);
+ mas_wr_preallocate(&wr_mas, ptr);
MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
allocated = mas_allocated(&mas);
height = mas_mt_height(&mas);
@@ -35596,20 +35172,6 @@ static noinline void __init check_prealloc(struct maple_tree *mt)
height = mas_mt_height(&mas);
vacant_height = get_vacant_height(&wr_mas, ptr);
MT_BUG_ON(mt, allocated != 1 + (height - vacant_height) * 3);
- mn = mas_pop_node(&mas);
- MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1);
- mas_push_node(&mas, mn);
- MT_BUG_ON(mt, mas_allocated(&mas) != allocated);
- MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
- mas_destroy(&mas);
- allocated = mas_allocated(&mas);
- MT_BUG_ON(mt, allocated != 0);
-
- MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
- allocated = mas_allocated(&mas);
- height = mas_mt_height(&mas);
- vacant_height = get_vacant_height(&wr_mas, ptr);
- MT_BUG_ON(mt, allocated != 1 + (height - vacant_height) * 3);
mas_store_prealloc(&mas, ptr);
MT_BUG_ON(mt, mas_allocated(&mas) != 0);
@@ -35668,6 +35230,18 @@ static noinline void __init check_prealloc(struct maple_tree *mt)
allocated = mas_allocated(&mas);
height = mas_mt_height(&mas);
MT_BUG_ON(mt, allocated != 0);
+
+ /* Chaining multiple preallocations */
+ mt_set_in_rcu(mt);
+ mas_set_range(&mas, 800, 805); /* Slot store, should be 0 allocations */
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ allocated = mas_allocated(&mas);
+ MT_BUG_ON(mt, allocated != 0);
+ mas.last = 809; /* Node store */
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ allocated = mas_allocated(&mas);
+ MT_BUG_ON(mt, allocated != 1);
+ mas_store_prealloc(&mas, ptr);
}
/* End of preallocation testing */
@@ -36314,13 +35888,18 @@ extern void test_kmem_cache_bulk(void);
static inline void check_spanning_store_height(struct maple_tree *mt)
{
int index = 0;
+ int last = 140;
MA_STATE(mas, mt, 0, 0);
mas_lock(&mas);
while (mt_height(mt) != 3) {
mas_store_gfp(&mas, xa_mk_value(index), GFP_KERNEL);
mas_set(&mas, ++index);
}
- mas_set_range(&mas, 90, 140);
+
+ if (MAPLE_32BIT)
+ last = 155; /* 32 bit higher branching factor. */
+
+ mas_set_range(&mas, 90, last);
mas_store_gfp(&mas, xa_mk_value(index), GFP_KERNEL);
MT_BUG_ON(mt, mas_mt_height(&mas) != 2);
mas_unlock(&mas);
@@ -36393,11 +35972,17 @@ static void check_nomem_writer_race(struct maple_tree *mt)
check_load(mt, 6, xa_mk_value(0xC));
mtree_unlock(mt);
+ mt_set_non_kernel(0);
/* test for the same race but with mas_store_gfp() */
mtree_store_range(mt, 0, 5, xa_mk_value(0xA), GFP_KERNEL);
mtree_store_range(mt, 6, 10, NULL, GFP_KERNEL);
mas_set_range(&mas, 0, 5);
+
+ /* setup writer 2 that will trigger the race condition */
+ mt_set_private(mt);
+ mt_set_callback(writer2);
+
mtree_lock(mt);
mas_store_gfp(&mas, NULL, GFP_KERNEL);
@@ -36415,6 +36000,7 @@ static void check_nomem_writer_race(struct maple_tree *mt)
*/
static inline int check_vma_modification(struct maple_tree *mt)
{
+#if defined(CONFIG_64BIT)
MA_STATE(mas, mt, 0, 0);
mtree_lock(mt);
@@ -36438,30 +36024,11 @@ static inline int check_vma_modification(struct maple_tree *mt)
mas_destroy(&mas);
mtree_unlock(mt);
+#endif
+
return 0;
}
-/*
- * test to check that bulk stores do not use wr_rebalance as the store
- * type.
- */
-static inline void check_bulk_rebalance(struct maple_tree *mt)
-{
- MA_STATE(mas, mt, ULONG_MAX, ULONG_MAX);
- int max = 10;
-
- build_full_tree(mt, 0, 2);
-
- /* erase every entry in the tree */
- do {
- /* set up bulk store mode */
- mas_expected_entries(&mas, max);
- mas_erase(&mas);
- MT_BUG_ON(mt, mas.store_type == wr_rebalance);
- } while (mas_prev(&mas, 0) != NULL);
-
- mas_destroy(&mas);
-}
void farmer_tests(void)
{
@@ -36474,10 +36041,6 @@ void farmer_tests(void)
check_vma_modification(&tree);
mtree_destroy(&tree);
- mt_init(&tree);
- check_bulk_rebalance(&tree);
- mtree_destroy(&tree);
-
tree.ma_root = xa_mk_value(0);
mt_dump(&tree, mt_dump_dec);
@@ -36537,10 +36100,6 @@ void farmer_tests(void)
check_erase_testset(&tree);
mtree_destroy(&tree);
- mt_init_flags(&tree, 0);
- check_new_node(&tree);
- mtree_destroy(&tree);
-
if (!MAPLE_32BIT) {
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_rcu_simulated(&tree);
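
The include shuffle at the top of maple.c does not change the technique these
radix-tree tests rely on: stub out the kernel-only hooks, then compile the
kernel implementation directly into the userspace binary. A stripped-down
sketch of that shim pattern, mirroring the includes above (maple-shim.c now
carries the CONFIG defines and the lib/maple_tree.c include):

#include <assert.h>

/* Neutralize kernel-only hooks so kernel .c files compile in userspace. */
#define module_init(x)
#define module_exit(x)
#define MODULE_AUTHOR(x)
#define MODULE_DESCRIPTION(x)
#define MODULE_LICENSE(x)
#define dump_stack()	assert(0)

#include "test.h"			/* userspace test harness glue */
#include "../shared/maple-shim.c"	/* pulls in lib/maple_tree.c itself */
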
diff --git a/tools/testing/scatterlist/linux/mm.h b/tools/testing/scatterlist/linux/mm.h
index 5bd9e6e80625..121ae78d6e88 100644
--- a/tools/testing/scatterlist/linux/mm.h
+++ b/tools/testing/scatterlist/linux/mm.h
@@ -51,7 +51,6 @@ static inline unsigned long page_to_phys(struct page *page)
#define page_to_pfn(page) ((unsigned long)(page) / PAGE_SIZE)
#define pfn_to_page(pfn) (void *)((pfn) * PAGE_SIZE)
-#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
#define __min(t1, t2, min1, min2, x, y) ({ \
t1 min1 = (x); \
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 6aa11cd3db42..56e44a98d6a5 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -36,6 +36,7 @@ TARGETS += filesystems/fat
TARGETS += filesystems/overlayfs
TARGETS += filesystems/statmount
TARGETS += filesystems/mount-notify
+TARGETS += filesystems/fuse
TARGETS += firmware
TARGETS += fpu
TARGETS += ftrace
@@ -53,6 +54,7 @@ TARGETS += kvm
TARGETS += landlock
TARGETS += lib
TARGETS += livepatch
+TARGETS += liveupdate
TARGETS += lkdtm
TARGETS += lsm
TARGETS += membarrier
@@ -77,6 +79,7 @@ TARGETS += net/ovpn
TARGETS += net/packetdrill
TARGETS += net/rds
TARGETS += net/tcp_ao
+TARGETS += nolibc
TARGETS += nsfs
TARGETS += pci_endpoint
TARGETS += pcie_bwctrl
@@ -123,6 +126,7 @@ TARGETS += uevent
TARGETS += user_events
TARGETS += vDSO
TARGETS += mm
+TARGETS += vfio
TARGETS += x86
TARGETS += x86/bugs
TARGETS += zram
@@ -205,7 +209,7 @@ export KHDR_INCLUDES
all:
@ret=1; \
- for TARGET in $(TARGETS); do \
+ for TARGET in $(TARGETS) $(INSTALL_DEP_TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
mkdir $$BUILD_TARGET -p; \
$(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET \
@@ -293,6 +297,14 @@ ifdef INSTALL_PATH
$(MAKE) -s --no-print-directory OUTPUT=$$BUILD_TARGET COLLECTION=$$TARGET \
-C $$TARGET emit_tests >> $(TEST_LIST); \
done;
+ @VERSION=$$(git describe HEAD 2>/dev/null); \
+ if [ -n "$$VERSION" ]; then \
+ echo "$$VERSION" > $(INSTALL_PATH)/VERSION; \
+ printf "Version saved to $(INSTALL_PATH)/VERSION\n"; \
+ else \
+ printf "Unable to get version from git describe\n"; \
+ fi
+ @echo "**Kselftest Installation is complete: $(INSTALL_PATH)**"
else
$(error Error: set INSTALL_PATH to use install)
endif
@@ -305,7 +317,7 @@ gen_tar: install
@echo "Created ${TAR_PATH}"
clean:
- @for TARGET in $(TARGETS); do \
+ @for TARGET in $(TARGETS) $(INSTALL_DEP_TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
$(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean;\
done;
diff --git a/tools/testing/selftests/acct/acct_syscall.c b/tools/testing/selftests/acct/acct_syscall.c
index 87c044fb9293..421adbdc299d 100644
--- a/tools/testing/selftests/acct/acct_syscall.c
+++ b/tools/testing/selftests/acct/acct_syscall.c
@@ -9,7 +9,7 @@
#include <string.h>
#include <sys/wait.h>
-#include "../kselftest.h"
+#include "kselftest.h"
int main(void)
{
diff --git a/tools/testing/selftests/alsa/conf.c b/tools/testing/selftests/alsa/conf.c
index e2b3a5810f47..317212078e36 100644
--- a/tools/testing/selftests/alsa/conf.c
+++ b/tools/testing/selftests/alsa/conf.c
@@ -14,7 +14,7 @@
#include <regex.h>
#include <sys/stat.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "alsa-local.h"
#define SYSFS_ROOT "/sys"
@@ -448,7 +448,7 @@ int conf_get_bool(snd_config_t *root, const char *key1, const char *key2, int de
ksft_exit_fail_msg("key '%s'.'%s' search error: %s\n", key1, key2, snd_strerror(ret));
ret = snd_config_get_bool(cfg);
if (ret < 0)
- ksft_exit_fail_msg("key '%s'.'%s' is not an bool\n", key1, key2);
+ ksft_exit_fail_msg("key '%s'.'%s' is not a bool\n", key1, key2);
return !!ret;
}
diff --git a/tools/testing/selftests/alsa/mixer-test.c b/tools/testing/selftests/alsa/mixer-test.c
index 2a4b2662035e..d4f845c32804 100644
--- a/tools/testing/selftests/alsa/mixer-test.c
+++ b/tools/testing/selftests/alsa/mixer-test.c
@@ -25,7 +25,7 @@
#include <poll.h>
#include <stdint.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "alsa-local.h"
#define TESTS_PER_CONTROL 7
@@ -53,10 +53,10 @@ struct ctl_data {
struct ctl_data *next;
};
-int num_cards = 0;
-int num_controls = 0;
-struct card_data *card_list = NULL;
-struct ctl_data *ctl_list = NULL;
+int num_cards;
+int num_controls;
+struct card_data *card_list;
+struct ctl_data *ctl_list;
static void find_controls(void)
{
diff --git a/tools/testing/selftests/alsa/pcm-test.c b/tools/testing/selftests/alsa/pcm-test.c
index dbd7c222ce93..ee04ccef7d7c 100644
--- a/tools/testing/selftests/alsa/pcm-test.c
+++ b/tools/testing/selftests/alsa/pcm-test.c
@@ -17,7 +17,7 @@
#include <assert.h>
#include <pthread.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "alsa-local.h"
typedef struct timespec timestamp_t;
@@ -30,7 +30,7 @@ struct card_data {
struct card_data *next;
};
-struct card_data *card_list = NULL;
+struct card_data *card_list;
struct pcm_data {
snd_pcm_t *handle;
@@ -43,10 +43,10 @@ struct pcm_data {
struct pcm_data *next;
};
-struct pcm_data *pcm_list = NULL;
+struct pcm_data *pcm_list;
-int num_missing = 0;
-struct pcm_data *pcm_missing = NULL;
+int num_missing;
+struct pcm_data *pcm_missing;
snd_config_t *default_pcm_config;
diff --git a/tools/testing/selftests/alsa/test-pcmtest-driver.c b/tools/testing/selftests/alsa/test-pcmtest-driver.c
index ca81afa4ee90..95065ef3b441 100644
--- a/tools/testing/selftests/alsa/test-pcmtest-driver.c
+++ b/tools/testing/selftests/alsa/test-pcmtest-driver.c
@@ -7,7 +7,7 @@
*/
#include <string.h>
#include <alsa/asoundlib.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#define CH_NUM 4
diff --git a/tools/testing/selftests/alsa/utimer-test.c b/tools/testing/selftests/alsa/utimer-test.c
index 32ee3ce57721..c45cb226bd8f 100644
--- a/tools/testing/selftests/alsa/utimer-test.c
+++ b/tools/testing/selftests/alsa/utimer-test.c
@@ -6,7 +6,7 @@
*
* Author: Ivan Orlov <ivan.orlov0322@gmail.com>
*/
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#include <sound/asound.h>
#include <unistd.h>
#include <fcntl.h>
@@ -135,6 +135,7 @@ TEST_F(timer_f, utimer) {
pthread_join(ticking_thread, NULL);
ASSERT_EQ(total_ticks, TICKS_COUNT);
pclose(rfp);
+ free(buf);
}
TEST(wrong_timers_test) {
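
utimer-test.c is written against kselftest_harness.h, which forks each
TEST_F() into its own child process with the fixture set up and torn down
around it; the free(buf) added above is therefore cleanup hygiene for leak
checkers rather than a correctness fix. The harness skeleton in miniature
(this is the real kselftest_harness.h API):

#include "kselftest_harness.h"

FIXTURE(counter) {
	int value;
};

FIXTURE_SETUP(counter)
{
	self->value = 41;
}

FIXTURE_TEARDOWN(counter)
{
	/* runs after each TEST_F(), even when an ASSERT fails */
}

TEST_F(counter, increments)
{
	self->value++;
	ASSERT_EQ(self->value, 42);
}

TEST_HARNESS_MAIN
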
diff --git a/tools/testing/selftests/arm64/abi/Makefile b/tools/testing/selftests/arm64/abi/Makefile
index a6d30c620908..483488f8c2ad 100644
--- a/tools/testing/selftests/arm64/abi/Makefile
+++ b/tools/testing/selftests/arm64/abi/Makefile
@@ -12,4 +12,4 @@ $(OUTPUT)/syscall-abi: syscall-abi.c syscall-abi-asm.S
$(OUTPUT)/tpidr2: tpidr2.c
$(CC) -fno-asynchronous-unwind-tables -fno-ident -s -Os -nostdlib \
-static -include ../../../../include/nolibc/nolibc.h \
- -ffreestanding -Wall $^ -o $@ -lgcc
+ -I../.. -ffreestanding -Wall $^ -o $@ -lgcc
diff --git a/tools/testing/selftests/arm64/abi/hwcap.c b/tools/testing/selftests/arm64/abi/hwcap.c
index 35f521e5f41c..c41640f18e4e 100644
--- a/tools/testing/selftests/arm64/abi/hwcap.c
+++ b/tools/testing/selftests/arm64/abi/hwcap.c
@@ -17,10 +17,16 @@
#include <asm/sigcontext.h>
#include <asm/unistd.h>
-#include "../../kselftest.h"
+#include <linux/auxvec.h>
+
+#include "kselftest.h"
#define TESTS_PER_HWCAP 3
+#ifndef AT_HWCAP3
+#define AT_HWCAP3 29
+#endif
+
/*
* Function expected to generate exception when the feature is not
* supported and return when it is supported. If the specific exception
@@ -51,7 +57,6 @@ static void cmpbr_sigill(void)
/* Not implemented, too complicated and unreliable anyway */
}
-
static void crc32_sigill(void)
{
/* CRC32W W0, W0, W1 */
@@ -165,6 +170,18 @@ static void lse128_sigill(void)
: "cc", "memory");
}
+static void lsfe_sigill(void)
+{
+ float __attribute__ ((aligned (16))) mem;
+ register float *memp asm ("x0") = &mem;
+
+ /* STFADD H0, [X0] */
+ asm volatile(".inst 0x7c20801f"
+ : "+r" (memp)
+ :
+ : "memory");
+}
+
static void lut_sigill(void)
{
/* LUTI2 V0.16B, { V0.16B }, V[0] */
@@ -759,6 +776,13 @@ static const struct hwcap_data {
.sigill_fn = lse128_sigill,
},
{
+ .name = "LSFE",
+ .at_hwcap = AT_HWCAP3,
+ .hwcap_bit = HWCAP3_LSFE,
+ .cpuinfo = "lsfe",
+ .sigill_fn = lsfe_sigill,
+ },
+ {
.name = "LUT",
.at_hwcap = AT_HWCAP2,
.hwcap_bit = HWCAP2_LUT,
@@ -1098,6 +1122,18 @@ static const struct hwcap_data {
.sigill_fn = hbc_sigill,
.sigill_reliable = true,
},
+ {
+ .name = "MTE_FAR",
+ .at_hwcap = AT_HWCAP3,
+ .hwcap_bit = HWCAP3_MTE_FAR,
+ .cpuinfo = "mtefar",
+ },
+ {
+ .name = "MTE_STOREONLY",
+ .at_hwcap = AT_HWCAP3,
+ .hwcap_bit = HWCAP3_MTE_STORE_ONLY,
+ .cpuinfo = "mtestoreonly",
+ },
};
typedef void (*sighandler_fn)(int, siginfo_t *, void *);
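
The LSFE and MTE entries above ride on the new AT_HWCAP3 auxval word; the
local #define keeps the test building against libc headers that predate it,
which is safe because getauxval() simply returns 0 for types the running
kernel does not provide. A minimal probe along those lines (the bit constant
would normally come from asm/hwcap.h, e.g. HWCAP3_LSFE; bit 0 below is an
arbitrary example, not a claim about the real bit assignment):

#include <stdio.h>
#include <sys/auxv.h>

#ifndef AT_HWCAP3
#define AT_HWCAP3 29		/* matches the fallback added above */
#endif

/* Nonzero if the given HWCAP3 bit is set; 0 on older kernels too. */
static int have_hwcap3(unsigned long bit)
{
	return !!(getauxval(AT_HWCAP3) & bit);
}

int main(void)
{
	printf("AT_HWCAP3: %#lx\n", getauxval(AT_HWCAP3));
	printf("bit 0: %d\n", have_hwcap3(1UL << 0));
	return 0;
}
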
diff --git a/tools/testing/selftests/arm64/abi/ptrace.c b/tools/testing/selftests/arm64/abi/ptrace.c
index b51d21f78cf9..0e46ac21c81d 100644
--- a/tools/testing/selftests/arm64/abi/ptrace.c
+++ b/tools/testing/selftests/arm64/abi/ptrace.c
@@ -18,7 +18,7 @@
#include <asm/sigcontext.h>
#include <asm/ptrace.h>
-#include "../../kselftest.h"
+#include "kselftest.h"
#define EXPECTED_TESTS 11
diff --git a/tools/testing/selftests/arm64/abi/syscall-abi.c b/tools/testing/selftests/arm64/abi/syscall-abi.c
index 5ec9a18ec802..b67e3e26fa6d 100644
--- a/tools/testing/selftests/arm64/abi/syscall-abi.c
+++ b/tools/testing/selftests/arm64/abi/syscall-abi.c
@@ -16,7 +16,7 @@
#include <asm/sigcontext.h>
#include <asm/unistd.h>
-#include "../../kselftest.h"
+#include "kselftest.h"
#include "syscall-abi.h"
diff --git a/tools/testing/selftests/arm64/abi/tpidr2.c b/tools/testing/selftests/arm64/abi/tpidr2.c
index eb19dcc37a75..1703543fb7c7 100644
--- a/tools/testing/selftests/arm64/abi/tpidr2.c
+++ b/tools/testing/selftests/arm64/abi/tpidr2.c
@@ -3,31 +3,12 @@
#include <linux/sched.h>
#include <linux/wait.h>
+#include "kselftest.h"
+
#define SYS_TPIDR2 "S3_3_C13_C0_5"
#define EXPECTED_TESTS 5
-static void putstr(const char *str)
-{
- write(1, str, strlen(str));
-}
-
-static void putnum(unsigned int num)
-{
- char c;
-
- if (num / 10)
- putnum(num / 10);
-
- c = '0' + (num % 10);
- write(1, &c, 1);
-}
-
-static int tests_run;
-static int tests_passed;
-static int tests_failed;
-static int tests_skipped;
-
static void set_tpidr2(uint64_t val)
{
asm volatile (
@@ -50,20 +31,6 @@ static uint64_t get_tpidr2(void)
return val;
}
-static void print_summary(void)
-{
- if (tests_passed + tests_failed + tests_skipped != EXPECTED_TESTS)
- putstr("# UNEXPECTED TEST COUNT: ");
-
- putstr("# Totals: pass:");
- putnum(tests_passed);
- putstr(" fail:");
- putnum(tests_failed);
- putstr(" xfail:0 xpass:0 skip:");
- putnum(tests_skipped);
- putstr(" error:0\n");
-}
-
/* Processes should start with TPIDR2 == 0 */
static int default_value(void)
{
@@ -105,9 +72,8 @@ static int write_fork_read(void)
if (newpid == 0) {
/* In child */
if (get_tpidr2() != oldpid) {
- putstr("# TPIDR2 changed in child: ");
- putnum(get_tpidr2());
- putstr("\n");
+ ksft_print_msg("TPIDR2 changed in child: %llx\n",
+ get_tpidr2());
exit(0);
}
@@ -115,14 +81,12 @@ static int write_fork_read(void)
if (get_tpidr2() == getpid()) {
exit(1);
} else {
- putstr("# Failed to set TPIDR2 in child\n");
+ ksft_print_msg("Failed to set TPIDR2 in child\n");
exit(0);
}
}
if (newpid < 0) {
- putstr("# fork() failed: -");
- putnum(-newpid);
- putstr("\n");
+ ksft_print_msg("fork() failed: %d\n", newpid);
return 0;
}
@@ -132,23 +96,22 @@ static int write_fork_read(void)
if (waiting < 0) {
if (errno == EINTR)
continue;
- putstr("# waitpid() failed: ");
- putnum(errno);
- putstr("\n");
+ ksft_print_msg("waitpid() failed: %d\n", errno);
return 0;
}
if (waiting != newpid) {
- putstr("# waitpid() returned wrong PID\n");
+ ksft_print_msg("waitpid() returned wrong PID: %d != %d\n",
+ waiting, newpid);
return 0;
}
if (!WIFEXITED(status)) {
- putstr("# child did not exit\n");
+ ksft_print_msg("child did not exit\n");
return 0;
}
if (getpid() != get_tpidr2()) {
- putstr("# TPIDR2 corrupted in parent\n");
+ ksft_print_msg("TPIDR2 corrupted in parent\n");
return 0;
}
@@ -188,64 +151,58 @@ static int write_clone_read(void)
stack = malloc(__STACK_SIZE);
if (!stack) {
- putstr("# malloc() failed\n");
+ ksft_print_msg("malloc() failed\n");
return 0;
}
ret = sys_clone(CLONE_VM, (unsigned long)stack + __STACK_SIZE,
&parent_tid, 0, &child_tid);
if (ret == -1) {
- putstr("# clone() failed\n");
- putnum(errno);
- putstr("\n");
+ ksft_print_msg("clone() failed: %d\n", errno);
return 0;
}
if (ret == 0) {
/* In child */
if (get_tpidr2() != 0) {
- putstr("# TPIDR2 non-zero in child: ");
- putnum(get_tpidr2());
- putstr("\n");
+ ksft_print_msg("TPIDR2 non-zero in child: %llx\n",
+ get_tpidr2());
exit(0);
}
if (gettid() == 0)
- putstr("# Child TID==0\n");
+ ksft_print_msg("Child TID==0\n");
set_tpidr2(gettid());
if (get_tpidr2() == gettid()) {
exit(1);
} else {
- putstr("# Failed to set TPIDR2 in child\n");
+ ksft_print_msg("Failed to set TPIDR2 in child\n");
exit(0);
}
}
for (;;) {
- waiting = wait4(ret, &status, __WCLONE, NULL);
+ waiting = waitpid(ret, &status, __WCLONE);
if (waiting < 0) {
if (errno == EINTR)
continue;
- putstr("# wait4() failed: ");
- putnum(errno);
- putstr("\n");
+ ksft_print_msg("waitpid() failed: %d\n", errno);
return 0;
}
if (waiting != ret) {
- putstr("# wait4() returned wrong PID ");
- putnum(waiting);
- putstr("\n");
+ ksft_print_msg("waitpid() returned wrong PID %d\n",
+ waiting);
return 0;
}
if (!WIFEXITED(status)) {
- putstr("# child did not exit\n");
+ ksft_print_msg("child did not exit\n");
return 0;
}
if (parent != get_tpidr2()) {
- putstr("# TPIDR2 corrupted in parent\n");
+ ksft_print_msg("TPIDR2 corrupted in parent\n");
return 0;
}
@@ -253,35 +210,14 @@ static int write_clone_read(void)
}
}
-#define run_test(name) \
- if (name()) { \
- tests_passed++; \
- } else { \
- tests_failed++; \
- putstr("not "); \
- } \
- putstr("ok "); \
- putnum(++tests_run); \
- putstr(" " #name "\n");
-
-#define skip_test(name) \
- tests_skipped++; \
- putstr("ok "); \
- putnum(++tests_run); \
- putstr(" # SKIP " #name "\n");
-
int main(int argc, char **argv)
{
int ret;
- putstr("TAP version 13\n");
- putstr("1..");
- putnum(EXPECTED_TESTS);
- putstr("\n");
+ ksft_print_header();
+ ksft_set_plan(5);
- putstr("# PID: ");
- putnum(getpid());
- putstr("\n");
+ ksft_print_msg("PID: %d\n", getpid());
/*
* This test is run with nolibc which doesn't support hwcap and
@@ -290,23 +226,21 @@ int main(int argc, char **argv)
*/
ret = open("/proc/sys/abi/sme_default_vector_length", O_RDONLY, 0);
if (ret >= 0) {
- run_test(default_value);
- run_test(write_read);
- run_test(write_sleep_read);
- run_test(write_fork_read);
- run_test(write_clone_read);
+ ksft_test_result(default_value(), "default_value\n");
+ ksft_test_result(write_read(), "write_read\n");
+ ksft_test_result(write_sleep_read(), "write_sleep_read\n");
+ ksft_test_result(write_fork_read(), "write_fork_read\n");
+ ksft_test_result(write_clone_read(), "write_clone_read\n");
} else {
- putstr("# SME support not present\n");
+ ksft_print_msg("SME support not present\n");
- skip_test(default_value);
- skip_test(write_read);
- skip_test(write_sleep_read);
- skip_test(write_fork_read);
- skip_test(write_clone_read);
+ ksft_test_result_skip("default_value\n");
+ ksft_test_result_skip("write_read\n");
+ ksft_test_result_skip("write_sleep_read\n");
+ ksft_test_result_skip("write_fork_read\n");
+ ksft_test_result_skip("write_clone_read\n");
}
- print_summary();
-
- return 0;
+ ksft_finished();
}
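
The tpidr2.c rewrite above replaces the hand-rolled putstr()/putnum() TAP
plumbing with the shared kselftest.h helpers, which emit the same TAP 13
stream and track the pass/fail/skip totals that print_summary() used to
compute by hand. The converted shape in miniature (kselftest.h is the
in-tree header these tests already include):

#include "kselftest.h"

static int always_passes(void)
{
	return 1;
}

int main(void)
{
	ksft_print_header();	/* "TAP version 13" */
	ksft_set_plan(2);	/* "1..2" */

	ksft_test_result(always_passes(), "always_passes\n");
	ksft_test_result_skip("not_applicable\n");

	ksft_finished();	/* prints totals, exits pass/fail */
}
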
diff --git a/tools/testing/selftests/arm64/bti/assembler.h b/tools/testing/selftests/arm64/bti/assembler.h
index 04e7b72880ef..141cdcbf0b8f 100644
--- a/tools/testing/selftests/arm64/bti/assembler.h
+++ b/tools/testing/selftests/arm64/bti/assembler.h
@@ -14,7 +14,6 @@
#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI (1U << 0)
#define GNU_PROPERTY_AARCH64_FEATURE_1_PAC (1U << 1)
-
.macro startfn name:req
.globl \name
\name:
diff --git a/tools/testing/selftests/arm64/fp/fp-ptrace.c b/tools/testing/selftests/arm64/fp/fp-ptrace.c
index 191c47ca0ed8..22c584b78be5 100644
--- a/tools/testing/selftests/arm64/fp/fp-ptrace.c
+++ b/tools/testing/selftests/arm64/fp/fp-ptrace.c
@@ -27,7 +27,7 @@
#include <asm/sve_context.h>
#include <asm/ptrace.h>
-#include "../../kselftest.h"
+#include "kselftest.h"
#include "fp-ptrace.h"
@@ -1061,11 +1061,31 @@ static bool sve_write_supported(struct test_config *config)
if (config->sme_vl_in != config->sme_vl_expected) {
return false;
}
+
+ if (!sve_supported())
+ return false;
}
return true;
}
+static bool sve_write_fpsimd_supported(struct test_config *config)
+{
+ if (!sve_supported() && !sme_supported())
+ return false;
+
+ if ((config->svcr_in & SVCR_ZA) != (config->svcr_expected & SVCR_ZA))
+ return false;
+
+ if (config->svcr_expected & SVCR_SM)
+ return false;
+
+ if (config->sme_vl_in != config->sme_vl_expected)
+ return false;
+
+ return true;
+}
+
static void fpsimd_write_expected(struct test_config *config)
{
int vl;
@@ -1134,6 +1154,9 @@ static void sve_write_expected(struct test_config *config)
int vl = vl_expected(config);
int sme_vq = __sve_vq_from_vl(config->sme_vl_expected);
+ if (!vl)
+ return;
+
fill_random(z_expected, __SVE_ZREGS_SIZE(__sve_vq_from_vl(vl)));
fill_random(p_expected, __SVE_PREGS_SIZE(__sve_vq_from_vl(vl)));
@@ -1152,7 +1175,7 @@ static void sve_write_expected(struct test_config *config)
}
}
-static void sve_write(pid_t child, struct test_config *config)
+static void sve_write_sve(pid_t child, struct test_config *config)
{
struct user_sve_header *sve;
struct iovec iov;
@@ -1161,7 +1184,10 @@ static void sve_write(pid_t child, struct test_config *config)
vl = vl_expected(config);
vq = __sve_vq_from_vl(vl);
- iov.iov_len = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
+ if (!vl)
+ return;
+
+ iov.iov_len = SVE_PT_SIZE(vq, SVE_PT_REGS_SVE);
iov.iov_base = malloc(iov.iov_len);
if (!iov.iov_base) {
ksft_print_msg("Failed allocating %lu byte SVE write buffer\n",
@@ -1195,6 +1221,41 @@ static void sve_write(pid_t child, struct test_config *config)
free(iov.iov_base);
}
+static void sve_write_fpsimd(pid_t child, struct test_config *config)
+{
+ struct user_sve_header *sve;
+ struct user_fpsimd_state *fpsimd;
+ struct iovec iov;
+ int ret, vl, vq;
+
+ vl = vl_expected(config);
+ vq = __sve_vq_from_vl(vl);
+
+ iov.iov_len = SVE_PT_SIZE(vq, SVE_PT_REGS_FPSIMD);
+ iov.iov_base = malloc(iov.iov_len);
+ if (!iov.iov_base) {
+ ksft_print_msg("Failed allocating %lu byte SVE write buffer\n",
+ iov.iov_len);
+ return;
+ }
+ memset(iov.iov_base, 0, iov.iov_len);
+
+ sve = iov.iov_base;
+ sve->size = iov.iov_len;
+ sve->flags = SVE_PT_REGS_FPSIMD;
+ sve->vl = vl;
+
+ fpsimd = iov.iov_base + SVE_PT_REGS_OFFSET;
+ memcpy(&fpsimd->vregs, v_expected, sizeof(v_expected));
+
+ ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_SVE, &iov);
+ if (ret != 0)
+ ksft_print_msg("Failed to write SVE: %s (%d)\n",
+ strerror(errno), errno);
+
+ free(iov.iov_base);
+}
+
static bool za_write_supported(struct test_config *config)
{
if ((config->svcr_in & SVCR_SM) != (config->svcr_expected & SVCR_SM))
@@ -1386,7 +1447,13 @@ static struct test_definition sve_test_defs[] = {
.name = "SVE write",
.supported = sve_write_supported,
.set_expected_values = sve_write_expected,
- .modify_values = sve_write,
+ .modify_values = sve_write_sve,
+ },
+ {
+ .name = "SVE write FPSIMD format",
+ .supported = sve_write_fpsimd_supported,
+ .set_expected_values = fpsimd_write_expected,
+ .modify_values = sve_write_fpsimd,
},
};
@@ -1498,7 +1565,6 @@ static void run_sve_tests(void)
&test_config);
}
}
-
}
static void run_sme_tests(void)
@@ -1607,7 +1673,7 @@ int main(void)
* Run the test set if there is no SVE or SME, with those we
* have to pick a VL for each run.
*/
- if (!sve_supported()) {
+ if (!sve_supported() && !sme_supported()) {
test_config.sve_vl_in = 0;
test_config.sve_vl_expected = 0;
test_config.sme_vl_in = 0;
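
sve_write_fpsimd() above builds on the documented NT_ARM_SVE regset layout: a
struct user_sve_header whose flags pick the payload format (SVE_PT_REGS_FPSIMD
vs SVE_PT_REGS_SVE), with SVE_PT_SIZE() giving the buffer size for a given
vector-quadword count and the register data starting at SVE_PT_REGS_OFFSET.
A condensed sketch of assembling an FPSIMD-format buffer, error handling
trimmed:

#include <stdlib.h>
#include <asm/ptrace.h>		/* struct user_sve_header, SVE_PT_* macros */
#include <asm/sve_context.h>	/* __sve_vq_from_vl() */

/* Allocate and label an NT_ARM_SVE buffer carrying FPSIMD-format data. */
static struct user_sve_header *build_fpsimd_buf(unsigned int vl, size_t *len)
{
	unsigned int vq = __sve_vq_from_vl(vl);
	struct user_sve_header *sve;

	*len = SVE_PT_SIZE(vq, SVE_PT_REGS_FPSIMD);
	sve = calloc(1, *len);
	if (!sve)
		return NULL;

	sve->size = *len;
	sve->flags = SVE_PT_REGS_FPSIMD; /* payload: struct user_fpsimd_state */
	sve->vl = vl;

	/* register data goes at (char *)sve + SVE_PT_REGS_OFFSET */
	return sve;
}
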
diff --git a/tools/testing/selftests/arm64/fp/fp-stress.c b/tools/testing/selftests/arm64/fp/fp-stress.c
index 74e23208b94c..65e01aba96ff 100644
--- a/tools/testing/selftests/arm64/fp/fp-stress.c
+++ b/tools/testing/selftests/arm64/fp/fp-stress.c
@@ -24,7 +24,7 @@
#include <sys/wait.h>
#include <asm/hwcap.h>
-#include "../../kselftest.h"
+#include "kselftest.h"
#define MAX_VLS 16
@@ -105,8 +105,8 @@ static void child_start(struct child_data *child, const char *program)
/*
* Read from the startup pipe, there should be no data
- * and we should block until it is closed. We just
- * carry on on error since this isn't super critical.
+ * and we should block until it is closed. We just
+ * carry-on on error since this isn't super critical.
*/
ret = read(3, &i, sizeof(i));
if (ret < 0)
@@ -549,7 +549,7 @@ int main(int argc, char **argv)
evs = calloc(tests, sizeof(*evs));
if (!evs)
- ksft_exit_fail_msg("Failed to allocated %d epoll events\n",
+ ksft_exit_fail_msg("Failed to allocate %d epoll events\n",
tests);
for (i = 0; i < cpus; i++) {
diff --git a/tools/testing/selftests/arm64/fp/kernel-test.c b/tools/testing/selftests/arm64/fp/kernel-test.c
index e3cec3723ffa..0c40007d1282 100644
--- a/tools/testing/selftests/arm64/fp/kernel-test.c
+++ b/tools/testing/selftests/arm64/fp/kernel-test.c
@@ -188,13 +188,13 @@ static bool create_socket(void)
ref = malloc(digest_len);
if (!ref) {
- printf("Failed to allocated %d byte reference\n", digest_len);
+ printf("Failed to allocate %d byte reference\n", digest_len);
return false;
}
digest = malloc(digest_len);
if (!digest) {
- printf("Failed to allocated %d byte digest\n", digest_len);
+ printf("Failed to allocate %d byte digest\n", digest_len);
return false;
}
diff --git a/tools/testing/selftests/arm64/fp/sve-probe-vls.c b/tools/testing/selftests/arm64/fp/sve-probe-vls.c
index a24eca7a4ecb..df0c1b6eb114 100644
--- a/tools/testing/selftests/arm64/fp/sve-probe-vls.c
+++ b/tools/testing/selftests/arm64/fp/sve-probe-vls.c
@@ -12,7 +12,7 @@
#include <sys/prctl.h>
#include <asm/sigcontext.h>
-#include "../../kselftest.h"
+#include "kselftest.h"
#include "rdvl.h"
int main(int argc, char **argv)
diff --git a/tools/testing/selftests/arm64/fp/sve-ptrace.c b/tools/testing/selftests/arm64/fp/sve-ptrace.c
index 577b6e05e860..28f6b996c5e2 100644
--- a/tools/testing/selftests/arm64/fp/sve-ptrace.c
+++ b/tools/testing/selftests/arm64/fp/sve-ptrace.c
@@ -19,7 +19,7 @@
#include <asm/sigcontext.h>
#include <asm/ptrace.h>
-#include "../../kselftest.h"
+#include "kselftest.h"
/* <linux/elf.h> and <sys/auxv.h> don't like each other, so: */
#ifndef NT_ARM_SVE
@@ -66,7 +66,7 @@ static const struct vec_type vec_types[] = {
};
#define VL_TESTS (((TEST_VQ_MAX - SVE_VQ_MIN) + 1) * 4)
-#define FLAG_TESTS 2
+#define FLAG_TESTS 4
#define FPSIMD_TESTS 2
#define EXPECTED_TESTS ((VL_TESTS + FLAG_TESTS + FPSIMD_TESTS) * ARRAY_SIZE(vec_types))
@@ -95,19 +95,27 @@ static int do_child(void)
static int get_fpsimd(pid_t pid, struct user_fpsimd_state *fpsimd)
{
struct iovec iov;
+ int ret;
iov.iov_base = fpsimd;
iov.iov_len = sizeof(*fpsimd);
- return ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov);
+ ret = ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov);
+ if (ret == -1)
+ ksft_perror("ptrace(PTRACE_GETREGSET)");
+ return ret;
}
static int set_fpsimd(pid_t pid, struct user_fpsimd_state *fpsimd)
{
struct iovec iov;
+ int ret;
iov.iov_base = fpsimd;
iov.iov_len = sizeof(*fpsimd);
- return ptrace(PTRACE_SETREGSET, pid, NT_PRFPREG, &iov);
+ ret = ptrace(PTRACE_SETREGSET, pid, NT_PRFPREG, &iov);
+ if (ret == -1)
+ ksft_perror("ptrace(PTRACE_SETREGSET)");
+ return ret;
}
static struct user_sve_header *get_sve(pid_t pid, const struct vec_type *type,
@@ -115,8 +123,9 @@ static struct user_sve_header *get_sve(pid_t pid, const struct vec_type *type,
{
struct user_sve_header *sve;
void *p;
- size_t sz = sizeof *sve;
+ size_t sz = sizeof(*sve);
struct iovec iov;
+ int ret;
while (1) {
if (*size < sz) {
@@ -132,8 +141,11 @@ static struct user_sve_header *get_sve(pid_t pid, const struct vec_type *type,
iov.iov_base = *buf;
iov.iov_len = sz;
- if (ptrace(PTRACE_GETREGSET, pid, type->regset, &iov))
+ ret = ptrace(PTRACE_GETREGSET, pid, type->regset, &iov);
+ if (ret) {
+ ksft_perror("ptrace(PTRACE_GETREGSET)");
goto error;
+ }
sve = *buf;
if (sve->size <= sz)
@@ -152,10 +164,46 @@ static int set_sve(pid_t pid, const struct vec_type *type,
const struct user_sve_header *sve)
{
struct iovec iov;
+ int ret;
iov.iov_base = (void *)sve;
iov.iov_len = sve->size;
- return ptrace(PTRACE_SETREGSET, pid, type->regset, &iov);
+ ret = ptrace(PTRACE_SETREGSET, pid, type->regset, &iov);
+ if (ret == -1)
+ ksft_perror("ptrace(PTRACE_SETREGSET)");
+ return ret;
+}
+
+/* A read operation fails */
+static void read_fails(pid_t child, const struct vec_type *type)
+{
+ struct user_sve_header *new_sve = NULL;
+ size_t new_sve_size = 0;
+ void *ret;
+
+ ret = get_sve(child, type, (void **)&new_sve, &new_sve_size);
+
+ ksft_test_result(ret == NULL, "%s unsupported read fails\n",
+ type->name);
+
+ free(new_sve);
+}
+
+/* A write operation fails */
+static void write_fails(pid_t child, const struct vec_type *type)
+{
+ struct user_sve_header sve;
+ int ret;
+
+ /* Just the header, no data */
+ memset(&sve, 0, sizeof(sve));
+ sve.size = sizeof(sve);
+ sve.flags = SVE_PT_REGS_SVE;
+ sve.vl = SVE_VL_MIN;
+ ret = set_sve(child, type, &sve);
+
+ ksft_test_result(ret != 0, "%s unsupported write fails\n",
+ type->name);
}
/* Validate setting and getting the inherit flag */
@@ -170,7 +218,7 @@ static void ptrace_set_get_inherit(pid_t child, const struct vec_type *type)
memset(&sve, 0, sizeof(sve));
sve.size = sizeof(sve);
sve.vl = sve_vl_from_vq(SVE_VQ_MIN);
- sve.flags = SVE_PT_VL_INHERIT;
+ sve.flags = SVE_PT_VL_INHERIT | SVE_PT_REGS_SVE;
ret = set_sve(child, type, &sve);
if (ret != 0) {
ksft_test_result_fail("Failed to set %s SVE_PT_VL_INHERIT\n",
@@ -235,6 +283,7 @@ static void ptrace_set_get_vl(pid_t child, const struct vec_type *type,
/* Set the VL by doing a set with no register payload */
memset(&sve, 0, sizeof(sve));
sve.size = sizeof(sve);
+ sve.flags = SVE_PT_REGS_SVE;
sve.vl = vl;
ret = set_sve(child, type, &sve);
if (ret != 0) {
@@ -253,7 +302,7 @@ static void ptrace_set_get_vl(pid_t child, const struct vec_type *type,
return;
}
- ksft_test_result(new_sve->vl = prctl_vl, "Set %s VL %u\n",
+ ksft_test_result(new_sve->vl == prctl_vl, "Set %s VL %u\n",
type->name, vl);
free(new_sve);
@@ -269,6 +318,25 @@ static void check_u32(unsigned int vl, const char *reg,
}
}
+/* Set out of range VLs */
+static void ptrace_set_vl_ranges(pid_t child, const struct vec_type *type)
+{
+ struct user_sve_header sve;
+ int ret;
+
+ memset(&sve, 0, sizeof(sve));
+ sve.flags = SVE_PT_REGS_SVE;
+ sve.size = sizeof(sve);
+
+ ret = set_sve(child, type, &sve);
+ ksft_test_result(ret != 0, "%s Set invalid VL 0\n", type->name);
+
+ sve.vl = SVE_VL_MAX + SVE_VQ_BYTES;
+ ret = set_sve(child, type, &sve);
+ ksft_test_result(ret != 0, "%s Set invalid VL %d\n", type->name,
+ SVE_VL_MAX + SVE_VQ_BYTES);
+}
+
/* Access the FPSIMD registers via the SVE regset */
static void ptrace_sve_fpsimd(pid_t child, const struct vec_type *type)
{
@@ -301,8 +369,10 @@ static void ptrace_sve_fpsimd(pid_t child, const struct vec_type *type)
p[j] = j;
}
+ /* This should only succeed for SVE */
ret = set_sve(child, type, sve);
- ksft_test_result(ret == 0, "%s FPSIMD set via SVE: %d\n",
+ ksft_test_result((type->regset == NT_ARM_SVE) == (ret == 0),
+ "%s FPSIMD set via SVE: %d\n",
type->name, ret);
if (ret)
goto out;
@@ -324,6 +394,58 @@ out:
free(svebuf);
}
+/* Write the FPSIMD registers via the SVE regset when SVE is not supported */
+static void ptrace_sve_fpsimd_no_sve(pid_t child)
+{
+ void *svebuf;
+ struct user_sve_header *sve;
+ struct user_fpsimd_state *fpsimd, new_fpsimd;
+ unsigned int i, j;
+ unsigned char *p;
+ int ret;
+
+ svebuf = malloc(SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD));
+ if (!svebuf) {
+ ksft_test_result_fail("Failed to allocate FPSIMD buffer\n");
+ return;
+ }
+
+ /* On a system without SVE the VL should be set to 0 */
+ memset(svebuf, 0, SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD));
+ sve = svebuf;
+ sve->flags = SVE_PT_REGS_FPSIMD;
+ sve->size = SVE_PT_SIZE(0, SVE_PT_REGS_FPSIMD);
+ sve->vl = 0;
+
+ /* Try to set a known FPSIMD state via the NT_ARM_SVE regset */
+ fpsimd = (struct user_fpsimd_state *)((char *)sve +
+ SVE_PT_FPSIMD_OFFSET);
+ for (i = 0; i < 32; ++i) {
+ p = (unsigned char *)&fpsimd->vregs[i];
+
+ for (j = 0; j < sizeof(fpsimd->vregs[i]); ++j)
+ p[j] = j;
+ }
+
+ ret = set_sve(child, &vec_types[0], sve);
+ ksft_test_result(ret == 0, "FPSIMD write via SVE\n");
+ if (ret) {
+ ksft_test_result_skip("Verify FPSIMD write via SVE\n");
+ goto out;
+ }
+
+ /* Verify via the FPSIMD regset */
+ if (get_fpsimd(child, &new_fpsimd)) {
+ ksft_test_result_skip("Verify FPSIMD write via SVE\n");
+ goto out;
+ }
+ ksft_test_result(memcmp(fpsimd, &new_fpsimd, sizeof(*fpsimd)) == 0,
+ "Verify FPSIMD write via SVE\n");
+
+out:
+ free(svebuf);
+}
+
/* Validate attempting to set SVE data and read SVE data */
static void ptrace_set_sve_get_sve_data(pid_t child,
const struct vec_type *type,
@@ -680,6 +802,20 @@ static int do_parent(pid_t child)
}
for (i = 0; i < ARRAY_SIZE(vec_types); i++) {
+ /*
+ * If the vector type isn't supported reads and writes
+ * should fail.
+ */
+ if (!(getauxval(vec_types[i].hwcap_type) & vec_types[i].hwcap)) {
+ read_fails(child, &vec_types[i]);
+ write_fails(child, &vec_types[i]);
+ } else {
+ ksft_test_result_skip("%s unsupported read fails\n",
+ vec_types[i].name);
+ ksft_test_result_skip("%s unsupported write fails\n",
+ vec_types[i].name);
+ }
+
/* FPSIMD via SVE regset */
if (getauxval(vec_types[i].hwcap_type) & vec_types[i].hwcap) {
ptrace_sve_fpsimd(child, &vec_types[i]);
@@ -700,6 +836,17 @@ static int do_parent(pid_t child)
vec_types[i].name);
}
+ /* Setting out of bounds VLs should fail */
+ if (getauxval(vec_types[i].hwcap_type) & vec_types[i].hwcap) {
+ ptrace_set_vl_ranges(child, &vec_types[i]);
+ } else {
+ ksft_test_result_skip("%s Set invalid VL 0\n",
+ vec_types[i].name);
+ ksft_test_result_skip("%s Set invalid VL %d\n",
+ vec_types[i].name,
+ SVE_VL_MAX + SVE_VQ_BYTES);
+ }
+
/* Step through every possible VQ */
for (vq = SVE_VQ_MIN; vq <= TEST_VQ_MAX; vq++) {
vl = sve_vl_from_vq(vq);
@@ -731,6 +878,15 @@ static int do_parent(pid_t child)
}
}
+ /* We support SVE writes of FPSIMD format on SME-only systems */
+ if (!(getauxval(AT_HWCAP) & HWCAP_SVE) &&
+ (getauxval(AT_HWCAP2) & HWCAP2_SME)) {
+ ptrace_sve_fpsimd_no_sve(child);
+ } else {
+ ksft_test_result_skip("FPSIMD write via SVE\n");
+ ksft_test_result_skip("Verify FPSIMD write via SVE\n");
+ }
+
ret = EXIT_SUCCESS;
error:
@@ -750,9 +906,6 @@ int main(void)
ksft_print_header();
ksft_set_plan(EXPECTED_TESTS);
- if (!(getauxval(AT_HWCAP) & HWCAP_SVE))
- ksft_exit_skip("SVE not available\n");
-
child = fork();
if (!child)
return do_child();
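
Every accessor in sve-ptrace.c reduces to the same regset idiom that the
wrappers above now instrument with ksft_perror(): point an iovec at a buffer
and let the kernel clamp iov_len to what it actually transferred. The core
pattern, shown here for the plain FPSIMD regset:

#include <elf.h>		/* NT_PRFPREG */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <asm/ptrace.h>		/* struct user_fpsimd_state (arm64) */

/* Read the tracee's FPSIMD state; returns 0 or -1 with errno set. */
static long read_fpsimd(pid_t pid, struct user_fpsimd_state *st)
{
	struct iovec iov = {
		.iov_base = st,
		.iov_len = sizeof(*st),	/* kernel shrinks this on return */
	};

	return ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov);
}
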
diff --git a/tools/testing/selftests/arm64/fp/vec-syscfg.c b/tools/testing/selftests/arm64/fp/vec-syscfg.c
index ea9c7d47790f..8dd932fdcdc4 100644
--- a/tools/testing/selftests/arm64/fp/vec-syscfg.c
+++ b/tools/testing/selftests/arm64/fp/vec-syscfg.c
@@ -19,7 +19,7 @@
#include <asm/sigcontext.h>
#include <asm/hwcap.h>
-#include "../../kselftest.h"
+#include "kselftest.h"
#include "rdvl.h"
#define ARCH_MIN_VL SVE_VL_MIN
@@ -690,7 +690,6 @@ static inline void smstop(void)
asm volatile("msr S0_3_C4_C6_3, xzr");
}
-
/*
* Verify we can change the SVE vector length while SME is active and
* continue to use SME afterwards.
diff --git a/tools/testing/selftests/arm64/fp/za-ptrace.c b/tools/testing/selftests/arm64/fp/za-ptrace.c
index 08c777f87ea2..787eed22d059 100644
--- a/tools/testing/selftests/arm64/fp/za-ptrace.c
+++ b/tools/testing/selftests/arm64/fp/za-ptrace.c
@@ -18,7 +18,7 @@
#include <asm/sigcontext.h>
#include <asm/ptrace.h>
-#include "../../kselftest.h"
+#include "kselftest.h"
/* <linux/elf.h> and <sys/auxv.h> don't like each other, so: */
#ifndef NT_ARM_ZA
diff --git a/tools/testing/selftests/arm64/fp/zt-ptrace.c b/tools/testing/selftests/arm64/fp/zt-ptrace.c
index 584b8d59b7ea..f3fa49fd0fbd 100644
--- a/tools/testing/selftests/arm64/fp/zt-ptrace.c
+++ b/tools/testing/selftests/arm64/fp/zt-ptrace.c
@@ -18,7 +18,7 @@
#include <asm/sigcontext.h>
#include <asm/ptrace.h>
-#include "../../kselftest.h"
+#include "kselftest.h"
/* <linux/elf.h> and <sys/auxv.h> don't like each other, so: */
#ifndef NT_ARM_ZA
@@ -108,7 +108,6 @@ static int get_zt(pid_t pid, char zt[ZT_SIG_REG_BYTES])
return ptrace(PTRACE_GETREGSET, pid, NT_ARM_ZT, &iov);
}
-
static int set_zt(pid_t pid, const char zt[ZT_SIG_REG_BYTES])
{
struct iovec iov;
diff --git a/tools/testing/selftests/arm64/fp/zt-test.S b/tools/testing/selftests/arm64/fp/zt-test.S
index 38080f3c3280..a8df05771670 100644
--- a/tools/testing/selftests/arm64/fp/zt-test.S
+++ b/tools/testing/selftests/arm64/fp/zt-test.S
@@ -276,7 +276,7 @@ function barf
bl putdec
puts ", iteration="
mov x0, x22
- bl putdec
+ bl putdecn
puts "\tExpected ["
mov x0, x10
mov x1, x12
diff --git a/tools/testing/selftests/arm64/gcs/Makefile b/tools/testing/selftests/arm64/gcs/Makefile
index d2f3497a9103..1fbbf0ca1f02 100644
--- a/tools/testing/selftests/arm64/gcs/Makefile
+++ b/tools/testing/selftests/arm64/gcs/Makefile
@@ -14,11 +14,11 @@ LDLIBS+=-lpthread
include ../../lib.mk
$(OUTPUT)/basic-gcs: basic-gcs.c
- $(CC) -g -fno-asynchronous-unwind-tables -fno-ident -s -Os -nostdlib \
- -static -include ../../../../include/nolibc/nolibc.h \
+ $(CC) $(CFLAGS) -fno-asynchronous-unwind-tables -fno-ident -s -nostdlib -nostdinc \
+ -static -I../../../../include/nolibc -include ../../../../include/nolibc/nolibc.h \
-I../../../../../usr/include \
-std=gnu99 -I../.. -g \
- -ffreestanding -Wall $^ -o $@ -lgcc
+ -ffreestanding $^ -o $@ -lgcc
$(OUTPUT)/gcs-stress-thread: gcs-stress-thread.S
$(CC) -nostdlib $^ -o $@
diff --git a/tools/testing/selftests/arm64/gcs/basic-gcs.c b/tools/testing/selftests/arm64/gcs/basic-gcs.c
index 3fb9742342a3..250977abc398 100644
--- a/tools/testing/selftests/arm64/gcs/basic-gcs.c
+++ b/tools/testing/selftests/arm64/gcs/basic-gcs.c
@@ -10,6 +10,7 @@
#include <sys/mman.h>
#include <asm/mman.h>
+#include <asm/hwcap.h>
#include <linux/sched.h>
#include "kselftest.h"
@@ -298,6 +299,68 @@ out:
return pass;
}
+/* A vfork()ed process can run and exit */
+static bool test_vfork(void)
+{
+ unsigned long child_mode;
+ int ret, status;
+ pid_t pid;
+ bool pass = true;
+
+ pid = vfork();
+ if (pid == -1) {
+ ksft_print_msg("vfork() failed: %d\n", errno);
+ pass = false;
+ goto out;
+ }
+ if (pid == 0) {
+ /*
+ * In child, make sure we can call a function, read
+ * the GCS pointer and status and then exit.
+ */
+ valid_gcs_function();
+ get_gcspr();
+
+ ret = my_syscall5(__NR_prctl, PR_GET_SHADOW_STACK_STATUS,
+ &child_mode, 0, 0, 0);
+ if (ret == 0 && !(child_mode & PR_SHADOW_STACK_ENABLE)) {
+ ksft_print_msg("GCS not enabled in child\n");
+ ret = EXIT_FAILURE;
+ }
+
+ _exit(ret);
+ }
+
+ /*
+ * In parent, check we can still do function calls then check
+ * on the child.
+ */
+ valid_gcs_function();
+
+ ksft_print_msg("Waiting for child %d\n", pid);
+
+ ret = waitpid(pid, &status, 0);
+ if (ret == -1) {
+ ksft_print_msg("Failed to wait for child: %d\n",
+ errno);
+ return false;
+ }
+
+ if (!WIFEXITED(status)) {
+ ksft_print_msg("Child exited due to signal %d\n",
+ WTERMSIG(status));
+ pass = false;
+ } else if (WEXITSTATUS(status)) {
+ ksft_print_msg("Child exited with status %d\n",
+ WEXITSTATUS(status));
+ pass = false;
+ }
+
+out:
+
+ return pass;
+}
+
typedef bool (*gcs_test)(void);
static struct {
@@ -314,6 +377,7 @@ static struct {
{ "enable_invalid", enable_invalid, true },
{ "map_guarded_stack", map_guarded_stack },
{ "fork", test_fork },
+ { "vfork", test_vfork },
};
int main(void)
@@ -323,14 +387,13 @@ int main(void)
ksft_print_header();
- /*
- * We don't have getauxval() with nolibc so treat a failure to
- * read GCS state as a lack of support and skip.
- */
+ if (!(getauxval(AT_HWCAP) & HWCAP_GCS))
+ ksft_exit_skip("SKIP GCS not supported\n");
+
ret = my_syscall5(__NR_prctl, PR_GET_SHADOW_STACK_STATUS,
&gcs_mode, 0, 0, 0);
if (ret != 0)
- ksft_exit_skip("Failed to read GCS state: %d\n", ret);
+ ksft_exit_fail_msg("Failed to read GCS state: %d\n", ret);
if (!(gcs_mode & PR_SHADOW_STACK_ENABLE)) {
gcs_mode = PR_SHADOW_STACK_ENABLE;
@@ -347,7 +410,7 @@ int main(void)
}
/* One last test: disable GCS, we can do this one time */
- my_syscall5(__NR_prctl, PR_SET_SHADOW_STACK_STATUS, 0, 0, 0, 0);
+ ret = my_syscall5(__NR_prctl, PR_SET_SHADOW_STACK_STATUS, 0, 0, 0, 0);
if (ret != 0)
ksft_print_msg("Failed to disable GCS: %d\n", ret);
diff --git a/tools/testing/selftests/arm64/gcs/gcs-locking.c b/tools/testing/selftests/arm64/gcs/gcs-locking.c
index 989f75a491b7..1e6abb136ffd 100644
--- a/tools/testing/selftests/arm64/gcs/gcs-locking.c
+++ b/tools/testing/selftests/arm64/gcs/gcs-locking.c
@@ -165,7 +165,6 @@ TEST_F(valid_modes, lock_enable_disable_others)
ASSERT_EQ(ret, 0);
ASSERT_EQ(mode, PR_SHADOW_STACK_ALL_MODES);
-
ret = my_syscall2(__NR_prctl, PR_SET_SHADOW_STACK_STATUS,
variant->mode);
ASSERT_EQ(ret, 0);
diff --git a/tools/testing/selftests/arm64/gcs/gcs-stress.c b/tools/testing/selftests/arm64/gcs/gcs-stress.c
index bbc7f4950c13..86d8cd42aee7 100644
--- a/tools/testing/selftests/arm64/gcs/gcs-stress.c
+++ b/tools/testing/selftests/arm64/gcs/gcs-stress.c
@@ -24,7 +24,7 @@
#include <sys/wait.h>
#include <asm/hwcap.h>
-#include "../../kselftest.h"
+#include "kselftest.h"
struct child_data {
char *name, *output;
@@ -433,7 +433,7 @@ int main(int argc, char **argv)
evs = calloc(tests, sizeof(*evs));
if (!evs)
- ksft_exit_fail_msg("Failed to allocated %d epoll events\n",
+ ksft_exit_fail_msg("Failed to allocate %d epoll events\n",
tests);
for (i = 0; i < gcs_threads; i++)
diff --git a/tools/testing/selftests/arm64/mte/check_buffer_fill.c b/tools/testing/selftests/arm64/mte/check_buffer_fill.c
index 2ee7f114d7fa..ff4e07503349 100644
--- a/tools/testing/selftests/arm64/mte/check_buffer_fill.c
+++ b/tools/testing/selftests/arm64/mte/check_buffer_fill.c
@@ -31,7 +31,7 @@ static int check_buffer_by_byte(int mem_type, int mode)
int i, j, item;
bool err;
- mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, false);
item = ARRAY_SIZE(sizes);
for (i = 0; i < item; i++) {
@@ -68,7 +68,7 @@ static int check_buffer_underflow_by_byte(int mem_type, int mode,
bool err;
char *und_ptr = NULL;
- mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, false);
item = ARRAY_SIZE(sizes);
for (i = 0; i < item; i++) {
ptr = (char *)mte_allocate_memory_tag_range(sizes[i], mem_type, 0,
@@ -164,7 +164,7 @@ static int check_buffer_overflow_by_byte(int mem_type, int mode,
size_t tagged_size, overflow_size;
char *over_ptr = NULL;
- mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, false);
item = ARRAY_SIZE(sizes);
for (i = 0; i < item; i++) {
ptr = (char *)mte_allocate_memory_tag_range(sizes[i], mem_type, 0,
@@ -337,7 +337,7 @@ static int check_buffer_by_block(int mem_type, int mode)
{
int i, item, result = KSFT_PASS;
- mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, false);
item = ARRAY_SIZE(sizes);
cur_mte_cxt.fault_valid = false;
for (i = 0; i < item; i++) {
@@ -368,7 +368,7 @@ static int check_memory_initial_tags(int mem_type, int mode, int mapping)
int run, fd;
int total = ARRAY_SIZE(sizes);
- mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, false);
for (run = 0; run < total; run++) {
/* check initial tags for anonymous mmap */
ptr = (char *)mte_allocate_memory(sizes[run], mem_type, mapping, false);
@@ -415,7 +415,7 @@ int main(int argc, char *argv[])
return err;
/* Register SIGSEGV handler */
- mte_register_signal(SIGSEGV, mte_default_handler);
+ mte_register_signal(SIGSEGV, mte_default_handler, false);
/* Set test plan */
ksft_set_plan(20);
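The new third argument to mte_switch_mode() selects store-only tag checking: as wired up in mte_common_util.c later in this series, passing true ORs PR_MTE_STORE_ONLY into the PR_SET_TAGGED_ADDR_CTRL word when the hardware supports it. A brief usage sketch:

    /* Tag-check both loads and stores (the existing behaviour): */
    mte_switch_mode(MTE_SYNC_ERR, MTE_ALLOW_NON_ZERO_TAG, false);

    /* Tag-check stores only; a no-op unless mtestonly_support was
     * set from HWCAP3_MTE_STORE_ONLY at setup time:
     */
    mte_switch_mode(MTE_SYNC_ERR, MTE_ALLOW_NON_ZERO_TAG, true);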
diff --git a/tools/testing/selftests/arm64/mte/check_child_memory.c b/tools/testing/selftests/arm64/mte/check_child_memory.c
index 7597fc632cad..5e97ee792e4d 100644
--- a/tools/testing/selftests/arm64/mte/check_child_memory.c
+++ b/tools/testing/selftests/arm64/mte/check_child_memory.c
@@ -88,7 +88,7 @@ static int check_child_memory_mapping(int mem_type, int mode, int mapping)
int item = ARRAY_SIZE(sizes);
item = ARRAY_SIZE(sizes);
- mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, false);
for (run = 0; run < item; run++) {
ptr = (char *)mte_allocate_memory_tag_range(sizes[run], mem_type, mapping,
UNDERFLOW, OVERFLOW);
@@ -109,7 +109,7 @@ static int check_child_file_mapping(int mem_type, int mode, int mapping)
int run, fd, map_size, result = KSFT_PASS;
int total = ARRAY_SIZE(sizes);
- mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, false);
for (run = 0; run < total; run++) {
fd = create_temp_file();
if (fd == -1)
@@ -160,8 +160,8 @@ int main(int argc, char *argv[])
return err;
/* Register SIGSEGV handler */
- mte_register_signal(SIGSEGV, mte_default_handler);
- mte_register_signal(SIGBUS, mte_default_handler);
+ mte_register_signal(SIGSEGV, mte_default_handler, false);
+ mte_register_signal(SIGBUS, mte_default_handler, false);
/* Set test plan */
ksft_set_plan(12);
diff --git a/tools/testing/selftests/arm64/mte/check_hugetlb_options.c b/tools/testing/selftests/arm64/mte/check_hugetlb_options.c
index 3bfcd3848432..aad1234c7e0f 100644
--- a/tools/testing/selftests/arm64/mte/check_hugetlb_options.c
+++ b/tools/testing/selftests/arm64/mte/check_hugetlb_options.c
@@ -151,7 +151,7 @@ static int check_hugetlb_memory_mapping(int mem_type, int mode, int mapping, int
map_size = default_huge_page_size();
- mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, false);
map_ptr = (char *)mte_allocate_memory(map_size, mem_type, mapping, false);
if (check_allocated_memory(map_ptr, map_size, mem_type, false) != KSFT_PASS)
return KSFT_FAIL;
@@ -180,7 +180,7 @@ static int check_clear_prot_mte_flag(int mem_type, int mode, int mapping)
unsigned long map_size;
prot_flag = PROT_READ | PROT_WRITE;
- mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, false);
map_size = default_huge_page_size();
map_ptr = (char *)mte_allocate_memory_tag_range(map_size, mem_type, mapping,
0, 0);
@@ -210,7 +210,7 @@ static int check_child_hugetlb_memory_mapping(int mem_type, int mode, int mappin
map_size = default_huge_page_size();
- mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, false);
ptr = (char *)mte_allocate_memory_tag_range(map_size, mem_type, mapping,
0, 0);
if (check_allocated_memory_range(ptr, map_size, mem_type,
@@ -235,8 +235,8 @@ int main(int argc, char *argv[])
return err;
/* Register signal handlers */
- mte_register_signal(SIGBUS, mte_default_handler);
- mte_register_signal(SIGSEGV, mte_default_handler);
+ mte_register_signal(SIGBUS, mte_default_handler, false);
+ mte_register_signal(SIGSEGV, mte_default_handler, false);
allocate_hugetlb();
diff --git a/tools/testing/selftests/arm64/mte/check_ksm_options.c b/tools/testing/selftests/arm64/mte/check_ksm_options.c
index 88c74bc46d4f..0cf5faef1724 100644
--- a/tools/testing/selftests/arm64/mte/check_ksm_options.c
+++ b/tools/testing/selftests/arm64/mte/check_ksm_options.c
@@ -106,7 +106,7 @@ static int check_madvise_options(int mem_type, int mode, int mapping)
return err;
}
- mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, false);
ptr = mte_allocate_memory(TEST_UNIT * page_sz, mem_type, mapping, true);
if (check_allocated_memory(ptr, TEST_UNIT * page_sz, mem_type, false) != KSFT_PASS)
return KSFT_FAIL;
@@ -141,8 +141,8 @@ int main(int argc, char *argv[])
return KSFT_FAIL;
}
/* Register signal handlers */
- mte_register_signal(SIGBUS, mte_default_handler);
- mte_register_signal(SIGSEGV, mte_default_handler);
+ mte_register_signal(SIGBUS, mte_default_handler, false);
+ mte_register_signal(SIGSEGV, mte_default_handler, false);
/* Set test plan */
ksft_set_plan(4);
diff --git a/tools/testing/selftests/arm64/mte/check_mmap_options.c b/tools/testing/selftests/arm64/mte/check_mmap_options.c
index 17694caaff53..c100af3012cb 100644
--- a/tools/testing/selftests/arm64/mte/check_mmap_options.c
+++ b/tools/testing/selftests/arm64/mte/check_mmap_options.c
@@ -3,6 +3,7 @@
#define _GNU_SOURCE
+#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
@@ -23,6 +24,35 @@
#define OVERFLOW MT_GRANULE_SIZE
#define TAG_CHECK_ON 0
#define TAG_CHECK_OFF 1
+#define ATAG_CHECK_ON 1
+#define ATAG_CHECK_OFF 0
+
+#define TEST_NAME_MAX 256
+
+enum mte_mem_check_type {
+ CHECK_ANON_MEM = 0,
+ CHECK_FILE_MEM = 1,
+ CHECK_CLEAR_PROT_MTE = 2,
+};
+
+enum mte_tag_op_type {
+ TAG_OP_ALL = 0,
+ TAG_OP_STONLY = 1,
+};
+
+struct check_mmap_testcase {
+ int check_type;
+ int mem_type;
+ int mte_sync;
+ int mapping;
+ int tag_check;
+ int atag_check;
+ int tag_op;
+ bool enable_tco;
+};
+
static size_t page_size;
static int sizes[] = {
@@ -30,8 +60,17 @@ static int sizes[] = {
/* page size - 1*/ 0, /* page_size */ 0, /* page size + 1 */ 0
};
-static int check_mte_memory(char *ptr, int size, int mode, int tag_check)
+static int check_mte_memory(char *ptr, int size, int mode,
+ int tag_check, int atag_check, int tag_op)
{
+ char buf[MT_GRANULE_SIZE];
+
+ if (!mtefar_support && atag_check == ATAG_CHECK_ON)
+ return KSFT_SKIP;
+
+ if (atag_check == ATAG_CHECK_ON)
+ ptr = mte_insert_atag(ptr);
+
mte_initialize_current_context(mode, (uintptr_t)ptr, size);
memset(ptr, '1', size);
mte_wait_after_trig();
@@ -54,16 +93,34 @@ static int check_mte_memory(char *ptr, int size, int mode, int tag_check)
if (cur_mte_cxt.fault_valid == true && tag_check == TAG_CHECK_OFF)
return KSFT_FAIL;
+ if (tag_op == TAG_OP_STONLY) {
+ mte_initialize_current_context(mode, (uintptr_t)ptr, -UNDERFLOW);
+ memcpy(buf, ptr - UNDERFLOW, MT_GRANULE_SIZE);
+ mte_wait_after_trig();
+ if (cur_mte_cxt.fault_valid == true)
+ return KSFT_FAIL;
+
+ mte_initialize_current_context(mode, (uintptr_t)ptr, size + OVERFLOW);
+ memcpy(buf, ptr + size, MT_GRANULE_SIZE);
+ mte_wait_after_trig();
+ if (cur_mte_cxt.fault_valid == true)
+ return KSFT_FAIL;
+ }
+
return KSFT_PASS;
}
-static int check_anonymous_memory_mapping(int mem_type, int mode, int mapping, int tag_check)
+static int check_anonymous_memory_mapping(int mem_type, int mode, int mapping,
+ int tag_check, int atag_check, int tag_op)
{
char *ptr, *map_ptr;
int run, result, map_size;
int item = ARRAY_SIZE(sizes);
- mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
+ if (tag_op == TAG_OP_STONLY && !mtestonly_support)
+ return KSFT_SKIP;
+
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, tag_op);
for (run = 0; run < item; run++) {
map_size = sizes[run] + OVERFLOW + UNDERFLOW;
map_ptr = (char *)mte_allocate_memory(map_size, mem_type, mapping, false);
@@ -79,23 +136,27 @@ static int check_anonymous_memory_mapping(int mem_type, int mode, int mapping, i
munmap((void *)map_ptr, map_size);
return KSFT_FAIL;
}
- result = check_mte_memory(ptr, sizes[run], mode, tag_check);
+ result = check_mte_memory(ptr, sizes[run], mode, tag_check, atag_check, tag_op);
mte_clear_tags((void *)ptr, sizes[run]);
mte_free_memory((void *)map_ptr, map_size, mem_type, false);
- if (result == KSFT_FAIL)
- return KSFT_FAIL;
+ if (result != KSFT_PASS)
+ return result;
}
return KSFT_PASS;
}
-static int check_file_memory_mapping(int mem_type, int mode, int mapping, int tag_check)
+static int check_file_memory_mapping(int mem_type, int mode, int mapping,
+ int tag_check, int atag_check, int tag_op)
{
char *ptr, *map_ptr;
int run, fd, map_size;
int total = ARRAY_SIZE(sizes);
int result = KSFT_PASS;
- mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
+ if (tag_op == TAG_OP_STONLY && !mtestonly_support)
+ return KSFT_SKIP;
+
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, tag_op);
for (run = 0; run < total; run++) {
fd = create_temp_file();
if (fd == -1)
@@ -117,24 +178,24 @@ static int check_file_memory_mapping(int mem_type, int mode, int mapping, int ta
close(fd);
return KSFT_FAIL;
}
- result = check_mte_memory(ptr, sizes[run], mode, tag_check);
+ result = check_mte_memory(ptr, sizes[run], mode, tag_check, atag_check, tag_op);
mte_clear_tags((void *)ptr, sizes[run]);
munmap((void *)map_ptr, map_size);
close(fd);
- if (result == KSFT_FAIL)
- break;
+ if (result != KSFT_PASS)
+ return result;
}
- return result;
+ return KSFT_PASS;
}
-static int check_clear_prot_mte_flag(int mem_type, int mode, int mapping)
+static int check_clear_prot_mte_flag(int mem_type, int mode, int mapping, int atag_check)
{
char *ptr, *map_ptr;
int run, prot_flag, result, fd, map_size;
int total = ARRAY_SIZE(sizes);
prot_flag = PROT_READ | PROT_WRITE;
- mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, false);
for (run = 0; run < total; run++) {
map_size = sizes[run] + OVERFLOW + UNDERFLOW;
ptr = (char *)mte_allocate_memory_tag_range(sizes[run], mem_type, mapping,
@@ -150,10 +211,10 @@ static int check_clear_prot_mte_flag(int mem_type, int mode, int mapping)
ksft_print_msg("FAIL: mprotect not ignoring clear PROT_MTE property\n");
return KSFT_FAIL;
}
- result = check_mte_memory(ptr, sizes[run], mode, TAG_CHECK_ON);
+ result = check_mte_memory(ptr, sizes[run], mode, TAG_CHECK_ON, atag_check, TAG_OP_ALL);
mte_free_memory_tag_range((void *)ptr, sizes[run], mem_type, UNDERFLOW, OVERFLOW);
if (result != KSFT_PASS)
- return KSFT_FAIL;
+ return result;
fd = create_temp_file();
if (fd == -1)
@@ -174,19 +235,715 @@ static int check_clear_prot_mte_flag(int mem_type, int mode, int mapping)
close(fd);
return KSFT_FAIL;
}
- result = check_mte_memory(ptr, sizes[run], mode, TAG_CHECK_ON);
+ result = check_mte_memory(ptr, sizes[run], mode, TAG_CHECK_ON, atag_check, TAG_OP_ALL);
mte_free_memory_tag_range((void *)ptr, sizes[run], mem_type, UNDERFLOW, OVERFLOW);
close(fd);
if (result != KSFT_PASS)
- return KSFT_FAIL;
+ return result;
}
return KSFT_PASS;
}
+const char *format_test_name(struct check_mmap_testcase *tc)
+{
+ static char test_name[TEST_NAME_MAX];
+ const char *check_type_str;
+ const char *mem_type_str;
+ const char *sync_str;
+ const char *mapping_str;
+ const char *tag_check_str;
+ const char *atag_check_str;
+ const char *tag_op_str;
+
+ switch (tc->check_type) {
+ case CHECK_ANON_MEM:
+ check_type_str = "anonymous memory";
+ break;
+ case CHECK_FILE_MEM:
+ check_type_str = "file memory";
+ break;
+ case CHECK_CLEAR_PROT_MTE:
+ check_type_str = "clear PROT_MTE flags";
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ switch (tc->mem_type) {
+ case USE_MMAP:
+ mem_type_str = "mmap";
+ break;
+ case USE_MPROTECT:
+ mem_type_str = "mmap/mprotect";
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ switch (tc->mte_sync) {
+ case MTE_NONE_ERR:
+ sync_str = "no error";
+ break;
+ case MTE_SYNC_ERR:
+ sync_str = "sync error";
+ break;
+ case MTE_ASYNC_ERR:
+ sync_str = "async error";
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ switch (tc->mapping) {
+ case MAP_SHARED:
+ mapping_str = "shared";
+ break;
+ case MAP_PRIVATE:
+ mapping_str = "private";
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ switch (tc->tag_check) {
+ case TAG_CHECK_ON:
+ tag_check_str = "tag check on";
+ break;
+ case TAG_CHECK_OFF:
+ tag_check_str = "tag check off";
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ switch (tc->atag_check) {
+ case ATAG_CHECK_ON:
+ atag_check_str = "with address tag [63:60]";
+ break;
+ case ATAG_CHECK_OFF:
+ atag_check_str = "without address tag [63:60]";
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ switch (tc->tag_op) {
+ case TAG_OP_ALL:
+ tag_op_str = "";
+ break;
+ case TAG_OP_STONLY:
+ tag_op_str = " / store-only";
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ snprintf(test_name, TEST_NAME_MAX,
+ "Check %s with %s mapping, %s mode, %s memory and %s (%s%s)\n",
+ check_type_str, mapping_str, sync_str, mem_type_str,
+ tag_check_str, atag_check_str, tag_op_str);
+
+ return test_name;
+}
+
int main(int argc, char *argv[])
{
- int err;
+ int err, i;
int item = ARRAY_SIZE(sizes);
+ struct check_mmap_testcase test_cases[] = {
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_OFF,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = true,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_OFF,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = true,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_NONE_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_OFF,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_NONE_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_OFF,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_CLEAR_PROT_MTE,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_CLEAR_PROT_MTE,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_OFF,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_ANON_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_SHARED,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_FILE_MEM,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_ASYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_STONLY,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_CLEAR_PROT_MTE,
+ .mem_type = USE_MMAP,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ {
+ .check_type = CHECK_CLEAR_PROT_MTE,
+ .mem_type = USE_MPROTECT,
+ .mte_sync = MTE_SYNC_ERR,
+ .mapping = MAP_PRIVATE,
+ .tag_check = TAG_CHECK_ON,
+ .atag_check = ATAG_CHECK_ON,
+ .tag_op = TAG_OP_ALL,
+ .enable_tco = false,
+ },
+ };
err = mte_default_setup();
if (err)
@@ -200,64 +957,51 @@ int main(int argc, char *argv[])
sizes[item - 2] = page_size;
sizes[item - 1] = page_size + 1;
- /* Register signal handlers */
- mte_register_signal(SIGBUS, mte_default_handler);
- mte_register_signal(SIGSEGV, mte_default_handler);
-
/* Set test plan */
- ksft_set_plan(22);
-
- mte_enable_pstate_tco();
-
- evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_OFF),
- "Check anonymous memory with private mapping, sync error mode, mmap memory and tag check off\n");
- evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_OFF),
- "Check file memory with private mapping, sync error mode, mmap/mprotect memory and tag check off\n");
-
- mte_disable_pstate_tco();
- evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_NONE_ERR, MAP_PRIVATE, TAG_CHECK_OFF),
- "Check anonymous memory with private mapping, no error mode, mmap memory and tag check off\n");
- evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_NONE_ERR, MAP_PRIVATE, TAG_CHECK_OFF),
- "Check file memory with private mapping, no error mode, mmap/mprotect memory and tag check off\n");
-
- evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
- "Check anonymous memory with private mapping, sync error mode, mmap memory and tag check on\n");
- evaluate_test(check_anonymous_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
- "Check anonymous memory with private mapping, sync error mode, mmap/mprotect memory and tag check on\n");
- evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
- "Check anonymous memory with shared mapping, sync error mode, mmap memory and tag check on\n");
- evaluate_test(check_anonymous_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
- "Check anonymous memory with shared mapping, sync error mode, mmap/mprotect memory and tag check on\n");
- evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
- "Check anonymous memory with private mapping, async error mode, mmap memory and tag check on\n");
- evaluate_test(check_anonymous_memory_mapping(USE_MPROTECT, MTE_ASYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
- "Check anonymous memory with private mapping, async error mode, mmap/mprotect memory and tag check on\n");
- evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
- "Check anonymous memory with shared mapping, async error mode, mmap memory and tag check on\n");
- evaluate_test(check_anonymous_memory_mapping(USE_MPROTECT, MTE_ASYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
- "Check anonymous memory with shared mapping, async error mode, mmap/mprotect memory and tag check on\n");
-
- evaluate_test(check_file_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
- "Check file memory with private mapping, sync error mode, mmap memory and tag check on\n");
- evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
- "Check file memory with private mapping, sync error mode, mmap/mprotect memory and tag check on\n");
- evaluate_test(check_file_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
- "Check file memory with shared mapping, sync error mode, mmap memory and tag check on\n");
- evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
- "Check file memory with shared mapping, sync error mode, mmap/mprotect memory and tag check on\n");
- evaluate_test(check_file_memory_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
- "Check file memory with private mapping, async error mode, mmap memory and tag check on\n");
- evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_ASYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
- "Check file memory with private mapping, async error mode, mmap/mprotect memory and tag check on\n");
- evaluate_test(check_file_memory_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
- "Check file memory with shared mapping, async error mode, mmap memory and tag check on\n");
- evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_ASYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
- "Check file memory with shared mapping, async error mode, mmap/mprotect memory and tag check on\n");
-
- evaluate_test(check_clear_prot_mte_flag(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE),
- "Check clear PROT_MTE flags with private mapping, sync error mode and mmap memory\n");
- evaluate_test(check_clear_prot_mte_flag(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE),
- "Check clear PROT_MTE flags with private mapping and sync error mode and mmap/mprotect memory\n");
+ ksft_set_plan(ARRAY_SIZE(test_cases));
+
+ for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
+ /* Register signal handlers */
+ mte_register_signal(SIGBUS, mte_default_handler,
+ test_cases[i].atag_check == ATAG_CHECK_ON);
+ mte_register_signal(SIGSEGV, mte_default_handler,
+ test_cases[i].atag_check == ATAG_CHECK_ON);
+
+ if (test_cases[i].enable_tco)
+ mte_enable_pstate_tco();
+ else
+ mte_disable_pstate_tco();
+
+ switch (test_cases[i].check_type) {
+ case CHECK_ANON_MEM:
+ evaluate_test(check_anonymous_memory_mapping(test_cases[i].mem_type,
+ test_cases[i].mte_sync,
+ test_cases[i].mapping,
+ test_cases[i].tag_check,
+ test_cases[i].atag_check,
+ test_cases[i].tag_op),
+ format_test_name(&test_cases[i]));
+ break;
+ case CHECK_FILE_MEM:
+ evaluate_test(check_file_memory_mapping(test_cases[i].mem_type,
+ test_cases[i].mte_sync,
+ test_cases[i].mapping,
+ test_cases[i].tag_check,
+ test_cases[i].atag_check,
+ test_cases[i].tag_op),
+ format_test_name(&test_cases[i]));
+ break;
+ case CHECK_CLEAR_PROT_MTE:
+ evaluate_test(check_clear_prot_mte_flag(test_cases[i].mem_type,
+ test_cases[i].mte_sync,
+ test_cases[i].mapping,
+ test_cases[i].atag_check),
+ format_test_name(&test_cases[i]));
+ break;
+ default:
+ exit(KSFT_FAIL);
+ }
+ }
mte_restore_setup();
ksft_print_cnts();
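As a worked example of the generated names: the first table entry (CHECK_ANON_MEM, USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_OFF, ATAG_CHECK_OFF, TAG_OP_ALL) is reported by format_test_name() as:

    Check anonymous memory with private mapping, sync error mode, mmap memory and tag check off (without address tag [63:60])

which matches the hand-written strings of the old evaluate_test() calls, with the new address-tag and store-only qualifiers appended for the other entries.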
diff --git a/tools/testing/selftests/arm64/mte/check_prctl.c b/tools/testing/selftests/arm64/mte/check_prctl.c
index 4c89e9538ca0..f7f320defa7b 100644
--- a/tools/testing/selftests/arm64/mte/check_prctl.c
+++ b/tools/testing/selftests/arm64/mte/check_prctl.c
@@ -12,6 +12,10 @@
#include "kselftest.h"
+#ifndef AT_HWCAP3
+#define AT_HWCAP3 29
+#endif
+
static int set_tagged_addr_ctrl(int val)
{
int ret;
@@ -60,7 +64,7 @@ void check_basic_read(void)
/*
* Attempt to set a specified combination of modes.
*/
-void set_mode_test(const char *name, int hwcap2, int mask)
+void set_mode_test(const char *name, int hwcap2, int hwcap3, int mask)
{
int ret;
@@ -69,6 +73,11 @@ void set_mode_test(const char *name, int hwcap2, int mask)
return;
}
+ if ((getauxval(AT_HWCAP3) & hwcap3) != hwcap3) {
+ ksft_test_result_skip("%s\n", name);
+ return;
+ }
+
ret = set_tagged_addr_ctrl(mask);
if (ret < 0) {
ksft_test_result_fail("%s\n", name);
@@ -81,7 +90,7 @@ void set_mode_test(const char *name, int hwcap2, int mask)
return;
}
- if ((ret & PR_MTE_TCF_MASK) == mask) {
+ if ((ret & (PR_MTE_TCF_MASK | PR_MTE_STORE_ONLY)) == mask) {
ksft_test_result_pass("%s\n", name);
} else {
ksft_print_msg("Got %x, expected %x\n",
@@ -93,12 +102,16 @@ void set_mode_test(const char *name, int hwcap2, int mask)
struct mte_mode {
int mask;
int hwcap2;
+ int hwcap3;
const char *name;
} mte_modes[] = {
- { PR_MTE_TCF_NONE, 0, "NONE" },
- { PR_MTE_TCF_SYNC, HWCAP2_MTE, "SYNC" },
- { PR_MTE_TCF_ASYNC, HWCAP2_MTE, "ASYNC" },
- { PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC, HWCAP2_MTE, "SYNC+ASYNC" },
+ { PR_MTE_TCF_NONE, 0, 0, "NONE" },
+ { PR_MTE_TCF_SYNC, HWCAP2_MTE, 0, "SYNC" },
+ { PR_MTE_TCF_ASYNC, HWCAP2_MTE, 0, "ASYNC" },
+ { PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC, HWCAP2_MTE, 0, "SYNC+ASYNC" },
+ { PR_MTE_TCF_SYNC | PR_MTE_STORE_ONLY, HWCAP2_MTE, HWCAP3_MTE_STORE_ONLY, "SYNC+STONLY" },
+ { PR_MTE_TCF_ASYNC | PR_MTE_STORE_ONLY, HWCAP2_MTE, HWCAP3_MTE_STORE_ONLY, "ASYNC+STONLY" },
+ { PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC | PR_MTE_STORE_ONLY, HWCAP2_MTE, HWCAP3_MTE_STORE_ONLY, "SYNC+ASYNC+STONLY" },
};
int main(void)
@@ -106,11 +119,11 @@ int main(void)
int i;
ksft_print_header();
- ksft_set_plan(5);
+ ksft_set_plan(ARRAY_SIZE(mte_modes));
check_basic_read();
for (i = 0; i < ARRAY_SIZE(mte_modes); i++)
- set_mode_test(mte_modes[i].name, mte_modes[i].hwcap2,
+ set_mode_test(mte_modes[i].name, mte_modes[i].hwcap2, mte_modes[i].hwcap3,
mte_modes[i].mask);
ksft_print_cnts();
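For reference, the new SYNC+STONLY entry exercises a control word along these lines. This is a sketch assuming the PR_MTE_STORE_ONLY and HWCAP3_MTE_STORE_ONLY definitions introduced by this series, with the tag inclusion mask left at its default:

    if (getauxval(AT_HWCAP3) & HWCAP3_MTE_STORE_ONLY) {
            unsigned long ctrl = PR_TAGGED_ADDR_ENABLE |
                                 PR_MTE_TCF_SYNC | PR_MTE_STORE_ONLY;

            /* Tag faults are raised synchronously, but only for stores. */
            prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0);
    }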
diff --git a/tools/testing/selftests/arm64/mte/check_tags_inclusion.c b/tools/testing/selftests/arm64/mte/check_tags_inclusion.c
index a3d1e23fe02a..4b764f2a8185 100644
--- a/tools/testing/selftests/arm64/mte/check_tags_inclusion.c
+++ b/tools/testing/selftests/arm64/mte/check_tags_inclusion.c
@@ -57,7 +57,7 @@ static int check_single_included_tags(int mem_type, int mode)
return KSFT_FAIL;
for (tag = 0; (tag < MT_TAG_COUNT) && (result == KSFT_PASS); tag++) {
- ret = mte_switch_mode(mode, MT_INCLUDE_VALID_TAG(tag));
+ ret = mte_switch_mode(mode, MT_INCLUDE_VALID_TAG(tag), false);
if (ret != 0)
result = KSFT_FAIL;
/* Try to catch an excluded tag by a number of tries. */
@@ -91,7 +91,7 @@ static int check_multiple_included_tags(int mem_type, int mode)
for (tag = 0; (tag < MT_TAG_COUNT - 1) && (result == KSFT_PASS); tag++) {
excl_mask |= 1 << tag;
- mte_switch_mode(mode, MT_INCLUDE_VALID_TAGS(excl_mask));
+ mte_switch_mode(mode, MT_INCLUDE_VALID_TAGS(excl_mask), false);
/* Try to catch an excluded tag by a number of tries. */
for (run = 0; (run < RUNS) && (result == KSFT_PASS); run++) {
ptr = mte_insert_tags(ptr, BUFFER_SIZE);
@@ -120,7 +120,7 @@ static int check_all_included_tags(int mem_type, int mode)
mem_type, false) != KSFT_PASS)
return KSFT_FAIL;
- ret = mte_switch_mode(mode, MT_INCLUDE_TAG_MASK);
+ ret = mte_switch_mode(mode, MT_INCLUDE_TAG_MASK, false);
if (ret != 0)
return KSFT_FAIL;
/* Try to catch an excluded tag by a number of tries. */
@@ -145,7 +145,7 @@ static int check_none_included_tags(int mem_type, int mode)
if (check_allocated_memory(ptr, BUFFER_SIZE, mem_type, false) != KSFT_PASS)
return KSFT_FAIL;
- ret = mte_switch_mode(mode, MT_EXCLUDE_TAG_MASK);
+ ret = mte_switch_mode(mode, MT_EXCLUDE_TAG_MASK, false);
if (ret != 0)
return KSFT_FAIL;
/* Try to catch an excluded tag by a number of tries. */
@@ -180,7 +180,7 @@ int main(int argc, char *argv[])
return err;
/* Register SIGSEGV handler */
- mte_register_signal(SIGSEGV, mte_default_handler);
+ mte_register_signal(SIGSEGV, mte_default_handler, false);
/* Set test plan */
ksft_set_plan(4);
diff --git a/tools/testing/selftests/arm64/mte/check_user_mem.c b/tools/testing/selftests/arm64/mte/check_user_mem.c
index f4ae5f87a3b7..fb7936c4e097 100644
--- a/tools/testing/selftests/arm64/mte/check_user_mem.c
+++ b/tools/testing/selftests/arm64/mte/check_user_mem.c
@@ -44,7 +44,7 @@ static int check_usermem_access_fault(int mem_type, int mode, int mapping,
err = KSFT_PASS;
len = 2 * page_sz;
- mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
+ mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG, false);
fd = create_temp_file();
if (fd == -1)
return KSFT_FAIL;
@@ -211,7 +211,7 @@ int main(int argc, char *argv[])
return err;
/* Register signal handlers */
- mte_register_signal(SIGSEGV, mte_default_handler);
+ mte_register_signal(SIGSEGV, mte_default_handler, false);
/* Set test plan */
ksft_set_plan(64);
diff --git a/tools/testing/selftests/arm64/mte/mte_common_util.c b/tools/testing/selftests/arm64/mte/mte_common_util.c
index a1dc2fe5285b..397e57dd946a 100644
--- a/tools/testing/selftests/arm64/mte/mte_common_util.c
+++ b/tools/testing/selftests/arm64/mte/mte_common_util.c
@@ -6,6 +6,7 @@
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
+#include <time.h>
#include <unistd.h>
#include <linux/auxvec.h>
@@ -19,20 +20,40 @@
#include "mte_common_util.h"
#include "mte_def.h"
+#ifndef SA_EXPOSE_TAGBITS
+#define SA_EXPOSE_TAGBITS 0x00000800
+#endif
+
#define INIT_BUFFER_SIZE 256
struct mte_fault_cxt cur_mte_cxt;
+bool mtefar_support;
+bool mtestonly_support;
static unsigned int mte_cur_mode;
static unsigned int mte_cur_pstate_tco;
+static bool mte_cur_stonly;
void mte_default_handler(int signum, siginfo_t *si, void *uc)
{
+ struct sigaction sa;
unsigned long addr = (unsigned long)si->si_addr;
+ unsigned char si_tag, si_atag;
+
+ sigaction(signum, NULL, &sa);
+
+ if (sa.sa_flags & SA_EXPOSE_TAGBITS) {
+ si_tag = MT_FETCH_TAG(addr);
+ si_atag = MT_FETCH_ATAG(addr);
+ addr = MT_CLEAR_TAGS(addr);
+ } else {
+ si_tag = 0;
+ si_atag = 0;
+ }
if (signum == SIGSEGV) {
#ifdef DEBUG
- ksft_print_msg("INFO: SIGSEGV signal at pc=%lx, fault addr=%lx, si_code=%lx\n",
- ((ucontext_t *)uc)->uc_mcontext.pc, addr, si->si_code);
+ ksft_print_msg("INFO: SIGSEGV signal at pc=%lx, fault addr=%lx, si_code=%lx, si_tag=%x, si_atag=%x\n",
+ ((ucontext_t *)uc)->uc_mcontext.pc, addr, si->si_code, si_tag, si_atag);
#endif
if (si->si_code == SEGV_MTEAERR) {
if (cur_mte_cxt.trig_si_code == si->si_code)
@@ -45,13 +66,18 @@ void mte_default_handler(int signum, siginfo_t *si, void *uc)
}
/* Compare the context for precise error */
else if (si->si_code == SEGV_MTESERR) {
+ if ((!mtefar_support && si_atag) || (si_atag != MT_FETCH_ATAG(cur_mte_cxt.trig_addr))) {
+ ksft_print_msg("Invalid MTE synchronous exception caught for address tag! si_tag=%x, si_atag: %x\n", si_tag, si_atag);
+ exit(KSFT_FAIL);
+ }
+
if (cur_mte_cxt.trig_si_code == si->si_code &&
((cur_mte_cxt.trig_range >= 0 &&
- addr >= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) &&
- addr <= (MT_CLEAR_TAG(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)) ||
+ addr >= MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) &&
+ addr <= (MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)) ||
(cur_mte_cxt.trig_range < 0 &&
- addr <= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) &&
- addr >= (MT_CLEAR_TAG(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)))) {
+ addr <= MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) &&
+ addr >= (MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)))) {
cur_mte_cxt.fault_valid = true;
/* Adjust the pc by 4 */
((ucontext_t *)uc)->uc_mcontext.pc += 4;
@@ -67,11 +93,11 @@ void mte_default_handler(int signum, siginfo_t *si, void *uc)
ksft_print_msg("INFO: SIGBUS signal at pc=%llx, fault addr=%lx, si_code=%x\n",
((ucontext_t *)uc)->uc_mcontext.pc, addr, si->si_code);
if ((cur_mte_cxt.trig_range >= 0 &&
- addr >= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) &&
- addr <= (MT_CLEAR_TAG(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)) ||
+ addr >= MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) &&
+ addr <= (MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)) ||
(cur_mte_cxt.trig_range < 0 &&
- addr <= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) &&
- addr >= (MT_CLEAR_TAG(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range))) {
+ addr <= MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) &&
+ addr >= (MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range))) {
cur_mte_cxt.fault_valid = true;
/* Adjust the pc by 4 */
((ucontext_t *)uc)->uc_mcontext.pc += 4;
@@ -79,12 +105,17 @@ void mte_default_handler(int signum, siginfo_t *si, void *uc)
}
}
-void mte_register_signal(int signal, void (*handler)(int, siginfo_t *, void *))
+void mte_register_signal(int signal, void (*handler)(int, siginfo_t *, void *),
+ bool export_tags)
{
struct sigaction sa;
sa.sa_sigaction = handler;
sa.sa_flags = SA_SIGINFO;
+
+ if (export_tags && signal == SIGSEGV)
+ sa.sa_flags |= SA_EXPOSE_TAGBITS;
+
sigemptyset(&sa.sa_mask);
sigaction(signal, &sa, NULL);
}
@@ -120,6 +151,19 @@ void mte_clear_tags(void *ptr, size_t size)
mte_clear_tag_address_range(ptr, size);
}
+void *mte_insert_atag(void *ptr)
+{
+ unsigned char atag;
+
+ atag = mtefar_support ? (random() % MT_ATAG_MASK) + 1 : 0;
+ return (void *)MT_SET_ATAG((unsigned long)ptr, atag);
+}
+
+void *mte_clear_atag(void *ptr)
+{
+ return (void *)MT_CLEAR_ATAG((unsigned long)ptr);
+}
+
static void *__mte_allocate_memory_range(size_t size, int mem_type, int mapping,
size_t range_before, size_t range_after,
bool tags, int fd)
@@ -272,7 +316,7 @@ void mte_initialize_current_context(int mode, uintptr_t ptr, ssize_t range)
cur_mte_cxt.trig_si_code = 0;
}
-int mte_switch_mode(int mte_option, unsigned long incl_mask)
+int mte_switch_mode(int mte_option, unsigned long incl_mask, bool stonly)
{
unsigned long en = 0;
@@ -304,6 +348,9 @@ int mte_switch_mode(int mte_option, unsigned long incl_mask)
break;
}
+ if (mtestonly_support && stonly)
+ en |= PR_MTE_STORE_ONLY;
+
en |= (incl_mask << PR_MTE_TAG_SHIFT);
/* Enable address tagging ABI, mte error reporting mode and tag inclusion mask. */
if (prctl(PR_SET_TAGGED_ADDR_CTRL, en, 0, 0, 0) != 0) {
@@ -316,12 +363,21 @@ int mte_switch_mode(int mte_option, unsigned long incl_mask)
int mte_default_setup(void)
{
unsigned long hwcaps2 = getauxval(AT_HWCAP2);
+ unsigned long hwcaps3 = getauxval(AT_HWCAP3);
unsigned long en = 0;
int ret;
+ /* To generate random address tag */
+ srandom(time(NULL));
+
if (!(hwcaps2 & HWCAP2_MTE))
ksft_exit_skip("MTE features unavailable\n");
+ mtefar_support = !!(hwcaps3 & HWCAP3_MTE_FAR);
+
+ if (hwcaps3 & HWCAP3_MTE_STORE_ONLY)
+ mtestonly_support = true;
+
/* Get current mte mode */
ret = prctl(PR_GET_TAGGED_ADDR_CTRL, en, 0, 0, 0);
if (ret < 0) {
@@ -335,6 +391,8 @@ int mte_default_setup(void)
else if (ret & PR_MTE_TCF_NONE)
mte_cur_mode = MTE_NONE_ERR;
+ mte_cur_stonly = (ret & PR_MTE_STORE_ONLY) ? true : false;
+
mte_cur_pstate_tco = mte_get_pstate_tco();
/* Disable PSTATE.TCO */
mte_disable_pstate_tco();
@@ -343,7 +401,7 @@ int mte_default_setup(void)
void mte_restore_setup(void)
{
- mte_switch_mode(mte_cur_mode, MTE_ALLOW_NON_ZERO_TAG);
+ mte_switch_mode(mte_cur_mode, MTE_ALLOW_NON_ZERO_TAG, mte_cur_stonly);
if (mte_cur_pstate_tco == MT_PSTATE_TCO_EN)
mte_enable_pstate_tco();
else if (mte_cur_pstate_tco == MT_PSTATE_TCO_DIS)
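The handler above keys off SA_EXPOSE_TAGBITS: with that flag set, the kernel leaves the MTE tag in bits 59:56 of si_addr and, on hardware reporting HWCAP3_MTE_FAR, the address tag in bits 63:60, instead of clearing them. A minimal registration sketch, mirroring what mte_register_signal() now does when export_tags is true:

    struct sigaction sa = { 0 };

    sa.sa_sigaction = mte_default_handler;
    sa.sa_flags = SA_SIGINFO | SA_EXPOSE_TAGBITS;
    sigemptyset(&sa.sa_mask);
    sigaction(SIGSEGV, &sa, NULL);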
diff --git a/tools/testing/selftests/arm64/mte/mte_common_util.h b/tools/testing/selftests/arm64/mte/mte_common_util.h
index a0017a303beb..250d671329a5 100644
--- a/tools/testing/selftests/arm64/mte/mte_common_util.h
+++ b/tools/testing/selftests/arm64/mte/mte_common_util.h
@@ -37,10 +37,13 @@ struct mte_fault_cxt {
};
extern struct mte_fault_cxt cur_mte_cxt;
+extern bool mtefar_support;
+extern bool mtestonly_support;
/* MTE utility functions */
void mte_default_handler(int signum, siginfo_t *si, void *uc);
-void mte_register_signal(int signal, void (*handler)(int, siginfo_t *, void *));
+void mte_register_signal(int signal, void (*handler)(int, siginfo_t *, void *),
+ bool export_tags);
void mte_wait_after_trig(void);
void *mte_allocate_memory(size_t size, int mem_type, int mapping, bool tags);
void *mte_allocate_memory_tag_range(size_t size, int mem_type, int mapping,
@@ -54,9 +57,11 @@ void mte_free_memory_tag_range(void *ptr, size_t size, int mem_type,
size_t range_before, size_t range_after);
void *mte_insert_tags(void *ptr, size_t size);
void mte_clear_tags(void *ptr, size_t size);
+void *mte_insert_atag(void *ptr);
+void *mte_clear_atag(void *ptr);
int mte_default_setup(void);
void mte_restore_setup(void);
-int mte_switch_mode(int mte_option, unsigned long incl_mask);
+int mte_switch_mode(int mte_option, unsigned long incl_mask, bool stonly);
void mte_initialize_current_context(int mode, uintptr_t ptr, ssize_t range);
/* Common utility functions */
diff --git a/tools/testing/selftests/arm64/mte/mte_def.h b/tools/testing/selftests/arm64/mte/mte_def.h
index 9b188254b61a..6ad22f07c9b8 100644
--- a/tools/testing/selftests/arm64/mte/mte_def.h
+++ b/tools/testing/selftests/arm64/mte/mte_def.h
@@ -42,6 +42,8 @@
#define MT_TAG_COUNT 16
#define MT_INCLUDE_TAG_MASK 0xFFFF
#define MT_EXCLUDE_TAG_MASK 0x0
+#define MT_ATAG_SHIFT 60
+#define MT_ATAG_MASK 0xFUL
#define MT_ALIGN_GRANULE (MT_GRANULE_SIZE - 1)
#define MT_CLEAR_TAG(x) ((x) & ~(MT_TAG_MASK << MT_TAG_SHIFT))
@@ -49,6 +51,12 @@
#define MT_FETCH_TAG(x) ((x >> MT_TAG_SHIFT) & (MT_TAG_MASK))
#define MT_ALIGN_UP(x) ((x + MT_ALIGN_GRANULE) & ~(MT_ALIGN_GRANULE))
+#define MT_CLEAR_ATAG(x) ((x) & ~(MT_ATAG_MASK << MT_ATAG_SHIFT))
+#define MT_SET_ATAG(x, y) ((x) | (((y) & MT_ATAG_MASK) << MT_ATAG_SHIFT))
+#define MT_FETCH_ATAG(x) ((x >> MT_ATAG_SHIFT) & (MT_ATAG_MASK))
+
+#define MT_CLEAR_TAGS(x) (MT_CLEAR_ATAG(MT_CLEAR_TAG(x)))
+
#define MT_PSTATE_TCO_SHIFT 25
#define MT_PSTATE_TCO_MASK ~(0x1 << MT_PSTATE_TCO_SHIFT)
#define MT_PSTATE_TCO_EN 1
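A worked example of the new address-tag macros, with values chosen purely for illustration:

    unsigned long p = 0x0800ffff00001000UL;  /* MTE tag 0x8, atag 0x0 */

    p = MT_SET_ATAG(p, 0x3);                 /* 0x3800ffff00001000 */
    /* MT_FETCH_ATAG(p) == 0x3, MT_FETCH_TAG(p) == 0x8 */
    /* MT_CLEAR_TAGS(p) == 0x0000ffff00001000: both tag fields cleared */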
diff --git a/tools/testing/selftests/arm64/pauth/exec_target.c b/tools/testing/selftests/arm64/pauth/exec_target.c
index 4435600ca400..e597861b26d6 100644
--- a/tools/testing/selftests/arm64/pauth/exec_target.c
+++ b/tools/testing/selftests/arm64/pauth/exec_target.c
@@ -13,7 +13,12 @@ int main(void)
unsigned long hwcaps;
size_t val;
- fread(&val, sizeof(size_t), 1, stdin);
+ size_t size = fread(&val, sizeof(size_t), 1, stdin);
+
+ if (size != 1) {
+ fprintf(stderr, "Could not read input from stdin\n");
+ return EXIT_FAILURE;
+ }
/* don't try to execute illegal (unimplemented) instructions; the caller
* should have checked this, to keep the worker simple
diff --git a/tools/testing/selftests/arm64/pauth/pac.c b/tools/testing/selftests/arm64/pauth/pac.c
index 6d21b2fc758d..67d138057707 100644
--- a/tools/testing/selftests/arm64/pauth/pac.c
+++ b/tools/testing/selftests/arm64/pauth/pac.c
@@ -10,7 +10,7 @@
#include <setjmp.h>
#include <sched.h>
-#include "../../kselftest_harness.h"
+#include "kselftest_harness.h"
#include "helper.h"
#define PAC_COLLISION_ATTEMPTS 1000
diff --git a/tools/testing/selftests/arm64/tags/tags_test.c b/tools/testing/selftests/arm64/tags/tags_test.c
index 8ae26e496c89..375ab47f0edb 100644
--- a/tools/testing/selftests/arm64/tags/tags_test.c
+++ b/tools/testing/selftests/arm64/tags/tags_test.c
@@ -6,7 +6,7 @@
#include <stdint.h>
#include <sys/prctl.h>
#include <sys/utsname.h>
-#include "../../kselftest.h"
+#include "kselftest.h"
#define SHIFT_TAG(tag) ((uint64_t)(tag) << 56)
#define SET_TAG(ptr, tag) (((uint64_t)(ptr) & ~SHIFT_TAG(0xff)) | \
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index e2a2c46c008b..19c1638e312a 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -21,9 +21,9 @@ test_lirc_mode2_user
flow_dissector_load
test_tcpnotify_user
test_libbpf
-test_sysctl
xdping
test_cpp
+test_progs_verification_cert
*.d
*.subskel.h
*.skel.h
@@ -33,7 +33,6 @@ test_cpp
/cpuv4
/host-tools
/tools
-/runqslower
/bench
/veristat
/sign-file
@@ -45,3 +44,4 @@ xdp_redirect_multi
xdp_synproxy
xdp_hw_metadata
xdp_features
+verification_cert.h
diff --git a/tools/testing/selftests/bpf/DENYLIST b/tools/testing/selftests/bpf/DENYLIST
index 1789a61d0a9b..f748f2c33b22 100644
--- a/tools/testing/selftests/bpf/DENYLIST
+++ b/tools/testing/selftests/bpf/DENYLIST
@@ -1,6 +1,5 @@
# TEMPORARY
# Alphabetical order
-dynptr/test_probe_read_user_str_dynptr # disabled until https://patchwork.kernel.org/project/linux-mm/patch/20250422131449.57177-1-mykyta.yatsenko5@gmail.com/ makes it into the bpf-next
get_stack_raw_tp # spams with kernel warnings until next bpf -> bpf-next merge
stacktrace_build_id
stacktrace_build_id_nmi
diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64 b/tools/testing/selftests/bpf/DENYLIST.aarch64
deleted file mode 100644
index 12e99c0277a8..000000000000
--- a/tools/testing/selftests/bpf/DENYLIST.aarch64
+++ /dev/null
@@ -1 +0,0 @@
-tracing_struct/struct_many_args # struct_many_args:FAIL:tracing_struct_many_args__attach unexpected error: -524
diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x
index 3ebd77206f98..a17baf8c6fd7 100644
--- a/tools/testing/selftests/bpf/DENYLIST.s390x
+++ b/tools/testing/selftests/bpf/DENYLIST.s390x
@@ -2,4 +2,3 @@
# Alphabetical order
get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace)
stacktrace_build_id # compare_map_keys stackid_hmap vs. stackmap err -2 errno 2 (?)
-verifier_iterating_callbacks
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index cf5ed3bee573..b7030a6e2e76 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -46,6 +46,7 @@ endif
CFLAGS += -g $(OPT_FLAGS) -rdynamic -std=gnu11 \
-Wall -Werror -fno-omit-frame-pointer \
+ -Wno-unused-but-set-variable \
$(GENFLAGS) $(SAN_CFLAGS) $(LIBELF_CFLAGS) \
-I$(CURDIR) -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \
-I$(TOOLSINCDIR) -I$(TOOLSARCHINCDIR) -I$(APIDIR) -I$(OUTPUT)
@@ -73,7 +74,7 @@ endif
# Order correspond to 'make run_tests' order
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_progs \
test_sockmap \
- test_tcpnotify_user test_sysctl \
+ test_tcpnotify_user \
test_progs-no_alu32
TEST_INST_SUBDIRS := no_alu32
@@ -98,17 +99,15 @@ TEST_GEN_PROGS += test_progs-cpuv4
TEST_INST_SUBDIRS += cpuv4
endif
-TEST_GEN_FILES = test_tc_edt.bpf.o
TEST_FILES = xsk_prereqs.sh $(wildcard progs/btf_dump_test_case_*.c)
# Order correspond to 'make run_tests' order
TEST_PROGS := test_kmod.sh \
test_lirc_mode2.sh \
- test_tc_tunnel.sh \
- test_tc_edt.sh \
test_xdping.sh \
test_bpftool_build.sh \
test_bpftool.sh \
+ test_bpftool_map.sh \
test_bpftool_metadata.sh \
test_doc_build.sh \
test_xsk.sh \
@@ -119,14 +118,13 @@ TEST_PROGS_EXTENDED := \
test_bpftool.py
TEST_KMODS := bpf_testmod.ko bpf_test_no_cfi.ko bpf_test_modorder_x.ko \
- bpf_test_modorder_y.ko
+ bpf_test_modorder_y.ko bpf_test_rqspinlock.ko
TEST_KMOD_TARGETS = $(addprefix $(OUTPUT)/,$(TEST_KMODS))
# Compile but not part of 'make run_tests'
TEST_GEN_PROGS_EXTENDED = \
bench \
flow_dissector_load \
- runqslower \
test_cpp \
test_lirc_mode2_user \
veristat \
@@ -136,7 +134,7 @@ TEST_GEN_PROGS_EXTENDED = \
xdping \
xskxceiver
-TEST_GEN_FILES += liburandom_read.so urandom_read sign-file uprobe_multi
+TEST_GEN_FILES += $(TEST_KMODS) liburandom_read.so urandom_read sign-file uprobe_multi
ifneq ($(V),1)
submake_extras := feature_display=0
@@ -208,8 +206,6 @@ HOST_INCLUDE_DIR := $(INCLUDE_DIR)
endif
HOST_BPFOBJ := $(HOST_BUILD_DIR)/libbpf/libbpf.a
RESOLVE_BTFIDS := $(HOST_BUILD_DIR)/resolve_btfids/resolve_btfids
-RUNQSLOWER_OUTPUT := $(BUILD_DIR)/runqslower/
-
VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \
$(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \
../../../../vmlinux \
@@ -220,7 +216,7 @@ ifeq ($(VMLINUX_BTF),)
$(error Cannot find a vmlinux for VMLINUX_BTF at any of "$(VMLINUX_BTF_PATHS)")
endif
-# Define simple and short `make test_progs`, `make test_sysctl`, etc targets
+# Define simple and short `make test_progs`, `make test_maps`, etc targets
# to build individual tests.
# NOTE: Semicolon at the end is critical to override lib.mk's default static
# rule for binaries.
@@ -231,7 +227,7 @@ $(notdir $(TEST_GEN_PROGS) $(TEST_KMODS) \
MAKE_DIRS := $(sort $(BUILD_DIR)/libbpf $(HOST_BUILD_DIR)/libbpf \
$(BUILD_DIR)/bpftool $(HOST_BUILD_DIR)/bpftool \
$(HOST_BUILD_DIR)/resolve_btfids \
- $(RUNQSLOWER_OUTPUT) $(INCLUDE_DIR))
+ $(INCLUDE_DIR))
$(MAKE_DIRS):
$(call msg,MKDIR,,$@)
$(Q)mkdir -p $@
@@ -303,17 +299,6 @@ TRUNNER_BPFTOOL := $(DEFAULT_BPFTOOL)
USE_BOOTSTRAP := "bootstrap/"
endif
-$(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL) $(RUNQSLOWER_OUTPUT)
- $(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/runqslower \
- OUTPUT=$(RUNQSLOWER_OUTPUT) VMLINUX_BTF=$(VMLINUX_BTF) \
- BPFTOOL_OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \
- BPFOBJ_OUTPUT=$(BUILD_DIR)/libbpf/ \
- BPFOBJ=$(BPFOBJ) BPF_INCLUDE=$(INCLUDE_DIR) \
- BPF_TARGET_ENDIAN=$(BPF_TARGET_ENDIAN) \
- EXTRA_CFLAGS='-g $(OPT_FLAGS) $(SAN_CFLAGS) $(EXTRA_CFLAGS)' \
- EXTRA_LDFLAGS='$(SAN_LDFLAGS) $(EXTRA_LDFLAGS)' && \
- cp $(RUNQSLOWER_OUTPUT)runqslower $@
-
TEST_GEN_PROGS_EXTENDED += $(TRUNNER_BPFTOOL)
$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(BPFOBJ)
@@ -329,7 +314,6 @@ NETWORK_HELPERS := $(OUTPUT)/network_helpers.o
$(OUTPUT)/test_sockmap: $(CGROUP_HELPERS) $(TESTING_HELPERS)
$(OUTPUT)/test_tcpnotify_user: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(TRACE_HELPERS)
$(OUTPUT)/test_sock_fields: $(CGROUP_HELPERS) $(TESTING_HELPERS)
-$(OUTPUT)/test_sysctl: $(CGROUP_HELPERS) $(TESTING_HELPERS)
$(OUTPUT)/test_tag: $(TESTING_HELPERS)
$(OUTPUT)/test_lirc_mode2_user: $(TESTING_HELPERS)
$(OUTPUT)/xdping: $(TESTING_HELPERS)
@@ -398,7 +382,7 @@ $(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
DESTDIR=$(HOST_SCRATCH_DIR)/ prefix= all install_headers
endif
-# vmlinux.h is first dumped to a temprorary file and then compared to
+# vmlinux.h is first dumped to a temporary file and then compared to
# the previous version. This helps to avoid unnecessary re-builds of
# $(TRUNNER_BPF_OBJS)
$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL) | $(INCLUDE_DIR)
@@ -453,7 +437,9 @@ BPF_CFLAGS = -g -Wall -Werror -D__TARGET_ARCH_$(SRCARCH) $(MENDIAN) \
-I$(abspath $(OUTPUT)/../usr/include) \
-std=gnu11 \
-fno-strict-aliasing \
- -Wno-compare-distinct-pointer-types
+ -Wno-compare-distinct-pointer-types \
+ -Wno-initializer-overrides \
+ #
# TODO: enable me -Wsign-compare
CLANG_CFLAGS = $(CLANG_SYS_INCLUDES)
@@ -496,15 +482,17 @@ LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \
test_subskeleton.skel.h test_subskeleton_lib.skel.h \
test_usdt.skel.h
-LSKELS := fentry_test.c fexit_test.c fexit_sleep.c atomics.c \
- trace_printk.c trace_vprintk.c map_ptr_kern.c \
+LSKELS := fexit_sleep.c trace_printk.c trace_vprintk.c map_ptr_kern.c \
core_kern.c core_kern_overflow.c test_ringbuf.c \
- test_ringbuf_n.c test_ringbuf_map_key.c test_ringbuf_write.c
+ test_ringbuf_n.c test_ringbuf_map_key.c test_ringbuf_write.c \
+ test_ringbuf_overwrite.c
+
+LSKELS_SIGNED := fentry_test.c fexit_test.c atomics.c
# Generate both light skeleton and libbpf skeleton for these
LSKELS_EXTRA := test_ksyms_module.c test_ksyms_weak.c kfunc_call_test.c \
kfunc_call_test_subprog.c
-SKEL_BLACKLIST += $$(LSKELS)
+SKEL_BLACKLIST += $$(LSKELS) $$(LSKELS_SIGNED)
test_static_linked.skel.h-deps := test_static_linked1.bpf.o test_static_linked2.bpf.o
linked_funcs.skel.h-deps := linked_funcs1.bpf.o linked_funcs2.bpf.o
@@ -535,12 +523,15 @@ HEADERS_FOR_BPF_OBJS := $(wildcard $(BPFDIR)/*.bpf.h) \
# $2 - test runner extra "flavor" (e.g., no_alu32, cpuv4, bpf_gcc, etc)
define DEFINE_TEST_RUNNER
+LSKEL_SIGN := -S -k $(PRIVATE_KEY) -i $(VERIFICATION_CERT)
TRUNNER_OUTPUT := $(OUTPUT)$(if $2,/)$2
TRUNNER_BINARY := $1$(if $2,-)$2
TRUNNER_TEST_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.test.o, \
$$(notdir $$(wildcard $(TRUNNER_TESTS_DIR)/*.c)))
TRUNNER_EXTRA_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.o, \
$$(filter %.c,$(TRUNNER_EXTRA_SOURCES)))
+TRUNNER_LIB_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.o, \
+ $$(filter %.c,$(TRUNNER_LIB_SOURCES)))
TRUNNER_EXTRA_HDRS := $$(filter %.h,$(TRUNNER_EXTRA_SOURCES))
TRUNNER_TESTS_HDR := $(TRUNNER_TESTS_DIR)/tests.h
TRUNNER_BPF_SRCS := $$(notdir $$(wildcard $(TRUNNER_BPF_PROGS_DIR)/*.c))
@@ -550,6 +541,7 @@ TRUNNER_BPF_SKELS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.skel.h, \
$$(TRUNNER_BPF_SRCS)))
TRUNNER_BPF_LSKELS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.lskel.h, $$(LSKELS) $$(LSKELS_EXTRA))
TRUNNER_BPF_SKELS_LINKED := $$(addprefix $$(TRUNNER_OUTPUT)/,$(LINKED_SKELS))
+TRUNNER_BPF_LSKELS_SIGNED := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.lskel.h, $$(LSKELS_SIGNED))
TEST_GEN_FILES += $$(TRUNNER_BPF_OBJS)
# Evaluate rules now with extra TRUNNER_XXX variables above already defined
@@ -604,6 +596,15 @@ $(TRUNNER_BPF_LSKELS): %.lskel.h: %.bpf.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
$(Q)$$(BPFTOOL) gen skeleton -L $$(<:.o=.llinked3.o) name $$(notdir $$(<:.bpf.o=_lskel)) > $$@
$(Q)rm -f $$(<:.o=.llinked1.o) $$(<:.o=.llinked2.o) $$(<:.o=.llinked3.o)
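+# Same triple 'gen object' pass as the unsigned lskel rule above: the
+# diff of llinked2 vs llinked3 verifies that repeated static linking has
+# reached a fixed point before the signed light skeleton is generated
+# with the test key and certificate (see LSKEL_SIGN).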
+$(TRUNNER_BPF_LSKELS_SIGNED): %.lskel.h: %.bpf.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
+ $$(call msg,GEN-SKEL,$(TRUNNER_BINARY) (signed),$$@)
+ $(Q)$$(BPFTOOL) gen object $$(<:.o=.llinked1.o) $$<
+ $(Q)$$(BPFTOOL) gen object $$(<:.o=.llinked2.o) $$(<:.o=.llinked1.o)
+ $(Q)$$(BPFTOOL) gen object $$(<:.o=.llinked3.o) $$(<:.o=.llinked2.o)
+ $(Q)diff $$(<:.o=.llinked2.o) $$(<:.o=.llinked3.o)
+ $(Q)$$(BPFTOOL) gen skeleton $(LSKEL_SIGN) $$(<:.o=.llinked3.o) name $$(notdir $$(<:.bpf.o=_lskel)) > $$@
+ $(Q)rm -f $$(<:.o=.llinked1.o) $$(<:.o=.llinked2.o) $$(<:.o=.llinked3.o)
+
$(LINKED_BPF_OBJS): %: $(TRUNNER_OUTPUT)/%
# .SECONDEXPANSION here allows to correctly expand %-deps variables as prerequisites
@@ -653,6 +654,7 @@ $(TRUNNER_TEST_OBJS:.o=.d): $(TRUNNER_OUTPUT)/%.test.d: \
$(TRUNNER_EXTRA_HDRS) \
$(TRUNNER_BPF_SKELS) \
$(TRUNNER_BPF_LSKELS) \
+ $(TRUNNER_BPF_LSKELS_SIGNED) \
$(TRUNNER_BPF_SKELS_LINKED) \
$$(BPFOBJ) | $(TRUNNER_OUTPUT)
@@ -667,11 +669,16 @@ $(foreach N,$(patsubst $(TRUNNER_OUTPUT)/%.o,%,$(TRUNNER_EXTRA_OBJS)), \
$(TRUNNER_EXTRA_OBJS): $(TRUNNER_OUTPUT)/%.o: \
%.c \
$(TRUNNER_EXTRA_HDRS) \
+ $(VERIFY_SIG_HDR) \
$(TRUNNER_TESTS_HDR) \
$$(BPFOBJ) | $(TRUNNER_OUTPUT)
$$(call msg,EXT-OBJ,$(TRUNNER_BINARY),$$@)
$(Q)$$(CC) $$(CFLAGS) -c $$< $$(LDLIBS) -o $$@
+$(TRUNNER_LIB_OBJS): $(TRUNNER_OUTPUT)/%.o:$(TOOLSDIR)/lib/%.c
+ $$(call msg,LIB-OBJ,$(TRUNNER_BINARY),$$@)
+ $(Q)$$(CC) $$(CFLAGS) -c $$< $$(LDLIBS) -o $$@
+
# non-flavored in-srctree builds receive special treatment, in particular, we
# do not need to copy extra resources (see e.g. test_btf_dump_case())
$(TRUNNER_BINARY)-extras: $(TRUNNER_EXTRA_FILES) | $(TRUNNER_OUTPUT)
@@ -685,6 +692,7 @@ $(OUTPUT)/$(TRUNNER_BINARY): | $(TRUNNER_BPF_OBJS)
$(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \
$(TRUNNER_EXTRA_OBJS) $$(BPFOBJ) \
+ $(TRUNNER_LIB_OBJS) \
$(RESOLVE_BTFIDS) \
$(TRUNNER_BPFTOOL) \
$(OUTPUT)/veristat \
@@ -697,6 +705,19 @@ $(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \
endef
+VERIFY_SIG_SETUP := $(CURDIR)/verify_sig_setup.sh
+VERIFY_SIG_HDR := verification_cert.h
+VERIFICATION_CERT := $(BUILD_DIR)/signing_key.der
+PRIVATE_KEY := $(BUILD_DIR)/signing_key.pem
+
+$(VERIFICATION_CERT) $(PRIVATE_KEY): $(VERIFY_SIG_SETUP)
+ $(Q)mkdir -p $(BUILD_DIR)
+ $(Q)$(VERIFY_SIG_SETUP) genkey $(BUILD_DIR)
+
+$(VERIFY_SIG_HDR): $(VERIFICATION_CERT)
+ $(Q)ln -fs $< test_progs_verification_cert && \
+ xxd -i test_progs_verification_cert > $@
+
# Define test_progs test runner.
TRUNNER_TESTS_DIR := prog_tests
TRUNNER_BPF_PROGS_DIR := progs
@@ -716,8 +737,10 @@ TRUNNER_EXTRA_SOURCES := test_progs.c \
disasm.c \
disasm_helpers.c \
json_writer.c \
+ $(VERIFY_SIG_HDR) \
flow_dissector_load.h \
ip_check_defrag_frags.h
+TRUNNER_LIB_SOURCES := find_bit.c
TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read \
$(OUTPUT)/liburandom_read.so \
$(OUTPUT)/xdp_synproxy \
@@ -725,7 +748,7 @@ TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read \
$(OUTPUT)/uprobe_multi \
$(TEST_KMOD_TARGETS) \
ima_setup.sh \
- verify_sig_setup.sh \
+ $(VERIFY_SIG_SETUP) \
$(wildcard progs/btf_dump_test_case_*.c) \
$(wildcard progs/*.bpf.o)
TRUNNER_BPF_BUILD_RULE := CLANG_BPF_BUILD_RULE
@@ -755,6 +778,7 @@ endif
TRUNNER_TESTS_DIR := map_tests
TRUNNER_BPF_PROGS_DIR := progs
TRUNNER_EXTRA_SOURCES := test_maps.c
+TRUNNER_LIB_SOURCES :=
TRUNNER_EXTRA_FILES :=
TRUNNER_BPF_BUILD_RULE := $$(error no BPF objects should be built)
TRUNNER_BPF_CFLAGS :=
@@ -776,7 +800,7 @@ $(OUTPUT)/test_verifier: test_verifier.c verifier/tests.h $(BPFOBJ) | $(OUTPUT)
$(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
# Include find_bit.c to compile xskxceiver.
-EXTRA_SRC := $(TOOLSDIR)/lib/find_bit.c
+EXTRA_SRC := $(TOOLSDIR)/lib/find_bit.c prog_tests/test_xsk.c prog_tests/test_xsk.h
$(OUTPUT)/xskxceiver: $(EXTRA_SRC) xskxceiver.c xskxceiver.h $(OUTPUT)/network_helpers.o $(OUTPUT)/xsk.o $(OUTPUT)/xsk_xdp_progs.skel.h $(BPFOBJ) | $(OUTPUT)
$(call msg,BINARY,,$@)
$(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
@@ -816,6 +840,7 @@ $(OUTPUT)/bench_bpf_hashmap_lookup.o: $(OUTPUT)/bpf_hashmap_lookup.skel.h
$(OUTPUT)/bench_htab_mem.o: $(OUTPUT)/htab_mem_bench.skel.h
$(OUTPUT)/bench_bpf_crypto.o: $(OUTPUT)/crypto_bench.skel.h
$(OUTPUT)/bench_sockmap.o: $(OUTPUT)/bench_sockmap_prog.skel.h
+$(OUTPUT)/bench_lpm_trie_map.o: $(OUTPUT)/lpm_trie_bench.skel.h $(OUTPUT)/lpm_trie_map.skel.h
$(OUTPUT)/bench.o: bench.h testing_helpers.h $(BPFOBJ)
$(OUTPUT)/bench: LDLIBS += -lm
$(OUTPUT)/bench: $(OUTPUT)/bench.o \
@@ -837,10 +862,16 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o \
$(OUTPUT)/bench_htab_mem.o \
$(OUTPUT)/bench_bpf_crypto.o \
$(OUTPUT)/bench_sockmap.o \
+ $(OUTPUT)/bench_lpm_trie_map.o \
#
$(call msg,BINARY,,$@)
$(Q)$(CC) $(CFLAGS) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@
+# This works around GCC warning about snprintf truncating strings like:
+#
+# char a[PATH_MAX], b[PATH_MAX];
+#   snprintf(a, sizeof(a), "%s/foo", b); // triggers -Wformat-truncation
+$(OUTPUT)/veristat.o: CFLAGS += -Wno-format-truncation
$(OUTPUT)/veristat.o: $(BPFOBJ)
$(OUTPUT)/veristat: $(OUTPUT)/veristat.o
$(call msg,BINARY,,$@)
@@ -859,7 +890,8 @@ EXTRA_CLEAN := $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
$(addprefix $(OUTPUT)/,*.o *.d *.skel.h *.lskel.h *.subskel.h \
no_alu32 cpuv4 bpf_gcc \
liburandom_read.so) \
- $(OUTPUT)/FEATURE-DUMP.selftests
+ $(OUTPUT)/FEATURE-DUMP.selftests \
+ test_progs_verification_cert
.PHONY: docs docs-clean
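For reference, xxd -i names its output symbols after the input file, so
the verification_cert.h generated above should expose an unsigned char
test_progs_verification_cert[] array plus a matching _len counter. A
minimal sketch of consuming such an embedded blob, with placeholder
bytes rather than a real DER certificate:

#include <stdio.h>

/* stand-in for the xxd -i output; real contents would be the
 * DER-encoded signing certificate */
unsigned char test_progs_verification_cert[] = { 0x30, 0x82, 0x01, 0x0a };
unsigned int test_progs_verification_cert_len = 4;

int main(void)
{
	printf("embedded cert: %u bytes, first byte 0x%02x\n",
	       test_progs_verification_cert_len,
	       test_progs_verification_cert[0]);
	return 0;
}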
diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c
index ddd73d06a1eb..bd29bb2e6cb5 100644
--- a/tools/testing/selftests/bpf/bench.c
+++ b/tools/testing/selftests/bpf/bench.c
@@ -284,6 +284,7 @@ extern struct argp bench_htab_mem_argp;
extern struct argp bench_trigger_batch_argp;
extern struct argp bench_crypto_argp;
extern struct argp bench_sockmap_argp;
+extern struct argp bench_lpm_trie_map_argp;
static const struct argp_child bench_parsers[] = {
{ &bench_ringbufs_argp, 0, "Ring buffers benchmark", 0 },
@@ -299,6 +300,7 @@ static const struct argp_child bench_parsers[] = {
{ &bench_trigger_batch_argp, 0, "BPF triggering benchmark", 0 },
{ &bench_crypto_argp, 0, "bpf crypto benchmark", 0 },
{ &bench_sockmap_argp, 0, "bpf sockmap benchmark", 0 },
+ { &bench_lpm_trie_map_argp, 0, "LPM trie map benchmark", 0 },
{},
};
@@ -499,7 +501,7 @@ extern const struct bench bench_rename_rawtp;
extern const struct bench bench_rename_fentry;
extern const struct bench bench_rename_fexit;
-/* pure counting benchmarks to establish theoretical lmits */
+/* pure counting benchmarks to establish theoretical limits */
extern const struct bench bench_trig_usermode_count;
extern const struct bench bench_trig_syscall_count;
extern const struct bench bench_trig_kernel_count;
@@ -510,6 +512,8 @@ extern const struct bench bench_trig_kretprobe;
extern const struct bench bench_trig_kprobe_multi;
extern const struct bench bench_trig_kretprobe_multi;
extern const struct bench bench_trig_fentry;
+extern const struct bench bench_trig_kprobe_multi_all;
+extern const struct bench bench_trig_kretprobe_multi_all;
extern const struct bench bench_trig_fexit;
extern const struct bench bench_trig_fmodret;
extern const struct bench bench_trig_tp;
@@ -558,6 +562,13 @@ extern const struct bench bench_htab_mem;
extern const struct bench bench_crypto_encrypt;
extern const struct bench bench_crypto_decrypt;
extern const struct bench bench_sockmap;
+extern const struct bench bench_lpm_trie_noop;
+extern const struct bench bench_lpm_trie_baseline;
+extern const struct bench bench_lpm_trie_lookup;
+extern const struct bench bench_lpm_trie_insert;
+extern const struct bench bench_lpm_trie_update;
+extern const struct bench bench_lpm_trie_delete;
+extern const struct bench bench_lpm_trie_free;
static const struct bench *benchs[] = {
&bench_count_global,
@@ -578,6 +589,8 @@ static const struct bench *benchs[] = {
&bench_trig_kprobe_multi,
&bench_trig_kretprobe_multi,
&bench_trig_fentry,
+ &bench_trig_kprobe_multi_all,
+ &bench_trig_kretprobe_multi_all,
&bench_trig_fexit,
&bench_trig_fmodret,
&bench_trig_tp,
@@ -625,6 +638,13 @@ static const struct bench *benchs[] = {
&bench_crypto_encrypt,
&bench_crypto_decrypt,
&bench_sockmap,
+ &bench_lpm_trie_noop,
+ &bench_lpm_trie_baseline,
+ &bench_lpm_trie_lookup,
+ &bench_lpm_trie_insert,
+ &bench_lpm_trie_update,
+ &bench_lpm_trie_delete,
+ &bench_lpm_trie_free,
};
static void find_benchmark(void)
diff --git a/tools/testing/selftests/bpf/bench.h b/tools/testing/selftests/bpf/bench.h
index 005c401b3e22..bea323820ffb 100644
--- a/tools/testing/selftests/bpf/bench.h
+++ b/tools/testing/selftests/bpf/bench.h
@@ -46,6 +46,7 @@ struct bench_res {
unsigned long gp_ns;
unsigned long gp_ct;
unsigned int stime;
+ unsigned long duration_ns;
};
struct bench {
diff --git a/tools/testing/selftests/bpf/benchs/bench_lpm_trie_map.c b/tools/testing/selftests/bpf/benchs/bench_lpm_trie_map.c
new file mode 100644
index 000000000000..246f6cb3387d
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/bench_lpm_trie_map.c
@@ -0,0 +1,555 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Cloudflare */
+
+/*
+ * All of these benchmarks operate on tries with keys in the range
+ * [0, args.nr_entries), i.e. there are no gaps or partially filled
+ * branches of the trie for any key < args.nr_entries.
+ *
+ * This gives an idea of worst-case behaviour.
+ */
+
+#include <argp.h>
+#include <linux/time64.h>
+#include <linux/if_ether.h>
+#include "lpm_trie_bench.skel.h"
+#include "lpm_trie_map.skel.h"
+#include "bench.h"
+#include "testing_helpers.h"
+#include "progs/lpm_trie.h"
+
+static struct ctx {
+ struct lpm_trie_bench *bench;
+} ctx;
+
+static struct {
+ __u32 nr_entries;
+ __u32 prefixlen;
+ bool random;
+} args = {
+ .nr_entries = 0,
+ .prefixlen = 32,
+ .random = false,
+};
+
+enum {
+ ARG_NR_ENTRIES = 9000,
+ ARG_PREFIX_LEN,
+ ARG_RANDOM,
+};
+
+static const struct argp_option opts[] = {
+ { "nr_entries", ARG_NR_ENTRIES, "NR_ENTRIES", 0,
+ "Number of unique entries in the LPM trie" },
+ { "prefix_len", ARG_PREFIX_LEN, "PREFIX_LEN", 0,
+ "Number of prefix bits to use in the LPM trie" },
+ { "random", ARG_RANDOM, NULL, 0, "Access random keys during op" },
+ {},
+};
+
+static error_t lpm_parse_arg(int key, char *arg, struct argp_state *state)
+{
+ long ret;
+
+ switch (key) {
+ case ARG_NR_ENTRIES:
+ ret = strtol(arg, NULL, 10);
+ if (ret < 1 || ret > UINT_MAX) {
+ fprintf(stderr, "Invalid nr_entries count.");
+ argp_usage(state);
+ }
+ args.nr_entries = ret;
+ break;
+ case ARG_PREFIX_LEN:
+ ret = strtol(arg, NULL, 10);
+ if (ret < 1 || ret > UINT_MAX) {
+ fprintf(stderr, "Invalid prefix_len value.");
+ argp_usage(state);
+ }
+ args.prefixlen = ret;
+ break;
+ case ARG_RANDOM:
+ args.random = true;
+ break;
+ default:
+ return ARGP_ERR_UNKNOWN;
+ }
+ return 0;
+}
+
+const struct argp bench_lpm_trie_map_argp = {
+ .options = opts,
+ .parser = lpm_parse_arg,
+};
+
+static void validate_common(void)
+{
+ if (env.consumer_cnt != 0) {
+ fprintf(stderr, "benchmark doesn't support consumer\n");
+ exit(1);
+ }
+
+ if (args.nr_entries == 0) {
+ fprintf(stderr, "Missing --nr_entries parameter\n");
+ exit(1);
+ }
+
+ if ((1UL << args.prefixlen) < args.nr_entries) {
+ fprintf(stderr, "prefix_len value too small for nr_entries\n");
+ exit(1);
+ }
+}
+
+static void lpm_insert_validate(void)
+{
+ validate_common();
+
+ if (env.producer_cnt != 1) {
+ fprintf(stderr, "lpm-trie-insert requires a single producer\n");
+ exit(1);
+ }
+
+ if (args.random) {
+ fprintf(stderr, "lpm-trie-insert does not support --random\n");
+ exit(1);
+ }
+}
+
+static void lpm_delete_validate(void)
+{
+ validate_common();
+
+ if (env.producer_cnt != 1) {
+ fprintf(stderr, "lpm-trie-delete requires a single producer\n");
+ exit(1);
+ }
+
+ if (args.random) {
+ fprintf(stderr, "lpm-trie-delete does not support --random\n");
+ exit(1);
+ }
+}
+
+static void lpm_free_validate(void)
+{
+ validate_common();
+
+ if (env.producer_cnt != 1) {
+ fprintf(stderr, "lpm-trie-free requires a single producer\n");
+ exit(1);
+ }
+
+ if (args.random) {
+ fprintf(stderr, "lpm-trie-free does not support --random\n");
+ exit(1);
+ }
+}
+
+static struct trie_key *keys;
+static __u32 *vals;
+
+static void fill_map(int map_fd)
+{
+ int err;
+
+ DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
+ .elem_flags = 0,
+ .flags = 0,
+ );
+
+ err = bpf_map_update_batch(map_fd, keys, vals, &args.nr_entries, &opts);
+ if (err) {
+ fprintf(stderr, "failed to batch update keys to map: %d\n",
+ -err);
+ exit(1);
+ }
+}
+
+static void empty_map(int map_fd)
+{
+ int err;
+
+ DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
+ .elem_flags = 0,
+ .flags = 0,
+ );
+
+ err = bpf_map_delete_batch(map_fd, keys, &args.nr_entries, &opts);
+ if (err) {
+ fprintf(stderr, "failed to batch delete keys for map: %d\n",
+ -err);
+ exit(1);
+ }
+}
+
+static void attach_prog(void)
+{
+ int i;
+
+ ctx.bench = lpm_trie_bench__open_and_load();
+ if (!ctx.bench) {
+ fprintf(stderr, "failed to open skeleton\n");
+ exit(1);
+ }
+
+ ctx.bench->bss->nr_entries = args.nr_entries;
+ ctx.bench->bss->prefixlen = args.prefixlen;
+ ctx.bench->bss->random = args.random;
+
+ if (lpm_trie_bench__attach(ctx.bench)) {
+ fprintf(stderr, "failed to attach skeleton\n");
+ exit(1);
+ }
+
+ keys = calloc(args.nr_entries, sizeof(*keys));
+ vals = calloc(args.nr_entries, sizeof(*vals));
+
+ for (i = 0; i < args.nr_entries; i++) {
+ struct trie_key *k = &keys[i];
+ __u32 *v = &vals[i];
+
+ k->prefixlen = args.prefixlen;
+ k->data = i;
+ *v = 1;
+ }
+}
+
+static void attach_prog_and_fill_map(void)
+{
+ int fd;
+
+ attach_prog();
+
+ fd = bpf_map__fd(ctx.bench->maps.trie_map);
+ fill_map(fd);
+}
+
+static void lpm_noop_setup(void)
+{
+ attach_prog();
+ ctx.bench->bss->op = LPM_OP_NOOP;
+}
+
+static void lpm_baseline_setup(void)
+{
+ attach_prog();
+ ctx.bench->bss->op = LPM_OP_BASELINE;
+}
+
+static void lpm_lookup_setup(void)
+{
+ attach_prog_and_fill_map();
+ ctx.bench->bss->op = LPM_OP_LOOKUP;
+}
+
+static void lpm_insert_setup(void)
+{
+ attach_prog();
+ ctx.bench->bss->op = LPM_OP_INSERT;
+}
+
+static void lpm_update_setup(void)
+{
+ attach_prog_and_fill_map();
+ ctx.bench->bss->op = LPM_OP_UPDATE;
+}
+
+static void lpm_delete_setup(void)
+{
+ attach_prog_and_fill_map();
+ ctx.bench->bss->op = LPM_OP_DELETE;
+}
+
+static void lpm_free_setup(void)
+{
+ attach_prog();
+ ctx.bench->bss->op = LPM_OP_FREE;
+}
+
+static void lpm_measure(struct bench_res *res)
+{
+ res->hits = atomic_swap(&ctx.bench->bss->hits, 0);
+ res->duration_ns = atomic_swap(&ctx.bench->bss->duration_ns, 0);
+}
+
+static void bench_reinit_map(void)
+{
+ int fd = bpf_map__fd(ctx.bench->maps.trie_map);
+
+ switch (ctx.bench->bss->op) {
+ case LPM_OP_INSERT:
+ /* trie_map needs to be emptied */
+ empty_map(fd);
+ break;
+ case LPM_OP_DELETE:
+ /* trie_map needs to be refilled */
+ fill_map(fd);
+ break;
+ default:
+ fprintf(stderr, "Unexpected REINIT return code for op %d\n",
+ ctx.bench->bss->op);
+ exit(1);
+ }
+}
+
+/* For NOOP, BASELINE, LOOKUP, INSERT, UPDATE, and DELETE */
+static void *lpm_producer(void *unused __always_unused)
+{
+ int err;
+ char in[ETH_HLEN]; /* unused */
+
+ LIBBPF_OPTS(bpf_test_run_opts, opts, .data_in = in,
+ .data_size_in = sizeof(in), .repeat = 1, );
+
+ while (true) {
+ int fd = bpf_program__fd(ctx.bench->progs.run_bench);
+ err = bpf_prog_test_run_opts(fd, &opts);
+ if (err) {
+ fprintf(stderr, "failed to run BPF prog: %d\n", err);
+ exit(1);
+ }
+
+ /* Check for kernel error code */
+ if ((int)opts.retval < 0) {
+ fprintf(stderr, "BPF prog returned error: %d\n",
+ opts.retval);
+ exit(1);
+ }
+
+ switch (opts.retval) {
+ case LPM_BENCH_SUCCESS:
+ break;
+ case LPM_BENCH_REINIT_MAP:
+ bench_reinit_map();
+ break;
+ default:
+ fprintf(stderr, "Unexpected BPF prog return code %d for op %d\n",
+ opts.retval, ctx.bench->bss->op);
+ exit(1);
+ }
+ }
+
+ return NULL;
+}
+
+static void *lpm_free_producer(void *unused __always_unused)
+{
+ while (true) {
+ struct lpm_trie_map *skel;
+
+ skel = lpm_trie_map__open_and_load();
+ if (!skel) {
+ fprintf(stderr, "failed to open skeleton\n");
+ exit(1);
+ }
+
+ fill_map(bpf_map__fd(skel->maps.trie_free_map));
+ lpm_trie_map__destroy(skel);
+ }
+
+ return NULL;
+}
+
+/*
+ * The standard bench op_report_*() functions assume measurements are
+ * taken over a 1-second interval but operations that modify the map
+ * (INSERT, DELETE, and FREE) cannot run indefinitely without
+ * "resetting" the map to the initial state. Depending on the size of
+ * the map, this likely needs to happen before the 1-second timer fires.
+ *
+ * Calculate the fraction of a second over which the op measurement was
+ * taken (to ignore any time spent doing the reset) and report the
+ * throughput results per second.
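+ *
+ * For example (illustrative numbers): 2,000,000 inserts measured over
+ * duration_ns = 400,000,000 (0.4s of op time within the 1s interval)
+ * are reported as 2e6 / 0.4 = 5M ops/s rather than 2M ops/s.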
+ */
+static void frac_second_report_progress(int iter, struct bench_res *res,
+ long delta_ns, double rate_divisor,
+ char rate)
+{
+ double hits_per_sec, hits_per_prod;
+
+ hits_per_sec = res->hits / rate_divisor /
+ (res->duration_ns / (double)NSEC_PER_SEC);
+ hits_per_prod = hits_per_sec / env.producer_cnt;
+
+ printf("Iter %3d (%7.3lfus): ", iter,
+ (delta_ns - NSEC_PER_SEC) / 1000.0);
+ printf("hits %8.3lf%c/s (%7.3lf%c/prod)\n", hits_per_sec, rate,
+ hits_per_prod, rate);
+}
+
+static void frac_second_report_final(struct bench_res res[], int res_cnt,
+ double lat_divisor, double rate_divisor,
+ char rate, const char *unit)
+{
+ double hits_mean = 0.0, hits_stddev = 0.0;
+ double latency = 0.0;
+ int i;
+
+ for (i = 0; i < res_cnt; i++) {
+ double val = res[i].hits / rate_divisor /
+ (res[i].duration_ns / (double)NSEC_PER_SEC);
+ hits_mean += val / (0.0 + res_cnt);
+ latency += res[i].duration_ns / res[i].hits / (0.0 + res_cnt);
+ }
+
+ if (res_cnt > 1) {
+ for (i = 0; i < res_cnt; i++) {
+ double val =
+ res[i].hits / rate_divisor /
+ (res[i].duration_ns / (double)NSEC_PER_SEC);
+ hits_stddev += (hits_mean - val) * (hits_mean - val) /
+ (res_cnt - 1.0);
+ }
+
+ hits_stddev = sqrt(hits_stddev);
+ }
+ printf("Summary: throughput %8.3lf \u00B1 %5.3lf %c ops/s (%7.3lf%c ops/prod), ",
+ hits_mean, hits_stddev, rate, hits_mean / env.producer_cnt,
+ rate);
+ printf("latency %8.3lf %s/op\n",
+ latency / lat_divisor / env.producer_cnt, unit);
+}
+
+static void insert_ops_report_progress(int iter, struct bench_res *res,
+ long delta_ns)
+{
+ double rate_divisor = 1000000.0;
+ char rate = 'M';
+
+ frac_second_report_progress(iter, res, delta_ns, rate_divisor, rate);
+}
+
+static void delete_ops_report_progress(int iter, struct bench_res *res,
+ long delta_ns)
+{
+ double rate_divisor = 1000000.0;
+ char rate = 'M';
+
+ frac_second_report_progress(iter, res, delta_ns, rate_divisor, rate);
+}
+
+static void free_ops_report_progress(int iter, struct bench_res *res,
+ long delta_ns)
+{
+ double rate_divisor = 1000.0;
+ char rate = 'K';
+
+ frac_second_report_progress(iter, res, delta_ns, rate_divisor, rate);
+}
+
+static void insert_ops_report_final(struct bench_res res[], int res_cnt)
+{
+ double lat_divisor = 1.0;
+ double rate_divisor = 1000000.0;
+ const char *unit = "ns";
+ char rate = 'M';
+
+ frac_second_report_final(res, res_cnt, lat_divisor, rate_divisor, rate,
+ unit);
+}
+
+static void delete_ops_report_final(struct bench_res res[], int res_cnt)
+{
+ double lat_divisor = 1.0;
+ double rate_divisor = 1000000.0;
+ const char *unit = "ns";
+ char rate = 'M';
+
+ frac_second_report_final(res, res_cnt, lat_divisor, rate_divisor, rate,
+ unit);
+}
+
+static void free_ops_report_final(struct bench_res res[], int res_cnt)
+{
+ double lat_divisor = 1000000.0;
+ double rate_divisor = 1000.0;
+ const char *unit = "ms";
+ char rate = 'K';
+
+ frac_second_report_final(res, res_cnt, lat_divisor, rate_divisor, rate,
+ unit);
+}
+
+/* noop bench measures harness overhead */
+const struct bench bench_lpm_trie_noop = {
+ .name = "lpm-trie-noop",
+ .argp = &bench_lpm_trie_map_argp,
+ .validate = validate_common,
+ .setup = lpm_noop_setup,
+ .producer_thread = lpm_producer,
+ .measure = lpm_measure,
+ .report_progress = ops_report_progress,
+ .report_final = ops_report_final,
+};
+
+/* baseline overhead for lookup and update */
+const struct bench bench_lpm_trie_baseline = {
+ .name = "lpm-trie-baseline",
+ .argp = &bench_lpm_trie_map_argp,
+ .validate = validate_common,
+ .setup = lpm_baseline_setup,
+ .producer_thread = lpm_producer,
+ .measure = lpm_measure,
+ .report_progress = ops_report_progress,
+ .report_final = ops_report_final,
+};
+
+/* measure cost of doing a lookup on existing entries in a full trie */
+const struct bench bench_lpm_trie_lookup = {
+ .name = "lpm-trie-lookup",
+ .argp = &bench_lpm_trie_map_argp,
+ .validate = validate_common,
+ .setup = lpm_lookup_setup,
+ .producer_thread = lpm_producer,
+ .measure = lpm_measure,
+ .report_progress = ops_report_progress,
+ .report_final = ops_report_final,
+};
+
+/* measure cost of inserting new entries into an empty trie */
+const struct bench bench_lpm_trie_insert = {
+ .name = "lpm-trie-insert",
+ .argp = &bench_lpm_trie_map_argp,
+ .validate = lpm_insert_validate,
+ .setup = lpm_insert_setup,
+ .producer_thread = lpm_producer,
+ .measure = lpm_measure,
+ .report_progress = insert_ops_report_progress,
+ .report_final = insert_ops_report_final,
+};
+
+/* measure cost of updating existing entries in a full trie */
+const struct bench bench_lpm_trie_update = {
+ .name = "lpm-trie-update",
+ .argp = &bench_lpm_trie_map_argp,
+ .validate = validate_common,
+ .setup = lpm_update_setup,
+ .producer_thread = lpm_producer,
+ .measure = lpm_measure,
+ .report_progress = ops_report_progress,
+ .report_final = ops_report_final,
+};
+
+/* measure cost of deleting existing entries from a full trie */
+const struct bench bench_lpm_trie_delete = {
+ .name = "lpm-trie-delete",
+ .argp = &bench_lpm_trie_map_argp,
+ .validate = lpm_delete_validate,
+ .setup = lpm_delete_setup,
+ .producer_thread = lpm_producer,
+ .measure = lpm_measure,
+ .report_progress = delete_ops_report_progress,
+ .report_final = delete_ops_report_final,
+};
+
+/* measure cost of freeing a full trie */
+const struct bench bench_lpm_trie_free = {
+ .name = "lpm-trie-free",
+ .argp = &bench_lpm_trie_map_argp,
+ .validate = lpm_free_validate,
+ .setup = lpm_free_setup,
+ .producer_thread = lpm_free_producer,
+ .measure = lpm_measure,
+ .report_progress = free_ops_report_progress,
+ .report_final = free_ops_report_final,
+};
diff --git a/tools/testing/selftests/bpf/benchs/bench_ringbufs.c b/tools/testing/selftests/bpf/benchs/bench_ringbufs.c
index e1ee979e6acc..01bdce692799 100644
--- a/tools/testing/selftests/bpf/benchs/bench_ringbufs.c
+++ b/tools/testing/selftests/bpf/benchs/bench_ringbufs.c
@@ -19,6 +19,8 @@ static struct {
int ringbuf_sz; /* per-ringbuf, in bytes */
bool ringbuf_use_output; /* use slower output API */
int perfbuf_sz; /* per-CPU size, in pages */
+ bool overwrite;
+ bool bench_producer;
} args = {
.back2back = false,
.batch_cnt = 500,
@@ -27,6 +29,8 @@ static struct {
.ringbuf_sz = 512 * 1024,
.ringbuf_use_output = false,
.perfbuf_sz = 128,
+ .overwrite = false,
+ .bench_producer = false,
};
enum {
@@ -35,6 +39,8 @@ enum {
ARG_RB_BATCH_CNT = 2002,
ARG_RB_SAMPLED = 2003,
ARG_RB_SAMPLE_RATE = 2004,
+ ARG_RB_OVERWRITE = 2005,
+ ARG_RB_BENCH_PRODUCER = 2006,
};
static const struct argp_option opts[] = {
@@ -43,6 +49,8 @@ static const struct argp_option opts[] = {
{ "rb-batch-cnt", ARG_RB_BATCH_CNT, "CNT", 0, "Set BPF-side record batch count"},
{ "rb-sampled", ARG_RB_SAMPLED, NULL, 0, "Notification sampling"},
{ "rb-sample-rate", ARG_RB_SAMPLE_RATE, "RATE", 0, "Notification sample rate"},
+ { "rb-overwrite", ARG_RB_OVERWRITE, NULL, 0, "Overwrite mode"},
+ { "rb-bench-producer", ARG_RB_BENCH_PRODUCER, NULL, 0, "Benchmark producer"},
{},
};
@@ -72,6 +80,12 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
argp_usage(state);
}
break;
+ case ARG_RB_OVERWRITE:
+ args.overwrite = true;
+ break;
+ case ARG_RB_BENCH_PRODUCER:
+ args.bench_producer = true;
+ break;
default:
return ARGP_ERR_UNKNOWN;
}
@@ -95,8 +109,33 @@ static inline void bufs_trigger_batch(void)
static void bufs_validate(void)
{
- if (env.consumer_cnt != 1) {
- fprintf(stderr, "rb-libbpf benchmark needs one consumer!\n");
+ if (args.bench_producer && strcmp(env.bench_name, "rb-libbpf")) {
+ fprintf(stderr, "--rb-bench-producer only works with rb-libbpf!\n");
+ exit(1);
+ }
+
+ if (args.overwrite && !args.bench_producer) {
+ fprintf(stderr, "overwrite mode only works with --rb-bench-producer for now!\n");
+ exit(1);
+ }
+
+ if (args.bench_producer && env.consumer_cnt != 0) {
+ fprintf(stderr, "no consumer is needed for --rb-bench-producer!\n");
+ exit(1);
+ }
+
+ if (args.bench_producer && args.back2back) {
+ fprintf(stderr, "back-to-back mode makes no sense for --rb-bench-producer!\n");
+ exit(1);
+ }
+
+ if (args.bench_producer && args.sampled) {
+ fprintf(stderr, "sampling mode makes no sense for --rb-bench-producer!\n");
+ exit(1);
+ }
+
+ if (!args.bench_producer && env.consumer_cnt != 1) {
+ fprintf(stderr, "benchmarks without --rb-bench-producer require exactly one consumer!\n");
exit(1);
}
@@ -128,12 +167,17 @@ static void ringbuf_libbpf_measure(struct bench_res *res)
{
struct ringbuf_libbpf_ctx *ctx = &ringbuf_libbpf_ctx;
- res->hits = atomic_swap(&buf_hits.value, 0);
+ if (args.bench_producer)
+ res->hits = atomic_swap(&ctx->skel->bss->hits, 0);
+ else
+ res->hits = atomic_swap(&buf_hits.value, 0);
res->drops = atomic_swap(&ctx->skel->bss->dropped, 0);
}
static struct ringbuf_bench *ringbuf_setup_skeleton(void)
{
+ __u32 flags;
+ struct bpf_map *ringbuf;
struct ringbuf_bench *skel;
setup_libbpf();
@@ -146,12 +190,19 @@ static struct ringbuf_bench *ringbuf_setup_skeleton(void)
skel->rodata->batch_cnt = args.batch_cnt;
skel->rodata->use_output = args.ringbuf_use_output ? 1 : 0;
+ skel->rodata->bench_producer = args.bench_producer;
if (args.sampled)
/* record data + header take 16 bytes */
skel->rodata->wakeup_data_size = args.sample_rate * 16;
- bpf_map__set_max_entries(skel->maps.ringbuf, args.ringbuf_sz);
+ ringbuf = skel->maps.ringbuf;
+ if (args.overwrite) {
+ flags = bpf_map__map_flags(ringbuf) | BPF_F_RB_OVERWRITE;
+ bpf_map__set_map_flags(ringbuf, flags);
+ }
+
+ bpf_map__set_max_entries(ringbuf, args.ringbuf_sz);
if (ringbuf_bench__load(skel)) {
fprintf(stderr, "failed to load skeleton\n");
@@ -171,10 +222,12 @@ static void ringbuf_libbpf_setup(void)
{
struct ringbuf_libbpf_ctx *ctx = &ringbuf_libbpf_ctx;
struct bpf_link *link;
+ int map_fd;
ctx->skel = ringbuf_setup_skeleton();
- ctx->ringbuf = ring_buffer__new(bpf_map__fd(ctx->skel->maps.ringbuf),
- buf_process_sample, NULL, NULL);
+
+ map_fd = bpf_map__fd(ctx->skel->maps.ringbuf);
+ ctx->ringbuf = ring_buffer__new(map_fd, buf_process_sample, NULL, NULL);
if (!ctx->ringbuf) {
fprintf(stderr, "failed to create ringbuf\n");
exit(1);
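The overwrite knob above boils down to setting one map flag before the
object is loaded. A self-contained sketch of the same libbpf calls;
BPF_F_RB_OVERWRITE is assumed to be present in the UAPI headers shipped
with this series:

#include <bpf/libbpf.h>
#include <linux/bpf.h>

/* flip a not-yet-loaded ring buffer map into overwrite mode; map flags
 * and max_entries must both be set before load */
static int ringbuf_enable_overwrite(struct bpf_map *rb, int ringbuf_sz)
{
	__u32 flags = bpf_map__map_flags(rb);
	int err;

	err = bpf_map__set_map_flags(rb, flags | BPF_F_RB_OVERWRITE);
	if (err)
		return err;
	return bpf_map__set_max_entries(rb, ringbuf_sz);
}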
diff --git a/tools/testing/selftests/bpf/benchs/bench_sockmap.c b/tools/testing/selftests/bpf/benchs/bench_sockmap.c
index 8ebf563a67a2..cfc072aa7fff 100644
--- a/tools/testing/selftests/bpf/benchs/bench_sockmap.c
+++ b/tools/testing/selftests/bpf/benchs/bench_sockmap.c
@@ -10,6 +10,7 @@
#include <argp.h>
#include "bench.h"
#include "bench_sockmap_prog.skel.h"
+#include "bpf_util.h"
#define FILE_SIZE (128 * 1024)
#define DATA_REPEAT_SIZE 10
@@ -124,8 +125,8 @@ static void bench_sockmap_prog_destroy(void)
{
int i;
- for (i = 0; i < sizeof(ctx.fds); i++) {
- if (ctx.fds[0] > 0)
+ for (i = 0; i < ARRAY_SIZE(ctx.fds); i++) {
+ if (ctx.fds[i] > 0)
close(ctx.fds[i]);
}
diff --git a/tools/testing/selftests/bpf/benchs/bench_trigger.c b/tools/testing/selftests/bpf/benchs/bench_trigger.c
index 82327657846e..34018fc3927f 100644
--- a/tools/testing/selftests/bpf/benchs/bench_trigger.c
+++ b/tools/testing/selftests/bpf/benchs/bench_trigger.c
@@ -180,10 +180,10 @@ static void trigger_kernel_count_setup(void)
{
setup_ctx();
bpf_program__set_autoload(ctx.skel->progs.trigger_driver, false);
- bpf_program__set_autoload(ctx.skel->progs.trigger_count, true);
+ bpf_program__set_autoload(ctx.skel->progs.trigger_kernel_count, true);
load_ctx();
/* override driver program */
- ctx.driver_prog_fd = bpf_program__fd(ctx.skel->progs.trigger_count);
+ ctx.driver_prog_fd = bpf_program__fd(ctx.skel->progs.trigger_kernel_count);
}
static void trigger_kprobe_setup(void)
@@ -226,6 +226,65 @@ static void trigger_fentry_setup(void)
attach_bpf(ctx.skel->progs.bench_trigger_fentry);
}
+static void attach_ksyms_all(struct bpf_program *empty, bool kretprobe)
+{
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ char **syms = NULL;
+ size_t cnt = 0;
+
+ /* Some recursive functions will be skipped in
+ * bpf_get_ksyms -> skip_entry, as they can introduce significant
+ * overhead. However, it's difficult to skip all the recursive
+ * functions for a debug kernel.
+ *
+ * So, don't run the kprobe-multi-all and kretprobe-multi-all on
+ * a debug kernel.
+ */
+ if (bpf_get_ksyms(&syms, &cnt, true)) {
+ fprintf(stderr, "failed to get ksyms\n");
+ exit(1);
+ }
+
+ opts.syms = (const char **) syms;
+ opts.cnt = cnt;
+ opts.retprobe = kretprobe;
+ /* Attach the empty program to all kernel functions except bpf_get_numa_node_id. */
+ if (!bpf_program__attach_kprobe_multi_opts(empty, NULL, &opts)) {
+ fprintf(stderr, "failed to attach bpf_program__attach_kprobe_multi_opts to all\n");
+ exit(1);
+ }
+}
+
+static void trigger_kprobe_multi_all_setup(void)
+{
+ struct bpf_program *prog, *empty;
+
+ setup_ctx();
+ empty = ctx.skel->progs.bench_kprobe_multi_empty;
+ prog = ctx.skel->progs.bench_trigger_kprobe_multi;
+ bpf_program__set_autoload(empty, true);
+ bpf_program__set_autoload(prog, true);
+ load_ctx();
+
+ attach_ksyms_all(empty, false);
+ attach_bpf(prog);
+}
+
+static void trigger_kretprobe_multi_all_setup(void)
+{
+ struct bpf_program *prog, *empty;
+
+ setup_ctx();
+ empty = ctx.skel->progs.bench_kretprobe_multi_empty;
+ prog = ctx.skel->progs.bench_trigger_kretprobe_multi;
+ bpf_program__set_autoload(empty, true);
+ bpf_program__set_autoload(prog, true);
+ load_ctx();
+
+ attach_ksyms_all(empty, true);
+ attach_bpf(prog);
+}
+
static void trigger_fexit_setup(void)
{
setup_ctx();
@@ -512,6 +571,8 @@ BENCH_TRIG_KERNEL(kretprobe, "kretprobe");
BENCH_TRIG_KERNEL(kprobe_multi, "kprobe-multi");
BENCH_TRIG_KERNEL(kretprobe_multi, "kretprobe-multi");
BENCH_TRIG_KERNEL(fentry, "fentry");
+BENCH_TRIG_KERNEL(kprobe_multi_all, "kprobe-multi-all");
+BENCH_TRIG_KERNEL(kretprobe_multi_all, "kretprobe-multi-all");
BENCH_TRIG_KERNEL(fexit, "fexit");
BENCH_TRIG_KERNEL(fmodret, "fmodret");
BENCH_TRIG_KERNEL(tp, "tp");
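attach_ksyms_all() above reduces to a single libbpf call once the symbol
list is built. A self-contained sketch of that call, taking the symbol
list from the caller instead of the selftest-local bpf_get_ksyms()
helper:

#include <stddef.h>
#include <stdbool.h>
#include <bpf/libbpf.h>

/* attach one program to many kernel functions via kprobe.multi;
 * syms/cnt would come from e.g. /proc/kallsyms */
static struct bpf_link *attach_many(struct bpf_program *prog,
				    const char **syms, size_t cnt,
				    bool retprobe)
{
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
		.syms = syms,
		.cnt = cnt,
		.retprobe = retprobe,
	);

	return bpf_program__attach_kprobe_multi_opts(prog, NULL, &opts);
}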
diff --git a/tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh b/tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh
index 91e3567962ff..83e05e837871 100755
--- a/tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh
+++ b/tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh
@@ -49,3 +49,7 @@ for b in 1 2 3 4 8 12 16 20 24 28 32 36 40 44 48 52; do
summarize "rb-libbpf nr_prod $b" "$($RUN_RB_BENCH -p$b --rb-batch-cnt 50 rb-libbpf)"
done
+header "Ringbuf, multi-producer contention in overwrite mode, no consumer"
+for b in 1 2 3 4 8 12 16 20 24 28 32 36 40 44 48 52; do
+ summarize "rb-prod nr_prod $b" "$($RUN_BENCH -p$b --rb-batch-cnt 50 --rb-overwrite --rb-bench-producer rb-libbpf)"
+done
diff --git a/tools/testing/selftests/bpf/benchs/run_bench_trigger.sh b/tools/testing/selftests/bpf/benchs/run_bench_trigger.sh
index a690f5a68b6b..f7573708a0c3 100755
--- a/tools/testing/selftests/bpf/benchs/run_bench_trigger.sh
+++ b/tools/testing/selftests/bpf/benchs/run_bench_trigger.sh
@@ -6,8 +6,8 @@ def_tests=( \
usermode-count kernel-count syscall-count \
fentry fexit fmodret \
rawtp tp \
- kprobe kprobe-multi \
- kretprobe kretprobe-multi \
+ kprobe kprobe-multi kprobe-multi-all \
+ kretprobe kretprobe-multi kretprobe-multi-all \
)
tests=("$@")
diff --git a/tools/testing/selftests/bpf/bpf_arena_common.h b/tools/testing/selftests/bpf/bpf_arena_common.h
index 68a51dcc0669..16f8ce832004 100644
--- a/tools/testing/selftests/bpf/bpf_arena_common.h
+++ b/tools/testing/selftests/bpf/bpf_arena_common.h
@@ -46,8 +46,11 @@
void __arena* bpf_arena_alloc_pages(void *map, void __arena *addr, __u32 page_cnt,
int node_id, __u64 flags) __ksym __weak;
+int bpf_arena_reserve_pages(void *map, void __arena *addr, __u32 page_cnt) __ksym __weak;
void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt) __ksym __weak;
+#define arena_base(map) ((void __arena *)((struct bpf_arena *)(map))->user_vm_start)
+
#else /* when compiled as user space code */
#define __arena
diff --git a/tools/testing/selftests/bpf/bpf_arena_list.h b/tools/testing/selftests/bpf/bpf_arena_list.h
index 85dbc3ea4da5..e16fa7d95fcf 100644
--- a/tools/testing/selftests/bpf/bpf_arena_list.h
+++ b/tools/testing/selftests/bpf/bpf_arena_list.h
@@ -64,14 +64,12 @@ static inline void list_add_head(arena_list_node_t *n, arena_list_head_t *h)
static inline void __list_del(arena_list_node_t *n)
{
- arena_list_node_t *next = n->next, *tmp;
+ arena_list_node_t *next = n->next;
arena_list_node_t * __arena *pprev = n->pprev;
cast_user(next);
cast_kern(pprev);
- tmp = *pprev;
- cast_kern(tmp);
- WRITE_ONCE(tmp, next);
+ WRITE_ONCE(*pprev, next);
if (next) {
cast_user(pprev);
cast_kern(next);
diff --git a/tools/testing/selftests/bpf/bpf_arena_strsearch.h b/tools/testing/selftests/bpf/bpf_arena_strsearch.h
new file mode 100644
index 000000000000..c1b6eaa905bb
--- /dev/null
+++ b/tools/testing/selftests/bpf/bpf_arena_strsearch.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#pragma once
+#include "bpf_arena_common.h"
+
+__noinline int bpf_arena_strlen(const char __arena *s __arg_arena)
+{
+ const char __arena *sc;
+
+ for (sc = s; *sc != '\0'; ++sc)
+ cond_break;
+ return sc - s;
+}
+
+/**
+ * glob_match - Shell-style pattern matching, like !fnmatch(pat, str, 0)
+ * @pat: Shell-style pattern to match, e.g. "*.[ch]".
+ * @str: String to match. The pattern must match the entire string.
+ *
+ * Perform shell-style glob matching, returning true (1) if the match
+ * succeeds, or false (0) if it fails. Equivalent to !fnmatch(@pat, @str, 0).
+ *
+ * Pattern metacharacters are ?, *, [ and \.
+ * (And, inside character classes, !, - and ].)
+ *
+ * This is a small and simple implementation intended for device blacklists
+ * where a string is matched against a number of patterns. Thus, it
+ * does not preprocess the patterns. It is non-recursive, and run-time
+ * is at most quadratic: strlen(@str)*strlen(@pat).
+ *
+ * An example of the worst case is glob_match("*aaaaa", "aaaaaaaaaa");
+ * it takes 6 passes over the pattern before matching the string.
+ *
+ * Like !fnmatch(@pat, @str, 0) and unlike the shell, this does NOT
+ * treat / or leading . specially; it isn't actually used for pathnames.
+ *
+ * Note that according to glob(7) (and unlike bash), character classes
+ * are complemented by a leading !; this does not support the regex-style
+ * [^a-z] syntax.
+ *
+ * An opening bracket without a matching close is matched literally.
+ */
+__noinline bool glob_match(char const __arena *pat __arg_arena, char const __arena *str __arg_arena)
+{
+ /*
+ * Backtrack to previous * on mismatch and retry starting one
+ * character later in the string. Because * matches all characters
+ * (no exception for /), it can be easily proved that there's
+ * never a need to backtrack multiple levels.
+ */
+ char const __arena *back_pat = NULL, *back_str;
+
+ /*
+ * Loop over each token (character or class) in pat, matching
+ * it against the remaining unmatched tail of str. Return false
+ * on mismatch, or true after matching the trailing nul bytes.
+ */
+ for (;;) {
+ unsigned char c = *str++;
+ unsigned char d = *pat++;
+
+ switch (d) {
+ case '?': /* Wildcard: anything but nul */
+ if (c == '\0')
+ return false;
+ break;
+ case '*': /* Any-length wildcard */
+ if (*pat == '\0') /* Optimize trailing * case */
+ return true;
+ back_pat = pat;
+ back_str = --str; /* Allow zero-length match */
+ break;
+ case '[': { /* Character class */
+ bool match = false, inverted = (*pat == '!');
+ char const __arena *class = pat + inverted;
+ unsigned char a = *class++;
+
+ /*
+ * Iterate over each span in the character class.
+ * A span is either a single character a, or a
+ * range a-b. The first span may begin with ']'.
+ */
+ do {
+ unsigned char b = a;
+
+ if (a == '\0') /* Malformed */
+ goto literal;
+
+ if (class[0] == '-' && class[1] != ']') {
+ b = class[1];
+
+ if (b == '\0')
+ goto literal;
+
+ class += 2;
+ /* Any special action if a > b? */
+ }
+ match |= (a <= c && c <= b);
+ cond_break;
+ } while ((a = *class++) != ']');
+
+ if (match == inverted)
+ goto backtrack;
+ pat = class;
+ }
+ break;
+ case '\\':
+ d = *pat++;
+ __attribute__((__fallthrough__));
+ default: /* Literal character */
+literal:
+ if (c == d) {
+ if (d == '\0')
+ return true;
+ break;
+ }
+backtrack:
+ if (c == '\0' || !back_pat)
+ return false; /* No point continuing */
+ /* Try again from last *, one character later in str. */
+ pat = back_pat;
+ str = ++back_str;
+ break;
+ }
+ cond_break;
+ }
+ return false;
+}
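Since the helper above needs an arena build to run, here is a hedged,
userspace-only illustration of the documented !fnmatch(pat, str, 0)
equivalence, using plain libc and the same cases the comment mentions:

#include <assert.h>
#include <fnmatch.h>
#include <stdbool.h>

/* reference semantics glob_match() is documented to follow */
static bool ref_glob_match(const char *pat, const char *str)
{
	return fnmatch(pat, str, 0) == 0;
}

int main(void)
{
	assert(ref_glob_match("*.[ch]", "main.c"));
	assert(ref_glob_match("a*[!x]", "abc"));
	assert(!ref_glob_match("*aaaaa", "aaaa"));	/* one 'a' short */
	assert(ref_glob_match("*aaaaa", "aaaaaaaaaa"));	/* worst case above */
	return 0;
}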
diff --git a/tools/testing/selftests/bpf/bpf_atomic.h b/tools/testing/selftests/bpf/bpf_atomic.h
index a9674e544322..c550e5711967 100644
--- a/tools/testing/selftests/bpf/bpf_atomic.h
+++ b/tools/testing/selftests/bpf/bpf_atomic.h
@@ -61,7 +61,7 @@ extern bool CONFIG_X86_64 __kconfig __weak;
#define smp_mb() \
({ \
- unsigned long __val; \
+ volatile unsigned long __val; \
__sync_fetch_and_add(&__val, 0); \
})
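The volatile qualifier added above keeps the compiler from eliding the
dummy read-modify-write on an otherwise unused local. A standalone
sketch of the same idiom, using the GCC/Clang __sync builtin:

/* full memory barrier built from a dummy atomic RMW on a local */
static inline void full_barrier(void)
{
	volatile unsigned long dummy = 0;

	__sync_fetch_and_add(&dummy, 0);
}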
diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index 5e512a1d09d1..2cd9165c7348 100644
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -219,7 +219,7 @@ extern void bpf_put_file(struct file *file) __ksym;
* including the NULL termination character, stored in the supplied
* buffer. On error, a negative integer is returned.
*/
-extern int bpf_path_d_path(struct path *path, char *buf, size_t buf__sz) __ksym;
+extern int bpf_path_d_path(const struct path *path, char *buf, size_t buf__sz) __ksym;
/* This macro must be used to mark the exception callback corresponding to the
* main program. For example:
@@ -596,4 +596,61 @@ extern int bpf_iter_dmabuf_new(struct bpf_iter_dmabuf *it) __weak __ksym;
extern struct dma_buf *bpf_iter_dmabuf_next(struct bpf_iter_dmabuf *it) __weak __ksym;
extern void bpf_iter_dmabuf_destroy(struct bpf_iter_dmabuf *it) __weak __ksym;
+extern int bpf_cgroup_read_xattr(struct cgroup *cgroup, const char *name__str,
+ struct bpf_dynptr *value_p) __weak __ksym;
+
+#define PREEMPT_BITS 8
+#define SOFTIRQ_BITS 8
+#define HARDIRQ_BITS 4
+#define NMI_BITS 4
+
+#define PREEMPT_SHIFT 0
+#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
+
+#define __IRQ_MASK(x) ((1UL << (x))-1)
+
+#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
+#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
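+
+/* Resulting bit layout (mirrors the kernel's preempt_count encoding):
+ * bits  0-7   preempt count
+ * bits  8-15  softirq count  (SOFTIRQ_MASK = 0x0000ff00)
+ * bits 16-19  hardirq count  (HARDIRQ_MASK = 0x000f0000)
+ * bits 20-23  NMI count      (NMI_MASK     = 0x00f00000)
+ */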
+
+extern bool CONFIG_PREEMPT_RT __kconfig __weak;
+#ifdef bpf_target_x86
+extern const int __preempt_count __ksym;
+#endif
+
+struct task_struct___preempt_rt {
+ int softirq_disable_cnt;
+} __attribute__((preserve_access_index));
+
+static inline int get_preempt_count(void)
+{
+#if defined(bpf_target_x86)
+ return *(int *) bpf_this_cpu_ptr(&__preempt_count);
+#elif defined(bpf_target_arm64)
+ return bpf_get_current_task_btf()->thread_info.preempt.count;
+#endif
+ return 0;
+}
+
+/* Description
+ * Report whether the program is running in interrupt context. Only works on the following archs:
+ * * x86
+ * * arm64
+ */
+static inline int bpf_in_interrupt(void)
+{
+ struct task_struct___preempt_rt *tsk;
+ int pcnt;
+
+ pcnt = get_preempt_count();
+ if (!CONFIG_PREEMPT_RT)
+ return pcnt & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK);
+
+ tsk = (void *) bpf_get_current_task_btf();
+ return (pcnt & (NMI_MASK | HARDIRQ_MASK)) |
+ (tsk->softirq_disable_cnt & SOFTIRQ_MASK);
+}
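+
+/* Hypothetical usage sketch (section and names are illustrative):
+ *
+ *	SEC("kprobe/some_func")
+ *	int probe(void *ctx)
+ *	{
+ *		if (bpf_in_interrupt())
+ *			return 0;	// skip work unsafe in IRQ context
+ *		return 0;
+ *	}
+ */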
+
#endif
diff --git a/tools/testing/selftests/bpf/bpf_kfuncs.h b/tools/testing/selftests/bpf/bpf_kfuncs.h
index 8215c9b3115e..e0189254bb6e 100644
--- a/tools/testing/selftests/bpf/bpf_kfuncs.h
+++ b/tools/testing/selftests/bpf/bpf_kfuncs.h
@@ -19,14 +19,17 @@ extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags,
extern int bpf_dynptr_from_xdp(struct xdp_md *xdp, __u64 flags,
struct bpf_dynptr *ptr__uninit) __ksym __weak;
+extern int bpf_dynptr_from_skb_meta(struct __sk_buff *skb, __u64 flags,
+ struct bpf_dynptr *ptr__uninit) __ksym __weak;
+
/* Description
* Obtain a read-only pointer to the dynptr's data
* Returns
* Either a direct pointer to the dynptr data or a pointer to the user-provided
* buffer if unable to obtain a direct pointer
*/
-extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, __u32 offset,
- void *buffer, __u32 buffer__szk) __ksym __weak;
+extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, __u64 offset,
+ void *buffer, __u64 buffer__szk) __ksym __weak;
/* Description
* Obtain a read-write pointer to the dynptr's data
@@ -34,13 +37,13 @@ extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, __u32 offset,
* Either a direct pointer to the dynptr data or a pointer to the user-provided
* buffer if unable to obtain a direct pointer
*/
-extern void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *ptr, __u32 offset,
- void *buffer, __u32 buffer__szk) __ksym __weak;
+extern void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *ptr, __u64 offset, void *buffer,
+ __u64 buffer__szk) __ksym __weak;
-extern int bpf_dynptr_adjust(const struct bpf_dynptr *ptr, __u32 start, __u32 end) __ksym __weak;
+extern int bpf_dynptr_adjust(const struct bpf_dynptr *ptr, __u64 start, __u64 end) __ksym __weak;
extern bool bpf_dynptr_is_null(const struct bpf_dynptr *ptr) __ksym __weak;
extern bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *ptr) __ksym __weak;
-extern __u32 bpf_dynptr_size(const struct bpf_dynptr *ptr) __ksym __weak;
+extern __u64 bpf_dynptr_size(const struct bpf_dynptr *ptr) __ksym __weak;
extern int bpf_dynptr_clone(const struct bpf_dynptr *ptr, struct bpf_dynptr *clone__init) __ksym __weak;
/* Description
@@ -69,7 +72,7 @@ extern int bpf_get_file_xattr(struct file *file, const char *name,
struct bpf_dynptr *value_ptr) __ksym;
extern int bpf_get_fsverity_digest(struct file *file, struct bpf_dynptr *digest_ptr) __ksym;
-extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym;
+extern struct bpf_key *bpf_lookup_user_key(__s32 serial, __u64 flags) __ksym;
extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym;
extern void bpf_key_put(struct bpf_key *key) __ksym;
extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_ptr,
diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h
index 5f6963a320d7..4bc2d25f33e1 100644
--- a/tools/testing/selftests/bpf/bpf_util.h
+++ b/tools/testing/selftests/bpf/bpf_util.h
@@ -67,6 +67,9 @@ static inline void bpf_strlcpy(char *dst, const char *src, size_t sz)
#define sys_gettid() syscall(SYS_gettid)
#endif
+/* and poison usage to ensure it does not creep back in. */
+#pragma GCC poison gettid
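+/* e.g. a later `pid_t tid = gettid();` now fails to compile with
+ * "error: attempt to use poisoned \"gettid\"" */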
+
#ifndef ENOTSUPP
#define ENOTSUPP 524
#endif
diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c
index e4535451322e..20cede4db3ce 100644
--- a/tools/testing/selftests/bpf/cgroup_helpers.c
+++ b/tools/testing/selftests/bpf/cgroup_helpers.c
@@ -4,6 +4,7 @@
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/types.h>
+#include <sys/xattr.h>
#include <linux/limits.h>
#include <stdio.h>
#include <stdlib.h>
@@ -319,6 +320,26 @@ int join_parent_cgroup(const char *relative_path)
}
/**
+ * set_cgroup_xattr() - Set xattr on a cgroup dir
+ * @relative_path: The cgroup path, relative to the workdir, on which to set the xattr
+ * @name: xattr name
+ * @value: xattr value
+ *
+ * This function sets an xattr on the cgroup dir.
+ *
+ * On success it returns 0; on failure it returns -1.
+ */
+int set_cgroup_xattr(const char *relative_path,
+ const char *name,
+ const char *value)
+{
+ char cgroup_path[PATH_MAX + 1];
+
+ format_cgroup_path(cgroup_path, relative_path);
+ return setxattr(cgroup_path, name, value, strlen(value) + 1, 0);
+}
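+
+/* Example (illustrative names): set_cgroup_xattr("/cg_test",
+ * "user.bpf_test", "ok") tags the cgroup dir so BPF programs can read
+ * the value back, e.g. via the bpf_cgroup_read_xattr() kfunc. */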
+
+/**
* __cleanup_cgroup_environment() - Delete temporary cgroups
*
* This is a helper for cleanup_cgroup_environment() that is responsible for
@@ -391,6 +412,26 @@ void remove_cgroup(const char *relative_path)
log_err("rmdiring cgroup %s .. %s", relative_path, cgroup_path);
}
+/*
+ * remove_cgroup_pid() - Remove a cgroup set up by the process identified by PID
+ * @relative_path: The cgroup path, relative to the workdir, to remove
+ * @pid: PID to be used to find cgroup_path
+ *
+ * This function expects the cgroup to already exist, relative to the cgroup
+ * work dir, and to have no children or live processes; it then removes the
+ * cgroup.
+ *
+ * On failure, it will print an error to stderr.
+ */
+void remove_cgroup_pid(const char *relative_path, int pid)
+{
+ char cgroup_path[PATH_MAX + 1];
+
+ format_cgroup_path_pid(cgroup_path, relative_path, pid);
+ if (rmdir(cgroup_path))
+ log_err("rmdiring cgroup %s .. %s", relative_path, cgroup_path);
+}
+
/**
* create_and_get_cgroup() - Create a cgroup, relative to workdir, and get the FD
* @relative_path: The cgroup path, relative to the workdir, to join
diff --git a/tools/testing/selftests/bpf/cgroup_helpers.h b/tools/testing/selftests/bpf/cgroup_helpers.h
index 502845160d88..3857304be874 100644
--- a/tools/testing/selftests/bpf/cgroup_helpers.h
+++ b/tools/testing/selftests/bpf/cgroup_helpers.h
@@ -19,6 +19,7 @@ int cgroup_setup_and_join(const char *relative_path);
int get_root_cgroup(void);
int create_and_get_cgroup(const char *relative_path);
void remove_cgroup(const char *relative_path);
+void remove_cgroup_pid(const char *relative_path, int pid);
unsigned long long get_cgroup_id(const char *relative_path);
int get_cgroup1_hierarchy_id(const char *subsys_name);
@@ -26,6 +27,10 @@ int join_cgroup(const char *relative_path);
int join_root_cgroup(void);
int join_parent_cgroup(const char *relative_path);
+int set_cgroup_xattr(const char *relative_path,
+ const char *name,
+ const char *value);
+
int setup_cgroup_environment(void);
void cleanup_cgroup_environment(void);
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index f74e1ea0ad3b..558839e3c185 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -50,6 +50,7 @@ CONFIG_IPV6_SIT=y
CONFIG_IPV6_TUNNEL=y
CONFIG_KEYS=y
CONFIG_LIRC=y
+CONFIG_LIVEPATCH=y
CONFIG_LWTUNNEL=y
CONFIG_MODULE_SIG=y
CONFIG_MODULE_SRCVERSION_ALL=y
@@ -61,6 +62,7 @@ CONFIG_MPLS_IPTUNNEL=y
CONFIG_MPLS_ROUTING=y
CONFIG_MPTCP=y
CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
CONFIG_NET_ACT_SKBMOD=y
CONFIG_NET_CLS=y
CONFIG_NET_CLS_ACT=y
@@ -97,6 +99,9 @@ CONFIG_NF_TABLES_NETDEV=y
CONFIG_NF_TABLES_IPV4=y
CONFIG_NF_TABLES_IPV6=y
CONFIG_NETFILTER_INGRESS=y
+CONFIG_IP_NF_IPTABLES_LEGACY=y
+CONFIG_IP6_NF_IPTABLES_LEGACY=y
+CONFIG_NETFILTER_XTABLES_LEGACY=y
CONFIG_NF_FLOW_TABLE=y
CONFIG_NF_FLOW_TABLE_INET=y
CONFIG_NETFILTER_NETLINK=y
@@ -105,7 +110,10 @@ CONFIG_IP_NF_IPTABLES=y
CONFIG_IP6_NF_IPTABLES=y
CONFIG_IP6_NF_FILTER=y
CONFIG_NF_NAT=y
+CONFIG_PACKET=y
CONFIG_RC_CORE=y
+CONFIG_SAMPLES=y
+CONFIG_SAMPLE_LIVEPATCH=m
CONFIG_SECURITY=y
CONFIG_SECURITYFS=y
CONFIG_SYN_COOKIES=y
@@ -118,3 +126,8 @@ CONFIG_XDP_SOCKETS=y
CONFIG_XFRM_INTERFACE=y
CONFIG_TCP_CONG_DCTCP=y
CONFIG_TCP_CONG_BBR=y
+CONFIG_INFINIBAND=y
+CONFIG_SMC=y
+CONFIG_SMC_HS_CTRL_BPF=y
+CONFIG_DIBS=y
+CONFIG_DIBS_LO=y
\ No newline at end of file
diff --git a/tools/testing/selftests/bpf/config.aarch64 b/tools/testing/selftests/bpf/config.aarch64
index e1495a4bbc99..7efad36ceb26 100644
--- a/tools/testing/selftests/bpf/config.aarch64
+++ b/tools/testing/selftests/bpf/config.aarch64
@@ -31,10 +31,7 @@ CONFIG_COMPAT=y
CONFIG_CPUSETS=y
CONFIG_CRASH_DUMP=y
CONFIG_CRYPTO_USER_API_RNG=y
-CONFIG_CRYPTO_USER_API_SKCIPHER=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
-CONFIG_DEBUG_INFO_BTF=y
-CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_DEBUG_INFO_REDUCED=n
CONFIG_DEBUG_LIST=y
CONFIG_DEBUG_LOCKDEP=y
@@ -46,7 +43,6 @@ CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_DEVTMPFS=y
CONFIG_DRM=y
-CONFIG_DUMMY=y
CONFIG_EXPERT=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
@@ -70,13 +66,11 @@ CONFIG_HZ_100=y
CONFIG_IDLE_PAGE_TRACKING=y
CONFIG_IKHEADERS=y
CONFIG_INET6_ESP=y
-CONFIG_INET_ESP=y
CONFIG_INET=y
CONFIG_INPUT_EVDEV=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_NF_IPTABLES=y
CONFIG_IPV6_SEG6_LWTUNNEL=y
CONFIG_IPVLAN=y
CONFIG_JUMP_LABEL=y
@@ -97,22 +91,18 @@ CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_NAMESPACES=y
CONFIG_NET_ACT_BPF=y
-CONFIG_NET_ACT_GACT=y
CONFIG_NETDEVICES=y
CONFIG_NETFILTER_XT_MATCH_BPF=y
CONFIG_NETFILTER_XT_TARGET_MARK=y
CONFIG_NET_KEY=y
-CONFIG_NET_SCH_FQ=y
CONFIG_NET_VRF=y
CONFIG_NET=y
-CONFIG_NF_TABLES=y
CONFIG_NLMON=y
CONFIG_NO_HZ_IDLE=y
CONFIG_NR_CPUS=256
CONFIG_NUMA=y
CONFIG_OVERLAY_FS=y
CONFIG_PACKET_DIAG=y
-CONFIG_PACKET=y
CONFIG_PANIC_ON_OOPS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_PCI_HOST_GENERIC=y
@@ -149,7 +139,6 @@ CONFIG_TASK_XACCT=y
CONFIG_TCG_TIS=y
CONFIG_TCG_TPM=y
CONFIG_TCP_CONG_ADVANCED=y
-CONFIG_TCP_CONG_DCTCP=y
CONFIG_TLS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_TMPFS=y
@@ -161,6 +150,5 @@ CONFIG_UPROBES=y
CONFIG_USER_NS=y
CONFIG_VETH=y
CONFIG_VLAN_8021Q=y
-CONFIG_VSOCKETS=y
CONFIG_VSOCKETS_LOOPBACK=y
CONFIG_XFRM_USER=y
diff --git a/tools/testing/selftests/bpf/config.ppc64el b/tools/testing/selftests/bpf/config.ppc64el
new file mode 100644
index 000000000000..b53afb5e0b71
--- /dev/null
+++ b/tools/testing/selftests/bpf/config.ppc64el
@@ -0,0 +1,92 @@
+CONFIG_ALTIVEC=y
+CONFIG_AUDIT=y
+CONFIG_BLK_CGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BONDING=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+CONFIG_BPF_PRELOAD_UMD=y
+CONFIG_BPF_PRELOAD=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CGROUP_NET_CLASSID=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUPS=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=hvc0 wg.success=hvc1 panic_on_warn=1"
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPUSETS=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_FS=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DEVTMPFS=y
+CONFIG_EXPERT=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_FRAME_POINTER=y
+CONFIG_FRAME_WARN=1280
+CONFIG_HARDLOCKUP_DETECTOR=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_HUGETLBFS=y
+CONFIG_HVC_CONSOLE=y
+CONFIG_INET=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IPV6_SEG6_LWTUNNEL=y
+CONFIG_JUMP_LABEL=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_KPROBES=y
+CONFIG_MEMCG=y
+CONFIG_NAMESPACES=y
+CONFIG_NET_ACT_BPF=y
+CONFIG_NETDEVICES=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_NET_VRF=y
+CONFIG_NET=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NONPORTABLE=y
+CONFIG_NR_CPUS=256
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCI=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_PPC64=y
+CONFIG_PPC_OF_BOOT_TRAMPOLINE=y
+CONFIG_PPC_PSERIES=y
+CONFIG_PPC_RADIX_MMU=y
+CONFIG_PRINTK_TIME=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROFILING=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SECTION_MISMATCH_WARN_ONLY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SMP=y
+CONFIG_SOC_VIRT=y
+CONFIG_SYSVIPC=y
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_THREAD_SHIFT=14
+CONFIG_TLS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS=y
+CONFIG_TUN=y
+CONFIG_UNIX=y
+CONFIG_UPROBES=y
+CONFIG_USER_NS=y
+CONFIG_VETH=y
+CONFIG_VLAN_8021Q=y
+CONFIG_VSOCKETS_LOOPBACK=y
+CONFIG_VSX=y
+CONFIG_XFRM_USER=y
diff --git a/tools/testing/selftests/bpf/config.riscv64 b/tools/testing/selftests/bpf/config.riscv64
index bb7043a80e1a..7bee24a79a71 100644
--- a/tools/testing/selftests/bpf/config.riscv64
+++ b/tools/testing/selftests/bpf/config.riscv64
@@ -48,7 +48,6 @@ CONFIG_NET_VRF=y
CONFIG_NONPORTABLE=y
CONFIG_NO_HZ_IDLE=y
CONFIG_NR_CPUS=256
-CONFIG_PACKET=y
CONFIG_PANIC_ON_OOPS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_PCI=y
diff --git a/tools/testing/selftests/bpf/config.s390x b/tools/testing/selftests/bpf/config.s390x
index 26c3bc2ce11d..db61878148e4 100644
--- a/tools/testing/selftests/bpf/config.s390x
+++ b/tools/testing/selftests/bpf/config.s390x
@@ -22,10 +22,7 @@ CONFIG_CHECKPOINT_RESTORE=y
CONFIG_CPUSETS=y
CONFIG_CRASH_DUMP=y
CONFIG_CRYPTO_USER_API_RNG=y
-CONFIG_CRYPTO_USER_API_SKCIPHER=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
-CONFIG_DEBUG_INFO_BTF=y
-CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_DEBUG_LIST=y
CONFIG_DEBUG_LOCKDEP=y
CONFIG_DEBUG_NOTIFIERS=y
@@ -56,11 +53,9 @@ CONFIG_IDLE_PAGE_TRACKING=y
CONFIG_IKHEADERS=y
CONFIG_INET6_ESP=y
CONFIG_INET=y
-CONFIG_INET_ESP=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_NF_IPTABLES=y
CONFIG_IPV6_SEG6_LWTUNNEL=y
CONFIG_IPVLAN=y
CONFIG_JUMP_LABEL=y
@@ -83,18 +78,14 @@ CONFIG_MEMORY_HOTREMOVE=y
CONFIG_NAMESPACES=y
CONFIG_NET=y
CONFIG_NET_ACT_BPF=y
-CONFIG_NET_ACT_GACT=y
CONFIG_NET_KEY=y
-CONFIG_NET_SCH_FQ=y
CONFIG_NET_VRF=y
CONFIG_NETDEVICES=y
CONFIG_NETFILTER_XT_MATCH_BPF=y
CONFIG_NETFILTER_XT_TARGET_MARK=y
-CONFIG_NF_TABLES=y
CONFIG_NO_HZ_IDLE=y
CONFIG_NR_CPUS=256
CONFIG_NUMA=y
-CONFIG_PACKET=y
CONFIG_PANIC_ON_OOPS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_PCI=y
@@ -119,7 +110,6 @@ CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_TASK_XACCT=y
CONFIG_TASKSTATS=y
CONFIG_TCP_CONG_ADVANCED=y
-CONFIG_TCP_CONG_DCTCP=y
CONFIG_TLS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
@@ -131,6 +121,5 @@ CONFIG_UPROBES=y
CONFIG_USER_NS=y
CONFIG_VETH=y
CONFIG_VLAN_8021Q=y
-CONFIG_VSOCKETS=y
CONFIG_VSOCKETS_LOOPBACK=y
CONFIG_XFRM_USER=y
diff --git a/tools/testing/selftests/bpf/config.x86_64 b/tools/testing/selftests/bpf/config.x86_64
index 5e713ef7caa3..42ad817b00ae 100644
--- a/tools/testing/selftests/bpf/config.x86_64
+++ b/tools/testing/selftests/bpf/config.x86_64
@@ -44,7 +44,6 @@ CONFIG_CRYPTO_SEQIV=y
CONFIG_CRYPTO_XXHASH=y
CONFIG_DCB=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
-CONFIG_DEBUG_INFO_BTF=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DEFAULT_FQ_CODEL=y
@@ -104,12 +103,10 @@ CONFIG_HZ_1000=y
CONFIG_INET=y
CONFIG_INPUT_EVDEV=y
CONFIG_INTEL_POWERCLAMP=y
-CONFIG_IP6_NF_IPTABLES=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MROUTE=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_IP_ROUTE_MULTIPATH=y
@@ -162,7 +159,6 @@ CONFIG_NUMA=y
CONFIG_NUMA_BALANCING=y
CONFIG_NVMEM=y
CONFIG_OSF_PARTITION=y
-CONFIG_PACKET=y
CONFIG_PANIC_ON_OOPS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_PCI=y
@@ -220,7 +216,6 @@ CONFIG_VALIDATE_FS_PARSER=y
CONFIG_VETH=y
CONFIG_VIRT_DRIVERS=y
CONFIG_VLAN_8021Q=y
-CONFIG_VSOCKETS=y
CONFIG_VSOCKETS_LOOPBACK=y
CONFIG_X86_ACPI_CPUFREQ=y
CONFIG_X86_CPUID=y
diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c
index 72b5c174ab3b..0a6a5561bed3 100644
--- a/tools/testing/selftests/bpf/network_helpers.c
+++ b/tools/testing/selftests/bpf/network_helpers.c
@@ -97,7 +97,7 @@ int settimeo(int fd, int timeout_ms)
int start_server_addr(int type, const struct sockaddr_storage *addr, socklen_t addrlen,
const struct network_helper_opts *opts)
{
- int fd;
+ int on = 1, fd;
if (!opts)
opts = &default_opts;
@@ -111,6 +111,12 @@ int start_server_addr(int type, const struct sockaddr_storage *addr, socklen_t a
if (settimeo(fd, opts->timeout_ms))
goto error_close;
+ if (type == SOCK_STREAM &&
+ setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on))) {
+ log_err("Failed to enable SO_REUSEADDR");
+ goto error_close;
+ }
+
if (opts->post_socket_cb &&
opts->post_socket_cb(fd, opts->cb_opts)) {
log_err("Failed to call post_socket_cb");
@@ -457,7 +463,7 @@ int append_tid(char *str, size_t sz)
if (end + 8 > sz)
return -1;
- sprintf(&str[end], "%07d", gettid());
+ sprintf(&str[end], "%07ld", sys_gettid());
str[end + 7] = '\0';
return 0;
@@ -766,6 +772,50 @@ int send_recv_data(int lfd, int fd, uint32_t total_bytes)
return err;
}
+int tc_prog_attach(const char *dev, int ingress_fd, int egress_fd)
+{
+ int ifindex, ret;
+
+ if (!ASSERT_TRUE(ingress_fd >= 0 || egress_fd >= 0,
+ "at least one program fd is valid"))
+ return -1;
+
+ ifindex = if_nametoindex(dev);
+ if (!ASSERT_NEQ(ifindex, 0, "get ifindex"))
+ return -1;
+
+ DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = ifindex,
+ .attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS);
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts1, .handle = 1,
+ .priority = 1, .prog_fd = ingress_fd);
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts2, .handle = 1,
+ .priority = 1, .prog_fd = egress_fd);
+
+ ret = bpf_tc_hook_create(&hook);
+ if (!ASSERT_OK(ret, "create tc hook"))
+ return ret;
+
+ if (ingress_fd >= 0) {
+ hook.attach_point = BPF_TC_INGRESS;
+ ret = bpf_tc_attach(&hook, &opts1);
+ if (!ASSERT_OK(ret, "bpf_tc_attach")) {
+ bpf_tc_hook_destroy(&hook);
+ return ret;
+ }
+ }
+
+ if (egress_fd >= 0) {
+ hook.attach_point = BPF_TC_EGRESS;
+ ret = bpf_tc_attach(&hook, &opts2);
+ if (!ASSERT_OK(ret, "bpf_tc_attach")) {
+ bpf_tc_hook_destroy(&hook);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
#ifdef TRAFFIC_MONITOR
struct tmonitor_ctx {
pcap_t *pcap;
diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h
index ef208eefd571..79a010c88e11 100644
--- a/tools/testing/selftests/bpf/network_helpers.h
+++ b/tools/testing/selftests/bpf/network_helpers.h
@@ -255,6 +255,22 @@ struct tmonitor_ctx;
typedef int (*tm_print_fn_t)(const char *format, va_list args);
+/**
+ * tc_prog_attach - attach BPF program(s) to an interface
+ *
+ * Takes at least one and at most two BPF program file descriptors and
+ * attaches those programs to an interface's ingress hook, egress hook,
+ * or both.
+ *
+ * @dev: string containing the interface name
+ * @ingress_fd: file descriptor of the program to attach to interface ingress
+ * @egress_fd: file descriptor of the program to attach to interface egress
+ *
+ * Returns 0 on success, or -1 if no valid file descriptor was found, the
+ * interface name is invalid, or an error occurred during attach.
+ */
+int tc_prog_attach(const char *dev, int ingress_fd, int egress_fd);
+
#ifdef TRAFFIC_MONITOR
struct tmonitor_ctx *traffic_monitor_start(const char *netns, const char *test_name,
const char *subtest_name);
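
A minimal usage sketch for the new helper (illustrative only: the skeleton type, the tc_ingress/tc_egress program names, and the "veth0" device are hypothetical and not part of this patch):

	/* attach one program per direction; pass -1 to skip a direction */
	static int attach_example(struct tc_skel *skel)
	{
		int ingress_fd = bpf_program__fd(skel->progs.tc_ingress);
		int egress_fd = bpf_program__fd(skel->progs.tc_egress);

		return tc_prog_attach("veth0", ingress_fd, egress_fd);
	}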
diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c
index 1d53a8561ee2..24c509ce4e5b 100644
--- a/tools/testing/selftests/bpf/prog_tests/align.c
+++ b/tools/testing/selftests/bpf/prog_tests/align.c
@@ -42,11 +42,11 @@ static struct bpf_align_test tests[] = {
.matches = {
{0, "R1", "ctx()"},
{0, "R10", "fp0"},
- {0, "R3_w", "2"},
- {1, "R3_w", "4"},
- {2, "R3_w", "8"},
- {3, "R3_w", "16"},
- {4, "R3_w", "32"},
+ {0, "R3", "2"},
+ {1, "R3", "4"},
+ {2, "R3", "8"},
+ {3, "R3", "16"},
+ {4, "R3", "32"},
},
},
{
@@ -70,17 +70,17 @@ static struct bpf_align_test tests[] = {
.matches = {
{0, "R1", "ctx()"},
{0, "R10", "fp0"},
- {0, "R3_w", "1"},
- {1, "R3_w", "2"},
- {2, "R3_w", "4"},
- {3, "R3_w", "8"},
- {4, "R3_w", "16"},
- {5, "R3_w", "1"},
- {6, "R4_w", "32"},
- {7, "R4_w", "16"},
- {8, "R4_w", "8"},
- {9, "R4_w", "4"},
- {10, "R4_w", "2"},
+ {0, "R3", "1"},
+ {1, "R3", "2"},
+ {2, "R3", "4"},
+ {3, "R3", "8"},
+ {4, "R3", "16"},
+ {5, "R3", "1"},
+ {6, "R4", "32"},
+ {7, "R4", "16"},
+ {8, "R4", "8"},
+ {9, "R4", "4"},
+ {10, "R4", "2"},
},
},
{
@@ -99,12 +99,12 @@ static struct bpf_align_test tests[] = {
.matches = {
{0, "R1", "ctx()"},
{0, "R10", "fp0"},
- {0, "R3_w", "4"},
- {1, "R3_w", "8"},
- {2, "R3_w", "10"},
- {3, "R4_w", "8"},
- {4, "R4_w", "12"},
- {5, "R4_w", "14"},
+ {0, "R3", "4"},
+ {1, "R3", "8"},
+ {2, "R3", "10"},
+ {3, "R4", "8"},
+ {4, "R4", "12"},
+ {5, "R4", "14"},
},
},
{
@@ -121,10 +121,10 @@ static struct bpf_align_test tests[] = {
.matches = {
{0, "R1", "ctx()"},
{0, "R10", "fp0"},
- {0, "R3_w", "7"},
- {1, "R3_w", "7"},
- {2, "R3_w", "14"},
- {3, "R3_w", "56"},
+ {0, "R3", "7"},
+ {1, "R3", "7"},
+ {2, "R3", "14"},
+ {3, "R3", "56"},
},
},
@@ -162,19 +162,19 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {6, "R0_w", "pkt(off=8,r=8)"},
- {6, "R3_w", "var_off=(0x0; 0xff)"},
- {7, "R3_w", "var_off=(0x0; 0x1fe)"},
- {8, "R3_w", "var_off=(0x0; 0x3fc)"},
- {9, "R3_w", "var_off=(0x0; 0x7f8)"},
- {10, "R3_w", "var_off=(0x0; 0xff0)"},
- {12, "R3_w", "pkt_end()"},
- {17, "R4_w", "var_off=(0x0; 0xff)"},
- {18, "R4_w", "var_off=(0x0; 0x1fe0)"},
- {19, "R4_w", "var_off=(0x0; 0xff0)"},
- {20, "R4_w", "var_off=(0x0; 0x7f8)"},
- {21, "R4_w", "var_off=(0x0; 0x3fc)"},
- {22, "R4_w", "var_off=(0x0; 0x1fe)"},
+ {6, "R0", "pkt(off=8,r=8)"},
+ {6, "R3", "var_off=(0x0; 0xff)"},
+ {7, "R3", "var_off=(0x0; 0x1fe)"},
+ {8, "R3", "var_off=(0x0; 0x3fc)"},
+ {9, "R3", "var_off=(0x0; 0x7f8)"},
+ {10, "R3", "var_off=(0x0; 0xff0)"},
+ {12, "R3", "pkt_end()"},
+ {17, "R4", "var_off=(0x0; 0xff)"},
+ {18, "R4", "var_off=(0x0; 0x1fe0)"},
+ {19, "R4", "var_off=(0x0; 0xff0)"},
+ {20, "R4", "var_off=(0x0; 0x7f8)"},
+ {21, "R4", "var_off=(0x0; 0x3fc)"},
+ {22, "R4", "var_off=(0x0; 0x1fe)"},
},
},
{
@@ -195,16 +195,16 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {6, "R3_w", "var_off=(0x0; 0xff)"},
- {7, "R4_w", "var_off=(0x0; 0xff)"},
- {8, "R4_w", "var_off=(0x0; 0xff)"},
- {9, "R4_w", "var_off=(0x0; 0xff)"},
- {10, "R4_w", "var_off=(0x0; 0x1fe)"},
- {11, "R4_w", "var_off=(0x0; 0xff)"},
- {12, "R4_w", "var_off=(0x0; 0x3fc)"},
- {13, "R4_w", "var_off=(0x0; 0xff)"},
- {14, "R4_w", "var_off=(0x0; 0x7f8)"},
- {15, "R4_w", "var_off=(0x0; 0xff0)"},
+ {6, "R3", "var_off=(0x0; 0xff)"},
+ {7, "R4", "var_off=(0x0; 0xff)"},
+ {8, "R4", "var_off=(0x0; 0xff)"},
+ {9, "R4", "var_off=(0x0; 0xff)"},
+ {10, "R4", "var_off=(0x0; 0x1fe)"},
+ {11, "R4", "var_off=(0x0; 0xff)"},
+ {12, "R4", "var_off=(0x0; 0x3fc)"},
+ {13, "R4", "var_off=(0x0; 0xff)"},
+ {14, "R4", "var_off=(0x0; 0x7f8)"},
+ {15, "R4", "var_off=(0x0; 0xff0)"},
},
},
{
@@ -235,14 +235,14 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {2, "R5_w", "pkt(r=0)"},
- {4, "R5_w", "pkt(off=14,r=0)"},
- {5, "R4_w", "pkt(off=14,r=0)"},
+ {2, "R5", "pkt(r=0)"},
+ {4, "R5", "pkt(off=14,r=0)"},
+ {5, "R4", "pkt(off=14,r=0)"},
{9, "R2", "pkt(r=18)"},
{10, "R5", "pkt(off=14,r=18)"},
- {10, "R4_w", "var_off=(0x0; 0xff)"},
- {13, "R4_w", "var_off=(0x0; 0xffff)"},
- {14, "R4_w", "var_off=(0x0; 0xffff)"},
+ {10, "R4", "var_off=(0x0; 0xff)"},
+ {13, "R4", "var_off=(0x0; 0xffff)"},
+ {14, "R4", "var_off=(0x0; 0xffff)"},
},
},
{
@@ -299,12 +299,12 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
- {6, "R2_w", "pkt(r=8)"},
- {7, "R6_w", "var_off=(0x0; 0x3fc)"},
+ {6, "R2", "pkt(r=8)"},
+ {7, "R6", "var_off=(0x0; 0x3fc)"},
/* Offset is added to packet pointer R5, resulting in
* known fixed offset, and variable offset from R6.
*/
- {11, "R5_w", "pkt(id=1,off=14,"},
+ {11, "R5", "pkt(id=1,off=14,"},
/* At the time the word size load is performed from R5,
* it's total offset is NET_IP_ALIGN + reg->off (0) +
* reg->aux_off (14) which is 16. Then the variable
@@ -320,12 +320,12 @@ static struct bpf_align_test tests[] = {
* instruction to validate R5 state. We also check
* that R4 is what it should be in such case.
*/
- {18, "R4_w", "var_off=(0x0; 0x3fc)"},
- {18, "R5_w", "var_off=(0x0; 0x3fc)"},
+ {18, "R4", "var_off=(0x0; 0x3fc)"},
+ {18, "R5", "var_off=(0x0; 0x3fc)"},
/* Constant offset is added to R5, resulting in
* reg->off of 14.
*/
- {19, "R5_w", "pkt(id=2,off=14,"},
+ {19, "R5", "pkt(id=2,off=14,"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off
* (14) which is 16. Then the variable offset is 4-byte
@@ -337,21 +337,21 @@ static struct bpf_align_test tests[] = {
/* Constant offset is added to R5 packet pointer,
* resulting in reg->off value of 14.
*/
- {26, "R5_w", "pkt(off=14,r=8)"},
+ {26, "R5", "pkt(off=14,r=8)"},
/* Variable offset is added to R5, resulting in a
* variable offset of (4n). See comment for insn #18
* for R4 = R5 trick.
*/
- {28, "R4_w", "var_off=(0x0; 0x3fc)"},
- {28, "R5_w", "var_off=(0x0; 0x3fc)"},
+ {28, "R4", "var_off=(0x0; 0x3fc)"},
+ {28, "R5", "var_off=(0x0; 0x3fc)"},
/* Constant is added to R5 again, setting reg->off to 18. */
- {29, "R5_w", "pkt(id=3,off=18,"},
+ {29, "R5", "pkt(id=3,off=18,"},
/* And once more we add a variable; resulting var_off
* is still (4n), fixed offset is not changed.
* Also, we create a new reg->id.
*/
- {31, "R4_w", "var_off=(0x0; 0x7fc)"},
- {31, "R5_w", "var_off=(0x0; 0x7fc)"},
+ {31, "R4", "var_off=(0x0; 0x7fc)"},
+ {31, "R5", "var_off=(0x0; 0x7fc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (18)
* which is 20. Then the variable offset is (4n), so
@@ -397,12 +397,12 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
- {6, "R2_w", "pkt(r=8)"},
- {7, "R6_w", "var_off=(0x0; 0x3fc)"},
+ {6, "R2", "pkt(r=8)"},
+ {7, "R6", "var_off=(0x0; 0x3fc)"},
/* Adding 14 makes R6 be (4n+2) */
- {8, "R6_w", "var_off=(0x2; 0x7fc)"},
+ {8, "R6", "var_off=(0x2; 0x7fc)"},
/* Packet pointer has (4n+2) offset */
- {11, "R5_w", "var_off=(0x2; 0x7fc)"},
+ {11, "R5", "var_off=(0x2; 0x7fc)"},
{12, "R4", "var_off=(0x2; 0x7fc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
@@ -414,11 +414,11 @@ static struct bpf_align_test tests[] = {
/* Newly read value in R6 was shifted left by 2, so has
* known alignment of 4.
*/
- {17, "R6_w", "var_off=(0x0; 0x3fc)"},
+ {17, "R6", "var_off=(0x0; 0x3fc)"},
/* Added (4n) to packet pointer's (4n+2) var_off, giving
* another (4n+2).
*/
- {19, "R5_w", "var_off=(0x2; 0xffc)"},
+ {19, "R5", "var_off=(0x2; 0xffc)"},
{20, "R4", "var_off=(0x2; 0xffc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
@@ -459,18 +459,18 @@ static struct bpf_align_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.matches = {
- {3, "R5_w", "pkt_end()"},
+ {3, "R5", "pkt_end()"},
/* (ptr - ptr) << 2 == unknown, (4n) */
- {5, "R5_w", "var_off=(0x0; 0xfffffffffffffffc)"},
+ {5, "R5", "var_off=(0x0; 0xfffffffffffffffc)"},
/* (4n) + 14 == (4n+2). We blow our bounds, because
* the add could overflow.
*/
- {6, "R5_w", "var_off=(0x2; 0xfffffffffffffffc)"},
+ {6, "R5", "var_off=(0x2; 0xfffffffffffffffc)"},
/* Checked s>=0 */
{9, "R5", "var_off=(0x2; 0x7ffffffffffffffc)"},
/* packet pointer + nonnegative (4n+2) */
- {11, "R6_w", "var_off=(0x2; 0x7ffffffffffffffc)"},
- {12, "R4_w", "var_off=(0x2; 0x7ffffffffffffffc)"},
+ {11, "R6", "var_off=(0x2; 0x7ffffffffffffffc)"},
+ {12, "R4", "var_off=(0x2; 0x7ffffffffffffffc)"},
/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
* We checked the bounds, but it might have been able
* to overflow if the packet pointer started in the
@@ -478,7 +478,7 @@ static struct bpf_align_test tests[] = {
* So we did not get a 'range' on R6, and the access
* attempt will fail.
*/
- {15, "R6_w", "var_off=(0x2; 0x7ffffffffffffffc)"},
+ {15, "R6", "var_off=(0x2; 0x7ffffffffffffffc)"},
}
},
{
@@ -513,12 +513,12 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
- {6, "R2_w", "pkt(r=8)"},
- {8, "R6_w", "var_off=(0x0; 0x3fc)"},
+ {6, "R2", "pkt(r=8)"},
+ {8, "R6", "var_off=(0x0; 0x3fc)"},
/* Adding 14 makes R6 be (4n+2) */
- {9, "R6_w", "var_off=(0x2; 0x7fc)"},
+ {9, "R6", "var_off=(0x2; 0x7fc)"},
/* New unknown value in R7 is (4n) */
- {10, "R7_w", "var_off=(0x0; 0x3fc)"},
+ {10, "R7", "var_off=(0x0; 0x3fc)"},
/* Subtracting it from R6 blows our unsigned bounds */
{11, "R6", "var_off=(0x2; 0xfffffffffffffffc)"},
/* Checked s>= 0 */
@@ -566,16 +566,16 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
- {6, "R2_w", "pkt(r=8)"},
- {9, "R6_w", "var_off=(0x0; 0x3c)"},
+ {6, "R2", "pkt(r=8)"},
+ {9, "R6", "var_off=(0x0; 0x3c)"},
/* Adding 14 makes R6 be (4n+2) */
- {10, "R6_w", "var_off=(0x2; 0x7c)"},
+ {10, "R6", "var_off=(0x2; 0x7c)"},
/* Subtracting from packet pointer overflows ubounds */
- {13, "R5_w", "var_off=(0xffffffffffffff82; 0x7c)"},
+ {13, "R5", "var_off=(0xffffffffffffff82; 0x7c)"},
/* New unknown value in R7 is (4n), >= 76 */
- {14, "R7_w", "var_off=(0x0; 0x7fc)"},
+ {14, "R7", "var_off=(0x0; 0x7fc)"},
/* Adding it to packet pointer gives nice bounds again */
- {16, "R5_w", "var_off=(0x2; 0x7fc)"},
+ {16, "R5", "var_off=(0x2; 0x7fc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
diff --git a/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c b/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c
index 0223fce4db2b..693fd86fbde6 100644
--- a/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c
+++ b/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c
@@ -40,8 +40,13 @@ static void *spin_lock_thread(void *arg)
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run err");
+
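+ /* the arena spin lock program may return -EOPNOTSUPP on configurations
+ * it does not support; treat that as a skip rather than a failure
+ */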
+ if (topts.retval == -EOPNOTSUPP)
+ goto end;
+
ASSERT_EQ((int)topts.retval, 0, "test_run retval");
+end:
pthread_exit(arg);
}
@@ -63,6 +68,7 @@ static void test_arena_spin_lock_size(int size)
skel = arena_spin_lock__open_and_load();
if (!ASSERT_OK_PTR(skel, "arena_spin_lock__open_and_load"))
return;
+
if (skel->data->test_skip == 2) {
test__skip();
goto end;
@@ -86,6 +92,13 @@ static void test_arena_spin_lock_size(int size)
goto end_barrier;
}
+ if (skel->data->test_skip == 3) {
+ printf("%s:SKIP: CONFIG_NR_CPUS exceed the maximum supported by arena spinlock\n",
+ __func__);
+ test__skip();
+ goto end_barrier;
+ }
+
ASSERT_EQ(skel->bss->counter, repeat * nthreads, "check counter value");
end_barrier:
diff --git a/tools/testing/selftests/bpf/prog_tests/arena_strsearch.c b/tools/testing/selftests/bpf/prog_tests/arena_strsearch.c
new file mode 100644
index 000000000000..f81a0c066505
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/arena_strsearch.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include "arena_strsearch.skel.h"
+
+static void test_arena_str(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+ struct arena_strsearch *skel;
+ int ret;
+
+ skel = arena_strsearch__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "arena_strsearch__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.arena_strsearch), &opts);
+ ASSERT_OK(ret, "ret_add");
+ ASSERT_OK(opts.retval, "retval");
+ if (skel->bss->skip) {
+ printf("%s:SKIP:compiler doesn't support arena_cast\n", __func__);
+ test__skip();
+ }
+ arena_strsearch__destroy(skel);
+}
+
+void test_arena_strsearch(void)
+{
+ if (test__start_subtest("arena_strsearch"))
+ test_arena_str();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/arg_parsing.c b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c
index bb143de68875..e27d66b75fb1 100644
--- a/tools/testing/selftests/bpf/prog_tests/arg_parsing.c
+++ b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c
@@ -144,11 +144,17 @@ static void test_parse_test_list_file(void)
if (!ASSERT_OK(ferror(fp), "prepare tmp"))
goto out_fclose;
+ if (!ASSERT_OK(fsync(fileno(fp)), "fsync tmp"))
+ goto out_fclose;
+
init_test_filter_set(&set);
- ASSERT_OK(parse_test_list_file(tmpfile, &set, true), "parse file");
+ if (!ASSERT_OK(parse_test_list_file(tmpfile, &set, true), "parse file"))
+ goto out_fclose;
+
+ if (!ASSERT_EQ(set.cnt, 4, "test count"))
+ goto out_free_set;
- ASSERT_EQ(set.cnt, 4, "test count");
ASSERT_OK(strcmp("test_with_spaces", set.tests[0].name), "test 0 name");
ASSERT_EQ(set.tests[0].subtest_cnt, 0, "test 0 subtest count");
ASSERT_OK(strcmp("testA", set.tests[1].name), "test 1 name");
@@ -158,8 +164,8 @@ static void test_parse_test_list_file(void)
ASSERT_OK(strcmp("testB", set.tests[2].name), "test 2 name");
ASSERT_OK(strcmp("testC_no_eof_newline", set.tests[3].name), "test 3 name");
+out_free_set:
free_test_filter_set(&set);
-
out_fclose:
fclose(fp);
out_remove:
diff --git a/tools/testing/selftests/bpf/prog_tests/atomics.c b/tools/testing/selftests/bpf/prog_tests/atomics.c
index 13e101f370a1..92b5f378bfb8 100644
--- a/tools/testing/selftests/bpf/prog_tests/atomics.c
+++ b/tools/testing/selftests/bpf/prog_tests/atomics.c
@@ -165,11 +165,17 @@ static void test_xchg(struct atomics_lskel *skel)
void test_atomics(void)
{
struct atomics_lskel *skel;
+ int err;
- skel = atomics_lskel__open_and_load();
- if (!ASSERT_OK_PTR(skel, "atomics skeleton load"))
+ skel = atomics_lskel__open();
+ if (!ASSERT_OK_PTR(skel, "atomics skeleton open"))
return;
+ skel->keyring_id = KEY_SPEC_SESSION_KEYRING;
+ err = atomics_lskel__load(skel);
+ if (!ASSERT_OK(err, "atomics skeleton load"))
+ goto cleanup;
+
if (skel->data->skip_tests) {
printf("%s:SKIP:no ENABLE_ATOMICS_TESTS (missing Clang BPF atomics support)",
__func__);
diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
index cabc51c2ca6b..9e77e5da7097 100644
--- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c
+++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
@@ -3,6 +3,7 @@
#include "test_attach_kprobe_sleepable.skel.h"
#include "test_attach_probe_manual.skel.h"
#include "test_attach_probe.skel.h"
+#include "kprobe_write_ctx.skel.h"
/* this is how USDT semaphore is actually defined, except volatile modifier */
volatile unsigned short uprobe_ref_ctr __attribute__((unused)) __attribute((section(".probes")));
@@ -201,6 +202,31 @@ cleanup:
test_attach_probe_manual__destroy(skel);
}
+#ifdef __x86_64__
+/* attach kprobe/kretprobe long event name testings */
+static void test_attach_kprobe_write_ctx(void)
+{
+ struct kprobe_write_ctx *skel = NULL;
+ struct bpf_link *link = NULL;
+
+ skel = kprobe_write_ctx__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "kprobe_write_ctx__open_and_load"))
+ return;
+
+ link = bpf_program__attach_kprobe_opts(skel->progs.kprobe_write_ctx,
+ "bpf_fentry_test1", NULL);
+ if (!ASSERT_ERR_PTR(link, "bpf_program__attach_kprobe_opts"))
+ bpf_link__destroy(link);
+
+ kprobe_write_ctx__destroy(skel);
+}
+#else
+static void test_attach_kprobe_write_ctx(void)
+{
+ test__skip();
+}
+#endif
+
static void test_attach_probe_auto(struct test_attach_probe *skel)
{
struct bpf_link *uprobe_err_link;
@@ -406,6 +432,8 @@ void test_attach_probe(void)
test_attach_uprobe_long_event_name();
if (test__start_subtest("kprobe-long_name"))
test_attach_kprobe_long_event_name();
+ if (test__start_subtest("kprobe-write-ctx"))
+ test_attach_kprobe_write_ctx();
cleanup:
test_attach_probe__destroy(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c
index 67557cda2208..42b49870e520 100644
--- a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c
+++ b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c
@@ -13,7 +13,7 @@
static void test_fail_cases(void)
{
LIBBPF_OPTS(bpf_map_create_opts, opts);
- __u32 value;
+ __u32 value = 0;
int fd, err;
/* Invalid key size */
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
index 6befa870434b..75f4dff7d042 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
@@ -450,8 +450,7 @@ static void pe_subtest(struct test_bpf_cookie *skel)
attr.size = sizeof(attr);
attr.type = PERF_TYPE_SOFTWARE;
attr.config = PERF_COUNT_SW_CPU_CLOCK;
- attr.freq = 1;
- attr.sample_freq = 10000;
+ attr.sample_period = 100000;
pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
if (!ASSERT_GE(pfd, 0, "perf_fd"))
goto cleanup;
@@ -489,10 +488,28 @@ cleanup:
bpf_link__destroy(link);
}
+static int verify_tracing_link_info(int fd, u64 cookie)
+{
+ struct bpf_link_info info;
+ int err;
+ u32 len = sizeof(info);
+
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ if (!ASSERT_OK(err, "get_link_info"))
+ return -1;
+
+ if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_TRACING, "link_type"))
+ return -1;
+
+ ASSERT_EQ(info.tracing.cookie, cookie, "tracing_cookie");
+
+ return 0;
+}
+
static void tracing_subtest(struct test_bpf_cookie *skel)
{
__u64 cookie;
- int prog_fd;
+ int prog_fd, err;
int fentry_fd = -1, fexit_fd = -1, fmod_ret_fd = -1;
LIBBPF_OPTS(bpf_test_run_opts, opts);
LIBBPF_OPTS(bpf_link_create_opts, link_opts);
@@ -507,6 +524,10 @@ static void tracing_subtest(struct test_bpf_cookie *skel)
if (!ASSERT_GE(fentry_fd, 0, "fentry.link_create"))
goto cleanup;
+ err = verify_tracing_link_info(fentry_fd, cookie);
+ if (!ASSERT_OK(err, "verify_tracing_link_info"))
+ goto cleanup;
+
cookie = 0x20000000000000L;
prog_fd = bpf_program__fd(skel->progs.fexit_test1);
link_opts.tracing.cookie = cookie;
@@ -635,10 +656,29 @@ cleanup:
bpf_link__destroy(link);
}
+static int verify_raw_tp_link_info(int fd, u64 cookie)
+{
+ struct bpf_link_info info;
+ int err;
+ u32 len = sizeof(info);
+
+ memset(&info, 0, sizeof(info));
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ if (!ASSERT_OK(err, "get_link_info"))
+ return -1;
+
+ if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_RAW_TRACEPOINT, "link_type"))
+ return -1;
+
+ ASSERT_EQ(info.raw_tracepoint.cookie, cookie, "raw_tp_cookie");
+
+ return 0;
+}
+
static void raw_tp_subtest(struct test_bpf_cookie *skel)
{
__u64 cookie;
- int prog_fd, link_fd = -1;
+ int err, prog_fd, link_fd = -1;
struct bpf_link *link = NULL;
LIBBPF_OPTS(bpf_raw_tp_opts, raw_tp_opts);
LIBBPF_OPTS(bpf_raw_tracepoint_opts, opts);
@@ -656,6 +696,11 @@ static void raw_tp_subtest(struct test_bpf_cookie *skel)
goto cleanup;
usleep(1); /* trigger */
+
+ err = verify_raw_tp_link_info(link_fd, cookie);
+ if (!ASSERT_OK(err, "verify_raw_tp_link_info"))
+ goto cleanup;
+
close(link_fd); /* detach */
link_fd = -1;
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_gotox.c b/tools/testing/selftests/bpf/prog_tests/bpf_gotox.c
new file mode 100644
index 000000000000..d138cc7b1bda
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_gotox.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/in6.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+
+#include <sys/syscall.h>
+#include <bpf/bpf.h>
+
+#include "bpf_gotox.skel.h"
+
+static void __test_run(struct bpf_program *prog, void *ctx_in, size_t ctx_size_in)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .ctx_in = ctx_in,
+ .ctx_size_in = ctx_size_in,
+ );
+ int err, prog_fd;
+
+ prog_fd = bpf_program__fd(prog);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run_opts err");
+}
+
+static void __subtest(struct bpf_gotox *skel, void (*check)(struct bpf_gotox *))
+{
+ if (skel->data->skip)
+ test__skip();
+ else
+ check(skel);
+}
+
+static void check_simple(struct bpf_gotox *skel,
+ struct bpf_program *prog,
+ __u64 ctx_in,
+ __u64 expected)
+{
+ skel->bss->ret_user = 0;
+
+ __test_run(prog, &ctx_in, sizeof(ctx_in));
+
+ if (!ASSERT_EQ(skel->bss->ret_user, expected, "skel->bss->ret_user"))
+ return;
+}
+
+static void check_simple_fentry(struct bpf_gotox *skel,
+ struct bpf_program *prog,
+ __u64 ctx_in,
+ __u64 expected)
+{
+ skel->bss->in_user = ctx_in;
+ skel->bss->ret_user = 0;
+
+ /* trigger */
+ usleep(1);
+
+ if (!ASSERT_EQ(skel->bss->ret_user, expected, "skel->bss->ret_user"))
+ return;
+}
+
+/* validate that for two loads of the same jump table libbpf generates only one map */
+static void check_one_map_two_jumps(struct bpf_gotox *skel)
+{
+ struct bpf_prog_info prog_info;
+ struct bpf_map_info map_info;
+ __u32 len;
+ __u32 map_ids[16];
+ int prog_fd, map_fd;
+ int ret;
+ int i;
+ bool seen = false;
+
+ memset(&prog_info, 0, sizeof(prog_info));
+ prog_info.map_ids = (long)map_ids;
+ prog_info.nr_map_ids = ARRAY_SIZE(map_ids);
+ prog_fd = bpf_program__fd(skel->progs.one_map_two_jumps);
+ if (!ASSERT_GE(prog_fd, 0, "bpf_program__fd(one_map_two_jumps)"))
+ return;
+
+ len = sizeof(prog_info);
+ ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &len);
+ if (!ASSERT_OK(ret, "bpf_obj_get_info_by_fd(prog_fd)"))
+ return;
+
+ for (i = 0; i < prog_info.nr_map_ids; i++) {
+ map_fd = bpf_map_get_fd_by_id(map_ids[i]);
+ if (!ASSERT_GE(map_fd, 0, "bpf_map_get_fd_by_id"))
+ return;
+
+ len = sizeof(map_info);
+ memset(&map_info, 0, len);
+ ret = bpf_obj_get_info_by_fd(map_fd, &map_info, &len);
+ if (!ASSERT_OK(ret, "bpf_obj_get_info_by_fd(map_fd)")) {
+ close(map_fd);
+ return;
+ }
+
+ if (map_info.type == BPF_MAP_TYPE_INSN_ARRAY) {
+ if (!ASSERT_EQ(seen, false, "more than one INSN_ARRAY map")) {
+ close(map_fd);
+ return;
+ }
+ seen = true;
+ }
+ close(map_fd);
+ }
+
+ ASSERT_EQ(seen, true, "no INSN_ARRAY map");
+}
+
+static void check_one_switch(struct bpf_gotox *skel)
+{
+ __u64 in[] = {0, 1, 2, 3, 4, 5, 77};
+ __u64 out[] = {2, 3, 4, 5, 7, 19, 19};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(in); i++)
+ check_simple(skel, skel->progs.one_switch, in[i], out[i]);
+}
+
+static void check_one_switch_non_zero_sec_off(struct bpf_gotox *skel)
+{
+ __u64 in[] = {0, 1, 2, 3, 4, 5, 77};
+ __u64 out[] = {2, 3, 4, 5, 7, 19, 19};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(in); i++)
+ check_simple(skel, skel->progs.one_switch_non_zero_sec_off, in[i], out[i]);
+}
+
+static void check_two_switches(struct bpf_gotox *skel)
+{
+ __u64 in[] = {0, 1, 2, 3, 4, 5, 77};
+ __u64 out[] = {103, 104, 107, 205, 115, 1019, 1019};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(in); i++)
+ check_simple(skel, skel->progs.two_switches, in[i], out[i]);
+}
+
+static void check_big_jump_table(struct bpf_gotox *skel)
+{
+ __u64 in[] = {0, 11, 27, 31, 22, 45, 99};
+ __u64 out[] = {2, 3, 4, 5, 19, 19, 19};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(in); i++)
+ check_simple(skel, skel->progs.big_jump_table, in[i], out[i]);
+}
+
+static void check_one_jump_two_maps(struct bpf_gotox *skel)
+{
+ __u64 in[] = {0, 1, 2, 3, 4, 5, 77};
+ __u64 out[] = {12, 15, 7, 15, 12, 15, 15};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(in); i++)
+ check_simple(skel, skel->progs.one_jump_two_maps, in[i], out[i]);
+}
+
+static void check_static_global(struct bpf_gotox *skel)
+{
+ __u64 in[] = {0, 1, 2, 3, 4, 5, 77};
+ __u64 out[] = {2, 3, 4, 5, 7, 19, 19};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(in); i++)
+ check_simple(skel, skel->progs.use_static_global1, in[i], out[i]);
+ for (i = 0; i < ARRAY_SIZE(in); i++)
+ check_simple(skel, skel->progs.use_static_global2, in[i], out[i]);
+}
+
+static void check_nonstatic_global(struct bpf_gotox *skel)
+{
+ __u64 in[] = {0, 1, 2, 3, 4, 5, 77};
+ __u64 out[] = {2, 3, 4, 5, 7, 19, 19};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(in); i++)
+ check_simple(skel, skel->progs.use_nonstatic_global1, in[i], out[i]);
+
+ for (i = 0; i < ARRAY_SIZE(in); i++)
+ check_simple(skel, skel->progs.use_nonstatic_global2, in[i], out[i]);
+}
+
+static void check_other_sec(struct bpf_gotox *skel)
+{
+ struct bpf_link *link;
+ __u64 in[] = {0, 1, 2, 3, 4, 5, 77};
+ __u64 out[] = {2, 3, 4, 5, 7, 19, 19};
+ int i;
+
+ link = bpf_program__attach(skel->progs.simple_test_other_sec);
+ if (!ASSERT_OK_PTR(link, "link"))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(in); i++)
+ check_simple_fentry(skel, skel->progs.simple_test_other_sec, in[i], out[i]);
+
+ bpf_link__destroy(link);
+}
+
+static void check_static_global_other_sec(struct bpf_gotox *skel)
+{
+ struct bpf_link *link;
+ __u64 in[] = {0, 1, 2, 3, 4, 5, 77};
+ __u64 out[] = {2, 3, 4, 5, 7, 19, 19};
+ int i;
+
+ link = bpf_program__attach(skel->progs.use_static_global_other_sec);
+ if (!ASSERT_OK_PTR(link, "link"))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(in); i++)
+ check_simple_fentry(skel, skel->progs.use_static_global_other_sec, in[i], out[i]);
+
+ bpf_link__destroy(link);
+}
+
+static void check_nonstatic_global_other_sec(struct bpf_gotox *skel)
+{
+ struct bpf_link *link;
+ __u64 in[] = {0, 1, 2, 3, 4, 5, 77};
+ __u64 out[] = {2, 3, 4, 5, 7, 19, 19};
+ int i;
+
+ link = bpf_program__attach(skel->progs.use_nonstatic_global_other_sec);
+ if (!ASSERT_OK_PTR(link, "link"))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(in); i++)
+ check_simple_fentry(skel, skel->progs.use_nonstatic_global_other_sec, in[i], out[i]);
+
+ bpf_link__destroy(link);
+}
+
+void test_bpf_gotox(void)
+{
+ struct bpf_gotox *skel;
+ int ret;
+
+ skel = bpf_gotox__open();
+ if (!ASSERT_NEQ(skel, NULL, "bpf_gotox__open"))
+ return;
+
+ ret = bpf_gotox__load(skel);
+ if (!ASSERT_OK(ret, "bpf_gotox__load"))
+ return;
+
+ skel->bss->pid = getpid();
+
+ if (test__start_subtest("one-switch"))
+ __subtest(skel, check_one_switch);
+
+ if (test__start_subtest("one-switch-non-zero-sec-offset"))
+ __subtest(skel, check_one_switch_non_zero_sec_off);
+
+ if (test__start_subtest("two-switches"))
+ __subtest(skel, check_two_switches);
+
+ if (test__start_subtest("big-jump-table"))
+ __subtest(skel, check_big_jump_table);
+
+ if (test__start_subtest("static-global"))
+ __subtest(skel, check_static_global);
+
+ if (test__start_subtest("nonstatic-global"))
+ __subtest(skel, check_nonstatic_global);
+
+ if (test__start_subtest("other-sec"))
+ __subtest(skel, check_other_sec);
+
+ if (test__start_subtest("static-global-other-sec"))
+ __subtest(skel, check_static_global_other_sec);
+
+ if (test__start_subtest("nonstatic-global-other-sec"))
+ __subtest(skel, check_nonstatic_global_other_sec);
+
+ if (test__start_subtest("one-jump-two-maps"))
+ __subtest(skel, check_one_jump_two_maps);
+
+ if (test__start_subtest("one-map-two-jumps"))
+ __subtest(skel, check_one_map_two_jumps);
+
+ bpf_gotox__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_insn_array.c b/tools/testing/selftests/bpf/prog_tests/bpf_insn_array.c
new file mode 100644
index 000000000000..269870bec941
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_insn_array.c
@@ -0,0 +1,504 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <bpf/bpf.h>
+#include <test_progs.h>
+
+#ifdef __x86_64__
+static int map_create(__u32 map_type, __u32 max_entries)
+{
+ const char *map_name = "insn_array";
+ __u32 key_size = 4;
+ __u32 value_size = sizeof(struct bpf_insn_array_value);
+
+ return bpf_map_create(map_type, map_name, key_size, value_size, max_entries, NULL);
+}
+
+static int prog_load(struct bpf_insn *insns, __u32 insn_cnt, int *fd_array, __u32 fd_array_cnt)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, opts);
+
+ opts.fd_array = fd_array;
+ opts.fd_array_cnt = fd_array_cnt;
+
+ return bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, &opts);
+}
+
+static void __check_success(struct bpf_insn *insns, __u32 insn_cnt, __u32 *map_in, __u32 *map_out)
+{
+ struct bpf_insn_array_value val = {};
+ int prog_fd = -1, map_fd, i;
+
+ map_fd = map_create(BPF_MAP_TYPE_INSN_ARRAY, insn_cnt);
+ if (!ASSERT_GE(map_fd, 0, "map_create"))
+ return;
+
+ for (i = 0; i < insn_cnt; i++) {
+ val.orig_off = map_in[i];
+ if (!ASSERT_EQ(bpf_map_update_elem(map_fd, &i, &val, 0), 0, "bpf_map_update_elem"))
+ goto cleanup;
+ }
+
+ if (!ASSERT_EQ(bpf_map_freeze(map_fd), 0, "bpf_map_freeze"))
+ goto cleanup;
+
+ prog_fd = prog_load(insns, insn_cnt, &map_fd, 1);
+ if (!ASSERT_GE(prog_fd, 0, "bpf(BPF_PROG_LOAD)"))
+ goto cleanup;
+
+ for (i = 0; i < insn_cnt; i++) {
+ char buf[64];
+
+ if (!ASSERT_EQ(bpf_map_lookup_elem(map_fd, &i, &val), 0, "bpf_map_lookup_elem"))
+ goto cleanup;
+
+ snprintf(buf, sizeof(buf), "val.xlated_off should equal map_out[%d]", i);
+ ASSERT_EQ(val.xlated_off, map_out[i], buf);
+ }
+
+cleanup:
+ close(prog_fd);
+ close(map_fd);
+}
+
+/*
+ * Load a program that will not be mangled in any way by the verifier. Add an
+ * insn_array map pointing to every instruction. Check that the map hasn't
+ * changed after the program load.
+ */
+static void check_one_to_one_mapping(void)
+{
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ __u32 map_in[] = {0, 1, 2, 3, 4, 5};
+ __u32 map_out[] = {0, 1, 2, 3, 4, 5};
+
+ __check_success(insns, ARRAY_SIZE(insns), map_in, map_out);
+}
+
+/*
+ * Load a program with two patches (get jiffies, for simplicity). Add an
+ * insn_array map pointing to every instruction. Check how it was changed
+ * after the program load.
+ */
+static void check_simple(void)
+{
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ __u32 map_in[] = {0, 1, 2, 3, 4, 5};
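+ /* each jiffies64 call is presumably inlined by the verifier into 3
+ * insns, so every later offset shifts by 2: 2->4, 3->5, 4->8, 5->9
+ */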
+ __u32 map_out[] = {0, 1, 4, 5, 8, 9};
+
+ __check_success(insns, ARRAY_SIZE(insns), map_in, map_out);
+}
+
+/*
+ * The verifier can delete code in two cases: nops and dead code. From the
+ * insn array's point of view, the two cases are the same, so test using
+ * the simplest method: loading some nops.
+ */
+static void check_deletions(void)
+{
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0), /* nop */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0), /* nop */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ __u32 map_in[] = {0, 1, 2, 3, 4, 5};
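+ /* a deleted insn is expected to be reported with xlated_off == -1 */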
+ __u32 map_out[] = {0, -1, 1, -1, 2, 3};
+
+ __check_success(insns, ARRAY_SIZE(insns), map_in, map_out);
+}
+
+/*
+ * Same test as check_deletions, but also add calls which make the verifier insert instructions
+ */
+static void check_deletions_with_functions(void)
+{
+ struct bpf_insn insns[] = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0), /* nop */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0), /* nop */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0), /* nop */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0), /* nop */
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ };
+ __u32 map_in[] = { 0, 1, 2, 3, 4, 5, /* func */ 6, 7, 8, 9, 10};
+ __u32 map_out[] = {-1, 0, -1, 3, 4, 5, /* func */ -1, 6, -1, 9, 10};
+
+ __check_success(insns, ARRAY_SIZE(insns), map_in, map_out);
+}
+
+/*
+ * Try to load a program with a map which points outside of the program
+ */
+static void check_out_of_bounds_index(void)
+{
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ int prog_fd, map_fd;
+ struct bpf_insn_array_value val = {};
+ int key;
+
+ map_fd = map_create(BPF_MAP_TYPE_INSN_ARRAY, 1);
+ if (!ASSERT_GE(map_fd, 0, "map_create"))
+ return;
+
+ key = 0;
+ val.orig_off = ARRAY_SIZE(insns); /* too big */
+ if (!ASSERT_EQ(bpf_map_update_elem(map_fd, &key, &val, 0), 0, "bpf_map_update_elem"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(bpf_map_freeze(map_fd), 0, "bpf_map_freeze"))
+ goto cleanup;
+
+ prog_fd = prog_load(insns, ARRAY_SIZE(insns), &map_fd, 1);
+ if (!ASSERT_EQ(prog_fd, -EINVAL, "program should have been rejected (prog_fd != -EINVAL)")) {
+ close(prog_fd);
+ goto cleanup;
+ }
+
+cleanup:
+ close(map_fd);
+}
+
+/*
+ * Try to load a program with a map which points to the middle of a 16-byte insn
+ */
+static void check_mid_insn_index(void)
+{
+ struct bpf_insn insns[] = {
+ BPF_LD_IMM64(BPF_REG_0, 0), /* 2 x 8 */
+ BPF_EXIT_INSN(),
+ };
+ int prog_fd, map_fd;
+ struct bpf_insn_array_value val = {};
+ int key;
+
+ map_fd = map_create(BPF_MAP_TYPE_INSN_ARRAY, 1);
+ if (!ASSERT_GE(map_fd, 0, "map_create"))
+ return;
+
+ key = 0;
+ val.orig_off = 1; /* middle of 16-byte instruction */
+ if (!ASSERT_EQ(bpf_map_update_elem(map_fd, &key, &val, 0), 0, "bpf_map_update_elem"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(bpf_map_freeze(map_fd), 0, "bpf_map_freeze"))
+ goto cleanup;
+
+ prog_fd = prog_load(insns, ARRAY_SIZE(insns), &map_fd, 1);
+ if (!ASSERT_EQ(prog_fd, -EINVAL, "program should have been rejected (prog_fd != -EINVAL)")) {
+ close(prog_fd);
+ goto cleanup;
+ }
+
+cleanup:
+ close(map_fd);
+}
+
+static void check_incorrect_index(void)
+{
+ check_out_of_bounds_index();
+ check_mid_insn_index();
+}
+
+static int set_bpf_jit_harden(char *level)
+{
+ char old_level;
+ int err = -1;
+ int fd = -1;
+
+ fd = open("/proc/sys/net/core/bpf_jit_harden", O_RDWR | O_NONBLOCK);
+ if (fd < 0) {
+ ASSERT_FAIL("open .../bpf_jit_harden returned %d (errno=%d)", fd, errno);
+ return -1;
+ }
+
+ err = read(fd, &old_level, 1);
+ if (err != 1) {
+ ASSERT_FAIL("read from .../bpf_jit_harden returned %d (errno=%d)", err, errno);
+ err = -1;
+ goto end;
+ }
+
+ lseek(fd, 0, SEEK_SET);
+
+ err = write(fd, level, 1);
+ if (err != 1) {
+ ASSERT_FAIL("write to .../bpf_jit_harden returned %d (errno=%d)", err, errno);
+ err = -1;
+ goto end;
+ }
+
+ err = 0;
+ *level = old_level;
+end:
+ if (fd >= 0)
+ close(fd);
+ return err;
+}
+
+static void check_blindness(void)
+{
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ };
+ int prog_fd = -1, map_fd;
+ struct bpf_insn_array_value val = {};
+ char bpf_jit_harden = '@'; /* non-existing value */
+ int i;
+
+ map_fd = map_create(BPF_MAP_TYPE_INSN_ARRAY, ARRAY_SIZE(insns));
+ if (!ASSERT_GE(map_fd, 0, "map_create"))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(insns); i++) {
+ val.orig_off = i;
+ if (!ASSERT_EQ(bpf_map_update_elem(map_fd, &i, &val, 0), 0, "bpf_map_update_elem"))
+ goto cleanup;
+ }
+
+ if (!ASSERT_EQ(bpf_map_freeze(map_fd), 0, "bpf_map_freeze"))
+ goto cleanup;
+
+ bpf_jit_harden = '2';
+ if (set_bpf_jit_harden(&bpf_jit_harden)) {
+ bpf_jit_harden = '@'; /* open, read or write failed => no write was done */
+ goto cleanup;
+ }
+
+ prog_fd = prog_load(insns, ARRAY_SIZE(insns), &map_fd, 1);
+ if (!ASSERT_GE(prog_fd, 0, "bpf(BPF_PROG_LOAD)"))
+ goto cleanup;
+
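+ /*
+ * With bpf_jit_harden set to 2, each insn carrying an immediate is
+ * presumably blinded into 3 insns (two to rebuild the constant plus
+ * the rewritten original), hence the 3*i expectation below.
+ */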
+ for (i = 0; i < ARRAY_SIZE(insns); i++) {
+ char fmt[32];
+
+ if (!ASSERT_EQ(bpf_map_lookup_elem(map_fd, &i, &val), 0, "bpf_map_lookup_elem"))
+ goto cleanup;
+
+ snprintf(fmt, sizeof(fmt), "val should equal 3*%d", i);
+ ASSERT_EQ(val.xlated_off, i * 3, fmt);
+ }
+
+cleanup:
+ /* restore the old one */
+ if (bpf_jit_harden != '@')
+ set_bpf_jit_harden(&bpf_jit_harden);
+
+ close(prog_fd);
+ close(map_fd);
+}
+
+/* Once the map has been initialized, it must be frozen before a program can use it */
+static void check_load_unfrozen_map(void)
+{
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ int prog_fd = -1, map_fd;
+ struct bpf_insn_array_value val = {};
+ int i;
+
+ map_fd = map_create(BPF_MAP_TYPE_INSN_ARRAY, ARRAY_SIZE(insns));
+ if (!ASSERT_GE(map_fd, 0, "map_create"))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(insns); i++) {
+ val.orig_off = i;
+ if (!ASSERT_EQ(bpf_map_update_elem(map_fd, &i, &val, 0), 0, "bpf_map_update_elem"))
+ goto cleanup;
+ }
+
+ prog_fd = prog_load(insns, ARRAY_SIZE(insns), &map_fd, 1);
+ if (!ASSERT_EQ(prog_fd, -EINVAL, "program should have been rejected (prog_fd != -EINVAL)"))
+ goto cleanup;
+
+ /* correctness: now freeze the map, the program should load fine */
+
+ if (!ASSERT_EQ(bpf_map_freeze(map_fd), 0, "bpf_map_freeze"))
+ goto cleanup;
+
+ prog_fd = prog_load(insns, ARRAY_SIZE(insns), &map_fd, 1);
+ if (!ASSERT_GE(prog_fd, 0, "bpf(BPF_PROG_LOAD)"))
+ goto cleanup;
+
+ for (i = 0; i < ARRAY_SIZE(insns); i++) {
+ if (!ASSERT_EQ(bpf_map_lookup_elem(map_fd, &i, &val), 0, "bpf_map_lookup_elem"))
+ goto cleanup;
+
+ ASSERT_EQ(val.xlated_off, i, "val should equal i");
+ }
+
+cleanup:
+ close(prog_fd);
+ close(map_fd);
+}
+
+/* Map can be used only by one BPF program */
+static void check_no_map_reuse(void)
+{
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ int prog_fd = -1, map_fd, extra_fd = -1;
+ struct bpf_insn_array_value val = {};
+ int i;
+
+ map_fd = map_create(BPF_MAP_TYPE_INSN_ARRAY, ARRAY_SIZE(insns));
+ if (!ASSERT_GE(map_fd, 0, "map_create"))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(insns); i++) {
+ val.orig_off = i;
+ if (!ASSERT_EQ(bpf_map_update_elem(map_fd, &i, &val, 0), 0, "bpf_map_update_elem"))
+ goto cleanup;
+ }
+
+ if (!ASSERT_EQ(bpf_map_freeze(map_fd), 0, "bpf_map_freeze"))
+ goto cleanup;
+
+ prog_fd = prog_load(insns, ARRAY_SIZE(insns), &map_fd, 1);
+ if (!ASSERT_GE(prog_fd, 0, "bpf(BPF_PROG_LOAD)"))
+ goto cleanup;
+
+ for (i = 0; i < ARRAY_SIZE(insns); i++) {
+ if (!ASSERT_EQ(bpf_map_lookup_elem(map_fd, &i, &val), 0, "bpf_map_lookup_elem"))
+ goto cleanup;
+
+ ASSERT_EQ(val.xlated_off, i, "val should equal i");
+ }
+
+ extra_fd = prog_load(insns, ARRAY_SIZE(insns), &map_fd, 1);
+ if (!ASSERT_EQ(extra_fd, -EBUSY, "program should have been rejected (extra_fd != -EBUSY)"))
+ goto cleanup;
+
+ /* correctness: check that prog is still loadable without fd_array */
+ extra_fd = prog_load(insns, ARRAY_SIZE(insns), NULL, 0);
+ if (!ASSERT_GE(extra_fd, 0, "bpf(BPF_PROG_LOAD): expected no error"))
+ goto cleanup;
+
+cleanup:
+ close(extra_fd);
+ close(prog_fd);
+ close(map_fd);
+}
+
+static void check_bpf_no_lookup(void)
+{
+ struct bpf_insn insns[] = {
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ };
+ int prog_fd = -1, map_fd;
+
+ map_fd = map_create(BPF_MAP_TYPE_INSN_ARRAY, 1);
+ if (!ASSERT_GE(map_fd, 0, "map_create"))
+ return;
+
+ insns[0].imm = map_fd;
+
+ if (!ASSERT_EQ(bpf_map_freeze(map_fd), 0, "bpf_map_freeze"))
+ goto cleanup;
+
+ prog_fd = prog_load(insns, ARRAY_SIZE(insns), NULL, 0);
+ if (!ASSERT_EQ(prog_fd, -EINVAL, "program should have been rejected (prog_fd != -EINVAL)"))
+ goto cleanup;
+
+ /* correctness: check that prog is still loadable with normal map */
+ close(map_fd);
+ map_fd = map_create(BPF_MAP_TYPE_ARRAY, 1);
+ insns[0].imm = map_fd;
+ prog_fd = prog_load(insns, ARRAY_SIZE(insns), NULL, 0);
+ if (!ASSERT_GE(prog_fd, 0, "bpf(BPF_PROG_LOAD)"))
+ goto cleanup;
+
+cleanup:
+ close(prog_fd);
+ close(map_fd);
+}
+
+static void check_bpf_side(void)
+{
+ check_bpf_no_lookup();
+}
+
+static void __test_bpf_insn_array(void)
+{
+ /* Test if offsets are adjusted properly */
+
+ if (test__start_subtest("one2one"))
+ check_one_to_one_mapping();
+
+ if (test__start_subtest("simple"))
+ check_simple();
+
+ if (test__start_subtest("deletions"))
+ check_deletions();
+
+ if (test__start_subtest("deletions-with-functions"))
+ check_deletions_with_functions();
+
+ if (test__start_subtest("blindness"))
+ check_blindness();
+
+ /* Check all kinds of operations and related restrictions */
+
+ if (test__start_subtest("incorrect-index"))
+ check_incorrect_index();
+
+ if (test__start_subtest("load-unfrozen-map"))
+ check_load_unfrozen_map();
+
+ if (test__start_subtest("no-map-reuse"))
+ check_no_map_reuse();
+
+ if (test__start_subtest("bpf-side-ops"))
+ check_bpf_side();
+}
+#else
+static void __test_bpf_insn_array(void)
+{
+ test__skip();
+}
+#endif
+
+void test_bpf_insn_array(void)
+{
+ __test_bpf_insn_array();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index add4a18c33bd..5225d69bf79b 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -323,7 +323,7 @@ static void test_task_pidfd(void)
static void test_task_sleepable(void)
{
struct bpf_iter_tasks *skel;
- int pid, status, err, data_pipe[2], finish_pipe[2], c;
+ int pid, status, err, data_pipe[2], finish_pipe[2], c = 0;
char *test_data = NULL;
char *test_data_long = NULL;
char *data[2];
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c
index fe2c502e5089..ecc3d47919ad 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c
@@ -78,7 +78,7 @@ static int test_setup_uffd(void *fault_addr)
}
uffd_register.range.start = (unsigned long)fault_addr;
- uffd_register.range.len = 4096;
+ uffd_register.range.len = getpagesize();
uffd_register.mode = UFFDIO_REGISTER_MODE_MISSING;
if (ioctl(uffd, UFFDIO_REGISTER, &uffd_register)) {
close(uffd);
diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c
index 8a9ba4292109..054ecb6b1e9f 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf.c
@@ -7496,6 +7496,71 @@ static struct btf_dedup_test dedup_tests[] = {
},
},
{
+ .descr = "dedup: recursive typedef",
+ /*
+ * This test simulates a recursive typedef, which in Go is defined as:
+ *
+ * type Foo func() Foo
+ *
+ * In BTF terms, this is represented as a TYPEDEF referencing
+ * a FUNC_PROTO that returns the same TYPEDEF.
+ */
+ .input = {
+ .raw_types = {
+ /*
+ * [1] typedef Foo -> func() Foo
+ * [2] func_proto() -> Foo
+ * [3] typedef Foo -> func() Foo
+ * [4] func_proto() -> Foo
+ */
+ BTF_TYPEDEF_ENC(NAME_NTH(1), 2), /* [1] */
+ BTF_FUNC_PROTO_ENC(1, 0), /* [2] */
+ BTF_TYPEDEF_ENC(NAME_NTH(1), 4), /* [3] */
+ BTF_FUNC_PROTO_ENC(3, 0), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0Foo"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_TYPEDEF_ENC(NAME_NTH(1), 2), /* [1] */
+ BTF_FUNC_PROTO_ENC(1, 0), /* [2] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0Foo"),
+ },
+},
+{
+ .descr = "dedup: typedef",
+ /*
+ * // CU 1:
+ * typedef int foo;
+ *
+ * // CU 2:
+ * typedef int foo;
+ */
+ .input = {
+ .raw_types = {
+ /* CU 1 */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPEDEF_ENC(NAME_NTH(1), 1), /* [2] */
+ /* CU 2 */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [3] */
+ BTF_TYPEDEF_ENC(NAME_NTH(1), 3), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0foo"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPEDEF_ENC(NAME_NTH(1), 1), /* [2] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0foo"),
+ },
+},
+{
.descr = "dedup: typedef tags",
.input = {
.raw_types = {
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
index c0a776feec23..10cba526d3e6 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
@@ -63,7 +63,7 @@ static int test_btf_dump_case(int n, struct btf_dump_test_case *t)
/* tests with t->known_ptr_sz have no "long" or "unsigned long" type,
* so it's impossible to determine correct pointer size; but if they
- * do, it should be 8 regardless of host architecture, becaues BPF
+ * do, it should be 8 regardless of host architecture, because BPF
* target is always 64-bit
*/
if (!t->known_ptr_sz) {
@@ -879,6 +879,122 @@ static void test_btf_dump_var_data(struct btf *btf, struct btf_dump *d,
"static int bpf_cgrp_storage_busy = (int)2", 2);
}
+struct btf_dump_string_ctx {
+ struct btf *btf;
+ struct btf_dump *d;
+ char *str;
+ struct btf_dump_type_data_opts *opts;
+ int array_id;
+};
+
+static int btf_dump_one_string(struct btf_dump_string_ctx *ctx,
+ char *ptr, size_t ptr_sz,
+ const char *expected_val)
+{
+ size_t type_sz;
+ int ret;
+
+ ctx->str[0] = '\0';
+ type_sz = btf__resolve_size(ctx->btf, ctx->array_id);
+ ret = btf_dump__dump_type_data(ctx->d, ctx->array_id, ptr, ptr_sz, ctx->opts);
+ if (type_sz <= ptr_sz) {
+ if (!ASSERT_EQ(ret, type_sz, "failed/unexpected type_sz"))
+ return -EINVAL;
+ }
+ if (!ASSERT_STREQ(ctx->str, expected_val, "ensure expected/actual match"))
+ return -EFAULT;
+ return 0;
+}
+
+static void btf_dump_strings(struct btf_dump_string_ctx *ctx)
+{
+ struct btf_dump_type_data_opts *opts = ctx->opts;
+
+ opts->emit_strings = true;
+
+ opts->compact = true;
+ opts->emit_zeroes = false;
+
+ opts->skip_names = false;
+ btf_dump_one_string(ctx, "foo", 4, "(char[4])\"foo\"");
+
+ opts->skip_names = true;
+ btf_dump_one_string(ctx, "foo", 4, "\"foo\"");
+
+ /* This should have no effect. */
+ opts->emit_zeroes = false;
+ btf_dump_one_string(ctx, "foo", 4, "\"foo\"");
+
+ /* This should have no effect. */
+ opts->compact = false;
+ btf_dump_one_string(ctx, "foo", 4, "\"foo\"");
+
+ /* Non-printable characters come out as hex. */
+ btf_dump_one_string(ctx, "fo\xff", 4, "\"fo\\xff\"");
+ btf_dump_one_string(ctx, "fo\x7", 4, "\"fo\\x07\"");
+
+ /*
+ * Strings that are too long for the specified type ("char[4]")
+ * should fall back to the current behavior.
+ */
+ opts->compact = true;
+ btf_dump_one_string(ctx, "abcde", 6, "['a','b','c','d',]");
+
+ /*
+ * Strings that are too short for the specified type ("char[4]")
+ * should work normally.
+ */
+ btf_dump_one_string(ctx, "ab", 3, "\"ab\"");
+
+ /* Non-NUL-terminated arrays don't get printed as strings. */
+ char food[4] = { 'f', 'o', 'o', 'd' };
+ char bye[3] = { 'b', 'y', 'e' };
+
+ btf_dump_one_string(ctx, food, 4, "['f','o','o','d',]");
+ btf_dump_one_string(ctx, bye, 3, "['b','y','e',]");
+
+ /* The embedded NUL should terminate the string. */
+ char embed[4] = { 'f', 'o', '\0', 'd' };
+
+ btf_dump_one_string(ctx, embed, 4, "\"fo\"");
+}
+
+static void test_btf_dump_string_data(void)
+{
+ struct test_ctx t = {};
+ char str[STRSIZE];
+ struct btf_dump *d;
+ DECLARE_LIBBPF_OPTS(btf_dump_type_data_opts, opts);
+ struct btf_dump_string_ctx ctx;
+ int char_id, int_id, array_id;
+
+ if (test_ctx__init(&t))
+ return;
+
+ d = btf_dump__new(t.btf, btf_dump_snprintf, str, NULL);
+ if (!ASSERT_OK_PTR(d, "could not create BTF dump"))
+ return;
+
+ /* Generate BTF for a four-element char array. */
+ char_id = btf__add_int(t.btf, "char", 1, BTF_INT_CHAR);
+ ASSERT_EQ(char_id, 1, "char_id");
+ int_id = btf__add_int(t.btf, "int", 4, BTF_INT_SIGNED);
+ ASSERT_EQ(int_id, 2, "int_id");
+ array_id = btf__add_array(t.btf, int_id, char_id, 4);
+ ASSERT_EQ(array_id, 3, "array_id");
+
+ ctx.btf = t.btf;
+ ctx.d = d;
+ ctx.str = str;
+ ctx.opts = &opts;
+ ctx.array_id = array_id;
+
+ btf_dump_strings(&ctx);
+
+ btf_dump__free(d);
+ test_ctx__free(&t);
+}
+
static void test_btf_datasec(struct btf *btf, struct btf_dump *d, char *str,
const char *name, const char *expected_val,
void *data, size_t data_sz)
@@ -970,6 +1086,8 @@ void test_btf_dump() {
test_btf_dump_struct_data(btf, d, str);
if (test__start_subtest("btf_dump: var_data"))
test_btf_dump_var_data(btf, d, str);
+ if (test__start_subtest("btf_dump: string_data"))
+ test_btf_dump_string_data();
btf_dump__free(d);
btf__free(btf);
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_split.c b/tools/testing/selftests/bpf/prog_tests/btf_split.c
index 3696fb9a05ed..2d47cad50a51 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_split.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_split.c
@@ -12,11 +12,45 @@ static void btf_dump_printf(void *ctx, const char *fmt, va_list args)
vfprintf(ctx, fmt, args);
}
+/* Write raw BTF to file, return number of bytes written or negative errno */
+static ssize_t btf_raw_write(struct btf *btf, char *file)
+{
+ ssize_t written = 0;
+ const void *data;
+ __u32 size = 0;
+ int fd, ret;
+
+ fd = mkstemp(file);
+ if (!ASSERT_GE(fd, 0, "create_file"))
+ return -errno;
+
+ data = btf__raw_data(btf, &size);
+ if (!ASSERT_OK_PTR(data, "btf__raw_data")) {
+ close(fd);
+ return -EINVAL;
+ }
+ while (written < size) {
+ ret = write(fd, data + written, size - written);
+ if (!ASSERT_GE(ret, 0, "write succeeded")) {
+ close(fd);
+ return -errno;
+ }
+ written += ret;
+ }
+ close(fd);
+ return written;
+}
+
static void __test_btf_split(bool multi)
{
+ char multisplit_btf_file[] = "/tmp/test_btf_multisplit.XXXXXX";
+ char split_btf_file[] = "/tmp/test_btf_split.XXXXXX";
+ char base_btf_file[] = "/tmp/test_btf_base.XXXXXX";
+ ssize_t multisplit_btf_sz = 0, split_btf_sz = 0, base_btf_sz = 0;
struct btf_dump *d = NULL;
- const struct btf_type *t;
- struct btf *btf1, *btf2, *btf3 = NULL;
+ const struct btf_type *t, *ot;
+ struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL;
+ struct btf *btf4 = NULL, *btf5 = NULL, *btf6 = NULL;
int str_off, i, err;
btf1 = btf__new_empty();
@@ -123,6 +157,45 @@ static void __test_btf_split(bool multi)
" int uf2;\n"
"};\n\n", "c_dump");
+ /* write base, split BTFs to files and ensure parsing succeeds */
+ base_btf_sz = btf_raw_write(btf1, base_btf_file);
+ if (base_btf_sz < 0)
+ goto cleanup;
+ split_btf_sz = btf_raw_write(btf2, split_btf_file);
+ if (split_btf_sz < 0)
+ goto cleanup;
+ btf4 = btf__parse(base_btf_file, NULL);
+ if (!ASSERT_OK_PTR(btf4, "parse_base"))
+ goto cleanup;
+ btf5 = btf__parse_split(split_btf_file, btf4);
+ if (!ASSERT_OK_PTR(btf5, "parse_split"))
+ goto cleanup;
+ if (multi) {
+ multisplit_btf_sz = btf_raw_write(btf3, multisplit_btf_file);
+ if (multisplit_btf_sz < 0)
+ goto cleanup;
+ btf6 = btf__parse_split(multisplit_btf_file, btf5);
+ if (!ASSERT_OK_PTR(btf6, "parse_multisplit"))
+ goto cleanup;
+ } else {
+ btf6 = btf5;
+ }
+
+ if (!ASSERT_EQ(btf__type_cnt(btf3), btf__type_cnt(btf6), "cmp_type_cnt"))
+ goto cleanup;
+
+ /* compare parsed to original BTF */
+ for (i = 1; i < btf__type_cnt(btf6); i++) {
+ t = btf__type_by_id(btf6, i);
+ if (!ASSERT_OK_PTR(t, "type_in_parsed_btf"))
+ goto cleanup;
+ ot = btf__type_by_id(btf3, i);
+ if (!ASSERT_OK_PTR(ot, "type_in_orig_btf"))
+ goto cleanup;
+ if (!ASSERT_EQ(memcmp(t, ot, sizeof(*ot)), 0, "cmp_parsed_orig_btf"))
+ goto cleanup;
+ }
+
cleanup:
if (dump_buf_file)
fclose(dump_buf_file);
@@ -132,6 +205,16 @@ cleanup:
btf__free(btf2);
if (btf2 != btf3)
btf__free(btf3);
+ btf__free(btf4);
+ btf__free(btf5);
+ if (btf5 != btf6)
+ btf__free(btf6);
+ if (base_btf_sz > 0)
+ unlink(base_btf_file);
+ if (split_btf_sz > 0)
+ unlink(split_btf_file);
+ if (multisplit_btf_sz > 0)
+ unlink(multisplit_btf_file);
}
void test_btf_split(void)
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_mprog_opts.c b/tools/testing/selftests/bpf/prog_tests/cgroup_mprog_opts.c
new file mode 100644
index 000000000000..bb60704a3ef9
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_mprog_opts.c
@@ -0,0 +1,617 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include "cgroup_helpers.h"
+#include "cgroup_mprog.skel.h"
+
+static void assert_mprog_count(int cg, int atype, int expected)
+{
+ __u32 count = 0, attach_flags = 0;
+ int err;
+
+ err = bpf_prog_query(cg, atype, 0, &attach_flags,
+ NULL, &count);
+ ASSERT_EQ(count, expected, "count");
+ ASSERT_EQ(err, 0, "prog_query");
+}
+
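+/*
+ * Summary of the mprog attach options exercised below (paraphrasing the
+ * UAPI semantics rather than quoting them): BPF_F_BEFORE/BPF_F_AFTER
+ * position the new program relative to .relative_fd/.relative_id, or
+ * relative to the head/tail of the list when no relative program is
+ * given, and .expected_revision makes the attach fail if the attach list
+ * changed in the meantime. A minimal sketch, with fd_new, fd_anchor and
+ * rev as placeholders:
+ *
+ *	LIBBPF_OPTS(bpf_prog_attach_opts, opta,
+ *		.flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE,
+ *		.relative_fd = fd_anchor,
+ *		.expected_revision = rev,
+ *	);
+ *	err = bpf_prog_attach_opts(fd_new, cg, atype, &opta);
+ */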
+static void test_prog_attach_detach(int atype)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
+ struct cgroup_mprog *skel;
+ __u32 prog_ids[10];
+ int cg, err;
+
+ cg = test__join_cgroup("/prog_attach_detach");
+ if (!ASSERT_GE(cg, 0, "join_cgroup /prog_attach_detach"))
+ return;
+
+ skel = cgroup_mprog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.getsockopt_1);
+ fd2 = bpf_program__fd(skel->progs.getsockopt_2);
+ fd3 = bpf_program__fd(skel->progs.getsockopt_3);
+ fd4 = bpf_program__fd(skel->progs.getsockopt_4);
+
+ id1 = id_from_prog_fd(fd1);
+ id2 = id_from_prog_fd(fd2);
+ id3 = id_from_prog_fd(fd3);
+ id4 = id_from_prog_fd(fd4);
+
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE | BPF_F_AFTER,
+ .expected_revision = 1,
+ );
+
+ /* ordering: [fd1] */
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+
+ assert_mprog_count(cg, atype, 1);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE,
+ .expected_revision = 2,
+ );
+
+ /* ordering: [fd2, fd1] */
+ err = bpf_prog_attach_opts(fd2, cg, atype, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup1;
+
+ assert_mprog_count(cg, atype, 2);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER,
+ .relative_fd = fd2,
+ .expected_revision = 3,
+ );
+
+ /* ordering: [fd2, fd3, fd1] */
+ err = bpf_prog_attach_opts(fd3, cg, atype, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup2;
+
+ assert_mprog_count(cg, atype, 3);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI,
+ .expected_revision = 4,
+ );
+
+ /* ordering: [fd2, fd3, fd1, fd4] */
+ err = bpf_prog_attach_opts(fd4, cg, atype, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup3;
+
+ assert_mprog_count(cg, atype, 4);
+
+ /* retrieve optq.prog_cnt */
+ err = bpf_prog_query_opts(cg, atype, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+	/* optq.prog_cnt from the query above sizes the id fetch below */
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.prog_ids = prog_ids;
+ err = bpf_prog_query_opts(cg, atype, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(optq.count, 4, "count");
+ ASSERT_EQ(optq.revision, 5, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], id1, "prog_ids[2]");
+ ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
+ ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+ ASSERT_EQ(optq.link_ids, NULL, "link_ids");
+
+cleanup4:
+ optd.expected_revision = 5;
+ err = bpf_prog_detach_opts(fd4, cg, atype, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(cg, atype, 3);
+
+cleanup3:
+ LIBBPF_OPTS_RESET(optd);
+ err = bpf_prog_detach_opts(fd3, cg, atype, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(cg, atype, 2);
+
+ /* Check revision after two detach operations */
+ err = bpf_prog_query_opts(cg, atype, &optq);
+ ASSERT_OK(err, "prog_query");
+ ASSERT_EQ(optq.revision, 7, "revision");
+
+cleanup2:
+ err = bpf_prog_detach_opts(fd2, cg, atype, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(cg, atype, 1);
+
+cleanup1:
+ err = bpf_prog_detach_opts(fd1, cg, atype, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(cg, atype, 0);
+
+cleanup:
+ cgroup_mprog__destroy(skel);
+ close(cg);
+}
+
+static void test_link_attach_detach(int atype)
+{
+ LIBBPF_OPTS(bpf_cgroup_opts, opta);
+ LIBBPF_OPTS(bpf_cgroup_opts, optd);
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ struct bpf_link *link1, *link2, *link3, *link4;
+ __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
+ struct cgroup_mprog *skel;
+ __u32 prog_ids[10];
+ int cg, err;
+
+ cg = test__join_cgroup("/link_attach_detach");
+ if (!ASSERT_GE(cg, 0, "join_cgroup /link_attach_detach"))
+ return;
+
+ skel = cgroup_mprog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.getsockopt_1);
+ fd2 = bpf_program__fd(skel->progs.getsockopt_2);
+ fd3 = bpf_program__fd(skel->progs.getsockopt_3);
+ fd4 = bpf_program__fd(skel->progs.getsockopt_4);
+
+ id1 = id_from_prog_fd(fd1);
+ id2 = id_from_prog_fd(fd2);
+ id3 = id_from_prog_fd(fd3);
+ id4 = id_from_prog_fd(fd4);
+
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .expected_revision = 1,
+ );
+
+ /* ordering: [fd1] */
+ link1 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_1, cg, &opta);
+ if (!ASSERT_OK_PTR(link1, "link_attach"))
+ goto cleanup;
+
+ assert_mprog_count(cg, atype, 1);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_BEFORE | BPF_F_LINK,
+ .relative_id = id_from_link_fd(bpf_link__fd(link1)),
+ .expected_revision = 2,
+ );
+
+ /* ordering: [fd2, fd1] */
+ link2 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_2, cg, &opta);
+ if (!ASSERT_OK_PTR(link2, "link_attach"))
+ goto cleanup1;
+
+ assert_mprog_count(cg, atype, 2);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_AFTER | BPF_F_LINK,
+ .relative_fd = bpf_link__fd(link2),
+ .expected_revision = 3,
+ );
+
+ /* ordering: [fd2, fd3, fd1] */
+ link3 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_3, cg, &opta);
+ if (!ASSERT_OK_PTR(link3, "link_attach"))
+ goto cleanup2;
+
+ assert_mprog_count(cg, atype, 3);
+
+ LIBBPF_OPTS_RESET(opta,
+ .expected_revision = 4,
+ );
+
+ /* ordering: [fd2, fd3, fd1, fd4] */
+ link4 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_4, cg, &opta);
+ if (!ASSERT_OK_PTR(link4, "link_attach"))
+ goto cleanup3;
+
+ assert_mprog_count(cg, atype, 4);
+
+ /* retrieve optq.prog_cnt */
+ err = bpf_prog_query_opts(cg, atype, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+	/* optq.prog_cnt from the query above sizes the id fetch below */
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.prog_ids = prog_ids;
+ err = bpf_prog_query_opts(cg, atype, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(optq.count, 4, "count");
+ ASSERT_EQ(optq.revision, 5, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], id1, "prog_ids[2]");
+ ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
+ ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+ ASSERT_EQ(optq.link_ids, NULL, "link_ids");
+
+cleanup4:
+ bpf_link__destroy(link4);
+ assert_mprog_count(cg, atype, 3);
+
+cleanup3:
+ bpf_link__destroy(link3);
+ assert_mprog_count(cg, atype, 2);
+
+ /* Check revision after two detach operations */
+ err = bpf_prog_query_opts(cg, atype, &optq);
+ ASSERT_OK(err, "prog_query");
+ ASSERT_EQ(optq.revision, 7, "revision");
+
+cleanup2:
+ bpf_link__destroy(link2);
+ assert_mprog_count(cg, atype, 1);
+
+cleanup1:
+ bpf_link__destroy(link1);
+ assert_mprog_count(cg, atype, 0);
+
+cleanup:
+ cgroup_mprog__destroy(skel);
+ close(cg);
+}
+
+static void test_preorder_prog_attach_detach(int atype)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ __u32 fd1, fd2, fd3, fd4;
+ struct cgroup_mprog *skel;
+ int cg, err;
+
+ cg = test__join_cgroup("/preorder_prog_attach_detach");
+ if (!ASSERT_GE(cg, 0, "join_cgroup /preorder_prog_attach_detach"))
+ return;
+
+ skel = cgroup_mprog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.getsockopt_1);
+ fd2 = bpf_program__fd(skel->progs.getsockopt_2);
+ fd3 = bpf_program__fd(skel->progs.getsockopt_3);
+ fd4 = bpf_program__fd(skel->progs.getsockopt_4);
+
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI,
+ .expected_revision = 1,
+ );
+
+ /* ordering: [fd1] */
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+
+ assert_mprog_count(cg, atype, 1);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_PREORDER,
+ .expected_revision = 2,
+ );
+
+ /* ordering: [fd1, fd2] */
+ err = bpf_prog_attach_opts(fd2, cg, atype, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup1;
+
+ assert_mprog_count(cg, atype, 2);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER,
+ .relative_fd = fd2,
+ .expected_revision = 3,
+ );
+
+ err = bpf_prog_attach_opts(fd3, cg, atype, &opta);
+ if (!ASSERT_EQ(err, -EINVAL, "prog_attach"))
+ goto cleanup2;
+
+ assert_mprog_count(cg, atype, 2);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER | BPF_F_PREORDER,
+ .relative_fd = fd2,
+ .expected_revision = 3,
+ );
+
+ /* ordering: [fd1, fd2, fd3] */
+ err = bpf_prog_attach_opts(fd3, cg, atype, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup2;
+
+ assert_mprog_count(cg, atype, 3);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI,
+ .expected_revision = 4,
+ );
+
+	/* ordering: [fd1, fd2, fd3, fd4] */
+ err = bpf_prog_attach_opts(fd4, cg, atype, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup3;
+
+ assert_mprog_count(cg, atype, 4);
+
+ err = bpf_prog_detach_opts(fd4, cg, atype, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(cg, atype, 3);
+
+cleanup3:
+ err = bpf_prog_detach_opts(fd3, cg, atype, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(cg, atype, 2);
+
+cleanup2:
+ err = bpf_prog_detach_opts(fd2, cg, atype, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(cg, atype, 1);
+
+cleanup1:
+ err = bpf_prog_detach_opts(fd1, cg, atype, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(cg, atype, 0);
+
+cleanup:
+ cgroup_mprog__destroy(skel);
+ close(cg);
+}
+
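+/*
+ * Both preorder subtests (prog-based above, link-based below) pin down the
+ * same rule: once a program is attached with BPF_F_PREORDER, BEFORE/AFTER
+ * insertions relative to it must carry BPF_F_PREORDER as well, otherwise
+ * the request fails with -EINVAL. (Stated as the behavior observed by the
+ * asserts, not as a summary of kernel internals.)
+ */
+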
+static void test_preorder_link_attach_detach(int atype)
+{
+ LIBBPF_OPTS(bpf_cgroup_opts, opta);
+ struct bpf_link *link1, *link2, *link3, *link4;
+ struct cgroup_mprog *skel;
+ __u32 fd2;
+ int cg;
+
+ cg = test__join_cgroup("/preorder_link_attach_detach");
+ if (!ASSERT_GE(cg, 0, "join_cgroup /preorder_link_attach_detach"))
+ return;
+
+ skel = cgroup_mprog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd2 = bpf_program__fd(skel->progs.getsockopt_2);
+
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .expected_revision = 1,
+ );
+
+ /* ordering: [fd1] */
+ link1 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_1, cg, &opta);
+ if (!ASSERT_OK_PTR(link1, "link_attach"))
+ goto cleanup;
+
+ assert_mprog_count(cg, atype, 1);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_PREORDER,
+ .expected_revision = 2,
+ );
+
+ /* ordering: [fd1, fd2] */
+ link2 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_2, cg, &opta);
+ if (!ASSERT_OK_PTR(link2, "link_attach"))
+ goto cleanup1;
+
+ assert_mprog_count(cg, atype, 2);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_AFTER,
+ .relative_fd = fd2,
+ .expected_revision = 3,
+ );
+
+ link3 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_3, cg, &opta);
+ if (!ASSERT_ERR_PTR(link3, "link_attach"))
+ goto cleanup2;
+
+ assert_mprog_count(cg, atype, 2);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_AFTER | BPF_F_PREORDER | BPF_F_LINK,
+ .relative_fd = bpf_link__fd(link2),
+ .expected_revision = 3,
+ );
+
+ /* ordering: [fd1, fd2, fd3] */
+ link3 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_3, cg, &opta);
+ if (!ASSERT_OK_PTR(link3, "link_attach"))
+ goto cleanup2;
+
+ assert_mprog_count(cg, atype, 3);
+
+ LIBBPF_OPTS_RESET(opta,
+ .expected_revision = 4,
+ );
+
+	/* ordering: [fd1, fd2, fd3, fd4] */
+ link4 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_4, cg, &opta);
+	if (!ASSERT_OK_PTR(link4, "link_attach"))
+ goto cleanup3;
+
+ assert_mprog_count(cg, atype, 4);
+
+ bpf_link__destroy(link4);
+ assert_mprog_count(cg, atype, 3);
+
+cleanup3:
+ bpf_link__destroy(link3);
+ assert_mprog_count(cg, atype, 2);
+
+cleanup2:
+ bpf_link__destroy(link2);
+ assert_mprog_count(cg, atype, 1);
+
+cleanup1:
+ bpf_link__destroy(link1);
+ assert_mprog_count(cg, atype, 0);
+
+cleanup:
+ cgroup_mprog__destroy(skel);
+ close(cg);
+}
+
+static void test_invalid_attach_detach(int atype)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ __u32 fd1, fd2, id2;
+ struct cgroup_mprog *skel;
+ int cg, err;
+
+ cg = test__join_cgroup("/invalid_attach_detach");
+ if (!ASSERT_GE(cg, 0, "join_cgroup /invalid_attach_detach"))
+ return;
+
+ skel = cgroup_mprog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.getsockopt_1);
+ fd2 = bpf_program__fd(skel->progs.getsockopt_2);
+
+ id2 = id_from_prog_fd(fd2);
+
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE | BPF_F_AFTER,
+ .relative_id = id2,
+ );
+
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ ASSERT_EQ(err, -EINVAL, "prog_attach");
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE | BPF_F_ID,
+ );
+
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ ASSERT_EQ(err, -ENOENT, "prog_attach");
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER | BPF_F_ID,
+ );
+
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ ASSERT_EQ(err, -ENOENT, "prog_attach");
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_LINK,
+ .relative_id = id2,
+ );
+
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ ASSERT_EQ(err, -EINVAL, "prog_attach");
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI,
+ .relative_id = id2,
+ );
+
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ ASSERT_EQ(err, -EINVAL, "prog_attach");
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE,
+ .relative_fd = fd1,
+ );
+
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ ASSERT_EQ(err, -ENOENT, "prog_attach");
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER,
+ .relative_fd = fd1,
+ );
+
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ ASSERT_EQ(err, -ENOENT, "prog_attach");
+ assert_mprog_count(cg, atype, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI,
+ );
+
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+ assert_mprog_count(cg, atype, 1);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER,
+ );
+
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ ASSERT_EQ(err, -EINVAL, "prog_attach");
+ assert_mprog_count(cg, atype, 1);
+
+ LIBBPF_OPTS_RESET(opta,
+ .flags = BPF_F_ALLOW_MULTI | BPF_F_REPLACE | BPF_F_AFTER,
+ .replace_prog_fd = fd1,
+ );
+
+ err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
+ ASSERT_EQ(err, -EINVAL, "prog_attach");
+ assert_mprog_count(cg, atype, 1);
+cleanup:
+ cgroup_mprog__destroy(skel);
+ close(cg);
+}
+
+void test_cgroup_mprog_opts(void)
+{
+ if (test__start_subtest("prog_attach_detach"))
+ test_prog_attach_detach(BPF_CGROUP_GETSOCKOPT);
+ if (test__start_subtest("link_attach_detach"))
+ test_link_attach_detach(BPF_CGROUP_GETSOCKOPT);
+ if (test__start_subtest("preorder_prog_attach_detach"))
+ test_preorder_prog_attach_detach(BPF_CGROUP_GETSOCKOPT);
+ if (test__start_subtest("preorder_link_attach_detach"))
+ test_preorder_link_attach_detach(BPF_CGROUP_GETSOCKOPT);
+ if (test__start_subtest("invalid_attach_detach"))
+ test_invalid_attach_detach(BPF_CGROUP_GETSOCKOPT);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_mprog_ordering.c b/tools/testing/selftests/bpf/prog_tests/cgroup_mprog_ordering.c
new file mode 100644
index 000000000000..a36d2e968bc5
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_mprog_ordering.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include "cgroup_helpers.h"
+#include "cgroup_preorder.skel.h"
+
+static int run_getsockopt_test(int cg_parent, int sock_fd, bool has_relative_fd)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opts);
+ enum bpf_attach_type prog_p_atype, prog_p2_atype;
+ int prog_p_fd, prog_p2_fd;
+ struct cgroup_preorder *skel = NULL;
+ struct bpf_program *prog;
+ __u8 *result, buf;
+ socklen_t optlen = 1;
+ int err = 0;
+
+ skel = cgroup_preorder__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "cgroup_preorder__open_and_load"))
+ return 0;
+
+ LIBBPF_OPTS_RESET(opts);
+ opts.flags = BPF_F_ALLOW_MULTI;
+ prog = skel->progs.parent;
+ prog_p_fd = bpf_program__fd(prog);
+ prog_p_atype = bpf_program__expected_attach_type(prog);
+ err = bpf_prog_attach_opts(prog_p_fd, cg_parent, prog_p_atype, &opts);
+ if (!ASSERT_OK(err, "bpf_prog_attach_opts-parent"))
+ goto close_skel;
+
+ opts.flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE;
+ if (has_relative_fd)
+ opts.relative_fd = prog_p_fd;
+ prog = skel->progs.parent_2;
+ prog_p2_fd = bpf_program__fd(prog);
+ prog_p2_atype = bpf_program__expected_attach_type(prog);
+ err = bpf_prog_attach_opts(prog_p2_fd, cg_parent, prog_p2_atype, &opts);
+ if (!ASSERT_OK(err, "bpf_prog_attach_opts-parent_2"))
+ goto detach_parent;
+
+ err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen);
+ if (!ASSERT_OK(err, "getsockopt"))
+ goto detach_parent_2;
+
+ result = skel->bss->result;
+ ASSERT_TRUE(result[0] == 4 && result[1] == 3, "result values");
+
+detach_parent_2:
+ ASSERT_OK(bpf_prog_detach2(prog_p2_fd, cg_parent, prog_p2_atype),
+ "bpf_prog_detach2-parent_2");
+detach_parent:
+ ASSERT_OK(bpf_prog_detach2(prog_p_fd, cg_parent, prog_p_atype),
+ "bpf_prog_detach2-parent");
+close_skel:
+ cgroup_preorder__destroy(skel);
+ return err;
+}
+
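+/*
+ * The runner below calls run_getsockopt_test() twice. result[] is
+ * populated by the getsockopt programs in cgroup_preorder.bpf.c (outside
+ * this diff); the expected {4, 3} sequence is consistent with parent_2,
+ * attached with BPF_F_BEFORE, executing ahead of parent both with and
+ * without an explicit .relative_fd.
+ */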
+void test_cgroup_mprog_ordering(void)
+{
+ int cg_parent = -1, sock_fd = -1;
+
+ cg_parent = test__join_cgroup("/parent");
+ if (!ASSERT_GE(cg_parent, 0, "join_cgroup /parent"))
+ goto out;
+
+ sock_fd = socket(AF_INET, SOCK_STREAM, 0);
+ if (!ASSERT_GE(sock_fd, 0, "socket"))
+ goto out;
+
+ ASSERT_OK(run_getsockopt_test(cg_parent, sock_fd, false), "getsockopt_test_1");
+ ASSERT_OK(run_getsockopt_test(cg_parent, sock_fd, true), "getsockopt_test_2");
+
+out:
+ close(sock_fd);
+ close(cg_parent);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_xattr.c b/tools/testing/selftests/bpf/prog_tests/cgroup_xattr.c
new file mode 100644
index 000000000000..5ad904e9d15d
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_xattr.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <test_progs.h>
+#include "cgroup_helpers.h"
+
+#include "read_cgroupfs_xattr.skel.h"
+#include "cgroup_read_xattr.skel.h"
+
+#define CGROUP_FS_PARENT "foo/"
+#define CGROUP_FS_CHILD CGROUP_FS_PARENT "bar/"
+#define TMP_FILE "/tmp/selftests_cgroup_xattr"
+
+static const char xattr_value_a[] = "bpf_selftest_value_a";
+static const char xattr_value_b[] = "bpf_selftest_value_b";
+static const char xattr_name[] = "user.bpf_test";
+
+static void test_read_cgroup_xattr(void)
+{
+ int tmp_fd, parent_cgroup_fd = -1, child_cgroup_fd = -1;
+ struct read_cgroupfs_xattr *skel = NULL;
+
+ parent_cgroup_fd = test__join_cgroup(CGROUP_FS_PARENT);
+ if (!ASSERT_OK_FD(parent_cgroup_fd, "create parent cgroup"))
+ return;
+ if (!ASSERT_OK(set_cgroup_xattr(CGROUP_FS_PARENT, xattr_name, xattr_value_a),
+ "set parent xattr"))
+ goto out;
+
+ child_cgroup_fd = test__join_cgroup(CGROUP_FS_CHILD);
+ if (!ASSERT_OK_FD(child_cgroup_fd, "create child cgroup"))
+ goto out;
+ if (!ASSERT_OK(set_cgroup_xattr(CGROUP_FS_CHILD, xattr_name, xattr_value_b),
+ "set child xattr"))
+ goto out;
+
+ skel = read_cgroupfs_xattr__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "read_cgroupfs_xattr__open_and_load"))
+ goto out;
+
+ skel->bss->target_pid = sys_gettid();
+
+ if (!ASSERT_OK(read_cgroupfs_xattr__attach(skel), "read_cgroupfs_xattr__attach"))
+ goto out;
+
+	tmp_fd = open(TMP_FILE, O_RDONLY | O_CREAT, 0644);
+ ASSERT_OK_FD(tmp_fd, "open tmp file");
+ close(tmp_fd);
+
+ ASSERT_TRUE(skel->bss->found_value_a, "found_value_a");
+ ASSERT_TRUE(skel->bss->found_value_b, "found_value_b");
+
+out:
+ close(child_cgroup_fd);
+ close(parent_cgroup_fd);
+ read_cgroupfs_xattr__destroy(skel);
+ unlink(TMP_FILE);
+}
+
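+/*
+ * In the subtest above, the open() of TMP_FILE is only a trigger for the
+ * attached program, which does the actual cgroup xattr reads; success is
+ * then observed through the found_value_a/found_value_b flags. (The BPF
+ * side lives in read_cgroupfs_xattr.bpf.c, outside this diff.)
+ */
+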
+void test_cgroup_xattr(void)
+{
+ RUN_TESTS(cgroup_read_xattr);
+
+ if (test__start_subtest("read_cgroupfs_xattr"))
+ test_read_cgroup_xattr();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c b/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c
index adda85f97058..4b42fbc96efc 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c
@@ -4,6 +4,8 @@
#define _GNU_SOURCE
#include <cgroup_helpers.h>
#include <test_progs.h>
+#include <sched.h>
+#include <sys/wait.h>
#include "cgrp_kfunc_failure.skel.h"
#include "cgrp_kfunc_success.skel.h"
@@ -87,6 +89,72 @@ static const char * const success_tests[] = {
"test_cgrp_from_id",
};
+static void test_cgrp_from_id_ns(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+ struct cgrp_kfunc_success *skel;
+ struct bpf_program *prog;
+ int pid, pipe_fd[2];
+
+ skel = open_load_cgrp_kfunc_skel();
+ if (!ASSERT_OK_PTR(skel, "open_load_skel"))
+ return;
+
+ if (!ASSERT_OK(skel->bss->err, "pre_mkdir_err"))
+ goto cleanup;
+
+ prog = skel->progs.test_cgrp_from_id_ns;
+
+ if (!ASSERT_OK(pipe(pipe_fd), "pipe"))
+ goto cleanup;
+
+ pid = fork();
+ if (!ASSERT_GE(pid, 0, "fork result")) {
+ close(pipe_fd[0]);
+ close(pipe_fd[1]);
+ goto cleanup;
+ }
+
+ if (pid == 0) {
+ int ret = 0;
+
+ close(pipe_fd[0]);
+
+ if (!ASSERT_GE(cgroup_setup_and_join("cgrp_from_id_ns"), 0, "join cgroup"))
+ exit(1);
+
+ if (!ASSERT_OK(unshare(CLONE_NEWCGROUP), "unshare cgns"))
+ exit(1);
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(prog), &opts);
+ if (!ASSERT_OK(ret, "test run ret"))
+ exit(1);
+
+ if (!ASSERT_OK(opts.retval, "test run retval"))
+ exit(1);
+
+ if (!ASSERT_EQ(write(pipe_fd[1], &ret, sizeof(ret)), sizeof(ret), "write pipe"))
+ exit(1);
+
+ exit(0);
+ } else {
+ int res;
+
+ close(pipe_fd[1]);
+
+ ASSERT_EQ(read(pipe_fd[0], &res, sizeof(res)), sizeof(res), "read res");
+ ASSERT_EQ(waitpid(pid, NULL, 0), pid, "wait on child");
+
+ remove_cgroup_pid("cgrp_from_id_ns", pid);
+
+ ASSERT_OK(res, "result from run");
+ }
+
+ close(pipe_fd[0]);
+cleanup:
+ cgrp_kfunc_success__destroy(skel);
+}
+
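+/*
+ * In test_cgrp_from_id_ns above, the child joins a dedicated cgroup and
+ * unshares CLONE_NEWCGROUP before bpf_prog_test_run_opts(), so the
+ * program under test runs against a non-init cgroup namespace; the pipe
+ * only ferries the child's result back, and waitpid() reaps the child
+ * before its cgroup directory is removed.
+ */
+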
void test_cgrp_kfunc(void)
{
int i, err;
@@ -102,6 +170,9 @@ void test_cgrp_kfunc(void)
run_success_test(success_tests[i]);
}
+ if (test__start_subtest("test_cgrp_from_id_ns"))
+ test_cgrp_from_id_ns();
+
RUN_TESTS(cgrp_kfunc_failure);
cleanup:
diff --git a/tools/testing/selftests/bpf/prog_tests/check_mtu.c b/tools/testing/selftests/bpf/prog_tests/check_mtu.c
index 2a9a30650350..65b4512967e7 100644
--- a/tools/testing/selftests/bpf/prog_tests/check_mtu.c
+++ b/tools/testing/selftests/bpf/prog_tests/check_mtu.c
@@ -153,6 +153,26 @@ static void test_check_mtu_run_tc(struct test_check_mtu *skel,
ASSERT_EQ(mtu_result, mtu_expect, "MTU-compare-user");
}
+static void test_chk_segs_flag(struct test_check_mtu *skel, __u32 mtu)
+{
+ int err, prog_fd = bpf_program__fd(skel->progs.tc_chk_segs_flag);
+ struct __sk_buff skb = {
+ .gso_size = 10,
+ };
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .ctx_in = &skb,
+ .ctx_size_in = sizeof(skb),
+ );
+
+	/* Lower the MTU to exercise the BPF_MTU_CHK_SEGS flag */
+ SYS_NOFAIL("ip link set dev lo mtu 10");
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ SYS_NOFAIL("ip link set dev lo mtu %u", mtu);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, BPF_OK, "retval");
+}
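+
+/*
+ * BPF_MTU_CHK_SEGS asks bpf_check_mtu() to check GSO segments against the
+ * MTU instead of the whole skb length, so with gso_size == 10 the check
+ * behaves differently once the loopback MTU is lowered to 10; the exact
+ * expectations live in the tc_chk_segs_flag program, outside this diff.
+ */
+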
static void test_check_mtu_tc(__u32 mtu, __u32 ifindex)
{
@@ -177,11 +197,12 @@ static void test_check_mtu_tc(__u32 mtu, __u32 ifindex)
test_check_mtu_run_tc(skel, skel->progs.tc_minus_delta, mtu);
test_check_mtu_run_tc(skel, skel->progs.tc_input_len, mtu);
test_check_mtu_run_tc(skel, skel->progs.tc_input_len_exceed, mtu);
+ test_chk_segs_flag(skel, mtu);
cleanup:
test_check_mtu__destroy(skel);
}
-void serial_test_check_mtu(void)
+void test_ns_check_mtu(void)
{
int mtu_lo;
diff --git a/tools/testing/selftests/bpf/prog_tests/cls_redirect.c b/tools/testing/selftests/bpf/prog_tests/cls_redirect.c
index 34b59f6baca1..7488a7606e6a 100644
--- a/tools/testing/selftests/bpf/prog_tests/cls_redirect.c
+++ b/tools/testing/selftests/bpf/prog_tests/cls_redirect.c
@@ -22,79 +22,37 @@
static int duration = 0;
-struct addr_port {
- in_port_t port;
- union {
- struct in_addr in_addr;
- struct in6_addr in6_addr;
- };
-};
-
-struct tuple {
- int family;
- struct addr_port src;
- struct addr_port dst;
-};
-
-static bool fill_addr_port(const struct sockaddr *sa, struct addr_port *ap)
-{
- const struct sockaddr_in6 *in6;
- const struct sockaddr_in *in;
-
- switch (sa->sa_family) {
- case AF_INET:
- in = (const struct sockaddr_in *)sa;
- ap->in_addr = in->sin_addr;
- ap->port = in->sin_port;
- return true;
-
- case AF_INET6:
- in6 = (const struct sockaddr_in6 *)sa;
- ap->in6_addr = in6->sin6_addr;
- ap->port = in6->sin6_port;
- return true;
-
- default:
- return false;
- }
-}
-static bool set_up_conn(const struct sockaddr *addr, socklen_t len, int type,
- int *server, int *conn, struct tuple *tuple)
+static bool set_up_conn(const struct sockaddr_storage *addr, socklen_t len, int type,
+ int *server, int *conn,
+ struct sockaddr_storage *src,
+ struct sockaddr_storage *dst)
{
struct sockaddr_storage ss;
socklen_t slen = sizeof(ss);
- struct sockaddr *sa = (struct sockaddr *)&ss;
- *server = start_server_addr(type, (struct sockaddr_storage *)addr, len, NULL);
+ *server = start_server_addr(type, addr, len, NULL);
if (*server < 0)
return false;
- if (CHECK_FAIL(getsockname(*server, sa, &slen)))
+ if (CHECK_FAIL(getsockname(*server, (struct sockaddr *)&ss, &slen)))
goto close_server;
- *conn = connect_to_addr(type, (struct sockaddr_storage *)sa, slen, NULL);
+ *conn = connect_to_addr(type, &ss, slen, NULL);
if (*conn < 0)
goto close_server;
/* We want to simulate packets arriving at conn, so we have to
* swap src and dst.
*/
- slen = sizeof(ss);
- if (CHECK_FAIL(getsockname(*conn, sa, &slen)))
- goto close_conn;
-
- if (CHECK_FAIL(!fill_addr_port(sa, &tuple->dst)))
+ slen = sizeof(*dst);
+ if (CHECK_FAIL(getsockname(*conn, (struct sockaddr *)dst, &slen)))
goto close_conn;
- slen = sizeof(ss);
- if (CHECK_FAIL(getpeername(*conn, sa, &slen)))
+ slen = sizeof(*src);
+ if (CHECK_FAIL(getpeername(*conn, (struct sockaddr *)src, &slen)))
goto close_conn;
- if (CHECK_FAIL(!fill_addr_port(sa, &tuple->src)))
- goto close_conn;
-
- tuple->family = ss.ss_family;
return true;
close_conn:
@@ -110,17 +68,16 @@ static socklen_t prepare_addr(struct sockaddr_storage *addr, int family)
{
struct sockaddr_in *addr4;
struct sockaddr_in6 *addr6;
+ memset(addr, 0, sizeof(*addr));
switch (family) {
case AF_INET:
addr4 = (struct sockaddr_in *)addr;
- memset(addr4, 0, sizeof(*addr4));
addr4->sin_family = family;
addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
return sizeof(*addr4);
case AF_INET6:
addr6 = (struct sockaddr_in6 *)addr;
- memset(addr6, 0, sizeof(*addr6));
addr6->sin6_family = family;
addr6->sin6_addr = in6addr_loopback;
return sizeof(*addr6);
@@ -242,9 +199,15 @@ static void encap_init(encap_headers_t *encap, uint8_t hop_count, uint8_t proto)
}
static size_t build_input(const struct test_cfg *test, void *const buf,
- const struct tuple *tuple)
+ const struct sockaddr_storage *src,
+ const struct sockaddr_storage *dst)
{
- in_port_t sport = tuple->src.port;
+ struct sockaddr_in6 *src_in6 = (struct sockaddr_in6 *)src;
+ struct sockaddr_in6 *dst_in6 = (struct sockaddr_in6 *)dst;
+ struct sockaddr_in *src_in = (struct sockaddr_in *)src;
+ struct sockaddr_in *dst_in = (struct sockaddr_in *)dst;
+ sa_family_t family = src->ss_family;
+ in_port_t sport, dport;
encap_headers_t encap;
struct iphdr ip;
struct ipv6hdr ipv6;
@@ -254,8 +217,11 @@ static size_t build_input(const struct test_cfg *test, void *const buf,
uint8_t *p = buf;
int proto;
+ sport = (family == AF_INET) ? src_in->sin_port : src_in6->sin6_port;
+ dport = (family == AF_INET) ? dst_in->sin_port : dst_in6->sin6_port;
+
proto = IPPROTO_IPIP;
- if (tuple->family == AF_INET6)
+ if (family == AF_INET6)
proto = IPPROTO_IPV6;
encap_init(&encap, test->hops == ONE_HOP ? 1 : 0, proto);
@@ -270,15 +236,15 @@ static size_t build_input(const struct test_cfg *test, void *const buf,
if (test->type == UDP)
proto = IPPROTO_UDP;
- switch (tuple->family) {
+ switch (family) {
case AF_INET:
ip = (struct iphdr){
.ihl = 5,
.version = 4,
.ttl = IPDEFTTL,
.protocol = proto,
- .saddr = tuple->src.in_addr.s_addr,
- .daddr = tuple->dst.in_addr.s_addr,
+ .saddr = src_in->sin_addr.s_addr,
+ .daddr = dst_in->sin_addr.s_addr,
};
p = mempcpy(p, &ip, sizeof(ip));
break;
@@ -287,8 +253,8 @@ static size_t build_input(const struct test_cfg *test, void *const buf,
.version = 6,
.hop_limit = IPDEFTTL,
.nexthdr = proto,
- .saddr = tuple->src.in6_addr,
- .daddr = tuple->dst.in6_addr,
+ .saddr = src_in6->sin6_addr,
+ .daddr = dst_in6->sin6_addr,
};
p = mempcpy(p, &ipv6, sizeof(ipv6));
break;
@@ -303,18 +269,16 @@ static size_t build_input(const struct test_cfg *test, void *const buf,
case TCP:
tcp = (struct tcphdr){
.source = sport,
- .dest = tuple->dst.port,
+ .dest = dport,
+ .syn = (test->flags == SYN),
+ .ack = (test->flags == ACK),
};
- if (test->flags == SYN)
- tcp.syn = true;
- if (test->flags == ACK)
- tcp.ack = true;
p = mempcpy(p, &tcp, sizeof(tcp));
break;
case UDP:
udp = (struct udphdr){
.source = sport,
- .dest = tuple->dst.port,
+ .dest = dport,
};
p = mempcpy(p, &udp, sizeof(udp));
break;
@@ -339,27 +303,26 @@ static void test_cls_redirect_common(struct bpf_program *prog)
LIBBPF_OPTS(bpf_test_run_opts, tattr);
int families[] = { AF_INET, AF_INET6 };
struct sockaddr_storage ss;
- struct sockaddr *addr;
socklen_t slen;
int i, j, err, prog_fd;
int servers[__NR_KIND][ARRAY_SIZE(families)] = {};
int conns[__NR_KIND][ARRAY_SIZE(families)] = {};
- struct tuple tuples[__NR_KIND][ARRAY_SIZE(families)];
+ struct sockaddr_storage srcs[__NR_KIND][ARRAY_SIZE(families)];
+ struct sockaddr_storage dsts[__NR_KIND][ARRAY_SIZE(families)];
- addr = (struct sockaddr *)&ss;
for (i = 0; i < ARRAY_SIZE(families); i++) {
slen = prepare_addr(&ss, families[i]);
if (CHECK_FAIL(!slen))
goto cleanup;
- if (CHECK_FAIL(!set_up_conn(addr, slen, SOCK_DGRAM,
+ if (CHECK_FAIL(!set_up_conn(&ss, slen, SOCK_DGRAM,
&servers[UDP][i], &conns[UDP][i],
- &tuples[UDP][i])))
+ &srcs[UDP][i], &dsts[UDP][i])))
goto cleanup;
- if (CHECK_FAIL(!set_up_conn(addr, slen, SOCK_STREAM,
+ if (CHECK_FAIL(!set_up_conn(&ss, slen, SOCK_STREAM,
&servers[TCP][i], &conns[TCP][i],
- &tuples[TCP][i])))
+ &srcs[TCP][i], &dsts[TCP][i])))
goto cleanup;
}
@@ -368,11 +331,12 @@ static void test_cls_redirect_common(struct bpf_program *prog)
struct test_cfg *test = &tests[i];
for (j = 0; j < ARRAY_SIZE(families); j++) {
- struct tuple *tuple = &tuples[test->type][j];
+ struct sockaddr_storage *src = &srcs[test->type][j];
+ struct sockaddr_storage *dst = &dsts[test->type][j];
char input[256];
char tmp[256];
- test_str(tmp, sizeof(tmp), test, tuple->family);
+ test_str(tmp, sizeof(tmp), test, families[j]);
if (!test__start_subtest(tmp))
continue;
@@ -380,7 +344,7 @@ static void test_cls_redirect_common(struct bpf_program *prog)
tattr.data_size_out = sizeof(tmp);
tattr.data_in = input;
- tattr.data_size_in = build_input(test, input, tuple);
+ tattr.data_size_in = build_input(test, input, src, dst);
if (CHECK_FAIL(!tattr.data_size_in))
continue;
diff --git a/tools/testing/selftests/bpf/prog_tests/dynptr.c b/tools/testing/selftests/bpf/prog_tests/dynptr.c
index 62e7ec775f24..b9f86cb91e81 100644
--- a/tools/testing/selftests/bpf/prog_tests/dynptr.c
+++ b/tools/testing/selftests/bpf/prog_tests/dynptr.c
@@ -21,9 +21,19 @@ static struct {
{"test_dynptr_data", SETUP_SYSCALL_SLEEP},
{"test_dynptr_copy", SETUP_SYSCALL_SLEEP},
{"test_dynptr_copy_xdp", SETUP_XDP_PROG},
+ {"test_dynptr_memset_zero", SETUP_SYSCALL_SLEEP},
+ {"test_dynptr_memset_notzero", SETUP_SYSCALL_SLEEP},
+ {"test_dynptr_memset_zero_offset", SETUP_SYSCALL_SLEEP},
+ {"test_dynptr_memset_zero_adjusted", SETUP_SYSCALL_SLEEP},
+ {"test_dynptr_memset_overflow", SETUP_SYSCALL_SLEEP},
+ {"test_dynptr_memset_overflow_offset", SETUP_SYSCALL_SLEEP},
+ {"test_dynptr_memset_readonly", SETUP_SKB_PROG},
+ {"test_dynptr_memset_xdp_chunks", SETUP_XDP_PROG},
{"test_ringbuf", SETUP_SYSCALL_SLEEP},
{"test_skb_readonly", SETUP_SKB_PROG},
{"test_dynptr_skb_data", SETUP_SKB_PROG},
+ {"test_dynptr_skb_meta_data", SETUP_SKB_PROG},
+ {"test_dynptr_skb_meta_flags", SETUP_SKB_PROG},
{"test_adjust", SETUP_SYSCALL_SLEEP},
{"test_adjust_err", SETUP_SYSCALL_SLEEP},
{"test_zero_size_dynptr", SETUP_SYSCALL_SLEEP},
@@ -43,6 +53,8 @@ static struct {
{"test_copy_from_user_task_str_dynptr", SETUP_SYSCALL_SLEEP},
};
+#define PAGE_SIZE_64K 65536
+
static void verify_success(const char *prog_name, enum test_setup_type setup_type)
{
char user_data[384] = {[0 ... 382] = 'a', '\0'};
@@ -138,14 +150,18 @@ static void verify_success(const char *prog_name, enum test_setup_type setup_typ
}
case SETUP_XDP_PROG:
{
- char data[5000];
+ char data[90000];
int err, prog_fd;
LIBBPF_OPTS(bpf_test_run_opts, opts,
.data_in = &data,
- .data_size_in = sizeof(data),
.repeat = 1,
);
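+	/* On 64K-page kernels the XDP test-run buffer can take the full
+	 * 90000-byte input; 4K-page kernels keep the original 5000-byte
+	 * size (rationale inferred from the page-size check below).
+	 */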
+ if (getpagesize() == PAGE_SIZE_64K)
+ opts.data_size_in = sizeof(data);
+ else
+ opts.data_size_in = 5000;
+
prog_fd = bpf_program__fd(prog);
err = bpf_prog_test_run_opts(prog_fd, &opts);
diff --git a/tools/testing/selftests/bpf/prog_tests/fd_array.c b/tools/testing/selftests/bpf/prog_tests/fd_array.c
index 9add890c2d37..c534b4d5f9da 100644
--- a/tools/testing/selftests/bpf/prog_tests/fd_array.c
+++ b/tools/testing/selftests/bpf/prog_tests/fd_array.c
@@ -293,7 +293,7 @@ static int get_btf_id_by_fd(int btf_fd, __u32 *id)
* 1) Create a new btf, it's referenced only by a file descriptor, so refcnt=1
* 2) Load a BPF prog with fd_array[0] = btf_fd; now btf's refcnt=2
* 3) Close the btf_fd, now refcnt=1
- * Wait and check that BTF stil exists.
+ * Wait and check that BTF still exists.
*/
static void check_fd_array_cnt__referenced_btfs(void)
{
@@ -312,7 +312,7 @@ static void check_fd_array_cnt__referenced_btfs(void)
/* btf should still exist when original file descriptor is closed */
err = get_btf_id_by_fd(extra_fds[0], &btf_id);
- if (!ASSERT_GE(err, 0, "get_btf_id_by_fd"))
+ if (!ASSERT_EQ(err, 0, "get_btf_id_by_fd"))
goto cleanup;
Close(extra_fds[0]);
diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
index 130f5b82d2e6..5ef1804e44df 100644
--- a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
+++ b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
@@ -12,13 +12,24 @@ void test_fentry_fexit(void)
int err, prog_fd, i;
LIBBPF_OPTS(bpf_test_run_opts, topts);
- fentry_skel = fentry_test_lskel__open_and_load();
+ fentry_skel = fentry_test_lskel__open();
if (!ASSERT_OK_PTR(fentry_skel, "fentry_skel_load"))
goto close_prog;
- fexit_skel = fexit_test_lskel__open_and_load();
+
+ fentry_skel->keyring_id = KEY_SPEC_SESSION_KEYRING;
+ err = fentry_test_lskel__load(fentry_skel);
+ if (!ASSERT_OK(err, "fentry_skel_load"))
+ goto close_prog;
+
+ fexit_skel = fexit_test_lskel__open();
if (!ASSERT_OK_PTR(fexit_skel, "fexit_skel_load"))
goto close_prog;
+ fexit_skel->keyring_id = KEY_SPEC_SESSION_KEYRING;
+ err = fexit_test_lskel__load(fexit_skel);
+ if (!ASSERT_OK(err, "fexit_skel_load"))
+ goto close_prog;
+
err = fentry_test_lskel__attach(fentry_skel);
if (!ASSERT_OK(err, "fentry_attach"))
goto close_prog;
diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_test.c b/tools/testing/selftests/bpf/prog_tests/fentry_test.c
index aee1bc77a17f..ec882328eb59 100644
--- a/tools/testing/selftests/bpf/prog_tests/fentry_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/fentry_test.c
@@ -43,8 +43,13 @@ static void fentry_test(void)
struct fentry_test_lskel *fentry_skel = NULL;
int err;
- fentry_skel = fentry_test_lskel__open_and_load();
- if (!ASSERT_OK_PTR(fentry_skel, "fentry_skel_load"))
+ fentry_skel = fentry_test_lskel__open();
+ if (!ASSERT_OK_PTR(fentry_skel, "fentry_skel_open"))
+ goto cleanup;
+
+ fentry_skel->keyring_id = KEY_SPEC_SESSION_KEYRING;
+ err = fentry_test_lskel__load(fentry_skel);
+ if (!ASSERT_OK(err, "fentry_skel_load"))
goto cleanup;
err = fentry_test_common(fentry_skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_noreturns.c b/tools/testing/selftests/bpf/prog_tests/fexit_noreturns.c
deleted file mode 100644
index 568d3aa48a78..000000000000
--- a/tools/testing/selftests/bpf/prog_tests/fexit_noreturns.c
+++ /dev/null
@@ -1,9 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include <test_progs.h>
-#include "fexit_noreturns.skel.h"
-
-void test_fexit_noreturns(void)
-{
- RUN_TESTS(fexit_noreturns);
-}
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_test.c b/tools/testing/selftests/bpf/prog_tests/fexit_test.c
index 1c13007e37dd..94eed753560c 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_test.c
@@ -43,8 +43,13 @@ static void fexit_test(void)
struct fexit_test_lskel *fexit_skel = NULL;
int err;
- fexit_skel = fexit_test_lskel__open_and_load();
- if (!ASSERT_OK_PTR(fexit_skel, "fexit_skel_load"))
+ fexit_skel = fexit_test_lskel__open();
+ if (!ASSERT_OK_PTR(fexit_skel, "fexit_skel_open"))
+ goto cleanup;
+
+ fexit_skel->keyring_id = KEY_SPEC_SESSION_KEYRING;
+ err = fexit_test_lskel__load(fexit_skel);
+ if (!ASSERT_OK(err, "fexit_skel_load"))
goto cleanup;
err = fexit_test_common(fexit_skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/file_reader.c b/tools/testing/selftests/bpf/prog_tests/file_reader.c
new file mode 100644
index 000000000000..5cde32b35da4
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/file_reader.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "file_reader.skel.h"
+#include "file_reader_fail.skel.h"
+#include <dlfcn.h>
+#include <sys/mman.h>
+
+const char *user_ptr = "hello world";
+char file_contents[256000];
+
+void *get_executable_base_addr(void)
+{
+ Dl_info info;
+
+ if (!dladdr((void *)&get_executable_base_addr, &info)) {
+ fprintf(stderr, "dladdr failed\n");
+ return NULL;
+ }
+
+ return info.dli_fbase;
+}
+
+static int initialize_file_contents(void)
+{
+ int fd, page_sz = sysconf(_SC_PAGESIZE);
+ ssize_t n = 0, cur, off;
+ void *addr;
+
+ fd = open("/proc/self/exe", O_RDONLY);
+	if (!ASSERT_OK_FD(fd, "Open /proc/self/exe"))
+ return 1;
+
+ do {
+ cur = read(fd, file_contents + n, sizeof(file_contents) - n);
+ if (!ASSERT_GT(cur, 0, "read success"))
+ break;
+ n += cur;
+ } while (n < sizeof(file_contents));
+
+ close(fd);
+
+	if (!ASSERT_EQ(n, sizeof(file_contents), "Read /proc/self/exe"))
+ return 1;
+
+ addr = get_executable_base_addr();
+ if (!ASSERT_NEQ(addr, NULL, "get executable address"))
+ return 1;
+
+ /* page-align base file address */
+ addr = (void *)((unsigned long)addr & ~(page_sz - 1));
+
+ /*
+ * Page out range 0..512K, use 0..256K for positive tests and
+ * 256K..512K for negative tests expecting page faults
+ */
+ for (off = 0; off < sizeof(file_contents) * 2; off += page_sz) {
+ if (!ASSERT_OK(madvise(addr + off, page_sz, MADV_PAGEOUT),
+ "madvise pageout"))
+ return errno;
+ }
+
+ return 0;
+}
+
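+/*
+ * The MADV_PAGEOUT pass above evicts the executable's pages so that later
+ * BPF-side reads cannot be served from resident memory: the first 256K can
+ * be faulted back in and compared byte-for-byte against file_contents,
+ * while the second 256K stays unread from user space for the tests that
+ * expect the fault itself.
+ */
+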
+static void run_test(const char *prog_name)
+{
+ struct file_reader *skel;
+ struct bpf_program *prog;
+ int err, fd;
+
+ err = initialize_file_contents();
+ if (!ASSERT_OK(err, "initialize file contents"))
+ return;
+
+ skel = file_reader__open();
+ if (!ASSERT_OK_PTR(skel, "file_reader__open"))
+ return;
+
+ bpf_object__for_each_program(prog, skel->obj) {
+ bpf_program__set_autoload(prog, strcmp(bpf_program__name(prog), prog_name) == 0);
+ }
+
+ memcpy(skel->bss->user_buf, file_contents, sizeof(file_contents));
+ skel->bss->pid = getpid();
+
+ err = file_reader__load(skel);
+ if (!ASSERT_OK(err, "file_reader__load"))
+ goto cleanup;
+
+ err = file_reader__attach(skel);
+ if (!ASSERT_OK(err, "file_reader__attach"))
+ goto cleanup;
+
+ fd = open("/proc/self/exe", O_RDONLY);
+ if (fd >= 0)
+ close(fd);
+
+ ASSERT_EQ(skel->bss->err, 0, "err");
+ ASSERT_EQ(skel->bss->run_success, 1, "run_success");
+cleanup:
+ file_reader__destroy(skel);
+}
+
+void test_file_reader(void)
+{
+ if (test__start_subtest("on_open_expect_fault"))
+ run_test("on_open_expect_fault");
+
+ if (test__start_subtest("on_open_validate_file_read"))
+ run_test("on_open_validate_file_read");
+
+ if (test__start_subtest("negative"))
+ RUN_TESTS(file_reader_fail);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/free_timer.c b/tools/testing/selftests/bpf/prog_tests/free_timer.c
index b7b77a6b2979..0de8facca4c5 100644
--- a/tools/testing/selftests/bpf/prog_tests/free_timer.c
+++ b/tools/testing/selftests/bpf/prog_tests/free_timer.c
@@ -124,6 +124,10 @@ void test_free_timer(void)
int err;
skel = free_timer__open_and_load();
+ if (!skel && errno == EOPNOTSUPP) {
+ test__skip();
+ return;
+ }
if (!ASSERT_OK_PTR(skel, "open_load"))
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/htab_update.c b/tools/testing/selftests/bpf/prog_tests/htab_update.c
index 2bc85f4814f4..d0b405eb2966 100644
--- a/tools/testing/selftests/bpf/prog_tests/htab_update.c
+++ b/tools/testing/selftests/bpf/prog_tests/htab_update.c
@@ -15,17 +15,17 @@ struct htab_update_ctx {
static void test_reenter_update(void)
{
struct htab_update *skel;
- unsigned int key, value;
+ void *value = NULL;
+ unsigned int key, value_size;
int err;
skel = htab_update__open();
if (!ASSERT_OK_PTR(skel, "htab_update__open"))
return;
- /* lookup_elem_raw() may be inlined and find_kernel_btf_id() will return -ESRCH */
- bpf_program__set_autoload(skel->progs.lookup_elem_raw, true);
+ bpf_program__set_autoload(skel->progs.bpf_obj_free_fields, true);
err = htab_update__load(skel);
- if (!ASSERT_TRUE(!err || err == -ESRCH, "htab_update__load") || err)
+	if (!ASSERT_OK(err, "htab_update__load"))
goto out;
skel->bss->pid = getpid();
@@ -33,14 +33,33 @@ static void test_reenter_update(void)
if (!ASSERT_OK(err, "htab_update__attach"))
goto out;
- /* Will trigger the reentrancy of bpf_map_update_elem() */
+ value_size = bpf_map__value_size(skel->maps.htab);
+
+ value = calloc(1, value_size);
+ if (!ASSERT_OK_PTR(value, "calloc value"))
+ goto out;
+ /*
+ * First update: plain insert. This should NOT trigger the re-entrancy
+ * path, because there is no old element to free yet.
+ */
key = 0;
- value = 0;
- err = bpf_map_update_elem(bpf_map__fd(skel->maps.htab), &key, &value, 0);
- if (!ASSERT_OK(err, "add element"))
+ err = bpf_map_update_elem(bpf_map__fd(skel->maps.htab), &key, value, BPF_ANY);
+ if (!ASSERT_OK(err, "first update (insert)"))
+ goto out;
+
+ /*
+	 * Second update: replace the existing element under the same key,
+	 * which triggers the re-entrancy of bpf_map_update_elem():
+	 * check_and_free_fields() calls bpf_obj_free_fields() on the old
+	 * value, which is where the fentry program runs and performs a
+	 * nested bpf_map_update_elem(), failing with -EDEADLK.
+ */
+ memset(value, 0, value_size);
+ err = bpf_map_update_elem(bpf_map__fd(skel->maps.htab), &key, value, BPF_ANY);
+ if (!ASSERT_OK(err, "second update (replace)"))
goto out;
- ASSERT_EQ(skel->bss->update_err, -EBUSY, "no reentrancy");
+ ASSERT_EQ(skel->bss->update_err, -EDEADLK, "no reentrancy");
out:
htab_update__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/kernel_flag.c b/tools/testing/selftests/bpf/prog_tests/kernel_flag.c
index a133354ac9bc..97b00c7efe94 100644
--- a/tools/testing/selftests/bpf/prog_tests/kernel_flag.c
+++ b/tools/testing/selftests/bpf/prog_tests/kernel_flag.c
@@ -16,7 +16,7 @@ void test_kernel_flag(void)
if (!ASSERT_OK_PTR(lsm_skel, "lsm_skel"))
return;
- lsm_skel->bss->monitored_tid = gettid();
+ lsm_skel->bss->monitored_tid = sys_gettid();
ret = test_kernel_flag__attach(lsm_skel);
if (!ASSERT_OK(ret, "test_kernel_flag__attach"))
diff --git a/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
index 1de14b111931..6e35e13c2022 100644
--- a/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
@@ -57,7 +57,8 @@ static void subtest_kmem_cache_iter_check_slabinfo(struct kmem_cache_iter *skel)
if (!ASSERT_OK(ret, "kmem_cache_lookup"))
break;
- ASSERT_STREQ(r.name, name, "kmem_cache_name");
+ ASSERT_STRNEQ(r.name, name, sizeof(r.name) - 1,
+ "kmem_cache_name");
ASSERT_EQ(r.obj_size, objsize, "kmem_cache_objsize");
seen++;
diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
index e19ef509ebf8..6cfaa978bc9a 100644
--- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
@@ -7,6 +7,7 @@
#include "kprobe_multi_session.skel.h"
#include "kprobe_multi_session_cookie.skel.h"
#include "kprobe_multi_verifier.skel.h"
+#include "kprobe_write_ctx.skel.h"
#include "bpf/libbpf_internal.h"
#include "bpf/hashmap.h"
@@ -422,220 +423,6 @@ static void test_unique_match(void)
kprobe_multi__destroy(skel);
}
-static size_t symbol_hash(long key, void *ctx __maybe_unused)
-{
- return str_hash((const char *) key);
-}
-
-static bool symbol_equal(long key1, long key2, void *ctx __maybe_unused)
-{
- return strcmp((const char *) key1, (const char *) key2) == 0;
-}
-
-static bool is_invalid_entry(char *buf, bool kernel)
-{
- if (kernel && strchr(buf, '['))
- return true;
- if (!kernel && !strchr(buf, '['))
- return true;
- return false;
-}
-
-static bool skip_entry(char *name)
-{
- /*
- * We attach to almost all kernel functions and some of them
- * will cause 'suspicious RCU usage' when fprobe is attached
- * to them. Filter out the current culprits - arch_cpu_idle
- * default_idle and rcu_* functions.
- */
- if (!strcmp(name, "arch_cpu_idle"))
- return true;
- if (!strcmp(name, "default_idle"))
- return true;
- if (!strncmp(name, "rcu_", 4))
- return true;
- if (!strcmp(name, "bpf_dispatcher_xdp_func"))
- return true;
- if (!strncmp(name, "__ftrace_invalid_address__",
- sizeof("__ftrace_invalid_address__") - 1))
- return true;
- return false;
-}
-
-/* Do comparision by ignoring '.llvm.<hash>' suffixes. */
-static int compare_name(const char *name1, const char *name2)
-{
- const char *res1, *res2;
- int len1, len2;
-
- res1 = strstr(name1, ".llvm.");
- res2 = strstr(name2, ".llvm.");
- len1 = res1 ? res1 - name1 : strlen(name1);
- len2 = res2 ? res2 - name2 : strlen(name2);
-
- if (len1 == len2)
- return strncmp(name1, name2, len1);
- if (len1 < len2)
- return strncmp(name1, name2, len1) <= 0 ? -1 : 1;
- return strncmp(name1, name2, len2) >= 0 ? 1 : -1;
-}
-
-static int load_kallsyms_compare(const void *p1, const void *p2)
-{
- return compare_name(((const struct ksym *)p1)->name, ((const struct ksym *)p2)->name);
-}
-
-static int search_kallsyms_compare(const void *p1, const struct ksym *p2)
-{
- return compare_name(p1, p2->name);
-}
-
-static int get_syms(char ***symsp, size_t *cntp, bool kernel)
-{
- size_t cap = 0, cnt = 0;
- char *name = NULL, *ksym_name, **syms = NULL;
- struct hashmap *map;
- struct ksyms *ksyms;
- struct ksym *ks;
- char buf[256];
- FILE *f;
- int err = 0;
-
- ksyms = load_kallsyms_custom_local(load_kallsyms_compare);
- if (!ASSERT_OK_PTR(ksyms, "load_kallsyms_custom_local"))
- return -EINVAL;
-
- /*
- * The available_filter_functions contains many duplicates,
- * but other than that all symbols are usable in kprobe multi
- * interface.
- * Filtering out duplicates by using hashmap__add, which won't
- * add existing entry.
- */
-
- if (access("/sys/kernel/tracing/trace", F_OK) == 0)
- f = fopen("/sys/kernel/tracing/available_filter_functions", "r");
- else
- f = fopen("/sys/kernel/debug/tracing/available_filter_functions", "r");
-
- if (!f)
- return -EINVAL;
-
- map = hashmap__new(symbol_hash, symbol_equal, NULL);
- if (IS_ERR(map)) {
- err = libbpf_get_error(map);
- goto error;
- }
-
- while (fgets(buf, sizeof(buf), f)) {
- if (is_invalid_entry(buf, kernel))
- continue;
-
- free(name);
- if (sscanf(buf, "%ms$*[^\n]\n", &name) != 1)
- continue;
- if (skip_entry(name))
- continue;
-
- ks = search_kallsyms_custom_local(ksyms, name, search_kallsyms_compare);
- if (!ks) {
- err = -EINVAL;
- goto error;
- }
-
- ksym_name = ks->name;
- err = hashmap__add(map, ksym_name, 0);
- if (err == -EEXIST) {
- err = 0;
- continue;
- }
- if (err)
- goto error;
-
- err = libbpf_ensure_mem((void **) &syms, &cap,
- sizeof(*syms), cnt + 1);
- if (err)
- goto error;
-
- syms[cnt++] = ksym_name;
- }
-
- *symsp = syms;
- *cntp = cnt;
-
-error:
- free(name);
- fclose(f);
- hashmap__free(map);
- if (err)
- free(syms);
- return err;
-}
-
-static int get_addrs(unsigned long **addrsp, size_t *cntp, bool kernel)
-{
- unsigned long *addr, *addrs, *tmp_addrs;
- int err = 0, max_cnt, inc_cnt;
- char *name = NULL;
- size_t cnt = 0;
- char buf[256];
- FILE *f;
-
- if (access("/sys/kernel/tracing/trace", F_OK) == 0)
- f = fopen("/sys/kernel/tracing/available_filter_functions_addrs", "r");
- else
- f = fopen("/sys/kernel/debug/tracing/available_filter_functions_addrs", "r");
-
- if (!f)
- return -ENOENT;
-
- /* In my local setup, the number of entries is 50k+ so Let us initially
- * allocate space to hold 64k entries. If 64k is not enough, incrementally
- * increase 1k each time.
- */
- max_cnt = 65536;
- inc_cnt = 1024;
- addrs = malloc(max_cnt * sizeof(long));
- if (addrs == NULL) {
- err = -ENOMEM;
- goto error;
- }
-
- while (fgets(buf, sizeof(buf), f)) {
- if (is_invalid_entry(buf, kernel))
- continue;
-
- free(name);
- if (sscanf(buf, "%p %ms$*[^\n]\n", &addr, &name) != 2)
- continue;
- if (skip_entry(name))
- continue;
-
- if (cnt == max_cnt) {
- max_cnt += inc_cnt;
- tmp_addrs = realloc(addrs, max_cnt);
- if (!tmp_addrs) {
- err = -ENOMEM;
- goto error;
- }
- addrs = tmp_addrs;
- }
-
- addrs[cnt++] = (unsigned long)addr;
- }
-
- *addrsp = addrs;
- *cntp = cnt;
-
-error:
- free(name);
- fclose(f);
- if (err)
- free(addrs);
- return err;
-}
-
static void do_bench_test(struct kprobe_multi_empty *skel, struct bpf_kprobe_multi_opts *opts)
{
long attach_start_ns, attach_end_ns;
@@ -670,7 +457,7 @@ static void test_kprobe_multi_bench_attach(bool kernel)
char **syms = NULL;
size_t cnt = 0;
- if (!ASSERT_OK(get_syms(&syms, &cnt, kernel), "get_syms"))
+ if (!ASSERT_OK(bpf_get_ksyms(&syms, &cnt, kernel), "bpf_get_ksyms"))
return;
skel = kprobe_multi_empty__open_and_load();
@@ -696,13 +483,13 @@ static void test_kprobe_multi_bench_attach_addr(bool kernel)
size_t cnt = 0;
int err;
- err = get_addrs(&addrs, &cnt, kernel);
+ err = bpf_get_addrs(&addrs, &cnt, kernel);
if (err == -ENOENT) {
test__skip();
return;
}
- if (!ASSERT_OK(err, "get_addrs"))
+ if (!ASSERT_OK(err, "bpf_get_addrs"))
return;
skel = kprobe_multi_empty__open_and_load();
@@ -753,6 +540,30 @@ cleanup:
kprobe_multi_override__destroy(skel);
}
+#ifdef __x86_64__
+static void test_attach_write_ctx(void)
+{
+ struct kprobe_write_ctx *skel = NULL;
+ struct bpf_link *link = NULL;
+
+ skel = kprobe_write_ctx__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "kprobe_write_ctx__open_and_load"))
+ return;
+
+ link = bpf_program__attach_kprobe_opts(skel->progs.kprobe_multi_write_ctx,
+ "bpf_fentry_test1", NULL);
+ if (!ASSERT_ERR_PTR(link, "bpf_program__attach_kprobe_opts"))
+ bpf_link__destroy(link);
+
+ kprobe_write_ctx__destroy(skel);
+}
+#else
+static void test_attach_write_ctx(void)
+{
+ test__skip();
+}
+#endif
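+
+/*
+ * The expectation in test_attach_write_ctx() above is that a kprobe_multi
+ * program which writes to its pt_regs context cannot be attached through
+ * the regular kprobe interface; the check is gated to x86_64, presumably
+ * because the BPF-side ctx write (kprobe_write_ctx.bpf.c, outside this
+ * diff) is tied to that register layout.
+ */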
+
void serial_test_kprobe_multi_bench_attach(void)
{
if (test__start_subtest("kernel"))
@@ -792,5 +603,7 @@ void test_kprobe_multi_test(void)
test_session_cookie_skel_api();
if (test__start_subtest("unique_match"))
test_unique_match();
+ if (test__start_subtest("attach_write_ctx"))
+ test_attach_write_ctx();
RUN_TESTS(kprobe_multi_verifier);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c
index 5266c7022863..14c5a7ef0e87 100644
--- a/tools/testing/selftests/bpf/prog_tests/linked_list.c
+++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c
@@ -72,7 +72,7 @@ static struct {
{ "new_null_ret", "R0 invalid mem access 'ptr_or_null_'" },
{ "obj_new_acq", "Unreleased reference id=" },
{ "use_after_drop", "invalid mem access 'scalar'" },
- { "ptr_walk_scalar", "type=scalar expected=percpu_ptr_" },
+ { "ptr_walk_scalar", "type=rdonly_untrusted_mem expected=percpu_ptr_" },
{ "direct_read_lock", "direct access to bpf_spin_lock is disallowed" },
{ "direct_write_lock", "direct access to bpf_spin_lock is disallowed" },
{ "direct_read_head", "direct access to bpf_list_head is disallowed" },
diff --git a/tools/testing/selftests/bpf/prog_tests/livepatch_trampoline.c b/tools/testing/selftests/bpf/prog_tests/livepatch_trampoline.c
new file mode 100644
index 000000000000..72aa5376c30e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/livepatch_trampoline.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <test_progs.h>
+#include "testing_helpers.h"
+#include "livepatch_trampoline.skel.h"
+
+static int load_livepatch(void)
+{
+ char path[4096];
+
+ /* CI will set KBUILD_OUTPUT */
+ snprintf(path, sizeof(path), "%s/samples/livepatch/livepatch-sample.ko",
+ getenv("KBUILD_OUTPUT") ? : "../../../..");
+
+ return load_module(path, env_verbosity > VERBOSE_NONE);
+}
+
+static void unload_livepatch(void)
+{
+ /* Disable the livepatch before unloading the module */
+ system("echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled");
+
+ unload_module("livepatch_sample", env_verbosity > VERBOSE_NONE);
+}
+
+static void read_proc_cmdline(void)
+{
+ char buf[4096];
+ int fd, ret;
+
+ fd = open("/proc/cmdline", O_RDONLY);
+ if (!ASSERT_OK_FD(fd, "open /proc/cmdline"))
+ return;
+
+ ret = read(fd, buf, sizeof(buf));
+ if (!ASSERT_GT(ret, 0, "read /proc/cmdline"))
+ goto out;
+
+ ASSERT_OK(strncmp(buf, "this has been live patched", 26), "strncmp");
+
+out:
+ close(fd);
+}
+
+static void __test_livepatch_trampoline(bool fexit_first)
+{
+ struct livepatch_trampoline *skel = NULL;
+ int err;
+
+ skel = livepatch_trampoline__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ goto out;
+
+ skel->bss->my_pid = getpid();
+
+ if (!fexit_first) {
+ /* fentry program is loaded first by default */
+ err = livepatch_trampoline__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto out;
+ } else {
+ /* Manually load fexit program first. */
+ skel->links.fexit_cmdline = bpf_program__attach(skel->progs.fexit_cmdline);
+ if (!ASSERT_OK_PTR(skel->links.fexit_cmdline, "attach_fexit"))
+ goto out;
+
+ skel->links.fentry_cmdline = bpf_program__attach(skel->progs.fentry_cmdline);
+ if (!ASSERT_OK_PTR(skel->links.fentry_cmdline, "attach_fentry"))
+ goto out;
+ }
+
+ read_proc_cmdline();
+
+ ASSERT_EQ(skel->bss->fentry_hit, 1, "fentry_hit");
+ ASSERT_EQ(skel->bss->fexit_hit, 1, "fexit_hit");
+out:
+ livepatch_trampoline__destroy(skel);
+}
+
+void test_livepatch_trampoline(void)
+{
+ int retry_cnt = 0;
+
+retry:
+ if (load_livepatch()) {
+ if (retry_cnt) {
+ ASSERT_OK(1, "load_livepatch");
+ goto out;
+ }
+ /*
+ * Something else (previous run of the same test?) loaded
+ * the KLP module. Unload the KLP module and retry.
+ */
+ unload_livepatch();
+ retry_cnt++;
+ goto retry;
+ }
+
+ if (test__start_subtest("fentry_first"))
+ __test_livepatch_trampoline(false);
+
+ if (test__start_subtest("fexit_first"))
+ __test_livepatch_trampoline(true);
+out:
+ unload_livepatch();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/log_buf.c b/tools/testing/selftests/bpf/prog_tests/log_buf.c
index 169ce689b97c..d6f14a232002 100644
--- a/tools/testing/selftests/bpf/prog_tests/log_buf.c
+++ b/tools/testing/selftests/bpf/prog_tests/log_buf.c
@@ -7,6 +7,10 @@
#include "test_log_buf.skel.h"
#include "bpf_util.h"
+#if !defined(__clang__)
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
+
static size_t libbpf_log_pos;
static char libbpf_log_buf[1024 * 1024];
static bool libbpf_log_error;
diff --git a/tools/testing/selftests/bpf/prog_tests/map_excl.c b/tools/testing/selftests/bpf/prog_tests/map_excl.c
new file mode 100644
index 000000000000..6bdc6d6de0da
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/map_excl.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2025 Google LLC. */
+#define _GNU_SOURCE
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <test_progs.h>
+#include <bpf/btf.h>
+
+#include "map_excl.skel.h"
+
+static void test_map_excl_allowed(void)
+{
+ struct map_excl *skel = map_excl__open();
+ int err;
+
+ if (!ASSERT_OK_PTR(skel, "map_excl__open"))
+ return;
+
+ err = bpf_map__set_exclusive_program(skel->maps.excl_map, skel->progs.should_have_access);
+ if (!ASSERT_OK(err, "bpf_map__set_exclusive_program"))
+ goto out;
+
+ bpf_program__set_autoload(skel->progs.should_have_access, true);
+ bpf_program__set_autoload(skel->progs.should_not_have_access, false);
+
+ err = map_excl__load(skel);
+ ASSERT_OK(err, "map_excl__load");
+out:
+ map_excl__destroy(skel);
+}
+
+static void test_map_excl_denied(void)
+{
+ struct map_excl *skel = map_excl__open();
+ int err;
+
+ if (!ASSERT_OK_PTR(skel, "map_excl__open"))
+ return;
+
+ err = bpf_map__set_exclusive_program(skel->maps.excl_map, skel->progs.should_have_access);
+ if (!ASSERT_OK(err, "bpf_map__set_exclusive_program"))
+ goto out;
+
+ bpf_program__set_autoload(skel->progs.should_have_access, false);
+ bpf_program__set_autoload(skel->progs.should_not_have_access, true);
+
+ err = map_excl__load(skel);
+ ASSERT_EQ(err, -EACCES, "exclusive map load must fail with -EACCES");
+out:
+ map_excl__destroy(skel);
+}
+
+void test_map_excl(void)
+{
+ if (test__start_subtest("map_excl_allowed"))
+ test_map_excl_allowed();
+ if (test__start_subtest("map_excl_denied"))
+ test_map_excl_denied();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/mem_rdonly_untrusted.c b/tools/testing/selftests/bpf/prog_tests/mem_rdonly_untrusted.c
new file mode 100644
index 000000000000..40d4f687bd9c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/mem_rdonly_untrusted.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <test_progs.h>
+#include "mem_rdonly_untrusted.skel.h"
+
+void test_mem_rdonly_untrusted(void)
+{
+ RUN_TESTS(mem_rdonly_untrusted);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/module_attach.c b/tools/testing/selftests/bpf/prog_tests/module_attach.c
index 6d391d95f96e..70fa7ae93173 100644
--- a/tools/testing/selftests/bpf/prog_tests/module_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/module_attach.c
@@ -90,7 +90,7 @@ void test_module_attach(void)
test_module_attach__detach(skel);
- /* attach fentry/fexit and make sure it get's module reference */
+ /* attach fentry/fexit and make sure it gets module reference */
link = bpf_program__attach(skel->progs.handle_fentry);
if (!ASSERT_OK_PTR(link, "attach_fentry"))
goto cleanup;
diff --git a/tools/testing/selftests/bpf/prog_tests/mptcp.c b/tools/testing/selftests/bpf/prog_tests/mptcp.c
index f8eb7f9d4fd2..8fade8bdc451 100644
--- a/tools/testing/selftests/bpf/prog_tests/mptcp.c
+++ b/tools/testing/selftests/bpf/prog_tests/mptcp.c
@@ -6,11 +6,13 @@
#include <netinet/in.h>
#include <test_progs.h>
#include <unistd.h>
+#include <errno.h>
#include "cgroup_helpers.h"
#include "network_helpers.h"
#include "mptcp_sock.skel.h"
#include "mptcpify.skel.h"
#include "mptcp_subflow.skel.h"
+#include "mptcp_sockmap.skel.h"
#define NS_TEST "mptcp_ns"
#define ADDR_1 "10.0.1.1"
@@ -436,6 +438,142 @@ close_cgroup:
close(cgroup_fd);
}
+/* Test sockmap on MPTCP server handling non-mp-capable clients. */
+static void test_sockmap_with_mptcp_fallback(struct mptcp_sockmap *skel)
+{
+ int listen_fd = -1, client_fd1 = -1, client_fd2 = -1;
+ int server_fd1 = -1, server_fd2 = -1, sent, recvd;
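+ /* snd carries 9 raw bytes (no NUL terminator); rcv has one spare byte to catch truncation. */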
+ char snd[9] = "123456789";
+ char rcv[10];
+
+ /* start server with MPTCP enabled */
+ listen_fd = start_mptcp_server(AF_INET, NULL, 0, 0);
+ if (!ASSERT_OK_FD(listen_fd, "sockmap-fb:start_mptcp_server"))
+ return;
+
+ skel->bss->trace_port = ntohs(get_socket_local_port(listen_fd));
+ skel->bss->sk_index = 0;
+ /* create client without MPTCP enabled */
+ client_fd1 = connect_to_fd_opts(listen_fd, NULL);
+ if (!ASSERT_OK_FD(client_fd1, "sockmap-fb:connect_to_fd"))
+ goto end;
+
+ server_fd1 = accept(listen_fd, NULL, 0);
+ skel->bss->sk_index = 1;
+ client_fd2 = connect_to_fd_opts(listen_fd, NULL);
+ if (!ASSERT_OK_FD(client_fd2, "sockmap-fb:connect_to_fd"))
+ goto end;
+
+ server_fd2 = accept(listen_fd, NULL, 0);
+ /* test normal redirect behavior: data sent by client_fd1 can be
+ * received by client_fd2
+ */
+ skel->bss->redirect_idx = 1;
+ sent = send(client_fd1, snd, sizeof(snd), 0);
+ if (!ASSERT_EQ(sent, sizeof(snd), "sockmap-fb:send(client_fd1)"))
+ goto end;
+
+ /* recv one byte more than was sent so any truncation would be caught */
+ recvd = recv(client_fd2, rcv, sizeof(rcv), 0);
+ if (!ASSERT_EQ(recvd, sizeof(snd), "sockmap-fb:recv(client_fd2)"))
+ goto end;
+
+end:
+ if (client_fd1 >= 0)
+ close(client_fd1);
+ if (client_fd2 >= 0)
+ close(client_fd2);
+ if (server_fd1 >= 0)
+ close(server_fd1);
+ if (server_fd2 >= 0)
+ close(server_fd2);
+ close(listen_fd);
+}
+
+/* Test sockmap rejection of MPTCP sockets - both server and client sides. */
+static void test_sockmap_reject_mptcp(struct mptcp_sockmap *skel)
+{
+ int listen_fd = -1, server_fd = -1, client_fd1 = -1;
+ int err, zero = 0;
+
+ /* start server with MPTCP enabled */
+ listen_fd = start_mptcp_server(AF_INET, NULL, 0, 0);
+ if (!ASSERT_OK_FD(listen_fd, "start_mptcp_server"))
+ return;
+
+ skel->bss->trace_port = ntohs(get_socket_local_port(listen_fd));
+ skel->bss->sk_index = 0;
+ /* create client with MPTCP enabled */
+ client_fd1 = connect_to_fd(listen_fd, 0);
+ if (!ASSERT_OK_FD(client_fd1, "connect_to_fd client_fd1"))
+ goto end;
+
+ /* bpf_sock_map_update() called from sockops should reject MPTCP sk */
+ if (!ASSERT_EQ(skel->bss->helper_ret, -EOPNOTSUPP, "should reject"))
+ goto end;
+
+ server_fd = accept(listen_fd, NULL, 0);
+ err = bpf_map_update_elem(bpf_map__fd(skel->maps.sock_map),
+ &zero, &server_fd, BPF_NOEXIST);
+ if (!ASSERT_EQ(err, -EOPNOTSUPP, "server should be disallowed"))
+ goto end;
+
+ /* MPTCP client should also be disallowed */
+ err = bpf_map_update_elem(bpf_map__fd(skel->maps.sock_map),
+ &zero, &client_fd1, BPF_NOEXIST);
+ if (!ASSERT_EQ(err, -EOPNOTSUPP, "client should be disallowed"))
+ goto end;
+end:
+ if (client_fd1 >= 0)
+ close(client_fd1);
+ if (server_fd >= 0)
+ close(server_fd);
+ close(listen_fd);
+}
+
+static void test_mptcp_sockmap(void)
+{
+ struct mptcp_sockmap *skel;
+ struct netns_obj *netns;
+ int cgroup_fd, err;
+
+ cgroup_fd = test__join_cgroup("/mptcp_sockmap");
+ if (!ASSERT_OK_FD(cgroup_fd, "join_cgroup: mptcp_sockmap"))
+ return;
+
+ skel = mptcp_sockmap__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_load: mptcp_sockmap"))
+ goto close_cgroup;
+
+ skel->links.mptcp_sockmap_inject =
+ bpf_program__attach_cgroup(skel->progs.mptcp_sockmap_inject, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.mptcp_sockmap_inject, "attach sockmap"))
+ goto skel_destroy;
+
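+ /* Attach the skb stream verdict prog that redirects payloads between sockmap slots. */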
+ err = bpf_prog_attach(bpf_program__fd(skel->progs.mptcp_sockmap_redirect),
+ bpf_map__fd(skel->maps.sock_map),
+ BPF_SK_SKB_STREAM_VERDICT, 0);
+ if (!ASSERT_OK(err, "bpf_prog_attach stream verdict"))
+ goto skel_destroy;
+
+ netns = netns_new(NS_TEST, true);
+ if (!ASSERT_OK_PTR(netns, "netns_new: mptcp_sockmap"))
+ goto skel_destroy;
+
+ if (endpoint_init("subflow") < 0)
+ goto close_netns;
+
+ test_sockmap_with_mptcp_fallback(skel);
+ test_sockmap_reject_mptcp(skel);
+
+close_netns:
+ netns_free(netns);
+skel_destroy:
+ mptcp_sockmap__destroy(skel);
+close_cgroup:
+ close(cgroup_fd);
+}
+
void test_mptcp(void)
{
if (test__start_subtest("base"))
@@ -444,4 +582,6 @@ void test_mptcp(void)
test_mptcpify();
if (test__start_subtest("subflow"))
test_subflow();
+ if (test__start_subtest("sockmap"))
+ test_mptcp_sockmap();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/perf_branches.c b/tools/testing/selftests/bpf/prog_tests/perf_branches.c
index bc24f83339d6..0a7ef770c487 100644
--- a/tools/testing/selftests/bpf/prog_tests/perf_branches.c
+++ b/tools/testing/selftests/bpf/prog_tests/perf_branches.c
@@ -15,6 +15,10 @@ static void check_good_sample(struct test_perf_branches *skel)
int pbe_size = sizeof(struct perf_branch_entry);
int duration = 0;
+ if (CHECK(!skel->bss->run_cnt, "invalid run_cnt",
+ "checked sample validity before prog run"))
+ return;
+
if (CHECK(!skel->bss->valid, "output not valid",
"no valid sample from prog"))
return;
@@ -45,6 +49,10 @@ static void check_bad_sample(struct test_perf_branches *skel)
int written_stack = skel->bss->written_stack_out;
int duration = 0;
+ if (CHECK(!skel->bss->run_cnt, "invalid run_cnt",
+ "checked sample validity before prog run"))
+ return;
+
if (CHECK(!skel->bss->valid, "output not valid",
"no valid sample from prog"))
return;
@@ -83,8 +91,12 @@ static void test_perf_branches_common(int perf_fd,
err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
if (CHECK(err, "set_affinity", "cpu #0, err %d\n", err))
goto out_destroy;
- /* spin the loop for a while (random high number) */
- for (i = 0; i < 1000000; ++i)
+
+ /* Spin for up to a high iteration count, but bail out early once
+ * the run count marker shows the backing perf_event BPF program
+ * has fired at least once.
+ */
+ for (i = 0; i < 100000000 && !*(volatile int *)&skel->bss->run_cnt; ++i)
++j;
test_perf_branches__detach(skel);
@@ -116,11 +128,11 @@ static void test_perf_branches_hw(void)
pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
/*
- * Some setups don't support branch records (virtual machines, !x86),
- * so skip test in this case.
+ * Some setups don't support LBR (virtual machines, !x86, AMD Milan Zen
+ * 3 which only supports BRS), so skip test in this case.
*/
if (pfd < 0) {
- if (errno == ENOENT || errno == EOPNOTSUPP) {
+ if (errno == ENOENT || errno == EOPNOTSUPP || errno == EINVAL) {
printf("%s:SKIP:no PERF_SAMPLE_BRANCH_STACK\n",
__func__);
test__skip();
diff --git a/tools/testing/selftests/bpf/prog_tests/pinning_devmap_reuse.c b/tools/testing/selftests/bpf/prog_tests/pinning_devmap_reuse.c
new file mode 100644
index 000000000000..9ae49b587f3e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/pinning_devmap_reuse.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <test_progs.h>
+
+
+#include "test_pinning_devmap.skel.h"
+
+void test_pinning_devmap_reuse(void)
+{
+ const char *pinpath1 = "/sys/fs/bpf/pinmap1";
+ const char *pinpath2 = "/sys/fs/bpf/pinmap2";
+ struct test_pinning_devmap *skel1 = NULL, *skel2 = NULL;
+ int err;
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
+
+ /* load the object a first time */
+ skel1 = test_pinning_devmap__open_and_load();
+ if (!ASSERT_OK_PTR(skel1, "skel_load1"))
+ goto out;
+
+ /* load the object a second time, re-using the pinned map */
+ skel2 = test_pinning_devmap__open_and_load();
+ if (!ASSERT_OK_PTR(skel2, "skel_load2"))
+ goto out;
+
+ /* we can close the reference safely without
+ * the map's refcount falling to 0
+ */
+ test_pinning_devmap__destroy(skel1);
+ skel1 = NULL;
+
+ /* now, swap the pins */
+ err = renameat2(0, pinpath1, 0, pinpath2, RENAME_EXCHANGE);
+ if (!ASSERT_OK(err, "swap pins"))
+ goto out;
+
+ /* load the object again, this time the re-use should fail */
+ skel1 = test_pinning_devmap__open_and_load();
+ if (!ASSERT_ERR_PTR(skel1, "skel_load3"))
+ goto out;
+
+out:
+ unlink(pinpath1);
+ unlink(pinpath2);
+ test_pinning_devmap__destroy(skel1);
+ test_pinning_devmap__destroy(skel2);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/pinning_htab.c b/tools/testing/selftests/bpf/prog_tests/pinning_htab.c
new file mode 100644
index 000000000000..16bd74be3dbe
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/pinning_htab.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "test_pinning_htab.skel.h"
+
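+/* Pin and then unpin the named map to exercise pinning of maps that contain bpf_timers. */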
+static void unpin_map(const char *map_name, const char *pin_path)
+{
+ struct test_pinning_htab *skel;
+ struct bpf_map *map;
+ int err;
+
+ skel = test_pinning_htab__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel open_and_load"))
+ return;
+
+ map = bpf_object__find_map_by_name(skel->obj, map_name);
+ if (!ASSERT_OK_PTR(map, "bpf_object__find_map_by_name"))
+ goto out;
+
+ err = bpf_map__pin(map, pin_path);
+ if (!ASSERT_OK(err, "bpf_map__pin"))
+ goto out;
+
+ err = bpf_map__unpin(map, pin_path);
+ ASSERT_OK(err, "bpf_map__unpin");
+out:
+ test_pinning_htab__destroy(skel);
+}
+
+void test_pinning_htab(void)
+{
+ if (test__start_subtest("timer_prealloc"))
+ unpin_map("timer_prealloc", "/sys/fs/bpf/timer_prealloc");
+ if (test__start_subtest("timer_no_prealloc"))
+ unpin_map("timer_no_prealloc", "/sys/fs/bpf/timer_no_prealloc");
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c b/tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c
index 14f2796076e0..7607cfc2408c 100644
--- a/tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c
+++ b/tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c
@@ -54,3 +54,128 @@ void test_prog_tests_framework(void)
return;
clear_test_state(state);
}
+
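+/* validate_msgs() requires an emit callback; results are checked via the subtest log buffer instead. */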
+static void dummy_emit(const char *buf, bool force) {}
+
+void test_prog_tests_framework_expected_msgs(void)
+{
+ struct expected_msgs msgs;
+ int i, j, error_cnt;
+ const struct {
+ const char *name;
+ const char *log;
+ const char *expected;
+ struct expect_msg *pats;
+ } cases[] = {
+ {
+ .name = "simple-ok",
+ .log = "aaabbbccc",
+ .pats = (struct expect_msg[]) {
+ { .substr = "aaa" },
+ { .substr = "ccc" },
+ {}
+ }
+ },
+ {
+ .name = "simple-fail",
+ .log = "aaabbbddd",
+ .expected = "MATCHED SUBSTR: 'aaa'\n"
+ "EXPECTED SUBSTR: 'ccc'\n",
+ .pats = (struct expect_msg[]) {
+ { .substr = "aaa" },
+ { .substr = "ccc" },
+ {}
+ }
+ },
+ {
+ .name = "negative-ok-mid",
+ .log = "aaabbbccc",
+ .pats = (struct expect_msg[]) {
+ { .substr = "aaa" },
+ { .substr = "foo", .negative = true },
+ { .substr = "bar", .negative = true },
+ { .substr = "ccc" },
+ {}
+ }
+ },
+ {
+ .name = "negative-ok-tail",
+ .log = "aaabbbccc",
+ .pats = (struct expect_msg[]) {
+ { .substr = "aaa" },
+ { .substr = "foo", .negative = true },
+ {}
+ }
+ },
+ {
+ .name = "negative-ok-head",
+ .log = "aaabbbccc",
+ .pats = (struct expect_msg[]) {
+ { .substr = "foo", .negative = true },
+ { .substr = "ccc" },
+ {}
+ }
+ },
+ {
+ .name = "negative-fail-head",
+ .log = "aaabbbccc",
+ .expected = "UNEXPECTED SUBSTR: 'aaa'\n",
+ .pats = (struct expect_msg[]) {
+ { .substr = "aaa", .negative = true },
+ { .substr = "bbb" },
+ {}
+ }
+ },
+ {
+ .name = "negative-fail-tail",
+ .log = "aaabbbccc",
+ .expected = "UNEXPECTED SUBSTR: 'ccc'\n",
+ .pats = (struct expect_msg[]) {
+ { .substr = "bbb" },
+ { .substr = "ccc", .negative = true },
+ {}
+ }
+ },
+ {
+ .name = "negative-fail-mid-1",
+ .log = "aaabbbccc",
+ .expected = "UNEXPECTED SUBSTR: 'bbb'\n",
+ .pats = (struct expect_msg[]) {
+ { .substr = "aaa" },
+ { .substr = "bbb", .negative = true },
+ { .substr = "ccc" },
+ {}
+ }
+ },
+ {
+ .name = "negative-fail-mid-2",
+ .log = "aaabbb222ccc",
+ .expected = "UNEXPECTED SUBSTR: '222'\n",
+ .pats = (struct expect_msg[]) {
+ { .substr = "aaa" },
+ { .substr = "222", .negative = true },
+ { .substr = "bbb", .negative = true },
+ { .substr = "ccc" },
+ {}
+ }
+ }
+ };
+
+ for (i = 0; i < ARRAY_SIZE(cases); i++) {
+ if (test__start_subtest(cases[i].name)) {
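+ /* Failures are expected for the -fail cases; snapshot error_cnt and restore it after validate_msgs(). */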
+ error_cnt = env.subtest_state->error_cnt;
+ msgs.patterns = cases[i].pats;
+ msgs.cnt = 0;
+ for (j = 0; cases[i].pats[j].substr; j++)
+ msgs.cnt++;
+ validate_msgs(cases[i].log, &msgs, dummy_emit);
+ fflush(stderr);
+ env.subtest_state->error_cnt = error_cnt;
+ if (cases[i].expected)
+ ASSERT_HAS_SUBSTR(env.subtest_state->log_buf, cases[i].expected, "expected output");
+ else
+ ASSERT_STREQ(env.subtest_state->log_buf, "", "expected no output");
+ test__end_subtest();
+ }
+ }
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c
index c9f855e5da24..246eb259c08a 100644
--- a/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c
+++ b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c
@@ -28,6 +28,7 @@ static void test_success(void)
bpf_program__set_autoload(skel->progs.two_regions, true);
bpf_program__set_autoload(skel->progs.non_sleepable_1, true);
bpf_program__set_autoload(skel->progs.non_sleepable_2, true);
+ bpf_program__set_autoload(skel->progs.nested_rcu_region, true);
bpf_program__set_autoload(skel->progs.task_trusted_non_rcuptr, true);
bpf_program__set_autoload(skel->progs.rcu_read_lock_subprog, true);
bpf_program__set_autoload(skel->progs.rcu_read_lock_global_subprog, true);
@@ -78,7 +79,8 @@ static const char * const inproper_region_tests[] = {
"non_sleepable_rcu_mismatch",
"inproper_sleepable_helper",
"inproper_sleepable_kfunc",
- "nested_rcu_region",
+ "nested_rcu_region_unbalanced_1",
+ "nested_rcu_region_unbalanced_2",
"rcu_read_lock_global_subprog_lock",
"rcu_read_lock_global_subprog_unlock",
"rcu_read_lock_sleepable_helper_global_subprog",
diff --git a/tools/testing/selftests/bpf/prog_tests/recursive_attach.c b/tools/testing/selftests/bpf/prog_tests/recursive_attach.c
index 8100509e561b..0ffa01d54ce2 100644
--- a/tools/testing/selftests/bpf/prog_tests/recursive_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/recursive_attach.c
@@ -149,3 +149,70 @@ close_prog:
fentry_recursive_target__destroy(target_skel);
fentry_recursive__destroy(tracing_skel);
}
+
+static void *fentry_target_test_run(void *arg)
+{
+ for (;;) {
+ int prog_fd = __atomic_load_n((int *)arg, __ATOMIC_SEQ_CST);
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ int err;
+
+ if (prog_fd == -1)
+ break;
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "fentry_target test_run"))
+ break;
+ }
+
+ return NULL;
+}
+
+void test_fentry_attach_stress(void)
+{
+ struct fentry_recursive_target *target_skel = NULL;
+ struct fentry_recursive *tracing_skel = NULL;
+ struct bpf_program *prog;
+ int err, i, tgt_prog_fd;
+ pthread_t thread;
+
+ target_skel = fentry_recursive_target__open_and_load();
+ if (!ASSERT_OK_PTR(target_skel,
+ "fentry_recursive_target__open_and_load"))
+ goto close_prog;
+ tgt_prog_fd = bpf_program__fd(target_skel->progs.fentry_target);
+ err = pthread_create(&thread, NULL,
+ fentry_target_test_run, &tgt_prog_fd);
+ if (!ASSERT_OK(err, "pthread_create"))
+ goto close_prog;
+
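+ /* Race repeated load/attach/detach against the thread continuously test-running the target. */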
+ for (i = 0; i < 1000; i++) {
+ tracing_skel = fentry_recursive__open();
+ if (!ASSERT_OK_PTR(tracing_skel, "fentry_recursive__open"))
+ goto stop_thread;
+
+ prog = tracing_skel->progs.recursive_attach;
+ err = bpf_program__set_attach_target(prog, tgt_prog_fd,
+ "fentry_target");
+ if (!ASSERT_OK(err, "bpf_program__set_attach_target"))
+ goto stop_thread;
+
+ err = fentry_recursive__load(tracing_skel);
+ if (!ASSERT_OK(err, "fentry_recursive__load"))
+ goto stop_thread;
+
+ err = fentry_recursive__attach(tracing_skel);
+ if (!ASSERT_OK(err, "fentry_recursive__attach"))
+ goto stop_thread;
+
+ fentry_recursive__destroy(tracing_skel);
+ tracing_skel = NULL;
+ }
+
+stop_thread:
+ __atomic_store_n(&tgt_prog_fd, -1, __ATOMIC_SEQ_CST);
+ err = pthread_join(thread, NULL);
+ ASSERT_OK(err, "pthread_join");
+close_prog:
+ fentry_recursive__destroy(tracing_skel);
+ fentry_recursive_target__destroy(target_skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c b/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
index d6bd5e16e637..d2c0542716a8 100644
--- a/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
+++ b/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
@@ -44,3 +44,59 @@ void test_refcounted_kptr_wrong_owner(void)
ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_a2 retval");
refcounted_kptr__destroy(skel);
}
+
+void test_percpu_hash_refcounted_kptr_refcount_leak(void)
+{
+ struct refcounted_kptr *skel;
+ int cpu_nr, fd, err, key = 0;
+ struct bpf_map *map;
+ size_t values_sz;
+ u64 *values;
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+
+ cpu_nr = libbpf_num_possible_cpus();
+ if (!ASSERT_GT(cpu_nr, 0, "libbpf_num_possible_cpus"))
+ return;
+
+ values = calloc(cpu_nr, sizeof(u64));
+ if (!ASSERT_OK_PTR(values, "calloc values"))
+ return;
+
+ skel = refcounted_kptr__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "refcounted_kptr__open_and_load")) {
+ free(values);
+ return;
+ }
+
+ values_sz = cpu_nr * sizeof(u64);
+ memset(values, 0, values_sz);
+
+ map = skel->maps.percpu_hash;
+ err = bpf_map__update_elem(map, &key, sizeof(key), values, values_sz, 0);
+ if (!ASSERT_OK(err, "bpf_map__update_elem"))
+ goto out;
+
+ fd = bpf_program__fd(skel->progs.percpu_hash_refcount_leak);
+ err = bpf_prog_test_run_opts(fd, &opts);
+ if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
+ goto out;
+ if (!ASSERT_EQ(opts.retval, 2, "opts.retval"))
+ goto out;
+
+ err = bpf_map__update_elem(map, &key, sizeof(key), values, values_sz, 0);
+ if (!ASSERT_OK(err, "bpf_map__update_elem"))
+ goto out;
+
+ fd = bpf_program__fd(skel->progs.check_percpu_hash_refcount);
+ err = bpf_prog_test_run_opts(fd, &opts);
+ ASSERT_OK(err, "bpf_prog_test_run_opts");
+ ASSERT_EQ(opts.retval, 1, "opts.retval");
+
+out:
+ refcounted_kptr__destroy(skel);
+ free(values);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
index 39d42271cc46..d93a0c7b1786 100644
--- a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
+++ b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
@@ -465,6 +465,20 @@ static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t,
return range_improve(x_t, x, x_swap);
}
+ if (!t_is_32(x_t) && !t_is_32(y_t) && x_t != y_t) {
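+ /* One of the ranges wraps after the S64<->U64 cast (a > b); intersect using whichever end overlaps the other range. */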
+ if (x_t == S64 && x.a > x.b) {
+ if (x.b < y.a && x.a <= y.b)
+ return range(x_t, x.a, y.b);
+ if (x.a > y.b && x.b >= y.a)
+ return range(x_t, y.a, x.b);
+ } else if (x_t == U64 && y.a > y.b) {
+ if (y.b < x.a && y.a <= x.b)
+ return range(x_t, y.a, x.b);
+ if (y.a > x.b && y.b >= x.a)
+ return range(x_t, x.a, y.b);
+ }
+ }
+
/* otherwise, plain range cast and intersection works */
return range_improve(x_t, x, y_cast);
}
@@ -609,7 +623,7 @@ static void range_cond(enum num_t t, struct range x, struct range y,
*newx = range(t, x.a, x.b);
*newy = range(t, y.a + 1, y.b);
} else if (x.a == x.b && x.b == y.b) {
- /* X is a constant matching rigth side of Y */
+ /* X is a constant matching right side of Y */
*newx = range(t, x.a, x.b);
*newy = range(t, y.a, y.b - 1);
} else if (y.a == y.b && x.a == y.a) {
@@ -617,7 +631,7 @@ static void range_cond(enum num_t t, struct range x, struct range y,
*newx = range(t, x.a + 1, x.b);
*newy = range(t, y.a, y.b);
} else if (y.a == y.b && x.b == y.b) {
- /* Y is a constant matching rigth side of X */
+ /* Y is a constant matching right side of X */
*newx = range(t, x.a, x.b - 1);
*newy = range(t, y.a, y.b);
} else {
diff --git a/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c b/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c
index 0703e987df89..f0a8c828f8f1 100644
--- a/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c
+++ b/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c
@@ -99,3 +99,19 @@ end:
res_spin_lock__destroy(skel);
return;
}
+
+void serial_test_res_spin_lock_stress(void)
+{
+ if (libbpf_num_possible_cpus() < 3) {
+ test__skip();
+ return;
+ }
+
+ ASSERT_OK(load_module("bpf_test_rqspinlock.ko", false), "load module AA");
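+ /* The module's default test_mode exercises the AA case; let it run for a while. */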
+ sleep(5);
+ unload_module("bpf_test_rqspinlock", false);
+ /*
+ * Insert bpf_test_rqspinlock.ko manually with test_mode=[1|2] to test
+ * other cases (ABBA, ABBCCA).
+ */
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf.c b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
index da430df45aa4..64520684d2cb 100644
--- a/tools/testing/selftests/bpf/prog_tests/ringbuf.c
+++ b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
@@ -17,6 +17,7 @@
#include "test_ringbuf_n.lskel.h"
#include "test_ringbuf_map_key.lskel.h"
#include "test_ringbuf_write.lskel.h"
+#include "test_ringbuf_overwrite.lskel.h"
#define EDONE 7777
@@ -97,7 +98,7 @@ static void ringbuf_write_subtest(void)
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
- skel->maps.ringbuf.max_entries = 0x4000;
+ skel->maps.ringbuf.max_entries = 0x40000;
err = test_ringbuf_write_lskel__load(skel);
if (!ASSERT_OK(err, "skel_load"))
@@ -108,7 +109,7 @@ static void ringbuf_write_subtest(void)
mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0);
if (!ASSERT_OK_PTR(mmap_ptr, "rw_cons_pos"))
goto cleanup;
- *mmap_ptr = 0x3000;
+ *mmap_ptr = 0x30000;
ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw");
skel->bss->pid = getpid();
@@ -497,6 +498,68 @@ cleanup:
test_ringbuf_map_key_lskel__destroy(skel_map_key);
}
+static void ringbuf_overwrite_mode_subtest(void)
+{
+ unsigned long size, len1, len2, len3, len4, len5;
+ unsigned long expect_avail_data, expect_prod_pos, expect_over_pos;
+ struct test_ringbuf_overwrite_lskel *skel;
+ int page_size = getpagesize();
+ int err;
+
+ skel = test_ringbuf_overwrite_lskel__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
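+ /* len1 + len2 + len3 plus three record headers exactly fill the ring. */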
+ size = page_size;
+ len1 = page_size / 2;
+ len2 = page_size / 4;
+ len3 = size - len1 - len2 - BPF_RINGBUF_HDR_SZ * 3;
+ len4 = len3 - 8;
+ len5 = len3; /* retry with len3 */
+
+ skel->maps.ringbuf.max_entries = size;
+ skel->rodata->LEN1 = len1;
+ skel->rodata->LEN2 = len2;
+ skel->rodata->LEN3 = len3;
+ skel->rodata->LEN4 = len4;
+ skel->rodata->LEN5 = len5;
+
+ skel->bss->pid = getpid();
+
+ err = test_ringbuf_overwrite_lskel__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ err = test_ringbuf_overwrite_lskel__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+ syscall(__NR_getpgid);
+
+ ASSERT_EQ(skel->bss->reserve1_fail, 0, "reserve 1");
+ ASSERT_EQ(skel->bss->reserve2_fail, 0, "reserve 2");
+ ASSERT_EQ(skel->bss->reserve3_fail, 1, "reserve 3");
+ ASSERT_EQ(skel->bss->reserve4_fail, 0, "reserve 4");
+ ASSERT_EQ(skel->bss->reserve5_fail, 0, "reserve 5");
+
+ ASSERT_EQ(skel->bss->ring_size, size, "check_ring_size");
+
+ expect_avail_data = len2 + len4 + len5 + 3 * BPF_RINGBUF_HDR_SZ;
+ ASSERT_EQ(skel->bss->avail_data, expect_avail_data, "check_avail_size");
+
+ ASSERT_EQ(skel->bss->cons_pos, 0, "check_cons_pos");
+
+ expect_prod_pos = len1 + len2 + len4 + len5 + 4 * BPF_RINGBUF_HDR_SZ;
+ ASSERT_EQ(skel->bss->prod_pos, expect_prod_pos, "check_prod_pos");
+
+ expect_over_pos = len1 + BPF_RINGBUF_HDR_SZ;
+ ASSERT_EQ(skel->bss->over_pos, expect_over_pos, "check_over_pos");
+
+ test_ringbuf_overwrite_lskel__detach(skel);
+cleanup:
+ test_ringbuf_overwrite_lskel__destroy(skel);
+}
+
void test_ringbuf(void)
{
if (test__start_subtest("ringbuf"))
@@ -507,4 +570,6 @@ void test_ringbuf(void)
ringbuf_map_key_subtest();
if (test__start_subtest("ringbuf_write"))
ringbuf_write_subtest();
+ if (test__start_subtest("ringbuf_overwrite_mode"))
+ ringbuf_overwrite_mode_subtest();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c
index 036d4760d2c1..3dbcc091f16c 100644
--- a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c
+++ b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c
@@ -41,11 +41,7 @@ static struct bpf_object *obj;
static __u32 index_zero;
static int epfd;
-static union sa46 {
- struct sockaddr_in6 v6;
- struct sockaddr_in v4;
- sa_family_t family;
-} srv_sa;
+static struct sockaddr_storage srv_sa;
#define RET_IF(condition, tag, format...) ({ \
if (CHECK_FAIL(condition)) { \
@@ -135,24 +131,24 @@ static int prepare_bpf_obj(void)
return 0;
}
-static void sa46_init_loopback(union sa46 *sa, sa_family_t family)
+static void ss_init_loopback(struct sockaddr_storage *sa, sa_family_t family)
{
memset(sa, 0, sizeof(*sa));
- sa->family = family;
- if (sa->family == AF_INET6)
- sa->v6.sin6_addr = in6addr_loopback;
+ sa->ss_family = family;
+ if (sa->ss_family == AF_INET6)
+ ((struct sockaddr_in6 *)sa)->sin6_addr = in6addr_loopback;
else
- sa->v4.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ ((struct sockaddr_in *)sa)->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
}
-static void sa46_init_inany(union sa46 *sa, sa_family_t family)
+static void ss_init_inany(struct sockaddr_storage *sa, sa_family_t family)
{
memset(sa, 0, sizeof(*sa));
- sa->family = family;
- if (sa->family == AF_INET6)
- sa->v6.sin6_addr = in6addr_any;
+ sa->ss_family = family;
+ if (sa->ss_family == AF_INET6)
+ ((struct sockaddr_in6 *)sa)->sin6_addr = in6addr_any;
else
- sa->v4.sin_addr.s_addr = INADDR_ANY;
+ ((struct sockaddr_in *)sa)->sin_addr.s_addr = INADDR_ANY;
}
static int read_int_sysctl(const char *sysctl)
@@ -228,7 +224,7 @@ static void check_data(int type, sa_family_t family, const struct cmd *cmd,
int cli_fd)
{
struct data_check expected = {}, result;
- union sa46 cli_sa;
+ struct sockaddr_storage cli_sa;
socklen_t addrlen;
int err;
@@ -251,26 +247,32 @@ static void check_data(int type, sa_family_t family, const struct cmd *cmd,
}
if (family == AF_INET6) {
+ struct sockaddr_in6 *srv_v6 = (struct sockaddr_in6 *)&srv_sa;
+ struct sockaddr_in6 *cli_v6 = (struct sockaddr_in6 *)&cli_sa;
+
expected.eth_protocol = htons(ETH_P_IPV6);
- expected.bind_inany = !srv_sa.v6.sin6_addr.s6_addr32[3] &&
- !srv_sa.v6.sin6_addr.s6_addr32[2] &&
- !srv_sa.v6.sin6_addr.s6_addr32[1] &&
- !srv_sa.v6.sin6_addr.s6_addr32[0];
+ expected.bind_inany = !srv_v6->sin6_addr.s6_addr32[3] &&
+ !srv_v6->sin6_addr.s6_addr32[2] &&
+ !srv_v6->sin6_addr.s6_addr32[1] &&
+ !srv_v6->sin6_addr.s6_addr32[0];
- memcpy(&expected.skb_addrs[0], cli_sa.v6.sin6_addr.s6_addr32,
- sizeof(cli_sa.v6.sin6_addr));
+ memcpy(&expected.skb_addrs[0], cli_v6->sin6_addr.s6_addr32,
+ sizeof(cli_v6->sin6_addr));
memcpy(&expected.skb_addrs[4], &in6addr_loopback,
sizeof(in6addr_loopback));
- expected.skb_ports[0] = cli_sa.v6.sin6_port;
- expected.skb_ports[1] = srv_sa.v6.sin6_port;
+ expected.skb_ports[0] = cli_v6->sin6_port;
+ expected.skb_ports[1] = srv_v6->sin6_port;
} else {
+ struct sockaddr_in *srv_v4 = (struct sockaddr_in *)&srv_sa;
+ struct sockaddr_in *cli_v4 = (struct sockaddr_in *)&cli_sa;
+
expected.eth_protocol = htons(ETH_P_IP);
- expected.bind_inany = !srv_sa.v4.sin_addr.s_addr;
+ expected.bind_inany = !srv_v4->sin_addr.s_addr;
- expected.skb_addrs[0] = cli_sa.v4.sin_addr.s_addr;
+ expected.skb_addrs[0] = cli_v4->sin_addr.s_addr;
expected.skb_addrs[1] = htonl(INADDR_LOOPBACK);
- expected.skb_ports[0] = cli_sa.v4.sin_port;
- expected.skb_ports[1] = srv_sa.v4.sin_port;
+ expected.skb_ports[0] = cli_v4->sin_port;
+ expected.skb_ports[1] = srv_v4->sin_port;
}
if (memcmp(&result, &expected, offsetof(struct data_check,
@@ -364,16 +366,15 @@ static void check_results(void)
static int send_data(int type, sa_family_t family, void *data, size_t len,
enum result expected)
{
- union sa46 cli_sa;
+ struct sockaddr_storage cli_sa;
int fd, err;
fd = socket(family, type, 0);
RET_ERR(fd == -1, "socket()", "fd:%d errno:%d\n", fd, errno);
- sa46_init_loopback(&cli_sa, family);
+ ss_init_loopback(&cli_sa, family);
err = bind(fd, (struct sockaddr *)&cli_sa, sizeof(cli_sa));
RET_ERR(err == -1, "bind(cli_sa)", "err:%d errno:%d\n", err, errno);
-
err = sendto(fd, data, len, MSG_FASTOPEN, (struct sockaddr *)&srv_sa,
sizeof(srv_sa));
RET_ERR(err != len && expected >= PASS,
@@ -589,9 +590,9 @@ static void prepare_sk_fds(int type, sa_family_t family, bool inany)
socklen_t addrlen;
if (inany)
- sa46_init_inany(&srv_sa, family);
+ ss_init_inany(&srv_sa, family);
else
- sa46_init_loopback(&srv_sa, family);
+ ss_init_loopback(&srv_sa, family);
addrlen = sizeof(srv_sa);
/*
diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c
index 1702aa592c2c..7ac4d5a488aa 100644
--- a/tools/testing/selftests/bpf/prog_tests/send_signal.c
+++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c
@@ -206,6 +206,11 @@ destroy_skel:
skel_open_load_failure:
close(pipe_c2p[0]);
close(pipe_p2c[1]);
+ /*
+ * Child is either about to exit cleanly or stuck in case of errors.
+ * Nudge it to exit.
+ */
+ kill(pid, SIGKILL);
wait(NULL);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/sha256.c b/tools/testing/selftests/bpf/prog_tests/sha256.c
new file mode 100644
index 000000000000..604a0b1423d5
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/sha256.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2025 Google LLC */
+
+#include <test_progs.h>
+#include "bpf/libbpf_internal.h"
+
+#define MAX_LEN 4096
+
+/* Test libbpf_sha256() for all lengths from 0 to MAX_LEN inclusively. */
+void test_sha256(void)
+{
+ /*
+ * The correctness of this value was verified by running this test with
+ * libbpf_sha256() replaced by OpenSSL's SHA256().
+ */
+ static const __u8 expected_digest_of_digests[SHA256_DIGEST_LENGTH] = {
+ 0x62, 0x30, 0x0e, 0x1d, 0xea, 0x7f, 0xc4, 0x74,
+ 0xfd, 0x8e, 0x64, 0x0b, 0xd8, 0x5f, 0xea, 0x04,
+ 0xf3, 0xef, 0x77, 0x42, 0xc2, 0x01, 0xb8, 0x90,
+ 0x6e, 0x19, 0x91, 0x1b, 0xca, 0xb3, 0x28, 0x42,
+ };
+ __u64 seed = 0;
+ __u8 *data = NULL, *digests = NULL;
+ __u8 digest_of_digests[SHA256_DIGEST_LENGTH];
+ size_t i;
+
+ data = malloc(MAX_LEN);
+ if (!ASSERT_OK_PTR(data, "malloc"))
+ goto out;
+ digests = malloc((MAX_LEN + 1) * SHA256_DIGEST_LENGTH);
+ if (!ASSERT_OK_PTR(digests, "malloc"))
+ goto out;
+
+ /* Generate MAX_LEN bytes of "random" data deterministically. */
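+ /* (48-bit LCG with the same multiplier and increment as POSIX drand48.) */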
+ for (i = 0; i < MAX_LEN; i++) {
+ seed = (seed * 25214903917 + 11) & ((1ULL << 48) - 1);
+ data[i] = (__u8)(seed >> 16);
+ }
+
+ /* Calculate a digest for each length 0 through MAX_LEN inclusively. */
+ for (i = 0; i <= MAX_LEN; i++)
+ libbpf_sha256(data, i, &digests[i * SHA256_DIGEST_LENGTH]);
+
+ /* Calculate and verify the digest of all the digests. */
+ libbpf_sha256(digests, (MAX_LEN + 1) * SHA256_DIGEST_LENGTH,
+ digest_of_digests);
+ ASSERT_MEMEQ(digest_of_digests, expected_digest_of_digests,
+ SHA256_DIGEST_LENGTH, "digest_of_digests");
+out:
+ free(data);
+ free(digests);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/sk_bypass_prot_mem.c b/tools/testing/selftests/bpf/prog_tests/sk_bypass_prot_mem.c
new file mode 100644
index 000000000000..e4940583924b
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/sk_bypass_prot_mem.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2025 Google LLC */
+
+#include <test_progs.h>
+#include "sk_bypass_prot_mem.skel.h"
+#include "network_helpers.h"
+
+#define NR_PAGES 32
+#define NR_SOCKETS 2
+#define BUF_TOTAL (NR_PAGES * 4096 / NR_SOCKETS)
+#define BUF_SINGLE 1024
+#define NR_SEND (BUF_TOTAL / BUF_SINGLE)
+
+struct test_case {
+ char name[8];
+ int family;
+ int type;
+ int (*create_sockets)(struct test_case *test_case, int sk[], int len);
+ long (*get_memory_allocated)(struct test_case *test_case, struct sk_bypass_prot_mem *skel);
+};
+
+static int tcp_create_sockets(struct test_case *test_case, int sk[], int len)
+{
+ int server, i, err = 0;
+
+ server = start_server(test_case->family, test_case->type, NULL, 0, 0);
+ if (!ASSERT_GE(server, 0, "start_server_str"))
+ return server;
+
+ /* Keep for-loop so we can change NR_SOCKETS easily. */
+ for (i = 0; i < len; i += 2) {
+ sk[i] = connect_to_fd(server, 0);
+ if (sk[i] < 0) {
+ ASSERT_GE(sk[i], 0, "connect_to_fd");
+ err = sk[i];
+ break;
+ }
+
+ sk[i + 1] = accept(server, NULL, NULL);
+ if (sk[i + 1] < 0) {
+ ASSERT_GE(sk[i + 1], 0, "accept");
+ err = sk[i + 1];
+ break;
+ }
+ }
+
+ close(server);
+
+ return err;
+}
+
+static int udp_create_sockets(struct test_case *test_case, int sk[], int len)
+{
+ int i, j, err, rcvbuf = BUF_TOTAL;
+
+ /* Keep for-loop so we can change NR_SOCKETS easily. */
+ for (i = 0; i < len; i += 2) {
+ sk[i] = start_server(test_case->family, test_case->type, NULL, 0, 0);
+ if (sk[i] < 0) {
+ ASSERT_GE(sk[i], 0, "start_server");
+ return sk[i];
+ }
+
+ sk[i + 1] = connect_to_fd(sk[i], 0);
+ if (sk[i + 1] < 0) {
+ ASSERT_GE(sk[i + 1], 0, "connect_to_fd");
+ return sk[i + 1];
+ }
+
+ err = connect_fd_to_fd(sk[i], sk[i + 1], 0);
+ if (err) {
+ ASSERT_EQ(err, 0, "connect_fd_to_fd");
+ return err;
+ }
+
+ for (j = 0; j < 2; j++) {
+ err = setsockopt(sk[i + j], SOL_SOCKET, SO_RCVBUF, &rcvbuf, sizeof(int));
+ if (err) {
+ ASSERT_EQ(err, 0, "setsockopt(SO_RCVBUF)");
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static long get_memory_allocated(struct test_case *test_case,
+ bool *activated, long *memory_allocated)
+{
+ int sk;
+
+ *activated = true;
+
+ /* AF_INET and AF_INET6 share the same memory_allocated.
+ * tcp_init_sock() is called by AF_INET and AF_INET6,
+ * but udp_lib_init_sock() is inline.
+ */
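+ /* Opening a socket runs the protocol's init hook, letting the attached BPF prog snapshot memory_allocated. */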
+ sk = socket(AF_INET, test_case->type, 0);
+ if (!ASSERT_GE(sk, 0, "get_memory_allocated"))
+ return -1;
+
+ close(sk);
+
+ return *memory_allocated;
+}
+
+static long tcp_get_memory_allocated(struct test_case *test_case, struct sk_bypass_prot_mem *skel)
+{
+ return get_memory_allocated(test_case,
+ &skel->bss->tcp_activated,
+ &skel->bss->tcp_memory_allocated);
+}
+
+static long udp_get_memory_allocated(struct test_case *test_case, struct sk_bypass_prot_mem *skel)
+{
+ return get_memory_allocated(test_case,
+ &skel->bss->udp_activated,
+ &skel->bss->udp_memory_allocated);
+}
+
+static int check_bypass(struct test_case *test_case,
+ struct sk_bypass_prot_mem *skel, bool bypass)
+{
+ char buf[BUF_SINGLE] = {};
+ long memory_allocated[2];
+ int sk[NR_SOCKETS];
+ int err, i, j;
+
+ for (i = 0; i < ARRAY_SIZE(sk); i++)
+ sk[i] = -1;
+
+ err = test_case->create_sockets(test_case, sk, ARRAY_SIZE(sk));
+ if (err)
+ goto close;
+
+ memory_allocated[0] = test_case->get_memory_allocated(test_case, skel);
+
+ /* allocate pages >= NR_PAGES */
+ for (i = 0; i < ARRAY_SIZE(sk); i++) {
+ for (j = 0; j < NR_SEND; j++) {
+ int bytes = send(sk[i], buf, sizeof(buf), 0);
+
+ /* Avoid too noisy logs when something failed. */
+ if (bytes != sizeof(buf)) {
+ ASSERT_EQ(bytes, sizeof(buf), "send");
+ if (bytes < 0) {
+ err = bytes;
+ goto drain;
+ }
+ }
+ }
+ }
+
+ memory_allocated[1] = test_case->get_memory_allocated(test_case, skel);
+
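+ /* Allow a few pages of slack for unrelated background allocations. */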
+ if (bypass)
+ ASSERT_LE(memory_allocated[1], memory_allocated[0] + 10, "bypass");
+ else
+ ASSERT_GT(memory_allocated[1], memory_allocated[0] + NR_PAGES, "no bypass");
+
+drain:
+ if (test_case->type == SOCK_DGRAM) {
+ /* UDP starts purging sk->sk_receive_queue after one RCU
+ * grace period, then udp_memory_allocated goes down,
+ * so drain the queue before close().
+ */
+ for (i = 0; i < ARRAY_SIZE(sk); i++) {
+ for (j = 0; j < NR_SEND; j++) {
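+ /* MSG_TRUNC reports the full datagram size even with a 1-byte buffer. */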
+ int bytes = recv(sk[i], buf, 1, MSG_DONTWAIT | MSG_TRUNC);
+
+ if (bytes == sizeof(buf))
+ continue;
+ if (bytes != -1 || errno != EAGAIN)
+ PRINT_FAIL("bytes: %d, errno: %s\n", bytes, strerror(errno));
+ break;
+ }
+ }
+ }
+
+close:
+ for (i = 0; i < ARRAY_SIZE(sk); i++) {
+ if (sk[i] < 0)
+ break;
+
+ close(sk[i]);
+ }
+
+ return err;
+}
+
+static void run_test(struct test_case *test_case)
+{
+ struct sk_bypass_prot_mem *skel;
+ struct nstoken *nstoken;
+ int cgroup, err;
+
+ skel = sk_bypass_prot_mem__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ return;
+
+ skel->bss->nr_cpus = libbpf_num_possible_cpus();
+
+ err = sk_bypass_prot_mem__attach(skel);
+ if (!ASSERT_OK(err, "attach"))
+ goto destroy_skel;
+
+ cgroup = test__join_cgroup("/sk_bypass_prot_mem");
+ if (!ASSERT_GE(cgroup, 0, "join_cgroup"))
+ goto destroy_skel;
+
+ err = make_netns("sk_bypass_prot_mem");
+ if (!ASSERT_EQ(err, 0, "make_netns"))
+ goto close_cgroup;
+
+ nstoken = open_netns("sk_bypass_prot_mem");
+ if (!ASSERT_OK_PTR(nstoken, "open_netns"))
+ goto remove_netns;
+
+ err = check_bypass(test_case, skel, false);
+ if (!ASSERT_EQ(err, 0, "test_bypass(false)"))
+ goto close_netns;
+
+ err = write_sysctl("/proc/sys/net/core/bypass_prot_mem", "1");
+ if (!ASSERT_EQ(err, 0, "write_sysctl(1)"))
+ goto close_netns;
+
+ err = check_bypass(test_case, skel, true);
+ if (!ASSERT_EQ(err, 0, "test_bypass(true by sysctl)"))
+ goto close_netns;
+
+ err = write_sysctl("/proc/sys/net/core/bypass_prot_mem", "0");
+ if (!ASSERT_EQ(err, 0, "write_sysctl(0)"))
+ goto close_netns;
+
+ skel->links.sock_create = bpf_program__attach_cgroup(skel->progs.sock_create, cgroup);
+ if (!ASSERT_OK_PTR(skel->links.sock_create, "attach_cgroup(sock_create)"))
+ goto close_netns;
+
+ err = check_bypass(test_case, skel, true);
+ ASSERT_EQ(err, 0, "test_bypass(true by bpf)");
+
+close_netns:
+ close_netns(nstoken);
+remove_netns:
+ remove_netns("sk_bypass_prot_mem");
+close_cgroup:
+ close(cgroup);
+destroy_skel:
+ sk_bypass_prot_mem__destroy(skel);
+}
+
+static struct test_case test_cases[] = {
+ {
+ .name = "TCP ",
+ .family = AF_INET,
+ .type = SOCK_STREAM,
+ .create_sockets = tcp_create_sockets,
+ .get_memory_allocated = tcp_get_memory_allocated,
+ },
+ {
+ .name = "UDP ",
+ .family = AF_INET,
+ .type = SOCK_DGRAM,
+ .create_sockets = udp_create_sockets,
+ .get_memory_allocated = udp_get_memory_allocated,
+ },
+ {
+ .name = "TCPv6",
+ .family = AF_INET6,
+ .type = SOCK_STREAM,
+ .create_sockets = tcp_create_sockets,
+ .get_memory_allocated = tcp_get_memory_allocated,
+ },
+ {
+ .name = "UDPv6",
+ .family = AF_INET6,
+ .type = SOCK_DGRAM,
+ .create_sockets = udp_create_sockets,
+ .get_memory_allocated = udp_get_memory_allocated,
+ },
+};
+
+void serial_test_sk_bypass_prot_mem(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
+ if (test__start_subtest(test_cases[i].name))
+ run_test(&test_cases[i]);
+ }
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/snprintf.c b/tools/testing/selftests/bpf/prog_tests/snprintf.c
index 4be6fdb78c6a..594441acb707 100644
--- a/tools/testing/selftests/bpf/prog_tests/snprintf.c
+++ b/tools/testing/selftests/bpf/prog_tests/snprintf.c
@@ -116,6 +116,8 @@ static void test_snprintf_negative(void)
ASSERT_ERR(load_single_snprintf("%llc"), "invalid specifier 7");
ASSERT_ERR(load_single_snprintf("\x80"), "non ascii character");
ASSERT_ERR(load_single_snprintf("\x1"), "non printable character");
+ ASSERT_ERR(load_single_snprintf("%p%"), "invalid specifier 8");
+ ASSERT_ERR(load_single_snprintf("%s%"), "invalid specifier 9");
}
void test_snprintf(void)
diff --git a/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c b/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c
index a4517bee34d5..27781df8f2fb 100644
--- a/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c
+++ b/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2024 Meta
+#include <poll.h>
#include <test_progs.h>
#include "network_helpers.h"
#include "sock_iter_batch.skel.h"
#define TEST_NS "sock_iter_batch_netns"
+#define TEST_CHILD_NS "sock_iter_batch_child_netns"
static const int init_batch_size = 16;
static const int nr_soreuse = 4;
@@ -118,6 +120,45 @@ done:
return nth_sock_idx;
}
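+/* Destroy the socket matching destroy_cookie by reading the iter_tcp_destroy iterator. */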
+static void destroy(int fd)
+{
+ struct sock_iter_batch *skel = NULL;
+ __u64 cookie = socket_cookie(fd);
+ struct bpf_link *link = NULL;
+ int iter_fd = -1;
+ int nread;
+ __u64 out;
+
+ skel = sock_iter_batch__open();
+ if (!ASSERT_OK_PTR(skel, "sock_iter_batch__open"))
+ goto done;
+
+ skel->rodata->destroy_cookie = cookie;
+
+ if (!ASSERT_OK(sock_iter_batch__load(skel), "sock_iter_batch__load"))
+ goto done;
+
+ link = bpf_program__attach_iter(skel->progs.iter_tcp_destroy, NULL);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_iter"))
+ goto done;
+
+ iter_fd = bpf_iter_create(bpf_link__fd(link));
+ if (!ASSERT_OK_FD(iter_fd, "bpf_iter_create"))
+ goto done;
+
+ /* Delete matching socket. */
+ nread = read(iter_fd, &out, sizeof(out));
+ ASSERT_GE(nread, 0, "nread");
+ if (nread)
+ ASSERT_EQ(out, cookie, "cookie matches");
+done:
+ if (iter_fd >= 0)
+ close(iter_fd);
+ bpf_link__destroy(link);
+ sock_iter_batch__destroy(skel);
+ close(fd);
+}
+
static int get_seen_count(int fd, struct sock_count counts[], int n)
{
__u64 cookie = socket_cookie(fd);
@@ -152,8 +193,71 @@ static void check_n_were_seen_once(int *fds, int fds_len, int n,
ASSERT_EQ(seen_once, n, "seen_once");
}
+static int accept_from_one(struct pollfd *server_poll_fds,
+ int server_poll_fds_len)
+{
+ static const int poll_timeout_ms = 5000; /* 5s */
+ int ret;
+ int i;
+
+ ret = poll(server_poll_fds, server_poll_fds_len, poll_timeout_ms);
+ if (!ASSERT_EQ(ret, 1, "poll"))
+ return -1;
+
+ for (i = 0; i < server_poll_fds_len; i++)
+ if (server_poll_fds[i].revents & POLLIN)
+ return accept(server_poll_fds[i].fd, NULL, NULL);
+
+ return -1;
+}
+
+static int *connect_to_server(int family, int sock_type, const char *addr,
+ __u16 port, int nr_connects, int *server_fds,
+ int server_fds_len)
+{
+ struct pollfd *server_poll_fds = NULL;
+ int *established_socks = NULL;
+ int i;
+
+ server_poll_fds = calloc(server_fds_len, sizeof(*server_poll_fds));
+ if (!ASSERT_OK_PTR(server_poll_fds, "server_poll_fds"))
+ return NULL;
+
+ for (i = 0; i < server_fds_len; i++) {
+ server_poll_fds[i].fd = server_fds[i];
+ server_poll_fds[i].events = POLLIN;
+ }
+
+ i = 0;
+
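+ /* Each iteration stores a pair: the connect()ed client and its accept()ed peer. */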
+ established_socks = malloc(sizeof(*established_socks) * nr_connects * 2);
+ if (!ASSERT_OK_PTR(established_socks, "established_socks"))
+ goto error;
+
+ while (nr_connects--) {
+ established_socks[i] = connect_to_addr_str(family, sock_type,
+ addr, port, NULL);
+ if (!ASSERT_OK_FD(established_socks[i], "connect_to_addr_str"))
+ goto error;
+ i++;
+ established_socks[i] = accept_from_one(server_poll_fds,
+ server_fds_len);
+ if (!ASSERT_OK_FD(established_socks[i], "accept_from_one"))
+ goto error;
+ i++;
+ }
+
+ free(server_poll_fds);
+ return established_socks;
+error:
+ free_fds(established_socks, i);
+ free(server_poll_fds);
+ return NULL;
+}
+
static void remove_seen(int family, int sock_type, const char *addr, __u16 port,
- int *socks, int socks_len, struct sock_count *counts,
+ int *socks, int socks_len, int *established_socks,
+ int established_socks_len, struct sock_count *counts,
int counts_len, struct bpf_link *link, int iter_fd)
{
int close_idx;
@@ -182,8 +286,46 @@ static void remove_seen(int family, int sock_type, const char *addr, __u16 port,
counts_len);
}
+static void remove_seen_established(int family, int sock_type, const char *addr,
+ __u16 port, int *listen_socks,
+ int listen_socks_len, int *established_socks,
+ int established_socks_len,
+ struct sock_count *counts, int counts_len,
+ struct bpf_link *link, int iter_fd)
+{
+ int close_idx;
+
+ /* Iterate through all listening sockets. */
+ read_n(iter_fd, listen_socks_len, counts, counts_len);
+
+ /* Make sure we saw all listening sockets exactly once. */
+ check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len,
+ counts, counts_len);
+
+ /* Leave one established socket. */
+ read_n(iter_fd, established_socks_len - 1, counts, counts_len);
+
+ /* Close a socket we've already seen to remove it from the bucket. */
+ close_idx = get_nth_socket(established_socks, established_socks_len,
+ link, listen_socks_len + 1);
+ if (!ASSERT_GE(close_idx, 0, "close_idx"))
+ return;
+ destroy(established_socks[close_idx]);
+ established_socks[close_idx] = -1;
+
+ /* Iterate through the rest of the sockets. */
+ read_n(iter_fd, -1, counts, counts_len);
+
+ /* Make sure the last socket wasn't skipped and that there were no
+ * repeats.
+ */
+ check_n_were_seen_once(established_socks, established_socks_len,
+ established_socks_len - 1, counts, counts_len);
+}
+
static void remove_unseen(int family, int sock_type, const char *addr,
__u16 port, int *socks, int socks_len,
+ int *established_socks, int established_socks_len,
struct sock_count *counts, int counts_len,
struct bpf_link *link, int iter_fd)
{
@@ -214,8 +356,54 @@ static void remove_unseen(int family, int sock_type, const char *addr,
counts_len);
}
+static void remove_unseen_established(int family, int sock_type,
+ const char *addr, __u16 port,
+ int *listen_socks, int listen_socks_len,
+ int *established_socks,
+ int established_socks_len,
+ struct sock_count *counts, int counts_len,
+ struct bpf_link *link, int iter_fd)
+{
+ int close_idx;
+
+ /* Iterate through all listening sockets. */
+ read_n(iter_fd, listen_socks_len, counts, counts_len);
+
+ /* Make sure we saw all listening sockets exactly once. */
+ check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len,
+ counts, counts_len);
+
+ /* Iterate through the first established socket. */
+ read_n(iter_fd, 1, counts, counts_len);
+
+ /* Make sure we saw one established socket. */
+ check_n_were_seen_once(established_socks, established_socks_len, 1,
+ counts, counts_len);
+
+ /* Close what would be the next socket in the bucket to exercise the
+ * condition where we need to skip past the first cookie we remembered.
+ */
+ close_idx = get_nth_socket(established_socks, established_socks_len,
+ link, listen_socks_len + 1);
+ if (!ASSERT_GE(close_idx, 0, "close_idx"))
+ return;
+
+ destroy(established_socks[close_idx]);
+ established_socks[close_idx] = -1;
+
+ /* Iterate through the rest of the sockets. */
+ read_n(iter_fd, -1, counts, counts_len);
+
+ /* Make sure the remaining sockets were seen exactly once and that we
+ * didn't repeat the socket that was already seen.
+ */
+ check_n_were_seen_once(established_socks, established_socks_len,
+ established_socks_len - 1, counts, counts_len);
+}
+
static void remove_all(int family, int sock_type, const char *addr,
__u16 port, int *socks, int socks_len,
+ int *established_socks, int established_socks_len,
struct sock_count *counts, int counts_len,
struct bpf_link *link, int iter_fd)
{
@@ -242,8 +430,57 @@ static void remove_all(int family, int sock_type, const char *addr,
ASSERT_EQ(read_n(iter_fd, -1, counts, counts_len), 0, "read_n");
}
+static void remove_all_established(int family, int sock_type, const char *addr,
+ __u16 port, int *listen_socks,
+ int listen_socks_len, int *established_socks,
+ int established_socks_len,
+ struct sock_count *counts, int counts_len,
+ struct bpf_link *link, int iter_fd)
+{
+ int *close_idx = NULL;
+ int i;
+
+ /* Iterate through all listening sockets. */
+ read_n(iter_fd, listen_socks_len, counts, counts_len);
+
+ /* Make sure we saw all listening sockets exactly once. */
+ check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len,
+ counts, counts_len);
+
+ /* Iterate through the first established socket. */
+ read_n(iter_fd, 1, counts, counts_len);
+
+ /* Make sure we saw one established socket. */
+ check_n_were_seen_once(established_socks, established_socks_len, 1,
+ counts, counts_len);
+
+ /* Close all remaining sockets to exhaust the list of saved cookies and
+ * exit without putting any sockets into the batch on the next read.
+ */
+ close_idx = malloc(sizeof(int) * (established_socks_len - 1));
+ if (!ASSERT_OK_PTR(close_idx, "close_idx malloc"))
+ return;
+ for (i = 0; i < established_socks_len - 1; i++) {
+ close_idx[i] = get_nth_socket(established_socks,
+ established_socks_len, link,
+ listen_socks_len + i);
+ if (!ASSERT_GE(close_idx[i], 0, "close_idx")) {
+ free(close_idx);
+ return;
+ }
+ }
+
+ for (i = 0; i < established_socks_len - 1; i++) {
+ destroy(established_socks[close_idx[i]]);
+ established_socks[close_idx[i]] = -1;
+ }
+
+ /* Make sure there are no more sockets returned */
+ ASSERT_EQ(read_n(iter_fd, -1, counts, counts_len), 0, "read_n");
+ free(close_idx);
+}
+
static void add_some(int family, int sock_type, const char *addr, __u16 port,
- int *socks, int socks_len, struct sock_count *counts,
+ int *socks, int socks_len, int *established_socks,
+ int established_socks_len, struct sock_count *counts,
int counts_len, struct bpf_link *link, int iter_fd)
{
int *new_socks = NULL;
@@ -271,8 +508,52 @@ done:
free_fds(new_socks, socks_len);
}
+static void add_some_established(int family, int sock_type, const char *addr,
+ __u16 port, int *listen_socks,
+ int listen_socks_len, int *established_socks,
+ int established_socks_len,
+ struct sock_count *counts,
+ int counts_len, struct bpf_link *link,
+ int iter_fd)
+{
+ int *new_socks = NULL;
+
+ /* Iterate through all listening sockets. */
+ read_n(iter_fd, listen_socks_len, counts, counts_len);
+
+ /* Make sure we saw all listening sockets exactly once. */
+ check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len,
+ counts, counts_len);
+
+ /* Iterate through the first established_socks_len - 1 sockets. */
+ read_n(iter_fd, established_socks_len - 1, counts, counts_len);
+
+ /* Make sure we saw established_socks_len - 1 sockets exactly once. */
+ check_n_were_seen_once(established_socks, established_socks_len,
+ established_socks_len - 1, counts, counts_len);
+
+ /* Double the number of established sockets in the bucket. */
+ new_socks = connect_to_server(family, sock_type, addr, port,
+ established_socks_len / 2, listen_socks,
+ listen_socks_len);
+ if (!ASSERT_OK_PTR(new_socks, "connect_to_server"))
+ goto done;
+
+ /* Iterate through the rest of the sockets. */
+ read_n(iter_fd, -1, counts, counts_len);
+
+ /* Make sure each of the original sockets was seen exactly once. */
+ check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len,
+ counts, counts_len);
+ check_n_were_seen_once(established_socks, established_socks_len,
+ established_socks_len, counts, counts_len);
+done:
+ free_fds(new_socks, established_socks_len);
+}
+
static void force_realloc(int family, int sock_type, const char *addr,
__u16 port, int *socks, int socks_len,
+ int *established_socks, int established_socks_len,
struct sock_count *counts, int counts_len,
struct bpf_link *link, int iter_fd)
{
@@ -299,11 +580,32 @@ done:
free_fds(new_socks, socks_len);
}
+static void force_realloc_established(int family, int sock_type,
+ const char *addr, __u16 port,
+ int *listen_socks, int listen_socks_len,
+ int *established_socks,
+ int established_socks_len,
+ struct sock_count *counts, int counts_len,
+ struct bpf_link *link, int iter_fd)
+{
+ /* Iterate through all sockets to trigger a realloc. */
+ read_n(iter_fd, -1, counts, counts_len);
+
+ /* Make sure each socket was seen exactly once. */
+ check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len,
+ counts, counts_len);
+ check_n_were_seen_once(established_socks, established_socks_len,
+ established_socks_len, counts, counts_len);
+}
+
struct test_case {
void (*test)(int family, int sock_type, const char *addr, __u16 port,
- int *socks, int socks_len, struct sock_count *counts,
+ int *socks, int socks_len, int *established_socks,
+ int established_socks_len, struct sock_count *counts,
int counts_len, struct bpf_link *link, int iter_fd);
const char *description;
+ int ehash_buckets;
+ int connections;
int init_socks;
int max_socks;
int sock_type;
@@ -358,18 +660,140 @@ static struct test_case resume_tests[] = {
.family = AF_INET6,
.test = force_realloc,
},
+ {
+ .description = "tcp: resume after removing a seen socket (listening)",
+ .init_socks = nr_soreuse,
+ .max_socks = nr_soreuse,
+ .sock_type = SOCK_STREAM,
+ .family = AF_INET6,
+ .test = remove_seen,
+ },
+ {
+ .description = "tcp: resume after removing one unseen socket (listening)",
+ .init_socks = nr_soreuse,
+ .max_socks = nr_soreuse,
+ .sock_type = SOCK_STREAM,
+ .family = AF_INET6,
+ .test = remove_unseen,
+ },
+ {
+ .description = "tcp: resume after removing all unseen sockets (listening)",
+ .init_socks = nr_soreuse,
+ .max_socks = nr_soreuse,
+ .sock_type = SOCK_STREAM,
+ .family = AF_INET6,
+ .test = remove_all,
+ },
+ {
+ .description = "tcp: resume after adding a few sockets (listening)",
+ .init_socks = nr_soreuse,
+ .max_socks = nr_soreuse,
+ .sock_type = SOCK_STREAM,
+ /* Use AF_INET so that new sockets are added to the head of the
+ * bucket's list.
+ */
+ .family = AF_INET,
+ .test = add_some,
+ },
+ {
+ .description = "tcp: force a realloc to occur (listening)",
+ .init_socks = init_batch_size,
+ .max_socks = init_batch_size * 2,
+ .sock_type = SOCK_STREAM,
+ /* Use AF_INET6 so that new sockets are added to the tail of the
+ * bucket's list, needing to be added to the next batch to force
+ * a realloc.
+ */
+ .family = AF_INET6,
+ .test = force_realloc,
+ },
+ {
+ .description = "tcp: resume after removing a seen socket (established)",
+ /* Force all established sockets into one bucket */
+ .ehash_buckets = 1,
+ .connections = nr_soreuse,
+ .init_socks = nr_soreuse,
+ /* Room for connect()ed and accept()ed sockets */
+ .max_socks = nr_soreuse * 3,
+ .sock_type = SOCK_STREAM,
+ .family = AF_INET6,
+ .test = remove_seen_established,
+ },
+ {
+ .description = "tcp: resume after removing one unseen socket (established)",
+ /* Force all established sockets into one bucket */
+ .ehash_buckets = 1,
+ .connections = nr_soreuse,
+ .init_socks = nr_soreuse,
+ /* Room for connect()ed and accept()ed sockets */
+ .max_socks = nr_soreuse * 3,
+ .sock_type = SOCK_STREAM,
+ .family = AF_INET6,
+ .test = remove_unseen_established,
+ },
+ {
+ .description = "tcp: resume after removing all unseen sockets (established)",
+ /* Force all established sockets into one bucket */
+ .ehash_buckets = 1,
+ .connections = nr_soreuse,
+ .init_socks = nr_soreuse,
+ /* Room for connect()ed and accept()ed sockets */
+ .max_socks = nr_soreuse * 3,
+ .sock_type = SOCK_STREAM,
+ .family = AF_INET6,
+ .test = remove_all_established,
+ },
+ {
+ .description = "tcp: resume after adding a few sockets (established)",
+ /* Force all established sockets into one bucket */
+ .ehash_buckets = 1,
+ .connections = nr_soreuse,
+ .init_socks = nr_soreuse,
+ /* Room for connect()ed and accept()ed sockets */
+ .max_socks = nr_soreuse * 3,
+ .sock_type = SOCK_STREAM,
+ .family = AF_INET6,
+ .test = add_some_established,
+ },
+ {
+ .description = "tcp: force a realloc to occur (established)",
+ /* Force all established sockets into one bucket */
+ .ehash_buckets = 1,
+ /* Bucket size will need to double when going from listening to
+ * established sockets.
+ */
+ .connections = init_batch_size,
+ .init_socks = nr_soreuse,
+ /* Room for connect()ed and accept()ed sockets */
+ .max_socks = nr_soreuse + (init_batch_size * 2),
+ .sock_type = SOCK_STREAM,
+ .family = AF_INET6,
+ .test = force_realloc_established,
+ },
};
static void do_resume_test(struct test_case *tc)
{
struct sock_iter_batch *skel = NULL;
+ struct sock_count *counts = NULL;
static const __u16 port = 10001;
+ struct nstoken *nstoken = NULL;
struct bpf_link *link = NULL;
- struct sock_count *counts;
+ int *established_fds = NULL;
int err, iter_fd = -1;
const char *addr;
int *fds = NULL;
- int local_port;
+
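+	/* net.ipv4.tcp_child_ehash_entries sets the ehash size inherited by
+	 * namespaces created afterwards, so configure it before adding
+	 * TEST_CHILD_NS.
+	 */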
+ if (tc->ehash_buckets) {
+ SYS_NOFAIL("ip netns del " TEST_CHILD_NS);
+ SYS(done, "sysctl -wq net.ipv4.tcp_child_ehash_entries=%d",
+ tc->ehash_buckets);
+ SYS(done, "ip netns add %s", TEST_CHILD_NS);
+ SYS(done, "ip -net %s link set dev lo up", TEST_CHILD_NS);
+ nstoken = open_netns(TEST_CHILD_NS);
+ if (!ASSERT_OK_PTR(nstoken, "open_child_netns"))
+ goto done;
+ }
counts = calloc(tc->max_socks, sizeof(*counts));
if (!ASSERT_OK_PTR(counts, "counts"))
@@ -384,11 +808,18 @@ static void do_resume_test(struct test_case *tc)
tc->init_socks);
if (!ASSERT_OK_PTR(fds, "start_reuseport_server"))
goto done;
- local_port = get_socket_local_port(*fds);
- if (!ASSERT_GE(local_port, 0, "get_socket_local_port"))
- goto done;
- skel->rodata->ports[0] = ntohs(local_port);
+ if (tc->connections) {
+ established_fds = connect_to_server(tc->family, tc->sock_type,
+ addr, port,
+ tc->connections, fds,
+ tc->init_socks);
+ if (!ASSERT_OK_PTR(established_fds, "connect_to_server"))
+ goto done;
+ }
+ skel->rodata->ports[0] = 0;
+ skel->rodata->ports[1] = 0;
skel->rodata->sf = tc->family;
+ skel->rodata->ss = 0;
err = sock_iter_batch__load(skel);
if (!ASSERT_OK(err, "sock_iter_batch__load"))
@@ -406,10 +837,15 @@ static void do_resume_test(struct test_case *tc)
goto done;
tc->test(tc->family, tc->sock_type, addr, port, fds, tc->init_socks,
- counts, tc->max_socks, link, iter_fd);
+ established_fds, tc->connections * 2, counts, tc->max_socks,
+ link, iter_fd);
done:
+ close_netns(nstoken);
+ SYS_NOFAIL("ip netns del " TEST_CHILD_NS);
+ SYS_NOFAIL("sysctl -w net.ipv4.tcp_child_ehash_entries=0");
free(counts);
free_fds(fds, tc->init_socks);
+ free_fds(established_fds, tc->connections * 2);
if (iter_fd >= 0)
close(iter_fd);
bpf_link__destroy(link);
@@ -454,6 +890,8 @@ static void do_test(int sock_type, bool onebyone)
skel->rodata->ports[i] = ntohs(local_port);
}
skel->rodata->sf = AF_INET6;
+ if (sock_type == SOCK_STREAM)
+ skel->rodata->ss = TCP_LISTEN;
err = sock_iter_batch__load(skel);
if (!ASSERT_OK(err, "sock_iter_batch__load"))
diff --git a/tools/testing/selftests/bpf/prog_tests/socket_helpers.h b/tools/testing/selftests/bpf/prog_tests/socket_helpers.h
index e02cabcc814e..0d59503a0c73 100644
--- a/tools/testing/selftests/bpf/prog_tests/socket_helpers.h
+++ b/tools/testing/selftests/bpf/prog_tests/socket_helpers.h
@@ -17,11 +17,16 @@
#define VMADDR_CID_LOCAL 1
#endif
+/* include/linux/compiler_types.h */
+#if __STDC_VERSION__ < 202311L && !defined(auto)
+# define auto __auto_type
+#endif
+
/* include/linux/cleanup.h */
#define __get_and_null(p, nullvalue) \
({ \
- __auto_type __ptr = &(p); \
- __auto_type __val = *__ptr; \
+ auto __ptr = &(p); \
+ auto __val = *__ptr; \
*__ptr = nullvalue; \
__val; \
})
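The hunk above swaps __auto_type for the C23 keyword spelling so the header builds in both modes. Below is a minimal standalone sketch (not part of the patch) of how the resulting __get_and_null() macro behaves; it should compile with any GNU-compatible compiler:

#include <assert.h>
#include <stddef.h>

/* Same fallback as in socket_helpers.h: pre-C23 compilers get the GNU
 * __auto_type extension spelled as `auto`.
 */
#if __STDC_VERSION__ < 202311L && !defined(auto)
# define auto __auto_type
#endif

#define __get_and_null(p, nullvalue) \
({ \
	auto __ptr = &(p); \
	auto __val = *__ptr; \
	*__ptr = nullvalue; \
	__val; \
})

int main(void)
{
	int x = 42;
	int *p = &x;
	/* Returns the old value of p and nulls p in a single expression */
	int *old = __get_and_null(p, NULL);

	assert(old == &x);
	assert(p == NULL);
	return 0;
}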
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
index b6c471da5c28..b87e7f39e15a 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
@@ -314,6 +314,95 @@ out:
test_sockmap_ktls__destroy(skel);
}
+static void test_sockmap_ktls_tx_pop(int family, int sotype)
+{
+ char msg[37] = "0123456789abcdefghijklmnopqrstuvwxyz\0";
+ int c = 0, p = 0, one = 1, sent, recvd;
+ struct test_sockmap_ktls *skel;
+ int prog_fd, map_fd;
+ char rcv[50] = {0};
+ int err;
+ int i, m, r;
+
+ skel = test_sockmap_ktls__open_and_load();
+ if (!ASSERT_TRUE(skel, "open ktls skel"))
+ return;
+
+ err = create_pair(family, sotype, &c, &p);
+ if (!ASSERT_OK(err, "create_pair()"))
+ goto out;
+
+ prog_fd = bpf_program__fd(skel->progs.prog_sk_policy);
+ map_fd = bpf_map__fd(skel->maps.sock_map);
+
+ err = bpf_prog_attach(prog_fd, map_fd, BPF_SK_MSG_VERDICT, 0);
+ if (!ASSERT_OK(err, "bpf_prog_attach sk msg"))
+ goto out;
+
+ err = bpf_map_update_elem(map_fd, &one, &c, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem(c)"))
+ goto out;
+
+ err = init_ktls_pairs(c, p);
+ if (!ASSERT_OK(err, "init_ktls_pairs(c, p)"))
+ goto out;
+
+ struct {
+ int pop_start;
+ int pop_len;
+ } pop_policy[] = {
+ /* trim the start */
+ {0, 2},
+ {0, 10},
+ {1, 2},
+ {1, 10},
+ /* trim the end */
+ {35, 2},
+ /* New entries should be added before this line */
+ {-1, -1},
+ };
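+	/* For example, the {1, 10} policy pops msg[1..10] ("123456789a"),
+	 * so the receiver sees "0" followed by "bcde..." and recvd should
+	 * be sizeof(msg) - 10.
+	 */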
+
+ i = 0;
+ while (pop_policy[i].pop_start >= 0) {
+ skel->bss->pop_start = pop_policy[i].pop_start;
+ skel->bss->pop_end = pop_policy[i].pop_len;
+
+ sent = send(c, msg, sizeof(msg), 0);
+ if (!ASSERT_EQ(sent, sizeof(msg), "send(msg)"))
+ goto out;
+
+ recvd = recv_timeout(p, rcv, sizeof(rcv), MSG_DONTWAIT, 1);
+ if (!ASSERT_EQ(recvd, sizeof(msg) - pop_policy[i].pop_len, "pop len mismatch"))
+ goto out;
+
+ /* verify the data
+ * msg: 0123456789a bcdefghij klmnopqrstuvwxyz
+ * | |
+ * popped data
+ */
+ for (m = 0, r = 0; m < sizeof(msg);) {
+ /* skip checking the data that has been popped */
+ if (m >= pop_policy[i].pop_start &&
+ m <= pop_policy[i].pop_start + pop_policy[i].pop_len - 1) {
+ m++;
+ continue;
+ }
+
+ if (!ASSERT_EQ(msg[m], rcv[r], "data mismatch"))
+ goto out;
+ m++;
+ r++;
+ }
+ i++;
+ }
+out:
+ if (c)
+ close(c);
+ if (p)
+ close(p);
+ test_sockmap_ktls__destroy(skel);
+}
+
static void run_tests(int family, enum bpf_map_type map_type)
{
int map;
@@ -338,6 +427,8 @@ static void run_ktls_test(int family, int sotype)
test_sockmap_ktls_tx_cork(family, sotype, true);
if (test__start_subtest("tls tx egress with no buf"))
test_sockmap_ktls_tx_no_buf(family, sotype, true);
+ if (test__start_subtest("tls tx with pop"))
+ test_sockmap_ktls_tx_pop(family, sotype);
}
void test_sockmap_ktls(void)
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
index 1d98eee7a2c3..f1bdccc7e4e7 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
@@ -924,6 +924,8 @@ static void redir_partial(int family, int sotype, int sock_map, int parser_map)
goto close;
n = xsend(c1, buf, sizeof(buf), 0);
+ if (n == -1)
+ goto close;
if (n < sizeof(buf))
FAIL("incomplete write");
diff --git a/tools/testing/selftests/bpf/prog_tests/spin_lock.c b/tools/testing/selftests/bpf/prog_tests/spin_lock.c
index e3ea5dc2f697..254fbfeab06a 100644
--- a/tools/testing/selftests/bpf/prog_tests/spin_lock.c
+++ b/tools/testing/selftests/bpf/prog_tests/spin_lock.c
@@ -13,22 +13,22 @@ static struct {
const char *err_msg;
} spin_lock_fail_tests[] = {
{ "lock_id_kptr_preserve",
- "5: (bf) r1 = r0 ; R0_w=ptr_foo(id=2,ref_obj_id=2) "
- "R1_w=ptr_foo(id=2,ref_obj_id=2) refs=2\n6: (85) call bpf_this_cpu_ptr#154\n"
+ "5: (bf) r1 = r0 ; R0=ptr_foo(id=2,ref_obj_id=2) "
+ "R1=ptr_foo(id=2,ref_obj_id=2) refs=2\n6: (85) call bpf_this_cpu_ptr#154\n"
"R1 type=ptr_ expected=percpu_ptr_" },
{ "lock_id_global_zero",
- "; R1_w=map_value(map=.data.A,ks=4,vs=4)\n2: (85) call bpf_this_cpu_ptr#154\n"
+ "; R1=map_value(map=.data.A,ks=4,vs=4)\n2: (85) call bpf_this_cpu_ptr#154\n"
"R1 type=map_value expected=percpu_ptr_" },
{ "lock_id_mapval_preserve",
"[0-9]\\+: (bf) r1 = r0 ;"
- " R0_w=map_value(id=1,map=array_map,ks=4,vs=8)"
- " R1_w=map_value(id=1,map=array_map,ks=4,vs=8)\n"
+ " R0=map_value(id=1,map=array_map,ks=4,vs=8)"
+ " R1=map_value(id=1,map=array_map,ks=4,vs=8)\n"
"[0-9]\\+: (85) call bpf_this_cpu_ptr#154\n"
"R1 type=map_value expected=percpu_ptr_" },
{ "lock_id_innermapval_preserve",
"[0-9]\\+: (bf) r1 = r0 ;"
" R0=map_value(id=2,ks=4,vs=8)"
- " R1_w=map_value(id=2,ks=4,vs=8)\n"
+ " R1=map_value(id=2,ks=4,vs=8)\n"
"[0-9]\\+: (85) call bpf_this_cpu_ptr#154\n"
"R1 type=map_value expected=percpu_ptr_" },
{ "lock_id_mismatch_kptr_kptr", "bpf_spin_unlock of different lock" },
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c
index b7ba5cd47d96..271b5cc9fc01 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c
@@ -39,7 +39,7 @@ retry:
bpf_map_update_elem(control_map_fd, &key, &val, 0);
/* for every element in stackid_hmap, we can find a corresponding one
- * in stackmap, and vise versa.
+ * in stackmap, and vice versa.
*/
err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
index 0832fd787457..b277dddd5af7 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
@@ -66,7 +66,7 @@ retry:
bpf_map_update_elem(control_map_fd, &key, &val, 0);
/* for every element in stackid_hmap, we can find a corresponding one
- * in stackmap, and vise versa.
+ * in stackmap, and vice versa.
*/
err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c
new file mode 100644
index 000000000000..c9efdd2a5b18
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "stacktrace_ips.skel.h"
+
+#ifdef __x86_64__
+static int check_stacktrace_ips(int fd, __u32 key, int cnt, ...)
+{
+ __u64 ips[PERF_MAX_STACK_DEPTH];
+ struct ksyms *ksyms = NULL;
+ int i, err = 0;
+ va_list args;
+
+ /* sorted by addr */
+ ksyms = load_kallsyms_local();
+ if (!ASSERT_OK_PTR(ksyms, "load_kallsyms_local"))
+ return -1;
+
+ /* unlikely, but... */
+ if (!ASSERT_LT(cnt, PERF_MAX_STACK_DEPTH, "check_max"))
+ return -1;
+
+ err = bpf_map_lookup_elem(fd, &key, ips);
+ if (err)
+ goto out;
+
+ /*
+ * Compare all symbols provided via arguments with the stacktrace ips
+ * and their related symbol addresses.
+ */
+ va_start(args, cnt);
+
+ for (i = 0; i < cnt; i++) {
+ unsigned long val;
+ struct ksym *ksym;
+
+ val = va_arg(args, unsigned long);
+ ksym = ksym_search_local(ksyms, ips[i]);
+ if (!ASSERT_OK_PTR(ksym, "ksym_search_local"))
+ break;
+ ASSERT_EQ(ksym->addr, val, "stack_cmp");
+ }
+
+ va_end(args);
+
+out:
+ free_kallsyms_local(ksyms);
+ return err;
+}
+
+static void test_stacktrace_ips_kprobe_multi(bool retprobe)
+{
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
+ .retprobe = retprobe
+ );
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct stacktrace_ips *skel;
+
+ skel = stacktrace_ips__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "stacktrace_ips__open_and_load"))
+ return;
+
+ if (!skel->kconfig->CONFIG_UNWINDER_ORC) {
+ test__skip();
+ goto cleanup;
+ }
+
+ skel->links.kprobe_multi_test = bpf_program__attach_kprobe_multi_opts(
+ skel->progs.kprobe_multi_test,
+ "bpf_testmod_stacktrace_test", &opts);
+ if (!ASSERT_OK_PTR(skel->links.kprobe_multi_test, "bpf_program__attach_kprobe_multi_opts"))
+ goto cleanup;
+
+ trigger_module_test_read(1);
+
+ load_kallsyms();
+
+ check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 4,
+ ksym_get_addr("bpf_testmod_stacktrace_test_3"),
+ ksym_get_addr("bpf_testmod_stacktrace_test_2"),
+ ksym_get_addr("bpf_testmod_stacktrace_test_1"),
+ ksym_get_addr("bpf_testmod_test_read"));
+
+cleanup:
+ stacktrace_ips__destroy(skel);
+}
+
+static void test_stacktrace_ips_raw_tp(void)
+{
+ __u32 info_len = sizeof(struct bpf_prog_info);
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct bpf_prog_info info = {};
+ struct stacktrace_ips *skel;
+ __u64 bpf_prog_ksym = 0;
+ int err;
+
+ skel = stacktrace_ips__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "stacktrace_ips__open_and_load"))
+ return;
+
+ if (!skel->kconfig->CONFIG_UNWINDER_ORC) {
+ test__skip();
+ goto cleanup;
+ }
+
+ skel->links.rawtp_test = bpf_program__attach_raw_tracepoint(
+ skel->progs.rawtp_test,
+ "bpf_testmod_test_read");
+ if (!ASSERT_OK_PTR(skel->links.rawtp_test, "bpf_program__attach_raw_tracepoint"))
+ goto cleanup;
+
+ /* get bpf program address */
+ info.jited_ksyms = ptr_to_u64(&bpf_prog_ksym);
+ info.nr_jited_ksyms = 1;
+ err = bpf_prog_get_info_by_fd(bpf_program__fd(skel->progs.rawtp_test),
+ &info, &info_len);
+ if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
+ goto cleanup;
+
+ trigger_module_test_read(1);
+
+ load_kallsyms();
+
+ check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 2,
+ bpf_prog_ksym,
+ ksym_get_addr("bpf_trace_run2"));
+
+cleanup:
+ stacktrace_ips__destroy(skel);
+}
+
+static void __test_stacktrace_ips(void)
+{
+ if (test__start_subtest("kprobe_multi"))
+ test_stacktrace_ips_kprobe_multi(false);
+ if (test__start_subtest("kretprobe_multi"))
+ test_stacktrace_ips_kprobe_multi(true);
+ if (test__start_subtest("raw_tp"))
+ test_stacktrace_ips_raw_tp();
+}
+#else
+static void __test_stacktrace_ips(void)
+{
+ test__skip();
+}
+#endif
+
+void test_stacktrace_ips(void)
+{
+ __test_stacktrace_ips();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c
index df59e4ae2951..c23b97414813 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c
@@ -1,46 +1,27 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
+#include "stacktrace_map.skel.h"
void test_stacktrace_map(void)
{
+ struct stacktrace_map *skel;
int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
- const char *prog_name = "oncpu";
- int err, prog_fd, stack_trace_len;
- const char *file = "./test_stacktrace_map.bpf.o";
- __u32 key, val, duration = 0;
- struct bpf_program *prog;
- struct bpf_object *obj;
- struct bpf_link *link;
+ int err, stack_trace_len;
+ __u32 key, val, stack_id, duration = 0;
+ __u64 stack[PERF_MAX_STACK_DEPTH];
- err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
- if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+ skel = stacktrace_map__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
return;
- prog = bpf_object__find_program_by_name(obj, prog_name);
- if (CHECK(!prog, "find_prog", "prog '%s' not found\n", prog_name))
- goto close_prog;
-
- link = bpf_program__attach_tracepoint(prog, "sched", "sched_switch");
- if (!ASSERT_OK_PTR(link, "attach_tp"))
- goto close_prog;
-
- /* find map fds */
- control_map_fd = bpf_find_map(__func__, obj, "control_map");
- if (CHECK_FAIL(control_map_fd < 0))
- goto disable_pmu;
-
- stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
- if (CHECK_FAIL(stackid_hmap_fd < 0))
- goto disable_pmu;
-
- stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
- if (CHECK_FAIL(stackmap_fd < 0))
- goto disable_pmu;
-
- stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
- if (CHECK_FAIL(stack_amap_fd < 0))
- goto disable_pmu;
+ control_map_fd = bpf_map__fd(skel->maps.control_map);
+ stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap);
+ stackmap_fd = bpf_map__fd(skel->maps.stackmap);
+ stack_amap_fd = bpf_map__fd(skel->maps.stack_amap);
+ err = stacktrace_map__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto out;
/* give some time for bpf program run */
sleep(1);
@@ -50,26 +31,32 @@ void test_stacktrace_map(void)
bpf_map_update_elem(control_map_fd, &key, &val, 0);
/* for every element in stackid_hmap, we can find a corresponding one
- * in stackmap, and vise versa.
+ * in stackmap, and vice versa.
*/
err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
"err %d errno %d\n", err, errno))
- goto disable_pmu;
+ goto out;
err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
"err %d errno %d\n", err, errno))
- goto disable_pmu;
+ goto out;
stack_trace_len = PERF_MAX_STACK_DEPTH * sizeof(__u64);
err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
"err %d errno %d\n", err, errno))
- goto disable_pmu;
-
-disable_pmu:
- bpf_link__destroy(link);
-close_prog:
- bpf_object__close(obj);
+ goto out;
+
+ stack_id = skel->bss->stack_id;
+ err = bpf_map_lookup_and_delete_elem(stackmap_fd, &stack_id, stack);
+ if (!ASSERT_OK(err, "lookup and delete target stack_id"))
+ goto out;
+
+ err = bpf_map_lookup_elem(stackmap_fd, &stack_id, stack);
+ if (!ASSERT_EQ(err, -ENOENT, "lookup deleted stack_id"))
+ goto out;
+out:
+ stacktrace_map__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c
index c6ef06f55cdb..e985d51d3d47 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c
@@ -5,7 +5,7 @@ void test_stacktrace_map_raw_tp(void)
{
const char *prog_name = "oncpu";
int control_map_fd, stackid_hmap_fd, stackmap_fd;
- const char *file = "./test_stacktrace_map.bpf.o";
+ const char *file = "./stacktrace_map.bpf.o";
__u32 key, val, duration = 0;
int err, prog_fd;
struct bpf_program *prog;
@@ -46,7 +46,7 @@ void test_stacktrace_map_raw_tp(void)
bpf_map_update_elem(control_map_fd, &key, &val, 0);
/* for every element in stackid_hmap, we can find a corresponding one
- * in stackmap, and vise versa.
+ * in stackmap, and vice versa.
*/
err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c
index 1932b1e0685c..dc2ccf6a14d1 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c
@@ -40,7 +40,7 @@ void test_stacktrace_map_skip(void)
skel->bss->control = 1;
/* for every element in stackid_hmap, we can find a corresponding one
- * in stackmap, and vise versa.
+ * in stackmap, and vice versa.
*/
err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
if (!ASSERT_OK(err, "compare_map_keys stackid_hmap vs. stackmap"))
diff --git a/tools/testing/selftests/bpf/prog_tests/stream.c b/tools/testing/selftests/bpf/prog_tests/stream.c
new file mode 100644
index 000000000000..c3cce5c292bd
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/stream.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <sys/mman.h>
+
+#include "stream.skel.h"
+#include "stream_fail.skel.h"
+
+void test_stream_failure(void)
+{
+ RUN_TESTS(stream_fail);
+}
+
+void test_stream_success(void)
+{
+ RUN_TESTS(stream);
+ return;
+}
+
+void test_stream_syscall(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+ LIBBPF_OPTS(bpf_prog_stream_read_opts, ropts);
+ struct stream *skel;
+ int ret, prog_fd;
+ char buf[64];
+
+ skel = stream__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "stream__open_and_load"))
+ return;
+
+ prog_fd = bpf_program__fd(skel->progs.stream_syscall);
+ ret = bpf_prog_test_run_opts(prog_fd, &opts);
+ ASSERT_OK(ret, "ret");
+ ASSERT_OK(opts.retval, "retval");
+
+ ASSERT_LT(bpf_prog_stream_read(0, BPF_STREAM_STDOUT, buf, sizeof(buf), &ropts), 0, "error");
+ ret = -errno;
+ ASSERT_EQ(ret, -EINVAL, "bad prog_fd");
+
+ ASSERT_LT(bpf_prog_stream_read(prog_fd, 0, buf, sizeof(buf), &ropts), 0, "error");
+ ret = -errno;
+ ASSERT_EQ(ret, -ENOENT, "bad stream id");
+
+ ASSERT_LT(bpf_prog_stream_read(prog_fd, BPF_STREAM_STDOUT, NULL, sizeof(buf), NULL), 0, "error");
+ ret = -errno;
+ ASSERT_EQ(ret, -EFAULT, "bad stream buf");
+
+ ret = bpf_prog_stream_read(prog_fd, BPF_STREAM_STDOUT, buf, 2, NULL);
+ ASSERT_EQ(ret, 2, "bytes");
+ ret = bpf_prog_stream_read(prog_fd, BPF_STREAM_STDOUT, buf, 2, NULL);
+ ASSERT_EQ(ret, 1, "bytes");
+ ret = bpf_prog_stream_read(prog_fd, BPF_STREAM_STDOUT, buf, 1, &ropts);
+ ASSERT_EQ(ret, 0, "no bytes stdout");
+ ret = bpf_prog_stream_read(prog_fd, BPF_STREAM_STDERR, buf, 1, &ropts);
+ ASSERT_EQ(ret, 0, "no bytes stderr");
+
+ stream__destroy(skel);
+}
+
+static void test_address(struct bpf_program *prog, unsigned long *fault_addr_p)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+ LIBBPF_OPTS(bpf_prog_stream_read_opts, ropts);
+ int ret, prog_fd;
+ char fault_addr[64];
+ char buf[1024];
+
+ prog_fd = bpf_program__fd(prog);
+
+ ret = bpf_prog_test_run_opts(prog_fd, &opts);
+ ASSERT_OK(ret, "ret");
+ ASSERT_OK(opts.retval, "retval");
+
+ sprintf(fault_addr, "0x%lx", *fault_addr_p);
+
+ ret = bpf_prog_stream_read(prog_fd, BPF_STREAM_STDERR, buf, sizeof(buf), &ropts);
+ ASSERT_GT(ret, 0, "stream read");
+ ASSERT_LE(ret, 1023, "len for buf");
+ buf[ret] = '\0';
+
+ if (!ASSERT_HAS_SUBSTR(buf, fault_addr, "fault_addr")) {
+ fprintf(stderr, "Output from stream:\n%s\n", buf);
+ fprintf(stderr, "Fault Addr: %s\n", fault_addr);
+ }
+}
+
+void test_stream_arena_fault_address(void)
+{
+ struct stream *skel;
+
+#if !defined(__x86_64__) && !defined(__aarch64__)
+ printf("%s:SKIP: arena fault reporting not supported\n", __func__);
+ test__skip();
+ return;
+#endif
+
+ skel = stream__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "stream__open_and_load"))
+ return;
+
+ if (test__start_subtest("read_fault"))
+ test_address(skel->progs.stream_arena_read_fault, &skel->bss->fault_addr);
+ if (test__start_subtest("write_fault"))
+ test_address(skel->progs.stream_arena_write_fault, &skel->bss->fault_addr);
+
+ stream__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/string_kfuncs.c b/tools/testing/selftests/bpf/prog_tests/string_kfuncs.c
new file mode 100644
index 000000000000..0f3bf594e7a5
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/string_kfuncs.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2025 Red Hat, Inc.*/
+#include <test_progs.h>
+#include "string_kfuncs_success.skel.h"
+#include "string_kfuncs_failure1.skel.h"
+#include "string_kfuncs_failure2.skel.h"
+#include <sys/mman.h>
+
+static const char * const test_cases[] = {
+ "strcmp",
+ "strcasecmp",
+ "strchr",
+ "strchrnul",
+ "strnchr",
+ "strrchr",
+ "strlen",
+ "strnlen",
+ "strspn_str",
+ "strspn_accept",
+ "strcspn_str",
+ "strcspn_reject",
+ "strstr",
+ "strcasestr",
+ "strnstr",
+ "strncasestr",
+};
+
+void run_too_long_tests(void)
+{
+ struct string_kfuncs_failure2 *skel;
+ struct bpf_program *prog;
+ char test_name[256];
+ int err, i;
+
+ skel = string_kfuncs_failure2__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "string_kfuncs_failure2__open_and_load"))
+ return;
+
+ memset(skel->bss->long_str, 'a', sizeof(skel->bss->long_str));
+
+ for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
+ sprintf(test_name, "test_%s_too_long", test_cases[i]);
+ if (!test__start_subtest(test_name))
+ continue;
+
+ prog = bpf_object__find_program_by_name(skel->obj, test_name);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto cleanup;
+
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ err = bpf_prog_test_run_opts(bpf_program__fd(prog), &topts);
+ if (!ASSERT_OK(err, "bpf_prog_test_run"))
+ goto cleanup;
+
+ ASSERT_EQ(topts.retval, -E2BIG, "reading too long string fails with -E2BIG");
+ }
+
+cleanup:
+ string_kfuncs_failure2__destroy(skel);
+}
+
+void test_string_kfuncs(void)
+{
+ RUN_TESTS(string_kfuncs_success);
+ RUN_TESTS(string_kfuncs_failure1);
+
+ run_too_long_tests();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
index 66a900327f91..0ab36503c3b2 100644
--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
@@ -1195,7 +1195,7 @@ static void test_tailcall_hierarchy_count(const char *which, bool test_fentry,
bool test_fexit,
bool test_fentry_entry)
{
- int err, map_fd, prog_fd, main_data_fd, fentry_data_fd, fexit_data_fd, i, val;
+ int err, map_fd, prog_fd, main_data_fd, fentry_data_fd = 0, fexit_data_fd = 0, i, val;
struct bpf_object *obj = NULL, *fentry_obj = NULL, *fexit_obj = NULL;
struct bpf_link *fentry_link = NULL, *fexit_link = NULL;
struct bpf_program *prog, *fentry_prog;
diff --git a/tools/testing/selftests/bpf/prog_tests/task_local_data.h b/tools/testing/selftests/bpf/prog_tests/task_local_data.h
new file mode 100644
index 000000000000..2de38776a2d4
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/task_local_data.h
@@ -0,0 +1,386 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __TASK_LOCAL_DATA_H
+#define __TASK_LOCAL_DATA_H
+
+#include <errno.h>
+#include <fcntl.h>
+#include <sched.h>
+#include <stdatomic.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+
+#ifdef TLD_FREE_DATA_ON_THREAD_EXIT
+#include <pthread.h>
+#endif
+
+#include <bpf/bpf.h>
+
+/*
+ * OPTIONS
+ *
+ * Define the option before including the header
+ *
+ * TLD_FREE_DATA_ON_THREAD_EXIT - Frees memory on thread exit automatically
+ *
+ * Thread-specific memory for storing TLDs is allocated lazily on the first call to
+ * tld_get_data(). The thread that calls it must also call tld_free() on thread exit
+ * to prevent a memory leak. pthread.h will be included if the option is defined, and
+ * a pthread key will be registered with a destructor that calls tld_free().
+ *
+ *
+ * TLD_DYN_DATA_SIZE - The maximum size of memory allocated for TLDs created dynamically
+ * (default: 64 bytes)
+ *
+ * A TLD can be defined statically using TLD_DEFINE_KEY() or created on the fly using
+ * tld_create_key(). As the total size of TLDs created with tld_create_key() cannot
+ * possibly be known statically, a memory area of size TLD_DYN_DATA_SIZE will be allocated
+ * for these TLDs. This additional memory is allocated for every thread that calls
+ * tld_get_data() even if tld_create_key() is never actually called, so be mindful of
+ * potential memory wastage. Use TLD_DEFINE_KEY() whenever possible as just enough memory
+ * will be allocated for TLDs created with it.
+ *
+ *
+ * TLD_NAME_LEN - The maximum length of the name of a TLD (default: 62)
+ *
+ * Setting TLD_NAME_LEN will affect the maximum number of TLDs a process can store,
+ * TLD_MAX_DATA_CNT.
+ *
+ *
+ * TLD_DATA_USE_ALIGNED_ALLOC - Always use aligned_alloc() instead of malloc()
+ *
+ * When allocating the memory for storing TLDs, we need to make sure there is a memory
+ * region of X bytes within a single page. This is due to the limit posed by UPTR: memory
+ * pinned to the kernel cannot exceed a page nor can it cross the page boundary. The
+ * library normally calls malloc(2*X) given X bytes of total TLDs, and only uses
+ * aligned_alloc(PAGE_SIZE, X) when X >= PAGE_SIZE / 2. This is to reduce memory wastage
+ * as not all memory allocators can use the exact amount of memory requested to fulfill
+ * aligned_alloc(). For example, some may round the size up to the alignment. Enable the
+ * option to always use aligned_alloc() if the implementation has low memory overhead.
+ */
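+
+/* For example, the selftest below enables automatic cleanup and a larger
+ * dynamic area by defining the options before the include:
+ *
+ *   #define TLD_FREE_DATA_ON_THREAD_EXIT
+ *   #define TLD_DYN_DATA_SIZE 4096
+ *   #include "task_local_data.h"
+ */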
+
+#define TLD_PAGE_SIZE getpagesize()
+#define TLD_PAGE_MASK (~(TLD_PAGE_SIZE - 1))
+
+#define TLD_ROUND_MASK(x, y) ((__typeof__(x))((y) - 1))
+#define TLD_ROUND_UP(x, y) ((((x) - 1) | TLD_ROUND_MASK(x, y)) + 1)
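+/* e.g., TLD_ROUND_UP(13, 8) == 16 and TLD_ROUND_UP(16, 8) == 16; y must be
+ * a power of two.
+ */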
+
+#define TLD_READ_ONCE(x) (*(volatile typeof(x) *)&(x))
+
+#ifndef TLD_DYN_DATA_SIZE
+#define TLD_DYN_DATA_SIZE 64
+#endif
+
+#define TLD_MAX_DATA_CNT (TLD_PAGE_SIZE / sizeof(struct tld_metadata) - 1)
+
+#ifndef TLD_NAME_LEN
+#define TLD_NAME_LEN 62
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct {
+ __s16 off;
+} tld_key_t;
+
+struct tld_metadata {
+ char name[TLD_NAME_LEN];
+ _Atomic __u16 size;
+};
+
+struct tld_meta_u {
+ _Atomic __u8 cnt;
+ __u16 size;
+ struct tld_metadata metadata[];
+};
+
+struct tld_data_u {
+ __u64 start; /* offset of tld_data_u->data in a page */
+ char data[];
+};
+
+struct tld_map_value {
+ void *data;
+ struct tld_meta_u *meta;
+};
+
+struct tld_meta_u * _Atomic tld_meta_p __attribute__((weak));
+__thread struct tld_data_u *tld_data_p __attribute__((weak));
+__thread void *tld_data_alloc_p __attribute__((weak));
+
+#ifdef TLD_FREE_DATA_ON_THREAD_EXIT
+pthread_key_t tld_pthread_key __attribute__((weak));
+
+static void tld_free(void);
+
+static void __tld_thread_exit_handler(void *unused)
+{
+ tld_free();
+}
+#endif
+
+static int __tld_init_meta_p(void)
+{
+ struct tld_meta_u *meta, *uninit = NULL;
+ int err = 0;
+
+ meta = (struct tld_meta_u *)aligned_alloc(TLD_PAGE_SIZE, TLD_PAGE_SIZE);
+ if (!meta) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ memset(meta, 0, TLD_PAGE_SIZE);
+ meta->size = TLD_DYN_DATA_SIZE;
+
+ if (!atomic_compare_exchange_strong(&tld_meta_p, &uninit, meta)) {
+ free(meta);
+ goto out;
+ }
+
+#ifdef TLD_FREE_DATA_ON_THREAD_EXIT
+ pthread_key_create(&tld_pthread_key, __tld_thread_exit_handler);
+#endif
+out:
+ return err;
+}
+
+static int __tld_init_data_p(int map_fd)
+{
+ bool use_aligned_alloc = false;
+ struct tld_map_value map_val;
+ struct tld_data_u *data;
+ void *data_alloc = NULL;
+ int err, tid_fd = -1;
+
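+	/* O_EXCL doubles as PIDFD_THREAD here: get a pidfd that refers to
+	 * this thread rather than the thread-group leader.
+	 */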
+ tid_fd = syscall(SYS_pidfd_open, sys_gettid(), O_EXCL);
+ if (tid_fd < 0) {
+ err = -errno;
+ goto out;
+ }
+
+#ifdef TLD_DATA_USE_ALIGNED_ALLOC
+ use_aligned_alloc = true;
+#endif
+
+ /*
+ * tld_meta_p->size = TLD_DYN_DATA_SIZE +
+ * total size of TLDs defined via TLD_DEFINE_KEY()
+ */
+ data_alloc = (use_aligned_alloc || tld_meta_p->size * 2 >= TLD_PAGE_SIZE) ?
+ aligned_alloc(TLD_PAGE_SIZE, tld_meta_p->size) :
+ malloc(tld_meta_p->size * 2);
+ if (!data_alloc) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Always pass a page-aligned address to UPTR since the size of tld_map_value::data
+ * is a page in BTF. If data_alloc spans two pages, use the page that contains
+ * enough memory.
+ */
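+	/* Hypothetical example with 4 KiB pages: data_alloc == 0x5ff80 and
+	 * size == 0x100 leave only 0x80 bytes in the first page, so the
+	 * second page (0x60000) holds the data instead.
+	 */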
+ if (TLD_PAGE_SIZE - (~TLD_PAGE_MASK & (intptr_t)data_alloc) >= tld_meta_p->size) {
+ map_val.data = (void *)(TLD_PAGE_MASK & (intptr_t)data_alloc);
+ data = data_alloc;
+ data->start = (~TLD_PAGE_MASK & (intptr_t)data_alloc) +
+ offsetof(struct tld_data_u, data);
+ } else {
+ map_val.data = (void *)(TLD_ROUND_UP((intptr_t)data_alloc, TLD_PAGE_SIZE));
+ data = (void *)(TLD_ROUND_UP((intptr_t)data_alloc, TLD_PAGE_SIZE));
+ data->start = offsetof(struct tld_data_u, data);
+ }
+ map_val.meta = TLD_READ_ONCE(tld_meta_p);
+
+ err = bpf_map_update_elem(map_fd, &tid_fd, &map_val, 0);
+ if (err) {
+ free(data_alloc);
+ goto out;
+ }
+
+ tld_data_p = data;
+ tld_data_alloc_p = data_alloc;
+#ifdef TLD_FREE_DATA_ON_THREAD_EXIT
+ pthread_setspecific(tld_pthread_key, (void *)1);
+#endif
+out:
+ if (tid_fd >= 0)
+ close(tid_fd);
+ return err;
+}
+
+static tld_key_t __tld_create_key(const char *name, size_t size, bool dyn_data)
+{
+ int err, i, sz, off = 0;
+ __u8 cnt;
+
+ if (!TLD_READ_ONCE(tld_meta_p)) {
+ err = __tld_init_meta_p();
+ if (err)
+ return (tld_key_t){err};
+ }
+
+ for (i = 0; i < TLD_MAX_DATA_CNT; i++) {
+retry:
+ cnt = atomic_load(&tld_meta_p->cnt);
+ if (i < cnt) {
+ /* A metadata entry is not ready until its size is updated with a non-zero value */
+ while (!(sz = atomic_load(&tld_meta_p->metadata[i].size)))
+ sched_yield();
+
+ if (!strncmp(tld_meta_p->metadata[i].name, name, TLD_NAME_LEN))
+ return (tld_key_t){-EEXIST};
+
+ off += TLD_ROUND_UP(sz, 8);
+ continue;
+ }
+
+ /*
+ * TLD_DEFINE_KEY() is given memory up to a page while at most
+ * TLD_DYN_DATA_SIZE is allocated for tld_create_key()
+ */
+ if (dyn_data) {
+ if (off + TLD_ROUND_UP(size, 8) > tld_meta_p->size)
+ return (tld_key_t){-E2BIG};
+ } else {
+ if (off + TLD_ROUND_UP(size, 8) > TLD_PAGE_SIZE - sizeof(struct tld_data_u))
+ return (tld_key_t){-E2BIG};
+ tld_meta_p->size += TLD_ROUND_UP(size, 8);
+ }
+
+ /*
+ * Only one tld_create_key() call can increase the current cnt by one and
+ * take the latest available slot. Other threads will check again whether a new
+ * TLD can still be added, and then compete for the new slot after the
+ * succeeding thread updates the size.
+ */
+ if (!atomic_compare_exchange_strong(&tld_meta_p->cnt, &cnt, cnt + 1))
+ goto retry;
+
+ strncpy(tld_meta_p->metadata[i].name, name, TLD_NAME_LEN);
+ atomic_store(&tld_meta_p->metadata[i].size, size);
+ return (tld_key_t){(__s16)off};
+ }
+
+ return (tld_key_t){-ENOSPC};
+}
+
+/**
+ * TLD_DEFINE_KEY() - Define a TLD and a global variable key associated with the TLD.
+ *
+ * @name: The name of the TLD
+ * @size: The size of the TLD
+ * @key: The variable name of the key. Cannot exceed TLD_NAME_LEN
+ *
+ * The macro can only be used in file scope.
+ *
+ * A global variable key of opaque type, tld_key_t, will be declared and initialized before
+ * main() starts. Use tld_key_is_err() or tld_key_err_or_zero() later to check if the key
+ * creation succeeded. Pass the key to tld_get_data() to get a pointer to the TLD.
+ * bpf programs can also fetch the same key by name.
+ *
+ * The total size of TLDs created using TLD_DEFINE_KEY() cannot exceed a page. Just
+ * enough memory will be allocated for each thread on the first call to tld_get_data().
+ */
+#define TLD_DEFINE_KEY(key, name, size) \
+tld_key_t key; \
+ \
+__attribute__((constructor)) \
+void __tld_define_key_##key(void) \
+{ \
+ key = __tld_create_key(name, size, false); \
+}
+
+/**
+ * tld_create_key() - Create a TLD and return a key associated with the TLD.
+ *
+ * @name: The name of the TLD
+ * @size: The size of the TLD
+ *
+ * Return an opaque object key. Use tld_key_is_err() or tld_key_err_or_zero() to check
+ * if the key creation succeeded. Pass the key to tld_get_data() to get a pointer
+ * to the TLD. bpf programs can also fetch the same key by name.
+ *
+ * Use tld_create_key() only when a TLD needs to be created dynamically (e.g., @name is
+ * not known statically or a TLD needs to be created conditionally)
+ *
+ * An additional TLD_DYN_DATA_SIZE bytes are allocated per-thread to accommodate TLDs
+ * created dynamically with tld_create_key(). Since only a user page is pinned to the
+ * kernel, when TLDs created with TLD_DEFINE_KEY() use more than TLD_PAGE_SIZE -
+ * TLD_DYN_DATA_SIZE, the buffer size will be limited to the rest of the page.
+ */
+__attribute__((unused))
+static tld_key_t tld_create_key(const char *name, size_t size)
+{
+ return __tld_create_key(name, size, true);
+}
+
+__attribute__((unused))
+static inline bool tld_key_is_err(tld_key_t key)
+{
+ return key.off < 0;
+}
+
+__attribute__((unused))
+static inline int tld_key_err_or_zero(tld_key_t key)
+{
+ return tld_key_is_err(key) ? key.off : 0;
+}
+
+/**
+ * tld_get_data() - Get a pointer to the TLD associated with the given key of the
+ * calling thread.
+ *
+ * @map_fd: A file descriptor of tld_data_map, the underlying BPF task local storage map
+ * of task local data.
+ * @key: A key object created by TLD_DEFINE_KEY() or tld_create_key().
+ *
+ * Return a pointer to the TLD if the key is valid; NULL if there is not enough memory
+ * for this thread's TLDs or the key is invalid. The returned pointer is guaranteed to be 8-byte
+ * aligned.
+ *
+ * Threads that call tld_get_data() must call tld_free() on exit to prevent
+ * a memory leak if TLD_FREE_DATA_ON_THREAD_EXIT is not defined.
+ */
+__attribute__((unused))
+static void *tld_get_data(int map_fd, tld_key_t key)
+{
+ if (!TLD_READ_ONCE(tld_meta_p))
+ return NULL;
+
+ /* tld_data_p is allocated on the first invocation of tld_get_data() */
+ if (!tld_data_p && __tld_init_data_p(map_fd))
+ return NULL;
+
+ return tld_data_p->data + key.off;
+}
+
+/**
+ * tld_free() - Free task local data memory of the calling thread
+ *
+ * For the calling thread, all previously acquired TLD pointers become invalid.
+ *
+ * Users must call tld_free() on thread exit to prevent a memory leak. Alternatively,
+ * define TLD_FREE_DATA_ON_THREAD_EXIT and a thread exit handler will be registered
+ * to free the memory automatically.
+ */
+__attribute__((unused))
+static void tld_free(void)
+{
+ if (tld_data_alloc_p) {
+ free(tld_data_alloc_p);
+ tld_data_alloc_p = NULL;
+ tld_data_p = NULL;
+ }
+}
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* __TASK_LOCAL_DATA_H */
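Putting the header's pieces together, a rough usage sketch follows (not part of the patch). It assumes map_fd is the fd of the tld_data_map task local storage map obtained from a BPF skeleton; the "counter" TLD names are hypothetical:

#define TLD_FREE_DATA_ON_THREAD_EXIT
#include "task_local_data.h"

/* Statically define a 4-byte TLD named "counter"; the key is initialized
 * in a constructor before main() runs.
 */
TLD_DEFINE_KEY(counter_key, "counter", sizeof(int));

static int bump_counters(int map_fd)
{
	tld_key_t dyn_key;
	int *counter, *dyn;

	/* The first call lazily allocates this thread's data area and pins
	 * it to the kernel via the task local storage map.
	 */
	counter = tld_get_data(map_fd, counter_key);
	if (!counter)
		return -1;
	(*counter)++;

	/* A dynamically created TLD is used the same way; it is carved out
	 * of the TLD_DYN_DATA_SIZE area. A second creation with the same
	 * name would return -EEXIST.
	 */
	dyn_key = tld_create_key("dyn_counter", sizeof(int));
	if (tld_key_is_err(dyn_key))
		return tld_key_err_or_zero(dyn_key);
	dyn = tld_get_data(map_fd, dyn_key);
	if (!dyn)
		return -1;
	(*dyn)++;

	/* With TLD_FREE_DATA_ON_THREAD_EXIT defined, tld_free() runs
	 * automatically when the thread exits.
	 */
	return 0;
}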
diff --git a/tools/testing/selftests/bpf/prog_tests/task_work_stress.c b/tools/testing/selftests/bpf/prog_tests/task_work_stress.c
new file mode 100644
index 000000000000..450d17d91a56
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/task_work_stress.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <string.h>
+#include <stdio.h>
+#include "task_work_stress.skel.h"
+#include <linux/bpf.h>
+#include <linux/perf_event.h>
+#include <sys/syscall.h>
+#include <time.h>
+#include <stdlib.h>
+#include <stdatomic.h>
+
+struct test_data {
+ int prog_fd;
+ atomic_int exit;
+};
+
+void *runner(void *test_data)
+{
+ struct test_data *td = test_data;
+ int err = 0;
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+
+ while (!err && !atomic_load(&td->exit))
+ err = bpf_prog_test_run_opts(td->prog_fd, &opts);
+
+ return NULL;
+}
+
+static int get_env_int(const char *str, int def)
+{
+ const char *s = getenv(str);
+ char *end;
+ int retval;
+
+ if (!s || !*s)
+ return def;
+ errno = 0;
+ retval = strtol(s, &end, 10);
+ if (errno || *end || retval < 0)
+ return def;
+ return retval;
+}
+
+static void task_work_run(bool enable_delete)
+{
+ struct task_work_stress *skel;
+ struct bpf_program *scheduler, *deleter;
+ int nthreads = 16;
+ int test_time_s = get_env_int("BPF_TASK_WORK_TEST_TIME", 1);
+ pthread_t tid[nthreads], tid_del;
+ bool started[nthreads], started_del = false;
+ struct test_data td_sched = { .exit = 0 }, td_del = { .exit = 1 };
+ int i, err;
+
+ skel = task_work_stress__open();
+ if (!ASSERT_OK_PTR(skel, "task_work__open"))
+ return;
+
+ scheduler = bpf_object__find_program_by_name(skel->obj, "schedule_task_work");
+ bpf_program__set_autoload(scheduler, true);
+
+ deleter = bpf_object__find_program_by_name(skel->obj, "delete_task_work");
+ bpf_program__set_autoload(deleter, true);
+
+ err = task_work_stress__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ for (i = 0; i < nthreads; ++i)
+ started[i] = false;
+
+ td_sched.prog_fd = bpf_program__fd(scheduler);
+ for (i = 0; i < nthreads; ++i) {
+ if (pthread_create(&tid[i], NULL, runner, &td_sched) != 0) {
+ fprintf(stderr, "could not start thread\n");
+ goto cancel;
+ }
+ started[i] = true;
+ }
+
+ if (enable_delete)
+ atomic_store(&td_del.exit, 0);
+
+ td_del.prog_fd = bpf_program__fd(deleter);
+ if (pthread_create(&tid_del, NULL, runner, &td_del) != 0) {
+ fprintf(stderr, "could not start thread\n");
+ goto cancel;
+ }
+ started_del = true;
+
+ /* Run stress test for some time */
+ sleep(test_time_s);
+
+cancel:
+ atomic_store(&td_sched.exit, 1);
+ atomic_store(&td_del.exit, 1);
+ for (i = 0; i < nthreads; ++i) {
+ if (started[i])
+ pthread_join(tid[i], NULL);
+ }
+
+ if (started_del)
+ pthread_join(tid_del, NULL);
+
+ ASSERT_GT(skel->bss->callback_scheduled, 0, "work scheduled");
+ /* Some scheduling attempts should have failed due to contention */
+ ASSERT_GT(skel->bss->schedule_error, 0, "schedule error");
+
+ if (enable_delete) {
+ /* If the delete thread is enabled, it should have cancelled some callbacks */
+ ASSERT_GT(skel->bss->delete_success, 0, "delete success");
+ ASSERT_LT(skel->bss->callback_success, skel->bss->callback_scheduled, "callbacks");
+ } else {
+ /* Without the delete thread, the number of scheduled callbacks equals the number fired */
+ ASSERT_EQ(skel->bss->callback_success, skel->bss->callback_scheduled, "callbacks");
+ }
+
+cleanup:
+ task_work_stress__destroy(skel);
+}
+
+void test_task_work_stress(void)
+{
+ if (test__start_subtest("no_delete"))
+ task_work_run(false);
+ if (test__start_subtest("with_delete"))
+ task_work_run(true);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_helpers.h b/tools/testing/selftests/bpf/prog_tests/tc_helpers.h
index 924d0e25320c..d52a62af77bf 100644
--- a/tools/testing/selftests/bpf/prog_tests/tc_helpers.h
+++ b/tools/testing/selftests/bpf/prog_tests/tc_helpers.h
@@ -8,34 +8,6 @@
# define loopback 1
#endif
-static inline __u32 id_from_prog_fd(int fd)
-{
- struct bpf_prog_info prog_info = {};
- __u32 prog_info_len = sizeof(prog_info);
- int err;
-
- err = bpf_obj_get_info_by_fd(fd, &prog_info, &prog_info_len);
- if (!ASSERT_OK(err, "id_from_prog_fd"))
- return 0;
-
- ASSERT_NEQ(prog_info.id, 0, "prog_info.id");
- return prog_info.id;
-}
-
-static inline __u32 id_from_link_fd(int fd)
-{
- struct bpf_link_info link_info = {};
- __u32 link_info_len = sizeof(link_info);
- int err;
-
- err = bpf_link_get_info_by_fd(fd, &link_info, &link_info_len);
- if (!ASSERT_OK(err, "id_from_link_fd"))
- return 0;
-
- ASSERT_NEQ(link_info.id, 0, "link_info.id");
- return link_info.id;
-}
-
static inline __u32 ifindex_from_link_fd(int fd)
{
struct bpf_link_info link_info = {};
diff --git a/tools/testing/selftests/bpf/prog_tests/test_bpf_smc.c b/tools/testing/selftests/bpf/prog_tests/test_bpf_smc.c
new file mode 100644
index 000000000000..de22734abc4d
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_bpf_smc.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <linux/genetlink.h>
+#include "network_helpers.h"
+#include "bpf_smc.skel.h"
+
+#ifndef IPPROTO_SMC
+#define IPPROTO_SMC 256
+#endif
+
+#define CLIENT_IP "127.0.0.1"
+#define SERVER_IP "127.0.1.0"
+#define SERVER_IP_VIA_RISK_PATH "127.0.2.0"
+
+#define SERVICE_1 80
+#define SERVICE_2 443
+#define SERVICE_3 8443
+
+#define TEST_NS "bpf_smc_netns"
+
+static struct netns_obj *test_netns;
+
+struct smc_policy_ip_key {
+ __u32 sip;
+ __u32 dip;
+};
+
+struct smc_policy_ip_value {
+ __u8 mode;
+};
+
+#if defined(__s390x__)
+/* s390x has default seid */
+static bool setup_ueid(void) { return true; }
+static void cleanup_ueid(void) {}
+#else
+enum {
+ SMC_NETLINK_ADD_UEID = 10,
+ SMC_NETLINK_REMOVE_UEID
+};
+
+enum {
+ SMC_NLA_EID_TABLE_UNSPEC,
+ SMC_NLA_EID_TABLE_ENTRY, /* string */
+};
+
+struct msgtemplate {
+ struct nlmsghdr n;
+ struct genlmsghdr g;
+ char buf[1024];
+};
+
+#define GENLMSG_DATA(glh) ((void *)(NLMSG_DATA(glh) + GENL_HDRLEN))
+#define GENLMSG_PAYLOAD(glh) (NLMSG_PAYLOAD(glh, 0) - GENL_HDRLEN)
+#define NLA_DATA(na) ((void *)((char *)(na) + NLA_HDRLEN))
+#define NLA_PAYLOAD(len) ((len) - NLA_HDRLEN)
+
+#define SMC_GENL_FAMILY_NAME "SMC_GEN_NETLINK"
+#define SMC_BPFTEST_UEID "SMC-BPFTEST-UEID"
+
+static uint16_t smc_nl_family_id = -1;
+
+static int send_cmd(int fd, __u16 nlmsg_type, __u32 nlmsg_pid,
+ __u16 nlmsg_flags, __u8 genl_cmd, __u16 nla_type,
+ void *nla_data, int nla_len)
+{
+ struct nlattr *na;
+ struct sockaddr_nl nladdr;
+ int r, buflen;
+ char *buf;
+
+ struct msgtemplate msg = {0};
+
+ msg.n.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN);
+ msg.n.nlmsg_type = nlmsg_type;
+ msg.n.nlmsg_flags = nlmsg_flags;
+ msg.n.nlmsg_seq = 0;
+ msg.n.nlmsg_pid = nlmsg_pid;
+ msg.g.cmd = genl_cmd;
+ msg.g.version = 1;
+ na = (struct nlattr *)GENLMSG_DATA(&msg);
+ na->nla_type = nla_type;
+ na->nla_len = nla_len + 1 + NLA_HDRLEN;
+ memcpy(NLA_DATA(na), nla_data, nla_len);
+ msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len);
+
+ buf = (char *)&msg;
+ buflen = msg.n.nlmsg_len;
+ memset(&nladdr, 0, sizeof(nladdr));
+ nladdr.nl_family = AF_NETLINK;
+
+ while ((r = sendto(fd, buf, buflen, 0, (struct sockaddr *)&nladdr,
+ sizeof(nladdr))) < buflen) {
+ if (r > 0) {
+ buf += r;
+ buflen -= r;
+ } else if (errno != EAGAIN) {
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static bool get_smc_nl_family_id(void)
+{
+ struct sockaddr_nl nl_src;
+ struct msgtemplate msg;
+ struct nlattr *nl;
+ int fd, ret;
+ pid_t pid;
+
+ fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);
+ if (!ASSERT_OK_FD(fd, "nl_family socket"))
+ return false;
+
+ pid = getpid();
+
+ memset(&nl_src, 0, sizeof(nl_src));
+ nl_src.nl_family = AF_NETLINK;
+ nl_src.nl_pid = pid;
+
+ ret = bind(fd, (struct sockaddr *)&nl_src, sizeof(nl_src));
+ if (!ASSERT_OK(ret, "nl_family bind"))
+ goto fail;
+
+ ret = send_cmd(fd, GENL_ID_CTRL, pid,
+ NLM_F_REQUEST, CTRL_CMD_GETFAMILY,
+ CTRL_ATTR_FAMILY_NAME, (void *)SMC_GENL_FAMILY_NAME,
+ strlen(SMC_GENL_FAMILY_NAME));
+ if (!ASSERT_OK(ret, "nl_family query"))
+ goto fail;
+
+ ret = recv(fd, &msg, sizeof(msg), 0);
+ if (!ASSERT_FALSE(msg.n.nlmsg_type == NLMSG_ERROR || ret < 0 ||
+ !NLMSG_OK(&msg.n, ret), "nl_family response"))
+ goto fail;
+
+ nl = (struct nlattr *)GENLMSG_DATA(&msg);
+ nl = (struct nlattr *)((char *)nl + NLA_ALIGN(nl->nla_len));
+ if (!ASSERT_EQ(nl->nla_type, CTRL_ATTR_FAMILY_ID, "nl_family nla type"))
+ goto fail;
+
+ smc_nl_family_id = *(uint16_t *)NLA_DATA(nl);
+ close(fd);
+ return true;
+fail:
+ close(fd);
+ return false;
+}
+
+static bool smc_ueid(int op)
+{
+ struct sockaddr_nl nl_src;
+ struct msgtemplate msg;
+ struct nlmsgerr *err;
+ char test_ueid[32];
+ int fd, ret;
+ pid_t pid;
+
+ /* UEID required */
+ memset(test_ueid, '\x20', sizeof(test_ueid));
+ memcpy(test_ueid, SMC_BPFTEST_UEID, strlen(SMC_BPFTEST_UEID));
+ fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);
+ if (!ASSERT_OK_FD(fd, "ueid socket"))
+ return false;
+
+ pid = getpid();
+ memset(&nl_src, 0, sizeof(nl_src));
+ nl_src.nl_family = AF_NETLINK;
+ nl_src.nl_pid = pid;
+
+ ret = bind(fd, (struct sockaddr *)&nl_src, sizeof(nl_src));
+ if (!ASSERT_OK(ret, "ueid bind"))
+ goto fail;
+
+ ret = send_cmd(fd, smc_nl_family_id, pid,
+ NLM_F_REQUEST | NLM_F_ACK, op, SMC_NLA_EID_TABLE_ENTRY,
+ (void *)test_ueid, sizeof(test_ueid));
+ if (!ASSERT_OK(ret, "ueid cmd"))
+ goto fail;
+
+ ret = recv(fd, &msg, sizeof(msg), 0);
+ if (!ASSERT_FALSE(ret < 0 ||
+ !NLMSG_OK(&msg.n, ret), "ueid response"))
+ goto fail;
+
+ if (msg.n.nlmsg_type == NLMSG_ERROR) {
+ err = NLMSG_DATA(&msg);
+ switch (op) {
+ case SMC_NETLINK_REMOVE_UEID:
+ if (!ASSERT_FALSE((err->error && err->error != -ENOENT),
+ "ueid remove"))
+ goto fail;
+ break;
+ case SMC_NETLINK_ADD_UEID:
+ if (!ASSERT_OK(err->error, "ueid add"))
+ goto fail;
+ break;
+ default:
+ break;
+ }
+ }
+ close(fd);
+ return true;
+fail:
+ close(fd);
+ return false;
+}
+
+static bool setup_ueid(void)
+{
+ /* get smc nl id */
+ if (!get_smc_nl_family_id())
+ return false;
+ /* clear old ueid for bpftest */
+ smc_ueid(SMC_NETLINK_REMOVE_UEID);
+ /* smc-loopback requires a UEID */
+ return smc_ueid(SMC_NETLINK_ADD_UEID);
+}
+
+static void cleanup_ueid(void)
+{
+ smc_ueid(SMC_NETLINK_REMOVE_UEID);
+}
+#endif /* __s390x__ */
+
+static bool setup_netns(void)
+{
+ test_netns = netns_new(TEST_NS, true);
+ if (!ASSERT_OK_PTR(test_netns, "open net namespace"))
+ goto fail_netns;
+
+ SYS(fail_ip, "ip addr add 127.0.1.0/8 dev lo");
+ SYS(fail_ip, "ip addr add 127.0.2.0/8 dev lo");
+
+ return true;
+fail_ip:
+ netns_free(test_netns);
+fail_netns:
+ return false;
+}
+
+static void cleanup_netns(void)
+{
+ netns_free(test_netns);
+}
+
+static bool setup_smc(void)
+{
+ if (!setup_ueid())
+ return false;
+
+ if (!setup_netns())
+ goto fail_netns;
+
+ return true;
+fail_netns:
+ cleanup_ueid();
+ return false;
+}
+
+static int set_client_addr_cb(int fd, void *opts)
+{
+ const char *src = (const char *)opts;
+ struct sockaddr_in localaddr;
+
+ localaddr.sin_family = AF_INET;
+ localaddr.sin_port = htons(0);
+ localaddr.sin_addr.s_addr = inet_addr(src);
+ return !ASSERT_OK(bind(fd, (struct sockaddr *)&localaddr, sizeof(localaddr)), "client bind");
+}
+
+static void run_link(const char *src, const char *dst, int port)
+{
+ struct network_helper_opts opts = {0};
+ int server, client;
+
+ server = start_server_str(AF_INET, SOCK_STREAM, dst, port, NULL);
+ if (!ASSERT_OK_FD(server, "start service_1"))
+ return;
+
+ opts.proto = IPPROTO_TCP;
+ opts.post_socket_cb = set_client_addr_cb;
+ opts.cb_opts = (void *)src;
+
+ client = connect_to_fd_opts(server, &opts);
+ if (!ASSERT_OK_FD(client, "start connect"))
+ goto fail_client;
+
+ close(client);
+fail_client:
+ close(server);
+}
+
+static void block_link(int map_fd, const char *src, const char *dst)
+{
+ struct smc_policy_ip_value val = { .mode = /* block */ 0 };
+ struct smc_policy_ip_key key = {
+ .sip = inet_addr(src),
+ .dip = inet_addr(dst),
+ };
+
+ bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
+}
+
+/*
+ * This test describes a real-life service topology as follows:
+ *
+ * +-------------> service_1
+ * link 1 | |
+ * +--------------------> server | link 2
+ * | | V
+ * | +-------------> service_2
+ * | link 3
+ * client -------------------> server_via_unsafe_path -> service_3
+ *
+ * Among them,
+ * 1. link-1 is well suited to SMC.
+ * 2. link-2 is not suitable for SMC, because it mostly carries short-lived
+ *    connections.
+ * 3. link-3 is also not suitable for SMC, because its RDMA link is
+ *    unavailable and each connection would sit through a long timeout before
+ *    it can fall back to TCP.
+ * To achieve this goal, we use a customized SMC IP strategy via smc_hs_ctrl.
+ */
+static void test_topo(void)
+{
+ struct bpf_smc *skel;
+ int rc, map_fd;
+
+ skel = bpf_smc__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "bpf_smc__open_and_load"))
+ return;
+
+ rc = bpf_smc__attach(skel);
+ if (!ASSERT_OK(rc, "bpf_smc__attach"))
+ goto fail;
+
+ map_fd = bpf_map__fd(skel->maps.smc_policy_ip);
+ if (!ASSERT_OK_FD(map_fd, "bpf_map__fd"))
+ goto fail;
+
+ /* Mock the process of transparent replacement: the protocol is changed
+ * to IPPROTO_SMC accordingly via fmod_ret/update_socket_protocol.
+ */
+ write_sysctl("/proc/sys/net/smc/hs_ctrl", "linkcheck");
+
+ /* Configure the IP strategy */
+ block_link(map_fd, CLIENT_IP, SERVER_IP_VIA_RISK_PATH);
+ block_link(map_fd, SERVER_IP, SERVER_IP);
+
+ /* should go with smc */
+ run_link(CLIENT_IP, SERVER_IP, SERVICE_1);
+ /* should go with smc fallback */
+ run_link(SERVER_IP, SERVER_IP, SERVICE_2);
+
+ ASSERT_EQ(skel->bss->smc_cnt, 2, "smc count");
+ ASSERT_EQ(skel->bss->fallback_cnt, 1, "fallback count");
+
+ /* should go with smc */
+ run_link(CLIENT_IP, SERVER_IP, SERVICE_2);
+
+ ASSERT_EQ(skel->bss->smc_cnt, 3, "smc count");
+ ASSERT_EQ(skel->bss->fallback_cnt, 1, "fallback count");
+
+ /* should go with smc fallback */
+ run_link(CLIENT_IP, SERVER_IP_VIA_RISK_PATH, SERVICE_3);
+
+ ASSERT_EQ(skel->bss->smc_cnt, 4, "smc count");
+ ASSERT_EQ(skel->bss->fallback_cnt, 2, "fallback count");
+
+fail:
+ bpf_smc__destroy(skel);
+}
+
+void test_bpf_smc(void)
+{
+ if (!setup_smc()) {
+ printf("setup for smc test failed, test SKIP:\n");
+ test__skip();
+ return;
+ }
+
+ if (test__start_subtest("topo"))
+ test_topo();
+
+ cleanup_ueid();
+ cleanup_netns();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_lsm.c b/tools/testing/selftests/bpf/prog_tests/test_lsm.c
index 2a27f3714f5c..bdc4fc06bc5a 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_lsm.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_lsm.c
@@ -139,7 +139,7 @@ static void test_lsm_tailcall(void)
if (CHECK_FAIL(!err))
goto close_prog;
- prog_fd = bpf_program__fd(skel->progs.lsm_file_alloc_security_prog);
+ prog_fd = bpf_program__fd(skel->progs.lsm_kernfs_init_security_prog);
if (CHECK_FAIL(prog_fd < 0))
goto close_prog;
diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_id_ops_mapping.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_id_ops_mapping.c
new file mode 100644
index 000000000000..fd8762ba4b67
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_id_ops_mapping.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "struct_ops_id_ops_mapping1.skel.h"
+#include "struct_ops_id_ops_mapping2.skel.h"
+
+static void test_st_ops_id_ops_mapping(void)
+{
+ struct struct_ops_id_ops_mapping1 *skel1 = NULL;
+ struct struct_ops_id_ops_mapping2 *skel2 = NULL;
+ struct bpf_map_info info = {};
+ __u32 len = sizeof(info);
+ int err, pid, prog1_fd, prog2_fd;
+
+ skel1 = struct_ops_id_ops_mapping1__open_and_load();
+ if (!ASSERT_OK_PTR(skel1, "struct_ops_id_ops_mapping1__open"))
+ goto out;
+
+ skel2 = struct_ops_id_ops_mapping2__open_and_load();
+ if (!ASSERT_OK_PTR(skel2, "struct_ops_id_ops_mapping2__open"))
+ goto out;
+
+ err = bpf_map_get_info_by_fd(bpf_map__fd(skel1->maps.st_ops_map),
+ &info, &len);
+ if (!ASSERT_OK(err, "bpf_map_get_info_by_fd"))
+ goto out;
+
+ skel1->bss->st_ops_id = info.id;
+
+ err = bpf_map_get_info_by_fd(bpf_map__fd(skel2->maps.st_ops_map),
+ &info, &len);
+ if (!ASSERT_OK(err, "bpf_map_get_info_by_fd"))
+ goto out;
+
+ skel2->bss->st_ops_id = info.id;
+
+ err = struct_ops_id_ops_mapping1__attach(skel1);
+ if (!ASSERT_OK(err, "struct_ops_id_ops_mapping1__attach"))
+ goto out;
+
+ err = struct_ops_id_ops_mapping2__attach(skel2);
+ if (!ASSERT_OK(err, "struct_ops_id_ops_mapping2__attach"))
+ goto out;
+
+ /* run tracing prog that calls .test_1 and checks return */
+ pid = getpid();
+ skel1->bss->test_pid = pid;
+ skel2->bss->test_pid = pid;
+ sys_gettid();
+ skel1->bss->test_pid = 0;
+ skel2->bss->test_pid = 0;
+
+ /* run syscall_prog that calls .test_1 and checks return */
+ prog1_fd = bpf_program__fd(skel1->progs.syscall_prog);
+ err = bpf_prog_test_run_opts(prog1_fd, NULL);
+ ASSERT_OK(err, "bpf_prog_test_run_opts");
+
+ prog2_fd = bpf_program__fd(skel2->progs.syscall_prog);
+ err = bpf_prog_test_run_opts(prog2_fd, NULL);
+ ASSERT_OK(err, "bpf_prog_test_run_opts");
+
+ ASSERT_EQ(skel1->bss->test_err, 0, "skel1->bss->test_err");
+ ASSERT_EQ(skel2->bss->test_err, 0, "skel2->bss->test_err");
+
+out:
+ struct_ops_id_ops_mapping1__destroy(skel1);
+ struct_ops_id_ops_mapping2__destroy(skel2);
+}
+
+void test_struct_ops_id_ops_mapping(void)
+{
+ if (test__start_subtest("st_ops_id_ops_mapping"))
+ test_st_ops_id_ops_mapping();
+}
diff --git a/tools/testing/selftests/bpf/test_sysctl.c b/tools/testing/selftests/bpf/prog_tests/test_sysctl.c
index bcdbd27f22f0..273dd41ca09e 100644
--- a/tools/testing/selftests/bpf/test_sysctl.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_sysctl.c
@@ -1,22 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
-#include <fcntl.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-
-#include <linux/filter.h>
-
-#include <bpf/bpf.h>
-#include <bpf/libbpf.h>
-
-#include <bpf/bpf_endian.h>
-#include "bpf_util.h"
+#include "test_progs.h"
#include "cgroup_helpers.h"
-#include "testing_helpers.h"
#define CG_PATH "/foo"
#define MAX_INSNS 512
@@ -1608,26 +1594,19 @@ static int run_tests(int cgfd)
return fails ? -1 : 0;
}
-int main(int argc, char **argv)
+void test_sysctl(void)
{
- int cgfd = -1;
- int err = 0;
+ int cgfd;
cgfd = cgroup_setup_and_join(CG_PATH);
- if (cgfd < 0)
- goto err;
+ if (!ASSERT_OK_FD(cgfd < 0, "create_cgroup"))
+ goto out;
- /* Use libbpf 1.0 API mode */
- libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+ if (!ASSERT_OK(run_tests(cgfd), "run_tests"))
+ goto out;
- if (run_tests(cgfd))
- goto err;
-
- goto out;
-err:
- err = -1;
out:
close(cgfd);
cleanup_cgroup_environment();
- return err;
+ return;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_task_local_data.c b/tools/testing/selftests/bpf/prog_tests/test_task_local_data.c
new file mode 100644
index 000000000000..9fd6306b455c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_task_local_data.c
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <pthread.h>
+#include <bpf/btf.h>
+#include <test_progs.h>
+
+#define TLD_FREE_DATA_ON_THREAD_EXIT
+#define TLD_DYN_DATA_SIZE 4096
+#include "task_local_data.h"
+
+struct test_tld_struct {
+ __u64 a;
+ __u64 b;
+ __u64 c;
+ __u64 d;
+};
+
+#include "test_task_local_data.skel.h"
+
+TLD_DEFINE_KEY(value0_key, "value0", sizeof(int));
+
+/*
+ * Reset task local data between subtests by clearing metadata other
+ * than the statically defined value0. This is safe as subtests run
+ * sequentially. Users of the task local data library should not touch
+ * library internals.
+ */
+static void reset_tld(void)
+{
+ if (TLD_READ_ONCE(tld_meta_p)) {
+ /* Remove TLDs created by tld_create_key() */
+ tld_meta_p->cnt = 1;
+ tld_meta_p->size = TLD_DYN_DATA_SIZE;
+ memset(&tld_meta_p->metadata[1], 0,
+ (TLD_MAX_DATA_CNT - 1) * sizeof(struct tld_metadata));
+ }
+}
+
+/* Serialize access to bpf program's global variables */
+static pthread_mutex_t global_mutex;
+
+static tld_key_t *tld_keys;
+
+#define TEST_BASIC_THREAD_NUM 32
+
+void *test_task_local_data_basic_thread(void *arg)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+ struct test_task_local_data *skel = (struct test_task_local_data *)arg;
+ int fd, err, tid, *value0, *value1;
+ struct test_tld_struct *value2;
+
+ fd = bpf_map__fd(skel->maps.tld_data_map);
+
+ value0 = tld_get_data(fd, value0_key);
+ if (!ASSERT_OK_PTR(value0, "tld_get_data"))
+ goto out;
+
+ value1 = tld_get_data(fd, tld_keys[1]);
+ if (!ASSERT_OK_PTR(value1, "tld_get_data"))
+ goto out;
+
+ value2 = tld_get_data(fd, tld_keys[2]);
+ if (!ASSERT_OK_PTR(value2, "tld_get_data"))
+ goto out;
+
+ tid = sys_gettid();
+
+ *value0 = tid + 0;
+ *value1 = tid + 1;
+ value2->a = tid + 2;
+ value2->b = tid + 3;
+ value2->c = tid + 4;
+ value2->d = tid + 5;
+
+ pthread_mutex_lock(&global_mutex);
+ /* Run task_main, which reads task local data and saves it to global variables */
+ err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.task_main), &opts);
+ ASSERT_OK(err, "run task_main");
+ ASSERT_OK(opts.retval, "task_main retval");
+
+ ASSERT_EQ(skel->bss->test_value0, tid + 0, "tld_get_data value0");
+ ASSERT_EQ(skel->bss->test_value1, tid + 1, "tld_get_data value1");
+ ASSERT_EQ(skel->bss->test_value2.a, tid + 2, "tld_get_data value2.a");
+ ASSERT_EQ(skel->bss->test_value2.b, tid + 3, "tld_get_data value2.b");
+ ASSERT_EQ(skel->bss->test_value2.c, tid + 4, "tld_get_data value2.c");
+ ASSERT_EQ(skel->bss->test_value2.d, tid + 5, "tld_get_data value2.d");
+ pthread_mutex_unlock(&global_mutex);
+
+ /* Make sure valueX are indeed local to threads */
+ ASSERT_EQ(*value0, tid + 0, "value0");
+ ASSERT_EQ(*value1, tid + 1, "value1");
+ ASSERT_EQ(value2->a, tid + 2, "value2.a");
+ ASSERT_EQ(value2->b, tid + 3, "value2.b");
+ ASSERT_EQ(value2->c, tid + 4, "value2.c");
+ ASSERT_EQ(value2->d, tid + 5, "value2.d");
+
+ *value0 = tid + 5;
+ *value1 = tid + 4;
+ value2->a = tid + 3;
+ value2->b = tid + 2;
+ value2->c = tid + 1;
+ value2->d = tid + 0;
+
+ /* Run task_main again */
+ pthread_mutex_lock(&global_mutex);
+ err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.task_main), &opts);
+ ASSERT_OK(err, "run task_main");
+ ASSERT_OK(opts.retval, "task_main retval");
+
+ ASSERT_EQ(skel->bss->test_value0, tid + 5, "tld_get_data value0");
+ ASSERT_EQ(skel->bss->test_value1, tid + 4, "tld_get_data value1");
+ ASSERT_EQ(skel->bss->test_value2.a, tid + 3, "tld_get_data value2.a");
+ ASSERT_EQ(skel->bss->test_value2.b, tid + 2, "tld_get_data value2.b");
+ ASSERT_EQ(skel->bss->test_value2.c, tid + 1, "tld_get_data value2.c");
+ ASSERT_EQ(skel->bss->test_value2.d, tid + 0, "tld_get_data value2.d");
+ pthread_mutex_unlock(&global_mutex);
+
+out:
+ pthread_exit(NULL);
+}
+
+static void test_task_local_data_basic(void)
+{
+ struct test_task_local_data *skel;
+ pthread_t thread[TEST_BASIC_THREAD_NUM];
+ char dummy_key_name[TLD_NAME_LEN];
+ tld_key_t key;
+ int i, err;
+
+ reset_tld();
+
+ ASSERT_OK(pthread_mutex_init(&global_mutex, NULL), "pthread_mutex_init");
+
+ skel = test_task_local_data__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ return;
+
+ tld_keys = calloc(TLD_MAX_DATA_CNT, sizeof(tld_key_t));
+ if (!ASSERT_OK_PTR(tld_keys, "calloc tld_keys"))
+ goto out;
+
+ ASSERT_FALSE(tld_key_is_err(value0_key), "TLD_DEFINE_KEY");
+ tld_keys[1] = tld_create_key("value1", sizeof(int));
+ ASSERT_FALSE(tld_key_is_err(tld_keys[1]), "tld_create_key");
+ tld_keys[2] = tld_create_key("value2", sizeof(struct test_tld_struct));
+ ASSERT_FALSE(tld_key_is_err(tld_keys[2]), "tld_create_key");
+
+ /*
+ * Shouldn't be able to store data exceeding a page. Create a TLD just big
+ * enough to exceed a page. TLDs already created are int value0, int
+ * value1, and struct test_tld_struct value2.
+ */
+ key = tld_create_key("value_not_exist",
+ TLD_PAGE_SIZE - 2 * sizeof(int) - sizeof(struct test_tld_struct) + 1);
+ ASSERT_EQ(tld_key_err_or_zero(key), -E2BIG, "tld_create_key");
+
+ key = tld_create_key("value2", sizeof(struct test_tld_struct));
+ ASSERT_EQ(tld_key_err_or_zero(key), -EEXIST, "tld_create_key");
+
+ /* Shouldn't be able to create the (TLD_MAX_DATA_CNT+1)-th TLD */
+ for (i = 3; i < TLD_MAX_DATA_CNT; i++) {
+ snprintf(dummy_key_name, TLD_NAME_LEN, "dummy_value%d", i);
+ tld_keys[i] = tld_create_key(dummy_key_name, sizeof(int));
+ ASSERT_FALSE(tld_key_is_err(tld_keys[i]), "tld_create_key");
+ }
+ key = tld_create_key("value_not_exist", sizeof(struct test_tld_struct));
+ ASSERT_EQ(tld_key_err_or_zero(key), -ENOSPC, "tld_create_key");
+
+ /* Access TLDs from multiple threads and check if they are thread-specific */
+ for (i = 0; i < TEST_BASIC_THREAD_NUM; i++) {
+ err = pthread_create(&thread[i], NULL, test_task_local_data_basic_thread, skel);
+ if (!ASSERT_OK(err, "pthread_create"))
+ goto out;
+ }
+
+out:
+ for (i = 0; i < TEST_BASIC_THREAD_NUM; i++)
+ pthread_join(thread[i], NULL);
+
+ if (tld_keys) {
+ free(tld_keys);
+ tld_keys = NULL;
+ }
+ tld_free();
+ test_task_local_data__destroy(skel);
+}
+
+#define TEST_RACE_THREAD_NUM (TLD_MAX_DATA_CNT - 3)
+
+void *test_task_local_data_race_thread(void *arg)
+{
+ int err = 0, id = (intptr_t)arg;
+ char key_name[32];
+ tld_key_t key;
+
+ key = tld_create_key("value_not_exist", TLD_PAGE_SIZE + 1);
+ if (tld_key_err_or_zero(key) != -E2BIG) {
+ err = 1;
+ goto out;
+ }
+
+ /* Only one thread will succeed in creating value1 */
+ key = tld_create_key("value1", sizeof(int));
+ if (!tld_key_is_err(key))
+ tld_keys[1] = key;
+
+ /* Only one thread will succeed in creating value2 */
+ key = tld_create_key("value2", sizeof(struct test_tld_struct));
+ if (!tld_key_is_err(key))
+ tld_keys[2] = key;
+
+ snprintf(key_name, 32, "thread_%d", id);
+ tld_keys[id] = tld_create_key(key_name, sizeof(int));
+ if (tld_key_is_err(tld_keys[id]))
+ err = 2;
+out:
+ return (void *)(intptr_t)err;
+}
+
+static void test_task_local_data_race(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+ pthread_t thread[TEST_RACE_THREAD_NUM];
+ struct test_task_local_data *skel;
+ int fd, i, j, err, *data;
+ void *ret = NULL;
+
+ skel = test_task_local_data__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ return;
+
+ tld_keys = calloc(TLD_MAX_DATA_CNT, sizeof(tld_key_t));
+ if (!ASSERT_OK_PTR(tld_keys, "calloc tld_keys"))
+ goto out;
+
+ fd = bpf_map__fd(skel->maps.tld_data_map);
+
+ ASSERT_FALSE(tld_key_is_err(value0_key), "TLD_DEFINE_KEY");
+ tld_keys[0] = value0_key;
+
+ for (j = 0; j < 100; j++) {
+ reset_tld();
+
+ for (i = 0; i < TEST_RACE_THREAD_NUM; i++) {
+ /*
+ * Try to make tld_create_key() calls race with each other. Issue
+ * both valid and invalid tld_create_key() calls from different threads.
+ */
+ err = pthread_create(&thread[i], NULL, test_task_local_data_race_thread,
+ (void *)(intptr_t)(i + 3));
+ if (CHECK_FAIL(err))
+ break;
+ }
+
+ /* Wait for all tld_create_key() to return */
+ for (i = 0; i < TEST_RACE_THREAD_NUM; i++) {
+ pthread_join(thread[i], &ret);
+ if (CHECK_FAIL(ret))
+ break;
+ }
+
+ /* Write a unique number to each TLD */
+ for (i = 0; i < TLD_MAX_DATA_CNT; i++) {
+ data = tld_get_data(fd, tld_keys[i]);
+ if (CHECK_FAIL(!data))
+ break;
+ *data = i;
+ }
+
+ /* Read TLDs back and check the values to detect address collisions */
+ for (i = 0; i < TLD_MAX_DATA_CNT; i++) {
+ data = tld_get_data(fd, tld_keys[i]);
+ if (CHECK_FAIL(!data || *data != i))
+ break;
+ }
+
+ /* Run task_main to make sure no invalid TLDs are added */
+ err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.task_main), &opts);
+ ASSERT_OK(err, "run task_main");
+ ASSERT_OK(opts.retval, "task_main retval");
+ }
+out:
+ if (tld_keys) {
+ free(tld_keys);
+ tld_keys = NULL;
+ }
+ tld_free();
+ test_task_local_data__destroy(skel);
+}
+
+void test_task_local_data(void)
+{
+ if (test__start_subtest("task_local_data_basic"))
+ test_task_local_data_basic();
+ if (test__start_subtest("task_local_data_race"))
+ test_task_local_data_race();
+}
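The task local data (TLD) library exercised above gives each thread lazily allocated, per-task storage keyed by name. A minimal usage sketch built only from the calls visible in this test; the key names and the helper below are illustrative, not part of the library:

    #define TLD_DYN_DATA_SIZE 4096          /* optional tuning, as done in the test */
    #include "task_local_data.h"

    /* Statically registered key, available before any thread runs */
    TLD_DEFINE_KEY(counter_key, "counter", sizeof(int));

    /* tld_map_fd is bpf_map__fd() of the skeleton's tld_data_map */
    static void tld_usage_sketch(int tld_map_fd)
    {
            tld_key_t key;
            int *counter, *scratch;

            /* Runtime-created key; a duplicate name would return -EEXIST */
            key = tld_create_key("scratch", sizeof(int));
            if (tld_key_is_err(key))
                    return;

            /* First access allocates this thread's copy of the data */
            counter = tld_get_data(tld_map_fd, counter_key);
            scratch = tld_get_data(tld_map_fd, key);
            if (counter && scratch) {
                    *scratch = 41;
                    *counter = *scratch + 1;  /* both writes stay thread-local */
            }

            tld_free();     /* release TLD bookkeeping, as the test does on teardown */
    }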
diff --git a/tools/testing/selftests/bpf/prog_tests/test_task_work.c b/tools/testing/selftests/bpf/prog_tests/test_task_work.c
new file mode 100644
index 000000000000..774b31a5f6ca
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_task_work.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <string.h>
+#include <stdio.h>
+#include "task_work.skel.h"
+#include "task_work_fail.skel.h"
+#include <linux/bpf.h>
+#include <linux/perf_event.h>
+#include <sys/syscall.h>
+#include <time.h>
+
+static int perf_event_open(__u32 type, __u64 config, int pid)
+{
+ struct perf_event_attr attr = {
+ .type = type,
+ .config = config,
+ .size = sizeof(struct perf_event_attr),
+ .sample_period = 100000,
+ };
+
+ return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
+}
+
+struct elem {
+ char data[128];
+ struct bpf_task_work tw;
+};
+
+static int verify_map(struct bpf_map *map, const char *expected_data)
+{
+ int err;
+ struct elem value;
+ int processed_values = 0;
+ int k, sz;
+
+ sz = bpf_map__max_entries(map);
+ for (k = 0; k < sz; ++k) {
+ err = bpf_map__lookup_elem(map, &k, sizeof(int), &value, sizeof(struct elem), 0);
+ if (err)
+ continue;
+ if (!ASSERT_EQ(strcmp(expected_data, value.data), 0, "map data")) {
+ fprintf(stderr, "expected '%s', found '%s' in %s map", expected_data,
+ value.data, bpf_map__name(map));
+ return 2;
+ }
+ processed_values++;
+ }
+
+ return processed_values == 0;
+}
+
+static void task_work_run(const char *prog_name, const char *map_name)
+{
+ struct task_work *skel;
+ struct bpf_program *prog;
+ struct bpf_map *map;
+ struct bpf_link *link = NULL;
+ int err, pe_fd = -1, pid, status, pipefd[2];
+ char user_string[] = "hello world";
+
+ if (!ASSERT_NEQ(pipe(pipefd), -1, "pipe"))
+ return;
+
+ pid = fork();
+ if (pid == 0) {
+ __u64 num = 1;
+ int i;
+ char buf;
+
+ close(pipefd[1]);
+ read(pipefd[0], &buf, sizeof(buf));
+ close(pipefd[0]);
+
+ for (i = 0; i < 10000; ++i)
+ num *= time(0) % 7;
+ (void)num;
+ exit(0);
+ }
+ if (!ASSERT_GT(pid, 0, "fork() failed")) {
+ close(pipefd[0]);
+ close(pipefd[1]);
+ return;
+ }
+
+ skel = task_work__open();
+ if (!ASSERT_OK_PTR(skel, "task_work__open"))
+ goto cleanup;
+
+ bpf_object__for_each_program(prog, skel->obj) {
+ bpf_program__set_autoload(prog, false);
+ }
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!ASSERT_OK_PTR(prog, "prog_name"))
+ goto cleanup;
+ bpf_program__set_autoload(prog, true);
+ skel->bss->user_ptr = (char *)user_string;
+
+ err = task_work__load(skel);
+ if (!ASSERT_OK(err, "skel_load"))
+ goto cleanup;
+
+ pe_fd = perf_event_open(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES, pid);
+ if (pe_fd == -1 && (errno == ENOENT || errno == EOPNOTSUPP)) {
+ printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n", __func__);
+ test__skip();
+ goto cleanup;
+ }
+ if (!ASSERT_NEQ(pe_fd, -1, "pe_fd")) {
+ fprintf(stderr, "perf_event_open errno: %d, pid: %d\n", errno, pid);
+ goto cleanup;
+ }
+
+ link = bpf_program__attach_perf_event(prog, pe_fd);
+ if (!ASSERT_OK_PTR(link, "attach_perf_event"))
+ goto cleanup;
+
+ /* perf event fd ownership is passed to bpf_link */
+ pe_fd = -1;
+ close(pipefd[0]);
+ write(pipefd[1], user_string, 1);
+ close(pipefd[1]);
+ /* Wait to collect some samples */
+ waitpid(pid, &status, 0);
+ pid = 0;
+ map = bpf_object__find_map_by_name(skel->obj, map_name);
+ if (!ASSERT_OK_PTR(map, "find map_name"))
+ goto cleanup;
+ if (!ASSERT_OK(verify_map(map, user_string), "verify map"))
+ goto cleanup;
+cleanup:
+ if (pe_fd >= 0)
+ close(pe_fd);
+ bpf_link__destroy(link);
+ task_work__destroy(skel);
+ if (pid > 0) {
+ close(pipefd[0]);
+ write(pipefd[1], user_string, 1);
+ close(pipefd[1]);
+ waitpid(pid, &status, 0);
+ }
+}
+
+void test_task_work(void)
+{
+ if (test__start_subtest("test_task_work_hash_map"))
+ task_work_run("oncpu_hash_map", "hmap");
+
+ if (test__start_subtest("test_task_work_array_map"))
+ task_work_run("oncpu_array_map", "arrmap");
+
+ if (test__start_subtest("test_task_work_lru_map"))
+ task_work_run("oncpu_lru_map", "lrumap");
+
+ RUN_TESTS(task_work_fail);
+}
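The attach logic above follows the stock libbpf pattern for sampling perf events: open the event with the raw syscall, then hand the fd to bpf_program__attach_perf_event(), which takes ownership of it on success. A condensed sketch of just that sequence; the helper name and sample period are illustrative:

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <bpf/libbpf.h>

    /* Attach @prog to a CPU-cycles sampling event on @pid; NULL on failure */
    static struct bpf_link *attach_cycles_sampler(struct bpf_program *prog, int pid)
    {
            struct perf_event_attr attr = {
                    .type = PERF_TYPE_HARDWARE,
                    .config = PERF_COUNT_HW_CPU_CYCLES,
                    .size = sizeof(attr),
                    .sample_period = 100000,        /* fire every ~100k cycles */
            };
            struct bpf_link *link;
            int pe_fd;

            pe_fd = syscall(__NR_perf_event_open, &attr, pid, -1 /* any cpu */, -1, 0);
            if (pe_fd < 0)
                    return NULL;

            link = bpf_program__attach_perf_event(prog, pe_fd);
            if (!link) {
                    close(pe_fd);   /* on success, the link owns pe_fd */
                    return NULL;
            }
            return link;
    }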
diff --git a/tools/testing/selftests/bpf/prog_tests/test_tc_edt.c b/tools/testing/selftests/bpf/prog_tests/test_tc_edt.c
new file mode 100644
index 000000000000..462512fb191f
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_tc_edt.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/*
+ * BPF-based flow shaping
+ *
+ * The test brings up a veth pair across two isolated namespaces, attaches
+ * a flow-shaping program to one end, and ensures that the measured
+ * transfer rate matches the rate set in the BPF shaper.
+ */
+
+#include <asm-generic/socket.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <math.h>
+#include <sys/time.h>
+#include <sys/socket.h>
+#include <bpf/libbpf.h>
+#include <pthread.h>
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "test_tc_edt.skel.h"
+
+#define SERVER_NS "tc-edt-server-ns"
+#define CLIENT_NS "tc-edt-client-ns"
+#define IP4_ADDR_VETH1 "192.168.1.1"
+#define IP4_ADDR_VETH2 "192.168.1.2"
+#define IP4_ADDR_VETH2_HEX 0xC0A80102
+
+#define TIMEOUT_MS 2000
+#define TEST_PORT 9000
+#define TARGET_RATE_MBPS 5.0
+#define TX_BYTES_COUNT (1 * 1000 * 1000)
+#define RATE_ERROR_PERCENT 2.0
+
+struct connection {
+ int server_listen_fd;
+ int server_conn_fd;
+ int client_conn_fd;
+};
+
+static int setup(struct test_tc_edt *skel)
+{
+ struct nstoken *nstoken_client, *nstoken_server;
+ int ret;
+
+ if (!ASSERT_OK(make_netns(CLIENT_NS), "create client ns"))
+ goto fail;
+ if (!ASSERT_OK(make_netns(SERVER_NS), "create server ns"))
+ goto fail_delete_client_ns;
+
+ nstoken_client = open_netns(CLIENT_NS);
+ if (!ASSERT_OK_PTR(nstoken_client, "open client ns"))
+ goto fail_delete_server_ns;
+ SYS(fail_close_client_ns, "ip link add veth1 type veth peer name %s",
+ "veth2 netns " SERVER_NS);
+ SYS(fail_close_client_ns, "ip -4 addr add " IP4_ADDR_VETH1 "/24 dev veth1");
+ SYS(fail_close_client_ns, "ip link set veth1 up");
+
+ nstoken_server = open_netns(SERVER_NS);
+ if (!ASSERT_OK_PTR(nstoken_server, "enter server ns"))
+ goto fail_close_client_ns;
+ SYS(fail_close_server_ns, "ip -4 addr add " IP4_ADDR_VETH2 "/24 dev veth2");
+ SYS(fail_close_server_ns, "ip link set veth2 up");
+ SYS(fail_close_server_ns, "tc qdisc add dev veth2 root fq");
+ ret = tc_prog_attach("veth2", -1, bpf_program__fd(skel->progs.tc_prog));
+ if (!ASSERT_OK(ret, "attach bpf prog"))
+ goto fail_close_server_ns;
+ skel->bss->target_rate = TARGET_RATE_MBPS * 1000 * 1000;
+ close_netns(nstoken_server);
+ close_netns(nstoken_client);
+
+ return 0;
+
+fail_close_server_ns:
+ close_netns(nstoken_server);
+fail_close_client_ns:
+ close_netns(nstoken_client);
+fail_delete_server_ns:
+ remove_netns(SERVER_NS);
+fail_delete_client_ns:
+ remove_netns(CLIENT_NS);
+fail:
+ return -1;
+}
+
+static void cleanup(void)
+{
+ remove_netns(CLIENT_NS);
+ remove_netns(SERVER_NS);
+}
+
+static void run_test(void)
+{
+ int server_fd, client_fd, err;
+ double rate_mbps, rate_error;
+ struct nstoken *nstoken;
+ __u64 ts_start, ts_end;
+
+ nstoken = open_netns(SERVER_NS);
+ if (!ASSERT_OK_PTR(nstoken, "open server ns"))
+ return;
+ server_fd = start_server(AF_INET, SOCK_STREAM, IP4_ADDR_VETH2,
+ TEST_PORT, TIMEOUT_MS);
+ if (!ASSERT_OK_FD(server_fd, "start server"))
+ return;
+
+ close_netns(nstoken);
+ nstoken = open_netns(CLIENT_NS);
+ if (!ASSERT_OK_PTR(nstoken, "open client ns"))
+ return;
+ client_fd = connect_to_fd(server_fd, 0);
+ if (!ASSERT_OK_FD(client_fd, "connect client"))
+ return;
+
+ ts_start = get_time_ns();
+ err = send_recv_data(server_fd, client_fd, TX_BYTES_COUNT);
+ ts_end = get_time_ns();
+ close_netns(nstoken);
+ ASSERT_OK(err, "send_recv_data");
+
+ rate_mbps = TX_BYTES_COUNT / ((ts_end - ts_start) / 1000.0);
+ rate_error =
+ fabs((rate_mbps - TARGET_RATE_MBPS) * 100.0 / TARGET_RATE_MBPS);
+
+ ASSERT_LE(rate_error, RATE_ERROR_PERCENT,
+ "rate error is lower than threshold");
+}
+
+void test_tc_edt(void)
+{
+ struct test_tc_edt *skel;
+
+ skel = test_tc_edt__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel open and load"))
+ return;
+
+ if (!ASSERT_OK(setup(skel), "global setup"))
+ return;
+
+ run_test();
+
+ cleanup();
+ test_tc_edt__destroy(skel);
+}
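For the pass criterion above, note that run_test() computes the rate as bytes per microsecond from nanosecond timestamps; this lines up with TARGET_RATE_MBPS only because setup() scales target_rate by 10^6, so the comparison is self-consistent. A worked example of the acceptance window, standalone and using the test's own constants:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
            const double tx_bytes = 1e6;    /* TX_BYTES_COUNT */
            const double target = 5.0;      /* TARGET_RATE_MBPS */
            const double max_err = 2.0;     /* RATE_ERROR_PERCENT */

            /* run_test(): rate = bytes / (duration_ns / 1000.0), i.e. bytes/us */
            double dur_ns = 200e6;          /* a 200 ms transfer */
            double rate = tx_bytes / (dur_ns / 1000.0);     /* = 5.0 */
            double err = fabs((rate - target) * 100.0 / target);

            /* Durations between ~196 ms and ~204 ms keep err <= 2% */
            printf("rate=%.2f err=%.2f%% pass=%d\n", rate, err, err <= max_err);
            return 0;
    }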
diff --git a/tools/testing/selftests/bpf/prog_tests/test_tc_tunnel.c b/tools/testing/selftests/bpf/prog_tests/test_tc_tunnel.c
new file mode 100644
index 000000000000..0fe0a8f62486
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_tc_tunnel.c
@@ -0,0 +1,714 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/*
+ * End-to-end eBPF tunnel test suite
+ * This file tests the BPF network tunnel implementations. For each
+ * tunnel type, the test validates that:
+ * - basic communication can first be established between the two veths
+ * - with a BPF-based encapsulation added on client egress, the client
+ * fails to communicate with the server
+ * - with a kernel-based decapsulation added on server ingress, the
+ * client can connect again
+ * - with the kernel-based decapsulation replaced by a BPF-based one,
+ * the client can still connect
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/socket.h>
+#include <bpf/libbpf.h>
+
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "test_tc_tunnel.skel.h"
+
+#define SERVER_NS "tc-tunnel-server-ns"
+#define CLIENT_NS "tc-tunnel-client-ns"
+#define MAC_ADDR_VETH1 "00:11:22:33:44:55"
+#define IP4_ADDR_VETH1 "192.168.1.1"
+#define IP6_ADDR_VETH1 "fd::1"
+#define MAC_ADDR_VETH2 "66:77:88:99:AA:BB"
+#define IP4_ADDR_VETH2 "192.168.1.2"
+#define IP6_ADDR_VETH2 "fd::2"
+
+#define TEST_NAME_MAX_LEN 64
+#define PROG_NAME_MAX_LEN 64
+#define TUNNEL_ARGS_MAX_LEN 128
+#define BUFFER_LEN 2000
+#define DEFAULT_TEST_DATA_SIZE 100
+#define GSO_TEST_DATA_SIZE BUFFER_LEN
+
+#define TIMEOUT_MS 1000
+#define TEST_PORT 8000
+#define UDP_PORT 5555
+#define MPLS_UDP_PORT 6635
+#define FOU_MPLS_PROTO 137
+#define VXLAN_ID 1
+#define VXLAN_PORT 8472
+#define MPLS_TABLE_ENTRIES_COUNT 65536
+
+static char tx_buffer[BUFFER_LEN], rx_buffer[BUFFER_LEN];
+
+struct subtest_cfg {
+ char *ebpf_tun_type;
+ char *iproute_tun_type;
+ char *mac_tun_type;
+ int ipproto;
+ void (*extra_decap_mod_args_cb)(struct subtest_cfg *cfg, char *dst);
+ bool tunnel_need_veth_mac;
+ bool configure_fou_rx_port;
+ char *tmode;
+ bool expect_kern_decap_failure;
+ bool configure_mpls;
+ bool test_gso;
+ char *tunnel_client_addr;
+ char *tunnel_server_addr;
+ char name[TEST_NAME_MAX_LEN];
+ char *server_addr;
+ int client_egress_prog_fd;
+ int server_ingress_prog_fd;
+ char extra_decap_mod_args[TUNNEL_ARGS_MAX_LEN];
+ int server_fd;
+};
+
+struct connection {
+ int client_fd;
+ int server_fd;
+};
+
+static int build_subtest_name(struct subtest_cfg *cfg, char *dst, size_t size)
+{
+ int ret;
+
+ ret = snprintf(dst, size, "%s_%s", cfg->ebpf_tun_type,
+ cfg->mac_tun_type);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int set_subtest_progs(struct subtest_cfg *cfg, struct test_tc_tunnel *skel)
+{
+ char prog_name[PROG_NAME_MAX_LEN];
+ struct bpf_program *prog;
+ int ret;
+
+ ret = snprintf(prog_name, PROG_NAME_MAX_LEN, "__encap_");
+ if (ret < 0)
+ return ret;
+ ret = build_subtest_name(cfg, prog_name + ret, PROG_NAME_MAX_LEN - ret);
+ if (ret < 0)
+ return ret;
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!prog)
+ return -1;
+
+ cfg->client_egress_prog_fd = bpf_program__fd(prog);
+ cfg->server_ingress_prog_fd = bpf_program__fd(skel->progs.decap_f);
+ return 0;
+}
+
+static void set_subtest_addresses(struct subtest_cfg *cfg)
+{
+ if (cfg->ipproto == 6)
+ cfg->server_addr = IP6_ADDR_VETH2;
+ else
+ cfg->server_addr = IP4_ADDR_VETH2;
+
+ /* Some tunnel types need specific addressing; in that case it has
+ * already been set in the configuration table. Otherwise, deduce
+ * the relevant addressing from the ipproto.
+ */
+ if (cfg->tunnel_client_addr && cfg->tunnel_server_addr)
+ return;
+
+ if (cfg->ipproto == 6) {
+ cfg->tunnel_client_addr = IP6_ADDR_VETH1;
+ cfg->tunnel_server_addr = IP6_ADDR_VETH2;
+ } else {
+ cfg->tunnel_client_addr = IP4_ADDR_VETH1;
+ cfg->tunnel_server_addr = IP4_ADDR_VETH2;
+ }
+}
+
+static int run_server(struct subtest_cfg *cfg)
+{
+ int family = cfg->ipproto == 6 ? AF_INET6 : AF_INET;
+ struct nstoken *nstoken;
+ struct network_helper_opts opts = {
+ .timeout_ms = TIMEOUT_MS
+ };
+
+ nstoken = open_netns(SERVER_NS);
+ if (!ASSERT_OK_PTR(nstoken, "open server ns"))
+ return -1;
+
+ cfg->server_fd = start_server_str(family, SOCK_STREAM, cfg->server_addr,
+ TEST_PORT, &opts);
+ close_netns(nstoken);
+ if (!ASSERT_OK_FD(cfg->server_fd, "start server"))
+ return -1;
+
+ return 0;
+}
+
+static int check_server_rx_data(struct subtest_cfg *cfg,
+ struct connection *conn, int len)
+{
+ int err;
+
+ memset(rx_buffer, 0, BUFFER_LEN);
+ err = recv(conn->server_fd, rx_buffer, len, 0);
+ if (!ASSERT_EQ(err, len, "check rx data len"))
+ return 1;
+ if (!ASSERT_MEMEQ(tx_buffer, rx_buffer, len, "check received data"))
+ return 1;
+ return 0;
+}
+
+static struct connection *connect_client_to_server(struct subtest_cfg *cfg)
+{
+ struct network_helper_opts opts = {.timeout_ms = 500};
+ int family = cfg->ipproto == 6 ? AF_INET6 : AF_INET;
+ struct connection *conn = NULL;
+ int client_fd, server_fd;
+
+ conn = malloc(sizeof(struct connection));
+ if (!conn)
+ return conn;
+
+ client_fd = connect_to_addr_str(family, SOCK_STREAM, cfg->server_addr,
+ TEST_PORT, &opts);
+
+ if (client_fd < 0) {
+ free(conn);
+ return NULL;
+ }
+
+ server_fd = accept(cfg->server_fd, NULL, NULL);
+ if (server_fd < 0) {
+ close(client_fd);
+ free(conn);
+ return NULL;
+ }
+
+ conn->server_fd = server_fd;
+ conn->client_fd = client_fd;
+
+ return conn;
+}
+
+static void disconnect_client_from_server(struct subtest_cfg *cfg,
+ struct connection *conn)
+{
+ close(conn->server_fd);
+ close(conn->client_fd);
+ free(conn);
+}
+
+static int send_and_test_data(struct subtest_cfg *cfg, bool must_succeed)
+{
+ struct connection *conn;
+ int err, res = -1;
+
+ conn = connect_client_to_server(cfg);
+ if (!must_succeed && !ASSERT_ERR_PTR(conn, "connection that must fail"))
+ goto end;
+ else if (!must_succeed)
+ return 0;
+
+ if (!ASSERT_OK_PTR(conn, "connection that must succeed"))
+ return -1;
+
+ err = send(conn->client_fd, tx_buffer, DEFAULT_TEST_DATA_SIZE, 0);
+ if (!ASSERT_EQ(err, DEFAULT_TEST_DATA_SIZE, "send data from client"))
+ goto end;
+ if (check_server_rx_data(cfg, conn, DEFAULT_TEST_DATA_SIZE))
+ goto end;
+
+ if (!cfg->test_gso) {
+ res = 0;
+ goto end;
+ }
+
+ err = send(conn->client_fd, tx_buffer, GSO_TEST_DATA_SIZE, 0);
+ if (!ASSERT_EQ(err, GSO_TEST_DATA_SIZE, "send (large) data from client"))
+ goto end;
+ if (check_server_rx_data(cfg, conn, DEFAULT_TEST_DATA_SIZE))
+ goto end;
+
+ res = 0;
+end:
+ disconnect_client_from_server(cfg, conn);
+ return res;
+}
+
+static void vxlan_decap_mod_args_cb(struct subtest_cfg *cfg, char *dst)
+{
+ snprintf(dst, TUNNEL_ARGS_MAX_LEN, "id %d dstport %d udp6zerocsumrx",
+ VXLAN_ID, VXLAN_PORT);
+}
+
+static void udp_decap_mod_args_cb(struct subtest_cfg *cfg, char *dst)
+{
+ bool is_mpls = !strcmp(cfg->mac_tun_type, "mpls");
+
+ snprintf(dst, TUNNEL_ARGS_MAX_LEN,
+ "encap fou encap-sport auto encap-dport %d",
+ is_mpls ? MPLS_UDP_PORT : UDP_PORT);
+}
+
+static int configure_fou_rx_port(struct subtest_cfg *cfg, bool add)
+{
+ bool is_mpls = strcmp(cfg->mac_tun_type, "mpls") == 0;
+ int fou_proto;
+
+ if (is_mpls)
+ fou_proto = FOU_MPLS_PROTO;
+ else
+ fou_proto = cfg->ipproto == 6 ? 41 : 4;
+
+ SYS(fail, "ip fou %s port %d ipproto %d%s", add ? "add" : "del",
+ is_mpls ? MPLS_UDP_PORT : UDP_PORT, fou_proto,
+ cfg->ipproto == 6 ? " -6" : "");
+
+ return 0;
+fail:
+ return 1;
+}
+
+static int add_fou_rx_port(struct subtest_cfg *cfg)
+{
+ return configure_fou_rx_port(cfg, true);
+}
+
+static int del_fou_rx_port(struct subtest_cfg *cfg)
+{
+ return configure_fou_rx_port(cfg, false);
+}
+
+static int update_tunnel_intf_addr(struct subtest_cfg *cfg)
+{
+ SYS(fail, "ip link set dev testtun0 address " MAC_ADDR_VETH2);
+ return 0;
+fail:
+ return -1;
+}
+
+static int configure_kernel_for_mpls(struct subtest_cfg *cfg)
+{
+ SYS(fail, "sysctl -qw net.mpls.platform_labels=%d",
+ MPLS_TABLE_ENTRIES_COUNT);
+ SYS(fail, "ip -f mpls route add 1000 dev lo");
+ SYS(fail, "ip link set lo up");
+ SYS(fail, "sysctl -qw net.mpls.conf.testtun0.input=1");
+ SYS(fail, "sysctl -qw net.ipv4.conf.lo.rp_filter=0");
+ return 0;
+fail:
+ return -1;
+}
+
+static int configure_encapsulation(struct subtest_cfg *cfg)
+{
+ int ret;
+
+ ret = tc_prog_attach("veth1", -1, cfg->client_egress_prog_fd);
+
+ return ret;
+}
+
+static int configure_kernel_decapsulation(struct subtest_cfg *cfg)
+{
+ struct nstoken *nstoken = open_netns(SERVER_NS);
+ int ret = -1;
+
+ if (!ASSERT_OK_PTR(nstoken, "open server ns"))
+ return ret;
+
+ if (cfg->configure_fou_rx_port &&
+ !ASSERT_OK(add_fou_rx_port(cfg), "configure FOU RX port"))
+ goto fail;
+ SYS(fail, "ip link add name testtun0 type %s %s remote %s local %s %s",
+ cfg->iproute_tun_type, cfg->tmode ? cfg->tmode : "",
+ cfg->tunnel_client_addr, cfg->tunnel_server_addr,
+ cfg->extra_decap_mod_args);
+ if (cfg->tunnel_need_veth_mac &&
+ !ASSERT_OK(update_tunnel_intf_addr(cfg), "update testtun0 mac"))
+ goto fail;
+ if (cfg->configure_mpls &&
+ (!ASSERT_OK(configure_kernel_for_mpls(cfg),
+ "configure MPLS decap")))
+ goto fail;
+ SYS(fail, "sysctl -qw net.ipv4.conf.all.rp_filter=0");
+ SYS(fail, "sysctl -qw net.ipv4.conf.testtun0.rp_filter=0");
+ SYS(fail, "ip link set dev testtun0 up");
+
+ ret = 0;
+fail:
+ close_netns(nstoken);
+ return ret;
+}
+
+static void remove_kernel_decapsulation(struct subtest_cfg *cfg)
+{
+ SYS_NOFAIL("ip link del testtun0");
+ if (cfg->configure_mpls)
+ SYS_NOFAIL("ip -f mpls route del 1000 dev lo");
+ if (cfg->configure_fou_rx_port)
+ del_fou_rx_port(cfg);
+}
+
+static int configure_ebpf_decapsulation(struct subtest_cfg *cfg)
+{
+ struct nstoken *nstoken = open_netns(SERVER_NS);
+ int ret = -1;
+
+ if (!ASSERT_OK_PTR(nstoken, "open server ns"))
+ return ret;
+
+ if (!cfg->expect_kern_decap_failure)
+ SYS(fail, "ip link del testtun0");
+
+ if (!ASSERT_OK(tc_prog_attach("veth2", cfg->server_ingress_prog_fd, -1),
+ "attach_program"))
+ goto fail;
+
+ ret = 0;
+fail:
+ close_netns(nstoken);
+ return ret;
+}
+
+static void run_test(struct subtest_cfg *cfg)
+{
+ struct nstoken *nstoken;
+
+ if (!ASSERT_OK(run_server(cfg), "run server"))
+ return;
+
+ nstoken = open_netns(CLIENT_NS);
+ if (!ASSERT_OK_PTR(nstoken, "open client ns"))
+ goto fail;
+
+ /* Basic communication must work */
+ if (!ASSERT_OK(send_and_test_data(cfg, true), "connect without any encap"))
+ goto fail;
+
+ /* Attach encapsulation program to client */
+ if (!ASSERT_OK(configure_encapsulation(cfg), "configure encapsulation"))
+ goto fail;
+
+ /* If supported, set up kernel decapsulation; the connection must succeed */
+ if (!cfg->expect_kern_decap_failure) {
+ if (!ASSERT_OK(configure_kernel_decapsulation(cfg),
+ "configure kernel decapsulation"))
+ goto fail;
+ if (!ASSERT_OK(send_and_test_data(cfg, true),
+ "connect with encap prog and kern decap"))
+ goto fail;
+ }
+
+ /* Replace kernel decapsulation with BPF decapsulation, test must pass */
+ if (!ASSERT_OK(configure_ebpf_decapsulation(cfg), "configure ebpf decapsulation"))
+ goto fail;
+ ASSERT_OK(send_and_test_data(cfg, true), "connect with encap and decap progs");
+
+fail:
+ close_netns(nstoken);
+ close(cfg->server_fd);
+}
+
+static int setup(void)
+{
+ struct nstoken *nstoken_client, *nstoken_server;
+ int fd, err;
+
+ fd = open("/dev/urandom", O_RDONLY);
+ if (!ASSERT_OK_FD(fd, "open urandom"))
+ goto fail;
+ err = read(fd, tx_buffer, BUFFER_LEN);
+ close(fd);
+
+ if (!ASSERT_EQ(err, BUFFER_LEN, "read random bytes"))
+ goto fail;
+
+ /* Configure the testing network */
+ if (!ASSERT_OK(make_netns(CLIENT_NS), "create client ns") ||
+ !ASSERT_OK(make_netns(SERVER_NS), "create server ns"))
+ goto fail;
+
+ nstoken_client = open_netns(CLIENT_NS);
+ if (!ASSERT_OK_PTR(nstoken_client, "open client ns"))
+ goto fail_delete_ns;
+ SYS(fail_close_ns_client, "ip link add %s type veth peer name %s",
+ "veth1 mtu 1500 netns " CLIENT_NS " address " MAC_ADDR_VETH1,
+ "veth2 mtu 1500 netns " SERVER_NS " address " MAC_ADDR_VETH2);
+ SYS(fail_close_ns_client, "ethtool -K veth1 tso off");
+ SYS(fail_close_ns_client, "ip link set veth1 up");
+ nstoken_server = open_netns(SERVER_NS);
+ if (!ASSERT_OK_PTR(nstoken_server, "open server ns"))
+ goto fail_close_ns_client;
+ SYS(fail_close_ns_server, "ip link set veth2 up");
+
+ close_netns(nstoken_server);
+ close_netns(nstoken_client);
+ return 0;
+
+fail_close_ns_server:
+ close_netns(nstoken_server);
+fail_close_ns_client:
+ close_netns(nstoken_client);
+fail_delete_ns:
+ SYS_NOFAIL("ip netns del " CLIENT_NS);
+ SYS_NOFAIL("ip netns del " SERVER_NS);
+fail:
+ return -1;
+}
+
+static int subtest_setup(struct test_tc_tunnel *skel, struct subtest_cfg *cfg)
+{
+ struct nstoken *nstoken_client, *nstoken_server;
+ int ret = -1;
+
+ set_subtest_addresses(cfg);
+ if (!ASSERT_OK(set_subtest_progs(cfg, skel),
+ "find subtest progs"))
+ goto fail;
+ if (cfg->extra_decap_mod_args_cb)
+ cfg->extra_decap_mod_args_cb(cfg, cfg->extra_decap_mod_args);
+
+ nstoken_client = open_netns(CLIENT_NS);
+ if (!ASSERT_OK_PTR(nstoken_client, "open client ns"))
+ goto fail;
+ SYS(fail_close_client_ns,
+ "ip -4 addr add " IP4_ADDR_VETH1 "/24 dev veth1");
+ SYS(fail_close_client_ns, "ip -4 route flush table main");
+ SYS(fail_close_client_ns,
+ "ip -4 route add " IP4_ADDR_VETH2 " mtu 1450 dev veth1");
+ SYS(fail_close_client_ns,
+ "ip -6 addr add " IP6_ADDR_VETH1 "/64 dev veth1 nodad");
+ SYS(fail_close_client_ns, "ip -6 route flush table main");
+ SYS(fail_close_client_ns,
+ "ip -6 route add " IP6_ADDR_VETH2 " mtu 1430 dev veth1");
+ nstoken_server = open_netns(SERVER_NS);
+ if (!ASSERT_OK_PTR(nstoken_server, "open server ns"))
+ goto fail_close_client_ns;
+ SYS(fail_close_server_ns,
+ "ip -4 addr add " IP4_ADDR_VETH2 "/24 dev veth2");
+ SYS(fail_close_server_ns,
+ "ip -6 addr add " IP6_ADDR_VETH2 "/64 dev veth2 nodad");
+
+ ret = 0;
+
+fail_close_server_ns:
+ close_netns(nstoken_server);
+fail_close_client_ns:
+ close_netns(nstoken_client);
+fail:
+ return ret;
+}
+
+static void subtest_cleanup(struct subtest_cfg *cfg)
+{
+ struct nstoken *nstoken;
+
+ nstoken = open_netns(CLIENT_NS);
+ if (ASSERT_OK_PTR(nstoken, "open clien ns")) {
+ SYS_NOFAIL("tc qdisc delete dev veth1 parent ffff:fff1");
+ SYS_NOFAIL("ip a flush veth1");
+ close_netns(nstoken);
+ }
+ nstoken = open_netns(SERVER_NS);
+ if (ASSERT_OK_PTR(nstoken, "open clien ns")) {
+ SYS_NOFAIL("tc qdisc delete dev veth2 parent ffff:fff1");
+ SYS_NOFAIL("ip a flush veth2");
+ if (!cfg->expect_kern_decap_failure)
+ remove_kernel_decapsulation(cfg);
+ close_netns(nstoken);
+ }
+}
+
+static void cleanup(void)
+{
+ remove_netns(CLIENT_NS);
+ remove_netns(SERVER_NS);
+}
+
+static struct subtest_cfg subtests_cfg[] = {
+ {
+ .ebpf_tun_type = "ipip",
+ .mac_tun_type = "none",
+ .iproute_tun_type = "ipip",
+ .ipproto = 4,
+ },
+ {
+ .ebpf_tun_type = "ipip6",
+ .mac_tun_type = "none",
+ .iproute_tun_type = "ip6tnl",
+ .ipproto = 4,
+ .tunnel_client_addr = IP6_ADDR_VETH1,
+ .tunnel_server_addr = IP6_ADDR_VETH2,
+ },
+ {
+ .ebpf_tun_type = "ip6tnl",
+ .iproute_tun_type = "ip6tnl",
+ .mac_tun_type = "none",
+ .ipproto = 6,
+ },
+ {
+ .mac_tun_type = "none",
+ .ebpf_tun_type = "sit",
+ .iproute_tun_type = "sit",
+ .ipproto = 6,
+ .tunnel_client_addr = IP4_ADDR_VETH1,
+ .tunnel_server_addr = IP4_ADDR_VETH2,
+ },
+ {
+ .ebpf_tun_type = "vxlan",
+ .mac_tun_type = "eth",
+ .iproute_tun_type = "vxlan",
+ .ipproto = 4,
+ .extra_decap_mod_args_cb = vxlan_decap_mod_args_cb,
+ .tunnel_need_veth_mac = true
+ },
+ {
+ .ebpf_tun_type = "ip6vxlan",
+ .mac_tun_type = "eth",
+ .iproute_tun_type = "vxlan",
+ .ipproto = 6,
+ .extra_decap_mod_args_cb = vxlan_decap_mod_args_cb,
+ .tunnel_need_veth_mac = true
+ },
+ {
+ .ebpf_tun_type = "gre",
+ .mac_tun_type = "none",
+ .iproute_tun_type = "gre",
+ .ipproto = 4,
+ .test_gso = true
+ },
+ {
+ .ebpf_tun_type = "gre",
+ .mac_tun_type = "eth",
+ .iproute_tun_type = "gretap",
+ .ipproto = 4,
+ .tunnel_need_veth_mac = true,
+ .test_gso = true
+ },
+ {
+ .ebpf_tun_type = "gre",
+ .mac_tun_type = "mpls",
+ .iproute_tun_type = "gre",
+ .ipproto = 4,
+ .configure_mpls = true,
+ .test_gso = true
+ },
+ {
+ .ebpf_tun_type = "ip6gre",
+ .mac_tun_type = "none",
+ .iproute_tun_type = "ip6gre",
+ .ipproto = 6,
+ .test_gso = true,
+ },
+ {
+ .ebpf_tun_type = "ip6gre",
+ .mac_tun_type = "eth",
+ .iproute_tun_type = "ip6gretap",
+ .ipproto = 6,
+ .tunnel_need_veth_mac = true,
+ .test_gso = true
+ },
+ {
+ .ebpf_tun_type = "ip6gre",
+ .mac_tun_type = "mpls",
+ .iproute_tun_type = "ip6gre",
+ .ipproto = 6,
+ .configure_mpls = true,
+ .test_gso = true
+ },
+ {
+ .ebpf_tun_type = "udp",
+ .mac_tun_type = "none",
+ .iproute_tun_type = "ipip",
+ .ipproto = 4,
+ .extra_decap_mod_args_cb = udp_decap_mod_args_cb,
+ .configure_fou_rx_port = true,
+ .test_gso = true
+ },
+ {
+ .ebpf_tun_type = "udp",
+ .mac_tun_type = "eth",
+ .iproute_tun_type = "ipip",
+ .ipproto = 4,
+ .extra_decap_mod_args_cb = udp_decap_mod_args_cb,
+ .configure_fou_rx_port = true,
+ .expect_kern_decap_failure = true,
+ .test_gso = true
+ },
+ {
+ .ebpf_tun_type = "udp",
+ .mac_tun_type = "mpls",
+ .iproute_tun_type = "ipip",
+ .ipproto = 4,
+ .extra_decap_mod_args_cb = udp_decap_mod_args_cb,
+ .configure_fou_rx_port = true,
+ .tmode = "mode any ttl 255",
+ .configure_mpls = true,
+ .test_gso = true
+ },
+ {
+ .ebpf_tun_type = "ip6udp",
+ .mac_tun_type = "none",
+ .iproute_tun_type = "ip6tnl",
+ .ipproto = 6,
+ .extra_decap_mod_args_cb = udp_decap_mod_args_cb,
+ .configure_fou_rx_port = true,
+ .test_gso = true
+ },
+ {
+ .ebpf_tun_type = "ip6udp",
+ .mac_tun_type = "eth",
+ .iproute_tun_type = "ip6tnl",
+ .ipproto = 6,
+ .extra_decap_mod_args_cb = udp_decap_mod_args_cb,
+ .configure_fou_rx_port = true,
+ .expect_kern_decap_failure = true,
+ .test_gso = true
+ },
+ {
+ .ebpf_tun_type = "ip6udp",
+ .mac_tun_type = "mpls",
+ .iproute_tun_type = "ip6tnl",
+ .ipproto = 6,
+ .extra_decap_mod_args_cb = udp_decap_mod_args_cb,
+ .configure_fou_rx_port = true,
+ .tmode = "mode any ttl 255",
+ .expect_kern_decap_failure = true,
+ .test_gso = true
+ },
+};
+
+void test_tc_tunnel(void)
+{
+ struct test_tc_tunnel *skel;
+ struct subtest_cfg *cfg;
+ int i, ret;
+
+ skel = test_tc_tunnel__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel open and load"))
+ return;
+
+ if (!ASSERT_OK(setup(), "global setup"))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(subtests_cfg); i++) {
+ cfg = &subtests_cfg[i];
+ ret = build_subtest_name(cfg, cfg->name, TEST_NAME_MAX_LEN);
+ if (ret < 0 || !test__start_subtest(cfg->name))
+ continue;
+ if (subtest_setup(skel, cfg) == 0)
+ run_test(cfg);
+ subtest_cleanup(cfg);
+ }
+ cleanup();
+}
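For reference, build_subtest_name() and set_subtest_progs() tie each config entry to its BPF program purely by string concatenation, so the gre/mpls entry above runs as subtest "gre_mpls" against program "__encap_gre_mpls". A tiny standalone check of that mapping:

    #include <stdio.h>

    int main(void)
    {
            /* Mirrors set_subtest_progs(): "__encap_" + ebpf_tun_type + "_" + mac_tun_type */
            const char *ebpf_tun_type = "gre", *mac_tun_type = "mpls";
            char prog_name[64];
            int ret;

            ret = snprintf(prog_name, sizeof(prog_name), "__encap_");
            snprintf(prog_name + ret, sizeof(prog_name) - ret, "%s_%s",
                     ebpf_tun_type, mac_tun_type);
            printf("%s\n", prog_name);      /* -> __encap_gre_mpls */
            return 0;
    }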
diff --git a/tools/testing/selftests/bpf/prog_tests/test_tunnel.c b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c
index bae0e9de277d..eb9309931272 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_tunnel.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c
@@ -534,85 +534,6 @@ static void ping6_dev1(void)
close_netns(nstoken);
}
-static int attach_tc_prog(int ifindex, int igr_fd, int egr_fd)
-{
- DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = ifindex,
- .attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS);
- DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts1, .handle = 1,
- .priority = 1, .prog_fd = igr_fd);
- DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts2, .handle = 1,
- .priority = 1, .prog_fd = egr_fd);
- int ret;
-
- ret = bpf_tc_hook_create(&hook);
- if (!ASSERT_OK(ret, "create tc hook"))
- return ret;
-
- if (igr_fd >= 0) {
- hook.attach_point = BPF_TC_INGRESS;
- ret = bpf_tc_attach(&hook, &opts1);
- if (!ASSERT_OK(ret, "bpf_tc_attach")) {
- bpf_tc_hook_destroy(&hook);
- return ret;
- }
- }
-
- if (egr_fd >= 0) {
- hook.attach_point = BPF_TC_EGRESS;
- ret = bpf_tc_attach(&hook, &opts2);
- if (!ASSERT_OK(ret, "bpf_tc_attach")) {
- bpf_tc_hook_destroy(&hook);
- return ret;
- }
- }
-
- return 0;
-}
-
-static int generic_attach(const char *dev, int igr_fd, int egr_fd)
-{
- int ifindex;
-
- if (!ASSERT_OK_FD(igr_fd, "check ingress fd"))
- return -1;
- if (!ASSERT_OK_FD(egr_fd, "check egress fd"))
- return -1;
-
- ifindex = if_nametoindex(dev);
- if (!ASSERT_NEQ(ifindex, 0, "get ifindex"))
- return -1;
-
- return attach_tc_prog(ifindex, igr_fd, egr_fd);
-}
-
-static int generic_attach_igr(const char *dev, int igr_fd)
-{
- int ifindex;
-
- if (!ASSERT_OK_FD(igr_fd, "check ingress fd"))
- return -1;
-
- ifindex = if_nametoindex(dev);
- if (!ASSERT_NEQ(ifindex, 0, "get ifindex"))
- return -1;
-
- return attach_tc_prog(ifindex, igr_fd, -1);
-}
-
-static int generic_attach_egr(const char *dev, int egr_fd)
-{
- int ifindex;
-
- if (!ASSERT_OK_FD(egr_fd, "check egress fd"))
- return -1;
-
- ifindex = if_nametoindex(dev);
- if (!ASSERT_NEQ(ifindex, 0, "get ifindex"))
- return -1;
-
- return attach_tc_prog(ifindex, -1, egr_fd);
-}
-
static void test_vxlan_tunnel(void)
{
struct test_tunnel_kern *skel = NULL;
@@ -635,12 +556,12 @@ static void test_vxlan_tunnel(void)
goto done;
get_src_prog_fd = bpf_program__fd(skel->progs.vxlan_get_tunnel_src);
set_src_prog_fd = bpf_program__fd(skel->progs.vxlan_set_tunnel_src);
- if (generic_attach(VXLAN_TUNL_DEV1, get_src_prog_fd, set_src_prog_fd))
+ if (tc_prog_attach(VXLAN_TUNL_DEV1, get_src_prog_fd, set_src_prog_fd))
goto done;
/* load and attach bpf prog to veth dev tc hook point */
set_dst_prog_fd = bpf_program__fd(skel->progs.veth_set_outer_dst);
- if (generic_attach_igr("veth1", set_dst_prog_fd))
+ if (tc_prog_attach("veth1", set_dst_prog_fd, -1))
goto done;
/* load and attach prog set_md to tunnel dev tc hook point at_ns0 */
@@ -648,7 +569,7 @@ static void test_vxlan_tunnel(void)
if (!ASSERT_OK_PTR(nstoken, "setns src"))
goto done;
set_dst_prog_fd = bpf_program__fd(skel->progs.vxlan_set_tunnel_dst);
- if (generic_attach_egr(VXLAN_TUNL_DEV0, set_dst_prog_fd))
+ if (tc_prog_attach(VXLAN_TUNL_DEV0, -1, set_dst_prog_fd))
goto done;
close_netns(nstoken);
@@ -695,7 +616,7 @@ static void test_ip6vxlan_tunnel(void)
goto done;
get_src_prog_fd = bpf_program__fd(skel->progs.ip6vxlan_get_tunnel_src);
set_src_prog_fd = bpf_program__fd(skel->progs.ip6vxlan_set_tunnel_src);
- if (generic_attach(IP6VXLAN_TUNL_DEV1, get_src_prog_fd, set_src_prog_fd))
+ if (tc_prog_attach(IP6VXLAN_TUNL_DEV1, get_src_prog_fd, set_src_prog_fd))
goto done;
/* load and attach prog set_md to tunnel dev tc hook point at_ns0 */
@@ -703,7 +624,7 @@ static void test_ip6vxlan_tunnel(void)
if (!ASSERT_OK_PTR(nstoken, "setns src"))
goto done;
set_dst_prog_fd = bpf_program__fd(skel->progs.ip6vxlan_set_tunnel_dst);
- if (generic_attach_egr(IP6VXLAN_TUNL_DEV0, set_dst_prog_fd))
+ if (tc_prog_attach(IP6VXLAN_TUNL_DEV0, -1, set_dst_prog_fd))
goto done;
close_netns(nstoken);
@@ -764,7 +685,7 @@ static void test_ipip_tunnel(enum ipip_encap encap)
skel->progs.ipip_set_tunnel);
}
- if (generic_attach(IPIP_TUNL_DEV1, get_src_prog_fd, set_src_prog_fd))
+ if (tc_prog_attach(IPIP_TUNL_DEV1, get_src_prog_fd, set_src_prog_fd))
goto done;
ping_dev0();
@@ -797,7 +718,7 @@ static void test_xfrm_tunnel(void)
/* attach tc prog to tunnel dev */
tc_prog_fd = bpf_program__fd(skel->progs.xfrm_get_state);
- if (generic_attach_igr("veth1", tc_prog_fd))
+ if (tc_prog_attach("veth1", tc_prog_fd, -1))
goto done;
/* attach xdp prog to tunnel dev */
@@ -870,7 +791,7 @@ static void test_gre_tunnel(enum gre_test test)
if (!ASSERT_OK(err, "add tunnel"))
goto done;
- if (generic_attach(GRE_TUNL_DEV1, get_fd, set_fd))
+ if (tc_prog_attach(GRE_TUNL_DEV1, get_fd, set_fd))
goto done;
ping_dev0();
@@ -911,7 +832,7 @@ static void test_ip6gre_tunnel(enum ip6gre_test test)
set_fd = bpf_program__fd(skel->progs.ip6gretap_set_tunnel);
get_fd = bpf_program__fd(skel->progs.ip6gretap_get_tunnel);
- if (generic_attach(IP6GRE_TUNL_DEV1, get_fd, set_fd))
+ if (tc_prog_attach(IP6GRE_TUNL_DEV1, get_fd, set_fd))
goto done;
ping6_veth0();
@@ -954,7 +875,7 @@ static void test_erspan_tunnel(enum erspan_test test)
set_fd = bpf_program__fd(skel->progs.erspan_set_tunnel);
get_fd = bpf_program__fd(skel->progs.erspan_get_tunnel);
- if (generic_attach(ERSPAN_TUNL_DEV1, get_fd, set_fd))
+ if (tc_prog_attach(ERSPAN_TUNL_DEV1, get_fd, set_fd))
goto done;
ping_dev0();
@@ -990,7 +911,7 @@ static void test_ip6erspan_tunnel(enum erspan_test test)
set_fd = bpf_program__fd(skel->progs.ip4ip6erspan_set_tunnel);
get_fd = bpf_program__fd(skel->progs.ip4ip6erspan_get_tunnel);
- if (generic_attach(IP6ERSPAN_TUNL_DEV1, get_fd, set_fd))
+ if (tc_prog_attach(IP6ERSPAN_TUNL_DEV1, get_fd, set_fd))
goto done;
ping6_veth0();
@@ -1017,7 +938,7 @@ static void test_geneve_tunnel(void)
set_fd = bpf_program__fd(skel->progs.geneve_set_tunnel);
get_fd = bpf_program__fd(skel->progs.geneve_get_tunnel);
- if (generic_attach(GENEVE_TUNL_DEV1, get_fd, set_fd))
+ if (tc_prog_attach(GENEVE_TUNL_DEV1, get_fd, set_fd))
goto done;
ping_dev0();
@@ -1044,7 +965,7 @@ static void test_ip6geneve_tunnel(void)
set_fd = bpf_program__fd(skel->progs.ip6geneve_set_tunnel);
get_fd = bpf_program__fd(skel->progs.ip6geneve_get_tunnel);
- if (generic_attach(IP6GENEVE_TUNL_DEV1, get_fd, set_fd))
+ if (tc_prog_attach(IP6GENEVE_TUNL_DEV1, get_fd, set_fd))
goto done;
ping_dev0();
@@ -1083,7 +1004,7 @@ static void test_ip6tnl_tunnel(enum ip6tnl_test test)
get_fd = bpf_program__fd(skel->progs.ip6ip6_get_tunnel);
break;
}
- if (generic_attach(IP6TNL_TUNL_DEV1, get_fd, set_fd))
+ if (tc_prog_attach(IP6TNL_TUNL_DEV1, get_fd, set_fd))
goto done;
ping6_veth0();
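The per-direction wrappers removed here are folded into a shared tc_prog_attach(dev, ingress_fd, egress_fd) helper, with -1 marking an unused direction, as every converted call site shows. Its implementation is not part of this hunk, but it presumably mirrors the deleted attach_tc_prog(); a minimal sketch of that libbpf TC attach sequence:

    #include <net/if.h>
    #include <bpf/libbpf.h>

    /* Attach @igr_fd/@egr_fd (or -1) to @dev's TC hooks; 0 on success */
    static int tc_attach_sketch(const char *dev, int igr_fd, int egr_fd)
    {
            DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook,
                                .attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS);
            DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1);
            int ret;

            hook.ifindex = if_nametoindex(dev);
            if (!hook.ifindex)
                    return -1;

            /* Creates the clsact qdisc covering both directions at once */
            ret = bpf_tc_hook_create(&hook);
            if (ret)
                    return ret;

            if (igr_fd >= 0) {
                    hook.attach_point = BPF_TC_INGRESS;
                    opts.prog_fd = igr_fd;
                    ret = bpf_tc_attach(&hook, &opts);
                    if (ret)
                            goto destroy;
            }
            if (egr_fd >= 0) {
                    hook.attach_point = BPF_TC_EGRESS;
                    opts.prog_fd = egr_fd;
                    opts.prog_id = 0;       /* reset out-param before reuse */
                    ret = bpf_tc_attach(&hook, &opts);
                    if (ret)
                            goto destroy;
            }
            return 0;
    destroy:
            hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS;
            bpf_tc_hook_destroy(&hook);
            return ret;
    }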
diff --git a/tools/testing/selftests/bpf/prog_tests/test_veristat.c b/tools/testing/selftests/bpf/prog_tests/test_veristat.c
index 47b56c258f3f..b38c16b4247f 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_veristat.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_veristat.c
@@ -60,29 +60,41 @@ static void test_set_global_vars_succeeds(void)
" -G \"var_s8 = -128\" "\
" -G \"var_u8 = 255\" "\
" -G \"var_ea = EA2\" "\
- " -G \"var_eb = EB2\" "\
- " -G \"var_ec = EC2\" "\
+ " -G \"var_eb = EB2\" "\
+ " -G \"var_ec=EC2\" "\
" -G \"var_b = 1\" "\
- " -G \"struct1.struct2.u.var_u8 = 170\" "\
+ " -G \"struct1[2].struct2[1][2].u.var_u8[2]=170\" "\
" -G \"union1.struct3.var_u8_l = 0xaa\" "\
" -G \"union1.struct3.var_u8_h = 0xaa\" "\
- "-vl2 > %s", fix->veristat, fix->tmpfile);
+ " -G \"arr[3]= 171\" " \
+ " -G \"arr[EA2] =172\" " \
+ " -G \"enum_arr[EC2]=EA3\" " \
+ " -G \"three_d[31][7][EA2]=173\"" \
+ " -G \"struct1[2].struct2[1][2].u.mat[5][3]=174\" " \
+ " -G \"struct11 [ 7 ] [ 5 ] .struct2[0][1].u.mat[3][0] = 175\" " \
+ " -vl2 > %s", fix->veristat, fix->tmpfile);
read(fix->fd, fix->output, fix->sz);
- __CHECK_STR("_w=0xf000000000000001 ", "var_s64 = 0xf000000000000001");
- __CHECK_STR("_w=0xfedcba9876543210 ", "var_u64 = 0xfedcba9876543210");
- __CHECK_STR("_w=0x80000000 ", "var_s32 = -0x80000000");
- __CHECK_STR("_w=0x76543210 ", "var_u32 = 0x76543210");
- __CHECK_STR("_w=0x8000 ", "var_s16 = -32768");
- __CHECK_STR("_w=0xecec ", "var_u16 = 60652");
- __CHECK_STR("_w=128 ", "var_s8 = -128");
- __CHECK_STR("_w=255 ", "var_u8 = 255");
- __CHECK_STR("_w=11 ", "var_ea = EA2");
- __CHECK_STR("_w=12 ", "var_eb = EB2");
- __CHECK_STR("_w=13 ", "var_ec = EC2");
- __CHECK_STR("_w=1 ", "var_b = 1");
- __CHECK_STR("_w=170 ", "struct1.struct2.u.var_u8 = 170");
- __CHECK_STR("_w=0xaaaa ", "union1.var_u16 = 0xaaaa");
+ __CHECK_STR("=0xf000000000000001 ", "var_s64 = 0xf000000000000001");
+ __CHECK_STR("=0xfedcba9876543210 ", "var_u64 = 0xfedcba9876543210");
+ __CHECK_STR("=0x80000000 ", "var_s32 = -0x80000000");
+ __CHECK_STR("=0x76543210 ", "var_u32 = 0x76543210");
+ __CHECK_STR("=0x8000 ", "var_s16 = -32768");
+ __CHECK_STR("=0xecec ", "var_u16 = 60652");
+ __CHECK_STR("=128 ", "var_s8 = -128");
+ __CHECK_STR("=255 ", "var_u8 = 255");
+ __CHECK_STR("=11 ", "var_ea = EA2");
+ __CHECK_STR("=12 ", "var_eb = EB2");
+ __CHECK_STR("=13 ", "var_ec = EC2");
+ __CHECK_STR("=1 ", "var_b = 1");
+ __CHECK_STR("=170 ", "struct1[2].struct2[1][2].u.var_u8[2]=170");
+ __CHECK_STR("=0xaaaa ", "union1.var_u16 = 0xaaaa");
+ __CHECK_STR("=171 ", "arr[3]= 171");
+ __CHECK_STR("=172 ", "arr[EA2] =172");
+ __CHECK_STR("=10 ", "enum_arr[EC2]=EA3");
+ __CHECK_STR("=173 ", "matrix[31][7][11]=173");
+ __CHECK_STR("=174 ", "struct1[2].struct2[1][2].u.mat[5][3]=174");
+ __CHECK_STR("=175 ", "struct11[7][5].struct2[0][1].u.mat[3][0]=175");
out:
teardown_fixture(fix);
@@ -105,8 +117,8 @@ static void test_set_global_vars_from_file_succeeds(void)
SYS(out, "%s set_global_vars.bpf.o -G \"@%s\" -vl2 > %s",
fix->veristat, input_file, fix->tmpfile);
read(fix->fd, fix->output, fix->sz);
- __CHECK_STR("_w=0x8000 ", "var_s16 = -32768");
- __CHECK_STR("_w=0xecec ", "var_u16 = 60652");
+ __CHECK_STR("=0x8000 ", "var_s16 = -32768");
+ __CHECK_STR("=0xecec ", "var_u16 = 60652");
out:
close(fd);
@@ -129,6 +141,95 @@ out:
teardown_fixture(fix);
}
+static void test_unsupported_ptr_array_type(void)
+{
+ struct fixture *fix = init_fixture();
+
+ SYS_FAIL(out,
+ "%s set_global_vars.bpf.o -G \"ptr_arr[0] = 0\" -vl2 2> %s",
+ fix->veristat, fix->tmpfile);
+
+ read(fix->fd, fix->output, fix->sz);
+ __CHECK_STR("Can't set ptr_arr[0]. Only ints and enums are supported", "ptr_arr");
+
+out:
+ teardown_fixture(fix);
+}
+
+static void test_array_out_of_bounds(void)
+{
+ struct fixture *fix = init_fixture();
+
+ SYS_FAIL(out,
+ "%s set_global_vars.bpf.o -G \"arr[99] = 0\" -vl2 2> %s",
+ fix->veristat, fix->tmpfile);
+
+ read(fix->fd, fix->output, fix->sz);
+ __CHECK_STR("Array index 99 is out of bounds", "arr[99]");
+
+out:
+ teardown_fixture(fix);
+}
+
+static void test_array_index_not_found(void)
+{
+ struct fixture *fix = init_fixture();
+
+ SYS_FAIL(out,
+ "%s set_global_vars.bpf.o -G \"arr[EG2] = 0\" -vl2 2> %s",
+ fix->veristat, fix->tmpfile);
+
+ read(fix->fd, fix->output, fix->sz);
+ __CHECK_STR("Can't resolve enum value EG2", "arr[EG2]");
+
+out:
+ teardown_fixture(fix);
+}
+
+static void test_array_index_for_non_array(void)
+{
+ struct fixture *fix = init_fixture();
+
+ SYS_FAIL(out,
+ "%s set_global_vars.bpf.o -G \"var_b[0] = 1\" -vl2 2> %s",
+ fix->veristat, fix->tmpfile);
+
+ pread(fix->fd, fix->output, fix->sz, 0);
+ __CHECK_STR("Array index is not expected for var_b", "var_b[0] = 1");
+
+ SYS_FAIL(out,
+ "%s set_global_vars.bpf.o -G \"union1.struct3[0].var_u8_l=1\" -vl2 2> %s",
+ fix->veristat, fix->tmpfile);
+
+ pread(fix->fd, fix->output, fix->sz, 0);
+ __CHECK_STR("Array index is not expected for struct3", "union1.struct3[0].var_u8_l=1");
+
+out:
+ teardown_fixture(fix);
+}
+
+static void test_no_array_index_for_array(void)
+{
+ struct fixture *fix = init_fixture();
+
+ SYS_FAIL(out,
+ "%s set_global_vars.bpf.o -G \"arr = 1\" -vl2 2> %s",
+ fix->veristat, fix->tmpfile);
+
+ pread(fix->fd, fix->output, fix->sz, 0);
+ __CHECK_STR("Can't set arr. Only ints and enums are supported", "arr = 1");
+
+ SYS_FAIL(out,
+ "%s set_global_vars.bpf.o -G \"struct1[0].struct2.u.var_u8[2]=1\" -vl2 2> %s",
+ fix->veristat, fix->tmpfile);
+
+ pread(fix->fd, fix->output, fix->sz, 0);
+ __CHECK_STR("Can't resolve field u for non-composite type", "struct1[0].struct2.u.var_u8[2]=1");
+
+out:
+ teardown_fixture(fix);
+}
+
void test_veristat(void)
{
if (test__start_subtest("set_global_vars_succeeds"))
@@ -139,6 +240,22 @@ void test_veristat(void)
if (test__start_subtest("set_global_vars_from_file_succeeds"))
test_set_global_vars_from_file_succeeds();
+
+ if (test__start_subtest("test_unsupported_ptr_array_type"))
+ test_unsupported_ptr_array_type();
+
+ if (test__start_subtest("test_array_out_of_bounds"))
+ test_array_out_of_bounds();
+
+ if (test__start_subtest("test_array_index_not_found"))
+ test_array_index_not_found();
+
+ if (test__start_subtest("test_array_index_for_non_array"))
+ test_array_index_for_non_array();
+
+ if (test__start_subtest("test_no_array_index_for_array"))
+ test_no_array_index_for_array();
+
}
#undef __CHECK_STR
diff --git a/tools/testing/selftests/bpf/prog_tests/test_xsk.c b/tools/testing/selftests/bpf/prog_tests/test_xsk.c
new file mode 100644
index 000000000000..5af28f359cfd
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_xsk.c
@@ -0,0 +1,2596 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <bpf/bpf.h>
+#include <errno.h>
+#include <linux/bitmap.h>
+#include <linux/if_link.h>
+#include <linux/mman.h>
+#include <linux/netdev.h>
+#include <poll.h>
+#include <pthread.h>
+#include <signal.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+#include "network_helpers.h"
+#include "test_xsk.h"
+#include "xsk_xdp_common.h"
+#include "xsk_xdp_progs.skel.h"
+
+#define DEFAULT_BATCH_SIZE 64
+#define MIN_PKT_SIZE 64
+#define MAX_ETH_JUMBO_SIZE 9000
+#define MAX_INTERFACES 2
+#define MAX_TEARDOWN_ITER 10
+#define MAX_TX_BUDGET_DEFAULT 32
+#define PKT_DUMP_NB_TO_PRINT 16
+/* Just to align the data in the packet */
+#define PKT_HDR_SIZE (sizeof(struct ethhdr) + 2)
+#define POLL_TMOUT 1000
+#define THREAD_TMOUT 3
+#define UMEM_HEADROOM_TEST_SIZE 128
+#define XSK_DESC__INVALID_OPTION (0xffff)
+#define XSK_UMEM__INVALID_FRAME_SIZE (MAX_ETH_JUMBO_SIZE + 1)
+#define XSK_UMEM__LARGE_FRAME_SIZE (3 * 1024)
+#define XSK_UMEM__MAX_FRAME_SIZE (4 * 1024)
+
+static const u8 g_mac[ETH_ALEN] = {0x55, 0x44, 0x33, 0x22, 0x11, 0x00};
+
+bool opt_verbose;
+pthread_barrier_t barr;
+pthread_mutex_t pacing_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+int pkts_in_flight;
+
+/* Each payload word consists of a packet sequence number in the upper
+ * 16 bits and an intra-packet data sequence number in the lower 16 bits.
+ * So the 3rd packet's 5th word of data will contain the number
+ * (2 << 16) | 4, as both are numbered from 0.
+ */
+static void write_payload(void *dest, u32 pkt_nb, u32 start, u32 size)
+{
+ u32 *ptr = (u32 *)dest, i;
+
+ start /= sizeof(*ptr);
+ size /= sizeof(*ptr);
+ for (i = 0; i < size; i++)
+ ptr[i] = htonl(pkt_nb << 16 | (i + start));
+}
+
+static void gen_eth_hdr(struct xsk_socket_info *xsk, struct ethhdr *eth_hdr)
+{
+ memcpy(eth_hdr->h_dest, xsk->dst_mac, ETH_ALEN);
+ memcpy(eth_hdr->h_source, xsk->src_mac, ETH_ALEN);
+ eth_hdr->h_proto = htons(ETH_P_LOOPBACK);
+}
+
+static bool is_umem_valid(struct ifobject *ifobj)
+{
+ return !!ifobj->umem->umem;
+}
+
+static u32 mode_to_xdp_flags(enum test_mode mode)
+{
+ return (mode == TEST_MODE_SKB) ? XDP_FLAGS_SKB_MODE : XDP_FLAGS_DRV_MODE;
+}
+
+static u64 umem_size(struct xsk_umem_info *umem)
+{
+ return umem->num_frames * umem->frame_size;
+}
+
+int xsk_configure_umem(struct ifobject *ifobj, struct xsk_umem_info *umem, void *buffer,
+ u64 size)
+{
+ struct xsk_umem_config cfg = {
+ .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
+ .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
+ .frame_size = umem->frame_size,
+ .frame_headroom = umem->frame_headroom,
+ .flags = XSK_UMEM__DEFAULT_FLAGS
+ };
+ int ret;
+
+ if (umem->fill_size)
+ cfg.fill_size = umem->fill_size;
+
+ if (umem->comp_size)
+ cfg.comp_size = umem->comp_size;
+
+ if (umem->unaligned_mode)
+ cfg.flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG;
+
+ ret = xsk_umem__create(&umem->umem, buffer, size,
+ &umem->fq, &umem->cq, &cfg);
+ if (ret)
+ return ret;
+
+ umem->buffer = buffer;
+ if (ifobj->shared_umem && ifobj->rx_on) {
+ umem->base_addr = umem_size(umem);
+ umem->next_buffer = umem_size(umem);
+ }
+
+ return 0;
+}
+
+static u64 umem_alloc_buffer(struct xsk_umem_info *umem)
+{
+ u64 addr;
+
+ addr = umem->next_buffer;
+ umem->next_buffer += umem->frame_size;
+ if (umem->next_buffer >= umem->base_addr + umem_size(umem))
+ umem->next_buffer = umem->base_addr;
+
+ return addr;
+}
+
+static void umem_reset_alloc(struct xsk_umem_info *umem)
+{
+ umem->next_buffer = 0;
+}
+
+static int enable_busy_poll(struct xsk_socket_info *xsk)
+{
+ int sock_opt;
+
+ sock_opt = 1;
+ if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_PREFER_BUSY_POLL,
+ (void *)&sock_opt, sizeof(sock_opt)) < 0)
+ return -errno;
+
+ sock_opt = 20;
+ if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL,
+ (void *)&sock_opt, sizeof(sock_opt)) < 0)
+ return -errno;
+
+ sock_opt = xsk->batch_size;
+ if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL_BUDGET,
+ (void *)&sock_opt, sizeof(sock_opt)) < 0)
+ return -errno;
+
+ return 0;
+}
+
+int xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem,
+ struct ifobject *ifobject, bool shared)
+{
+ struct xsk_socket_config cfg = {};
+ struct xsk_ring_cons *rxr;
+ struct xsk_ring_prod *txr;
+
+ xsk->umem = umem;
+ cfg.rx_size = xsk->rxqsize;
+ cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
+ cfg.bind_flags = ifobject->bind_flags;
+ if (shared)
+ cfg.bind_flags |= XDP_SHARED_UMEM;
+ if (ifobject->mtu > MAX_ETH_PKT_SIZE)
+ cfg.bind_flags |= XDP_USE_SG;
+ if (umem->comp_size)
+ cfg.tx_size = umem->comp_size;
+ if (umem->fill_size)
+ cfg.rx_size = umem->fill_size;
+
+ txr = ifobject->tx_on ? &xsk->tx : NULL;
+ rxr = ifobject->rx_on ? &xsk->rx : NULL;
+ return xsk_socket__create(&xsk->xsk, ifobject->ifindex, 0, umem->umem, rxr, txr, &cfg);
+}
+
+#define MAX_SKB_FRAGS_PATH "/proc/sys/net/core/max_skb_frags"
+static unsigned int get_max_skb_frags(void)
+{
+ unsigned int max_skb_frags = 0;
+ FILE *file;
+
+ file = fopen(MAX_SKB_FRAGS_PATH, "r");
+ if (!file) {
+ ksft_print_msg("Error opening %s\n", MAX_SKB_FRAGS_PATH);
+ return 0;
+ }
+
+ if (fscanf(file, "%u", &max_skb_frags) != 1)
+ ksft_print_msg("Error reading %s\n", MAX_SKB_FRAGS_PATH);
+
+ fclose(file);
+ return max_skb_frags;
+}
+
+static int set_ring_size(struct ifobject *ifobj)
+{
+ int ret;
+ u32 ctr = 0;
+
+ while (ctr++ < SOCK_RECONF_CTR) {
+ ret = set_hw_ring_size(ifobj->ifname, &ifobj->ring);
+ if (!ret)
+ break;
+
+ /* Give up if retries are exhausted or the error is not EBUSY */
+ if (ctr >= SOCK_RECONF_CTR || errno != EBUSY)
+ return -errno;
+
+ usleep(USLEEP_MAX);
+ }
+
+ return ret;
+}
+
+int hw_ring_size_reset(struct ifobject *ifobj)
+{
+ ifobj->ring.tx_pending = ifobj->set_ring.default_tx;
+ ifobj->ring.rx_pending = ifobj->set_ring.default_rx;
+ return set_ring_size(ifobj);
+}
+
+static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
+ struct ifobject *ifobj_rx)
+{
+ u32 i, j;
+
+ for (i = 0; i < MAX_INTERFACES; i++) {
+ struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx;
+
+ ifobj->xsk = &ifobj->xsk_arr[0];
+ ifobj->use_poll = false;
+ ifobj->use_fill_ring = true;
+ ifobj->release_rx = true;
+ ifobj->validation_func = NULL;
+ ifobj->use_metadata = false;
+
+ if (i == 0) {
+ ifobj->rx_on = false;
+ ifobj->tx_on = true;
+ } else {
+ ifobj->rx_on = true;
+ ifobj->tx_on = false;
+ }
+
+ memset(ifobj->umem, 0, sizeof(*ifobj->umem));
+ ifobj->umem->num_frames = DEFAULT_UMEM_BUFFERS;
+ ifobj->umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
+
+ for (j = 0; j < MAX_SOCKETS; j++) {
+ memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j]));
+ ifobj->xsk_arr[j].rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
+ ifobj->xsk_arr[j].batch_size = DEFAULT_BATCH_SIZE;
+ if (i == 0)
+ ifobj->xsk_arr[j].pkt_stream = test->tx_pkt_stream_default;
+ else
+ ifobj->xsk_arr[j].pkt_stream = test->rx_pkt_stream_default;
+
+ memcpy(ifobj->xsk_arr[j].src_mac, g_mac, ETH_ALEN);
+ memcpy(ifobj->xsk_arr[j].dst_mac, g_mac, ETH_ALEN);
+ ifobj->xsk_arr[j].src_mac[5] += ((j * 2) + 0);
+ ifobj->xsk_arr[j].dst_mac[5] += ((j * 2) + 1);
+ }
+ }
+
+ if (ifobj_tx->hw_ring_size_supp)
+ hw_ring_size_reset(ifobj_tx);
+
+ test->ifobj_tx = ifobj_tx;
+ test->ifobj_rx = ifobj_rx;
+ test->current_step = 0;
+ test->total_steps = 1;
+ test->nb_sockets = 1;
+ test->fail = false;
+ test->set_ring = false;
+ test->adjust_tail = false;
+ test->adjust_tail_support = false;
+ test->mtu = MAX_ETH_PKT_SIZE;
+ test->xdp_prog_rx = ifobj_rx->xdp_progs->progs.xsk_def_prog;
+ test->xskmap_rx = ifobj_rx->xdp_progs->maps.xsk;
+ test->xdp_prog_tx = ifobj_tx->xdp_progs->progs.xsk_def_prog;
+ test->xskmap_tx = ifobj_tx->xdp_progs->maps.xsk;
+}
+
+void test_init(struct test_spec *test, struct ifobject *ifobj_tx,
+ struct ifobject *ifobj_rx, enum test_mode mode,
+ const struct test_spec *test_to_run)
+{
+ struct pkt_stream *tx_pkt_stream;
+ struct pkt_stream *rx_pkt_stream;
+ u32 i;
+
+ tx_pkt_stream = test->tx_pkt_stream_default;
+ rx_pkt_stream = test->rx_pkt_stream_default;
+ memset(test, 0, sizeof(*test));
+ test->tx_pkt_stream_default = tx_pkt_stream;
+ test->rx_pkt_stream_default = rx_pkt_stream;
+
+ for (i = 0; i < MAX_INTERFACES; i++) {
+ struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx;
+
+ ifobj->bind_flags = XDP_USE_NEED_WAKEUP;
+ if (mode == TEST_MODE_ZC)
+ ifobj->bind_flags |= XDP_ZEROCOPY;
+ else
+ ifobj->bind_flags |= XDP_COPY;
+ }
+
+ memcpy(test->name, test_to_run->name, MAX_TEST_NAME_SIZE);
+ test->test_func = test_to_run->test_func;
+ test->mode = mode;
+ __test_spec_init(test, ifobj_tx, ifobj_rx);
+}
+
+static void test_spec_reset(struct test_spec *test)
+{
+ __test_spec_init(test, test->ifobj_tx, test->ifobj_rx);
+}
+
+static void test_spec_set_xdp_prog(struct test_spec *test, struct bpf_program *xdp_prog_rx,
+ struct bpf_program *xdp_prog_tx, struct bpf_map *xskmap_rx,
+ struct bpf_map *xskmap_tx)
+{
+ test->xdp_prog_rx = xdp_prog_rx;
+ test->xdp_prog_tx = xdp_prog_tx;
+ test->xskmap_rx = xskmap_rx;
+ test->xskmap_tx = xskmap_tx;
+}
+
+static int test_spec_set_mtu(struct test_spec *test, int mtu)
+{
+ int err;
+
+ if (test->ifobj_rx->mtu != mtu) {
+ err = xsk_set_mtu(test->ifobj_rx->ifindex, mtu);
+ if (err)
+ return err;
+ test->ifobj_rx->mtu = mtu;
+ }
+ if (test->ifobj_tx->mtu != mtu) {
+ err = xsk_set_mtu(test->ifobj_tx->ifindex, mtu);
+ if (err)
+ return err;
+ test->ifobj_tx->mtu = mtu;
+ }
+
+ return 0;
+}
+
+void pkt_stream_reset(struct pkt_stream *pkt_stream)
+{
+ if (pkt_stream) {
+ pkt_stream->current_pkt_nb = 0;
+ pkt_stream->nb_rx_pkts = 0;
+ }
+}
+
+static struct pkt *pkt_stream_get_next_tx_pkt(struct pkt_stream *pkt_stream)
+{
+ if (pkt_stream->current_pkt_nb >= pkt_stream->nb_pkts)
+ return NULL;
+
+ return &pkt_stream->pkts[pkt_stream->current_pkt_nb++];
+}
+
+static struct pkt *pkt_stream_get_next_rx_pkt(struct pkt_stream *pkt_stream, u32 *pkts_sent)
+{
+ while (pkt_stream->current_pkt_nb < pkt_stream->nb_pkts) {
+ (*pkts_sent)++;
+ if (pkt_stream->pkts[pkt_stream->current_pkt_nb].valid)
+ return &pkt_stream->pkts[pkt_stream->current_pkt_nb++];
+ pkt_stream->current_pkt_nb++;
+ }
+ return NULL;
+}
+
+void pkt_stream_delete(struct pkt_stream *pkt_stream)
+{
+ free(pkt_stream->pkts);
+ free(pkt_stream);
+}
+
+void pkt_stream_restore_default(struct test_spec *test)
+{
+ struct pkt_stream *tx_pkt_stream = test->ifobj_tx->xsk->pkt_stream;
+ struct pkt_stream *rx_pkt_stream = test->ifobj_rx->xsk->pkt_stream;
+
+ if (tx_pkt_stream != test->tx_pkt_stream_default) {
+ pkt_stream_delete(test->ifobj_tx->xsk->pkt_stream);
+ test->ifobj_tx->xsk->pkt_stream = test->tx_pkt_stream_default;
+ }
+
+ if (rx_pkt_stream != test->rx_pkt_stream_default) {
+ pkt_stream_delete(test->ifobj_rx->xsk->pkt_stream);
+ test->ifobj_rx->xsk->pkt_stream = test->rx_pkt_stream_default;
+ }
+}
+
+static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts)
+{
+ struct pkt_stream *pkt_stream;
+
+ pkt_stream = calloc(1, sizeof(*pkt_stream));
+ if (!pkt_stream)
+ return NULL;
+
+ pkt_stream->pkts = calloc(nb_pkts, sizeof(*pkt_stream->pkts));
+ if (!pkt_stream->pkts) {
+ free(pkt_stream);
+ return NULL;
+ }
+
+ pkt_stream->nb_pkts = nb_pkts;
+ return pkt_stream;
+}
+
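+/* Number of frames a packet occupies. In normal mode this follows from
+ * the packet length; in verbatim mode descriptors are sent exactly as
+ * specified, so walk the stream until the XDP_PKT_CONTD chain ends.
+ */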
+static u32 pkt_nb_frags(u32 frame_size, struct pkt_stream *pkt_stream, struct pkt *pkt)
+{
+ u32 nb_frags = 1, next_frag;
+
+ if (!pkt)
+ return 1;
+
+ if (!pkt_stream->verbatim) {
+ if (!pkt->valid || !pkt->len)
+ return 1;
+ return ceil_u32(pkt->len, frame_size);
+ }
+
+ /* Search for the end of the packet in verbatim mode */
+ if (!pkt_continues(pkt->options))
+ return nb_frags;
+
+ next_frag = pkt_stream->current_pkt_nb;
+ pkt++;
+ while (next_frag++ < pkt_stream->nb_pkts) {
+ nb_frags++;
+ if (!pkt_continues(pkt->options) || !pkt->valid)
+ break;
+ pkt++;
+ }
+ return nb_frags;
+}
+
+static bool set_pkt_valid(int offset, u32 len)
+{
+ return len <= MAX_ETH_JUMBO_SIZE;
+}
+
+static void pkt_set(struct pkt_stream *pkt_stream, struct pkt *pkt, int offset, u32 len)
+{
+ pkt->offset = offset;
+ pkt->len = len;
+ pkt->valid = set_pkt_valid(offset, len);
+}
+
+static void pkt_stream_pkt_set(struct pkt_stream *pkt_stream, struct pkt *pkt, int offset, u32 len)
+{
+ bool prev_pkt_valid = pkt->valid;
+
+ pkt_set(pkt_stream, pkt, offset, len);
+ pkt_stream->nb_valid_entries += pkt->valid - prev_pkt_valid;
+}
+
+static u32 pkt_get_buffer_len(struct xsk_umem_info *umem, u32 len)
+{
+ return ceil_u32(len, umem->frame_size) * umem->frame_size;
+}
+
+static struct pkt_stream *__pkt_stream_generate(u32 nb_pkts, u32 pkt_len, u32 nb_start, u32 nb_off)
+{
+ struct pkt_stream *pkt_stream;
+ u32 i;
+
+ pkt_stream = __pkt_stream_alloc(nb_pkts);
+ if (!pkt_stream)
+ return NULL;
+
+ pkt_stream->nb_pkts = nb_pkts;
+ pkt_stream->max_pkt_len = pkt_len;
+ for (i = 0; i < nb_pkts; i++) {
+ struct pkt *pkt = &pkt_stream->pkts[i];
+
+ pkt_stream_pkt_set(pkt_stream, pkt, 0, pkt_len);
+ pkt->pkt_nb = nb_start + i * nb_off;
+ }
+
+ return pkt_stream;
+}
+
+struct pkt_stream *pkt_stream_generate(u32 nb_pkts, u32 pkt_len)
+{
+ return __pkt_stream_generate(nb_pkts, pkt_len, 0, 1);
+}
+
+static struct pkt_stream *pkt_stream_clone(struct pkt_stream *pkt_stream)
+{
+ return pkt_stream_generate(pkt_stream->nb_pkts, pkt_stream->pkts[0].len);
+}
+
+static int pkt_stream_replace_ifobject(struct ifobject *ifobj, u32 nb_pkts, u32 pkt_len)
+{
+ ifobj->xsk->pkt_stream = pkt_stream_generate(nb_pkts, pkt_len);
+
+ if (!ifobj->xsk->pkt_stream)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len)
+{
+ int ret;
+
+ ret = pkt_stream_replace_ifobject(test->ifobj_tx, nb_pkts, pkt_len);
+ if (ret)
+ return ret;
+
+ return pkt_stream_replace_ifobject(test->ifobj_rx, nb_pkts, pkt_len);
+}
+
+static int __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len,
+ int offset)
+{
+ struct pkt_stream *pkt_stream;
+ u32 i;
+
+ pkt_stream = pkt_stream_clone(ifobj->xsk->pkt_stream);
+ if (!pkt_stream)
+ return -ENOMEM;
+
+ for (i = 1; i < ifobj->xsk->pkt_stream->nb_pkts; i += 2)
+ pkt_stream_pkt_set(pkt_stream, &pkt_stream->pkts[i], offset, pkt_len);
+
+ ifobj->xsk->pkt_stream = pkt_stream;
+
+ return 0;
+}
+
+static int pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset)
+{
+ int ret = __pkt_stream_replace_half(test->ifobj_tx, pkt_len, offset);
+
+ if (ret)
+ return ret;
+
+ return __pkt_stream_replace_half(test->ifobj_rx, pkt_len, offset);
+}
+
+static int pkt_stream_receive_half(struct test_spec *test)
+{
+ struct pkt_stream *pkt_stream = test->ifobj_tx->xsk->pkt_stream;
+ u32 i;
+
+ if (test->ifobj_rx->xsk->pkt_stream != test->rx_pkt_stream_default)
+ /* The packet stream has already been replaced, so release this
+ * one. The newly created stream is freed by
+ * pkt_stream_restore_default() at the end of the test.
+ */
+ pkt_stream_delete(test->ifobj_rx->xsk->pkt_stream);
+
+ test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(pkt_stream->nb_pkts,
+ pkt_stream->pkts[0].len);
+ if (!test->ifobj_rx->xsk->pkt_stream)
+ return -ENOMEM;
+
+ pkt_stream = test->ifobj_rx->xsk->pkt_stream;
+ for (i = 1; i < pkt_stream->nb_pkts; i += 2)
+ pkt_stream->pkts[i].valid = false;
+
+ pkt_stream->nb_valid_entries /= 2;
+
+ return 0;
+}
+
+static int pkt_stream_even_odd_sequence(struct test_spec *test)
+{
+ struct pkt_stream *pkt_stream;
+ u32 i;
+
+ for (i = 0; i < test->nb_sockets; i++) {
+ pkt_stream = test->ifobj_tx->xsk_arr[i].pkt_stream;
+ pkt_stream = __pkt_stream_generate(pkt_stream->nb_pkts / 2,
+ pkt_stream->pkts[0].len, i, 2);
+ if (!pkt_stream)
+ return -ENOMEM;
+ test->ifobj_tx->xsk_arr[i].pkt_stream = pkt_stream;
+
+ pkt_stream = test->ifobj_rx->xsk_arr[i].pkt_stream;
+ pkt_stream = __pkt_stream_generate(pkt_stream->nb_pkts / 2,
+ pkt_stream->pkts[0].len, i, 2);
+ if (!pkt_stream)
+ return -ENOMEM;
+ test->ifobj_rx->xsk_arr[i].pkt_stream = pkt_stream;
+ }
+
+ return 0;
+}
+
+static void release_even_odd_sequence(struct test_spec *test)
+{
+ struct pkt_stream *later_free_tx = test->ifobj_tx->xsk->pkt_stream;
+ struct pkt_stream *later_free_rx = test->ifobj_rx->xsk->pkt_stream;
+ int i;
+
+ for (i = 0; i < test->nb_sockets; i++) {
+ /* later_free_{rx/tx} will be freed by restore_default() */
+ if (test->ifobj_tx->xsk_arr[i].pkt_stream != later_free_tx)
+ pkt_stream_delete(test->ifobj_tx->xsk_arr[i].pkt_stream);
+ if (test->ifobj_rx->xsk_arr[i].pkt_stream != later_free_rx)
+ pkt_stream_delete(test->ifobj_rx->xsk_arr[i].pkt_stream);
+ }
+}
+
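+/* Invalid packets keep their raw offset as the address so the bogus
+ * descriptor is transmitted unmodified; valid packets get a freshly
+ * allocated UMEM buffer plus the configured offset.
+ */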
+static u64 pkt_get_addr(struct pkt *pkt, struct xsk_umem_info *umem)
+{
+ if (!pkt->valid)
+ return pkt->offset;
+ return pkt->offset + umem_alloc_buffer(umem);
+}
+
+static void pkt_stream_cancel(struct pkt_stream *pkt_stream)
+{
+ pkt_stream->current_pkt_nb--;
+}
+
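+/* Write the test payload into the UMEM at @addr: an Ethernet header on
+ * the first fragment, followed by 32-bit words carrying the packet
+ * number in the upper 16 bits and a running sequence number in the
+ * lower 16 bits, which the receiver validates in is_frag_valid().
+ */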
+static void pkt_generate(struct xsk_socket_info *xsk, struct xsk_umem_info *umem, u64 addr, u32 len,
+ u32 pkt_nb, u32 bytes_written)
+{
+ void *data = xsk_umem__get_data(umem->buffer, addr);
+
+ if (len < MIN_PKT_SIZE)
+ return;
+
+ if (!bytes_written) {
+ gen_eth_hdr(xsk, data);
+
+ len -= PKT_HDR_SIZE;
+ data += PKT_HDR_SIZE;
+ } else {
+ bytes_written -= PKT_HDR_SIZE;
+ }
+
+ write_payload(data, pkt_nb, bytes_written, len);
+}
+
+static struct pkt_stream *__pkt_stream_generate_custom(struct ifobject *ifobj, struct pkt *frames,
+ u32 nb_frames, bool verbatim)
+{
+ u32 i, len = 0, pkt_nb = 0, payload = 0;
+ struct pkt_stream *pkt_stream;
+
+ pkt_stream = __pkt_stream_alloc(nb_frames);
+ if (!pkt_stream)
+ return NULL;
+
+ for (i = 0; i < nb_frames; i++) {
+ struct pkt *pkt = &pkt_stream->pkts[pkt_nb];
+ struct pkt *frame = &frames[i];
+
+ pkt->offset = frame->offset;
+ if (verbatim) {
+ *pkt = *frame;
+ pkt->pkt_nb = payload;
+ if (!frame->valid || !pkt_continues(frame->options))
+ payload++;
+ } else {
+ if (frame->valid)
+ len += frame->len;
+ if (frame->valid && pkt_continues(frame->options))
+ continue;
+
+ pkt->pkt_nb = pkt_nb;
+ pkt->len = len;
+ pkt->valid = frame->valid;
+ pkt->options = 0;
+
+ len = 0;
+ }
+
+ print_verbose("offset: %d len: %u valid: %u options: %u pkt_nb: %u\n",
+ pkt->offset, pkt->len, pkt->valid, pkt->options, pkt->pkt_nb);
+
+ if (pkt->valid && pkt->len > pkt_stream->max_pkt_len)
+ pkt_stream->max_pkt_len = pkt->len;
+
+ if (pkt->valid)
+ pkt_stream->nb_valid_entries++;
+
+ pkt_nb++;
+ }
+
+ pkt_stream->nb_pkts = pkt_nb;
+ pkt_stream->verbatim = verbatim;
+ return pkt_stream;
+}
+
+static int pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, u32 nb_pkts)
+{
+ struct pkt_stream *pkt_stream;
+
+ pkt_stream = __pkt_stream_generate_custom(test->ifobj_tx, pkts, nb_pkts, true);
+ if (!pkt_stream)
+ return -ENOMEM;
+ test->ifobj_tx->xsk->pkt_stream = pkt_stream;
+
+ pkt_stream = __pkt_stream_generate_custom(test->ifobj_rx, pkts, nb_pkts, false);
+ if (!pkt_stream)
+ return -ENOMEM;
+ test->ifobj_rx->xsk->pkt_stream = pkt_stream;
+
+ return 0;
+}
+
+static void pkt_print_data(u32 *data, u32 cnt)
+{
+ u32 i;
+
+ for (i = 0; i < cnt; i++) {
+ u32 seqnum, pkt_nb;
+
+ seqnum = ntohl(*data) & 0xffff;
+ pkt_nb = ntohl(*data) >> 16;
+ ksft_print_msg("%u:%u ", pkt_nb, seqnum);
+ data++;
+ }
+}
+
+static void pkt_dump(void *pkt, u32 len, bool eth_header)
+{
+ struct ethhdr *ethhdr = pkt;
+ u32 i, *data;
+
+ if (eth_header) {
+ /* Extract the L2 header */
+ ksft_print_msg("DEBUG>> L2: dst mac: ");
+ for (i = 0; i < ETH_ALEN; i++)
+ ksft_print_msg("%02X", ethhdr->h_dest[i]);
+
+ ksft_print_msg("\nDEBUG>> L2: src mac: ");
+ for (i = 0; i < ETH_ALEN; i++)
+ ksft_print_msg("%02X", ethhdr->h_source[i]);
+
+ data = pkt + PKT_HDR_SIZE;
+ } else {
+ data = pkt;
+ }
+
+ /* Extract the L5 payload */
+ ksft_print_msg("\nDEBUG>> L5: seqnum: ");
+ pkt_print_data(data, PKT_DUMP_NB_TO_PRINT);
+ ksft_print_msg("....");
+ if (len > PKT_DUMP_NB_TO_PRINT * sizeof(u32)) {
+ ksft_print_msg("\n.... ");
+ pkt_print_data(data + len / sizeof(u32) - PKT_DUMP_NB_TO_PRINT,
+ PKT_DUMP_NB_TO_PRINT);
+ }
+ ksft_print_msg("\n---------------------------------------\n");
+}
+
+static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr)
+{
+ u32 headroom = umem->unaligned_mode ? 0 : umem->frame_headroom;
+ u32 offset = addr % umem->frame_size, expected_offset;
+ int pkt_offset = pkt->valid ? pkt->offset : 0;
+
+ if (!umem->unaligned_mode)
+ pkt_offset = 0;
+
+ expected_offset = (pkt_offset + headroom + XDP_PACKET_HEADROOM) % umem->frame_size;
+
+ if (offset == expected_offset)
+ return true;
+
+ ksft_print_msg("[%s] expected [%u], got [%u]\n", __func__, expected_offset, offset);
+ return false;
+}
+
+static bool is_metadata_correct(struct pkt *pkt, void *buffer, u64 addr)
+{
+ void *data = xsk_umem__get_data(buffer, addr);
+ struct xdp_info *meta = data - sizeof(struct xdp_info);
+
+ if (meta->count != pkt->pkt_nb) {
+ ksft_print_msg("[%s] expected meta_count [%d], got meta_count [%llu]\n",
+ __func__, pkt->pkt_nb,
+ (unsigned long long)meta->count);
+ return false;
+ }
+
+ return true;
+}
+
+static int is_adjust_tail_supported(struct xsk_xdp_progs *skel_rx, bool *supported)
+{
+ struct bpf_map *data_map;
+ int adjust_value = 0;
+ int key = 0;
+ int ret;
+
+ data_map = bpf_object__find_map_by_name(skel_rx->obj, "xsk_xdp_.bss");
+ if (!data_map || !bpf_map__is_internal(data_map)) {
+ ksft_print_msg("Error: could not find bss section of XDP program\n");
+ return -EINVAL;
+ }
+
+ ret = bpf_map_lookup_elem(bpf_map__fd(data_map), &key, &adjust_value);
+ if (ret) {
+ ksft_print_msg("Error: bpf_map_lookup_elem failed with error %d\n", ret);
+ return ret;
+ }
+
+ /* The XDP program sets 'adjust_value' to -EOPNOTSUPP if the
+ * bpf_xdp_adjust_tail() helper is not supported. Skip the
+ * adjust_tail test case in that scenario.
+ */
+ *supported = adjust_value != -EOPNOTSUPP;
+
+ return 0;
+}
+
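+/* Validate one received fragment: the address must lie inside the UMEM
+ * (and inside a single frame in aligned mode) and the payload words
+ * must carry the expected packet number and sequence numbers, checked
+ * at both the start and the end of the fragment.
+ */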
+static bool is_frag_valid(struct xsk_umem_info *umem, u64 addr, u32 len, u32 expected_pkt_nb,
+ u32 bytes_processed)
+{
+ u32 seqnum, pkt_nb, *pkt_data, words_to_end, expected_seqnum;
+ void *data = xsk_umem__get_data(umem->buffer, addr);
+
+ addr -= umem->base_addr;
+
+ if (addr >= umem->num_frames * umem->frame_size ||
+ addr + len > umem->num_frames * umem->frame_size) {
+ ksft_print_msg("Frag invalid addr: %llx len: %u\n",
+ (unsigned long long)addr, len);
+ return false;
+ }
+ if (!umem->unaligned_mode && addr % umem->frame_size + len > umem->frame_size) {
+ ksft_print_msg("Frag crosses frame boundary addr: %llx len: %u\n",
+ (unsigned long long)addr, len);
+ return false;
+ }
+
+ pkt_data = data;
+ if (!bytes_processed) {
+ pkt_data += PKT_HDR_SIZE / sizeof(*pkt_data);
+ len -= PKT_HDR_SIZE;
+ } else {
+ bytes_processed -= PKT_HDR_SIZE;
+ }
+
+ expected_seqnum = bytes_processed / sizeof(*pkt_data);
+ seqnum = ntohl(*pkt_data) & 0xffff;
+ pkt_nb = ntohl(*pkt_data) >> 16;
+
+ if (expected_pkt_nb != pkt_nb) {
+ ksft_print_msg("[%s] expected pkt_nb [%u], got pkt_nb [%u]\n",
+ __func__, expected_pkt_nb, pkt_nb);
+ goto error;
+ }
+ if (expected_seqnum != seqnum) {
+ ksft_print_msg("[%s] expected seqnum at start [%u], got seqnum [%u]\n",
+ __func__, expected_seqnum, seqnum);
+ goto error;
+ }
+
+ words_to_end = len / sizeof(*pkt_data) - 1;
+ pkt_data += words_to_end;
+ seqnum = ntohl(*pkt_data) & 0xffff;
+ expected_seqnum += words_to_end;
+ if (expected_seqnum != seqnum) {
+ ksft_print_msg("[%s] expected seqnum at end [%u], got seqnum [%u]\n",
+ __func__, expected_seqnum, seqnum);
+ goto error;
+ }
+
+ return true;
+
+error:
+ pkt_dump(data, len, !bytes_processed);
+ return false;
+}
+
+static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len)
+{
+ if (pkt->len != len) {
+ ksft_print_msg("[%s] expected packet length [%d], got length [%d]\n",
+ __func__, pkt->len, len);
+ pkt_dump(xsk_umem__get_data(buffer, addr), len, true);
+ return false;
+ }
+
+ return true;
+}
+
+static u32 load_value(u32 *counter)
+{
+ return __atomic_load_n(counter, __ATOMIC_ACQUIRE);
+}
+
+static bool kick_tx_with_check(struct xsk_socket_info *xsk, int *ret)
+{
+ u32 max_budget = MAX_TX_BUDGET_DEFAULT;
+ u32 cons, ready_to_send;
+ int delta;
+
+ cons = load_value(xsk->tx.consumer);
+ ready_to_send = load_value(xsk->tx.producer) - cons;
+ *ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
+
+ delta = load_value(xsk->tx.consumer) - cons;
+ /* When the max budget limit is hit in the while loop of
+ * __xsk_generic_xmit(), the socket should consume exactly
+ * @max_budget descriptors in one send. Make sure the number of
+ * descriptors to be sent is larger than @max_budget, otherwise
+ * tx.consumer is updated early in xskq_cons_peek_desc() and the
+ * issue this check verifies stays hidden.
+ */
+ if (ready_to_send > max_budget && delta != max_budget)
+ return false;
+
+ return true;
+}
+
+int kick_tx(struct xsk_socket_info *xsk)
+{
+ int ret;
+
+ if (xsk->check_consumer) {
+ if (!kick_tx_with_check(xsk, &ret))
+ return TEST_FAILURE;
+ } else {
+ ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
+ }
+ if (ret >= 0)
+ return TEST_PASS;
+ if (errno == ENOBUFS || errno == EAGAIN || errno == EBUSY || errno == ENETDOWN) {
+ usleep(100);
+ return TEST_PASS;
+ }
+ return TEST_FAILURE;
+}
+
+int kick_rx(struct xsk_socket_info *xsk)
+{
+ int ret;
+
+ ret = recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
+ if (ret < 0)
+ return TEST_FAILURE;
+
+ return TEST_PASS;
+}
+
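+/* Reap up to @batch_size entries from the completion ring, kicking the
+ * kernel first if the Tx ring needs a wakeup. Completing more packets
+ * than are outstanding indicates a bug and fails the test.
+ */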
+static int complete_pkts(struct xsk_socket_info *xsk, int batch_size)
+{
+ unsigned int rcvd;
+ u32 idx;
+ int ret;
+
+ if (xsk_ring_prod__needs_wakeup(&xsk->tx)) {
+ ret = kick_tx(xsk);
+ if (ret)
+ return TEST_FAILURE;
+ }
+
+ rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx);
+ if (rcvd) {
+ if (rcvd > xsk->outstanding_tx) {
+ u64 addr = *xsk_ring_cons__comp_addr(&xsk->umem->cq, idx + rcvd - 1);
+
+ ksft_print_msg("[%s] Too many packets completed\n", __func__);
+ ksft_print_msg("Last completion address: %llx\n",
+ (unsigned long long)addr);
+ return TEST_FAILURE;
+ }
+
+ xsk_ring_cons__release(&xsk->umem->cq, rcvd);
+ xsk->outstanding_tx -= rcvd;
+ }
+
+ return TEST_PASS;
+}
+
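+/* Receive one batch on @xsk: kick or poll the socket, replenish the
+ * fill ring and validate every fragment against the expected packet
+ * stream. A partially received multi-buffer packet is cancelled and
+ * re-read on the next call so packets are only ever validated whole.
+ */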
+static int __receive_pkts(struct test_spec *test, struct xsk_socket_info *xsk)
+{
+ u32 frags_processed = 0, nb_frags = 0, pkt_len = 0;
+ u32 idx_rx = 0, idx_fq = 0, rcvd, pkts_sent = 0;
+ struct pkt_stream *pkt_stream = xsk->pkt_stream;
+ struct ifobject *ifobj = test->ifobj_rx;
+ struct xsk_umem_info *umem = xsk->umem;
+ struct pollfd fds = { };
+ struct pkt *pkt;
+ u64 first_addr = 0;
+ int ret;
+
+ fds.fd = xsk_socket__fd(xsk->xsk);
+ fds.events = POLLIN;
+
+ ret = kick_rx(xsk);
+ if (ret)
+ return TEST_FAILURE;
+
+ if (ifobj->use_poll) {
+ ret = poll(&fds, 1, POLL_TMOUT);
+ if (ret < 0)
+ return TEST_FAILURE;
+
+ if (!ret) {
+ if (!is_umem_valid(test->ifobj_tx))
+ return TEST_PASS;
+
+ ksft_print_msg("ERROR: [%s] Poll timed out\n", __func__);
+ return TEST_CONTINUE;
+ }
+
+ if (!(fds.revents & POLLIN))
+ return TEST_CONTINUE;
+ }
+
+ rcvd = xsk_ring_cons__peek(&xsk->rx, xsk->batch_size, &idx_rx);
+ if (!rcvd)
+ return TEST_CONTINUE;
+
+ if (ifobj->use_fill_ring) {
+ ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
+ while (ret != rcvd) {
+ if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
+ ret = poll(&fds, 1, POLL_TMOUT);
+ if (ret < 0)
+ return TEST_FAILURE;
+ }
+ ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
+ }
+ }
+
+ while (frags_processed < rcvd) {
+ const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++);
+ u64 addr = desc->addr, orig;
+
+ orig = xsk_umem__extract_addr(addr);
+ addr = xsk_umem__add_offset_to_addr(addr);
+
+ if (!nb_frags) {
+ pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent);
+ if (!pkt) {
+ ksft_print_msg("[%s] received too many packets addr: %lx len %u\n",
+ __func__, addr, desc->len);
+ return TEST_FAILURE;
+ }
+ }
+
+ print_verbose("Rx: addr: %lx len: %u options: %u pkt_nb: %u valid: %u\n",
+ addr, desc->len, desc->options, pkt->pkt_nb, pkt->valid);
+
+ if (!is_frag_valid(umem, addr, desc->len, pkt->pkt_nb, pkt_len) ||
+ !is_offset_correct(umem, pkt, addr) || (ifobj->use_metadata &&
+ !is_metadata_correct(pkt, umem->buffer, addr)))
+ return TEST_FAILURE;
+
+ if (!nb_frags++)
+ first_addr = addr;
+ frags_processed++;
+ pkt_len += desc->len;
+ if (ifobj->use_fill_ring)
+ *xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = orig;
+
+ if (pkt_continues(desc->options))
+ continue;
+
+ /* The complete packet has been received */
+ if (!is_pkt_valid(pkt, umem->buffer, first_addr, pkt_len) ||
+ !is_offset_correct(umem, pkt, addr))
+ return TEST_FAILURE;
+
+ pkt_stream->nb_rx_pkts++;
+ nb_frags = 0;
+ pkt_len = 0;
+ }
+
+ if (nb_frags) {
+ /* In the middle of a packet. Start over from beginning of packet. */
+ idx_rx -= nb_frags;
+ xsk_ring_cons__cancel(&xsk->rx, nb_frags);
+ if (ifobj->use_fill_ring) {
+ idx_fq -= nb_frags;
+ xsk_ring_prod__cancel(&umem->fq, nb_frags);
+ }
+ frags_processed -= nb_frags;
+ }
+
+ if (ifobj->use_fill_ring)
+ xsk_ring_prod__submit(&umem->fq, frags_processed);
+ if (ifobj->release_rx)
+ xsk_ring_cons__release(&xsk->rx, frags_processed);
+
+ pthread_mutex_lock(&pacing_mutex);
+ pkts_in_flight -= pkts_sent;
+ pthread_mutex_unlock(&pacing_mutex);
+ pkts_sent = 0;
+
+ return TEST_CONTINUE;
+}
+
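+/* A socket without a packet stream has nothing to receive and is
+ * marked done immediately. Returns true only once every socket in the
+ * test has received all of its valid packets.
+ */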
+bool all_packets_received(struct test_spec *test, struct xsk_socket_info *xsk, u32 sock_num,
+ unsigned long *bitmap)
+{
+ struct pkt_stream *pkt_stream = xsk->pkt_stream;
+
+ if (!pkt_stream) {
+ __set_bit(sock_num, bitmap);
+ return false;
+ }
+
+ if (pkt_stream->nb_rx_pkts == pkt_stream->nb_valid_entries) {
+ __set_bit(sock_num, bitmap);
+ if (bitmap_full(bitmap, test->nb_sockets))
+ return true;
+ }
+
+ return false;
+}
+
+static int receive_pkts(struct test_spec *test)
+{
+ struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
+ DECLARE_BITMAP(bitmap, test->nb_sockets);
+ struct xsk_socket_info *xsk;
+ u32 sock_num = 0;
+ int res, ret;
+
+ bitmap_zero(bitmap, test->nb_sockets);
+
+ ret = gettimeofday(&tv_now, NULL);
+ if (ret)
+ return TEST_FAILURE;
+
+ timeradd(&tv_now, &tv_timeout, &tv_end);
+
+ while (1) {
+ xsk = &test->ifobj_rx->xsk_arr[sock_num];
+
+ if ((all_packets_received(test, xsk, sock_num, bitmap)))
+ break;
+
+ res = __receive_pkts(test, xsk);
+ if (!(res == TEST_PASS || res == TEST_CONTINUE))
+ return res;
+
+ ret = gettimeofday(&tv_now, NULL);
+ if (ret)
+ return TEST_FAILURE;
+
+ if (timercmp(&tv_now, &tv_end, >)) {
+ ksft_print_msg("ERROR: [%s] Receive loop timed out\n", __func__);
+ return TEST_FAILURE;
+ }
+ sock_num = (sock_num + 1) % test->nb_sockets;
+ }
+
+ return TEST_PASS;
+}
+
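+/* Queue up to one batch of descriptors on the Tx ring. Transmission is
+ * paced against pkts_in_flight so the receiver's UMEM cannot be
+ * overrun, and packets larger than a frame are split into fragments
+ * chained together with XDP_PKT_CONTD.
+ */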
+static int __send_pkts(struct ifobject *ifobject, struct xsk_socket_info *xsk, bool timeout)
+{
+ u32 i, idx = 0, valid_pkts = 0, valid_frags = 0, buffer_len;
+ struct pkt_stream *pkt_stream = xsk->pkt_stream;
+ struct xsk_umem_info *umem = ifobject->umem;
+ bool use_poll = ifobject->use_poll;
+ struct pollfd fds = { };
+ int ret;
+
+ buffer_len = pkt_get_buffer_len(umem, pkt_stream->max_pkt_len);
+ /* pkts_in_flight might be negative if many invalid packets are sent */
+ if (pkts_in_flight >= (int)((umem_size(umem) - xsk->batch_size * buffer_len) /
+ buffer_len)) {
+ ret = kick_tx(xsk);
+ if (ret)
+ return TEST_FAILURE;
+ return TEST_CONTINUE;
+ }
+
+ fds.fd = xsk_socket__fd(xsk->xsk);
+ fds.events = POLLOUT;
+
+ while (xsk_ring_prod__reserve(&xsk->tx, xsk->batch_size, &idx) < xsk->batch_size) {
+ if (use_poll) {
+ ret = poll(&fds, 1, POLL_TMOUT);
+ if (timeout) {
+ if (ret < 0) {
+ ksft_print_msg("ERROR: [%s] Poll error %d\n",
+ __func__, errno);
+ return TEST_FAILURE;
+ }
+ if (ret == 0)
+ return TEST_PASS;
+ break;
+ }
+ if (ret <= 0) {
+ ksft_print_msg("ERROR: [%s] Poll error %d\n",
+ __func__, errno);
+ return TEST_FAILURE;
+ }
+ }
+
+ complete_pkts(xsk, xsk->batch_size);
+ }
+
+ for (i = 0; i < xsk->batch_size; i++) {
+ struct pkt *pkt = pkt_stream_get_next_tx_pkt(pkt_stream);
+ u32 nb_frags_left, nb_frags, bytes_written = 0;
+
+ if (!pkt)
+ break;
+
+ nb_frags = pkt_nb_frags(umem->frame_size, pkt_stream, pkt);
+ if (nb_frags > xsk->batch_size - i) {
+ pkt_stream_cancel(pkt_stream);
+ xsk_ring_prod__cancel(&xsk->tx, xsk->batch_size - i);
+ break;
+ }
+ nb_frags_left = nb_frags;
+
+ while (nb_frags_left--) {
+ struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i);
+
+ tx_desc->addr = pkt_get_addr(pkt, ifobject->umem);
+ if (pkt_stream->verbatim) {
+ tx_desc->len = pkt->len;
+ tx_desc->options = pkt->options;
+ } else if (nb_frags_left) {
+ tx_desc->len = umem->frame_size;
+ tx_desc->options = XDP_PKT_CONTD;
+ } else {
+ tx_desc->len = pkt->len - bytes_written;
+ tx_desc->options = 0;
+ }
+ if (pkt->valid)
+ pkt_generate(xsk, umem, tx_desc->addr, tx_desc->len, pkt->pkt_nb,
+ bytes_written);
+ bytes_written += tx_desc->len;
+
+ print_verbose("Tx addr: %llx len: %u options: %u pkt_nb: %u\n",
+ tx_desc->addr, tx_desc->len, tx_desc->options, pkt->pkt_nb);
+
+ if (nb_frags_left) {
+ i++;
+ if (pkt_stream->verbatim)
+ pkt = pkt_stream_get_next_tx_pkt(pkt_stream);
+ }
+ }
+
+ if (pkt && pkt->valid) {
+ valid_pkts++;
+ valid_frags += nb_frags;
+ }
+ }
+
+ pthread_mutex_lock(&pacing_mutex);
+ pkts_in_flight += valid_pkts;
+ pthread_mutex_unlock(&pacing_mutex);
+
+ xsk_ring_prod__submit(&xsk->tx, i);
+ xsk->outstanding_tx += valid_frags;
+
+ if (use_poll) {
+ ret = poll(&fds, 1, POLL_TMOUT);
+ if (ret <= 0) {
+ if (ret == 0 && timeout)
+ return TEST_PASS;
+
+ ksft_print_msg("ERROR: [%s] Poll error %d\n", __func__, ret);
+ return TEST_FAILURE;
+ }
+ }
+
+ if (!timeout) {
+ if (complete_pkts(xsk, i))
+ return TEST_FAILURE;
+
+ usleep(10);
+ return TEST_PASS;
+ }
+
+ return TEST_CONTINUE;
+}
+
+static int wait_for_tx_completion(struct xsk_socket_info *xsk)
+{
+ struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
+ int ret;
+
+ ret = gettimeofday(&tv_now, NULL);
+ if (ret)
+ return TEST_FAILURE;
+ timeradd(&tv_now, &tv_timeout, &tv_end);
+
+ while (xsk->outstanding_tx) {
+ ret = gettimeofday(&tv_now, NULL);
+ if (ret)
+ return TEST_FAILURE;
+ if (timercmp(&tv_now, &tv_end, >)) {
+ ksft_print_msg("ERROR: [%s] Transmission loop timed out\n", __func__);
+ return TEST_FAILURE;
+ }
+
+ complete_pkts(xsk, xsk->batch_size);
+ }
+
+ return TEST_PASS;
+}
+
+bool all_packets_sent(struct test_spec *test, unsigned long *bitmap)
+{
+ return bitmap_full(bitmap, test->nb_sockets);
+}
+
+static int send_pkts(struct test_spec *test, struct ifobject *ifobject)
+{
+ bool timeout = !is_umem_valid(test->ifobj_rx);
+ DECLARE_BITMAP(bitmap, test->nb_sockets);
+ u32 i;
+ int ret;
+
+ bitmap_zero(bitmap, test->nb_sockets);
+
+ while (!(all_packets_sent(test, bitmap))) {
+ for (i = 0; i < test->nb_sockets; i++) {
+ struct pkt_stream *pkt_stream;
+
+ pkt_stream = ifobject->xsk_arr[i].pkt_stream;
+ if (!pkt_stream || pkt_stream->current_pkt_nb >= pkt_stream->nb_pkts) {
+ __set_bit(i, bitmap);
+ continue;
+ }
+ ret = __send_pkts(ifobject, &ifobject->xsk_arr[i], timeout);
+ if (ret == TEST_CONTINUE && !test->fail)
+ continue;
+
+ if ((ret || test->fail) && !timeout)
+ return TEST_FAILURE;
+
+ if (ret == TEST_PASS && timeout)
+ return ret;
+
+ ret = wait_for_tx_completion(&ifobject->xsk_arr[i]);
+ if (ret)
+ return TEST_FAILURE;
+ }
+ }
+
+ return TEST_PASS;
+}
+
+static int get_xsk_stats(struct xsk_socket *xsk, struct xdp_statistics *stats)
+{
+ int fd = xsk_socket__fd(xsk), err;
+ socklen_t optlen, expected_len;
+
+ optlen = sizeof(*stats);
+ err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, stats, &optlen);
+ if (err) {
+ ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n",
+ __func__, -err, strerror(-err));
+ return TEST_FAILURE;
+ }
+
+ expected_len = sizeof(struct xdp_statistics);
+ if (optlen != expected_len) {
+ ksft_print_msg("[%s] getsockopt optlen error. Expected: %u got: %u\n",
+ __func__, expected_len, optlen);
+ return TEST_FAILURE;
+ }
+
+ return TEST_PASS;
+}
+
+static int validate_rx_dropped(struct ifobject *ifobject)
+{
+ struct xsk_socket *xsk = ifobject->xsk->xsk;
+ struct xdp_statistics stats;
+ int err;
+
+ err = kick_rx(ifobject->xsk);
+ if (err)
+ return TEST_FAILURE;
+
+ err = get_xsk_stats(xsk, &stats);
+ if (err)
+ return TEST_FAILURE;
+
+ /* The receiver calls getsockopt after receiving the last (valid)
+ * packet which is not the final packet sent in this test (valid and
+ * invalid packets are sent in alternating fashion with the final
+ * packet being invalid). Since the last packet may or may not have
+ * been dropped already, both outcomes must be allowed.
+ */
+ if (stats.rx_dropped == ifobject->xsk->pkt_stream->nb_pkts / 2 ||
+ stats.rx_dropped == ifobject->xsk->pkt_stream->nb_pkts / 2 - 1)
+ return TEST_PASS;
+
+ return TEST_FAILURE;
+}
+
+static int validate_rx_full(struct ifobject *ifobject)
+{
+ struct xsk_socket *xsk = ifobject->xsk->xsk;
+ struct xdp_statistics stats;
+ int err;
+
+ usleep(1000);
+ err = kick_rx(ifobject->xsk);
+ if (err)
+ return TEST_FAILURE;
+
+ err = get_xsk_stats(xsk, &stats);
+ if (err)
+ return TEST_FAILURE;
+
+ if (stats.rx_ring_full)
+ return TEST_PASS;
+
+ return TEST_FAILURE;
+}
+
+static int validate_fill_empty(struct ifobject *ifobject)
+{
+ struct xsk_socket *xsk = ifobject->xsk->xsk;
+ struct xdp_statistics stats;
+ int err;
+
+ usleep(1000);
+ err = kick_rx(ifobject->xsk);
+ if (err)
+ return TEST_FAILURE;
+
+ err = get_xsk_stats(xsk, &stats);
+ if (err)
+ return TEST_FAILURE;
+
+ if (stats.rx_fill_ring_empty_descs)
+ return TEST_PASS;
+
+ return TEST_FAILURE;
+}
+
+static int validate_tx_invalid_descs(struct ifobject *ifobject)
+{
+ struct xsk_socket *xsk = ifobject->xsk->xsk;
+ int fd = xsk_socket__fd(xsk);
+ struct xdp_statistics stats;
+ socklen_t optlen;
+ int err;
+
+ optlen = sizeof(stats);
+ err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen);
+ if (err) {
+ ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n",
+ __func__, -err, strerror(-err));
+ return TEST_FAILURE;
+ }
+
+ if (stats.tx_invalid_descs != ifobject->xsk->pkt_stream->nb_pkts / 2) {
+ ksft_print_msg("[%s] tx_invalid_descs incorrect. Got [%llu] expected [%u]\n",
+ __func__,
+ (unsigned long long)stats.tx_invalid_descs,
+ ifobject->xsk->pkt_stream->nb_pkts / 2);
+ return TEST_FAILURE;
+ }
+
+ return TEST_PASS;
+}
+
+static int xsk_configure(struct test_spec *test, struct ifobject *ifobject,
+ struct xsk_umem_info *umem, bool tx)
+{
+ int i, ret;
+
+ for (i = 0; i < test->nb_sockets; i++) {
+ bool shared = (ifobject->shared_umem && tx) ? true : !!i;
+ u32 ctr = 0;
+
+ while (ctr++ < SOCK_RECONF_CTR) {
+ ret = xsk_configure_socket(&ifobject->xsk_arr[i], umem,
+ ifobject, shared);
+ if (!ret)
+ break;
+
+ /* Retry if it fails as xsk_socket__create() is asynchronous */
+ if (ctr >= SOCK_RECONF_CTR)
+ return ret;
+ usleep(USLEEP_MAX);
+ }
+ if (ifobject->busy_poll) {
+ ret = enable_busy_poll(&ifobject->xsk_arr[i]);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobject)
+{
+ int ret = xsk_configure(test, ifobject, test->ifobj_rx->umem, true);
+
+ if (ret)
+ return ret;
+ ifobject->xsk = &ifobject->xsk_arr[0];
+ ifobject->xskmap = test->ifobj_rx->xskmap;
+ memcpy(ifobject->umem, test->ifobj_rx->umem, sizeof(struct xsk_umem_info));
+ ifobject->umem->base_addr = 0;
+
+ return 0;
+}
+
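+/* Pre-populate the fill ring with buffers whose offsets mirror what
+ * the Tx side will use, so that is_offset_correct() holds on receive.
+ * With @fill_up set, keep posting sequential buffers even after the
+ * packet stream is exhausted.
+ */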
+static int xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream,
+ bool fill_up)
+{
+ u32 rx_frame_size = umem->frame_size - XDP_PACKET_HEADROOM;
+ u32 idx = 0, filled = 0, buffers_to_fill, nb_pkts;
+ int ret;
+
+ if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS)
+ buffers_to_fill = umem->num_frames;
+ else
+ buffers_to_fill = umem->fill_size;
+
+ ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx);
+ if (ret != buffers_to_fill)
+ return -ENOSPC;
+
+ while (filled < buffers_to_fill) {
+ struct pkt *pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &nb_pkts);
+ u64 addr;
+ u32 i;
+
+ for (i = 0; i < pkt_nb_frags(rx_frame_size, pkt_stream, pkt); i++) {
+ if (!pkt) {
+ if (!fill_up)
+ break;
+ addr = filled * umem->frame_size + umem->base_addr;
+ } else if (pkt->offset >= 0) {
+ addr = pkt->offset % umem->frame_size + umem_alloc_buffer(umem);
+ } else {
+ addr = pkt->offset + umem_alloc_buffer(umem);
+ }
+
+ *xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr;
+ if (++filled >= buffers_to_fill)
+ break;
+ }
+ }
+ xsk_ring_prod__submit(&umem->fq, filled);
+ xsk_ring_prod__cancel(&umem->fq, buffers_to_fill - filled);
+
+ pkt_stream_reset(pkt_stream);
+ umem_reset_alloc(umem);
+
+ return 0;
+}
+
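+/* Common per-thread setup: mmap the UMEM (doubled for shared-UMEM
+ * tests, huge pages in unaligned mode), create the sockets and, on the
+ * Rx side, populate the fill ring and the xskmap.
+ */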
+static int thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
+{
+ int mmap_flags;
+ u64 umem_sz;
+ void *bufs;
+ int ret;
+ u32 i;
+
+ umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size;
+ mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
+
+ if (ifobject->umem->unaligned_mode)
+ mmap_flags |= MAP_HUGETLB | MAP_HUGE_2MB;
+
+ if (ifobject->shared_umem)
+ umem_sz *= 2;
+
+ bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
+ if (bufs == MAP_FAILED)
+ return -errno;
+
+ ret = xsk_configure_umem(ifobject, ifobject->umem, bufs, umem_sz);
+ if (ret)
+ return ret;
+
+ ret = xsk_configure(test, ifobject, ifobject->umem, false);
+ if (ret)
+ return ret;
+
+ ifobject->xsk = &ifobject->xsk_arr[0];
+
+ if (!ifobject->rx_on)
+ return 0;
+
+ ret = xsk_populate_fill_ring(ifobject->umem, ifobject->xsk->pkt_stream,
+ ifobject->use_fill_ring);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < test->nb_sockets; i++) {
+ ifobject->xsk = &ifobject->xsk_arr[i];
+ ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void *worker_testapp_validate_tx(void *arg)
+{
+ struct test_spec *test = (struct test_spec *)arg;
+ struct ifobject *ifobject = test->ifobj_tx;
+ int err;
+
+ if (test->current_step == 1) {
+ if (!ifobject->shared_umem) {
+ if (thread_common_ops(test, ifobject)) {
+ test->fail = true;
+ pthread_exit(NULL);
+ }
+ } else {
+ if (thread_common_ops_tx(test, ifobject)) {
+ test->fail = true;
+ pthread_exit(NULL);
+ }
+ }
+ }
+
+ err = send_pkts(test, ifobject);
+
+ if (!err && ifobject->validation_func)
+ err = ifobject->validation_func(ifobject);
+ if (err)
+ test->fail = true;
+
+ pthread_exit(NULL);
+}
+
+void *worker_testapp_validate_rx(void *arg)
+{
+ struct test_spec *test = (struct test_spec *)arg;
+ struct ifobject *ifobject = test->ifobj_rx;
+ int err;
+
+ if (test->current_step == 1) {
+ err = thread_common_ops(test, ifobject);
+ } else {
+ xsk_clear_xskmap(ifobject->xskmap);
+ err = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk, 0);
+ if (err)
+ ksft_print_msg("Error: Failed to update xskmap, error %s\n",
+ strerror(-err));
+ }
+
+ pthread_barrier_wait(&barr);
+
+ /* We leave only now in case of error to avoid getting stuck in the barrier */
+ if (err) {
+ test->fail = true;
+ pthread_exit(NULL);
+ }
+
+ err = receive_pkts(test);
+
+ if (!err && ifobject->validation_func)
+ err = ifobject->validation_func(ifobject);
+
+ if (err) {
+ if (!test->adjust_tail) {
+ test->fail = true;
+ } else {
+ bool supported;
+
+ if (is_adjust_tail_supported(ifobject->xdp_progs, &supported))
+ test->fail = true;
+ else if (!supported)
+ test->adjust_tail_support = false;
+ else
+ test->fail = true;
+ }
+ }
+
+ pthread_exit(NULL);
+}
+
+static void testapp_clean_xsk_umem(struct ifobject *ifobj)
+{
+ u64 umem_sz = ifobj->umem->num_frames * ifobj->umem->frame_size;
+
+ if (ifobj->shared_umem)
+ umem_sz *= 2;
+
+ umem_sz = ceil_u64(umem_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE;
+ xsk_umem__delete(ifobj->umem->umem);
+ munmap(ifobj->umem->buffer, umem_sz);
+}
+
+static void handler(int signum)
+{
+ pthread_exit(NULL);
+}
+
+static bool xdp_prog_changed_rx(struct test_spec *test)
+{
+ struct ifobject *ifobj = test->ifobj_rx;
+
+ return ifobj->xdp_prog != test->xdp_prog_rx || ifobj->mode != test->mode;
+}
+
+static bool xdp_prog_changed_tx(struct test_spec *test)
+{
+ struct ifobject *ifobj = test->ifobj_tx;
+
+ return ifobj->xdp_prog != test->xdp_prog_tx || ifobj->mode != test->mode;
+}
+
+static int xsk_reattach_xdp(struct ifobject *ifobj, struct bpf_program *xdp_prog,
+ struct bpf_map *xskmap, enum test_mode mode)
+{
+ int err;
+
+ xsk_detach_xdp_program(ifobj->ifindex, mode_to_xdp_flags(ifobj->mode));
+ err = xsk_attach_xdp_program(xdp_prog, ifobj->ifindex, mode_to_xdp_flags(mode));
+ if (err) {
+ ksft_print_msg("Error attaching XDP program\n");
+ return err;
+ }
+
+ if (ifobj->mode != mode && (mode == TEST_MODE_DRV || mode == TEST_MODE_ZC))
+ if (!xsk_is_in_mode(ifobj->ifindex, XDP_FLAGS_DRV_MODE)) {
+ ksft_print_msg("ERROR: XDP prog not in DRV mode\n");
+ return -EINVAL;
+ }
+
+ ifobj->xdp_prog = xdp_prog;
+ ifobj->xskmap = xskmap;
+ ifobj->mode = mode;
+
+ return 0;
+}
+
+static int xsk_attach_xdp_progs(struct test_spec *test, struct ifobject *ifobj_rx,
+ struct ifobject *ifobj_tx)
+{
+ int err = 0;
+
+ if (xdp_prog_changed_rx(test)) {
+ err = xsk_reattach_xdp(ifobj_rx, test->xdp_prog_rx, test->xskmap_rx, test->mode);
+ if (err)
+ return err;
+ }
+
+ if (!ifobj_tx || ifobj_tx->shared_umem)
+ return 0;
+
+ if (xdp_prog_changed_tx(test))
+ err = xsk_reattach_xdp(ifobj_tx, test->xdp_prog_tx, test->xskmap_tx, test->mode);
+
+ return err;
+}
+
+static void clean_sockets(struct test_spec *test, struct ifobject *ifobj)
+{
+ u32 i;
+
+ if (!ifobj || !test)
+ return;
+
+ for (i = 0; i < test->nb_sockets; i++)
+ xsk_socket__delete(ifobj->xsk_arr[i].xsk);
+}
+
+static void clean_umem(struct test_spec *test, struct ifobject *ifobj1, struct ifobject *ifobj2)
+{
+ if (!ifobj1)
+ return;
+
+ testapp_clean_xsk_umem(ifobj1);
+ if (ifobj2 && !ifobj2->shared_umem)
+ testapp_clean_xsk_umem(ifobj2);
+}
+
+static int __testapp_validate_traffic(struct test_spec *test, struct ifobject *ifobj1,
+ struct ifobject *ifobj2)
+{
+ pthread_t t0, t1;
+ int err;
+
+ if (test->mtu > MAX_ETH_PKT_SIZE) {
+ if (test->mode == TEST_MODE_ZC && (!ifobj1->multi_buff_zc_supp ||
+ (ifobj2 && !ifobj2->multi_buff_zc_supp))) {
+ ksft_print_msg("Multi buffer for zero-copy not supported.\n");
+ return TEST_SKIP;
+ }
+ if (test->mode != TEST_MODE_ZC && (!ifobj1->multi_buff_supp ||
+ (ifobj2 && !ifobj2->multi_buff_supp))) {
+ ksft_print_msg("Multi buffer not supported.\n");
+ return TEST_SKIP;
+ }
+ }
+ err = test_spec_set_mtu(test, test->mtu);
+ if (err) {
+ ksft_print_msg("Error, could not set mtu.\n");
+ return TEST_FAILURE;
+ }
+
+ if (ifobj2) {
+ if (pthread_barrier_init(&barr, NULL, 2))
+ return TEST_FAILURE;
+ pkt_stream_reset(ifobj2->xsk->pkt_stream);
+ }
+
+ test->current_step++;
+ pkt_stream_reset(ifobj1->xsk->pkt_stream);
+ pkts_in_flight = 0;
+
+ signal(SIGUSR1, handler);
+ /* Spawn the RX thread */
+ pthread_create(&t0, NULL, ifobj1->func_ptr, test);
+
+ if (ifobj2) {
+ pthread_barrier_wait(&barr);
+ if (pthread_barrier_destroy(&barr)) {
+ pthread_kill(t0, SIGUSR1);
+ clean_sockets(test, ifobj1);
+ clean_umem(test, ifobj1, NULL);
+ return TEST_FAILURE;
+ }
+
+ /* Spawn the TX thread */
+ pthread_create(&t1, NULL, ifobj2->func_ptr, test);
+
+ pthread_join(t1, NULL);
+ }
+
+ if (!ifobj2)
+ pthread_kill(t0, SIGUSR1);
+ else
+ pthread_join(t0, NULL);
+
+ if (test->total_steps == test->current_step || test->fail) {
+ clean_sockets(test, ifobj1);
+ clean_sockets(test, ifobj2);
+ clean_umem(test, ifobj1, ifobj2);
+ }
+
+ if (test->fail)
+ return TEST_FAILURE;
+
+ return TEST_PASS;
+}
+
+static int testapp_validate_traffic(struct test_spec *test)
+{
+ struct ifobject *ifobj_rx = test->ifobj_rx;
+ struct ifobject *ifobj_tx = test->ifobj_tx;
+
+ if ((ifobj_rx->umem->unaligned_mode && !ifobj_rx->unaligned_supp) ||
+ (ifobj_tx->umem->unaligned_mode && !ifobj_tx->unaligned_supp)) {
+ ksft_print_msg("No huge pages present.\n");
+ return TEST_SKIP;
+ }
+
+ if (test->set_ring) {
+ if (ifobj_tx->hw_ring_size_supp) {
+ if (set_ring_size(ifobj_tx)) {
+ ksft_print_msg("Failed to change HW ring size.\n");
+ return TEST_FAILURE;
+ }
+ } else {
+ ksft_print_msg("Changing HW ring size not supported.\n");
+ return TEST_SKIP;
+ }
+ }
+
+ if (xsk_attach_xdp_progs(test, ifobj_rx, ifobj_tx))
+ return TEST_FAILURE;
+ return __testapp_validate_traffic(test, ifobj_rx, ifobj_tx);
+}
+
+static int testapp_validate_traffic_single_thread(struct test_spec *test, struct ifobject *ifobj)
+{
+ return __testapp_validate_traffic(test, ifobj, NULL);
+}
+
+int testapp_teardown(struct test_spec *test)
+{
+ int i;
+
+ for (i = 0; i < MAX_TEARDOWN_ITER; i++) {
+ if (testapp_validate_traffic(test))
+ return TEST_FAILURE;
+ test_spec_reset(test);
+ }
+
+ return TEST_PASS;
+}
+
+static void swap_directions(struct ifobject **ifobj1, struct ifobject **ifobj2)
+{
+ thread_func_t tmp_func_ptr = (*ifobj1)->func_ptr;
+ struct ifobject *tmp_ifobj = (*ifobj1);
+
+ (*ifobj1)->func_ptr = (*ifobj2)->func_ptr;
+ (*ifobj2)->func_ptr = tmp_func_ptr;
+
+ *ifobj1 = *ifobj2;
+ *ifobj2 = tmp_ifobj;
+}
+
+int testapp_bidirectional(struct test_spec *test)
+{
+ int res;
+
+ test->ifobj_tx->rx_on = true;
+ test->ifobj_rx->tx_on = true;
+ test->total_steps = 2;
+ if (testapp_validate_traffic(test))
+ return TEST_FAILURE;
+
+ print_verbose("Switching Tx/Rx direction\n");
+ swap_directions(&test->ifobj_rx, &test->ifobj_tx);
+ res = __testapp_validate_traffic(test, test->ifobj_rx, test->ifobj_tx);
+
+ swap_directions(&test->ifobj_rx, &test->ifobj_tx);
+ return res;
+}
+
+static int swap_xsk_resources(struct test_spec *test)
+{
+ int ret;
+
+ test->ifobj_tx->xsk_arr[0].pkt_stream = NULL;
+ test->ifobj_rx->xsk_arr[0].pkt_stream = NULL;
+ test->ifobj_tx->xsk_arr[1].pkt_stream = test->tx_pkt_stream_default;
+ test->ifobj_rx->xsk_arr[1].pkt_stream = test->rx_pkt_stream_default;
+ test->ifobj_tx->xsk = &test->ifobj_tx->xsk_arr[1];
+ test->ifobj_rx->xsk = &test->ifobj_rx->xsk_arr[1];
+
+ ret = xsk_update_xskmap(test->ifobj_rx->xskmap, test->ifobj_rx->xsk->xsk, 0);
+ if (ret)
+ return TEST_FAILURE;
+
+ return TEST_PASS;
+}
+
+int testapp_xdp_prog_cleanup(struct test_spec *test)
+{
+ test->total_steps = 2;
+ test->nb_sockets = 2;
+ if (testapp_validate_traffic(test))
+ return TEST_FAILURE;
+
+ if (swap_xsk_resources(test)) {
+ clean_sockets(test, test->ifobj_rx);
+ clean_sockets(test, test->ifobj_tx);
+ clean_umem(test, test->ifobj_rx, test->ifobj_tx);
+ return TEST_FAILURE;
+ }
+
+ return testapp_validate_traffic(test);
+}
+
+int testapp_headroom(struct test_spec *test)
+{
+ test->ifobj_rx->umem->frame_headroom = UMEM_HEADROOM_TEST_SIZE;
+ return testapp_validate_traffic(test);
+}
+
+int testapp_stats_rx_dropped(struct test_spec *test)
+{
+ if (test->mode == TEST_MODE_ZC) {
+ ksft_print_msg("Can not run RX_DROPPED test for ZC mode\n");
+ return TEST_SKIP;
+ }
+
+ if (pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0))
+ return TEST_FAILURE;
+ test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size -
+ XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 3;
+ if (pkt_stream_receive_half(test))
+ return TEST_FAILURE;
+ test->ifobj_rx->validation_func = validate_rx_dropped;
+ return testapp_validate_traffic(test);
+}
+
+int testapp_stats_tx_invalid_descs(struct test_spec *test)
+{
+ if (pkt_stream_replace_half(test, XSK_UMEM__INVALID_FRAME_SIZE, 0))
+ return TEST_FAILURE;
+ test->ifobj_tx->validation_func = validate_tx_invalid_descs;
+ return testapp_validate_traffic(test);
+}
+
+int testapp_stats_rx_full(struct test_spec *test)
+{
+ if (pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE))
+ return TEST_FAILURE;
+ test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
+
+ test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS;
+ test->ifobj_rx->release_rx = false;
+ test->ifobj_rx->validation_func = validate_rx_full;
+ return testapp_validate_traffic(test);
+}
+
+int testapp_stats_fill_empty(struct test_spec *test)
+{
+ if (pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE))
+ return TEST_FAILURE;
+ test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
+
+ test->ifobj_rx->use_fill_ring = false;
+ test->ifobj_rx->validation_func = validate_fill_empty;
+ return testapp_validate_traffic(test);
+}
+
+int testapp_send_receive_unaligned(struct test_spec *test)
+{
+ test->ifobj_tx->umem->unaligned_mode = true;
+ test->ifobj_rx->umem->unaligned_mode = true;
+ /* Let half of the packets straddle a 4K buffer boundary */
+ if (pkt_stream_replace_half(test, MIN_PKT_SIZE, -MIN_PKT_SIZE / 2))
+ return TEST_FAILURE;
+
+ return testapp_validate_traffic(test);
+}
+
+int testapp_send_receive_unaligned_mb(struct test_spec *test)
+{
+ test->mtu = MAX_ETH_JUMBO_SIZE;
+ test->ifobj_tx->umem->unaligned_mode = true;
+ test->ifobj_rx->umem->unaligned_mode = true;
+ if (pkt_stream_replace(test, DEFAULT_PKT_CNT, MAX_ETH_JUMBO_SIZE))
+ return TEST_FAILURE;
+ return testapp_validate_traffic(test);
+}
+
+int testapp_single_pkt(struct test_spec *test)
+{
+ struct pkt pkts[] = {{0, MIN_PKT_SIZE, 0, true}};
+
+ if (pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)))
+ return TEST_FAILURE;
+ return testapp_validate_traffic(test);
+}
+
+int testapp_send_receive_mb(struct test_spec *test)
+{
+ test->mtu = MAX_ETH_JUMBO_SIZE;
+ if (pkt_stream_replace(test, DEFAULT_PKT_CNT, MAX_ETH_JUMBO_SIZE))
+ return TEST_FAILURE;
+
+ return testapp_validate_traffic(test);
+}
+
+int testapp_invalid_desc_mb(struct test_spec *test)
+{
+ struct xsk_umem_info *umem = test->ifobj_tx->umem;
+ u64 umem_size = umem->num_frames * umem->frame_size;
+ struct pkt pkts[] = {
+ /* Valid packet for synch to start with */
+ {0, MIN_PKT_SIZE, 0, true, 0},
+ /* Zero frame len is not legal */
+ {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
+ {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
+ {0, 0, 0, false, 0},
+ /* Invalid address in the second frame */
+ {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
+ {umem_size, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
+ /* Invalid len in the middle */
+ {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
+ {0, XSK_UMEM__INVALID_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
+ /* Invalid options in the middle */
+ {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
+ {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XSK_DESC__INVALID_OPTION},
+ /* Transmit 2 frags, receive 3 */
+ {0, XSK_UMEM__MAX_FRAME_SIZE, 0, true, XDP_PKT_CONTD},
+ {0, XSK_UMEM__MAX_FRAME_SIZE, 0, true, 0},
+ /* Middle frame crosses chunk boundary with small length */
+ {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
+ {-MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false, 0},
+ /* Valid packet for synch so that something is received */
+ {0, MIN_PKT_SIZE, 0, true, 0}};
+
+ if (umem->unaligned_mode) {
+ /* Crossing a chunk boundary allowed */
+ pkts[12].valid = true;
+ pkts[13].valid = true;
+ }
+
+ test->mtu = MAX_ETH_JUMBO_SIZE;
+ if (pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)))
+ return TEST_FAILURE;
+ return testapp_validate_traffic(test);
+}
+
+int testapp_invalid_desc(struct test_spec *test)
+{
+ struct xsk_umem_info *umem = test->ifobj_tx->umem;
+ u64 umem_size = umem->num_frames * umem->frame_size;
+ struct pkt pkts[] = {
+ /* Zero packet address allowed */
+ {0, MIN_PKT_SIZE, 0, true},
+ /* Allowed packet */
+ {0, MIN_PKT_SIZE, 0, true},
+ /* Straddling the start of umem */
+ {-2, MIN_PKT_SIZE, 0, false},
+ /* Packet too large */
+ {0, XSK_UMEM__INVALID_FRAME_SIZE, 0, false},
+ /* Up to end of umem allowed */
+ {umem_size - MIN_PKT_SIZE - 2 * umem->frame_size, MIN_PKT_SIZE, 0, true},
+ /* After umem ends */
+ {umem_size, MIN_PKT_SIZE, 0, false},
+ /* Straddle the end of umem */
+ {umem_size - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false},
+ /* Straddle a 4K boundary */
+ {0x1000 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false},
+ /* Straddle a 2K boundary */
+ {0x800 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, true},
+ /* Valid packet for synch so that something is received */
+ {0, MIN_PKT_SIZE, 0, true}};
+
+ if (umem->unaligned_mode) {
+ /* Crossing a page boundary allowed */
+ pkts[7].valid = true;
+ }
+ if (umem->frame_size == XSK_UMEM__DEFAULT_FRAME_SIZE / 2) {
+ /* Crossing a 2K frame size boundary not allowed */
+ pkts[8].valid = false;
+ }
+
+ if (test->ifobj_tx->shared_umem) {
+ pkts[4].offset += umem_size;
+ pkts[5].offset += umem_size;
+ pkts[6].offset += umem_size;
+ }
+
+ if (pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)))
+ return TEST_FAILURE;
+ return testapp_validate_traffic(test);
+}
+
+int testapp_xdp_drop(struct test_spec *test)
+{
+ struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
+ struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
+
+ test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_drop, skel_tx->progs.xsk_xdp_drop,
+ skel_rx->maps.xsk, skel_tx->maps.xsk);
+
+ if (pkt_stream_receive_half(test))
+ return TEST_FAILURE;
+ return testapp_validate_traffic(test);
+}
+
+int testapp_xdp_metadata_copy(struct test_spec *test)
+{
+ struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
+ struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
+
+ test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_populate_metadata,
+ skel_tx->progs.xsk_xdp_populate_metadata,
+ skel_rx->maps.xsk, skel_tx->maps.xsk);
+ test->ifobj_rx->use_metadata = true;
+
+ skel_rx->bss->count = 0;
+
+ return testapp_validate_traffic(test);
+}
+
+int testapp_xdp_shared_umem(struct test_spec *test)
+{
+ struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
+ struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
+ int ret;
+
+ test->total_steps = 1;
+ test->nb_sockets = 2;
+
+ test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_shared_umem,
+ skel_tx->progs.xsk_xdp_shared_umem,
+ skel_rx->maps.xsk, skel_tx->maps.xsk);
+
+ if (pkt_stream_even_odd_sequence(test))
+ return TEST_FAILURE;
+
+ ret = testapp_validate_traffic(test);
+
+ release_even_odd_sequence(test);
+
+ return ret;
+}
+
+int testapp_poll_txq_tmout(struct test_spec *test)
+{
+ test->ifobj_tx->use_poll = true;
+ /* Create an invalid frame by setting both the umem frame_size and the packet length to 2048 */
+ test->ifobj_tx->umem->frame_size = 2048;
+ if (pkt_stream_replace(test, 2 * DEFAULT_PKT_CNT, 2048))
+ return TEST_FAILURE;
+ return testapp_validate_traffic_single_thread(test, test->ifobj_tx);
+}
+
+int testapp_poll_rxq_tmout(struct test_spec *test)
+{
+ test->ifobj_rx->use_poll = true;
+ return testapp_validate_traffic_single_thread(test, test->ifobj_rx);
+}
+
+int testapp_too_many_frags(struct test_spec *test)
+{
+ struct pkt *pkts;
+ u32 max_frags, i;
+ int ret;
+
+ if (test->mode == TEST_MODE_ZC) {
+ max_frags = test->ifobj_tx->xdp_zc_max_segs;
+ } else {
+ max_frags = get_max_skb_frags();
+ if (!max_frags) {
+ ksft_print_msg("Can't get MAX_SKB_FRAGS from system, using default (17)\n");
+ max_frags = 17;
+ }
+ max_frags += 1;
+ }
+
+ pkts = calloc(2 * max_frags + 2, sizeof(struct pkt));
+ if (!pkts)
+ return TEST_FAILURE;
+
+ test->mtu = MAX_ETH_JUMBO_SIZE;
+
+ /* Valid packet for synch */
+ pkts[0].len = MIN_PKT_SIZE;
+ pkts[0].valid = true;
+
+ /* One valid packet with the max amount of frags */
+ for (i = 1; i < max_frags + 1; i++) {
+ pkts[i].len = MIN_PKT_SIZE;
+ pkts[i].options = XDP_PKT_CONTD;
+ pkts[i].valid = true;
+ }
+ pkts[max_frags].options = 0;
+
+ /* An invalid packet with the max amount of frags but signals packet
+ * continues on the last frag
+ */
+ for (i = max_frags + 1; i < 2 * max_frags + 1; i++) {
+ pkts[i].len = MIN_PKT_SIZE;
+ pkts[i].options = XDP_PKT_CONTD;
+ pkts[i].valid = false;
+ }
+
+ /* Valid packet for synch */
+ pkts[2 * max_frags + 1].len = MIN_PKT_SIZE;
+ pkts[2 * max_frags + 1].valid = true;
+
+ if (pkt_stream_generate_custom(test, pkts, 2 * max_frags + 2)) {
+ free(pkts);
+ return TEST_FAILURE;
+ }
+
+ ret = testapp_validate_traffic(test);
+ free(pkts);
+ return ret;
+}
+
+static int xsk_load_xdp_programs(struct ifobject *ifobj)
+{
+ int err;
+
+ ifobj->xdp_progs = xsk_xdp_progs__open_and_load();
+ err = libbpf_get_error(ifobj->xdp_progs);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/* Probe for huge page support by attempting a 2MB hugetlb mapping */
+static bool hugepages_present(void)
+{
+ size_t mmap_sz = 2 * DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE;
+ void *bufs;
+
+ bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_2MB, -1, 0);
+ if (bufs == MAP_FAILED)
+ return false;
+
+ mmap_sz = ceil_u64(mmap_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE;
+ munmap(bufs, mmap_sz);
+ return true;
+}
+
+int init_iface(struct ifobject *ifobj, thread_func_t func_ptr)
+{
+ LIBBPF_OPTS(bpf_xdp_query_opts, query_opts);
+ int err;
+
+ ifobj->func_ptr = func_ptr;
+
+ err = xsk_load_xdp_programs(ifobj);
+ if (err) {
+ ksft_print_msg("Error loading XDP program\n");
+ return err;
+ }
+
+ if (hugepages_present())
+ ifobj->unaligned_supp = true;
+
+ err = bpf_xdp_query(ifobj->ifindex, XDP_FLAGS_DRV_MODE, &query_opts);
+ if (err) {
+ ksft_print_msg("Error querying XDP capabilities\n");
+ return err;
+ }
+ if (query_opts.feature_flags & NETDEV_XDP_ACT_RX_SG)
+ ifobj->multi_buff_supp = true;
+ if (query_opts.feature_flags & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
+ if (query_opts.xdp_zc_max_segs > 1) {
+ ifobj->multi_buff_zc_supp = true;
+ ifobj->xdp_zc_max_segs = query_opts.xdp_zc_max_segs;
+ } else {
+ ifobj->xdp_zc_max_segs = 0;
+ }
+ }
+
+ return 0;
+}
+
+int testapp_send_receive(struct test_spec *test)
+{
+ return testapp_validate_traffic(test);
+}
+
+int testapp_send_receive_2k_frame(struct test_spec *test)
+{
+ test->ifobj_tx->umem->frame_size = 2048;
+ test->ifobj_rx->umem->frame_size = 2048;
+ if (pkt_stream_replace(test, DEFAULT_PKT_CNT, MIN_PKT_SIZE))
+ return TEST_FAILURE;
+ return testapp_validate_traffic(test);
+}
+
+int testapp_poll_rx(struct test_spec *test)
+{
+ test->ifobj_rx->use_poll = true;
+ return testapp_validate_traffic(test);
+}
+
+int testapp_poll_tx(struct test_spec *test)
+{
+ test->ifobj_tx->use_poll = true;
+ return testapp_validate_traffic(test);
+}
+
+int testapp_aligned_inv_desc(struct test_spec *test)
+{
+ return testapp_invalid_desc(test);
+}
+
+int testapp_aligned_inv_desc_2k_frame(struct test_spec *test)
+{
+ test->ifobj_tx->umem->frame_size = 2048;
+ test->ifobj_rx->umem->frame_size = 2048;
+ return testapp_invalid_desc(test);
+}
+
+int testapp_unaligned_inv_desc(struct test_spec *test)
+{
+ test->ifobj_tx->umem->unaligned_mode = true;
+ test->ifobj_rx->umem->unaligned_mode = true;
+ return testapp_invalid_desc(test);
+}
+
+int testapp_unaligned_inv_desc_4001_frame(struct test_spec *test)
+{
+ u64 page_size, umem_size;
+
+ /* Odd frame size so the UMEM doesn't end near a page boundary. */
+ test->ifobj_tx->umem->frame_size = 4001;
+ test->ifobj_rx->umem->frame_size = 4001;
+ test->ifobj_tx->umem->unaligned_mode = true;
+ test->ifobj_rx->umem->unaligned_mode = true;
+ /* This test exists to exercise descriptors that straddle the end
+ * of the UMEM but not a page boundary.
+ */
+ page_size = sysconf(_SC_PAGESIZE);
+ umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size;
+ assert(umem_size % page_size > MIN_PKT_SIZE);
+ assert(umem_size % page_size < page_size - MIN_PKT_SIZE);
+
+ return testapp_invalid_desc(test);
+}
+
+int testapp_aligned_inv_desc_mb(struct test_spec *test)
+{
+ return testapp_invalid_desc_mb(test);
+}
+
+int testapp_unaligned_inv_desc_mb(struct test_spec *test)
+{
+ test->ifobj_tx->umem->unaligned_mode = true;
+ test->ifobj_rx->umem->unaligned_mode = true;
+ return testapp_invalid_desc_mb(test);
+}
+
+int testapp_xdp_metadata(struct test_spec *test)
+{
+ return testapp_xdp_metadata_copy(test);
+}
+
+int testapp_xdp_metadata_mb(struct test_spec *test)
+{
+ test->mtu = MAX_ETH_JUMBO_SIZE;
+ return testapp_xdp_metadata_copy(test);
+}
+
+int testapp_hw_sw_min_ring_size(struct test_spec *test)
+{
+ int ret;
+
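+ /* Step 1: run with small HW ring sizes and a batch size of 1 */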
+ test->set_ring = true;
+ test->total_steps = 2;
+ test->ifobj_tx->ring.tx_pending = DEFAULT_BATCH_SIZE;
+ test->ifobj_tx->ring.rx_pending = DEFAULT_BATCH_SIZE * 2;
+ test->ifobj_tx->xsk->batch_size = 1;
+ test->ifobj_rx->xsk->batch_size = 1;
+ ret = testapp_validate_traffic(test);
+ if (ret)
+ return ret;
+
+ /* Set batch size to hw_ring_size - 1 */
+ test->ifobj_tx->xsk->batch_size = DEFAULT_BATCH_SIZE - 1;
+ test->ifobj_rx->xsk->batch_size = DEFAULT_BATCH_SIZE - 1;
+ return testapp_validate_traffic(test);
+}
+
+int testapp_hw_sw_max_ring_size(struct test_spec *test)
+{
+ u32 max_descs = XSK_RING_PROD__DEFAULT_NUM_DESCS * 4;
+ int ret;
+
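+ /* Step 1: max out the HW ring sizes and size the UMEM rings to match */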
+ test->set_ring = true;
+ test->total_steps = 2;
+ test->ifobj_tx->ring.tx_pending = test->ifobj_tx->ring.tx_max_pending;
+ test->ifobj_tx->ring.rx_pending = test->ifobj_tx->ring.rx_max_pending;
+ test->ifobj_rx->umem->num_frames = max_descs;
+ test->ifobj_rx->umem->fill_size = max_descs;
+ test->ifobj_rx->umem->comp_size = max_descs;
+ test->ifobj_tx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
+ test->ifobj_rx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
+
+ ret = testapp_validate_traffic(test);
+ if (ret)
+ return ret;
+
+ /* Set batch_size to tx_max_pending - 8 (8152 on ice) for testing, as the ice HW
+ * ignores the 3 lowest bits when updating the Rx HW tail register.
+ */
+ test->ifobj_tx->xsk->batch_size = test->ifobj_tx->ring.tx_max_pending - 8;
+ test->ifobj_rx->xsk->batch_size = test->ifobj_tx->ring.tx_max_pending - 8;
+ if (pkt_stream_replace(test, max_descs, MIN_PKT_SIZE)) {
+ clean_sockets(test, test->ifobj_tx);
+ clean_sockets(test, test->ifobj_rx);
+ clean_umem(test, test->ifobj_rx, test->ifobj_tx);
+ return TEST_FAILURE;
+ }
+
+ return testapp_validate_traffic(test);
+}
+
+static int testapp_xdp_adjust_tail(struct test_spec *test, int adjust_value)
+{
+ struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
+ struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
+
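+ /* Run traffic through the adjust_tail XDP program; adjust_value is passed via the skeleton's .bss */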
+ test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_adjust_tail,
+ skel_tx->progs.xsk_xdp_adjust_tail,
+ skel_rx->maps.xsk, skel_tx->maps.xsk);
+
+ skel_rx->bss->adjust_value = adjust_value;
+
+ return testapp_validate_traffic(test);
+}
+
+static int testapp_adjust_tail(struct test_spec *test, u32 value, u32 pkt_len)
+{
+ int ret;
+
+ test->adjust_tail_support = true;
+ test->adjust_tail = true;
+ test->total_steps = 1;
+
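+ /* TX sends pkt_len-sized packets; RX expects them resized by 'value' bytes */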
+ ret = pkt_stream_replace_ifobject(test->ifobj_tx, DEFAULT_BATCH_SIZE, pkt_len);
+ if (ret)
+ return TEST_FAILURE;
+
+ ret = pkt_stream_replace_ifobject(test->ifobj_rx, DEFAULT_BATCH_SIZE, pkt_len + value);
+ if (ret)
+ return TEST_FAILURE;
+
+ ret = testapp_xdp_adjust_tail(test, value);
+ if (ret)
+ return ret;
+
+ if (!test->adjust_tail_support) {
+ ksft_print_msg("%s %sResize pkt with bpf_xdp_adjust_tail() not supported\n",
+ mode_string(test), busy_poll_string(test));
+ return TEST_SKIP;
+ }
+
+ return 0;
+}
+
+int testapp_adjust_tail_shrink(struct test_spec *test)
+{
+ /* Shrink by 4 bytes for testing purposes */
+ return testapp_adjust_tail(test, -4, MIN_PKT_SIZE * 2);
+}
+
+int testapp_adjust_tail_shrink_mb(struct test_spec *test)
+{
+ test->mtu = MAX_ETH_JUMBO_SIZE;
+ /* Shrink by the frag size */
+ return testapp_adjust_tail(test, -XSK_UMEM__MAX_FRAME_SIZE, XSK_UMEM__LARGE_FRAME_SIZE * 2);
+}
+
+int testapp_adjust_tail_grow(struct test_spec *test)
+{
+ /* Grow by 4 bytes for testing purposes */
+ return testapp_adjust_tail(test, 4, MIN_PKT_SIZE * 2);
+}
+
+int testapp_adjust_tail_grow_mb(struct test_spec *test)
+{
+ test->mtu = MAX_ETH_JUMBO_SIZE;
+ /* Grow by (frag_size - last_frag_size) - 1 to stay inside the last fragment */
+ return testapp_adjust_tail(test, (XSK_UMEM__MAX_FRAME_SIZE / 2) - 1,
+ XSK_UMEM__LARGE_FRAME_SIZE * 2);
+}
+
+int testapp_tx_queue_consumer(struct test_spec *test)
+{
+ int nr_packets;
+
+ if (test->mode == TEST_MODE_ZC) {
+ ksft_print_msg("Can not run TX_QUEUE_CONSUMER test for ZC mode\n");
+ return TEST_SKIP;
+ }
+
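+ /* Send one packet more than the default TX budget and verify the queue consumer */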
+ nr_packets = MAX_TX_BUDGET_DEFAULT + 1;
+ if (pkt_stream_replace(test, nr_packets, MIN_PKT_SIZE))
+ return TEST_FAILURE;
+ test->ifobj_tx->xsk->batch_size = nr_packets;
+ test->ifobj_tx->xsk->check_consumer = true;
+
+ return testapp_validate_traffic(test);
+}
+
+struct ifobject *ifobject_create(void)
+{
+ struct ifobject *ifobj;
+
+ ifobj = calloc(1, sizeof(struct ifobject));
+ if (!ifobj)
+ return NULL;
+
+ ifobj->xsk_arr = calloc(MAX_SOCKETS, sizeof(*ifobj->xsk_arr));
+ if (!ifobj->xsk_arr)
+ goto out_xsk_arr;
+
+ ifobj->umem = calloc(1, sizeof(*ifobj->umem));
+ if (!ifobj->umem)
+ goto out_umem;
+
+ return ifobj;
+
+out_umem:
+ free(ifobj->xsk_arr);
+out_xsk_arr:
+ free(ifobj);
+ return NULL;
+}
+
+void ifobject_delete(struct ifobject *ifobj)
+{
+ free(ifobj->umem);
+ free(ifobj->xsk_arr);
+ free(ifobj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_xsk.h b/tools/testing/selftests/bpf/prog_tests/test_xsk.h
new file mode 100644
index 000000000000..8fc78a057de0
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_xsk.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef TEST_XSK_H_
+#define TEST_XSK_H_
+
+#include <linux/ethtool.h>
+#include <linux/if_xdp.h>
+
+#include "../kselftest.h"
+#include "xsk.h"
+
+#ifndef SO_PREFER_BUSY_POLL
+#define SO_PREFER_BUSY_POLL 69
+#endif
+
+#ifndef SO_BUSY_POLL_BUDGET
+#define SO_BUSY_POLL_BUDGET 70
+#endif
+
+#define TEST_PASS 0
+#define TEST_FAILURE -1
+#define TEST_CONTINUE 1
+#define TEST_SKIP 2
+
+#define DEFAULT_PKT_CNT (4 * 1024)
+#define DEFAULT_UMEM_BUFFERS (DEFAULT_PKT_CNT / 4)
+#define HUGEPAGE_SIZE (2 * 1024 * 1024)
+#define MIN_PKT_SIZE 64
+#define MAX_ETH_PKT_SIZE 1518
+#define MAX_INTERFACE_NAME_CHARS 16
+#define MAX_TEST_NAME_SIZE 48
+#define SOCK_RECONF_CTR 10
+#define USLEEP_MAX 10000
+
+extern bool opt_verbose;
+#define print_verbose(x...) do { if (opt_verbose) ksft_print_msg(x); } while (0)
+
+static inline u32 ceil_u32(u32 a, u32 b)
+{
+ return (a + b - 1) / b;
+}
+
+static inline u64 ceil_u64(u64 a, u64 b)
+{
+ return (a + b - 1) / b;
+}
+
+/* Modes each test can be run in */
+enum test_mode {
+ TEST_MODE_SKB,
+ TEST_MODE_DRV,
+ TEST_MODE_ZC,
+ TEST_MODE_ALL
+};
+
+struct ifobject;
+struct test_spec;
+typedef int (*validation_func_t)(struct ifobject *ifobj);
+typedef void *(*thread_func_t)(void *arg);
+typedef int (*test_func_t)(struct test_spec *test);
+
+struct xsk_socket_info {
+ struct xsk_ring_cons rx;
+ struct xsk_ring_prod tx;
+ struct xsk_umem_info *umem;
+ struct xsk_socket *xsk;
+ struct pkt_stream *pkt_stream;
+ u32 outstanding_tx;
+ u32 rxqsize;
+ u32 batch_size;
+ u8 dst_mac[ETH_ALEN];
+ u8 src_mac[ETH_ALEN];
+ bool check_consumer;
+};
+
+int kick_rx(struct xsk_socket_info *xsk);
+int kick_tx(struct xsk_socket_info *xsk);
+
+struct xsk_umem_info {
+ struct xsk_ring_prod fq;
+ struct xsk_ring_cons cq;
+ struct xsk_umem *umem;
+ u64 next_buffer;
+ u32 num_frames;
+ u32 frame_headroom;
+ void *buffer;
+ u32 frame_size;
+ u32 base_addr;
+ u32 fill_size;
+ u32 comp_size;
+ bool unaligned_mode;
+};
+
+struct set_hw_ring {
+ u32 default_tx;
+ u32 default_rx;
+};
+
+int hw_ring_size_reset(struct ifobject *ifobj);
+
+struct ifobject {
+ char ifname[MAX_INTERFACE_NAME_CHARS];
+ struct xsk_socket_info *xsk;
+ struct xsk_socket_info *xsk_arr;
+ struct xsk_umem_info *umem;
+ thread_func_t func_ptr;
+ validation_func_t validation_func;
+ struct xsk_xdp_progs *xdp_progs;
+ struct bpf_map *xskmap;
+ struct bpf_program *xdp_prog;
+ struct ethtool_ringparam ring;
+ struct set_hw_ring set_ring;
+ enum test_mode mode;
+ int ifindex;
+ int mtu;
+ u32 bind_flags;
+ u32 xdp_zc_max_segs;
+ bool tx_on;
+ bool rx_on;
+ bool use_poll;
+ bool busy_poll;
+ bool use_fill_ring;
+ bool release_rx;
+ bool shared_umem;
+ bool use_metadata;
+ bool unaligned_supp;
+ bool multi_buff_supp;
+ bool multi_buff_zc_supp;
+ bool hw_ring_size_supp;
+};
+
+struct ifobject *ifobject_create(void);
+void ifobject_delete(struct ifobject *ifobj);
+int init_iface(struct ifobject *ifobj, thread_func_t func_ptr);
+
+int xsk_configure_umem(struct ifobject *ifobj, struct xsk_umem_info *umem, void *buffer, u64 size);
+int xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem,
+ struct ifobject *ifobject, bool shared);
+
+struct pkt {
+ int offset;
+ u32 len;
+ u32 pkt_nb;
+ bool valid;
+ u16 options;
+};
+
+struct pkt_stream {
+ u32 nb_pkts;
+ u32 current_pkt_nb;
+ struct pkt *pkts;
+ u32 max_pkt_len;
+ u32 nb_rx_pkts;
+ u32 nb_valid_entries;
+ bool verbatim;
+};
+
+static inline bool pkt_continues(u32 options)
+{
+ return options & XDP_PKT_CONTD;
+}
+
+struct pkt_stream *pkt_stream_generate(u32 nb_pkts, u32 pkt_len);
+void pkt_stream_delete(struct pkt_stream *pkt_stream);
+void pkt_stream_reset(struct pkt_stream *pkt_stream);
+void pkt_stream_restore_default(struct test_spec *test);
+
+struct test_spec {
+ struct ifobject *ifobj_tx;
+ struct ifobject *ifobj_rx;
+ struct pkt_stream *tx_pkt_stream_default;
+ struct pkt_stream *rx_pkt_stream_default;
+ struct bpf_program *xdp_prog_rx;
+ struct bpf_program *xdp_prog_tx;
+ struct bpf_map *xskmap_rx;
+ struct bpf_map *xskmap_tx;
+ test_func_t test_func;
+ int mtu;
+ u16 total_steps;
+ u16 current_step;
+ u16 nb_sockets;
+ bool fail;
+ bool set_ring;
+ bool adjust_tail;
+ bool adjust_tail_support;
+ enum test_mode mode;
+ char name[MAX_TEST_NAME_SIZE];
+};
+
+#define busy_poll_string(test) ((test)->ifobj_tx->busy_poll ? "BUSY-POLL " : "")
+static inline char *mode_string(struct test_spec *test)
+{
+ switch (test->mode) {
+ case TEST_MODE_SKB:
+ return "SKB";
+ case TEST_MODE_DRV:
+ return "DRV";
+ case TEST_MODE_ZC:
+ return "ZC";
+ default:
+ return "BOGUS";
+ }
+}
+
+void test_init(struct test_spec *test, struct ifobject *ifobj_tx,
+ struct ifobject *ifobj_rx, enum test_mode mode,
+ const struct test_spec *test_to_run);
+
+int testapp_adjust_tail_grow(struct test_spec *test);
+int testapp_adjust_tail_grow_mb(struct test_spec *test);
+int testapp_adjust_tail_shrink(struct test_spec *test);
+int testapp_adjust_tail_shrink_mb(struct test_spec *test);
+int testapp_aligned_inv_desc(struct test_spec *test);
+int testapp_aligned_inv_desc_2k_frame(struct test_spec *test);
+int testapp_aligned_inv_desc_mb(struct test_spec *test);
+int testapp_bidirectional(struct test_spec *test);
+int testapp_headroom(struct test_spec *test);
+int testapp_hw_sw_max_ring_size(struct test_spec *test);
+int testapp_hw_sw_min_ring_size(struct test_spec *test);
+int testapp_poll_rx(struct test_spec *test);
+int testapp_poll_rxq_tmout(struct test_spec *test);
+int testapp_poll_tx(struct test_spec *test);
+int testapp_poll_txq_tmout(struct test_spec *test);
+int testapp_send_receive(struct test_spec *test);
+int testapp_send_receive_2k_frame(struct test_spec *test);
+int testapp_send_receive_mb(struct test_spec *test);
+int testapp_send_receive_unaligned(struct test_spec *test);
+int testapp_send_receive_unaligned_mb(struct test_spec *test);
+int testapp_single_pkt(struct test_spec *test);
+int testapp_stats_fill_empty(struct test_spec *test);
+int testapp_stats_rx_dropped(struct test_spec *test);
+int testapp_stats_tx_invalid_descs(struct test_spec *test);
+int testapp_stats_rx_full(struct test_spec *test);
+int testapp_teardown(struct test_spec *test);
+int testapp_too_many_frags(struct test_spec *test);
+int testapp_tx_queue_consumer(struct test_spec *test);
+int testapp_unaligned_inv_desc(struct test_spec *test);
+int testapp_unaligned_inv_desc_4001_frame(struct test_spec *test);
+int testapp_unaligned_inv_desc_mb(struct test_spec *test);
+int testapp_xdp_drop(struct test_spec *test);
+int testapp_xdp_metadata(struct test_spec *test);
+int testapp_xdp_metadata_mb(struct test_spec *test);
+int testapp_xdp_prog_cleanup(struct test_spec *test);
+int testapp_xdp_shared_umem(struct test_spec *test);
+
+void *worker_testapp_validate_rx(void *arg);
+void *worker_testapp_validate_tx(void *arg);
+
+static const struct test_spec tests[] = {
+ {.name = "SEND_RECEIVE", .test_func = testapp_send_receive},
+ {.name = "SEND_RECEIVE_2K_FRAME", .test_func = testapp_send_receive_2k_frame},
+ {.name = "SEND_RECEIVE_SINGLE_PKT", .test_func = testapp_single_pkt},
+ {.name = "POLL_RX", .test_func = testapp_poll_rx},
+ {.name = "POLL_TX", .test_func = testapp_poll_tx},
+ {.name = "POLL_RXQ_FULL", .test_func = testapp_poll_rxq_tmout},
+ {.name = "POLL_TXQ_FULL", .test_func = testapp_poll_txq_tmout},
+ {.name = "ALIGNED_INV_DESC", .test_func = testapp_aligned_inv_desc},
+ {.name = "ALIGNED_INV_DESC_2K_FRAME_SIZE", .test_func = testapp_aligned_inv_desc_2k_frame},
+ {.name = "UMEM_HEADROOM", .test_func = testapp_headroom},
+ {.name = "BIDIRECTIONAL", .test_func = testapp_bidirectional},
+ {.name = "STAT_RX_DROPPED", .test_func = testapp_stats_rx_dropped},
+ {.name = "STAT_TX_INVALID", .test_func = testapp_stats_tx_invalid_descs},
+ {.name = "STAT_RX_FULL", .test_func = testapp_stats_rx_full},
+ {.name = "STAT_FILL_EMPTY", .test_func = testapp_stats_fill_empty},
+ {.name = "XDP_PROG_CLEANUP", .test_func = testapp_xdp_prog_cleanup},
+ {.name = "XDP_DROP_HALF", .test_func = testapp_xdp_drop},
+ {.name = "XDP_SHARED_UMEM", .test_func = testapp_xdp_shared_umem},
+ {.name = "XDP_METADATA_COPY", .test_func = testapp_xdp_metadata},
+ {.name = "XDP_METADATA_COPY_MULTI_BUFF", .test_func = testapp_xdp_metadata_mb},
+ {.name = "ALIGNED_INV_DESC_MULTI_BUFF", .test_func = testapp_aligned_inv_desc_mb},
+ {.name = "TOO_MANY_FRAGS", .test_func = testapp_too_many_frags},
+ {.name = "XDP_ADJUST_TAIL_SHRINK", .test_func = testapp_adjust_tail_shrink},
+ {.name = "TX_QUEUE_CONSUMER", .test_func = testapp_tx_queue_consumer},
+};
+
+static const struct test_spec ci_skip_tests[] = {
+ /* Flaky tests */
+ {.name = "XDP_ADJUST_TAIL_SHRINK_MULTI_BUFF", .test_func = testapp_adjust_tail_shrink_mb},
+ {.name = "XDP_ADJUST_TAIL_GROW", .test_func = testapp_adjust_tail_grow},
+ {.name = "XDP_ADJUST_TAIL_GROW_MULTI_BUFF", .test_func = testapp_adjust_tail_grow_mb},
+ {.name = "SEND_RECEIVE_9K_PACKETS", .test_func = testapp_send_receive_mb},
+ /* Tests with huge page dependency */
+ {.name = "SEND_RECEIVE_UNALIGNED", .test_func = testapp_send_receive_unaligned},
+ {.name = "UNALIGNED_INV_DESC", .test_func = testapp_unaligned_inv_desc},
+ {.name = "UNALIGNED_INV_DESC_4001_FRAME_SIZE",
+ .test_func = testapp_unaligned_inv_desc_4001_frame},
+ {.name = "SEND_RECEIVE_UNALIGNED_9K_PACKETS",
+ .test_func = testapp_send_receive_unaligned_mb},
+ {.name = "UNALIGNED_INV_DESC_MULTI_BUFF", .test_func = testapp_unaligned_inv_desc_mb},
+ /* Test with HW ring size dependency */
+ {.name = "HW_SW_MIN_RING_SIZE", .test_func = testapp_hw_sw_min_ring_size},
+ {.name = "HW_SW_MAX_RING_SIZE", .test_func = testapp_hw_sw_max_ring_size},
+ /* Too long test */
+ {.name = "TEARDOWN", .test_func = testapp_teardown},
+};
+
+#endif /* TEST_XSK_H_ */
diff --git a/tools/testing/selftests/bpf/prog_tests/timer.c b/tools/testing/selftests/bpf/prog_tests/timer.c
index d66687f1ee6a..34f9ccce2602 100644
--- a/tools/testing/selftests/bpf/prog_tests/timer.c
+++ b/tools/testing/selftests/bpf/prog_tests/timer.c
@@ -3,6 +3,7 @@
#include <test_progs.h>
#include "timer.skel.h"
#include "timer_failure.skel.h"
+#include "timer_interrupt.skel.h"
#define NUM_THR 8
@@ -86,6 +87,10 @@ void serial_test_timer(void)
int err;
timer_skel = timer__open_and_load();
+ if (!timer_skel && errno == EOPNOTSUPP) {
+ test__skip();
+ return;
+ }
if (!ASSERT_OK_PTR(timer_skel, "timer_skel_load"))
return;
@@ -95,3 +100,36 @@ void serial_test_timer(void)
RUN_TESTS(timer_failure);
}
+
+void test_timer_interrupt(void)
+{
+ struct timer_interrupt *skel = NULL;
+ int err, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+
+ skel = timer_interrupt__open_and_load();
+ if (!skel && errno == EOPNOTSUPP) {
+ test__skip();
+ return;
+ }
+ if (!ASSERT_OK_PTR(skel, "timer_interrupt__open_and_load"))
+ return;
+
+ err = timer_interrupt__attach(skel);
+ if (!ASSERT_OK(err, "timer_interrupt__attach"))
+ goto out;
+
+ prog_fd = bpf_program__fd(skel->progs.test_timer_interrupt);
+ err = bpf_prog_test_run_opts(prog_fd, &opts);
+ if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
+ goto out;
+
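+	/* give the timer callback time to fire in interrupt context */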
+ usleep(50);
+
+ ASSERT_EQ(skel->bss->in_interrupt, 0, "in_interrupt");
+ if (skel->bss->preempt_count)
+ ASSERT_NEQ(skel->bss->in_interrupt_cb, 0, "in_interrupt_cb");
+
+out:
+ timer_interrupt__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/timer_crash.c b/tools/testing/selftests/bpf/prog_tests/timer_crash.c
index f74b82305da8..b841597c8a3a 100644
--- a/tools/testing/selftests/bpf/prog_tests/timer_crash.c
+++ b/tools/testing/selftests/bpf/prog_tests/timer_crash.c
@@ -12,6 +12,10 @@ static void test_timer_crash_mode(int mode)
struct timer_crash *skel;
skel = timer_crash__open_and_load();
+ if (!skel && errno == EOPNOTSUPP) {
+ test__skip();
+ return;
+ }
if (!ASSERT_OK_PTR(skel, "timer_crash__open_and_load"))
return;
skel->bss->pid = getpid();
diff --git a/tools/testing/selftests/bpf/prog_tests/timer_lockup.c b/tools/testing/selftests/bpf/prog_tests/timer_lockup.c
index 1a2f99596916..eb303fa1e09a 100644
--- a/tools/testing/selftests/bpf/prog_tests/timer_lockup.c
+++ b/tools/testing/selftests/bpf/prog_tests/timer_lockup.c
@@ -59,6 +59,10 @@ void test_timer_lockup(void)
}
skel = timer_lockup__open_and_load();
+ if (!skel && errno == EOPNOTSUPP) {
+ test__skip();
+ return;
+ }
if (!ASSERT_OK_PTR(skel, "timer_lockup__open_and_load"))
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/timer_mim.c b/tools/testing/selftests/bpf/prog_tests/timer_mim.c
index 9ff7843909e7..c930c7d7105b 100644
--- a/tools/testing/selftests/bpf/prog_tests/timer_mim.c
+++ b/tools/testing/selftests/bpf/prog_tests/timer_mim.c
@@ -65,6 +65,10 @@ void serial_test_timer_mim(void)
goto cleanup;
timer_skel = timer_mim__open_and_load();
+ if (!timer_skel && errno == EOPNOTSUPP) {
+ test__skip();
+ return;
+ }
if (!ASSERT_OK_PTR(timer_skel, "timer_skel_load"))
goto cleanup;
diff --git a/tools/testing/selftests/bpf/prog_tests/token.c b/tools/testing/selftests/bpf/prog_tests/token.c
index f9392df23f8a..b81dde283052 100644
--- a/tools/testing/selftests/bpf/prog_tests/token.c
+++ b/tools/testing/selftests/bpf/prog_tests/token.c
@@ -115,7 +115,7 @@ static int create_bpffs_fd(void)
static int materialize_bpffs_fd(int fs_fd, struct bpffs_opts *opts)
{
- int mnt_fd, err;
+ int err;
/* set up token delegation mount options */
err = set_delegate_mask(fs_fd, "delegate_cmds", opts->cmds, opts->cmds_str);
@@ -136,12 +136,7 @@ static int materialize_bpffs_fd(int fs_fd, struct bpffs_opts *opts)
if (err < 0)
return -errno;
- /* create O_PATH fd for detached mount */
- mnt_fd = sys_fsmount(fs_fd, 0, 0);
- if (err < 0)
- return -errno;
-
- return mnt_fd;
+ return 0;
}
/* send FD over Unix domain (AF_UNIX) socket */
@@ -287,6 +282,7 @@ static void child(int sock_fd, struct bpffs_opts *opts, child_callback_fn callba
{
int mnt_fd = -1, fs_fd = -1, err = 0, bpffs_fd = -1, token_fd = -1;
struct token_lsm *lsm_skel = NULL;
+ char one;
/* load and attach LSM "policy" before we go into unpriv userns */
lsm_skel = token_lsm__open_and_load();
@@ -333,13 +329,19 @@ static void child(int sock_fd, struct bpffs_opts *opts, child_callback_fn callba
err = sendfd(sock_fd, fs_fd);
if (!ASSERT_OK(err, "send_fs_fd"))
goto cleanup;
- zclose(fs_fd);
+
+ /* wait until the parent has read the fd, done the fsconfig() calls,
+ * and signaled us that it is done
+ */
+ err = read(sock_fd, &one, sizeof(one));
+ if (!ASSERT_GE(err, 0, "read_one"))
+ goto cleanup;
/* avoid mucking around with mount namespaces and mounting at
- * well-known path, just get detach-mounted BPF FS fd back from parent
+ * well-known path, just create O_PATH fd for detached mount
*/
- err = recvfd(sock_fd, &mnt_fd);
- if (!ASSERT_OK(err, "recv_mnt_fd"))
+ mnt_fd = sys_fsmount(fs_fd, 0, 0);
+ if (!ASSERT_OK_FD(mnt_fd, "mnt_fd"))
goto cleanup;
/* try to fspick() BPF FS and try to add some delegation options */
@@ -429,24 +431,24 @@ again:
static void parent(int child_pid, struct bpffs_opts *bpffs_opts, int sock_fd)
{
- int fs_fd = -1, mnt_fd = -1, token_fd = -1, err;
+ int fs_fd = -1, token_fd = -1, err;
+ char one = 1;
err = recvfd(sock_fd, &fs_fd);
if (!ASSERT_OK(err, "recv_bpffs_fd"))
goto cleanup;
- mnt_fd = materialize_bpffs_fd(fs_fd, bpffs_opts);
- if (!ASSERT_GE(mnt_fd, 0, "materialize_bpffs_fd")) {
+ err = materialize_bpffs_fd(fs_fd, bpffs_opts);
+ if (!ASSERT_GE(err, 0, "materialize_bpffs_fd")) {
err = -EINVAL;
goto cleanup;
}
- zclose(fs_fd);
- /* pass BPF FS context object to parent */
- err = sendfd(sock_fd, mnt_fd);
- if (!ASSERT_OK(err, "send_mnt_fd"))
+ /* notify the child that we did the fsconfig() calls and it can proceed. */
+ err = write(sock_fd, &one, sizeof(one));
+ if (!ASSERT_EQ(err, sizeof(one), "send_one"))
goto cleanup;
- zclose(mnt_fd);
+ zclose(fs_fd);
/* receive BPF token FD back from child for some extra tests */
err = recvfd(sock_fd, &token_fd);
@@ -459,7 +461,6 @@ static void parent(int child_pid, struct bpffs_opts *bpffs_opts, int sock_fd)
cleanup:
zclose(sock_fd);
zclose(fs_fd);
- zclose(mnt_fd);
zclose(token_fd);
if (child_pid > 0)
@@ -1046,6 +1047,41 @@ err_out:
#define bit(n) (1ULL << (n))
+static int userns_bpf_token_info(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ int err, token_fd = -1;
+ struct bpf_token_info info;
+ u32 len = sizeof(struct bpf_token_info);
+
+ /* create BPF token from BPF FS mount */
+ token_fd = bpf_token_create(mnt_fd, NULL);
+ if (!ASSERT_GT(token_fd, 0, "token_create")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ memset(&info, 0, len);
+ err = bpf_obj_get_info_by_fd(token_fd, &info, &len);
+ if (!ASSERT_OK(err, "bpf_obj_get_token_info"))
+ goto cleanup;
+ if (!ASSERT_EQ(info.allowed_cmds, bit(BPF_MAP_CREATE), "token_info_cmds_map_create")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+ if (!ASSERT_EQ(info.allowed_progs, bit(BPF_PROG_TYPE_XDP), "token_info_progs_xdp")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* BPF_PROG_TYPE_EXT is not granted by the token, so the masks must differ */
+ if (!ASSERT_NEQ(info.allowed_progs, bit(BPF_PROG_TYPE_EXT), "token_info_progs_ext"))
+ err = -EINVAL;
+
+cleanup:
+ zclose(token_fd);
+ return err;
+}
+
void test_token(void)
{
if (test__start_subtest("map_token")) {
@@ -1149,4 +1185,13 @@ void test_token(void)
subtest_userns(&opts, userns_obj_priv_implicit_token_envvar);
}
+ if (test__start_subtest("bpf_token_info")) {
+ struct bpffs_opts opts = {
+ .cmds = bit(BPF_MAP_CREATE),
+ .progs = bit(BPF_PROG_TYPE_XDP),
+ .attachs = ~0ULL,
+ };
+
+ subtest_userns(&opts, userns_bpf_token_info);
+ }
}
diff --git a/tools/testing/selftests/bpf/prog_tests/tracing_failure.c b/tools/testing/selftests/bpf/prog_tests/tracing_failure.c
index a222df765bc3..10e231965589 100644
--- a/tools/testing/selftests/bpf/prog_tests/tracing_failure.c
+++ b/tools/testing/selftests/bpf/prog_tests/tracing_failure.c
@@ -28,10 +28,62 @@ out:
tracing_failure__destroy(skel);
}
+static void test_tracing_fail_prog(const char *prog_name, const char *exp_msg)
+{
+ struct tracing_failure *skel;
+ struct bpf_program *prog;
+ char log_buf[256];
+ int err;
+
+ skel = tracing_failure__open();
+ if (!ASSERT_OK_PTR(skel, "tracing_failure__open"))
+ return;
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto out;
+
+ bpf_program__set_autoload(prog, true);
+ bpf_program__set_log_buf(prog, log_buf, sizeof(log_buf));
+
+ err = tracing_failure__load(skel);
+ if (!ASSERT_ERR(err, "tracing_failure__load"))
+ goto out;
+
+ ASSERT_HAS_SUBSTR(log_buf, exp_msg, "log_buf");
+out:
+ tracing_failure__destroy(skel);
+}
+
+static void test_tracing_deny(void)
+{
+ int btf_id;
+
+ /* __rcu_read_lock depends on CONFIG_PREEMPT_RCU */
+ btf_id = libbpf_find_vmlinux_btf_id("__rcu_read_lock", BPF_TRACE_FENTRY);
+ if (btf_id <= 0) {
+ test__skip();
+ return;
+ }
+
+ test_tracing_fail_prog("tracing_deny",
+ "Attaching tracing programs to function '__rcu_read_lock' is rejected.");
+}
+
+static void test_fexit_noreturns(void)
+{
+ test_tracing_fail_prog("fexit_noreturns",
+ "Attaching fexit/fmod_ret to __noreturn function 'do_exit' is rejected.");
+}
+
void test_tracing_failure(void)
{
if (test__start_subtest("bpf_spin_lock"))
test_bpf_spin_lock(true);
if (test__start_subtest("bpf_spin_unlock"))
test_bpf_spin_lock(false);
+ if (test__start_subtest("tracing_deny"))
+ test_tracing_deny();
+ if (test__start_subtest("fexit_noreturns"))
+ test_fexit_noreturns();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c
index 19e68d4b3532..6f8c0bfb0415 100644
--- a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c
+++ b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c
@@ -112,10 +112,39 @@ destroy_skel:
tracing_struct_many_args__destroy(skel);
}
+static void test_union_args(void)
+{
+ struct tracing_struct *skel;
+ int err;
+
+ skel = tracing_struct__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "tracing_struct__open_and_load"))
+ return;
+
+ err = tracing_struct__attach(skel);
+ if (!ASSERT_OK(err, "tracing_struct__attach"))
+ goto out;
+
+ ASSERT_OK(trigger_module_test_read(256), "trigger_read");
+
+ ASSERT_EQ(skel->bss->ut1_a_a, 1, "ut1:a.arg.a");
+ ASSERT_EQ(skel->bss->ut1_b, 4, "ut1:b");
+ ASSERT_EQ(skel->bss->ut1_c, 5, "ut1:c");
+
+ ASSERT_EQ(skel->bss->ut2_a, 6, "ut2:a");
+ ASSERT_EQ(skel->bss->ut2_b_a, 2, "ut2:b.arg.a");
+ ASSERT_EQ(skel->bss->ut2_b_b, 3, "ut2:b.arg.b");
+
+out:
+ tracing_struct__destroy(skel);
+}
+
void test_tracing_struct(void)
{
if (test__start_subtest("struct_args"))
test_struct_args();
if (test__start_subtest("struct_many_args"))
test_struct_many_args();
+ if (test__start_subtest("union_args"))
+ test_union_args();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe.c b/tools/testing/selftests/bpf/prog_tests/uprobe.c
index cf3e0e7a64fa..86404476c1da 100644
--- a/tools/testing/selftests/bpf/prog_tests/uprobe.c
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2023 Hengqi Chen */
#include <test_progs.h>
+#include <asm/ptrace.h>
#include "test_uprobe.skel.h"
static FILE *urand_spawn(int *pid)
@@ -33,7 +34,7 @@ static int urand_trigger(FILE **urand_pipe)
return exit_code;
}
-void test_uprobe(void)
+static void test_uprobe_attach(void)
{
LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
struct test_uprobe *skel;
@@ -93,3 +94,156 @@ cleanup:
pclose(urand_pipe);
test_uprobe__destroy(skel);
}
+
+#ifdef __x86_64__
+__naked __maybe_unused unsigned long uprobe_regs_change_trigger(void)
+{
+ asm volatile (
+ "ret\n"
+ );
+}
+
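+/* Snapshot registers into *before, call the trigger, then snapshot into *after;
+ * the offsets match the x86-64 struct pt_regs layout.
+ */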
+static __naked void uprobe_regs_change(struct pt_regs *before, struct pt_regs *after)
+{
+ asm volatile (
+ "movq %r11, 48(%rdi)\n"
+ "movq %r10, 56(%rdi)\n"
+ "movq %r9, 64(%rdi)\n"
+ "movq %r8, 72(%rdi)\n"
+ "movq %rax, 80(%rdi)\n"
+ "movq %rcx, 88(%rdi)\n"
+ "movq %rdx, 96(%rdi)\n"
+ "movq %rsi, 104(%rdi)\n"
+ "movq %rdi, 112(%rdi)\n"
+
+ /* save 2nd argument */
+ "pushq %rsi\n"
+ "call uprobe_regs_change_trigger\n"
+
+ /* save return value and load 2nd argument pointer to rax */
+ "pushq %rax\n"
+ "movq 8(%rsp), %rax\n"
+
+ "movq %r11, 48(%rax)\n"
+ "movq %r10, 56(%rax)\n"
+ "movq %r9, 64(%rax)\n"
+ "movq %r8, 72(%rax)\n"
+ "movq %rcx, 88(%rax)\n"
+ "movq %rdx, 96(%rax)\n"
+ "movq %rsi, 104(%rax)\n"
+ "movq %rdi, 112(%rax)\n"
+
+ /* restore return value and 2nd argument */
+ "pop %rax\n"
+ "pop %rsi\n"
+
+ "movq %rax, 80(%rsi)\n"
+ "ret\n"
+ );
+}
+
+static void regs_common(void)
+{
+ struct pt_regs before = {}, after = {}, expected = {
+ .rax = 0xc0ffe,
+ .rcx = 0xbad,
+ .rdx = 0xdead,
+ .r8 = 0x8,
+ .r9 = 0x9,
+ .r10 = 0x10,
+ .r11 = 0x11,
+ .rdi = 0x12,
+ .rsi = 0x13,
+ };
+ LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
+ struct test_uprobe *skel;
+
+ skel = test_uprobe__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->bss->my_pid = getpid();
+ skel->bss->regs = expected;
+
+ uprobe_opts.func_name = "uprobe_regs_change_trigger";
+ skel->links.test_regs_change = bpf_program__attach_uprobe_opts(skel->progs.test_regs_change,
+ -1,
+ "/proc/self/exe",
+ 0 /* offset */,
+ &uprobe_opts);
+ if (!ASSERT_OK_PTR(skel->links.test_regs_change, "bpf_program__attach_uprobe_opts"))
+ goto cleanup;
+
+ uprobe_regs_change(&before, &after);
+
+ ASSERT_EQ(after.rax, expected.rax, "ax");
+ ASSERT_EQ(after.rcx, expected.rcx, "cx");
+ ASSERT_EQ(after.rdx, expected.rdx, "dx");
+ ASSERT_EQ(after.r8, expected.r8, "r8");
+ ASSERT_EQ(after.r9, expected.r9, "r9");
+ ASSERT_EQ(after.r10, expected.r10, "r10");
+ ASSERT_EQ(after.r11, expected.r11, "r11");
+ ASSERT_EQ(after.rdi, expected.rdi, "rdi");
+ ASSERT_EQ(after.rsi, expected.rsi, "rsi");
+
+cleanup:
+ test_uprobe__destroy(skel);
+}
+
+static noinline unsigned long uprobe_regs_change_ip_1(void)
+{
+ return 0xc0ffee;
+}
+
+static noinline unsigned long uprobe_regs_change_ip_2(void)
+{
+ return 0xdeadbeef;
+}
+
+static void regs_ip(void)
+{
+ LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
+ struct test_uprobe *skel;
+ unsigned long ret;
+
+ skel = test_uprobe__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->bss->my_pid = getpid();
+ skel->bss->ip = (unsigned long) uprobe_regs_change_ip_2;
+
+ uprobe_opts.func_name = "uprobe_regs_change_ip_1";
+ skel->links.test_regs_change_ip = bpf_program__attach_uprobe_opts(
+ skel->progs.test_regs_change_ip,
+ -1,
+ "/proc/self/exe",
+ 0 /* offset */,
+ &uprobe_opts);
+ if (!ASSERT_OK_PTR(skel->links.test_regs_change_ip, "bpf_program__attach_uprobe_opts"))
+ goto cleanup;
+
+ ret = uprobe_regs_change_ip_1();
+ ASSERT_EQ(ret, 0xdeadbeef, "ret");
+
+cleanup:
+ test_uprobe__destroy(skel);
+}
+
+static void test_uprobe_regs_change(void)
+{
+ if (test__start_subtest("regs_change_common"))
+ regs_common();
+ if (test__start_subtest("regs_change_ip"))
+ regs_ip();
+}
+#else
+static void test_uprobe_regs_change(void) { }
+#endif
+
+void test_uprobe(void)
+{
+ if (test__start_subtest("attach"))
+ test_uprobe_attach();
+ test_uprobe_regs_change();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c
index c397336fe1ed..955a37751b52 100644
--- a/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c
@@ -8,22 +8,31 @@
#include <asm/ptrace.h>
#include <linux/compiler.h>
#include <linux/stringify.h>
+#include <linux/kernel.h>
#include <sys/wait.h>
#include <sys/syscall.h>
#include <sys/prctl.h>
#include <asm/prctl.h>
#include "uprobe_syscall.skel.h"
#include "uprobe_syscall_executed.skel.h"
+#include "bpf/libbpf_internal.h"
-__naked unsigned long uretprobe_regs_trigger(void)
+#define USDT_NOP .byte 0x0f, 0x1f, 0x44, 0x00, 0x00
+#include "usdt.h"
+
+#pragma GCC diagnostic ignored "-Wattributes"
+
+__attribute__((aligned(16)))
+__nocf_check __weak __naked unsigned long uprobe_regs_trigger(void)
{
asm volatile (
+ ".byte 0x0f, 0x1f, 0x44, 0x00, 0x00\n" /* nop5 */
"movq $0xdeadbeef, %rax\n"
"ret\n"
);
}
-__naked void uretprobe_regs(struct pt_regs *before, struct pt_regs *after)
+__naked void uprobe_regs(struct pt_regs *before, struct pt_regs *after)
{
asm volatile (
"movq %r15, 0(%rdi)\n"
@@ -44,15 +53,17 @@ __naked void uretprobe_regs(struct pt_regs *before, struct pt_regs *after)
"movq $0, 120(%rdi)\n" /* orig_rax */
"movq $0, 128(%rdi)\n" /* rip */
"movq $0, 136(%rdi)\n" /* cs */
+ "pushq %rax\n"
"pushf\n"
"pop %rax\n"
"movq %rax, 144(%rdi)\n" /* eflags */
+ "pop %rax\n"
"movq %rsp, 152(%rdi)\n" /* rsp */
"movq $0, 160(%rdi)\n" /* ss */
/* save 2nd argument */
"pushq %rsi\n"
- "call uretprobe_regs_trigger\n"
+ "call uprobe_regs_trigger\n"
/* save return value and load 2nd argument pointer to rax */
"pushq %rax\n"
@@ -92,25 +103,37 @@ __naked void uretprobe_regs(struct pt_regs *before, struct pt_regs *after)
);
}
-static void test_uretprobe_regs_equal(void)
+static void test_uprobe_regs_equal(bool retprobe)
{
+ LIBBPF_OPTS(bpf_uprobe_opts, opts,
+ .retprobe = retprobe,
+ );
struct uprobe_syscall *skel = NULL;
struct pt_regs before = {}, after = {};
unsigned long *pb = (unsigned long *) &before;
unsigned long *pa = (unsigned long *) &after;
unsigned long *pp;
+ unsigned long offset;
unsigned int i, cnt;
- int err;
+
+ offset = get_uprobe_offset(&uprobe_regs_trigger);
+ if (!ASSERT_GE(offset, 0, "get_uprobe_offset"))
+ return;
skel = uprobe_syscall__open_and_load();
if (!ASSERT_OK_PTR(skel, "uprobe_syscall__open_and_load"))
goto cleanup;
- err = uprobe_syscall__attach(skel);
- if (!ASSERT_OK(err, "uprobe_syscall__attach"))
+ skel->links.probe = bpf_program__attach_uprobe_opts(skel->progs.probe,
+ 0, "/proc/self/exe", offset, &opts);
+ if (!ASSERT_OK_PTR(skel->links.probe, "bpf_program__attach_uprobe_opts"))
goto cleanup;
- uretprobe_regs(&before, &after);
+ /* make sure uprobe gets optimized */
+ if (!retprobe)
+ uprobe_regs_trigger();
+
+ uprobe_regs(&before, &after);
pp = (unsigned long *) &skel->bss->regs;
cnt = sizeof(before)/sizeof(*pb);
@@ -119,7 +142,7 @@ static void test_uretprobe_regs_equal(void)
unsigned int offset = i * sizeof(unsigned long);
/*
- * Check register before and after uretprobe_regs_trigger call
+ * Check register before and after uprobe_regs_trigger call
* that triggers the uretprobe.
*/
switch (offset) {
@@ -133,7 +156,7 @@ static void test_uretprobe_regs_equal(void)
/*
* Check register seen from bpf program and register after
- * uretprobe_regs_trigger call
+ * uprobe_regs_trigger call (with rax exception, check below).
*/
switch (offset) {
/*
@@ -146,6 +169,15 @@ static void test_uretprobe_regs_equal(void)
case offsetof(struct pt_regs, rsp):
case offsetof(struct pt_regs, ss):
break;
+ /*
+ * uprobe does not see return value in rax, it needs to see the
+ * original (before) rax value
+ */
+ case offsetof(struct pt_regs, rax):
+ if (!retprobe) {
+ ASSERT_EQ(pp[i], pb[i], "uprobe rax prog-before value check");
+ break;
+ }
default:
if (!ASSERT_EQ(pp[i], pa[i], "register prog-after value check"))
fprintf(stdout, "failed register offset %u\n", offset);
@@ -175,7 +207,7 @@ static int write_bpf_testmod_uprobe(unsigned long offset)
return ret != n ? (int) ret : 0;
}
-static void test_uretprobe_regs_change(void)
+static void test_regs_change(void)
{
struct pt_regs before = {}, after = {};
unsigned long *pb = (unsigned long *) &before;
@@ -183,13 +215,16 @@ static void test_uretprobe_regs_change(void)
unsigned long cnt = sizeof(before)/sizeof(*pb);
unsigned int i, err, offset;
- offset = get_uprobe_offset(uretprobe_regs_trigger);
+ offset = get_uprobe_offset(uprobe_regs_trigger);
err = write_bpf_testmod_uprobe(offset);
if (!ASSERT_OK(err, "register_uprobe"))
return;
- uretprobe_regs(&before, &after);
+ /* make sure uprobe gets optimized */
+ uprobe_regs_trigger();
+
+ uprobe_regs(&before, &after);
err = write_bpf_testmod_uprobe(0);
if (!ASSERT_OK(err, "unregister_uprobe"))
@@ -251,7 +286,8 @@ static void test_uretprobe_syscall_call(void)
.retprobe = true,
);
struct uprobe_syscall_executed *skel;
- int pid, status, err, go[2], c;
+ int pid, status, err, go[2], c = 0;
+ struct bpf_link *link;
if (!ASSERT_OK(pipe(go), "pipe"))
return;
@@ -277,11 +313,14 @@ static void test_uretprobe_syscall_call(void)
_exit(0);
}
- skel->links.test = bpf_program__attach_uprobe_multi(skel->progs.test, pid,
- "/proc/self/exe",
- "uretprobe_syscall_call", &opts);
- if (!ASSERT_OK_PTR(skel->links.test, "bpf_program__attach_uprobe_multi"))
+ skel->bss->pid = pid;
+
+ link = bpf_program__attach_uprobe_multi(skel->progs.test_uretprobe_multi,
+ pid, "/proc/self/exe",
+ "uretprobe_syscall_call", &opts);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_uprobe_multi"))
goto cleanup;
+ skel->links.test_uretprobe_multi = link;
/* kick the child */
write(go[1], &c, 1);
@@ -301,6 +340,256 @@ cleanup:
close(go[0]);
}
+#define TRAMP "[uprobes-trampoline]"
+
+__attribute__((aligned(16)))
+__nocf_check __weak __naked void uprobe_test(void)
+{
+ asm volatile (" \n"
+ ".byte 0x0f, 0x1f, 0x44, 0x00, 0x00 \n"
+ "ret \n"
+ );
+}
+
+__attribute__((aligned(16)))
+__nocf_check __weak void usdt_test(void)
+{
+ USDT(optimized_uprobe, usdt);
+}
+
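+/* Scan /proc/self/maps for an executable "[uprobes-trampoline]" mapping starting at tramp_addr */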
+static int find_uprobes_trampoline(void *tramp_addr)
+{
+ void *start, *end;
+ char line[128];
+ int ret = -1;
+ FILE *maps;
+
+ maps = fopen("/proc/self/maps", "r");
+ if (!maps) {
+ fprintf(stderr, "cannot open maps\n");
+ return -1;
+ }
+
+ while (fgets(line, sizeof(line), maps)) {
+ int m = -1;
+
+ /* We care only about private r-x mappings. */
+ if (sscanf(line, "%p-%p r-xp %*x %*x:%*x %*u %n", &start, &end, &m) != 2)
+ continue;
+ if (m < 0)
+ continue;
+ if (!strncmp(&line[m], TRAMP, sizeof(TRAMP)-1) && (start == tramp_addr)) {
+ ret = 0;
+ break;
+ }
+ }
+
+ fclose(maps);
+ return ret;
+}
+
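+/* 5-byte NOP sequence that marks an optimizable uprobe patch site */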
+static unsigned char nop5[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };
+
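+/* Find the 5-byte NOP starting within the first 10 bytes of fn */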
+static void *find_nop5(void *fn)
+{
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ if (!memcmp(nop5, fn + i, 5))
+ return fn + i;
+ }
+ return NULL;
+}
+
+typedef void (__attribute__((nocf_check)) *trigger_t)(void);
+
+static void *check_attach(struct uprobe_syscall_executed *skel, trigger_t trigger,
+ void *addr, int executed)
+{
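+ /* x86-64 near call: opcode 0xe8 followed by a signed 32-bit relative displacement */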
+ struct __arch_relative_insn {
+ __u8 op;
+ __s32 raddr;
+ } __packed *call;
+ void *tramp = NULL;
+
+ /* The uprobe gets optimized after the first hit, so trigger twice. */
+ trigger();
+ trigger();
+
+ /* Make sure bpf program got executed.. */
+ ASSERT_EQ(skel->bss->executed, executed, "executed");
+
+ /* .. and check the trampoline is as expected. */
+ call = (struct __arch_relative_insn *) addr;
+ tramp = (void *) (call + 1) + call->raddr;
+ ASSERT_EQ(call->op, 0xe8, "call");
+ ASSERT_OK(find_uprobes_trampoline(tramp), "uprobes_trampoline");
+
+ return tramp;
+}
+
+static void check_detach(void *addr, void *tramp)
+{
+ /* [uprobes_trampoline] stays after detach */
+ ASSERT_OK(find_uprobes_trampoline(tramp), "uprobes_trampoline");
+ ASSERT_OK(memcmp(addr, nop5, 5), "nop5");
+}
+
+static void check(struct uprobe_syscall_executed *skel, struct bpf_link *link,
+ trigger_t trigger, void *addr, int executed)
+{
+ void *tramp;
+
+ tramp = check_attach(skel, trigger, addr, executed);
+ bpf_link__destroy(link);
+ check_detach(addr, tramp);
+}
+
+static void test_uprobe_legacy(void)
+{
+ struct uprobe_syscall_executed *skel = NULL;
+ LIBBPF_OPTS(bpf_uprobe_opts, opts,
+ .retprobe = true,
+ );
+ struct bpf_link *link;
+ unsigned long offset;
+
+ offset = get_uprobe_offset(&uprobe_test);
+ if (!ASSERT_GE(offset, 0, "get_uprobe_offset"))
+ goto cleanup;
+
+ /* uprobe */
+ skel = uprobe_syscall_executed__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_syscall_executed__open_and_load"))
+ return;
+
+ skel->bss->pid = getpid();
+
+ link = bpf_program__attach_uprobe_opts(skel->progs.test_uprobe,
+ 0, "/proc/self/exe", offset, NULL);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_uprobe_opts"))
+ goto cleanup;
+
+ check(skel, link, uprobe_test, uprobe_test, 2);
+
+ /* uretprobe */
+ skel->bss->executed = 0;
+
+ link = bpf_program__attach_uprobe_opts(skel->progs.test_uretprobe,
+ 0, "/proc/self/exe", offset, &opts);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_uprobe_opts"))
+ goto cleanup;
+
+ check(skel, link, uprobe_test, uprobe_test, 2);
+
+cleanup:
+ uprobe_syscall_executed__destroy(skel);
+}
+
+static void test_uprobe_multi(void)
+{
+ struct uprobe_syscall_executed *skel = NULL;
+ LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
+ struct bpf_link *link;
+ unsigned long offset;
+
+ offset = get_uprobe_offset(&uprobe_test);
+ if (!ASSERT_GE(offset, 0, "get_uprobe_offset"))
+ goto cleanup;
+
+ opts.offsets = &offset;
+ opts.cnt = 1;
+
+ skel = uprobe_syscall_executed__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_syscall_executed__open_and_load"))
+ return;
+
+ skel->bss->pid = getpid();
+
+ /* uprobe.multi */
+ link = bpf_program__attach_uprobe_multi(skel->progs.test_uprobe_multi,
+ 0, "/proc/self/exe", NULL, &opts);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_uprobe_multi"))
+ goto cleanup;
+
+ check(skel, link, uprobe_test, uprobe_test, 2);
+
+ /* uretprobe.multi */
+ skel->bss->executed = 0;
+ opts.retprobe = true;
+ link = bpf_program__attach_uprobe_multi(skel->progs.test_uretprobe_multi,
+ 0, "/proc/self/exe", NULL, &opts);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_uprobe_multi"))
+ goto cleanup;
+
+ check(skel, link, uprobe_test, uprobe_test, 2);
+
+cleanup:
+ uprobe_syscall_executed__destroy(skel);
+}
+
+static void test_uprobe_session(void)
+{
+ struct uprobe_syscall_executed *skel = NULL;
+ LIBBPF_OPTS(bpf_uprobe_multi_opts, opts,
+ .session = true,
+ );
+ struct bpf_link *link;
+ unsigned long offset;
+
+ offset = get_uprobe_offset(&uprobe_test);
+ if (!ASSERT_GE(offset, 0, "get_uprobe_offset"))
+ goto cleanup;
+
+ opts.offsets = &offset;
+ opts.cnt = 1;
+
+ skel = uprobe_syscall_executed__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_syscall_executed__open_and_load"))
+ return;
+
+ skel->bss->pid = getpid();
+
+ link = bpf_program__attach_uprobe_multi(skel->progs.test_uprobe_session,
+ 0, "/proc/self/exe", NULL, &opts);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_uprobe_multi"))
+ goto cleanup;
+
+ check(skel, link, uprobe_test, uprobe_test, 4);
+
+cleanup:
+ uprobe_syscall_executed__destroy(skel);
+}
+
+static void test_uprobe_usdt(void)
+{
+ struct uprobe_syscall_executed *skel;
+ struct bpf_link *link;
+ void *addr;
+
+ errno = 0;
+ addr = find_nop5(usdt_test);
+ if (!ASSERT_OK_PTR(addr, "find_nop5"))
+ return;
+
+ skel = uprobe_syscall_executed__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_syscall_executed__open_and_load"))
+ return;
+
+ skel->bss->pid = getpid();
+
+ link = bpf_program__attach_usdt(skel->progs.test_usdt,
+ -1 /* all PIDs */, "/proc/self/exe",
+ "optimized_uprobe", "usdt", NULL);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_usdt"))
+ goto cleanup;
+
+ check(skel, link, usdt_test, addr, 2);
+
+cleanup:
+ uprobe_syscall_executed__destroy(skel);
+}
+
/*
* Borrowed from tools/testing/selftests/x86/test_shadow_stack.c.
*
@@ -343,43 +632,172 @@ static void test_uretprobe_shadow_stack(void)
return;
}
- /* Run all of the uretprobe tests. */
- test_uretprobe_regs_equal();
- test_uretprobe_regs_change();
+ /* Run all the tests with shadow stack in place. */
+
+ test_uprobe_regs_equal(false);
+ test_uprobe_regs_equal(true);
test_uretprobe_syscall_call();
+ test_uprobe_legacy();
+ test_uprobe_multi();
+ test_uprobe_session();
+ test_uprobe_usdt();
+
+ test_regs_change();
+
ARCH_PRCTL(ARCH_SHSTK_DISABLE, ARCH_SHSTK_SHSTK);
}
-#else
-static void test_uretprobe_regs_equal(void)
+
+static volatile bool race_stop;
+
+static USDT_DEFINE_SEMA(race);
+
+static void *worker_trigger(void *arg)
{
- test__skip();
+ unsigned long rounds = 0;
+
+ while (!race_stop) {
+ uprobe_test();
+ rounds++;
+ }
+
+ printf("tid %ld trigger rounds: %lu\n", sys_gettid(), rounds);
+ return NULL;
}
-static void test_uretprobe_regs_change(void)
+static void *worker_attach(void *arg)
{
- test__skip();
+ LIBBPF_OPTS(bpf_uprobe_opts, opts);
+ struct uprobe_syscall_executed *skel;
+ unsigned long rounds = 0, offset;
+ const char *sema[2] = {
+ __stringify(USDT_SEMA(race)),
+ NULL,
+ };
+ unsigned long *ref;
+ int err;
+
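+ /* Resolve the USDT semaphore's address so each attach/detach toggles it via ref_ctr_offset */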
+ offset = get_uprobe_offset(&uprobe_test);
+ if (!ASSERT_GE(offset, 0, "get_uprobe_offset"))
+ return NULL;
+
+ err = elf_resolve_syms_offsets("/proc/self/exe", 1, (const char **) &sema, &ref, STT_OBJECT);
+ if (!ASSERT_OK(err, "elf_resolve_syms_offsets_sema"))
+ return NULL;
+
+ opts.ref_ctr_offset = *ref;
+
+ skel = uprobe_syscall_executed__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_syscall_executed__open_and_load"))
+ return NULL;
+
+ skel->bss->pid = getpid();
+
+ while (!race_stop) {
+ skel->links.test_uprobe = bpf_program__attach_uprobe_opts(skel->progs.test_uprobe,
+ 0, "/proc/self/exe", offset, &opts);
+ if (!ASSERT_OK_PTR(skel->links.test_uprobe, "bpf_program__attach_uprobe_opts"))
+ break;
+
+ bpf_link__destroy(skel->links.test_uprobe);
+ skel->links.test_uprobe = NULL;
+ rounds++;
+ }
+
+ printf("tid %ld attach rounds: %lu hits: %d\n", sys_gettid(), rounds, skel->bss->executed);
+ uprobe_syscall_executed__destroy(skel);
+ free(ref);
+ return NULL;
}
-static void test_uretprobe_syscall_call(void)
+static useconds_t race_msec(void)
{
- test__skip();
+ char *env;
+
+ env = getenv("BPF_SELFTESTS_UPROBE_SYSCALL_RACE_MSEC");
+ if (env)
+ return atoi(env);
+
+ /* default duration is 500ms */
+ return 500;
}
-static void test_uretprobe_shadow_stack(void)
+static void test_uprobe_race(void)
{
- test__skip();
+ int err, i, nr_threads;
+ pthread_t *threads;
+
+ nr_threads = libbpf_num_possible_cpus();
+ if (!ASSERT_GT(nr_threads, 0, "libbpf_num_possible_cpus"))
+ return;
+ nr_threads = max(2, nr_threads);
+
+ threads = alloca(sizeof(*threads) * nr_threads);
+ if (!ASSERT_OK_PTR(threads, "malloc"))
+ return;
+
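+ /* Odd-numbered threads hammer the probe site while even-numbered threads attach/detach */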
+ for (i = 0; i < nr_threads; i++) {
+ err = pthread_create(&threads[i], NULL, i % 2 ? worker_trigger : worker_attach,
+ NULL);
+ if (!ASSERT_OK(err, "pthread_create"))
+ goto cleanup;
+ }
+
+ usleep(race_msec() * 1000);
+
+cleanup:
+ race_stop = true;
+ for (nr_threads = i, i = 0; i < nr_threads; i++)
+ pthread_join(threads[i], NULL);
+
+ ASSERT_FALSE(USDT_SEMA_IS_ACTIVE(race), "race_semaphore");
}
+
+#ifndef __NR_uprobe
+#define __NR_uprobe 336
#endif
-void test_uprobe_syscall(void)
+static void test_uprobe_error(void)
+{
+ long err = syscall(__NR_uprobe);
+
+ ASSERT_EQ(err, -1, "error");
+ ASSERT_EQ(errno, ENXIO, "errno");
+}
+
+static void __test_uprobe_syscall(void)
{
if (test__start_subtest("uretprobe_regs_equal"))
- test_uretprobe_regs_equal();
- if (test__start_subtest("uretprobe_regs_change"))
- test_uretprobe_regs_change();
+ test_uprobe_regs_equal(true);
if (test__start_subtest("uretprobe_syscall_call"))
test_uretprobe_syscall_call();
if (test__start_subtest("uretprobe_shadow_stack"))
test_uretprobe_shadow_stack();
+ if (test__start_subtest("uprobe_legacy"))
+ test_uprobe_legacy();
+ if (test__start_subtest("uprobe_multi"))
+ test_uprobe_multi();
+ if (test__start_subtest("uprobe_session"))
+ test_uprobe_session();
+ if (test__start_subtest("uprobe_usdt"))
+ test_uprobe_usdt();
+ if (test__start_subtest("uprobe_race"))
+ test_uprobe_race();
+ if (test__start_subtest("uprobe_error"))
+ test_uprobe_error();
+ if (test__start_subtest("uprobe_regs_equal"))
+ test_uprobe_regs_equal(false);
+ if (test__start_subtest("regs_change"))
+ test_regs_change();
+}
+#else
+static void __test_uprobe_syscall(void)
+{
+ test__skip();
+}
+#endif
+
+void test_uprobe_syscall(void)
+{
+ __test_uprobe_syscall();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/usdt.c b/tools/testing/selftests/bpf/prog_tests/usdt.c
index 495d66414b57..f4be5269fa90 100644
--- a/tools/testing/selftests/bpf/prog_tests/usdt.c
+++ b/tools/testing/selftests/bpf/prog_tests/usdt.c
@@ -40,12 +40,79 @@ static void __always_inline trigger_func(int x) {
}
}
-static void subtest_basic_usdt(void)
+#if defined(__x86_64__) || defined(__i386__)
+/*
+ * SIB (Scale-Index-Base) addressing format: "size@(base_reg, index_reg, scale)"
+ * - 'size' is the size in bytes of the array element, and its sign indicates
+ * whether the type is signed (negative) or unsigned (positive).
+ * - 'base_reg' is the register holding the base address, normally rdx or edx
+ * - 'index_reg' is the register holding the index, normally rax or eax
+ * - 'scale' is the scaling factor (typically 1, 2, 4, or 8), which matches the
+ * size of the element type.
+ *
+ * For example, for an array of 'short' (signed 2-byte elements), the SIB spec would be:
+ * - size: -2 (negative because 'short' is signed)
+ * - scale: 2 (since sizeof(short) == 2)
+ *
+ * The resulting SIB format: "-2@(%%rdx,%%rax,2)" for x86_64, "-2@(%%edx,%%eax,2)" for i386
+ */
+static volatile short array[] = {-1, -2, -3, -4};
+
+#if defined(__x86_64__)
+#define USDT_SIB_ARG_SPEC -2@(%%rdx,%%rax,2)
+#else
+#define USDT_SIB_ARG_SPEC -2@(%%edx,%%eax,2)
+#endif
+
+unsigned short test_usdt_sib_semaphore SEC(".probes");
+
+static void trigger_sib_spec(void)
+{
+ /*
+ * Force SIB addressing with inline assembly.
+ *
+ * You must compile with -std=gnu99 or -std=c99 to use the
+ * STAP_PROBE_ASM macro.
+ *
+ * The STAP_PROBE_ASM macro generates a quoted string that gets
+ * inserted between the surrounding assembly instructions. In this
+ * case, USDT_SIB_ARG_SPEC is embedded directly into the instruction
+ * stream, creating a probe point between the asm statement boundaries.
+ * It works fine with gcc/clang.
+ *
+ * Register constraints:
+ * - "d"(array): Binds the 'array' variable to %rdx or %edx register
+ * - "a"(0): Binds the constant 0 to %rax or %eax register
+ * These ensure that when USDT_SIB_ARG_SPEC references %%rdx(%edx) and
+ * %%rax(%eax), they contain the expected values for SIB addressing.
+ *
+ * The "memory" clobber prevents the compiler from reordering memory
+ * accesses around the probe point, ensuring that the probe behavior
+ * is predictable and consistent.
+ */
+ asm volatile(
+ STAP_PROBE_ASM(test, usdt_sib, USDT_SIB_ARG_SPEC)
+ :
+ : "d"(array), "a"(0)
+ : "memory"
+ );
+}
+#endif
+
+static void subtest_basic_usdt(bool optimized)
{
LIBBPF_OPTS(bpf_usdt_opts, opts);
struct test_usdt *skel;
struct test_usdt__bss *bss;
- int err, i;
+ int err, i, called;
+ const __u64 expected_cookie = 0xcafedeadbeeffeed;
+
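+/* An optimized uprobe is patched on the first hit, so fire twice to exercise the optimized path */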
+#define TRIGGER(x) ({ \
+ trigger_func(x); \
+ if (optimized) \
+ trigger_func(x); \
+ optimized ? 2 : 1; \
+ })
skel = test_usdt__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open"))
@@ -59,20 +126,29 @@ static void subtest_basic_usdt(void)
goto cleanup;
/* usdt0 won't be auto-attached */
- opts.usdt_cookie = 0xcafedeadbeeffeed;
+ opts.usdt_cookie = expected_cookie;
skel->links.usdt0 = bpf_program__attach_usdt(skel->progs.usdt0,
0 /*self*/, "/proc/self/exe",
"test", "usdt0", &opts);
if (!ASSERT_OK_PTR(skel->links.usdt0, "usdt0_link"))
goto cleanup;
- trigger_func(1);
+#if defined(__x86_64__) || defined(__i386__)
+ opts.usdt_cookie = expected_cookie;
+ skel->links.usdt_sib = bpf_program__attach_usdt(skel->progs.usdt_sib,
+ 0 /*self*/, "/proc/self/exe",
+ "test", "usdt_sib", &opts);
+ if (!ASSERT_OK_PTR(skel->links.usdt_sib, "usdt_sib_link"))
+ goto cleanup;
+#endif
+
+ called = TRIGGER(1);
- ASSERT_EQ(bss->usdt0_called, 1, "usdt0_called");
- ASSERT_EQ(bss->usdt3_called, 1, "usdt3_called");
- ASSERT_EQ(bss->usdt12_called, 1, "usdt12_called");
+ ASSERT_EQ(bss->usdt0_called, called, "usdt0_called");
+ ASSERT_EQ(bss->usdt3_called, called, "usdt3_called");
+ ASSERT_EQ(bss->usdt12_called, called, "usdt12_called");
- ASSERT_EQ(bss->usdt0_cookie, 0xcafedeadbeeffeed, "usdt0_cookie");
+ ASSERT_EQ(bss->usdt0_cookie, expected_cookie, "usdt0_cookie");
ASSERT_EQ(bss->usdt0_arg_cnt, 0, "usdt0_arg_cnt");
ASSERT_EQ(bss->usdt0_arg_ret, -ENOENT, "usdt0_arg_ret");
ASSERT_EQ(bss->usdt0_arg_size, -ENOENT, "usdt0_arg_size");
@@ -119,11 +195,11 @@ static void subtest_basic_usdt(void)
* bpf_program__attach_usdt() handles this properly and attaches to
* all possible places of USDT invocation.
*/
- trigger_func(2);
+ called += TRIGGER(2);
- ASSERT_EQ(bss->usdt0_called, 2, "usdt0_called");
- ASSERT_EQ(bss->usdt3_called, 2, "usdt3_called");
- ASSERT_EQ(bss->usdt12_called, 2, "usdt12_called");
+ ASSERT_EQ(bss->usdt0_called, called, "usdt0_called");
+ ASSERT_EQ(bss->usdt3_called, called, "usdt3_called");
+ ASSERT_EQ(bss->usdt12_called, called, "usdt12_called");
/* only check values that depend on trigger_func()'s input value */
ASSERT_EQ(bss->usdt3_args[0], 2, "usdt3_arg1");
@@ -142,9 +218,9 @@ static void subtest_basic_usdt(void)
if (!ASSERT_OK_PTR(skel->links.usdt3, "usdt3_reattach"))
goto cleanup;
- trigger_func(3);
+ called += TRIGGER(3);
- ASSERT_EQ(bss->usdt3_called, 3, "usdt3_called");
+ ASSERT_EQ(bss->usdt3_called, called, "usdt3_called");
/* this time usdt3 has custom cookie */
ASSERT_EQ(bss->usdt3_cookie, 0xBADC00C51E, "usdt3_cookie");
ASSERT_EQ(bss->usdt3_arg_cnt, 3, "usdt3_arg_cnt");
@@ -156,8 +232,19 @@ static void subtest_basic_usdt(void)
ASSERT_EQ(bss->usdt3_args[1], 42, "usdt3_arg2");
ASSERT_EQ(bss->usdt3_args[2], (uintptr_t)&bla, "usdt3_arg3");
+#if defined(__x86_64__) || defined(__i386__)
+ trigger_sib_spec();
+ ASSERT_EQ(bss->usdt_sib_called, 1, "usdt_sib_called");
+ ASSERT_EQ(bss->usdt_sib_cookie, expected_cookie, "usdt_sib_cookie");
+ ASSERT_EQ(bss->usdt_sib_arg_cnt, 1, "usdt_sib_arg_cnt");
+ ASSERT_EQ(bss->usdt_sib_arg, nums[0], "usdt_sib_arg");
+ ASSERT_EQ(bss->usdt_sib_arg_ret, 0, "usdt_sib_arg_ret");
+ ASSERT_EQ(bss->usdt_sib_arg_size, sizeof(nums[0]), "usdt_sib_arg_size");
+#endif
+
cleanup:
test_usdt__destroy(skel);
+#undef TRIGGER
}
unsigned short test_usdt_100_semaphore SEC(".probes");
@@ -270,8 +357,16 @@ static void subtest_multispec_usdt(void)
*/
trigger_300_usdts();
- /* we'll reuse usdt_100 BPF program for usdt_300 test */
bpf_link__destroy(skel->links.usdt_100);
+
+ bss->usdt_100_called = 0;
+ bss->usdt_100_sum = 0;
+
+ /* If built with arm64/clang, far fewer specs are generated for the
+ * usdt_300 call sites.
+ */
+#if !defined(__aarch64__) || !defined(__clang__)
+ /* we'll reuse usdt_100 BPF program for usdt_300 test */
skel->links.usdt_100 = bpf_program__attach_usdt(skel->progs.usdt_100, -1, "/proc/self/exe",
"test", "usdt_300", NULL);
err = -errno;
@@ -282,13 +377,11 @@ static void subtest_multispec_usdt(void)
/* let's check that there are no "dangling" BPF programs attached due
* to partial success of the above test:usdt_300 attachment
*/
- bss->usdt_100_called = 0;
- bss->usdt_100_sum = 0;
-
f300(777); /* this is 301st instance of usdt_300 */
ASSERT_EQ(bss->usdt_100_called, 0, "usdt_301_called");
ASSERT_EQ(bss->usdt_100_sum, 0, "usdt_301_sum");
+#endif
/* This time we have USDT with 400 inlined invocations, but arg specs
* should be the same across all sites, so libbpf will only need to
@@ -419,7 +512,11 @@ cleanup:
void test_usdt(void)
{
if (test__start_subtest("basic"))
- subtest_basic_usdt();
+ subtest_basic_usdt(false);
+#ifdef __x86_64__
+ if (test__start_subtest("basic_optimized"))
+ subtest_basic_usdt(true);
+#endif
if (test__start_subtest("multispec"))
subtest_multispec_usdt();
if (test__start_subtest("urand_auto_attach"))
diff --git a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c
index d424e7ecbd12..9fd3ae987321 100644
--- a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c
+++ b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c
@@ -21,8 +21,7 @@
#include "../progs/test_user_ringbuf.h"
static const long c_sample_size = sizeof(struct sample) + BPF_RINGBUF_HDR_SZ;
-static const long c_ringbuf_size = 1 << 12; /* 1 small page */
-static const long c_max_entries = c_ringbuf_size / c_sample_size;
+static long c_ringbuf_size, c_max_entries;
static void drain_current_samples(void)
{
@@ -424,7 +423,9 @@ static void test_user_ringbuf_loop(void)
uint32_t remaining_samples = total_samples;
int err;
- BUILD_BUG_ON(total_samples <= c_max_entries);
+ if (!ASSERT_LT(c_max_entries, total_samples, "compare_c_max_entries"))
+ return;
+
err = load_skel_create_user_ringbuf(&skel, &ringbuf);
if (err)
return;
@@ -686,6 +687,9 @@ void test_user_ringbuf(void)
{
int i;
+ c_ringbuf_size = getpagesize(); /* 1 page */
+ c_max_entries = c_ringbuf_size / c_sample_size;
+
for (i = 0; i < ARRAY_SIZE(success_tests); i++) {
if (!test__start_subtest(success_tests[i].test_name))
continue;
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index c9da06741104..4b4b081b46cc 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -7,6 +7,7 @@
#include "verifier_arena.skel.h"
#include "verifier_arena_large.skel.h"
#include "verifier_array_access.skel.h"
+#include "verifier_async_cb_context.skel.h"
#include "verifier_basic_stack.skel.h"
#include "verifier_bitfield_write.skel.h"
#include "verifier_bounds.skel.h"
@@ -34,6 +35,7 @@
#include "verifier_global_subprogs.skel.h"
#include "verifier_global_ptr_args.skel.h"
#include "verifier_gotol.skel.h"
+#include "verifier_gotox.skel.h"
#include "verifier_helper_access_var_len.skel.h"
#include "verifier_helper_packet_access.skel.h"
#include "verifier_helper_restricted.skel.h"
@@ -46,6 +48,7 @@
#include "verifier_ldsx.skel.h"
#include "verifier_leak_ptr.skel.h"
#include "verifier_linked_scalars.skel.h"
+#include "verifier_live_stack.skel.h"
#include "verifier_load_acquire.skel.h"
#include "verifier_loops1.skel.h"
#include "verifier_lwt.skel.h"
@@ -59,6 +62,7 @@
#include "verifier_meta_access.skel.h"
#include "verifier_movsx.skel.h"
#include "verifier_mtu.skel.h"
+#include "verifier_mul.skel.h"
#include "verifier_netfilter_ctx.skel.h"
#include "verifier_netfilter_retcode.skel.h"
#include "verifier_bpf_fastcall.skel.h"
@@ -85,6 +89,7 @@
#include "verifier_store_release.skel.h"
#include "verifier_subprog_precision.skel.h"
#include "verifier_subreg.skel.h"
+#include "verifier_tailcall.skel.h"
#include "verifier_tailcall_jit.skel.h"
#include "verifier_typedef.skel.h"
#include "verifier_uninit.skel.h"
@@ -169,6 +174,7 @@ void test_verifier_div_overflow(void) { RUN(verifier_div_overflow); }
void test_verifier_global_subprogs(void) { RUN(verifier_global_subprogs); }
void test_verifier_global_ptr_args(void) { RUN(verifier_global_ptr_args); }
void test_verifier_gotol(void) { RUN(verifier_gotol); }
+void test_verifier_gotox(void) { RUN(verifier_gotox); }
void test_verifier_helper_access_var_len(void) { RUN(verifier_helper_access_var_len); }
void test_verifier_helper_packet_access(void) { RUN(verifier_helper_packet_access); }
void test_verifier_helper_restricted(void) { RUN(verifier_helper_restricted); }
@@ -182,6 +188,7 @@ void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); }
void test_verifier_ldsx(void) { RUN(verifier_ldsx); }
void test_verifier_leak_ptr(void) { RUN(verifier_leak_ptr); }
void test_verifier_linked_scalars(void) { RUN(verifier_linked_scalars); }
+void test_verifier_live_stack(void) { RUN(verifier_live_stack); }
void test_verifier_loops1(void) { RUN(verifier_loops1); }
void test_verifier_lwt(void) { RUN(verifier_lwt); }
void test_verifier_map_in_map(void) { RUN(verifier_map_in_map); }
@@ -193,6 +200,7 @@ void test_verifier_may_goto_1(void) { RUN(verifier_may_goto_1); }
void test_verifier_may_goto_2(void) { RUN(verifier_may_goto_2); }
void test_verifier_meta_access(void) { RUN(verifier_meta_access); }
void test_verifier_movsx(void) { RUN(verifier_movsx); }
+void test_verifier_mul(void) { RUN(verifier_mul); }
void test_verifier_netfilter_ctx(void) { RUN(verifier_netfilter_ctx); }
void test_verifier_netfilter_retcode(void) { RUN(verifier_netfilter_retcode); }
void test_verifier_bpf_fastcall(void) { RUN(verifier_bpf_fastcall); }
@@ -219,6 +227,7 @@ void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); }
void test_verifier_store_release(void) { RUN(verifier_store_release); }
void test_verifier_subprog_precision(void) { RUN(verifier_subprog_precision); }
void test_verifier_subreg(void) { RUN(verifier_subreg); }
+void test_verifier_tailcall(void) { RUN(verifier_tailcall); }
void test_verifier_tailcall_jit(void) { RUN(verifier_tailcall_jit); }
void test_verifier_typedef(void) { RUN(verifier_typedef); }
void test_verifier_uninit(void) { RUN(verifier_uninit); }
@@ -274,6 +283,7 @@ void test_verifier_array_access(void)
verifier_array_access__elf_bytes,
init_array_access_maps);
}
+void test_verifier_async_cb_context(void) { RUN(verifier_async_cb_context); }
static int init_value_ptr_arith_maps(struct bpf_object *obj)
{
diff --git a/tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c b/tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c
index ab0f02faa80c..4d69d9d55e17 100644
--- a/tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c
+++ b/tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c
@@ -268,7 +268,7 @@ static void test_verify_pkcs7_sig_from_map(void)
char *tmp_dir;
struct test_verify_pkcs7_sig *skel = NULL;
struct bpf_map *map;
- struct data data;
+ struct data data = {};
int ret, zero = 0;
/* Trigger creation of session keyring. */
diff --git a/tools/testing/selftests/bpf/prog_tests/wq.c b/tools/testing/selftests/bpf/prog_tests/wq.c
index 99e438fe12ac..15c67d23128b 100644
--- a/tools/testing/selftests/bpf/prog_tests/wq.c
+++ b/tools/testing/selftests/bpf/prog_tests/wq.c
@@ -38,3 +38,59 @@ void serial_test_failures_wq(void)
{
RUN_TESTS(wq_failures);
}
+
+static void test_failure_map_no_btf(void)
+{
+ struct wq *skel = NULL;
+ char log[8192];
+ const struct bpf_insn *insns;
+ size_t insn_cnt;
+ int ret, err, map_fd;
+ LIBBPF_OPTS(bpf_prog_load_opts, opts, .log_size = sizeof(log), .log_buf = log,
+ .log_level = 2);
+
+ skel = wq__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ err = bpf_object__prepare(skel->obj);
+ if (!ASSERT_OK(err, "skel__prepare"))
+ goto out;
+
+ map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "map_no_btf", sizeof(__u32), sizeof(__u64), 100,
+ NULL);
+ if (!ASSERT_GT(map_fd, -1, "map create"))
+ goto out;
+
+ err = bpf_map__reuse_fd(skel->maps.array, map_fd);
+ if (!ASSERT_OK(err, "map reuse fd")) {
+ close(map_fd);
+ goto out;
+ }
+
+ insns = bpf_program__insns(skel->progs.test_map_no_btf);
+ if (!ASSERT_OK_PTR(insns, "insns ptr"))
+ goto out;
+
+ insn_cnt = bpf_program__insn_cnt(skel->progs.test_map_no_btf);
+ if (!ASSERT_GT(insn_cnt, 0u, "insn cnt"))
+ goto out;
+
+ ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
+ if (!ASSERT_LT(ret, 0, "prog load failed")) {
+ if (ret > 0)
+ close(ret);
+ goto out;
+ }
+
+ ASSERT_HAS_SUBSTR(log, "map 'map_no_btf' has to have BTF in order to use bpf_wq",
+ "log complains no map BTF");
+out:
+ wq__destroy(skel);
+}
+
+void test_wq_custom(void)
+{
+ if (test__start_subtest("test_failure_map_no_btf"))
+ test_failure_map_no_btf();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
index b2b2d85dbb1b..43264347e7d7 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
@@ -37,21 +37,26 @@ static void test_xdp_adjust_tail_shrink(void)
bpf_object__close(obj);
}
-static void test_xdp_adjust_tail_grow(void)
+static void test_xdp_adjust_tail_grow(bool is_64k_pagesize)
{
const char *file = "./test_xdp_adjust_tail_grow.bpf.o";
struct bpf_object *obj;
- char buf[4096]; /* avoid segfault: large buf to hold grow results */
+ char buf[8192]; /* avoid segfault: large buf to hold grow results */
__u32 expect_sz;
int err, prog_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
- .data_size_in = sizeof(pkt_v4),
.data_out = buf,
.data_size_out = sizeof(buf),
.repeat = 1,
);
+	/* Use topts.data_size_in as a special signal to the BPF prog */
+ if (is_64k_pagesize)
+ topts.data_size_in = sizeof(pkt_v4) - 1;
+ else
+ topts.data_size_in = sizeof(pkt_v4);
+
err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
if (!ASSERT_OK(err, "test_xdp_adjust_tail_grow"))
return;
@@ -208,7 +213,7 @@ out:
bpf_object__close(obj);
}
-static void test_xdp_adjust_frags_tail_grow(void)
+static void test_xdp_adjust_frags_tail_grow_4k(void)
{
const char *file = "./test_xdp_adjust_tail_grow.bpf.o";
__u32 exp_size;
@@ -246,14 +251,20 @@ static void test_xdp_adjust_frags_tail_grow(void)
ASSERT_EQ(topts.retval, XDP_TX, "9Kb+10b retval");
ASSERT_EQ(topts.data_size_out, exp_size, "9Kb+10b size");
- for (i = 0; i < 9000; i++)
- ASSERT_EQ(buf[i], 1, "9Kb+10b-old");
+ for (i = 0; i < 9000; i++) {
+ if (buf[i] != 1)
+ ASSERT_EQ(buf[i], 1, "9Kb+10b-old");
+ }
- for (i = 9000; i < 9010; i++)
- ASSERT_EQ(buf[i], 0, "9Kb+10b-new");
+ for (i = 9000; i < 9010; i++) {
+ if (buf[i] != 0)
+ ASSERT_EQ(buf[i], 0, "9Kb+10b-new");
+ }
- for (i = 9010; i < 16384; i++)
- ASSERT_EQ(buf[i], 1, "9Kb+10b-untouched");
+ for (i = 9010; i < 16384; i++) {
+ if (buf[i] != 1)
+ ASSERT_EQ(buf[i], 1, "9Kb+10b-untouched");
+ }
/* Test a too large grow */
memset(buf, 1, 16384);
@@ -273,16 +284,93 @@ out:
bpf_object__close(obj);
}
+static void test_xdp_adjust_frags_tail_grow_64k(void)
+{
+ const char *file = "./test_xdp_adjust_tail_grow.bpf.o";
+ __u32 exp_size;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ int err, i, prog_fd;
+ __u8 *buf;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ obj = bpf_object__open(file);
+ if (libbpf_get_error(obj))
+ return;
+
+ prog = bpf_object__next_program(obj, NULL);
+ if (bpf_object__load(obj))
+ goto out;
+
+ prog_fd = bpf_program__fd(prog);
+
+ buf = malloc(262144);
+ if (!ASSERT_OK_PTR(buf, "alloc buf 256Kb"))
+ goto out;
+
+ /* Test case add 10 bytes to last frag */
+ memset(buf, 1, 262144);
+ exp_size = 90000 + 10;
+
+ topts.data_in = buf;
+ topts.data_out = buf;
+ topts.data_size_in = 90000;
+ topts.data_size_out = 262144;
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+
+ ASSERT_OK(err, "90Kb+10b");
+ ASSERT_EQ(topts.retval, XDP_TX, "90Kb+10b retval");
+ ASSERT_EQ(topts.data_size_out, exp_size, "90Kb+10b size");
+
+ for (i = 0; i < 90000; i++) {
+ if (buf[i] != 1)
+ ASSERT_EQ(buf[i], 1, "90Kb+10b-old");
+ }
+
+ for (i = 90000; i < 90010; i++) {
+ if (buf[i] != 0)
+ ASSERT_EQ(buf[i], 0, "90Kb+10b-new");
+ }
+
+ for (i = 90010; i < 262144; i++) {
+ if (buf[i] != 1)
+ ASSERT_EQ(buf[i], 1, "90Kb+10b-untouched");
+ }
+
+ /* Test a too large grow */
+ memset(buf, 1, 262144);
+ exp_size = 90001;
+
+ topts.data_in = topts.data_out = buf;
+ topts.data_size_in = 90001;
+ topts.data_size_out = 262144;
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+
+ ASSERT_OK(err, "90Kb+10b");
+ ASSERT_EQ(topts.retval, XDP_DROP, "90Kb+10b retval");
+ ASSERT_EQ(topts.data_size_out, exp_size, "90Kb+10b size");
+
+ free(buf);
+out:
+ bpf_object__close(obj);
+}
+
void test_xdp_adjust_tail(void)
{
+ int page_size = getpagesize();
+
if (test__start_subtest("xdp_adjust_tail_shrink"))
test_xdp_adjust_tail_shrink();
if (test__start_subtest("xdp_adjust_tail_grow"))
- test_xdp_adjust_tail_grow();
+ test_xdp_adjust_tail_grow(page_size == 65536);
if (test__start_subtest("xdp_adjust_tail_grow2"))
test_xdp_adjust_tail_grow2();
if (test__start_subtest("xdp_adjust_frags_tail_shrink"))
test_xdp_adjust_frags_tail_shrink();
- if (test__start_subtest("xdp_adjust_frags_tail_grow"))
- test_xdp_adjust_frags_tail_grow();
+ if (test__start_subtest("xdp_adjust_frags_tail_grow")) {
+ if (page_size == 65536)
+ test_xdp_adjust_frags_tail_grow_64k();
+ else
+ test_xdp_adjust_frags_tail_grow_4k();
+ }
}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c b/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c
index b9d9f0a502ce..ee94c281888a 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c
@@ -9,6 +9,7 @@
#define TX_NETNS "xdp_context_tx"
#define RX_NETNS "xdp_context_rx"
#define TAP_NAME "tap0"
+#define DUMMY_NAME "dum0"
#define TAP_NETNS "xdp_context_tuntap"
#define TEST_PAYLOAD_LEN 32
@@ -96,9 +97,7 @@ void test_xdp_context_test_run(void)
/* Meta data must be 255 bytes or smaller */
test_xdp_context_error(prog_fd, opts, 0, 256, sizeof(data), 0, 0, 0);
- /* Total size of data must match data_end - data_meta */
- test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32),
- sizeof(data) - 1, 0, 0, 0);
+ /* Total size of data must be data_end - data_meta or larger */
test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32),
sizeof(data) + 1, 0, 0, 0);
@@ -125,10 +124,10 @@ static int send_test_packet(int ifindex)
int n, sock = -1;
__u8 packet[sizeof(struct ethhdr) + TEST_PAYLOAD_LEN];
- /* The ethernet header is not relevant for this test and doesn't need to
- * be meaningful.
- */
- struct ethhdr eth = { 0 };
+ /* We use the Ethernet header only to identify the test packet */
+ struct ethhdr eth = {
+ .h_source = { 0x12, 0x34, 0xDE, 0xAD, 0xBE, 0xEF },
+ };
memcpy(packet, &eth, sizeof(eth));
memcpy(packet + sizeof(eth), test_payload, TEST_PAYLOAD_LEN);
@@ -156,20 +155,43 @@ err:
return -1;
}
-static void assert_test_result(struct test_xdp_meta *skel)
+static int write_test_packet(int tap_fd)
{
- int err;
- __u32 map_key = 0;
- __u8 map_value[TEST_PAYLOAD_LEN];
+ __u8 packet[sizeof(struct ethhdr) + TEST_PAYLOAD_LEN];
+ int n;
- err = bpf_map__lookup_elem(skel->maps.test_result, &map_key,
- sizeof(map_key), &map_value,
- TEST_PAYLOAD_LEN, BPF_ANY);
- if (!ASSERT_OK(err, "lookup test_result"))
- return;
+	/* The Ethernet header is mostly irrelevant. We use it to identify the
+	 * test packet, and some BPF helpers we exercise expect to operate on
+	 * Ethernet frames carrying IP packets, so pretend that's the case.
+	 */
+ struct ethhdr eth = {
+ .h_source = { 0x12, 0x34, 0xDE, 0xAD, 0xBE, 0xEF },
+ .h_proto = htons(ETH_P_IP),
+ };
+
+ memcpy(packet, &eth, sizeof(eth));
+ memcpy(packet + sizeof(struct ethhdr), test_payload, TEST_PAYLOAD_LEN);
+
+ n = write(tap_fd, packet, sizeof(packet));
+ if (!ASSERT_EQ(n, sizeof(packet), "write packet"))
+ return -1;
- ASSERT_MEMEQ(&map_value, &test_payload, TEST_PAYLOAD_LEN,
- "test_result map contains test payload");
+ return 0;
+}
+
+static void dump_err_stream(const struct bpf_program *prog)
+{
+ char buf[512];
+ int ret;
+
+ ret = 0;
+ do {
+ ret = bpf_prog_stream_read(bpf_program__fd(prog),
+ BPF_STREAM_STDERR, buf, sizeof(buf),
+ NULL);
+ if (ret > 0)
+ fwrite(buf, sizeof(buf[0]), ret, stderr);
+ } while (ret > 0);
}
void test_xdp_context_veth(void)
@@ -244,11 +266,14 @@ void test_xdp_context_veth(void)
if (!ASSERT_GE(tx_ifindex, 0, "if_nametoindex tx"))
goto close;
+ skel->bss->test_pass = false;
+
ret = send_test_packet(tx_ifindex);
if (!ASSERT_OK(ret, "send_test_packet"))
goto close;
- assert_test_result(skel);
+ if (!ASSERT_TRUE(skel->bss->test_pass, "test_pass"))
+ dump_err_stream(tc_prog);
close:
close_netns(nstoken);
@@ -257,17 +282,20 @@ close:
netns_free(tx_ns);
}
-void test_xdp_context_tuntap(void)
+static void test_tuntap(struct bpf_program *xdp_prog,
+ struct bpf_program *tc_prio_1_prog,
+ struct bpf_program *tc_prio_2_prog,
+ bool *test_pass)
{
LIBBPF_OPTS(bpf_tc_hook, tc_hook, .attach_point = BPF_TC_INGRESS);
LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
struct netns_obj *ns = NULL;
- struct test_xdp_meta *skel = NULL;
- __u8 packet[sizeof(struct ethhdr) + TEST_PAYLOAD_LEN];
int tap_fd = -1;
int tap_ifindex;
int ret;
+ *test_pass = false;
+
ns = netns_new(TAP_NETNS, true);
if (!ASSERT_OK_PTR(ns, "create and open ns"))
return;
@@ -278,10 +306,6 @@ void test_xdp_context_tuntap(void)
SYS(close, "ip link set dev " TAP_NAME " up");
- skel = test_xdp_meta__open_and_load();
- if (!ASSERT_OK_PTR(skel, "open and load skeleton"))
- goto close;
-
tap_ifindex = if_nametoindex(TAP_NAME);
if (!ASSERT_GE(tap_ifindex, 0, "if_nametoindex"))
goto close;
@@ -291,33 +315,198 @@ void test_xdp_context_tuntap(void)
if (!ASSERT_OK(ret, "bpf_tc_hook_create"))
goto close;
- tc_opts.prog_fd = bpf_program__fd(skel->progs.ing_cls);
+ tc_opts.prog_fd = bpf_program__fd(tc_prio_1_prog);
ret = bpf_tc_attach(&tc_hook, &tc_opts);
if (!ASSERT_OK(ret, "bpf_tc_attach"))
goto close;
- ret = bpf_xdp_attach(tap_ifindex, bpf_program__fd(skel->progs.ing_xdp),
+ if (tc_prio_2_prog) {
+ LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 2,
+ .prog_fd = bpf_program__fd(tc_prio_2_prog));
+
+ ret = bpf_tc_attach(&tc_hook, &tc_opts);
+ if (!ASSERT_OK(ret, "bpf_tc_attach"))
+ goto close;
+ }
+
+ ret = bpf_xdp_attach(tap_ifindex, bpf_program__fd(xdp_prog),
0, NULL);
if (!ASSERT_GE(ret, 0, "bpf_xdp_attach"))
goto close;
- /* The ethernet header is not relevant for this test and doesn't need to
- * be meaningful.
- */
- struct ethhdr eth = { 0 };
+ ret = write_test_packet(tap_fd);
+ if (!ASSERT_OK(ret, "write_test_packet"))
+ goto close;
- memcpy(packet, &eth, sizeof(eth));
- memcpy(packet + sizeof(eth), test_payload, TEST_PAYLOAD_LEN);
+ if (!ASSERT_TRUE(*test_pass, "test_pass"))
+ dump_err_stream(tc_prio_2_prog ? : tc_prio_1_prog);
+
+close:
+ if (tap_fd >= 0)
+ close(tap_fd);
+ netns_free(ns);
+}
- ret = write(tap_fd, packet, sizeof(packet));
- if (!ASSERT_EQ(ret, sizeof(packet), "write packet"))
+/* Write a packet to a tap dev and copy it to ingress of a dummy dev */
+static void test_tuntap_mirred(struct bpf_program *xdp_prog,
+ struct bpf_program *tc_prog,
+ bool *test_pass)
+{
+ LIBBPF_OPTS(bpf_tc_hook, tc_hook, .attach_point = BPF_TC_INGRESS);
+ LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
+ struct netns_obj *ns = NULL;
+ int dummy_ifindex;
+ int tap_fd = -1;
+ int tap_ifindex;
+ int ret;
+
+ *test_pass = false;
+
+ ns = netns_new(TAP_NETNS, true);
+ if (!ASSERT_OK_PTR(ns, "netns_new"))
+ return;
+
+ /* Setup dummy interface */
+ SYS(close, "ip link add name " DUMMY_NAME " type dummy");
+ SYS(close, "ip link set dev " DUMMY_NAME " up");
+
+ dummy_ifindex = if_nametoindex(DUMMY_NAME);
+ if (!ASSERT_GE(dummy_ifindex, 0, "if_nametoindex"))
+ goto close;
+
+ tc_hook.ifindex = dummy_ifindex;
+ ret = bpf_tc_hook_create(&tc_hook);
+ if (!ASSERT_OK(ret, "bpf_tc_hook_create"))
+ goto close;
+
+ tc_opts.prog_fd = bpf_program__fd(tc_prog);
+ ret = bpf_tc_attach(&tc_hook, &tc_opts);
+ if (!ASSERT_OK(ret, "bpf_tc_attach"))
+ goto close;
+
+ /* Setup TAP interface */
+ tap_fd = open_tuntap(TAP_NAME, true);
+ if (!ASSERT_GE(tap_fd, 0, "open_tuntap"))
+ goto close;
+
+ SYS(close, "ip link set dev " TAP_NAME " up");
+
+ tap_ifindex = if_nametoindex(TAP_NAME);
+ if (!ASSERT_GE(tap_ifindex, 0, "if_nametoindex"))
+ goto close;
+
+ ret = bpf_xdp_attach(tap_ifindex, bpf_program__fd(xdp_prog), 0, NULL);
+ if (!ASSERT_GE(ret, 0, "bpf_xdp_attach"))
goto close;
- assert_test_result(skel);
+ /* Copy all packets received from TAP to dummy ingress */
+ SYS(close, "tc qdisc add dev " TAP_NAME " clsact");
+ SYS(close, "tc filter add dev " TAP_NAME " ingress "
+ "protocol all matchall "
+ "action mirred ingress mirror dev " DUMMY_NAME);
+
+ /* Receive a packet on TAP */
+ ret = write_test_packet(tap_fd);
+ if (!ASSERT_OK(ret, "write_test_packet"))
+ goto close;
+
+ if (!ASSERT_TRUE(*test_pass, "test_pass"))
+ dump_err_stream(tc_prog);
close:
if (tap_fd >= 0)
close(tap_fd);
- test_xdp_meta__destroy(skel);
netns_free(ns);
}
+
+void test_xdp_context_tuntap(void)
+{
+ struct test_xdp_meta *skel = NULL;
+
+ skel = test_xdp_meta__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open and load skeleton"))
+ return;
+
+ if (test__start_subtest("data_meta"))
+ test_tuntap(skel->progs.ing_xdp,
+ skel->progs.ing_cls,
+ NULL, /* tc prio 2 */
+ &skel->bss->test_pass);
+ if (test__start_subtest("dynptr_read"))
+ test_tuntap(skel->progs.ing_xdp,
+ skel->progs.ing_cls_dynptr_read,
+ NULL, /* tc prio 2 */
+ &skel->bss->test_pass);
+ if (test__start_subtest("dynptr_slice"))
+ test_tuntap(skel->progs.ing_xdp,
+ skel->progs.ing_cls_dynptr_slice,
+ NULL, /* tc prio 2 */
+ &skel->bss->test_pass);
+ if (test__start_subtest("dynptr_write"))
+ test_tuntap(skel->progs.ing_xdp_zalloc_meta,
+ skel->progs.ing_cls_dynptr_write,
+ skel->progs.ing_cls_dynptr_read,
+ &skel->bss->test_pass);
+ if (test__start_subtest("dynptr_slice_rdwr"))
+ test_tuntap(skel->progs.ing_xdp_zalloc_meta,
+ skel->progs.ing_cls_dynptr_slice_rdwr,
+ skel->progs.ing_cls_dynptr_slice,
+ &skel->bss->test_pass);
+ if (test__start_subtest("dynptr_offset"))
+ test_tuntap(skel->progs.ing_xdp_zalloc_meta,
+ skel->progs.ing_cls_dynptr_offset_wr,
+ skel->progs.ing_cls_dynptr_offset_rd,
+ &skel->bss->test_pass);
+ if (test__start_subtest("dynptr_offset_oob"))
+ test_tuntap(skel->progs.ing_xdp,
+ skel->progs.ing_cls_dynptr_offset_oob,
+ skel->progs.ing_cls,
+ &skel->bss->test_pass);
+ if (test__start_subtest("clone_data_meta_survives_data_write"))
+ test_tuntap_mirred(skel->progs.ing_xdp,
+ skel->progs.clone_data_meta_survives_data_write,
+ &skel->bss->test_pass);
+ if (test__start_subtest("clone_data_meta_survives_meta_write"))
+ test_tuntap_mirred(skel->progs.ing_xdp,
+ skel->progs.clone_data_meta_survives_meta_write,
+ &skel->bss->test_pass);
+ if (test__start_subtest("clone_meta_dynptr_survives_data_slice_write"))
+ test_tuntap_mirred(skel->progs.ing_xdp,
+ skel->progs.clone_meta_dynptr_survives_data_slice_write,
+ &skel->bss->test_pass);
+ if (test__start_subtest("clone_meta_dynptr_survives_meta_slice_write"))
+ test_tuntap_mirred(skel->progs.ing_xdp,
+ skel->progs.clone_meta_dynptr_survives_meta_slice_write,
+ &skel->bss->test_pass);
+ if (test__start_subtest("clone_meta_dynptr_rw_before_data_dynptr_write"))
+ test_tuntap_mirred(skel->progs.ing_xdp,
+ skel->progs.clone_meta_dynptr_rw_before_data_dynptr_write,
+ &skel->bss->test_pass);
+ if (test__start_subtest("clone_meta_dynptr_rw_before_meta_dynptr_write"))
+ test_tuntap_mirred(skel->progs.ing_xdp,
+ skel->progs.clone_meta_dynptr_rw_before_meta_dynptr_write,
+ &skel->bss->test_pass);
+ /* Tests for BPF helpers which touch headroom */
+ if (test__start_subtest("helper_skb_vlan_push_pop"))
+ test_tuntap(skel->progs.ing_xdp,
+ skel->progs.helper_skb_vlan_push_pop,
+ NULL, /* tc prio 2 */
+ &skel->bss->test_pass);
+ if (test__start_subtest("helper_skb_adjust_room"))
+ test_tuntap(skel->progs.ing_xdp,
+ skel->progs.helper_skb_adjust_room,
+ NULL, /* tc prio 2 */
+ &skel->bss->test_pass);
+ if (test__start_subtest("helper_skb_change_head_tail"))
+ test_tuntap(skel->progs.ing_xdp,
+ skel->progs.helper_skb_change_head_tail,
+ NULL, /* tc prio 2 */
+ &skel->bss->test_pass);
+ if (test__start_subtest("helper_skb_change_proto"))
+ test_tuntap(skel->progs.ing_xdp,
+ skel->progs.helper_skb_change_proto,
+ NULL, /* tc prio 2 */
+ &skel->bss->test_pass);
+
+ test_xdp_meta__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c
index 461ab18705d5..a8ab05216c38 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c
@@ -7,6 +7,7 @@
#include <test_progs.h>
#include "test_xdp_devmap_helpers.skel.h"
+#include "test_xdp_devmap_tailcall.skel.h"
#include "test_xdp_with_devmap_frags_helpers.skel.h"
#include "test_xdp_with_devmap_helpers.skel.h"
@@ -107,6 +108,29 @@ static void test_neg_xdp_devmap_helpers(void)
}
}
+static void test_xdp_devmap_tailcall(enum bpf_attach_type prog_dev,
+ enum bpf_attach_type prog_tail,
+ bool expect_reject)
+{
+ struct test_xdp_devmap_tailcall *skel;
+ int err;
+
+ skel = test_xdp_devmap_tailcall__open();
+ if (!ASSERT_OK_PTR(skel, "test_xdp_devmap_tailcall__open"))
+ return;
+
+ bpf_program__set_expected_attach_type(skel->progs.xdp_devmap, prog_dev);
+ bpf_program__set_expected_attach_type(skel->progs.xdp_entry, prog_tail);
+
+ err = test_xdp_devmap_tailcall__load(skel);
+ if (expect_reject)
+ ASSERT_ERR(err, "test_xdp_devmap_tailcall__load");
+ else
+ ASSERT_OK(err, "test_xdp_devmap_tailcall__load");
+
+ test_xdp_devmap_tailcall__destroy(skel);
+}
+
static void test_xdp_with_devmap_frags_helpers(void)
{
struct test_xdp_with_devmap_frags_helpers *skel;
@@ -238,8 +262,13 @@ void serial_test_xdp_devmap_attach(void)
if (test__start_subtest("DEVMAP with frags programs in entries"))
test_xdp_with_devmap_frags_helpers();
- if (test__start_subtest("Verifier check of DEVMAP programs"))
+ if (test__start_subtest("Verifier check of DEVMAP programs")) {
test_neg_xdp_devmap_helpers();
+ test_xdp_devmap_tailcall(BPF_XDP_DEVMAP, BPF_XDP_DEVMAP, false);
+ test_xdp_devmap_tailcall(0, 0, true);
+ test_xdp_devmap_tailcall(BPF_XDP_DEVMAP, 0, true);
+ test_xdp_devmap_tailcall(0, BPF_XDP_DEVMAP, true);
+ }
if (test__start_subtest("DEVMAP with programs in entries on veth"))
test_xdp_with_devmap_helpers_veth();
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
index 7dac044664ac..dd34b0cc4b4e 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
@@ -66,16 +66,25 @@ static int attach_tc_prog(struct bpf_tc_hook *hook, int fd)
#else
#define MAX_PKT_SIZE 3408
#endif
+
+#define PAGE_SIZE_4K 4096
+#define PAGE_SIZE_64K 65536
+
static void test_max_pkt_size(int fd)
{
- char data[MAX_PKT_SIZE + 1] = {};
+ char data[PAGE_SIZE_64K + 1] = {};
int err;
DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
.data_in = &data,
- .data_size_in = MAX_PKT_SIZE,
.flags = BPF_F_TEST_XDP_LIVE_FRAMES,
.repeat = 1,
);
+
+ if (getpagesize() == PAGE_SIZE_64K)
+ opts.data_size_in = MAX_PKT_SIZE + PAGE_SIZE_64K - PAGE_SIZE_4K;
+ else
+ opts.data_size_in = MAX_PKT_SIZE;
+
err = bpf_prog_test_run_opts(fd, &opts);
ASSERT_OK(err, "prog_run_max_size");
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_pull_data.c b/tools/testing/selftests/bpf/prog_tests/xdp_pull_data.c
new file mode 100644
index 000000000000..efa350d04ec5
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_pull_data.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "test_xdp_pull_data.skel.h"
+
+#define PULL_MAX (1 << 31)
+#define PULL_PLUS_ONE (1 << 30)
+
+#define XDP_PACKET_HEADROOM 256
+
+/* Find headroom and tailroom occupied by struct xdp_frame and struct
+ * skb_shared_info so that we can calculate the maximum pull lengths for
+ * test cases. They might not be the real sizes of the structures due to
+ * cache alignment.
+ */
+static int find_xdp_sizes(struct test_xdp_pull_data *skel, int frame_sz)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct xdp_md ctx = {};
+ int prog_fd, err;
+ __u8 *buf;
+
+ buf = calloc(frame_sz, sizeof(__u8));
+ if (!ASSERT_OK_PTR(buf, "calloc buf"))
+ return -ENOMEM;
+
+ topts.data_in = buf;
+ topts.data_out = buf;
+ topts.data_size_in = frame_sz;
+ topts.data_size_out = frame_sz;
+ /* Pass a data_end larger than the linear space available to make sure
+ * bpf_prog_test_run_xdp() will fill the linear data area so that
+ * xdp_find_sizes can infer the size of struct skb_shared_info.
+ */
+ ctx.data_end = frame_sz;
+ topts.ctx_in = &ctx;
+ topts.ctx_out = &ctx;
+ topts.ctx_size_in = sizeof(ctx);
+ topts.ctx_size_out = sizeof(ctx);
+
+ prog_fd = bpf_program__fd(skel->progs.xdp_find_sizes);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "bpf_prog_test_run_opts");
+
+ free(buf);
+
+ return err;
+}
+
+/* xdp_pull_data_prog will directly read a marker 0xbb stored at buf[1024],
+ * so callers expecting XDP_PASS should always pass a pull_len of at least 1024
+ */
+static void run_test(struct test_xdp_pull_data *skel, int retval,
+ int frame_sz, int buff_len, int meta_len, int data_len,
+ int pull_len)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct xdp_md ctx = {};
+ int prog_fd, err;
+ __u8 *buf;
+
+ buf = calloc(buff_len, sizeof(__u8));
+ if (!ASSERT_OK_PTR(buf, "calloc buf"))
+ return;
+
+ buf[meta_len + 1023] = 0xaa;
+ buf[meta_len + 1024] = 0xbb;
+ buf[meta_len + 1025] = 0xcc;
+
+ topts.data_in = buf;
+ topts.data_out = buf;
+ topts.data_size_in = buff_len;
+ topts.data_size_out = buff_len;
+ ctx.data = meta_len;
+ ctx.data_end = meta_len + data_len;
+ topts.ctx_in = &ctx;
+ topts.ctx_out = &ctx;
+ topts.ctx_size_in = sizeof(ctx);
+ topts.ctx_size_out = sizeof(ctx);
+
+ skel->bss->data_len = data_len;
+ if (pull_len & PULL_MAX) {
+ int headroom = XDP_PACKET_HEADROOM - meta_len - skel->bss->xdpf_sz;
+ int tailroom = frame_sz - XDP_PACKET_HEADROOM -
+ data_len - skel->bss->sinfo_sz;
+
+ pull_len = pull_len & PULL_PLUS_ONE ? 1 : 0;
+ pull_len += headroom + tailroom + data_len;
+ }
+ skel->bss->pull_len = pull_len;
+
+ prog_fd = bpf_program__fd(skel->progs.xdp_pull_data_prog);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "bpf_prog_test_run_opts");
+ ASSERT_EQ(topts.retval, retval, "xdp_pull_data_prog retval");
+
+ if (retval == XDP_DROP)
+ goto out;
+
+ ASSERT_EQ(ctx.data_end, meta_len + pull_len, "linear data size");
+ ASSERT_EQ(topts.data_size_out, buff_len, "linear + non-linear data size");
+ /* Make sure data around xdp->data_end was not messed up by
+ * bpf_xdp_pull_data()
+ */
+ ASSERT_EQ(buf[meta_len + 1023], 0xaa, "data[1023]");
+ ASSERT_EQ(buf[meta_len + 1024], 0xbb, "data[1024]");
+ ASSERT_EQ(buf[meta_len + 1025], 0xcc, "data[1025]");
+out:
+ free(buf);
+}
+
+static void test_xdp_pull_data_basic(void)
+{
+ u32 pg_sz, max_meta_len, max_data_len;
+ struct test_xdp_pull_data *skel;
+
+ skel = test_xdp_pull_data__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_xdp_pull_data__open_and_load"))
+ return;
+
+ pg_sz = sysconf(_SC_PAGE_SIZE);
+
+ if (find_xdp_sizes(skel, pg_sz))
+ goto out;
+
+ max_meta_len = XDP_PACKET_HEADROOM - skel->bss->xdpf_sz;
+ max_data_len = pg_sz - XDP_PACKET_HEADROOM - skel->bss->sinfo_sz;
+
+ /* linear xdp pkt, pull 0 byte */
+ run_test(skel, XDP_PASS, pg_sz, 2048, 0, 2048, 2048);
+
+ /* multi-buf pkt, pull results in linear xdp pkt */
+ run_test(skel, XDP_PASS, pg_sz, 2048, 0, 1024, 2048);
+
+ /* multi-buf pkt, pull 1 byte to linear data area */
+ run_test(skel, XDP_PASS, pg_sz, 9000, 0, 1024, 1025);
+
+ /* multi-buf pkt, pull 0 byte to linear data area */
+ run_test(skel, XDP_PASS, pg_sz, 9000, 0, 1025, 1025);
+
+ /* multi-buf pkt, empty linear data area, pull requires memmove */
+ run_test(skel, XDP_PASS, pg_sz, 9000, 0, 0, PULL_MAX);
+
+ /* multi-buf pkt, no headroom */
+ run_test(skel, XDP_PASS, pg_sz, 9000, max_meta_len, 1024, PULL_MAX);
+
+ /* multi-buf pkt, no tailroom, pull requires memmove */
+ run_test(skel, XDP_PASS, pg_sz, 9000, 0, max_data_len, PULL_MAX);
+
+ /* Test cases with invalid pull length */
+
+ /* linear xdp pkt, pull more than total data len */
+ run_test(skel, XDP_DROP, pg_sz, 2048, 0, 2048, 2049);
+
+ /* multi-buf pkt with no space left in linear data area */
+ run_test(skel, XDP_DROP, pg_sz, 9000, max_meta_len, max_data_len,
+ PULL_MAX | PULL_PLUS_ONE);
+
+ /* multi-buf pkt, empty linear data area */
+ run_test(skel, XDP_DROP, pg_sz, 9000, 0, 0, PULL_MAX | PULL_PLUS_ONE);
+
+ /* multi-buf pkt, no headroom */
+ run_test(skel, XDP_DROP, pg_sz, 9000, max_meta_len, 1024,
+ PULL_MAX | PULL_PLUS_ONE);
+
+ /* multi-buf pkt, no tailroom */
+ run_test(skel, XDP_DROP, pg_sz, 9000, 0, max_data_len,
+ PULL_MAX | PULL_PLUS_ONE);
+
+out:
+ test_xdp_pull_data__destroy(skel);
+}
+
+void test_xdp_pull_data(void)
+{
+ if (test__start_subtest("xdp_pull_data"))
+ test_xdp_pull_data_basic();
+}
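A worked example of the PULL_MAX / PULL_PLUS_ONE decoding in run_test() above, assuming pg_sz = 4096, meta_len = 0, data_len = 1024, and hypothetical measured sizes xdpf_sz = 128 and sinfo_sz = 320 (the real values come from the xdp_find_sizes run):

	int headroom = 256 - 0 - 128;           /* XDP_PACKET_HEADROOM - meta_len - xdpf_sz = 128 */
	int tailroom = 4096 - 256 - 1024 - 320; /* frame_sz - XDP_PACKET_HEADROOM - data_len - sinfo_sz = 2496 */
	int max_pull = 128 + 2496 + 1024;       /* headroom + tailroom + data_len = 3648 */

	/* PULL_MAX asks for max_pull bytes; PULL_MAX | PULL_PLUS_ONE asks for
	 * max_pull + 1, which cannot fit in the linear area and must be
	 * rejected (the program is expected to return XDP_DROP). */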
diff --git a/tools/testing/selftests/bpf/prog_tests/xsk.c b/tools/testing/selftests/bpf/prog_tests/xsk.c
new file mode 100644
index 000000000000..dd4c35c0e428
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xsk.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <net/if.h>
+#include <stdarg.h>
+
+#include "network_helpers.h"
+#include "test_progs.h"
+#include "test_xsk.h"
+#include "xsk_xdp_progs.skel.h"
+
+#define VETH_RX "veth0"
+#define VETH_TX "veth1"
+#define MTU 1500
+
+int setup_veth(bool busy_poll)
+{
+ SYS(fail,
+ "ip link add %s numtxqueues 4 numrxqueues 4 type veth peer name %s numtxqueues 4 numrxqueues 4",
+ VETH_RX, VETH_TX);
+ SYS(fail, "sysctl -wq net.ipv6.conf.%s.disable_ipv6=1", VETH_RX);
+ SYS(fail, "sysctl -wq net.ipv6.conf.%s.disable_ipv6=1", VETH_TX);
+
+ if (busy_poll) {
+ SYS(fail, "echo 2 > /sys/class/net/%s/napi_defer_hard_irqs", VETH_RX);
+ SYS(fail, "echo 200000 > /sys/class/net/%s/gro_flush_timeout", VETH_RX);
+ SYS(fail, "echo 2 > /sys/class/net/%s/napi_defer_hard_irqs", VETH_TX);
+ SYS(fail, "echo 200000 > /sys/class/net/%s/gro_flush_timeout", VETH_TX);
+ }
+
+ SYS(fail, "ip link set %s mtu %d", VETH_RX, MTU);
+ SYS(fail, "ip link set %s mtu %d", VETH_TX, MTU);
+ SYS(fail, "ip link set %s up", VETH_RX);
+ SYS(fail, "ip link set %s up", VETH_TX);
+
+ return 0;
+
+fail:
+ return -1;
+}
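The busy_poll branch above writes the two standard prerequisites for socket busy polling: napi_defer_hard_irqs defers re-arming the device IRQ, and gro_flush_timeout (in nanoseconds) bounds how long NAPI stays in deferred-polling mode. The sockets themselves are expected to opt in as well; a hedged sketch of that counterpart (xsk_fd and the values are hypothetical, though both socket options have existed since Linux 5.11):

	int prefer = 1, budget = 64;

	/* assumed socket-side opt-in when busy_poll is enabled */
	setsockopt(xsk_fd, SOL_SOCKET, SO_PREFER_BUSY_POLL, &prefer, sizeof(prefer));
	setsockopt(xsk_fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET, &budget, sizeof(budget));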
+
+void delete_veth(void)
+{
+ SYS_NOFAIL("ip link del %s", VETH_RX);
+ SYS_NOFAIL("ip link del %s", VETH_TX);
+}
+
+int configure_ifobj(struct ifobject *tx, struct ifobject *rx)
+{
+ rx->ifindex = if_nametoindex(VETH_RX);
+ if (!ASSERT_OK_FD(rx->ifindex, "get RX ifindex"))
+ return -1;
+
+ tx->ifindex = if_nametoindex(VETH_TX);
+ if (!ASSERT_OK_FD(tx->ifindex, "get TX ifindex"))
+ return -1;
+
+ tx->shared_umem = false;
+ rx->shared_umem = false;
+
+ return 0;
+}
+
+static void test_xsk(const struct test_spec *test_to_run, enum test_mode mode)
+{
+ struct ifobject *ifobj_tx, *ifobj_rx;
+ struct test_spec test;
+ int ret;
+
+ ifobj_tx = ifobject_create();
+ if (!ASSERT_OK_PTR(ifobj_tx, "create ifobj_tx"))
+ return;
+
+ ifobj_rx = ifobject_create();
+ if (!ASSERT_OK_PTR(ifobj_rx, "create ifobj_rx"))
+ goto delete_tx;
+
+	if (!ASSERT_OK(configure_ifobj(ifobj_tx, ifobj_rx), "configure ifobj"))
+ goto delete_rx;
+
+ ret = get_hw_ring_size(ifobj_tx->ifname, &ifobj_tx->ring);
+ if (!ret) {
+ ifobj_tx->hw_ring_size_supp = true;
+ ifobj_tx->set_ring.default_tx = ifobj_tx->ring.tx_pending;
+ ifobj_tx->set_ring.default_rx = ifobj_tx->ring.rx_pending;
+ }
+
+ if (!ASSERT_OK(init_iface(ifobj_rx, worker_testapp_validate_rx), "init RX"))
+ goto delete_rx;
+ if (!ASSERT_OK(init_iface(ifobj_tx, worker_testapp_validate_tx), "init TX"))
+ goto delete_rx;
+
+ test_init(&test, ifobj_tx, ifobj_rx, 0, &tests[0]);
+
+ test.tx_pkt_stream_default = pkt_stream_generate(DEFAULT_PKT_CNT, MIN_PKT_SIZE);
+ if (!ASSERT_OK_PTR(test.tx_pkt_stream_default, "TX pkt generation"))
+ goto delete_rx;
+ test.rx_pkt_stream_default = pkt_stream_generate(DEFAULT_PKT_CNT, MIN_PKT_SIZE);
+ if (!ASSERT_OK_PTR(test.rx_pkt_stream_default, "RX pkt generation"))
+ goto delete_rx;
+
+ test_init(&test, ifobj_tx, ifobj_rx, mode, test_to_run);
+ ret = test.test_func(&test);
+ if (ret != TEST_SKIP)
+ ASSERT_OK(ret, "Run test");
+ pkt_stream_restore_default(&test);
+
+ if (ifobj_tx->hw_ring_size_supp)
+ hw_ring_size_reset(ifobj_tx);
+
+ pkt_stream_delete(test.tx_pkt_stream_default);
+ pkt_stream_delete(test.rx_pkt_stream_default);
+ xsk_xdp_progs__destroy(ifobj_tx->xdp_progs);
+ xsk_xdp_progs__destroy(ifobj_rx->xdp_progs);
+
+delete_rx:
+ ifobject_delete(ifobj_rx);
+delete_tx:
+ ifobject_delete(ifobj_tx);
+}
+
+void test_ns_xsk_skb(void)
+{
+ int i;
+
+ if (!ASSERT_OK(setup_veth(false), "setup veth"))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ if (test__start_subtest(tests[i].name))
+ test_xsk(&tests[i], TEST_MODE_SKB);
+ }
+
+ delete_veth();
+}
+
+void test_ns_xsk_drv(void)
+{
+ int i;
+
+ if (!ASSERT_OK(setup_veth(false), "setup veth"))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ if (test__start_subtest(tests[i].name))
+ test_xsk(&tests[i], TEST_MODE_DRV);
+ }
+
+ delete_veth();
+}
diff --git a/tools/testing/selftests/bpf/progs/arena_atomics.c b/tools/testing/selftests/bpf/progs/arena_atomics.c
index a52feff98112..d1841aac94a2 100644
--- a/tools/testing/selftests/bpf/progs/arena_atomics.c
+++ b/tools/testing/selftests/bpf/progs/arena_atomics.c
@@ -28,7 +28,8 @@ bool skip_all_tests = true;
#if defined(ENABLE_ATOMICS_TESTS) && \
defined(__BPF_FEATURE_ADDR_SPACE_CAST) && \
- (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86))
+ (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64))
bool skip_lacq_srel_tests __attribute((__section__(".data"))) = false;
#else
bool skip_lacq_srel_tests = true;
@@ -314,7 +315,8 @@ int load_acquire(const void *ctx)
{
#if defined(ENABLE_ATOMICS_TESTS) && \
defined(__BPF_FEATURE_ADDR_SPACE_CAST) && \
- (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86))
+ (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64))
#define LOAD_ACQUIRE_ARENA(SIZEOP, SIZE, SRC, DST) \
{ asm volatile ( \
@@ -365,7 +367,8 @@ int store_release(const void *ctx)
{
#if defined(ENABLE_ATOMICS_TESTS) && \
defined(__BPF_FEATURE_ADDR_SPACE_CAST) && \
- (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86))
+ (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64))
#define STORE_RELEASE_ARENA(SIZEOP, DST, VAL) \
{ asm volatile ( \
diff --git a/tools/testing/selftests/bpf/progs/arena_spin_lock.c b/tools/testing/selftests/bpf/progs/arena_spin_lock.c
index c4500c37f85e..086b57a426cf 100644
--- a/tools/testing/selftests/bpf/progs/arena_spin_lock.c
+++ b/tools/testing/selftests/bpf/progs/arena_spin_lock.c
@@ -37,8 +37,11 @@ int prog(void *ctx)
#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
unsigned long flags;
- if ((ret = arena_spin_lock_irqsave(&lock, flags)))
+ if ((ret = arena_spin_lock_irqsave(&lock, flags))) {
+ if (ret == -EOPNOTSUPP)
+ test_skip = 3;
return ret;
+ }
if (counter != limit)
counter++;
bpf_repeat(cs_count);
diff --git a/tools/testing/selftests/bpf/progs/arena_strsearch.c b/tools/testing/selftests/bpf/progs/arena_strsearch.c
new file mode 100644
index 000000000000..ef6b76658f7f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/arena_strsearch.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include "bpf_experimental.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARENA);
+ __uint(map_flags, BPF_F_MMAPABLE);
+ __uint(max_entries, 100); /* number of pages */
+} arena SEC(".maps");
+
+#include "bpf_arena_strsearch.h"
+
+struct glob_test {
+ char const __arena *pat, *str;
+ bool expected;
+};
+
+static bool test(char const __arena *pat, char const __arena *str, bool expected)
+{
+ bool match = glob_match(pat, str);
+ bool success = match == expected;
+
+ /* bpf_printk("glob_match %s %s res %d ok %d", pat, str, match, success); */
+ return success;
+}
+
+/*
+ * The tests are all jammed together in one array to make it simpler
+ * to place that array in the .init.rodata section. The obvious
+ * "array of structures containing char *" has no way to force the
+ * pointed-to strings to be in a particular section.
+ *
+ * Anyway, a test consists of:
+ * 1. Expected glob_match result: '1' or '0'.
+ * 2. Pattern to match: null-terminated string
+ * 3. String to match against: null-terminated string
+ *
+ * The list of tests is terminated with a final '\0' instead of
+ * a glob_match result character.
+ */
+static const char __arena glob_tests[] =
+ /* Some basic tests */
+ "1" "a\0" "a\0"
+ "0" "a\0" "b\0"
+ "0" "a\0" "aa\0"
+ "0" "a\0" "\0"
+ "1" "\0" "\0"
+ "0" "\0" "a\0"
+ /* Simple character class tests */
+ "1" "[a]\0" "a\0"
+ "0" "[a]\0" "b\0"
+ "0" "[!a]\0" "a\0"
+ "1" "[!a]\0" "b\0"
+ "1" "[ab]\0" "a\0"
+ "1" "[ab]\0" "b\0"
+ "0" "[ab]\0" "c\0"
+ "1" "[!ab]\0" "c\0"
+ "1" "[a-c]\0" "b\0"
+ "0" "[a-c]\0" "d\0"
+ /* Corner cases in character class parsing */
+ "1" "[a-c-e-g]\0" "-\0"
+ "0" "[a-c-e-g]\0" "d\0"
+ "1" "[a-c-e-g]\0" "f\0"
+ "1" "[]a-ceg-ik[]\0" "a\0"
+ "1" "[]a-ceg-ik[]\0" "]\0"
+ "1" "[]a-ceg-ik[]\0" "[\0"
+ "1" "[]a-ceg-ik[]\0" "h\0"
+ "0" "[]a-ceg-ik[]\0" "f\0"
+ "0" "[!]a-ceg-ik[]\0" "h\0"
+ "0" "[!]a-ceg-ik[]\0" "]\0"
+ "1" "[!]a-ceg-ik[]\0" "f\0"
+ /* Simple wild cards */
+ "1" "?\0" "a\0"
+ "0" "?\0" "aa\0"
+ "0" "??\0" "a\0"
+ "1" "?x?\0" "axb\0"
+ "0" "?x?\0" "abx\0"
+ "0" "?x?\0" "xab\0"
+ /* Asterisk wild cards (backtracking) */
+ "0" "*??\0" "a\0"
+ "1" "*??\0" "ab\0"
+ "1" "*??\0" "abc\0"
+ "1" "*??\0" "abcd\0"
+ "0" "??*\0" "a\0"
+ "1" "??*\0" "ab\0"
+ "1" "??*\0" "abc\0"
+ "1" "??*\0" "abcd\0"
+ "0" "?*?\0" "a\0"
+ "1" "?*?\0" "ab\0"
+ "1" "?*?\0" "abc\0"
+ "1" "?*?\0" "abcd\0"
+ "1" "*b\0" "b\0"
+ "1" "*b\0" "ab\0"
+ "0" "*b\0" "ba\0"
+ "1" "*b\0" "bb\0"
+ "1" "*b\0" "abb\0"
+ "1" "*b\0" "bab\0"
+ "1" "*bc\0" "abbc\0"
+ "1" "*bc\0" "bc\0"
+ "1" "*bc\0" "bbc\0"
+ "1" "*bc\0" "bcbc\0"
+ /* Multiple asterisks (complex backtracking) */
+ "1" "*ac*\0" "abacadaeafag\0"
+ "1" "*ac*ae*ag*\0" "abacadaeafag\0"
+ "1" "*a*b*[bc]*[ef]*g*\0" "abacadaeafag\0"
+ "0" "*a*b*[ef]*[cd]*g*\0" "abacadaeafag\0"
+ "1" "*abcd*\0" "abcabcabcabcdefg\0"
+ "1" "*ab*cd*\0" "abcabcabcabcdefg\0"
+ "1" "*abcd*abcdef*\0" "abcabcdabcdeabcdefg\0"
+ "0" "*abcd*\0" "abcabcabcabcefg\0"
+ "0" "*ab*cd*\0" "abcabcabcabcefg\0";
+
+bool skip = false;
+
+SEC("syscall")
+int arena_strsearch(void *ctx)
+{
+ unsigned successes = 0;
+ unsigned n = 0;
+ char const __arena *p = glob_tests;
+
+ /*
+ * Tests are jammed together in a string. The first byte is '1'
+ * or '0' to indicate the expected outcome, or '\0' to indicate the
+ * end of the tests. Then come two null-terminated strings: the
+ * pattern and the string to match it against.
+ */
+ while (*p) {
+ bool expected = *p++ & 1;
+ char const __arena *pat = p;
+
+ cond_break;
+ p += bpf_arena_strlen(p) + 1;
+ successes += test(pat, p, expected);
+ p += bpf_arena_strlen(p) + 1;
+ n++;
+ }
+
+ n -= successes;
+ /* bpf_printk("glob: %u self-tests passed, %u failed\n", successes, n); */
+
+ return n ? -1 : 0;
+}
+
+char _license[] SEC("license") = "GPL";
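For clarity, a worked decode of the packed encoding above, restating what the while loop does on the first entry:

	/* "1" "a\0" "a\0" decodes to:
	 *   expected = '1' & 1  -> 1 (a match is expected)
	 *   pat      = "a"
	 *   str      = "a"
	 * so test(pat, str, expected) counts a success iff glob_match("a", "a")
	 * returns true. The implicit NUL that terminates the C string literal
	 * ends the whole list, standing in for the next result character. */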
diff --git a/tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h b/tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h
index d67466c1ff77..f90531cf3ee5 100644
--- a/tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h
+++ b/tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h
@@ -302,7 +302,7 @@ int arena_spin_lock_slowpath(arena_spinlock_t __arena __arg_arena *lock, u32 val
* barriers.
*/
if (val & _Q_LOCKED_MASK)
- smp_cond_load_acquire_label(&lock->locked, !VAL, release_err);
+ (void)smp_cond_load_acquire_label(&lock->locked, !VAL, release_err);
/*
* take ownership and clear the pending bit.
@@ -380,7 +380,7 @@ queue:
/* Link @node into the waitqueue. */
WRITE_ONCE(prev->next, node);
- arch_mcs_spin_lock_contended_label(&node->locked, release_node_err);
+ (void)arch_mcs_spin_lock_contended_label(&node->locked, release_node_err);
/*
* While waiting for the MCS lock, the next pointer may have
diff --git a/tools/testing/selftests/bpf/progs/bpf_cc_cubic.c b/tools/testing/selftests/bpf/progs/bpf_cc_cubic.c
index 1654a530aa3d..9af19dfe4e80 100644
--- a/tools/testing/selftests/bpf/progs/bpf_cc_cubic.c
+++ b/tools/testing/selftests/bpf/progs/bpf_cc_cubic.c
@@ -22,10 +22,6 @@
#define TCP_PACING_CA_RATIO (120)
#define TCP_REORDERING (12)
-#define min(a, b) ((a) < (b) ? (a) : (b))
-#define max(a, b) ((a) > (b) ? (a) : (b))
-#define after(seq2, seq1) before(seq1, seq2)
-
extern void cubictcp_init(struct sock *sk) __ksym;
extern void cubictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event) __ksym;
extern __u32 cubictcp_recalc_ssthresh(struct sock *sk) __ksym;
@@ -34,11 +30,6 @@ extern __u32 tcp_reno_undo_cwnd(struct sock *sk) __ksym;
extern void cubictcp_acked(struct sock *sk, const struct ack_sample *sample) __ksym;
extern void cubictcp_cong_avoid(struct sock *sk, __u32 ack, __u32 acked) __ksym;
-static bool before(__u32 seq1, __u32 seq2)
-{
- return (__s32)(seq1-seq2) < 0;
-}
-
static __u64 div64_u64(__u64 dividend, __u64 divisor)
{
return dividend / divisor;
@@ -101,7 +92,7 @@ static void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked,
tp->snd_cwnd = pkts_in_flight + sndcnt;
}
-/* Decide wheather to run the increase function of congestion control. */
+/* Decide whether to run the increase function of congestion control. */
static bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
{
if (tcp_sk(sk)->reordering > TCP_REORDERING)
diff --git a/tools/testing/selftests/bpf/progs/bpf_cubic.c b/tools/testing/selftests/bpf/progs/bpf_cubic.c
index f089faa97ae6..46fb2b37d3a7 100644
--- a/tools/testing/selftests/bpf/progs/bpf_cubic.c
+++ b/tools/testing/selftests/bpf/progs/bpf_cubic.c
@@ -20,13 +20,6 @@
char _license[] SEC("license") = "GPL";
#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
-#define min(a, b) ((a) < (b) ? (a) : (b))
-#define max(a, b) ((a) > (b) ? (a) : (b))
-static bool before(__u32 seq1, __u32 seq2)
-{
- return (__s32)(seq1-seq2) < 0;
-}
-#define after(seq2, seq1) before(seq1, seq2)
extern __u32 tcp_slow_start(struct tcp_sock *tp, __u32 acked) __ksym;
extern void tcp_cong_avoid_ai(struct tcp_sock *tp, __u32 w, __u32 acked) __ksym;
diff --git a/tools/testing/selftests/bpf/progs/bpf_dctcp.c b/tools/testing/selftests/bpf/progs/bpf_dctcp.c
index 7cd73e75f52a..1cc83140849f 100644
--- a/tools/testing/selftests/bpf/progs/bpf_dctcp.c
+++ b/tools/testing/selftests/bpf/progs/bpf_dctcp.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
-/* WARNING: This implemenation is not necessarily the same
+/* WARNING: This implementation is not necessarily the same
* as the tcp_dctcp.c. The purpose is mainly for testing
* the kernel BPF logic.
*/
@@ -13,16 +13,10 @@
#ifndef EBUSY
#define EBUSY 16
#endif
-#define min(a, b) ((a) < (b) ? (a) : (b))
-#define max(a, b) ((a) > (b) ? (a) : (b))
#define min_not_zero(x, y) ({ \
typeof(x) __x = (x); \
typeof(y) __y = (y); \
__x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
-static bool before(__u32 seq1, __u32 seq2)
-{
- return (__s32)(seq1-seq2) < 0;
-}
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_gotox.c b/tools/testing/selftests/bpf/progs/bpf_gotox.c
new file mode 100644
index 000000000000..216c71b94c64
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_gotox.c
@@ -0,0 +1,448 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+
+__u64 in_user;
+__u64 ret_user;
+
+int pid;
+
+/*
+ * Skip all the tests if compiler doesn't support indirect jumps.
+ *
+ * If tests are skipped, then all functions below are compiled as
+ * dummy, such that the skeleton looks the same, and the userspace
+ * program can avoid any checks rather than if data->skip is set.
+ */
+#ifdef __BPF_FEATURE_GOTOX
+__u64 skip SEC(".data") = 0;
+#else
+__u64 skip = 1;
+#endif
+
+struct simple_ctx {
+ __u64 x;
+};
+
+#ifdef __BPF_FEATURE_GOTOX
+__u64 some_var;
+
+/*
+ * This function adds code which will be replaced by a different
+ * number of instructions by the verifier. This adds additional
+ * stress on testing the insn_array maps corresponding to indirect jumps.
+ */
+static __always_inline void adjust_insns(__u64 x)
+{
+ some_var ^= x + bpf_jiffies64();
+}
+
+SEC("syscall")
+int one_switch(struct simple_ctx *ctx)
+{
+ switch (ctx->x) {
+ case 0:
+ adjust_insns(ctx->x + 1);
+ ret_user = 2;
+ break;
+ case 1:
+ adjust_insns(ctx->x + 7);
+ ret_user = 3;
+ break;
+ case 2:
+ adjust_insns(ctx->x + 9);
+ ret_user = 4;
+ break;
+ case 3:
+ adjust_insns(ctx->x + 11);
+ ret_user = 5;
+ break;
+ case 4:
+ adjust_insns(ctx->x + 17);
+ ret_user = 7;
+ break;
+ default:
+ adjust_insns(ctx->x + 177);
+ ret_user = 19;
+ break;
+ }
+
+ return 0;
+}
+
+SEC("syscall")
+int one_switch_non_zero_sec_off(struct simple_ctx *ctx)
+{
+ switch (ctx->x) {
+ case 0:
+ adjust_insns(ctx->x + 1);
+ ret_user = 2;
+ break;
+ case 1:
+ adjust_insns(ctx->x + 7);
+ ret_user = 3;
+ break;
+ case 2:
+ adjust_insns(ctx->x + 9);
+ ret_user = 4;
+ break;
+ case 3:
+ adjust_insns(ctx->x + 11);
+ ret_user = 5;
+ break;
+ case 4:
+ adjust_insns(ctx->x + 17);
+ ret_user = 7;
+ break;
+ default:
+ adjust_insns(ctx->x + 177);
+ ret_user = 19;
+ break;
+ }
+
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int simple_test_other_sec(struct pt_regs *ctx)
+{
+ __u64 x = in_user;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ switch (x) {
+ case 0:
+ adjust_insns(x + 1);
+ ret_user = 2;
+ break;
+ case 1:
+ adjust_insns(x + 7);
+ ret_user = 3;
+ break;
+ case 2:
+ adjust_insns(x + 9);
+ ret_user = 4;
+ break;
+ case 3:
+ adjust_insns(x + 11);
+ ret_user = 5;
+ break;
+ case 4:
+ adjust_insns(x + 17);
+ ret_user = 7;
+ break;
+ default:
+ adjust_insns(x + 177);
+ ret_user = 19;
+ break;
+ }
+
+ return 0;
+}
+
+SEC("syscall")
+int two_switches(struct simple_ctx *ctx)
+{
+ switch (ctx->x) {
+ case 0:
+ adjust_insns(ctx->x + 1);
+ ret_user = 2;
+ break;
+ case 1:
+ adjust_insns(ctx->x + 7);
+ ret_user = 3;
+ break;
+ case 2:
+ adjust_insns(ctx->x + 9);
+ ret_user = 4;
+ break;
+ case 3:
+ adjust_insns(ctx->x + 11);
+ ret_user = 5;
+ break;
+ case 4:
+ adjust_insns(ctx->x + 17);
+ ret_user = 7;
+ break;
+ default:
+ adjust_insns(ctx->x + 177);
+ ret_user = 19;
+ break;
+ }
+
+ switch (ctx->x + !!ret_user) {
+ case 1:
+ adjust_insns(ctx->x + 7);
+ ret_user = 103;
+ break;
+ case 2:
+ adjust_insns(ctx->x + 9);
+ ret_user = 104;
+ break;
+ case 3:
+ adjust_insns(ctx->x + 11);
+ ret_user = 107;
+ break;
+ case 4:
+ adjust_insns(ctx->x + 11);
+ ret_user = 205;
+ break;
+ case 5:
+ adjust_insns(ctx->x + 11);
+ ret_user = 115;
+ break;
+ default:
+ adjust_insns(ctx->x + 177);
+ ret_user = 1019;
+ break;
+ }
+
+ return 0;
+}
+
+SEC("syscall")
+int big_jump_table(struct simple_ctx *ctx __attribute__((unused)))
+{
+ const void *const jt[256] = {
+ [0 ... 255] = &&default_label,
+ [0] = &&l0,
+ [11] = &&l11,
+ [27] = &&l27,
+ [31] = &&l31,
+ };
+
+ goto *jt[ctx->x & 0xff];
+
+l0:
+ adjust_insns(ctx->x + 1);
+ ret_user = 2;
+ return 0;
+
+l11:
+ adjust_insns(ctx->x + 7);
+ ret_user = 3;
+ return 0;
+
+l27:
+ adjust_insns(ctx->x + 9);
+ ret_user = 4;
+ return 0;
+
+l31:
+ adjust_insns(ctx->x + 11);
+ ret_user = 5;
+ return 0;
+
+default_label:
+ adjust_insns(ctx->x + 177);
+ ret_user = 19;
+ return 0;
+}
+
+SEC("syscall")
+int one_jump_two_maps(struct simple_ctx *ctx __attribute__((unused)))
+{
+ __label__ l1, l2, l3, l4;
+ void *jt1[2] = { &&l1, &&l2 };
+ void *jt2[2] = { &&l3, &&l4 };
+ unsigned int a = ctx->x % 2;
+ unsigned int b = (ctx->x / 2) % 2;
+ volatile int ret = 0;
+
+ if (!(a < 2 && b < 2))
+ return 19;
+
+ if (ctx->x % 2)
+ goto *jt1[a];
+ else
+ goto *jt2[b];
+
+ l1: ret += 1;
+ l2: ret += 3;
+ l3: ret += 5;
+ l4: ret += 7;
+
+ ret_user = ret;
+ return ret;
+}
+
+SEC("syscall")
+int one_map_two_jumps(struct simple_ctx *ctx __attribute__((unused)))
+{
+ __label__ l1, l2, l3;
+ void *jt[3] = { &&l1, &&l2, &&l3 };
+ unsigned int a = (ctx->x >> 2) & 1;
+ unsigned int b = (ctx->x >> 3) & 1;
+ volatile int ret = 0;
+
+ if (ctx->x % 2)
+ goto *jt[a];
+
+ if (ctx->x % 3)
+ goto *jt[a + b];
+
+ l1: ret += 3;
+ l2: ret += 5;
+ l3: ret += 7;
+
+ ret_user = ret;
+ return ret;
+}
+
+/* Just to introduce some non-zero offsets in .text */
+static __noinline int f0(volatile struct simple_ctx *ctx __arg_ctx)
+{
+ if (ctx)
+ return 1;
+ else
+ return 13;
+}
+
+SEC("syscall") int f1(struct simple_ctx *ctx)
+{
+ ret_user = 0;
+ return f0(ctx);
+}
+
+static __noinline int __static_global(__u64 x)
+{
+ switch (x) {
+ case 0:
+ adjust_insns(x + 1);
+ ret_user = 2;
+ break;
+ case 1:
+ adjust_insns(x + 7);
+ ret_user = 3;
+ break;
+ case 2:
+ adjust_insns(x + 9);
+ ret_user = 4;
+ break;
+ case 3:
+ adjust_insns(x + 11);
+ ret_user = 5;
+ break;
+ case 4:
+ adjust_insns(x + 17);
+ ret_user = 7;
+ break;
+ default:
+ adjust_insns(x + 177);
+ ret_user = 19;
+ break;
+ }
+
+ return 0;
+}
+
+SEC("syscall")
+int use_static_global1(struct simple_ctx *ctx)
+{
+ ret_user = 0;
+ return __static_global(ctx->x);
+}
+
+SEC("syscall")
+int use_static_global2(struct simple_ctx *ctx)
+{
+ ret_user = 0;
+ adjust_insns(ctx->x + 1);
+ return __static_global(ctx->x);
+}
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int use_static_global_other_sec(void *ctx)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ return __static_global(in_user);
+}
+
+__noinline int __nonstatic_global(__u64 x)
+{
+ switch (x) {
+ case 0:
+ adjust_insns(x + 1);
+ ret_user = 2;
+ break;
+ case 1:
+ adjust_insns(x + 7);
+ ret_user = 3;
+ break;
+ case 2:
+ adjust_insns(x + 9);
+ ret_user = 4;
+ break;
+ case 3:
+ adjust_insns(x + 11);
+ ret_user = 5;
+ break;
+ case 4:
+ adjust_insns(x + 17);
+ ret_user = 7;
+ break;
+ default:
+ adjust_insns(x + 177);
+ ret_user = 19;
+ break;
+ }
+
+ return 0;
+}
+
+SEC("syscall")
+int use_nonstatic_global1(struct simple_ctx *ctx)
+{
+ ret_user = 0;
+ return __nonstatic_global(ctx->x);
+}
+
+SEC("syscall")
+int use_nonstatic_global2(struct simple_ctx *ctx)
+{
+ ret_user = 0;
+ adjust_insns(ctx->x + 1);
+ return __nonstatic_global(ctx->x);
+}
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int use_nonstatic_global_other_sec(void *ctx)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ return __nonstatic_global(in_user);
+}
+
+#else /* __BPF_FEATURE_GOTOX */
+
+#define SKIP_TEST(TEST_NAME) \
+ SEC("syscall") int TEST_NAME(void *ctx) \
+ { \
+ return 0; \
+ }
+
+SKIP_TEST(one_switch);
+SKIP_TEST(one_switch_non_zero_sec_off);
+SKIP_TEST(simple_test_other_sec);
+SKIP_TEST(two_switches);
+SKIP_TEST(big_jump_table);
+SKIP_TEST(one_jump_two_maps);
+SKIP_TEST(one_map_two_jumps);
+SKIP_TEST(use_static_global1);
+SKIP_TEST(use_static_global2);
+SKIP_TEST(use_static_global_other_sec);
+SKIP_TEST(use_nonstatic_global1);
+SKIP_TEST(use_nonstatic_global2);
+SKIP_TEST(use_nonstatic_global_other_sec);
+
+#endif /* __BPF_FEATURE_GOTOX */
+
+char _license[] SEC("license") = "GPL";
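For reference, the source-level idiom all of these tests revolve around is the GCC/Clang labels-as-values extension; under __BPF_FEATURE_GOTOX the compiler lowers the indirect goto to the new gotox instruction, with the label table carried in an insn_array map (the map detail is inferred from the adjust_insns() comment above). A minimal sketch:

	static __noinline int dispatch(unsigned int i)
	{
		const void *jt[2] = { &&even, &&odd }; /* labels-as-values jump table */

		goto *jt[i & 1]; /* becomes a gotox indirect jump when available */
	even:
		return 0;
	odd:
		return 1;
	}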
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_map_elem.c b/tools/testing/selftests/bpf/progs/bpf_iter_map_elem.c
new file mode 100644
index 000000000000..2f20485e0de3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_map_elem.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "vmlinux.h"
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u32 value_sum = 0;
+
+SEC("iter/bpf_map_elem")
+int dump_bpf_map_values(struct bpf_iter__bpf_map_elem *ctx)
+{
+ __u32 value = 0;
+
+ if (ctx->value == (void *)0)
+ return 0;
+
+ bpf_probe_read_kernel(&value, sizeof(value), ctx->value);
+ value_sum += value;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c b/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c
index 774d4dbe8189..a8aa5a71d846 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c
@@ -18,23 +18,10 @@
unsigned short reuse_listen_hport = 0;
unsigned short listen_hport = 0;
-char cubic_cc[TCP_CA_NAME_MAX] = "bpf_cubic";
+const char cubic_cc[] = "bpf_cubic";
char dctcp_cc[TCP_CA_NAME_MAX] = "bpf_dctcp";
bool random_retry = false;
-static bool tcp_cc_eq(const char *a, const char *b)
-{
- int i;
-
- for (i = 0; i < TCP_CA_NAME_MAX; i++) {
- if (a[i] != b[i])
- return false;
- if (!a[i])
- break;
- }
-
- return true;
-}
SEC("iter/tcp")
int change_tcp_cc(struct bpf_iter__tcp *ctx)
@@ -58,7 +45,7 @@ int change_tcp_cc(struct bpf_iter__tcp *ctx)
cur_cc, sizeof(cur_cc)))
return 0;
- if (!tcp_cc_eq(cur_cc, cubic_cc))
+ if (bpf_strncmp(cur_cc, TCP_CA_NAME_MAX, cubic_cc))
return 0;
if (random_retry && bpf_get_prandom_u32() % 4 == 1)
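One note on the helper swap above: bpf_strncmp(s1, s1_sz, s2) follows strncmp() semantics, returning 0 only when the strings are equal up to s1_sz bytes, so the open-coded tcp_cc_eq() loop collapses into a single guard:

	/* skip sockets whose current congestion control is not bpf_cubic */
	if (bpf_strncmp(cur_cc, TCP_CA_NAME_MAX, cubic_cc) != 0)
		return 0;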
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c
index 164640db3a29..b1e509b231cd 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c
@@ -99,13 +99,13 @@ static int dump_tcp_sock(struct seq_file *seq, struct tcp_sock *tp,
icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
timer_active = 1;
- timer_expires = icsk->icsk_retransmit_timer.expires;
+ timer_expires = sp->tcp_retransmit_timer.expires;
} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
timer_active = 4;
- timer_expires = icsk->icsk_retransmit_timer.expires;
- } else if (timer_pending(&sp->sk_timer)) {
+ timer_expires = sp->tcp_retransmit_timer.expires;
+ } else if (timer_pending(&icsk->icsk_keepalive_timer)) {
timer_active = 2;
- timer_expires = sp->sk_timer.expires;
+ timer_expires = icsk->icsk_keepalive_timer.expires;
} else {
timer_active = 0;
timer_expires = bpf_jiffies64();
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c
index 591c703f5032..dbc7166aee91 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c
@@ -99,13 +99,13 @@ static int dump_tcp6_sock(struct seq_file *seq, struct tcp6_sock *tp,
icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
timer_active = 1;
- timer_expires = icsk->icsk_retransmit_timer.expires;
+ timer_expires = sp->tcp_retransmit_timer.expires;
} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
timer_active = 4;
- timer_expires = icsk->icsk_retransmit_timer.expires;
- } else if (timer_pending(&sp->sk_timer)) {
+ timer_expires = sp->tcp_retransmit_timer.expires;
+ } else if (timer_pending(&icsk->icsk_keepalive_timer)) {
timer_active = 2;
- timer_expires = sp->sk_timer.expires;
+ timer_expires = icsk->icsk_keepalive_timer.expires;
} else {
timer_active = 0;
timer_expires = bpf_jiffies64();
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c b/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c
index ffbd4b116d17..23b2aa2604de 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c
@@ -64,7 +64,8 @@ int dump_udp4(struct bpf_iter__udp *ctx)
0, 0L, 0, ctx->uid, 0,
sock_i_ino(&inet->sk),
inet->sk.sk_refcnt.refs.counter, udp_sk,
- inet->sk.sk_drops.counter);
+ udp_sk->drop_counters.drops0.counter +
+ udp_sk->drop_counters.drops1.counter);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c b/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c
index 47ff7754f4fd..c48b05aa2a4b 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c
@@ -72,7 +72,7 @@ int dump_udp6(struct bpf_iter__udp *ctx)
0, 0L, 0, ctx->uid, 0,
sock_i_ino(&inet->sk),
inet->sk.sk_refcnt.refs.counter, udp_sk,
- inet->sk.sk_drops.counter);
-
+ udp_sk->drop_counters.drops0.counter +
+ udp_sk->drop_counters.drops1.counter);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h
index 6e208e24ba3b..c9bfbe1bafc1 100644
--- a/tools/testing/selftests/bpf/progs/bpf_misc.h
+++ b/tools/testing/selftests/bpf/progs/bpf_misc.h
@@ -33,7 +33,20 @@
* e.g. "foo{{[0-9]+}}" matches strings like "foo007".
* Extended POSIX regular expression syntax is allowed
* inside the brackets.
+ * __not_msg Message not expected to be found in verifier log.
+ * If __not_msg is situated between __msg tags,
+ * the framework matches the __msg tags first, and then
+ * checks that __not_msg is not present in the portion of
+ * the log between the bracketing __msg tags.
+ * Same regex syntax as for __msg is supported.
* __msg_unpriv Same as __msg but for unprivileged mode.
+ * __not_msg_unpriv Same as __not_msg but for unprivileged mode.
+ *
+ * __stderr Message expected to be found in the bpf stderr stream.
+ * The same regex rules as for __msg apply.
+ * __stderr_unpriv Same as __stderr but for unprivileged mode.
+ * __stdout Same as __stderr but for the stdout stream.
+ * __stdout_unpriv Same as __stdout but for unprivileged mode.
*
* __xlated Expect a line in a disassembly log after verifier applies rewrites.
* Multiple __xlated attributes could be specified.
@@ -83,9 +96,11 @@
* expect return value to match passed parameter:
* - a decimal number
* - a hexadecimal number, when starts from 0x
- * - literal INT_MIN
- * - literal POINTER_VALUE (see definition below)
- * - literal TEST_DATA_LEN (see definition below)
+ * - a macro which expands to one of the above
+ * - literal _INT_MIN (expands to INT_MIN)
+ * In addition, two special macros are defined below:
+ * - POINTER_VALUE
+ * - TEST_DATA_LEN
* __retval_unpriv Same, but load program in unprivileged mode.
*
* __description Text to be used instead of a program name for display
@@ -111,22 +126,27 @@
* Several __arch_* annotations could be specified at once.
* When test case is not run on current arch it is marked as skipped.
* __caps_unpriv Specify the capabilities that should be set when running the test.
+ *
+ * __linear_size Specify the size of the linear area of non-linear skbs, or
+ * 0 for linear skbs.
*/
#define __msg(msg) __attribute__((btf_decl_tag("comment:test_expect_msg=" XSTR(__COUNTER__) "=" msg)))
+#define __not_msg(msg) __attribute__((btf_decl_tag("comment:test_expect_not_msg=" XSTR(__COUNTER__) "=" msg)))
#define __xlated(msg) __attribute__((btf_decl_tag("comment:test_expect_xlated=" XSTR(__COUNTER__) "=" msg)))
#define __jited(msg) __attribute__((btf_decl_tag("comment:test_jited=" XSTR(__COUNTER__) "=" msg)))
#define __failure __attribute__((btf_decl_tag("comment:test_expect_failure")))
#define __success __attribute__((btf_decl_tag("comment:test_expect_success")))
#define __description(desc) __attribute__((btf_decl_tag("comment:test_description=" desc)))
#define __msg_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_msg_unpriv=" XSTR(__COUNTER__) "=" msg)))
+#define __not_msg_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_not_msg_unpriv=" XSTR(__COUNTER__) "=" msg)))
#define __xlated_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_xlated_unpriv=" XSTR(__COUNTER__) "=" msg)))
#define __jited_unpriv(msg) __attribute__((btf_decl_tag("comment:test_jited=" XSTR(__COUNTER__) "=" msg)))
#define __failure_unpriv __attribute__((btf_decl_tag("comment:test_expect_failure_unpriv")))
#define __success_unpriv __attribute__((btf_decl_tag("comment:test_expect_success_unpriv")))
#define __log_level(lvl) __attribute__((btf_decl_tag("comment:test_log_level="#lvl)))
#define __flag(flag) __attribute__((btf_decl_tag("comment:test_prog_flags="#flag)))
-#define __retval(val) __attribute__((btf_decl_tag("comment:test_retval="#val)))
-#define __retval_unpriv(val) __attribute__((btf_decl_tag("comment:test_retval_unpriv="#val)))
+#define __retval(val) __attribute__((btf_decl_tag("comment:test_retval="XSTR(val))))
+#define __retval_unpriv(val) __attribute__((btf_decl_tag("comment:test_retval_unpriv="XSTR(val))))
#define __auxiliary __attribute__((btf_decl_tag("comment:test_auxiliary")))
#define __auxiliary_unpriv __attribute__((btf_decl_tag("comment:test_auxiliary_unpriv")))
#define __btf_path(path) __attribute__((btf_decl_tag("comment:test_btf_path=" path)))
@@ -134,9 +154,15 @@
#define __arch_x86_64 __arch("X86_64")
#define __arch_arm64 __arch("ARM64")
#define __arch_riscv64 __arch("RISCV64")
+#define __arch_s390x __arch("s390x")
#define __caps_unpriv(caps) __attribute__((btf_decl_tag("comment:test_caps_unpriv=" EXPAND_QUOTE(caps))))
#define __load_if_JITed() __attribute__((btf_decl_tag("comment:load_mode=jited")))
#define __load_if_no_JITed() __attribute__((btf_decl_tag("comment:load_mode=no_jited")))
+#define __stderr(msg) __attribute__((btf_decl_tag("comment:test_expect_stderr=" XSTR(__COUNTER__) "=" msg)))
+#define __stderr_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_stderr_unpriv=" XSTR(__COUNTER__) "=" msg)))
+#define __stdout(msg) __attribute__((btf_decl_tag("comment:test_expect_stdout=" XSTR(__COUNTER__) "=" msg)))
+#define __stdout_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_stdout_unpriv=" XSTR(__COUNTER__) "=" msg)))
+#define __linear_size(sz) __attribute__((btf_decl_tag("comment:test_linear_size=" XSTR(sz))))
/* Define common capabilities tested using __caps_unpriv */
#define CAP_NET_ADMIN 12
@@ -154,8 +180,12 @@
#define __imm_ptr(name) [name]"r"(&name)
#define __imm_insn(name, expr) [name]"i"(*(long *)&(expr))
+#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
+#define offsetofend(TYPE, MEMBER) \
+ (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
+
/* Magic constants used with __retval() */
-#define POINTER_VALUE 0xcafe4all
+#define POINTER_VALUE 0xbadcafe
#define TEST_DATA_LEN 64
#ifndef __used
@@ -227,8 +257,17 @@
#if __clang_major__ >= 18 && defined(ENABLE_ATOMICS_TESTS) && \
(defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
- (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64))
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) || \
+ (defined(__TARGET_ARCH_powerpc))
#define CAN_USE_LOAD_ACQ_STORE_REL
#endif
+#if defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86)
+#define SPEC_V1
+#endif
+
+#if defined(__TARGET_ARCH_x86)
+#define SPEC_V4
+#endif
+
#endif
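
Taken together, the new annotations compose with the existing ones in the usual declarative style. A minimal hypothetical sketch (program name and message strings are illustrative, not from the patch):

SEC("socket")
__description("hypothetical combined-annotation test")
__log_level(2)
__success
__msg("processed")			/* must appear in the verifier log */
__not_msg("misaligned stack access")	/* must NOT appear in the log */
__retval(0)
int annotations_example(void *ctx)
{
	return 0;
}
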
diff --git a/tools/testing/selftests/bpf/progs/bpf_smc.c b/tools/testing/selftests/bpf/progs/bpf_smc.c
new file mode 100644
index 000000000000..70d8b08f5914
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_smc.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_tracing_net.h"
+
+char _license[] SEC("license") = "GPL";
+
+enum {
+ BPF_SMC_LISTEN = 10,
+};
+
+struct smc_sock___local {
+ struct sock sk;
+ struct smc_sock *listen_smc;
+ bool use_fallback;
+} __attribute__((preserve_access_index));
+
+int smc_cnt = 0;
+int fallback_cnt = 0;
+
+SEC("fentry/smc_release")
+int BPF_PROG(bpf_smc_release, struct socket *sock)
+{
+ /* only count from one side (client) */
+ if (sock->sk->__sk_common.skc_state == BPF_SMC_LISTEN)
+ return 0;
+ smc_cnt++;
+ return 0;
+}
+
+SEC("fentry/smc_switch_to_fallback")
+int BPF_PROG(bpf_smc_switch_to_fallback, struct smc_sock___local *smc)
+{
+ /* only count from one side (client) */
+ if (smc && !smc->listen_smc)
+ fallback_cnt++;
+ return 0;
+}
+
+/* go with the default value if no strategy was found */
+bool default_ip_strat_value = true;
+
+struct smc_policy_ip_key {
+ __u32 sip;
+ __u32 dip;
+};
+
+struct smc_policy_ip_value {
+ __u8 mode;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(struct smc_policy_ip_key));
+ __uint(value_size, sizeof(struct smc_policy_ip_value));
+ __uint(max_entries, 128);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+} smc_policy_ip SEC(".maps");
+
+static bool smc_check(__u32 src, __u32 dst)
+{
+ struct smc_policy_ip_value *value;
+ struct smc_policy_ip_key key = {
+ .sip = src,
+ .dip = dst,
+ };
+
+ value = bpf_map_lookup_elem(&smc_policy_ip, &key);
+ return value ? value->mode : default_ip_strat_value;
+}
+
+SEC("fmod_ret/update_socket_protocol")
+int BPF_PROG(smc_run, int family, int type, int protocol)
+{
+ struct task_struct *task;
+
+ if (family != AF_INET && family != AF_INET6)
+ return protocol;
+
+ if ((type & 0xf) != SOCK_STREAM)
+ return protocol;
+
+ if (protocol != 0 && protocol != IPPROTO_TCP)
+ return protocol;
+
+ task = bpf_get_current_task_btf();
+ /* Prevent this from affecting other tests */
+ if (!task || !task->nsproxy->net_ns->smc.hs_ctrl)
+ return protocol;
+
+ return IPPROTO_SMC;
+}
+
+SEC("struct_ops")
+int BPF_PROG(bpf_smc_set_tcp_option_cond, const struct tcp_sock *tp,
+ struct inet_request_sock *ireq)
+{
+ return smc_check(ireq->req.__req_common.skc_daddr,
+ ireq->req.__req_common.skc_rcv_saddr);
+}
+
+SEC("struct_ops")
+int BPF_PROG(bpf_smc_set_tcp_option, struct tcp_sock *tp)
+{
+ return smc_check(tp->inet_conn.icsk_inet.sk.__sk_common.skc_rcv_saddr,
+ tp->inet_conn.icsk_inet.sk.__sk_common.skc_daddr);
+}
+
+SEC(".struct_ops")
+struct smc_hs_ctrl linkcheck = {
+ .name = "linkcheck",
+ .syn_option = (void *)bpf_smc_set_tcp_option,
+ .synack_option = (void *)bpf_smc_set_tcp_option_cond,
+};
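
The smc_policy_ip hash map is presumably populated from user space before traffic flows; smc_check() then returns the stored per-flow mode instead of default_ip_strat_value. A hypothetical user-space sketch (not part of the patch; it assumes the key/value structs are shared via a header and that mode 0 means "fall back to plain TCP"):

#include <arpa/inet.h>
#include <bpf/bpf.h>

static int deny_smc_for_flow(int map_fd, const char *sip, const char *dip)
{
	struct smc_policy_ip_key key = {
		.sip = inet_addr(sip),	/* byte order must match skc_* fields */
		.dip = inet_addr(dip),
	};
	struct smc_policy_ip_value val = { .mode = 0 };	/* assumed: 0 = use TCP */

	return bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
}
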
diff --git a/tools/testing/selftests/bpf/progs/bpf_test_utils.h b/tools/testing/selftests/bpf/progs/bpf_test_utils.h
new file mode 100644
index 000000000000..f4e67b492dd2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_test_utils.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BPF_TEST_UTILS_H__
+#define __BPF_TEST_UTILS_H__
+
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+/* Clobber as many native registers and stack slots as possible. */
+static __always_inline void clobber_regs_stack(void)
+{
+ char tmp_str[] = "123456789";
+ unsigned long tmp;
+
+ bpf_strtoul(tmp_str, sizeof(tmp_str), 0, &tmp);
+ __sink(tmp);
+}
+
+#endif
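
A hedged usage sketch (not part of the patch): a test program would call clobber_regs_stack() ahead of the code under test, so that no stale values accidentally survive in registers or stack slots and mask a spill/fill bug.

SEC("syscall")
int run_after_clobber(void *ctx)
{
	clobber_regs_stack();
	/* ... code whose spill/fill behaviour is being exercised ... */
	return 0;
}
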
diff --git a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h
index 17db400f0e0d..d8dacef37c16 100644
--- a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h
+++ b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h
@@ -146,6 +146,20 @@
#define tcp_jiffies32 ((__u32)bpf_jiffies64())
+#ifndef min
+#define min(a, b) ((a) < (b) ? (a) : (b))
+#endif
+#ifndef max
+#define max(a, b) ((a) > (b) ? (a) : (b))
+#endif
+
+static inline bool before(__u32 seq1, __u32 seq2)
+{
+ return (__s32)(seq1 - seq2) < 0;
+}
+
+#define after(seq2, seq1) before(seq1, seq2)
+
static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
{
return (struct inet_connection_sock *)sk;
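
The before()/after() pair implements TCP sequence-number comparison in modulo-2^32 arithmetic. A short worked example (not part of the patch) of how wraparound is handled:

/*
 *   before(10, 20)           -> (__s32)(10 - 20)        = -10 < 0 -> true
 *   before(0xfffffff0, 0x10) -> (__s32)0xffffffe0       = -32 < 0 -> true
 *   after(0x10, 0xfffffff0)  -> before(0xfffffff0, 0x10)          -> true
 *
 * so a sequence number that has wrapped past zero still orders as "later".
 */
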
diff --git a/tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c b/tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c
index 69f81cb555ca..d93f68024cc6 100644
--- a/tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c
+++ b/tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c
@@ -57,15 +57,15 @@ int BPF_PROG(test_percpu_load, struct cgroup *cgrp, const char *path)
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_percpu_helper, struct cgroup *cgrp, const char *path)
{
- struct cgroup_rstat_cpu *rstat;
+ struct css_rstat_cpu *rstat;
__u32 cpu;
cpu = bpf_get_smp_processor_id();
- rstat = (struct cgroup_rstat_cpu *)bpf_per_cpu_ptr(
+ rstat = (struct css_rstat_cpu *)bpf_per_cpu_ptr(
cgrp->self.rstat_cpu, cpu);
if (rstat) {
/* READ_ONCE */
- *(volatile int *)rstat;
+ *(volatile long *)rstat;
}
return 0;
diff --git a/tools/testing/selftests/bpf/progs/cgroup_mprog.c b/tools/testing/selftests/bpf/progs/cgroup_mprog.c
new file mode 100644
index 000000000000..6a0ea02c4de2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgroup_mprog.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("cgroup/getsockopt")
+int getsockopt_1(struct bpf_sockopt *ctx)
+{
+ return 1;
+}
+
+SEC("cgroup/getsockopt")
+int getsockopt_2(struct bpf_sockopt *ctx)
+{
+ return 1;
+}
+
+SEC("cgroup/getsockopt")
+int getsockopt_3(struct bpf_sockopt *ctx)
+{
+ return 1;
+}
+
+SEC("cgroup/getsockopt")
+int getsockopt_4(struct bpf_sockopt *ctx)
+{
+ return 1;
+}
diff --git a/tools/testing/selftests/bpf/progs/cgroup_read_xattr.c b/tools/testing/selftests/bpf/progs/cgroup_read_xattr.c
new file mode 100644
index 000000000000..88e13e17ec9e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgroup_read_xattr.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+char value[16];
+
+static __always_inline void read_xattr(struct cgroup *cgroup)
+{
+ struct bpf_dynptr value_ptr;
+
+ bpf_dynptr_from_mem(value, sizeof(value), 0, &value_ptr);
+ bpf_cgroup_read_xattr(cgroup, "user.bpf_test",
+ &value_ptr);
+}
+
+SEC("lsm.s/socket_connect")
+__success
+int BPF_PROG(trusted_cgroup_ptr_sleepable)
+{
+ u64 cgrp_id = bpf_get_current_cgroup_id();
+ struct cgroup *cgrp;
+
+ cgrp = bpf_cgroup_from_id(cgrp_id);
+ if (!cgrp)
+ return 0;
+
+ read_xattr(cgrp);
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
+
+SEC("lsm/socket_connect")
+__success
+int BPF_PROG(trusted_cgroup_ptr_non_sleepable)
+{
+ u64 cgrp_id = bpf_get_current_cgroup_id();
+ struct cgroup *cgrp;
+
+ cgrp = bpf_cgroup_from_id(cgrp_id);
+ if (!cgrp)
+ return 0;
+
+ read_xattr(cgrp);
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
+
+SEC("lsm/socket_connect")
+__success
+int BPF_PROG(use_css_iter_non_sleepable)
+{
+ u64 cgrp_id = bpf_get_current_cgroup_id();
+ struct cgroup_subsys_state *css;
+ struct cgroup *cgrp;
+
+ cgrp = bpf_cgroup_from_id(cgrp_id);
+ if (!cgrp)
+ return 0;
+
+ bpf_for_each(css, css, &cgrp->self, BPF_CGROUP_ITER_ANCESTORS_UP)
+ read_xattr(css->cgroup);
+
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
+
+SEC("lsm.s/socket_connect")
+__failure __msg("kernel func bpf_iter_css_new requires RCU critical section protection")
+int BPF_PROG(use_css_iter_sleepable_missing_rcu_lock)
+{
+ u64 cgrp_id = bpf_get_current_cgroup_id();
+ struct cgroup_subsys_state *css;
+ struct cgroup *cgrp;
+
+ cgrp = bpf_cgroup_from_id(cgrp_id);
+ if (!cgrp)
+ return 0;
+
+ bpf_for_each(css, css, &cgrp->self, BPF_CGROUP_ITER_ANCESTORS_UP)
+ read_xattr(css->cgroup);
+
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
+
+SEC("lsm.s/socket_connect")
+__success
+int BPF_PROG(use_css_iter_sleepable_with_rcu_lock)
+{
+ u64 cgrp_id = bpf_get_current_cgroup_id();
+ struct cgroup_subsys_state *css;
+ struct cgroup *cgrp;
+
+ bpf_rcu_read_lock();
+ cgrp = bpf_cgroup_from_id(cgrp_id);
+ if (!cgrp)
+ goto out;
+
+ bpf_for_each(css, css, &cgrp->self, BPF_CGROUP_ITER_ANCESTORS_UP)
+ read_xattr(css->cgroup);
+
+ bpf_cgroup_release(cgrp);
+out:
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("lsm/socket_connect")
+__success
+int BPF_PROG(use_bpf_cgroup_ancestor)
+{
+ u64 cgrp_id = bpf_get_current_cgroup_id();
+ struct cgroup *cgrp, *ancestor;
+
+ cgrp = bpf_cgroup_from_id(cgrp_id);
+ if (!cgrp)
+ return 0;
+
+ ancestor = bpf_cgroup_ancestor(cgrp, 1);
+ if (!ancestor)
+ goto out;
+
+ read_xattr(cgrp);
+ bpf_cgroup_release(ancestor);
+out:
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
+
+SEC("cgroup/sendmsg4")
+__success
+int BPF_PROG(cgroup_skb)
+{
+ u64 cgrp_id = bpf_get_current_cgroup_id();
+ struct cgroup *cgrp, *ancestor;
+
+ cgrp = bpf_cgroup_from_id(cgrp_id);
+ if (!cgrp)
+ return 0;
+
+ ancestor = bpf_cgroup_ancestor(cgrp, 1);
+ if (!ancestor)
+ goto out;
+
+ read_xattr(cgrp);
+ bpf_cgroup_release(ancestor);
+out:
+ bpf_cgroup_release(cgrp);
+ return 0;
+}
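
These programs read the "user.bpf_test" xattr back from the cgroup, so the user-space side of the test presumably tags the cgroup directory first. A hypothetical setup sketch (not part of the patch; path and value are illustrative):

#include <sys/xattr.h>

static int tag_cgroup(const char *cgrp_path)
{
	/* Attach the xattr that bpf_cgroup_read_xattr() will look up. */
	return setxattr(cgrp_path, "user.bpf_test", "hello", 5, 0);
}
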
diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c b/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c
index 5354455a01be..02d8f160ca0e 100644
--- a/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c
+++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c
@@ -221,3 +221,15 @@ int BPF_PROG(test_cgrp_from_id, struct cgroup *cgrp, const char *path)
return 0;
}
+
+SEC("syscall")
+int test_cgrp_from_id_ns(void *ctx)
+{
+ struct cgroup *cg;
+
+ cg = bpf_cgroup_from_id(1);
+ if (!cg)
+ return 42;
+ bpf_cgroup_release(cg);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/compute_live_registers.c b/tools/testing/selftests/bpf/progs/compute_live_registers.c
index f3d79aecbf93..6884ab99a421 100644
--- a/tools/testing/selftests/bpf/progs/compute_live_registers.c
+++ b/tools/testing/selftests/bpf/progs/compute_live_registers.c
@@ -240,6 +240,22 @@ __naked void if2(void)
::: __clobber_all);
}
+/* The verifier would miss that r2 is live if jset were not handled properly */
+SEC("socket")
+__log_level(2)
+__msg("2: 012....... (45) if r1 & 0x7 goto pc+1")
+__naked void if3_jset_bug(void)
+{
+ asm volatile (
+ "r0 = 1;"
+ "r2 = 2;"
+ "if r1 & 0x7 goto +1;"
+ "exit;"
+ "r0 = r2;"
+ "exit;"
+ ::: __clobber_all);
+}
+
SEC("socket")
__log_level(2)
__msg("0: .......... (b7) r1 = 0")
diff --git a/tools/testing/selftests/bpf/progs/connect4_prog.c b/tools/testing/selftests/bpf/progs/connect4_prog.c
index 9e9ebf27b878..9d158cfad981 100644
--- a/tools/testing/selftests/bpf/progs/connect4_prog.c
+++ b/tools/testing/selftests/bpf/progs/connect4_prog.c
@@ -34,6 +34,9 @@
#define SOL_TCP 6
#endif
+const char reno[] = "reno";
+const char cubic[] = "cubic";
+
__attribute__ ((noinline)) __weak
int do_bind(struct bpf_sock_addr *ctx)
{
@@ -50,35 +53,27 @@ int do_bind(struct bpf_sock_addr *ctx)
}
static __inline int verify_cc(struct bpf_sock_addr *ctx,
- char expected[TCP_CA_NAME_MAX])
+ const char expected[])
{
char buf[TCP_CA_NAME_MAX];
- int i;
if (bpf_getsockopt(ctx, SOL_TCP, TCP_CONGESTION, &buf, sizeof(buf)))
return 1;
- for (i = 0; i < TCP_CA_NAME_MAX; i++) {
- if (buf[i] != expected[i])
- return 1;
- if (buf[i] == 0)
- break;
- }
+ if (bpf_strncmp(buf, TCP_CA_NAME_MAX, expected))
+ return 1;
return 0;
}
static __inline int set_cc(struct bpf_sock_addr *ctx)
{
- char reno[TCP_CA_NAME_MAX] = "reno";
- char cubic[TCP_CA_NAME_MAX] = "cubic";
-
- if (bpf_setsockopt(ctx, SOL_TCP, TCP_CONGESTION, &reno, sizeof(reno)))
+ if (bpf_setsockopt(ctx, SOL_TCP, TCP_CONGESTION, (void *)reno, sizeof(reno)))
return 1;
if (verify_cc(ctx, reno))
return 1;
- if (bpf_setsockopt(ctx, SOL_TCP, TCP_CONGESTION, &cubic, sizeof(cubic)))
+ if (bpf_setsockopt(ctx, SOL_TCP, TCP_CONGESTION, (void *)cubic, sizeof(cubic)))
return 1;
if (verify_cc(ctx, cubic))
return 1;
diff --git a/tools/testing/selftests/bpf/progs/crypto_sanity.c b/tools/testing/selftests/bpf/progs/crypto_sanity.c
index 645be6cddf36..dfd8a258f14a 100644
--- a/tools/testing/selftests/bpf/progs/crypto_sanity.c
+++ b/tools/testing/selftests/bpf/progs/crypto_sanity.c
@@ -14,7 +14,7 @@ unsigned char key[256] = {};
u16 udp_test_port = 7777;
u32 authsize, key_len;
char algo[128] = {};
-char dst[16] = {};
+char dst[16] = {}, dst_bad[8] = {};
int status;
static int skb_dynptr_validate(struct __sk_buff *skb, struct bpf_dynptr *psrc)
@@ -59,10 +59,9 @@ int skb_crypto_setup(void *ctx)
.authsize = authsize,
};
struct bpf_crypto_ctx *cctx;
- int err = 0;
+ int err;
status = 0;
-
if (key_len > 256) {
status = -EINVAL;
return 0;
@@ -70,8 +69,8 @@ int skb_crypto_setup(void *ctx)
__builtin_memcpy(&params.algo, algo, sizeof(algo));
__builtin_memcpy(&params.key, key, sizeof(key));
- cctx = bpf_crypto_ctx_create(&params, sizeof(params), &err);
+ cctx = bpf_crypto_ctx_create(&params, sizeof(params), &err);
if (!cctx) {
status = err;
return 0;
@@ -80,7 +79,6 @@ int skb_crypto_setup(void *ctx)
err = crypto_ctx_insert(cctx);
if (err && err != -EEXIST)
status = err;
-
return 0;
}
@@ -92,6 +90,7 @@ int decrypt_sanity(struct __sk_buff *skb)
struct bpf_dynptr psrc, pdst;
int err;
+ status = 0;
err = skb_dynptr_validate(skb, &psrc);
if (err < 0) {
status = err;
@@ -110,13 +109,23 @@ int decrypt_sanity(struct __sk_buff *skb)
return TC_ACT_SHOT;
}
- /* dst is a global variable to make testing part easier to check. In real
- * production code, a percpu map should be used to store the result.
+ /* Also check the bad case where the dst buffer is smaller than the
+ * skb's linear section.
+ */
+ bpf_dynptr_from_mem(dst_bad, sizeof(dst_bad), 0, &pdst);
+ status = bpf_crypto_decrypt(ctx, &psrc, &pdst, NULL);
+ if (!status)
+ status = -EIO;
+ if (status != -EINVAL)
+ goto err;
+
+ /* dst is a global variable so the test can easily check the result.
+ * In real production code, a percpu map should be used to store
+ * the result.
*/
bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst);
-
status = bpf_crypto_decrypt(ctx, &psrc, &pdst, NULL);
-
+err:
return TC_ACT_SHOT;
}
@@ -129,7 +138,6 @@ int encrypt_sanity(struct __sk_buff *skb)
int err;
status = 0;
-
err = skb_dynptr_validate(skb, &psrc);
if (err < 0) {
status = err;
@@ -148,13 +156,23 @@ int encrypt_sanity(struct __sk_buff *skb)
return TC_ACT_SHOT;
}
- /* dst is a global variable to make testing part easier to check. In real
- * production code, a percpu map should be used to store the result.
+ /* Also check the bad case where the dst buffer is smaller than the
+ * skb's linear section.
+ */
+ bpf_dynptr_from_mem(dst_bad, sizeof(dst_bad), 0, &pdst);
+ status = bpf_crypto_encrypt(ctx, &psrc, &pdst, NULL);
+ if (!status)
+ status = -EIO;
+ if (status != -EINVAL)
+ goto err;
+
+ /* dst is a global variable so the test can easily check the result.
+ * In real production code, a percpu map should be used to store
+ * the result.
*/
bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst);
-
status = bpf_crypto_encrypt(ctx, &psrc, &pdst, NULL);
-
+err:
return TC_ACT_SHOT;
}
diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c
index bd8f15229f5c..dda6a8dada82 100644
--- a/tools/testing/selftests/bpf/progs/dynptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c
@@ -269,6 +269,26 @@ int data_slice_out_of_bounds_skb(struct __sk_buff *skb)
return SK_PASS;
}
+/* A metadata slice can't be accessed out of bounds */
+SEC("?tc")
+__failure __msg("value is outside of the allowed memory range")
+int data_slice_out_of_bounds_skb_meta(struct __sk_buff *skb)
+{
+ struct bpf_dynptr meta;
+ __u8 *md;
+
+ bpf_dynptr_from_skb_meta(skb, 0, &meta);
+
+ md = bpf_dynptr_slice_rdwr(&meta, 0, NULL, sizeof(*md));
+ if (!md)
+ return SK_DROP;
+
+ /* this should fail */
+ *(md + 1) = 42;
+
+ return SK_PASS;
+}
+
SEC("?raw_tp")
__failure __msg("value is outside of the allowed memory range")
int data_slice_out_of_bounds_map_value(void *ctx)
@@ -1089,6 +1109,26 @@ int skb_invalid_slice_write(struct __sk_buff *skb)
return SK_PASS;
}
+/* bpf_dynptr_slice()s are read-only and cannot be written to */
+SEC("?tc")
+__failure __msg("R{{[0-9]+}} cannot write into rdonly_mem")
+int skb_meta_invalid_slice_write(struct __sk_buff *skb)
+{
+ struct bpf_dynptr meta;
+ __u8 *md;
+
+ bpf_dynptr_from_skb_meta(skb, 0, &meta);
+
+ md = bpf_dynptr_slice(&meta, 0, NULL, sizeof(*md));
+ if (!md)
+ return SK_DROP;
+
+ /* this should fail */
+ *md = 42;
+
+ return SK_PASS;
+}
+
/* The read-only data slice is invalidated whenever a helper changes packet data */
SEC("?tc")
__failure __msg("invalid mem access 'scalar'")
@@ -1192,6 +1232,188 @@ int skb_invalid_data_slice4(struct __sk_buff *skb)
return SK_PASS;
}
+/* Read-only skb data slice is invalidated on write to skb metadata */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int ro_skb_slice_invalid_after_metadata_write(struct __sk_buff *skb)
+{
+ struct bpf_dynptr data, meta;
+ __u8 *d;
+
+ bpf_dynptr_from_skb(skb, 0, &data);
+ bpf_dynptr_from_skb_meta(skb, 0, &meta);
+
+ d = bpf_dynptr_slice(&data, 0, NULL, sizeof(*d));
+ if (!d)
+ return SK_DROP;
+
+ bpf_dynptr_write(&meta, 0, "x", 1, 0);
+
+ /* this should fail */
+ val = *d;
+
+ return SK_PASS;
+}
+
+/* Read-write skb data slice is invalidated on write to skb metadata */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int rw_skb_slice_invalid_after_metadata_write(struct __sk_buff *skb)
+{
+ struct bpf_dynptr data, meta;
+ __u8 *d;
+
+ bpf_dynptr_from_skb(skb, 0, &data);
+ bpf_dynptr_from_skb_meta(skb, 0, &meta);
+
+ d = bpf_dynptr_slice_rdwr(&data, 0, NULL, sizeof(*d));
+ if (!d)
+ return SK_DROP;
+
+ bpf_dynptr_write(&meta, 0, "x", 1, 0);
+
+ /* this should fail */
+ *d = 42;
+
+ return SK_PASS;
+}
+
+/* Read-only skb metadata slice is invalidated on write to skb data */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int ro_skb_meta_slice_invalid_after_payload_write(struct __sk_buff *skb)
+{
+ struct bpf_dynptr data, meta;
+ __u8 *md;
+
+ bpf_dynptr_from_skb(skb, 0, &data);
+ bpf_dynptr_from_skb_meta(skb, 0, &meta);
+
+ md = bpf_dynptr_slice(&meta, 0, NULL, sizeof(*md));
+ if (!md)
+ return SK_DROP;
+
+ bpf_dynptr_write(&data, 0, "x", 1, 0);
+
+ /* this should fail */
+ val = *md;
+
+ return SK_PASS;
+}
+
+/* Read-write skb metadata slice is invalidated on write to skb data slice */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int rw_skb_meta_slice_invalid_after_payload_write(struct __sk_buff *skb)
+{
+ struct bpf_dynptr data, meta;
+ __u8 *md;
+
+ bpf_dynptr_from_skb(skb, 0, &data);
+ bpf_dynptr_from_skb_meta(skb, 0, &meta);
+
+ md = bpf_dynptr_slice_rdwr(&meta, 0, NULL, sizeof(*md));
+ if (!md)
+ return SK_DROP;
+
+ bpf_dynptr_write(&data, 0, "x", 1, 0);
+
+ /* this should fail */
+ *md = 42;
+
+ return SK_PASS;
+}
+
+/* Read-only skb metadata slice is invalidated whenever a helper changes packet data */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int ro_skb_meta_slice_invalid_after_payload_helper(struct __sk_buff *skb)
+{
+ struct bpf_dynptr meta;
+ __u8 *md;
+
+ bpf_dynptr_from_skb_meta(skb, 0, &meta);
+
+ md = bpf_dynptr_slice(&meta, 0, NULL, sizeof(*md));
+ if (!md)
+ return SK_DROP;
+
+ if (bpf_skb_pull_data(skb, skb->len))
+ return SK_DROP;
+
+ /* this should fail */
+ val = *md;
+
+ return SK_PASS;
+}
+
+/* Read-write skb metadata slice is invalidated whenever a helper changes packet data */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int rw_skb_meta_slice_invalid_after_payload_helper(struct __sk_buff *skb)
+{
+ struct bpf_dynptr meta;
+ __u8 *md;
+
+ bpf_dynptr_from_skb_meta(skb, 0, &meta);
+
+ md = bpf_dynptr_slice_rdwr(&meta, 0, NULL, sizeof(*md));
+ if (!md)
+ return SK_DROP;
+
+ if (bpf_skb_pull_data(skb, skb->len))
+ return SK_DROP;
+
+ /* this should fail */
+ *md = 42;
+
+ return SK_PASS;
+}
+
+/* Read-only skb metadata slice is invalidated on write to skb metadata */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int ro_skb_meta_slice_invalid_after_metadata_write(struct __sk_buff *skb)
+{
+ struct bpf_dynptr meta;
+ __u8 *md;
+
+ bpf_dynptr_from_skb_meta(skb, 0, &meta);
+
+ md = bpf_dynptr_slice(&meta, 0, NULL, sizeof(*md));
+ if (!md)
+ return SK_DROP;
+
+ bpf_dynptr_write(&meta, 0, "x", 1, 0);
+
+ /* this should fail */
+ val = *md;
+
+ return SK_PASS;
+}
+
+/* Read-write skb metadata slice is invalidated on write to skb metadata */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int rw_skb_meta_slice_invalid_after_metadata_write(struct __sk_buff *skb)
+{
+ struct bpf_dynptr meta;
+ __u8 *md;
+
+ bpf_dynptr_from_skb_meta(skb, 0, &meta);
+
+ md = bpf_dynptr_slice_rdwr(&meta, 0, NULL, sizeof(*md));
+ if (!md)
+ return SK_DROP;
+
+ bpf_dynptr_write(&meta, 0, "x", 1, 0);
+
+ /* this should fail */
+ *md = 42;
+
+ return SK_PASS;
+}
+
/* The read-only data slice is invalidated whenever a helper changes packet data */
SEC("?xdp")
__failure __msg("invalid mem access 'scalar'")
@@ -1255,6 +1477,19 @@ int skb_invalid_ctx(void *ctx)
return 0;
}
+/* Only supported prog type can create skb_meta-type dynptrs */
+SEC("?raw_tp")
+__failure __msg("calling kernel function bpf_dynptr_from_skb_meta is not allowed")
+int skb_meta_invalid_ctx(void *ctx)
+{
+ struct bpf_dynptr meta;
+
+ /* this should fail */
+ bpf_dynptr_from_skb_meta(ctx, 0, &meta);
+
+ return 0;
+}
+
SEC("fentry/skb_tx_error")
__failure __msg("must be referenced or trusted")
int BPF_PROG(skb_invalid_ctx_fentry, void *skb)
@@ -1665,6 +1900,29 @@ int clone_skb_packet_data(struct __sk_buff *skb)
return 0;
}
+/* A skb clone's metadata slice becomes invalid anytime packet data changes */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int clone_skb_packet_meta(struct __sk_buff *skb)
+{
+ struct bpf_dynptr clone, meta;
+ __u8 *md;
+
+ bpf_dynptr_from_skb_meta(skb, 0, &meta);
+ bpf_dynptr_clone(&meta, &clone);
+ md = bpf_dynptr_slice_rdwr(&clone, 0, NULL, sizeof(*md));
+ if (!md)
+ return SK_DROP;
+
+ if (bpf_skb_pull_data(skb, skb->len))
+ return SK_DROP;
+
+ /* this should fail */
+ *md = 42;
+
+ return 0;
+}
+
/* A xdp clone's data slices should be invalid anytime packet data changes */
SEC("?xdp")
__failure __msg("invalid mem access 'scalar'")
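
For contrast, a sketch of the pattern the verifier does accept (not part of the patch): any write through a dynptr invalidates outstanding slices, so a slice must be re-obtained after the write before it is dereferenced.

SEC("tc")
int reslice_after_write(struct __sk_buff *skb)
{
	struct bpf_dynptr meta;
	__u8 *md;

	bpf_dynptr_from_skb_meta(skb, 0, &meta);
	bpf_dynptr_write(&meta, 0, "x", 1, 0);

	/* Slice obtained after the write, so it is still valid here. */
	md = bpf_dynptr_slice_rdwr(&meta, 0, NULL, sizeof(*md));
	if (!md)
		return SK_DROP;
	*md = 42;

	return SK_PASS;
}
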
diff --git a/tools/testing/selftests/bpf/progs/dynptr_success.c b/tools/testing/selftests/bpf/progs/dynptr_success.c
index a0391f9da2d4..e0d672d93adf 100644
--- a/tools/testing/selftests/bpf/progs/dynptr_success.c
+++ b/tools/testing/selftests/bpf/progs/dynptr_success.c
@@ -9,6 +9,8 @@
#include "bpf_misc.h"
#include "errno.h"
+#define PAGE_SIZE_64K 65536
+
char _license[] SEC("license") = "GPL";
int pid, err, val;
@@ -209,6 +211,61 @@ int test_dynptr_skb_data(struct __sk_buff *skb)
return 1;
}
+SEC("?tc")
+int test_dynptr_skb_meta_data(struct __sk_buff *skb)
+{
+ struct bpf_dynptr meta;
+ __u8 *md;
+ int ret;
+
+ err = 1;
+ ret = bpf_dynptr_from_skb_meta(skb, 0, &meta);
+ if (ret)
+ return 1;
+
+ /* This should return NULL. Must use bpf_dynptr_slice API */
+ err = 2;
+ md = bpf_dynptr_data(&meta, 0, sizeof(*md));
+ if (md)
+ return 1;
+
+ err = 0;
+ return 1;
+}
+
+/* Check that skb metadata dynptr ops don't accept any flags. */
+SEC("?tc")
+int test_dynptr_skb_meta_flags(struct __sk_buff *skb)
+{
+ const __u64 INVALID_FLAGS = ~0ULL;
+ struct bpf_dynptr meta;
+ __u8 buf;
+ int ret;
+
+ err = 1;
+ ret = bpf_dynptr_from_skb_meta(skb, INVALID_FLAGS, &meta);
+ if (ret != -EINVAL)
+ return 1;
+
+ err = 2;
+ ret = bpf_dynptr_from_skb_meta(skb, 0, &meta);
+ if (ret)
+ return 1;
+
+ err = 3;
+ ret = bpf_dynptr_read(&buf, 0, &meta, 0, INVALID_FLAGS);
+ if (ret != -EINVAL)
+ return 1;
+
+ err = 4;
+ ret = bpf_dynptr_write(&meta, 0, &buf, 0, INVALID_FLAGS);
+ if (ret != -EINVAL)
+ return 1;
+
+ err = 0;
+ return 1;
+}
+
SEC("tp/syscalls/sys_enter_nanosleep")
int test_adjust(void *ctx)
{
@@ -611,11 +668,12 @@ int test_dynptr_copy_xdp(struct xdp_md *xdp)
struct bpf_dynptr ptr_buf, ptr_xdp;
char data[] = "qwertyuiopasdfghjkl";
char buf[32] = {'\0'};
- __u32 len = sizeof(data);
+ __u32 len = sizeof(data), xdp_data_size;
int i, chunks = 200;
/* ptr_xdp is backed by non-contiguous memory */
bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp);
+ xdp_data_size = bpf_dynptr_size(&ptr_xdp);
bpf_ringbuf_reserve_dynptr(&ringbuf, len * chunks, 0, &ptr_buf);
/* Destination dynptr is backed by non-contiguous memory */
@@ -673,7 +731,7 @@ int test_dynptr_copy_xdp(struct xdp_md *xdp)
goto out;
}
- if (bpf_dynptr_copy(&ptr_xdp, 2000, &ptr_xdp, 0, len * chunks) != -E2BIG)
+ if (bpf_dynptr_copy(&ptr_xdp, xdp_data_size - 3000, &ptr_xdp, 0, len * chunks) != -E2BIG)
err = 1;
out:
@@ -681,6 +739,173 @@ out:
return XDP_DROP;
}
+char memset_zero_data[] = "data to be zeroed";
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_memset_zero(void *ctx)
+{
+ __u32 data_sz = sizeof(memset_zero_data);
+ char zeroes[32] = {'\0'};
+ struct bpf_dynptr ptr;
+
+ err = bpf_dynptr_from_mem(memset_zero_data, data_sz, 0, &ptr);
+ err = err ?: bpf_dynptr_memset(&ptr, 0, data_sz, 0);
+ err = err ?: bpf_memcmp(zeroes, memset_zero_data, data_sz);
+
+ return 0;
+}
+
+#define DYNPTR_MEMSET_VAL 42
+
+char memset_notzero_data[] = "data to be overwritten";
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_memset_notzero(void *ctx)
+{
+ u32 data_sz = sizeof(memset_notzero_data);
+ struct bpf_dynptr ptr;
+ char expected[32];
+
+ __builtin_memset(expected, DYNPTR_MEMSET_VAL, data_sz);
+
+ err = bpf_dynptr_from_mem(memset_notzero_data, data_sz, 0, &ptr);
+ err = err ?: bpf_dynptr_memset(&ptr, 0, data_sz, DYNPTR_MEMSET_VAL);
+ err = err ?: bpf_memcmp(expected, memset_notzero_data, data_sz);
+
+ return 0;
+}
+
+char memset_zero_offset_data[] = "data to be zeroed partially";
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_memset_zero_offset(void *ctx)
+{
+ char expected[] = "data to \0\0\0\0eroed partially";
+ __u32 data_sz = sizeof(memset_zero_offset_data);
+ struct bpf_dynptr ptr;
+
+ err = bpf_dynptr_from_mem(memset_zero_offset_data, data_sz, 0, &ptr);
+ err = err ?: bpf_dynptr_memset(&ptr, 8, 4, 0);
+ err = err ?: bpf_memcmp(expected, memset_zero_offset_data, data_sz);
+
+ return 0;
+}
+
+char memset_zero_adjusted_data[] = "data to be zeroed partially";
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_memset_zero_adjusted(void *ctx)
+{
+ char expected[] = "data\0\0\0\0be zeroed partially";
+ __u32 data_sz = sizeof(memset_zero_adjusted_data);
+ struct bpf_dynptr ptr;
+
+ err = bpf_dynptr_from_mem(memset_zero_adjusted_data, data_sz, 0, &ptr);
+ err = err ?: bpf_dynptr_adjust(&ptr, 4, 8);
+ err = err ?: bpf_dynptr_memset(&ptr, 0, bpf_dynptr_size(&ptr), 0);
+ err = err ?: bpf_memcmp(expected, memset_zero_adjusted_data, data_sz);
+
+ return 0;
+}
+
+char memset_overflow_data[] = "memset overflow data";
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_memset_overflow(void *ctx)
+{
+ __u32 data_sz = sizeof(memset_overflow_data);
+ struct bpf_dynptr ptr;
+ int ret;
+
+ err = bpf_dynptr_from_mem(memset_overflow_data, data_sz, 0, &ptr);
+ ret = bpf_dynptr_memset(&ptr, 0, data_sz + 1, 0);
+ if (ret != -E2BIG)
+ err = 1;
+
+ return 0;
+}
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_memset_overflow_offset(void *ctx)
+{
+ __u32 data_sz = sizeof(memset_overflow_data);
+ struct bpf_dynptr ptr;
+ int ret;
+
+ err = bpf_dynptr_from_mem(memset_overflow_data, data_sz, 0, &ptr);
+ ret = bpf_dynptr_memset(&ptr, 1, data_sz, 0);
+ if (ret != -E2BIG)
+ err = 1;
+
+ return 0;
+}
+
+SEC("?cgroup_skb/egress")
+int test_dynptr_memset_readonly(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ int ret;
+
+ err = bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ /* cgroup skbs are read-only, so memset should fail */
+ ret = bpf_dynptr_memset(&ptr, 0, bpf_dynptr_size(&ptr), 0);
+ if (ret != -EINVAL)
+ err = 1;
+
+ return 0;
+}
+
+#define min_t(type, x, y) ({ \
+ type __x = (x); \
+ type __y = (y); \
+ __x < __y ? __x : __y; })
+
+SEC("xdp")
+int test_dynptr_memset_xdp_chunks(struct xdp_md *xdp)
+{
+ u32 data_sz, chunk_sz, offset = 0;
+ const int max_chunks = 200;
+ struct bpf_dynptr ptr_xdp;
+ char expected_buf[32];
+ char buf[32];
+ int i;
+
+ __builtin_memset(expected_buf, DYNPTR_MEMSET_VAL, sizeof(expected_buf));
+
+ /* ptr_xdp is backed by non-contiguous memory */
+ bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp);
+ data_sz = bpf_dynptr_size(&ptr_xdp);
+
+ err = bpf_dynptr_memset(&ptr_xdp, 0, data_sz, DYNPTR_MEMSET_VAL);
+ if (err) {
+ /* bpf_dynptr_memset() eventually calls bpf_xdp_pointer(),
+ * which returns -EFAULT if data_sz is greater than 0xffff.
+ * With a 64K page size, data_sz is at least 64K, so the error
+ * is expected; zero it out and return success.
+ */
+ if (data_sz >= PAGE_SIZE_64K)
+ err = 0;
+ goto out;
+ }
+
+ bpf_for(i, 0, max_chunks) {
+ offset = i * sizeof(buf);
+ if (offset >= data_sz)
+ goto out;
+ chunk_sz = min_t(u32, sizeof(buf), data_sz - offset);
+ err = bpf_dynptr_read(&buf, chunk_sz, &ptr_xdp, offset, 0);
+ if (err)
+ goto out;
+ err = bpf_memcmp(buf, expected_buf, sizeof(buf));
+ if (err)
+ goto out;
+ }
+out:
+ return XDP_DROP;
+}
+
void *user_ptr;
/* Contains the copy of the data pointed by user_ptr.
* Size 384 to make it not fit into a single kernel chunk when copying
@@ -689,8 +914,8 @@ void *user_ptr;
char expected_str[384];
__u32 test_len[7] = {0/* placeholder */, 0, 1, 2, 255, 256, 257};
-typedef int (*bpf_read_dynptr_fn_t)(struct bpf_dynptr *dptr, u32 off,
- u32 size, const void *unsafe_ptr);
+typedef int (*bpf_read_dynptr_fn_t)(struct bpf_dynptr *dptr, u64 off,
+ u64 size, const void *unsafe_ptr);
/* Returns the offset just before the end of the maximum sized xdp fragment.
* Any write larger than 32 bytes will be split between 2 fragments.
@@ -881,16 +1106,16 @@ int test_copy_from_user_str_dynptr(void *ctx)
return 0;
}
-static int bpf_copy_data_from_user_task(struct bpf_dynptr *dptr, u32 off,
- u32 size, const void *unsafe_ptr)
+static int bpf_copy_data_from_user_task(struct bpf_dynptr *dptr, u64 off,
+ u64 size, const void *unsafe_ptr)
{
struct task_struct *task = bpf_get_current_task_btf();
return bpf_copy_from_user_task_dynptr(dptr, off, size, unsafe_ptr, task);
}
-static int bpf_copy_data_from_user_task_str(struct bpf_dynptr *dptr, u32 off,
- u32 size, const void *unsafe_ptr)
+static int bpf_copy_data_from_user_task_str(struct bpf_dynptr *dptr, u64 off,
+ u64 size, const void *unsafe_ptr)
{
struct task_struct *task = bpf_get_current_task_btf();
diff --git a/tools/testing/selftests/bpf/progs/exceptions_assert.c b/tools/testing/selftests/bpf/progs/exceptions_assert.c
index 5e0a1ca96d4e..a01c2736890f 100644
--- a/tools/testing/selftests/bpf/progs/exceptions_assert.c
+++ b/tools/testing/selftests/bpf/progs/exceptions_assert.c
@@ -18,43 +18,43 @@
return *(u64 *)num; \
}
-__msg(": R0_w=0xffffffff80000000")
+__msg(": R0=0xffffffff80000000")
check_assert(s64, ==, eq_int_min, INT_MIN);
-__msg(": R0_w=0x7fffffff")
+__msg(": R0=0x7fffffff")
check_assert(s64, ==, eq_int_max, INT_MAX);
-__msg(": R0_w=0")
+__msg(": R0=0")
check_assert(s64, ==, eq_zero, 0);
-__msg(": R0_w=0x8000000000000000 R1_w=0x8000000000000000")
+__msg(": R0=0x8000000000000000 R1=0x8000000000000000")
check_assert(s64, ==, eq_llong_min, LLONG_MIN);
-__msg(": R0_w=0x7fffffffffffffff R1_w=0x7fffffffffffffff")
+__msg(": R0=0x7fffffffffffffff R1=0x7fffffffffffffff")
check_assert(s64, ==, eq_llong_max, LLONG_MAX);
-__msg(": R0_w=scalar(id=1,smax=0x7ffffffe)")
+__msg(": R0=scalar(id=1,smax=0x7ffffffe)")
check_assert(s64, <, lt_pos, INT_MAX);
-__msg(": R0_w=scalar(id=1,smax=-1,umin=0x8000000000000000,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
+__msg(": R0=scalar(id=1,smax=-1,umin=0x8000000000000000,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
check_assert(s64, <, lt_zero, 0);
-__msg(": R0_w=scalar(id=1,smax=0xffffffff7fffffff")
+__msg(": R0=scalar(id=1,smax=0xffffffff7fffffff")
check_assert(s64, <, lt_neg, INT_MIN);
-__msg(": R0_w=scalar(id=1,smax=0x7fffffff)")
+__msg(": R0=scalar(id=1,smax=0x7fffffff)")
check_assert(s64, <=, le_pos, INT_MAX);
-__msg(": R0_w=scalar(id=1,smax=0)")
+__msg(": R0=scalar(id=1,smax=0)")
check_assert(s64, <=, le_zero, 0);
-__msg(": R0_w=scalar(id=1,smax=0xffffffff80000000")
+__msg(": R0=scalar(id=1,smax=0xffffffff80000000")
check_assert(s64, <=, le_neg, INT_MIN);
-__msg(": R0_w=scalar(id=1,smin=umin=0x80000000,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
+__msg(": R0=scalar(id=1,smin=umin=0x80000000,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
check_assert(s64, >, gt_pos, INT_MAX);
-__msg(": R0_w=scalar(id=1,smin=umin=1,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
+__msg(": R0=scalar(id=1,smin=umin=1,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
check_assert(s64, >, gt_zero, 0);
-__msg(": R0_w=scalar(id=1,smin=0xffffffff80000001")
+__msg(": R0=scalar(id=1,smin=0xffffffff80000001")
check_assert(s64, >, gt_neg, INT_MIN);
-__msg(": R0_w=scalar(id=1,smin=umin=0x7fffffff,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
+__msg(": R0=scalar(id=1,smin=umin=0x7fffffff,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
check_assert(s64, >=, ge_pos, INT_MAX);
-__msg(": R0_w=scalar(id=1,smin=0,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
+__msg(": R0=scalar(id=1,smin=0,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
check_assert(s64, >=, ge_zero, 0);
-__msg(": R0_w=scalar(id=1,smin=0xffffffff80000000")
+__msg(": R0=scalar(id=1,smin=0xffffffff80000000")
check_assert(s64, >=, ge_neg, INT_MIN);
SEC("?tc")
diff --git a/tools/testing/selftests/bpf/progs/fexit_noreturns.c b/tools/testing/selftests/bpf/progs/fexit_noreturns.c
deleted file mode 100644
index 54654539f550..000000000000
--- a/tools/testing/selftests/bpf/progs/fexit_noreturns.c
+++ /dev/null
@@ -1,15 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include <linux/bpf.h>
-#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
-#include "bpf_misc.h"
-
-char _license[] SEC("license") = "GPL";
-
-SEC("fexit/do_exit")
-__failure __msg("Attaching fexit/fmod_ret to __noreturn functions is rejected.")
-int BPF_PROG(noreturns)
-{
- return 0;
-}
diff --git a/tools/testing/selftests/bpf/progs/file_reader.c b/tools/testing/selftests/bpf/progs/file_reader.c
new file mode 100644
index 000000000000..4d756b623557
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/file_reader.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <string.h>
+#include <stdbool.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "errno.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} arrmap SEC(".maps");
+
+struct elem {
+ struct file *file;
+ struct bpf_task_work tw;
+};
+
+char user_buf[256000];
+char tmp_buf[256000];
+
+int pid = 0;
+int err, run_success = 0;
+
+static int validate_file_read(struct file *file);
+static int task_work_callback(struct bpf_map *map, void *key, void *value);
+
+SEC("lsm/file_open")
+int on_open_expect_fault(void *c)
+{
+ struct bpf_dynptr dynptr;
+ struct file *file;
+ int local_err = 1;
+ __u32 user_buf_sz = sizeof(user_buf);
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ file = bpf_get_task_exe_file(bpf_get_current_task_btf());
+ if (!file)
+ return 0;
+
+ if (bpf_dynptr_from_file(file, 0, &dynptr))
+ goto out;
+
+ local_err = bpf_dynptr_read(tmp_buf, user_buf_sz, &dynptr, user_buf_sz, 0);
+ if (local_err == -EFAULT) { /* Expect page fault */
+ local_err = 0;
+ run_success = 1;
+ }
+out:
+ bpf_dynptr_file_discard(&dynptr);
+ if (local_err)
+ err = local_err;
+ bpf_put_file(file);
+ return 0;
+}
+
+SEC("lsm/file_open")
+int on_open_validate_file_read(void *c)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ struct elem *work;
+ int key = 0;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ work = bpf_map_lookup_elem(&arrmap, &key);
+ if (!work) {
+ err = 1;
+ return 0;
+ }
+ bpf_task_work_schedule_signal_impl(task, &work->tw, &arrmap, task_work_callback, NULL);
+ return 0;
+}
+
+/* Called in a sleepable context; reads 256K bytes and cross-checks them with data read from user space */
+static int task_work_callback(struct bpf_map *map, void *key, void *value)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ struct file *file = bpf_get_task_exe_file(task);
+
+ if (!file)
+ return 0;
+
+ err = validate_file_read(file);
+ if (!err)
+ run_success = 1;
+ bpf_put_file(file);
+ return 0;
+}
+
+static int verify_dynptr_read(struct bpf_dynptr *ptr, u32 off, char *user_buf, u32 len)
+{
+ int i;
+
+ if (bpf_dynptr_read(tmp_buf, len, ptr, off, 0))
+ return 1;
+
+ /* Verify that the file contents read from BPF are the same as those read from user space */
+ bpf_for(i, 0, len)
+ {
+ if (tmp_buf[i] != user_buf[i])
+ return 1;
+ }
+ return 0;
+}
+
+static int validate_file_read(struct file *file)
+{
+ struct bpf_dynptr dynptr;
+ int loc_err = 1, off;
+ __u32 user_buf_sz = sizeof(user_buf);
+
+ if (bpf_dynptr_from_file(file, 0, &dynptr))
+ goto cleanup;
+
+ loc_err = verify_dynptr_read(&dynptr, 0, user_buf, user_buf_sz);
+ off = 1;
+ loc_err = loc_err ?: verify_dynptr_read(&dynptr, off, user_buf + off, user_buf_sz - off);
+ off = user_buf_sz - 1;
+ loc_err = loc_err ?: verify_dynptr_read(&dynptr, off, user_buf + off, user_buf_sz - off);
+ /* Read file with random offset and length */
+ off = 4097;
+ loc_err = loc_err ?: verify_dynptr_read(&dynptr, off, user_buf + off, 100);
+
+ /* Adjust dynptr, verify read */
+ loc_err = loc_err ?: bpf_dynptr_adjust(&dynptr, off, off + 1);
+ loc_err = loc_err ?: verify_dynptr_read(&dynptr, 0, user_buf + off, 1);
+ /* Can't read more than 1 byte */
+ loc_err = loc_err ?: verify_dynptr_read(&dynptr, 0, user_buf + off, 2) == 0;
+ /* Can't read with far offset */
+ loc_err = loc_err ?: verify_dynptr_read(&dynptr, 1, user_buf + off, 1) == 0;
+
+cleanup:
+ bpf_dynptr_file_discard(&dynptr);
+ return loc_err;
+}
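
The cross-check above compares against user_buf, which the user-space side of the test presumably fills with the executable's first 256K bytes. A hypothetical sketch of that counterpart (not part of the patch; it assumes a libbpf-generated skeleton named file_reader that exposes user_buf in its BSS):

#include <fcntl.h>
#include <unistd.h>
#include "file_reader.skel.h"	/* assumed generated skeleton header */

static int fill_user_buf(struct file_reader *skel)
{
	int fd = open("/proc/self/exe", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	/* Read the reference copy that the BPF side will compare against. */
	n = read(fd, skel->bss->user_buf, sizeof(skel->bss->user_buf));
	close(fd);
	return n < 0 ? -1 : 0;
}
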
diff --git a/tools/testing/selftests/bpf/progs/file_reader_fail.c b/tools/testing/selftests/bpf/progs/file_reader_fail.c
new file mode 100644
index 000000000000..32fe28ed2439
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/file_reader_fail.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <string.h>
+#include <stdbool.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+int err;
+void *user_ptr;
+
+SEC("lsm/file_open")
+__failure
+__msg("Unreleased reference id=")
+int on_nanosleep_unreleased_ref(void *ctx)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ struct file *file = bpf_get_task_exe_file(task);
+ struct bpf_dynptr dynptr;
+
+ if (!file)
+ return 0;
+
+ err = bpf_dynptr_from_file(file, 0, &dynptr);
+ return err ? 1 : 0;
+}
+
+SEC("xdp")
+__failure
+__msg("Expected a dynptr of type file as arg #0")
+int xdp_wrong_dynptr_type(struct xdp_md *xdp)
+{
+ struct bpf_dynptr dynptr;
+
+ bpf_dynptr_from_xdp(xdp, 0, &dynptr);
+ bpf_dynptr_file_discard(&dynptr);
+ return 0;
+}
+
+SEC("xdp")
+__failure
+__msg("Expected an initialized dynptr as arg #0")
+int xdp_no_dynptr_type(struct xdp_md *xdp)
+{
+ struct bpf_dynptr dynptr;
+
+ bpf_dynptr_file_discard(&dynptr);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/freplace_connect_v4_prog.c b/tools/testing/selftests/bpf/progs/freplace_connect_v4_prog.c
index 544e5ac90461..d09bbd8ae8a8 100644
--- a/tools/testing/selftests/bpf/progs/freplace_connect_v4_prog.c
+++ b/tools/testing/selftests/bpf/progs/freplace_connect_v4_prog.c
@@ -12,7 +12,7 @@
SEC("freplace/connect_v4_prog")
int new_connect_v4_prog(struct bpf_sock_addr *ctx)
{
- // return value thats in invalid range
+ // return value that's in invalid range
return 255;
}
diff --git a/tools/testing/selftests/bpf/progs/htab_update.c b/tools/testing/selftests/bpf/progs/htab_update.c
index 7481bb30b29b..195d3b2fba00 100644
--- a/tools/testing/selftests/bpf/progs/htab_update.c
+++ b/tools/testing/selftests/bpf/progs/htab_update.c
@@ -6,24 +6,31 @@
char _license[] SEC("license") = "GPL";
+/* Map value type: has BTF-managed field (bpf_timer) */
+struct val {
+ struct bpf_timer t;
+ __u64 payload;
+};
+
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
- __uint(key_size, sizeof(__u32));
- __uint(value_size, sizeof(__u32));
+ __type(key, __u32);
+ __type(value, struct val);
} htab SEC(".maps");
int pid = 0;
int update_err = 0;
-SEC("?fentry/lookup_elem_raw")
-int lookup_elem_raw(void *ctx)
+SEC("?fentry/bpf_obj_free_fields")
+int bpf_obj_free_fields(void *ctx)
{
- __u32 key = 0, value = 1;
+ __u32 key = 0;
+ struct val value = { .payload = 1 };
if ((bpf_get_current_pid_tgid() >> 32) != pid)
return 0;
- update_err = bpf_map_update_elem(&htab, &key, &value, 0);
+ update_err = bpf_map_update_elem(&htab, &key, &value, BPF_ANY);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/ip_check_defrag.c b/tools/testing/selftests/bpf/progs/ip_check_defrag.c
index 645b2c9f7867..0e87ad1ebcfa 100644
--- a/tools/testing/selftests/bpf/progs/ip_check_defrag.c
+++ b/tools/testing/selftests/bpf/progs/ip_check_defrag.c
@@ -12,11 +12,6 @@
#define IP_OFFSET 0x1FFF
#define NEXTHDR_FRAGMENT 44
-extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags,
- struct bpf_dynptr *ptr__uninit) __ksym;
-extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, uint32_t offset,
- void *buffer, uint32_t buffer__sz) __ksym;
-
volatile int shootdowns = 0;
static bool is_frag_v4(struct iphdr *iph)
diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c
index 76adf4a8f2da..7dd92a303bf6 100644
--- a/tools/testing/selftests/bpf/progs/iters.c
+++ b/tools/testing/selftests/bpf/progs/iters.c
@@ -1649,4 +1649,281 @@ int clean_live_states(const void *ctx)
return 0;
}
+SEC("?raw_tp")
+__flag(BPF_F_TEST_STATE_FREQ)
+__failure __msg("misaligned stack access off 0+-31+0 size 8")
+__naked int absent_mark_in_the_middle_state(void)
+{
+ /* This is equivalent to C program below.
+ *
+ * r8 = bpf_get_prandom_u32();
+ * r6 = -32;
+ * bpf_iter_num_new(&fp[-8], 0, 10);
+ * if (unlikely(bpf_get_prandom_u32()))
+ * r6 = -31;
+ * while (bpf_iter_num_next(&fp[-8])) {
+ * if (unlikely(bpf_get_prandom_u32()))
+ * *(fp + r6) = 7;
+ * }
+ * bpf_iter_num_destroy(&fp[-8])
+ * return 0
+ */
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "r8 = r0;"
+ "r7 = 0;"
+ "r6 = -32;"
+ "r0 = 0;"
+ "*(u64 *)(r10 - 16) = r0;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == r8 goto change_r6_%=;"
+ "loop_%=:"
+ "call noop;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto loop_end_%=;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == r8 goto use_r6_%=;"
+ "goto loop_%=;"
+ "loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ "use_r6_%=:"
+ "r0 = r10;"
+ "r0 += r6;"
+ "r1 = 7;"
+ "*(u64 *)(r0 + 0) = r1;"
+ "goto loop_%=;"
+ "change_r6_%=:"
+ "r6 = -31;"
+ "goto loop_%=;"
+ :
+ : __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy),
+ __imm(bpf_get_prandom_u32)
+ : __clobber_all
+ );
+}
+
+__used __naked
+static int noop(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "exit;"
+ );
+}
+
+SEC("?raw_tp")
+__flag(BPF_F_TEST_STATE_FREQ)
+__failure __msg("misaligned stack access off 0+-31+0 size 8")
+__naked int absent_mark_in_the_middle_state2(void)
+{
+ /* This is equivalent to C program below.
+ *
+ * r8 = bpf_get_prandom_u32();
+ * r6 = -32;
+ * bpf_iter_num_new(&fp[-8], 0, 10);
+ * if (unlikely(bpf_get_prandom_u32())) {
+ * r6 = -31;
+ * jump_into_loop:
+ * goto +0;
+ * goto loop;
+ * }
+ * if (unlikely(bpf_get_prandom_u32()))
+ * goto jump_into_loop;
+ * loop:
+ * while (bpf_iter_num_next(&fp[-8])) {
+ * if (unlikely(bpf_get_prandom_u32()))
+ * *(fp + r6) = 7;
+ * }
+ * bpf_iter_num_destroy(&fp[-8])
+ * return 0
+ */
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "r8 = r0;"
+ "r7 = 0;"
+ "r6 = -32;"
+ "r0 = 0;"
+ "*(u64 *)(r10 - 16) = r0;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == r8 goto change_r6_%=;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == r8 goto jump_into_loop_%=;"
+ "loop_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto loop_end_%=;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == r8 goto use_r6_%=;"
+ "goto loop_%=;"
+ "loop_end_%=:"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ "use_r6_%=:"
+ "r0 = r10;"
+ "r0 += r6;"
+ "r1 = 7;"
+ "*(u64 *)(r0 + 0) = r1;"
+ "goto loop_%=;"
+ "change_r6_%=:"
+ "r6 = -31;"
+ "jump_into_loop_%=: "
+ "goto +0;"
+ "goto loop_%=;"
+ :
+ : __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy),
+ __imm(bpf_get_prandom_u32)
+ : __clobber_all
+ );
+}
+
+SEC("?raw_tp")
+__flag(BPF_F_TEST_STATE_FREQ)
+__failure __msg("misaligned stack access off 0+-31+0 size 8")
+__naked int absent_mark_in_the_middle_state3(void)
+{
+ /*
+ * bpf_iter_num_new(&fp[-8], 0, 10)
+ * loop1(-32, &fp[-8])
+ * loop1_wrapper(&fp[-8])
+ * bpf_iter_num_destroy(&fp[-8])
+ */
+ asm volatile (
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ /* call #1 */
+ "r1 = -32;"
+ "r2 = r10;"
+ "r2 += -8;"
+ "call loop1;"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ /* call #2 */
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call loop1_wrapper;"
+ /* return */
+ "r1 = r10;"
+ "r1 += -8;"
+ "call %[bpf_iter_num_destroy];"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_destroy),
+ __imm(bpf_get_prandom_u32)
+ : __clobber_all
+ );
+}
+
+__used __naked
+static int loop1(void)
+{
+ /*
+ * int loop1(num, iter) {
+ * r6 = num;
+ * r7 = iter;
+ * while (bpf_iter_num_next(r7)) {
+ * if (unlikely(bpf_get_prandom_u32()))
+ * *(fp + r6) = 7;
+ * }
+ * return 0
+ * }
+ */
+ asm volatile (
+ "r6 = r1;"
+ "r7 = r2;"
+ "call %[bpf_get_prandom_u32];"
+ "r8 = r0;"
+ "loop_%=:"
+ "r1 = r7;"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto loop_end_%=;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == r8 goto use_r6_%=;"
+ "goto loop_%=;"
+ "loop_end_%=:"
+ "r0 = 0;"
+ "exit;"
+ "use_r6_%=:"
+ "r0 = r10;"
+ "r0 += r6;"
+ "r1 = 7;"
+ "*(u64 *)(r0 + 0) = r1;"
+ "goto loop_%=;"
+ :
+ : __imm(bpf_iter_num_next),
+ __imm(bpf_get_prandom_u32)
+ : __clobber_all
+ );
+}
+
+__used __naked
+static int loop1_wrapper(void)
+{
+ /*
+ * int loop1_wrapper(iter) {
+ * r6 = -32;
+ * r7 = iter;
+ * if (unlikely(bpf_get_prandom_u32()))
+ * r6 = -31;
+ * loop1(r6, r7);
+ * return 0;
+ * }
+ */
+ asm volatile (
+ "r6 = -32;"
+ "r7 = r1;"
+ "call %[bpf_get_prandom_u32];"
+ "r8 = r0;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == r8 goto change_r6_%=;"
+ "loop_%=:"
+ "r1 = r6;"
+ "r2 = r7;"
+ "call loop1;"
+ "r0 = 0;"
+ "exit;"
+ "change_r6_%=:"
+ "r6 = -31;"
+ "goto loop_%=;"
+ :
+ : __imm(bpf_iter_num_next),
+ __imm(bpf_get_prandom_u32)
+ : __clobber_all
+ );
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/iters_looping.c b/tools/testing/selftests/bpf/progs/iters_looping.c
index 05fa5ce7fc59..d00fd570255a 100644
--- a/tools/testing/selftests/bpf/progs/iters_looping.c
+++ b/tools/testing/selftests/bpf/progs/iters_looping.c
@@ -161,3 +161,56 @@ int simplest_loop(void *ctx)
return 0;
}
+
+__used
+static void iterator_with_diff_stack_depth(int x)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile (
+ "if r1 == 42 goto 0f;"
+ "*(u64 *)(r10 - 128) = 0;"
+ "0:"
+ /* create iterator */
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "1:"
+ /* consume next item */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto 2f;"
+ "goto 1b;"
+ "2:"
+ /* destroy iterator */
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ :
+ : __imm_ptr(iter), ITER_HELPERS
+ : __clobber_common, "r6"
+ );
+}
+
+SEC("socket")
+__success
+__naked int widening_stack_size_bug(void *ctx)
+{
+ /*
+ * Depending on the iterator_with_diff_stack_depth() parameter value,
+ * the subprogram stack depth is either 8 or 128 bytes. Arrange values
+ * so that it is 128 on the first call and 8 on the second. This
+ * triggered a bug in the verifier's widen_imprecise_scalars() logic.
+ */
+ asm volatile (
+ "r6 = 0;"
+ "r1 = 0;"
+ "1:"
+ "call iterator_with_diff_stack_depth;"
+ "r1 = 42;"
+ "r6 += 1;"
+ "if r6 < 2 goto 1b;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
diff --git a/tools/testing/selftests/bpf/progs/iters_state_safety.c b/tools/testing/selftests/bpf/progs/iters_state_safety.c
index f41257eadbb2..d273b46dfc7c 100644
--- a/tools/testing/selftests/bpf/progs/iters_state_safety.c
+++ b/tools/testing/selftests/bpf/progs/iters_state_safety.c
@@ -30,7 +30,7 @@ int force_clang_to_emit_btf_for_externs(void *ctx)
SEC("?raw_tp")
__success __log_level(2)
-__msg("fp-8_w=iter_num(ref_id=1,state=active,depth=0)")
+__msg("fp-8=iter_num(ref_id=1,state=active,depth=0)")
int create_and_destroy(void *ctx)
{
struct bpf_iter_num iter;
@@ -196,7 +196,7 @@ int leak_iter_from_subprog_fail(void *ctx)
SEC("?raw_tp")
__success __log_level(2)
-__msg("fp-8_w=iter_num(ref_id=1,state=active,depth=0)")
+__msg("fp-8=iter_num(ref_id=1,state=active,depth=0)")
int valid_stack_reuse(void *ctx)
{
struct bpf_iter_num iter;
@@ -345,7 +345,7 @@ int __naked read_from_iter_slot_fail(void)
"r3 = 1000;"
"call %[bpf_iter_num_new];"
- /* attemp to leak bpf_iter_num state */
+ /* attempt to leak bpf_iter_num state */
"r7 = *(u64 *)(r6 + 0);"
"r8 = *(u64 *)(r6 + 8);"
diff --git a/tools/testing/selftests/bpf/progs/iters_task_failure.c b/tools/testing/selftests/bpf/progs/iters_task_failure.c
index 6b1588d70652..fe3663dedbe1 100644
--- a/tools/testing/selftests/bpf/progs/iters_task_failure.c
+++ b/tools/testing/selftests/bpf/progs/iters_task_failure.c
@@ -15,7 +15,7 @@ void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;
SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
-__failure __msg("expected an RCU CS when using bpf_iter_task_next")
+__failure __msg("kernel func bpf_iter_task_new requires RCU critical section protection")
int BPF_PROG(iter_tasks_without_lock)
{
struct task_struct *pos;
@@ -27,7 +27,7 @@ int BPF_PROG(iter_tasks_without_lock)
}
SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
-__failure __msg("expected an RCU CS when using bpf_iter_css_next")
+__failure __msg("kernel func bpf_iter_css_new requires RCU critical section protection")
int BPF_PROG(iter_css_without_lock)
{
u64 cg_id = bpf_get_current_cgroup_id();
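
The updated strings track the verifier now reporting the iterator constructor (bpf_iter_task_new / bpf_iter_css_new) rather than the _next kfunc. A minimal sketch of the passing pattern for a sleepable program, assuming the bpf_for_each() convenience macro and task-iterator flags used elsewhere in these selftests:

	struct task_struct *pos;

	bpf_rcu_read_lock();	/* constructor must run inside an RCU CS */
	bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_PROCS) {
		/* ... */
	}
	bpf_rcu_read_unlock();
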
diff --git a/tools/testing/selftests/bpf/progs/iters_testmod.c b/tools/testing/selftests/bpf/progs/iters_testmod.c
index 9e4b45201e69..5379e9960ffd 100644
--- a/tools/testing/selftests/bpf/progs/iters_testmod.c
+++ b/tools/testing/selftests/bpf/progs/iters_testmod.c
@@ -123,3 +123,49 @@ out:
bpf_iter_num_destroy(&num_it);
return 0;
}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("kernel func bpf_kfunc_ret_rcu_test requires RCU critical section protection")
+int iter_ret_rcu_test_protected(const void *ctx)
+{
+ struct task_struct *p;
+
+ p = bpf_kfunc_ret_rcu_test();
+ return p->pid;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("R1 type=rcu_ptr_or_null_ expected=")
+int iter_ret_rcu_test_type(const void *ctx)
+{
+ struct task_struct *p;
+
+ bpf_rcu_read_lock();
+ p = bpf_kfunc_ret_rcu_test();
+ bpf_this_cpu_ptr(p);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("kernel func bpf_kfunc_ret_rcu_test_nostruct requires RCU critical section protection")
+int iter_ret_rcu_test_protected_nostruct(const void *ctx)
+{
+ void *p;
+
+ p = bpf_kfunc_ret_rcu_test_nostruct(4);
+ return *(int *)p;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure __msg("R1 type=rdonly_rcu_mem_or_null expected=")
+int iter_ret_rcu_test_type_nostruct(const void *ctx)
+{
+ void *p;
+
+ bpf_rcu_read_lock();
+ p = bpf_kfunc_ret_rcu_test_nostruct(4);
+ bpf_this_cpu_ptr(p);
+ bpf_rcu_read_unlock();
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/iters_testmod_seq.c b/tools/testing/selftests/bpf/progs/iters_testmod_seq.c
index 6543d5b6e0a9..83791348bed5 100644
--- a/tools/testing/selftests/bpf/progs/iters_testmod_seq.c
+++ b/tools/testing/selftests/bpf/progs/iters_testmod_seq.c
@@ -20,7 +20,7 @@ __s64 res_empty;
SEC("raw_tp/sys_enter")
__success __log_level(2)
-__msg("fp-16_w=iter_testmod_seq(ref_id=1,state=active,depth=0)")
+__msg("fp-16=iter_testmod_seq(ref_id=1,state=active,depth=0)")
__msg("fp-16=iter_testmod_seq(ref_id=1,state=drained,depth=0)")
__msg("call bpf_iter_testmod_seq_destroy")
int testmod_seq_empty(const void *ctx)
@@ -38,7 +38,7 @@ __s64 res_full;
SEC("raw_tp/sys_enter")
__success __log_level(2)
-__msg("fp-16_w=iter_testmod_seq(ref_id=1,state=active,depth=0)")
+__msg("fp-16=iter_testmod_seq(ref_id=1,state=active,depth=0)")
__msg("fp-16=iter_testmod_seq(ref_id=1,state=drained,depth=0)")
__msg("call bpf_iter_testmod_seq_destroy")
int testmod_seq_full(const void *ctx)
@@ -58,7 +58,7 @@ static volatile int zero = 0;
SEC("raw_tp/sys_enter")
__success __log_level(2)
-__msg("fp-16_w=iter_testmod_seq(ref_id=1,state=active,depth=0)")
+__msg("fp-16=iter_testmod_seq(ref_id=1,state=active,depth=0)")
__msg("fp-16=iter_testmod_seq(ref_id=1,state=drained,depth=0)")
__msg("call bpf_iter_testmod_seq_destroy")
int testmod_seq_truncated(const void *ctx)
diff --git a/tools/testing/selftests/bpf/progs/kprobe_write_ctx.c b/tools/testing/selftests/bpf/progs/kprobe_write_ctx.c
new file mode 100644
index 000000000000..f77aef0474d3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kprobe_write_ctx.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+#if defined(__TARGET_ARCH_x86)
+SEC("kprobe")
+int kprobe_write_ctx(struct pt_regs *ctx)
+{
+ ctx->ax = 0;
+ return 0;
+}
+
+SEC("kprobe.multi")
+int kprobe_multi_write_ctx(struct pt_regs *ctx)
+{
+ ctx->ax = 0;
+ return 0;
+}
+#endif
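
Both programs are presumably negative tests: stores into the kprobe pt_regs context would corrupt probed kernel state, so the verifier is expected to reject them at load time. Reading registers stays legal; a hedged sketch of the allowed counterpart (this program is not part of the patch, PT_REGS_RC comes from bpf_tracing.h):

	SEC("kprobe")
	int kprobe_read_ctx(struct pt_regs *ctx)
	{
		unsigned long ax = PT_REGS_RC(ctx);	/* load, not store */

		bpf_printk("ax=%lu", ax);
		return 0;
	}
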
diff --git a/tools/testing/selftests/bpf/progs/linked_list_fail.c b/tools/testing/selftests/bpf/progs/linked_list_fail.c
index 6438982b928b..ddd26d1a083f 100644
--- a/tools/testing/selftests/bpf/progs/linked_list_fail.c
+++ b/tools/testing/selftests/bpf/progs/linked_list_fail.c
@@ -226,8 +226,7 @@ int obj_new_no_composite(void *ctx)
SEC("?tc")
int obj_new_no_struct(void *ctx)
{
-
- bpf_obj_new(union { int data; unsigned udata; });
+ (void)bpf_obj_new(union { int data; unsigned udata; });
return 0;
}
@@ -252,7 +251,7 @@ int new_null_ret(void *ctx)
SEC("?tc")
int obj_new_acq(void *ctx)
{
- bpf_obj_new(struct foo);
+ (void)bpf_obj_new(struct foo);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/livepatch_trampoline.c b/tools/testing/selftests/bpf/progs/livepatch_trampoline.c
new file mode 100644
index 000000000000..15579d5bcd91
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/livepatch_trampoline.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+int fentry_hit;
+int fexit_hit;
+int my_pid;
+
+SEC("fentry/cmdline_proc_show")
+int BPF_PROG(fentry_cmdline)
+{
+ if (my_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ fentry_hit = 1;
+ return 0;
+}
+
+SEC("fexit/cmdline_proc_show")
+int BPF_PROG(fexit_cmdline)
+{
+ if (my_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ fexit_hit = 1;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/loop1.c b/tools/testing/selftests/bpf/progs/loop1.c
index 50e66772c046..b0fa26fb4760 100644
--- a/tools/testing/selftests/bpf/progs/loop1.c
+++ b/tools/testing/selftests/bpf/progs/loop1.c
@@ -1,11 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <stdint.h>
-#include <stddef.h>
-#include <stdbool.h>
-#include <linux/bpf.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/testing/selftests/bpf/progs/loop2.c b/tools/testing/selftests/bpf/progs/loop2.c
index 947bb7e988c2..0227409d4b0e 100644
--- a/tools/testing/selftests/bpf/progs/loop2.c
+++ b/tools/testing/selftests/bpf/progs/loop2.c
@@ -1,11 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <stdint.h>
-#include <stddef.h>
-#include <stdbool.h>
-#include <linux/bpf.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/testing/selftests/bpf/progs/loop3.c b/tools/testing/selftests/bpf/progs/loop3.c
index 717dab14322b..5d1c9a775e6b 100644
--- a/tools/testing/selftests/bpf/progs/loop3.c
+++ b/tools/testing/selftests/bpf/progs/loop3.c
@@ -1,11 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <stdint.h>
-#include <stddef.h>
-#include <stdbool.h>
-#include <linux/bpf.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/testing/selftests/bpf/progs/loop6.c b/tools/testing/selftests/bpf/progs/loop6.c
index e4ff97fbcce1..dd36aff4fba3 100644
--- a/tools/testing/selftests/bpf/progs/loop6.c
+++ b/tools/testing/selftests/bpf/progs/loop6.c
@@ -1,8 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/ptrace.h>
-#include <stddef.h>
-#include <linux/bpf.h>
+#include <vmlinux.h>
+#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
@@ -26,12 +25,6 @@ char _license[] SEC("license") = "GPL";
#define SG_CHAIN 0x01UL
#define SG_END 0x02UL
-struct scatterlist {
- unsigned long page_link;
- unsigned int offset;
- unsigned int length;
-};
-
#define sg_is_chain(sg) ((sg)->page_link & SG_CHAIN)
#define sg_is_last(sg) ((sg)->page_link & SG_END)
#define sg_chain_ptr(sg) \
@@ -62,7 +55,7 @@ static inline struct scatterlist *get_sgp(struct scatterlist **sgs, int i)
return sgp;
}
-int config = 0;
+int run_once = 0;
int result = 0;
SEC("kprobe/virtqueue_add_sgs")
@@ -73,14 +66,14 @@ int BPF_KPROBE(trace_virtqueue_add_sgs, void *unused, struct scatterlist **sgs,
__u64 length1 = 0, length2 = 0;
unsigned int i, n, len;
- if (config != 0)
+ if (run_once != 0)
return 0;
for (i = 0; (i < VIRTIO_MAX_SGS) && (i < out_sgs); i++) {
__sink(out_sgs);
for (n = 0, sgp = get_sgp(sgs, i); sgp && (n < SG_MAX);
sgp = __sg_next(sgp)) {
- bpf_probe_read_kernel(&len, sizeof(len), &sgp->length);
+ len = BPF_CORE_READ(sgp, length);
length1 += len;
n++;
}
@@ -90,13 +83,13 @@ int BPF_KPROBE(trace_virtqueue_add_sgs, void *unused, struct scatterlist **sgs,
__sink(in_sgs);
for (n = 0, sgp = get_sgp(sgs, i); sgp && (n < SG_MAX);
sgp = __sg_next(sgp)) {
- bpf_probe_read_kernel(&len, sizeof(len), &sgp->length);
+ len = BPF_CORE_READ(sgp, length);
length2 += len;
n++;
}
}
- config = 1;
+ run_once = 1;
result = length2 - length1;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/lpm_trie.h b/tools/testing/selftests/bpf/progs/lpm_trie.h
new file mode 100644
index 000000000000..76aa5821807f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/lpm_trie.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __PROGS_LPM_TRIE_H
+#define __PROGS_LPM_TRIE_H
+
+struct trie_key {
+ __u32 prefixlen;
+ __u32 data;
+};
+
+/* Benchmark operations */
+enum {
+ LPM_OP_NOOP = 0,
+ LPM_OP_BASELINE,
+ LPM_OP_LOOKUP,
+ LPM_OP_INSERT,
+ LPM_OP_UPDATE,
+ LPM_OP_DELETE,
+ LPM_OP_FREE
+};
+
+/*
+ * Return values from run_bench.
+ *
+ * Negative values are also allowed and represent kernel error codes.
+ */
+#define LPM_BENCH_SUCCESS 0
+#define LPM_BENCH_REINIT_MAP 1 /* Reset trie to initial state for current op */
+
+#endif
diff --git a/tools/testing/selftests/bpf/progs/lpm_trie_bench.c b/tools/testing/selftests/bpf/progs/lpm_trie_bench.c
new file mode 100644
index 000000000000..a0e6ebd5507a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/lpm_trie_bench.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Cloudflare */
+
+#include <vmlinux.h>
+#include <errno.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+#include "bpf_atomic.h"
+#include "progs/lpm_trie.h"
+
+#define BPF_OBJ_NAME_LEN 16U
+#define MAX_ENTRIES 100000000
+#define NR_LOOPS 10000
+
+char _license[] SEC("license") = "GPL";
+
+/* Filled by userspace. See fill_map() in bench_lpm_trie_map.c */
+struct {
+ __uint(type, BPF_MAP_TYPE_LPM_TRIE);
+ __type(key, struct trie_key);
+ __type(value, __u32);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __uint(max_entries, MAX_ENTRIES);
+} trie_map SEC(".maps");
+
+long hits;
+long duration_ns;
+
+/* Configured from userspace */
+__u32 nr_entries;
+__u32 prefixlen;
+bool random;
+__u8 op;
+
+static __u64 latency_free_start;
+
+SEC("fentry/bpf_map_free_deferred")
+int BPF_PROG(trie_free_entry, struct work_struct *work)
+{
+ struct bpf_map *map = container_of(work, struct bpf_map, work);
+ char name[BPF_OBJ_NAME_LEN];
+ u32 map_type;
+
+ map_type = BPF_CORE_READ(map, map_type);
+ if (map_type != BPF_MAP_TYPE_LPM_TRIE)
+ return 0;
+
+ /*
+ * Ideally we'd have access to the map ID but that's already
+ * freed before we enter trie_free().
+ */
+ BPF_CORE_READ_STR_INTO(&name, map, name);
+ if (bpf_strncmp(name, BPF_OBJ_NAME_LEN, "trie_free_map"))
+ return 0;
+
+ latency_free_start = bpf_ktime_get_ns();
+
+ return 0;
+}
+
+SEC("fexit/bpf_map_free_deferred")
+int BPF_PROG(trie_free_exit, struct work_struct *work)
+{
+ __u64 val;
+
+ if (!latency_free_start)
+ return 0;
+
+ val = bpf_ktime_get_ns() - latency_free_start;
+ latency_free_start = 0;
+
+ __sync_add_and_fetch(&duration_ns, val);
+ __sync_add_and_fetch(&hits, 1);
+
+ return 0;
+}
+
+static __u32 cur_key;
+
+static __always_inline void generate_key(struct trie_key *key)
+{
+ key->prefixlen = prefixlen;
+
+ if (random)
+ key->data = bpf_get_prandom_u32() % nr_entries;
+ else
+ key->data = cur_key++ % nr_entries;
+}
+
+static int noop(__u32 index, __u32 *unused)
+{
+ return 0;
+}
+
+static int baseline(__u32 index, __u32 *unused)
+{
+ struct trie_key key;
+ __u32 blackbox = 0;
+
+ generate_key(&key);
+ /* Avoid compiler optimizing out the modulo */
+ barrier_var(blackbox);
+ blackbox = READ_ONCE(key.data);
+
+ return 0;
+}
+
+static int lookup(__u32 index, int *retval)
+{
+ struct trie_key key;
+
+ generate_key(&key);
+ if (!bpf_map_lookup_elem(&trie_map, &key)) {
+ *retval = -ENOENT;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int insert(__u32 index, int *retval)
+{
+ struct trie_key key;
+ u32 val = 1;
+ int err;
+
+ generate_key(&key);
+ err = bpf_map_update_elem(&trie_map, &key, &val, BPF_NOEXIST);
+ if (err) {
+ *retval = err;
+ return 1;
+ }
+
+ /* Is this the last entry? */
+ if (key.data == nr_entries - 1) {
+ /* For atomicity concerns, see the comment in delete() */
+ *retval = LPM_BENCH_REINIT_MAP;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int update(__u32 index, int *retval)
+{
+ struct trie_key key;
+ u32 val = 1;
+ int err;
+
+ generate_key(&key);
+ err = bpf_map_update_elem(&trie_map, &key, &val, BPF_EXIST);
+ if (err) {
+ *retval = err;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int delete(__u32 index, int *retval)
+{
+ struct trie_key key;
+ int err;
+
+ generate_key(&key);
+ err = bpf_map_delete_elem(&trie_map, &key);
+ if (err) {
+ *retval = err;
+ return 1;
+ }
+
+ /* Do we need to refill the map? */
+ if (key.data == nr_entries - 1) {
+ /*
+ * Atomicity isn't required because DELETE only supports
+ * one producer running concurrently. What we need is a
+ * way to track how many entries have been deleted from
+ * the trie between consecutive invocations of the BPF
+ * prog because a single bpf_loop() call might not
+ * delete all entries, e.g. when NR_LOOPS < nr_entries.
+ */
+ *retval = LPM_BENCH_REINIT_MAP;
+ return 1;
+ }
+
+ return 0;
+}
+
+SEC("xdp")
+int BPF_PROG(run_bench)
+{
+ int err = LPM_BENCH_SUCCESS;
+ u64 start, delta;
+ int loops;
+
+ start = bpf_ktime_get_ns();
+
+ switch (op) {
+ case LPM_OP_NOOP:
+ loops = bpf_loop(NR_LOOPS, noop, NULL, 0);
+ break;
+ case LPM_OP_BASELINE:
+ loops = bpf_loop(NR_LOOPS, baseline, NULL, 0);
+ break;
+ case LPM_OP_LOOKUP:
+ loops = bpf_loop(NR_LOOPS, lookup, &err, 0);
+ break;
+ case LPM_OP_INSERT:
+ loops = bpf_loop(NR_LOOPS, insert, &err, 0);
+ break;
+ case LPM_OP_UPDATE:
+ loops = bpf_loop(NR_LOOPS, update, &err, 0);
+ break;
+ case LPM_OP_DELETE:
+ loops = bpf_loop(NR_LOOPS, delete, &err, 0);
+ break;
+ default:
+ bpf_printk("invalid benchmark operation\n");
+ return -1;
+ }
+
+ delta = bpf_ktime_get_ns() - start;
+
+ __sync_add_and_fetch(&duration_ns, delta);
+ __sync_add_and_fetch(&hits, loops);
+
+ return err;
+}
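
A hedged sketch of how a userspace harness might drive run_bench; the skeleton and variable names are assumptions (the real driver would live alongside the bench code, cf. the fill_map() reference above):

	char pkt[64] = {};	/* BPF_PROG_TEST_RUN on an XDP prog needs a packet buffer */
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = pkt,
		.data_size_in = sizeof(pkt),
	);
	int err;

	skel->bss->op = LPM_OP_LOOKUP;		/* any LPM_OP_* value */
	skel->bss->nr_entries = 10000;
	skel->bss->prefixlen = 32;

	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.run_bench), &opts);
	/* opts.retval carries LPM_BENCH_SUCCESS, LPM_BENCH_REINIT_MAP, or -errno */
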
diff --git a/tools/testing/selftests/bpf/progs/lpm_trie_map.c b/tools/testing/selftests/bpf/progs/lpm_trie_map.c
new file mode 100644
index 000000000000..6e60d686b664
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/lpm_trie_map.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#define MAX_ENTRIES 100000000
+
+struct trie_key {
+ __u32 prefixlen;
+ __u32 data;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_LPM_TRIE);
+ __type(key, struct trie_key);
+ __type(value, __u32);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __uint(max_entries, MAX_ENTRIES);
+} trie_free_map SEC(".maps");
diff --git a/tools/testing/selftests/bpf/progs/lsm.c b/tools/testing/selftests/bpf/progs/lsm.c
index 0c13b7409947..7de173daf27b 100644
--- a/tools/testing/selftests/bpf/progs/lsm.c
+++ b/tools/testing/selftests/bpf/progs/lsm.c
@@ -89,14 +89,16 @@ SEC("lsm/file_mprotect")
int BPF_PROG(test_int_hook, struct vm_area_struct *vma,
unsigned long reqprot, unsigned long prot, int ret)
{
- if (ret != 0)
+ struct mm_struct *mm = vma->vm_mm;
+
+ if (ret != 0 || !mm)
return ret;
__s32 pid = bpf_get_current_pid_tgid() >> 32;
int is_stack = 0;
- is_stack = (vma->vm_start <= vma->vm_mm->start_stack &&
- vma->vm_end >= vma->vm_mm->start_stack);
+ is_stack = (vma->vm_start <= mm->start_stack &&
+ vma->vm_end >= mm->start_stack);
if (is_stack && monitored_pid == pid) {
mprotect_count++;
diff --git a/tools/testing/selftests/bpf/progs/lsm_tailcall.c b/tools/testing/selftests/bpf/progs/lsm_tailcall.c
index 49c075ce2d4c..6e7e58051e64 100644
--- a/tools/testing/selftests/bpf/progs/lsm_tailcall.c
+++ b/tools/testing/selftests/bpf/progs/lsm_tailcall.c
@@ -20,14 +20,14 @@ int lsm_file_permission_prog(void *ctx)
return 0;
}
-SEC("lsm/file_alloc_security")
-int lsm_file_alloc_security_prog(void *ctx)
+SEC("lsm/kernfs_init_security")
+int lsm_kernfs_init_security_prog(void *ctx)
{
return 0;
}
-SEC("lsm/file_alloc_security")
-int lsm_file_alloc_security_entry(void *ctx)
+SEC("lsm/kernfs_init_security")
+int lsm_kernfs_init_security_entry(void *ctx)
{
bpf_tail_call_static(ctx, &jmp_table, 0);
return 0;
diff --git a/tools/testing/selftests/bpf/progs/map_excl.c b/tools/testing/selftests/bpf/progs/map_excl.c
new file mode 100644
index 000000000000..d461684728e4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/map_excl.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2025 Google LLC. */
+#include <linux/bpf.h>
+#include <time.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 1);
+} excl_map SEC(".maps");
+
+char _license[] SEC("license") = "GPL";
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int should_have_access(void *ctx)
+{
+ int key = 0, value = 0xdeadbeef;
+
+ bpf_map_update_elem(&excl_map, &key, &value, 0);
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int should_not_have_access(void *ctx)
+{
+ int key = 0, value = 0xdeadbeef;
+
+ bpf_map_update_elem(&excl_map, &key, &value, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c b/tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c
new file mode 100644
index 000000000000..3b984b6ae7c0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/mem_rdonly_untrusted.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+SEC("tp_btf/sys_enter")
+__success
+__log_level(2)
+__msg("r8 = *(u64 *)(r7 +0) ; R7=ptr_nameidata(off={{[0-9]+}}) R8=rdonly_untrusted_mem(sz=0)")
+__msg("r9 = *(u8 *)(r8 +0) ; R8=rdonly_untrusted_mem(sz=0) R9=scalar")
+int btf_id_to_ptr_mem(void *ctx)
+{
+ struct task_struct *task;
+ struct nameidata *idata;
+ u64 ret, off;
+
+ task = bpf_get_current_task_btf();
+ idata = task->nameidata;
+ off = bpf_core_field_offset(struct nameidata, pathname);
+ /*
+ * asm block to have reliable match target for __msg, equivalent of:
+ * ret = task->nameidata->pathname[0];
+ */
+ asm volatile (
+ "r7 = %[idata];"
+ "r7 += %[off];"
+ "r8 = *(u64 *)(r7 + 0);"
+ "r9 = *(u8 *)(r8 + 0);"
+ "%[ret] = r9;"
+ : [ret]"=r"(ret)
+ : [idata]"r"(idata),
+ [off]"r"(off)
+ : "r7", "r8", "r9");
+ return ret;
+}
+
+SEC("socket")
+__success
+__retval(0)
+int ldx_is_ok_bad_addr(void *ctx)
+{
+ char *p;
+
+ if (!bpf_core_enum_value_exists(enum bpf_features, BPF_FEAT_RDONLY_CAST_TO_VOID))
+ return 42;
+
+ p = bpf_rdonly_cast(0, 0);
+ return p[0x7fff];
+}
+
+SEC("socket")
+__success
+__retval(1)
+int ldx_is_ok_good_addr(void *ctx)
+{
+ int v, *p;
+
+ v = 1;
+ p = bpf_rdonly_cast(&v, 0);
+ return *p;
+}
+
+SEC("socket")
+__success
+int offset_not_tracked(void *ctx)
+{
+ int *p, i, s;
+
+ p = bpf_rdonly_cast(0, 0);
+ s = 0;
+ bpf_for(i, 0, 1000 * 1000 * 1000) {
+ p++;
+ s += *p;
+ }
+ return s;
+}
+
+SEC("socket")
+__failure
+__msg("cannot write into rdonly_untrusted_mem")
+int stx_not_ok(void *ctx)
+{
+ int v, *p;
+
+ v = 1;
+ p = bpf_rdonly_cast(&v, 0);
+ *p = 1;
+ return 0;
+}
+
+SEC("socket")
+__failure
+__msg("cannot write into rdonly_untrusted_mem")
+int atomic_not_ok(void *ctx)
+{
+ int v, *p;
+
+ v = 1;
+ p = bpf_rdonly_cast(&v, 0);
+ __sync_fetch_and_add(p, 1);
+ return 0;
+}
+
+SEC("socket")
+__failure
+__msg("cannot write into rdonly_untrusted_mem")
+int atomic_rmw_not_ok(void *ctx)
+{
+ long v, *p;
+
+ v = 1;
+ p = bpf_rdonly_cast(&v, 0);
+ return __sync_val_compare_and_swap(p, 0, 42);
+}
+
+SEC("socket")
+__failure
+__msg("invalid access to memory, mem_size=0 off=0 size=4")
+__msg("R1 min value is outside of the allowed memory range")
+int kfunc_param_not_ok(void *ctx)
+{
+ int *p;
+
+ p = bpf_rdonly_cast(0, 0);
+ bpf_kfunc_trusted_num_test(p);
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+__failure
+__msg("R1 type=rdonly_untrusted_mem expected=")
+int helper_param_not_ok(void *ctx)
+{
+ char *p;
+
+ p = bpf_rdonly_cast(0, 0);
+ /*
+	 * Any helper with an ARG_CONST_SIZE_OR_ZERO constraint will do;
+	 * it is the most permissive constraint.
+ */
+ bpf_copy_from_user(p, 0, (void *)42);
+ return 0;
+}
+
+static __noinline u64 *get_some_addr(void)
+{
+ if (bpf_get_prandom_u32())
+ return bpf_rdonly_cast(0, bpf_core_type_id_kernel(struct sock));
+ else
+ return bpf_rdonly_cast(0, 0);
+}
+
+SEC("socket")
+__success
+__retval(0)
+int mixed_mem_type(void *ctx)
+{
+ u64 *p;
+
+ /* Try to avoid compiler hoisting load to if branches by using __noinline func. */
+ p = get_some_addr();
+ return *p;
+}
+
+__attribute__((__aligned__(8)))
+u8 global[] = {
+ 0x11, 0x22, 0x33, 0x44,
+ 0x55, 0x66, 0x77, 0x88,
+ 0x99
+};
+
+__always_inline
+static u64 combine(void *p)
+{
+ u64 acc;
+
+ acc = 0;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ acc |= (*(u64 *)p >> 56) << 24;
+ acc |= (*(u32 *)p >> 24) << 16;
+ acc |= (*(u16 *)p >> 8) << 8;
+ acc |= *(u8 *)p;
+#else
+ acc |= (*(u64 *)p & 0xff) << 24;
+ acc |= (*(u32 *)p & 0xff) << 16;
+ acc |= (*(u16 *)p & 0xff) << 8;
+ acc |= *(u8 *)p;
+#endif
+ return acc;
+}
+
+SEC("socket")
+__retval(0x88442211)
+int diff_size_access(void *ctx)
+{
+ return combine(bpf_rdonly_cast(&global, 0));
+}
+
+SEC("socket")
+__retval(0x99553322)
+int misaligned_access(void *ctx)
+{
+ return combine(bpf_rdonly_cast(&global, 0) + 1);
+}
+
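The two __retval() expectations can be checked by hand: on a little-endian host, combine() keeps the top byte of the 8-, 4-, and 2-byte loads plus the first byte. A self-contained host-side check of that arithmetic (a sketch, not part of the patch; memcpy stands in for the BPF loads so the misaligned case stays well-defined):

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	static uint64_t combine_host(const uint8_t *p)
	{
		uint64_t v64;
		uint32_t v32;
		uint16_t v16;
		uint64_t acc = 0;

		memcpy(&v64, p, sizeof(v64));
		memcpy(&v32, p, sizeof(v32));
		memcpy(&v16, p, sizeof(v16));

		acc |= (v64 >> 56) << 24;		/* top byte of the 8-byte load */
		acc |= (uint64_t)(v32 >> 24) << 16;	/* top byte of the 4-byte load */
		acc |= (uint64_t)(v16 >> 8) << 8;	/* top byte of the 2-byte load */
		acc |= p[0];				/* the 1-byte load */
		return acc;
	}

	int main(void)
	{
		const uint8_t g[] = {
			0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99
		};

		assert(combine_host(g) == 0x88442211);		/* diff_size_access */
		assert(combine_host(g + 1) == 0x99553322);	/* misaligned_access */
		return 0;
	}
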
+__weak int return_one(void)
+{
+ return 1;
+}
+
+SEC("socket")
+__success
+__retval(1)
+int null_check(void *ctx)
+{
+ int *p;
+
+ p = bpf_rdonly_cast(0, 0);
+ if (p == 0)
+		/* make this a function call to keep the compiler from
+		 * moving the r0 assignment before the check.
+ */
+ return return_one();
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/mptcp_sockmap.c b/tools/testing/selftests/bpf/progs/mptcp_sockmap.c
new file mode 100644
index 000000000000..d4eef0cbadb9
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/mptcp_sockmap.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bpf_tracing_net.h"
+
+char _license[] SEC("license") = "GPL";
+
+int sk_index;
+int redirect_idx;
+int trace_port;
+int helper_ret;
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+ __uint(max_entries, 100);
+} sock_map SEC(".maps");
+
+SEC("sockops")
+int mptcp_sockmap_inject(struct bpf_sock_ops *skops)
+{
+ struct bpf_sock *sk;
+
+ /* only accept specified connection */
+ if (skops->local_port != trace_port ||
+ skops->op != BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB)
+ return 1;
+
+ sk = skops->sk;
+ if (!sk)
+ return 1;
+
+ /* update sk handler */
+ helper_ret = bpf_sock_map_update(skops, &sock_map, &sk_index, BPF_NOEXIST);
+
+ return 1;
+}
+
+SEC("sk_skb/stream_verdict")
+int mptcp_sockmap_redirect(struct __sk_buff *skb)
+{
+ /* redirect skb to the sk under sock_map[redirect_idx] */
+ return bpf_sk_redirect_map(skb, &sock_map, redirect_idx, 0);
+}
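
The globals are presumably filled in by the test harness before traffic flows; a sketch with an assumed skeleton name:

	skel->bss->trace_port = server_port;	/* connection the sockops prog intercepts */
	skel->bss->sk_index = 0;		/* sock_map slot for the accepted sk */
	skel->bss->redirect_idx = 0;		/* slot the verdict prog redirects to */
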
diff --git a/tools/testing/selftests/bpf/progs/mptcp_subflow.c b/tools/testing/selftests/bpf/progs/mptcp_subflow.c
index 70302477e326..41389e579578 100644
--- a/tools/testing/selftests/bpf/progs/mptcp_subflow.c
+++ b/tools/testing/selftests/bpf/progs/mptcp_subflow.c
@@ -117,7 +117,7 @@ int _getsockopt_subflow(struct bpf_sockopt *ctx)
return 1;
msk = bpf_core_cast(sk, struct mptcp_sock);
- if (msk->pm.subflows != 1) {
+ if (msk->pm.extra_subflows != 1) {
ctx->retval = -1;
return 1;
}
diff --git a/tools/testing/selftests/bpf/progs/rbtree.c b/tools/testing/selftests/bpf/progs/rbtree.c
index a3620c15c136..49fe93d7e059 100644
--- a/tools/testing/selftests/bpf/progs/rbtree.c
+++ b/tools/testing/selftests/bpf/progs/rbtree.c
@@ -61,19 +61,19 @@ static long __add_three(struct bpf_rb_root *root, struct bpf_spin_lock *lock)
}
m->key = 1;
- bpf_spin_lock(&glock);
- bpf_rbtree_add(&groot, &n->node, less);
- bpf_rbtree_add(&groot, &m->node, less);
- bpf_spin_unlock(&glock);
+ bpf_spin_lock(lock);
+ bpf_rbtree_add(root, &n->node, less);
+ bpf_rbtree_add(root, &m->node, less);
+ bpf_spin_unlock(lock);
n = bpf_obj_new(typeof(*n));
if (!n)
return 3;
n->key = 3;
- bpf_spin_lock(&glock);
- bpf_rbtree_add(&groot, &n->node, less);
- bpf_spin_unlock(&glock);
+ bpf_spin_lock(lock);
+ bpf_rbtree_add(root, &n->node, less);
+ bpf_spin_unlock(lock);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/rbtree_search.c b/tools/testing/selftests/bpf/progs/rbtree_search.c
index 098ef970fac1..b05565d1db0d 100644
--- a/tools/testing/selftests/bpf/progs/rbtree_search.c
+++ b/tools/testing/selftests/bpf/progs/rbtree_search.c
@@ -183,7 +183,7 @@ long test_##op##_spinlock_##dolock(void *ctx) \
}
/*
- * Use a spearate MSG macro instead of passing to TEST_XXX(..., MSG)
+ * Use a separate MSG macro instead of passing to TEST_XXX(..., MSG)
* to ensure the message itself is not in the bpf prog lineinfo
* which the verifier includes in its log.
* Otherwise, the test_loader will incorrectly match the prog lineinfo
diff --git a/tools/testing/selftests/bpf/progs/rcu_read_lock.c b/tools/testing/selftests/bpf/progs/rcu_read_lock.c
index 43637ee2cdcd..d70c28824bbe 100644
--- a/tools/testing/selftests/bpf/progs/rcu_read_lock.c
+++ b/tools/testing/selftests/bpf/progs/rcu_read_lock.c
@@ -16,10 +16,11 @@ struct {
__type(value, long);
} map_a SEC(".maps");
-__u32 user_data, key_serial, target_pid;
+__u32 user_data, target_pid;
+__s32 key_serial;
__u64 flags, task_storage_val, cgroup_id;
-struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym;
+struct bpf_key *bpf_lookup_user_key(__s32 serial, __u64 flags) __ksym;
void bpf_key_put(struct bpf_key *key) __ksym;
void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;
@@ -277,6 +278,46 @@ out:
return 0;
}
+SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep")
+int nested_rcu_region_unbalanced_1(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+
+ /* nested rcu read lock regions */
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ if (!real_parent)
+ goto out;
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+out:
+ bpf_rcu_read_unlock();
+ bpf_rcu_read_unlock();
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep")
+int nested_rcu_region_unbalanced_2(void *ctx)
+{
+ struct task_struct *task, *real_parent;
+
+ /* nested rcu read lock regions */
+ task = bpf_get_current_task_btf();
+ bpf_rcu_read_lock();
+ bpf_rcu_read_lock();
+ bpf_rcu_read_lock();
+ real_parent = task->real_parent;
+ if (!real_parent)
+ goto out;
+ (void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
+out:
+ bpf_rcu_read_unlock();
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
int task_trusted_non_rcuptr(void *ctx)
{
diff --git a/tools/testing/selftests/bpf/progs/read_cgroupfs_xattr.c b/tools/testing/selftests/bpf/progs/read_cgroupfs_xattr.c
new file mode 100644
index 000000000000..405adbe5e8b0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/read_cgroupfs_xattr.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+
+char _license[] SEC("license") = "GPL";
+
+pid_t target_pid = 0;
+
+char xattr_value[64];
+static const char expected_value_a[] = "bpf_selftest_value_a";
+static const char expected_value_b[] = "bpf_selftest_value_b";
+bool found_value_a;
+bool found_value_b;
+
+SEC("lsm.s/file_open")
+int BPF_PROG(test_file_open)
+{
+ u64 cgrp_id = bpf_get_current_cgroup_id();
+ struct cgroup_subsys_state *css, *tmp;
+ struct bpf_dynptr value_ptr;
+ struct cgroup *cgrp;
+
+ if ((bpf_get_current_pid_tgid() >> 32) != target_pid)
+ return 0;
+
+ bpf_rcu_read_lock();
+ cgrp = bpf_cgroup_from_id(cgrp_id);
+ if (!cgrp) {
+ bpf_rcu_read_unlock();
+ return 0;
+ }
+
+ css = &cgrp->self;
+ bpf_dynptr_from_mem(xattr_value, sizeof(xattr_value), 0, &value_ptr);
+ bpf_for_each(css, tmp, css, BPF_CGROUP_ITER_ANCESTORS_UP) {
+ int ret;
+
+ ret = bpf_cgroup_read_xattr(tmp->cgroup, "user.bpf_test",
+ &value_ptr);
+ if (ret < 0)
+ continue;
+
+ if (ret == sizeof(expected_value_a) &&
+ !bpf_strncmp(xattr_value, sizeof(expected_value_a), expected_value_a))
+ found_value_a = true;
+ if (ret == sizeof(expected_value_b) &&
+ !bpf_strncmp(xattr_value, sizeof(expected_value_b), expected_value_b))
+ found_value_b = true;
+ }
+
+ bpf_rcu_read_unlock();
+ bpf_cgroup_release(cgrp);
+
+ return 0;
+}
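
Userspace presumably prepares nested cgroups and tags them before triggering file_open; only the xattr name and the NUL-inclusive length are taken from the program above, the helper below is an assumption:

	#include <string.h>
	#include <sys/xattr.h>

	static int tag_cgroup(const char *cgroup_dir, const char *value)
	{
		/* length includes the NUL so that ret == sizeof(expected_value_*)
		 * holds in test_file_open() above
		 */
		return setxattr(cgroup_dir, "user.bpf_test", value,
				strlen(value) + 1, 0);
	}
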
diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr.c b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
index 893a4fdb4b6e..1aca85d86aeb 100644
--- a/tools/testing/selftests/bpf/progs/refcounted_kptr.c
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
@@ -568,4 +568,64 @@ err_out:
return 0;
}
+private(kptr_ref) u64 ref;
+
+static int probe_read_refcount(void)
+{
+ u32 refcount;
+
+ bpf_probe_read_kernel(&refcount, sizeof(refcount), (void *) ref);
+ return refcount;
+}
+
+static int __insert_in_list(struct bpf_list_head *head, struct bpf_spin_lock *lock,
+ struct node_data __kptr **node)
+{
+ struct node_data *node_new, *node_ref, *node_old;
+
+ node_new = bpf_obj_new(typeof(*node_new));
+ if (!node_new)
+ return -1;
+
+ node_ref = bpf_refcount_acquire(node_new);
+ node_old = bpf_kptr_xchg(node, node_new);
+ if (node_old) {
+ bpf_obj_drop(node_old);
+ bpf_obj_drop(node_ref);
+ return -2;
+ }
+
+ bpf_spin_lock(lock);
+ bpf_list_push_front(head, &node_ref->l);
+ ref = (u64)(void *) &node_ref->ref;
+ bpf_spin_unlock(lock);
+ return probe_read_refcount();
+}
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} percpu_hash SEC(".maps");
+
+SEC("tc")
+int percpu_hash_refcount_leak(void *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&percpu_hash, &key);
+ if (!v)
+ return 0;
+
+ return __insert_in_list(&head, &lock, &v->node);
+}
+
+SEC("tc")
+int check_percpu_hash_refcount(void *ctx)
+{
+ return probe_read_refcount();
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/ringbuf_bench.c b/tools/testing/selftests/bpf/progs/ringbuf_bench.c
index 6a468496f539..d96c7d1e8fc2 100644
--- a/tools/testing/selftests/bpf/progs/ringbuf_bench.c
+++ b/tools/testing/selftests/bpf/progs/ringbuf_bench.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
+#include <stdbool.h>
#include <linux/bpf.h>
#include <stdint.h>
#include <bpf/bpf_helpers.h>
@@ -14,9 +15,11 @@ struct {
const volatile int batch_cnt = 0;
const volatile long use_output = 0;
+const volatile bool bench_producer = false;
long sample_val = 42;
long dropped __attribute__((aligned(128))) = 0;
+long hits __attribute__((aligned(128))) = 0;
const volatile long wakeup_data_size = 0;
@@ -24,6 +27,9 @@ static __always_inline long get_flags()
{
long sz;
+ if (bench_producer)
+ return BPF_RB_NO_WAKEUP;
+
if (!wakeup_data_size)
return 0;
@@ -47,6 +53,8 @@ int bench_ringbuf(void *ctx)
*sample = sample_val;
flags = get_flags();
bpf_ringbuf_submit(sample, flags);
+ if (bench_producer)
+ __sync_add_and_fetch(&hits, 1);
}
}
} else {
@@ -55,6 +63,9 @@ int bench_ringbuf(void *ctx)
if (bpf_ringbuf_output(&ringbuf, &sample_val,
sizeof(sample_val), flags))
__sync_add_and_fetch(&dropped, 1);
+ else if (bench_producer)
+ __sync_add_and_fetch(&hits, 1);
+
}
}
return 0;
diff --git a/tools/testing/selftests/bpf/progs/security_bpf_map.c b/tools/testing/selftests/bpf/progs/security_bpf_map.c
new file mode 100644
index 000000000000..7216b3450e96
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/security_bpf_map.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "vmlinux.h"
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+#define EPERM 1 /* Operation not permitted */
+
+/* From include/linux/mm.h. */
+#define FMODE_WRITE 0x2
+
+struct map;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 1);
+} prot_status_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 3);
+} prot_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 3);
+} not_prot_map SEC(".maps");
+
+SEC("fmod_ret/security_bpf_map")
+int BPF_PROG(fmod_bpf_map, struct bpf_map *map, int fmode)
+{
+ __u32 key = 0;
+ __u32 *status_ptr = bpf_map_lookup_elem(&prot_status_map, &key);
+
+ if (!status_ptr || !*status_ptr)
+ return 0;
+
+ if (map == &prot_map) {
+ /* Allow read-only access */
+ if (fmode & FMODE_WRITE)
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+/*
+ * This program keeps references to the maps. This is needed to prevent
+ * them from being optimized out.
+ */
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(fentry_dummy1, int a)
+{
+ __u32 key = 0;
+ __u32 val1 = a;
+ __u32 val2 = a + 1;
+
+ bpf_map_update_elem(&prot_map, &key, &val1, BPF_ANY);
+ bpf_map_update_elem(&not_prot_map, &key, &val2, BPF_ANY);
+ return 0;
+}
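
With fmod_bpf_map attached, the expected userspace view is roughly the following (fd variable names are assumptions; with libbpf 1.0 error semantics the denied call returns -EPERM):

	__u32 key = 0, one = 1, val = 7;
	int err;

	/* arm the check via prot_status_map */
	err = bpf_map_update_elem(status_fd, &key, &one, BPF_ANY);

	/* write access to prot_map is now expected to fail with EPERM */
	err = bpf_map_update_elem(prot_fd, &key, &val, BPF_ANY);

	/* while not_prot_map stays writable */
	err = bpf_map_update_elem(not_prot_fd, &key, &val, BPF_ANY);
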
diff --git a/tools/testing/selftests/bpf/progs/set_global_vars.c b/tools/testing/selftests/bpf/progs/set_global_vars.c
index 90f5656c3991..ebaef28b2cb3 100644
--- a/tools/testing/selftests/bpf/progs/set_global_vars.c
+++ b/tools/testing/selftests/bpf/progs/set_global_vars.c
@@ -7,22 +7,30 @@
char _license[] SEC("license") = "GPL";
-enum Enum { EA1 = 0, EA2 = 11 };
+typedef __s32 s32;
+typedef s32 i32;
+typedef __u8 u8;
+
+enum Enum { EA1 = 0, EA2 = 11, EA3 = 10 };
enum Enumu64 {EB1 = 0llu, EB2 = 12llu };
enum Enums64 { EC1 = 0ll, EC2 = 13ll };
const volatile __s64 var_s64 = -1;
const volatile __u64 var_u64 = 0;
-const volatile __s32 var_s32 = -1;
+const volatile i32 var_s32 = -1;
const volatile __u32 var_u32 = 0;
const volatile __s16 var_s16 = -1;
const volatile __u16 var_u16 = 0;
const volatile __s8 var_s8 = -1;
-const volatile __u8 var_u8 = 0;
+const volatile u8 var_u8 = 0;
const volatile enum Enum var_ea = EA1;
const volatile enum Enumu64 var_eb = EB1;
const volatile enum Enums64 var_ec = EC1;
const volatile bool var_b = false;
+const volatile i32 arr[32];
+const volatile enum Enum enum_arr[32];
+const volatile i32 three_d[47][19][17];
+const volatile i32 *ptr_arr[32];
struct Struct {
int:16;
@@ -35,34 +43,38 @@ struct Struct {
volatile struct {
const int:1;
union {
- const volatile __u8 var_u8;
+ const volatile u8 var_u8[3];
const volatile __s16 filler3;
const int:1;
+ s32 mat[7][5];
} u;
};
- } struct2;
+ } struct2[2][4];
};
const volatile __u32 stru = 0; /* same prefix as below */
-const volatile struct Struct struct1 = {.struct2 = {.u = {.var_u8 = 1}}};
+const volatile struct Struct struct1[3];
+const volatile struct Struct struct11[11][7];
-union Union {
- __u16 var_u16;
- struct Struct3 {
- struct {
- __u8 var_u8_l;
- };
+struct Struct3 {
+ struct {
+ u8 var_u8_l;
+ };
+ struct {
struct {
- struct {
- __u8 var_u8_h;
- };
+ u8 var_u8_h;
};
- } struct3;
+ };
};
-const volatile union Union union1 = {.var_u16 = -1};
+typedef struct Struct3 Struct3_t;
-char arr[4] = {0};
+union Union {
+ __u16 var_u16;
+ Struct3_t struct3;
+};
+
+const volatile union Union union1 = {.var_u16 = -1};
SEC("socket")
int test_set_globals(void *ctx)
@@ -81,8 +93,14 @@ int test_set_globals(void *ctx)
a = var_eb;
a = var_ec;
a = var_b;
- a = struct1.struct2.u.var_u8;
+ a = struct1[2].struct2[1][2].u.var_u8[2];
a = union1.var_u16;
+ a = arr[3];
+ a = arr[EA2];
+ a = enum_arr[EC2];
+ a = three_d[31][7][EA2];
+ a = struct1[2].struct2[1][2].u.mat[5][3];
+ a = struct11[7][5].struct2[0][1].u.mat[3][0];
return a;
}
diff --git a/tools/testing/selftests/bpf/progs/sk_bypass_prot_mem.c b/tools/testing/selftests/bpf/progs/sk_bypass_prot_mem.c
new file mode 100644
index 000000000000..09a00d11ffcc
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/sk_bypass_prot_mem.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2025 Google LLC */
+
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <errno.h>
+
+extern int tcp_memory_per_cpu_fw_alloc __ksym;
+extern int udp_memory_per_cpu_fw_alloc __ksym;
+
+int nr_cpus;
+bool tcp_activated, udp_activated;
+long tcp_memory_allocated, udp_memory_allocated;
+
+struct sk_prot {
+ long *memory_allocated;
+ int *memory_per_cpu_fw_alloc;
+};
+
+static int drain_memory_per_cpu_fw_alloc(__u32 i, struct sk_prot *sk_prot_ctx)
+{
+ int *memory_per_cpu_fw_alloc;
+
+ memory_per_cpu_fw_alloc = bpf_per_cpu_ptr(sk_prot_ctx->memory_per_cpu_fw_alloc, i);
+ if (memory_per_cpu_fw_alloc)
+ *sk_prot_ctx->memory_allocated += *memory_per_cpu_fw_alloc;
+
+ return 0;
+}
+
+static long get_memory_allocated(struct sock *_sk, int *memory_per_cpu_fw_alloc)
+{
+ struct sock *sk = bpf_core_cast(_sk, struct sock);
+ struct sk_prot sk_prot_ctx;
+ long memory_allocated;
+
+ /* net_aligned_data.{tcp,udp}_memory_allocated was not available. */
+ memory_allocated = sk->__sk_common.skc_prot->memory_allocated->counter;
+
+ sk_prot_ctx.memory_allocated = &memory_allocated;
+ sk_prot_ctx.memory_per_cpu_fw_alloc = memory_per_cpu_fw_alloc;
+
+ bpf_loop(nr_cpus, drain_memory_per_cpu_fw_alloc, &sk_prot_ctx, 0);
+
+ return memory_allocated;
+}
+
+static void fentry_init_sock(struct sock *sk, bool *activated,
+ long *memory_allocated, int *memory_per_cpu_fw_alloc)
+{
+ if (!*activated)
+ return;
+
+ *memory_allocated = get_memory_allocated(sk, memory_per_cpu_fw_alloc);
+ *activated = false;
+}
+
+SEC("fentry/tcp_init_sock")
+int BPF_PROG(fentry_tcp_init_sock, struct sock *sk)
+{
+ fentry_init_sock(sk, &tcp_activated,
+ &tcp_memory_allocated, &tcp_memory_per_cpu_fw_alloc);
+ return 0;
+}
+
+SEC("fentry/udp_init_sock")
+int BPF_PROG(fentry_udp_init_sock, struct sock *sk)
+{
+ fentry_init_sock(sk, &udp_activated,
+ &udp_memory_allocated, &udp_memory_per_cpu_fw_alloc);
+ return 0;
+}
+
+SEC("cgroup/sock_create")
+int sock_create(struct bpf_sock *ctx)
+{
+ int err, val = 1;
+
+ err = bpf_setsockopt(ctx, SOL_SOCKET, SK_BPF_BYPASS_PROT_MEM,
+ &val, sizeof(val));
+ if (err)
+ goto err;
+
+ val = 0;
+
+ err = bpf_getsockopt(ctx, SOL_SOCKET, SK_BPF_BYPASS_PROT_MEM,
+ &val, sizeof(val));
+ if (err)
+ goto err;
+
+ if (val != 1) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ return 1;
+
+err:
+ bpf_set_retval(err);
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
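
get_memory_allocated() folds the per-CPU forward-alloc counters into the shared counter, so nr_cpus has to be set before the fentry programs fire; a sketch with an assumed skeleton name:

	skel->bss->nr_cpus = libbpf_num_possible_cpus();
	skel->bss->tcp_activated = true;	/* sample once on the next tcp_init_sock() */
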
diff --git a/tools/testing/selftests/bpf/progs/sock_iter_batch.c b/tools/testing/selftests/bpf/progs/sock_iter_batch.c
index 8f483337e103..77966ded5467 100644
--- a/tools/testing/selftests/bpf/progs/sock_iter_batch.c
+++ b/tools/testing/selftests/bpf/progs/sock_iter_batch.c
@@ -23,6 +23,7 @@ static bool ipv4_addr_loopback(__be32 a)
}
volatile const unsigned int sf;
+volatile const unsigned int ss;
volatile const __u16 ports[2];
unsigned int bucket[2];
@@ -42,16 +43,18 @@ int iter_tcp_soreuse(struct bpf_iter__tcp *ctx)
sock_cookie = bpf_get_socket_cookie(sk);
sk = bpf_core_cast(sk, struct sock);
if (sk->sk_family != sf ||
- sk->sk_state != TCP_LISTEN ||
- sk->sk_family == AF_INET6 ?
+ (ss && sk->sk_state != ss) ||
+ (sk->sk_family == AF_INET6 ?
!ipv6_addr_loopback(&sk->sk_v6_rcv_saddr) :
- !ipv4_addr_loopback(sk->sk_rcv_saddr))
+ !ipv4_addr_loopback(sk->sk_rcv_saddr)))
return 0;
if (sk->sk_num == ports[0])
idx = 0;
else if (sk->sk_num == ports[1])
idx = 1;
+ else if (!ports[0] && !ports[1])
+ idx = 0;
else
return 0;
@@ -67,6 +70,27 @@ int iter_tcp_soreuse(struct bpf_iter__tcp *ctx)
return 0;
}
+volatile const __u64 destroy_cookie;
+
+SEC("iter/tcp")
+int iter_tcp_destroy(struct bpf_iter__tcp *ctx)
+{
+ struct sock_common *sk_common = (struct sock_common *)ctx->sk_common;
+ __u64 sock_cookie;
+
+ if (!sk_common)
+ return 0;
+
+ sock_cookie = bpf_get_socket_cookie(sk_common);
+ if (sock_cookie != destroy_cookie)
+ return 0;
+
+ bpf_sock_destroy(sk_common);
+ bpf_seq_write(ctx->meta->seq, &sock_cookie, sizeof(sock_cookie));
+
+ return 0;
+}
+
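Since destroy_cookie is volatile const (read-only data), userspace presumably resolves the target socket's cookie and pins it before the object is loaded; a sketch:

	__u64 cookie = 0;
	socklen_t len = sizeof(cookie);

	getsockopt(target_fd, SOL_SOCKET, SO_COOKIE, &cookie, &len);
	skel->rodata->destroy_cookie = cookie;	/* rodata: must be set before load */
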
#define udp_sk(ptr) container_of(ptr, struct udp_sock, inet.sk)
SEC("iter/udp")
@@ -83,15 +107,17 @@ int iter_udp_soreuse(struct bpf_iter__udp *ctx)
sock_cookie = bpf_get_socket_cookie(sk);
sk = bpf_core_cast(sk, struct sock);
if (sk->sk_family != sf ||
- sk->sk_family == AF_INET6 ?
+ (sk->sk_family == AF_INET6 ?
!ipv6_addr_loopback(&sk->sk_v6_rcv_saddr) :
- !ipv4_addr_loopback(sk->sk_rcv_saddr))
+ !ipv4_addr_loopback(sk->sk_rcv_saddr)))
return 0;
if (sk->sk_num == ports[0])
idx = 0;
else if (sk->sk_num == ports[1])
idx = 1;
+ else if (!ports[0] && !ports[1])
+ idx = 0;
else
return 0;
diff --git a/tools/testing/selftests/bpf/progs/stacktrace_ips.c b/tools/testing/selftests/bpf/progs/stacktrace_ips.c
new file mode 100644
index 000000000000..a96c8150d7f5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/stacktrace_ips.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Facebook
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#ifndef PERF_MAX_STACK_DEPTH
+#define PERF_MAX_STACK_DEPTH 127
+#endif
+
+typedef __u64 stack_trace_t[PERF_MAX_STACK_DEPTH];
+
+struct {
+ __uint(type, BPF_MAP_TYPE_STACK_TRACE);
+ __uint(max_entries, 16384);
+ __type(key, __u32);
+ __type(value, stack_trace_t);
+} stackmap SEC(".maps");
+
+extern bool CONFIG_UNWINDER_ORC __kconfig __weak;
+
+/*
+ * This function is here to have CONFIG_UNWINDER_ORC
+ * used and added to object BTF.
+ */
+int unused(void)
+{
+ return CONFIG_UNWINDER_ORC ? 0 : 1;
+}
+
+__u32 stack_key;
+
+SEC("kprobe.multi")
+int kprobe_multi_test(struct pt_regs *ctx)
+{
+ stack_key = bpf_get_stackid(ctx, &stackmap, 0);
+ return 0;
+}
+
+SEC("raw_tp/bpf_testmod_test_read")
+int rawtp_test(void *ctx)
+{
+ /* Skip ebpf program entry in the stack. */
+ stack_key = bpf_get_stackid(ctx, &stackmap, 0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_stacktrace_map.c b/tools/testing/selftests/bpf/progs/stacktrace_map.c
index 47568007b668..0c77df05be7f 100644
--- a/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
+++ b/tools/testing/selftests/bpf/progs/stacktrace_map.c
@@ -50,6 +50,7 @@ struct sched_switch_args {
int next_prio;
};
+__u32 stack_id;
SEC("tracepoint/sched/sched_switch")
int oncpu(struct sched_switch_args *ctx)
{
@@ -64,6 +65,7 @@ int oncpu(struct sched_switch_args *ctx)
/* The size of stackmap and stackid_hmap should be the same */
key = bpf_get_stackid(ctx, &stackmap, 0);
if ((int)key >= 0) {
+ stack_id = key;
bpf_map_update_elem(&stackid_hmap, &key, &val, 0);
stack_p = bpf_map_lookup_elem(&stack_amap, &key);
if (stack_p)
diff --git a/tools/testing/selftests/bpf/progs/stream.c b/tools/testing/selftests/bpf/progs/stream.c
new file mode 100644
index 000000000000..4a5bd852f10c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/stream.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+#include "bpf_arena_common.h"
+
+struct arr_elem {
+ struct bpf_res_spin_lock lock;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct arr_elem);
+} arrmap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARENA);
+ __uint(map_flags, BPF_F_MMAPABLE);
+ __uint(max_entries, 1); /* number of pages */
+} arena SEC(".maps");
+
+struct elem {
+ struct bpf_timer timer;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} array SEC(".maps");
+
+#define ENOSPC 28
+#define _STR "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+
+int size;
+u64 fault_addr;
+void *arena_ptr;
+
+SEC("syscall")
+__success __retval(0)
+int stream_exhaust(void *ctx)
+{
+ /* Use global variable for loop convergence. */
+ size = 0;
+ bpf_repeat(BPF_MAX_LOOPS) {
+ if (bpf_stream_printk(BPF_STDOUT, _STR) == -ENOSPC && size == 99954)
+ return 0;
+ size += sizeof(_STR) - 1;
+ }
+ return 1;
+}
+
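The 99954 check is plain arithmetic: each successful bpf_stream_printk() adds sizeof(_STR) - 1 = 54 bytes, and 54 × 1851 = 99954, so -ENOSPC is expected exactly after 1851 successful writes, once the stream capacity (evidently a little under 100008 bytes, as implied by the test rather than stated here) is used up.
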
+SEC("syscall")
+__arch_x86_64
+__arch_arm64
+__arch_s390x
+__success __retval(0)
+__stderr("ERROR: Timeout detected for may_goto instruction")
+__stderr("CPU: {{[0-9]+}} UID: 0 PID: {{[0-9]+}} Comm: {{.*}}")
+__stderr("Call trace:\n"
+"{{([a-zA-Z_][a-zA-Z0-9_]*\\+0x[0-9a-fA-F]+/0x[0-9a-fA-F]+\n"
+"|[ \t]+[^\n]+\n)*}}")
+int stream_cond_break(void *ctx)
+{
+ while (can_loop)
+ ;
+ return 0;
+}
+
+SEC("syscall")
+__success __retval(0)
+__stderr("ERROR: AA or ABBA deadlock detected for bpf_res_spin_lock")
+__stderr("{{Attempted lock = (0x[0-9a-fA-F]+)\n"
+"Total held locks = 1\n"
+"Held lock\\[ 0\\] = \\1}}")
+__stderr("...")
+__stderr("CPU: {{[0-9]+}} UID: 0 PID: {{[0-9]+}} Comm: {{.*}}")
+__stderr("Call trace:\n"
+"{{([a-zA-Z_][a-zA-Z0-9_]*\\+0x[0-9a-fA-F]+/0x[0-9a-fA-F]+\n"
+"|[ \t]+[^\n]+\n)*}}")
+int stream_deadlock(void *ctx)
+{
+ struct bpf_res_spin_lock *lock, *nlock;
+
+ lock = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!lock)
+ return 1;
+ nlock = bpf_map_lookup_elem(&arrmap, &(int){0});
+ if (!nlock)
+ return 1;
+ if (bpf_res_spin_lock(lock))
+ return 1;
+ if (bpf_res_spin_lock(nlock)) {
+ bpf_res_spin_unlock(lock);
+ return 0;
+ }
+ bpf_res_spin_unlock(nlock);
+ bpf_res_spin_unlock(lock);
+ return 1;
+}
+
+SEC("syscall")
+__success __retval(0)
+int stream_syscall(void *ctx)
+{
+ bpf_stream_printk(BPF_STDOUT, "foo");
+ return 0;
+}
+
+SEC("syscall")
+__arch_x86_64
+__arch_arm64
+__success __retval(0)
+__stderr("ERROR: Arena WRITE access at unmapped address 0x{{.*}}")
+__stderr("CPU: {{[0-9]+}} UID: 0 PID: {{[0-9]+}} Comm: {{.*}}")
+__stderr("Call trace:\n"
+"{{([a-zA-Z_][a-zA-Z0-9_]*\\+0x[0-9a-fA-F]+/0x[0-9a-fA-F]+\n"
+"|[ \t]+[^\n]+\n)*}}")
+int stream_arena_write_fault(void *ctx)
+{
+ struct bpf_arena *ptr = (void *)&arena;
+ u64 user_vm_start;
+
+ /* Prevent GCC bounds warning: casting &arena to struct bpf_arena *
+ * triggers bounds checking since the map definition is smaller than struct
+ * bpf_arena. barrier_var() makes the pointer opaque to GCC, preventing the
+ * bounds analysis
+ */
+ barrier_var(ptr);
+ user_vm_start = ptr->user_vm_start;
+ fault_addr = user_vm_start + 0x7fff;
+ bpf_addr_space_cast(user_vm_start, 0, 1);
+ asm volatile (
+ "r1 = %0;"
+ "r2 = 1;"
+ "*(u32 *)(r1 + 0x7fff) = r2;"
+ :
+ : "r" (user_vm_start)
+ : "r1", "r2"
+ );
+ return 0;
+}
+
+SEC("syscall")
+__arch_x86_64
+__arch_arm64
+__success __retval(0)
+__stderr("ERROR: Arena READ access at unmapped address 0x{{.*}}")
+__stderr("CPU: {{[0-9]+}} UID: 0 PID: {{[0-9]+}} Comm: {{.*}}")
+__stderr("Call trace:\n"
+"{{([a-zA-Z_][a-zA-Z0-9_]*\\+0x[0-9a-fA-F]+/0x[0-9a-fA-F]+\n"
+"|[ \t]+[^\n]+\n)*}}")
+int stream_arena_read_fault(void *ctx)
+{
+ struct bpf_arena *ptr = (void *)&arena;
+ u64 user_vm_start;
+
+ /* Prevent GCC bounds warning: casting &arena to struct bpf_arena *
+ * triggers bounds checking since the map definition is smaller than struct
+ * bpf_arena. barrier_var() makes the pointer opaque to GCC, preventing the
+ * bounds analysis
+ */
+ barrier_var(ptr);
+ user_vm_start = ptr->user_vm_start;
+ fault_addr = user_vm_start + 0x7fff;
+ bpf_addr_space_cast(user_vm_start, 0, 1);
+ asm volatile (
+ "r1 = %0;"
+ "r1 = *(u32 *)(r1 + 0x7fff);"
+ :
+ : "r" (user_vm_start)
+ : "r1"
+ );
+ return 0;
+}
+
+static __noinline void subprog(void)
+{
+ int __arena *addr = (int __arena *)0xdeadbeef;
+
+ arena_ptr = &arena;
+ *addr = 1;
+}
+
+SEC("syscall")
+__arch_x86_64
+__arch_arm64
+__success __retval(0)
+__stderr("ERROR: Arena WRITE access at unmapped address 0x{{.*}}")
+__stderr("CPU: {{[0-9]+}} UID: 0 PID: {{[0-9]+}} Comm: {{.*}}")
+__stderr("Call trace:\n"
+"{{([a-zA-Z_][a-zA-Z0-9_]*\\+0x[0-9a-fA-F]+/0x[0-9a-fA-F]+\n"
+"|[ \t]+[^\n]+\n)*}}")
+int stream_arena_subprog_fault(void *ctx)
+{
+ subprog();
+ return 0;
+}
+
+static __noinline int timer_cb(void *map, int *key, struct bpf_timer *timer)
+{
+ int __arena *addr = (int __arena *)0xdeadbeef;
+
+ arena_ptr = &arena;
+ *addr = 1;
+ return 0;
+}
+
+SEC("syscall")
+__arch_x86_64
+__arch_arm64
+__success __retval(0)
+__stderr("ERROR: Arena WRITE access at unmapped address 0x{{.*}}")
+__stderr("CPU: {{[0-9]+}} UID: 0 PID: {{[0-9]+}} Comm: {{.*}}")
+__stderr("Call trace:\n"
+"{{([a-zA-Z_][a-zA-Z0-9_]*\\+0x[0-9a-fA-F]+/0x[0-9a-fA-F]+\n"
+"|[ \t]+[^\n]+\n)*}}")
+int stream_arena_callback_fault(void *ctx)
+{
+ struct bpf_timer *arr_timer;
+
+ arr_timer = bpf_map_lookup_elem(&array, &(int){0});
+ if (!arr_timer)
+ return 0;
+ bpf_timer_init(arr_timer, &array, 1);
+ bpf_timer_set_callback(arr_timer, timer_cb);
+ bpf_timer_start(arr_timer, 0, 0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/stream_fail.c b/tools/testing/selftests/bpf/progs/stream_fail.c
new file mode 100644
index 000000000000..3662515f0107
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/stream_fail.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+
+SEC("syscall")
+__failure __msg("Possibly NULL pointer passed")
+int stream_vprintk_null_arg(void *ctx)
+{
+ bpf_stream_vprintk_impl(BPF_STDOUT, "", NULL, 0, NULL);
+ return 0;
+}
+
+SEC("syscall")
+__failure __msg("R3 type=scalar expected=")
+int stream_vprintk_scalar_arg(void *ctx)
+{
+ bpf_stream_vprintk_impl(BPF_STDOUT, "", (void *)46, 0, NULL);
+ return 0;
+}
+
+SEC("syscall")
+__failure __msg("arg#1 doesn't point to a const string")
+int stream_vprintk_string_arg(void *ctx)
+{
+ bpf_stream_vprintk_impl(BPF_STDOUT, ctx, NULL, 0, NULL);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/string_kfuncs_failure1.c b/tools/testing/selftests/bpf/progs/string_kfuncs_failure1.c
new file mode 100644
index 000000000000..826e6b6aff7e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/string_kfuncs_failure1.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2025 Red Hat, Inc.*/
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <linux/limits.h>
+#include "bpf_misc.h"
+#include "errno.h"
+
+char *user_ptr = (char *)1;
+char *invalid_kern_ptr = (char *)-1;
+
+/*
+ * When passing userspace pointers, the error code differs based on arch:
+ * -ERANGE on arches with non-overlapping address spaces
+ * -EFAULT on other arches
+ */
+#if defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_loongarch) || \
+ defined(__TARGET_ARCH_powerpc) || defined(__TARGET_ARCH_x86)
+#define USER_PTR_ERR -ERANGE
+#else
+#define USER_PTR_ERR -EFAULT
+#endif
+
+/*
+ * On s390, __get_kernel_nofault (used in string kfuncs) returns 0 for NULL and
+ * user_ptr (instead of causing an exception) so the below two groups of tests
+ * are not applicable.
+ */
+#ifndef __TARGET_ARCH_s390
+
+/* Passing NULL to string kfuncs (treated as a userspace ptr) */
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcmp_null1(void *ctx) { return bpf_strcmp(NULL, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strcmp_null2(void *ctx) { return bpf_strcmp("hello", NULL); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcasecmp_null1(void *ctx) { return bpf_strcasecmp(NULL, "HELLO"); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strcasecmp_null2(void *ctx) { return bpf_strcasecmp("HELLO", NULL); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strchr_null(void *ctx) { return bpf_strchr(NULL, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strchrnul_null(void *ctx) { return bpf_strchrnul(NULL, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strnchr_null(void *ctx) { return bpf_strnchr(NULL, 1, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strrchr_null(void *ctx) { return bpf_strrchr(NULL, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strlen_null(void *ctx) { return bpf_strlen(NULL); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strnlen_null(void *ctx) { return bpf_strnlen(NULL, 1); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strspn_null1(void *ctx) { return bpf_strspn(NULL, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strspn_null2(void *ctx) { return bpf_strspn("hello", NULL); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strcspn_null1(void *ctx) { return bpf_strcspn(NULL, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strcspn_null2(void *ctx) { return bpf_strcspn("hello", NULL); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strstr_null1(void *ctx) { return bpf_strstr(NULL, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strstr_null2(void *ctx) { return bpf_strstr("hello", NULL); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strcasestr_null1(void *ctx) { return bpf_strcasestr(NULL, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strcasestr_null2(void *ctx) { return bpf_strcasestr("hello", NULL); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strnstr_null1(void *ctx) { return bpf_strnstr(NULL, "hello", 1); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strnstr_null2(void *ctx) { return bpf_strnstr("hello", NULL, 1); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strncasestr_null1(void *ctx) { return bpf_strncasestr(NULL, "hello", 1); }
+SEC("syscall") __retval(USER_PTR_ERR)int test_strncasestr_null2(void *ctx) { return bpf_strncasestr("hello", NULL, 1); }
+
+/* Passing userspace ptr to string kfuncs */
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcmp_user_ptr1(void *ctx) { return bpf_strcmp(user_ptr, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcmp_user_ptr2(void *ctx) { return bpf_strcmp("hello", user_ptr); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcasecmp_user_ptr1(void *ctx) { return bpf_strcasecmp(user_ptr, "HELLO"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcasecmp_user_ptr2(void *ctx) { return bpf_strcasecmp("HELLO", user_ptr); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strchr_user_ptr(void *ctx) { return bpf_strchr(user_ptr, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strchrnul_user_ptr(void *ctx) { return bpf_strchrnul(user_ptr, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strnchr_user_ptr(void *ctx) { return bpf_strnchr(user_ptr, 1, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strrchr_user_ptr(void *ctx) { return bpf_strrchr(user_ptr, 'a'); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strlen_user_ptr(void *ctx) { return bpf_strlen(user_ptr); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strnlen_user_ptr(void *ctx) { return bpf_strnlen(user_ptr, 1); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strspn_user_ptr1(void *ctx) { return bpf_strspn(user_ptr, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strspn_user_ptr2(void *ctx) { return bpf_strspn("hello", user_ptr); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcspn_user_ptr1(void *ctx) { return bpf_strcspn(user_ptr, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcspn_user_ptr2(void *ctx) { return bpf_strcspn("hello", user_ptr); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strstr_user_ptr1(void *ctx) { return bpf_strstr(user_ptr, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strstr_user_ptr2(void *ctx) { return bpf_strstr("hello", user_ptr); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcasestr_user_ptr1(void *ctx) { return bpf_strcasestr(user_ptr, "hello"); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strcasestr_user_ptr2(void *ctx) { return bpf_strcasestr("hello", user_ptr); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strnstr_user_ptr1(void *ctx) { return bpf_strnstr(user_ptr, "hello", 1); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strnstr_user_ptr2(void *ctx) { return bpf_strnstr("hello", user_ptr, 1); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strncasestr_user_ptr1(void *ctx) { return bpf_strncasestr(user_ptr, "hello", 1); }
+SEC("syscall") __retval(USER_PTR_ERR) int test_strncasestr_user_ptr2(void *ctx) { return bpf_strncasestr("hello", user_ptr, 1); }
+
+#endif /* __TARGET_ARCH_s390 */
+
+/* Passing invalid kernel ptr to string kfuncs should always return -EFAULT */
+SEC("syscall") __retval(-EFAULT) int test_strcmp_pagefault1(void *ctx) { return bpf_strcmp(invalid_kern_ptr, "hello"); }
+SEC("syscall") __retval(-EFAULT) int test_strcmp_pagefault2(void *ctx) { return bpf_strcmp("hello", invalid_kern_ptr); }
+SEC("syscall") __retval(-EFAULT) int test_strcasecmp_pagefault1(void *ctx) { return bpf_strcasecmp(invalid_kern_ptr, "HELLO"); }
+SEC("syscall") __retval(-EFAULT) int test_strcasecmp_pagefault2(void *ctx) { return bpf_strcasecmp("HELLO", invalid_kern_ptr); }
+SEC("syscall") __retval(-EFAULT) int test_strchr_pagefault(void *ctx) { return bpf_strchr(invalid_kern_ptr, 'a'); }
+SEC("syscall") __retval(-EFAULT) int test_strchrnul_pagefault(void *ctx) { return bpf_strchrnul(invalid_kern_ptr, 'a'); }
+SEC("syscall") __retval(-EFAULT) int test_strnchr_pagefault(void *ctx) { return bpf_strnchr(invalid_kern_ptr, 1, 'a'); }
+SEC("syscall") __retval(-EFAULT) int test_strrchr_pagefault(void *ctx) { return bpf_strrchr(invalid_kern_ptr, 'a'); }
+SEC("syscall") __retval(-EFAULT) int test_strlen_pagefault(void *ctx) { return bpf_strlen(invalid_kern_ptr); }
+SEC("syscall") __retval(-EFAULT) int test_strnlen_pagefault(void *ctx) { return bpf_strnlen(invalid_kern_ptr, 1); }
+SEC("syscall") __retval(-EFAULT) int test_strspn_pagefault1(void *ctx) { return bpf_strspn(invalid_kern_ptr, "hello"); }
+SEC("syscall") __retval(-EFAULT) int test_strspn_pagefault2(void *ctx) { return bpf_strspn("hello", invalid_kern_ptr); }
+SEC("syscall") __retval(-EFAULT) int test_strcspn_pagefault1(void *ctx) { return bpf_strcspn(invalid_kern_ptr, "hello"); }
+SEC("syscall") __retval(-EFAULT) int test_strcspn_pagefault2(void *ctx) { return bpf_strcspn("hello", invalid_kern_ptr); }
+SEC("syscall") __retval(-EFAULT) int test_strstr_pagefault1(void *ctx) { return bpf_strstr(invalid_kern_ptr, "hello"); }
+SEC("syscall") __retval(-EFAULT) int test_strstr_pagefault2(void *ctx) { return bpf_strstr("hello", invalid_kern_ptr); }
+SEC("syscall") __retval(-EFAULT) int test_strcasestr_pagefault1(void *ctx) { return bpf_strcasestr(invalid_kern_ptr, "hello"); }
+SEC("syscall") __retval(-EFAULT) int test_strcasestr_pagefault2(void *ctx) { return bpf_strcasestr("hello", invalid_kern_ptr); }
+SEC("syscall") __retval(-EFAULT) int test_strnstr_pagefault1(void *ctx) { return bpf_strnstr(invalid_kern_ptr, "hello", 1); }
+SEC("syscall") __retval(-EFAULT) int test_strnstr_pagefault2(void *ctx) { return bpf_strnstr("hello", invalid_kern_ptr, 1); }
+SEC("syscall") __retval(-EFAULT) int test_strncasestr_pagefault1(void *ctx) { return bpf_strncasestr(invalid_kern_ptr, "hello", 1); }
+SEC("syscall") __retval(-EFAULT) int test_strncasestr_pagefault2(void *ctx) { return bpf_strncasestr("hello", invalid_kern_ptr, 1); }
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/string_kfuncs_failure2.c b/tools/testing/selftests/bpf/progs/string_kfuncs_failure2.c
new file mode 100644
index 000000000000..05e1da1f250f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/string_kfuncs_failure2.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2025 Red Hat, Inc.*/
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <linux/limits.h>
+
+char long_str[XATTR_SIZE_MAX + 1];
+
+SEC("syscall") int test_strcmp_too_long(void *ctx) { return bpf_strcmp(long_str, long_str); }
+SEC("syscall") int test_strcasecmp_too_long(void *ctx) { return bpf_strcasecmp(long_str, long_str); }
+SEC("syscall") int test_strchr_too_long(void *ctx) { return bpf_strchr(long_str, 'b'); }
+SEC("syscall") int test_strchrnul_too_long(void *ctx) { return bpf_strchrnul(long_str, 'b'); }
+SEC("syscall") int test_strnchr_too_long(void *ctx) { return bpf_strnchr(long_str, sizeof(long_str), 'b'); }
+SEC("syscall") int test_strrchr_too_long(void *ctx) { return bpf_strrchr(long_str, 'b'); }
+SEC("syscall") int test_strlen_too_long(void *ctx) { return bpf_strlen(long_str); }
+SEC("syscall") int test_strnlen_too_long(void *ctx) { return bpf_strnlen(long_str, sizeof(long_str)); }
+SEC("syscall") int test_strspn_str_too_long(void *ctx) { return bpf_strspn(long_str, "a"); }
+SEC("syscall") int test_strspn_accept_too_long(void *ctx) { return bpf_strspn("b", long_str); }
+SEC("syscall") int test_strcspn_str_too_long(void *ctx) { return bpf_strcspn(long_str, "b"); }
+SEC("syscall") int test_strcspn_reject_too_long(void *ctx) { return bpf_strcspn("b", long_str); }
+SEC("syscall") int test_strstr_too_long(void *ctx) { return bpf_strstr(long_str, "hello"); }
+SEC("syscall") int test_strcasestr_too_long(void *ctx) { return bpf_strcasestr(long_str, "hello"); }
+SEC("syscall") int test_strnstr_too_long(void *ctx) { return bpf_strnstr(long_str, "hello", sizeof(long_str)); }
+SEC("syscall") int test_strncasestr_too_long(void *ctx) { return bpf_strncasestr(long_str, "hello", sizeof(long_str)); }
+
+char _license[] SEC("license") = "GPL";
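Note that, unlike string_kfuncs_failure1.c, these programs carry no __retval()
annotations, so the expected error is asserted by the test runner instead.
long_str[] is one byte larger than XATTR_SIZE_MAX, apparently the upper bound
the string kfuncs enforce. A sketch of what an annotated variant could look
like, assuming (this hunk does not confirm it) that oversized strings are
reported with -E2BIG; it would also need bpf_misc.h for __retval():

/* Sketch only: the -E2BIG expectation is an assumption, not taken
 * from this patch. long_str[] is defined above as XATTR_SIZE_MAX + 1.
 */
SEC("syscall") __retval(-E2BIG)
int test_strlen_too_long_annotated(void *ctx)
{
	return bpf_strlen(long_str);
}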
diff --git a/tools/testing/selftests/bpf/progs/string_kfuncs_success.c b/tools/testing/selftests/bpf/progs/string_kfuncs_success.c
new file mode 100644
index 000000000000..a8513964516b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/string_kfuncs_success.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2025 Red Hat, Inc.*/
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "errno.h"
+
+char str[] = "hello world";
+
+#define __test(retval) SEC("syscall") __success __retval(retval)
+
+/* Functional tests */
+__test(0) int test_strcmp_eq(void *ctx) { return bpf_strcmp(str, "hello world"); }
+__test(1) int test_strcmp_neq(void *ctx) { return bpf_strcmp(str, "hello"); }
+__test(0) int test_strcasecmp_eq1(void *ctx) { return bpf_strcasecmp(str, "hello world"); }
+__test(0) int test_strcasecmp_eq2(void *ctx) { return bpf_strcasecmp(str, "HELLO WORLD"); }
+__test(0) int test_strcasecmp_eq3(void *ctx) { return bpf_strcasecmp(str, "HELLO world"); }
+__test(1) int test_strcasecmp_neq1(void *ctx) { return bpf_strcasecmp(str, "hello"); }
+__test(1) int test_strcasecmp_neq2(void *ctx) { return bpf_strcasecmp(str, "HELLO"); }
+__test(1) int test_strchr_found(void *ctx) { return bpf_strchr(str, 'e'); }
+__test(11) int test_strchr_null(void *ctx) { return bpf_strchr(str, '\0'); }
+__test(-ENOENT) int test_strchr_notfound(void *ctx) { return bpf_strchr(str, 'x'); }
+__test(1) int test_strchrnul_found(void *ctx) { return bpf_strchrnul(str, 'e'); }
+__test(11) int test_strchrnul_notfound(void *ctx) { return bpf_strchrnul(str, 'x'); }
+__test(1) int test_strnchr_found(void *ctx) { return bpf_strnchr(str, 5, 'e'); }
+__test(11) int test_strnchr_null(void *ctx) { return bpf_strnchr(str, 12, '\0'); }
+__test(-ENOENT) int test_strnchr_notfound(void *ctx) { return bpf_strnchr(str, 5, 'w'); }
+__test(9) int test_strrchr_found(void *ctx) { return bpf_strrchr(str, 'l'); }
+__test(11) int test_strrchr_null(void *ctx) { return bpf_strrchr(str, '\0'); }
+__test(-ENOENT) int test_strrchr_notfound(void *ctx) { return bpf_strrchr(str, 'x'); }
+__test(11) int test_strlen(void *ctx) { return bpf_strlen(str); }
+__test(11) int test_strnlen(void *ctx) { return bpf_strnlen(str, 12); }
+__test(5) int test_strspn(void *ctx) { return bpf_strspn(str, "ehlo"); }
+__test(2) int test_strcspn(void *ctx) { return bpf_strcspn(str, "lo"); }
+__test(6) int test_strstr_found(void *ctx) { return bpf_strstr(str, "world"); }
+__test(6) int test_strcasestr_found(void *ctx) { return bpf_strcasestr(str, "woRLD"); }
+__test(-ENOENT) int test_strstr_notfound(void *ctx) { return bpf_strstr(str, "hi"); }
+__test(-ENOENT) int test_strcasestr_notfound(void *ctx) { return bpf_strcasestr(str, "hi"); }
+__test(0) int test_strstr_empty(void *ctx) { return bpf_strstr(str, ""); }
+__test(0) int test_strcasestr_empty(void *ctx) { return bpf_strcasestr(str, ""); }
+__test(0) int test_strnstr_found1(void *ctx) { return bpf_strnstr("", "", 0); }
+__test(0) int test_strnstr_found2(void *ctx) { return bpf_strnstr(str, "hello", 5); }
+__test(0) int test_strnstr_found3(void *ctx) { return bpf_strnstr(str, "hello", 6); }
+__test(-ENOENT) int test_strnstr_notfound1(void *ctx) { return bpf_strnstr(str, "hi", 10); }
+__test(-ENOENT) int test_strnstr_notfound2(void *ctx) { return bpf_strnstr(str, "hello", 4); }
+__test(-ENOENT) int test_strnstr_notfound3(void *ctx) { return bpf_strnstr("", "a", 0); }
+__test(0) int test_strnstr_empty(void *ctx) { return bpf_strnstr(str, "", 1); }
+__test(0) int test_strncasestr_found1(void *ctx) { return bpf_strncasestr("", "", 0); }
+__test(0) int test_strncasestr_found2(void *ctx) { return bpf_strncasestr(str, "heLLO", 5); }
+__test(0) int test_strncasestr_found3(void *ctx) { return bpf_strncasestr(str, "heLLO", 6); }
+__test(-ENOENT) int test_strncasestr_notfound1(void *ctx) { return bpf_strncasestr(str, "hi", 10); }
+__test(-ENOENT) int test_strncasestr_notfound2(void *ctx) { return bpf_strncasestr(str, "hello", 4); }
+__test(-ENOENT) int test_strncasestr_notfound3(void *ctx) { return bpf_strncasestr("", "a", 0); }
+__test(0) int test_strncasestr_empty(void *ctx) { return bpf_strncasestr(str, "", 1); }
+
+char _license[] SEC("license") = "GPL";
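As the expected values above show, these kfuncs return integer offsets into
the string, or a negative errno such as -ENOENT, rather than pointers:
bpf_strchr(str, 'e') yields 1 and bpf_strstr(str, "world") yields 6 for
str = "hello world". A minimal sketch of consuming such an offset, reusing
str from this file (the bounds check is there to keep the verifier happy):

/* Minimal sketch, not part of the patch: index the string with the
 * offset returned by bpf_strstr().
 */
SEC("syscall")
int use_strstr_offset(void *ctx)
{
	int off = bpf_strstr(str, "world");

	if (off < 0 || off >= sizeof(str) - 1)
		return 0;
	return str[off] == 'w';	/* the offset points at the match */
}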
diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h
index a5c74d31a244..6e1918deaf26 100644
--- a/tools/testing/selftests/bpf/progs/strobemeta.h
+++ b/tools/testing/selftests/bpf/progs/strobemeta.h
@@ -330,9 +330,9 @@ static void *calc_location(struct strobe_value_loc *loc, void *tls_base)
}
bpf_probe_read_user(&tls_ptr, sizeof(void *), dtv);
/* if pointer has (void *)-1 value, then TLS wasn't initialized yet */
- return tls_ptr && tls_ptr != (void *)-1
- ? tls_ptr + tls_index.offset
- : NULL;
+ if (!tls_ptr || tls_ptr == (void *)-1)
+ return NULL;
+ return tls_ptr + tls_index.offset;
}
#ifdef SUBPROGS
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_id_ops_mapping1.c b/tools/testing/selftests/bpf/progs/struct_ops_id_ops_mapping1.c
new file mode 100644
index 000000000000..ad8bb546c9bf
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_id_ops_mapping1.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "../test_kmods/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+#define bpf_kfunc_multi_st_ops_test_1(args) bpf_kfunc_multi_st_ops_test_1(args, st_ops_id)
+int st_ops_id;
+
+int test_pid;
+int test_err;
+
+#define MAP1_MAGIC 1234
+
+SEC("struct_ops")
+int BPF_PROG(test_1, struct st_ops_args *args)
+{
+ return MAP1_MAGIC;
+}
+
+SEC("tp_btf/sys_enter")
+int BPF_PROG(sys_enter, struct pt_regs *regs, long id)
+{
+ struct st_ops_args args = {};
+ struct task_struct *task;
+ int ret;
+
+ task = bpf_get_current_task_btf();
+ if (!test_pid || task->pid != test_pid)
+ return 0;
+
+ ret = bpf_kfunc_multi_st_ops_test_1(&args);
+ if (ret != MAP1_MAGIC)
+ test_err++;
+
+ return 0;
+}
+
+SEC("syscall")
+int syscall_prog(void *ctx)
+{
+ struct st_ops_args args = {};
+ int ret;
+
+ ret = bpf_kfunc_multi_st_ops_test_1(&args);
+ if (ret != MAP1_MAGIC)
+ test_err++;
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_multi_st_ops st_ops_map = {
+ .test_1 = (void *)test_1,
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_id_ops_mapping2.c b/tools/testing/selftests/bpf/progs/struct_ops_id_ops_mapping2.c
new file mode 100644
index 000000000000..cea1a2f4b62f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_id_ops_mapping2.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "../test_kmods/bpf_testmod.h"
+#include "../test_kmods/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+#define bpf_kfunc_multi_st_ops_test_1(args) bpf_kfunc_multi_st_ops_test_1(args, st_ops_id)
+int st_ops_id;
+
+int test_pid;
+int test_err;
+
+#define MAP2_MAGIC 4567
+
+SEC("struct_ops")
+int BPF_PROG(test_1, struct st_ops_args *args)
+{
+ return MAP2_MAGIC;
+}
+
+SEC("tp_btf/sys_enter")
+int BPF_PROG(sys_enter, struct pt_regs *regs, long id)
+{
+ struct st_ops_args args = {};
+ struct task_struct *task;
+ int ret;
+
+ task = bpf_get_current_task_btf();
+ if (!test_pid || task->pid != test_pid)
+ return 0;
+
+ ret = bpf_kfunc_multi_st_ops_test_1(&args);
+ if (ret != MAP2_MAGIC)
+ test_err++;
+
+ return 0;
+}
+
+SEC("syscall")
+int syscall_prog(void *ctx)
+{
+ struct st_ops_args args = {};
+ int ret;
+
+ ret = bpf_kfunc_multi_st_ops_test_1(&args);
+ if (ret != MAP2_MAGIC)
+ test_err++;
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_multi_st_ops st_ops_map = {
+ .test_1 = (void *)test_1,
+};
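Both new files alias bpf_kfunc_multi_st_ops_test_1() with a macro so that
every call site implicitly passes st_ops_id, a global the user-space runner
fills in; the kfunc presumably uses that id to dispatch to the matching
struct_ops map, which is how each map can expect its own MAP*_MAGIC back.
A hypothetical sketch of the user-space half, assuming a libbpf skeleton
and that the id is the attached map's id (names are illustrative, not from
this patch):

/* Hypothetical user-space side; skeleton and map names are assumptions. */
struct bpf_map_info info = {};
__u32 len = sizeof(info);

if (!bpf_map_get_info_by_fd(bpf_map__fd(skel->maps.st_ops_map), &info, &len)) {
	skel->bss->st_ops_id = info.id;	/* which map the kfunc should hit */
	skel->bss->test_pid = getpid();
}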
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_kptr_return.c b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return.c
index 36386b3c23a1..2b98b7710816 100644
--- a/tools/testing/selftests/bpf/progs/struct_ops_kptr_return.c
+++ b/tools/testing/selftests/bpf/progs/struct_ops_kptr_return.c
@@ -9,7 +9,7 @@ void bpf_task_release(struct task_struct *p) __ksym;
/* This test struct_ops BPF programs returning referenced kptr. The verifier should
* allow a referenced kptr or a NULL pointer to be returned. A referenced kptr to task
- * here is acquried automatically as the task argument is tagged with "__ref".
+ * here is acquired automatically as the task argument is tagged with "__ref".
*/
SEC("struct_ops/test_return_ref_kptr")
struct task_struct *BPF_PROG(kptr_return, int dummy,
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c
index 0e4d2ff63ab8..dbe646013811 100644
--- a/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c
+++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c
@@ -7,7 +7,7 @@
char _license[] SEC("license") = "GPL";
-#if defined(__TARGET_ARCH_x86)
+#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64)
bool skip __attribute((__section__(".data"))) = false;
#else
bool skip = true;
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c
index 58d5d8dc2235..3d89ad7cbe2a 100644
--- a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c
+++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c
@@ -7,7 +7,7 @@
char _license[] SEC("license") = "GPL";
-#if defined(__TARGET_ARCH_x86)
+#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64)
bool skip __attribute((__section__(".data"))) = false;
#else
bool skip = true;
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c
index 31e58389bb8b..b1f6d7e5a8e5 100644
--- a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c
+++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c
@@ -7,7 +7,7 @@
char _license[] SEC("license") = "GPL";
-#if defined(__TARGET_ARCH_x86)
+#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64)
bool skip __attribute((__section__(".data"))) = false;
#else
bool skip = true;
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_refcounted.c b/tools/testing/selftests/bpf/progs/struct_ops_refcounted.c
index 76dcb6089d7f..9c0a65466356 100644
--- a/tools/testing/selftests/bpf/progs/struct_ops_refcounted.c
+++ b/tools/testing/selftests/bpf/progs/struct_ops_refcounted.c
@@ -9,7 +9,7 @@ __attribute__((nomerge)) extern void bpf_task_release(struct task_struct *p) __k
/* This is a test BPF program that uses struct_ops to access a referenced
* kptr argument. This is a test for the verifier to ensure that it
- * 1) recongnizes the task as a referenced object (i.e., ref_obj_id > 0), and
+ * 1) recognizes the task as a referenced object (i.e., ref_obj_id > 0), and
* 2) the same reference can be acquired from multiple paths as long as it
* has not been released.
*/
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy1.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy1.c
index 327ca395e860..d556b19413d7 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy1.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy1.c
@@ -2,6 +2,7 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_legacy.h"
+#include "bpf_test_utils.h"
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
@@ -24,6 +25,8 @@ int entry(struct __sk_buff *skb)
{
int ret = 1;
+ clobber_regs_stack();
+
count++;
subprog_tail(skb);
subprog_tail(skb);
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy2.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy2.c
index 72fd0d577506..ae94c9c70ab7 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy2.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy2.c
@@ -2,6 +2,7 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
+#include "bpf_test_utils.h"
int classifier_0(struct __sk_buff *skb);
int classifier_1(struct __sk_buff *skb);
@@ -60,6 +61,8 @@ int tailcall_bpf2bpf_hierarchy_2(struct __sk_buff *skb)
{
int ret = 0;
+ clobber_regs_stack();
+
subprog_tail0(skb);
subprog_tail1(skb);
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy3.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy3.c
index a7fb91cb05b7..56b6b0099840 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy3.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy3.c
@@ -2,6 +2,7 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
+#include "bpf_test_utils.h"
int classifier_0(struct __sk_buff *skb);
@@ -53,6 +54,8 @@ int tailcall_bpf2bpf_hierarchy_3(struct __sk_buff *skb)
{
int ret = 0;
+ clobber_regs_stack();
+
bpf_tail_call_static(skb, &jmp_table0, 0);
__sink(ret);
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy_fentry.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy_fentry.c
index c87f9ca982d3..5261395713cd 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy_fentry.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy_fentry.c
@@ -4,6 +4,7 @@
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
+#include "bpf_test_utils.h"
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
@@ -24,6 +25,8 @@ int subprog_tail(void *ctx)
SEC("fentry/dummy")
int BPF_PROG(fentry, struct sk_buff *skb)
{
+ clobber_regs_stack();
+
count++;
subprog_tail(ctx);
subprog_tail(ctx);
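All four hierarchy tests above gain a clobber_regs_stack() call at the top
of the entry program. bpf_test_utils.h is not part of this hunk, but the
name suggests it dirties caller-saved registers and stack so the tail-call
counter cannot accidentally survive in leftover state. A rough sketch of
what such a helper might look like (an assumption, not the actual header):

/* Assumed shape of the helper; the real bpf_test_utils.h may differ.
 * An empty asm with a full clobber list forces the compiler to assume
 * all listed registers and memory have been overwritten.
 */
static __always_inline void clobber_regs_stack(void)
{
	asm volatile ("" : : : "r0", "r1", "r2", "r3", "r4", "r5",
			       "r6", "r7", "r8", "r9", "memory");
}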
diff --git a/tools/testing/selftests/bpf/progs/task_local_data.bpf.h b/tools/testing/selftests/bpf/progs/task_local_data.bpf.h
new file mode 100644
index 000000000000..432fff2af844
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_local_data.bpf.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __TASK_LOCAL_DATA_BPF_H
+#define __TASK_LOCAL_DATA_BPF_H
+
+/*
+ * Task local data is a library that facilitates sharing per-task data
+ * between user space and bpf programs.
+ *
+ *
+ * USAGE
+ *
+ * A TLD, an entry of data in task local data, first needs to be created by the
+ * user space. This is done by calling the user space API, TLD_DEFINE_KEY() or
+ * tld_create_key(), with the name of the TLD and the size.
+ *
+ * TLD_DEFINE_KEY(prio, "priority", sizeof(int));
+ *
+ * or
+ *
+ * void func_call(...) {
+ * tld_key_t prio, in_cs;
+ *
+ * prio = tld_create_key("priority", sizeof(int));
+ * in_cs = tld_create_key("in_critical_section", sizeof(bool));
+ * ...
+ *
+ * A key associated with the TLD, which has an opaque type tld_key_t, will be
+ * initialized or returned. It can be used to get a pointer to the TLD in the
+ * user space by calling tld_get_data().
+ *
+ * In a bpf program, tld_object_init() first needs to be called to initialize a
+ * tld_object on the stack. Then, TLDs can be accessed by calling tld_get_data().
+ * The API will try to fetch the key by the name and use it to locate the data.
+ * A pointer to the TLD will be returned. It also caches the key in a task local
+ * storage map, tld_key_map, whose value type, struct tld_keys, must be defined
+ * by the developer.
+ *
+ * struct tld_keys {
+ * tld_key_t prio;
+ * tld_key_t in_cs;
+ * };
+ *
+ * SEC("struct_ops")
+ * void prog(struct task_struct *task, ...)
+ * {
+ * struct tld_object tld_obj;
+ * int err, *p;
+ *
+ * err = tld_object_init(task, &tld_obj);
+ * if (err)
+ * return;
+ *
+ * p = tld_get_data(&tld_obj, prio, "priority", sizeof(int));
+ * if (p)
+ * // do something depending on *p
+ */
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+
+#define TLD_ROUND_MASK(x, y) ((__typeof__(x))((y) - 1))
+#define TLD_ROUND_UP(x, y) ((((x) - 1) | TLD_ROUND_MASK(x, y)) + 1)
+
+#define TLD_MAX_DATA_CNT (__PAGE_SIZE / sizeof(struct tld_metadata) - 1)
+
+#ifndef TLD_NAME_LEN
+#define TLD_NAME_LEN 62
+#endif
+
+#ifndef TLD_KEY_MAP_CREATE_RETRY
+#define TLD_KEY_MAP_CREATE_RETRY 10
+#endif
+
+typedef struct {
+ __s16 off;
+} tld_key_t;
+
+struct tld_metadata {
+ char name[TLD_NAME_LEN];
+ __u16 size;
+};
+
+struct tld_meta_u {
+ __u8 cnt;
+ __u16 size;
+ struct tld_metadata metadata[TLD_MAX_DATA_CNT];
+};
+
+struct tld_data_u {
+ __u64 start; /* offset of tld_data_u->data in a page */
+ char data[__PAGE_SIZE - sizeof(__u64)];
+};
+
+struct tld_map_value {
+ struct tld_data_u __uptr *data;
+ struct tld_meta_u __uptr *meta;
+};
+
+typedef struct tld_uptr_dummy {
+ struct tld_data_u data[0];
+ struct tld_meta_u meta[0];
+} *tld_uptr_dummy_t;
+
+struct tld_object {
+ struct tld_map_value *data_map;
+ struct tld_keys *key_map;
+ /*
+ * Force the compiler to generate the actual definition of tld_meta_u
+	 * and tld_data_u in BTF. Without it, tld_meta_u and tld_data_u will
+ * be BTF_KIND_FWD.
+ */
+ tld_uptr_dummy_t dummy[0];
+};
+
+/*
+ * Map value of tld_key_map for caching keys. Must be defined by the developer.
+ * Members should be tld_key_t and passed to the 3rd argument of tld_fetch_key().
+ */
+struct tld_keys;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct tld_map_value);
+} tld_data_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct tld_keys);
+} tld_key_map SEC(".maps");
+
+/**
+ * tld_object_init() - Initialize a tld_object.
+ *
+ * @task: The task_struct of the target task
+ * @tld_obj: A pointer to a tld_object to be initialized
+ *
+ * Return 0 on success; -ENODATA if the user space did not initialize task local data
+ * for the current task through tld_get_data(); -ENOMEM if the creation of tld_key_map
+ * fails
+ */
+__attribute__((unused))
+static int tld_object_init(struct task_struct *task, struct tld_object *tld_obj)
+{
+ int i;
+
+ tld_obj->data_map = bpf_task_storage_get(&tld_data_map, task, 0, 0);
+ if (!tld_obj->data_map)
+ return -ENODATA;
+
+ bpf_for(i, 0, TLD_KEY_MAP_CREATE_RETRY) {
+ tld_obj->key_map = bpf_task_storage_get(&tld_key_map, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (likely(tld_obj->key_map))
+ break;
+ }
+ if (!tld_obj->key_map)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/*
+ * Return the offset of the TLD if @name is found. Otherwise, return the current
+ * TLD count in the nonpositive range so that the next tld_get_data() can skip
+ * fetching the key if no new TLD has been added, or start comparing names from
+ * the first newly added TLD.
+ */
+__attribute__((unused))
+static int __tld_fetch_key(struct tld_object *tld_obj, const char *name, int i_start)
+{
+ struct tld_metadata *metadata;
+ int i, cnt, start, off = 0;
+
+ if (!tld_obj->data_map || !tld_obj->data_map->data || !tld_obj->data_map->meta)
+ return 0;
+
+ start = tld_obj->data_map->data->start;
+ cnt = tld_obj->data_map->meta->cnt;
+ metadata = tld_obj->data_map->meta->metadata;
+
+ bpf_for(i, 0, cnt) {
+ if (i >= TLD_MAX_DATA_CNT)
+ break;
+
+ if (i >= i_start && !bpf_strncmp(metadata[i].name, TLD_NAME_LEN, name))
+ return start + off;
+
+ off += TLD_ROUND_UP(metadata[i].size, 8);
+ }
+
+ return -cnt;
+}
+
+/**
+ * tld_get_data() - Retrieve a pointer to the TLD associated with the name.
+ *
+ * @tld_obj: A pointer to a valid tld_object initialized by tld_object_init()
+ * @key: The cached key of the TLD in tld_key_map
+ * @name: The name of the key associated with a TLD
+ * @size: The size of the TLD. Must be a known constant value
+ *
+ * Return a pointer to the TLD associated with @name; NULL if not found or @size is too
+ * big. If the TLD is found, @key caches its key to speed up subsequent calls.
+ * It should be defined by the developer as a tld_key_t member of struct tld_keys.
+ */
+#define tld_get_data(tld_obj, key, name, size) \
+ ({ \
+ void *data = NULL, *_data = (tld_obj)->data_map->data; \
+ long off = (tld_obj)->key_map->key.off; \
+ int cnt; \
+ \
+ if (likely(_data)) { \
+ if (likely(off > 0)) { \
+ barrier_var(off); \
+ if (likely(off < __PAGE_SIZE - size)) \
+ data = _data + off; \
+ } else { \
+ cnt = -(off); \
+ if (likely((tld_obj)->data_map->meta) && \
+ cnt < (tld_obj)->data_map->meta->cnt) { \
+ off = __tld_fetch_key(tld_obj, name, cnt); \
+ (tld_obj)->key_map->key.off = off; \
+ \
+ if (likely(off < __PAGE_SIZE - size)) { \
+ barrier_var(off); \
+ if (off > 0) \
+ data = _data + off; \
+ } \
+ } \
+ } \
+ } \
+ data; \
+ })
+
+#endif
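Pulling the header's pieces together, a complete BPF-side consumer looks
roughly like this (a sketch built from the APIs above; the key name and
attach point are illustrative):

/* Sketch: BPF-side TLD consumer; needs bpf_tracing.h for BPF_PROG. */
struct tld_keys {
	tld_key_t prio;		/* caches the "priority" key */
};

SEC("tp_btf/sys_enter")
int BPF_PROG(read_prio, struct pt_regs *regs, long id)
{
	struct tld_object tld_obj;
	int err, *p;

	err = tld_object_init(bpf_get_current_task_btf(), &tld_obj);
	if (err)
		return 0;	/* user space has not set up TLD yet */

	p = tld_get_data(&tld_obj, prio, "priority", sizeof(int));
	if (p)
		bpf_printk("priority: %d", *p);
	return 0;
}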
diff --git a/tools/testing/selftests/bpf/progs/task_work.c b/tools/testing/selftests/bpf/progs/task_work.c
new file mode 100644
index 000000000000..663a80990f8f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_work.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <string.h>
+#include <stdbool.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "errno.h"
+
+char _license[] SEC("license") = "GPL";
+
+const void *user_ptr = NULL;
+
+struct elem {
+ char data[128];
+ struct bpf_task_work tw;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} hmap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} arrmap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_HASH);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} lrumap SEC(".maps");
+
+static int process_work(struct bpf_map *map, void *key, void *value)
+{
+ struct elem *work = value;
+
+ bpf_copy_from_user_str(work->data, sizeof(work->data), (const void *)user_ptr, 0);
+ return 0;
+}
+
+int key = 0;
+
+SEC("perf_event")
+int oncpu_hash_map(struct pt_regs *args)
+{
+ struct elem empty_work = { .data = { 0 } };
+ struct elem *work;
+ struct task_struct *task;
+ int err;
+
+ task = bpf_get_current_task_btf();
+ err = bpf_map_update_elem(&hmap, &key, &empty_work, BPF_NOEXIST);
+ if (err)
+ return 0;
+ work = bpf_map_lookup_elem(&hmap, &key);
+ if (!work)
+ return 0;
+
+ bpf_task_work_schedule_resume_impl(task, &work->tw, &hmap, process_work, NULL);
+ return 0;
+}
+
+SEC("perf_event")
+int oncpu_array_map(struct pt_regs *args)
+{
+ struct elem *work;
+ struct task_struct *task;
+
+ task = bpf_get_current_task_btf();
+ work = bpf_map_lookup_elem(&arrmap, &key);
+ if (!work)
+ return 0;
+ bpf_task_work_schedule_signal_impl(task, &work->tw, &arrmap, process_work, NULL);
+ return 0;
+}
+
+SEC("perf_event")
+int oncpu_lru_map(struct pt_regs *args)
+{
+ struct elem empty_work = { .data = { 0 } };
+ struct elem *work;
+ struct task_struct *task;
+ int err;
+
+ task = bpf_get_current_task_btf();
+ work = bpf_map_lookup_elem(&lrumap, &key);
+ if (work)
+ return 0;
+ err = bpf_map_update_elem(&lrumap, &key, &empty_work, BPF_NOEXIST);
+ if (err)
+ return 0;
+ work = bpf_map_lookup_elem(&lrumap, &key);
+ if (!work || work->data[0])
+ return 0;
+ bpf_task_work_schedule_resume_impl(task, &work->tw, &lrumap, process_work, NULL);
+ return 0;
+}
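The three programs exercise one map type each with the same pattern; the
_resume and _signal suffixes presumably mirror the kernel's task_work
notification modes (run on next return to user space vs. interrupt via
signal), though this hunk does not spell that out. The invariant to note,
which task_work_fail.c below tests directly, is that &work->tw must live
inside the very map passed as the third argument:

/* Sketch of the call pattern; hmap, key and process_work as above. */
SEC("perf_event")
int oncpu_sketch(struct pt_regs *args)
{
	struct task_struct *task = bpf_get_current_task_btf();
	struct elem *work = bpf_map_lookup_elem(&hmap, &key);

	if (work)	/* &work->tw lives in hmap, the 3rd argument */
		bpf_task_work_schedule_resume_impl(task, &work->tw, &hmap,
						   process_work, NULL);
	return 0;
}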
diff --git a/tools/testing/selftests/bpf/progs/task_work_fail.c b/tools/testing/selftests/bpf/progs/task_work_fail.c
new file mode 100644
index 000000000000..1270953fd092
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_work_fail.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <string.h>
+#include <stdbool.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+const void *user_ptr = NULL;
+
+struct elem {
+ char data[128];
+ struct bpf_task_work tw;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} hmap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} arrmap SEC(".maps");
+
+static int process_work(struct bpf_map *map, void *key, void *value)
+{
+ struct elem *work = value;
+
+ bpf_copy_from_user_str(work->data, sizeof(work->data), (const void *)user_ptr, 0);
+ return 0;
+}
+
+int key = 0;
+
+SEC("perf_event")
+__failure __msg("doesn't match map pointer in R3")
+int mismatch_map(struct pt_regs *args)
+{
+ struct elem *work;
+ struct task_struct *task;
+
+ task = bpf_get_current_task_btf();
+ work = bpf_map_lookup_elem(&arrmap, &key);
+ if (!work)
+ return 0;
+ bpf_task_work_schedule_resume_impl(task, &work->tw, &hmap, process_work, NULL);
+ return 0;
+}
+
+SEC("perf_event")
+__failure __msg("arg#1 doesn't point to a map value")
+int no_map_task_work(struct pt_regs *args)
+{
+ struct task_struct *task;
+ struct bpf_task_work tw;
+
+ task = bpf_get_current_task_btf();
+ bpf_task_work_schedule_resume_impl(task, &tw, &hmap, process_work, NULL);
+ return 0;
+}
+
+SEC("perf_event")
+__failure __msg("Possibly NULL pointer passed to trusted arg1")
+int task_work_null(struct pt_regs *args)
+{
+ struct task_struct *task;
+
+ task = bpf_get_current_task_btf();
+ bpf_task_work_schedule_resume_impl(task, NULL, &hmap, process_work, NULL);
+ return 0;
+}
+
+SEC("perf_event")
+__failure __msg("Possibly NULL pointer passed to trusted arg2")
+int map_null(struct pt_regs *args)
+{
+ struct elem *work;
+ struct task_struct *task;
+
+ task = bpf_get_current_task_btf();
+ work = bpf_map_lookup_elem(&arrmap, &key);
+ if (!work)
+ return 0;
+ bpf_task_work_schedule_resume_impl(task, &work->tw, NULL, process_work, NULL);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/task_work_stress.c b/tools/testing/selftests/bpf/progs/task_work_stress.c
new file mode 100644
index 000000000000..55e555f7f41b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_work_stress.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <string.h>
+#include <stdbool.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+#define ENTRIES 128
+
+char _license[] SEC("license") = "GPL";
+
+__u64 callback_scheduled = 0;
+__u64 callback_success = 0;
+__u64 schedule_error = 0;
+__u64 delete_success = 0;
+
+struct elem {
+ __u32 count;
+ struct bpf_task_work tw;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __uint(max_entries, ENTRIES);
+ __type(key, int);
+ __type(value, struct elem);
+} hmap SEC(".maps");
+
+static int process_work(struct bpf_map *map, void *key, void *value)
+{
+ __sync_fetch_and_add(&callback_success, 1);
+ return 0;
+}
+
+SEC("syscall")
+int schedule_task_work(void *ctx)
+{
+ struct elem empty_work = {.count = 0};
+ struct elem *work;
+ int key = 0, err;
+
+ key = bpf_ktime_get_ns() % ENTRIES;
+ work = bpf_map_lookup_elem(&hmap, &key);
+ if (!work) {
+ bpf_map_update_elem(&hmap, &key, &empty_work, BPF_NOEXIST);
+ work = bpf_map_lookup_elem(&hmap, &key);
+ if (!work)
+ return 0;
+ }
+ err = bpf_task_work_schedule_signal_impl(bpf_get_current_task_btf(), &work->tw, &hmap,
+ process_work, NULL);
+ if (err)
+ __sync_fetch_and_add(&schedule_error, 1);
+ else
+ __sync_fetch_and_add(&callback_scheduled, 1);
+ return 0;
+}
+
+SEC("syscall")
+int delete_task_work(void *ctx)
+{
+ int key = 0, err;
+
+ key = bpf_get_prandom_u32() % ENTRIES;
+ err = bpf_map_delete_elem(&hmap, &key);
+ if (!err)
+ __sync_fetch_and_add(&delete_success, 1);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c b/tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c
index a58b5194fc89..022291f21dfb 100644
--- a/tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c
+++ b/tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c
@@ -8,8 +8,6 @@ char _license[] SEC("license") = "GPL";
#define USEC_PER_SEC 1000000UL
-#define min(a, b) ((a) < (b) ? (a) : (b))
-
static unsigned int tcp_left_out(const struct tcp_sock *tp)
{
return tp->sacked_out + tp->lost_out;
diff --git a/tools/testing/selftests/bpf/progs/test_check_mtu.c b/tools/testing/selftests/bpf/progs/test_check_mtu.c
index 2ec1de11a3ae..7b6b2b342c1d 100644
--- a/tools/testing/selftests/bpf/progs/test_check_mtu.c
+++ b/tools/testing/selftests/bpf/progs/test_check_mtu.c
@@ -7,6 +7,7 @@
#include <stddef.h>
#include <stdint.h>
+#include <errno.h>
char _license[] SEC("license") = "GPL";
@@ -288,3 +289,14 @@ int tc_input_len_exceed(struct __sk_buff *ctx)
global_bpf_mtu_xdp = mtu_len;
return retval;
}
+
+SEC("tc")
+int tc_chk_segs_flag(struct __sk_buff *ctx)
+{
+ __u32 mtu_len = 0;
+ int err;
+
+ err = bpf_check_mtu(ctx, GLOBAL_USER_IFINDEX, &mtu_len, 0, BPF_MTU_CHK_SEGS);
+
+ return err == -EINVAL ? BPF_OK : BPF_DROP;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect.c b/tools/testing/selftests/bpf/progs/test_cls_redirect.c
index f344c6835e84..26a53e54b8fa 100644
--- a/tools/testing/selftests/bpf/progs/test_cls_redirect.c
+++ b/tools/testing/selftests/bpf/progs/test_cls_redirect.c
@@ -22,6 +22,7 @@
#include "bpf_compiler.h"
#include "test_cls_redirect.h"
+#include "bpf_misc.h"
#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
@@ -31,9 +32,6 @@
#define INLINING __always_inline
#endif
-#define offsetofend(TYPE, MEMBER) \
- (offsetof(TYPE, MEMBER) + sizeof((((TYPE *)0)->MEMBER)))
-
#define IP_OFFSET_MASK (0x1FFF)
#define IP_MF (0x2000)
@@ -129,7 +127,7 @@ typedef uint8_t *net_ptr __attribute__((align_value(8)));
typedef struct buf {
struct __sk_buff *skb;
net_ptr head;
- /* NB: tail musn't have alignment other than 1, otherwise
+ /* NB: tail mustn't have alignment other than 1, otherwise
* LLVM will go and eliminate code, e.g. when checking packet lengths.
*/
uint8_t *const tail;
diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c
index d0f7670351e5..dfd4a2710391 100644
--- a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c
+++ b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c
@@ -494,7 +494,7 @@ static ret_t get_next_hop(struct bpf_dynptr *dynptr, __u64 *offset, encap_header
*offset += sizeof(*next_hop);
- /* Skip the remainig next hops (may be zero). */
+ /* Skip the remaining next hops (may be zero). */
return skip_next_hops(offset, encap->unigue.hop_count - encap->unigue.next_hop - 1);
}
diff --git a/tools/testing/selftests/bpf/progs/test_global_map_resize.c b/tools/testing/selftests/bpf/progs/test_global_map_resize.c
index a3f220ba7025..ee65bad0436d 100644
--- a/tools/testing/selftests/bpf/progs/test_global_map_resize.c
+++ b/tools/testing/selftests/bpf/progs/test_global_map_resize.c
@@ -32,6 +32,16 @@ int my_int_last SEC(".data.array_not_last");
int percpu_arr[1] SEC(".data.percpu_arr");
+/* at least one extern is included, to ensure that a specific
+ * regression is tested whereby resizing resulted in a use-after-free
+ * bug after type information is invalidated by the resize operation.
+ *
+ * There isn't a particularly good API to test for this specific condition,
+ * but by having externs for the resizing tests it will cover this path.
+ */
+extern int LINUX_KERNEL_VERSION __kconfig;
+long version_sink;
+
SEC("tp/syscalls/sys_enter_getpid")
int bss_array_sum(void *ctx)
{
@@ -44,6 +54,9 @@ int bss_array_sum(void *ctx)
for (size_t i = 0; i < bss_array_len; ++i)
sum += array[i];
+ /* see above; ensure this is not optimized out */
+ version_sink = LINUX_KERNEL_VERSION;
+
return 0;
}
@@ -59,6 +72,9 @@ int data_array_sum(void *ctx)
for (size_t i = 0; i < data_array_len; ++i)
sum += my_array[i];
+ /* see above; ensure this is not optimized out */
+ version_sink = LINUX_KERNEL_VERSION;
+
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_lookup_key.c b/tools/testing/selftests/bpf/progs/test_lookup_key.c
index cdbbb12f1491..1f7e1e59b073 100644
--- a/tools/testing/selftests/bpf/progs/test_lookup_key.c
+++ b/tools/testing/selftests/bpf/progs/test_lookup_key.c
@@ -14,11 +14,11 @@
char _license[] SEC("license") = "GPL";
__u32 monitored_pid;
-__u32 key_serial;
+__s32 key_serial;
__u32 key_id;
__u64 flags;
-extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym;
+extern struct bpf_key *bpf_lookup_user_key(__s32 serial, __u64 flags) __ksym;
extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym;
extern void bpf_key_put(struct bpf_key *key) __ksym;
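The __u32 to __s32 switch matches the kernel's key_serial_t, a signed
32-bit integer, precisely so that negative serials can name special
keyrings; for reference, from include/uapi/linux/keyctl.h:

/* Special keyring IDs are negative, hence the signed serial type. */
#define KEY_SPEC_THREAD_KEYRING		-1
#define KEY_SPEC_PROCESS_KEYRING	-2
#define KEY_SPEC_SESSION_KEYRING	-3
#define KEY_SPEC_USER_KEYRING		-4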
diff --git a/tools/testing/selftests/bpf/progs/test_overhead.c b/tools/testing/selftests/bpf/progs/test_overhead.c
index abb7344b531f..5edf3cdc213d 100644
--- a/tools/testing/selftests/bpf/progs/test_overhead.c
+++ b/tools/testing/selftests/bpf/progs/test_overhead.c
@@ -1,9 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
-#include <stdbool.h>
-#include <stddef.h>
-#include <linux/bpf.h>
-#include <linux/ptrace.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/testing/selftests/bpf/progs/test_perf_branches.c b/tools/testing/selftests/bpf/progs/test_perf_branches.c
index a1ccc831c882..05ac9410cd68 100644
--- a/tools/testing/selftests/bpf/progs/test_perf_branches.c
+++ b/tools/testing/selftests/bpf/progs/test_perf_branches.c
@@ -8,6 +8,7 @@
#include <bpf/bpf_tracing.h>
int valid = 0;
+int run_cnt = 0;
int required_size_out = 0;
int written_stack_out = 0;
int written_global_out = 0;
@@ -24,6 +25,8 @@ int perf_branches(void *ctx)
__u64 entries[4 * 3] = {0};
int required_size, written_stack, written_global;
+ ++run_cnt;
+
/* write to stack */
written_stack = bpf_read_branch_records(ctx, entries, sizeof(entries), 0);
/* ignore spurious events */
diff --git a/tools/testing/selftests/bpf/progs/test_pinning_devmap.c b/tools/testing/selftests/bpf/progs/test_pinning_devmap.c
new file mode 100644
index 000000000000..c855f8f87eff
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_pinning_devmap.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_DEVMAP);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(pinning, LIBBPF_PIN_BY_NAME);
+} pinmap1 SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_DEVMAP);
+ __uint(max_entries, 2);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(pinning, LIBBPF_PIN_BY_NAME);
+} pinmap2 SEC(".maps");
diff --git a/tools/testing/selftests/bpf/progs/test_pinning_htab.c b/tools/testing/selftests/bpf/progs/test_pinning_htab.c
new file mode 100644
index 000000000000..ae227930c73c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_pinning_htab.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct timer_val {
+ struct bpf_timer timer;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __u32);
+ __type(value, struct timer_val);
+ __uint(max_entries, 1);
+} timer_prealloc SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __u32);
+ __type(value, struct timer_val);
+ __uint(max_entries, 1);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+} timer_no_prealloc SEC(".maps");
diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf_overwrite.c b/tools/testing/selftests/bpf/progs/test_ringbuf_overwrite.c
new file mode 100644
index 000000000000..ff4aa67ddacc
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_ringbuf_overwrite.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2025. Huawei Technologies Co., Ltd */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(map_flags, BPF_F_RB_OVERWRITE);
+} ringbuf SEC(".maps");
+
+int pid;
+
+const volatile unsigned long LEN1;
+const volatile unsigned long LEN2;
+const volatile unsigned long LEN3;
+const volatile unsigned long LEN4;
+const volatile unsigned long LEN5;
+
+long reserve1_fail = 0;
+long reserve2_fail = 0;
+long reserve3_fail = 0;
+long reserve4_fail = 0;
+long reserve5_fail = 0;
+
+unsigned long avail_data = 0;
+unsigned long ring_size = 0;
+unsigned long cons_pos = 0;
+unsigned long prod_pos = 0;
+unsigned long over_pos = 0;
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int test_overwrite_ringbuf(void *ctx)
+{
+ char *rec1, *rec2, *rec3, *rec4, *rec5;
+ int cur_pid = bpf_get_current_pid_tgid() >> 32;
+
+ if (cur_pid != pid)
+ return 0;
+
+ rec1 = bpf_ringbuf_reserve(&ringbuf, LEN1, 0);
+ if (!rec1) {
+ reserve1_fail = 1;
+ return 0;
+ }
+
+ rec2 = bpf_ringbuf_reserve(&ringbuf, LEN2, 0);
+ if (!rec2) {
+ bpf_ringbuf_discard(rec1, 0);
+ reserve2_fail = 1;
+ return 0;
+ }
+
+ rec3 = bpf_ringbuf_reserve(&ringbuf, LEN3, 0);
+ /* expect failure */
+ if (!rec3) {
+ reserve3_fail = 1;
+ } else {
+ bpf_ringbuf_discard(rec1, 0);
+ bpf_ringbuf_discard(rec2, 0);
+ bpf_ringbuf_discard(rec3, 0);
+ return 0;
+ }
+
+ rec4 = bpf_ringbuf_reserve(&ringbuf, LEN4, 0);
+ if (!rec4) {
+ reserve4_fail = 1;
+ bpf_ringbuf_discard(rec1, 0);
+ bpf_ringbuf_discard(rec2, 0);
+ return 0;
+ }
+
+ bpf_ringbuf_submit(rec1, 0);
+ bpf_ringbuf_submit(rec2, 0);
+ bpf_ringbuf_submit(rec4, 0);
+
+ rec5 = bpf_ringbuf_reserve(&ringbuf, LEN5, 0);
+ if (!rec5) {
+ reserve5_fail = 1;
+ return 0;
+ }
+
+ for (int i = 0; i < LEN3; i++)
+ rec5[i] = 0xdd;
+
+ bpf_ringbuf_submit(rec5, 0);
+
+ ring_size = bpf_ringbuf_query(&ringbuf, BPF_RB_RING_SIZE);
+ avail_data = bpf_ringbuf_query(&ringbuf, BPF_RB_AVAIL_DATA);
+ cons_pos = bpf_ringbuf_query(&ringbuf, BPF_RB_CONS_POS);
+ prod_pos = bpf_ringbuf_query(&ringbuf, BPF_RB_PROD_POS);
+ over_pos = bpf_ringbuf_query(&ringbuf, BPF_RB_OVERWRITE_POS);
+
+ return 0;
+}
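BPF_F_RB_OVERWRITE and BPF_RB_OVERWRITE_POS are new here. The test's shape
suggests that a reserve still fails while older records sit reserved but
unsubmitted (rec3), and that once everything is submitted a later reserve
(rec5) reclaims the oldest data instead of failing. The position
bookkeeping this implies, stated as assumptions rather than documented
semantics:

/* Assumed invariants in overwrite mode (not stated in this patch):
 * the overwrite position trails the producer, never falls behind the
 * consumer, and available data is measured from it:
 *
 *	cons_pos <= over_pos <= prod_pos
 *	avail_data == prod_pos - over_pos
 */
if (over_pos < cons_pos || prod_pos < over_pos)
	bpf_printk("unexpected overwrite ringbuf positions");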
diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf_write.c b/tools/testing/selftests/bpf/progs/test_ringbuf_write.c
index 350513c0e4c9..f063a0013f85 100644
--- a/tools/testing/selftests/bpf/progs/test_ringbuf_write.c
+++ b/tools/testing/selftests/bpf/progs/test_ringbuf_write.c
@@ -26,11 +26,11 @@ int test_ringbuf_write(void *ctx)
if (cur_pid != pid)
return 0;
- sample1 = bpf_ringbuf_reserve(&ringbuf, 0x3000, 0);
+ sample1 = bpf_ringbuf_reserve(&ringbuf, 0x30000, 0);
if (!sample1)
return 0;
/* first one can pass */
- sample2 = bpf_ringbuf_reserve(&ringbuf, 0x3000, 0);
+ sample2 = bpf_ringbuf_reserve(&ringbuf, 0x30000, 0);
if (!sample2) {
bpf_ringbuf_discard(sample1, 0);
__sync_fetch_and_add(&discarded, 1);
diff --git a/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c b/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c
index 8ef6b39335b6..34b30e2603f0 100644
--- a/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c
+++ b/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c
@@ -40,7 +40,7 @@ char digest[MAGIC_SIZE + SIZEOF_STRUCT_FSVERITY_DIGEST + SHA256_DIGEST_SIZE];
__u32 monitored_pid;
char sig[MAX_SIG_SIZE];
__u32 sig_size;
-__u32 user_keyring_serial;
+__s32 user_keyring_serial;
SEC("lsm.s/file_open")
int BPF_PROG(test_file_open, struct file *f)
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c b/tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c
index 2796dd8545eb..1c7941a4ad00 100644
--- a/tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c
@@ -1,8 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 ByteDance */
-#include <linux/bpf.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
+#ifndef PAGE_SIZE
+#define PAGE_SIZE __PAGE_SIZE
+#endif
+#define BPF_SKB_MAX_LEN (PAGE_SIZE << 2)
+
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 1);
@@ -31,7 +36,7 @@ int prog_skb_verdict(struct __sk_buff *skb)
change_tail_ret = bpf_skb_change_tail(skb, skb->len + 1, 0);
return SK_PASS;
} else if (data[0] == 'E') { /* Error */
- change_tail_ret = bpf_skb_change_tail(skb, 65535, 0);
+ change_tail_ret = bpf_skb_change_tail(skb, BPF_SKB_MAX_LEN, 0);
return SK_PASS;
}
return SK_PASS;
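Swapping the literal 65535 for BPF_SKB_MAX_LEN makes the 'E' (error) case
page-size independent: bpf_skb_change_tail caps packets at a little under
four pages (SKB_MAX_ALLOC), so 65535 exceeds the cap on 4 KiB pages but not
on 64 KiB ones, while PAGE_SIZE << 2 always does. The same define is
introduced for test_tc_change_tail.c further down. Rough numbers, with the
~4-page cap as the stated assumption:

/* Assuming bpf_skb_change_tail rejects lengths above SKB_MAX_ALLOC,
 * which is (PAGE_SIZE << 2) minus the skb_shared_info overhead:
 *   4 KiB pages:  cap just under 16 KiB -> 65535 and 16384 both fail
 *   64 KiB pages: cap just under 256 KiB -> 65535 would now succeed,
 *                 but BPF_SKB_MAX_LEN = 262144 still fails
 */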
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_ktls.c b/tools/testing/selftests/bpf/progs/test_sockmap_ktls.c
index 8bdb9987c0c7..83df4919c224 100644
--- a/tools/testing/selftests/bpf/progs/test_sockmap_ktls.c
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_ktls.c
@@ -7,6 +7,8 @@ int cork_byte;
int push_start;
int push_end;
int apply_bytes;
+int pop_start;
+int pop_end;
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
@@ -22,6 +24,8 @@ int prog_sk_policy(struct sk_msg_md *msg)
bpf_msg_cork_bytes(msg, cork_byte);
if (push_start > 0 && push_end > 0)
bpf_msg_push_data(msg, push_start, push_end, 0);
+ if (pop_start >= 0 && pop_end > 0)
+ bpf_msg_pop_data(msg, pop_start, pop_end, 0);
return SK_PASS;
}
diff --git a/tools/testing/selftests/bpf/progs/test_task_local_data.c b/tools/testing/selftests/bpf/progs/test_task_local_data.c
new file mode 100644
index 000000000000..fffafc013044
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_task_local_data.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+
+#include "task_local_data.bpf.h"
+
+struct tld_keys {
+ tld_key_t value0;
+ tld_key_t value1;
+ tld_key_t value2;
+ tld_key_t value_not_exist;
+};
+
+struct test_tld_struct {
+ __u64 a;
+ __u64 b;
+ __u64 c;
+ __u64 d;
+};
+
+int test_value0;
+int test_value1;
+struct test_tld_struct test_value2;
+
+SEC("syscall")
+int task_main(void *ctx)
+{
+ struct tld_object tld_obj;
+ struct test_tld_struct *struct_p;
+ struct task_struct *task;
+ int err, *int_p;
+
+ task = bpf_get_current_task_btf();
+ err = tld_object_init(task, &tld_obj);
+ if (err)
+ return 1;
+
+ int_p = tld_get_data(&tld_obj, value0, "value0", sizeof(int));
+ if (int_p)
+ test_value0 = *int_p;
+ else
+ return 2;
+
+ int_p = tld_get_data(&tld_obj, value1, "value1", sizeof(int));
+ if (int_p)
+ test_value1 = *int_p;
+ else
+ return 3;
+
+ struct_p = tld_get_data(&tld_obj, value2, "value2", sizeof(struct test_tld_struct));
+ if (struct_p)
+ test_value2 = *struct_p;
+ else
+ return 4;
+
+ int_p = tld_get_data(&tld_obj, value_not_exist, "value_not_exist", sizeof(int));
+ if (int_p)
+ return 5;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_tc_change_tail.c b/tools/testing/selftests/bpf/progs/test_tc_change_tail.c
index 28edafe803f0..fcba8299f0bc 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_change_tail.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_change_tail.c
@@ -1,11 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/bpf.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
-#include <linux/if_ether.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/udp.h>
-#include <linux/pkt_cls.h>
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE __PAGE_SIZE
+#endif
+#define BPF_SKB_MAX_LEN (PAGE_SIZE << 2)
long change_tail_ret = 1;
@@ -94,7 +94,7 @@ int change_tail(struct __sk_buff *skb)
bpf_skb_change_tail(skb, len, 0);
return TCX_PASS;
} else if (payload[0] == 'E') { /* Error */
- change_tail_ret = bpf_skb_change_tail(skb, 65535, 0);
+ change_tail_ret = bpf_skb_change_tail(skb, BPF_SKB_MAX_LEN, 0);
return TCX_PASS;
} else if (payload[0] == 'Z') { /* Zero */
change_tail_ret = bpf_skb_change_tail(skb, 0, 0);
diff --git a/tools/testing/selftests/bpf/progs/test_tc_edt.c b/tools/testing/selftests/bpf/progs/test_tc_edt.c
index 950a70b61e74..4f6f03122d61 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_edt.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_edt.c
@@ -14,7 +14,6 @@
#define TIME_HORIZON_NS (2000 * 1000 * 1000)
#define NS_PER_SEC 1000000000
#define ECN_HORIZON_NS 5000000
-#define THROTTLE_RATE_BPS (5 * 1000 * 1000)
/* flow_key => last_tstamp timestamp used */
struct {
@@ -24,12 +23,13 @@ struct {
__uint(max_entries, 1);
} flow_map SEC(".maps");
+uint64_t target_rate;
+
static inline int throttle_flow(struct __sk_buff *skb)
{
int key = 0;
uint64_t *last_tstamp = bpf_map_lookup_elem(&flow_map, &key);
- uint64_t delay_ns = ((uint64_t)skb->len) * NS_PER_SEC /
- THROTTLE_RATE_BPS;
+ uint64_t delay_ns = ((uint64_t)skb->len) * NS_PER_SEC / target_rate;
uint64_t now = bpf_ktime_get_ns();
uint64_t tstamp, next_tstamp = 0;
@@ -70,7 +70,7 @@ static inline int handle_tcp(struct __sk_buff *skb, struct tcphdr *tcp)
if ((void *)(tcp + 1) > data_end)
return TC_ACT_SHOT;
- if (tcp->dest == bpf_htons(9000))
+ if (tcp->source == bpf_htons(9000))
return throttle_flow(skb);
return TC_ACT_OK;
@@ -99,7 +99,8 @@ static inline int handle_ipv4(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("cls_test") int tc_prog(struct __sk_buff *skb)
+SEC("tc")
+int tc_prog(struct __sk_buff *skb)
{
if (skb->protocol == bpf_htons(ETH_P_IP))
return handle_ipv4(skb);
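With THROTTLE_RATE_BPS removed and the section renamed to plain "tc", the
rate now comes from the target_rate global, so the user-space runner must
set it before pushing traffic. A hypothetical skeleton-based setup, reusing
the old constant (names assumed, not part of this hunk):

/* Hypothetical user-space setup for the rewritten test. */
struct test_tc_edt *skel = test_tc_edt__open_and_load();

if (skel)
	skel->bss->target_rate = 5 * 1000 * 1000; /* old THROTTLE_RATE_BPS */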
diff --git a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
index 404124a93892..7330c61b5730 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
@@ -2,23 +2,11 @@
/* In-place tunneling */
-#include <stdbool.h>
-#include <string.h>
-
-#include <linux/stddef.h>
-#include <linux/bpf.h>
-#include <linux/if_ether.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/mpls.h>
-#include <linux/tcp.h>
-#include <linux/udp.h>
-#include <linux/pkt_cls.h>
-#include <linux/types.h>
+#include <vmlinux.h>
-#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include "bpf_tracing_net.h"
#include "bpf_compiler.h"
#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
@@ -27,6 +15,14 @@ static const int cfg_port = 8000;
static const int cfg_udp_src = 20000;
+#define ETH_P_MPLS_UC 0x8847
+#define ETH_P_TEB 0x6558
+
+#define MPLS_LS_S_MASK 0x00000100
+#define BPF_F_ADJ_ROOM_ENCAP_L2(len) \
+ (((__u64)len & BPF_ADJ_ROOM_ENCAP_L2_MASK) \
+ << BPF_ADJ_ROOM_ENCAP_L2_SHIFT)
+
#define L2_PAD_SZ (sizeof(struct vxlanhdr) + ETH_HLEN)
#define UDP_PORT 5555
@@ -36,10 +32,9 @@ static const int cfg_udp_src = 20000;
#define EXTPROTO_VXLAN 0x1
-#define VXLAN_N_VID (1u << 24)
-#define VXLAN_VNI_MASK bpf_htonl((VXLAN_N_VID - 1) << 8)
-#define VXLAN_FLAGS 0x8
-#define VXLAN_VNI 1
+#define VXLAN_FLAGS bpf_htonl(1<<27)
+#define VNI_ID 1
+#define VXLAN_VNI bpf_htonl(VNI_ID << 8)
#ifndef NEXTHDR_DEST
#define NEXTHDR_DEST 60
@@ -48,12 +43,6 @@ static const int cfg_udp_src = 20000;
/* MPLS label 1000 with S bit (last label) set and ttl of 255. */
static const __u32 mpls_label = __bpf_constant_htonl(1000 << 12 |
MPLS_LS_S_MASK | 0xff);
-
-struct vxlanhdr {
- __be32 vx_flags;
- __be32 vx_vni;
-} __attribute__((packed));
-
struct gre_hdr {
__be16 flags;
__be16 protocol;
@@ -94,8 +83,8 @@ static __always_inline void set_ipv4_csum(struct iphdr *iph)
static __always_inline int __encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
__u16 l2_proto, __u16 ext_proto)
{
+ struct iphdr iph_inner = {0};
__u16 udp_dst = UDP_PORT;
- struct iphdr iph_inner;
struct v4hdr h_outer;
struct tcphdr tcph;
int olen, l2_len;
@@ -122,7 +111,6 @@ static __always_inline int __encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
return TC_ACT_OK;
/* Derive the IPv4 header fields from the IPv6 header */
- memset(&iph_inner, 0, sizeof(iph_inner));
iph_inner.version = 4;
iph_inner.ihl = 5;
iph_inner.tot_len = bpf_htons(sizeof(iph6_inner) +
@@ -210,7 +198,7 @@ static __always_inline int __encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
struct vxlanhdr *vxlan_hdr = (struct vxlanhdr *)l2_hdr;
vxlan_hdr->vx_flags = VXLAN_FLAGS;
- vxlan_hdr->vx_vni = bpf_htonl((VXLAN_VNI & VXLAN_VNI_MASK) << 8);
+ vxlan_hdr->vx_vni = VXLAN_VNI;
l2_hdr += sizeof(struct vxlanhdr);
}
@@ -340,7 +328,7 @@ static __always_inline int __encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
struct vxlanhdr *vxlan_hdr = (struct vxlanhdr *)l2_hdr;
vxlan_hdr->vx_flags = VXLAN_FLAGS;
- vxlan_hdr->vx_vni = bpf_htonl((VXLAN_VNI & VXLAN_VNI_MASK) << 8);
+ vxlan_hdr->vx_vni = VXLAN_VNI;
l2_hdr += sizeof(struct vxlanhdr);
}
@@ -372,8 +360,8 @@ static __always_inline int __encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
static int encap_ipv6_ipip6(struct __sk_buff *skb)
{
+ struct v6hdr h_outer = {0};
struct iphdr iph_inner;
- struct v6hdr h_outer;
struct tcphdr tcph;
struct ethhdr eth;
__u64 flags;
@@ -400,13 +388,12 @@ static int encap_ipv6_ipip6(struct __sk_buff *skb)
return TC_ACT_SHOT;
/* prepare new outer network header */
- memset(&h_outer.ip, 0, sizeof(h_outer.ip));
h_outer.ip.version = 6;
h_outer.ip.hop_limit = iph_inner.ttl;
- h_outer.ip.saddr.s6_addr[1] = 0xfd;
- h_outer.ip.saddr.s6_addr[15] = 1;
- h_outer.ip.daddr.s6_addr[1] = 0xfd;
- h_outer.ip.daddr.s6_addr[15] = 2;
+ h_outer.ip.saddr.in6_u.u6_addr8[1] = 0xfd;
+ h_outer.ip.saddr.in6_u.u6_addr8[15] = 1;
+ h_outer.ip.daddr.in6_u.u6_addr8[1] = 0xfd;
+ h_outer.ip.daddr.in6_u.u6_addr8[15] = 2;
h_outer.ip.payload_len = iph_inner.tot_len;
h_outer.ip.nexthdr = IPPROTO_IPIP;
@@ -431,7 +418,7 @@ static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
return __encap_ipv6(skb, encap_proto, l2_proto, 0);
}
-SEC("encap_ipip_none")
+SEC("tc")
int __encap_ipip_none(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
@@ -440,7 +427,7 @@ int __encap_ipip_none(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_gre_none")
+SEC("tc")
int __encap_gre_none(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
@@ -449,7 +436,7 @@ int __encap_gre_none(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_gre_mpls")
+SEC("tc")
int __encap_gre_mpls(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
@@ -458,7 +445,7 @@ int __encap_gre_mpls(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_gre_eth")
+SEC("tc")
int __encap_gre_eth(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
@@ -467,7 +454,7 @@ int __encap_gre_eth(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_udp_none")
+SEC("tc")
int __encap_udp_none(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
@@ -476,7 +463,7 @@ int __encap_udp_none(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_udp_mpls")
+SEC("tc")
int __encap_udp_mpls(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
@@ -485,7 +472,7 @@ int __encap_udp_mpls(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_udp_eth")
+SEC("tc")
int __encap_udp_eth(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
@@ -494,7 +481,7 @@ int __encap_udp_eth(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_vxlan_eth")
+SEC("tc")
int __encap_vxlan_eth(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
@@ -505,7 +492,7 @@ int __encap_vxlan_eth(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_sit_none")
+SEC("tc")
int __encap_sit_none(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
@@ -514,7 +501,7 @@ int __encap_sit_none(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_ip6tnl_none")
+SEC("tc")
int __encap_ip6tnl_none(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
@@ -523,7 +510,7 @@ int __encap_ip6tnl_none(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_ipip6_none")
+SEC("tc")
int __encap_ipip6_none(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
@@ -532,7 +519,7 @@ int __encap_ipip6_none(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_ip6gre_none")
+SEC("tc")
int __encap_ip6gre_none(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
@@ -541,7 +528,7 @@ int __encap_ip6gre_none(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_ip6gre_mpls")
+SEC("tc")
int __encap_ip6gre_mpls(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
@@ -550,7 +537,7 @@ int __encap_ip6gre_mpls(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_ip6gre_eth")
+SEC("tc")
int __encap_ip6gre_eth(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
@@ -559,7 +546,7 @@ int __encap_ip6gre_eth(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_ip6udp_none")
+SEC("tc")
int __encap_ip6udp_none(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
@@ -568,7 +555,7 @@ int __encap_ip6udp_none(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_ip6udp_mpls")
+SEC("tc")
int __encap_ip6udp_mpls(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
@@ -577,7 +564,7 @@ int __encap_ip6udp_mpls(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_ip6udp_eth")
+SEC("tc")
int __encap_ip6udp_eth(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
@@ -586,7 +573,7 @@ int __encap_ip6udp_eth(struct __sk_buff *skb)
return TC_ACT_OK;
}
-SEC("encap_ip6vxlan_eth")
+SEC("tc")
int __encap_ip6vxlan_eth(struct __sk_buff *skb)
{
if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
@@ -693,7 +680,7 @@ static int decap_ipv6(struct __sk_buff *skb)
iph_outer.nexthdr);
}
-SEC("decap")
+SEC("tc")
int decap_f(struct __sk_buff *skb)
{
switch (skb->protocol) {
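
Editor's note on the SEC() renames above: once every program sits in the
canonical SEC("tc") section, a loader selects programs by C function name
rather than by section name. A hedged sketch of the userspace side (the
object file name is an assumption, the libbpf calls are standard):

    struct bpf_object *obj =
            bpf_object__open_file("test_tc_tunnel.bpf.o", NULL);
    struct bpf_program *prog =
            bpf_object__find_program_by_name(obj, "__encap_ipip_none");
    if (!prog || bpf_object__load(obj))
            return -1;
    int fd = bpf_program__fd(prog);  /* attach via tc/netlink as before */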
diff --git a/tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c b/tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c
index 5f4e87ee949a..1ecdf4c54de4 100644
--- a/tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c
+++ b/tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c
@@ -14,10 +14,7 @@
#include <bpf/bpf_endian.h>
#define BPF_PROG_TEST_TCP_HDR_OPTIONS
#include "test_tcp_hdr_options.h"
-
-#ifndef sizeof_field
-#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
-#endif
+#include "bpf_misc.h"
__u8 test_kind = TCPOPT_EXP;
__u16 test_magic = 0xeB9F;
diff --git a/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
index 540181c115a8..ef00d38b0a8d 100644
--- a/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
@@ -23,7 +23,6 @@ struct {
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
- __uint(max_entries, 2);
__type(key, int);
__type(value, __u32);
} perf_event_map SEC(".maps");
diff --git a/tools/testing/selftests/bpf/progs/test_uprobe.c b/tools/testing/selftests/bpf/progs/test_uprobe.c
index 896c88a4960d..12f4065fca20 100644
--- a/tools/testing/selftests/bpf/progs/test_uprobe.c
+++ b/tools/testing/selftests/bpf/progs/test_uprobe.c
@@ -59,3 +59,41 @@ int BPF_UPROBE(test4)
test4_result = 1;
return 0;
}
+
+#if defined(__TARGET_ARCH_x86)
+struct pt_regs regs;
+
+SEC("uprobe")
+int BPF_UPROBE(test_regs_change)
+{
+ pid_t pid = bpf_get_current_pid_tgid() >> 32;
+
+ if (pid != my_pid)
+ return 0;
+
+ ctx->ax = regs.ax;
+ ctx->cx = regs.cx;
+ ctx->dx = regs.dx;
+ ctx->r8 = regs.r8;
+ ctx->r9 = regs.r9;
+ ctx->r10 = regs.r10;
+ ctx->r11 = regs.r11;
+ ctx->di = regs.di;
+ ctx->si = regs.si;
+ return 0;
+}
+
+unsigned long ip;
+
+SEC("uprobe")
+int BPF_UPROBE(test_regs_change_ip)
+{
+ pid_t pid = bpf_get_current_pid_tgid() >> 32;
+
+ if (pid != my_pid)
+ return 0;
+
+ ctx->ip = ip;
+ return 0;
+}
+#endif
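
Editor's note: test_regs_change copies values from the global `regs` into
the uprobe's pt_regs, so userspace can verify that register writes made by
a BPF uprobe are visible in the resumed task. A sketch of how the test
side might drive it (skeleton field names and the offset are assumptions):

    skel->bss->my_pid = getpid();
    skel->bss->regs.ax = 0x11;           /* value the probe injects */
    struct bpf_link *link = bpf_program__attach_uprobe(
            skel->progs.test_regs_change,
            false /* not a retprobe */, -1 /* any pid, filtered in BPF */,
            "/proc/self/exe", trigger_offset /* assumed symbol offset */);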
diff --git a/tools/testing/selftests/bpf/progs/test_usdt.c b/tools/testing/selftests/bpf/progs/test_usdt.c
index 096488f47fbc..a78c87537b07 100644
--- a/tools/testing/selftests/bpf/progs/test_usdt.c
+++ b/tools/testing/selftests/bpf/progs/test_usdt.c
@@ -107,4 +107,35 @@ int BPF_USDT(usdt12, int a1, int a2, long a3, long a4, unsigned a5,
return 0;
}
+int usdt_sib_called;
+u64 usdt_sib_cookie;
+int usdt_sib_arg_cnt;
+int usdt_sib_arg_ret;
+short usdt_sib_arg;
+int usdt_sib_arg_size;
+
+/*
+ * usdt_sib is only tested on x86-related architectures, so it requires
+ * manual attachment; auto-attach would break the tests on other architectures
+ */
+SEC("usdt")
+int usdt_sib(struct pt_regs *ctx)
+{
+ long tmp;
+
+ if (my_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ __sync_fetch_and_add(&usdt_sib_called, 1);
+
+ usdt_sib_cookie = bpf_usdt_cookie(ctx);
+ usdt_sib_arg_cnt = bpf_usdt_arg_cnt(ctx);
+
+ usdt_sib_arg_ret = bpf_usdt_arg(ctx, 0, &tmp);
+ usdt_sib_arg = (short)tmp;
+ usdt_sib_arg_size = bpf_usdt_arg_size(ctx, 0);
+
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
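
Editor's note: per the comment above, usdt_sib is attached explicitly
rather than auto-attached. A hedged sketch using libbpf's USDT API
(provider and probe names are assumptions, the calls are standard libbpf):

    LIBBPF_OPTS(bpf_usdt_opts, opts, .usdt_cookie = 0xcafe);
    struct bpf_link *link = bpf_program__attach_usdt(
            skel->progs.usdt_sib, -1 /* own process */, "/proc/self/exe",
            "test" /* assumed provider */, "usdt_sib" /* assumed name */,
            &opts);
    if (!link)
            return -errno;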
diff --git a/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c b/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c
index e96d09e11115..ff8d755548b9 100644
--- a/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c
+++ b/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c
@@ -17,7 +17,7 @@
#define MAX_SIG_SIZE 1024
__u32 monitored_pid;
-__u32 user_keyring_serial;
+__s32 user_keyring_serial;
__u64 system_keyring_id;
struct data {
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c
index dc74d8cf9e3f..5904f45cfbc4 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c
@@ -19,7 +19,9 @@ int _xdp_adjust_tail_grow(struct xdp_md *xdp)
/* Data length determines the test case */
if (data_len == 54) { /* sizeof(pkt_v4) */
- offset = 4096; /* test too large offset */
+ offset = 4096; /* test too large offset, 4k page size */
+ } else if (data_len == 53) { /* sizeof(pkt_v4) - 1 */
+ offset = 65536; /* test too large offset, 64k page size */
} else if (data_len == 74) { /* sizeof(pkt_v6) */
offset = 40;
} else if (data_len == 64) {
@@ -31,6 +33,10 @@ int _xdp_adjust_tail_grow(struct xdp_md *xdp)
offset = 10;
} else if (data_len == 9001) {
offset = 4096;
+ } else if (data_len == 90000) {
+ offset = 10; /* test a small offset, 64k page size */
+ } else if (data_len == 90001) {
+ offset = 65536; /* test too large offset, 64k page size */
} else {
return XDP_ABORTED; /* No matching test */
}
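
Editor's note: each data_len value selects one grow offset; the new
53/90000/90001 cases extend coverage to 64k-page systems, where tailroom
is larger but a 65536-byte grow must still fail. The selected offset
presumably feeds a call like the following (sketch, not from the visible
hunk):

    /* grow (positive) or shrink the packet tail; returns 0 on success */
    if (bpf_xdp_adjust_tail(xdp, offset))
            return XDP_DROP;   /* assumed failure handling in this test */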
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_devmap_tailcall.c b/tools/testing/selftests/bpf/progs/test_xdp_devmap_tailcall.c
new file mode 100644
index 000000000000..814e2a980e97
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_xdp_devmap_tailcall.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+SEC("xdp")
+int xdp_devmap(struct xdp_md *ctx)
+{
+ return ctx->egress_ifindex;
+}
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __array(values, int (void *));
+} xdp_map SEC(".maps") = {
+ .values = {
+ [0] = (void *)&xdp_devmap,
+ },
+};
+
+SEC("xdp")
+int xdp_entry(struct xdp_md *ctx)
+{
+ bpf_tail_call(ctx, &xdp_map, 0);
+ return 0;
+}
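
Editor's note: xdp_entry tail-calls into a program that reads
ctx->egress_ifindex, a field normally meaningful only for devmap-attached
programs, to exercise the verifier on that combination. The control flow
relies on the usual fallthrough idiom:

    /* bpf_tail_call() does not return when it succeeds; execution
     * continuing past it means the slot was empty or the program
     * types were incompatible. */
    bpf_tail_call(ctx, &xdp_map, 0);
    return 0;   /* reached only if the tail call failed */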
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_meta.c b/tools/testing/selftests/bpf/progs/test_xdp_meta.c
index fcf6ca14f2ea..0a0f371a2dec 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_meta.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_meta.c
@@ -1,45 +1,312 @@
+#include <stdbool.h>
#include <linux/bpf.h>
+#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/pkt_cls.h>
+#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_kfuncs.h"
#define META_SIZE 32
#define ctx_ptr(ctx, mem) (void *)(unsigned long)ctx->mem
-/* Demonstrates how metadata can be passed from an XDP program to a TC program
- * using bpf_xdp_adjust_meta.
- * For the sake of testing the metadata support in drivers, the XDP program uses
- * a fixed-size payload after the Ethernet header as metadata. The TC program
- * copies the metadata it receives into a map so it can be checked from
- * userspace.
+/* Demonstrate passing metadata from XDP to TC using bpf_xdp_adjust_meta.
+ *
+ * The XDP program extracts a fixed-size payload following the Ethernet header
+ * and stores it as packet metadata to test the driver's metadata support. The
+ * TC program then verifies that the passed metadata is correct.
*/
-struct {
- __uint(type, BPF_MAP_TYPE_ARRAY);
- __uint(max_entries, 1);
- __type(key, __u32);
- __uint(value_size, META_SIZE);
-} test_result SEC(".maps");
+bool test_pass;
+
+static const __u8 smac_want[ETH_ALEN] = {
+ 0x12, 0x34, 0xDE, 0xAD, 0xBE, 0xEF,
+};
+
+static const __u8 meta_want[META_SIZE] = {
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
+ 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
+ 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38,
+};
+
+static bool check_smac(const struct ethhdr *eth)
+{
+ return !__builtin_memcmp(eth->h_source, smac_want, ETH_ALEN);
+}
+
+static bool check_metadata(const char *file, int line, __u8 *meta_have)
+{
+ if (!__builtin_memcmp(meta_have, meta_want, META_SIZE))
+ return true;
+
+ bpf_stream_printk(BPF_STREAM_STDERR,
+ "FAIL:%s:%d: metadata mismatch\n"
+ " have:\n %pI6\n %pI6\n"
+ " want:\n %pI6\n %pI6\n",
+ file, line,
+ &meta_have[0x00], &meta_have[0x10],
+ &meta_want[0x00], &meta_want[0x10]);
+ return false;
+}
+
+#define check_metadata(meta_have) check_metadata(__FILE__, __LINE__, meta_have)
+
+static bool check_skb_metadata(const char *file, int line, struct __sk_buff *skb)
+{
+ __u8 *data_meta = ctx_ptr(skb, data_meta);
+ __u8 *data = ctx_ptr(skb, data);
+
+ return data_meta + META_SIZE <= data && (check_metadata)(file, line, data_meta);
+}
+
+#define check_skb_metadata(skb) check_skb_metadata(__FILE__, __LINE__, skb)
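
Editor's note: the pattern above defines a checker function and then
shadows it with a same-name function-like macro so every call site picks
up __FILE__/__LINE__ automatically; wrapping the name in parentheses, as
in `(check_metadata)(file, line, ...)`, suppresses macro expansion and
reaches the real function. A standalone illustration of the idiom:

    #include <stdio.h>

    static int add(int a, int b) { return a + b; }
    #define add(a, b) add((a), (b))      /* macro shadows the function */

    int main(void)
    {
            printf("%d\n", add(1, 2));   /* goes through the macro */
            printf("%d\n", (add)(1, 2)); /* parentheses bypass the macro */
            return 0;
    }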
SEC("tc")
int ing_cls(struct __sk_buff *ctx)
{
- __u8 *data, *data_meta;
- __u32 key = 0;
+ __u8 *meta_have = ctx_ptr(ctx, data_meta);
+ __u8 *data = ctx_ptr(ctx, data);
- data_meta = ctx_ptr(ctx, data_meta);
- data = ctx_ptr(ctx, data);
+ if (meta_have + META_SIZE > data)
+ goto out;
+
+ if (!check_metadata(meta_have))
+ goto out;
+
+ test_pass = true;
+out:
+ return TC_ACT_SHOT;
+}
+
+/* Read from metadata using bpf_dynptr_read helper */
+SEC("tc")
+int ing_cls_dynptr_read(struct __sk_buff *ctx)
+{
+ __u8 meta_have[META_SIZE];
+ struct bpf_dynptr meta;
+
+ bpf_dynptr_from_skb_meta(ctx, 0, &meta);
+ bpf_dynptr_read(meta_have, META_SIZE, &meta, 0, 0);
+
+ if (!check_metadata(meta_have))
+ goto out;
+
+ test_pass = true;
+out:
+ return TC_ACT_SHOT;
+}
- if (data_meta + META_SIZE > data)
+/* Write to metadata using bpf_dynptr_write helper */
+SEC("tc")
+int ing_cls_dynptr_write(struct __sk_buff *ctx)
+{
+ struct bpf_dynptr data, meta;
+ __u8 *src;
+
+ bpf_dynptr_from_skb(ctx, 0, &data);
+ src = bpf_dynptr_slice(&data, sizeof(struct ethhdr), NULL, META_SIZE);
+ if (!src)
return TC_ACT_SHOT;
- bpf_map_update_elem(&test_result, &key, data_meta, BPF_ANY);
+ bpf_dynptr_from_skb_meta(ctx, 0, &meta);
+ bpf_dynptr_write(&meta, 0, src, META_SIZE, 0);
+
+ return TC_ACT_UNSPEC; /* pass */
+}
+
+/* Read from metadata using read-only dynptr slice */
+SEC("tc")
+int ing_cls_dynptr_slice(struct __sk_buff *ctx)
+{
+ struct bpf_dynptr meta;
+ __u8 *meta_have;
+
+ bpf_dynptr_from_skb_meta(ctx, 0, &meta);
+ meta_have = bpf_dynptr_slice(&meta, 0, NULL, META_SIZE);
+ if (!meta_have)
+ goto out;
+ if (!check_metadata(meta_have))
+ goto out;
+
+ test_pass = true;
+out:
return TC_ACT_SHOT;
}
+/* Write to metadata using writeable dynptr slice */
+SEC("tc")
+int ing_cls_dynptr_slice_rdwr(struct __sk_buff *ctx)
+{
+ struct bpf_dynptr data, meta;
+ __u8 *src, *dst;
+
+ bpf_dynptr_from_skb(ctx, 0, &data);
+ src = bpf_dynptr_slice(&data, sizeof(struct ethhdr), NULL, META_SIZE);
+ if (!src)
+ return TC_ACT_SHOT;
+
+ bpf_dynptr_from_skb_meta(ctx, 0, &meta);
+ dst = bpf_dynptr_slice_rdwr(&meta, 0, NULL, META_SIZE);
+ if (!dst)
+ return TC_ACT_SHOT;
+
+ __builtin_memcpy(dst, src, META_SIZE);
+
+ return TC_ACT_UNSPEC; /* pass */
+}
+
+/* Read skb metadata in chunks from various offsets in different ways. */
+SEC("tc")
+int ing_cls_dynptr_offset_rd(struct __sk_buff *ctx)
+{
+ const __u32 chunk_len = META_SIZE / 4;
+ __u8 meta_have[META_SIZE];
+ struct bpf_dynptr meta;
+ __u8 *dst, *src;
+
+ dst = meta_have;
+
+ /* 1. Regular read */
+ bpf_dynptr_from_skb_meta(ctx, 0, &meta);
+ bpf_dynptr_read(dst, chunk_len, &meta, 0, 0);
+ dst += chunk_len;
+
+ /* 2. Read from an offset-adjusted dynptr */
+ bpf_dynptr_adjust(&meta, chunk_len, bpf_dynptr_size(&meta));
+ bpf_dynptr_read(dst, chunk_len, &meta, 0, 0);
+ dst += chunk_len;
+
+ /* 3. Read at an offset */
+ bpf_dynptr_read(dst, chunk_len, &meta, chunk_len, 0);
+ dst += chunk_len;
+
+ /* 4. Read from a slice starting at an offset */
+ src = bpf_dynptr_slice(&meta, 2 * chunk_len, NULL, chunk_len);
+ if (!src)
+ goto out;
+ __builtin_memcpy(dst, src, chunk_len);
+
+ if (!check_metadata(meta_have))
+ goto out;
+
+ test_pass = true;
+out:
+ return TC_ACT_SHOT;
+}
+
+/* Write skb metadata in chunks at various offsets in different ways. */
+SEC("tc")
+int ing_cls_dynptr_offset_wr(struct __sk_buff *ctx)
+{
+ const __u32 chunk_len = META_SIZE / 4;
+ __u8 payload[META_SIZE];
+ struct bpf_dynptr meta;
+ __u8 *dst, *src;
+
+ bpf_skb_load_bytes(ctx, sizeof(struct ethhdr), payload, sizeof(payload));
+ src = payload;
+
+ /* 1. Regular write */
+ bpf_dynptr_from_skb_meta(ctx, 0, &meta);
+ bpf_dynptr_write(&meta, 0, src, chunk_len, 0);
+ src += chunk_len;
+
+ /* 2. Write to an offset-adjusted dynptr */
+ bpf_dynptr_adjust(&meta, chunk_len, bpf_dynptr_size(&meta));
+ bpf_dynptr_write(&meta, 0, src, chunk_len, 0);
+ src += chunk_len;
+
+ /* 3. Write at an offset */
+ bpf_dynptr_write(&meta, chunk_len, src, chunk_len, 0);
+ src += chunk_len;
+
+ /* 4. Write to a slice starting at an offset */
+ dst = bpf_dynptr_slice_rdwr(&meta, 2 * chunk_len, NULL, chunk_len);
+ if (!dst)
+ return TC_ACT_SHOT;
+ __builtin_memcpy(dst, src, chunk_len);
+
+ return TC_ACT_UNSPEC; /* pass */
+}
+
+/* Pass an OOB offset to dynptr read, write, adjust, slice. */
+SEC("tc")
+int ing_cls_dynptr_offset_oob(struct __sk_buff *ctx)
+{
+ struct bpf_dynptr meta;
+ __u8 md, *p;
+ int err;
+
+ err = bpf_dynptr_from_skb_meta(ctx, 0, &meta);
+ if (err)
+ goto fail;
+
+ /* read offset OOB */
+ err = bpf_dynptr_read(&md, sizeof(md), &meta, META_SIZE, 0);
+ if (err != -E2BIG)
+ goto fail;
+
+ /* write offset OOB */
+ err = bpf_dynptr_write(&meta, META_SIZE, &md, sizeof(md), 0);
+ if (err != -E2BIG)
+ goto fail;
+
+ /* adjust end offset OOB */
+ err = bpf_dynptr_adjust(&meta, 0, META_SIZE + 1);
+ if (err != -ERANGE)
+ goto fail;
+
+ /* adjust start offset OOB */
+ err = bpf_dynptr_adjust(&meta, META_SIZE + 1, META_SIZE + 1);
+ if (err != -ERANGE)
+ goto fail;
+
+ /* slice offset OOB */
+ p = bpf_dynptr_slice(&meta, META_SIZE, NULL, sizeof(*p));
+ if (p)
+ goto fail;
+
+ /* slice rdwr offset OOB */
+ p = bpf_dynptr_slice_rdwr(&meta, META_SIZE, NULL, sizeof(*p));
+ if (p)
+ goto fail;
+
+ return TC_ACT_UNSPEC;
+fail:
+ return TC_ACT_SHOT;
+}
+
+/* Reserve and clear space for metadata but don't populate it */
+SEC("xdp")
+int ing_xdp_zalloc_meta(struct xdp_md *ctx)
+{
+ struct ethhdr *eth = ctx_ptr(ctx, data);
+ __u8 *meta;
+ int ret;
+
+ /* Drop any non-test packets */
+ if (eth + 1 > ctx_ptr(ctx, data_end))
+ return XDP_DROP;
+ if (!check_smac(eth))
+ return XDP_DROP;
+
+ ret = bpf_xdp_adjust_meta(ctx, -META_SIZE);
+ if (ret < 0)
+ return XDP_DROP;
+
+ meta = ctx_ptr(ctx, data_meta);
+ if (meta + META_SIZE > ctx_ptr(ctx, data))
+ return XDP_DROP;
+
+ __builtin_memset(meta, 0, META_SIZE);
+
+ return XDP_PASS;
+}
+
SEC("xdp")
int ing_xdp(struct xdp_md *ctx)
{
@@ -64,13 +331,343 @@ int ing_xdp(struct xdp_md *ctx)
/* The Linux networking stack may send other packets on the test
* interface that interfere with the test. Just drop them.
- * The test packets can be recognized by their ethertype of zero.
+ * The test packets can be recognized by their source MAC address.
*/
- if (eth->h_proto != 0)
+ if (!check_smac(eth))
return XDP_DROP;
__builtin_memcpy(data_meta, payload, META_SIZE);
return XDP_PASS;
}
+/*
+ * Check that, when operating on a cloned packet, skb->data_meta..skb->data is
+ * kept intact if prog writes to packet _payload_ using packet pointers.
+ */
+SEC("tc")
+int clone_data_meta_survives_data_write(struct __sk_buff *ctx)
+{
+ __u8 *meta_have = ctx_ptr(ctx, data_meta);
+ struct ethhdr *eth = ctx_ptr(ctx, data);
+
+ if (eth + 1 > ctx_ptr(ctx, data_end))
+ goto out;
+ /* Ignore non-test packets */
+ if (!check_smac(eth))
+ goto out;
+
+ if (meta_have + META_SIZE > eth)
+ goto out;
+
+ if (!check_metadata(meta_have))
+ goto out;
+
+ /* Packet write to trigger unclone in prologue */
+ eth->h_proto = 42;
+
+ test_pass = true;
+out:
+ return TC_ACT_SHOT;
+}
+
+/*
+ * Check that, when operating on a cloned packet, skb->data_meta..skb->data is
+ * kept intact if prog writes to packet _metadata_ using packet pointers.
+ */
+SEC("tc")
+int clone_data_meta_survives_meta_write(struct __sk_buff *ctx)
+{
+ __u8 *meta_have = ctx_ptr(ctx, data_meta);
+ struct ethhdr *eth = ctx_ptr(ctx, data);
+
+ if (eth + 1 > ctx_ptr(ctx, data_end))
+ goto out;
+ /* Ignore non-test packets */
+ if (!check_smac(eth))
+ goto out;
+
+ if (meta_have + META_SIZE > eth)
+ goto out;
+
+ if (!check_metadata(meta_have))
+ goto out;
+
+ /* Metadata write to trigger unclone in prologue */
+ *meta_have = 42;
+
+ test_pass = true;
+out:
+ return TC_ACT_SHOT;
+}
+
+/*
+ * Check that, when operating on a cloned packet, metadata remains intact if
+ * prog creates a r/w slice to packet _payload_.
+ */
+SEC("tc")
+int clone_meta_dynptr_survives_data_slice_write(struct __sk_buff *ctx)
+{
+ struct bpf_dynptr data, meta;
+ __u8 meta_have[META_SIZE];
+ struct ethhdr *eth;
+
+ bpf_dynptr_from_skb(ctx, 0, &data);
+ eth = bpf_dynptr_slice_rdwr(&data, 0, NULL, sizeof(*eth));
+ if (!eth)
+ goto out;
+ /* Ignore non-test packets */
+ if (!check_smac(eth))
+ goto out;
+
+ bpf_dynptr_from_skb_meta(ctx, 0, &meta);
+ bpf_dynptr_read(meta_have, META_SIZE, &meta, 0, 0);
+ if (!check_metadata(meta_have))
+ goto out;
+
+ test_pass = true;
+out:
+ return TC_ACT_SHOT;
+}
+
+/*
+ * Check that, when operating on a cloned packet, metadata remains intact if
+ * prog creates an r/w slice to packet _metadata_.
+ */
+SEC("tc")
+int clone_meta_dynptr_survives_meta_slice_write(struct __sk_buff *ctx)
+{
+ struct bpf_dynptr data, meta;
+ const struct ethhdr *eth;
+ __u8 *meta_have;
+
+ bpf_dynptr_from_skb(ctx, 0, &data);
+ eth = bpf_dynptr_slice(&data, 0, NULL, sizeof(*eth));
+ if (!eth)
+ goto out;
+ /* Ignore non-test packets */
+ if (!check_smac(eth))
+ goto out;
+
+ bpf_dynptr_from_skb_meta(ctx, 0, &meta);
+ meta_have = bpf_dynptr_slice_rdwr(&meta, 0, NULL, META_SIZE);
+ if (!meta_have)
+ goto out;
+
+ if (!check_metadata(meta_have))
+ goto out;
+
+ test_pass = true;
+out:
+ return TC_ACT_SHOT;
+}
+
+/*
+ * Check that, when operating on a cloned packet, skb_meta dynptr is read-write
+ * before prog writes to packet _payload_ using dynptr_write helper and metadata
+ * remains intact before and after the write.
+ */
+SEC("tc")
+int clone_meta_dynptr_rw_before_data_dynptr_write(struct __sk_buff *ctx)
+{
+ struct bpf_dynptr data, meta;
+ __u8 meta_have[META_SIZE];
+ const struct ethhdr *eth;
+ int err;
+
+ bpf_dynptr_from_skb(ctx, 0, &data);
+ eth = bpf_dynptr_slice(&data, 0, NULL, sizeof(*eth));
+ if (!eth)
+ goto out;
+ /* Ignore non-test packets */
+ if (!check_smac(eth))
+ goto out;
+
+ /* Expect read-write metadata before unclone */
+ bpf_dynptr_from_skb_meta(ctx, 0, &meta);
+ if (bpf_dynptr_is_rdonly(&meta))
+ goto out;
+
+ err = bpf_dynptr_read(meta_have, META_SIZE, &meta, 0, 0);
+ if (err || !check_metadata(meta_have))
+ goto out;
+
+ /* Helper write to payload will unclone the packet */
+ bpf_dynptr_write(&data, offsetof(struct ethhdr, h_proto), "x", 1, 0);
+
+ err = bpf_dynptr_read(meta_have, META_SIZE, &meta, 0, 0);
+ if (err || !check_metadata(meta_have))
+ goto out;
+
+ test_pass = true;
+out:
+ return TC_ACT_SHOT;
+}
+
+/*
+ * Check that, when operating on a cloned packet, skb_meta dynptr is read-write
+ * before prog writes to packet _metadata_ using dynptr_write helper and
+ * metadata remains intact before and after the write.
+ */
+SEC("tc")
+int clone_meta_dynptr_rw_before_meta_dynptr_write(struct __sk_buff *ctx)
+{
+ struct bpf_dynptr data, meta;
+ __u8 meta_have[META_SIZE];
+ const struct ethhdr *eth;
+ int err;
+
+ bpf_dynptr_from_skb(ctx, 0, &data);
+ eth = bpf_dynptr_slice(&data, 0, NULL, sizeof(*eth));
+ if (!eth)
+ goto out;
+ /* Ignore non-test packets */
+ if (!check_smac(eth))
+ goto out;
+
+ /* Expect read-write metadata before unclone */
+ bpf_dynptr_from_skb_meta(ctx, 0, &meta);
+ if (bpf_dynptr_is_rdonly(&meta))
+ goto out;
+
+ err = bpf_dynptr_read(meta_have, META_SIZE, &meta, 0, 0);
+ if (err || !check_metadata(meta_have))
+ goto out;
+
+ /* Helper write to metadata will unclone the packet */
+ bpf_dynptr_write(&meta, 0, &meta_have[0], 1, 0);
+
+ err = bpf_dynptr_read(meta_have, META_SIZE, &meta, 0, 0);
+ if (err || !check_metadata(meta_have))
+ goto out;
+
+ test_pass = true;
+out:
+ return TC_ACT_SHOT;
+}
+
+SEC("tc")
+int helper_skb_vlan_push_pop(struct __sk_buff *ctx)
+{
+ int err;
+
+	/* bpf_skb_vlan_push assumes HW offload for the primary VLAN tag. Only
+	 * a secondary tag push triggers an actual MAC header modification.
+ */
+ err = bpf_skb_vlan_push(ctx, 0, 42);
+ if (err)
+ goto out;
+ err = bpf_skb_vlan_push(ctx, 0, 207);
+ if (err)
+ goto out;
+
+ if (!check_skb_metadata(ctx))
+ goto out;
+
+ err = bpf_skb_vlan_pop(ctx);
+ if (err)
+ goto out;
+ err = bpf_skb_vlan_pop(ctx);
+ if (err)
+ goto out;
+
+ if (!check_skb_metadata(ctx))
+ goto out;
+
+ test_pass = true;
+out:
+ return TC_ACT_SHOT;
+}
+
+SEC("tc")
+int helper_skb_adjust_room(struct __sk_buff *ctx)
+{
+ int err;
+
+ /* Grow a 1 byte hole after the MAC header */
+ err = bpf_skb_adjust_room(ctx, 1, BPF_ADJ_ROOM_MAC, 0);
+ if (err)
+ goto out;
+
+ if (!check_skb_metadata(ctx))
+ goto out;
+
+ /* Shrink a 1 byte hole after the MAC header */
+ err = bpf_skb_adjust_room(ctx, -1, BPF_ADJ_ROOM_MAC, 0);
+ if (err)
+ goto out;
+
+ if (!check_skb_metadata(ctx))
+ goto out;
+
+ /* Grow a 256 byte hole to trigger head reallocation */
+ err = bpf_skb_adjust_room(ctx, 256, BPF_ADJ_ROOM_MAC, 0);
+ if (err)
+ goto out;
+
+ if (!check_skb_metadata(ctx))
+ goto out;
+
+ test_pass = true;
+out:
+ return TC_ACT_SHOT;
+}
+
+SEC("tc")
+int helper_skb_change_head_tail(struct __sk_buff *ctx)
+{
+ int err;
+
+	/* Reserve 1 extra byte in the front for packet data */
+ err = bpf_skb_change_head(ctx, 1, 0);
+ if (err)
+ goto out;
+
+ if (!check_skb_metadata(ctx))
+ goto out;
+
+ /* Reserve 256 extra bytes in the front to trigger head reallocation */
+ err = bpf_skb_change_head(ctx, 256, 0);
+ if (err)
+ goto out;
+
+ if (!check_skb_metadata(ctx))
+ goto out;
+
+ /* Reserve 4k extra bytes in the back to trigger head reallocation */
+ err = bpf_skb_change_tail(ctx, ctx->len + 4096, 0);
+ if (err)
+ goto out;
+
+ if (!check_skb_metadata(ctx))
+ goto out;
+
+ test_pass = true;
+out:
+ return TC_ACT_SHOT;
+}
+
+SEC("tc")
+int helper_skb_change_proto(struct __sk_buff *ctx)
+{
+ int err;
+
+ err = bpf_skb_change_proto(ctx, bpf_htons(ETH_P_IPV6), 0);
+ if (err)
+ goto out;
+
+ if (!check_skb_metadata(ctx))
+ goto out;
+
+ err = bpf_skb_change_proto(ctx, bpf_htons(ETH_P_IP), 0);
+ if (err)
+ goto out;
+
+ if (!check_skb_metadata(ctx))
+ goto out;
+
+ test_pass = true;
+out:
+ return TC_ACT_SHOT;
+}
+
char _license[] SEC("license") = "GPL";
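
Editor's note: every verdict program in this file reports success through
the test_pass global instead of a map, keeping each program self-checking.
A hedged sketch of the userspace pattern that pairs with it (the packet
helper name is an assumption; ASSERT_* comes from the selftests harness):

    skel->bss->test_pass = false;
    send_test_packet(tap_fd);                       /* assumed helper */
    ASSERT_TRUE(skel->bss->test_pass, "test_pass");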
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_pull_data.c b/tools/testing/selftests/bpf/progs/test_xdp_pull_data.c
new file mode 100644
index 000000000000..c41a21413eaa
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_xdp_pull_data.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+int xdpf_sz;
+int sinfo_sz;
+int data_len;
+int pull_len;
+
+#define XDP_PACKET_HEADROOM 256
+
+SEC("xdp.frags")
+int xdp_find_sizes(struct xdp_md *ctx)
+{
+ xdpf_sz = sizeof(struct xdp_frame);
+ sinfo_sz = __PAGE_SIZE - XDP_PACKET_HEADROOM -
+ (ctx->data_end - ctx->data);
+
+ return XDP_PASS;
+}
+
+SEC("xdp.frags")
+int xdp_pull_data_prog(struct xdp_md *ctx)
+{
+ __u8 *data_end = (void *)(long)ctx->data_end;
+ __u8 *data = (void *)(long)ctx->data;
+ __u8 *val_p;
+ int err;
+
+ if (data_len != data_end - data)
+ return XDP_DROP;
+
+ err = bpf_xdp_pull_data(ctx, pull_len);
+ if (err)
+ return XDP_DROP;
+
+ val_p = (void *)(long)ctx->data + 1024;
+ if (val_p + 1 > (void *)(long)ctx->data_end)
+ return XDP_DROP;
+
+ if (*val_p != 0xbb)
+ return XDP_DROP;
+
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
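
Editor's note: xdp_find_sizes derives how much of the page the
shared-info overhead consumes so the test can compute frag boundaries;
xdp_pull_data_prog then checks that after bpf_xdp_pull_data() the byte at
linear offset 1024 (presumably pre-seeded to 0xbb by the test) is directly
accessible. The globals are set from userspace, roughly (values assumed):

    skel->bss->data_len = 1024;   /* expected linear length before the pull */
    skel->bss->pull_len = 1025;   /* pull enough to expose data + 1024 */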
diff --git a/tools/testing/selftests/bpf/progs/timer_interrupt.c b/tools/testing/selftests/bpf/progs/timer_interrupt.c
new file mode 100644
index 000000000000..19180a455f40
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/timer_interrupt.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_experimental.h"
+
+char _license[] SEC("license") = "GPL";
+
+#define CLOCK_MONOTONIC 1
+
+int preempt_count;
+int in_interrupt;
+int in_interrupt_cb;
+
+struct elem {
+ struct bpf_timer t;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} array SEC(".maps");
+
+static int timer_in_interrupt(void *map, int *key, struct bpf_timer *timer)
+{
+ preempt_count = get_preempt_count();
+ in_interrupt_cb = bpf_in_interrupt();
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(test_timer_interrupt)
+{
+ struct bpf_timer *timer;
+ int key = 0;
+
+ timer = bpf_map_lookup_elem(&array, &key);
+ if (!timer)
+ return 0;
+
+ in_interrupt = bpf_in_interrupt();
+ bpf_timer_init(timer, &array, CLOCK_MONOTONIC);
+ bpf_timer_set_callback(timer, timer_in_interrupt);
+ bpf_timer_start(timer, 0, 0);
+ return 0;
+}
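
Editor's note: the fentry program samples the interrupt state when arming
the timer, and the callback samples it again when the hrtimer fires
(softirq context), so userspace can compare the two. A sketch of the
presumed assertions (field names from the program above, ASSERT_* from
the selftests harness):

    ASSERT_FALSE(skel->bss->in_interrupt, "prog runs outside interrupt");
    ASSERT_TRUE(skel->bss->in_interrupt_cb, "timer cb runs in interrupt");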
diff --git a/tools/testing/selftests/bpf/progs/tracing_failure.c b/tools/testing/selftests/bpf/progs/tracing_failure.c
index d41665d2ec8c..65e485c4468c 100644
--- a/tools/testing/selftests/bpf/progs/tracing_failure.c
+++ b/tools/testing/selftests/bpf/progs/tracing_failure.c
@@ -18,3 +18,15 @@ int BPF_PROG(test_spin_unlock, struct bpf_spin_lock *lock)
{
return 0;
}
+
+SEC("?fentry/__rcu_read_lock")
+int BPF_PROG(tracing_deny)
+{
+ return 0;
+}
+
+SEC("?fexit/do_exit")
+int BPF_PROG(fexit_noreturns)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/tracing_struct.c b/tools/testing/selftests/bpf/progs/tracing_struct.c
index c435a3a8328a..d460732e2023 100644
--- a/tools/testing/selftests/bpf/progs/tracing_struct.c
+++ b/tools/testing/selftests/bpf/progs/tracing_struct.c
@@ -18,6 +18,18 @@ struct bpf_testmod_struct_arg_3 {
int b[];
};
+union bpf_testmod_union_arg_1 {
+ char a;
+ short b;
+ struct bpf_testmod_struct_arg_1 arg;
+};
+
+union bpf_testmod_union_arg_2 {
+ int a;
+ long b;
+ struct bpf_testmod_struct_arg_2 arg;
+};
+
long t1_a_a, t1_a_b, t1_b, t1_c, t1_ret, t1_nregs;
__u64 t1_reg0, t1_reg1, t1_reg2, t1_reg3;
long t2_a, t2_b_a, t2_b_b, t2_c, t2_ret;
@@ -26,6 +38,9 @@ long t4_a_a, t4_b, t4_c, t4_d, t4_e_a, t4_e_b, t4_ret;
long t5_ret;
int t6;
+long ut1_a_a, ut1_b, ut1_c;
+long ut2_a, ut2_b_a, ut2_b_b;
+
SEC("fentry/bpf_testmod_test_struct_arg_1")
int BPF_PROG2(test_struct_arg_1, struct bpf_testmod_struct_arg_2, a, int, b, int, c)
{
@@ -130,4 +145,22 @@ int BPF_PROG2(test_struct_arg_11, struct bpf_testmod_struct_arg_3 *, a)
return 0;
}
+SEC("fexit/bpf_testmod_test_union_arg_1")
+int BPF_PROG2(test_union_arg_1, union bpf_testmod_union_arg_1, a, int, b, int, c)
+{
+ ut1_a_a = a.arg.a;
+ ut1_b = b;
+ ut1_c = c;
+ return 0;
+}
+
+SEC("fexit/bpf_testmod_test_union_arg_2")
+int BPF_PROG2(test_union_arg_2, int, a, union bpf_testmod_union_arg_2, b)
+{
+ ut2_a = a;
+ ut2_b_a = b.arg.a;
+ ut2_b_b = b.arg.b;
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/trigger_bench.c b/tools/testing/selftests/bpf/progs/trigger_bench.c
index 044a6d78923e..2898b3749d07 100644
--- a/tools/testing/selftests/bpf/progs/trigger_bench.c
+++ b/tools/testing/selftests/bpf/progs/trigger_bench.c
@@ -42,12 +42,14 @@ int bench_trigger_uprobe_multi(void *ctx)
const volatile int batch_iters = 0;
SEC("?raw_tp")
-int trigger_count(void *ctx)
+int trigger_kernel_count(void *ctx)
{
int i;
- for (i = 0; i < batch_iters; i++)
+ for (i = 0; i < batch_iters; i++) {
inc_counter();
+ bpf_get_numa_node_id();
+ }
return 0;
}
@@ -97,6 +99,12 @@ int bench_trigger_kprobe_multi(void *ctx)
return 0;
}
+SEC("?kprobe.multi/bpf_get_numa_node_id")
+int bench_kprobe_multi_empty(void *ctx)
+{
+ return 0;
+}
+
SEC("?kretprobe.multi/bpf_get_numa_node_id")
int bench_trigger_kretprobe_multi(void *ctx)
{
@@ -104,6 +112,12 @@ int bench_trigger_kretprobe_multi(void *ctx)
return 0;
}
+SEC("?kretprobe.multi/bpf_get_numa_node_id")
+int bench_kretprobe_multi_empty(void *ctx)
+{
+ return 0;
+}
+
SEC("?fentry/bpf_get_numa_node_id")
int bench_trigger_fentry(void *ctx)
{
diff --git a/tools/testing/selftests/bpf/progs/uprobe_syscall.c b/tools/testing/selftests/bpf/progs/uprobe_syscall.c
index 8a4fa6c7ef59..e08c31669e5a 100644
--- a/tools/testing/selftests/bpf/progs/uprobe_syscall.c
+++ b/tools/testing/selftests/bpf/progs/uprobe_syscall.c
@@ -7,8 +7,8 @@ struct pt_regs regs;
char _license[] SEC("license") = "GPL";
-SEC("uretprobe//proc/self/exe:uretprobe_regs_trigger")
-int uretprobe(struct pt_regs *ctx)
+SEC("uprobe")
+int probe(struct pt_regs *ctx)
{
__builtin_memcpy(&regs, ctx, sizeof(regs));
return 0;
diff --git a/tools/testing/selftests/bpf/progs/uprobe_syscall_executed.c b/tools/testing/selftests/bpf/progs/uprobe_syscall_executed.c
index 0d7f1a7db2e2..915d38591bf6 100644
--- a/tools/testing/selftests/bpf/progs/uprobe_syscall_executed.c
+++ b/tools/testing/selftests/bpf/progs/uprobe_syscall_executed.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/usdt.bpf.h>
#include <string.h>
struct pt_regs regs;
@@ -8,10 +10,64 @@ struct pt_regs regs;
char _license[] SEC("license") = "GPL";
int executed = 0;
+int pid;
+
+SEC("uprobe")
+int BPF_UPROBE(test_uprobe)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ executed++;
+ return 0;
+}
+
+SEC("uretprobe")
+int BPF_URETPROBE(test_uretprobe)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ executed++;
+ return 0;
+}
+
+SEC("uprobe.multi")
+int test_uprobe_multi(struct pt_regs *ctx)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ executed++;
+ return 0;
+}
SEC("uretprobe.multi")
-int test(struct pt_regs *regs)
+int test_uretprobe_multi(struct pt_regs *ctx)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ executed++;
+ return 0;
+}
+
+SEC("uprobe.session")
+int test_uprobe_session(struct pt_regs *ctx)
{
- executed = 1;
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ executed++;
+ return 0;
+}
+
+SEC("usdt")
+int test_usdt(struct pt_regs *ctx)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ executed++;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/uretprobe_stack.c b/tools/testing/selftests/bpf/progs/uretprobe_stack.c
index 9fdcf396b8f4..a2951e2f1711 100644
--- a/tools/testing/selftests/bpf/progs/uretprobe_stack.c
+++ b/tools/testing/selftests/bpf/progs/uretprobe_stack.c
@@ -26,8 +26,8 @@ int usdt_len;
SEC("uprobe//proc/self/exe:target_1")
int BPF_UPROBE(uprobe_1)
{
- /* target_1 is recursive wit depth of 2, so we capture two separate
- * stack traces, depending on which occurence it is
+ /* target_1 is recursive with depth of 2, so we capture two separate
+ * stack traces, depending on which occurrence it is
*/
static bool recur = false;
diff --git a/tools/testing/selftests/bpf/progs/verifier_and.c b/tools/testing/selftests/bpf/progs/verifier_and.c
index e97e518516b6..2b4fdca162be 100644
--- a/tools/testing/selftests/bpf/progs/verifier_and.c
+++ b/tools/testing/selftests/bpf/progs/verifier_and.c
@@ -85,8 +85,14 @@ l0_%=: r0 = r0; \
SEC("socket")
__description("check known subreg with unknown reg")
-__success __failure_unpriv __msg_unpriv("R1 !read_ok")
+__success __success_unpriv
__retval(0)
+#ifdef SPEC_V1
+__xlated_unpriv("if w0 < 0x1 goto pc+2")
+__xlated_unpriv("nospec") /* inserted to prevent `R1 !read_ok'` */
+__xlated_unpriv("goto pc-1") /* `r1 = *(u32*)(r1 + 512)`, sanitized dead code */
+__xlated_unpriv("r0 = 0")
+#endif
__naked void known_subreg_with_unknown_reg(void)
{
asm volatile (" \
diff --git a/tools/testing/selftests/bpf/progs/verifier_arena.c b/tools/testing/selftests/bpf/progs/verifier_arena.c
index 67509c5d3982..7f4827eede3c 100644
--- a/tools/testing/selftests/bpf/progs/verifier_arena.c
+++ b/tools/testing/selftests/bpf/progs/verifier_arena.c
@@ -3,6 +3,7 @@
#define BPF_NO_KFUNC_PROTOTYPES
#include <vmlinux.h>
+#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
@@ -114,6 +115,111 @@ int basic_alloc3(void *ctx)
return 0;
}
+SEC("syscall")
+__success __retval(0)
+int basic_reserve1(void *ctx)
+{
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+ char __arena *page;
+ int ret;
+
+ page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+ if (!page)
+ return 1;
+
+ page += __PAGE_SIZE;
+
+ /* Reserve the second page */
+ ret = bpf_arena_reserve_pages(&arena, page, 1);
+ if (ret)
+ return 2;
+
+ /* Try to explicitly allocate the reserved page. */
+ page = bpf_arena_alloc_pages(&arena, page, 1, NUMA_NO_NODE, 0);
+ if (page)
+ return 3;
+
+	/* Try to implicitly allocate the page (since there are only 2 of them). */
+ page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+ if (page)
+ return 4;
+#endif
+ return 0;
+}
+
+SEC("syscall")
+__success __retval(0)
+int basic_reserve2(void *ctx)
+{
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+ char __arena *page;
+ int ret;
+
+ page = arena_base(&arena);
+ ret = bpf_arena_reserve_pages(&arena, page, 1);
+ if (ret)
+ return 1;
+
+ page = bpf_arena_alloc_pages(&arena, page, 1, NUMA_NO_NODE, 0);
+ if ((u64)page)
+ return 2;
+#endif
+ return 0;
+}
+
+/* Reserve the same page twice, should return -EBUSY. */
+SEC("syscall")
+__success __retval(0)
+int reserve_twice(void *ctx)
+{
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+ char __arena *page;
+ int ret;
+
+ page = arena_base(&arena);
+
+ ret = bpf_arena_reserve_pages(&arena, page, 1);
+ if (ret)
+ return 1;
+
+ ret = bpf_arena_reserve_pages(&arena, page, 1);
+ if (ret != -EBUSY)
+ return 2;
+#endif
+ return 0;
+}
+
+/* Try to reserve past the end of the arena. */
+SEC("syscall")
+__success __retval(0)
+int reserve_invalid_region(void *ctx)
+{
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+ char __arena *page;
+ int ret;
+
+ /* Try a NULL pointer. */
+ ret = bpf_arena_reserve_pages(&arena, NULL, 3);
+ if (ret != -EINVAL)
+ return 1;
+
+ page = arena_base(&arena);
+
+ ret = bpf_arena_reserve_pages(&arena, page, 3);
+ if (ret != -EINVAL)
+ return 2;
+
+ ret = bpf_arena_reserve_pages(&arena, page, 4096);
+ if (ret != -EINVAL)
+ return 3;
+
+ ret = bpf_arena_reserve_pages(&arena, page, (1ULL << 32) - 1);
+ if (ret != -EINVAL)
+ return 4;
+#endif
+ return 0;
+}
+
SEC("iter.s/bpf_map")
__success __log_level(2)
int iter_maps1(struct bpf_iter__bpf_map *ctx)
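
Editor's note: taken together, the reservation tests above pin down a
contract: reserved pages cannot be allocated, explicitly or implicitly;
double reservation returns -EBUSY; out-of-range requests return -EINVAL;
and bpf_arena_free_pages() releases reservations just like allocations.
A compressed sketch of that contract (same calls as the tests use):

    char __arena *base = arena_base(&arena);

    if (bpf_arena_reserve_pages(&arena, base, 1))      /* 0 on success */
            return 1;
    if (bpf_arena_alloc_pages(&arena, base, 1, NUMA_NO_NODE, 0))
            return 2;                                  /* must fail: reserved */
    if (bpf_arena_reserve_pages(&arena, base, 1) != -EBUSY)
            return 3;                                  /* already reserved */
    bpf_arena_free_pages(&arena, base, 1);             /* releases the hold */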
diff --git a/tools/testing/selftests/bpf/progs/verifier_arena_large.c b/tools/testing/selftests/bpf/progs/verifier_arena_large.c
index f94f30cf1bb8..f19e15400b3e 100644
--- a/tools/testing/selftests/bpf/progs/verifier_arena_large.c
+++ b/tools/testing/selftests/bpf/progs/verifier_arena_large.c
@@ -67,6 +67,104 @@ int big_alloc1(void *ctx)
return 0;
}
+/* Try to access a reserved page. Behavior should be identical with accessing unallocated pages. */
+SEC("syscall")
+__success __retval(0)
+int access_reserved(void *ctx)
+{
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+ volatile char __arena *page;
+ char __arena *base;
+ const size_t len = 4;
+ int ret, i;
+
+ /* Get a separate region of the arena. */
+ page = base = arena_base(&arena) + 16384 * PAGE_SIZE;
+
+ ret = bpf_arena_reserve_pages(&arena, base, len);
+ if (ret)
+ return 1;
+
+ /* Try to dirty reserved memory. */
+ for (i = 0; i < len && can_loop; i++)
+ *page = 0x5a;
+
+ for (i = 0; i < len && can_loop; i++) {
+ page = (volatile char __arena *)(base + i * PAGE_SIZE);
+
+ /*
+ * Error out in case either the write went through,
+ * or the address has random garbage.
+ */
+ if (*page == 0x5a)
+ return 2 + 2 * i;
+
+ if (*page)
+ return 2 + 2 * i + 1;
+ }
+#endif
+ return 0;
+}
+
+/* Try to allocate a region overlapping with a reservation. */
+SEC("syscall")
+__success __retval(0)
+int request_partially_reserved(void *ctx)
+{
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+ volatile char __arena *page;
+ char __arena *base;
+ int ret;
+
+ /* Add an arbitrary page offset. */
+ page = base = arena_base(&arena) + 4096 * __PAGE_SIZE;
+
+ ret = bpf_arena_reserve_pages(&arena, base + 3 * __PAGE_SIZE, 4);
+ if (ret)
+ return 1;
+
+ page = bpf_arena_alloc_pages(&arena, base, 5, NUMA_NO_NODE, 0);
+ if ((u64)page != 0ULL)
+ return 2;
+#endif
+ return 0;
+}
+
+SEC("syscall")
+__success __retval(0)
+int free_reserved(void *ctx)
+{
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+ char __arena *addr;
+ char __arena *page;
+ int ret;
+
+ /* Add an arbitrary page offset. */
+ addr = arena_base(&arena) + 32768 * __PAGE_SIZE;
+
+ page = bpf_arena_alloc_pages(&arena, addr, 2, NUMA_NO_NODE, 0);
+ if (!page)
+ return 1;
+
+ ret = bpf_arena_reserve_pages(&arena, addr + 2 * __PAGE_SIZE, 2);
+ if (ret)
+ return 2;
+
+ /*
+ * Reserved and allocated pages should be interchangeable for
+ * bpf_arena_free_pages(). Free a reserved and an allocated
+ * page with a single call.
+ */
+	bpf_arena_free_pages(&arena, addr + __PAGE_SIZE, 2);
+
+ /* The free call above should have succeeded, so this allocation should too. */
+ page = bpf_arena_alloc_pages(&arena, addr + __PAGE_SIZE, 2, NUMA_NO_NODE, 0);
+ if (!page)
+ return 3;
+#endif
+ return 0;
+}
+
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
#define PAGE_CNT 100
__u8 __arena * __arena page[PAGE_CNT]; /* occupies the first page */
@@ -142,6 +240,7 @@ int big_alloc2(void *ctx)
return 5;
bpf_arena_free_pages(&arena, (void __arena *)pg, 2);
page[i] = NULL;
+ barrier();
page[i + 1] = NULL;
cond_break;
}
diff --git a/tools/testing/selftests/bpf/progs/verifier_async_cb_context.c b/tools/testing/selftests/bpf/progs/verifier_async_cb_context.c
new file mode 100644
index 000000000000..7efa9521105e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_async_cb_context.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+char _license[] SEC("license") = "GPL";
+
+/* Timer tests */
+
+struct timer_elem {
+ struct bpf_timer t;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct timer_elem);
+} timer_map SEC(".maps");
+
+static int timer_cb(void *map, int *key, struct bpf_timer *timer)
+{
+ u32 data;
+	/* Timer callbacks are never sleepable, even from sleepable programs */
+ bpf_copy_from_user(&data, sizeof(data), NULL);
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test1")
+__failure __msg("helper call might sleep in a non-sleepable prog")
+int timer_non_sleepable_prog(void *ctx)
+{
+ struct timer_elem *val;
+ int key = 0;
+
+ val = bpf_map_lookup_elem(&timer_map, &key);
+ if (!val)
+ return 0;
+
+ bpf_timer_init(&val->t, &timer_map, 0);
+ bpf_timer_set_callback(&val->t, timer_cb);
+ return 0;
+}
+
+SEC("lsm.s/file_open")
+__failure __msg("helper call might sleep in a non-sleepable prog")
+int timer_sleepable_prog(void *ctx)
+{
+ struct timer_elem *val;
+ int key = 0;
+
+ val = bpf_map_lookup_elem(&timer_map, &key);
+ if (!val)
+ return 0;
+
+ bpf_timer_init(&val->t, &timer_map, 0);
+ bpf_timer_set_callback(&val->t, timer_cb);
+ return 0;
+}
+
+/* Workqueue tests */
+
+struct wq_elem {
+ struct bpf_wq w;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct wq_elem);
+} wq_map SEC(".maps");
+
+static int wq_cb(void *map, int *key, void *value)
+{
+ u32 data;
+ /* Workqueue callbacks are always sleepable, even from non-sleepable programs */
+ bpf_copy_from_user(&data, sizeof(data), NULL);
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test1")
+__success
+int wq_non_sleepable_prog(void *ctx)
+{
+ struct wq_elem *val;
+ int key = 0;
+
+ val = bpf_map_lookup_elem(&wq_map, &key);
+ if (!val)
+ return 0;
+
+ if (bpf_wq_init(&val->w, &wq_map, 0) != 0)
+ return 0;
+ if (bpf_wq_set_callback_impl(&val->w, wq_cb, 0, NULL) != 0)
+ return 0;
+ return 0;
+}
+
+SEC("lsm.s/file_open")
+__success
+int wq_sleepable_prog(void *ctx)
+{
+ struct wq_elem *val;
+ int key = 0;
+
+ val = bpf_map_lookup_elem(&wq_map, &key);
+ if (!val)
+ return 0;
+
+ if (bpf_wq_init(&val->w, &wq_map, 0) != 0)
+ return 0;
+ if (bpf_wq_set_callback_impl(&val->w, wq_cb, 0, NULL) != 0)
+ return 0;
+ return 0;
+}
+
+/* Task work tests */
+
+struct task_work_elem {
+ struct bpf_task_work tw;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct task_work_elem);
+} task_work_map SEC(".maps");
+
+static int task_work_cb(struct bpf_map *map, void *key, void *value)
+{
+ u32 data;
+ /* Task work callbacks are always sleepable, even from non-sleepable programs */
+ bpf_copy_from_user(&data, sizeof(data), NULL);
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test1")
+__success
+int task_work_non_sleepable_prog(void *ctx)
+{
+ struct task_work_elem *val;
+ struct task_struct *task;
+ int key = 0;
+
+ val = bpf_map_lookup_elem(&task_work_map, &key);
+ if (!val)
+ return 0;
+
+ task = bpf_get_current_task_btf();
+ if (!task)
+ return 0;
+
+ bpf_task_work_schedule_resume_impl(task, &val->tw, &task_work_map, task_work_cb, NULL);
+ return 0;
+}
+
+SEC("lsm.s/file_open")
+__success
+int task_work_sleepable_prog(void *ctx)
+{
+ struct task_work_elem *val;
+ struct task_struct *task;
+ int key = 0;
+
+ val = bpf_map_lookup_elem(&task_work_map, &key);
+ if (!val)
+ return 0;
+
+ task = bpf_get_current_task_btf();
+ if (!task)
+ return 0;
+
+ bpf_task_work_schedule_resume_impl(task, &val->tw, &task_work_map, task_work_cb, NULL);
+ return 0;
+}
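
Editor's note: the matrix this file encodes is worth stating once; the
sleepable-ness of an async callback follows the callback mechanism, not
the program that arms it. In summary, as the expectations above assert:

    /* mechanism        callback context     bpf_copy_from_user()
     * bpf_timer        never sleepable      rejected (both prog types)
     * bpf_wq           always sleepable     allowed  (both prog types)
     * bpf_task_work    always sleepable     allowed  (both prog types)
     */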
diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds.c b/tools/testing/selftests/bpf/progs/verifier_bounds.c
index 0eb33bb801b5..411a18437d7e 100644
--- a/tools/testing/selftests/bpf/progs/verifier_bounds.c
+++ b/tools/testing/selftests/bpf/progs/verifier_bounds.c
@@ -2,6 +2,7 @@
/* Converted from tools/testing/selftests/bpf/verifier/bounds.c */
#include <linux/bpf.h>
+#include <../../../include/linux/filter.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
@@ -620,8 +621,14 @@ l1_%=: exit; \
SEC("socket")
__description("bounds check mixed 32bit and 64bit arithmetic. test1")
-__success __failure_unpriv __msg_unpriv("R0 invalid mem access 'scalar'")
+__success __success_unpriv
__retval(0)
+#ifdef SPEC_V1
+__xlated_unpriv("goto pc+2")
+__xlated_unpriv("nospec") /* inserted to prevent `R0 invalid mem access 'scalar'` */
+__xlated_unpriv("goto pc-1") /* sanitized dead code */
+__xlated_unpriv("exit")
+#endif
__naked void _32bit_and_64bit_arithmetic_test1(void)
{
asm volatile (" \
@@ -643,8 +650,14 @@ l1_%=: exit; \
SEC("socket")
__description("bounds check mixed 32bit and 64bit arithmetic. test2")
-__success __failure_unpriv __msg_unpriv("R0 invalid mem access 'scalar'")
+__success __success_unpriv
__retval(0)
+#ifdef SPEC_V1
+__xlated_unpriv("goto pc+2")
+__xlated_unpriv("nospec") /* inserted to prevent `R0 invalid mem access 'scalar'` */
+__xlated_unpriv("goto pc-1") /* sanitized dead code */
+__xlated_unpriv("exit")
+#endif
__naked void _32bit_and_64bit_arithmetic_test2(void)
{
asm volatile (" \
@@ -691,9 +704,14 @@ l0_%=: r0 = 0; \
SEC("socket")
__description("bounds check for reg = 0, reg xor 1")
-__success __failure_unpriv
-__msg_unpriv("R0 min value is outside of the allowed memory range")
+__success __success_unpriv
__retval(0)
+#ifdef SPEC_V1
+__xlated_unpriv("if r1 != 0x0 goto pc+2")
+__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */
+__xlated_unpriv("goto pc-1") /* sanitized dead code */
+__xlated_unpriv("r0 = 0")
+#endif
__naked void reg_0_reg_xor_1(void)
{
asm volatile (" \
@@ -719,9 +737,14 @@ l1_%=: r0 = 0; \
SEC("socket")
__description("bounds check for reg32 = 0, reg32 xor 1")
-__success __failure_unpriv
-__msg_unpriv("R0 min value is outside of the allowed memory range")
+__success __success_unpriv
__retval(0)
+#ifdef SPEC_V1
+__xlated_unpriv("if w1 != 0x0 goto pc+2")
+__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */
+__xlated_unpriv("goto pc-1") /* sanitized dead code */
+__xlated_unpriv("r0 = 0")
+#endif
__naked void reg32_0_reg32_xor_1(void)
{
asm volatile (" \
@@ -747,9 +770,14 @@ l1_%=: r0 = 0; \
SEC("socket")
__description("bounds check for reg = 2, reg xor 3")
-__success __failure_unpriv
-__msg_unpriv("R0 min value is outside of the allowed memory range")
+__success __success_unpriv
__retval(0)
+#ifdef SPEC_V1
+__xlated_unpriv("if r1 > 0x0 goto pc+2")
+__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */
+__xlated_unpriv("goto pc-1") /* sanitized dead code */
+__xlated_unpriv("r0 = 0")
+#endif
__naked void reg_2_reg_xor_3(void)
{
asm volatile (" \
@@ -829,9 +857,14 @@ l1_%=: r0 = 0; \
SEC("socket")
__description("bounds check for reg > 0, reg xor 3")
-__success __failure_unpriv
-__msg_unpriv("R0 min value is outside of the allowed memory range")
+__success __success_unpriv
__retval(0)
+#ifdef SPEC_V1
+__xlated_unpriv("if r1 >= 0x0 goto pc+2")
+__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */
+__xlated_unpriv("goto pc-1") /* sanitized dead code */
+__xlated_unpriv("r0 = 0")
+#endif
__naked void reg_0_reg_xor_3(void)
{
asm volatile (" \
@@ -858,9 +891,14 @@ l1_%=: r0 = 0; \
SEC("socket")
__description("bounds check for reg32 > 0, reg32 xor 3")
-__success __failure_unpriv
-__msg_unpriv("R0 min value is outside of the allowed memory range")
+__success __success_unpriv
__retval(0)
+#ifdef SPEC_V1
+__xlated_unpriv("if w1 >= 0x0 goto pc+2")
+__xlated_unpriv("nospec") /* inserted to prevent `R0 min value is outside of the allowed memory range` */
+__xlated_unpriv("goto pc-1") /* sanitized dead code */
+__xlated_unpriv("r0 = 0")
+#endif
__naked void reg32_0_reg32_xor_3(void)
{
asm volatile (" \
@@ -888,7 +926,7 @@ l1_%=: r0 = 0; \
SEC("socket")
__description("bounds check for non const xor src dst")
__success __log_level(2)
-__msg("5: (af) r0 ^= r6 ; R0_w=scalar(smin=smin32=0,smax=umax=smax32=umax32=431,var_off=(0x0; 0x1af))")
+__msg("5: (af) r0 ^= r6 ; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=431,var_off=(0x0; 0x1af))")
__naked void non_const_xor_src_dst(void)
{
asm volatile (" \
@@ -909,7 +947,7 @@ __naked void non_const_xor_src_dst(void)
SEC("socket")
__description("bounds check for non const or src dst")
__success __log_level(2)
-__msg("5: (4f) r0 |= r6 ; R0_w=scalar(smin=smin32=0,smax=umax=smax32=umax32=431,var_off=(0x0; 0x1af))")
+__msg("5: (4f) r0 |= r6 ; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=431,var_off=(0x0; 0x1af))")
__naked void non_const_or_src_dst(void)
{
asm volatile (" \
@@ -930,7 +968,7 @@ __naked void non_const_or_src_dst(void)
SEC("socket")
__description("bounds check for non const mul regs")
__success __log_level(2)
-__msg("5: (2f) r0 *= r6 ; R0_w=scalar(smin=smin32=0,smax=umax=smax32=umax32=3825,var_off=(0x0; 0xfff))")
+__msg("5: (2f) r0 *= r6 ; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=3825,var_off=(0x0; 0xfff))")
__naked void non_const_mul_regs(void)
{
asm volatile (" \
@@ -1028,7 +1066,7 @@ l0_%=: r0 = 0; \
SEC("xdp")
__description("bound check with JMP_JSLT for crossing 64-bit signed boundary")
__success __retval(0)
-__flag(!BPF_F_TEST_REG_INVARIANTS) /* known invariants violation */
+__flag(BPF_F_TEST_REG_INVARIANTS)
__naked void crossing_64_bit_signed_boundary_2(void)
{
asm volatile (" \
@@ -1203,7 +1241,7 @@ l0_%=: r0 = 0; \
SEC("tc")
__description("multiply mixed sign bounds. test 1")
__success __log_level(2)
-__msg("r6 *= r7 {{.*}}; R6_w=scalar(smin=umin=0x1bc16d5cd4927ee1,smax=umax=0x1bc16d674ec80000,smax32=0x7ffffeff,umax32=0xfffffeff,var_off=(0x1bc16d4000000000; 0x3ffffffeff))")
+__msg("r6 *= r7 {{.*}}; R6=scalar(smin=umin=0x1bc16d5cd4927ee1,smax=umax=0x1bc16d674ec80000,smax32=0x7ffffeff,umax32=0xfffffeff,var_off=(0x1bc16d4000000000; 0x3ffffffeff))")
__naked void mult_mixed0_sign(void)
{
asm volatile (
@@ -1226,7 +1264,7 @@ __naked void mult_mixed0_sign(void)
SEC("tc")
__description("multiply mixed sign bounds. test 2")
__success __log_level(2)
-__msg("r6 *= r7 {{.*}}; R6_w=scalar(smin=smin32=-100,smax=smax32=200)")
+__msg("r6 *= r7 {{.*}}; R6=scalar(smin=smin32=-100,smax=smax32=200)")
__naked void mult_mixed1_sign(void)
{
asm volatile (
@@ -1249,7 +1287,7 @@ __naked void mult_mixed1_sign(void)
SEC("tc")
__description("multiply negative bounds")
__success __log_level(2)
-__msg("r6 *= r7 {{.*}}; R6_w=scalar(smin=umin=smin32=umin32=0x3ff280b0,smax=umax=smax32=umax32=0x3fff0001,var_off=(0x3ff00000; 0xf81ff))")
+__msg("r6 *= r7 {{.*}}; R6=scalar(smin=umin=smin32=umin32=0x3ff280b0,smax=umax=smax32=umax32=0x3fff0001,var_off=(0x3ff00000; 0xf81ff))")
__naked void mult_sign_bounds(void)
{
asm volatile (
@@ -1273,7 +1311,7 @@ __naked void mult_sign_bounds(void)
SEC("tc")
__description("multiply bounds that don't cross signed boundary")
__success __log_level(2)
-__msg("r8 *= r6 {{.*}}; R6_w=scalar(smin=smin32=0,smax=umax=smax32=umax32=11,var_off=(0x0; 0xb)) R8_w=scalar(smin=0,smax=umax=0x7b96bb0a94a3a7cd,var_off=(0x0; 0x7fffffffffffffff))")
+__msg("r8 *= r6 {{.*}}; R6=scalar(smin=smin32=0,smax=umax=smax32=umax32=11,var_off=(0x0; 0xb)) R8=scalar(smin=0,smax=umax=0x7b96bb0a94a3a7cd,var_off=(0x0; 0x7fffffffffffffff))")
__naked void mult_no_sign_crossing(void)
{
asm volatile (
@@ -1293,7 +1331,7 @@ __naked void mult_no_sign_crossing(void)
SEC("tc")
__description("multiplication overflow, result in unbounded reg. test 1")
__success __log_level(2)
-__msg("r6 *= r7 {{.*}}; R6_w=scalar()")
+__msg("r6 *= r7 {{.*}}; R6=scalar()")
__naked void mult_unsign_ovf(void)
{
asm volatile (
@@ -1315,7 +1353,7 @@ __naked void mult_unsign_ovf(void)
SEC("tc")
__description("multiplication overflow, result in unbounded reg. test 2")
__success __log_level(2)
-__msg("r6 *= r7 {{.*}}; R6_w=scalar()")
+__msg("r6 *= r7 {{.*}}; R6=scalar()")
__naked void mult_sign_ovf(void)
{
asm volatile (
@@ -1334,4 +1372,495 @@ __naked void mult_sign_ovf(void)
__imm(bpf_skb_store_bytes)
: __clobber_all);
}
+
+SEC("socket")
+__description("64-bit addition, all outcomes overflow")
+__success __log_level(2)
+__msg("5: (0f) r3 += r3 {{.*}} R3=scalar(umin=0x4000000000000000,umax=0xfffffffffffffffe)")
+__retval(0)
+__naked void add64_full_overflow(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "r4 = r0;"
+ "r3 = 0xa000000000000000 ll;"
+ "r3 |= r4;"
+ "r3 += r3;"
+ "r0 = 0;"
+ "exit"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("64-bit addition, partial overflow, result in unbounded reg")
+__success __log_level(2)
+__msg("4: (0f) r3 += r3 {{.*}} R3=scalar()")
+__retval(0)
+__naked void add64_partial_overflow(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "r4 = r0;"
+ "r3 = 2;"
+ "r3 |= r4;"
+ "r3 += r3;"
+ "r0 = 0;"
+ "exit"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("32-bit addition overflow, all outcomes overflow")
+__success __log_level(2)
+__msg("4: (0c) w3 += w3 {{.*}} R3=scalar(smin=umin=umin32=0x40000000,smax=umax=umax32=0xfffffffe,var_off=(0x0; 0xffffffff))")
+__retval(0)
+__naked void add32_full_overflow(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "w4 = w0;"
+ "w3 = 0xa0000000;"
+ "w3 |= w4;"
+ "w3 += w3;"
+ "r0 = 0;"
+ "exit"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("32-bit addition, partial overflow, result in unbounded u32 bounds")
+__success __log_level(2)
+__msg("4: (0c) w3 += w3 {{.*}} R3=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff))")
+__retval(0)
+__naked void add32_partial_overflow(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "w4 = w0;"
+ "w3 = 2;"
+ "w3 |= w4;"
+ "w3 += w3;"
+ "r0 = 0;"
+ "exit"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("64-bit subtraction, all outcomes underflow")
+__success __log_level(2)
+__msg("6: (1f) r3 -= r1 {{.*}} R3=scalar(umin=1,umax=0x8000000000000000)")
+__retval(0)
+__naked void sub64_full_overflow(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "r1 = r0;"
+ "r2 = 0x8000000000000000 ll;"
+ "r1 |= r2;"
+ "r3 = 0;"
+ "r3 -= r1;"
+ "r0 = 0;"
+ "exit"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("64-bit subtraction, partial overflow, result in unbounded reg")
+__success __log_level(2)
+__msg("3: (1f) r3 -= r2 {{.*}} R3=scalar()")
+__retval(0)
+__naked void sub64_partial_overflow(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "r3 = r0;"
+ "r2 = 1;"
+ "r3 -= r2;"
+ "r0 = 0;"
+ "exit"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("32-bit subtraction overflow, all outcomes underflow")
+__success __log_level(2)
+__msg("5: (1c) w3 -= w1 {{.*}} R3=scalar(smin=umin=umin32=1,smax=umax=umax32=0x80000000,var_off=(0x0; 0xffffffff))")
+__retval(0)
+__naked void sub32_full_overflow(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "w1 = w0;"
+ "w2 = 0x80000000;"
+ "w1 |= w2;"
+ "w3 = 0;"
+ "w3 -= w1;"
+ "r0 = 0;"
+ "exit"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("32-bit subtraction, partial overflow, result in unbounded u32 bounds")
+__success __log_level(2)
+__msg("3: (1c) w3 -= w2 {{.*}} R3=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff))")
+__retval(0)
+__naked void sub32_partial_overflow(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "w3 = w0;"
+ "w2 = 1;"
+ "w3 -= w2;"
+ "r0 = 0;"
+ "exit"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("dead branch on jset, does not result in invariants violation error")
+__success __log_level(2)
+__retval(0) __flag(BPF_F_TEST_REG_INVARIANTS)
+__naked void jset_range_analysis(void)
+{
+ asm volatile (" \
+ call %[bpf_get_netns_cookie]; \
+ if r0 == 0 goto l0_%=; \
+ if r0 & 0xffffffff goto +0; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_netns_cookie)
+ : __clobber_all);
+}
+
+/* This test covers the bounds deduction on 64bits when the s64 and u64 ranges
+ * overlap on the negative side. At instruction 7, the ranges look as follows:
+ *
+ * 0 umin=0xfffffcf1 umax=0xff..ff6e U64_MAX
+ * | [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx] |
+ * |----------------------------|------------------------------|
+ * |xxxxxxxxxx] [xxxxxxxxxxxx|
+ * 0 smax=0xeffffeee smin=-655 -1
+ *
+ * We should therefore deduce the following new bounds:
+ *
+ * 0 u64=[0xff..ffd71;0xff..ff6e] U64_MAX
+ * | [xxx] |
+ * |----------------------------|------------------------------|
+ * | [xxx] |
+ * 0 s64=[-655;-146] -1
+ *
+ * Without deduction across the sign boundary, we end up with an invariant
+ * violation error.
+ */
+SEC("socket")
+__description("bounds deduction cross sign boundary, negative overlap")
+__success __log_level(2) __flag(BPF_F_TEST_REG_INVARIANTS)
+__msg("7: (1f) r0 -= r6 {{.*}} R0=scalar(smin=smin32=-655,smax=smax32=-146,umin=0xfffffffffffffd71,umax=0xffffffffffffff6e,umin32=0xfffffd71,umax32=0xffffff6e,var_off=(0xfffffffffffffc00; 0x3ff))")
+__retval(0)
+__naked void bounds_deduct_negative_overlap(void)
+{
+ asm volatile(" \
+ call %[bpf_get_prandom_u32]; \
+ w3 = w0; \
+ w6 = (s8)w0; \
+ r0 = (s8)r0; \
+ if w6 >= 0xf0000000 goto l0_%=; \
+ r0 += r6; \
+ r6 += 400; \
+ r0 -= r6; \
+ if r3 < r0 goto l0_%=; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+/* This test covers the bounds deduction on 64 bits when the s64 and u64 ranges
+ * overlap on the positive side. At instruction 3, the ranges look as follows:
+ *
+ * 0 umin=0 umax=0xffffffffffffff00 U64_MAX
+ * [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx] |
+ * |----------------------------|------------------------------|
+ * |xxxxxxxx] [xxxxxxxx|
+ * 0 smax=127 smin=-128 -1
+ *
+ * We should therefore deduce the following new bounds:
+ *
+ * 0 u64=[0;127] U64_MAX
+ * [xxxxxxxx] |
+ * |----------------------------|------------------------------|
+ * [xxxxxxxx] |
+ * 0 s64=[0;127] -1
+ *
+ * Without deduction across the sign boundary, the program is rejected due to
+ * the frame pointer write.
+ */
+SEC("socket")
+__description("bounds deduction cross sign boundary, positive overlap")
+__success __log_level(2) __flag(BPF_F_TEST_REG_INVARIANTS)
+__msg("3: (2d) if r0 > r1 {{.*}} R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=127,var_off=(0x0; 0x7f))")
+__retval(0)
+__naked void bounds_deduct_positive_overlap(void)
+{
+ asm volatile(" \
+ call %[bpf_get_prandom_u32]; \
+ r0 = (s8)r0; \
+ r1 = 0xffffffffffffff00; \
+ if r0 > r1 goto l0_%=; \
+ if r0 < 128 goto l0_%=; \
+ r10 = 0; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+/* This test is the same as above, but the s64 and u64 ranges overlap in two
+ * places. At instruction 3, the ranges look as follows:
+ *
+ * 0 umin=0 umax=0xffffffffffffff80 U64_MAX
+ * [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx] |
+ * |----------------------------|------------------------------|
+ * |xxxxxxxx] [xxxxxxxx|
+ * 0 smax=127 smin=-128 -1
+ *
+ * 0xffffffffffffff80 = (u64)-128. We therefore can't deduce anything new and
+ * the program should fail due to the frame pointer write.
+ */
+SEC("socket")
+__description("bounds deduction cross sign boundary, two overlaps")
+__failure __flag(BPF_F_TEST_REG_INVARIANTS)
+__msg("3: (2d) if r0 > r1 {{.*}} R0=scalar(smin=smin32=-128,smax=smax32=127,umax=0xffffffffffffff80)")
+__msg("frame pointer is read only")
+__naked void bounds_deduct_two_overlaps(void)
+{
+ asm volatile(" \
+ call %[bpf_get_prandom_u32]; \
+ r0 = (s8)r0; \
+ r1 = 0xffffffffffffff80; \
+ if r0 > r1 goto l0_%=; \
+ if r0 < 128 goto l0_%=; \
+ r10 = 0; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("dead jne branch due to disagreeing tnums")
+__success __log_level(2)
+__naked void jne_disagreeing_tnums(void *ctx)
+{
+ asm volatile(" \
+ call %[bpf_get_prandom_u32]; \
+ w0 = w0; \
+ r0 >>= 30; \
+ r0 <<= 30; \
+ r1 = r0; \
+ r1 += 1024; \
+ if r1 != r0 goto +1; \
+ r10 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("dead jeq branch due to disagreeing tnums")
+__success __log_level(2)
+__naked void jeq_disagreeing_tnums(void *ctx)
+{
+ asm volatile(" \
+ call %[bpf_get_prandom_u32]; \
+ w0 = w0; \
+ r0 >>= 30; \
+ r0 <<= 30; \
+ r1 = r0; \
+ r1 += 1024; \
+ if r1 == r0 goto +1; \
+ exit; \
+ r10 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("conditional jump on same register, branch taken")
+__not_msg("20: (b7) r0 = 1 {{.*}} R0=1")
+__success __log_level(2)
+__retval(0) __flag(BPF_F_TEST_REG_INVARIANTS)
+__naked void condition_jump_on_same_register(void *ctx)
+{
+ asm volatile(" \
+ call %[bpf_get_prandom_u32]; \
+ w8 = 0x80000000; \
+ r0 &= r8; \
+ if r0 == r0 goto +1; \
+ goto l1_%=; \
+ if r0 >= r0 goto +1; \
+ goto l1_%=; \
+ if r0 s>= r0 goto +1; \
+ goto l1_%=; \
+ if r0 <= r0 goto +1; \
+ goto l1_%=; \
+ if r0 s<= r0 goto +1; \
+ goto l1_%=; \
+ if r0 != r0 goto l1_%=; \
+ if r0 > r0 goto l1_%=; \
+ if r0 s> r0 goto l1_%=; \
+ if r0 < r0 goto l1_%=; \
+ if r0 s< r0 goto l1_%=; \
+l0_%=: r0 = 0; \
+ exit; \
+l1_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("jset on same register, constant value branch taken")
+__not_msg("7: (b7) r0 = 1 {{.*}} R0=1")
+__success __log_level(2)
+__retval(0) __flag(BPF_F_TEST_REG_INVARIANTS)
+__naked void jset_on_same_register_1(void *ctx)
+{
+ asm volatile(" \
+ r0 = 0; \
+ if r0 & r0 goto l1_%=; \
+ r0 = 1; \
+ if r0 & r0 goto +1; \
+ goto l1_%=; \
+l0_%=: r0 = 0; \
+ exit; \
+l1_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("jset on same register, scalar value branch taken")
+__not_msg("12: (b7) r0 = 1 {{.*}} R0=1")
+__success __log_level(2)
+__retval(0) __flag(BPF_F_TEST_REG_INVARIANTS)
+__naked void jset_on_same_register_2(void *ctx)
+{
+ asm volatile(" \
+ /* range [1;2] */ \
+ call %[bpf_get_prandom_u32]; \
+ r0 &= 0x1; \
+ r0 += 1; \
+ if r0 & r0 goto +1; \
+ goto l1_%=; \
+ /* range [-2;-1] */ \
+ call %[bpf_get_prandom_u32]; \
+ r0 &= 0x1; \
+ r0 -= 2; \
+ if r0 & r0 goto +1; \
+ goto l1_%=; \
+l0_%=: r0 = 0; \
+ exit; \
+l1_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("jset on same register, scalar value unknown branch 1")
+__msg("3: (b7) r0 = 0 {{.*}} R0=0")
+__msg("5: (b7) r0 = 1 {{.*}} R0=1")
+__success __log_level(2)
+__flag(BPF_F_TEST_REG_INVARIANTS)
+__naked void jset_on_same_register_3(void *ctx)
+{
+ asm volatile(" \
+ /* range [0;1] */ \
+ call %[bpf_get_prandom_u32]; \
+ r0 &= 0x1; \
+ if r0 & r0 goto l1_%=; \
+l0_%=: r0 = 0; \
+ exit; \
+l1_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("jset on same register, scalar value unknown branch 2")
+__msg("4: (b7) r0 = 0 {{.*}} R0=0")
+__msg("6: (b7) r0 = 1 {{.*}} R0=1")
+__success __log_level(2)
+__flag(BPF_F_TEST_REG_INVARIANTS)
+__naked void jset_on_same_register_4(void *ctx)
+{
+ asm volatile(" \
+ /* range [-1;0] */ \
+ call %[bpf_get_prandom_u32]; \
+ r0 &= 0x1; \
+ r0 -= 1; \
+ if r0 & r0 goto l1_%=; \
+l0_%=: r0 = 0; \
+ exit; \
+l1_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("jset on same register, scalar value unknown branch 3")
+__msg("4: (b7) r0 = 0 {{.*}} R0=0")
+__msg("6: (b7) r0 = 1 {{.*}} R0=1")
+__success __log_level(2)
+__flag(BPF_F_TEST_REG_INVARIANTS)
+__naked void jset_on_same_register_5(void *ctx)
+{
+ asm volatile(" \
+ /* range [-1;1] */ \
+ call %[bpf_get_prandom_u32]; \
+ r0 &= 0x2; \
+ r0 -= 1; \
+ if r0 & r0 goto l1_%=; \
+l0_%=: r0 = 0; \
+ exit; \
+l1_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
char _license[] SEC("license") = "GPL";
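
The three cross-sign tests above all exercise one deduction rule. As a minimal standalone sketch of that rule (an illustration of what the comments describe, not the kernel's implementation): view the s64 range as up to two u64 intervals, and tighten bounds only when exactly one of them overlaps the u64 range.

#include <stdint.h>
#include <stdio.h>

/* Hedged sketch of cross-sign-boundary bounds deduction; names and
 * structure are illustrative, not the kernel's. */
struct bounds { uint64_t umin, umax; int64_t smin, smax; };

static void deduce_cross_sign(struct bounds *b)
{
	/* The s64 range, reinterpreted as u64 intervals: [0, smax] when
	 * smax >= 0, and [(u64)smin, U64_MAX] when smin < 0. */
	int pos = b->smax >= 0 && b->umin <= (uint64_t)b->smax;
	int neg = b->smin < 0 && b->umax >= (uint64_t)b->smin;

	if (pos && !neg) {		/* overlap only on the positive side */
		if ((uint64_t)b->smax < b->umax)
			b->umax = (uint64_t)b->smax;
		if ((int64_t)b->umin > b->smin)
			b->smin = (int64_t)b->umin;
	} else if (neg && !pos) {	/* overlap only on the negative side */
		if ((uint64_t)b->smin > b->umin)
			b->umin = (uint64_t)b->smin;
		if ((int64_t)b->umax < b->smax)
			b->smax = (int64_t)b->umax;
	}				/* two overlaps: nothing to deduce */
}

int main(void)
{
	/* numbers from the "negative overlap" test comment */
	struct bounds b = {
		.umin = 0xfffffcf1, .umax = 0xffffffffffffff6eULL,
		.smin = -655, .smax = 0xeffffeee,
	};

	deduce_cross_sign(&b);
	printf("u64=[%#llx;%#llx] s64=[%lld;%lld]\n",
	       (unsigned long long)b.umin, (unsigned long long)b.umax,
	       (long long)b.smin, (long long)b.smax);
	return 0;
}

Run against the negative-overlap numbers, this prints u64=[0xfffffffffffffd71;0xffffffffffffff6e] s64=[-655;-146], matching the bounds expected by the __msg line, and for the two-overlaps case it leaves the bounds untouched.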
diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c b/tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c
index c506afbdd936..260a6df264e3 100644
--- a/tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c
+++ b/tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c
@@ -159,13 +159,16 @@ __failure_unpriv
__naked void deducing_bounds_from_const_10(void)
{
asm volatile (" \
+ r6 = r1; \
r0 = 0; \
if r0 s<= 0 goto l0_%=; \
-l0_%=: /* Marks reg as unknown. */ \
- r0 = -r0; \
- r0 -= r1; \
+l0_%=: /* Marks r0 as unknown. */ \
+ call %[bpf_get_prandom_u32]; \
+ r0 -= r6; \
exit; \
-" ::: __clobber_all);
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
}
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c b/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
index c258b0722e04..fb4fa465d67c 100644
--- a/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
+++ b/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
@@ -660,19 +660,24 @@ __naked void may_goto_interaction_x86_64(void)
SEC("raw_tp")
__arch_arm64
-__log_level(4) __msg("stack depth 16")
-/* may_goto counter at -16 */
-__xlated("0: *(u64 *)(r10 -16) =")
-__xlated("1: r1 = 1")
-__xlated("2: call bpf_get_smp_processor_id")
+__log_level(4) __msg("stack depth 24")
+/* may_goto counter at -24 */
+__xlated("0: *(u64 *)(r10 -24) =")
+/* may_goto timestamp at -16 */
+__xlated("1: *(u64 *)(r10 -16) =")
+__xlated("2: r1 = 1")
+__xlated("3: call bpf_get_smp_processor_id")
/* may_goto expansion starts */
-__xlated("3: r11 = *(u64 *)(r10 -16)")
-__xlated("4: if r11 == 0x0 goto pc+3")
-__xlated("5: r11 -= 1")
-__xlated("6: *(u64 *)(r10 -16) = r11")
+__xlated("4: r11 = *(u64 *)(r10 -24)")
+__xlated("5: if r11 == 0x0 goto pc+6")
+__xlated("6: r11 -= 1")
+__xlated("7: if r11 != 0x0 goto pc+2")
+__xlated("8: r11 = -24")
+__xlated("9: call unknown")
+__xlated("10: *(u64 *)(r10 -24) = r11")
/* may_goto expansion ends */
-__xlated("7: *(u64 *)(r10 -8) = r1")
-__xlated("8: exit")
+__xlated("11: *(u64 *)(r10 -8) = r1")
+__xlated("12: exit")
__success
__naked void may_goto_interaction_arm64(void)
{
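
Read as pseudo-C, the widened arm64 may_goto expansion asserted above behaves roughly as follows. This is a hedged sketch: arch_timeout_check is a stand-in name for the "call unknown" arch helper (which presumably consults the timestamp spilled at r10-16), not a real symbol.

	u64 cnt = *(u64 *)(fp - 24);		/* 4: load the may_goto counter */
	if (cnt == 0)
		goto target;			/* 5: budget exhausted, take the goto */
	cnt -= 1;				/* 6 */
	if (cnt == 0)				/* 7: skip the call while cnt != 0 */
		cnt = arch_timeout_check(-24);	/* 8-9: r11 = -24; call unknown */
	*(u64 *)(fp - 24) = cnt;		/* 10: store the counter back */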
diff --git a/tools/testing/selftests/bpf/progs/verifier_ctx.c b/tools/testing/selftests/bpf/progs/verifier_ctx.c
index a83809a1dbbf..5ebf7d9bcc55 100644
--- a/tools/testing/selftests/bpf/progs/verifier_ctx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_ctx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/ctx.c */
-#include <linux/bpf.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
@@ -218,4 +218,78 @@ __naked void null_check_8_null_bind(void)
: __clobber_all);
}
+#define narrow_load(type, ctx, field) \
+ SEC(type) \
+ __description("narrow load on field " #field " of " #ctx) \
+ __failure __msg("invalid bpf_context access") \
+ __naked void invalid_narrow_load##ctx##field(void) \
+ { \
+ asm volatile (" \
+ r1 = *(u32 *)(r1 + %[off]); \
+ r0 = 0; \
+ exit;" \
+ : \
+ : __imm_const(off, offsetof(struct ctx, field) + 4) \
+ : __clobber_all); \
+ }
+
+narrow_load("cgroup/getsockopt", bpf_sockopt, sk);
+narrow_load("cgroup/getsockopt", bpf_sockopt, optval);
+narrow_load("cgroup/getsockopt", bpf_sockopt, optval_end);
+narrow_load("tc", __sk_buff, sk);
+narrow_load("cgroup/bind4", bpf_sock_addr, sk);
+narrow_load("sockops", bpf_sock_ops, sk);
+narrow_load("sockops", bpf_sock_ops, skb_data);
+narrow_load("sockops", bpf_sock_ops, skb_data_end);
+narrow_load("sockops", bpf_sock_ops, skb_hwtstamp);
+
+#define unaligned_access(type, ctx, field) \
+ SEC(type) \
+ __description("unaligned access on field " #field " of " #ctx) \
+ __failure __msg("invalid bpf_context access") \
+ __naked void unaligned_ctx_access_##ctx##field(void) \
+ { \
+ asm volatile (" \
+ r1 = *(u%[size] *)(r1 + %[off]); \
+ r0 = 0; \
+ exit;" \
+ : \
+ : __imm_const(size, sizeof_field(struct ctx, field) * 8), \
+ __imm_const(off, offsetof(struct ctx, field) + 1) \
+ : __clobber_all); \
+ }
+
+unaligned_access("flow_dissector", __sk_buff, data);
+unaligned_access("netfilter", bpf_nf_ctx, skb);
+
+#define padding_access(type, ctx, prev_field, sz) \
+ SEC(type) \
+ __description("access on " #ctx " padding after " #prev_field) \
+ __naked void padding_ctx_access_##ctx(void) \
+ { \
+ asm volatile (" \
+ r1 = *(u%[size] *)(r1 + %[off]); \
+ r0 = 0; \
+ exit;" \
+ : \
+ : __imm_const(size, sz * 8), \
+ __imm_const(off, offsetofend(struct ctx, prev_field)) \
+ : __clobber_all); \
+ }
+
+__failure __msg("invalid bpf_context access")
+padding_access("cgroup/bind4", bpf_sock_addr, msg_src_ip6[3], 4);
+
+__success
+padding_access("sk_lookup", bpf_sk_lookup, remote_port, 2);
+
+__failure __msg("invalid bpf_context access")
+padding_access("tc", __sk_buff, tstamp_type, 2);
+
+__failure __msg("invalid bpf_context access")
+padding_access("cgroup/post_bind4", bpf_sock, dst_port, 2);
+
+__failure __msg("invalid bpf_context access")
+padding_access("sk_reuseport", sk_reuseport_md, hash, 4);
+
char _license[] SEC("license") = "GPL";
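
For one concrete case, expanding the narrow_load macro by hand gives roughly the following (a sketch of the generated test, assuming the same headers as the file above):

SEC("cgroup/getsockopt")
__description("narrow load on field sk of bpf_sockopt")
__failure __msg("invalid bpf_context access")
__naked void invalid_narrow_loadbpf_sockoptsk(void)
{
	asm volatile (
		/* sk is an 8-byte pointer field; reading only its upper
		 * 4 bytes (field offset + 4) is a narrow load */
		"r1 = *(u32 *)(r1 + %[off]);"
		"r0 = 0;"
		"exit;"
		:
		: __imm_const(off, offsetof(struct bpf_sockopt, sk) + 4)
		: __clobber_all);
}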
diff --git a/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c b/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c
index 28b602ac9cbe..911caa8fd1b7 100644
--- a/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c
+++ b/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/direct_packet_access.c */
+#include <linux/if_ether.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
@@ -800,4 +801,62 @@ l0_%=: /* exit(0) */ \
: __clobber_all);
}
+#define access_test_non_linear(name, type, desc, retval, linear_sz, off) \
+ SEC(type) \
+ __description("direct packet access: " #name " (non-linear, " type ", " desc ")") \
+ __success __retval(retval) \
+ __linear_size(linear_sz) \
+ __naked void access_non_linear_##name(void) \
+ { \
+ asm volatile (" \
+ r2 = *(u32*)(r1 + %[skb_data]); \
+ r3 = *(u32*)(r1 + %[skb_data_end]); \
+ r0 = r2; \
+ r0 += %[offset]; \
+ if r0 > r3 goto l0_%=; \
+ r0 = *(u8*)(r0 - 1); \
+ r0 = 0; \
+ exit; \
+ l0_%=: r0 = 1; \
+ exit; \
+ " : \
+ : __imm_const(skb_data, offsetof(struct __sk_buff, data)), \
+ __imm_const(skb_data_end, offsetof(struct __sk_buff, data_end)), \
+ __imm_const(offset, off) \
+ : __clobber_all); \
+ }
+
+access_test_non_linear(test31, "tc", "too short eth", 1, ETH_HLEN, 22);
+access_test_non_linear(test32, "tc", "too short 1", 1, 1, 22);
+access_test_non_linear(test33, "tc", "long enough", 0, 22, 22);
+access_test_non_linear(test34, "cgroup_skb/ingress", "too short eth", 1, ETH_HLEN, 8);
+access_test_non_linear(test35, "cgroup_skb/ingress", "too short 1", 1, 1, 8);
+access_test_non_linear(test36, "cgroup_skb/ingress", "long enough", 0, 22, 8);
+
+SEC("tc")
+__description("direct packet access: test37 (non-linear, linearized)")
+__success __retval(0)
+__linear_size(ETH_HLEN)
+__naked void access_non_linear_linearized(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ r2 = 22; \
+ call %[bpf_skb_pull_data]; \
+ r2 = *(u32*)(r6 + %[skb_data]); \
+ r3 = *(u32*)(r6 + %[skb_data_end]); \
+ r0 = r2; \
+ r0 += 22; \
+ if r0 > r3 goto l0_%=; \
+ r0 = *(u8*)(r0 - 1); \
+ exit; \
+l0_%=: r0 = 1; \
+ exit; \
+" :
+ : __imm(bpf_skb_pull_data),
+ __imm_const(skb_data, offsetof(struct __sk_buff, data)),
+ __imm_const(skb_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
char _license[] SEC("license") = "GPL";
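
A hedged plain-C counterpart of test37 (illustrative; the function name pull_then_read is hypothetical): linearize the headroom with bpf_skb_pull_data(), re-read data/data_end, and only then do the bounds check.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int pull_then_read(struct __sk_buff *skb)
{
	unsigned char *data, *data_end;

	if (bpf_skb_pull_data(skb, 22))	/* linearize the first 22 bytes */
		return 1;
	/* data/data_end are invalidated by the helper; re-read them */
	data = (void *)(long)skb->data;
	data_end = (void *)(long)skb->data_end;
	if (data + 22 > data_end)	/* verifier-required bounds check */
		return 1;
	return data[21] & 0;		/* read the 22nd byte, return 0 */
}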
diff --git a/tools/testing/selftests/bpf/progs/verifier_div_overflow.c b/tools/testing/selftests/bpf/progs/verifier_div_overflow.c
index 458984da804c..34e0c012ee76 100644
--- a/tools/testing/selftests/bpf/progs/verifier_div_overflow.c
+++ b/tools/testing/selftests/bpf/progs/verifier_div_overflow.c
@@ -77,7 +77,7 @@ l0_%=: exit; \
SEC("tc")
__description("MOD32 overflow, check 1")
-__success __retval(INT_MIN)
+__success __retval(_INT_MIN)
__naked void mod32_overflow_check_1(void)
{
asm volatile (" \
@@ -92,7 +92,7 @@ __naked void mod32_overflow_check_1(void)
SEC("tc")
__description("MOD32 overflow, check 2")
-__success __retval(INT_MIN)
+__success __retval(_INT_MIN)
__naked void mod32_overflow_check_2(void)
{
asm volatile (" \
diff --git a/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c b/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
index 4ab0ef18d7eb..1204fbc58178 100644
--- a/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
+++ b/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
@@ -179,4 +179,132 @@ int BPF_PROG(trusted_acq_rel, struct task_struct *task, u64 clone_flags)
return subprog_trusted_acq_rel(task);
}
+__weak int subprog_untrusted_bad_tags(struct task_struct *task __arg_untrusted __arg_nullable)
+{
+ return task->pid;
+}
+
+SEC("tp_btf/sys_enter")
+__failure
+__msg("arg#0 untrusted cannot be combined with any other tags")
+int untrusted_bad_tags(void *ctx)
+{
+ return subprog_untrusted_bad_tags(0);
+}
+
+struct local_type_wont_be_accepted {};
+
+__weak int subprog_untrusted_bad_type(struct local_type_wont_be_accepted *p __arg_untrusted)
+{
+ return 0;
+}
+
+SEC("tp_btf/sys_enter")
+__failure
+__msg("arg#0 reference type('STRUCT local_type_wont_be_accepted') has no matches")
+int untrusted_bad_type(void *ctx)
+{
+ return subprog_untrusted_bad_type(bpf_rdonly_cast(0, 0));
+}
+
+__weak int subprog_untrusted(const volatile struct task_struct *restrict task __arg_untrusted)
+{
+ return task->pid;
+}
+
+SEC("tp_btf/sys_enter")
+__success
+__log_level(2)
+__msg("r1 = {{.*}}; {{.*}}R1=trusted_ptr_task_struct()")
+__msg("Func#1 ('subprog_untrusted') is global and assumed valid.")
+__msg("Validating subprog_untrusted() func#1...")
+__msg(": R1=untrusted_ptr_task_struct")
+int trusted_to_untrusted(void *ctx)
+{
+ return subprog_untrusted(bpf_get_current_task_btf());
+}
+
+char mem[16];
+u32 offset;
+
+SEC("tp_btf/sys_enter")
+__success
+int anything_to_untrusted(void *ctx)
+{
+ /* untrusted to untrusted */
+ subprog_untrusted(bpf_core_cast(0, struct task_struct));
+ /* wrong type to untrusted */
+ subprog_untrusted((void *)bpf_core_cast(0, struct bpf_verifier_env));
+ /* map value to untrusted */
+ subprog_untrusted((void *)mem);
+ /* scalar to untrusted */
+ subprog_untrusted(0);
+ /* variable offset to untrusted (map) */
+ subprog_untrusted((void *)mem + offset);
+ /* variable offset to untrusted (trusted) */
+ subprog_untrusted((void *)bpf_get_current_task_btf() + offset);
+ return 0;
+}
+
+__weak int subprog_untrusted2(struct task_struct *task __arg_untrusted)
+{
+ return subprog_trusted_task_nullable(task);
+}
+
+SEC("tp_btf/sys_enter")
+__failure
+__msg("R1 type=untrusted_ptr_ expected=ptr_, trusted_ptr_, rcu_ptr_")
+__msg("Caller passes invalid args into func#{{.*}} ('subprog_trusted_task_nullable')")
+int untrusted_to_trusted(void *ctx)
+{
+ return subprog_untrusted2(bpf_get_current_task_btf());
+}
+
+__weak int subprog_void_untrusted(void *p __arg_untrusted)
+{
+ return *(int *)p;
+}
+
+__weak int subprog_char_untrusted(char *p __arg_untrusted)
+{
+ return *(int *)p;
+}
+
+__weak int subprog_enum_untrusted(enum bpf_attach_type *p __arg_untrusted)
+{
+ return *(int *)p;
+}
+
+SEC("tp_btf/sys_enter")
+__success
+__log_level(2)
+__msg("r1 = {{.*}}; {{.*}}R1=trusted_ptr_task_struct()")
+__msg("Func#1 ('subprog_void_untrusted') is global and assumed valid.")
+__msg("Validating subprog_void_untrusted() func#1...")
+__msg(": R1=rdonly_untrusted_mem(sz=0)")
+int trusted_to_untrusted_mem(void *ctx)
+{
+ return subprog_void_untrusted(bpf_get_current_task_btf());
+}
+
+SEC("tp_btf/sys_enter")
+__success
+int anything_to_untrusted_mem(void *ctx)
+{
+ /* untrusted to untrusted mem */
+ subprog_void_untrusted(bpf_core_cast(0, struct task_struct));
+ /* map value to untrusted mem */
+ subprog_void_untrusted(mem);
+ /* scalar to untrusted mem */
+ subprog_void_untrusted(0);
+ /* variable offset to untrusted mem (map) */
+ subprog_void_untrusted((void *)mem + offset);
+ /* variable offset to untrusted mem (trusted) */
+ subprog_void_untrusted(bpf_get_current_task_btf() + offset);
+ /* variable offset to untrusted char/enum (map) */
+ subprog_char_untrusted(mem + offset);
+ subprog_enum_untrusted((void *)mem + offset);
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_gotox.c b/tools/testing/selftests/bpf/progs/verifier_gotox.c
new file mode 100644
index 000000000000..607dad058ca1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_gotox.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Isovalent */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "../../../include/linux/filter.h"
+
+#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64)
+
+#define DEFINE_SIMPLE_JUMP_TABLE_PROG(NAME, SRC_REG, OFF, IMM, OUTCOME) \
+ \
+ SEC("socket") \
+ OUTCOME \
+ __naked void jump_table_ ## NAME(void) \
+ { \
+ asm volatile (" \
+ .pushsection .jumptables,\"\",@progbits; \
+ jt0_%=: \
+ .quad ret0_%= - socket; \
+ .quad ret1_%= - socket; \
+ .size jt0_%=, 16; \
+ .global jt0_%=; \
+ .popsection; \
+ \
+ r0 = jt0_%= ll; \
+ r0 += 8; \
+ r0 = *(u64 *)(r0 + 0); \
+ .8byte %[gotox_r0]; \
+ ret0_%=: \
+ r0 = 0; \
+ exit; \
+ ret1_%=: \
+ r0 = 1; \
+ exit; \
+ " : \
+ : __imm_insn(gotox_r0, BPF_RAW_INSN(BPF_JMP | BPF_JA | BPF_X, BPF_REG_0, (SRC_REG), (OFF) , (IMM))) \
+ : __clobber_all); \
+ }
+
+/*
+ * The first program, which doesn't use reserved fields,
+ * loads and works properly. The rest fail to load.
+ */
+DEFINE_SIMPLE_JUMP_TABLE_PROG(ok, BPF_REG_0, 0, 0, __success __retval(1))
+DEFINE_SIMPLE_JUMP_TABLE_PROG(reserved_field_src_reg, BPF_REG_1, 0, 0, __failure __msg("BPF_JA|BPF_X uses reserved fields"))
+DEFINE_SIMPLE_JUMP_TABLE_PROG(reserved_field_non_zero_off, BPF_REG_0, 1, 0, __failure __msg("BPF_JA|BPF_X uses reserved fields"))
+DEFINE_SIMPLE_JUMP_TABLE_PROG(reserved_field_non_zero_imm, BPF_REG_0, 0, 1, __failure __msg("BPF_JA|BPF_X uses reserved fields"))
+
+/*
+ * Gotox is forbidden when there is no loaded jump table that
+ * points to the sub-function where the gotox is used.
+ */
+SEC("socket")
+__failure __msg("no jump tables found for subprog starting at 0")
+__naked void jump_table_no_jump_table(void)
+{
+ asm volatile (" \
+ .8byte %[gotox_r0]; \
+ r0 = 1; \
+ exit; \
+" : \
+ : __imm_insn(gotox_r0, BPF_RAW_INSN(BPF_JMP | BPF_JA | BPF_X, BPF_REG_0, 0, 0 , 0))
+ : __clobber_all);
+}
+
+/*
+ * Incorrect type of the target register: only PTR_TO_INSN is allowed.
+ */
+SEC("socket")
+__failure __msg("R1 has type scalar, expected PTR_TO_INSN")
+__naked void jump_table_incorrect_dst_reg_type(void)
+{
+ asm volatile (" \
+ .pushsection .jumptables,\"\",@progbits; \
+jt0_%=: \
+ .quad ret0_%= - socket; \
+ .quad ret1_%= - socket; \
+ .size jt0_%=, 16; \
+ .global jt0_%=; \
+ .popsection; \
+ \
+ r0 = jt0_%= ll; \
+ r0 += 8; \
+ r0 = *(u64 *)(r0 + 0); \
+ r1 = 42; \
+ .8byte %[gotox_r1]; \
+ ret0_%=: \
+ r0 = 0; \
+ exit; \
+ ret1_%=: \
+ r0 = 1; \
+ exit; \
+" : \
+ : __imm_insn(gotox_r1, BPF_RAW_INSN(BPF_JMP | BPF_JA | BPF_X, BPF_REG_1, 0, 0 , 0))
+ : __clobber_all);
+}
+
+#define DEFINE_INVALID_SIZE_PROG(READ_SIZE, OUTCOME) \
+ \
+ SEC("socket") \
+ OUTCOME \
+ __naked void jump_table_invalid_read_size_ ## READ_SIZE(void) \
+ { \
+ asm volatile (" \
+ .pushsection .jumptables,\"\",@progbits; \
+ jt0_%=: \
+ .quad ret0_%= - socket; \
+ .quad ret1_%= - socket; \
+ .size jt0_%=, 16; \
+ .global jt0_%=; \
+ .popsection; \
+ \
+ r0 = jt0_%= ll; \
+ r0 += 8; \
+ r0 = *(" #READ_SIZE " *)(r0 + 0); \
+ .8byte %[gotox_r0]; \
+ ret0_%=: \
+ r0 = 0; \
+ exit; \
+ ret1_%=: \
+ r0 = 1; \
+ exit; \
+ " : \
+ : __imm_insn(gotox_r0, BPF_RAW_INSN(BPF_JMP | BPF_JA | BPF_X, BPF_REG_0, 0, 0 , 0)) \
+ : __clobber_all); \
+ }
+
+DEFINE_INVALID_SIZE_PROG(u32, __failure __msg("Invalid read of 4 bytes from insn_array"))
+DEFINE_INVALID_SIZE_PROG(u16, __failure __msg("Invalid read of 2 bytes from insn_array"))
+DEFINE_INVALID_SIZE_PROG(u8, __failure __msg("Invalid read of 1 bytes from insn_array"))
+
+SEC("socket")
+__failure __msg("misaligned value access off 0+1+0 size 8")
+__naked void jump_table_misaligned_access(void)
+{
+ asm volatile (" \
+ .pushsection .jumptables,\"\",@progbits; \
+jt0_%=: \
+ .quad ret0_%= - socket; \
+ .quad ret1_%= - socket; \
+ .size jt0_%=, 16; \
+ .global jt0_%=; \
+ .popsection; \
+ \
+ r0 = jt0_%= ll; \
+ r0 += 1; \
+ r0 = *(u64 *)(r0 + 0); \
+ .8byte %[gotox_r0]; \
+ ret0_%=: \
+ r0 = 0; \
+ exit; \
+ ret1_%=: \
+ r0 = 1; \
+ exit; \
+" : \
+ : __imm_insn(gotox_r0, BPF_RAW_INSN(BPF_JMP | BPF_JA | BPF_X, BPF_REG_0, 0, 0 , 0))
+ : __clobber_all);
+}
+
+SEC("socket")
+__failure __msg("invalid access to map value, value_size=16 off=24 size=8")
+__naked void jump_table_invalid_mem_access_pos(void)
+{
+ asm volatile (" \
+ .pushsection .jumptables,\"\",@progbits; \
+jt0_%=: \
+ .quad ret0_%= - socket; \
+ .quad ret1_%= - socket; \
+ .size jt0_%=, 16; \
+ .global jt0_%=; \
+ .popsection; \
+ \
+ r0 = jt0_%= ll; \
+ r0 += 24; \
+ r0 = *(u64 *)(r0 + 0); \
+ .8byte %[gotox_r0]; \
+ ret0_%=: \
+ r0 = 0; \
+ exit; \
+ ret1_%=: \
+ r0 = 1; \
+ exit; \
+" : \
+ : __imm_insn(gotox_r0, BPF_RAW_INSN(BPF_JMP | BPF_JA | BPF_X, BPF_REG_0, 0, 0 , 0))
+ : __clobber_all);
+}
+
+SEC("socket")
+__failure __msg("invalid access to map value, value_size=16 off=-24 size=8")
+__naked void jump_table_invalid_mem_access_neg(void)
+{
+ asm volatile (" \
+ .pushsection .jumptables,\"\",@progbits; \
+jt0_%=: \
+ .quad ret0_%= - socket; \
+ .quad ret1_%= - socket; \
+ .size jt0_%=, 16; \
+ .global jt0_%=; \
+ .popsection; \
+ \
+ r0 = jt0_%= ll; \
+ r0 -= 24; \
+ r0 = *(u64 *)(r0 + 0); \
+ .8byte %[gotox_r0]; \
+ ret0_%=: \
+ r0 = 0; \
+ exit; \
+ ret1_%=: \
+ r0 = 1; \
+ exit; \
+" : \
+ : __imm_insn(gotox_r0, BPF_RAW_INSN(BPF_JMP | BPF_JA | BPF_X, BPF_REG_0, 0, 0 , 0))
+ : __clobber_all);
+}
+
+SEC("socket")
+__success __retval(1)
+__naked void jump_table_add_sub_ok(void)
+{
+ asm volatile (" \
+ .pushsection .jumptables,\"\",@progbits; \
+jt0_%=: \
+ .quad ret0_%= - socket; \
+ .quad ret1_%= - socket; \
+ .size jt0_%=, 16; \
+ .global jt0_%=; \
+ .popsection; \
+ \
+ r0 = jt0_%= ll; \
+ r0 -= 24; \
+ r0 += 32; \
+ r0 = *(u64 *)(r0 + 0); \
+ .8byte %[gotox_r0]; \
+ ret0_%=: \
+ r0 = 0; \
+ exit; \
+ ret1_%=: \
+ r0 = 1; \
+ exit; \
+" : \
+ : __imm_insn(gotox_r0, BPF_RAW_INSN(BPF_JMP | BPF_JA | BPF_X, BPF_REG_0, 0, 0 , 0))
+ : __clobber_all);
+}
+
+SEC("socket")
+__failure __msg("write into map forbidden, value_size=16 off=8 size=8")
+__naked void jump_table_no_writes(void)
+{
+ asm volatile (" \
+ .pushsection .jumptables,\"\",@progbits; \
+jt0_%=: \
+ .quad ret0_%= - socket; \
+ .quad ret1_%= - socket; \
+ .size jt0_%=, 16; \
+ .global jt0_%=; \
+ .popsection; \
+ \
+ r0 = jt0_%= ll; \
+ r0 += 8; \
+ r1 = 0xbeef; \
+ *(u64 *)(r0 + 0) = r1; \
+ .8byte %[gotox_r0]; \
+ ret0_%=: \
+ r0 = 0; \
+ exit; \
+ ret1_%=: \
+ r0 = 1; \
+ exit; \
+" : \
+ : __imm_insn(gotox_r0, BPF_RAW_INSN(BPF_JMP | BPF_JA | BPF_X, BPF_REG_0, 0, 0 , 0))
+ : __clobber_all);
+}
+
+#define DEFINE_JUMP_TABLE_USE_REG(REG) \
+ SEC("socket") \
+ __success __retval(1) \
+ __naked void jump_table_use_reg_r ## REG(void) \
+ { \
+ asm volatile (" \
+ .pushsection .jumptables,\"\",@progbits; \
+ jt0_%=: \
+ .quad ret0_%= - socket; \
+ .quad ret1_%= - socket; \
+ .size jt0_%=, 16; \
+ .global jt0_%=; \
+ .popsection; \
+ \
+ r0 = jt0_%= ll; \
+ r0 += 8; \
+ r" #REG " = *(u64 *)(r0 + 0); \
+ .8byte %[gotox_rX]; \
+ ret0_%=: \
+ r0 = 0; \
+ exit; \
+ ret1_%=: \
+ r0 = 1; \
+ exit; \
+ " : \
+ : __imm_insn(gotox_rX, BPF_RAW_INSN(BPF_JMP | BPF_JA | BPF_X, BPF_REG_ ## REG, 0, 0 , 0)) \
+ : __clobber_all); \
+ }
+
+DEFINE_JUMP_TABLE_USE_REG(0)
+DEFINE_JUMP_TABLE_USE_REG(1)
+DEFINE_JUMP_TABLE_USE_REG(2)
+DEFINE_JUMP_TABLE_USE_REG(3)
+DEFINE_JUMP_TABLE_USE_REG(4)
+DEFINE_JUMP_TABLE_USE_REG(5)
+DEFINE_JUMP_TABLE_USE_REG(6)
+DEFINE_JUMP_TABLE_USE_REG(7)
+DEFINE_JUMP_TABLE_USE_REG(8)
+DEFINE_JUMP_TABLE_USE_REG(9)
+
+__used static int test_subprog(void)
+{
+ return 0;
+}
+
+SEC("socket")
+__failure __msg("jump table for insn 4 points outside of the subprog [0,10]")
+__naked void jump_table_outside_subprog(void)
+{
+ asm volatile (" \
+ .pushsection .jumptables,\"\",@progbits; \
+jt0_%=: \
+ .quad ret0_%= - socket; \
+ .quad ret1_%= - socket; \
+ .quad ret_out_%= - socket; \
+ .size jt0_%=, 24; \
+ .global jt0_%=; \
+ .popsection; \
+ \
+ r0 = jt0_%= ll; \
+ r0 += 8; \
+ r0 = *(u64 *)(r0 + 0); \
+ .8byte %[gotox_r0]; \
+ ret0_%=: \
+ r0 = 0; \
+ exit; \
+ ret1_%=: \
+ r0 = 1; \
+ call test_subprog; \
+ exit; \
+ ret_out_%=: \
+" : \
+ : __imm_insn(gotox_r0, BPF_RAW_INSN(BPF_JMP | BPF_JA | BPF_X, BPF_REG_0, 0, 0 , 0))
+ : __clobber_all);
+}
+
+SEC("socket")
+__success __retval(1)
+__naked void jump_table_contains_non_unique_values(void)
+{
+ asm volatile (" \
+ .pushsection .jumptables,\"\",@progbits; \
+jt0_%=: \
+ .quad ret0_%= - socket; \
+ .quad ret1_%= - socket; \
+ .quad ret0_%= - socket; \
+ .quad ret1_%= - socket; \
+ .quad ret0_%= - socket; \
+ .quad ret1_%= - socket; \
+ .quad ret0_%= - socket; \
+ .quad ret1_%= - socket; \
+ .quad ret0_%= - socket; \
+ .quad ret1_%= - socket; \
+ .size jt0_%=, 80; \
+ .global jt0_%=; \
+ .popsection; \
+ \
+ r0 = jt0_%= ll; \
+ r0 += 8; \
+ r0 = *(u64 *)(r0 + 0); \
+ .8byte %[gotox_r0]; \
+ ret0_%=: \
+ r0 = 0; \
+ exit; \
+ ret1_%=: \
+ r0 = 1; \
+ exit; \
+" : \
+ : __imm_insn(gotox_r0, BPF_RAW_INSN(BPF_JMP | BPF_JA | BPF_X, BPF_REG_0, 0, 0 , 0))
+ : __clobber_all);
+}
+
+#endif /* __TARGET_ARCH_x86 || __TARGET_ARCH_arm64 */
+
+char _license[] SEC("license") = "GPL";
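
The tests above assemble .jumptables sections by hand; in ordinary BPF C, the source construct a compiler may lower to such a table plus an indirect jump (gotox) is a dense switch. A hedged sketch (whether a table is actually emitted depends on the compiler and target):

SEC("socket")
int classify(struct __sk_buff *skb)
{
	/* four dense cases: a candidate for jump-table lowering */
	switch (skb->len & 3) {
	case 0: return 10;
	case 1: return 11;
	case 2: return 12;
	default: return 13;
	}
}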
diff --git a/tools/testing/selftests/bpf/progs/verifier_ldsx.c b/tools/testing/selftests/bpf/progs/verifier_ldsx.c
index 52edee41caf6..c8494b682c31 100644
--- a/tools/testing/selftests/bpf/progs/verifier_ldsx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_ldsx.c
@@ -3,6 +3,7 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
+#include "bpf_arena_common.h"
#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
(defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
@@ -10,6 +11,12 @@
defined(__TARGET_ARCH_loongarch)) && \
__clang_major__ >= 18
+struct {
+ __uint(type, BPF_MAP_TYPE_ARENA);
+ __uint(map_flags, BPF_F_MMAPABLE);
+ __uint(max_entries, 1);
+} arena SEC(".maps");
+
SEC("socket")
__description("LDSX, S8")
__success __success_unpriv __retval(-2)
@@ -65,7 +72,7 @@ __naked void ldsx_s32(void)
SEC("socket")
__description("LDSX, S8 range checking, privileged")
__log_level(2) __success __retval(1)
-__msg("R1_w=scalar(smin=smin32=-128,smax=smax32=127)")
+__msg("R1=scalar(smin=smin32=-128,smax=smax32=127)")
__naked void ldsx_s8_range_priv(void)
{
asm volatile (
@@ -256,6 +263,175 @@ __naked void ldsx_ctx_8(void)
: __clobber_all);
}
+SEC("syscall")
+__description("Arena LDSX Disasm")
+__success
+__arch_x86_64
+__jited("movslq 0x10(%rax,%r12), %r14")
+__jited("movswq 0x18(%rax,%r12), %r14")
+__jited("movsbq 0x20(%rax,%r12), %r14")
+__jited("movslq 0x10(%rdi,%r12), %r15")
+__jited("movswq 0x18(%rdi,%r12), %r15")
+__jited("movsbq 0x20(%rdi,%r12), %r15")
+__arch_arm64
+__jited("add x11, x7, x28")
+__jited("ldrsw x21, [x11, #0x10]")
+__jited("add x11, x7, x28")
+__jited("ldrsh x21, [x11, #0x18]")
+__jited("add x11, x7, x28")
+__jited("ldrsb x21, [x11, #0x20]")
+__jited("add x11, x0, x28")
+__jited("ldrsw x22, [x11, #0x10]")
+__jited("add x11, x0, x28")
+__jited("ldrsh x22, [x11, #0x18]")
+__jited("add x11, x0, x28")
+__jited("ldrsb x22, [x11, #0x20]")
+__naked void arena_ldsx_disasm(void *ctx)
+{
+ asm volatile (
+ "r1 = %[arena] ll;"
+ "r2 = 0;"
+ "r3 = 1;"
+ "r4 = %[numa_no_node];"
+ "r5 = 0;"
+ "call %[bpf_arena_alloc_pages];"
+ "r0 = addr_space_cast(r0, 0x0, 0x1);"
+ "r1 = r0;"
+ "r8 = *(s32 *)(r0 + 16);"
+ "r8 = *(s16 *)(r0 + 24);"
+ "r8 = *(s8 *)(r0 + 32);"
+ "r9 = *(s32 *)(r1 + 16);"
+ "r9 = *(s16 *)(r1 + 24);"
+ "r9 = *(s8 *)(r1 + 32);"
+ "r0 = 0;"
+ "exit;"
+ :: __imm(bpf_arena_alloc_pages),
+ __imm_addr(arena),
+ __imm_const(numa_no_node, NUMA_NO_NODE)
+ : __clobber_all
+ );
+}
+
+SEC("syscall")
+__description("Arena LDSX Exception")
+__success __retval(0)
+__arch_x86_64
+__arch_arm64
+__naked void arena_ldsx_exception(void *ctx)
+{
+ asm volatile (
+ "r1 = %[arena] ll;"
+ "r0 = 0xdeadbeef;"
+ "r0 = addr_space_cast(r0, 0x0, 0x1);"
+ "r1 = 0x3fe;"
+ "*(u64 *)(r0 + 0) = r1;"
+ "r0 = *(s8 *)(r0 + 0);"
+ "exit;"
+ :
+ : __imm_addr(arena)
+ : __clobber_all
+ );
+}
+
+SEC("syscall")
+__description("Arena LDSX, S8")
+__success __retval(-1)
+__arch_x86_64
+__arch_arm64
+__naked void arena_ldsx_s8(void *ctx)
+{
+ asm volatile (
+ "r1 = %[arena] ll;"
+ "r2 = 0;"
+ "r3 = 1;"
+ "r4 = %[numa_no_node];"
+ "r5 = 0;"
+ "call %[bpf_arena_alloc_pages];"
+ "r0 = addr_space_cast(r0, 0x0, 0x1);"
+ "r1 = 0x3fe;"
+ "*(u64 *)(r0 + 0) = r1;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r0 = *(s8 *)(r0 + 0);"
+#else
+ "r0 = *(s8 *)(r0 + 7);"
+#endif
+ "r0 >>= 1;"
+ "exit;"
+ :: __imm(bpf_arena_alloc_pages),
+ __imm_addr(arena),
+ __imm_const(numa_no_node, NUMA_NO_NODE)
+ : __clobber_all
+ );
+}
+
+SEC("syscall")
+__description("Arena LDSX, S16")
+__success __retval(-1)
+__arch_x86_64
+__arch_arm64
+__naked void arena_ldsx_s16(void *ctx)
+{
+ asm volatile (
+ "r1 = %[arena] ll;"
+ "r2 = 0;"
+ "r3 = 1;"
+ "r4 = %[numa_no_node];"
+ "r5 = 0;"
+ "call %[bpf_arena_alloc_pages];"
+ "r0 = addr_space_cast(r0, 0x0, 0x1);"
+ "r1 = 0x3fffe;"
+ "*(u64 *)(r0 + 0) = r1;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r0 = *(s16 *)(r0 + 0);"
+#else
+ "r0 = *(s16 *)(r0 + 6);"
+#endif
+ "r0 >>= 1;"
+ "exit;"
+ :: __imm(bpf_arena_alloc_pages),
+ __imm_addr(arena),
+ __imm_const(numa_no_node, NUMA_NO_NODE)
+ : __clobber_all
+ );
+}
+
+SEC("syscall")
+__description("Arena LDSX, S32")
+__success __retval(-1)
+__arch_x86_64
+__arch_arm64
+__naked void arena_ldsx_s32(void *ctx)
+{
+ asm volatile (
+ "r1 = %[arena] ll;"
+ "r2 = 0;"
+ "r3 = 1;"
+ "r4 = %[numa_no_node];"
+ "r5 = 0;"
+ "call %[bpf_arena_alloc_pages];"
+ "r0 = addr_space_cast(r0, 0x0, 0x1);"
+ "r1 = 0xfffffffe;"
+ "*(u64 *)(r0 + 0) = r1;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r0 = *(s32 *)(r0 + 0);"
+#else
+ "r0 = *(s32 *)(r0 + 4);"
+#endif
+ "r0 >>= 1;"
+ "exit;"
+ :: __imm(bpf_arena_alloc_pages),
+ __imm_addr(arena),
+ __imm_const(numa_no_node, NUMA_NO_NODE)
+ : __clobber_all
+ );
+}
+
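+/* Worked arithmetic for the arena LDSX, S8 test below: the stored value
+ * 0x3fe has low byte 0xfe, so the sign-extending load yields (s8)0xfe = -2,
+ * i.e. 0xfffffffffffffffe. The logical shift r0 >>= 1 then gives
+ * 0x7fffffffffffffff, whose low 32 bits are 0xffffffff, hence retval -1.
+ */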
+/* to retain debug info for BTF generation */
+void kfunc_root(void)
+{
+ bpf_arena_alloc_pages(0, 0, 0, 0, 0);
+}
+
#else
SEC("socket")
diff --git a/tools/testing/selftests/bpf/progs/verifier_live_stack.c b/tools/testing/selftests/bpf/progs/verifier_live_stack.c
new file mode 100644
index 000000000000..2de105057bbc
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_live_stack.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, long long);
+} map SEC(".maps");
+
+SEC("socket")
+__log_level(2)
+__msg("(0) frame 0 insn 2 +written -8")
+__msg("(0) frame 0 insn 1 +live -24")
+__msg("(0) frame 0 insn 1 +written -8")
+__msg("(0) frame 0 insn 0 +live -8,-24")
+__msg("(0) frame 0 insn 0 +written -8")
+__msg("(0) live stack update done in 2 iterations")
+__naked void simple_read_simple_write(void)
+{
+ asm volatile (
+ "r1 = *(u64 *)(r10 - 8);"
+ "r2 = *(u64 *)(r10 - 24);"
+ "*(u64 *)(r10 - 8) = r1;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("socket")
+__log_level(2)
+__msg("(0) frame 0 insn 1 +live -8")
+__not_msg("(0) frame 0 insn 1 +written")
+__msg("(0) live stack update done in 2 iterations")
+__msg("(0) frame 0 insn 1 +live -16")
+__msg("(0) frame 0 insn 1 +written -32")
+__msg("(0) live stack update done in 2 iterations")
+__naked void read_write_join(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "if r0 > 42 goto 1f;"
+ "r0 = *(u64 *)(r10 - 8);"
+ "*(u64 *)(r10 - 32) = r0;"
+ "*(u64 *)(r10 - 40) = r0;"
+ "exit;"
+"1:"
+ "r0 = *(u64 *)(r10 - 16);"
+ "*(u64 *)(r10 - 32) = r0;"
+ "exit;"
+ :: __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__log_level(2)
+__msg("2: (25) if r0 > 0x2a goto pc+1")
+__msg("7: (95) exit")
+__msg("(0) frame 0 insn 2 +written -16")
+__msg("(0) live stack update done in 2 iterations")
+__msg("7: (95) exit")
+__not_msg("(0) frame 0 insn 2")
+__msg("(0) live stack update done in 1 iterations")
+__naked void must_write_not_same_slot(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "r1 = -8;"
+ "if r0 > 42 goto 1f;"
+ "r1 = -16;"
+"1:"
+ "r2 = r10;"
+ "r2 += r1;"
+ "*(u64 *)(r2 + 0) = r0;"
+ "exit;"
+ :: __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__log_level(2)
+__msg("(0) frame 0 insn 0 +written -8,-16")
+__msg("(0) live stack update done in 2 iterations")
+__msg("(0) frame 0 insn 0 +written -8")
+__msg("(0) live stack update done in 2 iterations")
+__naked void must_write_not_same_type(void)
+{
+ asm volatile (
+ "*(u64*)(r10 - 8) = 0;"
+ "r2 = r10;"
+ "r2 += -8;"
+ "r1 = %[map] ll;"
+ "call %[bpf_map_lookup_elem];"
+ "if r0 != 0 goto 1f;"
+ "r0 = r10;"
+ "r0 += -16;"
+"1:"
+ "*(u64 *)(r0 + 0) = 42;"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map)
+ : __clobber_all);
+}
+
+SEC("socket")
+__log_level(2)
+__msg("(2,4) frame 0 insn 4 +written -8")
+__msg("(2,4) live stack update done in 2 iterations")
+__msg("(0) frame 0 insn 2 +written -8")
+__msg("(0) live stack update done in 2 iterations")
+__naked void caller_stack_write(void)
+{
+ asm volatile (
+ "r1 = r10;"
+ "r1 += -8;"
+ "call write_first_param;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+static __used __naked void write_first_param(void)
+{
+ asm volatile (
+ "*(u64 *)(r1 + 0) = 7;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("socket")
+__log_level(2)
+/* caller_stack_read() function */
+__msg("2: .12345.... (85) call pc+4")
+__msg("5: .12345.... (85) call pc+1")
+__msg("6: 0......... (95) exit")
+/* read_first_param() function */
+__msg("7: .1........ (79) r0 = *(u64 *)(r1 +0)")
+__msg("8: 0......... (95) exit")
+/* update for callsite at (2) */
+__msg("(2,7) frame 0 insn 7 +live -8")
+__msg("(2,7) live stack update done in 2 iterations")
+__msg("(0) frame 0 insn 2 +live -8")
+__msg("(0) live stack update done in 2 iterations")
+/* update for callsite at (5) */
+__msg("(5,7) frame 0 insn 7 +live -16")
+__msg("(5,7) live stack update done in 2 iterations")
+__msg("(0) frame 0 insn 5 +live -16")
+__msg("(0) live stack update done in 2 iterations")
+__naked void caller_stack_read(void)
+{
+ asm volatile (
+ "r1 = r10;"
+ "r1 += -8;"
+ "call read_first_param;"
+ "r1 = r10;"
+ "r1 += -16;"
+ "call read_first_param;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+static __used __naked void read_first_param(void)
+{
+ asm volatile (
+ "r0 = *(u64 *)(r1 + 0);"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("socket")
+__flag(BPF_F_TEST_STATE_FREQ)
+__log_level(2)
+/* read_first_param2() function */
+__msg(" 9: .1........ (79) r0 = *(u64 *)(r1 +0)")
+__msg("10: .......... (b7) r0 = 0")
+__msg("11: 0......... (05) goto pc+0")
+__msg("12: 0......... (95) exit")
+/*
+ * The purpose of the test is to check that the checkpoint in
+ * read_first_param2() stops path traversal. This will only happen if the
+ * verifier understands that fp[0]-8 at insn (12) is not alive.
+ */
+__msg("12: safe")
+__msg("processed 20 insns")
+__naked void caller_stack_pruning(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == 42 goto 1f;"
+ "r0 = %[map] ll;"
+"1:"
+ "*(u64 *)(r10 - 8) = r0;"
+ "r1 = r10;"
+ "r1 += -8;"
+ /*
+	 * fp[0]-8 is either a map pointer or a scalar, preventing state
+	 * pruning at the checkpoint created for the call.
+ */
+ "call read_first_param2;"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm_addr(map)
+ : __clobber_all);
+}
+
+static __used __naked void read_first_param2(void)
+{
+ asm volatile (
+ "r0 = *(u64 *)(r1 + 0);"
+ "r0 = 0;"
+ /*
+ * Checkpoint at goto +0 should fire,
+ * as caller stack fp[0]-8 is not alive at this point.
+ */
+ "goto +0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("socket")
+__flag(BPF_F_TEST_STATE_FREQ)
+__failure
+__msg("R1 type=scalar expected=map_ptr")
+__naked void caller_stack_pruning_callback(void)
+{
+ asm volatile (
+ "r0 = %[map] ll;"
+ "*(u64 *)(r10 - 8) = r0;"
+ "r1 = 2;"
+ "r2 = loop_cb ll;"
+ "r3 = r10;"
+ "r3 += -8;"
+ "r4 = 0;"
+ /*
+	 * fp[0]-8 is either a map pointer or a scalar, preventing state
+	 * pruning at the checkpoint created for the call.
+ */
+ "call %[bpf_loop];"
+ "r0 = 42;"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_loop),
+ __imm_addr(map)
+ : __clobber_all);
+}
+
+static __used __naked void loop_cb(void)
+{
+ asm volatile (
+ /*
+ * Checkpoint at function entry should not fire, as caller
+ * stack fp[0]-8 is alive at this point.
+ */
+ "r6 = r2;"
+ "r1 = *(u64 *)(r6 + 0);"
+ "*(u64*)(r10 - 8) = 7;"
+ "r2 = r10;"
+ "r2 += -8;"
+ "call %[bpf_map_lookup_elem];"
+ /*
+ * This should stop verifier on a second loop iteration,
+ * but only if verifier correctly maintains that fp[0]-8
+ * is still alive.
+ */
+ "*(u64 *)(r6 + 0) = 0;"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_map_lookup_elem),
+ __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+/*
+ * Because of a bug in verifier.c:compute_postorder(),
+ * the program below overflowed the traversal queue in that function.
+ */
+SEC("socket")
+__naked void syzbot_postorder_bug1(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "if r0 != 0 goto -1;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} map_array SEC(".maps");
+
+SEC("socket")
+__failure __msg("invalid read from stack R2 off=-1024 size=8")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked unsigned long caller_stack_write_tail_call(void)
+{
+ asm volatile (
+ "r6 = r1;"
+ "*(u64 *)(r10 - 8) = -8;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 != 42 goto 1f;"
+ "goto 2f;"
+ "1:"
+ "*(u64 *)(r10 - 8) = -1024;"
+ "2:"
+ "r1 = r6;"
+ "r2 = r10;"
+ "r2 += -8;"
+ "call write_tail_call;"
+ "r1 = *(u64 *)(r10 - 8);"
+ "r2 = r10;"
+ "r2 += r1;"
+ "r0 = *(u64 *)(r2 + 0);"
+ "exit;"
+ :: __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+static __used __naked unsigned long write_tail_call(void)
+{
+ asm volatile (
+ "r6 = r2;"
+ "r2 = %[map_array] ll;"
+ "r3 = 0;"
+ "call %[bpf_tail_call];"
+ "*(u64 *)(r6 + 0) = -16;"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_tail_call),
+ __imm_addr(map_array)
+ : __clobber_all);
+}
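
To see where the expected messages of simple_read_simple_write() come from, walk the liveness bookkeeping backwards over its three stack accesses (a sketch of the propagation, not literal verifier output):

	insn 2: *(u64 *)(r10 - 8) = r1    slot -8 becomes written        -> "insn 2 +written -8"
	insn 1: r2 = *(u64 *)(r10 - 24)   -24 read before any write, so
	                                  live; -8 still written below   -> "insn 1 +live -24", "+written -8"
	insn 0: r1 = *(u64 *)(r10 - 8)    -8 read before the write at
	                                  insn 2, so live here as well   -> "insn 0 +live -8,-24", "+written -8"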
diff --git a/tools/testing/selftests/bpf/progs/verifier_loops1.c b/tools/testing/selftests/bpf/progs/verifier_loops1.c
index e07b43b78fd2..fbdde80e7b90 100644
--- a/tools/testing/selftests/bpf/progs/verifier_loops1.c
+++ b/tools/testing/selftests/bpf/progs/verifier_loops1.c
@@ -283,4 +283,25 @@ exit_%=: \
: __clobber_all);
}
+/*
+ * This test case triggered a bug in verifier.c:maybe_exit_scc().
+ * A speculative execution path reaches the stack access instruction,
+ * stops, and triggers maybe_exit_scc() without an accompanying
+ * maybe_enter_scc() call.
+ */
+SEC("socket")
+__arch_x86_64
+__caps_unpriv(CAP_BPF)
+__naked void maybe_exit_scc_bug1(void)
+{
+ asm volatile (
+ "r0 = 100;"
+"1:"
+ /* Speculative execution path reaches and stops here. */
+ "*(u64 *)(r10 - 512) = r0;"
+	/* The condition is always false, but the verifier speculatively executes the true branch. */
+ "if r0 <= 0x0 goto 1b;"
+ "exit;"
+ ::: __clobber_all);
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_lsm.c b/tools/testing/selftests/bpf/progs/verifier_lsm.c
index 32e5e779cb96..6af9100a37ff 100644
--- a/tools/testing/selftests/bpf/progs/verifier_lsm.c
+++ b/tools/testing/selftests/bpf/progs/verifier_lsm.c
@@ -4,7 +4,7 @@
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
-SEC("lsm/file_alloc_security")
+SEC("lsm/file_permission")
__description("lsm bpf prog with -4095~0 retval. test 1")
__success
__naked int errno_zero_retval_test1(void *ctx)
@@ -15,7 +15,7 @@ __naked int errno_zero_retval_test1(void *ctx)
::: __clobber_all);
}
-SEC("lsm/file_alloc_security")
+SEC("lsm/file_permission")
__description("lsm bpf prog with -4095~0 retval. test 2")
__success
__naked int errno_zero_retval_test2(void *ctx)
diff --git a/tools/testing/selftests/bpf/progs/verifier_map_in_map.c b/tools/testing/selftests/bpf/progs/verifier_map_in_map.c
index 7d088ba99ea5..16b761e510f0 100644
--- a/tools/testing/selftests/bpf/progs/verifier_map_in_map.c
+++ b/tools/testing/selftests/bpf/progs/verifier_map_in_map.c
@@ -139,4 +139,122 @@ __naked void on_the_inner_map_pointer(void)
: __clobber_all);
}
+SEC("socket")
+__description("map_ptr is never null")
+__success
+__naked void map_ptr_is_never_null(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ r1 = %[map_in_map] ll; \
+ if r1 != 0 goto l0_%=; \
+ r10 = 42; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_in_map)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map_ptr is never null inner")
+__success
+__naked void map_ptr_is_never_null_inner(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u32*)(r10 - 4) = r1; \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = %[map_in_map] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l0_%=; \
+ if r0 != 0 goto l0_%=; \
+ r10 = 42; \
+l0_%=: exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_in_map)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("map_ptr is never null inner spill fill")
+__success
+__naked void map_ptr_is_never_null_inner_spill_fill(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u32*)(r10 - 4) = r1; \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = %[map_in_map] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 != 0 goto l0_%=; \
+ exit; \
+l0_%=: *(u64 *)(r10 -16) = r0; \
+ r1 = *(u64 *)(r10 -16); \
+ if r1 == 0 goto l1_%=; \
+ exit; \
+l1_%=: r10 = 42; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_in_map)
+ : __clobber_all);
+}
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+ __array(values, struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(max_entries, 64 * 1024);
+ });
+} rb_in_map SEC(".maps");
+
+struct rb_ctx {
+ void *rb;
+ struct bpf_dynptr dptr;
+};
+
+static __always_inline struct rb_ctx __rb_event_reserve(__u32 sz)
+{
+ struct rb_ctx rb_ctx = {};
+ void *rb;
+ __u32 cpu = bpf_get_smp_processor_id();
+ __u32 rb_slot = cpu & 1;
+
+ rb = bpf_map_lookup_elem(&rb_in_map, &rb_slot);
+ if (!rb)
+ return rb_ctx;
+
+ rb_ctx.rb = rb;
+ bpf_ringbuf_reserve_dynptr(rb, sz, 0, &rb_ctx.dptr);
+
+ return rb_ctx;
+}
+
+static __noinline void __rb_event_submit(struct rb_ctx *ctx)
+{
+ if (!ctx->rb)
+ return;
+
+ /* If the verifier (incorrectly) concludes that ctx->rb can be
+	 * NULL at this point, we'll get a "BPF_EXIT instruction in main
+	 * prog would lead to reference leak" error.
+ */
+ bpf_ringbuf_submit_dynptr(&ctx->dptr, 0);
+}
+
+SEC("socket")
+int map_ptr_is_never_null_rb(void *ctx)
+{
+ struct rb_ctx event_ctx = __rb_event_reserve(256);
+ __rb_event_submit(&event_ctx);
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_map_ptr.c b/tools/testing/selftests/bpf/progs/verifier_map_ptr.c
index 11a079145966..e2767d27d8aa 100644
--- a/tools/testing/selftests/bpf/progs/verifier_map_ptr.c
+++ b/tools/testing/selftests/bpf/progs/verifier_map_ptr.c
@@ -70,10 +70,13 @@ __naked void bpf_map_ptr_write_rejected(void)
: __clobber_all);
}
+/* The first element of struct bpf_map is a 32-byte SHA256 hash; accessing
+ * into this array is valid. The ops field is now at offset 32, so the 4-byte
+ * read at offset 33 below lands inside it.
+ */
SEC("socket")
__description("bpf_map_ptr: read non-existent field rejected")
__failure
-__msg("cannot access ptr member ops with moff 0 in struct bpf_map with off 1 size 4")
+__msg("cannot access ptr member ops with moff 32 in struct bpf_map with off 33 size 4")
__failure_unpriv
__msg_unpriv("access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN")
__flag(BPF_F_ANY_ALIGNMENT)
@@ -82,7 +85,7 @@ __naked void read_non_existent_field_rejected(void)
asm volatile (" \
r6 = 0; \
r1 = %[map_array_48b] ll; \
- r6 = *(u32*)(r1 + 1); \
+ r6 = *(u32*)(r1 + 33); \
r0 = 1; \
exit; \
" :
diff --git a/tools/testing/selftests/bpf/progs/verifier_may_goto_1.c b/tools/testing/selftests/bpf/progs/verifier_may_goto_1.c
index 3966d827f288..6d1edaef9213 100644
--- a/tools/testing/selftests/bpf/progs/verifier_may_goto_1.c
+++ b/tools/testing/selftests/bpf/progs/verifier_may_goto_1.c
@@ -9,6 +9,8 @@
SEC("raw_tp")
__description("may_goto 0")
__arch_x86_64
+__arch_s390x
+__arch_arm64
__xlated("0: r0 = 1")
__xlated("1: exit")
__success
@@ -27,6 +29,8 @@ __naked void may_goto_simple(void)
SEC("raw_tp")
__description("batch 2 of may_goto 0")
__arch_x86_64
+__arch_s390x
+__arch_arm64
__xlated("0: r0 = 1")
__xlated("1: exit")
__success
@@ -47,6 +51,8 @@ __naked void may_goto_batch_0(void)
SEC("raw_tp")
__description("may_goto batch with offsets 2/1/0")
__arch_x86_64
+__arch_s390x
+__arch_arm64
__xlated("0: r0 = 1")
__xlated("1: exit")
__success
@@ -69,8 +75,10 @@ __naked void may_goto_batch_1(void)
}
SEC("raw_tp")
-__description("may_goto batch with offsets 2/0 - x86_64")
+__description("may_goto batch with offsets 2/0")
__arch_x86_64
+__arch_s390x
+__arch_arm64
__xlated("0: *(u64 *)(r10 -16) = 65535")
__xlated("1: *(u64 *)(r10 -8) = 0")
__xlated("2: r11 = *(u64 *)(r10 -16)")
@@ -84,33 +92,7 @@ __xlated("9: r0 = 1")
__xlated("10: r0 = 2")
__xlated("11: exit")
__success
-__naked void may_goto_batch_2_x86_64(void)
-{
- asm volatile (
- ".8byte %[may_goto1];"
- ".8byte %[may_goto3];"
- "r0 = 1;"
- "r0 = 2;"
- "exit;"
- :
- : __imm_insn(may_goto1, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 2 /* offset */, 0)),
- __imm_insn(may_goto3, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 0 /* offset */, 0))
- : __clobber_all);
-}
-
-SEC("raw_tp")
-__description("may_goto batch with offsets 2/0 - arm64")
-__arch_arm64
-__xlated("0: *(u64 *)(r10 -8) = 8388608")
-__xlated("1: r11 = *(u64 *)(r10 -8)")
-__xlated("2: if r11 == 0x0 goto pc+3")
-__xlated("3: r11 -= 1")
-__xlated("4: *(u64 *)(r10 -8) = r11")
-__xlated("5: r0 = 1")
-__xlated("6: r0 = 2")
-__xlated("7: exit")
-__success
-__naked void may_goto_batch_2_arm64(void)
+__naked void may_goto_batch_2(void)
{
asm volatile (
".8byte %[may_goto1];"
diff --git a/tools/testing/selftests/bpf/progs/verifier_movsx.c b/tools/testing/selftests/bpf/progs/verifier_movsx.c
index 994bbc346d25..a4d8814eb5ed 100644
--- a/tools/testing/selftests/bpf/progs/verifier_movsx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_movsx.c
@@ -245,7 +245,13 @@ l0_%=: \
SEC("socket")
__description("MOV32SX, S8, var_off not u32_max, positive after s8 extension")
__success __retval(0)
-__failure_unpriv __msg_unpriv("frame pointer is read only")
+__success_unpriv
+#ifdef SPEC_V1
+__xlated_unpriv("w0 = 0")
+__xlated_unpriv("exit")
+__xlated_unpriv("nospec") /* inserted to prevent `frame pointer is read only` */
+__xlated_unpriv("goto pc-1")
+#endif
__naked void mov64sx_s32_varoff_2(void)
{
asm volatile (" \
@@ -267,7 +273,13 @@ l0_%=: \
SEC("socket")
__description("MOV32SX, S8, var_off not u32_max, negative after s8 extension")
__success __retval(0)
-__failure_unpriv __msg_unpriv("frame pointer is read only")
+__success_unpriv
+#ifdef SPEC_V1
+__xlated_unpriv("w0 = 0")
+__xlated_unpriv("exit")
+__xlated_unpriv("nospec") /* inserted to prevent `frame pointer is read only` */
+__xlated_unpriv("goto pc-1")
+#endif
__naked void mov64sx_s32_varoff_3(void)
{
asm volatile (" \
diff --git a/tools/testing/selftests/bpf/progs/verifier_mul.c b/tools/testing/selftests/bpf/progs/verifier_mul.c
new file mode 100644
index 000000000000..7145fe3351d5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_mul.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Nandakumar Edamana */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+/* Intended to test the abstract multiplication technique(s) used by
+ * the verifier. Using assembly to avoid compiler optimizations.
+ */
+SEC("fentry/bpf_fentry_test1")
+void BPF_PROG(mul_precise, int x)
+{
+ /* First, force the verifier to be uncertain about the value:
+ * unsigned int a = (bpf_get_prandom_u32() & 0x2) | 0x1;
+ *
+ * Assuming the verifier is using tnum, a must be tnum{.v=0x1, .m=0x2}.
+ * Then a * 0x3 would be m0m1 (m for uncertain). Added imprecision
+ * would cause the following to fail, because the required return value
+ * is 0:
+ *   return (a * 0x3) & 0x4;
+ */
+ asm volatile ("\
+ call %[bpf_get_prandom_u32];\
+ r0 &= 0x2;\
+ r0 |= 0x1;\
+ r0 *= 0x3;\
+ r0 &= 0x4;\
+ if r0 != 0 goto l0_%=;\
+ r0 = 0;\
+ goto l1_%=;\
+l0_%=:\
+ r0 = 1;\
+l1_%=:\
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
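
The comment's claim in verifier_mul.c is easy to confirm by enumerating the two concrete values that tnum{.v=0x1, .m=0x2} admits (a standalone userspace check, not part of the test):

#include <stdio.h>

int main(void)
{
	/* tnum{.v=0x1, .m=0x2} admits exactly the values 1 and 3 */
	for (unsigned int a = 1; a <= 3; a += 2)
		printf("a=%u  a*3=%#x  (a*3)&4=%#x\n", a, a * 3, (a * 3) & 4);
	/* both rows print (a*3)&4=0, so a precise tnum multiply must
	 * keep bit 2 known-zero and the program must return 0 */
	return 0;
}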
diff --git a/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c b/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c
index ab9f9f2620ed..e2cbc5bda65e 100644
--- a/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c
@@ -79,11 +79,6 @@ int with_invalid_ctx_access_test5(struct bpf_nf_ctx *ctx)
return NF_ACCEPT;
}
-extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags,
- struct bpf_dynptr *ptr__uninit) __ksym;
-extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, uint32_t offset,
- void *buffer, uint32_t buffer__sz) __ksym;
-
SEC("netfilter")
__description("netfilter test prog with skb and state read access")
__success __failure_unpriv
diff --git a/tools/testing/selftests/bpf/progs/verifier_precision.c b/tools/testing/selftests/bpf/progs/verifier_precision.c
index 9fe5d255ee37..1fe090cd6744 100644
--- a/tools/testing/selftests/bpf/progs/verifier_precision.c
+++ b/tools/testing/selftests/bpf/progs/verifier_precision.c
@@ -144,21 +144,21 @@ SEC("?raw_tp")
__success __log_level(2)
/*
* Without the bug fix there will be no history between "last_idx 3 first_idx 3"
- * and "parent state regs=" lines. "R0_w=6" parts are here to help anchor
+ * and "parent state regs=" lines. "R0=6" parts are here to help anchor
* expected log messages to the one specific mark_chain_precision operation.
*
* This is quite fragile: if verifier checkpointing heuristic changes, this
* might need adjusting.
*/
-__msg("2: (07) r0 += 1 ; R0_w=6")
+__msg("2: (07) r0 += 1 ; R0=6")
__msg("3: (35) if r0 >= 0xa goto pc+1")
__msg("mark_precise: frame0: last_idx 3 first_idx 3 subseq_idx -1")
__msg("mark_precise: frame0: regs=r0 stack= before 2: (07) r0 += 1")
__msg("mark_precise: frame0: regs=r0 stack= before 1: (07) r0 += 1")
__msg("mark_precise: frame0: regs=r0 stack= before 4: (05) goto pc-4")
__msg("mark_precise: frame0: regs=r0 stack= before 3: (35) if r0 >= 0xa goto pc+1")
-__msg("mark_precise: frame0: parent state regs= stack=: R0_rw=P4")
-__msg("3: R0_w=6")
+__msg("mark_precise: frame0: parent state regs= stack=: R0=P4")
+__msg("3: R0=6")
__naked int state_loop_first_last_equal(void)
{
asm volatile (
@@ -231,4 +231,74 @@ __naked void bpf_cond_op_not_r10(void)
::: __clobber_all);
}
+SEC("lsm.s/socket_connect")
+__success __log_level(2)
+__msg("0: (b7) r0 = 1 ; R0=1")
+__msg("1: (84) w0 = -w0 ; R0=0xffffffff")
+__msg("mark_precise: frame0: last_idx 2 first_idx 0 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r0 stack= before 1: (84) w0 = -w0")
+__msg("mark_precise: frame0: regs=r0 stack= before 0: (b7) r0 = 1")
+__naked int bpf_neg_2(void)
+{
+ /*
+ * lsm.s/socket_connect requires a return value within [-4095, 0].
+	 * Returning -1 is allowed.
+ */
+ asm volatile (
+ "r0 = 1;"
+ "w0 = -w0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm.s/socket_connect")
+__failure __msg("At program exit the register R0 has")
+__naked int bpf_neg_3(void)
+{
+ /*
+ * lsm.s/socket_connect requires a return value within [-4095, 0].
+ * Returning -10000 is not allowed.
+ */
+ asm volatile (
+ "r0 = 10000;"
+ "w0 = -w0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm.s/socket_connect")
+__success __log_level(2)
+__msg("0: (b7) r0 = 1 ; R0=1")
+__msg("1: (87) r0 = -r0 ; R0=-1")
+__msg("mark_precise: frame0: last_idx 2 first_idx 0 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r0 stack= before 1: (87) r0 = -r0")
+__msg("mark_precise: frame0: regs=r0 stack= before 0: (b7) r0 = 1")
+__naked int bpf_neg_4(void)
+{
+ /*
+ * lsm.s/socket_connect requires a return value within [-4095, 0].
+	 * Returning -1 is allowed.
+ */
+ asm volatile (
+ "r0 = 1;"
+ "r0 = -r0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm.s/socket_connect")
+__failure __msg("At program exit the register R0 has")
+__naked int bpf_neg_5(void)
+{
+ /*
+ * lsm.s/socket_connect requires a return value within [-4095, 0].
+ * Returning -10000 is not allowed.
+ */
+ asm volatile (
+ "r0 = 10000;"
+ "r0 = -r0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
char _license[] SEC("license") = "GPL";
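
The differing R0 values asserted above come straight from BPF's 32- vs 64-bit negation semantics; a plain-C restatement:

#include <stdio.h>

int main(void)
{
	unsigned long long r0 = 1;

	/* w0 = -w0: negate in 32 bits, then zero-extend to 64 bits */
	r0 = (unsigned int)-(unsigned int)r0;
	printf("w0 = -w0  -> %#llx\n", r0);		/* 0xffffffff */

	/* r0 = -r0: full 64-bit negate */
	r0 = 1;
	r0 = -r0;
	printf("r0 = -r0  -> %lld\n", (long long)r0);	/* -1 */
	return 0;
}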
diff --git a/tools/testing/selftests/bpf/progs/verifier_private_stack.c b/tools/testing/selftests/bpf/progs/verifier_private_stack.c
index fc91b414364e..1ecd34ebde19 100644
--- a/tools/testing/selftests/bpf/progs/verifier_private_stack.c
+++ b/tools/testing/selftests/bpf/progs/verifier_private_stack.c
@@ -8,7 +8,7 @@
/* From include/linux/filter.h */
#define MAX_BPF_STACK 512
-#if defined(__TARGET_ARCH_x86)
+#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64)
struct elem {
struct bpf_timer t;
@@ -30,6 +30,18 @@ __jited(" movabsq $0x{{.*}}, %r9")
__jited(" addq %gs:{{.*}}, %r9")
__jited(" movl $0x2a, %edi")
__jited(" movq %rdi, -0x100(%r9)")
+__arch_arm64
+__jited(" stp x25, x27, [sp, {{.*}}]!")
+__jited(" mov x27, {{.*}}")
+__jited(" movk x27, {{.*}}, lsl #16")
+__jited(" movk x27, {{.*}}")
+__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
+__jited(" add x27, x27, x10")
+__jited(" add x25, x27, {{.*}}")
+__jited(" mov x0, #0x2a")
+__jited(" str x0, [x27]")
+__jited("...")
+__jited(" ldp x25, x27, [sp], {{.*}}")
__naked void private_stack_single_prog(void)
{
asm volatile (" \
@@ -45,6 +57,9 @@ __description("No private stack")
__success
__arch_x86_64
__jited(" subq $0x8, %rsp")
+__arch_arm64
+__jited(" mov x25, sp")
+__jited(" sub sp, sp, #0x10")
__naked void no_private_stack_nested(void)
{
asm volatile (" \
@@ -81,6 +96,19 @@ __jited(" pushq %r9")
__jited(" callq 0x{{.*}}")
__jited(" popq %r9")
__jited(" xorl %eax, %eax")
+__arch_arm64
+__jited(" stp x25, x27, [sp, {{.*}}]!")
+__jited(" mov x27, {{.*}}")
+__jited(" movk x27, {{.*}}, lsl #16")
+__jited(" movk x27, {{.*}}")
+__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
+__jited(" add x27, x27, x10")
+__jited(" add x25, x27, {{.*}}")
+__jited(" mov x0, #0x2a")
+__jited(" str x0, [x27]")
+__jited(" bl {{.*}}")
+__jited("...")
+__jited(" ldp x25, x27, [sp], {{.*}}")
__naked void private_stack_nested_1(void)
{
asm volatile (" \
@@ -131,6 +159,24 @@ __jited(" movq %rdi, -0x200(%r9)")
__jited(" pushq %r9")
__jited(" callq")
__jited(" popq %r9")
+__arch_arm64
+__jited("func #1")
+__jited("...")
+__jited(" stp x25, x27, [sp, {{.*}}]!")
+__jited(" mov x27, {{.*}}")
+__jited(" movk x27, {{.*}}, lsl #16")
+__jited(" movk x27, {{.*}}")
+__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
+__jited(" add x27, x27, x10")
+__jited(" add x25, x27, {{.*}}")
+__jited(" bl 0x{{.*}}")
+__jited(" add x7, x0, #0x0")
+__jited(" mov x0, #0x2a")
+__jited(" str x0, [x27]")
+__jited(" bl 0x{{.*}}")
+__jited(" add x7, x0, #0x0")
+__jited(" mov x7, #0x0")
+__jited(" ldp x25, x27, [sp], {{.*}}")
__naked void private_stack_callback(void)
{
asm volatile (" \
@@ -154,6 +200,28 @@ __arch_x86_64
__jited(" pushq %r9")
__jited(" callq")
__jited(" popq %r9")
+__arch_arm64
+__jited(" stp x29, x30, [sp, #-0x10]!")
+__jited(" mov x29, sp")
+__jited(" stp xzr, x26, [sp, #-0x10]!")
+__jited(" mov x26, sp")
+__jited(" stp x19, x20, [sp, #-0x10]!")
+__jited(" stp x21, x22, [sp, #-0x10]!")
+__jited(" stp x23, x24, [sp, #-0x10]!")
+__jited(" stp x25, x26, [sp, #-0x10]!")
+__jited(" stp x27, x28, [sp, #-0x10]!")
+__jited(" mov x27, {{.*}}")
+__jited(" movk x27, {{.*}}, lsl #16")
+__jited(" movk x27, {{.*}}")
+__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
+__jited(" add x27, x27, x10")
+__jited(" add x25, x27, {{.*}}")
+__jited(" mov x0, #0x2a")
+__jited(" str x0, [x27]")
+__jited(" mov x0, #0x0")
+__jited(" bl 0x{{.*}}")
+__jited(" add x7, x0, #0x0")
+__jited(" ldp x27, x28, [sp], #0x10")
int private_stack_exception_main_prog(void)
{
asm volatile (" \
@@ -179,6 +247,19 @@ __jited(" movq %rdi, -0x200(%r9)")
__jited(" pushq %r9")
__jited(" callq")
__jited(" popq %r9")
+__arch_arm64
+__jited(" stp x27, x28, [sp, #-0x10]!")
+__jited(" mov x27, {{.*}}")
+__jited(" movk x27, {{.*}}, lsl #16")
+__jited(" movk x27, {{.*}}")
+__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
+__jited(" add x27, x27, x10")
+__jited(" add x25, x27, {{.*}}")
+__jited(" mov x0, #0x2a")
+__jited(" str x0, [x27]")
+__jited(" bl 0x{{.*}}")
+__jited(" add x7, x0, #0x0")
+__jited(" ldp x27, x28, [sp], #0x10")
int private_stack_exception_sub_prog(void)
{
asm volatile (" \
@@ -220,6 +301,10 @@ __description("Private stack, async callback, not nested")
__success __retval(0)
__arch_x86_64
__jited(" movabsq $0x{{.*}}, %r9")
+__arch_arm64
+__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
+__jited(" add x27, x27, x10")
+__jited(" add x25, x27, {{.*}}")
int private_stack_async_callback_1(void)
{
struct bpf_timer *arr_timer;
@@ -241,6 +326,8 @@ __description("Private stack, async callback, potential nesting")
__success __retval(0)
__arch_x86_64
__jited(" subq $0x100, %rsp")
+__arch_arm64
+__jited(" sub sp, sp, #0x100")
int private_stack_async_callback_2(void)
{
struct bpf_timer *arr_timer;
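For readers decoding the arm64 expectations above: the mov/movk/movk triple materializes the private stack pointer constant, `mrs x10, TPIDR_ELx` reads this CPU's per-CPU offset, and the two adds derive the per-CPU stack base (x27) and the BPF frame pointer (x25). A hedged C model of that address math (the names are illustrative stand-ins, not the JIT's):

    #include <stdint.h>

    /* Sketch of the per-CPU private-stack address computation the arm64
     * JIT emits above; priv_stack_ptr and tpidr stand in for the values
     * produced by the mov/movk sequence and the TPIDR_ELx read.
     */
    static uint64_t priv_frame_ptr(uint64_t priv_stack_ptr, uint64_t tpidr,
                                   uint32_t stack_sz)
    {
        uint64_t base = priv_stack_ptr + tpidr; /* add x27, x27, x10 */
        return base + stack_sz;                 /* add x25, x27, ... */
    }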
diff --git a/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c b/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c
index 683a882b3e6d..910365201f68 100644
--- a/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c
+++ b/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c
@@ -27,7 +27,7 @@ struct bpf_key {} __attribute__((preserve_access_index));
extern void bpf_key_put(struct bpf_key *key) __ksym;
extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym;
-extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym;
+extern struct bpf_key *bpf_lookup_user_key(__s32 serial, __u64 flags) __ksym;
/* BTF FUNC records are not generated for kfuncs referenced
* from inline assembly. These records are necessary for
diff --git a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
index 7c5e5e6d10eb..c0ce690ddb68 100644
--- a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
+++ b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
@@ -349,11 +349,11 @@ __naked void precision_two_ids(void)
SEC("socket")
__success __log_level(2)
__flag(BPF_F_TEST_STATE_FREQ)
-/* check thar r0 and r6 have different IDs after 'if',
+/* check that r0 and r6 have different IDs after 'if',
* collect_linked_regs() can't tie more than 6 registers for a single insn.
*/
__msg("8: (25) if r0 > 0x7 goto pc+0 ; R0=scalar(id=1")
-__msg("9: (bf) r6 = r6 ; R6_w=scalar(id=2")
+__msg("9: (bf) r6 = r6 ; R6=scalar(id=2")
/* check that r{0-5} are marked precise after 'if' */
__msg("frame0: regs=r0 stack= before 8: (25) if r0 > 0x7 goto pc+0")
__msg("frame0: parent state regs=r0,r1,r2,r3,r4,r5 stack=:")
@@ -779,12 +779,12 @@ __success
__retval(0)
/* Check that verifier believes r1/r0 are zero at exit */
__log_level(2)
-__msg("4: (77) r1 >>= 32 ; R1_w=0")
-__msg("5: (bf) r0 = r1 ; R0_w=0 R1_w=0")
+__msg("4: (77) r1 >>= 32 ; R1=0")
+__msg("5: (bf) r0 = r1 ; R0=0 R1=0")
__msg("6: (95) exit")
__msg("from 3 to 4")
-__msg("4: (77) r1 >>= 32 ; R1_w=0")
-__msg("5: (bf) r0 = r1 ; R0_w=0 R1_w=0")
+__msg("4: (77) r1 >>= 32 ; R1=0")
+__msg("5: (bf) r0 = r1 ; R0=0 R1=0")
__msg("6: (95) exit")
/* Verify that statements to randomize upper half of r1 had not been
* generated.
diff --git a/tools/testing/selftests/bpf/progs/verifier_sock.c b/tools/testing/selftests/bpf/progs/verifier_sock.c
index 0d5e56dffabb..a2132c72d3b8 100644
--- a/tools/testing/selftests/bpf/progs/verifier_sock.c
+++ b/tools/testing/selftests/bpf/progs/verifier_sock.c
@@ -1,14 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/sock.c */
-#include <linux/bpf.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
-#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
-#define offsetofend(TYPE, MEMBER) \
- (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
-
struct {
__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
__uint(max_entries, 1);
@@ -1073,16 +1069,65 @@ int invalidate_pkt_pointers_from_global_func(struct __sk_buff *sk)
}
__noinline
+long xdp_pull_data2(struct xdp_md *x, __u32 len)
+{
+ return bpf_xdp_pull_data(x, len);
+}
+
+__noinline
+long xdp_pull_data1(struct xdp_md *x, __u32 len)
+{
+ return xdp_pull_data2(x, len);
+}
+
+/* A global function calls bpf_xdp_pull_data(), which invalidates packet
+ * pointers established before the global function call.
+ */
+SEC("xdp")
+__failure __msg("invalid mem access")
+int invalidate_xdp_pkt_pointers_from_global_func(struct xdp_md *x)
+{
+ int *p = (void *)(long)x->data;
+
+ if ((void *)(p + 1) > (void *)(long)x->data_end)
+ return XDP_DROP;
+ xdp_pull_data1(x, 0);
+ *p = 42; /* this is unsafe */
+ return XDP_PASS;
+}
+
+/* XDP packet changing kfunc calls invalidate packet pointers */
+SEC("xdp")
+__failure __msg("invalid mem access")
+int invalidate_xdp_pkt_pointers(struct xdp_md *x)
+{
+ int *p = (void *)(long)x->data;
+
+ if ((void *)(p + 1) > (void *)(long)x->data_end)
+ return XDP_DROP;
+ bpf_xdp_pull_data(x, 0);
+ *p = 42; /* this is unsafe */
+ return XDP_PASS;
+}
+
+__noinline
int tail_call(struct __sk_buff *sk)
{
bpf_tail_call_static(sk, &jmp_table, 0);
return 0;
}
-/* Tail calls invalidate packet pointers. */
+static __noinline
+int static_tail_call(struct __sk_buff *sk)
+{
+ bpf_tail_call_static(sk, &jmp_table, 0);
+ return 0;
+}
+
+/* Tail calls in sub-programs invalidate packet pointers. */
SEC("tc")
__failure __msg("invalid mem access")
-int invalidate_pkt_pointers_by_tail_call(struct __sk_buff *sk)
+int invalidate_pkt_pointers_by_global_tail_call(struct __sk_buff *sk)
{
int *p = (void *)(long)sk->data;
@@ -1093,4 +1138,32 @@ int invalidate_pkt_pointers_by_tail_call(struct __sk_buff *sk)
return TCX_PASS;
}
+/* Tail calls in static sub-programs invalidate packet pointers. */
+SEC("tc")
+__failure __msg("invalid mem access")
+int invalidate_pkt_pointers_by_static_tail_call(struct __sk_buff *sk)
+{
+ int *p = (void *)(long)sk->data;
+
+ if ((void *)(p + 1) > (void *)(long)sk->data_end)
+ return TCX_DROP;
+ static_tail_call(sk);
+ *p = 42; /* this is unsafe */
+ return TCX_PASS;
+}
+
+/* Direct tail calls do not invalidate packet pointers. */
+SEC("tc")
+__success
+int invalidate_pkt_pointers_by_tail_call(struct __sk_buff *sk)
+{
+ int *p = (void *)(long)sk->data;
+
+ if ((void *)(p + 1) > (void *)(long)sk->data_end)
+ return TCX_DROP;
+ bpf_tail_call_static(sk, &jmp_table, 0);
+ *p = 42; /* this is NOT unsafe: tail calls don't return */
+ return TCX_PASS;
+}
+
char _license[] SEC("license") = "GPL";
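For contrast with the two failing XDP tests above, the accepted pattern re-derives packet pointers after any call that may move packet memory. A minimal sketch under the same includes as this file (illustrative, not part of the patch):

    SEC("xdp")
    int revalidate_after_pull(struct xdp_md *x)
    {
        int *p;

        bpf_xdp_pull_data(x, 0);
        /* the old pointers are invalidated by the kfunc: re-derive, re-check */
        p = (void *)(long)x->data;
        if ((void *)(p + 1) > (void *)(long)x->data_end)
            return XDP_DROP;
        *p = 42; /* safe now */
        return XDP_PASS;
    }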
diff --git a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
index 1e5a511e8494..7a13dbd794b2 100644
--- a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
+++ b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
@@ -506,17 +506,17 @@ SEC("raw_tp")
__log_level(2)
__success
/* fp-8 is spilled IMPRECISE value zero (represented by a zero value fake reg) */
-__msg("2: (7a) *(u64 *)(r10 -8) = 0 ; R10=fp0 fp-8_w=0")
+__msg("2: (7a) *(u64 *)(r10 -8) = 0 ; R10=fp0 fp-8=0")
/* but fp-16 is spilled IMPRECISE zero const reg */
-__msg("4: (7b) *(u64 *)(r10 -16) = r0 ; R0_w=0 R10=fp0 fp-16_w=0")
+__msg("4: (7b) *(u64 *)(r10 -16) = r0 ; R0=0 R10=fp0 fp-16=0")
/* validate that assigning R2 from STACK_SPILL with zero value doesn't mark register
* precise immediately; if necessary, it will be marked precise later
*/
-__msg("6: (71) r2 = *(u8 *)(r10 -1) ; R2_w=0 R10=fp0 fp-8_w=0")
+__msg("6: (71) r2 = *(u8 *)(r10 -1) ; R2=0 R10=fp0 fp-8=0")
/* similarly, when R2 is assigned from spilled register, it is initially
* imprecise, but will be marked precise later once it is used in precise context
*/
-__msg("10: (71) r2 = *(u8 *)(r10 -9) ; R2_w=0 R10=fp0 fp-16_w=0")
+__msg("10: (71) r2 = *(u8 *)(r10 -9) ; R2=0 R10=fp0 fp-16=0")
__msg("11: (0f) r1 += r2")
__msg("mark_precise: frame0: last_idx 11 first_idx 0 subseq_idx -1")
__msg("mark_precise: frame0: regs=r2 stack= before 10: (71) r2 = *(u8 *)(r10 -9)")
@@ -598,7 +598,7 @@ __log_level(2)
__success
/* fp-4 is STACK_ZERO */
__msg("2: (62) *(u32 *)(r10 -4) = 0 ; R10=fp0 fp-8=0000????")
-__msg("4: (71) r2 = *(u8 *)(r10 -1) ; R2_w=0 R10=fp0 fp-8=0000????")
+__msg("4: (71) r2 = *(u8 *)(r10 -1) ; R2=0 R10=fp0 fp-8=0000????")
__msg("5: (0f) r1 += r2")
__msg("mark_precise: frame0: last_idx 5 first_idx 0 subseq_idx -1")
__msg("mark_precise: frame0: regs=r2 stack= before 4: (71) r2 = *(u8 *)(r10 -1)")
@@ -640,25 +640,25 @@ SEC("raw_tp")
__log_level(2) __flag(BPF_F_TEST_STATE_FREQ)
__success
/* make sure fp-8 is IMPRECISE fake register spill */
-__msg("3: (7a) *(u64 *)(r10 -8) = 1 ; R10=fp0 fp-8_w=1")
+__msg("3: (7a) *(u64 *)(r10 -8) = 1 ; R10=fp0 fp-8=1")
/* and fp-16 is spilled IMPRECISE const reg */
-__msg("5: (7b) *(u64 *)(r10 -16) = r0 ; R0_w=1 R10=fp0 fp-16_w=1")
+__msg("5: (7b) *(u64 *)(r10 -16) = r0 ; R0=1 R10=fp0 fp-16=1")
/* validate load from fp-8, which was initialized using BPF_ST_MEM */
-__msg("8: (79) r2 = *(u64 *)(r10 -8) ; R2_w=1 R10=fp0 fp-8=1")
+__msg("8: (79) r2 = *(u64 *)(r10 -8) ; R2=1 R10=fp0 fp-8=1")
__msg("9: (0f) r1 += r2")
__msg("mark_precise: frame0: last_idx 9 first_idx 7 subseq_idx -1")
__msg("mark_precise: frame0: regs=r2 stack= before 8: (79) r2 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r1 = r6")
/* note, fp-8 is precise, fp-16 is not yet precise, we'll get there */
-__msg("mark_precise: frame0: parent state regs= stack=-8: R0_w=1 R1=ctx() R6_r=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8_rw=P1 fp-16_w=1")
+__msg("mark_precise: frame0: parent state regs= stack=-8: R0=1 R1=ctx() R6=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8=P1 fp-16=1")
__msg("mark_precise: frame0: last_idx 6 first_idx 3 subseq_idx 7")
__msg("mark_precise: frame0: regs= stack=-8 before 6: (05) goto pc+0")
__msg("mark_precise: frame0: regs= stack=-8 before 5: (7b) *(u64 *)(r10 -16) = r0")
__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r0 = 1")
__msg("mark_precise: frame0: regs= stack=-8 before 3: (7a) *(u64 *)(r10 -8) = 1")
-__msg("10: R1_w=map_value(map=.data.two_byte_,ks=4,vs=2,off=1) R2_w=1")
+__msg("10: R1=map_value(map=.data.two_byte_,ks=4,vs=2,off=1) R2=1")
/* validate load from fp-16, which was initialized using BPF_STX_MEM */
-__msg("12: (79) r2 = *(u64 *)(r10 -16) ; R2_w=1 R10=fp0 fp-16=1")
+__msg("12: (79) r2 = *(u64 *)(r10 -16) ; R2=1 R10=fp0 fp-16=1")
__msg("13: (0f) r1 += r2")
__msg("mark_precise: frame0: last_idx 13 first_idx 7 subseq_idx -1")
__msg("mark_precise: frame0: regs=r2 stack= before 12: (79) r2 = *(u64 *)(r10 -16)")
@@ -668,12 +668,12 @@ __msg("mark_precise: frame0: regs= stack=-16 before 9: (0f) r1 += r2")
__msg("mark_precise: frame0: regs= stack=-16 before 8: (79) r2 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: regs= stack=-16 before 7: (bf) r1 = r6")
/* now both fp-8 and fp-16 are precise, very good */
-__msg("mark_precise: frame0: parent state regs= stack=-16: R0_w=1 R1=ctx() R6_r=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8_rw=P1 fp-16_rw=P1")
+__msg("mark_precise: frame0: parent state regs= stack=-16: R0=1 R1=ctx() R6=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8=P1 fp-16=P1")
__msg("mark_precise: frame0: last_idx 6 first_idx 3 subseq_idx 7")
__msg("mark_precise: frame0: regs= stack=-16 before 6: (05) goto pc+0")
__msg("mark_precise: frame0: regs= stack=-16 before 5: (7b) *(u64 *)(r10 -16) = r0")
__msg("mark_precise: frame0: regs=r0 stack= before 4: (b7) r0 = 1")
-__msg("14: R1_w=map_value(map=.data.two_byte_,ks=4,vs=2,off=1) R2_w=1")
+__msg("14: R1=map_value(map=.data.two_byte_,ks=4,vs=2,off=1) R2=1")
__naked void stack_load_preserves_const_precision(void)
{
asm volatile (
@@ -719,22 +719,22 @@ __success
/* make sure fp-8 is 32-bit FAKE subregister spill */
__msg("3: (62) *(u32 *)(r10 -8) = 1 ; R10=fp0 fp-8=????1")
/* but fp-16 is spilled IMPRECISE zero const reg */
-__msg("5: (63) *(u32 *)(r10 -16) = r0 ; R0_w=1 R10=fp0 fp-16=????1")
+__msg("5: (63) *(u32 *)(r10 -16) = r0 ; R0=1 R10=fp0 fp-16=????1")
/* validate load from fp-8, which was initialized using BPF_ST_MEM */
-__msg("8: (61) r2 = *(u32 *)(r10 -8) ; R2_w=1 R10=fp0 fp-8=????1")
+__msg("8: (61) r2 = *(u32 *)(r10 -8) ; R2=1 R10=fp0 fp-8=????1")
__msg("9: (0f) r1 += r2")
__msg("mark_precise: frame0: last_idx 9 first_idx 7 subseq_idx -1")
__msg("mark_precise: frame0: regs=r2 stack= before 8: (61) r2 = *(u32 *)(r10 -8)")
__msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r1 = r6")
-__msg("mark_precise: frame0: parent state regs= stack=-8: R0_w=1 R1=ctx() R6_r=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8_r=????P1 fp-16=????1")
+__msg("mark_precise: frame0: parent state regs= stack=-8: R0=1 R1=ctx() R6=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8=????P1 fp-16=????1")
__msg("mark_precise: frame0: last_idx 6 first_idx 3 subseq_idx 7")
__msg("mark_precise: frame0: regs= stack=-8 before 6: (05) goto pc+0")
__msg("mark_precise: frame0: regs= stack=-8 before 5: (63) *(u32 *)(r10 -16) = r0")
__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r0 = 1")
__msg("mark_precise: frame0: regs= stack=-8 before 3: (62) *(u32 *)(r10 -8) = 1")
-__msg("10: R1_w=map_value(map=.data.two_byte_,ks=4,vs=2,off=1) R2_w=1")
+__msg("10: R1=map_value(map=.data.two_byte_,ks=4,vs=2,off=1) R2=1")
/* validate load from fp-16, which was initialized using BPF_STX_MEM */
-__msg("12: (61) r2 = *(u32 *)(r10 -16) ; R2_w=1 R10=fp0 fp-16=????1")
+__msg("12: (61) r2 = *(u32 *)(r10 -16) ; R2=1 R10=fp0 fp-16=????1")
__msg("13: (0f) r1 += r2")
__msg("mark_precise: frame0: last_idx 13 first_idx 7 subseq_idx -1")
__msg("mark_precise: frame0: regs=r2 stack= before 12: (61) r2 = *(u32 *)(r10 -16)")
@@ -743,12 +743,12 @@ __msg("mark_precise: frame0: regs= stack=-16 before 10: (73) *(u8 *)(r1 +0) = r2
__msg("mark_precise: frame0: regs= stack=-16 before 9: (0f) r1 += r2")
__msg("mark_precise: frame0: regs= stack=-16 before 8: (61) r2 = *(u32 *)(r10 -8)")
__msg("mark_precise: frame0: regs= stack=-16 before 7: (bf) r1 = r6")
-__msg("mark_precise: frame0: parent state regs= stack=-16: R0_w=1 R1=ctx() R6_r=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8_r=????P1 fp-16_r=????P1")
+__msg("mark_precise: frame0: parent state regs= stack=-16: R0=1 R1=ctx() R6=map_value(map=.data.two_byte_,ks=4,vs=2) R10=fp0 fp-8=????P1 fp-16=????P1")
__msg("mark_precise: frame0: last_idx 6 first_idx 3 subseq_idx 7")
__msg("mark_precise: frame0: regs= stack=-16 before 6: (05) goto pc+0")
__msg("mark_precise: frame0: regs= stack=-16 before 5: (63) *(u32 *)(r10 -16) = r0")
__msg("mark_precise: frame0: regs=r0 stack= before 4: (b7) r0 = 1")
-__msg("14: R1_w=map_value(map=.data.two_byte_,ks=4,vs=2,off=1) R2_w=1")
+__msg("14: R1=map_value(map=.data.two_byte_,ks=4,vs=2,off=1) R2=1")
__naked void stack_load_preserves_const_precision_subreg(void)
{
asm volatile (
diff --git a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
index 9d415f7ce599..61886ed554de 100644
--- a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
+++ b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
@@ -105,7 +105,7 @@ __msg("mark_precise: frame0: regs=r0 stack= before 4: (27) r0 *= 4")
__msg("mark_precise: frame0: regs=r0 stack= before 3: (57) r0 &= 3")
__msg("mark_precise: frame0: regs=r0 stack= before 10: (95) exit")
__msg("mark_precise: frame1: regs=r0 stack= before 9: (bf) r0 = (s8)r10")
-__msg("7: R0_w=scalar")
+__msg("7: R0=scalar")
__naked int fp_precise_subprog_result(void)
{
asm volatile (
@@ -141,7 +141,7 @@ __msg("mark_precise: frame1: regs=r0 stack= before 10: (bf) r0 = (s8)r1")
* anyways, at which point we'll break precision chain
*/
__msg("mark_precise: frame1: regs=r1 stack= before 9: (bf) r1 = r10")
-__msg("7: R0_w=scalar")
+__msg("7: R0=scalar")
__naked int sneaky_fp_precise_subprog_result(void)
{
asm volatile (
@@ -681,7 +681,7 @@ __msg("mark_precise: frame0: last_idx 10 first_idx 7 subseq_idx -1")
__msg("mark_precise: frame0: regs=r7 stack= before 9: (bf) r1 = r8")
__msg("mark_precise: frame0: regs=r7 stack= before 8: (27) r7 *= 4")
__msg("mark_precise: frame0: regs=r7 stack= before 7: (79) r7 = *(u64 *)(r10 -8)")
-__msg("mark_precise: frame0: parent state regs= stack=-8: R0_w=2 R6_w=1 R8_rw=map_value(map=.data.vals,ks=4,vs=16) R10=fp0 fp-8_rw=P1")
+__msg("mark_precise: frame0: parent state regs= stack=-8: R0=2 R6=1 R8=map_value(map=.data.vals,ks=4,vs=16) R10=fp0 fp-8=P1")
__msg("mark_precise: frame0: last_idx 18 first_idx 0 subseq_idx 7")
__msg("mark_precise: frame0: regs= stack=-8 before 18: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 17: (0f) r0 += r2")
@@ -793,4 +793,57 @@ __naked int stack_slot_aliases_precision(void)
);
}
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} map_array SEC(".maps");
+
+__naked __noinline __used
+static unsigned long identity_tail_call(void)
+{
+ /* the simplest identity function involving a tail call */
+ asm volatile (
+ "r6 = r2;"
+ "r2 = %[map_array] ll;"
+ "r3 = 0;"
+ "call %[bpf_tail_call];"
+ "r0 = r6;"
+ "exit;"
+ :
+ : __imm(bpf_tail_call),
+ __imm_addr(map_array)
+ : __clobber_all);
+}
+
+SEC("?raw_tp")
+__failure __log_level(2)
+__msg("13: (85) call bpf_tail_call#12")
+__msg("mark_precise: frame1: last_idx 13 first_idx 0 subseq_idx -1 ")
+__msg("returning from callee:")
+__msg("frame1: R0=scalar() R6=3 R10=fp0")
+__msg("to caller at 4:")
+__msg("R0=scalar() R6=map_value(map=.data.vals,ks=4,vs=16) R10=fp0")
+__msg("6: (0f) r1 += r0")
+__msg("mark_precise: frame0: regs=r0 stack= before 5: (bf) r1 = r6")
+__msg("mark_precise: frame0: regs=r0 stack= before 4: (27) r0 *= 4")
+__msg("mark_precise: frame0: parent state regs=r0 stack=: R0=Pscalar() R6=map_value(map=.data.vals,ks=4,vs=16) R10=fp0")
+__msg("math between map_value pointer and register with unbounded min value is not allowed")
+__naked int subprog_result_tail_call(void)
+{
+ asm volatile (
+ "r2 = 3;"
+ "call identity_tail_call;"
+ "r0 *= 4;"
+ "r1 = %[vals];"
+ "r1 += r0;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals)
+ : __clobber_common
+ );
+}
+
char _license[] SEC("license") = "GPL";
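If a program legitimately needed such a post-tail-call result as an index, the usual remedy is to bound the scalar before the pointer math; a hypothetical variant of the asm above that should satisfy the verifier's range check:

    asm volatile (
        "r2 = 3;"
        "call identity_tail_call;"
        "if r0 > 3 goto l0_%=;" /* bound the unknown scalar first */
        "r0 *= 4;"
        "r1 = %[vals];"
        "r1 += r0;"
        "r0 = *(u32 *)(r1 + 0);"
    "l0_%=: exit;"
        :
        : __imm_ptr(vals)
        : __clobber_common
    );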
diff --git a/tools/testing/selftests/bpf/progs/verifier_tailcall.c b/tools/testing/selftests/bpf/progs/verifier_tailcall.c
new file mode 100644
index 000000000000..b4acce60fb9b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_tailcall.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} map_array SEC(".maps");
+
+SEC("socket")
+__description("invalid map type for tail call")
+__failure __msg("expected prog array map for tail call")
+__failure_unpriv
+__naked void invalid_map_for_tail_call(void)
+{
+ asm volatile (" \
+ r2 = %[map_array] ll; \
+ r3 = 0; \
+ call %[bpf_tail_call]; \
+ exit; \
+" :
+ : __imm(bpf_tail_call),
+ __imm_addr(map_array)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
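For reference, the map type the error message asks for is a prog array; a conventional declaration in the selftests' style (`jmp_table` is a placeholder name, not part of this test) looks like:

    struct {
        __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
        __uint(max_entries, 1);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, sizeof(__u32));
    } jmp_table SEC(".maps");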
diff --git a/tools/testing/selftests/bpf/progs/verifier_unpriv.c b/tools/testing/selftests/bpf/progs/verifier_unpriv.c
index a4a5e2071604..28b4f7035ceb 100644
--- a/tools/testing/selftests/bpf/progs/verifier_unpriv.c
+++ b/tools/testing/selftests/bpf/progs/verifier_unpriv.c
@@ -572,8 +572,14 @@ l0_%=: exit; \
SEC("socket")
__description("alu32: mov u32 const")
-__success __failure_unpriv __msg_unpriv("R7 invalid mem access 'scalar'")
+__success __success_unpriv
__retval(0)
+#ifdef SPEC_V1
+__xlated_unpriv("if r0 == 0x0 goto pc+2")
+__xlated_unpriv("nospec") /* inserted to prevent `R7 invalid mem access 'scalar'` */
+__xlated_unpriv("goto pc-1") /* sanitized dead code */
+__xlated_unpriv("exit")
+#endif
__naked void alu32_mov_u32_const(void)
{
asm volatile (" \
@@ -619,12 +625,11 @@ __naked void pass_pointer_to_tail_call(void)
SEC("socket")
__description("unpriv: cmp map pointer with zero")
-__success __failure_unpriv __msg_unpriv("R1 pointer comparison")
+__success __success_unpriv
__retval(0)
__naked void cmp_map_pointer_with_zero(void)
{
asm volatile (" \
- r1 = 0; \
r1 = %[map_hash_8b] ll; \
if r1 == 0 goto l0_%=; \
l0_%=: r0 = 0; \
@@ -635,6 +640,22 @@ l0_%=: r0 = 0; \
}
SEC("socket")
+__description("unpriv: cmp map pointer with const")
+__success __failure_unpriv __msg_unpriv("R1 pointer comparison prohibited")
+__retval(0)
+__naked void cmp_map_pointer_with_const(void)
+{
+ asm volatile (" \
+ r1 = %[map_hash_8b] ll; \
+ if r1 == 0x0000beef goto l0_%=; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
__description("unpriv: write into frame pointer")
__failure __msg("frame pointer is read only")
__failure_unpriv
@@ -723,4 +744,210 @@ l0_%=: r0 = 0; \
" ::: __clobber_all);
}
+SEC("socket")
+__description("unpriv: Spectre v1 path-based type confusion of scalar as stack-ptr")
+__success __success_unpriv __retval(0)
+#ifdef SPEC_V1
+__xlated_unpriv("if r0 != 0x1 goto pc+2")
+/* This nospec prevents the exploit because it forces the mispredicted (not
+ * taken) `if r0 != 0x0 goto l0_%=` to resolve before using r6 as a pointer.
+ * This causes the CPU to realize that `r6 = r9` should have never executed. It
+ * ensures that r6 always contains a readable stack slot ptr when the insn after
+ * the nospec executes.
+ */
+__xlated_unpriv("nospec")
+__xlated_unpriv("r9 = *(u8 *)(r6 +0)")
+#endif
+__naked void unpriv_spec_v1_type_confusion(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l2_%=; \
+ /* r0: pointer to a map array entry */ \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ /* r1, r2: prepared call args */ \
+ r6 = r10; \
+ r6 += -8; \
+ /* r6: pointer to readable stack slot */ \
+ r9 = 0xffffc900; \
+ r9 <<= 32; \
+ /* r9: scalar controlled by attacker */ \
+ r0 = *(u64 *)(r0 + 0); /* cache miss */ \
+ if r0 != 0x0 goto l0_%=; \
+ r6 = r9; \
+l0_%=: if r0 != 0x1 goto l1_%=; \
+ r9 = *(u8 *)(r6 + 0); \
+l1_%=: /* leak r9 */ \
+ r9 &= 1; \
+ r9 <<= 9; \
+ *(u64*)(r10 - 8) = r9; \
+ call %[bpf_map_lookup_elem]; \
+ if r0 == 0 goto l2_%=; \
+ /* leak secret into is_cached(map[0|512]): */ \
+ r0 = *(u64 *)(r0 + 0); \
+l2_%=: \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: ldimm64 before Spectre v4 barrier")
+__success __success_unpriv
+__retval(0)
+#ifdef SPEC_V4
+__xlated_unpriv("r1 = 0x2020200005642020") /* should not matter */
+__xlated_unpriv("*(u64 *)(r10 -8) = r1")
+__xlated_unpriv("nospec")
+#endif
+__naked void unpriv_ldimm64_spectre_v4(void)
+{
+ asm volatile (" \
+ r1 = 0x2020200005642020 ll; \
+ *(u64 *)(r10 -8) = r1; \
+ r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: Spectre v1 and v4 barrier")
+__success __success_unpriv
+__retval(0)
+#ifdef SPEC_V1
+#ifdef SPEC_V4
+/* starts with r0 == r8 == r9 == 0 */
+__xlated_unpriv("if r8 != 0x0 goto pc+1")
+__xlated_unpriv("goto pc+2")
+__xlated_unpriv("if r9 == 0x0 goto pc+4")
+__xlated_unpriv("r2 = r0")
+/* The following nospec is required to prevent the dangerous `*(u64 *)(NOT_FP -64)
+ * = r1` in case `if r9 == 0 goto pc+4` was mispredicted because of Spectre v1.
+ * The test therefore ensures the Spectre-v4-induced nospec does not prevent the
+ * Spectre-v1-induced speculative path from being fully analyzed.
+ */
+__xlated_unpriv("nospec") /* Spectre v1 */
+__xlated_unpriv("*(u64 *)(r2 -64) = r1") /* could be used to leak r2 */
+__xlated_unpriv("nospec") /* Spectre v4 */
+#endif
+#endif
+__naked void unpriv_spectre_v1_and_v4(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r8 = r0; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r9 = r0; \
+ r0 = r10; \
+ r1 = 0; \
+ r2 = r10; \
+ if r8 != 0 goto l0_%=; \
+ if r9 != 0 goto l0_%=; \
+ r0 = 0; \
+l0_%=: if r8 != 0 goto l1_%=; \
+ goto l2_%=; \
+l1_%=: if r9 == 0 goto l3_%=; \
+ r2 = r0; \
+l2_%=: *(u64 *)(r2 -64) = r1; \
+l3_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: Spectre v1 and v4 barrier (simple)")
+__success __success_unpriv
+__retval(0)
+#ifdef SPEC_V1
+#ifdef SPEC_V4
+__xlated_unpriv("if r8 != 0x0 goto pc+1")
+__xlated_unpriv("goto pc+2")
+__xlated_unpriv("goto pc-1") /* if r9 == 0 goto l3_%= */
+__xlated_unpriv("goto pc-1") /* r2 = r0 */
+__xlated_unpriv("nospec")
+__xlated_unpriv("*(u64 *)(r2 -64) = r1")
+__xlated_unpriv("nospec")
+#endif
+#endif
+__naked void unpriv_spectre_v1_and_v4_simple(void)
+{
+ asm volatile (" \
+ r8 = 0; \
+ r9 = 0; \
+ r0 = r10; \
+ r1 = 0; \
+ r2 = r10; \
+ if r8 != 0 goto l0_%=; \
+ if r9 != 0 goto l0_%=; \
+ r0 = 0; \
+l0_%=: if r8 != 0 goto l1_%=; \
+ goto l2_%=; \
+l1_%=: if r9 == 0 goto l3_%=; \
+ r2 = r0; \
+l2_%=: *(u64 *)(r2 -64) = r1; \
+l3_%=: r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("unpriv: ldimm64 before Spectre v1 and v4 barrier (simple)")
+__success __success_unpriv
+__retval(0)
+#ifdef SPEC_V1
+#ifdef SPEC_V4
+__xlated_unpriv("if r8 != 0x0 goto pc+1")
+__xlated_unpriv("goto pc+4")
+__xlated_unpriv("goto pc-1") /* if r9 == 0 goto l3_%= */
+__xlated_unpriv("goto pc-1") /* r2 = r0 */
+__xlated_unpriv("goto pc-1") /* r1 = 0x2020200005642020 ll */
+__xlated_unpriv("goto pc-1") /* second part of ldimm64 */
+__xlated_unpriv("nospec")
+__xlated_unpriv("*(u64 *)(r2 -64) = r1")
+__xlated_unpriv("nospec")
+#endif
+#endif
+__naked void unpriv_ldimm64_spectre_v1_and_v4_simple(void)
+{
+ asm volatile (" \
+ r8 = 0; \
+ r9 = 0; \
+ r0 = r10; \
+ r1 = 0; \
+ r2 = r10; \
+ if r8 != 0 goto l0_%=; \
+ if r9 != 0 goto l0_%=; \
+ r0 = 0; \
+l0_%=: if r8 != 0 goto l1_%=; \
+ goto l2_%=; \
+l1_%=: if r9 == 0 goto l3_%=; \
+ r2 = r0; \
+ r1 = 0x2020200005642020 ll; \
+l2_%=: *(u64 *)(r2 -64) = r1; \
+l3_%=: r0 = 0; \
+ exit; \
+" ::: __clobber_all);
+}
+
char _license[] SEC("license") = "GPL";
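As background for the __xlated_unpriv("nospec") expectations above: for unprivileged loads the verifier now inserts a speculation barrier instead of rejecting these programs, so the mispredicted path resolves before any dangerous load or store can issue. A rough user-space analogue of the same defense (assuming x86-64 and the GCC/Clang lfence builtin):

    #include <stdint.h>

    /* The branch must resolve before *p is loaded, even speculatively;
     * this mirrors the effect BPF's nospec has in the programs above.
     */
    static inline uint8_t load_after_barrier(const uint8_t *p, int ok)
    {
        if (!ok)
            return 0;
        __builtin_ia32_lfence(); /* speculation barrier */
        return *p;
    }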
diff --git a/tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c b/tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c
index a9ab37d3b9e2..2129e4353fd9 100644
--- a/tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c
+++ b/tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c
@@ -3,6 +3,7 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "../../../include/linux/filter.h"
#include "bpf_misc.h"
#define MAX_ENTRIES 11
@@ -146,6 +147,24 @@ l0_%=: exit; \
: __clobber_all);
}
+SEC("socket")
+__description("map_ptr illegal alu op, map_ptr = -map_ptr")
+__failure __msg("R0 invalid mem access 'scalar'")
+__failure_unpriv __msg_unpriv("R0 pointer arithmetic prohibited")
+__flag(BPF_F_ANY_ALIGNMENT)
+__naked void map_ptr_illegal_alu_op(void)
+{
+ asm volatile (" \
+ r0 = %[map_hash_48b] ll; \
+ r0 = -r0; \
+ r1 = 22; \
+ *(u64*)(r0 + 0) = r1; \
+ exit; \
+" :
+ : __imm_addr(map_hash_48b)
+ : __clobber_all);
+}
+
SEC("flow_dissector")
__description("flow_keys illegal alu op with variable offset")
__failure __msg("R7 pointer arithmetic on flow_keys prohibited")
@@ -165,4 +184,32 @@ __naked void flow_keys_illegal_variable_offset_alu(void)
: __clobber_all);
}
+#define DEFINE_BAD_OFFSET_TEST(name, op, off, imm) \
+ SEC("socket") \
+ __failure __msg("BPF_ALU uses reserved fields") \
+ __naked void name(void) \
+ { \
+ asm volatile( \
+ "r0 = 1;" \
+ ".8byte %[insn];" \
+ "r0 = 0;" \
+ "exit;" \
+ : \
+ : __imm_insn(insn, BPF_RAW_INSN((op), 0, 0, (off), (imm))) \
+ : __clobber_all); \
+ }
+
+/*
+ * Offset fields of 0 and 1 are legal for BPF_{DIV,MOD} instructions.
+ * Offset fields of 0 are legal for the rest of ALU instructions.
+ * Test that an error is reported for illegal offsets, assuming that tests
+ * for legal offsets exist.
+ */
+DEFINE_BAD_OFFSET_TEST(bad_offset_divx, BPF_ALU64 | BPF_DIV | BPF_X, -1, 0)
+DEFINE_BAD_OFFSET_TEST(bad_offset_modk, BPF_ALU64 | BPF_MOD | BPF_K, -1, 1)
+DEFINE_BAD_OFFSET_TEST(bad_offset_addx, BPF_ALU64 | BPF_ADD | BPF_X, -1, 0)
+DEFINE_BAD_OFFSET_TEST(bad_offset_divx2, BPF_ALU64 | BPF_DIV | BPF_X, 2, 0)
+DEFINE_BAD_OFFSET_TEST(bad_offset_modk2, BPF_ALU64 | BPF_MOD | BPF_K, 2, 1)
+DEFINE_BAD_OFFSET_TEST(bad_offset_addx2, BPF_ALU64 | BPF_ADD | BPF_X, 1, 0)
+
char _license[] SEC("license") = "GPL";
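For reference, the offset=1 encoding that the comment above calls legal for BPF_{DIV,MOD} selects the signed variants (sdiv/smod) in the cpu v4 BPF ISA. Annotated instances of the macro's probe instruction (BPF_RAW_INSN() comes from the filter.h include added above):

    BPF_RAW_INSN(BPF_ALU64 | BPF_DIV | BPF_X, 0, 0, 0, 0) /* udiv: legal */
    BPF_RAW_INSN(BPF_ALU64 | BPF_DIV | BPF_X, 0, 0, 1, 0) /* sdiv: legal */
    BPF_RAW_INSN(BPF_ALU64 | BPF_DIV | BPF_X, 0, 0, 2, 0) /* off=2: reserved, rejected */
    BPF_RAW_INSN(BPF_ALU64 | BPF_ADD | BPF_X, 0, 0, 1, 0) /* off=1 on ADD: reserved, rejected */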
diff --git a/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c b/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c
index 5ba6e53571c8..af7938ce56cb 100644
--- a/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c
+++ b/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c
@@ -231,6 +231,10 @@ __retval(1)
__naked void ptr_unknown_vs_unknown_lt(void)
{
asm volatile (" \
+ r8 = r1; \
+ call %[bpf_get_prandom_u32]; \
+ r9 = r0; \
+ r1 = r8; \
r0 = *(u32*)(r1 + %[__sk_buff_len]); \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
@@ -245,11 +249,11 @@ l1_%=: call %[bpf_map_lookup_elem]; \
r4 = *(u8*)(r0 + 0); \
if r4 == 1 goto l3_%=; \
r1 = 6; \
- r1 = -r1; \
+ r1 = r9; \
r1 &= 0x3; \
goto l4_%=; \
l3_%=: r1 = 6; \
- r1 = -r1; \
+ r1 = r9; \
r1 &= 0x7; \
l4_%=: r1 += r0; \
r0 = *(u8*)(r1 + 0); \
@@ -259,7 +263,8 @@ l2_%=: r0 = 1; \
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b),
__imm_addr(map_hash_16b),
- __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
+ __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)),
+ __imm(bpf_get_prandom_u32)
: __clobber_all);
}
@@ -271,6 +276,10 @@ __retval(1)
__naked void ptr_unknown_vs_unknown_gt(void)
{
asm volatile (" \
+ r8 = r1; \
+ call %[bpf_get_prandom_u32]; \
+ r9 = r0; \
+ r1 = r8; \
r0 = *(u32*)(r1 + %[__sk_buff_len]); \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
@@ -285,11 +294,11 @@ l1_%=: call %[bpf_map_lookup_elem]; \
r4 = *(u8*)(r0 + 0); \
if r4 == 1 goto l3_%=; \
r1 = 6; \
- r1 = -r1; \
+ r1 = r9; \
r1 &= 0x7; \
goto l4_%=; \
l3_%=: r1 = 6; \
- r1 = -r1; \
+ r1 = r9; \
r1 &= 0x3; \
l4_%=: r1 += r0; \
r0 = *(u8*)(r1 + 0); \
@@ -299,7 +308,8 @@ l2_%=: r0 = 1; \
: __imm(bpf_map_lookup_elem),
__imm_addr(map_array_48b),
__imm_addr(map_hash_16b),
- __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
+ __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)),
+ __imm(bpf_get_prandom_u32)
: __clobber_all);
}
@@ -398,7 +408,8 @@ l2_%=: r0 = 1; \
SEC("socket")
__description("map access: mixing value pointer and scalar, 1")
-__success __failure_unpriv __msg_unpriv("R2 pointer comparison prohibited")
+__success __failure_unpriv
+__msg_unpriv("R2 tried to add from different maps, paths or scalars, pointer arithmetic with it prohibited for !root")
__retval(0)
__naked void value_pointer_and_scalar_1(void)
{
@@ -433,6 +444,7 @@ l2_%=: /* common instruction */ \
l3_%=: /* branch B */ \
r0 = 0x13371337; \
/* verifier follows fall-through */ \
+ /* unpriv: nospec (inserted to prevent `R2 pointer comparison prohibited`) */\
if r2 != 0x100000 goto l4_%=; \
r0 = 0; \
exit; \
@@ -450,7 +462,8 @@ l4_%=: /* fake-dead code; targeted from branch A to \
SEC("socket")
__description("map access: mixing value pointer and scalar, 2")
-__success __failure_unpriv __msg_unpriv("R0 invalid mem access 'scalar'")
+__success __failure_unpriv
+__msg_unpriv("R2 tried to add from different maps, paths or scalars, pointer arithmetic with it prohibited for !root")
__retval(0)
__naked void value_pointer_and_scalar_2(void)
{
@@ -492,6 +505,7 @@ l4_%=: /* fake-dead code; targeted from branch A to \
* prevent dead code sanitization, rejected \
* via branch B however \
*/ \
+ /* unpriv: nospec (inserted to prevent `R0 invalid mem access 'scalar'`) */\
r0 = *(u8*)(r0 + 0); \
r0 = 0; \
exit; \
@@ -1296,9 +1310,13 @@ l0_%=: r0 = 1; \
SEC("socket")
__description("map access: value_ptr -= unknown scalar, 2")
-__success __failure_unpriv
-__msg_unpriv("R0 pointer arithmetic of map value goes out of range")
+__success __success_unpriv
__retval(1)
+#ifdef SPEC_V1
+__xlated_unpriv("r1 &= 7")
+__xlated_unpriv("nospec") /* inserted to prevent `R0 pointer arithmetic of map value goes out of range` */
+__xlated_unpriv("r0 -= r1")
+#endif
__naked void value_ptr_unknown_scalar_2_2(void)
{
asm volatile (" \
diff --git a/tools/testing/selftests/bpf/progs/verifier_var_off.c b/tools/testing/selftests/bpf/progs/verifier_var_off.c
index 1d36d01b746e..f345466bca68 100644
--- a/tools/testing/selftests/bpf/progs/verifier_var_off.c
+++ b/tools/testing/selftests/bpf/progs/verifier_var_off.c
@@ -114,8 +114,8 @@ __naked void stack_write_priv_vs_unpriv(void)
}
/* Similar to the previous test, but this time also perform a read from the
- * address written to with a variable offset. The read is allowed, showing that,
- * after a variable-offset write, a priviledged program can read the slots that
+ * address written to with a variable offset. The read is allowed, showing that,
+ * after a variable-offset write, a privileged program can read the slots that
* were in the range of that write (even if the verifier doesn't actually know if
 * the slot being read was really written to or not).
*
@@ -157,7 +157,7 @@ __naked void stack_write_followed_by_read(void)
SEC("socket")
__description("variable-offset stack write clobbers spilled regs")
__failure
-/* In the priviledged case, dereferencing a spilled-and-then-filled
+/* In the privileged case, dereferencing a spilled-and-then-filled
* register is rejected because the previous variable offset stack
* write might have overwritten the spilled pointer (i.e. we lose track
* of the spilled register when we analyze the write).
diff --git a/tools/testing/selftests/bpf/progs/verifier_vfs_accept.c b/tools/testing/selftests/bpf/progs/verifier_vfs_accept.c
index a7c0a553aa50..55398c04290a 100644
--- a/tools/testing/selftests/bpf/progs/verifier_vfs_accept.c
+++ b/tools/testing/selftests/bpf/progs/verifier_vfs_accept.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2024 Google LLC. */
#include <vmlinux.h>
+#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
@@ -69,7 +70,7 @@ __success
int BPF_PROG(path_d_path_from_file_argument, struct file *file)
{
int ret;
- struct path *path;
+ const struct path *path;
/* The f_path member is a path which is embedded directly within a
* file. Therefore, a pointer to such embedded members are still
@@ -82,4 +83,21 @@ int BPF_PROG(path_d_path_from_file_argument, struct file *file)
return 0;
}
+SEC("lsm.s/inode_rename")
+__success
+int BPF_PROG(inode_rename, struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
+{
+ struct inode *inode = new_dentry->d_inode;
+ ino_t ino;
+
+ if (!inode)
+ return 0;
+ ino = inode->i_ino;
+ if (ino == 0)
+ return -EACCES;
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c b/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
index d6d3f4fcb24c..4b392c6c8fc4 100644
--- a/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
+++ b/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2024 Google LLC. */
#include <vmlinux.h>
+#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <linux/limits.h>
@@ -158,4 +159,18 @@ int BPF_PROG(path_d_path_kfunc_non_lsm, struct path *path, struct file *f)
return 0;
}
+SEC("lsm.s/inode_rename")
+__failure __msg("invalid mem access 'trusted_ptr_or_null_'")
+int BPF_PROG(inode_rename, struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
+{
+ struct inode *inode = new_dentry->d_inode;
+ ino_t ino;
+
+ ino = inode->i_ino;
+ if (ino == 0)
+ return -EACCES;
+ return 0;
+}
char _license[] SEC("license") = "GPL";
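The rejected inode_rename above differs from the accepted one in verifier_vfs_accept.c only by the missing NULL check; since d_inode is tagged trusted_ptr_or_null_, the dereference must be guarded first:

    struct inode *inode = new_dentry->d_inode;

    if (!inode)         /* trusted_ptr_or_null_: check before dereferencing */
        return 0;
    ino = inode->i_ino; /* now a plain trusted pointer: access is allowed */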
diff --git a/tools/testing/selftests/bpf/progs/wq.c b/tools/testing/selftests/bpf/progs/wq.c
index 2f1ba08c293e..25be2cd9d42c 100644
--- a/tools/testing/selftests/bpf/progs/wq.c
+++ b/tools/testing/selftests/bpf/progs/wq.c
@@ -187,3 +187,20 @@ long test_call_lru_sleepable(void *ctx)
return test_elem_callback(&lru, &key, wq_callback);
}
+
+SEC("tc")
+long test_map_no_btf(void *ctx)
+{
+ struct elem *val;
+ struct bpf_wq *wq;
+ int key = 42;
+
+ val = bpf_map_lookup_elem(&array, &key);
+ if (!val)
+ return -2;
+
+ wq = &val->w;
+ if (bpf_wq_init(wq, &array, 0) != 0)
+ return -3;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/wq_failures.c b/tools/testing/selftests/bpf/progs/wq_failures.c
index 4240211a1900..d06f6d40594a 100644
--- a/tools/testing/selftests/bpf/progs/wq_failures.c
+++ b/tools/testing/selftests/bpf/progs/wq_failures.c
@@ -142,3 +142,26 @@ long test_wrong_wq_pointer_offset(void *ctx)
return -22;
}
+
+SEC("tc")
+__log_level(2)
+__failure
+__msg(": (85) call bpf_wq_init#")
+__msg("R1 doesn't have constant offset. bpf_wq has to be at the constant offset")
+long test_bad_wq_off(void *ctx)
+{
+ struct elem *val;
+ struct bpf_wq *wq;
+ int key = 42;
+ u64 unknown;
+
+ val = bpf_map_lookup_elem(&array, &key);
+ if (!val)
+ return -2;
+
+ unknown = bpf_get_prandom_u32();
+ wq = &val->w + unknown;
+ if (bpf_wq_init(wq, &array, 0) != 0)
+ return -3;
+ return 0;
+}
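For contrast with test_bad_wq_off, the accepted shape keeps the bpf_wq at a constant offset inside the map value, as in wq.c's test_map_no_btf above; schematically:

    wq = &val->w;           /* constant offset into the map value: accepted */
    wq = &val->w + unknown; /* offset unknown to the verifier: rejected */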
diff --git a/tools/testing/selftests/bpf/test_bpftool_build.sh b/tools/testing/selftests/bpf/test_bpftool_build.sh
index 1453a53ed547..b03a87571592 100755
--- a/tools/testing/selftests/bpf/test_bpftool_build.sh
+++ b/tools/testing/selftests/bpf/test_bpftool_build.sh
@@ -90,10 +90,6 @@ echo -e "... through kbuild\n"
if [ -f ".config" ] ; then
make_and_clean tools/bpf
- ## "make tools/bpf" sets $(OUTPUT) to ...tools/bpf/runqslower for
- ## runqslower, but the default (used for the "clean" target) is .output.
- ## Let's make sure we clean runqslower's directory properly.
- make -C tools/bpf/runqslower OUTPUT=${KDIR_ROOT_DIR}/tools/bpf/runqslower/ clean
## $OUTPUT is overwritten in kbuild Makefile, and thus cannot be passed
## down from toplevel Makefile to bpftool's Makefile.
diff --git a/tools/testing/selftests/bpf/test_bpftool_map.sh b/tools/testing/selftests/bpf/test_bpftool_map.sh
new file mode 100755
index 000000000000..515b1df0501e
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_bpftool_map.sh
@@ -0,0 +1,398 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+TESTNAME="bpftool_map"
+BPF_FILE="security_bpf_map.bpf.o"
+BPF_ITER_FILE="bpf_iter_map_elem.bpf.o"
+PROTECTED_MAP_NAME="prot_map"
+NOT_PROTECTED_MAP_NAME="not_prot_map"
+BPF_FS_TMP_PARENT="/tmp"
+BPF_FS_PARENT=$(awk '$3 == "bpf" {print $2; exit}' /proc/mounts)
+BPF_FS_PARENT=${BPF_FS_PARENT:-$BPF_FS_TMP_PARENT}
+# bpftool will mount a BPF filesystem under BPF_DIR if one is not already
+# mounted under BPF_FS_PARENT.
+BPF_DIR="$BPF_FS_PARENT/test_$TESTNAME"
+SCRIPT_DIR=$(dirname $(realpath "$0"))
+BPF_FILE_PATH="$SCRIPT_DIR/$BPF_FILE"
+BPF_ITER_FILE_PATH="$SCRIPT_DIR/$BPF_ITER_FILE"
+BPFTOOL_PATH="bpftool"
+# Assume the script is located under tools/testing/selftests/bpf/
+KDIR_ROOT_DIR=$(realpath "$SCRIPT_DIR"/../../../../)
+
+_cleanup()
+{
+ set +eu
+
+ # If BPF_DIR is a mount point this will not remove the mount point itself.
+ [ -d "$BPF_DIR" ] && rm -rf "$BPF_DIR" 2> /dev/null
+
+ # Unmount if BPF filesystem was temporarily created.
+ if [ "$BPF_FS_PARENT" = "$BPF_FS_TMP_PARENT" ]; then
+ # A loop and recursive unmount are required as bpftool might
+ # create multiple mounts. For example, a bind mount of the directory
+ # to itself. The bind mount is created to change mount propagation
+ # flags on an actual mount point.
+ max_attempts=3
+ attempt=0
+ while mountpoint -q "$BPF_DIR" && [ $attempt -lt $max_attempts ]; do
+ umount -R "$BPF_DIR" 2>/dev/null
+ attempt=$((attempt+1))
+ done
+
+ # The directory still exists. Remove it now.
+ [ -d "$BPF_DIR" ] && rm -rf "$BPF_DIR" 2>/dev/null
+ fi
+}
+
+cleanup_skip()
+{
+ echo "selftests: $TESTNAME [SKIP]"
+ _cleanup
+
+ exit $ksft_skip
+}
+
+cleanup()
+{
+ if [ "$?" = 0 ]; then
+ echo "selftests: $TESTNAME [PASS]"
+ else
+ echo "selftests: $TESTNAME [FAILED]"
+ fi
+ _cleanup
+}
+
+check_root_privileges() {
+ if [ $(id -u) -ne 0 ]; then
+ echo "Need root privileges"
+ exit $ksft_skip
+ fi
+}
+
+# Function to verify bpftool path.
+# Parameters:
+# $1: bpftool path
+verify_bpftool_path() {
+ local bpftool_path="$1"
+ if ! "$bpftool_path" version > /dev/null 2>&1; then
+ echo "Could not run test without bpftool"
+ exit $ksft_skip
+ fi
+}
+
+# Function to verify BTF support.
+# The test requires BTF support for fmod_ret programs.
+verify_btf_support() {
+ if [ ! -f /sys/kernel/btf/vmlinux ]; then
+ echo "Could not run test without BTF support"
+ exit $ksft_skip
+ fi
+}
+
+# Function to initialize map entries with keys [0..2]; each value is 0 0 0 <key>.
+# Parameters:
+# $1: Map name
+# $2: bpftool path
+initialize_map_entries() {
+ local map_name="$1"
+ local bpftool_path="$2"
+
+ for key in 0 1 2; do
+ "$bpftool_path" map update name "$map_name" key $key 0 0 0 value 0 0 0 $key
+ done
+}
+
+# Test read access to the map.
+# Parameters:
+# $1: Name command (name/pinned)
+# $2: Map name
+# $3: bpftool path
+# $4: key
+access_for_read() {
+ local name_cmd="$1"
+ local map_name="$2"
+ local bpftool_path="$3"
+ local key="$4"
+
+ # Test read access to the map.
+ if ! "$bpftool_path" map lookup "$name_cmd" "$map_name" key $key 1>/dev/null; then
+ echo " Read access to $key in $map_name failed"
+ exit 1
+ fi
+
+ # Test read access to map's BTF data.
+ if ! "$bpftool_path" btf dump map "$name_cmd" "$map_name" 1>/dev/null; then
+ echo " Read access to $map_name for BTF data failed"
+ exit 1
+ fi
+}
+
+# Test write access to the map.
+# Parameters:
+# $1: Name command (name/pinned)
+# $2: Map name
+# $3: bpftool path
+# $4: key
+# $5: Whether write should succeed (true/false)
+access_for_write() {
+ local name_cmd="$1"
+ local map_name="$2"
+ local bpftool_path="$3"
+ local key="$4"
+ local write_should_succeed="$5"
+ local value="1 1 1 1"
+
+ if "$bpftool_path" map update "$name_cmd" "$map_name" key $key value \
+ $value 2>/dev/null; then
+ if [ "$write_should_succeed" = "false" ]; then
+ echo " Write access to $key in $map_name succeeded but should have failed"
+ exit 1
+ fi
+ else
+ if [ "$write_should_succeed" = "true" ]; then
+ echo " Write access to $key in $map_name failed but should have succeeded"
+ exit 1
+ fi
+ fi
+}
+
+# Test entry deletion for the map.
+# Parameters:
+# $1: Name command (name/pinned)
+# $2: Map name
+# $3: bpftool path
+# $4: key
+# $5: Whether write should succeed (true/false)
+access_for_deletion() {
+ local name_cmd="$1"
+ local map_name="$2"
+ local bpftool_path="$3"
+ local key="$4"
+ local write_should_succeed="$5"
+ local value="1 1 1 1"
+
+ # Test deletion by key for the map.
+ # Before deleting, check the key exists.
+ if ! "$bpftool_path" map lookup "$name_cmd" "$map_name" key $key 1>/dev/null; then
+ echo " Key $key does not exist in $map_name"
+ exit 1
+ fi
+
+ # Delete by key.
+ if "$bpftool_path" map delete "$name_cmd" "$map_name" key $key 2>/dev/null; then
+ if [ "$write_should_succeed" = "false" ]; then
+ echo " Deletion for $key in $map_name succeeded but should have failed"
+ exit 1
+ fi
+ else
+ if [ "$write_should_succeed" = "true" ]; then
+ echo " Deletion for $key in $map_name failed but should have succeeded"
+ exit 1
+ fi
+ fi
+
+ # After deleting, check the entry existence according to the expected status.
+ if "$bpftool_path" map lookup "$name_cmd" "$map_name" key $key 1>/dev/null; then
+ if [ "$write_should_succeed" = "true" ]; then
+ echo " Key $key for $map_name was not deleted but should have been deleted"
+ exit 1
+ fi
+ else
+ if [ "$write_should_succeed" = "false" ]; then
+			echo " Key $key for $map_name was deleted but should not have been deleted"
+ exit 1
+ fi
+ fi
+
+	# Test re-creation of the map's deleted entry, if deletion was successful.
+	# Otherwise, the entry still exists.
+ if "$bpftool_path" map update "$name_cmd" "$map_name" key $key value \
+ $value 2>/dev/null; then
+ if [ "$write_should_succeed" = "false" ]; then
+ echo " Write access to $key in $map_name succeeded after deletion attempt but should have failed"
+ exit 1
+ fi
+ else
+ if [ "$write_should_succeed" = "true" ]; then
+ echo " Write access to $key in $map_name failed after deletion attempt but should have succeeded"
+ exit 1
+ fi
+ fi
+}
+
+# Test map elements iterator.
+# Parameters:
+# $1: Name command (name/pinned)
+# $2: Map name
+# $3: bpftool path
+# $4: BPF_DIR
+# $5: bpf iterator object file path
+iterate_map_elem() {
+ local name_cmd="$1"
+ local map_name="$2"
+ local bpftool_path="$3"
+ local bpf_dir="$4"
+ local bpf_file="$5"
+ local pin_path="$bpf_dir/map_iterator"
+
+ "$bpftool_path" iter pin "$bpf_file" "$pin_path" map "$name_cmd" "$map_name"
+ if [ ! -f "$pin_path" ]; then
+ echo " Failed to pin iterator to $pin_path"
+ exit 1
+ fi
+
+ cat "$pin_path" 1>/dev/null
+ rm "$pin_path" 2>/dev/null
+}
+
+# Function to test map access with configurable write expectations
+# Parameters:
+# $1: Name command (name/pinned)
+# $2: Map name
+# $3: bpftool path
+# $4: key for rw
+# $5: key to delete
+# $6: Whether write should succeed (true/false)
+# $7: BPF_DIR
+# $8: bpf iterator object file path
+access_map() {
+ local name_cmd="$1"
+ local map_name="$2"
+ local bpftool_path="$3"
+ local key_for_rw="$4"
+ local key_to_del="$5"
+ local write_should_succeed="$6"
+ local bpf_dir="$7"
+ local bpf_iter_file_path="$8"
+
+ access_for_read "$name_cmd" "$map_name" "$bpftool_path" "$key_for_rw"
+ access_for_write "$name_cmd" "$map_name" "$bpftool_path" "$key_for_rw" \
+ "$write_should_succeed"
+ access_for_deletion "$name_cmd" "$map_name" "$bpftool_path" "$key_to_del" \
+ "$write_should_succeed"
+ iterate_map_elem "$name_cmd" "$map_name" "$bpftool_path" "$bpf_dir" \
+ "$bpf_iter_file_path"
+}
+
+# Function to test map access, by name and pinned, with configurable write expectations
+# Parameters:
+# $1: Map name
+# $2: bpftool path
+# $3: BPF_DIR
+# $4: Whether write should succeed (true/false)
+# $5: bpf iterator object file path
+test_map_access() {
+ local map_name="$1"
+ local bpftool_path="$2"
+ local bpf_dir="$3"
+ local pin_path="$bpf_dir/${map_name}_pinned"
+ local write_should_succeed="$4"
+ local bpf_iter_file_path="$5"
+
+ # Test access to the map by name.
+ access_map "name" "$map_name" "$bpftool_path" "0 0 0 0" "1 0 0 0" \
+ "$write_should_succeed" "$bpf_dir" "$bpf_iter_file_path"
+
+ # Pin the map to the BPF filesystem
+ "$bpftool_path" map pin name "$map_name" "$pin_path"
+ if [ ! -e "$pin_path" ]; then
+ echo " Failed to pin $map_name"
+ exit 1
+ fi
+
+ # Test access to the pinned map.
+ access_map "pinned" "$pin_path" "$bpftool_path" "0 0 0 0" "2 0 0 0" \
+ "$write_should_succeed" "$bpf_dir" "$bpf_iter_file_path"
+}
+
+# Function to test map creation and map-of-maps
+# Parameters:
+# $1: bpftool path
+# $2: BPF_DIR
+test_map_creation_and_map_of_maps() {
+ local bpftool_path="$1"
+ local bpf_dir="$2"
+ local outer_map_name="outer_map_tt"
+ local inner_map_name="inner_map_tt"
+
+ "$bpftool_path" map create "$bpf_dir/$inner_map_name" type array key 4 \
+ value 4 entries 4 name "$inner_map_name"
+ if [ ! -f "$bpf_dir/$inner_map_name" ]; then
+		echo " Failed to create inner map file at $bpf_dir/$inner_map_name"
+ return 1
+ fi
+
+ "$bpftool_path" map create "$bpf_dir/$outer_map_name" type hash_of_maps \
+ key 4 value 4 entries 2 name "$outer_map_name" inner_map name "$inner_map_name"
+ if [ ! -f "$bpf_dir/$outer_map_name" ]; then
+ echo " Failed to create outer map file at $bpf_dir/$outer_map_name"
+ return 1
+ fi
+
+ # Add entries to the outer map by name and by pinned path.
+ "$bpftool_path" map update pinned "$bpf_dir/$outer_map_name" key 0 0 0 0 \
+ value pinned "$bpf_dir/$inner_map_name"
+ "$bpftool_path" map update name "$outer_map_name" key 1 0 0 0 value \
+ name "$inner_map_name"
+
+ # The outer map should be full by now.
+ # The following map update command is expected to fail.
+ if "$bpftool_path" map update name "$outer_map_name" key 2 0 0 0 value name \
+ "$inner_map_name" 2>/dev/null; then
+ echo " Update for $outer_map_name succeeded but should have failed"
+ exit 1
+ fi
+}
+
+# Function to test map access with the btf list command
+# Parameters:
+# $1: bpftool path
+test_map_access_with_btf_list() {
+ local bpftool_path="$1"
+
+ # The btf list command iterates over maps for
+ # loaded BPF programs.
+ if ! "$bpftool_path" btf list 1>/dev/null; then
+ echo " Failed to access btf data"
+ exit 1
+ fi
+}
+
+set -eu
+
+trap cleanup_skip EXIT
+
+check_root_privileges
+
+verify_bpftool_path "$BPFTOOL_PATH"
+
+verify_btf_support
+
+trap cleanup EXIT
+
+# Load and attach the BPF programs to control maps access.
+"$BPFTOOL_PATH" prog loadall "$BPF_FILE_PATH" "$BPF_DIR" autoattach
+
+initialize_map_entries "$PROTECTED_MAP_NAME" "$BPFTOOL_PATH"
+initialize_map_entries "$NOT_PROTECTED_MAP_NAME" "$BPFTOOL_PATH"
+
+# Activate the map protection mechanism. Protection status is controlled
+# by a value stored in the prot_status_map at index 0.
+"$BPFTOOL_PATH" map update name prot_status_map key 0 0 0 0 value 1 0 0 0
+
+# Test protected map (write should fail).
+test_map_access "$PROTECTED_MAP_NAME" "$BPFTOOL_PATH" "$BPF_DIR" "false" \
+ "$BPF_ITER_FILE_PATH"
+
+# Test not protected map (write should succeed).
+test_map_access "$NOT_PROTECTED_MAP_NAME" "$BPFTOOL_PATH" "$BPF_DIR" "true" \
+ "$BPF_ITER_FILE_PATH"
+
+test_map_creation_and_map_of_maps "$BPFTOOL_PATH" "$BPF_DIR"
+
+test_map_access_with_btf_list "$BPFTOOL_PATH"
+
+exit 0
diff --git a/tools/testing/selftests/bpf/test_kmods/Makefile b/tools/testing/selftests/bpf/test_kmods/Makefile
index d4e50c4509c9..63c4d3f6a12f 100644
--- a/tools/testing/selftests/bpf/test_kmods/Makefile
+++ b/tools/testing/selftests/bpf/test_kmods/Makefile
@@ -8,7 +8,7 @@ Q = @
endif
MODULES = bpf_testmod.ko bpf_test_no_cfi.ko bpf_test_modorder_x.ko \
- bpf_test_modorder_y.ko
+ bpf_test_modorder_y.ko bpf_test_rqspinlock.ko
$(foreach m,$(MODULES),$(eval obj-m += $(m:.ko=.o)))
diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c b/tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c
new file mode 100644
index 000000000000..7b4ae5e81d32
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c
@@ -0,0 +1,393 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/prandom.h>
+#include <linux/ktime.h>
+#include <asm/rqspinlock.h>
+#include <linux/perf_event.h>
+#include <linux/kthread.h>
+#include <linux/atomic.h>
+#include <linux/slab.h>
+
+static struct perf_event_attr hw_attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ .size = sizeof(struct perf_event_attr),
+ .pinned = 1,
+ .disabled = 1,
+ .sample_period = 100000,
+};
+
+static rqspinlock_t lock_a;
+static rqspinlock_t lock_b;
+static rqspinlock_t lock_c;
+
+#define RQSL_SLOW_THRESHOLD_MS 10
+static const unsigned int rqsl_hist_ms[] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+ 12, 14, 16, 18, 20, 25, 30, 40, 50, 75,
+ 100, 150, 200, 250, 1000,
+};
+#define RQSL_NR_HIST_BUCKETS ARRAY_SIZE(rqsl_hist_ms)
+
+enum rqsl_context {
+ RQSL_CTX_NORMAL = 0,
+ RQSL_CTX_NMI,
+ RQSL_CTX_MAX,
+};
+
+struct rqsl_cpu_hist {
+ atomic64_t hist[RQSL_CTX_MAX][RQSL_NR_HIST_BUCKETS];
+ atomic64_t success[RQSL_CTX_MAX];
+ atomic64_t failure[RQSL_CTX_MAX];
+};
+
+static DEFINE_PER_CPU(struct rqsl_cpu_hist, rqsl_cpu_hists);
+
+enum rqsl_mode {
+ RQSL_MODE_AA = 0,
+ RQSL_MODE_ABBA,
+ RQSL_MODE_ABBCCA,
+};
+
+static int test_mode = RQSL_MODE_AA;
+module_param(test_mode, int, 0644);
+MODULE_PARM_DESC(test_mode,
+ "rqspinlock test mode: 0 = AA, 1 = ABBA, 2 = ABBCCA");
+
+static int normal_delay = 20;
+module_param(normal_delay, int, 0644);
+MODULE_PARM_DESC(normal_delay,
+ "rqspinlock critical section length for normal context (20ms default)");
+
+static int nmi_delay = 10;
+module_param(nmi_delay, int, 0644);
+MODULE_PARM_DESC(nmi_delay,
+ "rqspinlock critical section length for NMI context (10ms default)");
+
+static struct perf_event **rqsl_evts;
+static int rqsl_nevts;
+
+static struct task_struct **rqsl_threads;
+static int rqsl_nthreads;
+static atomic_t rqsl_ready_cpus = ATOMIC_INIT(0);
+
+static int pause = 0;
+
+static const char *rqsl_mode_names[] = {
+ [RQSL_MODE_AA] = "AA",
+ [RQSL_MODE_ABBA] = "ABBA",
+ [RQSL_MODE_ABBCCA] = "ABBCCA",
+};
+
+struct rqsl_lock_pair {
+ rqspinlock_t *worker_lock;
+ rqspinlock_t *nmi_lock;
+};
+
+static struct rqsl_lock_pair rqsl_get_lock_pair(int cpu)
+{
+ int mode = READ_ONCE(test_mode);
+
+ switch (mode) {
+ default:
+ case RQSL_MODE_AA:
+ return (struct rqsl_lock_pair){ &lock_a, &lock_a };
+ case RQSL_MODE_ABBA:
+ if (cpu & 1)
+ return (struct rqsl_lock_pair){ &lock_b, &lock_a };
+ return (struct rqsl_lock_pair){ &lock_a, &lock_b };
+ case RQSL_MODE_ABBCCA:
+ switch (cpu % 3) {
+ case 0:
+ return (struct rqsl_lock_pair){ &lock_a, &lock_b };
+ case 1:
+ return (struct rqsl_lock_pair){ &lock_b, &lock_c };
+ default:
+ return (struct rqsl_lock_pair){ &lock_c, &lock_a };
+ }
+ }
+}
+
+static u32 rqsl_hist_bucket_idx(u32 delta_ms)
+{
+ int i;
+
+ for (i = 0; i < RQSL_NR_HIST_BUCKETS; i++) {
+ if (delta_ms <= rqsl_hist_ms[i])
+ return i;
+ }
+
+ return RQSL_NR_HIST_BUCKETS - 1;
+}
+
+static void rqsl_record_lock_result(u64 delta_ns, enum rqsl_context ctx, int ret)
+{
+ struct rqsl_cpu_hist *hist = this_cpu_ptr(&rqsl_cpu_hists);
+ u32 delta_ms = DIV_ROUND_UP_ULL(delta_ns, NSEC_PER_MSEC);
+ u32 bucket = rqsl_hist_bucket_idx(delta_ms);
+ atomic64_t *buckets = hist->hist[ctx];
+
+ atomic64_inc(&buckets[bucket]);
+ if (!ret)
+ atomic64_inc(&hist->success[ctx]);
+ else
+ atomic64_inc(&hist->failure[ctx]);
+}
+
+static int rqspinlock_worker_fn(void *arg)
+{
+ int cpu = smp_processor_id();
+ unsigned long flags;
+ u64 start_ns;
+ int ret;
+
+ if (cpu) {
+ atomic_inc(&rqsl_ready_cpus);
+
+ while (!kthread_should_stop()) {
+ struct rqsl_lock_pair locks = rqsl_get_lock_pair(cpu);
+ rqspinlock_t *worker_lock = locks.worker_lock;
+
+ if (READ_ONCE(pause)) {
+ msleep(1000);
+ continue;
+ }
+ start_ns = ktime_get_mono_fast_ns();
+ ret = raw_res_spin_lock_irqsave(worker_lock, flags);
+ rqsl_record_lock_result(ktime_get_mono_fast_ns() - start_ns,
+ RQSL_CTX_NORMAL, ret);
+ mdelay(normal_delay);
+ if (!ret)
+ raw_res_spin_unlock_irqrestore(worker_lock, flags);
+ cpu_relax();
+ }
+ return 0;
+ }
+
+ while (!kthread_should_stop()) {
+ int expected = rqsl_nthreads > 0 ? rqsl_nthreads - 1 : 0;
+ int ready = atomic_read(&rqsl_ready_cpus);
+
+ if (ready == expected && !READ_ONCE(pause)) {
+ for (int i = 0; i < rqsl_nevts; i++)
+ perf_event_enable(rqsl_evts[i]);
+ pr_err("Waiting 5 secs to pause the test\n");
+ msleep(1000 * 5);
+ WRITE_ONCE(pause, 1);
+ pr_err("Paused the test\n");
+ } else {
+ msleep(1000);
+ cpu_relax();
+ }
+ }
+ return 0;
+}
+
+static void nmi_cb(struct perf_event *event, struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ struct rqsl_lock_pair locks;
+ int cpu = smp_processor_id();
+ unsigned long flags;
+ u64 start_ns;
+ int ret;
+
+ if (!cpu || READ_ONCE(pause))
+ return;
+
+ locks = rqsl_get_lock_pair(cpu);
+ start_ns = ktime_get_mono_fast_ns();
+ ret = raw_res_spin_lock_irqsave(locks.nmi_lock, flags);
+ rqsl_record_lock_result(ktime_get_mono_fast_ns() - start_ns,
+ RQSL_CTX_NMI, ret);
+
+ mdelay(nmi_delay);
+
+ if (!ret)
+ raw_res_spin_unlock_irqrestore(locks.nmi_lock, flags);
+}
+
+static void free_rqsl_threads(void)
+{
+ int i;
+
+ if (rqsl_threads) {
+ for_each_online_cpu(i) {
+ if (rqsl_threads[i])
+ kthread_stop(rqsl_threads[i]);
+ }
+ kfree(rqsl_threads);
+ }
+}
+
+static void free_rqsl_evts(void)
+{
+ int i;
+
+ if (rqsl_evts) {
+ for (i = 0; i < rqsl_nevts; i++) {
+ if (rqsl_evts[i])
+ perf_event_release_kernel(rqsl_evts[i]);
+ }
+ kfree(rqsl_evts);
+ }
+}
+
+static int bpf_test_rqspinlock_init(void)
+{
+ int i, ret;
+ int ncpus = num_online_cpus();
+
+ if (test_mode < RQSL_MODE_AA || test_mode > RQSL_MODE_ABBCCA) {
+ pr_err("Invalid mode %d\n", test_mode);
+ return -EINVAL;
+ }
+
+ pr_err("Mode = %s\n", rqsl_mode_names[test_mode]);
+
+ if (ncpus < test_mode + 2)
+ return -ENOTSUPP;
+
+ raw_res_spin_lock_init(&lock_a);
+ raw_res_spin_lock_init(&lock_b);
+ raw_res_spin_lock_init(&lock_c);
+
+ rqsl_evts = kcalloc(ncpus - 1, sizeof(*rqsl_evts), GFP_KERNEL);
+ if (!rqsl_evts)
+ return -ENOMEM;
+ rqsl_nevts = ncpus - 1;
+
+ for (i = 1; i < ncpus; i++) {
+ struct perf_event *e;
+
+ e = perf_event_create_kernel_counter(&hw_attr, i, NULL, nmi_cb, NULL);
+ if (IS_ERR(e)) {
+ ret = PTR_ERR(e);
+ goto err_perf_events;
+ }
+ rqsl_evts[i - 1] = e;
+ }
+
+ rqsl_threads = kcalloc(ncpus, sizeof(*rqsl_threads), GFP_KERNEL);
+ if (!rqsl_threads) {
+ ret = -ENOMEM;
+ goto err_perf_events;
+ }
+ rqsl_nthreads = ncpus;
+
+ for_each_online_cpu(i) {
+ struct task_struct *t;
+
+ t = kthread_create(rqspinlock_worker_fn, NULL, "rqsl_w/%d", i);
+ if (IS_ERR(t)) {
+ ret = PTR_ERR(t);
+ goto err_threads_create;
+ }
+ kthread_bind(t, i);
+ rqsl_threads[i] = t;
+ wake_up_process(t);
+ }
+ return 0;
+
+err_threads_create:
+ free_rqsl_threads();
+err_perf_events:
+ free_rqsl_evts();
+ return ret;
+}
+
+module_init(bpf_test_rqspinlock_init);
+
+static void rqsl_print_histograms(void)
+{
+ int cpu, i;
+
+ pr_err("rqspinlock acquisition latency histogram (ms):\n");
+
+ for_each_online_cpu(cpu) {
+ struct rqsl_cpu_hist *hist = per_cpu_ptr(&rqsl_cpu_hists, cpu);
+ u64 norm_counts[RQSL_NR_HIST_BUCKETS];
+ u64 nmi_counts[RQSL_NR_HIST_BUCKETS];
+ u64 total_counts[RQSL_NR_HIST_BUCKETS];
+ u64 norm_success, nmi_success, success_total;
+ u64 norm_failure, nmi_failure, failure_total;
+ u64 norm_total = 0, nmi_total = 0, total = 0;
+ bool has_slow = false;
+
+ for (i = 0; i < RQSL_NR_HIST_BUCKETS; i++) {
+ norm_counts[i] = atomic64_read(&hist->hist[RQSL_CTX_NORMAL][i]);
+ nmi_counts[i] = atomic64_read(&hist->hist[RQSL_CTX_NMI][i]);
+ total_counts[i] = norm_counts[i] + nmi_counts[i];
+ norm_total += norm_counts[i];
+ nmi_total += nmi_counts[i];
+ total += total_counts[i];
+ if (rqsl_hist_ms[i] > RQSL_SLOW_THRESHOLD_MS &&
+ total_counts[i])
+ has_slow = true;
+ }
+
+ norm_success = atomic64_read(&hist->success[RQSL_CTX_NORMAL]);
+ nmi_success = atomic64_read(&hist->success[RQSL_CTX_NMI]);
+ norm_failure = atomic64_read(&hist->failure[RQSL_CTX_NORMAL]);
+ nmi_failure = atomic64_read(&hist->failure[RQSL_CTX_NMI]);
+ success_total = norm_success + nmi_success;
+ failure_total = norm_failure + nmi_failure;
+
+ if (!total)
+ continue;
+
+ if (!has_slow) {
+ pr_err(" cpu%d: total %llu (normal %llu, nmi %llu) | "
+ "success %llu (normal %llu, nmi %llu) | "
+ "failure %llu (normal %llu, nmi %llu), all within 0-%ums\n",
+ cpu, total, norm_total, nmi_total,
+ success_total, norm_success, nmi_success,
+ failure_total, norm_failure, nmi_failure,
+ RQSL_SLOW_THRESHOLD_MS);
+ continue;
+ }
+
+ pr_err(" cpu%d: total %llu (normal %llu, nmi %llu) | "
+ "success %llu (normal %llu, nmi %llu) | "
+ "failure %llu (normal %llu, nmi %llu)\n",
+ cpu, total, norm_total, nmi_total,
+ success_total, norm_success, nmi_success,
+ failure_total, norm_failure, nmi_failure);
+ for (i = 0; i < RQSL_NR_HIST_BUCKETS; i++) {
+ unsigned int start_ms;
+
+ if (!total_counts[i])
+ continue;
+
+ start_ms = i == 0 ? 0 : rqsl_hist_ms[i - 1] + 1;
+ if (i == RQSL_NR_HIST_BUCKETS - 1) {
+ pr_err(" >= %ums: total %llu (normal %llu, nmi %llu)\n",
+ start_ms, total_counts[i],
+ norm_counts[i], nmi_counts[i]);
+ } else {
+ pr_err(" %u-%ums: total %llu (normal %llu, nmi %llu)\n",
+ start_ms, rqsl_hist_ms[i],
+ total_counts[i],
+ norm_counts[i], nmi_counts[i]);
+ }
+ }
+ }
+}
+
+static void bpf_test_rqspinlock_exit(void)
+{
+ WRITE_ONCE(pause, 1);
+ free_rqsl_threads();
+ free_rqsl_evts();
+ rqsl_print_histograms();
+}
+
+module_exit(bpf_test_rqspinlock_exit);
+
+MODULE_AUTHOR("Kumar Kartikeya Dwivedi");
+MODULE_DESCRIPTION("BPF rqspinlock stress test module");
+MODULE_LICENSE("GPL");
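A quick way to check the bucketing in rqsl_hist_bucket_idx() above is a standalone userspace sketch; the bounds array mirrors rqsl_hist_ms, the remaining names are ours and not part of the patch. A latency in ms lands in the first bucket whose bound covers it, and anything past the last bound is clamped into the final bucket:

/* cc -o bucket bucket.c && ./bucket */
#include <stdio.h>

static const unsigned int bounds_ms[] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
	12, 14, 16, 18, 20, 25, 30, 40, 50, 75,
	100, 150, 200, 250, 1000,
};
#define NR_BUCKETS (sizeof(bounds_ms) / sizeof(bounds_ms[0]))

/* same selection rule as rqsl_hist_bucket_idx() */
static unsigned int bucket_idx(unsigned int delta_ms)
{
	for (unsigned int i = 0; i < NR_BUCKETS; i++)
		if (delta_ms <= bounds_ms[i])
			return i;
	return NR_BUCKETS - 1;	/* clamp overflow into the last bucket */
}

int main(void)
{
	unsigned int samples[] = { 0, 1, 11, 26, 999, 5000 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%ums -> bucket %u\n", samples[i], bucket_idx(samples[i]));
	return 0;
}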
diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
index e6c248e3ae54..1669a7eeda26 100644
--- a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
@@ -62,6 +62,18 @@ struct bpf_testmod_struct_arg_5 {
long d;
};
+union bpf_testmod_union_arg_1 {
+ char a;
+ short b;
+ struct bpf_testmod_struct_arg_1 arg;
+};
+
+union bpf_testmod_union_arg_2 {
+ int a;
+ long b;
+ struct bpf_testmod_struct_arg_2 arg;
+};
+
__bpf_hook_start();
noinline int
@@ -129,6 +141,20 @@ bpf_testmod_test_struct_arg_9(u64 a, void *b, short c, int d, void *e, char f,
}
noinline int
+bpf_testmod_test_union_arg_1(union bpf_testmod_union_arg_1 a, int b, int c)
+{
+ bpf_testmod_test_struct_arg_result = a.arg.a + b + c;
+ return bpf_testmod_test_struct_arg_result;
+}
+
+noinline int
+bpf_testmod_test_union_arg_2(int a, union bpf_testmod_union_arg_2 b)
+{
+ bpf_testmod_test_struct_arg_result = a + b.arg.a + b.arg.b;
+ return bpf_testmod_test_struct_arg_result;
+}
+
+noinline int
bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) {
bpf_testmod_test_struct_arg_result = a->a;
return bpf_testmod_test_struct_arg_result;
@@ -218,6 +244,16 @@ __bpf_kfunc void bpf_kfunc_rcu_task_test(struct task_struct *ptr)
{
}
+__bpf_kfunc struct task_struct *bpf_kfunc_ret_rcu_test(void)
+{
+ return NULL;
+}
+
+__bpf_kfunc int *bpf_kfunc_ret_rcu_test_nostruct(int rdonly_buf_size)
+{
+ return NULL;
+}
+
__bpf_kfunc struct bpf_testmod_ctx *
bpf_testmod_ctx_create(int *err)
{
@@ -381,11 +417,35 @@ noinline int bpf_testmod_fentry_test11(u64 a, void *b, short c, int d,
return a + (long)b + c + d + (long)e + f + g + h + i + j + k;
}
+noinline void bpf_testmod_stacktrace_test(void)
+{
+ /* used for stacktrace test as attach function */
+ asm volatile ("");
+}
+
+noinline void bpf_testmod_stacktrace_test_3(void)
+{
+ bpf_testmod_stacktrace_test();
+ asm volatile ("");
+}
+
+noinline void bpf_testmod_stacktrace_test_2(void)
+{
+ bpf_testmod_stacktrace_test_3();
+ asm volatile ("");
+}
+
+noinline void bpf_testmod_stacktrace_test_1(void)
+{
+ bpf_testmod_stacktrace_test_2();
+ asm volatile ("");
+}
+
int bpf_testmod_fentry_ok;
noinline ssize_t
bpf_testmod_test_read(struct file *file, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t len)
{
struct bpf_testmod_test_read_ctx ctx = {
@@ -398,6 +458,8 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
struct bpf_testmod_struct_arg_3 *struct_arg3;
struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22};
struct bpf_testmod_struct_arg_5 struct_arg5 = {23, 24, 25, 26};
+ union bpf_testmod_union_arg_1 union_arg1 = { .arg = {1} };
+ union bpf_testmod_union_arg_2 union_arg2 = { .arg = {2, 3} };
int i = 1;
while (bpf_testmod_return_ptr(i))
@@ -415,6 +477,9 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
(void)bpf_testmod_test_struct_arg_9(16, (void *)17, 18, 19, (void *)20,
21, 22, struct_arg5, 27);
+ (void)bpf_testmod_test_union_arg_1(union_arg1, 4, 5);
+ (void)bpf_testmod_test_union_arg_2(6, union_arg2);
+
(void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2);
(void)trace_bpf_testmod_test_raw_tp_null_tp(NULL);
@@ -456,6 +521,8 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
21, 22, 23, 24, 25, 26) != 231)
goto out;
+ bpf_testmod_stacktrace_test_1();
+
bpf_testmod_fentry_ok = 1;
out:
return -EIO; /* always fail */
@@ -465,7 +532,7 @@ ALLOW_ERROR_INJECTION(bpf_testmod_test_read, ERRNO);
noinline ssize_t
bpf_testmod_test_write(struct file *file, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t len)
{
struct bpf_testmod_test_write_ctx ctx = {
@@ -501,14 +568,20 @@ static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
#ifdef __x86_64__
static int
+uprobe_handler(struct uprobe_consumer *self, struct pt_regs *regs, __u64 *data)
+{
+ regs->cx = 0x87654321feebdaed;
+ return 0;
+}
+
+static int
uprobe_ret_handler(struct uprobe_consumer *self, unsigned long func,
struct pt_regs *regs, __u64 *data)
{
regs->ax = 0x12345678deadbeef;
- regs->cx = 0x87654321feebdaed;
regs->r11 = (u64) -1;
- return true;
+ return 0;
}
struct testmod_uprobe {
@@ -520,6 +593,7 @@ struct testmod_uprobe {
static DEFINE_MUTEX(testmod_uprobe_mutex);
static struct testmod_uprobe uprobe = {
+ .consumer.handler = uprobe_handler,
.consumer.ret_handler = uprobe_ret_handler,
};
@@ -567,7 +641,7 @@ static void testmod_unregister_uprobe(void)
static ssize_t
bpf_testmod_uprobe_write(struct file *file, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t len)
{
unsigned long offset = 0;
@@ -623,6 +697,8 @@ BTF_ID_FLAGS(func, bpf_kfunc_trusted_vma_test, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_trusted_task_test, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_trusted_num_test, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_rcu_task_test, KF_RCU)
+BTF_ID_FLAGS(func, bpf_kfunc_ret_rcu_test, KF_RET_NULL | KF_RCU_PROTECTED)
+BTF_ID_FLAGS(func, bpf_kfunc_ret_rcu_test_nostruct, KF_RET_NULL | KF_RCU_PROTECTED)
BTF_ID_FLAGS(func, bpf_testmod_ctx_create, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_testmod_ctx_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_1)
@@ -850,7 +926,7 @@ __bpf_kfunc int bpf_kfunc_call_kernel_connect(struct addr_args *args)
goto out;
}
- err = kernel_connect(sock, (struct sockaddr *)&args->addr,
+ err = kernel_connect(sock, (struct sockaddr_unsized *)&args->addr,
args->addrlen, 0);
out:
mutex_unlock(&sock_lock);
@@ -873,7 +949,7 @@ __bpf_kfunc int bpf_kfunc_call_kernel_bind(struct addr_args *args)
goto out;
}
- err = kernel_bind(sock, (struct sockaddr *)&args->addr, args->addrlen);
+ err = kernel_bind(sock, (struct sockaddr_unsized *)&args->addr, args->addrlen);
out:
mutex_unlock(&sock_lock);
@@ -1057,6 +1133,8 @@ __bpf_kfunc int bpf_kfunc_st_ops_inc10(struct st_ops_args *args)
return args->a;
}
+__bpf_kfunc int bpf_kfunc_multi_st_ops_test_1(struct st_ops_args *args, u32 id);
+
BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
@@ -1097,6 +1175,7 @@ BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_prologue, KF_TRUSTED_ARGS | KF_SLEEPABL
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_pro_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_inc10, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_kfunc_multi_st_ops_test_1, KF_TRUSTED_ARGS)
BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)
static int bpf_testmod_ops_init(struct btf *btf)
@@ -1528,6 +1607,114 @@ static struct bpf_struct_ops testmod_st_ops = {
.owner = THIS_MODULE,
};
+struct hlist_head multi_st_ops_list;
+static DEFINE_SPINLOCK(multi_st_ops_lock);
+
+static int multi_st_ops_init(struct btf *btf)
+{
+ spin_lock_init(&multi_st_ops_lock);
+ INIT_HLIST_HEAD(&multi_st_ops_list);
+
+ return 0;
+}
+
+static int multi_st_ops_init_member(const struct btf_type *t,
+ const struct btf_member *member,
+ void *kdata, const void *udata)
+{
+ return 0;
+}
+
+static struct bpf_testmod_multi_st_ops *multi_st_ops_find_nolock(u32 id)
+{
+ struct bpf_testmod_multi_st_ops *st_ops;
+
+ hlist_for_each_entry(st_ops, &multi_st_ops_list, node) {
+ if (st_ops->id == id)
+ return st_ops;
+ }
+
+ return NULL;
+}
+
+int bpf_kfunc_multi_st_ops_test_1(struct st_ops_args *args, u32 id)
+{
+ struct bpf_testmod_multi_st_ops *st_ops;
+ unsigned long flags;
+ int ret = -1;
+
+ spin_lock_irqsave(&multi_st_ops_lock, flags);
+ st_ops = multi_st_ops_find_nolock(id);
+ if (st_ops)
+ ret = st_ops->test_1(args);
+ spin_unlock_irqrestore(&multi_st_ops_lock, flags);
+
+ return ret;
+}
+
+static int multi_st_ops_reg(void *kdata, struct bpf_link *link)
+{
+ struct bpf_testmod_multi_st_ops *st_ops =
+ (struct bpf_testmod_multi_st_ops *)kdata;
+ unsigned long flags;
+ int err = 0;
+ u32 id;
+
+ if (!st_ops->test_1)
+ return -EINVAL;
+
+ id = bpf_struct_ops_id(kdata);
+
+ spin_lock_irqsave(&multi_st_ops_lock, flags);
+ if (multi_st_ops_find_nolock(id)) {
+ pr_err("multi_st_ops(id:%d) has already been registered\n", id);
+ err = -EEXIST;
+ goto unlock;
+ }
+
+ st_ops->id = id;
+ hlist_add_head(&st_ops->node, &multi_st_ops_list);
+unlock:
+ spin_unlock_irqrestore(&multi_st_ops_lock, flags);
+
+ return err;
+}
+
+static void multi_st_ops_unreg(void *kdata, struct bpf_link *link)
+{
+ struct bpf_testmod_multi_st_ops *st_ops;
+ unsigned long flags;
+ u32 id;
+
+ id = bpf_struct_ops_id(kdata);
+
+ spin_lock_irqsave(&multi_st_ops_lock, flags);
+ st_ops = multi_st_ops_find_nolock(id);
+ if (st_ops)
+ hlist_del(&st_ops->node);
+ spin_unlock_irqrestore(&multi_st_ops_lock, flags);
+}
+
+static int bpf_testmod_multi_st_ops__test_1(struct st_ops_args *args)
+{
+ return 0;
+}
+
+static struct bpf_testmod_multi_st_ops multi_st_ops_cfi_stubs = {
+ .test_1 = bpf_testmod_multi_st_ops__test_1,
+};
+
+struct bpf_struct_ops testmod_multi_st_ops = {
+ .verifier_ops = &bpf_testmod_verifier_ops,
+ .init = multi_st_ops_init,
+ .init_member = multi_st_ops_init_member,
+ .reg = multi_st_ops_reg,
+ .unreg = multi_st_ops_unreg,
+ .cfi_stubs = &multi_st_ops_cfi_stubs,
+ .name = "bpf_testmod_multi_st_ops",
+ .owner = THIS_MODULE,
+};
+
extern int bpf_fentry_test1(int a);
static int bpf_testmod_init(void)
@@ -1550,6 +1737,7 @@ static int bpf_testmod_init(void)
ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2);
ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops3, bpf_testmod_ops3);
ret = ret ?: register_bpf_struct_ops(&testmod_st_ops, bpf_testmod_st_ops);
+ ret = ret ?: register_bpf_struct_ops(&testmod_multi_st_ops, bpf_testmod_multi_st_ops);
ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors,
ARRAY_SIZE(bpf_testmod_dtors),
THIS_MODULE);
diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.h b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.h
index c9fab51f16e2..f6e492f9d042 100644
--- a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.h
+++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.h
@@ -116,4 +116,10 @@ struct bpf_testmod_st_ops {
struct module *owner;
};
+struct bpf_testmod_multi_st_ops {
+ int (*test_1)(struct st_ops_args *args);
+ struct hlist_node node;
+ int id;
+};
+
#endif /* _BPF_TESTMOD_H */
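The registry pattern behind multi_st_ops_reg() and multi_st_ops_find_nolock() above is: look up by id under a lock, reject duplicates with -EEXIST, otherwise link the entry in. A userspace sketch under that assumption, with a pthread mutex standing in for the kernel spinlock and all names ours:

/* cc -pthread -o reg reg.c && ./reg */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct entry {
	unsigned int id;
	struct entry *next;
};

static struct entry *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* analogue of multi_st_ops_find_nolock(): caller holds the lock */
static struct entry *find_nolock(unsigned int id)
{
	for (struct entry *e = head; e; e = e->next)
		if (e->id == id)
			return e;
	return NULL;
}

/* analogue of multi_st_ops_reg(): duplicate ids are rejected */
static int reg(struct entry *e)
{
	int err = 0;

	pthread_mutex_lock(&lock);
	if (find_nolock(e->id)) {
		err = -EEXIST;
	} else {
		e->next = head;
		head = e;
	}
	pthread_mutex_unlock(&lock);
	return err;
}

int main(void)
{
	struct entry a = { .id = 1 }, b = { .id = 1 };

	/* second registration of id 1 fails with -EEXIST */
	printf("reg a: %d, reg b: %d\n", reg(&a), reg(&b));
	return 0;
}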
diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h b/tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h
index b58817938deb..4df6fa6a92cb 100644
--- a/tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h
+++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h
@@ -158,5 +158,9 @@ void bpf_kfunc_trusted_vma_test(struct vm_area_struct *ptr) __ksym;
void bpf_kfunc_trusted_task_test(struct task_struct *ptr) __ksym;
void bpf_kfunc_trusted_num_test(int *ptr) __ksym;
void bpf_kfunc_rcu_task_test(struct task_struct *ptr) __ksym;
+struct task_struct *bpf_kfunc_ret_rcu_test(void) __ksym;
+int *bpf_kfunc_ret_rcu_test_nostruct(int rdonly_buf_size) __ksym;
+
+int bpf_kfunc_multi_st_ops_test_1(struct st_ops_args *args, u32 id) __ksym;
#endif /* _BPF_TESTMOD_KFUNC_H */
diff --git a/tools/testing/selftests/bpf/test_lirc_mode2_user.c b/tools/testing/selftests/bpf/test_lirc_mode2_user.c
index 4694422aa76c..88e4aeab21b7 100644
--- a/tools/testing/selftests/bpf/test_lirc_mode2_user.c
+++ b/tools/testing/selftests/bpf/test_lirc_mode2_user.c
@@ -74,7 +74,7 @@ int main(int argc, char **argv)
/* Let's try detach it before it was ever attached */
ret = bpf_prog_detach2(progfd, lircfd, BPF_LIRC_MODE2);
- if (ret != -1 || errno != ENOENT) {
+ if (ret != -ENOENT) {
printf("bpf_prog_detach2 not attached should fail: %m\n");
return 1;
}
diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c
index 9551d8d5f8f9..338c035c3688 100644
--- a/tools/testing/selftests/bpf/test_loader.c
+++ b/tools/testing/selftests/bpf/test_loader.c
@@ -2,7 +2,6 @@
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/capability.h>
#include <stdlib.h>
-#include <regex.h>
#include <test_progs.h>
#include <bpf/btf.h>
@@ -20,10 +19,12 @@
#define TEST_TAG_EXPECT_FAILURE "comment:test_expect_failure"
#define TEST_TAG_EXPECT_SUCCESS "comment:test_expect_success"
#define TEST_TAG_EXPECT_MSG_PFX "comment:test_expect_msg="
+#define TEST_TAG_EXPECT_NOT_MSG_PFX "comment:test_expect_not_msg="
#define TEST_TAG_EXPECT_XLATED_PFX "comment:test_expect_xlated="
#define TEST_TAG_EXPECT_FAILURE_UNPRIV "comment:test_expect_failure_unpriv"
#define TEST_TAG_EXPECT_SUCCESS_UNPRIV "comment:test_expect_success_unpriv"
#define TEST_TAG_EXPECT_MSG_PFX_UNPRIV "comment:test_expect_msg_unpriv="
+#define TEST_TAG_EXPECT_NOT_MSG_PFX_UNPRIV "comment:test_expect_not_msg_unpriv="
#define TEST_TAG_EXPECT_XLATED_PFX_UNPRIV "comment:test_expect_xlated_unpriv="
#define TEST_TAG_LOG_LEVEL_PFX "comment:test_log_level="
#define TEST_TAG_PROG_FLAGS_PFX "comment:test_prog_flags="
@@ -38,9 +39,14 @@
#define TEST_TAG_JITED_PFX_UNPRIV "comment:test_jited_unpriv="
#define TEST_TAG_CAPS_UNPRIV "comment:test_caps_unpriv="
#define TEST_TAG_LOAD_MODE_PFX "comment:load_mode="
+#define TEST_TAG_EXPECT_STDERR_PFX "comment:test_expect_stderr="
+#define TEST_TAG_EXPECT_STDERR_PFX_UNPRIV "comment:test_expect_stderr_unpriv="
+#define TEST_TAG_EXPECT_STDOUT_PFX "comment:test_expect_stdout="
+#define TEST_TAG_EXPECT_STDOUT_PFX_UNPRIV "comment:test_expect_stdout_unpriv="
+#define TEST_TAG_LINEAR_SIZE "comment:test_linear_size="
/* Warning: duplicated in bpf_misc.h */
-#define POINTER_VALUE 0xcafe4all
+#define POINTER_VALUE 0xbadcafe
#define TEST_DATA_LEN 64
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -61,24 +67,14 @@ enum load_mode {
NO_JITED = 1 << 1,
};
-struct expect_msg {
- const char *substr; /* substring match */
- regex_t regex;
- bool is_regex;
- bool on_next_line;
-};
-
-struct expected_msgs {
- struct expect_msg *patterns;
- size_t cnt;
-};
-
struct test_subspec {
char *name;
bool expect_failure;
struct expected_msgs expect_msgs;
struct expected_msgs expect_xlated;
struct expected_msgs jited;
+ struct expected_msgs stderr;
+ struct expected_msgs stdout;
int retval;
bool execute;
__u64 caps;
@@ -94,6 +90,7 @@ struct test_spec {
int mode_mask;
int arch_mask;
int load_mask;
+ int linear_sz;
bool auxiliary;
bool valid;
};
@@ -139,6 +136,10 @@ static void free_test_spec(struct test_spec *spec)
free_msgs(&spec->unpriv.expect_xlated);
free_msgs(&spec->priv.jited);
free_msgs(&spec->unpriv.jited);
+ free_msgs(&spec->unpriv.stderr);
+ free_msgs(&spec->priv.stderr);
+ free_msgs(&spec->unpriv.stdout);
+ free_msgs(&spec->priv.stdout);
free(spec->priv.name);
free(spec->unpriv.name);
@@ -206,7 +207,8 @@ static int compile_regex(const char *pattern, regex_t *regex)
return 0;
}
-static int __push_msg(const char *pattern, bool on_next_line, struct expected_msgs *msgs)
+static int __push_msg(const char *pattern, bool on_next_line, bool negative,
+ struct expected_msgs *msgs)
{
struct expect_msg *msg;
void *tmp;
@@ -222,6 +224,7 @@ static int __push_msg(const char *pattern, bool on_next_line, struct expected_ms
msg = &msgs->patterns[msgs->cnt];
msg->on_next_line = on_next_line;
msg->substr = pattern;
+ msg->negative = negative;
msg->is_regex = false;
if (strstr(pattern, "{{")) {
err = compile_regex(pattern, &msg->regex);
@@ -240,16 +243,16 @@ static int clone_msgs(struct expected_msgs *from, struct expected_msgs *to)
for (i = 0; i < from->cnt; i++) {
msg = &from->patterns[i];
- err = __push_msg(msg->substr, msg->on_next_line, to);
+ err = __push_msg(msg->substr, msg->on_next_line, msg->negative, to);
if (err)
return err;
}
return 0;
}
-static int push_msg(const char *substr, struct expected_msgs *msgs)
+static int push_msg(const char *substr, bool negative, struct expected_msgs *msgs)
{
- return __push_msg(substr, false, msgs);
+ return __push_msg(substr, false, negative, msgs);
}
static int push_disasm_msg(const char *regex_str, bool *on_next_line, struct expected_msgs *msgs)
@@ -260,7 +263,7 @@ static int push_disasm_msg(const char *regex_str, bool *on_next_line, struct exp
*on_next_line = false;
return 0;
}
- err = __push_msg(regex_str, *on_next_line, msgs);
+ err = __push_msg(regex_str, *on_next_line, false, msgs);
if (err)
return err;
*on_next_line = true;
@@ -318,20 +321,14 @@ static int parse_caps(const char *str, __u64 *val, const char *name)
static int parse_retval(const char *str, int *val, const char *name)
{
- struct {
- char *name;
- int val;
- } named_values[] = {
- { "INT_MIN" , INT_MIN },
- { "POINTER_VALUE", POINTER_VALUE },
- { "TEST_DATA_LEN", TEST_DATA_LEN },
- };
- int i;
-
- for (i = 0; i < ARRAY_SIZE(named_values); ++i) {
- if (strcmp(str, named_values[i].name) != 0)
- continue;
- *val = named_values[i].val;
+ /*
+ * INT_MIN is defined as (-INT_MAX -1), i.e. it doesn't expand to a
+ * single int and cannot be parsed with strtol, so we handle it
+ * separately here. In addition, it expands to different expressions in
+ * different compilers so we use a prefixed _INT_MIN instead.
+ */
+ if (strcmp(str, "_INT_MIN") == 0) {
+ *val = INT_MIN;
return 0;
}
@@ -380,6 +377,7 @@ enum arch {
ARCH_X86_64 = 0x2,
ARCH_ARM64 = 0x4,
ARCH_RISCV64 = 0x8,
+ ARCH_S390X = 0x10,
};
static int get_current_arch(void)
@@ -390,6 +388,8 @@ static int get_current_arch(void)
return ARCH_ARM64;
#elif defined(__riscv) && __riscv_xlen == 64
return ARCH_RISCV64;
+#elif defined(__s390x__)
+ return ARCH_S390X;
#endif
return ARCH_UNKNOWN;
}
@@ -410,6 +410,10 @@ static int parse_test_spec(struct test_loader *tester,
bool xlated_on_next_line = true;
bool unpriv_jit_on_next_line;
bool jit_on_next_line;
+ bool stderr_on_next_line = true;
+ bool unpriv_stderr_on_next_line = true;
+ bool stdout_on_next_line = true;
+ bool unpriv_stdout_on_next_line = true;
bool collect_jit = false;
int func_id, i, err = 0;
u32 arch_mask = 0;
@@ -471,12 +475,22 @@ static int parse_test_spec(struct test_loader *tester,
spec->auxiliary = true;
spec->mode_mask |= UNPRIV;
} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_MSG_PFX))) {
- err = push_msg(msg, &spec->priv.expect_msgs);
+ err = push_msg(msg, false, &spec->priv.expect_msgs);
+ if (err)
+ goto cleanup;
+ spec->mode_mask |= PRIV;
+ } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_NOT_MSG_PFX))) {
+ err = push_msg(msg, true, &spec->priv.expect_msgs);
if (err)
goto cleanup;
spec->mode_mask |= PRIV;
} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_MSG_PFX_UNPRIV))) {
- err = push_msg(msg, &spec->unpriv.expect_msgs);
+ err = push_msg(msg, false, &spec->unpriv.expect_msgs);
+ if (err)
+ goto cleanup;
+ spec->mode_mask |= UNPRIV;
+ } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_NOT_MSG_PFX_UNPRIV))) {
+ err = push_msg(msg, true, &spec->unpriv.expect_msgs);
if (err)
goto cleanup;
spec->mode_mask |= UNPRIV;
@@ -571,8 +585,10 @@ static int parse_test_spec(struct test_loader *tester,
arch = ARCH_ARM64;
} else if (strcmp(val, "RISCV64") == 0) {
arch = ARCH_RISCV64;
+ } else if (strcmp(val, "s390x") == 0) {
+ arch = ARCH_S390X;
} else {
- PRINT_FAIL("bad arch spec: '%s'", val);
+ PRINT_FAIL("bad arch spec: '%s'\n", val);
err = -EINVAL;
goto cleanup;
}
@@ -599,6 +615,41 @@ static int parse_test_spec(struct test_loader *tester,
err = -EINVAL;
goto cleanup;
}
+ } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_STDERR_PFX))) {
+ err = push_disasm_msg(msg, &stderr_on_next_line,
+ &spec->priv.stderr);
+ if (err)
+ goto cleanup;
+ } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_STDERR_PFX_UNPRIV))) {
+ err = push_disasm_msg(msg, &unpriv_stderr_on_next_line,
+ &spec->unpriv.stderr);
+ if (err)
+ goto cleanup;
+ } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_STDOUT_PFX))) {
+ err = push_disasm_msg(msg, &stdout_on_next_line,
+ &spec->priv.stdout);
+ if (err)
+ goto cleanup;
+ } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_STDOUT_PFX_UNPRIV))) {
+ err = push_disasm_msg(msg, &unpriv_stdout_on_next_line,
+ &spec->unpriv.stdout);
+ if (err)
+ goto cleanup;
+ } else if (str_has_pfx(s, TEST_TAG_LINEAR_SIZE)) {
+ switch (bpf_program__type(prog)) {
+ case BPF_PROG_TYPE_SCHED_ACT:
+ case BPF_PROG_TYPE_SCHED_CLS:
+ case BPF_PROG_TYPE_CGROUP_SKB:
+ val = s + sizeof(TEST_TAG_LINEAR_SIZE) - 1;
+ err = parse_int(val, &spec->linear_sz, "test linear size");
+ if (err)
+ goto cleanup;
+ break;
+ default:
+ PRINT_FAIL("__linear_size for unsupported program type");
+ err = -EINVAL;
+ goto cleanup;
+ }
}
}
@@ -652,6 +703,10 @@ static int parse_test_spec(struct test_loader *tester,
clone_msgs(&spec->priv.expect_xlated, &spec->unpriv.expect_xlated);
if (spec->unpriv.jited.cnt == 0)
clone_msgs(&spec->priv.jited, &spec->unpriv.jited);
+ if (spec->unpriv.stderr.cnt == 0)
+ clone_msgs(&spec->priv.stderr, &spec->unpriv.stderr);
+ if (spec->unpriv.stdout.cnt == 0)
+ clone_msgs(&spec->priv.stdout, &spec->unpriv.stdout);
}
spec->valid = true;
@@ -713,44 +768,155 @@ static void emit_jited(const char *jited, bool force)
fprintf(stdout, "JITED:\n=============\n%s=============\n", jited);
}
-static void validate_msgs(char *log_buf, struct expected_msgs *msgs,
- void (*emit_fn)(const char *buf, bool force))
+static void emit_stderr(const char *stderr, bool force)
+{
+ if (!force && env.verbosity == VERBOSE_NONE)
+ return;
+ fprintf(stdout, "STDERR:\n=============\n%s=============\n", stderr);
+}
+
+static void emit_stdout(const char *bpf_stdout, bool force)
+{
+ if (!force && env.verbosity == VERBOSE_NONE)
+ return;
+ fprintf(stdout, "STDOUT:\n=============\n%s=============\n", bpf_stdout);
+}
+
+static const char *match_msg(struct expect_msg *msg, const char **log)
{
- const char *log = log_buf, *prev_match;
+ const char *match = NULL;
regmatch_t reg_match[1];
- int prev_match_line;
- int match_line;
- int i, j, err;
+ int err;
+
+ if (!msg->is_regex) {
+ match = strstr(*log, msg->substr);
+ if (match)
+ *log = match + strlen(msg->substr);
+ } else {
+ err = regexec(&msg->regex, *log, 1, reg_match, 0);
+ if (err == 0) {
+ match = *log + reg_match[0].rm_so;
+ *log += reg_match[0].rm_eo;
+ }
+ }
+ return match;
+}
+
+static int count_lines(const char *start, const char *end)
+{
+ const char *tmp;
+ int n = 0;
+
+ for (tmp = start; tmp < end; ++tmp)
+ if (*tmp == '\n')
+ n++;
+ return n;
+}
+
+struct match {
+ const char *start;
+ const char *end;
+ int line;
+};
+
+/*
+ * Positive messages are matched sequentially; each subsequent message
+ * is searched for starting from the end of the previous match.
+ */
+static void match_positive_msgs(const char *log, struct expected_msgs *msgs, struct match *matches)
+{
+ const char *prev_match;
+ int i, line;
- prev_match_line = -1;
- match_line = 0;
prev_match = log;
+ line = 0;
for (i = 0; i < msgs->cnt; i++) {
struct expect_msg *msg = &msgs->patterns[i];
- const char *match = NULL, *pat_status;
- bool wrong_line = false;
-
- if (!msg->is_regex) {
- match = strstr(log, msg->substr);
- if (match)
- log = match + strlen(msg->substr);
- } else {
- err = regexec(&msg->regex, log, 1, reg_match, 0);
- if (err == 0) {
- match = log + reg_match[0].rm_so;
- log += reg_match[0].rm_eo;
+ const char *match = NULL;
+
+ if (msg->negative)
+ continue;
+
+ match = match_msg(msg, &log);
+ if (match) {
+ line += count_lines(prev_match, match);
+ matches[i].start = match;
+ matches[i].end = log;
+ matches[i].line = line;
+ prev_match = match;
+ }
+ }
+}
+
+/*
+ * Each negative message N located between positive messages P1 and P2
+ * is matched in the span P1.end .. P2.start. Consequently, negative messages
+ * are unordered within the span.
+ */
+static void match_negative_msgs(const char *log, struct expected_msgs *msgs, struct match *matches)
+{
+ const char *start = log, *end, *next, *match;
+ const char *log_end = log + strlen(log);
+ int i, j, next_positive;
+
+ for (i = 0; i < msgs->cnt; i++) {
+ struct expect_msg *msg = &msgs->patterns[i];
+
+ /* positive message bumps span start */
+ if (!msg->negative) {
+ start = matches[i].end ?: start;
+ continue;
+ }
+
+ /* count stride of negative patterns and adjust span end */
+ end = log_end;
+ for (next_positive = i + 1; next_positive < msgs->cnt; next_positive++) {
+ if (!msgs->patterns[next_positive].negative) {
+ end = matches[next_positive].start;
+ break;
}
}
- if (match) {
- for (; prev_match < match; ++prev_match)
- if (*prev_match == '\n')
- ++match_line;
- wrong_line = msg->on_next_line && prev_match_line >= 0 &&
- prev_match_line + 1 != match_line;
+ /* try matching negative messages within identified span */
+ for (j = i; j < next_positive; j++) {
+ next = start;
+ match = match_msg(&msgs->patterns[j], &next);
+ if (match && next <= end) {
+ matches[j].start = match;
+ matches[j].end = next;
+ }
}
- if (!match || wrong_line) {
+ /* -1 to account for i++ */
+ i = next_positive - 1;
+ }
+}
+
+void validate_msgs(const char *log_buf, struct expected_msgs *msgs,
+ void (*emit_fn)(const char *buf, bool force))
+{
+ struct match matches[msgs->cnt];
+ struct match *prev_match = NULL;
+ int i, j;
+
+ memset(matches, 0, sizeof(*matches) * msgs->cnt);
+ match_positive_msgs(log_buf, msgs, matches);
+ match_negative_msgs(log_buf, msgs, matches);
+
+ for (i = 0; i < msgs->cnt; i++) {
+ struct expect_msg *msg = &msgs->patterns[i];
+ struct match *match = &matches[i];
+ const char *pat_status;
+ bool unexpected;
+ bool wrong_line;
+ bool no_match;
+
+ no_match = !msg->negative && !match->start;
+ wrong_line = !msg->negative &&
+ msg->on_next_line &&
+ prev_match && prev_match->line + 1 != match->line;
+ unexpected = msg->negative && match->start;
+ if (no_match || wrong_line || unexpected) {
PRINT_FAIL("expect_msg\n");
if (env.verbosity == VERBOSE_NONE)
emit_fn(log_buf, true /*force*/);
@@ -760,8 +926,10 @@ static void validate_msgs(char *log_buf, struct expected_msgs *msgs,
pat_status = "MATCHED ";
else if (wrong_line)
pat_status = "WRONG LINE";
- else
+ else if (no_match)
pat_status = "EXPECTED ";
+ else
+ pat_status = "UNEXPECTED";
msg = &msgs->patterns[j];
fprintf(stderr, "%s %s: '%s'\n",
pat_status,
@@ -771,12 +939,13 @@ static void validate_msgs(char *log_buf, struct expected_msgs *msgs,
if (wrong_line) {
fprintf(stderr,
"expecting match at line %d, actual match is at line %d\n",
- prev_match_line + 1, match_line);
+ prev_match->line + 1, match->line);
}
break;
}
- prev_match_line = match_line;
+ if (!msg->negative)
+ prev_match = match;
}
}
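The span rule implemented by match_negative_msgs() above can be illustrated standalone: a negative pattern placed between two positive ones must not occur between the end of the first match and the start of the second. A minimal sketch (names ours, strstr standing in for the regex path; this sample log deliberately trips the check):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *log = "load ok\nverifier: refcount bug\nexit ok\n";
	const char *span_start, *span_end, *neg;

	/* positive matches fix the span boundaries */
	span_start = strstr(log, "load ok") + strlen("load ok");
	span_end = strstr(span_start, "exit ok");

	/* the negative pattern must not occur inside the span */
	neg = strstr(span_start, "refcount bug");
	if (neg && neg < span_end)
		printf("UNEXPECTED: negative pattern matched in span\n");
	else
		printf("ok: negative pattern absent from span\n");
	return 0;
}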
@@ -855,10 +1024,11 @@ static bool is_unpriv_capable_map(struct bpf_map *map)
}
}
-static int do_prog_test_run(int fd_prog, int *retval, bool empty_opts)
+static int do_prog_test_run(int fd_prog, int *retval, bool empty_opts, int linear_sz)
{
__u8 tmp_out[TEST_DATA_LEN << 2] = {};
__u8 tmp_in[TEST_DATA_LEN] = {};
+ struct __sk_buff ctx = {};
int err, saved_errno;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = tmp_in,
@@ -868,6 +1038,12 @@ static int do_prog_test_run(int fd_prog, int *retval, bool empty_opts)
.repeat = 1,
);
+ if (linear_sz) {
+ ctx.data_end = linear_sz;
+ topts.ctx_in = &ctx;
+ topts.ctx_size_in = sizeof(ctx);
+ }
+
if (empty_opts) {
memset(&topts, 0, sizeof(struct bpf_test_run_opts));
topts.sz = sizeof(struct bpf_test_run_opts);
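The linear_sz path above relies on BPF_PROG_TEST_RUN accepting a caller-supplied __sk_buff context: setting ctx.data_end asks the kernel to keep that many packet bytes linear, per the __linear_size support this patch exercises. A sketch of the same call pattern (a reduced helper of ours, not the test harness itself; link against libbpf):

#include <linux/bpf.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static int run_with_linear(int prog_fd, void *data, int len,
			   int linear_sz, int *retval)
{
	struct __sk_buff ctx = { .data_end = linear_sz };
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = data,
		.data_size_in = len,
		.ctx_in = &ctx,
		.ctx_size_in = sizeof(ctx),
		.repeat = 1,
	);
	int err = bpf_prog_test_run_opts(prog_fd, &topts);

	if (!err)
		*retval = topts.retval;
	return err;
}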
@@ -935,6 +1111,19 @@ out:
return err;
}
+/* Read the bpf stream corresponding to the stream_id */
+static int get_stream(int stream_id, int prog_fd, char *text, size_t text_sz)
+{
+ LIBBPF_OPTS(bpf_prog_stream_read_opts, ropts);
+ int ret;
+
+ ret = bpf_prog_stream_read(prog_fd, stream_id, text, text_sz, &ropts);
+ if (!ASSERT_GT(ret, 0, "stream read"))
+ return ret;
+ text[ret] = '\0';
+
+ return ret;
+}
+
/* this function is forced noinline and has short generic name to look better
* in test_progs output (in case of a failure)
*/
@@ -1089,7 +1278,7 @@ void run_subtest(struct test_loader *tester,
link = bpf_map__attach_struct_ops(map);
if (!link) {
PRINT_FAIL("bpf_map__attach_struct_ops failed for map %s: err=%d\n",
- bpf_map__name(map), err);
+ bpf_map__name(map), -errno);
goto tobj_cleanup;
}
links[links_cnt++] = link;
@@ -1103,12 +1292,38 @@ void run_subtest(struct test_loader *tester,
}
}
- do_prog_test_run(bpf_program__fd(tprog), &retval,
- bpf_program__type(tprog) == BPF_PROG_TYPE_SYSCALL ? true : false);
- if (retval != subspec->retval && subspec->retval != POINTER_VALUE) {
+ err = do_prog_test_run(bpf_program__fd(tprog), &retval,
+ bpf_program__type(tprog) == BPF_PROG_TYPE_SYSCALL ? true : false,
+ spec->linear_sz);
+ if (!err && retval != subspec->retval && subspec->retval != POINTER_VALUE) {
PRINT_FAIL("Unexpected retval: %d != %d\n", retval, subspec->retval);
goto tobj_cleanup;
}
+
+ if (subspec->stderr.cnt) {
+ err = get_stream(2, bpf_program__fd(tprog),
+ tester->log_buf, tester->log_buf_sz);
+ if (err <= 0) {
+ PRINT_FAIL("Unexpected retval from get_stream(): %d, errno = %d\n",
+ err, errno);
+ goto tobj_cleanup;
+ }
+ emit_stderr(tester->log_buf, false /*force*/);
+ validate_msgs(tester->log_buf, &subspec->stderr, emit_stderr);
+ }
+
+ if (subspec->stdout.cnt) {
+ err = get_stream(1, bpf_program__fd(tprog),
+ tester->log_buf, tester->log_buf_sz);
+ if (err <= 0) {
+ PRINT_FAIL("Unexpected retval from get_stream(): %d, errno = %d\n",
+ err, errno);
+ goto tobj_cleanup;
+ }
+ emit_stdout(tester->log_buf, false /*force*/);
+ validate_msgs(tester->log_buf, &subspec->stdout, emit_stdout);
+ }
+
/* redo bpf_map__attach_struct_ops for each test */
while (links_cnt > 0)
bpf_link__destroy(links[--links_cnt]);
diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c
index fda7589c5023..0921939532c6 100644
--- a/tools/testing/selftests/bpf/test_lru_map.c
+++ b/tools/testing/selftests/bpf/test_lru_map.c
@@ -138,6 +138,18 @@ static int sched_next_online(int pid, int *next_to_try)
return ret;
}
+/* Derive target_free from map_size, same as bpf_common_lru_populate */
+static unsigned int __tgt_size(unsigned int map_size)
+{
+ return (map_size / nr_cpus) / 2;
+}
+
+/* Inverse of how bpf_common_lru_populate derives target_free from map_size. */
+static unsigned int __map_size(unsigned int tgt_free)
+{
+ return tgt_free * nr_cpus * 2;
+}
+
/* Size of the LRU map is 2
* Add key=1 (+1 key)
* Add key=2 (+1 key)
@@ -231,11 +243,11 @@ static void test_lru_sanity0(int map_type, int map_flags)
printf("Pass\n");
}
-/* Size of the LRU map is 1.5*tgt_free
- * Insert 1 to tgt_free (+tgt_free keys)
- * Lookup 1 to tgt_free/2
- * Insert 1+tgt_free to 2*tgt_free (+tgt_free keys)
- * => 1+tgt_free/2 to LOCALFREE_TARGET will be removed by LRU
+/* Verify that unreferenced elements are recycled before referenced ones.
+ * Insert elements.
+ * Reference a subset of these.
+ * Insert more, enough to trigger recycling.
+ * Verify that unreferenced are recycled.
*/
static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
{
@@ -257,7 +269,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
batch_size = tgt_free / 2;
assert(batch_size * 2 == tgt_free);
- map_size = tgt_free + batch_size;
+ map_size = __map_size(tgt_free) + batch_size;
lru_map_fd = create_map(map_type, map_flags, map_size);
assert(lru_map_fd != -1);
@@ -266,13 +278,13 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
value[0] = 1234;
- /* Insert 1 to tgt_free (+tgt_free keys) */
- end_key = 1 + tgt_free;
+ /* Insert map_size - batch_size keys */
+ end_key = 1 + __map_size(tgt_free);
for (key = 1; key < end_key; key++)
assert(!bpf_map_update_elem(lru_map_fd, &key, value,
BPF_NOEXIST));
- /* Lookup 1 to tgt_free/2 */
+ /* Lookup 1 to batch_size */
end_key = 1 + batch_size;
for (key = 1; key < end_key; key++) {
assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
@@ -280,12 +292,13 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
BPF_NOEXIST));
}
- /* Insert 1+tgt_free to 2*tgt_free
- * => 1+tgt_free/2 to LOCALFREE_TARGET will be
+ /* Insert another map_size - batch_size keys
+ * The map will then contain keys 1 to batch_size plus these latest,
+ * i.e., the previous keys 1+batch_size to map_size - batch_size will have been
* removed by LRU
*/
- key = 1 + tgt_free;
- end_key = key + tgt_free;
+ key = 1 + __map_size(tgt_free);
+ end_key = key + __map_size(tgt_free);
for (; key < end_key; key++) {
assert(!bpf_map_update_elem(lru_map_fd, &key, value,
BPF_NOEXIST));
@@ -301,17 +314,8 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
printf("Pass\n");
}
-/* Size of the LRU map 1.5 * tgt_free
- * Insert 1 to tgt_free (+tgt_free keys)
- * Update 1 to tgt_free/2
- * => The original 1 to tgt_free/2 will be removed due to
- * the LRU shrink process
- * Re-insert 1 to tgt_free/2 again and do a lookup immeidately
- * Insert 1+tgt_free to tgt_free*3/2
- * Insert 1+tgt_free*3/2 to tgt_free*5/2
- * => Key 1+tgt_free to tgt_free*3/2
- * will be removed from LRU because it has never
- * been lookup and ref bit is not set
+/* Verify that insertions exceeding map size will recycle the oldest.
+ * Verify that unreferenced elements are recycled before referenced.
*/
static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
{
@@ -334,7 +338,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
batch_size = tgt_free / 2;
assert(batch_size * 2 == tgt_free);
- map_size = tgt_free + batch_size;
+ map_size = __map_size(tgt_free) + batch_size;
lru_map_fd = create_map(map_type, map_flags, map_size);
assert(lru_map_fd != -1);
@@ -343,8 +347,8 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
value[0] = 1234;
- /* Insert 1 to tgt_free (+tgt_free keys) */
- end_key = 1 + tgt_free;
+ /* Insert map_size - batch_size keys */
+ end_key = 1 + __map_size(tgt_free);
for (key = 1; key < end_key; key++)
assert(!bpf_map_update_elem(lru_map_fd, &key, value,
BPF_NOEXIST));
@@ -357,8 +361,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
* shrink the inactive list to get tgt_free
* number of free nodes.
*
- * Hence, the oldest key 1 to tgt_free/2
- * are removed from the LRU list.
+ * Hence, the oldest key is removed from the LRU list.
*/
key = 1;
if (map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
@@ -370,8 +373,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
BPF_EXIST));
}
- /* Re-insert 1 to tgt_free/2 again and do a lookup
- * immeidately.
+ /* Re-insert 1 to batch_size again and do a lookup immediately.
*/
end_key = 1 + batch_size;
value[0] = 4321;
@@ -387,17 +389,18 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
value[0] = 1234;
- /* Insert 1+tgt_free to tgt_free*3/2 */
- end_key = 1 + tgt_free + batch_size;
- for (key = 1 + tgt_free; key < end_key; key++)
+ /* Insert batch_size new elements */
+ key = 1 + __map_size(tgt_free);
+ end_key = key + batch_size;
+ for (; key < end_key; key++)
/* These newly added but not referenced keys will be
* gone during the next LRU shrink.
*/
assert(!bpf_map_update_elem(lru_map_fd, &key, value,
BPF_NOEXIST));
- /* Insert 1+tgt_free*3/2 to tgt_free*5/2 */
- end_key = key + tgt_free;
+ /* Insert map_size - batch_size elements */
+ end_key += __map_size(tgt_free);
for (; key < end_key; key++) {
assert(!bpf_map_update_elem(lru_map_fd, &key, value,
BPF_NOEXIST));
@@ -413,12 +416,12 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
printf("Pass\n");
}
-/* Size of the LRU map is 2*tgt_free
- * It is to test the active/inactive list rotation
- * Insert 1 to 2*tgt_free (+2*tgt_free keys)
- * Lookup key 1 to tgt_free*3/2
- * Add 1+2*tgt_free to tgt_free*5/2 (+tgt_free/2 keys)
- * => key 1+tgt_free*3/2 to 2*tgt_free are removed from LRU
+/* Test the active/inactive list rotation
+ *
+ * Fill the whole map, deplete the free list.
+ * Reference all except the last lru->target_free elements.
+ * Insert lru->target_free new elements. This triggers one shrink.
+ * Verify that the non-referenced elements are replaced.
*/
static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
{
@@ -437,8 +440,7 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
assert(sched_next_online(0, &next_cpu) != -1);
- batch_size = tgt_free / 2;
- assert(batch_size * 2 == tgt_free);
+ batch_size = __tgt_size(tgt_free);
map_size = tgt_free * 2;
lru_map_fd = create_map(map_type, map_flags, map_size);
@@ -449,23 +451,21 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
value[0] = 1234;
- /* Insert 1 to 2*tgt_free (+2*tgt_free keys) */
- end_key = 1 + (2 * tgt_free);
+ /* Fill the map */
+ end_key = 1 + map_size;
for (key = 1; key < end_key; key++)
assert(!bpf_map_update_elem(lru_map_fd, &key, value,
BPF_NOEXIST));
- /* Lookup key 1 to tgt_free*3/2 */
- end_key = tgt_free + batch_size;
+ /* Reference all but the last batch_size */
+ end_key = 1 + map_size - batch_size;
for (key = 1; key < end_key; key++) {
assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
assert(!bpf_map_update_elem(expected_map_fd, &key, value,
BPF_NOEXIST));
}
- /* Add 1+2*tgt_free to tgt_free*5/2
- * (+tgt_free/2 keys)
- */
+ /* Insert batch_size new elements: they replace the non-referenced ones */
key = 2 * tgt_free + 1;
end_key = key + batch_size;
for (; key < end_key; key++) {
@@ -500,7 +500,8 @@ static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free)
lru_map_fd = create_map(map_type, map_flags,
3 * tgt_free * nr_cpus);
else
- lru_map_fd = create_map(map_type, map_flags, 3 * tgt_free);
+ lru_map_fd = create_map(map_type, map_flags,
+ 3 * __map_size(tgt_free));
assert(lru_map_fd != -1);
expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0,
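Worked numbers for the two helpers introduced above: with nr_cpus = 4 (an assumption for the example) and tgt_free = 128, __map_size() gives 128 * 4 * 2 = 1024, and __tgt_size(1024) = (1024 / 4) / 2 = 128, so the two are inverses whenever the divisions are exact. A sketch (names ours):

#include <stdio.h>

static unsigned int nr_cpus = 4;	/* assumed for the example */

static unsigned int tgt_size(unsigned int map_size)	/* mirrors __tgt_size() */
{
	return (map_size / nr_cpus) / 2;
}

static unsigned int map_size(unsigned int tgt_free)	/* mirrors __map_size() */
{
	return tgt_free * nr_cpus * 2;
}

int main(void)
{
	unsigned int tgt_free = 128;

	/* prints: map_size(128) = 1024, tgt_size(1024) = 128 */
	printf("map_size(%u) = %u, tgt_size(%u) = %u\n",
	       tgt_free, map_size(tgt_free),
	       map_size(tgt_free), tgt_size(map_size(tgt_free)));
	return 0;
}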
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 986ce32b113a..ccc5acd55ff9 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -535,7 +535,7 @@ static void test_devmap_hash(unsigned int task, void *data)
static void test_queuemap(unsigned int task, void *data)
{
const int MAP_SIZE = 32;
- __u32 vals[MAP_SIZE + MAP_SIZE/2], val;
+ __u32 vals[MAP_SIZE + MAP_SIZE/2], val = 0;
int fd, i;
/* Fill test values to be used */
@@ -591,7 +591,7 @@ static void test_queuemap(unsigned int task, void *data)
static void test_stackmap(unsigned int task, void *data)
{
const int MAP_SIZE = 32;
- __u32 vals[MAP_SIZE + MAP_SIZE/2], val;
+ __u32 vals[MAP_SIZE + MAP_SIZE/2], val = 0;
int fd, i;
/* Fill test values to be used */
@@ -1399,7 +1399,8 @@ static void test_map_stress(void)
static bool can_retry(int err)
{
return (err == EAGAIN || err == EBUSY ||
- (err == ENOMEM && map_opts.map_flags == BPF_F_NO_PREALLOC));
+ ((err == ENOMEM || err == E2BIG) &&
+ map_opts.map_flags == BPF_F_NO_PREALLOC));
}
int map_update_retriable(int map_fd, const void *key, const void *value, int flags, int attempts,
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 309d9d4a8ace..02a85dda30e6 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -14,12 +14,14 @@
#include <netinet/in.h>
#include <sys/select.h>
#include <sys/socket.h>
+#include <linux/keyctl.h>
#include <sys/un.h>
#include <bpf/btf.h>
#include <time.h>
#include "json_writer.h"
#include "network_helpers.h"
+#include "verification_cert.h"
/* backtrace() and backtrace_symbols_fd() are glibc specific,
* use header file when glibc is available and provide stub
@@ -1928,6 +1930,13 @@ static void free_test_states(void)
}
}
+static __u32 register_session_key(const char *key_data, size_t key_data_size)
+{
+ return syscall(__NR_add_key, "asymmetric", "libbpf_session_key",
+ (const void *)key_data, key_data_size,
+ KEY_SPEC_SESSION_KEYRING);
+}
+
int main(int argc, char **argv)
{
static const struct argp argp = {
@@ -1961,6 +1970,10 @@ int main(int argc, char **argv)
/* Use libbpf 1.0 API mode */
libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
libbpf_set_print(libbpf_print_fn);
+ err = register_session_key((const char *)test_progs_verification_cert,
+ test_progs_verification_cert_len);
+ if (err < 0)
+ return err;
traffic_monitor_set_print(traffic_monitor_print_fn);
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
index 870694f2a359..eebfc18cdcd2 100644
--- a/tools/testing/selftests/bpf/test_progs.h
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -7,6 +7,7 @@
#include <errno.h>
#include <string.h>
#include <assert.h>
+#include <regex.h>
#include <stdlib.h>
#include <stdarg.h>
#include <time.h>
@@ -460,6 +461,34 @@ static inline void *u64_to_ptr(__u64 ptr)
return (void *) (unsigned long) ptr;
}
+static inline __u32 id_from_prog_fd(int fd)
+{
+ struct bpf_prog_info prog_info = {};
+ __u32 prog_info_len = sizeof(prog_info);
+ int err;
+
+ err = bpf_obj_get_info_by_fd(fd, &prog_info, &prog_info_len);
+ if (!ASSERT_OK(err, "id_from_prog_fd"))
+ return 0;
+
+ ASSERT_NEQ(prog_info.id, 0, "prog_info.id");
+ return prog_info.id;
+}
+
+static inline __u32 id_from_link_fd(int fd)
+{
+ struct bpf_link_info link_info = {};
+ __u32 link_info_len = sizeof(link_info);
+ int err;
+
+ err = bpf_link_get_info_by_fd(fd, &link_info, &link_info_len);
+ if (!ASSERT_OK(err, "id_from_link_fd"))
+ return 0;
+
+ ASSERT_NEQ(link_info.id, 0, "link_info.id");
+ return link_info.id;
+}
+
int bpf_find_map(const char *test, struct bpf_object *obj, const char *name);
int compare_map_keys(int map1_fd, int map2_fd);
int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len);
@@ -518,4 +547,20 @@ extern void test_loader_fini(struct test_loader *tester);
test_loader_fini(&tester); \
})
+struct expect_msg {
+ const char *substr; /* substring match */
+ regex_t regex;
+ bool is_regex;
+ bool on_next_line;
+ bool negative;
+};
+
+struct expected_msgs {
+ struct expect_msg *patterns;
+ size_t cnt;
+};
+
+void validate_msgs(const char *log_buf, struct expected_msgs *msgs,
+ void (*emit_fn)(const char *buf, bool force));
+
#endif /* __TEST_PROGS_H */
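id_from_prog_fd() and id_from_link_fd() above follow the usual libbpf pattern: zero an info struct, pass its size by pointer, and read back the kernel-assigned id. A standalone sketch of the same pattern (error handling reduced, names ours):

/* cc sketch.c -lbpf */
#include <stdio.h>
#include <linux/types.h>
#include <bpf/bpf.h>

static __u32 prog_id_from_fd(int fd)
{
	struct bpf_prog_info info = {};
	__u32 len = sizeof(info);

	/* fills info.id with the kernel-assigned program id */
	if (bpf_obj_get_info_by_fd(fd, &info, &len))
		return 0;	/* 0 is never a valid id */
	return info.id;
}

int main(void)
{
	/* -1 is not a BPF fd, so this prints id=0 */
	printf("id=%u\n", prog_id_from_fd(-1));
	return 0;
}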
diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
index fd2da2234cc9..76568db7a664 100644
--- a/tools/testing/selftests/bpf/test_sockmap.c
+++ b/tools/testing/selftests/bpf/test_sockmap.c
@@ -1372,7 +1372,7 @@ run:
} else
fprintf(stderr, "unknown test\n");
out:
- /* Detatch and zero all the maps */
+ /* Detach and zero all the maps */
bpf_prog_detach2(bpf_program__fd(progs[3]), cg_fd, BPF_CGROUP_SOCK_OPS);
for (i = 0; i < ARRAY_SIZE(links); i++) {
diff --git a/tools/testing/selftests/bpf/test_tag.c b/tools/testing/selftests/bpf/test_tag.c
index 5546b05a0486..f1300047c1e0 100644
--- a/tools/testing/selftests/bpf/test_tag.c
+++ b/tools/testing/selftests/bpf/test_tag.c
@@ -116,7 +116,7 @@ static void tag_from_alg(int insns, uint8_t *tag, uint32_t len)
static const struct sockaddr_alg alg = {
.salg_family = AF_ALG,
.salg_type = "hash",
- .salg_name = "sha1",
+ .salg_name = "sha256",
};
int fd_base, fd_alg, ret;
ssize_t size;
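tag_from_alg() computes the digest through the kernel's AF_ALG socket interface, now with sha256. A standalone sketch of that flow, assuming the kernel's user-space crypto API is enabled (names ours): bind a hash socket, accept an operation fd, write the data, read back the digest.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	static const struct sockaddr_alg alg = {
		.salg_family = AF_ALG,
		.salg_type = "hash",
		.salg_name = "sha256",
	};
	unsigned char digest[32];
	int base, op;

	base = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (base < 0 || bind(base, (struct sockaddr *)&alg, sizeof(alg)))
		return 1;
	op = accept(base, NULL, 0);
	if (op < 0)
		return 1;
	if (write(op, "abc", 3) != 3)
		return 1;
	if (read(op, digest, sizeof(digest)) != sizeof(digest))
		return 1;
	for (int i = 0; i < 32; i++)
		printf("%02x", digest[i]);
	printf("\n");	/* sha256("abc") starts with ba7816bf... */
	return 0;
}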
diff --git a/tools/testing/selftests/bpf/test_tc_edt.sh b/tools/testing/selftests/bpf/test_tc_edt.sh
deleted file mode 100755
index 76f0bd17061f..000000000000
--- a/tools/testing/selftests/bpf/test_tc_edt.sh
+++ /dev/null
@@ -1,100 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-#
-# This test installs a TC bpf program that throttles a TCP flow
-# with dst port = 9000 down to 5MBps. Then it measures actual
-# throughput of the flow.
-
-BPF_FILE="test_tc_edt.bpf.o"
-if [[ $EUID -ne 0 ]]; then
- echo "This script must be run as root"
- echo "FAIL"
- exit 1
-fi
-
-# check that nc, dd, and timeout are present
-command -v nc >/dev/null 2>&1 || \
- { echo >&2 "nc is not available"; exit 1; }
-command -v dd >/dev/null 2>&1 || \
- { echo >&2 "nc is not available"; exit 1; }
-command -v timeout >/dev/null 2>&1 || \
- { echo >&2 "timeout is not available"; exit 1; }
-
-readonly NS_SRC="ns-src-$(mktemp -u XXXXXX)"
-readonly NS_DST="ns-dst-$(mktemp -u XXXXXX)"
-
-readonly IP_SRC="172.16.1.100"
-readonly IP_DST="172.16.2.100"
-
-cleanup()
-{
- ip netns del ${NS_SRC}
- ip netns del ${NS_DST}
-}
-
-trap cleanup EXIT
-
-set -e # exit on error
-
-ip netns add "${NS_SRC}"
-ip netns add "${NS_DST}"
-ip link add veth_src type veth peer name veth_dst
-ip link set veth_src netns ${NS_SRC}
-ip link set veth_dst netns ${NS_DST}
-
-ip -netns ${NS_SRC} addr add ${IP_SRC}/24 dev veth_src
-ip -netns ${NS_DST} addr add ${IP_DST}/24 dev veth_dst
-
-ip -netns ${NS_SRC} link set dev veth_src up
-ip -netns ${NS_DST} link set dev veth_dst up
-
-ip -netns ${NS_SRC} route add ${IP_DST}/32 dev veth_src
-ip -netns ${NS_DST} route add ${IP_SRC}/32 dev veth_dst
-
-# set up TC on TX
-ip netns exec ${NS_SRC} tc qdisc add dev veth_src root fq
-ip netns exec ${NS_SRC} tc qdisc add dev veth_src clsact
-ip netns exec ${NS_SRC} tc filter add dev veth_src egress \
- bpf da obj ${BPF_FILE} sec cls_test
-
-
-# start the listener
-ip netns exec ${NS_DST} bash -c \
- "nc -4 -l -p 9000 >/dev/null &"
-declare -i NC_PID=$!
-sleep 1
-
-declare -ir TIMEOUT=20
-declare -ir EXPECTED_BPS=5000000
-
-# run the load, capture RX bytes on DST
-declare -ir RX_BYTES_START=$( ip netns exec ${NS_DST} \
- cat /sys/class/net/veth_dst/statistics/rx_bytes )
-
-set +e
-ip netns exec ${NS_SRC} bash -c "timeout ${TIMEOUT} dd if=/dev/zero \
- bs=1000 count=1000000 > /dev/tcp/${IP_DST}/9000 2>/dev/null"
-set -e
-
-declare -ir RX_BYTES_END=$( ip netns exec ${NS_DST} \
- cat /sys/class/net/veth_dst/statistics/rx_bytes )
-
-declare -ir ACTUAL_BPS=$(( ($RX_BYTES_END - $RX_BYTES_START) / $TIMEOUT ))
-
-echo $TIMEOUT $ACTUAL_BPS $EXPECTED_BPS | \
- awk '{printf "elapsed: %d sec; bps difference: %.2f%%\n",
- $1, ($2-$3)*100.0/$3}'
-
-# Pass the test if the actual bps is within 1% of the expected bps.
-# The difference is usually about 0.1% on a 20-sec test, and ==> zero
-# the longer the test runs.
-declare -ir RES=$( echo $ACTUAL_BPS $EXPECTED_BPS | \
- awk 'function abs(x){return ((x < 0.0) ? -x : x)}
- {if (abs(($1-$2)*100.0/$2) > 1.0) { print "1" }
- else { print "0"} }' )
-if [ "${RES}" == "0" ] ; then
- echo "PASS"
-else
- echo "FAIL"
- exit 1
-fi
diff --git a/tools/testing/selftests/bpf/test_tc_tunnel.sh b/tools/testing/selftests/bpf/test_tc_tunnel.sh
deleted file mode 100755
index cb55a908bb0d..000000000000
--- a/tools/testing/selftests/bpf/test_tc_tunnel.sh
+++ /dev/null
@@ -1,320 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-#
-# In-place tunneling
-
-BPF_FILE="test_tc_tunnel.bpf.o"
-# must match the port that the bpf program filters on
-readonly port=8000
-
-readonly ns_prefix="ns-$$-"
-readonly ns1="${ns_prefix}1"
-readonly ns2="${ns_prefix}2"
-
-readonly ns1_v4=192.168.1.1
-readonly ns2_v4=192.168.1.2
-readonly ns1_v6=fd::1
-readonly ns2_v6=fd::2
-
-# Must match port used by bpf program
-readonly udpport=5555
-# MPLSoverUDP
-readonly mplsudpport=6635
-readonly mplsproto=137
-
-readonly infile="$(mktemp)"
-readonly outfile="$(mktemp)"
-
-setup() {
- ip netns add "${ns1}"
- ip netns add "${ns2}"
-
- ip link add dev veth1 mtu 1500 netns "${ns1}" type veth \
- peer name veth2 mtu 1500 netns "${ns2}"
-
- ip netns exec "${ns1}" ethtool -K veth1 tso off
-
- ip -netns "${ns1}" link set veth1 up
- ip -netns "${ns2}" link set veth2 up
-
- ip -netns "${ns1}" -4 addr add "${ns1_v4}/24" dev veth1
- ip -netns "${ns2}" -4 addr add "${ns2_v4}/24" dev veth2
- ip -netns "${ns1}" -6 addr add "${ns1_v6}/64" dev veth1 nodad
- ip -netns "${ns2}" -6 addr add "${ns2_v6}/64" dev veth2 nodad
-
- # clamp route to reserve room for tunnel headers
- ip -netns "${ns1}" -4 route flush table main
- ip -netns "${ns1}" -6 route flush table main
- ip -netns "${ns1}" -4 route add "${ns2_v4}" mtu 1450 dev veth1
- ip -netns "${ns1}" -6 route add "${ns2_v6}" mtu 1430 dev veth1
-
- sleep 1
-
- dd if=/dev/urandom of="${infile}" bs="${datalen}" count=1 status=none
-}
-
-cleanup() {
- ip netns del "${ns2}"
- ip netns del "${ns1}"
-
- if [[ -f "${outfile}" ]]; then
- rm "${outfile}"
- fi
- if [[ -f "${infile}" ]]; then
- rm "${infile}"
- fi
-
- if [[ -n $server_pid ]]; then
- kill $server_pid 2> /dev/null
- fi
-}
-
-server_listen() {
- ip netns exec "${ns2}" nc "${netcat_opt}" -l "${port}" > "${outfile}" &
- server_pid=$!
-}
-
-client_connect() {
- ip netns exec "${ns1}" timeout 2 nc "${netcat_opt}" -w 1 "${addr2}" "${port}" < "${infile}"
- echo $?
-}
-
-verify_data() {
- wait "${server_pid}"
- server_pid=
- # sha1sum returns two fields [sha1] [filepath]
- # convert to bash array and access first elem
- insum=($(sha1sum ${infile}))
- outsum=($(sha1sum ${outfile}))
- if [[ "${insum[0]}" != "${outsum[0]}" ]]; then
- echo "data mismatch"
- exit 1
- fi
-}
-
-wait_for_port() {
- for i in $(seq 20); do
- if ip netns exec "${ns2}" ss ${2:--4}OHntl | grep -q "$1"; then
- return 0
- fi
- sleep 0.1
- done
- return 1
-}
-
-set -e
-
-# no arguments: automated test, run all
-if [[ "$#" -eq "0" ]]; then
- echo "ipip"
- $0 ipv4 ipip none 100
-
- echo "ipip6"
- $0 ipv4 ipip6 none 100
-
- echo "ip6ip6"
- $0 ipv6 ip6tnl none 100
-
- echo "sit"
- $0 ipv6 sit none 100
-
- echo "ip4 vxlan"
- $0 ipv4 vxlan eth 2000
-
- echo "ip6 vxlan"
- $0 ipv6 ip6vxlan eth 2000
-
- for mac in none mpls eth ; do
- echo "ip gre $mac"
- $0 ipv4 gre $mac 100
-
- echo "ip6 gre $mac"
- $0 ipv6 ip6gre $mac 100
-
- echo "ip gre $mac gso"
- $0 ipv4 gre $mac 2000
-
- echo "ip6 gre $mac gso"
- $0 ipv6 ip6gre $mac 2000
-
- echo "ip udp $mac"
- $0 ipv4 udp $mac 100
-
- echo "ip6 udp $mac"
- $0 ipv6 ip6udp $mac 100
-
- echo "ip udp $mac gso"
- $0 ipv4 udp $mac 2000
-
- echo "ip6 udp $mac gso"
- $0 ipv6 ip6udp $mac 2000
- done
-
- echo "OK. All tests passed"
- exit 0
-fi
-
-if [[ "$#" -ne "4" ]]; then
- echo "Usage: $0"
- echo " or: $0 <ipv4|ipv6> <tuntype> <none|mpls|eth> <data_len>"
- exit 1
-fi
-
-case "$1" in
-"ipv4")
- readonly addr1="${ns1_v4}"
- readonly addr2="${ns2_v4}"
- readonly ipproto=4
- readonly netcat_opt=-${ipproto}
- readonly foumod=fou
- readonly foutype=ipip
- readonly fouproto=4
- readonly fouproto_mpls=${mplsproto}
- readonly gretaptype=gretap
- ;;
-"ipv6")
- readonly addr1="${ns1_v6}"
- readonly addr2="${ns2_v6}"
- readonly ipproto=6
- readonly netcat_opt=-${ipproto}
- readonly foumod=fou6
- readonly foutype=ip6tnl
- readonly fouproto="41 -6"
- readonly fouproto_mpls="${mplsproto} -6"
- readonly gretaptype=ip6gretap
- ;;
-*)
- echo "unknown arg: $1"
- exit 1
- ;;
-esac
-
-readonly tuntype=$2
-readonly mac=$3
-readonly datalen=$4
-
-echo "encap ${addr1} to ${addr2}, type ${tuntype}, mac ${mac} len ${datalen}"
-
-trap cleanup EXIT
-
-setup
-
-# basic communication works
-echo "test basic connectivity"
-server_listen
-wait_for_port ${port} ${netcat_opt}
-client_connect
-verify_data
-
-# clientside, insert bpf program to encap all TCP to port ${port}
-# client can no longer connect
-ip netns exec "${ns1}" tc qdisc add dev veth1 clsact
-ip netns exec "${ns1}" tc filter add dev veth1 egress \
- bpf direct-action object-file ${BPF_FILE} \
- section "encap_${tuntype}_${mac}"
-echo "test bpf encap without decap (expect failure)"
-server_listen
-wait_for_port ${port} ${netcat_opt}
-! client_connect
-
-if [[ "$tuntype" =~ "udp" ]]; then
- # Set up fou tunnel.
- ttype="${foutype}"
- targs="encap fou encap-sport auto encap-dport $udpport"
- # fou may be a module; allow this to fail.
- modprobe "${foumod}" ||true
- if [[ "$mac" == "mpls" ]]; then
- dport=${mplsudpport}
- dproto=${fouproto_mpls}
- tmode="mode any ttl 255"
- else
- dport=${udpport}
- dproto=${fouproto}
- fi
- ip netns exec "${ns2}" ip fou add port $dport ipproto ${dproto}
- targs="encap fou encap-sport auto encap-dport $dport"
-elif [[ "$tuntype" =~ "gre" && "$mac" == "eth" ]]; then
- ttype=$gretaptype
-elif [[ "$tuntype" =~ "vxlan" && "$mac" == "eth" ]]; then
- ttype="vxlan"
- targs="id 1 dstport 8472 udp6zerocsumrx"
-elif [[ "$tuntype" == "ipip6" ]]; then
- ttype="ip6tnl"
- targs=""
-else
- ttype=$tuntype
- targs=""
-fi
-
-# tunnel address family differs from inner for SIT
-if [[ "${tuntype}" == "sit" ]]; then
- link_addr1="${ns1_v4}"
- link_addr2="${ns2_v4}"
-elif [[ "${tuntype}" == "ipip6" ]]; then
- link_addr1="${ns1_v6}"
- link_addr2="${ns2_v6}"
-else
- link_addr1="${addr1}"
- link_addr2="${addr2}"
-fi
-
-# serverside, insert decap module
-# server is still running
-# client can connect again
-ip netns exec "${ns2}" ip link add name testtun0 type "${ttype}" \
- ${tmode} remote "${link_addr1}" local "${link_addr2}" $targs
-
-expect_tun_fail=0
-
-if [[ "$tuntype" == "ip6udp" && "$mac" == "mpls" ]]; then
- # No support for MPLS IPv6 fou tunnel; expect failure.
- expect_tun_fail=1
-elif [[ "$tuntype" =~ "udp" && "$mac" == "eth" ]]; then
- # No support for TEB fou tunnel; expect failure.
- expect_tun_fail=1
-elif [[ "$tuntype" =~ (gre|vxlan) && "$mac" == "eth" ]]; then
- # Share ethernet address between tunnel/veth2 so L2 decap works.
- ethaddr=$(ip netns exec "${ns2}" ip link show veth2 | \
- awk '/ether/ { print $2 }')
- ip netns exec "${ns2}" ip link set testtun0 address $ethaddr
-elif [[ "$mac" == "mpls" ]]; then
- modprobe mpls_iptunnel ||true
- modprobe mpls_gso ||true
- ip netns exec "${ns2}" sysctl -qw net.mpls.platform_labels=65536
- ip netns exec "${ns2}" ip -f mpls route add 1000 dev lo
- ip netns exec "${ns2}" ip link set lo up
- ip netns exec "${ns2}" sysctl -qw net.mpls.conf.testtun0.input=1
- ip netns exec "${ns2}" sysctl -qw net.ipv4.conf.lo.rp_filter=0
-fi
-
-# Because packets are decapped by the tunnel they arrive on testtun0 from
-# the IP stack perspective. Ensure reverse path filtering is disabled
-# otherwise we drop the TCP SYN as arriving on testtun0 instead of the
-# expected veth2 (veth2 is where 192.168.1.2 is configured).
-ip netns exec "${ns2}" sysctl -qw net.ipv4.conf.all.rp_filter=0
-# rp needs to be disabled for both all and testtun0 as the rp value is
-# selected as the max of the "all" and device-specific values.
-ip netns exec "${ns2}" sysctl -qw net.ipv4.conf.testtun0.rp_filter=0
-ip netns exec "${ns2}" ip link set dev testtun0 up
-if [[ "$expect_tun_fail" == 1 ]]; then
- # This tunnel mode is not supported, so we expect failure.
- echo "test bpf encap with tunnel device decap (expect failure)"
- ! client_connect
-else
- echo "test bpf encap with tunnel device decap"
- client_connect
- verify_data
- server_listen
- wait_for_port ${port} ${netcat_opt}
-fi
-
-# serverside, use BPF for decap
-ip netns exec "${ns2}" ip link del dev testtun0
-ip netns exec "${ns2}" tc qdisc add dev veth2 clsact
-ip netns exec "${ns2}" tc filter add dev veth2 ingress \
- bpf direct-action object-file ${BPF_FILE} section decap
-echo "test bpf encap with bpf decap"
-client_connect
-verify_data
-
-echo OK
diff --git a/tools/testing/selftests/bpf/test_tcpnotify_user.c b/tools/testing/selftests/bpf/test_tcpnotify_user.c
index 595194453ff8..35b4893ccdf8 100644
--- a/tools/testing/selftests/bpf/test_tcpnotify_user.c
+++ b/tools/testing/selftests/bpf/test_tcpnotify_user.c
@@ -15,20 +15,18 @@
#include <bpf/libbpf.h>
#include <sys/ioctl.h>
#include <linux/rtnetlink.h>
-#include <signal.h>
#include <linux/perf_event.h>
-#include <linux/err.h>
-#include "bpf_util.h"
#include "cgroup_helpers.h"
#include "test_tcpnotify.h"
-#include "trace_helpers.h"
#include "testing_helpers.h"
#define SOCKET_BUFFER_SIZE (getpagesize() < 8192L ? getpagesize() : 8192L)
pthread_t tid;
+static bool exit_thread;
+
int rx_callbacks;
static void dummyfn(void *ctx, int cpu, void *data, __u32 size)
@@ -45,7 +43,7 @@ void tcp_notifier_poller(struct perf_buffer *pb)
{
int err;
- while (1) {
+ while (!exit_thread) {
err = perf_buffer__poll(pb, 100);
if (err < 0 && err != -EINTR) {
printf("failed perf_buffer__poll: %d\n", err);
@@ -78,15 +76,10 @@ int main(int argc, char **argv)
int error = EXIT_FAILURE;
struct bpf_object *obj;
char test_script[80];
- cpu_set_t cpuset;
__u32 key = 0;
libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
- CPU_ZERO(&cpuset);
- CPU_SET(0, &cpuset);
- pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
-
cg_fd = cgroup_setup_and_join(cg_path);
if (cg_fd < 0)
goto err;
@@ -151,6 +144,13 @@ int main(int argc, char **argv)
sleep(10);
+ exit_thread = true;
+ int ret = pthread_join(tid, NULL);
+ if (ret) {
+ printf("FAILED: pthread_join\n");
+ goto err;
+ }
+
if (verify_result(&g)) {
printf("FAILED: Wrong stats Expected %d calls, got %d\n",
g.ncalls, rx_callbacks);
diff --git a/tools/testing/selftests/bpf/test_xsk.sh b/tools/testing/selftests/bpf/test_xsk.sh
index 65aafe0003db..62db060298a4 100755
--- a/tools/testing/selftests/bpf/test_xsk.sh
+++ b/tools/testing/selftests/bpf/test_xsk.sh
@@ -241,4 +241,6 @@ done
if [ $failures -eq 0 ]; then
echo "All tests successful!"
+else
+ exit 1
fi
diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c
index 5e9f16683be5..16eb37e5bad6 100644
--- a/tools/testing/selftests/bpf/testing_helpers.c
+++ b/tools/testing/selftests/bpf/testing_helpers.c
@@ -399,7 +399,7 @@ int unload_module(const char *name, bool verbose)
return 0;
}
-int load_module(const char *path, bool verbose)
+static int __load_module(const char *path, const char *param_values, bool verbose)
{
int fd;
@@ -411,7 +411,7 @@ int load_module(const char *path, bool verbose)
fprintf(stdout, "Can't find %s kernel module: %d\n", path, -errno);
return -ENOENT;
}
- if (finit_module(fd, "", 0)) {
+ if (finit_module(fd, param_values, 0)) {
fprintf(stdout, "Failed to load %s into the kernel: %d\n", path, -errno);
close(fd);
return -EINVAL;
@@ -423,6 +423,16 @@ int load_module(const char *path, bool verbose)
return 0;
}
+int load_module_params(const char *path, const char *param_values, bool verbose)
+{
+ return __load_module(path, param_values, verbose);
+}
+
+int load_module(const char *path, bool verbose)
+{
+ return __load_module(path, "", verbose);
+}
+
int unload_bpf_testmod(bool verbose)
{
return unload_module("bpf_testmod", verbose);
diff --git a/tools/testing/selftests/bpf/testing_helpers.h b/tools/testing/selftests/bpf/testing_helpers.h
index 46d7f7089f63..eb20d3772218 100644
--- a/tools/testing/selftests/bpf/testing_helpers.h
+++ b/tools/testing/selftests/bpf/testing_helpers.h
@@ -39,6 +39,7 @@ int kern_sync_rcu(void);
int finit_module(int fd, const char *param_values, int flags);
int delete_module(const char *name, int flags);
int load_module(const char *path, bool verbose);
+int load_module_params(const char *path, const char *param_values, bool verbose);
int unload_module(const char *name, bool verbose);
static inline __u64 get_time_ns(void)
diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
index 81943c6254e6..eeaab7013ca2 100644
--- a/tools/testing/selftests/bpf/trace_helpers.c
+++ b/tools/testing/selftests/bpf/trace_helpers.c
@@ -17,7 +17,9 @@
#include <linux/limits.h>
#include <libelf.h>
#include <gelf.h>
+#include "bpf/hashmap.h"
#include "bpf/libbpf_internal.h"
+#include "bpf_util.h"
#define TRACEFS_PIPE "/sys/kernel/tracing/trace_pipe"
#define DEBUGFS_PIPE "/sys/kernel/debug/tracing/trace_pipe"
@@ -519,3 +521,235 @@ void read_trace_pipe(void)
{
read_trace_pipe_iter(trace_pipe_cb, NULL, 0);
}
+
+static size_t symbol_hash(long key, void *ctx __maybe_unused)
+{
+ return str_hash((const char *) key);
+}
+
+static bool symbol_equal(long key1, long key2, void *ctx __maybe_unused)
+{
+ return strcmp((const char *) key1, (const char *) key2) == 0;
+}
+
+static bool is_invalid_entry(char *buf, bool kernel)
+{
+ if (kernel && strchr(buf, '['))
+ return true;
+ if (!kernel && !strchr(buf, '['))
+ return true;
+ return false;
+}
+
+static const char * const trace_blacklist[] = {
+ "migrate_disable",
+ "migrate_enable",
+ "rcu_read_unlock_strict",
+ "preempt_count_add",
+ "preempt_count_sub",
+ "__rcu_read_lock",
+ "__rcu_read_unlock",
+ "bpf_get_numa_node_id",
+};
+
+static bool skip_entry(char *name)
+{
+ int i;
+
+ /*
+	 * We attach to almost all kernel functions, and some of them
+	 * cause 'suspicious RCU usage' warnings when fprobe is attached
+	 * to them. Filter out the current culprits: arch_cpu_idle,
+	 * default_idle and the rcu_* functions.
+ */
+ if (!strcmp(name, "arch_cpu_idle"))
+ return true;
+ if (!strcmp(name, "default_idle"))
+ return true;
+ if (!strncmp(name, "rcu_", 4))
+ return true;
+ if (!strcmp(name, "bpf_dispatcher_xdp_func"))
+ return true;
+ if (!strncmp(name, "__ftrace_invalid_address__",
+ sizeof("__ftrace_invalid_address__") - 1))
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(trace_blacklist); i++) {
+ if (!strcmp(name, trace_blacklist[i]))
+ return true;
+ }
+
+ return false;
+}
+
+/* Do comparison by ignoring '.llvm.<hash>' suffixes. */
+static int compare_name(const char *name1, const char *name2)
+{
+ const char *res1, *res2;
+ int len1, len2;
+
+ res1 = strstr(name1, ".llvm.");
+ res2 = strstr(name2, ".llvm.");
+ len1 = res1 ? res1 - name1 : strlen(name1);
+ len2 = res2 ? res2 - name2 : strlen(name2);
+
+ if (len1 == len2)
+ return strncmp(name1, name2, len1);
+ if (len1 < len2)
+ return strncmp(name1, name2, len1) <= 0 ? -1 : 1;
+ return strncmp(name1, name2, len2) >= 0 ? 1 : -1;
+}
+
+static int load_kallsyms_compare(const void *p1, const void *p2)
+{
+ return compare_name(((const struct ksym *)p1)->name, ((const struct ksym *)p2)->name);
+}
+
+static int search_kallsyms_compare(const void *p1, const struct ksym *p2)
+{
+ return compare_name(p1, p2->name);
+}
+
+int bpf_get_ksyms(char ***symsp, size_t *cntp, bool kernel)
+{
+ size_t cap = 0, cnt = 0;
+ char *name = NULL, *ksym_name, **syms = NULL;
+ struct hashmap *map;
+ struct ksyms *ksyms;
+ struct ksym *ks;
+ char buf[256];
+ FILE *f;
+ int err = 0;
+
+ ksyms = load_kallsyms_custom_local(load_kallsyms_compare);
+ if (!ksyms)
+ return -EINVAL;
+
+ /*
+	 * available_filter_functions contains many duplicates, but
+	 * other than that all listed symbols are usable for tracing.
+	 * Filter out the duplicates using hashmap__add(), which refuses
+	 * to add an already-existing entry.
+ */
+
+ if (access("/sys/kernel/tracing/trace", F_OK) == 0)
+ f = fopen("/sys/kernel/tracing/available_filter_functions", "r");
+ else
+ f = fopen("/sys/kernel/debug/tracing/available_filter_functions", "r");
+
+ if (!f)
+ return -EINVAL;
+
+ map = hashmap__new(symbol_hash, symbol_equal, NULL);
+ if (IS_ERR(map)) {
+ err = libbpf_get_error(map);
+ goto error;
+ }
+
+ while (fgets(buf, sizeof(buf), f)) {
+ if (is_invalid_entry(buf, kernel))
+ continue;
+
+ free(name);
+ if (sscanf(buf, "%ms$*[^\n]\n", &name) != 1)
+ continue;
+ if (skip_entry(name))
+ continue;
+
+ ks = search_kallsyms_custom_local(ksyms, name, search_kallsyms_compare);
+ if (!ks) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ ksym_name = ks->name;
+ err = hashmap__add(map, ksym_name, 0);
+ if (err == -EEXIST) {
+ err = 0;
+ continue;
+ }
+ if (err)
+ goto error;
+
+ err = libbpf_ensure_mem((void **) &syms, &cap,
+ sizeof(*syms), cnt + 1);
+ if (err)
+ goto error;
+
+ syms[cnt++] = ksym_name;
+ }
+
+ *symsp = syms;
+ *cntp = cnt;
+
+error:
+ free(name);
+ fclose(f);
+ hashmap__free(map);
+ if (err)
+ free(syms);
+ return err;
+}
+
+int bpf_get_addrs(unsigned long **addrsp, size_t *cntp, bool kernel)
+{
+ unsigned long *addr, *addrs, *tmp_addrs;
+ int err = 0, max_cnt, inc_cnt;
+ char *name = NULL;
+ size_t cnt = 0;
+ char buf[256];
+ FILE *f;
+
+ if (access("/sys/kernel/tracing/trace", F_OK) == 0)
+ f = fopen("/sys/kernel/tracing/available_filter_functions_addrs", "r");
+ else
+ f = fopen("/sys/kernel/debug/tracing/available_filter_functions_addrs", "r");
+
+ if (!f)
+ return -ENOENT;
+
+	/* In my local setup the number of entries is 50k+, so initially
+	 * allocate space to hold 64k entries. If 64k is not enough, grow
+	 * the array by 1k entries at a time.
+ */
+ max_cnt = 65536;
+ inc_cnt = 1024;
+ addrs = malloc(max_cnt * sizeof(long));
+ if (addrs == NULL) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ while (fgets(buf, sizeof(buf), f)) {
+ if (is_invalid_entry(buf, kernel))
+ continue;
+
+ free(name);
+ if (sscanf(buf, "%p %ms$*[^\n]\n", &addr, &name) != 2)
+ continue;
+ if (skip_entry(name))
+ continue;
+
+ if (cnt == max_cnt) {
+ max_cnt += inc_cnt;
+ tmp_addrs = realloc(addrs, max_cnt * sizeof(long));
+ if (!tmp_addrs) {
+ err = -ENOMEM;
+ goto error;
+ }
+ addrs = tmp_addrs;
+ }
+
+ addrs[cnt++] = (unsigned long)addr;
+ }
+
+ *addrsp = addrs;
+ *cntp = cnt;
+
+error:
+ free(name);
+ fclose(f);
+ if (err)
+ free(addrs);
+ return err;
+}
diff --git a/tools/testing/selftests/bpf/trace_helpers.h b/tools/testing/selftests/bpf/trace_helpers.h
index 2ce873c9f9aa..9437bdd4afa5 100644
--- a/tools/testing/selftests/bpf/trace_helpers.h
+++ b/tools/testing/selftests/bpf/trace_helpers.h
@@ -41,4 +41,7 @@ ssize_t get_rel_offset(uintptr_t addr);
int read_build_id(const char *path, char *build_id, size_t size);
+int bpf_get_ksyms(char ***symsp, size_t *cntp, bool kernel);
+int bpf_get_addrs(unsigned long **addrsp, size_t *cntp, bool kernel);
+
#endif
diff --git a/tools/testing/selftests/bpf/unpriv_helpers.c b/tools/testing/selftests/bpf/unpriv_helpers.c
index 220f6a963813..f997d7ec8fd0 100644
--- a/tools/testing/selftests/bpf/unpriv_helpers.c
+++ b/tools/testing/selftests/bpf/unpriv_helpers.c
@@ -1,15 +1,76 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <errno.h>
+#include <limits.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
+#include <sys/utsname.h>
#include <unistd.h>
#include <fcntl.h>
+#include <zlib.h>
#include "unpriv_helpers.h"
-static bool get_mitigations_off(void)
+static gzFile open_config(void)
+{
+ struct utsname uts;
+ char buf[PATH_MAX];
+ gzFile config;
+
+ if (uname(&uts)) {
+ perror("uname");
+ goto config_gz;
+ }
+
+ snprintf(buf, sizeof(buf), "/boot/config-%s", uts.release);
+ config = gzopen(buf, "rb");
+ if (config)
+ return config;
+ fprintf(stderr, "gzopen %s: %s\n", buf, strerror(errno));
+
+config_gz:
+ config = gzopen("/proc/config.gz", "rb");
+ if (!config)
+ perror("gzopen /proc/config.gz");
+ return config;
+}
+
+static int config_contains(const char *pat)
+{
+ const char *msg;
+ char buf[1024];
+ gzFile config;
+ int n, err;
+
+ config = open_config();
+ if (!config)
+ return -1;
+
+ for (;;) {
+ if (!gzgets(config, buf, sizeof(buf))) {
+ msg = gzerror(config, &err);
+ if (err == Z_ERRNO)
+ perror("gzgets /proc/config.gz");
+ else if (err != Z_OK)
+ fprintf(stderr, "gzgets /proc/config.gz: %s", msg);
+ gzclose(config);
+ return -1;
+ }
+ n = strlen(buf);
+ if (buf[n - 1] == '\n')
+ buf[n - 1] = 0;
+ if (strcmp(buf, pat) == 0) {
+ gzclose(config);
+ return 1;
+ }
+ }
+ gzclose(config);
+ return 0;
+}
+
+static bool cmdline_contains(const char *pat)
{
char cmdline[4096], *c;
int fd, ret = false;
@@ -27,7 +88,7 @@ static bool get_mitigations_off(void)
cmdline[sizeof(cmdline) - 1] = '\0';
for (c = strtok(cmdline, " \n"); c; c = strtok(NULL, " \n")) {
- if (strncmp(c, "mitigations=off", strlen(c)))
+ if (strncmp(c, pat, strlen(c)))
continue;
ret = true;
break;
@@ -37,8 +98,21 @@ out:
return ret;
}
+static int get_mitigations_off(void)
+{
+ int enabled_in_config;
+
+ if (cmdline_contains("mitigations=off"))
+ return 1;
+ enabled_in_config = config_contains("CONFIG_CPU_MITIGATIONS=y");
+ if (enabled_in_config < 0)
+ return -1;
+ return !enabled_in_config;
+}
+
bool get_unpriv_disabled(void)
{
+ int mitigations_off;
bool disabled;
char buf[2];
FILE *fd;
@@ -52,5 +126,19 @@ bool get_unpriv_disabled(void)
disabled = true;
}
- return disabled ? true : get_mitigations_off();
+ if (disabled)
+ return true;
+
+ /*
+ * Some unpriv tests rely on spectre mitigations being on.
+ * If mitigations are off or status can't be determined
+ * assume that unpriv tests are disabled.
+ */
+ mitigations_off = get_mitigations_off();
+ if (mitigations_off < 0) {
+ fprintf(stderr,
+			"Can't determine if mitigations are enabled, disabling unpriv tests.\n");
+ return true;
+ }
+ return mitigations_off;
}
diff --git a/tools/testing/selftests/bpf/usdt.h b/tools/testing/selftests/bpf/usdt.h
new file mode 100644
index 000000000000..549d1f774810
--- /dev/null
+++ b/tools/testing/selftests/bpf/usdt.h
@@ -0,0 +1,545 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * This single-header library defines a collection of variadic macros for
+ * defining and triggering USDTs (User Statically-Defined Tracepoints):
+ *
+ * - For USDTs without associated semaphore:
+ * USDT(group, name, args...)
+ *
+ * - For USDTs with implicit (transparent to the user) semaphore:
+ * USDT_WITH_SEMA(group, name, args...)
+ * USDT_IS_ACTIVE(group, name)
+ *
+ * - For USDTs with explicit (user-defined and provided) semaphore:
+ * USDT_WITH_EXPLICIT_SEMA(sema, group, name, args...)
+ * USDT_SEMA_IS_ACTIVE(sema)
+ *
+ * all of which emit a NOP instruction into the instruction stream, and so
+ * have *zero* overhead for the surrounding code. USDTs are identified by
+ * a combination of `group` and `name` identifiers, which is used by external
+ * tracing tooling (tracers) for identifying exact USDTs of interest.
+ *
+ * USDTs can have an associated (2-byte) activity counter (USDT semaphore),
+ * automatically maintained by the Linux kernel whenever a correctly written
+ * BPF-based tracer is attached to the USDT. This USDT semaphore can be used
+ * to check whether there is a need to do any extra data collection and
+ * processing for a given USDT, and otherwise avoid that extra work in the
+ * common case of the USDT not being traced (not "active").
+ *
+ * See documentation for USDT_WITH_SEMA()/USDT_IS_ACTIVE() or
+ * USDT_WITH_EXPLICIT_SEMA()/USDT_SEMA_IS_ACTIVE() APIs below for details on
+ * working with USDTs with implicitly or explicitly associated
+ * USDT semaphores, respectively.
+ *
+ * There is also some additional data recorded into an auxiliary note
+ * section. The data in the note section describes the operands, in terms of
+ * size and location, used by tracing tooling to know where to find USDT
+ * arguments. Each location is encoded as an assembler operand string.
+ * Tracing tools (bpftrace and BPF-based tracers, systemtap, etc) insert
+ * breakpoints on top of the nop, and decode the location operand-strings,
+ * like an assembler, to find the values being passed.
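+ *
+ * For example (illustrative only; the exact strings are compiler- and
+ * architecture-dependent), a probe with a signed int argument and a pointer
+ * argument might be described in the note as:
+ *
+ *	-4@%edi 8@-8(%rbp)
+ *
+ * i.e., a signed 4-byte value in register %edi and an 8-byte value stored at
+ * offset -8 from the frame pointer.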
+ *
+ * The operand strings are selected by the compiler for each operand.
+ * They are constrained by inline-assembler constraint codes. The default is:
+ *
+ * #define USDT_ARG_CONSTRAINT nor
+ *
+ * This is a good default if the operands tend to be integral and
+ * moderate in number (fewer than the number of registers). In other
+ * cases, the compiler may report "'asm' requires impossible reload" or
+ * similar. If that happens, consider simplifying the macro call (fewer
+ * and simpler operands), reducing optimization, or overriding the default
+ * constraint string via:
+ *
+ * #define USDT_ARG_CONSTRAINT g
+ * #include <usdt.h>
+ *
+ * For some historical description of USDT v3 format (the one used by this
+ * library and generally recognized and assumed by BPF-based tracing tools)
+ * see [0]. The more formal specification can be found at [1]. Additional
+ * argument constraints information can be found at [2].
+ *
+ * SystemTap's original sys/sdt.h implementation ([3]) was used as a base for
+ * this USDT library. The current implementation differs *a lot* in terms of
+ * exposed user API and general usability, which was the main goal and focus
+ * of the reimplementation work. Nevertheless, the underlying recorded USDT
+ * definitions are fully binary compatible, and any USDT-based tooling should
+ * work equally well with USDTs defined by either SystemTap's or this
+ * library's implementation.
+ *
+ * [0] https://ecos.sourceware.org/ml/systemtap/2010-q3/msg00145.html
+ * [1] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
+ * [2] https://gcc.gnu.org/onlinedocs/gcc/Constraints.html
+ * [3] https://sourceware.org/git/?p=systemtap.git;a=blob;f=includes/sys/sdt.h
+ */
+#ifndef __USDT_H
+#define __USDT_H
+
+/*
+ * Changelog:
+ *
+ * 0.1.0
+ * -----
+ * - Initial release
+ */
+#define USDT_MAJOR_VERSION 0
+#define USDT_MINOR_VERSION 1
+#define USDT_PATCH_VERSION 0
+
+/* C++20 and C23 added __VA_OPT__ as a standard replacement for non-standard `##__VA_ARGS__` extension */
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ > 201710L) || (defined(__cplusplus) && __cplusplus > 201703L)
+#define __usdt_va_opt 1
+#define __usdt_va_args(...) __VA_OPT__(,) __VA_ARGS__
+#else
+#define __usdt_va_args(...) , ##__VA_ARGS__
+#endif
+
+/*
+ * Trigger USDT with `group`:`name` identifier and pass through `args` as its
+ * arguments. Zero arguments are acceptable as well. No USDT semaphore is
+ * associated with this USDT.
+ *
+ * Such "semaphoreless" USDTs are commonly used when there is no extra data
+ * collection or processing needed to collect and prepare USDT arguments and
+ * they are just available in the surrounding code. USDT() macro will just
+ * record their locations in CPU registers or in memory for tracing tooling to
+ * be able to access them, if necessary.
+ */
+#ifdef __usdt_va_opt
+#define USDT(group, name, ...) \
+ __usdt_probe(group, name, __usdt_sema_none, 0 __VA_OPT__(,) __VA_ARGS__)
+#else
+#define USDT(group, name, ...) \
+ __usdt_probe(group, name, __usdt_sema_none, 0, ##__VA_ARGS__)
+#endif
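+
+/*
+ * A minimal usage sketch (illustrative only; the group, name, and arguments
+ * below are hypothetical):
+ *
+ *	#include "usdt.h"
+ *
+ *	void handle_request(int req_id, long latency_ns)
+ *	{
+ *		USDT(myapp, request__done, req_id, latency_ns);
+ *	}
+ *
+ * This compiles down to a single NOP plus an ELF note and costs nothing
+ * unless a tracer attaches to myapp:request__done.
+ */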
+
+/*
+ * Trigger USDT with `group`:`name` identifier and pass through `args` as its
+ * arguments. Zero arguments are acceptable as well. The USDT also gets an
+ * implicitly-defined associated USDT semaphore, which will be "activated" by
+ * tracing tooling and can be used to check whether the USDT is being actively
+ * observed.
+ *
+ * USDTs with a semaphore are commonly used when additional data collection
+ * and processing is needed to prepare USDT arguments, work that otherwise
+ * might not be necessary for the rest of the application logic. In such
+ * a case, the USDT semaphore can be used to avoid that unnecessary extra
+ * work. If the USDT is not traced (presumed to be the common situation), the
+ * associated USDT semaphore is "inactive", and so there is no need to waste
+ * resources preparing USDT arguments. Use USDT_IS_ACTIVE(group, name) to
+ * check whether the USDT is "active".
+ *
+ * N.B. There is an inherent (albeit short) gap between checking whether
+ * a USDT is active and triggering the corresponding USDT, in which an
+ * external tracer can attach and activate the USDT semaphore after the
+ * activity check. If such a race occurs, tracers might miss one USDT
+ * execution. Tracers are expected to accommodate this possibility, and it
+ * is expected not to be a problem for applications or tracers.
+ *
+ * N.B. Implicit USDT semaphore defined by USDT_WITH_SEMA() is contained
+ * within a single executable or shared library and is not shared outside
+ * them. I.e., if you use USDT_WITH_SEMA() with the same USDT group and name
+ * identifier across executable and shared library, it will work and won't
+ * conflict, per se, but will define independent USDT semaphores, one for each
+ * shared library/executable in which USDT_WITH_SEMA(group, name) is used.
+ * That is, if you attach to this USDT in one shared library (or executable),
+ * then only USDT semaphore within that shared library (or executable) will be
+ * updated by the kernel, while other libraries (or executable) will not see
+ * activated USDT semaphore. In short, it's best to use unique USDT group:name
+ * identifiers across different shared libraries (and, equivalently, between
+ * executable and shared library). This is advanced consideration and is
+ * rarely (if ever) seen in practice, but just to avoid surprises this is
+ * called out here. (Static libraries become a part of final executable, once
+ * linked by linker, so the above considerations don't apply to them.)
+ */
+#ifdef __usdt_va_opt
+#define USDT_WITH_SEMA(group, name, ...) \
+ __usdt_probe(group, name, \
+ __usdt_sema_implicit, __usdt_sema_name(group, name) \
+ __VA_OPT__(,) __VA_ARGS__)
+#else
+#define USDT_WITH_SEMA(group, name, ...) \
+ __usdt_probe(group, name, \
+ __usdt_sema_implicit, __usdt_sema_name(group, name), \
+ ##__VA_ARGS__)
+#endif
+
+struct usdt_sema { volatile unsigned short active; };
+
+/*
+ * Check if USDT with `group`:`name` identifier is "active" (i.e., whether it
+ * is attached to by external tracing tooling and is actively observed).
+ *
+ * This macro can be used to decide whether any additional and potentially
+ * expensive data collection or processing should be done to pass extra
+ * information into the given USDT. It is assumed that the USDT is triggered
+ * with the USDT_WITH_SEMA() macro, which implicitly defines the associated
+ * USDT semaphore. (If more control over the USDT semaphore is needed, see
+ * USDT_DEFINE_SEMA() and USDT_WITH_EXPLICIT_SEMA() macros below.)
+ *
+ * N.B. Such checks are necessarily racy and speculative. Between checking
+ * whether the USDT is active and triggering the USDT itself, the tracer can
+ * be detached with no notification. This race should be extremely rare, and
+ * the worst case is one-time wasted extra data collection and processing.
+ */
+#define USDT_IS_ACTIVE(group, name) ({ \
+ extern struct usdt_sema __usdt_sema_name(group, name) \
+ __usdt_asm_name(__usdt_sema_name(group, name)); \
+ __usdt_sema_implicit(__usdt_sema_name(group, name)); \
+ __usdt_sema_name(group, name).active > 0; \
+})
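+
+/*
+ * A typical pattern (illustrative only; identifiers are hypothetical):
+ * prepare expensive arguments only when a tracer is actually attached.
+ *
+ *	if (USDT_IS_ACTIVE(myapp, request__stats)) {
+ *		struct req_stats s = collect_expensive_stats();
+ *
+ *		USDT_WITH_SEMA(myapp, request__stats, s.p50_ns, s.p99_ns);
+ *	}
+ */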
+
+/*
+ * APIs for working with user-defined explicit USDT semaphores.
+ *
+ * This is a less commonly used, advanced API for use cases in which the user
+ * needs explicit control over a USDT semaphore instance (potentially shared
+ * across multiple USDTs). It is useful when there is a group of logically
+ * related USDTs that all need extra data collection and processing whenever
+ * any of them is "activated" (i.e., traced). In such a case, all the related
+ * USDTs are associated with the same shared USDT semaphore, defined with
+ * USDT_DEFINE_SEMA(), and the USDTs themselves are triggered with the
+ * USDT_WITH_EXPLICIT_SEMA() macro, which takes the USDT semaphore identifier
+ * as an extra parameter.
+ */
+
+/**
+ * Underlying C global variable name for user-defined USDT semaphore with
+ * `sema` identifier. Could be useful for debugging, but normally shouldn't be
+ * used explicitly.
+ */
+#define USDT_SEMA(sema) __usdt_sema_##sema
+
+/*
+ * Define storage for user-defined USDT semaphore `sema`.
+ *
+ * Should be used only once, in a non-header source file, to let the compiler
+ * allocate space for the semaphore variable, just like with any other global
+ * variable.
+ *
+ * This macro can be used anywhere a global variable declaration is
+ * allowed. Just like with global variable definitions, there should be only
+ * one definition of a user-defined USDT semaphore with a given `sema`
+ * identifier, otherwise the compiler or linker will complain about a
+ * duplicate variable definition.
+ *
+ * For C++, it is allowed to use USDT_DEFINE_SEMA() both in the global namespace
+ * and inside namespaces (including nested namespaces). Just make sure that
+ * USDT_DECLARE_SEMA() is placed within the namespace where this semaphore is
+ * referenced, or any of its parent namespaces, so the C++ language-level
+ * identifier is visible to the code that needs to reference the semaphore.
+ * At the lowest layer, USDT semaphores have global naming and visibility
+ * (they have a corresponding `__usdt_sema_<name>` symbol, which can be linked
+ * against from C or C++ code, if necessary). Putting USDT_DECLARE_SEMA()
+ * declarations into the global namespace is the simplest no-brainer
+ * solution. All these aspects are irrelevant for plain C, because
+ * C doesn't have namespaces and everything is always in the global namespace.
+ *
+ * N.B. Because USDT metadata is recorded in a non-allocatable ELF note
+ * section, it has limitations when it comes to relocations, which, in
+ * practice, means that it's not possible to correctly share USDT semaphores
+ * between the main executable and shared libraries, or even between multiple
+ * shared libraries. A USDT semaphore has to be contained to an individual
+ * shared library or executable to avoid unpleasant surprises with
+ * half-working USDT semaphores. We enforce this by marking semaphore ELF
+ * symbols as having hidden visibility. This is quite an advanced use case,
+ * and for most users it should have no consequences whatsoever.
+ */
+#define USDT_DEFINE_SEMA(sema) \
+ struct usdt_sema __usdt_sema_sec USDT_SEMA(sema) \
+ __usdt_asm_name(USDT_SEMA(sema)) \
+ __attribute__((visibility("hidden"))) = { 0 }
+
+/*
+ * Declare extern reference to user-defined USDT semaphore `sema`.
+ *
+ * Refers to a variable defined in another compilation unit by
+ * USDT_DEFINE_SEMA() and allows the same USDT semaphore to be used across
+ * multiple compilation units (i.e., .c and .cpp files).
+ *
+ * See USDT_DEFINE_SEMA() notes above for C++ language usage peculiarities.
+ */
+#define USDT_DECLARE_SEMA(sema) \
+ extern struct usdt_sema USDT_SEMA(sema) __usdt_asm_name(USDT_SEMA(sema))
+
+/*
+ * Check if user-defined USDT semaphore `sema` is "active" (i.e., whether it
+ * is attached to by external tracing tooling and is actively observed).
+ *
+ * This macro can be used to decide whether any additional and potentially
+ * expensive data collection or processing should be done to pass extra
+ * information into USDT(s) associated with USDT semaphore `sema`.
+ *
+ * N.B. Such checks are necessarily racy. Between checking the state of the
+ * USDT semaphore and triggering the associated USDT(s), a tracer might attach
+ * or detach. This race should be extremely rare, and the worst case is a
+ * one-time missed USDT event or wasted extra data collection and processing.
+ * USDT-using tracers should be written with this in mind; it is not a concern
+ * of the application defining USDTs with an associated semaphore.
+ */
+#define USDT_SEMA_IS_ACTIVE(sema) (USDT_SEMA(sema).active > 0)
+
+/*
+ * Invoke USDT specified by `group` and `name` identifiers and associate
+ * explicitly user-defined semaphore `sema` with it. Pass through `args` as
+ * USDT arguments. `args` are optional and zero arguments are acceptable.
+ *
+ * The semaphore is defined with the USDT_DEFINE_SEMA() macro, and whether it
+ * is active can be checked with USDT_SEMA_IS_ACTIVE().
+ */
+#ifdef __usdt_va_opt
+#define USDT_WITH_EXPLICIT_SEMA(sema, group, name, ...) \
+	__usdt_probe(group, name, __usdt_sema_explicit, USDT_SEMA(sema) __VA_OPT__(,) __VA_ARGS__)
+#else
+#define USDT_WITH_EXPLICIT_SEMA(sema, group, name, ...) \
+	__usdt_probe(group, name, __usdt_sema_explicit, USDT_SEMA(sema), ##__VA_ARGS__)
+#endif
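+
+/*
+ * A usage sketch for explicit semaphores (illustrative only; identifiers are
+ * hypothetical). One compilation unit defines the semaphore, others declare
+ * it, and a family of related USDTs shares the single activity counter:
+ *
+ *	// stats.c
+ *	USDT_DEFINE_SEMA(req_sema);
+ *
+ *	// handler.c
+ *	USDT_DECLARE_SEMA(req_sema);
+ *
+ *	if (USDT_SEMA_IS_ACTIVE(req_sema)) {
+ *		int id = lookup_request_id();
+ *
+ *		USDT_WITH_EXPLICIT_SEMA(req_sema, myapp, req__start, id);
+ *	}
+ */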
+
+/*
+ * Adjustable implementation aspects
+ */
+#ifndef USDT_ARG_CONSTRAINT
+#if defined __powerpc__
+#define USDT_ARG_CONSTRAINT nZr
+#elif defined __arm__
+#define USDT_ARG_CONSTRAINT g
+#elif defined __loongarch__
+#define USDT_ARG_CONSTRAINT nmr
+#else
+#define USDT_ARG_CONSTRAINT nor
+#endif
+#endif /* USDT_ARG_CONSTRAINT */
+
+#ifndef USDT_NOP
+#if defined(__ia64__) || defined(__s390__) || defined(__s390x__)
+#define USDT_NOP nop 0
+#else
+#define USDT_NOP nop
+#endif
+#endif /* USDT_NOP */
+
+/*
+ * Implementation details
+ */
+/* USDT name for implicitly-defined USDT semaphore, derived from group:name */
+#define __usdt_sema_name(group, name) __usdt_sema_##group##__##name
+/* ELF section into which USDT semaphores are put */
+#define __usdt_sema_sec __attribute__((section(".probes")))
+
+#define __usdt_concat(a, b) a ## b
+#define __usdt_apply(fn, n) __usdt_concat(fn, n)
+
+#ifndef __usdt_nth
+#define __usdt_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, N, ...) N
+#endif
+
+#ifndef __usdt_narg
+#ifdef __usdt_va_opt
+#define __usdt_narg(...) __usdt_nth(_ __VA_OPT__(,) __VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+#else
+#define __usdt_narg(...) __usdt_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+#endif
+#endif /* __usdt_narg */
+
+#define __usdt_hash #
+#define __usdt_str_(x) #x
+#define __usdt_str(x) __usdt_str_(x)
+
+#ifndef __usdt_asm_name
+#define __usdt_asm_name(name) __asm__(__usdt_str(name))
+#endif
+
+#define __usdt_asm0() "\n"
+#define __usdt_asm1(x) __usdt_str(x) "\n"
+#define __usdt_asm2(x, ...) __usdt_str(x) "," __usdt_asm1(__VA_ARGS__)
+#define __usdt_asm3(x, ...) __usdt_str(x) "," __usdt_asm2(__VA_ARGS__)
+#define __usdt_asm4(x, ...) __usdt_str(x) "," __usdt_asm3(__VA_ARGS__)
+#define __usdt_asm5(x, ...) __usdt_str(x) "," __usdt_asm4(__VA_ARGS__)
+#define __usdt_asm6(x, ...) __usdt_str(x) "," __usdt_asm5(__VA_ARGS__)
+#define __usdt_asm7(x, ...) __usdt_str(x) "," __usdt_asm6(__VA_ARGS__)
+#define __usdt_asm8(x, ...) __usdt_str(x) "," __usdt_asm7(__VA_ARGS__)
+#define __usdt_asm9(x, ...) __usdt_str(x) "," __usdt_asm8(__VA_ARGS__)
+#define __usdt_asm10(x, ...) __usdt_str(x) "," __usdt_asm9(__VA_ARGS__)
+#define __usdt_asm11(x, ...) __usdt_str(x) "," __usdt_asm10(__VA_ARGS__)
+#define __usdt_asm12(x, ...) __usdt_str(x) "," __usdt_asm11(__VA_ARGS__)
+#define __usdt_asm(...) __usdt_apply(__usdt_asm, __usdt_narg(__VA_ARGS__))(__VA_ARGS__)
+
+#ifdef __LP64__
+#define __usdt_asm_addr .8byte
+#else
+#define __usdt_asm_addr .4byte
+#endif
+
+#define __usdt_asm_strz_(x) __usdt_asm1(.asciz #x)
+#define __usdt_asm_strz(x) __usdt_asm_strz_(x)
+#define __usdt_asm_str_(x) __usdt_asm1(.ascii #x)
+#define __usdt_asm_str(x) __usdt_asm_str_(x)
+
+/* "semaphoreless" USDT case */
+#ifndef __usdt_sema_none
+#define __usdt_sema_none(sema)
+#endif
+
+/* implicitly defined __usdt_sema__group__name semaphore (using weak symbols) */
+#ifndef __usdt_sema_implicit
+#define __usdt_sema_implicit(sema) \
+ __asm__ __volatile__ ( \
+ __usdt_asm1(.ifndef sema) \
+ __usdt_asm3( .pushsection .probes, "aw", "progbits") \
+ __usdt_asm1( .weak sema) \
+ __usdt_asm1( .hidden sema) \
+ __usdt_asm1( .align 2) \
+ __usdt_asm1(sema:) \
+ __usdt_asm1( .zero 2) \
+ __usdt_asm2( .type sema, @object) \
+ __usdt_asm2( .size sema, 2) \
+ __usdt_asm1( .popsection) \
+ __usdt_asm1(.endif) \
+ );
+#endif
+
+/* externally defined semaphore using USDT_DEFINE_SEMA() and passed explicitly by user */
+#ifndef __usdt_sema_explicit
+#define __usdt_sema_explicit(sema) \
+ __asm__ __volatile__ ("" :: "m" (sema));
+#endif
+
+/* main USDT definition (nop and .note.stapsdt metadata) */
+#define __usdt_probe(group, name, sema_def, sema, ...) do { \
+ sema_def(sema) \
+ __asm__ __volatile__ ( \
+ __usdt_asm( 990: USDT_NOP) \
+ __usdt_asm3( .pushsection .note.stapsdt, "", "note") \
+ __usdt_asm1( .balign 4) \
+ __usdt_asm3( .4byte 992f-991f,994f-993f,3) \
+ __usdt_asm1(991: .asciz "stapsdt") \
+ __usdt_asm1(992: .balign 4) \
+ __usdt_asm1(993: __usdt_asm_addr 990b) \
+ __usdt_asm1( __usdt_asm_addr _.stapsdt.base) \
+ __usdt_asm1( __usdt_asm_addr sema) \
+ __usdt_asm_strz(group) \
+ __usdt_asm_strz(name) \
+ __usdt_asm_args(__VA_ARGS__) \
+ __usdt_asm1( .ascii "\0") \
+ __usdt_asm1(994: .balign 4) \
+ __usdt_asm1( .popsection) \
+ __usdt_asm1(.ifndef _.stapsdt.base) \
+ __usdt_asm5( .pushsection .stapsdt.base,"aG","progbits",.stapsdt.base,comdat)\
+ __usdt_asm1( .weak _.stapsdt.base) \
+ __usdt_asm1( .hidden _.stapsdt.base) \
+ __usdt_asm1(_.stapsdt.base:) \
+ __usdt_asm1( .space 1) \
+ __usdt_asm2( .size _.stapsdt.base, 1) \
+ __usdt_asm1( .popsection) \
+ __usdt_asm1(.endif) \
+ :: __usdt_asm_ops(__VA_ARGS__) \
+ ); \
+} while (0)
+
+/*
+ * NB: gdb PR24541 highlighted an unspecified corner of the sdt.h
+ * operand note format.
+ *
+ * The named register may be a longer or shorter (!) alias for the
+ * storage where the value in question is found. For example, on
+ * i386, a 64-bit value may be put in a register pair, and the stored
+ * register name would identify just one of them. Previously, gcc was
+ * asked to emit the %w[id] (16-bit alias of some registers holding
+ * operands), even when a wider 32-bit value was used.
+ *
+ * Bottom line: the byte-width given before the @ sign governs. If
+ * there is a mismatch between that width and that of the named
+ * register, then a sys/sdt.h note consumer may need to employ
+ * architecture-specific heuristics to figure out where the compiler
+ * has actually put the complete value.
+ */
+#if defined(__powerpc__) || defined(__powerpc64__)
+#define __usdt_argref(id) %I[id]%[id]
+#elif defined(__i386__)
+#define __usdt_argref(id) %k[id] /* gcc.gnu.org/PR80115 sourceware.org/PR24541 */
+#else
+#define __usdt_argref(id) %[id]
+#endif
+
+#define __usdt_asm_arg(n) __usdt_asm_str(%c[__usdt_asz##n]) \
+ __usdt_asm1(.ascii "@") \
+ __usdt_asm_str(__usdt_argref(__usdt_aval##n))
+
+#define __usdt_asm_args0 /* no arguments */
+#define __usdt_asm_args1 __usdt_asm_arg(1)
+#define __usdt_asm_args2 __usdt_asm_args1 __usdt_asm1(.ascii " ") __usdt_asm_arg(2)
+#define __usdt_asm_args3 __usdt_asm_args2 __usdt_asm1(.ascii " ") __usdt_asm_arg(3)
+#define __usdt_asm_args4 __usdt_asm_args3 __usdt_asm1(.ascii " ") __usdt_asm_arg(4)
+#define __usdt_asm_args5 __usdt_asm_args4 __usdt_asm1(.ascii " ") __usdt_asm_arg(5)
+#define __usdt_asm_args6 __usdt_asm_args5 __usdt_asm1(.ascii " ") __usdt_asm_arg(6)
+#define __usdt_asm_args7 __usdt_asm_args6 __usdt_asm1(.ascii " ") __usdt_asm_arg(7)
+#define __usdt_asm_args8 __usdt_asm_args7 __usdt_asm1(.ascii " ") __usdt_asm_arg(8)
+#define __usdt_asm_args9 __usdt_asm_args8 __usdt_asm1(.ascii " ") __usdt_asm_arg(9)
+#define __usdt_asm_args10 __usdt_asm_args9 __usdt_asm1(.ascii " ") __usdt_asm_arg(10)
+#define __usdt_asm_args11 __usdt_asm_args10 __usdt_asm1(.ascii " ") __usdt_asm_arg(11)
+#define __usdt_asm_args12 __usdt_asm_args11 __usdt_asm1(.ascii " ") __usdt_asm_arg(12)
+#define __usdt_asm_args(...) __usdt_apply(__usdt_asm_args, __usdt_narg(__VA_ARGS__))
+
+#define __usdt_is_arr(x) (__builtin_classify_type(x) == 14 || __builtin_classify_type(x) == 5)
+#define __usdt_arg_size(x) (__usdt_is_arr(x) ? sizeof(void *) : sizeof(x))
+
+/*
+ * We can't use __builtin_choose_expr() in C++, so fall back to table-based
+ * signedness determination for known types, utilizing template magic.
+ */
+#ifdef __cplusplus
+
+#define __usdt_is_signed(x) (!__usdt_is_arr(x) && __usdt_t<__typeof(x)>::is_signed)
+
+#include <cstddef>
+
+template<typename T> struct __usdt_t { static const bool is_signed = false; };
+template<typename A> struct __usdt_t<A[]> : public __usdt_t<A *> {};
+template<typename A, size_t N> struct __usdt_t<A[N]> : public __usdt_t<A *> {};
+
+#define __usdt_def_signed(T) \
+template<> struct __usdt_t<T> { static const bool is_signed = true; }; \
+template<> struct __usdt_t<const T> { static const bool is_signed = true; }; \
+template<> struct __usdt_t<volatile T> { static const bool is_signed = true; }; \
+template<> struct __usdt_t<const volatile T> { static const bool is_signed = true; }
+#define __usdt_maybe_signed(T) \
+template<> struct __usdt_t<T> { static const bool is_signed = (T)-1 < (T)1; }; \
+template<> struct __usdt_t<const T> { static const bool is_signed = (T)-1 < (T)1; }; \
+template<> struct __usdt_t<volatile T> { static const bool is_signed = (T)-1 < (T)1; }; \
+template<> struct __usdt_t<const volatile T> { static const bool is_signed = (T)-1 < (T)1; }
+
+__usdt_def_signed(signed char);
+__usdt_def_signed(short);
+__usdt_def_signed(int);
+__usdt_def_signed(long);
+__usdt_def_signed(long long);
+__usdt_maybe_signed(char);
+__usdt_maybe_signed(wchar_t);
+
+#else /* !__cplusplus */
+
+#define __usdt_is_inttype(x) (__builtin_classify_type(x) >= 1 && __builtin_classify_type(x) <= 4)
+#define __usdt_inttype(x) __typeof(__builtin_choose_expr(__usdt_is_inttype(x), (x), 0U))
+#define __usdt_is_signed(x) ((__usdt_inttype(x))-1 < (__usdt_inttype(x))1)
+
+#endif /* __cplusplus */
+
+#define __usdt_asm_op(n, x) \
+ [__usdt_asz##n] "n" ((__usdt_is_signed(x) ? (int)-1 : 1) * (int)__usdt_arg_size(x)), \
+ [__usdt_aval##n] __usdt_str(USDT_ARG_CONSTRAINT)(x)
+
+#define __usdt_asm_ops0() [__usdt_dummy] "g" (0)
+#define __usdt_asm_ops1(x) __usdt_asm_op(1, x)
+#define __usdt_asm_ops2(a,x) __usdt_asm_ops1(a), __usdt_asm_op(2, x)
+#define __usdt_asm_ops3(a,b,x) __usdt_asm_ops2(a,b), __usdt_asm_op(3, x)
+#define __usdt_asm_ops4(a,b,c,x) __usdt_asm_ops3(a,b,c), __usdt_asm_op(4, x)
+#define __usdt_asm_ops5(a,b,c,d,x) __usdt_asm_ops4(a,b,c,d), __usdt_asm_op(5, x)
+#define __usdt_asm_ops6(a,b,c,d,e,x) __usdt_asm_ops5(a,b,c,d,e), __usdt_asm_op(6, x)
+#define __usdt_asm_ops7(a,b,c,d,e,f,x) __usdt_asm_ops6(a,b,c,d,e,f), __usdt_asm_op(7, x)
+#define __usdt_asm_ops8(a,b,c,d,e,f,g,x) __usdt_asm_ops7(a,b,c,d,e,f,g), __usdt_asm_op(8, x)
+#define __usdt_asm_ops9(a,b,c,d,e,f,g,h,x) __usdt_asm_ops8(a,b,c,d,e,f,g,h), __usdt_asm_op(9, x)
+#define __usdt_asm_ops10(a,b,c,d,e,f,g,h,i,x) __usdt_asm_ops9(a,b,c,d,e,f,g,h,i), __usdt_asm_op(10, x)
+#define __usdt_asm_ops11(a,b,c,d,e,f,g,h,i,j,x) __usdt_asm_ops10(a,b,c,d,e,f,g,h,i,j), __usdt_asm_op(11, x)
+#define __usdt_asm_ops12(a,b,c,d,e,f,g,h,i,j,k,x) __usdt_asm_ops11(a,b,c,d,e,f,g,h,i,j,k), __usdt_asm_op(12, x)
+#define __usdt_asm_ops(...) __usdt_apply(__usdt_asm_ops, __usdt_narg(__VA_ARGS__))(__VA_ARGS__)
+
+#endif /* __USDT_H */
diff --git a/tools/testing/selftests/bpf/verifier/bpf_st_mem.c b/tools/testing/selftests/bpf/verifier/bpf_st_mem.c
index b616575c3b00..ce13002c7a19 100644
--- a/tools/testing/selftests/bpf/verifier/bpf_st_mem.c
+++ b/tools/testing/selftests/bpf/verifier/bpf_st_mem.c
@@ -93,7 +93,7 @@
.expected_attach_type = BPF_SK_LOOKUP,
.result = VERBOSE_ACCEPT,
.runs = -1,
- .errstr = "0: (7a) *(u64 *)(r10 -8) = -44 ; R10=fp0 fp-8_w=-44\
+ .errstr = "0: (7a) *(u64 *)(r10 -8) = -44 ; R10=fp0 fp-8=-44\
2: (c5) if r0 s< 0x0 goto pc+2\
- R0_w=-44",
+ R0=-44",
},
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index 18596ae0b0c1..c8d640802cce 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -1375,7 +1375,7 @@
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- /* fetch secound map_value_ptr from the stack */
+ /* fetch second map_value_ptr from the stack */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
/* write into map value */
@@ -1439,7 +1439,7 @@
/* second time with fp-16 */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
- /* fetch secound map_value_ptr from the stack */
+ /* fetch second map_value_ptr from the stack */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
@@ -1493,7 +1493,7 @@
/* second time with fp-16 */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- /* fetch secound map_value_ptr from the stack */
+ /* fetch second map_value_ptr from the stack */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
@@ -2380,7 +2380,7 @@
*/
BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1),
BPF_MOV64_REG(BPF_REG_9, BPF_REG_8),
- /* r9 = *r9 ; verifier get's to this point via two paths:
+ /* r9 = *r9 ; verifier gets to this point via two paths:
* ; (I) one including r9 = r8, verified first;
* ; (II) one excluding r9 = r8, verified next.
* ; After load of *r9 to r9 the frame[0].fp[-24].id == r9.id.
@@ -2409,3 +2409,27 @@
.errstr_unpriv = "",
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
+{
+ "calls: several args with ref_obj_id",
+ .insns = {
+ /* Reserve at least sizeof(struct iphdr) bytes in the ring buffer.
+ * With a smaller size, the verifier would reject the call to
+ * bpf_tcp_raw_gen_syncookie_ipv4 before we can reach the
+ * ref_obj_id error.
+ */
+ BPF_MOV64_IMM(BPF_REG_2, 20),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
+ /* if r0 == 0 goto <exit> */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tcp_raw_gen_syncookie_ipv4),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_ringbuf = { 2 },
+ .result = REJECT,
+ .errstr = "more than one arg with ref_obj_id",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/dead_code.c b/tools/testing/selftests/bpf/verifier/dead_code.c
index ee454327e5c6..77207b498c6f 100644
--- a/tools/testing/selftests/bpf/verifier/dead_code.c
+++ b/tools/testing/selftests/bpf/verifier/dead_code.c
@@ -2,14 +2,13 @@
"dead code: start",
.insns = {
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* unpriv: nospec (inserted to prevent "R9 !read_ok") */
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 7),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, -4),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "R9 !read_ok",
- .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 7,
},
diff --git a/tools/testing/selftests/bpf/verifier/jmp32.c b/tools/testing/selftests/bpf/verifier/jmp32.c
index 43776f6f92f4..91d83e9cb148 100644
--- a/tools/testing/selftests/bpf/verifier/jmp32.c
+++ b/tools/testing/selftests/bpf/verifier/jmp32.c
@@ -84,11 +84,10 @@
BPF_JMP32_IMM(BPF_JSET, BPF_REG_7, 0x10, 1),
BPF_EXIT_INSN(),
BPF_JMP32_IMM(BPF_JGE, BPF_REG_7, 0x10, 1),
+ /* unpriv: nospec (inserted to prevent "R9 !read_ok") */
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "R9 !read_ok",
- .result_unpriv = REJECT,
.result = ACCEPT,
},
{
@@ -149,11 +148,10 @@
BPF_JMP32_IMM(BPF_JEQ, BPF_REG_7, 0x10, 1),
BPF_EXIT_INSN(),
BPF_JMP32_IMM(BPF_JSGE, BPF_REG_7, 0xf, 1),
+ /* unpriv: nospec (inserted to prevent "R9 !read_ok") */
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "R9 !read_ok",
- .result_unpriv = REJECT,
.result = ACCEPT,
},
{
@@ -214,11 +212,10 @@
BPF_JMP32_IMM(BPF_JNE, BPF_REG_7, 0x10, 1),
BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x10, 1),
BPF_EXIT_INSN(),
+ /* unpriv: nospec (inserted to prevent "R9 !read_ok") */
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "R9 !read_ok",
- .result_unpriv = REJECT,
.result = ACCEPT,
},
{
@@ -283,11 +280,10 @@
BPF_JMP32_REG(BPF_JGE, BPF_REG_7, BPF_REG_8, 1),
BPF_EXIT_INSN(),
BPF_JMP32_IMM(BPF_JGE, BPF_REG_7, 0x7ffffff0, 1),
+ /* unpriv: nospec (inserted to prevent "R0 invalid mem access 'scalar'") */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "R0 invalid mem access 'scalar'",
- .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
@@ -354,11 +350,10 @@
BPF_JMP32_REG(BPF_JGT, BPF_REG_7, BPF_REG_8, 1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 0x7ffffff0, 1),
+ /* unpriv: nospec (inserted to prevent "R0 invalid mem access 'scalar'") */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "R0 invalid mem access 'scalar'",
- .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
@@ -425,11 +420,10 @@
BPF_JMP32_REG(BPF_JLE, BPF_REG_7, BPF_REG_8, 1),
BPF_EXIT_INSN(),
BPF_JMP32_IMM(BPF_JLE, BPF_REG_7, 0x7ffffff0, 1),
+ /* unpriv: nospec (inserted to prevent "R0 invalid mem access 'scalar'") */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "R0 invalid mem access 'scalar'",
- .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
@@ -496,11 +490,10 @@
BPF_JMP32_REG(BPF_JLT, BPF_REG_7, BPF_REG_8, 1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0x7ffffff0, 1),
+ /* unpriv: nospec (inserted to prevent "R0 invalid mem access 'scalar'") */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "R0 invalid mem access 'scalar'",
- .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
@@ -567,11 +560,10 @@
BPF_JMP32_REG(BPF_JSGE, BPF_REG_7, BPF_REG_8, 1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0x7ffffff0, 1),
+ /* unpriv: nospec (inserted to prevent "R0 invalid mem access 'scalar'") */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "R0 invalid mem access 'scalar'",
- .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
@@ -638,11 +630,10 @@
BPF_JMP32_REG(BPF_JSGT, BPF_REG_7, BPF_REG_8, 1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, -2, 1),
+ /* unpriv: nospec (inserted to prevent "R0 invalid mem access 'scalar'") */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "R0 invalid mem access 'scalar'",
- .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
@@ -709,11 +700,10 @@
BPF_JMP32_REG(BPF_JSLE, BPF_REG_7, BPF_REG_8, 1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JSLE, BPF_REG_7, 0x7ffffff0, 1),
+ /* unpriv: nospec (inserted to prevent "R0 invalid mem access 'scalar'") */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "R0 invalid mem access 'scalar'",
- .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
@@ -780,11 +770,10 @@
BPF_JMP32_REG(BPF_JSLT, BPF_REG_7, BPF_REG_8, 1),
BPF_EXIT_INSN(),
BPF_JMP32_IMM(BPF_JSLT, BPF_REG_7, -1, 1),
+ /* unpriv: nospec (inserted to prevent "R0 invalid mem access 'scalar'") */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "R0 invalid mem access 'scalar'",
- .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
diff --git a/tools/testing/selftests/bpf/verifier/jset.c b/tools/testing/selftests/bpf/verifier/jset.c
index 11fc68da735e..e901eefd774a 100644
--- a/tools/testing/selftests/bpf/verifier/jset.c
+++ b/tools/testing/selftests/bpf/verifier/jset.c
@@ -78,12 +78,11 @@
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
+ /* unpriv: nospec (inserted to prevent "R9 !read_ok") */
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .errstr_unpriv = "R9 !read_ok",
- .result_unpriv = REJECT,
.retval = 1,
.result = ACCEPT,
},
@@ -136,13 +135,12 @@
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_ALU64_IMM(BPF_OR, BPF_REG_0, 2),
BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 3, 1),
+ /* unpriv: nospec (inserted to prevent "R9 !read_ok") */
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .errstr_unpriv = "R9 !read_ok",
- .result_unpriv = REJECT,
.result = ACCEPT,
},
{
@@ -154,16 +152,16 @@
BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xff),
BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0xf0, 3),
BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 0x10, 1),
+ /* unpriv: nospec (inserted to prevent "R9 !read_ok") */
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0x10, 1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0x10, 1),
+ /* unpriv: nospec (inserted to prevent "R9 !read_ok") */
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .errstr_unpriv = "R9 !read_ok",
- .result_unpriv = REJECT,
.result = ACCEPT,
},
diff --git a/tools/testing/selftests/bpf/verify_sig_setup.sh b/tools/testing/selftests/bpf/verify_sig_setup.sh
index f2cac42298ba..09179fb551f0 100755
--- a/tools/testing/selftests/bpf/verify_sig_setup.sh
+++ b/tools/testing/selftests/bpf/verify_sig_setup.sh
@@ -32,7 +32,7 @@ usage()
exit 1
}
-setup()
+genkey()
{
local tmp_dir="$1"
@@ -45,9 +45,14 @@ setup()
openssl x509 -in ${tmp_dir}/signing_key.pem -out \
${tmp_dir}/signing_key.der -outform der
+}
- key_id=$(cat ${tmp_dir}/signing_key.der | keyctl padd asymmetric ebpf_testing_key @s)
+setup()
+{
+ local tmp_dir="$1"
+ genkey "${tmp_dir}"
+ key_id=$(cat ${tmp_dir}/signing_key.der | keyctl padd asymmetric ebpf_testing_key @s)
keyring_id=$(keyctl newring ebpf_testing_keyring @s)
keyctl link $key_id $keyring_id
}
@@ -105,6 +110,8 @@ main()
if [[ "${action}" == "setup" ]]; then
setup "${tmp_dir}"
+ elif [[ "${action}" == "genkey" ]]; then
+ genkey "${tmp_dir}"
elif [[ "${action}" == "cleanup" ]]; then
cleanup "${tmp_dir}"
elif [[ "${action}" == "fsverity-create-sign" ]]; then
diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c
index b2bb20b00952..e962f133250c 100644
--- a/tools/testing/selftests/bpf/veristat.c
+++ b/tools/testing/selftests/bpf/veristat.c
@@ -23,6 +23,7 @@
#include <float.h>
#include <math.h>
#include <limits.h>
+#include <assert.h>
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
@@ -49,6 +50,7 @@ enum stat_id {
STACK,
PROG_TYPE,
ATTACH_TYPE,
+ MEMORY_PEAK,
FILE_NAME,
PROG_NAME,
@@ -155,16 +157,36 @@ struct filter {
bool abs;
};
-struct var_preset {
- char *name;
+struct rvalue {
enum { INTEGRAL, ENUMERATOR } type;
union {
long long ivalue;
char *svalue;
};
+};
+
+struct field_access {
+ enum { FIELD_NAME, ARRAY_INDEX } type;
+ union {
+ char *name;
+ struct rvalue index;
+ };
+};
+
+struct var_preset {
+ struct field_access *atoms;
+ int atom_count;
+ char *full_name;
+ struct rvalue value;
bool applied;
};
+enum dump_mode {
+ DUMP_NONE = 0,
+ DUMP_XLATED = 1,
+ DUMP_JITED = 2,
+};
+
static struct env {
char **filenames;
int filename_cnt;
@@ -208,6 +230,10 @@ static struct env {
int top_src_lines;
struct var_preset *presets;
int npresets;
+ char orig_cgroup[PATH_MAX];
+ char stat_cgroup[PATH_MAX];
+ int memory_peak_fd;
+ __u32 dump_mode;
} env;
static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va_list args)
@@ -219,6 +245,22 @@ static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va
return vfprintf(stderr, format, args);
}
+#define log_errno(fmt, ...) log_errno_aux(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
+
+__attribute__((format(printf, 3, 4)))
+static int log_errno_aux(const char *file, int line, const char *fmt, ...)
+{
+ int err = -errno;
+ va_list ap;
+
+ va_start(ap, fmt);
+ fprintf(stderr, "%s:%d: ", file, line);
+ vfprintf(stderr, fmt, ap);
+	fprintf(stderr, " failed with error '%s'.\n", strerror(-err));
+ va_end(ap);
+ return err;
+}
+
#ifndef VERISTAT_VERSION
#define VERISTAT_VERSION "<kernel>"
#endif
@@ -236,6 +278,7 @@ const char argp_program_doc[] =
enum {
OPT_LOG_FIXED = 1000,
OPT_LOG_SIZE = 1001,
+ OPT_DUMP = 1002,
};
static const struct argp_option opts[] = {
@@ -260,6 +303,7 @@ static const struct argp_option opts[] = {
"Force BPF verifier failure on register invariant violation (BPF_F_TEST_REG_INVARIANTS program flag)" },
{ "top-src-lines", 'S', "N", 0, "Emit N most frequent source code lines" },
{ "set-global-vars", 'G', "GLOBAL", 0, "Set global variables provided in the expression, for example \"var1 = 1\"" },
+ { "dump", OPT_DUMP, "DUMP_MODE", OPTION_ARG_OPTIONAL, "Print BPF program dump (xlated, jited)" },
{},
};
@@ -344,6 +388,7 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
fprintf(stderr, "invalid top N specifier: %s\n", arg);
argp_usage(state);
}
+ break;
case 'C':
env.comparison_mode = true;
break;
@@ -391,6 +436,16 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
return err;
}
break;
+ case OPT_DUMP:
+ if (!arg || strcasecmp(arg, "xlated") == 0) {
+ env.dump_mode |= DUMP_XLATED;
+ } else if (strcasecmp(arg, "jited") == 0) {
+ env.dump_mode |= DUMP_JITED;
+ } else {
+ fprintf(stderr, "Unrecognized dump mode '%s'\n", arg);
+ return -EINVAL;
+ }
+ break;
default:
return ARGP_ERR_UNKNOWN;
}
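Per the handling above, a bare --dump selects the xlated dump and --dump=jited the JITed one; since the modes are OR-ed into env.dump_mode, --dump=xlated --dump=jited requests both. Note that with argp's OPTION_ARG_OPTIONAL the value has to be attached with '=', e.g. veristat --dump=jited prog.bpf.o (object name illustrative).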
@@ -734,13 +789,13 @@ cleanup:
}
static const struct stat_specs default_csv_output_spec = {
- .spec_cnt = 14,
+ .spec_cnt = 15,
.ids = {
FILE_NAME, PROG_NAME, VERDICT, DURATION,
TOTAL_INSNS, TOTAL_STATES, PEAK_STATES,
MAX_STATES_PER_INSN, MARK_READ_MAX_LEN,
SIZE, JITED_SIZE, PROG_TYPE, ATTACH_TYPE,
- STACK,
+ STACK, MEMORY_PEAK,
},
};
@@ -781,6 +836,7 @@ static struct stat_def {
[STACK] = {"Stack depth", {"stack_depth", "stack"}, },
[PROG_TYPE] = { "Program type", {"prog_type"}, },
[ATTACH_TYPE] = { "Attach type", {"attach_type", }, },
+ [MEMORY_PEAK] = { "Peak memory (MiB)", {"mem_peak", }, },
};
static bool parse_stat_id_var(const char *name, size_t len, int *id,
@@ -854,6 +910,18 @@ static bool is_desc_sym(char c)
return c == 'v' || c == 'V' || c == '.' || c == '!' || c == '_';
}
+static char *rtrim(char *str)
+{
+ int i;
+
+ for (i = strlen(str) - 1; i > 0; --i) {
+ if (!isspace(str[i]))
+ break;
+ str[i] = '\0';
+ }
+ return str;
+}
+
static int parse_stat(const char *stat_name, struct stat_specs *specs)
{
int id;
@@ -1182,6 +1250,7 @@ static void fixup_obj(struct bpf_object *obj, struct bpf_program *prog, const ch
case BPF_MAP_TYPE_TASK_STORAGE:
case BPF_MAP_TYPE_INODE_STORAGE:
case BPF_MAP_TYPE_CGROUP_STORAGE:
+ case BPF_MAP_TYPE_CGRP_STORAGE:
break;
case BPF_MAP_TYPE_STRUCT_OPS:
mask_unrelated_struct_ops_progs(obj, map, prog);
@@ -1278,16 +1347,273 @@ static int max_verifier_log_size(void)
return log_size;
}
+static bool output_stat_enabled(int id)
+{
+ int i;
+
+ for (i = 0; i < env.output_spec.spec_cnt; i++)
+ if (env.output_spec.ids[i] == id)
+ return true;
+ return false;
+}
+
+__attribute__((format(printf, 2, 3)))
+static int write_one_line(const char *file, const char *fmt, ...)
+{
+ int err, saved_errno;
+ va_list ap;
+ FILE *f;
+
+ f = fopen(file, "w");
+ if (!f)
+ return -1;
+
+ va_start(ap, fmt);
+ errno = 0;
+ err = vfprintf(f, fmt, ap);
+ saved_errno = errno;
+ va_end(ap);
+ fclose(f);
+ errno = saved_errno;
+ return err < 0 ? -1 : 0;
+}
+
+__attribute__((format(scanf, 3, 4)))
+static int scanf_one_line(const char *file, int fields_expected, const char *fmt, ...)
+{
+ int res = 0, saved_errno = 0;
+ char *line = NULL;
+ size_t line_len;
+ va_list ap;
+ FILE *f;
+
+ f = fopen(file, "r");
+ if (!f)
+ return -1;
+
+ va_start(ap, fmt);
+ while (getline(&line, &line_len, f) > 0) {
+ res = vsscanf(line, fmt, ap);
+ if (res == fields_expected)
+ goto out;
+ }
+ if (ferror(f)) {
+ saved_errno = errno;
+ res = -1;
+ }
+
+out:
+ va_end(ap);
+ free(line);
+ fclose(f);
+ errno = saved_errno;
+ return res;
+}
+
+static void destroy_stat_cgroup(void)
+{
+ char buf[PATH_MAX];
+ int err;
+
+ close(env.memory_peak_fd);
+
+ if (env.orig_cgroup[0]) {
+ snprintf(buf, sizeof(buf), "%s/cgroup.procs", env.orig_cgroup);
+ err = write_one_line(buf, "%d\n", getpid());
+ if (err < 0)
+ log_errno("moving self to original cgroup %s\n", env.orig_cgroup);
+ }
+
+ if (env.stat_cgroup[0]) {
+ err = rmdir(env.stat_cgroup);
+ if (err < 0)
+ log_errno("deletion of cgroup %s", env.stat_cgroup);
+ }
+
+ env.memory_peak_fd = -1;
+ env.orig_cgroup[0] = 0;
+ env.stat_cgroup[0] = 0;
+}
+
+/*
+ * Creates a cgroup at /sys/fs/cgroup/veristat-accounting-<pid>,
+ * and moves the current process into it.
+ */
+static void create_stat_cgroup(void)
+{
+ char cgroup_fs_mount[4096];
+ char buf[4096];
+ int err;
+
+ env.memory_peak_fd = -1;
+
+ if (!output_stat_enabled(MEMORY_PEAK))
+ return;
+
+ err = scanf_one_line("/proc/self/mounts", 2, "%*s %4095s cgroup2 %4095s",
+ cgroup_fs_mount, buf);
+ if (err != 2) {
+ if (err < 0)
+ log_errno("reading /proc/self/mounts");
+ else if (!env.quiet)
+ fprintf(stderr, "Can't find cgroupfs v2 mount point.\n");
+ goto err_out;
+ }
+
+ /* cgroup-v2.rst promises the line "0::<group>" for cgroups v2 */
+ err = scanf_one_line("/proc/self/cgroup", 1, "0::%4095s", buf);
+ if (err != 1) {
+ if (err < 0)
+ log_errno("reading /proc/self/cgroup");
+ else if (!env.quiet)
+ fprintf(stderr, "Can't infer veristat process cgroup.");
+ goto err_out;
+ }
+
+ snprintf(env.orig_cgroup, sizeof(env.orig_cgroup), "%s/%s", cgroup_fs_mount, buf);
+
+ snprintf(buf, sizeof(buf), "%s/veristat-accounting-%d", cgroup_fs_mount, getpid());
+ err = mkdir(buf, 0777);
+ if (err < 0) {
+ log_errno("creation of cgroup %s", buf);
+ goto err_out;
+ }
+ strcpy(env.stat_cgroup, buf);
+
+ snprintf(buf, sizeof(buf), "%s/cgroup.procs", env.stat_cgroup);
+ err = write_one_line(buf, "%d\n", getpid());
+ if (err < 0) {
+ log_errno("entering cgroup %s", buf);
+ goto err_out;
+ }
+
+ snprintf(buf, sizeof(buf), "%s/memory.peak", env.stat_cgroup);
+ env.memory_peak_fd = open(buf, O_RDWR | O_APPEND);
+ if (env.memory_peak_fd < 0) {
+ log_errno("opening %s", buf);
+ goto err_out;
+ }
+
+ return;
+
+err_out:
+ if (!env.quiet)
+ fprintf(stderr, "Memory usage metric unavailable.\n");
+ destroy_stat_cgroup();
+}
+
+/* Current value of /sys/fs/cgroup/veristat-accounting-<pid>/memory.peak */
+static long cgroup_memory_peak(void)
+{
+ long err, memory_peak;
+ char buf[32];
+
+ if (env.memory_peak_fd < 0)
+ return -1;
+
+ err = pread(env.memory_peak_fd, buf, sizeof(buf) - 1, 0);
+ if (err <= 0) {
+ log_errno("pread(%s/memory.peak)", env.stat_cgroup);
+ return -1;
+ }
+
+ buf[err] = 0;
+ errno = 0;
+ memory_peak = strtoll(buf, NULL, 10);
+ if (errno) {
+ log_errno("%s/memory.peak:strtoll(%s)", env.stat_cgroup, buf);
+ return -1;
+ }
+
+ return memory_peak;
+}
+
+static int reset_stat_cgroup(void)
+{
+ char buf[] = "r\n";
+ int err;
+
+ if (env.memory_peak_fd < 0)
+ return -1;
+
+ err = pwrite(env.memory_peak_fd, buf, sizeof(buf), 0);
+ if (err <= 0) {
+ log_errno("pwrite(%s/memory.peak)", env.stat_cgroup);
+ return -1;
+ }
+ return 0;
+}
+
+static int parse_rvalue(const char *val, struct rvalue *rvalue)
+{
+ long long value;
+ char *val_end;
+
+ if (val[0] == '-' || isdigit(val[0])) {
+ /* must be a number */
+ errno = 0;
+ value = strtoll(val, &val_end, 0);
+ if (errno == ERANGE) {
+ errno = 0;
+ value = strtoull(val, &val_end, 0);
+ }
+ if (errno || *val_end != '\0') {
+ fprintf(stderr, "Failed to parse value '%s'\n", val);
+ return -EINVAL;
+ }
+ rvalue->ivalue = value;
+ rvalue->type = INTEGRAL;
+ } else {
+ /* if not a number, consider it enum value */
+ rvalue->svalue = strdup(val);
+ if (!rvalue->svalue)
+ return -ENOMEM;
+ rvalue->type = ENUMERATOR;
+ }
+ return 0;
+}
+
+static void dump(__u32 prog_id, enum dump_mode mode, const char *file_name, const char *prog_name)
+{
+ char command[64], buf[4096];
+ FILE *fp;
+ int status;
+
+ status = system("command -v bpftool > /dev/null 2>&1");
+ if (status != 0) {
+ fprintf(stderr, "bpftool is not available, can't print program dump\n");
+ return;
+ }
+ snprintf(command, sizeof(command), "bpftool prog dump %s id %u",
+ mode == DUMP_JITED ? "jited" : "xlated", prog_id);
+ fp = popen(command, "r");
+ if (!fp) {
+ fprintf(stderr, "bpftool failed with error: %d\n", errno);
+ return;
+ }
+
+ printf("DUMP (%s) %s/%s:\n", mode == DUMP_JITED ? "JITED" : "XLATED", file_name, prog_name);
+ while (fgets(buf, sizeof(buf), fp))
+ fputs(buf, stdout);
+ fprintf(stdout, "\n");
+
+ if (ferror(fp))
+ fprintf(stderr, "Failed to dump BPF prog with error: %d\n", errno);
+
+ pclose(fp);
+}
+
static int process_prog(const char *filename, struct bpf_object *obj, struct bpf_program *prog)
{
const char *base_filename = basename(strdupa(filename));
const char *prog_name = bpf_program__name(prog);
+ long mem_peak_a, mem_peak_b, mem_peak = -1;
char *buf;
int buf_sz, log_level;
struct verif_stats *stats;
struct bpf_prog_info info;
__u32 info_len = sizeof(info);
- int err = 0;
+ int err = 0, cgroup_err;
void *tmp;
int fd;
@@ -1332,7 +1658,15 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf
if (env.force_reg_invariants)
bpf_program__set_flags(prog, bpf_program__flags(prog) | BPF_F_TEST_REG_INVARIANTS);
- err = bpf_object__load(obj);
+ err = bpf_object__prepare(obj);
+ if (!err) {
+ cgroup_err = reset_stat_cgroup();
+ mem_peak_a = cgroup_memory_peak();
+ err = bpf_object__load(obj);
+ mem_peak_b = cgroup_memory_peak();
+ if (!cgroup_err && mem_peak_a >= 0 && mem_peak_b >= 0)
+ mem_peak = mem_peak_b - mem_peak_a;
+ }
env.progs_processed++;
stats->file_name = strdup(base_filename);
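Since the object is prepared first, only bpf_object__load() itself runs between the two memory.peak reads, and reset_stat_cgroup() rewinds the counter beforehand (writing to memory.peak resets it on kernels that support the reset). A worked example: if memory.peak reads 4194304 bytes before the load and 23068672 after, mem_peak is 18874368 bytes, which the MiB conversion below reports as 18.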
@@ -1341,11 +1675,17 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf
stats->stats[SIZE] = bpf_program__insn_cnt(prog);
stats->stats[PROG_TYPE] = bpf_program__type(prog);
stats->stats[ATTACH_TYPE] = bpf_program__expected_attach_type(prog);
+ stats->stats[MEMORY_PEAK] = mem_peak < 0 ? -1 : mem_peak / (1024 * 1024);
memset(&info, 0, info_len);
fd = bpf_program__fd(prog);
- if (fd > 0 && bpf_prog_get_info_by_fd(fd, &info, &info_len) == 0)
+ if (fd > 0 && bpf_prog_get_info_by_fd(fd, &info, &info_len) == 0) {
stats->stats[JITED_SIZE] = info.jited_prog_len;
+ if (env.dump_mode & DUMP_JITED)
+ dump(info.id, DUMP_JITED, base_filename, prog_name);
+ if (env.dump_mode & DUMP_XLATED)
+ dump(info.id, DUMP_XLATED, base_filename, prog_name);
+ }
parse_verif_log(buf, buf_sz, stats);
@@ -1361,15 +1701,74 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf
free(buf);
return 0;
-};
+}
+
+static int append_preset_atom(struct var_preset *preset, char *value, bool is_index)
+{
+ struct field_access *tmp;
+ int i = preset->atom_count;
+ int err;
+
+ tmp = reallocarray(preset->atoms, i + 1, sizeof(*preset->atoms));
+ if (!tmp)
+ return -ENOMEM;
+
+ preset->atoms = tmp;
+ preset->atom_count++;
+
+ if (is_index) {
+ preset->atoms[i].type = ARRAY_INDEX;
+ err = parse_rvalue(value, &preset->atoms[i].index);
+ if (err)
+ return err;
+ } else {
+ preset->atoms[i].type = FIELD_NAME;
+ preset->atoms[i].name = strdup(value);
+ if (!preset->atoms[i].name)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int parse_var_atoms(const char *full_var, struct var_preset *preset)
+{
+ char expr[256], var[256], *name, *saveptr;
+ int n, len, off, err;
+
+ snprintf(expr, sizeof(expr), "%s", full_var);
+ preset->atom_count = 0;
+ while ((name = strtok_r(preset->atom_count ? NULL : expr, ".", &saveptr))) {
+ len = strlen(name);
+ /* parse variable name */
+ if (sscanf(name, "%[a-zA-Z0-9_] %n", var, &off) != 1) {
+ fprintf(stderr, "Can't parse %s\n", name);
+ return -EINVAL;
+ }
+ err = append_preset_atom(preset, var, false);
+ if (err)
+ return err;
+
+ /* parse optional array indexes */
+ while (off < len) {
+ if (sscanf(name + off, " [ %[a-zA-Z0-9_] ] %n", var, &n) != 1) {
+ fprintf(stderr, "Can't parse %s as index\n", name + off);
+ return -EINVAL;
+ }
+ err = append_preset_atom(preset, var, true);
+ if (err)
+ return err;
+ off += n;
+ }
+ }
+ return 0;
+}
static int append_var_preset(struct var_preset **presets, int *cnt, const char *expr)
{
void *tmp;
struct var_preset *cur;
- char var[256], val[256], *val_end;
- long long value;
- int n;
+ char var[256], val[256];
+ int n, err;
tmp = realloc(*presets, (*cnt + 1) * sizeof(**presets));
if (!tmp)
@@ -1379,37 +1778,25 @@ static int append_var_preset(struct var_preset **presets, int *cnt, const char *
memset(cur, 0, sizeof(*cur));
(*cnt)++;
- if (sscanf(expr, "%s = %s %n", var, val, &n) != 2 || n != strlen(expr)) {
+ if (sscanf(expr, " %255[][a-zA-Z0-9_. ] = %255s %n", var, val, &n) != 2 || n != strlen(expr)) {
fprintf(stderr, "Failed to parse expression '%s'\n", expr);
return -EINVAL;
}
+ /* Remove trailing spaces from var, as scanf may add those */
+ rtrim(var);
- if (val[0] == '-' || isdigit(val[0])) {
- /* must be a number */
- errno = 0;
- value = strtoll(val, &val_end, 0);
- if (errno == ERANGE) {
- errno = 0;
- value = strtoull(val, &val_end, 0);
- }
- if (errno || *val_end != '\0') {
- fprintf(stderr, "Failed to parse value '%s'\n", val);
- return -EINVAL;
- }
- cur->ivalue = value;
- cur->type = INTEGRAL;
- } else {
- /* if not a number, consider it enum value */
- cur->svalue = strdup(val);
- if (!cur->svalue)
- return -ENOMEM;
- cur->type = ENUMERATOR;
- }
+ err = parse_rvalue(val, &cur->value);
+ if (err)
+ return err;
- cur->name = strdup(var);
- if (!cur->name)
+ cur->full_name = strdup(var);
+ if (!cur->full_name)
return -ENOMEM;
+ err = parse_var_atoms(var, cur);
+ if (err)
+ return err;
+
return 0;
}
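parse_var_atoms() above splits on '.' and then peels any number of bracketed indices off each token, so expressions like the following all parse (identifiers are made up): "var = 5", "cfg.timeout = 100", "tbl[3] = MY_ENUM_VAL", "cfg.arr[2].enabled = 1". An index can itself be an enumerator name, since it goes through parse_rvalue() and is resolved against BTF later.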
@@ -1486,22 +1873,96 @@ static bool is_preset_supported(const struct btf_type *t)
return btf_is_int(t) || btf_is_enum(t) || btf_is_enum64(t);
}
-const int btf_find_member(const struct btf *btf,
- const struct btf_type *parent_type,
- __u32 parent_offset,
- const char *member_name,
- int *member_tid,
- __u32 *member_offset)
+static int find_enum_value(const struct btf *btf, const char *name, long long *value)
+{
+ const struct btf_type *t;
+ int cnt, i;
+ long long lvalue;
+
+ cnt = btf__type_cnt(btf);
+ for (i = 1; i != cnt; ++i) {
+ t = btf__type_by_id(btf, i);
+
+ if (!btf_is_any_enum(t))
+ continue;
+
+ if (enum_value_from_name(btf, t, name, &lvalue) == 0) {
+ *value = lvalue;
+ return 0;
+ }
+ }
+ return -ESRCH;
+}
+
+static int resolve_rvalue(struct btf *btf, const struct rvalue *rvalue, long long *result)
+{
+ int err = 0;
+
+ switch (rvalue->type) {
+ case INTEGRAL:
+ *result = rvalue->ivalue;
+ return 0;
+ case ENUMERATOR:
+ err = find_enum_value(btf, rvalue->svalue, result);
+ if (err) {
+ fprintf(stderr, "Can't resolve enum value %s\n", rvalue->svalue);
+ return err;
+ }
+ return 0;
+ default:
+ fprintf(stderr, "Unknown rvalue type\n");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int adjust_var_secinfo_array(struct btf *btf, int tid, struct field_access *atom,
+ const char *array_name, struct btf_var_secinfo *sinfo)
+{
+ const struct btf_type *t;
+ struct btf_array *barr;
+ long long idx;
+ int err;
+
+ tid = btf__resolve_type(btf, tid);
+ t = btf__type_by_id(btf, tid);
+ if (!btf_is_array(t)) {
+ fprintf(stderr, "Array index is not expected for %s\n",
+ array_name);
+ return -EINVAL;
+ }
+ barr = btf_array(t);
+ err = resolve_rvalue(btf, &atom->index, &idx);
+ if (err)
+ return err;
+ if (idx < 0 || idx >= barr->nelems) {
+ fprintf(stderr, "Array index %lld is out of bounds [0, %u): %s\n",
+ idx, barr->nelems, array_name);
+ return -EINVAL;
+ }
+ sinfo->size = btf__resolve_size(btf, barr->type);
+ sinfo->offset += sinfo->size * idx;
+ sinfo->type = btf__resolve_type(btf, barr->type);
+ return 0;
+}
+
+static int adjust_var_secinfo_member(const struct btf *btf,
+ const struct btf_type *parent_type,
+ __u32 parent_offset,
+ const char *member_name,
+ struct btf_var_secinfo *sinfo)
{
int i;
- if (!btf_is_composite(parent_type))
+ if (!btf_is_composite(parent_type)) {
+ fprintf(stderr, "Can't resolve field %s for non-composite type\n", member_name);
return -EINVAL;
+ }
for (i = 0; i < btf_vlen(parent_type); ++i) {
const struct btf_member *member;
const struct btf_type *member_type;
- int tid;
+ int tid, off;
member = btf_members(parent_type) + i;
tid = btf__resolve_type(btf, member->type);
@@ -1509,6 +1970,7 @@ const int btf_find_member(const struct btf *btf,
return -EINVAL;
member_type = btf__type_by_id(btf, tid);
+ off = parent_offset + member->offset;
if (member->name_off) {
const char *name = btf__name_by_offset(btf, member->name_off);
@@ -1518,48 +1980,62 @@ const int btf_find_member(const struct btf *btf,
name);
return -EINVAL;
}
- *member_offset = parent_offset + member->offset;
- *member_tid = tid;
+ sinfo->offset += off / 8;
+ sinfo->type = tid;
+ sinfo->size = member_type->size;
return 0;
}
} else if (btf_is_composite(member_type)) {
int err;
- err = btf_find_member(btf, member_type, parent_offset + member->offset,
- member_name, member_tid, member_offset);
+ err = adjust_var_secinfo_member(btf, member_type, off,
+ member_name, sinfo);
if (!err)
return 0;
}
}
- return -EINVAL;
+ return -ESRCH;
}
static int adjust_var_secinfo(struct btf *btf, const struct btf_type *t,
- struct btf_var_secinfo *sinfo, const char *var)
+ struct btf_var_secinfo *sinfo, struct var_preset *preset)
{
- char expr[256], *saveptr;
- const struct btf_type *base_type, *member_type;
- int err, member_tid;
- char *name;
- __u32 member_offset = 0;
+ const struct btf_type *base_type;
+ const char *prev_name;
+ int err, i;
+ int tid;
- base_type = btf__type_by_id(btf, btf__resolve_type(btf, t->type));
- snprintf(expr, sizeof(expr), "%s", var);
- strtok_r(expr, ".", &saveptr);
+ assert(preset->atom_count > 0);
+ assert(preset->atoms[0].type == FIELD_NAME);
- while ((name = strtok_r(NULL, ".", &saveptr))) {
- err = btf_find_member(btf, base_type, 0, name, &member_tid, &member_offset);
- if (err) {
- fprintf(stderr, "Could not find member %s for variable %s\n", name, var);
- return err;
+ tid = btf__resolve_type(btf, t->type);
+ base_type = btf__type_by_id(btf, tid);
+ prev_name = preset->atoms[0].name;
+
+ for (i = 1; i < preset->atom_count; ++i) {
+ struct field_access *atom = preset->atoms + i;
+
+ switch (atom->type) {
+ case ARRAY_INDEX:
+ err = adjust_var_secinfo_array(btf, tid, atom, prev_name, sinfo);
+ break;
+ case FIELD_NAME:
+ err = adjust_var_secinfo_member(btf, base_type, 0, atom->name, sinfo);
+ if (err == -ESRCH)
+ fprintf(stderr, "Can't find '%s'\n", atom->name);
+ prev_name = atom->name;
+ break;
+ default:
+ fprintf(stderr, "Unknown field_access type\n");
+ return -EOPNOTSUPP;
}
- member_type = btf__type_by_id(btf, member_tid);
- sinfo->offset += member_offset / 8;
- sinfo->size = member_type->size;
- sinfo->type = member_tid;
- base_type = member_type;
+ if (err)
+ return err;
+ base_type = btf__type_by_id(btf, sinfo->type);
+ tid = sinfo->type;
}
+
return 0;
}
@@ -1569,7 +2045,7 @@ static int set_global_var(struct bpf_object *obj, struct btf *btf,
{
const struct btf_type *base_type;
void *ptr;
- long long value = preset->ivalue;
+ long long value = preset->value.ivalue;
size_t size;
base_type = btf__type_by_id(btf, btf__resolve_type(btf, sinfo->type));
@@ -1578,22 +2054,23 @@ static int set_global_var(struct bpf_object *obj, struct btf *btf,
return -EINVAL;
}
if (!is_preset_supported(base_type)) {
- fprintf(stderr, "Setting value for type %s is not supported\n",
- btf__name_by_offset(btf, base_type->name_off));
+ fprintf(stderr, "Can't set %s. Only ints and enums are supported\n",
+ preset->full_name);
return -EINVAL;
}
- if (preset->type == ENUMERATOR) {
+ if (preset->value.type == ENUMERATOR) {
if (btf_is_any_enum(base_type)) {
- if (enum_value_from_name(btf, base_type, preset->svalue, &value)) {
+ if (enum_value_from_name(btf, base_type, preset->value.svalue, &value)) {
fprintf(stderr,
"Failed to find integer value for enum element %s\n",
- preset->svalue);
+ preset->value.svalue);
return -EINVAL;
}
} else {
fprintf(stderr, "Value %s is not supported for type %s\n",
- preset->svalue, btf__name_by_offset(btf, base_type->name_off));
+ preset->value.svalue,
+ btf__name_by_offset(btf, base_type->name_off));
return -EINVAL;
}
}
@@ -1660,20 +2137,16 @@ static int set_global_vars(struct bpf_object *obj, struct var_preset *presets, i
for (j = 0; j < n; ++j, ++sinfo) {
const struct btf_type *var_type = btf__type_by_id(btf, sinfo->type);
const char *var_name;
- int var_len;
if (!btf_is_var(var_type))
continue;
var_name = btf__name_by_offset(btf, var_type->name_off);
- var_len = strlen(var_name);
for (k = 0; k < npresets; ++k) {
struct btf_var_secinfo tmp_sinfo;
- if (strncmp(var_name, presets[k].name, var_len) != 0 ||
- (presets[k].name[var_len] != '\0' &&
- presets[k].name[var_len] != '.'))
+ if (strcmp(var_name, presets[k].atoms[0].name) != 0)
continue;
if (presets[k].applied) {
@@ -1683,7 +2156,7 @@ static int set_global_vars(struct bpf_object *obj, struct var_preset *presets, i
}
tmp_sinfo = *sinfo;
err = adjust_var_secinfo(btf, var_type,
- &tmp_sinfo, presets[k].name);
+ &tmp_sinfo, presets + k);
if (err)
return err;
@@ -1698,7 +2171,8 @@ static int set_global_vars(struct bpf_object *obj, struct var_preset *presets, i
for (i = 0; i < npresets; ++i) {
if (!presets[i].applied) {
fprintf(stderr, "Global variable preset %s has not been applied\n",
- presets[i].name);
+ presets[i].full_name);
+ err = -EINVAL;
}
presets[i].applied = false;
}
@@ -1824,6 +2298,7 @@ static int cmp_stat(const struct verif_stats *s1, const struct verif_stats *s2,
case TOTAL_STATES:
case PEAK_STATES:
case MAX_STATES_PER_INSN:
+ case MEMORY_PEAK:
case MARK_READ_MAX_LEN: {
long v1 = s1->stats[id];
long v2 = s2->stats[id];
@@ -2053,6 +2528,7 @@ static void prepare_value(const struct verif_stats *s, enum stat_id id,
case STACK:
case SIZE:
case JITED_SIZE:
+ case MEMORY_PEAK:
*val = s ? s->stats[id] : 0;
break;
default:
@@ -2139,6 +2615,7 @@ static int parse_stat_value(const char *str, enum stat_id id, struct verif_stats
case MARK_READ_MAX_LEN:
case SIZE:
case JITED_SIZE:
+ case MEMORY_PEAK:
case STACK: {
long val;
int err, n;
@@ -2776,7 +3253,7 @@ static void output_prog_stats(void)
static int handle_verif_mode(void)
{
- int i, err;
+ int i, err = 0;
if (env.filename_cnt == 0) {
fprintf(stderr, "Please provide path to BPF object file!\n\n");
@@ -2784,11 +3261,12 @@ static int handle_verif_mode(void)
return -EINVAL;
}
+ create_stat_cgroup();
for (i = 0; i < env.filename_cnt; i++) {
err = process_obj(env.filenames[i]);
if (err) {
fprintf(stderr, "Failed to process '%s': %d\n", env.filenames[i], err);
- return err;
+ goto out;
}
}
@@ -2796,7 +3274,9 @@ static int handle_verif_mode(void)
output_prog_stats();
- return 0;
+out:
+ destroy_stat_cgroup();
+ return err;
}
static int handle_replay_mode(void)
@@ -2826,7 +3306,7 @@ static int handle_replay_mode(void)
int main(int argc, char **argv)
{
- int err = 0, i;
+ int err = 0, i, j;
if (argp_parse(&argp, argc, argv, 0, NULL, NULL))
return 1;
@@ -2885,9 +3365,19 @@ int main(int argc, char **argv)
}
free(env.deny_filters);
for (i = 0; i < env.npresets; ++i) {
- free(env.presets[i].name);
- if (env.presets[i].type == ENUMERATOR)
- free(env.presets[i].svalue);
+ free(env.presets[i].full_name);
+ for (j = 0; j < env.presets[i].atom_count; ++j) {
+ switch (env.presets[i].atoms[j].type) {
+ case FIELD_NAME:
+ free(env.presets[i].atoms[j].name);
+ break;
+ case ARRAY_INDEX:
+ if (env.presets[i].atoms[j].index.type == ENUMERATOR)
+ free(env.presets[i].atoms[j].index.svalue);
+ break;
+ }
+ }
+ free(env.presets[i].atoms);
}
free(env.presets);
return -err;
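Taken together, a run exercising the new veristat features might look like veristat --dump=xlated -G "cfg.arr[2].enabled = 1" some_prog.bpf.o (file and variable names illustrative), with the mem_peak column now part of the default CSV output.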
diff --git a/tools/testing/selftests/bpf/vmtest.sh b/tools/testing/selftests/bpf/vmtest.sh
index 79505d294c44..2f869daf8a06 100755
--- a/tools/testing/selftests/bpf/vmtest.sh
+++ b/tools/testing/selftests/bpf/vmtest.sh
@@ -43,6 +43,15 @@ riscv64)
BZIMAGE="arch/riscv/boot/Image"
ARCH="riscv"
;;
+ppc64el)
+ QEMU_BINARY=qemu-system-ppc64
+ QEMU_CONSOLE="hvc0"
+ # KVM could not be tested on powerpc, so it is not enabled for now.
+ HOST_FLAGS=(-machine pseries -cpu POWER9)
+ CROSS_FLAGS=(-machine pseries -cpu POWER9)
+ BZIMAGE="vmlinux"
+ ARCH="powerpc"
+ ;;
*)
echo "Unsupported architecture"
exit 1
diff --git a/tools/testing/selftests/bpf/xdping.c b/tools/testing/selftests/bpf/xdping.c
index 1503a1d2faa0..9ed8c796645d 100644
--- a/tools/testing/selftests/bpf/xdping.c
+++ b/tools/testing/selftests/bpf/xdping.c
@@ -155,7 +155,7 @@ int main(int argc, char **argv)
}
if (!server) {
- /* Only supports IPv4; see hints initiailization above. */
+ /* Only supports IPv4; see hints initialization above. */
if (getaddrinfo(argv[optind], NULL, &hints, &a) || !a) {
fprintf(stderr, "Could not resolve %s\n", argv[optind]);
return 1;
diff --git a/tools/testing/selftests/bpf/xsk.h b/tools/testing/selftests/bpf/xsk.h
index 93c2cc413cfc..48729da142c2 100644
--- a/tools/testing/selftests/bpf/xsk.h
+++ b/tools/testing/selftests/bpf/xsk.h
@@ -93,8 +93,8 @@ static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb)
/* Refresh the local tail pointer.
* cached_cons is r->size bigger than the real consumer pointer so
* that this addition can be avoided in the more frequently
- * executed code that computs free_entries in the beginning of
- * this function. Without this optimization it whould have been
+ * executed code that computes free_entries in the beginning of
+ * this function. Without this optimization it would have been
* free_entries = r->cached_cons - r->cached_prod + r->size.
*/
r->cached_cons = __atomic_load_n(r->consumer, __ATOMIC_ACQUIRE);
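A worked example of the comment above, assuming r->size = 2048: with a real consumer index of 100 and cached_prod = 612, cached_cons is kept at 100 + 2048 = 2148, so the hot path computes free_entries = 2148 - 612 = 1536 with a single subtraction, matching the long form r->cached_cons - r->cached_prod + r->size = 100 - 612 + 2048.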
diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index 0ced4026ee44..05b3cebc5ca9 100644
--- a/tools/testing/selftests/bpf/xskxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -74,42 +74,33 @@
#define _GNU_SOURCE
#include <assert.h>
#include <fcntl.h>
-#include <errno.h>
#include <getopt.h>
#include <linux/if_link.h>
#include <linux/if_ether.h>
#include <linux/mman.h>
#include <linux/netdev.h>
-#include <linux/bitmap.h>
#include <linux/ethtool.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <locale.h>
-#include <poll.h>
-#include <pthread.h>
-#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <libgen.h>
-#include <string.h>
#include <stddef.h>
#include <sys/mman.h>
-#include <sys/socket.h>
-#include <sys/time.h>
#include <sys/types.h>
-#include <unistd.h>
+#include "prog_tests/test_xsk.h"
#include "xsk_xdp_progs.skel.h"
#include "xsk.h"
#include "xskxceiver.h"
#include <bpf/bpf.h>
#include <linux/filter.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "xsk_xdp_common.h"
#include <network_helpers.h>
-static bool opt_verbose;
static bool opt_print_tests;
static enum test_mode opt_mode = TEST_MODE_ALL;
static u32 opt_run_test = RUN_ALL_TESTS;
@@ -118,169 +109,12 @@ void test__fail(void) { /* for network_helpers.c */ }
static void __exit_with_error(int error, const char *file, const char *func, int line)
{
- ksft_test_result_fail("[%s:%s:%i]: ERROR: %d/\"%s\"\n", file, func, line, error,
- strerror(error));
+ ksft_test_result_fail("[%s:%s:%i]: ERROR: %d/\"%s\"\n", file, func, line,
+ error, strerror(error));
ksft_exit_xfail();
}
#define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, __LINE__)
-#define busy_poll_string(test) (test)->ifobj_tx->busy_poll ? "BUSY-POLL " : ""
-static char *mode_string(struct test_spec *test)
-{
- switch (test->mode) {
- case TEST_MODE_SKB:
- return "SKB";
- case TEST_MODE_DRV:
- return "DRV";
- case TEST_MODE_ZC:
- return "ZC";
- default:
- return "BOGUS";
- }
-}
-
-static void report_failure(struct test_spec *test)
-{
- if (test->fail)
- return;
-
- ksft_test_result_fail("FAIL: %s %s%s\n", mode_string(test), busy_poll_string(test),
- test->name);
- test->fail = true;
-}
-
-/* The payload is a word consisting of a packet sequence number in the upper
- * 16-bits and a intra packet data sequence number in the lower 16 bits. So the 3rd packet's
- * 5th word of data will contain the number (2<<16) | 4 as they are numbered from 0.
- */
-static void write_payload(void *dest, u32 pkt_nb, u32 start, u32 size)
-{
- u32 *ptr = (u32 *)dest, i;
-
- start /= sizeof(*ptr);
- size /= sizeof(*ptr);
- for (i = 0; i < size; i++)
- ptr[i] = htonl(pkt_nb << 16 | (i + start));
-}
-
-static void gen_eth_hdr(struct xsk_socket_info *xsk, struct ethhdr *eth_hdr)
-{
- memcpy(eth_hdr->h_dest, xsk->dst_mac, ETH_ALEN);
- memcpy(eth_hdr->h_source, xsk->src_mac, ETH_ALEN);
- eth_hdr->h_proto = htons(ETH_P_LOOPBACK);
-}
-
-static bool is_umem_valid(struct ifobject *ifobj)
-{
- return !!ifobj->umem->umem;
-}
-
-static u32 mode_to_xdp_flags(enum test_mode mode)
-{
- return (mode == TEST_MODE_SKB) ? XDP_FLAGS_SKB_MODE : XDP_FLAGS_DRV_MODE;
-}
-
-static u64 umem_size(struct xsk_umem_info *umem)
-{
- return umem->num_frames * umem->frame_size;
-}
-
-static int xsk_configure_umem(struct ifobject *ifobj, struct xsk_umem_info *umem, void *buffer,
- u64 size)
-{
- struct xsk_umem_config cfg = {
- .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
- .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
- .frame_size = umem->frame_size,
- .frame_headroom = umem->frame_headroom,
- .flags = XSK_UMEM__DEFAULT_FLAGS
- };
- int ret;
-
- if (umem->fill_size)
- cfg.fill_size = umem->fill_size;
-
- if (umem->comp_size)
- cfg.comp_size = umem->comp_size;
-
- if (umem->unaligned_mode)
- cfg.flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG;
-
- ret = xsk_umem__create(&umem->umem, buffer, size,
- &umem->fq, &umem->cq, &cfg);
- if (ret)
- return ret;
-
- umem->buffer = buffer;
- if (ifobj->shared_umem && ifobj->rx_on) {
- umem->base_addr = umem_size(umem);
- umem->next_buffer = umem_size(umem);
- }
-
- return 0;
-}
-
-static u64 umem_alloc_buffer(struct xsk_umem_info *umem)
-{
- u64 addr;
-
- addr = umem->next_buffer;
- umem->next_buffer += umem->frame_size;
- if (umem->next_buffer >= umem->base_addr + umem_size(umem))
- umem->next_buffer = umem->base_addr;
-
- return addr;
-}
-
-static void umem_reset_alloc(struct xsk_umem_info *umem)
-{
- umem->next_buffer = 0;
-}
-
-static void enable_busy_poll(struct xsk_socket_info *xsk)
-{
- int sock_opt;
-
- sock_opt = 1;
- if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_PREFER_BUSY_POLL,
- (void *)&sock_opt, sizeof(sock_opt)) < 0)
- exit_with_error(errno);
-
- sock_opt = 20;
- if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL,
- (void *)&sock_opt, sizeof(sock_opt)) < 0)
- exit_with_error(errno);
-
- sock_opt = xsk->batch_size;
- if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL_BUDGET,
- (void *)&sock_opt, sizeof(sock_opt)) < 0)
- exit_with_error(errno);
-}
-
-static int __xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem,
- struct ifobject *ifobject, bool shared)
-{
- struct xsk_socket_config cfg = {};
- struct xsk_ring_cons *rxr;
- struct xsk_ring_prod *txr;
-
- xsk->umem = umem;
- cfg.rx_size = xsk->rxqsize;
- cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
- cfg.bind_flags = ifobject->bind_flags;
- if (shared)
- cfg.bind_flags |= XDP_SHARED_UMEM;
- if (ifobject->mtu > MAX_ETH_PKT_SIZE)
- cfg.bind_flags |= XDP_USE_SG;
- if (umem->comp_size)
- cfg.tx_size = umem->comp_size;
- if (umem->fill_size)
- cfg.rx_size = umem->fill_size;
-
- txr = ifobject->tx_on ? &xsk->tx : NULL;
- rxr = ifobject->rx_on ? &xsk->rx : NULL;
- return xsk_socket__create(&xsk->xsk, ifobject->ifindex, 0, umem->umem, rxr, txr, &cfg);
-}
static bool ifobj_zc_avail(struct ifobject *ifobject)
{
@@ -312,7 +146,7 @@ static bool ifobj_zc_avail(struct ifobject *ifobject)
ifobject->bind_flags = XDP_USE_NEED_WAKEUP | XDP_ZEROCOPY;
ifobject->rx_on = true;
xsk->rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
- ret = __xsk_configure_socket(xsk, umem, ifobject, false);
+ ret = xsk_configure_socket(xsk, umem, ifobject, false);
if (!ret)
zc_avail = true;
@@ -325,25 +159,6 @@ out:
return zc_avail;
}
-#define MAX_SKB_FRAGS_PATH "/proc/sys/net/core/max_skb_frags"
-static unsigned int get_max_skb_frags(void)
-{
- unsigned int max_skb_frags = 0;
- FILE *file;
-
- file = fopen(MAX_SKB_FRAGS_PATH, "r");
- if (!file) {
- ksft_print_msg("Error opening %s\n", MAX_SKB_FRAGS_PATH);
- return 0;
- }
-
- if (fscanf(file, "%u", &max_skb_frags) != 1)
- ksft_print_msg("Error reading %s\n", MAX_SKB_FRAGS_PATH);
-
- fclose(file);
- return max_skb_frags;
-}
-
static struct option long_options[] = {
{"interface", required_argument, 0, 'i'},
{"busy-poll", no_argument, 0, 'b'},
@@ -444,2217 +259,36 @@ static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj
}
}
-static int set_ring_size(struct ifobject *ifobj)
-{
- int ret;
- u32 ctr = 0;
-
- while (ctr++ < SOCK_RECONF_CTR) {
- ret = set_hw_ring_size(ifobj->ifname, &ifobj->ring);
- if (!ret)
- break;
-
- /* Retry if it fails */
- if (ctr >= SOCK_RECONF_CTR || errno != EBUSY)
- return -errno;
-
- usleep(USLEEP_MAX);
- }
-
- return ret;
-}
-
-static int hw_ring_size_reset(struct ifobject *ifobj)
-{
- ifobj->ring.tx_pending = ifobj->set_ring.default_tx;
- ifobj->ring.rx_pending = ifobj->set_ring.default_rx;
- return set_ring_size(ifobj);
-}
-
-static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
- struct ifobject *ifobj_rx)
-{
- u32 i, j;
-
- for (i = 0; i < MAX_INTERFACES; i++) {
- struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx;
-
- ifobj->xsk = &ifobj->xsk_arr[0];
- ifobj->use_poll = false;
- ifobj->use_fill_ring = true;
- ifobj->release_rx = true;
- ifobj->validation_func = NULL;
- ifobj->use_metadata = false;
-
- if (i == 0) {
- ifobj->rx_on = false;
- ifobj->tx_on = true;
- } else {
- ifobj->rx_on = true;
- ifobj->tx_on = false;
- }
-
- memset(ifobj->umem, 0, sizeof(*ifobj->umem));
- ifobj->umem->num_frames = DEFAULT_UMEM_BUFFERS;
- ifobj->umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
-
- for (j = 0; j < MAX_SOCKETS; j++) {
- memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j]));
- ifobj->xsk_arr[j].rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
- ifobj->xsk_arr[j].batch_size = DEFAULT_BATCH_SIZE;
- if (i == 0)
- ifobj->xsk_arr[j].pkt_stream = test->tx_pkt_stream_default;
- else
- ifobj->xsk_arr[j].pkt_stream = test->rx_pkt_stream_default;
-
- memcpy(ifobj->xsk_arr[j].src_mac, g_mac, ETH_ALEN);
- memcpy(ifobj->xsk_arr[j].dst_mac, g_mac, ETH_ALEN);
- ifobj->xsk_arr[j].src_mac[5] += ((j * 2) + 0);
- ifobj->xsk_arr[j].dst_mac[5] += ((j * 2) + 1);
- }
- }
-
- if (ifobj_tx->hw_ring_size_supp)
- hw_ring_size_reset(ifobj_tx);
-
- test->ifobj_tx = ifobj_tx;
- test->ifobj_rx = ifobj_rx;
- test->current_step = 0;
- test->total_steps = 1;
- test->nb_sockets = 1;
- test->fail = false;
- test->set_ring = false;
- test->adjust_tail = false;
- test->adjust_tail_support = false;
- test->mtu = MAX_ETH_PKT_SIZE;
- test->xdp_prog_rx = ifobj_rx->xdp_progs->progs.xsk_def_prog;
- test->xskmap_rx = ifobj_rx->xdp_progs->maps.xsk;
- test->xdp_prog_tx = ifobj_tx->xdp_progs->progs.xsk_def_prog;
- test->xskmap_tx = ifobj_tx->xdp_progs->maps.xsk;
-}
-
-static void test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
- struct ifobject *ifobj_rx, enum test_mode mode,
- const struct test_spec *test_to_run)
-{
- struct pkt_stream *tx_pkt_stream;
- struct pkt_stream *rx_pkt_stream;
- u32 i;
-
- tx_pkt_stream = test->tx_pkt_stream_default;
- rx_pkt_stream = test->rx_pkt_stream_default;
- memset(test, 0, sizeof(*test));
- test->tx_pkt_stream_default = tx_pkt_stream;
- test->rx_pkt_stream_default = rx_pkt_stream;
-
- for (i = 0; i < MAX_INTERFACES; i++) {
- struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx;
-
- ifobj->bind_flags = XDP_USE_NEED_WAKEUP;
- if (mode == TEST_MODE_ZC)
- ifobj->bind_flags |= XDP_ZEROCOPY;
- else
- ifobj->bind_flags |= XDP_COPY;
- }
-
- strncpy(test->name, test_to_run->name, MAX_TEST_NAME_SIZE);
- test->test_func = test_to_run->test_func;
- test->mode = mode;
- __test_spec_init(test, ifobj_tx, ifobj_rx);
-}
-
-static void test_spec_reset(struct test_spec *test)
-{
- __test_spec_init(test, test->ifobj_tx, test->ifobj_rx);
-}
-
-static void test_spec_set_xdp_prog(struct test_spec *test, struct bpf_program *xdp_prog_rx,
- struct bpf_program *xdp_prog_tx, struct bpf_map *xskmap_rx,
- struct bpf_map *xskmap_tx)
-{
- test->xdp_prog_rx = xdp_prog_rx;
- test->xdp_prog_tx = xdp_prog_tx;
- test->xskmap_rx = xskmap_rx;
- test->xskmap_tx = xskmap_tx;
-}
-
-static int test_spec_set_mtu(struct test_spec *test, int mtu)
-{
- int err;
-
- if (test->ifobj_rx->mtu != mtu) {
- err = xsk_set_mtu(test->ifobj_rx->ifindex, mtu);
- if (err)
- return err;
- test->ifobj_rx->mtu = mtu;
- }
- if (test->ifobj_tx->mtu != mtu) {
- err = xsk_set_mtu(test->ifobj_tx->ifindex, mtu);
- if (err)
- return err;
- test->ifobj_tx->mtu = mtu;
- }
-
- return 0;
-}
-
-static void pkt_stream_reset(struct pkt_stream *pkt_stream)
-{
- if (pkt_stream) {
- pkt_stream->current_pkt_nb = 0;
- pkt_stream->nb_rx_pkts = 0;
- }
-}
-
-static struct pkt *pkt_stream_get_next_tx_pkt(struct pkt_stream *pkt_stream)
-{
- if (pkt_stream->current_pkt_nb >= pkt_stream->nb_pkts)
- return NULL;
-
- return &pkt_stream->pkts[pkt_stream->current_pkt_nb++];
-}
-
-static struct pkt *pkt_stream_get_next_rx_pkt(struct pkt_stream *pkt_stream, u32 *pkts_sent)
-{
- while (pkt_stream->current_pkt_nb < pkt_stream->nb_pkts) {
- (*pkts_sent)++;
- if (pkt_stream->pkts[pkt_stream->current_pkt_nb].valid)
- return &pkt_stream->pkts[pkt_stream->current_pkt_nb++];
- pkt_stream->current_pkt_nb++;
- }
- return NULL;
-}
-
-static void pkt_stream_delete(struct pkt_stream *pkt_stream)
-{
- free(pkt_stream->pkts);
- free(pkt_stream);
-}
-
-static void pkt_stream_restore_default(struct test_spec *test)
-{
- struct pkt_stream *tx_pkt_stream = test->ifobj_tx->xsk->pkt_stream;
- struct pkt_stream *rx_pkt_stream = test->ifobj_rx->xsk->pkt_stream;
-
- if (tx_pkt_stream != test->tx_pkt_stream_default) {
- pkt_stream_delete(test->ifobj_tx->xsk->pkt_stream);
- test->ifobj_tx->xsk->pkt_stream = test->tx_pkt_stream_default;
- }
-
- if (rx_pkt_stream != test->rx_pkt_stream_default) {
- pkt_stream_delete(test->ifobj_rx->xsk->pkt_stream);
- test->ifobj_rx->xsk->pkt_stream = test->rx_pkt_stream_default;
- }
-}
-
-static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts)
-{
- struct pkt_stream *pkt_stream;
-
- pkt_stream = calloc(1, sizeof(*pkt_stream));
- if (!pkt_stream)
- return NULL;
-
- pkt_stream->pkts = calloc(nb_pkts, sizeof(*pkt_stream->pkts));
- if (!pkt_stream->pkts) {
- free(pkt_stream);
- return NULL;
- }
-
- pkt_stream->nb_pkts = nb_pkts;
- return pkt_stream;
-}
-
-static bool pkt_continues(u32 options)
-{
- return options & XDP_PKT_CONTD;
-}
-
-static u32 ceil_u32(u32 a, u32 b)
-{
- return (a + b - 1) / b;
-}
-
-static u32 pkt_nb_frags(u32 frame_size, struct pkt_stream *pkt_stream, struct pkt *pkt)
-{
- u32 nb_frags = 1, next_frag;
-
- if (!pkt)
- return 1;
-
- if (!pkt_stream->verbatim) {
- if (!pkt->valid || !pkt->len)
- return 1;
- return ceil_u32(pkt->len, frame_size);
- }
-
- /* Search for the end of the packet in verbatim mode */
- if (!pkt_continues(pkt->options))
- return nb_frags;
-
- next_frag = pkt_stream->current_pkt_nb;
- pkt++;
- while (next_frag++ < pkt_stream->nb_pkts) {
- nb_frags++;
- if (!pkt_continues(pkt->options) || !pkt->valid)
- break;
- pkt++;
- }
- return nb_frags;
-}
-
-static bool set_pkt_valid(int offset, u32 len)
-{
- return len <= MAX_ETH_JUMBO_SIZE;
-}
-
-static void pkt_set(struct pkt_stream *pkt_stream, struct pkt *pkt, int offset, u32 len)
-{
- pkt->offset = offset;
- pkt->len = len;
- pkt->valid = set_pkt_valid(offset, len);
-}
-
-static void pkt_stream_pkt_set(struct pkt_stream *pkt_stream, struct pkt *pkt, int offset, u32 len)
-{
- bool prev_pkt_valid = pkt->valid;
-
- pkt_set(pkt_stream, pkt, offset, len);
- pkt_stream->nb_valid_entries += pkt->valid - prev_pkt_valid;
-}
-
-static u32 pkt_get_buffer_len(struct xsk_umem_info *umem, u32 len)
-{
- return ceil_u32(len, umem->frame_size) * umem->frame_size;
-}
-
-static struct pkt_stream *__pkt_stream_generate(u32 nb_pkts, u32 pkt_len, u32 nb_start, u32 nb_off)
-{
- struct pkt_stream *pkt_stream;
- u32 i;
-
- pkt_stream = __pkt_stream_alloc(nb_pkts);
- if (!pkt_stream)
- exit_with_error(ENOMEM);
-
- pkt_stream->nb_pkts = nb_pkts;
- pkt_stream->max_pkt_len = pkt_len;
- for (i = 0; i < nb_pkts; i++) {
- struct pkt *pkt = &pkt_stream->pkts[i];
-
- pkt_stream_pkt_set(pkt_stream, pkt, 0, pkt_len);
- pkt->pkt_nb = nb_start + i * nb_off;
- }
-
- return pkt_stream;
-}
-
-static struct pkt_stream *pkt_stream_generate(u32 nb_pkts, u32 pkt_len)
-{
- return __pkt_stream_generate(nb_pkts, pkt_len, 0, 1);
-}
-
-static struct pkt_stream *pkt_stream_clone(struct pkt_stream *pkt_stream)
-{
- return pkt_stream_generate(pkt_stream->nb_pkts, pkt_stream->pkts[0].len);
-}
-
-static void pkt_stream_replace_ifobject(struct ifobject *ifobj, u32 nb_pkts, u32 pkt_len)
-{
- ifobj->xsk->pkt_stream = pkt_stream_generate(nb_pkts, pkt_len);
-}
-
-static void pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len)
-{
- pkt_stream_replace_ifobject(test->ifobj_tx, nb_pkts, pkt_len);
- pkt_stream_replace_ifobject(test->ifobj_rx, nb_pkts, pkt_len);
-}
-
-static void __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len,
- int offset)
-{
- struct pkt_stream *pkt_stream;
- u32 i;
-
- pkt_stream = pkt_stream_clone(ifobj->xsk->pkt_stream);
- for (i = 1; i < ifobj->xsk->pkt_stream->nb_pkts; i += 2)
- pkt_stream_pkt_set(pkt_stream, &pkt_stream->pkts[i], offset, pkt_len);
-
- ifobj->xsk->pkt_stream = pkt_stream;
-}
-
-static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset)
-{
- __pkt_stream_replace_half(test->ifobj_tx, pkt_len, offset);
- __pkt_stream_replace_half(test->ifobj_rx, pkt_len, offset);
-}
-
-static void pkt_stream_receive_half(struct test_spec *test)
-{
- struct pkt_stream *pkt_stream = test->ifobj_tx->xsk->pkt_stream;
- u32 i;
-
- test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(pkt_stream->nb_pkts,
- pkt_stream->pkts[0].len);
- pkt_stream = test->ifobj_rx->xsk->pkt_stream;
- for (i = 1; i < pkt_stream->nb_pkts; i += 2)
- pkt_stream->pkts[i].valid = false;
-
- pkt_stream->nb_valid_entries /= 2;
-}
-
-static void pkt_stream_even_odd_sequence(struct test_spec *test)
-{
- struct pkt_stream *pkt_stream;
- u32 i;
-
- for (i = 0; i < test->nb_sockets; i++) {
- pkt_stream = test->ifobj_tx->xsk_arr[i].pkt_stream;
- pkt_stream = __pkt_stream_generate(pkt_stream->nb_pkts / 2,
- pkt_stream->pkts[0].len, i, 2);
- test->ifobj_tx->xsk_arr[i].pkt_stream = pkt_stream;
-
- pkt_stream = test->ifobj_rx->xsk_arr[i].pkt_stream;
- pkt_stream = __pkt_stream_generate(pkt_stream->nb_pkts / 2,
- pkt_stream->pkts[0].len, i, 2);
- test->ifobj_rx->xsk_arr[i].pkt_stream = pkt_stream;
- }
-}
-
-static u64 pkt_get_addr(struct pkt *pkt, struct xsk_umem_info *umem)
-{
- if (!pkt->valid)
- return pkt->offset;
- return pkt->offset + umem_alloc_buffer(umem);
-}
-
-static void pkt_stream_cancel(struct pkt_stream *pkt_stream)
-{
- pkt_stream->current_pkt_nb--;
-}
-
-static void pkt_generate(struct xsk_socket_info *xsk, struct xsk_umem_info *umem, u64 addr, u32 len,
- u32 pkt_nb, u32 bytes_written)
-{
- void *data = xsk_umem__get_data(umem->buffer, addr);
-
- if (len < MIN_PKT_SIZE)
- return;
-
- if (!bytes_written) {
- gen_eth_hdr(xsk, data);
-
- len -= PKT_HDR_SIZE;
- data += PKT_HDR_SIZE;
- } else {
- bytes_written -= PKT_HDR_SIZE;
- }
-
- write_payload(data, pkt_nb, bytes_written, len);
-}
-
-static struct pkt_stream *__pkt_stream_generate_custom(struct ifobject *ifobj, struct pkt *frames,
- u32 nb_frames, bool verbatim)
-{
- u32 i, len = 0, pkt_nb = 0, payload = 0;
- struct pkt_stream *pkt_stream;
-
- pkt_stream = __pkt_stream_alloc(nb_frames);
- if (!pkt_stream)
- exit_with_error(ENOMEM);
-
- for (i = 0; i < nb_frames; i++) {
- struct pkt *pkt = &pkt_stream->pkts[pkt_nb];
- struct pkt *frame = &frames[i];
-
- pkt->offset = frame->offset;
- if (verbatim) {
- *pkt = *frame;
- pkt->pkt_nb = payload;
- if (!frame->valid || !pkt_continues(frame->options))
- payload++;
- } else {
- if (frame->valid)
- len += frame->len;
- if (frame->valid && pkt_continues(frame->options))
- continue;
-
- pkt->pkt_nb = pkt_nb;
- pkt->len = len;
- pkt->valid = frame->valid;
- pkt->options = 0;
-
- len = 0;
- }
-
- print_verbose("offset: %d len: %u valid: %u options: %u pkt_nb: %u\n",
- pkt->offset, pkt->len, pkt->valid, pkt->options, pkt->pkt_nb);
-
- if (pkt->valid && pkt->len > pkt_stream->max_pkt_len)
- pkt_stream->max_pkt_len = pkt->len;
-
- if (pkt->valid)
- pkt_stream->nb_valid_entries++;
-
- pkt_nb++;
- }
-
- pkt_stream->nb_pkts = pkt_nb;
- pkt_stream->verbatim = verbatim;
- return pkt_stream;
-}
-
-static void pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, u32 nb_pkts)
-{
- struct pkt_stream *pkt_stream;
-
- pkt_stream = __pkt_stream_generate_custom(test->ifobj_tx, pkts, nb_pkts, true);
- test->ifobj_tx->xsk->pkt_stream = pkt_stream;
-
- pkt_stream = __pkt_stream_generate_custom(test->ifobj_rx, pkts, nb_pkts, false);
- test->ifobj_rx->xsk->pkt_stream = pkt_stream;
-}
-
-static void pkt_print_data(u32 *data, u32 cnt)
-{
- u32 i;
-
- for (i = 0; i < cnt; i++) {
- u32 seqnum, pkt_nb;
-
- seqnum = ntohl(*data) & 0xffff;
- pkt_nb = ntohl(*data) >> 16;
- ksft_print_msg("%u:%u ", pkt_nb, seqnum);
- data++;
- }
-}
-
-static void pkt_dump(void *pkt, u32 len, bool eth_header)
-{
- struct ethhdr *ethhdr = pkt;
- u32 i, *data;
-
- if (eth_header) {
- /*extract L2 frame */
- ksft_print_msg("DEBUG>> L2: dst mac: ");
- for (i = 0; i < ETH_ALEN; i++)
- ksft_print_msg("%02X", ethhdr->h_dest[i]);
-
- ksft_print_msg("\nDEBUG>> L2: src mac: ");
- for (i = 0; i < ETH_ALEN; i++)
- ksft_print_msg("%02X", ethhdr->h_source[i]);
-
- data = pkt + PKT_HDR_SIZE;
- } else {
- data = pkt;
- }
-
- /*extract L5 frame */
- ksft_print_msg("\nDEBUG>> L5: seqnum: ");
- pkt_print_data(data, PKT_DUMP_NB_TO_PRINT);
- ksft_print_msg("....");
- if (len > PKT_DUMP_NB_TO_PRINT * sizeof(u32)) {
- ksft_print_msg("\n.... ");
- pkt_print_data(data + len / sizeof(u32) - PKT_DUMP_NB_TO_PRINT,
- PKT_DUMP_NB_TO_PRINT);
- }
- ksft_print_msg("\n---------------------------------------\n");
-}
-
-static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr)
-{
- u32 headroom = umem->unaligned_mode ? 0 : umem->frame_headroom;
- u32 offset = addr % umem->frame_size, expected_offset;
- int pkt_offset = pkt->valid ? pkt->offset : 0;
-
- if (!umem->unaligned_mode)
- pkt_offset = 0;
-
- expected_offset = (pkt_offset + headroom + XDP_PACKET_HEADROOM) % umem->frame_size;
-
- if (offset == expected_offset)
- return true;
-
- ksft_print_msg("[%s] expected [%u], got [%u]\n", __func__, expected_offset, offset);
- return false;
-}
-
-static bool is_metadata_correct(struct pkt *pkt, void *buffer, u64 addr)
-{
- void *data = xsk_umem__get_data(buffer, addr);
- struct xdp_info *meta = data - sizeof(struct xdp_info);
-
- if (meta->count != pkt->pkt_nb) {
- ksft_print_msg("[%s] expected meta_count [%d], got meta_count [%llu]\n",
- __func__, pkt->pkt_nb,
- (unsigned long long)meta->count);
- return false;
- }
-
- return true;
-}
-
-static bool is_adjust_tail_supported(struct xsk_xdp_progs *skel_rx)
-{
- struct bpf_map *data_map;
- int adjust_value = 0;
- int key = 0;
- int ret;
-
- data_map = bpf_object__find_map_by_name(skel_rx->obj, "xsk_xdp_.bss");
- if (!data_map || !bpf_map__is_internal(data_map)) {
- ksft_print_msg("Error: could not find bss section of XDP program\n");
- exit_with_error(errno);
- }
-
- ret = bpf_map_lookup_elem(bpf_map__fd(data_map), &key, &adjust_value);
- if (ret) {
- ksft_print_msg("Error: bpf_map_lookup_elem failed with error %d\n", ret);
- exit_with_error(errno);
- }
-
- /* Set the 'adjust_value' variable to -EOPNOTSUPP in the XDP program if the adjust_tail
- * helper is not supported. Skip the adjust_tail test case in this scenario.
- */
- return adjust_value != -EOPNOTSUPP;
-}
-
-static bool is_frag_valid(struct xsk_umem_info *umem, u64 addr, u32 len, u32 expected_pkt_nb,
- u32 bytes_processed)
-{
- u32 seqnum, pkt_nb, *pkt_data, words_to_end, expected_seqnum;
- void *data = xsk_umem__get_data(umem->buffer, addr);
-
- addr -= umem->base_addr;
-
- if (addr >= umem->num_frames * umem->frame_size ||
- addr + len > umem->num_frames * umem->frame_size) {
- ksft_print_msg("Frag invalid addr: %llx len: %u\n",
- (unsigned long long)addr, len);
- return false;
- }
- if (!umem->unaligned_mode && addr % umem->frame_size + len > umem->frame_size) {
- ksft_print_msg("Frag crosses frame boundary addr: %llx len: %u\n",
- (unsigned long long)addr, len);
- return false;
- }
-
- pkt_data = data;
- if (!bytes_processed) {
- pkt_data += PKT_HDR_SIZE / sizeof(*pkt_data);
- len -= PKT_HDR_SIZE;
- } else {
- bytes_processed -= PKT_HDR_SIZE;
- }
-
- expected_seqnum = bytes_processed / sizeof(*pkt_data);
- seqnum = ntohl(*pkt_data) & 0xffff;
- pkt_nb = ntohl(*pkt_data) >> 16;
-
- if (expected_pkt_nb != pkt_nb) {
- ksft_print_msg("[%s] expected pkt_nb [%u], got pkt_nb [%u]\n",
- __func__, expected_pkt_nb, pkt_nb);
- goto error;
- }
- if (expected_seqnum != seqnum) {
- ksft_print_msg("[%s] expected seqnum at start [%u], got seqnum [%u]\n",
- __func__, expected_seqnum, seqnum);
- goto error;
- }
-
- words_to_end = len / sizeof(*pkt_data) - 1;
- pkt_data += words_to_end;
- seqnum = ntohl(*pkt_data) & 0xffff;
- expected_seqnum += words_to_end;
- if (expected_seqnum != seqnum) {
- ksft_print_msg("[%s] expected seqnum at end [%u], got seqnum [%u]\n",
- __func__, expected_seqnum, seqnum);
- goto error;
- }
-
- return true;
-
-error:
- pkt_dump(data, len, !bytes_processed);
- return false;
-}
-
-static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len)
-{
- if (pkt->len != len) {
- ksft_print_msg("[%s] expected packet length [%d], got length [%d]\n",
- __func__, pkt->len, len);
- pkt_dump(xsk_umem__get_data(buffer, addr), len, true);
- return false;
- }
-
- return true;
-}
-
-static int kick_tx(struct xsk_socket_info *xsk)
-{
- int ret;
-
- ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
- if (ret >= 0)
- return TEST_PASS;
- if (errno == ENOBUFS || errno == EAGAIN || errno == EBUSY || errno == ENETDOWN) {
- usleep(100);
- return TEST_PASS;
- }
- return TEST_FAILURE;
-}
-
-static int kick_rx(struct xsk_socket_info *xsk)
-{
- int ret;
-
- ret = recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
- if (ret < 0)
- return TEST_FAILURE;
-
- return TEST_PASS;
-}
-
-static int complete_pkts(struct xsk_socket_info *xsk, int batch_size)
-{
- unsigned int rcvd;
- u32 idx;
- int ret;
-
- if (xsk_ring_prod__needs_wakeup(&xsk->tx)) {
- ret = kick_tx(xsk);
- if (ret)
- return TEST_FAILURE;
- }
-
- rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx);
- if (rcvd) {
- if (rcvd > xsk->outstanding_tx) {
- u64 addr = *xsk_ring_cons__comp_addr(&xsk->umem->cq, idx + rcvd - 1);
-
- ksft_print_msg("[%s] Too many packets completed\n", __func__);
- ksft_print_msg("Last completion address: %llx\n",
- (unsigned long long)addr);
- return TEST_FAILURE;
- }
-
- xsk_ring_cons__release(&xsk->umem->cq, rcvd);
- xsk->outstanding_tx -= rcvd;
- }
-
- return TEST_PASS;
-}
-
-static int __receive_pkts(struct test_spec *test, struct xsk_socket_info *xsk)
-{
- u32 frags_processed = 0, nb_frags = 0, pkt_len = 0;
- u32 idx_rx = 0, idx_fq = 0, rcvd, pkts_sent = 0;
- struct pkt_stream *pkt_stream = xsk->pkt_stream;
- struct ifobject *ifobj = test->ifobj_rx;
- struct xsk_umem_info *umem = xsk->umem;
- struct pollfd fds = { };
- struct pkt *pkt;
- u64 first_addr = 0;
- int ret;
-
- fds.fd = xsk_socket__fd(xsk->xsk);
- fds.events = POLLIN;
-
- ret = kick_rx(xsk);
- if (ret)
- return TEST_FAILURE;
-
- if (ifobj->use_poll) {
- ret = poll(&fds, 1, POLL_TMOUT);
- if (ret < 0)
- return TEST_FAILURE;
-
- if (!ret) {
- if (!is_umem_valid(test->ifobj_tx))
- return TEST_PASS;
-
- ksft_print_msg("ERROR: [%s] Poll timed out\n", __func__);
- return TEST_CONTINUE;
- }
-
- if (!(fds.revents & POLLIN))
- return TEST_CONTINUE;
- }
-
- rcvd = xsk_ring_cons__peek(&xsk->rx, xsk->batch_size, &idx_rx);
- if (!rcvd)
- return TEST_CONTINUE;
-
- if (ifobj->use_fill_ring) {
- ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
- while (ret != rcvd) {
- if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
- ret = poll(&fds, 1, POLL_TMOUT);
- if (ret < 0)
- return TEST_FAILURE;
- }
- ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
- }
- }
-
- while (frags_processed < rcvd) {
- const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++);
- u64 addr = desc->addr, orig;
-
- orig = xsk_umem__extract_addr(addr);
- addr = xsk_umem__add_offset_to_addr(addr);
-
- if (!nb_frags) {
- pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent);
- if (!pkt) {
- ksft_print_msg("[%s] received too many packets addr: %lx len %u\n",
- __func__, addr, desc->len);
- return TEST_FAILURE;
- }
- }
-
- print_verbose("Rx: addr: %lx len: %u options: %u pkt_nb: %u valid: %u\n",
- addr, desc->len, desc->options, pkt->pkt_nb, pkt->valid);
-
- if (!is_frag_valid(umem, addr, desc->len, pkt->pkt_nb, pkt_len) ||
- !is_offset_correct(umem, pkt, addr) || (ifobj->use_metadata &&
- !is_metadata_correct(pkt, umem->buffer, addr)))
- return TEST_FAILURE;
-
- if (!nb_frags++)
- first_addr = addr;
- frags_processed++;
- pkt_len += desc->len;
- if (ifobj->use_fill_ring)
- *xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = orig;
-
- if (pkt_continues(desc->options))
- continue;
-
- /* The complete packet has been received */
- if (!is_pkt_valid(pkt, umem->buffer, first_addr, pkt_len) ||
- !is_offset_correct(umem, pkt, addr))
- return TEST_FAILURE;
-
- pkt_stream->nb_rx_pkts++;
- nb_frags = 0;
- pkt_len = 0;
- }
-
- if (nb_frags) {
- /* In the middle of a packet. Start over from beginning of packet. */
- idx_rx -= nb_frags;
- xsk_ring_cons__cancel(&xsk->rx, nb_frags);
- if (ifobj->use_fill_ring) {
- idx_fq -= nb_frags;
- xsk_ring_prod__cancel(&umem->fq, nb_frags);
- }
- frags_processed -= nb_frags;
- }
-
- if (ifobj->use_fill_ring)
- xsk_ring_prod__submit(&umem->fq, frags_processed);
- if (ifobj->release_rx)
- xsk_ring_cons__release(&xsk->rx, frags_processed);
-
- pthread_mutex_lock(&pacing_mutex);
- pkts_in_flight -= pkts_sent;
- pthread_mutex_unlock(&pacing_mutex);
- pkts_sent = 0;
-
-return TEST_CONTINUE;
-}
-
-bool all_packets_received(struct test_spec *test, struct xsk_socket_info *xsk, u32 sock_num,
- unsigned long *bitmap)
-{
- struct pkt_stream *pkt_stream = xsk->pkt_stream;
-
- if (!pkt_stream) {
- __set_bit(sock_num, bitmap);
- return false;
- }
-
- if (pkt_stream->nb_rx_pkts == pkt_stream->nb_valid_entries) {
- __set_bit(sock_num, bitmap);
- if (bitmap_full(bitmap, test->nb_sockets))
- return true;
- }
-
- return false;
-}
-
-static int receive_pkts(struct test_spec *test)
-{
- struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
- DECLARE_BITMAP(bitmap, test->nb_sockets);
- struct xsk_socket_info *xsk;
- u32 sock_num = 0;
- int res, ret;
-
- ret = gettimeofday(&tv_now, NULL);
- if (ret)
- exit_with_error(errno);
-
- timeradd(&tv_now, &tv_timeout, &tv_end);
-
- while (1) {
- xsk = &test->ifobj_rx->xsk_arr[sock_num];
-
- if ((all_packets_received(test, xsk, sock_num, bitmap)))
- break;
-
- res = __receive_pkts(test, xsk);
- if (!(res == TEST_PASS || res == TEST_CONTINUE))
- return res;
-
- ret = gettimeofday(&tv_now, NULL);
- if (ret)
- exit_with_error(errno);
-
- if (timercmp(&tv_now, &tv_end, >)) {
- ksft_print_msg("ERROR: [%s] Receive loop timed out\n", __func__);
- return TEST_FAILURE;
- }
- sock_num = (sock_num + 1) % test->nb_sockets;
- }
-
- return TEST_PASS;
-}
-
-static int __send_pkts(struct ifobject *ifobject, struct xsk_socket_info *xsk, bool timeout)
-{
- u32 i, idx = 0, valid_pkts = 0, valid_frags = 0, buffer_len;
- struct pkt_stream *pkt_stream = xsk->pkt_stream;
- struct xsk_umem_info *umem = ifobject->umem;
- bool use_poll = ifobject->use_poll;
- struct pollfd fds = { };
- int ret;
-
- buffer_len = pkt_get_buffer_len(umem, pkt_stream->max_pkt_len);
- /* pkts_in_flight might be negative if many invalid packets are sent */
- if (pkts_in_flight >= (int)((umem_size(umem) - xsk->batch_size * buffer_len) /
- buffer_len)) {
- ret = kick_tx(xsk);
- if (ret)
- return TEST_FAILURE;
- return TEST_CONTINUE;
- }
-
- fds.fd = xsk_socket__fd(xsk->xsk);
- fds.events = POLLOUT;
-
- while (xsk_ring_prod__reserve(&xsk->tx, xsk->batch_size, &idx) < xsk->batch_size) {
- if (use_poll) {
- ret = poll(&fds, 1, POLL_TMOUT);
- if (timeout) {
- if (ret < 0) {
- ksft_print_msg("ERROR: [%s] Poll error %d\n",
- __func__, errno);
- return TEST_FAILURE;
- }
- if (ret == 0)
- return TEST_PASS;
- break;
- }
- if (ret <= 0) {
- ksft_print_msg("ERROR: [%s] Poll error %d\n",
- __func__, errno);
- return TEST_FAILURE;
- }
- }
-
- complete_pkts(xsk, xsk->batch_size);
- }
-
- for (i = 0; i < xsk->batch_size; i++) {
- struct pkt *pkt = pkt_stream_get_next_tx_pkt(pkt_stream);
- u32 nb_frags_left, nb_frags, bytes_written = 0;
-
- if (!pkt)
- break;
-
- nb_frags = pkt_nb_frags(umem->frame_size, pkt_stream, pkt);
- if (nb_frags > xsk->batch_size - i) {
- pkt_stream_cancel(pkt_stream);
- xsk_ring_prod__cancel(&xsk->tx, xsk->batch_size - i);
- break;
- }
- nb_frags_left = nb_frags;
-
- while (nb_frags_left--) {
- struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i);
-
- tx_desc->addr = pkt_get_addr(pkt, ifobject->umem);
- if (pkt_stream->verbatim) {
- tx_desc->len = pkt->len;
- tx_desc->options = pkt->options;
- } else if (nb_frags_left) {
- tx_desc->len = umem->frame_size;
- tx_desc->options = XDP_PKT_CONTD;
- } else {
- tx_desc->len = pkt->len - bytes_written;
- tx_desc->options = 0;
- }
- if (pkt->valid)
- pkt_generate(xsk, umem, tx_desc->addr, tx_desc->len, pkt->pkt_nb,
- bytes_written);
- bytes_written += tx_desc->len;
-
- print_verbose("Tx addr: %llx len: %u options: %u pkt_nb: %u\n",
- tx_desc->addr, tx_desc->len, tx_desc->options, pkt->pkt_nb);
-
- if (nb_frags_left) {
- i++;
- if (pkt_stream->verbatim)
- pkt = pkt_stream_get_next_tx_pkt(pkt_stream);
- }
- }
-
- if (pkt && pkt->valid) {
- valid_pkts++;
- valid_frags += nb_frags;
- }
- }
-
- pthread_mutex_lock(&pacing_mutex);
- pkts_in_flight += valid_pkts;
- pthread_mutex_unlock(&pacing_mutex);
-
- xsk_ring_prod__submit(&xsk->tx, i);
- xsk->outstanding_tx += valid_frags;
-
- if (use_poll) {
- ret = poll(&fds, 1, POLL_TMOUT);
- if (ret <= 0) {
- if (ret == 0 && timeout)
- return TEST_PASS;
-
- ksft_print_msg("ERROR: [%s] Poll error %d\n", __func__, ret);
- return TEST_FAILURE;
- }
- }
-
- if (!timeout) {
- if (complete_pkts(xsk, i))
- return TEST_FAILURE;
-
- usleep(10);
- return TEST_PASS;
- }
-
- return TEST_CONTINUE;
-}
-
-static int wait_for_tx_completion(struct xsk_socket_info *xsk)
-{
- struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
- int ret;
-
- ret = gettimeofday(&tv_now, NULL);
- if (ret)
- exit_with_error(errno);
- timeradd(&tv_now, &tv_timeout, &tv_end);
-
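-	/* Drain the completion ring until every outstanding Tx descriptor has been reclaimed or the timeout expires */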
- while (xsk->outstanding_tx) {
- ret = gettimeofday(&tv_now, NULL);
- if (ret)
- exit_with_error(errno);
- if (timercmp(&tv_now, &tv_end, >)) {
- ksft_print_msg("ERROR: [%s] Transmission loop timed out\n", __func__);
- return TEST_FAILURE;
- }
-
- complete_pkts(xsk, xsk->batch_size);
- }
-
- return TEST_PASS;
-}
-
-bool all_packets_sent(struct test_spec *test, unsigned long *bitmap)
-{
- return bitmap_full(bitmap, test->nb_sockets);
-}
-
-static int send_pkts(struct test_spec *test, struct ifobject *ifobject)
-{
- bool timeout = !is_umem_valid(test->ifobj_rx);
- DECLARE_BITMAP(bitmap, test->nb_sockets);
-	u32 i, ret;
-
-	bitmap_zero(bitmap, test->nb_sockets);
-
-	while (!all_packets_sent(test, bitmap)) {
- for (i = 0; i < test->nb_sockets; i++) {
- struct pkt_stream *pkt_stream;
-
- pkt_stream = ifobject->xsk_arr[i].pkt_stream;
- if (!pkt_stream || pkt_stream->current_pkt_nb >= pkt_stream->nb_pkts) {
- __set_bit(i, bitmap);
- continue;
- }
- ret = __send_pkts(ifobject, &ifobject->xsk_arr[i], timeout);
- if (ret == TEST_CONTINUE && !test->fail)
- continue;
-
- if ((ret || test->fail) && !timeout)
- return TEST_FAILURE;
-
- if (ret == TEST_PASS && timeout)
- return ret;
-
- ret = wait_for_tx_completion(&ifobject->xsk_arr[i]);
- if (ret)
- return TEST_FAILURE;
- }
- }
-
- return TEST_PASS;
-}
-
-static int get_xsk_stats(struct xsk_socket *xsk, struct xdp_statistics *stats)
-{
- int fd = xsk_socket__fd(xsk), err;
- socklen_t optlen, expected_len;
-
- optlen = sizeof(*stats);
- err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, stats, &optlen);
- if (err) {
- ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n",
- __func__, -err, strerror(-err));
- return TEST_FAILURE;
- }
-
- expected_len = sizeof(struct xdp_statistics);
- if (optlen != expected_len) {
- ksft_print_msg("[%s] getsockopt optlen error. Expected: %u got: %u\n",
- __func__, expected_len, optlen);
- return TEST_FAILURE;
- }
-
- return TEST_PASS;
-}
-
-static int validate_rx_dropped(struct ifobject *ifobject)
-{
- struct xsk_socket *xsk = ifobject->xsk->xsk;
- struct xdp_statistics stats;
- int err;
-
- err = kick_rx(ifobject->xsk);
- if (err)
- return TEST_FAILURE;
-
- err = get_xsk_stats(xsk, &stats);
- if (err)
- return TEST_FAILURE;
-
- /* The receiver calls getsockopt after receiving the last (valid)
- * packet which is not the final packet sent in this test (valid and
- * invalid packets are sent in alternating fashion with the final
- * packet being invalid). Since the last packet may or may not have
- * been dropped already, both outcomes must be allowed.
- */
- if (stats.rx_dropped == ifobject->xsk->pkt_stream->nb_pkts / 2 ||
- stats.rx_dropped == ifobject->xsk->pkt_stream->nb_pkts / 2 - 1)
- return TEST_PASS;
-
- return TEST_FAILURE;
-}
-
-static int validate_rx_full(struct ifobject *ifobject)
-{
- struct xsk_socket *xsk = ifobject->xsk->xsk;
- struct xdp_statistics stats;
- int err;
-
- usleep(1000);
- err = kick_rx(ifobject->xsk);
- if (err)
- return TEST_FAILURE;
-
- err = get_xsk_stats(xsk, &stats);
- if (err)
- return TEST_FAILURE;
-
- if (stats.rx_ring_full)
- return TEST_PASS;
-
- return TEST_FAILURE;
-}
-
-static int validate_fill_empty(struct ifobject *ifobject)
-{
- struct xsk_socket *xsk = ifobject->xsk->xsk;
- struct xdp_statistics stats;
- int err;
-
- usleep(1000);
- err = kick_rx(ifobject->xsk);
- if (err)
- return TEST_FAILURE;
-
- err = get_xsk_stats(xsk, &stats);
- if (err)
- return TEST_FAILURE;
-
- if (stats.rx_fill_ring_empty_descs)
- return TEST_PASS;
-
- return TEST_FAILURE;
-}
-
-static int validate_tx_invalid_descs(struct ifobject *ifobject)
-{
- struct xsk_socket *xsk = ifobject->xsk->xsk;
- int fd = xsk_socket__fd(xsk);
- struct xdp_statistics stats;
- socklen_t optlen;
- int err;
-
- optlen = sizeof(stats);
- err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen);
- if (err) {
- ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n",
- __func__, -err, strerror(-err));
- return TEST_FAILURE;
- }
-
- if (stats.tx_invalid_descs != ifobject->xsk->pkt_stream->nb_pkts / 2) {
- ksft_print_msg("[%s] tx_invalid_descs incorrect. Got [%llu] expected [%u]\n",
- __func__,
- (unsigned long long)stats.tx_invalid_descs,
-			       ifobject->xsk->pkt_stream->nb_pkts / 2);
- return TEST_FAILURE;
- }
-
- return TEST_PASS;
-}
-
-static void xsk_configure_socket(struct test_spec *test, struct ifobject *ifobject,
- struct xsk_umem_info *umem, bool tx)
-{
- int i, ret;
-
- for (i = 0; i < test->nb_sockets; i++) {
- bool shared = (ifobject->shared_umem && tx) ? true : !!i;
- u32 ctr = 0;
-
- while (ctr++ < SOCK_RECONF_CTR) {
- ret = __xsk_configure_socket(&ifobject->xsk_arr[i], umem,
- ifobject, shared);
- if (!ret)
- break;
-
- /* Retry if it fails as xsk_socket__create() is asynchronous */
- if (ctr >= SOCK_RECONF_CTR)
- exit_with_error(-ret);
- usleep(USLEEP_MAX);
- }
- if (ifobject->busy_poll)
- enable_busy_poll(&ifobject->xsk_arr[i]);
- }
-}
-
-static void thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobject)
-{
- xsk_configure_socket(test, ifobject, test->ifobj_rx->umem, true);
- ifobject->xsk = &ifobject->xsk_arr[0];
- ifobject->xskmap = test->ifobj_rx->xskmap;
- memcpy(ifobject->umem, test->ifobj_rx->umem, sizeof(struct xsk_umem_info));
- ifobject->umem->base_addr = 0;
-}
-
-static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream,
- bool fill_up)
-{
- u32 rx_frame_size = umem->frame_size - XDP_PACKET_HEADROOM;
- u32 idx = 0, filled = 0, buffers_to_fill, nb_pkts;
- int ret;
-
- if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS)
- buffers_to_fill = umem->num_frames;
- else
- buffers_to_fill = umem->fill_size;
-
- ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx);
- if (ret != buffers_to_fill)
- exit_with_error(ENOSPC);
-
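-	/* Post one buffer per expected Rx fragment; plain sequential addresses are used when merely topping up the ring */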
- while (filled < buffers_to_fill) {
- struct pkt *pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &nb_pkts);
- u64 addr;
- u32 i;
-
- for (i = 0; i < pkt_nb_frags(rx_frame_size, pkt_stream, pkt); i++) {
- if (!pkt) {
- if (!fill_up)
- break;
- addr = filled * umem->frame_size + umem->base_addr;
- } else if (pkt->offset >= 0) {
- addr = pkt->offset % umem->frame_size + umem_alloc_buffer(umem);
- } else {
- addr = pkt->offset + umem_alloc_buffer(umem);
- }
-
- *xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr;
- if (++filled >= buffers_to_fill)
- break;
- }
- }
- xsk_ring_prod__submit(&umem->fq, filled);
- xsk_ring_prod__cancel(&umem->fq, buffers_to_fill - filled);
-
- pkt_stream_reset(pkt_stream);
- umem_reset_alloc(umem);
-}
-
-static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
-{
- u64 umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size;
- int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
- LIBBPF_OPTS(bpf_xdp_query_opts, opts);
- void *bufs;
- int ret;
- u32 i;
-
- if (ifobject->umem->unaligned_mode)
- mmap_flags |= MAP_HUGETLB | MAP_HUGE_2MB;
-
- if (ifobject->shared_umem)
- umem_sz *= 2;
-
- bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
- if (bufs == MAP_FAILED)
- exit_with_error(errno);
-
- ret = xsk_configure_umem(ifobject, ifobject->umem, bufs, umem_sz);
- if (ret)
- exit_with_error(-ret);
-
- xsk_configure_socket(test, ifobject, ifobject->umem, false);
-
- ifobject->xsk = &ifobject->xsk_arr[0];
-
- if (!ifobject->rx_on)
- return;
-
- xsk_populate_fill_ring(ifobject->umem, ifobject->xsk->pkt_stream, ifobject->use_fill_ring);
-
- for (i = 0; i < test->nb_sockets; i++) {
- ifobject->xsk = &ifobject->xsk_arr[i];
- ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk, i);
- if (ret)
- exit_with_error(errno);
- }
-}
-
-static void *worker_testapp_validate_tx(void *arg)
-{
- struct test_spec *test = (struct test_spec *)arg;
- struct ifobject *ifobject = test->ifobj_tx;
- int err;
-
- if (test->current_step == 1) {
- if (!ifobject->shared_umem)
- thread_common_ops(test, ifobject);
- else
- thread_common_ops_tx(test, ifobject);
- }
-
- err = send_pkts(test, ifobject);
-
- if (!err && ifobject->validation_func)
- err = ifobject->validation_func(ifobject);
- if (err)
- report_failure(test);
-
- pthread_exit(NULL);
-}
-
-static void *worker_testapp_validate_rx(void *arg)
-{
- struct test_spec *test = (struct test_spec *)arg;
- struct ifobject *ifobject = test->ifobj_rx;
- int err;
-
- if (test->current_step == 1) {
- thread_common_ops(test, ifobject);
- } else {
- xsk_clear_xskmap(ifobject->xskmap);
- err = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk, 0);
- if (err) {
- ksft_print_msg("Error: Failed to update xskmap, error %s\n",
- strerror(-err));
- exit_with_error(-err);
- }
- }
-
- pthread_barrier_wait(&barr);
-
- err = receive_pkts(test);
-
- if (!err && ifobject->validation_func)
- err = ifobject->validation_func(ifobject);
-
- if (err) {
- if (test->adjust_tail && !is_adjust_tail_supported(ifobject->xdp_progs))
- test->adjust_tail_support = false;
- else
- report_failure(test);
- }
-
- pthread_exit(NULL);
-}
-
-static u64 ceil_u64(u64 a, u64 b)
-{
- return (a + b - 1) / b;
-}
-
-static void testapp_clean_xsk_umem(struct ifobject *ifobj)
-{
- u64 umem_sz = ifobj->umem->num_frames * ifobj->umem->frame_size;
-
- if (ifobj->shared_umem)
- umem_sz *= 2;
-
- umem_sz = ceil_u64(umem_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE;
- xsk_umem__delete(ifobj->umem->umem);
- munmap(ifobj->umem->buffer, umem_sz);
-}
-
-static void handler(int signum)
-{
- pthread_exit(NULL);
-}
-
-static bool xdp_prog_changed_rx(struct test_spec *test)
-{
- struct ifobject *ifobj = test->ifobj_rx;
-
- return ifobj->xdp_prog != test->xdp_prog_rx || ifobj->mode != test->mode;
-}
-
-static bool xdp_prog_changed_tx(struct test_spec *test)
-{
- struct ifobject *ifobj = test->ifobj_tx;
-
- return ifobj->xdp_prog != test->xdp_prog_tx || ifobj->mode != test->mode;
-}
-
-static void xsk_reattach_xdp(struct ifobject *ifobj, struct bpf_program *xdp_prog,
- struct bpf_map *xskmap, enum test_mode mode)
-{
- int err;
-
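-	/* Swap programs: detach whatever is loaded now, then attach the new program in the requested mode */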
- xsk_detach_xdp_program(ifobj->ifindex, mode_to_xdp_flags(ifobj->mode));
- err = xsk_attach_xdp_program(xdp_prog, ifobj->ifindex, mode_to_xdp_flags(mode));
- if (err) {
- ksft_print_msg("Error attaching XDP program\n");
- exit_with_error(-err);
- }
-
- if (ifobj->mode != mode && (mode == TEST_MODE_DRV || mode == TEST_MODE_ZC))
- if (!xsk_is_in_mode(ifobj->ifindex, XDP_FLAGS_DRV_MODE)) {
- ksft_print_msg("ERROR: XDP prog not in DRV mode\n");
- exit_with_error(EINVAL);
- }
-
- ifobj->xdp_prog = xdp_prog;
- ifobj->xskmap = xskmap;
- ifobj->mode = mode;
-}
-
-static void xsk_attach_xdp_progs(struct test_spec *test, struct ifobject *ifobj_rx,
- struct ifobject *ifobj_tx)
-{
- if (xdp_prog_changed_rx(test))
- xsk_reattach_xdp(ifobj_rx, test->xdp_prog_rx, test->xskmap_rx, test->mode);
-
- if (!ifobj_tx || ifobj_tx->shared_umem)
- return;
-
- if (xdp_prog_changed_tx(test))
- xsk_reattach_xdp(ifobj_tx, test->xdp_prog_tx, test->xskmap_tx, test->mode);
-}
-
-static int __testapp_validate_traffic(struct test_spec *test, struct ifobject *ifobj1,
- struct ifobject *ifobj2)
-{
- pthread_t t0, t1;
- int err;
-
- if (test->mtu > MAX_ETH_PKT_SIZE) {
- if (test->mode == TEST_MODE_ZC && (!ifobj1->multi_buff_zc_supp ||
- (ifobj2 && !ifobj2->multi_buff_zc_supp))) {
- ksft_test_result_skip("Multi buffer for zero-copy not supported.\n");
- return TEST_SKIP;
- }
- if (test->mode != TEST_MODE_ZC && (!ifobj1->multi_buff_supp ||
- (ifobj2 && !ifobj2->multi_buff_supp))) {
- ksft_test_result_skip("Multi buffer not supported.\n");
- return TEST_SKIP;
- }
- }
- err = test_spec_set_mtu(test, test->mtu);
- if (err) {
- ksft_print_msg("Error, could not set mtu.\n");
- exit_with_error(err);
- }
-
- if (ifobj2) {
- if (pthread_barrier_init(&barr, NULL, 2))
- exit_with_error(errno);
- pkt_stream_reset(ifobj2->xsk->pkt_stream);
- }
-
- test->current_step++;
- pkt_stream_reset(ifobj1->xsk->pkt_stream);
- pkts_in_flight = 0;
-
- signal(SIGUSR1, handler);
-	/* Spawn RX thread */
- pthread_create(&t0, NULL, ifobj1->func_ptr, test);
-
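-	/* The barrier guarantees the Rx thread has finished its socket setup before the Tx thread starts sending */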
- if (ifobj2) {
- pthread_barrier_wait(&barr);
- if (pthread_barrier_destroy(&barr))
- exit_with_error(errno);
-
-		/* Spawn TX thread */
- pthread_create(&t1, NULL, ifobj2->func_ptr, test);
-
- pthread_join(t1, NULL);
- }
-
- if (!ifobj2)
- pthread_kill(t0, SIGUSR1);
- else
- pthread_join(t0, NULL);
-
- if (test->total_steps == test->current_step || test->fail) {
- u32 i;
-
- if (ifobj2)
- for (i = 0; i < test->nb_sockets; i++)
- xsk_socket__delete(ifobj2->xsk_arr[i].xsk);
-
- for (i = 0; i < test->nb_sockets; i++)
- xsk_socket__delete(ifobj1->xsk_arr[i].xsk);
-
- testapp_clean_xsk_umem(ifobj1);
- if (ifobj2 && !ifobj2->shared_umem)
- testapp_clean_xsk_umem(ifobj2);
- }
-
- return !!test->fail;
-}
-
-static int testapp_validate_traffic(struct test_spec *test)
-{
- struct ifobject *ifobj_rx = test->ifobj_rx;
- struct ifobject *ifobj_tx = test->ifobj_tx;
-
- if ((ifobj_rx->umem->unaligned_mode && !ifobj_rx->unaligned_supp) ||
- (ifobj_tx->umem->unaligned_mode && !ifobj_tx->unaligned_supp)) {
- ksft_test_result_skip("No huge pages present.\n");
- return TEST_SKIP;
- }
-
- if (test->set_ring) {
- if (ifobj_tx->hw_ring_size_supp) {
- if (set_ring_size(ifobj_tx)) {
- ksft_test_result_skip("Failed to change HW ring size.\n");
- return TEST_FAILURE;
- }
- } else {
- ksft_test_result_skip("Changing HW ring size not supported.\n");
- return TEST_SKIP;
- }
- }
-
- xsk_attach_xdp_progs(test, ifobj_rx, ifobj_tx);
- return __testapp_validate_traffic(test, ifobj_rx, ifobj_tx);
-}
-
-static int testapp_validate_traffic_single_thread(struct test_spec *test, struct ifobject *ifobj)
-{
- return __testapp_validate_traffic(test, ifobj, NULL);
-}
-
-static int testapp_teardown(struct test_spec *test)
-{
- int i;
-
- for (i = 0; i < MAX_TEARDOWN_ITER; i++) {
- if (testapp_validate_traffic(test))
- return TEST_FAILURE;
- test_spec_reset(test);
- }
-
- return TEST_PASS;
-}
-
-static void swap_directions(struct ifobject **ifobj1, struct ifobject **ifobj2)
-{
- thread_func_t tmp_func_ptr = (*ifobj1)->func_ptr;
- struct ifobject *tmp_ifobj = (*ifobj1);
-
- (*ifobj1)->func_ptr = (*ifobj2)->func_ptr;
- (*ifobj2)->func_ptr = tmp_func_ptr;
-
- *ifobj1 = *ifobj2;
- *ifobj2 = tmp_ifobj;
-}
-
-static int testapp_bidirectional(struct test_spec *test)
-{
- int res;
-
- test->ifobj_tx->rx_on = true;
- test->ifobj_rx->tx_on = true;
- test->total_steps = 2;
- if (testapp_validate_traffic(test))
- return TEST_FAILURE;
-
- print_verbose("Switching Tx/Rx direction\n");
- swap_directions(&test->ifobj_rx, &test->ifobj_tx);
- res = __testapp_validate_traffic(test, test->ifobj_rx, test->ifobj_tx);
-
- swap_directions(&test->ifobj_rx, &test->ifobj_tx);
- return res;
-}
-
-static int swap_xsk_resources(struct test_spec *test)
-{
- int ret;
-
- test->ifobj_tx->xsk_arr[0].pkt_stream = NULL;
- test->ifobj_rx->xsk_arr[0].pkt_stream = NULL;
- test->ifobj_tx->xsk_arr[1].pkt_stream = test->tx_pkt_stream_default;
- test->ifobj_rx->xsk_arr[1].pkt_stream = test->rx_pkt_stream_default;
- test->ifobj_tx->xsk = &test->ifobj_tx->xsk_arr[1];
- test->ifobj_rx->xsk = &test->ifobj_rx->xsk_arr[1];
-
- ret = xsk_update_xskmap(test->ifobj_rx->xskmap, test->ifobj_rx->xsk->xsk, 0);
- if (ret)
- return TEST_FAILURE;
-
- return TEST_PASS;
-}
-
-static int testapp_xdp_prog_cleanup(struct test_spec *test)
-{
- test->total_steps = 2;
- test->nb_sockets = 2;
- if (testapp_validate_traffic(test))
- return TEST_FAILURE;
-
- if (swap_xsk_resources(test))
- return TEST_FAILURE;
- return testapp_validate_traffic(test);
-}
-
-static int testapp_headroom(struct test_spec *test)
-{
- test->ifobj_rx->umem->frame_headroom = UMEM_HEADROOM_TEST_SIZE;
- return testapp_validate_traffic(test);
-}
-
-static int testapp_stats_rx_dropped(struct test_spec *test)
-{
- if (test->mode == TEST_MODE_ZC) {
- ksft_test_result_skip("Can not run RX_DROPPED test for ZC mode\n");
- return TEST_SKIP;
- }
-
- pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0);
- test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size -
- XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 3;
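-	/* The inflated headroom leaves too little room in each frame for the larger packets, forcing Rx drops */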
- pkt_stream_receive_half(test);
- test->ifobj_rx->validation_func = validate_rx_dropped;
- return testapp_validate_traffic(test);
-}
-
-static int testapp_stats_tx_invalid_descs(struct test_spec *test)
-{
- pkt_stream_replace_half(test, XSK_UMEM__INVALID_FRAME_SIZE, 0);
- test->ifobj_tx->validation_func = validate_tx_invalid_descs;
- return testapp_validate_traffic(test);
-}
-
-static int testapp_stats_rx_full(struct test_spec *test)
-{
- pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
- test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
-
- test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS;
- test->ifobj_rx->release_rx = false;
- test->ifobj_rx->validation_func = validate_rx_full;
- return testapp_validate_traffic(test);
-}
-
-static int testapp_stats_fill_empty(struct test_spec *test)
-{
- pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
- test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
-
- test->ifobj_rx->use_fill_ring = false;
- test->ifobj_rx->validation_func = validate_fill_empty;
- return testapp_validate_traffic(test);
-}
-
-static int testapp_send_receive_unaligned(struct test_spec *test)
-{
- test->ifobj_tx->umem->unaligned_mode = true;
- test->ifobj_rx->umem->unaligned_mode = true;
- /* Let half of the packets straddle a 4K buffer boundary */
- pkt_stream_replace_half(test, MIN_PKT_SIZE, -MIN_PKT_SIZE / 2);
-
- return testapp_validate_traffic(test);
-}
-
-static int testapp_send_receive_unaligned_mb(struct test_spec *test)
-{
- test->mtu = MAX_ETH_JUMBO_SIZE;
- test->ifobj_tx->umem->unaligned_mode = true;
- test->ifobj_rx->umem->unaligned_mode = true;
- pkt_stream_replace(test, DEFAULT_PKT_CNT, MAX_ETH_JUMBO_SIZE);
- return testapp_validate_traffic(test);
-}
-
-static int testapp_single_pkt(struct test_spec *test)
-{
- struct pkt pkts[] = {{0, MIN_PKT_SIZE, 0, true}};
-
- pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
- return testapp_validate_traffic(test);
-}
-
-static int testapp_send_receive_mb(struct test_spec *test)
-{
- test->mtu = MAX_ETH_JUMBO_SIZE;
- pkt_stream_replace(test, DEFAULT_PKT_CNT, MAX_ETH_JUMBO_SIZE);
-
- return testapp_validate_traffic(test);
-}
-
-static int testapp_invalid_desc_mb(struct test_spec *test)
-{
- struct xsk_umem_info *umem = test->ifobj_tx->umem;
- u64 umem_size = umem->num_frames * umem->frame_size;
- struct pkt pkts[] = {
-		/* Valid packet for sync to start with */
- {0, MIN_PKT_SIZE, 0, true, 0},
- /* Zero frame len is not legal */
- {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
- {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
- {0, 0, 0, false, 0},
- /* Invalid address in the second frame */
- {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
- {umem_size, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
- /* Invalid len in the middle */
- {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
- {0, XSK_UMEM__INVALID_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
- /* Invalid options in the middle */
- {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
- {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XSK_DESC__INVALID_OPTION},
- /* Transmit 2 frags, receive 3 */
- {0, XSK_UMEM__MAX_FRAME_SIZE, 0, true, XDP_PKT_CONTD},
- {0, XSK_UMEM__MAX_FRAME_SIZE, 0, true, 0},
- /* Middle frame crosses chunk boundary with small length */
- {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
- {-MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false, 0},
-		/* Valid packet for sync so that something is received */
- {0, MIN_PKT_SIZE, 0, true, 0}};
-
- if (umem->unaligned_mode) {
- /* Crossing a chunk boundary allowed */
- pkts[12].valid = true;
- pkts[13].valid = true;
- }
-
- test->mtu = MAX_ETH_JUMBO_SIZE;
- pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
- return testapp_validate_traffic(test);
-}
-
-static int testapp_invalid_desc(struct test_spec *test)
-{
- struct xsk_umem_info *umem = test->ifobj_tx->umem;
- u64 umem_size = umem->num_frames * umem->frame_size;
- struct pkt pkts[] = {
- /* Zero packet address allowed */
- {0, MIN_PKT_SIZE, 0, true},
- /* Allowed packet */
- {0, MIN_PKT_SIZE, 0, true},
- /* Straddling the start of umem */
- {-2, MIN_PKT_SIZE, 0, false},
- /* Packet too large */
- {0, XSK_UMEM__INVALID_FRAME_SIZE, 0, false},
- /* Up to end of umem allowed */
- {umem_size - MIN_PKT_SIZE - 2 * umem->frame_size, MIN_PKT_SIZE, 0, true},
- /* After umem ends */
- {umem_size, MIN_PKT_SIZE, 0, false},
- /* Straddle the end of umem */
- {umem_size - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false},
- /* Straddle a 4K boundary */
- {0x1000 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false},
- /* Straddle a 2K boundary */
- {0x800 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, true},
-		/* Valid packet for sync so that something is received */
- {0, MIN_PKT_SIZE, 0, true}};
-
- if (umem->unaligned_mode) {
- /* Crossing a page boundary allowed */
- pkts[7].valid = true;
- }
- if (umem->frame_size == XSK_UMEM__DEFAULT_FRAME_SIZE / 2) {
- /* Crossing a 2K frame size boundary not allowed */
- pkts[8].valid = false;
- }
-
- if (test->ifobj_tx->shared_umem) {
- pkts[4].offset += umem_size;
- pkts[5].offset += umem_size;
- pkts[6].offset += umem_size;
- }
-
- pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
- return testapp_validate_traffic(test);
-}
-
-static int testapp_xdp_drop(struct test_spec *test)
-{
- struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
- struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
-
- test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_drop, skel_tx->progs.xsk_xdp_drop,
- skel_rx->maps.xsk, skel_tx->maps.xsk);
-
- pkt_stream_receive_half(test);
- return testapp_validate_traffic(test);
-}
-
-static int testapp_xdp_metadata_copy(struct test_spec *test)
-{
- struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
- struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
- struct bpf_map *data_map;
- int count = 0;
- int key = 0;
-
- test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_populate_metadata,
- skel_tx->progs.xsk_xdp_populate_metadata,
- skel_rx->maps.xsk, skel_tx->maps.xsk);
- test->ifobj_rx->use_metadata = true;
-
- data_map = bpf_object__find_map_by_name(skel_rx->obj, "xsk_xdp_.bss");
- if (!data_map || !bpf_map__is_internal(data_map)) {
- ksft_print_msg("Error: could not find bss section of XDP program\n");
- return TEST_FAILURE;
- }
-
- if (bpf_map_update_elem(bpf_map__fd(data_map), &key, &count, BPF_ANY)) {
- ksft_print_msg("Error: could not update count element\n");
- return TEST_FAILURE;
- }
-
- return testapp_validate_traffic(test);
-}
-
-static int testapp_xdp_shared_umem(struct test_spec *test)
-{
- struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
- struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
-
- test->total_steps = 1;
- test->nb_sockets = 2;
-
- test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_shared_umem,
- skel_tx->progs.xsk_xdp_shared_umem,
- skel_rx->maps.xsk, skel_tx->maps.xsk);
-
- pkt_stream_even_odd_sequence(test);
-
- return testapp_validate_traffic(test);
-}
-
-static int testapp_poll_txq_tmout(struct test_spec *test)
-{
- test->ifobj_tx->use_poll = true;
-	/* Create invalid frames by setting both the umem frame_size and the pkt length to 2048 */
- test->ifobj_tx->umem->frame_size = 2048;
- pkt_stream_replace(test, 2 * DEFAULT_PKT_CNT, 2048);
- return testapp_validate_traffic_single_thread(test, test->ifobj_tx);
-}
-
-static int testapp_poll_rxq_tmout(struct test_spec *test)
-{
- test->ifobj_rx->use_poll = true;
- return testapp_validate_traffic_single_thread(test, test->ifobj_rx);
-}
-
-static int testapp_too_many_frags(struct test_spec *test)
-{
- struct pkt *pkts;
- u32 max_frags, i;
- int ret;
-
- if (test->mode == TEST_MODE_ZC) {
- max_frags = test->ifobj_tx->xdp_zc_max_segs;
- } else {
- max_frags = get_max_skb_frags();
- if (!max_frags) {
- ksft_print_msg("Couldn't retrieve MAX_SKB_FRAGS from system, using default (17) value\n");
- max_frags = 17;
- }
- max_frags += 1;
- }
-
- pkts = calloc(2 * max_frags + 2, sizeof(struct pkt));
- if (!pkts)
- return TEST_FAILURE;
-
- test->mtu = MAX_ETH_JUMBO_SIZE;
-
-	/* Valid packet for sync */
- pkts[0].len = MIN_PKT_SIZE;
- pkts[0].valid = true;
-
- /* One valid packet with the max amount of frags */
- for (i = 1; i < max_frags + 1; i++) {
- pkts[i].len = MIN_PKT_SIZE;
- pkts[i].options = XDP_PKT_CONTD;
- pkts[i].valid = true;
- }
- pkts[max_frags].options = 0;
-
-	/* An invalid packet with the max amount of frags that still signals
-	 * packet continuation on the last frag
-	 */
- for (i = max_frags + 1; i < 2 * max_frags + 1; i++) {
- pkts[i].len = MIN_PKT_SIZE;
- pkts[i].options = XDP_PKT_CONTD;
- pkts[i].valid = false;
- }
-
-	/* Valid packet for sync */
- pkts[2 * max_frags + 1].len = MIN_PKT_SIZE;
- pkts[2 * max_frags + 1].valid = true;
-
- pkt_stream_generate_custom(test, pkts, 2 * max_frags + 2);
- ret = testapp_validate_traffic(test);
-
- free(pkts);
- return ret;
-}
-
-static int xsk_load_xdp_programs(struct ifobject *ifobj)
-{
- ifobj->xdp_progs = xsk_xdp_progs__open_and_load();
- if (libbpf_get_error(ifobj->xdp_progs))
- return libbpf_get_error(ifobj->xdp_progs);
-
- return 0;
-}
-
static void xsk_unload_xdp_programs(struct ifobject *ifobj)
{
xsk_xdp_progs__destroy(ifobj->xdp_progs);
}
-/* Simple test */
-static bool hugepages_present(void)
-{
- size_t mmap_sz = 2 * DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE;
- void *bufs;
-
-	bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
-		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_2MB, -1, 0);
- if (bufs == MAP_FAILED)
- return false;
-
- mmap_sz = ceil_u64(mmap_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE;
- munmap(bufs, mmap_sz);
- return true;
-}
-
-static void init_iface(struct ifobject *ifobj, thread_func_t func_ptr)
-{
- LIBBPF_OPTS(bpf_xdp_query_opts, query_opts);
- int err;
-
- ifobj->func_ptr = func_ptr;
-
- err = xsk_load_xdp_programs(ifobj);
- if (err) {
- ksft_print_msg("Error loading XDP program\n");
- exit_with_error(err);
- }
-
- if (hugepages_present())
- ifobj->unaligned_supp = true;
-
- err = bpf_xdp_query(ifobj->ifindex, XDP_FLAGS_DRV_MODE, &query_opts);
- if (err) {
- ksft_print_msg("Error querying XDP capabilities\n");
- exit_with_error(-err);
- }
- if (query_opts.feature_flags & NETDEV_XDP_ACT_RX_SG)
- ifobj->multi_buff_supp = true;
- if (query_opts.feature_flags & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
- if (query_opts.xdp_zc_max_segs > 1) {
- ifobj->multi_buff_zc_supp = true;
- ifobj->xdp_zc_max_segs = query_opts.xdp_zc_max_segs;
- } else {
- ifobj->xdp_zc_max_segs = 0;
- }
- }
-}
-
-static int testapp_send_receive(struct test_spec *test)
-{
- return testapp_validate_traffic(test);
-}
-
-static int testapp_send_receive_2k_frame(struct test_spec *test)
-{
- test->ifobj_tx->umem->frame_size = 2048;
- test->ifobj_rx->umem->frame_size = 2048;
- pkt_stream_replace(test, DEFAULT_PKT_CNT, MIN_PKT_SIZE);
- return testapp_validate_traffic(test);
-}
-
-static int testapp_poll_rx(struct test_spec *test)
-{
- test->ifobj_rx->use_poll = true;
- return testapp_validate_traffic(test);
-}
-
-static int testapp_poll_tx(struct test_spec *test)
-{
- test->ifobj_tx->use_poll = true;
- return testapp_validate_traffic(test);
-}
-
-static int testapp_aligned_inv_desc(struct test_spec *test)
-{
- return testapp_invalid_desc(test);
-}
-
-static int testapp_aligned_inv_desc_2k_frame(struct test_spec *test)
-{
- test->ifobj_tx->umem->frame_size = 2048;
- test->ifobj_rx->umem->frame_size = 2048;
- return testapp_invalid_desc(test);
-}
-
-static int testapp_unaligned_inv_desc(struct test_spec *test)
-{
- test->ifobj_tx->umem->unaligned_mode = true;
- test->ifobj_rx->umem->unaligned_mode = true;
- return testapp_invalid_desc(test);
-}
-
-static int testapp_unaligned_inv_desc_4001_frame(struct test_spec *test)
-{
- u64 page_size, umem_size;
-
- /* Odd frame size so the UMEM doesn't end near a page boundary. */
- test->ifobj_tx->umem->frame_size = 4001;
- test->ifobj_rx->umem->frame_size = 4001;
- test->ifobj_tx->umem->unaligned_mode = true;
- test->ifobj_rx->umem->unaligned_mode = true;
-	/* This test exercises descriptors that straddle the end of
-	 * the UMEM but not a page.
-	 */
- page_size = sysconf(_SC_PAGESIZE);
- umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size;
- assert(umem_size % page_size > MIN_PKT_SIZE);
- assert(umem_size % page_size < page_size - MIN_PKT_SIZE);
-
- return testapp_invalid_desc(test);
-}
-
-static int testapp_aligned_inv_desc_mb(struct test_spec *test)
-{
- return testapp_invalid_desc_mb(test);
-}
-
-static int testapp_unaligned_inv_desc_mb(struct test_spec *test)
-{
- test->ifobj_tx->umem->unaligned_mode = true;
- test->ifobj_rx->umem->unaligned_mode = true;
- return testapp_invalid_desc_mb(test);
-}
-
-static int testapp_xdp_metadata(struct test_spec *test)
-{
- return testapp_xdp_metadata_copy(test);
-}
-
-static int testapp_xdp_metadata_mb(struct test_spec *test)
-{
- test->mtu = MAX_ETH_JUMBO_SIZE;
- return testapp_xdp_metadata_copy(test);
-}
-
-static int testapp_hw_sw_min_ring_size(struct test_spec *test)
-{
- int ret;
-
- test->set_ring = true;
- test->total_steps = 2;
- test->ifobj_tx->ring.tx_pending = DEFAULT_BATCH_SIZE;
- test->ifobj_tx->ring.rx_pending = DEFAULT_BATCH_SIZE * 2;
- test->ifobj_tx->xsk->batch_size = 1;
- test->ifobj_rx->xsk->batch_size = 1;
- ret = testapp_validate_traffic(test);
- if (ret)
- return ret;
-
- /* Set batch size to hw_ring_size - 1 */
- test->ifobj_tx->xsk->batch_size = DEFAULT_BATCH_SIZE - 1;
- test->ifobj_rx->xsk->batch_size = DEFAULT_BATCH_SIZE - 1;
- return testapp_validate_traffic(test);
-}
-
-static int testapp_hw_sw_max_ring_size(struct test_spec *test)
-{
- u32 max_descs = XSK_RING_PROD__DEFAULT_NUM_DESCS * 4;
- int ret;
-
- test->set_ring = true;
- test->total_steps = 2;
- test->ifobj_tx->ring.tx_pending = test->ifobj_tx->ring.tx_max_pending;
- test->ifobj_tx->ring.rx_pending = test->ifobj_tx->ring.rx_max_pending;
- test->ifobj_rx->umem->num_frames = max_descs;
- test->ifobj_rx->umem->fill_size = max_descs;
- test->ifobj_rx->umem->comp_size = max_descs;
- test->ifobj_tx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
- test->ifobj_rx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
-
- ret = testapp_validate_traffic(test);
- if (ret)
- return ret;
-
-	/* Set batch_size to tx_max_pending - 8 (8152 on ice) for testing, as the
-	 * ice HW ignores the 3 lowest bits when updating the Rx HW tail register.
-	 */
- test->ifobj_tx->xsk->batch_size = test->ifobj_tx->ring.tx_max_pending - 8;
- test->ifobj_rx->xsk->batch_size = test->ifobj_tx->ring.tx_max_pending - 8;
- pkt_stream_replace(test, max_descs, MIN_PKT_SIZE);
- return testapp_validate_traffic(test);
-}
-
-static int testapp_xdp_adjust_tail(struct test_spec *test, int adjust_value)
-{
- struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
- struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
-
- test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_adjust_tail,
- skel_tx->progs.xsk_xdp_adjust_tail,
- skel_rx->maps.xsk, skel_tx->maps.xsk);
-
- skel_rx->bss->adjust_value = adjust_value;
-
- return testapp_validate_traffic(test);
-}
-
-static int testapp_adjust_tail(struct test_spec *test, u32 value, u32 pkt_len)
-{
- int ret;
-
- test->adjust_tail_support = true;
- test->adjust_tail = true;
- test->total_steps = 1;
-
- pkt_stream_replace_ifobject(test->ifobj_tx, DEFAULT_BATCH_SIZE, pkt_len);
- pkt_stream_replace_ifobject(test->ifobj_rx, DEFAULT_BATCH_SIZE, pkt_len + value);
-
- ret = testapp_xdp_adjust_tail(test, value);
- if (ret)
- return ret;
-
- if (!test->adjust_tail_support) {
- ksft_test_result_skip("%s %sResize pkt with bpf_xdp_adjust_tail() not supported\n",
- mode_string(test), busy_poll_string(test));
- return TEST_SKIP;
- }
-
- return 0;
-}
-
-static int testapp_adjust_tail_shrink(struct test_spec *test)
-{
-	/* Shrink by 4 bytes for testing purposes */
- return testapp_adjust_tail(test, -4, MIN_PKT_SIZE * 2);
-}
-
-static int testapp_adjust_tail_shrink_mb(struct test_spec *test)
-{
- test->mtu = MAX_ETH_JUMBO_SIZE;
- /* Shrink by the frag size */
- return testapp_adjust_tail(test, -XSK_UMEM__MAX_FRAME_SIZE, XSK_UMEM__LARGE_FRAME_SIZE * 2);
-}
-
-static int testapp_adjust_tail_grow(struct test_spec *test)
-{
-	/* Grow by 4 bytes for testing purposes */
- return testapp_adjust_tail(test, 4, MIN_PKT_SIZE * 2);
-}
-
-static int testapp_adjust_tail_grow_mb(struct test_spec *test)
-{
- test->mtu = MAX_ETH_JUMBO_SIZE;
-	/* Grow by (frag_size - last_frag_size) - 1 to stay inside the last fragment */
- return testapp_adjust_tail(test, (XSK_UMEM__MAX_FRAME_SIZE / 2) - 1,
- XSK_UMEM__LARGE_FRAME_SIZE * 2);
-}
-
static void run_pkt_test(struct test_spec *test)
{
int ret;
ret = test->test_func(test);
- if (ret == TEST_PASS)
+ switch (ret) {
+ case TEST_PASS:
ksft_test_result_pass("PASS: %s %s%s\n", mode_string(test), busy_poll_string(test),
test->name);
- pkt_stream_restore_default(test);
-}
-
-static struct ifobject *ifobject_create(void)
-{
- struct ifobject *ifobj;
-
- ifobj = calloc(1, sizeof(struct ifobject));
- if (!ifobj)
- return NULL;
-
- ifobj->xsk_arr = calloc(MAX_SOCKETS, sizeof(*ifobj->xsk_arr));
- if (!ifobj->xsk_arr)
- goto out_xsk_arr;
-
- ifobj->umem = calloc(1, sizeof(*ifobj->umem));
- if (!ifobj->umem)
- goto out_umem;
-
- return ifobj;
-
-out_umem:
- free(ifobj->xsk_arr);
-out_xsk_arr:
- free(ifobj);
- return NULL;
-}
+ break;
+ case TEST_SKIP:
+ ksft_test_result_skip("SKIP: %s %s%s\n", mode_string(test), busy_poll_string(test),
+ test->name);
+ break;
+ case TEST_FAILURE:
+ ksft_test_result_fail("FAIL: %s %s%s\n", mode_string(test), busy_poll_string(test),
+ test->name);
+ break;
+ default:
+ ksft_test_result_fail("FAIL: %s %s%s -- Unexpected returned value (%d)\n",
+ mode_string(test), busy_poll_string(test), test->name, ret);
+ }
-static void ifobject_delete(struct ifobject *ifobj)
-{
- free(ifobj->umem);
- free(ifobj->xsk_arr);
- free(ifobj);
+ pkt_stream_restore_default(test);
}
static bool is_xdp_supported(int ifindex)
@@ -2685,46 +319,6 @@ static bool is_xdp_supported(int ifindex)
return true;
}
-static const struct test_spec tests[] = {
- {.name = "SEND_RECEIVE", .test_func = testapp_send_receive},
- {.name = "SEND_RECEIVE_2K_FRAME", .test_func = testapp_send_receive_2k_frame},
- {.name = "SEND_RECEIVE_SINGLE_PKT", .test_func = testapp_single_pkt},
- {.name = "POLL_RX", .test_func = testapp_poll_rx},
- {.name = "POLL_TX", .test_func = testapp_poll_tx},
- {.name = "POLL_RXQ_FULL", .test_func = testapp_poll_rxq_tmout},
- {.name = "POLL_TXQ_FULL", .test_func = testapp_poll_txq_tmout},
- {.name = "SEND_RECEIVE_UNALIGNED", .test_func = testapp_send_receive_unaligned},
- {.name = "ALIGNED_INV_DESC", .test_func = testapp_aligned_inv_desc},
- {.name = "ALIGNED_INV_DESC_2K_FRAME_SIZE", .test_func = testapp_aligned_inv_desc_2k_frame},
- {.name = "UNALIGNED_INV_DESC", .test_func = testapp_unaligned_inv_desc},
- {.name = "UNALIGNED_INV_DESC_4001_FRAME_SIZE",
- .test_func = testapp_unaligned_inv_desc_4001_frame},
- {.name = "UMEM_HEADROOM", .test_func = testapp_headroom},
- {.name = "TEARDOWN", .test_func = testapp_teardown},
- {.name = "BIDIRECTIONAL", .test_func = testapp_bidirectional},
- {.name = "STAT_RX_DROPPED", .test_func = testapp_stats_rx_dropped},
- {.name = "STAT_TX_INVALID", .test_func = testapp_stats_tx_invalid_descs},
- {.name = "STAT_RX_FULL", .test_func = testapp_stats_rx_full},
- {.name = "STAT_FILL_EMPTY", .test_func = testapp_stats_fill_empty},
- {.name = "XDP_PROG_CLEANUP", .test_func = testapp_xdp_prog_cleanup},
- {.name = "XDP_DROP_HALF", .test_func = testapp_xdp_drop},
- {.name = "XDP_SHARED_UMEM", .test_func = testapp_xdp_shared_umem},
- {.name = "XDP_METADATA_COPY", .test_func = testapp_xdp_metadata},
- {.name = "XDP_METADATA_COPY_MULTI_BUFF", .test_func = testapp_xdp_metadata_mb},
- {.name = "SEND_RECEIVE_9K_PACKETS", .test_func = testapp_send_receive_mb},
- {.name = "SEND_RECEIVE_UNALIGNED_9K_PACKETS",
- .test_func = testapp_send_receive_unaligned_mb},
- {.name = "ALIGNED_INV_DESC_MULTI_BUFF", .test_func = testapp_aligned_inv_desc_mb},
- {.name = "UNALIGNED_INV_DESC_MULTI_BUFF", .test_func = testapp_unaligned_inv_desc_mb},
- {.name = "TOO_MANY_FRAGS", .test_func = testapp_too_many_frags},
- {.name = "HW_SW_MIN_RING_SIZE", .test_func = testapp_hw_sw_min_ring_size},
- {.name = "HW_SW_MAX_RING_SIZE", .test_func = testapp_hw_sw_max_ring_size},
- {.name = "XDP_ADJUST_TAIL_SHRINK", .test_func = testapp_adjust_tail_shrink},
- {.name = "XDP_ADJUST_TAIL_SHRINK_MULTI_BUFF", .test_func = testapp_adjust_tail_shrink_mb},
- {.name = "XDP_ADJUST_TAIL_GROW", .test_func = testapp_adjust_tail_grow},
- {.name = "XDP_ADJUST_TAIL_GROW_MULTI_BUFF", .test_func = testapp_adjust_tail_grow_mb},
- };
-
static void print_tests(void)
{
u32 i;
@@ -2732,10 +326,13 @@ static void print_tests(void)
printf("Tests:\n");
for (i = 0; i < ARRAY_SIZE(tests); i++)
printf("%u: %s\n", i, tests[i].name);
+ for (i = ARRAY_SIZE(tests); i < ARRAY_SIZE(tests) + ARRAY_SIZE(ci_skip_tests); i++)
+ printf("%u: %s\n", i, ci_skip_tests[i - ARRAY_SIZE(tests)].name);
}
int main(int argc, char **argv)
{
+ const size_t total_tests = ARRAY_SIZE(tests) + ARRAY_SIZE(ci_skip_tests);
struct pkt_stream *rx_pkt_stream_default;
struct pkt_stream *tx_pkt_stream_default;
struct ifobject *ifobj_tx, *ifobj_rx;
@@ -2763,7 +360,7 @@ int main(int argc, char **argv)
print_tests();
ksft_exit_xpass();
}
- if (opt_run_test != RUN_ALL_TESTS && opt_run_test >= ARRAY_SIZE(tests)) {
+ if (opt_run_test != RUN_ALL_TESTS && opt_run_test >= total_tests) {
ksft_print_msg("Error: test %u does not exist.\n", opt_run_test);
ksft_exit_xfail();
}
@@ -2788,10 +385,13 @@ int main(int argc, char **argv)
ifobj_tx->set_ring.default_rx = ifobj_tx->ring.rx_pending;
}
- init_iface(ifobj_rx, worker_testapp_validate_rx);
- init_iface(ifobj_tx, worker_testapp_validate_tx);
+ if (init_iface(ifobj_rx, worker_testapp_validate_rx) ||
+ init_iface(ifobj_tx, worker_testapp_validate_tx)) {
+ ksft_print_msg("Error : can't initialize interfaces\n");
+ ksft_exit_xfail();
+ }
- test_spec_init(&test, ifobj_tx, ifobj_rx, 0, &tests[0]);
+ test_init(&test, ifobj_tx, ifobj_rx, 0, &tests[0]);
tx_pkt_stream_default = pkt_stream_generate(DEFAULT_PKT_CNT, MIN_PKT_SIZE);
rx_pkt_stream_default = pkt_stream_generate(DEFAULT_PKT_CNT, MIN_PKT_SIZE);
if (!tx_pkt_stream_default || !rx_pkt_stream_default)
@@ -2800,7 +400,7 @@ int main(int argc, char **argv)
test.rx_pkt_stream_default = rx_pkt_stream_default;
if (opt_run_test == RUN_ALL_TESTS)
- nb_tests = ARRAY_SIZE(tests);
+ nb_tests = total_tests;
else
nb_tests = 1;
if (opt_mode == TEST_MODE_ALL) {
@@ -2822,11 +422,15 @@ int main(int argc, char **argv)
if (opt_mode != TEST_MODE_ALL && i != opt_mode)
continue;
- for (j = 0; j < ARRAY_SIZE(tests); j++) {
+ for (j = 0; j < total_tests; j++) {
if (opt_run_test != RUN_ALL_TESTS && j != opt_run_test)
continue;
- test_spec_init(&test, ifobj_tx, ifobj_rx, i, &tests[j]);
+ if (j < ARRAY_SIZE(tests))
+ test_init(&test, ifobj_tx, ifobj_rx, i, &tests[j]);
+ else
+ test_init(&test, ifobj_tx, ifobj_rx, i,
+ &ci_skip_tests[j - ARRAY_SIZE(tests)]);
run_pkt_test(&test);
usleep(USLEEP_MAX);
diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h
index 67fc44b2813b..3ca518df23ad 100644
--- a/tools/testing/selftests/bpf/xskxceiver.h
+++ b/tools/testing/selftests/bpf/xskxceiver.h
@@ -22,168 +22,13 @@
#define PF_XDP AF_XDP
#endif
-#ifndef SO_BUSY_POLL_BUDGET
-#define SO_BUSY_POLL_BUDGET 70
-#endif
-
-#ifndef SO_PREFER_BUSY_POLL
-#define SO_PREFER_BUSY_POLL 69
-#endif
-
-#define TEST_PASS 0
-#define TEST_FAILURE -1
-#define TEST_CONTINUE 1
-#define TEST_SKIP 2
-#define MAX_INTERFACES 2
-#define MAX_INTERFACE_NAME_CHARS 16
-#define MAX_TEST_NAME_SIZE 48
#define MAX_TEARDOWN_ITER 10
-#define PKT_HDR_SIZE (sizeof(struct ethhdr) + 2) /* Just to align the data in the packet */
-#define MIN_PKT_SIZE 64
-#define MAX_ETH_PKT_SIZE 1518
#define MAX_ETH_JUMBO_SIZE 9000
-#define USLEEP_MAX 10000
#define SOCK_RECONF_CTR 10
-#define DEFAULT_BATCH_SIZE 64
-#define POLL_TMOUT 1000
-#define THREAD_TMOUT 3
-#define DEFAULT_PKT_CNT (4 * 1024)
-#define DEFAULT_UMEM_BUFFERS (DEFAULT_PKT_CNT / 4)
#define RX_FULL_RXQSIZE 32
#define UMEM_HEADROOM_TEST_SIZE 128
#define XSK_UMEM__INVALID_FRAME_SIZE (MAX_ETH_JUMBO_SIZE + 1)
-#define XSK_UMEM__LARGE_FRAME_SIZE (3 * 1024)
-#define XSK_UMEM__MAX_FRAME_SIZE (4 * 1024)
-#define XSK_DESC__INVALID_OPTION (0xffff)
-#define HUGEPAGE_SIZE (2 * 1024 * 1024)
-#define PKT_DUMP_NB_TO_PRINT 16
#define RUN_ALL_TESTS UINT_MAX
#define NUM_MAC_ADDRESSES 4
-#define print_verbose(x...) do { if (opt_verbose) ksft_print_msg(x); } while (0)
-
-enum test_mode {
- TEST_MODE_SKB,
- TEST_MODE_DRV,
- TEST_MODE_ZC,
- TEST_MODE_ALL
-};
-
-struct xsk_umem_info {
- struct xsk_ring_prod fq;
- struct xsk_ring_cons cq;
- struct xsk_umem *umem;
- u64 next_buffer;
- u32 num_frames;
- u32 frame_headroom;
- void *buffer;
- u32 frame_size;
- u32 base_addr;
- u32 fill_size;
- u32 comp_size;
- bool unaligned_mode;
-};
-
-struct xsk_socket_info {
- struct xsk_ring_cons rx;
- struct xsk_ring_prod tx;
- struct xsk_umem_info *umem;
- struct xsk_socket *xsk;
- struct pkt_stream *pkt_stream;
- u32 outstanding_tx;
- u32 rxqsize;
- u32 batch_size;
- u8 dst_mac[ETH_ALEN];
- u8 src_mac[ETH_ALEN];
-};
-
-struct pkt {
- int offset;
- u32 len;
- u32 pkt_nb;
- bool valid;
- u16 options;
-};
-
-struct pkt_stream {
- u32 nb_pkts;
- u32 current_pkt_nb;
- struct pkt *pkts;
- u32 max_pkt_len;
- u32 nb_rx_pkts;
- u32 nb_valid_entries;
- bool verbatim;
-};
-
-struct set_hw_ring {
- u32 default_tx;
- u32 default_rx;
-};
-
-struct ifobject;
-struct test_spec;
-typedef int (*validation_func_t)(struct ifobject *ifobj);
-typedef void *(*thread_func_t)(void *arg);
-typedef int (*test_func_t)(struct test_spec *test);
-
-struct ifobject {
- char ifname[MAX_INTERFACE_NAME_CHARS];
- struct xsk_socket_info *xsk;
- struct xsk_socket_info *xsk_arr;
- struct xsk_umem_info *umem;
- thread_func_t func_ptr;
- validation_func_t validation_func;
- struct xsk_xdp_progs *xdp_progs;
- struct bpf_map *xskmap;
- struct bpf_program *xdp_prog;
- struct ethtool_ringparam ring;
- struct set_hw_ring set_ring;
- enum test_mode mode;
- int ifindex;
- int mtu;
- u32 bind_flags;
- u32 xdp_zc_max_segs;
- bool tx_on;
- bool rx_on;
- bool use_poll;
- bool busy_poll;
- bool use_fill_ring;
- bool release_rx;
- bool shared_umem;
- bool use_metadata;
- bool unaligned_supp;
- bool multi_buff_supp;
- bool multi_buff_zc_supp;
- bool hw_ring_size_supp;
-};
-
-struct test_spec {
- struct ifobject *ifobj_tx;
- struct ifobject *ifobj_rx;
- struct pkt_stream *tx_pkt_stream_default;
- struct pkt_stream *rx_pkt_stream_default;
- struct bpf_program *xdp_prog_rx;
- struct bpf_program *xdp_prog_tx;
- struct bpf_map *xskmap_rx;
- struct bpf_map *xskmap_tx;
- test_func_t test_func;
- int mtu;
- u16 total_steps;
- u16 current_step;
- u16 nb_sockets;
- bool fail;
- bool set_ring;
- bool adjust_tail;
- bool adjust_tail_support;
- enum test_mode mode;
- char name[MAX_TEST_NAME_SIZE];
-};
-
-pthread_barrier_t barr;
-pthread_mutex_t pacing_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-int pkts_in_flight;
-
-static const u8 g_mac[ETH_ALEN] = {0x55, 0x44, 0x33, 0x22, 0x11, 0x00};
-
#endif /* XSKXCEIVER_H_ */
diff --git a/tools/testing/selftests/breakpoints/breakpoint_test.c b/tools/testing/selftests/breakpoints/breakpoint_test.c
index d46962a24724..1159d81890c2 100644
--- a/tools/testing/selftests/breakpoints/breakpoint_test.c
+++ b/tools/testing/selftests/breakpoints/breakpoint_test.c
@@ -18,7 +18,7 @@
#include <errno.h>
#include <string.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#define COUNT_ISN_BPS 4
#define COUNT_WPS 4
diff --git a/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c b/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c
index e7041816085a..5fc0f37f3fd4 100644
--- a/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c
+++ b/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c
@@ -26,7 +26,7 @@
#include <errno.h>
#include <signal.h>
-#include "../kselftest.h"
+#include "kselftest.h"
static volatile uint8_t var[96] __attribute__((__aligned__(32)));
diff --git a/tools/testing/selftests/breakpoints/step_after_suspend_test.c b/tools/testing/selftests/breakpoints/step_after_suspend_test.c
index 8d275f03e977..ca2aaab9e4ca 100644
--- a/tools/testing/selftests/breakpoints/step_after_suspend_test.c
+++ b/tools/testing/selftests/breakpoints/step_after_suspend_test.c
@@ -19,7 +19,7 @@
#include <sys/types.h>
#include <sys/wait.h>
-#include "../kselftest.h"
+#include "kselftest.h"
void child(int cpu)
{
@@ -127,22 +127,42 @@ int run_test(int cpu)
return KSFT_PASS;
}
+/*
+ * Reads the suspend success count from sysfs.
+ * Returns the count on success or exits on failure.
+ */
+static int get_suspend_success_count_or_fail(void)
+{
+ FILE *fp;
+ int val;
+
+ fp = fopen("/sys/power/suspend_stats/success", "r");
+ if (!fp)
+ ksft_exit_fail_msg(
+ "Failed to open suspend_stats/success: %s\n",
+ strerror(errno));
+
+ if (fscanf(fp, "%d", &val) != 1) {
+ fclose(fp);
+ ksft_exit_fail_msg(
+ "Failed to read suspend success count\n");
+ }
+
+ fclose(fp);
+ return val;
+}
+
void suspend(void)
{
- int power_state_fd;
int timerfd;
int err;
+ int count_before;
+ int count_after;
struct itimerspec spec = {};
if (getuid() != 0)
ksft_exit_skip("Please run the test as root - Exiting.\n");
- power_state_fd = open("/sys/power/state", O_RDWR);
- if (power_state_fd < 0)
- ksft_exit_fail_msg(
- "open(\"/sys/power/state\") failed %s)\n",
- strerror(errno));
-
timerfd = timerfd_create(CLOCK_BOOTTIME_ALARM, 0);
if (timerfd < 0)
ksft_exit_fail_msg("timerfd_create() failed\n");
@@ -152,14 +172,15 @@ void suspend(void)
if (err < 0)
ksft_exit_fail_msg("timerfd_settime() failed\n");
+ count_before = get_suspend_success_count_or_fail();
+
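+	/* /sys/power/suspend_stats/success increments once per successful suspend/resume cycle */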
system("(echo mem > /sys/power/state) 2> /dev/null");
- timerfd_gettime(timerfd, &spec);
- if (spec.it_value.tv_sec != 0 || spec.it_value.tv_nsec != 0)
+ count_after = get_suspend_success_count_or_fail();
+ if (count_after <= count_before)
ksft_exit_fail_msg("Failed to enter Suspend state\n");
close(timerfd);
- close(power_state_fd);
}
int main(int argc, char **argv)
diff --git a/tools/testing/selftests/cachestat/.gitignore b/tools/testing/selftests/cachestat/.gitignore
index d6c30b43a4bb..abbb13b6e96b 100644
--- a/tools/testing/selftests/cachestat/.gitignore
+++ b/tools/testing/selftests/cachestat/.gitignore
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
test_cachestat
+tmpshmcstat
diff --git a/tools/testing/selftests/cachestat/test_cachestat.c b/tools/testing/selftests/cachestat/test_cachestat.c
index 632ab44737ec..542cd09cb443 100644
--- a/tools/testing/selftests/cachestat/test_cachestat.c
+++ b/tools/testing/selftests/cachestat/test_cachestat.c
@@ -16,7 +16,7 @@
#include <fcntl.h>
#include <errno.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#define NR_TESTS 9
@@ -33,6 +33,11 @@ void print_cachestat(struct cachestat *cs)
cs->nr_evicted, cs->nr_recently_evicted);
}
+enum file_type {
+ FILE_MMAP,
+ FILE_SHMEM
+};
+
bool write_exactly(int fd, size_t filesize)
{
int random_fd = open("/dev/urandom", O_RDONLY);
@@ -201,38 +206,73 @@ out1:
out:
return ret;
}
+const char *file_type_str(enum file_type type)
+{
+ switch (type) {
+ case FILE_SHMEM:
+ return "shmem";
+ case FILE_MMAP:
+ return "mmap";
+ default:
+ return "unknown";
+ }
+}
-bool test_cachestat_shmem(void)
+
+bool run_cachestat_test(enum file_type type)
{
size_t PS = sysconf(_SC_PAGESIZE);
size_t filesize = PS * 512 * 2; /* 2 2MB huge pages */
int syscall_ret;
size_t compute_len = PS * 512;
struct cachestat_range cs_range = { PS, compute_len };
- char *filename = "tmpshmcstat";
+ char *filename = "tmpshmcstat", *map;
struct cachestat cs;
bool ret = true;
+ int fd;
unsigned long num_pages = compute_len / PS;
- int fd = shm_open(filename, O_CREAT | O_RDWR, 0600);
+ if (type == FILE_SHMEM)
+ fd = shm_open(filename, O_CREAT | O_RDWR, 0600);
+ else
+ fd = open(filename, O_RDWR | O_CREAT | O_TRUNC, 0666);
if (fd < 0) {
- ksft_print_msg("Unable to create shmem file.\n");
+ ksft_print_msg("Unable to create %s file.\n",
+ file_type_str(type));
ret = false;
goto out;
}
if (ftruncate(fd, filesize)) {
- ksft_print_msg("Unable to truncate shmem file.\n");
+ ksft_print_msg("Unable to truncate %s file.\n",file_type_str(type));
ret = false;
goto close_fd;
}
+ switch (type) {
+ case FILE_SHMEM:
+ if (!write_exactly(fd, filesize)) {
+ ksft_print_msg("Unable to write to file.\n");
+ ret = false;
+ goto close_fd;
+ }
+ break;
+ case FILE_MMAP:
+ map = mmap(NULL, filesize, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0);
- if (!write_exactly(fd, filesize)) {
- ksft_print_msg("Unable to write to shmem file.\n");
+ if (map == MAP_FAILED) {
+ ksft_print_msg("mmap failed.\n");
+ ret = false;
+ goto close_fd;
+ }
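+		/* Dirty every byte so the entire mapping is instantiated in the page cache */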
+ for (int i = 0; i < filesize; i++)
+ map[i] = 'A';
+ break;
+ default:
+ ksft_print_msg("Unsupported file type.\n");
ret = false;
goto close_fd;
}
-
syscall_ret = syscall(__NR_cachestat, fd, &cs_range, &cs, 0);
if (syscall_ret) {
@@ -308,12 +348,18 @@ int main(void)
break;
}
- if (test_cachestat_shmem())
+ if (run_cachestat_test(FILE_SHMEM))
ksft_test_result_pass("cachestat works with a shmem file\n");
else {
ksft_test_result_fail("cachestat fails with a shmem file\n");
ret = 1;
}
+ if (run_cachestat_test(FILE_MMAP))
+ ksft_test_result_pass("cachestat works with a mmap file\n");
+ else {
+ ksft_test_result_fail("cachestat fails with a mmap file\n");
+ ret = 1;
+ }
return ret;
}
diff --git a/tools/testing/selftests/capabilities/test_execve.c b/tools/testing/selftests/capabilities/test_execve.c
index 47bad7ddc5bc..46fc8d46b6e6 100644
--- a/tools/testing/selftests/capabilities/test_execve.c
+++ b/tools/testing/selftests/capabilities/test_execve.c
@@ -18,7 +18,7 @@
#include <sys/prctl.h>
#include <sys/stat.h>
-#include "../kselftest.h"
+#include "kselftest.h"
static int nerrs;
static pid_t mpid; /* main() pid is used to avoid duplicate test counts */
diff --git a/tools/testing/selftests/capabilities/validate_cap.c b/tools/testing/selftests/capabilities/validate_cap.c
index 65f2a1c89239..cef1d9937b9f 100644
--- a/tools/testing/selftests/capabilities/validate_cap.c
+++ b/tools/testing/selftests/capabilities/validate_cap.c
@@ -7,7 +7,7 @@
#include <sys/prctl.h>
#include <sys/auxv.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#if __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 19)
# define HAVE_GETAUXVAL
diff --git a/tools/testing/selftests/cgroup/lib/cgroup_util.c b/tools/testing/selftests/cgroup/lib/cgroup_util.c
index 0e89fcff4d05..44c52f620fda 100644
--- a/tools/testing/selftests/cgroup/lib/cgroup_util.c
+++ b/tools/testing/selftests/cgroup/lib/cgroup_util.c
@@ -522,6 +522,18 @@ int proc_mount_contains(const char *option)
return strstr(buf, option) != NULL;
}
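+/* Returns 1 if @feature is listed in /sys/kernel/cgroup/features, 0 if not, negative on read error */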
+int cgroup_feature(const char *feature)
+{
+ char buf[PAGE_SIZE];
+ ssize_t read;
+
+ read = read_text("/sys/kernel/cgroup/features", buf, sizeof(buf));
+ if (read < 0)
+ return read;
+
+ return strstr(buf, feature) != NULL;
+}
+
ssize_t proc_read_text(int pid, bool thread, const char *item, char *buf, size_t size)
{
char path[PATH_MAX];
diff --git a/tools/testing/selftests/cgroup/lib/include/cgroup_util.h b/tools/testing/selftests/cgroup/lib/include/cgroup_util.h
index c69cab66254b..7ab2824ed7b5 100644
--- a/tools/testing/selftests/cgroup/lib/include/cgroup_util.h
+++ b/tools/testing/selftests/cgroup/lib/include/cgroup_util.h
@@ -25,6 +25,26 @@ static inline int values_close(long a, long b, int err)
return labs(a - b) <= (a + b) / 100 * err;
}
+/*
+ * Checks whether two given values differ by less than err% of their sum and
+ * prints detailed debug info if they do not.
+ */
+static inline int values_close_report(long a, long b, int err)
+{
+ long diff = labs(a - b);
+ long limit = (a + b) / 100 * err;
+ double actual_err = (a + b) ? (100.0 * diff / (a + b)) : 0.0;
+ int close = diff <= limit;
+
+ if (!close)
+ fprintf(stderr,
+ "[FAIL] actual=%ld expected=%ld | diff=%ld | limit=%ld | "
+ "tolerance=%d%% | actual_error=%.2f%%\n",
+ a, b, diff, limit, err, actual_err);
+
+ return close;
+}
+
extern ssize_t read_text(const char *path, char *buf, size_t max_len);
extern ssize_t write_text(const char *path, char *buf, ssize_t len);
@@ -60,6 +80,7 @@ extern int cg_run_nowait(const char *cgroup,
extern int cg_wait_for_proc_count(const char *cgroup, int count);
extern int cg_killall(const char *cgroup);
int proc_mount_contains(const char *option);
+int cgroup_feature(const char *feature);
extern ssize_t proc_read_text(int pid, bool thread, const char *item, char *buf, size_t size);
extern int proc_read_strstr(int pid, bool thread, const char *item, const char *needle);
extern pid_t clone_into_cgroup(int cgroup_fd);
diff --git a/tools/testing/selftests/cgroup/test_core.c b/tools/testing/selftests/cgroup/test_core.c
index a360e2eb2eef..102262555a59 100644
--- a/tools/testing/selftests/cgroup/test_core.c
+++ b/tools/testing/selftests/cgroup/test_core.c
@@ -17,7 +17,7 @@
#include <string.h>
#include <pthread.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "cgroup_util.h"
static bool nsdelegate;
@@ -923,8 +923,10 @@ struct corecg_test {
int main(int argc, char *argv[])
{
char root[PATH_MAX];
- int i, ret = EXIT_SUCCESS;
+ int i;
+ ksft_print_header();
+ ksft_set_plan(ARRAY_SIZE(tests));
if (cg_find_unified_root(root, sizeof(root), &nsdelegate)) {
if (setup_named_v1_root(root, sizeof(root), CG_NAMED_NAME))
ksft_exit_skip("cgroup v2 isn't mounted and could not setup named v1 hierarchy\n");
@@ -946,12 +948,11 @@ post_v2_setup:
ksft_test_result_skip("%s\n", tests[i].name);
break;
default:
- ret = EXIT_FAILURE;
ksft_test_result_fail("%s\n", tests[i].name);
break;
}
}
cleanup_named_v1_root(root);
- return ret;
+ ksft_finished();
}
diff --git a/tools/testing/selftests/cgroup/test_cpu.c b/tools/testing/selftests/cgroup/test_cpu.c
index a2b50af8e9ee..c83f05438d7c 100644
--- a/tools/testing/selftests/cgroup/test_cpu.c
+++ b/tools/testing/selftests/cgroup/test_cpu.c
@@ -2,6 +2,7 @@
#define _GNU_SOURCE
#include <linux/limits.h>
+#include <sys/param.h>
#include <sys/sysinfo.h>
#include <sys/wait.h>
#include <errno.h>
@@ -10,7 +11,7 @@
#include <time.h>
#include <unistd.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "cgroup_util.h"
enum hog_clock_type {
@@ -218,7 +219,7 @@ static int test_cpucg_stats(const char *root)
if (user_usec <= 0)
goto cleanup;
- if (!values_close(usage_usec, expected_usage_usec, 1))
+ if (!values_close_report(usage_usec, expected_usage_usec, 1))
goto cleanup;
ret = KSFT_PASS;
@@ -290,7 +291,7 @@ static int test_cpucg_nice(const char *root)
user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec");
nice_usec = cg_read_key_long(cpucg, "cpu.stat", "nice_usec");
- if (!values_close(nice_usec, expected_nice_usec, 1))
+ if (!values_close_report(nice_usec, expected_nice_usec, 1))
goto cleanup;
ret = KSFT_PASS;
@@ -403,7 +404,7 @@ overprovision_validate(const struct cpu_hogger *children, int num_children)
goto cleanup;
delta = children[i + 1].usage - children[i].usage;
- if (!values_close(delta, children[0].usage, 35))
+ if (!values_close_report(delta, children[0].usage, 35))
goto cleanup;
}
@@ -443,7 +444,7 @@ underprovision_validate(const struct cpu_hogger *children, int num_children)
int ret = KSFT_FAIL, i;
for (i = 0; i < num_children - 1; i++) {
- if (!values_close(children[i + 1].usage, children[0].usage, 15))
+ if (!values_close_report(children[i + 1].usage, children[0].usage, 15))
goto cleanup;
}
@@ -572,16 +573,16 @@ run_cpucg_nested_weight_test(const char *root, bool overprovisioned)
nested_leaf_usage = leaf[1].usage + leaf[2].usage;
if (overprovisioned) {
- if (!values_close(leaf[0].usage, nested_leaf_usage, 15))
+ if (!values_close_report(leaf[0].usage, nested_leaf_usage, 15))
goto cleanup;
- } else if (!values_close(leaf[0].usage * 2, nested_leaf_usage, 15))
+ } else if (!values_close_report(leaf[0].usage * 2, nested_leaf_usage, 15))
goto cleanup;
child_usage = cg_read_key_long(child, "cpu.stat", "usage_usec");
if (child_usage <= 0)
goto cleanup;
- if (!values_close(child_usage, nested_leaf_usage, 1))
+ if (!values_close_report(child_usage, nested_leaf_usage, 1))
goto cleanup;
ret = KSFT_PASS;
@@ -645,10 +646,16 @@ test_cpucg_nested_weight_underprovisioned(const char *root)
static int test_cpucg_max(const char *root)
{
int ret = KSFT_FAIL;
- long usage_usec, user_usec;
- long usage_seconds = 1;
- long expected_usage_usec = usage_seconds * USEC_PER_SEC;
+ long quota_usec = 1000;
+ long default_period_usec = 100000; /* cpu.max's default period */
+ long duration_seconds = 1;
+
+ long duration_usec = duration_seconds * USEC_PER_SEC;
+ long usage_usec, n_periods, remainder_usec, expected_usage_usec;
char *cpucg;
+ char quota_buf[32];
+
+ snprintf(quota_buf, sizeof(quota_buf), "%ld", quota_usec);
cpucg = cg_name(root, "cpucg_test");
if (!cpucg)
@@ -657,13 +664,13 @@ static int test_cpucg_max(const char *root)
if (cg_create(cpucg))
goto cleanup;
- if (cg_write(cpucg, "cpu.max", "1000"))
+ if (cg_write(cpucg, "cpu.max", quota_buf))
goto cleanup;
struct cpu_hog_func_param param = {
.nprocs = 1,
.ts = {
- .tv_sec = usage_seconds,
+ .tv_sec = duration_seconds,
.tv_nsec = 0,
},
.clock_type = CPU_HOG_CLOCK_WALL,
@@ -672,14 +679,19 @@ static int test_cpucg_max(const char *root)
goto cleanup;
usage_usec = cg_read_key_long(cpucg, "cpu.stat", "usage_usec");
- user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec");
- if (user_usec <= 0)
+ if (usage_usec <= 0)
goto cleanup;
- if (user_usec >= expected_usage_usec)
- goto cleanup;
+	/*
+	 * The following calculation applies only because the CPU
+	 * hog is configured to run for wall-clock time.
+	 */
+ n_periods = duration_usec / default_period_usec;
+ remainder_usec = duration_usec - n_periods * default_period_usec;
+	expected_usage_usec = n_periods * quota_usec +
+			      MIN(remainder_usec, quota_usec);
- if (values_close(usage_usec, expected_usage_usec, 95))
+ if (!values_close_report(usage_usec, expected_usage_usec, 10))
goto cleanup;
ret = KSFT_PASS;
@@ -698,10 +710,16 @@ cleanup:
static int test_cpucg_max_nested(const char *root)
{
int ret = KSFT_FAIL;
- long usage_usec, user_usec;
- long usage_seconds = 1;
- long expected_usage_usec = usage_seconds * USEC_PER_SEC;
+ long quota_usec = 1000;
+ long default_period_usec = 100000; /* cpu.max's default period */
+ long duration_seconds = 1;
+
+ long duration_usec = duration_seconds * USEC_PER_SEC;
+ long usage_usec, n_periods, remainder_usec, expected_usage_usec;
char *parent, *child;
+ char quota_buf[32];
+
+ snprintf(quota_buf, sizeof(quota_buf), "%ld", quota_usec);
parent = cg_name(root, "cpucg_parent");
child = cg_name(parent, "cpucg_child");
@@ -717,13 +735,13 @@ static int test_cpucg_max_nested(const char *root)
if (cg_create(child))
goto cleanup;
- if (cg_write(parent, "cpu.max", "1000"))
+ if (cg_write(parent, "cpu.max", quota_buf))
goto cleanup;
struct cpu_hog_func_param param = {
.nprocs = 1,
.ts = {
- .tv_sec = usage_seconds,
+ .tv_sec = duration_seconds,
.tv_nsec = 0,
},
.clock_type = CPU_HOG_CLOCK_WALL,
@@ -732,14 +750,19 @@ static int test_cpucg_max_nested(const char *root)
goto cleanup;
usage_usec = cg_read_key_long(child, "cpu.stat", "usage_usec");
- user_usec = cg_read_key_long(child, "cpu.stat", "user_usec");
- if (user_usec <= 0)
+ if (usage_usec <= 0)
goto cleanup;
- if (user_usec >= expected_usage_usec)
- goto cleanup;
+	/*
+	 * The following calculation applies only because the CPU
+	 * hog is configured to run for wall-clock time.
+	 */
+ n_periods = duration_usec / default_period_usec;
+ remainder_usec = duration_usec - n_periods * default_period_usec;
+	expected_usage_usec = n_periods * quota_usec +
+			      MIN(remainder_usec, quota_usec);
- if (values_close(usage_usec, expected_usage_usec, 95))
+ if (!values_close_report(usage_usec, expected_usage_usec, 10))
goto cleanup;
ret = KSFT_PASS;
@@ -773,8 +796,10 @@ struct cpucg_test {
int main(int argc, char *argv[])
{
char root[PATH_MAX];
- int i, ret = EXIT_SUCCESS;
+ int i;
+ ksft_print_header();
+ ksft_set_plan(ARRAY_SIZE(tests));
if (cg_find_unified_root(root, sizeof(root), NULL))
ksft_exit_skip("cgroup v2 isn't mounted\n");
@@ -791,11 +816,10 @@ int main(int argc, char *argv[])
ksft_test_result_skip("%s\n", tests[i].name);
break;
default:
- ret = EXIT_FAILURE;
ksft_test_result_fail("%s\n", tests[i].name);
break;
}
}
- return ret;
+ ksft_finished();
}
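
With the defaults used above, the expected usage is easy to verify by hand: a 1 s wall-clock hog under a 1000 us quota per 100000 us period spans exactly 10 full periods with no remainder, so the cgroup should be allowed roughly 10 * 1000 + MIN(0, 1000) = 10000 us of CPU time, and the test asserts that usage_usec lands within 10% of that. A standalone sketch of the in-test computation:

	#include <stdio.h>
	#include <sys/param.h>	/* MIN() */

	int main(void)
	{
		long quota_usec = 1000;
		long period_usec = 100000;	/* cpu.max default period */
		long duration_usec = 1000000;	/* 1 s wall-clock hog */

		long n_periods = duration_usec / period_usec;		  /* 10 */
		long remainder = duration_usec - n_periods * period_usec; /* 0 */
		long expected = n_periods * quota_usec +
				MIN(remainder, quota_usec);

		printf("expected usage: %ld us\n", expected);	/* 10000 */
		return 0;
	}
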
diff --git a/tools/testing/selftests/cgroup/test_cpuset.c b/tools/testing/selftests/cgroup/test_cpuset.c
index 4034d14ba69a..c5cf8b56ceb8 100644
--- a/tools/testing/selftests/cgroup/test_cpuset.c
+++ b/tools/testing/selftests/cgroup/test_cpuset.c
@@ -3,7 +3,7 @@
#include <linux/limits.h>
#include <signal.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "cgroup_util.h"
static int idle_process_fn(const char *cgroup, void *arg)
@@ -247,8 +247,10 @@ struct cpuset_test {
int main(int argc, char *argv[])
{
char root[PATH_MAX];
- int i, ret = EXIT_SUCCESS;
+ int i;
+ ksft_print_header();
+ ksft_set_plan(ARRAY_SIZE(tests));
if (cg_find_unified_root(root, sizeof(root), NULL))
ksft_exit_skip("cgroup v2 isn't mounted\n");
@@ -265,11 +267,10 @@ int main(int argc, char *argv[])
ksft_test_result_skip("%s\n", tests[i].name);
break;
default:
- ret = EXIT_FAILURE;
ksft_test_result_fail("%s\n", tests[i].name);
break;
}
}
- return ret;
+ ksft_finished();
}
diff --git a/tools/testing/selftests/cgroup/test_freezer.c b/tools/testing/selftests/cgroup/test_freezer.c
index 8730645d363a..97fae92c8387 100644
--- a/tools/testing/selftests/cgroup/test_freezer.c
+++ b/tools/testing/selftests/cgroup/test_freezer.c
@@ -11,7 +11,7 @@
#include <string.h>
#include <sys/wait.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "cgroup_util.h"
#define DEBUG
@@ -804,6 +804,662 @@ cleanup:
return ret;
}
+/*
+ * Get the current frozen_usec for the cgroup.
+ */
+static long cg_check_freezetime(const char *cgroup)
+{
+ return cg_read_key_long(cgroup, "cgroup.stat.local",
+ "frozen_usec ");
+}
+
+/*
+ * Test that the freeze time will behave as expected for an empty cgroup.
+ */
+static int test_cgfreezer_time_empty(const char *root)
+{
+ int ret = KSFT_FAIL;
+ char *cgroup = NULL;
+ long prev, curr;
+
+ cgroup = cg_name(root, "cg_time_test_empty");
+ if (!cgroup)
+ goto cleanup;
+
+ /*
+ * 1) Create an empty cgroup and check that its freeze time
+ * is 0.
+ */
+ if (cg_create(cgroup))
+ goto cleanup;
+
+ curr = cg_check_freezetime(cgroup);
+ if (curr < 0) {
+ ret = KSFT_SKIP;
+ goto cleanup;
+ }
+ if (curr > 0) {
+ debug("Expect time (%ld) to be 0\n", curr);
+ goto cleanup;
+ }
+
+ if (cg_freeze_nowait(cgroup, true))
+ goto cleanup;
+
+ /*
+ * 2) Sleep for 1000 us. Check that the freeze time is at
+ * least 1000 us.
+ */
+ usleep(1000);
+ curr = cg_check_freezetime(cgroup);
+ if (curr < 1000) {
+ debug("Expect time (%ld) to be at least 1000 us\n",
+ curr);
+ goto cleanup;
+ }
+
+ /*
+ * 3) Unfreeze the cgroup. Check that the freeze time is
+ * larger than at 2).
+ */
+ if (cg_freeze_nowait(cgroup, false))
+ goto cleanup;
+ prev = curr;
+ curr = cg_check_freezetime(cgroup);
+ if (curr <= prev) {
+ debug("Expect time (%ld) to be more than previous check (%ld)\n",
+ curr, prev);
+ goto cleanup;
+ }
+
+ /*
+ * 4) Check the freeze time again to ensure that it has not
+ * changed.
+ */
+ prev = curr;
+ curr = cg_check_freezetime(cgroup);
+ if (curr != prev) {
+ debug("Expect time (%ld) to be unchanged from previous check (%ld)\n",
+ curr, prev);
+ goto cleanup;
+ }
+
+ ret = KSFT_PASS;
+
+cleanup:
+ if (cgroup)
+ cg_destroy(cgroup);
+ free(cgroup);
+ return ret;
+}
+
+/*
+ * A simple test for cgroup freezer time accounting. This test follows
+ * the same flow as test_cgfreezer_time_empty, but with a single process
+ * in the cgroup.
+ */
+static int test_cgfreezer_time_simple(const char *root)
+{
+ int ret = KSFT_FAIL;
+ char *cgroup = NULL;
+ long prev, curr;
+
+ cgroup = cg_name(root, "cg_time_test_simple");
+ if (!cgroup)
+ goto cleanup;
+
+ /*
+ * 1) Create a cgroup and check that its freeze time is 0.
+ */
+ if (cg_create(cgroup))
+ goto cleanup;
+
+ curr = cg_check_freezetime(cgroup);
+ if (curr < 0) {
+ ret = KSFT_SKIP;
+ goto cleanup;
+ }
+ if (curr > 0) {
+ debug("Expect time (%ld) to be 0\n", curr);
+ goto cleanup;
+ }
+
+ /*
+ * 2) Populate the cgroup with one child and check that the
+ * freeze time is still 0.
+ */
+ cg_run_nowait(cgroup, child_fn, NULL);
+ prev = curr;
+ curr = cg_check_freezetime(cgroup);
+ if (curr > prev) {
+ debug("Expect time (%ld) to be 0\n", curr);
+ goto cleanup;
+ }
+
+ if (cg_freeze_nowait(cgroup, true))
+ goto cleanup;
+
+ /*
+ * 3) Sleep for 1000 us. Check that the freeze time is at
+ * least 1000 us.
+ */
+ usleep(1000);
+ prev = curr;
+ curr = cg_check_freezetime(cgroup);
+ if (curr < 1000) {
+ debug("Expect time (%ld) to be at least 1000 us\n",
+ curr);
+ goto cleanup;
+ }
+
+ /*
+ * 4) Unfreeze the cgroup. Check that the freeze time is
+ * larger than at 3).
+ */
+ if (cg_freeze_nowait(cgroup, false))
+ goto cleanup;
+ prev = curr;
+ curr = cg_check_freezetime(cgroup);
+ if (curr <= prev) {
+ debug("Expect time (%ld) to be more than previous check (%ld)\n",
+ curr, prev);
+ goto cleanup;
+ }
+
+ /*
+ * 5) Sleep for 1000 us. Check that the freeze time is the
+ * same as at 4).
+ */
+ usleep(1000);
+ prev = curr;
+ curr = cg_check_freezetime(cgroup);
+ if (curr != prev) {
+ debug("Expect time (%ld) to be unchanged from previous check (%ld)\n",
+ curr, prev);
+ goto cleanup;
+ }
+
+ ret = KSFT_PASS;
+
+cleanup:
+ if (cgroup)
+ cg_destroy(cgroup);
+ free(cgroup);
+ return ret;
+}
+
+/*
+ * Test that freezer time accounting works as expected, even while we're
+ * populating a cgroup with processes.
+ */
+static int test_cgfreezer_time_populate(const char *root)
+{
+ int ret = KSFT_FAIL;
+ char *cgroup = NULL;
+ long prev, curr;
+ int i;
+
+ cgroup = cg_name(root, "cg_time_test_populate");
+ if (!cgroup)
+ goto cleanup;
+
+ if (cg_create(cgroup))
+ goto cleanup;
+
+ curr = cg_check_freezetime(cgroup);
+ if (curr < 0) {
+ ret = KSFT_SKIP;
+ goto cleanup;
+ }
+ if (curr > 0) {
+ debug("Expect time (%ld) to be 0\n", curr);
+ goto cleanup;
+ }
+
+ /*
+ * 1) Populate the cgroup with 100 processes. Check that
+ * the freeze time is 0.
+ */
+ for (i = 0; i < 100; i++)
+ cg_run_nowait(cgroup, child_fn, NULL);
+ prev = curr;
+ curr = cg_check_freezetime(cgroup);
+ if (curr != prev) {
+ debug("Expect time (%ld) to be 0\n", curr);
+ goto cleanup;
+ }
+
+ /*
+ * 2) Wait for the group to become fully populated. Check
+ * that the freeze time is 0.
+ */
+ if (cg_wait_for_proc_count(cgroup, 100))
+ goto cleanup;
+ prev = curr;
+ curr = cg_check_freezetime(cgroup);
+ if (curr != prev) {
+ debug("Expect time (%ld) to be 0\n", curr);
+ goto cleanup;
+ }
+
+ /*
+ * 3) Freeze the cgroup and then populate it with 100 more
+ * processes. Check that the freeze time continues to grow.
+ */
+ if (cg_freeze_nowait(cgroup, true))
+ goto cleanup;
+ prev = curr;
+ curr = cg_check_freezetime(cgroup);
+ if (curr <= prev) {
+ debug("Expect time (%ld) to be more than previous check (%ld)\n",
+ curr, prev);
+ goto cleanup;
+ }
+
+ for (i = 0; i < 100; i++)
+ cg_run_nowait(cgroup, child_fn, NULL);
+ prev = curr;
+ curr = cg_check_freezetime(cgroup);
+ if (curr <= prev) {
+ debug("Expect time (%ld) to be more than previous check (%ld)\n",
+ curr, prev);
+ goto cleanup;
+ }
+
+ /*
+ * 4) Wait for the group to become fully populated. Check
+ * that the freeze time is larger than at 3).
+ */
+ if (cg_wait_for_proc_count(cgroup, 200))
+ goto cleanup;
+ prev = curr;
+ curr = cg_check_freezetime(cgroup);
+ if (curr <= prev) {
+ debug("Expect time (%ld) to be more than previous check (%ld)\n",
+ curr, prev);
+ goto cleanup;
+ }
+
+ /*
+ * 5) Unfreeze the cgroup. Check that the freeze time is
+ * larger than at 4).
+ */
+ if (cg_freeze_nowait(cgroup, false))
+ goto cleanup;
+ prev = curr;
+ curr = cg_check_freezetime(cgroup);
+ if (curr <= prev) {
+ debug("Expect time (%ld) to be more than previous check (%ld)\n",
+ curr, prev);
+ goto cleanup;
+ }
+
+ /*
+ * 6) Kill the processes. Check that the freeze time is the
+ * same as it was at 5).
+ */
+ if (cg_killall(cgroup))
+ goto cleanup;
+ prev = curr;
+ curr = cg_check_freezetime(cgroup);
+ if (curr != prev) {
+ debug("Expect time (%ld) to be unchanged from previous check (%ld)\n",
+ curr, prev);
+ goto cleanup;
+ }
+
+ /*
+ * 7) Freeze and unfreeze the cgroup. Check that the freeze
+ * time is larger than it was at 6).
+ */
+ if (cg_freeze_nowait(cgroup, true))
+ goto cleanup;
+ if (cg_freeze_nowait(cgroup, false))
+ goto cleanup;
+ prev = curr;
+ curr = cg_check_freezetime(cgroup);
+ if (curr <= prev) {
+ debug("Expect time (%ld) to be more than previous check (%ld)\n",
+ curr, prev);
+ goto cleanup;
+ }
+
+ ret = KSFT_PASS;
+
+cleanup:
+ if (cgroup)
+ cg_destroy(cgroup);
+ free(cgroup);
+ return ret;
+}
+
+/*
+ * Test that frozen time for a cgroup continues to work as expected,
+ * even as processes are migrated. Frozen cgroup A's freeze time should
+ * continue to increase, while running cgroup B's should stay at 0.
+ */
+static int test_cgfreezer_time_migrate(const char *root)
+{
+ long prev_A, curr_A, curr_B;
+ char *cgroup[2] = {0};
+ int ret = KSFT_FAIL;
+ int pid;
+
+ cgroup[0] = cg_name(root, "cg_time_test_migrate_A");
+ if (!cgroup[0])
+ goto cleanup;
+
+ cgroup[1] = cg_name(root, "cg_time_test_migrate_B");
+ if (!cgroup[1])
+ goto cleanup;
+
+ if (cg_create(cgroup[0]))
+ goto cleanup;
+
+ if (cg_check_freezetime(cgroup[0]) < 0) {
+ ret = KSFT_SKIP;
+ goto cleanup;
+ }
+
+ if (cg_create(cgroup[1]))
+ goto cleanup;
+
+ pid = cg_run_nowait(cgroup[0], child_fn, NULL);
+ if (pid < 0)
+ goto cleanup;
+
+ if (cg_wait_for_proc_count(cgroup[0], 1))
+ goto cleanup;
+
+ curr_A = cg_check_freezetime(cgroup[0]);
+ if (curr_A) {
+ debug("Expect time (%ld) to be 0\n", curr_A);
+ goto cleanup;
+ }
+ curr_B = cg_check_freezetime(cgroup[1]);
+ if (curr_B) {
+ debug("Expect time (%ld) to be 0\n", curr_B);
+ goto cleanup;
+ }
+
+ /*
+ * Freeze cgroup A.
+ */
+ if (cg_freeze_wait(cgroup[0], true))
+ goto cleanup;
+ prev_A = curr_A;
+ curr_A = cg_check_freezetime(cgroup[0]);
+ if (curr_A <= prev_A) {
+ debug("Expect time (%ld) to be > 0\n", curr_A);
+ goto cleanup;
+ }
+
+ /*
+ * Migrate from A (frozen) to B (running).
+ */
+ if (cg_enter(cgroup[1], pid))
+ goto cleanup;
+
+ usleep(1000);
+ curr_B = cg_check_freezetime(cgroup[1]);
+ if (curr_B) {
+ debug("Expect time (%ld) to be 0\n", curr_B);
+ goto cleanup;
+ }
+
+ prev_A = curr_A;
+ curr_A = cg_check_freezetime(cgroup[0]);
+ if (curr_A <= prev_A) {
+ debug("Expect time (%ld) to be more than previous check (%ld)\n",
+ curr_A, prev_A);
+ goto cleanup;
+ }
+
+ ret = KSFT_PASS;
+
+cleanup:
+ if (cgroup[0])
+ cg_destroy(cgroup[0]);
+ free(cgroup[0]);
+ if (cgroup[1])
+ cg_destroy(cgroup[1]);
+ free(cgroup[1]);
+ return ret;
+}
+
+/*
+ * The test creates a cgroup and freezes it. Then it creates a child cgroup.
+ * After that it checks that the child cgroup has a non-zero freeze time
+ * that is less than the parent's. Next, it freezes the child, unfreezes
+ * the parent, and sleeps. Finally, it checks that the child's freeze
+ * time has grown larger than the parent's.
+ */
+static int test_cgfreezer_time_parent(const char *root)
+{
+ char *parent, *child = NULL;
+ int ret = KSFT_FAIL;
+ long ptime, ctime;
+
+ parent = cg_name(root, "cg_test_parent_A");
+ if (!parent)
+ goto cleanup;
+
+ child = cg_name(parent, "cg_test_parent_B");
+ if (!child)
+ goto cleanup;
+
+ if (cg_create(parent))
+ goto cleanup;
+
+ if (cg_check_freezetime(parent) < 0) {
+ ret = KSFT_SKIP;
+ goto cleanup;
+ }
+
+ if (cg_freeze_wait(parent, true))
+ goto cleanup;
+
+ usleep(1000);
+ if (cg_create(child))
+ goto cleanup;
+
+ if (cg_check_frozen(child, true))
+ goto cleanup;
+
+ /*
+ * Since the parent was frozen the entire time the child cgroup
+ * was being created, we expect the parent's freeze time to be
+ * larger than the child's.
+ *
+ * Ideally, we would be able to check both times simultaneously,
+ * but here we get the child's after we get the parent's.
+ */
+ ptime = cg_check_freezetime(parent);
+ ctime = cg_check_freezetime(child);
+ if (ptime <= ctime) {
+ debug("Expect ptime (%ld) > ctime (%ld)\n", ptime, ctime);
+ goto cleanup;
+ }
+
+ if (cg_freeze_nowait(child, true))
+ goto cleanup;
+
+ if (cg_freeze_wait(parent, false))
+ goto cleanup;
+
+ if (cg_check_frozen(child, true))
+ goto cleanup;
+
+ usleep(100000);
+
+ ctime = cg_check_freezetime(child);
+ ptime = cg_check_freezetime(parent);
+
+ if (ctime <= ptime) {
+ debug("Expect ctime (%ld) > ptime (%ld)\n", ctime, ptime);
+ goto cleanup;
+ }
+
+ ret = KSFT_PASS;
+
+cleanup:
+ if (child)
+ cg_destroy(child);
+ free(child);
+ if (parent)
+ cg_destroy(parent);
+ free(parent);
+ return ret;
+}
+
+/*
+ * The test creates a parent cgroup and a child cgroup. Then, it freezes
+ * the child and checks that the child's freeze time is greater than the
+ * parent's, which should be zero.
+ */
+static int test_cgfreezer_time_child(const char *root)
+{
+ char *parent, *child = NULL;
+ int ret = KSFT_FAIL;
+ long ptime, ctime;
+
+ parent = cg_name(root, "cg_test_child_A");
+ if (!parent)
+ goto cleanup;
+
+ child = cg_name(parent, "cg_test_child_B");
+ if (!child)
+ goto cleanup;
+
+ if (cg_create(parent))
+ goto cleanup;
+
+ if (cg_check_freezetime(parent) < 0) {
+ ret = KSFT_SKIP;
+ goto cleanup;
+ }
+
+ if (cg_create(child))
+ goto cleanup;
+
+ if (cg_freeze_wait(child, true))
+ goto cleanup;
+
+ ctime = cg_check_freezetime(child);
+ ptime = cg_check_freezetime(parent);
+ if (ptime != 0) {
+ debug("Expect ptime (%ld) to be 0\n", ptime);
+ goto cleanup;
+ }
+
+ if (ctime <= ptime) {
+		debug("Expect ctime (%ld) > ptime (%ld)\n", ctime, ptime);
+ goto cleanup;
+ }
+
+ ret = KSFT_PASS;
+
+cleanup:
+ if (child)
+ cg_destroy(child);
+ free(child);
+ if (parent)
+ cg_destroy(parent);
+ free(parent);
+ return ret;
+}
+
+/*
+ * The test creates the following hierarchy:
+ * A
+ * |
+ * B
+ * |
+ * C
+ *
+ * It freezes the cgroups in the order C, B, A, then unfreezes them in
+ * the order A, B, C, and finally checks that C's freeze time is larger
+ * than B's and that B's is larger than A's.
+ */
+static int test_cgfreezer_time_nested(const char *root)
+{
+ char *cgroup[3] = {0};
+ int ret = KSFT_FAIL;
+ long time[3] = {0};
+ int i;
+
+ cgroup[0] = cg_name(root, "cg_test_time_A");
+ if (!cgroup[0])
+ goto cleanup;
+
+ cgroup[1] = cg_name(cgroup[0], "B");
+ if (!cgroup[1])
+ goto cleanup;
+
+ cgroup[2] = cg_name(cgroup[1], "C");
+ if (!cgroup[2])
+ goto cleanup;
+
+ if (cg_create(cgroup[0]))
+ goto cleanup;
+
+ if (cg_check_freezetime(cgroup[0]) < 0) {
+ ret = KSFT_SKIP;
+ goto cleanup;
+ }
+
+ if (cg_create(cgroup[1]))
+ goto cleanup;
+
+ if (cg_create(cgroup[2]))
+ goto cleanup;
+
+ if (cg_freeze_nowait(cgroup[2], true))
+ goto cleanup;
+
+ if (cg_freeze_nowait(cgroup[1], true))
+ goto cleanup;
+
+ if (cg_freeze_nowait(cgroup[0], true))
+ goto cleanup;
+
+ usleep(1000);
+
+ if (cg_freeze_nowait(cgroup[0], false))
+ goto cleanup;
+
+ if (cg_freeze_nowait(cgroup[1], false))
+ goto cleanup;
+
+ if (cg_freeze_nowait(cgroup[2], false))
+ goto cleanup;
+
+ time[2] = cg_check_freezetime(cgroup[2]);
+ time[1] = cg_check_freezetime(cgroup[1]);
+ time[0] = cg_check_freezetime(cgroup[0]);
+
+ if (time[2] <= time[1]) {
+		debug("Expect C's time (%ld) > B's time (%ld)\n", time[2], time[1]);
+ goto cleanup;
+ }
+
+ if (time[1] <= time[0]) {
+		debug("Expect B's time (%ld) > A's time (%ld)\n", time[1], time[0]);
+ goto cleanup;
+ }
+
+ ret = KSFT_PASS;
+
+cleanup:
+ for (i = 2; i >= 0 && cgroup[i]; i--) {
+ cg_destroy(cgroup[i]);
+ free(cgroup[i]);
+ }
+
+ return ret;
+}
+
#define T(x) { x, #x }
struct cgfreezer_test {
int (*fn)(const char *root);
@@ -819,14 +1475,23 @@ struct cgfreezer_test {
T(test_cgfreezer_stopped),
T(test_cgfreezer_ptraced),
T(test_cgfreezer_vfork),
+ T(test_cgfreezer_time_empty),
+ T(test_cgfreezer_time_simple),
+ T(test_cgfreezer_time_populate),
+ T(test_cgfreezer_time_migrate),
+ T(test_cgfreezer_time_parent),
+ T(test_cgfreezer_time_child),
+ T(test_cgfreezer_time_nested),
};
#undef T
int main(int argc, char *argv[])
{
char root[PATH_MAX];
- int i, ret = EXIT_SUCCESS;
+ int i;
+ ksft_print_header();
+ ksft_set_plan(ARRAY_SIZE(tests));
if (cg_find_unified_root(root, sizeof(root), NULL))
ksft_exit_skip("cgroup v2 isn't mounted\n");
for (i = 0; i < ARRAY_SIZE(tests); i++) {
@@ -838,11 +1503,10 @@ int main(int argc, char *argv[])
ksft_test_result_skip("%s\n", tests[i].name);
break;
default:
- ret = EXIT_FAILURE;
ksft_test_result_fail("%s\n", tests[i].name);
break;
}
}
- return ret;
+ ksft_finished();
}
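
All of the new freezer tests sample the cumulative frozen time through the frozen_usec key of cgroup.stat.local. Outside the selftest harness the same value can be read in a few lines; this sketch scans for the key the way cg_read_key_long() does (the path handling is deliberately minimal and the function is illustrative):

	#include <stdio.h>

	/* Return the cumulative frozen time (in us) of a cgroup, or -1
	 * if the kernel does not expose frozen_usec. Sketch only. */
	static long read_frozen_usec(const char *cgroup_path)
	{
		char path[4096], line[128];
		long val = -1;
		FILE *f;

		snprintf(path, sizeof(path), "%s/cgroup.stat.local",
			 cgroup_path);
		f = fopen(path, "r");
		if (!f)
			return -1;
		while (fgets(line, sizeof(line), f)) {
			if (sscanf(line, "frozen_usec %ld", &val) == 1)
				break;
		}
		fclose(f);
		return val;
	}
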
diff --git a/tools/testing/selftests/cgroup/test_hugetlb_memcg.c b/tools/testing/selftests/cgroup/test_hugetlb_memcg.c
index 856f9508ea56..f451aa449be6 100644
--- a/tools/testing/selftests/cgroup/test_hugetlb_memcg.c
+++ b/tools/testing/selftests/cgroup/test_hugetlb_memcg.c
@@ -7,7 +7,7 @@
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "cgroup_util.h"
#define ADDR ((void *)(0x0UL))
diff --git a/tools/testing/selftests/cgroup/test_kill.c b/tools/testing/selftests/cgroup/test_kill.c
index 0e5bb6c7307a..c8c9d306925b 100644
--- a/tools/testing/selftests/cgroup/test_kill.c
+++ b/tools/testing/selftests/cgroup/test_kill.c
@@ -9,7 +9,7 @@
#include <sys/types.h>
#include <unistd.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "../pidfd/pidfd.h"
#include "cgroup_util.h"
@@ -274,8 +274,10 @@ struct cgkill_test {
int main(int argc, char *argv[])
{
char root[PATH_MAX];
- int i, ret = EXIT_SUCCESS;
+ int i;
+ ksft_print_header();
+ ksft_set_plan(ARRAY_SIZE(tests));
if (cg_find_unified_root(root, sizeof(root), NULL))
ksft_exit_skip("cgroup v2 isn't mounted\n");
for (i = 0; i < ARRAY_SIZE(tests); i++) {
@@ -287,11 +289,10 @@ int main(int argc, char *argv[])
ksft_test_result_skip("%s\n", tests[i].name);
break;
default:
- ret = EXIT_FAILURE;
ksft_test_result_fail("%s\n", tests[i].name);
break;
}
}
- return ret;
+ ksft_finished();
}
diff --git a/tools/testing/selftests/cgroup/test_kmem.c b/tools/testing/selftests/cgroup/test_kmem.c
index 63b3c9aad399..ca38525484e3 100644
--- a/tools/testing/selftests/cgroup/test_kmem.c
+++ b/tools/testing/selftests/cgroup/test_kmem.c
@@ -14,7 +14,7 @@
#include <sys/sysinfo.h>
#include <pthread.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "cgroup_util.h"
@@ -421,8 +421,10 @@ struct kmem_test {
int main(int argc, char **argv)
{
char root[PATH_MAX];
- int i, ret = EXIT_SUCCESS;
+ int i;
+ ksft_print_header();
+ ksft_set_plan(ARRAY_SIZE(tests));
if (cg_find_unified_root(root, sizeof(root), NULL))
ksft_exit_skip("cgroup v2 isn't mounted\n");
@@ -446,11 +448,10 @@ int main(int argc, char **argv)
ksft_test_result_skip("%s\n", tests[i].name);
break;
default:
- ret = EXIT_FAILURE;
ksft_test_result_fail("%s\n", tests[i].name);
break;
}
}
- return ret;
+ ksft_finished();
}
diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c
index a680f773f2d5..4e1647568c5b 100644
--- a/tools/testing/selftests/cgroup/test_memcontrol.c
+++ b/tools/testing/selftests/cgroup/test_memcontrol.c
@@ -18,7 +18,7 @@
#include <errno.h>
#include <sys/mman.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "cgroup_util.h"
static bool has_localevents;
@@ -1650,8 +1650,10 @@ struct memcg_test {
int main(int argc, char **argv)
{
char root[PATH_MAX];
- int i, proc_status, ret = EXIT_SUCCESS;
+ int i, proc_status;
+ ksft_print_header();
+ ksft_set_plan(ARRAY_SIZE(tests));
if (cg_find_unified_root(root, sizeof(root), NULL))
ksft_exit_skip("cgroup v2 isn't mounted\n");
@@ -1685,11 +1687,10 @@ int main(int argc, char **argv)
ksft_test_result_skip("%s\n", tests[i].name);
break;
default:
- ret = EXIT_FAILURE;
ksft_test_result_fail("%s\n", tests[i].name);
break;
}
}
- return ret;
+ ksft_finished();
}
diff --git a/tools/testing/selftests/cgroup/test_pids.c b/tools/testing/selftests/cgroup/test_pids.c
index 9ecb83c6cc5c..9a387c815d2c 100644
--- a/tools/testing/selftests/cgroup/test_pids.c
+++ b/tools/testing/selftests/cgroup/test_pids.c
@@ -9,7 +9,7 @@
#include <sys/types.h>
#include <unistd.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "cgroup_util.h"
static int run_success(const char *cgroup, void *arg)
@@ -77,6 +77,9 @@ static int test_pids_events(const char *root)
char *cg_parent = NULL, *cg_child = NULL;
int pid;
+ if (cgroup_feature("pids_localevents") <= 0)
+ return KSFT_SKIP;
+
cg_parent = cg_name(root, "pids_parent");
cg_child = cg_name(cg_parent, "pids_child");
if (!cg_parent || !cg_child)
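
cgroup_feature() is only declared in the cgroup_util.h hunk above; its implementation is not part of this excerpt. A plausible sketch, assuming it checks the newline-separated feature list the kernel exposes in /sys/kernel/cgroup/features (the path, format, and return convention here are assumptions, not taken from the patch):

	#include <stdio.h>
	#include <string.h>

	/* Return 1 if the named cgroup feature is advertised by the
	 * kernel, 0 if not, -1 on error. Assumed implementation. */
	static int cgroup_feature_sketch(const char *feature)
	{
		char line[256];
		FILE *f = fopen("/sys/kernel/cgroup/features", "r");

		if (!f)
			return -1;
		while (fgets(line, sizeof(line), f)) {
			line[strcspn(line, "\n")] = '\0';
			if (!strcmp(line, feature)) {
				fclose(f);
				return 1;
			}
		}
		fclose(f);
		return 0;
	}
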
diff --git a/tools/testing/selftests/cgroup/test_zswap.c b/tools/testing/selftests/cgroup/test_zswap.c
index e1f578ca2841..64ebc3f3f203 100644
--- a/tools/testing/selftests/cgroup/test_zswap.c
+++ b/tools/testing/selftests/cgroup/test_zswap.c
@@ -10,7 +10,7 @@
#include <sys/wait.h>
#include <sys/mman.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "cgroup_util.h"
static int read_int(const char *path, size_t *value)
@@ -597,8 +597,10 @@ static bool zswap_configured(void)
int main(int argc, char **argv)
{
char root[PATH_MAX];
- int i, ret = EXIT_SUCCESS;
+ int i;
+ ksft_print_header();
+ ksft_set_plan(ARRAY_SIZE(tests));
if (cg_find_unified_root(root, sizeof(root), NULL))
ksft_exit_skip("cgroup v2 isn't mounted\n");
@@ -625,11 +627,10 @@ int main(int argc, char **argv)
ksft_test_result_skip("%s\n", tests[i].name);
break;
default:
- ret = EXIT_FAILURE;
ksft_test_result_fail("%s\n", tests[i].name);
break;
}
}
- return ret;
+ ksft_finished();
}
diff --git a/tools/testing/selftests/clone3/clone3.c b/tools/testing/selftests/clone3/clone3.c
index e61f07973ce5..289e0c7c1f09 100644
--- a/tools/testing/selftests/clone3/clone3.c
+++ b/tools/testing/selftests/clone3/clone3.c
@@ -18,7 +18,7 @@
#include <unistd.h>
#include <sched.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "clone3_selftests.h"
enum test_mode {
diff --git a/tools/testing/selftests/clone3/clone3_cap_checkpoint_restore.c b/tools/testing/selftests/clone3/clone3_cap_checkpoint_restore.c
index 3c196fa86c99..e82281efa273 100644
--- a/tools/testing/selftests/clone3/clone3_cap_checkpoint_restore.c
+++ b/tools/testing/selftests/clone3/clone3_cap_checkpoint_restore.c
@@ -24,7 +24,7 @@
#include <unistd.h>
#include <sched.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#include "clone3_selftests.h"
static void child_exit(int ret)
diff --git a/tools/testing/selftests/clone3/clone3_clear_sighand.c b/tools/testing/selftests/clone3/clone3_clear_sighand.c
index ce0426786828..de0c9d62015d 100644
--- a/tools/testing/selftests/clone3/clone3_clear_sighand.c
+++ b/tools/testing/selftests/clone3/clone3_clear_sighand.c
@@ -13,7 +13,7 @@
#include <sys/syscall.h>
#include <sys/wait.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "clone3_selftests.h"
static void nop_handler(int signo)
diff --git a/tools/testing/selftests/clone3/clone3_selftests.h b/tools/testing/selftests/clone3/clone3_selftests.h
index eeca8005723f..a0593e8950f0 100644
--- a/tools/testing/selftests/clone3/clone3_selftests.h
+++ b/tools/testing/selftests/clone3/clone3_selftests.h
@@ -11,7 +11,7 @@
#include <syscall.h>
#include <sys/wait.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#define ptr_to_u64(ptr) ((__u64)((uintptr_t)(ptr)))
diff --git a/tools/testing/selftests/clone3/clone3_set_tid.c b/tools/testing/selftests/clone3/clone3_set_tid.c
index bfb0da2b4fdd..5c944aee6b41 100644
--- a/tools/testing/selftests/clone3/clone3_set_tid.c
+++ b/tools/testing/selftests/clone3/clone3_set_tid.c
@@ -20,7 +20,7 @@
#include <unistd.h>
#include <sched.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "clone3_selftests.h"
#define MAX_PID_NS_LEVEL 32
diff --git a/tools/testing/selftests/connector/proc_filter.c b/tools/testing/selftests/connector/proc_filter.c
index 4a825b997666..36c11467a8f1 100644
--- a/tools/testing/selftests/connector/proc_filter.c
+++ b/tools/testing/selftests/connector/proc_filter.c
@@ -16,7 +16,7 @@
#include <signal.h>
#include <string.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#define NL_MESSAGE_SIZE (sizeof(struct nlmsghdr) + sizeof(struct cn_msg) + \
sizeof(struct proc_input))
diff --git a/tools/testing/selftests/core/close_range_test.c b/tools/testing/selftests/core/close_range_test.c
index e0d9851fe1c9..f14eca63f20c 100644
--- a/tools/testing/selftests/core/close_range_test.c
+++ b/tools/testing/selftests/core/close_range_test.c
@@ -14,7 +14,7 @@
#include <sys/resource.h>
#include <linux/close_range.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#include "../clone3/clone3_selftests.h"
diff --git a/tools/testing/selftests/core/unshare_test.c b/tools/testing/selftests/core/unshare_test.c
index 7fec9dfb1b0e..ffce75a6c228 100644
--- a/tools/testing/selftests/core/unshare_test.c
+++ b/tools/testing/selftests/core/unshare_test.c
@@ -14,7 +14,7 @@
#include <sys/resource.h>
#include <linux/close_range.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#include "../clone3/clone3_selftests.h"
TEST(unshare_EMFILE)
diff --git a/tools/testing/selftests/coredump/.gitignore b/tools/testing/selftests/coredump/.gitignore
new file mode 100644
index 000000000000..097f52db0be9
--- /dev/null
+++ b/tools/testing/selftests/coredump/.gitignore
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+stackdump_test
+coredump_socket_test
+coredump_socket_protocol_test
diff --git a/tools/testing/selftests/coredump/Makefile b/tools/testing/selftests/coredump/Makefile
index ed210037b29d..dece1a31d561 100644
--- a/tools/testing/selftests/coredump/Makefile
+++ b/tools/testing/selftests/coredump/Makefile
@@ -1,7 +1,13 @@
# SPDX-License-Identifier: GPL-2.0-only
-CFLAGS = $(KHDR_INCLUDES)
+CFLAGS += -Wall -O0 -g $(KHDR_INCLUDES) $(TOOLS_INCLUDES)
-TEST_GEN_PROGS := stackdump_test
+TEST_GEN_PROGS := stackdump_test \
+ coredump_socket_test \
+ coredump_socket_protocol_test
TEST_FILES := stackdump
include ../lib.mk
+
+$(OUTPUT)/stackdump_test: coredump_test_helpers.c
+$(OUTPUT)/coredump_socket_test: coredump_test_helpers.c
+$(OUTPUT)/coredump_socket_protocol_test: coredump_test_helpers.c
diff --git a/tools/testing/selftests/coredump/config b/tools/testing/selftests/coredump/config
new file mode 100644
index 000000000000..a05ef112b4f9
--- /dev/null
+++ b/tools/testing/selftests/coredump/config
@@ -0,0 +1,3 @@
+CONFIG_COREDUMP=y
+CONFIG_NET=y
+CONFIG_UNIX=y
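
The tests below redirect coredumps by rewriting /proc/sys/kernel/core_pattern with set_core_pattern(); the leading "@@" marks a coredump socket target rather than a file or pipe. The helper itself lives in the shared coredump_test_helpers.c, which this excerpt does not show; a minimal sketch of what such a helper has to do (assumed implementation, requires root):

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	/* Point the kernel at a new coredump target, e.g.
	 * "@@/tmp/coredump.socket". Sketch only. */
	static bool set_core_pattern_sketch(const char *pattern)
	{
		FILE *f = fopen("/proc/sys/kernel/core_pattern", "w");
		bool ok;

		if (!f)
			return false;
		ok = fprintf(f, "%s", pattern) == (int)strlen(pattern);
		return (fclose(f) == 0) && ok;
	}
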
diff --git a/tools/testing/selftests/coredump/coredump_socket_protocol_test.c b/tools/testing/selftests/coredump/coredump_socket_protocol_test.c
new file mode 100644
index 000000000000..d19b6717c53e
--- /dev/null
+++ b/tools/testing/selftests/coredump/coredump_socket_protocol_test.c
@@ -0,0 +1,1568 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <sys/stat.h>
+#include <sys/epoll.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+
+#include "coredump_test.h"
+
+#define NUM_CRASHING_COREDUMPS 5
+
+FIXTURE_SETUP(coredump)
+{
+ FILE *file;
+ int ret;
+
+ self->pid_coredump_server = -ESRCH;
+ self->fd_tmpfs_detached = -1;
+ file = fopen("/proc/sys/kernel/core_pattern", "r");
+ ASSERT_NE(NULL, file);
+
+ ret = fread(self->original_core_pattern, 1, sizeof(self->original_core_pattern), file);
+ ASSERT_TRUE(ret || feof(file));
+ ASSERT_LT(ret, sizeof(self->original_core_pattern));
+
+ self->original_core_pattern[ret] = '\0';
+ self->fd_tmpfs_detached = create_detached_tmpfs();
+ ASSERT_GE(self->fd_tmpfs_detached, 0);
+
+ ret = fclose(file);
+ ASSERT_EQ(0, ret);
+}
+
+FIXTURE_TEARDOWN(coredump)
+{
+ const char *reason;
+ FILE *file;
+ int ret, status;
+
+ if (self->pid_coredump_server > 0) {
+ kill(self->pid_coredump_server, SIGTERM);
+ waitpid(self->pid_coredump_server, &status, 0);
+ }
+ unlink("/tmp/coredump.file");
+ unlink("/tmp/coredump.socket");
+
+ file = fopen("/proc/sys/kernel/core_pattern", "w");
+ if (!file) {
+ reason = "Unable to open core_pattern";
+ goto fail;
+ }
+
+ ret = fprintf(file, "%s", self->original_core_pattern);
+ if (ret < 0) {
+ reason = "Unable to write to core_pattern";
+ goto fail;
+ }
+
+ ret = fclose(file);
+ if (ret) {
+ reason = "Unable to close core_pattern";
+ goto fail;
+ }
+
+ if (self->fd_tmpfs_detached >= 0) {
+ ret = close(self->fd_tmpfs_detached);
+ if (ret < 0) {
+ reason = "Unable to close detached tmpfs";
+ goto fail;
+ }
+ self->fd_tmpfs_detached = -1;
+ }
+
+ return;
+fail:
+ /* This should never happen */
+ fprintf(stderr, "Failed to cleanup coredump test: %s\n", reason);
+}
+
+TEST_F(coredump, socket_request_kernel)
+{
+ int pidfd, ret, status;
+ pid_t pid, pid_coredump_server;
+ struct stat st;
+ struct pidfd_info info = {};
+ int ipc_sockets[2];
+ char c;
+
+ ASSERT_TRUE(set_core_pattern("@@/tmp/coredump.socket"));
+
+ ret = socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
+ ASSERT_EQ(ret, 0);
+
+ pid_coredump_server = fork();
+ ASSERT_GE(pid_coredump_server, 0);
+ if (pid_coredump_server == 0) {
+ struct coredump_req req = {};
+ int fd_server = -1, fd_coredump = -1, fd_core_file = -1, fd_peer_pidfd = -1;
+ int exit_code = EXIT_FAILURE;
+
+ close(ipc_sockets[0]);
+
+ fd_server = create_and_listen_unix_socket("/tmp/coredump.socket");
+ if (fd_server < 0) {
+ fprintf(stderr, "socket_request_kernel: create_and_listen_unix_socket failed: %m\n");
+ goto out;
+ }
+
+ if (write_nointr(ipc_sockets[1], "1", 1) < 0) {
+ fprintf(stderr, "socket_request_kernel: write_nointr to ipc socket failed: %m\n");
+ goto out;
+ }
+
+ close(ipc_sockets[1]);
+
+ fd_coredump = accept4(fd_server, NULL, NULL, SOCK_CLOEXEC);
+ if (fd_coredump < 0) {
+ fprintf(stderr, "socket_request_kernel: accept4 failed: %m\n");
+ goto out;
+ }
+
+ fd_peer_pidfd = get_peer_pidfd(fd_coredump);
+ if (fd_peer_pidfd < 0) {
+ fprintf(stderr, "socket_request_kernel: get_peer_pidfd failed\n");
+ goto out;
+ }
+
+ if (!get_pidfd_info(fd_peer_pidfd, &info)) {
+ fprintf(stderr, "socket_request_kernel: get_pidfd_info failed\n");
+ goto out;
+ }
+
+ if (!(info.mask & PIDFD_INFO_COREDUMP)) {
+ fprintf(stderr, "socket_request_kernel: PIDFD_INFO_COREDUMP not set in mask\n");
+ goto out;
+ }
+
+ if (!(info.coredump_mask & PIDFD_COREDUMPED)) {
+ fprintf(stderr, "socket_request_kernel: PIDFD_COREDUMPED not set in coredump_mask\n");
+ goto out;
+ }
+
+ fd_core_file = creat("/tmp/coredump.file", 0644);
+ if (fd_core_file < 0) {
+ fprintf(stderr, "socket_request_kernel: creat coredump file failed: %m\n");
+ goto out;
+ }
+
+ if (!read_coredump_req(fd_coredump, &req)) {
+ fprintf(stderr, "socket_request_kernel: read_coredump_req failed\n");
+ goto out;
+ }
+
+ if (!check_coredump_req(&req, COREDUMP_ACK_SIZE_VER0,
+ COREDUMP_KERNEL | COREDUMP_USERSPACE |
+ COREDUMP_REJECT | COREDUMP_WAIT)) {
+ fprintf(stderr, "socket_request_kernel: check_coredump_req failed\n");
+ goto out;
+ }
+
+ if (!send_coredump_ack(fd_coredump, &req,
+ COREDUMP_KERNEL | COREDUMP_WAIT, 0)) {
+ fprintf(stderr, "socket_request_kernel: send_coredump_ack failed\n");
+ goto out;
+ }
+
+ if (!read_marker(fd_coredump, COREDUMP_MARK_REQACK)) {
+ fprintf(stderr, "socket_request_kernel: read_marker COREDUMP_MARK_REQACK failed\n");
+ goto out;
+ }
+
+ for (;;) {
+ char buffer[4096];
+ ssize_t bytes_read, bytes_write;
+
+ bytes_read = read(fd_coredump, buffer, sizeof(buffer));
+ if (bytes_read < 0) {
+ fprintf(stderr, "socket_request_kernel: read from coredump socket failed: %m\n");
+ goto out;
+ }
+
+ if (bytes_read == 0)
+ break;
+
+ bytes_write = write(fd_core_file, buffer, bytes_read);
+ if (bytes_read != bytes_write) {
+ if (bytes_write < 0 && errno == ENOSPC)
+ continue;
+ fprintf(stderr, "socket_request_kernel: write to core file failed (read=%zd, write=%zd): %m\n",
+ bytes_read, bytes_write);
+ goto out;
+ }
+ }
+
+ exit_code = EXIT_SUCCESS;
+ fprintf(stderr, "socket_request_kernel: completed successfully\n");
+out:
+ if (fd_core_file >= 0)
+ close(fd_core_file);
+ if (fd_peer_pidfd >= 0)
+ close(fd_peer_pidfd);
+ if (fd_coredump >= 0)
+ close(fd_coredump);
+ if (fd_server >= 0)
+ close(fd_server);
+ _exit(exit_code);
+ }
+ self->pid_coredump_server = pid_coredump_server;
+
+ EXPECT_EQ(close(ipc_sockets[1]), 0);
+ ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1);
+ EXPECT_EQ(close(ipc_sockets[0]), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+ if (pid == 0)
+ crashing_child();
+
+ pidfd = sys_pidfd_open(pid, 0);
+ ASSERT_GE(pidfd, 0);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFSIGNALED(status));
+ ASSERT_TRUE(WCOREDUMP(status));
+
+ ASSERT_TRUE(get_pidfd_info(pidfd, &info));
+ ASSERT_GT((info.mask & PIDFD_INFO_COREDUMP), 0);
+ ASSERT_GT((info.coredump_mask & PIDFD_COREDUMPED), 0);
+
+ wait_and_check_coredump_server(pid_coredump_server, _metadata, self);
+
+ ASSERT_EQ(stat("/tmp/coredump.file", &st), 0);
+ ASSERT_GT(st.st_size, 0);
+ system("file /tmp/coredump.file");
+}
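+
+/*
+ * The handshake exercised above, in order:
+ *  1. the crashing task trips core_pattern and the kernel connects
+ *     to the coredump socket;
+ *  2. the server reads a struct coredump_req advertising the modes
+ *     the kernel supports (KERNEL, USERSPACE, REJECT, WAIT);
+ *  3. the server replies with a coredump_ack, here selecting
+ *     COREDUMP_KERNEL | COREDUMP_WAIT;
+ *  4. the kernel confirms the ack with COREDUMP_MARK_REQACK;
+ *  5. the core image is streamed over the same connection until EOF.
+ * The tests below walk the same flow with different ack choices.
+ */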
+
+TEST_F(coredump, socket_request_userspace)
+{
+ int pidfd, ret, status;
+ pid_t pid, pid_coredump_server;
+ struct pidfd_info info = {};
+ int ipc_sockets[2];
+ char c;
+
+ ASSERT_TRUE(set_core_pattern("@@/tmp/coredump.socket"));
+
+ ret = socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
+ ASSERT_EQ(ret, 0);
+
+ pid_coredump_server = fork();
+ ASSERT_GE(pid_coredump_server, 0);
+ if (pid_coredump_server == 0) {
+ struct coredump_req req = {};
+ int fd_server = -1, fd_coredump = -1, fd_peer_pidfd = -1;
+ int exit_code = EXIT_FAILURE;
+
+ close(ipc_sockets[0]);
+
+ fd_server = create_and_listen_unix_socket("/tmp/coredump.socket");
+ if (fd_server < 0) {
+ fprintf(stderr, "socket_request_userspace: create_and_listen_unix_socket failed: %m\n");
+ goto out;
+ }
+
+ if (write_nointr(ipc_sockets[1], "1", 1) < 0) {
+ fprintf(stderr, "socket_request_userspace: write_nointr to ipc socket failed: %m\n");
+ goto out;
+ }
+
+ close(ipc_sockets[1]);
+
+ fd_coredump = accept4(fd_server, NULL, NULL, SOCK_CLOEXEC);
+ if (fd_coredump < 0) {
+ fprintf(stderr, "socket_request_userspace: accept4 failed: %m\n");
+ goto out;
+ }
+
+ fd_peer_pidfd = get_peer_pidfd(fd_coredump);
+ if (fd_peer_pidfd < 0) {
+ fprintf(stderr, "socket_request_userspace: get_peer_pidfd failed\n");
+ goto out;
+ }
+
+ if (!get_pidfd_info(fd_peer_pidfd, &info)) {
+ fprintf(stderr, "socket_request_userspace: get_pidfd_info failed\n");
+ goto out;
+ }
+
+ if (!(info.mask & PIDFD_INFO_COREDUMP)) {
+ fprintf(stderr, "socket_request_userspace: PIDFD_INFO_COREDUMP not set in mask\n");
+ goto out;
+ }
+
+ if (!(info.coredump_mask & PIDFD_COREDUMPED)) {
+ fprintf(stderr, "socket_request_userspace: PIDFD_COREDUMPED not set in coredump_mask\n");
+ goto out;
+ }
+
+ if (!read_coredump_req(fd_coredump, &req)) {
+ fprintf(stderr, "socket_request_userspace: read_coredump_req failed\n");
+ goto out;
+ }
+
+ if (!check_coredump_req(&req, COREDUMP_ACK_SIZE_VER0,
+ COREDUMP_KERNEL | COREDUMP_USERSPACE |
+ COREDUMP_REJECT | COREDUMP_WAIT)) {
+ fprintf(stderr, "socket_request_userspace: check_coredump_req failed\n");
+ goto out;
+ }
+
+ if (!send_coredump_ack(fd_coredump, &req,
+ COREDUMP_USERSPACE | COREDUMP_WAIT, 0)) {
+ fprintf(stderr, "socket_request_userspace: send_coredump_ack failed\n");
+ goto out;
+ }
+
+ if (!read_marker(fd_coredump, COREDUMP_MARK_REQACK)) {
+ fprintf(stderr, "socket_request_userspace: read_marker COREDUMP_MARK_REQACK failed\n");
+ goto out;
+ }
+
+ for (;;) {
+ char buffer[4096];
+ ssize_t bytes_read;
+
+ bytes_read = read(fd_coredump, buffer, sizeof(buffer));
+ if (bytes_read > 0) {
+ fprintf(stderr, "socket_request_userspace: unexpected data received (expected no coredump data)\n");
+ goto out;
+ }
+
+ if (bytes_read < 0) {
+ fprintf(stderr, "socket_request_userspace: read from coredump socket failed: %m\n");
+ goto out;
+ }
+
+ if (bytes_read == 0)
+ break;
+ }
+
+ exit_code = EXIT_SUCCESS;
+ fprintf(stderr, "socket_request_userspace: completed successfully\n");
+out:
+ if (fd_peer_pidfd >= 0)
+ close(fd_peer_pidfd);
+ if (fd_coredump >= 0)
+ close(fd_coredump);
+ if (fd_server >= 0)
+ close(fd_server);
+ _exit(exit_code);
+ }
+ self->pid_coredump_server = pid_coredump_server;
+
+ EXPECT_EQ(close(ipc_sockets[1]), 0);
+ ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1);
+ EXPECT_EQ(close(ipc_sockets[0]), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+ if (pid == 0)
+ crashing_child();
+
+ pidfd = sys_pidfd_open(pid, 0);
+ ASSERT_GE(pidfd, 0);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFSIGNALED(status));
+ ASSERT_TRUE(WCOREDUMP(status));
+
+ ASSERT_TRUE(get_pidfd_info(pidfd, &info));
+ ASSERT_GT((info.mask & PIDFD_INFO_COREDUMP), 0);
+ ASSERT_GT((info.coredump_mask & PIDFD_COREDUMPED), 0);
+
+ wait_and_check_coredump_server(pid_coredump_server, _metadata, self);
+}
+
+TEST_F(coredump, socket_request_reject)
+{
+ int pidfd, ret, status;
+ pid_t pid, pid_coredump_server;
+ struct pidfd_info info = {};
+ int ipc_sockets[2];
+ char c;
+
+ ASSERT_TRUE(set_core_pattern("@@/tmp/coredump.socket"));
+
+ ret = socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
+ ASSERT_EQ(ret, 0);
+
+ pid_coredump_server = fork();
+ ASSERT_GE(pid_coredump_server, 0);
+ if (pid_coredump_server == 0) {
+ struct coredump_req req = {};
+ int fd_server = -1, fd_coredump = -1, fd_peer_pidfd = -1;
+ int exit_code = EXIT_FAILURE;
+
+ close(ipc_sockets[0]);
+
+ fd_server = create_and_listen_unix_socket("/tmp/coredump.socket");
+ if (fd_server < 0) {
+ fprintf(stderr, "socket_request_reject: create_and_listen_unix_socket failed: %m\n");
+ goto out;
+ }
+
+ if (write_nointr(ipc_sockets[1], "1", 1) < 0) {
+ fprintf(stderr, "socket_request_reject: write_nointr to ipc socket failed: %m\n");
+ goto out;
+ }
+
+ close(ipc_sockets[1]);
+
+ fd_coredump = accept4(fd_server, NULL, NULL, SOCK_CLOEXEC);
+ if (fd_coredump < 0) {
+ fprintf(stderr, "socket_request_reject: accept4 failed: %m\n");
+ goto out;
+ }
+
+ fd_peer_pidfd = get_peer_pidfd(fd_coredump);
+ if (fd_peer_pidfd < 0) {
+ fprintf(stderr, "socket_request_reject: get_peer_pidfd failed\n");
+ goto out;
+ }
+
+ if (!get_pidfd_info(fd_peer_pidfd, &info)) {
+ fprintf(stderr, "socket_request_reject: get_pidfd_info failed\n");
+ goto out;
+ }
+
+ if (!(info.mask & PIDFD_INFO_COREDUMP)) {
+ fprintf(stderr, "socket_request_reject: PIDFD_INFO_COREDUMP not set in mask\n");
+ goto out;
+ }
+
+ if (!(info.coredump_mask & PIDFD_COREDUMPED)) {
+ fprintf(stderr, "socket_request_reject: PIDFD_COREDUMPED not set in coredump_mask\n");
+ goto out;
+ }
+
+ if (!read_coredump_req(fd_coredump, &req)) {
+ fprintf(stderr, "socket_request_reject: read_coredump_req failed\n");
+ goto out;
+ }
+
+ if (!check_coredump_req(&req, COREDUMP_ACK_SIZE_VER0,
+ COREDUMP_KERNEL | COREDUMP_USERSPACE |
+ COREDUMP_REJECT | COREDUMP_WAIT)) {
+ fprintf(stderr, "socket_request_reject: check_coredump_req failed\n");
+ goto out;
+ }
+
+ if (!send_coredump_ack(fd_coredump, &req,
+ COREDUMP_REJECT | COREDUMP_WAIT, 0)) {
+ fprintf(stderr, "socket_request_reject: send_coredump_ack failed\n");
+ goto out;
+ }
+
+ if (!read_marker(fd_coredump, COREDUMP_MARK_REQACK)) {
+ fprintf(stderr, "socket_request_reject: read_marker COREDUMP_MARK_REQACK failed\n");
+ goto out;
+ }
+
+ for (;;) {
+ char buffer[4096];
+ ssize_t bytes_read;
+
+ bytes_read = read(fd_coredump, buffer, sizeof(buffer));
+ if (bytes_read > 0) {
+ fprintf(stderr, "socket_request_reject: unexpected data received (expected no coredump data for REJECT)\n");
+ goto out;
+ }
+
+ if (bytes_read < 0) {
+ fprintf(stderr, "socket_request_reject: read from coredump socket failed: %m\n");
+ goto out;
+ }
+
+ if (bytes_read == 0)
+ break;
+ }
+
+ exit_code = EXIT_SUCCESS;
+ fprintf(stderr, "socket_request_reject: completed successfully\n");
+out:
+ if (fd_peer_pidfd >= 0)
+ close(fd_peer_pidfd);
+ if (fd_coredump >= 0)
+ close(fd_coredump);
+ if (fd_server >= 0)
+ close(fd_server);
+ _exit(exit_code);
+ }
+ self->pid_coredump_server = pid_coredump_server;
+
+ EXPECT_EQ(close(ipc_sockets[1]), 0);
+ ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1);
+ EXPECT_EQ(close(ipc_sockets[0]), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+ if (pid == 0)
+ crashing_child();
+
+ pidfd = sys_pidfd_open(pid, 0);
+ ASSERT_GE(pidfd, 0);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFSIGNALED(status));
+ ASSERT_FALSE(WCOREDUMP(status));
+
+ ASSERT_TRUE(get_pidfd_info(pidfd, &info));
+ ASSERT_GT((info.mask & PIDFD_INFO_COREDUMP), 0);
+ ASSERT_GT((info.coredump_mask & PIDFD_COREDUMPED), 0);
+
+ wait_and_check_coredump_server(pid_coredump_server, _metadata, self);
+}
+
+TEST_F(coredump, socket_request_invalid_flag_combination)
+{
+ int pidfd, ret, status;
+ pid_t pid, pid_coredump_server;
+ struct pidfd_info info = {};
+ int ipc_sockets[2];
+ char c;
+
+ ASSERT_TRUE(set_core_pattern("@@/tmp/coredump.socket"));
+
+ ret = socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
+ ASSERT_EQ(ret, 0);
+
+ pid_coredump_server = fork();
+ ASSERT_GE(pid_coredump_server, 0);
+ if (pid_coredump_server == 0) {
+ struct coredump_req req = {};
+ int fd_server = -1, fd_coredump = -1, fd_peer_pidfd = -1;
+ int exit_code = EXIT_FAILURE;
+
+ close(ipc_sockets[0]);
+
+ fd_server = create_and_listen_unix_socket("/tmp/coredump.socket");
+ if (fd_server < 0) {
+ fprintf(stderr, "socket_request_invalid_flag_combination: create_and_listen_unix_socket failed: %m\n");
+ goto out;
+ }
+
+ if (write_nointr(ipc_sockets[1], "1", 1) < 0) {
+ fprintf(stderr, "socket_request_invalid_flag_combination: write_nointr to ipc socket failed: %m\n");
+ goto out;
+ }
+
+ close(ipc_sockets[1]);
+
+ fd_coredump = accept4(fd_server, NULL, NULL, SOCK_CLOEXEC);
+ if (fd_coredump < 0) {
+ fprintf(stderr, "socket_request_invalid_flag_combination: accept4 failed: %m\n");
+ goto out;
+ }
+
+ fd_peer_pidfd = get_peer_pidfd(fd_coredump);
+ if (fd_peer_pidfd < 0) {
+ fprintf(stderr, "socket_request_invalid_flag_combination: get_peer_pidfd failed\n");
+ goto out;
+ }
+
+ if (!get_pidfd_info(fd_peer_pidfd, &info)) {
+ fprintf(stderr, "socket_request_invalid_flag_combination: get_pidfd_info failed\n");
+ goto out;
+ }
+
+ if (!(info.mask & PIDFD_INFO_COREDUMP)) {
+ fprintf(stderr, "socket_request_invalid_flag_combination: PIDFD_INFO_COREDUMP not set in mask\n");
+ goto out;
+ }
+
+ if (!(info.coredump_mask & PIDFD_COREDUMPED)) {
+ fprintf(stderr, "socket_request_invalid_flag_combination: PIDFD_COREDUMPED not set in coredump_mask\n");
+ goto out;
+ }
+
+ if (!read_coredump_req(fd_coredump, &req)) {
+ fprintf(stderr, "socket_request_invalid_flag_combination: read_coredump_req failed\n");
+ goto out;
+ }
+
+ if (!check_coredump_req(&req, COREDUMP_ACK_SIZE_VER0,
+ COREDUMP_KERNEL | COREDUMP_USERSPACE |
+ COREDUMP_REJECT | COREDUMP_WAIT)) {
+ fprintf(stderr, "socket_request_invalid_flag_combination: check_coredump_req failed\n");
+ goto out;
+ }
+
+ if (!send_coredump_ack(fd_coredump, &req,
+ COREDUMP_KERNEL | COREDUMP_REJECT | COREDUMP_WAIT, 0)) {
+ fprintf(stderr, "socket_request_invalid_flag_combination: send_coredump_ack failed\n");
+ goto out;
+ }
+
+ if (!read_marker(fd_coredump, COREDUMP_MARK_CONFLICTING)) {
+ fprintf(stderr, "socket_request_invalid_flag_combination: read_marker COREDUMP_MARK_CONFLICTING failed\n");
+ goto out;
+ }
+
+ exit_code = EXIT_SUCCESS;
+ fprintf(stderr, "socket_request_invalid_flag_combination: completed successfully\n");
+out:
+ if (fd_peer_pidfd >= 0)
+ close(fd_peer_pidfd);
+ if (fd_coredump >= 0)
+ close(fd_coredump);
+ if (fd_server >= 0)
+ close(fd_server);
+ _exit(exit_code);
+ }
+ self->pid_coredump_server = pid_coredump_server;
+
+ EXPECT_EQ(close(ipc_sockets[1]), 0);
+ ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1);
+ EXPECT_EQ(close(ipc_sockets[0]), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+ if (pid == 0)
+ crashing_child();
+
+ pidfd = sys_pidfd_open(pid, 0);
+ ASSERT_GE(pidfd, 0);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFSIGNALED(status));
+ ASSERT_FALSE(WCOREDUMP(status));
+
+ ASSERT_TRUE(get_pidfd_info(pidfd, &info));
+ ASSERT_GT((info.mask & PIDFD_INFO_COREDUMP), 0);
+ ASSERT_GT((info.coredump_mask & PIDFD_COREDUMPED), 0);
+
+ wait_and_check_coredump_server(pid_coredump_server, _metadata, self);
+}
+
+TEST_F(coredump, socket_request_unknown_flag)
+{
+ int pidfd, ret, status;
+ pid_t pid, pid_coredump_server;
+ struct pidfd_info info = {};
+ int ipc_sockets[2];
+ char c;
+
+ ASSERT_TRUE(set_core_pattern("@@/tmp/coredump.socket"));
+
+ ret = socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
+ ASSERT_EQ(ret, 0);
+
+ pid_coredump_server = fork();
+ ASSERT_GE(pid_coredump_server, 0);
+ if (pid_coredump_server == 0) {
+ struct coredump_req req = {};
+ int fd_server = -1, fd_coredump = -1, fd_peer_pidfd = -1;
+ int exit_code = EXIT_FAILURE;
+
+ close(ipc_sockets[0]);
+
+ fd_server = create_and_listen_unix_socket("/tmp/coredump.socket");
+ if (fd_server < 0) {
+ fprintf(stderr, "socket_request_unknown_flag: create_and_listen_unix_socket failed: %m\n");
+ goto out;
+ }
+
+ if (write_nointr(ipc_sockets[1], "1", 1) < 0) {
+ fprintf(stderr, "socket_request_unknown_flag: write_nointr to ipc socket failed: %m\n");
+ goto out;
+ }
+
+ close(ipc_sockets[1]);
+
+ fd_coredump = accept4(fd_server, NULL, NULL, SOCK_CLOEXEC);
+ if (fd_coredump < 0) {
+ fprintf(stderr, "socket_request_unknown_flag: accept4 failed: %m\n");
+ goto out;
+ }
+
+ fd_peer_pidfd = get_peer_pidfd(fd_coredump);
+ if (fd_peer_pidfd < 0) {
+ fprintf(stderr, "socket_request_unknown_flag: get_peer_pidfd failed\n");
+ goto out;
+ }
+
+ if (!get_pidfd_info(fd_peer_pidfd, &info)) {
+ fprintf(stderr, "socket_request_unknown_flag: get_pidfd_info failed\n");
+ goto out;
+ }
+
+ if (!(info.mask & PIDFD_INFO_COREDUMP)) {
+ fprintf(stderr, "socket_request_unknown_flag: PIDFD_INFO_COREDUMP not set in mask\n");
+ goto out;
+ }
+
+ if (!(info.coredump_mask & PIDFD_COREDUMPED)) {
+ fprintf(stderr, "socket_request_unknown_flag: PIDFD_COREDUMPED not set in coredump_mask\n");
+ goto out;
+ }
+
+ if (!read_coredump_req(fd_coredump, &req)) {
+ fprintf(stderr, "socket_request_unknown_flag: read_coredump_req failed\n");
+ goto out;
+ }
+
+ if (!check_coredump_req(&req, COREDUMP_ACK_SIZE_VER0,
+ COREDUMP_KERNEL | COREDUMP_USERSPACE |
+ COREDUMP_REJECT | COREDUMP_WAIT)) {
+ fprintf(stderr, "socket_request_unknown_flag: check_coredump_req failed\n");
+ goto out;
+ }
+
+ if (!send_coredump_ack(fd_coredump, &req, (1ULL << 63), 0)) {
+ fprintf(stderr, "socket_request_unknown_flag: send_coredump_ack failed\n");
+ goto out;
+ }
+
+ if (!read_marker(fd_coredump, COREDUMP_MARK_UNSUPPORTED)) {
+ fprintf(stderr, "socket_request_unknown_flag: read_marker COREDUMP_MARK_UNSUPPORTED failed\n");
+ goto out;
+ }
+
+ exit_code = EXIT_SUCCESS;
+ fprintf(stderr, "socket_request_unknown_flag: completed successfully\n");
+out:
+ if (fd_peer_pidfd >= 0)
+ close(fd_peer_pidfd);
+ if (fd_coredump >= 0)
+ close(fd_coredump);
+ if (fd_server >= 0)
+ close(fd_server);
+ _exit(exit_code);
+ }
+ self->pid_coredump_server = pid_coredump_server;
+
+ EXPECT_EQ(close(ipc_sockets[1]), 0);
+ ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1);
+ EXPECT_EQ(close(ipc_sockets[0]), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+ if (pid == 0)
+ crashing_child();
+
+ pidfd = sys_pidfd_open(pid, 0);
+ ASSERT_GE(pidfd, 0);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFSIGNALED(status));
+ ASSERT_FALSE(WCOREDUMP(status));
+
+ ASSERT_TRUE(get_pidfd_info(pidfd, &info));
+ ASSERT_GT((info.mask & PIDFD_INFO_COREDUMP), 0);
+ ASSERT_GT((info.coredump_mask & PIDFD_COREDUMPED), 0);
+
+ wait_and_check_coredump_server(pid_coredump_server, _metadata, self);
+}
+
+TEST_F(coredump, socket_request_invalid_size_small)
+{
+ int pidfd, ret, status;
+ pid_t pid, pid_coredump_server;
+ struct pidfd_info info = {};
+ int ipc_sockets[2];
+ char c;
+
+ ASSERT_TRUE(set_core_pattern("@@/tmp/coredump.socket"));
+
+ ret = socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
+ ASSERT_EQ(ret, 0);
+
+ pid_coredump_server = fork();
+ ASSERT_GE(pid_coredump_server, 0);
+ if (pid_coredump_server == 0) {
+ struct coredump_req req = {};
+ int fd_server = -1, fd_coredump = -1, fd_peer_pidfd = -1;
+ int exit_code = EXIT_FAILURE;
+
+ close(ipc_sockets[0]);
+
+ fd_server = create_and_listen_unix_socket("/tmp/coredump.socket");
+ if (fd_server < 0) {
+ fprintf(stderr, "socket_request_invalid_size_small: create_and_listen_unix_socket failed: %m\n");
+ goto out;
+ }
+
+ if (write_nointr(ipc_sockets[1], "1", 1) < 0) {
+ fprintf(stderr, "socket_request_invalid_size_small: write_nointr to ipc socket failed: %m\n");
+ goto out;
+ }
+
+ close(ipc_sockets[1]);
+
+ fd_coredump = accept4(fd_server, NULL, NULL, SOCK_CLOEXEC);
+ if (fd_coredump < 0) {
+ fprintf(stderr, "socket_request_invalid_size_small: accept4 failed: %m\n");
+ goto out;
+ }
+
+ fd_peer_pidfd = get_peer_pidfd(fd_coredump);
+ if (fd_peer_pidfd < 0) {
+ fprintf(stderr, "socket_request_invalid_size_small: get_peer_pidfd failed\n");
+ goto out;
+ }
+
+ if (!get_pidfd_info(fd_peer_pidfd, &info)) {
+ fprintf(stderr, "socket_request_invalid_size_small: get_pidfd_info failed\n");
+ goto out;
+ }
+
+ if (!(info.mask & PIDFD_INFO_COREDUMP)) {
+ fprintf(stderr, "socket_request_invalid_size_small: PIDFD_INFO_COREDUMP not set in mask\n");
+ goto out;
+ }
+
+ if (!(info.coredump_mask & PIDFD_COREDUMPED)) {
+ fprintf(stderr, "socket_request_invalid_size_small: PIDFD_COREDUMPED not set in coredump_mask\n");
+ goto out;
+ }
+
+ if (!read_coredump_req(fd_coredump, &req)) {
+ fprintf(stderr, "socket_request_invalid_size_small: read_coredump_req failed\n");
+ goto out;
+ }
+
+ if (!check_coredump_req(&req, COREDUMP_ACK_SIZE_VER0,
+ COREDUMP_KERNEL | COREDUMP_USERSPACE |
+ COREDUMP_REJECT | COREDUMP_WAIT)) {
+ fprintf(stderr, "socket_request_invalid_size_small: check_coredump_req failed\n");
+ goto out;
+ }
+
+ if (!send_coredump_ack(fd_coredump, &req,
+ COREDUMP_REJECT | COREDUMP_WAIT,
+ COREDUMP_ACK_SIZE_VER0 / 2)) {
+ fprintf(stderr, "socket_request_invalid_size_small: send_coredump_ack failed\n");
+ goto out;
+ }
+
+ if (!read_marker(fd_coredump, COREDUMP_MARK_MINSIZE)) {
+ fprintf(stderr, "socket_request_invalid_size_small: read_marker COREDUMP_MARK_MINSIZE failed\n");
+ goto out;
+ }
+
+ exit_code = EXIT_SUCCESS;
+ fprintf(stderr, "socket_request_invalid_size_small: completed successfully\n");
+out:
+ if (fd_peer_pidfd >= 0)
+ close(fd_peer_pidfd);
+ if (fd_coredump >= 0)
+ close(fd_coredump);
+ if (fd_server >= 0)
+ close(fd_server);
+ _exit(exit_code);
+ }
+ self->pid_coredump_server = pid_coredump_server;
+
+ EXPECT_EQ(close(ipc_sockets[1]), 0);
+ ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1);
+ EXPECT_EQ(close(ipc_sockets[0]), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+ if (pid == 0)
+ crashing_child();
+
+ pidfd = sys_pidfd_open(pid, 0);
+ ASSERT_GE(pidfd, 0);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFSIGNALED(status));
+ ASSERT_FALSE(WCOREDUMP(status));
+
+ ASSERT_TRUE(get_pidfd_info(pidfd, &info));
+ ASSERT_GT((info.mask & PIDFD_INFO_COREDUMP), 0);
+ ASSERT_GT((info.coredump_mask & PIDFD_COREDUMPED), 0);
+
+ wait_and_check_coredump_server(pid_coredump_server, _metadata, self);
+}
+
+TEST_F(coredump, socket_request_invalid_size_large)
+{
+ int pidfd, ret, status;
+ pid_t pid, pid_coredump_server;
+ struct pidfd_info info = {};
+ int ipc_sockets[2];
+ char c;
+
+ ASSERT_TRUE(set_core_pattern("@@/tmp/coredump.socket"));
+
+ ret = socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
+ ASSERT_EQ(ret, 0);
+
+ pid_coredump_server = fork();
+ ASSERT_GE(pid_coredump_server, 0);
+ if (pid_coredump_server == 0) {
+ struct coredump_req req = {};
+ int fd_server = -1, fd_coredump = -1, fd_peer_pidfd = -1;
+ int exit_code = EXIT_FAILURE;
+
+ close(ipc_sockets[0]);
+
+ fd_server = create_and_listen_unix_socket("/tmp/coredump.socket");
+ if (fd_server < 0) {
+ fprintf(stderr, "socket_request_invalid_size_large: create_and_listen_unix_socket failed: %m\n");
+ goto out;
+ }
+
+ if (write_nointr(ipc_sockets[1], "1", 1) < 0) {
+ fprintf(stderr, "socket_request_invalid_size_large: write_nointr to ipc socket failed: %m\n");
+ goto out;
+ }
+
+ close(ipc_sockets[1]);
+
+ fd_coredump = accept4(fd_server, NULL, NULL, SOCK_CLOEXEC);
+ if (fd_coredump < 0) {
+ fprintf(stderr, "socket_request_invalid_size_large: accept4 failed: %m\n");
+ goto out;
+ }
+
+ fd_peer_pidfd = get_peer_pidfd(fd_coredump);
+ if (fd_peer_pidfd < 0) {
+ fprintf(stderr, "socket_request_invalid_size_large: get_peer_pidfd failed\n");
+ goto out;
+ }
+
+ if (!get_pidfd_info(fd_peer_pidfd, &info)) {
+ fprintf(stderr, "socket_request_invalid_size_large: get_pidfd_info failed\n");
+ goto out;
+ }
+
+ if (!(info.mask & PIDFD_INFO_COREDUMP)) {
+ fprintf(stderr, "socket_request_invalid_size_large: PIDFD_INFO_COREDUMP not set in mask\n");
+ goto out;
+ }
+
+ if (!(info.coredump_mask & PIDFD_COREDUMPED)) {
+ fprintf(stderr, "socket_request_invalid_size_large: PIDFD_COREDUMPED not set in coredump_mask\n");
+ goto out;
+ }
+
+ if (!read_coredump_req(fd_coredump, &req)) {
+ fprintf(stderr, "socket_request_invalid_size_large: read_coredump_req failed\n");
+ goto out;
+ }
+
+ if (!check_coredump_req(&req, COREDUMP_ACK_SIZE_VER0,
+ COREDUMP_KERNEL | COREDUMP_USERSPACE |
+ COREDUMP_REJECT | COREDUMP_WAIT)) {
+ fprintf(stderr, "socket_request_invalid_size_large: check_coredump_req failed\n");
+ goto out;
+ }
+
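+	/* An oversized ack (COREDUMP_ACK_SIZE_VER0 + PAGE_SIZE) must be rejected with COREDUMP_MARK_MAXSIZE. */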
+ if (!send_coredump_ack(fd_coredump, &req,
+ COREDUMP_REJECT | COREDUMP_WAIT,
+ COREDUMP_ACK_SIZE_VER0 + PAGE_SIZE)) {
+ fprintf(stderr, "socket_request_invalid_size_large: send_coredump_ack failed\n");
+ goto out;
+ }
+
+ if (!read_marker(fd_coredump, COREDUMP_MARK_MAXSIZE)) {
+ fprintf(stderr, "socket_request_invalid_size_large: read_marker COREDUMP_MARK_MAXSIZE failed\n");
+ goto out;
+ }
+
+ exit_code = EXIT_SUCCESS;
+ fprintf(stderr, "socket_request_invalid_size_large: completed successfully\n");
+out:
+ if (fd_peer_pidfd >= 0)
+ close(fd_peer_pidfd);
+ if (fd_coredump >= 0)
+ close(fd_coredump);
+ if (fd_server >= 0)
+ close(fd_server);
+ _exit(exit_code);
+ }
+ self->pid_coredump_server = pid_coredump_server;
+
+ EXPECT_EQ(close(ipc_sockets[1]), 0);
+ ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1);
+ EXPECT_EQ(close(ipc_sockets[0]), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+ if (pid == 0)
+ crashing_child();
+
+ pidfd = sys_pidfd_open(pid, 0);
+ ASSERT_GE(pidfd, 0);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFSIGNALED(status));
+ ASSERT_FALSE(WCOREDUMP(status));
+
+ ASSERT_TRUE(get_pidfd_info(pidfd, &info));
+ ASSERT_GT((info.mask & PIDFD_INFO_COREDUMP), 0);
+ ASSERT_GT((info.coredump_mask & PIDFD_COREDUMPED), 0);
+
+ wait_and_check_coredump_server(pid_coredump_server, _metadata, self);
+}
+
+/*
+ * Test: PIDFD_INFO_COREDUMP_SIGNAL via socket coredump with SIGSEGV
+ *
+ * Verify that, when using the socket-based coredump protocol,
+ * the coredump_signal field is correctly exposed as SIGSEGV.
+ */
+TEST_F(coredump, socket_coredump_signal_sigsegv)
+{
+ int pidfd, ret, status;
+ pid_t pid, pid_coredump_server;
+ struct pidfd_info info = {};
+ int ipc_sockets[2];
+ char c;
+
+ ASSERT_TRUE(set_core_pattern("@@/tmp/coredump.socket"));
+
+ ret = socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
+ ASSERT_EQ(ret, 0);
+
+ pid_coredump_server = fork();
+ ASSERT_GE(pid_coredump_server, 0);
+ if (pid_coredump_server == 0) {
+ struct coredump_req req = {};
+ int fd_server = -1, fd_coredump = -1, fd_peer_pidfd = -1;
+ int exit_code = EXIT_FAILURE;
+
+ close(ipc_sockets[0]);
+
+ fd_server = create_and_listen_unix_socket("/tmp/coredump.socket");
+ if (fd_server < 0) {
+ fprintf(stderr, "socket_coredump_signal_sigsegv: create_and_listen_unix_socket failed: %m\n");
+ goto out;
+ }
+
+ if (write_nointr(ipc_sockets[1], "1", 1) < 0) {
+ fprintf(stderr, "socket_coredump_signal_sigsegv: write_nointr to ipc socket failed: %m\n");
+ goto out;
+ }
+
+ close(ipc_sockets[1]);
+
+ fd_coredump = accept4(fd_server, NULL, NULL, SOCK_CLOEXEC);
+ if (fd_coredump < 0) {
+ fprintf(stderr, "socket_coredump_signal_sigsegv: accept4 failed: %m\n");
+ goto out;
+ }
+
+ fd_peer_pidfd = get_peer_pidfd(fd_coredump);
+ if (fd_peer_pidfd < 0) {
+ fprintf(stderr, "socket_coredump_signal_sigsegv: get_peer_pidfd failed\n");
+ goto out;
+ }
+
+ if (!get_pidfd_info(fd_peer_pidfd, &info)) {
+ fprintf(stderr, "socket_coredump_signal_sigsegv: get_pidfd_info failed\n");
+ goto out;
+ }
+
+ if (!(info.mask & PIDFD_INFO_COREDUMP)) {
+ fprintf(stderr, "socket_coredump_signal_sigsegv: PIDFD_INFO_COREDUMP not set in mask\n");
+ goto out;
+ }
+
+ if (!(info.coredump_mask & PIDFD_COREDUMPED)) {
+ fprintf(stderr, "socket_coredump_signal_sigsegv: PIDFD_COREDUMPED not set in coredump_mask\n");
+ goto out;
+ }
+
+ /* Verify coredump_signal is available and correct */
+ if (!(info.mask & PIDFD_INFO_COREDUMP_SIGNAL)) {
+ fprintf(stderr, "socket_coredump_signal_sigsegv: PIDFD_INFO_COREDUMP_SIGNAL not set in mask\n");
+ goto out;
+ }
+
+ if (info.coredump_signal != SIGSEGV) {
+ fprintf(stderr, "socket_coredump_signal_sigsegv: coredump_signal=%d, expected SIGSEGV=%d\n",
+ info.coredump_signal, SIGSEGV);
+ goto out;
+ }
+
+ if (!read_coredump_req(fd_coredump, &req)) {
+ fprintf(stderr, "socket_coredump_signal_sigsegv: read_coredump_req failed\n");
+ goto out;
+ }
+
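+	/* Reject the dump; this test only verifies the signal info checked above. */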
+ if (!send_coredump_ack(fd_coredump, &req,
+ COREDUMP_REJECT | COREDUMP_WAIT, 0)) {
+ fprintf(stderr, "socket_coredump_signal_sigsegv: send_coredump_ack failed\n");
+ goto out;
+ }
+
+ if (!read_marker(fd_coredump, COREDUMP_MARK_REQACK)) {
+ fprintf(stderr, "socket_coredump_signal_sigsegv: read_marker COREDUMP_MARK_REQACK failed\n");
+ goto out;
+ }
+
+ exit_code = EXIT_SUCCESS;
+ fprintf(stderr, "socket_coredump_signal_sigsegv: completed successfully\n");
+out:
+ if (fd_peer_pidfd >= 0)
+ close(fd_peer_pidfd);
+ if (fd_coredump >= 0)
+ close(fd_coredump);
+ if (fd_server >= 0)
+ close(fd_server);
+ _exit(exit_code);
+ }
+ self->pid_coredump_server = pid_coredump_server;
+
+ EXPECT_EQ(close(ipc_sockets[1]), 0);
+ ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1);
+ EXPECT_EQ(close(ipc_sockets[0]), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+ if (pid == 0)
+ crashing_child();
+
+ pidfd = sys_pidfd_open(pid, 0);
+ ASSERT_GE(pidfd, 0);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFSIGNALED(status));
+ ASSERT_EQ(WTERMSIG(status), SIGSEGV);
+
+ ASSERT_TRUE(get_pidfd_info(pidfd, &info));
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_COREDUMP));
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_COREDUMP_SIGNAL));
+ ASSERT_EQ(info.coredump_signal, SIGSEGV);
+
+ wait_and_check_coredump_server(pid_coredump_server, _metadata, self);
+}
+
+/*
+ * Test: PIDFD_INFO_COREDUMP_SIGNAL via socket coredump with SIGABRT
+ *
+ * Verify that, when using the socket-based coredump protocol,
+ * the coredump_signal field is correctly exposed as SIGABRT.
+ */
+TEST_F(coredump, socket_coredump_signal_sigabrt)
+{
+ int pidfd, ret, status;
+ pid_t pid, pid_coredump_server;
+ struct pidfd_info info = {};
+ int ipc_sockets[2];
+ char c;
+
+ ASSERT_TRUE(set_core_pattern("@@/tmp/coredump.socket"));
+
+ ret = socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
+ ASSERT_EQ(ret, 0);
+
+ pid_coredump_server = fork();
+ ASSERT_GE(pid_coredump_server, 0);
+ if (pid_coredump_server == 0) {
+ struct coredump_req req = {};
+ int fd_server = -1, fd_coredump = -1, fd_peer_pidfd = -1;
+ int exit_code = EXIT_FAILURE;
+
+ close(ipc_sockets[0]);
+
+ fd_server = create_and_listen_unix_socket("/tmp/coredump.socket");
+ if (fd_server < 0) {
+ fprintf(stderr, "socket_coredump_signal_sigabrt: create_and_listen_unix_socket failed: %m\n");
+ goto out;
+ }
+
+ if (write_nointr(ipc_sockets[1], "1", 1) < 0) {
+ fprintf(stderr, "socket_coredump_signal_sigabrt: write_nointr to ipc socket failed: %m\n");
+ goto out;
+ }
+
+ close(ipc_sockets[1]);
+
+ fd_coredump = accept4(fd_server, NULL, NULL, SOCK_CLOEXEC);
+ if (fd_coredump < 0) {
+ fprintf(stderr, "socket_coredump_signal_sigabrt: accept4 failed: %m\n");
+ goto out;
+ }
+
+ fd_peer_pidfd = get_peer_pidfd(fd_coredump);
+ if (fd_peer_pidfd < 0) {
+ fprintf(stderr, "socket_coredump_signal_sigabrt: get_peer_pidfd failed\n");
+ goto out;
+ }
+
+ if (!get_pidfd_info(fd_peer_pidfd, &info)) {
+ fprintf(stderr, "socket_coredump_signal_sigabrt: get_pidfd_info failed\n");
+ goto out;
+ }
+
+ if (!(info.mask & PIDFD_INFO_COREDUMP)) {
+ fprintf(stderr, "socket_coredump_signal_sigabrt: PIDFD_INFO_COREDUMP not set in mask\n");
+ goto out;
+ }
+
+ if (!(info.coredump_mask & PIDFD_COREDUMPED)) {
+ fprintf(stderr, "socket_coredump_signal_sigabrt: PIDFD_COREDUMPED not set in coredump_mask\n");
+ goto out;
+ }
+
+ /* Verify coredump_signal is available and correct */
+ if (!(info.mask & PIDFD_INFO_COREDUMP_SIGNAL)) {
+ fprintf(stderr, "socket_coredump_signal_sigabrt: PIDFD_INFO_COREDUMP_SIGNAL not set in mask\n");
+ goto out;
+ }
+
+ if (info.coredump_signal != SIGABRT) {
+ fprintf(stderr, "socket_coredump_signal_sigabrt: coredump_signal=%d, expected SIGABRT=%d\n",
+ info.coredump_signal, SIGABRT);
+ goto out;
+ }
+
+ if (!read_coredump_req(fd_coredump, &req)) {
+ fprintf(stderr, "socket_coredump_signal_sigabrt: read_coredump_req failed\n");
+ goto out;
+ }
+
+ if (!send_coredump_ack(fd_coredump, &req,
+ COREDUMP_REJECT | COREDUMP_WAIT, 0)) {
+ fprintf(stderr, "socket_coredump_signal_sigabrt: send_coredump_ack failed\n");
+ goto out;
+ }
+
+ if (!read_marker(fd_coredump, COREDUMP_MARK_REQACK)) {
+ fprintf(stderr, "socket_coredump_signal_sigabrt: read_marker COREDUMP_MARK_REQACK failed\n");
+ goto out;
+ }
+
+ exit_code = EXIT_SUCCESS;
+ fprintf(stderr, "socket_coredump_signal_sigabrt: completed successfully\n");
+out:
+ if (fd_peer_pidfd >= 0)
+ close(fd_peer_pidfd);
+ if (fd_coredump >= 0)
+ close(fd_coredump);
+ if (fd_server >= 0)
+ close(fd_server);
+ _exit(exit_code);
+ }
+ self->pid_coredump_server = pid_coredump_server;
+
+ EXPECT_EQ(close(ipc_sockets[1]), 0);
+ ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1);
+ EXPECT_EQ(close(ipc_sockets[0]), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+ if (pid == 0)
+ abort();
+
+ pidfd = sys_pidfd_open(pid, 0);
+ ASSERT_GE(pidfd, 0);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFSIGNALED(status));
+ ASSERT_EQ(WTERMSIG(status), SIGABRT);
+
+ ASSERT_TRUE(get_pidfd_info(pidfd, &info));
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_COREDUMP));
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_COREDUMP_SIGNAL));
+ ASSERT_EQ(info.coredump_signal, SIGABRT);
+
+ wait_and_check_coredump_server(pid_coredump_server, _metadata, self);
+}
+
+TEST_F_TIMEOUT(coredump, socket_multiple_crashing_coredumps, 500)
+{
+ int pidfd[NUM_CRASHING_COREDUMPS], status[NUM_CRASHING_COREDUMPS];
+ pid_t pid[NUM_CRASHING_COREDUMPS], pid_coredump_server;
+ struct pidfd_info info = {};
+ int ipc_sockets[2];
+ char c;
+
+ ASSERT_TRUE(set_core_pattern("@@/tmp/coredump.socket"));
+
+ ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets), 0);
+
+ pid_coredump_server = fork();
+ ASSERT_GE(pid_coredump_server, 0);
+ if (pid_coredump_server == 0) {
+ int fd_server = -1, fd_coredump = -1, fd_peer_pidfd = -1, fd_core_file = -1;
+ int exit_code = EXIT_FAILURE;
+ struct coredump_req req = {};
+
+ close(ipc_sockets[0]);
+ fd_server = create_and_listen_unix_socket("/tmp/coredump.socket");
+ if (fd_server < 0) {
+ fprintf(stderr, "Failed to create and listen on unix socket\n");
+ goto out;
+ }
+
+ if (write_nointr(ipc_sockets[1], "1", 1) < 0) {
+ fprintf(stderr, "Failed to notify parent via ipc socket\n");
+ goto out;
+ }
+ close(ipc_sockets[1]);
+
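+		/* Accept and service each crashing child's connection in turn, writing every dump to its own tmpfile. */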
+ for (int i = 0; i < NUM_CRASHING_COREDUMPS; i++) {
+ fd_coredump = accept4(fd_server, NULL, NULL, SOCK_CLOEXEC);
+ if (fd_coredump < 0) {
+ fprintf(stderr, "accept4 failed: %m\n");
+ goto out;
+ }
+
+ fd_peer_pidfd = get_peer_pidfd(fd_coredump);
+ if (fd_peer_pidfd < 0) {
+ fprintf(stderr, "get_peer_pidfd failed for fd %d: %m\n", fd_coredump);
+ goto out;
+ }
+
+ if (!get_pidfd_info(fd_peer_pidfd, &info)) {
+ fprintf(stderr, "get_pidfd_info failed for fd %d\n", fd_peer_pidfd);
+ goto out;
+ }
+
+ if (!(info.mask & PIDFD_INFO_COREDUMP)) {
+ fprintf(stderr, "pidfd info missing PIDFD_INFO_COREDUMP for fd %d\n", fd_peer_pidfd);
+ goto out;
+ }
+ if (!(info.coredump_mask & PIDFD_COREDUMPED)) {
+ fprintf(stderr, "pidfd info missing PIDFD_COREDUMPED for fd %d\n", fd_peer_pidfd);
+ goto out;
+ }
+
+ if (!read_coredump_req(fd_coredump, &req)) {
+ fprintf(stderr, "read_coredump_req failed for fd %d\n", fd_coredump);
+ goto out;
+ }
+
+ if (!check_coredump_req(&req, COREDUMP_ACK_SIZE_VER0,
+ COREDUMP_KERNEL | COREDUMP_USERSPACE |
+ COREDUMP_REJECT | COREDUMP_WAIT)) {
+ fprintf(stderr, "check_coredump_req failed for fd %d\n", fd_coredump);
+ goto out;
+ }
+
+ if (!send_coredump_ack(fd_coredump, &req,
+ COREDUMP_KERNEL | COREDUMP_WAIT, 0)) {
+ fprintf(stderr, "send_coredump_ack failed for fd %d\n", fd_coredump);
+ goto out;
+ }
+
+ if (!read_marker(fd_coredump, COREDUMP_MARK_REQACK)) {
+ fprintf(stderr, "read_marker failed for fd %d\n", fd_coredump);
+ goto out;
+ }
+
+ fd_core_file = open_coredump_tmpfile(self->fd_tmpfs_detached);
+ if (fd_core_file < 0) {
+ fprintf(stderr, "%m - open_coredump_tmpfile failed for fd %d\n", fd_coredump);
+ goto out;
+ }
+
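+			/* Stream the core dump from the socket into the tmpfile until the kernel closes its end. */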
+ for (;;) {
+ char buffer[4096];
+ ssize_t bytes_read, bytes_write;
+
+ bytes_read = read(fd_coredump, buffer, sizeof(buffer));
+ if (bytes_read < 0) {
+ fprintf(stderr, "read failed for fd %d: %m\n", fd_coredump);
+ goto out;
+ }
+
+ if (bytes_read == 0)
+ break;
+
+ bytes_write = write(fd_core_file, buffer, bytes_read);
+ if (bytes_read != bytes_write) {
+ if (bytes_write < 0 && errno == ENOSPC)
+ continue;
+ fprintf(stderr, "write failed for fd %d: %m\n", fd_core_file);
+ goto out;
+ }
+ }
+
+			close(fd_core_file);
+			close(fd_peer_pidfd);
+			close(fd_coredump);
+			/* Reset so the out: path does not double-close stale fds. */
+			fd_core_file = -1;
+			fd_peer_pidfd = -1;
+			fd_coredump = -1;
+ }
+
+ exit_code = EXIT_SUCCESS;
+out:
+ if (fd_core_file >= 0)
+ close(fd_core_file);
+ if (fd_peer_pidfd >= 0)
+ close(fd_peer_pidfd);
+ if (fd_coredump >= 0)
+ close(fd_coredump);
+ if (fd_server >= 0)
+ close(fd_server);
+ _exit(exit_code);
+ }
+ self->pid_coredump_server = pid_coredump_server;
+
+ EXPECT_EQ(close(ipc_sockets[1]), 0);
+ ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1);
+ EXPECT_EQ(close(ipc_sockets[0]), 0);
+
+ for (int i = 0; i < NUM_CRASHING_COREDUMPS; i++) {
+ pid[i] = fork();
+ ASSERT_GE(pid[i], 0);
+ if (pid[i] == 0)
+ crashing_child();
+ pidfd[i] = sys_pidfd_open(pid[i], 0);
+ ASSERT_GE(pidfd[i], 0);
+ }
+
+ for (int i = 0; i < NUM_CRASHING_COREDUMPS; i++) {
+ waitpid(pid[i], &status[i], 0);
+ ASSERT_TRUE(WIFSIGNALED(status[i]));
+ ASSERT_TRUE(WCOREDUMP(status[i]));
+ }
+
+ for (int i = 0; i < NUM_CRASHING_COREDUMPS; i++) {
+ info.mask = PIDFD_INFO_EXIT | PIDFD_INFO_COREDUMP;
+ ASSERT_EQ(ioctl(pidfd[i], PIDFD_GET_INFO, &info), 0);
+ ASSERT_GT((info.mask & PIDFD_INFO_COREDUMP), 0);
+ ASSERT_GT((info.coredump_mask & PIDFD_COREDUMPED), 0);
+ }
+
+ wait_and_check_coredump_server(pid_coredump_server, _metadata, self);
+}
+
+TEST_F_TIMEOUT(coredump, socket_multiple_crashing_coredumps_epoll_workers, 500)
+{
+ int pidfd[NUM_CRASHING_COREDUMPS], status[NUM_CRASHING_COREDUMPS];
+ pid_t pid[NUM_CRASHING_COREDUMPS], pid_coredump_server, worker_pids[NUM_CRASHING_COREDUMPS];
+ struct pidfd_info info = {};
+ int ipc_sockets[2];
+ char c;
+
+ ASSERT_TRUE(set_core_pattern("@@/tmp/coredump.socket"));
+ ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets), 0);
+
+ pid_coredump_server = fork();
+ ASSERT_GE(pid_coredump_server, 0);
+ if (pid_coredump_server == 0) {
+ int fd_server = -1, exit_code = EXIT_FAILURE, n_conns = 0;
+
+ close(ipc_sockets[0]);
+ fd_server = create_and_listen_unix_socket("/tmp/coredump.socket");
+ if (fd_server < 0) {
+ fprintf(stderr, "socket_multiple_crashing_coredumps_epoll_workers: create_and_listen_unix_socket failed: %m\n");
+ goto out;
+ }
+
+ if (write_nointr(ipc_sockets[1], "1", 1) < 0) {
+ fprintf(stderr, "socket_multiple_crashing_coredumps_epoll_workers: write_nointr to ipc socket failed: %m\n");
+ goto out;
+ }
+ close(ipc_sockets[1]);
+
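+		/* Accept one connection per crashing child and hand each off to a forked worker that drains it via edge-triggered epoll. */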
+ while (n_conns < NUM_CRASHING_COREDUMPS) {
+ int fd_coredump = -1, fd_peer_pidfd = -1, fd_core_file = -1;
+ struct coredump_req req = {};
+ fd_coredump = accept4(fd_server, NULL, NULL, SOCK_CLOEXEC);
+ if (fd_coredump < 0) {
+ if (errno == EAGAIN || errno == EWOULDBLOCK)
+ continue;
+ fprintf(stderr, "socket_multiple_crashing_coredumps_epoll_workers: accept4 failed: %m\n");
+ goto out;
+ }
+ fd_peer_pidfd = get_peer_pidfd(fd_coredump);
+ if (fd_peer_pidfd < 0) {
+ fprintf(stderr, "socket_multiple_crashing_coredumps_epoll_workers: get_peer_pidfd failed\n");
+ goto out;
+ }
+ if (!get_pidfd_info(fd_peer_pidfd, &info)) {
+ fprintf(stderr, "socket_multiple_crashing_coredumps_epoll_workers: get_pidfd_info failed\n");
+ goto out;
+ }
+ if (!(info.mask & PIDFD_INFO_COREDUMP) || !(info.coredump_mask & PIDFD_COREDUMPED)) {
+ fprintf(stderr, "socket_multiple_crashing_coredumps_epoll_workers: missing PIDFD_INFO_COREDUMP or PIDFD_COREDUMPED\n");
+ goto out;
+ }
+ if (!read_coredump_req(fd_coredump, &req)) {
+ fprintf(stderr, "socket_multiple_crashing_coredumps_epoll_workers: read_coredump_req failed\n");
+ goto out;
+ }
+ if (!check_coredump_req(&req, COREDUMP_ACK_SIZE_VER0,
+ COREDUMP_KERNEL | COREDUMP_USERSPACE |
+ COREDUMP_REJECT | COREDUMP_WAIT)) {
+ fprintf(stderr, "socket_multiple_crashing_coredumps_epoll_workers: check_coredump_req failed\n");
+ goto out;
+ }
+ if (!send_coredump_ack(fd_coredump, &req, COREDUMP_KERNEL | COREDUMP_WAIT, 0)) {
+ fprintf(stderr, "socket_multiple_crashing_coredumps_epoll_workers: send_coredump_ack failed\n");
+ goto out;
+ }
+ if (!read_marker(fd_coredump, COREDUMP_MARK_REQACK)) {
+ fprintf(stderr, "socket_multiple_crashing_coredumps_epoll_workers: read_marker failed\n");
+ goto out;
+ }
+ fd_core_file = open_coredump_tmpfile(self->fd_tmpfs_detached);
+ if (fd_core_file < 0) {
+ fprintf(stderr, "socket_multiple_crashing_coredumps_epoll_workers: open_coredump_tmpfile failed: %m\n");
+ goto out;
+ }
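+			/* Fork a dedicated worker for this connection so the parent can keep accepting. */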
+			pid_t worker = fork();
+			if (worker < 0) {
+				fprintf(stderr, "socket_multiple_crashing_coredumps_epoll_workers: fork of worker failed: %m\n");
+				goto out;
+			}
+			if (worker == 0) {
+				close(fd_server);
+				process_coredump_worker(fd_coredump, fd_peer_pidfd, fd_core_file);
+			}
+			worker_pids[n_conns] = worker;
+ if (fd_coredump >= 0)
+ close(fd_coredump);
+ if (fd_peer_pidfd >= 0)
+ close(fd_peer_pidfd);
+ if (fd_core_file >= 0)
+ close(fd_core_file);
+ n_conns++;
+ }
+ exit_code = EXIT_SUCCESS;
+out:
+ if (fd_server >= 0)
+ close(fd_server);
+
+		/* Reap all worker processes */
+ for (int i = 0; i < n_conns; i++) {
+ int wstatus;
+ if (waitpid(worker_pids[i], &wstatus, 0) < 0) {
+ fprintf(stderr, "Failed to wait for worker %d: %m\n", worker_pids[i]);
+ } else if (WIFEXITED(wstatus) && WEXITSTATUS(wstatus) != EXIT_SUCCESS) {
+ fprintf(stderr, "Worker %d exited with error code %d\n", worker_pids[i], WEXITSTATUS(wstatus));
+ exit_code = EXIT_FAILURE;
+ }
+ }
+
+ _exit(exit_code);
+ }
+ self->pid_coredump_server = pid_coredump_server;
+
+ EXPECT_EQ(close(ipc_sockets[1]), 0);
+ ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1);
+ EXPECT_EQ(close(ipc_sockets[0]), 0);
+
+ for (int i = 0; i < NUM_CRASHING_COREDUMPS; i++) {
+ pid[i] = fork();
+ ASSERT_GE(pid[i], 0);
+ if (pid[i] == 0)
+ crashing_child();
+ pidfd[i] = sys_pidfd_open(pid[i], 0);
+ ASSERT_GE(pidfd[i], 0);
+ }
+
+ for (int i = 0; i < NUM_CRASHING_COREDUMPS; i++) {
+ ASSERT_GE(waitpid(pid[i], &status[i], 0), 0);
+ ASSERT_TRUE(WIFSIGNALED(status[i]));
+ ASSERT_TRUE(WCOREDUMP(status[i]));
+ }
+
+ for (int i = 0; i < NUM_CRASHING_COREDUMPS; i++) {
+ info.mask = PIDFD_INFO_EXIT | PIDFD_INFO_COREDUMP;
+ ASSERT_EQ(ioctl(pidfd[i], PIDFD_GET_INFO, &info), 0);
+ ASSERT_GT((info.mask & PIDFD_INFO_COREDUMP), 0);
+ ASSERT_GT((info.coredump_mask & PIDFD_COREDUMPED), 0);
+ }
+
+ wait_and_check_coredump_server(pid_coredump_server, _metadata, self);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/coredump/coredump_socket_test.c b/tools/testing/selftests/coredump/coredump_socket_test.c
new file mode 100644
index 000000000000..7e26d4a6a15d
--- /dev/null
+++ b/tools/testing/selftests/coredump/coredump_socket_test.c
@@ -0,0 +1,742 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <sys/stat.h>
+#include <sys/epoll.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+
+#include "coredump_test.h"
+
+FIXTURE_SETUP(coredump)
+{
+ FILE *file;
+ int ret;
+
+ self->pid_coredump_server = -ESRCH;
+ self->fd_tmpfs_detached = -1;
+ file = fopen("/proc/sys/kernel/core_pattern", "r");
+ ASSERT_NE(NULL, file);
+
+ ret = fread(self->original_core_pattern, 1, sizeof(self->original_core_pattern), file);
+ ASSERT_TRUE(ret || feof(file));
+ ASSERT_LT(ret, sizeof(self->original_core_pattern));
+
+ self->original_core_pattern[ret] = '\0';
+ self->fd_tmpfs_detached = create_detached_tmpfs();
+ ASSERT_GE(self->fd_tmpfs_detached, 0);
+
+ ret = fclose(file);
+ ASSERT_EQ(0, ret);
+}
+
+FIXTURE_TEARDOWN(coredump)
+{
+ const char *reason;
+ FILE *file;
+ int ret, status;
+
+ if (self->pid_coredump_server > 0) {
+ kill(self->pid_coredump_server, SIGTERM);
+ waitpid(self->pid_coredump_server, &status, 0);
+ }
+ unlink("/tmp/coredump.file");
+ unlink("/tmp/coredump.socket");
+
+ file = fopen("/proc/sys/kernel/core_pattern", "w");
+ if (!file) {
+ reason = "Unable to open core_pattern";
+ goto fail;
+ }
+
+ ret = fprintf(file, "%s", self->original_core_pattern);
+ if (ret < 0) {
+ reason = "Unable to write to core_pattern";
+ goto fail;
+ }
+
+ ret = fclose(file);
+ if (ret) {
+ reason = "Unable to close core_pattern";
+ goto fail;
+ }
+
+ if (self->fd_tmpfs_detached >= 0) {
+ ret = close(self->fd_tmpfs_detached);
+ if (ret < 0) {
+ reason = "Unable to close detached tmpfs";
+ goto fail;
+ }
+ self->fd_tmpfs_detached = -1;
+ }
+
+ return;
+fail:
+ /* This should never happen */
+ fprintf(stderr, "Failed to cleanup coredump test: %s\n", reason);
+}
+
+TEST_F(coredump, socket)
+{
+ int pidfd, ret, status;
+ pid_t pid, pid_coredump_server;
+ struct stat st;
+ struct pidfd_info info = {};
+ int ipc_sockets[2];
+ char c;
+
+ ASSERT_TRUE(set_core_pattern("@/tmp/coredump.socket"));
+
+ ret = socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
+ ASSERT_EQ(ret, 0);
+
+ pid_coredump_server = fork();
+ ASSERT_GE(pid_coredump_server, 0);
+ if (pid_coredump_server == 0) {
+ int fd_server = -1, fd_coredump = -1, fd_peer_pidfd = -1, fd_core_file = -1;
+ int exit_code = EXIT_FAILURE;
+
+ close(ipc_sockets[0]);
+
+ fd_server = create_and_listen_unix_socket("/tmp/coredump.socket");
+ if (fd_server < 0) {
+ fprintf(stderr, "socket test: create_and_listen_unix_socket failed: %m\n");
+ goto out;
+ }
+
+ if (write_nointr(ipc_sockets[1], "1", 1) < 0) {
+ fprintf(stderr, "socket test: write_nointr to ipc socket failed: %m\n");
+ goto out;
+ }
+
+ close(ipc_sockets[1]);
+
+ fd_coredump = accept4(fd_server, NULL, NULL, SOCK_CLOEXEC);
+ if (fd_coredump < 0) {
+ fprintf(stderr, "socket test: accept4 failed: %m\n");
+ goto out;
+ }
+
+ fd_peer_pidfd = get_peer_pidfd(fd_coredump);
+ if (fd_peer_pidfd < 0) {
+ fprintf(stderr, "socket test: get_peer_pidfd failed\n");
+ goto out;
+ }
+
+ if (!get_pidfd_info(fd_peer_pidfd, &info)) {
+ fprintf(stderr, "socket test: get_pidfd_info failed\n");
+ goto out;
+ }
+
+ if (!(info.mask & PIDFD_INFO_COREDUMP)) {
+ fprintf(stderr, "socket test: PIDFD_INFO_COREDUMP not set in mask\n");
+ goto out;
+ }
+
+ if (!(info.coredump_mask & PIDFD_COREDUMPED)) {
+ fprintf(stderr, "socket test: PIDFD_COREDUMPED not set in coredump_mask\n");
+ goto out;
+ }
+
+ fd_core_file = creat("/tmp/coredump.file", 0644);
+ if (fd_core_file < 0) {
+ fprintf(stderr, "socket test: creat coredump file failed: %m\n");
+ goto out;
+ }
+
+ for (;;) {
+ char buffer[4096];
+ ssize_t bytes_read, bytes_write;
+
+ bytes_read = read(fd_coredump, buffer, sizeof(buffer));
+ if (bytes_read < 0) {
+ fprintf(stderr, "socket test: read from coredump socket failed: %m\n");
+ goto out;
+ }
+
+ if (bytes_read == 0)
+ break;
+
+ bytes_write = write(fd_core_file, buffer, bytes_read);
+ if (bytes_read != bytes_write) {
+ if (bytes_write < 0 && errno == ENOSPC)
+ continue;
+ fprintf(stderr, "socket test: write to core file failed (read=%zd, write=%zd): %m\n", bytes_read, bytes_write);
+ goto out;
+ }
+ }
+
+ exit_code = EXIT_SUCCESS;
+ fprintf(stderr, "socket test: completed successfully\n");
+out:
+ if (fd_core_file >= 0)
+ close(fd_core_file);
+ if (fd_peer_pidfd >= 0)
+ close(fd_peer_pidfd);
+ if (fd_coredump >= 0)
+ close(fd_coredump);
+ if (fd_server >= 0)
+ close(fd_server);
+ _exit(exit_code);
+ }
+ self->pid_coredump_server = pid_coredump_server;
+
+ EXPECT_EQ(close(ipc_sockets[1]), 0);
+ ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1);
+ EXPECT_EQ(close(ipc_sockets[0]), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+ if (pid == 0)
+ crashing_child();
+
+ pidfd = sys_pidfd_open(pid, 0);
+ ASSERT_GE(pidfd, 0);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFSIGNALED(status));
+ ASSERT_TRUE(WCOREDUMP(status));
+
+ ASSERT_TRUE(get_pidfd_info(pidfd, &info));
+ ASSERT_GT((info.mask & PIDFD_INFO_COREDUMP), 0);
+ ASSERT_GT((info.coredump_mask & PIDFD_COREDUMPED), 0);
+
+ wait_and_check_coredump_server(pid_coredump_server, _metadata, self);
+
+ ASSERT_EQ(stat("/tmp/coredump.file", &st), 0);
+ ASSERT_GT(st.st_size, 0);
+}
+
+TEST_F(coredump, socket_detect_userspace_client)
+{
+ int pidfd, ret, status;
+ pid_t pid, pid_coredump_server;
+ struct stat st;
+ struct pidfd_info info = {
+ .mask = PIDFD_INFO_COREDUMP,
+ };
+ int ipc_sockets[2];
+ char c;
+
+ ASSERT_TRUE(set_core_pattern("@/tmp/coredump.socket"));
+
+ ret = socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
+ ASSERT_EQ(ret, 0);
+
+ pid_coredump_server = fork();
+ ASSERT_GE(pid_coredump_server, 0);
+ if (pid_coredump_server == 0) {
+ int fd_server = -1, fd_coredump = -1, fd_peer_pidfd = -1;
+ int exit_code = EXIT_FAILURE;
+
+ close(ipc_sockets[0]);
+
+ fd_server = create_and_listen_unix_socket("/tmp/coredump.socket");
+ if (fd_server < 0) {
+ fprintf(stderr, "socket_detect_userspace_client: create_and_listen_unix_socket failed: %m\n");
+ goto out;
+ }
+
+ if (write_nointr(ipc_sockets[1], "1", 1) < 0) {
+ fprintf(stderr, "socket_detect_userspace_client: write_nointr to ipc socket failed: %m\n");
+ goto out;
+ }
+
+ close(ipc_sockets[1]);
+
+ fd_coredump = accept4(fd_server, NULL, NULL, SOCK_CLOEXEC);
+ if (fd_coredump < 0) {
+ fprintf(stderr, "socket_detect_userspace_client: accept4 failed: %m\n");
+ goto out;
+ }
+
+ fd_peer_pidfd = get_peer_pidfd(fd_coredump);
+ if (fd_peer_pidfd < 0) {
+ fprintf(stderr, "socket_detect_userspace_client: get_peer_pidfd failed\n");
+ goto out;
+ }
+
+ if (!get_pidfd_info(fd_peer_pidfd, &info)) {
+ fprintf(stderr, "socket_detect_userspace_client: get_pidfd_info failed\n");
+ goto out;
+ }
+
+ if (!(info.mask & PIDFD_INFO_COREDUMP)) {
+ fprintf(stderr, "socket_detect_userspace_client: PIDFD_INFO_COREDUMP not set in mask\n");
+ goto out;
+ }
+
+ if (info.coredump_mask & PIDFD_COREDUMPED) {
+ fprintf(stderr, "socket_detect_userspace_client: PIDFD_COREDUMPED incorrectly set (should be userspace client)\n");
+ goto out;
+ }
+
+ exit_code = EXIT_SUCCESS;
+ fprintf(stderr, "socket_detect_userspace_client: completed successfully\n");
+out:
+ if (fd_peer_pidfd >= 0)
+ close(fd_peer_pidfd);
+ if (fd_coredump >= 0)
+ close(fd_coredump);
+ if (fd_server >= 0)
+ close(fd_server);
+ _exit(exit_code);
+ }
+ self->pid_coredump_server = pid_coredump_server;
+
+ EXPECT_EQ(close(ipc_sockets[1]), 0);
+ ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1);
+ EXPECT_EQ(close(ipc_sockets[0]), 0);
+
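+	/* The child connects from plain userspace, so the server must see PIDFD_COREDUMPED unset. */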
+ pid = fork();
+ ASSERT_GE(pid, 0);
+ if (pid == 0) {
+ int fd_socket;
+ ssize_t ret;
+ const struct sockaddr_un coredump_sk = {
+ .sun_family = AF_UNIX,
+ .sun_path = "/tmp/coredump.socket",
+ };
+ size_t coredump_sk_len =
+ offsetof(struct sockaddr_un, sun_path) +
+ sizeof("/tmp/coredump.socket");
+
+ fd_socket = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (fd_socket < 0) {
+ fprintf(stderr, "socket_detect_userspace_client (client): socket failed: %m\n");
+ _exit(EXIT_FAILURE);
+ }
+
+ ret = connect(fd_socket, (const struct sockaddr *)&coredump_sk, coredump_sk_len);
+ if (ret < 0) {
+ fprintf(stderr, "socket_detect_userspace_client (client): connect failed: %m\n");
+ _exit(EXIT_FAILURE);
+ }
+
+		close(fd_socket);
+		/* The parent kills us with SIGKILL, so pause() never returns. */
+		pause();
+		_exit(EXIT_SUCCESS);
+ }
+
+ pidfd = sys_pidfd_open(pid, 0);
+ ASSERT_GE(pidfd, 0);
+
+ ASSERT_TRUE(get_pidfd_info(pidfd, &info));
+ ASSERT_GT((info.mask & PIDFD_INFO_COREDUMP), 0);
+ ASSERT_EQ((info.coredump_mask & PIDFD_COREDUMPED), 0);
+
+ wait_and_check_coredump_server(pid_coredump_server, _metadata, self);
+
+ ASSERT_EQ(sys_pidfd_send_signal(pidfd, SIGKILL, NULL, 0), 0);
+ ASSERT_EQ(close(pidfd), 0);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFSIGNALED(status));
+ ASSERT_EQ(WTERMSIG(status), SIGKILL);
+
+ ASSERT_NE(stat("/tmp/coredump.file", &st), 0);
+ ASSERT_EQ(errno, ENOENT);
+}
+
+TEST_F(coredump, socket_enoent)
+{
+ int pidfd, status;
+ pid_t pid;
+
+ ASSERT_TRUE(set_core_pattern("@/tmp/coredump.socket"));
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+ if (pid == 0)
+ crashing_child();
+
+ pidfd = sys_pidfd_open(pid, 0);
+ ASSERT_GE(pidfd, 0);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFSIGNALED(status));
+ ASSERT_FALSE(WCOREDUMP(status));
+}
+
+TEST_F(coredump, socket_no_listener)
+{
+ int pidfd, ret, status;
+ pid_t pid, pid_coredump_server;
+ int ipc_sockets[2];
+ char c;
+ const struct sockaddr_un coredump_sk = {
+ .sun_family = AF_UNIX,
+ .sun_path = "/tmp/coredump.socket",
+ };
+ size_t coredump_sk_len = offsetof(struct sockaddr_un, sun_path) +
+ sizeof("/tmp/coredump.socket");
+
+ ASSERT_TRUE(set_core_pattern("@/tmp/coredump.socket"));
+
+ ret = socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
+ ASSERT_EQ(ret, 0);
+
+ pid_coredump_server = fork();
+ ASSERT_GE(pid_coredump_server, 0);
+ if (pid_coredump_server == 0) {
+ int fd_server = -1;
+ int exit_code = EXIT_FAILURE;
+
+ close(ipc_sockets[0]);
+
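+		/* Bind the socket but never listen(): the kernel's connect must fail and no dump may be produced. */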
+ fd_server = socket(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0);
+ if (fd_server < 0) {
+ fprintf(stderr, "socket_no_listener: socket failed: %m\n");
+ goto out;
+ }
+
+ ret = bind(fd_server, (const struct sockaddr *)&coredump_sk, coredump_sk_len);
+ if (ret < 0) {
+ fprintf(stderr, "socket_no_listener: bind failed: %m\n");
+ goto out;
+ }
+
+ if (write_nointr(ipc_sockets[1], "1", 1) < 0) {
+ fprintf(stderr, "socket_no_listener: write_nointr to ipc socket failed: %m\n");
+ goto out;
+ }
+
+ exit_code = EXIT_SUCCESS;
+ fprintf(stderr, "socket_no_listener: completed successfully\n");
+out:
+ if (fd_server >= 0)
+ close(fd_server);
+ close(ipc_sockets[1]);
+ _exit(exit_code);
+ }
+ self->pid_coredump_server = pid_coredump_server;
+
+ EXPECT_EQ(close(ipc_sockets[1]), 0);
+ ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1);
+ EXPECT_EQ(close(ipc_sockets[0]), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+ if (pid == 0)
+ crashing_child();
+
+ pidfd = sys_pidfd_open(pid, 0);
+ ASSERT_GE(pidfd, 0);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFSIGNALED(status));
+ ASSERT_FALSE(WCOREDUMP(status));
+
+ wait_and_check_coredump_server(pid_coredump_server, _metadata, self);
+}
+
+/*
+ * Test: PIDFD_INFO_COREDUMP_SIGNAL via simple socket coredump
+ *
+ * Verify that, when using the simple socket-based coredump ("@" pattern),
+ * the coredump_signal field is correctly exposed as SIGSEGV.
+ */
+TEST_F(coredump, socket_coredump_signal_sigsegv)
+{
+ int pidfd, ret, status;
+ pid_t pid, pid_coredump_server;
+ struct pidfd_info info = {};
+ int ipc_sockets[2];
+ char c;
+
+ ASSERT_TRUE(set_core_pattern("@/tmp/coredump.socket"));
+
+ ret = socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
+ ASSERT_EQ(ret, 0);
+
+ pid_coredump_server = fork();
+ ASSERT_GE(pid_coredump_server, 0);
+ if (pid_coredump_server == 0) {
+ int fd_server = -1, fd_coredump = -1, fd_peer_pidfd = -1, fd_core_file = -1;
+ int exit_code = EXIT_FAILURE;
+
+ close(ipc_sockets[0]);
+
+ fd_server = create_and_listen_unix_socket("/tmp/coredump.socket");
+ if (fd_server < 0) {
+ fprintf(stderr, "socket_coredump_signal_sigsegv: create_and_listen_unix_socket failed: %m\n");
+ goto out;
+ }
+
+ if (write_nointr(ipc_sockets[1], "1", 1) < 0) {
+ fprintf(stderr, "socket_coredump_signal_sigsegv: write_nointr to ipc socket failed: %m\n");
+ goto out;
+ }
+
+ close(ipc_sockets[1]);
+
+ fd_coredump = accept4(fd_server, NULL, NULL, SOCK_CLOEXEC);
+ if (fd_coredump < 0) {
+ fprintf(stderr, "socket_coredump_signal_sigsegv: accept4 failed: %m\n");
+ goto out;
+ }
+
+ fd_peer_pidfd = get_peer_pidfd(fd_coredump);
+ if (fd_peer_pidfd < 0) {
+ fprintf(stderr, "socket_coredump_signal_sigsegv: get_peer_pidfd failed\n");
+ goto out;
+ }
+
+ if (!get_pidfd_info(fd_peer_pidfd, &info)) {
+ fprintf(stderr, "socket_coredump_signal_sigsegv: get_pidfd_info failed\n");
+ goto out;
+ }
+
+ if (!(info.mask & PIDFD_INFO_COREDUMP)) {
+ fprintf(stderr, "socket_coredump_signal_sigsegv: PIDFD_INFO_COREDUMP not set in mask\n");
+ goto out;
+ }
+
+ if (!(info.coredump_mask & PIDFD_COREDUMPED)) {
+ fprintf(stderr, "socket_coredump_signal_sigsegv: PIDFD_COREDUMPED not set in coredump_mask\n");
+ goto out;
+ }
+
+ /* Verify coredump_signal is available and correct */
+ if (!(info.mask & PIDFD_INFO_COREDUMP_SIGNAL)) {
+ fprintf(stderr, "socket_coredump_signal_sigsegv: PIDFD_INFO_COREDUMP_SIGNAL not set in mask\n");
+ goto out;
+ }
+
+ if (info.coredump_signal != SIGSEGV) {
+ fprintf(stderr, "socket_coredump_signal_sigsegv: coredump_signal=%d, expected SIGSEGV=%d\n",
+ info.coredump_signal, SIGSEGV);
+ goto out;
+ }
+
+ fd_core_file = open_coredump_tmpfile(self->fd_tmpfs_detached);
+ if (fd_core_file < 0) {
+ fprintf(stderr, "socket_coredump_signal_sigsegv: open_coredump_tmpfile failed: %m\n");
+ goto out;
+ }
+
+ for (;;) {
+ char buffer[4096];
+ ssize_t bytes_read, bytes_write;
+
+ bytes_read = read(fd_coredump, buffer, sizeof(buffer));
+ if (bytes_read < 0) {
+ fprintf(stderr, "socket_coredump_signal_sigsegv: read from coredump socket failed: %m\n");
+ goto out;
+ }
+
+ if (bytes_read == 0)
+ break;
+
+ bytes_write = write(fd_core_file, buffer, bytes_read);
+ if (bytes_read != bytes_write) {
+ fprintf(stderr, "socket_coredump_signal_sigsegv: write to core file failed (read=%zd, write=%zd): %m\n",
+ bytes_read, bytes_write);
+ goto out;
+ }
+ }
+
+ exit_code = EXIT_SUCCESS;
+ fprintf(stderr, "socket_coredump_signal_sigsegv: completed successfully\n");
+out:
+ if (fd_core_file >= 0)
+ close(fd_core_file);
+ if (fd_peer_pidfd >= 0)
+ close(fd_peer_pidfd);
+ if (fd_coredump >= 0)
+ close(fd_coredump);
+ if (fd_server >= 0)
+ close(fd_server);
+ _exit(exit_code);
+ }
+ self->pid_coredump_server = pid_coredump_server;
+
+ EXPECT_EQ(close(ipc_sockets[1]), 0);
+ ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1);
+ EXPECT_EQ(close(ipc_sockets[0]), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+ if (pid == 0)
+ crashing_child();
+
+ pidfd = sys_pidfd_open(pid, 0);
+ ASSERT_GE(pidfd, 0);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFSIGNALED(status));
+ ASSERT_EQ(WTERMSIG(status), SIGSEGV);
+ ASSERT_TRUE(WCOREDUMP(status));
+
+ ASSERT_TRUE(get_pidfd_info(pidfd, &info));
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_COREDUMP));
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_COREDUMP_SIGNAL));
+ ASSERT_EQ(info.coredump_signal, SIGSEGV);
+
+ wait_and_check_coredump_server(pid_coredump_server, _metadata, self);
+}
+
+/*
+ * Test: PIDFD_INFO_COREDUMP_SIGNAL via simple socket coredump with SIGABRT
+ *
+ * Verify that, when using the simple socket-based coredump ("@" pattern),
+ * the coredump_signal field is correctly exposed as SIGABRT.
+ */
+TEST_F(coredump, socket_coredump_signal_sigabrt)
+{
+ int pidfd, ret, status;
+ pid_t pid, pid_coredump_server;
+ struct pidfd_info info = {};
+ int ipc_sockets[2];
+ char c;
+
+ ASSERT_TRUE(set_core_pattern("@/tmp/coredump.socket"));
+
+ ret = socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
+ ASSERT_EQ(ret, 0);
+
+ pid_coredump_server = fork();
+ ASSERT_GE(pid_coredump_server, 0);
+ if (pid_coredump_server == 0) {
+ int fd_server = -1, fd_coredump = -1, fd_peer_pidfd = -1, fd_core_file = -1;
+ int exit_code = EXIT_FAILURE;
+
+ close(ipc_sockets[0]);
+
+ fd_server = create_and_listen_unix_socket("/tmp/coredump.socket");
+ if (fd_server < 0) {
+ fprintf(stderr, "socket_coredump_signal_sigabrt: create_and_listen_unix_socket failed: %m\n");
+ goto out;
+ }
+
+ if (write_nointr(ipc_sockets[1], "1", 1) < 0) {
+ fprintf(stderr, "socket_coredump_signal_sigabrt: write_nointr to ipc socket failed: %m\n");
+ goto out;
+ }
+
+ close(ipc_sockets[1]);
+
+ fd_coredump = accept4(fd_server, NULL, NULL, SOCK_CLOEXEC);
+ if (fd_coredump < 0) {
+ fprintf(stderr, "socket_coredump_signal_sigabrt: accept4 failed: %m\n");
+ goto out;
+ }
+
+ fd_peer_pidfd = get_peer_pidfd(fd_coredump);
+ if (fd_peer_pidfd < 0) {
+ fprintf(stderr, "socket_coredump_signal_sigabrt: get_peer_pidfd failed\n");
+ goto out;
+ }
+
+ if (!get_pidfd_info(fd_peer_pidfd, &info)) {
+ fprintf(stderr, "socket_coredump_signal_sigabrt: get_pidfd_info failed\n");
+ goto out;
+ }
+
+ if (!(info.mask & PIDFD_INFO_COREDUMP)) {
+ fprintf(stderr, "socket_coredump_signal_sigabrt: PIDFD_INFO_COREDUMP not set in mask\n");
+ goto out;
+ }
+
+ if (!(info.coredump_mask & PIDFD_COREDUMPED)) {
+ fprintf(stderr, "socket_coredump_signal_sigabrt: PIDFD_COREDUMPED not set in coredump_mask\n");
+ goto out;
+ }
+
+ /* Verify coredump_signal is available and correct */
+ if (!(info.mask & PIDFD_INFO_COREDUMP_SIGNAL)) {
+ fprintf(stderr, "socket_coredump_signal_sigabrt: PIDFD_INFO_COREDUMP_SIGNAL not set in mask\n");
+ goto out;
+ }
+
+ if (info.coredump_signal != SIGABRT) {
+ fprintf(stderr, "socket_coredump_signal_sigabrt: coredump_signal=%d, expected SIGABRT=%d\n",
+ info.coredump_signal, SIGABRT);
+ goto out;
+ }
+
+ fd_core_file = open_coredump_tmpfile(self->fd_tmpfs_detached);
+ if (fd_core_file < 0) {
+ fprintf(stderr, "socket_coredump_signal_sigabrt: open_coredump_tmpfile failed: %m\n");
+ goto out;
+ }
+
+ for (;;) {
+ char buffer[4096];
+ ssize_t bytes_read, bytes_write;
+
+ bytes_read = read(fd_coredump, buffer, sizeof(buffer));
+ if (bytes_read < 0) {
+ fprintf(stderr, "socket_coredump_signal_sigabrt: read from coredump socket failed: %m\n");
+ goto out;
+ }
+
+ if (bytes_read == 0)
+ break;
+
+ bytes_write = write(fd_core_file, buffer, bytes_read);
+ if (bytes_read != bytes_write) {
+ fprintf(stderr, "socket_coredump_signal_sigabrt: write to core file failed (read=%zd, write=%zd): %m\n",
+ bytes_read, bytes_write);
+ goto out;
+ }
+ }
+
+ exit_code = EXIT_SUCCESS;
+ fprintf(stderr, "socket_coredump_signal_sigabrt: completed successfully\n");
+out:
+ if (fd_core_file >= 0)
+ close(fd_core_file);
+ if (fd_peer_pidfd >= 0)
+ close(fd_peer_pidfd);
+ if (fd_coredump >= 0)
+ close(fd_coredump);
+ if (fd_server >= 0)
+ close(fd_server);
+ _exit(exit_code);
+ }
+ self->pid_coredump_server = pid_coredump_server;
+
+ EXPECT_EQ(close(ipc_sockets[1]), 0);
+ ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1);
+ EXPECT_EQ(close(ipc_sockets[0]), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+ if (pid == 0)
+ abort();
+
+ pidfd = sys_pidfd_open(pid, 0);
+ ASSERT_GE(pidfd, 0);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFSIGNALED(status));
+ ASSERT_EQ(WTERMSIG(status), SIGABRT);
+ ASSERT_TRUE(WCOREDUMP(status));
+
+ ASSERT_TRUE(get_pidfd_info(pidfd, &info));
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_COREDUMP));
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_COREDUMP_SIGNAL));
+ ASSERT_EQ(info.coredump_signal, SIGABRT);
+
+ wait_and_check_coredump_server(pid_coredump_server, _metadata, self);
+}
+
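+/*
+ * Whitespace, relative path components, and trailing ".." must be rejected
+ * for both the "@" and "@@" socket patterns, as must more than two '@'s.
+ */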
+TEST_F(coredump, socket_invalid_paths)
+{
+ ASSERT_FALSE(set_core_pattern("@ /tmp/coredump.socket"));
+ ASSERT_FALSE(set_core_pattern("@/tmp/../coredump.socket"));
+ ASSERT_FALSE(set_core_pattern("@../coredump.socket"));
+ ASSERT_FALSE(set_core_pattern("@/tmp/coredump.socket/.."));
+ ASSERT_FALSE(set_core_pattern("@.."));
+
+ ASSERT_FALSE(set_core_pattern("@@ /tmp/coredump.socket"));
+ ASSERT_FALSE(set_core_pattern("@@/tmp/../coredump.socket"));
+ ASSERT_FALSE(set_core_pattern("@@../coredump.socket"));
+ ASSERT_FALSE(set_core_pattern("@@/tmp/coredump.socket/.."));
+ ASSERT_FALSE(set_core_pattern("@@.."));
+
+ ASSERT_FALSE(set_core_pattern("@@@/tmp/coredump.socket"));
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/coredump/coredump_test.h b/tools/testing/selftests/coredump/coredump_test.h
new file mode 100644
index 000000000000..ed47f01fa53c
--- /dev/null
+++ b/tools/testing/selftests/coredump/coredump_test.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __COREDUMP_TEST_H
+#define __COREDUMP_TEST_H
+
+#include <stdbool.h>
+#include <sys/types.h>
+#include <linux/coredump.h>
+
+#include "../kselftest_harness.h"
+#include "../pidfd/pidfd.h"
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+#define NUM_THREAD_SPAWN 128
+
+/* Coredump fixture */
+FIXTURE(coredump)
+{
+ char original_core_pattern[256];
+ pid_t pid_coredump_server;
+ int fd_tmpfs_detached;
+};
+
+/* Shared helper function declarations */
+void *do_nothing(void *arg);
+void crashing_child(void);
+int create_detached_tmpfs(void);
+int create_and_listen_unix_socket(const char *path);
+bool set_core_pattern(const char *pattern);
+int get_peer_pidfd(int fd);
+bool get_pidfd_info(int fd_peer_pidfd, struct pidfd_info *info);
+
+/* Inline helper that uses harness types */
+static inline void wait_and_check_coredump_server(pid_t pid_coredump_server,
+ struct __test_metadata *const _metadata,
+ FIXTURE_DATA(coredump) *self)
+{
+ int status;
+ waitpid(pid_coredump_server, &status, 0);
+ self->pid_coredump_server = -ESRCH;
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+}
+
+/* Protocol helper function declarations */
+ssize_t recv_marker(int fd);
+bool read_marker(int fd, enum coredump_mark mark);
+bool read_coredump_req(int fd, struct coredump_req *req);
+bool send_coredump_ack(int fd, const struct coredump_req *req,
+ __u64 mask, size_t size_ack);
+bool check_coredump_req(const struct coredump_req *req, size_t min_size,
+ __u64 required_mask);
+int open_coredump_tmpfile(int fd_tmpfs_detached);
+void process_coredump_worker(int fd_coredump, int fd_peer_pidfd, int fd_core_file);
+
+#endif /* __COREDUMP_TEST_H */
diff --git a/tools/testing/selftests/coredump/coredump_test_helpers.c b/tools/testing/selftests/coredump/coredump_test_helpers.c
new file mode 100644
index 000000000000..a6f6d5f2ae07
--- /dev/null
+++ b/tools/testing/selftests/coredump/coredump_test_helpers.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <linux/coredump.h>
+#include <linux/fs.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/epoll.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "../filesystems/wrappers.h"
+#include "../pidfd/pidfd.h"
+
+/* Forward declarations to avoid including harness header */
+struct __test_metadata;
+
+/* Match the fixture definition from coredump_test.h */
+struct _fixture_coredump_data {
+ char original_core_pattern[256];
+ pid_t pid_coredump_server;
+ int fd_tmpfs_detached;
+};
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+#define NUM_THREAD_SPAWN 128
+
+void *do_nothing(void *arg)
+{
+ (void)arg;
+ while (1)
+ pause();
+
+ return NULL;
+}
+
+void crashing_child(void)
+{
+ pthread_t thread;
+ int i;
+
+ for (i = 0; i < NUM_THREAD_SPAWN; ++i)
+ pthread_create(&thread, NULL, do_nothing, NULL);
+
+ /* crash on purpose */
+ i = *(int *)NULL;
+}
+
+int create_detached_tmpfs(void)
+{
+ int fd_context, fd_tmpfs;
+
+ fd_context = sys_fsopen("tmpfs", 0);
+ if (fd_context < 0)
+ return -1;
+
+ if (sys_fsconfig(fd_context, FSCONFIG_CMD_CREATE, NULL, NULL, 0) < 0)
+ return -1;
+
+ fd_tmpfs = sys_fsmount(fd_context, 0, 0);
+ close(fd_context);
+ return fd_tmpfs;
+}
+
+int create_and_listen_unix_socket(const char *path)
+{
+ struct sockaddr_un addr = {
+ .sun_family = AF_UNIX,
+ };
+ assert(strlen(path) < sizeof(addr.sun_path) - 1);
+ strncpy(addr.sun_path, path, sizeof(addr.sun_path) - 1);
+ size_t addr_len =
+ offsetof(struct sockaddr_un, sun_path) + strlen(path) + 1;
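+	/* addr_len includes the terminating NUL of the socket path. */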
+ int fd, ret;
+
+ fd = socket(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0);
+ if (fd < 0)
+ goto out;
+
+ ret = bind(fd, (const struct sockaddr *)&addr, addr_len);
+ if (ret < 0)
+ goto out;
+
+ ret = listen(fd, 128);
+ if (ret < 0)
+ goto out;
+
+ return fd;
+
+out:
+ if (fd >= 0)
+ close(fd);
+ return -1;
+}
+
+bool set_core_pattern(const char *pattern)
+{
+ int fd;
+ ssize_t ret;
+
+ fd = open("/proc/sys/kernel/core_pattern", O_WRONLY | O_CLOEXEC);
+ if (fd < 0)
+ return false;
+
+ ret = write(fd, pattern, strlen(pattern));
+ close(fd);
+ if (ret < 0)
+ return false;
+
+ fprintf(stderr, "Set core_pattern to '%s' | %zu == %zu\n", pattern, ret, strlen(pattern));
+ return ret == strlen(pattern);
+}
+
+int get_peer_pidfd(int fd)
+{
+ int fd_peer_pidfd;
+ socklen_t fd_peer_pidfd_len = sizeof(fd_peer_pidfd);
+ int ret = getsockopt(fd, SOL_SOCKET, SO_PEERPIDFD, &fd_peer_pidfd,
+ &fd_peer_pidfd_len);
+ if (ret < 0) {
+ fprintf(stderr, "get_peer_pidfd: getsockopt(SO_PEERPIDFD) failed: %m\n");
+ return -1;
+ }
+ fprintf(stderr, "get_peer_pidfd: successfully retrieved pidfd %d\n", fd_peer_pidfd);
+ return fd_peer_pidfd;
+}
+
+bool get_pidfd_info(int fd_peer_pidfd, struct pidfd_info *info)
+{
+ int ret;
+ memset(info, 0, sizeof(*info));
+ info->mask = PIDFD_INFO_EXIT | PIDFD_INFO_COREDUMP | PIDFD_INFO_COREDUMP_SIGNAL;
+ ret = ioctl(fd_peer_pidfd, PIDFD_GET_INFO, info);
+ if (ret < 0) {
+ fprintf(stderr, "get_pidfd_info: ioctl(PIDFD_GET_INFO) failed: %m\n");
+ return false;
+ }
+ fprintf(stderr, "get_pidfd_info: mask=0x%llx, coredump_mask=0x%x, coredump_signal=%d\n",
+ (unsigned long long)info->mask, info->coredump_mask, info->coredump_signal);
+ return true;
+}
+
+/* Protocol helper functions */
+
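+/*
+ * The kernel answers each coredump ack with a single coredump_mark that
+ * either confirms the ack (ReqAck) or reports why it was rejected.
+ */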
+ssize_t recv_marker(int fd)
+{
+ enum coredump_mark mark = COREDUMP_MARK_REQACK;
+ ssize_t ret;
+
+ ret = recv(fd, &mark, sizeof(mark), MSG_WAITALL);
+ if (ret != sizeof(mark))
+ return -1;
+
+ switch (mark) {
+ case COREDUMP_MARK_REQACK:
+ fprintf(stderr, "Received marker: ReqAck\n");
+ return COREDUMP_MARK_REQACK;
+ case COREDUMP_MARK_MINSIZE:
+ fprintf(stderr, "Received marker: MinSize\n");
+ return COREDUMP_MARK_MINSIZE;
+ case COREDUMP_MARK_MAXSIZE:
+ fprintf(stderr, "Received marker: MaxSize\n");
+ return COREDUMP_MARK_MAXSIZE;
+ case COREDUMP_MARK_UNSUPPORTED:
+ fprintf(stderr, "Received marker: Unsupported\n");
+ return COREDUMP_MARK_UNSUPPORTED;
+ case COREDUMP_MARK_CONFLICTING:
+ fprintf(stderr, "Received marker: Conflicting\n");
+ return COREDUMP_MARK_CONFLICTING;
+ default:
+ fprintf(stderr, "Received unknown marker: %u\n", mark);
+ break;
+ }
+ return -1;
+}
+
+bool read_marker(int fd, enum coredump_mark mark)
+{
+ ssize_t ret;
+
+ ret = recv_marker(fd);
+ if (ret < 0)
+ return false;
+ return ret == mark;
+}
+
+bool read_coredump_req(int fd, struct coredump_req *req)
+{
+ ssize_t ret;
+ size_t field_size, user_size, ack_size, kernel_size, remaining_size;
+
+ memset(req, 0, sizeof(*req));
+ field_size = sizeof(req->size);
+
+ /* Peek the size of the coredump request. */
+ ret = recv(fd, req, field_size, MSG_PEEK | MSG_WAITALL);
+ if (ret != field_size) {
+ fprintf(stderr, "read_coredump_req: peek failed (got %zd, expected %zu): %m\n",
+ ret, field_size);
+ return false;
+ }
+ kernel_size = req->size;
+
+ if (kernel_size < COREDUMP_ACK_SIZE_VER0) {
+ fprintf(stderr, "read_coredump_req: kernel_size %zu < min %d\n",
+ kernel_size, COREDUMP_ACK_SIZE_VER0);
+ return false;
+ }
+ if (kernel_size >= PAGE_SIZE) {
+ fprintf(stderr, "read_coredump_req: kernel_size %zu >= PAGE_SIZE %d\n",
+ kernel_size, PAGE_SIZE);
+ return false;
+ }
+
+ /* Use the minimum of user and kernel size to read the full request. */
+ user_size = sizeof(struct coredump_req);
+ ack_size = user_size < kernel_size ? user_size : kernel_size;
+ ret = recv(fd, req, ack_size, MSG_WAITALL);
+ if (ret != ack_size)
+ return false;
+
+ fprintf(stderr, "Read coredump request with size %u and mask 0x%llx\n",
+ req->size, (unsigned long long)req->mask);
+
+	/* Only the kernel can have sent more than the bytes we just consumed. */
+	if (kernel_size > user_size)
+		remaining_size = kernel_size - user_size;
+	else
+		remaining_size = 0;
+
+ if (PAGE_SIZE <= remaining_size)
+ return false;
+
+ /*
+ * Discard any additional data if the kernel's request was larger than
+ * what we knew about or cared about.
+ */
+ if (remaining_size) {
+ char buffer[PAGE_SIZE];
+
+		ret = recv(fd, buffer, remaining_size, MSG_WAITALL);
+ if (ret != remaining_size)
+ return false;
+ fprintf(stderr, "Discarded %zu bytes of data after coredump request\n", remaining_size);
+ }
+
+ return true;
+}
+
+bool send_coredump_ack(int fd, const struct coredump_req *req,
+ __u64 mask, size_t size_ack)
+{
+ ssize_t ret;
+ /*
+ * Wrap struct coredump_ack in a larger struct so we can
+	 * simulate sending too much data to the kernel.
+ */
+ struct large_ack_for_size_testing {
+ struct coredump_ack ack;
+ char buffer[PAGE_SIZE];
+ } large_ack = {};
+
+ if (!size_ack)
+ size_ack = sizeof(struct coredump_ack) < req->size_ack ?
+ sizeof(struct coredump_ack) :
+ req->size_ack;
+ large_ack.ack.mask = mask;
+ large_ack.ack.size = size_ack;
+ ret = send(fd, &large_ack, size_ack, MSG_NOSIGNAL);
+ if (ret != size_ack)
+ return false;
+
+ fprintf(stderr, "Sent coredump ack with size %zu and mask 0x%llx\n",
+ size_ack, (unsigned long long)mask);
+ return true;
+}
+
+bool check_coredump_req(const struct coredump_req *req, size_t min_size,
+ __u64 required_mask)
+{
+ if (req->size < min_size)
+ return false;
+ if ((req->mask & required_mask) != required_mask)
+ return false;
+ if (req->mask & ~required_mask)
+ return false;
+ return true;
+}
+
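+/*
+ * O_TMPFILE creates an unnamed file on the detached tmpfs; with O_EXCL it
+ * can never be linked into the filesystem and is freed on the last close.
+ */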
+int open_coredump_tmpfile(int fd_tmpfs_detached)
+{
+ return openat(fd_tmpfs_detached, ".", O_TMPFILE | O_RDWR | O_EXCL, 0600);
+}
+
+void process_coredump_worker(int fd_coredump, int fd_peer_pidfd, int fd_core_file)
+{
+ int epfd = -1;
+ int exit_code = EXIT_FAILURE;
+ struct epoll_event ev;
+ int flags;
+
+ /* Set socket to non-blocking mode for edge-triggered epoll */
+ flags = fcntl(fd_coredump, F_GETFL, 0);
+ if (flags < 0) {
+ fprintf(stderr, "Worker: fcntl(F_GETFL) failed: %m\n");
+ goto out;
+ }
+ if (fcntl(fd_coredump, F_SETFL, flags | O_NONBLOCK) < 0) {
+ fprintf(stderr, "Worker: fcntl(F_SETFL, O_NONBLOCK) failed: %m\n");
+ goto out;
+ }
+
+ epfd = epoll_create1(0);
+ if (epfd < 0) {
+ fprintf(stderr, "Worker: epoll_create1() failed: %m\n");
+ goto out;
+ }
+
+ ev.events = EPOLLIN | EPOLLRDHUP | EPOLLET;
+ ev.data.fd = fd_coredump;
+ if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd_coredump, &ev) < 0) {
+ fprintf(stderr, "Worker: epoll_ctl(EPOLL_CTL_ADD) failed: %m\n");
+ goto out;
+ }
+
+ for (;;) {
+ struct epoll_event events[1];
+ int n = epoll_wait(epfd, events, 1, -1);
+ if (n < 0) {
+ fprintf(stderr, "Worker: epoll_wait() failed: %m\n");
+ break;
+ }
+
+ if (events[0].events & (EPOLLIN | EPOLLRDHUP)) {
+ for (;;) {
+ char buffer[4096];
+ ssize_t bytes_read = read(fd_coredump, buffer, sizeof(buffer));
+ if (bytes_read < 0) {
+ if (errno == EAGAIN || errno == EWOULDBLOCK)
+ break;
+ fprintf(stderr, "Worker: read() failed: %m\n");
+ goto out;
+ }
+ if (bytes_read == 0)
+ goto done;
+ ssize_t bytes_write = write(fd_core_file, buffer, bytes_read);
+ if (bytes_write != bytes_read) {
+ if (bytes_write < 0 && errno == ENOSPC)
+ continue;
+ fprintf(stderr, "Worker: write() failed (read=%zd, write=%zd): %m\n",
+ bytes_read, bytes_write);
+ goto out;
+ }
+ }
+ }
+ }
+
+done:
+ exit_code = EXIT_SUCCESS;
+ fprintf(stderr, "Worker: completed successfully\n");
+out:
+ if (epfd >= 0)
+ close(epfd);
+ if (fd_core_file >= 0)
+ close(fd_core_file);
+ if (fd_peer_pidfd >= 0)
+ close(fd_peer_pidfd);
+ if (fd_coredump >= 0)
+ close(fd_coredump);
+ _exit(exit_code);
+}
diff --git a/tools/testing/selftests/coredump/stackdump_test.c b/tools/testing/selftests/coredump/stackdump_test.c
index 9984413be9f0..1ec88937a1c2 100644
--- a/tools/testing/selftests/coredump/stackdump_test.c
+++ b/tools/testing/selftests/coredump/stackdump_test.c
@@ -1,57 +1,44 @@
// SPDX-License-Identifier: GPL-2.0
+#include <assert.h>
#include <fcntl.h>
#include <inttypes.h>
#include <libgen.h>
+#include <limits.h>
+#include <linux/coredump.h>
+#include <linux/fs.h>
#include <linux/limits.h>
#include <pthread.h>
#include <string.h>
#include <sys/mount.h>
+#include <poll.h>
+#include <sys/epoll.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
+#include "../filesystems/wrappers.h"
#include "../pidfd/pidfd.h"
+#include "coredump_test.h"
+
#define STACKDUMP_FILE "stack_values"
#define STACKDUMP_SCRIPT "stackdump"
-#define NUM_THREAD_SPAWN 128
-
-static void *do_nothing(void *)
-{
- while (1)
- pause();
-}
-
-static void crashing_child(void)
-{
- pthread_t thread;
- int i;
- for (i = 0; i < NUM_THREAD_SPAWN; ++i)
- pthread_create(&thread, NULL, do_nothing, NULL);
-
- /* crash on purpose */
- i = *(int *)NULL;
-}
-
-FIXTURE(coredump)
-{
- char original_core_pattern[256];
- pid_t pid_coredump_server;
-};
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
FIXTURE_SETUP(coredump)
{
- char buf[PATH_MAX];
FILE *file;
- char *dir;
int ret;
self->pid_coredump_server = -ESRCH;
+ self->fd_tmpfs_detached = -1;
file = fopen("/proc/sys/kernel/core_pattern", "r");
ASSERT_NE(NULL, file);
@@ -60,6 +47,8 @@ FIXTURE_SETUP(coredump)
ASSERT_LT(ret, sizeof(self->original_core_pattern));
self->original_core_pattern[ret] = '\0';
+ self->fd_tmpfs_detached = create_detached_tmpfs();
+ ASSERT_GE(self->fd_tmpfs_detached, 0);
ret = fclose(file);
ASSERT_EQ(0, ret);
@@ -98,6 +87,15 @@ FIXTURE_TEARDOWN(coredump)
goto fail;
}
+ if (self->fd_tmpfs_detached >= 0) {
+ ret = close(self->fd_tmpfs_detached);
+ if (ret < 0) {
+ reason = "Unable to close detached tmpfs";
+ goto fail;
+ }
+ self->fd_tmpfs_detached = -1;
+ }
+
return;
fail:
/* This should never happen */
@@ -106,11 +104,10 @@ fail:
TEST_F_TIMEOUT(coredump, stackdump, 120)
{
- struct sigaction action = {};
unsigned long long stack;
char *test_dir, *line;
size_t line_length;
- char buf[PATH_MAX];
+ char buf[PAGE_SIZE];
int ret, i, status;
FILE *file;
pid_t pid;
@@ -169,454 +166,4 @@ TEST_F_TIMEOUT(coredump, stackdump, 120)
fclose(file);
}
-TEST_F(coredump, socket)
-{
- int fd, pidfd, ret, status;
- FILE *file;
- pid_t pid, pid_coredump_server;
- struct stat st;
- char core_file[PATH_MAX];
- struct pidfd_info info = {};
- int ipc_sockets[2];
- char c;
- const struct sockaddr_un coredump_sk = {
- .sun_family = AF_UNIX,
- .sun_path = "/tmp/coredump.socket",
- };
- size_t coredump_sk_len = offsetof(struct sockaddr_un, sun_path) +
- sizeof("/tmp/coredump.socket");
-
- ret = socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
- ASSERT_EQ(ret, 0);
-
- file = fopen("/proc/sys/kernel/core_pattern", "w");
- ASSERT_NE(file, NULL);
-
- ret = fprintf(file, "@/tmp/coredump.socket");
- ASSERT_EQ(ret, strlen("@/tmp/coredump.socket"));
- ASSERT_EQ(fclose(file), 0);
-
- pid_coredump_server = fork();
- ASSERT_GE(pid_coredump_server, 0);
- if (pid_coredump_server == 0) {
- int fd_server, fd_coredump, fd_peer_pidfd, fd_core_file;
- socklen_t fd_peer_pidfd_len;
-
- close(ipc_sockets[0]);
-
- fd_server = socket(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0);
- if (fd_server < 0)
- _exit(EXIT_FAILURE);
-
- ret = bind(fd_server, (const struct sockaddr *)&coredump_sk, coredump_sk_len);
- if (ret < 0) {
- fprintf(stderr, "Failed to bind coredump socket\n");
- close(fd_server);
- close(ipc_sockets[1]);
- _exit(EXIT_FAILURE);
- }
-
- ret = listen(fd_server, 1);
- if (ret < 0) {
- fprintf(stderr, "Failed to listen on coredump socket\n");
- close(fd_server);
- close(ipc_sockets[1]);
- _exit(EXIT_FAILURE);
- }
-
- if (write_nointr(ipc_sockets[1], "1", 1) < 0) {
- close(fd_server);
- close(ipc_sockets[1]);
- _exit(EXIT_FAILURE);
- }
-
- close(ipc_sockets[1]);
-
- fd_coredump = accept4(fd_server, NULL, NULL, SOCK_CLOEXEC);
- if (fd_coredump < 0) {
- fprintf(stderr, "Failed to accept coredump socket connection\n");
- close(fd_server);
- _exit(EXIT_FAILURE);
- }
-
- fd_peer_pidfd_len = sizeof(fd_peer_pidfd);
- ret = getsockopt(fd_coredump, SOL_SOCKET, SO_PEERPIDFD,
- &fd_peer_pidfd, &fd_peer_pidfd_len);
- if (ret < 0) {
- fprintf(stderr, "%m - Failed to retrieve peer pidfd for coredump socket connection\n");
- close(fd_coredump);
- close(fd_server);
- _exit(EXIT_FAILURE);
- }
-
- memset(&info, 0, sizeof(info));
- info.mask = PIDFD_INFO_EXIT | PIDFD_INFO_COREDUMP;
- ret = ioctl(fd_peer_pidfd, PIDFD_GET_INFO, &info);
- if (ret < 0) {
- fprintf(stderr, "Failed to retrieve pidfd info from peer pidfd for coredump socket connection\n");
- close(fd_coredump);
- close(fd_server);
- close(fd_peer_pidfd);
- _exit(EXIT_FAILURE);
- }
-
- if (!(info.mask & PIDFD_INFO_COREDUMP)) {
- fprintf(stderr, "Missing coredump information from coredumping task\n");
- close(fd_coredump);
- close(fd_server);
- close(fd_peer_pidfd);
- _exit(EXIT_FAILURE);
- }
-
- if (!(info.coredump_mask & PIDFD_COREDUMPED)) {
- fprintf(stderr, "Received connection from non-coredumping task\n");
- close(fd_coredump);
- close(fd_server);
- close(fd_peer_pidfd);
- _exit(EXIT_FAILURE);
- }
-
- fd_core_file = creat("/tmp/coredump.file", 0644);
- if (fd_core_file < 0) {
- fprintf(stderr, "Failed to create coredump file\n");
- close(fd_coredump);
- close(fd_server);
- close(fd_peer_pidfd);
- _exit(EXIT_FAILURE);
- }
-
- for (;;) {
- char buffer[4096];
- ssize_t bytes_read, bytes_write;
-
- bytes_read = read(fd_coredump, buffer, sizeof(buffer));
- if (bytes_read < 0) {
- close(fd_coredump);
- close(fd_server);
- close(fd_peer_pidfd);
- close(fd_core_file);
- _exit(EXIT_FAILURE);
- }
-
- if (bytes_read == 0)
- break;
-
- bytes_write = write(fd_core_file, buffer, bytes_read);
- if (bytes_read != bytes_write) {
- close(fd_coredump);
- close(fd_server);
- close(fd_peer_pidfd);
- close(fd_core_file);
- _exit(EXIT_FAILURE);
- }
- }
-
- close(fd_coredump);
- close(fd_server);
- close(fd_peer_pidfd);
- close(fd_core_file);
- _exit(EXIT_SUCCESS);
- }
- self->pid_coredump_server = pid_coredump_server;
-
- EXPECT_EQ(close(ipc_sockets[1]), 0);
- ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1);
- EXPECT_EQ(close(ipc_sockets[0]), 0);
-
- pid = fork();
- ASSERT_GE(pid, 0);
- if (pid == 0)
- crashing_child();
-
- pidfd = sys_pidfd_open(pid, 0);
- ASSERT_GE(pidfd, 0);
-
- waitpid(pid, &status, 0);
- ASSERT_TRUE(WIFSIGNALED(status));
- ASSERT_TRUE(WCOREDUMP(status));
-
- info.mask = PIDFD_INFO_EXIT | PIDFD_INFO_COREDUMP;
- ASSERT_EQ(ioctl(pidfd, PIDFD_GET_INFO, &info), 0);
- ASSERT_GT((info.mask & PIDFD_INFO_COREDUMP), 0);
- ASSERT_GT((info.coredump_mask & PIDFD_COREDUMPED), 0);
-
- waitpid(pid_coredump_server, &status, 0);
- self->pid_coredump_server = -ESRCH;
- ASSERT_TRUE(WIFEXITED(status));
- ASSERT_EQ(WEXITSTATUS(status), 0);
-
- ASSERT_EQ(stat("/tmp/coredump.file", &st), 0);
- ASSERT_GT(st.st_size, 0);
- /*
- * We should somehow validate the produced core file.
- * For now just allow for visual inspection
- */
- system("file /tmp/coredump.file");
-}
-
-TEST_F(coredump, socket_detect_userspace_client)
-{
- int fd, pidfd, ret, status;
- FILE *file;
- pid_t pid, pid_coredump_server;
- struct stat st;
- char core_file[PATH_MAX];
- struct pidfd_info info = {};
- int ipc_sockets[2];
- char c;
- const struct sockaddr_un coredump_sk = {
- .sun_family = AF_UNIX,
- .sun_path = "/tmp/coredump.socket",
- };
- size_t coredump_sk_len = offsetof(struct sockaddr_un, sun_path) +
- sizeof("/tmp/coredump.socket");
-
- file = fopen("/proc/sys/kernel/core_pattern", "w");
- ASSERT_NE(file, NULL);
-
- ret = fprintf(file, "@/tmp/coredump.socket");
- ASSERT_EQ(ret, strlen("@/tmp/coredump.socket"));
- ASSERT_EQ(fclose(file), 0);
-
- ret = socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
- ASSERT_EQ(ret, 0);
-
- pid_coredump_server = fork();
- ASSERT_GE(pid_coredump_server, 0);
- if (pid_coredump_server == 0) {
- int fd_server, fd_coredump, fd_peer_pidfd, fd_core_file;
- socklen_t fd_peer_pidfd_len;
-
- close(ipc_sockets[0]);
-
- fd_server = socket(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0);
- if (fd_server < 0)
- _exit(EXIT_FAILURE);
-
- ret = bind(fd_server, (const struct sockaddr *)&coredump_sk, coredump_sk_len);
- if (ret < 0) {
- fprintf(stderr, "Failed to bind coredump socket\n");
- close(fd_server);
- close(ipc_sockets[1]);
- _exit(EXIT_FAILURE);
- }
-
- ret = listen(fd_server, 1);
- if (ret < 0) {
- fprintf(stderr, "Failed to listen on coredump socket\n");
- close(fd_server);
- close(ipc_sockets[1]);
- _exit(EXIT_FAILURE);
- }
-
- if (write_nointr(ipc_sockets[1], "1", 1) < 0) {
- close(fd_server);
- close(ipc_sockets[1]);
- _exit(EXIT_FAILURE);
- }
-
- close(ipc_sockets[1]);
-
- fd_coredump = accept4(fd_server, NULL, NULL, SOCK_CLOEXEC);
- if (fd_coredump < 0) {
- fprintf(stderr, "Failed to accept coredump socket connection\n");
- close(fd_server);
- _exit(EXIT_FAILURE);
- }
-
- fd_peer_pidfd_len = sizeof(fd_peer_pidfd);
- ret = getsockopt(fd_coredump, SOL_SOCKET, SO_PEERPIDFD,
- &fd_peer_pidfd, &fd_peer_pidfd_len);
- if (ret < 0) {
- fprintf(stderr, "%m - Failed to retrieve peer pidfd for coredump socket connection\n");
- close(fd_coredump);
- close(fd_server);
- _exit(EXIT_FAILURE);
- }
-
- memset(&info, 0, sizeof(info));
- info.mask = PIDFD_INFO_EXIT | PIDFD_INFO_COREDUMP;
- ret = ioctl(fd_peer_pidfd, PIDFD_GET_INFO, &info);
- if (ret < 0) {
- fprintf(stderr, "Failed to retrieve pidfd info from peer pidfd for coredump socket connection\n");
- close(fd_coredump);
- close(fd_server);
- close(fd_peer_pidfd);
- _exit(EXIT_FAILURE);
- }
-
- if (!(info.mask & PIDFD_INFO_COREDUMP)) {
- fprintf(stderr, "Missing coredump information from coredumping task\n");
- close(fd_coredump);
- close(fd_server);
- close(fd_peer_pidfd);
- _exit(EXIT_FAILURE);
- }
-
- if (info.coredump_mask & PIDFD_COREDUMPED) {
- fprintf(stderr, "Received unexpected connection from coredumping task\n");
- close(fd_coredump);
- close(fd_server);
- close(fd_peer_pidfd);
- _exit(EXIT_FAILURE);
- }
-
- close(fd_coredump);
- close(fd_server);
- close(fd_peer_pidfd);
- close(fd_core_file);
- _exit(EXIT_SUCCESS);
- }
- self->pid_coredump_server = pid_coredump_server;
-
- EXPECT_EQ(close(ipc_sockets[1]), 0);
- ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1);
- EXPECT_EQ(close(ipc_sockets[0]), 0);
-
- pid = fork();
- ASSERT_GE(pid, 0);
- if (pid == 0) {
- int fd_socket;
- ssize_t ret;
-
- fd_socket = socket(AF_UNIX, SOCK_STREAM, 0);
- if (fd_socket < 0)
- _exit(EXIT_FAILURE);
-
-
- ret = connect(fd_socket, (const struct sockaddr *)&coredump_sk, coredump_sk_len);
- if (ret < 0)
- _exit(EXIT_FAILURE);
-
- (void *)write(fd_socket, &(char){ 0 }, 1);
- close(fd_socket);
- _exit(EXIT_SUCCESS);
- }
-
- pidfd = sys_pidfd_open(pid, 0);
- ASSERT_GE(pidfd, 0);
-
- waitpid(pid, &status, 0);
- ASSERT_TRUE(WIFEXITED(status));
- ASSERT_EQ(WEXITSTATUS(status), 0);
-
- info.mask = PIDFD_INFO_EXIT | PIDFD_INFO_COREDUMP;
- ASSERT_EQ(ioctl(pidfd, PIDFD_GET_INFO, &info), 0);
- ASSERT_GT((info.mask & PIDFD_INFO_COREDUMP), 0);
- ASSERT_EQ((info.coredump_mask & PIDFD_COREDUMPED), 0);
-
- waitpid(pid_coredump_server, &status, 0);
- self->pid_coredump_server = -ESRCH;
- ASSERT_TRUE(WIFEXITED(status));
- ASSERT_EQ(WEXITSTATUS(status), 0);
-
- ASSERT_NE(stat("/tmp/coredump.file", &st), 0);
- ASSERT_EQ(errno, ENOENT);
-}
-
-TEST_F(coredump, socket_enoent)
-{
- int pidfd, ret, status;
- FILE *file;
- pid_t pid;
- char core_file[PATH_MAX];
-
- file = fopen("/proc/sys/kernel/core_pattern", "w");
- ASSERT_NE(file, NULL);
-
- ret = fprintf(file, "@/tmp/coredump.socket");
- ASSERT_EQ(ret, strlen("@/tmp/coredump.socket"));
- ASSERT_EQ(fclose(file), 0);
-
- pid = fork();
- ASSERT_GE(pid, 0);
- if (pid == 0)
- crashing_child();
-
- pidfd = sys_pidfd_open(pid, 0);
- ASSERT_GE(pidfd, 0);
-
- waitpid(pid, &status, 0);
- ASSERT_TRUE(WIFSIGNALED(status));
- ASSERT_FALSE(WCOREDUMP(status));
-}
-
-TEST_F(coredump, socket_no_listener)
-{
- int pidfd, ret, status;
- FILE *file;
- pid_t pid, pid_coredump_server;
- int ipc_sockets[2];
- char c;
- const struct sockaddr_un coredump_sk = {
- .sun_family = AF_UNIX,
- .sun_path = "/tmp/coredump.socket",
- };
- size_t coredump_sk_len = offsetof(struct sockaddr_un, sun_path) +
- sizeof("/tmp/coredump.socket");
-
- ret = socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
- ASSERT_EQ(ret, 0);
-
- file = fopen("/proc/sys/kernel/core_pattern", "w");
- ASSERT_NE(file, NULL);
-
- ret = fprintf(file, "@/tmp/coredump.socket");
- ASSERT_EQ(ret, strlen("@/tmp/coredump.socket"));
- ASSERT_EQ(fclose(file), 0);
-
- pid_coredump_server = fork();
- ASSERT_GE(pid_coredump_server, 0);
- if (pid_coredump_server == 0) {
- int fd_server;
- socklen_t fd_peer_pidfd_len;
-
- close(ipc_sockets[0]);
-
- fd_server = socket(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0);
- if (fd_server < 0)
- _exit(EXIT_FAILURE);
-
- ret = bind(fd_server, (const struct sockaddr *)&coredump_sk, coredump_sk_len);
- if (ret < 0) {
- fprintf(stderr, "Failed to bind coredump socket\n");
- close(fd_server);
- close(ipc_sockets[1]);
- _exit(EXIT_FAILURE);
- }
-
- if (write_nointr(ipc_sockets[1], "1", 1) < 0) {
- close(fd_server);
- close(ipc_sockets[1]);
- _exit(EXIT_FAILURE);
- }
-
- close(fd_server);
- close(ipc_sockets[1]);
- _exit(EXIT_SUCCESS);
- }
- self->pid_coredump_server = pid_coredump_server;
-
- EXPECT_EQ(close(ipc_sockets[1]), 0);
- ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1);
- EXPECT_EQ(close(ipc_sockets[0]), 0);
-
- pid = fork();
- ASSERT_GE(pid, 0);
- if (pid == 0)
- crashing_child();
-
- pidfd = sys_pidfd_open(pid, 0);
- ASSERT_GE(pidfd, 0);
-
- waitpid(pid, &status, 0);
- ASSERT_TRUE(WIFSIGNALED(status));
- ASSERT_FALSE(WCOREDUMP(status));
-
- waitpid(pid_coredump_server, &status, 0);
- self->pid_coredump_server = -ESRCH;
- ASSERT_TRUE(WIFEXITED(status));
- ASSERT_EQ(WEXITSTATUS(status), 0);
-}
-
TEST_HARNESS_MAIN
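
The socket tests removed above exercised the AF_UNIX coredump protocol: core_pattern is set to "@/path", the kernel connects to the listening socket when a process dumps core, and the server validates the peer via SO_PEERPIDFD plus PIDFD_GET_INFO before consuming the stream. A minimal sketch of such a server in Python (paths illustrative; SO_PEERPIDFD is given numerically on the assumption that the socket module does not export it, and the pidfd validation from the C version is elided):

    import socket

    SO_PEERPIDFD = 77  # from asm-generic/socket.h; assumed absent from the socket module

    srv = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    srv.bind("/tmp/coredump.socket")  # must match the '@'-prefixed core_pattern
    srv.listen(1)
    conn, _ = srv.accept()  # the kernel connects when a task dumps core
    # A real server would now fetch the peer pidfd via getsockopt(SOL_SOCKET,
    # SO_PEERPIDFD) and confirm PIDFD_COREDUMPED through ioctl(PIDFD_GET_INFO),
    # as the C test did, before trusting the stream.
    with open("/tmp/coredump.file", "wb") as f:
        while chunk := conn.recv(4096):
            f.write(chunk)
    conn.close()
    srv.close()
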
diff --git a/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
index d5dc7e0dc726..6232a46ca6e1 100755
--- a/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
+++ b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
@@ -67,7 +67,7 @@ hotpluggable_cpus()
done
}
-hotplaggable_offline_cpus()
+hotpluggable_offline_cpus()
{
hotpluggable_cpus 0
}
@@ -151,7 +151,7 @@ offline_cpu_expect_fail()
online_all_hot_pluggable_cpus()
{
- for cpu in `hotplaggable_offline_cpus`; do
+ for cpu in `hotpluggable_offline_cpus`; do
online_cpu_expect_success $cpu
done
}
diff --git a/tools/testing/selftests/damon/Makefile b/tools/testing/selftests/damon/Makefile
index ff21524be458..2180c328a825 100644
--- a/tools/testing/selftests/damon/Makefile
+++ b/tools/testing/selftests/damon/Makefile
@@ -4,9 +4,12 @@
TEST_GEN_FILES += access_memory access_memory_even
TEST_FILES = _damon_sysfs.py
+TEST_FILES += drgn_dump_damon_status.py
+TEST_FILES += _common.sh
# functionality tests
TEST_PROGS += sysfs.sh
+TEST_PROGS += sysfs.py
TEST_PROGS += sysfs_update_schemes_tried_regions_wss_estimation.py
TEST_PROGS += damos_quota.py damos_quota_goal.py damos_apply_interval.py
TEST_PROGS += damos_tried_regions.py damon_nr_regions.py
@@ -15,6 +18,8 @@ TEST_PROGS += reclaim.sh lru_sort.sh
# regression tests (reproducers of previously found bugs)
TEST_PROGS += sysfs_update_removed_scheme_dir.sh
TEST_PROGS += sysfs_update_schemes_tried_regions_hang.py
+TEST_PROGS += sysfs_memcg_path_leak.sh
+TEST_PROGS += sysfs_no_op_commit_break.py
EXTRA_CLEAN = __pycache__
diff --git a/tools/testing/selftests/damon/_common.sh b/tools/testing/selftests/damon/_common.sh
new file mode 100644
index 000000000000..0279698f733e
--- /dev/null
+++ b/tools/testing/selftests/damon/_common.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+check_dependencies()
+{
+ if [ $EUID -ne 0 ]
+ then
+ echo "Run as root"
+ exit $ksft_skip
+ fi
+}
diff --git a/tools/testing/selftests/damon/_damon_sysfs.py b/tools/testing/selftests/damon/_damon_sysfs.py
index 5b1cb6b3ce4e..748778b563cd 100644
--- a/tools/testing/selftests/damon/_damon_sysfs.py
+++ b/tools/testing/selftests/damon/_damon_sysfs.py
@@ -52,9 +52,9 @@ class DamosAccessPattern:
if self.size is None:
self.size = [0, 2**64 - 1]
if self.nr_accesses is None:
- self.nr_accesses = [0, 2**64 - 1]
+ self.nr_accesses = [0, 2**32 - 1]
if self.age is None:
- self.age = [0, 2**64 - 1]
+ self.age = [0, 2**32 - 1]
def sysfs_dir(self):
return os.path.join(self.scheme.sysfs_dir(), 'access_pattern')
@@ -93,14 +93,16 @@ class DamosQuotaGoal:
metric = None
target_value = None
current_value = None
+ nid = None
effective_bytes = None
quota = None # owner quota
idx = None
- def __init__(self, metric, target_value=10000, current_value=0):
+ def __init__(self, metric, target_value=10000, current_value=0, nid=0):
self.metric = metric
self.target_value = target_value
self.current_value = current_value
+ self.nid = nid
def sysfs_dir(self):
return os.path.join(self.quota.sysfs_dir(), 'goals', '%d' % self.idx)
@@ -118,6 +120,10 @@ class DamosQuotaGoal:
self.current_value)
if err is not None:
return err
+ err = write_file(os.path.join(self.sysfs_dir(), 'nid'), self.nid)
+ if err is not None:
+ return err
+
return None
class DamosQuota:
@@ -125,12 +131,20 @@ class DamosQuota:
ms = None # time quota
goals = None # quota goals
reset_interval_ms = None # quota reset interval
+ weight_sz_permil = None
+ weight_nr_accesses_permil = None
+ weight_age_permil = None
scheme = None # owner scheme
- def __init__(self, sz=0, ms=0, goals=None, reset_interval_ms=0):
+ def __init__(self, sz=0, ms=0, goals=None, reset_interval_ms=0,
+ weight_sz_permil=0, weight_nr_accesses_permil=0,
+ weight_age_permil=0):
self.sz = sz
self.ms = ms
self.reset_interval_ms = reset_interval_ms
+ self.weight_sz_permil = weight_sz_permil
+ self.weight_nr_accesses_permil = weight_nr_accesses_permil
+ self.weight_age_permil = weight_age_permil
self.goals = goals if goals is not None else []
for idx, goal in enumerate(self.goals):
goal.idx = idx
@@ -151,6 +165,20 @@ class DamosQuota:
if err is not None:
return err
+ err = write_file(os.path.join(
+ self.sysfs_dir(), 'weights', 'sz_permil'), self.weight_sz_permil)
+ if err is not None:
+ return err
+ err = write_file(os.path.join(
+ self.sysfs_dir(), 'weights', 'nr_accesses_permil'),
+ self.weight_nr_accesses_permil)
+ if err is not None:
+ return err
+ err = write_file(os.path.join(
+ self.sysfs_dir(), 'weights', 'age_permil'), self.weight_age_permil)
+ if err is not None:
+ return err
+
nr_goals_file = os.path.join(self.sysfs_dir(), 'goals', 'nr_goals')
content, err = read_file(nr_goals_file)
if err is not None:
@@ -165,6 +193,178 @@ class DamosQuota:
return err
return None
+class DamosWatermarks:
+ metric = None
+ interval = None
+ high = None
+ mid = None
+ low = None
+ scheme = None # owner scheme
+
+ def __init__(self, metric='none', interval=0, high=0, mid=0, low=0):
+ self.metric = metric
+ self.interval = interval
+ self.high = high
+ self.mid = mid
+ self.low = low
+
+ def sysfs_dir(self):
+ return os.path.join(self.scheme.sysfs_dir(), 'watermarks')
+
+ def stage(self):
+ err = write_file(os.path.join(self.sysfs_dir(), 'metric'), self.metric)
+ if err is not None:
+ return err
+ err = write_file(os.path.join(self.sysfs_dir(), 'interval_us'),
+ self.interval)
+ if err is not None:
+ return err
+ err = write_file(os.path.join(self.sysfs_dir(), 'high'), self.high)
+ if err is not None:
+ return err
+ err = write_file(os.path.join(self.sysfs_dir(), 'mid'), self.mid)
+ if err is not None:
+ return err
+ err = write_file(os.path.join(self.sysfs_dir(), 'low'), self.low)
+ if err is not None:
+ return err
+
+class DamosFilter:
+ type_ = None
+ matching = None
+ allow = None
+ memcg_path = None
+ addr_start = None
+ addr_end = None
+ target_idx = None
+ min_ = None
+ max_ = None
+ idx = None
+ filters = None # owner filters
+
+ def __init__(self, type_='anon', matching=False, allow=False,
+ memcg_path='', addr_start=0, addr_end=0, target_idx=0, min_=0,
+ max_=0):
+ self.type_ = type_
+ self.matching = matching
+ self.allow = allow
+ self.memcg_path = memcg_path
+ self.addr_start = addr_start
+ self.addr_end = addr_end
+ self.target_idx = target_idx
+ self.min_ = min_
+ self.max_ = max_
+
+ def sysfs_dir(self):
+ return os.path.join(self.filters.sysfs_dir(), '%d' % self.idx)
+
+ def stage(self):
+ err = write_file(os.path.join(self.sysfs_dir(), 'type'), self.type_)
+ if err is not None:
+ return err
+ err = write_file(os.path.join(self.sysfs_dir(), 'matching'),
+ self.matching)
+ if err is not None:
+ return err
+ err = write_file(os.path.join(self.sysfs_dir(), 'allow'), self.allow)
+ if err is not None:
+ return err
+ err = write_file(os.path.join(self.sysfs_dir(), 'memcg_path'),
+ self.memcg_path)
+ if err is not None:
+ return err
+ err = write_file(os.path.join(self.sysfs_dir(), 'addr_start'),
+ self.addr_start)
+ if err is not None:
+ return err
+ err = write_file(os.path.join(self.sysfs_dir(), 'addr_end'),
+ self.addr_end)
+ if err is not None:
+ return err
+ err = write_file(os.path.join(self.sysfs_dir(), 'damon_target_idx'),
+ self.target_idx)
+ if err is not None:
+ return err
+ err = write_file(os.path.join(self.sysfs_dir(), 'min'), self.min_)
+ if err is not None:
+ return err
+ err = write_file(os.path.join(self.sysfs_dir(), 'max'), self.max_)
+ if err is not None:
+ return err
+ return None
+
+class DamosFilters:
+ name = None
+ filters = None
+ scheme = None # owner scheme
+
+ def __init__(self, name, filters=[]):
+ self.name = name
+ self.filters = filters
+ for idx, filter_ in enumerate(self.filters):
+ filter_.idx = idx
+ filter_.filters = self
+
+ def sysfs_dir(self):
+ return os.path.join(self.scheme.sysfs_dir(), self.name)
+
+ def stage(self):
+ err = write_file(os.path.join(self.sysfs_dir(), 'nr_filters'),
+ len(self.filters))
+ if err is not None:
+ return err
+ for filter_ in self.filters:
+ err = filter_.stage()
+ if err is not None:
+ return err
+ return None
+
+class DamosDest:
+ id = None
+ weight = None
+ idx = None
+ dests = None # owner dests
+
+ def __init__(self, id=0, weight=0):
+ self.id = id
+ self.weight = weight
+
+ def sysfs_dir(self):
+ return os.path.join(self.dests.sysfs_dir(), '%d' % self.idx)
+
+ def stage(self):
+ err = write_file(os.path.join(self.sysfs_dir(), 'id'), self.id)
+ if err is not None:
+ return err
+ err = write_file(os.path.join(self.sysfs_dir(), 'weight'), self.weight)
+ if err is not None:
+ return err
+ return None
+
+class DamosDests:
+ dests = None
+ scheme = None # owner scheme
+
+ def __init__(self, dests=[]):
+ self.dests = dests
+ for idx, dest in enumerate(self.dests):
+ dest.idx = idx
+ dest.dests = self
+
+ def sysfs_dir(self):
+ return os.path.join(self.scheme.sysfs_dir(), 'dests')
+
+ def stage(self):
+ err = write_file(os.path.join(self.sysfs_dir(), 'nr_dests'),
+ len(self.dests))
+ if err is not None:
+ return err
+ for dest in self.dests:
+ err = dest.stage()
+ if err is not None:
+ return err
+ return None
+
class DamosStats:
nr_tried = None
sz_tried = None
@@ -190,8 +390,13 @@ class Damos:
action = None
access_pattern = None
quota = None
+ watermarks = None
+ core_filters = None
+ ops_filters = None
+ filters = None
apply_interval_us = None
- # todo: Support watermarks, stats
+ target_nid = None
+ dests = None
idx = None
context = None
tried_bytes = None
@@ -199,12 +404,30 @@ class Damos:
tried_regions = None
def __init__(self, action='stat', access_pattern=DamosAccessPattern(),
- quota=DamosQuota(), apply_interval_us=0):
+ quota=DamosQuota(), watermarks=DamosWatermarks(),
+ core_filters=[], ops_filters=[], filters=[], target_nid=0,
+ dests=DamosDests(), apply_interval_us=0):
self.action = action
self.access_pattern = access_pattern
self.access_pattern.scheme = self
self.quota = quota
self.quota.scheme = self
+ self.watermarks = watermarks
+ self.watermarks.scheme = self
+
+ self.core_filters = DamosFilters(name='core_filters',
+ filters=core_filters)
+ self.core_filters.scheme = self
+ self.ops_filters = DamosFilters(name='ops_filters',
+ filters=ops_filters)
+ self.ops_filters.scheme = self
+ self.filters = DamosFilters(name='filters', filters=filters)
+ self.filters.scheme = self
+
+ self.target_nid = target_nid
+ self.dests = dests
+ self.dests.scheme = self
+
self.apply_interval_us = apply_interval_us
def sysfs_dir(self):
@@ -227,26 +450,39 @@ class Damos:
if err is not None:
return err
- # disable watermarks
- err = write_file(
- os.path.join(self.sysfs_dir(), 'watermarks', 'metric'), 'none')
+ err = self.watermarks.stage()
if err is not None:
return err
- # disable filters
- err = write_file(
- os.path.join(self.sysfs_dir(), 'filters', 'nr_filters'), '0')
+ err = self.core_filters.stage()
+ if err is not None:
+ return err
+ err = self.ops_filters.stage()
+ if err is not None:
+ return err
+ err = self.filters.stage()
+ if err is not None:
+ return err
+
+ err = write_file(os.path.join(self.sysfs_dir(), 'target_nid'), '%d' %
+ self.target_nid)
+ if err is not None:
+ return err
+
+ err = self.dests.stage()
if err is not None:
return err
class DamonTarget:
pid = None
+ obsolete = None
# todo: Support target regions if test is made
idx = None
context = None
- def __init__(self, pid):
+ def __init__(self, pid, obsolete=False):
self.pid = pid
+ self.obsolete = obsolete
def sysfs_dir(self):
return os.path.join(
@@ -257,21 +493,64 @@ class DamonTarget:
os.path.join(self.sysfs_dir(), 'regions', 'nr_regions'), '0')
if err is not None:
return err
- return write_file(
+ err = write_file(
os.path.join(self.sysfs_dir(), 'pid_target'), self.pid)
+ if err is not None:
+ return err
+ return write_file(
+ os.path.join(self.sysfs_dir(), 'obsolete_target'),
+ 'Y' if self.obsolete else 'N')
+
+class IntervalsGoal:
+ access_bp = None
+ aggrs = None
+ min_sample_us = None
+ max_sample_us = None
+ attrs = None # owner DamonAttrs
+
+ def __init__(self, access_bp=0, aggrs=0, min_sample_us=0, max_sample_us=0):
+ self.access_bp = access_bp
+ self.aggrs = aggrs
+ self.min_sample_us = min_sample_us
+ self.max_sample_us = max_sample_us
+
+ def sysfs_dir(self):
+ return os.path.join(self.attrs.interval_sysfs_dir(), 'intervals_goal')
+
+ def stage(self):
+ err = write_file(
+ os.path.join(self.sysfs_dir(), 'access_bp'), self.access_bp)
+ if err is not None:
+ return err
+ err = write_file(os.path.join(self.sysfs_dir(), 'aggrs'), self.aggrs)
+ if err is not None:
+ return err
+ err = write_file(os.path.join(self.sysfs_dir(), 'min_sample_us'),
+ self.min_sample_us)
+ if err is not None:
+ return err
+ err = write_file(os.path.join(self.sysfs_dir(), 'max_sample_us'),
+ self.max_sample_us)
+ if err is not None:
+ return err
+ return None
class DamonAttrs:
sample_us = None
aggr_us = None
+ intervals_goal = None
update_us = None
min_nr_regions = None
max_nr_regions = None
context = None
- def __init__(self, sample_us=5000, aggr_us=100000, update_us=1000000,
+ def __init__(self, sample_us=5000, aggr_us=100000,
+ intervals_goal=IntervalsGoal(), update_us=1000000,
min_nr_regions=10, max_nr_regions=1000):
self.sample_us = sample_us
self.aggr_us = aggr_us
+ self.intervals_goal = intervals_goal
+ self.intervals_goal.attrs = self
self.update_us = update_us
self.min_nr_regions = min_nr_regions
self.max_nr_regions = max_nr_regions
@@ -293,6 +572,9 @@ class DamonAttrs:
self.aggr_us)
if err is not None:
return err
+ err = self.intervals_goal.stage()
+ if err is not None:
+ return err
err = write_file(os.path.join(self.interval_sysfs_dir(), 'update_us'),
self.update_us)
if err is not None:
@@ -408,6 +690,9 @@ class Kdamond:
if err is not None:
return err
err = write_file(os.path.join(self.sysfs_dir(), 'state'), 'on')
+ if err is not None:
+ return err
+ self.pid, err = read_file(os.path.join(self.sysfs_dir(), 'pid'))
return err
def stop(self):
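
All of the new classes above follow the same staging pattern: each object owns a sysfs directory and stage() writes one file per field. Hand-rolled, the DamosFilter case reduces to something like the sketch below (paths assume kdamond 0, context 0, scheme 0, filter 0; the Y/N boolean encoding mirrors what the write_file helper is expected to produce):

    import os

    # Illustrative only: what DamosFilter.stage() writes for an 'addr' filter.
    base = "/sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/schemes/0/filters/0"
    for name, value in [("type", "addr"), ("matching", "Y"), ("allow", "N"),
                        ("addr_start", 42), ("addr_end", 4242)]:
        with open(os.path.join(base, name), "w") as f:
            f.write(str(value))
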
diff --git a/tools/testing/selftests/damon/access_memory_even.c b/tools/testing/selftests/damon/access_memory_even.c
index a9f4e9aaf3a9..93f3a71bcfd4 100644
--- a/tools/testing/selftests/damon/access_memory_even.c
+++ b/tools/testing/selftests/damon/access_memory_even.c
@@ -9,7 +9,6 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <time.h>
int main(int argc, char *argv[])
{
diff --git a/tools/testing/selftests/damon/drgn_dump_damon_status.py b/tools/testing/selftests/damon/drgn_dump_damon_status.py
new file mode 100755
index 000000000000..5374d18d1fa8
--- /dev/null
+++ b/tools/testing/selftests/damon/drgn_dump_damon_status.py
@@ -0,0 +1,223 @@
+#!/usr/bin/env drgn
+# SPDX-License-Identifier: GPL-2.0
+
+'''
+Read DAMON context data and dump as a json string.
+'''
+import drgn
+from drgn import FaultError, NULL, Object, cast, container_of, execscript, offsetof, reinterpret, sizeof
+from drgn.helpers.common import *
+from drgn.helpers.linux import *
+
+import json
+import sys
+
+if "prog" not in globals():
+ try:
+ prog = drgn.get_default_prog()
+ except drgn.NoDefaultProgramError:
+ prog = drgn.program_from_kernel()
+ drgn.set_default_prog(prog)
+
+def to_dict(object, attr_name_converter):
+ d = {}
+ for attr_name, converter in attr_name_converter:
+ d[attr_name] = converter(getattr(object, attr_name))
+ return d
+
+def ops_to_dict(ops):
+ return to_dict(ops, [
+ ['id', int],
+ ])
+
+def intervals_goal_to_dict(goal):
+ return to_dict(goal, [
+ ['access_bp', int],
+ ['aggrs', int],
+ ['min_sample_us', int],
+ ['max_sample_us', int],
+ ])
+
+def attrs_to_dict(attrs):
+ return to_dict(attrs, [
+ ['sample_interval', int],
+ ['aggr_interval', int],
+ ['ops_update_interval', int],
+ ['intervals_goal', intervals_goal_to_dict],
+ ['min_nr_regions', int],
+ ['max_nr_regions', int],
+ ])
+
+def addr_range_to_dict(addr_range):
+ return to_dict(addr_range, [
+ ['start', int],
+ ['end', int],
+ ])
+
+def region_to_dict(region):
+ return to_dict(region, [
+ ['ar', addr_range_to_dict],
+ ['sampling_addr', int],
+ ['nr_accesses', int],
+ ['nr_accesses_bp', int],
+ ['age', int],
+ ])
+
+def regions_to_list(regions):
+ return [region_to_dict(r)
+ for r in list_for_each_entry(
+ 'struct damon_region', regions.address_of_(), 'list')]
+
+def target_to_dict(target):
+ return to_dict(target, [
+ ['pid', int],
+ ['nr_regions', int],
+ ['regions_list', regions_to_list],
+ ['obsolete', bool],
+ ])
+
+def targets_to_list(targets):
+ return [target_to_dict(t)
+ for t in list_for_each_entry(
+ 'struct damon_target', targets.address_of_(), 'list')]
+
+def damos_access_pattern_to_dict(pattern):
+ return to_dict(pattern, [
+ ['min_sz_region', int],
+ ['max_sz_region', int],
+ ['min_nr_accesses', int],
+ ['max_nr_accesses', int],
+ ['min_age_region', int],
+ ['max_age_region', int],
+ ])
+
+def damos_quota_goal_to_dict(goal):
+ return to_dict(goal, [
+ ['metric', int],
+ ['target_value', int],
+ ['current_value', int],
+ ['last_psi_total', int],
+ ['nid', int],
+ ])
+
+def damos_quota_goals_to_list(goals):
+ return [damos_quota_goal_to_dict(g)
+ for g in list_for_each_entry(
+ 'struct damos_quota_goal', goals.address_of_(), 'list')]
+
+def damos_quota_to_dict(quota):
+ return to_dict(quota, [
+ ['reset_interval', int],
+ ['ms', int], ['sz', int],
+ ['goals', damos_quota_goals_to_list],
+ ['esz', int],
+ ['weight_sz', int],
+ ['weight_nr_accesses', int],
+ ['weight_age', int],
+ ])
+
+def damos_watermarks_to_dict(watermarks):
+ return to_dict(watermarks, [
+ ['metric', int],
+ ['interval', int],
+ ['high', int], ['mid', int], ['low', int],
+ ])
+
+def damos_migrate_dests_to_dict(dests):
+ nr_dests = int(dests.nr_dests)
+ node_id_arr = []
+ weight_arr = []
+ for i in range(nr_dests):
+ node_id_arr.append(int(dests.node_id_arr[i]))
+ weight_arr.append(int(dests.weight_arr[i]))
+ return {
+ 'node_id_arr': node_id_arr,
+ 'weight_arr': weight_arr,
+ 'nr_dests': nr_dests,
+ }
+
+def damos_filter_to_dict(damos_filter):
+ filter_type_keyword = {
+ 0: 'anon',
+ 1: 'active',
+ 2: 'memcg',
+ 3: 'young',
+ 4: 'hugepage_size',
+ 5: 'unmapped',
+ 6: 'addr',
+ 7: 'target'
+ }
+ dict_ = {
+ 'type': filter_type_keyword[int(damos_filter.type)],
+ 'matching': bool(damos_filter.matching),
+ 'allow': bool(damos_filter.allow),
+ }
+ type_ = dict_['type']
+ if type_ == 'memcg':
+ dict_['memcg_id'] = int(damos_filter.memcg_id)
+ elif type_ == 'addr':
+ dict_['addr_range'] = [int(damos_filter.addr_range.start),
+ int(damos_filter.addr_range.end)]
+ elif type_ == 'target':
+ dict_['target_idx'] = int(damos_filter.target_idx)
+ elif type_ == 'hugepage_size':
+ dict_['sz_range'] = [int(damos_filter.sz_range.min),
+ int(damos_filter.sz_range.max)]
+ return dict_
+
+def scheme_to_dict(scheme):
+ dict_ = to_dict(scheme, [
+ ['pattern', damos_access_pattern_to_dict],
+ ['action', int],
+ ['apply_interval_us', int],
+ ['quota', damos_quota_to_dict],
+ ['wmarks', damos_watermarks_to_dict],
+ ['target_nid', int],
+ ['migrate_dests', damos_migrate_dests_to_dict],
+ ])
+ core_filters = []
+ for f in list_for_each_entry(
+ 'struct damos_filter', scheme.core_filters.address_of_(), 'list'):
+ core_filters.append(damos_filter_to_dict(f))
+ dict_['core_filters'] = core_filters
+ ops_filters = []
+ for f in list_for_each_entry(
+ 'struct damos_filter', scheme.ops_filters.address_of_(), 'list'):
+ ops_filters.append(damos_filter_to_dict(f))
+ dict_['ops_filters'] = ops_filters
+
+ return dict_
+
+def schemes_to_list(schemes):
+ return [scheme_to_dict(s)
+ for s in list_for_each_entry(
+ 'struct damos', schemes.address_of_(), 'list')]
+
+def damon_ctx_to_dict(ctx):
+ return to_dict(ctx, [
+ ['ops', ops_to_dict],
+ ['attrs', attrs_to_dict],
+ ['adaptive_targets', targets_to_list],
+ ['schemes', schemes_to_list],
+ ])
+
+def main():
+ if len(sys.argv) < 3:
+ print('Usage: %s <kdamond pid> <file>' % sys.argv[0])
+ exit(1)
+
+ pid = int(sys.argv[1])
+ file_to_store = sys.argv[2]
+
+ kthread_data = cast('struct kthread *',
+ find_task(prog, pid).worker_private).data
+ ctx = cast('struct damon_ctx *', kthread_data)
+ status = {'contexts': [damon_ctx_to_dict(ctx)]}
+ if file_to_store == 'stdout':
+ print(json.dumps(status, indent=4))
+ else:
+ with open(file_to_store, 'w') as f:
+ json.dump(status, f, indent=4)
+
+if __name__ == '__main__':
+ main()
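
Every *_to_dict() helper above funnels through to_dict(), which pairs attribute names with converter callables so nested structs become plain JSON-friendly values. The same pattern in pure Python, without drgn objects:

    def to_dict(obj, attr_name_converter):
        return {name: conv(getattr(obj, name)) for name, conv in attr_name_converter}

    class Attrs:  # stand-in for a drgn struct
        sample_interval = 5000
        aggr_interval = 100000

    print(to_dict(Attrs(), [['sample_interval', int], ['aggr_interval', int]]))
    # {'sample_interval': 5000, 'aggr_interval': 100000}
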
diff --git a/tools/testing/selftests/damon/lru_sort.sh b/tools/testing/selftests/damon/lru_sort.sh
index 61b80197c896..1e4849db78a9 100755
--- a/tools/testing/selftests/damon/lru_sort.sh
+++ b/tools/testing/selftests/damon/lru_sort.sh
@@ -1,14 +1,12 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+source _common.sh
+
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
-if [ $EUID -ne 0 ]
-then
- echo "Run as root"
- exit $ksft_skip
-fi
+check_dependencies
damon_lru_sort_enabled="/sys/module/damon_lru_sort/parameters/enabled"
if [ ! -f "$damon_lru_sort_enabled" ]
diff --git a/tools/testing/selftests/damon/reclaim.sh b/tools/testing/selftests/damon/reclaim.sh
index 78dbc2334cbe..e56ceb035129 100755
--- a/tools/testing/selftests/damon/reclaim.sh
+++ b/tools/testing/selftests/damon/reclaim.sh
@@ -1,14 +1,12 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+source _common.sh
+
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
-if [ $EUID -ne 0 ]
-then
- echo "Run as root"
- exit $ksft_skip
-fi
+check_dependencies
damon_reclaim_enabled="/sys/module/damon_reclaim/parameters/enabled"
if [ ! -f "$damon_reclaim_enabled" ]
diff --git a/tools/testing/selftests/damon/sysfs.py b/tools/testing/selftests/damon/sysfs.py
new file mode 100755
index 000000000000..9cca71eb0325
--- /dev/null
+++ b/tools/testing/selftests/damon/sysfs.py
@@ -0,0 +1,303 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+import json
+import os
+import subprocess
+
+import _damon_sysfs
+
+def dump_damon_status_dict(pid):
+ try:
+ subprocess.check_output(['which', 'drgn'], stderr=subprocess.DEVNULL)
+ except Exception:
+ return None, 'drgn not found'
+ file_dir = os.path.dirname(os.path.abspath(__file__))
+ dump_script = os.path.join(file_dir, 'drgn_dump_damon_status.py')
+ rc = subprocess.call(['drgn', dump_script, pid, 'damon_dump_output'],
+ stderr=subprocess.DEVNULL)
+ if rc != 0:
+ return None, 'drgn fail'
+ try:
+ with open('damon_dump_output', 'r') as f:
+ return json.load(f), None
+ except Exception as e:
+ return None, 'json.load fail (%s)' % e
+
+def fail(expectation, status):
+ print('unexpected %s' % expectation)
+ print(json.dumps(status, indent=4))
+ exit(1)
+
+def assert_true(condition, expectation, status):
+ if condition is not True:
+ fail(expectation, status)
+
+def assert_watermarks_committed(watermarks, dump):
+ wmark_metric_val = {
+ 'none': 0,
+ 'free_mem_rate': 1,
+ }
+ assert_true(dump['metric'] == wmark_metric_val[watermarks.metric],
+ 'metric', dump)
+ assert_true(dump['interval'] == watermarks.interval, 'interval', dump)
+ assert_true(dump['high'] == watermarks.high, 'high', dump)
+ assert_true(dump['mid'] == watermarks.mid, 'mid', dump)
+ assert_true(dump['low'] == watermarks.low, 'low', dump)
+
+def assert_quota_goal_committed(qgoal, dump):
+ metric_val = {
+ 'user_input': 0,
+ 'some_mem_psi_us': 1,
+ 'node_mem_used_bp': 2,
+ 'node_mem_free_bp': 3,
+ }
+ assert_true(dump['metric'] == metric_val[qgoal.metric], 'metric', dump)
+ assert_true(dump['target_value'] == qgoal.target_value, 'target_value',
+ dump)
+ if qgoal.metric == 'user_input':
+ assert_true(dump['current_value'] == qgoal.current_value,
+ 'current_value', dump)
+ assert_true(dump['nid'] == qgoal.nid, 'nid', dump)
+
+def assert_quota_committed(quota, dump):
+ assert_true(dump['reset_interval'] == quota.reset_interval_ms,
+ 'reset_interval', dump)
+ assert_true(dump['ms'] == quota.ms, 'ms', dump)
+ assert_true(dump['sz'] == quota.sz, 'sz', dump)
+ for idx, qgoal in enumerate(quota.goals):
+ assert_quota_goal_committed(qgoal, dump['goals'][idx])
+ assert_true(dump['weight_sz'] == quota.weight_sz_permil, 'weight_sz', dump)
+ assert_true(dump['weight_nr_accesses'] == quota.weight_nr_accesses_permil,
+ 'weight_nr_accesses', dump)
+ assert_true(
+ dump['weight_age'] == quota.weight_age_permil, 'weight_age', dump)
+
+
+def assert_migrate_dests_committed(dests, dump):
+ assert_true(dump['nr_dests'] == len(dests.dests), 'nr_dests', dump)
+ for idx, dest in enumerate(dests.dests):
+ assert_true(dump['node_id_arr'][idx] == dest.id, 'node_id', dump)
+ assert_true(dump['weight_arr'][idx] == dest.weight, 'weight', dump)
+
+def assert_filter_committed(filter_, dump):
+ assert_true(filter_.type_ == dump['type'], 'type', dump)
+ assert_true(filter_.matching == dump['matching'], 'matching', dump)
+ assert_true(filter_.allow == dump['allow'], 'allow', dump)
+ # TODO: check memcg_path and memcg_id if type is memcg
+ if filter_.type_ == 'addr':
+ assert_true([filter_.addr_start, filter_.addr_end] ==
+ dump['addr_range'], 'addr_range', dump)
+ elif filter_.type_ == 'target':
+ assert_true(filter_.target_idx == dump['target_idx'], 'target_idx',
+ dump)
+ elif filter_.type_ == 'hugepage_size':
+ assert_true([filter_.min_, filter_.max_] == dump['sz_range'],
+ 'sz_range', dump)
+
+def assert_access_pattern_committed(pattern, dump):
+ assert_true(dump['min_sz_region'] == pattern.size[0], 'min_sz_region',
+ dump)
+ assert_true(dump['max_sz_region'] == pattern.size[1], 'max_sz_region',
+ dump)
+ assert_true(dump['min_nr_accesses'] == pattern.nr_accesses[0],
+ 'min_nr_accesses', dump)
+ assert_true(dump['max_nr_accesses'] == pattern.nr_accesses[1],
+ 'max_nr_accesses', dump)
+ assert_true(dump['min_age_region'] == pattern.age[0], 'min_age_region',
+ dump)
+ assert_true(dump['max_age_region'] == pattern.age[1], 'max_age_region',
+ dump)
+
+def assert_scheme_committed(scheme, dump):
+ assert_access_pattern_committed(scheme.access_pattern, dump['pattern'])
+ action_val = {
+ 'willneed': 0,
+ 'cold': 1,
+ 'pageout': 2,
+ 'hugepage': 3,
+ 'nohugepage': 4,
+ 'lru_prio': 5,
+ 'lru_deprio': 6,
+ 'migrate_hot': 7,
+ 'migrate_cold': 8,
+ 'stat': 9,
+ }
+ assert_true(dump['action'] == action_val[scheme.action], 'action', dump)
+ assert_true(dump['apply_interval_us'] == scheme.apply_interval_us,
+ 'apply_interval_us', dump)
+ assert_true(dump['target_nid'] == scheme.target_nid, 'target_nid', dump)
+ assert_migrate_dests_committed(scheme.dests, dump['migrate_dests'])
+ assert_quota_committed(scheme.quota, dump['quota'])
+ assert_watermarks_committed(scheme.watermarks, dump['wmarks'])
+ # TODO: test filters directory
+ for idx, f in enumerate(scheme.core_filters.filters):
+ assert_filter_committed(f, dump['core_filters'][idx])
+ for idx, f in enumerate(scheme.ops_filters.filters):
+ assert_filter_committed(f, dump['ops_filters'][idx])
+
+def assert_schemes_committed(schemes, dump):
+ assert_true(len(schemes) == len(dump), 'len_schemes', dump)
+ for idx, scheme in enumerate(schemes):
+ assert_scheme_committed(scheme, dump[idx])
+
+def assert_monitoring_attrs_committed(attrs, dump):
+ assert_true(dump['sample_interval'] == attrs.sample_us, 'sample_interval',
+ dump)
+ assert_true(dump['aggr_interval'] == attrs.aggr_us, 'aggr_interval', dump)
+ assert_true(dump['intervals_goal']['access_bp'] ==
+ attrs.intervals_goal.access_bp, 'access_bp',
+ dump['intervals_goal'])
+ assert_true(dump['intervals_goal']['aggrs'] == attrs.intervals_goal.aggrs,
+ 'aggrs', dump['intervals_goal'])
+ assert_true(dump['intervals_goal']['min_sample_us'] ==
+ attrs.intervals_goal.min_sample_us, 'min_sample_us',
+ dump['intervals_goal'])
+ assert_true(dump['intervals_goal']['max_sample_us'] ==
+ attrs.intervals_goal.max_sample_us, 'max_sample_us',
+ dump['intervals_goal'])
+
+ assert_true(dump['ops_update_interval'] == attrs.update_us,
+ 'ops_update_interval', dump)
+ assert_true(dump['min_nr_regions'] == attrs.min_nr_regions,
+ 'min_nr_regions', dump)
+ assert_true(dump['max_nr_regions'] == attrs.max_nr_regions,
+ 'max_nr_regions', dump)
+
+def assert_monitoring_target_committed(target, dump):
+ # target.pid is the pid "number", while dump['pid'] is 'struct pid'
+ # pointer, and hence cannot be compared.
+ assert_true(dump['obsolete'] == target.obsolete, 'target obsolete', dump)
+
+def assert_monitoring_targets_committed(targets, dump):
+ assert_true(len(targets) == len(dump), 'len_targets', dump)
+ for idx, target in enumerate(targets):
+ assert_monitoring_target_committed(target, dump[idx])
+
+def assert_ctx_committed(ctx, dump):
+ ops_val = {
+ 'vaddr': 0,
+ 'fvaddr': 1,
+ 'paddr': 2,
+ }
+ assert_true(dump['ops']['id'] == ops_val[ctx.ops], 'ops_id', dump)
+ assert_monitoring_attrs_committed(ctx.monitoring_attrs, dump['attrs'])
+ assert_monitoring_targets_committed(ctx.targets, dump['adaptive_targets'])
+ assert_schemes_committed(ctx.schemes, dump['schemes'])
+
+def assert_ctxs_committed(kdamonds):
+ status, err = dump_damon_status_dict(kdamonds.kdamonds[0].pid)
+ if err is not None:
+ print(err)
+ kdamonds.stop()
+ exit(1)
+
+ ctxs = kdamonds.kdamonds[0].contexts
+ dump = status['contexts']
+ assert_true(len(ctxs) == len(dump), 'ctxs length', dump)
+ for idx, ctx in enumerate(ctxs):
+ assert_ctx_committed(ctx, dump[idx])
+
+def main():
+ kdamonds = _damon_sysfs.Kdamonds(
+ [_damon_sysfs.Kdamond(
+ contexts=[_damon_sysfs.DamonCtx(
+ targets=[_damon_sysfs.DamonTarget(pid=-1)],
+ schemes=[_damon_sysfs.Damos()],
+ )])])
+ err = kdamonds.start()
+ if err is not None:
+ print('kdamond start failed: %s' % err)
+ exit(1)
+
+ assert_ctxs_committed(kdamonds)
+
+ context = _damon_sysfs.DamonCtx(
+ monitoring_attrs=_damon_sysfs.DamonAttrs(
+ sample_us=100000, aggr_us=2000000,
+ intervals_goal=_damon_sysfs.IntervalsGoal(
+ access_bp=400, aggrs=3, min_sample_us=5000,
+ max_sample_us=10000000),
+ update_us=2000000),
+ schemes=[_damon_sysfs.Damos(
+ action='pageout',
+ access_pattern=_damon_sysfs.DamosAccessPattern(
+ size=[4096, 2**10],
+ nr_accesses=[3, 317],
+ age=[5,71]),
+ quota=_damon_sysfs.DamosQuota(
+ sz=100*1024*1024, ms=100,
+ goals=[_damon_sysfs.DamosQuotaGoal(
+ metric='node_mem_used_bp',
+ target_value=9950,
+ nid=1)],
+ reset_interval_ms=1500,
+ weight_sz_permil=20,
+ weight_nr_accesses_permil=200,
+ weight_age_permil=1000),
+ watermarks=_damon_sysfs.DamosWatermarks(
+ metric = 'free_mem_rate', interval = 500000, # 500 ms
+ high = 500, mid = 400, low = 50),
+ target_nid=1,
+ apply_interval_us=1000000,
+ dests=_damon_sysfs.DamosDests(
+ dests=[_damon_sysfs.DamosDest(id=1, weight=30),
+ _damon_sysfs.DamosDest(id=0, weight=70)]),
+ core_filters=[
+ _damon_sysfs.DamosFilter(type_='addr', matching=True,
+ allow=False, addr_start=42,
+ addr_end=4242),
+ ],
+ ops_filters=[
+ _damon_sysfs.DamosFilter(type_='anon', matching=True,
+ allow=True),
+ ],
+ )])
+ context.idx = 0
+ context.kdamond = kdamonds.kdamonds[0]
+ kdamonds.kdamonds[0].contexts = [context]
+ kdamonds.kdamonds[0].commit()
+
+ assert_ctxs_committed(kdamonds)
+
+ # test online commitment of minimum context.
+ context = _damon_sysfs.DamonCtx()
+ context.idx = 0
+ context.kdamond = kdamonds.kdamonds[0]
+ kdamonds.kdamonds[0].contexts = [context]
+ kdamonds.kdamonds[0].commit()
+
+ assert_ctxs_committed(kdamonds)
+
+ kdamonds.stop()
+
+ # test obsolete_target.
+ proc1 = subprocess.Popen(['sh'], stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc2 = subprocess.Popen(['sh'], stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc3 = subprocess.Popen(['sh'], stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ kdamonds = _damon_sysfs.Kdamonds(
+ [_damon_sysfs.Kdamond(
+ contexts=[_damon_sysfs.DamonCtx(
+ ops='vaddr',
+ targets=[
+ _damon_sysfs.DamonTarget(pid=proc1.pid),
+ _damon_sysfs.DamonTarget(pid=proc2.pid),
+ _damon_sysfs.DamonTarget(pid=proc3.pid),
+ ],
+ schemes=[_damon_sysfs.Damos()],
+ )])])
+ err = kdamonds.start()
+ if err is not None:
+ print('kdamond start failed: %s' % err)
+ exit(1)
+ kdamonds.kdamonds[0].contexts[0].targets[1].obsolete = True
+ kdamonds.kdamonds[0].commit()
+ del kdamonds.kdamonds[0].contexts[0].targets[1]
+ assert_ctxs_committed(kdamonds)
+ kdamonds.stop()
+
+if __name__ == '__main__':
+ main()
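
The online-commit sequence that sysfs.py exercises boils down to re-staging context parameters and writing 'commit' to the kdamond state file; Kdamond.commit() hides exactly this. A minimal sketch against the raw sysfs interface (kdamond index and the sample_us value are illustrative):

    import os

    admin = "/sys/kernel/mm/damon/admin/kdamonds/0"

    def write(path, value):
        with open(path, "w") as f:
            f.write(str(value))

    # Stage a new sampling interval, then apply it to the running kdamond.
    write(os.path.join(admin, "contexts/0/monitoring_attrs/intervals/sample_us"),
          100000)
    write(os.path.join(admin, "state"), "commit")
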
diff --git a/tools/testing/selftests/damon/sysfs.sh b/tools/testing/selftests/damon/sysfs.sh
index e9a976d296e2..83e3b7f63d81 100755
--- a/tools/testing/selftests/damon/sysfs.sh
+++ b/tools/testing/selftests/damon/sysfs.sh
@@ -1,6 +1,8 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+source _common.sh
+
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
@@ -364,14 +366,5 @@ test_damon_sysfs()
test_kdamonds "$damon_sysfs/kdamonds"
}
-check_dependencies()
-{
- if [ $EUID -ne 0 ]
- then
- echo "Run as root"
- exit $ksft_skip
- fi
-}
-
check_dependencies
test_damon_sysfs "/sys/kernel/mm/damon/admin"
diff --git a/tools/testing/selftests/damon/sysfs_memcg_path_leak.sh b/tools/testing/selftests/damon/sysfs_memcg_path_leak.sh
new file mode 100755
index 000000000000..64c5d8c518a4
--- /dev/null
+++ b/tools/testing/selftests/damon/sysfs_memcg_path_leak.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+if [ $EUID -ne 0 ]
+then
+ echo "Run as root"
+ exit $ksft_skip
+fi
+
+damon_sysfs="/sys/kernel/mm/damon/admin"
+if [ ! -d "$damon_sysfs" ]
+then
+ echo "damon sysfs not found"
+ exit $ksft_skip
+fi
+
+# ensure a filter directory exists
+echo 1 > "$damon_sysfs/kdamonds/nr_kdamonds"
+echo 1 > "$damon_sysfs/kdamonds/0/contexts/nr_contexts"
+echo 1 > "$damon_sysfs/kdamonds/0/contexts/0/schemes/nr_schemes"
+echo 1 > "$damon_sysfs/kdamonds/0/contexts/0/schemes/0/filters/nr_filters"
+
+filter_dir="$damon_sysfs/kdamonds/0/contexts/0/schemes/0/filters/0"
+
+before_kb=$(grep Slab /proc/meminfo | awk '{print $2}')
+
+# try to leak 3000 KiB
+for i in {1..102400};
+do
+ echo "012345678901234567890123456789" > "$filter_dir/memcg_path"
+done
+
+after_kb=$(grep Slab /proc/meminfo | awk '{print $2}')
+# allow up to 1500 KiB of growth from other tasks' memory usage
+expected_after_kb_max=$((before_kb + 1500))
+
+if [ "$after_kb" -gt "$expected_after_kb_max" ]
+then
+ echo "memcg_path may be leaking: $before_kb -> $after_kb"
+ exit 1
+else
+ exit 0
+fi
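
The pass/fail decision above hinges on the Slab line of /proc/meminfo, sampled before and after the writes. The same counter read from Python (value is in kB, as in meminfo):

    def slab_kb():
        with open("/proc/meminfo") as f:
            for line in f:
                if line.startswith("Slab:"):
                    return int(line.split()[1])  # kB
        raise RuntimeError("no Slab line in /proc/meminfo")
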
diff --git a/tools/testing/selftests/damon/sysfs_no_op_commit_break.py b/tools/testing/selftests/damon/sysfs_no_op_commit_break.py
new file mode 100755
index 000000000000..2c65cffe6b54
--- /dev/null
+++ b/tools/testing/selftests/damon/sysfs_no_op_commit_break.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+import json
+import os
+import subprocess
+import sys
+
+import _damon_sysfs
+
+def dump_damon_status_dict(pid):
+ try:
+ subprocess.check_output(['which', 'drgn'], stderr=subprocess.DEVNULL)
+ except Exception:
+ return None, 'drgn not found'
+ file_dir = os.path.dirname(os.path.abspath(__file__))
+ dump_script = os.path.join(file_dir, 'drgn_dump_damon_status.py')
+ rc = subprocess.call(['drgn', dump_script, pid, 'damon_dump_output'],
+ stderr=subprocess.DEVNULL)
+
+ if rc != 0:
+ return None, f'drgn fail: return code({rc})'
+ try:
+ with open('damon_dump_output', 'r') as f:
+ return json.load(f), None
+ except Exception as e:
+ return None, 'json.load fail (%s)' % e
+
+def main():
+ kdamonds = _damon_sysfs.Kdamonds(
+ [_damon_sysfs.Kdamond(
+ contexts=[_damon_sysfs.DamonCtx(
+ schemes=[_damon_sysfs.Damos(
+ ops_filters=[
+ _damon_sysfs.DamosFilter(
+ type_='anon',
+ matching=True,
+ allow=True,
+ )
+ ]
+ )],
+ )])]
+ )
+
+ err = kdamonds.start()
+ if err is not None:
+ print('kdamond start failed: %s' % err)
+ exit(1)
+
+ before_commit_status, err = \
+ dump_damon_status_dict(kdamonds.kdamonds[0].pid)
+ if err is not None:
+ print('before-commit status dump failed: %s' % err)
+ exit(1)
+
+ kdamonds.kdamonds[0].commit()
+
+ after_commit_status, err = \
+ dump_damon_status_dict(kdamonds.kdamonds[0].pid)
+ if err is not None:
+ print('after-commit status dump failed: %s' % err)
+ exit(1)
+
+ if before_commit_status != after_commit_status:
+ print(f'before: {json.dumps(before_commit_status, indent=2)}')
+ print(f'after: {json.dumps(after_commit_status, indent=2)}')
+ exit(1)
+
+ kdamonds.stop()
+
+if __name__ == '__main__':
+ main()
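
The no-op check works because json.load() returns plain dicts and lists, for which Python's == is already a deep, structural comparison; no custom walker is needed:

    import json

    before = json.loads('{"contexts": [{"attrs": {"sample_interval": 5000}}]}')
    after = json.loads('{"contexts": [{"attrs": {"sample_interval": 5000}}]}')
    assert before == after  # deep equality, the same test the script applies
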
diff --git a/tools/testing/selftests/damon/sysfs_update_removed_scheme_dir.sh b/tools/testing/selftests/damon/sysfs_update_removed_scheme_dir.sh
index ade35576e748..35fc32beeaf7 100755
--- a/tools/testing/selftests/damon/sysfs_update_removed_scheme_dir.sh
+++ b/tools/testing/selftests/damon/sysfs_update_removed_scheme_dir.sh
@@ -1,14 +1,12 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+source _common.sh
+
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
-if [ $EUID -ne 0 ]
-then
- echo "Run as root"
- exit $ksft_skip
-fi
+check_dependencies
damon_sysfs="/sys/kernel/mm/damon/admin"
if [ ! -d "$damon_sysfs" ]
diff --git a/tools/testing/selftests/dma/Makefile b/tools/testing/selftests/dma/Makefile
deleted file mode 100644
index cd8c5ece1cba..000000000000
--- a/tools/testing/selftests/dma/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-CFLAGS += -I../../../../usr/include/
-CFLAGS += -I../../../../include/
-
-TEST_GEN_PROGS := dma_map_benchmark
-
-include ../lib.mk
diff --git a/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c b/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
index 5d0a809dc2df..fc9694fc4e89 100644
--- a/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
+++ b/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
@@ -15,7 +15,7 @@
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <drm/drm.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#define DEVPATH "/dev/dma_heap"
diff --git a/tools/testing/selftests/drivers/dma-buf/udmabuf.c b/tools/testing/selftests/drivers/dma-buf/udmabuf.c
index 6062723a172e..d78aec662586 100644
--- a/tools/testing/selftests/drivers/dma-buf/udmabuf.c
+++ b/tools/testing/selftests/drivers/dma-buf/udmabuf.c
@@ -16,7 +16,7 @@
#include <sys/mman.h>
#include <linux/memfd.h>
#include <linux/udmabuf.h>
-#include "../../kselftest.h"
+#include "kselftest.h"
#define TEST_PREFIX "drivers/dma-buf/udmabuf"
#define NUM_PAGES 4
@@ -138,7 +138,7 @@ int main(int argc, char *argv[])
void *addr1, *addr2;
ksft_print_header();
- ksft_set_plan(6);
+ ksft_set_plan(7);
devfd = open("/dev/udmabuf", O_RDWR);
if (devfd < 0) {
@@ -250,6 +250,24 @@ int main(int argc, char *argv[])
close(buf);
close(memfd);
+
+ /* same test as above but we pin first before writing to memfd */
+ page_size = getpagesize() * 512; /* 2 MB */
+ size = MEMFD_SIZE * page_size;
+ memfd = create_memfd_with_seals(size, true);
+ buf = create_udmabuf_list(devfd, memfd, size);
+ addr2 = mmap_fd(buf, NUM_PAGES * NUM_ENTRIES * getpagesize());
+ addr1 = mmap_fd(memfd, size);
+ write_to_memfd(addr1, size, 'a');
+ write_to_memfd(addr1, size, 'b');
+ ret = compare_chunks(addr1, addr2, size);
+ if (ret < 0)
+ ksft_test_result_fail("%s: [FAIL,test-7]\n", TEST_PREFIX);
+ else
+ ksft_test_result_pass("%s: [PASS,test-7]\n", TEST_PREFIX);
+
+ close(buf);
+ close(memfd);
close(devfd);
ksft_print_msg("%s: ok\n", TEST_PREFIX);
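
Test 7 above, like the earlier ones, depends on a size-sealed memfd: udmabuf refuses memfds that are not sealed with F_SEAL_SHRINK. The sealing half of that setup is easy to reproduce from Python (sizes illustrative; numeric fallbacks are the standard Linux values in case the running fcntl module predates the named constants):

    import os, fcntl

    F_ADD_SEALS = getattr(fcntl, "F_ADD_SEALS", 1033)
    F_SEAL_SHRINK = getattr(fcntl, "F_SEAL_SHRINK", 0x0002)

    size = 4 * 512 * os.sysconf("SC_PAGE_SIZE")  # illustrative buffer size
    fd = os.memfd_create("udmabuf-test", os.MFD_ALLOW_SEALING)
    os.ftruncate(fd, size)
    fcntl.fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK)  # required by udmabuf
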
diff --git a/tools/testing/selftests/drivers/net/.gitignore b/tools/testing/selftests/drivers/net/.gitignore
index d634d8395d90..3633c7a3ed65 100644
--- a/tools/testing/selftests/drivers/net/.gitignore
+++ b/tools/testing/selftests/drivers/net/.gitignore
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
+gro
napi_id_helper
+psp_responder
diff --git a/tools/testing/selftests/drivers/net/Makefile b/tools/testing/selftests/drivers/net/Makefile
index be780bcb73a3..f5c71d993750 100644
--- a/tools/testing/selftests/drivers/net/Makefile
+++ b/tools/testing/selftests/drivers/net/Makefile
@@ -6,20 +6,38 @@ TEST_INCLUDES := $(wildcard lib/py/*.py) \
../../net/lib.sh \
TEST_GEN_FILES := \
+ gro \
napi_id_helper \
# end of TEST_GEN_FILES
TEST_PROGS := \
+ gro.py \
+ hds.py \
napi_id.py \
+ napi_threaded.py \
netcons_basic.sh \
+ netcons_cmdline.sh \
netcons_fragmented_msg.sh \
netcons_overflow.sh \
netcons_sysdata.sh \
+ netcons_torture.sh \
+ netpoll_basic.py \
ping.py \
+ psp.py \
queues.py \
- stats.py \
+ ring_reconfig.py \
shaper.py \
- hds.py \
+ stats.py \
+ xdp.py \
# end of TEST_PROGS
+# YNL files, must come before the "include ../../lib.mk" below
+YNL_GEN_FILES := psp_responder
+TEST_GEN_FILES += $(YNL_GEN_FILES)
+
include ../../lib.mk
+
+# YNL build
+YNL_GENS := psp
+
+include ../../net/ynl.mk
diff --git a/tools/testing/selftests/drivers/net/bonding/Makefile b/tools/testing/selftests/drivers/net/bonding/Makefile
index 2b10854e4b1e..6c5c60adb5e8 100644
--- a/tools/testing/selftests/drivers/net/bonding/Makefile
+++ b/tools/testing/selftests/drivers/net/bonding/Makefile
@@ -4,21 +4,29 @@
TEST_PROGS := \
bond-arp-interval-causes-panic.sh \
bond-break-lacpdu-tx.sh \
+ bond-eth-type-change.sh \
bond-lladdr-target.sh \
+ bond_ipsec_offload.sh \
+ bond_lacp_prio.sh \
+ bond_macvlan_ipvlan.sh \
+ bond_options.sh \
+ bond_passive_lacp.sh \
dev_addr_lists.sh \
mode-1-recovery-updelay.sh \
mode-2-recovery-updelay.sh \
- bond_options.sh \
- bond-eth-type-change.sh \
- bond_macvlan_ipvlan.sh
+ netcons_over_bonding.sh \
+# end of TEST_PROGS
TEST_FILES := \
- lag_lib.sh \
bond_topo_2d1c.sh \
- bond_topo_3d1c.sh
+ bond_topo_3d1c.sh \
+ lag_lib.sh \
+# end of TEST_FILES
TEST_INCLUDES := \
+ ../../../net/lib.sh \
+ ../lib/sh/lib_netcons.sh \
../../../net/forwarding/lib.sh \
- ../../../net/lib.sh
+# end of TEST_INCLUDES
include ../../../lib.mk
diff --git a/tools/testing/selftests/drivers/net/bonding/bond_ipsec_offload.sh b/tools/testing/selftests/drivers/net/bonding/bond_ipsec_offload.sh
new file mode 100755
index 000000000000..f09e100232c7
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/bonding/bond_ipsec_offload.sh
@@ -0,0 +1,156 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# IPsec over bonding offload test:
+#
+# +----------------+
+# | bond0 |
+# | | |
+# | eth0 eth1 |
+# +---+-------+----+
+#
+# We use netdevsim instead of physical interfaces
+#-------------------------------------------------------------------
+# Example commands
+# ip x s add proto esp src 192.0.2.1 dst 192.0.2.2 \
+# spi 0x07 mode transport reqid 0x07 replay-window 32 \
+# aead 'rfc4106(gcm(aes))' 1234567890123456dcba 128 \
+# sel src 192.0.2.1/24 dst 192.0.2.2/24
+# offload dev bond0 dir out
+# ip x p add dir out src 192.0.2.1/24 dst 192.0.2.2/24 \
+# tmpl proto esp src 192.0.2.1 dst 192.0.2.2 \
+# spi 0x07 mode transport reqid 0x07
+#
+#-------------------------------------------------------------------
+
+lib_dir=$(dirname "$0")
+# shellcheck disable=SC1091
+source "$lib_dir"/../../../net/lib.sh
+srcip=192.0.2.1
+dstip=192.0.2.2
+ipsec0=/sys/kernel/debug/netdevsim/netdevsim0/ports/0/ipsec
+ipsec1=/sys/kernel/debug/netdevsim/netdevsim0/ports/1/ipsec
+active_slave=""
+
+# shellcheck disable=SC2317
+active_slave_changed()
+{
+ local old_active_slave=$1
+ local new_active_slave
+
+ # shellcheck disable=SC2154
+ new_active_slave=$(ip -n "${ns}" -d -j link show bond0 | \
+ jq -r ".[].linkinfo.info_data.active_slave")
+ [ "$new_active_slave" != "$old_active_slave" ] && [ "$new_active_slave" != "null" ]
+}
+
+test_offload()
+{
+ # use ping to exercise the Tx path
+ ip netns exec "$ns" ping -I bond0 -c 3 -W 1 -i 0 "$dstip" >/dev/null
+
+ active_slave=$(ip -n "${ns}" -d -j link show bond0 | \
+ jq -r ".[].linkinfo.info_data.active_slave")
+
+ if [ "$active_slave" = "$nic0" ]; then
+ sysfs=$ipsec0
+ elif [ "$active_slave" = "$nic1" ]; then
+ sysfs=$ipsec1
+ else
+ check_err 1 "bond_ipsec_offload invalid active_slave $active_slave"
+ fi
+
+ # The tx/rx order in sysfs may change after failover
+ grep -q "SA count=2 tx=3" "$sysfs" && grep -q "tx ipaddr=$dstip" "$sysfs"
+ check_err $? "incorrect tx count with link ${active_slave}"
+
+ log_test bond_ipsec_offload "active_slave ${active_slave}"
+}
+
+setup_env()
+{
+ if ! mount | grep -q debugfs; then
+ mount -t debugfs none /sys/kernel/debug/ &> /dev/null
+ defer umount /sys/kernel/debug/
+
+ fi
+
+ # setup netdevsim since dummy/veth dev doesn't have offload support
+ if [ ! -w /sys/bus/netdevsim/new_device ] ; then
+ if ! modprobe -q netdevsim; then
+ echo "SKIP: can't load netdevsim for ipsec offload"
+ # shellcheck disable=SC2154
+ exit "$ksft_skip"
+ fi
+ defer modprobe -r netdevsim
+ fi
+
+ setup_ns ns
+ defer cleanup_ns "$ns"
+}
+
+setup_bond()
+{
+ ip -n "$ns" link add bond0 type bond mode active-backup miimon 100
+ ip -n "$ns" addr add "$srcip/24" dev bond0
+ ip -n "$ns" link set bond0 up
+
+ echo "0 2" | ip netns exec "$ns" tee /sys/bus/netdevsim/new_device >/dev/null
+ nic0=$(ip netns exec "$ns" ls /sys/bus/netdevsim/devices/netdevsim0/net | head -n 1)
+ nic1=$(ip netns exec "$ns" ls /sys/bus/netdevsim/devices/netdevsim0/net | tail -n 1)
+ ip -n "$ns" link set "$nic0" master bond0
+ ip -n "$ns" link set "$nic1" master bond0
+
+ # we didn't create a peer, so make sure we can Tx by adding a permanent
+ # neighbour; this needs to be added after enslaving
+ ip -n "$ns" neigh add "$dstip" dev bond0 lladdr 00:11:22:33:44:55
+
+ # create offloaded SAs, both in and out
+ ip -n "$ns" x p add dir out src "$srcip/24" dst "$dstip/24" \
+ tmpl proto esp src "$srcip" dst "$dstip" spi 9 \
+ mode transport reqid 42
+
+ ip -n "$ns" x p add dir in src "$dstip/24" dst "$srcip/24" \
+ tmpl proto esp src "$dstip" dst "$srcip" spi 9 \
+ mode transport reqid 42
+
+ ip -n "$ns" x s add proto esp src "$srcip" dst "$dstip" spi 9 \
+ mode transport reqid 42 aead "rfc4106(gcm(aes))" \
+ 0x3132333435363738393031323334353664636261 128 \
+ sel src "$srcip/24" dst "$dstip/24" \
+ offload dev bond0 dir out
+
+ ip -n "$ns" x s add proto esp src "$dstip" dst "$srcip" spi 9 \
+ mode transport reqid 42 aead "rfc4106(gcm(aes))" \
+ 0x3132333435363738393031323334353664636261 128 \
+ sel src "$dstip/24" dst "$srcip/24" \
+ offload dev bond0 dir in
+
+ # check that the offload shows up in the ip output
+ lines=$(ip -n "$ns" x s list | grep -c "crypto offload parameters: dev bond0 dir")
+ if [ "$lines" -ne 2 ] ; then
+ check_err 1 "bond_ipsec_offload SA offload missing from list output"
+ fi
+}
+
+trap defer_scopes_cleanup EXIT
+setup_env
+setup_bond
+
+# start Offload testing
+test_offload
+
+# do failover and re-test
+ip -n "$ns" link set "$active_slave" down
+slowwait 5 active_slave_changed "$active_slave"
+test_offload
+
+# make sure the offload gets removed from the driver
+ip -n "$ns" x s flush
+ip -n "$ns" x p flush
+line0=$(grep -c "SA count=0" "$ipsec0")
+line1=$(grep -c "SA count=0" "$ipsec1")
+[ "$line0" -ne 1 ] || [ "$line1" -ne 1 ]
+check_fail $? "bond_ipsec_offload SA not removed from driver"
+
+exit "$EXIT_STATUS"
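
Both this test and bond_lacp_prio.sh below key their assertions off `ip -d -j link show`, extracting fields from the JSON with jq. The equivalent query from Python (namespace and device names are the tests' own):

    import json, subprocess

    def active_slave(ns, bond="bond0"):
        out = subprocess.check_output(
            ["ip", "-n", ns, "-d", "-j", "link", "show", bond])
        return json.loads(out)[0]["linkinfo"]["info_data"]["active_slave"]
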
diff --git a/tools/testing/selftests/drivers/net/bonding/bond_lacp_prio.sh b/tools/testing/selftests/drivers/net/bonding/bond_lacp_prio.sh
new file mode 100755
index 000000000000..a483d505c6a8
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/bonding/bond_lacp_prio.sh
@@ -0,0 +1,108 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Testing if bond lacp per port priority works
+#
+# Switch (s_ns) Backup Switch (b_ns)
+# +-------------------------+ +-------------------------+
+# | bond0 | | bond0 |
+# | + | | + |
+# | eth0 | eth1 | | eth0 | eth1 |
+# | +---+---+ | | +---+---+ |
+# | | | | | | | |
+# +-------------------------+ +-------------------------+
+# | | | |
+# +-----------------------------------------------------+
+# | | | | | |
+# | +-------+---------+---------+-------+ |
+# | eth0 eth1 | eth2 eth3 |
+# | + |
+# | bond0 |
+# +-----------------------------------------------------+
+# Client (c_ns)
+
+lib_dir=$(dirname "$0")
+# shellcheck disable=SC1091
+source "$lib_dir"/../../../net/lib.sh
+
+setup_links()
+{
+ # shellcheck disable=SC2154
+ ip -n "${c_ns}" link add eth0 type veth peer name eth0 netns "${s_ns}"
+ ip -n "${c_ns}" link add eth1 type veth peer name eth1 netns "${s_ns}"
+ # shellcheck disable=SC2154
+ ip -n "${c_ns}" link add eth2 type veth peer name eth0 netns "${b_ns}"
+ ip -n "${c_ns}" link add eth3 type veth peer name eth1 netns "${b_ns}"
+
+ ip -n "${c_ns}" link add bond0 type bond mode 802.3ad miimon 100 \
+ lacp_rate fast ad_select actor_port_prio
+ ip -n "${s_ns}" link add bond0 type bond mode 802.3ad miimon 100 \
+ lacp_rate fast
+ ip -n "${b_ns}" link add bond0 type bond mode 802.3ad miimon 100 \
+ lacp_rate fast
+
+ ip -n "${c_ns}" link set eth0 master bond0
+ ip -n "${c_ns}" link set eth1 master bond0
+ ip -n "${c_ns}" link set eth2 master bond0
+ ip -n "${c_ns}" link set eth3 master bond0
+ ip -n "${s_ns}" link set eth0 master bond0
+ ip -n "${s_ns}" link set eth1 master bond0
+ ip -n "${b_ns}" link set eth0 master bond0
+ ip -n "${b_ns}" link set eth1 master bond0
+
+ ip -n "${c_ns}" link set bond0 up
+ ip -n "${s_ns}" link set bond0 up
+ ip -n "${b_ns}" link set bond0 up
+}
+
+test_port_prio_setting()
+{
+ RET=0
+ ip -n "${c_ns}" link set eth0 type bond_slave actor_port_prio 1000
+ prio=$(cmd_jq "ip -n ${c_ns} -d -j link show eth0" \
+ ".[].linkinfo.info_slave_data.actor_port_prio")
+ [ "$prio" -ne 1000 ] && RET=1
+ ip -n "${c_ns}" link set eth2 type bond_slave actor_port_prio 10
+ prio=$(cmd_jq "ip -n ${c_ns} -d -j link show eth2" \
+ ".[].linkinfo.info_slave_data.actor_port_prio")
+ [ "$prio" -ne 10 ] && RET=1
+}
+
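+# With ad_select=actor_port_prio, the aggregator whose ports carry the higher
+# actor_port_prio (set per slave above) should be preferred; flapping a link
+# below re-runs aggregator selection and should land on the expected slave.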
+test_agg_reselect()
+{
+ local bond_agg_id slave_agg_id
+ local expect_slave="$1"
+ RET=0
+
+ # Trigger link state change to reselect the aggregator
+ ip -n "${c_ns}" link set eth1 down
+ sleep 0.5
+ ip -n "${c_ns}" link set eth1 up
+ sleep 0.5
+
+ bond_agg_id=$(cmd_jq "ip -n ${c_ns} -d -j link show bond0" \
+ ".[].linkinfo.info_data.ad_info.aggregator")
+ slave_agg_id=$(cmd_jq "ip -n ${c_ns} -d -j link show $expect_slave" \
+ ".[].linkinfo.info_slave_data.ad_aggregator_id")
+ # shellcheck disable=SC2034
+ [ "${bond_agg_id}" -ne "${slave_agg_id}" ] && \
+ RET=1
+}
+
+trap cleanup_all_ns EXIT
+setup_ns c_ns s_ns b_ns
+setup_links
+
+test_port_prio_setting
+log_test "bond 802.3ad" "actor_port_prio setting"
+
+test_agg_reselect eth0
+log_test "bond 802.3ad" "actor_port_prio select"
+
+# Change the actor port prio and re-test
+ip -n "${c_ns}" link set eth0 type bond_slave actor_port_prio 10
+ip -n "${c_ns}" link set eth2 type bond_slave actor_port_prio 1000
+test_agg_reselect eth2
+log_test "bond 802.3ad" "actor_port_prio switch"
+
+exit "${EXIT_STATUS}"
diff --git a/tools/testing/selftests/drivers/net/bonding/bond_macvlan_ipvlan.sh b/tools/testing/selftests/drivers/net/bonding/bond_macvlan_ipvlan.sh
index c4711272fe45..559f300f965a 100755
--- a/tools/testing/selftests/drivers/net/bonding/bond_macvlan_ipvlan.sh
+++ b/tools/testing/selftests/drivers/net/bonding/bond_macvlan_ipvlan.sh
@@ -30,6 +30,7 @@ check_connection()
local message=${3}
RET=0
+ sleep 0.25
ip netns exec ${ns} ping ${target} -c 4 -i 0.1 &>/dev/null
check_err $? "ping failed"
log_test "${bond_mode}/${xvlan_type}_${xvlan_mode}: ${message}"
diff --git a/tools/testing/selftests/drivers/net/bonding/bond_options.sh b/tools/testing/selftests/drivers/net/bonding/bond_options.sh
index 7bc148889ca7..187b478d0ddf 100755
--- a/tools/testing/selftests/drivers/net/bonding/bond_options.sh
+++ b/tools/testing/selftests/drivers/net/bonding/bond_options.sh
@@ -7,6 +7,8 @@ ALL_TESTS="
prio
arp_validate
num_grat_arp
+ fail_over_mac
+ vlan_over_bond
"
lib_dir=$(dirname "$0")
@@ -352,8 +354,8 @@ garp_test()
exp_num=$(echo "${param}" | cut -f6 -d ' ')
active_slave=$(cmd_jq "ip -n ${s_ns} -d -j link show bond0" ".[].linkinfo.info_data.active_slave")
- slowwait_for_counter $((exp_num + 5)) $exp_num \
- tc_rule_handle_stats_get "dev s${active_slave#eth} ingress" 101 ".packets" "-n ${g_ns}"
+ slowwait_for_counter $((exp_num + 5)) $exp_num tc_rule_handle_stats_get \
+ "dev s${active_slave#eth} ingress" 101 ".packets" "-n ${g_ns}" &> /dev/null
# check result
real_num=$(tc_rule_handle_stats_get "dev s${active_slave#eth} ingress" 101 ".packets" "-n ${g_ns}")
@@ -376,6 +378,197 @@ num_grat_arp()
done
}
+check_all_mac_same()
+{
+ RET=0
+ # all slaves should have the same MAC address (the first port's MAC)
+ local bond_mac=$(ip -n "$s_ns" -j link show bond0 | jq -r '.[]["address"]')
+ local eth0_mac=$(ip -n "$s_ns" -j link show eth0 | jq -r '.[]["address"]')
+ local eth1_mac=$(ip -n "$s_ns" -j link show eth1 | jq -r '.[]["address"]')
+ local eth2_mac=$(ip -n "$s_ns" -j link show eth2 | jq -r '.[]["address"]')
+ if [ "$bond_mac" != "${mac[0]}" ] || [ "$eth0_mac" != "$bond_mac" ] || \
+ [ "$eth1_mac" != "$bond_mac" ] || [ "$eth2_mac" != "$bond_mac" ]; then
+ RET=1
+ fi
+}
+
+check_bond_mac_same_with_first()
+{
+ RET=0
+ # the bond MAC address should match the first added slave's
+ local bond_mac=$(ip -n "$s_ns" -j link show bond0 | jq -r '.[]["address"]')
+ if [ "$bond_mac" != "${mac[0]}" ]; then
+ RET=1
+ fi
+}
+
+check_bond_mac_same_with_active()
+{
+ RET=0
+ # the bond MAC address should match the active slave's
+ local bond_mac=$(ip -n "$s_ns" -j link show bond0 | jq -r '.[]["address"]')
+ local active_slave=$(cmd_jq "ip -n ${s_ns} -d -j link show bond0" ".[].linkinfo.info_data.active_slave")
+ local active_slave_mac=$(ip -n "$s_ns" -j link show "$active_slave" | jq -r '.[]["address"]')
+ if [ "$bond_mac" != "$active_slave_mac" ]; then
+ RET=1
+ fi
+}
+
+check_backup_slave_mac_not_change()
+{
+ RET=0
+ # backup slave's mac address is not changed
+ if ip -n "$s_ns" -d -j link show type bond_slave | jq -e '.[]
+ | select(.linkinfo.info_slave_data.state=="BACKUP")
+ | select(.address != .linkinfo.info_slave_data.perm_hwaddr)' &> /dev/null; then
+ RET=1
+ fi
+}
+
+check_backup_slave_mac_inherit()
+{
+ local backup_mac
+ RET=0
+
+ # backup slaves should use mac[1] or mac[2]
+ local backup_macs=$(ip -n "$s_ns" -d -j link show type bond_slave | \
+ jq -r '.[] | select(.linkinfo.info_slave_data.state=="BACKUP") | .address')
+ for backup_mac in $backup_macs; do
+ if [ "$backup_mac" != "${mac[1]}" ] && [ "$backup_mac" != "${mac[2]}" ]; then
+ RET=1
+ fi
+ done
+}
+
+check_first_slave_random_mac()
+{
+ RET=0
+ # remove the first added slave and add it back
+ ip -n "$s_ns" link set eth0 nomaster
+ ip -n "$s_ns" link set eth0 master bond0
+
+ # the first slave should use a random MAC address
+ eth0_mac=$(ip -n "$s_ns" -j link show eth0 | jq -r '.[]["address"]')
+ [ "$eth0_mac" = "${mac[0]}" ] && RET=1
+ log_test "bond fail_over_mac follow" "random first slave mac"
+
+ # remove the first slave; its permanent MAC address should be restored
+ ip -n "$s_ns" link set eth0 nomaster
+ eth0_mac=$(ip -n "$s_ns" -j link show eth0 | jq -r '.[]["address"]')
+ [ "$eth0_mac" != "${mac[0]}" ] && RET=1
+}
+
+do_active_backup_failover()
+{
+ local active_slave=$(cmd_jq "ip -n ${s_ns} -d -j link show bond0" ".[].linkinfo.info_data.active_slave")
+ ip -n ${s_ns} link set ${active_slave} down
+ slowwait 2 active_slave_changed $active_slave
+ ip -n ${s_ns} link set ${active_slave} up
+}
+
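+# fail_over_mac option values are passed numerically below: 0 is "none",
+# 1 is "active" and 2 is "follow", matching the comments in each block.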
+fail_over_mac()
+{
+ # Bring down the first interface on the switch to force the bond to
+ # select another active interface instead of the first one that joined.
+ ip -n "$g_ns" link set s0 down
+
+ # fail_over_mac none
+ bond_reset "mode active-backup miimon 100 fail_over_mac 0"
+ check_all_mac_same
+ log_test "fail_over_mac 0" "all slaves have same mac"
+ do_active_backup_failover
+ check_all_mac_same
+ log_test "fail_over_mac 0" "failover: all slaves have same mac"
+
+ # fail_over_mac active
+ bond_reset "mode active-backup miimon 100 fail_over_mac 1"
+ check_bond_mac_same_with_active
+ log_test "fail_over_mac 1" "bond mac is same with active slave mac"
+ check_backup_slave_mac_not_change
+ log_test "fail_over_mac 1" "backup slave mac is not changed"
+ do_active_backup_failover
+ check_bond_mac_same_with_active
+ log_test "fail_over_mac 1" "failover: bond mac is same with active slave mac"
+ check_backup_slave_mac_not_change
+ log_test "fail_over_mac 1" "failover: backup slave mac is not changed"
+
+ # fail_over_mac follow
+ bond_reset "mode active-backup miimon 100 fail_over_mac 2"
+ check_bond_mac_same_with_first
+ log_test "fail_over_mac 2" "bond mac is same with first slave mac"
+ check_bond_mac_same_with_active
+ log_test "fail_over_mac 2" "bond mac is same with active slave mac"
+ check_backup_slave_mac_inherit
+ log_test "fail_over_mac 2" "backup slave mac inherit"
+ do_active_backup_failover
+ check_bond_mac_same_with_first
+ log_test "fail_over_mac 2" "failover: bond mac is same with first slave mac"
+ check_bond_mac_same_with_active
+ log_test "fail_over_mac 2" "failover: bond mac is same with active slave mac"
+ check_backup_slave_mac_inherit
+ log_test "fail_over_mac 2" "failover: backup slave mac inherit"
+ check_first_slave_random_mac
+ log_test "fail_over_mac 2" "first slave mac random"
+}
+
+vlan_over_bond_arp()
+{
+ local mode="$1"
+ RET=0
+
+ bond_reset "mode $mode arp_interval 100 arp_ip_target 192.0.3.10"
+ ip -n "${s_ns}" link add bond0.3 link bond0 type vlan id 3
+ ip -n "${s_ns}" link set bond0.3 up
+ ip -n "${s_ns}" addr add 192.0.3.1/24 dev bond0.3
+ ip -n "${s_ns}" addr add 2001:db8::3:1/64 dev bond0.3
+
+ slowwait_for_counter 5 5 tc_rule_handle_stats_get \
+ "dev eth0.3 ingress" 101 ".packets" "-n ${c_ns}" &> /dev/null || RET=1
+ log_test "vlan over bond arp" "$mode"
+}
+
+vlan_over_bond_ns()
+{
+ local mode="$1"
+ RET=0
+
+ if skip_ns; then
+ log_test_skip "vlan_over_bond ns" "$mode"
+ return 0
+ fi
+
+ bond_reset "mode $mode arp_interval 100 ns_ip6_target 2001:db8::3:10"
+ ip -n "${s_ns}" link add bond0.3 link bond0 type vlan id 3
+ ip -n "${s_ns}" link set bond0.3 up
+ ip -n "${s_ns}" addr add 192.0.3.1/24 dev bond0.3
+ ip -n "${s_ns}" addr add 2001:db8::3:1/64 dev bond0.3
+
+ slowwait_for_counter 5 5 tc_rule_handle_stats_get \
+ "dev eth0.3 ingress" 102 ".packets" "-n ${c_ns}" &> /dev/null || RET=1
+ log_test "vlan over bond ns" "$mode"
+}
+
+vlan_over_bond()
+{
+ # add vlan 3 for client
+ ip -n "${c_ns}" link add eth0.3 link eth0 type vlan id 3
+ ip -n "${c_ns}" link set eth0.3 up
+ ip -n "${c_ns}" addr add 192.0.3.10/24 dev eth0.3
+ ip -n "${c_ns}" addr add 2001:db8::3:10/64 dev eth0.3
+
+ # Add tc rules to count the VLAN pkts
+ tc -n "${c_ns}" qdisc add dev eth0.3 clsact
+ tc -n "${c_ns}" filter add dev eth0.3 ingress protocol arp \
+ handle 101 flower skip_hw arp_op request \
+ arp_sip 192.0.3.1 arp_tip 192.0.3.10 action pass
+ tc -n "${c_ns}" filter add dev eth0.3 ingress protocol ipv6 \
+ handle 102 flower skip_hw ip_proto icmpv6 \
+ type 135 src_ip 2001:db8::3:1 action pass
+
+ vlan_over_bond_arp "active-backup"
+ vlan_over_bond_ns "active-backup"
+}
+
trap cleanup EXIT
setup_prepare
diff --git a/tools/testing/selftests/drivers/net/bonding/bond_passive_lacp.sh b/tools/testing/selftests/drivers/net/bonding/bond_passive_lacp.sh
new file mode 100755
index 000000000000..9c3b089813df
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/bonding/bond_passive_lacp.sh
@@ -0,0 +1,105 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test if a bond interface works with lacp_active=off.
+
+# shellcheck disable=SC2034
+REQUIRE_MZ=no
+NUM_NETIFS=0
+lib_dir=$(dirname "$0")
+# shellcheck disable=SC1091
+source "$lib_dir"/../../../net/forwarding/lib.sh
+
+# shellcheck disable=SC2317
+check_port_state()
+{
+ local netns=$1
+ local port=$2
+ local state=$3
+
+ ip -n "${netns}" -d -j link show "$port" | \
+ jq -e ".[].linkinfo.info_slave_data.ad_actor_oper_port_state_str | index(\"${state}\") != null" > /dev/null
+}
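+# The state argument is matched against the flag strings iproute2 reports in
+# ad_actor_oper_port_state_str, e.g. "active", "distributing" or "expired".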
+
+check_pkt_count()
+{
+ RET=0
+ local ns="$1"
+ local iface="$2"
+
+ # LACPDUs are expected roughly once per 30s here, so wait up to 65s for 2 packets
+ slowwait_for_counter 65 2 tc_rule_handle_stats_get \
+ "dev ${iface} egress" 101 ".packets" "-n ${ns}" &> /dev/null
+}
+
+setup() {
+ setup_ns c_ns s_ns
+
+ # shellcheck disable=SC2154
+ ip -n "${c_ns}" link add eth0 type veth peer name eth0 netns "${s_ns}"
+ ip -n "${c_ns}" link add eth1 type veth peer name eth1 netns "${s_ns}"
+
+ # Add tc filter to count the pkts
+ tc -n "${c_ns}" qdisc add dev eth0 clsact
+ tc -n "${c_ns}" filter add dev eth0 egress handle 101 protocol 0x8809 matchall action pass
+ tc -n "${s_ns}" qdisc add dev eth1 clsact
+ tc -n "${s_ns}" filter add dev eth1 egress handle 101 protocol 0x8809 matchall action pass
+
+ ip -n "${s_ns}" link add bond0 type bond mode 802.3ad lacp_active on lacp_rate fast
+ ip -n "${s_ns}" link set eth0 master bond0
+ ip -n "${s_ns}" link set eth1 master bond0
+
+ ip -n "${c_ns}" link add bond0 type bond mode 802.3ad lacp_active off lacp_rate fast
+ ip -n "${c_ns}" link set eth0 master bond0
+ ip -n "${c_ns}" link set eth1 master bond0
+
+}
+
+trap cleanup_all_ns EXIT
+setup
+
+# The bond will send 2 LACPDU pkts during init time, so wait at least 2s
+# after bringing the interface up
+ip -n "${c_ns}" link set bond0 up
+sleep 2
+
+# 1. The passive side shouldn't send LACPDU.
+check_pkt_count "${c_ns}" "eth0" && RET=1
+log_test "802.3ad lacp_active off" "init port"
+
+ip -n "${s_ns}" link set bond0 up
+# 2. The passive side should not have the 'active' flag.
+RET=0
+slowwait 2 check_port_state "${c_ns}" "eth0" "active" && RET=1
+log_test "802.3ad lacp_active off" "port state active"
+
+# 3. The active side should have the 'active' flag.
+RET=0
+slowwait 2 check_port_state "${s_ns}" "eth0" "active" || RET=1
+log_test "802.3ad lacp_active on" "port state active"
+
+# 4. Make sure the connection is not expired.
+RET=0
+slowwait 5 check_port_state "${s_ns}" "eth0" "distributing"
+slowwait 10 check_port_state "${s_ns}" "eth0" "expired" && RET=1
+log_test "bond 802.3ad lacp_active off" "port connection"
+
+# After testing, disconnect one port on each side to check the state.
+ip -n "${s_ns}" link set eth0 nomaster
+ip -n "${s_ns}" link set eth0 up
+ip -n "${c_ns}" link set eth1 nomaster
+ip -n "${c_ns}" link set eth1 up
+# Due to Periodic Machine and Rx Machine state changes, the bond will still
+# send LACPDU pkts for a few seconds. Sleep at least 5s to make sure
+# negotiation has finished
+sleep 5
+
+# 5. The active side should keep sending LACPDU.
+check_pkt_count "${s_ns}" "eth1" || RET=1
+log_test "bond 802.3ad lacp_active on" "port pkt after disconnect"
+
+# 6. The passive side shouldn't send LACPDU anymore.
+check_pkt_count "${c_ns}" "eth0" && RET=1
+log_test "bond 802.3ad lacp_active off" "port pkt after disconnect"
+
+exit "$EXIT_STATUS"
diff --git a/tools/testing/selftests/drivers/net/bonding/bond_topo_2d1c.sh b/tools/testing/selftests/drivers/net/bonding/bond_topo_2d1c.sh
index 195ef83cfbf1..167aa4a4a12a 100644
--- a/tools/testing/selftests/drivers/net/bonding/bond_topo_2d1c.sh
+++ b/tools/testing/selftests/drivers/net/bonding/bond_topo_2d1c.sh
@@ -39,6 +39,8 @@ g_ip4="192.0.2.254"
s_ip6="2001:db8::1"
c_ip6="2001:db8::10"
g_ip6="2001:db8::254"
+mac[0]="00:0a:0b:0c:0d:01"
+mac[1]="00:0a:0b:0c:0d:02"
gateway_create()
{
@@ -62,6 +64,7 @@ server_create()
for i in $(seq 0 1); do
ip -n ${s_ns} link add eth${i} type veth peer name s${i} netns ${g_ns}
+ ip -n "${s_ns}" link set "eth${i}" addr "${mac[$i]}"
ip -n ${g_ns} link set s${i} up
ip -n ${g_ns} link set s${i} master br0
diff --git a/tools/testing/selftests/drivers/net/bonding/bond_topo_3d1c.sh b/tools/testing/selftests/drivers/net/bonding/bond_topo_3d1c.sh
index 3a1333d9a85b..23a2932301cc 100644
--- a/tools/testing/selftests/drivers/net/bonding/bond_topo_3d1c.sh
+++ b/tools/testing/selftests/drivers/net/bonding/bond_topo_3d1c.sh
@@ -26,6 +26,7 @@
# +-------------------------------------+
source bond_topo_2d1c.sh
+mac[2]="00:0a:0b:0c:0d:03"
setup_prepare()
{
@@ -36,6 +37,7 @@ setup_prepare()
# Add the extra device as we use 3 down links for bond0
local i=2
ip -n ${s_ns} link add eth${i} type veth peer name s${i} netns ${g_ns}
+ ip -n "${s_ns}" link set "eth${i}" addr "${mac[$i]}"
ip -n ${g_ns} link set s${i} up
ip -n ${g_ns} link set s${i} master br0
ip -n ${s_ns} link set eth${i} master bond0
diff --git a/tools/testing/selftests/drivers/net/bonding/config b/tools/testing/selftests/drivers/net/bonding/config
index dad4e5fda4db..991494376223 100644
--- a/tools/testing/selftests/drivers/net/bonding/config
+++ b/tools/testing/selftests/drivers/net/bonding/config
@@ -1,11 +1,21 @@
CONFIG_BONDING=y
CONFIG_BRIDGE=y
+CONFIG_CONFIGFS_FS=y
CONFIG_DUMMY=y
+CONFIG_INET_ESP=y
+CONFIG_INET_ESP_OFFLOAD=y
CONFIG_IPV6=y
-CONFIG_MACVLAN=y
CONFIG_IPVLAN=y
+CONFIG_MACVLAN=y
CONFIG_NET_ACT_GACT=y
CONFIG_NET_CLS_FLOWER=y
+CONFIG_NET_CLS_MATCHALL=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_NETCONSOLE_EXTENDED_LOG=y
+CONFIG_NETDEVSIM=m
CONFIG_NET_SCH_INGRESS=y
CONFIG_NLMON=y
CONFIG_VETH=y
+CONFIG_VLAN_8021Q=m
+CONFIG_XFRM_USER=m
diff --git a/tools/testing/selftests/drivers/net/bonding/netcons_over_bonding.sh b/tools/testing/selftests/drivers/net/bonding/netcons_over_bonding.sh
new file mode 100755
index 000000000000..477cc9379500
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/bonding/netcons_over_bonding.sh
@@ -0,0 +1,361 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# This selftest exercises trying to have multiple netpoll users at the same
+# time.
+#
+# This selftest contains multiple small tests; the goal is to bring up
+# interfaces with bonding and netconsole in different orders in order
+# to catch any possible issue.
+#
+# The main test consists of four interfaces created using netdevsim; two
+# of them are bonded to serve as the netconsole's transmit interface. The
+# remaining two interfaces are similarly bonded and assigned to a separate
+# network namespace, which acts as the receive interface, where socat monitors
+# for incoming messages.
+#
+# A netconsole message is then sent to ensure it is properly received across
+# this configuration.
+#
+# Later, a few other tests run to exercise the cases where bonding and
+# netconsole must not coexist.
+#
+# The test's objective is to exercise netpoll usage when managed simultaneously
+# by multiple subsystems (netconsole and bonding).
+#
+# Author: Breno Leitao <leitao@debian.org>
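+#
+# For reference, a dynamic netconsole target lives in configfs and is driven
+# by a handful of attribute files (rough sketch; the lib_netcons.sh helpers
+# used below wrap these steps, and the target name is illustrative):
+#   mkdir /sys/kernel/config/netconsole/cmdline0
+#   echo "$SRCIF" > /sys/kernel/config/netconsole/cmdline0/dev_name
+#   echo "$SRCIP" > /sys/kernel/config/netconsole/cmdline0/local_ip
+#   echo "$DSTIP" > /sys/kernel/config/netconsole/cmdline0/remote_ip
+#   echo 1 > /sys/kernel/config/netconsole/cmdline0/enabled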
+
+set -euo pipefail
+
+SCRIPTDIR=$(dirname "$(readlink -e "${BASH_SOURCE[0]}")")
+
+source "${SCRIPTDIR}"/../lib/sh/lib_netcons.sh
+
+modprobe netdevsim 2> /dev/null || true
+modprobe netconsole 2> /dev/null || true
+modprobe bonding 2> /dev/null || true
+modprobe veth 2> /dev/null || true
+
+# The content of kmsg will be saved to the following file
+OUTPUT_FILE="/tmp/${TARGET}"
+
+# Check for basic system dependency and exit if not found
+check_for_dependencies
+# Set current loglevel to KERN_INFO(6), and default to KERN_NOTICE(5)
+echo "6 5" > /proc/sys/kernel/printk
+# Remove the namespace, interfaces and netconsole target on exit
+trap cleanup_bond EXIT
+
+FORMAT="extended"
+IP_VERSION="ipv4"
+VETH0="veth"$(( RANDOM % 256))
+VETH1="veth"$((256 + RANDOM % 256))
+TXNS=""
+RXNS=""
+
+# Create "bond_tx_XX" and "bond_rx_XX" interfaces, and set DSTIF and SRCIF with
+# the bonding interfaces
+function setup_bonding_ifaces() {
+ local RAND=$(( RANDOM % 100 ))
+ BOND_TX_MAIN_IF="bond_tx_$RAND"
+ BOND_RX_MAIN_IF="bond_rx_$RAND"
+
+ # Setup TX
+ if ! ip -n "${TXNS}" link add "${BOND_TX_MAIN_IF}" type bond mode balance-rr
+ then
+ echo "Failed to create bond TX interface. Is CONFIG_BONDING set?" >&2
+ # only clean nsim ifaces and namespace. Nothing else has been
+ # initialized
+ cleanup_bond_nsim
+ trap - EXIT
+ exit "${ksft_skip}"
+ fi
+
+ # create_netdevsim() brings the interface up, but it needs to be down
+ # before being enslaved.
+ ip -n "${TXNS}" \
+ link set "${BOND_TX1_SLAVE_IF}" down
+ ip -n "${TXNS}" \
+ link set "${BOND_TX2_SLAVE_IF}" down
+ ip -n "${TXNS}" \
+ link set "${BOND_TX1_SLAVE_IF}" master "${BOND_TX_MAIN_IF}"
+ ip -n "${TXNS}" \
+ link set "${BOND_TX2_SLAVE_IF}" master "${BOND_TX_MAIN_IF}"
+ ip -n "${TXNS}" \
+ link set "${BOND_TX_MAIN_IF}" up
+
+ # Setup RX
+ ip -n "${RXNS}" \
+ link add "${BOND_RX_MAIN_IF}" type bond mode balance-rr
+ ip -n "${RXNS}" \
+ link set "${BOND_RX1_SLAVE_IF}" down
+ ip -n "${RXNS}" \
+ link set "${BOND_RX2_SLAVE_IF}" down
+ ip -n "${RXNS}" \
+ link set "${BOND_RX1_SLAVE_IF}" master "${BOND_RX_MAIN_IF}"
+ ip -n "${RXNS}" \
+ link set "${BOND_RX2_SLAVE_IF}" master "${BOND_RX_MAIN_IF}"
+ ip -n "${RXNS}" \
+ link set "${BOND_RX_MAIN_IF}" up
+
+ export DSTIF="${BOND_RX_MAIN_IF}"
+ export SRCIF="${BOND_TX_MAIN_IF}"
+}
+
+# Create 4 netdevsim interfaces. Two of them will be enslaved to the TX
+# bonding iface and the other two to the RX one (in the other namespace)
+function create_ifaces_bond() {
+ BOND_TX1_SLAVE_IF=$(create_netdevsim "${NSIM_BOND_TX_1}" "${TXNS}")
+ BOND_TX2_SLAVE_IF=$(create_netdevsim "${NSIM_BOND_TX_2}" "${TXNS}")
+ BOND_RX1_SLAVE_IF=$(create_netdevsim "${NSIM_BOND_RX_1}" "${RXNS}")
+ BOND_RX2_SLAVE_IF=$(create_netdevsim "${NSIM_BOND_RX_2}" "${RXNS}")
+}
+
+# Link the BOND_TX netdevsim interfaces to the BOND_RX ones
+function link_ifaces_bond() {
+ local BOND_TX1_SLAVE_IFIDX
+ local BOND_TX2_SLAVE_IFIDX
+ local BOND_RX1_SLAVE_IFIDX
+ local BOND_RX2_SLAVE_IFIDX
+ local TXNS_FD
+ local RXNS_FD
+
+ BOND_TX1_SLAVE_IFIDX=$(ip netns exec "${TXNS}" \
+ cat /sys/class/net/"$BOND_TX1_SLAVE_IF"/ifindex)
+ BOND_TX2_SLAVE_IFIDX=$(ip netns exec "${TXNS}" \
+ cat /sys/class/net/"$BOND_TX2_SLAVE_IF"/ifindex)
+ BOND_RX1_SLAVE_IFIDX=$(ip netns exec "${RXNS}" \
+ cat /sys/class/net/"$BOND_RX1_SLAVE_IF"/ifindex)
+ BOND_RX2_SLAVE_IFIDX=$(ip netns exec "${RXNS}" \
+ cat /sys/class/net/"$BOND_RX2_SLAVE_IF"/ifindex)
+
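+ # Open the two netns files and let bash pick free fds; writes to
+ # $NSIM_DEV_SYS_LINK below take "<netns fd>:<ifindex>" pairs.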
+ exec {TXNS_FD}</var/run/netns/"${TXNS}"
+ exec {RXNS_FD}</var/run/netns/"${RXNS}"
+
+ # Linking TX ifaces to the RX ones (on the other namespace)
+ echo "${TXNS_FD}:$BOND_TX1_SLAVE_IFIDX $RXNS_FD:$BOND_RX1_SLAVE_IFIDX" \
+ > "$NSIM_DEV_SYS_LINK"
+ echo "${TXNS_FD}:$BOND_TX2_SLAVE_IFIDX $RXNS_FD:$BOND_RX2_SLAVE_IFIDX" \
+ > "$NSIM_DEV_SYS_LINK"
+
+ exec {TXNS_FD}<&-
+ exec {RXNS_FD}<&-
+}
+
+function create_all_ifaces() {
+ # setup_ns function is coming from lib.sh
+ setup_ns TXNS RXNS
+ export NAMESPACE="${RXNS}"
+
+ # Create two interfaces for RX and two for TX
+ create_ifaces_bond
+ # Link the netdevsim ifaces
+ link_ifaces_bond
+}
+
+# configure DSTIF and SRCIF IPs
+function configure_ifaces_ips() {
+ local IP_VERSION=${1:-"ipv4"}
+ select_ipv4_or_ipv6 "${IP_VERSION}"
+
+ ip -n "${RXNS}" addr add "${DSTIP}"/24 dev "${DSTIF}"
+ ip -n "${RXNS}" link set "${DSTIF}" up
+
+ ip -n "${TXNS}" addr add "${SRCIP}"/24 dev "${SRCIF}"
+ ip -n "${TXNS}" link set "${SRCIF}" up
+}
+
+function test_enable_netpoll_on_enslaved_iface() {
+ echo 0 > "${NETCONS_PATH}"/enabled
+
+ # At this stage, BOND_TX1_SLAVE_IF is enslaved to BOND_TX_MAIN_IF, and
+ # linked to BOND_RX1_SLAVE_IF inside the namespace.
+ echo "${BOND_TX1_SLAVE_IF}" > "${NETCONS_PATH}"/dev_name
+
+ # This should fail with the following message in dmesg:
+ # netpoll: netconsole: ethX is a slave device, aborting
+ set +e
+ enable_netcons_ns 2> /dev/null
+ set -e
+
+ if [[ $(cat "${NETCONS_PATH}"/enabled) -eq 1 ]]
+ then
+ echo "test failed: Bonding and netpoll cannot co-exists." >&2
+ exit "${ksft_fail}"
+ fi
+}
+
+function test_delete_bond_and_reenable_target() {
+ ip -n "${TXNS}" \
+ link delete "${BOND_TX_MAIN_IF}" type bond
+
+ # BOND_TX1_SLAVE_IF is not attached to a bond interface anymore,
+ # so netpoll can be plugged in there
+ echo "${BOND_TX1_SLAVE_IF}" > "${NETCONS_PATH}"/dev_name
+
+ # this should work, since the interface is not enslaved
+ enable_netcons_ns
+
+ if [[ $(cat "${NETCONS_PATH}"/enabled) -eq 0 ]]
+ then
+ echo "test failed: Unable to start netpoll on an unbond iface." >&2
+ exit "${ksft_fail}"
+ fi
+}
+
+# Send a netconsole message to the netconsole target
+function test_send_netcons_msg_through_bond_iface() {
+ # Listen for netconsole port inside the namespace and
+ # destination interface
+ listen_port_and_save_to "${OUTPUT_FILE}" "${IP_VERSION}" &
+ # Wait for socat to start and listen to the port.
+ wait_for_port "${RXNS}" "${PORT}" "${IP_VERSION}"
+ # Send the message
+ echo "${MSG}: ${TARGET}" > /dev/kmsg
+ # Wait until socat saves the file to disk
+ busywait "${BUSYWAIT_TIMEOUT}" test -s "${OUTPUT_FILE}"
+ # Make sure the message was received in the dst part
+ # and exit
+ validate_result "${OUTPUT_FILE}" "${FORMAT}"
+ # kill socat in case it is still running
+ pkill_socat
+}
+
+# BOND_TX1_SLAVE_IF has netconsole enabled on it; enslave it to
+# BOND_TX_MAIN_IF. Since BOND_TX_MAIN_IF was deleted, recreate it first
+function test_enslave_netcons_enabled_iface {
+ # netconsole should still be enabled from the previous test
+ if [[ $(cat "${NETCONS_PATH}"/enabled) -eq 0 ]]
+ then
+ echo "test failed: netconsole expected to be enabled against BOND_TX1_SLAVE_IF" >&2
+ exit "${ksft_fail}"
+ fi
+
+ # recreate the bonding iface; it got deleted by the previous
+ # test (test_delete_bond_and_reenable_target)
+ ip -n "${TXNS}" \
+ link add "${BOND_TX_MAIN_IF}" type bond mode balance-rr
+
+ # the sub-interface needs to be down before being attached to the bond.
+ # This will also disable netconsole.
+ ip -n "${TXNS}" \
+ link set "${BOND_TX1_SLAVE_IF}" down
+ ip -n "${TXNS}" \
+ link set "${BOND_TX1_SLAVE_IF}" master "${BOND_TX_MAIN_IF}"
+ ip -n "${TXNS}" \
+ link set "${BOND_TX_MAIN_IF}" up
+
+ # netconsole should have been disabled when the interface went down
+ if [[ $(cat "${NETCONS_PATH}"/enabled) -eq 1 ]]
+ then
+ echo "test failed: Device is part of a bond iface, cannot have netcons enabled" >&2
+ exit "${ksft_fail}"
+ fi
+}
+
+# Get netconsole enabled on a bonding interface and attach a second
+# sub-interface.
+function test_enslave_iface_to_bond {
+ # BOND_TX_MAIN_IF has only BOND_TX1_SLAVE_IF right now
+ echo "${BOND_TX_MAIN_IF}" > "${NETCONS_PATH}"/dev_name
+ enable_netcons_ns
+
+ # netcons is attached to BOND_TX_MAIN_IF and BOND_TX1_SLAVE_IF is
+ # part of it. Attach BOND_TX2_SLAVE_IF to BOND_TX_MAIN_IF.
+ ip -n "${TXNS}" \
+ link set "${BOND_TX2_SLAVE_IF}" master "${BOND_TX_MAIN_IF}"
+ if [[ $(cat "${NETCONS_PATH}"/enabled) -eq 0 ]]
+ then
+ echo "test failed: Netconsole should be enabled on bonding interface. Failed" >&2
+ exit "${ksft_fail}"
+ fi
+}
+
+function test_enslave_iff_disabled_netpoll_iface {
+ local ret
+
+ # Create two interfaces. veth interfaces are known to have
+ # IFF_DISABLE_NETPOLL set
+ if ! ip link add "${VETH0}" type veth peer name "${VETH1}"
+ then
+ echo "Failed to create veth TX interface. Is CONFIG_VETH set?" >&2
+ exit "${ksft_skip}"
+ fi
+ set +e
+ # This will print RTNETLINK answers: Device or resource busy
+ ip link set "${VETH0}" master "${BOND_TX_MAIN_IF}" 2> /dev/null
+ ret=$?
+ set -e
+ if [[ $ret -eq 0 ]]
+ then
+ echo "test failed: veth interface could not be enslaved"
+ exit "${ksft_fail}"
+ fi
+}
+
+# Given that netconsole picks the current net namespace, we need to enable it
+# from inside the TXNS namespace
+function enable_netcons_ns() {
+ ip netns exec "${TXNS}" sh -c \
+ "mount -t configfs configfs /sys/kernel/config && echo 1 > $NETCONS_PATH/enabled"
+}
+
+####################
+# Tests start here #
+####################
+
+# Create regular interfaces using netdevsim and link them
+create_all_ifaces
+
+# Setup the bonding interfaces
+# BOND_RX_MAIN_IF has BOND_RX{1,2}_SLAVE_IF
+# BOND_TX_MAIN_IF has BOND_TX{1,2}_SLAVE_IF
+setup_bonding_ifaces
+
+# Configure the IPs on DSTIF and SRCIF (the bonding interfaces)
+configure_ifaces_ips "${IP_VERSION}"
+
+_create_dynamic_target "${FORMAT}" "${NETCONS_PATH}"
+enable_netcons_ns
+set_user_data
+
+# Test #1: Create a bonding interface and attach netpoll to it.
+# Netconsole/netpoll should work on the bonding interface.
+test_send_netcons_msg_through_bond_iface
+echo "test #1: netpoll on bonding interface worked. Test passed" >&2
+
+# Test #2: Attach netpoll to an enslaved interface
+# Try to attach netpoll to an enslaved sub-interface (while still being part of
+# a bonding interface), which shouldn't be allowed
+test_enable_netpoll_on_enslaved_iface
+echo "test #2: netpoll correctly rejected enslaved interface (expected behavior). Test passed." >&2
+
+# Test #3: Unplug the sub-interface from bond and enable netconsole
+# Detach the interface from a bonding interface and attach netpoll again
+test_delete_bond_and_reenable_target
+echo "test #3: Able to attach to an unbound interface. Test passed." >&2
+
+# Test #4: Enslave a sub-interface that has netconsole enabled
+# Try to enslave an interface that has netconsole/netpoll enabled.
+# The previous test left netconsole enabled on BOND_TX1_SLAVE_IF; try to enslave it
+test_enslave_netcons_enabled_iface
+echo "test #4: Enslaving an interface with netpoll attached. Test passed." >&2
+
+# Test #5: Enslave a sub-interface to a bonding interface
+# Enslave an interface to a bond interface that has netpoll attached
+# At this stage, BOND_TX_MAIN_IF is created and BOND_TX1_SLAVE_IF is part of
+# it. Netconsole is currently disabled
+test_enslave_iface_to_bond
+echo "test #5: Enslaving an interface to bond+netpoll. Test passed." >&2
+
+# Test #6: Enslave an IFF_DISABLE_NETPOLL sub-interface to a bonding interface
+# At this stage, BOND_TX_MAIN_IF has both sub-interfaces and netconsole is
+# enabled. This test tries to enslave a veth (IFF_DISABLE_NETPOLL) interface,
+# which should fail with "netpoll: veth0 doesn't support polling"
+test_enslave_iff_disabled_netpoll_iface
+echo "test #6: Enslaving IFF_DISABLE_NETPOLL ifaces to bond iface is not supported. Test passed." >&2
+
+cleanup_bond
+trap - EXIT
+exit "${EXIT_STATUS}"
diff --git a/tools/testing/selftests/drivers/net/config b/tools/testing/selftests/drivers/net/config
index f27172ddee0a..77ccf83d87e0 100644
--- a/tools/testing/selftests/drivers/net/config
+++ b/tools/testing/selftests/drivers/net/config
@@ -1,7 +1,10 @@
-CONFIG_IPV6=y
-CONFIG_NETDEVSIM=m
CONFIG_CONFIGFS_FS=y
+CONFIG_DEBUG_INFO_BTF=y
+CONFIG_DEBUG_INFO_BTF_MODULES=n
+CONFIG_INET_PSP=y
+CONFIG_IPV6=y
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_NETCONSOLE_EXTENDED_LOG=y
+CONFIG_NETDEVSIM=m
CONFIG_XDP_SOCKETS=y
diff --git a/tools/testing/selftests/drivers/net/dsa/Makefile b/tools/testing/selftests/drivers/net/dsa/Makefile
index cd6817fe5be6..7994bd0e5c44 100644
--- a/tools/testing/selftests/drivers/net/dsa/Makefile
+++ b/tools/testing/selftests/drivers/net/dsa/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0+ OR MIT
-TEST_PROGS = bridge_locked_port.sh \
+TEST_PROGS := \
+ bridge_locked_port.sh \
bridge_mdb.sh \
bridge_mld.sh \
bridge_vlan_aware.sh \
@@ -9,11 +10,13 @@ TEST_PROGS = bridge_locked_port.sh \
local_termination.sh \
no_forwarding.sh \
tc_actions.sh \
- test_bridge_fdb_stress.sh
+ test_bridge_fdb_stress.sh \
+# end of TEST_PROGS
TEST_FILES := \
+ forwarding.config \
run_net_forwarding_test.sh \
- forwarding.config
+# end of TEST_FILES
TEST_INCLUDES := \
../../../net/forwarding/bridge_locked_port.sh \
@@ -27,6 +30,7 @@ TEST_INCLUDES := \
../../../net/forwarding/no_forwarding.sh \
../../../net/forwarding/tc_actions.sh \
../../../net/forwarding/tc_common.sh \
- ../../../net/lib.sh
+ ../../../net/lib.sh \
+# end of TEST_INCLUDES
include ../../../lib.mk
diff --git a/tools/testing/selftests/net/gro.c b/tools/testing/selftests/drivers/net/gro.c
index d5824eadea10..e894037d2e3e 100644
--- a/tools/testing/selftests/net/gro.c
+++ b/tools/testing/selftests/drivers/net/gro.c
@@ -57,7 +57,8 @@
#include <string.h>
#include <unistd.h>
-#include "../kselftest.h"
+#include "kselftest.h"
+#include "../../net/lib/ksft.h"
#define DPORT 8000
#define SPORT 1500
@@ -93,6 +94,7 @@ static bool tx_socket = true;
static int tcp_offset = -1;
static int total_hdr_len = -1;
static int ethhdr_proto = -1;
+static bool ipip;
static const int num_flush_id_cases = 6;
static void vlog(const char *fmt, ...)
@@ -114,7 +116,9 @@ static void setup_sock_filter(int fd)
int ipproto_off, opt_ipproto_off;
int next_off;
- if (proto == PF_INET)
+ if (ipip)
+ next_off = sizeof(struct iphdr) + offsetof(struct iphdr, protocol);
+ else if (proto == PF_INET)
next_off = offsetof(struct iphdr, protocol);
else
next_off = offsetof(struct ipv6hdr, nexthdr);
@@ -244,7 +248,7 @@ static void fill_datalinklayer(void *buf)
eth->h_proto = ethhdr_proto;
}
-static void fill_networklayer(void *buf, int payload_len)
+static void fill_networklayer(void *buf, int payload_len, int protocol)
{
struct ipv6hdr *ip6h = buf;
struct iphdr *iph = buf;
@@ -254,7 +258,7 @@ static void fill_networklayer(void *buf, int payload_len)
ip6h->version = 6;
ip6h->payload_len = htons(sizeof(struct tcphdr) + payload_len);
- ip6h->nexthdr = IPPROTO_TCP;
+ ip6h->nexthdr = protocol;
ip6h->hop_limit = 8;
if (inet_pton(AF_INET6, addr6_src, &ip6h->saddr) != 1)
error(1, errno, "inet_pton source ip6");
@@ -266,7 +270,7 @@ static void fill_networklayer(void *buf, int payload_len)
iph->version = 4;
iph->ihl = 5;
iph->ttl = 8;
- iph->protocol = IPPROTO_TCP;
+ iph->protocol = protocol;
iph->tot_len = htons(sizeof(struct tcphdr) +
payload_len + sizeof(struct iphdr));
iph->frag_off = htons(0x4000); /* DF = 1, MF = 0 */
@@ -313,9 +317,19 @@ static void create_packet(void *buf, int seq_offset, int ack_offset,
{
memset(buf, 0, total_hdr_len);
memset(buf + total_hdr_len, 'a', payload_len);
+
fill_transportlayer(buf + tcp_offset, seq_offset, ack_offset,
payload_len, fin);
- fill_networklayer(buf + ETH_HLEN, payload_len);
+
+ if (ipip) {
+ fill_networklayer(buf + ETH_HLEN, payload_len + sizeof(struct iphdr),
+ IPPROTO_IPIP);
+ fill_networklayer(buf + ETH_HLEN + sizeof(struct iphdr),
+ payload_len, IPPROTO_TCP);
+ } else {
+ fill_networklayer(buf + ETH_HLEN, payload_len, IPPROTO_TCP);
+ }
+
fill_datalinklayer(buf);
}
@@ -416,6 +430,13 @@ static void recompute_packet(char *buf, char *no_ext, int extlen)
iph->tot_len = htons(ntohs(iph->tot_len) + extlen);
iph->check = 0;
iph->check = checksum_fold(iph, sizeof(struct iphdr), 0);
+
+ if (ipip) {
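+ /* advance to the inner IPv4 header and fix up its length and checksum too */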
+ iph += 1;
+ iph->tot_len = htons(ntohs(iph->tot_len) + extlen);
+ iph->check = 0;
+ iph->check = checksum_fold(iph, sizeof(struct iphdr), 0);
+ }
} else {
ip6h->payload_len = htons(ntohs(ip6h->payload_len) + extlen);
}
@@ -670,7 +691,7 @@ static void send_flush_id_case(int fd, struct sockaddr_ll *daddr, int tcase)
iph2->id = htons(9);
break;
- case 3: /* DF=0, Fixed - should not coalesce */
+ case 3: /* DF=0, Fixed - should coalesce */
iph1->frag_off &= ~htons(IP_DF);
iph1->id = htons(8);
@@ -734,11 +755,11 @@ static void send_ipv6_exthdr(int fd, struct sockaddr_ll *daddr, char *ext_data1,
static char exthdr_pck[sizeof(buf) + MIN_EXTHDR_SIZE];
create_packet(buf, 0, 0, PAYLOAD_LEN, 0);
- add_ipv6_exthdr(buf, exthdr_pck, IPPROTO_HOPOPTS, ext_data1);
+ add_ipv6_exthdr(buf, exthdr_pck, IPPROTO_DSTOPTS, ext_data1);
write_packet(fd, exthdr_pck, total_hdr_len + PAYLOAD_LEN + MIN_EXTHDR_SIZE, daddr);
create_packet(buf, PAYLOAD_LEN * 1, 0, PAYLOAD_LEN, 0);
- add_ipv6_exthdr(buf, exthdr_pck, IPPROTO_HOPOPTS, ext_data2);
+ add_ipv6_exthdr(buf, exthdr_pck, IPPROTO_DSTOPTS, ext_data2);
write_packet(fd, exthdr_pck, total_hdr_len + PAYLOAD_LEN + MIN_EXTHDR_SIZE, daddr);
}
@@ -777,7 +798,7 @@ static void send_fragment4(int fd, struct sockaddr_ll *daddr)
*/
memset(buf + total_hdr_len, 'a', PAYLOAD_LEN * 2);
fill_transportlayer(buf + tcp_offset, PAYLOAD_LEN, 0, PAYLOAD_LEN * 2, 0);
- fill_networklayer(buf + ETH_HLEN, PAYLOAD_LEN);
+ fill_networklayer(buf + ETH_HLEN, PAYLOAD_LEN, IPPROTO_TCP);
fill_datalinklayer(buf);
iph->frag_off = htons(0x6000); // DF = 1, MF = 1
@@ -969,6 +990,7 @@ static void check_recv_pkts(int fd, int *correct_payload,
static void gro_sender(void)
{
+ const int fin_delay_us = 100 * 1000;
static char fin_pkt[MAX_HDR_LEN];
struct sockaddr_ll daddr = {};
int txfd = -1;
@@ -1012,15 +1034,22 @@ static void gro_sender(void)
write_packet(txfd, fin_pkt, total_hdr_len, &daddr);
} else if (strcmp(testname, "tcp") == 0) {
send_changed_checksum(txfd, &daddr);
+ /* Adding sleep before sending FIN so that it is not
+ * received prior to other packets.
+ */
+ usleep(fin_delay_us);
write_packet(txfd, fin_pkt, total_hdr_len, &daddr);
send_changed_seq(txfd, &daddr);
+ usleep(fin_delay_us);
write_packet(txfd, fin_pkt, total_hdr_len, &daddr);
send_changed_ts(txfd, &daddr);
+ usleep(fin_delay_us);
write_packet(txfd, fin_pkt, total_hdr_len, &daddr);
send_diff_opt(txfd, &daddr);
+ usleep(fin_delay_us);
write_packet(txfd, fin_pkt, total_hdr_len, &daddr);
} else if (strcmp(testname, "ip") == 0) {
send_changed_ECN(txfd, &daddr);
@@ -1071,7 +1100,7 @@ static void gro_sender(void)
* and min ipv6hdr size. Like MAX_HDR_SIZE,
* MAX_PAYLOAD is defined with the larger header of the two.
*/
- int offset = proto == PF_INET ? 20 : 0;
+ int offset = (proto == PF_INET && !ipip) ? 20 : 0;
int remainder = (MAX_PAYLOAD + offset) % MSS;
send_large(txfd, &daddr, remainder);
@@ -1099,6 +1128,8 @@ static void gro_receiver(void)
set_timeout(rxfd);
bind_packetsocket(rxfd);
+ ksft_ready();
+
memset(correct_payload, 0, sizeof(correct_payload));
if (strcmp(testname, "data") == 0) {
@@ -1188,10 +1219,9 @@ static void gro_receiver(void)
correct_payload[0] = PAYLOAD_LEN * 2;
check_recv_pkts(rxfd, correct_payload, 1);
- printf("DF=0, Fixed - should not coalesce: ");
- correct_payload[0] = PAYLOAD_LEN;
- correct_payload[1] = PAYLOAD_LEN;
- check_recv_pkts(rxfd, correct_payload, 2);
+ printf("DF=0, Fixed - should coalesce: ");
+ correct_payload[0] = PAYLOAD_LEN * 2;
+ check_recv_pkts(rxfd, correct_payload, 1);
printf("DF=1, 2 Incrementing and one fixed - should coalesce only first 2 packets: ");
correct_payload[0] = PAYLOAD_LEN * 2;
@@ -1222,7 +1252,7 @@ static void gro_receiver(void)
check_recv_pkts(rxfd, correct_payload, 2);
}
} else if (strcmp(testname, "large") == 0) {
- int offset = proto == PF_INET ? 20 : 0;
+ int offset = (proto == PF_INET && !ipip) ? 20 : 0;
int remainder = (MAX_PAYLOAD + offset) % MSS;
correct_payload[0] = (MAX_PAYLOAD + offset);
@@ -1251,6 +1281,7 @@ static void parse_args(int argc, char **argv)
{ "iface", required_argument, NULL, 'i' },
{ "ipv4", no_argument, NULL, '4' },
{ "ipv6", no_argument, NULL, '6' },
+ { "ipip", no_argument, NULL, 'e' },
{ "rx", no_argument, NULL, 'r' },
{ "saddr", required_argument, NULL, 's' },
{ "smac", required_argument, NULL, 'S' },
@@ -1260,7 +1291,7 @@ static void parse_args(int argc, char **argv)
};
int c;
- while ((c = getopt_long(argc, argv, "46d:D:i:rs:S:t:v", opts, NULL)) != -1) {
+ while ((c = getopt_long(argc, argv, "46d:D:ei:rs:S:t:v", opts, NULL)) != -1) {
switch (c) {
case '4':
proto = PF_INET;
@@ -1270,6 +1301,11 @@ static void parse_args(int argc, char **argv)
proto = PF_INET6;
ethhdr_proto = htons(ETH_P_IPV6);
break;
+ case 'e':
+ ipip = true;
+ proto = PF_INET;
+ ethhdr_proto = htons(ETH_P_IP);
+ break;
case 'd':
addr4_dst = addr6_dst = optarg;
break;
@@ -1305,7 +1341,10 @@ int main(int argc, char **argv)
{
parse_args(argc, argv);
- if (proto == PF_INET) {
+ if (ipip) {
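+ /* outer + inner IPv4 headers precede the TCP header */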
+ tcp_offset = ETH_HLEN + sizeof(struct iphdr) * 2;
+ total_hdr_len = tcp_offset + sizeof(struct tcphdr);
+ } else if (proto == PF_INET) {
tcp_offset = ETH_HLEN + sizeof(struct iphdr);
total_hdr_len = tcp_offset + sizeof(struct tcphdr);
} else if (proto == PF_INET6) {
diff --git a/tools/testing/selftests/drivers/net/gro.py b/tools/testing/selftests/drivers/net/gro.py
new file mode 100755
index 000000000000..ba83713bf7b5
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/gro.py
@@ -0,0 +1,164 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+GRO (Generic Receive Offload) conformance tests.
+
+Validates that GRO coalescing works correctly by running the gro
+binary in different configurations and checking for correct packet
+coalescing behavior.
+
+Test cases:
+ - data: Data packets with same size/headers and correct seq numbers coalesce
+ - ack: Pure ACK packets do not coalesce
+ - flags: Packets with PSH, SYN, URG, RST flags do not coalesce
+ - tcp: Packets with incorrect checksum, non-consecutive seqno don't coalesce
+ - ip: Packets with different ECN, TTL, TOS, or IP options don't coalesce
+ - large: Packets larger than GRO_MAX_SIZE don't coalesce
+"""
+
+import os
+from lib.py import ksft_run, ksft_exit, ksft_pr
+from lib.py import NetDrvEpEnv, KsftXfailEx
+from lib.py import cmd, defer, bkg, ip
+from lib.py import ksft_variants
+
+
+def _resolve_dmac(cfg, ipver):
+ """
+ Find the destination MAC address remote host should use to send packets
+ towards the local host. It may be a router / gateway address.
+ """
+
+ attr = "dmac" + ipver
+ # Cache the response across test cases
+ if hasattr(cfg, attr):
+ return getattr(cfg, attr)
+
+ route = ip(f"-{ipver} route get {cfg.addr_v[ipver]}",
+ json=True, host=cfg.remote)[0]
+ gw = route.get("gateway")
+ # Local L2 segment, address directly
+ if not gw:
+ setattr(cfg, attr, cfg.dev['address'])
+ return getattr(cfg, attr)
+
+ # ping to make sure neighbor is resolved,
+ # bind to an interface, for v6 the GW is likely link local
+ cmd(f"ping -c1 -W0 -I{cfg.remote_ifname} {gw}", host=cfg.remote)
+
+ neigh = ip(f"neigh get {gw} dev {cfg.remote_ifname}",
+ json=True, host=cfg.remote)[0]
+ setattr(cfg, attr, neigh['lladdr'])
+ return getattr(cfg, attr)
+
+
+def _write_defer_restore(cfg, path, val, defer_undo=False):
+ with open(path, "r", encoding="utf-8") as fp:
+ orig_val = fp.read().strip()
+ if str(val) == orig_val:
+ return
+ with open(path, "w", encoding="utf-8") as fp:
+ fp.write(val)
+ if defer_undo:
+ defer(_write_defer_restore, cfg, path, orig_val)
+
+
+def _set_mtu_restore(dev, mtu, host):
+ if dev['mtu'] < mtu:
+ ip(f"link set dev {dev['ifname']} mtu {mtu}", host=host)
+ defer(ip, f"link set dev {dev['ifname']} mtu {dev['mtu']}", host=host)
+
+
+def _setup(cfg, test_name):
+ """ Setup hardware loopback mode for GRO testing. """
+
+ if not hasattr(cfg, "bin_remote"):
+ cfg.bin_local = cfg.test_dir / "gro"
+ cfg.bin_remote = cfg.remote.deploy(cfg.bin_local)
+
+ # "large" test needs at least 4k MTU
+ if test_name == "large":
+ _set_mtu_restore(cfg.dev, 4096, None)
+ _set_mtu_restore(cfg.remote_dev, 4096, cfg.remote)
+
+ flush_path = f"/sys/class/net/{cfg.ifname}/gro_flush_timeout"
+ irq_path = f"/sys/class/net/{cfg.ifname}/napi_defer_hard_irqs"
+
+ _write_defer_restore(cfg, flush_path, "200000", defer_undo=True)
+ _write_defer_restore(cfg, irq_path, "10", defer_undo=True)
+
+ try:
+ # Disable TSO for local tests
+ cfg.require_nsim() # will raise KsftXfailEx if not running on nsim
+
+ cmd(f"ethtool -K {cfg.ifname} gro on tso off")
+ cmd(f"ethtool -K {cfg.remote_ifname} gro on tso off", host=cfg.remote)
+ except KsftXfailEx:
+ pass
+
+def _gro_variants():
+ """Generator that yields all combinations of protocol and test types."""
+
+ for protocol in ["ipv4", "ipv6", "ipip"]:
+ for test_name in ["data", "ack", "flags", "tcp", "ip", "large"]:
+ yield protocol, test_name
+
+
+@ksft_variants(_gro_variants())
+def test(cfg, protocol, test_name):
+ """Run a single GRO test with retries."""
+
+ ipver = "6" if protocol[-1] == "6" else "4"
+ cfg.require_ipver(ipver)
+
+ _setup(cfg, test_name)
+
+ base_cmd_args = [
+ f"--{protocol}",
+ f"--dmac {_resolve_dmac(cfg, ipver)}",
+ f"--smac {cfg.remote_dev['address']}",
+ f"--daddr {cfg.addr_v[ipver]}",
+ f"--saddr {cfg.remote_addr_v[ipver]}",
+ f"--test {test_name}",
+ "--verbose"
+ ]
+ base_args = " ".join(base_cmd_args)
+
+ # Each test is run 6 times to deflake, because given the receive timing,
+ # not all packets that should coalesce will be considered in the same flow
+ # on every try.
+ max_retries = 6
+ for attempt in range(max_retries):
+ rx_cmd = f"{cfg.bin_local} {base_args} --rx --iface {cfg.ifname}"
+ tx_cmd = f"{cfg.bin_remote} {base_args} --iface {cfg.remote_ifname}"
+
+ fail_now = attempt >= max_retries - 1
+
+ with bkg(rx_cmd, ksft_ready=True, exit_wait=True,
+ fail=fail_now) as rx_proc:
+ cmd(tx_cmd, host=cfg.remote)
+
+ if rx_proc.ret == 0:
+ return
+
+ ksft_pr(rx_proc.stdout.strip().replace('\n', '\n# '))
+ ksft_pr(rx_proc.stderr.strip().replace('\n', '\n# '))
+
+ if test_name == "large" and os.environ.get("KSFT_MACHINE_SLOW"):
+ ksft_pr(f"Ignoring {protocol}/{test_name} failure due to slow environment")
+ return
+
+ ksft_pr(f"Attempt {attempt + 1}/{max_retries} failed, retrying...")
+
+
+def main() -> None:
+ """ Ksft boiler plate main """
+
+ with NetDrvEpEnv(__file__) as cfg:
+ ksft_run(cases=[test], args=(cfg,))
+ ksft_exit()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/testing/selftests/drivers/net/hds.py b/tools/testing/selftests/drivers/net/hds.py
index 7c90a040ce45..c4fe049e9baa 100755
--- a/tools/testing/selftests/drivers/net/hds.py
+++ b/tools/testing/selftests/drivers/net/hds.py
@@ -3,10 +3,12 @@
import errno
import os
+import random
+from typing import Union
from lib.py import ksft_run, ksft_exit, ksft_eq, ksft_raises, KsftSkipEx
from lib.py import CmdExitFailure, EthtoolFamily, NlError
from lib.py import NetDrvEnv
-from lib.py import defer, ethtool, ip, random
+from lib.py import defer, ethtool, ip
def _get_hds_mode(cfg, netnl) -> str:
@@ -58,7 +60,39 @@ def get_hds_thresh(cfg, netnl) -> None:
if 'hds-thresh' not in rings:
raise KsftSkipEx('hds-thresh not supported by device')
+
+def _hds_reset(cfg, netnl, rings) -> None:
+ cur = netnl.rings_get({'header': {'dev-index': cfg.ifindex}})
+
+ arg = {'header': {'dev-index': cfg.ifindex}}
+ if cur.get('tcp-data-split') != rings.get('tcp-data-split'):
+ # Try to reset to "unknown" first, we don't know if the setting
+ # was the default or user chose it. Default seems more likely.
+ arg['tcp-data-split'] = "unknown"
+ netnl.rings_set(arg)
+ cur = netnl.rings_get({'header': {'dev-index': cfg.ifindex}})
+ if cur['tcp-data-split'] == rings['tcp-data-split']:
+ del arg['tcp-data-split']
+ else:
+ # Try the explicit setting
+ arg['tcp-data-split'] = rings['tcp-data-split']
+ if cur.get('hds-thresh') != rings.get('hds-thresh'):
+ arg['hds-thresh'] = rings['hds-thresh']
+ if len(arg) > 1:
+ netnl.rings_set(arg)
+
+
+def _defer_reset_hds(cfg, netnl) -> Union[dict, None]:
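+ """Snapshot current ring settings and register a deferred restore."""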
+ try:
+ rings = netnl.rings_get({'header': {'dev-index': cfg.ifindex}})
+ if 'hds-thresh' in rings or 'tcp-data-split' in rings:
+ defer(_hds_reset, cfg, netnl, rings)
+ except NlError:
+ pass
+
+
def set_hds_enable(cfg, netnl) -> None:
+ _defer_reset_hds(cfg, netnl)
try:
netnl.rings_set({'header': {'dev-index': cfg.ifindex}, 'tcp-data-split': 'enabled'})
except NlError as e:
@@ -76,6 +110,7 @@ def set_hds_enable(cfg, netnl) -> None:
ksft_eq('enabled', rings['tcp-data-split'])
def set_hds_disable(cfg, netnl) -> None:
+ _defer_reset_hds(cfg, netnl)
try:
netnl.rings_set({'header': {'dev-index': cfg.ifindex}, 'tcp-data-split': 'disabled'})
except NlError as e:
@@ -93,6 +128,7 @@ def set_hds_disable(cfg, netnl) -> None:
ksft_eq('disabled', rings['tcp-data-split'])
def set_hds_thresh_zero(cfg, netnl) -> None:
+ _defer_reset_hds(cfg, netnl)
try:
netnl.rings_set({'header': {'dev-index': cfg.ifindex}, 'hds-thresh': 0})
except NlError as e:
@@ -110,6 +146,7 @@ def set_hds_thresh_zero(cfg, netnl) -> None:
ksft_eq(0, rings['hds-thresh'])
def set_hds_thresh_random(cfg, netnl) -> None:
+ _defer_reset_hds(cfg, netnl)
try:
rings = netnl.rings_get({'header': {'dev-index': cfg.ifindex}})
except NlError as e:
@@ -140,6 +177,7 @@ def set_hds_thresh_random(cfg, netnl) -> None:
ksft_eq(hds_thresh, rings['hds-thresh'])
def set_hds_thresh_max(cfg, netnl) -> None:
+ _defer_reset_hds(cfg, netnl)
try:
rings = netnl.rings_get({'header': {'dev-index': cfg.ifindex}})
except NlError as e:
@@ -157,6 +195,7 @@ def set_hds_thresh_max(cfg, netnl) -> None:
ksft_eq(rings['hds-thresh'], rings['hds-thresh-max'])
def set_hds_thresh_gt(cfg, netnl) -> None:
+ _defer_reset_hds(cfg, netnl)
try:
rings = netnl.rings_get({'header': {'dev-index': cfg.ifindex}})
except NlError as e:
@@ -178,6 +217,7 @@ def set_xdp(cfg, netnl) -> None:
"""
mode = _get_hds_mode(cfg, netnl)
if mode == 'enabled':
+ _defer_reset_hds(cfg, netnl)
netnl.rings_set({'header': {'dev-index': cfg.ifindex},
'tcp-data-split': 'unknown'})
diff --git a/tools/testing/selftests/drivers/net/hw/.gitignore b/tools/testing/selftests/drivers/net/hw/.gitignore
index 6942bf575497..46540468a775 100644
--- a/tools/testing/selftests/drivers/net/hw/.gitignore
+++ b/tools/testing/selftests/drivers/net/hw/.gitignore
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
iou-zcrx
ncdevmem
+toeplitz
diff --git a/tools/testing/selftests/drivers/net/hw/Makefile b/tools/testing/selftests/drivers/net/hw/Makefile
index df2c047ffa90..9c163ba6feee 100644
--- a/tools/testing/selftests/drivers/net/hw/Makefile
+++ b/tools/testing/selftests/drivers/net/hw/Makefile
@@ -1,10 +1,26 @@
# SPDX-License-Identifier: GPL-2.0+ OR MIT
-TEST_GEN_FILES = iou-zcrx
+# Check if io_uring supports zero-copy receive
+HAS_IOURING_ZCRX := $(shell \
+ echo -e '#include <liburing.h>\n' \
+ 'void *func = (void *)io_uring_register_ifq;\n' \
+ 'int main() {return 0;}' | \
+ $(CC) -luring -x c - -o /dev/null 2>&1 && echo y)
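+# (The probe compiles and links a one-liner referencing io_uring_register_ifq,
+# which is only exported by liburing versions new enough for zero-copy Rx.)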
+
+ifeq ($(HAS_IOURING_ZCRX),y)
+COND_GEN_FILES += iou-zcrx
+else
+$(warning excluding io_uring tests, liburing not installed or too old)
+endif
+
+TEST_GEN_FILES := \
+ $(COND_GEN_FILES) \
+# end of TEST_GEN_FILES
TEST_PROGS = \
csum.py \
devlink_port_split.py \
+ devlink_rate_tc_bw.py \
devmem.py \
ethtool.sh \
ethtool_extended_state.sh \
@@ -15,9 +31,13 @@ TEST_PROGS = \
iou-zcrx.py \
irq.py \
loopback.sh \
+ nic_timestamp.py \
pp_alloc_fail.py \
+ rss_api.py \
rss_ctx.py \
+ rss_flow_label.py \
rss_input_xfrm.py \
+ toeplitz.py \
tso.py \
xsk_reconfig.py \
#
@@ -29,22 +49,31 @@ TEST_FILES := \
TEST_INCLUDES := \
$(wildcard lib/py/*.py ../lib/py/*.py) \
../../../net/lib.sh \
- ../../../net/forwarding/lib.sh \
../../../net/forwarding/ipip_lib.sh \
+ ../../../net/forwarding/lib.sh \
../../../net/forwarding/tc_common.sh \
#
# YNL files, must be before "include ..lib.mk"
-YNL_GEN_FILES := ncdevmem
+YNL_GEN_FILES := \
+ ncdevmem \
+ toeplitz \
+# end of YNL_GEN_FILES
TEST_GEN_FILES += $(YNL_GEN_FILES)
TEST_GEN_FILES += $(patsubst %.c,%.o,$(wildcard *.bpf.c))
include ../../../lib.mk
# YNL build
-YNL_GENS := ethtool netdev
+YNL_GENS := \
+ ethtool \
+ netdev \
+# end of YNL_GENS
+
include ../../../net/ynl.mk
include ../../../net/bpf.mk
+ifeq ($(HAS_IOURING_ZCRX),y)
$(OUTPUT)/iou-zcrx: LDLIBS += -luring
+endif
diff --git a/tools/testing/selftests/drivers/net/hw/config b/tools/testing/selftests/drivers/net/hw/config
new file mode 100644
index 000000000000..2307aa001be1
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/hw/config
@@ -0,0 +1,11 @@
+CONFIG_FAIL_FUNCTION=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FUNCTION_ERROR_INJECTION=y
+CONFIG_IO_URING=y
+CONFIG_IPV6=y
+CONFIG_IPV6_GRE=y
+CONFIG_NET_IPGRE=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_UDMABUF=y
+CONFIG_VXLAN=y
diff --git a/tools/testing/selftests/drivers/net/hw/csum.py b/tools/testing/selftests/drivers/net/hw/csum.py
index cd23af875317..3e3a89a34afe 100755
--- a/tools/testing/selftests/drivers/net/hw/csum.py
+++ b/tools/testing/selftests/drivers/net/hw/csum.py
@@ -17,7 +17,7 @@ def test_receive(cfg, ipver="6", extra_args=None):
ip_args = f"-{ipver} -S {cfg.remote_addr_v[ipver]} -D {cfg.addr_v[ipver]}"
rx_cmd = f"{cfg.bin_local} -i {cfg.ifname} -n 100 {ip_args} -r 1 -R {extra_args}"
- tx_cmd = f"{cfg.bin_remote} -i {cfg.ifname} -n 100 {ip_args} -r 1 -T {extra_args}"
+ tx_cmd = f"{cfg.bin_remote} -i {cfg.remote_ifname} -n 100 {ip_args} -r 1 -T {extra_args}"
with bkg(rx_cmd, exit_wait=True):
wait_port_listen(34000, proto="udp")
@@ -37,7 +37,7 @@ def test_transmit(cfg, ipver="6", extra_args=None):
if extra_args != "-U -Z":
extra_args += " -r 1"
- rx_cmd = f"{cfg.bin_remote} -i {cfg.ifname} -L 1 -n 100 {ip_args} -R {extra_args}"
+ rx_cmd = f"{cfg.bin_remote} -i {cfg.remote_ifname} -L 1 -n 100 {ip_args} -R {extra_args}"
tx_cmd = f"{cfg.bin_local} -i {cfg.ifname} -L 1 -n 100 {ip_args} -T {extra_args}"
with bkg(rx_cmd, host=cfg.remote, exit_wait=True):
diff --git a/tools/testing/selftests/drivers/net/hw/devlink_rate_tc_bw.py b/tools/testing/selftests/drivers/net/hw/devlink_rate_tc_bw.py
new file mode 100755
index 000000000000..4e4faa9275bb
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/hw/devlink_rate_tc_bw.py
@@ -0,0 +1,439 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+Devlink Rate TC Bandwidth Test Suite
+===================================
+
+This test suite verifies the functionality of devlink-rate traffic class (TC)
+bandwidth distribution in a virtualized environment. The tests validate that
+bandwidth can be properly allocated between different traffic classes and
+that TC mapping works as expected.
+
+Test Environment:
+----------------
+- Creates 1 VF
+- Establishes a bridge connecting the VF representor and the uplink representor
+- Sets up 2 VLAN interfaces on the VF with different VLAN IDs (101, 102)
+- Configures different traffic classes (TC3 and TC4) for each VLAN
+
+Test Cases:
+----------
+1. test_no_tc_mapping_bandwidth:
+ - Verifies that without TC mapping, bandwidth is NOT distributed according to
+ the configured 20/80 split between TC3 and TC4
+ - This test should fail if bandwidth matches the 20/80 split without TC
+ mapping
+ - Expected: Bandwidth should NOT be distributed as 20/80
+
+2. test_tc_mapping_bandwidth:
+ - Configures TC mapping using mqprio qdisc
+ - Verifies that with TC mapping, bandwidth IS distributed according to the
+ configured 20/80 split between TC3 and TC4
+ - Expected: Bandwidth should be distributed as 20/80
+
+Bandwidth Distribution:
+----------------------
+- TC3 (VLAN 101): Configured for 20% of total bandwidth
+- TC4 (VLAN 102): Configured for 80% of total bandwidth
+- Total bandwidth: 1Gbps
+- Tolerance: +-12%
+
+Hardware-Specific Behavior (mlx5):
+---------------------------------
+mlx5 hardware enforces traffic class separation by ensuring that each transmit
+queue (SQ) is associated with a single TC. If a packet is sent on a queue that
+doesn't match the expected TC (based on DSCP or VLAN priority and hypervisor-set
+mapping), the hardware moves the queue to the correct TC scheduler to preserve
+traffic isolation.
+
+This behavior means that even without explicit TC-to-queue mapping, bandwidth
+enforcement may still appear to work, because the hardware dynamically adjusts
+the scheduling context. However, this can lead to performance issues at high
+rates and HOL blocking if traffic from different TCs is mixed on the same queue.
+"""
+
+import json
+import os
+import subprocess
+import threading
+import time
+
+from lib.py import ksft_pr, ksft_run, ksft_exit
+from lib.py import KsftSkipEx, KsftFailEx, KsftXfailEx
+from lib.py import NetDrvEpEnv, DevlinkFamily
+from lib.py import NlError
+from lib.py import cmd, defer, ethtool, ip
+from lib.py import Iperf3Runner
+
+
+class BandwidthValidator:
+ """
+ Validates total bandwidth and individual shares with tolerance
+ relative to the overall total.
+ """
+
+ def __init__(self, shares):
+ self.tolerance_percent = 12
+ self.expected_total = sum(shares.values())
+ self.bounds = {}
+
+ for name, exp in shares.items():
+ self.bounds[name] = (self.min_expected(exp), self.max_expected(exp))
+
+ def min_expected(self, value):
+ """Calculates the minimum acceptable value based on tolerance."""
+ return value - (self.expected_total * self.tolerance_percent / 100)
+
+ def max_expected(self, value):
+ """Calculates the maximum acceptable value based on tolerance."""
+ return value + (self.expected_total * self.tolerance_percent / 100)
+
+ def bound(self, values):
+ """
+ Return True if all given values fall within tolerance.
+ """
+ for name, value in values.items():
+ low, high = self.bounds[name]
+ if not low <= value <= high:
+ return False
+ return True
+
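+# Illustrative check: with shares {"tc3": 0.2, "tc4": 0.8} the tolerance is
+# 12% of the 1.0 total, giving bounds tc3 [0.08, 0.32] and tc4 [0.68, 0.92];
+# a measured {"tc3": 0.25, "tc4": 0.70} passes bound(), while an even
+# {"tc3": 0.5, "tc4": 0.5} split does not.
+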
+
+def setup_vf(cfg, set_tc_mapping=True):
+ """
+ Sets up a VF on the given network interface.
+
+ Enables SR-IOV and switchdev mode, brings the VF interface up,
+ and optionally configures TC mapping using mqprio.
+ """
+ try:
+ cmd(f"devlink dev eswitch set pci/{cfg.pci} mode switchdev")
+ defer(cmd, f"devlink dev eswitch set pci/{cfg.pci} mode legacy")
+ except Exception as exc:
+ raise KsftSkipEx(f"Failed to enable switchdev mode on {cfg.pci}") from exc
+ try:
+ cmd(f"echo 1 > /sys/class/net/{cfg.ifname}/device/sriov_numvfs", shell=True)
+ defer(cmd, f"echo 0 > /sys/class/net/{cfg.ifname}/device/sriov_numvfs", shell=True)
+ except Exception as exc:
+ raise KsftSkipEx(f"Failed to enable SR-IOV on {cfg.ifname}") from exc
+
+ time.sleep(2)
+ vf_ifc = (os.listdir(
+ f"/sys/class/net/{cfg.ifname}/device/virtfn0/net") or [None])[0]
+ if vf_ifc:
+ ip(f"link set dev {vf_ifc} up")
+ else:
+ raise KsftSkipEx("VF interface not found")
+ if set_tc_mapping:
+ cmd(f"tc qdisc add dev {vf_ifc} root handle 5 mqprio mode dcb hw 1 num_tc 8")
+
+ return vf_ifc
+
+
+def setup_vlans_on_vf(vf_ifc):
+ """
+ Sets up two VLAN interfaces on the given VF, each mapped to a different TC.
+ """
+ vlan_configs = [
+ {"vlan_id": 101, "tc": 3, "ip": "198.51.100.1"},
+ {"vlan_id": 102, "tc": 4, "ip": "198.51.100.9"},
+ ]
+
+ for config in vlan_configs:
+ vlan_dev = f"{vf_ifc}.{config['vlan_id']}"
+ ip(f"link add link {vf_ifc} name {vlan_dev} type vlan id {config['vlan_id']}")
+ ip(f"addr add {config['ip']}/29 dev {vlan_dev}")
+ ip(f"link set dev {vlan_dev} up")
+ ip(f"link set dev {vlan_dev} type vlan egress-qos-map 0:{config['tc']}")
+ ksft_pr(f"Created VLAN {vlan_dev} on {vf_ifc} with tc {config['tc']} and IP {config['ip']}")
+
+
+def get_vf_info(cfg):
+ """
+ Finds the VF representor interface and devlink port index
+ for the given PCI device used in the test environment.
+ """
+ cfg.vf_representor = None
+ cfg.vf_port_index = None
+ out = subprocess.check_output(["devlink", "-j", "port", "show"], encoding="utf-8")
+ ports = json.loads(out)["port"]
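+    # Expected JSON shape (abridged, values illustrative):
+    #   {"port": {"pci/0000:08:00.2/1": {"netdev": "eth2",
+    #             "flavour": "pcivf", "vfnum": 0, ...}, ...}}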
+
+ for port_name, props in ports.items():
+ netdev = props.get("netdev")
+
+ if (port_name.startswith(f"pci/{cfg.pci}/") and
+ props.get("vfnum") == 0):
+ cfg.vf_representor = netdev
+ cfg.vf_port_index = int(port_name.split("/")[-1])
+ break
+
+
+def setup_bridge(cfg):
+ """
+ Creates and configures a Linux bridge, with both the uplink
+ and VF representor interfaces attached to it.
+ """
+ bridge_name = f"br_{os.getpid()}"
+ ip(f"link add name {bridge_name} type bridge")
+ defer(cmd, f"ip link del name {bridge_name} type bridge")
+
+ ip(f"link set dev {cfg.ifname} master {bridge_name}")
+
+ rep_name = cfg.vf_representor
+ if rep_name:
+ ip(f"link set dev {rep_name} master {bridge_name}")
+ ip(f"link set dev {rep_name} up")
+ ksft_pr(f"Set representor {rep_name} up and added to bridge")
+ else:
+ raise KsftSkipEx("Could not find representor for the VF")
+
+ ip(f"link set dev {bridge_name} up")
+
+
+def setup_devlink_rate(cfg):
+ """
+ Configures devlink rate tx_max and traffic class bandwidth for the VF.
+ """
+ port_index = cfg.vf_port_index
+ if port_index is None:
+ raise KsftSkipEx("Could not find VF port index")
+ try:
+ cfg.devnl.rate_set({
+ "bus-name": "pci",
+ "dev-name": cfg.pci,
+ "port-index": port_index,
+ "rate-tx-max": 125000000,
+ "rate-tc-bws": [
+ {"index": 0, "bw": 0},
+ {"index": 1, "bw": 0},
+ {"index": 2, "bw": 0},
+ {"index": 3, "bw": 20},
+ {"index": 4, "bw": 80},
+ {"index": 5, "bw": 0},
+ {"index": 6, "bw": 0},
+ {"index": 7, "bw": 0},
+ ]
+ })
+ except NlError as exc:
+ if exc.error == 95: # EOPNOTSUPP
+ raise KsftSkipEx("devlink rate configuration is not supported on the VF") from exc
+ raise KsftFailEx(f"rate_set failed on VF port {port_index}") from exc
+
+
+def setup_remote_vlans(cfg):
+ """
+ Sets up VLAN interfaces on the remote side.
+ """
+ remote_dev = cfg.remote_ifname
+ vlan_ids = [101, 102]
+ remote_ips = ["198.51.100.2", "198.51.100.10"]
+
+ for vlan_id, ip_addr in zip(vlan_ids, remote_ips):
+ vlan_dev = f"{remote_dev}.{vlan_id}"
+ cmd(f"ip link add link {remote_dev} name {vlan_dev} "
+ f"type vlan id {vlan_id}", host=cfg.remote)
+ cmd(f"ip addr add {ip_addr}/29 dev {vlan_dev}", host=cfg.remote)
+ cmd(f"ip link set dev {vlan_dev} up", host=cfg.remote)
+ defer(cmd, f"ip link del {vlan_dev}", host=cfg.remote)
+
+
+def setup_test_environment(cfg, set_tc_mapping=True):
+ """
+ Sets up the complete test environment including VF creation, VLANs,
+ bridge configuration and devlink rate setup.
+ """
+ vf_ifc = setup_vf(cfg, set_tc_mapping)
+ ksft_pr(f"Created VF interface: {vf_ifc}")
+
+ setup_vlans_on_vf(vf_ifc)
+
+ get_vf_info(cfg)
+ setup_bridge(cfg)
+
+ setup_devlink_rate(cfg)
+ setup_remote_vlans(cfg)
+
+
+def measure_bandwidth(cfg, server_ip, client_ip, barrier):
+ """
+ Synchronizes with peers and runs an iperf3-based bandwidth measurement
+ between the given endpoints. Returns average Gbps.
+ """
+ runner = Iperf3Runner(cfg, server_ip=server_ip, client_ip=client_ip)
+ try:
+ barrier.wait(timeout=10)
+ except Exception as exc:
+ raise KsftFailEx("iperf3 barrier wait timed") from exc
+
+ try:
+ bw_gbps = runner.measure_bandwidth(reverse=True)
+ except Exception as exc:
+ raise KsftFailEx("iperf3 bandwidth measurement failed") from exc
+
+ return bw_gbps
+
+
+def run_bandwidth_test(cfg):
+ """
+ Runs parallel bandwidth measurements for each VLAN/TC pair and collects results.
+ """
+ def _run_measure_bandwidth_thread(local_ip, remote_ip, results, barrier, tc_ix):
+ results[tc_ix] = measure_bandwidth(cfg, local_ip, remote_ip, barrier)
+
+ vf_vlan_data = [
+ # (local_ip, remote_ip, TC)
+ ("198.51.100.1", "198.51.100.2", 3),
+ ("198.51.100.9", "198.51.100.10", 4),
+ ]
+
+ results = {}
+ threads = []
+ start_barrier = threading.Barrier(len(vf_vlan_data))
+
+ for local_ip, remote_ip, tc_ix in vf_vlan_data:
+ thread = threading.Thread(
+ target=_run_measure_bandwidth_thread,
+ args=(local_ip, remote_ip, results, start_barrier, tc_ix)
+ )
+ thread.start()
+ threads.append(thread)
+
+ for thread in threads:
+ thread.join()
+
+ for tc_ix, tc_bw in results.items():
+ if tc_bw is None:
+ raise KsftFailEx("iperf3 failed; cannot evaluate bandwidth")
+
+ return results
+
+
+def calculate_bandwidth_percentages(results):
+ """
+ Calculates the percentage of total bandwidth received by TC3 and TC4.
+ """
+ if 3 not in results or 4 not in results:
+ raise KsftFailEx(f"Missing expected TC results in {results}")
+
+ tc3_bw = results[3]
+ tc4_bw = results[4]
+ total_bw = tc3_bw + tc4_bw
+ tc3_percentage = (tc3_bw / total_bw) * 100
+ tc4_percentage = (tc4_bw / total_bw) * 100
+
+ return {
+ 'tc3_bw': tc3_bw,
+ 'tc4_bw': tc4_bw,
+ 'tc3_percentage': tc3_percentage,
+ 'tc4_percentage': tc4_percentage,
+ 'total_bw': total_bw
+ }
+
+
+def print_bandwidth_results(bw_data, test_name):
+ """
+ Prints bandwidth measurements and TC usage summary for a given test.
+ """
+ ksft_pr(f"Bandwidth check results {test_name}:")
+ ksft_pr(f"TC 3: {bw_data['tc3_bw']:.2f} Gbits/sec")
+ ksft_pr(f"TC 4: {bw_data['tc4_bw']:.2f} Gbits/sec")
+ ksft_pr(f"Total bandwidth: {bw_data['total_bw']:.2f} Gbits/sec")
+ ksft_pr(f"TC 3 percentage: {bw_data['tc3_percentage']:.1f}%")
+ ksft_pr(f"TC 4 percentage: {bw_data['tc4_percentage']:.1f}%")
+
+
+def verify_total_bandwidth(bw_data, validator):
+ """
+ Ensures the total measured bandwidth falls within the acceptable tolerance.
+ """
+ total = bw_data['total_bw']
+
+ if validator.bound({"total": total}):
+ return
+
+ low, high = validator.bounds["total"]
+
+ if total < low:
+ raise KsftSkipEx(
+ f"Total bandwidth {total:.2f} Gbps < minimum "
+ f"{low:.2f} Gbps; "
+ f"parent tx_max ({validator.expected_total:.1f} G) "
+ f"not reached, cannot validate share"
+ )
+
+ raise KsftFailEx(
+ f"Total bandwidth {total:.2f} Gbps exceeds allowed ceiling "
+ f"{high:.2f} Gbps "
+ f"(VF tx_max set to {validator.expected_total:.1f} G)"
+ )
+
+
+def run_bandwidth_distribution_test(cfg, set_tc_mapping):
+ """
+ Runs parallel bandwidth measurements for both TCs and collects results.
+ """
+ setup_test_environment(cfg, set_tc_mapping)
+ bandwidths = run_bandwidth_test(cfg)
+ bw_data = calculate_bandwidth_percentages(bandwidths)
+ test_name = "with TC mapping" if set_tc_mapping else "without TC mapping"
+ print_bandwidth_results(bw_data, test_name)
+
+ verify_total_bandwidth(bw_data, cfg.traffic_bw_validator)
+
+ return cfg.tc_bw_validator.bound({"tc3": bw_data['tc3_percentage'],
+ "tc4": bw_data['tc4_percentage']})
+
+
+def test_no_tc_mapping_bandwidth(cfg):
+ """
+ Verifies that bandwidth is not split 20/80 without traffic class mapping.
+ """
+ pass_bw_msg = "Bandwidth is NOT distributed as 20/80 without TC mapping"
+ fail_bw_msg = "Bandwidth matched 20/80 split without TC mapping"
+ is_mlx5 = "driver: mlx5" in ethtool(f"-i {cfg.ifname}").stdout
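+    # mlx5 hardware moves queues between TC schedulers based on the traffic
+    # actually sent (see the module docstring), so the 20/80 split can hold
+    # even without an explicit mqprio mapping; treat that as expected failure.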
+
+ if run_bandwidth_distribution_test(cfg, set_tc_mapping=False):
+ if is_mlx5:
+ raise KsftXfailEx(fail_bw_msg)
+ raise KsftFailEx(fail_bw_msg)
+ if is_mlx5:
+ raise KsftFailEx("mlx5 behavior changed:" + pass_bw_msg)
+ ksft_pr(pass_bw_msg)
+
+
+def test_tc_mapping_bandwidth(cfg):
+ """
+ Verifies that bandwidth is correctly split 20/80 between TC3 and TC4
+ when traffic class mapping is set.
+ """
+ if run_bandwidth_distribution_test(cfg, set_tc_mapping=True):
+ ksft_pr("Bandwidth is distributed as 20/80 with TC mapping")
+ else:
+ raise KsftFailEx("Bandwidth did not match 20/80 split with TC mapping")
+
+
+def main() -> None:
+ """
+ Main entry point for running the test cases.
+ """
+ with NetDrvEpEnv(__file__, nsim_test=False) as cfg:
+ cfg.devnl = DevlinkFamily()
+
+ cfg.pci = os.path.basename(
+ os.path.realpath(f"/sys/class/net/{cfg.ifname}/device")
+ )
+ if not cfg.pci:
+ raise KsftSkipEx("Could not get PCI address of the interface")
+
+ cfg.traffic_bw_validator = BandwidthValidator({"total": 1})
+ cfg.tc_bw_validator = BandwidthValidator({"tc3": 20, "tc4": 80})
+
+ cases = [test_no_tc_mapping_bandwidth, test_tc_mapping_bandwidth]
+
+ ksft_run(cases=cases, args=(cfg,))
+ ksft_exit()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/testing/selftests/drivers/net/hw/devmem.py b/tools/testing/selftests/drivers/net/hw/devmem.py
index 7947650210a0..45c2d49d55b6 100755
--- a/tools/testing/selftests/drivers/net/hw/devmem.py
+++ b/tools/testing/selftests/drivers/net/hw/devmem.py
@@ -24,7 +24,7 @@ def check_rx(cfg) -> None:
require_devmem(cfg)
port = rand_port()
- socat = f"socat -u - TCP{cfg.addr_ipver}:{cfg.addr}:{port},bind={cfg.remote_addr}:{port}"
+ socat = f"socat -u - TCP{cfg.addr_ipver}:{cfg.baddr}:{port},bind={cfg.remote_baddr}:{port}"
listen_cmd = f"{cfg.bin_local} -l -f {cfg.ifname} -s {cfg.addr} -p {port} -c {cfg.remote_addr} -v 7"
with bkg(listen_cmd, exit_wait=True) as ncdevmem:
@@ -42,24 +42,23 @@ def check_tx(cfg) -> None:
port = rand_port()
listen_cmd = f"socat -U - TCP{cfg.addr_ipver}-LISTEN:{port}"
- with bkg(listen_cmd) as socat:
- wait_port_listen(port)
- cmd(f"echo -e \"hello\\nworld\"| {cfg.bin_remote} -f {cfg.ifname} -s {cfg.addr} -p {port}", host=cfg.remote, shell=True)
+ with bkg(listen_cmd, host=cfg.remote, exit_wait=True) as socat:
+ wait_port_listen(port, host=cfg.remote)
+ cmd(f"echo -e \"hello\\nworld\"| {cfg.bin_local} -f {cfg.ifname} -s {cfg.remote_addr} -p {port}", shell=True)
ksft_eq(socat.stdout.strip(), "hello\nworld")
@ksft_disruptive
def check_tx_chunks(cfg) -> None:
- cfg.require_ipver("6")
require_devmem(cfg)
port = rand_port()
- listen_cmd = f"socat -U - TCP6-LISTEN:{port}"
+ listen_cmd = f"socat -U - TCP{cfg.addr_ipver}-LISTEN:{port}"
- with bkg(listen_cmd, exit_wait=True) as socat:
- wait_port_listen(port)
- cmd(f"echo -e \"hello\\nworld\"| {cfg.bin_remote} -f {cfg.ifname} -s {cfg.addr_v['6']} -p {port} -z 3", host=cfg.remote, shell=True)
+ with bkg(listen_cmd, host=cfg.remote, exit_wait=True) as socat:
+ wait_port_listen(port, host=cfg.remote)
+ cmd(f"echo -e \"hello\\nworld\"| {cfg.bin_local} -f {cfg.ifname} -s {cfg.remote_addr} -p {port} -z 3", shell=True)
ksft_eq(socat.stdout.strip(), "hello\nworld")
diff --git a/tools/testing/selftests/drivers/net/hw/iou-zcrx.py b/tools/testing/selftests/drivers/net/hw/iou-zcrx.py
index 9c03fd777f3d..712c806508b5 100755
--- a/tools/testing/selftests/drivers/net/hw/iou-zcrx.py
+++ b/tools/testing/selftests/drivers/net/hw/iou-zcrx.py
@@ -3,37 +3,37 @@
import re
from os import path
-from lib.py import ksft_run, ksft_exit
+from lib.py import ksft_run, ksft_exit, KsftSkipEx
from lib.py import NetDrvEpEnv
from lib.py import bkg, cmd, defer, ethtool, rand_port, wait_port_listen
def _get_current_settings(cfg):
- output = ethtool(f"-g {cfg.ifname}", json=True, host=cfg.remote)[0]
+ output = ethtool(f"-g {cfg.ifname}", json=True)[0]
return (output['rx'], output['hds-thresh'])
def _get_combined_channels(cfg):
- output = ethtool(f"-l {cfg.ifname}", host=cfg.remote).stdout
+ output = ethtool(f"-l {cfg.ifname}").stdout
values = re.findall(r'Combined:\s+(\d+)', output)
return int(values[1])
def _create_rss_ctx(cfg, chan):
- output = ethtool(f"-X {cfg.ifname} context new start {chan} equal 1", host=cfg.remote).stdout
+ output = ethtool(f"-X {cfg.ifname} context new start {chan} equal 1").stdout
values = re.search(r'New RSS context is (\d+)', output).group(1)
ctx_id = int(values)
- return (ctx_id, defer(ethtool, f"-X {cfg.ifname} delete context {ctx_id}", host=cfg.remote))
+ return (ctx_id, defer(ethtool, f"-X {cfg.ifname} delete context {ctx_id}"))
def _set_flow_rule(cfg, port, chan):
- output = ethtool(f"-N {cfg.ifname} flow-type tcp6 dst-port {port} action {chan}", host=cfg.remote).stdout
+ output = ethtool(f"-N {cfg.ifname} flow-type tcp6 dst-port {port} action {chan}").stdout
values = re.search(r'ID (\d+)', output).group(1)
return int(values)
def _set_flow_rule_rss(cfg, port, ctx_id):
- output = ethtool(f"-N {cfg.ifname} flow-type tcp6 dst-port {port} context {ctx_id}", host=cfg.remote).stdout
+ output = ethtool(f"-N {cfg.ifname} flow-type tcp6 dst-port {port} context {ctx_id}").stdout
values = re.search(r'ID (\d+)', output).group(1)
return int(values)
@@ -47,26 +47,26 @@ def test_zcrx(cfg) -> None:
(rx_ring, hds_thresh) = _get_current_settings(cfg)
port = rand_port()
- ethtool(f"-G {cfg.ifname} tcp-data-split on", host=cfg.remote)
- defer(ethtool, f"-G {cfg.ifname} tcp-data-split auto", host=cfg.remote)
+ ethtool(f"-G {cfg.ifname} tcp-data-split on")
+ defer(ethtool, f"-G {cfg.ifname} tcp-data-split auto")
- ethtool(f"-G {cfg.ifname} hds-thresh 0", host=cfg.remote)
- defer(ethtool, f"-G {cfg.ifname} hds-thresh {hds_thresh}", host=cfg.remote)
+ ethtool(f"-G {cfg.ifname} hds-thresh 0")
+ defer(ethtool, f"-G {cfg.ifname} hds-thresh {hds_thresh}")
- ethtool(f"-G {cfg.ifname} rx 64", host=cfg.remote)
- defer(ethtool, f"-G {cfg.ifname} rx {rx_ring}", host=cfg.remote)
+ ethtool(f"-G {cfg.ifname} rx 64")
+ defer(ethtool, f"-G {cfg.ifname} rx {rx_ring}")
- ethtool(f"-X {cfg.ifname} equal {combined_chans - 1}", host=cfg.remote)
- defer(ethtool, f"-X {cfg.ifname} default", host=cfg.remote)
+ ethtool(f"-X {cfg.ifname} equal {combined_chans - 1}")
+ defer(ethtool, f"-X {cfg.ifname} default")
flow_rule_id = _set_flow_rule(cfg, port, combined_chans - 1)
- defer(ethtool, f"-N {cfg.ifname} delete {flow_rule_id}", host=cfg.remote)
+ defer(ethtool, f"-N {cfg.ifname} delete {flow_rule_id}")
- rx_cmd = f"{cfg.bin_remote} -s -p {port} -i {cfg.ifname} -q {combined_chans - 1}"
- tx_cmd = f"{cfg.bin_local} -c -h {cfg.remote_addr_v['6']} -p {port} -l 12840"
- with bkg(rx_cmd, host=cfg.remote, exit_wait=True):
- wait_port_listen(port, proto="tcp", host=cfg.remote)
- cmd(tx_cmd)
+ rx_cmd = f"{cfg.bin_local} -s -p {port} -i {cfg.ifname} -q {combined_chans - 1}"
+ tx_cmd = f"{cfg.bin_remote} -c -h {cfg.addr_v['6']} -p {port} -l 12840"
+ with bkg(rx_cmd, exit_wait=True):
+ wait_port_listen(port, proto="tcp")
+ cmd(tx_cmd, host=cfg.remote)
def test_zcrx_oneshot(cfg) -> None:
@@ -78,26 +78,26 @@ def test_zcrx_oneshot(cfg) -> None:
(rx_ring, hds_thresh) = _get_current_settings(cfg)
port = rand_port()
- ethtool(f"-G {cfg.ifname} tcp-data-split on", host=cfg.remote)
- defer(ethtool, f"-G {cfg.ifname} tcp-data-split auto", host=cfg.remote)
+ ethtool(f"-G {cfg.ifname} tcp-data-split on")
+ defer(ethtool, f"-G {cfg.ifname} tcp-data-split auto")
- ethtool(f"-G {cfg.ifname} hds-thresh 0", host=cfg.remote)
- defer(ethtool, f"-G {cfg.ifname} hds-thresh {hds_thresh}", host=cfg.remote)
+ ethtool(f"-G {cfg.ifname} hds-thresh 0")
+ defer(ethtool, f"-G {cfg.ifname} hds-thresh {hds_thresh}")
- ethtool(f"-G {cfg.ifname} rx 64", host=cfg.remote)
- defer(ethtool, f"-G {cfg.ifname} rx {rx_ring}", host=cfg.remote)
+ ethtool(f"-G {cfg.ifname} rx 64")
+ defer(ethtool, f"-G {cfg.ifname} rx {rx_ring}")
- ethtool(f"-X {cfg.ifname} equal {combined_chans - 1}", host=cfg.remote)
- defer(ethtool, f"-X {cfg.ifname} default", host=cfg.remote)
+ ethtool(f"-X {cfg.ifname} equal {combined_chans - 1}")
+ defer(ethtool, f"-X {cfg.ifname} default")
flow_rule_id = _set_flow_rule(cfg, port, combined_chans - 1)
- defer(ethtool, f"-N {cfg.ifname} delete {flow_rule_id}", host=cfg.remote)
+ defer(ethtool, f"-N {cfg.ifname} delete {flow_rule_id}")
- rx_cmd = f"{cfg.bin_remote} -s -p {port} -i {cfg.ifname} -q {combined_chans - 1} -o 4"
- tx_cmd = f"{cfg.bin_local} -c -h {cfg.remote_addr_v['6']} -p {port} -l 4096 -z 16384"
- with bkg(rx_cmd, host=cfg.remote, exit_wait=True):
- wait_port_listen(port, proto="tcp", host=cfg.remote)
- cmd(tx_cmd)
+ rx_cmd = f"{cfg.bin_local} -s -p {port} -i {cfg.ifname} -q {combined_chans - 1} -o 4"
+ tx_cmd = f"{cfg.bin_remote} -c -h {cfg.addr_v['6']} -p {port} -l 4096 -z 16384"
+ with bkg(rx_cmd, exit_wait=True):
+ wait_port_listen(port, proto="tcp")
+ cmd(tx_cmd, host=cfg.remote)
def test_zcrx_rss(cfg) -> None:
@@ -109,27 +109,27 @@ def test_zcrx_rss(cfg) -> None:
(rx_ring, hds_thresh) = _get_current_settings(cfg)
port = rand_port()
- ethtool(f"-G {cfg.ifname} tcp-data-split on", host=cfg.remote)
- defer(ethtool, f"-G {cfg.ifname} tcp-data-split auto", host=cfg.remote)
+ ethtool(f"-G {cfg.ifname} tcp-data-split on")
+ defer(ethtool, f"-G {cfg.ifname} tcp-data-split auto")
- ethtool(f"-G {cfg.ifname} hds-thresh 0", host=cfg.remote)
- defer(ethtool, f"-G {cfg.ifname} hds-thresh {hds_thresh}", host=cfg.remote)
+ ethtool(f"-G {cfg.ifname} hds-thresh 0")
+ defer(ethtool, f"-G {cfg.ifname} hds-thresh {hds_thresh}")
- ethtool(f"-G {cfg.ifname} rx 64", host=cfg.remote)
- defer(ethtool, f"-G {cfg.ifname} rx {rx_ring}", host=cfg.remote)
+ ethtool(f"-G {cfg.ifname} rx 64")
+ defer(ethtool, f"-G {cfg.ifname} rx {rx_ring}")
- ethtool(f"-X {cfg.ifname} equal {combined_chans - 1}", host=cfg.remote)
- defer(ethtool, f"-X {cfg.ifname} default", host=cfg.remote)
+ ethtool(f"-X {cfg.ifname} equal {combined_chans - 1}")
+ defer(ethtool, f"-X {cfg.ifname} default")
(ctx_id, delete_ctx) = _create_rss_ctx(cfg, combined_chans - 1)
flow_rule_id = _set_flow_rule_rss(cfg, port, ctx_id)
- defer(ethtool, f"-N {cfg.ifname} delete {flow_rule_id}", host=cfg.remote)
+ defer(ethtool, f"-N {cfg.ifname} delete {flow_rule_id}")
- rx_cmd = f"{cfg.bin_remote} -s -p {port} -i {cfg.ifname} -q {combined_chans - 1}"
- tx_cmd = f"{cfg.bin_local} -c -h {cfg.remote_addr_v['6']} -p {port} -l 12840"
- with bkg(rx_cmd, host=cfg.remote, exit_wait=True):
- wait_port_listen(port, proto="tcp", host=cfg.remote)
- cmd(tx_cmd)
+ rx_cmd = f"{cfg.bin_local} -s -p {port} -i {cfg.ifname} -q {combined_chans - 1}"
+ tx_cmd = f"{cfg.bin_remote} -c -h {cfg.addr_v['6']} -p {port} -l 12840"
+ with bkg(rx_cmd, exit_wait=True):
+ wait_port_listen(port, proto="tcp")
+ cmd(tx_cmd, host=cfg.remote)
def main() -> None:
diff --git a/tools/testing/selftests/drivers/net/hw/lib/py/__init__.py b/tools/testing/selftests/drivers/net/hw/lib/py/__init__.py
index b582885786f5..766bfc4ad842 100644
--- a/tools/testing/selftests/drivers/net/hw/lib/py/__init__.py
+++ b/tools/testing/selftests/drivers/net/hw/lib/py/__init__.py
@@ -1,5 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
+"""
+Driver test environment (hardware-only tests).
+NetDrvEnv and NetDrvEpEnv are the main environment classes.
+The former is for local-host-only tests; the latter creates / connects
+to a remote endpoint. See the NIPA wiki for more information about
+running and writing driver tests.
+"""
+
import sys
from pathlib import Path
@@ -7,10 +15,38 @@ KSFT_DIR = (Path(__file__).parent / "../../../../..").resolve()
try:
sys.path.append(KSFT_DIR.as_posix())
- from net.lib.py import *
- from drivers.net.lib.py import *
+
+ # Import one by one to avoid pylint false positives
+ from net.lib.py import NetNS, NetNSEnter, NetdevSimDev
+ from net.lib.py import EthtoolFamily, NetdevFamily, NetshaperFamily, \
+ NlError, RtnlFamily, DevlinkFamily, PSPFamily
+ from net.lib.py import CmdExitFailure
+ from net.lib.py import bkg, cmd, bpftool, bpftrace, defer, ethtool, \
+ fd_read_timeout, ip, rand_port, wait_port_listen, wait_file
+ from net.lib.py import KsftSkipEx, KsftFailEx, KsftXfailEx
+ from net.lib.py import ksft_disruptive, ksft_exit, ksft_pr, ksft_run, \
+ ksft_setup, ksft_variants, KsftNamedVariant
+ from net.lib.py import ksft_eq, ksft_ge, ksft_in, ksft_is, ksft_lt, \
+ ksft_ne, ksft_not_in, ksft_raises, ksft_true, ksft_gt, ksft_not_none
+ from drivers.net.lib.py import GenerateTraffic, Remote, Iperf3Runner
+ from drivers.net.lib.py import NetDrvEnv, NetDrvEpEnv
+
+ __all__ = ["NetNS", "NetNSEnter", "NetdevSimDev",
+ "EthtoolFamily", "NetdevFamily", "NetshaperFamily",
+ "NlError", "RtnlFamily", "DevlinkFamily", "PSPFamily",
+ "CmdExitFailure",
+ "bkg", "cmd", "bpftool", "bpftrace", "defer", "ethtool",
+ "fd_read_timeout", "ip", "rand_port",
+ "wait_port_listen", "wait_file",
+ "KsftSkipEx", "KsftFailEx", "KsftXfailEx",
+ "ksft_disruptive", "ksft_exit", "ksft_pr", "ksft_run",
+ "ksft_setup", "ksft_variants", "KsftNamedVariant",
+ "ksft_eq", "ksft_ge", "ksft_in", "ksft_is", "ksft_lt",
+ "ksft_ne", "ksft_not_in", "ksft_raises", "ksft_true", "ksft_gt",
+ "ksft_not_none", "ksft_not_none",
+ "NetDrvEnv", "NetDrvEpEnv", "GenerateTraffic", "Remote",
+ "Iperf3Runner"]
except ModuleNotFoundError as e:
- ksft_pr("Failed importing `net` library from kernel sources")
- ksft_pr(str(e))
- ktap_result(True, comment="SKIP")
+ print("Failed importing `net` library from kernel sources")
+ print(str(e))
sys.exit(4)
diff --git a/tools/testing/selftests/drivers/net/hw/ncdevmem.c b/tools/testing/selftests/drivers/net/hw/ncdevmem.c
index 02e4d3d7ded2..3288ed04ce08 100644
--- a/tools/testing/selftests/drivers/net/hw/ncdevmem.c
+++ b/tools/testing/selftests/drivers/net/hw/ncdevmem.c
@@ -39,6 +39,7 @@
#define __EXPORTED_HEADERS__
#include <linux/uio.h>
+#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
@@ -97,6 +98,10 @@ static unsigned int dmabuf_id;
static uint32_t tx_dmabuf_id;
static int waittime_ms = 500;
+/* System state loaded by current_config_load() */
+#define MAX_FLOWS 8
+static int ntuple_ids[MAX_FLOWS] = { -1, -1, -1, -1, -1, -1, -1, -1, };
+
struct memory_buffer {
int fd;
size_t size;
@@ -115,6 +120,21 @@ struct memory_provider {
size_t off, int n);
};
+static void pr_err(const char *fmt, ...)
+{
+ va_list args;
+
+ fprintf(stderr, "%s: ", TEST_PREFIX);
+
+ va_start(args, fmt);
+ vfprintf(stderr, fmt, args);
+ va_end(args);
+
+ if (errno != 0)
+ fprintf(stderr, ": %s", strerror(errno));
+ fprintf(stderr, "\n");
+}
+
static struct memory_buffer *udmabuf_alloc(size_t size)
{
struct udmabuf_create create;
@@ -123,27 +143,33 @@ static struct memory_buffer *udmabuf_alloc(size_t size)
ctx = malloc(sizeof(*ctx));
if (!ctx)
- error(1, ENOMEM, "malloc failed");
+ return NULL;
ctx->size = size;
ctx->devfd = open("/dev/udmabuf", O_RDWR);
- if (ctx->devfd < 0)
- error(1, errno,
- "%s: [skip,no-udmabuf: Unable to access DMA buffer device file]\n",
- TEST_PREFIX);
+ if (ctx->devfd < 0) {
+ pr_err("[skip,no-udmabuf: Unable to access DMA buffer device file]");
+ goto err_free_ctx;
+ }
ctx->memfd = memfd_create("udmabuf-test", MFD_ALLOW_SEALING);
- if (ctx->memfd < 0)
- error(1, errno, "%s: [skip,no-memfd]\n", TEST_PREFIX);
+ if (ctx->memfd < 0) {
+ pr_err("[skip,no-memfd]");
+ goto err_close_dev;
+ }
ret = fcntl(ctx->memfd, F_ADD_SEALS, F_SEAL_SHRINK);
- if (ret < 0)
- error(1, errno, "%s: [skip,fcntl-add-seals]\n", TEST_PREFIX);
+ if (ret < 0) {
+ pr_err("[skip,fcntl-add-seals]");
+ goto err_close_memfd;
+ }
ret = ftruncate(ctx->memfd, size);
- if (ret == -1)
- error(1, errno, "%s: [FAIL,memfd-truncate]\n", TEST_PREFIX);
+ if (ret == -1) {
+ pr_err("[FAIL,memfd-truncate]");
+ goto err_close_memfd;
+ }
memset(&create, 0, sizeof(create));
@@ -151,15 +177,29 @@ static struct memory_buffer *udmabuf_alloc(size_t size)
create.offset = 0;
create.size = size;
ctx->fd = ioctl(ctx->devfd, UDMABUF_CREATE, &create);
- if (ctx->fd < 0)
- error(1, errno, "%s: [FAIL, create udmabuf]\n", TEST_PREFIX);
+ if (ctx->fd < 0) {
+ pr_err("[FAIL, create udmabuf]");
+		goto err_close_memfd;
+ }
ctx->buf_mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
ctx->fd, 0);
- if (ctx->buf_mem == MAP_FAILED)
- error(1, errno, "%s: [FAIL, map udmabuf]\n", TEST_PREFIX);
+ if (ctx->buf_mem == MAP_FAILED) {
+ pr_err("[FAIL, map udmabuf]");
+ goto err_close_fd;
+ }
return ctx;
+
+err_close_fd:
+ close(ctx->fd);
+err_close_memfd:
+ close(ctx->memfd);
+err_close_dev:
+ close(ctx->devfd);
+err_free_ctx:
+ free(ctx);
+ return NULL;
}
static void udmabuf_free(struct memory_buffer *ctx)
@@ -217,7 +257,7 @@ static void print_nonzero_bytes(void *ptr, size_t size)
putchar(p[i]);
}
-void validate_buffer(void *line, size_t size)
+int validate_buffer(void *line, size_t size)
{
static unsigned char seed = 1;
unsigned char *ptr = line;
@@ -232,8 +272,10 @@ void validate_buffer(void *line, size_t size)
"Failed validation: expected=%u, actual=%u, index=%lu\n",
expected, ptr[i], i);
errors++;
- if (errors > 20)
- error(1, 0, "validation failed.");
+ if (errors > 20) {
+ pr_err("validation failed");
+ return -1;
+ }
}
seed++;
if (seed == do_validation)
@@ -241,6 +283,86 @@ void validate_buffer(void *line, size_t size)
}
fprintf(stdout, "Validated buffer\n");
+ return 0;
+}
+
+static int
+__run_command(char *out, size_t outlen, const char *cmd, va_list args)
+{
+ char command[256];
+ FILE *fp;
+
+ vsnprintf(command, sizeof(command), cmd, args);
+
+ fprintf(stderr, "Running: %s\n", command);
+ fp = popen(command, "r");
+ if (!fp)
+ return -1;
+ if (out) {
+ size_t len;
+
+		if (!fgets(out, outlen, fp)) {
+			pclose(fp);
+			return -1;
+		}
+
+ /* Remove trailing newline if present */
+ len = strlen(out);
+ if (len && out[len - 1] == '\n')
+ out[len - 1] = '\0';
+ }
+ return pclose(fp);
+}
+
+static int run_command(const char *cmd, ...)
+{
+ va_list args;
+ int ret;
+
+ va_start(args, cmd);
+ ret = __run_command(NULL, 0, cmd, args);
+ va_end(args);
+
+ return ret;
+}
+
+static int ethtool_add_flow(const char *format, ...)
+{
+ char local_output[256], cmd[256];
+ const char *id_start;
+ int flow_idx, ret;
+ char *endptr;
+ long flow_id;
+ va_list args;
+
+ for (flow_idx = 0; flow_idx < MAX_FLOWS; flow_idx++)
+ if (ntuple_ids[flow_idx] == -1)
+ break;
+ if (flow_idx == MAX_FLOWS) {
+ fprintf(stderr, "Error: too many flows\n");
+ return -1;
+ }
+
+ snprintf(cmd, sizeof(cmd), "ethtool -N %s %s", ifname, format);
+
+ va_start(args, format);
+ ret = __run_command(local_output, sizeof(local_output), cmd, args);
+ va_end(args);
+
+ if (ret != 0)
+ return ret;
+
+ /* Extract the ID from the output */
+ id_start = strstr(local_output, "Added rule with ID ");
+ if (!id_start)
+ return -1;
+ id_start += strlen("Added rule with ID ");
+
+ flow_id = strtol(id_start, &endptr, 10);
+ if (endptr == id_start || flow_id < 0 || flow_id > INT_MAX)
+ return -1;
+
+ fprintf(stderr, "Added flow rule with ID %ld\n", flow_id);
+ ntuple_ids[flow_idx] = flow_id;
+ return flow_id;
}
static int rxq_num(int ifindex)
@@ -270,29 +392,17 @@ static int rxq_num(int ifindex)
return num;
}
-#define run_command(cmd, ...) \
- ({ \
- char command[256]; \
- memset(command, 0, sizeof(command)); \
- snprintf(command, sizeof(command), cmd, ##__VA_ARGS__); \
- fprintf(stderr, "Running: %s\n", command); \
- system(command); \
- })
-
-static int reset_flow_steering(void)
+static void reset_flow_steering(void)
{
- /* Depending on the NIC, toggling ntuple off and on might not
- * be allowed. Additionally, attempting to delete existing filters
- * will fail if no filters are present. Therefore, do not enforce
- * the exit status.
- */
-
- run_command("sudo ethtool -K %s ntuple off >&2", ifname);
- run_command("sudo ethtool -K %s ntuple on >&2", ifname);
- run_command(
- "sudo ethtool -n %s | grep 'Filter:' | awk '{print $2}' | xargs -n1 ethtool -N %s delete >&2",
- ifname, ifname);
- return 0;
+ int i;
+
+ for (i = 0; i < MAX_FLOWS; i++) {
+ if (ntuple_ids[i] == -1)
+ continue;
+ run_command("ethtool -N %s delete %d",
+ ifname, ntuple_ids[i]);
+ ntuple_ids[i] = -1;
+ }
}
static const char *tcp_data_split_str(int val)
@@ -309,7 +419,81 @@ static const char *tcp_data_split_str(int val)
}
}
-static int configure_headersplit(bool on)
+static struct ethtool_rings_get_rsp *get_ring_config(void)
+{
+ struct ethtool_rings_get_req *get_req;
+ struct ethtool_rings_get_rsp *get_rsp;
+ struct ynl_error yerr;
+ struct ynl_sock *ys;
+
+ ys = ynl_sock_create(&ynl_ethtool_family, &yerr);
+ if (!ys) {
+ fprintf(stderr, "YNL: %s\n", yerr.msg);
+ return NULL;
+ }
+
+ get_req = ethtool_rings_get_req_alloc();
+ ethtool_rings_get_req_set_header_dev_index(get_req, ifindex);
+ get_rsp = ethtool_rings_get(ys, get_req);
+ ethtool_rings_get_req_free(get_req);
+
+ ynl_sock_destroy(ys);
+
+ return get_rsp;
+}
+
+static void restore_ring_config(const struct ethtool_rings_get_rsp *config)
+{
+ struct ethtool_rings_get_req *get_req;
+ struct ethtool_rings_get_rsp *get_rsp;
+ struct ethtool_rings_set_req *req;
+ struct ynl_error yerr;
+ struct ynl_sock *ys;
+ int ret;
+
+ if (!config)
+ return;
+
+ ys = ynl_sock_create(&ynl_ethtool_family, &yerr);
+ if (!ys) {
+ fprintf(stderr, "YNL: %s\n", yerr.msg);
+ return;
+ }
+
+ req = ethtool_rings_set_req_alloc();
+ ethtool_rings_set_req_set_header_dev_index(req, ifindex);
+ ethtool_rings_set_req_set_tcp_data_split(req,
+ ETHTOOL_TCP_DATA_SPLIT_UNKNOWN);
+ if (config->_present.hds_thresh)
+ ethtool_rings_set_req_set_hds_thresh(req, config->hds_thresh);
+
+ ret = ethtool_rings_set(ys, req);
+ if (ret < 0)
+ fprintf(stderr, "YNL restoring HDS cfg: %s\n", ys->err.msg);
+
+ get_req = ethtool_rings_get_req_alloc();
+ ethtool_rings_get_req_set_header_dev_index(get_req, ifindex);
+ get_rsp = ethtool_rings_get(ys, get_req);
+ ethtool_rings_get_req_free(get_req);
+
+	/* use explicit value if UNKNOWN didn't give us the previous */
+ if (get_rsp->tcp_data_split != config->tcp_data_split) {
+ ethtool_rings_set_req_set_tcp_data_split(req,
+ config->tcp_data_split);
+ ret = ethtool_rings_set(ys, req);
+ if (ret < 0)
+ fprintf(stderr, "YNL restoring expl HDS cfg: %s\n",
+ ys->err.msg);
+ }
+
+ ethtool_rings_get_rsp_free(get_rsp);
+ ethtool_rings_set_req_free(req);
+
+ ynl_sock_destroy(ys);
+}
+
+static int
+configure_headersplit(const struct ethtool_rings_get_rsp *old, bool on)
{
struct ethtool_rings_get_req *get_req;
struct ethtool_rings_get_rsp *get_rsp;
@@ -326,8 +510,15 @@ static int configure_headersplit(bool on)
req = ethtool_rings_set_req_alloc();
ethtool_rings_set_req_set_header_dev_index(req, ifindex);
- /* 0 - off, 1 - auto, 2 - on */
- ethtool_rings_set_req_set_tcp_data_split(req, on ? 2 : 0);
+ if (on) {
+ ethtool_rings_set_req_set_tcp_data_split(req,
+ ETHTOOL_TCP_DATA_SPLIT_ENABLED);
+ if (old->_present.hds_thresh)
+ ethtool_rings_set_req_set_hds_thresh(req, 0);
+ } else {
+ ethtool_rings_set_req_set_tcp_data_split(req,
+ ETHTOOL_TCP_DATA_SPLIT_UNKNOWN);
+ }
ret = ethtool_rings_set(ys, req);
if (ret < 0)
fprintf(stderr, "YNL failed: %s\n", ys->err.msg);
@@ -351,12 +542,103 @@ static int configure_headersplit(bool on)
static int configure_rss(void)
{
- return run_command("sudo ethtool -X %s equal %d >&2", ifname, start_queue);
+ return run_command("ethtool -X %s equal %d >&2", ifname, start_queue);
}
-static int configure_channels(unsigned int rx, unsigned int tx)
+static void reset_rss(void)
{
- return run_command("sudo ethtool -L %s rx %u tx %u", ifname, rx, tx);
+	run_command("ethtool -X %s default >&2", ifname);
+}
+
+static int check_changing_channels(unsigned int rx, unsigned int tx)
+{
+ struct ethtool_channels_get_req *gchan;
+ struct ethtool_channels_set_req *schan;
+ struct ethtool_channels_get_rsp *chan;
+ struct ynl_error yerr;
+ struct ynl_sock *ys;
+ int ret;
+
+ fprintf(stderr, "setting channel count rx:%u tx:%u\n", rx, tx);
+
+ ys = ynl_sock_create(&ynl_ethtool_family, &yerr);
+ if (!ys) {
+ fprintf(stderr, "YNL: %s\n", yerr.msg);
+ return -1;
+ }
+
+ gchan = ethtool_channels_get_req_alloc();
+ if (!gchan) {
+ ret = -1;
+ goto exit_close_sock;
+ }
+
+ ethtool_channels_get_req_set_header_dev_index(gchan, ifindex);
+ chan = ethtool_channels_get(ys, gchan);
+ ethtool_channels_get_req_free(gchan);
+ if (!chan) {
+ fprintf(stderr, "YNL get channels: %s\n", ys->err.msg);
+ ret = -1;
+ goto exit_close_sock;
+ }
+
+ schan = ethtool_channels_set_req_alloc();
+ if (!schan) {
+ ret = -1;
+ goto exit_free_chan;
+ }
+
+ ethtool_channels_set_req_set_header_dev_index(schan, ifindex);
+
+ if (chan->_present.combined_count) {
+ if (chan->_present.rx_count || chan->_present.tx_count) {
+ ethtool_channels_set_req_set_rx_count(schan, 0);
+ ethtool_channels_set_req_set_tx_count(schan, 0);
+ }
+
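+		/*
+		 * Split the request into combined plus extra channels, e.g.
+		 * rx=8 tx=4 -> combined=4, rx_count=4;
+		 * rx=4 tx=8 -> combined=4, tx_count=4.
+		 */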
+ if (rx == tx) {
+ ethtool_channels_set_req_set_combined_count(schan, rx);
+ } else if (rx > tx) {
+ ethtool_channels_set_req_set_combined_count(schan, tx);
+ ethtool_channels_set_req_set_rx_count(schan, rx - tx);
+ } else {
+ ethtool_channels_set_req_set_combined_count(schan, rx);
+ ethtool_channels_set_req_set_tx_count(schan, tx - rx);
+ }
+
+ } else if (chan->_present.rx_count) {
+ ethtool_channels_set_req_set_rx_count(schan, rx);
+ ethtool_channels_set_req_set_tx_count(schan, tx);
+ } else {
+ fprintf(stderr, "Error: device has neither combined nor rx channels\n");
+ ret = -1;
+ goto exit_free_schan;
+ }
+
+ ret = ethtool_channels_set(ys, schan);
+ if (ret) {
+ fprintf(stderr, "YNL set channels: %s\n", ys->err.msg);
+ } else {
+ /* We were expecting a failure, go back to previous settings */
+ ethtool_channels_set_req_set_combined_count(schan,
+ chan->combined_count);
+ ethtool_channels_set_req_set_rx_count(schan, chan->rx_count);
+ ethtool_channels_set_req_set_tx_count(schan, chan->tx_count);
+
+ ret = ethtool_channels_set(ys, schan);
+ if (ret)
+ fprintf(stderr, "YNL un-setting channels: %s\n",
+ ys->err.msg);
+ }
+
+exit_free_schan:
+ ethtool_channels_set_req_free(schan);
+exit_free_chan:
+ ethtool_channels_get_rsp_free(chan);
+exit_close_sock:
+ ynl_sock_destroy(ys);
+
+ return ret;
}
static int configure_flow_steering(struct sockaddr_in6 *server_sin)
@@ -364,6 +646,7 @@ static int configure_flow_steering(struct sockaddr_in6 *server_sin)
const char *type = "tcp6";
const char *server_addr;
char buf[40];
+ int flow_id;
inet_ntop(AF_INET6, &server_sin->sin6_addr, buf, sizeof(buf));
server_addr = buf;
@@ -374,23 +657,22 @@ static int configure_flow_steering(struct sockaddr_in6 *server_sin)
}
/* Try configure 5-tuple */
- if (run_command("sudo ethtool -N %s flow-type %s %s %s dst-ip %s %s %s dst-port %s queue %d >&2",
- ifname,
- type,
- client_ip ? "src-ip" : "",
- client_ip ?: "",
- server_addr,
- client_ip ? "src-port" : "",
- client_ip ? port : "",
- port, start_queue))
+ flow_id = ethtool_add_flow("flow-type %s %s %s dst-ip %s %s %s dst-port %s queue %d",
+ type,
+ client_ip ? "src-ip" : "",
+ client_ip ?: "",
+ server_addr,
+ client_ip ? "src-port" : "",
+ client_ip ? port : "",
+ port, start_queue);
+ if (flow_id < 0) {
/* If that fails, try configure 3-tuple */
- if (run_command("sudo ethtool -N %s flow-type %s dst-ip %s dst-port %s queue %d >&2",
- ifname,
- type,
- server_addr,
- port, start_queue))
+ flow_id = ethtool_add_flow("flow-type %s dst-ip %s dst-port %s queue %d",
+ type, server_addr, port, start_queue);
+ if (flow_id < 0)
/* If that fails, return error */
return -1;
+ }
return 0;
}
@@ -405,6 +687,7 @@ static int bind_rx_queue(unsigned int ifindex, unsigned int dmabuf_fd,
*ys = ynl_sock_create(&ynl_netdev_family, &yerr);
if (!*ys) {
+ netdev_queue_id_free(queues);
fprintf(stderr, "YNL: %s\n", yerr.msg);
return -1;
}
@@ -483,18 +766,24 @@ err_close:
return -1;
}
-static void enable_reuseaddr(int fd)
+static int enable_reuseaddr(int fd)
{
int opt = 1;
int ret;
ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt));
- if (ret)
- error(1, errno, "%s: [FAIL, SO_REUSEPORT]\n", TEST_PREFIX);
+ if (ret) {
+ pr_err("SO_REUSEPORT failed");
+ return -1;
+ }
ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt));
- if (ret)
- error(1, errno, "%s: [FAIL, SO_REUSEADDR]\n", TEST_PREFIX);
+ if (ret) {
+ pr_err("SO_REUSEADDR failed");
+ return -1;
+ }
+
+ return 0;
}
static int parse_address(const char *str, int port, struct sockaddr_in6 *sin6)
@@ -526,12 +815,10 @@ static struct netdev_queue_id *create_queues(void)
struct netdev_queue_id *queues;
size_t i = 0;
- queues = calloc(num_queues, sizeof(*queues));
+ queues = netdev_queue_id_alloc(num_queues);
for (i = 0; i < num_queues; i++) {
- queues[i]._present.type = 1;
- queues[i]._present.id = 1;
- queues[i].type = NETDEV_QUEUE_TYPE_RX;
- queues[i].id = start_queue + i;
+ netdev_queue_id_set_type(&queues[i], NETDEV_QUEUE_TYPE_RX);
+ netdev_queue_id_set_id(&queues[i], start_queue + i);
}
return queues;
@@ -539,6 +826,7 @@ static struct netdev_queue_id *create_queues(void)
static int do_server(struct memory_buffer *mem)
{
+ struct ethtool_rings_get_rsp *ring_config;
char ctrl_data[sizeof(int) * 20000];
size_t non_page_aligned_frags = 0;
struct sockaddr_in6 client_addr;
@@ -550,54 +838,72 @@ static int do_server(struct memory_buffer *mem)
char *tmp_mem = NULL;
struct ynl_sock *ys;
char iobuf[819200];
+ int ret, err = -1;
char buffer[256];
int socket_fd;
int client_fd;
- int ret;
ret = parse_address(server_ip, atoi(port), &server_sin);
- if (ret < 0)
- error(1, 0, "parse server address");
+ if (ret < 0) {
+ pr_err("parse server address");
+ return -1;
+ }
- if (reset_flow_steering())
- error(1, 0, "Failed to reset flow steering\n");
+ ring_config = get_ring_config();
+ if (!ring_config) {
+ pr_err("Failed to get current ring configuration");
+ return -1;
+ }
- if (configure_headersplit(1))
- error(1, 0, "Failed to enable TCP header split\n");
+ if (configure_headersplit(ring_config, 1)) {
+ pr_err("Failed to enable TCP header split");
+ goto err_free_ring_config;
+ }
/* Configure RSS to divert all traffic from our devmem queues */
- if (configure_rss())
- error(1, 0, "Failed to configure rss\n");
+ if (configure_rss()) {
+ pr_err("Failed to configure rss");
+ goto err_reset_headersplit;
+ }
/* Flow steer our devmem flows to start_queue */
- if (configure_flow_steering(&server_sin))
- error(1, 0, "Failed to configure flow steering\n");
-
- sleep(1);
+ if (configure_flow_steering(&server_sin)) {
+ pr_err("Failed to configure flow steering");
+ goto err_reset_rss;
+ }
- if (bind_rx_queue(ifindex, mem->fd, create_queues(), num_queues, &ys))
- error(1, 0, "Failed to bind\n");
+ if (bind_rx_queue(ifindex, mem->fd, create_queues(), num_queues, &ys)) {
+ pr_err("Failed to bind");
+ goto err_reset_flow_steering;
+ }
tmp_mem = malloc(mem->size);
if (!tmp_mem)
- error(1, ENOMEM, "malloc failed");
+ goto err_unbind;
socket_fd = socket(AF_INET6, SOCK_STREAM, 0);
- if (socket_fd < 0)
- error(1, errno, "%s: [FAIL, create socket]\n", TEST_PREFIX);
+ if (socket_fd < 0) {
+ pr_err("Failed to create socket");
+ goto err_free_tmp;
+ }
- enable_reuseaddr(socket_fd);
+ if (enable_reuseaddr(socket_fd))
+ goto err_close_socket;
fprintf(stderr, "binding to address %s:%d\n", server_ip,
ntohs(server_sin.sin6_port));
ret = bind(socket_fd, &server_sin, sizeof(server_sin));
- if (ret)
- error(1, errno, "%s: [FAIL, bind]\n", TEST_PREFIX);
+ if (ret) {
+ pr_err("Failed to bind");
+ goto err_close_socket;
+ }
ret = listen(socket_fd, 1);
- if (ret)
- error(1, errno, "%s: [FAIL, listen]\n", TEST_PREFIX);
+ if (ret) {
+ pr_err("Failed to listen");
+ goto err_close_socket;
+ }
client_addr_len = sizeof(client_addr);
@@ -606,6 +912,10 @@ static int do_server(struct memory_buffer *mem)
fprintf(stderr, "Waiting or connection on %s:%d\n", buffer,
ntohs(server_sin.sin6_port));
client_fd = accept(socket_fd, &client_addr, &client_addr_len);
+ if (client_fd < 0) {
+ pr_err("Failed to accept");
+ goto err_close_socket;
+ }
inet_ntop(AF_INET6, &client_addr.sin6_addr, buffer,
sizeof(buffer));
@@ -633,10 +943,15 @@ static int do_server(struct memory_buffer *mem)
continue;
if (ret < 0) {
perror("recvmsg");
+ if (errno == EFAULT) {
+ pr_err("received EFAULT, won't recover");
+ goto err_close_client;
+ }
continue;
}
if (ret == 0) {
- fprintf(stderr, "client exited\n");
+ errno = 0;
+ pr_err("client exited");
goto cleanup;
}
@@ -674,9 +989,10 @@ static int do_server(struct memory_buffer *mem)
dmabuf_cmsg->frag_size, dmabuf_cmsg->frag_token,
total_received, dmabuf_cmsg->dmabuf_id);
- if (dmabuf_cmsg->dmabuf_id != dmabuf_id)
- error(1, 0,
- "received on wrong dmabuf_id: flow steering error\n");
+ if (dmabuf_cmsg->dmabuf_id != dmabuf_id) {
+ pr_err("received on wrong dmabuf_id: flow steering error");
+ goto err_close_client;
+ }
if (dmabuf_cmsg->frag_size % getpagesize())
non_page_aligned_frags++;
@@ -687,22 +1003,27 @@ static int do_server(struct memory_buffer *mem)
dmabuf_cmsg->frag_offset,
dmabuf_cmsg->frag_size);
- if (do_validation)
- validate_buffer(tmp_mem,
- dmabuf_cmsg->frag_size);
- else
+ if (do_validation) {
+ if (validate_buffer(tmp_mem,
+ dmabuf_cmsg->frag_size))
+ goto err_close_client;
+ } else {
print_nonzero_bytes(tmp_mem,
dmabuf_cmsg->frag_size);
+ }
ret = setsockopt(client_fd, SOL_SOCKET,
SO_DEVMEM_DONTNEED, &token,
sizeof(token));
- if (ret != 1)
- error(1, 0,
- "SO_DEVMEM_DONTNEED not enough tokens");
+ if (ret != 1) {
+ pr_err("SO_DEVMEM_DONTNEED not enough tokens");
+ goto err_close_client;
+ }
+ }
+ if (!is_devmem) {
+ pr_err("flow steering error");
+ goto err_close_client;
}
- if (!is_devmem)
- error(1, 0, "flow steering error\n");
fprintf(stderr, "total_received=%lu\n", total_received);
}
@@ -713,54 +1034,121 @@ static int do_server(struct memory_buffer *mem)
page_aligned_frags, non_page_aligned_frags);
cleanup:
+ err = 0;
- free(tmp_mem);
+err_close_client:
close(client_fd);
+err_close_socket:
close(socket_fd);
+err_free_tmp:
+ free(tmp_mem);
+err_unbind:
ynl_sock_destroy(ys);
-
- return 0;
+err_reset_flow_steering:
+ reset_flow_steering();
+err_reset_rss:
+ reset_rss();
+err_reset_headersplit:
+ restore_ring_config(ring_config);
+err_free_ring_config:
+ ethtool_rings_get_rsp_free(ring_config);
+ return err;
}
-void run_devmem_tests(void)
+int run_devmem_tests(void)
{
+ struct ethtool_rings_get_rsp *ring_config;
+ struct netdev_queue_id *queues;
struct memory_buffer *mem;
struct ynl_sock *ys;
+ int err = -1;
mem = provider->alloc(getpagesize() * NUM_PAGES);
+ if (!mem) {
+ pr_err("Failed to allocate memory buffer");
+ return -1;
+ }
+
+ ring_config = get_ring_config();
+ if (!ring_config) {
+ pr_err("Failed to get current ring configuration");
+ goto err_free_mem;
+ }
/* Configure RSS to divert all traffic from our devmem queues */
- if (configure_rss())
- error(1, 0, "rss error\n");
+ if (configure_rss()) {
+ pr_err("rss error");
+ goto err_free_ring_config;
+ }
- if (configure_headersplit(1))
- error(1, 0, "Failed to configure header split\n");
+ if (configure_headersplit(ring_config, 1)) {
+ pr_err("Failed to configure header split");
+ goto err_reset_rss;
+ }
- if (!bind_rx_queue(ifindex, mem->fd,
- calloc(num_queues, sizeof(struct netdev_queue_id)),
- num_queues, &ys))
- error(1, 0, "Binding empty queues array should have failed\n");
+ queues = netdev_queue_id_alloc(num_queues);
+ if (!queues) {
+ pr_err("Failed to allocate empty queues array");
+ goto err_reset_headersplit;
+ }
- if (configure_headersplit(0))
- error(1, 0, "Failed to configure header split\n");
+ if (!bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys)) {
+ pr_err("Binding empty queues array should have failed");
+ goto err_unbind;
+ }
- if (!bind_rx_queue(ifindex, mem->fd, create_queues(), num_queues, &ys))
- error(1, 0, "Configure dmabuf with header split off should have failed\n");
+ if (configure_headersplit(ring_config, 0)) {
+ pr_err("Failed to configure header split");
+ goto err_reset_headersplit;
+ }
- if (configure_headersplit(1))
- error(1, 0, "Failed to configure header split\n");
+ queues = create_queues();
+ if (!queues) {
+ pr_err("Failed to create queues");
+ goto err_reset_headersplit;
+ }
- if (bind_rx_queue(ifindex, mem->fd, create_queues(), num_queues, &ys))
- error(1, 0, "Failed to bind\n");
+ if (!bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys)) {
+ pr_err("Configure dmabuf with header split off should have failed");
+ goto err_unbind;
+ }
+
+ if (configure_headersplit(ring_config, 1)) {
+ pr_err("Failed to configure header split");
+ goto err_reset_headersplit;
+ }
+
+ queues = create_queues();
+ if (!queues) {
+ pr_err("Failed to create queues");
+ goto err_reset_headersplit;
+ }
+
+ if (bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys)) {
+ pr_err("Failed to bind");
+ goto err_reset_headersplit;
+ }
/* Deactivating a bound queue should not be legal */
- if (!configure_channels(num_queues, num_queues - 1))
- error(1, 0, "Deactivating a bound queue should be illegal.\n");
+ if (!check_changing_channels(num_queues, num_queues)) {
+ pr_err("Deactivating a bound queue should be illegal");
+ goto err_unbind;
+ }
- /* Closing the netlink socket does an implicit unbind */
- ynl_sock_destroy(ys);
+ err = 0;
+ goto err_unbind;
+err_unbind:
+ ynl_sock_destroy(ys);
+err_reset_headersplit:
+ restore_ring_config(ring_config);
+err_reset_rss:
+ reset_rss();
+err_free_ring_config:
+ ethtool_rings_get_rsp_free(ring_config);
+err_free_mem:
provider->free(mem);
+ return err;
}
static uint64_t gettimeofday_ms(void)
@@ -780,13 +1168,15 @@ static int do_poll(int fd)
pfd.fd = fd;
ret = poll(&pfd, 1, waittime_ms);
- if (ret == -1)
- error(1, errno, "poll");
+ if (ret == -1) {
+ pr_err("poll");
+ return -1;
+ }
return ret && (pfd.revents & POLLERR);
}
-static void wait_compl(int fd)
+static int wait_compl(int fd)
{
int64_t tstop = gettimeofday_ms() + waittime_ms;
char control[CMSG_SPACE(100)] = {};
@@ -800,18 +1190,23 @@ static void wait_compl(int fd)
msg.msg_controllen = sizeof(control);
while (gettimeofday_ms() < tstop) {
- if (!do_poll(fd))
+ ret = do_poll(fd);
+ if (ret < 0)
+ return ret;
+ if (!ret)
continue;
ret = recvmsg(fd, &msg, MSG_ERRQUEUE);
if (ret < 0) {
if (errno == EAGAIN)
continue;
- error(1, errno, "recvmsg(MSG_ERRQUEUE)");
- return;
+ pr_err("recvmsg(MSG_ERRQUEUE)");
+ return -1;
+ }
+ if (msg.msg_flags & MSG_CTRUNC) {
+ pr_err("MSG_CTRUNC");
+ return -1;
}
- if (msg.msg_flags & MSG_CTRUNC)
- error(1, 0, "MSG_CTRUNC\n");
for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
if (cm->cmsg_level != SOL_IP &&
@@ -825,20 +1220,25 @@ static void wait_compl(int fd)
continue;
serr = (void *)CMSG_DATA(cm);
- if (serr->ee_origin != SO_EE_ORIGIN_ZEROCOPY)
- error(1, 0, "wrong origin %u", serr->ee_origin);
- if (serr->ee_errno != 0)
- error(1, 0, "wrong errno %d", serr->ee_errno);
+ if (serr->ee_origin != SO_EE_ORIGIN_ZEROCOPY) {
+ pr_err("wrong origin %u", serr->ee_origin);
+ return -1;
+ }
+ if (serr->ee_errno != 0) {
+ pr_err("wrong errno %d", serr->ee_errno);
+ return -1;
+ }
hi = serr->ee_data;
lo = serr->ee_info;
fprintf(stderr, "tx complete [%d,%d]\n", lo, hi);
- return;
+ return 0;
}
}
- error(1, 0, "did not receive tx completion");
+ pr_err("did not receive tx completion");
+ return -1;
}
static int do_client(struct memory_buffer *mem)
@@ -852,51 +1252,69 @@ static int do_client(struct memory_buffer *mem)
ssize_t line_size = 0;
struct cmsghdr *cmsg;
char *line = NULL;
- unsigned long mid;
+ int ret, err = -1;
size_t len = 0;
int socket_fd;
__u32 ddmabuf;
int opt = 1;
- int ret;
ret = parse_address(server_ip, atoi(port), &server_sin);
- if (ret < 0)
- error(1, 0, "parse server address");
+ if (ret < 0) {
+ pr_err("parse server address");
+ return -1;
+ }
+
+ if (client_ip) {
+ ret = parse_address(client_ip, atoi(port), &client_sin);
+ if (ret < 0) {
+ pr_err("parse client address");
+ return ret;
+ }
+ }
socket_fd = socket(AF_INET6, SOCK_STREAM, 0);
- if (socket_fd < 0)
- error(1, socket_fd, "create socket");
+ if (socket_fd < 0) {
+ pr_err("create socket");
+ return -1;
+ }
- enable_reuseaddr(socket_fd);
+ if (enable_reuseaddr(socket_fd))
+ goto err_close_socket;
ret = setsockopt(socket_fd, SOL_SOCKET, SO_BINDTODEVICE, ifname,
strlen(ifname) + 1);
- if (ret)
- error(1, errno, "bindtodevice");
+ if (ret) {
+ pr_err("bindtodevice");
+ goto err_close_socket;
+ }
- if (bind_tx_queue(ifindex, mem->fd, &ys))
- error(1, 0, "Failed to bind\n");
+ if (bind_tx_queue(ifindex, mem->fd, &ys)) {
+ pr_err("Failed to bind");
+ goto err_close_socket;
+ }
if (client_ip) {
- ret = parse_address(client_ip, atoi(port), &client_sin);
- if (ret < 0)
- error(1, 0, "parse client address");
-
ret = bind(socket_fd, &client_sin, sizeof(client_sin));
- if (ret)
- error(1, errno, "bind");
+ if (ret) {
+ pr_err("bind");
+ goto err_unbind;
+ }
}
ret = setsockopt(socket_fd, SOL_SOCKET, SO_ZEROCOPY, &opt, sizeof(opt));
- if (ret)
- error(1, errno, "set sock opt");
+ if (ret) {
+ pr_err("set sock opt");
+ goto err_unbind;
+ }
fprintf(stderr, "Connect to %s %d (via %s)\n", server_ip,
ntohs(server_sin.sin6_port), ifname);
ret = connect(socket_fd, &server_sin, sizeof(server_sin));
- if (ret)
- error(1, errno, "connect");
+ if (ret) {
+ pr_err("connect");
+ goto err_unbind;
+ }
while (1) {
free(line);
@@ -909,10 +1327,11 @@ static int do_client(struct memory_buffer *mem)
if (max_chunk) {
msg.msg_iovlen =
(line_size + max_chunk - 1) / max_chunk;
- if (msg.msg_iovlen > MAX_IOV)
- error(1, 0,
- "can't partition %zd bytes into maximum of %d chunks",
- line_size, MAX_IOV);
+ if (msg.msg_iovlen > MAX_IOV) {
+ pr_err("can't partition %zd bytes into maximum of %d chunks",
+ line_size, MAX_IOV);
+ goto err_free_line;
+ }
for (int i = 0; i < msg.msg_iovlen; i++) {
iov[i].iov_base = (void *)(i * max_chunk);
@@ -943,34 +1362,40 @@ static int do_client(struct memory_buffer *mem)
*((__u32 *)CMSG_DATA(cmsg)) = ddmabuf;
ret = sendmsg(socket_fd, &msg, MSG_ZEROCOPY);
- if (ret < 0)
- error(1, errno, "Failed sendmsg");
+ if (ret < 0) {
+ pr_err("Failed sendmsg");
+ goto err_free_line;
+ }
fprintf(stderr, "sendmsg_ret=%d\n", ret);
- if (ret != line_size)
- error(1, errno, "Did not send all bytes %d vs %zd", ret,
- line_size);
+ if (ret != line_size) {
+ pr_err("Did not send all bytes %d vs %zd", ret, line_size);
+ goto err_free_line;
+ }
- wait_compl(socket_fd);
+ if (wait_compl(socket_fd))
+ goto err_free_line;
}
fprintf(stderr, "%s: tx ok\n", TEST_PREFIX);
+ err = 0;
+
+err_free_line:
free(line);
+err_unbind:
+ ynl_sock_destroy(ys);
+err_close_socket:
close(socket_fd);
-
- if (ys)
- ynl_sock_destroy(ys);
-
- return 0;
+ return err;
}
int main(int argc, char *argv[])
{
struct memory_buffer *mem;
int is_server = 0, opt;
- int ret;
+ int ret, err = 1;
while ((opt = getopt(argc, argv, "ls:c:p:v:q:t:f:z:")) != -1) {
switch (opt) {
@@ -1007,8 +1432,10 @@ int main(int argc, char *argv[])
}
}
- if (!ifname)
- error(1, 0, "Missing -f argument\n");
+ if (!ifname) {
+ pr_err("Missing -f argument");
+ return 1;
+ }
ifindex = if_nametoindex(ifname);
@@ -1017,33 +1444,41 @@ int main(int argc, char *argv[])
if (!server_ip && !client_ip) {
if (start_queue < 0 && num_queues < 0) {
num_queues = rxq_num(ifindex);
- if (num_queues < 0)
- error(1, 0, "couldn't detect number of queues\n");
- if (num_queues < 2)
- error(1, 0,
- "number of device queues is too low\n");
+ if (num_queues < 0) {
+ pr_err("couldn't detect number of queues");
+ return 1;
+ }
+ if (num_queues < 2) {
+ pr_err("number of device queues is too low");
+ return 1;
+ }
/* make sure can bind to multiple queues */
start_queue = num_queues / 2;
num_queues /= 2;
}
- if (start_queue < 0 || num_queues < 0)
- error(1, 0, "Both -t and -q are required\n");
+ if (start_queue < 0 || num_queues < 0) {
+ pr_err("Both -t and -q are required");
+ return 1;
+ }
- run_devmem_tests();
- return 0;
+ return run_devmem_tests();
}
if (start_queue < 0 && num_queues < 0) {
num_queues = rxq_num(ifindex);
- if (num_queues < 2)
- error(1, 0, "number of device queues is too low\n");
+ if (num_queues < 2) {
+ pr_err("number of device queues is too low");
+ return 1;
+ }
num_queues = 1;
start_queue = rxq_num(ifindex) - num_queues;
- if (start_queue < 0)
- error(1, 0, "couldn't detect number of queues\n");
+ if (start_queue < 0) {
+ pr_err("couldn't detect number of queues");
+ return 1;
+ }
fprintf(stderr, "using queues %d..%d\n", start_queue, start_queue + num_queues);
}
@@ -1051,21 +1486,39 @@ int main(int argc, char *argv[])
for (; optind < argc; optind++)
fprintf(stderr, "extra arguments: %s\n", argv[optind]);
- if (start_queue < 0)
- error(1, 0, "Missing -t argument\n");
+ if (start_queue < 0) {
+ pr_err("Missing -t argument");
+ return 1;
+ }
- if (num_queues < 0)
- error(1, 0, "Missing -q argument\n");
+ if (num_queues < 0) {
+ pr_err("Missing -q argument");
+ return 1;
+ }
- if (!server_ip)
- error(1, 0, "Missing -s argument\n");
+ if (!server_ip) {
+ pr_err("Missing -s argument");
+ return 1;
+ }
- if (!port)
- error(1, 0, "Missing -p argument\n");
+ if (!port) {
+ pr_err("Missing -p argument");
+ return 1;
+ }
mem = provider->alloc(getpagesize() * NUM_PAGES);
+ if (!mem) {
+ pr_err("Failed to allocate memory buffer");
+ return 1;
+ }
+
ret = is_server ? do_server(mem) : do_client(mem);
- provider->free(mem);
+ if (ret)
+ goto err_free_mem;
- return ret;
+ err = 0;
+
+err_free_mem:
+ provider->free(mem);
+ return err;
}
diff --git a/tools/testing/selftests/drivers/net/hw/nic_timestamp.py b/tools/testing/selftests/drivers/net/hw/nic_timestamp.py
new file mode 100755
index 000000000000..c1e943d53f19
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/hw/nic_timestamp.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+Tests related to configuration of HW timestamping
+"""
+
+import copy
+import errno
+from lib.py import ksft_run, ksft_exit, ksft_ge, ksft_eq, KsftSkipEx
+from lib.py import NetDrvEnv, EthtoolFamily, NlError
+
+
+def __get_hwtimestamp_support(cfg):
+ """ Retrieve supported configuration information """
+
+ try:
+ tsinfo = cfg.ethnl.tsinfo_get({'header': {'dev-name': cfg.ifname}})
+ except NlError as e:
+ if e.error == errno.EOPNOTSUPP:
+ raise KsftSkipEx("timestamping configuration is not supported") from e
+ raise
+
+ ctx = {}
+ tx = tsinfo.get('tx-types', {})
+ rx = tsinfo.get('rx-filters', {})
+
+ bits = tx.get('bits', {})
+ ctx['tx'] = bits.get('bit', [])
+ bits = rx.get('bits', {})
+ ctx['rx'] = bits.get('bit', [])
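+    # e.g. ctx == {'tx': [{'index': 0, 'name': 'off'}, ...],
+    #              'rx': [{'index': 0, 'name': 'none'}, ...]} (names illustrative)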
+ return ctx
+
+
+def __get_hwtimestamp_config(cfg):
+ """ Retrieve current TS configuration information """
+
+ try:
+ tscfg = cfg.ethnl.tsconfig_get({'header': {'dev-name': cfg.ifname}})
+ except NlError as e:
+ if e.error == errno.EOPNOTSUPP:
+ raise KsftSkipEx("timestamping configuration is not supported via netlink") from e
+ raise
+ return tscfg
+
+
+def __set_hwtimestamp_config(cfg, ts):
+ """ Setup new TS configuration information """
+
+ ts['header'] = {'dev-name': cfg.ifname}
+ try:
+ res = cfg.ethnl.tsconfig_set(ts)
+ except NlError as e:
+ if e.error == errno.EOPNOTSUPP:
+ raise KsftSkipEx("timestamping configuration is not supported via netlink") from e
+ raise
+ return res
+
+
+def test_hwtstamp_tx(cfg):
+ """
+ Test TX timestamp configuration.
+    The driver should apply the provided config and report back the proper state.
+ """
+
+ orig_tscfg = __get_hwtimestamp_config(cfg)
+ ts = __get_hwtimestamp_support(cfg)
+ tx = ts['tx']
+ for t in tx:
+        # copy so that mutating tscfg does not clobber the saved original
+        tscfg = copy.deepcopy(orig_tscfg)
+        tscfg['tx-types']['bits']['bit'] = [t]
+ res = __set_hwtimestamp_config(cfg, tscfg)
+ if res is None:
+ res = __get_hwtimestamp_config(cfg)
+ ksft_eq(res['tx-types']['bits']['bit'], [t])
+ __set_hwtimestamp_config(cfg, orig_tscfg)
+
+
+def test_hwtstamp_rx(cfg):
+ """
+ Test RX timestamp configuration.
+ The filter configuration is taken from the list of supported filters.
+    The driver should apply the config without error and report back the proper state.
+ Some extension of the timestamping scope is allowed for PTP filters.
+ """
+
+ orig_tscfg = __get_hwtimestamp_config(cfg)
+ ts = __get_hwtimestamp_support(cfg)
+ rx = ts['rx']
+ for r in rx:
+        # copy so that mutating tscfg does not clobber the saved original
+        tscfg = copy.deepcopy(orig_tscfg)
+        tscfg['rx-filters']['bits']['bit'] = [r]
+ res = __set_hwtimestamp_config(cfg, tscfg)
+ if res is None:
+ res = __get_hwtimestamp_config(cfg)
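+        # indices 0 and 1 (HWTSTAMP_FILTER_NONE / HWTSTAMP_FILTER_ALL) are
+        # assumed to be honored exactly; PTP filters may be widened in scope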
+ if r['index'] == 0 or r['index'] == 1:
+ ksft_eq(res['rx-filters']['bits']['bit'][0]['index'], r['index'])
+ else:
+            # the driver can fall back to a value with broader timestamping coverage
+ ksft_ge(res['rx-filters']['bits']['bit'][0]['index'], r['index'])
+ __set_hwtimestamp_config(cfg, orig_tscfg)
+
+
+def main() -> None:
+ """ Ksft boiler plate main """
+
+ with NetDrvEnv(__file__, nsim_test=False) as cfg:
+ cfg.ethnl = EthtoolFamily()
+ ksft_run([test_hwtstamp_tx, test_hwtstamp_rx], args=(cfg,))
+ ksft_exit()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/testing/selftests/drivers/net/hw/pp_alloc_fail.py b/tools/testing/selftests/drivers/net/hw/pp_alloc_fail.py
index ad192fef3117..2a51b60df8a1 100755
--- a/tools/testing/selftests/drivers/net/hw/pp_alloc_fail.py
+++ b/tools/testing/selftests/drivers/net/hw/pp_alloc_fail.py
@@ -1,8 +1,13 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
+"""
+Test driver resilience vs page pool allocation failures.
+"""
+
import errno
import time
+import math
import os
from lib.py import ksft_run, ksft_exit, ksft_pr
from lib.py import KsftSkipEx, KsftFailEx
@@ -13,7 +18,8 @@ from lib.py import cmd, tool, GenerateTraffic
def _write_fail_config(config):
for key, value in config.items():
- with open("/sys/kernel/debug/fail_function/" + key, "w") as fp:
+ path = "/sys/kernel/debug/fail_function/"
+ with open(path + key, "w", encoding='ascii') as fp:
fp.write(str(value) + "\n")
@@ -22,8 +28,7 @@ def _enable_pp_allocation_fail():
raise KsftSkipEx("Kernel built without function error injection (or DebugFS)")
if not os.path.exists("/sys/kernel/debug/fail_function/page_pool_alloc_netmems"):
- with open("/sys/kernel/debug/fail_function/inject", "w") as fp:
- fp.write("page_pool_alloc_netmems\n")
+ _write_fail_config({"inject": "page_pool_alloc_netmems"})
_write_fail_config({
"verbose": 0,
@@ -38,8 +43,7 @@ def _disable_pp_allocation_fail():
return
if os.path.exists("/sys/kernel/debug/fail_function/page_pool_alloc_netmems"):
- with open("/sys/kernel/debug/fail_function/inject", "w") as fp:
- fp.write("\n")
+ _write_fail_config({"inject": ""})
_write_fail_config({
"probability": 0,
@@ -48,6 +52,10 @@ def _disable_pp_allocation_fail():
def test_pp_alloc(cfg, netdevnl):
+ """
+ Configure page pool allocation fail injection while traffic is running.
+ """
+
def get_stats():
return netdevnl.qstats_get({"ifindex": cfg.ifindex}, dump=True)[0]
@@ -55,7 +63,7 @@ def test_pp_alloc(cfg, netdevnl):
stat1 = get_stats()
time.sleep(1)
stat2 = get_stats()
- if stat2['rx-packets'] - stat1['rx-packets'] < 15000:
+ if stat2['rx-packets'] - stat1['rx-packets'] < 4000:
raise KsftFailEx("Traffic seems low:", stat2['rx-packets'] - stat1['rx-packets'])
@@ -82,11 +90,16 @@ def test_pp_alloc(cfg, netdevnl):
time.sleep(3)
s2 = get_stats()
- if s2['rx-alloc-fail'] - s1['rx-alloc-fail'] < 1:
+ seen_fails = s2['rx-alloc-fail'] - s1['rx-alloc-fail']
+ if seen_fails < 1:
raise KsftSkipEx("Allocation failures not increasing")
- if s2['rx-alloc-fail'] - s1['rx-alloc-fail'] < 100:
- raise KsftSkipEx("Allocation increasing too slowly", s2['rx-alloc-fail'] - s1['rx-alloc-fail'],
- "packets:", s2['rx-packets'] - s1['rx-packets'])
+ pkts = s2['rx-packets'] - s1['rx-packets']
+ # Expecting one failure per 512 buffers, 3.1x safety margin
+ want_fails = math.floor(pkts / 512 / 3.1)
+ if seen_fails < want_fails:
+ raise KsftSkipEx("Allocation increasing too slowly", seen_fails,
+ "packets:", pkts)
+ ksft_pr(f"Seen: pkts:{pkts} fails:{seen_fails} (pass thrs:{want_fails})")
# Basic failures are fine, try to wobble some settings to catch extra failures
check_traffic_flowing()
@@ -105,7 +118,7 @@ def test_pp_alloc(cfg, netdevnl):
else:
ksft_pr("ethtool -G change retval: did not succeed", new_g)
else:
- ksft_pr("ethtool -G change retval: did not try")
+ ksft_pr("ethtool -G change retval: did not try")
time.sleep(0.1)
check_traffic_flowing()
@@ -119,6 +132,7 @@ def test_pp_alloc(cfg, netdevnl):
def main() -> None:
+ """ Ksft boiler plate main """
netdevnl = NetdevFamily()
with NetDrvEpEnv(__file__, nsim_test=False) as cfg:
diff --git a/tools/testing/selftests/drivers/net/hw/rss_api.py b/tools/testing/selftests/drivers/net/hw/rss_api.py
new file mode 100755
index 000000000000..19847f3d4a00
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/hw/rss_api.py
@@ -0,0 +1,476 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+API level tests for RSS (mostly Netlink vs IOCTL).
+"""
+
+import errno
+import glob
+import random
+from lib.py import ksft_run, ksft_exit, ksft_eq, ksft_is, ksft_ne, ksft_raises
+from lib.py import KsftSkipEx, KsftFailEx
+from lib.py import defer, ethtool, CmdExitFailure
+from lib.py import EthtoolFamily, NlError
+from lib.py import NetDrvEnv
+
+
+def _require_2qs(cfg):
+ qcnt = len(glob.glob(f"/sys/class/net/{cfg.ifname}/queues/rx-*"))
+ if qcnt < 2:
+ raise KsftSkipEx(f"Local has only {qcnt} queues")
+ return qcnt
+
+
+def _ethtool_create(cfg, act, opts):
+ output = ethtool(f"{act} {cfg.ifname} {opts}").stdout
+ # Output will be something like: "New RSS context is 1" or
+ # "Added rule with ID 7", we want the integer from the end
+ return int(output.split()[-1])
+
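+# e.g. _ethtool_create(cfg, "-X", "context new") parses
+# "New RSS context is 1" and returns 1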
+
+def _ethtool_get_cfg(cfg, fl_type, to_nl=False):
+ descr = ethtool(f"-n {cfg.ifname} rx-flow-hash {fl_type}").stdout
+
+ if to_nl:
+ converter = {
+ "IP SA": "ip-src",
+ "IP DA": "ip-dst",
+ "L4 bytes 0 & 1 [TCP/UDP src port]": "l4-b-0-1",
+ "L4 bytes 2 & 3 [TCP/UDP dst port]": "l4-b-2-3",
+ }
+
+ ret = set()
+ else:
+ converter = {
+ "IP SA": "s",
+ "IP DA": "d",
+ "L3 proto": "t",
+ "L4 bytes 0 & 1 [TCP/UDP src port]": "f",
+ "L4 bytes 2 & 3 [TCP/UDP dst port]": "n",
+ }
+
+ ret = ""
+
+ for line in descr.split("\n")[1:-2]:
+ # if this raises we probably need to add more keys to converter above
+ if to_nl:
+ ret.add(converter[line])
+ else:
+ ret += converter[line]
+ return ret
+
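+# Illustration (assumed ethtool output): a typical "sdfn" config for tcp4
+# comes back as the string "sdfn", while to_nl=True maps the same lines to
+# {"ip-src", "ip-dst", "l4-b-0-1", "l4-b-2-3"}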
+
+def test_rxfh_nl_set_fail(cfg):
+ """
+ Test error path of Netlink SET.
+ """
+ _require_2qs(cfg)
+
+ ethnl = EthtoolFamily()
+ ethnl.ntf_subscribe("monitor")
+
+ with ksft_raises(NlError):
+ ethnl.rss_set({"header": {"dev-name": "lo"},
+ "indir": None})
+
+ with ksft_raises(NlError):
+ ethnl.rss_set({"header": {"dev-index": cfg.ifindex},
+ "indir": [100000]})
+ ntf = next(ethnl.poll_ntf(duration=0.2), None)
+ ksft_is(ntf, None)
+
+
+def test_rxfh_nl_set_indir(cfg):
+ """
+ Test setting indirection table via Netlink.
+ """
+ qcnt = _require_2qs(cfg)
+
+ # Test some SETs with a value
+ reset = defer(cfg.ethnl.rss_set,
+ {"header": {"dev-index": cfg.ifindex}, "indir": None})
+ cfg.ethnl.rss_set({"header": {"dev-index": cfg.ifindex},
+ "indir": [1]})
+ rss = cfg.ethnl.rss_get({"header": {"dev-index": cfg.ifindex}})
+ ksft_eq(set(rss.get("indir", [-1])), {1})
+
+ cfg.ethnl.rss_set({"header": {"dev-index": cfg.ifindex},
+ "indir": [0, 1]})
+ rss = cfg.ethnl.rss_get({"header": {"dev-index": cfg.ifindex}})
+ ksft_eq(set(rss.get("indir", [-1])), {0, 1})
+
+ # Make sure we can't set the queue count below max queue used
+ with ksft_raises(CmdExitFailure):
+ ethtool(f"-L {cfg.ifname} combined 0 rx 1")
+ with ksft_raises(CmdExitFailure):
+ ethtool(f"-L {cfg.ifname} combined 1 rx 0")
+
+ # Test reset back to default
+ reset.exec()
+ rss = cfg.ethnl.rss_get({"header": {"dev-index": cfg.ifindex}})
+ ksft_eq(set(rss.get("indir", [-1])), set(range(qcnt)))
+
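+# Note: "indir": None in rss_set requests a reset to the driver default
+# table, which is why the check above expects an even spread over all queues.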
+
+def test_rxfh_nl_set_indir_ctx(cfg):
+ """
+ Test setting indirection table for a custom context via Netlink.
+ """
+ _require_2qs(cfg)
+
+ # Get settings for ctx 0; we'll make sure they don't get clobbered
+ dflt = cfg.ethnl.rss_get({"header": {"dev-index": cfg.ifindex}})
+
+ # Create context
+ ctx_id = _ethtool_create(cfg, "-X", "context new")
+ defer(ethtool, f"-X {cfg.ifname} context {ctx_id} delete")
+
+ cfg.ethnl.rss_set({"header": {"dev-index": cfg.ifindex},
+ "context": ctx_id, "indir": [1]})
+ rss = cfg.ethnl.rss_get({"header": {"dev-index": cfg.ifindex},
+ "context": ctx_id})
+ ksft_eq(set(rss.get("indir", [-1])), {1})
+
+ ctx0 = cfg.ethnl.rss_get({"header": {"dev-index": cfg.ifindex}})
+ ksft_eq(ctx0, dflt)
+
+ cfg.ethnl.rss_set({"header": {"dev-index": cfg.ifindex},
+ "context": ctx_id, "indir": [0, 1]})
+ rss = cfg.ethnl.rss_get({"header": {"dev-index": cfg.ifindex},
+ "context": ctx_id})
+ ksft_eq(set(rss.get("indir", [-1])), {0, 1})
+
+ ctx0 = cfg.ethnl.rss_get({"header": {"dev-index": cfg.ifindex}})
+ ksft_eq(ctx0, dflt)
+
+ # Make sure we can't set the queue count below max queue used
+ with ksft_raises(CmdExitFailure):
+ ethtool(f"-L {cfg.ifname} combined 0 rx 1")
+ with ksft_raises(CmdExitFailure):
+ ethtool(f"-L {cfg.ifname} combined 1 rx 0")
+
+
+def test_rxfh_indir_ntf(cfg):
+ """
+ Check that Netlink notifications are generated when RSS indirection
+ table was modified.
+ """
+ _require_2qs(cfg)
+
+ ethnl = EthtoolFamily()
+ ethnl.ntf_subscribe("monitor")
+
+ ethtool(f"--disable-netlink -X {cfg.ifname} weight 0 1")
+ reset = defer(ethtool, f"-X {cfg.ifname} default")
+
+ ntf = next(ethnl.poll_ntf(duration=0.2), None)
+ if ntf is None:
+ raise KsftFailEx("No notification received")
+ ksft_eq(ntf["name"], "rss-ntf")
+ ksft_eq(set(ntf["msg"]["indir"]), {1})
+
+ reset.exec()
+ ntf = next(ethnl.poll_ntf(duration=0.2), None)
+ if ntf is None:
+ raise KsftFailEx("No notification received after reset")
+ ksft_eq(ntf["name"], "rss-ntf")
+ ksft_is(ntf["msg"].get("context"), None)
+ ksft_ne(set(ntf["msg"]["indir"]), {1})
+
+
+def test_rxfh_indir_ctx_ntf(cfg):
+ """
+ Check that Netlink notifications are generated when RSS indirection
+ table was modified on an additional RSS context.
+ """
+ _require_2qs(cfg)
+
+ ctx_id = _ethtool_create(cfg, "-X", "context new")
+ defer(ethtool, f"-X {cfg.ifname} context {ctx_id} delete")
+
+ ethnl = EthtoolFamily()
+ ethnl.ntf_subscribe("monitor")
+
+ ethtool(f"--disable-netlink -X {cfg.ifname} context {ctx_id} weight 0 1")
+
+ ntf = next(ethnl.poll_ntf(duration=0.2), None)
+ if ntf is None:
+ raise KsftFailEx("No notification received")
+ ksft_eq(ntf["name"], "rss-ntf")
+ ksft_eq(ntf["msg"].get("context"), ctx_id)
+ ksft_eq(set(ntf["msg"]["indir"]), {1})
+
+
+def test_rxfh_nl_set_key(cfg):
+ """
+ Test setting hashing key via Netlink.
+ """
+
+ dflt = cfg.ethnl.rss_get({"header": {"dev-index": cfg.ifindex}})
+ defer(cfg.ethnl.rss_set,
+ {"header": {"dev-index": cfg.ifindex},
+ "hkey": dflt["hkey"], "indir": None})
+
+ # Empty key should error out
+ with ksft_raises(NlError) as cm:
+ cfg.ethnl.rss_set({"header": {"dev-index": cfg.ifindex},
+ "hkey": None})
+ ksft_eq(cm.exception.nl_msg.extack['bad-attr'], '.hkey')
+
+ # Set key to random
+ mod = random.randbytes(len(dflt["hkey"]))
+ cfg.ethnl.rss_set({"header": {"dev-index": cfg.ifindex},
+ "hkey": mod})
+ rss = cfg.ethnl.rss_get({"header": {"dev-index": cfg.ifindex}})
+ ksft_eq(rss.get("hkey", [-1]), mod)
+
+ # Set key to random and indir tbl to something at once
+ mod = random.randbytes(len(dflt["hkey"]))
+ cfg.ethnl.rss_set({"header": {"dev-index": cfg.ifindex},
+ "indir": [0, 1], "hkey": mod})
+ rss = cfg.ethnl.rss_get({"header": {"dev-index": cfg.ifindex}})
+ ksft_eq(rss.get("hkey", [-1]), mod)
+ ksft_eq(set(rss.get("indir", [-1])), {0, 1})
+
+
+def test_rxfh_fields(cfg):
+ """
+ Test reading Rx Flow Hash over Netlink.
+ """
+
+ flow_types = ["tcp4", "tcp6", "udp4", "udp6"]
+ ethnl = EthtoolFamily()
+
+ cfg_nl = ethnl.rss_get({"header": {"dev-index": cfg.ifindex}})
+ for fl_type in flow_types:
+ one = _ethtool_get_cfg(cfg, fl_type, to_nl=True)
+ ksft_eq(one, cfg_nl["flow-hash"][fl_type],
+ comment="Config for " + fl_type)
+
+
+def test_rxfh_fields_set(cfg):
+ """ Test configuring Rx Flow Hash over Netlink. """
+
+ flow_types = ["tcp4", "tcp6", "udp4", "udp6"]
+
+ # Collect current settings
+ cfg_old = cfg.ethnl.rss_get({"header": {"dev-index": cfg.ifindex}})
+ # symmetric hashing is config-order-sensitive; make sure we leave
+ # symmetric mode, or make the flow-hash sym-compatible first
+ # (defer()ed restores run in reverse order of registration)
+ changes = [{"flow-hash": cfg_old["flow-hash"],},
+ {"input-xfrm": cfg_old.get("input-xfrm", {}),}]
+ if cfg_old.get("input-xfrm"):
+ changes = list(reversed(changes))
+ for old in changes:
+ defer(cfg.ethnl.rss_set, {"header": {"dev-index": cfg.ifindex},} | old)
+
+ # symmetric hashing prevents some of the configs below
+ if cfg_old.get("input-xfrm"):
+ cfg.ethnl.rss_set({"header": {"dev-index": cfg.ifindex},
+ "input-xfrm": {}})
+
+ for fl_type in flow_types:
+ cur = _ethtool_get_cfg(cfg, fl_type)
+ if cur == "sdfn":
+ change_nl = {"ip-src", "ip-dst"}
+ change_ic = "sd"
+ else:
+ change_nl = {"l4-b-0-1", "l4-b-2-3", "ip-src", "ip-dst"}
+ change_ic = "sdfn"
+
+ cfg.ethnl.rss_set({
+ "header": {"dev-index": cfg.ifindex},
+ "flow-hash": {fl_type: change_nl}
+ })
+ reset = defer(ethtool, f"--disable-netlink -N {cfg.ifname} "
+ f"rx-flow-hash {fl_type} {cur}")
+
+ cfg_nl = cfg.ethnl.rss_get({"header": {"dev-index": cfg.ifindex}})
+ ksft_eq(change_nl, cfg_nl["flow-hash"][fl_type],
+ comment=f"Config for {fl_type} over Netlink")
+ cfg_ic = _ethtool_get_cfg(cfg, fl_type)
+ ksft_eq(change_ic, cfg_ic,
+ comment=f"Config for {fl_type} over IOCTL")
+
+ reset.exec()
+ cfg_nl = cfg.ethnl.rss_get({"header": {"dev-index": cfg.ifindex}})
+ ksft_eq(cfg_old["flow-hash"][fl_type], cfg_nl["flow-hash"][fl_type],
+ comment=f"Un-config for {fl_type} over Netlink")
+ cfg_ic = _ethtool_get_cfg(cfg, fl_type)
+ ksft_eq(cur, cfg_ic, comment=f"Un-config for {fl_type} over IOCTL")
+
+ # Try to set multiple at once; the defer was already installed at the start
+ change = {"ip-src"}
+ if change == cfg_old["flow-hash"]["tcp4"]:
+ change = {"ip-dst"}
+ cfg.ethnl.rss_set({
+ "header": {"dev-index": cfg.ifindex},
+ "flow-hash": {x: change for x in flow_types}
+ })
+
+ cfg_nl = cfg.ethnl.rss_get({"header": {"dev-index": cfg.ifindex}})
+ for fl_type in flow_types:
+ ksft_eq(change, cfg_nl["flow-hash"][fl_type],
+ comment=f"multi-config for {fl_type} over Netlink")
+
+
+def test_rxfh_fields_set_xfrm(cfg):
+ """ Test changing Rx Flow Hash vs xfrm_input at once. """
+
+ def set_rss(cfg, xfrm, fh):
+ cfg.ethnl.rss_set({"header": {"dev-index": cfg.ifindex},
+ "input-xfrm": xfrm, "flow-hash": fh})
+
+ # Install the reset handler
+ cfg_old = cfg.ethnl.rss_get({"header": {"dev-index": cfg.ifindex}})
+ # symmetric hashing is config-order-sensitive; make sure we leave
+ # symmetric mode, or make the flow-hash sym-compatible first
+ changes = [{"flow-hash": cfg_old["flow-hash"],},
+ {"input-xfrm": cfg_old.get("input-xfrm", {}),}]
+ if cfg_old.get("input-xfrm"):
+ changes = list(reversed(changes))
+ for old in changes:
+ defer(cfg.ethnl.rss_set, {"header": {"dev-index": cfg.ifindex},} | old)
+
+ # Make sure we start with input-xfrm off, and tcp4 config non-sym
+ set_rss(cfg, {}, {})
+ set_rss(cfg, {}, {"tcp4": {"ip-src"}})
+
+ # Setting sym and fixing tcp4 config not expected to pass right now
+ with ksft_raises(NlError):
+ set_rss(cfg, {"sym-xor"}, {"tcp4": {"ip-src", "ip-dst"}})
+ # One at a time should work, hopefully
+ set_rss(cfg, 0, {"tcp4": {"ip-src", "ip-dst"}})
+ no_support = False
+ try:
+ set_rss(cfg, {"sym-xor"}, {})
+ except NlError:
+ try:
+ set_rss(cfg, {"sym-or-xor"}, {})
+ except NlError:
+ no_support = True
+ if no_support:
+ raise KsftSkipEx("no input-xfrm supported")
+ # Disabling two at once should not work either without kernel changes
+ with ksft_raises(NlError):
+ set_rss(cfg, {}, {"tcp4": {"ip-src"}})
+
+
+def test_rxfh_fields_ntf(cfg):
+ """ Test Rx Flow Hash notifications. """
+
+ cur = _ethtool_get_cfg(cfg, "tcp4")
+ if cur == "sdfn":
+ change = {"ip-src", "ip-dst"}
+ else:
+ change = {"l4-b-0-1", "l4-b-2-3", "ip-src", "ip-dst"}
+
+ ethnl = EthtoolFamily()
+ ethnl.ntf_subscribe("monitor")
+
+ ethnl.rss_set({
+ "header": {"dev-index": cfg.ifindex},
+ "flow-hash": {"tcp4": change}
+ })
+ reset = defer(ethtool,
+ f"--disable-netlink -N {cfg.ifname} rx-flow-hash tcp4 {cur}")
+
+ ntf = next(ethnl.poll_ntf(duration=0.2), None)
+ if ntf is None:
+ raise KsftFailEx("No notification received after IOCTL change")
+ ksft_eq(ntf["name"], "rss-ntf")
+ ksft_eq(ntf["msg"]["flow-hash"]["tcp4"], change)
+ ksft_eq(next(ethnl.poll_ntf(duration=0.01), None), None)
+
+ reset.exec()
+ ntf = next(ethnl.poll_ntf(duration=0.2), None)
+ if ntf is None:
+ raise KsftFailEx("No notification received after Netlink change")
+ ksft_eq(ntf["name"], "rss-ntf")
+ ksft_ne(ntf["msg"]["flow-hash"]["tcp4"], change)
+ ksft_eq(next(ethnl.poll_ntf(duration=0.01), None), None)
+
+
+def test_rss_ctx_add(cfg):
+ """ Test creating an additional RSS context via Netlink """
+
+ _require_2qs(cfg)
+
+ # Test basic creation
+ ctx = cfg.ethnl.rss_create_act({"header": {"dev-index": cfg.ifindex}})
+ d = defer(ethtool, f"-X {cfg.ifname} context {ctx.get('context')} delete")
+ ksft_ne(ctx.get("context", 0), 0)
+ ksft_ne(set(ctx.get("indir", [0])), {0},
+ comment="Driver should init the indirection table")
+
+ # Try requesting the ID we just got allocated
+ with ksft_raises(NlError) as cm:
+ ctx = cfg.ethnl.rss_create_act({
+ "header": {"dev-index": cfg.ifindex},
+ "context": ctx.get("context"),
+ })
+ ethtool(f"-X {cfg.ifname} context {ctx.get('context')} delete")
+ d.exec()
+ ksft_eq(cm.exception.nl_msg.error, -errno.EBUSY)
+
+ # Test creating with a specified RSS table, and context ID
+ ctx_id = ctx.get("context")
+ ctx = cfg.ethnl.rss_create_act({
+ "header": {"dev-index": cfg.ifindex},
+ "context": ctx_id,
+ "indir": [1],
+ })
+ ethtool(f"-X {cfg.ifname} context {ctx.get('context')} delete")
+ ksft_eq(ctx.get("context"), ctx_id)
+ ksft_eq(set(ctx.get("indir", [0])), {1})
+
+
+def test_rss_ctx_ntf(cfg):
+ """ Test notifications for creating additional RSS contexts """
+
+ ethnl = EthtoolFamily()
+ ethnl.ntf_subscribe("monitor")
+
+ # Create / delete via Netlink
+ ctx = cfg.ethnl.rss_create_act({"header": {"dev-index": cfg.ifindex}})
+ cfg.ethnl.rss_delete_act({
+ "header": {"dev-index": cfg.ifindex},
+ "context": ctx["context"],
+ })
+
+ ntf = next(ethnl.poll_ntf(duration=0.2), None)
+ if ntf is None:
+ raise KsftFailEx("[NL] No notification after context creation")
+ ksft_eq(ntf["name"], "rss-create-ntf")
+ ksft_eq(ctx, ntf["msg"])
+
+ ntf = next(ethnl.poll_ntf(duration=0.2), None)
+ if ntf is None:
+ raise KsftFailEx("[NL] No notification after context deletion")
+ ksft_eq(ntf["name"], "rss-delete-ntf")
+
+ # Create / delete via IOCTL
+ ctx_id = _ethtool_create(cfg, "--disable-netlink -X", "context new")
+ ethtool(f"--disable-netlink -X {cfg.ifname} context {ctx_id} delete")
+ ntf = next(ethnl.poll_ntf(duration=0.2), None)
+ if ntf is None:
+ raise KsftFailEx("[IOCTL] No notification after context creation")
+ ksft_eq(ntf["name"], "rss-create-ntf")
+
+ ntf = next(ethnl.poll_ntf(duration=0.2), None)
+ if ntf is None:
+ raise KsftFailEx("[IOCTL] No notification after context deletion")
+ ksft_eq(ntf["name"], "rss-delete-ntf")
+
+
+def main() -> None:
+ """ Ksft boiler plate main """
+
+ with NetDrvEnv(__file__, nsim_test=False) as cfg:
+ cfg.ethnl = EthtoolFamily()
+ ksft_run(globs=globals(), case_pfx={"test_"}, args=(cfg, ))
+ ksft_exit()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/testing/selftests/drivers/net/hw/rss_ctx.py b/tools/testing/selftests/drivers/net/hw/rss_ctx.py
index ca60ae325c22..ed7e405682f0 100755
--- a/tools/testing/selftests/drivers/net/hw/rss_ctx.py
+++ b/tools/testing/selftests/drivers/net/hw/rss_ctx.py
@@ -118,7 +118,7 @@ def test_rss_key_indir(cfg):
qcnt = len(_get_rx_cnts(cfg))
if qcnt < 3:
- KsftSkipEx("Device has fewer than 3 queues (or doesn't support queue stats)")
+ raise KsftSkipEx("Device has fewer than 3 queues (or doesn't support queue stats)")
data = get_rss(cfg)
want_keys = ['rss-hash-key', 'rss-hash-function', 'rss-indirection-table']
@@ -178,8 +178,13 @@ def test_rss_key_indir(cfg):
cnts = _get_rx_cnts(cfg)
GenerateTraffic(cfg).wait_pkts_and_stop(20000)
cnts = _get_rx_cnts(cfg, prev=cnts)
- # First two queues get less traffic than all the rest
- ksft_lt(sum(cnts[:2]), sum(cnts[2:]), "traffic distributed: " + str(cnts))
+ if qcnt > 4:
+ # First two queues get less traffic than all the rest
+ ksft_lt(sum(cnts[:2]), sum(cnts[2:]),
+ "traffic distributed: " + str(cnts))
+ else:
+ # When queue count is low make sure third queue got significant pkts
+ ksft_ge(cnts[2], 3500, "traffic distributed: " + str(cnts))
def test_rss_queue_reconfigure(cfg, main_ctx=True):
@@ -335,19 +340,20 @@ def test_hitless_key_update(cfg):
data = get_rss(cfg)
key_len = len(data['rss-hash-key'])
- key = _rss_key_rand(key_len)
+ ethnl = EthtoolFamily()
+ key = random.randbytes(key_len)
tgen = GenerateTraffic(cfg)
try:
errors0, carrier0 = get_drop_err_sum(cfg)
t0 = datetime.datetime.now()
- ethtool(f"-X {cfg.ifname} hkey " + _rss_key_str(key))
+ ethnl.rss_set({"header": {"dev-index": cfg.ifindex}, "hkey": key})
t1 = datetime.datetime.now()
errors1, carrier1 = get_drop_err_sum(cfg)
finally:
tgen.wait_pkts_and_stop(5000)
- ksft_lt((t1 - t0).total_seconds(), 0.2)
+ ksft_lt((t1 - t0).total_seconds(), 0.15)
ksft_eq(errors1 - errors0, 0)
ksft_eq(carrier1 - carrier0, 0)
@@ -747,6 +753,62 @@ def test_rss_ntuple_addition(cfg):
'noise' : (0,) })
+def test_rss_default_context_rule(cfg):
+ """
+ Allocate a port, direct this port to context 0, then create a new RSS
+ context and steer all TCP traffic to it (context 1). Verify that:
+ * Traffic to the specific port continues to use queues of the main
+ context (0/1).
+ * Traffic to any other TCP port is redirected to the new context
+ (queues 2/3).
+ """
+
+ require_ntuple(cfg)
+
+ queue_cnt = len(_get_rx_cnts(cfg))
+ if queue_cnt < 4:
+ try:
+ ksft_pr(f"Increasing queue count {queue_cnt} -> 4")
+ ethtool(f"-L {cfg.ifname} combined 4")
+ defer(ethtool, f"-L {cfg.ifname} combined {queue_cnt}")
+ except Exception as exc:
+ raise KsftSkipEx("Not enough queues for the test") from exc
+
+ # Use queues 0 and 1 for the main context
+ ethtool(f"-X {cfg.ifname} equal 2")
+ defer(ethtool, f"-X {cfg.ifname} default")
+
+ # Create a new RSS context that uses queues 2 and 3
+ ctx_id = ethtool_create(cfg, "-X", "context new start 2 equal 2")
+ defer(ethtool, f"-X {cfg.ifname} context {ctx_id} delete")
+
+ # Generic low-priority rule: redirect all TCP traffic to the new context.
+ # Give it an explicit higher location number (lower priority).
+ flow_generic = f"flow-type tcp{cfg.addr_ipver} dst-ip {cfg.addr} context {ctx_id} loc 1"
+ ethtool(f"-N {cfg.ifname} {flow_generic}")
+ defer(ethtool, f"-N {cfg.ifname} delete 1")
+
+ # Specific high-priority rule for a random port that should stay on context 0.
+ # Assign loc 0 so it is evaluated before the generic rule.
+ port_main = rand_port()
+ flow_main = f"flow-type tcp{cfg.addr_ipver} dst-ip {cfg.addr} dst-port {port_main} context 0 loc 0"
+ ethtool(f"-N {cfg.ifname} {flow_main}")
+ defer(ethtool, f"-N {cfg.ifname} delete 0")
+
+ _ntuple_rule_check(cfg, 1, ctx_id)
+
+ # Verify that traffic matching the specific rule still goes to queues 0/1
+ _send_traffic_check(cfg, port_main, "context 0",
+ { 'target': (0, 1),
+ 'empty' : (2, 3) })
+
+ # And that traffic for any other port is steered to the new context
+ port_other = rand_port()
+ _send_traffic_check(cfg, port_other, f"context {ctx_id}",
+ { 'target': (2, 3),
+ 'noise' : (0, 1) })
+
+
def main() -> None:
with NetDrvEpEnv(__file__, nsim_test=False) as cfg:
cfg.context_cnt = None
@@ -760,7 +822,8 @@ def main() -> None:
test_rss_context_overlap, test_rss_context_overlap2,
test_rss_context_out_of_order, test_rss_context4_create_with_cfg,
test_flow_add_context_missing,
- test_delete_rss_context_busy, test_rss_ntuple_addition],
+ test_delete_rss_context_busy, test_rss_ntuple_addition,
+ test_rss_default_context_rule],
args=(cfg, ))
ksft_exit()
diff --git a/tools/testing/selftests/drivers/net/hw/rss_flow_label.py b/tools/testing/selftests/drivers/net/hw/rss_flow_label.py
new file mode 100755
index 000000000000..6fa95fe27c47
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/hw/rss_flow_label.py
@@ -0,0 +1,167 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+Tests for RSS hashing on IPv6 Flow Label.
+"""
+
+import glob
+import os
+import socket
+from lib.py import CmdExitFailure
+from lib.py import ksft_run, ksft_exit, ksft_eq, ksft_ge, ksft_in, \
+ ksft_not_in, ksft_raises, KsftSkipEx
+from lib.py import bkg, cmd, defer, fd_read_timeout, rand_port
+from lib.py import NetDrvEpEnv
+
+
+def _check_system(cfg):
+ if not hasattr(socket, "SO_INCOMING_CPU"):
+ raise KsftSkipEx("socket.SO_INCOMING_CPU was added in Python 3.11")
+
+ qcnt = len(glob.glob(f"/sys/class/net/{cfg.ifname}/queues/rx-*"))
+ if qcnt < 2:
+ raise KsftSkipEx(f"Local has only {qcnt} queues")
+
+ for f in [f"/sys/class/net/{cfg.ifname}/queues/rx-0/rps_flow_cnt",
+ f"/sys/class/net/{cfg.ifname}/queues/rx-0/rps_cpus"]:
+ try:
+ with open(f, 'r') as fp:
+ setting = fp.read().strip()
+ # CPU mask will be zeros and commas
+ if setting.replace("0", "").replace(",", ""):
+ raise KsftSkipEx(f"RPS/RFS is configured: {f}: {setting}")
+ except FileNotFoundError:
+ pass
+
+ # 1 is the default; if someone changed it we probably shouldn't mess with it
+ af = cmd("cat /proc/sys/net/ipv6/auto_flowlabels", host=cfg.remote).stdout
+ if af.strip() != "1":
+ raise KsftSkipEx("Remote does not have auto_flowlabels enabled")
+
+
+def _ethtool_get_cfg(cfg, fl_type):
+ descr = cmd(f"ethtool -n {cfg.ifname} rx-flow-hash {fl_type}").stdout
+
+ converter = {
+ "IP SA": "s",
+ "IP DA": "d",
+ "L3 proto": "t",
+ "L4 bytes 0 & 1 [TCP/UDP src port]": "f",
+ "L4 bytes 2 & 3 [TCP/UDP dst port]": "n",
+ "IPv6 Flow Label": "l",
+ }
+
+ ret = ""
+ for line in descr.split("\n")[1:-2]:
+ # if this raises we probably need to add more keys to converter above
+ ret += converter[line]
+ return ret
+
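+# e.g. an "IP SA / IP DA / L4 src / L4 dst / IPv6 Flow Label" config comes
+# back as "sdfnl" (the same letters ethtool -N accepts on the way in)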
+
+def _traffic(cfg, one_sock, one_cpu):
+ local_port = rand_port(socket.SOCK_DGRAM)
+ remote_port = rand_port(socket.SOCK_DGRAM)
+
+ sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
+ sock.bind(("", local_port))
+ sock.connect((cfg.remote_addr_v["6"], 0))
+ if one_sock:
+ send = f"exec 5<>/dev/udp/{cfg.addr_v['6']}/{local_port}; " \
+ "for i in `seq 20`; do echo a >&5; sleep 0.02; done; exec 5>&-"
+ else:
+ send = "for i in `seq 20`; do echo a | socat -t0.02 - UDP6:" \
+ f"[{cfg.addr_v['6']}]:{local_port},sourceport={remote_port}; done"
+
+ cpus = set()
+ with bkg(send, shell=True, host=cfg.remote, exit_wait=True):
+ for _ in range(20):
+ fd_read_timeout(sock.fileno(), 1)
+ cpu = sock.getsockopt(socket.SOL_SOCKET, socket.SO_INCOMING_CPU)
+ cpus.add(cpu)
+
+ if one_cpu:
+ ksft_eq(len(cpus), 1,
+ f"{one_sock=} - expected one CPU, got traffic on: {cpus=}")
+ else:
+ ksft_ge(len(cpus), 2,
+ f"{one_sock=} - expected many CPUs, got traffic on: {cpus=}")
+
+
+def test_rss_flow_label(cfg):
+ """
+ Test hashing on IPv6 flow label. Send traffic over a single socket
+ and over multiple sockets. Depend on the remote having auto-label
+ enabled so that it randomizes the label per socket.
+ """
+
+ cfg.require_ipver("6")
+ cfg.require_cmd("socat", remote=True)
+ _check_system(cfg)
+
+ # Enable flow label hashing for UDP6
+ initial = _ethtool_get_cfg(cfg, "udp6")
+ no_lbl = initial.replace("l", "")
+ if "l" not in initial:
+ try:
+ cmd(f"ethtool -N {cfg.ifname} rx-flow-hash udp6 l{no_lbl}")
+ except CmdExitFailure as exc:
+ raise KsftSkipEx("Device doesn't support Flow Label for UDP6") from exc
+
+ defer(cmd, f"ethtool -N {cfg.ifname} rx-flow-hash udp6 {initial}")
+
+ _traffic(cfg, one_sock=True, one_cpu=True)
+ _traffic(cfg, one_sock=False, one_cpu=False)
+
+ # Disable it, we should see no hashing (reset was already defer()ed)
+ cmd(f"ethtool -N {cfg.ifname} rx-flow-hash udp6 {no_lbl}")
+
+ _traffic(cfg, one_sock=False, one_cpu=True)
+
+
+def _check_v4_flow_types(cfg):
+ for fl_type in ["tcp4", "udp4", "ah4", "esp4", "sctp4"]:
+ try:
+ cur = cmd(f"ethtool -n {cfg.ifname} rx-flow-hash {fl_type}").stdout
+ ksft_not_in("Flow Label", cur,
+ comment=f"{fl_type=} has Flow Label:" + cur)
+ except CmdExitFailure:
+ # Probably does not support this flow type
+ pass
+
+
+def test_rss_flow_label_6only(cfg):
+ """
+ Test interactions with IPv4 flow types. It should not be possible to set
+ IPv6 Flow Label hashing for an IPv4 flow type. The Flow Label should also
+ not appear in the IPv4 "current config".
+ """
+
+ with ksft_raises(CmdExitFailure) as cm:
+ cmd(f"ethtool -N {cfg.ifname} rx-flow-hash tcp4 sdfnl")
+ ksft_in("Invalid argument", cm.exception.cmd.stderr)
+
+ _check_v4_flow_types(cfg)
+
+ # Try to enable Flow Labels and check again, in case it leaks thru
+ initial = _ethtool_get_cfg(cfg, "udp6")
+ changed = initial.replace("l", "") if "l" in initial else initial + "l"
+
+ cmd(f"ethtool -N {cfg.ifname} rx-flow-hash udp6 {changed}")
+ restore = defer(cmd, f"ethtool -N {cfg.ifname} rx-flow-hash udp6 {initial}")
+
+ _check_v4_flow_types(cfg)
+ restore.exec()
+ _check_v4_flow_types(cfg)
+
+
+def main() -> None:
+ with NetDrvEpEnv(__file__, nsim_test=False) as cfg:
+ ksft_run([test_rss_flow_label,
+ test_rss_flow_label_6only],
+ args=(cfg, ))
+ ksft_exit()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/testing/selftests/drivers/net/hw/rss_input_xfrm.py b/tools/testing/selftests/drivers/net/hw/rss_input_xfrm.py
index f439c434ba36..72880e388478 100755
--- a/tools/testing/selftests/drivers/net/hw/rss_input_xfrm.py
+++ b/tools/testing/selftests/drivers/net/hw/rss_input_xfrm.py
@@ -32,16 +32,16 @@ def test_rss_input_xfrm(cfg, ipver):
if multiprocessing.cpu_count() < 2:
raise KsftSkipEx("Need at least two CPUs to test symmetric RSS hash")
- cfg.require_cmd("socat", remote=True)
+ cfg.require_cmd("socat", local=False, remote=True)
if not hasattr(socket, "SO_INCOMING_CPU"):
raise KsftSkipEx("socket.SO_INCOMING_CPU was added in Python 3.11")
- input_xfrm = cfg.ethnl.rss_get(
- {'header': {'dev-name': cfg.ifname}}).get('input_xfrm')
+ rss = cfg.ethnl.rss_get({'header': {'dev-name': cfg.ifname}})
+ input_xfrm = set(filter(lambda x: 'sym' in x, rss.get('input-xfrm', {})))
# Check for symmetric xor/or-xor
- if not input_xfrm or (input_xfrm != 1 and input_xfrm != 2):
+ if not input_xfrm:
raise KsftSkipEx("Symmetric RSS hash not requested")
cpus = set()
diff --git a/tools/testing/selftests/net/toeplitz.c b/tools/testing/selftests/drivers/net/hw/toeplitz.c
index 9ba03164d73a..d23b3b0c20a3 100644
--- a/tools/testing/selftests/net/toeplitz.c
+++ b/tools/testing/selftests/drivers/net/hw/toeplitz.c
@@ -52,7 +52,11 @@
#include <sys/types.h>
#include <unistd.h>
-#include "../kselftest.h"
+#include <ynl.h>
+#include "ethtool-user.h"
+
+#include "kselftest.h"
+#include "../../../net/lib/ksft.h"
#define TOEPLITZ_KEY_MIN_LEN 40
#define TOEPLITZ_KEY_MAX_LEN 60
@@ -64,6 +68,7 @@
#define FOUR_TUPLE_MAX_LEN ((sizeof(struct in6_addr) * 2) + (sizeof(uint16_t) * 2))
#define RSS_MAX_CPUS (1 << 16) /* real constraint is PACKET_FANOUT_MAX */
+#define RSS_MAX_INDIR (1 << 16)
#define RPS_MAX_CPUS 16UL /* must be a power of 2 */
@@ -101,6 +106,8 @@ struct ring_state {
static unsigned int rx_irq_cpus[RSS_MAX_CPUS]; /* map from rxq to cpu */
static int rps_silo_to_cpu[RPS_MAX_CPUS];
static unsigned char toeplitz_key[TOEPLITZ_KEY_MAX_LEN];
+static unsigned int rss_indir_tbl[RSS_MAX_INDIR];
+static unsigned int rss_indir_tbl_size;
static struct ring_state rings[RSS_MAX_CPUS];
static inline uint32_t toeplitz(const unsigned char *four_tuple,
@@ -129,7 +136,12 @@ static inline uint32_t toeplitz(const unsigned char *four_tuple,
/* Compare computed cpu with arrival cpu from packet_fanout_cpu */
static void verify_rss(uint32_t rx_hash, int cpu)
{
- int queue = rx_hash % cfg_num_queues;
+ int queue;
+
+ if (rss_indir_tbl_size)
+ queue = rss_indir_tbl[rx_hash % rss_indir_tbl_size];
+ else
+ queue = rx_hash % cfg_num_queues;
log_verbose(" rxq %d (cpu %d)", queue, rx_irq_cpus[queue]);
if (rx_irq_cpus[queue] != cpu) {
@@ -482,6 +494,56 @@ static void parse_rps_bitmap(const char *arg)
rps_silo_to_cpu[cfg_num_rps_cpus++] = i;
}
+static void read_rss_dev_info_ynl(void)
+{
+ struct ethtool_rss_get_req *req;
+ struct ethtool_rss_get_rsp *rsp;
+ struct ynl_sock *ys;
+
+ ys = ynl_sock_create(&ynl_ethtool_family, NULL);
+ if (!ys)
+ error(1, errno, "ynl_sock_create failed");
+
+ req = ethtool_rss_get_req_alloc();
+ if (!req)
+ error(1, errno, "ethtool_rss_get_req_alloc failed");
+
+ ethtool_rss_get_req_set_header_dev_name(req, cfg_ifname);
+
+ rsp = ethtool_rss_get(ys, req);
+ if (!rsp)
+ error(1, ys->err.code, "YNL: %s", ys->err.msg);
+
+ if (!rsp->_len.hkey)
+ error(1, 0, "RSS key not available for %s", cfg_ifname);
+
+ if (rsp->_len.hkey < TOEPLITZ_KEY_MIN_LEN ||
+ rsp->_len.hkey > TOEPLITZ_KEY_MAX_LEN)
+ error(1, 0, "RSS key length %u out of bounds [%u, %u]",
+ rsp->_len.hkey, TOEPLITZ_KEY_MIN_LEN,
+ TOEPLITZ_KEY_MAX_LEN);
+
+ memcpy(toeplitz_key, rsp->hkey, rsp->_len.hkey);
+
+ if (rsp->_count.indir > RSS_MAX_INDIR)
+ error(1, 0, "RSS indirection table too large (%u > %u)",
+ rsp->_count.indir, RSS_MAX_INDIR);
+
+ /* If indir table not available we'll fallback to simple modulo math */
+ if (rsp->_count.indir) {
+ memcpy(rss_indir_tbl, rsp->indir,
+ rsp->_count.indir * sizeof(rss_indir_tbl[0]));
+ rss_indir_tbl_size = rsp->_count.indir;
+
+ log_verbose("RSS indirection table size: %u\n",
+ rss_indir_tbl_size);
+ }
+
+ ethtool_rss_get_rsp_free(rsp);
+ ethtool_rss_get_req_free(req);
+ ynl_sock_destroy(ys);
+}
+
static void parse_opts(int argc, char **argv)
{
static struct option long_options[] = {
@@ -550,7 +612,7 @@ static void parse_opts(int argc, char **argv)
}
if (!have_toeplitz)
- error(1, 0, "Must supply rss key ('-k')");
+ read_rss_dev_info_ynl();
num_cpus = get_nprocs();
if (num_cpus > RSS_MAX_CPUS)
@@ -576,6 +638,10 @@ int main(int argc, char **argv)
fd_sink = setup_sink();
setup_rings();
+
+ /* Signal to test framework that we're ready to receive */
+ ksft_ready();
+
process_rings();
cleanup_rings();
diff --git a/tools/testing/selftests/drivers/net/hw/toeplitz.py b/tools/testing/selftests/drivers/net/hw/toeplitz.py
new file mode 100755
index 000000000000..d2db5ee9e358
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/hw/toeplitz.py
@@ -0,0 +1,211 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+Toeplitz Rx hashing test:
+ - rxhash (the hash value calculation itself);
+ - RSS mapping from rxhash to rx queue;
+ - RPS mapping from rxhash to cpu.
+"""
+
+import glob
+import os
+import socket
+from lib.py import ksft_run, ksft_exit, ksft_pr
+from lib.py import NetDrvEpEnv, EthtoolFamily, NetdevFamily
+from lib.py import cmd, bkg, rand_port, defer
+from lib.py import ksft_in
+from lib.py import ksft_variants, KsftNamedVariant, KsftSkipEx, KsftFailEx
+
+# "define" for the ID of the Toeplitz hash function
+ETH_RSS_HASH_TOP = 1
+
+
+def _check_rps_and_rfs_not_configured(cfg):
+ """Verify that RPS is not already configured."""
+
+ for rps_file in glob.glob(f"/sys/class/net/{cfg.ifname}/queues/rx-*/rps_cpus"):
+ with open(rps_file, "r", encoding="utf-8") as fp:
+ val = fp.read().strip()
+ if set(val) - {"0", ","}:
+ raise KsftSkipEx(f"RPS already configured on {rps_file}: {val}")
+
+ rfs_file = "/proc/sys/net/core/rps_sock_flow_entries"
+ with open(rfs_file, "r", encoding="utf-8") as fp:
+ val = fp.read().strip()
+ if val != "0":
+ raise KsftSkipEx(f"RFS already configured {rfs_file}: {val}")
+
+
+def _get_cpu_for_irq(irq):
+ with open(f"/proc/irq/{irq}/smp_affinity_list", "r",
+ encoding="utf-8") as fp:
+ data = fp.read().strip()
+ if "," in data or "-" in data:
+ raise KsftFailEx(f"IRQ{irq} is not mapped to a single core: {data}")
+ return int(data)
+
+
+def _get_irq_cpus(cfg):
+ """
+ Read the list of IRQs for the device Rx queues.
+ """
+ queues = cfg.netnl.queue_get({"ifindex": cfg.ifindex}, dump=True)
+ napis = cfg.netnl.napi_get({"ifindex": cfg.ifindex}, dump=True)
+
+ # Remap into ID-based dicts
+ napis = {n["id"]: n for n in napis}
+ queues = {f"{q['type']}{q['id']}": q for q in queues}
+
+ cpus = []
+ for rx in range(9999):
+ name = f"rx{rx}"
+ if name not in queues:
+ break
+ cpus.append(_get_cpu_for_irq(napis[queues[name]["napi-id"]]["irq"]))
+
+ return cpus
+
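+# The mapping above goes rx queue -> NAPI instance -> IRQ -> pinned CPU,
+# assuming each Rx IRQ has single-CPU affinity (_get_cpu_for_irq checks).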
+
+def _get_unused_cpus(cfg, count=2):
+ """
+ Get CPUs that are not used by Rx queues.
+ Returns a list of at least 'count' CPU numbers.
+ """
+
+ # Get CPUs used by Rx queues
+ rx_cpus = set(_get_irq_cpus(cfg))
+
+ # Get total number of CPUs
+ num_cpus = os.cpu_count()
+
+ # Find unused CPUs
+ unused_cpus = [cpu for cpu in range(num_cpus) if cpu not in rx_cpus]
+
+ if len(unused_cpus) < count:
+ raise KsftSkipEx(f"Need at {count} CPUs not used by Rx queues, found {len(unused_cpus)}")
+
+ return unused_cpus[:count]
+
+
+def _configure_rps(cfg, rps_cpus):
+ """Configure RPS for all Rx queues."""
+
+ mask = 0
+ for cpu in rps_cpus:
+ mask |= (1 << cpu)
+ mask = hex(mask)[2:]
+
+ # Set RPS bitmap for all rx queues
+ for rps_file in glob.glob(f"/sys/class/net/{cfg.ifname}/queues/rx-*/rps_cpus"):
+ with open(rps_file, "w", encoding="utf-8") as fp:
+ fp.write(mask)
+
+ return mask
+
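+# e.g. rps_cpus=[2, 3] -> mask 0b1100 -> "c"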
+
+def _send_traffic(cfg, proto_flag, ipver, port):
+ """Send 20 packets of requested type."""
+
+ # Determine protocol and IP version for socat
+ if proto_flag == "-u":
+ proto = "UDP"
+ else:
+ proto = "TCP"
+
+ baddr = f"[{cfg.addr_v['6']}]" if ipver == "6" else cfg.addr_v["4"]
+
+ # Run socat in a loop to send traffic periodically
+ # Use sh -c with a loop similar to toeplitz_client.sh
+ socat_cmd = f"""
+ for i in `seq 20`; do
+ echo "msg $i" | socat -{ipver} -t 0.1 - {proto}:{baddr}:{port};
+ sleep 0.001;
+ done
+ """
+
+ cmd(socat_cmd, shell=True, host=cfg.remote)
+
+
+def _test_variants():
+ for grp in ["", "rss", "rps"]:
+ for l4 in ["tcp", "udp"]:
+ for l3 in ["4", "6"]:
+ name = f"{l4}_ipv{l3}"
+ if grp:
+ name = f"{grp}_{name}"
+ yield KsftNamedVariant(name, "-" + l4[0], l3, grp)
+
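+# The generator above yields 12 variants: {tcp,udp} x {ipv4,ipv6} with no
+# steering group, plus the same matrix under "rss_" and "rps_" prefixes
+# (e.g. "tcp_ipv4", "rss_udp_ipv6").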
+
+@ksft_variants(_test_variants())
+def test(cfg, proto_flag, ipver, grp):
+ """Run a single toeplitz test."""
+
+ cfg.require_ipver(ipver)
+
+ # Check that rxhash is enabled
+ ksft_in("receive-hashing: on", cmd(f"ethtool -k {cfg.ifname}").stdout)
+
+ rss = cfg.ethnl.rss_get({"header": {"dev-index": cfg.ifindex}})
+ # Make sure NIC is configured to use Toeplitz hash, and no key xfrm.
+ if rss.get('hfunc') != ETH_RSS_HASH_TOP or rss.get('input-xfrm'):
+ cfg.ethnl.rss_set({"header": {"dev-index": cfg.ifindex},
+ "hfunc": ETH_RSS_HASH_TOP,
+ "input-xfrm": {}})
+ defer(cfg.ethnl.rss_set, {"header": {"dev-index": cfg.ifindex},
+ "hfunc": rss.get('hfunc'),
+ "input-xfrm": rss.get('input-xfrm', {})
+ })
+
+ port = rand_port(socket.SOCK_DGRAM)
+
+ toeplitz_path = cfg.test_dir / "toeplitz"
+ rx_cmd = [
+ str(toeplitz_path),
+ "-" + ipver,
+ proto_flag,
+ "-d", str(port),
+ "-i", cfg.ifname,
+ "-T", "4000",
+ "-s",
+ "-v"
+ ]
+
+ if grp:
+ _check_rps_and_rfs_not_configured(cfg)
+ if grp == "rss":
+ irq_cpus = ",".join([str(x) for x in _get_irq_cpus(cfg)])
+ rx_cmd += ["-C", irq_cpus]
+ ksft_pr(f"RSS using CPUs: {irq_cpus}")
+ elif grp == "rps":
+ # Get CPUs not used by Rx queues and configure them for RPS
+ rps_cpus = _get_unused_cpus(cfg, count=2)
+ rps_mask = _configure_rps(cfg, rps_cpus)
+ defer(_configure_rps, cfg, [])
+ rx_cmd += ["-r", rps_mask]
+ ksft_pr(f"RPS using CPUs: {rps_cpus}, mask: {rps_mask}")
+
+ # Run rx in background, it will exit once it has seen enough packets
+ with bkg(" ".join(rx_cmd), ksft_ready=True, exit_wait=True) as rx_proc:
+ while rx_proc.proc.poll() is None:
+ _send_traffic(cfg, proto_flag, ipver, port)
+
+ # Check rx result
+ ksft_pr("Receiver output:")
+ ksft_pr(rx_proc.stdout.strip().replace('\n', '\n# '))
+ if rx_proc.stderr:
+ ksft_pr(rx_proc.stderr.strip().replace('\n', '\n# '))
+
+
+def main() -> None:
+ """Ksft boilerplate main."""
+
+ with NetDrvEpEnv(__file__) as cfg:
+ cfg.ethnl = EthtoolFamily()
+ cfg.netnl = NetdevFamily()
+ ksft_run(cases=[test], args=(cfg,))
+ ksft_exit()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/testing/selftests/drivers/net/hw/tso.py b/tools/testing/selftests/drivers/net/hw/tso.py
index e1ecb92f79d9..0998e68ebaf0 100755
--- a/tools/testing/selftests/drivers/net/hw/tso.py
+++ b/tools/testing/selftests/drivers/net/hw/tso.py
@@ -34,12 +34,12 @@ def tcp_sock_get_retrans(sock):
def run_one_stream(cfg, ipver, remote_v4, remote_v6, should_lso):
- cfg.require_cmd("socat", remote=True)
+ cfg.require_cmd("socat", local=False, remote=True)
port = rand_port()
listen_cmd = f"socat -{ipver} -t 2 -u TCP-LISTEN:{port},reuseport /dev/null,ignoreeof"
- with bkg(listen_cmd, host=cfg.remote) as nc:
+ with bkg(listen_cmd, host=cfg.remote, exit_wait=True) as nc:
wait_port_listen(port, host=cfg.remote)
if ipver == "4":
@@ -60,16 +60,17 @@ def run_one_stream(cfg, ipver, remote_v4, remote_v6, should_lso):
sock_wait_drain(sock)
qstat_new = cfg.netnl.qstats_get({"ifindex": cfg.ifindex}, dump=True)[0]
- # No math behind the 10 here, but try to catch cases where
- # TCP falls back to non-LSO.
- ksft_lt(tcp_sock_get_retrans(sock), 10)
- sock.close()
-
# Check that at least 90% of the data was sent as LSO packets.
# System noise may cause false negatives. Also header overheads
# will add up to 5% of extra packets... The check is best effort.
total_lso_wire = len(buf) * 0.90 // cfg.dev["mtu"]
total_lso_super = len(buf) * 0.90 // cfg.dev["tso_max_size"]
+
+ # Make sure we have order of magnitude more LSO packets than
+ # retransmits, in case TCP retransmitted all the LSO packets.
+ ksft_lt(tcp_sock_get_retrans(sock), total_lso_wire / 4)
+ sock.close()
+
if should_lso:
if cfg.have_stat_super_count:
ksft_ge(qstat_new['tx-hw-gso-packets'] -
@@ -102,7 +103,7 @@ def build_tunnel(cfg, outer_ipver, tun_info):
remote_addr = cfg.remote_addr_v[outer_ipver]
tun_type = tun_info[0]
- tun_arg = tun_info[2]
+ tun_arg = tun_info[1]
ip(f"link add {tun_type}-ksft type {tun_type} {tun_arg} local {local_addr} remote {remote_addr} dev {cfg.ifname}")
defer(ip, f"link del {tun_type}-ksft")
ip(f"link set dev {tun_type}-ksft up")
@@ -119,15 +120,30 @@ def build_tunnel(cfg, outer_ipver, tun_info):
return remote_v4, remote_v6
+def restore_wanted_features(cfg):
+ features_cmd = ""
+ for feature in cfg.hw_features:
+ setting = "on" if feature in cfg.wanted_features else "off"
+ features_cmd += f" {feature} {setting}"
+ try:
+ ethtool(f"-K {cfg.ifname} {features_cmd}")
+ except Exception as e:
+ ksft_pr(f"WARNING: failure restoring wanted features: {e}")
+
+
def test_builder(name, cfg, outer_ipver, feature, tun=None, inner_ipver=None):
"""Construct specific tests from the common template."""
def f(cfg):
cfg.require_ipver(outer_ipver)
+ defer(restore_wanted_features, cfg)
if not cfg.have_stat_super_count and \
not cfg.have_stat_wire_count:
raise KsftSkipEx(f"Device does not support LSO queue stats")
+ if feature not in cfg.hw_features:
+ raise KsftSkipEx(f"Device does not support {feature}")
+
ipver = outer_ipver
if tun:
remote_v4, remote_v6 = build_tunnel(cfg, ipver, tun)
@@ -136,36 +152,21 @@ def test_builder(name, cfg, outer_ipver, feature, tun=None, inner_ipver=None):
remote_v4 = cfg.remote_addr_v["4"]
remote_v6 = cfg.remote_addr_v["6"]
- tun_partial = tun and tun[1]
- # Tunnel which can silently fall back to gso-partial
- has_gso_partial = tun and 'tx-gso-partial' in cfg.features
-
- # For TSO4 via partial we need mangleid
- if ipver == "4" and feature in cfg.partial_features:
- ksft_pr("Testing with mangleid enabled")
- if 'tx-tcp-mangleid-segmentation' not in cfg.features:
- ethtool(f"-K {cfg.ifname} tx-tcp-mangleid-segmentation on")
- defer(ethtool, f"-K {cfg.ifname} tx-tcp-mangleid-segmentation off")
-
# First test without the feature enabled.
ethtool(f"-K {cfg.ifname} {feature} off")
- if has_gso_partial:
- ethtool(f"-K {cfg.ifname} tx-gso-partial off")
run_one_stream(cfg, ipver, remote_v4, remote_v6, should_lso=False)
- # Now test with the feature enabled.
- # For compatible tunnels only - just GSO partial, not specific feature.
- if has_gso_partial:
+ ethtool(f"-K {cfg.ifname} tx-gso-partial off")
+ ethtool(f"-K {cfg.ifname} tx-tcp-mangleid-segmentation off")
+ if feature in cfg.partial_features:
ethtool(f"-K {cfg.ifname} tx-gso-partial on")
- run_one_stream(cfg, ipver, remote_v4, remote_v6,
- should_lso=tun_partial)
+ if ipver == "4":
+ ksft_pr("Testing with mangleid enabled")
+ ethtool(f"-K {cfg.ifname} tx-tcp-mangleid-segmentation on")
# Full feature enabled.
- if feature in cfg.features:
- ethtool(f"-K {cfg.ifname} {feature} on")
- run_one_stream(cfg, ipver, remote_v4, remote_v6, should_lso=True)
- else:
- raise KsftXfailEx(f"Device does not support {feature}")
+ ethtool(f"-K {cfg.ifname} {feature} on")
+ run_one_stream(cfg, ipver, remote_v4, remote_v6, should_lso=True)
f.__name__ = name + ((outer_ipver + "_") if tun else "") + "ipv" + inner_ipver
return f
@@ -176,23 +177,39 @@ def query_nic_features(cfg) -> None:
cfg.have_stat_super_count = False
cfg.have_stat_wire_count = False
- cfg.features = set()
features = cfg.ethnl.features_get({"header": {"dev-index": cfg.ifindex}})
- for f in features["active"]["bits"]["bit"]:
- cfg.features.add(f["name"])
+
+ cfg.wanted_features = set()
+ for f in features["wanted"]["bits"]["bit"]:
+ cfg.wanted_features.add(f["name"])
+
+ cfg.hw_features = set()
+ hw_all_features_cmd = ""
+ for f in features["hw"]["bits"]["bit"]:
+ if f.get("value", False):
+ feature = f["name"]
+ cfg.hw_features.add(feature)
+ hw_all_features_cmd += f" {feature} on"
+ try:
+ ethtool(f"-K {cfg.ifname} {hw_all_features_cmd}")
+ except Exception as e:
+ ksft_pr(f"WARNING: failure enabling all hw features: {e}")
+ ksft_pr("partial gso feature detection may be impacted")
# Check which features are supported via GSO partial
cfg.partial_features = set()
- if 'tx-gso-partial' in cfg.features:
+ if 'tx-gso-partial' in cfg.hw_features:
ethtool(f"-K {cfg.ifname} tx-gso-partial off")
no_partial = set()
features = cfg.ethnl.features_get({"header": {"dev-index": cfg.ifindex}})
for f in features["active"]["bits"]["bit"]:
no_partial.add(f["name"])
- cfg.partial_features = cfg.features - no_partial
+ cfg.partial_features = cfg.hw_features - no_partial
ethtool(f"-K {cfg.ifname} tx-gso-partial on")
+ restore_wanted_features(cfg)
+
stats = cfg.netnl.qstats_get({"ifindex": cfg.ifindex}, dump=True)
if stats:
if 'tx-hw-gso-packets' in stats[0]:
@@ -211,13 +228,14 @@ def main() -> None:
query_nic_features(cfg)
test_info = (
- # name, v4/v6 ethtool_feature tun:(type, partial, args)
- ("", "4", "tx-tcp-segmentation", None),
- ("", "6", "tx-tcp6-segmentation", None),
- ("vxlan", "", "tx-udp_tnl-segmentation", ("vxlan", True, "id 100 dstport 4789 noudpcsum")),
- ("vxlan_csum", "", "tx-udp_tnl-csum-segmentation", ("vxlan", False, "id 100 dstport 4789 udpcsum")),
- ("gre", "4", "tx-gre-segmentation", ("ipgre", False, "")),
- ("gre", "6", "tx-gre-segmentation", ("ip6gre", False, "")),
+ # name, v4/v6 ethtool_feature tun:(type, args, inner ip versions)
+ ("", "4", "tx-tcp-segmentation", None),
+ ("", "6", "tx-tcp6-segmentation", None),
+ ("vxlan", "4", "tx-udp_tnl-segmentation", ("vxlan", "id 100 dstport 4789 noudpcsum", ("4", "6"))),
+ ("vxlan", "6", "tx-udp_tnl-segmentation", ("vxlan", "id 100 dstport 4789 udp6zerocsumtx udp6zerocsumrx", ("4", "6"))),
+ ("vxlan_csum", "", "tx-udp_tnl-csum-segmentation", ("vxlan", "id 100 dstport 4789 udpcsum", ("4", "6"))),
+ ("gre", "4", "tx-gre-segmentation", ("gre", "", ("4", "6"))),
+ ("gre", "6", "tx-gre-segmentation", ("ip6gre","", ("4", "6"))),
)
cases = []
@@ -227,11 +245,13 @@ def main() -> None:
if info[1] and outer_ipver != info[1]:
continue
- cases.append(test_builder(info[0], cfg, outer_ipver, info[2],
- tun=info[3], inner_ipver="4"))
if info[3]:
- cases.append(test_builder(info[0], cfg, outer_ipver, info[2],
- tun=info[3], inner_ipver="6"))
+ cases += [
+ test_builder(info[0], cfg, outer_ipver, info[2], info[3], inner_ipver)
+ for inner_ipver in info[3][2]
+ ]
+ else:
+ cases.append(test_builder(info[0], cfg, outer_ipver, info[2], None, outer_ipver))
ksft_run(cases=cases, args=(cfg, ))
ksft_exit()
diff --git a/tools/testing/selftests/drivers/net/lib/py/__init__.py b/tools/testing/selftests/drivers/net/lib/py/__init__.py
index 401e70f7f136..8b75faa9af6d 100644
--- a/tools/testing/selftests/drivers/net/lib/py/__init__.py
+++ b/tools/testing/selftests/drivers/net/lib/py/__init__.py
@@ -1,5 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
+"""
+Driver test environment.
+NetDrvEnv and NetDrvEpEnv are the main environment classes.
+Former is for local host only tests, latter creates / connects
+to a remote endpoint. See NIPA wiki for more information about
+running and writing driver tests.
+"""
+
import sys
from pathlib import Path
@@ -7,13 +15,41 @@ KSFT_DIR = (Path(__file__).parent / "../../../..").resolve()
try:
sys.path.append(KSFT_DIR.as_posix())
- from net.lib.py import *
+
+ # Import one by one to avoid pylint false positives
+ from net.lib.py import NetNS, NetNSEnter, NetdevSimDev
+ from net.lib.py import EthtoolFamily, NetdevFamily, NetshaperFamily, \
+ NlError, RtnlFamily, DevlinkFamily, PSPFamily
+ from net.lib.py import CmdExitFailure
+ from net.lib.py import bkg, cmd, bpftool, bpftrace, defer, ethtool, \
+ fd_read_timeout, ip, rand_port, wait_port_listen, wait_file
+ from net.lib.py import KsftSkipEx, KsftFailEx, KsftXfailEx
+ from net.lib.py import ksft_disruptive, ksft_exit, ksft_pr, ksft_run, \
+ ksft_setup, ksft_variants, KsftNamedVariant
+ from net.lib.py import ksft_eq, ksft_ge, ksft_in, ksft_is, ksft_lt, \
+ ksft_ne, ksft_not_in, ksft_raises, ksft_true, ksft_gt, ksft_not_none
+
+ __all__ = ["NetNS", "NetNSEnter", "NetdevSimDev",
+ "EthtoolFamily", "NetdevFamily", "NetshaperFamily",
+ "NlError", "RtnlFamily", "DevlinkFamily", "PSPFamily",
+ "CmdExitFailure",
+ "bkg", "cmd", "bpftool", "bpftrace", "defer", "ethtool",
+ "fd_read_timeout", "ip", "rand_port",
+ "wait_port_listen", "wait_file",
+ "KsftSkipEx", "KsftFailEx", "KsftXfailEx",
+ "ksft_disruptive", "ksft_exit", "ksft_pr", "ksft_run",
+ "ksft_setup", "ksft_variants", "KsftNamedVariant",
+ "ksft_eq", "ksft_ge", "ksft_in", "ksft_is", "ksft_lt",
+ "ksft_ne", "ksft_not_in", "ksft_raises", "ksft_true", "ksft_gt",
+ "ksft_not_none", "ksft_not_none"]
+
+ from .env import NetDrvEnv, NetDrvEpEnv
+ from .load import GenerateTraffic, Iperf3Runner
+ from .remote import Remote
+
+ __all__ += ["NetDrvEnv", "NetDrvEpEnv", "GenerateTraffic", "Remote",
+ "Iperf3Runner"]
except ModuleNotFoundError as e:
- ksft_pr("Failed importing `net` library from kernel sources")
- ksft_pr(str(e))
- ktap_result(True, comment="SKIP")
+ print("Failed importing `net` library from kernel sources")
+ print(str(e))
sys.exit(4)
-
-from .env import *
-from .load import *
-from .remote import Remote
diff --git a/tools/testing/selftests/drivers/net/lib/py/env.py b/tools/testing/selftests/drivers/net/lib/py/env.py
index 3bccddf8cbc5..8b644fd84ff2 100644
--- a/tools/testing/selftests/drivers/net/lib/py/env.py
+++ b/tools/testing/selftests/drivers/net/lib/py/env.py
@@ -4,7 +4,7 @@ import os
import time
from pathlib import Path
from lib.py import KsftSkipEx, KsftXfailEx
-from lib.py import ksft_setup
+from lib.py import ksft_setup, wait_file
from lib.py import cmd, ethtool, ip, CmdExitFailure
from lib.py import NetNS, NetdevSimDev
from .remote import Remote
@@ -25,6 +25,9 @@ class NetDrvEnvBase:
self.env = self._load_env_file()
+ # Following attrs must be set by inheriting classes
+ self.dev = None
+
def _load_env_file(self):
env = os.environ.copy()
@@ -48,6 +51,22 @@ class NetDrvEnvBase:
env[pair[0]] = pair[1]
return ksft_setup(env)
+ def __del__(self):
+ pass
+
+ def __enter__(self):
+ ip(f"link set dev {self.dev['ifname']} up")
+ wait_file(f"/sys/class/net/{self.dev['ifname']}/carrier",
+ lambda x: x.strip() == "1")
+
+ return self
+
+ def __exit__(self, ex_type, ex_value, ex_tb):
+ """
+ __exit__ gets called at the end of a "with" block.
+ """
+ self.__del__()
+
class NetDrvEnv(NetDrvEnvBase):
"""
@@ -72,17 +91,6 @@ class NetDrvEnv(NetDrvEnvBase):
self.ifname = self.dev['ifname']
self.ifindex = self.dev['ifindex']
- def __enter__(self):
- ip(f"link set dev {self.dev['ifname']} up")
-
- return self
-
- def __exit__(self, ex_type, ex_value, ex_tb):
- """
- __exit__ gets called at the end of a "with" block.
- """
- self.__del__()
-
def __del__(self):
if self._ns:
self._ns.remove()
@@ -160,6 +168,8 @@ class NetDrvEpEnv(NetDrvEnvBase):
# resolve remote interface name
self.remote_ifname = self.resolve_remote_ifc()
+ self.remote_dev = ip("-d link show dev " + self.remote_ifname,
+ host=self.remote, json=True)[0]
self._required_cmd = {}
@@ -219,15 +229,6 @@ class NetDrvEpEnv(NetDrvEnvBase):
raise Exception("Can't resolve remote interface name, multiple interfaces match")
return v6[0]["ifname"] if v6 else v4[0]["ifname"]
- def __enter__(self):
- return self
-
- def __exit__(self, ex_type, ex_value, ex_tb):
- """
- __exit__ gets called at the end of a "with" block.
- """
- self.__del__()
-
def __del__(self):
if self._ns:
self._ns.remove()
@@ -246,6 +247,10 @@ class NetDrvEpEnv(NetDrvEnvBase):
if not self.addr_v[ipver] or not self.remote_addr_v[ipver]:
raise KsftSkipEx(f"Test requires IPv{ipver} connectivity")
+ def require_nsim(self):
+ if self._ns is None:
+ raise KsftXfailEx("Test only works on netdevsim")
+
def _require_cmd(self, comm, key, host=None):
cached = self._required_cmd.get(comm, {})
if cached.get(key) is None:
@@ -259,7 +264,7 @@ class NetDrvEpEnv(NetDrvEnvBase):
if not self._require_cmd(comm, "local"):
raise KsftSkipEx("Test requires command: " + comm)
if remote:
- if not self._require_cmd(comm, "remote"):
+ if not self._require_cmd(comm, "remote", host=self.remote):
raise KsftSkipEx("Test requires (remote) command: " + comm)
def wait_hw_stats_settle(self):
diff --git a/tools/testing/selftests/drivers/net/lib/py/load.py b/tools/testing/selftests/drivers/net/lib/py/load.py
index d9c10613ae67..f181fa2d38fc 100644
--- a/tools/testing/selftests/drivers/net/lib/py/load.py
+++ b/tools/testing/selftests/drivers/net/lib/py/load.py
@@ -1,22 +1,90 @@
# SPDX-License-Identifier: GPL-2.0
+import re
import time
+import json
from lib.py import ksft_pr, cmd, ip, rand_port, wait_port_listen
-class GenerateTraffic:
- def __init__(self, env, port=None):
- env.require_cmd("iperf3", remote=True)
+class Iperf3Runner:
+ """
+ Sets up and runs iperf3 traffic.
+ """
+ def __init__(self, env, port=None, server_ip=None, client_ip=None):
+ env.require_cmd("iperf3", local=True, remote=True)
self.env = env
+ self.port = rand_port() if port is None else port
+ self.server_ip = server_ip
+ self.client_ip = client_ip
+
+ def _build_server(self):
+ cmdline = f"iperf3 -s -1 -p {self.port}"
+ if self.server_ip:
+ cmdline += f" -B {self.server_ip}"
+ return cmdline
+
+ def _build_client(self, streams, duration, reverse):
+ host = self.env.addr if self.server_ip is None else self.server_ip
+ cmdline = f"iperf3 -c {host} -p {self.port} -P {streams} -t {duration} -J"
+ if self.client_ip:
+ cmdline += f" -B {self.client_ip}"
+ if reverse:
+ cmdline += " --reverse"
+ return cmdline
- if port is None:
- port = rand_port()
- self._iperf_server = cmd(f"iperf3 -s -1 -p {port}", background=True)
- wait_port_listen(port)
+ def start_server(self):
+ """
+ Starts an iperf3 server with optional bind IP.
+ """
+ cmdline = self._build_server()
+ proc = cmd(cmdline, background=True)
+ wait_port_listen(self.port)
time.sleep(0.1)
- self._iperf_client = cmd(f"iperf3 -c {env.addr} -P 16 -p {port} -t 86400",
- background=True, host=env.remote)
+ return proc
+
+ def start_client(self, background=False, streams=1, duration=10, reverse=False):
+ """
+ Starts the iperf3 client with the configured options.
+ """
+ cmdline = self._build_client(streams, duration, reverse)
+ return cmd(cmdline, background=background, host=self.env.remote)
+
+ def measure_bandwidth(self, reverse=False):
+ """
+ Runs an iperf3 measurement and returns the average bandwidth (Gbps).
+ Discards the first and last few reporting intervals and uses only the
+ middle part of the run where throughput is typically stable.
+ """
+ self.start_server()
+ result = self.start_client(duration=10, reverse=reverse)
+
+ if result.ret != 0:
+ raise RuntimeError("iperf3 failed to run successfully")
+ try:
+ out = json.loads(result.stdout)
+ except json.JSONDecodeError as exc:
+ raise ValueError("Failed to parse iperf3 JSON output") from exc
+
+ intervals = out.get("intervals", [])
+ samples = [i["sum"]["bits_per_second"] / 1e9 for i in intervals]
+ if len(samples) < 10:
+ raise ValueError(f"iperf3 returned too few intervals: {len(samples)}")
+ # Discard potentially unstable first and last 3 seconds.
+ stable = samples[3:-3]
+
+ avg = sum(stable) / len(stable)
+
+ return avg
+
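+# Typical standalone use (a sketch, assuming an already-initialized env):
+#     runner = Iperf3Runner(cfg)
+#     gbps = runner.measure_bandwidth()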
+
+class GenerateTraffic:
+ def __init__(self, env, port=None):
+ self.env = env
+ self.runner = Iperf3Runner(env, port)
+
+ self._iperf_server = self.runner.start_server()
+ self._iperf_client = self.runner.start_client(background=True, streams=16, duration=86400)
# Wait for traffic to ramp up
if not self._wait_pkts(pps=1000):
@@ -56,3 +124,16 @@ class GenerateTraffic:
ksft_pr(">> Server:")
ksft_pr(self._iperf_server.stdout)
ksft_pr(self._iperf_server.stderr)
+ self._wait_client_stopped()
+
+ def _wait_client_stopped(self, sleep=0.005, timeout=5):
+ end = time.monotonic() + timeout
+
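+ # /proc/net/tcp prints ports as 4-digit uppercase hex; "0[^6]" matches
+ # the socket state field for anything but 06 (TIME_WAIT), i.e. any
+ # still-live socket talking to the iperf3 port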
+ live_port_pattern = re.compile(fr":{self.runner.port:04X} 0[^6] ")
+
+ while time.monotonic() < end:
+ data = cmd("cat /proc/net/tcp*", host=self.env.remote).stdout
+ if not live_port_pattern.search(data):
+ return
+ time.sleep(sleep)
+ raise Exception(f"Waiting for client to stop timed out after {timeout}s")
diff --git a/tools/testing/selftests/drivers/net/lib/sh/lib_netcons.sh b/tools/testing/selftests/drivers/net/lib/sh/lib_netcons.sh
index 29b01b8e2215..ae8abff4be40 100644
--- a/tools/testing/selftests/drivers/net/lib/sh/lib_netcons.sh
+++ b/tools/testing/selftests/drivers/net/lib/sh/lib_netcons.sh
@@ -11,9 +11,13 @@ set -euo pipefail
LIBDIR=$(dirname "$(readlink -e "${BASH_SOURCE[0]}")")
SRCIF="" # to be populated later
-SRCIP=192.0.2.1
+SRCIP="" # to be populated later
+SRCIP4="192.0.2.1"
+SRCIP6="fc00::1"
DSTIF="" # to be populated later
-DSTIP=192.0.2.2
+DSTIP="" # to be populated later
+DSTIP4="192.0.2.2"
+DSTIP6="fc00::2"
PORT="6666"
MSG="netconsole selftest"
@@ -26,17 +30,23 @@ NETCONS_PATH="${NETCONS_CONFIGFS}"/"${TARGET}"
# NAMESPACE will be populated by setup_ns with a random value
NAMESPACE=""
-# IDs for netdevsim
+# IDs for netdevsim. We either use NSIM_DEV_{1,2}_ID for the standard tests
+# or NSIM_BOND_{T,R}X_{1,2} for the bonding tests. Not both at the
+# same time.
NSIM_DEV_1_ID=$((256 + RANDOM % 256))
NSIM_DEV_2_ID=$((512 + RANDOM % 256))
+NSIM_BOND_TX_1=$((768 + RANDOM % 256))
+NSIM_BOND_TX_2=$((1024 + RANDOM % 256))
+NSIM_BOND_RX_1=$((1280 + RANDOM % 256))
+NSIM_BOND_RX_2=$((1536 + RANDOM % 256))
NSIM_DEV_SYS_NEW="/sys/bus/netdevsim/new_device"
+NSIM_DEV_SYS_LINK="/sys/bus/netdevsim/link_device"
# Used to create and delete namespaces
source "${LIBDIR}"/../../../../net/lib.sh
# Create netdevsim interfaces
create_ifaces() {
-
echo "$NSIM_DEV_2_ID" > "$NSIM_DEV_SYS_NEW"
echo "$NSIM_DEV_1_ID" > "$NSIM_DEV_SYS_NEW"
udevadm settle 2> /dev/null || true
@@ -80,7 +90,23 @@ function configure_ip() {
ip link set "${SRCIF}" up
}
+function select_ipv4_or_ipv6()
+{
+ local VERSION=${1}
+
+ if [[ "$VERSION" == "ipv6" ]]
+ then
+ DSTIP="${DSTIP6}"
+ SRCIP="${SRCIP6}"
+ else
+ DSTIP="${DSTIP4}"
+ SRCIP="${SRCIP4}"
+ fi
+}
+
function set_network() {
+ local IP_VERSION=${1:-"ipv4"}
+
# setup_ns function is coming from lib.sh
setup_ns NAMESPACE
@@ -91,22 +117,66 @@ function set_network() {
# Link both interfaces back to back
link_ifaces
+ select_ipv4_or_ipv6 "${IP_VERSION}"
configure_ip
}
-function create_dynamic_target() {
+function _create_dynamic_target() {
+ local FORMAT="${1:?FORMAT parameter required}"
+ local NCPATH="${2:?NCPATH parameter required}"
+
DSTMAC=$(ip netns exec "${NAMESPACE}" \
ip link show "${DSTIF}" | awk '/ether/ {print $2}')
# Create a dynamic target
- mkdir "${NETCONS_PATH}"
+ mkdir "${NCPATH}"
- echo "${DSTIP}" > "${NETCONS_PATH}"/remote_ip
- echo "${SRCIP}" > "${NETCONS_PATH}"/local_ip
- echo "${DSTMAC}" > "${NETCONS_PATH}"/remote_mac
- echo "${SRCIF}" > "${NETCONS_PATH}"/dev_name
+ echo "${DSTIP}" > "${NCPATH}"/remote_ip
+ echo "${SRCIP}" > "${NCPATH}"/local_ip
+ echo "${DSTMAC}" > "${NCPATH}"/remote_mac
+ echo "${SRCIF}" > "${NCPATH}"/dev_name
- echo 1 > "${NETCONS_PATH}"/enabled
+ if [ "${FORMAT}" == "basic" ]
+ then
+ # Basic target does not support release
+ echo 0 > "${NCPATH}"/release
+ echo 0 > "${NCPATH}"/extended
+ elif [ "${FORMAT}" == "extended" ]
+ then
+ echo 1 > "${NCPATH}"/extended
+ fi
+}
+
+function create_dynamic_target() {
+ local FORMAT=${1:-"extended"}
+ local NCPATH=${2:-"$NETCONS_PATH"}
+ _create_dynamic_target "${FORMAT}" "${NCPATH}"
+
+ echo 1 > "${NCPATH}"/enabled
+
+ # Sleep to make sure the kernel had time to apply the netconsole
+ # driver configuration. This also keeps the console messages more
+ # sequential.
+ sleep 1
+}
+
+# Generate the command line argument for netconsole, following the format:
+# netconsole=[+][src-port]@[src-ip]/[<dev>],[tgt-port]@<tgt-ip>/[tgt-macaddr]
+function create_cmdline_str() {
+ local BINDMODE=${1:-"ifname"}
+ if [ "${BINDMODE}" == "ifname" ]
+ then
+ SRCDEV=${SRCIF}
+ else
+ SRCDEV=$(mac_get "${SRCIF}")
+ fi
+
+ DSTMAC=$(ip netns exec "${NAMESPACE}" \
+ ip link show "${DSTIF}" | awk '/ether/ {print $2}')
+ SRCPORT="1514"
+ TGTPORT="6666"
+
+ echo "netconsole=\"+${SRCPORT}@${SRCIP}/${SRCDEV},${TGTPORT}@${DSTIP}/${DSTMAC}\""
}
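For illustration, a Python sketch (all addresses and names made up) of the string create_cmdline_str() assembles, following the documented netconsole=[+][src-port]@[src-ip]/[<dev>],[tgt-port]@<tgt-ip>/[tgt-macaddr] layout, where the leading '+' requests an extended target:

# Hypothetical values; the shell function reads them from the test setup.
src_port, src_ip, src_dev = 1514, "192.0.2.1", "eth0"
tgt_port, tgt_ip, tgt_mac = 6666, "192.0.2.2", "aa:bb:cc:dd:ee:ff"

cmdline = (f'netconsole="+{src_port}@{src_ip}/{src_dev},'
           f'{tgt_port}@{tgt_ip}/{tgt_mac}"')
print(cmdline)
# netconsole="+1514@192.0.2.1/eth0,6666@192.0.2.2/aa:bb:cc:dd:ee:ff"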
# Do not append the release to the header of the message
@@ -116,16 +186,9 @@ function disable_release_append() {
echo 1 > "${NETCONS_PATH}"/enabled
}
-function cleanup() {
+function do_cleanup() {
local NSIM_DEV_SYS_DEL="/sys/bus/netdevsim/del_device"
- # delete netconsole dynamic reconfiguration
- echo 0 > "${NETCONS_PATH}"/enabled
- # Remove all the keys that got created during the selftest
- find "${NETCONS_PATH}/userdata/" -mindepth 1 -type d -delete
- # Remove the configfs entry
- rmdir "${NETCONS_PATH}"
-
# Delete netdevsim devices
echo "$NSIM_DEV_2_ID" > "$NSIM_DEV_SYS_DEL"
echo "$NSIM_DEV_1_ID" > "$NSIM_DEV_SYS_DEL"
@@ -137,6 +200,29 @@ function cleanup() {
echo "${DEFAULT_PRINTK_VALUES}" > /proc/sys/kernel/printk
}
+function cleanup_netcons() {
+ # delete netconsole dynamic reconfiguration
+ # do not fail if the target is already disabled
+ if [[ ! -d "${NETCONS_PATH}" ]]
+ then
+ # in some cases this is called before the netcons path is created
+ return
+ fi
+ if [[ $(cat "${NETCONS_PATH}"/enabled) != 0 ]]
+ then
+ echo 0 > "${NETCONS_PATH}"/enabled || true
+ fi
+ # Remove all the keys that got created during the selftest
+ find "${NETCONS_PATH}/userdata/" -mindepth 1 -type d -delete
+ # Remove the configfs entry
+ rmdir "${NETCONS_PATH}"
+}
+
+function cleanup() {
+ cleanup_netcons
+ do_cleanup
+}
+
function set_user_data() {
if [[ ! -d "${NETCONS_PATH}""/userdata" ]]
then
@@ -152,18 +238,24 @@ function set_user_data() {
function listen_port_and_save_to() {
local OUTPUT=${1}
+ local IPVERSION=${2:-"ipv4"}
+
+ if [ "${IPVERSION}" == "ipv4" ]
+ then
+ SOCAT_MODE="UDP-LISTEN"
+ else
+ SOCAT_MODE="UDP6-LISTEN"
+ fi
+
# Just wait for 2 seconds
timeout 2 ip netns exec "${NAMESPACE}" \
- socat UDP-LISTEN:"${PORT}",fork "${OUTPUT}"
+ socat "${SOCAT_MODE}":"${PORT}",fork "${OUTPUT}" 2> /dev/null
}
-function validate_result() {
+# Only validate that the message arrived properly
+function validate_msg() {
local TMPFILENAME="$1"
- # TMPFILENAME will contain something like:
- # 6.11.1-0_fbk0_rc13_509_g30d75cea12f7,13,1822,115075213798,-;netconsole selftest: netcons_gtJHM
- # key=value
-
# Check if the file exists
if [ ! -f "$TMPFILENAME" ]; then
echo "FAIL: File was not generated." >&2
@@ -175,17 +267,32 @@ function validate_result() {
cat "${TMPFILENAME}" >&2
exit "${ksft_fail}"
fi
+}
- if ! grep -q "${USERDATA_KEY}=${USERDATA_VALUE}" "${TMPFILENAME}"; then
- echo "FAIL: ${USERDATA_KEY}=${USERDATA_VALUE} not found in ${TMPFILENAME}" >&2
- cat "${TMPFILENAME}" >&2
- exit "${ksft_fail}"
+# Validate the message and userdata
+function validate_result() {
+ local TMPFILENAME="$1"
+
+ # TMPFILENAME will contain something like:
+ # 6.11.1-0_fbk0_rc13_509_g30d75cea12f7,13,1822,115075213798,-;netconsole selftest: netcons_gtJHM
+ # key=value
+
+ validate_msg "${TMPFILENAME}"
+
+ # userdata is not supported on basic-format targets,
+ # so do not validate it.
+ if [ "${FORMAT}" != "basic" ];
+ then
+ if ! grep -q "${USERDATA_KEY}=${USERDATA_VALUE}" "${TMPFILENAME}"; then
+ echo "FAIL: ${USERDATA_KEY}=${USERDATA_VALUE} not found in ${TMPFILENAME}" >&2
+ cat "${TMPFILENAME}" >&2
+ exit "${ksft_fail}"
+ fi
fi
# Delete the file once it is validated, otherwise keep it
# for debugging purposes
rm "${TMPFILENAME}"
- exit "${ksft_pass}"
}
function check_for_dependencies() {
@@ -209,6 +316,11 @@ function check_for_dependencies() {
exit "${ksft_skip}"
fi
+ if [ ! -f /proc/net/if_inet6 ]; then
+ echo "SKIP: IPv6 not configured. Check if CONFIG_IPV6 is enabled" >&2
+ exit "${ksft_skip}"
+ fi
+
if [ ! -f "${NSIM_DEV_SYS_NEW}" ]; then
echo "SKIP: file ${NSIM_DEV_SYS_NEW} does not exist. Check if CONFIG_NETDEVSIM is enabled" >&2
exit "${ksft_skip}"
@@ -224,8 +336,15 @@ function check_for_dependencies() {
exit "${ksft_skip}"
fi
- if ip addr list | grep -E "inet.*(${SRCIP}|${DSTIP})" 2> /dev/null; then
- echo "SKIP: IPs already in use. Skipping it" >&2
+ REGEXP4="inet.*(${SRCIP4}|${DSTIP4})"
+ REGEXP6="inet.*(${SRCIP6}|${DSTIP6})"
+ if ip addr list | grep -E "${REGEXP4}" 2> /dev/null; then
+ echo "SKIP: IPv4s already in use. Skipping it" >&2
+ exit "${ksft_skip}"
+ fi
+
+ if ip addr list | grep -E "${REGEXP6}" 2> /dev/null; then
+ echo "SKIP: IPv6s already in use. Skipping it" >&2
exit "${ksft_skip}"
fi
}
@@ -239,10 +358,62 @@ function check_for_taskset() {
# This is necessary if running multiple tests in a row
function pkill_socat() {
- PROCESS_NAME="socat UDP-LISTEN:6666,fork ${OUTPUT_FILE}"
+ PROCESS_NAME4="socat UDP-LISTEN:6666,fork ${OUTPUT_FILE}"
+ PROCESS_NAME6="socat UDP6-LISTEN:6666,fork ${OUTPUT_FILE}"
# socat runs under timeout(1), kill it if it is still alive
# do not fail if socat doesn't exist anymore
set +e
- pkill -f "${PROCESS_NAME}"
+ pkill -f "${PROCESS_NAME4}"
+ pkill -f "${PROCESS_NAME6}"
set -e
}
+
+# Check if netconsole was compiled as a module, otherwise exit
+function check_netconsole_module() {
+ if modinfo netconsole | grep filename: | grep -q builtin
+ then
+ echo "SKIP: netconsole should be compiled as a module" >&2
+ exit "${ksft_skip}"
+ fi
+}
+
+# A wrapper that translates the IP version into the matching UDP protocol
+function wait_for_port() {
+ local NAMESPACE=${1}
+ local PORT=${2}
+ IP_VERSION=${3}
+
+ if [ "${IP_VERSION}" == "ipv6" ]
+ then
+ PROTOCOL="udp6"
+ else
+ PROTOCOL="udp"
+ fi
+
+ wait_local_port_listen "${NAMESPACE}" "${PORT}" "${PROTOCOL}"
+ # Even after the port is open, wait one second before writing;
+ # otherwise the packet could be missed and the test would fail.
+ # This happens more frequently with IPv6.
+ sleep 1
+}
+
+# Clean up netdevsim ifaces created for bonding test
+function cleanup_bond_nsim() {
+ ip -n "${TXNS}" \
+ link delete "${BOND_TX_MAIN_IF}" type bond || true
+ ip -n "${RXNS}" \
+ link delete "${BOND_RX_MAIN_IF}" type bond || true
+
+ cleanup_netdevsim "$NSIM_BOND_TX_1"
+ cleanup_netdevsim "$NSIM_BOND_TX_2"
+ cleanup_netdevsim "$NSIM_BOND_RX_1"
+ cleanup_netdevsim "$NSIM_BOND_RX_2"
+}
+
+# Clean up after tests that use bonding interfaces
+function cleanup_bond() {
+ cleanup_netcons
+ cleanup_bond_nsim
+ cleanup_all_ns
+ ip link delete "${VETH0}" || true
+}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_policer.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_policer.sh
index 29a672c2270f..e212ad8ccef6 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_policer.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_policer.sh
@@ -44,8 +44,7 @@ source $lib_dir/devlink_lib.sh
h1_create()
{
- simple_if_init $h1 192.0.2.1/24
- defer simple_if_fini $h1 192.0.2.1/24
+ adf_simple_if_init $h1 192.0.2.1/24
mtu_set $h1 10000
defer mtu_restore $h1
@@ -56,8 +55,7 @@ h1_create()
h2_create()
{
- simple_if_init $h2 198.51.100.1/24
- defer simple_if_fini $h2 198.51.100.1/24
+ adf_simple_if_init $h2 198.51.100.1/24
mtu_set $h2 10000
defer mtu_restore $h2
@@ -106,8 +104,7 @@ setup_prepare()
# Reload to ensure devlink-trap settings are back to default.
defer devlink_reload
- vrf_prepare
- defer vrf_cleanup
+ adf_vrf_prepare
h1_create
h2_create
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh
index d5b6f2cc9a29..9ca340c5f3a6 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh
@@ -57,8 +57,7 @@ source qos_lib.sh
h1_create()
{
- simple_if_init $h1
- defer simple_if_fini $h1
+ adf_simple_if_init $h1
mtu_set $h1 10000
defer mtu_restore $h1
@@ -70,8 +69,7 @@ h1_create()
h2_create()
{
- simple_if_init $h2
- defer simple_if_fini $h2
+ adf_simple_if_init $h2
mtu_set $h2 10000
defer mtu_restore $h2
@@ -83,8 +81,7 @@ h2_create()
h3_create()
{
- simple_if_init $h3
- defer simple_if_fini $h3
+ adf_simple_if_init $h3
mtu_set $h3 10000
defer mtu_restore $h3
@@ -225,8 +222,7 @@ setup_prepare()
h3mac=$(mac_get $h3)
- vrf_prepare
- defer vrf_cleanup
+ adf_vrf_prepare
h1_create
h2_create
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_max_descriptors.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_max_descriptors.sh
index 2b5d2c2751d5..a4a25637fe2a 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_max_descriptors.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_max_descriptors.sh
@@ -68,8 +68,7 @@ mlxsw_only_on_spectrum 2+ || exit
h1_create()
{
- simple_if_init $h1
- defer simple_if_fini $h1
+ adf_simple_if_init $h1
vlan_create $h1 111 v$h1 192.0.2.33/28
defer vlan_destroy $h1 111
@@ -78,8 +77,7 @@ h1_create()
h2_create()
{
- simple_if_init $h2
- defer simple_if_fini $h2
+ adf_simple_if_init $h2
vlan_create $h2 111 v$h2 192.0.2.34/28
defer vlan_destroy $h2 111
@@ -178,8 +176,7 @@ setup_prepare()
h2mac=$(mac_get $h2)
- vrf_prepare
- defer vrf_cleanup
+ adf_vrf_prepare
h1_create
h2_create
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
index cd4a5c21360c..d8f8ae8533cd 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
@@ -72,8 +72,7 @@ source qos_lib.sh
h1_create()
{
- simple_if_init $h1 192.0.2.65/28
- defer simple_if_fini $h1 192.0.2.65/28
+ adf_simple_if_init $h1 192.0.2.65/28
mtu_set $h1 10000
defer mtu_restore $h1
@@ -81,8 +80,7 @@ h1_create()
h2_create()
{
- simple_if_init $h2
- defer simple_if_fini $h2
+ adf_simple_if_init $h2
mtu_set $h2 10000
defer mtu_restore $h2
@@ -94,8 +92,7 @@ h2_create()
h3_create()
{
- simple_if_init $h3 192.0.2.66/28
- defer simple_if_fini $h3 192.0.2.66/28
+ adf_simple_if_init $h3 192.0.2.66/28
mtu_set $h3 10000
defer mtu_restore $h3
@@ -196,8 +193,7 @@ setup_prepare()
h3mac=$(mac_get $h3)
- vrf_prepare
- defer vrf_cleanup
+ adf_vrf_prepare
h1_create
h2_create
diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
index 537d6baa77b7..47d2ffcf366e 100644
--- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
@@ -100,8 +100,7 @@ host_create()
local dev=$1; shift
local host=$1; shift
- simple_if_init $dev
- defer simple_if_fini $dev
+ adf_simple_if_init $dev
mtu_set $dev 10000
defer mtu_restore $dev
@@ -250,8 +249,7 @@ setup_prepare()
h3_mac=$(mac_get $h3)
- vrf_prepare
- defer vrf_cleanup
+ adf_vrf_prepare
h1_create
h2_create
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh
index 899b6892603f..d7505b933aef 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh
@@ -51,7 +51,7 @@ for current_test in ${TESTS:-$ALL_TESTS}; do
fi
${current_test}_setup_prepare
- setup_wait $num_netifs
+ setup_wait_n $num_netifs
# Update target in case occupancy of a certain resource changed
# following the test setup.
target=$(${current_test}_get_target "$should_fail")
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
index 482ebb744eba..7b98cdd0580d 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
@@ -55,7 +55,7 @@ for current_test in ${TESTS:-$ALL_TESTS}; do
continue
fi
${current_test}_setup_prepare
- setup_wait $num_netifs
+ setup_wait_n $num_netifs
# Update target in case occupancy of a certain resource
# changed following the test setup.
target=$(${current_test}_get_target "$should_fail")
diff --git a/tools/testing/selftests/drivers/net/napi_id.py b/tools/testing/selftests/drivers/net/napi_id.py
index 356bac46ba04..d05eddcad539 100755
--- a/tools/testing/selftests/drivers/net/napi_id.py
+++ b/tools/testing/selftests/drivers/net/napi_id.py
@@ -7,10 +7,10 @@ from lib.py import bkg, cmd, rand_port, NetNSEnter
def test_napi_id(cfg) -> None:
port = rand_port()
- listen_cmd = f"{cfg.test_dir}/napi_id_helper {cfg.addr_v['4']} {port}"
+ listen_cmd = f"{cfg.test_dir}/napi_id_helper {cfg.addr} {port}"
with bkg(listen_cmd, ksft_wait=3) as server:
- cmd(f"echo a | socat - TCP:{cfg.addr_v['4']}:{port}", host=cfg.remote, shell=True)
+ cmd(f"echo a | socat - TCP:{cfg.baddr}:{port}", host=cfg.remote, shell=True)
ksft_eq(0, server.ret)
diff --git a/tools/testing/selftests/drivers/net/napi_id_helper.c b/tools/testing/selftests/drivers/net/napi_id_helper.c
index eecd610c2109..7f49ca6c8637 100644
--- a/tools/testing/selftests/drivers/net/napi_id_helper.c
+++ b/tools/testing/selftests/drivers/net/napi_id_helper.c
@@ -7,41 +7,58 @@
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
+#include <netdb.h>
#include "../../net/lib/ksft.h"
int main(int argc, char *argv[])
{
- struct sockaddr_in address;
+ struct sockaddr_storage address;
+ struct addrinfo *result;
+ struct addrinfo hints;
unsigned int napi_id;
- unsigned int port;
+ socklen_t addr_len;
socklen_t optlen;
char buf[1024];
int opt = 1;
+ int family;
int server;
int client;
int ret;
- server = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = AF_UNSPEC;
+ hints.ai_socktype = SOCK_STREAM;
+ hints.ai_flags = AI_PASSIVE;
+
+ ret = getaddrinfo(argv[1], argv[2], &hints, &result);
+ if (ret != 0) {
+ fprintf(stderr, "getaddrinfo: %s\n", gai_strerror(ret));
+ return 1;
+ }
+
+ family = result->ai_family;
+ addr_len = result->ai_addrlen;
+
+ server = socket(family, SOCK_STREAM, IPPROTO_TCP);
if (server < 0) {
perror("socket creation failed");
+ freeaddrinfo(result);
if (errno == EAFNOSUPPORT)
return -1;
return 1;
}
- port = atoi(argv[2]);
-
if (setsockopt(server, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt))) {
perror("setsockopt");
+ freeaddrinfo(result);
return 1;
}
- address.sin_family = AF_INET;
- inet_pton(AF_INET, argv[1], &address.sin_addr);
- address.sin_port = htons(port);
+ memcpy(&address, result->ai_addr, result->ai_addrlen);
+ freeaddrinfo(result);
- if (bind(server, (struct sockaddr *)&address, sizeof(address)) < 0) {
+ if (bind(server, (struct sockaddr *)&address, addr_len) < 0) {
perror("bind failed");
return 1;
}
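The C change above replaces a hard-coded AF_INET setup with getaddrinfo() using AF_UNSPEC and AI_PASSIVE, so the helper binds to whichever family the address string implies. A rough Python equivalent of that dual-stack resolution step (address and port are hypothetical):

import socket

addr, port = "::1", 1514
family, stype, proto, _, sockaddr = socket.getaddrinfo(
    addr, port, socket.AF_UNSPEC, socket.SOCK_STREAM,
    flags=socket.AI_PASSIVE)[0]

# The resolved family drives socket creation, so the same code path
# serves both IPv4 and IPv6.
server = socket.socket(family, stype, proto)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(sockaddr)
server.listen(1)
server.close()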
diff --git a/tools/testing/selftests/drivers/net/napi_threaded.py b/tools/testing/selftests/drivers/net/napi_threaded.py
new file mode 100755
index 000000000000..f4be72b2145a
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/napi_threaded.py
@@ -0,0 +1,143 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+Test napi threaded states.
+"""
+
+from lib.py import ksft_run, ksft_exit
+from lib.py import ksft_eq, ksft_ne, ksft_ge
+from lib.py import NetDrvEnv, NetdevFamily
+from lib.py import cmd, defer, ethtool
+
+
+def _assert_napi_threaded_enabled(nl, napi_id) -> None:
+ napi = nl.napi_get({'id': napi_id})
+ ksft_eq(napi['threaded'], 'enabled')
+ ksft_ne(napi.get('pid'), None)
+
+
+def _assert_napi_threaded_disabled(nl, napi_id) -> None:
+ napi = nl.napi_get({'id': napi_id})
+ ksft_eq(napi['threaded'], 'disabled')
+ ksft_eq(napi.get('pid'), None)
+
+
+def _set_threaded_state(cfg, threaded) -> None:
+ with open(f"/sys/class/net/{cfg.ifname}/threaded", "wb") as fp:
+ fp.write(str(threaded).encode('utf-8'))
+
+
+def _setup_deferred_cleanup(cfg) -> int:
+ combined = ethtool(f"-l {cfg.ifname}", json=True)[0].get("combined", 0)
+ ksft_ge(combined, 2)
+ defer(ethtool, f"-L {cfg.ifname} combined {combined}")
+
+ threaded = cmd(f"cat /sys/class/net/{cfg.ifname}/threaded").stdout
+ defer(_set_threaded_state, cfg, threaded)
+
+ return combined
+
+
+def napi_init(cfg, nl) -> None:
+ """
+ Test that threaded state (in the persistent NAPI config) gets updated
+    even when the NAPI with the given ID is not allocated at the time.
+ """
+
+ qcnt = _setup_deferred_cleanup(cfg)
+
+ _set_threaded_state(cfg, 1)
+ cmd(f"ethtool -L {cfg.ifname} combined 1")
+ _set_threaded_state(cfg, 0)
+ cmd(f"ethtool -L {cfg.ifname} combined {qcnt}")
+
+ napis = nl.napi_get({'ifindex': cfg.ifindex}, dump=True)
+ for napi in napis:
+ ksft_eq(napi['threaded'], 'disabled')
+ ksft_eq(napi.get('pid'), None)
+
+ cmd(f"ethtool -L {cfg.ifname} combined 1")
+ _set_threaded_state(cfg, 1)
+ cmd(f"ethtool -L {cfg.ifname} combined {qcnt}")
+
+ napis = nl.napi_get({'ifindex': cfg.ifindex}, dump=True)
+ for napi in napis:
+ ksft_eq(napi['threaded'], 'enabled')
+ ksft_ne(napi.get('pid'), None)
+
+
+def enable_dev_threaded_disable_napi_threaded(cfg, nl) -> None:
+ """
+ Test that when napi threaded is enabled at device level and
+ then disabled at napi level for one napi, the threaded state
+ of all napis is preserved after a change in number of queues.
+ """
+
+ napis = nl.napi_get({'ifindex': cfg.ifindex}, dump=True)
+ ksft_ge(len(napis), 2)
+
+ napi0_id = napis[0]['id']
+ napi1_id = napis[1]['id']
+
+ qcnt = _setup_deferred_cleanup(cfg)
+
+ # set threaded
+ _set_threaded_state(cfg, 1)
+
+ # check napi threaded is set for both napis
+ _assert_napi_threaded_enabled(nl, napi0_id)
+ _assert_napi_threaded_enabled(nl, napi1_id)
+
+ # disable threaded for napi1
+ nl.napi_set({'id': napi1_id, 'threaded': 'disabled'})
+
+ cmd(f"ethtool -L {cfg.ifname} combined 1")
+ cmd(f"ethtool -L {cfg.ifname} combined {qcnt}")
+ _assert_napi_threaded_enabled(nl, napi0_id)
+ _assert_napi_threaded_disabled(nl, napi1_id)
+
+
+def change_num_queues(cfg, nl) -> None:
+ """
+ Test that when napi threaded is enabled at device level,
+ the napi threaded state is preserved after a change in
+ number of queues.
+ """
+
+ napis = nl.napi_get({'ifindex': cfg.ifindex}, dump=True)
+ ksft_ge(len(napis), 2)
+
+ napi0_id = napis[0]['id']
+ napi1_id = napis[1]['id']
+
+ qcnt = _setup_deferred_cleanup(cfg)
+
+ # set threaded
+ _set_threaded_state(cfg, 1)
+
+ # check napi threaded is set for both napis
+ _assert_napi_threaded_enabled(nl, napi0_id)
+ _assert_napi_threaded_enabled(nl, napi1_id)
+
+ cmd(f"ethtool -L {cfg.ifname} combined 1")
+ cmd(f"ethtool -L {cfg.ifname} combined {qcnt}")
+
+ # check napi threaded is set for both napis
+ _assert_napi_threaded_enabled(nl, napi0_id)
+ _assert_napi_threaded_enabled(nl, napi1_id)
+
+
+def main() -> None:
+ """ Ksft boiler plate main """
+
+ with NetDrvEnv(__file__, queue_count=2) as cfg:
+ ksft_run([napi_init,
+ change_num_queues,
+ enable_dev_threaded_disable_napi_threaded],
+ args=(cfg, NetdevFamily()))
+ ksft_exit()
+
+
+if __name__ == "__main__":
+ main()
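The test above relies on two knobs: the per-device /sys/class/net/<ifname>/threaded file and ethtool -L, which destroys and recreates the NAPIs. A minimal sketch of the same toggle sequence outside the ksft harness (interface name is hypothetical; needs root):

import subprocess

IFNAME = "eth0"  # hypothetical test interface
THREADED = f"/sys/class/net/{IFNAME}/threaded"

def set_threaded(value: int) -> None:
    # Same write the test performs; 1 enables threaded NAPI.
    with open(THREADED, "w", encoding="utf-8") as fp:
        fp.write(str(value))

# Enable threaded NAPI, churn the queues to recreate the NAPIs, then
# check that the persistent config survived.
set_threaded(1)
subprocess.run(["ethtool", "-L", IFNAME, "combined", "1"], check=True)
subprocess.run(["ethtool", "-L", IFNAME, "combined", "2"], check=True)
with open(THREADED, encoding="utf-8") as fp:
    print("threaded =", fp.read().strip())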
diff --git a/tools/testing/selftests/drivers/net/netcons_basic.sh b/tools/testing/selftests/drivers/net/netcons_basic.sh
index fe765da498e8..2022f3061738 100755
--- a/tools/testing/selftests/drivers/net/netcons_basic.sh
+++ b/tools/testing/selftests/drivers/net/netcons_basic.sh
@@ -28,25 +28,47 @@ OUTPUT_FILE="/tmp/${TARGET}"
# Check for basic system dependency and exit if not found
check_for_dependencies
-# Set current loglevel to KERN_INFO(6), and default to KERN_NOTICE(5)
-echo "6 5" > /proc/sys/kernel/printk
# Remove the namespace, interfaces and netconsole target on exit
trap cleanup EXIT
-# Create one namespace and two interfaces
-set_network
-# Create a dynamic target for netconsole
-create_dynamic_target
-# Set userdata "key" with the "value" value
-set_user_data
-# Listed for netconsole port inside the namespace and destination interface
-listen_port_and_save_to "${OUTPUT_FILE}" &
-# Wait for socat to start and listen to the port.
-wait_local_port_listen "${NAMESPACE}" "${PORT}" udp
-# Send the message
-echo "${MSG}: ${TARGET}" > /dev/kmsg
-# Wait until socat saves the file to disk
-busywait "${BUSYWAIT_TIMEOUT}" test -s "${OUTPUT_FILE}"
-
-# Make sure the message was received in the dst part
-# and exit
-validate_result "${OUTPUT_FILE}"
+
+# Run the test twice, with different format modes
+for FORMAT in "basic" "extended"
+do
+ for IP_VERSION in "ipv6" "ipv4"
+ do
+ echo "Running with target mode: ${FORMAT} (${IP_VERSION})"
+ # Set current loglevel to KERN_INFO(6), and default to
+ # KERN_NOTICE(5)
+ echo "6 5" > /proc/sys/kernel/printk
+ # Create one namespace and two interfaces
+ set_network "${IP_VERSION}"
+ # Create a dynamic target for netconsole
+ create_dynamic_target "${FORMAT}"
+ # Only set userdata for extended format
+ if [ "$FORMAT" == "extended" ]
+ then
+ # Set userdata "key" with the "value" value
+ set_user_data
+ fi
+ # Listen on the netconsole port inside the namespace, on the
+ # destination interface
+ listen_port_and_save_to "${OUTPUT_FILE}" "${IP_VERSION}" &
+ # Wait for socat to start and listen to the port.
+ wait_for_port "${NAMESPACE}" "${PORT}" "${IP_VERSION}"
+ # Send the message
+ echo "${MSG}: ${TARGET}" > /dev/kmsg
+ # Wait until socat saves the file to disk
+ busywait "${BUSYWAIT_TIMEOUT}" test -s "${OUTPUT_FILE}"
+
+ # Make sure the message was received in the dst part
+ # and exit
+ validate_result "${OUTPUT_FILE}" "${FORMAT}"
+ # kill socat in case it is still running
+ pkill_socat
+ cleanup
+ echo "${FORMAT} : ${IP_VERSION} : Test passed" >&2
+ done
+done
+
+trap - EXIT
+exit "${ksft_pass}"
diff --git a/tools/testing/selftests/drivers/net/netcons_cmdline.sh b/tools/testing/selftests/drivers/net/netcons_cmdline.sh
new file mode 100755
index 000000000000..d1d23dc67f99
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/netcons_cmdline.sh
@@ -0,0 +1,65 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-2.0
+
+# This selftest checks netconsole's cmdline arguments. It exercises loading
+# netconsole from the cmdline instead of through dynamic reconfiguration,
+# covering the parsing of the long netconsole= line and the whole flow
+# through init_netconsole().
+#
+# Author: Breno Leitao <leitao@debian.org>
+
+set -euo pipefail
+
+SCRIPTDIR=$(dirname "$(readlink -e "${BASH_SOURCE[0]}")")
+
+source "${SCRIPTDIR}"/lib/sh/lib_netcons.sh
+
+check_netconsole_module
+
+modprobe netdevsim 2> /dev/null || true
+rmmod netconsole 2> /dev/null || true
+
+# Check for basic system dependencies and exit if not found
+check_for_dependencies
+# Set current loglevel to KERN_INFO(6), and default to KERN_NOTICE(5)
+echo "6 5" > /proc/sys/kernel/printk
+# Remove the namespace and network interfaces
+trap do_cleanup EXIT
+# Create one namespace and two interfaces
+set_network
+
+# Run the test twice, with different cmdline parameters
+for BINDMODE in "ifname" "mac"
+do
+ echo "Running with bind mode: ${BINDMODE}" >&2
+ # Create the command line for netconsole, with the configuration from
+ # the function above
+ CMDLINE=$(create_cmdline_str "${BINDMODE}")
+
+ # The content of kmsg will be saved to the following file
+ OUTPUT_FILE="/tmp/${TARGET}-${BINDMODE}"
+
+ # Load the module, with the cmdline set
+ modprobe netconsole "${CMDLINE}"
+
+ # Listen on the netconsole port inside the namespace, on the
+ # destination interface
+ listen_port_and_save_to "${OUTPUT_FILE}" &
+ # Wait for socat to start and listen to the port.
+ wait_local_port_listen "${NAMESPACE}" "${PORT}" udp
+ # Send the message
+ echo "${MSG}: ${TARGET}" > /dev/kmsg
+ # Wait until socat saves the file to disk
+ busywait "${BUSYWAIT_TIMEOUT}" test -s "${OUTPUT_FILE}"
+ # Make sure the message was received in the dst part
+ # and exit
+ validate_msg "${OUTPUT_FILE}"
+
+ # kill socat in case it is still running
+ pkill_socat
+ # Unload the module
+ rmmod netconsole
+ echo "${BINDMODE} : Test passed" >&2
+done
+
+exit "${ksft_pass}"
diff --git a/tools/testing/selftests/drivers/net/netcons_overflow.sh b/tools/testing/selftests/drivers/net/netcons_overflow.sh
index 29bad56448a2..06089643b771 100755
--- a/tools/testing/selftests/drivers/net/netcons_overflow.sh
+++ b/tools/testing/selftests/drivers/net/netcons_overflow.sh
@@ -15,7 +15,7 @@ SCRIPTDIR=$(dirname "$(readlink -e "${BASH_SOURCE[0]}")")
source "${SCRIPTDIR}"/lib/sh/lib_netcons.sh
# This is coming from netconsole code. Check for it in drivers/net/netconsole.c
-MAX_USERDATA_ITEMS=16
+MAX_USERDATA_ITEMS=256
# Function to create userdata entries
function create_userdata_max_entries() {
diff --git a/tools/testing/selftests/drivers/net/netcons_sysdata.sh b/tools/testing/selftests/drivers/net/netcons_sysdata.sh
index a737e377bf08..baf69031089e 100755
--- a/tools/testing/selftests/drivers/net/netcons_sysdata.sh
+++ b/tools/testing/selftests/drivers/net/netcons_sysdata.sh
@@ -53,6 +53,17 @@ function set_release() {
echo 1 > "${NETCONS_PATH}/userdata/release_enabled"
}
+# Enable the msgid to be appended to sysdata
+function set_msgid() {
+ if [[ ! -f "${NETCONS_PATH}/userdata/msgid_enabled" ]]
+ then
+ echo "Not able to enable msgid sysdata append. Configfs not available in ${NETCONS_PATH}/userdata/msgid_enabled" >&2
+ exit "${ksft_skip}"
+ fi
+
+ echo 1 > "${NETCONS_PATH}/userdata/msgid_enabled"
+}
+
# Disable the sysdata cpu_nr feature
function unset_cpu_nr() {
echo 0 > "${NETCONS_PATH}/userdata/cpu_nr_enabled"
@@ -67,6 +78,10 @@ function unset_release() {
echo 0 > "${NETCONS_PATH}/userdata/release_enabled"
}
+function unset_msgid() {
+ echo 0 > "${NETCONS_PATH}/userdata/msgid_enabled"
+}
+
# Test if MSG contains sysdata
function validate_sysdata() {
# OUTPUT_FILE will contain something like:
@@ -74,6 +89,7 @@ function validate_sysdata() {
# userdatakey=userdatavalue
# cpu=X
# taskname=<taskname>
+ # msgid=<id>
# Echo is what this test uses to create the message. See runtest()
# function
@@ -104,6 +120,12 @@ function validate_sysdata() {
exit "${ksft_fail}"
fi
+ if ! grep -q "msgid=[0-9]\+$" "${OUTPUT_FILE}"; then
+ echo "FAIL: 'msgid=<id>' not found in ${OUTPUT_FILE}" >&2
+ cat "${OUTPUT_FILE}" >&2
+ exit "${ksft_fail}"
+ fi
+
rm "${OUTPUT_FILE}"
pkill_socat
}
@@ -155,6 +177,12 @@ function validate_no_sysdata() {
exit "${ksft_fail}"
fi
+ if grep -q "msgid=" "${OUTPUT_FILE}"; then
+ echo "FAIL: 'msgid= found in ${OUTPUT_FILE}" >&2
+ cat "${OUTPUT_FILE}" >&2
+ exit "${ksft_fail}"
+ fi
+
rm "${OUTPUT_FILE}"
}
@@ -206,6 +234,7 @@ set_cpu_nr
# Enable taskname to be appended to sysdata
set_taskname
set_release
+set_msgid
runtest
# Make sure the message was received in the dst part
# and exit
@@ -235,6 +264,7 @@ MSG="Test #3 from CPU${CPU}"
unset_cpu_nr
unset_taskname
unset_release
+unset_msgid
runtest
# At this time, cpu= shouldn't be present in the msg
validate_no_sysdata
diff --git a/tools/testing/selftests/drivers/net/netcons_torture.sh b/tools/testing/selftests/drivers/net/netcons_torture.sh
new file mode 100755
index 000000000000..2ce9ee3719d1
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/netcons_torture.sh
@@ -0,0 +1,130 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Repeatedly sends kernel messages, toggles netconsole targets on and off,
+# creates and deletes targets in parallel, and toggles the source interface to
+# simulate stress conditions.
+#
+# This test aims to verify the robustness of netconsole under dynamic
+# configurations and concurrent operations.
+#
+# The major goal is to run this test with LOCKDEP, Kmemleak and KASAN to make
+# sure no issues are reported.
+#
+# Author: Breno Leitao <leitao@debian.org>
+
+set -euo pipefail
+
+SCRIPTDIR=$(dirname "$(readlink -e "${BASH_SOURCE[0]}")")
+
+source "${SCRIPTDIR}"/lib/sh/lib_netcons.sh
+
+# Number of times the main loop runs
+ITERATIONS=${1:-150}
+
+# Only test extended format
+FORMAT="extended"
+# And ipv6 only
+IP_VERSION="ipv6"
+
+# Create, enable and delete some targets.
+create_and_delete_random_target() {
+ COUNT=2
+ RND_PREFIX=$(mktemp -u netcons_rnd_XXXX_)
+
+ if [ -d "${NETCONS_CONFIGFS}/${RND_PREFIX}${COUNT}" ] || \
+ [ -d "${NETCONS_CONFIGFS}/${RND_PREFIX}0" ]; then
+ echo "Function didn't finish yet, skipping it." >&2
+ return
+ fi
+
+ # enable COUNT targets
+ for i in $(seq ${COUNT})
+ do
+ RND_TARGET="${RND_PREFIX}"${i}
+ RND_TARGET_PATH="${NETCONS_CONFIGFS}"/"${RND_TARGET}"
+
+ # Basic population so the target can come up
+ _create_dynamic_target "${FORMAT}" "${RND_TARGET_PATH}"
+ done
+
+ echo "netconsole selftest: ${COUNT} additional targets were created" > /dev/kmsg
+ # disable them all
+ for i in $(seq ${COUNT})
+ do
+ RND_TARGET="${RND_PREFIX}"${i}
+ RND_TARGET_PATH="${NETCONS_CONFIGFS}"/"${RND_TARGET}"
+ if [[ $(cat "${RND_TARGET_PATH}/enabled") -eq 1 ]]
+ then
+ echo 0 > "${RND_TARGET_PATH}"/enabled
+ fi
+ rmdir "${RND_TARGET_PATH}"
+ done
+}
+
+# Disable and enable the target mid-flight, while messages
+# are being transmitted.
+toggle_netcons_target() {
+ for i in $(seq 2)
+ do
+ if [ ! -d "${NETCONS_PATH}" ]
+ then
+ break
+ fi
+ echo 0 > "${NETCONS_PATH}"/enabled 2> /dev/null || true
+ # Try to enable a bit harder, given that enabling might fail:
+ # writes to `enabled` can fail depending on the lock, which is
+ # highly contended here
+ for _ in $(seq 5)
+ do
+ echo 1 > "${NETCONS_PATH}"/enabled 2> /dev/null || true
+ done
+ done
+}
+
+toggle_iface(){
+ ip link set "${SRCIF}" down
+ ip link set "${SRCIF}" up
+}
+
+# Start here
+
+modprobe netdevsim 2> /dev/null || true
+modprobe netconsole 2> /dev/null || true
+
+# Check for basic system dependency and exit if not found
+check_for_dependencies
+# Set current loglevel to KERN_INFO(6), and default to KERN_NOTICE(5)
+echo "6 5" > /proc/sys/kernel/printk
+# Remove the namespace, interfaces and netconsole target on exit
+trap cleanup EXIT
+# Create one namespace and two interfaces
+set_network "${IP_VERSION}"
+# Create a dynamic target for netconsole
+create_dynamic_target "${FORMAT}"
+
+for i in $(seq "$ITERATIONS")
+do
+ for _ in $(seq 10)
+ do
+ echo "${MSG}: ${TARGET} ${i}" > /dev/kmsg
+ done
+ wait
+
+ if (( i % 30 == 0 )); then
+ toggle_netcons_target &
+ fi
+
+ if (( i % 50 == 0 )); then
+ # create some targets, enable them, send msg and disable
+ # all in a parallel thread
+ create_and_delete_random_target &
+ fi
+
+ if (( i % 70 == 0 )); then
+ toggle_iface &
+ fi
+done
+wait
+
+exit "${EXIT_STATUS}"
diff --git a/tools/testing/selftests/drivers/net/netdevsim/Makefile b/tools/testing/selftests/drivers/net/netdevsim/Makefile
index 07b7c46d3311..1a228c5430f5 100644
--- a/tools/testing/selftests/drivers/net/netdevsim/Makefile
+++ b/tools/testing/selftests/drivers/net/netdevsim/Makefile
@@ -1,13 +1,13 @@
# SPDX-License-Identifier: GPL-2.0+ OR MIT
-TEST_PROGS = devlink.sh \
+TEST_PROGS := \
+ devlink.sh \
devlink_in_netns.sh \
devlink_trap.sh \
ethtool-coalesce.sh \
ethtool-features.sh \
ethtool-fec.sh \
ethtool-pause.sh \
- ethtool-ring.sh \
fib.sh \
fib_notifications.sh \
hw_stats_l3.sh \
@@ -17,5 +17,10 @@ TEST_PROGS = devlink.sh \
psample.sh \
tc-mq-visibility.sh \
udp_tunnel_nic.sh \
+# end of TEST_PROGS
+
+TEST_FILES := \
+ ethtool-common.sh
+# end of TEST_FILES
include ../../../lib.mk
diff --git a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
index b5ea2526f23c..1b529ccaf050 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
@@ -3,7 +3,8 @@
lib_dir=$(dirname $0)/../../../net/forwarding
-ALL_TESTS="fw_flash_test params_test regions_test reload_test \
+ALL_TESTS="fw_flash_test params_test \
+ params_default_test regions_test reload_test \
netns_reload_test resource_test dev_info_test \
empty_reporter_test dummy_reporter_test rate_test"
NUM_NETIFS=0
@@ -40,6 +41,8 @@ fw_flash_test()
return
fi
+ echo "10"> $DEBUGFS_DIR/fw_update_flash_chunk_time_ms
+
devlink dev flash $DL_HANDLE file $DUMMYFILE
check_err $? "Failed to flash with status updates on"
@@ -76,17 +79,28 @@ fw_flash_test()
param_get()
{
local name=$1
+ local attr=${2:-value}
+ local cmode=${3:-driverinit}
cmd_jq "devlink dev param show $DL_HANDLE name $name -j" \
- '.[][][].values[] | select(.cmode == "driverinit").value'
+ '.[][][].values[] | select(.cmode == "'"$cmode"'").'"$attr"
}
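The jq filter that param_get() builds drills three levels into the devlink JSON and then picks one attribute for the requested cmode. A Python sketch of the same selection over a made-up payload:

import json

# Made-up JSON shaped like `devlink dev param show ... -j` output.
payload = json.loads('''
{"param": {"netdevsim/netdevsim10": [
    {"name": "max_macs",
     "values": [{"cmode": "driverinit", "value": 32}]}]}}
''')

def param_get(doc, attr="value", cmode="driverinit"):
    # Mirrors jq's .[][][].values[] | select(.cmode == $cmode).$attr
    for handles in doc.values():             # .[]
        for params in handles.values():      # .[][]
            for param in params:             # .[][][]
                for val in param["values"]:  # .values[]
                    if val["cmode"] == cmode:
                        return val.get(attr)
    return None

print(param_get(payload))  # 32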
param_set()
{
local name=$1
local value=$2
+ local cmode=${3:-driverinit}
- devlink dev param set $DL_HANDLE name $name cmode driverinit value $value
+ devlink dev param set $DL_HANDLE name $name cmode $cmode value $value
+}
+
+param_set_default()
+{
+ local name=$1
+ local cmode=${2:-driverinit}
+
+ devlink dev param set $DL_HANDLE name $name default cmode $cmode
}
check_value()
@@ -95,12 +109,18 @@ check_value()
local phase_name=$2
local expected_param_value=$3
local expected_debugfs_value=$4
+ local cmode=${5:-driverinit}
local value
+ local attr="value"
+
+ if [[ "$phase_name" == *"default"* ]]; then
+ attr="default"
+ fi
- value=$(param_get $name)
- check_err $? "Failed to get $name param value"
+ value=$(param_get $name $attr $cmode)
+ check_err $? "Failed to get $name param $attr"
[ "$value" == "$expected_param_value" ]
- check_err $? "Unexpected $phase_name $name param value"
+ check_err $? "Unexpected $phase_name $name param $attr"
value=$(<$DEBUGFS_DIR/$name)
check_err $? "Failed to get $name debugfs value"
[ "$value" == "$expected_debugfs_value" ]
@@ -133,6 +153,92 @@ params_test()
log_test "params test"
}
+value_to_debugfs()
+{
+ local value=$1
+
+ case "$value" in
+ true)
+ echo "Y"
+ ;;
+ false)
+ echo "N"
+ ;;
+ *)
+ echo "$value"
+ ;;
+ esac
+}
+
+test_default()
+{
+ local param_name=$1
+ local new_value=$2
+ local expected_default=$3
+ local cmode=${4:-driverinit}
+ local default_debugfs
+ local new_debugfs
+ local expected_debugfs
+
+ default_debugfs=$(value_to_debugfs $expected_default)
+ new_debugfs=$(value_to_debugfs $new_value)
+
+ expected_debugfs=$default_debugfs
+ check_value $param_name initial-default $expected_default $expected_debugfs $cmode
+
+ param_set $param_name $new_value $cmode
+ check_err $? "Failed to set $param_name to $new_value"
+
+ expected_debugfs=$([ "$cmode" == "runtime" ] && echo "$new_debugfs" || echo "$default_debugfs")
+ check_value $param_name post-set $new_value $expected_debugfs $cmode
+
+ devlink dev reload $DL_HANDLE
+ check_err $? "Failed to reload device"
+
+ expected_debugfs=$new_debugfs
+ check_value $param_name post-reload-new-value $new_value $expected_debugfs $cmode
+
+ param_set_default $param_name $cmode
+ check_err $? "Failed to set $param_name to default"
+
+ expected_debugfs=$([ "$cmode" == "runtime" ] && echo "$default_debugfs" || echo "$new_debugfs")
+ check_value $param_name post-set-default $expected_default $expected_debugfs $cmode
+
+ devlink dev reload $DL_HANDLE
+ check_err $? "Failed to reload device"
+
+ expected_debugfs=$default_debugfs
+ check_value $param_name post-reload-default $expected_default $expected_debugfs $cmode
+}
+
+params_default_test()
+{
+ RET=0
+
+ if ! devlink dev param help 2>&1 | grep -q "value VALUE | default"; then
+ echo "SKIP: devlink cli missing default feature"
+ return
+ fi
+
+ # Remove side effects of previous tests. Use plain param_set, because
+ # param_set_default is a feature under test here.
+ param_set max_macs 32 driverinit
+ check_err $? "Failed to reset max_macs to default value"
+ param_set test1 true driverinit
+ check_err $? "Failed to reset test1 to default value"
+ param_set test2 1234 runtime
+ check_err $? "Failed to reset test2 to default value"
+
+ devlink dev reload $DL_HANDLE
+ check_err $? "Failed to reload device for clean state"
+
+ test_default max_macs 16 32 driverinit
+ test_default test1 false true driverinit
+ test_default test2 100 1234 runtime
+
+ log_test "params default test"
+}
+
check_region_size()
{
local name=$1
@@ -608,6 +714,46 @@ rate_attr_parent_check()
check_err $? "Unexpected parent attr value $api_value != $parent"
}
+rate_attr_tc_bw_check()
+{
+ local handle=$1
+ local tc_bw=$2
+ local debug_file=$3
+
+ local tc_bw_str=""
+ for bw in $tc_bw; do
+ local tc=${bw%%:*}
+ local value=${bw##*:}
+ tc_bw_str="$tc_bw_str $tc:$value"
+ done
+ tc_bw_str=${tc_bw_str# }
+
+ rate_attr_set "$handle" tc-bw "$tc_bw_str"
+ check_err $? "Failed to set tc-bw values"
+
+ for bw in $tc_bw; do
+ local tc=${bw%%:*}
+ local value=${bw##*:}
+ local debug_value
+ debug_value=$(cat "$debug_file"/tc"${tc}"_bw)
+ check_err $? "Failed to read tc-bw value from debugfs for tc$tc"
+ [ "$debug_value" == "$value" ]
+ check_err $? "Unexpected tc-bw debug value for tc$tc: $debug_value != $value"
+ done
+
+ for bw in $tc_bw; do
+ local tc=${bw%%:*}
+ local expected_value=${bw##*:}
+ local api_value
+ api_value=$(rate_attr_get "$handle" tc_"$tc")
+ if [ "$api_value" = "null" ]; then
+ api_value=0
+ fi
+ [ "$api_value" == "$expected_value" ]
+ check_err $? "Unexpected tc-bw value for tc$tc: $api_value != $expected_value"
+ done
+}
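rate_attr_tc_bw_check() above iterates space-separated "tc:bandwidth" pairs; the ${bw%%:*} and ${bw##*:} expansions split each pair on the colon. The same split, sketched in Python:

# Hypothetical per-TC bandwidth shares, as passed to the shell helper.
tc_bw = "0:20 1:0 5:20 6:60"
for pair in tc_bw.split():
    tc, value = pair.split(":", 1)  # ${bw%%:*} / ${bw##*:}
    print(f"tc{tc} -> {value}")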
+
rate_node_add()
{
local handle=$1
@@ -649,6 +795,13 @@ rate_test()
rate=$(($rate+100))
done
+ local tc_bw="0:0 1:40 2:0 3:0 4:0 5:0 6:60 7:0"
+ for r_obj in $leafs
+ do
+ rate_attr_tc_bw_check "$r_obj" "$tc_bw" \
+ "$DEBUGFS_DIR"/ports/"${r_obj##*/}"
+ done
+
local node1_name='group1'
local node1="$DL_HANDLE/$node1_name"
rate_node_add "$node1"
@@ -666,6 +819,12 @@ rate_test()
rate_attr_tx_rate_check $node1 tx_max $node_tx_max \
$DEBUGFS_DIR/rate_nodes/${node1##*/}/tx_max
+
+ local tc_bw="0:20 1:0 2:0 3:0 4:0 5:20 6:60 7:0"
+ rate_attr_tc_bw_check $node1 "$tc_bw" \
+ "$DEBUGFS_DIR"/rate_nodes/"${node1##*/}"
+
+
rate_node_del "$node1"
check_err $? "Failed to delete node $node1"
local num_nodes=`rate_nodes_get $DL_HANDLE | wc -w`
diff --git a/tools/testing/selftests/drivers/net/netdevsim/ethtool-ring.sh b/tools/testing/selftests/drivers/net/netdevsim/ethtool-ring.sh
deleted file mode 100755
index c969559ffa7a..000000000000
--- a/tools/testing/selftests/drivers/net/netdevsim/ethtool-ring.sh
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0-only
-
-source ethtool-common.sh
-
-function get_value {
- local query="${SETTINGS_MAP[$1]}"
-
- echo $(ethtool -g $NSIM_NETDEV | \
- tail -n +$CURR_SETT_LINE | \
- awk -F':' -v pattern="$query:" '$0 ~ pattern {gsub(/[\t ]/, "", $2); print $2}')
-}
-
-function update_current_settings {
- for key in ${!SETTINGS_MAP[@]}; do
- CURRENT_SETTINGS[$key]=$(get_value $key)
- done
- echo ${CURRENT_SETTINGS[@]}
-}
-
-if ! ethtool -h | grep -q set-ring >/dev/null; then
- echo "SKIP: No --set-ring support in ethtool"
- exit 4
-fi
-
-NSIM_NETDEV=$(make_netdev)
-
-set -o pipefail
-
-declare -A SETTINGS_MAP=(
- ["rx"]="RX"
- ["rx-mini"]="RX Mini"
- ["rx-jumbo"]="RX Jumbo"
- ["tx"]="TX"
-)
-
-declare -A EXPECTED_SETTINGS=(
- ["rx"]=""
- ["rx-mini"]=""
- ["rx-jumbo"]=""
- ["tx"]=""
-)
-
-declare -A CURRENT_SETTINGS=(
- ["rx"]=""
- ["rx-mini"]=""
- ["rx-jumbo"]=""
- ["tx"]=""
-)
-
-MAX_VALUE=$((RANDOM % $((2**32-1))))
-RING_MAX_LIST=$(ls $NSIM_DEV_DFS/ethtool/ring/)
-
-for ring_max_entry in $RING_MAX_LIST; do
- echo $MAX_VALUE > $NSIM_DEV_DFS/ethtool/ring/$ring_max_entry
-done
-
-CURR_SETT_LINE=$(ethtool -g $NSIM_NETDEV | grep -i -m1 -n 'Current hardware settings' | cut -f1 -d:)
-
-# populate the expected settings map
-for key in ${!SETTINGS_MAP[@]}; do
- EXPECTED_SETTINGS[$key]=$(get_value $key)
-done
-
-# test
-for key in ${!SETTINGS_MAP[@]}; do
- value=$((RANDOM % $MAX_VALUE))
-
- ethtool -G $NSIM_NETDEV "$key" "$value"
-
- EXPECTED_SETTINGS[$key]="$value"
- expected=${EXPECTED_SETTINGS[@]}
- current=$(update_current_settings)
-
- check $? "$current" "$expected"
- set +x
-done
-
-if [ $num_errors -eq 0 ]; then
- echo "PASSED all $((num_passes)) checks"
- exit 0
-else
- echo "FAILED $num_errors/$((num_errors+num_passes)) checks"
- exit 1
-fi
diff --git a/tools/testing/selftests/drivers/net/netdevsim/nexthop.sh b/tools/testing/selftests/drivers/net/netdevsim/nexthop.sh
index e8e0dc088d6a..01d0c044a5fc 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/nexthop.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/nexthop.sh
@@ -1053,6 +1053,6 @@ trap cleanup EXIT
setup_prepare
-tests_run
+xfail_on_slow tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/netdevsim/peer.sh b/tools/testing/selftests/drivers/net/netdevsim/peer.sh
index 1bb46ec435d4..7f32b5600925 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/peer.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/peer.sh
@@ -1,7 +1,8 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0-only
-source ../../../net/lib.sh
+lib_dir=$(dirname $0)/../../../net
+source $lib_dir/lib.sh
NSIM_DEV_1_ID=$((256 + RANDOM % 256))
NSIM_DEV_1_SYS=/sys/bus/netdevsim/devices/netdevsim$NSIM_DEV_1_ID
diff --git a/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh b/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
index 92c2f0376c08..4c859ecdad94 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
@@ -266,7 +266,6 @@ for port in 0 1; do
echo $NSIM_ID > /sys/bus/netdevsim/new_device
else
echo 1 > $NSIM_DEV_DFS/udp_ports_open_only
- echo 1 > $NSIM_DEV_DFS/udp_ports_sleep
echo 1 > $NSIM_DEV_SYS/new_port
fi
NSIM_NETDEV=`get_netdev_name old_netdevs`
@@ -350,23 +349,11 @@ old_netdevs=$(ls /sys/class/net)
port=0
echo $NSIM_ID > /sys/bus/netdevsim/new_device
echo 0 > $NSIM_DEV_SYS/del_port
-echo 1000 > $NSIM_DEV_DFS/udp_ports_sleep
echo 0 > $NSIM_DEV_SYS/new_port
NSIM_NETDEV=`get_netdev_name old_netdevs`
msg="create VxLANs"
-exp0=( 0 0 0 0 ) # sleep is longer than out wait
-new_vxlan vxlan0 10000 $NSIM_NETDEV
-
-modprobe -r vxlan
-modprobe -r udp_tunnel
-
-msg="remove tunnels"
-exp0=( 0 0 0 0 )
-check_tables
-
-msg="create VxLANs"
-exp0=( 0 0 0 0 ) # sleep is longer than out wait
+exp0=( `mke 10000 1` 0 0 0 )
new_vxlan vxlan0 10000 $NSIM_NETDEV
exp0=( 0 0 0 0 )
@@ -428,7 +415,6 @@ echo 0 > $NSIM_DEV_SYS/del_port
for port in 0 1; do
if [ $port -ne 0 ]; then
echo 1 > $NSIM_DEV_DFS/udp_ports_open_only
- echo 1 > $NSIM_DEV_DFS/udp_ports_sleep
fi
echo $port > $NSIM_DEV_SYS/new_port
@@ -486,7 +472,6 @@ echo 1 > $NSIM_DEV_DFS/udp_ports_sync_all
for port in 0 1; do
if [ $port -ne 0 ]; then
echo 1 > $NSIM_DEV_DFS/udp_ports_open_only
- echo 1 > $NSIM_DEV_DFS/udp_ports_sleep
fi
echo $port > $NSIM_DEV_SYS/new_port
@@ -543,7 +528,6 @@ echo 0 > $NSIM_DEV_SYS/del_port
for port in 0 1; do
if [ $port -ne 0 ]; then
echo 1 > $NSIM_DEV_DFS/udp_ports_open_only
- echo 1 > $NSIM_DEV_DFS/udp_ports_sleep
fi
echo $port > $NSIM_DEV_SYS/new_port
@@ -573,7 +557,6 @@ echo 1 > $NSIM_DEV_DFS/udp_ports_ipv4_only
for port in 0 1; do
if [ $port -ne 0 ]; then
echo 1 > $NSIM_DEV_DFS/udp_ports_open_only
- echo 1 > $NSIM_DEV_DFS/udp_ports_sleep
fi
echo $port > $NSIM_DEV_SYS/new_port
@@ -634,7 +617,6 @@ echo 0 > $NSIM_DEV_SYS/del_port
for port in 0 1; do
if [ $port -ne 0 ]; then
echo 1 > $NSIM_DEV_DFS/udp_ports_open_only
- echo 1 > $NSIM_DEV_DFS/udp_ports_sleep
fi
echo $port > $NSIM_DEV_SYS/new_port
@@ -690,7 +672,6 @@ echo 0 > $NSIM_DEV_SYS/del_port
for port in 0 1; do
if [ $port -ne 0 ]; then
echo 1 > $NSIM_DEV_DFS/udp_ports_open_only
- echo 1 > $NSIM_DEV_DFS/udp_ports_sleep
fi
echo $port > $NSIM_DEV_SYS/new_port
@@ -750,7 +731,6 @@ echo 0 > $NSIM_DEV_SYS/del_port
for port in 0 1; do
if [ $port -ne 0 ]; then
echo 1 > $NSIM_DEV_DFS/udp_ports_open_only
- echo 1 > $NSIM_DEV_DFS/udp_ports_sleep
fi
echo $port > $NSIM_DEV_SYS/new_port
@@ -809,7 +789,6 @@ echo $NSIM_ID > /sys/bus/netdevsim/new_device
echo 0 > $NSIM_DEV_SYS/del_port
echo 0 > $NSIM_DEV_DFS/udp_ports_open_only
-echo 1 > $NSIM_DEV_DFS/udp_ports_sleep
echo 1 > $NSIM_DEV_DFS/udp_ports_shared
old_netdevs=$(ls /sys/class/net)
diff --git a/tools/testing/selftests/drivers/net/netpoll_basic.py b/tools/testing/selftests/drivers/net/netpoll_basic.py
new file mode 100755
index 000000000000..408bd54d6779
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/netpoll_basic.py
@@ -0,0 +1,396 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+# Author: Breno Leitao <leitao@debian.org>
+"""
+ This test aims to evaluate the netpoll polling mechanism (as in
+    netpoll_poll_dev()). It presents a complex scenario where the network stack
+    attempts to send a packet but fails, prompting it to poll the NIC from within
+ the netpoll TX side.
+
+ This has been a crucial path in netpoll that was previously untested. Jakub
+ suggested using a single RX/TX queue, pushing traffic to the NIC, and then
+ sending netpoll messages (via netconsole) to trigger the poll.
+
+    In parallel, bpftrace is used to detect whether netpoll_poll_dev() was
+    called. If so, the test passes; otherwise it is marked as an expected
+    failure. This test is very dependent on the driver and environment, given
+    that we are trying to trigger a tricky scenario.
+"""
+
+import errno
+import logging
+import os
+import random
+import string
+import threading
+import time
+from typing import Optional
+
+from lib.py import (
+ bpftrace,
+ CmdExitFailure,
+ defer,
+ ethtool,
+ GenerateTraffic,
+ ksft_exit,
+ ksft_pr,
+ ksft_run,
+ KsftFailEx,
+ KsftSkipEx,
+ NetDrvEpEnv,
+ KsftXfailEx,
+)
+
+# Configure logging
+logging.basicConfig(
+ level=logging.INFO,
+ format="%(asctime)s - %(levelname)s - %(message)s",
+)
+
+NETCONSOLE_CONFIGFS_PATH: str = "/sys/kernel/config/netconsole"
+NETCONS_REMOTE_PORT: int = 6666
+NETCONS_LOCAL_PORT: int = 1514
+
+# Max number of netcons messages to send. Each iteration will set up
+# netconsole and send MAX_WRITES messages
+ITERATIONS: int = 20
+# Number of writes to /dev/kmsg per iteration
+MAX_WRITES: int = 40
+# MAPS contains the information coming from bpftrace; it will have only one
+# key, "hits", which counts the number of times netpoll_poll_dev() was called
+MAPS: dict[str, int] = {}
+# Thread to run bpftrace in parallel
+BPF_THREAD: Optional[threading.Thread] = None
+# Time bpftrace will be running in parallel.
+BPFTRACE_TIMEOUT: int = 10
+
+
+def ethtool_get_ringsize(interface_name: str) -> tuple[int, int]:
+ """
+ Read the ringsize using ethtool. This will be used to restore it after the test
+ """
+ try:
+ ethtool_result = ethtool(f"-g {interface_name}", json=True)[0]
+ rxs = ethtool_result["rx"]
+ txs = ethtool_result["tx"]
+ except (KeyError, IndexError) as exception:
+ raise KsftSkipEx(
+ f"Failed to read RX/TX ringsize: {exception}. Not going to mess with them."
+ ) from exception
+
+ return rxs, txs
+
+
+def ethtool_set_ringsize(interface_name: str, ring_size: tuple[int, int]) -> bool:
+ """Try to the number of RX and TX ringsize."""
+ rxs = ring_size[0]
+ txs = ring_size[1]
+
+ logging.debug("Setting ring size to %d/%d", rxs, txs)
+ try:
+ ethtool(f"-G {interface_name} rx {rxs} tx {txs}")
+ except CmdExitFailure:
+        # This might fail on a real device; the caller retries with a
+        # higher value and, worst case, keeps the current setting.
+ return False
+
+ return True
+
+
+def ethtool_get_queues_cnt(interface_name: str) -> tuple[int, int, int]:
+ """Read the number of RX, TX and combined queues using ethtool"""
+
+ try:
+ ethtool_result = ethtool(f"-l {interface_name}", json=True)[0]
+ rxq = ethtool_result.get("rx", -1)
+ txq = ethtool_result.get("tx", -1)
+ combined = ethtool_result.get("combined", -1)
+
+ except IndexError as exception:
+ raise KsftSkipEx(
+ f"Failed to read queues numbers: {exception}. Not going to mess with them."
+ ) from exception
+
+ return rxq, txq, combined
+
+
+def ethtool_set_queues_cnt(interface_name: str, queues: tuple[int, int, int]) -> None:
+ """Set the number of RX, TX and combined queues using ethtool"""
+ rxq, txq, combined = queues
+
+ cmdline = f"-L {interface_name}"
+
+ if rxq != -1:
+ cmdline += f" rx {rxq}"
+ if txq != -1:
+ cmdline += f" tx {txq}"
+ if combined != -1:
+ cmdline += f" combined {combined}"
+
+ logging.debug("calling: ethtool %s", cmdline)
+
+ try:
+ ethtool(cmdline)
+ except CmdExitFailure as exception:
+ raise KsftSkipEx(
+ f"Failed to configure RX/TX queues: {exception}. Ethtool not available?"
+ ) from exception
+
+
+def netcons_generate_random_target_name() -> str:
+ """Generate a random target name starting with 'netcons'"""
+ random_suffix = "".join(random.choices(string.ascii_lowercase + string.digits, k=8))
+ return f"netcons_{random_suffix}"
+
+
+def netcons_create_target(
+ config_data: dict[str, str],
+ target_name: str,
+) -> None:
+ """Create a netconsole dynamic target against the interfaces"""
+ logging.debug("Using netconsole name: %s", target_name)
+ try:
+ os.makedirs(f"{NETCONSOLE_CONFIGFS_PATH}/{target_name}", exist_ok=True)
+ logging.debug(
+ "Created target directory: %s/%s", NETCONSOLE_CONFIGFS_PATH, target_name
+ )
+ except OSError as exception:
+ if exception.errno != errno.EEXIST:
+ raise KsftFailEx(
+ f"Failed to create netconsole target directory: {exception}"
+ ) from exception
+
+ try:
+ for key, value in config_data.items():
+ path = f"{NETCONSOLE_CONFIGFS_PATH}/{target_name}/{key}"
+ logging.debug("Writing %s to %s", key, path)
+ with open(path, "w", encoding="utf-8") as file:
+ # Always convert to string to write to file
+ file.write(str(value))
+
+ # Read all configuration values for debugging purposes
+ for debug_key in config_data.keys():
+ with open(
+ f"{NETCONSOLE_CONFIGFS_PATH}/{target_name}/{debug_key}",
+ "r",
+ encoding="utf-8",
+ ) as file:
+ content = file.read()
+ logging.debug(
+ "%s/%s/%s : %s",
+ NETCONSOLE_CONFIGFS_PATH,
+ target_name,
+ debug_key,
+ content.strip(),
+ )
+
+ except Exception as exception:
+ raise KsftFailEx(
+ f"Failed to configure netconsole target: {exception}"
+ ) from exception
+
+
+def netcons_configure_target(
+ cfg: NetDrvEpEnv, interface_name: str, target_name: str
+) -> None:
+ """Configure netconsole on the interface with the given target name"""
+ config_data = {
+ "extended": "1",
+ "dev_name": interface_name,
+ "local_port": NETCONS_LOCAL_PORT,
+ "remote_port": NETCONS_REMOTE_PORT,
+ "local_ip": cfg.addr,
+ "remote_ip": cfg.remote_addr,
+ "remote_mac": "00:00:00:00:00:00", # Not important for this test
+ "enabled": "1",
+ }
+
+ netcons_create_target(config_data, target_name)
+ logging.debug(
+ "Created netconsole target: %s on interface %s", target_name, interface_name
+ )
+
+
+def netcons_delete_target(name: str) -> None:
+ """Delete a netconsole dynamic target"""
+ target_path = f"{NETCONSOLE_CONFIGFS_PATH}/{name}"
+ try:
+ if os.path.exists(target_path):
+ os.rmdir(target_path)
+ except OSError as exception:
+ raise KsftFailEx(
+ f"Failed to delete netconsole target: {exception}"
+ ) from exception
+
+
+def netcons_load_module() -> None:
+ """Try to load the netconsole module"""
+ os.system("modprobe netconsole")
+
+
+def bpftrace_call() -> None:
+ """Call bpftrace to find how many times netpoll_poll_dev() is called.
+    Output is saved in the global variable `MAPS`"""
+
+    # This is going to update the global variable, which will be seen by the
+ # main function
+ global MAPS # pylint: disable=W0603
+
+ # This will be passed to bpftrace as in bpftrace -e "expr"
+ expr = "kprobe:netpoll_poll_dev { @hits = count(); }"
+
+ MAPS = bpftrace(expr, timeout=BPFTRACE_TIMEOUT, json=True)
+ logging.debug("BPFtrace output: %s", MAPS)
+
+
+def bpftrace_start():
+ """Start a thread to call `call_bpf` in a parallel thread"""
+ global BPF_THREAD # pylint: disable=W0603
+
+ BPF_THREAD = threading.Thread(target=bpftrace_call)
+ BPF_THREAD.start()
+ if not BPF_THREAD.is_alive():
+ raise KsftSkipEx("BPFtrace thread is not alive. Skipping test")
+
+
+def bpftrace_stop() -> None:
+ """Stop the bpftrace thread"""
+ if BPF_THREAD:
+ BPF_THREAD.join()
+
+
+def bpftrace_any_hit(join: bool) -> bool:
+ """Check if netpoll_poll_dev() was called by checking the global variable `maps`"""
+ if not BPF_THREAD:
+ raise KsftFailEx("BPFtrace didn't start")
+
+ if BPF_THREAD.is_alive():
+ if join:
+ # Wait for bpftrace to finish
+ BPF_THREAD.join()
+ else:
+            # bpftrace is still running, so we will not check the result yet
+ return False
+
+ logging.debug("MAPS coming from bpftrace = %s", MAPS)
+ if "hits" not in MAPS.keys():
+ raise KsftFailEx(f"bpftrace failed to run!?: {MAPS}")
+
+ logging.debug("Got a total of %d hits", MAPS["hits"])
+ return MAPS["hits"] > 0
+
+
+def do_netpoll_flush_monitored(cfg: NetDrvEpEnv, ifname: str, target_name: str) -> None:
+ """Print messages to the console, trying to trigger a netpoll poll"""
+    # Start bpftrace in parallel, so it is watching
+ # netpoll_poll_dev() while we are sending netconsole messages
+ bpftrace_start()
+ defer(bpftrace_stop)
+
+ do_netpoll_flush(cfg, ifname, target_name)
+
+ if bpftrace_any_hit(join=True):
+ ksft_pr("netpoll_poll_dev() was called. Success")
+ return
+
+ raise KsftXfailEx("netpoll_poll_dev() was not called during the test...")
+
+
+def do_netpoll_flush(cfg: NetDrvEpEnv, ifname: str, target_name: str) -> None:
+ """Print messages to the console, trying to trigger a netpoll poll"""
+ netcons_configure_target(cfg, ifname, target_name)
+ retry = 0
+
+ for i in range(int(ITERATIONS)):
+ if not BPF_THREAD.is_alive() or bpftrace_any_hit(join=False):
+ # bpftrace is done, stop sending messages
+ break
+
+ msg = f"netcons test #{i}"
+ with open("/dev/kmsg", "w", encoding="utf-8") as kmsg:
+ for j in range(MAX_WRITES):
+ try:
+ kmsg.write(f"{msg}-{j}\n")
+ except OSError as exception:
+                    # in some cases, kmsg can be busy, so we will retry
+ time.sleep(1)
+ retry += 1
+ if retry < 5:
+ logging.info("Failed to write to kmsg. Retrying")
+ # Just retry a few times
+ continue
+ raise KsftFailEx(
+ f"Failed to write to kmsg: {exception}"
+ ) from exception
+
+ netcons_delete_target(target_name)
+ netcons_configure_target(cfg, ifname, target_name)
+        # If we sleep here, we have a better chance of triggering the poll.
+        # This number is based on a few test runs made while developing this test.
+ time.sleep(0.4)
+
+
+def configure_network(ifname: str) -> None:
+ """Configure ring size and queue numbers"""
+
+ # Set defined queues to 1 to force congestion
+ prev_queues = ethtool_get_queues_cnt(ifname)
+ logging.debug("RX/TX/combined queues: %s", prev_queues)
+    # Only set the queues to 1 if they exist on the device, i.e., they are > 0
+ ethtool_set_queues_cnt(ifname, tuple(1 if x > 0 else x for x in prev_queues))
+ defer(ethtool_set_queues_cnt, ifname, prev_queues)
+
+ # Try to set the ring size to some low value.
+    # Do not fail if the hardware does not accept the desired values
+ prev_ring_size = ethtool_get_ringsize(ifname)
+ for size in [(1, 1), (128, 128), (256, 256)]:
+ if ethtool_set_ringsize(ifname, size):
+ # hardware accepted the desired ringsize
+ logging.debug("Set RX/TX ringsize to: %s from %s", size, prev_ring_size)
+ break
+ defer(ethtool_set_ringsize, ifname, prev_ring_size)
+
+
+def test_netpoll(cfg: NetDrvEpEnv) -> None:
+ """
+ Test netpoll by sending traffic to the interface and then sending
+ netconsole messages to trigger a poll
+ """
+
+ ifname = cfg.ifname
+ configure_network(ifname)
+ target_name = netcons_generate_random_target_name()
+ traffic = None
+
+ try:
+ traffic = GenerateTraffic(cfg)
+ do_netpoll_flush_monitored(cfg, ifname, target_name)
+ finally:
+ if traffic:
+ traffic.stop()
+
+    # Remove the netconsole target
+ netcons_delete_target(target_name)
+
+
+def test_check_dependencies() -> None:
+ """Check if the dependencies are met"""
+ if not os.path.exists(NETCONSOLE_CONFIGFS_PATH):
+ raise KsftSkipEx(
+ f"Directory {NETCONSOLE_CONFIGFS_PATH} does not exist. CONFIG_NETCONSOLE_DYNAMIC might not be set." # pylint: disable=C0301
+ )
+
+
+def main() -> None:
+ """Main function to run the test"""
+ netcons_load_module()
+ test_check_dependencies()
+ with NetDrvEpEnv(__file__) as cfg:
+ ksft_run(
+ [test_netpoll],
+ args=(cfg,),
+ )
+ ksft_exit()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/testing/selftests/drivers/net/ping.py b/tools/testing/selftests/drivers/net/ping.py
index e0f114612c1a..da3623c5e8a9 100755
--- a/tools/testing/selftests/drivers/net/ping.py
+++ b/tools/testing/selftests/drivers/net/ping.py
@@ -30,7 +30,7 @@ def _test_v6(cfg) -> None:
cmd("ping -s 65000 -c 1 -W0.5 " + cfg.addr_v["6"], host=cfg.remote)
def _test_tcp(cfg) -> None:
- cfg.require_cmd("socat", remote=True)
+ cfg.require_cmd("socat", local=False, remote=True)
port = rand_port()
listen_cmd = f"socat -{cfg.addr_ipver} -t 2 -u TCP-LISTEN:{port},reuseport STDOUT"
diff --git a/tools/testing/selftests/drivers/net/psp.py b/tools/testing/selftests/drivers/net/psp.py
new file mode 100755
index 000000000000..06559ef49b9a
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/psp.py
@@ -0,0 +1,640 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+"""Test suite for PSP capable drivers."""
+
+import errno
+import fcntl
+import socket
+import struct
+import termios
+import time
+
+from lib.py import defer
+from lib.py import ksft_run, ksft_exit, ksft_pr
+from lib.py import ksft_true, ksft_eq, ksft_ne, ksft_gt, ksft_raises
+from lib.py import ksft_not_none
+from lib.py import KsftSkipEx
+from lib.py import NetDrvEpEnv, PSPFamily, NlError
+from lib.py import bkg, rand_port, wait_port_listen
+
+
+def _get_outq(s):
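+    # TIOCOUTQ reports how many bytes are still queued in the socket's send
+    # buffer (written but not yet acked by the peer); 0 means the peer
+    # received everything we sent.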
+ one = b'\0' * 4
+ outq = fcntl.ioctl(s.fileno(), termios.TIOCOUTQ, one)
+ return struct.unpack("I", outq)[0]
+
+
+def _send_with_ack(cfg, msg):
+ cfg.comm_sock.send(msg)
+ response = cfg.comm_sock.recv(4)
+ if response != b'ack\0':
+ raise RuntimeError("Unexpected server response", response)
+
+
+def _remote_read_len(cfg):
+ cfg.comm_sock.send(b'read len\0')
+ return int(cfg.comm_sock.recv(1024)[:-1].decode('utf-8'))
+
+
+def _make_clr_conn(cfg, ipver=None):
+ _send_with_ack(cfg, b'conn clr\0')
+ remote_addr = cfg.remote_addr_v[ipver] if ipver else cfg.remote_addr
+    s = socket.create_connection((remote_addr, cfg.comm_port))
+ return s
+
+
+def _make_psp_conn(cfg, version=0, ipver=None):
+ _send_with_ack(cfg, b'conn psp\0' + struct.pack('BB', version, version))
+ remote_addr = cfg.remote_addr_v[ipver] if ipver else cfg.remote_addr
+    s = socket.create_connection((remote_addr, cfg.comm_port))
+ return s
+
+
+def _close_conn(cfg, s):
+ _send_with_ack(cfg, b'data close\0')
+ s.close()
+
+
+def _close_psp_conn(cfg, s):
+ _close_conn(cfg, s)
+
+
+def _spi_xchg(s, rx):
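+    # The exchange payload is 4 bytes of SPI in native byte order followed
+    # by the raw key; psp_responder.c mirrors the same layout back to us.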
+ s.send(struct.pack('I', rx['spi']) + rx['key'])
+ tx = s.recv(4 + len(rx['key']))
+ return {
+ 'spi': struct.unpack('I', tx[:4])[0],
+ 'key': tx[4:]
+ }
+
+
+def _send_careful(cfg, s, rounds):
+ data = b'0123456789' * 200
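+    # Note: the for/else below runs the else branch only when all retries
+    # are exhausted without a break, i.e. the send never fully completed.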
+ for i in range(rounds):
+ n = 0
+ for _ in range(10): # allow 10 retries
+ try:
+ n += s.send(data[n:], socket.MSG_DONTWAIT)
+ if n == len(data):
+ break
+ except BlockingIOError:
+ time.sleep(0.05)
+ else:
+ rlen = _remote_read_len(cfg)
+ outq = _get_outq(s)
+ report = f'sent: {i * len(data) + n} remote len: {rlen} outq: {outq}'
+ raise RuntimeError(report)
+
+ return len(data) * rounds
+
+
+def _check_data_rx(cfg, exp_len):
+ read_len = -1
+ for _ in range(30):
+ cfg.comm_sock.send(b'read len\0')
+ read_len = int(cfg.comm_sock.recv(1024)[:-1].decode('utf-8'))
+ if read_len == exp_len:
+ break
+ time.sleep(0.01)
+ ksft_eq(read_len, exp_len)
+
+
+def _check_data_outq(s, exp_len, force_wait=False):
+ outq = 0
+ for _ in range(10):
+ outq = _get_outq(s)
+ if not force_wait and outq == exp_len:
+ break
+ time.sleep(0.01)
+ ksft_eq(outq, exp_len)
+
+
+def _get_stat(cfg, key):
+ return cfg.pspnl.get_stats({'dev-id': cfg.psp_dev_id})[key]
+
+#
+# Test case boilerplate
+#
+
+def _init_psp_dev(cfg):
+ if not hasattr(cfg, 'psp_dev_id'):
+ # Figure out which local device we are testing against
+ for dev in cfg.pspnl.dev_get({}, dump=True):
+ if dev['ifindex'] == cfg.ifindex:
+ cfg.psp_info = dev
+ cfg.psp_dev_id = cfg.psp_info['id']
+ break
+ else:
+ raise KsftSkipEx("No PSP devices found")
+
+ # Enable PSP if necessary
+ cap = cfg.psp_info['psp-versions-cap']
+ ena = cfg.psp_info['psp-versions-ena']
+ if cap != ena:
+ cfg.pspnl.dev_set({'id': cfg.psp_dev_id, 'psp-versions-ena': cap})
+ defer(cfg.pspnl.dev_set, {'id': cfg.psp_dev_id,
+ 'psp-versions-ena': ena })
+
+#
+# Test cases
+#
+
+def dev_list_devices(cfg):
+ """ Dump all devices """
+ _init_psp_dev(cfg)
+
+ devices = cfg.pspnl.dev_get({}, dump=True)
+
+ found = False
+ for dev in devices:
+ found |= dev['id'] == cfg.psp_dev_id
+ ksft_true(found)
+
+
+def dev_get_device(cfg):
+ """ Get the device we intend to use """
+ _init_psp_dev(cfg)
+
+ dev = cfg.pspnl.dev_get({'id': cfg.psp_dev_id})
+ ksft_eq(dev['id'], cfg.psp_dev_id)
+
+
+def dev_get_device_bad(cfg):
+ """ Test getting device which doesn't exist """
+ raised = False
+ try:
+ cfg.pspnl.dev_get({'id': 1234567})
+ except NlError as e:
+ ksft_eq(e.nl_msg.error, -errno.ENODEV)
+ raised = True
+ ksft_true(raised)
+
+
+def dev_rotate(cfg):
+ """ Test key rotation """
+ _init_psp_dev(cfg)
+
+ prev_rotations = _get_stat(cfg, 'key-rotations')
+
+ rot = cfg.pspnl.key_rotate({"id": cfg.psp_dev_id})
+ ksft_eq(rot['id'], cfg.psp_dev_id)
+ rot = cfg.pspnl.key_rotate({"id": cfg.psp_dev_id})
+ ksft_eq(rot['id'], cfg.psp_dev_id)
+
+ cur_rotations = _get_stat(cfg, 'key-rotations')
+ ksft_eq(cur_rotations, prev_rotations + 2)
+
+
+def dev_rotate_spi(cfg):
+ """ Test key rotation and SPI check """
+ _init_psp_dev(cfg)
+
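+    # The top bit of the SPI reflects the device's key generation, so it
+    # should flip after a rotation; that is what the comparison below checks.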
+ top_a = top_b = 0
+ with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
+ assoc_a = cfg.pspnl.rx_assoc({"version": 0,
+ "dev-id": cfg.psp_dev_id,
+ "sock-fd": s.fileno()})
+ top_a = assoc_a['rx-key']['spi'] >> 31
+ s.close()
+ rot = cfg.pspnl.key_rotate({"id": cfg.psp_dev_id})
+ with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
+ ksft_eq(rot['id'], cfg.psp_dev_id)
+ assoc_b = cfg.pspnl.rx_assoc({"version": 0,
+ "dev-id": cfg.psp_dev_id,
+ "sock-fd": s.fileno()})
+ top_b = assoc_b['rx-key']['spi'] >> 31
+ s.close()
+ ksft_ne(top_a, top_b)
+
+
+def assoc_basic(cfg):
+ """ Test creating associations """
+ _init_psp_dev(cfg)
+
+ with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
+ assoc = cfg.pspnl.rx_assoc({"version": 0,
+ "dev-id": cfg.psp_dev_id,
+ "sock-fd": s.fileno()})
+ ksft_eq(assoc['dev-id'], cfg.psp_dev_id)
+ ksft_gt(assoc['rx-key']['spi'], 0)
+ ksft_eq(len(assoc['rx-key']['key']), 16)
+
+ assoc = cfg.pspnl.tx_assoc({"dev-id": cfg.psp_dev_id,
+ "version": 0,
+ "tx-key": assoc['rx-key'],
+ "sock-fd": s.fileno()})
+ ksft_eq(len(assoc), 0)
+ s.close()
+
+
+def assoc_bad_dev(cfg):
+ """ Test creating associations with bad device ID """
+ _init_psp_dev(cfg)
+
+ with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
+ with ksft_raises(NlError) as cm:
+ cfg.pspnl.rx_assoc({"version": 0,
+ "dev-id": cfg.psp_dev_id + 1234567,
+ "sock-fd": s.fileno()})
+ ksft_eq(cm.exception.nl_msg.error, -errno.ENODEV)
+
+
+def assoc_sk_only_conn(cfg):
+ """ Test creating associations based on socket """
+ _init_psp_dev(cfg)
+
+ with _make_clr_conn(cfg) as s:
+ assoc = cfg.pspnl.rx_assoc({"version": 0,
+ "sock-fd": s.fileno()})
+ ksft_eq(assoc['dev-id'], cfg.psp_dev_id)
+ cfg.pspnl.tx_assoc({"version": 0,
+ "tx-key": assoc['rx-key'],
+ "sock-fd": s.fileno()})
+ _close_conn(cfg, s)
+
+
+def assoc_sk_only_mismatch(cfg):
+ """ Test creating associations based on socket (dev mismatch) """
+ _init_psp_dev(cfg)
+
+ with _make_clr_conn(cfg) as s:
+ with ksft_raises(NlError) as cm:
+ cfg.pspnl.rx_assoc({"version": 0,
+ "dev-id": cfg.psp_dev_id + 1234567,
+ "sock-fd": s.fileno()})
+ the_exception = cm.exception
+ ksft_eq(the_exception.nl_msg.extack['bad-attr'], ".dev-id")
+ ksft_eq(the_exception.nl_msg.error, -errno.EINVAL)
+
+
+def assoc_sk_only_mismatch_tx(cfg):
+ """ Test creating associations based on socket (dev mismatch) """
+ _init_psp_dev(cfg)
+
+ with _make_clr_conn(cfg) as s:
+ with ksft_raises(NlError) as cm:
+ assoc = cfg.pspnl.rx_assoc({"version": 0,
+ "sock-fd": s.fileno()})
+ cfg.pspnl.tx_assoc({"version": 0,
+ "tx-key": assoc['rx-key'],
+ "dev-id": cfg.psp_dev_id + 1234567,
+ "sock-fd": s.fileno()})
+ the_exception = cm.exception
+ ksft_eq(the_exception.nl_msg.extack['bad-attr'], ".dev-id")
+ ksft_eq(the_exception.nl_msg.error, -errno.EINVAL)
+
+
+def assoc_sk_only_unconn(cfg):
+ """ Test creating associations based on socket (unconnected, should fail) """
+ _init_psp_dev(cfg)
+
+ with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
+ with ksft_raises(NlError) as cm:
+ cfg.pspnl.rx_assoc({"version": 0,
+ "sock-fd": s.fileno()})
+ the_exception = cm.exception
+ ksft_eq(the_exception.nl_msg.extack['miss-type'], "dev-id")
+ ksft_eq(the_exception.nl_msg.error, -errno.EINVAL)
+
+
+def assoc_version_mismatch(cfg):
+ """ Test creating associations where Rx and Tx PSP versions do not match """
+ _init_psp_dev(cfg)
+
+ versions = list(cfg.psp_info['psp-versions-cap'])
+ if len(versions) < 2:
+ raise KsftSkipEx("Not enough PSP versions supported by the device for the test")
+
+ # Translate versions to integers
+ versions = [cfg.pspnl.consts["version"].entries[v].value for v in versions]
+
+ with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
+ rx = cfg.pspnl.rx_assoc({"version": versions[0],
+ "dev-id": cfg.psp_dev_id,
+ "sock-fd": s.fileno()})
+
+ for version in versions[1:]:
+ with ksft_raises(NlError) as cm:
+ cfg.pspnl.tx_assoc({"dev-id": cfg.psp_dev_id,
+ "version": version,
+ "tx-key": rx['rx-key'],
+ "sock-fd": s.fileno()})
+ the_exception = cm.exception
+ ksft_eq(the_exception.nl_msg.error, -errno.EINVAL)
+
+
+def assoc_twice(cfg):
+ """ Test reusing Tx assoc for two sockets """
+ _init_psp_dev(cfg)
+
+ def rx_assoc_check(s):
+ assoc = cfg.pspnl.rx_assoc({"version": 0,
+ "dev-id": cfg.psp_dev_id,
+ "sock-fd": s.fileno()})
+ ksft_eq(assoc['dev-id'], cfg.psp_dev_id)
+ ksft_gt(assoc['rx-key']['spi'], 0)
+ ksft_eq(len(assoc['rx-key']['key']), 16)
+
+ return assoc
+
+ with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
+ assoc = rx_assoc_check(s)
+ tx = cfg.pspnl.tx_assoc({"dev-id": cfg.psp_dev_id,
+ "version": 0,
+ "tx-key": assoc['rx-key'],
+ "sock-fd": s.fileno()})
+ ksft_eq(len(tx), 0)
+
+ # Use the same Tx assoc second time
+ with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s2:
+ rx_assoc_check(s2)
+ tx = cfg.pspnl.tx_assoc({"dev-id": cfg.psp_dev_id,
+ "version": 0,
+ "tx-key": assoc['rx-key'],
+ "sock-fd": s2.fileno()})
+ ksft_eq(len(tx), 0)
+
+ s.close()
+
+
+def _data_basic_send(cfg, version, ipver):
+ """ Test basic data send """
+ _init_psp_dev(cfg)
+
+ # Version 0 is required by spec, don't let it skip
+ if version:
+ name = cfg.pspnl.consts["version"].entries_by_val[version].name
+ if name not in cfg.psp_info['psp-versions-cap']:
+ with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
+ with ksft_raises(NlError) as cm:
+ cfg.pspnl.rx_assoc({"version": version,
+ "dev-id": cfg.psp_dev_id,
+ "sock-fd": s.fileno()})
+ ksft_eq(cm.exception.nl_msg.error, -errno.EOPNOTSUPP)
+ raise KsftSkipEx("PSP version not supported", name)
+
+ s = _make_psp_conn(cfg, version, ipver)
+
+ rx_assoc = cfg.pspnl.rx_assoc({"version": version,
+ "dev-id": cfg.psp_dev_id,
+ "sock-fd": s.fileno()})
+ rx = rx_assoc['rx-key']
+ tx = _spi_xchg(s, rx)
+
+ cfg.pspnl.tx_assoc({"dev-id": cfg.psp_dev_id,
+ "version": version,
+ "tx-key": tx,
+ "sock-fd": s.fileno()})
+
+ data_len = _send_careful(cfg, s, 100)
+ _check_data_rx(cfg, data_len)
+ _close_psp_conn(cfg, s)
+
+
+def __bad_xfer_do(cfg, s, tx, version='hdr0-aes-gcm-128'):
+ # Make sure we accept the ACK for the SPI before we seal with the bad assoc
+ _check_data_outq(s, 0)
+
+ cfg.pspnl.tx_assoc({"dev-id": cfg.psp_dev_id,
+ "version": version,
+ "tx-key": tx,
+ "sock-fd": s.fileno()})
+
+ data_len = _send_careful(cfg, s, 20)
+ _check_data_outq(s, data_len, force_wait=True)
+ _check_data_rx(cfg, 0)
+ _close_psp_conn(cfg, s)
+
+
+def data_send_bad_key(cfg):
+ """ Test send data with bad key """
+ _init_psp_dev(cfg)
+
+ s = _make_psp_conn(cfg)
+
+ rx_assoc = cfg.pspnl.rx_assoc({"version": 0,
+ "dev-id": cfg.psp_dev_id,
+ "sock-fd": s.fileno()})
+ rx = rx_assoc['rx-key']
+ tx = _spi_xchg(s, rx)
+ tx['key'] = (tx['key'][0] ^ 0xff).to_bytes(1, 'little') + tx['key'][1:]
+ __bad_xfer_do(cfg, s, tx)
+
+
+def data_send_disconnect(cfg):
+ """ Test socket close after sending data """
+ _init_psp_dev(cfg)
+
+ with _make_psp_conn(cfg) as s:
+ assoc = cfg.pspnl.rx_assoc({"version": 0,
+ "sock-fd": s.fileno()})
+ tx = _spi_xchg(s, assoc['rx-key'])
+ cfg.pspnl.tx_assoc({"version": 0,
+ "tx-key": tx,
+ "sock-fd": s.fileno()})
+
+ data_len = _send_careful(cfg, s, 100)
+ _check_data_rx(cfg, data_len)
+
+ s.shutdown(socket.SHUT_RDWR)
+ s.close()
+
+
+def _data_mss_adjust(cfg, ipver):
+ _init_psp_dev(cfg)
+
+ # First figure out what the MSS would be without any adjustments
+ s = _make_clr_conn(cfg, ipver)
+ s.send(b"0123456789abcdef" * 1024)
+ _check_data_rx(cfg, 16 * 1024)
+ mss = s.getsockopt(socket.IPPROTO_TCP, socket.TCP_MAXSEG)
+ _close_conn(cfg, s)
+
+ s = _make_psp_conn(cfg, 0, ipver)
+ try:
+ rx_assoc = cfg.pspnl.rx_assoc({"version": 0,
+ "dev-id": cfg.psp_dev_id,
+ "sock-fd": s.fileno()})
+ rx = rx_assoc['rx-key']
+ tx = _spi_xchg(s, rx)
+
+ rxmss = s.getsockopt(socket.IPPROTO_TCP, socket.TCP_MAXSEG)
+ ksft_eq(mss, rxmss)
+
+ cfg.pspnl.tx_assoc({"dev-id": cfg.psp_dev_id,
+ "version": 0,
+ "tx-key": tx,
+ "sock-fd": s.fileno()})
+
+ txmss = s.getsockopt(socket.IPPROTO_TCP, socket.TCP_MAXSEG)
+ ksft_eq(mss, txmss + 40)
+
+ data_len = _send_careful(cfg, s, 100)
+ _check_data_rx(cfg, data_len)
+ _check_data_outq(s, 0)
+
+ txmss = s.getsockopt(socket.IPPROTO_TCP, socket.TCP_MAXSEG)
+ ksft_eq(mss, txmss + 40)
+ finally:
+ _close_psp_conn(cfg, s)
+
+
+def data_stale_key(cfg):
+ """ Test send on a double-rotated key """
+ _init_psp_dev(cfg)
+
+ prev_stale = _get_stat(cfg, 'stale-events')
+ s = _make_psp_conn(cfg)
+ try:
+ rx_assoc = cfg.pspnl.rx_assoc({"version": 0,
+ "dev-id": cfg.psp_dev_id,
+ "sock-fd": s.fileno()})
+ rx = rx_assoc['rx-key']
+ tx = _spi_xchg(s, rx)
+
+ cfg.pspnl.tx_assoc({"dev-id": cfg.psp_dev_id,
+ "version": 0,
+ "tx-key": tx,
+ "sock-fd": s.fileno()})
+
+ data_len = _send_careful(cfg, s, 100)
+ _check_data_rx(cfg, data_len)
+ _check_data_outq(s, 0)
+
+ cfg.pspnl.key_rotate({"id": cfg.psp_dev_id})
+ cfg.pspnl.key_rotate({"id": cfg.psp_dev_id})
+
+ cur_stale = _get_stat(cfg, 'stale-events')
+ ksft_gt(cur_stale, prev_stale)
+
+ s.send(b'0123456789' * 200)
+ _check_data_outq(s, 2000, force_wait=True)
+ finally:
+ _close_psp_conn(cfg, s)
+
+
+def __nsim_psp_rereg(cfg):
+ # The PSP dev ID will change, remember what was there before
+    before = set(x['id'] for x in cfg.pspnl.dev_get({}, dump=True))
+
+ cfg._ns.nsims[0].dfs_write('psp_rereg', '1')
+
+    after = set(x['id'] for x in cfg.pspnl.dev_get({}, dump=True))
+
+ new_devs = list(after - before)
+ ksft_eq(len(new_devs), 1)
+    cfg.psp_dev_id = new_devs[0]
+
+
+def removal_device_rx(cfg):
+    """ Test removing a netdev / PSP device with active Rx assoc """
+
+ # We could technically devlink reload real devices, too
+ # but that kills the control socket. So test this on
+ # netdevsim only for now
+ cfg.require_nsim()
+
+ s = _make_clr_conn(cfg)
+ try:
+ rx_assoc = cfg.pspnl.rx_assoc({"version": 0,
+ "dev-id": cfg.psp_dev_id,
+ "sock-fd": s.fileno()})
+ ksft_not_none(rx_assoc)
+
+ __nsim_psp_rereg(cfg)
+ finally:
+ _close_conn(cfg, s)
+
+
+def removal_device_bi(cfg):
+    """ Test removing a netdev / PSP device with active Rx/Tx assoc """
+
+ # We could technically devlink reload real devices, too
+ # but that kills the control socket. So test this on
+ # netdevsim only for now
+ cfg.require_nsim()
+
+ s = _make_clr_conn(cfg)
+ try:
+ rx_assoc = cfg.pspnl.rx_assoc({"version": 0,
+ "dev-id": cfg.psp_dev_id,
+ "sock-fd": s.fileno()})
+ cfg.pspnl.tx_assoc({"dev-id": cfg.psp_dev_id,
+ "version": 0,
+ "tx-key": rx_assoc['rx-key'],
+ "sock-fd": s.fileno()})
+ __nsim_psp_rereg(cfg)
+ finally:
+ _close_conn(cfg, s)
+
+
+def psp_ip_ver_test_builder(name, test_func, psp_ver, ipver):
+ """Build test cases for each combo of PSP version and IP version"""
+    def test_case(cfg):
+        cfg.require_ipver(ipver)
+        test_func(cfg, psp_ver, ipver)
+    test_case.__name__ = f"{name}_v{psp_ver}_ip{ipver}"
+    return test_case
+
+
+def ipver_test_builder(name, test_func, ipver):
+ """Build test cases for each IP version"""
+    def test_case(cfg):
+        cfg.require_ipver(ipver)
+        test_func(cfg, ipver)
+    test_case.__name__ = f"{name}_ip{ipver}"
+    return test_case
+
+
+def main() -> None:
+    """ Ksft boilerplate main """
+
+ with NetDrvEpEnv(__file__) as cfg:
+ cfg.pspnl = PSPFamily()
+
+ # Set up responder and communication sock
+ responder = cfg.remote.deploy("psp_responder")
+
+ cfg.comm_port = rand_port()
+ srv = None
+ try:
+ with bkg(responder + f" -p {cfg.comm_port}", host=cfg.remote,
+ exit_wait=True) as srv:
+ wait_port_listen(cfg.comm_port, host=cfg.remote)
+
+ cfg.comm_sock = socket.create_connection((cfg.remote_addr,
+ cfg.comm_port),
+ timeout=1)
+
+ cases = [
+ psp_ip_ver_test_builder(
+ "data_basic_send", _data_basic_send, version, ipver
+ )
+ for version in range(0, 4)
+ for ipver in ("4", "6")
+ ]
+ cases += [
+ ipver_test_builder("data_mss_adjust", _data_mss_adjust, ipver)
+ for ipver in ("4", "6")
+ ]
+
+ ksft_run(cases=cases, globs=globals(),
+ case_pfx={"dev_", "data_", "assoc_", "removal_"},
+ args=(cfg, ))
+
+ cfg.comm_sock.send(b"exit\0")
+ cfg.comm_sock.close()
+ finally:
+ if srv and (srv.stdout or srv.stderr):
+ ksft_pr("")
+ ksft_pr(f"Responder logs ({srv.ret}):")
+ if srv and srv.stdout:
+ ksft_pr("STDOUT:\n# " + srv.stdout.strip().replace("\n", "\n# "))
+ if srv and srv.stderr:
+ ksft_pr("STDERR:\n# " + srv.stderr.strip().replace("\n", "\n# "))
+ ksft_exit()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/testing/selftests/drivers/net/psp_responder.c b/tools/testing/selftests/drivers/net/psp_responder.c
new file mode 100644
index 000000000000..f309e0d73cbf
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/psp_responder.c
@@ -0,0 +1,483 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <stdio.h>
+#include <string.h>
+#include <sys/poll.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <netinet/in.h>
+#include <unistd.h>
+
+#include <ynl.h>
+
+#include "psp-user.h"
+
+#define dbg(msg...) \
+do { \
+ if (opts->verbose) \
+ fprintf(stderr, "DEBUG: " msg); \
+} while (0)
+
+static bool should_quit;
+
+struct opts {
+ int port;
+ int devid;
+ bool verbose;
+};
+
+enum accept_cfg {
+ ACCEPT_CFG_NONE = 0,
+ ACCEPT_CFG_CLEAR,
+ ACCEPT_CFG_PSP,
+};
+
+static struct {
+ unsigned char tx;
+ unsigned char rx;
+} psp_vers;
+
+static int conn_setup_psp(struct ynl_sock *ys, struct opts *opts, int data_sock)
+{
+ struct psp_rx_assoc_rsp *rsp;
+ struct psp_rx_assoc_req *req;
+ struct psp_tx_assoc_rsp *tsp;
+ struct psp_tx_assoc_req *teq;
+ char info[300];
+ int key_len;
+ ssize_t sz;
+ __u32 spi;
+
+ dbg("create PSP connection\n");
+
+ // Rx assoc alloc
+ req = psp_rx_assoc_req_alloc();
+
+ psp_rx_assoc_req_set_sock_fd(req, data_sock);
+ psp_rx_assoc_req_set_version(req, psp_vers.rx);
+
+ rsp = psp_rx_assoc(ys, req);
+ psp_rx_assoc_req_free(req);
+
+ if (!rsp) {
+ perror("ERROR: failed to Rx assoc");
+ return -1;
+ }
+
+ // SPI exchange
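+	// Wire format matches _spi_xchg() in psp.py: 4 bytes of SPI in
+	// host byte order followed by the raw key bytes.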
+ key_len = rsp->rx_key._len.key;
+ memcpy(info, &rsp->rx_key.spi, sizeof(spi));
+ memcpy(&info[sizeof(spi)], rsp->rx_key.key, key_len);
+ sz = sizeof(spi) + key_len;
+
+ send(data_sock, info, sz, MSG_WAITALL);
+ psp_rx_assoc_rsp_free(rsp);
+
+ sz = recv(data_sock, info, sz, MSG_WAITALL);
+ if (sz < 0) {
+ perror("ERROR: failed to read PSP key from sock");
+ return -1;
+ }
+ memcpy(&spi, info, sizeof(spi));
+
+ // Setup Tx assoc
+ teq = psp_tx_assoc_req_alloc();
+
+ psp_tx_assoc_req_set_sock_fd(teq, data_sock);
+ psp_tx_assoc_req_set_version(teq, psp_vers.tx);
+ psp_tx_assoc_req_set_tx_key_spi(teq, spi);
+ psp_tx_assoc_req_set_tx_key_key(teq, &info[sizeof(spi)], key_len);
+
+ tsp = psp_tx_assoc(ys, teq);
+ psp_tx_assoc_req_free(teq);
+ if (!tsp) {
+ perror("ERROR: failed to Tx assoc");
+ return -1;
+ }
+ psp_tx_assoc_rsp_free(tsp);
+
+ return 0;
+}
+
+static void send_ack(int sock)
+{
+ send(sock, "ack", 4, MSG_WAITALL);
+}
+
+static void send_err(int sock)
+{
+ send(sock, "err", 4, MSG_WAITALL);
+}
+
+static void send_str(int sock, int value)
+{
+ char buf[128];
+ int ret;
+
+ ret = snprintf(buf, sizeof(buf), "%d", value);
+ send(sock, buf, ret + 1, MSG_WAITALL);
+}
+
+static void
+run_session(struct ynl_sock *ys, struct opts *opts,
+ int server_sock, int comm_sock)
+{
+ enum accept_cfg accept_cfg = ACCEPT_CFG_NONE;
+ struct pollfd pfds[3];
+ size_t data_read = 0;
+ int data_sock = -1;
+
+ while (true) {
+ bool race_close = false;
+ int nfds;
+
+ memset(pfds, 0, sizeof(pfds));
+
+ pfds[0].fd = server_sock;
+ pfds[0].events = POLLIN;
+
+ pfds[1].fd = comm_sock;
+ pfds[1].events = POLLIN;
+
+ nfds = 2;
+ if (data_sock >= 0) {
+ pfds[2].fd = data_sock;
+ pfds[2].events = POLLIN;
+ nfds++;
+ }
+
+ dbg(" ...\n");
+ if (poll(pfds, nfds, -1) < 0) {
+ perror("poll");
+ break;
+ }
+
+ /* data sock */
+ if (pfds[2].revents & POLLIN) {
+ char buf[8192];
+ ssize_t n;
+
+ n = recv(data_sock, buf, sizeof(buf), 0);
+ if (n <= 0) {
+ if (n < 0)
+ perror("data read");
+ close(data_sock);
+ data_sock = -1;
+ dbg("data sock closed\n");
+ } else {
+ data_read += n;
+ dbg("data read %zd\n", data_read);
+ }
+ }
+
+ /* comm sock */
+ if (pfds[1].revents & POLLIN) {
+ static char buf[4096];
+ static ssize_t off;
+ bool consumed;
+ ssize_t n;
+
+ n = recv(comm_sock, &buf[off], sizeof(buf) - off, 0);
+ if (n <= 0) {
+ if (n < 0)
+ perror("comm read");
+ return;
+ }
+
+ off += n;
+ n = off;
+
+#define __consume(sz) \
+ ({ \
+ if (n == (sz)) { \
+ off = 0; \
+ } else { \
+ off -= (sz); \
+ memmove(buf, &buf[(sz)], off); \
+ } \
+ })
+
+#define cmd(_name) \
+ ({ \
+ ssize_t sz = sizeof(_name); \
+ bool match = n >= sz && !memcmp(buf, _name, sz); \
+ \
+ if (match) { \
+ dbg("command: " _name "\n"); \
+ __consume(sz); \
+ } \
+ consumed |= match; \
+ match; \
+ })
+
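+			/*
+			 * Example (illustrative): the Python side sends
+			 * NUL-terminated commands such as "conn psp\0"
+			 * followed by two version bytes; cmd() matches the
+			 * string including its terminator and __consume()
+			 * shifts the unparsed version bytes to the front of
+			 * buf for the loop below.
+			 */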
+ do {
+ consumed = false;
+
+ if (cmd("read len"))
+ send_str(comm_sock, data_read);
+
+ if (cmd("data echo")) {
+ if (data_sock >= 0)
+ send(data_sock, "echo", 5,
+ MSG_WAITALL);
+ else
+ fprintf(stderr, "WARN: echo but no data sock\n");
+ send_ack(comm_sock);
+ }
+ if (cmd("data close")) {
+ if (data_sock >= 0) {
+ close(data_sock);
+ data_sock = -1;
+ send_ack(comm_sock);
+ } else {
+ race_close = true;
+ }
+ }
+ if (cmd("conn psp")) {
+ if (accept_cfg != ACCEPT_CFG_NONE)
+ fprintf(stderr, "WARN: old conn config still set!\n");
+ accept_cfg = ACCEPT_CFG_PSP;
+ send_ack(comm_sock);
+ /* next two bytes are versions */
+ if (off >= 2) {
+ memcpy(&psp_vers, buf, 2);
+ __consume(2);
+ } else {
+ fprintf(stderr, "WARN: short conn psp command!\n");
+ }
+ }
+ if (cmd("conn clr")) {
+ if (accept_cfg != ACCEPT_CFG_NONE)
+ fprintf(stderr, "WARN: old conn config still set!\n");
+ accept_cfg = ACCEPT_CFG_CLEAR;
+ send_ack(comm_sock);
+ }
+ if (cmd("exit"))
+ should_quit = true;
+#undef cmd
+
+ if (!consumed) {
+ fprintf(stderr, "WARN: unknown cmd: [%zd] %s\n",
+ off, buf);
+ }
+ } while (consumed && off);
+ }
+
+ /* server sock */
+ if (pfds[0].revents & POLLIN) {
+ if (data_sock >= 0) {
+ fprintf(stderr, "WARN: new data sock but old one still here\n");
+ close(data_sock);
+ data_sock = -1;
+ }
+ data_sock = accept(server_sock, NULL, NULL);
+ if (data_sock < 0) {
+ perror("accept");
+ continue;
+ }
+ data_read = 0;
+
+ if (accept_cfg == ACCEPT_CFG_CLEAR) {
+ dbg("new data sock: clear\n");
+ /* nothing to do */
+ } else if (accept_cfg == ACCEPT_CFG_PSP) {
+ dbg("new data sock: psp\n");
+ conn_setup_psp(ys, opts, data_sock);
+ } else {
+ fprintf(stderr, "WARN: new data sock but no config\n");
+ }
+ accept_cfg = ACCEPT_CFG_NONE;
+ }
+
+ if (race_close) {
+ if (data_sock >= 0) {
+ /* indeed, ordering problem, handle the close */
+ close(data_sock);
+ data_sock = -1;
+ send_ack(comm_sock);
+ } else {
+ fprintf(stderr, "WARN: close but no data sock\n");
+ send_err(comm_sock);
+ }
+ }
+ }
+ dbg("session ending\n");
+}
+
+static int spawn_server(struct opts *opts)
+{
+ struct sockaddr_in6 addr;
+ int fd;
+
+ fd = socket(AF_INET6, SOCK_STREAM, 0);
+ if (fd < 0) {
+ perror("can't open socket");
+ return -1;
+ }
+
+ memset(&addr, 0, sizeof(addr));
+
+ addr.sin6_family = AF_INET6;
+ addr.sin6_addr = in6addr_any;
+ addr.sin6_port = htons(opts->port);
+
+ if (bind(fd, (struct sockaddr *)&addr, sizeof(addr))) {
+ perror("can't bind socket");
+ return -1;
+ }
+
+ if (listen(fd, 5)) {
+ perror("can't listen");
+ return -1;
+ }
+
+ return fd;
+}
+
+static int run_responder(struct ynl_sock *ys, struct opts *opts)
+{
+ int server_sock, comm;
+
+ server_sock = spawn_server(opts);
+ if (server_sock < 0)
+ return 4;
+
+ while (!should_quit) {
+ comm = accept(server_sock, NULL, NULL);
+ if (comm < 0) {
+ perror("accept failed");
+ } else {
+ run_session(ys, opts, server_sock, comm);
+ close(comm);
+ }
+ }
+
+ return 0;
+}
+
+static void usage(const char *name, const char *miss)
+{
+ if (miss)
+ fprintf(stderr, "Missing argument: %s\n", miss);
+
+ fprintf(stderr, "Usage: %s -p port [-v] [-d psp-dev-id]\n", name);
+ exit(EXIT_FAILURE);
+}
+
+static void parse_cmd_opts(int argc, char **argv, struct opts *opts)
+{
+ int opt;
+
+ while ((opt = getopt(argc, argv, "vp:d:")) != -1) {
+ switch (opt) {
+ case 'v':
+ opts->verbose = 1;
+ break;
+ case 'p':
+ opts->port = atoi(optarg);
+ break;
+ case 'd':
+ opts->devid = atoi(optarg);
+ break;
+ default:
+ usage(argv[0], NULL);
+ }
+ }
+}
+
+static int psp_dev_set_ena(struct ynl_sock *ys, __u32 dev_id, __u32 versions)
+{
+ struct psp_dev_set_req *sreq;
+ struct psp_dev_set_rsp *srsp;
+
+ fprintf(stderr, "Set PSP enable on device %d to 0x%x\n",
+ dev_id, versions);
+
+ sreq = psp_dev_set_req_alloc();
+
+ psp_dev_set_req_set_id(sreq, dev_id);
+ psp_dev_set_req_set_psp_versions_ena(sreq, versions);
+
+ srsp = psp_dev_set(ys, sreq);
+ psp_dev_set_req_free(sreq);
+ if (!srsp)
+ return 10;
+
+ psp_dev_set_rsp_free(srsp);
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ struct psp_dev_get_list *dev_list;
+ bool devid_found = false;
+ __u32 ver_ena, ver_cap;
+ struct opts opts = {};
+ struct ynl_error yerr;
+ struct ynl_sock *ys;
+ int first_id = 0;
+ int ret;
+
+ parse_cmd_opts(argc, argv, &opts);
+ if (!opts.port)
+ usage(argv[0], "port"); // exits
+
+ ys = ynl_sock_create(&ynl_psp_family, &yerr);
+ if (!ys) {
+ fprintf(stderr, "YNL: %s\n", yerr.msg);
+ return 1;
+ }
+
+ dev_list = psp_dev_get_dump(ys);
+ if (ynl_dump_empty(dev_list)) {
+ if (ys->err.code)
+ goto err_close;
+ fprintf(stderr, "No PSP devices\n");
+ goto err_close_silent;
+ }
+
+ ynl_dump_foreach(dev_list, d) {
+		if (opts.devid) {
+			if (d->id != opts.devid)
+				continue;
+			devid_found = true;
+			ver_ena = d->psp_versions_ena;
+			ver_cap = d->psp_versions_cap;
+ } else if (!first_id) {
+ first_id = d->id;
+ ver_ena = d->psp_versions_ena;
+ ver_cap = d->psp_versions_cap;
+ } else {
+ fprintf(stderr, "Multiple PSP devices found\n");
+ goto err_close_silent;
+ }
+ }
+ psp_dev_get_list_free(dev_list);
+
+ if (opts.devid && !devid_found) {
+ fprintf(stderr, "PSP device %d requested on cmdline, not found\n",
+ opts.devid);
+ goto err_close_silent;
+ } else if (!opts.devid) {
+ opts.devid = first_id;
+ }
+
+ if (ver_ena != ver_cap) {
+ ret = psp_dev_set_ena(ys, opts.devid, ver_cap);
+ if (ret)
+ goto err_close;
+ }
+
+ ret = run_responder(ys, &opts);
+
+ if (ver_ena != ver_cap && psp_dev_set_ena(ys, opts.devid, ver_ena))
+ fprintf(stderr, "WARN: failed to set the PSP versions back\n");
+
+ ynl_sock_destroy(ys);
+
+ return ret;
+
+err_close:
+ fprintf(stderr, "YNL: %s\n", ys->err.msg);
+err_close_silent:
+ ynl_sock_destroy(ys);
+ return 2;
+}
diff --git a/tools/testing/selftests/drivers/net/ring_reconfig.py b/tools/testing/selftests/drivers/net/ring_reconfig.py
new file mode 100755
index 000000000000..f9530a8b0856
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/ring_reconfig.py
@@ -0,0 +1,167 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+Test channel and ring size configuration via ethtool (-L / -G).
+"""
+
+from lib.py import ksft_run, ksft_exit, ksft_pr
+from lib.py import ksft_eq
+from lib.py import NetDrvEpEnv, EthtoolFamily, GenerateTraffic
+from lib.py import defer, NlError
+
+
+def channels(cfg) -> None:
+ """
+ Twiddle channel counts in various combinations of parameters.
+    We only check that the driver adheres to the requested config
+    when the config is accepted, and that nothing crashes.
+ """
+ ehdr = {'header':{'dev-index': cfg.ifindex}}
+ chans = cfg.eth.channels_get(ehdr)
+
+ all_keys = ["rx", "tx", "combined"]
+ mixes = [{"combined"}, {"rx", "tx"}, {"rx", "combined"}, {"tx", "combined"},
+ {"rx", "tx", "combined"},]
+
+ # Get the set of keys that device actually supports
+ restore = {}
+ supported = set()
+ for key in all_keys:
+ if key + "-max" in chans:
+ supported.add(key)
+ restore |= {key + "-count": chans[key + "-count"]}
+
+ defer(cfg.eth.channels_set, ehdr | restore)
+
+ def test_config(config):
+ try:
+ cfg.eth.channels_set(ehdr | config)
+ get = cfg.eth.channels_get(ehdr)
+ for k, v in config.items():
+ ksft_eq(get.get(k, 0), v)
+ except NlError as e:
+ failed.append(mix)
+ ksft_pr("Can't set", config, e)
+ else:
+ ksft_pr("Okay", config)
+
+ failed = []
+ for mix in mixes:
+ if not mix.issubset(supported):
+ continue
+
+ # Set all the values in the mix to 1, other supported to 0
+ config = {}
+ for key in all_keys:
+ config[key + "-count"] = 1 if key in mix else 0
+ test_config(config)
+
+ for mix in mixes:
+ if not mix.issubset(supported):
+ continue
+ if mix in failed:
+ continue
+
+ # Set all the values in the mix to max, other supported to 0
+ config = {}
+ for key in all_keys:
+ config[key + "-count"] = chans[key + '-max'] if key in mix else 0
+ test_config(config)
+
+
+def _configure_min_ring_cnt(cfg) -> None:
+ """ Try to configure a single Rx/Tx ring. """
+ ehdr = {'header':{'dev-index': cfg.ifindex}}
+ chans = cfg.eth.channels_get(ehdr)
+
+ all_keys = ["rx-count", "tx-count", "combined-count"]
+ restore = {}
+ config = {}
+ for key in all_keys:
+ if key in chans:
+ restore[key] = chans[key]
+ config[key] = 0
+
+ if chans.get('combined-count', 0) > 1:
+ config['combined-count'] = 1
+ elif chans.get('rx-count', 0) > 1 and chans.get('tx-count', 0) > 1:
+ config['tx-count'] = 1
+ config['rx-count'] = 1
+ else:
+ # looks like we're already on 1 channel
+ return
+
+ cfg.eth.channels_set(ehdr | config)
+ defer(cfg.eth.channels_set, ehdr | restore)
+
+
+def ringparam(cfg) -> None:
+ """
+ Tweak the ringparam configuration. Try to run some traffic over min
+ ring size to make sure it actually functions.
+ """
+ ehdr = {'header':{'dev-index': cfg.ifindex}}
+ rings = cfg.eth.rings_get(ehdr)
+
+ restore = {}
+ maxes = {}
+ params = set()
+ for key in rings.keys():
+ if 'max' in key:
+ param = key[:-4]
+ maxes[param] = rings[key]
+ params.add(param)
+ restore[param] = rings[param]
+
+ defer(cfg.eth.rings_set, ehdr | restore)
+
+ # Speed up the reconfig by configuring just one ring
+ _configure_min_ring_cnt(cfg)
+
+ # Try to reach min on all settings
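+    # Keep halving each value until the driver rejects it; whatever was last
+    # accepted is treated as the minimum for that parameter.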
+ for param in params:
+ val = rings[param]
+ while True:
+ try:
+ cfg.eth.rings_set({'header':{'dev-index': cfg.ifindex},
+ param: val // 2})
+ if val == 0:
+ break
+ val //= 2
+ except NlError:
+ break
+
+ get = cfg.eth.rings_get(ehdr)
+ ksft_eq(get[param], val)
+
+        ksft_pr(f"Reached min for '{param}' at {val} (was {rings[param]})")
+
+ GenerateTraffic(cfg).wait_pkts_and_stop(10000)
+
+ # Try max across all params, if the driver supports large rings
+ # this may OOM so we ignore errors
+ try:
+ ksft_pr("Applying max settings")
+ config = {p: maxes[p] for p in params}
+ cfg.eth.rings_set(ehdr | config)
+ except NlError as e:
+ ksft_pr("Can't set max params", config, e)
+ else:
+ GenerateTraffic(cfg).wait_pkts_and_stop(10000)
+
+
+def main() -> None:
+    """ Ksft boilerplate main """
+
+ with NetDrvEpEnv(__file__) as cfg:
+ cfg.eth = EthtoolFamily()
+
+ ksft_run([channels,
+ ringparam],
+ args=(cfg, ))
+ ksft_exit()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/testing/selftests/drivers/net/stats.py b/tools/testing/selftests/drivers/net/stats.py
index efcc1e10575b..b08e4d48b15c 100755
--- a/tools/testing/selftests/drivers/net/stats.py
+++ b/tools/testing/selftests/drivers/net/stats.py
@@ -1,12 +1,16 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
+"""
+Tests related to standard netdevice statistics.
+"""
+
import errno
import subprocess
import time
from lib.py import ksft_run, ksft_exit, ksft_pr
from lib.py import ksft_ge, ksft_eq, ksft_is, ksft_in, ksft_lt, ksft_true, ksft_raises
-from lib.py import KsftSkipEx, KsftXfailEx
+from lib.py import KsftSkipEx, KsftFailEx
from lib.py import ksft_disruptive
from lib.py import EthtoolFamily, NetdevFamily, RtnlFamily, NlError
from lib.py import NetDrvEnv
@@ -18,13 +22,16 @@ rtnl = RtnlFamily()
def check_pause(cfg) -> None:
- global ethnl
+ """
+ Check that drivers which support Pause config also report standard
+ pause stats.
+ """
try:
ethnl.pause_get({"header": {"dev-index": cfg.ifindex}})
except NlError as e:
if e.error == errno.EOPNOTSUPP:
- raise KsftXfailEx("pause not supported by the device")
+ raise KsftSkipEx("pause not supported by the device") from e
raise
data = ethnl.pause_get({"header": {"dev-index": cfg.ifindex,
@@ -33,13 +40,16 @@ def check_pause(cfg) -> None:
def check_fec(cfg) -> None:
- global ethnl
+ """
+ Check that drivers which support FEC config also report standard
+ FEC stats.
+ """
try:
ethnl.fec_get({"header": {"dev-index": cfg.ifindex}})
except NlError as e:
if e.error == errno.EOPNOTSUPP:
- raise KsftXfailEx("FEC not supported by the device")
+ raise KsftSkipEx("FEC not supported by the device") from e
raise
data = ethnl.fec_get({"header": {"dev-index": cfg.ifindex,
@@ -47,16 +57,48 @@ def check_fec(cfg) -> None:
ksft_true(data['stats'], "driver does not report stats")
+def check_fec_hist(cfg) -> None:
+ """
+ Check that drivers which support FEC histogram statistics report
+ reasonable values.
+ """
+
+ try:
+ data = ethnl.fec_get({"header": {"dev-index": cfg.ifindex,
+ "flags": {'stats'}}})
+ except NlError as e:
+ if e.error == errno.EOPNOTSUPP:
+ raise KsftSkipEx("FEC not supported by the device") from e
+ raise
+ if 'stats' not in data:
+ raise KsftSkipEx("FEC stats not supported by the device")
+ if 'hist' not in data['stats']:
+ raise KsftSkipEx("FEC histogram not supported by the device")
+
+ hist = data['stats']['hist']
+ for fec_bin in hist:
+ for key in ['bin-low', 'bin-high', 'bin-val']:
+ ksft_in(key, fec_bin,
+ "Drivers should always report FEC bin range and value")
+ ksft_ge(fec_bin['bin-high'], fec_bin['bin-low'],
+ "FEC bin range should be valid")
+ if 'bin-val-per-lane' in fec_bin:
+ ksft_eq(sum(fec_bin['bin-val-per-lane']), fec_bin['bin-val'],
+                    "FEC bin value should be equal to sum of per-lane values")
+
+
def pkt_byte_sum(cfg) -> None:
- global netfam, rtnl
+ """
+ Check that qstat and interface stats match in value.
+ """
def get_qstat(test):
- global netfam
stats = netfam.qstats_get({}, dump=True)
if stats:
for qs in stats:
if qs["ifindex"]== test.ifindex:
return qs
+ return None
qstat = get_qstat(cfg)
if qstat is None:
@@ -77,15 +119,14 @@ def pkt_byte_sum(cfg) -> None:
for _ in range(10):
rtstat = rtnl.getlink({"ifi-index": cfg.ifindex})['stats64']
if stat_cmp(rtstat, qstat) < 0:
- raise Exception("RTNL stats are lower, fetched later")
+ raise KsftFailEx("RTNL stats are lower, fetched later")
qstat = get_qstat(cfg)
if stat_cmp(rtstat, qstat) > 0:
- raise Exception("Qstats are lower, fetched later")
+ raise KsftFailEx("Qstats are lower, fetched later")
def qstat_by_ifindex(cfg) -> None:
- global netfam
- global rtnl
+ """ Qstats Netlink API tests - querying by ifindex. """
# Construct a map ifindex -> [dump, by-index, dump]
ifindexes = {}
@@ -93,7 +134,7 @@ def qstat_by_ifindex(cfg) -> None:
for entry in stats:
ifindexes[entry['ifindex']] = [entry, None, None]
- for ifindex in ifindexes.keys():
+ for ifindex in ifindexes:
entry = netfam.qstats_get({"ifindex": ifindex}, dump=True)
ksft_eq(len(entry), 1)
ifindexes[entry[0]['ifindex']][1] = entry[0]
@@ -145,7 +186,7 @@ def qstat_by_ifindex(cfg) -> None:
# Try to get stats for lowest unused ifindex but not 0
devs = rtnl.getlink({}, dump=True)
- all_ifindexes = set([dev["ifi-index"] for dev in devs])
+ all_ifindexes = set(dev["ifi-index"] for dev in devs)
lowest = 2
while lowest in all_ifindexes:
lowest += 1
@@ -158,18 +199,20 @@ def qstat_by_ifindex(cfg) -> None:
@ksft_disruptive
def check_down(cfg) -> None:
+ """ Test statistics (interface and qstat) are not impacted by ifdown """
+
try:
qstat = netfam.qstats_get({"ifindex": cfg.ifindex}, dump=True)[0]
except NlError as e:
if e.error == errno.EOPNOTSUPP:
- raise KsftSkipEx("qstats not supported by the device")
+ raise KsftSkipEx("qstats not supported by the device") from e
raise
ip(f"link set dev {cfg.dev['ifname']} down")
defer(ip, f"link set dev {cfg.dev['ifname']} up")
qstat2 = netfam.qstats_get({"ifindex": cfg.ifindex}, dump=True)[0]
- for k, v in qstat.items():
+ for k in qstat:
ksft_ge(qstat2[k], qstat[k], comment=f"{k} went backwards on device down")
# exercise per-queue API to make sure that "device down" state
@@ -220,14 +263,15 @@ def procfs_downup_hammer(cfg) -> None:
Reading stats via procfs only holds the RCU lock, drivers often try
to sleep when reading the stats, or don't protect against races.
"""
- # Max out the queues, we'll flip between max and 1
+ # Set a large number of queues,
+ # we'll flip between min(max_queues, 64) and 1
channels = ethnl.channels_get({'header': {'dev-index': cfg.ifindex}})
if channels['combined-count'] == 0:
rx_type = 'rx'
else:
rx_type = 'combined'
cur_queue_cnt = channels[f'{rx_type}-count']
- max_queue_cnt = channels[f'{rx_type}-max']
+ max_queue_cnt = min(channels[f'{rx_type}-max'], 64)
cmd(f"ethtool -L {cfg.ifname} {rx_type} {max_queue_cnt}")
defer(cmd, f"ethtool -L {cfg.ifname} {rx_type} {cur_queue_cnt}")
@@ -263,9 +307,12 @@ def procfs_downup_hammer(cfg) -> None:
def main() -> None:
+    """ Ksft boilerplate main """
+
with NetDrvEnv(__file__, queue_count=100) as cfg:
- ksft_run([check_pause, check_fec, pkt_byte_sum, qstat_by_ifindex,
- check_down, procfs_hammer, procfs_downup_hammer],
+ ksft_run([check_pause, check_fec, check_fec_hist, pkt_byte_sum,
+ qstat_by_ifindex, check_down, procfs_hammer,
+ procfs_downup_hammer],
args=(cfg, ))
ksft_exit()
diff --git a/tools/testing/selftests/drivers/net/team/Makefile b/tools/testing/selftests/drivers/net/team/Makefile
index eaf6938f100e..1340b3df9c31 100644
--- a/tools/testing/selftests/drivers/net/team/Makefile
+++ b/tools/testing/selftests/drivers/net/team/Makefile
@@ -1,11 +1,18 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for net selftests
-TEST_PROGS := dev_addr_lists.sh propagation.sh
+TEST_PROGS := \
+ dev_addr_lists.sh \
+ options.sh \
+ propagation.sh \
+# end of TEST_PROGS
TEST_INCLUDES := \
../bonding/lag_lib.sh \
../../../net/forwarding/lib.sh \
- ../../../net/lib.sh
+ ../../../net/in_netns.sh \
+ ../../../net/lib.sh \
+ ../../../net/lib/sh/defer.sh \
+# end of TEST_INCLUDES
include ../../../lib.mk
diff --git a/tools/testing/selftests/drivers/net/team/config b/tools/testing/selftests/drivers/net/team/config
index 636b3525b679..558e1d0cf565 100644
--- a/tools/testing/selftests/drivers/net/team/config
+++ b/tools/testing/selftests/drivers/net/team/config
@@ -3,4 +3,5 @@ CONFIG_IPV6=y
CONFIG_MACVLAN=y
CONFIG_NETDEVSIM=m
CONFIG_NET_TEAM=y
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=y
CONFIG_NET_TEAM_MODE_LOADBALANCE=y
diff --git a/tools/testing/selftests/drivers/net/team/options.sh b/tools/testing/selftests/drivers/net/team/options.sh
new file mode 100755
index 000000000000..44888f32b513
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/team/options.sh
@@ -0,0 +1,188 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# These tests verify basic set and get functionality of the team
+# driver options over netlink.
+
+# Run in private netns.
+test_dir="$(dirname "$0")"
+if [[ $# -eq 0 ]]; then
+ "${test_dir}"/../../../net/in_netns.sh "$0" __subprocess
+ exit $?
+fi
+
+ALL_TESTS="
+ team_test_options
+"
+
+source "${test_dir}/../../../net/lib.sh"
+
+TEAM_PORT="team0"
+MEMBER_PORT="dummy0"
+
+setup()
+{
+ ip link add name "${MEMBER_PORT}" type dummy
+ ip link add name "${TEAM_PORT}" type team
+}
+
+get_and_check_value()
+{
+ local option_name="$1"
+ local expected_value="$2"
+ local port_flag="$3"
+
+ local value_from_get
+
+ if ! value_from_get=$(teamnl "${TEAM_PORT}" getoption "${option_name}" \
+ "${port_flag}"); then
+ echo "Could not get option '${option_name}'" >&2
+ return 1
+ fi
+
+ if [[ "${value_from_get}" != "${expected_value}" ]]; then
+ echo "Incorrect value for option '${option_name}'" >&2
+ echo "get (${value_from_get}) != set (${expected_value})" >&2
+ return 1
+ fi
+}
+
+set_and_check_get()
+{
+ local option_name="$1"
+ local option_value="$2"
+ local port_flag="$3"
+
+ local value_from_get
+
+ if ! teamnl "${TEAM_PORT}" setoption "${option_name}" \
+ "${option_value}" "${port_flag}"; then
+ echo "'setoption ${option_name} ${option_value}' failed" >&2
+ return 1
+ fi
+
+ get_and_check_value "${option_name}" "${option_value}" "${port_flag}"
+ return $?
+}
+
+# Get a "port flag" to pass to the `teamnl` command.
+# E.g. $1="dummy0" -> "port=dummy0",
+# $1="" -> ""
+get_port_flag()
+{
+ local port_name="$1"
+
+ if [[ -n "${port_name}" ]]; then
+ echo "--port=${port_name}"
+ fi
+}
+
+attach_port_if_specified()
+{
+ local port_name="$1"
+
+ if [[ -n "${port_name}" ]]; then
+ ip link set dev "${port_name}" master "${TEAM_PORT}"
+ return $?
+ fi
+}
+
+detach_port_if_specified()
+{
+ local port_name="$1"
+
+ if [[ -n "${port_name}" ]]; then
+ ip link set dev "${port_name}" nomaster
+ return $?
+ fi
+}
+
+# Test that an option's get value matches its set value.
+# Globals:
+# RET - Used by testing infra like `check_err`.
+# EXIT_STATUS - Used by `log_test` for whole script exit value.
+# Arguments:
+# option_name - The name of the option.
+# value_1 - The first value to try setting.
+# value_2 - The second value to try setting.
+# port_name - The (optional) name of the attached port.
+team_test_option()
+{
+ local option_name="$1"
+ local value_1="$2"
+ local value_2="$3"
+ local possible_values="$2 $3 $2"
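+	# value_1 appears twice so that both transitions are exercised
+	# (value_1 -> value_2 and value_2 -> value_1).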
+ local port_name="$4"
+ local port_flag
+
+ RET=0
+
+ echo "Setting '${option_name}' to '${value_1}' and '${value_2}'"
+
+ attach_port_if_specified "${port_name}"
+ check_err $? "Couldn't attach ${port_name} to master"
+ port_flag=$(get_port_flag "${port_name}")
+
+ # Set and get both possible values.
+ for value in ${possible_values}; do
+ set_and_check_get "${option_name}" "${value}" "${port_flag}"
+ check_err $? "Failed to set '${option_name}' to '${value}'"
+ done
+
+ detach_port_if_specified "${port_name}"
+ check_err $? "Couldn't detach ${port_name} from its master"
+
+ log_test "Set + Get '${option_name}' test"
+}
+
+# Test that getting a non-existent option fails.
+# Globals:
+# RET - Used by testing infra like `check_err`.
+# EXIT_STATUS - Used by `log_test` for whole script exit value.
+# Arguments:
+# option_name - The name of the option.
+# port_name - The (optional) name of the attached port.
+team_test_get_option_fails()
+{
+ local option_name="$1"
+ local port_name="$2"
+ local port_flag
+
+ RET=0
+
+ attach_port_if_specified "${port_name}"
+ check_err $? "Couldn't attach ${port_name} to master"
+ port_flag=$(get_port_flag "${port_name}")
+
+ # Just confirm that getting the value fails.
+ teamnl "${TEAM_PORT}" getoption "${option_name}" "${port_flag}"
+ check_fail $? "Shouldn't be able to get option '${option_name}'"
+
+ detach_port_if_specified "${port_name}"
+
+ log_test "Get '${option_name}' fails"
+}
+
+team_test_options()
+{
+ # Wrong option name behavior.
+ team_test_get_option_fails fake_option1
+ team_test_get_option_fails fake_option2 "${MEMBER_PORT}"
+
+ # Correct set and get behavior.
+ team_test_option mode activebackup loadbalance
+ team_test_option notify_peers_count 0 5
+ team_test_option notify_peers_interval 0 5
+ team_test_option mcast_rejoin_count 0 5
+ team_test_option mcast_rejoin_interval 0 5
+ team_test_option enabled true false "${MEMBER_PORT}"
+ team_test_option user_linkup true false "${MEMBER_PORT}"
+ team_test_option user_linkup_enabled true false "${MEMBER_PORT}"
+ team_test_option priority 10 20 "${MEMBER_PORT}"
+ team_test_option queue_id 0 1 "${MEMBER_PORT}"
+}
+
+require_command teamnl
+setup
+tests_run
+exit "${EXIT_STATUS}"
diff --git a/tools/testing/selftests/drivers/net/virtio_net/Makefile b/tools/testing/selftests/drivers/net/virtio_net/Makefile
index 7ec7cd3ab2cc..868ece3fea1f 100644
--- a/tools/testing/selftests/drivers/net/virtio_net/Makefile
+++ b/tools/testing/selftests/drivers/net/virtio_net/Makefile
@@ -1,15 +1,12 @@
# SPDX-License-Identifier: GPL-2.0+ OR MIT
-TEST_PROGS = basic_features.sh \
- #
+TEST_PROGS = basic_features.sh
-TEST_FILES = \
- virtio_net_common.sh \
- #
+TEST_FILES = virtio_net_common.sh
TEST_INCLUDES = \
- ../../../net/forwarding/lib.sh \
- ../../../net/lib.sh \
- #
+ ../../../net/forwarding/lib.sh \
+ ../../../net/lib.sh \
+# end of TEST_INCLUDES
include ../../../lib.mk
diff --git a/tools/testing/selftests/drivers/net/xdp.py b/tools/testing/selftests/drivers/net/xdp.py
new file mode 100755
index 000000000000..e54df158dfe9
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/xdp.py
@@ -0,0 +1,779 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+This file contains tests to verify native XDP support in network drivers.
+The tests utilize the BPF program `xdp_native.bpf.o` from the `selftests.net.lib`
+directory, with each test focusing on a specific aspect of XDP functionality.
+"""
+import random
+import string
+from dataclasses import dataclass
+from enum import Enum
+
+from lib.py import ksft_run, ksft_exit, ksft_eq, ksft_ge, ksft_ne, ksft_pr
+from lib.py import KsftNamedVariant, ksft_variants
+from lib.py import KsftFailEx, NetDrvEpEnv
+from lib.py import EthtoolFamily, NetdevFamily, NlError
+from lib.py import bkg, cmd, rand_port, wait_port_listen
+from lib.py import ip, bpftool, defer
+
+
+class TestConfig(Enum):
+ """Enum for XDP configuration options."""
+ MODE = 0 # Configures the BPF program for a specific test
+ PORT = 1 # Port configuration to communicate with the remote host
+ ADJST_OFFSET = 2 # Tail/Head adjustment offset for extension/shrinking
+ ADJST_TAG = 3 # Adjustment tag to annotate the start and end of extension
+
+
+class XDPAction(Enum):
+ """Enum for XDP actions."""
+ PASS = 0 # Pass the packet up to the stack
+ DROP = 1 # Drop the packet
+ TX = 2 # Route the packet to the remote host
+ TAIL_ADJST = 3 # Adjust the tail of the packet
+ HEAD_ADJST = 4 # Adjust the head of the packet
+
+
+class XDPStats(Enum):
+ """Enum for XDP statistics."""
+ RX = 0 # Count of valid packets received for testing
+ PASS = 1 # Count of packets passed up to the stack
+ DROP = 2 # Count of packets dropped
+ TX = 3 # Count of incoming packets routed to the remote host
+ ABORT = 4 # Count of packets that were aborted
+
+
+@dataclass
+class BPFProgInfo:
+ """Data class to store information about a BPF program."""
+ name: str # Name of the BPF program
+ file: str # BPF program object file
+ xdp_sec: str = "xdp" # XDP section name (e.g., "xdp" or "xdp.frags")
+ mtu: int = 1500 # Maximum Transmission Unit, default is 1500
+
+
+def _exchg_udp(cfg, port, test_string):
+ """
+ Exchanges UDP packets between a local and remote host using the socat tool.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ port: Port number to use for the UDP communication.
+ test_string: String that the remote host will send.
+
+ Returns:
+ The string received by the test host.
+ """
+ cfg.require_cmd("socat", remote=True)
+
+ rx_udp_cmd = f"socat -{cfg.addr_ipver} -T 2 -u UDP-RECV:{port},reuseport STDOUT"
+ tx_udp_cmd = f"echo -n {test_string} | socat -t 2 -u STDIN UDP:{cfg.baddr}:{port}"
+
+ with bkg(rx_udp_cmd, exit_wait=True) as nc:
+ wait_port_listen(port, proto="udp")
+ cmd(tx_udp_cmd, host=cfg.remote, shell=True)
+
+ return nc.stdout.strip()
+
+
+def _test_udp(cfg, port, size=256):
+ """
+ Tests UDP packet exchange between a local and remote host.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ port: Port number to use for the UDP communication.
+ size: The length of the test string to be exchanged, default is 256 characters.
+
+ Returns:
+ bool: True if the received string matches the sent string, False otherwise.
+ """
+ test_str = "".join(random.choice(string.ascii_lowercase) for _ in range(size))
+ recvd_str = _exchg_udp(cfg, port, test_str)
+
+ return recvd_str == test_str
+
+
+def _load_xdp_prog(cfg, bpf_info):
+ """
+ Loads an XDP program onto a network interface.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ bpf_info: BPFProgInfo object containing information about the BPF program.
+
+ Returns:
+ dict: A dictionary containing the XDP program ID, name, and associated map IDs.
+ """
+ abs_path = cfg.net_lib_dir / bpf_info.file
+ prog_info = {}
+
+ cmd(f"ip link set dev {cfg.remote_ifname} mtu {bpf_info.mtu}", shell=True, host=cfg.remote)
+ defer(ip, f"link set dev {cfg.remote_ifname} mtu 1500", host=cfg.remote)
+
+ cmd(
+ f"ip link set dev {cfg.ifname} mtu {bpf_info.mtu} xdpdrv obj {abs_path} sec {bpf_info.xdp_sec}",
+ shell=True
+ )
+ defer(ip, f"link set dev {cfg.ifname} mtu 1500 xdpdrv off")
+
+ xdp_info = ip(f"-d link show dev {cfg.ifname}", json=True)[0]
+ prog_info["id"] = xdp_info["xdp"]["prog"]["id"]
+ prog_info["name"] = xdp_info["xdp"]["prog"]["name"]
+ prog_id = prog_info["id"]
+
+ map_ids = bpftool(f"prog show id {prog_id}", json=True)["map_ids"]
+ prog_info["maps"] = {}
+ for map_id in map_ids:
+ name = bpftool(f"map show id {map_id}", json=True)["name"]
+ prog_info["maps"][name] = map_id
+
+ return prog_info
+
+
+def format_hex_bytes(value):
+ """
+ Helper function that converts an integer into a formatted hexadecimal byte string.
+
+ Args:
+ value: An integer representing the number to be converted.
+
+ Returns:
+ A string representing hexadecimal equivalent of value, with bytes separated by spaces.
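+        For example (illustrative): format_hex_bytes(1) returns
+        '01 00 00 00', which is how this test encodes 4-byte map keys and
+        values for bpftool.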
+ """
+ hex_str = value.to_bytes(4, byteorder='little', signed=True)
+ return ' '.join(f'{byte:02x}' for byte in hex_str)
+
+
+def _set_xdp_map(map_name, key, value):
+ """
+ Updates an XDP map with a given key-value pair using bpftool.
+
+ Args:
+ map_name: The name of the XDP map to update.
+        key: The integer key to update in the map.
+        value: The integer value to associate with the key.
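+
+    For example (illustrative): _set_xdp_map("map_xdp_setup",
+    TestConfig.PORT.value, 8080) runs:
+    `bpftool map update name map_xdp_setup key hex 01 00 00 00
+    value hex 90 1f 00 00`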
+ """
+ key_formatted = format_hex_bytes(key)
+ value_formatted = format_hex_bytes(value)
+ bpftool(
+ f"map update name {map_name} key hex {key_formatted} value hex {value_formatted}"
+ )
+
+
+def _get_stats(xdp_map_id):
+ """
+ Retrieves and formats statistics from an XDP map.
+
+ Args:
+ xdp_map_id: The ID of the XDP map from which to retrieve statistics.
+
+ Returns:
+ A dictionary containing formatted packet statistics for various XDP actions.
+ The keys are based on the XDPStats Enum values.
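+        For example (illustrative): {0: 2, 1: 2, 2: 0, 3: 0, 4: 0} after two
+        packets were received and passed up to the stack.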
+
+ Raises:
+ KsftFailEx: If the stats retrieval fails.
+ """
+ stats_dump = bpftool(f"map dump id {xdp_map_id}", json=True)
+ if not stats_dump:
+ raise KsftFailEx(f"Failed to get stats for map {xdp_map_id}")
+
+ stats_formatted = {}
+ for key in range(0, 5):
+ val = stats_dump[key]["formatted"]["value"]
+ if stats_dump[key]["formatted"]["key"] == XDPStats.RX.value:
+ stats_formatted[XDPStats.RX.value] = val
+ elif stats_dump[key]["formatted"]["key"] == XDPStats.PASS.value:
+ stats_formatted[XDPStats.PASS.value] = val
+ elif stats_dump[key]["formatted"]["key"] == XDPStats.DROP.value:
+ stats_formatted[XDPStats.DROP.value] = val
+ elif stats_dump[key]["formatted"]["key"] == XDPStats.TX.value:
+ stats_formatted[XDPStats.TX.value] = val
+ elif stats_dump[key]["formatted"]["key"] == XDPStats.ABORT.value:
+ stats_formatted[XDPStats.ABORT.value] = val
+
+ return stats_formatted
+
+
+def _test_pass(cfg, bpf_info, msg_sz):
+ """
+ Tests the XDP_PASS action by exchanging UDP packets.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ bpf_info: BPFProgInfo object containing information about the BPF program.
+ msg_sz: Size of the test message to send.
+ """
+
+ prog_info = _load_xdp_prog(cfg, bpf_info)
+ port = rand_port()
+
+ _set_xdp_map("map_xdp_setup", TestConfig.MODE.value, XDPAction.PASS.value)
+ _set_xdp_map("map_xdp_setup", TestConfig.PORT.value, port)
+
+ ksft_eq(_test_udp(cfg, port, msg_sz), True, "UDP packet exchange failed")
+ stats = _get_stats(prog_info["maps"]["map_xdp_stats"])
+
+ ksft_ne(stats[XDPStats.RX.value], 0, "RX stats should not be zero")
+ ksft_eq(stats[XDPStats.RX.value], stats[XDPStats.PASS.value], "RX and PASS stats mismatch")
+
+
+def test_xdp_native_pass_sb(cfg):
+    Tests the XDP_PASS action for a single-buff case.
+ Tests the XDP_PASS action for single buffer case.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ """
+ bpf_info = BPFProgInfo("xdp_prog", "xdp_native.bpf.o", "xdp", 1500)
+
+ _test_pass(cfg, bpf_info, 256)
+
+
+def test_xdp_native_pass_mb(cfg):
+ """
+    Tests the XDP_PASS action for a multi-buff case.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ """
+ bpf_info = BPFProgInfo("xdp_prog_frags", "xdp_native.bpf.o", "xdp.frags", 9000)
+
+ _test_pass(cfg, bpf_info, 8000)
+
+
+def _test_drop(cfg, bpf_info, msg_sz):
+ """
+ Tests the XDP_DROP action by exchanging UDP packets.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ bpf_info: BPFProgInfo object containing information about the BPF program.
+ msg_sz: Size of the test message to send.
+ """
+
+ prog_info = _load_xdp_prog(cfg, bpf_info)
+ port = rand_port()
+
+ _set_xdp_map("map_xdp_setup", TestConfig.MODE.value, XDPAction.DROP.value)
+ _set_xdp_map("map_xdp_setup", TestConfig.PORT.value, port)
+
+ ksft_eq(_test_udp(cfg, port, msg_sz), False, "UDP packet exchange should fail")
+ stats = _get_stats(prog_info["maps"]["map_xdp_stats"])
+
+    ksft_ne(stats[XDPStats.RX.value], 0, "RX stats should not be zero")
+ ksft_eq(stats[XDPStats.RX.value], stats[XDPStats.DROP.value], "RX and DROP stats mismatch")
+
+
+def test_xdp_native_drop_sb(cfg):
+ """
+    Tests the XDP_DROP action for a single-buff case.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ """
+ bpf_info = BPFProgInfo("xdp_prog", "xdp_native.bpf.o", "xdp", 1500)
+
+ _test_drop(cfg, bpf_info, 256)
+
+
+def test_xdp_native_drop_mb(cfg):
+ """
+ Tests the XDP_DROP action for a multi-buff case.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ """
+ bpf_info = BPFProgInfo("xdp_prog_frags", "xdp_native.bpf.o", "xdp.frags", 9000)
+
+ _test_drop(cfg, bpf_info, 8000)
+
+
+def _test_xdp_native_tx(cfg, bpf_info, payload_lens):
+ """
+ Tests the XDP_TX action.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ bpf_info: BPFProgInfo object containing the BPF program metadata.
+ payload_lens: Array of packet lengths to send.
+ """
+ cfg.require_cmd("socat", remote=True)
+ prog_info = _load_xdp_prog(cfg, bpf_info)
+ port = rand_port()
+
+ _set_xdp_map("map_xdp_setup", TestConfig.MODE.value, XDPAction.TX.value)
+ _set_xdp_map("map_xdp_setup", TestConfig.PORT.value, port)
+
+ expected_pkts = 0
+ for payload_len in payload_lens:
+ test_string = "".join(
+ random.choice(string.ascii_lowercase) for _ in range(payload_len)
+ )
+
+ rx_udp = f"socat -{cfg.addr_ipver} -T 2 " + \
+ f"-u UDP-RECV:{port},reuseport STDOUT"
+
+ # Writing zero bytes to stdin gets ignored by socat,
+        # but with the shut-null flag socat generates a zero-sized packet
+ # when the socket is closed.
+ tx_cmd_suffix = ",shut-null" if payload_len == 0 else ""
+ tx_udp = f"echo -n {test_string} | socat -t 2 " + \
+ f"-u STDIN UDP:{cfg.baddr}:{port}{tx_cmd_suffix}"
+
+ with bkg(rx_udp, host=cfg.remote, exit_wait=True) as rnc:
+ wait_port_listen(port, proto="udp", host=cfg.remote)
+ cmd(tx_udp, host=cfg.remote, shell=True)
+
+ ksft_eq(rnc.stdout.strip(), test_string, "UDP packet exchange failed")
+
+ expected_pkts += 1
+ stats = _get_stats(prog_info["maps"]["map_xdp_stats"])
+ ksft_eq(stats[XDPStats.RX.value], expected_pkts, "RX stats mismatch")
+ ksft_eq(stats[XDPStats.TX.value], expected_pkts, "TX stats mismatch")
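For context on the shut-null comment above: a zero-length UDP datagram is valid on the wire, which is exactly what the flag produces. A minimal Python sketch for comparison (address and port are hypothetical):

    import socket

    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.sendto(b"", ("192.0.2.1", 12345))  # a valid, zero-length UDP datagram
    s.close()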
+
+
+def test_xdp_native_tx_sb(cfg):
+ """
+ Tests the XDP_TX action for a single-buff case.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ """
+ bpf_info = BPFProgInfo("xdp_prog", "xdp_native.bpf.o", "xdp", 1500)
+
+ # Ensure there's enough room for an ETH / IP / UDP header
+ pkt_hdr_len = 42 if cfg.addr_ipver == "4" else 62
+
+ _test_xdp_native_tx(cfg, bpf_info, [0, 1500 // 2, 1500 - pkt_hdr_len])
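The 42/62 byte constants follow directly from the header sizes; a quick sanity check (names are illustrative):

    ETH_HDR, IPV4_HDR, IPV6_HDR, UDP_HDR = 14, 20, 40, 8

    assert ETH_HDR + IPV4_HDR + UDP_HDR == 42   # IPv4: pkt_hdr_len
    assert ETH_HDR + IPV6_HDR + UDP_HDR == 62   # IPv6: pkt_hdr_len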
+
+
+def test_xdp_native_tx_mb(cfg):
+ """
+ Tests the XDP_TX action for a multi-buff case.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ """
+ bpf_info = BPFProgInfo("xdp_prog_frags", "xdp_native.bpf.o",
+ "xdp.frags", 9000)
+ # The first packet ensures we exercise the fragmented code path,
+ # and the subsequent zero-sized packet ensures the driver
+ # reinitializes xdp_buff correctly.
+ _test_xdp_native_tx(cfg, bpf_info, [8000, 0])
+
+
+def _validate_res(res, offset_lst, pkt_sz_lst):
+ """
+ Validates the result of a test.
+
+ Args:
+ res: The result of the test, which should be a dictionary with a "status" key.
+
+ Raises:
+ KsftFailEx: If the test fails to pass any combination of offset and packet size.
+ """
+ if "status" not in res:
+ raise KsftFailEx("Missing 'status' key in result dictionary")
+
+ # Fail hard if even the first offset/packet-size combination failed
+ if res["status"] == "fail":
+ if res["offset"] == offset_lst[0] and res["pkt_sz"] == pkt_sz_lst[0]:
+ raise KsftFailEx(f"{res['reason']}")
+
+ # Get the previous offset and packet size to report the successful run
+ tmp_idx = offset_lst.index(res["offset"])
+ prev_offset = offset_lst[tmp_idx - 1]
+ if tmp_idx == 0:
+ tmp_idx = pkt_sz_lst.index(res["pkt_sz"])
+ prev_pkt_sz = pkt_sz_lst[tmp_idx - 1]
+ else:
+ prev_pkt_sz = res["pkt_sz"]
+
+ # Use these values for error reporting
+ ksft_pr(
+ f"Failed run: pkt_sz {res['pkt_sz']}, offset {res['offset']}. "
+ f"Last successful run: pkt_sz {prev_pkt_sz}, offset {prev_offset}. "
+ f"Reason: {res['reason']}"
+ )
+
+
+def _check_for_failures(recvd_str, stats):
+ """
+ Checks for common failures while adjusting headroom or tailroom.
+
+ Args:
+ recvd_str: The string received from the remote host after sending a test string.
+ stats: A dictionary containing formatted packet statistics for various XDP actions.
+
+ Returns:
+ str: A string describing the failure reason if a failure is detected, otherwise None.
+ """
+
+ # Any adjustment failure results in an abort, hence we track this counter
+ if stats[XDPStats.ABORT.value] != 0:
+ return "Adjustment failed"
+
+ # Since we use aggregate stats for a single test across all offsets and packet sizes,
+ # RX stats alone can't detect a data exchange failure without taking a previous
+ # snapshot. It is easier to simply check that the received string is non-empty.
+ if len(recvd_str) == 0:
+ return "Data exchange failed"
+
+ # Check for RX and PASS stats mismatch. Ideally, they should be equal for a successful run
+ if stats[XDPStats.RX.value] != stats[XDPStats.PASS.value]:
+ return "RX stats mismatch"
+
+ return None
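A worked example of the checks above, with hypothetical stat values:

    ok_stats = {XDPStats.ABORT.value: 0, XDPStats.RX.value: 10, XDPStats.PASS.value: 10}
    assert _check_for_failures("payload", ok_stats) is None
    assert _check_for_failures("", ok_stats) == "Data exchange failed"

    bad_stats = {**ok_stats, XDPStats.PASS.value: 9}   # RX != PASS
    assert _check_for_failures("payload", bad_stats) == "RX stats mismatch"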
+
+
+def _test_xdp_native_tail_adjst(cfg, pkt_sz_lst, offset_lst):
+ """
+ Tests the XDP tail adjustment functionality.
+
+ This function loads the multi-buffer XDP program and configures the
+ XDP map for tail adjustment. It then validates the tail adjustment
+ by sending and receiving UDP packets with the specified packet sizes
+ and offsets.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ pkt_sz_lst: List of packet sizes to test.
+ offset_lst: List of offsets to validate support for tail adjustment.
+
+ Returns:
+ dict: A dictionary with test status and failure details if applicable.
+ """
+ port = rand_port()
+ bpf_info = BPFProgInfo("xdp_prog_frags", "xdp_native.bpf.o", "xdp.frags", 9000)
+
+ prog_info = _load_xdp_prog(cfg, bpf_info)
+
+ # Configure the XDP map for tail adjustment
+ _set_xdp_map("map_xdp_setup", TestConfig.MODE.value, XDPAction.TAIL_ADJST.value)
+ _set_xdp_map("map_xdp_setup", TestConfig.PORT.value, port)
+
+ for offset in offset_lst:
+ tag = format(random.randint(65, 90), "02x")
+
+ _set_xdp_map("map_xdp_setup", TestConfig.ADJST_OFFSET.value, offset)
+ if offset > 0:
+ _set_xdp_map("map_xdp_setup", TestConfig.ADJST_TAG.value, int(tag, 16))
+
+ for pkt_sz in pkt_sz_lst:
+ test_str = "".join(random.choice(string.ascii_lowercase) for _ in range(pkt_sz))
+ recvd_str = _exchg_udp(cfg, port, test_str)
+ stats = _get_stats(prog_info["maps"]["map_xdp_stats"])
+
+ failure = _check_for_failures(recvd_str, stats)
+ if failure is not None:
+ return {
+ "status": "fail",
+ "reason": failure,
+ "offset": offset,
+ "pkt_sz": pkt_sz,
+ }
+
+ # Validate data content based on offset direction
+ expected_data = None
+ if offset > 0:
+ expected_data = test_str + (offset * chr(int(tag, 16)))
+ else:
+ expected_data = test_str[0:pkt_sz + offset]
+
+ if recvd_str != expected_data:
+ return {
+ "status": "fail",
+ "reason": "Data mismatch",
+ "offset": offset,
+ "pkt_sz": pkt_sz,
+ }
+
+ return {"status": "pass"}
+
+
+def test_xdp_native_adjst_tail_grow_data(cfg):
+ """
+ Tests the XDP tail adjustment by growing packet data.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ """
+ pkt_sz_lst = [512, 1024, 2048]
+ offset_lst = [1, 16, 32, 64, 128, 256]
+ res = _test_xdp_native_tail_adjst(
+ cfg,
+ pkt_sz_lst,
+ offset_lst,
+ )
+
+ _validate_res(res, offset_lst, pkt_sz_lst)
+
+
+def test_xdp_native_adjst_tail_shrnk_data(cfg):
+ """
+ Tests the XDP tail adjustment by shrinking packet data.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ """
+ pkt_sz_lst = [512, 1024, 2048]
+ offset_lst = [-16, -32, -64, -128, -256]
+ res = _test_xdp_native_tail_adjst(
+ cfg,
+ pkt_sz_lst,
+ offset_lst,
+ )
+
+ _validate_res(res, offset_lst, pkt_sz_lst)
+
+
+def get_hds_thresh(cfg):
+ """
+ Retrieves the header data split (HDS) threshold for a network interface.
+
+ Args:
+ cfg: Configuration object containing network settings.
+
+ Returns:
+ The HDS threshold value. If the threshold is not supported or an error occurs,
+ a default value of 1500 is returned.
+ """
+ ethnl = cfg.ethnl
+ hds_thresh = 1500
+
+ try:
+ rings = ethnl.rings_get({'header': {'dev-index': cfg.ifindex}})
+ if 'hds-thresh' not in rings:
+ ksft_pr(f'hds-thresh not supported. Using default: {hds_thresh}')
+ return hds_thresh
+ hds_thresh = rings['hds-thresh']
+ except NlError as e:
+ ksft_pr(f"Failed to get rings: {e}. Using default: {hds_thresh}")
+
+ return hds_thresh
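A worked example of how this threshold is used by the head-adjustment test below (values hypothetical):

    hds_thresh = 1536           # device threshold, as reported via ethtool netlink
    pkt_sz, offset = 2048, 64   # payload exceeds the threshold
    l2_cut_off = 28             # IPv4: (14 + 20 + 8) - 14

    # With HDS active the head buffer holds only headers, so eating 64
    # bytes would run past it; such combinations are skipped.
    assert pkt_sz > hds_thresh and offset > l2_cut_off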
+
+
+def _test_xdp_native_head_adjst(cfg, prog, pkt_sz_lst, offset_lst):
+ """
+ Tests the XDP head adjustment action for a multi-buffer case.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ prog: Name of the XDP program to load.
+ pkt_sz_lst: List of packet sizes to test.
+ offset_lst: List of offsets to validate support for head adjustment.
+
+ This function performs the head adjustment test by sending and
+ receiving UDP packets with the given packet sizes and offsets.
+ """
+ cfg.require_cmd("socat", remote=True)
+
+ prog_info = _load_xdp_prog(cfg, BPFProgInfo(prog, "xdp_native.bpf.o", "xdp.frags", 9000))
+ port = rand_port()
+
+ _set_xdp_map("map_xdp_setup", TestConfig.MODE.value, XDPAction.HEAD_ADJST.value)
+ _set_xdp_map("map_xdp_setup", TestConfig.PORT.value, port)
+
+ hds_thresh = get_hds_thresh(cfg)
+ for offset in offset_lst:
+ for pkt_sz in pkt_sz_lst:
+ # The "head" buffer must contain at least the Ethernet header
+ # after we eat into it. We send large-enough packets, but if HDS
+ # is enabled head will only contain headers. Don't try to eat
+ # more than 28 bytes (UDPv4 + eth hdr left: (14 + 20 + 8) - 14)
+ l2_cut_off = 28 if cfg.addr_ipver == "4" else 48
+ if pkt_sz > hds_thresh and offset > l2_cut_off:
+ ksft_pr(
+ f"Failed run: pkt_sz ({pkt_sz}) > HDS threshold ({hds_thresh}) and "
+ f"offset {offset} > {l2_cut_off}"
+ )
+ return {"status": "pass"}
+
+ test_str = ''.join(random.choice(string.ascii_lowercase) for _ in range(pkt_sz))
+ tag = format(random.randint(65, 90), '02x')
+
+ _set_xdp_map("map_xdp_setup",
+ TestConfig.ADJST_OFFSET.value,
+ offset)
+ _set_xdp_map("map_xdp_setup", TestConfig.ADJST_TAG.value, int(tag, 16))
+ _set_xdp_map("map_xdp_setup", TestConfig.ADJST_OFFSET.value, offset)
+
+ recvd_str = _exchg_udp(cfg, port, test_str)
+
+ # Check for failures around adjustment and data exchange
+ failure = _check_for_failures(recvd_str, _get_stats(prog_info['maps']['map_xdp_stats']))
+ if failure is not None:
+ return {
+ "status": "fail",
+ "reason": failure,
+ "offset": offset,
+ "pkt_sz": pkt_sz
+ }
+
+ # Validate data content based on offset direction
+ expected_data = None
+ if offset < 0:
+ expected_data = chr(int(tag, 16)) * (0 - offset) + test_str
+ else:
+ expected_data = test_str[offset:]
+
+ if recvd_str != expected_data:
+ return {
+ "status": "fail",
+ "reason": "Data mismatch",
+ "offset": offset,
+ "pkt_sz": pkt_sz
+ }
+
+ return {"status": "pass"}
+
+
+def test_xdp_native_adjst_head_grow_data(cfg):
+ """
+ Tests XDP head adjustment that grows the packet data.
+
+ Args:
+ cfg: Configuration object containing network settings.
+
+ This function sets up the packet size and offset lists, then calls
+ _test_xdp_native_head_adjst to perform the actual test. The test
+ passes if the packet data is successfully grown for the given packet
+ sizes and offsets.
+ """
+ pkt_sz_lst = [512, 1024, 2048]
+
+ # Negative offsets shrink the headroom, which grows the payload
+ offset_lst = [-16, -32, -64, -128, -256]
+ res = _test_xdp_native_head_adjst(cfg, "xdp_prog_frags", pkt_sz_lst, offset_lst)
+
+ _validate_res(res, offset_lst, pkt_sz_lst)
+
+
+def test_xdp_native_adjst_head_shrnk_data(cfg):
+ """
+ Tests XDP head adjustment that shrinks the packet data.
+
+ Args:
+ cfg: Configuration object containing network settings.
+
+ This function sets up the packet size and offset lists, then calls
+ _test_xdp_native_head_adjst to perform the actual test. The test
+ passes if the packet data is successfully shrunk for the given packet
+ sizes and offsets.
+ """
+ pkt_sz_lst = [512, 1024, 2048]
+
+ # Positive offsets grow the headroom, which shrinks the payload
+ offset_lst = [16, 32, 64, 128, 256]
+ res = _test_xdp_native_head_adjst(cfg, "xdp_prog_frags", pkt_sz_lst, offset_lst)
+
+ _validate_res(res, offset_lst, pkt_sz_lst)
+
+
+@ksft_variants([
+ KsftNamedVariant("pass", XDPAction.PASS),
+ KsftNamedVariant("drop", XDPAction.DROP),
+ KsftNamedVariant("tx", XDPAction.TX),
+])
+def test_xdp_native_qstats(cfg, act):
+ """
+ Send 1000 messages and expect the XDP action specified in @act.
+ Make sure the packets were counted in the interface-level qstats
+ (Rx, and Tx if act is TX).
+ """
+
+ cfg.require_cmd("socat")
+
+ bpf_info = BPFProgInfo("xdp_prog", "xdp_native.bpf.o", "xdp", 1500)
+ prog_info = _load_xdp_prog(cfg, bpf_info)
+ port = rand_port()
+
+ _set_xdp_map("map_xdp_setup", TestConfig.MODE.value, act.value)
+ _set_xdp_map("map_xdp_setup", TestConfig.PORT.value, port)
+
+ # Discard the input, but we need a listener to avoid ICMP errors
+ rx_udp = f"socat -{cfg.addr_ipver} -T 2 -u UDP-RECV:{port},reuseport " + \
+ "/dev/null"
+ # Listener runs on "remote" in case of XDP_TX
+ rx_host = cfg.remote if act == XDPAction.TX else None
+ # We want to spew 1000 packets quickly; bash seems to do a good enough job.
+ # Each reopening of the socket gives us a different local port (for RSS),
+ # see the Python sketch after this function.
+ tx_udp = "for _ in `seq 20`; do " \
+ f"exec 5<>/dev/udp/{cfg.addr}/{port}; " \
+ "for i in `seq 50`; do echo a >&5; done; " \
+ "exec 5>&-; done"
+
+ cfg.wait_hw_stats_settle()
+ # Qstats have more clearly defined semantics than rtnetlink.
+ # XDP is the "first layer of the stack" so XDP packets should be counted
+ # as received and sent as if the decision was made in the routing layer.
+ before = cfg.netnl.qstats_get({"ifindex": cfg.ifindex}, dump=True)[0]
+
+ with bkg(rx_udp, host=rx_host, exit_wait=True):
+ wait_port_listen(port, proto="udp", host=rx_host)
+ cmd(tx_udp, host=cfg.remote, shell=True)
+
+ cfg.wait_hw_stats_settle()
+ after = cfg.netnl.qstats_get({"ifindex": cfg.ifindex}, dump=True)[0]
+
+ expected_pkts = 1000
+ ksft_ge(after['rx-packets'] - before['rx-packets'], expected_pkts)
+ if act == XDPAction.TX:
+ ksft_ge(after['tx-packets'] - before['tx-packets'], expected_pkts)
+
+ stats = _get_stats(prog_info["maps"]["map_xdp_stats"])
+ ksft_eq(stats[XDPStats.RX.value], expected_pkts, "XDP RX stats mismatch")
+ if act == XDPAction.TX:
+ ksft_eq(stats[XDPStats.TX.value], expected_pkts, "XDP TX stats mismatch")
+
+ # Flip the ring count back and forth to make sure the stats from XDP rings
+ # don't get lost.
+ chans = cfg.ethnl.channels_get({'header': {'dev-index': cfg.ifindex}})
+ if chans.get('combined-count', 0) > 1:
+ cfg.ethnl.channels_set({'header': {'dev-index': cfg.ifindex},
+ 'combined-count': 1})
+ cfg.ethnl.channels_set({'header': {'dev-index': cfg.ifindex},
+ 'combined-count': chans['combined-count']})
+ before = after
+ after = cfg.netnl.qstats_get({"ifindex": cfg.ifindex}, dump=True)[0]
+
+ ksft_ge(after['rx-packets'], before['rx-packets'])
+ if act == XDPAction.TX:
+ ksft_ge(after['tx-packets'], before['tx-packets'])
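A rough Python equivalent of the bash spew loop above (address and port hypothetical); each new socket binds a fresh ephemeral source port, spreading the 1000 packets across RSS queues:

    import socket

    for _ in range(20):
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # new source port
        for _ in range(50):
            s.sendto(b"a\n", ("192.0.2.1", 12345))
        s.close()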
+
+
+def main():
+ """
+ Main function to execute the XDP tests.
+
+ This function runs a series of tests to validate XDP support for
+ both single- and multi-buffer cases. It uses the NetDrvEpEnv context
+ manager to manage the network driver environment and the ksft_run
+ function to execute the tests.
+ """
+ with NetDrvEpEnv(__file__) as cfg:
+ cfg.ethnl = EthtoolFamily()
+ cfg.netnl = NetdevFamily()
+ ksft_run(
+ [
+ test_xdp_native_pass_sb,
+ test_xdp_native_pass_mb,
+ test_xdp_native_drop_sb,
+ test_xdp_native_drop_mb,
+ test_xdp_native_tx_sb,
+ test_xdp_native_tx_mb,
+ test_xdp_native_adjst_tail_grow_data,
+ test_xdp_native_adjst_tail_shrnk_data,
+ test_xdp_native_adjst_head_grow_data,
+ test_xdp_native_adjst_head_shrnk_data,
+ test_xdp_native_qstats,
+ ],
+ args=(cfg,))
+ ksft_exit()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/testing/selftests/drivers/ntsync/ntsync.c b/tools/testing/selftests/drivers/ntsync/ntsync.c
index 3aad311574c4..e6a37214aa46 100644
--- a/tools/testing/selftests/drivers/ntsync/ntsync.c
+++ b/tools/testing/selftests/drivers/ntsync/ntsync.c
@@ -12,7 +12,7 @@
#include <time.h>
#include <pthread.h>
#include <linux/ntsync.h>
-#include "../../kselftest_harness.h"
+#include "kselftest_harness.h"
static int read_sem_state(int sem, __u32 *count, __u32 *max)
{
diff --git a/tools/testing/selftests/drivers/s390x/uvdevice/test_uvdevice.c b/tools/testing/selftests/drivers/s390x/uvdevice/test_uvdevice.c
index 7ee7492138c6..14df9aa07308 100644
--- a/tools/testing/selftests/drivers/s390x/uvdevice/test_uvdevice.c
+++ b/tools/testing/selftests/drivers/s390x/uvdevice/test_uvdevice.c
@@ -14,7 +14,7 @@
#include <asm/uvdevice.h>
-#include "../../../kselftest_harness.h"
+#include "kselftest_harness.h"
#define UV_PATH "/dev/uv"
#define BUFFER_SIZE 0x200
diff --git a/tools/testing/selftests/exec/check-exec.c b/tools/testing/selftests/exec/check-exec.c
index 55bce47e56b7..f2397e75aa7c 100644
--- a/tools/testing/selftests/exec/check-exec.c
+++ b/tools/testing/selftests/exec/check-exec.c
@@ -30,7 +30,7 @@
#define _ASM_GENERIC_FCNTL_H
#include <linux/fcntl.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
static int sys_execveat(int dirfd, const char *pathname, char *const argv[],
char *const envp[], int flags)
diff --git a/tools/testing/selftests/exec/execveat.c b/tools/testing/selftests/exec/execveat.c
index 8fb7395fd35b..d37c068ed5fe 100644
--- a/tools/testing/selftests/exec/execveat.c
+++ b/tools/testing/selftests/exec/execveat.c
@@ -21,7 +21,7 @@
#include <string.h>
#include <unistd.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#define TESTS_EXPECTED 54
#define TEST_NAME_LEN (PATH_MAX * 4)
diff --git a/tools/testing/selftests/exec/load_address.c b/tools/testing/selftests/exec/load_address.c
index 8257fddba8c8..55fd3732f029 100644
--- a/tools/testing/selftests/exec/load_address.c
+++ b/tools/testing/selftests/exec/load_address.c
@@ -6,7 +6,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
-#include "../kselftest.h"
+#include "kselftest.h"
struct Statistics {
unsigned long long load_address;
diff --git a/tools/testing/selftests/exec/non-regular.c b/tools/testing/selftests/exec/non-regular.c
index cd3a34aca93e..14ac36487df5 100644
--- a/tools/testing/selftests/exec/non-regular.c
+++ b/tools/testing/selftests/exec/non-regular.c
@@ -9,7 +9,7 @@
#include <sys/sysmacros.h>
#include <sys/types.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
/* Remove a file, ignoring the result if it didn't exist. */
void rm(struct __test_metadata *_metadata, const char *pathname,
diff --git a/tools/testing/selftests/exec/null-argv.c b/tools/testing/selftests/exec/null-argv.c
index c19726e710d1..4940aee5bb38 100644
--- a/tools/testing/selftests/exec/null-argv.c
+++ b/tools/testing/selftests/exec/null-argv.c
@@ -5,7 +5,7 @@
#include <sys/types.h>
#include <sys/wait.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#define FORK(exec) \
do { \
diff --git a/tools/testing/selftests/exec/recursion-depth.c b/tools/testing/selftests/exec/recursion-depth.c
index 438c8ff2fd26..7b5c4f6d1928 100644
--- a/tools/testing/selftests/exec/recursion-depth.c
+++ b/tools/testing/selftests/exec/recursion-depth.c
@@ -23,7 +23,7 @@
#include <fcntl.h>
#include <sys/mount.h>
#include <unistd.h>
-#include "../kselftest.h"
+#include "kselftest.h"
int main(void)
{
diff --git a/tools/testing/selftests/fchmodat2/fchmodat2_test.c b/tools/testing/selftests/fchmodat2/fchmodat2_test.c
index e0319417124d..e397339495f6 100644
--- a/tools/testing/selftests/fchmodat2/fchmodat2_test.c
+++ b/tools/testing/selftests/fchmodat2/fchmodat2_test.c
@@ -7,7 +7,7 @@
#include <syscall.h>
#include <unistd.h>
-#include "../kselftest.h"
+#include "kselftest.h"
int sys_fchmodat2(int dfd, const char *filename, mode_t mode, int flags)
{
diff --git a/tools/testing/selftests/filelock/ofdlocks.c b/tools/testing/selftests/filelock/ofdlocks.c
index a55b79810ab2..ff8d47fc373a 100644
--- a/tools/testing/selftests/filelock/ofdlocks.c
+++ b/tools/testing/selftests/filelock/ofdlocks.c
@@ -6,7 +6,7 @@
#include <stdio.h>
#include <unistd.h>
#include <string.h>
-#include "../kselftest.h"
+#include "kselftest.h"
static int lock_set(int fd, struct flock *fl)
{
diff --git a/tools/testing/selftests/filesystems/.gitignore b/tools/testing/selftests/filesystems/.gitignore
index 7afa58e2bb20..64ac0dfa46b7 100644
--- a/tools/testing/selftests/filesystems/.gitignore
+++ b/tools/testing/selftests/filesystems/.gitignore
@@ -1,5 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
dnotify_test
devpts_pts
+fclog
file_stressor
anon_inode_test
+kernfs_test
diff --git a/tools/testing/selftests/filesystems/Makefile b/tools/testing/selftests/filesystems/Makefile
index b02326193fee..85427d7f19b9 100644
--- a/tools/testing/selftests/filesystems/Makefile
+++ b/tools/testing/selftests/filesystems/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
CFLAGS += $(KHDR_INCLUDES)
-TEST_GEN_PROGS := devpts_pts file_stressor anon_inode_test
+TEST_GEN_PROGS := devpts_pts file_stressor anon_inode_test kernfs_test fclog
TEST_GEN_PROGS_EXTENDED := dnotify_test
include ../lib.mk
diff --git a/tools/testing/selftests/filesystems/anon_inode_test.c b/tools/testing/selftests/filesystems/anon_inode_test.c
index 73e0a4d4fb2f..94c6c81c2301 100644
--- a/tools/testing/selftests/filesystems/anon_inode_test.c
+++ b/tools/testing/selftests/filesystems/anon_inode_test.c
@@ -6,7 +6,7 @@
#include <stdio.h>
#include <sys/stat.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#include "wrappers.h"
TEST(anon_inode_no_chown)
diff --git a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
index 81db85a5cc16..a1a79a6fef17 100644
--- a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
+++ b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
@@ -21,7 +21,7 @@
#include <linux/android/binder.h>
#include <linux/android/binderfs.h>
-#include "../../kselftest_harness.h"
+#include "kselftest_harness.h"
#define DEFAULT_THREADS 4
@@ -65,6 +65,7 @@ static int __do_binderfs_test(struct __test_metadata *_metadata)
"oneway_spam_detection",
"extended_error",
"freeze_notification",
+ "transaction_report",
};
change_mountns(_metadata);
diff --git a/tools/testing/selftests/filesystems/devpts_pts.c b/tools/testing/selftests/filesystems/devpts_pts.c
index b1fc9b916ace..54fea349204e 100644
--- a/tools/testing/selftests/filesystems/devpts_pts.c
+++ b/tools/testing/selftests/filesystems/devpts_pts.c
@@ -11,7 +11,7 @@
#include <asm/ioctls.h>
#include <sys/mount.h>
#include <sys/wait.h>
-#include "../kselftest.h"
+#include "kselftest.h"
static bool terminal_dup2(int duplicate, int original)
{
diff --git a/tools/testing/selftests/filesystems/epoll/epoll_wakeup_test.c b/tools/testing/selftests/filesystems/epoll/epoll_wakeup_test.c
index 65ede506305c..8bc57a2ef966 100644
--- a/tools/testing/selftests/filesystems/epoll/epoll_wakeup_test.c
+++ b/tools/testing/selftests/filesystems/epoll/epoll_wakeup_test.c
@@ -11,7 +11,7 @@
#include <sys/epoll.h>
#include <sys/socket.h>
#include <sys/eventfd.h>
-#include "../../kselftest_harness.h"
+#include "kselftest_harness.h"
struct epoll_mtcontext
{
diff --git a/tools/testing/selftests/filesystems/eventfd/eventfd_test.c b/tools/testing/selftests/filesystems/eventfd/eventfd_test.c
index 72d51ad0ee0e..1b48f267157d 100644
--- a/tools/testing/selftests/filesystems/eventfd/eventfd_test.c
+++ b/tools/testing/selftests/filesystems/eventfd/eventfd_test.c
@@ -11,7 +11,7 @@
#include <pthread.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
-#include "../../kselftest_harness.h"
+#include "kselftest_harness.h"
#define EVENTFD_TEST_ITERATIONS 100000UL
diff --git a/tools/testing/selftests/filesystems/fclog.c b/tools/testing/selftests/filesystems/fclog.c
new file mode 100644
index 000000000000..551c4a0f395a
--- /dev/null
+++ b/tools/testing/selftests/filesystems/fclog.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Author: Aleksa Sarai <cyphar@cyphar.com>
+ * Copyright (C) 2025 SUSE LLC.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/mount.h>
+
+#include "kselftest_harness.h"
+
+#define ASSERT_ERRNO(expected, _t, seen) \
+ __EXPECT(expected, #expected, \
+ ({__typeof__(seen) _tmp_seen = (seen); \
+ _tmp_seen >= 0 ? _tmp_seen : -errno; }), #seen, _t, 1)
+
+#define ASSERT_ERRNO_EQ(expected, seen) \
+ ASSERT_ERRNO(expected, ==, seen)
+
+#define ASSERT_SUCCESS(seen) \
+ ASSERT_ERRNO(0, <=, seen)
+
+FIXTURE(ns)
+{
+ int host_mntns;
+};
+
+FIXTURE_SETUP(ns)
+{
+ /* Stash the old mntns. */
+ self->host_mntns = open("/proc/self/ns/mnt", O_RDONLY|O_CLOEXEC);
+ ASSERT_SUCCESS(self->host_mntns);
+
+ /* Create a new mount namespace and make it private. */
+ ASSERT_SUCCESS(unshare(CLONE_NEWNS));
+ ASSERT_SUCCESS(mount(NULL, "/", NULL, MS_PRIVATE|MS_REC, NULL));
+}
+
+FIXTURE_TEARDOWN(ns)
+{
+ ASSERT_SUCCESS(setns(self->host_mntns, CLONE_NEWNS));
+ ASSERT_SUCCESS(close(self->host_mntns));
+}
+
+TEST_F(ns, fscontext_log_enodata)
+{
+ int fsfd = fsopen("tmpfs", FSOPEN_CLOEXEC);
+ ASSERT_SUCCESS(fsfd);
+
+ /* A brand new fscontext has no log entries. */
+ char buf[128] = {};
+ for (int i = 0; i < 16; i++)
+ ASSERT_ERRNO_EQ(-ENODATA, read(fsfd, buf, sizeof(buf)));
+
+ ASSERT_SUCCESS(close(fsfd));
+}
+
+TEST_F(ns, fscontext_log_errorfc)
+{
+ int fsfd = fsopen("tmpfs", FSOPEN_CLOEXEC);
+ ASSERT_SUCCESS(fsfd);
+
+ ASSERT_ERRNO_EQ(-EINVAL, fsconfig(fsfd, FSCONFIG_SET_STRING, "invalid-arg", "123", 0));
+
+ char buf[128] = {};
+ ASSERT_SUCCESS(read(fsfd, buf, sizeof(buf)));
+ EXPECT_STREQ("e tmpfs: Unknown parameter 'invalid-arg'\n", buf);
+
+ /* The message has been consumed. */
+ ASSERT_ERRNO_EQ(-ENODATA, read(fsfd, buf, sizeof(buf)));
+ ASSERT_SUCCESS(close(fsfd));
+}
+
+TEST_F(ns, fscontext_log_errorfc_after_fsmount)
+{
+ int fsfd = fsopen("tmpfs", FSOPEN_CLOEXEC);
+ ASSERT_SUCCESS(fsfd);
+
+ ASSERT_ERRNO_EQ(-EINVAL, fsconfig(fsfd, FSCONFIG_SET_STRING, "invalid-arg", "123", 0));
+
+ ASSERT_SUCCESS(fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0));
+ int mfd = fsmount(fsfd, FSMOUNT_CLOEXEC, MOUNT_ATTR_NOEXEC | MOUNT_ATTR_NOSUID);
+ ASSERT_SUCCESS(mfd);
+ ASSERT_SUCCESS(move_mount(mfd, "", AT_FDCWD, "/tmp", MOVE_MOUNT_F_EMPTY_PATH));
+
+ /*
+ * The fscontext log should still contain data even after
+ * FSCONFIG_CMD_CREATE and fsmount().
+ */
+ char buf[128] = {};
+ ASSERT_SUCCESS(read(fsfd, buf, sizeof(buf)));
+ EXPECT_STREQ("e tmpfs: Unknown parameter 'invalid-arg'\n", buf);
+
+ /* The message has been consumed. */
+ ASSERT_ERRNO_EQ(-ENODATA, read(fsfd, buf, sizeof(buf)));
+ ASSERT_SUCCESS(close(fsfd));
+}
+
+TEST_F(ns, fscontext_log_emsgsize)
+{
+ int fsfd = fsopen("tmpfs", FSOPEN_CLOEXEC);
+ ASSERT_SUCCESS(fsfd);
+
+ ASSERT_ERRNO_EQ(-EINVAL, fsconfig(fsfd, FSCONFIG_SET_STRING, "invalid-arg", "123", 0));
+
+ char buf[128] = {};
+ /*
+ * Attempting to read a message with too small a buffer should not
+ * result in the message getting consumed.
+ */
+ ASSERT_ERRNO_EQ(-EMSGSIZE, read(fsfd, buf, 0));
+ ASSERT_ERRNO_EQ(-EMSGSIZE, read(fsfd, buf, 1));
+ for (int i = 0; i < 16; i++)
+ ASSERT_ERRNO_EQ(-EMSGSIZE, read(fsfd, buf, 16));
+
+ ASSERT_SUCCESS(read(fsfd, buf, sizeof(buf)));
+ EXPECT_STREQ("e tmpfs: Unknown parameter 'invalid-arg'\n", buf);
+
+ /* The message has been consumed. */
+ ASSERT_ERRNO_EQ(-ENODATA, read(fsfd, buf, sizeof(buf)));
+ ASSERT_SUCCESS(close(fsfd));
+}
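The log semantics exercised above (ENODATA when empty, EMSGSIZE on a too-short read, message consumed on success) can also be poked interactively; a Python sketch, assuming x86_64 syscall numbers and root privileges:

    import ctypes, os

    libc = ctypes.CDLL(None, use_errno=True)
    FSOPEN_CLOEXEC, FSCONFIG_SET_STRING = 0x1, 1

    fd = libc.syscall(430, b"tmpfs", FSOPEN_CLOEXEC)  # __NR_fsopen
    libc.syscall(431, fd, FSCONFIG_SET_STRING, b"invalid-arg", b"123", 0)  # __NR_fsconfig
    print(os.read(fd, 128))  # b"e tmpfs: Unknown parameter 'invalid-arg'\n"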
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/filesystems/file_stressor.c b/tools/testing/selftests/filesystems/file_stressor.c
index 01dd89f8e52f..141badd671a9 100644
--- a/tools/testing/selftests/filesystems/file_stressor.c
+++ b/tools/testing/selftests/filesystems/file_stressor.c
@@ -12,7 +12,7 @@
#include <sys/mount.h>
#include <unistd.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#include <linux/types.h>
#include <linux/mount.h>
diff --git a/tools/testing/selftests/filesystems/fuse/.gitignore b/tools/testing/selftests/filesystems/fuse/.gitignore
new file mode 100644
index 000000000000..3e72e742d08e
--- /dev/null
+++ b/tools/testing/selftests/filesystems/fuse/.gitignore
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+fuse_mnt
+fusectl_test
diff --git a/tools/testing/selftests/filesystems/fuse/Makefile b/tools/testing/selftests/filesystems/fuse/Makefile
new file mode 100644
index 000000000000..612aad69a93a
--- /dev/null
+++ b/tools/testing/selftests/filesystems/fuse/Makefile
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+CFLAGS += -Wall -O2 -g $(KHDR_INCLUDES)
+
+TEST_GEN_PROGS := fusectl_test
+TEST_GEN_FILES := fuse_mnt
+
+include ../../lib.mk
+
+VAR_CFLAGS := $(shell pkg-config fuse --cflags 2>/dev/null)
+ifeq ($(VAR_CFLAGS),)
+VAR_CFLAGS := -D_FILE_OFFSET_BITS=64 -I/usr/include/fuse
+endif
+
+VAR_LDLIBS := $(shell pkg-config fuse --libs 2>/dev/null)
+ifeq ($(VAR_LDLIBS),)
+VAR_LDLIBS := -lfuse -pthread
+endif
+
+$(OUTPUT)/fuse_mnt: CFLAGS += $(VAR_CFLAGS)
+$(OUTPUT)/fuse_mnt: LDLIBS += $(VAR_LDLIBS)
diff --git a/tools/testing/selftests/filesystems/fuse/fuse_mnt.c b/tools/testing/selftests/filesystems/fuse/fuse_mnt.c
new file mode 100644
index 000000000000..d12b17f30fad
--- /dev/null
+++ b/tools/testing/selftests/filesystems/fuse/fuse_mnt.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * fusectl test file-system
+ * Creates a simple FUSE filesystem with a single read-write file (/test)
+ */
+
+#define FUSE_USE_VERSION 26
+
+#include <fuse.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+static char *content;
+static size_t content_size = 0;
+static const char test_path[] = "/test";
+
+static int test_getattr(const char *path, struct stat *st)
+{
+ memset(st, 0, sizeof(*st));
+
+ if (!strcmp(path, "/")) {
+ st->st_mode = S_IFDIR | 0755;
+ st->st_nlink = 2;
+ return 0;
+ }
+
+ if (!strcmp(path, test_path)) {
+ st->st_mode = S_IFREG | 0664;
+ st->st_nlink = 1;
+ st->st_size = content_size;
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int test_readdir(const char *path, void *buf, fuse_fill_dir_t filler,
+ off_t offset, struct fuse_file_info *fi)
+{
+ if (strcmp(path, "/"))
+ return -ENOENT;
+
+ filler(buf, ".", NULL, 0);
+ filler(buf, "..", NULL, 0);
+ filler(buf, test_path + 1, NULL, 0);
+
+ return 0;
+}
+
+static int test_open(const char *path, struct fuse_file_info *fi)
+{
+ if (strcmp(path, test_path))
+ return -ENOENT;
+
+ return 0;
+}
+
+static int test_read(const char *path, char *buf, size_t size, off_t offset,
+ struct fuse_file_info *fi)
+{
+ if (strcmp(path, test_path) != 0)
+ return -ENOENT;
+
+ if (!content || content_size == 0)
+ return 0;
+
+ if (offset >= content_size)
+ return 0;
+
+ if (offset + size > content_size)
+ size = content_size - offset;
+
+ memcpy(buf, content + offset, size);
+
+ return size;
+}
+
+static int test_write(const char *path, const char *buf, size_t size,
+ off_t offset, struct fuse_file_info *fi)
+{
+ size_t new_size;
+
+ if (strcmp(path, test_path) != 0)
+ return -ENOENT;
+
+ if (offset > content_size)
+ return -EINVAL;
+
+ new_size = MAX(offset + size, content_size);
+
+ if (new_size > content_size) {
+ char *new_content = realloc(content, new_size);
+
+ if (!new_content)
+ return -ENOMEM;
+ content = new_content;
+ }
+
+ content_size = new_size;
+
+ memcpy(content + offset, buf, size);
+
+ return size;
+}
+
+static int test_truncate(const char *path, off_t size)
+{
+ if (strcmp(path, test_path) != 0)
+ return -ENOENT;
+
+ if (size == 0) {
+ free(content);
+ content = NULL;
+ content_size = 0;
+ return 0;
+ }
+
+ content = realloc(content, size);
+
+ if (!content)
+ return -ENOMEM;
+
+ if (size > content_size)
+ memset(content + content_size, 0, size - content_size);
+
+ content_size = size;
+ return 0;
+}
+
+static struct fuse_operations memfd_ops = {
+ .getattr = test_getattr,
+ .readdir = test_readdir,
+ .open = test_open,
+ .read = test_read,
+ .write = test_write,
+ .truncate = test_truncate,
+};
+
+int main(int argc, char *argv[])
+{
+ return fuse_main(argc, argv, &memfd_ops, NULL);
+}
diff --git a/tools/testing/selftests/filesystems/fuse/fusectl_test.c b/tools/testing/selftests/filesystems/fuse/fusectl_test.c
new file mode 100644
index 000000000000..0d1d012c35ed
--- /dev/null
+++ b/tools/testing/selftests/filesystems/fuse/fusectl_test.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2025 Chen Linxuan <chenlinxuan@uniontech.com>
+
+#define _GNU_SOURCE
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <dirent.h>
+#include <sched.h>
+#include <linux/limits.h>
+
+#include "kselftest_harness.h"
+
+#define FUSECTL_MOUNTPOINT "/sys/fs/fuse/connections"
+#define FUSE_MOUNTPOINT "/tmp/fuse_mnt_XXXXXX"
+#define FUSE_DEVICE "/dev/fuse"
+#define FUSECTL_TEST_VALUE "1"
+
+static void write_file(struct __test_metadata *const _metadata,
+ const char *path, const char *val)
+{
+ int fd = open(path, O_WRONLY);
+ size_t len = strlen(val);
+
+ ASSERT_GE(fd, 0);
+ ASSERT_EQ(write(fd, val, len), len);
+ ASSERT_EQ(close(fd), 0);
+}
+
+FIXTURE(fusectl){
+ char fuse_mountpoint[sizeof(FUSE_MOUNTPOINT)];
+ int connection;
+};
+
+FIXTURE_SETUP(fusectl)
+{
+ const char *fuse_mnt_prog = "./fuse_mnt";
+ int status, pid;
+ struct stat statbuf;
+ uid_t uid = getuid();
+ gid_t gid = getgid();
+ char buf[32];
+
+ /* Setup userns */
+ ASSERT_EQ(unshare(CLONE_NEWNS|CLONE_NEWUSER), 0);
+ sprintf(buf, "0 %d 1", uid);
+ write_file(_metadata, "/proc/self/uid_map", buf);
+ write_file(_metadata, "/proc/self/setgroups", "deny");
+ sprintf(buf, "0 %d 1", gid);
+ write_file(_metadata, "/proc/self/gid_map", buf);
+ ASSERT_EQ(mount("", "/", NULL, MS_REC|MS_PRIVATE, NULL), 0);
+
+ strcpy(self->fuse_mountpoint, FUSE_MOUNTPOINT);
+
+ if (!mkdtemp(self->fuse_mountpoint))
+ SKIP(return,
+ "Failed to create FUSE mountpoint %s",
+ strerror(errno));
+
+ if (access(FUSECTL_MOUNTPOINT, F_OK))
+ SKIP(return,
+ "FUSE control filesystem not mounted");
+
+ pid = fork();
+ if (pid < 0)
+ SKIP(return,
+ "Failed to fork FUSE daemon process: %s",
+ strerror(errno));
+
+ if (pid == 0) {
+ execlp(fuse_mnt_prog, fuse_mnt_prog, self->fuse_mountpoint, NULL);
+ exit(errno);
+ }
+
+ waitpid(pid, &status, 0);
+ if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
+ SKIP(return,
+ "Failed to start FUSE daemon %s",
+ strerror(WEXITSTATUS(status)));
+ }
+
+ if (stat(self->fuse_mountpoint, &statbuf))
+ SKIP(return,
+ "Failed to stat FUSE mountpoint %s",
+ strerror(errno));
+
+ self->connection = statbuf.st_dev;
+}
+
+FIXTURE_TEARDOWN(fusectl)
+{
+ umount2(self->fuse_mountpoint, MNT_DETACH);
+ rmdir(self->fuse_mountpoint);
+}
+
+TEST_F(fusectl, abort)
+{
+ char path_buf[PATH_MAX];
+ int abort_fd, test_fd, ret;
+
+ sprintf(path_buf, "/sys/fs/fuse/connections/%d/abort", self->connection);
+
+ ASSERT_EQ(0, access(path_buf, F_OK));
+
+ abort_fd = open(path_buf, O_WRONLY);
+ ASSERT_GE(abort_fd, 0);
+
+ sprintf(path_buf, "%s/test", self->fuse_mountpoint);
+
+ test_fd = open(path_buf, O_RDWR);
+ ASSERT_GE(test_fd, 0);
+
+ ret = read(test_fd, path_buf, sizeof(path_buf));
+ ASSERT_EQ(ret, 0);
+
+ ret = write(test_fd, "test", sizeof("test"));
+ ASSERT_EQ(ret, sizeof("test"));
+
+ ret = lseek(test_fd, 0, SEEK_SET);
+ ASSERT_GE(ret, 0);
+
+ ret = write(abort_fd, FUSECTL_TEST_VALUE, sizeof(FUSECTL_TEST_VALUE));
+ ASSERT_GT(ret, 0);
+
+ close(abort_fd);
+
+ ret = read(test_fd, path_buf, sizeof(path_buf));
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, ENOTCONN);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/filesystems/kernfs_test.c b/tools/testing/selftests/filesystems/kernfs_test.c
new file mode 100644
index 000000000000..84c2b910a60d
--- /dev/null
+++ b/tools/testing/selftests/filesystems/kernfs_test.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#define __SANE_USERSPACE_TYPES__
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <sys/stat.h>
+#include <sys/xattr.h>
+
+#include "kselftest_harness.h"
+#include "wrappers.h"
+
+TEST(kernfs_listxattr)
+{
+ int fd;
+
+ /* Read-only file that can never have any extended attributes set. */
+ fd = open("/sys/kernel/warn_count", O_RDONLY | O_CLOEXEC);
+ ASSERT_GE(fd, 0);
+ ASSERT_EQ(flistxattr(fd, NULL, 0), 0);
+ EXPECT_EQ(close(fd), 0);
+}
+
+TEST(kernfs_getxattr)
+{
+ int fd;
+ char buf[1];
+
+ /* Read-only file that can never have any extended attributes set. */
+ fd = open("/sys/kernel/warn_count", O_RDONLY | O_CLOEXEC);
+ ASSERT_GE(fd, 0);
+ ASSERT_LT(fgetxattr(fd, "user.foo", buf, sizeof(buf)), 0);
+ ASSERT_EQ(errno, ENODATA);
+ EXPECT_EQ(close(fd), 0);
+}
+
+TEST_HARNESS_MAIN
+
diff --git a/tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c b/tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c
index 63ce708d93ed..6381af6a40e3 100644
--- a/tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c
+++ b/tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c
@@ -2,6 +2,13 @@
// Copyright (c) 2025 Miklos Szeredi <miklos@szeredi.hu>
#define _GNU_SOURCE
+
+// Needed for linux/fanotify.h
+typedef struct {
+ int val[2];
+} __kernel_fsid_t;
+#define __kernel_fsid_t __kernel_fsid_t
+
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
@@ -10,20 +17,12 @@
#include <sys/mount.h>
#include <unistd.h>
#include <sys/syscall.h>
+#include <sys/fanotify.h>
-#include "../../kselftest_harness.h"
+#include "kselftest_harness.h"
#include "../statmount/statmount.h"
#include "../utils.h"
-// Needed for linux/fanotify.h
-#ifndef __kernel_fsid_t
-typedef struct {
- int val[2];
-} __kernel_fsid_t;
-#endif
-
-#include <sys/fanotify.h>
-
static const char root_mntpoint_templ[] = "/tmp/mount-notify_test_root.XXXXXX";
static const int mark_cmds[] = {
diff --git a/tools/testing/selftests/filesystems/mount-notify/mount-notify_test_ns.c b/tools/testing/selftests/filesystems/mount-notify/mount-notify_test_ns.c
index 090a5ca65004..320ee25dc8a5 100644
--- a/tools/testing/selftests/filesystems/mount-notify/mount-notify_test_ns.c
+++ b/tools/testing/selftests/filesystems/mount-notify/mount-notify_test_ns.c
@@ -2,6 +2,13 @@
// Copyright (c) 2025 Miklos Szeredi <miklos@szeredi.hu>
#define _GNU_SOURCE
+
+// Needed for linux/fanotify.h
+typedef struct {
+ int val[2];
+} __kernel_fsid_t;
+#define __kernel_fsid_t __kernel_fsid_t
+
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
@@ -10,21 +17,12 @@
#include <sys/mount.h>
#include <unistd.h>
#include <sys/syscall.h>
+#include <sys/fanotify.h>
-#include "../../kselftest_harness.h"
-#include "../../pidfd/pidfd.h"
+#include "kselftest_harness.h"
#include "../statmount/statmount.h"
#include "../utils.h"
-// Needed for linux/fanotify.h
-#ifndef __kernel_fsid_t
-typedef struct {
- int val[2];
-} __kernel_fsid_t;
-#endif
-
-#include <sys/fanotify.h>
-
static const char root_mntpoint_templ[] = "/tmp/mount-notify_test_root.XXXXXX";
static const int mark_types[] = {
diff --git a/tools/testing/selftests/filesystems/nsfs/iterate_mntns.c b/tools/testing/selftests/filesystems/nsfs/iterate_mntns.c
index a3d8015897e9..61e55dfbf121 100644
--- a/tools/testing/selftests/filesystems/nsfs/iterate_mntns.c
+++ b/tools/testing/selftests/filesystems/nsfs/iterate_mntns.c
@@ -12,7 +12,7 @@
#include <sys/mount.h>
#include <unistd.h>
-#include "../../kselftest_harness.h"
+#include "kselftest_harness.h"
#define MNT_NS_COUNT 11
#define MNT_NS_LAST_INDEX 10
diff --git a/tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c b/tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c
index 31db54b00e64..8924cea6aa4b 100644
--- a/tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c
+++ b/tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c
@@ -15,7 +15,7 @@
#include <sched.h>
#include <fcntl.h>
-#include "../../kselftest.h"
+#include "kselftest.h"
#include "log.h"
#include "../wrappers.h"
diff --git a/tools/testing/selftests/filesystems/overlayfs/set_layers_via_fds.c b/tools/testing/selftests/filesystems/overlayfs/set_layers_via_fds.c
index dc0449fa628f..3c0b93183348 100644
--- a/tools/testing/selftests/filesystems/overlayfs/set_layers_via_fds.c
+++ b/tools/testing/selftests/filesystems/overlayfs/set_layers_via_fds.c
@@ -12,7 +12,7 @@
#include <sys/mount.h>
#include <unistd.h>
-#include "../../kselftest_harness.h"
+#include "kselftest_harness.h"
#include "../../pidfd/pidfd.h"
#include "log.h"
#include "../utils.h"
diff --git a/tools/testing/selftests/filesystems/statmount/listmount_test.c b/tools/testing/selftests/filesystems/statmount/listmount_test.c
index 15f0834f7557..8bc82f38c42f 100644
--- a/tools/testing/selftests/filesystems/statmount/listmount_test.c
+++ b/tools/testing/selftests/filesystems/statmount/listmount_test.c
@@ -11,7 +11,7 @@
#include <unistd.h>
#include "statmount.h"
-#include "../../kselftest_harness.h"
+#include "kselftest_harness.h"
#ifndef LISTMOUNT_REVERSE
#define LISTMOUNT_REVERSE (1 << 0) /* List later mounts first */
diff --git a/tools/testing/selftests/filesystems/statmount/statmount_test.c b/tools/testing/selftests/filesystems/statmount/statmount_test.c
index f048042e53e9..6e53430423d2 100644
--- a/tools/testing/selftests/filesystems/statmount/statmount_test.c
+++ b/tools/testing/selftests/filesystems/statmount/statmount_test.c
@@ -13,7 +13,7 @@
#include <linux/stat.h>
#include "statmount.h"
-#include "../../kselftest.h"
+#include "kselftest.h"
static const char *const known_fs[] = {
"9p", "adfs", "affs", "afs", "aio", "anon_inodefs", "apparmorfs",
diff --git a/tools/testing/selftests/filesystems/statmount/statmount_test_ns.c b/tools/testing/selftests/filesystems/statmount/statmount_test_ns.c
index 605a3fa16bf7..d56d4103182f 100644
--- a/tools/testing/selftests/filesystems/statmount/statmount_test_ns.c
+++ b/tools/testing/selftests/filesystems/statmount/statmount_test_ns.c
@@ -15,7 +15,7 @@
#include "statmount.h"
#include "../utils.h"
-#include "../../kselftest.h"
+#include "kselftest.h"
#define NSID_PASS 0
#define NSID_FAIL 1
diff --git a/tools/testing/selftests/filesystems/utils.c b/tools/testing/selftests/filesystems/utils.c
index c43a69dffd83..c9dd5412b37b 100644
--- a/tools/testing/selftests/filesystems/utils.c
+++ b/tools/testing/selftests/filesystems/utils.c
@@ -20,7 +20,7 @@
#include <sys/xattr.h>
#include <sys/mount.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "wrappers.h"
#include "utils.h"
@@ -487,7 +487,7 @@ int setup_userns(void)
uid_t uid = getuid();
gid_t gid = getgid();
- ret = unshare(CLONE_NEWNS|CLONE_NEWUSER|CLONE_NEWPID);
+ ret = unshare(CLONE_NEWNS|CLONE_NEWUSER);
if (ret) {
ksft_exit_fail_msg("unsharing mountns and userns: %s\n",
strerror(errno));
diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest
index cce72f8b03dc..3230bd54dba8 100755
--- a/tools/testing/selftests/ftrace/ftracetest
+++ b/tools/testing/selftests/ftrace/ftracetest
@@ -22,6 +22,7 @@ echo " --fail-unresolved Treat UNRESOLVED as a failure"
echo " -d|--debug Debug mode (trace all shell commands)"
echo " -l|--logdir <dir> Save logs on the <dir>"
echo " If <dir> is -, all logs output in console only"
+echo " --rv Run RV selftests instead of ftrace ones"
exit $1
}
@@ -133,6 +134,10 @@ parse_opts() { # opts
LINK_PTR=
shift 2
;;
+ --rv)
+ RV_TEST=1
+ shift 1
+ ;;
*.tc)
if [ -f "$1" ]; then
OPT_TEST_CASES="$OPT_TEST_CASES `abspath $1`"
@@ -152,9 +157,13 @@ parse_opts() { # opts
;;
esac
done
- if [ ! -z "$OPT_TEST_CASES" ]; then
+ if [ -n "$OPT_TEST_CASES" ]; then
TEST_CASES=$OPT_TEST_CASES
fi
+ if [ -n "$OPT_TEST_DIR" -a -f "$OPT_TEST_DIR"/test.d/functions ]; then
+ TOP_DIR=$OPT_TEST_DIR
+ TEST_DIR=$TOP_DIR/test.d
+ fi
}
# Parameters
@@ -190,10 +199,6 @@ fi
TOP_DIR=`absdir $0`
TEST_DIR=$TOP_DIR/test.d
TEST_CASES=`find_testcases $TEST_DIR`
-LOG_TOP_DIR=$TOP_DIR/logs
-LOG_DATE=`date +%Y%m%d-%H%M%S`
-LOG_DIR=$LOG_TOP_DIR/$LOG_DATE/
-LINK_PTR=$LOG_TOP_DIR/latest
KEEP_LOG=0
KTAP=0
DEBUG=0
@@ -201,14 +206,23 @@ VERBOSE=0
UNSUPPORTED_RESULT=0
UNRESOLVED_RESULT=0
STOP_FAILURE=0
+RV_TEST=0
# Parse command-line options
parse_opts $*
+LOG_TOP_DIR=$TOP_DIR/logs
+LOG_DATE=`date +%Y%m%d-%H%M%S`
+LOG_DIR=$LOG_TOP_DIR/$LOG_DATE/
+LINK_PTR=$LOG_TOP_DIR/latest
+
[ $DEBUG -ne 0 ] && set -x
-# Verify parameters
-if [ -z "$TRACING_DIR" -o ! -d "$TRACING_DIR" ]; then
- errexit "No ftrace directory found"
+if [ $RV_TEST -ne 0 ]; then
+ TRACING_DIR=$TRACING_DIR/rv
+ if [ ! -d "$TRACING_DIR" ]; then
+ err_ret=$err_skip
+ errexit "rv is not configured in this kernel"
+ fi
fi
# Preparing logs
@@ -419,7 +433,7 @@ trap 'SIG_RESULT=$XFAIL' $SIG_XFAIL
__run_test() { # testfile
# setup PID and PPID, $$ is not updated.
(cd $TRACING_DIR; read PID _ < /proc/self/stat; set -e; set -x;
- checkreq $1; initialize_ftrace; . $1)
+ checkreq $1; initialize_system; . $1)
[ $? -ne 0 ] && kill -s $SIG_FAIL $SIG_PID
}
@@ -496,7 +510,7 @@ for t in $TEST_CASES; do
exit 1
fi
done
-(cd $TRACING_DIR; finish_ftrace) # for cleanup
+(cd $TRACING_DIR; finish_system) # for cleanup
prlog ""
prlog "# of passed: " `echo $PASSED_CASES | wc -w`
diff --git a/tools/testing/selftests/ftrace/test.d/00basic/mount_options.tc b/tools/testing/selftests/ftrace/test.d/00basic/mount_options.tc
index 8a7ce647a60d..318939451caf 100644
--- a/tools/testing/selftests/ftrace/test.d/00basic/mount_options.tc
+++ b/tools/testing/selftests/ftrace/test.d/00basic/mount_options.tc
@@ -28,7 +28,7 @@ unmount_tracefs() {
local mount_point="$1"
# Need to make sure the mount isn't busy so that we can umount it
- (cd $mount_point; finish_ftrace;)
+ (cd $mount_point; finish_system;)
cleanup
}
diff --git a/tools/testing/selftests/ftrace/test.d/00basic/trace_marker_raw.tc b/tools/testing/selftests/ftrace/test.d/00basic/trace_marker_raw.tc
new file mode 100644
index 000000000000..7daf7292209e
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/00basic/trace_marker_raw.tc
@@ -0,0 +1,107 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Basic tests on writing to trace_marker_raw
+# requires: trace_marker_raw
+# flags: instance
+
+is_little_endian() {
+ if lscpu | grep -q 'Little Endian'; then
+ echo 1;
+ else
+ echo 0;
+ fi
+}
+
+little=`is_little_endian`
+
+make_str() {
+ id=$1
+ cnt=$2
+
+ if [ $little -eq 1 ]; then
+ val=`printf "\\%03o\\%03o\\%03o\\%03o" \
+ $(($id & 0xff)) \
+ $((($id >> 8) & 0xff)) \
+ $((($id >> 16) & 0xff)) \
+ $((($id >> 24) & 0xff))`
+ else
+ val=`printf "\\%03o\\%03o\\%03o\\%03o" \
+ $((($id >> 24) & 0xff)) \
+ $((($id >> 16) & 0xff)) \
+ $((($id >> 8) & 0xff)) \
+ $(($id & 0xff))`
+ fi
+
+ data=`printf -- 'X%.0s' $(seq $cnt)`
+
+ printf "${val}${data}"
+}
+
+write_buffer() {
+ id=$1
+ size=$2
+
+ # write the string into the raw marker
+ make_str $id $size > trace_marker_raw
+}
+
+
+test_multiple_writes() {
+
+ # Write a bunch of data where the id is the count of
+ # data to write
+ for i in `seq 1 10` `seq 101 110` `seq 1001 1010`; do
+ write_buffer $i $i
+ done
+
+ # add a little buffer
+ echo stop > trace_marker
+
+ # Check that the data length of each entry matches its id (rounded up to a multiple of 4)
+ awk '/.*: # [0-9a-f]* / {
+ print;
+ cnt = -1;
+ for (i = 0; i < NF; i++) {
+ # The counter is after the "#" marker
+ if ( $i == "#" ) {
+ i++;
+ cnt = strtonum("0x" $i);
+ num = NF - (i + 1);
+ # The number of items is always rounded up to a multiple of 4
+ cnt2 = int((cnt + 3) / 4) * 4;
+ if (cnt2 != num) {
+ exit 1;
+ }
+ break;
+ }
+ }
+ }
+ // { if (NR > 30) { exit 0; } } ' trace_pipe;
+}
+
+
+get_buffer_data_size() {
+ sed -ne 's/^.*data.*size:\([0-9][0-9]*\).*/\1/p' events/header_page
+}
+
+test_buffer() {
+
+ # The id must be four bytes; check that a 3-byte write fails
+ if echo -n abc > ./trace_marker_raw ; then
+ echo "Too small of write expected to fail but did not"
+ exit_fail
+ fi
+
+ size=`get_buffer_data_size`
+ echo size = $size
+
+ # Now add a little more than what it can handle
+
+ if write_buffer 0xdeadbeef $size ; then
+ echo "Too big of write expected to fail but did not"
+ exit_fail
+ fi
+}
+
+test_buffer
+test_multiple_writes
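The raw-marker record format the script relies on (a 4-byte native-endian id followed by payload bytes) is easy to produce from other languages too; a Python sketch, assuming the default tracefs mount point:

    import struct

    # 4-byte native-endian id, then payload; mirrors make_str above.
    record = struct.pack("=I", 0xdeadbeef) + b"X" * 10
    with open("/sys/kernel/tracing/trace_marker_raw", "wb") as f:
        f.write(record)  # writes shorter than 4 bytes are rejected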
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc
index 73f6c6fcecab..47067a5e3cb0 100644
--- a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc
@@ -16,47 +16,43 @@ ocnt=`cat enabled_functions | wc -l`
echo "f:myevent1 $PLACE" >> dynamic_events
-# Make sure the event is attached and is the only one
-grep -q $PLACE enabled_functions
-cnt=`cat enabled_functions | wc -l`
-if [ $cnt -ne $((ocnt + 1)) ]; then
- exit_fail
-fi
-
echo "f:myevent2 $PLACE%return" >> dynamic_events
-# It should till be the only attached function
-cnt=`cat enabled_functions | wc -l`
-if [ $cnt -ne $((ocnt + 1)) ]; then
- exit_fail
-fi
-
# add another event
echo "f:myevent3 $PLACE2" >> dynamic_events
-grep -q $PLACE2 enabled_functions
-cnt=`cat enabled_functions | wc -l`
-if [ $cnt -ne $((ocnt + 2)) ]; then
- exit_fail
-fi
-
grep -q myevent1 dynamic_events
grep -q myevent2 dynamic_events
grep -q myevent3 dynamic_events
test -d events/fprobes/myevent1
test -d events/fprobes/myevent2
-echo "-:myevent2" >> dynamic_events
+echo 1 > events/fprobes/myevent1/enable
+# Make sure the event is attached.
+grep -q $PLACE enabled_functions
+cnt=`cat enabled_functions | wc -l`
+if [ $cnt -eq $ocnt ]; then
+ exit_fail
+fi
-grep -q myevent1 dynamic_events
-! grep -q myevent2 dynamic_events
+echo 1 > events/fprobes/myevent2/enable
+cnt2=`cat enabled_functions | wc -l`
-# should still have 2 left
+echo 1 > events/fprobes/myevent3/enable
+# If the function is different, the attached function should be increased
+grep -q $PLACE2 enabled_functions
cnt=`cat enabled_functions | wc -l`
-if [ $cnt -ne $((ocnt + 2)) ]; then
+if [ $cnt -eq $cnt2 ]; then
exit_fail
fi
+echo 0 > events/fprobes/myevent2/enable
+echo "-:myevent2" >> dynamic_events
+
+grep -q myevent1 dynamic_events
+! grep -q myevent2 dynamic_events
+
+echo 0 > events/fprobes/enable
echo > dynamic_events
# Should have none left
@@ -67,12 +63,14 @@ fi
echo "f:myevent4 $PLACE" >> dynamic_events
+echo 1 > events/fprobes/myevent4/enable
# Should only have one enabled
cnt=`cat enabled_functions | wc -l`
if [ $cnt -ne $((ocnt + 1)) ]; then
exit_fail
fi
+echo 0 > events/fprobes/enable
echo > dynamic_events
# Should have none left
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/enable_disable_tprobe.tc b/tools/testing/selftests/ftrace/test.d/dynevent/enable_disable_tprobe.tc
new file mode 100644
index 000000000000..c1f1cafa30f3
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/enable_disable_tprobe.tc
@@ -0,0 +1,40 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Generic dynamic event - enable/disable tracepoint probe events
+# requires: dynamic_events "t[:[<group>/][<event>]] <tracepoint> [<args>]":README
+
+echo 0 > events/enable
+echo > dynamic_events
+
+TRACEPOINT=sched_switch
+ENABLEFILE=events/tracepoints/myprobe/enable
+
+:;: "Add tracepoint event on $TRACEPOINT" ;:
+
+echo "t:myprobe ${TRACEPOINT}" >> dynamic_events
+
+:;: "Check enable/disable to ensure it works" ;:
+
+echo 1 > $ENABLEFILE
+
+grep -q $TRACEPOINT trace
+
+echo 0 > $ENABLEFILE
+
+echo > trace
+
+! grep -q $TRACEPOINT trace
+
+:;: "Repeat enable/disable to ensure it works" ;:
+
+echo 1 > $ENABLEFILE
+
+grep -q $TRACEPOINT trace
+
+echo 0 > $ENABLEFILE
+
+echo > trace
+
+! grep -q $TRACEPOINT trace
+
+exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc b/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
index b7c8f29c09a9..65916bb55dfb 100644
--- a/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
+++ b/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
@@ -14,11 +14,35 @@ fail() { #msg
exit_fail
}
+# As reading trace can last forever, simply look for 3 different
+# events, then stop reading the file. If there are not 3 different
+# events, the test has failed.
+check_unique() {
+ cat trace | grep -v '^#' | awk '
+ BEGIN { cnt = 0; }
+ {
+ for (i = 0; i < cnt; i++) {
+ if (event[i] == $5) {
+ break;
+ }
+ }
+ if (i == cnt) {
+ event[cnt++] = $5;
+ if (cnt > 2) {
+ exit;
+ }
+ }
+ }
+ END {
+ printf "%d", cnt;
+ }'
+}
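For reference, the same dedup-and-stop logic as the awk program, sketched in Python (assumes the 5th whitespace-separated field of each trace line is the event name, as in the trace output):

    def count_unique(lines, want=3):
        seen = set()
        for line in lines:
            if line.startswith("#"):
                continue
            seen.add(line.split()[4])
            if len(seen) >= want:
                break
        return len(seen)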
+
echo 'sched:*' > set_event
yield
-count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+count=`check_unique`
if [ $count -lt 3 ]; then
fail "at least fork, exec and exit events should be recorded"
fi
@@ -29,7 +53,7 @@ echo 1 > events/sched/enable
yield
-count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+count=`check_unique`
if [ $count -lt 3 ]; then
fail "at least fork, exec and exit events should be recorded"
fi
diff --git a/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc b/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
index c62165fabd0c..cfa16aa1f39a 100644
--- a/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
+++ b/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
@@ -20,6 +20,10 @@ sample_events() {
echo 0 > tracing_on
echo 0 > events/enable
+# Clear functions caused by page cache; run sample_events twice
+sample_events
+sample_events
+
echo "Get the most frequently calling function"
echo > trace
sample_events
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
index 4b994b6df5ac..ed81eaf2afd6 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
@@ -29,7 +29,7 @@ ftrace_filter_check 'schedule*' '^schedule.*$'
ftrace_filter_check '*pin*lock' '.*pin.*lock$'
# filter by start*mid*
-ftrace_filter_check 'mutex*try*' '^mutex.*try.*'
+ftrace_filter_check 'mutex*unl*' '^mutex.*unl.*'
# Advanced full-glob matching feature is recently supported.
# Skip the tests if we are sure the kernel does not support it.
diff --git a/tools/testing/selftests/ftrace/test.d/functions b/tools/testing/selftests/ftrace/test.d/functions
index a1052bf460fc..e8e718139294 100644
--- a/tools/testing/selftests/ftrace/test.d/functions
+++ b/tools/testing/selftests/ftrace/test.d/functions
@@ -104,7 +104,7 @@ clear_dynamic_events() { # reset all current dynamic events
done
}
-initialize_ftrace() { # Reset ftrace to initial-state
+initialize_system() { # Reset ftrace to initial-state
# As the initial state, ftrace will be set to nop tracer,
# no events, no triggers, no filters, no function filters,
# no probes, and tracing on.
@@ -134,8 +134,8 @@ initialize_ftrace() { # Reset ftrace to initial-state
enable_tracing
}
-finish_ftrace() {
- initialize_ftrace
+finish_system() {
+ initialize_system
# And recover it to default.
[ -f options/pause-on-trace ] && echo 0 > options/pause-on-trace
}
diff --git a/tools/testing/selftests/futex/functional/.gitignore b/tools/testing/selftests/futex/functional/.gitignore
index 7b24ae89594a..776ad658f75e 100644
--- a/tools/testing/selftests/futex/functional/.gitignore
+++ b/tools/testing/selftests/futex/functional/.gitignore
@@ -11,3 +11,4 @@ futex_wait_timeout
futex_wait_uninitialized_heap
futex_wait_wouldblock
futex_waitv
+futex_numa
diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
index 8cfb87f7f7c5..490ace1f017e 100644
--- a/tools/testing/selftests/futex/functional/Makefile
+++ b/tools/testing/selftests/futex/functional/Makefile
@@ -1,12 +1,14 @@
# SPDX-License-Identifier: GPL-2.0
+PKG_CONFIG ?= pkg-config
+LIBNUMA_TEST = $(shell sh -c "$(PKG_CONFIG) numa --atleast-version 2.0.16 > /dev/null 2>&1 && echo SUFFICIENT || echo NO")
+
INCLUDES := -I../include -I../../ $(KHDR_INCLUDES)
-CFLAGS := $(CFLAGS) -g -O2 -Wall -pthread $(INCLUDES) $(KHDR_INCLUDES)
+CFLAGS := $(CFLAGS) -g -O2 -Wall -pthread -D_FILE_OFFSET_BITS=64 -D_TIME_BITS=64 $(INCLUDES) $(KHDR_INCLUDES) -DLIBNUMA_VER_$(LIBNUMA_TEST)=1
LDLIBS := -lpthread -lrt -lnuma
LOCAL_HDRS := \
../include/futextest.h \
- ../include/atomic.h \
- ../include/logging.h
+ ../include/atomic.h
TEST_GEN_PROGS := \
futex_wait_timeout \
futex_wait_wouldblock \
diff --git a/tools/testing/selftests/futex/functional/futex_numa.c b/tools/testing/selftests/futex/functional/futex_numa.c
index f29e4d627e79..e0a33510ccb6 100644
--- a/tools/testing/selftests/futex/functional/futex_numa.c
+++ b/tools/testing/selftests/futex/functional/futex_numa.c
@@ -5,9 +5,10 @@
#include <sys/mman.h>
#include <fcntl.h>
#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
#include <time.h>
#include <assert.h>
-#include "logging.h"
#include "futextest.h"
#include "futex2test.h"
diff --git a/tools/testing/selftests/futex/functional/futex_numa_mpol.c b/tools/testing/selftests/futex/functional/futex_numa_mpol.c
index 20a9d3ecf743..ab8555752137 100644
--- a/tools/testing/selftests/futex/functional/futex_numa_mpol.c
+++ b/tools/testing/selftests/futex/functional/futex_numa_mpol.c
@@ -16,9 +16,9 @@
#include <linux/futex.h>
#include <sys/mman.h>
-#include "logging.h"
#include "futextest.h"
#include "futex2test.h"
+#include "kselftest_harness.h"
#define MAX_THREADS 64
@@ -77,7 +77,7 @@ static void join_max_threads(void)
}
}
-static void __test_futex(void *futex_ptr, int must_fail, unsigned int futex_flags)
+static void __test_futex(void *futex_ptr, int err_value, unsigned int futex_flags)
{
int to_wake, ret, i, need_exit = 0;
@@ -88,11 +88,17 @@ static void __test_futex(void *futex_ptr, int must_fail, unsigned int futex_flag
do {
ret = futex2_wake(futex_ptr, to_wake, futex_flags);
- if (must_fail) {
- if (ret < 0)
- break;
- ksft_exit_fail_msg("futex2_wake(%d, 0x%x) should fail, but didn't\n",
- to_wake, futex_flags);
+
+ if (err_value) {
+ if (ret >= 0)
+ ksft_exit_fail_msg("futex2_wake(%d, 0x%x) should fail, but didn't\n",
+ to_wake, futex_flags);
+
+ if (errno != err_value)
+ ksft_exit_fail_msg("futex2_wake(%d, 0x%x) expected error was %d, but returned %d (%s)\n",
+ to_wake, futex_flags, err_value, errno, strerror(errno));
+
+ break;
}
if (ret < 0) {
ksft_exit_fail_msg("Failed futex2_wake(%d, 0x%x): %m\n",
@@ -106,12 +112,12 @@ static void __test_futex(void *futex_ptr, int must_fail, unsigned int futex_flag
join_max_threads();
for (i = 0; i < MAX_THREADS; i++) {
- if (must_fail && thread_args[i].result != -1) {
+ if (err_value && thread_args[i].result != -1) {
ksft_print_msg("Thread %d should fail but succeeded (%d)\n",
i, thread_args[i].result);
need_exit = 1;
}
- if (!must_fail && thread_args[i].result != 0) {
+ if (!err_value && thread_args[i].result != 0) {
ksft_print_msg("Thread %d failed (%d)\n", i, thread_args[i].result);
need_exit = 1;
}
@@ -120,58 +126,30 @@ static void __test_futex(void *futex_ptr, int must_fail, unsigned int futex_flag
ksft_exit_fail_msg("Aborting due to earlier errors.\n");
}
-static void test_futex(void *futex_ptr, int must_fail)
+static void test_futex(void *futex_ptr, int err_value)
{
- __test_futex(futex_ptr, must_fail, FUTEX2_SIZE_U32 | FUTEX_PRIVATE_FLAG | FUTEX2_NUMA);
+ __test_futex(futex_ptr, err_value, FUTEX2_SIZE_U32 | FUTEX_PRIVATE_FLAG | FUTEX2_NUMA);
}
-static void test_futex_mpol(void *futex_ptr, int must_fail)
+static void test_futex_mpol(void *futex_ptr, int err_value)
{
- __test_futex(futex_ptr, must_fail, FUTEX2_SIZE_U32 | FUTEX_PRIVATE_FLAG | FUTEX2_NUMA | FUTEX2_MPOL);
+ __test_futex(futex_ptr, err_value, FUTEX2_SIZE_U32 | FUTEX_PRIVATE_FLAG | FUTEX2_NUMA | FUTEX2_MPOL);
}
-static void usage(char *prog)
-{
- printf("Usage: %s\n", prog);
- printf(" -c Use color\n");
- printf(" -h Display this help message\n");
- printf(" -v L Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
- VQUIET, VCRITICAL, VINFO);
-}
-
-int main(int argc, char *argv[])
+TEST(futex_numa_mpol)
{
struct futex32_numa *futex_numa;
- int mem_size, i;
void *futex_ptr;
- char c;
-
- while ((c = getopt(argc, argv, "chv:")) != -1) {
- switch (c) {
- case 'c':
- log_color(1);
- break;
- case 'h':
- usage(basename(argv[0]));
- exit(0);
- break;
- case 'v':
- log_verbosity(atoi(optarg));
- break;
- default:
- usage(basename(argv[0]));
- exit(1);
- }
- }
-
- ksft_print_header();
- ksft_set_plan(1);
+ int mem_size;
mem_size = sysconf(_SC_PAGE_SIZE);
- futex_ptr = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+ futex_ptr = mmap(NULL, mem_size * 2, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
if (futex_ptr == MAP_FAILED)
ksft_exit_fail_msg("mmap() for %d bytes failed\n", mem_size);
+ /* Create an invalid memory region for the "Memory out of range" test */
+ mprotect(futex_ptr + mem_size, mem_size, PROT_NONE);
+
futex_numa = futex_ptr;
ksft_print_msg("Regular test\n");
@@ -182,27 +160,31 @@ int main(int argc, char *argv[])
if (futex_numa->numa == FUTEX_NO_NODE)
ksft_exit_fail_msg("NUMA node is left uninitialized\n");
- ksft_print_msg("Memory too small\n");
- test_futex(futex_ptr + mem_size - 4, 1);
+ /* FUTEX2_NUMA futex must be 8-byte aligned */
+ ksft_print_msg("Mis-aligned futex\n");
+ test_futex(futex_ptr + mem_size - 4, EINVAL);
ksft_print_msg("Memory out of range\n");
- test_futex(futex_ptr + mem_size, 1);
+ test_futex(futex_ptr + mem_size, EFAULT);
futex_numa->numa = FUTEX_NO_NODE;
mprotect(futex_ptr, mem_size, PROT_READ);
ksft_print_msg("Memory, RO\n");
- test_futex(futex_ptr, 1);
+ test_futex(futex_ptr, EFAULT);
mprotect(futex_ptr, mem_size, PROT_NONE);
ksft_print_msg("Memory, no access\n");
- test_futex(futex_ptr, 1);
+ test_futex(futex_ptr, EFAULT);
mprotect(futex_ptr, mem_size, PROT_READ | PROT_WRITE);
ksft_print_msg("Memory back to RW\n");
test_futex(futex_ptr, 0);
+ ksft_test_result_pass("futex2 memory boundary tests passed\n");
+
/* MPOL test. Does not work as expected */
- for (i = 0; i < 4; i++) {
+#ifdef LIBNUMA_VER_SUFFICIENT
+ for (int i = 0; i < 4; i++) {
unsigned long nodemask;
int ret;
@@ -210,6 +192,10 @@ int main(int argc, char *argv[])
ret = mbind(futex_ptr, mem_size, MPOL_BIND, &nodemask,
sizeof(nodemask) * 8, 0);
if (ret == 0) {
+ ret = numa_set_mempolicy_home_node(futex_ptr, mem_size, i, 0);
+ if (ret != 0)
+ ksft_exit_fail_msg("Failed to set home node: %m, %d\n", errno);
+
ksft_print_msg("Node %d test\n", i);
futex_numa->futex = 0;
futex_numa->numa = FUTEX_NO_NODE;
@@ -217,15 +203,17 @@ int main(int argc, char *argv[])
ret = futex2_wake(futex_ptr, 0, FUTEX2_SIZE_U32 | FUTEX_PRIVATE_FLAG | FUTEX2_NUMA | FUTEX2_MPOL);
if (ret < 0)
ksft_test_result_fail("Failed to wake 0 with MPOL: %m\n");
- if (0)
- test_futex_mpol(futex_numa, 0);
if (futex_numa->numa != i) {
- ksft_test_result_fail("Returned NUMA node is %d expected %d\n",
- futex_numa->numa, i);
+ ksft_exit_fail_msg("Returned NUMA node is %d expected %d\n",
+ futex_numa->numa, i);
}
}
}
- ksft_test_result_pass("NUMA MPOL tests passed\n");
- ksft_finished();
- return 0;
+ ksft_test_result_pass("futex2 MPOL hints test passed\n");
+#else
+ ksft_test_result_skip("futex2 MPOL hints test requires libnuma 2.0.16+\n");
+#endif
+ munmap(futex_ptr, mem_size * 2);
}
+
+TEST_HARNESS_MAIN
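
The converted test now asserts a specific errno (EINVAL for the mis-aligned futex, EFAULT for the PROT_NONE region) instead of a bare pass/fail. A standalone sketch of the same idea, reduced to the classic futex(2) syscall since glibc provides no wrapper for it; waiting on a PROT_NONE page must fail with EFAULT:

#include <errno.h>
#include <linux/futex.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGE_SIZE);
	void *mem = mmap(NULL, page, PROT_NONE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	long ret;

	if (mem == MAP_FAILED)
		return 1;

	/* The kernel must fault reading the futex word and report
	 * EFAULT rather than blocking. */
	ret = syscall(SYS_futex, mem, FUTEX_WAIT_PRIVATE, 0, NULL, NULL, 0);
	if (ret < 0 && errno == EFAULT)
		printf("got expected EFAULT (%s)\n", strerror(errno));
	else
		printf("unexpected: ret=%ld errno=%d\n", ret, errno);

	munmap(mem, page);
	return 0;
}
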
diff --git a/tools/testing/selftests/futex/functional/futex_priv_hash.c b/tools/testing/selftests/futex/functional/futex_priv_hash.c
index 2dca18fefedc..e8079d7c65e8 100644
--- a/tools/testing/selftests/futex/functional/futex_priv_hash.c
+++ b/tools/testing/selftests/futex/functional/futex_priv_hash.c
@@ -14,7 +14,7 @@
#include <linux/prctl.h>
#include <sys/prctl.h>
-#include "logging.h"
+#include "kselftest_harness.h"
#define MAX_THREADS 64
@@ -26,14 +26,12 @@ static int counter;
#ifndef PR_FUTEX_HASH
#define PR_FUTEX_HASH 78
# define PR_FUTEX_HASH_SET_SLOTS 1
-# define FH_FLAG_IMMUTABLE (1ULL << 0)
# define PR_FUTEX_HASH_GET_SLOTS 2
-# define PR_FUTEX_HASH_GET_IMMUTABLE 3
#endif
-static int futex_hash_slots_set(unsigned int slots, int flags)
+static int futex_hash_slots_set(unsigned int slots)
{
- return prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_SET_SLOTS, slots, flags);
+ return prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_SET_SLOTS, slots, 0);
}
static int futex_hash_slots_get(void)
@@ -41,16 +39,11 @@ static int futex_hash_slots_get(void)
return prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_GET_SLOTS);
}
-static int futex_hash_immutable_get(void)
-{
- return prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_GET_IMMUTABLE);
-}
-
static void futex_hash_slots_set_verify(int slots)
{
int ret;
- ret = futex_hash_slots_set(slots, 0);
+ ret = futex_hash_slots_set(slots);
if (ret != 0) {
ksft_test_result_fail("Failed to set slots to %d: %m\n", slots);
ksft_finished();
@@ -64,13 +57,13 @@ static void futex_hash_slots_set_verify(int slots)
ksft_test_result_pass("SET and GET slots %d passed\n", slots);
}
-static void futex_hash_slots_set_must_fail(int slots, int flags)
+static void futex_hash_slots_set_must_fail(int slots)
{
int ret;
- ret = futex_hash_slots_set(slots, flags);
- ksft_test_result(ret < 0, "futex_hash_slots_set(%d, %d)\n",
- slots, flags);
+ ret = futex_hash_slots_set(slots);
+ ksft_test_result(ret < 0, "futex_hash_slots_set(%d)\n",
+ slots);
}
static void *thread_return_fn(void *arg)
@@ -111,50 +104,38 @@ static void join_max_threads(void)
}
}
-static void usage(char *prog)
+#define SEC_IN_NSEC 1000000000
+#define MSEC_IN_NSEC 1000000
+
+static void futex_dummy_op(void)
{
- printf("Usage: %s\n", prog);
- printf(" -c Use color\n");
- printf(" -g Test global hash instead intead local immutable \n");
- printf(" -h Display this help message\n");
- printf(" -v L Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
- VQUIET, VCRITICAL, VINFO);
+ pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+ struct timespec timeout;
+ int ret;
+
+ pthread_mutex_lock(&lock);
+ clock_gettime(CLOCK_REALTIME, &timeout);
+ timeout.tv_nsec += 100 * MSEC_IN_NSEC;
+ if (timeout.tv_nsec >= SEC_IN_NSEC) {
+ timeout.tv_nsec -= SEC_IN_NSEC;
+ timeout.tv_sec++;
+ }
+ ret = pthread_mutex_timedlock(&lock, &timeout);
+ if (ret == 0)
+ ksft_exit_fail_msg("Successfully locked an already locked mutex.\n");
+
+ if (ret != ETIMEDOUT)
+ ksft_exit_fail_msg("pthread_mutex_timedlock() did not time out: %d.\n", ret);
}
static const char *test_msg_auto_create = "Automatic hash bucket init on thread creation.\n";
static const char *test_msg_auto_inc = "Automatic increase with more than 16 CPUs\n";
-int main(int argc, char *argv[])
+TEST(priv_hash)
{
int futex_slots1, futex_slotsn, online_cpus;
pthread_mutexattr_t mutex_attr_pi;
- int use_global_hash = 0;
- int ret;
- char c;
-
- while ((c = getopt(argc, argv, "cghv:")) != -1) {
- switch (c) {
- case 'c':
- log_color(1);
- break;
- case 'g':
- use_global_hash = 1;
- break;
- case 'h':
- usage(basename(argv[0]));
- exit(0);
- break;
- case 'v':
- log_verbosity(atoi(optarg));
- break;
- default:
- usage(basename(argv[0]));
- exit(1);
- }
- }
-
- ksft_print_header();
- ksft_set_plan(22);
+ int ret, retry = 20;
ret = pthread_mutexattr_init(&mutex_attr_pi);
ret |= pthread_mutexattr_setprotocol(&mutex_attr_pi, PTHREAD_PRIO_INHERIT);
@@ -167,10 +148,6 @@ int main(int argc, char *argv[])
if (ret != 0)
ksft_exit_fail_msg("futex_hash_slots_get() failed: %d, %m\n", ret);
- ret = futex_hash_immutable_get();
- if (ret != 0)
- ksft_exit_fail_msg("futex_hash_immutable_get() failed: %d, %m\n", ret);
-
ksft_test_result_pass("Basic get slots and immutable status.\n");
ret = pthread_create(&threads[0], NULL, thread_return_fn, NULL);
if (ret != 0)
@@ -180,14 +157,14 @@ int main(int argc, char *argv[])
if (ret != 0)
ksft_exit_fail_msg("pthread_join() failed: %d, %m\n", ret);
- /* First thread, has to initialiaze private hash */
+ /* First thread, has to initialize private hash */
futex_slots1 = futex_hash_slots_get();
if (futex_slots1 <= 0) {
ksft_print_msg("Current hash buckets: %d\n", futex_slots1);
- ksft_exit_fail_msg(test_msg_auto_create);
+ ksft_exit_fail_msg("%s", test_msg_auto_create);
}
- ksft_test_result_pass(test_msg_auto_create);
+ ksft_test_result_pass("%s", test_msg_auto_create);
online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
ret = pthread_barrier_init(&barrier_main, NULL, MAX_THREADS + 1);
@@ -208,15 +185,31 @@ int main(int argc, char *argv[])
*/
ksft_print_msg("Online CPUs: %d\n", online_cpus);
if (online_cpus > 16) {
+retry_getslots:
futex_slotsn = futex_hash_slots_get();
if (futex_slotsn < 0 || futex_slots1 == futex_slotsn) {
+ retry--;
+ /*
+ * Auto scaling on thread creation can be slightly delayed
+ * because it waits for an RCU grace period twice. The new
+ * private hash is assigned upon the first futex operation
+ * after the grace period.
+ * To cover all this for testing purposes, the function
+ * below acquires a lock and then tries to acquire it again
+ * with a 100ms timeout, which must time out. This ensures we
+ * sleep for 100ms and issue a futex operation.
+ */
+ if (retry > 0) {
+ futex_dummy_op();
+ goto retry_getslots;
+ }
ksft_print_msg("Expected increase of hash buckets but got: %d -> %d\n",
futex_slots1, futex_slotsn);
- ksft_exit_fail_msg(test_msg_auto_inc);
+ ksft_exit_fail_msg("%s", test_msg_auto_inc);
}
- ksft_test_result_pass(test_msg_auto_inc);
+ ksft_test_result_pass("%s", test_msg_auto_inc);
} else {
- ksft_test_result_skip(test_msg_auto_inc);
+ ksft_test_result_skip("%s", test_msg_auto_inc);
}
ret = pthread_mutex_unlock(&global_lock);
@@ -227,66 +220,51 @@ int main(int argc, char *argv[])
futex_hash_slots_set_verify(32);
futex_hash_slots_set_verify(16);
- ret = futex_hash_slots_set(15, 0);
+ ret = futex_hash_slots_set(15);
ksft_test_result(ret < 0, "Use 15 slots\n");
futex_hash_slots_set_verify(2);
join_max_threads();
- ksft_test_result(counter == MAX_THREADS, "Created of waited for %d of %d threads\n",
+ ksft_test_result(counter == MAX_THREADS, "Created and waited for %d of %d threads\n",
counter, MAX_THREADS);
counter = 0;
- /* Once the user set something, auto reisze must be disabled */
+ /* Once the user set something, auto resize must be disabled */
ret = pthread_barrier_init(&barrier_main, NULL, MAX_THREADS);
create_max_threads(thread_lock_fn);
join_max_threads();
ret = futex_hash_slots_get();
- ksft_test_result(ret == 2, "No more auto-resize after manaul setting, got %d\n",
+ ksft_test_result(ret == 2, "No more auto-resize after manual setting, got %d\n",
ret);
- futex_hash_slots_set_must_fail(1 << 29, 0);
+ futex_hash_slots_set_must_fail(1 << 29);
+ futex_hash_slots_set_verify(4);
/*
- * Once the private hash has been made immutable or global hash has been requested,
- * then this requested can not be undone.
+ * Once the global hash has been requested, the request cannot
+ * be undone.
*/
- if (use_global_hash) {
- ret = futex_hash_slots_set(0, 0);
- ksft_test_result(ret == 0, "Global hash request\n");
- } else {
- ret = futex_hash_slots_set(4, FH_FLAG_IMMUTABLE);
- ksft_test_result(ret == 0, "Immutable resize to 4\n");
- }
+ ret = futex_hash_slots_set(0);
+ ksft_test_result(ret == 0, "Global hash request\n");
if (ret != 0)
- goto out;
+ return;
- futex_hash_slots_set_must_fail(4, 0);
- futex_hash_slots_set_must_fail(4, FH_FLAG_IMMUTABLE);
- futex_hash_slots_set_must_fail(8, 0);
- futex_hash_slots_set_must_fail(8, FH_FLAG_IMMUTABLE);
- futex_hash_slots_set_must_fail(0, FH_FLAG_IMMUTABLE);
- futex_hash_slots_set_must_fail(6, FH_FLAG_IMMUTABLE);
+ futex_hash_slots_set_must_fail(4);
+ futex_hash_slots_set_must_fail(8);
+ futex_hash_slots_set_must_fail(8);
+ futex_hash_slots_set_must_fail(0);
+ futex_hash_slots_set_must_fail(6);
ret = pthread_barrier_init(&barrier_main, NULL, MAX_THREADS);
- if (ret != 0) {
+ if (ret != 0)
ksft_exit_fail_msg("pthread_barrier_init failed: %m\n");
- return 1;
- }
+
create_max_threads(thread_lock_fn);
join_max_threads();
ret = futex_hash_slots_get();
- if (use_global_hash) {
- ksft_test_result(ret == 0, "Continue to use global hash\n");
- } else {
- ksft_test_result(ret == 4, "Continue to use the 4 hash buckets\n");
- }
-
- ret = futex_hash_immutable_get();
- ksft_test_result(ret == 1, "Hash reports to be immutable\n");
-
-out:
- ksft_finished();
- return 0;
+ ksft_test_result(ret == 0, "Continue to use global hash\n");
}
+
+TEST_HARNESS_MAIN
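
The retry loop above relies on futex_dummy_op() to both sleep ~100ms and issue a futex operation so the resized private hash gets picked up. A condensed, standalone version of that trick; timed-locking a mutex the thread already holds parks it in the kernel on a futex until the deadline expires (build with -pthread):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

#define MSEC_IN_NSEC 1000000L
#define SEC_IN_NSEC  1000000000L

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	struct timespec timeout;
	int ret;

	pthread_mutex_lock(&lock);

	/* Absolute CLOCK_REALTIME deadline 100ms from now, normalized. */
	clock_gettime(CLOCK_REALTIME, &timeout);
	timeout.tv_nsec += 100 * MSEC_IN_NSEC;
	if (timeout.tv_nsec >= SEC_IN_NSEC) {
		timeout.tv_nsec -= SEC_IN_NSEC;
		timeout.tv_sec++;
	}

	/* Locking the mutex we already hold blocks on a futex in the
	 * kernel until the deadline passes. */
	ret = pthread_mutex_timedlock(&lock, &timeout);
	printf("timedlock: %d (%s)\n", ret,
	       ret == ETIMEDOUT ? "timed out as expected" : "unexpected");
	return ret == ETIMEDOUT ? 0 : 1;
}
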
diff --git a/tools/testing/selftests/futex/functional/futex_requeue.c b/tools/testing/selftests/futex/functional/futex_requeue.c
index 51485be6eb2f..35d4be23db5d 100644
--- a/tools/testing/selftests/futex/functional/futex_requeue.c
+++ b/tools/testing/selftests/futex/functional/futex_requeue.c
@@ -7,24 +7,15 @@
#include <pthread.h>
#include <limits.h>
-#include "logging.h"
+
#include "futextest.h"
+#include "kselftest_harness.h"
-#define TEST_NAME "futex-requeue"
#define timeout_ns 30000000
#define WAKE_WAIT_US 10000
volatile futex_t *f1;
-void usage(char *prog)
-{
- printf("Usage: %s\n", prog);
- printf(" -c Use color\n");
- printf(" -h Display this help message\n");
- printf(" -v L Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
- VQUIET, VCRITICAL, VINFO);
-}
-
void *waiterfn(void *arg)
{
struct timespec to;
@@ -38,67 +29,49 @@ void *waiterfn(void *arg)
return NULL;
}
-int main(int argc, char *argv[])
+TEST(requeue_single)
{
- pthread_t waiter[10];
- int res, ret = RET_PASS;
- int c, i;
volatile futex_t _f1 = 0;
volatile futex_t f2 = 0;
+ pthread_t waiter[10];
+ int res;
f1 = &_f1;
- while ((c = getopt(argc, argv, "cht:v:")) != -1) {
- switch (c) {
- case 'c':
- log_color(1);
- break;
- case 'h':
- usage(basename(argv[0]));
- exit(0);
- case 'v':
- log_verbosity(atoi(optarg));
- break;
- default:
- usage(basename(argv[0]));
- exit(1);
- }
- }
-
- ksft_print_header();
- ksft_set_plan(2);
- ksft_print_msg("%s: Test futex_requeue\n",
- basename(argv[0]));
-
/*
* Requeue a waiter from f1 to f2, and wake f2.
*/
if (pthread_create(&waiter[0], NULL, waiterfn, NULL))
- error("pthread_create failed\n", errno);
+ ksft_exit_fail_msg("pthread_create failed\n");
usleep(WAKE_WAIT_US);
- info("Requeuing 1 futex from f1 to f2\n");
+ ksft_print_dbg_msg("Requeuing 1 futex from f1 to f2\n");
res = futex_cmp_requeue(f1, 0, &f2, 0, 1, 0);
- if (res != 1) {
+ if (res != 1)
ksft_test_result_fail("futex_requeue simple returned: %d %s\n",
res ? errno : res,
res ? strerror(errno) : "");
- ret = RET_FAIL;
- }
-
- info("Waking 1 futex at f2\n");
+ ksft_print_dbg_msg("Waking 1 futex at f2\n");
res = futex_wake(&f2, 1, 0);
if (res != 1) {
ksft_test_result_fail("futex_requeue simple returned: %d %s\n",
res ? errno : res,
res ? strerror(errno) : "");
- ret = RET_FAIL;
} else {
ksft_test_result_pass("futex_requeue simple succeeds\n");
}
+}
+
+TEST(requeue_multiple)
+{
+ volatile futex_t _f1 = 0;
+ volatile futex_t f2 = 0;
+ pthread_t waiter[10];
+ int res, i;
+ f1 = &_f1;
/*
* Create 10 waiters at f1. At futex_requeue, wake 3 and requeue 7.
@@ -106,31 +79,28 @@ int main(int argc, char *argv[])
*/
for (i = 0; i < 10; i++) {
if (pthread_create(&waiter[i], NULL, waiterfn, NULL))
- error("pthread_create failed\n", errno);
+ ksft_exit_fail_msg("pthread_create failed\n");
}
usleep(WAKE_WAIT_US);
- info("Waking 3 futexes at f1 and requeuing 7 futexes from f1 to f2\n");
+ ksft_print_dbg_msg("Waking 3 futexes at f1 and requeuing 7 futexes from f1 to f2\n");
res = futex_cmp_requeue(f1, 0, &f2, 3, 7, 0);
if (res != 10) {
ksft_test_result_fail("futex_requeue many returned: %d %s\n",
res ? errno : res,
res ? strerror(errno) : "");
- ret = RET_FAIL;
}
- info("Waking INT_MAX futexes at f2\n");
+ ksft_print_dbg_msg("Waking INT_MAX futexes at f2\n");
res = futex_wake(&f2, INT_MAX, 0);
if (res != 7) {
ksft_test_result_fail("futex_requeue many returned: %d %s\n",
res ? errno : res,
res ? strerror(errno) : "");
- ret = RET_FAIL;
} else {
ksft_test_result_pass("futex_requeue many succeeds\n");
}
-
- ksft_print_cnts();
- return ret;
}
+
+TEST_HARNESS_MAIN
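
Both tests key off the syscall return values: futex_cmp_requeue() reports the number of woken plus requeued waiters, and futex_wake() the number woken. A standalone sketch of that counting, reduced to plain FUTEX_WAIT/FUTEX_WAKE via raw futex(2); the 10ms settle delay mirrors WAKE_WAIT_US and is just as crude (build with -pthread, helper names illustrative):

#include <errno.h>
#include <limits.h>
#include <linux/futex.h>
#include <pthread.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static unsigned int futex_word;

static long fwake(int nr)
{
	return syscall(SYS_futex, &futex_word, FUTEX_WAKE_PRIVATE, nr,
		       NULL, NULL, 0);
}

static void *waiter(void *arg)
{
	/* Block while the futex word still reads 0. */
	syscall(SYS_futex, &futex_word, FUTEX_WAIT_PRIVATE, 0, NULL, NULL, 0);
	return NULL;
}

int main(void)
{
	pthread_t t[3];
	long woken = 0;
	int i;

	for (i = 0; i < 3; i++)
		pthread_create(&t[i], NULL, waiter, NULL);
	usleep(10000);			/* crude: let all three block */

	woken += fwake(2);		/* return value: 2 waiters woken */
	woken += fwake(INT_MAX);	/* the remaining 1 */
	printf("woke %ld waiters in total\n", woken);

	for (i = 0; i < 3; i++)
		pthread_join(t[i], NULL);
	return woken == 3 ? 0 : 1;
}
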
diff --git a/tools/testing/selftests/futex/functional/futex_requeue_pi.c b/tools/testing/selftests/futex/functional/futex_requeue_pi.c
index 215c6cb539b4..46d2858e15a8 100644
--- a/tools/testing/selftests/futex/functional/futex_requeue_pi.c
+++ b/tools/testing/selftests/futex/functional/futex_requeue_pi.c
@@ -26,11 +26,11 @@
#include <stdlib.h>
#include <signal.h>
#include <string.h>
+
#include "atomic.h"
#include "futextest.h"
-#include "logging.h"
+#include "kselftest_harness.h"
-#define TEST_NAME "futex-requeue-pi"
#define MAX_WAKE_ITERS 1000
#define THREAD_MAX 10
#define SIGNAL_PERIOD_US 100
@@ -42,12 +42,6 @@ futex_t f1 = FUTEX_INITIALIZER;
futex_t f2 = FUTEX_INITIALIZER;
futex_t wake_complete = FUTEX_INITIALIZER;
-/* Test option defaults */
-static long timeout_ns;
-static int broadcast;
-static int owner;
-static int locked;
-
struct thread_arg {
long id;
struct timespec *timeout;
@@ -56,18 +50,73 @@ struct thread_arg {
};
#define THREAD_ARG_INITIALIZER { 0, NULL, 0, 0 }
-void usage(char *prog)
+FIXTURE(args)
{
- printf("Usage: %s\n", prog);
- printf(" -b Broadcast wakeup (all waiters)\n");
- printf(" -c Use color\n");
- printf(" -h Display this help message\n");
- printf(" -l Lock the pi futex across requeue\n");
- printf(" -o Use a third party pi futex owner during requeue (cancels -l)\n");
- printf(" -t N Timeout in nanoseconds (default: 0)\n");
- printf(" -v L Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
- VQUIET, VCRITICAL, VINFO);
-}
+};
+
+FIXTURE_SETUP(args)
+{
+};
+
+FIXTURE_TEARDOWN(args)
+{
+};
+
+FIXTURE_VARIANT(args)
+{
+ long timeout_ns;
+ bool broadcast;
+ bool owner;
+ bool locked;
+};
+
+/*
+ * For a given timeout value, this macro creates a test input with all the
+ * possible combinations of valid arguments
+ */
+#define FIXTURE_VARIANT_ADD_TIMEOUT(timeout) \
+ \
+FIXTURE_VARIANT_ADD(args, t_##timeout) \
+{ \
+ .timeout_ns = timeout, \
+}; \
+ \
+FIXTURE_VARIANT_ADD(args, t_##timeout##_broadcast) \
+{ \
+ .timeout_ns = timeout, \
+ .broadcast = true, \
+}; \
+ \
+FIXTURE_VARIANT_ADD(args, t_##timeout##_broadcast_locked) \
+{ \
+ .timeout_ns = timeout, \
+ .broadcast = true, \
+ .locked = true, \
+}; \
+ \
+FIXTURE_VARIANT_ADD(args, t_##timeout##_broadcast_owner) \
+{ \
+ .timeout_ns = timeout, \
+ .broadcast = true, \
+ .owner = true, \
+}; \
+ \
+FIXTURE_VARIANT_ADD(args, t_##timeout##_locked) \
+{ \
+ .timeout_ns = timeout, \
+ .locked = true, \
+}; \
+ \
+FIXTURE_VARIANT_ADD(args, t_##timeout##_owner) \
+{ \
+ .timeout_ns = timeout, \
+ .owner = true, \
+}; \
+
+FIXTURE_VARIANT_ADD_TIMEOUT(0);
+FIXTURE_VARIANT_ADD_TIMEOUT(5000);
+FIXTURE_VARIANT_ADD_TIMEOUT(500000);
+FIXTURE_VARIANT_ADD_TIMEOUT(2000000000);
int create_rt_thread(pthread_t *pth, void*(*func)(void *), void *arg,
int policy, int prio)
@@ -81,26 +130,26 @@ int create_rt_thread(pthread_t *pth, void*(*func)(void *), void *arg,
ret = pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
if (ret) {
- error("pthread_attr_setinheritsched\n", ret);
+ ksft_exit_fail_msg("pthread_attr_setinheritsched\n");
return -1;
}
ret = pthread_attr_setschedpolicy(&attr, policy);
if (ret) {
- error("pthread_attr_setschedpolicy\n", ret);
+ ksft_exit_fail_msg("pthread_attr_setschedpolicy\n");
return -1;
}
schedp.sched_priority = prio;
ret = pthread_attr_setschedparam(&attr, &schedp);
if (ret) {
- error("pthread_attr_setschedparam\n", ret);
+ ksft_exit_fail_msg("pthread_attr_setschedparam\n");
return -1;
}
ret = pthread_create(pth, &attr, func, arg);
if (ret) {
- error("pthread_create\n", ret);
+ ksft_exit_fail_msg("pthread_create\n");
return -1;
}
return 0;
@@ -112,7 +161,7 @@ void *waiterfn(void *arg)
struct thread_arg *args = (struct thread_arg *)arg;
futex_t old_val;
- info("Waiter %ld: running\n", args->id);
+ ksft_print_dbg_msg("Waiter %ld: running\n", args->id);
/* Each thread sleeps for a different amount of time
* This is to avoid races, because we don't lock the
* external mutex here */
@@ -120,26 +169,25 @@ void *waiterfn(void *arg)
old_val = f1;
atomic_inc(&waiters_blocked);
- info("Calling futex_wait_requeue_pi: %p (%u) -> %p\n",
+ ksft_print_dbg_msg("Calling futex_wait_requeue_pi: %p (%u) -> %p\n",
&f1, f1, &f2);
args->ret = futex_wait_requeue_pi(&f1, old_val, &f2, args->timeout,
FUTEX_PRIVATE_FLAG);
- info("waiter %ld woke with %d %s\n", args->id, args->ret,
+ ksft_print_dbg_msg("waiter %ld woke with %d %s\n", args->id, args->ret,
args->ret < 0 ? strerror(errno) : "");
atomic_inc(&waiters_woken);
if (args->ret < 0) {
if (args->timeout && errno == ETIMEDOUT)
args->ret = 0;
else {
- args->ret = RET_ERROR;
- error("futex_wait_requeue_pi\n", errno);
+ ksft_exit_fail_msg("futex_wait_requeue_pi\n");
}
futex_lock_pi(&f2, NULL, 0, FUTEX_PRIVATE_FLAG);
}
futex_unlock_pi(&f2, FUTEX_PRIVATE_FLAG);
- info("Waiter %ld: exiting with %d\n", args->id, args->ret);
+ ksft_print_dbg_msg("Waiter %ld: exiting with %d\n", args->id, args->ret);
pthread_exit((void *)&args->ret);
}
@@ -152,14 +200,14 @@ void *broadcast_wakerfn(void *arg)
int nr_wake = 1;
int i = 0;
- info("Waker: waiting for waiters to block\n");
+ ksft_print_dbg_msg("Waker: waiting for waiters to block\n");
while (waiters_blocked.val < THREAD_MAX)
usleep(1000);
usleep(1000);
- info("Waker: Calling broadcast\n");
+ ksft_print_dbg_msg("Waker: Calling broadcast\n");
if (args->lock) {
- info("Calling FUTEX_LOCK_PI on mutex=%x @ %p\n", f2, &f2);
+ ksft_print_dbg_msg("Calling FUTEX_LOCK_PI on mutex=%x @ %p\n", f2, &f2);
futex_lock_pi(&f2, NULL, 0, FUTEX_PRIVATE_FLAG);
}
continue_requeue:
@@ -167,16 +215,14 @@ void *broadcast_wakerfn(void *arg)
args->ret = futex_cmp_requeue_pi(&f1, old_val, &f2, nr_wake, nr_requeue,
FUTEX_PRIVATE_FLAG);
if (args->ret < 0) {
- args->ret = RET_ERROR;
- error("FUTEX_CMP_REQUEUE_PI failed\n", errno);
+ ksft_exit_fail_msg("FUTEX_CMP_REQUEUE_PI failed\n");
} else if (++i < MAX_WAKE_ITERS) {
task_count += args->ret;
if (task_count < THREAD_MAX - waiters_woken.val)
goto continue_requeue;
} else {
- error("max broadcast iterations (%d) reached with %d/%d tasks woken or requeued\n",
- 0, MAX_WAKE_ITERS, task_count, THREAD_MAX);
- args->ret = RET_ERROR;
+ ksft_exit_fail_msg("max broadcast iterations (%d) reached with %d/%d tasks woken or requeued\n",
+ MAX_WAKE_ITERS, task_count, THREAD_MAX);
}
futex_wake(&wake_complete, 1, FUTEX_PRIVATE_FLAG);
@@ -187,7 +233,7 @@ void *broadcast_wakerfn(void *arg)
if (args->ret > 0)
args->ret = task_count;
- info("Waker: exiting with %d\n", args->ret);
+ ksft_print_dbg_msg("Waker: exiting with %d\n", args->ret);
pthread_exit((void *)&args->ret);
}
@@ -200,20 +246,20 @@ void *signal_wakerfn(void *arg)
int nr_wake = 1;
int i = 0;
- info("Waker: waiting for waiters to block\n");
+ ksft_print_dbg_msg("Waker: waiting for waiters to block\n");
while (waiters_blocked.val < THREAD_MAX)
usleep(1000);
usleep(1000);
while (task_count < THREAD_MAX && waiters_woken.val < THREAD_MAX) {
- info("task_count: %d, waiters_woken: %d\n",
+ ksft_print_dbg_msg("task_count: %d, waiters_woken: %d\n",
task_count, waiters_woken.val);
if (args->lock) {
- info("Calling FUTEX_LOCK_PI on mutex=%x @ %p\n",
- f2, &f2);
+ ksft_print_dbg_msg("Calling FUTEX_LOCK_PI on mutex=%x @ %p\n",
+ f2, &f2);
futex_lock_pi(&f2, NULL, 0, FUTEX_PRIVATE_FLAG);
}
- info("Waker: Calling signal\n");
+ ksft_print_dbg_msg("Waker: Calling signal\n");
/* cond_signal */
old_val = f1;
args->ret = futex_cmp_requeue_pi(&f1, old_val, &f2,
@@ -221,28 +267,23 @@ void *signal_wakerfn(void *arg)
FUTEX_PRIVATE_FLAG);
if (args->ret < 0)
args->ret = -errno;
- info("futex: %x\n", f2);
+ ksft_print_dbg_msg("futex: %x\n", f2);
if (args->lock) {
- info("Calling FUTEX_UNLOCK_PI on mutex=%x @ %p\n",
- f2, &f2);
+ ksft_print_dbg_msg("Calling FUTEX_UNLOCK_PI on mutex=%x @ %p\n",
+ f2, &f2);
futex_unlock_pi(&f2, FUTEX_PRIVATE_FLAG);
}
- info("futex: %x\n", f2);
- if (args->ret < 0) {
- error("FUTEX_CMP_REQUEUE_PI failed\n", errno);
- args->ret = RET_ERROR;
- break;
- }
+ ksft_print_dbg_msg("futex: %x\n", f2);
+ if (args->ret < 0)
+ ksft_exit_fail_msg("FUTEX_CMP_REQUEUE_PI failed\n");
task_count += args->ret;
usleep(SIGNAL_PERIOD_US);
i++;
/* we have to loop at least THREAD_MAX times */
if (i > MAX_WAKE_ITERS + THREAD_MAX) {
- error("max signaling iterations (%d) reached, giving up on pending waiters.\n",
- 0, MAX_WAKE_ITERS + THREAD_MAX);
- args->ret = RET_ERROR;
- break;
+ ksft_exit_fail_msg("max signaling iterations (%d) reached, giving up on pending waiters.\n",
+ MAX_WAKE_ITERS + THREAD_MAX);
}
}
@@ -251,8 +292,8 @@ void *signal_wakerfn(void *arg)
if (args->ret >= 0)
args->ret = task_count;
- info("Waker: exiting with %d\n", args->ret);
- info("Waker: waiters_woken: %d\n", waiters_woken.val);
+ ksft_print_dbg_msg("Waker: exiting with %d\n", args->ret);
+ ksft_print_dbg_msg("Waker: waiters_woken: %d\n", waiters_woken.val);
pthread_exit((void *)&args->ret);
}
@@ -269,35 +310,40 @@ void *third_party_blocker(void *arg)
ret2 = futex_unlock_pi(&f2, FUTEX_PRIVATE_FLAG);
out:
- if (args->ret || ret2) {
- error("third_party_blocker() futex error", 0);
- args->ret = RET_ERROR;
- }
+ if (args->ret || ret2)
+ ksft_exit_fail_msg("third_party_blocker() futex error");
pthread_exit((void *)&args->ret);
}
-int unit_test(int broadcast, long lock, int third_party_owner, long timeout_ns)
+TEST_F(args, futex_requeue_pi)
{
- void *(*wakerfn)(void *) = signal_wakerfn;
struct thread_arg blocker_arg = THREAD_ARG_INITIALIZER;
struct thread_arg waker_arg = THREAD_ARG_INITIALIZER;
pthread_t waiter[THREAD_MAX], waker, blocker;
- struct timespec ts, *tsp = NULL;
+ void *(*wakerfn)(void *) = signal_wakerfn;
+ bool third_party_owner = variant->owner;
+ long timeout_ns = variant->timeout_ns;
+ bool broadcast = variant->broadcast;
struct thread_arg args[THREAD_MAX];
- int *waiter_ret;
- int i, ret = RET_PASS;
+ struct timespec ts, *tsp = NULL;
+ bool lock = variant->locked;
+ int *waiter_ret, i, ret = 0;
+
+ ksft_print_msg(
+ "\tArguments: broadcast=%d locked=%d owner=%d timeout=%ldns\n",
+ broadcast, lock, third_party_owner, timeout_ns);
if (timeout_ns) {
time_t secs;
- info("timeout_ns = %ld\n", timeout_ns);
+ ksft_print_dbg_msg("timeout_ns = %ld\n", timeout_ns);
ret = clock_gettime(CLOCK_MONOTONIC, &ts);
secs = (ts.tv_nsec + timeout_ns) / 1000000000;
ts.tv_nsec = ((int64_t)ts.tv_nsec + timeout_ns) % 1000000000;
ts.tv_sec += secs;
- info("ts.tv_sec = %ld\n", ts.tv_sec);
- info("ts.tv_nsec = %ld\n", ts.tv_nsec);
+ ksft_print_dbg_msg("ts.tv_sec = %ld\n", ts.tv_sec);
+ ksft_print_dbg_msg("ts.tv_nsec = %ld\n", ts.tv_nsec);
tsp = &ts;
}
@@ -307,10 +353,7 @@ int unit_test(int broadcast, long lock, int third_party_owner, long timeout_ns)
if (third_party_owner) {
if (create_rt_thread(&blocker, third_party_blocker,
(void *)&blocker_arg, SCHED_FIFO, 1)) {
- error("Creating third party blocker thread failed\n",
- errno);
- ret = RET_ERROR;
- goto out;
+ ksft_exit_fail_msg("Creating third party blocker thread failed\n");
}
}
@@ -318,20 +361,16 @@ int unit_test(int broadcast, long lock, int third_party_owner, long timeout_ns)
for (i = 0; i < THREAD_MAX; i++) {
args[i].id = i;
args[i].timeout = tsp;
- info("Starting thread %d\n", i);
+ ksft_print_dbg_msg("Starting thread %d\n", i);
if (create_rt_thread(&waiter[i], waiterfn, (void *)&args[i],
SCHED_FIFO, 1)) {
- error("Creating waiting thread failed\n", errno);
- ret = RET_ERROR;
- goto out;
+ ksft_exit_fail_msg("Creating waiting thread failed\n");
}
}
waker_arg.lock = lock;
if (create_rt_thread(&waker, wakerfn, (void *)&waker_arg,
SCHED_FIFO, 1)) {
- error("Creating waker thread failed\n", errno);
- ret = RET_ERROR;
- goto out;
+ ksft_exit_fail_msg("Creating waker thread failed\n");
}
/* Wait for threads to finish */
@@ -345,7 +384,6 @@ int unit_test(int broadcast, long lock, int third_party_owner, long timeout_ns)
pthread_join(blocker, NULL);
pthread_join(waker, NULL);
-out:
if (!ret) {
if (*waiter_ret)
ret = *waiter_ret;
@@ -355,66 +393,8 @@ out:
ret = blocker_arg.ret;
}
- return ret;
+ if (ret)
+ ksft_test_result_fail("fail");
}
-int main(int argc, char *argv[])
-{
- char *test_name;
- int c, ret;
-
- while ((c = getopt(argc, argv, "bchlot:v:")) != -1) {
- switch (c) {
- case 'b':
- broadcast = 1;
- break;
- case 'c':
- log_color(1);
- break;
- case 'h':
- usage(basename(argv[0]));
- exit(0);
- case 'l':
- locked = 1;
- break;
- case 'o':
- owner = 1;
- locked = 0;
- break;
- case 't':
- timeout_ns = atoi(optarg);
- break;
- case 'v':
- log_verbosity(atoi(optarg));
- break;
- default:
- usage(basename(argv[0]));
- exit(1);
- }
- }
-
- ksft_print_header();
- ksft_set_plan(1);
- ksft_print_msg("%s: Test requeue functionality\n", basename(argv[0]));
- ksft_print_msg(
- "\tArguments: broadcast=%d locked=%d owner=%d timeout=%ldns\n",
- broadcast, locked, owner, timeout_ns);
-
- ret = asprintf(&test_name,
- "%s broadcast=%d locked=%d owner=%d timeout=%ldns",
- TEST_NAME, broadcast, locked, owner, timeout_ns);
- if (ret < 0) {
- ksft_print_msg("Failed to generate test name\n");
- test_name = TEST_NAME;
- }
-
- /*
- * FIXME: unit_test is obsolete now that we parse options and the
- * various style of runs are done by run.sh - simplify the code and move
- * unit_test into main()
- */
- ret = unit_test(broadcast, locked, owner, timeout_ns);
-
- print_result(test_name, ret);
- return ret;
-}
+TEST_HARNESS_MAIN
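
The FIXTURE_VARIANT_ADD_TIMEOUT macro above stamps out one named test instance per flag combination. A minimal sketch of the underlying kselftest_harness.h pattern, with illustrative fixture and variant names:

#include <stdbool.h>

#include "kselftest_harness.h"

FIXTURE(demo)
{
};

FIXTURE_SETUP(demo)
{
};

FIXTURE_TEARDOWN(demo)
{
};

FIXTURE_VARIANT(demo)
{
	long timeout_ns;
	bool broadcast;
};

/* Each ADD becomes a separately named test instance. */
FIXTURE_VARIANT_ADD(demo, t_5000)
{
	.timeout_ns = 5000,
};

FIXTURE_VARIANT_ADD(demo, t_5000_broadcast)
{
	.timeout_ns = 5000,
	.broadcast = true,
};

/* The body runs once per variant; parameters arrive via 'variant'. */
TEST_F(demo, one_run_per_variant)
{
	ksft_print_msg("timeout=%ldns broadcast=%d\n",
		       variant->timeout_ns, variant->broadcast);
	ASSERT_GE(variant->timeout_ns, 0);
}

TEST_HARNESS_MAIN
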
diff --git a/tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c b/tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c
index d0a4d332ea44..f686e605359c 100644
--- a/tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c
+++ b/tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c
@@ -23,67 +23,32 @@
#include <stdlib.h>
#include <string.h>
#include <time.h>
-#include "futextest.h"
-#include "logging.h"
-#define TEST_NAME "futex-requeue-pi-mismatched-ops"
+#include "futextest.h"
+#include "kselftest_harness.h"
futex_t f1 = FUTEX_INITIALIZER;
futex_t f2 = FUTEX_INITIALIZER;
int child_ret = 0;
-void usage(char *prog)
-{
- printf("Usage: %s\n", prog);
- printf(" -c Use color\n");
- printf(" -h Display this help message\n");
- printf(" -v L Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
- VQUIET, VCRITICAL, VINFO);
-}
-
void *blocking_child(void *arg)
{
child_ret = futex_wait(&f1, f1, NULL, FUTEX_PRIVATE_FLAG);
if (child_ret < 0) {
child_ret = -errno;
- error("futex_wait\n", errno);
+ ksft_exit_fail_msg("futex_wait\n");
}
return (void *)&child_ret;
}
-int main(int argc, char *argv[])
+TEST(requeue_pi_mismatched_ops)
{
- int ret = RET_PASS;
pthread_t child;
- int c;
+ int ret;
- while ((c = getopt(argc, argv, "chv:")) != -1) {
- switch (c) {
- case 'c':
- log_color(1);
- break;
- case 'h':
- usage(basename(argv[0]));
- exit(0);
- case 'v':
- log_verbosity(atoi(optarg));
- break;
- default:
- usage(basename(argv[0]));
- exit(1);
- }
- }
-
- ksft_print_header();
- ksft_set_plan(1);
- ksft_print_msg("%s: Detect mismatched requeue_pi operations\n",
- basename(argv[0]));
+ if (pthread_create(&child, NULL, blocking_child, NULL))
+ ksft_exit_fail_msg("pthread_create\n");
- if (pthread_create(&child, NULL, blocking_child, NULL)) {
- error("pthread_create\n", errno);
- ret = RET_ERROR;
- goto out;
- }
/* Allow the child to block in the kernel. */
sleep(1);
@@ -102,34 +67,27 @@ int main(int argc, char *argv[])
* FUTEX_WAKE.
*/
ret = futex_wake(&f1, 1, FUTEX_PRIVATE_FLAG);
- if (ret == 1) {
- ret = RET_PASS;
- } else if (ret < 0) {
- error("futex_wake\n", errno);
- ret = RET_ERROR;
- } else {
- error("futex_wake did not wake the child\n", 0);
- ret = RET_ERROR;
- }
+ if (ret == 1)
+ ret = 0;
+ else if (ret < 0)
+ ksft_exit_fail_msg("futex_wake\n");
+ else
+ ksft_exit_fail_msg("futex_wake did not wake the child\n");
} else {
- error("futex_cmp_requeue_pi\n", errno);
- ret = RET_ERROR;
+ ksft_exit_fail_msg("futex_cmp_requeue_pi\n");
}
} else if (ret > 0) {
- fail("futex_cmp_requeue_pi failed to detect the mismatch\n");
- ret = RET_FAIL;
+ ksft_test_result_fail("futex_cmp_requeue_pi failed to detect the mismatch\n");
} else {
- error("futex_cmp_requeue_pi found no waiters\n", 0);
- ret = RET_ERROR;
+ ksft_exit_fail_msg("futex_cmp_requeue_pi found no waiters\n");
}
pthread_join(child, NULL);
- if (!ret)
- ret = child_ret;
-
- out:
- /* If the kernel crashes, we shouldn't return at all. */
- print_result(TEST_NAME, ret);
- return ret;
+ if (!ret && !child_ret)
+ ksft_test_result_pass("futex_requeue_pi_mismatched_ops passed\n");
+ else
+ ksft_test_result_fail("futex_requeue_pi_mismatched_ops failed\n");
}
+
+TEST_HARNESS_MAIN
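
Reduced to raw futex(2) calls, the scenario this test exercises looks roughly like the sketch below: a waiter blocked via FUTEX_WAIT must make FUTEX_CMP_REQUEUE_PI fail with EINVAL, because PI requeue requires the waiter side to have used FUTEX_WAIT_REQUEUE_PI (build with -pthread):

#include <errno.h>
#include <linux/futex.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static unsigned int f1, f2;

static void *plain_waiter(void *arg)
{
	/* Blocks with FUTEX_WAIT, *not* FUTEX_WAIT_REQUEUE_PI. */
	syscall(SYS_futex, &f1, FUTEX_WAIT_PRIVATE, 0, NULL, NULL, 0);
	return NULL;
}

int main(void)
{
	pthread_t child;
	long ret;

	pthread_create(&child, NULL, plain_waiter, NULL);
	sleep(1);		/* let the child block in the kernel */

	/* nr_requeue rides in the timeout slot of futex(2). */
	ret = syscall(SYS_futex, &f1,
		      FUTEX_CMP_REQUEUE_PI | FUTEX_PRIVATE_FLAG,
		      1, (unsigned long)1, &f2, 0);
	if (ret < 0 && errno == EINVAL)
		printf("mismatch detected: %s\n", strerror(errno));
	else
		printf("unexpected: ret=%ld errno=%d\n", ret, errno);

	/* Wake the plain waiter so the program exits cleanly. */
	syscall(SYS_futex, &f1, FUTEX_WAKE_PRIVATE, 1, NULL, NULL, 0);
	pthread_join(child, NULL);
	return 0;
}
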
diff --git a/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c b/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
index c6b8f32990c8..a18ccae73eb1 100644
--- a/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
+++ b/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
@@ -24,11 +24,11 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+
#include "atomic.h"
#include "futextest.h"
-#include "logging.h"
+#include "kselftest_harness.h"
-#define TEST_NAME "futex-requeue-pi-signal-restart"
#define DELAY_US 100
futex_t f1 = FUTEX_INITIALIZER;
@@ -37,15 +37,6 @@ atomic_t requeued = ATOMIC_INITIALIZER;
int waiter_ret = 0;
-void usage(char *prog)
-{
- printf("Usage: %s\n", prog);
- printf(" -c Use color\n");
- printf(" -h Display this help message\n");
- printf(" -v L Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
- VQUIET, VCRITICAL, VINFO);
-}
-
int create_rt_thread(pthread_t *pth, void*(*func)(void *), void *arg,
int policy, int prio)
{
@@ -57,35 +48,28 @@ int create_rt_thread(pthread_t *pth, void*(*func)(void *), void *arg,
memset(&schedp, 0, sizeof(schedp));
ret = pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
- if (ret) {
- error("pthread_attr_setinheritsched\n", ret);
- return -1;
- }
+ if (ret)
+ ksft_exit_fail_msg("pthread_attr_setinheritsched\n");
ret = pthread_attr_setschedpolicy(&attr, policy);
- if (ret) {
- error("pthread_attr_setschedpolicy\n", ret);
- return -1;
- }
+ if (ret)
+ ksft_exit_fail_msg("pthread_attr_setschedpolicy\n");
schedp.sched_priority = prio;
ret = pthread_attr_setschedparam(&attr, &schedp);
- if (ret) {
- error("pthread_attr_setschedparam\n", ret);
- return -1;
- }
+ if (ret)
+ ksft_exit_fail_msg("pthread_attr_setschedparam\n");
ret = pthread_create(pth, &attr, func, arg);
- if (ret) {
- error("pthread_create\n", ret);
- return -1;
- }
+ if (ret)
+ ksft_exit_fail_msg("pthread_create\n");
+
return 0;
}
void handle_signal(int signo)
{
- info("signal received %s requeue\n",
+ ksft_print_dbg_msg("signal received %s requeue\n",
requeued.val ? "after" : "prior to");
}
@@ -94,78 +78,46 @@ void *waiterfn(void *arg)
unsigned int old_val;
int res;
- waiter_ret = RET_PASS;
-
- info("Waiter running\n");
- info("Calling FUTEX_LOCK_PI on f2=%x @ %p\n", f2, &f2);
+ ksft_print_dbg_msg("Waiter running\n");
+ ksft_print_dbg_msg("Calling FUTEX_LOCK_PI on f2=%x @ %p\n", f2, &f2);
old_val = f1;
res = futex_wait_requeue_pi(&f1, old_val, &(f2), NULL,
FUTEX_PRIVATE_FLAG);
if (!requeued.val || errno != EWOULDBLOCK) {
- fail("unexpected return from futex_wait_requeue_pi: %d (%s)\n",
+ ksft_test_result_fail("unexpected return from futex_wait_requeue_pi: %d (%s)\n",
res, strerror(errno));
- info("w2:futex: %x\n", f2);
+ ksft_print_dbg_msg("w2:futex: %x\n", f2);
if (!res)
futex_unlock_pi(&f2, FUTEX_PRIVATE_FLAG);
- waiter_ret = RET_FAIL;
}
- info("Waiter exiting with %d\n", waiter_ret);
pthread_exit(NULL);
}
-int main(int argc, char *argv[])
+TEST(futex_requeue_pi_signal_restart)
{
unsigned int old_val;
struct sigaction sa;
pthread_t waiter;
- int c, res, ret = RET_PASS;
-
- while ((c = getopt(argc, argv, "chv:")) != -1) {
- switch (c) {
- case 'c':
- log_color(1);
- break;
- case 'h':
- usage(basename(argv[0]));
- exit(0);
- case 'v':
- log_verbosity(atoi(optarg));
- break;
- default:
- usage(basename(argv[0]));
- exit(1);
- }
- }
-
- ksft_print_header();
- ksft_set_plan(1);
- ksft_print_msg("%s: Test signal handling during requeue_pi\n",
- basename(argv[0]));
- ksft_print_msg("\tArguments: <none>\n");
+ int res;
sa.sa_handler = handle_signal;
sigemptyset(&sa.sa_mask);
sa.sa_flags = 0;
- if (sigaction(SIGUSR1, &sa, NULL)) {
- error("sigaction\n", errno);
- exit(1);
- }
+ if (sigaction(SIGUSR1, &sa, NULL))
+ ksft_exit_fail_msg("sigaction\n");
- info("m1:f2: %x\n", f2);
- info("Creating waiter\n");
+ ksft_print_dbg_msg("m1:f2: %x\n", f2);
+ ksft_print_dbg_msg("Creating waiter\n");
res = create_rt_thread(&waiter, waiterfn, NULL, SCHED_FIFO, 1);
- if (res) {
- error("Creating waiting thread failed", res);
- ret = RET_ERROR;
- goto out;
- }
+ if (res)
+ ksft_exit_fail_msg("Creating waiting thread failed");
- info("Calling FUTEX_LOCK_PI on f2=%x @ %p\n", f2, &f2);
- info("m2:f2: %x\n", f2);
+ ksft_print_dbg_msg("Calling FUTEX_LOCK_PI on f2=%x @ %p\n", f2, &f2);
+ ksft_print_dbg_msg("m2:f2: %x\n", f2);
futex_lock_pi(&f2, 0, 0, FUTEX_PRIVATE_FLAG);
- info("m3:f2: %x\n", f2);
+ ksft_print_dbg_msg("m3:f2: %x\n", f2);
while (1) {
/*
@@ -173,11 +125,11 @@ int main(int argc, char *argv[])
* restart futex_wait_requeue_pi() in the kernel. Wait for the
* waiter to block on f1 again.
*/
- info("Issuing SIGUSR1 to waiter\n");
+ ksft_print_dbg_msg("Issuing SIGUSR1 to waiter\n");
pthread_kill(waiter, SIGUSR1);
usleep(DELAY_US);
- info("Requeueing waiter via FUTEX_CMP_REQUEUE_PI\n");
+ ksft_print_dbg_msg("Requeueing waiter via FUTEX_CMP_REQUEUE_PI\n");
old_val = f1;
res = futex_cmp_requeue_pi(&f1, old_val, &(f2), 1, 0,
FUTEX_PRIVATE_FLAG);
@@ -191,12 +143,10 @@ int main(int argc, char *argv[])
atomic_set(&requeued, 1);
break;
} else if (res < 0) {
- error("FUTEX_CMP_REQUEUE_PI failed\n", errno);
- ret = RET_ERROR;
- break;
+ ksft_exit_fail_msg("FUTEX_CMP_REQUEUE_PI failed\n");
}
}
- info("m4:f2: %x\n", f2);
+ ksft_print_dbg_msg("m4:f2: %x\n", f2);
/*
* Signal the waiter after requeue, waiter should return from
@@ -204,19 +154,14 @@ int main(int argc, char *argv[])
* futex_unlock_pi() can't happen before the signal wakeup is detected
* in the kernel.
*/
- info("Issuing SIGUSR1 to waiter\n");
+ ksft_print_dbg_msg("Issuing SIGUSR1 to waiter\n");
pthread_kill(waiter, SIGUSR1);
- info("Waiting for waiter to return\n");
+ ksft_print_dbg_msg("Waiting for waiter to return\n");
pthread_join(waiter, NULL);
- info("Calling FUTEX_UNLOCK_PI on mutex=%x @ %p\n", f2, &f2);
+ ksft_print_dbg_msg("Calling FUTEX_UNLOCK_PI on mutex=%x @ %p\n", f2, &f2);
futex_unlock_pi(&f2, FUTEX_PRIVATE_FLAG);
- info("m5:f2: %x\n", f2);
-
- out:
- if (ret == RET_PASS && waiter_ret)
- ret = waiter_ret;
-
- print_result(TEST_NAME, ret);
- return ret;
+ ksft_print_dbg_msg("m5:f2: %x\n", f2);
}
+
+TEST_HARNESS_MAIN
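
For contrast with the in-kernel restart that FUTEX_WAIT_REQUEUE_PI performs (which is what this test exercises), a plain FUTEX_WAIT interrupted by a handled signal without SA_RESTART is not restarted and comes back with EINTR. A standalone sketch (build with -pthread):

#include <errno.h>
#include <linux/futex.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static unsigned int fword;

static void handler(int signo)
{
	/* Nothing to do; delivery alone interrupts the syscall. */
}

static void *waiter(void *arg)
{
	long ret = syscall(SYS_futex, &fword, FUTEX_WAIT_PRIVATE, 0,
			   NULL, NULL, 0);
	if (ret < 0 && errno == EINTR)
		printf("futex_wait interrupted by signal (EINTR)\n");
	else
		printf("futex_wait: ret=%ld errno=%d\n", ret, errno);
	return NULL;
}

int main(void)
{
	struct sigaction sa = { .sa_handler = handler };
	pthread_t thr;

	sigemptyset(&sa.sa_mask);	/* no SA_RESTART: we want EINTR */
	sigaction(SIGUSR1, &sa, NULL);

	pthread_create(&thr, NULL, waiter, NULL);
	usleep(100000);			/* crude: let the waiter block */
	pthread_kill(thr, SIGUSR1);
	pthread_join(thr, NULL);
	return 0;
}
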
diff --git a/tools/testing/selftests/futex/functional/futex_wait.c b/tools/testing/selftests/futex/functional/futex_wait.c
index 685140d9b93d..0e69c53524c1 100644
--- a/tools/testing/selftests/futex/functional/futex_wait.c
+++ b/tools/testing/selftests/futex/functional/futex_wait.c
@@ -9,25 +9,16 @@
#include <sys/shm.h>
#include <sys/mman.h>
#include <fcntl.h>
-#include "logging.h"
+
#include "futextest.h"
+#include "kselftest_harness.h"
-#define TEST_NAME "futex-wait"
#define timeout_ns 30000000
#define WAKE_WAIT_US 10000
#define SHM_PATH "futex_shm_file"
void *futex;
-void usage(char *prog)
-{
- printf("Usage: %s\n", prog);
- printf(" -c Use color\n");
- printf(" -h Display this help message\n");
- printf(" -v L Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
- VQUIET, VCRITICAL, VINFO);
-}
-
static void *waiterfn(void *arg)
{
struct timespec to;
@@ -45,53 +36,37 @@ static void *waiterfn(void *arg)
return NULL;
}
-int main(int argc, char *argv[])
+TEST(private_futex)
{
- int res, ret = RET_PASS, fd, c, shm_id;
- u_int32_t f_private = 0, *shared_data;
unsigned int flags = FUTEX_PRIVATE_FLAG;
+ u_int32_t f_private = 0;
pthread_t waiter;
- void *shm;
+ int res;
futex = &f_private;
- while ((c = getopt(argc, argv, "cht:v:")) != -1) {
- switch (c) {
- case 'c':
- log_color(1);
- break;
- case 'h':
- usage(basename(argv[0]));
- exit(0);
- case 'v':
- log_verbosity(atoi(optarg));
- break;
- default:
- usage(basename(argv[0]));
- exit(1);
- }
- }
-
- ksft_print_header();
- ksft_set_plan(3);
- ksft_print_msg("%s: Test futex_wait\n", basename(argv[0]));
-
/* Testing a private futex */
- info("Calling private futex_wait on futex: %p\n", futex);
+ ksft_print_dbg_msg("Calling private futex_wait on futex: %p\n", futex);
if (pthread_create(&waiter, NULL, waiterfn, (void *) &flags))
- error("pthread_create failed\n", errno);
+ ksft_exit_fail_msg("pthread_create failed\n");
usleep(WAKE_WAIT_US);
- info("Calling private futex_wake on futex: %p\n", futex);
+ ksft_print_dbg_msg("Calling private futex_wake on futex: %p\n", futex);
res = futex_wake(futex, 1, FUTEX_PRIVATE_FLAG);
if (res != 1) {
ksft_test_result_fail("futex_wake private returned: %d %s\n",
errno, strerror(errno));
- ret = RET_FAIL;
} else {
ksft_test_result_pass("futex_wake private succeeds\n");
}
+}
+
+TEST(anon_page)
+{
+ u_int32_t *shared_data;
+ pthread_t waiter;
+ int res, shm_id;
/* Testing an anon page shared memory */
shm_id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0666);
@@ -105,67 +80,65 @@ int main(int argc, char *argv[])
*shared_data = 0;
futex = shared_data;
- info("Calling shared (page anon) futex_wait on futex: %p\n", futex);
+ ksft_print_dbg_msg("Calling shared (page anon) futex_wait on futex: %p\n", futex);
if (pthread_create(&waiter, NULL, waiterfn, NULL))
- error("pthread_create failed\n", errno);
+ ksft_exit_fail_msg("pthread_create failed\n");
usleep(WAKE_WAIT_US);
- info("Calling shared (page anon) futex_wake on futex: %p\n", futex);
+ ksft_print_dbg_msg("Calling shared (page anon) futex_wake on futex: %p\n", futex);
res = futex_wake(futex, 1, 0);
if (res != 1) {
ksft_test_result_fail("futex_wake shared (page anon) returned: %d %s\n",
errno, strerror(errno));
- ret = RET_FAIL;
} else {
ksft_test_result_pass("futex_wake shared (page anon) succeeds\n");
}
+ shmdt(shared_data);
+}
+
+TEST(file_backed)
+{
+ u_int32_t f_private = 0;
+ pthread_t waiter;
+ int res, fd;
+ void *shm;
/* Testing a file backed shared memory */
fd = open(SHM_PATH, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
- if (fd < 0) {
- perror("open");
- exit(1);
- }
+ if (fd < 0)
+ ksft_exit_fail_msg("open");
- if (ftruncate(fd, sizeof(f_private))) {
- perror("ftruncate");
- exit(1);
- }
+ if (ftruncate(fd, sizeof(f_private)))
+ ksft_exit_fail_msg("ftruncate");
shm = mmap(NULL, sizeof(f_private), PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
- if (shm == MAP_FAILED) {
- perror("mmap");
- exit(1);
- }
+ if (shm == MAP_FAILED)
+ ksft_exit_fail_msg("mmap");
memcpy(shm, &f_private, sizeof(f_private));
futex = shm;
- info("Calling shared (file backed) futex_wait on futex: %p\n", futex);
+ ksft_print_dbg_msg("Calling shared (file backed) futex_wait on futex: %p\n", futex);
if (pthread_create(&waiter, NULL, waiterfn, NULL))
- error("pthread_create failed\n", errno);
+ ksft_exit_fail_msg("pthread_create failed\n");
usleep(WAKE_WAIT_US);
- info("Calling shared (file backed) futex_wake on futex: %p\n", futex);
+ ksft_print_dbg_msg("Calling shared (file backed) futex_wake on futex: %p\n", futex);
res = futex_wake(shm, 1, 0);
if (res != 1) {
ksft_test_result_fail("futex_wake shared (file backed) returned: %d %s\n",
errno, strerror(errno));
- ret = RET_FAIL;
} else {
ksft_test_result_pass("futex_wake shared (file backed) succeeds\n");
}
- /* Freeing resources */
- shmdt(shared_data);
munmap(shm, sizeof(f_private));
remove(SHM_PATH);
close(fd);
-
- ksft_print_cnts();
- return ret;
}
+
+TEST_HARNESS_MAIN
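
The three tests above differ only in where the futex word lives, and the wake flag must match the mapping. A standalone sketch of the shared case, where a MAP_SHARED word is woken across fork() and FUTEX_PRIVATE_FLAG must not be used:

#include <linux/futex.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	/* One futex word visible to parent and child. */
	unsigned int *word = mmap(NULL, sizeof(*word),
				  PROT_READ | PROT_WRITE,
				  MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	pid_t pid;
	long ret;

	if (word == MAP_FAILED)
		return 1;
	*word = 0;

	pid = fork();
	if (pid == 0) {
		/* Child blocks; cross-process, so no FUTEX_PRIVATE_FLAG. */
		syscall(SYS_futex, word, FUTEX_WAIT, 0, NULL, NULL, 0);
		_exit(0);
	}

	sleep(1);	/* crude: let the child block */
	ret = syscall(SYS_futex, word, FUTEX_WAKE, 1, NULL, NULL, 0);
	printf("woke %ld waiter(s) across fork()\n", ret);
	waitpid(pid, NULL, 0);
	munmap(word, sizeof(*word));
	return 0;
}
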
diff --git a/tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c b/tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c
index fb4148f23fa3..2a749f9b14eb 100644
--- a/tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c
+++ b/tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c
@@ -27,10 +27,9 @@
#include <libgen.h>
#include <signal.h>
-#include "logging.h"
#include "futextest.h"
+#include "kselftest_harness.h"
-#define TEST_NAME "futex-wait-private-mapped-file"
#define PAGE_SZ 4096
char pad[PAGE_SZ] = {1};
@@ -40,86 +39,44 @@ char pad2[PAGE_SZ] = {1};
#define WAKE_WAIT_US 3000000
struct timespec wait_timeout = { .tv_sec = 5, .tv_nsec = 0};
-void usage(char *prog)
-{
- printf("Usage: %s\n", prog);
- printf(" -c Use color\n");
- printf(" -h Display this help message\n");
- printf(" -v L Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
- VQUIET, VCRITICAL, VINFO);
-}
-
void *thr_futex_wait(void *arg)
{
int ret;
- info("futex wait\n");
+ ksft_print_dbg_msg("futex wait\n");
ret = futex_wait(&val, 1, &wait_timeout, 0);
- if (ret && errno != EWOULDBLOCK && errno != ETIMEDOUT) {
- error("futex error.\n", errno);
- print_result(TEST_NAME, RET_ERROR);
- exit(RET_ERROR);
- }
+ if (ret && errno != EWOULDBLOCK && errno != ETIMEDOUT)
+ ksft_exit_fail_msg("futex error.\n");
if (ret && errno == ETIMEDOUT)
- fail("waiter timedout\n");
+ ksft_exit_fail_msg("waiter timed out\n");
- info("futex_wait: ret = %d, errno = %d\n", ret, errno);
+ ksft_print_dbg_msg("futex_wait: ret = %d, errno = %d\n", ret, errno);
return NULL;
}
-int main(int argc, char **argv)
+TEST(wait_private_mapped_file)
{
pthread_t thr;
- int ret = RET_PASS;
int res;
- int c;
-
- while ((c = getopt(argc, argv, "chv:")) != -1) {
- switch (c) {
- case 'c':
- log_color(1);
- break;
- case 'h':
- usage(basename(argv[0]));
- exit(0);
- case 'v':
- log_verbosity(atoi(optarg));
- break;
- default:
- usage(basename(argv[0]));
- exit(1);
- }
- }
-
- ksft_print_header();
- ksft_set_plan(1);
- ksft_print_msg(
- "%s: Test the futex value of private file mappings in FUTEX_WAIT\n",
- basename(argv[0]));
-
- ret = pthread_create(&thr, NULL, thr_futex_wait, NULL);
- if (ret < 0) {
- fprintf(stderr, "pthread_create error\n");
- ret = RET_ERROR;
- goto out;
- }
-
- info("wait a while\n");
+
+ res = pthread_create(&thr, NULL, thr_futex_wait, NULL);
+ if (res < 0)
+ ksft_exit_fail_msg("pthread_create error\n");
+
+ ksft_print_dbg_msg("wait a while\n");
usleep(WAKE_WAIT_US);
val = 2;
res = futex_wake(&val, 1, 0);
- info("futex_wake %d\n", res);
- if (res != 1) {
- fail("FUTEX_WAKE didn't find the waiting thread.\n");
- ret = RET_FAIL;
- }
+ ksft_print_dbg_msg("futex_wake %d\n", res);
+ if (res != 1)
+ ksft_exit_fail_msg("FUTEX_WAKE didn't find the waiting thread.\n");
- info("join\n");
+ ksft_print_dbg_msg("join\n");
pthread_join(thr, NULL);
- out:
- print_result(TEST_NAME, ret);
- return ret;
+ ksft_test_result_pass("wait_private_mapped_file\n");
}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/futex/functional/futex_wait_timeout.c b/tools/testing/selftests/futex/functional/futex_wait_timeout.c
index d183f878360b..674dd13af421 100644
--- a/tools/testing/selftests/futex/functional/futex_wait_timeout.c
+++ b/tools/testing/selftests/futex/functional/futex_wait_timeout.c
@@ -16,26 +16,15 @@
*****************************************************************************/
#include <pthread.h>
+
#include "futextest.h"
#include "futex2test.h"
-#include "logging.h"
-
-#define TEST_NAME "futex-wait-timeout"
+#include "kselftest_harness.h"
static long timeout_ns = 100000; /* 100us default timeout */
static futex_t futex_pi;
static pthread_barrier_t barrier;
-void usage(char *prog)
-{
- printf("Usage: %s\n", prog);
- printf(" -c Use color\n");
- printf(" -h Display this help message\n");
- printf(" -t N Timeout in nanoseconds (default: 100,000)\n");
- printf(" -v L Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
- VQUIET, VCRITICAL, VINFO);
-}
-
/*
* Get a PI lock and hold it forever, so the main thread lock_pi will block
* and we can test the timeout
@@ -47,13 +36,13 @@ void *get_pi_lock(void *arg)
ret = futex_lock_pi(&futex_pi, NULL, 0, 0);
if (ret != 0)
- error("futex_lock_pi failed\n", ret);
+ ksft_exit_fail_msg("futex_lock_pi failed\n");
pthread_barrier_wait(&barrier);
/* Blocks forever */
ret = futex_wait(&lock, 0, NULL, 0);
- error("futex_wait failed\n", ret);
+ ksft_exit_fail_msg("futex_wait failed\n");
return NULL;
}
@@ -61,12 +50,11 @@ void *get_pi_lock(void *arg)
/*
* Check if the function returned the expected error
*/
-static void test_timeout(int res, int *ret, char *test_name, int err)
+static void test_timeout(int res, char *test_name, int err)
{
if (!res || errno != err) {
ksft_test_result_fail("%s returned %d\n", test_name,
res < 0 ? errno : res);
- *ret = RET_FAIL;
} else {
ksft_test_result_pass("%s succeeds\n", test_name);
}
@@ -78,10 +66,8 @@ static void test_timeout(int res, int *ret, char *test_name, int err)
static int futex_get_abs_timeout(clockid_t clockid, struct timespec *to,
long timeout_ns)
{
- if (clock_gettime(clockid, to)) {
- error("clock_gettime failed\n", errno);
- return errno;
- }
+ if (clock_gettime(clockid, to))
+ ksft_exit_fail_msg("clock_gettime failed\n");
to->tv_nsec += timeout_ns;
@@ -93,83 +79,66 @@ static int futex_get_abs_timeout(clockid_t clockid, struct timespec *to,
return 0;
}
-int main(int argc, char *argv[])
+TEST(wait_bitset)
{
futex_t f1 = FUTEX_INITIALIZER;
- int res, ret = RET_PASS;
struct timespec to;
- pthread_t thread;
- int c;
- struct futex_waitv waitv = {
- .uaddr = (uintptr_t)&f1,
- .val = f1,
- .flags = FUTEX_32,
- .__reserved = 0
- };
-
- while ((c = getopt(argc, argv, "cht:v:")) != -1) {
- switch (c) {
- case 'c':
- log_color(1);
- break;
- case 'h':
- usage(basename(argv[0]));
- exit(0);
- case 't':
- timeout_ns = atoi(optarg);
- break;
- case 'v':
- log_verbosity(atoi(optarg));
- break;
- default:
- usage(basename(argv[0]));
- exit(1);
- }
- }
-
- ksft_print_header();
- ksft_set_plan(9);
- ksft_print_msg("%s: Block on a futex and wait for timeout\n",
- basename(argv[0]));
- ksft_print_msg("\tArguments: timeout=%ldns\n", timeout_ns);
-
- pthread_barrier_init(&barrier, NULL, 2);
- pthread_create(&thread, NULL, get_pi_lock, NULL);
+ int res;
/* initialize relative timeout */
to.tv_sec = 0;
to.tv_nsec = timeout_ns;
res = futex_wait(&f1, f1, &to, 0);
- test_timeout(res, &ret, "futex_wait relative", ETIMEDOUT);
+ test_timeout(res, "futex_wait relative", ETIMEDOUT);
/* FUTEX_WAIT_BITSET with CLOCK_REALTIME */
if (futex_get_abs_timeout(CLOCK_REALTIME, &to, timeout_ns))
- return RET_FAIL;
+ ksft_test_result_error("get_time error");
res = futex_wait_bitset(&f1, f1, &to, 1, FUTEX_CLOCK_REALTIME);
- test_timeout(res, &ret, "futex_wait_bitset realtime", ETIMEDOUT);
+ test_timeout(res, "futex_wait_bitset realtime", ETIMEDOUT);
/* FUTEX_WAIT_BITSET with CLOCK_MONOTONIC */
if (futex_get_abs_timeout(CLOCK_MONOTONIC, &to, timeout_ns))
- return RET_FAIL;
+ ksft_test_result_error("get_time error");
res = futex_wait_bitset(&f1, f1, &to, 1, 0);
- test_timeout(res, &ret, "futex_wait_bitset monotonic", ETIMEDOUT);
+ test_timeout(res, "futex_wait_bitset monotonic", ETIMEDOUT);
+}
+
+TEST(requeue_pi)
+{
+ futex_t f1 = FUTEX_INITIALIZER;
+ struct timespec to;
+ int res;
/* FUTEX_WAIT_REQUEUE_PI with CLOCK_REALTIME */
if (futex_get_abs_timeout(CLOCK_REALTIME, &to, timeout_ns))
- return RET_FAIL;
+ ksft_test_result_error("get_time error");
res = futex_wait_requeue_pi(&f1, f1, &futex_pi, &to, FUTEX_CLOCK_REALTIME);
- test_timeout(res, &ret, "futex_wait_requeue_pi realtime", ETIMEDOUT);
+ test_timeout(res, "futex_wait_requeue_pi realtime", ETIMEDOUT);
/* FUTEX_WAIT_REQUEUE_PI with CLOCK_MONOTONIC */
if (futex_get_abs_timeout(CLOCK_MONOTONIC, &to, timeout_ns))
- return RET_FAIL;
+ ksft_test_result_error("get_time error");
res = futex_wait_requeue_pi(&f1, f1, &futex_pi, &to, 0);
- test_timeout(res, &ret, "futex_wait_requeue_pi monotonic", ETIMEDOUT);
+ test_timeout(res, "futex_wait_requeue_pi monotonic", ETIMEDOUT);
+
+}
+
+TEST(lock_pi)
+{
+ struct timespec to;
+ pthread_t thread;
+ int res;
+
+ /* Create a thread that will lock forever so any waiter will timeout */
+ pthread_barrier_init(&barrier, NULL, 2);
+ pthread_create(&thread, NULL, get_pi_lock, NULL);
/* Wait until the other thread calls futex_lock_pi() */
pthread_barrier_wait(&barrier);
pthread_barrier_destroy(&barrier);
+
/*
* FUTEX_LOCK_PI with CLOCK_REALTIME
* Due to historical reasons, FUTEX_LOCK_PI supports only realtime
@@ -181,26 +150,38 @@ int main(int argc, char *argv[])
* smaller than realtime and the syscall will timeout immediately.
*/
if (futex_get_abs_timeout(CLOCK_REALTIME, &to, timeout_ns))
- return RET_FAIL;
+ ksft_test_result_error("get_time error");
res = futex_lock_pi(&futex_pi, &to, 0, 0);
- test_timeout(res, &ret, "futex_lock_pi realtime", ETIMEDOUT);
+ test_timeout(res, "futex_lock_pi realtime", ETIMEDOUT);
/* Test operations that don't support FUTEX_CLOCK_REALTIME */
res = futex_lock_pi(&futex_pi, NULL, 0, FUTEX_CLOCK_REALTIME);
- test_timeout(res, &ret, "futex_lock_pi invalid timeout flag", ENOSYS);
+ test_timeout(res, "futex_lock_pi invalid timeout flag", ENOSYS);
+}
+
+TEST(waitv)
+{
+ futex_t f1 = FUTEX_INITIALIZER;
+ struct futex_waitv waitv = {
+ .uaddr = (uintptr_t)&f1,
+ .val = f1,
+ .flags = FUTEX_32,
+ .__reserved = 0,
+ };
+ struct timespec to;
+ int res;
/* futex_waitv with CLOCK_MONOTONIC */
if (futex_get_abs_timeout(CLOCK_MONOTONIC, &to, timeout_ns))
- return RET_FAIL;
+ ksft_test_result_error("get_time error");
res = futex_waitv(&waitv, 1, 0, &to, CLOCK_MONOTONIC);
- test_timeout(res, &ret, "futex_waitv monotonic", ETIMEDOUT);
+ test_timeout(res, "futex_waitv monotonic", ETIMEDOUT);
/* futex_waitv with CLOCK_REALTIME */
if (futex_get_abs_timeout(CLOCK_REALTIME, &to, timeout_ns))
- return RET_FAIL;
+ ksft_test_result_error("get_time error");
res = futex_waitv(&waitv, 1, 0, &to, CLOCK_REALTIME);
- test_timeout(res, &ret, "futex_waitv realtime", ETIMEDOUT);
-
- ksft_print_cnts();
- return ret;
+ test_timeout(res, "futex_waitv realtime", ETIMEDOUT);
}
+
+TEST_HARNESS_MAIN
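
futex_get_abs_timeout() above turns a relative timeout into an absolute timespec by folding the nanosecond overflow into tv_sec. The same normalization as a standalone sketch, with the nanosecond sum done in 64 bits so the largest timeouts used here (up to 2000000000ns) cannot overflow a 32-bit tv_nsec addition:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Absolute deadline 'timeout_ns' from now on the given clock,
 * normalized so tv_nsec ends up in [0, 1000000000). */
static void abs_deadline(clockid_t clk, struct timespec *to,
			 int64_t timeout_ns)
{
	int64_t nsec;

	clock_gettime(clk, to);
	nsec = (int64_t)to->tv_nsec + timeout_ns;
	to->tv_sec += nsec / 1000000000;
	to->tv_nsec = nsec % 1000000000;
}

int main(void)
{
	struct timespec to;

	abs_deadline(CLOCK_MONOTONIC, &to, 100000);	/* 100us from now */
	printf("deadline: %lld.%09ld\n",
	       (long long)to.tv_sec, (long)to.tv_nsec);
	return 0;
}
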
diff --git a/tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c b/tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c
index ed9cd07e31c1..b07d68a67f31 100644
--- a/tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c
+++ b/tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c
@@ -29,95 +29,55 @@
#include <linux/futex.h>
#include <libgen.h>
-#include "logging.h"
#include "futextest.h"
+#include "kselftest_harness.h"
-#define TEST_NAME "futex-wait-uninitialized-heap"
#define WAIT_US 5000000
static int child_blocked = 1;
-static int child_ret;
+static bool child_ret;
void *buf;
-void usage(char *prog)
-{
- printf("Usage: %s\n", prog);
- printf(" -c Use color\n");
- printf(" -h Display this help message\n");
- printf(" -v L Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
- VQUIET, VCRITICAL, VINFO);
-}
-
void *wait_thread(void *arg)
{
int res;
- child_ret = RET_PASS;
+ child_ret = true;
res = futex_wait(buf, 1, NULL, 0);
child_blocked = 0;
if (res != 0 && errno != EWOULDBLOCK) {
- error("futex failure\n", errno);
- child_ret = RET_ERROR;
+ ksft_exit_fail_msg("futex failure\n");
+ child_ret = false;
}
pthread_exit(NULL);
}
-int main(int argc, char **argv)
+TEST(futex_wait_uninitialized_heap)
{
- int c, ret = RET_PASS;
long page_size;
pthread_t thr;
-
- while ((c = getopt(argc, argv, "chv:")) != -1) {
- switch (c) {
- case 'c':
- log_color(1);
- break;
- case 'h':
- usage(basename(argv[0]));
- exit(0);
- case 'v':
- log_verbosity(atoi(optarg));
- break;
- default:
- usage(basename(argv[0]));
- exit(1);
- }
- }
+ int ret;
page_size = sysconf(_SC_PAGESIZE);
buf = mmap(NULL, page_size, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
- if (buf == (void *)-1) {
- error("mmap\n", errno);
- exit(1);
- }
-
- ksft_print_header();
- ksft_set_plan(1);
- ksft_print_msg("%s: Test the uninitialized futex value in FUTEX_WAIT\n",
- basename(argv[0]));
-
+ if (buf == (void *)-1)
+ ksft_exit_fail_msg("mmap\n");
ret = pthread_create(&thr, NULL, wait_thread, NULL);
- if (ret) {
- error("pthread_create\n", errno);
- ret = RET_ERROR;
- goto out;
- }
+ if (ret)
+ ksft_exit_fail_msg("pthread_create\n");
- info("waiting %dus for child to return\n", WAIT_US);
+ ksft_print_dbg_msg("waiting %dus for child to return\n", WAIT_US);
usleep(WAIT_US);
- ret = child_ret;
- if (child_blocked) {
- fail("child blocked in kernel\n");
- ret = RET_FAIL;
- }
+ if (child_blocked)
+ ksft_test_result_fail("child blocked in kernel\n");
- out:
- print_result(TEST_NAME, ret);
- return ret;
+ if (!child_ret)
+ ksft_test_result_fail("child error\n");
}
+
+TEST_HARNESS_MAIN
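
As a side note on what this test checks: a fresh anonymous mapping reads as zero, so FUTEX_WAIT with an expected value of 1 must fail immediately with EWOULDBLOCK instead of blocking in the kernel. A hedged stand-alone sketch of that invariant (futex_wait() is the wrapper from futextest.h; the mmap arguments here are the idiomatic ones and differ slightly from the test's):

    #include <errno.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Returns 0 if the kernel correctly refuses to wait on a stale value. */
    static int wait_on_fresh_page(void)
    {
        long page_size = sysconf(_SC_PAGESIZE);
        void *p = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
            return -1;

        /* the page contains 0, but we claim it should contain 1 */
        if (futex_wait(p, 1, NULL, 0) == -1 && errno == EWOULDBLOCK)
            return 0;   /* value mismatch detected, no blocking */
        return 1;       /* blocked or failed in some other way */
    }
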
diff --git a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
index 2d8230da9064..9ff936ecf164 100644
--- a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
+++ b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
@@ -21,72 +21,44 @@
#include <stdlib.h>
#include <string.h>
#include <time.h>
+
#include "futextest.h"
#include "futex2test.h"
-#include "logging.h"
+#include "kselftest_harness.h"
-#define TEST_NAME "futex-wait-wouldblock"
#define timeout_ns 100000
-void usage(char *prog)
-{
- printf("Usage: %s\n", prog);
- printf(" -c Use color\n");
- printf(" -h Display this help message\n");
- printf(" -v L Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
- VQUIET, VCRITICAL, VINFO);
-}
-
-int main(int argc, char *argv[])
+TEST(futex_wait_wouldblock)
{
struct timespec to = {.tv_sec = 0, .tv_nsec = timeout_ns};
futex_t f1 = FUTEX_INITIALIZER;
- int res, ret = RET_PASS;
- int c;
- struct futex_waitv waitv = {
- .uaddr = (uintptr_t)&f1,
- .val = f1+1,
- .flags = FUTEX_32,
- .__reserved = 0
- };
+ int res;
- while ((c = getopt(argc, argv, "cht:v:")) != -1) {
- switch (c) {
- case 'c':
- log_color(1);
- break;
- case 'h':
- usage(basename(argv[0]));
- exit(0);
- case 'v':
- log_verbosity(atoi(optarg));
- break;
- default:
- usage(basename(argv[0]));
- exit(1);
- }
- }
-
- ksft_print_header();
- ksft_set_plan(2);
- ksft_print_msg("%s: Test the unexpected futex value in FUTEX_WAIT\n",
- basename(argv[0]));
-
- info("Calling futex_wait on f1: %u @ %p with val=%u\n", f1, &f1, f1+1);
+ ksft_print_dbg_msg("Calling futex_wait on f1: %u @ %p with val=%u\n", f1, &f1, f1+1);
res = futex_wait(&f1, f1+1, &to, FUTEX_PRIVATE_FLAG);
if (!res || errno != EWOULDBLOCK) {
ksft_test_result_fail("futex_wait returned: %d %s\n",
res ? errno : res,
res ? strerror(errno) : "");
- ret = RET_FAIL;
} else {
ksft_test_result_pass("futex_wait\n");
}
+}
- if (clock_gettime(CLOCK_MONOTONIC, &to)) {
- error("clock_gettime failed\n", errno);
- return errno;
- }
+TEST(futex_waitv_wouldblock)
+{
+ struct timespec to = {.tv_sec = 0, .tv_nsec = timeout_ns};
+ futex_t f1 = FUTEX_INITIALIZER;
+ struct futex_waitv waitv = {
+ .uaddr = (uintptr_t)&f1,
+ .val = f1 + 1,
+ .flags = FUTEX_32,
+ .__reserved = 0,
+ };
+ int res;
+
+ if (clock_gettime(CLOCK_MONOTONIC, &to))
+ ksft_exit_fail_msg("clock_gettime failed %d\n", errno);
to.tv_nsec += timeout_ns;
@@ -95,17 +67,15 @@ int main(int argc, char *argv[])
to.tv_nsec -= 1000000000;
}
- info("Calling futex_waitv on f1: %u @ %p with val=%u\n", f1, &f1, f1+1);
+ ksft_print_dbg_msg("Calling futex_waitv on f1: %u @ %p with val=%u\n", f1, &f1, f1+1);
res = futex_waitv(&waitv, 1, 0, &to, CLOCK_MONOTONIC);
if (!res || errno != EWOULDBLOCK) {
ksft_test_result_fail("futex_waitv returned: %d %s\n",
res ? errno : res,
res ? strerror(errno) : "");
- ret = RET_FAIL;
} else {
ksft_test_result_pass("futex_waitv\n");
}
-
- ksft_print_cnts();
- return ret;
}
+
+TEST_HARNESS_MAIN
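
The conversion pattern used throughout this series is worth spelling out once. TEST() registers a case with kselftest_harness.h and TEST_HARNESS_MAIN supplies main(), which is what lets each file drop its getopt()/usage() boilerplate along with the manual ksft_set_plan()/ksft_print_cnts() bookkeeping. A minimal sketch of the resulting shape (illustrative, not one of the converted tests):

    #include "kselftest_harness.h"

    TEST(example)
    {
        /* harness assertions report and account for the result */
        ASSERT_EQ(1 + 1, 2);
    }

    TEST_HARNESS_MAIN
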
diff --git a/tools/testing/selftests/futex/functional/futex_waitv.c b/tools/testing/selftests/futex/functional/futex_waitv.c
index a94337f677e1..d60876164d4b 100644
--- a/tools/testing/selftests/futex/functional/futex_waitv.c
+++ b/tools/testing/selftests/futex/functional/futex_waitv.c
@@ -15,25 +15,16 @@
#include <pthread.h>
#include <stdint.h>
#include <sys/shm.h>
+
#include "futextest.h"
#include "futex2test.h"
-#include "logging.h"
+#include "kselftest_harness.h"
-#define TEST_NAME "futex-wait"
#define WAKE_WAIT_US 10000
#define NR_FUTEXES 30
static struct futex_waitv waitv[NR_FUTEXES];
u_int32_t futexes[NR_FUTEXES] = {0};
-void usage(char *prog)
-{
- printf("Usage: %s\n", prog);
- printf(" -c Use color\n");
- printf(" -h Display this help message\n");
- printf(" -v L Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
- VQUIET, VCRITICAL, VINFO);
-}
-
void *waiterfn(void *arg)
{
struct timespec to;
@@ -41,7 +32,7 @@ void *waiterfn(void *arg)
/* setting absolute timeout for futex2 */
if (clock_gettime(CLOCK_MONOTONIC, &to))
- error("gettime64 failed\n", errno);
+ ksft_exit_fail_msg("gettime64 failed\n");
to.tv_sec++;
@@ -57,34 +48,10 @@ void *waiterfn(void *arg)
return NULL;
}
-int main(int argc, char *argv[])
+TEST(private_waitv)
{
pthread_t waiter;
- int res, ret = RET_PASS;
- struct timespec to;
- int c, i;
-
- while ((c = getopt(argc, argv, "cht:v:")) != -1) {
- switch (c) {
- case 'c':
- log_color(1);
- break;
- case 'h':
- usage(basename(argv[0]));
- exit(0);
- case 'v':
- log_verbosity(atoi(optarg));
- break;
- default:
- usage(basename(argv[0]));
- exit(1);
- }
- }
-
- ksft_print_header();
- ksft_set_plan(7);
- ksft_print_msg("%s: Test FUTEX_WAITV\n",
- basename(argv[0]));
+ int res, i;
for (i = 0; i < NR_FUTEXES; i++) {
waitv[i].uaddr = (uintptr_t)&futexes[i];
@@ -95,7 +62,7 @@ int main(int argc, char *argv[])
/* Private waitv */
if (pthread_create(&waiter, NULL, waiterfn, NULL))
- error("pthread_create failed\n", errno);
+ ksft_exit_fail_msg("pthread_create failed\n");
usleep(WAKE_WAIT_US);
@@ -104,10 +71,15 @@ int main(int argc, char *argv[])
ksft_test_result_fail("futex_wake private returned: %d %s\n",
res ? errno : res,
res ? strerror(errno) : "");
- ret = RET_FAIL;
} else {
ksft_test_result_pass("futex_waitv private\n");
}
+}
+
+TEST(shared_waitv)
+{
+ pthread_t waiter;
+ int res, i;
/* Shared waitv */
for (i = 0; i < NR_FUTEXES; i++) {
@@ -128,7 +100,7 @@ int main(int argc, char *argv[])
}
if (pthread_create(&waiter, NULL, waiterfn, NULL))
- error("pthread_create failed\n", errno);
+ ksft_exit_fail_msg("pthread_create failed\n");
usleep(WAKE_WAIT_US);
@@ -137,19 +109,24 @@ int main(int argc, char *argv[])
ksft_test_result_fail("futex_wake shared returned: %d %s\n",
res ? errno : res,
res ? strerror(errno) : "");
- ret = RET_FAIL;
} else {
ksft_test_result_pass("futex_waitv shared\n");
}
for (i = 0; i < NR_FUTEXES; i++)
shmdt(u64_to_ptr(waitv[i].uaddr));
+}
+
+TEST(invalid_flag)
+{
+ struct timespec to;
+ int res;
/* Testing a waiter without FUTEX_32 flag */
waitv[0].flags = FUTEX_PRIVATE_FLAG;
if (clock_gettime(CLOCK_MONOTONIC, &to))
- error("gettime64 failed\n", errno);
+ ksft_exit_fail_msg("gettime64 failed\n");
to.tv_sec++;
@@ -158,17 +135,22 @@ int main(int argc, char *argv[])
ksft_test_result_fail("futex_waitv private returned: %d %s\n",
res ? errno : res,
res ? strerror(errno) : "");
- ret = RET_FAIL;
} else {
ksft_test_result_pass("futex_waitv without FUTEX_32\n");
}
+}
+
+TEST(unaligned_address)
+{
+ struct timespec to;
+ int res;
/* Testing a waiter with an unaligned address */
waitv[0].flags = FUTEX_PRIVATE_FLAG | FUTEX_32;
waitv[0].uaddr = 1;
if (clock_gettime(CLOCK_MONOTONIC, &to))
- error("gettime64 failed\n", errno);
+ ksft_exit_fail_msg("gettime64 failed\n");
to.tv_sec++;
@@ -177,16 +159,21 @@ int main(int argc, char *argv[])
ksft_test_result_fail("futex_wake private returned: %d %s\n",
res ? errno : res,
res ? strerror(errno) : "");
- ret = RET_FAIL;
} else {
ksft_test_result_pass("futex_waitv with an unaligned address\n");
}
+}
+
+TEST(null_address)
+{
+ struct timespec to;
+ int res;
/* Testing a NULL address for waiters.uaddr */
waitv[0].uaddr = 0x00000000;
if (clock_gettime(CLOCK_MONOTONIC, &to))
- error("gettime64 failed\n", errno);
+ ksft_exit_fail_msg("gettime64 failed\n");
to.tv_sec++;
@@ -195,14 +182,13 @@ int main(int argc, char *argv[])
ksft_test_result_fail("futex_waitv private returned: %d %s\n",
res ? errno : res,
res ? strerror(errno) : "");
- ret = RET_FAIL;
} else {
ksft_test_result_pass("futex_waitv NULL address in waitv.uaddr\n");
}
/* Testing a NULL address for *waiters */
if (clock_gettime(CLOCK_MONOTONIC, &to))
- error("gettime64 failed\n", errno);
+ ksft_exit_fail_msg("gettime64 failed\n");
to.tv_sec++;
@@ -211,14 +197,19 @@ int main(int argc, char *argv[])
ksft_test_result_fail("futex_waitv private returned: %d %s\n",
res ? errno : res,
res ? strerror(errno) : "");
- ret = RET_FAIL;
} else {
ksft_test_result_pass("futex_waitv NULL address in *waiters\n");
}
+}
+
+TEST(invalid_clockid)
+{
+ struct timespec to;
+ int res;
/* Testing an invalid clockid */
if (clock_gettime(CLOCK_MONOTONIC, &to))
- error("gettime64 failed\n", errno);
+ ksft_exit_fail_msg("gettime64 failed\n");
to.tv_sec++;
@@ -227,11 +218,9 @@ int main(int argc, char *argv[])
ksft_test_result_fail("futex_waitv private returned: %d %s\n",
res ? errno : res,
res ? strerror(errno) : "");
- ret = RET_FAIL;
} else {
ksft_test_result_pass("futex_waitv invalid clockid\n");
}
-
- ksft_print_cnts();
- return ret;
}
+
+TEST_HARNESS_MAIN
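
Each entry handed to futex_waitv() describes one 32-bit futex word, and the tests above build such entries repeatedly. A hedged sketch of the per-entry setup, factored into a helper (the helper name is illustrative; the field semantics follow the uses above):

    #include <stdint.h>

    /* uaddr is a u64 even on 32-bit targets, hence the uintptr_t widening;
     * the kernel is expected to reject a nonzero __reserved with EINVAL. */
    static void init_waiter(struct futex_waitv *w, uint32_t *word)
    {
        w->uaddr = (uintptr_t)word;  /* address of the 32-bit futex word */
        w->val = *word;              /* value the word must still hold */
        w->flags = FUTEX_32;         /* the only supported size flag */
        w->__reserved = 0;
    }
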
diff --git a/tools/testing/selftests/futex/functional/run.sh b/tools/testing/selftests/futex/functional/run.sh
index 81739849f299..e88545c06d57 100755
--- a/tools/testing/selftests/futex/functional/run.sh
+++ b/tools/testing/selftests/futex/functional/run.sh
@@ -18,74 +18,36 @@
#
###############################################################################
-# Test for a color capable console
-if [ -z "$USE_COLOR" ]; then
- tput setf 7 || tput setaf 7
- if [ $? -eq 0 ]; then
- USE_COLOR=1
- tput sgr0
- fi
-fi
-if [ "$USE_COLOR" -eq 1 ]; then
- COLOR="-c"
-fi
-
-
echo
-# requeue pi testing
-# without timeouts
-./futex_requeue_pi $COLOR
-./futex_requeue_pi $COLOR -b
-./futex_requeue_pi $COLOR -b -l
-./futex_requeue_pi $COLOR -b -o
-./futex_requeue_pi $COLOR -l
-./futex_requeue_pi $COLOR -o
-# with timeouts
-./futex_requeue_pi $COLOR -b -l -t 5000
-./futex_requeue_pi $COLOR -l -t 5000
-./futex_requeue_pi $COLOR -b -l -t 500000
-./futex_requeue_pi $COLOR -l -t 500000
-./futex_requeue_pi $COLOR -b -t 5000
-./futex_requeue_pi $COLOR -t 5000
-./futex_requeue_pi $COLOR -b -t 500000
-./futex_requeue_pi $COLOR -t 500000
-./futex_requeue_pi $COLOR -b -o -t 5000
-./futex_requeue_pi $COLOR -l -t 5000
-./futex_requeue_pi $COLOR -b -o -t 500000
-./futex_requeue_pi $COLOR -l -t 500000
-# with long timeout
-./futex_requeue_pi $COLOR -b -l -t 2000000000
-./futex_requeue_pi $COLOR -l -t 2000000000
-
+./futex_requeue_pi
echo
-./futex_requeue_pi_mismatched_ops $COLOR
+./futex_requeue_pi_mismatched_ops
echo
-./futex_requeue_pi_signal_restart $COLOR
+./futex_requeue_pi_signal_restart
echo
-./futex_wait_timeout $COLOR
+./futex_wait_timeout
echo
-./futex_wait_wouldblock $COLOR
+./futex_wait_wouldblock
echo
-./futex_wait_uninitialized_heap $COLOR
-./futex_wait_private_mapped_file $COLOR
+./futex_wait_uninitialized_heap
+./futex_wait_private_mapped_file
echo
-./futex_wait $COLOR
+./futex_wait
echo
-./futex_requeue $COLOR
+./futex_requeue
echo
-./futex_waitv $COLOR
+./futex_waitv
echo
-./futex_priv_hash $COLOR
-./futex_priv_hash -g $COLOR
+./futex_priv_hash
echo
-./futex_numa_mpol $COLOR
+./futex_numa_mpol
diff --git a/tools/testing/selftests/futex/include/futex2test.h b/tools/testing/selftests/futex/include/futex2test.h
index ea79662405bc..1f625b39948a 100644
--- a/tools/testing/selftests/futex/include/futex2test.h
+++ b/tools/testing/selftests/futex/include/futex2test.h
@@ -4,6 +4,7 @@
*
* Copyright 2021 Collabora Ltd.
*/
+#include <linux/time_types.h>
#include <stdint.h>
#define u64_to_ptr(x) ((void *)(uintptr_t)(x))
@@ -65,7 +66,12 @@ struct futex32_numa {
static inline int futex_waitv(volatile struct futex_waitv *waiters, unsigned long nr_waiters,
unsigned long flags, struct timespec *timo, clockid_t clockid)
{
- return syscall(__NR_futex_waitv, waiters, nr_waiters, flags, timo, clockid);
+ struct __kernel_timespec ts = {
+ .tv_sec = timo->tv_sec,
+ .tv_nsec = timo->tv_nsec,
+ };
+
+ return syscall(__NR_futex_waitv, waiters, nr_waiters, flags, &ts, clockid);
}
/*
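
The field-by-field copy above exists because futex_waitv() always takes a struct __kernel_timespec (64-bit tv_sec) from userspace, while the libc struct timespec is only 64-bit on 32-bit targets when _TIME_BITS=64 is in effect. A small stand-alone probe of that size difference (assumes a 32-bit build without -D_TIME_BITS=64 to show the mismatch; on 64-bit targets both lines print 16):

    #include <linux/time_types.h>
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        printf("libc struct timespec:     %zu bytes\n", sizeof(struct timespec));
        printf("struct __kernel_timespec: %zu bytes\n",
               sizeof(struct __kernel_timespec));
        return 0;
    }
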
diff --git a/tools/testing/selftests/futex/include/futextest.h b/tools/testing/selftests/futex/include/futextest.h
index ddbcfc9b7bac..3d48e9789d9f 100644
--- a/tools/testing/selftests/futex/include/futextest.h
+++ b/tools/testing/selftests/futex/include/futextest.h
@@ -47,6 +47,28 @@ typedef volatile u_int32_t futex_t;
FUTEX_PRIVATE_FLAG)
#endif
+/*
+ * SYS_futex is expected to come from the system C library. In glibc, some
+ * 32-bit architectures (e.g. RV32) use a 64-bit time_t and therefore define
+ * only SYS_futex_time64, not SYS_futex. Define SYS_futex as
+ * SYS_futex_time64 in that case so the code keeps compiling and stays
+ * compatible.
+ */
+#if !defined(SYS_futex) && defined(SYS_futex_time64)
+#define SYS_futex SYS_futex_time64
+#endif
+
+/*
+ * On 32-bit systems, building with "-D_FILE_OFFSET_BITS=64 -D_TIME_BITS=64"
+ * (or with a newer compiler that enables this by default) makes the
+ * timestamps 64-bit, yet SYS_futex still points to the 32-bit futex system
+ * call.
+ */
+#if __SIZEOF_POINTER__ == 4 && defined(SYS_futex_time64) && \
+ defined(_TIME_BITS) && _TIME_BITS == 64
+# undef SYS_futex
+# define SYS_futex SYS_futex_time64
+#endif
+
/**
* futex() - SYS_futex syscall wrapper
* @uaddr: address of first futex
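
Taken together, the two new blocks make SYS_futex resolve to whichever syscall number matches the width of the timestamps the build produces. A condensed, equivalent-in-effect sketch of the selection logic:

    #include <sys/syscall.h>

    /* Fall back to the time64 syscall when the classic one is absent, or
     * when a 32-bit build carries 64-bit time_t and the classic syscall
     * would misread struct timespec. */
    #if !defined(SYS_futex) && defined(SYS_futex_time64)
    # define SYS_futex SYS_futex_time64
    #elif __SIZEOF_POINTER__ == 4 && defined(SYS_futex_time64) && \
          defined(_TIME_BITS) && _TIME_BITS == 64
    # undef SYS_futex
    # define SYS_futex SYS_futex_time64
    #endif
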
diff --git a/tools/testing/selftests/futex/include/logging.h b/tools/testing/selftests/futex/include/logging.h
deleted file mode 100644
index 874c69ce5cce..000000000000
--- a/tools/testing/selftests/futex/include/logging.h
+++ /dev/null
@@ -1,148 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/******************************************************************************
- *
- * Copyright © International Business Machines Corp., 2009
- *
- * DESCRIPTION
- * Glibc independent futex library for testing kernel functionality.
- *
- * AUTHOR
- * Darren Hart <dvhart@linux.intel.com>
- *
- * HISTORY
- * 2009-Nov-6: Initial version by Darren Hart <dvhart@linux.intel.com>
- *
- *****************************************************************************/
-
-#ifndef _LOGGING_H
-#define _LOGGING_H
-
-#include <stdio.h>
-#include <string.h>
-#include <unistd.h>
-#include <linux/futex.h>
-#include "kselftest.h"
-
-/*
- * Define PASS, ERROR, and FAIL strings with and without color escape
- * sequences, default to no color.
- */
-#define ESC 0x1B, '['
-#define BRIGHT '1'
-#define GREEN '3', '2'
-#define YELLOW '3', '3'
-#define RED '3', '1'
-#define ESCEND 'm'
-#define BRIGHT_GREEN ESC, BRIGHT, ';', GREEN, ESCEND
-#define BRIGHT_YELLOW ESC, BRIGHT, ';', YELLOW, ESCEND
-#define BRIGHT_RED ESC, BRIGHT, ';', RED, ESCEND
-#define RESET_COLOR ESC, '0', 'm'
-static const char PASS_COLOR[] = {BRIGHT_GREEN, ' ', 'P', 'A', 'S', 'S',
- RESET_COLOR, 0};
-static const char ERROR_COLOR[] = {BRIGHT_YELLOW, 'E', 'R', 'R', 'O', 'R',
- RESET_COLOR, 0};
-static const char FAIL_COLOR[] = {BRIGHT_RED, ' ', 'F', 'A', 'I', 'L',
- RESET_COLOR, 0};
-static const char INFO_NORMAL[] = " INFO";
-static const char PASS_NORMAL[] = " PASS";
-static const char ERROR_NORMAL[] = "ERROR";
-static const char FAIL_NORMAL[] = " FAIL";
-const char *INFO = INFO_NORMAL;
-const char *PASS = PASS_NORMAL;
-const char *ERROR = ERROR_NORMAL;
-const char *FAIL = FAIL_NORMAL;
-
-/* Verbosity setting for INFO messages */
-#define VQUIET 0
-#define VCRITICAL 1
-#define VINFO 2
-#define VMAX VINFO
-int _verbose = VCRITICAL;
-
-/* Functional test return codes */
-#define RET_PASS 0
-#define RET_ERROR -1
-#define RET_FAIL -2
-
-/**
- * log_color() - Use colored output for PASS, ERROR, and FAIL strings
- * @use_color: use color (1) or not (0)
- */
-void log_color(int use_color)
-{
- if (use_color) {
- PASS = PASS_COLOR;
- ERROR = ERROR_COLOR;
- FAIL = FAIL_COLOR;
- } else {
- PASS = PASS_NORMAL;
- ERROR = ERROR_NORMAL;
- FAIL = FAIL_NORMAL;
- }
-}
-
-/**
- * log_verbosity() - Set verbosity of test output
- * @verbose: Enable (1) verbose output or not (0)
- *
- * Currently setting verbose=1 will enable INFO messages and 0 will disable
- * them. FAIL and ERROR messages are always displayed.
- */
-void log_verbosity(int level)
-{
- if (level > VMAX)
- level = VMAX;
- else if (level < 0)
- level = 0;
- _verbose = level;
-}
-
-/**
- * print_result() - Print standard PASS | ERROR | FAIL results
- * @ret: the return value to be considered: 0 | RET_ERROR | RET_FAIL
- *
- * print_result() is primarily intended for functional tests.
- */
-void print_result(const char *test_name, int ret)
-{
- switch (ret) {
- case RET_PASS:
- ksft_test_result_pass("%s\n", test_name);
- ksft_print_cnts();
- return;
- case RET_ERROR:
- ksft_test_result_error("%s\n", test_name);
- ksft_print_cnts();
- return;
- case RET_FAIL:
- ksft_test_result_fail("%s\n", test_name);
- ksft_print_cnts();
- return;
- }
-}
-
-/* log level macros */
-#define info(message, vargs...) \
-do { \
- if (_verbose >= VINFO) \
- fprintf(stderr, "\t%s: "message, INFO, ##vargs); \
-} while (0)
-
-#define error(message, err, args...) \
-do { \
- if (_verbose >= VCRITICAL) {\
- if (err) \
- fprintf(stderr, "\t%s: %s: "message, \
- ERROR, strerror(err), ##args); \
- else \
- fprintf(stderr, "\t%s: "message, ERROR, ##args); \
- } \
-} while (0)
-
-#define fail(message, args...) \
-do { \
- if (_verbose >= VCRITICAL) \
- fprintf(stderr, "\t%s: "message, FAIL, ##args); \
-} while (0)
-
-#endif
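
With logging.h gone, every conversion in this series substitutes kselftest primitives for the old helpers. A rough correspondence, summarized from the hunks above rather than from any official table:

    /*
     *   info(...)          -> ksft_print_dbg_msg(...)
     *   error(...)         -> ksft_exit_fail_msg(...) or ksft_test_result_error(...)
     *   fail(...)          -> ksft_test_result_fail(...)
     *   print_result(...)  -> per-TEST() reporting done by the harness
     *   RET_PASS/RET_FAIL  -> process exit status managed by TEST_HARNESS_MAIN
     */
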
diff --git a/tools/testing/selftests/hid/config.common b/tools/testing/selftests/hid/config.common
index b1f40857307d..38c51158adf8 100644
--- a/tools/testing/selftests/hid/config.common
+++ b/tools/testing/selftests/hid/config.common
@@ -135,6 +135,7 @@ CONFIG_NET_EMATCH=y
CONFIG_NETFILTER_NETLINK_LOG=y
CONFIG_NETFILTER_NETLINK_QUEUE=y
CONFIG_NETFILTER_XTABLES=y
+CONFIG_NETFILTER_XTABLES_LEGACY=y
CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y
CONFIG_NETFILTER_XT_MATCH_BPF=y
CONFIG_NETFILTER_XT_MATCH_COMMENT=y
diff --git a/tools/testing/selftests/hid/hid_common.h b/tools/testing/selftests/hid/hid_common.h
index f77f69c6657d..e3b267446fa0 100644
--- a/tools/testing/selftests/hid/hid_common.h
+++ b/tools/testing/selftests/hid/hid_common.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2022-2024 Red Hat */
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#include <fcntl.h>
#include <fnmatch.h>
@@ -230,6 +230,12 @@ static int uhid_event(struct __test_metadata *_metadata, int fd)
break;
case UHID_SET_REPORT:
UHID_LOG("UHID_SET_REPORT from uhid-dev");
+
+ answer.type = UHID_SET_REPORT_REPLY;
+ answer.u.set_report_reply.id = ev.u.set_report.id;
+ answer.u.set_report_reply.err = 0; /* success */
+
+ uhid_write(_metadata, fd, &answer);
break;
default:
TH_LOG("Invalid event from uhid-dev: %u", ev.type);
diff --git a/tools/testing/selftests/hid/hidraw.c b/tools/testing/selftests/hid/hidraw.c
index 821db37ba4bb..d625772f8b7c 100644
--- a/tools/testing/selftests/hid/hidraw.c
+++ b/tools/testing/selftests/hid/hidraw.c
@@ -2,6 +2,9 @@
/* Copyright (c) 2022-2024 Red Hat */
#include "hid_common.h"
+#include <linux/input.h>
+#include <string.h>
+#include <sys/ioctl.h>
/* for older kernels */
#ifndef HIDIOCREVOKE
@@ -215,6 +218,476 @@ TEST_F(hidraw, write_event_revoked)
pthread_mutex_unlock(&uhid_output_mtx);
}
+/*
+ * Test HIDIOCGRDESCSIZE ioctl to get report descriptor size
+ */
+TEST_F(hidraw, ioctl_rdescsize)
+{
+ int desc_size = 0;
+ int err;
+
+ /* call HIDIOCGRDESCSIZE ioctl */
+ err = ioctl(self->hidraw_fd, HIDIOCGRDESCSIZE, &desc_size);
+ ASSERT_EQ(err, 0) TH_LOG("HIDIOCGRDESCSIZE ioctl failed");
+
+ /* verify the size matches our test report descriptor */
+ ASSERT_EQ(desc_size, sizeof(rdesc))
+ TH_LOG("expected size %zu, got %d", sizeof(rdesc), desc_size);
+}
+
+/*
+ * Test HIDIOCGRDESC ioctl to get report descriptor data
+ */
+TEST_F(hidraw, ioctl_rdesc)
+{
+ struct hidraw_report_descriptor desc;
+ int err;
+
+ /* get the full report descriptor */
+ desc.size = sizeof(rdesc);
+ err = ioctl(self->hidraw_fd, HIDIOCGRDESC, &desc);
+ ASSERT_EQ(err, 0) TH_LOG("HIDIOCGRDESC ioctl failed");
+
+ /* verify the descriptor data matches our test descriptor */
+ ASSERT_EQ(memcmp(desc.value, rdesc, sizeof(rdesc)), 0)
+ TH_LOG("report descriptor data mismatch");
+}
+
+/*
+ * Test HIDIOCGRDESC ioctl with smaller buffer size
+ */
+TEST_F(hidraw, ioctl_rdesc_small_buffer)
+{
+ struct hidraw_report_descriptor desc;
+ int err;
+ size_t small_size = sizeof(rdesc) / 2; /* request half the descriptor size */
+
+ /* get partial report descriptor */
+ desc.size = small_size;
+ err = ioctl(self->hidraw_fd, HIDIOCGRDESC, &desc);
+ ASSERT_EQ(err, 0) TH_LOG("HIDIOCGRDESC ioctl failed with small buffer");
+
+ /* verify we got the first part of the descriptor */
+ ASSERT_EQ(memcmp(desc.value, rdesc, small_size), 0)
+ TH_LOG("partial report descriptor data mismatch");
+}
+
+/*
+ * Test HIDIOCGRAWINFO ioctl to get device information
+ */
+TEST_F(hidraw, ioctl_rawinfo)
+{
+ struct hidraw_devinfo devinfo;
+ int err;
+
+ /* get device info */
+ err = ioctl(self->hidraw_fd, HIDIOCGRAWINFO, &devinfo);
+ ASSERT_EQ(err, 0) TH_LOG("HIDIOCGRAWINFO ioctl failed");
+
+ /* verify device info matches our test setup */
+ ASSERT_EQ(devinfo.bustype, BUS_USB)
+ TH_LOG("expected bustype 0x03, got 0x%x", devinfo.bustype);
+ ASSERT_EQ(devinfo.vendor, 0x0001)
+ TH_LOG("expected vendor 0x0001, got 0x%x", devinfo.vendor);
+ ASSERT_EQ(devinfo.product, 0x0a37)
+ TH_LOG("expected product 0x0a37, got 0x%x", devinfo.product);
+}
+
+/*
+ * Test HIDIOCGFEATURE ioctl to get feature report
+ */
+TEST_F(hidraw, ioctl_gfeature)
+{
+ __u8 buf[10] = {0};
+ int err;
+
+ /* set report ID 1 in first byte */
+ buf[0] = 1;
+
+ /* get feature report */
+ err = ioctl(self->hidraw_fd, HIDIOCGFEATURE(sizeof(buf)), buf);
+ ASSERT_EQ(err, sizeof(feature_data)) TH_LOG("HIDIOCGFEATURE ioctl failed, got %d", err);
+
+ /* verify we got the expected feature data */
+ ASSERT_EQ(buf[0], feature_data[0])
+ TH_LOG("expected feature_data[0] = %d, got %d", feature_data[0], buf[0]);
+ ASSERT_EQ(buf[1], feature_data[1])
+ TH_LOG("expected feature_data[1] = %d, got %d", feature_data[1], buf[1]);
+}
+
+/*
+ * Test HIDIOCGFEATURE ioctl with invalid report ID
+ */
+TEST_F(hidraw, ioctl_gfeature_invalid)
+{
+ __u8 buf[10] = {0};
+ int err;
+
+ /* set invalid report ID (not 1) */
+ buf[0] = 2;
+
+ /* try to get feature report */
+ err = ioctl(self->hidraw_fd, HIDIOCGFEATURE(sizeof(buf)), buf);
+ ASSERT_LT(err, 0) TH_LOG("HIDIOCGFEATURE should have failed with invalid report ID");
+ ASSERT_EQ(errno, EIO) TH_LOG("expected EIO, got errno %d", errno);
+}
+
+/*
+ * Test ioctl with incorrect nr bits
+ */
+TEST_F(hidraw, ioctl_invalid_nr)
+{
+ char buf[256] = {0};
+ int err;
+ unsigned int bad_cmd;
+
+ /*
+ * craft an ioctl command with wrong _IOC_NR bits
+ */
+ bad_cmd = _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x00, sizeof(buf)); /* 0 is not valid */
+
+ /* test the ioctl */
+ err = ioctl(self->hidraw_fd, bad_cmd, buf);
+ ASSERT_LT(err, 0) TH_LOG("ioctl read-write with wrong _IOC_NR (0) should have failed");
+ ASSERT_EQ(errno, ENOTTY)
+ TH_LOG("expected ENOTTY for wrong read-write _IOC_NR (0), got errno %d", errno);
+
+ /*
+ * craft an ioctl command with wrong _IOC_NR bits
+ */
+ bad_cmd = _IOC(_IOC_READ, 'H', 0x00, sizeof(buf)); /* 0 is not valid */
+
+ /* test the ioctl */
+ err = ioctl(self->hidraw_fd, bad_cmd, buf);
+ ASSERT_LT(err, 0) TH_LOG("ioctl read-only with wrong _IOC_NR (0) should have failed");
+ ASSERT_EQ(errno, ENOTTY)
+ TH_LOG("expected ENOTTY for wrong read-only _IOC_NR (0), got errno %d", errno);
+
+ /* also test with a bigger number */
+ bad_cmd = _IOC(_IOC_READ, 'H', 0x42, sizeof(buf)); /* 0x42 is not valid either */
+
+ err = ioctl(self->hidraw_fd, bad_cmd, buf);
+ ASSERT_LT(err, 0) TH_LOG("ioctl read-only with wrong _IOC_NR (0x42) should have failed");
+ ASSERT_EQ(errno, ENOTTY)
+ TH_LOG("expected ENOTTY for wrong read-only _IOC_NR (0x42), got errno %d", errno);
+
+ /* also test with a bigger number: 0x42 is not valid either */
+ bad_cmd = _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x42, sizeof(buf));
+
+ err = ioctl(self->hidraw_fd, bad_cmd, buf);
+ ASSERT_LT(err, 0) TH_LOG("ioctl read-write with wrong _IOC_NR (0x42) should have failed");
+ ASSERT_EQ(errno, ENOTTY)
+ TH_LOG("expected ENOTTY for wrong read-write _IOC_NR (0x42), got errno %d", errno);
+}
+
+/*
+ * Test ioctl with incorrect type bits
+ */
+TEST_F(hidraw, ioctl_invalid_type)
+{
+ char buf[256] = {0};
+ int err;
+ unsigned int bad_cmd;
+
+ /*
+ * craft an ioctl command with wrong _IOC_TYPE bits
+ */
+ bad_cmd = _IOC(_IOC_WRITE|_IOC_READ, 'I', 0x01, sizeof(buf)); /* 'I' should be 'H' */
+
+ /* test the ioctl */
+ err = ioctl(self->hidraw_fd, bad_cmd, buf);
+ ASSERT_LT(err, 0) TH_LOG("ioctl with wrong _IOC_TYPE (I) should have failed");
+ ASSERT_EQ(errno, EINVAL) TH_LOG("expected EINVAL for wrong _IOC_TYPE, got errno %d", errno);
+}
+
+/*
+ * Test HIDIOCGFEATURE ioctl with incorrect _IOC_DIR bits
+ */
+TEST_F(hidraw, ioctl_gfeature_invalid_dir)
+{
+ __u8 buf[10] = {0};
+ int err;
+ unsigned int bad_cmd;
+
+ /* set report ID 1 in first byte */
+ buf[0] = 1;
+
+ /*
+ * craft an ioctl command with wrong _IOC_DIR bits
+ * HIDIOCGFEATURE should have _IOC_WRITE|_IOC_READ, let's use only _IOC_WRITE
+ */
+ bad_cmd = _IOC(_IOC_WRITE, 'H', 0x07, sizeof(buf)); /* should be _IOC_WRITE|_IOC_READ */
+
+ /* try to get feature report with wrong direction bits */
+ err = ioctl(self->hidraw_fd, bad_cmd, buf);
+ ASSERT_LT(err, 0) TH_LOG("HIDIOCGFEATURE with wrong _IOC_DIR should have failed");
+ ASSERT_EQ(errno, EINVAL) TH_LOG("expected EINVAL for wrong _IOC_DIR, got errno %d", errno);
+
+ /* also test with only _IOC_READ */
+ bad_cmd = _IOC(_IOC_READ, 'H', 0x07, sizeof(buf)); /* should be _IOC_WRITE|_IOC_READ */
+
+ err = ioctl(self->hidraw_fd, bad_cmd, buf);
+ ASSERT_LT(err, 0) TH_LOG("HIDIOCGFEATURE with wrong _IOC_DIR should have failed");
+ ASSERT_EQ(errno, EINVAL) TH_LOG("expected EINVAL for wrong _IOC_DIR, got errno %d", errno);
+}
+
+/*
+ * Test read-only ioctl with incorrect _IOC_DIR bits
+ */
+TEST_F(hidraw, ioctl_readonly_invalid_dir)
+{
+ char buf[256] = {0};
+ int err;
+ unsigned int bad_cmd;
+
+ /*
+ * craft an ioctl command with wrong _IOC_DIR bits
+ * HIDIOCGRAWNAME should have _IOC_READ, let's use _IOC_WRITE
+ */
+ bad_cmd = _IOC(_IOC_WRITE, 'H', 0x04, sizeof(buf)); /* should be _IOC_READ */
+
+ /* try to get device name with wrong direction bits */
+ err = ioctl(self->hidraw_fd, bad_cmd, buf);
+ ASSERT_LT(err, 0) TH_LOG("HIDIOCGRAWNAME with wrong _IOC_DIR should have failed");
+ ASSERT_EQ(errno, EINVAL) TH_LOG("expected EINVAL for wrong _IOC_DIR, got errno %d", errno);
+
+ /* also test with _IOC_WRITE|_IOC_READ */
+ bad_cmd = _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x04, sizeof(buf)); /* should be only _IOC_READ */
+
+ err = ioctl(self->hidraw_fd, bad_cmd, buf);
+ ASSERT_LT(err, 0) TH_LOG("HIDIOCGRAWNAME with wrong _IOC_DIR should have failed");
+ ASSERT_EQ(errno, EINVAL) TH_LOG("expected EINVAL for wrong _IOC_DIR, got errno %d", errno);
+}
+
+/*
+ * Test HIDIOCSFEATURE ioctl to set feature report
+ */
+TEST_F(hidraw, ioctl_sfeature)
+{
+ __u8 buf[10] = {0};
+ int err;
+
+ /* prepare feature report data */
+ buf[0] = 1; /* report ID */
+ buf[1] = 0x42;
+ buf[2] = 0x24;
+
+ /* set feature report */
+ err = ioctl(self->hidraw_fd, HIDIOCSFEATURE(3), buf);
+ ASSERT_EQ(err, 3) TH_LOG("HIDIOCSFEATURE ioctl failed, got %d", err);
+
+ /*
+ * Note: The uhid mock doesn't validate the set report data,
+ * so we just verify the ioctl succeeds
+ */
+}
+
+/*
+ * Test HIDIOCGINPUT ioctl to get input report
+ */
+TEST_F(hidraw, ioctl_ginput)
+{
+ __u8 buf[10] = {0};
+ int err;
+
+ /* set report ID 1 in first byte */
+ buf[0] = 1;
+
+ /* get input report */
+ err = ioctl(self->hidraw_fd, HIDIOCGINPUT(sizeof(buf)), buf);
+ ASSERT_EQ(err, sizeof(feature_data)) TH_LOG("HIDIOCGINPUT ioctl failed, got %d", err);
+
+ /* verify we got the expected input data */
+ ASSERT_EQ(buf[0], feature_data[0])
+ TH_LOG("expected feature_data[0] = %d, got %d", feature_data[0], buf[0]);
+ ASSERT_EQ(buf[1], feature_data[1])
+ TH_LOG("expected feature_data[1] = %d, got %d", feature_data[1], buf[1]);
+}
+
+/*
+ * Test HIDIOCGINPUT ioctl with invalid report ID
+ */
+TEST_F(hidraw, ioctl_ginput_invalid)
+{
+ __u8 buf[10] = {0};
+ int err;
+
+ /* set invalid report ID (not 1) */
+ buf[0] = 2;
+
+ /* try to get input report */
+ err = ioctl(self->hidraw_fd, HIDIOCGINPUT(sizeof(buf)), buf);
+ ASSERT_LT(err, 0) TH_LOG("HIDIOCGINPUT should have failed with invalid report ID");
+ ASSERT_EQ(errno, EIO) TH_LOG("expected EIO, got errno %d", errno);
+}
+
+/*
+ * Test HIDIOCSINPUT ioctl to set input report
+ */
+TEST_F(hidraw, ioctl_sinput)
+{
+ __u8 buf[10] = {0};
+ int err;
+
+ /* prepare input report data */
+ buf[0] = 1; /* report ID */
+ buf[1] = 0x55;
+ buf[2] = 0xAA;
+
+ /* set input report */
+ err = ioctl(self->hidraw_fd, HIDIOCSINPUT(3), buf);
+ ASSERT_EQ(err, 3) TH_LOG("HIDIOCSINPUT ioctl failed, got %d", err);
+
+ /*
+ * Note: The uhid mock doesn't validate the set report data,
+ * so we just verify the ioctl succeeds
+ */
+}
+
+/*
+ * Test HIDIOCGOUTPUT ioctl to get output report
+ */
+TEST_F(hidraw, ioctl_goutput)
+{
+ __u8 buf[10] = {0};
+ int err;
+
+ /* set report ID 1 in first byte */
+ buf[0] = 1;
+
+ /* get output report */
+ err = ioctl(self->hidraw_fd, HIDIOCGOUTPUT(sizeof(buf)), buf);
+ ASSERT_EQ(err, sizeof(feature_data)) TH_LOG("HIDIOCGOUTPUT ioctl failed, got %d", err);
+
+ /* verify we got the expected output data */
+ ASSERT_EQ(buf[0], feature_data[0])
+ TH_LOG("expected feature_data[0] = %d, got %d", feature_data[0], buf[0]);
+ ASSERT_EQ(buf[1], feature_data[1])
+ TH_LOG("expected feature_data[1] = %d, got %d", feature_data[1], buf[1]);
+}
+
+/*
+ * Test HIDIOCGOUTPUT ioctl with invalid report ID
+ */
+TEST_F(hidraw, ioctl_goutput_invalid)
+{
+ __u8 buf[10] = {0};
+ int err;
+
+ /* set invalid report ID (not 1) */
+ buf[0] = 2;
+
+ /* try to get output report */
+ err = ioctl(self->hidraw_fd, HIDIOCGOUTPUT(sizeof(buf)), buf);
+ ASSERT_LT(err, 0) TH_LOG("HIDIOCGOUTPUT should have failed with invalid report ID");
+ ASSERT_EQ(errno, EIO) TH_LOG("expected EIO, got errno %d", errno);
+}
+
+/*
+ * Test HIDIOCSOUTPUT ioctl to set output report
+ */
+TEST_F(hidraw, ioctl_soutput)
+{
+ __u8 buf[10] = {0};
+ int err;
+
+ /* prepare output report data */
+ buf[0] = 1; /* report ID */
+ buf[1] = 0x33;
+ buf[2] = 0xCC;
+
+ /* set output report */
+ err = ioctl(self->hidraw_fd, HIDIOCSOUTPUT(3), buf);
+ ASSERT_EQ(err, 3) TH_LOG("HIDIOCSOUTPUT ioctl failed, got %d", err);
+
+ /*
+ * Note: The uhid mock doesn't validate the set report data,
+ * so we just verify the ioctl succeeds
+ */
+}
+
+/*
+ * Test HIDIOCGRAWNAME ioctl to get device name string
+ */
+TEST_F(hidraw, ioctl_rawname)
+{
+ char name[256] = {0};
+ char expected_name[64];
+ int err;
+
+ /* get device name */
+ err = ioctl(self->hidraw_fd, HIDIOCGRAWNAME(sizeof(name)), name);
+ ASSERT_GT(err, 0) TH_LOG("HIDIOCGRAWNAME ioctl failed, got %d", err);
+
+ /* construct expected name based on device id */
+ snprintf(expected_name, sizeof(expected_name), "test-uhid-device-%d", self->hid.dev_id);
+
+ /* verify the name matches expected pattern */
+ ASSERT_EQ(strcmp(name, expected_name), 0)
+ TH_LOG("expected name '%s', got '%s'", expected_name, name);
+}
+
+/*
+ * Test HIDIOCGRAWPHYS ioctl to get device physical address string
+ */
+TEST_F(hidraw, ioctl_rawphys)
+{
+ char phys[256] = {0};
+ char expected_phys[64];
+ int err;
+
+ /* get device physical address */
+ err = ioctl(self->hidraw_fd, HIDIOCGRAWPHYS(sizeof(phys)), phys);
+ ASSERT_GT(err, 0) TH_LOG("HIDIOCGRAWPHYS ioctl failed, got %d", err);
+
+ /* construct expected phys based on device id */
+ snprintf(expected_phys, sizeof(expected_phys), "%d", self->hid.dev_id);
+
+ /* verify the phys matches expected value */
+ ASSERT_EQ(strcmp(phys, expected_phys), 0)
+ TH_LOG("expected phys '%s', got '%s'", expected_phys, phys);
+}
+
+/*
+ * Test HIDIOCGRAWUNIQ ioctl to get device unique identifier string
+ */
+TEST_F(hidraw, ioctl_rawuniq)
+{
+ char uniq[256] = {0};
+ int err;
+
+ /* get device unique identifier */
+ err = ioctl(self->hidraw_fd, HIDIOCGRAWUNIQ(sizeof(uniq)), uniq);
+ ASSERT_GE(err, 0) TH_LOG("HIDIOCGRAWUNIQ ioctl failed, got %d", err);
+
+ /* uniq is typically empty in our test setup */
+ ASSERT_EQ(strlen(uniq), 0) TH_LOG("expected empty uniq, got '%s'", uniq);
+}
+
+/*
+ * Test device string ioctls with small buffer sizes
+ */
+TEST_F(hidraw, ioctl_strings_small_buffer)
+{
+ char small_buf[8] = {0};
+ char expected_name[64];
+ int err;
+
+ /* test HIDIOCGRAWNAME with small buffer */
+ err = ioctl(self->hidraw_fd, HIDIOCGRAWNAME(sizeof(small_buf)), small_buf);
+ ASSERT_EQ(err, sizeof(small_buf))
+ TH_LOG("HIDIOCGRAWNAME with small buffer failed, got %d", err);
+
+ /* construct expected truncated name */
+ snprintf(expected_name, sizeof(expected_name), "test-uhid-device-%d", self->hid.dev_id);
+
+ /* verify we got truncated name (first 8 chars, no null terminator guaranteed) */
+ ASSERT_EQ(strncmp(small_buf, expected_name, sizeof(small_buf)), 0)
+ TH_LOG("expected truncated name to match first %zu chars", sizeof(small_buf));
+
+ /* Note: hidraw driver doesn't guarantee null termination when buffer is too small */
+}
+
int main(int argc, char **argv)
{
return test_harness_run(argc, argv);
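
The invalid-ioctl tests above all lean on how _IOC() packs a command word out of direction, type, number and size fields. A small stand-alone decoder makes it easy to see what each crafted bad_cmd actually encodes:

    #include <linux/hidraw.h>
    #include <linux/ioctl.h>
    #include <stdio.h>

    static void dump_cmd(const char *name, unsigned int cmd)
    {
        printf("%s: dir=%u type='%c' nr=0x%02x size=%u\n", name,
               _IOC_DIR(cmd), (char)_IOC_TYPE(cmd), _IOC_NR(cmd),
               _IOC_SIZE(cmd));
    }

    int main(void)
    {
        dump_cmd("HIDIOCGRAWNAME(256)", HIDIOCGRAWNAME(256));
        dump_cmd("HIDIOCGFEATURE(10)", HIDIOCGFEATURE(10));
        return 0;
    }

Running it shows, for instance, that HIDIOCGRAWNAME(256) carries dir=_IOC_READ, type='H' and nr=0x04, which is exactly the tuple the ioctl_readonly_invalid_dir test perturbs one field at a time.
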
diff --git a/tools/testing/selftests/hid/tests/base.py b/tools/testing/selftests/hid/tests/base.py
index 3a465768e507..5175cf235b2f 100644
--- a/tools/testing/selftests/hid/tests/base.py
+++ b/tools/testing/selftests/hid/tests/base.py
@@ -5,6 +5,7 @@
# Copyright (c) 2017 Benjamin Tissoires <benjamin.tissoires@gmail.com>
# Copyright (c) 2017 Red Hat, Inc.
+import dataclasses
import libevdev
import os
import pytest
@@ -145,6 +146,18 @@ class UHIDTestDevice(BaseDevice):
self.name = name
+@dataclasses.dataclass
+class HidBpf:
+ object_name: str
+ has_rdesc_fixup: bool
+
+
+@dataclasses.dataclass
+class KernelModule:
+ driver_name: str
+ module_name: str
+
+
class BaseTestCase:
class TestUhid(object):
syn_event = libevdev.InputEvent(libevdev.EV_SYN.SYN_REPORT) # type: ignore
@@ -155,20 +168,20 @@ class BaseTestCase:
# List of kernel modules to load before starting the test
# if any module is not available (not compiled), the test will skip.
- # Each element is a tuple '(kernel driver name, kernel module)',
- # for example ("playstation", "hid-playstation")
- kernel_modules: List[Tuple[str, str]] = []
+ # Each element is a KernelModule object, for example
+ # KernelModule("playstation", "hid-playstation")
+ kernel_modules: List[KernelModule] = []
# List of in kernel HID-BPF object files to load
# before starting the test
# Any existing pre-loaded HID-BPF module will be removed
# before the ones in this list will be manually loaded.
- # Each Element is a tuple '(hid_bpf_object, rdesc_fixup_present)',
- # for example '("xppen-ArtistPro16Gen2.bpf.o", True)'
- # If 'rdesc_fixup_present' is True, the test needs to wait
+ # Each Element is a HidBpf object, for example
+ # 'HidBpf("xppen-ArtistPro16Gen2.bpf.o", True)'
+ # If 'has_rdesc_fixup' is True, the test needs to wait
# for one unbind and rebind before it can be sure the kernel is
# ready
- hid_bpfs: List[Tuple[str, bool]] = []
+ hid_bpfs: List[HidBpf] = []
def assertInputEventsIn(self, expected_events, effective_events):
effective_events = effective_events.copy()
@@ -232,25 +245,26 @@ class BaseTestCase:
@pytest.fixture()
def load_kernel_module(self):
- for kernel_driver, kernel_module in self.kernel_modules:
- self._load_kernel_module(kernel_driver, kernel_module)
+ for k in self.kernel_modules:
+ self._load_kernel_module(k.driver_name, k.module_name)
yield
def load_hid_bpfs(self):
+ # this function will only work when run in the kernel tree
script_dir = Path(os.path.dirname(os.path.realpath(__file__)))
root_dir = (script_dir / "../../../../..").resolve()
bpf_dir = root_dir / "drivers/hid/bpf/progs"
+ if not bpf_dir.exists():
+ pytest.skip("looks like we are not in the kernel tree, skipping")
+
udev_hid_bpf = shutil.which("udev-hid-bpf")
if not udev_hid_bpf:
pytest.skip("udev-hid-bpf not found in $PATH, skipping")
- wait = False
- for _, rdesc_fixup in self.hid_bpfs:
- if rdesc_fixup:
- wait = True
+ wait = any(b.has_rdesc_fixup for b in self.hid_bpfs)
- for hid_bpf, _ in self.hid_bpfs:
+ for hid_bpf in self.hid_bpfs:
# We need to start `udev-hid-bpf` in the background
# and dispatch uhid events in case the kernel needs
# to fetch features on the device
@@ -260,13 +274,13 @@ class BaseTestCase:
"--verbose",
"add",
str(self.uhdev.sys_path),
- str(bpf_dir / hid_bpf),
+ str(bpf_dir / hid_bpf.object_name),
],
)
while process.poll() is None:
self.uhdev.dispatch(1)
- if process.poll() != 0:
+ if process.returncode != 0:
pytest.fail(
f"Couldn't insert hid-bpf program '{hid_bpf}', marking the test as failed"
)
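
With the tuples replaced by dataclasses, test classes now declare their dependencies by field name, which is the readability win this change is after. A short illustrative declaration (class and device names borrowed from the comments above, not from a real test):

    class TestMyDevice(BaseTestCase.TestUhid):
        kernel_modules = [KernelModule("playstation", "hid-playstation")]
        hid_bpfs = [HidBpf("xppen-ArtistPro16Gen2.bpf.o", has_rdesc_fixup=True)]
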
diff --git a/tools/testing/selftests/hid/tests/base_device.py b/tools/testing/selftests/hid/tests/base_device.py
index e0515be97f83..59465c58d94d 100644
--- a/tools/testing/selftests/hid/tests/base_device.py
+++ b/tools/testing/selftests/hid/tests/base_device.py
@@ -18,10 +18,12 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import dataclasses
import fcntl
import functools
import libevdev
import os
+import threading
try:
import pyudev
@@ -104,6 +106,12 @@ class PowerSupply(object):
return self._type.str_value
+@dataclasses.dataclass
+class HidReadiness:
+ is_ready: bool = False
+ count: int = 0
+
+
class HIDIsReady(object):
"""
Companion class that binds to a kernel mechanism
@@ -115,18 +123,18 @@ class HIDIsReady(object):
def __init__(self: "HIDIsReady", uhid: UHIDDevice) -> None:
self.uhid = uhid
- def is_ready(self: "HIDIsReady") -> bool:
+ def is_ready(self: "HIDIsReady") -> HidReadiness:
"""
Overwrite in subclasses: should return True or False whether
the attached uhid device is ready or not.
"""
- return False
+ return HidReadiness()
class UdevHIDIsReady(HIDIsReady):
_pyudev_context: ClassVar[Optional[pyudev.Context]] = None
_pyudev_monitor: ClassVar[Optional[pyudev.Monitor]] = None
- _uhid_devices: ClassVar[Dict[int, Tuple[bool, int]]] = {}
+ _uhid_devices: ClassVar[Dict[int, HidReadiness]] = {}
def __init__(self: "UdevHIDIsReady", uhid: UHIDDevice) -> None:
super().__init__(uhid)
@@ -157,18 +165,19 @@ class UdevHIDIsReady(HIDIsReady):
id = int(event.sys_path.strip().split(".")[-1], 16)
- device_ready, count = cls._uhid_devices.get(id, (False, 0))
+ readiness = cls._uhid_devices.setdefault(id, HidReadiness())
ready = event.action == "bind"
- if not device_ready and ready:
- count += 1
- cls._uhid_devices[id] = (ready, count)
+ if not readiness.is_ready and ready:
+ readiness.count += 1
+
+ readiness.is_ready = ready
- def is_ready(self: "UdevHIDIsReady") -> Tuple[bool, int]:
+ def is_ready(self: "UdevHIDIsReady") -> HidReadiness:
try:
return self._uhid_devices[self.uhid.hid_id]
except KeyError:
- return (False, 0)
+ return HidReadiness()
class EvdevMatch(object):
@@ -322,11 +331,11 @@ class BaseDevice(UHIDDevice):
@property
def kernel_is_ready(self: "BaseDevice") -> bool:
- return self._kernel_is_ready.is_ready()[0] and self.started
+ return self._kernel_is_ready.is_ready().is_ready and self.started
@property
def kernel_ready_count(self: "BaseDevice") -> int:
- return self._kernel_is_ready.is_ready()[1]
+ return self._kernel_is_ready.is_ready().count
@property
def input_nodes(self: "BaseDevice") -> List[EvdevDevice]:
@@ -336,10 +345,28 @@ class BaseDevice(UHIDDevice):
if not self.kernel_is_ready or not self.started:
return []
+ # Starting with kernel v6.16, an event is emitted when
+ # userspace opens a kernel device, and for some devices
+ # this translates into a SET_REPORT.
+ # Because EvdevDevice(path) opens every single evdev node
+ # we need to have a separate thread to process the incoming
+ # SET_REPORT or we end up having to wait for the kernel
+ # timeout of 5 seconds.
+ done = False
+
+ def dispatch():
+ while not done:
+ self.dispatch(1)
+
+ t = threading.Thread(target=dispatch)
+ t.start()
+
self._input_nodes = [
EvdevDevice(path)
for path in self.walk_sysfs("input", "input/input*/event*")
]
+ done = True
+ t.join()
return self._input_nodes
def match_evdev_rule(self, application, evdev):
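
The background-dispatch trick in input_nodes above generalizes to any blocking step that can trigger a SET_REPORT round-trip. The same guard pattern in isolation (blocking_work is a hypothetical stand-in for the step that would otherwise stall against the kernel's five-second timeout):

    import threading

    def with_uhid_pump(device, blocking_work):
        done = False

        def pump():
            while not done:
                device.dispatch(1)  # keep answering uhid requests

        t = threading.Thread(target=pump)
        t.start()
        try:
            return blocking_work()  # e.g. opening every evdev node
        finally:
            done = True
            t.join()
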
diff --git a/tools/testing/selftests/hid/tests/test_apple_keyboard.py b/tools/testing/selftests/hid/tests/test_apple_keyboard.py
index f81071d46166..0e17588b945c 100644
--- a/tools/testing/selftests/hid/tests/test_apple_keyboard.py
+++ b/tools/testing/selftests/hid/tests/test_apple_keyboard.py
@@ -8,13 +8,14 @@
from .test_keyboard import ArrayKeyboard, TestArrayKeyboard
from hidtools.util import BusType
+from . import base
import libevdev
import logging
logger = logging.getLogger("hidtools.test.apple-keyboard")
-KERNEL_MODULE = ("apple", "hid-apple")
+KERNEL_MODULE = base.KernelModule("apple", "hid-apple")
class KbdData(object):
diff --git a/tools/testing/selftests/hid/tests/test_gamepad.py b/tools/testing/selftests/hid/tests/test_gamepad.py
index 8d5b5ffdae49..612197805931 100644
--- a/tools/testing/selftests/hid/tests/test_gamepad.py
+++ b/tools/testing/selftests/hid/tests/test_gamepad.py
@@ -12,6 +12,7 @@ import pytest
from .base_gamepad import BaseGamepad, JoystickGamepad, AxisMapping
from hidtools.util import BusType
+from .base import HidBpf
import logging
@@ -654,7 +655,7 @@ class TestAsusGamepad(BaseTest.TestGamepad):
class TestRaptorMach2Joystick(BaseTest.TestGamepad):
- hid_bpfs = [("FR-TEC__Raptor-Mach-2.bpf.o", True)]
+ hid_bpfs = [HidBpf("FR-TEC__Raptor-Mach-2.bpf.o", True)]
def create_device(self):
return RaptorMach2Joystick(
diff --git a/tools/testing/selftests/hid/tests/test_ite_keyboard.py b/tools/testing/selftests/hid/tests/test_ite_keyboard.py
index 38550c167bae..f695eaad1648 100644
--- a/tools/testing/selftests/hid/tests/test_ite_keyboard.py
+++ b/tools/testing/selftests/hid/tests/test_ite_keyboard.py
@@ -11,10 +11,11 @@ from hidtools.util import BusType
import libevdev
import logging
+from . import base
logger = logging.getLogger("hidtools.test.ite-keyboard")
-KERNEL_MODULE = ("itetech", "hid_ite")
+KERNEL_MODULE = base.KernelModule("itetech", "hid_ite")
class KbdData(object):
diff --git a/tools/testing/selftests/hid/tests/test_mouse.py b/tools/testing/selftests/hid/tests/test_mouse.py
index 66daf7e5975c..eb4e15a0e53b 100644
--- a/tools/testing/selftests/hid/tests/test_mouse.py
+++ b/tools/testing/selftests/hid/tests/test_mouse.py
@@ -439,6 +439,68 @@ class BadResolutionMultiplierMouse(ResolutionMultiplierMouse):
return 32 # EPIPE
+class BadReportDescriptorMouse(BaseMouse):
+ """
+ This "device" was one autogenerated by syzbot. There are a lot of issues in
+ it, and the most problematic is that it declares features that have no
+ size.
+
+ This leads to report->size being set to 0 and can mess up with usbhid
+ internals. Fortunately, uhid merely passes the incoming buffer, without
+ touching it so a buffer of size 0 will be translated to [] without
+ triggering a kernel oops.
+
+ Because the report descriptor is wrong, no input are created, and we need
+ to tweak a little bit the parameters to make it look correct.
+ """
+
+ # fmt: off
+ report_descriptor = [
+ 0x96, 0x01, 0x00, # Report Count (1) 0
+ 0x06, 0x01, 0x00, # Usage Page (Generic Desktop) 3
+ # 0x03, 0x00, 0x00, 0x00, 0x00, # Ignored by the kernel somehow
+ 0x2a, 0x90, 0xa0, # Usage Maximum (41104) 6
+ 0x27, 0x00, 0x00, 0x00, 0x00, # Logical Maximum (0) 9
+ 0xb3, 0x81, 0x3e, 0x25, 0x03, # Feature (Cnst,Arr,Abs,Vol) 14
+ 0x1b, 0xdd, 0xe8, 0x40, 0x50, # Usage Minimum (1346431197) 19
+ 0x3b, 0x5d, 0x8c, 0x3d, 0xda, # Designator Index 24
+ ]
+ # fmt: on
+
+ def __init__(
+ self, rdesc=report_descriptor, name=None, input_info=(3, 0x045E, 0x07DA)
+ ):
+ super().__init__(rdesc, name, input_info)
+ self.high_resolution_report_called = False
+
+ def get_evdev(self, application=None):
+ assert self._input_nodes is None
+ return (
+ "Ok" # should be a list or None, but both would fail, so abusing the system
+ )
+
+ def next_sync_events(self, application=None):
+ # there are no evdev nodes, so no events
+ return []
+
+ def is_ready(self):
+ # we wait for the SET_REPORT command to come
+ return self.high_resolution_report_called
+
+ def set_report(self, req, rnum, rtype, data):
+ if rtype != self.UHID_FEATURE_REPORT:
+ raise InvalidHIDCommunication(f"Unexpected report type: {rtype}")
+ if rnum != 0x0:
+ raise InvalidHIDCommunication(f"Unexpected report number: {rnum}")
+
+ if len(data) != 1:
+ raise InvalidHIDCommunication(f"Unexpected data: {data}, expected '[0]'")
+
+ self.high_resolution_report_called = True
+
+ return 0
+
+
class ResolutionMultiplierHWheelMouse(TwoWheelMouse):
# fmt: off
report_descriptor = [
@@ -975,3 +1037,11 @@ class TestMiMouse(TestWheelMouse):
# assert below print out the real error
pass
assert remaining == []
+
+
+class TestBadReportDescriptorMouse(base.BaseTestCase.TestUhid):
+ def create_device(self):
+ return BadReportDescriptorMouse()
+
+ def assertName(self, uhdev):
+ pass
diff --git a/tools/testing/selftests/hid/tests/test_multitouch.py b/tools/testing/selftests/hid/tests/test_multitouch.py
index 4265012231c6..ece0ba8e7d34 100644
--- a/tools/testing/selftests/hid/tests/test_multitouch.py
+++ b/tools/testing/selftests/hid/tests/test_multitouch.py
@@ -17,7 +17,7 @@ import time
logger = logging.getLogger("hidtools.test.multitouch")
-KERNEL_MODULE = ("hid-multitouch", "hid_multitouch")
+KERNEL_MODULE = base.KernelModule("hid-multitouch", "hid_multitouch")
def BIT(x):
@@ -1752,6 +1752,52 @@ class TestWin8TSConfidence(BaseTest.TestWin8Multitouch):
assert evdev.slots[0][libevdev.EV_ABS.ABS_MT_TRACKING_ID] == -1
+ @pytest.mark.skip_if_uhdev(
+ lambda uhdev: "Confidence" not in uhdev.fields,
+ "Device not compatible, missing Confidence usage",
+ )
+ def test_mt_confidence_bad_multi_release(self):
+ """Check for the sticky finger being properly detected.
+
+ We first inject 3 fingers, then release only the second.
+ After 100 ms, we should receive a generated event about the
+ 2 missing fingers being released.
+ """
+ uhdev = self.uhdev
+ evdev = uhdev.get_evdev()
+
+ # send 3 touches
+ t0 = Touch(1, 50, 10)
+ t1 = Touch(2, 150, 100)
+ t2 = Touch(3, 250, 200)
+ r = uhdev.event([t0, t1, t2])
+ events = uhdev.next_sync_events()
+ self.debug_reports(r, uhdev, events)
+
+ # release the second
+ t1.tipswitch = False
+ r = uhdev.event([t1])
+ events = uhdev.next_sync_events()
+ self.debug_reports(r, uhdev, events)
+
+ # only the second is released
+ assert evdev.slots[0][libevdev.EV_ABS.ABS_MT_TRACKING_ID] != -1
+ assert evdev.slots[1][libevdev.EV_ABS.ABS_MT_TRACKING_ID] == -1
+ assert evdev.slots[2][libevdev.EV_ABS.ABS_MT_TRACKING_ID] != -1
+
+ # wait for the timer to kick in
+ time.sleep(0.2)
+
+ events = uhdev.next_sync_events()
+ self.debug_reports([], uhdev, events)
+
+ # now all 3 fingers are released
+ assert libevdev.InputEvent(libevdev.EV_KEY.BTN_TOUCH, 0) in events
+ assert evdev.slots[0][libevdev.EV_ABS.ABS_MT_TRACKING_ID] == -1
+ assert evdev.slots[1][libevdev.EV_ABS.ABS_MT_TRACKING_ID] == -1
+ assert evdev.slots[2][libevdev.EV_ABS.ABS_MT_TRACKING_ID] == -1
+
+
class TestElanXPS9360(BaseTest.TestWin8Multitouch):
def create_device(self):
return Digitizer(
@@ -2086,3 +2132,12 @@ class Testsynaptics_06cb_ce08(BaseTest.TestPTP):
input_info=(BusType.I2C, 0x06CB, 0xCE08),
rdesc="05 01 09 02 a1 01 85 02 09 01 a1 00 05 09 19 01 29 02 15 00 25 01 75 01 95 02 81 02 95 06 81 01 05 01 09 30 09 31 15 81 25 7f 75 08 95 02 81 06 c0 c0 05 01 09 02 a1 01 85 18 09 01 a1 00 05 09 19 01 29 03 46 00 00 15 00 25 01 75 01 95 03 81 02 95 05 81 01 05 01 09 30 09 31 15 81 25 7f 75 08 95 02 81 06 c0 c0 06 00 ff 09 02 a1 01 85 20 09 01 a1 00 09 03 15 00 26 ff 00 35 00 46 ff 00 75 08 95 05 81 02 c0 c0 05 0d 09 05 a1 01 85 03 05 0d 09 22 a1 02 15 00 25 01 09 47 09 42 95 02 75 01 81 02 95 01 75 03 25 05 09 51 81 02 75 01 95 03 81 03 05 01 15 00 26 f8 04 75 10 55 0e 65 11 09 30 35 00 46 24 04 95 01 81 02 46 30 02 26 a0 02 09 31 81 02 c0 05 0d 09 22 a1 02 15 00 25 01 09 47 09 42 95 02 75 01 81 02 95 01 75 03 25 05 09 51 81 02 75 01 95 03 81 03 05 01 15 00 26 f8 04 75 10 55 0e 65 11 09 30 35 00 46 24 04 95 01 81 02 46 30 02 26 a0 02 09 31 81 02 c0 05 0d 09 22 a1 02 15 00 25 01 09 47 09 42 95 02 75 01 81 02 95 01 75 03 25 05 09 51 81 02 75 01 95 03 81 03 05 01 15 00 26 f8 04 75 10 55 0e 65 11 09 30 35 00 46 24 04 95 01 81 02 46 30 02 26 a0 02 09 31 81 02 c0 05 0d 09 22 a1 02 15 00 25 01 09 47 09 42 95 02 75 01 81 02 95 01 75 03 25 05 09 51 81 02 75 01 95 03 81 03 05 01 15 00 26 f8 04 75 10 55 0e 65 11 09 30 35 00 46 24 04 95 01 81 02 46 30 02 26 a0 02 09 31 81 02 c0 05 0d 09 22 a1 02 15 00 25 01 09 47 09 42 95 02 75 01 81 02 95 01 75 03 25 05 09 51 81 02 75 01 95 03 81 03 05 01 15 00 26 f8 04 75 10 55 0e 65 11 09 30 35 00 46 24 04 95 01 81 02 46 30 02 26 a0 02 09 31 81 02 c0 05 0d 55 0c 66 01 10 47 ff ff 00 00 27 ff ff 00 00 75 10 95 01 09 56 81 02 09 54 25 7f 95 01 75 08 81 02 05 09 09 01 25 01 75 01 95 01 81 02 95 07 81 03 05 0d 85 08 09 55 09 59 75 04 95 02 25 0f b1 02 85 0d 09 60 75 01 95 01 15 00 25 01 b1 02 95 07 b1 03 85 07 06 00 ff 09 c5 15 00 26 ff 00 75 08 96 00 01 b1 02 c0 05 0d 09 0e a1 01 85 04 09 22 a1 02 09 52 15 00 25 0a 75 08 95 01 b1 02 c0 09 22 a1 00 85 06 09 57 09 58 75 01 95 02 25 01 b1 02 95 06 b1 03 c0 c0 06 00 ff 09 01 a1 01 85 09 09 02 15 00 26 ff 00 75 08 95 14 91 02 85 0a 09 03 15 00 26 ff 00 75 08 95 14 91 02 85 0b 09 04 15 00 26 ff 00 75 08 95 45 81 02 85 0c 09 05 15 00 26 ff 00 75 08 95 45 81 02 85 0f 09 06 15 00 26 ff 00 75 08 95 03 b1 02 85 0e 09 07 15 00 26 ff 00 75 08 95 01 b1 02 c0",
)
+
+class Testsynaptics_06cb_ce26(TestWin8TSConfidence):
+ def create_device(self):
+ return PTP(
+ "uhid test synaptics_06cb_ce26",
+ max_contacts=5,
+ input_info=(BusType.I2C, 0x06CB, 0xCE26),
+ rdesc="05 01 09 02 a1 01 85 02 09 01 a1 00 05 09 19 01 29 02 15 00 25 01 75 01 95 02 81 02 95 06 81 01 05 01 09 30 09 31 15 81 25 7f 75 08 95 02 81 06 c0 c0 05 0d 09 05 a1 01 85 03 05 0d 09 22 a1 02 15 00 25 01 09 47 09 42 95 02 75 01 81 02 95 01 75 03 25 05 09 51 81 02 75 01 95 03 81 03 05 01 15 00 26 45 05 75 10 55 0e 65 11 09 30 35 00 46 64 04 95 01 81 02 46 a2 02 26 29 03 09 31 81 02 c0 05 0d 09 22 a1 02 15 00 25 01 09 47 09 42 95 02 75 01 81 02 95 01 75 03 25 05 09 51 81 02 75 01 95 03 81 03 05 01 15 00 26 45 05 75 10 55 0e 65 11 09 30 35 00 46 64 04 95 01 81 02 46 a2 02 26 29 03 09 31 81 02 c0 05 0d 09 22 a1 02 15 00 25 01 09 47 09 42 95 02 75 01 81 02 95 01 75 03 25 05 09 51 81 02 75 01 95 03 81 03 05 01 15 00 26 45 05 75 10 55 0e 65 11 09 30 35 00 46 64 04 95 01 81 02 46 a2 02 26 29 03 09 31 81 02 c0 05 0d 09 22 a1 02 15 00 25 01 09 47 09 42 95 02 75 01 81 02 95 01 75 03 25 05 09 51 81 02 75 01 95 03 81 03 05 01 15 00 26 45 05 75 10 55 0e 65 11 09 30 35 00 46 64 04 95 01 81 02 46 a2 02 26 29 03 09 31 81 02 c0 05 0d 09 22 a1 02 15 00 25 01 09 47 09 42 95 02 75 01 81 02 95 01 75 03 25 05 09 51 81 02 75 01 95 03 81 03 05 01 15 00 26 45 05 75 10 55 0e 65 11 09 30 35 00 46 64 04 95 01 81 02 46 a2 02 26 29 03 09 31 81 02 c0 05 0d 55 0c 66 01 10 47 ff ff 00 00 27 ff ff 00 00 75 10 95 01 09 56 81 02 09 54 25 7f 95 01 75 08 81 02 05 09 09 01 25 01 75 01 95 01 81 02 95 07 81 03 05 0d 85 08 09 55 09 59 75 04 95 02 25 0f b1 02 85 0d 09 60 75 01 95 01 15 00 25 01 b1 02 95 07 b1 03 85 07 06 00 ff 09 c5 15 00 26 ff 00 75 08 96 00 01 b1 02 c0 05 0d 09 0e a1 01 85 04 09 22 a1 02 09 52 15 00 25 0a 75 08 95 01 b1 02 c0 09 22 a1 00 85 06 09 57 09 58 75 01 95 02 25 01 b1 02 95 06 b1 03 c0 c0 06 00 ff 09 01 a1 01 85 09 09 02 15 00 26 ff 00 75 08 95 14 91 02 85 0a 09 03 15 00 26 ff 00 75 08 95 14 91 02 85 0b 09 04 15 00 26 ff 00 75 08 95 3d 81 02 85 0c 09 05 15 00 26 ff 00 75 08 95 3d 81 02 85 0f 09 06 15 00 26 ff 00 75 08 95 03 b1 02 85 0e 09 07 15 00 26 ff 00 75 08 95 01 b1 02 c0",
+ )
diff --git a/tools/testing/selftests/hid/tests/test_sony.py b/tools/testing/selftests/hid/tests/test_sony.py
index 7e52c28e59c5..7fd3a8e6137d 100644
--- a/tools/testing/selftests/hid/tests/test_sony.py
+++ b/tools/testing/selftests/hid/tests/test_sony.py
@@ -7,6 +7,7 @@
#
from .base import application_matches
+from .base import KernelModule
from .test_gamepad import BaseTest
from hidtools.device.sony_gamepad import (
PS3Controller,
@@ -24,9 +25,9 @@ import pytest
logger = logging.getLogger("hidtools.test.sony")
-PS3_MODULE = ("sony", "hid_sony")
-PS4_MODULE = ("playstation", "hid_playstation")
-PS5_MODULE = ("playstation", "hid_playstation")
+PS3_MODULE = KernelModule("sony", "hid_sony")
+PS4_MODULE = KernelModule("playstation", "hid_playstation")
+PS5_MODULE = KernelModule("playstation", "hid_playstation")
class SonyBaseTest:
diff --git a/tools/testing/selftests/hid/tests/test_tablet.py b/tools/testing/selftests/hid/tests/test_tablet.py
index a9e2de1e8861..5b9abb616db4 100644
--- a/tools/testing/selftests/hid/tests/test_tablet.py
+++ b/tools/testing/selftests/hid/tests/test_tablet.py
@@ -10,6 +10,7 @@ from . import base
import copy
from enum import Enum
from hidtools.util import BusType
+from .base import HidBpf
import libevdev
import logging
import pytest
@@ -451,6 +452,7 @@ class Pen(object):
def __init__(self, x, y):
self.x = x
self.y = y
+ self.distance = -10
self.tipswitch = False
self.tippressure = 15
self.azimuth = 0
@@ -472,6 +474,7 @@ class Pen(object):
for i in [
"x",
"y",
+ "distance",
"tippressure",
"azimuth",
"width",
@@ -553,6 +556,7 @@ class PenDigitizer(base.UHIDTestDevice):
pen.tipswitch = False
pen.tippressure = 0
pen.azimuth = 0
+ pen.distance = 0
pen.inrange = False
pen.width = 0
pen.height = 0
@@ -867,6 +871,29 @@ class BaseTest:
state machine."""
self._test_states(state_list, scribble, allow_intermediate_states=True)
+ @pytest.mark.skip_if_uhdev(
+ lambda uhdev: "Z" not in uhdev.fields,
+ "Device not compatible, missing Z usage",
+ )
+ @pytest.mark.parametrize("scribble", [True, False], ids=["scribble", "static"])
+ @pytest.mark.parametrize(
+ "state_list",
+ [pytest.param(v, id=k) for k, v in PenState.legal_transitions().items()],
+ )
+ def test_z_reported_as_distance(self, state_list, scribble):
+ """Verify stylus Z values are reported as ABS_DISTANCE."""
+ self._test_states(state_list, scribble, allow_intermediate_states=False)
+
+ uhdev = self.uhdev
+ evdev = uhdev.get_evdev()
+ p = Pen(0, 0)
+ uhdev.move_to(p, PenState.PEN_IS_OUT_OF_RANGE, None)
+ p = Pen(100, 200)
+ uhdev.move_to(p, PenState.PEN_IS_IN_RANGE, None)
+ p.distance = -1
+ events = self.post(uhdev, p, None)
+ assert evdev.value[libevdev.EV_ABS.ABS_DISTANCE] == -1
+
class GXTP_pen(PenDigitizer):
def event(self, pen, test_button):
@@ -1228,9 +1255,9 @@ class Huion_Kamvas_Pro_19_256c_006b(PenDigitizer):
pen.current_state = state
def call_input_event(self, report):
- if report[0] == 0x0a:
+ if report[0] == 0x0A:
# ensures the original second Eraser usage is null
- report[1] &= 0xdf
+ report[1] &= 0xDF
# ensures the original last bit is equal to bit 6 (In Range)
if report[1] & 0x40:
@@ -1291,6 +1318,35 @@ class Huion_Kamvas_Pro_19_256c_006b(PenDigitizer):
return rs
+class Wacom_2d1f_014b(PenDigitizer):
+ """
+ Pen that reports distance values with HID_GD_Z usage.
+ """
+ def __init__(
+ self,
+ name,
+ rdesc_str=None,
+ rdesc=None,
+ application="Pen",
+ physical="Stylus",
+ input_info=(BusType.USB, 0x2D1F, 0x014B),
+ evdev_name_suffix=None,
+ ):
+ super().__init__(
+ name, rdesc_str, rdesc, application, physical, input_info, evdev_name_suffix
+ )
+
+ def match_evdev_rule(self, application, evdev):
+ # there are 2 nodes created by the device, only one matters
+ return evdev.name.endswith("Stylus")
+
+ def event(self, pen, test_button):
+ # this device reports the distance through Z
+ pen.z = pen.distance
+
+ return super().event(pen, test_button)
+
+
################################################################################
#
# Windows 7 compatible devices
@@ -1472,7 +1528,7 @@ class TestGoodix_27c6_0e00(BaseTest.TestTablet):
class TestXPPen_ArtistPro16Gen2_28bd_095b(BaseTest.TestTablet):
- hid_bpfs = [("XPPen__ArtistPro16Gen2.bpf.o", True)]
+ hid_bpfs = [HidBpf("XPPen__ArtistPro16Gen2.bpf.o", True)]
def create_device(self):
dev = XPPen_ArtistPro16Gen2_28bd_095b(
@@ -1484,7 +1540,7 @@ class TestXPPen_ArtistPro16Gen2_28bd_095b(BaseTest.TestTablet):
class TestXPPen_Artist24_28bd_093a(BaseTest.TestTablet):
- hid_bpfs = [("XPPen__Artist24.bpf.o", True)]
+ hid_bpfs = [HidBpf("XPPen__Artist24.bpf.o", True)]
def create_device(self):
return XPPen_Artist24_28bd_093a(
@@ -1495,7 +1551,7 @@ class TestXPPen_Artist24_28bd_093a(BaseTest.TestTablet):
class TestHuion_Kamvas_Pro_19_256c_006b(BaseTest.TestTablet):
- hid_bpfs = [("Huion__Kamvas-Pro-19.bpf.o", True)]
+ hid_bpfs = [HidBpf("Huion__Kamvas-Pro-19.bpf.o", True)]
def create_device(self):
return Huion_Kamvas_Pro_19_256c_006b(
@@ -1503,3 +1559,19 @@ class TestHuion_Kamvas_Pro_19_256c_006b(BaseTest.TestTablet):
rdesc="05 0d 09 02 a1 01 85 0a 09 20 a1 01 09 42 09 44 09 43 09 3c 09 45 15 00 25 01 75 01 95 06 81 02 09 32 75 01 95 01 81 02 81 03 05 01 09 30 09 31 55 0d 65 33 26 ff 7f 35 00 46 00 08 75 10 95 02 81 02 05 0d 09 30 26 ff 3f 75 10 95 01 81 02 09 3d 09 3e 15 a6 25 5a 75 08 95 02 81 02 c0 c0 05 0d 09 04 a1 01 85 04 09 22 a1 02 05 0d 95 01 75 06 09 51 15 00 25 3f 81 02 09 42 25 01 75 01 95 01 81 02 75 01 95 01 81 03 05 01 75 10 55 0e 65 11 09 30 26 ff 7f 35 00 46 15 0c 81 42 09 31 26 ff 7f 46 cb 06 81 42 05 0d 09 30 26 ff 1f 75 10 95 01 81 02 c0 05 0d 09 22 a1 02 05 0d 95 01 75 06 09 51 15 00 25 3f 81 02 09 42 25 01 75 01 95 01 81 02 75 01 95 01 81 03 05 01 75 10 55 0e 65 11 09 30 26 ff 7f 35 00 46 15 0c 81 42 09 31 26 ff 7f 46 cb 06 81 42 05 0d 09 30 26 ff 1f 75 10 95 01 81 02 c0 05 0d 09 56 55 00 65 00 27 ff ff ff 7f 95 01 75 20 81 02 09 54 25 7f 95 01 75 08 81 02 75 08 95 08 81 03 85 05 09 55 25 0a 75 08 95 01 b1 02 06 00 ff 09 c5 85 06 15 00 26 ff 00 75 08 96 00 01 b1 02 c0",
input_info=(BusType.USB, 0x256C, 0x006B),
)
+
+
+################################################################################
+#
+# Devices Reporting Distance
+#
+################################################################################
+
+
+class TestWacom_2d1f_014b(BaseTest.TestTablet):
+ def create_device(self):
+ return Wacom_2d1f_014b(
+ "uhid test Wacom 2d1f_014b",
+ rdesc="05 0d 09 02 a1 01 85 02 09 20 a1 00 09 42 09 44 09 45 09 3c 09 5a 09 32 15 00 25 01 75 01 95 06 81 02 95 02 81 03 05 01 09 30 26 88 3e 46 88 3e 65 11 55 0d 75 10 95 01 81 02 09 31 26 60 53 46 60 53 81 02 05 0d 09 30 26 ff 0f 45 00 65 00 55 00 81 02 06 00 ff 09 04 75 08 26 ff 00 46 ff 00 65 11 55 0e 81 02 05 0d 09 3d 75 10 16 d8 dc 26 28 23 36 d8 dc 46 28 23 65 14 81 02 09 3e 81 02 05 01 09 32 16 01 ff 25 00 36 01 ff 45 00 65 11 81 02 05 0d 09 56 15 00 27 ff ff 00 00 35 00 47 ff ff 00 00 66 01 10 55 0c 81 02 45 00 65 00 55 00 c0 09 00 75 08 26 ff 00 b1 12 85 03 09 00 95 12 b1 12 85 05 09 00 95 04 b1 02 85 06 09 00 95 24 b1 02 85 16 09 00 15 00 26 ff 00 95 06 b1 02 85 17 09 00 95 0c b1 02 85 19 09 00 95 01 b1 02 85 0a 09 00 75 08 95 01 15 10 26 ff 00 b1 02 85 1e 09 00 95 10 b1 02 c0 06 00 ff 09 00 a1 01 85 09 05 0d 09 20 a1 00 09 00 15 00 26 ff 00 75 08 95 10 81 02 c0 09 00 95 03 b1 12 c0 06 00 ff 09 02 a1 01 85 07 09 00 96 09 01 b1 02 85 08 09 00 95 03 81 02 09 00 b1 02 85 0e 09 00 96 0a 01 b1 02 c0 05 0d 09 02 a1 01 85 1a 09 20 a1 00 09 42 09 44 09 45 09 3c 09 5a 09 32 15 00 25 01 75 01 95 06 81 02 09 38 25 03 75 02 95 01 81 02 05 01 09 30 26 88 3e 46 88 3e 65 11 55 0d 75 10 95 01 81 02 09 31 26 60 53 46 60 53 81 02 05 0d 09 30 26 ff 0f 46 b0 0f 66 11 e1 55 02 81 02 06 00 ff 09 04 75 08 26 ff 00 46 ff 00 65 11 55 0e 81 02 05 0d 09 3d 75 10 16 d8 dc 26 28 23 36 d8 dc 46 28 23 65 14 81 02 09 3e 81 02 05 01 09 32 16 01 ff 25 00 36 01 ff 45 00 65 11 81 02 05 0d 09 56 15 00 27 ff ff 00 00 35 00 47 ff ff 00 00 66 01 10 55 0c 81 02 45 00 65 00 55 00 c0 c0 06 00 ff 09 00 a1 01 85 1b 05 0d 09 20 a1 00 09 00 26 ff 00 75 08 95 10 81 02 c0 c0",
+ input_info=(BusType.USB, 0x2D1F, 0x014B),
+ )
diff --git a/tools/testing/selftests/hid/tests/test_wacom_generic.py b/tools/testing/selftests/hid/tests/test_wacom_generic.py
index b62c7dba6777..2d6d04f0ff80 100644
--- a/tools/testing/selftests/hid/tests/test_wacom_generic.py
+++ b/tools/testing/selftests/hid/tests/test_wacom_generic.py
@@ -40,7 +40,7 @@ import logging
logger = logging.getLogger("hidtools.test.wacom")
-KERNEL_MODULE = ("wacom", "wacom")
+KERNEL_MODULE = base.KernelModule("wacom", "wacom")
class ProximityState(Enum):
@@ -892,9 +892,9 @@ class TestDTH2452Tablet(test_multitouch.BaseTest.TestMultitouch, TouchTabletTest
locations. The value of `t` may be incremented over time to move the
points along a linear path.
"""
- return [ self.make_contact(id, t) for id in range(0, n) ]
+ return [self.make_contact(id, t) for id in range(0, n)]
- def assert_contact(self, uhdev, evdev, contact_ids, t=0):
+ def assert_contact(self, evdev, contact_ids, t=0):
"""
Assert properties of a contact generated by make_contact.
"""
@@ -916,12 +916,12 @@ class TestDTH2452Tablet(test_multitouch.BaseTest.TestMultitouch, TouchTabletTest
assert evdev.slots[slot_num][libevdev.EV_ABS.ABS_MT_POSITION_X] == x
assert evdev.slots[slot_num][libevdev.EV_ABS.ABS_MT_POSITION_Y] == y
- def assert_contacts(self, uhdev, evdev, data, t=0):
+ def assert_contacts(self, evdev, data, t=0):
"""
Assert properties of a list of contacts generated by make_contacts.
"""
for contact_ids in data:
- self.assert_contact(uhdev, evdev, contact_ids, t)
+ self.assert_contact(evdev, contact_ids, t)
def test_contact_id_0(self):
"""
@@ -997,12 +997,16 @@ class TestDTH2452Tablet(test_multitouch.BaseTest.TestMultitouch, TouchTabletTest
assert libevdev.InputEvent(libevdev.EV_KEY.BTN_TOUCH, 1) in events
- self.assert_contacts(uhdev, evdev,
- [ self.ContactIds(contact_id = 0, tracking_id = -1, slot_num = None),
- self.ContactIds(contact_id = 1, tracking_id = 0, slot_num = 0),
- self.ContactIds(contact_id = 2, tracking_id = -1, slot_num = None),
- self.ContactIds(contact_id = 3, tracking_id = 1, slot_num = 1),
- self.ContactIds(contact_id = 4, tracking_id = -1, slot_num = None) ])
+ self.assert_contacts(
+ evdev,
+ [
+ self.ContactIds(contact_id=0, tracking_id=-1, slot_num=None),
+ self.ContactIds(contact_id=1, tracking_id=0, slot_num=0),
+ self.ContactIds(contact_id=2, tracking_id=-1, slot_num=None),
+ self.ContactIds(contact_id=3, tracking_id=1, slot_num=1),
+ self.ContactIds(contact_id=4, tracking_id=-1, slot_num=None),
+ ],
+ )
def confidence_change_assert_playback(self, uhdev, evdev, timeline):
"""
@@ -1026,8 +1030,8 @@ class TestDTH2452Tablet(test_multitouch.BaseTest.TestMultitouch, TouchTabletTest
events = uhdev.next_sync_events()
self.debug_reports(r, uhdev, events)
- ids = [ x[0] for x in state ]
- self.assert_contacts(uhdev, evdev, ids, t)
+ ids = [x[0] for x in state]
+ self.assert_contacts(evdev, ids, t)
t += 1
@@ -1044,27 +1048,68 @@ class TestDTH2452Tablet(test_multitouch.BaseTest.TestMultitouch, TouchTabletTest
uhdev = self.uhdev
evdev = uhdev.get_evdev()
- self.confidence_change_assert_playback(uhdev, evdev, [
- # t=0: Contact 0 == Down + confident; Contact 1 == Down + confident
- # Both fingers confidently in contact
- [(self.ContactIds(contact_id = 0, tracking_id = 0, slot_num = 0), True, True),
- (self.ContactIds(contact_id = 1, tracking_id = 1, slot_num = 1), True, True)],
-
- # t=1: Contact 0 == !Down + confident; Contact 1 == Down + confident
- # First finger looses confidence and clears only the tipswitch flag
- [(self.ContactIds(contact_id = 0, tracking_id = -1, slot_num = 0), False, True),
- (self.ContactIds(contact_id = 1, tracking_id = 1, slot_num = 1), True, True)],
-
- # t=2: Contact 0 == !Down + !confident; Contact 1 == Down + confident
- # First finger has lost confidence and has both flags cleared
- [(self.ContactIds(contact_id = 0, tracking_id = -1, slot_num = 0), False, False),
- (self.ContactIds(contact_id = 1, tracking_id = 1, slot_num = 1), True, True)],
-
- # t=3: Contact 0 == !Down + !confident; Contact 1 == Down + confident
- # First finger has lost confidence and has both flags cleared
- [(self.ContactIds(contact_id = 0, tracking_id = -1, slot_num = 0), False, False),
- (self.ContactIds(contact_id = 1, tracking_id = 1, slot_num = 1), True, True)]
- ])
+ self.confidence_change_assert_playback(
+ uhdev,
+ evdev,
+ [
+ # t=0: Contact 0 == Down + confident; Contact 1 == Down + confident
+ # Both fingers confidently in contact
+ [
+ (
+ self.ContactIds(contact_id=0, tracking_id=0, slot_num=0),
+ True,
+ True,
+ ),
+ (
+ self.ContactIds(contact_id=1, tracking_id=1, slot_num=1),
+ True,
+ True,
+ ),
+ ],
+ # t=1: Contact 0 == !Down + confident; Contact 1 == Down + confident
+ # First finger loses confidence and clears only the tipswitch flag
+ [
+ (
+ self.ContactIds(contact_id=0, tracking_id=-1, slot_num=0),
+ False,
+ True,
+ ),
+ (
+ self.ContactIds(contact_id=1, tracking_id=1, slot_num=1),
+ True,
+ True,
+ ),
+ ],
+ # t=2: Contact 0 == !Down + !confident; Contact 1 == Down + confident
+ # First finger has lost confidence and has both flags cleared
+ [
+ (
+ self.ContactIds(contact_id=0, tracking_id=-1, slot_num=0),
+ False,
+ False,
+ ),
+ (
+ self.ContactIds(contact_id=1, tracking_id=1, slot_num=1),
+ True,
+ True,
+ ),
+ ],
+ # t=3: Contact 0 == !Down + !confident; Contact 1 == Down + confident
+ # First finger has lost confidence and has both flags cleared
+ [
+ (
+ self.ContactIds(contact_id=0, tracking_id=-1, slot_num=0),
+ False,
+ False,
+ ),
+ (
+ self.ContactIds(contact_id=1, tracking_id=1, slot_num=1),
+ True,
+ True,
+ ),
+ ],
+ ],
+ )
def test_confidence_loss_b(self):
"""
@@ -1079,27 +1124,68 @@ class TestDTH2452Tablet(test_multitouch.BaseTest.TestMultitouch, TouchTabletTest
uhdev = self.uhdev
evdev = uhdev.get_evdev()
- self.confidence_change_assert_playback(uhdev, evdev, [
- # t=0: Contact 0 == Down + confident; Contact 1 == Down + confident
- # Both fingers confidently in contact
- [(self.ContactIds(contact_id = 0, tracking_id = 0, slot_num = 0), True, True),
- (self.ContactIds(contact_id = 1, tracking_id = 1, slot_num = 1), True, True)],
-
- # t=1: Contact 0 == !Down + !confident; Contact 1 == Down + confident
- # First finger looses confidence and has both flags cleared simultaneously
- [(self.ContactIds(contact_id = 0, tracking_id = -1, slot_num = 0), False, False),
- (self.ContactIds(contact_id = 1, tracking_id = 1, slot_num = 1), True, True)],
-
- # t=2: Contact 0 == !Down + !confident; Contact 1 == Down + confident
- # First finger has lost confidence and has both flags cleared
- [(self.ContactIds(contact_id = 0, tracking_id = -1, slot_num = 0), False, False),
- (self.ContactIds(contact_id = 1, tracking_id = 1, slot_num = 1), True, True)],
-
- # t=3: Contact 0 == !Down + !confident; Contact 1 == Down + confident
- # First finger has lost confidence and has both flags cleared
- [(self.ContactIds(contact_id = 0, tracking_id = -1, slot_num = 0), False, False),
- (self.ContactIds(contact_id = 1, tracking_id = 1, slot_num = 1), True, True)]
- ])
+ self.confidence_change_assert_playback(
+ uhdev,
+ evdev,
+ [
+ # t=0: Contact 0 == Down + confident; Contact 1 == Down + confident
+ # Both fingers confidently in contact
+ [
+ (
+ self.ContactIds(contact_id=0, tracking_id=0, slot_num=0),
+ True,
+ True,
+ ),
+ (
+ self.ContactIds(contact_id=1, tracking_id=1, slot_num=1),
+ True,
+ True,
+ ),
+ ],
+ # t=1: Contact 0 == !Down + !confident; Contact 1 == Down + confident
+ # First finger loses confidence and has both flags cleared simultaneously
+ [
+ (
+ self.ContactIds(contact_id=0, tracking_id=-1, slot_num=0),
+ False,
+ False,
+ ),
+ (
+ self.ContactIds(contact_id=1, tracking_id=1, slot_num=1),
+ True,
+ True,
+ ),
+ ],
+ # t=2: Contact 0 == !Down + !confident; Contact 1 == Down + confident
+ # First finger has lost confidence and has both flags cleared
+ [
+ (
+ self.ContactIds(contact_id=0, tracking_id=-1, slot_num=0),
+ False,
+ False,
+ ),
+ (
+ self.ContactIds(contact_id=1, tracking_id=1, slot_num=1),
+ True,
+ True,
+ ),
+ ],
+ # t=3: Contact 0 == !Down + !confident; Contact 1 == Down + confident
+ # First finger has lost confidence and has both flags cleared
+ [
+ (
+ self.ContactIds(contact_id=0, tracking_id=-1, slot_num=0),
+ False,
+ False,
+ ),
+ (
+ self.ContactIds(contact_id=1, tracking_id=1, slot_num=1),
+ True,
+ True,
+ ),
+ ],
+ ],
+ )
def test_confidence_loss_c(self):
"""
@@ -1113,27 +1199,68 @@ class TestDTH2452Tablet(test_multitouch.BaseTest.TestMultitouch, TouchTabletTest
uhdev = self.uhdev
evdev = uhdev.get_evdev()
- self.confidence_change_assert_playback(uhdev, evdev, [
- # t=0: Contact 0 == Down + confident; Contact 1 == Down + confident
- # Both fingers confidently in contact
- [(self.ContactIds(contact_id = 0, tracking_id = 0, slot_num = 0), True, True),
- (self.ContactIds(contact_id = 1, tracking_id = 1, slot_num = 1), True, True)],
-
- # t=1: Contact 0 == Down + !confident; Contact 1 == Down + confident
- # First finger looses confidence and clears only the confidence flag
- [(self.ContactIds(contact_id = 0, tracking_id = -1, slot_num = 0), True, False),
- (self.ContactIds(contact_id = 1, tracking_id = 1, slot_num = 1), True, True)],
-
- # t=2: Contact 0 == !Down + !confident; Contact 1 == Down + confident
- # First finger has lost confidence and has both flags cleared
- [(self.ContactIds(contact_id = 0, tracking_id = -1, slot_num = 0), False, False),
- (self.ContactIds(contact_id = 1, tracking_id = 1, slot_num = 1), True, True)],
-
- # t=3: Contact 0 == !Down + !confident; Contact 1 == Down + confident
- # First finger has lost confidence and has both flags cleared
- [(self.ContactIds(contact_id = 0, tracking_id = -1, slot_num = 0), False, False),
- (self.ContactIds(contact_id = 1, tracking_id = 1, slot_num = 1), True, True)]
- ])
+ self.confidence_change_assert_playback(
+ uhdev,
+ evdev,
+ [
+ # t=0: Contact 0 == Down + confident; Contact 1 == Down + confident
+ # Both fingers confidently in contact
+ [
+ (
+ self.ContactIds(contact_id=0, tracking_id=0, slot_num=0),
+ True,
+ True,
+ ),
+ (
+ self.ContactIds(contact_id=1, tracking_id=1, slot_num=1),
+ True,
+ True,
+ ),
+ ],
+ # t=1: Contact 0 == Down + !confident; Contact 1 == Down + confident
+ # First finger loses confidence and clears only the confidence flag
+ [
+ (
+ self.ContactIds(contact_id=0, tracking_id=-1, slot_num=0),
+ True,
+ False,
+ ),
+ (
+ self.ContactIds(contact_id=1, tracking_id=1, slot_num=1),
+ True,
+ True,
+ ),
+ ],
+ # t=2: Contact 0 == !Down + !confident; Contact 1 == Down + confident
+ # First finger has lost confidence and has both flags cleared
+ [
+ (
+ self.ContactIds(contact_id=0, tracking_id=-1, slot_num=0),
+ False,
+ False,
+ ),
+ (
+ self.ContactIds(contact_id=1, tracking_id=1, slot_num=1),
+ True,
+ True,
+ ),
+ ],
+ # t=3: Contact 0 == !Down + !confident; Contact 1 == Down + confident
+ # First finger has lost confidence and has both flags cleared
+ [
+ (
+ self.ContactIds(contact_id=0, tracking_id=-1, slot_num=0),
+ False,
+ False,
+ ),
+ (
+ self.ContactIds(contact_id=1, tracking_id=1, slot_num=1),
+ True,
+ True,
+ ),
+ ],
+ ],
+ )
def test_confidence_gain_a(self):
"""
@@ -1144,27 +1271,68 @@ class TestDTH2452Tablet(test_multitouch.BaseTest.TestMultitouch, TouchTabletTest
uhdev = self.uhdev
evdev = uhdev.get_evdev()
- self.confidence_change_assert_playback(uhdev, evdev, [
- # t=0: Contact 0 == Down + !confident; Contact 1 == Down + confident
- # Only second finger is confidently in contact
- [(self.ContactIds(contact_id = 0, tracking_id = -1, slot_num = None), True, False),
- (self.ContactIds(contact_id = 1, tracking_id = 0, slot_num = 0), True, True)],
-
- # t=1: Contact 0 == Down + !confident; Contact 1 == Down + confident
- # First finger gains confidence
- [(self.ContactIds(contact_id = 0, tracking_id = -1, slot_num = None), True, False),
- (self.ContactIds(contact_id = 1, tracking_id = 0, slot_num = 0), True, True)],
-
- # t=2: Contact 0 == Down + confident; Contact 1 == Down + confident
- # First finger remains confident
- [(self.ContactIds(contact_id = 0, tracking_id = 1, slot_num = 1), True, True),
- (self.ContactIds(contact_id = 1, tracking_id = 0, slot_num = 0), True, True)],
-
- # t=3: Contact 0 == Down + confident; Contact 1 == Down + confident
- # First finger remains confident
- [(self.ContactIds(contact_id = 0, tracking_id = 1, slot_num = 1), True, True),
- (self.ContactIds(contact_id = 1, tracking_id = 0, slot_num = 0), True, True)]
- ])
+ self.confidence_change_assert_playback(
+ uhdev,
+ evdev,
+ [
+ # t=0: Contact 0 == Down + !confident; Contact 1 == Down + confident
+ # Only second finger is confidently in contact
+ [
+ (
+ self.ContactIds(contact_id=0, tracking_id=-1, slot_num=None),
+ True,
+ False,
+ ),
+ (
+ self.ContactIds(contact_id=1, tracking_id=0, slot_num=0),
+ True,
+ True,
+ ),
+ ],
+ # t=1: Contact 0 == Down + !confident; Contact 1 == Down + confident
+ # First finger gains confidence
+ [
+ (
+ self.ContactIds(contact_id=0, tracking_id=-1, slot_num=None),
+ True,
+ False,
+ ),
+ (
+ self.ContactIds(contact_id=1, tracking_id=0, slot_num=0),
+ True,
+ True,
+ ),
+ ],
+ # t=2: Contact 0 == Down + confident; Contact 1 == Down + confident
+ # First finger remains confident
+ [
+ (
+ self.ContactIds(contact_id=0, tracking_id=1, slot_num=1),
+ True,
+ True,
+ ),
+ (
+ self.ContactIds(contact_id=1, tracking_id=0, slot_num=0),
+ True,
+ True,
+ ),
+ ],
+ # t=3: Contact 0 == Down + confident; Contact 1 == Down + confident
+ # First finger remains confident
+ [
+ (
+ self.ContactIds(contact_id=0, tracking_id=1, slot_num=1),
+ True,
+ True,
+ ),
+ (
+ self.ContactIds(contact_id=1, tracking_id=0, slot_num=0),
+ True,
+ True,
+ ),
+ ],
+ ],
+ )
def test_confidence_gain_b(self):
"""
@@ -1175,24 +1343,65 @@ class TestDTH2452Tablet(test_multitouch.BaseTest.TestMultitouch, TouchTabletTest
uhdev = self.uhdev
evdev = uhdev.get_evdev()
- self.confidence_change_assert_playback(uhdev, evdev, [
- # t=0: Contact 0 == Down + confident; Contact 1 == Down + confident
- # First and second finger confidently in contact
- [(self.ContactIds(contact_id = 0, tracking_id = 0, slot_num = 0), True, True),
- (self.ContactIds(contact_id = 1, tracking_id = 1, slot_num = 1), True, True)],
-
- # t=1: Contact 0 == Down + !confident; Contact 1 == Down + confident
- # Firtst finger looses confidence
- [(self.ContactIds(contact_id = 0, tracking_id = -1, slot_num = 0), True, False),
- (self.ContactIds(contact_id = 1, tracking_id = 1, slot_num = 1), True, True)],
-
- # t=2: Contact 0 == Down + confident; Contact 1 == Down + confident
- # First finger gains confidence
- [(self.ContactIds(contact_id = 0, tracking_id = 2, slot_num = 0), True, True),
- (self.ContactIds(contact_id = 1, tracking_id = 1, slot_num = 1), True, True)],
-
- # t=3: Contact 0 == !Down + confident; Contact 1 == Down + confident
- # First finger goes up
- [(self.ContactIds(contact_id = 0, tracking_id = -1, slot_num = 0), False, True),
- (self.ContactIds(contact_id = 1, tracking_id = 1, slot_num = 1), True, True)]
- ])
+ self.confidence_change_assert_playback(
+ uhdev,
+ evdev,
+ [
+ # t=0: Contact 0 == Down + confident; Contact 1 == Down + confident
+ # First and second finger confidently in contact
+ [
+ (
+ self.ContactIds(contact_id=0, tracking_id=0, slot_num=0),
+ True,
+ True,
+ ),
+ (
+ self.ContactIds(contact_id=1, tracking_id=1, slot_num=1),
+ True,
+ True,
+ ),
+ ],
+ # t=1: Contact 0 == Down + !confident; Contact 1 == Down + confident
+ # First finger loses confidence
+ [
+ (
+ self.ContactIds(contact_id=0, tracking_id=-1, slot_num=0),
+ True,
+ False,
+ ),
+ (
+ self.ContactIds(contact_id=1, tracking_id=1, slot_num=1),
+ True,
+ True,
+ ),
+ ],
+ # t=2: Contact 0 == Down + confident; Contact 1 == Down + confident
+ # First finger gains confidence
+ [
+ (
+ self.ContactIds(contact_id=0, tracking_id=2, slot_num=0),
+ True,
+ True,
+ ),
+ (
+ self.ContactIds(contact_id=1, tracking_id=1, slot_num=1),
+ True,
+ True,
+ ),
+ ],
+ # t=3: Contact 0 == !Down + confident; Contact 1 == Down + confident
+ # First finger goes up
+ [
+ (
+ self.ContactIds(contact_id=0, tracking_id=-1, slot_num=0),
+ False,
+ True,
+ ),
+ (
+ self.ContactIds(contact_id=1, tracking_id=1, slot_num=1),
+ True,
+ True,
+ ),
+ ],
+ ],
+ )
diff --git a/tools/testing/selftests/hid/vmtest.sh b/tools/testing/selftests/hid/vmtest.sh
index db534e9099a8..ecbd57f775a0 100755
--- a/tools/testing/selftests/hid/vmtest.sh
+++ b/tools/testing/selftests/hid/vmtest.sh
@@ -1,296 +1,474 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2025 Red Hat
+# Copyright (c) 2025 Meta Platforms, Inc. and affiliates
+#
+# Dependencies:
+# * virtme-ng
+# * busybox-static (used by virtme-ng)
+# * qemu (used by virtme-ng)
+
+readonly SCRIPT_DIR="$(cd -P -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd -P)"
+readonly KERNEL_CHECKOUT=$(realpath "${SCRIPT_DIR}"/../../../../)
+
+source "${SCRIPT_DIR}"/../kselftest/ktap_helpers.sh
+
+readonly HID_BPF_TEST="${SCRIPT_DIR}"/hid_bpf
+readonly HIDRAW_TEST="${SCRIPT_DIR}"/hidraw
+readonly HID_BPF_PROGS="${KERNEL_CHECKOUT}/drivers/hid/bpf/progs"
+readonly SSH_GUEST_PORT=22
+readonly WAIT_PERIOD=3
+readonly WAIT_PERIOD_MAX=60
+readonly WAIT_TOTAL=$(( WAIT_PERIOD * WAIT_PERIOD_MAX ))
+readonly QEMU_PIDFILE=$(mktemp /tmp/qemu_hid_vmtest_XXXX.pid)
+
+readonly QEMU_OPTS="\
+ --pidfile ${QEMU_PIDFILE} \
+"
+readonly KERNEL_CMDLINE=""
+readonly LOG=$(mktemp /tmp/hid_vmtest_XXXX.log)
+readonly TEST_NAMES=(vm_hid_bpf vm_hidraw vm_pytest)
+readonly TEST_DESCS=(
+ "Run hid_bpf tests in the VM."
+ "Run hidraw tests in the VM."
+ "Run the hid-tools test-suite in the VM."
+)
+
+VERBOSE=0
+SHELL_MODE=0
+BUILD=0
+BUILD_HOST=""
+BUILD_HOST_PODMAN_CONTAINER_NAME=""
+
+usage() {
+ local name
+ local desc
+ local i
+
+ echo
+ echo "$0 [OPTIONS] [TEST]... [-- tests-args]"
+ echo "If no TEST argument is given, all tests will be run."
+ echo
+ echo "Options"
+ echo " -b: build the kernel from the current source tree and use it for guest VMs"
+ echo " -H: hostname for remote build host (used with -b)"
+ echo " -p: podman container name for remote build host (used with -b)"
+ echo " Example: -H beefyserver -p vng"
+ echo " -q: set the path to or name of qemu binary"
+ echo " -s: start a shell in the VM instead of running tests"
+ echo " -v: more verbose output (can be repeated multiple times)"
+ echo
+ echo "Available tests"
+
+ for ((i = 0; i < ${#TEST_NAMES[@]}; i++)); do
+ name=${TEST_NAMES[${i}]}
+ desc=${TEST_DESCS[${i}]}
+ printf "\t%-35s%-35s\n" "${name}" "${desc}"
+ done
+ echo
-set -u
-set -e
-
-# This script currently only works for x86_64
-ARCH="$(uname -m)"
-case "${ARCH}" in
-x86_64)
- QEMU_BINARY=qemu-system-x86_64
- BZIMAGE="arch/x86/boot/bzImage"
- ;;
-*)
- echo "Unsupported architecture"
exit 1
- ;;
-esac
-SCRIPT_DIR="$(dirname $(realpath $0))"
-OUTPUT_DIR="$SCRIPT_DIR/results"
-KCONFIG_REL_PATHS=("${SCRIPT_DIR}/config" "${SCRIPT_DIR}/config.common" "${SCRIPT_DIR}/config.${ARCH}")
-B2C_URL="https://gitlab.freedesktop.org/gfx-ci/boot2container/-/raw/main/vm2c.py"
-NUM_COMPILE_JOBS="$(nproc)"
-LOG_FILE_BASE="$(date +"hid_selftests.%Y-%m-%d_%H-%M-%S")"
-LOG_FILE="${LOG_FILE_BASE}.log"
-EXIT_STATUS_FILE="${LOG_FILE_BASE}.exit_status"
-CONTAINER_IMAGE="registry.freedesktop.org/bentiss/hid/fedora/39:2023-11-22.1"
-
-TARGETS="${TARGETS:=$(basename ${SCRIPT_DIR})}"
-DEFAULT_COMMAND="pip3 install hid-tools; make -C tools/testing/selftests TARGETS=${TARGETS} run_tests"
-
-usage()
-{
- cat <<EOF
-Usage: $0 [-j N] [-s] [-b] [-d <output_dir>] -- [<command>]
-
-<command> is the command you would normally run when you are in
-the source kernel direcory. e.g:
-
- $0 -- ./tools/testing/selftests/hid/hid_bpf
-
-If no command is specified and a debug shell (-s) is not requested,
-"${DEFAULT_COMMAND}" will be run by default.
-
-If you build your kernel using KBUILD_OUTPUT= or O= options, these
-can be passed as environment variables to the script:
-
- O=<kernel_build_path> $0 -- ./tools/testing/selftests/hid/hid_bpf
-
-or
-
- KBUILD_OUTPUT=<kernel_build_path> $0 -- ./tools/testing/selftests/hid/hid_bpf
-
-Options:
-
- -u) Update the boot2container script to a newer version.
- -d) Update the output directory (default: ${OUTPUT_DIR})
- -b) Run only the build steps for the kernel and the selftests
- -j) Number of jobs for compilation, similar to -j in make
- (default: ${NUM_COMPILE_JOBS})
- -s) Instead of powering off the VM, start an interactive
- shell. If <command> is specified, the shell runs after
- the command finishes executing
-EOF
}
-download()
-{
- local file="$1"
+die() {
+ echo "$*" >&2
+ exit "${KSFT_FAIL}"
+}
- echo "Downloading $file..." >&2
- curl -Lsf "$file" -o "${@:2}"
+vm_ssh() {
+ # vng --ssh-client keeps shouting "Warning: Permanently added 'virtme-ng%22'
+ # (ED25519) to the list of known hosts.", so replace the command with what's
+ # actually called and add the "-q" option.
+ stdbuf -oL ssh -q \
+ -F ${HOME}/.cache/virtme-ng/.ssh/virtme-ng-ssh.conf \
+ -l root virtme-ng%${SSH_GUEST_PORT} \
+ "$@"
+ return $?
}
-recompile_kernel()
-{
- local kernel_checkout="$1"
- local make_command="$2"
+cleanup() {
+ if [[ -s "${QEMU_PIDFILE}" ]]; then
+ pkill -SIGTERM -F "${QEMU_PIDFILE}" > /dev/null 2>&1
+ fi
- cd "${kernel_checkout}"
+ # If failure occurred during or before qemu start up, then we need
+ # to clean this up ourselves.
+ if [[ -e "${QEMU_PIDFILE}" ]]; then
+ rm "${QEMU_PIDFILE}"
+ fi
+}
+
+check_args() {
+ local found
- ${make_command} olddefconfig
- ${make_command} headers
- ${make_command}
+ for arg in "$@"; do
+ found=0
+ for name in "${TEST_NAMES[@]}"; do
+ if [[ "${name}" = "${arg}" ]]; then
+ found=1
+ break
+ fi
+ done
+
+ if [[ "${found}" -eq 0 ]]; then
+ echo "${arg} is not an available test" >&2
+ usage
+ fi
+ done
+
+ for arg in "$@"; do
+ if ! command -v > /dev/null "test_${arg}"; then
+ echo "Test ${arg} not found" >&2
+ usage
+ fi
+ done
+}
+
+check_deps() {
+ for dep in vng ${QEMU} busybox pkill ssh pytest; do
+ if [[ ! -x $(command -v "${dep}") ]]; then
+ echo -e "skip: dependency ${dep} not found!\n"
+ exit "${KSFT_SKIP}"
+ fi
+ done
+
+ if [[ ! -x $(command -v "${HID_BPF_TEST}") ]]; then
+ printf "skip: %s not found!" "${HID_BPF_TEST}"
+ printf " Please build the kselftest hid_bpf target.\n"
+ exit "${KSFT_SKIP}"
+ fi
+
+ if [[ ! -x $(command -v "${HIDRAW_TEST}") ]]; then
+ printf "skip: %s not found!" "${HIDRAW_TEST}"
+ printf " Please build the kselftest hidraw target.\n"
+ exit "${KSFT_SKIP}"
+ fi
}
-update_selftests()
-{
- local kernel_checkout="$1"
- local selftests_dir="${kernel_checkout}/tools/testing/selftests/hid"
+check_vng() {
+ local tested_versions
+ local version
+ local ok
- cd "${selftests_dir}"
- ${make_command}
+ tested_versions=("1.36" "1.37")
+ version="$(vng --version)"
+
+ ok=0
+ for tv in "${tested_versions[@]}"; do
+ if [[ "${version}" == *"${tv}"* ]]; then
+ ok=1
+ break
+ fi
+ done
+
+ if [[ ! "${ok}" -eq 1 ]]; then
+ printf "warning: vng version '%s' has not been tested and may " "${version}" >&2
+ printf "not function properly.\n\tThe following versions have been tested: " >&2
+ echo "${tested_versions[@]}" >&2
+ fi
}
-run_vm()
-{
- local run_dir="$1"
- local b2c="$2"
- local kernel_bzimage="$3"
- local command="$4"
- local post_command=""
-
- cd "${run_dir}"
-
- if ! which "${QEMU_BINARY}" &> /dev/null; then
- cat <<EOF
-Could not find ${QEMU_BINARY}
-Please install qemu or set the QEMU_BINARY environment variable.
-EOF
+handle_build() {
+ if [[ ! "${BUILD}" -eq 1 ]]; then
+ return
+ fi
+
+ if [[ ! -d "${KERNEL_CHECKOUT}" ]]; then
+ echo "-b requires vmtest.sh called from the kernel source tree" >&2
exit 1
fi
- # alpine (used in post-container requires the PATH to have /bin
- export PATH=$PATH:/bin
+ pushd "${KERNEL_CHECKOUT}" &>/dev/null
- if [[ "${debug_shell}" != "yes" ]]
- then
- touch ${OUTPUT_DIR}/${LOG_FILE}
- command="mount bpffs -t bpf /sys/fs/bpf/; set -o pipefail ; ${command} 2>&1 | tee ${OUTPUT_DIR}/${LOG_FILE}"
- post_command="cat ${OUTPUT_DIR}/${LOG_FILE}"
- else
- command="mount bpffs -t bpf /sys/fs/bpf/; ${command}"
+ if ! vng --kconfig --config "${SCRIPT_DIR}"/config; then
+ die "failed to generate .config for kernel source tree (${KERNEL_CHECKOUT})"
fi
- set +e
- $b2c --command "${command}" \
- --kernel ${kernel_bzimage} \
- --workdir ${OUTPUT_DIR} \
- --image ${CONTAINER_IMAGE}
+ local vng_args=("-v" "--config" "${SCRIPT_DIR}/config" "--build")
- echo $? > ${OUTPUT_DIR}/${EXIT_STATUS_FILE}
+ if [[ -n "${BUILD_HOST}" ]]; then
+ vng_args+=("--build-host" "${BUILD_HOST}")
+ fi
- set -e
+ if [[ -n "${BUILD_HOST_PODMAN_CONTAINER_NAME}" ]]; then
+ vng_args+=("--build-host-exec-prefix" \
+ "podman exec -ti ${BUILD_HOST_PODMAN_CONTAINER_NAME}")
+ fi
- ${post_command}
-}
+ if ! vng "${vng_args[@]}"; then
+ die "failed to build kernel from source tree (${KERNEL_CHECKOUT})"
+ fi
-is_rel_path()
-{
- local path="$1"
+ if ! make -j$(nproc) -C "${HID_BPF_PROGS}"; then
+ die "failed to build HID bpf objects from source tree (${HID_BPF_PROGS})"
+ fi
- [[ ${path:0:1} != "/" ]]
+ if ! make -j$(nproc) -C "${SCRIPT_DIR}"; then
+ die "failed to build HID selftests from source tree (${SCRIPT_DIR})"
+ fi
+
+ popd &>/dev/null
}
-do_update_kconfig()
-{
- local kernel_checkout="$1"
- local kconfig_file="$2"
+vm_start() {
+ local logfile=/dev/null
+ local verbose_opt=""
+ local kernel_opt=""
+ local qemu
- rm -f "$kconfig_file" 2> /dev/null
+ qemu=$(command -v "${QEMU}")
- for config in "${KCONFIG_REL_PATHS[@]}"; do
- local kconfig_src="${config}"
- cat "$kconfig_src" >> "$kconfig_file"
- done
-}
+ if [[ "${VERBOSE}" -eq 2 ]]; then
+ verbose_opt="--verbose"
+ logfile=/dev/stdout
+ fi
-update_kconfig()
-{
- local kernel_checkout="$1"
- local kconfig_file="$2"
-
- if [[ -f "${kconfig_file}" ]]; then
- local local_modified="$(stat -c %Y "${kconfig_file}")"
-
- for config in "${KCONFIG_REL_PATHS[@]}"; do
- local kconfig_src="${config}"
- local src_modified="$(stat -c %Y "${kconfig_src}")"
- # Only update the config if it has been updated after the
- # previously cached config was created. This avoids
- # unnecessarily compiling the kernel and selftests.
- if [[ "${src_modified}" -gt "${local_modified}" ]]; then
- do_update_kconfig "$kernel_checkout" "$kconfig_file"
- # Once we have found one outdated configuration
- # there is no need to check other ones.
- break
- fi
- done
- else
- do_update_kconfig "$kernel_checkout" "$kconfig_file"
+ # If we are running from within the kernel source tree, use the kernel source tree
+ # as the kernel to boot, otherwise use the currently running kernel.
+ if [[ "$(realpath "$(pwd)")" == "${KERNEL_CHECKOUT}"* ]]; then
+ kernel_opt="${KERNEL_CHECKOUT}"
fi
-}
-main()
-{
- local script_dir="$(cd -P -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd -P)"
- local kernel_checkout=$(realpath "${script_dir}"/../../../../)
- # By default the script searches for the kernel in the checkout directory but
- # it also obeys environment variables O= and KBUILD_OUTPUT=
- local kernel_bzimage="${kernel_checkout}/${BZIMAGE}"
- local command="${DEFAULT_COMMAND}"
- local update_b2c="no"
- local debug_shell="no"
- local build_only="no"
-
- while getopts ':hsud:j:b' opt; do
- case ${opt} in
- u)
- update_b2c="yes"
- ;;
- d)
- OUTPUT_DIR="$OPTARG"
- ;;
- j)
- NUM_COMPILE_JOBS="$OPTARG"
- ;;
- s)
- command="/bin/sh"
- debug_shell="yes"
- ;;
- b)
- build_only="yes"
- ;;
- h)
- usage
- exit 0
- ;;
- \? )
- echo "Invalid Option: -$OPTARG"
- usage
- exit 1
- ;;
- : )
- echo "Invalid Option: -$OPTARG requires an argument"
- usage
- exit 1
- ;;
- esac
- done
- shift $((OPTIND -1))
-
- # trap 'catch "$?"' EXIT
- if [[ "${build_only}" == "no" && "${debug_shell}" == "no" ]]; then
- if [[ $# -eq 0 ]]; then
- echo "No command specified, will run ${DEFAULT_COMMAND} in the vm"
- else
- command="$@"
-
- if [[ "${command}" == "/bin/bash" || "${command}" == "bash" ]]
- then
- debug_shell="yes"
- fi
+ vng \
+ --run \
+ ${kernel_opt} \
+ ${verbose_opt} \
+ --qemu-opts="${QEMU_OPTS}" \
+ --qemu="${qemu}" \
+ --user root \
+ --append "${KERNEL_CMDLINE}" \
+ --ssh "${SSH_GUEST_PORT}" \
+ --rw &> ${logfile} &
+
+ local vng_pid=$!
+ local elapsed=0
+
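+ # Wait for qemu to write its pidfile; bail out if vng exits early or the
+ # boot takes longer than WAIT_TOTAL seconds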
+ while [[ ! -s "${QEMU_PIDFILE}" ]]; do
+ if ! kill -0 "${vng_pid}" 2>/dev/null; then
+ echo "vng process (PID ${vng_pid}) exited early, check logs for details" >&2
+ die "failed to boot VM"
fi
- fi
- local kconfig_file="${OUTPUT_DIR}/latest.config"
- local make_command="make -j ${NUM_COMPILE_JOBS} KCONFIG_CONFIG=${kconfig_file}"
+ if [[ ${elapsed} -ge ${WAIT_TOTAL} ]]; then
+ echo "Timed out after ${WAIT_TOTAL} seconds waiting for VM to boot" >&2
+ die "failed to boot VM"
+ fi
- # Figure out where the kernel is being built.
- # O takes precedence over KBUILD_OUTPUT.
- if [[ "${O:=""}" != "" ]]; then
- if is_rel_path "${O}"; then
- O="$(realpath "${PWD}/${O}")"
+ sleep 1
+ elapsed=$((elapsed + 1))
+ done
+}
+
+vm_wait_for_ssh() {
+ local i
+
+ i=0
+ while true; do
+ if [[ ${i} -gt ${WAIT_PERIOD_MAX} ]]; then
+ die "Timed out waiting for guest ssh"
fi
- kernel_bzimage="${O}/${BZIMAGE}"
- make_command="${make_command} O=${O}"
- elif [[ "${KBUILD_OUTPUT:=""}" != "" ]]; then
- if is_rel_path "${KBUILD_OUTPUT}"; then
- KBUILD_OUTPUT="$(realpath "${PWD}/${KBUILD_OUTPUT}")"
+ if vm_ssh -- true; then
+ break
fi
- kernel_bzimage="${KBUILD_OUTPUT}/${BZIMAGE}"
- make_command="${make_command} KBUILD_OUTPUT=${KBUILD_OUTPUT}"
+ i=$(( i + 1 ))
+ sleep ${WAIT_PERIOD}
+ done
+}
+
+vm_mount_bpffs() {
+ vm_ssh -- mount bpffs -t bpf /sys/fs/bpf
+}
+
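+# Tag each line from stdin with ${prefix}, inherited from the caller's scope
+# (see log() below), flushing after every line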
+__log_stdin() {
+ stdbuf -oL awk '{ printf "%s:\t%s\n","'"${prefix}"'", $0; fflush() }'
+}
+
+__log_args() {
+ echo "$*" | awk '{ printf "%s:\t%s\n","'"${prefix}"'", $0 }'
+}
+
+log() {
+ local verbose="$1"
+ shift
+
+ local prefix="$1"
+
+ shift
+ local redirect=
+ if [[ ${verbose} -le 0 ]]; then
+ redirect=/dev/null
+ else
+ redirect=/dev/stdout
+ fi
+
+ if [[ "$#" -eq 0 ]]; then
+ __log_stdin | tee -a "${LOG}" > ${redirect}
+ else
+ __log_args "$@" | tee -a "${LOG}" > ${redirect}
fi
+}
- local b2c="${OUTPUT_DIR}/vm2c.py"
+log_setup() {
+ log $((VERBOSE-1)) "setup" "$@"
+}
- echo "Output directory: ${OUTPUT_DIR}"
+log_host() {
+ local testname=$1
- mkdir -p "${OUTPUT_DIR}"
- update_kconfig "${kernel_checkout}" "${kconfig_file}"
+ shift
+ log $((VERBOSE-1)) "test:${testname}:host" "$@"
+}
- recompile_kernel "${kernel_checkout}" "${make_command}"
- update_selftests "${kernel_checkout}" "${make_command}"
+log_guest() {
+ local testname=$1
- if [[ "${build_only}" == "no" ]]; then
- if [[ "${update_b2c}" == "no" && ! -f "${b2c}" ]]; then
- echo "vm2c script not found in ${b2c}"
- update_b2c="yes"
- fi
+ shift
+ log ${VERBOSE} "# test:${testname}" "$@"
+}
- if [[ "${update_b2c}" == "yes" ]]; then
- download $B2C_URL $b2c
- chmod +x $b2c
- fi
+test_vm_hid_bpf() {
+ local testname="${FUNCNAME[0]#test_}"
- run_vm "${kernel_checkout}" $b2c "${kernel_bzimage}" "${command}"
- if [[ "${debug_shell}" != "yes" ]]; then
- echo "Logs saved in ${OUTPUT_DIR}/${LOG_FILE}"
- fi
+ vm_ssh -- "${HID_BPF_TEST}" \
+ 2>&1 | log_guest "${testname}"
+
+ return ${PIPESTATUS[0]}
+}
+
+test_vm_hidraw() {
+ local testname="${FUNCNAME[0]#test_}"
+
+ vm_ssh -- "${HIDRAW_TEST}" \
+ 2>&1 | log_guest "${testname}"
+
+ return ${PIPESTATUS[0]}
+}
+
+test_vm_pytest() {
+ local testname="${FUNCNAME[0]#test_}"
- exit $(cat ${OUTPUT_DIR}/${EXIT_STATUS_FILE})
+ shift
+
+ vm_ssh -- pytest ${SCRIPT_DIR}/tests --color=yes "$@" \
+ 2>&1 | log_guest "${testname}"
+
+ return ${PIPESTATUS[0]}
+}
+
+run_test() {
+ local vm_oops_cnt_before
+ local vm_error_cnt_before
+ local vm_oops_cnt_after
+ local vm_error_cnt_after
+ local name
+ local rc
+
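+ # Snapshot dmesg oops/error counts so anything new can be attributed
+ # to this test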
+ vm_oops_cnt_before=$(vm_ssh -- dmesg | grep -c -i 'Oops')
+ vm_error_cnt_before=$(vm_ssh -- dmesg --level=err | wc -l)
+
+ name=$(echo "${1}" | awk '{ print $1 }')
+ eval test_"${name}" "$@"
+ rc=$?
+
+ vm_oops_cnt_after=$(vm_ssh -- dmesg | grep -c -i 'Oops')
+ if [[ ${vm_oops_cnt_after} -gt ${vm_oops_cnt_before} ]]; then
+ echo "FAIL: kernel oops detected on vm" | log_host "${name}"
+ rc=$KSFT_FAIL
+ fi
+
+ vm_error_cnt_after=$(vm_ssh -- dmesg --level=err | wc -l)
+ if [[ ${vm_error_cnt_after} -gt ${vm_error_cnt_before} ]]; then
+ echo "FAIL: kernel error detected on vm" | log_host "${name}"
+ vm_ssh -- dmesg --level=err | log_host "${name}"
+ rc=$KSFT_FAIL
fi
+
+ return "${rc}"
}
-main "$@"
+QEMU="qemu-system-$(uname -m)"
+
+while getopts :hvsbq:H:p: o
+do
+ case $o in
+ v) VERBOSE=$((VERBOSE+1));;
+ s) SHELL_MODE=1;;
+ b) BUILD=1;;
+ q) QEMU=$OPTARG;;
+ H) BUILD_HOST=$OPTARG;;
+ p) BUILD_HOST_PODMAN_CONTAINER_NAME=$OPTARG;;
+ h|*) usage;;
+ esac
+done
+shift $((OPTIND-1))
+
+trap cleanup EXIT
+
+PARAMS=""
+
+if [[ ${#} -eq 0 ]]; then
+ ARGS=("${TEST_NAMES[@]}")
+else
+ ARGS=()
+ COUNT=0
+ for arg in "$@"; do
+ COUNT=$((COUNT+1))
+ if [[ x"$arg" == x"--" ]]; then
+ break
+ fi
+ ARGS+=("$arg")
+ done
+ shift $COUNT
+ PARAMS="$@"
+fi
+
+if [[ "${SHELL_MODE}" -eq 0 ]]; then
+ check_args "${ARGS[@]}"
+ echo "1..${#ARGS[@]}"
+fi
+check_deps
+check_vng
+handle_build
+
+log_setup "Booting up VM"
+vm_start
+vm_wait_for_ssh
+vm_mount_bpffs
+log_setup "VM booted up"
+
+if [[ "${SHELL_MODE}" -eq 1 ]]; then
+ log_setup "Starting interactive shell in VM"
+ echo "Starting shell in VM. Use 'exit' to quit and shutdown the VM."
+ CURRENT_DIR="$(pwd)"
+ vm_ssh -t -- "cd '${CURRENT_DIR}' && exec bash -l"
+ exit "$KSFT_PASS"
+fi
+
+cnt_pass=0
+cnt_fail=0
+cnt_skip=0
+cnt_total=0
+for arg in "${ARGS[@]}"; do
+ run_test "${arg}" "${PARAMS}"
+ rc=$?
+ # TAP test numbers are 1-based, so count the test before reporting it
+ cnt_total=$(( cnt_total + 1 ))
+ if [[ ${rc} -eq $KSFT_PASS ]]; then
+ cnt_pass=$(( cnt_pass + 1 ))
+ echo "ok ${cnt_total} ${arg}"
+ elif [[ ${rc} -eq $KSFT_SKIP ]]; then
+ cnt_skip=$(( cnt_skip + 1 ))
+ echo "ok ${cnt_total} ${arg} # SKIP"
+ elif [[ ${rc} -eq $KSFT_FAIL ]]; then
+ cnt_fail=$(( cnt_fail + 1 ))
+ echo "not ok ${cnt_total} ${arg} # exit=$rc"
+ fi
+done
+
+echo "SUMMARY: PASS=${cnt_pass} SKIP=${cnt_skip} FAIL=${cnt_fail}"
+echo "Log: ${LOG}"
+
+if [ $((cnt_pass + cnt_skip)) -eq ${cnt_total} ]; then
+ exit "$KSFT_PASS"
+else
+ exit "$KSFT_FAIL"
+fi
diff --git a/tools/testing/selftests/intel_pstate/aperf.c b/tools/testing/selftests/intel_pstate/aperf.c
index a8acf3996973..953b63e5aa6a 100644
--- a/tools/testing/selftests/intel_pstate/aperf.c
+++ b/tools/testing/selftests/intel_pstate/aperf.c
@@ -11,7 +11,7 @@
#include <errno.h>
#include <string.h>
#include <time.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#define MSEC_PER_SEC 1000L
#define NSEC_PER_MSEC 1000000L
diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c
index 1a8e85afe9aa..10e051b6f592 100644
--- a/tools/testing/selftests/iommu/iommufd.c
+++ b/tools/testing/selftests/iommu/iommufd.c
@@ -13,9 +13,6 @@
static unsigned long HUGEPAGE_SIZE;
-#define MOCK_PAGE_SIZE (PAGE_SIZE / 2)
-#define MOCK_HUGE_PAGE_SIZE (512 * MOCK_PAGE_SIZE)
-
static unsigned long get_huge_page_size(void)
{
char buf[80];
@@ -54,6 +51,8 @@ static __attribute__((constructor)) void setup_sizes(void)
mfd_buffer = memfd_mmap(BUFFER_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
&mfd);
+ assert(mfd_buffer != MAP_FAILED);
+ assert(mfd > 0);
}
FIXTURE(iommufd)
@@ -764,19 +763,34 @@ TEST_F(iommufd_ioas, get_hw_info)
uint8_t max_pasid = 0;
/* Provide a zero-size user_buffer */
- test_cmd_get_hw_info(self->device_id, NULL, 0);
+ test_cmd_get_hw_info(self->device_id,
+ IOMMU_HW_INFO_TYPE_DEFAULT, NULL, 0);
/* Provide a user_buffer with exact size */
- test_cmd_get_hw_info(self->device_id, &buffer_exact, sizeof(buffer_exact));
+ test_cmd_get_hw_info(self->device_id,
+ IOMMU_HW_INFO_TYPE_DEFAULT, &buffer_exact,
+ sizeof(buffer_exact));
+
+ /* Request for a wrong data_type, and a correct one */
+ test_err_get_hw_info(EOPNOTSUPP, self->device_id,
+ IOMMU_HW_INFO_TYPE_SELFTEST + 1,
+ &buffer_exact, sizeof(buffer_exact));
+ test_cmd_get_hw_info(self->device_id,
+ IOMMU_HW_INFO_TYPE_SELFTEST, &buffer_exact,
+ sizeof(buffer_exact));
/*
* Provide a user_buffer with size larger than the exact size to check if
* kernel zero the trailing bytes.
*/
- test_cmd_get_hw_info(self->device_id, &buffer_larger, sizeof(buffer_larger));
+ test_cmd_get_hw_info(self->device_id,
+ IOMMU_HW_INFO_TYPE_DEFAULT, &buffer_larger,
+ sizeof(buffer_larger));
/*
* Provide a user_buffer with size smaller than the exact size to check if
* the fields within the size range still gets updated.
*/
- test_cmd_get_hw_info(self->device_id, &buffer_smaller, sizeof(buffer_smaller));
+ test_cmd_get_hw_info(self->device_id,
+ IOMMU_HW_INFO_TYPE_DEFAULT,
+ &buffer_smaller, sizeof(buffer_smaller));
test_cmd_get_hw_info_pasid(self->device_id, &max_pasid);
ASSERT_EQ(0, max_pasid);
if (variant->pasid_capable) {
@@ -786,9 +800,11 @@ TEST_F(iommufd_ioas, get_hw_info)
}
} else {
test_err_get_hw_info(ENOENT, self->device_id,
- &buffer_exact, sizeof(buffer_exact));
+ IOMMU_HW_INFO_TYPE_DEFAULT, &buffer_exact,
+ sizeof(buffer_exact));
test_err_get_hw_info(ENOENT, self->device_id,
- &buffer_larger, sizeof(buffer_larger));
+ IOMMU_HW_INFO_TYPE_DEFAULT, &buffer_larger,
+ sizeof(buffer_larger));
}
}
@@ -951,6 +967,33 @@ TEST_F(iommufd_ioas, area_auto_iova)
test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
}
+/* https://lore.kernel.org/r/685af644.a00a0220.2e5631.0094.GAE@google.com */
+TEST_F(iommufd_ioas, reserved_overflow)
+{
+ struct iommu_test_cmd test_cmd = {
+ .size = sizeof(test_cmd),
+ .op = IOMMU_TEST_OP_ADD_RESERVED,
+ .id = self->ioas_id,
+ .add_reserved.start = 6,
+ };
+ unsigned int map_len;
+ __u64 iova;
+
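+ /*
+ * Reserve almost the entire 64-bit space so the map below cannot fit;
+ * it must fail with ENOSPC rather than overflow
+ */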
+ if (PAGE_SIZE == 4096) {
+ test_cmd.add_reserved.length = 0xffffffffffff8001;
+ map_len = 0x5000;
+ } else {
+ test_cmd.add_reserved.length =
+ 0xffffffffffffffff - MOCK_PAGE_SIZE * 16;
+ map_len = MOCK_PAGE_SIZE * 10;
+ }
+
+ ASSERT_EQ(0,
+ ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
+ &test_cmd));
+ test_err_ioctl_ioas_map(ENOSPC, buffer, map_len, &iova);
+}
+
TEST_F(iommufd_ioas, area_allowed)
{
struct iommu_test_cmd test_cmd = {
@@ -1528,6 +1571,49 @@ TEST_F(iommufd_ioas, copy_sweep)
test_ioctl_destroy(dst_ioas_id);
}
+TEST_F(iommufd_ioas, dmabuf_simple)
+{
+ size_t buf_size = PAGE_SIZE * 4;
+ __u64 iova;
+ int dfd;
+
+ test_cmd_get_dmabuf(buf_size, &dfd);
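+ /* Zero-length, out-of-bounds and oversized windows are rejected */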
+ test_err_ioctl_ioas_map_file(EINVAL, dfd, 0, 0, &iova);
+ test_err_ioctl_ioas_map_file(EINVAL, dfd, buf_size, buf_size, &iova);
+ test_err_ioctl_ioas_map_file(EINVAL, dfd, 0, buf_size + 1, &iova);
+ test_ioctl_ioas_map_file(dfd, 0, buf_size, &iova);
+
+ close(dfd);
+}
+
+TEST_F(iommufd_ioas, dmabuf_revoke)
+{
+ size_t buf_size = PAGE_SIZE * 4;
+ __u32 hwpt_id;
+ __u64 iova;
+ __u64 iova2;
+ int dfd;
+
+ test_cmd_get_dmabuf(buf_size, &dfd);
+ test_ioctl_ioas_map_file(dfd, 0, buf_size, &iova);
+ test_cmd_revoke_dmabuf(dfd, true);
+
+ if (variant->mock_domains)
+ test_cmd_hwpt_alloc(self->device_id, self->ioas_id, 0,
+ &hwpt_id);
+
+ test_err_ioctl_ioas_map_file(ENODEV, dfd, 0, buf_size, &iova2);
+
+ test_cmd_revoke_dmabuf(dfd, false);
+ test_ioctl_ioas_map_file(dfd, 0, buf_size, &iova2);
+
+ /* Restore the iova back */
+ test_ioctl_ioas_unmap(iova, buf_size);
+ test_ioctl_ioas_map_fixed_file(dfd, 0, buf_size, iova);
+
+ close(dfd);
+}
+
FIXTURE(iommufd_mock_domain)
{
int fd;
@@ -1746,13 +1832,15 @@ TEST_F(iommufd_mock_domain, all_aligns)
unsigned int end;
uint8_t *buf;
int prot = PROT_READ | PROT_WRITE;
- int mfd;
+ int mfd = -1;
if (variant->file)
buf = memfd_mmap(buf_size, prot, MAP_SHARED, &mfd);
else
buf = mmap(0, buf_size, prot, self->mmap_flags, -1, 0);
ASSERT_NE(MAP_FAILED, buf);
+ if (variant->file)
+ ASSERT_GT(mfd, 0);
check_refs(buf, buf_size, 0);
/*
@@ -1798,13 +1886,15 @@ TEST_F(iommufd_mock_domain, all_aligns_copy)
unsigned int end;
uint8_t *buf;
int prot = PROT_READ | PROT_WRITE;
- int mfd;
+ int mfd = -1;
if (variant->file)
buf = memfd_mmap(buf_size, prot, MAP_SHARED, &mfd);
else
buf = mmap(0, buf_size, prot, self->mmap_flags, -1, 0);
ASSERT_NE(MAP_FAILED, buf);
+ if (variant->file)
+ ASSERT_GT(mfd, 0);
check_refs(buf, buf_size, 0);
/*
@@ -2008,6 +2098,13 @@ FIXTURE_VARIANT(iommufd_dirty_tracking)
FIXTURE_SETUP(iommufd_dirty_tracking)
{
+ struct iommu_option cmd = {
+ .size = sizeof(cmd),
+ .option_id = IOMMU_OPTION_HUGE_PAGES,
+ .op = IOMMU_OPTION_OP_SET,
+ .val64 = 0,
+ };
+ size_t mmap_buffer_size;
unsigned long size;
int mmap_flags;
void *vrc;
@@ -2015,29 +2112,40 @@ FIXTURE_SETUP(iommufd_dirty_tracking)
if (variant->buffer_size < MOCK_PAGE_SIZE) {
SKIP(return,
- "Skipping buffer_size=%lu, less than MOCK_PAGE_SIZE=%lu",
+ "Skipping buffer_size=%lu, less than MOCK_PAGE_SIZE=%u",
variant->buffer_size, MOCK_PAGE_SIZE);
}
self->fd = open("/dev/iommu", O_RDWR);
ASSERT_NE(-1, self->fd);
- rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, variant->buffer_size);
- if (rc || !self->buffer) {
- SKIP(return, "Skipping buffer_size=%lu due to errno=%d",
- variant->buffer_size, rc);
- }
-
mmap_flags = MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED;
+ mmap_buffer_size = variant->buffer_size;
if (variant->hugepages) {
/*
* MAP_POPULATE will cause the kernel to fail mmap if THPs are
* not available.
*/
mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
+
+ /*
+ * Allocation must be aligned to the HUGEPAGE_SIZE, because the
+ * following mmap() will automatically align the length to be a
+ * multiple of the underlying huge page size. Failing to do the
+ * same at this allocation will result in a memory overwrite by
+ * the mmap().
+ */
+ if (mmap_buffer_size < HUGEPAGE_SIZE)
+ mmap_buffer_size = HUGEPAGE_SIZE;
+ }
+
+ rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, mmap_buffer_size);
+ if (rc || !self->buffer) {
+ SKIP(return, "Skipping buffer_size=%lu due to errno=%d",
+ mmap_buffer_size, rc);
}
assert((uintptr_t)self->buffer % HUGEPAGE_SIZE == 0);
- vrc = mmap(self->buffer, variant->buffer_size, PROT_READ | PROT_WRITE,
+ vrc = mmap(self->buffer, mmap_buffer_size, PROT_READ | PROT_WRITE,
mmap_flags, -1, 0);
assert(vrc == self->buffer);
@@ -2052,22 +2160,24 @@ FIXTURE_SETUP(iommufd_dirty_tracking)
assert((uintptr_t)self->bitmap % PAGE_SIZE == 0);
test_ioctl_ioas_alloc(&self->ioas_id);
- /* Enable 1M mock IOMMU hugepages */
- if (variant->hugepages) {
- test_cmd_mock_domain_flags(self->ioas_id,
- MOCK_FLAGS_DEVICE_HUGE_IOVA,
- &self->stdev_id, &self->hwpt_id,
- &self->idev_id);
- } else {
- test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
- &self->hwpt_id, &self->idev_id);
- }
+
+ /*
+ * For dirty testing it is important that the page size fed into
+ * the iommu page tables matches the size the dirty logic
+ * expects, or set_dirty can touch too much stuff.
+ */
+ cmd.object_id = self->ioas_id;
+ if (!variant->hugepages)
+ ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
+
+ test_cmd_mock_domain(self->ioas_id, &self->stdev_id, &self->hwpt_id,
+ &self->idev_id);
}
FIXTURE_TEARDOWN(iommufd_dirty_tracking)
{
- munmap(self->buffer, variant->buffer_size);
- munmap(self->bitmap, DIV_ROUND_UP(self->bitmap_size, BITS_PER_BYTE));
+ free(self->buffer);
+ free(self->bitmap);
teardown_iommufd(self->fd, _metadata);
}
@@ -2175,8 +2285,7 @@ TEST_F(iommufd_dirty_tracking, device_dirty_capability)
test_cmd_hwpt_alloc(self->idev_id, self->ioas_id, 0, &hwpt_id);
test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
- test_cmd_get_hw_capabilities(self->idev_id, caps,
- IOMMU_HW_CAP_DIRTY_TRACKING);
+ test_cmd_get_hw_capabilities(self->idev_id, caps);
ASSERT_EQ(IOMMU_HW_CAP_DIRTY_TRACKING,
caps & IOMMU_HW_CAP_DIRTY_TRACKING);
@@ -2187,18 +2296,23 @@ TEST_F(iommufd_dirty_tracking, device_dirty_capability)
TEST_F(iommufd_dirty_tracking, get_dirty_bitmap)
{
uint32_t page_size = MOCK_PAGE_SIZE;
+ uint32_t ioas_id = self->ioas_id;
uint32_t hwpt_id;
- uint32_t ioas_id;
if (variant->hugepages)
page_size = MOCK_HUGE_PAGE_SIZE;
- test_ioctl_ioas_alloc(&ioas_id);
test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
variant->buffer_size, MOCK_APERTURE_START);
- test_cmd_hwpt_alloc(self->idev_id, ioas_id,
- IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
+ if (variant->hugepages)
+ test_cmd_hwpt_alloc_iommupt(self->idev_id, ioas_id,
+ IOMMU_HWPT_ALLOC_DIRTY_TRACKING,
+ MOCK_IOMMUPT_HUGE, &hwpt_id);
+ else
+ test_cmd_hwpt_alloc_iommupt(self->idev_id, ioas_id,
+ IOMMU_HWPT_ALLOC_DIRTY_TRACKING,
+ MOCK_IOMMUPT_DEFAULT, &hwpt_id);
test_cmd_set_dirty_tracking(hwpt_id, true);
@@ -2224,18 +2338,24 @@ TEST_F(iommufd_dirty_tracking, get_dirty_bitmap)
TEST_F(iommufd_dirty_tracking, get_dirty_bitmap_no_clear)
{
uint32_t page_size = MOCK_PAGE_SIZE;
+ uint32_t ioas_id = self->ioas_id;
uint32_t hwpt_id;
- uint32_t ioas_id;
if (variant->hugepages)
page_size = MOCK_HUGE_PAGE_SIZE;
- test_ioctl_ioas_alloc(&ioas_id);
test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
variant->buffer_size, MOCK_APERTURE_START);
- test_cmd_hwpt_alloc(self->idev_id, ioas_id,
- IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
+
+ if (variant->hugepages)
+ test_cmd_hwpt_alloc_iommupt(self->idev_id, ioas_id,
+ IOMMU_HWPT_ALLOC_DIRTY_TRACKING,
+ MOCK_IOMMUPT_HUGE, &hwpt_id);
+ else
+ test_cmd_hwpt_alloc_iommupt(self->idev_id, ioas_id,
+ IOMMU_HWPT_ALLOC_DIRTY_TRACKING,
+ MOCK_IOMMUPT_DEFAULT, &hwpt_id);
test_cmd_set_dirty_tracking(hwpt_id, true);
@@ -2577,6 +2697,8 @@ TEST_F(vfio_compat_mock_domain, map)
ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);
+ /* Unmapping an already-empty range succeeds */
+ ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
/* UNMAP_FLAG_ALL requires 0 iova/size */
ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
@@ -2688,7 +2810,7 @@ FIXTURE_SETUP(iommufd_viommu)
/* Allocate a vIOMMU taking refcount of the parent hwpt */
test_cmd_viommu_alloc(self->device_id, self->hwpt_id,
- IOMMU_VIOMMU_TYPE_SELFTEST,
+ IOMMU_VIOMMU_TYPE_SELFTEST, NULL, 0,
&self->viommu_id);
/* Allocate a regular nested hwpt */
@@ -2727,24 +2849,27 @@ TEST_F(iommufd_viommu, viommu_negative_tests)
if (self->device_id) {
/* Negative test -- invalid hwpt (hwpt_id=0) */
test_err_viommu_alloc(ENOENT, device_id, 0,
- IOMMU_VIOMMU_TYPE_SELFTEST, NULL);
+ IOMMU_VIOMMU_TYPE_SELFTEST, NULL, 0,
+ NULL);
/* Negative test -- not a nesting parent hwpt */
test_cmd_hwpt_alloc(device_id, ioas_id, 0, &hwpt_id);
test_err_viommu_alloc(EINVAL, device_id, hwpt_id,
- IOMMU_VIOMMU_TYPE_SELFTEST, NULL);
+ IOMMU_VIOMMU_TYPE_SELFTEST, NULL, 0,
+ NULL);
test_ioctl_destroy(hwpt_id);
/* Negative test -- unsupported viommu type */
test_err_viommu_alloc(EOPNOTSUPP, device_id, self->hwpt_id,
- 0xdead, NULL);
+ 0xdead, NULL, 0, NULL);
EXPECT_ERRNO(EBUSY,
_test_ioctl_destroy(self->fd, self->hwpt_id));
EXPECT_ERRNO(EBUSY,
_test_ioctl_destroy(self->fd, self->viommu_id));
} else {
test_err_viommu_alloc(ENOENT, self->device_id, self->hwpt_id,
- IOMMU_VIOMMU_TYPE_SELFTEST, NULL);
+ IOMMU_VIOMMU_TYPE_SELFTEST, NULL, 0,
+ NULL);
}
}
@@ -2760,35 +2885,66 @@ TEST_F(iommufd_viommu, viommu_alloc_nested_iopf)
uint32_t fault_fd;
uint32_t vdev_id;
- if (self->device_id) {
- test_ioctl_fault_alloc(&fault_id, &fault_fd);
- test_err_hwpt_alloc_iopf(
- ENOENT, dev_id, viommu_id, UINT32_MAX,
- IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id,
- IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
- test_err_hwpt_alloc_iopf(
- EOPNOTSUPP, dev_id, viommu_id, fault_id,
- IOMMU_HWPT_FAULT_ID_VALID | (1 << 31), &iopf_hwpt_id,
- IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
- test_cmd_hwpt_alloc_iopf(
- dev_id, viommu_id, fault_id, IOMMU_HWPT_FAULT_ID_VALID,
- &iopf_hwpt_id, IOMMU_HWPT_DATA_SELFTEST, &data,
- sizeof(data));
+ if (!dev_id)
+ SKIP(return, "Skipping test for variant no_viommu");
- /* Must allocate vdevice before attaching to a nested hwpt */
- test_err_mock_domain_replace(ENOENT, self->stdev_id,
- iopf_hwpt_id);
- test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
- test_cmd_mock_domain_replace(self->stdev_id, iopf_hwpt_id);
- EXPECT_ERRNO(EBUSY,
- _test_ioctl_destroy(self->fd, iopf_hwpt_id));
- test_cmd_trigger_iopf(dev_id, fault_fd);
+ test_ioctl_fault_alloc(&fault_id, &fault_fd);
+ test_err_hwpt_alloc_iopf(ENOENT, dev_id, viommu_id, UINT32_MAX,
+ IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id,
+ IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
+ test_err_hwpt_alloc_iopf(EOPNOTSUPP, dev_id, viommu_id, fault_id,
+ IOMMU_HWPT_FAULT_ID_VALID | (1 << 31),
+ &iopf_hwpt_id, IOMMU_HWPT_DATA_SELFTEST, &data,
+ sizeof(data));
+ test_cmd_hwpt_alloc_iopf(dev_id, viommu_id, fault_id,
+ IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id,
+ IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
- test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
- test_ioctl_destroy(iopf_hwpt_id);
- close(fault_fd);
- test_ioctl_destroy(fault_id);
- }
+ /* Must allocate vdevice before attaching to a nested hwpt */
+ test_err_mock_domain_replace(ENOENT, self->stdev_id, iopf_hwpt_id);
+ test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
+ test_cmd_mock_domain_replace(self->stdev_id, iopf_hwpt_id);
+ EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, iopf_hwpt_id));
+ test_cmd_trigger_iopf(dev_id, fault_fd);
+
+ test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
+ test_ioctl_destroy(iopf_hwpt_id);
+ close(fault_fd);
+ test_ioctl_destroy(fault_id);
+}
+
+TEST_F(iommufd_viommu, viommu_alloc_with_data)
+{
+ struct iommu_viommu_selftest data = {
+ .in_data = 0xbeef,
+ };
+ uint32_t *test;
+
+ if (!self->device_id)
+ SKIP(return, "Skipping test for variant no_viommu");
+
+ test_cmd_viommu_alloc(self->device_id, self->hwpt_id,
+ IOMMU_VIOMMU_TYPE_SELFTEST, &data, sizeof(data),
+ &self->viommu_id);
+ ASSERT_EQ(data.out_data, data.in_data);
+
+ /* Negative mmap tests -- offset and length cannot be changed */
+ test_err_mmap(ENXIO, data.out_mmap_length,
+ data.out_mmap_offset + PAGE_SIZE);
+ test_err_mmap(ENXIO, data.out_mmap_length,
+ data.out_mmap_offset + PAGE_SIZE * 2);
+ test_err_mmap(ENXIO, data.out_mmap_length / 2, data.out_mmap_offset);
+ test_err_mmap(ENXIO, data.out_mmap_length * 2, data.out_mmap_offset);
+
+ /* Now do a correct mmap for a loopback test */
+ test = mmap(NULL, data.out_mmap_length, PROT_READ | PROT_WRITE,
+ MAP_SHARED, self->fd, data.out_mmap_offset);
+ ASSERT_NE(MAP_FAILED, test);
+ ASSERT_EQ(data.in_data, *test);
+
+ /* The owner of the mmap region should be blocked */
+ EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, self->viommu_id));
+ munmap(test, data.out_mmap_length);
}
TEST_F(iommufd_viommu, vdevice_alloc)
@@ -2849,169 +3005,234 @@ TEST_F(iommufd_viommu, vdevice_cache)
uint32_t vdev_id = 0;
uint32_t num_inv;
- if (dev_id) {
- test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
-
- test_cmd_dev_check_cache_all(dev_id,
- IOMMU_TEST_DEV_CACHE_DEFAULT);
-
- /* Check data_type by passing zero-length array */
- num_inv = 0;
- test_cmd_viommu_invalidate(viommu_id, inv_reqs,
- sizeof(*inv_reqs), &num_inv);
- assert(!num_inv);
-
- /* Negative test: Invalid data_type */
- num_inv = 1;
- test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
- IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST_INVALID,
- sizeof(*inv_reqs), &num_inv);
- assert(!num_inv);
-
- /* Negative test: structure size sanity */
- num_inv = 1;
- test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
- IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
- sizeof(*inv_reqs) + 1, &num_inv);
- assert(!num_inv);
-
- num_inv = 1;
- test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
- IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
- 1, &num_inv);
- assert(!num_inv);
-
- /* Negative test: invalid flag is passed */
- num_inv = 1;
- inv_reqs[0].flags = 0xffffffff;
- inv_reqs[0].vdev_id = 0x99;
- test_err_viommu_invalidate(EOPNOTSUPP, viommu_id, inv_reqs,
- IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
- sizeof(*inv_reqs), &num_inv);
- assert(!num_inv);
-
- /* Negative test: invalid data_uptr when array is not empty */
- num_inv = 1;
- inv_reqs[0].flags = 0;
- inv_reqs[0].vdev_id = 0x99;
- test_err_viommu_invalidate(EINVAL, viommu_id, NULL,
- IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
- sizeof(*inv_reqs), &num_inv);
- assert(!num_inv);
-
- /* Negative test: invalid entry_len when array is not empty */
- num_inv = 1;
- inv_reqs[0].flags = 0;
- inv_reqs[0].vdev_id = 0x99;
- test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
- IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
- 0, &num_inv);
- assert(!num_inv);
+ if (!dev_id)
+ SKIP(return, "Skipping test for variant no_viommu");
+
+ test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
+
+ test_cmd_dev_check_cache_all(dev_id, IOMMU_TEST_DEV_CACHE_DEFAULT);
+
+ /* Check data_type by passing zero-length array */
+ num_inv = 0;
+ test_cmd_viommu_invalidate(viommu_id, inv_reqs, sizeof(*inv_reqs),
+ &num_inv);
+ assert(!num_inv);
+
+ /* Negative test: Invalid data_type */
+ num_inv = 1;
+ test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
+ IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST_INVALID,
+ sizeof(*inv_reqs), &num_inv);
+ assert(!num_inv);
+
+ /* Negative test: structure size sanity */
+ num_inv = 1;
+ test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
+ IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
+ sizeof(*inv_reqs) + 1, &num_inv);
+ assert(!num_inv);
+
+ num_inv = 1;
+ test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
+ IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST, 1,
+ &num_inv);
+ assert(!num_inv);
+
+ /* Negative test: invalid flag is passed */
+ num_inv = 1;
+ inv_reqs[0].flags = 0xffffffff;
+ inv_reqs[0].vdev_id = 0x99;
+ test_err_viommu_invalidate(EOPNOTSUPP, viommu_id, inv_reqs,
+ IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
+ sizeof(*inv_reqs), &num_inv);
+ assert(!num_inv);
+
+ /* Negative test: invalid data_uptr when array is not empty */
+ num_inv = 1;
+ inv_reqs[0].flags = 0;
+ inv_reqs[0].vdev_id = 0x99;
+ test_err_viommu_invalidate(EINVAL, viommu_id, NULL,
+ IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
+ sizeof(*inv_reqs), &num_inv);
+ assert(!num_inv);
+
+ /* Negative test: invalid entry_len when array is not empty */
+ num_inv = 1;
+ inv_reqs[0].flags = 0;
+ inv_reqs[0].vdev_id = 0x99;
+ test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
+ IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST, 0,
+ &num_inv);
+ assert(!num_inv);
+
+ /* Negative test: invalid cache_id */
+ num_inv = 1;
+ inv_reqs[0].flags = 0;
+ inv_reqs[0].vdev_id = 0x99;
+ inv_reqs[0].cache_id = MOCK_DEV_CACHE_ID_MAX + 1;
+ test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
+ IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
+ sizeof(*inv_reqs), &num_inv);
+ assert(!num_inv);
+
+ /* Negative test: invalid vdev_id */
+ num_inv = 1;
+ inv_reqs[0].flags = 0;
+ inv_reqs[0].vdev_id = 0x9;
+ inv_reqs[0].cache_id = 0;
+ test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
+ IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
+ sizeof(*inv_reqs), &num_inv);
+ assert(!num_inv);
- /* Negative test: invalid cache_id */
- num_inv = 1;
- inv_reqs[0].flags = 0;
- inv_reqs[0].vdev_id = 0x99;
- inv_reqs[0].cache_id = MOCK_DEV_CACHE_ID_MAX + 1;
- test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
- IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
- sizeof(*inv_reqs), &num_inv);
- assert(!num_inv);
+	/*
+	 * Invalidate the 1st cache entry but fail the 2nd request,
+	 * which carries invalid flags.
+	 */
+ num_inv = 2;
+ inv_reqs[0].flags = 0;
+ inv_reqs[0].vdev_id = 0x99;
+ inv_reqs[0].cache_id = 0;
+ inv_reqs[1].flags = 0xffffffff;
+ inv_reqs[1].vdev_id = 0x99;
+ inv_reqs[1].cache_id = 1;
+ test_err_viommu_invalidate(EOPNOTSUPP, viommu_id, inv_reqs,
+ IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
+ sizeof(*inv_reqs), &num_inv);
+ assert(num_inv == 1);
+ test_cmd_dev_check_cache(dev_id, 0, 0);
+ test_cmd_dev_check_cache(dev_id, 1, IOMMU_TEST_DEV_CACHE_DEFAULT);
+ test_cmd_dev_check_cache(dev_id, 2, IOMMU_TEST_DEV_CACHE_DEFAULT);
+ test_cmd_dev_check_cache(dev_id, 3, IOMMU_TEST_DEV_CACHE_DEFAULT);
- /* Negative test: invalid vdev_id */
- num_inv = 1;
- inv_reqs[0].flags = 0;
- inv_reqs[0].vdev_id = 0x9;
- inv_reqs[0].cache_id = 0;
- test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
- IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
- sizeof(*inv_reqs), &num_inv);
- assert(!num_inv);
+	/*
+	 * Invalidate the 1st cache entry but fail the 2nd request,
+	 * which carries an invalid cache_id.
+	 */
+ num_inv = 2;
+ inv_reqs[0].flags = 0;
+ inv_reqs[0].vdev_id = 0x99;
+ inv_reqs[0].cache_id = 0;
+ inv_reqs[1].flags = 0;
+ inv_reqs[1].vdev_id = 0x99;
+ inv_reqs[1].cache_id = MOCK_DEV_CACHE_ID_MAX + 1;
+ test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
+ IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
+ sizeof(*inv_reqs), &num_inv);
+ assert(num_inv == 1);
+ test_cmd_dev_check_cache(dev_id, 0, 0);
+ test_cmd_dev_check_cache(dev_id, 1, IOMMU_TEST_DEV_CACHE_DEFAULT);
+ test_cmd_dev_check_cache(dev_id, 2, IOMMU_TEST_DEV_CACHE_DEFAULT);
+ test_cmd_dev_check_cache(dev_id, 3, IOMMU_TEST_DEV_CACHE_DEFAULT);
+
+ /* Invalidate the 2nd cache entry and verify */
+ num_inv = 1;
+ inv_reqs[0].flags = 0;
+ inv_reqs[0].vdev_id = 0x99;
+ inv_reqs[0].cache_id = 1;
+ test_cmd_viommu_invalidate(viommu_id, inv_reqs, sizeof(*inv_reqs),
+ &num_inv);
+ assert(num_inv == 1);
+ test_cmd_dev_check_cache(dev_id, 0, 0);
+ test_cmd_dev_check_cache(dev_id, 1, 0);
+ test_cmd_dev_check_cache(dev_id, 2, IOMMU_TEST_DEV_CACHE_DEFAULT);
+ test_cmd_dev_check_cache(dev_id, 3, IOMMU_TEST_DEV_CACHE_DEFAULT);
+
+ /* Invalidate the 3rd and 4th cache entries and verify */
+ num_inv = 2;
+ inv_reqs[0].flags = 0;
+ inv_reqs[0].vdev_id = 0x99;
+ inv_reqs[0].cache_id = 2;
+ inv_reqs[1].flags = 0;
+ inv_reqs[1].vdev_id = 0x99;
+ inv_reqs[1].cache_id = 3;
+ test_cmd_viommu_invalidate(viommu_id, inv_reqs, sizeof(*inv_reqs),
+ &num_inv);
+ assert(num_inv == 2);
+ test_cmd_dev_check_cache_all(dev_id, 0);
+
+	/* Invalidate all cache entries for the vdevice and verify */
+ num_inv = 1;
+ inv_reqs[0].vdev_id = 0x99;
+ inv_reqs[0].flags = IOMMU_TEST_INVALIDATE_FLAG_ALL;
+ test_cmd_viommu_invalidate(viommu_id, inv_reqs, sizeof(*inv_reqs),
+ &num_inv);
+ assert(num_inv == 1);
+ test_cmd_dev_check_cache_all(dev_id, 0);
+ test_ioctl_destroy(vdev_id);
+}
+
+TEST_F(iommufd_viommu, hw_queue)
+{
+ __u64 iova = MOCK_APERTURE_START, iova2;
+ uint32_t viommu_id = self->viommu_id;
+ uint32_t hw_queue_id[2];
+
+ if (!viommu_id)
+ SKIP(return, "Skipping test for variant no_viommu");
+
+ /* Fail IOMMU_HW_QUEUE_TYPE_DEFAULT */
+ test_err_hw_queue_alloc(EOPNOTSUPP, viommu_id,
+ IOMMU_HW_QUEUE_TYPE_DEFAULT, 0, iova, PAGE_SIZE,
+ &hw_queue_id[0]);
+	/* Fail on a zero queue length, then on an overflowing base address */
+ test_err_hw_queue_alloc(EINVAL, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
+ 0, iova, 0, &hw_queue_id[0]);
+ test_err_hw_queue_alloc(EOVERFLOW, viommu_id,
+ IOMMU_HW_QUEUE_TYPE_SELFTEST, 0, ~(uint64_t)0,
+ PAGE_SIZE, &hw_queue_id[0]);
+ /* Fail missing iova */
+ test_err_hw_queue_alloc(ENOENT, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
+ 0, iova, PAGE_SIZE, &hw_queue_id[0]);
+
+ /* Map iova */
+ test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
+ test_ioctl_ioas_map(buffer + PAGE_SIZE, PAGE_SIZE, &iova2);
+
+ /* Fail index=1 and =MAX; must start from index=0 */
+ test_err_hw_queue_alloc(EIO, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST, 1,
+ iova, PAGE_SIZE, &hw_queue_id[0]);
+ test_err_hw_queue_alloc(EINVAL, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
+ IOMMU_TEST_HW_QUEUE_MAX, iova, PAGE_SIZE,
+ &hw_queue_id[0]);
+
+ /* Allocate index=0, declare ownership of the iova */
+ test_cmd_hw_queue_alloc(viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST, 0,
+ iova, PAGE_SIZE, &hw_queue_id[0]);
+ /* Fail duplicated index */
+ test_err_hw_queue_alloc(EEXIST, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
+ 0, iova, PAGE_SIZE, &hw_queue_id[0]);
+ /* Fail unmap, due to iova ownership */
+ test_err_ioctl_ioas_unmap(EBUSY, iova, PAGE_SIZE);
+	/* The 2nd page is not pinned, so it can be unmapped */
+ test_ioctl_ioas_unmap(iova2, PAGE_SIZE);
+
+ /* Allocate index=1, with an unaligned case */
+ test_cmd_hw_queue_alloc(viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST, 1,
+ iova + PAGE_SIZE / 2, PAGE_SIZE / 2,
+ &hw_queue_id[1]);
+ /* Fail to destroy, due to dependency */
+ EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hw_queue_id[0]));
+
+ /* Destroy in descending order */
+ test_ioctl_destroy(hw_queue_id[1]);
+ test_ioctl_destroy(hw_queue_id[0]);
+ /* Now it can unmap the first page */
+ test_ioctl_ioas_unmap(iova, PAGE_SIZE);
+}
- /*
- * Invalidate the 1st cache entry but fail the 2nd request
- * due to invalid flags configuration in the 2nd request.
- */
- num_inv = 2;
- inv_reqs[0].flags = 0;
- inv_reqs[0].vdev_id = 0x99;
- inv_reqs[0].cache_id = 0;
- inv_reqs[1].flags = 0xffffffff;
- inv_reqs[1].vdev_id = 0x99;
- inv_reqs[1].cache_id = 1;
- test_err_viommu_invalidate(EOPNOTSUPP, viommu_id, inv_reqs,
- IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
- sizeof(*inv_reqs), &num_inv);
- assert(num_inv == 1);
- test_cmd_dev_check_cache(dev_id, 0, 0);
- test_cmd_dev_check_cache(dev_id, 1,
- IOMMU_TEST_DEV_CACHE_DEFAULT);
- test_cmd_dev_check_cache(dev_id, 2,
- IOMMU_TEST_DEV_CACHE_DEFAULT);
- test_cmd_dev_check_cache(dev_id, 3,
- IOMMU_TEST_DEV_CACHE_DEFAULT);
+TEST_F(iommufd_viommu, vdevice_tombstone)
+{
+ uint32_t viommu_id = self->viommu_id;
+ uint32_t dev_id = self->device_id;
+ uint32_t vdev_id = 0;
- /*
- * Invalidate the 1st cache entry but fail the 2nd request
- * due to invalid cache_id configuration in the 2nd request.
- */
- num_inv = 2;
- inv_reqs[0].flags = 0;
- inv_reqs[0].vdev_id = 0x99;
- inv_reqs[0].cache_id = 0;
- inv_reqs[1].flags = 0;
- inv_reqs[1].vdev_id = 0x99;
- inv_reqs[1].cache_id = MOCK_DEV_CACHE_ID_MAX + 1;
- test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
- IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
- sizeof(*inv_reqs), &num_inv);
- assert(num_inv == 1);
- test_cmd_dev_check_cache(dev_id, 0, 0);
- test_cmd_dev_check_cache(dev_id, 1,
- IOMMU_TEST_DEV_CACHE_DEFAULT);
- test_cmd_dev_check_cache(dev_id, 2,
- IOMMU_TEST_DEV_CACHE_DEFAULT);
- test_cmd_dev_check_cache(dev_id, 3,
- IOMMU_TEST_DEV_CACHE_DEFAULT);
-
- /* Invalidate the 2nd cache entry and verify */
- num_inv = 1;
- inv_reqs[0].flags = 0;
- inv_reqs[0].vdev_id = 0x99;
- inv_reqs[0].cache_id = 1;
- test_cmd_viommu_invalidate(viommu_id, inv_reqs,
- sizeof(*inv_reqs), &num_inv);
- assert(num_inv == 1);
- test_cmd_dev_check_cache(dev_id, 0, 0);
- test_cmd_dev_check_cache(dev_id, 1, 0);
- test_cmd_dev_check_cache(dev_id, 2,
- IOMMU_TEST_DEV_CACHE_DEFAULT);
- test_cmd_dev_check_cache(dev_id, 3,
- IOMMU_TEST_DEV_CACHE_DEFAULT);
-
- /* Invalidate the 3rd and 4th cache entries and verify */
- num_inv = 2;
- inv_reqs[0].flags = 0;
- inv_reqs[0].vdev_id = 0x99;
- inv_reqs[0].cache_id = 2;
- inv_reqs[1].flags = 0;
- inv_reqs[1].vdev_id = 0x99;
- inv_reqs[1].cache_id = 3;
- test_cmd_viommu_invalidate(viommu_id, inv_reqs,
- sizeof(*inv_reqs), &num_inv);
- assert(num_inv == 2);
- test_cmd_dev_check_cache_all(dev_id, 0);
+ if (!dev_id)
+ SKIP(return, "Skipping test for variant no_viommu");
- /* Invalidate all cache entries for nested_dev_id[1] and verify */
- num_inv = 1;
- inv_reqs[0].vdev_id = 0x99;
- inv_reqs[0].flags = IOMMU_TEST_INVALIDATE_FLAG_ALL;
- test_cmd_viommu_invalidate(viommu_id, inv_reqs,
- sizeof(*inv_reqs), &num_inv);
- assert(num_inv == 1);
- test_cmd_dev_check_cache_all(dev_id, 0);
- test_ioctl_destroy(vdev_id);
- }
+ test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
+ test_ioctl_destroy(self->stdev_id);
+ EXPECT_ERRNO(ENOENT, _test_ioctl_destroy(self->fd, vdev_id));
}
FIXTURE(iommufd_device_pasid)
@@ -3105,8 +3326,7 @@ TEST_F(iommufd_device_pasid, pasid_attach)
/* Allocate a regular nested hwpt based on viommu */
test_cmd_viommu_alloc(self->device_id, parent_hwpt_id,
- IOMMU_VIOMMU_TYPE_SELFTEST,
- &viommu_id);
+ IOMMU_VIOMMU_TYPE_SELFTEST, NULL, 0, &viommu_id);
test_cmd_hwpt_alloc_nested(self->device_id, viommu_id,
IOMMU_HWPT_ALLOC_PASID,
&nested_hwpt_id[2],
diff --git a/tools/testing/selftests/iommu/iommufd_fail_nth.c b/tools/testing/selftests/iommu/iommufd_fail_nth.c
index e11ec4b121fc..45c14323a618 100644
--- a/tools/testing/selftests/iommu/iommufd_fail_nth.c
+++ b/tools/testing/selftests/iommu/iommufd_fail_nth.c
@@ -113,7 +113,7 @@ static bool fail_nth_next(struct __test_metadata *_metadata,
* necessarily mean a test failure, just that the limit has to be made
* bigger.
*/
- ASSERT_GT(400, nth_state->iteration);
+ ASSERT_GT(1000, nth_state->iteration);
if (nth_state->iteration != 0) {
ssize_t res;
ssize_t res2;
@@ -634,6 +634,7 @@ TEST_FAIL_NTH(basic_fail_nth, device)
uint32_t idev_id;
uint32_t hwpt_id;
uint32_t viommu_id;
+ uint32_t hw_queue_id;
uint32_t vdev_id;
__u64 iova;
@@ -666,8 +667,8 @@ TEST_FAIL_NTH(basic_fail_nth, device)
&self->stdev_id, NULL, &idev_id))
return -1;
- if (_test_cmd_get_hw_info(self->fd, idev_id, &info,
- sizeof(info), NULL, NULL))
+ if (_test_cmd_get_hw_info(self->fd, idev_id, IOMMU_HW_INFO_TYPE_DEFAULT,
+ &info, sizeof(info), NULL, NULL))
return -1;
if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, 0,
@@ -688,13 +689,19 @@ TEST_FAIL_NTH(basic_fail_nth, device)
IOMMU_HWPT_DATA_NONE, 0, 0))
return -1;
- if (_test_cmd_viommu_alloc(self->fd, idev_id, hwpt_id,
- IOMMU_VIOMMU_TYPE_SELFTEST, 0, &viommu_id))
+ if (_test_cmd_viommu_alloc(self->fd, idev_id, hwpt_id, 0,
+ IOMMU_VIOMMU_TYPE_SELFTEST, NULL, 0,
+ &viommu_id))
return -1;
if (_test_cmd_vdevice_alloc(self->fd, viommu_id, idev_id, 0, &vdev_id))
return -1;
+ if (_test_cmd_hw_queue_alloc(self->fd, viommu_id,
+ IOMMU_HW_QUEUE_TYPE_SELFTEST, 0, iova,
+ PAGE_SIZE, &hw_queue_id))
+ return -1;
+
if (_test_ioctl_fault_alloc(self->fd, &fault_id, &fault_fd))
return -1;
close(fault_fd);
diff --git a/tools/testing/selftests/iommu/iommufd_utils.h b/tools/testing/selftests/iommu/iommufd_utils.h
index 72f6636e5d90..5502751d500c 100644
--- a/tools/testing/selftests/iommu/iommufd_utils.h
+++ b/tools/testing/selftests/iommu/iommufd_utils.h
@@ -11,7 +11,7 @@
#include <assert.h>
#include <poll.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#include "../../../../drivers/iommu/iommufd/iommufd_test.h"
/* Hack to make assertions more readable */
@@ -56,17 +56,26 @@ static unsigned long PAGE_SIZE;
#define offsetofend(TYPE, MEMBER) \
(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
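+
+/*
+ * mmap() returns MAP_FAILED ((void *)-1) and sets errno on failure, so
+ * the result is cast to long to fit EXPECT_ERRNO()'s -1/errno check.
+ */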
+#define test_err_mmap(_errno, length, offset) \
+ EXPECT_ERRNO(_errno, (long)mmap(NULL, length, PROT_READ | PROT_WRITE, \
+ MAP_SHARED, self->fd, offset))
+
static inline void *memfd_mmap(size_t length, int prot, int flags, int *mfd_p)
{
int mfd_flags = (flags & MAP_HUGETLB) ? MFD_HUGETLB : 0;
int mfd = memfd_create("buffer", mfd_flags);
+ void *buf = MAP_FAILED;
if (mfd <= 0)
return MAP_FAILED;
if (ftruncate(mfd, length))
- return MAP_FAILED;
+ goto out;
*mfd_p = mfd;
- return mmap(0, length, prot, flags, mfd, 0);
+ buf = mmap(0, length, prot, flags, mfd, 0);
+out:
+ if (buf == MAP_FAILED)
+ close(mfd);
+ return buf;
}
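+
+/*
+ * Example caller (an illustrative sketch; the function name is
+ * hypothetical): map a 2MiB memfd-backed buffer. On any failure
+ * memfd_mmap() now closes the memfd itself, so the caller only cleans
+ * up after success.
+ */
+static inline int memfd_mmap_example(void)
+{
+	size_t length = 2 * 1024 * 1024;
+	void *buf;
+	int mfd;
+
+	buf = memfd_mmap(length, PROT_READ | PROT_WRITE, MAP_SHARED, &mfd);
+	if (buf == MAP_FAILED)
+		return -1;
+
+	/* ... use buf ... */
+	munmap(buf, length);
+	close(mfd);
+	return 0;
+}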
/*
@@ -206,6 +215,18 @@ static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id, __u32 ft_i
ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \
hwpt_id, IOMMU_HWPT_DATA_NONE, NULL, \
0))
+#define test_cmd_hwpt_alloc_iommupt(device_id, pt_id, flags, iommupt_type, \
+ hwpt_id) \
+ ({ \
+ struct iommu_hwpt_selftest user_cfg = { \
+ .pagetable_type = iommupt_type \
+ }; \
+ \
+ ASSERT_EQ(0, _test_cmd_hwpt_alloc( \
+ self->fd, device_id, pt_id, 0, flags, \
+ hwpt_id, IOMMU_HWPT_DATA_SELFTEST, \
+ &user_cfg, sizeof(user_cfg))); \
+ })
#define test_err_hwpt_alloc(_errno, device_id, pt_id, flags, hwpt_id) \
EXPECT_ERRNO(_errno, _test_cmd_hwpt_alloc( \
self->fd, device_id, pt_id, 0, flags, \
@@ -539,6 +560,39 @@ static int _test_cmd_destroy_access_pages(int fd, unsigned int access_id,
EXPECT_ERRNO(_errno, _test_cmd_destroy_access_pages( \
self->fd, access_id, access_pages_id))
+static int _test_cmd_get_dmabuf(int fd, size_t len, int *out_fd)
+{
+ struct iommu_test_cmd cmd = {
+ .size = sizeof(cmd),
+ .op = IOMMU_TEST_OP_DMABUF_GET,
+ .dmabuf_get = { .length = len, .open_flags = O_CLOEXEC },
+ };
+
+ *out_fd = ioctl(fd, IOMMU_TEST_CMD, &cmd);
+ if (*out_fd < 0)
+ return -1;
+ return 0;
+}
+#define test_cmd_get_dmabuf(len, out_fd) \
+ ASSERT_EQ(0, _test_cmd_get_dmabuf(self->fd, len, out_fd))
+
+static int _test_cmd_revoke_dmabuf(int fd, int dmabuf_fd, bool revoked)
+{
+ struct iommu_test_cmd cmd = {
+ .size = sizeof(cmd),
+ .op = IOMMU_TEST_OP_DMABUF_REVOKE,
+ .dmabuf_revoke = { .dmabuf_fd = dmabuf_fd, .revoked = revoked },
+ };
+ int ret;
+
+ ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
+ if (ret < 0)
+ return -1;
+ return 0;
+}
+#define test_cmd_revoke_dmabuf(dmabuf_fd, revoke) \
+ ASSERT_EQ(0, _test_cmd_revoke_dmabuf(self->fd, dmabuf_fd, revoke))
+
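+/*
+ * Sketch of the intended flow inside a TEST_F() body (assumes the
+ * fixture's self->fd, as the wrappers above do):
+ *
+ *	int dmabuf_fd;
+ *
+ *	test_cmd_get_dmabuf(PAGE_SIZE, &dmabuf_fd);
+ *	test_cmd_revoke_dmabuf(dmabuf_fd, true);
+ *	test_cmd_revoke_dmabuf(dmabuf_fd, false);
+ *	close(dmabuf_fd);
+ */
+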
static int _test_ioctl_destroy(int fd, unsigned int id)
{
struct iommu_destroy cmd = {
@@ -709,6 +763,17 @@ static int _test_ioctl_ioas_map_file(int fd, unsigned int ioas_id, int mfd,
self->fd, ioas_id, mfd, start, length, iova_p, \
IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))
+#define test_ioctl_ioas_map_fixed_file(mfd, start, length, iova) \
+ ({ \
+ __u64 __iova = iova; \
+ ASSERT_EQ(0, _test_ioctl_ioas_map_file( \
+ self->fd, self->ioas_id, mfd, start, \
+ length, &__iova, \
+ IOMMU_IOAS_MAP_FIXED_IOVA | \
+ IOMMU_IOAS_MAP_WRITEABLE | \
+ IOMMU_IOAS_MAP_READABLE)); \
+ })
+
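+/*
+ * Sketch (illustrative): place a memfd-backed mapping at a fixed,
+ * caller-chosen IOVA; the macro supplies the fixture's ioas_id:
+ *
+ *	test_ioctl_ioas_map_fixed_file(mfd, 0, PAGE_SIZE,
+ *				       MOCK_APERTURE_START);
+ */
+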
static int _test_ioctl_set_temp_memory_limit(int fd, unsigned int limit)
{
struct iommu_test_cmd memlimit_cmd = {
@@ -757,20 +822,24 @@ static void teardown_iommufd(int fd, struct __test_metadata *_metadata)
#endif
/* @data can be NULL */
-static int _test_cmd_get_hw_info(int fd, __u32 device_id, void *data,
- size_t data_len, uint32_t *capabilities,
- uint8_t *max_pasid)
+static int _test_cmd_get_hw_info(int fd, __u32 device_id, __u32 data_type,
+ void *data, size_t data_len,
+ uint32_t *capabilities, uint8_t *max_pasid)
{
struct iommu_test_hw_info *info = (struct iommu_test_hw_info *)data;
struct iommu_hw_info cmd = {
.size = sizeof(cmd),
.dev_id = device_id,
.data_len = data_len,
+ .in_data_type = data_type,
.data_uptr = (uint64_t)data,
.out_capabilities = 0,
};
int ret;
+ if (data_type != IOMMU_HW_INFO_TYPE_DEFAULT)
+ cmd.flags |= IOMMU_HW_INFO_FLAG_INPUT_TYPE;
+
ret = ioctl(fd, IOMMU_GET_HW_INFO, &cmd);
if (ret)
return ret;
@@ -813,20 +882,23 @@ static int _test_cmd_get_hw_info(int fd, __u32 device_id, void *data,
return 0;
}
-#define test_cmd_get_hw_info(device_id, data, data_len) \
- ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, data, \
- data_len, NULL, NULL))
+#define test_cmd_get_hw_info(device_id, data_type, data, data_len) \
+ ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, data_type, \
+ data, data_len, NULL, NULL))
-#define test_err_get_hw_info(_errno, device_id, data, data_len) \
- EXPECT_ERRNO(_errno, _test_cmd_get_hw_info(self->fd, device_id, data, \
- data_len, NULL, NULL))
+#define test_err_get_hw_info(_errno, device_id, data_type, data, data_len) \
+ EXPECT_ERRNO(_errno, \
+ _test_cmd_get_hw_info(self->fd, device_id, data_type, \
+ data, data_len, NULL, NULL))
-#define test_cmd_get_hw_capabilities(device_id, caps, mask) \
- ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, NULL, \
+#define test_cmd_get_hw_capabilities(device_id, caps) \
+ ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, \
+ IOMMU_HW_INFO_TYPE_DEFAULT, NULL, \
0, &caps, NULL))
-#define test_cmd_get_hw_info_pasid(device_id, max_pasid) \
- ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, NULL, \
+#define test_cmd_get_hw_info_pasid(device_id, max_pasid) \
+ ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, \
+ IOMMU_HW_INFO_TYPE_DEFAULT, NULL, \
0, NULL, max_pasid))
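+
+/*
+ * Typical query with the default type (sketch, mirroring the selftests):
+ *
+ *	struct iommu_test_hw_info info;
+ *
+ *	test_cmd_get_hw_info(self->device_id, IOMMU_HW_INFO_TYPE_DEFAULT,
+ *			     &info, sizeof(info));
+ *
+ * Passing a non-default data_type makes the wrapper set
+ * IOMMU_HW_INFO_FLAG_INPUT_TYPE before issuing IOMMU_GET_HW_INFO.
+ */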
static int _test_ioctl_fault_alloc(int fd, __u32 *fault_id, __u32 *fault_fd)
@@ -897,7 +969,8 @@ static int _test_cmd_trigger_iopf(int fd, __u32 device_id, __u32 pasid,
pasid, fault_fd))
static int _test_cmd_viommu_alloc(int fd, __u32 device_id, __u32 hwpt_id,
- __u32 type, __u32 flags, __u32 *viommu_id)
+ __u32 flags, __u32 type, void *data,
+ __u32 data_len, __u32 *viommu_id)
{
struct iommu_viommu_alloc cmd = {
.size = sizeof(cmd),
@@ -905,6 +978,8 @@ static int _test_cmd_viommu_alloc(int fd, __u32 device_id, __u32 hwpt_id,
.type = type,
.dev_id = device_id,
.hwpt_id = hwpt_id,
+ .data_uptr = (uint64_t)data,
+ .data_len = data_len,
};
int ret;
@@ -916,13 +991,15 @@ static int _test_cmd_viommu_alloc(int fd, __u32 device_id, __u32 hwpt_id,
return 0;
}
-#define test_cmd_viommu_alloc(device_id, hwpt_id, type, viommu_id) \
- ASSERT_EQ(0, _test_cmd_viommu_alloc(self->fd, device_id, hwpt_id, \
- type, 0, viommu_id))
-#define test_err_viommu_alloc(_errno, device_id, hwpt_id, type, viommu_id) \
- EXPECT_ERRNO(_errno, \
- _test_cmd_viommu_alloc(self->fd, device_id, hwpt_id, \
- type, 0, viommu_id))
+#define test_cmd_viommu_alloc(device_id, hwpt_id, type, data, data_len, \
+ viommu_id) \
+ ASSERT_EQ(0, _test_cmd_viommu_alloc(self->fd, device_id, hwpt_id, 0, \
+ type, data, data_len, viommu_id))
+#define test_err_viommu_alloc(_errno, device_id, hwpt_id, type, data, \
+ data_len, viommu_id) \
+ EXPECT_ERRNO(_errno, \
+ _test_cmd_viommu_alloc(self->fd, device_id, hwpt_id, 0, \
+ type, data, data_len, viommu_id))
static int _test_cmd_vdevice_alloc(int fd, __u32 viommu_id, __u32 idev_id,
__u64 virt_id, __u32 *vdev_id)
@@ -951,6 +1028,37 @@ static int _test_cmd_vdevice_alloc(int fd, __u32 viommu_id, __u32 idev_id,
_test_cmd_vdevice_alloc(self->fd, viommu_id, idev_id, \
virt_id, vdev_id))
+static int _test_cmd_hw_queue_alloc(int fd, __u32 viommu_id, __u32 type,
+ __u32 idx, __u64 base_addr, __u64 length,
+ __u32 *hw_queue_id)
+{
+ struct iommu_hw_queue_alloc cmd = {
+ .size = sizeof(cmd),
+ .viommu_id = viommu_id,
+ .type = type,
+ .index = idx,
+ .nesting_parent_iova = base_addr,
+ .length = length,
+ };
+ int ret;
+
+ ret = ioctl(fd, IOMMU_HW_QUEUE_ALLOC, &cmd);
+ if (ret)
+ return ret;
+ if (hw_queue_id)
+ *hw_queue_id = cmd.out_hw_queue_id;
+ return 0;
+}
+
+#define test_cmd_hw_queue_alloc(viommu_id, type, idx, base_addr, len, out_qid) \
+ ASSERT_EQ(0, _test_cmd_hw_queue_alloc(self->fd, viommu_id, type, idx, \
+ base_addr, len, out_qid))
+#define test_err_hw_queue_alloc(_errno, viommu_id, type, idx, base_addr, len, \
+ out_qid) \
+ EXPECT_ERRNO(_errno, \
+ _test_cmd_hw_queue_alloc(self->fd, viommu_id, type, idx, \
+ base_addr, len, out_qid))
+
static int _test_cmd_veventq_alloc(int fd, __u32 viommu_id, __u32 type,
__u32 *veventq_id, __u32 *veventq_fd)
{
@@ -990,15 +1098,13 @@ static int _test_cmd_trigger_vevents(int fd, __u32 dev_id, __u32 nvevents)
.dev_id = dev_id,
},
};
- int ret;
while (nvevents--) {
- ret = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_TRIGGER_VEVENT),
- &trigger_vevent_cmd);
- if (ret < 0)
+ if (ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_TRIGGER_VEVENT),
+ &trigger_vevent_cmd))
return -1;
}
- return ret;
+ return 0;
}
#define test_cmd_trigger_vevents(dev_id, nvevents) \
diff --git a/tools/testing/selftests/ipc/msgque.c b/tools/testing/selftests/ipc/msgque.c
index e9dbb84c100a..e107379d185c 100644
--- a/tools/testing/selftests/ipc/msgque.c
+++ b/tools/testing/selftests/ipc/msgque.c
@@ -7,7 +7,7 @@
#include <sys/msg.h>
#include <fcntl.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#define MAX_MSG_SIZE 32
@@ -39,26 +39,26 @@ int restore_queue(struct msgque_data *msgque)
fd = open("/proc/sys/kernel/msg_next_id", O_WRONLY);
if (fd == -1) {
- printf("Failed to open /proc/sys/kernel/msg_next_id\n");
+ ksft_test_result_fail("Failed to open /proc/sys/kernel/msg_next_id\n");
return -errno;
}
sprintf(buf, "%d", msgque->msq_id);
ret = write(fd, buf, strlen(buf));
if (ret != strlen(buf)) {
- printf("Failed to write to /proc/sys/kernel/msg_next_id\n");
+ ksft_test_result_fail("Failed to write to /proc/sys/kernel/msg_next_id\n");
return -errno;
}
id = msgget(msgque->key, msgque->mode | IPC_CREAT | IPC_EXCL);
if (id == -1) {
- printf("Failed to create queue\n");
+ ksft_test_result_fail("Failed to create queue\n");
return -errno;
}
if (id != msgque->msq_id) {
- printf("Restored queue has wrong id (%d instead of %d)\n",
- id, msgque->msq_id);
+ ksft_test_result_fail("Restored queue has wrong id (%d instead of %d)\n"
+ , id, msgque->msq_id);
ret = -EFAULT;
goto destroy;
}
@@ -66,7 +66,7 @@ int restore_queue(struct msgque_data *msgque)
for (i = 0; i < msgque->qnum; i++) {
if (msgsnd(msgque->msq_id, &msgque->messages[i].mtype,
msgque->messages[i].msize, IPC_NOWAIT) != 0) {
- printf("msgsnd failed (%m)\n");
+ ksft_test_result_fail("msgsnd failed (%m)\n");
ret = -errno;
goto destroy;
}
@@ -90,23 +90,22 @@ int check_and_destroy_queue(struct msgque_data *msgque)
if (ret < 0) {
if (errno == ENOMSG)
break;
- printf("Failed to read IPC message: %m\n");
+ ksft_test_result_fail("Failed to read IPC message: %m\n");
ret = -errno;
goto err;
}
if (ret != msgque->messages[cnt].msize) {
- printf("Wrong message size: %d (expected %d)\n", ret,
- msgque->messages[cnt].msize);
+ ksft_test_result_fail("Wrong message size: %d (expected %d)\n", ret, msgque->messages[cnt].msize);
ret = -EINVAL;
goto err;
}
if (message.mtype != msgque->messages[cnt].mtype) {
- printf("Wrong message type\n");
+ ksft_test_result_fail("Wrong message type\n");
ret = -EINVAL;
goto err;
}
if (memcmp(message.mtext, msgque->messages[cnt].mtext, ret)) {
- printf("Wrong message content\n");
+ ksft_test_result_fail("Wrong message content\n");
ret = -EINVAL;
goto err;
}
@@ -114,7 +113,7 @@ int check_and_destroy_queue(struct msgque_data *msgque)
}
if (cnt != msgque->qnum) {
- printf("Wrong message number\n");
+ ksft_test_result_fail("Wrong message number\n");
ret = -EINVAL;
goto err;
}
@@ -139,7 +138,7 @@ int dump_queue(struct msgque_data *msgque)
if (ret < 0) {
if (errno == EINVAL)
continue;
- printf("Failed to get stats for IPC queue with id %d\n",
+ ksft_test_result_fail("Failed to get stats for IPC queue with id %d\n",
kern_id);
return -errno;
}
@@ -150,7 +149,7 @@ int dump_queue(struct msgque_data *msgque)
msgque->messages = malloc(sizeof(struct msg1) * ds.msg_qnum);
if (msgque->messages == NULL) {
- printf("Failed to get stats for IPC queue\n");
+ ksft_test_result_fail("Failed to get stats for IPC queue\n");
return -ENOMEM;
}
@@ -162,7 +161,7 @@ int dump_queue(struct msgque_data *msgque)
ret = msgrcv(msgque->msq_id, &msgque->messages[i].mtype,
MAX_MSG_SIZE, i, IPC_NOWAIT | MSG_COPY);
if (ret < 0) {
- printf("Failed to copy IPC message: %m (%d)\n", errno);
+ ksft_test_result_fail("Failed to copy IPC message: %m (%d)\n", errno);
return -errno;
}
msgque->messages[i].msize = ret;
@@ -178,7 +177,7 @@ int fill_msgque(struct msgque_data *msgque)
memcpy(msgbuf.mtext, TEST_STRING, sizeof(TEST_STRING));
if (msgsnd(msgque->msq_id, &msgbuf.mtype, sizeof(TEST_STRING),
IPC_NOWAIT) != 0) {
- printf("First message send failed (%m)\n");
+ ksft_test_result_fail("First message send failed (%m)\n");
return -errno;
}
@@ -186,7 +185,7 @@ int fill_msgque(struct msgque_data *msgque)
memcpy(msgbuf.mtext, ANOTHER_TEST_STRING, sizeof(ANOTHER_TEST_STRING));
if (msgsnd(msgque->msq_id, &msgbuf.mtype, sizeof(ANOTHER_TEST_STRING),
IPC_NOWAIT) != 0) {
- printf("Second message send failed (%m)\n");
+ ksft_test_result_fail("Second message send failed (%m)\n");
return -errno;
}
return 0;
@@ -202,44 +201,44 @@ int main(int argc, char **argv)
msgque.key = ftok(argv[0], 822155650);
if (msgque.key == -1) {
- printf("Can't make key: %d\n", -errno);
+ ksft_test_result_fail("Can't make key: %d\n", -errno);
ksft_exit_fail();
}
msgque.msq_id = msgget(msgque.key, IPC_CREAT | IPC_EXCL | 0666);
if (msgque.msq_id == -1) {
err = -errno;
- printf("Can't create queue: %d\n", err);
+ ksft_test_result_fail("Can't create queue: %d\n", err);
goto err_out;
}
err = fill_msgque(&msgque);
if (err) {
- printf("Failed to fill queue: %d\n", err);
+ ksft_test_result_fail("Failed to fill queue: %d\n", err);
goto err_destroy;
}
err = dump_queue(&msgque);
if (err) {
- printf("Failed to dump queue: %d\n", err);
+ ksft_test_result_fail("Failed to dump queue: %d\n", err);
goto err_destroy;
}
err = check_and_destroy_queue(&msgque);
if (err) {
- printf("Failed to check and destroy queue: %d\n", err);
+ ksft_test_result_fail("Failed to check and destroy queue: %d\n", err);
goto err_out;
}
err = restore_queue(&msgque);
if (err) {
- printf("Failed to restore queue: %d\n", err);
+ ksft_test_result_fail("Failed to restore queue: %d\n", err);
goto err_destroy;
}
err = check_and_destroy_queue(&msgque);
if (err) {
- printf("Failed to test queue: %d\n", err);
+ ksft_test_result_fail("Failed to test queue: %d\n", err);
goto err_out;
}
ksft_exit_pass();
diff --git a/tools/testing/selftests/ir/ir_loopback.c b/tools/testing/selftests/ir/ir_loopback.c
index f4a15cbdd5ea..adfcf50b1264 100644
--- a/tools/testing/selftests/ir/ir_loopback.c
+++ b/tools/testing/selftests/ir/ir_loopback.c
@@ -23,7 +23,7 @@
#include <dirent.h>
#include <sys/stat.h>
#include <fcntl.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#define TEST_SCANCODES 10
#define SYSFS_PATH_MAX 256
diff --git a/tools/testing/selftests/kcmp/kcmp_test.c b/tools/testing/selftests/kcmp/kcmp_test.c
index d7a8e321bb16..79aa438b7479 100644
--- a/tools/testing/selftests/kcmp/kcmp_test.c
+++ b/tools/testing/selftests/kcmp/kcmp_test.c
@@ -18,7 +18,7 @@
#include <sys/wait.h>
#include <sys/epoll.h>
-#include "../kselftest.h"
+#include "kselftest.h"
static long sys_kcmp(int pid1, int pid2, int type, unsigned long fd1, unsigned long fd2)
{
diff --git a/tools/testing/selftests/kexec/.gitignore b/tools/testing/selftests/kexec/.gitignore
new file mode 100644
index 000000000000..5f3d9e089ae8
--- /dev/null
+++ b/tools/testing/selftests/kexec/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+test_kexec_jump
diff --git a/tools/testing/selftests/kexec/Makefile b/tools/testing/selftests/kexec/Makefile
index e3000ccb9a5d..874cfdd3b75b 100644
--- a/tools/testing/selftests/kexec/Makefile
+++ b/tools/testing/selftests/kexec/Makefile
@@ -12,7 +12,7 @@ include ../../../scripts/Makefile.arch
ifeq ($(IS_64_BIT)$(ARCH_PROCESSED),1x86)
TEST_PROGS += test_kexec_jump.sh
-test_kexec_jump.sh: $(OUTPUT)/test_kexec_jump
+TEST_GEN_PROGS := test_kexec_jump
endif
include ../lib.mk
diff --git a/tools/testing/selftests/kho/arm64.conf b/tools/testing/selftests/kho/arm64.conf
new file mode 100644
index 000000000000..ee696807cd35
--- /dev/null
+++ b/tools/testing/selftests/kho/arm64.conf
@@ -0,0 +1,9 @@
+QEMU_CMD="qemu-system-aarch64 -M virt -cpu max"
+QEMU_KCONFIG="
+CONFIG_SERIAL_AMBA_PL010=y
+CONFIG_SERIAL_AMBA_PL010_CONSOLE=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+"
+KERNEL_IMAGE="Image"
+KERNEL_CMDLINE="console=ttyAMA0"
diff --git a/tools/testing/selftests/kho/init.c b/tools/testing/selftests/kho/init.c
new file mode 100644
index 000000000000..6d9e91d55d68
--- /dev/null
+++ b/tools/testing/selftests/kho/init.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/syscall.h>
+#include <sys/mount.h>
+#include <sys/reboot.h>
+#include <linux/kexec.h>
+
+/* from arch/x86/include/asm/setup.h */
+#define COMMAND_LINE_SIZE 2048
+
+#define KHO_FINALIZE "/debugfs/kho/out/finalize"
+#define KERNEL_IMAGE "/kernel"
+
+static int mount_filesystems(void)
+{
+ if (mount("debugfs", "/debugfs", "debugfs", 0, NULL) < 0)
+ return -1;
+
+ return mount("proc", "/proc", "proc", 0, NULL);
+}
+
+static int kho_enable(void)
+{
+ const char enable[] = "1";
+ int fd;
+
+ fd = open(KHO_FINALIZE, O_RDWR);
+ if (fd < 0)
+ return -1;
+
+	if (write(fd, enable, sizeof(enable)) != sizeof(enable)) {
+		close(fd);
+		return 1;
+	}
+
+ close(fd);
+ return 0;
+}
+
+static long kexec_file_load(int kernel_fd, int initrd_fd,
+ unsigned long cmdline_len, const char *cmdline,
+ unsigned long flags)
+{
+ return syscall(__NR_kexec_file_load, kernel_fd, initrd_fd, cmdline_len,
+ cmdline, flags);
+}
+
+static int kexec_load(void)
+{
+ char cmdline[COMMAND_LINE_SIZE];
+ ssize_t len;
+ int fd, err;
+
+ fd = open("/proc/cmdline", O_RDONLY);
+ if (fd < 0)
+ return -1;
+
+ len = read(fd, cmdline, sizeof(cmdline));
+ close(fd);
+ if (len < 0)
+ return -1;
+
+ /* replace \n with \0 */
+ cmdline[len - 1] = 0;
+ fd = open(KERNEL_IMAGE, O_RDONLY);
+ if (fd < 0)
+ return -1;
+
+ err = kexec_file_load(fd, -1, len, cmdline, KEXEC_FILE_NO_INITRAMFS);
+ close(fd);
+
+ return err ? : 0;
+}
+
+int main(int argc, char *argv[])
+{
+ if (mount_filesystems())
+ goto err_reboot;
+
+ if (kho_enable())
+ goto err_reboot;
+
+ if (kexec_load())
+ goto err_reboot;
+
+ if (reboot(RB_KEXEC))
+ goto err_reboot;
+
+ return 0;
+
+err_reboot:
+ reboot(RB_AUTOBOOT);
+ return -1;
+}
diff --git a/tools/testing/selftests/kho/vmtest.sh b/tools/testing/selftests/kho/vmtest.sh
new file mode 100755
index 000000000000..49fdac8e8b15
--- /dev/null
+++ b/tools/testing/selftests/kho/vmtest.sh
@@ -0,0 +1,186 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+set -ue
+
+CROSS_COMPILE="${CROSS_COMPILE:-""}"
+
+test_dir=$(realpath "$(dirname "$0")")
+kernel_dir=$(realpath "$test_dir/../../../..")
+
+tmp_dir=$(mktemp -d /tmp/kho-test.XXXXXXXX)
+headers_dir="$tmp_dir/usr"
+initrd="$tmp_dir/initrd.cpio"
+
+source "$test_dir/../kselftest/ktap_helpers.sh"
+
+function usage() {
+ cat <<EOF
+$0 [-d build_dir] [-j jobs] [-t target_arch] [-h]
+Options:
+ -d) path to the kernel build directory
+ -j) number of jobs for compilation, similar to -j in make
+ -t) run test for target_arch, requires CROSS_COMPILE set
+ supported targets: aarch64, x86_64
+ -h) display this help
+EOF
+}
+
+function cleanup() {
+ rm -fr "$tmp_dir"
+ ktap_finished
+}
+trap cleanup EXIT
+
+function skip() {
+ local msg=${1:-""}
+
+ ktap_test_skip "$msg"
+ exit "$KSFT_SKIP"
+}
+
+function fail() {
+ local msg=${1:-""}
+
+ ktap_test_fail "$msg"
+ exit "$KSFT_FAIL"
+}
+
+function build_kernel() {
+ local build_dir=$1
+ local make_cmd=$2
+ local arch_kconfig=$3
+ local kimage=$4
+
+ local kho_config="$tmp_dir/kho.config"
+ local kconfig="$build_dir/.config"
+
+ # enable initrd, KHO and KHO test in kernel configuration
+ tee "$kconfig" > "$kho_config" <<EOF
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KEXEC_HANDOVER=y
+CONFIG_KEXEC_HANDOVER_DEBUGFS=y
+CONFIG_TEST_KEXEC_HANDOVER=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_VM=y
+$arch_kconfig
+EOF
+
+ make_cmd="$make_cmd -C $kernel_dir O=$build_dir"
+ $make_cmd olddefconfig
+
+	# verify that kernel configuration has all necessary options
+ while read -r opt ; do
+ grep "$opt" "$kconfig" &>/dev/null || skip "$opt is missing"
+ done < "$kho_config"
+
+ $make_cmd "$kimage"
+ $make_cmd headers_install INSTALL_HDR_PATH="$headers_dir"
+}
+
+function mkinitrd() {
+ local kernel=$1
+
+ "$CROSS_COMPILE"gcc -s -static -Os -nostdinc -nostdlib \
+ -fno-asynchronous-unwind-tables -fno-ident \
+ -I "$headers_dir/include" \
+ -I "$kernel_dir/tools/include/nolibc" \
+ -o "$tmp_dir/init" "$test_dir/init.c"
+
+ cat > "$tmp_dir/cpio_list" <<EOF
+dir /dev 0755 0 0
+dir /proc 0755 0 0
+dir /debugfs 0755 0 0
+nod /dev/console 0600 0 0 c 5 1
+file /init $tmp_dir/init 0755 0 0
+file /kernel $kernel 0644 0 0
+EOF
+
+ "$build_dir/usr/gen_init_cpio" "$tmp_dir/cpio_list" > "$initrd"
+}
+
+function run_qemu() {
+ local qemu_cmd=$1
+ local cmdline=$2
+ local kernel=$3
+ local serial="$tmp_dir/qemu.serial"
+
+ cmdline="$cmdline kho=on panic=-1"
+
+ $qemu_cmd -m 1G -smp 2 -no-reboot -nographic -nodefaults \
+ -accel kvm -accel hvf -accel tcg \
+ -serial file:"$serial" \
+ -append "$cmdline" \
+ -kernel "$kernel" \
+ -initrd "$initrd"
+
+ grep "KHO restore succeeded" "$serial" &> /dev/null || fail "KHO failed"
+}
+
+function target_to_arch() {
+ local target=$1
+
+ case $target in
+ aarch64) echo "arm64" ;;
+ x86_64) echo "x86" ;;
+ *) skip "architecture $target is not supported"
+ esac
+}
+
+function main() {
+ local build_dir="$kernel_dir/.kho"
+ local jobs=$(($(nproc) * 2))
+ local target="$(uname -m)"
+
+ # skip the test if any of the preparation steps fails
+ set -o errtrace
+ trap skip ERR
+
+ while getopts 'hd:j:t:' opt; do
+ case $opt in
+ d)
+ build_dir="$OPTARG"
+ ;;
+ j)
+ jobs="$OPTARG"
+ ;;
+ t)
+ target="$OPTARG"
+ ;;
+ h)
+ usage
+ exit 0
+ ;;
+ *)
+			echo "Unknown argument $opt"
+ usage
+ exit 1
+ ;;
+ esac
+ done
+
+ ktap_print_header
+ ktap_set_plan 1
+
+ if [[ "$target" != "$(uname -m)" ]] && [[ -z "$CROSS_COMPILE" ]]; then
+ skip "Cross-platform testing needs to specify CROSS_COMPILE"
+ fi
+
+ mkdir -p "$build_dir"
+ local arch=$(target_to_arch "$target")
+ source "$test_dir/$arch.conf"
+
+ # build the kernel and create initrd
+ # initrd includes the kernel image that will be kexec'ed
+ local make_cmd="make ARCH=$arch CROSS_COMPILE=$CROSS_COMPILE -j$jobs"
+ build_kernel "$build_dir" "$make_cmd" "$QEMU_KCONFIG" "$KERNEL_IMAGE"
+
+ local kernel="$build_dir/arch/$arch/boot/$KERNEL_IMAGE"
+ mkinitrd "$kernel"
+
+ run_qemu "$QEMU_CMD" "$KERNEL_CMDLINE" "$kernel"
+
+ ktap_test_pass "KHO succeeded"
+}
+
+main "$@"
diff --git a/tools/testing/selftests/kho/x86.conf b/tools/testing/selftests/kho/x86.conf
new file mode 100644
index 000000000000..b419e610ca22
--- /dev/null
+++ b/tools/testing/selftests/kho/x86.conf
@@ -0,0 +1,7 @@
+QEMU_CMD=qemu-system-x86_64
+QEMU_KCONFIG="
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+"
+KERNEL_IMAGE="bzImage"
+KERNEL_CMDLINE="console=ttyS0"
diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h
index c3b6d2604b1e..afbcf8412ae5 100644
--- a/tools/testing/selftests/kselftest.h
+++ b/tools/testing/selftests/kselftest.h
@@ -54,6 +54,7 @@
#include <stdlib.h>
#include <unistd.h>
#include <stdarg.h>
+#include <stdbool.h>
#include <string.h>
#include <stdio.h>
#include <sys/utsname.h>
@@ -92,6 +93,14 @@
#endif
#define __printf(a, b) __attribute__((format(printf, a, b)))
+#ifndef __always_unused
+#define __always_unused __attribute__((__unused__))
+#endif
+
+#ifndef __maybe_unused
+#define __maybe_unused __attribute__((__unused__))
+#endif
+
/* counters */
struct ksft_count {
unsigned int ksft_pass;
@@ -104,6 +113,7 @@ struct ksft_count {
static struct ksft_count ksft_cnt;
static unsigned int ksft_plan;
+static bool ksft_debug_enabled;
static inline unsigned int ksft_test_num(void)
{
@@ -175,6 +185,18 @@ static inline __printf(1, 2) void ksft_print_msg(const char *msg, ...)
va_end(args);
}
+static inline __printf(1, 2) void ksft_print_dbg_msg(const char *msg, ...)
+{
+ va_list args;
+
+ if (!ksft_debug_enabled)
+ return;
+
+ va_start(args, msg);
+	/* ksft_print_msg() is variadic, so forward the va_list directly */
+	printf("# ");
+	vprintf(msg, args);
+ va_end(args);
+}
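+
+/*
+ * Example (illustrative): the message below is printed only when
+ * ksft_debug_enabled is set, e.g. by running a harness test with -d:
+ *
+ *	ksft_print_dbg_msg("vcpu %d: state %u\n", vcpu_id, state);
+ */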
+
static inline void ksft_perror(const char *msg)
{
ksft_print_msg("%s: %s (%d)\n", msg, strerror(errno), errno);
diff --git a/tools/testing/selftests/kselftest/runner.sh b/tools/testing/selftests/kselftest/runner.sh
index 2c3c58e65a41..3a62039fa621 100644
--- a/tools/testing/selftests/kselftest/runner.sh
+++ b/tools/testing/selftests/kselftest/runner.sh
@@ -44,6 +44,12 @@ tap_timeout()
fi
}
+report_failure()
+{
+ echo "not ok $*"
+ echo "$*" >> "$kselftest_failures_file"
+}
+
run_one()
{
DIR="$1"
@@ -105,7 +111,7 @@ run_one()
echo "# $TEST_HDR_MSG"
if [ ! -e "$TEST" ]; then
echo "# Warning: file $TEST is missing!"
- echo "not ok $test_num $TEST_HDR_MSG"
+ report_failure "$test_num $TEST_HDR_MSG"
else
if [ -x /usr/bin/stdbuf ]; then
stdbuf="/usr/bin/stdbuf --output=L "
@@ -123,7 +129,7 @@ run_one()
interpreter=$(head -n 1 "$TEST" | cut -c 3-)
cmd="$stdbuf $interpreter ./$BASENAME_TEST"
else
- echo "not ok $test_num $TEST_HDR_MSG"
+ report_failure "$test_num $TEST_HDR_MSG"
return
fi
fi
@@ -137,9 +143,9 @@ run_one()
echo "ok $test_num $TEST_HDR_MSG # SKIP"
elif [ $rc -eq $timeout_rc ]; then \
echo "#"
- echo "not ok $test_num $TEST_HDR_MSG # TIMEOUT $kselftest_timeout seconds"
+ report_failure "$test_num $TEST_HDR_MSG # TIMEOUT $kselftest_timeout seconds"
else
- echo "not ok $test_num $TEST_HDR_MSG # exit=$rc"
+ report_failure "$test_num $TEST_HDR_MSG # exit=$rc"
fi)
cd - >/dev/null
fi
diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
index 2925e47db995..baae6b7ded41 100644
--- a/tools/testing/selftests/kselftest_harness.h
+++ b/tools/testing/selftests/kselftest_harness.h
@@ -14,7 +14,7 @@
*
* .. code-block:: c
*
- * #include "../kselftest_harness.h"
+ * #include "kselftest_harness.h"
*
* TEST(standalone_test) {
* do_some_stuff;
@@ -751,7 +751,7 @@
for (; _metadata->trigger; _metadata->trigger = \
__bail(_assert, _metadata))
-#define is_signed_type(var) (!!(((__typeof__(var))(-1)) < (__typeof__(var))1))
+#define is_signed_var(var) (!!(((__typeof__(var))(-1)) < (__typeof__(var))1))
#define __EXPECT(_expected, _expected_str, _seen, _seen_str, _t, _assert) do { \
/* Avoid multiple evaluation of the cases */ \
@@ -759,7 +759,7 @@
__typeof__(_seen) __seen = (_seen); \
if (!(__exp _t __seen)) { \
/* Report with actual signedness to avoid weird output. */ \
- switch (is_signed_type(__exp) * 2 + is_signed_type(__seen)) { \
+ switch (is_signed_var(__exp) * 2 + is_signed_var(__seen)) { \
case 0: { \
uintmax_t __exp_print = (uintmax_t)__exp; \
uintmax_t __seen_print = (uintmax_t)__seen; \
@@ -1091,7 +1091,7 @@ static int test_harness_argv_check(int argc, char **argv)
{
int opt;
- while ((opt = getopt(argc, argv, "hlF:f:V:v:t:T:r:")) != -1) {
+ while ((opt = getopt(argc, argv, "dhlF:f:V:v:t:T:r:")) != -1) {
switch (opt) {
case 'f':
case 'F':
@@ -1104,12 +1104,16 @@ static int test_harness_argv_check(int argc, char **argv)
case 'l':
test_harness_list_tests();
return KSFT_SKIP;
+ case 'd':
+ ksft_debug_enabled = true;
+ break;
case 'h':
default:
fprintf(stderr,
- "Usage: %s [-h|-l] [-t|-T|-v|-V|-f|-F|-r name]\n"
+ "Usage: %s [-h|-l|-d] [-t|-T|-v|-V|-f|-F|-r name]\n"
"\t-h print help\n"
"\t-l list all tests\n"
+ "\t-d enable debug prints\n"
"\n"
"\t-t name include test\n"
"\t-T name exclude test\n"
@@ -1142,8 +1146,9 @@ static bool test_enabled(int argc, char **argv,
int opt;
optind = 1;
- while ((opt = getopt(argc, argv, "F:f:V:v:t:T:r:")) != -1) {
- has_positive |= islower(opt);
+ while ((opt = getopt(argc, argv, "dF:f:V:v:t:T:r:")) != -1) {
+ if (opt != 'd')
+ has_positive |= islower(opt);
switch (tolower(opt)) {
case 't':
diff --git a/tools/testing/selftests/kselftest_harness/Makefile b/tools/testing/selftests/kselftest_harness/Makefile
index 0617535a6ce4..d2369c01701a 100644
--- a/tools/testing/selftests/kselftest_harness/Makefile
+++ b/tools/testing/selftests/kselftest_harness/Makefile
@@ -2,6 +2,7 @@
TEST_GEN_PROGS_EXTENDED := harness-selftest
TEST_PROGS := harness-selftest.sh
+TEST_FILES := harness-selftest.expected
EXTRA_CLEAN := harness-selftest.seen
include ../lib.mk
diff --git a/tools/testing/selftests/kselftest_harness/harness-selftest.c b/tools/testing/selftests/kselftest_harness/harness-selftest.c
index b555493bdb4d..7820bb5d0e6d 100644
--- a/tools/testing/selftests/kselftest_harness/harness-selftest.c
+++ b/tools/testing/selftests/kselftest_harness/harness-selftest.c
@@ -8,7 +8,7 @@
/* Avoid any inconsistencies */
#define TH_LOG_STREAM stdout
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
static void test_helper(struct __test_metadata *_metadata)
{
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index d9fffe06d3ea..f2b223072b62 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -6,7 +6,7 @@ ARCH ?= $(SUBARCH)
ifeq ($(ARCH),$(filter $(ARCH),arm64 s390 riscv x86 x86_64 loongarch))
# Top-level selftests allows ARCH=x86_64 :-(
ifeq ($(ARCH),x86_64)
- ARCH := x86
+ override ARCH := x86
endif
include Makefile.kvm
else
diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm
index 38b95998e1e6..ba5c2b643efa 100644
--- a/tools/testing/selftests/kvm/Makefile.kvm
+++ b/tools/testing/selftests/kvm/Makefile.kvm
@@ -59,6 +59,7 @@ TEST_PROGS_x86 += x86/nx_huge_pages_test.sh
TEST_GEN_PROGS_COMMON = demand_paging_test
TEST_GEN_PROGS_COMMON += dirty_log_test
TEST_GEN_PROGS_COMMON += guest_print_test
+TEST_GEN_PROGS_COMMON += irqfd_test
TEST_GEN_PROGS_COMMON += kvm_binary_stats_test
TEST_GEN_PROGS_COMMON += kvm_create_max_vcpus
TEST_GEN_PROGS_COMMON += kvm_page_table_test
@@ -86,8 +87,13 @@ TEST_GEN_PROGS_x86 += x86/kvm_clock_test
TEST_GEN_PROGS_x86 += x86/kvm_pv_test
TEST_GEN_PROGS_x86 += x86/kvm_buslock_test
TEST_GEN_PROGS_x86 += x86/monitor_mwait_test
+TEST_GEN_PROGS_x86 += x86/msrs_test
+TEST_GEN_PROGS_x86 += x86/nested_close_kvm_test
TEST_GEN_PROGS_x86 += x86/nested_emulation_test
TEST_GEN_PROGS_x86 += x86/nested_exceptions_test
+TEST_GEN_PROGS_x86 += x86/nested_invalid_cr3_test
+TEST_GEN_PROGS_x86 += x86/nested_tsc_adjust_test
+TEST_GEN_PROGS_x86 += x86/nested_tsc_scaling_test
TEST_GEN_PROGS_x86 += x86/platform_info_test
TEST_GEN_PROGS_x86 += x86/pmu_counters_test
TEST_GEN_PROGS_x86 += x86/pmu_event_filter_test
@@ -109,14 +115,12 @@ TEST_GEN_PROGS_x86 += x86/ucna_injection_test
TEST_GEN_PROGS_x86 += x86/userspace_io_test
TEST_GEN_PROGS_x86 += x86/userspace_msr_exit_test
TEST_GEN_PROGS_x86 += x86/vmx_apic_access_test
-TEST_GEN_PROGS_x86 += x86/vmx_close_while_nested_test
TEST_GEN_PROGS_x86 += x86/vmx_dirty_log_test
TEST_GEN_PROGS_x86 += x86/vmx_exception_with_invalid_guest_state
TEST_GEN_PROGS_x86 += x86/vmx_msrs_test
TEST_GEN_PROGS_x86 += x86/vmx_invalid_nested_guest_state
+TEST_GEN_PROGS_x86 += x86/vmx_nested_la57_state_test
TEST_GEN_PROGS_x86 += x86/vmx_set_nested_state_test
-TEST_GEN_PROGS_x86 += x86/vmx_tsc_adjust_test
-TEST_GEN_PROGS_x86 += x86/vmx_nested_tsc_scaling_test
TEST_GEN_PROGS_x86 += x86/apic_bus_clock_test
TEST_GEN_PROGS_x86 += x86/xapic_ipi_test
TEST_GEN_PROGS_x86 += x86/xapic_state_test
@@ -134,6 +138,7 @@ TEST_GEN_PROGS_x86 += x86/amx_test
TEST_GEN_PROGS_x86 += x86/max_vcpuid_cap_test
TEST_GEN_PROGS_x86 += x86/triple_fault_event_test
TEST_GEN_PROGS_x86 += x86/recalc_apic_map_test
+TEST_GEN_PROGS_x86 += x86/aperfmperf_test
TEST_GEN_PROGS_x86 += access_tracking_perf_test
TEST_GEN_PROGS_x86 += coalesced_io_test
TEST_GEN_PROGS_x86 += dirty_log_perf_test
@@ -153,12 +158,15 @@ TEST_GEN_PROGS_EXTENDED_x86 += x86/nx_huge_pages_test
TEST_GEN_PROGS_arm64 = $(TEST_GEN_PROGS_COMMON)
TEST_GEN_PROGS_arm64 += arm64/aarch32_id_regs
TEST_GEN_PROGS_arm64 += arm64/arch_timer_edge_cases
+TEST_GEN_PROGS_arm64 += arm64/at
TEST_GEN_PROGS_arm64 += arm64/debug-exceptions
+TEST_GEN_PROGS_arm64 += arm64/hello_el2
TEST_GEN_PROGS_arm64 += arm64/host_sve
TEST_GEN_PROGS_arm64 += arm64/hypercalls
-TEST_GEN_PROGS_arm64 += arm64/mmio_abort
+TEST_GEN_PROGS_arm64 += arm64/external_aborts
TEST_GEN_PROGS_arm64 += arm64/page_fault_test
TEST_GEN_PROGS_arm64 += arm64/psci_test
+TEST_GEN_PROGS_arm64 += arm64/sea_to_user
TEST_GEN_PROGS_arm64 += arm64/set_id_regs
TEST_GEN_PROGS_arm64 += arm64/smccc_filter
TEST_GEN_PROGS_arm64 += arm64/vcpu_width_config
@@ -167,11 +175,13 @@ TEST_GEN_PROGS_arm64 += arm64/vgic_irq
TEST_GEN_PROGS_arm64 += arm64/vgic_lpi_stress
TEST_GEN_PROGS_arm64 += arm64/vpmu_counter_access
TEST_GEN_PROGS_arm64 += arm64/no-vgic-v3
+TEST_GEN_PROGS_arm64 += arm64/kvm-uuid
TEST_GEN_PROGS_arm64 += access_tracking_perf_test
TEST_GEN_PROGS_arm64 += arch_timer
TEST_GEN_PROGS_arm64 += coalesced_io_test
TEST_GEN_PROGS_arm64 += dirty_log_perf_test
TEST_GEN_PROGS_arm64 += get-reg-list
+TEST_GEN_PROGS_arm64 += guest_memfd_test
TEST_GEN_PROGS_arm64 += memslot_modification_stress_test
TEST_GEN_PROGS_arm64 += memslot_perf_test
TEST_GEN_PROGS_arm64 += mmu_stress_test
@@ -188,16 +198,24 @@ TEST_GEN_PROGS_s390 += s390/debug_test
TEST_GEN_PROGS_s390 += s390/cpumodel_subfuncs_test
TEST_GEN_PROGS_s390 += s390/shared_zeropage_test
TEST_GEN_PROGS_s390 += s390/ucontrol_test
+TEST_GEN_PROGS_s390 += s390/user_operexec
TEST_GEN_PROGS_s390 += rseq_test
TEST_GEN_PROGS_riscv = $(TEST_GEN_PROGS_COMMON)
TEST_GEN_PROGS_riscv += riscv/sbi_pmu_test
TEST_GEN_PROGS_riscv += riscv/ebreak_test
+TEST_GEN_PROGS_riscv += access_tracking_perf_test
TEST_GEN_PROGS_riscv += arch_timer
TEST_GEN_PROGS_riscv += coalesced_io_test
+TEST_GEN_PROGS_riscv += dirty_log_perf_test
TEST_GEN_PROGS_riscv += get-reg-list
+TEST_GEN_PROGS_riscv += memslot_modification_stress_test
+TEST_GEN_PROGS_riscv += memslot_perf_test
+TEST_GEN_PROGS_riscv += mmu_stress_test
+TEST_GEN_PROGS_riscv += rseq_test
TEST_GEN_PROGS_riscv += steal_time
+TEST_GEN_PROGS_loongarch = arch_timer
TEST_GEN_PROGS_loongarch += coalesced_io_test
TEST_GEN_PROGS_loongarch += demand_paging_test
TEST_GEN_PROGS_loongarch += dirty_log_perf_test
diff --git a/tools/testing/selftests/kvm/access_tracking_perf_test.c b/tools/testing/selftests/kvm/access_tracking_perf_test.c
index da7196fd1b23..b058f27b2141 100644
--- a/tools/testing/selftests/kvm/access_tracking_perf_test.c
+++ b/tools/testing/selftests/kvm/access_tracking_perf_test.c
@@ -50,6 +50,7 @@
#include "memstress.h"
#include "guest_modes.h"
#include "processor.h"
+#include "ucall_common.h"
#include "cgroup_util.h"
#include "lru_gen_util.h"
@@ -596,11 +597,8 @@ int main(int argc, char *argv[])
if (ret)
return ret;
} else {
- page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
- __TEST_REQUIRE(page_idle_fd >= 0,
- "Couldn't open /sys/kernel/mm/page_idle/bitmap. "
- "Is CONFIG_IDLE_PAGE_TRACKING enabled?");
-
+ page_idle_fd = __open_path_or_exit("/sys/kernel/mm/page_idle/bitmap", O_RDWR,
+ "Is CONFIG_IDLE_PAGE_TRACKING enabled?");
close(page_idle_fd);
puts("Using page_idle for aging");
diff --git a/tools/testing/selftests/kvm/arch_timer.c b/tools/testing/selftests/kvm/arch_timer.c
index acb2cb596332..cf8fb67104f1 100644
--- a/tools/testing/selftests/kvm/arch_timer.c
+++ b/tools/testing/selftests/kvm/arch_timer.c
@@ -98,16 +98,11 @@ static uint32_t test_get_pcpu(void)
static int test_migrate_vcpu(unsigned int vcpu_idx)
{
int ret;
- cpu_set_t cpuset;
uint32_t new_pcpu = test_get_pcpu();
- CPU_ZERO(&cpuset);
- CPU_SET(new_pcpu, &cpuset);
-
pr_debug("Migrating vCPU: %u to pCPU: %u\n", vcpu_idx, new_pcpu);
- ret = pthread_setaffinity_np(pt_vcpu_run[vcpu_idx],
- sizeof(cpuset), &cpuset);
+ ret = __pin_task_to_cpu(pt_vcpu_run[vcpu_idx], new_pcpu);
/* Allow the error where the vCPU thread is already finished */
TEST_ASSERT(ret == 0 || ret == ESRCH,
diff --git a/tools/testing/selftests/kvm/arm64/aarch32_id_regs.c b/tools/testing/selftests/kvm/arm64/aarch32_id_regs.c
index cef8f7323ceb..713005b6f508 100644
--- a/tools/testing/selftests/kvm/arm64/aarch32_id_regs.c
+++ b/tools/testing/selftests/kvm/arm64/aarch32_id_regs.c
@@ -146,7 +146,7 @@ static bool vcpu_aarch64_only(struct kvm_vcpu *vcpu)
val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
- el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);
+ el0 = FIELD_GET(ID_AA64PFR0_EL1_EL0, val);
return el0 == ID_AA64PFR0_EL1_EL0_IMP;
}
diff --git a/tools/testing/selftests/kvm/arm64/arch_timer.c b/tools/testing/selftests/kvm/arm64/arch_timer.c
index eeba1cc87ff8..d592a4515399 100644
--- a/tools/testing/selftests/kvm/arm64/arch_timer.c
+++ b/tools/testing/selftests/kvm/arm64/arch_timer.c
@@ -165,10 +165,8 @@ static void guest_code(void)
static void test_init_timer_irq(struct kvm_vm *vm)
{
/* Timer initid should be same for all the vCPUs, so query only vCPU-0 */
- vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL,
- KVM_ARM_VCPU_TIMER_IRQ_PTIMER, &ptimer_irq);
- vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL,
- KVM_ARM_VCPU_TIMER_IRQ_VTIMER, &vtimer_irq);
+ ptimer_irq = vcpu_get_ptimer_irq(vcpus[0]);
+ vtimer_irq = vcpu_get_vtimer_irq(vcpus[0]);
sync_global_to_guest(vm, ptimer_irq);
sync_global_to_guest(vm, vtimer_irq);
@@ -176,14 +174,14 @@ static void test_init_timer_irq(struct kvm_vm *vm)
pr_debug("ptimer_irq: %d; vtimer_irq: %d\n", ptimer_irq, vtimer_irq);
}
-static int gic_fd;
-
struct kvm_vm *test_vm_create(void)
{
struct kvm_vm *vm;
unsigned int i;
int nr_vcpus = test_args.nr_vcpus;
+ TEST_REQUIRE(kvm_supports_vgic_v3());
+
vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);
vm_init_descriptor_tables(vm);
@@ -204,8 +202,6 @@ struct kvm_vm *test_vm_create(void)
vcpu_init_descriptor_tables(vcpus[i]);
test_init_timer_irq(vm);
- gic_fd = vgic_v3_setup(vm, nr_vcpus, 64);
- __TEST_REQUIRE(gic_fd >= 0, "Failed to create vgic-v3");
/* Make all the test's cmdline args visible to the guest */
sync_global_to_guest(vm, test_args);
@@ -215,6 +211,5 @@ struct kvm_vm *test_vm_create(void)
void test_vm_cleanup(struct kvm_vm *vm)
{
- close(gic_fd);
kvm_vm_free(vm);
}
diff --git a/tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c b/tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c
index a36a7e2db434..993c9e38e729 100644
--- a/tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c
+++ b/tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c
@@ -22,7 +22,8 @@
#include "gic.h"
#include "vgic.h"
-static const uint64_t CVAL_MAX = ~0ULL;
+/* Depends on counter width. */
+static uint64_t CVAL_MAX;
/* tval is a signed 32-bit int. */
static const int32_t TVAL_MAX = INT32_MAX;
static const int32_t TVAL_MIN = INT32_MIN;
@@ -30,8 +31,8 @@ static const int32_t TVAL_MIN = INT32_MIN;
/* After how much time we say there is no IRQ. */
static const uint32_t TIMEOUT_NO_IRQ_US = 50000;
-/* A nice counter value to use as the starting one for most tests. */
-static const uint64_t DEF_CNT = (CVAL_MAX / 2);
+/* Counter value to use as the starting one for most tests. Set to CVAL_MAX/2 */
+static uint64_t DEF_CNT;
/* Number of runs. */
static const uint32_t NR_TEST_ITERS_DEF = 5;
@@ -191,8 +192,8 @@ static void set_tval_irq(enum arch_timer timer, uint64_t tval_cycles,
{
atomic_set(&shared_data.handled, 0);
atomic_set(&shared_data.spurious, 0);
- timer_set_ctl(timer, ctl);
timer_set_tval(timer, tval_cycles);
+ timer_set_ctl(timer, ctl);
}
static void set_xval_irq(enum arch_timer timer, uint64_t xval, uint32_t ctl,
@@ -732,12 +733,6 @@ static void test_move_counters_ahead_of_timers(enum arch_timer timer)
test_set_cnt_after_tval(timer, 0, tval, (uint64_t) tval + 1,
wm);
}
-
- for (i = 0; i < ARRAY_SIZE(sleep_method); i++) {
- sleep_method_t sm = sleep_method[i];
-
- test_set_cnt_after_cval_no_irq(timer, 0, DEF_CNT, CVAL_MAX, sm);
- }
}
/*
@@ -849,17 +844,17 @@ static void guest_code(enum arch_timer timer)
GUEST_DONE();
}
+static cpu_set_t default_cpuset;
+
static uint32_t next_pcpu(void)
{
uint32_t max = get_nprocs();
uint32_t cur = sched_getcpu();
uint32_t next = cur;
- cpu_set_t cpuset;
+ cpu_set_t cpuset = default_cpuset;
TEST_ASSERT(max > 1, "Need at least two physical cpus");
- sched_getaffinity(0, sizeof(cpuset), &cpuset);
-
do {
next = (next + 1) % CPU_SETSIZE;
} while (!CPU_ISSET(next, &cpuset));
@@ -867,25 +862,6 @@ static uint32_t next_pcpu(void)
return next;
}
-static void migrate_self(uint32_t new_pcpu)
-{
- int ret;
- cpu_set_t cpuset;
- pthread_t thread;
-
- thread = pthread_self();
-
- CPU_ZERO(&cpuset);
- CPU_SET(new_pcpu, &cpuset);
-
- pr_debug("Migrating from %u to %u\n", sched_getcpu(), new_pcpu);
-
- ret = pthread_setaffinity_np(thread, sizeof(cpuset), &cpuset);
-
- TEST_ASSERT(ret == 0, "Failed to migrate to pCPU: %u; ret: %d\n",
- new_pcpu, ret);
-}
-
static void kvm_set_cntxct(struct kvm_vcpu *vcpu, uint64_t cnt,
enum arch_timer timer)
{
@@ -912,7 +888,7 @@ static void handle_sync(struct kvm_vcpu *vcpu, struct ucall *uc)
sched_yield();
break;
case USERSPACE_MIGRATE_SELF:
- migrate_self(next_pcpu());
+ pin_self_to_cpu(next_pcpu());
break;
default:
break;
@@ -924,7 +900,7 @@ static void test_run(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
struct ucall uc;
/* Start on CPU 0 */
- migrate_self(0);
+ pin_self_to_cpu(0);
while (true) {
vcpu_run(vcpu);
@@ -948,10 +924,8 @@ static void test_run(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
static void test_init_timer_irq(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
- vcpu_device_attr_get(vcpu, KVM_ARM_VCPU_TIMER_CTRL,
- KVM_ARM_VCPU_TIMER_IRQ_PTIMER, &ptimer_irq);
- vcpu_device_attr_get(vcpu, KVM_ARM_VCPU_TIMER_CTRL,
- KVM_ARM_VCPU_TIMER_IRQ_VTIMER, &vtimer_irq);
+ ptimer_irq = vcpu_get_ptimer_irq(vcpu);
+ vtimer_irq = vcpu_get_vtimer_irq(vcpu);
sync_global_to_guest(vm, ptimer_irq);
sync_global_to_guest(vm, vtimer_irq);
@@ -973,8 +947,15 @@ static void test_vm_create(struct kvm_vm **vm, struct kvm_vcpu **vcpu,
vcpu_args_set(*vcpu, 1, timer);
test_init_timer_irq(*vm, *vcpu);
- vgic_v3_setup(*vm, 1, 64);
+
sync_global_to_guest(*vm, test_args);
+ sync_global_to_guest(*vm, CVAL_MAX);
+ sync_global_to_guest(*vm, DEF_CNT);
+}
+
+static void test_vm_cleanup(struct kvm_vm *vm)
+{
+ kvm_vm_free(vm);
}
static void test_print_help(char *name)
@@ -986,7 +967,7 @@ static void test_print_help(char *name)
pr_info("\t-b: Test both physical and virtual timers (default: true)\n");
pr_info("\t-l: Delta (in ms) used for long wait time test (default: %u)\n",
LONG_WAIT_TEST_MS);
- pr_info("\t-l: Delta (in ms) used for wait times (default: %u)\n",
+ pr_info("\t-w: Delta (in ms) used for wait times (default: %u)\n",
WAIT_TEST_MS);
pr_info("\t-p: Test physical timer (default: true)\n");
pr_info("\t-v: Test virtual timer (default: true)\n");
@@ -1035,6 +1016,17 @@ static bool parse_args(int argc, char *argv[])
return false;
}
+static void set_counter_defaults(void)
+{
+ const uint64_t MIN_ROLLOVER_SECS = 40ULL * 365 * 24 * 3600;
+ uint64_t freq = read_sysreg(CNTFRQ_EL0);
+ int width = ilog2(MIN_ROLLOVER_SECS * freq);
+
+ width = clamp(width, 56, 64);
+ CVAL_MAX = GENMASK_ULL(width - 1, 0);
+ DEF_CNT = CVAL_MAX / 2;
+}
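+
+/*
+ * Worked example (illustrative): with CNTFRQ_EL0 = 1 GHz,
+ * MIN_ROLLOVER_SECS * freq = 40 * 365 * 24 * 3600 * 1e9 ~= 1.26e18
+ * cycles, so ilog2() yields 60 (2^60 ~= 1.15e18) and clamp() keeps it
+ * in [56, 64]. CVAL_MAX is then GENMASK_ULL(59, 0) = 2^60 - 1, and
+ * DEF_CNT is half of that.
+ */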
+
int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
@@ -1043,19 +1035,24 @@ int main(int argc, char *argv[])
/* Tell stdout not to buffer its content */
setbuf(stdout, NULL);
+ TEST_REQUIRE(kvm_supports_vgic_v3());
+
if (!parse_args(argc, argv))
exit(KSFT_SKIP);
+ sched_getaffinity(0, sizeof(default_cpuset), &default_cpuset);
+ set_counter_defaults();
+
if (test_args.test_virtual) {
test_vm_create(&vm, &vcpu, VIRTUAL);
test_run(vm, vcpu);
- kvm_vm_free(vm);
+ test_vm_cleanup(vm);
}
if (test_args.test_physical) {
test_vm_create(&vm, &vcpu, PHYSICAL);
test_run(vm, vcpu);
- kvm_vm_free(vm);
+ test_vm_cleanup(vm);
}
return 0;
diff --git a/tools/testing/selftests/kvm/arm64/at.c b/tools/testing/selftests/kvm/arm64/at.c
new file mode 100644
index 000000000000..c8ee6f520734
--- /dev/null
+++ b/tools/testing/selftests/kvm/arm64/at.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * at - Test for KVM's AT emulation in the EL2&0 and EL1&0 translation regimes.
+ */
+#include "kvm_util.h"
+#include "processor.h"
+#include "test_util.h"
+#include "ucall.h"
+
+#include <asm/sysreg.h>
+
+#define TEST_ADDR 0x80000000
+
+enum {
+ CLEAR_ACCESS_FLAG,
+ TEST_ACCESS_FLAG,
+};
+
+static u64 *ptep_hva;
+
+#define copy_el2_to_el1(reg) \
+ write_sysreg_s(read_sysreg_s(SYS_##reg##_EL1), SYS_##reg##_EL12)
+
+/* Yes, this is an ugly hack */
+#define __at(op, addr) write_sysreg_s(addr, op)
+
+#define test_at_insn(op, expect_fault) \
+do { \
+ u64 par, fsc; \
+ bool fault; \
+ \
+ GUEST_SYNC(CLEAR_ACCESS_FLAG); \
+ \
+ __at(OP_AT_##op, TEST_ADDR); \
+ isb(); \
+ par = read_sysreg(par_el1); \
+ \
+ fault = par & SYS_PAR_EL1_F; \
+ fsc = FIELD_GET(SYS_PAR_EL1_FST, par); \
+ \
+ __GUEST_ASSERT((expect_fault) == fault, \
+ "AT "#op": %sexpected fault (par: %lx)1", \
+ (expect_fault) ? "" : "un", par); \
+ if ((expect_fault)) { \
+ __GUEST_ASSERT(fsc == ESR_ELx_FSC_ACCESS_L(3), \
+ "AT "#op": expected access flag fault (par: %lx)", \
+ par); \
+ } else { \
+ GUEST_ASSERT_EQ(FIELD_GET(SYS_PAR_EL1_ATTR, par), MAIR_ATTR_NORMAL); \
+ GUEST_ASSERT_EQ(FIELD_GET(SYS_PAR_EL1_SH, par), PTE_SHARED >> 8); \
+ GUEST_ASSERT_EQ(par & SYS_PAR_EL1_PA, TEST_ADDR); \
+ GUEST_SYNC(TEST_ACCESS_FLAG); \
+ } \
+} while (0)
+
+static void test_at(bool expect_fault)
+{
+ test_at_insn(S1E2R, expect_fault);
+ test_at_insn(S1E2W, expect_fault);
+
+ /* Reuse the stage-1 MMU context from EL2 at EL1 */
+ copy_el2_to_el1(SCTLR);
+ copy_el2_to_el1(MAIR);
+ copy_el2_to_el1(TCR);
+ copy_el2_to_el1(TTBR0);
+ copy_el2_to_el1(TTBR1);
+
+ /* Disable stage-2 translation and enter a non-host context */
+ write_sysreg(0, vtcr_el2);
+ write_sysreg(0, vttbr_el2);
+ sysreg_clear_set(hcr_el2, HCR_EL2_TGE | HCR_EL2_VM, 0);
+ isb();
+
+ test_at_insn(S1E1R, expect_fault);
+ test_at_insn(S1E1W, expect_fault);
+}
+
+static void guest_code(void)
+{
+ sysreg_clear_set(tcr_el1, TCR_HA, 0);
+ isb();
+
+ test_at(true);
+
+ if (!SYS_FIELD_GET(ID_AA64MMFR1_EL1, HAFDBS, read_sysreg(id_aa64mmfr1_el1)))
+ GUEST_DONE();
+
+ /*
+ * KVM's software PTW makes the implementation choice that the AT
+ * instruction sets the access flag.
+ */
+ sysreg_clear_set(tcr_el1, 0, TCR_HA);
+ isb();
+ test_at(false);
+
+ GUEST_DONE();
+}
+
+static void handle_sync(struct kvm_vcpu *vcpu, struct ucall *uc)
+{
+ switch (uc->args[1]) {
+ case CLEAR_ACCESS_FLAG:
+ /*
+ * Delete + reinstall the memslot to invalidate stage-2
+ * mappings of the stage-1 page tables, forcing KVM to
+ * use the 'slow' AT emulation path.
+ *
+ * This and clearing the access flag from host userspace
+ * ensures that the access flag cannot be set speculatively
+ * and is reliably cleared at the time of the AT instruction.
+ */
+ clear_bit(__ffs(PTE_AF), ptep_hva);
+ vm_mem_region_reload(vcpu->vm, vcpu->vm->memslots[MEM_REGION_PT]);
+ break;
+ case TEST_ACCESS_FLAG:
+ TEST_ASSERT(test_bit(__ffs(PTE_AF), ptep_hva),
+ "Expected access flag to be set (desc: %lu)", *ptep_hva);
+ break;
+ default:
+ TEST_FAIL("Unexpected SYNC arg: %lu", uc->args[1]);
+ }
+}
+
+static void run_test(struct kvm_vcpu *vcpu)
+{
+ struct ucall uc;
+
+ while (true) {
+ vcpu_run(vcpu);
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_DONE:
+ return;
+ case UCALL_SYNC:
+ handle_sync(vcpu, &uc);
+ continue;
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT(uc);
+ return;
+ default:
+ TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
+ }
+ }
+}
+
+int main(void)
+{
+ struct kvm_vcpu_init init;
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+
+ TEST_REQUIRE(kvm_check_cap(KVM_CAP_ARM_EL2));
+
+ vm = vm_create(1);
+
+ kvm_get_default_vcpu_target(vm, &init);
+ init.features[0] |= BIT(KVM_ARM_VCPU_HAS_EL2);
+ vcpu = aarch64_vcpu_add(vm, 0, &init, guest_code);
+ kvm_arch_vm_finalize_vcpus(vm);
+
+ virt_map(vm, TEST_ADDR, TEST_ADDR, 1);
+ ptep_hva = virt_get_pte_hva_at_level(vm, TEST_ADDR, 3);
+ run_test(vcpu);
+
+ kvm_vm_free(vm);
+ return 0;
+}
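
The test_at_insn() macro in at.c above keys its assertions entirely off PAR_EL1. A hedged sketch of the decode it depends on, using the architectural layout (F is bit 0, FST is bits [6:1]); the constant names here are illustrative, not the selftest headers:

#include <stdbool.h>
#include <stdint.h>

#define PAR_F		(1ULL << 0)	/* translation faulted */
#define PAR_FST_SHIFT	1
#define PAR_FST_MASK	0x3fULL		/* fault status code, bits [6:1] */

static inline bool par_faulted(uint64_t par)
{
	return par & PAR_F;
}

static inline uint64_t par_fsc(uint64_t par)
{
	return (par >> PAR_FST_SHIFT) & PAR_FST_MASK;
}
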
diff --git a/tools/testing/selftests/kvm/arm64/debug-exceptions.c b/tools/testing/selftests/kvm/arm64/debug-exceptions.c
index c7fb55c9135b..1d431de8729c 100644
--- a/tools/testing/selftests/kvm/arm64/debug-exceptions.c
+++ b/tools/testing/selftests/kvm/arm64/debug-exceptions.c
@@ -116,12 +116,12 @@ static void reset_debug_state(void)
/* Reset all bcr/bvr/wcr/wvr registers */
dfr0 = read_sysreg(id_aa64dfr0_el1);
- brps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_BRPs), dfr0);
+ brps = FIELD_GET(ID_AA64DFR0_EL1_BRPs, dfr0);
for (i = 0; i <= brps; i++) {
write_dbgbcr(i, 0);
write_dbgbvr(i, 0);
}
- wrps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_WRPs), dfr0);
+ wrps = FIELD_GET(ID_AA64DFR0_EL1_WRPs, dfr0);
for (i = 0; i <= wrps; i++) {
write_dbgwcr(i, 0);
write_dbgwvr(i, 0);
@@ -140,7 +140,7 @@ static void enable_os_lock(void)
static void enable_monitor_debug_exceptions(void)
{
- uint32_t mdscr;
+ uint64_t mdscr;
asm volatile("msr daifclr, #8");
@@ -223,7 +223,7 @@ void install_hw_bp_ctx(uint8_t addr_bp, uint8_t ctx_bp, uint64_t addr,
static void install_ss(void)
{
- uint32_t mdscr;
+ uint64_t mdscr;
asm volatile("msr daifclr, #8");
@@ -418,7 +418,7 @@ static void guest_code_ss(int test_cnt)
static int debug_version(uint64_t id_aa64dfr0)
{
- return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), id_aa64dfr0);
+ return FIELD_GET(ID_AA64DFR0_EL1_DebugVer, id_aa64dfr0);
}
static void test_guest_debug_exceptions(uint8_t bpn, uint8_t wpn, uint8_t ctx_bpn)
@@ -539,14 +539,14 @@ void test_guest_debug_exceptions_all(uint64_t aa64dfr0)
int b, w, c;
/* Number of breakpoints */
- brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_BRPs), aa64dfr0) + 1;
+ brp_num = FIELD_GET(ID_AA64DFR0_EL1_BRPs, aa64dfr0) + 1;
__TEST_REQUIRE(brp_num >= 2, "At least two breakpoints are required");
/* Number of watchpoints */
- wrp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_WRPs), aa64dfr0) + 1;
+ wrp_num = FIELD_GET(ID_AA64DFR0_EL1_WRPs, aa64dfr0) + 1;
/* Number of context aware breakpoints */
- ctx_brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_CTX_CMPs), aa64dfr0) + 1;
+ ctx_brp_num = FIELD_GET(ID_AA64DFR0_EL1_CTX_CMPs, aa64dfr0) + 1;
pr_debug("%s brp_num:%d, wrp_num:%d, ctx_brp_num:%d\n", __func__,
brp_num, wrp_num, ctx_brp_num);
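
The debug-exceptions.c conversion above assumes the generated ID register field names now expand directly to their masks, so FIELD_GET() can consume them without the ARM64_FEATURE_MASK() wrapper. A self-contained sketch of the semantics, with an assumed mask value for illustration (ID_AA64DFR0_EL1.BRPs lives at bits [15:12]):

#include <stdint.h>
#include <stdio.h>

#define ID_AA64DFR0_EL1_BRPs	0xf000ULL	/* assumed mask-style definition */

/* FIELD_GET(mask, reg): extract and right-justify the field under mask. */
static uint64_t field_get(uint64_t mask, uint64_t reg)
{
	return (reg & mask) >> __builtin_ctzll(mask);
}

int main(void)
{
	uint64_t dfr0 = 0x5000;		/* hypothetical: 6 breakpoints (BRPs == 5) */

	printf("brps=%llu\n",
	       (unsigned long long)field_get(ID_AA64DFR0_EL1_BRPs, dfr0));
	return 0;
}
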
diff --git a/tools/testing/selftests/kvm/arm64/external_aborts.c b/tools/testing/selftests/kvm/arm64/external_aborts.c
new file mode 100644
index 000000000000..d8fe17a6cc59
--- /dev/null
+++ b/tools/testing/selftests/kvm/arm64/external_aborts.c
@@ -0,0 +1,415 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * external_abort - Tests for userspace external abort injection
+ *
+ * Copyright (c) 2024 Google LLC
+ */
+#include "processor.h"
+#include "test_util.h"
+
+#define MMIO_ADDR 0x8000000ULL
+#define EXPECTED_SERROR_ISS (ESR_ELx_ISV | 0x1d1ed)
+
+static u64 expected_abort_pc;
+
+static void expect_sea_handler(struct ex_regs *regs)
+{
+ u64 esr = read_sysreg(esr_el1);
+
+ GUEST_ASSERT_EQ(regs->pc, expected_abort_pc);
+ GUEST_ASSERT_EQ(ESR_ELx_EC(esr), ESR_ELx_EC_DABT_CUR);
+ GUEST_ASSERT_EQ(esr & ESR_ELx_FSC_TYPE, ESR_ELx_FSC_EXTABT);
+
+ GUEST_DONE();
+}
+
+static void unexpected_dabt_handler(struct ex_regs *regs)
+{
+ GUEST_FAIL("Unexpected data abort at PC: %lx\n", regs->pc);
+}
+
+static struct kvm_vm *vm_create_with_dabt_handler(struct kvm_vcpu **vcpu, void *guest_code,
+ handler_fn dabt_handler)
+{
+ struct kvm_vm *vm = vm_create_with_one_vcpu(vcpu, guest_code);
+
+ vm_init_descriptor_tables(vm);
+ vcpu_init_descriptor_tables(*vcpu);
+ vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT, ESR_ELx_EC_DABT_CUR, dabt_handler);
+
+ virt_map(vm, MMIO_ADDR, MMIO_ADDR, 1);
+
+ return vm;
+}
+
+static void vcpu_inject_sea(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu_events events = {};
+
+ events.exception.ext_dabt_pending = true;
+ vcpu_events_set(vcpu, &events);
+}
+
+static bool vcpu_has_ras(struct kvm_vcpu *vcpu)
+{
+ u64 pfr0 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
+
+ return SYS_FIELD_GET(ID_AA64PFR0_EL1, RAS, pfr0);
+}
+
+static bool guest_has_ras(void)
+{
+ return SYS_FIELD_GET(ID_AA64PFR0_EL1, RAS, read_sysreg(id_aa64pfr0_el1));
+}
+
+static void vcpu_inject_serror(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu_events events = {};
+
+ events.exception.serror_pending = true;
+ if (vcpu_has_ras(vcpu)) {
+ events.exception.serror_has_esr = true;
+ events.exception.serror_esr = EXPECTED_SERROR_ISS;
+ }
+
+ vcpu_events_set(vcpu, &events);
+}
+
+static void __vcpu_run_expect(struct kvm_vcpu *vcpu, unsigned int cmd)
+{
+ struct ucall uc;
+
+ vcpu_run(vcpu);
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT(uc);
+ break;
+ default:
+ if (uc.cmd == cmd)
+ return;
+
+ TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
+ }
+}
+
+static void vcpu_run_expect_done(struct kvm_vcpu *vcpu)
+{
+ __vcpu_run_expect(vcpu, UCALL_DONE);
+}
+
+static void vcpu_run_expect_sync(struct kvm_vcpu *vcpu)
+{
+ __vcpu_run_expect(vcpu, UCALL_SYNC);
+}
+
+extern char test_mmio_abort_insn;
+
+static noinline void test_mmio_abort_guest(void)
+{
+ WRITE_ONCE(expected_abort_pc, (u64)&test_mmio_abort_insn);
+
+ asm volatile("test_mmio_abort_insn:\n\t"
+ "ldr x0, [%0]\n\t"
+ : : "r" (MMIO_ADDR) : "x0", "memory");
+
+ GUEST_FAIL("MMIO instruction should not retire");
+}
+
+/*
+ * Test that KVM doesn't complete MMIO emulation when userspace has made an
+ * external abort pending for the instruction.
+ */
+static void test_mmio_abort(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_mmio_abort_guest,
+ expect_sea_handler);
+ struct kvm_run *run = vcpu->run;
+
+ vcpu_run(vcpu);
+ TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_MMIO);
+ TEST_ASSERT_EQ(run->mmio.phys_addr, MMIO_ADDR);
+ TEST_ASSERT_EQ(run->mmio.len, sizeof(unsigned long));
+ TEST_ASSERT(!run->mmio.is_write, "Expected MMIO read");
+
+ vcpu_inject_sea(vcpu);
+ vcpu_run_expect_done(vcpu);
+ kvm_vm_free(vm);
+}
+
+extern char test_mmio_nisv_insn;
+
+static void test_mmio_nisv_guest(void)
+{
+ WRITE_ONCE(expected_abort_pc, (u64)&test_mmio_nisv_insn);
+
+ asm volatile("test_mmio_nisv_insn:\n\t"
+ "ldr x0, [%0], #8\n\t"
+ : : "r" (MMIO_ADDR) : "x0", "memory");
+
+ GUEST_FAIL("MMIO instruction should not retire");
+}
+
+/*
+ * Test that the KVM_RUN ioctl fails for ESR_EL2.ISV=0 MMIO aborts if userspace
+ * hasn't enabled KVM_CAP_ARM_NISV_TO_USER.
+ */
+static void test_mmio_nisv(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_mmio_nisv_guest,
+ unexpected_dabt_handler);
+
+ TEST_ASSERT(_vcpu_run(vcpu), "Expected nonzero return code from KVM_RUN");
+ TEST_ASSERT_EQ(errno, ENOSYS);
+
+ kvm_vm_free(vm);
+}
+
+/*
+ * Test that ESR_EL2.ISV=0 MMIO aborts reach userspace and that an injected SEA
+ * reaches the guest.
+ */
+static void test_mmio_nisv_abort(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_mmio_nisv_guest,
+ expect_sea_handler);
+ struct kvm_run *run = vcpu->run;
+
+ vm_enable_cap(vm, KVM_CAP_ARM_NISV_TO_USER, 1);
+
+ vcpu_run(vcpu);
+ TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_ARM_NISV);
+ TEST_ASSERT_EQ(run->arm_nisv.fault_ipa, MMIO_ADDR);
+
+ vcpu_inject_sea(vcpu);
+ vcpu_run_expect_done(vcpu);
+ kvm_vm_free(vm);
+}
+
+static void unexpected_serror_handler(struct ex_regs *regs)
+{
+ GUEST_FAIL("Took unexpected SError exception");
+}
+
+static void test_serror_masked_guest(void)
+{
+ GUEST_ASSERT(read_sysreg(isr_el1) & ISR_EL1_A);
+
+ isb();
+
+ GUEST_DONE();
+}
+
+static void test_serror_masked(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_serror_masked_guest,
+ unexpected_dabt_handler);
+
+ vm_install_exception_handler(vm, VECTOR_ERROR_CURRENT, unexpected_serror_handler);
+
+ vcpu_inject_serror(vcpu);
+ vcpu_run_expect_done(vcpu);
+ kvm_vm_free(vm);
+}
+
+static void expect_serror_handler(struct ex_regs *regs)
+{
+ u64 esr = read_sysreg(esr_el1);
+
+ GUEST_ASSERT_EQ(ESR_ELx_EC(esr), ESR_ELx_EC_SERROR);
+ if (guest_has_ras())
+ GUEST_ASSERT_EQ(ESR_ELx_ISS(esr), EXPECTED_SERROR_ISS);
+
+ GUEST_DONE();
+}
+
+static void test_serror_guest(void)
+{
+ GUEST_ASSERT(read_sysreg(isr_el1) & ISR_EL1_A);
+
+ local_serror_enable();
+ isb();
+ local_serror_disable();
+
+ GUEST_FAIL("Should've taken pending SError exception");
+}
+
+static void test_serror(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_serror_guest,
+ unexpected_dabt_handler);
+
+ vm_install_exception_handler(vm, VECTOR_ERROR_CURRENT, expect_serror_handler);
+
+ vcpu_inject_serror(vcpu);
+ vcpu_run_expect_done(vcpu);
+ kvm_vm_free(vm);
+}
+
+static void expect_sea_s1ptw_handler(struct ex_regs *regs)
+{
+ u64 esr = read_sysreg(esr_el1);
+
+ GUEST_ASSERT_EQ(regs->pc, expected_abort_pc);
+ GUEST_ASSERT_EQ(ESR_ELx_EC(esr), ESR_ELx_EC_DABT_CUR);
+ GUEST_ASSERT_EQ((esr & ESR_ELx_FSC), ESR_ELx_FSC_SEA_TTW(3));
+
+ GUEST_DONE();
+}
+
+static noinline void test_s1ptw_abort_guest(void)
+{
+ extern char test_s1ptw_abort_insn;
+
+ WRITE_ONCE(expected_abort_pc, (u64)&test_s1ptw_abort_insn);
+
+ asm volatile("test_s1ptw_abort_insn:\n\t"
+ "ldr x0, [%0]\n\t"
+ : : "r" (MMIO_ADDR) : "x0", "memory");
+
+ GUEST_FAIL("Load on S1PTW abort should not retire");
+}
+
+static void test_s1ptw_abort(void)
+{
+ struct kvm_vcpu *vcpu;
+ u64 *ptep, bad_pa;
+ struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_s1ptw_abort_guest,
+ expect_sea_s1ptw_handler);
+
+ ptep = virt_get_pte_hva_at_level(vm, MMIO_ADDR, 2);
+ bad_pa = BIT(vm->pa_bits) - vm->page_size;
+
+ *ptep &= ~GENMASK(47, 12);
+ *ptep |= bad_pa;
+
+ vcpu_run_expect_done(vcpu);
+ kvm_vm_free(vm);
+}
+
+static void test_serror_emulated_guest(void)
+{
+ GUEST_ASSERT(!(read_sysreg(isr_el1) & ISR_EL1_A));
+
+ local_serror_enable();
+ GUEST_SYNC(0);
+ local_serror_disable();
+
+ GUEST_FAIL("Should've taken unmasked SError exception");
+}
+
+static void test_serror_emulated(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_serror_emulated_guest,
+ unexpected_dabt_handler);
+
+ vm_install_exception_handler(vm, VECTOR_ERROR_CURRENT, expect_serror_handler);
+
+ vcpu_run_expect_sync(vcpu);
+ vcpu_inject_serror(vcpu);
+ vcpu_run_expect_done(vcpu);
+ kvm_vm_free(vm);
+}
+
+static void test_mmio_ease_guest(void)
+{
+ sysreg_clear_set_s(SYS_SCTLR2_EL1, 0, SCTLR2_EL1_EASE);
+ isb();
+
+ test_mmio_abort_guest();
+}
+
+/*
+ * Test that KVM doesn't complete MMIO emulation when userspace has made an
+ * external abort pending for the instruction.
+ */
+static void test_mmio_ease(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_mmio_ease_guest,
+ unexpected_dabt_handler);
+ struct kvm_run *run = vcpu->run;
+ u64 pfr1;
+
+ pfr1 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));
+ if (!SYS_FIELD_GET(ID_AA64PFR1_EL1, DF2, pfr1)) {
+ pr_debug("Skipping %s\n", __func__);
+ return;
+ }
+
+ /*
+ * SCTLR2_ELx.EASE changes the exception vector to the SError vector but
+ * doesn't further modify the exception context (e.g. ESR_ELx, FAR_ELx).
+ */
+ vm_install_exception_handler(vm, VECTOR_ERROR_CURRENT, expect_sea_handler);
+
+ vcpu_run(vcpu);
+ TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_MMIO);
+ TEST_ASSERT_EQ(run->mmio.phys_addr, MMIO_ADDR);
+ TEST_ASSERT_EQ(run->mmio.len, sizeof(unsigned long));
+ TEST_ASSERT(!run->mmio.is_write, "Expected MMIO read");
+
+ vcpu_inject_sea(vcpu);
+ vcpu_run_expect_done(vcpu);
+ kvm_vm_free(vm);
+}
+
+static void test_serror_amo_guest(void)
+{
+ /*
+ * The ISB is entirely unnecessary (and highlights how FEAT_NV2 is borked)
+ * since the write is redirected to memory. But don't write (intentionally)
+ * broken code!
+ */
+ sysreg_clear_set(hcr_el2, HCR_EL2_AMO | HCR_EL2_TGE, 0);
+ isb();
+
+ GUEST_SYNC(0);
+ GUEST_ASSERT(read_sysreg(isr_el1) & ISR_EL1_A);
+
+ /*
+ * KVM treats the effective value of AMO as 1 when
+ * HCR_EL2.{E2H,TGE} = {1, 0}, meaning the SError will be taken when
+ * unmasked.
+ */
+ local_serror_enable();
+ isb();
+ local_serror_disable();
+
+ GUEST_FAIL("Should've taken pending SError exception");
+}
+
+static void test_serror_amo(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_serror_amo_guest,
+ unexpected_dabt_handler);
+
+ vm_install_exception_handler(vm, VECTOR_ERROR_CURRENT, expect_serror_handler);
+ vcpu_run_expect_sync(vcpu);
+ vcpu_inject_serror(vcpu);
+ vcpu_run_expect_done(vcpu);
+ kvm_vm_free(vm);
+}
+
+int main(void)
+{
+ test_mmio_abort();
+ test_mmio_nisv();
+ test_mmio_nisv_abort();
+ test_serror();
+ test_serror_masked();
+ test_serror_emulated();
+ test_mmio_ease();
+ test_s1ptw_abort();
+
+ if (!test_supports_el2())
+ return 0;
+
+ test_serror_amo();
+}
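
vcpu_inject_serror() above goes through the selftest vcpu_events_set() helper; at the UAPI level the same injection is a single KVM_SET_VCPU_EVENTS ioctl. A hedged sketch against the arm64 uapi (vcpu_fd is assumed to be an already-open vCPU descriptor):

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int inject_serror_raw(int vcpu_fd, __u64 iss)
{
	struct kvm_vcpu_events events;

	memset(&events, 0, sizeof(events));
	events.exception.serror_pending = 1;
	events.exception.serror_has_esr = 1;	/* only honored with FEAT_RAS */
	events.exception.serror_esr = iss;

	return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}
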
diff --git a/tools/testing/selftests/kvm/arm64/get-reg-list.c b/tools/testing/selftests/kvm/arm64/get-reg-list.c
index d01798b6b3b4..0a3a94c4cca1 100644
--- a/tools/testing/selftests/kvm/arm64/get-reg-list.c
+++ b/tools/testing/selftests/kvm/arm64/get-reg-list.c
@@ -15,6 +15,12 @@
#include "test_util.h"
#include "processor.h"
+#define SYS_REG(r) ARM64_SYS_REG(sys_reg_Op0(SYS_ ## r), \
+ sys_reg_Op1(SYS_ ## r), \
+ sys_reg_CRn(SYS_ ## r), \
+ sys_reg_CRm(SYS_ ## r), \
+ sys_reg_Op2(SYS_ ## r))
+
struct feature_id_reg {
__u64 reg;
__u64 id_reg;
@@ -22,37 +28,48 @@ struct feature_id_reg {
__u64 feat_min;
};
-static struct feature_id_reg feat_id_regs[] = {
- {
- ARM64_SYS_REG(3, 0, 2, 0, 3), /* TCR2_EL1 */
- ARM64_SYS_REG(3, 0, 0, 7, 3), /* ID_AA64MMFR3_EL1 */
- 0,
- 1
- },
- {
- ARM64_SYS_REG(3, 0, 10, 2, 2), /* PIRE0_EL1 */
- ARM64_SYS_REG(3, 0, 0, 7, 3), /* ID_AA64MMFR3_EL1 */
- 8,
- 1
- },
- {
- ARM64_SYS_REG(3, 0, 10, 2, 3), /* PIR_EL1 */
- ARM64_SYS_REG(3, 0, 0, 7, 3), /* ID_AA64MMFR3_EL1 */
- 8,
- 1
- },
- {
- ARM64_SYS_REG(3, 0, 10, 2, 4), /* POR_EL1 */
- ARM64_SYS_REG(3, 0, 0, 7, 3), /* ID_AA64MMFR3_EL1 */
- 16,
- 1
- },
- {
- ARM64_SYS_REG(3, 3, 10, 2, 4), /* POR_EL0 */
- ARM64_SYS_REG(3, 0, 0, 7, 3), /* ID_AA64MMFR3_EL1 */
- 16,
- 1
+#define FEAT(id, f, v) \
+ .id_reg = SYS_REG(id), \
+ .feat_shift = id ## _ ## f ## _SHIFT, \
+ .feat_min = id ## _ ## f ## _ ## v
+
+#define REG_FEAT(r, id, f, v) \
+ { \
+ .reg = SYS_REG(r), \
+ FEAT(id, f, v) \
}
+
+static struct feature_id_reg feat_id_regs[] = {
+ REG_FEAT(TCR2_EL1, ID_AA64MMFR3_EL1, TCRX, IMP),
+ REG_FEAT(TCR2_EL2, ID_AA64MMFR3_EL1, TCRX, IMP),
+ REG_FEAT(PIRE0_EL1, ID_AA64MMFR3_EL1, S1PIE, IMP),
+ REG_FEAT(PIRE0_EL2, ID_AA64MMFR3_EL1, S1PIE, IMP),
+ REG_FEAT(PIR_EL1, ID_AA64MMFR3_EL1, S1PIE, IMP),
+ REG_FEAT(PIR_EL2, ID_AA64MMFR3_EL1, S1PIE, IMP),
+ REG_FEAT(POR_EL1, ID_AA64MMFR3_EL1, S1POE, IMP),
+ REG_FEAT(POR_EL0, ID_AA64MMFR3_EL1, S1POE, IMP),
+ REG_FEAT(POR_EL2, ID_AA64MMFR3_EL1, S1POE, IMP),
+ REG_FEAT(HCRX_EL2, ID_AA64MMFR1_EL1, HCX, IMP),
+ REG_FEAT(HFGRTR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
+ REG_FEAT(HFGWTR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
+ REG_FEAT(HFGITR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
+ REG_FEAT(HDFGRTR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
+ REG_FEAT(HDFGWTR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
+ REG_FEAT(HAFGRTR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
+ REG_FEAT(HFGRTR2_EL2, ID_AA64MMFR0_EL1, FGT, FGT2),
+ REG_FEAT(HFGWTR2_EL2, ID_AA64MMFR0_EL1, FGT, FGT2),
+ REG_FEAT(HFGITR2_EL2, ID_AA64MMFR0_EL1, FGT, FGT2),
+ REG_FEAT(HDFGRTR2_EL2, ID_AA64MMFR0_EL1, FGT, FGT2),
+ REG_FEAT(HDFGWTR2_EL2, ID_AA64MMFR0_EL1, FGT, FGT2),
+ REG_FEAT(ZCR_EL2, ID_AA64PFR0_EL1, SVE, IMP),
+ REG_FEAT(SCTLR2_EL1, ID_AA64MMFR3_EL1, SCTLRX, IMP),
+ REG_FEAT(SCTLR2_EL2, ID_AA64MMFR3_EL1, SCTLRX, IMP),
+ REG_FEAT(VDISR_EL2, ID_AA64PFR0_EL1, RAS, IMP),
+ REG_FEAT(VSESR_EL2, ID_AA64PFR0_EL1, RAS, IMP),
+ REG_FEAT(VNCR_EL2, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY),
+ REG_FEAT(CNTHV_CTL_EL2, ID_AA64MMFR1_EL1, VH, IMP),
+ REG_FEAT(CNTHV_CVAL_EL2, ID_AA64MMFR1_EL1, VH, IMP),
};
bool filter_reg(__u64 reg)
@@ -333,9 +350,20 @@ static __u64 base_regs[] = {
KVM_REG_ARM_FW_FEAT_BMAP_REG(1), /* KVM_REG_ARM_STD_HYP_BMAP */
KVM_REG_ARM_FW_FEAT_BMAP_REG(2), /* KVM_REG_ARM_VENDOR_HYP_BMAP */
KVM_REG_ARM_FW_FEAT_BMAP_REG(3), /* KVM_REG_ARM_VENDOR_HYP_BMAP_2 */
- ARM64_SYS_REG(3, 3, 14, 3, 1), /* CNTV_CTL_EL0 */
- ARM64_SYS_REG(3, 3, 14, 3, 2), /* CNTV_CVAL_EL0 */
- ARM64_SYS_REG(3, 3, 14, 0, 2),
+
+ /*
+ * EL0 Virtual Timer Registers
+ *
+ * WARNING:
+ * KVM_REG_ARM_TIMER_CVAL and KVM_REG_ARM_TIMER_CNT are not defined
+ * with the appropriate register encodings. Their values have been
* accidentally swapped. As this API is set in stone, the definitions here
+ * must be used, rather than ones derived from the encodings.
+ */
+ KVM_ARM64_SYS_REG(SYS_CNTV_CTL_EL0),
+ KVM_REG_ARM_TIMER_CVAL,
+ KVM_REG_ARM_TIMER_CNT,
+
ARM64_SYS_REG(3, 0, 0, 0, 0), /* MIDR_EL1 */
ARM64_SYS_REG(3, 0, 0, 0, 6), /* REVIDR_EL1 */
ARM64_SYS_REG(3, 1, 0, 0, 1), /* CLIDR_EL1 */
@@ -469,6 +497,7 @@ static __u64 base_regs[] = {
ARM64_SYS_REG(3, 0, 1, 0, 0), /* SCTLR_EL1 */
ARM64_SYS_REG(3, 0, 1, 0, 1), /* ACTLR_EL1 */
ARM64_SYS_REG(3, 0, 1, 0, 2), /* CPACR_EL1 */
+ KVM_ARM64_SYS_REG(SYS_SCTLR2_EL1),
ARM64_SYS_REG(3, 0, 2, 0, 0), /* TTBR0_EL1 */
ARM64_SYS_REG(3, 0, 2, 0, 1), /* TTBR1_EL1 */
ARM64_SYS_REG(3, 0, 2, 0, 2), /* TCR_EL1 */
@@ -686,6 +715,67 @@ static __u64 pauth_generic_regs[] = {
ARM64_SYS_REG(3, 0, 2, 3, 1), /* APGAKEYHI_EL1 */
};
+static __u64 el2_regs[] = {
+ SYS_REG(VPIDR_EL2),
+ SYS_REG(VMPIDR_EL2),
+ SYS_REG(SCTLR_EL2),
+ SYS_REG(ACTLR_EL2),
+ SYS_REG(SCTLR2_EL2),
+ SYS_REG(HCR_EL2),
+ SYS_REG(MDCR_EL2),
+ SYS_REG(CPTR_EL2),
+ SYS_REG(HSTR_EL2),
+ SYS_REG(HFGRTR_EL2),
+ SYS_REG(HFGWTR_EL2),
+ SYS_REG(HFGITR_EL2),
+ SYS_REG(HACR_EL2),
+ SYS_REG(ZCR_EL2),
+ SYS_REG(HCRX_EL2),
+ SYS_REG(TTBR0_EL2),
+ SYS_REG(TTBR1_EL2),
+ SYS_REG(TCR_EL2),
+ SYS_REG(TCR2_EL2),
+ SYS_REG(VTTBR_EL2),
+ SYS_REG(VTCR_EL2),
+ SYS_REG(VNCR_EL2),
+ SYS_REG(HDFGRTR2_EL2),
+ SYS_REG(HDFGWTR2_EL2),
+ SYS_REG(HFGRTR2_EL2),
+ SYS_REG(HFGWTR2_EL2),
+ SYS_REG(HDFGRTR_EL2),
+ SYS_REG(HDFGWTR_EL2),
+ SYS_REG(HAFGRTR_EL2),
+ SYS_REG(HFGITR2_EL2),
+ SYS_REG(SPSR_EL2),
+ SYS_REG(ELR_EL2),
+ SYS_REG(AFSR0_EL2),
+ SYS_REG(AFSR1_EL2),
+ SYS_REG(ESR_EL2),
+ SYS_REG(FAR_EL2),
+ SYS_REG(HPFAR_EL2),
+ SYS_REG(MAIR_EL2),
+ SYS_REG(PIRE0_EL2),
+ SYS_REG(PIR_EL2),
+ SYS_REG(POR_EL2),
+ SYS_REG(AMAIR_EL2),
+ SYS_REG(VBAR_EL2),
+ SYS_REG(CONTEXTIDR_EL2),
+ SYS_REG(TPIDR_EL2),
+ SYS_REG(CNTVOFF_EL2),
+ SYS_REG(CNTHCTL_EL2),
+ SYS_REG(CNTHP_CTL_EL2),
+ SYS_REG(CNTHP_CVAL_EL2),
+ SYS_REG(CNTHV_CTL_EL2),
+ SYS_REG(CNTHV_CVAL_EL2),
+ SYS_REG(SP_EL2),
+ SYS_REG(VDISR_EL2),
+ SYS_REG(VSESR_EL2),
+};
+
+static __u64 el2_e2h0_regs[] = {
+ /* Empty */
+};
+
#define BASE_SUBLIST \
{ "base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), }
#define VREGS_SUBLIST \
@@ -712,6 +802,23 @@ static __u64 pauth_generic_regs[] = {
.regs = pauth_generic_regs, \
.regs_n = ARRAY_SIZE(pauth_generic_regs), \
}
+#define EL2_SUBLIST \
+ { \
+ .name = "EL2", \
+ .capability = KVM_CAP_ARM_EL2, \
+ .feature = KVM_ARM_VCPU_HAS_EL2, \
+ .regs = el2_regs, \
+ .regs_n = ARRAY_SIZE(el2_regs), \
+ }
+#define EL2_E2H0_SUBLIST \
+ EL2_SUBLIST, \
+ { \
+ .name = "EL2 E2H0", \
+ .capability = KVM_CAP_ARM_EL2_E2H0, \
+ .feature = KVM_ARM_VCPU_HAS_EL2_E2H0, \
+ .regs = el2_e2h0_regs, \
+ .regs_n = ARRAY_SIZE(el2_e2h0_regs), \
+ }
static struct vcpu_reg_list vregs_config = {
.sublists = {
@@ -761,6 +868,124 @@ static struct vcpu_reg_list pauth_pmu_config = {
},
};
+static struct vcpu_reg_list el2_vregs_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ EL2_SUBLIST,
+ VREGS_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_reg_list el2_vregs_pmu_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ EL2_SUBLIST,
+ VREGS_SUBLIST,
+ PMU_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_reg_list el2_sve_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ EL2_SUBLIST,
+ SVE_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_reg_list el2_sve_pmu_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ EL2_SUBLIST,
+ SVE_SUBLIST,
+ PMU_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_reg_list el2_pauth_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ EL2_SUBLIST,
+ VREGS_SUBLIST,
+ PAUTH_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_reg_list el2_pauth_pmu_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ EL2_SUBLIST,
+ VREGS_SUBLIST,
+ PAUTH_SUBLIST,
+ PMU_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_reg_list el2_e2h0_vregs_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ EL2_E2H0_SUBLIST,
+ VREGS_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_reg_list el2_e2h0_vregs_pmu_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ EL2_E2H0_SUBLIST,
+ VREGS_SUBLIST,
+ PMU_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_reg_list el2_e2h0_sve_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ EL2_E2H0_SUBLIST,
+ SVE_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_reg_list el2_e2h0_sve_pmu_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ EL2_E2H0_SUBLIST,
+ SVE_SUBLIST,
+ PMU_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_reg_list el2_e2h0_pauth_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ EL2_E2H0_SUBLIST,
+ VREGS_SUBLIST,
+ PAUTH_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_reg_list el2_e2h0_pauth_pmu_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ EL2_E2H0_SUBLIST,
+ VREGS_SUBLIST,
+ PAUTH_SUBLIST,
+ PMU_SUBLIST,
+ {0},
+ },
+};
+
struct vcpu_reg_list *vcpu_configs[] = {
&vregs_config,
&vregs_pmu_config,
@@ -768,5 +993,19 @@ struct vcpu_reg_list *vcpu_configs[] = {
&sve_pmu_config,
&pauth_config,
&pauth_pmu_config,
+
+ &el2_vregs_config,
+ &el2_vregs_pmu_config,
+ &el2_sve_config,
+ &el2_sve_pmu_config,
+ &el2_pauth_config,
+ &el2_pauth_pmu_config,
+
+ &el2_e2h0_vregs_config,
+ &el2_e2h0_vregs_pmu_config,
+ &el2_e2h0_sve_config,
+ &el2_e2h0_sve_pmu_config,
+ &el2_e2h0_pauth_config,
+ &el2_e2h0_pauth_pmu_config,
};
int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
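
For readability, here is what one REG_FEAT() entry from the table above expands to, spelled out from the FEAT()/REG_FEAT() macros defined earlier in the hunk:

/* REG_FEAT(TCR2_EL1, ID_AA64MMFR3_EL1, TCRX, IMP) expands roughly to: */
{
	.reg        = SYS_REG(TCR2_EL1),
	.id_reg     = SYS_REG(ID_AA64MMFR3_EL1),
	.feat_shift = ID_AA64MMFR3_EL1_TCRX_SHIFT,
	.feat_min   = ID_AA64MMFR3_EL1_TCRX_IMP,
}
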
diff --git a/tools/testing/selftests/kvm/arm64/hello_el2.c b/tools/testing/selftests/kvm/arm64/hello_el2.c
new file mode 100644
index 000000000000..bbe6862c6ab1
--- /dev/null
+++ b/tools/testing/selftests/kvm/arm64/hello_el2.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * hello_el2 - Basic KVM selftest for VM running at EL2 with E2H=RES1
+ *
+ * Copyright 2025 Google LLC
+ */
+#include "kvm_util.h"
+#include "processor.h"
+#include "test_util.h"
+#include "ucall.h"
+
+#include <asm/sysreg.h>
+
+static void guest_code(void)
+{
+ u64 mmfr0 = read_sysreg_s(SYS_ID_AA64MMFR0_EL1);
+ u64 mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
+ u64 mmfr4 = read_sysreg_s(SYS_ID_AA64MMFR4_EL1);
+ u8 e2h0 = SYS_FIELD_GET(ID_AA64MMFR4_EL1, E2H0, mmfr4);
+
+ GUEST_ASSERT_EQ(get_current_el(), 2);
+ GUEST_ASSERT(read_sysreg(hcr_el2) & HCR_EL2_E2H);
+ GUEST_ASSERT_EQ(SYS_FIELD_GET(ID_AA64MMFR1_EL1, VH, mmfr1),
+ ID_AA64MMFR1_EL1_VH_IMP);
+
+ /*
+ * Traps of the complete ID register space are IMPDEF without FEAT_FGT,
+ * which is really annoying to deal with in KVM describing E2H as RES1.
+ *
+ * If the implementation doesn't honor the trap then expect the register
+ * to return all zeros.
+ */
+ if (e2h0 == ID_AA64MMFR4_EL1_E2H0_IMP)
+ GUEST_ASSERT_EQ(SYS_FIELD_GET(ID_AA64MMFR0_EL1, FGT, mmfr0),
+ ID_AA64MMFR0_EL1_FGT_NI);
+ else
+ GUEST_ASSERT_EQ(e2h0, ID_AA64MMFR4_EL1_E2H0_NI_NV1);
+
+ GUEST_DONE();
+}
+
+int main(void)
+{
+ struct kvm_vcpu_init init;
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+ struct ucall uc;
+
+ TEST_REQUIRE(kvm_check_cap(KVM_CAP_ARM_EL2));
+
+ vm = vm_create(1);
+
+ kvm_get_default_vcpu_target(vm, &init);
+ init.features[0] |= BIT(KVM_ARM_VCPU_HAS_EL2);
+ vcpu = aarch64_vcpu_add(vm, 0, &init, guest_code);
+ kvm_arch_vm_finalize_vcpus(vm);
+
+ vcpu_run(vcpu);
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_DONE:
+ break;
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT(uc);
+ break;
+ default:
+ TEST_FAIL("Unhandled ucall: %ld\n", uc.cmd);
+ }
+
+ kvm_vm_free(vm);
+ return 0;
+}
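
get_current_el() in the guest code above presumably reduces to a CurrentEL read; a minimal sketch of that, assuming GCC/Clang inline asm on arm64:

static inline unsigned int current_el(void)
{
	unsigned long el;

	/* CurrentEL encodes the exception level in bits [3:2]. */
	asm volatile("mrs %0, CurrentEL" : "=r"(el));
	return (el >> 2) & 0x3;
}
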
diff --git a/tools/testing/selftests/kvm/arm64/hypercalls.c b/tools/testing/selftests/kvm/arm64/hypercalls.c
index 44cfcf8a7f46..bf038a0371f4 100644
--- a/tools/testing/selftests/kvm/arm64/hypercalls.c
+++ b/tools/testing/selftests/kvm/arm64/hypercalls.c
@@ -108,7 +108,7 @@ static void guest_test_hvc(const struct test_hvc_info *hc_info)
for (i = 0; i < hvc_info_arr_sz; i++, hc_info++) {
memset(&res, 0, sizeof(res));
- smccc_hvc(hc_info->func_id, hc_info->arg1, 0, 0, 0, 0, 0, 0, &res);
+ do_smccc(hc_info->func_id, hc_info->arg1, 0, 0, 0, 0, 0, 0, &res);
switch (stage) {
case TEST_STAGE_HVC_IFACE_FEAT_DISABLED:
diff --git a/tools/testing/selftests/kvm/arm64/kvm-uuid.c b/tools/testing/selftests/kvm/arm64/kvm-uuid.c
new file mode 100644
index 000000000000..b5be9133535a
--- /dev/null
+++ b/tools/testing/selftests/kvm/arm64/kvm-uuid.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Check that nobody has tampered with KVM's UID
+
+#include <errno.h>
+#include <linux/arm-smccc.h>
+#include <asm/kvm.h>
+#include <kvm_util.h>
+
+#include "processor.h"
+
+/*
+ * Do NOT redefine these constants, or try to replace them with some
+ * "common" version. They are hardcoded here to detect any potential
+ * breakage happening in the rest of the kernel.
+ *
+ * KVM UID value: 28b46fb6-2ec5-11e9-a9ca-4b564d003a74
+ */
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 0xb66fb428U
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 0xe911c52eU
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 0x564bcaa9U
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3 0x743a004dU
+
+static void guest_code(void)
+{
+ struct arm_smccc_res res = {};
+
+ do_smccc(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ __GUEST_ASSERT(res.a0 == ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 &&
+ res.a1 == ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 &&
+ res.a2 == ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 &&
+ res.a3 == ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3,
+ "Unexpected KVM-specific UID %lx %lx %lx %lx\n", res.a0, res.a1, res.a2, res.a3);
+ GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+ struct ucall uc;
+ bool guest_done = false;
+
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+
+ while (!guest_done) {
+ vcpu_run(vcpu);
+
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_SYNC:
+ break;
+ case UCALL_DONE:
+ guest_done = true;
+ break;
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT(uc);
+ break;
+ case UCALL_PRINTF:
+ printf("%s", uc.buffer);
+ break;
+ default:
+ TEST_FAIL("Unexpected guest exit");
+ }
+ }
+
+ kvm_vm_free(vm);
+
+ return 0;
+}
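
The four hardcoded words above are simply the KVM UID 28b46fb6-2ec5-11e9-a9ca-4b564d003a74 read back as little-endian 32-bit words. A standalone check (assumes a little-endian host):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const uint8_t uuid[16] = {
		0x28, 0xb4, 0x6f, 0xb6, 0x2e, 0xc5, 0x11, 0xe9,
		0xa9, 0xca, 0x4b, 0x56, 0x4d, 0x00, 0x3a, 0x74,
	};
	uint32_t w[4];

	memcpy(w, uuid, sizeof(w));	/* little-endian reinterpretation */
	printf("%#x %#x %#x %#x\n", w[0], w[1], w[2], w[3]);
	/* expected: 0xb66fb428 0xe911c52e 0x564bcaa9 0x743a004d */
	return 0;
}
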
diff --git a/tools/testing/selftests/kvm/arm64/mmio_abort.c b/tools/testing/selftests/kvm/arm64/mmio_abort.c
deleted file mode 100644
index 8b7a80a51b1c..000000000000
--- a/tools/testing/selftests/kvm/arm64/mmio_abort.c
+++ /dev/null
@@ -1,159 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * mmio_abort - Tests for userspace MMIO abort injection
- *
- * Copyright (c) 2024 Google LLC
- */
-#include "processor.h"
-#include "test_util.h"
-
-#define MMIO_ADDR 0x8000000ULL
-
-static u64 expected_abort_pc;
-
-static void expect_sea_handler(struct ex_regs *regs)
-{
- u64 esr = read_sysreg(esr_el1);
-
- GUEST_ASSERT_EQ(regs->pc, expected_abort_pc);
- GUEST_ASSERT_EQ(ESR_ELx_EC(esr), ESR_ELx_EC_DABT_CUR);
- GUEST_ASSERT_EQ(esr & ESR_ELx_FSC_TYPE, ESR_ELx_FSC_EXTABT);
-
- GUEST_DONE();
-}
-
-static void unexpected_dabt_handler(struct ex_regs *regs)
-{
- GUEST_FAIL("Unexpected data abort at PC: %lx\n", regs->pc);
-}
-
-static struct kvm_vm *vm_create_with_dabt_handler(struct kvm_vcpu **vcpu, void *guest_code,
- handler_fn dabt_handler)
-{
- struct kvm_vm *vm = vm_create_with_one_vcpu(vcpu, guest_code);
-
- vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(*vcpu);
- vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT, ESR_ELx_EC_DABT_CUR, dabt_handler);
-
- virt_map(vm, MMIO_ADDR, MMIO_ADDR, 1);
-
- return vm;
-}
-
-static void vcpu_inject_extabt(struct kvm_vcpu *vcpu)
-{
- struct kvm_vcpu_events events = {};
-
- events.exception.ext_dabt_pending = true;
- vcpu_events_set(vcpu, &events);
-}
-
-static void vcpu_run_expect_done(struct kvm_vcpu *vcpu)
-{
- struct ucall uc;
-
- vcpu_run(vcpu);
- switch (get_ucall(vcpu, &uc)) {
- case UCALL_ABORT:
- REPORT_GUEST_ASSERT(uc);
- break;
- case UCALL_DONE:
- break;
- default:
- TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
- }
-}
-
-extern char test_mmio_abort_insn;
-
-static void test_mmio_abort_guest(void)
-{
- WRITE_ONCE(expected_abort_pc, (u64)&test_mmio_abort_insn);
-
- asm volatile("test_mmio_abort_insn:\n\t"
- "ldr x0, [%0]\n\t"
- : : "r" (MMIO_ADDR) : "x0", "memory");
-
- GUEST_FAIL("MMIO instruction should not retire");
-}
-
-/*
- * Test that KVM doesn't complete MMIO emulation when userspace has made an
- * external abort pending for the instruction.
- */
-static void test_mmio_abort(void)
-{
- struct kvm_vcpu *vcpu;
- struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_mmio_abort_guest,
- expect_sea_handler);
- struct kvm_run *run = vcpu->run;
-
- vcpu_run(vcpu);
- TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_MMIO);
- TEST_ASSERT_EQ(run->mmio.phys_addr, MMIO_ADDR);
- TEST_ASSERT_EQ(run->mmio.len, sizeof(unsigned long));
- TEST_ASSERT(!run->mmio.is_write, "Expected MMIO read");
-
- vcpu_inject_extabt(vcpu);
- vcpu_run_expect_done(vcpu);
- kvm_vm_free(vm);
-}
-
-extern char test_mmio_nisv_insn;
-
-static void test_mmio_nisv_guest(void)
-{
- WRITE_ONCE(expected_abort_pc, (u64)&test_mmio_nisv_insn);
-
- asm volatile("test_mmio_nisv_insn:\n\t"
- "ldr x0, [%0], #8\n\t"
- : : "r" (MMIO_ADDR) : "x0", "memory");
-
- GUEST_FAIL("MMIO instruction should not retire");
-}
-
-/*
- * Test that the KVM_RUN ioctl fails for ESR_EL2.ISV=0 MMIO aborts if userspace
- * hasn't enabled KVM_CAP_ARM_NISV_TO_USER.
- */
-static void test_mmio_nisv(void)
-{
- struct kvm_vcpu *vcpu;
- struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_mmio_nisv_guest,
- unexpected_dabt_handler);
-
- TEST_ASSERT(_vcpu_run(vcpu), "Expected nonzero return code from KVM_RUN");
- TEST_ASSERT_EQ(errno, ENOSYS);
-
- kvm_vm_free(vm);
-}
-
-/*
- * Test that ESR_EL2.ISV=0 MMIO aborts reach userspace and that an injected SEA
- * reaches the guest.
- */
-static void test_mmio_nisv_abort(void)
-{
- struct kvm_vcpu *vcpu;
- struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_mmio_nisv_guest,
- expect_sea_handler);
- struct kvm_run *run = vcpu->run;
-
- vm_enable_cap(vm, KVM_CAP_ARM_NISV_TO_USER, 1);
-
- vcpu_run(vcpu);
- TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_ARM_NISV);
- TEST_ASSERT_EQ(run->arm_nisv.fault_ipa, MMIO_ADDR);
-
- vcpu_inject_extabt(vcpu);
- vcpu_run_expect_done(vcpu);
- kvm_vm_free(vm);
-}
-
-int main(void)
-{
- test_mmio_abort();
- test_mmio_nisv();
- test_mmio_nisv_abort();
-}
diff --git a/tools/testing/selftests/kvm/arm64/no-vgic-v3.c b/tools/testing/selftests/kvm/arm64/no-vgic-v3.c
index ebd70430c89d..152c34776981 100644
--- a/tools/testing/selftests/kvm/arm64/no-vgic-v3.c
+++ b/tools/testing/selftests/kvm/arm64/no-vgic-v3.c
@@ -54,7 +54,7 @@ static void guest_code(void)
* Check that we advertise that ID_AA64PFR0_EL1.GIC == 0, having
* hidden the feature at runtime without any other userspace action.
*/
- __GUEST_ASSERT(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC),
+ __GUEST_ASSERT(FIELD_GET(ID_AA64PFR0_EL1_GIC,
read_sysreg(id_aa64pfr0_el1)) == 0,
"GICv3 wrongly advertised");
@@ -163,9 +163,11 @@ int main(int argc, char *argv[])
struct kvm_vm *vm;
uint64_t pfr0;
+ test_disable_default_vgic();
+
vm = vm_create_with_one_vcpu(&vcpu, NULL);
pfr0 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
- __TEST_REQUIRE(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), pfr0),
+ __TEST_REQUIRE(FIELD_GET(ID_AA64PFR0_EL1_GIC, pfr0),
"GICv3 not supported.");
kvm_vm_free(vm);
diff --git a/tools/testing/selftests/kvm/arm64/page_fault_test.c b/tools/testing/selftests/kvm/arm64/page_fault_test.c
index dc6559dad9d8..4ccbd389d133 100644
--- a/tools/testing/selftests/kvm/arm64/page_fault_test.c
+++ b/tools/testing/selftests/kvm/arm64/page_fault_test.c
@@ -95,14 +95,14 @@ static bool guest_check_lse(void)
uint64_t isar0 = read_sysreg(id_aa64isar0_el1);
uint64_t atomic;
- atomic = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_ATOMIC), isar0);
+ atomic = FIELD_GET(ID_AA64ISAR0_EL1_ATOMIC, isar0);
return atomic >= 2;
}
static bool guest_check_dc_zva(void)
{
uint64_t dczid = read_sysreg(dczid_el0);
- uint64_t dzp = FIELD_GET(ARM64_FEATURE_MASK(DCZID_EL0_DZP), dczid);
+ uint64_t dzp = FIELD_GET(DCZID_EL0_DZP, dczid);
return dzp == 0;
}
@@ -195,7 +195,7 @@ static bool guest_set_ha(void)
uint64_t hadbs, tcr;
/* Skip if HA is not supported. */
- hadbs = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HAFDBS), mmfr1);
+ hadbs = FIELD_GET(ID_AA64MMFR1_EL1_HAFDBS, mmfr1);
if (hadbs == 0)
return false;
diff --git a/tools/testing/selftests/kvm/arm64/psci_test.c b/tools/testing/selftests/kvm/arm64/psci_test.c
index ab491ee9e5f7..98e49f710aef 100644
--- a/tools/testing/selftests/kvm/arm64/psci_test.c
+++ b/tools/testing/selftests/kvm/arm64/psci_test.c
@@ -27,7 +27,7 @@ static uint64_t psci_cpu_on(uint64_t target_cpu, uint64_t entry_addr,
{
struct arm_smccc_res res;
- smccc_hvc(PSCI_0_2_FN64_CPU_ON, target_cpu, entry_addr, context_id,
+ do_smccc(PSCI_0_2_FN64_CPU_ON, target_cpu, entry_addr, context_id,
0, 0, 0, 0, &res);
return res.a0;
@@ -38,7 +38,7 @@ static uint64_t psci_affinity_info(uint64_t target_affinity,
{
struct arm_smccc_res res;
- smccc_hvc(PSCI_0_2_FN64_AFFINITY_INFO, target_affinity, lowest_affinity_level,
+ do_smccc(PSCI_0_2_FN64_AFFINITY_INFO, target_affinity, lowest_affinity_level,
0, 0, 0, 0, 0, &res);
return res.a0;
@@ -48,7 +48,7 @@ static uint64_t psci_system_suspend(uint64_t entry_addr, uint64_t context_id)
{
struct arm_smccc_res res;
- smccc_hvc(PSCI_1_0_FN64_SYSTEM_SUSPEND, entry_addr, context_id,
+ do_smccc(PSCI_1_0_FN64_SYSTEM_SUSPEND, entry_addr, context_id,
0, 0, 0, 0, 0, &res);
return res.a0;
@@ -58,7 +58,7 @@ static uint64_t psci_system_off2(uint64_t type, uint64_t cookie)
{
struct arm_smccc_res res;
- smccc_hvc(PSCI_1_3_FN64_SYSTEM_OFF2, type, cookie, 0, 0, 0, 0, 0, &res);
+ do_smccc(PSCI_1_3_FN64_SYSTEM_OFF2, type, cookie, 0, 0, 0, 0, 0, &res);
return res.a0;
}
@@ -67,7 +67,7 @@ static uint64_t psci_features(uint32_t func_id)
{
struct arm_smccc_res res;
- smccc_hvc(PSCI_1_0_FN_PSCI_FEATURES, func_id, 0, 0, 0, 0, 0, 0, &res);
+ do_smccc(PSCI_1_0_FN_PSCI_FEATURES, func_id, 0, 0, 0, 0, 0, 0, &res);
return res.a0;
}
@@ -89,12 +89,13 @@ static struct kvm_vm *setup_vm(void *guest_code, struct kvm_vcpu **source,
vm = vm_create(2);
- vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);
+ kvm_get_default_vcpu_target(vm, &init);
init.features[0] |= (1 << KVM_ARM_VCPU_PSCI_0_2);
*source = aarch64_vcpu_add(vm, 0, &init, guest_code);
*target = aarch64_vcpu_add(vm, 1, &init, guest_code);
+ kvm_arch_vm_finalize_vcpus(vm);
return vm;
}
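
The smccc_hvc() to do_smccc() rename in the PSCI hunks above suggests a wrapper that picks the conduit from the exception level the guest runs at, since an HVC issued from the guest's own EL2 never reaches KVM. A hedged sketch of such a helper, assuming the selftest library's existing smccc_hvc()/smccc_smc() primitives; the dispatch logic here is an assumption, not the actual implementation:

static void do_smccc_sketch(uint32_t function_id, uint64_t arg1,
			    struct arm_smccc_res *res)
{
	if (get_current_el() == 2)
		smccc_smc(function_id, arg1, 0, 0, 0, 0, 0, 0, res);
	else
		smccc_hvc(function_id, arg1, 0, 0, 0, 0, 0, 0, res);
}
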
diff --git a/tools/testing/selftests/kvm/arm64/sea_to_user.c b/tools/testing/selftests/kvm/arm64/sea_to_user.c
new file mode 100644
index 000000000000..573dd790aeb8
--- /dev/null
+++ b/tools/testing/selftests/kvm/arm64/sea_to_user.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test that KVM returns to userspace with KVM_EXIT_ARM_SEA if host APEI fails
+ * to handle an SEA and userspace has opted in via KVM_CAP_ARM_SEA_TO_USER.
+ *
+ * After reaching userspace with expected arm_sea info, also test userspace
+ * injecting a synchronous external data abort into the guest.
+ *
+ * This test utilizes EINJ to generate a REAL synchronous external data
+ * abort by consuming a recoverable uncorrectable memory error. Therefore
+ * the device under test must support EINJ in both firmware and the host
+ * kernel, including the notrigger feature; otherwise the test is skipped.
+ * The platform under test must also have APEI unable to claim the SEA;
+ * otherwise the test is likewise skipped.
+ */
+
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "guest_modes.h"
+
+#define PAGE_PRESENT (1ULL << 63)
+#define PAGE_PHYSICAL 0x007fffffffffffffULL
+#define PAGE_ADDR_MASK (~(0xfffULL))
+
+/* Group ISV and ISS[23:14]. */
+#define ESR_ELx_INST_SYNDROME ((ESR_ELx_ISV) | (ESR_ELx_SAS) | \
+ (ESR_ELx_SSE) | (ESR_ELx_SRT_MASK) | \
+ (ESR_ELx_SF) | (ESR_ELx_AR))
+
+#define EINJ_ETYPE "/sys/kernel/debug/apei/einj/error_type"
+#define EINJ_ADDR "/sys/kernel/debug/apei/einj/param1"
+#define EINJ_MASK "/sys/kernel/debug/apei/einj/param2"
+#define EINJ_FLAGS "/sys/kernel/debug/apei/einj/flags"
+#define EINJ_NOTRIGGER "/sys/kernel/debug/apei/einj/notrigger"
+#define EINJ_DOIT "/sys/kernel/debug/apei/einj/error_inject"
+/* Memory Uncorrectable non-fatal. */
+#define ERROR_TYPE_MEMORY_UER 0x10
+/* Memory address and mask valid (param1 and param2). */
+#define MASK_MEMORY_UER 0b10
+
+/* Guest virtual address region = [2G, 3G). */
+#define START_GVA 0x80000000UL
+#define VM_MEM_SIZE 0x40000000UL
+/* Note: EINJ_OFFSET must be < VM_MEM_SIZE. */
+#define EINJ_OFFSET 0x01234badUL
+#define EINJ_GVA ((START_GVA) + (EINJ_OFFSET))
+
+static vm_paddr_t einj_gpa;
+static void *einj_hva;
+static uint64_t einj_hpa;
+static bool far_invalid;
+
+static uint64_t translate_to_host_paddr(unsigned long vaddr)
+{
+ uint64_t pinfo;
+ int64_t offset = vaddr / getpagesize() * sizeof(pinfo);
+ int fd;
+ uint64_t page_addr;
+ uint64_t paddr;
+
+ fd = open("/proc/self/pagemap", O_RDONLY);
+ if (fd < 0)
+ ksft_exit_fail_perror("Failed to open /proc/self/pagemap");
+ if (pread(fd, &pinfo, sizeof(pinfo), offset) != sizeof(pinfo)) {
+ close(fd);
+ ksft_exit_fail_perror("Failed to read /proc/self/pagemap");
+ }
+
+ close(fd);
+
+ if ((pinfo & PAGE_PRESENT) == 0)
+ ksft_exit_fail_perror("Page not present");
+
+ page_addr = (pinfo & PAGE_PHYSICAL) << MIN_PAGE_SHIFT;
+ paddr = page_addr + (vaddr & (getpagesize() - 1));
+ return paddr;
+}
+
+static void write_einj_entry(const char *einj_path, uint64_t val)
+{
+ char cmd[256] = {0};
+ FILE *cmdfile = NULL;
+
+ sprintf(cmd, "echo %#lx > %s", val, einj_path);
+ cmdfile = popen(cmd, "r");
+
+ if (pclose(cmdfile) == 0)
+ ksft_print_msg("echo %#lx > %s - done\n", val, einj_path);
+ else
+ ksft_exit_fail_perror("Failed to write EINJ entry");
+}
+
+static void inject_uer(uint64_t paddr)
+{
+ if (access("/sys/firmware/acpi/tables/EINJ", R_OK) == -1)
+ ksft_test_result_skip("EINJ table no available in firmware");
+
+ if (access(EINJ_ETYPE, R_OK | W_OK) == -1)
+ ksft_test_result_skip("EINJ module probably not loaded?");
+
+ write_einj_entry(EINJ_ETYPE, ERROR_TYPE_MEMORY_UER);
+ write_einj_entry(EINJ_FLAGS, MASK_MEMORY_UER);
+ write_einj_entry(EINJ_ADDR, paddr);
+ write_einj_entry(EINJ_MASK, ~0x0UL);
+ write_einj_entry(EINJ_NOTRIGGER, 1);
+ write_einj_entry(EINJ_DOIT, 1);
+}
+
+/*
+ * When host APEI successfully claims the SEA caused by guest_code, the
+ * kernel sends a SIGBUS signal with BUS_MCEERR_AR to the test thread.
+ *
+ * We set up this SIGBUS handler to skip the test for that case.
+ */
+static void sigbus_signal_handler(int sig, siginfo_t *si, void *v)
+{
+ ksft_print_msg("SIGBUS (%d) received, dumping siginfo...\n", sig);
+ ksft_print_msg("si_signo=%d, si_errno=%d, si_code=%d, si_addr=%p\n",
+ si->si_signo, si->si_errno, si->si_code, si->si_addr);
+ if (si->si_code == BUS_MCEERR_AR)
+ ksft_test_result_skip("SEA is claimed by host APEI\n");
+ else
+ ksft_test_result_fail("Exit with signal unhandled\n");
+
+ exit(0);
+}
+
+static void setup_sigbus_handler(void)
+{
+ struct sigaction act;
+
+ memset(&act, 0, sizeof(act));
+ sigemptyset(&act.sa_mask);
+ act.sa_sigaction = sigbus_signal_handler;
+ act.sa_flags = SA_SIGINFO;
+ TEST_ASSERT(sigaction(SIGBUS, &act, NULL) == 0,
+ "Failed to setup SIGBUS handler");
+}
+
+static void guest_code(void)
+{
+ uint64_t guest_data;
+
+ /* Consuming the injected error will cause an SEA. */
+ guest_data = *(uint64_t *)EINJ_GVA;
+
+ GUEST_FAIL("Poison not protected by SEA: gva=%#lx, guest_data=%#lx\n",
+ EINJ_GVA, guest_data);
+}
+
+static void expect_sea_handler(struct ex_regs *regs)
+{
+ u64 esr = read_sysreg(esr_el1);
+ u64 far = read_sysreg(far_el1);
+ bool expect_far_invalid = far_invalid;
+
+ GUEST_PRINTF("Handling Guest SEA\n");
+ GUEST_PRINTF("ESR_EL1=%#lx, FAR_EL1=%#lx\n", esr, far);
+
+ GUEST_ASSERT_EQ(ESR_ELx_EC(esr), ESR_ELx_EC_DABT_CUR);
+ GUEST_ASSERT_EQ(esr & ESR_ELx_FSC_TYPE, ESR_ELx_FSC_EXTABT);
+
+ if (expect_far_invalid) {
+ GUEST_ASSERT_EQ(esr & ESR_ELx_FnV, ESR_ELx_FnV);
+ GUEST_PRINTF("Guest observed garbage value in FAR\n");
+ } else {
+ GUEST_ASSERT_EQ(esr & ESR_ELx_FnV, 0);
+ GUEST_ASSERT_EQ(far, EINJ_GVA);
+ }
+
+ GUEST_DONE();
+}
+
+static void vcpu_inject_sea(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu_events events = {};
+
+ events.exception.ext_dabt_pending = true;
+ vcpu_events_set(vcpu, &events);
+}
+
+static void run_vm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
+{
+ struct ucall uc;
+ bool guest_done = false;
+ struct kvm_run *run = vcpu->run;
+ u64 esr;
+
+ /* Resume the vCPU after error injection to consume the error. */
+ vcpu_run(vcpu);
+
+ ksft_print_msg("Dump kvm_run info about KVM_EXIT_%s\n",
+ exit_reason_str(run->exit_reason));
+ ksft_print_msg("kvm_run.arm_sea: esr=%#llx, flags=%#llx\n",
+ run->arm_sea.esr, run->arm_sea.flags);
+ ksft_print_msg("kvm_run.arm_sea: gva=%#llx, gpa=%#llx\n",
+ run->arm_sea.gva, run->arm_sea.gpa);
+
+ TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_ARM_SEA);
+
+ esr = run->arm_sea.esr;
+ TEST_ASSERT_EQ(ESR_ELx_EC(esr), ESR_ELx_EC_DABT_LOW);
+ TEST_ASSERT_EQ(esr & ESR_ELx_FSC_TYPE, ESR_ELx_FSC_EXTABT);
+ TEST_ASSERT_EQ(ESR_ELx_ISS2(esr), 0);
+ TEST_ASSERT_EQ((esr & ESR_ELx_INST_SYNDROME), 0);
+ TEST_ASSERT_EQ(esr & ESR_ELx_VNCR, 0);
+
+ if (!(esr & ESR_ELx_FnV)) {
+ ksft_print_msg("Expect gva to match given FnV bit is 0\n");
+ TEST_ASSERT_EQ(run->arm_sea.gva, EINJ_GVA);
+ }
+
+ if (run->arm_sea.flags & KVM_EXIT_ARM_SEA_FLAG_GPA_VALID) {
+ ksft_print_msg("Expect gpa to match given KVM_EXIT_ARM_SEA_FLAG_GPA_VALID is set\n");
+ TEST_ASSERT_EQ(run->arm_sea.gpa, einj_gpa & PAGE_ADDR_MASK);
+ }
+
+ far_invalid = esr & ESR_ELx_FnV;
+
+ /* Inject an SEA into the guest and expect it to be handled by its SEA handler. */
+ vcpu_inject_sea(vcpu);
+
+ /* Expect the guest to reach GUEST_DONE gracefully. */
+ do {
+ vcpu_run(vcpu);
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_PRINTF:
+ ksft_print_msg("From guest: %s", uc.buffer);
+ break;
+ case UCALL_DONE:
+ ksft_print_msg("Guest done gracefully!\n");
+ guest_done = true;
+ break;
+ case UCALL_ABORT:
+ ksft_print_msg("Guest aborted!\n");
+ guest_done = true;
+ REPORT_GUEST_ASSERT(uc);
+ break;
+ default:
+ TEST_FAIL("Unexpected ucall: %lu\n", uc.cmd);
+ }
+ } while (!guest_done);
+}
+
+static struct kvm_vm *vm_create_with_sea_handler(struct kvm_vcpu **vcpu)
+{
+ size_t backing_page_size;
+ size_t guest_page_size;
+ size_t alignment;
+ uint64_t num_guest_pages;
+ vm_paddr_t start_gpa;
+ enum vm_mem_backing_src_type src_type = VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB;
+ struct kvm_vm *vm;
+
+ backing_page_size = get_backing_src_pagesz(src_type);
+ guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size;
+ alignment = max(backing_page_size, guest_page_size);
+ num_guest_pages = VM_MEM_SIZE / guest_page_size;
+
+ vm = __vm_create_with_one_vcpu(vcpu, num_guest_pages, guest_code);
+ vm_init_descriptor_tables(vm);
+ vcpu_init_descriptor_tables(*vcpu);
+
+ vm_install_sync_handler(vm,
+ /*vector=*/VECTOR_SYNC_CURRENT,
+ /*ec=*/ESR_ELx_EC_DABT_CUR,
+ /*handler=*/expect_sea_handler);
+
+ start_gpa = (vm->max_gfn - num_guest_pages) * guest_page_size;
+ start_gpa = align_down(start_gpa, alignment);
+
+ vm_userspace_mem_region_add(
+ /*vm=*/vm,
+ /*src_type=*/src_type,
+ /*guest_paddr=*/start_gpa,
+ /*slot=*/1,
+ /*npages=*/num_guest_pages,
+ /*flags=*/0);
+
+ virt_map(vm, START_GVA, start_gpa, num_guest_pages);
+
+ ksft_print_msg("Mapped %#lx pages: gva=%#lx to gpa=%#lx\n",
+ num_guest_pages, START_GVA, start_gpa);
+ return vm;
+}
+
+static void vm_inject_memory_uer(struct kvm_vm *vm)
+{
+ uint64_t guest_data;
+
+ einj_gpa = addr_gva2gpa(vm, EINJ_GVA);
+ einj_hva = addr_gva2hva(vm, EINJ_GVA);
+
+ /* Populate certain data before injecting UER. */
+ *(uint64_t *)einj_hva = 0xBAADCAFE;
+ guest_data = *(uint64_t *)einj_hva;
+ ksft_print_msg("Before EINJect: data=%#lx\n",
+ guest_data);
+
+ einj_hpa = translate_to_host_paddr((unsigned long)einj_hva);
+
+ ksft_print_msg("EINJ_GVA=%#lx, einj_gpa=%#lx, einj_hva=%p, einj_hpa=%#lx\n",
+ EINJ_GVA, einj_gpa, einj_hva, einj_hpa);
+
+ inject_uer(einj_hpa);
+ ksft_print_msg("Memory UER EINJected\n");
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_vm *vm;
+ struct kvm_vcpu *vcpu;
+
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SEA_TO_USER));
+
+ setup_sigbus_handler();
+
+ vm = vm_create_with_sea_handler(&vcpu);
+ vm_enable_cap(vm, KVM_CAP_ARM_SEA_TO_USER, 0);
+ vm_inject_memory_uer(vm);
+ run_vm(vm, vcpu);
+ kvm_vm_free(vm);
+
+ return 0;
+}
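
translate_to_host_paddr() above leans on the /proc/self/pagemap format: one 8-byte entry per virtual page, bit 63 flagging presence and bits [54:0] carrying the PFN (see Documentation/admin-guide/mm/pagemap.rst). A standalone sketch of the address math, assuming 4 KiB pages and a hypothetical entry value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pinfo = (1ULL << 63) | 0x12345;	/* hypothetical pagemap entry */
	uint64_t vaddr = 0x80001234;

	if (pinfo & (1ULL << 63)) {			/* page present */
		uint64_t pfn = pinfo & ((1ULL << 55) - 1);
		uint64_t paddr = (pfn << 12) | (vaddr & 0xfff);

		printf("paddr=%#llx\n", (unsigned long long)paddr);
	}
	return 0;
}
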
diff --git a/tools/testing/selftests/kvm/arm64/set_id_regs.c b/tools/testing/selftests/kvm/arm64/set_id_regs.c
index 8f422bfdfcb9..c4815d365816 100644
--- a/tools/testing/selftests/kvm/arm64/set_id_regs.c
+++ b/tools/testing/selftests/kvm/arm64/set_id_regs.c
@@ -15,8 +15,6 @@
#include "test_util.h"
#include <linux/bitfield.h>
-bool have_cap_arm_mte;
-
enum ftr_type {
FTR_EXACT, /* Use a predefined safe value */
FTR_LOWER_SAFE, /* Smaller value is safe */
@@ -125,6 +123,13 @@ static const struct reg_ftr_bits ftr_id_aa64isar2_el1[] = {
REG_FTR_END,
};
+static const struct reg_ftr_bits ftr_id_aa64isar3_el1[] = {
+ REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR3_EL1, FPRCVT, 0),
+ REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR3_EL1, LSFE, 0),
+ REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR3_EL1, FAMINMAX, 0),
+ REG_FTR_END,
+};
+
static const struct reg_ftr_bits ftr_id_aa64pfr0_el1[] = {
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, CSV3, 0),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, CSV2, 0),
@@ -139,6 +144,7 @@ static const struct reg_ftr_bits ftr_id_aa64pfr0_el1[] = {
};
static const struct reg_ftr_bits ftr_id_aa64pfr1_el1[] = {
+ REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR1_EL1, DF2, 0),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR1_EL1, CSV2_frac, 0),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR1_EL1, SSBS, ID_AA64PFR1_EL1_SSBS_NI),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR1_EL1, BT, 0),
@@ -164,7 +170,9 @@ static const struct reg_ftr_bits ftr_id_aa64mmfr0_el1[] = {
static const struct reg_ftr_bits ftr_id_aa64mmfr1_el1[] = {
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, TIDCP1, 0),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, AFP, 0),
+ REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, HCX, 0),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, ETS, 0),
+ REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, TWED, 0),
REG_FTR_BITS(FTR_HIGHER_SAFE, ID_AA64MMFR1_EL1, SpecSEI, 0),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, PAN, 0),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, LO, 0),
@@ -187,6 +195,14 @@ static const struct reg_ftr_bits ftr_id_aa64mmfr2_el1[] = {
REG_FTR_END,
};
+static const struct reg_ftr_bits ftr_id_aa64mmfr3_el1[] = {
+ REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR3_EL1, S1POE, 0),
+ REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR3_EL1, S1PIE, 0),
+ REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR3_EL1, SCTLRX, 0),
+ REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR3_EL1, TCRX, 0),
+ REG_FTR_END,
+};
+
static const struct reg_ftr_bits ftr_id_aa64zfr0_el1[] = {
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, F64MM, 0),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, F32MM, 0),
@@ -212,11 +228,13 @@ static struct test_feature_reg test_regs[] = {
TEST_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0_el1),
TEST_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1_el1),
TEST_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2_el1),
+ TEST_REG(SYS_ID_AA64ISAR3_EL1, ftr_id_aa64isar3_el1),
TEST_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0_el1),
TEST_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1_el1),
TEST_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0_el1),
TEST_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1_el1),
TEST_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2_el1),
+ TEST_REG(SYS_ID_AA64MMFR3_EL1, ftr_id_aa64mmfr3_el1),
TEST_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0_el1),
};
@@ -229,11 +247,16 @@ static void guest_code(void)
GUEST_REG_SYNC(SYS_ID_AA64ISAR0_EL1);
GUEST_REG_SYNC(SYS_ID_AA64ISAR1_EL1);
GUEST_REG_SYNC(SYS_ID_AA64ISAR2_EL1);
+ GUEST_REG_SYNC(SYS_ID_AA64ISAR3_EL1);
GUEST_REG_SYNC(SYS_ID_AA64PFR0_EL1);
+ GUEST_REG_SYNC(SYS_ID_AA64PFR1_EL1);
GUEST_REG_SYNC(SYS_ID_AA64MMFR0_EL1);
GUEST_REG_SYNC(SYS_ID_AA64MMFR1_EL1);
GUEST_REG_SYNC(SYS_ID_AA64MMFR2_EL1);
+ GUEST_REG_SYNC(SYS_ID_AA64MMFR3_EL1);
GUEST_REG_SYNC(SYS_ID_AA64ZFR0_EL1);
+ GUEST_REG_SYNC(SYS_MPIDR_EL1);
+ GUEST_REG_SYNC(SYS_CLIDR_EL1);
GUEST_REG_SYNC(SYS_CTR_EL0);
GUEST_REG_SYNC(SYS_MIDR_EL1);
GUEST_REG_SYNC(SYS_REVIDR_EL1);
@@ -245,7 +268,9 @@ static void guest_code(void)
/* Return a safe value to a given ftr_bits an ftr value */
uint64_t get_safe_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
{
- uint64_t ftr_max = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);
+ uint64_t ftr_max = ftr_bits->mask >> ftr_bits->shift;
+
+ TEST_ASSERT(ftr_max > 1, "This test doesn't support single bit features");
if (ftr_bits->sign == FTR_UNSIGNED) {
switch (ftr_bits->type) {
@@ -297,7 +322,9 @@ uint64_t get_safe_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
/* Return an invalid value to a given ftr_bits an ftr value */
uint64_t get_invalid_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
{
- uint64_t ftr_max = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);
+ uint64_t ftr_max = ftr_bits->mask >> ftr_bits->shift;
+
+ TEST_ASSERT(ftr_max > 1, "This test doesn't support single bit features");
if (ftr_bits->sign == FTR_UNSIGNED) {
switch (ftr_bits->type) {
@@ -557,7 +584,9 @@ static void test_user_set_mte_reg(struct kvm_vcpu *vcpu)
uint64_t mte_frac;
int idx, err;
- if (!have_cap_arm_mte) {
+ val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));
+ mte = FIELD_GET(ID_AA64PFR1_EL1_MTE, val);
+ if (!mte) {
ksft_test_result_skip("MTE capability not supported, nothing to test\n");
return;
}
@@ -582,10 +611,7 @@ static void test_user_set_mte_reg(struct kvm_vcpu *vcpu)
* from unsupported (0xF) to supported (0).
*
*/
- val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));
-
- mte = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), val);
- mte_frac = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac), val);
+ mte_frac = FIELD_GET(ID_AA64PFR1_EL1_MTE_frac, val);
if (mte != ID_AA64PFR1_EL1_MTE_MTE2 ||
mte_frac != ID_AA64PFR1_EL1_MTE_frac_NI) {
ksft_test_result_skip("MTE_ASYNC or MTE_ASYMM are supported, nothing to test\n");
@@ -602,7 +628,7 @@ static void test_user_set_mte_reg(struct kvm_vcpu *vcpu)
}
val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));
- mte_frac = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac), val);
+ mte_frac = FIELD_GET(ID_AA64PFR1_EL1_MTE_frac, val);
if (mte_frac == ID_AA64PFR1_EL1_MTE_frac_NI)
ksft_test_result_pass("ID_AA64PFR1_EL1.MTE_frac=0 accepted and still 0xF\n");
else
@@ -650,7 +676,7 @@ static void test_clidr(struct kvm_vcpu *vcpu)
clidr = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CLIDR_EL1));
/* find the first empty level in the cache hierarchy */
- for (level = 1; level < 7; level++) {
+ for (level = 1; level <= 7; level++) {
if (!CLIDR_CTYPE(clidr, level))
break;
}
@@ -739,43 +765,35 @@ static void test_reset_preserves_id_regs(struct kvm_vcpu *vcpu)
ksft_test_result_pass("%s\n", __func__);
}
-void kvm_arch_vm_post_create(struct kvm_vm *vm)
-{
- if (vm_check_cap(vm, KVM_CAP_ARM_MTE)) {
- vm_enable_cap(vm, KVM_CAP_ARM_MTE, 0);
- have_cap_arm_mte = true;
- }
-}
-
int main(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
bool aarch64_only;
uint64_t val, el0;
- int test_cnt;
+ int test_cnt, i, j;
TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES));
TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_WRITABLE_IMP_ID_REGS));
+ test_wants_mte();
+
vm = vm_create(1);
vm_enable_cap(vm, KVM_CAP_ARM_WRITABLE_IMP_ID_REGS, 0);
vcpu = vm_vcpu_add(vm, 0, guest_code);
+ kvm_arch_vm_finalize_vcpus(vm);
/* Check for AARCH64 only system */
val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
- el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);
+ el0 = FIELD_GET(ID_AA64PFR0_EL1_EL0, val);
aarch64_only = (el0 == ID_AA64PFR0_EL1_EL0_IMP);
ksft_print_header();
- test_cnt = ARRAY_SIZE(ftr_id_aa64dfr0_el1) + ARRAY_SIZE(ftr_id_dfr0_el1) +
- ARRAY_SIZE(ftr_id_aa64isar0_el1) + ARRAY_SIZE(ftr_id_aa64isar1_el1) +
- ARRAY_SIZE(ftr_id_aa64isar2_el1) + ARRAY_SIZE(ftr_id_aa64pfr0_el1) +
- ARRAY_SIZE(ftr_id_aa64pfr1_el1) + ARRAY_SIZE(ftr_id_aa64mmfr0_el1) +
- ARRAY_SIZE(ftr_id_aa64mmfr1_el1) + ARRAY_SIZE(ftr_id_aa64mmfr2_el1) +
- ARRAY_SIZE(ftr_id_aa64zfr0_el1) - ARRAY_SIZE(test_regs) + 3 +
- MPAM_IDREG_TEST + MTE_IDREG_TEST;
+ test_cnt = 3 + MPAM_IDREG_TEST + MTE_IDREG_TEST;
+ for (i = 0; i < ARRAY_SIZE(test_regs); i++)
+ for (j = 0; test_regs[i].ftr_bits[j].type != FTR_END; j++)
+ test_cnt++;
ksft_set_plan(test_cnt);
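
The get_safe_value()/get_invalid_value() hunks above stop assuming every ID register field is 4 bits wide and instead derive the field's ceiling from its mask. Worked out for a hypothetical 4-bit field:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical field at bits [15:12] */
	uint64_t mask = 0xf000, shift = 12;
	uint64_t ftr_max = mask >> shift;	/* 0xf: maximum raw field value */

	printf("ftr_max=%#llx\n", (unsigned long long)ftr_max);
	return 0;
}
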
diff --git a/tools/testing/selftests/kvm/arm64/smccc_filter.c b/tools/testing/selftests/kvm/arm64/smccc_filter.c
index 2d189f3da228..1763b9d45400 100644
--- a/tools/testing/selftests/kvm/arm64/smccc_filter.c
+++ b/tools/testing/selftests/kvm/arm64/smccc_filter.c
@@ -22,8 +22,20 @@ enum smccc_conduit {
SMC_INSN,
};
+static bool test_runs_at_el2(void)
+{
+ struct kvm_vm *vm = vm_create(1);
+ struct kvm_vcpu_init init;
+
+ kvm_get_default_vcpu_target(vm, &init);
+ kvm_vm_free(vm);
+
+ return init.features[0] & BIT(KVM_ARM_VCPU_HAS_EL2);
+}
+
#define for_each_conduit(conduit) \
- for (conduit = HVC_INSN; conduit <= SMC_INSN; conduit++)
+ for (conduit = test_runs_at_el2() ? SMC_INSN : HVC_INSN; \
+ conduit <= SMC_INSN; conduit++)
static void guest_main(uint32_t func_id, enum smccc_conduit conduit)
{
@@ -64,7 +76,7 @@ static struct kvm_vm *setup_vm(struct kvm_vcpu **vcpu)
struct kvm_vm *vm;
vm = vm_create(1);
- vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);
+ kvm_get_default_vcpu_target(vm, &init);
/*
* Enable in-kernel emulation of PSCI to ensure that calls are denied
@@ -73,6 +85,7 @@ static struct kvm_vm *setup_vm(struct kvm_vcpu **vcpu)
init.features[0] |= (1 << KVM_ARM_VCPU_PSCI_0_2);
*vcpu = aarch64_vcpu_add(vm, 0, &init, guest_main);
+ kvm_arch_vm_finalize_vcpus(vm);
return vm;
}
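The reworked for_each_conduit() degenerates to a single SMC iteration when the default vCPU target runs at EL2, since a guest hypervisor's HVC would be taken by the guest itself rather than reach KVM's SMCCC filter. A standalone sketch of the iteration logic, using only names from the patch:

/* Sketch, not part of the patch: conduit selection per vCPU target. */
static void walk_conduits(bool runs_at_el2)
{
	enum smccc_conduit conduit;

	for (conduit = runs_at_el2 ? SMC_INSN : HVC_INSN;
	     conduit <= SMC_INSN; conduit++)
		pr_info("exercising conduit %d\n", conduit);
}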
diff --git a/tools/testing/selftests/kvm/arm64/vgic_init.c b/tools/testing/selftests/kvm/arm64/vgic_init.c
index b3b5fb0ff0a9..8d6d3a4ae4db 100644
--- a/tools/testing/selftests/kvm/arm64/vgic_init.c
+++ b/tools/testing/selftests/kvm/arm64/vgic_init.c
@@ -9,17 +9,18 @@
#include <asm/kvm.h>
#include <asm/kvm_para.h>
+#include <arm64/gic_v3.h>
+
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vgic.h"
+#include "gic_v3.h"
#define NR_VCPUS 4
#define REG_OFFSET(vcpu, offset) (((uint64_t)vcpu << 32) | offset)
-#define GICR_TYPER 0x8
-
#define VGIC_DEV_IS_V2(_d) ((_d) == KVM_DEV_TYPE_ARM_VGIC_V2)
#define VGIC_DEV_IS_V3(_d) ((_d) == KVM_DEV_TYPE_ARM_VGIC_V3)
@@ -675,6 +676,44 @@ static void test_v3_its_region(void)
vm_gic_destroy(&v);
}
+static void test_v3_nassgicap(void)
+{
+ struct kvm_vcpu *vcpus[NR_VCPUS];
+ bool has_nassgicap;
+ struct vm_gic vm;
+ u32 typer2;
+ int ret;
+
+ vm = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS, vcpus);
+ kvm_device_attr_get(vm.gic_fd, KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
+ GICD_TYPER2, &typer2);
+ has_nassgicap = typer2 & GICD_TYPER2_nASSGIcap;
+
+ typer2 |= GICD_TYPER2_nASSGIcap;
+ ret = __kvm_device_attr_set(vm.gic_fd, KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
+ GICD_TYPER2, &typer2);
+ if (has_nassgicap)
+ TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret));
+ else
+ TEST_ASSERT(ret && errno == EINVAL,
+ "Enabled nASSGIcap even though it's unavailable");
+
+ typer2 &= ~GICD_TYPER2_nASSGIcap;
+ kvm_device_attr_set(vm.gic_fd, KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
+ GICD_TYPER2, &typer2);
+
+ kvm_device_attr_set(vm.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
+
+ typer2 ^= GICD_TYPER2_nASSGIcap;
+ ret = __kvm_device_attr_set(vm.gic_fd, KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
+ GICD_TYPER2, &typer2);
+ TEST_ASSERT(ret && errno == EBUSY,
+ "Changed nASSGIcap after initializing the VGIC");
+
+ vm_gic_destroy(&vm);
+}
+
/*
* Returns 0 if it's possible to create GIC device of a given type (V2 or V3).
*/
@@ -715,6 +754,220 @@ int test_kvm_device(uint32_t gic_dev_type)
return 0;
}
+struct sr_def {
+ const char *name;
+ u32 encoding;
+};
+
+#define PACK_SR(r) \
+ ((sys_reg_Op0(r) << 14) | \
+ (sys_reg_Op1(r) << 11) | \
+ (sys_reg_CRn(r) << 7) | \
+ (sys_reg_CRm(r) << 3) | \
+ (sys_reg_Op2(r)))
+
+#define SR(r) \
+ { \
+ .name = #r, \
+ .encoding = r, \
+ }
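A worked example of the attr encoding (an illustration; the field layout mirrors the KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS attribute format): ICC_PMR_EL1 is op0=3, op1=0, CRn=4, CRm=6, op2=0, so:

/*
 * PACK_SR(SYS_ICC_PMR_EL1)
 *	== (3 << 14) | (0 << 11) | (4 << 7) | (6 << 3) | 0
 *	== 0xc230
 */
_Static_assert(PACK_SR(SYS_ICC_PMR_EL1) == 0xc230, "worked example");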
+
+static const struct sr_def sysregs_el1[] = {
+ SR(SYS_ICC_PMR_EL1),
+ SR(SYS_ICC_BPR0_EL1),
+ SR(SYS_ICC_AP0R0_EL1),
+ SR(SYS_ICC_AP0R1_EL1),
+ SR(SYS_ICC_AP0R2_EL1),
+ SR(SYS_ICC_AP0R3_EL1),
+ SR(SYS_ICC_AP1R0_EL1),
+ SR(SYS_ICC_AP1R1_EL1),
+ SR(SYS_ICC_AP1R2_EL1),
+ SR(SYS_ICC_AP1R3_EL1),
+ SR(SYS_ICC_BPR1_EL1),
+ SR(SYS_ICC_CTLR_EL1),
+ SR(SYS_ICC_SRE_EL1),
+ SR(SYS_ICC_IGRPEN0_EL1),
+ SR(SYS_ICC_IGRPEN1_EL1),
+};
+
+static const struct sr_def sysregs_el2[] = {
+ SR(SYS_ICH_AP0R0_EL2),
+ SR(SYS_ICH_AP0R1_EL2),
+ SR(SYS_ICH_AP0R2_EL2),
+ SR(SYS_ICH_AP0R3_EL2),
+ SR(SYS_ICH_AP1R0_EL2),
+ SR(SYS_ICH_AP1R1_EL2),
+ SR(SYS_ICH_AP1R2_EL2),
+ SR(SYS_ICH_AP1R3_EL2),
+ SR(SYS_ICH_HCR_EL2),
+ SR(SYS_ICC_SRE_EL2),
+ SR(SYS_ICH_VTR_EL2),
+ SR(SYS_ICH_VMCR_EL2),
+ SR(SYS_ICH_LR0_EL2),
+ SR(SYS_ICH_LR1_EL2),
+ SR(SYS_ICH_LR2_EL2),
+ SR(SYS_ICH_LR3_EL2),
+ SR(SYS_ICH_LR4_EL2),
+ SR(SYS_ICH_LR5_EL2),
+ SR(SYS_ICH_LR6_EL2),
+ SR(SYS_ICH_LR7_EL2),
+ SR(SYS_ICH_LR8_EL2),
+ SR(SYS_ICH_LR9_EL2),
+ SR(SYS_ICH_LR10_EL2),
+ SR(SYS_ICH_LR11_EL2),
+ SR(SYS_ICH_LR12_EL2),
+ SR(SYS_ICH_LR13_EL2),
+ SR(SYS_ICH_LR14_EL2),
+ SR(SYS_ICH_LR15_EL2),
+};
+
+static void test_sysreg_array(int gic, const struct sr_def *sr, int nr,
+ int (*check)(int, const struct sr_def *, const char *))
+{
+ for (int i = 0; i < nr; i++) {
+ u64 val;
+ u64 attr;
+ int ret;
+
+ /* Assume MPIDR_EL1.Aff*=0 */
+ attr = PACK_SR(sr[i].encoding);
+
+ /*
+ * The API is braindead. A register can be advertised as
+ * available, and yet not be readable or writable.
+ * ICC_APnR{1,2,3}_EL1 are examples of such nonsense, and
+ * ICH_APnR{1,2,3}_EL2 follow suit for consistency.
+ *
+ * On the bright side, no known HW is implementing more than
+ * 5 bits of priority, so we're safe. Sort of...
+ */
+ ret = __kvm_has_device_attr(gic, KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS,
+ attr);
+ TEST_ASSERT(ret == 0, "%s unavailable", sr[i].name);
+
+ /* Check that we can write back what we read */
+ ret = __kvm_device_attr_get(gic, KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS,
+ attr, &val);
+ TEST_ASSERT(ret == 0 || !check(gic, &sr[i], "read"), "%s unreadable", sr[i].name);
+ ret = __kvm_device_attr_set(gic, KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS,
+ attr, &val);
+ TEST_ASSERT(ret == 0 || !check(gic, &sr[i], "write"), "%s unwritable", sr[i].name);
+ }
+}
+
+static u8 get_ctlr_pribits(int gic)
+{
+ int ret;
+ u64 val;
+ u8 pri;
+
+ ret = __kvm_device_attr_get(gic, KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS,
+ PACK_SR(SYS_ICC_CTLR_EL1), &val);
+ TEST_ASSERT(ret == 0, "ICC_CTLR_EL1 unreadable");
+
+ pri = FIELD_GET(ICC_CTLR_EL1_PRI_BITS_MASK, val) + 1;
+ TEST_ASSERT(pri >= 5 && pri <= 7, "Bad pribits %d", pri);
+
+ return pri;
+}
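The pribits value determines which active-priority registers exist, which is what the checks below key off. A sketch of the relationship, restated from the GICv3 architecture as an assumption (nr_apr_regs() is a hypothetical helper, not part of the patch):

/* Sketch: 5 priority bits imply 1 APR register, 6 imply 2, 7 imply 4. */
static int nr_apr_regs(u8 pribits)
{
	return 1 << (pribits - 5);
}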
+
+static int check_unaccessible_el1_regs(int gic, const struct sr_def *sr, const char *what)
+{
+ switch (sr->encoding) {
+ case SYS_ICC_AP0R1_EL1:
+ case SYS_ICC_AP1R1_EL1:
+ if (get_ctlr_pribits(gic) >= 6)
+ return -EINVAL;
+ break;
+ case SYS_ICC_AP0R2_EL1:
+ case SYS_ICC_AP0R3_EL1:
+ case SYS_ICC_AP1R2_EL1:
+ case SYS_ICC_AP1R3_EL1:
+ if (get_ctlr_pribits(gic) == 7)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ pr_info("SKIP %s for %s\n", sr->name, what);
+ return 0;
+}
+
+static u8 get_vtr_pribits(int gic)
+{
+ int ret;
+ u64 val;
+ u8 pri;
+
+ ret = __kvm_device_attr_get(gic, KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS,
+ PACK_SR(SYS_ICH_VTR_EL2), &val);
+ TEST_ASSERT(ret == 0, "ICH_VTR_EL2 unreadable");
+
+ pri = FIELD_GET(ICH_VTR_EL2_PRIbits, val) + 1;
+ TEST_ASSERT(pri >= 5 && pri <= 7, "Bad pribits %d", pri);
+
+ return pri;
+}
+
+static int check_unaccessible_el2_regs(int gic, const struct sr_def *sr, const char *what)
+{
+ switch (sr->encoding) {
+ case SYS_ICH_AP0R1_EL2:
+ case SYS_ICH_AP1R1_EL2:
+ if (get_vtr_pribits(gic) >= 6)
+ return -EINVAL;
+ break;
+ case SYS_ICH_AP0R2_EL2:
+ case SYS_ICH_AP0R3_EL2:
+ case SYS_ICH_AP1R2_EL2:
+ case SYS_ICH_AP1R3_EL2:
+ if (get_vtr_pribits(gic) == 7)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ pr_info("SKIP %s for %s\n", sr->name, what);
+ return 0;
+}
+
+static void test_v3_sysregs(void)
+{
+ struct kvm_vcpu_init init = {};
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+ u32 feat = 0;
+ int gic;
+
+ if (kvm_check_cap(KVM_CAP_ARM_EL2))
+ feat |= BIT(KVM_ARM_VCPU_HAS_EL2);
+
+ vm = vm_create(1);
+
+ vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);
+ init.features[0] |= feat;
+
+ vcpu = aarch64_vcpu_add(vm, 0, &init, NULL);
+ TEST_ASSERT(vcpu, "Can't create a vcpu?");
+
+ gic = kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3);
+ TEST_ASSERT(gic >= 0, "No GIC???");
+
+ kvm_device_attr_set(gic, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
+
+ test_sysreg_array(gic, sysregs_el1, ARRAY_SIZE(sysregs_el1), check_unaccessible_el1_regs);
+ if (feat)
+ test_sysreg_array(gic, sysregs_el2, ARRAY_SIZE(sysregs_el2), check_unaccessible_el2_regs);
+ else
+ pr_info("SKIP EL2 registers, not available\n");
+
+ close(gic);
+ kvm_vm_free(vm);
+}
+
void run_tests(uint32_t gic_dev_type)
{
test_vcpus_then_vgic(gic_dev_type);
@@ -730,6 +983,8 @@ void run_tests(uint32_t gic_dev_type)
test_v3_last_bit_single_rdist();
test_v3_redist_ipa_range_check_at_vcpu_run();
test_v3_its_region();
+ test_v3_sysregs();
+ test_v3_nassgicap();
}
}
@@ -739,6 +994,8 @@ int main(int ac, char **av)
int pa_bits;
int cnt_impl = 0;
+ test_disable_default_vgic();
+
pa_bits = vm_guest_mode_params[VM_MODE_DEFAULT].pa_bits;
max_phys_size = 1ULL << pa_bits;
diff --git a/tools/testing/selftests/kvm/arm64/vgic_irq.c b/tools/testing/selftests/kvm/arm64/vgic_irq.c
index f4ac28d53747..2fb2c7939fe9 100644
--- a/tools/testing/selftests/kvm/arm64/vgic_irq.c
+++ b/tools/testing/selftests/kvm/arm64/vgic_irq.c
@@ -29,6 +29,7 @@ struct test_args {
bool level_sensitive; /* 1 is level, 0 is edge */
int kvm_max_routes; /* output of KVM_CAP_IRQ_ROUTING */
bool kvm_supports_irqfd; /* output of KVM_CAP_IRQFD */
+ uint32_t shared_data;
};
/*
@@ -205,7 +206,7 @@ static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
do { \
uint32_t _intid; \
_intid = gic_get_and_ack_irq(); \
- GUEST_ASSERT(_intid == 0 || _intid == IAR_SPURIOUS); \
+ GUEST_ASSERT(_intid == IAR_SPURIOUS); \
} while (0)
#define CAT_HELPER(a, b) a ## b
@@ -359,8 +360,9 @@ static uint32_t wait_for_and_activate_irq(void)
* interrupts for the whole test.
*/
static void test_inject_preemption(struct test_args *args,
- uint32_t first_intid, int num,
- kvm_inject_cmd cmd)
+ uint32_t first_intid, int num,
+ const unsigned long *exclude,
+ kvm_inject_cmd cmd)
{
uint32_t intid, prio, step = KVM_PRIO_STEPS;
int i;
@@ -379,6 +381,10 @@ static void test_inject_preemption(struct test_args *args,
for (i = 0; i < num; i++) {
uint32_t tmp;
intid = i + first_intid;
+
+ if (exclude && test_bit(i, exclude))
+ continue;
+
KVM_INJECT(cmd, intid);
/* Each successive IRQ will preempt the previous one. */
tmp = wait_for_and_activate_irq();
@@ -390,15 +396,33 @@ static void test_inject_preemption(struct test_args *args,
/* finish handling the IRQs starting with the highest priority one. */
for (i = 0; i < num; i++) {
intid = num - i - 1 + first_intid;
+
+ if (exclude && test_bit(intid - first_intid, exclude))
+ continue;
+
gic_set_eoi(intid);
- if (args->eoi_split)
- gic_set_dir(intid);
+ }
+
+ if (args->eoi_split) {
+ for (i = 0; i < num; i++) {
+ intid = i + first_intid;
+
+ if (exclude && test_bit(i, exclude))
+ continue;
+
+ gic_set_dir(intid);
+ }
}
local_irq_enable();
- for (i = 0; i < num; i++)
+ for (i = 0; i < num; i++) {
+ if (exclude && test_bit(i, exclude))
+ continue;
+
GUEST_ASSERT(!gic_irq_get_active(i + first_intid));
+ }
GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
GUEST_ASSERT_IAR_EMPTY();
@@ -436,33 +460,32 @@ static void test_injection_failure(struct test_args *args,
static void test_preemption(struct test_args *args, struct kvm_inject_desc *f)
{
- /*
- * Test up to 4 levels of preemption. The reason is that KVM doesn't
- * currently implement the ability to have more than the number-of-LRs
- * number of concurrently active IRQs. The number of LRs implemented is
- * IMPLEMENTATION DEFINED, however, it seems that most implement 4.
- */
+ /* Timer PPIs cannot be injected from userspace */
+ static const unsigned long ppi_exclude = (BIT(27 - MIN_PPI) |
+ BIT(30 - MIN_PPI) |
+ BIT(28 - MIN_PPI) |
+ BIT(26 - MIN_PPI));
+
if (f->sgi)
- test_inject_preemption(args, MIN_SGI, 4, f->cmd);
+ test_inject_preemption(args, MIN_SGI, 16, NULL, f->cmd);
if (f->ppi)
- test_inject_preemption(args, MIN_PPI, 4, f->cmd);
+ test_inject_preemption(args, MIN_PPI, 16, &ppi_exclude, f->cmd);
if (f->spi)
- test_inject_preemption(args, MIN_SPI, 4, f->cmd);
+ test_inject_preemption(args, MIN_SPI, 31, NULL, f->cmd);
}
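The exclusion bitmap above encodes the generic-timer PPIs by INTID. A hypothetical named spelling of the same mask, using the conventional timer INTID assignments, purely for illustration:

/* Illustrative names only; the patch hardcodes the INTIDs directly. */
#define HYP_PHYS_TIMER_PPI	26	/* EL2 physical timer */
#define VIRT_TIMER_PPI		27	/* EL1 virtual timer */
#define HYP_VIRT_TIMER_PPI	28	/* EL2 virtual timer */
#define PHYS_TIMER_PPI		30	/* EL1 physical timer */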
static void test_restore_active(struct test_args *args, struct kvm_inject_desc *f)
{
- /* Test up to 4 active IRQs. Same reason as in test_preemption. */
if (f->sgi)
- guest_restore_active(args, MIN_SGI, 4, f->cmd);
+ guest_restore_active(args, MIN_SGI, 16, f->cmd);
if (f->ppi)
- guest_restore_active(args, MIN_PPI, 4, f->cmd);
+ guest_restore_active(args, MIN_PPI, 16, f->cmd);
if (f->spi)
- guest_restore_active(args, MIN_SPI, 4, f->cmd);
+ guest_restore_active(args, MIN_SPI, 31, f->cmd);
}
static void guest_code(struct test_args *args)
@@ -473,12 +496,12 @@ static void guest_code(struct test_args *args)
gic_init(GIC_V3, 1);
- for (i = 0; i < nr_irqs; i++)
- gic_irq_enable(i);
-
for (i = MIN_SPI; i < nr_irqs; i++)
gic_irq_set_config(i, !level_sensitive);
+ for (i = 0; i < nr_irqs; i++)
+ gic_irq_enable(i);
+
gic_set_eoi_split(args->eoi_split);
reset_priorities(args);
@@ -620,18 +643,12 @@ static void kvm_routing_and_irqfd_check(struct kvm_vm *vm,
* that no actual interrupt was injected for those cases.
*/
- for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
- fd[f] = eventfd(0, 0);
- TEST_ASSERT(fd[f] != -1, __KVM_SYSCALL_ERROR("eventfd()", fd[f]));
- }
+ for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++)
+ fd[f] = kvm_new_eventfd();
for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
- struct kvm_irqfd irqfd = {
- .fd = fd[f],
- .gsi = i - MIN_SPI,
- };
assert(i <= (uint64_t)UINT_MAX);
- vm_ioctl(vm, KVM_IRQFD, &irqfd);
+ kvm_assign_irqfd(vm, i - MIN_SPI, fd[f]);
}
for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
@@ -642,7 +659,7 @@ static void kvm_routing_and_irqfd_check(struct kvm_vm *vm,
}
for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++)
- close(fd[f]);
+ kvm_close(fd[f]);
}
/* handles the valid case: intid=0xffffffff num=1 */
@@ -758,7 +775,6 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
vcpu_args_set(vcpu, 1, args_gva);
gic_fd = vgic_v3_setup(vm, 1, nr_irqs);
- __TEST_REQUIRE(gic_fd >= 0, "Failed to create vgic-v3, skipping");
vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT,
guest_irq_handlers[args.eoi_split][args.level_sensitive]);
@@ -786,6 +802,221 @@ done:
kvm_vm_free(vm);
}
+static void guest_code_asym_dir(struct test_args *args, int cpuid)
+{
+ gic_init(GIC_V3, 2);
+
+ gic_set_eoi_split(1);
+ gic_set_priority_mask(CPU_PRIO_MASK);
+
+ if (cpuid == 0) {
+ uint32_t intid;
+
+ local_irq_disable();
+
+ gic_set_priority(MIN_SPI, IRQ_DEFAULT_PRIO);
+ gic_irq_enable(MIN_SPI);
+ gic_irq_set_pending(MIN_SPI);
+
+ intid = wait_for_and_activate_irq();
+ GUEST_ASSERT_EQ(intid, MIN_SPI);
+
+ gic_set_eoi(intid);
+ isb();
+
+ WRITE_ONCE(args->shared_data, MIN_SPI);
+ dsb(ishst);
+
+ do {
+ dsb(ishld);
+ } while (READ_ONCE(args->shared_data) == MIN_SPI);
+ GUEST_ASSERT(!gic_irq_get_active(MIN_SPI));
+ } else {
+ do {
+ dsb(ishld);
+ } while (READ_ONCE(args->shared_data) != MIN_SPI);
+
+ gic_set_dir(MIN_SPI);
+ isb();
+
+ WRITE_ONCE(args->shared_data, 0);
+ dsb(ishst);
+ }
+
+ GUEST_DONE();
+}
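Both sides of the handshake above spin on the same mailbox. A helper-shaped sketch of that wait, assuming the shared args page is mapped coherently on both vCPUs as in the guest code:

/* Sketch: DSB-ordered mailbox wait, as open-coded in the guest above. */
static void wait_for_mailbox(struct test_args *args, uint32_t want)
{
	do {
		dsb(ishld);
	} while (READ_ONCE(args->shared_data) != want);
}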
+
+static void guest_code_group_en(struct test_args *args, int cpuid)
+{
+ uint32_t intid;
+
+ gic_init(GIC_V3, 2);
+
+ gic_set_eoi_split(0);
+ gic_set_priority_mask(CPU_PRIO_MASK);
+ /* SGI0 is G0, which is disabled */
+ gic_irq_set_group(0, 0);
+
+ /* Configure all SGIs with decreasing priority */
+ for (intid = 0; intid < MIN_PPI; intid++) {
+ gic_set_priority(intid, (intid + 1) * 8);
+ gic_irq_enable(intid);
+ gic_irq_set_pending(intid);
+ }
+
+ /* Ack and EOI all G1 interrupts */
+ for (int i = 1; i < MIN_PPI; i++) {
+ intid = wait_for_and_activate_irq();
+
+ GUEST_ASSERT(intid < MIN_PPI);
+ gic_set_eoi(intid);
+ isb();
+ }
+
+ /*
+ * Check that SGI0 is still pending, inactive, and that we cannot
+ * ack anything.
+ */
+ GUEST_ASSERT(gic_irq_get_pending(0));
+ GUEST_ASSERT(!gic_irq_get_active(0));
+ GUEST_ASSERT_IAR_EMPTY();
+ GUEST_ASSERT(read_sysreg_s(SYS_ICC_IAR0_EL1) == IAR_SPURIOUS);
+
+ /* Open the G0 gates, and verify we can ack SGI0 */
+ write_sysreg_s(1, SYS_ICC_IGRPEN0_EL1);
+ isb();
+
+ do {
+ intid = read_sysreg_s(SYS_ICC_IAR0_EL1);
+ } while (intid == IAR_SPURIOUS);
+
+ GUEST_ASSERT(intid == 0);
+ GUEST_DONE();
+}
+
+static void guest_code_timer_spi(struct test_args *args, int cpuid)
+{
+ uint32_t intid;
+ u64 val;
+
+ gic_init(GIC_V3, 2);
+
+ gic_set_eoi_split(1);
+ gic_set_priority_mask(CPU_PRIO_MASK);
+
+ /* Add a pending SPI so that KVM starts trapping DIR */
+ gic_set_priority(MIN_SPI + cpuid, IRQ_DEFAULT_PRIO);
+ gic_irq_set_pending(MIN_SPI + cpuid);
+
+ /* Configure the timer with a higher priority, make it pending */
+ gic_set_priority(27, IRQ_DEFAULT_PRIO - 8);
+
+ isb();
+ val = read_sysreg(cntvct_el0);
+ write_sysreg(val, cntv_cval_el0);
+ write_sysreg(1, cntv_ctl_el0);
+ isb();
+
+ GUEST_ASSERT(gic_irq_get_pending(27));
+
+ /* Enable both interrupts */
+ gic_irq_enable(MIN_SPI + cpuid);
+ gic_irq_enable(27);
+
+ /* The timer must fire */
+ intid = wait_for_and_activate_irq();
+ GUEST_ASSERT(intid == 27);
+
+ /* Check that we can deassert it */
+ write_sysreg(0, cntv_ctl_el0);
+ isb();
+
+ GUEST_ASSERT(!gic_irq_get_pending(27));
+
+ /*
+ * Priority drop, deactivation -- we expect that the host
+ * deactivation will have been effective
+ */
+ gic_set_eoi(27);
+ gic_set_dir(27);
+
+ GUEST_ASSERT(!gic_irq_get_active(27));
+
+ /* Do it one more time */
+ isb();
+ val = read_sysreg(cntvct_el0);
+ write_sysreg(val, cntv_cval_el0);
+ write_sysreg(1, cntv_ctl_el0);
+ isb();
+
+ GUEST_ASSERT(gic_irq_get_pending(27));
+
+ /* The timer must fire again */
+ intid = wait_for_and_activate_irq();
+ GUEST_ASSERT(intid == 27);
+
+ GUEST_DONE();
+}
+
+static void *test_vcpu_run(void *arg)
+{
+ struct kvm_vcpu *vcpu = arg;
+ struct ucall uc;
+
+ while (1) {
+ vcpu_run(vcpu);
+
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT(uc);
+ break;
+ case UCALL_DONE:
+ return NULL;
+ default:
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
+ }
+ }
+
+ return NULL;
+}
+
+static void test_vgic_two_cpus(void *gcode)
+{
+ pthread_t thr[2];
+ struct kvm_vcpu *vcpus[2];
+ struct test_args args = {};
+ struct kvm_vm *vm;
+ vm_vaddr_t args_gva;
+ int gic_fd, ret;
+
+ vm = vm_create_with_vcpus(2, gcode, vcpus);
+
+ vm_init_descriptor_tables(vm);
+ vcpu_init_descriptor_tables(vcpus[0]);
+ vcpu_init_descriptor_tables(vcpus[1]);
+
+ /* Setup the guest args page (so it gets the args). */
+ args_gva = vm_vaddr_alloc_page(vm);
+ memcpy(addr_gva2hva(vm, args_gva), &args, sizeof(args));
+ vcpu_args_set(vcpus[0], 2, args_gva, 0);
+ vcpu_args_set(vcpus[1], 2, args_gva, 1);
+
+ gic_fd = vgic_v3_setup(vm, 2, 64);
+
+ ret = pthread_create(&thr[0], NULL, test_vcpu_run, vcpus[0]);
+ if (ret)
+ TEST_FAIL("Can't create thread for vcpu 0 (%d)\n", ret);
+ ret = pthread_create(&thr[1], NULL, test_vcpu_run, vcpus[1]);
+ if (ret)
+ TEST_FAIL("Can't create thread for vcpu 1 (%d)\n", ret);
+
+ pthread_join(thr[0], NULL);
+ pthread_join(thr[1], NULL);
+
+ close(gic_fd);
+ kvm_vm_free(vm);
+}
+
static void help(const char *name)
{
printf(
@@ -808,6 +1039,9 @@ int main(int argc, char **argv)
int opt;
bool eoi_split = false;
+ TEST_REQUIRE(kvm_supports_vgic_v3());
+ test_disable_default_vgic();
+
while ((opt = getopt(argc, argv, "hn:e:l:")) != -1) {
switch (opt) {
case 'n':
@@ -839,6 +1073,9 @@ int main(int argc, char **argv)
test_vgic(nr_irqs, false /* level */, true /* eoi_split */);
test_vgic(nr_irqs, true /* level */, false /* eoi_split */);
test_vgic(nr_irqs, true /* level */, true /* eoi_split */);
+ test_vgic_two_cpus(guest_code_asym_dir);
+ test_vgic_two_cpus(guest_code_group_en);
+ test_vgic_two_cpus(guest_code_timer_spi);
} else {
test_vgic(nr_irqs, level_sensitive, eoi_split);
}
diff --git a/tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c b/tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c
index fc4fe52fb6f8..e857a605f577 100644
--- a/tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c
+++ b/tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c
@@ -27,7 +27,7 @@ static vm_paddr_t gpa_base;
static struct kvm_vm *vm;
static struct kvm_vcpu **vcpus;
-static int gic_fd, its_fd;
+static int its_fd;
static struct test_data {
bool request_vcpus_stop;
@@ -118,11 +118,16 @@ static void guest_setup_gic(void)
guest_setup_its_mappings();
guest_invalidate_all_rdists();
+
+ /* SYNC to ensure ITS setup is complete */
+ for (cpuid = 0; cpuid < test_data.nr_cpus; cpuid++)
+ its_send_sync_cmd(test_data.cmdq_base_va, cpuid);
}
static void guest_code(size_t nr_lpis)
{
guest_setup_gic();
+ local_irq_enable();
GUEST_SYNC(0);
@@ -214,9 +219,6 @@ static void setup_test_data(void)
static void setup_gic(void)
{
- gic_fd = vgic_v3_setup(vm, test_data.nr_cpus, 64);
- __TEST_REQUIRE(gic_fd >= 0, "Failed to create GICv3");
-
its_fd = vgic_its_setup(vm);
}
@@ -334,7 +336,7 @@ static void setup_vm(void)
{
int i;
- vcpus = malloc(test_data.nr_cpus * sizeof(struct kvm_vcpu));
+ vcpus = malloc(test_data.nr_cpus * sizeof(struct kvm_vcpu *));
TEST_ASSERT(vcpus, "Failed to allocate vCPU array");
vm = vm_create_with_vcpus(test_data.nr_cpus, guest_code, vcpus);
@@ -355,7 +357,6 @@ static void setup_vm(void)
static void destroy_vm(void)
{
close(its_fd);
- close(gic_fd);
kvm_vm_free(vm);
free(vcpus);
}
@@ -374,6 +375,8 @@ int main(int argc, char **argv)
u32 nr_threads;
int c;
+ TEST_REQUIRE(kvm_supports_vgic_v3());
+
while ((c = getopt(argc, argv, "hv:d:e:i:")) != -1) {
switch (c) {
case 'v':
diff --git a/tools/testing/selftests/kvm/arm64/vpmu_counter_access.c b/tools/testing/selftests/kvm/arm64/vpmu_counter_access.c
index f16b3b27e32e..ae36325c022f 100644
--- a/tools/testing/selftests/kvm/arm64/vpmu_counter_access.c
+++ b/tools/testing/selftests/kvm/arm64/vpmu_counter_access.c
@@ -28,7 +28,6 @@
struct vpmu_vm {
struct kvm_vm *vm;
struct kvm_vcpu *vcpu;
- int gic_fd;
};
static struct vpmu_vm vpmu_vm;
@@ -45,11 +44,6 @@ static uint64_t get_pmcr_n(uint64_t pmcr)
return FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
}
-static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
-{
- u64p_replace_bits((__u64 *) pmcr, pmcr_n, ARMV8_PMU_PMCR_N);
-}
-
static uint64_t get_counters_mask(uint64_t n)
{
uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);
@@ -415,10 +409,6 @@ static void create_vpmu_vm(void *guest_code)
.attr = KVM_ARM_VCPU_PMU_V3_IRQ,
.addr = (uint64_t)&irq,
};
- struct kvm_device_attr init_attr = {
- .group = KVM_ARM_VCPU_PMU_V3_CTRL,
- .attr = KVM_ARM_VCPU_PMU_V3_INIT,
- };
/* The test creates the vpmu_vm multiple times. Ensure a clean state */
memset(&vpmu_vm, 0, sizeof(vpmu_vm));
@@ -431,29 +421,25 @@ static void create_vpmu_vm(void *guest_code)
}
/* Create vCPU with PMUv3 */
- vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
+ kvm_get_default_vcpu_target(vpmu_vm.vm, &init);
init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
vpmu_vm.vcpu = aarch64_vcpu_add(vpmu_vm.vm, 0, &init, guest_code);
vcpu_init_descriptor_tables(vpmu_vm.vcpu);
- vpmu_vm.gic_fd = vgic_v3_setup(vpmu_vm.vm, 1, 64);
- __TEST_REQUIRE(vpmu_vm.gic_fd >= 0,
- "Failed to create vgic-v3, skipping");
+
+ kvm_arch_vm_finalize_vcpus(vpmu_vm.vm);
/* Make sure that PMUv3 support is indicated in the ID register */
dfr0 = vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1));
- pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), dfr0);
+ pmuver = FIELD_GET(ID_AA64DFR0_EL1_PMUVer, dfr0);
TEST_ASSERT(pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF &&
pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP,
"Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);
- /* Initialize vPMU */
vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &irq_attr);
- vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
}
static void destroy_vpmu_vm(void)
{
- close(vpmu_vm.gic_fd);
kvm_vm_free(vpmu_vm.vm);
}
@@ -475,33 +461,28 @@ static void run_vcpu(struct kvm_vcpu *vcpu, uint64_t pmcr_n)
}
}
-static void test_create_vpmu_vm_with_pmcr_n(uint64_t pmcr_n, bool expect_fail)
+static void test_create_vpmu_vm_with_nr_counters(unsigned int nr_counters, bool expect_fail)
{
struct kvm_vcpu *vcpu;
- uint64_t pmcr, pmcr_orig;
+ unsigned int prev;
+ int ret;
create_vpmu_vm(guest_code);
vcpu = vpmu_vm.vcpu;
- pmcr_orig = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0));
- pmcr = pmcr_orig;
+ prev = get_pmcr_n(vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0)));
- /*
- * Setting a larger value of PMCR.N should not modify the field, and
- * return a success.
- */
- set_pmcr_n(&pmcr, pmcr_n);
- vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), pmcr);
- pmcr = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0));
+ ret = __vcpu_device_attr_set(vcpu, KVM_ARM_VCPU_PMU_V3_CTRL,
+ KVM_ARM_VCPU_PMU_V3_SET_NR_COUNTERS, &nr_counters);
if (expect_fail)
- TEST_ASSERT(pmcr_orig == pmcr,
- "PMCR.N modified by KVM to a larger value (PMCR: 0x%lx) for pmcr_n: 0x%lx",
- pmcr, pmcr_n);
+ TEST_ASSERT(ret && errno == EINVAL,
+ "Setting more PMU counters (%u) than available (%u) unexpectedly succeeded",
+ nr_counters, prev);
else
- TEST_ASSERT(pmcr_n == get_pmcr_n(pmcr),
- "Failed to update PMCR.N to %lu (received: %lu)",
- pmcr_n, get_pmcr_n(pmcr));
+ TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret));
+
+ vcpu_device_attr_set(vcpu, KVM_ARM_VCPU_PMU_V3_CTRL, KVM_ARM_VCPU_PMU_V3_INIT, NULL);
}
/*
@@ -516,11 +497,11 @@ static void run_access_test(uint64_t pmcr_n)
pr_debug("Test with pmcr_n %lu\n", pmcr_n);
- test_create_vpmu_vm_with_pmcr_n(pmcr_n, false);
+ test_create_vpmu_vm_with_nr_counters(pmcr_n, false);
vcpu = vpmu_vm.vcpu;
/* Save the initial sp to restore them later to run the guest again */
- sp = vcpu_get_reg(vcpu, ARM64_CORE_REG(sp_el1));
+ sp = vcpu_get_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SP_EL1));
run_vcpu(vcpu, pmcr_n);
@@ -528,11 +509,11 @@ static void run_access_test(uint64_t pmcr_n)
* Reset and re-initialize the vCPU, and run the guest code again to
* check if PMCR_EL0.N is preserved.
*/
- vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
+ kvm_get_default_vcpu_target(vpmu_vm.vm, &init);
init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
aarch64_vcpu_setup(vcpu, &init);
vcpu_init_descriptor_tables(vcpu);
- vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), sp);
+ vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SP_EL1), sp);
vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
run_vcpu(vcpu, pmcr_n);
@@ -557,7 +538,7 @@ static void run_pmregs_validity_test(uint64_t pmcr_n)
uint64_t set_reg_id, clr_reg_id, reg_val;
uint64_t valid_counters_mask, max_counters_mask;
- test_create_vpmu_vm_with_pmcr_n(pmcr_n, false);
+ test_create_vpmu_vm_with_nr_counters(pmcr_n, false);
vcpu = vpmu_vm.vcpu;
valid_counters_mask = get_counters_mask(pmcr_n);
@@ -611,7 +592,7 @@ static void run_error_test(uint64_t pmcr_n)
{
pr_debug("Error test with pmcr_n %lu (larger than the host)\n", pmcr_n);
- test_create_vpmu_vm_with_pmcr_n(pmcr_n, true);
+ test_create_vpmu_vm_with_nr_counters(pmcr_n, true);
destroy_vpmu_vm();
}
@@ -629,11 +610,25 @@ static uint64_t get_pmcr_n_limit(void)
return get_pmcr_n(pmcr);
}
+static bool kvm_supports_nr_counters_attr(void)
+{
+ bool supported;
+
+ create_vpmu_vm(NULL);
+ supported = !__vcpu_has_device_attr(vpmu_vm.vcpu, KVM_ARM_VCPU_PMU_V3_CTRL,
+ KVM_ARM_VCPU_PMU_V3_SET_NR_COUNTERS);
+ destroy_vpmu_vm();
+
+ return supported;
+}
+
int main(void)
{
uint64_t i, pmcr_n;
TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3));
+ TEST_REQUIRE(kvm_supports_vgic_v3());
+ TEST_REQUIRE(kvm_supports_nr_counters_attr());
pmcr_n = get_pmcr_n_limit();
for (i = 0; i <= pmcr_n; i++) {
diff --git a/tools/testing/selftests/kvm/config b/tools/testing/selftests/kvm/config
index 8835fed09e9f..96d874b239eb 100644
--- a/tools/testing/selftests/kvm/config
+++ b/tools/testing/selftests/kvm/config
@@ -1,5 +1,6 @@
CONFIG_KVM=y
CONFIG_KVM_INTEL=y
CONFIG_KVM_AMD=y
+CONFIG_EVENTFD=y
CONFIG_USERFAULTFD=y
CONFIG_IDLE_PAGE_TRACKING=y
diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index e79817bd0e29..0a1ea1d1e2d8 100644
--- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -20,38 +20,6 @@
#include "guest_modes.h"
#include "ucall_common.h"
-#ifdef __aarch64__
-#include "arm64/vgic.h"
-
-static int gic_fd;
-
-static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
-{
- /*
- * The test can still run even if hardware does not support GICv3, as it
- * is only an optimization to reduce guest exits.
- */
- gic_fd = vgic_v3_setup(vm, nr_vcpus, 64);
-}
-
-static void arch_cleanup_vm(struct kvm_vm *vm)
-{
- if (gic_fd > 0)
- close(gic_fd);
-}
-
-#else /* __aarch64__ */
-
-static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
-{
-}
-
-static void arch_cleanup_vm(struct kvm_vm *vm)
-{
-}
-
-#endif
-
/* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop)*/
#define TEST_HOST_LOOP_N 2UL
@@ -166,8 +134,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
dirty_log_manual_caps);
- arch_setup_vm(vm, nr_vcpus);
-
/* Start the iterations */
iteration = 0;
host_quit = false;
@@ -285,7 +251,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
}
memstress_free_bitmaps(bitmaps, p->slots);
- arch_cleanup_vm(vm);
memstress_destroy_vm(vm);
}
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index 23593d9eeba9..d58a641b0e6a 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -585,6 +585,7 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, struct kvm_vcpu **vcpu,
log_mode_create_vm_done(vm);
*vcpu = vm_vcpu_add(vm, 0, guest_code);
+ kvm_arch_vm_finalize_vcpus(vm);
return vm;
}
diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c
index 91f05f78e824..f4644c9d2d3b 100644
--- a/tools/testing/selftests/kvm/get-reg-list.c
+++ b/tools/testing/selftests/kvm/get-reg-list.c
@@ -116,10 +116,13 @@ void __weak finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
}
#ifdef __aarch64__
-static void prepare_vcpu_init(struct vcpu_reg_list *c, struct kvm_vcpu_init *init)
+static void prepare_vcpu_init(struct kvm_vm *vm, struct vcpu_reg_list *c,
+ struct kvm_vcpu_init *init)
{
struct vcpu_reg_sublist *s;
+ vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, init);
+
for_each_sublist(c, s)
if (s->capability)
init->features[s->feature / 32] |= 1 << (s->feature % 32);
@@ -127,10 +130,10 @@ static void prepare_vcpu_init(struct vcpu_reg_list *c, struct kvm_vcpu_init *ini
static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm_vm *vm)
{
- struct kvm_vcpu_init init = { .target = -1, };
+ struct kvm_vcpu_init init;
struct kvm_vcpu *vcpu;
- prepare_vcpu_init(c, &init);
+ prepare_vcpu_init(vm, c, &init);
vcpu = __vm_vcpu_add(vm, 0);
aarch64_vcpu_setup(vcpu, &init);
diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c
index ce687f8d248f..618c937f3c90 100644
--- a/tools/testing/selftests/kvm/guest_memfd_test.c
+++ b/tools/testing/selftests/kvm/guest_memfd_test.c
@@ -13,14 +13,19 @@
#include <linux/bitmap.h>
#include <linux/falloc.h>
+#include <linux/sizes.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "kvm_util.h"
+#include "numaif.h"
#include "test_util.h"
+#include "ucall_common.h"
-static void test_file_read_write(int fd)
+static size_t page_size;
+
+static void test_file_read_write(int fd, size_t total_size)
{
char buf[64];
@@ -34,15 +39,177 @@ static void test_file_read_write(int fd)
"pwrite on a guest_mem fd should fail");
}
-static void test_mmap(int fd, size_t page_size)
+static void test_mmap_cow(int fd, size_t size)
+{
+ void *mem;
+
+ mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
+ TEST_ASSERT(mem == MAP_FAILED, "Copy-on-write not allowed by guest_memfd.");
+}
+
+static void test_mmap_supported(int fd, size_t total_size)
+{
+ const char val = 0xaa;
+ char *mem;
+ size_t i;
+ int ret;
+
+ mem = kvm_mmap(total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);
+
+ memset(mem, val, total_size);
+ for (i = 0; i < total_size; i++)
+ TEST_ASSERT_EQ(READ_ONCE(mem[i]), val);
+
+ ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0,
+ page_size);
+ TEST_ASSERT(!ret, "fallocate the first page should succeed.");
+
+ for (i = 0; i < page_size; i++)
+ TEST_ASSERT_EQ(READ_ONCE(mem[i]), 0x00);
+ for (; i < total_size; i++)
+ TEST_ASSERT_EQ(READ_ONCE(mem[i]), val);
+
+ memset(mem, val, page_size);
+ for (i = 0; i < total_size; i++)
+ TEST_ASSERT_EQ(READ_ONCE(mem[i]), val);
+
+ kvm_munmap(mem, total_size);
+}
+
+static void test_mbind(int fd, size_t total_size)
+{
+ const unsigned long nodemask_0 = 1; /* nid: 0 */
+ unsigned long nodemask = 0;
+ unsigned long maxnode = 8;
+ int policy;
+ char *mem;
+ int ret;
+
+ if (!is_multi_numa_node_system())
+ return;
+
+ mem = kvm_mmap(total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);
+
+ /* Test MPOL_INTERLEAVE policy */
+ kvm_mbind(mem, page_size * 2, MPOL_INTERLEAVE, &nodemask_0, maxnode, 0);
+ kvm_get_mempolicy(&policy, &nodemask, maxnode, mem, MPOL_F_ADDR);
+ TEST_ASSERT(policy == MPOL_INTERLEAVE && nodemask == nodemask_0,
+ "Wanted MPOL_INTERLEAVE (%u) and nodemask 0x%lx, got %u and 0x%lx",
+ MPOL_INTERLEAVE, nodemask_0, policy, nodemask);
+
+ /* Test basic MPOL_BIND policy */
+ kvm_mbind(mem + page_size * 2, page_size * 2, MPOL_BIND, &nodemask_0, maxnode, 0);
+ kvm_get_mempolicy(&policy, &nodemask, maxnode, mem + page_size * 2, MPOL_F_ADDR);
+ TEST_ASSERT(policy == MPOL_BIND && nodemask == nodemask_0,
+ "Wanted MPOL_BIND (%u) and nodemask 0x%lx, got %u and 0x%lx",
+ MPOL_BIND, nodemask_0, policy, nodemask);
+
+ /* Test MPOL_DEFAULT policy */
+ kvm_mbind(mem, total_size, MPOL_DEFAULT, NULL, 0, 0);
+ kvm_get_mempolicy(&policy, &nodemask, maxnode, mem, MPOL_F_ADDR);
+ TEST_ASSERT(policy == MPOL_DEFAULT && !nodemask,
+ "Wanted MPOL_DEFAULT (%u) and nodemask 0x0, got %u and 0x%lx",
+ MPOL_DEFAULT, policy, nodemask);
+
+ /* Test with invalid policy */
+ ret = mbind(mem, page_size, 999, &nodemask_0, maxnode, 0);
+ TEST_ASSERT(ret == -1 && errno == EINVAL,
+ "mbind with invalid policy should fail with EINVAL");
+
+ kvm_munmap(mem, total_size);
+}
+
+static void test_numa_allocation(int fd, size_t total_size)
+{
+ unsigned long node0_mask = 1; /* Node 0 */
+ unsigned long node1_mask = 2; /* Node 1 */
+ unsigned long maxnode = 8;
+ void *pages[4];
+ int status[4];
+ char *mem;
+ int i;
+
+ if (!is_multi_numa_node_system())
+ return;
+
+ mem = kvm_mmap(total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);
+
+ for (i = 0; i < 4; i++)
+ pages[i] = (char *)mem + page_size * i;
+
+ /* Set NUMA policy after allocation */
+ memset(mem, 0xaa, page_size);
+ kvm_mbind(pages[0], page_size, MPOL_BIND, &node0_mask, maxnode, 0);
+ kvm_fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, page_size);
+
+ /* Set NUMA policy before allocation */
+ kvm_mbind(pages[0], page_size * 2, MPOL_BIND, &node1_mask, maxnode, 0);
+ kvm_mbind(pages[2], page_size * 2, MPOL_BIND, &node0_mask, maxnode, 0);
+ memset(mem, 0xaa, total_size);
+
+ /* Validate if pages are allocated on specified NUMA nodes */
+ kvm_move_pages(0, 4, pages, NULL, status, 0);
+ TEST_ASSERT(status[0] == 1, "Expected page 0 on node 1, got it on node %d", status[0]);
+ TEST_ASSERT(status[1] == 1, "Expected page 1 on node 1, got it on node %d", status[1]);
+ TEST_ASSERT(status[2] == 0, "Expected page 2 on node 0, got it on node %d", status[2]);
+ TEST_ASSERT(status[3] == 0, "Expected page 3 on node 0, got it on node %d", status[3]);
+
+ /* Punch hole for all pages */
+ kvm_fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, total_size);
+
+ /* Change NUMA policy nodes and reallocate */
+ kvm_mbind(pages[0], page_size * 2, MPOL_BIND, &node0_mask, maxnode, 0);
+ kvm_mbind(pages[2], page_size * 2, MPOL_BIND, &node1_mask, maxnode, 0);
+ memset(mem, 0xaa, total_size);
+
+ kvm_move_pages(0, 4, pages, NULL, status, 0);
+ TEST_ASSERT(status[0] == 0, "Expected page 0 on node 0, got it on node %d", status[0]);
+ TEST_ASSERT(status[1] == 0, "Expected page 1 on node 0, got it on node %d", status[1]);
+ TEST_ASSERT(status[2] == 1, "Expected page 2 on node 1, got it on node %d", status[2]);
+ TEST_ASSERT(status[3] == 1, "Expected page 3 on node 1, got it on node %d", status[3]);
+
+ kvm_munmap(mem, total_size);
+}
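The status-only mode of move_pages(2) is what makes the node assertions possible; a minimal sketch of that query pattern (kvm_move_pages() is the asserting wrapper used above):

/* Sketch: with nodes == NULL, move_pages() only reports locations. */
int status[4];

kvm_move_pages(0 /* self */, 4, pages, NULL /* don't migrate */, status, 0);
/* status[i] now holds the NUMA node backing pages[i], or -errno. */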
+
+static void test_fault_sigbus(int fd, size_t accessible_size, size_t map_size)
+{
+ const char val = 0xaa;
+ char *mem;
+ size_t i;
+
+ mem = kvm_mmap(map_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);
+
+ TEST_EXPECT_SIGBUS(memset(mem, val, map_size));
+ TEST_EXPECT_SIGBUS((void)READ_ONCE(mem[accessible_size]));
+
+ for (i = 0; i < accessible_size; i++)
+ TEST_ASSERT_EQ(READ_ONCE(mem[i]), val);
+
+ kvm_munmap(mem, map_size);
+}
+
+static void test_fault_overflow(int fd, size_t total_size)
+{
+ test_fault_sigbus(fd, total_size, total_size * 4);
+}
+
+static void test_fault_private(int fd, size_t total_size)
+{
+ test_fault_sigbus(fd, 0, total_size);
+}
+
+static void test_mmap_not_supported(int fd, size_t total_size)
{
char *mem;
mem = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
TEST_ASSERT_EQ(mem, MAP_FAILED);
+
+ mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ TEST_ASSERT_EQ(mem, MAP_FAILED);
}
-static void test_file_size(int fd, size_t page_size, size_t total_size)
+static void test_file_size(int fd, size_t total_size)
{
struct stat sb;
int ret;
@@ -53,7 +220,7 @@ static void test_file_size(int fd, size_t page_size, size_t total_size)
TEST_ASSERT_EQ(sb.st_blksize, page_size);
}
-static void test_fallocate(int fd, size_t page_size, size_t total_size)
+static void test_fallocate(int fd, size_t total_size)
{
int ret;
@@ -90,7 +257,7 @@ static void test_fallocate(int fd, size_t page_size, size_t total_size)
TEST_ASSERT(!ret, "fallocate to restore punched hole should succeed");
}
-static void test_invalid_punch_hole(int fd, size_t page_size, size_t total_size)
+static void test_invalid_punch_hole(int fd, size_t total_size)
{
struct {
off_t offset;
@@ -120,26 +287,18 @@ static void test_invalid_punch_hole(int fd, size_t page_size, size_t total_size)
}
}
-static void test_create_guest_memfd_invalid(struct kvm_vm *vm)
+static void test_create_guest_memfd_invalid_sizes(struct kvm_vm *vm,
+ uint64_t guest_memfd_flags)
{
- size_t page_size = getpagesize();
- uint64_t flag;
size_t size;
int fd;
for (size = 1; size < page_size; size++) {
- fd = __vm_create_guest_memfd(vm, size, 0);
- TEST_ASSERT(fd == -1 && errno == EINVAL,
+ fd = __vm_create_guest_memfd(vm, size, guest_memfd_flags);
+ TEST_ASSERT(fd < 0 && errno == EINVAL,
"guest_memfd() with non-page-aligned page size '0x%lx' should fail with EINVAL",
size);
}
-
- for (flag = BIT(0); flag; flag <<= 1) {
- fd = __vm_create_guest_memfd(vm, page_size, flag);
- TEST_ASSERT(fd == -1 && errno == EINVAL,
- "guest_memfd() with flag '0x%lx' should fail with EINVAL",
- flag);
- }
}
static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
@@ -147,53 +306,187 @@ static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
int fd1, fd2, ret;
struct stat st1, st2;
- fd1 = __vm_create_guest_memfd(vm, 4096, 0);
+ fd1 = __vm_create_guest_memfd(vm, page_size, 0);
TEST_ASSERT(fd1 != -1, "memfd creation should succeed");
ret = fstat(fd1, &st1);
TEST_ASSERT(ret != -1, "memfd fstat should succeed");
- TEST_ASSERT(st1.st_size == 4096, "memfd st_size should match requested size");
+ TEST_ASSERT(st1.st_size == page_size, "memfd st_size should match requested size");
- fd2 = __vm_create_guest_memfd(vm, 8192, 0);
+ fd2 = __vm_create_guest_memfd(vm, page_size * 2, 0);
TEST_ASSERT(fd2 != -1, "memfd creation should succeed");
ret = fstat(fd2, &st2);
TEST_ASSERT(ret != -1, "memfd fstat should succeed");
- TEST_ASSERT(st2.st_size == 8192, "second memfd st_size should match requested size");
+ TEST_ASSERT(st2.st_size == page_size * 2, "second memfd st_size should match requested size");
ret = fstat(fd1, &st1);
TEST_ASSERT(ret != -1, "memfd fstat should succeed");
- TEST_ASSERT(st1.st_size == 4096, "first memfd st_size should still match requested size");
+ TEST_ASSERT(st1.st_size == page_size, "first memfd st_size should still match requested size");
TEST_ASSERT(st1.st_ino != st2.st_ino, "different memfd should have different inode numbers");
close(fd2);
close(fd1);
}
-int main(int argc, char *argv[])
+static void test_guest_memfd_flags(struct kvm_vm *vm)
{
- size_t page_size;
- size_t total_size;
+ uint64_t valid_flags = vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS);
+ uint64_t flag;
int fd;
+
+ for (flag = BIT(0); flag; flag <<= 1) {
+ fd = __vm_create_guest_memfd(vm, page_size, flag);
+ if (flag & valid_flags) {
+ TEST_ASSERT(fd >= 0,
+ "guest_memfd() with flag '0x%lx' should succeed",
+ flag);
+ close(fd);
+ } else {
+ TEST_ASSERT(fd < 0 && errno == EINVAL,
+ "guest_memfd() with flag '0x%lx' should fail with EINVAL",
+ flag);
+ }
+ }
+}
+
+#define gmem_test(__test, __vm, __flags) \
+do { \
+ int fd = vm_create_guest_memfd(__vm, page_size * 4, __flags); \
+ \
+ test_##__test(fd, page_size * 4); \
+ close(fd); \
+} while (0)
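Token pasting makes each gmem_test() invocation a fresh fd plus exactly one test_* call. Roughly, gmem_test(mmap_cow, vm, flags) expands to the statement sketched here:

/* Approximate expansion of gmem_test(mmap_cow, vm, flags): */
do {
	int fd = vm_create_guest_memfd(vm, page_size * 4, flags);

	test_mmap_cow(fd, page_size * 4);
	close(fd);
} while (0);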
+
+static void __test_guest_memfd(struct kvm_vm *vm, uint64_t flags)
+{
+ test_create_guest_memfd_multiple(vm);
+ test_create_guest_memfd_invalid_sizes(vm, flags);
+
+ gmem_test(file_read_write, vm, flags);
+
+ if (flags & GUEST_MEMFD_FLAG_MMAP) {
+ if (flags & GUEST_MEMFD_FLAG_INIT_SHARED) {
+ gmem_test(mmap_supported, vm, flags);
+ gmem_test(fault_overflow, vm, flags);
+ gmem_test(numa_allocation, vm, flags);
+ } else {
+ gmem_test(fault_private, vm, flags);
+ }
+
+ gmem_test(mmap_cow, vm, flags);
+ gmem_test(mbind, vm, flags);
+ } else {
+ gmem_test(mmap_not_supported, vm, flags);
+ }
+
+ gmem_test(file_size, vm, flags);
+ gmem_test(fallocate, vm, flags);
+ gmem_test(invalid_punch_hole, vm, flags);
+}
+
+static void test_guest_memfd(unsigned long vm_type)
+{
+ struct kvm_vm *vm = vm_create_barebones_type(vm_type);
+ uint64_t flags;
+
+ test_guest_memfd_flags(vm);
+
+ __test_guest_memfd(vm, 0);
+
+ flags = vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS);
+ if (flags & GUEST_MEMFD_FLAG_MMAP)
+ __test_guest_memfd(vm, GUEST_MEMFD_FLAG_MMAP);
+
+ /* MMAP should always be supported if INIT_SHARED is supported. */
+ if (flags & GUEST_MEMFD_FLAG_INIT_SHARED)
+ __test_guest_memfd(vm, GUEST_MEMFD_FLAG_MMAP |
+ GUEST_MEMFD_FLAG_INIT_SHARED);
+
+ kvm_vm_free(vm);
+}
+
+static void guest_code(uint8_t *mem, uint64_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ __GUEST_ASSERT(mem[i] == 0xaa,
+ "Guest expected 0xaa at offset %lu, got 0x%x", i, mem[i]);
+
+ memset(mem, 0xff, size);
+ GUEST_DONE();
+}
+
+static void test_guest_memfd_guest(void)
+{
+ /*
+ * Skip the first 4gb and slot0. slot0 maps <1gb and is used to back
+ * the guest's code, stack, and page tables, and low memory contains
+ * the PCI hole and other MMIO regions that need to be avoided.
+ */
+ const uint64_t gpa = SZ_4G;
+ const int slot = 1;
+
+ struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
+ uint8_t *mem;
+ size_t size;
+ int fd, i;
- TEST_REQUIRE(kvm_has_cap(KVM_CAP_GUEST_MEMFD));
+ if (!kvm_check_cap(KVM_CAP_GUEST_MEMFD_FLAGS))
+ return;
- page_size = getpagesize();
- total_size = page_size * 4;
+ vm = __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, &vcpu, 1, guest_code);
- vm = vm_create_barebones();
+ TEST_ASSERT(vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS) & GUEST_MEMFD_FLAG_MMAP,
+ "Default VM type should support MMAP, supported flags = 0x%x",
+ vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS));
+ TEST_ASSERT(vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS) & GUEST_MEMFD_FLAG_INIT_SHARED,
+ "Default VM type should support INIT_SHARED, supported flags = 0x%x",
+ vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS));
- test_create_guest_memfd_invalid(vm);
- test_create_guest_memfd_multiple(vm);
+ size = vm->page_size;
+ fd = vm_create_guest_memfd(vm, size, GUEST_MEMFD_FLAG_MMAP |
+ GUEST_MEMFD_FLAG_INIT_SHARED);
+ vm_set_user_memory_region2(vm, slot, KVM_MEM_GUEST_MEMFD, gpa, size, NULL, fd, 0);
+
+ mem = kvm_mmap(size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);
+ memset(mem, 0xaa, size);
+ kvm_munmap(mem, size);
- fd = vm_create_guest_memfd(vm, total_size, 0);
+ virt_pg_map(vm, gpa, gpa);
+ vcpu_args_set(vcpu, 2, gpa, size);
+ vcpu_run(vcpu);
- test_file_read_write(fd);
- test_mmap(fd, page_size);
- test_file_size(fd, page_size, total_size);
- test_fallocate(fd, page_size, total_size);
- test_invalid_punch_hole(fd, page_size, total_size);
+ TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
+
+ mem = kvm_mmap(size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);
+ for (i = 0; i < size; i++)
+ TEST_ASSERT_EQ(mem[i], 0xff);
close(fd);
+ kvm_vm_free(vm);
+}
+
+int main(int argc, char *argv[])
+{
+ unsigned long vm_types, vm_type;
+
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_GUEST_MEMFD));
+
+ page_size = getpagesize();
+
+ /*
+ * Not all architectures support KVM_CAP_VM_TYPES. However, those that
+ * support guest_memfd have that support for the default VM type.
+ */
+ vm_types = kvm_check_cap(KVM_CAP_VM_TYPES);
+ if (!vm_types)
+ vm_types = BIT(VM_TYPE_DEFAULT);
+
+ for_each_set_bit(vm_type, &vm_types, BITS_PER_TYPE(vm_types))
+ test_guest_memfd(vm_type);
+
+ test_guest_memfd_guest();
}
diff --git a/tools/testing/selftests/kvm/include/arm64/arch_timer.h b/tools/testing/selftests/kvm/include/arm64/arch_timer.h
index bf461de34785..e2c4e9f0010f 100644
--- a/tools/testing/selftests/kvm/include/arm64/arch_timer.h
+++ b/tools/testing/selftests/kvm/include/arm64/arch_timer.h
@@ -155,4 +155,28 @@ static inline void timer_set_next_tval_ms(enum arch_timer timer, uint32_t msec)
timer_set_tval(timer, msec_to_cycles(msec));
}
+static inline u32 vcpu_get_vtimer_irq(struct kvm_vcpu *vcpu)
+{
+ u32 intid;
+ u64 attr;
+
+ attr = vcpu_has_el2(vcpu) ? KVM_ARM_VCPU_TIMER_IRQ_HVTIMER :
+ KVM_ARM_VCPU_TIMER_IRQ_VTIMER;
+ vcpu_device_attr_get(vcpu, KVM_ARM_VCPU_TIMER_CTRL, attr, &intid);
+
+ return intid;
+}
+
+static inline u32 vcpu_get_ptimer_irq(struct kvm_vcpu *vcpu)
+{
+ u32 intid;
+ u64 attr;
+
+ attr = vcpu_has_el2(vcpu) ? KVM_ARM_VCPU_TIMER_IRQ_HPTIMER :
+ KVM_ARM_VCPU_TIMER_IRQ_PTIMER;
+ vcpu_device_attr_get(vcpu, KVM_ARM_VCPU_TIMER_CTRL, attr, &intid);
+
+ return intid;
+}
+
#endif /* SELFTEST_KVM_ARCH_TIMER_H */
diff --git a/tools/testing/selftests/kvm/include/arm64/gic.h b/tools/testing/selftests/kvm/include/arm64/gic.h
index baeb3c859389..cc7a7f34ed37 100644
--- a/tools/testing/selftests/kvm/include/arm64/gic.h
+++ b/tools/testing/selftests/kvm/include/arm64/gic.h
@@ -57,6 +57,7 @@ void gic_irq_set_pending(unsigned int intid);
void gic_irq_clear_pending(unsigned int intid);
bool gic_irq_get_pending(unsigned int intid);
void gic_irq_set_config(unsigned int intid, bool is_edge);
+void gic_irq_set_group(unsigned int intid, bool group);
void gic_rdist_enable_lpis(vm_paddr_t cfg_table, size_t cfg_table_size,
vm_paddr_t pend_table);
diff --git a/tools/testing/selftests/kvm/include/arm64/gic_v3_its.h b/tools/testing/selftests/kvm/include/arm64/gic_v3_its.h
index 3722ed9c8f96..58feef3eb386 100644
--- a/tools/testing/selftests/kvm/include/arm64/gic_v3_its.h
+++ b/tools/testing/selftests/kvm/include/arm64/gic_v3_its.h
@@ -15,5 +15,6 @@ void its_send_mapc_cmd(void *cmdq_base, u32 vcpu_id, u32 collection_id, bool val
void its_send_mapti_cmd(void *cmdq_base, u32 device_id, u32 event_id,
u32 collection_id, u32 intid);
void its_send_invall_cmd(void *cmdq_base, u32 collection_id);
+void its_send_sync_cmd(void *cmdq_base, u32 vcpu_id);
#endif // __SELFTESTS_GIC_V3_ITS_H__
diff --git a/tools/testing/selftests/kvm/include/arm64/kvm_util_arch.h b/tools/testing/selftests/kvm/include/arm64/kvm_util_arch.h
index e43a57d99b56..b973bb2c64a6 100644
--- a/tools/testing/selftests/kvm/include/arm64/kvm_util_arch.h
+++ b/tools/testing/selftests/kvm/include/arm64/kvm_util_arch.h
@@ -2,6 +2,9 @@
#ifndef SELFTEST_KVM_UTIL_ARCH_H
#define SELFTEST_KVM_UTIL_ARCH_H
-struct kvm_vm_arch {};
+struct kvm_vm_arch {
+ bool has_gic;
+ int gic_fd;
+};
#endif // SELFTEST_KVM_UTIL_ARCH_H
diff --git a/tools/testing/selftests/kvm/include/arm64/processor.h b/tools/testing/selftests/kvm/include/arm64/processor.h
index b0fc0f945766..ff928716574d 100644
--- a/tools/testing/selftests/kvm/include/arm64/processor.h
+++ b/tools/testing/selftests/kvm/include/arm64/processor.h
@@ -175,6 +175,7 @@ void vm_install_exception_handler(struct kvm_vm *vm,
void vm_install_sync_handler(struct kvm_vm *vm,
int vector, int ec, handler_fn handler);
+uint64_t *virt_get_pte_hva_at_level(struct kvm_vm *vm, vm_vaddr_t gva, int level);
uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva);
static inline void cpu_relax(void)
@@ -254,6 +255,16 @@ static inline void local_irq_disable(void)
asm volatile("msr daifset, #3" : : : "memory");
}
+static inline void local_serror_enable(void)
+{
+ asm volatile("msr daifclr, #4" : : : "memory");
+}
+
+static inline void local_serror_disable(void)
+{
+ asm volatile("msr daifset, #4" : : : "memory");
+}
+
/**
* struct arm_smccc_res - Result from SMC/HVC call
* @a0-a3 result values from registers 0 to 3
@@ -290,4 +301,87 @@ void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
/* Execute a Wait For Interrupt instruction. */
void wfi(void);
+void test_wants_mte(void);
+void test_disable_default_vgic(void);
+
+bool vm_supports_el2(struct kvm_vm *vm);
+
+static inline bool test_supports_el2(void)
+{
+ struct kvm_vm *vm = vm_create(1);
+ bool supported = vm_supports_el2(vm);
+
+ kvm_vm_free(vm);
+ return supported;
+}
+
+static inline bool vcpu_has_el2(struct kvm_vcpu *vcpu)
+{
+ return vcpu->init.features[0] & BIT(KVM_ARM_VCPU_HAS_EL2);
+}
+
+#define MAPPED_EL2_SYSREG(el2, el1) \
+ case SYS_##el1: \
+ if (vcpu_has_el2(vcpu)) \
+ alias = SYS_##el2; \
+ break
+
+
+static __always_inline u64 ctxt_reg_alias(struct kvm_vcpu *vcpu, u32 encoding)
+{
+ u32 alias = encoding;
+
+ BUILD_BUG_ON(!__builtin_constant_p(encoding));
+
+ switch (encoding) {
+ MAPPED_EL2_SYSREG(SCTLR_EL2, SCTLR_EL1);
+ MAPPED_EL2_SYSREG(CPTR_EL2, CPACR_EL1);
+ MAPPED_EL2_SYSREG(TTBR0_EL2, TTBR0_EL1);
+ MAPPED_EL2_SYSREG(TTBR1_EL2, TTBR1_EL1);
+ MAPPED_EL2_SYSREG(TCR_EL2, TCR_EL1);
+ MAPPED_EL2_SYSREG(VBAR_EL2, VBAR_EL1);
+ MAPPED_EL2_SYSREG(AFSR0_EL2, AFSR0_EL1);
+ MAPPED_EL2_SYSREG(AFSR1_EL2, AFSR1_EL1);
+ MAPPED_EL2_SYSREG(ESR_EL2, ESR_EL1);
+ MAPPED_EL2_SYSREG(FAR_EL2, FAR_EL1);
+ MAPPED_EL2_SYSREG(MAIR_EL2, MAIR_EL1);
+ MAPPED_EL2_SYSREG(TCR2_EL2, TCR2_EL1);
+ MAPPED_EL2_SYSREG(PIR_EL2, PIR_EL1);
+ MAPPED_EL2_SYSREG(PIRE0_EL2, PIRE0_EL1);
+ MAPPED_EL2_SYSREG(POR_EL2, POR_EL1);
+ MAPPED_EL2_SYSREG(AMAIR_EL2, AMAIR_EL1);
+ MAPPED_EL2_SYSREG(ELR_EL2, ELR_EL1);
+ MAPPED_EL2_SYSREG(SPSR_EL2, SPSR_EL1);
+ MAPPED_EL2_SYSREG(ZCR_EL2, ZCR_EL1);
+ MAPPED_EL2_SYSREG(CONTEXTIDR_EL2, CONTEXTIDR_EL1);
+ MAPPED_EL2_SYSREG(SCTLR2_EL2, SCTLR2_EL1);
+ MAPPED_EL2_SYSREG(CNTHCTL_EL2, CNTKCTL_EL1);
+ case SYS_SP_EL1:
+ if (!vcpu_has_el2(vcpu))
+ return ARM64_CORE_REG(sp_el1);
+
+ alias = SYS_SP_EL2;
+ break;
+ default:
+ BUILD_BUG();
+ }
+
+ return KVM_ARM64_SYS_REG(alias);
+}
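Callers pass the EL1 encoding unconditionally and let the alias resolve. A usage sketch mirroring run_access_test() earlier in the series:

/* Reads the guest's effective stack pointer regardless of vEL2. */
uint64_t sp = vcpu_get_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SP_EL1));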
+
+void kvm_get_default_vcpu_target(struct kvm_vm *vm, struct kvm_vcpu_init *init);
+
+static inline unsigned int get_current_el(void)
+{
+ return (read_sysreg(CurrentEL) >> 2) & 0x3;
+}
+
+#define do_smccc(...) \
+do { \
+ if (get_current_el() == 2) \
+ smccc_smc(__VA_ARGS__); \
+ else \
+ smccc_hvc(__VA_ARGS__); \
+} while (0)
+
#endif /* SELFTEST_KVM_PROCESSOR_H */
diff --git a/tools/testing/selftests/kvm/include/arm64/vgic.h b/tools/testing/selftests/kvm/include/arm64/vgic.h
index c481d0c00a5d..688beccc9436 100644
--- a/tools/testing/selftests/kvm/include/arm64/vgic.h
+++ b/tools/testing/selftests/kvm/include/arm64/vgic.h
@@ -16,6 +16,9 @@
((uint64_t)(flags) << 12) | \
index)
+bool kvm_supports_vgic_v3(void);
+int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs);
+void __vgic_v3_init(int fd);
int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs);
#define VGIC_MAX_RESERVED 1023
diff --git a/tools/testing/selftests/kvm/include/kvm_syscalls.h b/tools/testing/selftests/kvm/include/kvm_syscalls.h
new file mode 100644
index 000000000000..d4e613162bba
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/kvm_syscalls.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTEST_KVM_SYSCALLS_H
+#define SELFTEST_KVM_SYSCALLS_H
+
+#include <sys/syscall.h>
+
+#define MAP_ARGS0(m,...)
+#define MAP_ARGS1(m,t,a,...) m(t,a)
+#define MAP_ARGS2(m,t,a,...) m(t,a), MAP_ARGS1(m,__VA_ARGS__)
+#define MAP_ARGS3(m,t,a,...) m(t,a), MAP_ARGS2(m,__VA_ARGS__)
+#define MAP_ARGS4(m,t,a,...) m(t,a), MAP_ARGS3(m,__VA_ARGS__)
+#define MAP_ARGS5(m,t,a,...) m(t,a), MAP_ARGS4(m,__VA_ARGS__)
+#define MAP_ARGS6(m,t,a,...) m(t,a), MAP_ARGS5(m,__VA_ARGS__)
+#define MAP_ARGS(n,...) MAP_ARGS##n(__VA_ARGS__)
+
+#define __DECLARE_ARGS(t, a) t a
+#define __UNPACK_ARGS(t, a) a
+
+#define DECLARE_ARGS(nr_args, args...) MAP_ARGS(nr_args, __DECLARE_ARGS, args)
+#define UNPACK_ARGS(nr_args, args...) MAP_ARGS(nr_args, __UNPACK_ARGS, args)
+
+#define __KVM_SYSCALL_ERROR(_name, _ret) \
+ "%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno)
+
+/* Define a kvm_<syscall>() API to assert success. */
+#define __KVM_SYSCALL_DEFINE(name, nr_args, args...) \
+static inline void kvm_##name(DECLARE_ARGS(nr_args, args)) \
+{ \
+ int r; \
+ \
+ r = name(UNPACK_ARGS(nr_args, args)); \
+ TEST_ASSERT(!r, __KVM_SYSCALL_ERROR(#name, r)); \
+}
+
+/*
+ * Macro to define syscall APIs, either because KVM selftests doesn't link to
+ * the standard library, e.g. libnuma, or because there is no library that yet
+ * provides the syscall. These also generate an asserting kvm_<syscall>()
+ * wrapper via __KVM_SYSCALL_DEFINE().
+ */
+#define KVM_SYSCALL_DEFINE(name, nr_args, args...) \
+static inline long name(DECLARE_ARGS(nr_args, args)) \
+{ \
+ return syscall(__NR_##name, UNPACK_ARGS(nr_args, args)); \
+} \
+__KVM_SYSCALL_DEFINE(name, nr_args, args)
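As an illustration of the machinery, __KVM_SYSCALL_DEFINE(close, 1, int, fd) expands (modulo whitespace) to the asserting wrapper below:

/* Approximate expansion of __KVM_SYSCALL_DEFINE(close, 1, int, fd): */
static inline void kvm_close(int fd)
{
	int r;

	r = close(fd);
	TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("close", r));
}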
+
+/*
+ * Special case mmap(), as KVM selftests rarely/never specify an address,
+ * rarely specify an offset, and because the unique return code requires
+ * special handling anyway.
+ */
+static inline void *__kvm_mmap(size_t size, int prot, int flags, int fd,
+ off_t offset)
+{
+ void *mem;
+
+ mem = mmap(NULL, size, prot, flags, fd, offset);
+ TEST_ASSERT(mem != MAP_FAILED, __KVM_SYSCALL_ERROR("mmap()",
+ (int)(unsigned long)MAP_FAILED));
+ return mem;
+}
+
+static inline void *kvm_mmap(size_t size, int prot, int flags, int fd)
+{
+ return __kvm_mmap(size, prot, flags, fd, 0);
+}
+
+static inline int kvm_dup(int fd)
+{
+ int new_fd = dup(fd);
+
+ TEST_ASSERT(new_fd >= 0, __KVM_SYSCALL_ERROR("dup()", new_fd));
+ return new_fd;
+}
+
+__KVM_SYSCALL_DEFINE(munmap, 2, void *, mem, size_t, size);
+__KVM_SYSCALL_DEFINE(close, 1, int, fd);
+__KVM_SYSCALL_DEFINE(fallocate, 4, int, fd, int, mode, loff_t, offset, loff_t, len);
+__KVM_SYSCALL_DEFINE(ftruncate, 2, unsigned int, fd, off_t, length);
+
+#endif /* SELFTEST_KVM_SYSCALLS_H */
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index bee65ca08721..81f4355ff28a 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -18,8 +18,12 @@
#include <asm/atomic.h>
#include <asm/kvm.h>
+#include <sys/eventfd.h>
#include <sys/ioctl.h>
+#include <pthread.h>
+
+#include "kvm_syscalls.h"
#include "kvm_util_arch.h"
#include "kvm_util_types.h"
#include "sparsebit.h"
@@ -61,6 +65,9 @@ struct kvm_vcpu {
#ifdef __x86_64__
struct kvm_cpuid2 *cpuid;
#endif
+#ifdef __aarch64__
+ struct kvm_vcpu_init init;
+#endif
struct kvm_binary_stats stats;
struct kvm_dirty_gfn *dirty_gfns;
uint32_t fetch_index;
@@ -171,7 +178,7 @@ enum vm_guest_mode {
VM_MODE_P40V48_4K,
VM_MODE_P40V48_16K,
VM_MODE_P40V48_64K,
- VM_MODE_PXXV48_4K, /* For 48bits VA but ANY bits PA */
+ VM_MODE_PXXVYY_4K, /* For 48-bit or 57-bit VA, depending on host support */
VM_MODE_P47V64_4K,
VM_MODE_P44V64_4K,
VM_MODE_P36V48_4K,
@@ -213,7 +220,7 @@ extern enum vm_guest_mode vm_mode_default;
#elif defined(__x86_64__)
-#define VM_MODE_DEFAULT VM_MODE_PXXV48_4K
+#define VM_MODE_DEFAULT VM_MODE_PXXVYY_4K
#define MIN_PAGE_SHIFT 12U
#define ptes_per_page(page_size) ((page_size) / 8)
@@ -253,16 +260,22 @@ struct vm_guest_mode_params {
};
extern const struct vm_guest_mode_params vm_guest_mode_params[];
+int __open_path_or_exit(const char *path, int flags, const char *enoent_help);
int open_path_or_exit(const char *path, int flags);
int open_kvm_dev_path_or_exit(void);
-bool get_kvm_param_bool(const char *param);
-bool get_kvm_intel_param_bool(const char *param);
-bool get_kvm_amd_param_bool(const char *param);
+int kvm_get_module_param_integer(const char *module_name, const char *param);
+bool kvm_get_module_param_bool(const char *module_name, const char *param);
+
+static inline bool get_kvm_param_bool(const char *param)
+{
+ return kvm_get_module_param_bool("kvm", param);
+}
-int get_kvm_param_integer(const char *param);
-int get_kvm_intel_param_integer(const char *param);
-int get_kvm_amd_param_integer(const char *param);
+static inline int get_kvm_param_integer(const char *param)
+{
+ return kvm_get_module_param_integer("kvm", param);
+}
unsigned int kvm_check_cap(long cap);
@@ -271,9 +284,6 @@ static inline bool kvm_has_cap(long cap)
return kvm_check_cap(cap);
}
-#define __KVM_SYSCALL_ERROR(_name, _ret) \
- "%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno)
-
/*
* Use the "inner", double-underscore macro when reporting errors from within
* other macros so that the name of ioctl() and not its literal numeric value
@@ -502,6 +512,45 @@ static inline int vm_get_stats_fd(struct kvm_vm *vm)
return fd;
}
+static inline int __kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd,
+ uint32_t flags)
+{
+ struct kvm_irqfd irqfd = {
+ .fd = eventfd,
+ .gsi = gsi,
+ .flags = flags,
+ .resamplefd = -1,
+ };
+
+ return __vm_ioctl(vm, KVM_IRQFD, &irqfd);
+}
+
+static inline void kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd,
+ uint32_t flags)
+{
+ int ret = __kvm_irqfd(vm, gsi, eventfd, flags);
+
+ TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_IRQFD, ret, vm);
+}
+
+static inline void kvm_assign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd)
+{
+ kvm_irqfd(vm, gsi, eventfd, 0);
+}
+
+static inline void kvm_deassign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd)
+{
+ kvm_irqfd(vm, gsi, eventfd, KVM_IRQFD_FLAG_DEASSIGN);
+}
+
+static inline int kvm_new_eventfd(void)
+{
+ int fd = eventfd(0, 0);
+
+ TEST_ASSERT(fd >= 0, __KVM_SYSCALL_ERROR("eventfd()", fd));
+ return fd;
+}
+
static inline void read_stats_header(int stats_fd, struct kvm_stats_header *header)
{
ssize_t ret;
@@ -624,12 +673,12 @@ int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flag
uint32_t guest_memfd, uint64_t guest_memfd_offset);
void vm_userspace_mem_region_add(struct kvm_vm *vm,
- enum vm_mem_backing_src_type src_type,
- uint64_t guest_paddr, uint32_t slot, uint64_t npages,
- uint32_t flags);
+ enum vm_mem_backing_src_type src_type,
+ uint64_t gpa, uint32_t slot, uint64_t npages,
+ uint32_t flags);
void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
- uint64_t guest_paddr, uint32_t slot, uint64_t npages,
- uint32_t flags, int guest_memfd_fd, uint64_t guest_memfd_offset);
+ uint64_t gpa, uint32_t slot, uint64_t npages, uint32_t flags,
+ int guest_memfd_fd, uint64_t guest_memfd_offset);
#ifndef vm_arch_has_protected_memory
static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
@@ -639,6 +688,7 @@ static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
#endif
void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
+void vm_mem_region_reload(struct kvm_vm *vm, uint32_t slot);
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
@@ -1013,7 +1063,34 @@ struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);
void kvm_set_files_rlimit(uint32_t nr_vcpus);
-void kvm_pin_this_task_to_pcpu(uint32_t pcpu);
+int __pin_task_to_cpu(pthread_t task, int cpu);
+
+static inline void pin_task_to_cpu(pthread_t task, int cpu)
+{
+ int r;
+
+ r = __pin_task_to_cpu(task, cpu);
+ TEST_ASSERT(!r, "Failed to set thread affinity to pCPU '%u'", cpu);
+}
+
+static inline int pin_task_to_any_cpu(pthread_t task)
+{
+ int cpu = sched_getcpu();
+
+ pin_task_to_cpu(task, cpu);
+ return cpu;
+}
+
+static inline void pin_self_to_cpu(int cpu)
+{
+ pin_task_to_cpu(pthread_self(), cpu);
+}
+
+static inline int pin_self_to_any_cpu(void)
+{
+ return pin_task_to_any_cpu(pthread_self());
+}
+
void kvm_print_vcpu_pinning_help(void);
void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
int nr_vcpus);
@@ -1127,6 +1204,7 @@ void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);
static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
virt_arch_pg_map(vm, vaddr, paddr);
+ sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
}
@@ -1187,10 +1265,14 @@ static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
*/
void kvm_selftest_arch_init(void);
-void kvm_arch_vm_post_create(struct kvm_vm *vm);
+void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus);
+void kvm_arch_vm_finalize_vcpus(struct kvm_vm *vm);
+void kvm_arch_vm_release(struct kvm_vm *vm);
bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr);
uint32_t guest_get_vcpuid(void);
+bool kvm_arch_has_default_irqchip(void);
+
#endif /* SELFTEST_KVM_UTIL_H */
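The new irqfd helpers are exercised by irqfd_test.c later in the series; the pinning helpers compose as in this minimal sketch (the 'worker' thread is hypothetical):

/* Pin the calling task to whatever CPU it is currently running on, then
 * pin a worker thread to that same CPU so the two share a pCPU. */
static void pin_pair_to_one_cpu(pthread_t worker)
{
	int cpu = pin_self_to_any_cpu();	/* asserts on failure */

	pin_task_to_cpu(worker, cpu);		/* likewise asserts */
}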
diff --git a/tools/testing/selftests/kvm/include/loongarch/arch_timer.h b/tools/testing/selftests/kvm/include/loongarch/arch_timer.h
new file mode 100644
index 000000000000..2ed106b32c81
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/loongarch/arch_timer.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * LoongArch Constant Timer specific interface
+ */
+#ifndef SELFTEST_KVM_ARCH_TIMER_H
+#define SELFTEST_KVM_ARCH_TIMER_H
+
+#include "processor.h"
+
+/* LoongArch timer frequency is a constant 100MHz (defined here as 100 << 20 Hz) */
+#define TIMER_FREQ (100UL << 20)
+#define msec_to_cycles(msec) (TIMER_FREQ * (unsigned long)(msec) / 1000)
+#define usec_to_cycles(usec) (TIMER_FREQ * (unsigned long)(usec) / 1000000)
+#define cycles_to_usec(cycles) ((unsigned long)(cycles) * 1000000 / TIMER_FREQ)
+
+static inline unsigned long timer_get_cycles(void)
+{
+ unsigned long val = 0;
+
+ __asm__ __volatile__(
+ "rdtime.d %0, $zero\n\t"
+ : "=r"(val)
+ :
+ );
+
+ return val;
+}
+
+static inline unsigned long timer_get_cfg(void)
+{
+ return csr_read(LOONGARCH_CSR_TCFG);
+}
+
+static inline unsigned long timer_get_val(void)
+{
+ return csr_read(LOONGARCH_CSR_TVAL);
+}
+
+static inline void disable_timer(void)
+{
+ csr_write(0, LOONGARCH_CSR_TCFG);
+}
+
+static inline void timer_irq_enable(void)
+{
+ unsigned long val;
+
+ val = csr_read(LOONGARCH_CSR_ECFG);
+ val |= ECFGF_TIMER;
+ csr_write(val, LOONGARCH_CSR_ECFG);
+}
+
+static inline void timer_irq_disable(void)
+{
+ unsigned long val;
+
+ val = csr_read(LOONGARCH_CSR_ECFG);
+ val &= ~ECFGF_TIMER;
+ csr_write(val, LOONGARCH_CSR_ECFG);
+}
+
+static inline void timer_set_next_cmp_ms(unsigned int msec, bool period)
+{
+ unsigned long val;
+
+ val = msec_to_cycles(msec) & CSR_TCFG_VAL;
+ val |= CSR_TCFG_EN;
+ if (period)
+ val |= CSR_TCFG_PERIOD;
+ csr_write(val, LOONGARCH_CSR_TCFG);
+}
+
+static inline void __delay(uint64_t cycles)
+{
+ uint64_t start = timer_get_cycles();
+
+ while ((timer_get_cycles() - start) < cycles)
+ cpu_relax();
+}
+
+static inline void udelay(unsigned long usec)
+{
+ __delay(usec_to_cycles(usec));
+}
+#endif /* SELFTEST_KVM_ARCH_TIMER_H */
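A guest-side sketch of how these helpers fit together; the 10ms one-shot value is illustrative, and local_irq_enable() comes from the LoongArch processor.h changes below:

static void guest_run_timer_once(void)
{
	timer_irq_enable();			/* unmask the timer bit in ECFG */
	local_irq_enable();			/* set CRMD.IE */
	timer_set_next_cmp_ms(10, false);	/* arm a one-shot, 10ms timer */

	udelay(20 * 1000);			/* busy-wait past expiry */
	disable_timer();
}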
diff --git a/tools/testing/selftests/kvm/include/loongarch/processor.h b/tools/testing/selftests/kvm/include/loongarch/processor.h
index 6427a3275e6a..76840ddda57d 100644
--- a/tools/testing/selftests/kvm/include/loongarch/processor.h
+++ b/tools/testing/selftests/kvm/include/loongarch/processor.h
@@ -83,7 +83,14 @@
#define LOONGARCH_CSR_PRMD 0x1
#define LOONGARCH_CSR_EUEN 0x2
#define LOONGARCH_CSR_ECFG 0x4
+#define ECFGB_TIMER 11
+#define ECFGF_TIMER (BIT_ULL(ECFGB_TIMER))
#define LOONGARCH_CSR_ESTAT 0x5 /* Exception status */
+#define CSR_ESTAT_EXC_SHIFT 16
+#define CSR_ESTAT_EXC_WIDTH 6
+#define CSR_ESTAT_EXC (0x3f << CSR_ESTAT_EXC_SHIFT)
+#define EXCCODE_INT 0 /* Interrupt */
+#define INT_TI 11 /* Timer interrupt */
#define LOONGARCH_CSR_ERA 0x6 /* ERA */
#define LOONGARCH_CSR_BADV 0x7 /* Bad virtual address */
#define LOONGARCH_CSR_EENTRY 0xc
@@ -106,6 +113,14 @@
#define LOONGARCH_CSR_KS1 0x31
#define LOONGARCH_CSR_TMID 0x40
#define LOONGARCH_CSR_TCFG 0x41
+#define CSR_TCFG_VAL (BIT_ULL(48) - BIT_ULL(2))
+#define CSR_TCFG_PERIOD_SHIFT 1
+#define CSR_TCFG_PERIOD (0x1UL << CSR_TCFG_PERIOD_SHIFT)
+#define CSR_TCFG_EN (0x1UL)
+#define LOONGARCH_CSR_TVAL 0x42
+#define LOONGARCH_CSR_TINTCLR 0x44 /* Timer interrupt clear */
+#define CSR_TINTCLR_TI_SHIFT 0
+#define CSR_TINTCLR_TI (1 << CSR_TINTCLR_TI_SHIFT)
/* TLB refill exception entry */
#define LOONGARCH_CSR_TLBRENTRY 0x88
#define LOONGARCH_CSR_TLBRSAVE 0x8b
@@ -113,6 +128,28 @@
#define CSR_TLBREHI_PS_SHIFT 0
#define CSR_TLBREHI_PS (0x3fUL << CSR_TLBREHI_PS_SHIFT)
+#define csr_read(csr) \
+({ \
+ register unsigned long __v; \
+ __asm__ __volatile__( \
+ "csrrd %[val], %[reg]\n\t" \
+ : [val] "=r" (__v) \
+ : [reg] "i" (csr) \
+ : "memory"); \
+ __v; \
+})
+
+#define csr_write(v, csr) \
+({ \
+ register unsigned long __v = v; \
+ __asm__ __volatile__ ( \
+ "csrwr %[val], %[reg]\n\t" \
+ : [val] "+r" (__v) \
+ : [reg] "i" (csr) \
+ : "memory"); \
+ __v; \
+})
+
#define EXREGS_GPRS (32)
#ifndef __ASSEMBLER__
@@ -124,18 +161,60 @@ struct ex_regs {
unsigned long pc;
unsigned long estat;
unsigned long badv;
+ unsigned long prmd;
};
#define PC_OFFSET_EXREGS offsetof(struct ex_regs, pc)
#define ESTAT_OFFSET_EXREGS offsetof(struct ex_regs, estat)
#define BADV_OFFSET_EXREGS offsetof(struct ex_regs, badv)
+#define PRMD_OFFSET_EXREGS offsetof(struct ex_regs, prmd)
#define EXREGS_SIZE sizeof(struct ex_regs)
+#define VECTOR_NUM 64
+
+typedef void(*handler_fn)(struct ex_regs *);
+
+struct handlers {
+ handler_fn exception_handlers[VECTOR_NUM];
+};
+
+void vm_init_descriptor_tables(struct kvm_vm *vm);
+void vm_install_exception_handler(struct kvm_vm *vm, int vector, handler_fn handler);
+
+static inline void cpu_relax(void)
+{
+ asm volatile("nop" ::: "memory");
+}
+
+static inline void local_irq_enable(void)
+{
+ unsigned int flags = CSR_CRMD_IE;
+ register unsigned int mask asm("$t0") = CSR_CRMD_IE;
+
+ __asm__ __volatile__(
+ "csrxchg %[val], %[mask], %[reg]\n\t"
+ : [val] "+r" (flags)
+ : [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
+ : "memory");
+}
+
+static inline void local_irq_disable(void)
+{
+ unsigned int flags = 0;
+ register unsigned int mask asm("$t0") = CSR_CRMD_IE;
+
+ __asm__ __volatile__(
+ "csrxchg %[val], %[mask], %[reg]\n\t"
+ : [val] "+r" (flags)
+ : [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
+ : "memory");
+}
#else
#define PC_OFFSET_EXREGS ((EXREGS_GPRS + 0) * 8)
#define ESTAT_OFFSET_EXREGS ((EXREGS_GPRS + 1) * 8)
#define BADV_OFFSET_EXREGS ((EXREGS_GPRS + 2) * 8)
-#define EXREGS_SIZE ((EXREGS_GPRS + 3) * 8)
+#define PRMD_OFFSET_EXREGS ((EXREGS_GPRS + 3) * 8)
+#define EXREGS_SIZE ((EXREGS_GPRS + 4) * 8)
#endif
#endif /* SELFTEST_KVM_PROCESSOR_H */
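A sketch of the intended handler flow, assuming the handlers table is indexed by exception code and that hardware interrupts arrive as EXCCODE_INT:

static void guest_timer_irq_handler(struct ex_regs *regs)
{
	unsigned long ecode = (regs->estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;

	GUEST_ASSERT(ecode == EXCCODE_INT);
	csr_write(CSR_TINTCLR_TI, LOONGARCH_CSR_TINTCLR);	/* ack the timer */
}

/* Host side, before running the vCPU:
 *	vm_init_descriptor_tables(vm);
 *	vm_install_exception_handler(vm, EXCCODE_INT, guest_timer_irq_handler);
 */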
diff --git a/tools/testing/selftests/kvm/include/numaif.h b/tools/testing/selftests/kvm/include/numaif.h
index b020547403fd..29572a6d789c 100644
--- a/tools/testing/selftests/kvm/include/numaif.h
+++ b/tools/testing/selftests/kvm/include/numaif.h
@@ -1,55 +1,83 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * tools/testing/selftests/kvm/include/numaif.h
- *
- * Copyright (C) 2020, Google LLC.
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
- * Header file that provides access to NUMA API functions not explicitly
- * exported to user space.
- */
+/* Copyright (C) 2020, Google LLC. */
#ifndef SELFTEST_KVM_NUMAIF_H
#define SELFTEST_KVM_NUMAIF_H
-#define __NR_get_mempolicy 239
-#define __NR_migrate_pages 256
+#include <dirent.h>
-/* System calls */
-long get_mempolicy(int *policy, const unsigned long *nmask,
- unsigned long maxnode, void *addr, int flags)
+#include <linux/mempolicy.h>
+
+#include "kvm_syscalls.h"
+
+KVM_SYSCALL_DEFINE(get_mempolicy, 5, int *, policy, const unsigned long *, nmask,
+ unsigned long, maxnode, void *, addr, int, flags);
+
+KVM_SYSCALL_DEFINE(set_mempolicy, 3, int, mode, const unsigned long *, nmask,
+ unsigned long, maxnode);
+
+KVM_SYSCALL_DEFINE(set_mempolicy_home_node, 4, unsigned long, start,
+ unsigned long, len, unsigned long, home_node,
+ unsigned long, flags);
+
+KVM_SYSCALL_DEFINE(migrate_pages, 4, int, pid, unsigned long, maxnode,
+ const unsigned long *, frommask, const unsigned long *, tomask);
+
+KVM_SYSCALL_DEFINE(move_pages, 6, int, pid, unsigned long, count, void *, pages,
+ const int *, nodes, int *, status, int, flags);
+
+KVM_SYSCALL_DEFINE(mbind, 6, void *, addr, unsigned long, size, int, mode,
+ const unsigned long *, nodemask, unsigned long, maxnode,
+ unsigned int, flags);
+
+static inline int get_max_numa_node(void)
{
- return syscall(__NR_get_mempolicy, policy, nmask,
- maxnode, addr, flags);
+ struct dirent *de;
+ int max_node = 0;
+ DIR *d;
+
+ /*
+ * Assume there's a single node if the kernel doesn't support NUMA,
+ * or if no nodes are found.
+ */
+ d = opendir("/sys/devices/system/node");
+ if (!d)
+ return 0;
+
+ while ((de = readdir(d)) != NULL) {
+ int node_id;
+ char *endptr;
+
+ if (strncmp(de->d_name, "node", 4) != 0)
+ continue;
+
+ node_id = strtol(de->d_name + 4, &endptr, 10);
+ if (*endptr != '\0')
+ continue;
+
+ if (node_id > max_node)
+ max_node = node_id;
+ }
+ closedir(d);
+
+ return max_node;
}
-long migrate_pages(int pid, unsigned long maxnode,
- const unsigned long *frommask,
- const unsigned long *tomask)
+static bool is_numa_available(void)
{
- return syscall(__NR_migrate_pages, pid, maxnode, frommask, tomask);
+ /*
+ * Probe for NUMA by doing a dummy get_mempolicy(). If the syscall
+ * fails with ENOSYS, then the kernel was built without NUMA support.
+	 * If the syscall fails with EPERM, then the process/user lacks the
+ * necessary capabilities (CAP_SYS_NICE).
+ */
+ return !get_mempolicy(NULL, NULL, 0, NULL, 0) ||
+ (errno != ENOSYS && errno != EPERM);
}
-/* Policies */
-#define MPOL_DEFAULT 0
-#define MPOL_PREFERRED 1
-#define MPOL_BIND 2
-#define MPOL_INTERLEAVE 3
-
-#define MPOL_MAX MPOL_INTERLEAVE
-
-/* Flags for get_mem_policy */
-#define MPOL_F_NODE (1<<0) /* return next il node or node of address */
- /* Warning: MPOL_F_NODE is unsupported and
- * subject to change. Don't use.
- */
-#define MPOL_F_ADDR (1<<1) /* look up vma using address */
-#define MPOL_F_MEMS_ALLOWED (1<<2) /* query nodes allowed in cpuset */
-
-/* Flags for mbind */
-#define MPOL_MF_STRICT (1<<0) /* Verify existing pages in the mapping */
-#define MPOL_MF_MOVE (1<<1) /* Move pages owned by this process to conform to mapping */
-#define MPOL_MF_MOVE_ALL (1<<2) /* Move every page to conform to mapping */
+static inline bool is_multi_numa_node_system(void)
+{
+ return is_numa_available() && get_max_numa_node() >= 1;
+}
#endif /* SELFTEST_KVM_NUMAIF_H */
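Usage sketch: bind an anonymous buffer to node 0, skipping the test on single-node hosts. This assumes the macros also generate an asserting kvm_mbind() variant, mirroring kvm_munmap()/kvm_close(); MPOL_BIND comes from <linux/mempolicy.h>.

static void *alloc_on_node0(size_t size)
{
	unsigned long nodemask = 1;	/* bit 0 == node 0 */
	void *mem;

	TEST_REQUIRE(is_multi_numa_node_system());

	mem = kvm_mmap(size, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1);
	kvm_mbind(mem, size, MPOL_BIND, &nodemask, 8 * sizeof(nodemask), 0);
	return mem;
}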
diff --git a/tools/testing/selftests/kvm/include/riscv/processor.h b/tools/testing/selftests/kvm/include/riscv/processor.h
index 162f303d9daa..e58282488beb 100644
--- a/tools/testing/selftests/kvm/include/riscv/processor.h
+++ b/tools/testing/selftests/kvm/include/riscv/processor.h
@@ -9,6 +9,7 @@
#include <linux/stringify.h>
#include <asm/csr.h>
+#include <asm/vdso/processor.h>
#include "kvm_util.h"
#define INSN_OPCODE_MASK 0x007c
diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h
index c6ef895fbd9a..b4872ba8ed12 100644
--- a/tools/testing/selftests/kvm/include/test_util.h
+++ b/tools/testing/selftests/kvm/include/test_util.h
@@ -8,6 +8,8 @@
#ifndef SELFTEST_KVM_TEST_UTIL_H
#define SELFTEST_KVM_TEST_UTIL_H
+#include <setjmp.h>
+#include <signal.h>
#include <stdlib.h>
#include <stdarg.h>
#include <stdbool.h>
@@ -78,6 +80,23 @@ do { \
__builtin_unreachable(); \
} while (0)
+extern sigjmp_buf expect_sigbus_jmpbuf;
+void expect_sigbus_handler(int signum);
+
+#define TEST_EXPECT_SIGBUS(action) \
+do { \
+ struct sigaction sa_old, sa_new = { \
+ .sa_handler = expect_sigbus_handler, \
+ }; \
+ \
+ sigaction(SIGBUS, &sa_new, &sa_old); \
+ if (sigsetjmp(expect_sigbus_jmpbuf, 1) == 0) { \
+ action; \
+ TEST_FAIL("'%s' should have triggered SIGBUS", #action); \
+ } \
+ sigaction(SIGBUS, &sa_old, NULL); \
+} while (0)
+
size_t parse_size(const char *size);
int64_t timespec_to_ns(struct timespec ts);
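Usage sketch for the new macro; 'hva' pointing one byte past EOF of a truncated mapping is hypothetical:

/* Poking past EOF of a truncated mapping must raise SIGBUS; the handler
 * siglongjmp()s back into the macro, which then restores the old action. */
static void assert_access_faults(char *hva)
{
	TEST_EXPECT_SIGBUS(*hva = 1);
}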
diff --git a/tools/testing/selftests/kvm/include/x86/pmu.h b/tools/testing/selftests/kvm/include/x86/pmu.h
index 3c10c4dc0ae8..72575eadb63a 100644
--- a/tools/testing/selftests/kvm/include/x86/pmu.h
+++ b/tools/testing/selftests/kvm/include/x86/pmu.h
@@ -5,8 +5,11 @@
#ifndef SELFTEST_KVM_PMU_H
#define SELFTEST_KVM_PMU_H
+#include <stdbool.h>
#include <stdint.h>
+#include <linux/bits.h>
+
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300
/*
@@ -61,6 +64,11 @@
#define INTEL_ARCH_BRANCHES_RETIRED RAW_EVENT(0xc4, 0x00)
#define INTEL_ARCH_BRANCHES_MISPREDICTED RAW_EVENT(0xc5, 0x00)
#define INTEL_ARCH_TOPDOWN_SLOTS RAW_EVENT(0xa4, 0x01)
+#define INTEL_ARCH_TOPDOWN_BE_BOUND RAW_EVENT(0xa4, 0x02)
+#define INTEL_ARCH_TOPDOWN_BAD_SPEC RAW_EVENT(0x73, 0x00)
+#define INTEL_ARCH_TOPDOWN_FE_BOUND RAW_EVENT(0x9c, 0x01)
+#define INTEL_ARCH_TOPDOWN_RETIRING RAW_EVENT(0xc2, 0x02)
+#define INTEL_ARCH_LBR_INSERTS RAW_EVENT(0xe4, 0x01)
#define AMD_ZEN_CORE_CYCLES RAW_EVENT(0x76, 0x00)
#define AMD_ZEN_INSTRUCTIONS_RETIRED RAW_EVENT(0xc0, 0x00)
@@ -80,6 +88,11 @@ enum intel_pmu_architectural_events {
INTEL_ARCH_BRANCHES_RETIRED_INDEX,
INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX,
INTEL_ARCH_TOPDOWN_SLOTS_INDEX,
+ INTEL_ARCH_TOPDOWN_BE_BOUND_INDEX,
+ INTEL_ARCH_TOPDOWN_BAD_SPEC_INDEX,
+ INTEL_ARCH_TOPDOWN_FE_BOUND_INDEX,
+ INTEL_ARCH_TOPDOWN_RETIRING_INDEX,
+ INTEL_ARCH_LBR_INSERTS_INDEX,
NR_INTEL_ARCH_EVENTS,
};
@@ -94,4 +107,17 @@ enum amd_pmu_zen_events {
extern const uint64_t intel_pmu_arch_events[];
extern const uint64_t amd_pmu_zen_events[];
+enum pmu_errata {
+ INSTRUCTIONS_RETIRED_OVERCOUNT,
+ BRANCHES_RETIRED_OVERCOUNT,
+};
+extern uint64_t pmu_errata_mask;
+
+void kvm_init_pmu_errata(void);
+
+static inline bool this_pmu_has_errata(enum pmu_errata errata)
+{
+ return pmu_errata_mask & BIT_ULL(errata);
+}
+
#endif /* SELFTEST_KVM_PMU_H */
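A sketch of the intended consumption pattern, assuming kvm_init_pmu_errata() has populated pmu_errata_mask during setup; the counts are illustrative:

static void assert_insns_retired(uint64_t count, uint64_t expected)
{
	/* Affected hosts may overcount, so only bound from below there. */
	if (this_pmu_has_errata(INSTRUCTIONS_RETIRED_OVERCOUNT))
		TEST_ASSERT(count >= expected,
			    "Expected at least %lu instructions, got %lu",
			    expected, count);
	else
		TEST_ASSERT_EQ(count, expected);
}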
diff --git a/tools/testing/selftests/kvm/include/x86/processor.h b/tools/testing/selftests/kvm/include/x86/processor.h
index b11b5a53ebd5..57d62a425109 100644
--- a/tools/testing/selftests/kvm/include/x86/processor.h
+++ b/tools/testing/selftests/kvm/include/x86/processor.h
@@ -34,6 +34,8 @@ extern uint64_t guest_tsc_khz;
#define NMI_VECTOR 0x02
+const char *ex_str(int vector);
+
#define X86_EFLAGS_FIXED (1u << 1)
#define X86_CR4_VME (1ul << 0)
@@ -265,7 +267,7 @@ struct kvm_x86_cpu_property {
#define X86_PROPERTY_PMU_NR_GP_COUNTERS KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 8, 15)
#define X86_PROPERTY_PMU_GP_COUNTERS_BIT_WIDTH KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 16, 23)
#define X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 24, 31)
-#define X86_PROPERTY_PMU_EVENTS_MASK KVM_X86_CPU_PROPERTY(0xa, 0, EBX, 0, 7)
+#define X86_PROPERTY_PMU_EVENTS_MASK KVM_X86_CPU_PROPERTY(0xa, 0, EBX, 0, 12)
#define X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK KVM_X86_CPU_PROPERTY(0xa, 0, ECX, 0, 31)
#define X86_PROPERTY_PMU_NR_FIXED_COUNTERS KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 0, 4)
#define X86_PROPERTY_PMU_FIXED_COUNTERS_BIT_WIDTH KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 5, 12)
@@ -332,6 +334,11 @@ struct kvm_x86_pmu_feature {
#define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED KVM_X86_PMU_FEATURE(EBX, 5)
#define X86_PMU_FEATURE_BRANCHES_MISPREDICTED KVM_X86_PMU_FEATURE(EBX, 6)
#define X86_PMU_FEATURE_TOPDOWN_SLOTS KVM_X86_PMU_FEATURE(EBX, 7)
+#define X86_PMU_FEATURE_TOPDOWN_BE_BOUND KVM_X86_PMU_FEATURE(EBX, 8)
+#define X86_PMU_FEATURE_TOPDOWN_BAD_SPEC KVM_X86_PMU_FEATURE(EBX, 9)
+#define X86_PMU_FEATURE_TOPDOWN_FE_BOUND KVM_X86_PMU_FEATURE(EBX, 10)
+#define X86_PMU_FEATURE_TOPDOWN_RETIRING KVM_X86_PMU_FEATURE(EBX, 11)
+#define X86_PMU_FEATURE_LBR_INSERTS KVM_X86_PMU_FEATURE(EBX, 12)
#define X86_PMU_FEATURE_INSNS_RETIRED_FIXED KVM_X86_PMU_FEATURE(ECX, 0)
#define X86_PMU_FEATURE_CPU_CYCLES_FIXED KVM_X86_PMU_FEATURE(ECX, 1)
@@ -1150,7 +1157,6 @@ do { \
void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
void kvm_init_vm_address_properties(struct kvm_vm *vm);
-bool vm_is_unrestricted_guest(struct kvm_vm *vm);
struct ex_regs {
uint64_t rax, rcx, rdx, rbx;
@@ -1180,6 +1186,12 @@ struct idt_entry {
void vm_install_exception_handler(struct kvm_vm *vm, int vector,
void (*handler)(struct ex_regs *));
+/*
+ * Exception fixup morphs #DE to an arbitrary magic vector so that '0' can be
+ * used to signal "no expcetion".
+ */
+#define KVM_MAGIC_DE_VECTOR 0xff
+
/* If a toddler were to say "abracadabra". */
#define KVM_EXCEPTION_MAGIC 0xabacadabaULL
@@ -1315,6 +1327,26 @@ static inline uint8_t xsetbv_safe(uint32_t index, uint64_t value)
bool kvm_is_tdp_enabled(void);
+static inline bool get_kvm_intel_param_bool(const char *param)
+{
+ return kvm_get_module_param_bool("kvm_intel", param);
+}
+
+static inline bool get_kvm_amd_param_bool(const char *param)
+{
+ return kvm_get_module_param_bool("kvm_amd", param);
+}
+
+static inline int get_kvm_intel_param_integer(const char *param)
+{
+ return kvm_get_module_param_integer("kvm_intel", param);
+}
+
+static inline int get_kvm_amd_param_integer(const char *param)
+{
+ return kvm_get_module_param_integer("kvm_amd", param);
+}
+
static inline bool kvm_is_pmu_enabled(void)
{
return get_kvm_param_bool("enable_pmu");
@@ -1325,6 +1357,16 @@ static inline bool kvm_is_forced_emulation_enabled(void)
return !!get_kvm_param_integer("force_emulation_prefix");
}
+static inline bool kvm_is_unrestricted_guest_enabled(void)
+{
+ return get_kvm_intel_param_bool("unrestricted_guest");
+}
+
+static inline bool kvm_is_ignore_msrs(void)
+{
+ return get_kvm_param_bool("ignore_msrs");
+}
+
uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
int *level);
uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr);
@@ -1399,7 +1441,7 @@ enum pg_level {
PG_LEVEL_2M,
PG_LEVEL_1G,
PG_LEVEL_512G,
- PG_LEVEL_NUM
+ PG_LEVEL_256T
};
#define PG_LEVEL_SHIFT(_level) ((_level - 1) * 9 + 12)
diff --git a/tools/testing/selftests/kvm/include/x86/vmx.h b/tools/testing/selftests/kvm/include/x86/vmx.h
index edb3c391b982..96e2b4c630a9 100644
--- a/tools/testing/selftests/kvm/include/x86/vmx.h
+++ b/tools/testing/selftests/kvm/include/x86/vmx.h
@@ -568,8 +568,7 @@ void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
uint64_t addr, uint64_t size);
bool kvm_cpu_has_ept(void);
-void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
- uint32_t eptp_memslot);
+void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm);
void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm);
#endif /* SELFTEST_KVM_VMX_H */
diff --git a/tools/testing/selftests/kvm/irqfd_test.c b/tools/testing/selftests/kvm/irqfd_test.c
new file mode 100644
index 000000000000..5d7590d01868
--- /dev/null
+++ b/tools/testing/selftests/kvm/irqfd_test.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <errno.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <signal.h>
+#include <stdint.h>
+#include <sys/sysinfo.h>
+
+#include "kvm_util.h"
+
+static struct kvm_vm *vm1;
+static struct kvm_vm *vm2;
+static int __eventfd;
+static bool done;
+
+/*
+ * KVM de-assigns based on eventfd *and* GSI, but requires unique eventfds when
+ * assigning (the API isn't symmetrical). Abuse the oddity and use a per-task
+ * GSI base to avoid false failures due to cross-task de-assign, i.e. so that
+ * the secondary doesn't de-assign the primary's eventfd and cause assign to
+ * unexpectedly succeed on the primary.
+ */
+#define GSI_BASE_PRIMARY 0x20
+#define GSI_BASE_SECONDARY 0x30
+
+static void juggle_eventfd_secondary(struct kvm_vm *vm, int eventfd)
+{
+ int r, i;
+
+ /*
+ * The secondary task can encounter EBADF since the primary can close
+ * the eventfd at any time. And because the primary can recreate the
+ * eventfd, at the safe fd in the file table, the secondary can also
+ * encounter "unexpected" success, e.g. if the close+recreate happens
+ * between the first and second assignments. The secondary's role is
+ * mostly to antagonize KVM, not to detect bugs.
+ */
+ for (i = 0; i < 2; i++) {
+ r = __kvm_irqfd(vm, GSI_BASE_SECONDARY, eventfd, 0);
+ TEST_ASSERT(!r || errno == EBUSY || errno == EBADF,
+ "Wanted success, EBUSY, or EBADF, r = %d, errno = %d",
+ r, errno);
+
+ /* De-assign should succeed unless the eventfd was closed. */
+ r = __kvm_irqfd(vm, GSI_BASE_SECONDARY + i, eventfd, KVM_IRQFD_FLAG_DEASSIGN);
+ TEST_ASSERT(!r || errno == EBADF,
+ "De-assign should succeed unless the fd was closed");
+ }
+}
+
+static void *secondary_irqfd_juggler(void *ign)
+{
+ while (!READ_ONCE(done)) {
+ juggle_eventfd_secondary(vm1, READ_ONCE(__eventfd));
+ juggle_eventfd_secondary(vm2, READ_ONCE(__eventfd));
+ }
+
+ return NULL;
+}
+
+static void juggle_eventfd_primary(struct kvm_vm *vm, int eventfd)
+{
+ int r1, r2;
+
+ /*
+ * At least one of the assigns should fail. KVM disallows assigning a
+ * single eventfd to multiple GSIs (or VMs), so it's possible that both
+ * assignments can fail, too.
+ */
+ r1 = __kvm_irqfd(vm, GSI_BASE_PRIMARY, eventfd, 0);
+ TEST_ASSERT(!r1 || errno == EBUSY,
+ "Wanted success or EBUSY, r = %d, errno = %d", r1, errno);
+
+ r2 = __kvm_irqfd(vm, GSI_BASE_PRIMARY + 1, eventfd, 0);
+ TEST_ASSERT(r1 || (r2 && errno == EBUSY),
+ "Wanted failure (EBUSY), r1 = %d, r2 = %d, errno = %d",
+ r1, r2, errno);
+
+ /*
+ * De-assign should always succeed, even if the corresponding assign
+ * failed.
+ */
+ kvm_irqfd(vm, GSI_BASE_PRIMARY, eventfd, KVM_IRQFD_FLAG_DEASSIGN);
+ kvm_irqfd(vm, GSI_BASE_PRIMARY + 1, eventfd, KVM_IRQFD_FLAG_DEASSIGN);
+}
+
+int main(int argc, char *argv[])
+{
+ pthread_t racing_thread;
+ struct kvm_vcpu *unused;
+ int r, i;
+
+ TEST_REQUIRE(kvm_arch_has_default_irqchip());
+
+ /*
+ * Create "full" VMs, as KVM_IRQFD requires an in-kernel IRQ chip. Also
+ * create an unused vCPU as certain architectures (like arm64) need to
+ * complete IRQ chip initialization after all possible vCPUs for a VM
+ * have been created.
+ */
+ vm1 = vm_create_with_one_vcpu(&unused, NULL);
+ vm2 = vm_create_with_one_vcpu(&unused, NULL);
+
+ WRITE_ONCE(__eventfd, kvm_new_eventfd());
+
+ kvm_irqfd(vm1, 10, __eventfd, 0);
+
+ r = __kvm_irqfd(vm1, 11, __eventfd, 0);
+ TEST_ASSERT(r && errno == EBUSY,
+ "Wanted EBUSY, r = %d, errno = %d", r, errno);
+
+ r = __kvm_irqfd(vm2, 12, __eventfd, 0);
+ TEST_ASSERT(r && errno == EBUSY,
+ "Wanted EBUSY, r = %d, errno = %d", r, errno);
+
+ /*
+ * De-assign all eventfds, along with multiple eventfds that were never
+ * assigned. KVM's ABI is that de-assign is allowed so long as the
+ * eventfd itself is valid.
+ */
+ kvm_irqfd(vm1, 11, READ_ONCE(__eventfd), KVM_IRQFD_FLAG_DEASSIGN);
+ kvm_irqfd(vm1, 12, READ_ONCE(__eventfd), KVM_IRQFD_FLAG_DEASSIGN);
+ kvm_irqfd(vm1, 13, READ_ONCE(__eventfd), KVM_IRQFD_FLAG_DEASSIGN);
+ kvm_irqfd(vm1, 14, READ_ONCE(__eventfd), KVM_IRQFD_FLAG_DEASSIGN);
+ kvm_irqfd(vm1, 10, READ_ONCE(__eventfd), KVM_IRQFD_FLAG_DEASSIGN);
+
+ close(__eventfd);
+
+ pthread_create(&racing_thread, NULL, secondary_irqfd_juggler, vm2);
+
+ for (i = 0; i < 10000; i++) {
+ WRITE_ONCE(__eventfd, kvm_new_eventfd());
+
+ juggle_eventfd_primary(vm1, __eventfd);
+ juggle_eventfd_primary(vm2, __eventfd);
+ close(__eventfd);
+ }
+
+ WRITE_ONCE(done, true);
+ pthread_join(racing_thread, NULL);
+}
diff --git a/tools/testing/selftests/kvm/kvm_binary_stats_test.c b/tools/testing/selftests/kvm/kvm_binary_stats_test.c
index f02355c3c4c2..b7dbde9c0843 100644
--- a/tools/testing/selftests/kvm/kvm_binary_stats_test.c
+++ b/tools/testing/selftests/kvm/kvm_binary_stats_test.c
@@ -239,14 +239,14 @@ int main(int argc, char *argv[])
* single stats file works and doesn't cause explosions.
*/
vm_stats_fds = vm_get_stats_fd(vms[i]);
- stats_test(dup(vm_stats_fds));
+ stats_test(kvm_dup(vm_stats_fds));
/* Verify userspace can instantiate multiple stats files. */
stats_test(vm_get_stats_fd(vms[i]));
for (j = 0; j < max_vcpu; ++j) {
vcpu_stats_fds[j] = vcpu_get_stats_fd(vcpus[i * max_vcpu + j]);
- stats_test(dup(vcpu_stats_fds[j]));
+ stats_test(kvm_dup(vcpu_stats_fds[j]));
stats_test(vcpu_get_stats_fd(vcpus[i * max_vcpu + j]));
}
diff --git a/tools/testing/selftests/kvm/lib/arm64/gic.c b/tools/testing/selftests/kvm/lib/arm64/gic.c
index 7abbf8866512..b023868fe0b8 100644
--- a/tools/testing/selftests/kvm/lib/arm64/gic.c
+++ b/tools/testing/selftests/kvm/lib/arm64/gic.c
@@ -155,3 +155,9 @@ void gic_irq_set_config(unsigned int intid, bool is_edge)
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_irq_set_config(intid, is_edge);
}
+
+void gic_irq_set_group(unsigned int intid, bool group)
+{
+ GUEST_ASSERT(gic_common_ops);
+ gic_common_ops->gic_irq_set_group(intid, group);
+}
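Guest-side sketch; intid 32, the first SPI, is illustrative, and the group encoding follows GICD_IGROUPR, where 1 means non-secure Group-1:

static void route_spi_to_group0(void)
{
	gic_irq_set_group(32, 0);	/* claim SPI 32 for Group-0 */
	/* ... exercise the interrupt ... */
	gic_irq_set_group(32, 1);	/* restore the default Group-1 */
}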
diff --git a/tools/testing/selftests/kvm/lib/arm64/gic_private.h b/tools/testing/selftests/kvm/lib/arm64/gic_private.h
index d24e9ecc96c6..b6a7e30c3eb1 100644
--- a/tools/testing/selftests/kvm/lib/arm64/gic_private.h
+++ b/tools/testing/selftests/kvm/lib/arm64/gic_private.h
@@ -25,6 +25,7 @@ struct gic_common_ops {
void (*gic_irq_clear_pending)(uint32_t intid);
bool (*gic_irq_get_pending)(uint32_t intid);
void (*gic_irq_set_config)(uint32_t intid, bool is_edge);
+ void (*gic_irq_set_group)(uint32_t intid, bool group);
};
extern const struct gic_common_ops gicv3_ops;
diff --git a/tools/testing/selftests/kvm/lib/arm64/gic_v3.c b/tools/testing/selftests/kvm/lib/arm64/gic_v3.c
index 66d05506f78b..50754a27f493 100644
--- a/tools/testing/selftests/kvm/lib/arm64/gic_v3.c
+++ b/tools/testing/selftests/kvm/lib/arm64/gic_v3.c
@@ -293,17 +293,36 @@ static void gicv3_enable_redist(volatile void *redist_base)
}
}
+static void gicv3_set_group(uint32_t intid, bool grp)
+{
+ uint32_t cpu_or_dist;
+ uint32_t val;
+
+ cpu_or_dist = (get_intid_range(intid) == SPI_RANGE) ? DIST_BIT : guest_get_vcpuid();
+ val = gicv3_reg_readl(cpu_or_dist, GICD_IGROUPR + (intid / 32) * 4);
+ if (grp)
+ val |= BIT(intid % 32);
+ else
+ val &= ~BIT(intid % 32);
+ gicv3_reg_writel(cpu_or_dist, GICD_IGROUPR + (intid / 32) * 4, val);
+}
+
static void gicv3_cpu_init(unsigned int cpu)
{
volatile void *sgi_base;
unsigned int i;
volatile void *redist_base_cpu;
+ u64 typer;
GUEST_ASSERT(cpu < gicv3_data.nr_cpus);
redist_base_cpu = gicr_base_cpu(cpu);
sgi_base = sgi_base_from_redist(redist_base_cpu);
+ /* Verify assumption that GICR_TYPER.Processor_number == cpu */
+ typer = readq_relaxed(redist_base_cpu + GICR_TYPER);
+ GUEST_ASSERT_EQ(GICR_TYPER_CPU_NUMBER(typer), cpu);
+
gicv3_enable_redist(redist_base_cpu);
/*
@@ -328,6 +347,8 @@ static void gicv3_cpu_init(unsigned int cpu)
/* Set a default priority threshold */
write_sysreg_s(ICC_PMR_DEF_PRIO, SYS_ICC_PMR_EL1);
+	/* Enable Group-0 interrupts */
+	write_sysreg_s(ICC_IGRPEN0_EL1_MASK, SYS_ICC_IGRPEN0_EL1);
/* Enable non-secure Group-1 interrupts */
write_sysreg_s(ICC_IGRPEN1_EL1_MASK, SYS_ICC_IGRPEN1_EL1);
}
@@ -400,6 +421,7 @@ const struct gic_common_ops gicv3_ops = {
.gic_irq_clear_pending = gicv3_irq_clear_pending,
.gic_irq_get_pending = gicv3_irq_get_pending,
.gic_irq_set_config = gicv3_irq_set_config,
+ .gic_irq_set_group = gicv3_set_group,
};
void gic_rdist_enable_lpis(vm_paddr_t cfg_table, size_t cfg_table_size,
diff --git a/tools/testing/selftests/kvm/lib/arm64/gic_v3_its.c b/tools/testing/selftests/kvm/lib/arm64/gic_v3_its.c
index 09f270545646..7f9fdcf42ae6 100644
--- a/tools/testing/selftests/kvm/lib/arm64/gic_v3_its.c
+++ b/tools/testing/selftests/kvm/lib/arm64/gic_v3_its.c
@@ -15,6 +15,8 @@
#include "gic_v3.h"
#include "processor.h"
+#define GITS_COLLECTION_TARGET_SHIFT 16
+
static u64 its_read_u64(unsigned long offset)
{
return readq_relaxed(GITS_BASE_GVA + offset);
@@ -163,6 +165,11 @@ static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
}
+static u64 procnum_to_rdbase(u32 vcpu_id)
+{
+ return vcpu_id << GITS_COLLECTION_TARGET_SHIFT;
+}
+
#define GITS_CMDQ_POLL_ITERATIONS 0
static void its_send_cmd(void *cmdq_base, struct its_cmd_block *cmd)
@@ -217,7 +224,7 @@ void its_send_mapc_cmd(void *cmdq_base, u32 vcpu_id, u32 collection_id, bool val
its_encode_cmd(&cmd, GITS_CMD_MAPC);
its_encode_collection(&cmd, collection_id);
- its_encode_target(&cmd, vcpu_id);
+ its_encode_target(&cmd, procnum_to_rdbase(vcpu_id));
its_encode_valid(&cmd, valid);
its_send_cmd(cmdq_base, &cmd);
@@ -246,3 +253,13 @@ void its_send_invall_cmd(void *cmdq_base, u32 collection_id)
its_send_cmd(cmdq_base, &cmd);
}
+
+void its_send_sync_cmd(void *cmdq_base, u32 vcpu_id)
+{
+ struct its_cmd_block cmd = {};
+
+ its_encode_cmd(&cmd, GITS_CMD_SYNC);
+ its_encode_target(&cmd, procnum_to_rdbase(vcpu_id));
+
+ its_send_cmd(cmdq_base, &cmd);
+}
diff --git a/tools/testing/selftests/kvm/lib/arm64/processor.c b/tools/testing/selftests/kvm/lib/arm64/processor.c
index 9d69904cb608..d46e4b13b92c 100644
--- a/tools/testing/selftests/kvm/lib/arm64/processor.c
+++ b/tools/testing/selftests/kvm/lib/arm64/processor.c
@@ -12,6 +12,7 @@
#include "kvm_util.h"
#include "processor.h"
#include "ucall_common.h"
+#include "vgic.h"
#include <linux/bitfield.h>
#include <linux/sizes.h>
@@ -185,7 +186,7 @@ void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
_virt_pg_map(vm, vaddr, paddr, attr_idx);
}
-uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva)
+uint64_t *virt_get_pte_hva_at_level(struct kvm_vm *vm, vm_vaddr_t gva, int level)
{
uint64_t *ptep;
@@ -195,17 +196,23 @@ uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva)
ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8;
if (!ptep)
goto unmapped_gva;
+ if (level == 0)
+ return ptep;
switch (vm->pgtable_levels) {
case 4:
ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
if (!ptep)
goto unmapped_gva;
+ if (level == 1)
+ break;
/* fall through */
case 3:
ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
if (!ptep)
goto unmapped_gva;
+ if (level == 2)
+ break;
/* fall through */
case 2:
ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
@@ -223,6 +230,11 @@ unmapped_gva:
exit(EXIT_FAILURE);
}
+uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva)
+{
+ return virt_get_pte_hva_at_level(vm, gva, 3);
+}
+
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
uint64_t *ptep = virt_get_pte_hva(vm, gva);
@@ -266,35 +278,53 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
}
}
+bool vm_supports_el2(struct kvm_vm *vm)
+{
+ const char *value = getenv("NV");
+
+ if (value && *value == '0')
+ return false;
+
+ return vm_check_cap(vm, KVM_CAP_ARM_EL2) && vm->arch.has_gic;
+}
+
+void kvm_get_default_vcpu_target(struct kvm_vm *vm, struct kvm_vcpu_init *init)
+{
+ struct kvm_vcpu_init preferred = {};
+
+ vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &preferred);
+ if (vm_supports_el2(vm))
+ preferred.features[0] |= BIT(KVM_ARM_VCPU_HAS_EL2);
+
+ *init = preferred;
+}
+
void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
{
struct kvm_vcpu_init default_init = { .target = -1, };
struct kvm_vm *vm = vcpu->vm;
uint64_t sctlr_el1, tcr_el1, ttbr0_el1;
- if (!init)
+ if (!init) {
+ kvm_get_default_vcpu_target(vm, &default_init);
init = &default_init;
-
- if (init->target == -1) {
- struct kvm_vcpu_init preferred;
- vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &preferred);
- init->target = preferred.target;
}
vcpu_ioctl(vcpu, KVM_ARM_VCPU_INIT, init);
+ vcpu->init = *init;
/*
* Enable FP/ASIMD to avoid trapping when accessing Q0-Q15
* registers, which the variable argument list macros do.
*/
- vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20);
+ vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_CPACR_EL1), 3 << 20);
- sctlr_el1 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1));
- tcr_el1 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1));
+ sctlr_el1 = vcpu_get_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SCTLR_EL1));
+ tcr_el1 = vcpu_get_reg(vcpu, ctxt_reg_alias(vcpu, SYS_TCR_EL1));
/* Configure base granule size */
switch (vm->mode) {
- case VM_MODE_PXXV48_4K:
+ case VM_MODE_PXXVYY_4K:
TEST_FAIL("AArch64 does not support 4K sized pages "
"with ANY-bit physical address ranges");
case VM_MODE_P52V48_64K:
@@ -357,11 +387,17 @@ void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
if (use_lpa2_pte_format(vm))
tcr_el1 |= TCR_DS;
- vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1);
- vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1);
- vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1);
- vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), ttbr0_el1);
+ vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SCTLR_EL1), sctlr_el1);
+ vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_TCR_EL1), tcr_el1);
+ vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_MAIR_EL1), DEFAULT_MAIR_EL1);
+ vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_TTBR0_EL1), ttbr0_el1);
vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpu->id);
+
+ if (!vcpu_has_el2(vcpu))
+ return;
+
+ vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_HCR_EL2),
+ HCR_EL2_RW | HCR_EL2_TGE | HCR_EL2_E2H);
}
void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
@@ -395,7 +431,7 @@ static struct kvm_vcpu *__aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
aarch64_vcpu_setup(vcpu, init);
- vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
+ vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SP_EL1), stack_vaddr + stack_size);
return vcpu;
}
@@ -465,7 +501,7 @@ void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu)
{
extern char vectors;
- vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_VBAR_EL1), (uint64_t)&vectors);
+ vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_VBAR_EL1), (uint64_t)&vectors);
}
void route_exception(struct ex_regs *regs, int vector)
@@ -573,15 +609,15 @@ void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k,
err = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_GET_ONE_REG, vcpu_fd));
- gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN4), val);
+ gran = FIELD_GET(ID_AA64MMFR0_EL1_TGRAN4, val);
*ipa4k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN4_NI,
ID_AA64MMFR0_EL1_TGRAN4_52_BIT);
- gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN64), val);
+ gran = FIELD_GET(ID_AA64MMFR0_EL1_TGRAN64, val);
*ipa64k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN64_NI,
ID_AA64MMFR0_EL1_TGRAN64_IMP);
- gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN16), val);
+ gran = FIELD_GET(ID_AA64MMFR0_EL1_TGRAN16, val);
*ipa16k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN16_NI,
ID_AA64MMFR0_EL1_TGRAN16_52_BIT);
@@ -653,3 +689,44 @@ void wfi(void)
{
asm volatile("wfi");
}
+
+static bool request_mte;
+static bool request_vgic = true;
+
+void test_wants_mte(void)
+{
+ request_mte = true;
+}
+
+void test_disable_default_vgic(void)
+{
+ request_vgic = false;
+}
+
+void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus)
+{
+ if (request_mte && vm_check_cap(vm, KVM_CAP_ARM_MTE))
+ vm_enable_cap(vm, KVM_CAP_ARM_MTE, 0);
+
+ if (request_vgic && kvm_supports_vgic_v3()) {
+ vm->arch.gic_fd = __vgic_v3_setup(vm, nr_vcpus, 64);
+ vm->arch.has_gic = true;
+ }
+}
+
+void kvm_arch_vm_finalize_vcpus(struct kvm_vm *vm)
+{
+ if (vm->arch.has_gic)
+ __vgic_v3_init(vm->arch.gic_fd);
+}
+
+void kvm_arch_vm_release(struct kvm_vm *vm)
+{
+ if (vm->arch.has_gic)
+ close(vm->arch.gic_fd);
+}
+
+bool kvm_arch_has_default_irqchip(void)
+{
+ return request_vgic && kvm_supports_vgic_v3();
+}
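Because the request flags are latched before kvm_arch_vm_post_create() runs, tests must opt in or out before creating the VM; a minimal sketch:

int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	test_disable_default_vgic();	/* must precede VM creation */
	vm = vm_create_with_one_vcpu(&vcpu, NULL);

	/* ... the VM now has no vGIC; KVM_IRQFD and friends will fail ... */

	kvm_vm_free(vm);
	return 0;
}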
diff --git a/tools/testing/selftests/kvm/lib/arm64/vgic.c b/tools/testing/selftests/kvm/lib/arm64/vgic.c
index 4427f43f73ea..d0f7bd0984b8 100644
--- a/tools/testing/selftests/kvm/lib/arm64/vgic.c
+++ b/tools/testing/selftests/kvm/lib/arm64/vgic.c
@@ -15,6 +15,17 @@
#include "gic.h"
#include "gic_v3.h"
+bool kvm_supports_vgic_v3(void)
+{
+ struct kvm_vm *vm = vm_create_barebones();
+ int r;
+
+ r = __kvm_test_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3);
+ kvm_vm_free(vm);
+
+ return !r;
+}
+
/*
* vGIC-v3 default host setup
*
@@ -30,24 +41,11 @@
* redistributor regions of the guest. Since it depends on the number of
* vCPUs for the VM, it must be called after all the vCPUs have been created.
*/
-int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs)
+int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs)
{
int gic_fd;
uint64_t attr;
- struct list_head *iter;
- unsigned int nr_gic_pages, nr_vcpus_created = 0;
-
- TEST_ASSERT(nr_vcpus, "Number of vCPUs cannot be empty");
-
- /*
- * Make sure that the caller is infact calling this
- * function after all the vCPUs are added.
- */
- list_for_each(iter, &vm->vcpus)
- nr_vcpus_created++;
- TEST_ASSERT(nr_vcpus == nr_vcpus_created,
- "Number of vCPUs requested (%u) doesn't match with the ones created for the VM (%u)",
- nr_vcpus, nr_vcpus_created);
+ unsigned int nr_gic_pages;
/* Distributor setup */
gic_fd = __kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3);
@@ -56,9 +54,6 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs)
kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0, &nr_irqs);
- kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
- KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
-
attr = GICD_BASE_GPA;
kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_DIST, &attr);
@@ -73,10 +68,39 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs)
KVM_VGIC_V3_REDIST_SIZE * nr_vcpus);
virt_map(vm, GICR_BASE_GPA, GICR_BASE_GPA, nr_gic_pages);
- kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ return gic_fd;
+}
+
+void __vgic_v3_init(int fd)
+{
+ kvm_device_attr_set(fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
+}
- return gic_fd;
+int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs)
+{
+ unsigned int nr_vcpus_created = 0;
+ struct list_head *iter;
+ int fd;
+
+ TEST_ASSERT(nr_vcpus, "Number of vCPUs cannot be empty");
+
+ /*
+	 * Make sure that the caller is in fact calling this
+ * function after all the vCPUs are added.
+ */
+ list_for_each(iter, &vm->vcpus)
+ nr_vcpus_created++;
+ TEST_ASSERT(nr_vcpus == nr_vcpus_created,
+ "Number of vCPUs requested (%u) doesn't match with the ones created for the VM (%u)",
+ nr_vcpus, nr_vcpus_created);
+
+ fd = __vgic_v3_setup(vm, nr_vcpus, nr_irqs);
+ if (fd < 0)
+ return fd;
+
+ __vgic_v3_init(fd);
+ return fd;
}
/* should only work for level sensitive interrupts */
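With the setup/init split, VMs that bypass kvm_arch_vm_post_create() can still stand up a vGIC by hand; a sketch, where 64 IRQs is an arbitrary choice:

static int manual_vgic_setup(struct kvm_vm *vm, unsigned int nr_vcpus)
{
	int gic_fd = __vgic_v3_setup(vm, nr_vcpus, 64);

	if (gic_fd < 0)
		return gic_fd;

	/* ... create all vCPUs before initializing the distributor ... */
	__vgic_v3_init(gic_fd);
	return gic_fd;
}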
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index a055343a7bf7..8279b6ced8d2 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -24,17 +24,29 @@ uint32_t guest_random_seed;
struct guest_random_state guest_rng;
static uint32_t last_guest_seed;
-static int vcpu_mmap_sz(void);
+static size_t vcpu_mmap_sz(void);
-int open_path_or_exit(const char *path, int flags)
+int __open_path_or_exit(const char *path, int flags, const char *enoent_help)
{
int fd;
fd = open(path, flags);
- __TEST_REQUIRE(fd >= 0 || errno != ENOENT, "Cannot open %s: %s", path, strerror(errno));
- TEST_ASSERT(fd >= 0, "Failed to open '%s'", path);
+ if (fd < 0)
+ goto error;
return fd;
+
+error:
+ if (errno == EACCES || errno == ENOENT)
+ ksft_exit_skip("- Cannot open '%s': %s. %s\n",
+ path, strerror(errno),
+ errno == EACCES ? "Root required?" : enoent_help);
+ TEST_FAIL("Failed to open '%s'", path);
+}
+
+int open_path_or_exit(const char *path, int flags)
+{
+ return __open_path_or_exit(path, flags, "");
}
/*
@@ -48,7 +60,7 @@ int open_path_or_exit(const char *path, int flags)
*/
static int _open_kvm_dev_path_or_exit(int flags)
{
- return open_path_or_exit(KVM_DEV_PATH, flags);
+ return __open_path_or_exit(KVM_DEV_PATH, flags, "Is KVM loaded and enabled?");
}
int open_kvm_dev_path_or_exit(void)
@@ -64,6 +76,9 @@ static ssize_t get_module_param(const char *module_name, const char *param,
ssize_t bytes_read;
int fd, r;
+ /* Verify KVM is loaded, to provide a more helpful SKIP message. */
+ close(open_kvm_dev_path_or_exit());
+
r = snprintf(path, path_size, "/sys/module/%s/parameters/%s",
module_name, param);
TEST_ASSERT(r < path_size,
@@ -80,7 +95,7 @@ static ssize_t get_module_param(const char *module_name, const char *param,
return bytes_read;
}
-static int get_module_param_integer(const char *module_name, const char *param)
+int kvm_get_module_param_integer(const char *module_name, const char *param)
{
/*
* 16 bytes to hold a 64-bit value (1 byte per char), 1 byte for the
@@ -104,7 +119,7 @@ static int get_module_param_integer(const char *module_name, const char *param)
return atoi_paranoid(value);
}
-static bool get_module_param_bool(const char *module_name, const char *param)
+bool kvm_get_module_param_bool(const char *module_name, const char *param)
{
char value;
ssize_t r;
@@ -120,36 +135,6 @@ static bool get_module_param_bool(const char *module_name, const char *param)
TEST_FAIL("Unrecognized value '%c' for boolean module param", value);
}
-bool get_kvm_param_bool(const char *param)
-{
- return get_module_param_bool("kvm", param);
-}
-
-bool get_kvm_intel_param_bool(const char *param)
-{
- return get_module_param_bool("kvm_intel", param);
-}
-
-bool get_kvm_amd_param_bool(const char *param)
-{
- return get_module_param_bool("kvm_amd", param);
-}
-
-int get_kvm_param_integer(const char *param)
-{
- return get_module_param_integer("kvm", param);
-}
-
-int get_kvm_intel_param_integer(const char *param)
-{
- return get_module_param_integer("kvm_intel", param);
-}
-
-int get_kvm_amd_param_integer(const char *param)
-{
- return get_module_param_integer("kvm_amd", param);
-}
-
/*
* Capability
*
@@ -216,7 +201,7 @@ const char *vm_guest_mode_string(uint32_t i)
[VM_MODE_P40V48_4K] = "PA-bits:40, VA-bits:48, 4K pages",
[VM_MODE_P40V48_16K] = "PA-bits:40, VA-bits:48, 16K pages",
[VM_MODE_P40V48_64K] = "PA-bits:40, VA-bits:48, 64K pages",
- [VM_MODE_PXXV48_4K] = "PA-bits:ANY, VA-bits:48, 4K pages",
+ [VM_MODE_PXXVYY_4K] = "PA-bits:ANY, VA-bits:48 or 57, 4K pages",
[VM_MODE_P47V64_4K] = "PA-bits:47, VA-bits:64, 4K pages",
[VM_MODE_P44V64_4K] = "PA-bits:44, VA-bits:64, 4K pages",
[VM_MODE_P36V48_4K] = "PA-bits:36, VA-bits:48, 4K pages",
@@ -243,7 +228,7 @@ const struct vm_guest_mode_params vm_guest_mode_params[] = {
[VM_MODE_P40V48_4K] = { 40, 48, 0x1000, 12 },
[VM_MODE_P40V48_16K] = { 40, 48, 0x4000, 14 },
[VM_MODE_P40V48_64K] = { 40, 48, 0x10000, 16 },
- [VM_MODE_PXXV48_4K] = { 0, 0, 0x1000, 12 },
+ [VM_MODE_PXXVYY_4K] = { 0, 0, 0x1000, 12 },
[VM_MODE_P47V64_4K] = { 47, 64, 0x1000, 12 },
[VM_MODE_P44V64_4K] = { 44, 64, 0x1000, 12 },
[VM_MODE_P36V48_4K] = { 36, 48, 0x1000, 12 },
@@ -325,24 +310,26 @@ struct kvm_vm *____vm_create(struct vm_shape shape)
case VM_MODE_P36V47_16K:
vm->pgtable_levels = 3;
break;
- case VM_MODE_PXXV48_4K:
+ case VM_MODE_PXXVYY_4K:
#ifdef __x86_64__
kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits);
kvm_init_vm_address_properties(vm);
- /*
- * Ignore KVM support for 5-level paging (vm->va_bits == 57),
- * it doesn't take effect unless a CR4.LA57 is set, which it
- * isn't for this mode (48-bit virtual address space).
- */
- TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57,
- "Linear address width (%d bits) not supported",
- vm->va_bits);
+
pr_debug("Guest physical address width detected: %d\n",
vm->pa_bits);
- vm->pgtable_levels = 4;
- vm->va_bits = 48;
+ pr_debug("Guest virtual address width detected: %d\n",
+ vm->va_bits);
+
+ if (vm->va_bits == 57) {
+ vm->pgtable_levels = 5;
+ } else {
+ TEST_ASSERT(vm->va_bits == 48,
+ "Unexpected guest virtual address width: %d",
+ vm->va_bits);
+ vm->pgtable_levels = 4;
+ }
#else
- TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms");
+ TEST_FAIL("VM_MODE_PXXVYY_4K not supported on non-x86 platforms");
#endif
break;
case VM_MODE_P47V64_4K:
@@ -502,7 +489,7 @@ struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
guest_rng = new_guest_random_state(guest_random_seed);
sync_global_to_guest(vm, guest_rng);
- kvm_arch_vm_post_create(vm);
+ kvm_arch_vm_post_create(vm, nr_runnable_vcpus);
return vm;
}
@@ -540,6 +527,7 @@ struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
for (i = 0; i < nr_vcpus; ++i)
vcpus[i] = vm_vcpu_add(vm, i, guest_code);
+ kvm_arch_vm_finalize_vcpus(vm);
return vm;
}
@@ -605,15 +593,14 @@ struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm)
return vm_vcpu_recreate(vm, 0);
}
-void kvm_pin_this_task_to_pcpu(uint32_t pcpu)
+int __pin_task_to_cpu(pthread_t task, int cpu)
{
- cpu_set_t mask;
- int r;
+ cpu_set_t cpuset;
- CPU_ZERO(&mask);
- CPU_SET(pcpu, &mask);
- r = sched_setaffinity(0, sizeof(mask), &mask);
- TEST_ASSERT(!r, "sched_setaffinity() failed for pCPU '%u'.", pcpu);
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu, &cpuset);
+
+ return pthread_setaffinity_np(task, sizeof(cpuset), &cpuset);
}
static uint32_t parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask)
@@ -667,7 +654,7 @@ void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
/* 2. Check if the main worker needs to be pinned. */
if (cpu) {
- kvm_pin_this_task_to_pcpu(parse_pcpu(cpu, &allowed_mask));
+ pin_self_to_cpu(parse_pcpu(cpu, &allowed_mask));
cpu = strtok(NULL, delim);
}
@@ -719,8 +706,6 @@ userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
static void kvm_stats_release(struct kvm_binary_stats *stats)
{
- int ret;
-
if (stats->fd < 0)
return;
@@ -729,8 +714,7 @@ static void kvm_stats_release(struct kvm_binary_stats *stats)
stats->desc = NULL;
}
- ret = close(stats->fd);
- TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
+ kvm_close(stats->fd);
stats->fd = -1;
}
@@ -753,20 +737,14 @@ __weak void vcpu_arch_free(struct kvm_vcpu *vcpu)
*/
static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
- int ret;
-
if (vcpu->dirty_gfns) {
- ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
- TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
+ kvm_munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
vcpu->dirty_gfns = NULL;
}
- ret = munmap(vcpu->run, vcpu_mmap_sz());
- TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
-
- ret = close(vcpu->fd);
- TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
+ kvm_munmap(vcpu->run, vcpu_mmap_sz());
+ kvm_close(vcpu->fd);
kvm_stats_release(&vcpu->stats);
list_del(&vcpu->list);
@@ -778,38 +756,32 @@ static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
void kvm_vm_release(struct kvm_vm *vmp)
{
struct kvm_vcpu *vcpu, *tmp;
- int ret;
list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list)
vm_vcpu_rm(vmp, vcpu);
- ret = close(vmp->fd);
- TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
-
- ret = close(vmp->kvm_fd);
- TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
+ kvm_close(vmp->fd);
+ kvm_close(vmp->kvm_fd);
/* Free cached stats metadata and close FD */
kvm_stats_release(&vmp->stats);
+
+ kvm_arch_vm_release(vmp);
}
static void __vm_mem_region_delete(struct kvm_vm *vm,
struct userspace_mem_region *region)
{
- int ret;
-
rb_erase(&region->gpa_node, &vm->regions.gpa_tree);
rb_erase(&region->hva_node, &vm->regions.hva_tree);
hash_del(&region->slot_node);
sparsebit_free(&region->unused_phy_pages);
sparsebit_free(&region->protected_phy_pages);
- ret = munmap(region->mmap_start, region->mmap_size);
- TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
+ kvm_munmap(region->mmap_start, region->mmap_size);
if (region->fd >= 0) {
/* There's an extra map when using shared memory. */
- ret = munmap(region->mmap_alias, region->mmap_size);
- TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
+ kvm_munmap(region->mmap_alias, region->mmap_size);
close(region->fd);
}
if (region->region.guest_memfd >= 0)
@@ -847,7 +819,7 @@ void kvm_vm_free(struct kvm_vm *vmp)
int kvm_memfd_alloc(size_t size, bool hugepages)
{
int memfd_flags = MFD_CLOEXEC;
- int fd, r;
+ int fd;
if (hugepages)
memfd_flags |= MFD_HUGETLB;
@@ -855,11 +827,8 @@ int kvm_memfd_alloc(size_t size, bool hugepages)
fd = memfd_create("kvm_selftest", memfd_flags);
TEST_ASSERT(fd != -1, __KVM_SYSCALL_ERROR("memfd_create()", fd));
- r = ftruncate(fd, size);
- TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("ftruncate()", r));
-
- r = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, size);
- TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("fallocate()", r));
+ kvm_ftruncate(fd, size);
+ kvm_fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, size);
return fd;
}
@@ -976,8 +945,8 @@ void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags
/* FIXME: This thing needs to be ripped apart and rewritten. */
void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
- uint64_t guest_paddr, uint32_t slot, uint64_t npages,
- uint32_t flags, int guest_memfd, uint64_t guest_memfd_offset)
+ uint64_t gpa, uint32_t slot, uint64_t npages, uint32_t flags,
+ int guest_memfd, uint64_t guest_memfd_offset)
{
int ret;
struct userspace_mem_region *region;
@@ -991,30 +960,29 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
"Number of guest pages is not compatible with the host. "
"Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages));
- TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical "
+ TEST_ASSERT((gpa % vm->page_size) == 0, "Guest physical "
"address not on a page boundary.\n"
- " guest_paddr: 0x%lx vm->page_size: 0x%x",
- guest_paddr, vm->page_size);
- TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1)
+ " gpa: 0x%lx vm->page_size: 0x%x",
+ gpa, vm->page_size);
+ TEST_ASSERT((((gpa >> vm->page_shift) + npages) - 1)
<= vm->max_gfn, "Physical range beyond maximum "
"supported physical address,\n"
- " guest_paddr: 0x%lx npages: 0x%lx\n"
+ " gpa: 0x%lx npages: 0x%lx\n"
" vm->max_gfn: 0x%lx vm->page_size: 0x%x",
- guest_paddr, npages, vm->max_gfn, vm->page_size);
+ gpa, npages, vm->max_gfn, vm->page_size);
/*
* Confirm a mem region with an overlapping address doesn't
* already exist.
*/
region = (struct userspace_mem_region *) userspace_mem_region_find(
- vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
+ vm, gpa, (gpa + npages * vm->page_size) - 1);
if (region != NULL)
TEST_FAIL("overlapping userspace_mem_region already "
"exists\n"
- " requested guest_paddr: 0x%lx npages: 0x%lx "
- "page_size: 0x%x\n"
- " existing guest_paddr: 0x%lx size: 0x%lx",
- guest_paddr, npages, vm->page_size,
+ " requested gpa: 0x%lx npages: 0x%lx page_size: 0x%x\n"
+ " existing gpa: 0x%lx size: 0x%lx",
+ gpa, npages, vm->page_size,
(uint64_t) region->region.guest_phys_addr,
(uint64_t) region->region.memory_size);
@@ -1028,8 +996,7 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
"already exists.\n"
" requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
" existing slot: %u paddr: 0x%lx size: 0x%lx",
- slot, guest_paddr, npages,
- region->region.slot,
+ slot, gpa, npages, region->region.slot,
(uint64_t) region->region.guest_phys_addr,
(uint64_t) region->region.memory_size);
}
@@ -1055,7 +1022,7 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
alignment = max(backing_src_pagesz, alignment);
- TEST_ASSERT_EQ(guest_paddr, align_up(guest_paddr, backing_src_pagesz));
+ TEST_ASSERT_EQ(gpa, align_up(gpa, backing_src_pagesz));
/* Add enough memory to align up if necessary */
if (alignment > 1)
@@ -1066,12 +1033,9 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
region->fd = kvm_memfd_alloc(region->mmap_size,
src_type == VM_MEM_SRC_SHARED_HUGETLB);
- region->mmap_start = mmap(NULL, region->mmap_size,
- PROT_READ | PROT_WRITE,
- vm_mem_backing_src_alias(src_type)->flag,
- region->fd, 0);
- TEST_ASSERT(region->mmap_start != MAP_FAILED,
- __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
+ region->mmap_start = kvm_mmap(region->mmap_size, PROT_READ | PROT_WRITE,
+ vm_mem_backing_src_alias(src_type)->flag,
+ region->fd);
TEST_ASSERT(!is_backing_src_hugetlb(src_type) ||
region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz),
@@ -1106,8 +1070,7 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
* needing to track if the fd is owned by the framework
* or by the caller.
*/
- guest_memfd = dup(guest_memfd);
- TEST_ASSERT(guest_memfd >= 0, __KVM_SYSCALL_ERROR("dup()", guest_memfd));
+ guest_memfd = kvm_dup(guest_memfd);
}
region->region.guest_memfd = guest_memfd;
@@ -1119,20 +1082,18 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
region->unused_phy_pages = sparsebit_alloc();
if (vm_arch_has_protected_memory(vm))
region->protected_phy_pages = sparsebit_alloc();
- sparsebit_set_num(region->unused_phy_pages,
- guest_paddr >> vm->page_shift, npages);
+ sparsebit_set_num(region->unused_phy_pages, gpa >> vm->page_shift, npages);
region->region.slot = slot;
region->region.flags = flags;
- region->region.guest_phys_addr = guest_paddr;
+ region->region.guest_phys_addr = gpa;
region->region.memory_size = npages * vm->page_size;
region->region.userspace_addr = (uintptr_t) region->host_mem;
ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION2 IOCTL failed,\n"
" rc: %i errno: %i\n"
" slot: %u flags: 0x%x\n"
- " guest_phys_addr: 0x%lx size: 0x%lx guest_memfd: %d",
- ret, errno, slot, flags,
- guest_paddr, (uint64_t) region->region.memory_size,
+ " guest_phys_addr: 0x%lx size: 0x%llx guest_memfd: %d",
+ ret, errno, slot, flags, gpa, region->region.memory_size,
region->region.guest_memfd);
/* Add to quick lookup data structures */
@@ -1142,12 +1103,10 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
/* If shared memory, create an alias. */
if (region->fd >= 0) {
- region->mmap_alias = mmap(NULL, region->mmap_size,
- PROT_READ | PROT_WRITE,
- vm_mem_backing_src_alias(src_type)->flag,
- region->fd, 0);
- TEST_ASSERT(region->mmap_alias != MAP_FAILED,
- __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
+ region->mmap_alias = kvm_mmap(region->mmap_size,
+ PROT_READ | PROT_WRITE,
+ vm_mem_backing_src_alias(src_type)->flag,
+ region->fd);
/* Align host alias address */
region->host_alias = align_ptr_up(region->mmap_alias, alignment);
@@ -1156,10 +1115,10 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
void vm_userspace_mem_region_add(struct kvm_vm *vm,
enum vm_mem_backing_src_type src_type,
- uint64_t guest_paddr, uint32_t slot,
- uint64_t npages, uint32_t flags)
+ uint64_t gpa, uint32_t slot, uint64_t npages,
+ uint32_t flags)
{
- vm_mem_add(vm, src_type, guest_paddr, slot, npages, flags, -1, 0);
+ vm_mem_add(vm, src_type, gpa, slot, npages, flags, -1, 0);
}
/*
@@ -1225,6 +1184,16 @@ void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
ret, errno, slot, flags);
}
+void vm_mem_region_reload(struct kvm_vm *vm, uint32_t slot)
+{
+ struct userspace_mem_region *region = memslot2region(vm, slot);
+ struct kvm_userspace_memory_region2 tmp = region->region;
+
+ tmp.memory_size = 0;
+ vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &tmp);
+ vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
+}
+
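[Note: vm_mem_region_reload() leans on KVM's memslot API semantics: a KVM_SET_USER_MEMORY_REGION2 call with memory_size == 0 deletes the slot, and the follow-up call with the original parameters re-creates it, forcing KVM to drop and rebuild all mappings for that slot. A hypothetical caller, with the slot number assumed:

	/* Zap and re-establish all of slot 10's mappings, e.g. to re-test fault paths. */
	vm_mem_region_reload(vm, 10);
]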
/*
* VM Memory Region Move
*
@@ -1307,14 +1276,14 @@ void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size,
}
/* Returns the size of a vCPU's kvm_run structure. */
-static int vcpu_mmap_sz(void)
+static size_t vcpu_mmap_sz(void)
{
int dev_fd, ret;
dev_fd = open_kvm_dev_path_or_exit();
ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
- TEST_ASSERT(ret >= sizeof(struct kvm_run),
+ TEST_ASSERT(ret >= 0 && ret >= sizeof(struct kvm_run),
KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, ret));
close(dev_fd);
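[Note: the added "ret >= 0" guard matters because sizeof yields a size_t: under the usual arithmetic conversions a negative int is converted to unsigned before the comparison, so a failing ioctl() returning -1 would otherwise satisfy "ret >= sizeof(struct kvm_run)". A minimal illustration, assuming the selftest headers are included:

	int ret = -1;

	/* -1 is converted to SIZE_MAX for the comparison, so this (wrongly) holds: */
	assert(ret >= sizeof(struct kvm_run));
]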
@@ -1355,12 +1324,10 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
TEST_ASSERT_VM_VCPU_IOCTL(vcpu->fd >= 0, KVM_CREATE_VCPU, vcpu->fd, vm);
TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->run), "vcpu mmap size "
- "smaller than expected, vcpu_mmap_sz: %i expected_min: %zi",
+ "smaller than expected, vcpu_mmap_sz: %zi expected_min: %zi",
vcpu_mmap_sz(), sizeof(*vcpu->run));
- vcpu->run = (struct kvm_run *) mmap(NULL, vcpu_mmap_sz(),
- PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);
- TEST_ASSERT(vcpu->run != MAP_FAILED,
- __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
+ vcpu->run = kvm_mmap(vcpu_mmap_sz(), PROT_READ | PROT_WRITE,
+ MAP_SHARED, vcpu->fd);
if (kvm_has_cap(KVM_CAP_BINARY_STATS_FD))
vcpu->stats.fd = vcpu_get_stats_fd(vcpu);
@@ -1482,8 +1449,6 @@ static vm_vaddr_t ____vm_vaddr_alloc(struct kvm_vm *vm, size_t sz,
pages--, vaddr += vm->page_size, paddr += vm->page_size) {
virt_pg_map(vm, vaddr, paddr);
-
- sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
}
return vaddr_start;
@@ -1597,7 +1562,6 @@ void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
while (npages--) {
virt_pg_map(vm, vaddr, paddr);
- sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
vaddr += page_size;
paddr += page_size;
@@ -1716,7 +1680,18 @@ void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa)
/* Create an interrupt controller chip for the specified VM. */
void vm_create_irqchip(struct kvm_vm *vm)
{
- vm_ioctl(vm, KVM_CREATE_IRQCHIP, NULL);
+ int r;
+
+ /*
+ * Allocate a fully in-kernel IRQ chip by default, but fall back to a
+ * split model (x86 only) if that fails (KVM x86 allows compiling out
+ * support for KVM_CREATE_IRQCHIP).
+ */
+ r = __vm_ioctl(vm, KVM_CREATE_IRQCHIP, NULL);
+ if (r && errno == ENOTTY && kvm_has_cap(KVM_CAP_SPLIT_IRQCHIP))
+ vm_enable_cap(vm, KVM_CAP_SPLIT_IRQCHIP, 24);
+ else
+ TEST_ASSERT_VM_VCPU_IOCTL(!r, KVM_CREATE_IRQCHIP, r, vm);
vm->has_irqchip = true;
}
@@ -1796,9 +1771,8 @@ void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu)
page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped exec");
- addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd,
- page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
- TEST_ASSERT(addr != MAP_FAILED, "Dirty ring map failed");
+ addr = __kvm_mmap(size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd,
+ page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
vcpu->dirty_gfns = addr;
vcpu->dirty_gfns_count = size / sizeof(struct kvm_dirty_gfn);
@@ -2041,6 +2015,7 @@ static struct exit_reason {
KVM_EXIT_STRING(NOTIFY),
KVM_EXIT_STRING(LOONGARCH_IOCSR),
KVM_EXIT_STRING(MEMORY_FAULT),
+ KVM_EXIT_STRING(ARM_SEA),
};
/*
@@ -2305,7 +2280,15 @@ void kvm_get_stat(struct kvm_binary_stats *stats, const char *name,
TEST_FAIL("Unable to find stat '%s'", name);
}
-__weak void kvm_arch_vm_post_create(struct kvm_vm *vm)
+__weak void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus)
+{
+}
+
+__weak void kvm_arch_vm_finalize_vcpus(struct kvm_vm *vm)
+{
+}
+
+__weak void kvm_arch_vm_release(struct kvm_vm *vm)
{
}
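[Note: these defaults use __weak linkage so that common code can invoke the hooks unconditionally while each architecture overrides only the ones it needs; x86's kvm_arch_vm_post_create() override appears later in this diff. The pattern, sketched with hypothetical names:

	/* lib/common.c: weak default, used when no arch override exists. */
	__weak void arch_hook(struct kvm_vm *vm)
	{
	}

	/* lib/x86/arch.c: a non-weak definition silently replaces the default. */
	void arch_hook(struct kvm_vm *vm)
	{
		/* arch-specific setup */
	}
]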
@@ -2313,11 +2296,35 @@ __weak void kvm_selftest_arch_init(void)
{
}
+static void report_unexpected_signal(int signum)
+{
+#define KVM_CASE_SIGNUM(sig) \
+ case sig: TEST_FAIL("Unexpected " #sig " (%d)\n", signum)
+
+ switch (signum) {
+ KVM_CASE_SIGNUM(SIGBUS);
+ KVM_CASE_SIGNUM(SIGSEGV);
+ KVM_CASE_SIGNUM(SIGILL);
+ KVM_CASE_SIGNUM(SIGFPE);
+ default:
+ TEST_FAIL("Unexpected signal %d\n", signum);
+ }
+}
+
void __attribute((constructor)) kvm_selftest_init(void)
{
+ struct sigaction sig_sa = {
+ .sa_handler = report_unexpected_signal,
+ };
+
/* Tell stdout not to buffer its content. */
setbuf(stdout, NULL);
+ sigaction(SIGBUS, &sig_sa, NULL);
+ sigaction(SIGSEGV, &sig_sa, NULL);
+ sigaction(SIGILL, &sig_sa, NULL);
+ sigaction(SIGFPE, &sig_sa, NULL);
+
guest_random_seed = last_guest_seed = random();
pr_info("Random seed: 0x%x\n", guest_random_seed);
@@ -2338,3 +2345,8 @@ bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr)
pg = paddr >> vm->page_shift;
return sparsebit_is_set(region->protected_phy_pages, pg);
}
+
+__weak bool kvm_arch_has_default_irqchip(void)
+{
+ return false;
+}
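[Note: throughout this series, open-coded mmap()/munmap()/dup() calls paired with TEST_ASSERT() are replaced by kvm_mmap()/__kvm_mmap()/kvm_munmap()/kvm_dup() wrappers whose definitions are not part of the hunks shown here. A plausible sketch of the pattern they encapsulate — an assumption, not the actual implementation:

	static inline void *__kvm_mmap(size_t size, int prot, int flags, int fd,
				       off_t offset)
	{
		void *mem = mmap(NULL, size, prot, flags, fd, offset);

		/* Assert on failure so callers never see MAP_FAILED. */
		TEST_ASSERT(mem != MAP_FAILED,
			    __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
		return mem;
	}

	static inline void *kvm_mmap(size_t size, int prot, int flags, int fd)
	{
		return __kvm_mmap(size, prot, flags, fd, 0);
	}

	static inline void kvm_munmap(void *mem, size_t size)
	{
		TEST_ASSERT(!munmap(mem, size), __KVM_SYSCALL_ERROR("munmap()", -1));
	}

	static inline int kvm_dup(int fd)
	{
		int new_fd = dup(fd);

		TEST_ASSERT(new_fd >= 0, __KVM_SYSCALL_ERROR("dup()", new_fd));
		return new_fd;
	}
]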
diff --git a/tools/testing/selftests/kvm/lib/loongarch/exception.S b/tools/testing/selftests/kvm/lib/loongarch/exception.S
index 88bfa505c6f5..3f1e4b67c5ae 100644
--- a/tools/testing/selftests/kvm/lib/loongarch/exception.S
+++ b/tools/testing/selftests/kvm/lib/loongarch/exception.S
@@ -51,9 +51,15 @@ handle_exception:
st.d t0, sp, ESTAT_OFFSET_EXREGS
csrrd t0, LOONGARCH_CSR_BADV
st.d t0, sp, BADV_OFFSET_EXREGS
+ csrrd t0, LOONGARCH_CSR_PRMD
+ st.d t0, sp, PRMD_OFFSET_EXREGS
or a0, sp, zero
bl route_exception
+ ld.d t0, sp, PC_OFFSET_EXREGS
+ csrwr t0, LOONGARCH_CSR_ERA
+ ld.d t0, sp, PRMD_OFFSET_EXREGS
+ csrwr t0, LOONGARCH_CSR_PRMD
restore_gprs sp
csrrd sp, LOONGARCH_CSR_KS0
ertn
diff --git a/tools/testing/selftests/kvm/lib/loongarch/processor.c b/tools/testing/selftests/kvm/lib/loongarch/processor.c
index 0ac1abcb71cb..07c103369ddb 100644
--- a/tools/testing/selftests/kvm/lib/loongarch/processor.c
+++ b/tools/testing/selftests/kvm/lib/loongarch/processor.c
@@ -3,6 +3,7 @@
#include <assert.h>
#include <linux/compiler.h>
+#include <asm/kvm.h>
#include "kvm_util.h"
#include "processor.h"
#include "ucall_common.h"
@@ -11,6 +12,7 @@
#define LOONGARCH_GUEST_STACK_VADDR_MIN 0x200000
static vm_paddr_t invalid_pgtable[4];
+static vm_vaddr_t exception_handlers;
static uint64_t virt_pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)
{
@@ -183,7 +185,14 @@ void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
void route_exception(struct ex_regs *regs)
{
+ int vector;
unsigned long pc, estat, badv;
+ struct handlers *handlers;
+
+ handlers = (struct handlers *)exception_handlers;
+ vector = (regs->estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
+ if (handlers && handlers->exception_handlers[vector])
+ return handlers->exception_handlers[vector](regs);
pc = regs->pc;
badv = regs->badv;
@@ -192,6 +201,32 @@ void route_exception(struct ex_regs *regs)
while (1) ;
}
+void vm_init_descriptor_tables(struct kvm_vm *vm)
+{
+ void *addr;
+
+ vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
+ LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);
+
+ addr = addr_gva2hva(vm, vm->handlers);
+ memset(addr, 0, vm->page_size);
+ exception_handlers = vm->handlers;
+ sync_global_to_guest(vm, exception_handlers);
+}
+
+void vm_install_exception_handler(struct kvm_vm *vm, int vector, handler_fn handler)
+{
+ struct handlers *handlers = addr_gva2hva(vm, vm->handlers);
+
+ assert(vector < VECTOR_NUM);
+ handlers->exception_handlers[vector] = handler;
+}
+
+uint32_t guest_get_vcpuid(void)
+{
+ return csr_read(LOONGARCH_CSR_CPUID);
+}
+
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
int i;
@@ -211,6 +246,11 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
vcpu_regs_set(vcpu, &regs);
}
+static void loongarch_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
+{
+ __vcpu_set_reg(vcpu, id, val);
+}
+
static void loongarch_get_csr(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
uint64_t csrid;
@@ -242,8 +282,8 @@ static void loongarch_vcpu_setup(struct kvm_vcpu *vcpu)
TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
}
- /* user mode and page enable mode */
- val = PLV_USER | CSR_CRMD_PG;
+ /* kernel mode and page enable mode */
+ val = PLV_KERN | CSR_CRMD_PG;
loongarch_set_csr(vcpu, LOONGARCH_CSR_CRMD, val);
loongarch_set_csr(vcpu, LOONGARCH_CSR_PRMD, val);
loongarch_set_csr(vcpu, LOONGARCH_CSR_EUEN, 1);
@@ -251,7 +291,10 @@ static void loongarch_vcpu_setup(struct kvm_vcpu *vcpu)
loongarch_set_csr(vcpu, LOONGARCH_CSR_TCFG, 0);
loongarch_set_csr(vcpu, LOONGARCH_CSR_ASID, 1);
+ /* time count start from 0 */
val = 0;
+ loongarch_set_reg(vcpu, KVM_REG_LOONGARCH_COUNTER, val);
+
width = vm->page_shift - 3;
switch (vm->pgtable_levels) {
diff --git a/tools/testing/selftests/kvm/lib/memstress.c b/tools/testing/selftests/kvm/lib/memstress.c
index 313277486a1d..557c0a0a5658 100644
--- a/tools/testing/selftests/kvm/lib/memstress.c
+++ b/tools/testing/selftests/kvm/lib/memstress.c
@@ -265,7 +265,7 @@ static void *vcpu_thread_main(void *data)
int vcpu_idx = vcpu->vcpu_idx;
if (memstress_args.pin_vcpus)
- kvm_pin_this_task_to_pcpu(memstress_args.vcpu_to_pcpu[vcpu_idx]);
+ pin_self_to_cpu(memstress_args.vcpu_to_pcpu[vcpu_idx]);
WRITE_ONCE(vcpu->running, true);
diff --git a/tools/testing/selftests/kvm/lib/s390/processor.c b/tools/testing/selftests/kvm/lib/s390/processor.c
index 20cfe970e3e3..8ceeb17c819a 100644
--- a/tools/testing/selftests/kvm/lib/s390/processor.c
+++ b/tools/testing/selftests/kvm/lib/s390/processor.c
@@ -221,3 +221,8 @@ void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
}
+
+bool kvm_arch_has_default_irqchip(void)
+{
+ return true;
+}
diff --git a/tools/testing/selftests/kvm/lib/sparsebit.c b/tools/testing/selftests/kvm/lib/sparsebit.c
index cfed9d26cc71..a99188f87a38 100644
--- a/tools/testing/selftests/kvm/lib/sparsebit.c
+++ b/tools/testing/selftests/kvm/lib/sparsebit.c
@@ -116,7 +116,7 @@
*
* + A node with all mask bits set only occurs when the last bit
* described by the previous node is not equal to this nodes
- * starting index - 1. All such occurences of this condition are
+ * starting index - 1. All such occurrences of this condition are
* avoided by moving the setting of the nodes mask bits into
* the previous nodes num_after setting.
*
@@ -592,7 +592,7 @@ static struct node *node_split(struct sparsebit *s, sparsebit_idx_t idx)
*
* + A node with all mask bits set only occurs when the last bit
* described by the previous node is not equal to this nodes
- * starting index - 1. All such occurences of this condition are
+ * starting index - 1. All such occurrences of this condition are
* avoided by moving the setting of the nodes mask bits into
* the previous nodes num_after setting.
*/
diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c
index 03eb99af9b8d..8a1848586a85 100644
--- a/tools/testing/selftests/kvm/lib/test_util.c
+++ b/tools/testing/selftests/kvm/lib/test_util.c
@@ -18,6 +18,13 @@
#include "test_util.h"
+sigjmp_buf expect_sigbus_jmpbuf;
+
+void __attribute__((used)) expect_sigbus_handler(int signum)
+{
+ siglongjmp(expect_sigbus_jmpbuf, 1);
+}
+
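[Note: expect_sigbus_handler pairs with sigsetjmp()/siglongjmp() so a test can deliberately trigger and swallow a SIGBUS. A hypothetical usage — the poisoned address and handler installation are illustrative:

	struct sigaction sa = { .sa_handler = expect_sigbus_handler };

	sigaction(SIGBUS, &sa, NULL);
	if (sigsetjmp(expect_sigbus_jmpbuf, 1) == 0) {
		(void)*(volatile uint8_t *)poisoned_hva;	/* should raise SIGBUS */
		TEST_FAIL("Expected SIGBUS");
	}
	/* Control resumes here via siglongjmp() from the handler. */
]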
/*
* Random number generator that is usable from guest code. This is the
* Park-Miller LCG using standard constants.
diff --git a/tools/testing/selftests/kvm/lib/x86/memstress.c b/tools/testing/selftests/kvm/lib/x86/memstress.c
index 7f5d62a65c68..0b1f288ad556 100644
--- a/tools/testing/selftests/kvm/lib/x86/memstress.c
+++ b/tools/testing/selftests/kvm/lib/x86/memstress.c
@@ -63,7 +63,7 @@ void memstress_setup_ept(struct vmx_pages *vmx, struct kvm_vm *vm)
{
uint64_t start, end;
- prepare_eptp(vmx, vm, 0);
+ prepare_eptp(vmx, vm);
/*
* Identity map the first 4G and the test region with 1G pages so that
diff --git a/tools/testing/selftests/kvm/lib/x86/pmu.c b/tools/testing/selftests/kvm/lib/x86/pmu.c
index f31f0427c17c..34cb57d1d671 100644
--- a/tools/testing/selftests/kvm/lib/x86/pmu.c
+++ b/tools/testing/selftests/kvm/lib/x86/pmu.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include "kvm_util.h"
+#include "processor.h"
#include "pmu.h"
const uint64_t intel_pmu_arch_events[] = {
@@ -19,6 +20,11 @@ const uint64_t intel_pmu_arch_events[] = {
INTEL_ARCH_BRANCHES_RETIRED,
INTEL_ARCH_BRANCHES_MISPREDICTED,
INTEL_ARCH_TOPDOWN_SLOTS,
+ INTEL_ARCH_TOPDOWN_BE_BOUND,
+ INTEL_ARCH_TOPDOWN_BAD_SPEC,
+ INTEL_ARCH_TOPDOWN_FE_BOUND,
+ INTEL_ARCH_TOPDOWN_RETIRING,
+ INTEL_ARCH_LBR_INSERTS,
};
kvm_static_assert(ARRAY_SIZE(intel_pmu_arch_events) == NR_INTEL_ARCH_EVENTS);
@@ -29,3 +35,46 @@ const uint64_t amd_pmu_zen_events[] = {
AMD_ZEN_BRANCHES_MISPREDICTED,
};
kvm_static_assert(ARRAY_SIZE(amd_pmu_zen_events) == NR_AMD_ZEN_EVENTS);
+
+/*
+ * For Intel Atom CPUs, the PMU events "Instruction Retired" or
+ * "Branch Instruction Retired" may be overcounted for some certain
+ * instructions, like FAR CALL/JMP, RETF, IRET, VMENTRY/VMEXIT/VMPTRLD
+ * and complex SGX/SMX/CSTATE instructions/flows.
+ *
+ * The detailed information can be found in the errata (section SRF7):
+ * https://edc.intel.com/content/www/us/en/design/products-and-solutions/processors-and-chipsets/sierra-forest/xeon-6700-series-processor-with-e-cores-specification-update/errata-details/
+ *
+ * For the Atom platforms before Sierra Forest (including Sierra Forest),
+ * Both 2 events "Instruction Retired" and "Branch Instruction Retired" would
+ * be overcounted on these certain instructions, but for Clearwater Forest
+ * only "Instruction Retired" event is overcounted on these instructions.
+ */
+static uint64_t get_pmu_errata(void)
+{
+ if (!this_cpu_is_intel())
+ return 0;
+
+ if (this_cpu_family() != 0x6)
+ return 0;
+
+ switch (this_cpu_model()) {
+ case 0xDD: /* Clearwater Forest */
+ return BIT_ULL(INSTRUCTIONS_RETIRED_OVERCOUNT);
+ case 0xAF: /* Sierra Forest */
+ case 0x4D: /* Avaton, Rangely */
+ case 0x5F: /* Denverton */
+ case 0x86: /* Jacobsville */
+ return BIT_ULL(INSTRUCTIONS_RETIRED_OVERCOUNT) |
+ BIT_ULL(BRANCHES_RETIRED_OVERCOUNT);
+ default:
+ return 0;
+ }
+}
+
+uint64_t pmu_errata_mask;
+
+void kvm_init_pmu_errata(void)
+{
+ pmu_errata_mask = get_pmu_errata();
+}
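[Note: pmu_errata_mask is synced to the guest (see the sync_global_to_guest() call added in x86/processor.c below), so guest code can relax exact-count assertions on affected parts. A hypothetical consumer, with count/expected standing in for a test's measured and expected event counts:

	/* Tolerate overcounting on errata'd CPUs, demand an exact match elsewhere. */
	if (pmu_errata_mask & BIT_ULL(INSTRUCTIONS_RETIRED_OVERCOUNT))
		GUEST_ASSERT(count >= expected);
	else
		GUEST_ASSERT_EQ(count, expected);
]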
diff --git a/tools/testing/selftests/kvm/lib/x86/processor.c b/tools/testing/selftests/kvm/lib/x86/processor.c
index a92dc1dad085..36104d27f3d9 100644
--- a/tools/testing/selftests/kvm/lib/x86/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86/processor.c
@@ -6,6 +6,7 @@
#include "linux/bitmap.h"
#include "test_util.h"
#include "kvm_util.h"
+#include "pmu.h"
#include "processor.h"
#include "sev.h"
@@ -23,6 +24,39 @@ bool host_cpu_is_intel;
bool is_forced_emulation_enabled;
uint64_t guest_tsc_khz;
+const char *ex_str(int vector)
+{
+ switch (vector) {
+#define VEC_STR(v) case v##_VECTOR: return "#" #v
+ case DE_VECTOR: return "no exception";
+ case KVM_MAGIC_DE_VECTOR: return "#DE";
+ VEC_STR(DB);
+ VEC_STR(NMI);
+ VEC_STR(BP);
+ VEC_STR(OF);
+ VEC_STR(BR);
+ VEC_STR(UD);
+ VEC_STR(NM);
+ VEC_STR(DF);
+ VEC_STR(TS);
+ VEC_STR(NP);
+ VEC_STR(SS);
+ VEC_STR(GP);
+ VEC_STR(PF);
+ VEC_STR(MF);
+ VEC_STR(AC);
+ VEC_STR(MC);
+ VEC_STR(XM);
+ VEC_STR(VE);
+ VEC_STR(CP);
+ VEC_STR(HV);
+ VEC_STR(VC);
+ VEC_STR(SX);
+ default: return "#??";
+#undef VEC_STR
+ }
+}
+
static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent)
{
fprintf(stream, "%*srax: 0x%.16llx rbx: 0x%.16llx "
@@ -124,10 +158,10 @@ bool kvm_is_tdp_enabled(void)
void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
- TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
- "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
+ TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
+ "Unknown or unsupported guest mode: 0x%x", vm->mode);
- /* If needed, create page map l4 table. */
+ /* If needed, create the top-level page table. */
if (!vm->pgd_created) {
vm->pgd = vm_alloc_page_table(vm);
vm->pgd_created = true;
@@ -184,11 +218,11 @@ static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
{
const uint64_t pg_size = PG_LEVEL_SIZE(level);
- uint64_t *pml4e, *pdpe, *pde;
- uint64_t *pte;
+ uint64_t *pte = &vm->pgd;
+ int current_level;
- TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K,
- "Unknown or unsupported guest mode, mode: 0x%x", vm->mode);
+ TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
+ "Unknown or unsupported guest mode: 0x%x", vm->mode);
TEST_ASSERT((vaddr % pg_size) == 0,
"Virtual address not aligned,\n"
@@ -209,20 +243,17 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
* Allocate upper level page tables, if not already present. Return
* early if a hugepage was created.
*/
- pml4e = virt_create_upper_pte(vm, &vm->pgd, vaddr, paddr, PG_LEVEL_512G, level);
- if (*pml4e & PTE_LARGE_MASK)
- return;
-
- pdpe = virt_create_upper_pte(vm, pml4e, vaddr, paddr, PG_LEVEL_1G, level);
- if (*pdpe & PTE_LARGE_MASK)
- return;
-
- pde = virt_create_upper_pte(vm, pdpe, vaddr, paddr, PG_LEVEL_2M, level);
- if (*pde & PTE_LARGE_MASK)
- return;
+ for (current_level = vm->pgtable_levels;
+ current_level > PG_LEVEL_4K;
+ current_level--) {
+ pte = virt_create_upper_pte(vm, pte, vaddr, paddr,
+ current_level, level);
+ if (*pte & PTE_LARGE_MASK)
+ return;
+ }
/* Fill in page table entry. */
- pte = virt_get_pte(vm, pde, vaddr, PG_LEVEL_4K);
+ pte = virt_get_pte(vm, pte, vaddr, PG_LEVEL_4K);
TEST_ASSERT(!(*pte & PTE_PRESENT_MASK),
"PTE already present for 4k page at vaddr: 0x%lx", vaddr);
*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK);
@@ -255,6 +286,8 @@ void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
for (i = 0; i < nr_pages; i++) {
__virt_pg_map(vm, vaddr, paddr, level);
+ sparsebit_set_num(vm->vpages_mapped, vaddr >> vm->page_shift,
+ nr_bytes / PAGE_SIZE);
vaddr += pg_size;
paddr += pg_size;
@@ -276,40 +309,38 @@ static bool vm_is_target_pte(uint64_t *pte, int *level, int current_level)
uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
int *level)
{
- uint64_t *pml4e, *pdpe, *pde;
+ int va_width = 12 + (vm->pgtable_levels) * 9;
+ uint64_t *pte = &vm->pgd;
+ int current_level;
TEST_ASSERT(!vm->arch.is_pt_protected,
"Walking page tables of protected guests is impossible");
- TEST_ASSERT(*level >= PG_LEVEL_NONE && *level < PG_LEVEL_NUM,
+ TEST_ASSERT(*level >= PG_LEVEL_NONE && *level <= vm->pgtable_levels,
"Invalid PG_LEVEL_* '%d'", *level);
- TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
- "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
+ TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
+ "Unknown or unsupported guest mode: 0x%x", vm->mode);
TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
(vaddr >> vm->page_shift)),
"Invalid virtual address, vaddr: 0x%lx",
vaddr);
/*
- * Based on the mode check above there are 48 bits in the vaddr, so
- * shift 16 to sign extend the last bit (bit-47),
+ * Check that the vaddr is a sign-extended va_width value.
*/
- TEST_ASSERT(vaddr == (((int64_t)vaddr << 16) >> 16),
- "Canonical check failed. The virtual address is invalid.");
-
- pml4e = virt_get_pte(vm, &vm->pgd, vaddr, PG_LEVEL_512G);
- if (vm_is_target_pte(pml4e, level, PG_LEVEL_512G))
- return pml4e;
-
- pdpe = virt_get_pte(vm, pml4e, vaddr, PG_LEVEL_1G);
- if (vm_is_target_pte(pdpe, level, PG_LEVEL_1G))
- return pdpe;
-
- pde = virt_get_pte(vm, pdpe, vaddr, PG_LEVEL_2M);
- if (vm_is_target_pte(pde, level, PG_LEVEL_2M))
- return pde;
+ TEST_ASSERT(vaddr ==
+ (((int64_t)vaddr << (64 - va_width) >> (64 - va_width))),
+ "Canonical check failed. The virtual address is invalid.");
+
+ for (current_level = vm->pgtable_levels;
+ current_level > PG_LEVEL_4K;
+ current_level--) {
+ pte = virt_get_pte(vm, pte, vaddr, current_level);
+ if (vm_is_target_pte(pte, level, current_level))
+ return pte;
+ }
- return virt_get_pte(vm, pde, vaddr, PG_LEVEL_4K);
+ return virt_get_pte(vm, pte, vaddr, PG_LEVEL_4K);
}
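[Note: the rewritten walker derives the virtual-address width from the page-table depth, va_width = 12 + levels * 9, i.e. 48 bits for 4-level and 57 bits for 5-level paging, and the canonical check verifies that bit va_width - 1 is sign-extended through the upper bits. A worked example for the 4-level case, with an illustrative address:

	int va_width = 12 + 4 * 9;			/* 48 */
	uint64_t vaddr = 0xffff800000000000ULL;		/* bits 63:47 all set */

	/* Shift left 16, arithmetic shift right 16: reproduces vaddr, so it's canonical. */
	assert(vaddr == (uint64_t)(((int64_t)vaddr << (64 - va_width)) >> (64 - va_width)));
]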
uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr)
@@ -492,7 +523,8 @@ static void vcpu_init_sregs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
struct kvm_sregs sregs;
- TEST_ASSERT_EQ(vm->mode, VM_MODE_PXXV48_4K);
+ TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
+ "Unknown or unsupported guest mode: 0x%x", vm->mode);
/* Set mode specific system register values. */
vcpu_sregs_get(vcpu, &sregs);
@@ -506,6 +538,8 @@ static void vcpu_init_sregs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
sregs.cr4 |= X86_CR4_PAE | X86_CR4_OSFXSR;
if (kvm_cpu_has(X86_FEATURE_XSAVE))
sregs.cr4 |= X86_CR4_OSXSAVE;
+ if (vm->pgtable_levels == 5)
+ sregs.cr4 |= X86_CR4_LA57;
sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);
kvm_seg_set_unusable(&sregs.ldt);
@@ -557,7 +591,7 @@ static bool kvm_fixup_exception(struct ex_regs *regs)
return false;
if (regs->vector == DE_VECTOR)
- return false;
+ regs->vector = KVM_MAGIC_DE_VECTOR;
regs->rip = regs->r11;
regs->r9 = regs->vector;
@@ -625,7 +659,7 @@ void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
REPORT_GUEST_ASSERT(uc);
}
-void kvm_arch_vm_post_create(struct kvm_vm *vm)
+void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus)
{
int r;
@@ -638,6 +672,7 @@ void kvm_arch_vm_post_create(struct kvm_vm *vm)
sync_global_to_guest(vm, host_cpu_is_intel);
sync_global_to_guest(vm, host_cpu_is_amd);
sync_global_to_guest(vm, is_forced_emulation_enabled);
+ sync_global_to_guest(vm, pmu_errata_mask);
if (is_sev_vm(vm)) {
struct kvm_sev_init init = { 0 };
@@ -1264,21 +1299,13 @@ done:
return min(max_gfn, ht_gfn - 1);
}
-/* Returns true if kvm_intel was loaded with unrestricted_guest=1. */
-bool vm_is_unrestricted_guest(struct kvm_vm *vm)
-{
- /* Ensure that a KVM vendor-specific module is loaded. */
- if (vm == NULL)
- close(open_kvm_dev_path_or_exit());
-
- return get_kvm_intel_param_bool("unrestricted_guest");
-}
-
void kvm_selftest_arch_init(void)
{
host_cpu_is_intel = this_cpu_is_intel();
host_cpu_is_amd = this_cpu_is_amd();
is_forced_emulation_enabled = kvm_is_forced_emulation_enabled();
+
+ kvm_init_pmu_errata();
}
bool sys_clocksource_is_based_on_tsc(void)
@@ -1291,3 +1318,8 @@ bool sys_clocksource_is_based_on_tsc(void)
return ret;
}
+
+bool kvm_arch_has_default_irqchip(void)
+{
+ return true;
+}
diff --git a/tools/testing/selftests/kvm/lib/x86/vmx.c b/tools/testing/selftests/kvm/lib/x86/vmx.c
index d4d1208dd023..29b082a58daa 100644
--- a/tools/testing/selftests/kvm/lib/x86/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86/vmx.c
@@ -401,11 +401,11 @@ void __nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
struct eptPageTableEntry *pt = vmx->eptp_hva, *pte;
uint16_t index;
- TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
- "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
+ TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
+ "Unknown or unsupported guest mode: 0x%x", vm->mode);
TEST_ASSERT((nested_paddr >> 48) == 0,
- "Nested physical address 0x%lx requires 5-level paging",
+ "Nested physical address 0x%lx exceeds 48 bits and requires 5-level EPT",
nested_paddr);
TEST_ASSERT((nested_paddr % page_size) == 0,
"Nested physical address not on page boundary,\n"
@@ -534,8 +534,7 @@ bool kvm_cpu_has_ept(void)
return ctrl & SECONDARY_EXEC_ENABLE_EPT;
}
-void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
- uint32_t eptp_memslot)
+void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm)
{
TEST_ASSERT(kvm_cpu_has_ept(), "KVM doesn't support nested EPT");
diff --git a/tools/testing/selftests/kvm/loongarch/arch_timer.c b/tools/testing/selftests/kvm/loongarch/arch_timer.c
new file mode 100644
index 000000000000..355ecac30954
--- /dev/null
+++ b/tools/testing/selftests/kvm/loongarch/arch_timer.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This test validates the periodic/one-shot constant timer IRQ using
+ * the CSR.TCFG and CSR.TVAL registers.
+ */
+#include "arch_timer.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "timer_test.h"
+#include "ucall_common.h"
+
+static void do_idle(void)
+{
+ unsigned int intid;
+ unsigned long estat;
+
+ __asm__ __volatile__("idle 0" : : : "memory");
+
+ estat = csr_read(LOONGARCH_CSR_ESTAT);
+ intid = !!(estat & BIT(INT_TI));
+
+ /* Make sure pending timer IRQ arrived */
+ GUEST_ASSERT_EQ(intid, 1);
+ csr_write(CSR_TINTCLR_TI, LOONGARCH_CSR_TINTCLR);
+}
+
+static void guest_irq_handler(struct ex_regs *regs)
+{
+ unsigned int intid;
+ uint32_t cpu = guest_get_vcpuid();
+ uint64_t xcnt, val, cfg, xcnt_diff_us;
+ struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
+
+ intid = !!(regs->estat & BIT(INT_TI));
+
+ /* Make sure we are dealing with the correct timer IRQ */
+ GUEST_ASSERT_EQ(intid, 1);
+
+ cfg = timer_get_cfg();
+ if (cfg & CSR_TCFG_PERIOD) {
+ WRITE_ONCE(shared_data->nr_iter, shared_data->nr_iter - 1);
+ if (shared_data->nr_iter == 0)
+ disable_timer();
+ csr_write(CSR_TINTCLR_TI, LOONGARCH_CSR_TINTCLR);
+ return;
+ }
+
+ /*
+ * On a real machine, the value of LOONGARCH_CSR_TVAL is BIT_ULL(48) - 1;
+ * on a virtual machine, its value counts down from BIT_ULL(48) - 1.
+ */
+ val = timer_get_val();
+ xcnt = timer_get_cycles();
+ xcnt_diff_us = cycles_to_usec(xcnt - shared_data->xcnt);
+
+ /* Basic 'timer condition met' check */
+ __GUEST_ASSERT(val > cfg,
+ "val = 0x%lx, cfg = 0x%lx, xcnt_diff_us = 0x%lx",
+ val, cfg, xcnt_diff_us);
+
+ csr_write(CSR_TINTCLR_TI, LOONGARCH_CSR_TINTCLR);
+ WRITE_ONCE(shared_data->nr_iter, shared_data->nr_iter + 1);
+}
+
+static void guest_test_period_timer(uint32_t cpu)
+{
+ uint32_t irq_iter, config_iter;
+ uint64_t us;
+ struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
+
+ shared_data->nr_iter = test_args.nr_iter;
+ shared_data->xcnt = timer_get_cycles();
+ us = msecs_to_usecs(test_args.timer_period_ms) + test_args.timer_err_margin_us;
+ timer_set_next_cmp_ms(test_args.timer_period_ms, true);
+
+ for (config_iter = 0; config_iter < test_args.nr_iter; config_iter++) {
+ /* Setup a timeout for the interrupt to arrive */
+ udelay(us);
+ }
+
+ irq_iter = READ_ONCE(shared_data->nr_iter);
+ __GUEST_ASSERT(irq_iter == 0,
+ "irq_iter = 0x%x.\n"
+ " Guest periodic timer interrupt was not triggered within the specified\n"
+ " interval; try increasing the error margin with the [-e] option.\n",
+ irq_iter);
+}
+
+static void guest_test_oneshot_timer(uint32_t cpu)
+{
+ uint32_t irq_iter, config_iter;
+ uint64_t us;
+ struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
+
+ shared_data->nr_iter = 0;
+ shared_data->guest_stage = 0;
+ us = msecs_to_usecs(test_args.timer_period_ms) + test_args.timer_err_margin_us;
+ for (config_iter = 0; config_iter < test_args.nr_iter; config_iter++) {
+ shared_data->xcnt = timer_get_cycles();
+
+ /* Setup the next interrupt */
+ timer_set_next_cmp_ms(test_args.timer_period_ms, false);
+ /* Setup a timeout for the interrupt to arrive */
+ udelay(us);
+
+ irq_iter = READ_ONCE(shared_data->nr_iter);
+ __GUEST_ASSERT(config_iter + 1 == irq_iter,
+ "config_iter + 1 = 0x%x, irq_iter = 0x%x.\n"
+ " Guest timer interrupt was not triggered within the specified\n"
+ " interval; try increasing the error margin with the [-e] option.\n",
+ config_iter + 1, irq_iter);
+ }
+}
+
+static void guest_test_emulate_timer(uint32_t cpu)
+{
+ uint32_t config_iter;
+ uint64_t xcnt_diff_us, us;
+ struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
+
+ local_irq_disable();
+ shared_data->nr_iter = 0;
+ us = msecs_to_usecs(test_args.timer_period_ms);
+ for (config_iter = 0; config_iter < test_args.nr_iter; config_iter++) {
+ shared_data->xcnt = timer_get_cycles();
+
+ /* Setup the next interrupt */
+ timer_set_next_cmp_ms(test_args.timer_period_ms, false);
+ do_idle();
+
+ xcnt_diff_us = cycles_to_usec(timer_get_cycles() - shared_data->xcnt);
+ __GUEST_ASSERT(xcnt_diff_us >= us,
+ "xcnt_diff_us = 0x%lx, us = 0x%lx.\n",
+ xcnt_diff_us, us);
+ }
+ local_irq_enable();
+}
+
+static void guest_time_count_test(uint32_t cpu)
+{
+ uint32_t config_iter;
+ unsigned long start, end, prev, us;
+
+ /* Assume the test case starts running within 1 second */
+ start = timer_get_cycles();
+ us = msec_to_cycles(1000);
+ __GUEST_ASSERT(start <= us,
+ "start = 0x%lx, us = 0x%lx.\n",
+ start, us);
+
+ us = msec_to_cycles(test_args.timer_period_ms);
+ for (config_iter = 0; config_iter < test_args.nr_iter; config_iter++) {
+ start = timer_get_cycles();
+ end = start + us;
+ /* verify that the time count is monotonically increasing */
+ while (start < end) {
+ prev = start;
+ start = timer_get_cycles();
+ __GUEST_ASSERT(prev <= start,
+ "prev = 0x%lx, start = 0x%lx.\n",
+ prev, start);
+ }
+ }
+}
+
+static void guest_code(void)
+{
+ uint32_t cpu = guest_get_vcpuid();
+
+ /* must run first */
+ guest_time_count_test(cpu);
+
+ timer_irq_enable();
+ local_irq_enable();
+ guest_test_period_timer(cpu);
+ guest_test_oneshot_timer(cpu);
+ guest_test_emulate_timer(cpu);
+
+ GUEST_DONE();
+}
+
+struct kvm_vm *test_vm_create(void)
+{
+ struct kvm_vm *vm;
+ int nr_vcpus = test_args.nr_vcpus;
+
+ vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);
+ vm_init_descriptor_tables(vm);
+ vm_install_exception_handler(vm, EXCCODE_INT, guest_irq_handler);
+
+ /* Make all the test's cmdline args visible to the guest */
+ sync_global_to_guest(vm, test_args);
+
+ return vm;
+}
+
+void test_vm_cleanup(struct kvm_vm *vm)
+{
+ kvm_vm_free(vm);
+}
diff --git a/tools/testing/selftests/kvm/memslot_modification_stress_test.c b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
index c81a84990eab..3cdfa3b19b85 100644
--- a/tools/testing/selftests/kvm/memslot_modification_stress_test.c
+++ b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
@@ -22,6 +22,7 @@
#include "processor.h"
#include "test_util.h"
#include "guest_modes.h"
+#include "ucall_common.h"
#define DUMMY_MEMSLOT_INDEX 7
diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c
index e3711beff7f3..5087d082c4b0 100644
--- a/tools/testing/selftests/kvm/memslot_perf_test.c
+++ b/tools/testing/selftests/kvm/memslot_perf_test.c
@@ -25,6 +25,7 @@
#include <test_util.h>
#include <kvm_util.h>
#include <processor.h>
+#include <ucall_common.h>
#define MEM_EXTRA_SIZE SZ_64K
diff --git a/tools/testing/selftests/kvm/mmu_stress_test.c b/tools/testing/selftests/kvm/mmu_stress_test.c
index 6a437d2be9fa..51c070556f3e 100644
--- a/tools/testing/selftests/kvm/mmu_stress_test.c
+++ b/tools/testing/selftests/kvm/mmu_stress_test.c
@@ -263,8 +263,10 @@ static void calc_default_nr_vcpus(void)
TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)",
errno, strerror(errno));
- nr_vcpus = CPU_COUNT(&possible_mask) * 3/4;
+ nr_vcpus = CPU_COUNT(&possible_mask);
TEST_ASSERT(nr_vcpus > 0, "Uh, no CPUs?");
+ if (nr_vcpus >= 2)
+ nr_vcpus = nr_vcpus * 3/4;
}
int main(int argc, char *argv[])
@@ -339,8 +341,7 @@ int main(int argc, char *argv[])
TEST_ASSERT(max_gpa > (4 * slot_size), "MAXPHYADDR <4gb ");
fd = kvm_memfd_alloc(slot_size, hugepages);
- mem = mmap(NULL, slot_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
- TEST_ASSERT(mem != MAP_FAILED, "mmap() failed");
+ mem = kvm_mmap(slot_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);
TEST_ASSERT(!madvise(mem, slot_size, MADV_NOHUGEPAGE), "madvise() failed");
@@ -361,11 +362,9 @@ int main(int argc, char *argv[])
#ifdef __x86_64__
/* Identity map memory in the guest using 1gb pages. */
- for (i = 0; i < slot_size; i += SZ_1G)
- __virt_pg_map(vm, gpa + i, gpa + i, PG_LEVEL_1G);
+ virt_map_level(vm, gpa, gpa, slot_size, PG_LEVEL_1G);
#else
- for (i = 0; i < slot_size; i += vm->page_size)
- virt_pg_map(vm, gpa + i, gpa + i);
+ virt_map(vm, gpa, gpa, slot_size >> vm->page_shift);
#endif
}
@@ -413,7 +412,7 @@ int main(int argc, char *argv[])
for (slot = (slot - 1) & ~1ull; slot >= first_slot; slot -= 2)
vm_set_user_memory_region(vm, slot, 0, 0, 0, NULL);
- munmap(mem, slot_size / 2);
+ kvm_munmap(mem, slot_size / 2);
/* Sanity check that the vCPUs actually ran. */
for (i = 0; i < nr_vcpus; i++)
diff --git a/tools/testing/selftests/kvm/pre_fault_memory_test.c b/tools/testing/selftests/kvm/pre_fault_memory_test.c
index 0350a8896a2f..93e603d91311 100644
--- a/tools/testing/selftests/kvm/pre_fault_memory_test.c
+++ b/tools/testing/selftests/kvm/pre_fault_memory_test.c
@@ -10,19 +10,20 @@
#include <test_util.h>
#include <kvm_util.h>
#include <processor.h>
+#include <pthread.h>
/* Arbitrarily chosen values */
#define TEST_SIZE (SZ_2M + PAGE_SIZE)
#define TEST_NPAGES (TEST_SIZE / PAGE_SIZE)
#define TEST_SLOT 10
-static void guest_code(uint64_t base_gpa)
+static void guest_code(uint64_t base_gva)
{
volatile uint64_t val __used;
int i;
for (i = 0; i < TEST_NPAGES; i++) {
- uint64_t *src = (uint64_t *)(base_gpa + i * PAGE_SIZE);
+ uint64_t *src = (uint64_t *)(base_gva + i * PAGE_SIZE);
val = *src;
}
@@ -30,18 +31,66 @@ static void guest_code(uint64_t base_gpa)
GUEST_DONE();
}
-static void pre_fault_memory(struct kvm_vcpu *vcpu, u64 gpa, u64 size,
- u64 left)
+struct slot_worker_data {
+ struct kvm_vm *vm;
+ u64 gpa;
+ uint32_t flags;
+ bool worker_ready;
+ bool prefault_ready;
+ bool recreate_slot;
+};
+
+static void *delete_slot_worker(void *__data)
+{
+ struct slot_worker_data *data = __data;
+ struct kvm_vm *vm = data->vm;
+
+ WRITE_ONCE(data->worker_ready, true);
+
+ while (!READ_ONCE(data->prefault_ready))
+ cpu_relax();
+
+ vm_mem_region_delete(vm, TEST_SLOT);
+
+ while (!READ_ONCE(data->recreate_slot))
+ cpu_relax();
+
+ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, data->gpa,
+ TEST_SLOT, TEST_NPAGES, data->flags);
+
+ return NULL;
+}
+
+static void pre_fault_memory(struct kvm_vcpu *vcpu, u64 base_gpa, u64 offset,
+ u64 size, u64 expected_left, bool private)
{
struct kvm_pre_fault_memory range = {
- .gpa = gpa,
+ .gpa = base_gpa + offset,
.size = size,
.flags = 0,
};
- u64 prev;
+ struct slot_worker_data data = {
+ .vm = vcpu->vm,
+ .gpa = base_gpa,
+ .flags = private ? KVM_MEM_GUEST_MEMFD : 0,
+ };
+ bool slot_recreated = false;
+ pthread_t slot_worker;
int ret, save_errno;
+ u64 prev;
+
+ /*
+ * Concurrently delete (and recreate) the slot to test KVM's handling
+ * of a racing memslot deletion with prefaulting.
+ */
+ pthread_create(&slot_worker, NULL, delete_slot_worker, &data);
+
+ while (!READ_ONCE(data.worker_ready))
+ cpu_relax();
- do {
+ WRITE_ONCE(data.prefault_ready, true);
+
+ for (;;) {
prev = range.size;
ret = __vcpu_ioctl(vcpu, KVM_PRE_FAULT_MEMORY, &range);
save_errno = errno;
@@ -49,22 +98,70 @@ static void pre_fault_memory(struct kvm_vcpu *vcpu, u64 gpa, u64 size,
"%sexpecting range.size to change on %s",
ret < 0 ? "not " : "",
ret < 0 ? "failure" : "success");
- } while (ret >= 0 ? range.size : save_errno == EINTR);
- TEST_ASSERT(range.size == left,
- "Completed with %lld bytes left, expected %" PRId64,
- range.size, left);
+ /*
+ * Immediately retry prefaulting if KVM was interrupted by an
+ * unrelated signal/event.
+ */
+ if (ret < 0 && save_errno == EINTR)
+ continue;
+
+ /*
+ * Tell the worker to recreate the slot in order to complete
+ * prefaulting (if prefault didn't already succeed before the
+ * slot was deleted) and/or to prepare for the next testcase.
+ * Wait for the worker to exit so that the next invocation of
+ * prefaulting is guaranteed to complete (assuming no KVM bugs).
+ */
+ if (!slot_recreated) {
+ WRITE_ONCE(data.recreate_slot, true);
+ pthread_join(slot_worker, NULL);
+ slot_recreated = true;
+
+ /*
+ * Retry prefaulting to get a stable result, i.e. to
+ * avoid seeing random EAGAIN failures. Don't retry if
+ * prefaulting already succeeded, as KVM disallows
+ * prefaulting with size=0, i.e. blindly retrying would
+ * result in test failures due to EINVAL. KVM should
+ * always return success if all bytes are prefaulted,
+ * i.e. there is no need to guard against EAGAIN being
+ * returned.
+ */
+ if (range.size)
+ continue;
+ }
+
+ /*
+ * All done if there are no remaining bytes to prefault, or if
+ * prefaulting failed (EINTR was handled above, and EAGAIN due
+ * to prefaulting a memslot that's being actively deleted should
+ * be impossible since the memslot has already been recreated).
+ */
+ if (!range.size || ret < 0)
+ break;
+ }
- if (left == 0)
- __TEST_ASSERT_VM_VCPU_IOCTL(!ret, "KVM_PRE_FAULT_MEMORY", ret, vcpu->vm);
+ TEST_ASSERT(range.size == expected_left,
+ "Completed with %llu bytes left, expected %lu",
+ range.size, expected_left);
+
+ /*
+ * Assert success if prefaulting the entire range should succeed, i.e.
+ * complete with no bytes remaining. Otherwise prefaulting should have
+ * failed due to ENOENT (due to RET_PF_EMULATE for emulated MMIO when
+ * no memslot exists).
+ */
+ if (!expected_left)
+ TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_PRE_FAULT_MEMORY, ret, vcpu->vm);
else
- /* No memory slot causes RET_PF_EMULATE. it results in -ENOENT. */
- __TEST_ASSERT_VM_VCPU_IOCTL(ret && save_errno == ENOENT,
- "KVM_PRE_FAULT_MEMORY", ret, vcpu->vm);
+ TEST_ASSERT_VM_VCPU_IOCTL(ret && save_errno == ENOENT,
+ KVM_PRE_FAULT_MEMORY, ret, vcpu->vm);
}
static void __test_pre_fault_memory(unsigned long vm_type, bool private)
{
+ uint64_t gpa, gva, alignment, guest_page_size;
const struct vm_shape shape = {
.mode = VM_MODE_DEFAULT,
.type = vm_type,
@@ -74,34 +171,30 @@ static void __test_pre_fault_memory(unsigned long vm_type, bool private)
struct kvm_vm *vm;
struct ucall uc;
- uint64_t guest_test_phys_mem;
- uint64_t guest_test_virt_mem;
- uint64_t alignment, guest_page_size;
-
vm = vm_create_shape_with_one_vcpu(shape, &vcpu, guest_code);
alignment = guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size;
- guest_test_phys_mem = (vm->max_gfn - TEST_NPAGES) * guest_page_size;
+ gpa = (vm->max_gfn - TEST_NPAGES) * guest_page_size;
#ifdef __s390x__
alignment = max(0x100000UL, guest_page_size);
#else
alignment = SZ_2M;
#endif
- guest_test_phys_mem = align_down(guest_test_phys_mem, alignment);
- guest_test_virt_mem = guest_test_phys_mem & ((1ULL << (vm->va_bits - 1)) - 1);
+ gpa = align_down(gpa, alignment);
+ gva = gpa & ((1ULL << (vm->va_bits - 1)) - 1);
- vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
- guest_test_phys_mem, TEST_SLOT, TEST_NPAGES,
- private ? KVM_MEM_GUEST_MEMFD : 0);
- virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, TEST_NPAGES);
+ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, gpa, TEST_SLOT,
+ TEST_NPAGES, private ? KVM_MEM_GUEST_MEMFD : 0);
+ virt_map(vm, gva, gpa, TEST_NPAGES);
if (private)
- vm_mem_set_private(vm, guest_test_phys_mem, TEST_SIZE);
- pre_fault_memory(vcpu, guest_test_phys_mem, SZ_2M, 0);
- pre_fault_memory(vcpu, guest_test_phys_mem + SZ_2M, PAGE_SIZE * 2, PAGE_SIZE);
- pre_fault_memory(vcpu, guest_test_phys_mem + TEST_SIZE, PAGE_SIZE, PAGE_SIZE);
+ vm_mem_set_private(vm, gpa, TEST_SIZE);
+
+ pre_fault_memory(vcpu, gpa, 0, SZ_2M, 0, private);
+ pre_fault_memory(vcpu, gpa, SZ_2M, PAGE_SIZE * 2, PAGE_SIZE, private);
+ pre_fault_memory(vcpu, gpa, TEST_SIZE, PAGE_SIZE, PAGE_SIZE, private);
- vcpu_args_set(vcpu, 1, guest_test_virt_mem);
+ vcpu_args_set(vcpu, 1, gva);
vcpu_run(vcpu);
run = vcpu->run;
diff --git a/tools/testing/selftests/kvm/riscv/get-reg-list.c b/tools/testing/selftests/kvm/riscv/get-reg-list.c
index a0b7dabb5040..cb54a56990a0 100644
--- a/tools/testing/selftests/kvm/riscv/get-reg-list.c
+++ b/tools/testing/selftests/kvm/riscv/get-reg-list.c
@@ -80,9 +80,11 @@ bool filter_reg(__u64 reg)
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZCF:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZCMOP:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZFA:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZFBFMIN:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZFH:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZFHMIN:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOM:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOP:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOZ:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICCRSE:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICNTR:
@@ -103,6 +105,8 @@ bool filter_reg(__u64 reg)
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZTSO:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVBB:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVBC:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVFBFMIN:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVFBFWMA:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVFH:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVFHMIN:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKB:
@@ -128,6 +132,8 @@ bool filter_reg(__u64 reg)
case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_DBCN:
case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SUSP:
case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_STA:
+ case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_FWFT:
+ case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_MPXY:
case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL:
case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR:
return true;
@@ -255,6 +261,8 @@ static const char *config_id_to_str(const char *prefix, __u64 id)
return "KVM_REG_RISCV_CONFIG_REG(zicbom_block_size)";
case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
return "KVM_REG_RISCV_CONFIG_REG(zicboz_block_size)";
+ case KVM_REG_RISCV_CONFIG_REG(zicbop_block_size):
+ return "KVM_REG_RISCV_CONFIG_REG(zicbop_block_size)";
case KVM_REG_RISCV_CONFIG_REG(mvendorid):
return "KVM_REG_RISCV_CONFIG_REG(mvendorid)";
case KVM_REG_RISCV_CONFIG_REG(marchid):
@@ -532,9 +540,11 @@ static const char *isa_ext_single_id_to_str(__u64 reg_off)
KVM_ISA_EXT_ARR(ZCF),
KVM_ISA_EXT_ARR(ZCMOP),
KVM_ISA_EXT_ARR(ZFA),
+ KVM_ISA_EXT_ARR(ZFBFMIN),
KVM_ISA_EXT_ARR(ZFH),
KVM_ISA_EXT_ARR(ZFHMIN),
KVM_ISA_EXT_ARR(ZICBOM),
+ KVM_ISA_EXT_ARR(ZICBOP),
KVM_ISA_EXT_ARR(ZICBOZ),
KVM_ISA_EXT_ARR(ZICCRSE),
KVM_ISA_EXT_ARR(ZICNTR),
@@ -555,6 +565,8 @@ static const char *isa_ext_single_id_to_str(__u64 reg_off)
KVM_ISA_EXT_ARR(ZTSO),
KVM_ISA_EXT_ARR(ZVBB),
KVM_ISA_EXT_ARR(ZVBC),
+ KVM_ISA_EXT_ARR(ZVFBFMIN),
+ KVM_ISA_EXT_ARR(ZVFBFWMA),
KVM_ISA_EXT_ARR(ZVFH),
KVM_ISA_EXT_ARR(ZVFHMIN),
KVM_ISA_EXT_ARR(ZVKB),
@@ -627,6 +639,8 @@ static const char *sbi_ext_single_id_to_str(__u64 reg_off)
KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_DBCN),
KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_SUSP),
KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_STA),
+ KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_FWFT),
+ KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_MPXY),
KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_EXPERIMENTAL),
KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_VENDOR),
};
@@ -683,6 +697,19 @@ static const char *sbi_sta_id_to_str(__u64 reg_off)
return strdup_printf("KVM_REG_RISCV_SBI_STA | %lld /* UNKNOWN */", reg_off);
}
+static const char *sbi_fwft_id_to_str(__u64 reg_off)
+{
+ switch (reg_off) {
+ case 0: return "KVM_REG_RISCV_SBI_FWFT | KVM_REG_RISCV_SBI_FWFT_REG(misaligned_deleg.enable)";
+ case 1: return "KVM_REG_RISCV_SBI_FWFT | KVM_REG_RISCV_SBI_FWFT_REG(misaligned_deleg.flags)";
+ case 2: return "KVM_REG_RISCV_SBI_FWFT | KVM_REG_RISCV_SBI_FWFT_REG(misaligned_deleg.value)";
+ case 3: return "KVM_REG_RISCV_SBI_FWFT | KVM_REG_RISCV_SBI_FWFT_REG(pointer_masking.enable)";
+ case 4: return "KVM_REG_RISCV_SBI_FWFT | KVM_REG_RISCV_SBI_FWFT_REG(pointer_masking.flags)";
+ case 5: return "KVM_REG_RISCV_SBI_FWFT | KVM_REG_RISCV_SBI_FWFT_REG(pointer_masking.value)";
+ }
+ return strdup_printf("KVM_REG_RISCV_SBI_FWFT | %lld /* UNKNOWN */", reg_off);
+}
+
static const char *sbi_id_to_str(const char *prefix, __u64 id)
{
__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_SBI_STATE);
@@ -695,6 +722,8 @@ static const char *sbi_id_to_str(const char *prefix, __u64 id)
switch (reg_subtype) {
case KVM_REG_RISCV_SBI_STA:
return sbi_sta_id_to_str(reg_off);
+ case KVM_REG_RISCV_SBI_FWFT:
+ return sbi_fwft_id_to_str(reg_off);
}
return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off);
@@ -780,10 +809,13 @@ void print_reg(const char *prefix, __u64 id)
*/
static __u64 base_regs[] = {
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbom_block_size),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mvendorid),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(marchid),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mimpid),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicboz_block_size),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(satp_mode),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbop_block_size),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.pc),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.ra),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.sp),
@@ -859,11 +891,26 @@ static __u64 sbi_sta_regs[] = {
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_STA | KVM_REG_RISCV_SBI_STA_REG(shmem_hi),
};
+static __u64 sbi_fwft_regs[] = {
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_FWFT,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_FWFT | KVM_REG_RISCV_SBI_FWFT_REG(misaligned_deleg.enable),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_FWFT | KVM_REG_RISCV_SBI_FWFT_REG(misaligned_deleg.flags),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_FWFT | KVM_REG_RISCV_SBI_FWFT_REG(misaligned_deleg.value),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_FWFT | KVM_REG_RISCV_SBI_FWFT_REG(pointer_masking.enable),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_FWFT | KVM_REG_RISCV_SBI_FWFT_REG(pointer_masking.flags),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_FWFT | KVM_REG_RISCV_SBI_FWFT_REG(pointer_masking.value),
+};
+
static __u64 zicbom_regs[] = {
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbom_block_size),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOM,
};
+static __u64 zicbop_regs[] = {
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbop_block_size),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOP,
+};
+
static __u64 zicboz_regs[] = {
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicboz_block_size),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOZ,
@@ -1010,8 +1057,13 @@ static __u64 vector_regs[] = {
#define SUBLIST_SBI_STA \
{"sbi-sta", .feature_type = VCPU_FEATURE_SBI_EXT, .feature = KVM_RISCV_SBI_EXT_STA, \
.regs = sbi_sta_regs, .regs_n = ARRAY_SIZE(sbi_sta_regs),}
+#define SUBLIST_SBI_FWFT \
+ {"sbi-fwft", .feature_type = VCPU_FEATURE_SBI_EXT, .feature = KVM_RISCV_SBI_EXT_FWFT, \
+ .regs = sbi_fwft_regs, .regs_n = ARRAY_SIZE(sbi_fwft_regs),}
#define SUBLIST_ZICBOM \
{"zicbom", .feature = KVM_RISCV_ISA_EXT_ZICBOM, .regs = zicbom_regs, .regs_n = ARRAY_SIZE(zicbom_regs),}
+#define SUBLIST_ZICBOP \
+ {"zicbop", .feature = KVM_RISCV_ISA_EXT_ZICBOP, .regs = zicbop_regs, .regs_n = ARRAY_SIZE(zicbop_regs),}
#define SUBLIST_ZICBOZ \
{"zicboz", .feature = KVM_RISCV_ISA_EXT_ZICBOZ, .regs = zicboz_regs, .regs_n = ARRAY_SIZE(zicboz_regs),}
#define SUBLIST_AIA \
@@ -1092,6 +1144,8 @@ KVM_SBI_EXT_SUBLIST_CONFIG(sta, STA);
KVM_SBI_EXT_SIMPLE_CONFIG(pmu, PMU);
KVM_SBI_EXT_SIMPLE_CONFIG(dbcn, DBCN);
KVM_SBI_EXT_SIMPLE_CONFIG(susp, SUSP);
+KVM_SBI_EXT_SIMPLE_CONFIG(mpxy, MPXY);
+KVM_SBI_EXT_SUBLIST_CONFIG(fwft, FWFT);
KVM_ISA_EXT_SUBLIST_CONFIG(aia, AIA);
KVM_ISA_EXT_SUBLIST_CONFIG(fp_f, FP_F);
@@ -1127,9 +1181,11 @@ KVM_ISA_EXT_SIMPLE_CONFIG(zcd, ZCD);
KVM_ISA_EXT_SIMPLE_CONFIG(zcf, ZCF);
KVM_ISA_EXT_SIMPLE_CONFIG(zcmop, ZCMOP);
KVM_ISA_EXT_SIMPLE_CONFIG(zfa, ZFA);
+KVM_ISA_EXT_SIMPLE_CONFIG(zfbfmin, ZFBFMIN);
KVM_ISA_EXT_SIMPLE_CONFIG(zfh, ZFH);
KVM_ISA_EXT_SIMPLE_CONFIG(zfhmin, ZFHMIN);
KVM_ISA_EXT_SUBLIST_CONFIG(zicbom, ZICBOM);
+KVM_ISA_EXT_SUBLIST_CONFIG(zicbop, ZICBOP);
KVM_ISA_EXT_SUBLIST_CONFIG(zicboz, ZICBOZ);
KVM_ISA_EXT_SIMPLE_CONFIG(ziccrse, ZICCRSE);
KVM_ISA_EXT_SIMPLE_CONFIG(zicntr, ZICNTR);
@@ -1150,6 +1206,8 @@ KVM_ISA_EXT_SIMPLE_CONFIG(zkt, ZKT);
KVM_ISA_EXT_SIMPLE_CONFIG(ztso, ZTSO);
KVM_ISA_EXT_SIMPLE_CONFIG(zvbb, ZVBB);
KVM_ISA_EXT_SIMPLE_CONFIG(zvbc, ZVBC);
+KVM_ISA_EXT_SIMPLE_CONFIG(zvfbfmin, ZVFBFMIN);
+KVM_ISA_EXT_SIMPLE_CONFIG(zvfbfwma, ZVFBFWMA);
KVM_ISA_EXT_SIMPLE_CONFIG(zvfh, ZVFH);
KVM_ISA_EXT_SIMPLE_CONFIG(zvfhmin, ZVFHMIN);
KVM_ISA_EXT_SIMPLE_CONFIG(zvkb, ZVKB);
@@ -1167,6 +1225,8 @@ struct vcpu_reg_list *vcpu_configs[] = {
&config_sbi_pmu,
&config_sbi_dbcn,
&config_sbi_susp,
+ &config_sbi_mpxy,
+ &config_sbi_fwft,
&config_aia,
&config_fp_f,
&config_fp_d,
@@ -1201,9 +1261,11 @@ struct vcpu_reg_list *vcpu_configs[] = {
&config_zcf,
&config_zcmop,
&config_zfa,
+ &config_zfbfmin,
&config_zfh,
&config_zfhmin,
&config_zicbom,
+ &config_zicbop,
&config_zicboz,
&config_ziccrse,
&config_zicntr,
@@ -1224,6 +1286,8 @@ struct vcpu_reg_list *vcpu_configs[] = {
&config_ztso,
&config_zvbb,
&config_zvbc,
+ &config_zvfbfmin,
+ &config_zvfbfwma,
&config_zvfh,
&config_zvfhmin,
&config_zvkb,
diff --git a/tools/testing/selftests/kvm/s390/cmma_test.c b/tools/testing/selftests/kvm/s390/cmma_test.c
index 85cc8c18d6e7..e39a724fe860 100644
--- a/tools/testing/selftests/kvm/s390/cmma_test.c
+++ b/tools/testing/selftests/kvm/s390/cmma_test.c
@@ -145,7 +145,7 @@ static void finish_vm_setup(struct kvm_vm *vm)
slot0 = memslot2region(vm, 0);
ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size);
- kvm_arch_vm_post_create(vm);
+ kvm_arch_vm_post_create(vm, 0);
}
static struct kvm_vm *create_vm_two_memslots(void)
diff --git a/tools/testing/selftests/kvm/s390/cpumodel_subfuncs_test.c b/tools/testing/selftests/kvm/s390/cpumodel_subfuncs_test.c
index 27255880dabd..aded795d42be 100644
--- a/tools/testing/selftests/kvm/s390/cpumodel_subfuncs_test.c
+++ b/tools/testing/selftests/kvm/s390/cpumodel_subfuncs_test.c
@@ -291,7 +291,7 @@ int main(int argc, char *argv[])
ksft_test_result_pass("%s\n", testlist[idx].subfunc_name);
free(array);
} else {
- ksft_test_result_skip("%s feature is not avaialable\n",
+ ksft_test_result_skip("%s feature is not available\n",
testlist[idx].subfunc_name);
}
}
diff --git a/tools/testing/selftests/kvm/s390/ucontrol_test.c b/tools/testing/selftests/kvm/s390/ucontrol_test.c
index d265b34c54be..50bc1c38225a 100644
--- a/tools/testing/selftests/kvm/s390/ucontrol_test.c
+++ b/tools/testing/selftests/kvm/s390/ucontrol_test.c
@@ -142,19 +142,17 @@ FIXTURE_SETUP(uc_kvm)
self->kvm_run_size = ioctl(self->kvm_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
ASSERT_GE(self->kvm_run_size, sizeof(struct kvm_run))
TH_LOG(KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, self->kvm_run_size));
- self->run = (struct kvm_run *)mmap(NULL, self->kvm_run_size,
- PROT_READ | PROT_WRITE, MAP_SHARED, self->vcpu_fd, 0);
- ASSERT_NE(self->run, MAP_FAILED);
+ self->run = kvm_mmap(self->kvm_run_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, self->vcpu_fd);
/**
* For virtual cpus that have been created with S390 user controlled
* virtual machines, the resulting vcpu fd can be memory mapped at page
* offset KVM_S390_SIE_PAGE_OFFSET in order to obtain a memory map of
* the virtual cpu's hardware control block.
*/
- self->sie_block = (struct kvm_s390_sie_block *)mmap(NULL, PAGE_SIZE,
- PROT_READ | PROT_WRITE, MAP_SHARED,
- self->vcpu_fd, KVM_S390_SIE_PAGE_OFFSET << PAGE_SHIFT);
- ASSERT_NE(self->sie_block, MAP_FAILED);
+ self->sie_block = __kvm_mmap(PAGE_SIZE, PROT_READ | PROT_WRITE,
+ MAP_SHARED, self->vcpu_fd,
+ KVM_S390_SIE_PAGE_OFFSET << PAGE_SHIFT);
TH_LOG("VM created %p %p", self->run, self->sie_block);
@@ -186,8 +184,8 @@ FIXTURE_SETUP(uc_kvm)
FIXTURE_TEARDOWN(uc_kvm)
{
- munmap(self->sie_block, PAGE_SIZE);
- munmap(self->run, self->kvm_run_size);
+ kvm_munmap(self->sie_block, PAGE_SIZE);
+ kvm_munmap(self->run, self->kvm_run_size);
close(self->vcpu_fd);
close(self->vm_fd);
close(self->kvm_fd);
diff --git a/tools/testing/selftests/kvm/s390/user_operexec.c b/tools/testing/selftests/kvm/s390/user_operexec.c
new file mode 100644
index 000000000000..714906c1d12a
--- /dev/null
+++ b/tools/testing/selftests/kvm/s390/user_operexec.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Test operation exception forwarding.
+ *
+ * Copyright IBM Corp. 2025
+ *
+ * Authors:
+ * Janosch Frank <frankja@linux.ibm.com>
+ */
+#include "kselftest.h"
+#include "kvm_util.h"
+#include "test_util.h"
+#include "sie.h"
+
+#include <linux/kvm.h>
+
+static void guest_code_instr0(void)
+{
+ asm(".word 0x0000");
+}
+
+static void test_user_instr0(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+ int rc;
+
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code_instr0);
+ rc = __vm_enable_cap(vm, KVM_CAP_S390_USER_INSTR0, 0);
+ TEST_ASSERT_EQ(0, rc);
+
+ vcpu_run(vcpu);
+ TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC);
+ TEST_ASSERT_EQ(vcpu->run->s390_sieic.icptcode, ICPT_OPEREXC);
+ TEST_ASSERT_EQ(vcpu->run->s390_sieic.ipa, 0);
+
+ kvm_vm_free(vm);
+}
+
+static void guest_code_user_operexec(void)
+{
+ asm(".word 0x0807");
+}
+
+static void test_user_operexec(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+ int rc;
+
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code_user_operexec);
+ rc = __vm_enable_cap(vm, KVM_CAP_S390_USER_OPEREXEC, 0);
+ TEST_ASSERT_EQ(0, rc);
+
+ vcpu_run(vcpu);
+ TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC);
+ TEST_ASSERT_EQ(vcpu->run->s390_sieic.icptcode, ICPT_OPEREXC);
+ TEST_ASSERT_EQ(vcpu->run->s390_sieic.ipa, 0x0807);
+
+ kvm_vm_free(vm);
+
+ /*
+ * Since user_operexec is the superset, it can also be used for the
+ * 0 instruction.
+ */
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code_instr0);
+ rc = __vm_enable_cap(vm, KVM_CAP_S390_USER_OPEREXEC, 0);
+ TEST_ASSERT_EQ(0, rc);
+
+ vcpu_run(vcpu);
+ TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC);
+ TEST_ASSERT_EQ(vcpu->run->s390_sieic.icptcode, ICPT_OPEREXC);
+ TEST_ASSERT_EQ(vcpu->run->s390_sieic.ipa, 0);
+
+ kvm_vm_free(vm);
+}
+
+/* combine user_instr0 and user_operexec */
+static void test_user_operexec_combined(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+ int rc;
+
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code_user_operexec);
+ rc = __vm_enable_cap(vm, KVM_CAP_S390_USER_INSTR0, 0);
+ TEST_ASSERT_EQ(0, rc);
+ rc = __vm_enable_cap(vm, KVM_CAP_S390_USER_OPEREXEC, 0);
+ TEST_ASSERT_EQ(0, rc);
+
+ vcpu_run(vcpu);
+ TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC);
+ TEST_ASSERT_EQ(vcpu->run->s390_sieic.icptcode, ICPT_OPEREXC);
+ TEST_ASSERT_EQ(vcpu->run->s390_sieic.ipa, 0x0807);
+
+ kvm_vm_free(vm);
+
+ /* Reverse enablement order */
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code_user_operexec);
+ rc = __vm_enable_cap(vm, KVM_CAP_S390_USER_OPEREXEC, 0);
+ TEST_ASSERT_EQ(0, rc);
+ rc = __vm_enable_cap(vm, KVM_CAP_S390_USER_INSTR0, 0);
+ TEST_ASSERT_EQ(0, rc);
+
+ vcpu_run(vcpu);
+ TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC);
+ TEST_ASSERT_EQ(vcpu->run->s390_sieic.icptcode, ICPT_OPEREXC);
+ TEST_ASSERT_EQ(vcpu->run->s390_sieic.ipa, 0x0807);
+
+ kvm_vm_free(vm);
+}
+
+/*
+ * Run all tests above.
+ *
+ * Enablement after a VCPU has been added is automatically tested since
+ * we enable the capability after VCPU creation.
+ */
+static struct testdef {
+ const char *name;
+ void (*test)(void);
+} testlist[] = {
+ { "instr0", test_user_instr0 },
+ { "operexec", test_user_operexec },
+ { "operexec_combined", test_user_operexec_combined},
+};
+
+int main(int argc, char *argv[])
+{
+ int idx;
+
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_S390_USER_INSTR0));
+
+ ksft_print_header();
+ ksft_set_plan(ARRAY_SIZE(testlist));
+ for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
+ testlist[idx].test();
+ ksft_test_result_pass("%s\n", testlist[idx].name);
+ }
+ ksft_finished();
+}
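For readers unfamiliar with s390 SIE interception exits: the icptcode/ipa fields consumed by the asserts above live in the kvm_run exit union (layout recalled from include/uapi/linux/kvm.h; double-check against the header):

/* KVM_EXIT_S390_SIEIC exit data, as defined in <linux/kvm.h>. */
struct {
	__u8  icptcode;	/* interception code, e.g. ICPT_OPEREXC */
	__u16 ipa;	/* first halfword of the intercepted instruction */
	__u32 ipb;	/* remaining instruction text */
} s390_sieic;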
diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c
index ce3ac0fd6dfb..7fe427ff9b38 100644
--- a/tools/testing/selftests/kvm/set_memory_region_test.c
+++ b/tools/testing/selftests/kvm/set_memory_region_test.c
@@ -433,10 +433,10 @@ static void test_add_max_memory_regions(void)
pr_info("Adding slots 0..%i, each memory region with %dK size\n",
(max_mem_slots - 1), MEM_REGION_SIZE >> 10);
- mem = mmap(NULL, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment,
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
- TEST_ASSERT(mem != MAP_FAILED, "Failed to mmap() host");
+
+ mem = kvm_mmap((size_t)max_mem_slots * MEM_REGION_SIZE + alignment,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1);
mem_aligned = (void *)(((size_t) mem + alignment - 1) & ~(alignment - 1));
for (slot = 0; slot < max_mem_slots; slot++)
@@ -446,9 +446,8 @@ static void test_add_max_memory_regions(void)
mem_aligned + (uint64_t)slot * MEM_REGION_SIZE);
 /* Check that memory slots cannot be added beyond the limit */
- mem_extra = mmap(NULL, MEM_REGION_SIZE, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- TEST_ASSERT(mem_extra != MAP_FAILED, "Failed to mmap() host");
+ mem_extra = kvm_mmap(MEM_REGION_SIZE, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1);
ret = __vm_set_user_memory_region(vm, max_mem_slots, 0,
(uint64_t)max_mem_slots * MEM_REGION_SIZE,
@@ -456,8 +455,8 @@ static void test_add_max_memory_regions(void)
TEST_ASSERT(ret == -1 && errno == EINVAL,
"Adding one more memory slot should fail with EINVAL");
- munmap(mem, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment);
- munmap(mem_extra, MEM_REGION_SIZE);
+ kvm_munmap(mem, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment);
+ kvm_munmap(mem_extra, MEM_REGION_SIZE);
kvm_vm_free(vm);
}
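The mem_aligned computation above is the standard round-up-to-power-of-two idiom; a small worked example (values illustrative):

/* Round ptr up to the next multiple of alignment (a power of two).
 * E.g. alignment = 0x200000 (2 MiB), ptr = 0x7f0000001000:
 * (0x7f0000001000 + 0x1fffff) & ~0x1fffff == 0x7f0000200000.
 */
static inline uintptr_t align_up(uintptr_t ptr, uintptr_t alignment)
{
	return (ptr + alignment - 1) & ~(alignment - 1);
}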
diff --git a/tools/testing/selftests/kvm/steal_time.c b/tools/testing/selftests/kvm/steal_time.c
index cce2520af720..8edc1fca345b 100644
--- a/tools/testing/selftests/kvm/steal_time.c
+++ b/tools/testing/selftests/kvm/steal_time.c
@@ -118,7 +118,7 @@ static int64_t smccc(uint32_t func, uint64_t arg)
{
struct arm_smccc_res res;
- smccc_hvc(func, arg, 0, 0, 0, 0, 0, 0, &res);
+ do_smccc(func, arg, 0, 0, 0, 0, 0, 0, &res);
return res.a0;
}
diff --git a/tools/testing/selftests/kvm/x86/aperfmperf_test.c b/tools/testing/selftests/kvm/x86/aperfmperf_test.c
new file mode 100644
index 000000000000..8b15a13df939
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86/aperfmperf_test.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test for KVM_X86_DISABLE_EXITS_APERFMPERF
+ *
+ * Copyright (C) 2025, Google LLC.
+ *
+ * Test the ability to disable VM-exits for rdmsr of IA32_APERF and
+ * IA32_MPERF. When these VM-exits are disabled, reads of these MSRs
+ * return the host's values.
+ *
+ * Note: Requires read access to /dev/cpu/<lpu>/msr to read host MSRs.
+ */
+
+#include <fcntl.h>
+#include <limits.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <asm/msr-index.h>
+
+#include "kvm_util.h"
+#include "processor.h"
+#include "svm_util.h"
+#include "test_util.h"
+#include "vmx.h"
+
+#define NUM_ITERATIONS 10000
+
+static int open_dev_msr(int cpu)
+{
+ char path[PATH_MAX];
+
+ snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
+ return open_path_or_exit(path, O_RDONLY);
+}
+
+static uint64_t read_dev_msr(int msr_fd, uint32_t msr)
+{
+ uint64_t data;
+ ssize_t rc;
+
+ rc = pread(msr_fd, &data, sizeof(data), msr);
+ TEST_ASSERT(rc == sizeof(data), "Read of MSR 0x%x failed", msr);
+
+ return data;
+}
+
+static void guest_read_aperf_mperf(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_ITERATIONS; i++)
+ GUEST_SYNC2(rdmsr(MSR_IA32_APERF), rdmsr(MSR_IA32_MPERF));
+}
+
+#define L2_GUEST_STACK_SIZE 64
+
+static void l2_guest_code(void)
+{
+ guest_read_aperf_mperf();
+ GUEST_DONE();
+}
+
+static void l1_svm_code(struct svm_test_data *svm)
+{
+ unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+ struct vmcb *vmcb = svm->vmcb;
+
+ generic_svm_setup(svm, l2_guest_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+ run_guest(vmcb, svm->vmcb_gpa);
+}
+
+static void l1_vmx_code(struct vmx_pages *vmx)
+{
+ unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+
+ GUEST_ASSERT_EQ(prepare_for_vmx_operation(vmx), true);
+ GUEST_ASSERT_EQ(load_vmcs(vmx), true);
+
+ prepare_vmcs(vmx, NULL, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+ /*
+ * Enable MSR bitmaps (the bitmap itself is allocated, zeroed, and set
+ * in the VMCS by prepare_vmcs()), as MSR exiting is mandatory on Intel.
+ */
+ vmwrite(CPU_BASED_VM_EXEC_CONTROL,
+ vmreadz(CPU_BASED_VM_EXEC_CONTROL) | CPU_BASED_USE_MSR_BITMAPS);
+
+ GUEST_ASSERT(!vmwrite(GUEST_RIP, (u64)l2_guest_code));
+ GUEST_ASSERT(!vmlaunch());
+}
+
+static void guest_code(void *nested_test_data)
+{
+ guest_read_aperf_mperf();
+
+ if (this_cpu_has(X86_FEATURE_SVM))
+ l1_svm_code(nested_test_data);
+ else if (this_cpu_has(X86_FEATURE_VMX))
+ l1_vmx_code(nested_test_data);
+ else
+ GUEST_DONE();
+
+ TEST_FAIL("L2 should have signaled 'done'");
+}
+
+static void guest_no_aperfmperf(void)
+{
+ uint64_t msr_val;
+ uint8_t vector;
+
+ vector = rdmsr_safe(MSR_IA32_APERF, &msr_val);
+ GUEST_ASSERT(vector == GP_VECTOR);
+
+ vector = rdmsr_safe(MSR_IA32_APERF, &msr_val);
+ GUEST_ASSERT(vector == GP_VECTOR);
+
+ GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+ const bool has_nested = kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX);
+ uint64_t host_aperf_before, host_mperf_before;
+ vm_vaddr_t nested_test_data_gva;
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+ int msr_fd, cpu, i;
+
+ /* Sanity check that APERF/MPERF are unsupported by default. */
+ vm = vm_create_with_one_vcpu(&vcpu, guest_no_aperfmperf);
+ vcpu_run(vcpu);
+ TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
+ kvm_vm_free(vm);
+
+ cpu = pin_self_to_any_cpu();
+
+ msr_fd = open_dev_msr(cpu);
+
+ /*
+ * This test requires a non-standard VM initialization, because
+ * KVM_ENABLE_CAP cannot be used on a VM file descriptor after
+ * a VCPU has been created.
+ */
+ vm = vm_create(1);
+
+ TEST_REQUIRE(vm_check_cap(vm, KVM_CAP_X86_DISABLE_EXITS) &
+ KVM_X86_DISABLE_EXITS_APERFMPERF);
+
+ vm_enable_cap(vm, KVM_CAP_X86_DISABLE_EXITS,
+ KVM_X86_DISABLE_EXITS_APERFMPERF);
+
+ vcpu = vm_vcpu_add(vm, 0, guest_code);
+
+ if (!has_nested)
+ nested_test_data_gva = NONCANONICAL;
+ else if (kvm_cpu_has(X86_FEATURE_SVM))
+ vcpu_alloc_svm(vm, &nested_test_data_gva);
+ else
+ vcpu_alloc_vmx(vm, &nested_test_data_gva);
+
+ vcpu_args_set(vcpu, 1, nested_test_data_gva);
+
+ host_aperf_before = read_dev_msr(msr_fd, MSR_IA32_APERF);
+ host_mperf_before = read_dev_msr(msr_fd, MSR_IA32_MPERF);
+
+ for (i = 0; i <= NUM_ITERATIONS * (1 + has_nested); i++) {
+ uint64_t host_aperf_after, host_mperf_after;
+ uint64_t guest_aperf, guest_mperf;
+ struct ucall uc;
+
+ vcpu_run(vcpu);
+ TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
+
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_DONE:
+ goto done;
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT(uc);
+ case UCALL_SYNC:
+ guest_aperf = uc.args[0];
+ guest_mperf = uc.args[1];
+
+ host_aperf_after = read_dev_msr(msr_fd, MSR_IA32_APERF);
+ host_mperf_after = read_dev_msr(msr_fd, MSR_IA32_MPERF);
+
+ TEST_ASSERT(host_aperf_before < guest_aperf,
+ "APERF: host_before (0x%" PRIx64 ") >= guest (0x%" PRIx64 ")",
+ host_aperf_before, guest_aperf);
+ TEST_ASSERT(guest_aperf < host_aperf_after,
+ "APERF: guest (0x%" PRIx64 ") >= host_after (0x%" PRIx64 ")",
+ guest_aperf, host_aperf_after);
+ TEST_ASSERT(host_mperf_before < guest_mperf,
+ "MPERF: host_before (0x%" PRIx64 ") >= guest (0x%" PRIx64 ")",
+ host_mperf_before, guest_mperf);
+ TEST_ASSERT(guest_mperf < host_mperf_after,
+ "MPERF: guest (0x%" PRIx64 ") >= host_after (0x%" PRIx64 ")",
+ guest_mperf, host_mperf_after);
+
+ host_aperf_before = host_aperf_after;
+ host_mperf_before = host_mperf_after;
+
+ break;
+ }
+ }
+ TEST_FAIL("Didn't receive UCALL_DONE");
+done:
+ kvm_vm_free(vm);
+ close(msr_fd);
+
+ return 0;
+}
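pin_self_to_any_cpu() is a selftests library helper; the point here is only that the test pins itself so the /dev/cpu/<N>/msr reads target the pCPU the vCPU runs on. A rough sketch of the idea (mechanism assumed, the real helper may differ):

static int pin_self_to_any_cpu(void)
{
	cpu_set_t mask;
	int cpu = sched_getcpu();	/* requires _GNU_SOURCE + <sched.h> */

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	TEST_ASSERT(!sched_setaffinity(0, sizeof(mask), &mask),
		    "sched_setaffinity() failed");
	return cpu;
}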
diff --git a/tools/testing/selftests/kvm/x86/fastops_test.c b/tools/testing/selftests/kvm/x86/fastops_test.c
index 2ac89d6c1e46..8926cfe0e209 100644
--- a/tools/testing/selftests/kvm/x86/fastops_test.c
+++ b/tools/testing/selftests/kvm/x86/fastops_test.c
@@ -8,14 +8,21 @@
* to set RFLAGS.CF based on whether or not the input is even or odd, so that
* instructions like ADC and SBB are deterministic.
*/
+#define fastop(__insn) \
+ "bt $0, %[bt_val]\n\t" \
+ __insn "\n\t" \
+ "pushfq\n\t" \
+ "pop %[flags]\n\t"
+
+#define flags_constraint(flags_val) [flags]"=r"(flags_val)
+#define bt_constraint(__bt_val) [bt_val]"rm"((uint32_t)__bt_val)
+
#define guest_execute_fastop_1(FEP, insn, __val, __flags) \
({ \
- __asm__ __volatile__("bt $0, %[val]\n\t" \
- FEP insn " %[val]\n\t" \
- "pushfq\n\t" \
- "pop %[flags]\n\t" \
- : [val]"+r"(__val), [flags]"=r"(__flags) \
- : : "cc", "memory"); \
+ __asm__ __volatile__(fastop(FEP insn " %[val]") \
+ : [val]"+r"(__val), flags_constraint(__flags) \
+ : bt_constraint(__val) \
+ : "cc", "memory"); \
})
#define guest_test_fastop_1(insn, type_t, __val) \
@@ -36,12 +43,10 @@
#define guest_execute_fastop_2(FEP, insn, __input, __output, __flags) \
({ \
- __asm__ __volatile__("bt $0, %[output]\n\t" \
- FEP insn " %[input], %[output]\n\t" \
- "pushfq\n\t" \
- "pop %[flags]\n\t" \
- : [output]"+r"(__output), [flags]"=r"(__flags) \
- : [input]"r"(__input) : "cc", "memory"); \
+ __asm__ __volatile__(fastop(FEP insn " %[input], %[output]") \
+ : [output]"+r"(__output), flags_constraint(__flags) \
+ : [input]"r"(__input), bt_constraint(__output) \
+ : "cc", "memory"); \
})
#define guest_test_fastop_2(insn, type_t, __val1, __val2) \
@@ -63,12 +68,10 @@
#define guest_execute_fastop_cl(FEP, insn, __shift, __output, __flags) \
({ \
- __asm__ __volatile__("bt $0, %[output]\n\t" \
- FEP insn " %%cl, %[output]\n\t" \
- "pushfq\n\t" \
- "pop %[flags]\n\t" \
- : [output]"+r"(__output), [flags]"=r"(__flags) \
- : "c"(__shift) : "cc", "memory"); \
+ __asm__ __volatile__(fastop(FEP insn " %%cl, %[output]") \
+ : [output]"+r"(__output), flags_constraint(__flags) \
+ : "c"(__shift), bt_constraint(__output) \
+ : "cc", "memory"); \
})
#define guest_test_fastop_cl(insn, type_t, __val1, __val2) \
@@ -89,6 +92,42 @@
ex_flags, insn, shift, (uint64_t)input, flags); \
})
+#define guest_execute_fastop_div(__KVM_ASM_SAFE, insn, __a, __d, __rm, __flags) \
+({ \
+ uint64_t ign_error_code; \
+ uint8_t vector; \
+ \
+ __asm__ __volatile__(fastop(__KVM_ASM_SAFE(insn " %[denom]")) \
+ : "+a"(__a), "+d"(__d), flags_constraint(__flags), \
+ KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code) \
+ : [denom]"rm"(__rm), bt_constraint(__rm) \
+ : "cc", "memory", KVM_ASM_SAFE_CLOBBERS); \
+ vector; \
+})
+
+#define guest_test_fastop_div(insn, type_t, __val1, __val2) \
+({ \
+ type_t _a = __val1, _d = __val1, rm = __val2; \
+ type_t a = _a, d = _d, ex_a = _a, ex_d = _d; \
+ uint64_t flags, ex_flags; \
+ uint8_t v, ex_v; \
+ \
+ ex_v = guest_execute_fastop_div(KVM_ASM_SAFE, insn, ex_a, ex_d, rm, ex_flags); \
+ v = guest_execute_fastop_div(KVM_ASM_SAFE_FEP, insn, a, d, rm, flags); \
+ \
+ __GUEST_ASSERT(v == ex_v, \
+ "Wanted vector 0x%x for '%s 0x%lx:0x%lx/0x%lx', got 0x%x", \
+ ex_v, insn, (uint64_t)_a, (uint64_t)_d, (uint64_t)rm, v); \
+ __GUEST_ASSERT(a == ex_a && d == ex_d, \
+ "Wanted 0x%lx:0x%lx for '%s 0x%lx:0x%lx/0x%lx', got 0x%lx:0x%lx",\
+ (uint64_t)ex_a, (uint64_t)ex_d, insn, (uint64_t)_a, \
+ (uint64_t)_d, (uint64_t)rm, (uint64_t)a, (uint64_t)d); \
+ __GUEST_ASSERT(v || ex_v || (flags == ex_flags), \
+ "Wanted flags 0x%lx for '%s 0x%lx:0x%lx/0x%lx', got 0x%lx", \
+ ex_flags, insn, (uint64_t)_a, (uint64_t)_d, (uint64_t)rm, flags);\
+})
+
static const uint64_t vals[] = {
0,
1,
@@ -115,14 +154,16 @@ do { \
guest_test_fastop_2("add" suffix, type_t, vals[i], vals[j]); \
guest_test_fastop_2("adc" suffix, type_t, vals[i], vals[j]); \
guest_test_fastop_2("and" suffix, type_t, vals[i], vals[j]); \
+if (sizeof(type_t) != 1) { \
guest_test_fastop_2("bsf" suffix, type_t, vals[i], vals[j]); \
guest_test_fastop_2("bsr" suffix, type_t, vals[i], vals[j]); \
guest_test_fastop_2("bt" suffix, type_t, vals[i], vals[j]); \
guest_test_fastop_2("btc" suffix, type_t, vals[i], vals[j]); \
guest_test_fastop_2("btr" suffix, type_t, vals[i], vals[j]); \
guest_test_fastop_2("bts" suffix, type_t, vals[i], vals[j]); \
- guest_test_fastop_2("cmp" suffix, type_t, vals[i], vals[j]); \
guest_test_fastop_2("imul" suffix, type_t, vals[i], vals[j]); \
+} \
+ guest_test_fastop_2("cmp" suffix, type_t, vals[i], vals[j]); \
guest_test_fastop_2("or" suffix, type_t, vals[i], vals[j]); \
guest_test_fastop_2("sbb" suffix, type_t, vals[i], vals[j]); \
guest_test_fastop_2("sub" suffix, type_t, vals[i], vals[j]); \
@@ -136,12 +177,15 @@ do { \
guest_test_fastop_cl("sar" suffix, type_t, vals[i], vals[j]); \
guest_test_fastop_cl("shl" suffix, type_t, vals[i], vals[j]); \
guest_test_fastop_cl("shr" suffix, type_t, vals[i], vals[j]); \
+ \
+ guest_test_fastop_div("div" suffix, type_t, vals[i], vals[j]); \
} \
} \
} while (0)
static void guest_code(void)
{
+ guest_test_fastops(uint8_t, "b");
guest_test_fastops(uint16_t, "w");
guest_test_fastops(uint32_t, "l");
guest_test_fastops(uint64_t, "q");
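To make the refactor concrete, here is approximately what guest_execute_fastop_2(FEP, "add", in, out, flags) now expands to, with the FEP prefix elided (a hand expansion, not compiler output):

/* Approximate expansion for insn = "add": */
__asm__ __volatile__("bt $0, %[bt_val]\n\t"	/* seed RFLAGS.CF from output bit 0 */
		     "add %[input], %[output]\n\t"
		     "pushfq\n\t"		/* capture resulting RFLAGS... */
		     "pop %[flags]\n\t"		/* ...into a GPR for the caller */
		     : [output]"+r"(__output), [flags]"=r"(__flags)
		     : [input]"r"(__input), [bt_val]"rm"((uint32_t)__output)
		     : "cc", "memory");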
diff --git a/tools/testing/selftests/kvm/x86/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86/hyperv_cpuid.c
index c863a689aa98..3c21af811d8f 100644
--- a/tools/testing/selftests/kvm/x86/hyperv_cpuid.c
+++ b/tools/testing/selftests/kvm/x86/hyperv_cpuid.c
@@ -45,7 +45,7 @@ static void test_hv_cpuid(struct kvm_vcpu *vcpu, bool evmcs_expected)
TEST_ASSERT((entry->function >= 0x40000000) &&
(entry->function <= 0x40000082),
- "function %x is our of supported range",
+ "function %x is out of supported range",
entry->function);
TEST_ASSERT(entry->index == 0,
diff --git a/tools/testing/selftests/kvm/x86/hyperv_features.c b/tools/testing/selftests/kvm/x86/hyperv_features.c
index 068e9c69710d..130b9ce7e5dd 100644
--- a/tools/testing/selftests/kvm/x86/hyperv_features.c
+++ b/tools/testing/selftests/kvm/x86/hyperv_features.c
@@ -54,12 +54,12 @@ static void guest_msr(struct msr_data *msr)
if (msr->fault_expected)
__GUEST_ASSERT(vector == GP_VECTOR,
- "Expected #GP on %sMSR(0x%x), got vector '0x%x'",
- msr->write ? "WR" : "RD", msr->idx, vector);
+ "Expected #GP on %sMSR(0x%x), got %s",
+ msr->write ? "WR" : "RD", msr->idx, ex_str(vector));
else
__GUEST_ASSERT(!vector,
- "Expected success on %sMSR(0x%x), got vector '0x%x'",
- msr->write ? "WR" : "RD", msr->idx, vector);
+ "Expected success on %sMSR(0x%x), got %s",
+ msr->write ? "WR" : "RD", msr->idx, ex_str(vector));
if (vector || is_write_only_msr(msr->idx))
goto done;
@@ -94,7 +94,7 @@ static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
if (!(hcall->control & HV_HYPERCALL_FAST_BIT)) {
input = pgs_gpa;
- output = pgs_gpa + 4096;
+ output = pgs_gpa + PAGE_SIZE;
} else {
input = output = 0;
}
@@ -102,12 +102,12 @@ static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
vector = __hyperv_hypercall(hcall->control, input, output, &res);
if (hcall->ud_expected) {
__GUEST_ASSERT(vector == UD_VECTOR,
- "Expected #UD for control '%lu', got vector '0x%x'",
- hcall->control, vector);
+ "Expected #UD for control '%lu', got %s",
+ hcall->control, ex_str(vector));
} else {
__GUEST_ASSERT(!vector,
- "Expected no exception for control '%lu', got vector '0x%x'",
- hcall->control, vector);
+ "Expected no exception for control '%lu', got %s",
+ hcall->control, ex_str(vector));
GUEST_ASSERT_EQ(res, hcall->expect);
}
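ex_str() is introduced elsewhere in this series; presumably it renders an exception vector as a human-readable mnemonic, along the lines of (vector macro names are the selftests' usual ones, body assumed):

/* Hypothetical sketch of ex_str(); the real helper lives in the library. */
static inline const char *ex_str(uint8_t vector)
{
	switch (vector) {
	case DE_VECTOR: return "#DE";
	case UD_VECTOR: return "#UD";
	case GP_VECTOR: return "#GP";
	case PF_VECTOR: return "#PF";
	default:	return "unknown exception";
	}
}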
diff --git a/tools/testing/selftests/kvm/x86/hyperv_ipi.c b/tools/testing/selftests/kvm/x86/hyperv_ipi.c
index 2b5b4bc6ef7e..ca61836c4e32 100644
--- a/tools/testing/selftests/kvm/x86/hyperv_ipi.c
+++ b/tools/testing/selftests/kvm/x86/hyperv_ipi.c
@@ -102,7 +102,7 @@ static void sender_guest_code(void *hcall_page, vm_vaddr_t pgs_gpa)
/* 'Slow' HvCallSendSyntheticClusterIpi to RECEIVER_VCPU_ID_1 */
ipi->vector = IPI_VECTOR;
ipi->cpu_mask = 1 << RECEIVER_VCPU_ID_1;
- hyperv_hypercall(HVCALL_SEND_IPI, pgs_gpa, pgs_gpa + 4096);
+ hyperv_hypercall(HVCALL_SEND_IPI, pgs_gpa, pgs_gpa + PAGE_SIZE);
nop_loop();
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);
@@ -116,13 +116,13 @@ static void sender_guest_code(void *hcall_page, vm_vaddr_t pgs_gpa)
GUEST_SYNC(stage++);
/* 'Slow' HvCallSendSyntheticClusterIpiEx to RECEIVER_VCPU_ID_1 */
- memset(hcall_page, 0, 4096);
+ memset(hcall_page, 0, PAGE_SIZE);
ipi_ex->vector = IPI_VECTOR;
ipi_ex->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
ipi_ex->vp_set.valid_bank_mask = 1 << 0;
ipi_ex->vp_set.bank_contents[0] = BIT(RECEIVER_VCPU_ID_1);
hyperv_hypercall(HVCALL_SEND_IPI_EX | (1 << HV_HYPERCALL_VARHEAD_OFFSET),
- pgs_gpa, pgs_gpa + 4096);
+ pgs_gpa, pgs_gpa + PAGE_SIZE);
nop_loop();
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);
@@ -138,13 +138,13 @@ static void sender_guest_code(void *hcall_page, vm_vaddr_t pgs_gpa)
GUEST_SYNC(stage++);
/* 'Slow' HvCallSendSyntheticClusterIpiEx to RECEIVER_VCPU_ID_2 */
- memset(hcall_page, 0, 4096);
+ memset(hcall_page, 0, PAGE_SIZE);
ipi_ex->vector = IPI_VECTOR;
ipi_ex->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
ipi_ex->vp_set.valid_bank_mask = 1 << 1;
ipi_ex->vp_set.bank_contents[0] = BIT(RECEIVER_VCPU_ID_2 - 64);
hyperv_hypercall(HVCALL_SEND_IPI_EX | (1 << HV_HYPERCALL_VARHEAD_OFFSET),
- pgs_gpa, pgs_gpa + 4096);
+ pgs_gpa, pgs_gpa + PAGE_SIZE);
nop_loop();
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ipis_expected[0]);
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
@@ -160,14 +160,14 @@ static void sender_guest_code(void *hcall_page, vm_vaddr_t pgs_gpa)
GUEST_SYNC(stage++);
/* 'Slow' HvCallSendSyntheticClusterIpiEx to both RECEIVER_VCPU_ID_{1,2} */
- memset(hcall_page, 0, 4096);
+ memset(hcall_page, 0, PAGE_SIZE);
ipi_ex->vector = IPI_VECTOR;
ipi_ex->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
ipi_ex->vp_set.valid_bank_mask = 1 << 1 | 1;
ipi_ex->vp_set.bank_contents[0] = BIT(RECEIVER_VCPU_ID_1);
ipi_ex->vp_set.bank_contents[1] = BIT(RECEIVER_VCPU_ID_2 - 64);
hyperv_hypercall(HVCALL_SEND_IPI_EX | (2 << HV_HYPERCALL_VARHEAD_OFFSET),
- pgs_gpa, pgs_gpa + 4096);
+ pgs_gpa, pgs_gpa + PAGE_SIZE);
nop_loop();
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
@@ -183,10 +183,10 @@ static void sender_guest_code(void *hcall_page, vm_vaddr_t pgs_gpa)
GUEST_SYNC(stage++);
/* 'Slow' HvCallSendSyntheticClusterIpiEx to HV_GENERIC_SET_ALL */
- memset(hcall_page, 0, 4096);
+ memset(hcall_page, 0, PAGE_SIZE);
ipi_ex->vector = IPI_VECTOR;
ipi_ex->vp_set.format = HV_GENERIC_SET_ALL;
- hyperv_hypercall(HVCALL_SEND_IPI_EX, pgs_gpa, pgs_gpa + 4096);
+ hyperv_hypercall(HVCALL_SEND_IPI_EX, pgs_gpa, pgs_gpa + PAGE_SIZE);
nop_loop();
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
diff --git a/tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c b/tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
index 077cd0ec3040..a3b7ce155981 100644
--- a/tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
+++ b/tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
@@ -621,7 +621,7 @@ int main(int argc, char *argv[])
for (i = 0; i < NTEST_PAGES; i++) {
pte = vm_get_page_table_entry(vm, data->test_pages + i * PAGE_SIZE);
gpa = addr_hva2gpa(vm, pte);
- __virt_pg_map(vm, gva + PAGE_SIZE * i, gpa & PAGE_MASK, PG_LEVEL_4K);
+ virt_pg_map(vm, gva + PAGE_SIZE * i, gpa & PAGE_MASK);
data->test_pages_pte[i] = gva + (gpa & ~PAGE_MASK);
}
diff --git a/tools/testing/selftests/kvm/x86/monitor_mwait_test.c b/tools/testing/selftests/kvm/x86/monitor_mwait_test.c
index 390ae2d87493..e45c028d2a7e 100644
--- a/tools/testing/selftests/kvm/x86/monitor_mwait_test.c
+++ b/tools/testing/selftests/kvm/x86/monitor_mwait_test.c
@@ -30,12 +30,12 @@ do { \
\
if (fault_wanted) \
__GUEST_ASSERT((vector) == UD_VECTOR, \
- "Expected #UD on " insn " for testcase '0x%x', got '0x%x'", \
- testcase, vector); \
+ "Expected #UD on " insn " for testcase '0x%x', got %s", \
+ testcase, ex_str(vector)); \
else \
__GUEST_ASSERT(!(vector), \
- "Expected success on " insn " for testcase '0x%x', got '0x%x'", \
- testcase, vector); \
+ "Expected success on " insn " for testcase '0x%x', got %s", \
+ testcase, ex_str(vector)); \
} while (0)
static void guest_monitor_wait(void *arg)
@@ -74,6 +74,7 @@ int main(int argc, char *argv[])
int testcase;
char test[80];
+ TEST_REQUIRE(this_cpu_has(X86_FEATURE_MWAIT));
TEST_REQUIRE(kvm_has_cap(KVM_CAP_DISABLE_QUIRKS2));
ksft_print_header();
diff --git a/tools/testing/selftests/kvm/x86/msrs_test.c b/tools/testing/selftests/kvm/x86/msrs_test.c
new file mode 100644
index 000000000000..40d918aedce6
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86/msrs_test.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <asm/msr-index.h>
+
+#include <stdint.h>
+
+#include "kvm_util.h"
+#include "processor.h"
+
+/* Use HYPERVISOR for MSRs that are emulated unconditionally, as HYPERVISOR itself is always set for KVM guests. */
+#define X86_FEATURE_NONE X86_FEATURE_HYPERVISOR
+
+struct kvm_msr {
+ const struct kvm_x86_cpu_feature feature;
+ const struct kvm_x86_cpu_feature feature2;
+ const char *name;
+ const u64 reset_val;
+ const u64 write_val;
+ const u64 rsvd_val;
+ const u32 index;
+ const bool is_kvm_defined;
+};
+
+#define ____MSR_TEST(msr, str, val, rsvd, reset, feat, f2, is_kvm) \
+{ \
+ .index = msr, \
+ .name = str, \
+ .write_val = val, \
+ .rsvd_val = rsvd, \
+ .reset_val = reset, \
+ .feature = X86_FEATURE_ ##feat, \
+ .feature2 = X86_FEATURE_ ##f2, \
+ .is_kvm_defined = is_kvm, \
+}
+
+#define __MSR_TEST(msr, str, val, rsvd, reset, feat) \
+ ____MSR_TEST(msr, str, val, rsvd, reset, feat, feat, false)
+
+#define MSR_TEST_NON_ZERO(msr, val, rsvd, reset, feat) \
+ __MSR_TEST(msr, #msr, val, rsvd, reset, feat)
+
+#define MSR_TEST(msr, val, rsvd, feat) \
+ __MSR_TEST(msr, #msr, val, rsvd, 0, feat)
+
+#define MSR_TEST2(msr, val, rsvd, feat, f2) \
+ ____MSR_TEST(msr, #msr, val, rsvd, 0, feat, f2, false)
+
+/*
+ * Note, use a page aligned value for the canonical value so that the value
+ * is compatible with MSRs that use bits 11:0 for things other than addresses.
+ */
+static const u64 canonical_val = 0x123456789000ull;
+
+/*
+ * Arbitrary value with bits set in every byte, but not all bits set. This is
+ * also a non-canonical value, but that's coincidental (any 64-bit value with
+ * an alternating 0s/1s pattern will be non-canonical).
+ */
+static const u64 u64_val = 0xaaaa5555aaaa5555ull;
+
+#define MSR_TEST_CANONICAL(msr, feat) \
+ __MSR_TEST(msr, #msr, canonical_val, NONCANONICAL, 0, feat)
+
+#define MSR_TEST_KVM(msr, val, rsvd, feat) \
+ ____MSR_TEST(KVM_REG_ ##msr, #msr, val, rsvd, 0, feat, feat, true)
+
+/*
+ * The main struct must be scoped to a function due to the use of structures to
+ * define features. For the global structure, allocate enough space for the
+ * foreseeable future without getting too ridiculous, to minimize maintenance
+ * costs (bumping the array size every time an MSR is added is really annoying).
+ */
+static struct kvm_msr msrs[128];
+static int idx;
+
+static bool ignore_unsupported_msrs;
+
+static u64 fixup_rdmsr_val(u32 msr, u64 want)
+{
+ /*
+ * AMD CPUs drop bits 63:32 on some MSRs that Intel CPUs support. KVM
+ * is supposed to emulate that behavior based on guest vendor model
+ * (which is the same as the host vendor model for this test).
+ */
+ if (!host_cpu_is_amd)
+ return want;
+
+ switch (msr) {
+ case MSR_IA32_SYSENTER_ESP:
+ case MSR_IA32_SYSENTER_EIP:
+ case MSR_TSC_AUX:
+ return want & GENMASK_ULL(31, 0);
+ default:
+ return want;
+ }
+}
+
+static void __rdmsr(u32 msr, u64 want)
+{
+ u64 val;
+ u8 vec;
+
+ vec = rdmsr_safe(msr, &val);
+ __GUEST_ASSERT(!vec, "Unexpected %s on RDMSR(0x%x)", ex_str(vec), msr);
+
+ __GUEST_ASSERT(val == want, "Wanted 0x%lx from RDMSR(0x%x), got 0x%lx",
+ want, msr, val);
+}
+
+static void __wrmsr(u32 msr, u64 val)
+{
+ u8 vec;
+
+ vec = wrmsr_safe(msr, val);
+ __GUEST_ASSERT(!vec, "Unexpected %s on WRMSR(0x%x, 0x%lx)",
+ ex_str(vec), msr, val);
+ __rdmsr(msr, fixup_rdmsr_val(msr, val));
+}
+
+static void guest_test_supported_msr(const struct kvm_msr *msr)
+{
+ __rdmsr(msr->index, msr->reset_val);
+ __wrmsr(msr->index, msr->write_val);
+ GUEST_SYNC(fixup_rdmsr_val(msr->index, msr->write_val));
+
+ __rdmsr(msr->index, msr->reset_val);
+}
+
+static void guest_test_unsupported_msr(const struct kvm_msr *msr)
+{
+ u64 val;
+ u8 vec;
+
+ /*
+ * KVM's ABI with respect to ignore_msrs is a mess and largely beyond
+ * repair, just skip the unsupported MSR tests.
+ */
+ if (ignore_unsupported_msrs)
+ goto skip_wrmsr_gp;
+
+ /*
+ * {S,U}_CET exist if IBT or SHSTK is supported, but with bits that are
+ * writable only if their associated feature is supported. Skip the
+ * RDMSR #GP test if the secondary feature is supported, but perform
+ * the WRMSR #GP test as the to-be-written value is tied to the primary
+ * feature. For all other MSRs, simply do nothing.
+ */
+ if (this_cpu_has(msr->feature2)) {
+ if (msr->index != MSR_IA32_U_CET &&
+ msr->index != MSR_IA32_S_CET)
+ goto skip_wrmsr_gp;
+
+ goto skip_rdmsr_gp;
+ }
+
+ vec = rdmsr_safe(msr->index, &val);
+ __GUEST_ASSERT(vec == GP_VECTOR, "Wanted #GP on RDMSR(0x%x), got %s",
+ msr->index, ex_str(vec));
+
+skip_rdmsr_gp:
+ vec = wrmsr_safe(msr->index, msr->write_val);
+ __GUEST_ASSERT(vec == GP_VECTOR, "Wanted #GP on WRMSR(0x%x, 0x%lx), got %s",
+ msr->index, msr->write_val, ex_str(vec));
+
+skip_wrmsr_gp:
+ GUEST_SYNC(0);
+}
+
+void guest_test_reserved_val(const struct kvm_msr *msr)
+{
+ /* Skip reserved value checks as well, ignore_msrs is truly a mess. */
+ if (ignore_unsupported_msrs)
+ return;
+
+ /*
+ * If the CPU will truncate the written value (e.g. SYSENTER on AMD),
+ * expect success and a truncated value, not #GP.
+ */
+ if (!this_cpu_has(msr->feature) ||
+ msr->rsvd_val == fixup_rdmsr_val(msr->index, msr->rsvd_val)) {
+ u8 vec = wrmsr_safe(msr->index, msr->rsvd_val);
+
+ __GUEST_ASSERT(vec == GP_VECTOR,
+ "Wanted #GP on WRMSR(0x%x, 0x%lx), got %s",
+ msr->index, msr->rsvd_val, ex_str(vec));
+ } else {
+ __wrmsr(msr->index, msr->rsvd_val);
+ __wrmsr(msr->index, msr->reset_val);
+ }
+}
+
+static void guest_main(void)
+{
+ for (;;) {
+ const struct kvm_msr *msr = &msrs[READ_ONCE(idx)];
+
+ if (this_cpu_has(msr->feature))
+ guest_test_supported_msr(msr);
+ else
+ guest_test_unsupported_msr(msr);
+
+ if (msr->rsvd_val)
+ guest_test_reserved_val(msr);
+
+ GUEST_SYNC(msr->reset_val);
+ }
+}
+
+static bool has_one_reg;
+static bool use_one_reg;
+
+#define KVM_X86_MAX_NR_REGS 1
+
+static bool vcpu_has_reg(struct kvm_vcpu *vcpu, u64 reg)
+{
+ struct {
+ struct kvm_reg_list list;
+ u64 regs[KVM_X86_MAX_NR_REGS];
+ } regs = {};
+ int r, i;
+
+ /*
+ * If KVM_GET_REG_LIST succeeds with n=0, i.e. there are no supported
+ * regs, then the vCPU obviously doesn't support the reg.
+ */
+ r = __vcpu_ioctl(vcpu, KVM_GET_REG_LIST, &regs.list);
+ if (!r)
+ return false;
+
+ TEST_ASSERT_EQ(errno, E2BIG);
+
+ /*
+ * KVM x86 is expected to support enumerating a relatively small number
+ * of regs. The majority of registers supported by KVM_{G,S}ET_ONE_REG
+ * are enumerated via other ioctls, e.g. KVM_GET_MSR_INDEX_LIST. For
+ * simplicity, hardcode the maximum number of regs and manually update
+ * the test as necessary.
+ */
+ TEST_ASSERT(regs.list.n <= KVM_X86_MAX_NR_REGS,
+ "KVM reports %llu regs, test expects at most %u regs, stale test?",
+ regs.list.n, KVM_X86_MAX_NR_REGS);
+
+ vcpu_ioctl(vcpu, KVM_GET_REG_LIST, &regs.list);
+ for (i = 0; i < regs.list.n; i++) {
+ if (regs.regs[i] == reg)
+ return true;
+ }
+
+ return false;
+}
+
+static void host_test_kvm_reg(struct kvm_vcpu *vcpu)
+{
+ bool has_reg = vcpu_cpuid_has(vcpu, msrs[idx].feature);
+ u64 reset_val = msrs[idx].reset_val;
+ u64 write_val = msrs[idx].write_val;
+ u64 rsvd_val = msrs[idx].rsvd_val;
+ u32 reg = msrs[idx].index;
+ u64 val;
+ int r;
+
+ if (!use_one_reg)
+ return;
+
+ TEST_ASSERT_EQ(vcpu_has_reg(vcpu, KVM_X86_REG_KVM(reg)), has_reg);
+
+ if (!has_reg) {
+ r = __vcpu_get_reg(vcpu, KVM_X86_REG_KVM(reg), &val);
+ TEST_ASSERT(r && errno == EINVAL,
+ "Expected failure on get_reg(0x%x)", reg);
+ rsvd_val = 0;
+ goto out;
+ }
+
+ val = vcpu_get_reg(vcpu, KVM_X86_REG_KVM(reg));
+ TEST_ASSERT(val == reset_val, "Wanted 0x%lx from get_reg(0x%x), got 0x%lx",
+ reset_val, reg, val);
+
+ vcpu_set_reg(vcpu, KVM_X86_REG_KVM(reg), write_val);
+ val = vcpu_get_reg(vcpu, KVM_X86_REG_KVM(reg));
+ TEST_ASSERT(val == write_val, "Wanted 0x%lx from get_reg(0x%x), got 0x%lx",
+ write_val, reg, val);
+
+out:
+ r = __vcpu_set_reg(vcpu, KVM_X86_REG_KVM(reg), rsvd_val);
+ TEST_ASSERT(r, "Expected failure on set_reg(0x%x, 0x%lx)", reg, rsvd_val);
+}
+
+static void host_test_msr(struct kvm_vcpu *vcpu, u64 guest_val)
+{
+ u64 reset_val = msrs[idx].reset_val;
+ u32 msr = msrs[idx].index;
+ u64 val;
+
+ if (!kvm_cpu_has(msrs[idx].feature))
+ return;
+
+ val = vcpu_get_msr(vcpu, msr);
+ TEST_ASSERT(val == guest_val, "Wanted 0x%lx from get_msr(0x%x), got 0x%lx",
+ guest_val, msr, val);
+
+ if (use_one_reg)
+ vcpu_set_reg(vcpu, KVM_X86_REG_MSR(msr), reset_val);
+ else
+ vcpu_set_msr(vcpu, msr, reset_val);
+
+ val = vcpu_get_msr(vcpu, msr);
+ TEST_ASSERT(val == reset_val, "Wanted 0x%lx from get_msr(0x%x), got 0x%lx",
+ reset_val, msr, val);
+
+ if (!has_one_reg)
+ return;
+
+ val = vcpu_get_reg(vcpu, KVM_X86_REG_MSR(msr));
+ TEST_ASSERT(val == reset_val, "Wanted 0x%lx from get_reg(0x%x), got 0x%lx",
+ reset_val, msr, val);
+}
+
+static void do_vcpu_run(struct kvm_vcpu *vcpu)
+{
+ struct ucall uc;
+
+ for (;;) {
+ vcpu_run(vcpu);
+
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_SYNC:
+ host_test_msr(vcpu, uc.args[1]);
+ return;
+ case UCALL_PRINTF:
+ pr_info("%s", uc.buffer);
+ break;
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT(uc);
+ case UCALL_DONE:
+ TEST_FAIL("Unexpected UCALL_DONE");
+ default:
+ TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
+ }
+ }
+}
+
+static void vcpus_run(struct kvm_vcpu **vcpus, const int NR_VCPUS)
+{
+ int i;
+
+ for (i = 0; i < NR_VCPUS; i++)
+ do_vcpu_run(vcpus[i]);
+}
+
+#define MISC_ENABLES_RESET_VAL (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL | MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)
+
+static void test_msrs(void)
+{
+ const struct kvm_msr __msrs[] = {
+ MSR_TEST_NON_ZERO(MSR_IA32_MISC_ENABLE,
+ MISC_ENABLES_RESET_VAL | MSR_IA32_MISC_ENABLE_FAST_STRING,
+ MSR_IA32_MISC_ENABLE_FAST_STRING, MISC_ENABLES_RESET_VAL, NONE),
+ MSR_TEST_NON_ZERO(MSR_IA32_CR_PAT, 0x07070707, 0, 0x7040600070406, NONE),
+
+ /*
+ * TSC_AUX is supported if RDTSCP *or* RDPID is supported. Add
+ * entries for each feature so that TSC_AUX doesn't exist for
+ * the "unsupported" vCPU, and obviously to test both cases.
+ */
+ MSR_TEST2(MSR_TSC_AUX, 0x12345678, u64_val, RDTSCP, RDPID),
+ MSR_TEST2(MSR_TSC_AUX, 0x12345678, u64_val, RDPID, RDTSCP),
+
+ MSR_TEST(MSR_IA32_SYSENTER_CS, 0x1234, 0, NONE),
+ /*
+ * SYSENTER_{ESP,EIP} are technically non-canonical on Intel,
+ * but KVM doesn't emulate that behavior on emulated writes,
+ * i.e. this test will observe different behavior if the MSR
+ * writes are handled by hardware vs. KVM. KVM's behavior is
+ * intended (though far from ideal), so don't bother testing
+ * non-canonical values.
+ */
+ MSR_TEST(MSR_IA32_SYSENTER_ESP, canonical_val, 0, NONE),
+ MSR_TEST(MSR_IA32_SYSENTER_EIP, canonical_val, 0, NONE),
+
+ MSR_TEST_CANONICAL(MSR_FS_BASE, LM),
+ MSR_TEST_CANONICAL(MSR_GS_BASE, LM),
+ MSR_TEST_CANONICAL(MSR_KERNEL_GS_BASE, LM),
+ MSR_TEST_CANONICAL(MSR_LSTAR, LM),
+ MSR_TEST_CANONICAL(MSR_CSTAR, LM),
+ MSR_TEST(MSR_SYSCALL_MASK, 0xffffffff, 0, LM),
+
+ MSR_TEST2(MSR_IA32_S_CET, CET_SHSTK_EN, CET_RESERVED, SHSTK, IBT),
+ MSR_TEST2(MSR_IA32_S_CET, CET_ENDBR_EN, CET_RESERVED, IBT, SHSTK),
+ MSR_TEST2(MSR_IA32_U_CET, CET_SHSTK_EN, CET_RESERVED, SHSTK, IBT),
+ MSR_TEST2(MSR_IA32_U_CET, CET_ENDBR_EN, CET_RESERVED, IBT, SHSTK),
+ MSR_TEST_CANONICAL(MSR_IA32_PL0_SSP, SHSTK),
+ MSR_TEST(MSR_IA32_PL0_SSP, canonical_val, canonical_val | 1, SHSTK),
+ MSR_TEST_CANONICAL(MSR_IA32_PL1_SSP, SHSTK),
+ MSR_TEST(MSR_IA32_PL1_SSP, canonical_val, canonical_val | 1, SHSTK),
+ MSR_TEST_CANONICAL(MSR_IA32_PL2_SSP, SHSTK),
+ MSR_TEST(MSR_IA32_PL2_SSP, canonical_val, canonical_val | 1, SHSTK),
+ MSR_TEST_CANONICAL(MSR_IA32_PL3_SSP, SHSTK),
+ MSR_TEST(MSR_IA32_PL3_SSP, canonical_val, canonical_val | 1, SHSTK),
+
+ MSR_TEST_KVM(GUEST_SSP, canonical_val, NONCANONICAL, SHSTK),
+ };
+
+ const struct kvm_x86_cpu_feature feat_none = X86_FEATURE_NONE;
+ const struct kvm_x86_cpu_feature feat_lm = X86_FEATURE_LM;
+
+ /*
+ * Create three vCPUs, but run them on the same task, to validate KVM's
+ * context switching of MSR state. Don't pin the task to a pCPU to
+ * also validate KVM's handling of cross-pCPU migration. Use the full
+ * set of features for the first two vCPUs, but clear all features in
+ * third vCPU in order to test both positive and negative paths.
+ */
+ const int NR_VCPUS = 3;
+ struct kvm_vcpu *vcpus[NR_VCPUS];
+ struct kvm_vm *vm;
+ int i;
+
+ kvm_static_assert(sizeof(__msrs) <= sizeof(msrs));
+ kvm_static_assert(ARRAY_SIZE(__msrs) <= ARRAY_SIZE(msrs));
+ memcpy(msrs, __msrs, sizeof(__msrs));
+
+ ignore_unsupported_msrs = kvm_is_ignore_msrs();
+
+ vm = vm_create_with_vcpus(NR_VCPUS, guest_main, vcpus);
+
+ sync_global_to_guest(vm, msrs);
+ sync_global_to_guest(vm, ignore_unsupported_msrs);
+
+ /*
+ * Clear features in the "unsupported features" vCPU. This needs to be
+ * done before the first vCPU run as KVM's ABI is that guest CPUID is
+ * immutable once the vCPU has been run.
+ */
+ for (idx = 0; idx < ARRAY_SIZE(__msrs); idx++) {
+ /*
+ * Don't clear LM; selftests are 64-bit only, and KVM doesn't
+ * honor LM=0 for MSRs that are supposed to exist if and only
+ * if the vCPU is a 64-bit model. Ditto for NONE; clearing a
+ * fake feature flag will result in false failures.
+ */
+ if (memcmp(&msrs[idx].feature, &feat_lm, sizeof(feat_lm)) &&
+ memcmp(&msrs[idx].feature, &feat_none, sizeof(feat_none)))
+ vcpu_clear_cpuid_feature(vcpus[2], msrs[idx].feature);
+ }
+
+ for (idx = 0; idx < ARRAY_SIZE(__msrs); idx++) {
+ struct kvm_msr *msr = &msrs[idx];
+
+ if (msr->is_kvm_defined) {
+ for (i = 0; i < NR_VCPUS; i++)
+ host_test_kvm_reg(vcpus[i]);
+ continue;
+ }
+
+ /*
+ * Verify KVM_GET_SUPPORTED_CPUID and KVM_GET_MSR_INDEX_LIST
+ * are consistent with respect to MSRs whose existence is
+ * enumerated via CPUID. Skip the check for FS/GS.base MSRs,
+ * as they aren't reported in the save/restore list since their
+ * state is managed via SREGS.
+ */
+ TEST_ASSERT(msr->index == MSR_FS_BASE || msr->index == MSR_GS_BASE ||
+ kvm_msr_is_in_save_restore_list(msr->index) ==
+ (kvm_cpu_has(msr->feature) || kvm_cpu_has(msr->feature2)),
+ "%s %s in save/restore list, but %s according to CPUID", msr->name,
+ kvm_msr_is_in_save_restore_list(msr->index) ? "is" : "isn't",
+ (kvm_cpu_has(msr->feature) || kvm_cpu_has(msr->feature2)) ?
+ "supported" : "unsupported");
+
+ sync_global_to_guest(vm, idx);
+
+ vcpus_run(vcpus, NR_VCPUS);
+ vcpus_run(vcpus, NR_VCPUS);
+ }
+
+ kvm_vm_free(vm);
+}
+
+int main(void)
+{
+ has_one_reg = kvm_has_cap(KVM_CAP_ONE_REG);
+
+ test_msrs();
+
+ if (has_one_reg) {
+ use_one_reg = true;
+ test_msrs();
+ }
+}
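For clarity, one concrete expansion of the table machinery above: MSR_TEST_CANONICAL(MSR_LSTAR, LM) produces (by hand, following the macros above) roughly this initializer:

{
	.index		= MSR_LSTAR,
	.name		= "MSR_LSTAR",		/* from #msr stringification */
	.write_val	= canonical_val,	/* 0x123456789000 */
	.rsvd_val	= NONCANONICAL,		/* must #GP on write */
	.reset_val	= 0,
	.feature	= X86_FEATURE_LM,
	.feature2	= X86_FEATURE_LM,
	.is_kvm_defined	= false,
}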
diff --git a/tools/testing/selftests/kvm/x86/vmx_close_while_nested_test.c b/tools/testing/selftests/kvm/x86/nested_close_kvm_test.c
index dad988351493..f001cb836bfa 100644
--- a/tools/testing/selftests/kvm/x86/vmx_close_while_nested_test.c
+++ b/tools/testing/selftests/kvm/x86/nested_close_kvm_test.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * vmx_close_while_nested
- *
* Copyright (C) 2019, Red Hat, Inc.
*
* Verify that nothing bad happens if a KVM user exits with open
@@ -12,6 +10,7 @@
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
+#include "svm_util.h"
#include <string.h>
#include <sys/ioctl.h>
@@ -22,6 +21,8 @@ enum {
PORT_L0_EXIT = 0x2000,
};
+#define L2_GUEST_STACK_SIZE 64
+
static void l2_guest_code(void)
{
/* Exit to L0 */
@@ -29,9 +30,8 @@ static void l2_guest_code(void)
: : [port] "d" (PORT_L0_EXIT) : "rax");
}
-static void l1_guest_code(struct vmx_pages *vmx_pages)
+static void l1_vmx_code(struct vmx_pages *vmx_pages)
{
-#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
@@ -45,19 +45,43 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
GUEST_ASSERT(0);
}
+static void l1_svm_code(struct svm_test_data *svm)
+{
+ unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+
+ /* Prepare the VMCB for L2 execution. */
+ generic_svm_setup(svm, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+ run_guest(svm->vmcb, svm->vmcb_gpa);
+ GUEST_ASSERT(0);
+}
+
+static void l1_guest_code(void *data)
+{
+ if (this_cpu_has(X86_FEATURE_VMX))
+ l1_vmx_code(data);
+ else
+ l1_svm_code(data);
+}
+
int main(int argc, char *argv[])
{
- vm_vaddr_t vmx_pages_gva;
+ vm_vaddr_t guest_gva;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
- TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX) ||
+ kvm_cpu_has(X86_FEATURE_SVM));
vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
- /* Allocate VMX pages and shared descriptors (vmx_pages). */
- vcpu_alloc_vmx(vm, &vmx_pages_gva);
- vcpu_args_set(vcpu, 1, vmx_pages_gva);
+ if (kvm_cpu_has(X86_FEATURE_VMX))
+ vcpu_alloc_vmx(vm, &guest_gva);
+ else
+ vcpu_alloc_svm(vm, &guest_gva);
+
+ vcpu_args_set(vcpu, 1, guest_gva);
for (;;) {
volatile struct kvm_run *run = vcpu->run;
diff --git a/tools/testing/selftests/kvm/x86/nested_invalid_cr3_test.c b/tools/testing/selftests/kvm/x86/nested_invalid_cr3_test.c
new file mode 100644
index 000000000000..a6b6da9cf7fe
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86/nested_invalid_cr3_test.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025, Google LLC.
+ *
+ * This test verifies that L1 fails to enter L2 with an invalid CR3, and
+ * succeeds otherwise.
+ */
+#include "kvm_util.h"
+#include "vmx.h"
+#include "svm_util.h"
+#include "kselftest.h"
+
+
+#define L2_GUEST_STACK_SIZE 64
+
+static void l2_guest_code(void)
+{
+ vmcall();
+}
+
+static void l1_svm_code(struct svm_test_data *svm)
+{
+ unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+ uintptr_t save_cr3;
+
+ generic_svm_setup(svm, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+ /* Try to run L2 with invalid CR3 and make sure it fails */
+ save_cr3 = svm->vmcb->save.cr3;
+ svm->vmcb->save.cr3 = -1ull;
+ run_guest(svm->vmcb, svm->vmcb_gpa);
+ GUEST_ASSERT(svm->vmcb->control.exit_code == SVM_EXIT_ERR);
+
+ /* Now restore CR3 and make sure L2 runs successfully */
+ svm->vmcb->save.cr3 = save_cr3;
+ run_guest(svm->vmcb, svm->vmcb_gpa);
+ GUEST_ASSERT(svm->vmcb->control.exit_code == SVM_EXIT_VMMCALL);
+
+ GUEST_DONE();
+}
+
+static void l1_vmx_code(struct vmx_pages *vmx_pages)
+{
+ unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+ uintptr_t save_cr3;
+
+ GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+ GUEST_ASSERT(load_vmcs(vmx_pages));
+
+ prepare_vmcs(vmx_pages, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+ /* Try to run L2 with invalid CR3 and make sure it fails */
+ save_cr3 = vmreadz(GUEST_CR3);
+ vmwrite(GUEST_CR3, -1ull);
+ GUEST_ASSERT(!vmlaunch());
+ GUEST_ASSERT(vmreadz(VM_EXIT_REASON) ==
+ (EXIT_REASON_FAILED_VMENTRY | EXIT_REASON_INVALID_STATE));
+
+ /* Now restore CR3 and make sure L2 runs successfully */
+ vmwrite(GUEST_CR3, save_cr3);
+ GUEST_ASSERT(!vmlaunch());
+ GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+
+ GUEST_DONE();
+}
+
+static void l1_guest_code(void *data)
+{
+ if (this_cpu_has(X86_FEATURE_VMX))
+ l1_vmx_code(data);
+ else
+ l1_svm_code(data);
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+ vm_vaddr_t guest_gva = 0;
+
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX) ||
+ kvm_cpu_has(X86_FEATURE_SVM));
+
+ vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
+
+ if (kvm_cpu_has(X86_FEATURE_VMX))
+ vcpu_alloc_vmx(vm, &guest_gva);
+ else
+ vcpu_alloc_svm(vm, &guest_gva);
+
+ vcpu_args_set(vcpu, 1, guest_gva);
+
+ for (;;) {
+ struct ucall uc;
+
+ vcpu_run(vcpu);
+ TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
+
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT(uc);
+ case UCALL_SYNC:
+ break;
+ case UCALL_DONE:
+ goto done;
+ default:
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
+ }
+ }
+
+done:
+ kvm_vm_free(vm);
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/x86/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/x86/nested_tsc_adjust_test.c
index 2ceb5c78c442..2839f650e5c9 100644
--- a/tools/testing/selftests/kvm/x86/vmx_tsc_adjust_test.c
+++ b/tools/testing/selftests/kvm/x86/nested_tsc_adjust_test.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * vmx_tsc_adjust_test
- *
* Copyright (C) 2018, Google LLC.
*
* IA32_TSC_ADJUST test
@@ -22,6 +20,7 @@
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
+#include "svm_util.h"
#include <string.h>
#include <sys/ioctl.h>
@@ -35,6 +34,8 @@
#define TSC_ADJUST_VALUE (1ll << 32)
#define TSC_OFFSET_VALUE -(1ll << 48)
+#define L2_GUEST_STACK_SIZE 64
+
enum {
PORT_ABORT = 0x1000,
PORT_REPORT,
@@ -72,42 +73,47 @@ static void l2_guest_code(void)
__asm__ __volatile__("vmcall");
}
-static void l1_guest_code(struct vmx_pages *vmx_pages)
+static void l1_guest_code(void *data)
{
-#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
- uint32_t control;
- uintptr_t save_cr3;
+ /* Set TSC from L1 and make sure TSC_ADJUST is updated correctly */
GUEST_ASSERT(rdtsc() < TSC_ADJUST_VALUE);
wrmsr(MSR_IA32_TSC, rdtsc() - TSC_ADJUST_VALUE);
check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
- GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
- GUEST_ASSERT(load_vmcs(vmx_pages));
-
- /* Prepare the VMCS for L2 execution. */
- prepare_vmcs(vmx_pages, l2_guest_code,
- &l2_guest_stack[L2_GUEST_STACK_SIZE]);
- control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
- control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETTING;
- vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
- vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);
-
- /* Jump into L2. First, test failure to load guest CR3. */
- save_cr3 = vmreadz(GUEST_CR3);
- vmwrite(GUEST_CR3, -1ull);
- GUEST_ASSERT(!vmlaunch());
- GUEST_ASSERT(vmreadz(VM_EXIT_REASON) ==
- (EXIT_REASON_FAILED_VMENTRY | EXIT_REASON_INVALID_STATE));
- check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
- vmwrite(GUEST_CR3, save_cr3);
-
- GUEST_ASSERT(!vmlaunch());
- GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+ /*
+ * Run L2 with TSC_OFFSET. L2 will write to TSC, and L1 is not
+ * intercepting the write so it should update L1's TSC_ADJUST.
+ */
+ if (this_cpu_has(X86_FEATURE_VMX)) {
+ struct vmx_pages *vmx_pages = data;
+ uint32_t control;
+
+ GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+ GUEST_ASSERT(load_vmcs(vmx_pages));
+
+ prepare_vmcs(vmx_pages, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+ control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
+ control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETTING;
+ vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
+ vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);
+
+ GUEST_ASSERT(!vmlaunch());
+ GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+ } else {
+ struct svm_test_data *svm = data;
+
+ generic_svm_setup(svm, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+ svm->vmcb->control.tsc_offset = TSC_OFFSET_VALUE;
+ run_guest(svm->vmcb, svm->vmcb_gpa);
+ GUEST_ASSERT(svm->vmcb->control.exit_code == SVM_EXIT_VMMCALL);
+ }
check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);
-
GUEST_DONE();
}
@@ -119,16 +125,19 @@ static void report(int64_t val)
int main(int argc, char *argv[])
{
- vm_vaddr_t vmx_pages_gva;
+ vm_vaddr_t nested_gva;
struct kvm_vcpu *vcpu;
- TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX) ||
+ kvm_cpu_has(X86_FEATURE_SVM));
- vm = vm_create_with_one_vcpu(&vcpu, (void *) l1_guest_code);
+ vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
+ if (kvm_cpu_has(X86_FEATURE_VMX))
+ vcpu_alloc_vmx(vm, &nested_gva);
+ else
+ vcpu_alloc_svm(vm, &nested_gva);
- /* Allocate VMX pages and shared descriptors (vmx_pages). */
- vcpu_alloc_vmx(vm, &vmx_pages_gva);
- vcpu_args_set(vcpu, 1, vmx_pages_gva);
+ vcpu_args_set(vcpu, 1, nested_gva);
for (;;) {
struct ucall uc;
diff --git a/tools/testing/selftests/kvm/x86/vmx_nested_tsc_scaling_test.c b/tools/testing/selftests/kvm/x86/nested_tsc_scaling_test.c
index 1759fa5cb3f2..4260c9e4f489 100644
--- a/tools/testing/selftests/kvm/x86/vmx_nested_tsc_scaling_test.c
+++ b/tools/testing/selftests/kvm/x86/nested_tsc_scaling_test.c
@@ -13,6 +13,7 @@
#include "kvm_util.h"
#include "vmx.h"
+#include "svm_util.h"
#include "kselftest.h"
/* L2 is scaled up (from L1's perspective) by this factor */
@@ -79,7 +80,30 @@ static void l2_guest_code(void)
__asm__ __volatile__("vmcall");
}
-static void l1_guest_code(struct vmx_pages *vmx_pages)
+static void l1_svm_code(struct svm_test_data *svm)
+{
+ unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+
+ /* check that L1's frequency looks alright before launching L2 */
+ check_tsc_freq(UCHECK_L1);
+
+ generic_svm_setup(svm, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+ /* enable TSC scaling for L2 */
+ wrmsr(MSR_AMD64_TSC_RATIO, L2_SCALE_FACTOR << 32);
+
+ /* launch L2 */
+ run_guest(svm->vmcb, svm->vmcb_gpa);
+ GUEST_ASSERT(svm->vmcb->control.exit_code == SVM_EXIT_VMMCALL);
+
+ /* check that L1's frequency still looks good */
+ check_tsc_freq(UCHECK_L1);
+
+ GUEST_DONE();
+}
+
+static void l1_vmx_code(struct vmx_pages *vmx_pages)
{
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
uint32_t control;
@@ -116,11 +140,19 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
GUEST_DONE();
}
+static void l1_guest_code(void *data)
+{
+ if (this_cpu_has(X86_FEATURE_VMX))
+ l1_vmx_code(data);
+ else
+ l1_svm_code(data);
+}
+
int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
- vm_vaddr_t vmx_pages_gva;
+ vm_vaddr_t guest_gva = 0;
uint64_t tsc_start, tsc_end;
uint64_t tsc_khz;
@@ -129,7 +161,8 @@ int main(int argc, char *argv[])
uint64_t l1_tsc_freq = 0;
uint64_t l2_tsc_freq = 0;
- TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX) ||
+ kvm_cpu_has(X86_FEATURE_SVM));
TEST_REQUIRE(kvm_has_cap(KVM_CAP_TSC_CONTROL));
TEST_REQUIRE(sys_clocksource_is_based_on_tsc());
@@ -152,8 +185,13 @@ int main(int argc, char *argv[])
printf("real TSC frequency is around: %"PRIu64"\n", l0_tsc_freq);
vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
- vcpu_alloc_vmx(vm, &vmx_pages_gva);
- vcpu_args_set(vcpu, 1, vmx_pages_gva);
+
+ if (kvm_cpu_has(X86_FEATURE_VMX))
+ vcpu_alloc_vmx(vm, &guest_gva);
+ else
+ vcpu_alloc_svm(vm, &guest_gva);
+
+ vcpu_args_set(vcpu, 1, guest_gva);
tsc_khz = __vcpu_ioctl(vcpu, KVM_GET_TSC_KHZ, NULL);
TEST_ASSERT(tsc_khz != -1, "vcpu ioctl KVM_GET_TSC_KHZ failed");
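The L2_SCALE_FACTOR << 32 write in l1_svm_code() relies on MSR_AMD64_TSC_RATIO being a fixed-point value, with the integer part in bits 39:32 and the fraction in bits 31:0 (per AMD's documentation as I recall it; verify against the APM):

/* Compose an AMD TSC ratio: (N << 32) means "scale the guest TSC by N". */
static inline uint64_t amd_tsc_ratio(uint8_t integer, uint32_t frac)
{
	return ((uint64_t)integer << 32) | frac;
}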
diff --git a/tools/testing/selftests/kvm/x86/pmu_counters_test.c b/tools/testing/selftests/kvm/x86/pmu_counters_test.c
index 8aaaf25b6111..3eaa216b96c0 100644
--- a/tools/testing/selftests/kvm/x86/pmu_counters_test.c
+++ b/tools/testing/selftests/kvm/x86/pmu_counters_test.c
@@ -14,10 +14,10 @@
#define NUM_BRANCH_INSNS_RETIRED (NUM_LOOPS)
/*
- * Number of instructions in each loop. 1 CLFLUSH/CLFLUSHOPT/NOP, 1 MFENCE,
- * 1 LOOP.
+ * Number of instructions in each loop. 1 ENTER, 1 CLFLUSH/CLFLUSHOPT/NOP,
+ * 1 MFENCE, 1 MOV, 1 LEAVE, 1 LOOP.
*/
-#define NUM_INSNS_PER_LOOP 4
+#define NUM_INSNS_PER_LOOP 6
/*
* Number of "extra" instructions that will be counted, i.e. the number of
@@ -75,6 +75,11 @@ static struct kvm_intel_pmu_event intel_event_to_feature(uint8_t idx)
[INTEL_ARCH_BRANCHES_RETIRED_INDEX] = { X86_PMU_FEATURE_BRANCH_INSNS_RETIRED, X86_PMU_FEATURE_NULL },
[INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX] = { X86_PMU_FEATURE_BRANCHES_MISPREDICTED, X86_PMU_FEATURE_NULL },
[INTEL_ARCH_TOPDOWN_SLOTS_INDEX] = { X86_PMU_FEATURE_TOPDOWN_SLOTS, X86_PMU_FEATURE_TOPDOWN_SLOTS_FIXED },
+ [INTEL_ARCH_TOPDOWN_BE_BOUND_INDEX] = { X86_PMU_FEATURE_TOPDOWN_BE_BOUND, X86_PMU_FEATURE_NULL },
+ [INTEL_ARCH_TOPDOWN_BAD_SPEC_INDEX] = { X86_PMU_FEATURE_TOPDOWN_BAD_SPEC, X86_PMU_FEATURE_NULL },
+ [INTEL_ARCH_TOPDOWN_FE_BOUND_INDEX] = { X86_PMU_FEATURE_TOPDOWN_FE_BOUND, X86_PMU_FEATURE_NULL },
+ [INTEL_ARCH_TOPDOWN_RETIRING_INDEX] = { X86_PMU_FEATURE_TOPDOWN_RETIRING, X86_PMU_FEATURE_NULL },
+ [INTEL_ARCH_LBR_INSERTS_INDEX] = { X86_PMU_FEATURE_LBR_INSERTS, X86_PMU_FEATURE_NULL },
};
kvm_static_assert(ARRAY_SIZE(__intel_event_to_feature) == NR_INTEL_ARCH_EVENTS);
@@ -158,10 +163,18 @@ static void guest_assert_event_count(uint8_t idx, uint32_t pmc, uint32_t pmc_msr
switch (idx) {
case INTEL_ARCH_INSTRUCTIONS_RETIRED_INDEX:
- GUEST_ASSERT_EQ(count, NUM_INSNS_RETIRED);
+ /* Relax precise count check due to VM-EXIT/VM-ENTRY overcount issue */
+ if (this_pmu_has_errata(INSTRUCTIONS_RETIRED_OVERCOUNT))
+ GUEST_ASSERT(count >= NUM_INSNS_RETIRED);
+ else
+ GUEST_ASSERT_EQ(count, NUM_INSNS_RETIRED);
break;
case INTEL_ARCH_BRANCHES_RETIRED_INDEX:
- GUEST_ASSERT_EQ(count, NUM_BRANCH_INSNS_RETIRED);
+ /* Relax precise count check due to VM-EXIT/VM-ENTRY overcount issue */
+ if (this_pmu_has_errata(BRANCHES_RETIRED_OVERCOUNT))
+ GUEST_ASSERT(count >= NUM_BRANCH_INSNS_RETIRED);
+ else
+ GUEST_ASSERT_EQ(count, NUM_BRANCH_INSNS_RETIRED);
break;
case INTEL_ARCH_LLC_REFERENCES_INDEX:
case INTEL_ARCH_LLC_MISSES_INDEX:
@@ -171,9 +184,12 @@ static void guest_assert_event_count(uint8_t idx, uint32_t pmc, uint32_t pmc_msr
fallthrough;
case INTEL_ARCH_CPU_CYCLES_INDEX:
case INTEL_ARCH_REFERENCE_CYCLES_INDEX:
+ case INTEL_ARCH_TOPDOWN_BE_BOUND_INDEX:
+ case INTEL_ARCH_TOPDOWN_FE_BOUND_INDEX:
GUEST_ASSERT_NE(count, 0);
break;
case INTEL_ARCH_TOPDOWN_SLOTS_INDEX:
+ case INTEL_ARCH_TOPDOWN_RETIRING_INDEX:
__GUEST_ASSERT(count >= NUM_INSNS_RETIRED,
"Expected top-down slots >= %u, got count = %lu",
NUM_INSNS_RETIRED, count);
@@ -210,9 +226,11 @@ do { \
__asm__ __volatile__("wrmsr\n\t" \
" mov $" __stringify(NUM_LOOPS) ", %%ecx\n\t" \
"1:\n\t" \
+ FEP "enter $0, $0\n\t" \
clflush "\n\t" \
"mfence\n\t" \
"mov %[m], %%eax\n\t" \
+ FEP "leave\n\t" \
FEP "loop 1b\n\t" \
FEP "mov %%edi, %%ecx\n\t" \
FEP "xor %%eax, %%eax\n\t" \
@@ -311,7 +329,7 @@ static void guest_test_arch_events(void)
}
static void test_arch_events(uint8_t pmu_version, uint64_t perf_capabilities,
- uint8_t length, uint8_t unavailable_mask)
+ uint8_t length, uint32_t unavailable_mask)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
@@ -320,6 +338,9 @@ static void test_arch_events(uint8_t pmu_version, uint64_t perf_capabilities,
if (!pmu_version)
return;
+ unavailable_mask &= GENMASK(X86_PROPERTY_PMU_EVENTS_MASK.hi_bit,
+ X86_PROPERTY_PMU_EVENTS_MASK.lo_bit);
+
vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_test_arch_events,
pmu_version, perf_capabilities);
@@ -344,8 +365,8 @@ static void test_arch_events(uint8_t pmu_version, uint64_t perf_capabilities,
#define GUEST_ASSERT_PMC_MSR_ACCESS(insn, msr, expect_gp, vector) \
__GUEST_ASSERT(expect_gp ? vector == GP_VECTOR : !vector, \
- "Expected %s on " #insn "(0x%x), got vector %u", \
- expect_gp ? "#GP" : "no fault", msr, vector) \
+ "Expected %s on " #insn "(0x%x), got %s", \
+ expect_gp ? "#GP" : "no fault", msr, ex_str(vector)) \
#define GUEST_ASSERT_PMC_VALUE(insn, msr, val, expected) \
__GUEST_ASSERT(val == expected, \
@@ -575,6 +596,26 @@ static void test_intel_counters(void)
};
/*
+ * To keep the total runtime reasonable, test only a handful of select,
+ * semi-arbitrary values for the mask of unavailable PMU events. Test
+ * 0 (all events available) and all ones (no events available) as well
+ * as alternating bit sequences, e.g. to detect if KVM is checking the
+ * wrong bit(s).
+ */
+ const uint32_t unavailable_masks[] = {
+ 0x0,
+ 0xffffffffu,
+ 0xaaaaaaaau,
+ 0x55555555u,
+ 0xf0f0f0f0u,
+ 0x0f0f0f0fu,
+ 0xa0a0a0a0u,
+ 0x0a0a0a0au,
+ 0x50505050u,
+ 0x05050505u,
+ };
+
+ /*
* Test up to PMU v5, which is the current maximum version defined by
* Intel, i.e. is the last version that is guaranteed to be backwards
* compatible with KVM's existing behavior.
@@ -611,16 +652,7 @@ static void test_intel_counters(void)
pr_info("Testing arch events, PMU version %u, perf_caps = %lx\n",
v, perf_caps[i]);
- /*
- * To keep the total runtime reasonable, test every
- * possible non-zero, non-reserved bitmap combination
- * only with the native PMU version and the full bit
- * vector length.
- */
- if (v == pmu_version) {
- for (k = 1; k < (BIT(NR_INTEL_ARCH_EVENTS) - 1); k++)
- test_arch_events(v, perf_caps[i], NR_INTEL_ARCH_EVENTS, k);
- }
+
/*
 * Test single bits for all PMU versions and lengths up to
 * the number of events + 1 (to verify KVM doesn't do
@@ -629,11 +661,8 @@ static void test_intel_counters(void)
* ones i.e. all events being available and unavailable.
*/
for (j = 0; j <= NR_INTEL_ARCH_EVENTS + 1; j++) {
- test_arch_events(v, perf_caps[i], j, 0);
- test_arch_events(v, perf_caps[i], j, 0xff);
-
- for (k = 0; k < NR_INTEL_ARCH_EVENTS; k++)
- test_arch_events(v, perf_caps[i], j, BIT(k));
+ for (k = 1; k < ARRAY_SIZE(unavailable_masks); k++)
+ test_arch_events(v, perf_caps[i], j, unavailable_masks[k]);
}
pr_info("Testing GP counters, PMU version %u, perf_caps = %lx\n",
diff --git a/tools/testing/selftests/kvm/x86/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86/pmu_event_filter_test.c
index c15513cd74d1..1c5b7611db24 100644
--- a/tools/testing/selftests/kvm/x86/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86/pmu_event_filter_test.c
@@ -214,8 +214,10 @@ static void remove_event(struct __kvm_pmu_event_filter *f, uint64_t event)
do { \
uint64_t br = pmc_results.branches_retired; \
uint64_t ir = pmc_results.instructions_retired; \
+ bool br_matched = this_pmu_has_errata(BRANCHES_RETIRED_OVERCOUNT) ? \
+ br >= NUM_BRANCHES : br == NUM_BRANCHES; \
\
- if (br && br != NUM_BRANCHES) \
+ if (br && !br_matched) \
pr_info("%s: Branch instructions retired = %lu (expected %u)\n", \
__func__, br, NUM_BRANCHES); \
TEST_ASSERT(br, "%s: Branch instructions retired = %lu (expected > 0)", \
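this_pmu_has_errata() is added by the library side of this series; presumably it keys off the host CPU model to decide whether the VM-Exit/VM-Entry overcount erratum applies. A guess at its shape (names and mechanism assumed, not from this patch):

/* Hypothetical: an errata mask derived from host CPUID family/model and
 * synced into the guest before it runs.
 */
static bool this_pmu_has_errata(int erratum)
{
	extern uint64_t guest_pmu_errata_mask;

	return guest_pmu_errata_mask & BIT_ULL(erratum);
}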
diff --git a/tools/testing/selftests/kvm/x86/private_mem_conversions_test.c b/tools/testing/selftests/kvm/x86/private_mem_conversions_test.c
index 82a8d88b5338..1969f4ab9b28 100644
--- a/tools/testing/selftests/kvm/x86/private_mem_conversions_test.c
+++ b/tools/testing/selftests/kvm/x86/private_mem_conversions_test.c
@@ -380,7 +380,7 @@ static void test_mem_conversions(enum vm_mem_backing_src_type src_type, uint32_t
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
pthread_t threads[KVM_MAX_VCPUS];
struct kvm_vm *vm;
- int memfd, i, r;
+ int memfd, i;
const struct vm_shape shape = {
.mode = VM_MODE_DEFAULT,
@@ -428,11 +428,8 @@ static void test_mem_conversions(enum vm_mem_backing_src_type src_type, uint32_t
* should prevent the VM from being fully destroyed until the last
* reference to the guest_memfd is also put.
*/
- r = fallocate(memfd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0, memfd_size);
- TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("fallocate()", r));
-
- r = fallocate(memfd, FALLOC_FL_KEEP_SIZE, 0, memfd_size);
- TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("fallocate()", r));
+ kvm_fallocate(memfd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0, memfd_size);
+ kvm_fallocate(memfd, FALLOC_FL_KEEP_SIZE, 0, memfd_size);
close(memfd);
}
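kvm_fallocate() replaces the open-coded fallocate() + TEST_ASSERT() pairs removed above; presumably it is a small library wrapper along these lines (a sketch inferred from the deleted code, not the actual helper):

	static inline void kvm_fallocate(int fd, int mode, off_t offset, off_t len)
	{
		int r = fallocate(fd, mode, offset, len);

		TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("fallocate()", r));
	}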
diff --git a/tools/testing/selftests/kvm/x86/sev_smoke_test.c b/tools/testing/selftests/kvm/x86/sev_smoke_test.c
index 77256c89bb8d..86ad1c7d068f 100644
--- a/tools/testing/selftests/kvm/x86/sev_smoke_test.c
+++ b/tools/testing/selftests/kvm/x86/sev_smoke_test.c
@@ -104,7 +104,7 @@ static void test_sync_vmsa(uint32_t type, uint64_t policy)
vm_sev_launch(vm, policy, NULL);
/* This page is shared, so make it decrypted. */
- memset(hva, 0, 4096);
+ memset(hva, 0, PAGE_SIZE);
vcpu_run(vcpu);
diff --git a/tools/testing/selftests/kvm/x86/state_test.c b/tools/testing/selftests/kvm/x86/state_test.c
index 141b7fc0c965..f2c7a1c297e3 100644
--- a/tools/testing/selftests/kvm/x86/state_test.c
+++ b/tools/testing/selftests/kvm/x86/state_test.c
@@ -141,7 +141,7 @@ static void __attribute__((__flatten__)) guest_code(void *arg)
if (this_cpu_has(X86_FEATURE_XSAVE)) {
uint64_t supported_xcr0 = this_cpu_supported_xcr0();
- uint8_t buffer[4096];
+ uint8_t buffer[PAGE_SIZE];
memset(buffer, 0xcc, sizeof(buffer));
diff --git a/tools/testing/selftests/kvm/x86/userspace_io_test.c b/tools/testing/selftests/kvm/x86/userspace_io_test.c
index 9481cbcf284f..be7d72f3c029 100644
--- a/tools/testing/selftests/kvm/x86/userspace_io_test.c
+++ b/tools/testing/selftests/kvm/x86/userspace_io_test.c
@@ -85,7 +85,7 @@ int main(int argc, char *argv[])
regs.rcx = 1;
if (regs.rcx == 3)
regs.rcx = 8192;
- memset((void *)run + run->io.data_offset, 0xaa, 4096);
+ memset((void *)run + run->io.data_offset, 0xaa, PAGE_SIZE);
vcpu_regs_set(vcpu, &regs);
}
diff --git a/tools/testing/selftests/kvm/x86/userspace_msr_exit_test.c b/tools/testing/selftests/kvm/x86/userspace_msr_exit_test.c
index 32b2794b78fe..8463a9956410 100644
--- a/tools/testing/selftests/kvm/x86/userspace_msr_exit_test.c
+++ b/tools/testing/selftests/kvm/x86/userspace_msr_exit_test.c
@@ -343,6 +343,12 @@ static void guest_code_permission_bitmap(void)
data = test_rdmsr(MSR_GS_BASE);
GUEST_ASSERT(data == MSR_GS_BASE);
+ /* Access the MSRs again to verify that KVM has disabled interception. */
+ data = test_rdmsr(MSR_FS_BASE);
+ GUEST_ASSERT(data != MSR_FS_BASE);
+ data = test_rdmsr(MSR_GS_BASE);
+ GUEST_ASSERT(data != MSR_GS_BASE);
+
GUEST_DONE();
}
@@ -682,6 +688,8 @@ KVM_ONE_VCPU_TEST(user_msr, msr_permission_bitmap, guest_code_permission_bitmap)
"Expected ucall state to be UCALL_SYNC.");
vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_gs);
run_guest_then_process_rdmsr(vcpu, MSR_GS_BASE);
+
+ vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_allow);
run_guest_then_process_ucall_done(vcpu);
}
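For context on the new assertions: while the MSR filter intercepts the reads, the test's userspace handler (presumably) completes RDMSR by echoing the MSR index back as the data, so the guest sees data == MSR_{FS,GS}_BASE. Once KVM drops interception, the reads return the real base registers, hence data != MSR_{FS,GS}_BASE. A hedged sketch of that completion shape, assuming the standard KVM_EXIT_X86_RDMSR plumbing:

	static void handle_rdmsr_exit(struct kvm_run *run)
	{
		TEST_ASSERT(run->exit_reason == KVM_EXIT_X86_RDMSR,
			    "Unexpected exit reason: %u", run->exit_reason);

		/* Echo the index as the value; only intercepted reads see this. */
		run->msr.data = run->msr.index;
		run->msr.error = 0;
	}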
diff --git a/tools/testing/selftests/kvm/x86/vmx_dirty_log_test.c b/tools/testing/selftests/kvm/x86/vmx_dirty_log_test.c
index fa512d033205..98cb6bdab3e6 100644
--- a/tools/testing/selftests/kvm/x86/vmx_dirty_log_test.c
+++ b/tools/testing/selftests/kvm/x86/vmx_dirty_log_test.c
@@ -120,17 +120,17 @@ static void test_vmx_dirty_log(bool enable_ept)
* GPAs as the EPT enabled case.
*/
if (enable_ept) {
- prepare_eptp(vmx, vm, 0);
+ prepare_eptp(vmx, vm);
nested_map_memslot(vmx, vm, 0);
- nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096);
- nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096);
+ nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, PAGE_SIZE);
+ nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, PAGE_SIZE);
}
bmap = bitmap_zalloc(TEST_MEM_PAGES);
host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM);
while (!done) {
- memset(host_test_mem, 0xaa, TEST_MEM_PAGES * 4096);
+ memset(host_test_mem, 0xaa, TEST_MEM_PAGES * PAGE_SIZE);
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
@@ -153,9 +153,9 @@ static void test_vmx_dirty_log(bool enable_ept)
}
TEST_ASSERT(!test_bit(1, bmap), "Page 1 incorrectly reported dirty");
- TEST_ASSERT(host_test_mem[4096 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 1 written by guest");
+ TEST_ASSERT(host_test_mem[PAGE_SIZE / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 1 written by guest");
TEST_ASSERT(!test_bit(2, bmap), "Page 2 incorrectly reported dirty");
- TEST_ASSERT(host_test_mem[8192 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 2 written by guest");
+ TEST_ASSERT(host_test_mem[2 * PAGE_SIZE / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 2 written by guest");
break;
case UCALL_DONE:
done = true;
diff --git a/tools/testing/selftests/kvm/x86/vmx_exception_with_invalid_guest_state.c b/tools/testing/selftests/kvm/x86/vmx_exception_with_invalid_guest_state.c
index 3fd6eceab46f..2cae86d9d5e2 100644
--- a/tools/testing/selftests/kvm/x86/vmx_exception_with_invalid_guest_state.c
+++ b/tools/testing/selftests/kvm/x86/vmx_exception_with_invalid_guest_state.c
@@ -110,7 +110,7 @@ int main(int argc, char *argv[])
struct kvm_vm *vm;
TEST_REQUIRE(host_cpu_is_intel);
- TEST_REQUIRE(!vm_is_unrestricted_guest(NULL));
+ TEST_REQUIRE(!kvm_is_unrestricted_guest_enabled());
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
get_set_sigalrm_vcpu(vcpu);
diff --git a/tools/testing/selftests/kvm/x86/vmx_nested_la57_state_test.c b/tools/testing/selftests/kvm/x86/vmx_nested_la57_state_test.c
new file mode 100644
index 000000000000..cf1d2d1f2a8f
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86/vmx_nested_la57_state_test.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025, Google LLC.
+ *
+ * Test KVM's ability to save and restore nested state when the L1 guest
+ * is using 5-level paging and the L2 guest is using 4-level paging.
+ *
+ * This test would have failed prior to commit 9245fd6b8531 ("KVM: x86:
+ * model canonical checks more precisely").
+ */
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "vmx.h"
+
+#define LA57_GS_BASE 0xff2bc0311fb00000ull
+
+static void l2_guest_code(void)
+{
+ /*
+ * Sync with L0 to trigger save/restore. After
+ * resuming, execute VMCALL to exit back to L1.
+ */
+ GUEST_SYNC(1);
+ vmcall();
+}
+
+static void l1_guest_code(struct vmx_pages *vmx_pages)
+{
+#define L2_GUEST_STACK_SIZE 64
+ unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+ u64 guest_cr4;
+ vm_paddr_t pml5_pa, pml4_pa;
+ u64 *pml5;
+ u64 exit_reason;
+
+ /* Set GS_BASE to a value that is only canonical with LA57. */
+ wrmsr(MSR_GS_BASE, LA57_GS_BASE);
+ GUEST_ASSERT(rdmsr(MSR_GS_BASE) == LA57_GS_BASE);
+
+ GUEST_ASSERT(vmx_pages->vmcs_gpa);
+ GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+ GUEST_ASSERT(load_vmcs(vmx_pages));
+
+ prepare_vmcs(vmx_pages, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+ /*
+ * Set up L2 with a 4-level page table by pointing its CR3 to
+ * L1's first PML4 table and clearing CR4.LA57. This creates
+ * the CR4.LA57 mismatch that exercises the bug.
+ */
+ pml5_pa = get_cr3() & PHYSICAL_PAGE_MASK;
+ pml5 = (u64 *)pml5_pa;
+ pml4_pa = pml5[0] & PHYSICAL_PAGE_MASK;
+ vmwrite(GUEST_CR3, pml4_pa);
+
+ guest_cr4 = vmreadz(GUEST_CR4);
+ guest_cr4 &= ~X86_CR4_LA57;
+ vmwrite(GUEST_CR4, guest_cr4);
+
+ GUEST_ASSERT(!vmlaunch());
+
+ exit_reason = vmreadz(VM_EXIT_REASON);
+ GUEST_ASSERT(exit_reason == EXIT_REASON_VMCALL);
+}
+
+void guest_code(struct vmx_pages *vmx_pages)
+{
+ l1_guest_code(vmx_pages);
+ GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+ vm_vaddr_t vmx_pages_gva = 0;
+ struct kvm_vm *vm;
+ struct kvm_vcpu *vcpu;
+ struct kvm_x86_state *state;
+ struct ucall uc;
+ int stage;
+
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_LA57));
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));
+
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+
+ /*
+ * L1 needs to read its own PML5 table to set up L2. Identity map
+ * the PML5 table to facilitate this.
+ */
+ virt_map(vm, vm->pgd, vm->pgd, 1);
+
+ vcpu_alloc_vmx(vm, &vmx_pages_gva);
+ vcpu_args_set(vcpu, 1, vmx_pages_gva);
+
+ for (stage = 1;; stage++) {
+ vcpu_run(vcpu);
+ TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
+
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT(uc);
+ /* NOT REACHED */
+ case UCALL_SYNC:
+ break;
+ case UCALL_DONE:
+ goto done;
+ default:
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
+ }
+
+ TEST_ASSERT(uc.args[1] == stage,
+ "Expected stage %d, got stage %lu", stage, (ulong)uc.args[1]);
+ if (stage == 1) {
+ pr_info("L2 is active; performing save/restore.\n");
+ state = vcpu_save_state(vcpu);
+
+ kvm_vm_release(vm);
+
+ /* Restore state in a new VM. */
+ vcpu = vm_recreate_with_one_vcpu(vm);
+ vcpu_load_state(vcpu, state);
+ kvm_x86_state_cleanup(state);
+ }
+ }
+
+done:
+ kvm_vm_free(vm);
+ return 0;
+}
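LA57_GS_BASE is the crux of the test: 0xff2bc0311fb00000 sign-extends cleanly from bit 56 but not from bit 47, so it is canonical only under 5-level paging. A small illustrative check (the helper is not part of the selftest library):

	#include <stdbool.h>
	#include <stdint.h>

	/* Canonical iff bits 63:va_bits-1 are a sign extension of bit va_bits-1. */
	static bool is_canonical(uint64_t addr, int va_bits)
	{
		int shift = 64 - va_bits;

		return ((int64_t)(addr << shift) >> shift) == (int64_t)addr;
	}

	/*
	 * is_canonical(0xff2bc0311fb00000, 57) -> true   (LA57, 5-level paging)
	 * is_canonical(0xff2bc0311fb00000, 48) -> false  (4-level paging)
	 */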
diff --git a/tools/testing/selftests/kvm/x86/vmx_pmu_caps_test.c b/tools/testing/selftests/kvm/x86/vmx_pmu_caps_test.c
index a1f5ff45d518..7ff6f62e20a3 100644
--- a/tools/testing/selftests/kvm/x86/vmx_pmu_caps_test.c
+++ b/tools/testing/selftests/kvm/x86/vmx_pmu_caps_test.c
@@ -29,7 +29,7 @@ static union perf_capabilities {
u64 pebs_baseline:1;
u64 perf_metrics:1;
u64 pebs_output_pt_available:1;
- u64 anythread_deprecated:1;
+ u64 pebs_timing_info:1;
};
u64 capabilities;
} host_cap;
@@ -44,6 +44,7 @@ static const union perf_capabilities immutable_caps = {
.pebs_arch_reg = 1,
.pebs_format = -1,
.pebs_baseline = 1,
+ .pebs_timing_info = 1,
};
static const union perf_capabilities format_caps = {
@@ -56,8 +57,8 @@ static void guest_test_perf_capabilities_gp(uint64_t val)
uint8_t vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, val);
__GUEST_ASSERT(vector == GP_VECTOR,
- "Expected #GP for value '0x%lx', got vector '0x%x'",
- val, vector);
+ "Expected #GP for value '0x%lx', got %s",
+ val, ex_str(vector));
}
static void guest_code(uint64_t current_val)
diff --git a/tools/testing/selftests/kvm/x86/xapic_ipi_test.c b/tools/testing/selftests/kvm/x86/xapic_ipi_test.c
index 35cb9de54a82..ae4a4b6c05ca 100644
--- a/tools/testing/selftests/kvm/x86/xapic_ipi_test.c
+++ b/tools/testing/selftests/kvm/x86/xapic_ipi_test.c
@@ -256,7 +256,7 @@ void do_migrations(struct test_data_page *data, int run_secs, int delay_usecs,
int nodes = 0;
time_t start_time, last_update, now;
time_t interval_secs = 1;
- int i, r;
+ int i;
int from, to;
unsigned long bit;
uint64_t hlt_count;
@@ -267,9 +267,8 @@ void do_migrations(struct test_data_page *data, int run_secs, int delay_usecs,
delay_usecs);
/* Get set of first 64 numa nodes available */
- r = get_mempolicy(NULL, &nodemask, sizeof(nodemask) * 8,
+ kvm_get_mempolicy(NULL, &nodemask, sizeof(nodemask) * 8,
0, MPOL_F_MEMS_ALLOWED);
- TEST_ASSERT(r == 0, "get_mempolicy failed errno=%d", errno);
fprintf(stderr, "Numa nodes found amongst first %lu possible nodes "
"(each 1-bit indicates node is present): %#lx\n",
diff --git a/tools/testing/selftests/kvm/x86/xapic_state_test.c b/tools/testing/selftests/kvm/x86/xapic_state_test.c
index fdebff1165c7..3b4814c55722 100644
--- a/tools/testing/selftests/kvm/x86/xapic_state_test.c
+++ b/tools/testing/selftests/kvm/x86/xapic_state_test.c
@@ -120,8 +120,8 @@ static void test_icr(struct xapic_vcpu *x)
__test_icr(x, icr | i);
/*
- * Send all flavors of IPIs to non-existent vCPUs. TODO: use number of
- * vCPUs, not vcpu.id + 1. Arbitrarily use vector 0xff.
+ * Send all flavors of IPIs to non-existent vCPUs. Arbitrarily use
+ * vector 0xff.
*/
icr = APIC_INT_ASSERT | 0xff;
for (i = 0; i < 0xff; i++) {
diff --git a/tools/testing/selftests/kvm/x86/xcr0_cpuid_test.c b/tools/testing/selftests/kvm/x86/xcr0_cpuid_test.c
index c8a5c5e51661..d038c1571729 100644
--- a/tools/testing/selftests/kvm/x86/xcr0_cpuid_test.c
+++ b/tools/testing/selftests/kvm/x86/xcr0_cpuid_test.c
@@ -81,13 +81,13 @@ static void guest_code(void)
vector = xsetbv_safe(0, XFEATURE_MASK_FP);
__GUEST_ASSERT(!vector,
- "Expected success on XSETBV(FP), got vector '0x%x'",
- vector);
+ "Expected success on XSETBV(FP), got %s",
+ ex_str(vector));
vector = xsetbv_safe(0, supported_xcr0);
__GUEST_ASSERT(!vector,
- "Expected success on XSETBV(0x%lx), got vector '0x%x'",
- supported_xcr0, vector);
+ "Expected success on XSETBV(0x%lx), got %s",
+ supported_xcr0, ex_str(vector));
for (i = 0; i < 64; i++) {
if (supported_xcr0 & BIT_ULL(i))
@@ -95,8 +95,8 @@ static void guest_code(void)
vector = xsetbv_safe(0, supported_xcr0 | BIT_ULL(i));
__GUEST_ASSERT(vector == GP_VECTOR,
- "Expected #GP on XSETBV(0x%llx), supported XCR0 = %lx, got vector '0x%x'",
- BIT_ULL(i), supported_xcr0, vector);
+ "Expected #GP on XSETBV(0x%llx), supported XCR0 = %lx, got %s",
+ BIT_ULL(i), supported_xcr0, ex_str(vector));
}
GUEST_DONE();
diff --git a/tools/testing/selftests/kvm/x86/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86/xen_shinfo_test.c
index 287829f850f7..23909b501ac2 100644
--- a/tools/testing/selftests/kvm/x86/xen_shinfo_test.c
+++ b/tools/testing/selftests/kvm/x86/xen_shinfo_test.c
@@ -547,15 +547,9 @@ int main(int argc, char *argv[])
int irq_fd[2] = { -1, -1 };
if (do_eventfd_tests) {
- irq_fd[0] = eventfd(0, 0);
- irq_fd[1] = eventfd(0, 0);
+ irq_fd[0] = kvm_new_eventfd();
+ irq_fd[1] = kvm_new_eventfd();
- /* Unexpected, but not a KVM failure */
- if (irq_fd[0] == -1 || irq_fd[1] == -1)
- do_evtchn_tests = do_eventfd_tests = false;
- }
-
- if (do_eventfd_tests) {
irq_routes.info.nr = 2;
irq_routes.entries[0].gsi = 32;
@@ -572,15 +566,8 @@ int main(int argc, char *argv[])
vm_ioctl(vm, KVM_SET_GSI_ROUTING, &irq_routes.info);
- struct kvm_irqfd ifd = { };
-
- ifd.fd = irq_fd[0];
- ifd.gsi = 32;
- vm_ioctl(vm, KVM_IRQFD, &ifd);
-
- ifd.fd = irq_fd[1];
- ifd.gsi = 33;
- vm_ioctl(vm, KVM_IRQFD, &ifd);
+ kvm_assign_irqfd(vm, 32, irq_fd[0]);
+ kvm_assign_irqfd(vm, 33, irq_fd[1]);
struct sigaction sa = { };
sa.sa_handler = handle_alrm;
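kvm_new_eventfd() and kvm_assign_irqfd() fold the deleted boilerplate into library helpers; judging from the removed lines they look roughly like this (a sketch, not the actual implementations):

	static inline int kvm_new_eventfd(void)
	{
		int fd = eventfd(0, 0);

		TEST_ASSERT(fd >= 0, __KVM_SYSCALL_ERROR("eventfd()", fd));
		return fd;
	}

	static inline void kvm_assign_irqfd(struct kvm_vm *vm, uint32_t gsi, int fd)
	{
		struct kvm_irqfd ifd = {
			.fd = fd,
			.gsi = gsi,
		};

		vm_ioctl(vm, KVM_IRQFD, &ifd);
	}

Note the semantic change: the old code skipped the eventfd tests when eventfd(2) failed, whereas the helper apparently asserts, treating such a failure as a test error.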
diff --git a/tools/testing/selftests/landlock/Makefile b/tools/testing/selftests/landlock/Makefile
index a3f449914bf9..044b83bde16e 100644
--- a/tools/testing/selftests/landlock/Makefile
+++ b/tools/testing/selftests/landlock/Makefile
@@ -4,7 +4,7 @@
CFLAGS += -Wall -O2 $(KHDR_INCLUDES)
-LOCAL_HDRS += common.h
+LOCAL_HDRS += $(wildcard *.h)
src_test := $(wildcard *_test.c)
diff --git a/tools/testing/selftests/landlock/audit.h b/tools/testing/selftests/landlock/audit.h
index 18a6014920b5..44eb433e9666 100644
--- a/tools/testing/selftests/landlock/audit.h
+++ b/tools/testing/selftests/landlock/audit.h
@@ -20,14 +20,12 @@
#include <sys/time.h>
#include <unistd.h>
+#include "kselftest.h"
+
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
#endif
-#ifndef __maybe_unused
-#define __maybe_unused __attribute__((__unused__))
-#endif
-
#define REGEX_LANDLOCK_PREFIX "^audit([0-9.:]\\+): domain=\\([0-9a-f]\\+\\)"
struct audit_filter {
@@ -403,11 +401,12 @@ static int audit_init_filter_exe(struct audit_filter *filter, const char *path)
/* It is assumed that there are no pre-existing filtering rules. */
filter->record_type = AUDIT_EXE;
if (!path) {
- filter->exe_len = readlink("/proc/self/exe", filter->exe,
- sizeof(filter->exe) - 1);
- if (filter->exe_len < 0)
+ int ret = readlink("/proc/self/exe", filter->exe,
+ sizeof(filter->exe) - 1);
+ if (ret < 0)
return -errno;
+ filter->exe_len = ret;
return 0;
}
diff --git a/tools/testing/selftests/landlock/audit_test.c b/tools/testing/selftests/landlock/audit_test.c
index cfc571afd0eb..46d02d49835a 100644
--- a/tools/testing/selftests/landlock/audit_test.c
+++ b/tools/testing/selftests/landlock/audit_test.c
@@ -7,6 +7,7 @@
#define _GNU_SOURCE
#include <errno.h>
+#include <fcntl.h>
#include <limits.h>
#include <linux/landlock.h>
#include <pthread.h>
diff --git a/tools/testing/selftests/landlock/common.h b/tools/testing/selftests/landlock/common.h
index 88a3c78f5d98..230b75f6015b 100644
--- a/tools/testing/selftests/landlock/common.h
+++ b/tools/testing/selftests/landlock/common.h
@@ -17,15 +17,11 @@
#include <sys/wait.h>
#include <unistd.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#include "wrappers.h"
#define TMP_DIR "tmp"
-#ifndef __maybe_unused
-#define __maybe_unused __attribute__((__unused__))
-#endif
-
/* TEST_F_FORK() should not be used for new tests. */
#define TEST_F_FORK(fixture_name, test_name) TEST_F(fixture_name, test_name)
diff --git a/tools/testing/selftests/landlock/fs_test.c b/tools/testing/selftests/landlock/fs_test.c
index 73729382d40f..eee814e09dd7 100644
--- a/tools/testing/selftests/landlock/fs_test.c
+++ b/tools/testing/selftests/landlock/fs_test.c
@@ -1832,6 +1832,46 @@ TEST_F_FORK(layout1, release_inodes)
ASSERT_EQ(ENOENT, test_open(dir_s3d3, O_RDONLY));
}
+/*
+ * This test checks that a rule on a directory used as a mount point does not
+ * grant access to the mount covering it. It is a generalization of the bind
+ * mount case in layout3_fs.hostfs.release_inodes that tests hidden mount points.
+ */
+TEST_F_FORK(layout1, covered_rule)
+{
+ const struct rule layer1[] = {
+ {
+ .path = dir_s3d2,
+ .access = LANDLOCK_ACCESS_FS_READ_DIR,
+ },
+ {},
+ };
+ int ruleset_fd;
+
+ /* Unmount to simplify FIXTURE_TEARDOWN. */
+ set_cap(_metadata, CAP_SYS_ADMIN);
+ ASSERT_EQ(0, umount(dir_s3d2));
+ clear_cap(_metadata, CAP_SYS_ADMIN);
+
+ /* Creates a ruleset with the directory that is about to be hidden. */
+ ruleset_fd =
+ create_ruleset(_metadata, LANDLOCK_ACCESS_FS_READ_DIR, layer1);
+ ASSERT_LE(0, ruleset_fd);
+
+ /* Covers with a new mount point. */
+ set_cap(_metadata, CAP_SYS_ADMIN);
+ ASSERT_EQ(0, mount_opt(&mnt_tmp, dir_s3d2));
+ clear_cap(_metadata, CAP_SYS_ADMIN);
+
+ ASSERT_EQ(0, test_open(dir_s3d2, O_RDONLY));
+
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /* Checks that access to the new mount point is denied. */
+ ASSERT_EQ(EACCES, test_open(dir_s3d2, O_RDONLY));
+}
+
enum relative_access {
REL_OPEN,
REL_CHDIR,
@@ -2227,6 +2267,22 @@ static int test_exchange(const char *const oldpath, const char *const newpath)
return 0;
}
+static int test_renameat(int olddirfd, const char *oldpath, int newdirfd,
+ const char *newpath)
+{
+ if (renameat2(olddirfd, oldpath, newdirfd, newpath, 0))
+ return errno;
+ return 0;
+}
+
+static int test_exchangeat(int olddirfd, const char *oldpath, int newdirfd,
+ const char *newpath)
+{
+ if (renameat2(olddirfd, oldpath, newdirfd, newpath, RENAME_EXCHANGE))
+ return errno;
+ return 0;
+}
+
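These helpers convert renameat2(2) failures into errno values so that test variants can assert exact error codes, mirroring the existing test_exchange() helper above; they are used later in this file like:

	EXPECT_EQ(variant->expected_rename_result,
		  test_renameat(s1d41_bind_fd, "f1", s1d42_bind_fd, "f1"));
	EXPECT_EQ(variant->expected_exchange_result,
		  test_exchangeat(s1d41_bind_fd, "f2", s1d42_bind_fd, "f3"));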
TEST_F_FORK(layout1, rename_file)
{
const struct rule rules[] = {
@@ -4521,6 +4577,18 @@ TEST_F_FORK(ioctl, handle_file_access_file)
FIXTURE(layout1_bind) {};
/* clang-format on */
+static const char bind_dir_s1d3[] = TMP_DIR "/s2d1/s2d2/s1d3";
+static const char bind_file1_s1d3[] = TMP_DIR "/s2d1/s2d2/s1d3/f1";
+
+/* Move targets for disconnected path tests. */
+static const char dir_s4d1[] = TMP_DIR "/s4d1";
+static const char file1_s4d1[] = TMP_DIR "/s4d1/f1";
+static const char file2_s4d1[] = TMP_DIR "/s4d1/f2";
+static const char dir_s4d2[] = TMP_DIR "/s4d1/s4d2";
+static const char file1_s4d2[] = TMP_DIR "/s4d1/s4d2/f1";
+static const char file1_name[] = "f1";
+static const char file2_name[] = "f2";
+
FIXTURE_SETUP(layout1_bind)
{
prepare_layout(_metadata);
@@ -4536,14 +4604,14 @@ FIXTURE_TEARDOWN_PARENT(layout1_bind)
{
/* umount(dir_s2d2)) is handled by namespace lifetime. */
+ remove_path(file1_s4d1);
+ remove_path(file2_s4d1);
+
remove_layout1(_metadata);
cleanup_layout(_metadata);
}
-static const char bind_dir_s1d3[] = TMP_DIR "/s2d1/s2d2/s1d3";
-static const char bind_file1_s1d3[] = TMP_DIR "/s2d1/s2d2/s1d3/f1";
-
/*
* layout1_bind hierarchy:
*
@@ -4554,20 +4622,25 @@ static const char bind_file1_s1d3[] = TMP_DIR "/s2d1/s2d2/s1d3/f1";
* │   └── s1d2
* │   ├── f1
* │   ├── f2
- * │   └── s1d3
+ * │   └── s1d3 [disconnected by path_disconnected]
* │   ├── f1
* │   └── f2
* ├── s2d1
* │   ├── f1
- * │   └── s2d2
+ * │   └── s2d2 [bind mount from s1d2]
* │   ├── f1
* │   ├── f2
* │   └── s1d3
* │   ├── f1
* │   └── f2
- * └── s3d1
- * └── s3d2
- * └── s3d3
+ * ├── s3d1
+ * │   └── s3d2
+ * │   └── s3d3
+ * └── s4d1 [renamed from s1d3 by path_disconnected]
+ *    ├── f1
+ *    ├── f2
+ *    └── s4d2
+ *       └── f1
*/
TEST_F_FORK(layout1_bind, no_restriction)
@@ -4766,6 +4839,1431 @@ TEST_F_FORK(layout1_bind, reparent_cross_mount)
ASSERT_EQ(0, rename(bind_file1_s1d3, file1_s2d2));
}
+/*
+ * Make sure access to file through a disconnected path works as expected.
+ * This test moves s1d3 to s4d1.
+ */
+TEST_F_FORK(layout1_bind, path_disconnected)
+{
+ const struct rule layer1_allow_all[] = {
+ {
+ .path = TMP_DIR,
+ .access = ACCESS_ALL,
+ },
+ {},
+ };
+ const struct rule layer2_allow_just_f1[] = {
+ {
+ .path = file1_s1d3,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE,
+ },
+ {},
+ };
+ const struct rule layer3_only_s1d2[] = {
+ {
+ .path = dir_s1d2,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE,
+ },
+ {},
+ };
+
+ /* Landlock should not deny access just because the path is disconnected. */
+ int ruleset_fd_l1 =
+ create_ruleset(_metadata, ACCESS_ALL, layer1_allow_all);
+
+ /* Creates the next rulesets now, before moving the directory containing the file. */
+ int ruleset_fd_l2 =
+ create_ruleset(_metadata, ACCESS_RW, layer2_allow_just_f1);
+ int ruleset_fd_l3 =
+ create_ruleset(_metadata, ACCESS_RW, layer3_only_s1d2);
+ int bind_s1d3_fd;
+
+ ASSERT_LE(0, ruleset_fd_l1);
+ ASSERT_LE(0, ruleset_fd_l2);
+ ASSERT_LE(0, ruleset_fd_l3);
+
+ enforce_ruleset(_metadata, ruleset_fd_l1);
+ EXPECT_EQ(0, close(ruleset_fd_l1));
+
+ bind_s1d3_fd = open(bind_dir_s1d3, O_PATH | O_CLOEXEC);
+ ASSERT_LE(0, bind_s1d3_fd);
+
+ /* Tests access is possible before we move. */
+ EXPECT_EQ(0, test_open_rel(bind_s1d3_fd, file1_name, O_RDONLY));
+ EXPECT_EQ(0, test_open_rel(bind_s1d3_fd, file2_name, O_RDONLY));
+ EXPECT_EQ(0, test_open_rel(bind_s1d3_fd, "..", O_RDONLY | O_DIRECTORY));
+
+ /* Makes it disconnected. */
+ ASSERT_EQ(0, rename(dir_s1d3, dir_s4d1))
+ {
+ TH_LOG("Failed to rename %s to %s: %s", dir_s1d3, dir_s4d1,
+ strerror(errno));
+ }
+
+ /* Tests that access is still possible. */
+ EXPECT_EQ(0, test_open_rel(bind_s1d3_fd, file1_name, O_RDONLY));
+ EXPECT_EQ(0, test_open_rel(bind_s1d3_fd, file2_name, O_RDONLY));
+
+ /*
+ * Tests that ".." is not possible (not because of Landlock, but just
+ * because it's disconnected).
+ */
+ EXPECT_EQ(ENOENT,
+ test_open_rel(bind_s1d3_fd, "..", O_RDONLY | O_DIRECTORY));
+
+ /* This should still work with a narrower rule. */
+ enforce_ruleset(_metadata, ruleset_fd_l2);
+ EXPECT_EQ(0, close(ruleset_fd_l2));
+
+ EXPECT_EQ(0, test_open(file1_s4d1, O_RDONLY));
+ /*
+ * Accessing a file through a disconnected file descriptor can still be
+ * allowed by a rule tied to this file, even if it is no longer visible in
+ * its mount point.
+ */
+ EXPECT_EQ(0, test_open_rel(bind_s1d3_fd, file1_name, O_RDONLY));
+ EXPECT_EQ(EACCES, test_open_rel(bind_s1d3_fd, file2_name, O_RDONLY));
+
+ enforce_ruleset(_metadata, ruleset_fd_l3);
+ EXPECT_EQ(0, close(ruleset_fd_l3));
+
+ EXPECT_EQ(EACCES, test_open(file1_s4d1, O_RDONLY));
+ /*
+ * Accessing a file through a disconnected file descriptor can still be
+ * allowed by a rule tied to the original mount point, even if the file
+ * is no longer visible there.
+ */
+ EXPECT_EQ(0, test_open_rel(bind_s1d3_fd, file1_name, O_RDONLY));
+ EXPECT_EQ(EACCES, test_open_rel(bind_s1d3_fd, file2_name, O_RDONLY));
+}
+
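The disconnected-path tests lean heavily on test_open_rel(); by analogy with the existing test_open() convention, it presumably returns 0 on success and the openat(2) errno on failure, roughly:

	static int test_open_rel(const int dirfd, const char *const path,
				 const int flags)
	{
		int fd = openat(dirfd, path, flags | O_CLOEXEC);

		if (fd < 0)
			return errno;
		close(fd);
		return 0;
	}

(A sketch for reading the assertions above, not the actual helper.)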
+/*
+ * Test that renameat with disconnected paths works under Landlock. This test
+ * moves s1d3 to s4d2, so that we can have a rule allowing refers on the move
+ * target's immediate parent.
+ */
+TEST_F_FORK(layout1_bind, path_disconnected_rename)
+{
+ const struct rule layer1[] = {
+ {
+ .path = dir_s1d2,
+ .access = LANDLOCK_ACCESS_FS_REFER |
+ LANDLOCK_ACCESS_FS_MAKE_DIR |
+ LANDLOCK_ACCESS_FS_REMOVE_DIR |
+ LANDLOCK_ACCESS_FS_MAKE_REG |
+ LANDLOCK_ACCESS_FS_REMOVE_FILE |
+ LANDLOCK_ACCESS_FS_READ_FILE,
+ },
+ {
+ .path = dir_s4d1,
+ .access = LANDLOCK_ACCESS_FS_REFER |
+ LANDLOCK_ACCESS_FS_MAKE_DIR |
+ LANDLOCK_ACCESS_FS_REMOVE_DIR |
+ LANDLOCK_ACCESS_FS_MAKE_REG |
+ LANDLOCK_ACCESS_FS_REMOVE_FILE |
+ LANDLOCK_ACCESS_FS_READ_FILE,
+ },
+ {}
+ };
+
+ /* This layer only handles LANDLOCK_ACCESS_FS_READ_FILE. */
+ const struct rule layer2_only_s1d2[] = {
+ {
+ .path = dir_s1d2,
+ .access = LANDLOCK_ACCESS_FS_READ_FILE,
+ },
+ {},
+ };
+ int ruleset_fd_l1, ruleset_fd_l2;
+ pid_t child_pid;
+ int bind_s1d3_fd, status;
+
+ ASSERT_EQ(0, mkdir(dir_s4d1, 0755))
+ {
+ TH_LOG("Failed to create %s: %s", dir_s4d1, strerror(errno));
+ }
+ ruleset_fd_l1 = create_ruleset(_metadata, ACCESS_ALL, layer1);
+ ruleset_fd_l2 = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_READ_FILE,
+ layer2_only_s1d2);
+ ASSERT_LE(0, ruleset_fd_l1);
+ ASSERT_LE(0, ruleset_fd_l2);
+
+ enforce_ruleset(_metadata, ruleset_fd_l1);
+ EXPECT_EQ(0, close(ruleset_fd_l1));
+
+ bind_s1d3_fd = open(bind_dir_s1d3, O_PATH | O_CLOEXEC);
+ ASSERT_LE(0, bind_s1d3_fd);
+ EXPECT_EQ(0, test_open_rel(bind_s1d3_fd, file1_name, O_RDONLY));
+
+ /* Tests that ENOENT takes priority over EACCES once the directory is disconnected. */
+ EXPECT_EQ(EACCES, test_open_rel(bind_s1d3_fd, "..", O_DIRECTORY));
+ ASSERT_EQ(0, rename(dir_s1d3, dir_s4d2))
+ {
+ TH_LOG("Failed to rename %s to %s: %s", dir_s1d3, dir_s4d2,
+ strerror(errno));
+ }
+ EXPECT_EQ(ENOENT, test_open_rel(bind_s1d3_fd, "..", O_DIRECTORY));
+
+ /*
+ * The file is no longer under s1d2, but layer 2 should still allow access
+ * to it: its mount point is evaluated as the first valid directory, since
+ * it was initially a parent. Fork and enforce in the child, so that the
+ * parent is not prevented from renaming the directory back later.
+ */
+ child_pid = fork();
+ ASSERT_LE(0, child_pid);
+ if (child_pid == 0) {
+ enforce_ruleset(_metadata, ruleset_fd_l2);
+ EXPECT_EQ(0, close(ruleset_fd_l2));
+ EXPECT_EQ(0, test_open_rel(bind_s1d3_fd, file1_name, O_RDONLY));
+ EXPECT_EQ(EACCES, test_open(file1_s4d2, O_RDONLY));
+
+ /*
+ * Tests that access widening checks indeed prevent us from renaming it
+ * back.
+ */
+ EXPECT_EQ(-1, rename(dir_s4d2, dir_s1d3));
+ EXPECT_EQ(EXDEV, errno);
+
+ /*
+ * The same restriction applies through the now-disconnected FD (which
+ * should also return EXDEV).
+ */
+ EXPECT_EQ(-1, renameat(bind_s1d3_fd, file1_name, AT_FDCWD,
+ file1_s2d2));
+ EXPECT_EQ(EXDEV, errno);
+ _exit(_metadata->exit_code);
+ return;
+ }
+
+ EXPECT_EQ(child_pid, waitpid(child_pid, &status, 0));
+ EXPECT_EQ(1, WIFEXITED(status));
+ EXPECT_EQ(EXIT_SUCCESS, WEXITSTATUS(status));
+
+ ASSERT_EQ(0, rename(dir_s4d2, dir_s1d3))
+ {
+ TH_LOG("Failed to rename %s back to %s: %s", dir_s4d1, dir_s1d3,
+ strerror(errno));
+ }
+
+ /* Now checks that we can access it under l2. */
+ child_pid = fork();
+ ASSERT_LE(0, child_pid);
+ if (child_pid == 0) {
+ enforce_ruleset(_metadata, ruleset_fd_l2);
+ EXPECT_EQ(0, close(ruleset_fd_l2));
+ EXPECT_EQ(0, test_open_rel(bind_s1d3_fd, file1_name, O_RDONLY));
+ EXPECT_EQ(0, test_open(file1_s1d3, O_RDONLY));
+ _exit(_metadata->exit_code);
+ return;
+ }
+
+ EXPECT_EQ(child_pid, waitpid(child_pid, &status, 0));
+ EXPECT_EQ(1, WIFEXITED(status));
+ EXPECT_EQ(EXIT_SUCCESS, WEXITSTATUS(status));
+
+ /*
+ * Also test that we can rename via a disconnected path. We move the
+ * dir back to the disconnected place first, then we rename file1 to
+ * file2 through our dir fd.
+ */
+ ASSERT_EQ(0, rename(dir_s1d3, dir_s4d2))
+ {
+ TH_LOG("Failed to rename %s to %s: %s", dir_s1d3, dir_s4d2,
+ strerror(errno));
+ }
+ ASSERT_EQ(0,
+ renameat(bind_s1d3_fd, file1_name, bind_s1d3_fd, file2_name))
+ {
+ TH_LOG("Failed to rename %s to %s within disconnected %s: %s",
+ file1_name, file2_name, bind_dir_s1d3, strerror(errno));
+ }
+ EXPECT_EQ(0, test_open_rel(bind_s1d3_fd, file2_name, O_RDONLY));
+ ASSERT_EQ(0, renameat(bind_s1d3_fd, file2_name, AT_FDCWD, file1_s2d2))
+ {
+ TH_LOG("Failed to rename %s to %s through disconnected %s: %s",
+ file2_name, file1_s2d2, bind_dir_s1d3, strerror(errno));
+ }
+ EXPECT_EQ(0, test_open(file1_s2d2, O_RDONLY));
+ EXPECT_EQ(0, test_open(file1_s1d2, O_RDONLY));
+
+ /* Move it back using the disconnected path as the target. */
+ ASSERT_EQ(0, renameat(AT_FDCWD, file1_s2d2, bind_s1d3_fd, file1_name))
+ {
+ TH_LOG("Failed to rename %s to %s through disconnected %s: %s",
+ file1_s2d2, file1_name, bind_dir_s1d3, strerror(errno));
+ }
+
+ /* Now make it connected again. */
+ ASSERT_EQ(0, rename(dir_s4d2, dir_s1d3))
+ {
+ TH_LOG("Failed to rename %s back to %s: %s", dir_s4d2, dir_s1d3,
+ strerror(errno));
+ }
+
+ /* Checks again that we can access it under l2. */
+ enforce_ruleset(_metadata, ruleset_fd_l2);
+ EXPECT_EQ(0, close(ruleset_fd_l2));
+ EXPECT_EQ(0, test_open_rel(bind_s1d3_fd, file1_name, O_RDONLY));
+ EXPECT_EQ(0, test_open(file1_s1d3, O_RDONLY));
+}
+
+/*
+ * Test that linkat(2) with disconnected paths works under Landlock. This
+ * test moves s1d3 to s4d1.
+ */
+TEST_F_FORK(layout1_bind, path_disconnected_link)
+{
+ /* Ruleset to be applied after renaming s1d3 to s4d1. */
+ const struct rule layer1[] = {
+ {
+ .path = dir_s4d1,
+ .access = LANDLOCK_ACCESS_FS_REFER |
+ LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_MAKE_REG |
+ LANDLOCK_ACCESS_FS_REMOVE_FILE,
+ },
+ {
+ .path = dir_s2d2,
+ .access = LANDLOCK_ACCESS_FS_REFER |
+ LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_MAKE_REG |
+ LANDLOCK_ACCESS_FS_REMOVE_FILE,
+ },
+ {}
+ };
+ int ruleset_fd, bind_s1d3_fd;
+
+ /* Removes files created by layout1 that would otherwise make linkat() fail with EEXIST. */
+ ASSERT_EQ(0, unlink(file1_s1d2));
+ ASSERT_EQ(0, unlink(file2_s1d3));
+
+ bind_s1d3_fd = open(bind_dir_s1d3, O_PATH | O_CLOEXEC);
+ ASSERT_LE(0, bind_s1d3_fd);
+ EXPECT_EQ(0, test_open_rel(bind_s1d3_fd, file1_name, O_RDONLY));
+
+ /* Disconnects bind_s1d3_fd. */
+ ASSERT_EQ(0, rename(dir_s1d3, dir_s4d1))
+ {
+ TH_LOG("Failed to rename %s to %s: %s", dir_s1d3, dir_s4d1,
+ strerror(errno));
+ }
+
+ /* Need this later to test different parent link. */
+ ASSERT_EQ(0, mkdir(dir_s4d2, 0755))
+ {
+ TH_LOG("Failed to create %s: %s", dir_s4d2, strerror(errno));
+ }
+
+ ruleset_fd = create_ruleset(_metadata, ACCESS_ALL, layer1);
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ EXPECT_EQ(0, close(ruleset_fd));
+
+ /* From disconnected to connected. */
+ ASSERT_EQ(0, linkat(bind_s1d3_fd, file1_name, AT_FDCWD, file1_s2d2, 0))
+ {
+ TH_LOG("Failed to link %s to %s via disconnected %s: %s",
+ file1_name, file1_s2d2, bind_dir_s1d3, strerror(errno));
+ }
+
+ /* Tests that we can access via the new link... */
+ EXPECT_EQ(0, test_open(file1_s2d2, O_RDONLY))
+ {
+ TH_LOG("Failed to open newly linked %s: %s", file1_s2d2,
+ strerror(errno));
+ }
+
+ /* ...as well as the old one. */
+ EXPECT_EQ(0, test_open(file1_s4d1, O_RDONLY))
+ {
+ TH_LOG("Failed to open original %s: %s", file1_s4d1,
+ strerror(errno));
+ }
+
+ /* From connected to disconnected. */
+ ASSERT_EQ(0, unlink(file1_s4d1));
+ ASSERT_EQ(0, linkat(AT_FDCWD, file1_s2d2, bind_s1d3_fd, file2_name, 0))
+ {
+ TH_LOG("Failed to link %s to %s via disconnected %s: %s",
+ file1_s2d2, file2_name, bind_dir_s1d3, strerror(errno));
+ }
+ EXPECT_EQ(0, test_open(file2_s4d1, O_RDONLY));
+ ASSERT_EQ(0, unlink(file1_s2d2));
+
+ /* From disconnected to disconnected (same parent). */
+ ASSERT_EQ(0,
+ linkat(bind_s1d3_fd, file2_name, bind_s1d3_fd, file1_name, 0))
+ {
+ TH_LOG("Failed to link %s to %s within disconnected %s: %s",
+ file2_name, file1_name, bind_dir_s1d3, strerror(errno));
+ }
+ EXPECT_EQ(0, test_open(file1_s4d1, O_RDONLY))
+ {
+ TH_LOG("Failed to open newly linked %s: %s", file1_s4d1,
+ strerror(errno));
+ }
+ EXPECT_EQ(0, test_open_rel(bind_s1d3_fd, file1_name, O_RDONLY))
+ {
+ TH_LOG("Failed to open %s through newly created link under disconnected path: %s",
+ file1_name, strerror(errno));
+ }
+ ASSERT_EQ(0, unlink(file2_s4d1));
+
+ /* From disconnected to disconnected (different parent). */
+ ASSERT_EQ(0,
+ linkat(bind_s1d3_fd, file1_name, bind_s1d3_fd, "s4d2/f1", 0))
+ {
+ TH_LOG("Failed to link %s to %s within disconnected %s: %s",
+ file1_name, "s4d2/f1", bind_dir_s1d3, strerror(errno));
+ }
+ EXPECT_EQ(0, test_open(file1_s4d2, O_RDONLY))
+ {
+ TH_LOG("Failed to open %s after link: %s", file1_s4d2,
+ strerror(errno));
+ }
+ EXPECT_EQ(0, test_open_rel(bind_s1d3_fd, "s4d2/f1", O_RDONLY))
+ {
+ TH_LOG("Failed to open %s through disconnected path after link: %s",
+ "s4d2/f1", strerror(errno));
+ }
+}
+
+/*
+ * layout4_disconnected_leafs with bind mount and renames:
+ *
+ * tmp
+ * ├── s1d1
+ * │   └── s1d2 [source of the bind mount]
+ * │ ├── s1d31
+ * │   │ └── s1d41 [now renamed beneath s3d1]
+ * │ │ ├── f1
+ * │ │ └── f2
+ * │   └── s1d32
+ * │ └── s1d42 [now renamed beneath s4d1]
+ * │ ├── f3
+ * │ └── f4
+ * ├── s2d1
+ * │   └── s2d2 [bind mount of s1d2]
+ * │ ├── s1d31
+ * │   │ └── s1d41 [opened FD, now renamed beneath s3d1]
+ * │ │ ├── f1
+ * │ │ └── f2
+ * │   └── s1d32
+ * │ └── s1d42 [opened FD, now renamed beneath s4d1]
+ * │ ├── f3
+ * │ └── f4
+ * ├── s3d1
+ * │  └── s1d41 [renamed here]
+ * │ ├── f1
+ * │ └── f2
+ * └── s4d1
+ * └── s1d42 [renamed here]
+ * ├── f3
+ * └── f4
+ */
+/* clang-format off */
+FIXTURE(layout4_disconnected_leafs) {
+ int s2d2_fd;
+};
+/* clang-format on */
+
+FIXTURE_SETUP(layout4_disconnected_leafs)
+{
+ prepare_layout(_metadata);
+
+ create_file(_metadata, TMP_DIR "/s1d1/s1d2/s1d31/s1d41/f1");
+ create_file(_metadata, TMP_DIR "/s1d1/s1d2/s1d31/s1d41/f2");
+ create_file(_metadata, TMP_DIR "/s1d1/s1d2/s1d32/s1d42/f3");
+ create_file(_metadata, TMP_DIR "/s1d1/s1d2/s1d32/s1d42/f4");
+ create_directory(_metadata, TMP_DIR "/s2d1/s2d2");
+ create_directory(_metadata, TMP_DIR "/s3d1");
+ create_directory(_metadata, TMP_DIR "/s4d1");
+
+ self->s2d2_fd =
+ open(TMP_DIR "/s2d1/s2d2", O_DIRECTORY | O_PATH | O_CLOEXEC);
+ ASSERT_LE(0, self->s2d2_fd);
+
+ set_cap(_metadata, CAP_SYS_ADMIN);
+ ASSERT_EQ(0, mount(TMP_DIR "/s1d1/s1d2", TMP_DIR "/s2d1/s2d2", NULL,
+ MS_BIND, NULL));
+ clear_cap(_metadata, CAP_SYS_ADMIN);
+}
+
+FIXTURE_TEARDOWN_PARENT(layout4_disconnected_leafs)
+{
+ /* umount(TMP_DIR "/s2d1") is handled by namespace lifetime. */
+
+ /* Removes files after renames. */
+ remove_path(TMP_DIR "/s3d1/s1d41/f1");
+ remove_path(TMP_DIR "/s3d1/s1d41/f2");
+ remove_path(TMP_DIR "/s4d1/s1d42/f1");
+ remove_path(TMP_DIR "/s4d1/s1d42/f3");
+ remove_path(TMP_DIR "/s4d1/s1d42/f4");
+ remove_path(TMP_DIR "/s4d1/s1d42/f5");
+
+ cleanup_layout(_metadata);
+}
+
+FIXTURE_VARIANT(layout4_disconnected_leafs)
+{
+ /*
+ * Parent of the bind mount source. It should always be ignored when
+ * testing against files under the s1d41 or s1d42 disconnected directories.
+ */
+ const __u64 allowed_s1d1;
+ /*
+ * Source of bind mount (to s2d2). It should always be enforced when
+ * testing against files under the s1d41 or s1d42 disconnected directories.
+ */
+ const __u64 allowed_s1d2;
+ /*
+ * Original parent of s1d41. It should always be ignored when testing
+ * against files under the s1d41 disconnected directory.
+ */
+ const __u64 allowed_s1d31;
+ /*
+ * Original parent of s1d42. It should always be ignored when testing
+ * against files under the s1d42 disconnected directory.
+ */
+ const __u64 allowed_s1d32;
+ /*
+ * Opened and disconnected source directory. It should always be enforced
+ * when testing against files under the s1d41 disconnected directory.
+ */
+ const __u64 allowed_s1d41;
+ /*
+ * Opened and disconnected source directory. It should always be enforced
+ * when testing against files under the s1d42 disconnected directory.
+ */
+ const __u64 allowed_s1d42;
+ /*
+ * File in the s1d41 disconnected directory. It should always be enforced
+ * when testing against itself under the s1d41 disconnected directory.
+ */
+ const __u64 allowed_f1;
+ /*
+ * File in the s1d41 disconnected directory. It should always be enforced
+ * when testing against itself under the s1d41 disconnected directory.
+ */
+ const __u64 allowed_f2;
+ /*
+ * File in the s1d42 disconnected directory. It should always be enforced
+ * when testing against itself under the s1d42 disconnected directory.
+ */
+ const __u64 allowed_f3;
+ /*
+ * Parent of the bind mount destination. It should always be enforced when
+ * testing against files under the s1d41 or s1d42 disconnected directories.
+ */
+ const __u64 allowed_s2d1;
+ /*
+ * Directory covered by the bind mount. It should always be ignored when
+ * testing against files under the s1d41 or s1d42 disconnected directories.
+ */
+ const __u64 allowed_s2d2;
+ /*
+ * New parent of the renamed s1d41. It should always be ignored when
+ * testing against files under the s1d41 disconnected directory.
+ */
+ const __u64 allowed_s3d1;
+ /*
+ * New parent of the renamed s1d42. It should always be ignored when
+ * testing against files under the s1d42 disconnected directory.
+ */
+ const __u64 allowed_s4d1;
+
+ /* Expected result of the call to open([fd:s1d41]/f1, O_RDONLY). */
+ const int expected_read_result;
+ /* Expected result of the call to renameat([fd:s1d41]/f1, [fd:s1d42]/f1). */
+ const int expected_rename_result;
+ /*
+ * Expected result of the call to renameat([fd:s1d41]/f2, [fd:s1d42]/f3,
+ * RENAME_EXCHANGE).
+ */
+ const int expected_exchange_result;
+ /* Expected result of the call to renameat([fd:s1d42]/f4, [fd:s1d42]/f5). */
+ const int expected_same_dir_rename_result;
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout4_disconnected_leafs, s1d1_mount_src_parent) {
+ /* clang-format on */
+ .allowed_s1d1 = LANDLOCK_ACCESS_FS_REFER |
+ LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_EXECUTE |
+ LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = EACCES,
+ .expected_same_dir_rename_result = EACCES,
+ .expected_rename_result = EACCES,
+ .expected_exchange_result = EACCES,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout4_disconnected_leafs, s1d2_mount_src_refer) {
+ /* clang-format on */
+ .allowed_s1d2 = LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_READ_FILE,
+ .expected_read_result = 0,
+ .expected_same_dir_rename_result = EACCES,
+ .expected_rename_result = EACCES,
+ .expected_exchange_result = EACCES,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout4_disconnected_leafs, s1d2_mount_src_create) {
+ /* clang-format on */
+ .allowed_s1d2 = LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = 0,
+ .expected_same_dir_rename_result = 0,
+ .expected_rename_result = EXDEV,
+ .expected_exchange_result = EXDEV,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout4_disconnected_leafs, s1d2_mount_src_rename) {
+ /* clang-format on */
+ .allowed_s1d2 = LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = EACCES,
+ .expected_same_dir_rename_result = 0,
+ .expected_rename_result = 0,
+ .expected_exchange_result = 0,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout4_disconnected_leafs, s1d31_s1d32_old_parent) {
+ /* clang-format on */
+ .allowed_s1d31 = LANDLOCK_ACCESS_FS_REFER |
+ LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_EXECUTE |
+ LANDLOCK_ACCESS_FS_MAKE_REG,
+ .allowed_s1d32 = LANDLOCK_ACCESS_FS_REFER |
+ LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_EXECUTE |
+ LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = EACCES,
+ .expected_same_dir_rename_result = EACCES,
+ .expected_rename_result = EACCES,
+ .expected_exchange_result = EACCES,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout4_disconnected_leafs, s1d41_s1d42_disconnected_refer) {
+ /* clang-format on */
+ .allowed_s1d41 = LANDLOCK_ACCESS_FS_REFER |
+ LANDLOCK_ACCESS_FS_READ_FILE,
+ .allowed_s1d42 = LANDLOCK_ACCESS_FS_REFER |
+ LANDLOCK_ACCESS_FS_READ_FILE,
+ .expected_read_result = 0,
+ .expected_same_dir_rename_result = EACCES,
+ .expected_rename_result = EACCES,
+ .expected_exchange_result = EACCES,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout4_disconnected_leafs, s1d41_s1d42_disconnected_create) {
+ /* clang-format on */
+ .allowed_s1d41 = LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_MAKE_REG,
+ .allowed_s1d42 = LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = 0,
+ .expected_same_dir_rename_result = 0,
+ .expected_rename_result = EXDEV,
+ .expected_exchange_result = EXDEV,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout4_disconnected_leafs, s1d41_s1d42_disconnected_rename_even) {
+ /* clang-format on */
+ .allowed_s1d41 = LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_MAKE_REG,
+ .allowed_s1d42 = LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = EACCES,
+ .expected_same_dir_rename_result = 0,
+ .expected_rename_result = 0,
+ .expected_exchange_result = 0,
+};
+
+/* The destination directory has more access rights. */
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout4_disconnected_leafs, s1d41_s1d42_disconnected_rename_more) {
+ /* clang-format on */
+ .allowed_s1d41 = LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_MAKE_REG,
+ .allowed_s1d42 = LANDLOCK_ACCESS_FS_REFER |
+ LANDLOCK_ACCESS_FS_MAKE_REG |
+ LANDLOCK_ACCESS_FS_EXECUTE,
+ .expected_read_result = EACCES,
+ .expected_same_dir_rename_result = 0,
+ /* Access denied. */
+ .expected_rename_result = EXDEV,
+ .expected_exchange_result = EXDEV,
+};
+
+/* The destination directory has fewer access rights. */
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout4_disconnected_leafs, s1d41_s1d42_disconnected_rename_less) {
+ /* clang-format on */
+ .allowed_s1d41 = LANDLOCK_ACCESS_FS_REFER |
+ LANDLOCK_ACCESS_FS_MAKE_REG |
+ LANDLOCK_ACCESS_FS_EXECUTE,
+ .allowed_s1d42 = LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = EACCES,
+ .expected_same_dir_rename_result = 0,
+ /* Access allowed. */
+ .expected_rename_result = 0,
+ .expected_exchange_result = EXDEV,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout4_disconnected_leafs, s2d1_mount_dst_parent_create) {
+ /* clang-format on */
+ .allowed_s2d1 = LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = 0,
+ .expected_same_dir_rename_result = 0,
+ .expected_rename_result = EXDEV,
+ .expected_exchange_result = EXDEV,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout4_disconnected_leafs, s2d1_mount_dst_parent_refer) {
+ /* clang-format on */
+ .allowed_s2d1 = LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_READ_FILE,
+ .expected_read_result = 0,
+ .expected_same_dir_rename_result = EACCES,
+ .expected_rename_result = EACCES,
+ .expected_exchange_result = EACCES,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout4_disconnected_leafs, s2d1_mount_dst_parent_mini) {
+ /* clang-format on */
+ .allowed_s2d1 = LANDLOCK_ACCESS_FS_REFER |
+ LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = 0,
+ .expected_same_dir_rename_result = 0,
+ .expected_rename_result = 0,
+ .expected_exchange_result = 0,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout4_disconnected_leafs, s2d2_covered_by_mount) {
+ /* clang-format on */
+ .allowed_s2d2 = LANDLOCK_ACCESS_FS_REFER |
+ LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_EXECUTE |
+ LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = EACCES,
+ .expected_same_dir_rename_result = EACCES,
+ .expected_rename_result = EACCES,
+ .expected_exchange_result = EACCES,
+};
+
+/* Tests collect_domain_accesses(). */
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout4_disconnected_leafs, s3d1_s4d1_new_parent_refer) {
+ /* clang-format on */
+ .allowed_s3d1 = LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_READ_FILE,
+ .allowed_s4d1 = LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_READ_FILE,
+ .expected_read_result = 0,
+ .expected_same_dir_rename_result = EACCES,
+ .expected_rename_result = EACCES,
+ .expected_exchange_result = EACCES,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout4_disconnected_leafs, s3d1_s4d1_new_parent_create) {
+ /* clang-format on */
+ .allowed_s3d1 = LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_MAKE_REG,
+ .allowed_s4d1 = LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = 0,
+ .expected_same_dir_rename_result = 0,
+ .expected_rename_result = EXDEV,
+ .expected_exchange_result = EXDEV,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout4_disconnected_leafs, s3d1_s4d1_disconnected_rename_even) {
+ /* clang-format on */
+ .allowed_s3d1 = LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_MAKE_REG,
+ .allowed_s4d1 = LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = EACCES,
+ .expected_same_dir_rename_result = 0,
+ .expected_rename_result = 0,
+ .expected_exchange_result = 0,
+};
+
+/* The destination directory has more access rights. */
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout4_disconnected_leafs, s3d1_s4d1_disconnected_rename_more) {
+ /* clang-format on */
+ .allowed_s3d1 = LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_MAKE_REG,
+ .allowed_s4d1 = LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_MAKE_REG |
+ LANDLOCK_ACCESS_FS_EXECUTE,
+ .expected_read_result = EACCES,
+ .expected_same_dir_rename_result = 0,
+ /* Access denied. */
+ .expected_rename_result = EXDEV,
+ .expected_exchange_result = EXDEV,
+};
+
+/* The destination directory has fewer access rights. */
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout4_disconnected_leafs, s3d1_s4d1_disconnected_rename_less) {
+ /* clang-format on */
+ .allowed_s3d1 = LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_MAKE_REG |
+ LANDLOCK_ACCESS_FS_EXECUTE,
+ .allowed_s4d1 = LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = EACCES,
+ .expected_same_dir_rename_result = 0,
+ /* Access allowed. */
+ .expected_rename_result = 0,
+ .expected_exchange_result = EXDEV,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout4_disconnected_leafs, f1_f2_f3) {
+ /* clang-format on */
+ .allowed_f1 = LANDLOCK_ACCESS_FS_READ_FILE,
+ .allowed_f2 = LANDLOCK_ACCESS_FS_READ_FILE,
+ .allowed_f3 = LANDLOCK_ACCESS_FS_READ_FILE,
+ .expected_read_result = 0,
+ .expected_same_dir_rename_result = EACCES,
+ .expected_rename_result = EACCES,
+ .expected_exchange_result = EACCES,
+};
+
+TEST_F_FORK(layout4_disconnected_leafs, read_rename_exchange)
+{
+ const __u64 handled_access =
+ LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_EXECUTE | LANDLOCK_ACCESS_FS_MAKE_REG;
+ const struct rule rules[] = {
+ {
+ .path = TMP_DIR "/s1d1",
+ .access = variant->allowed_s1d1,
+ },
+ {
+ .path = TMP_DIR "/s1d1/s1d2",
+ .access = variant->allowed_s1d2,
+ },
+ {
+ .path = TMP_DIR "/s1d1/s1d2/s1d31",
+ .access = variant->allowed_s1d31,
+ },
+ {
+ .path = TMP_DIR "/s1d1/s1d2/s1d32",
+ .access = variant->allowed_s1d32,
+ },
+ {
+ .path = TMP_DIR "/s1d1/s1d2/s1d31/s1d41",
+ .access = variant->allowed_s1d41,
+ },
+ {
+ .path = TMP_DIR "/s1d1/s1d2/s1d32/s1d42",
+ .access = variant->allowed_s1d42,
+ },
+ {
+ .path = TMP_DIR "/s1d1/s1d2/s1d31/s1d41/f1",
+ .access = variant->allowed_f1,
+ },
+ {
+ .path = TMP_DIR "/s1d1/s1d2/s1d31/s1d41/f2",
+ .access = variant->allowed_f2,
+ },
+ {
+ .path = TMP_DIR "/s1d1/s1d2/s1d32/s1d42/f3",
+ .access = variant->allowed_f3,
+ },
+ {
+ .path = TMP_DIR "/s2d1",
+ .access = variant->allowed_s2d1,
+ },
+ /* s2d2_fd */
+ {
+ .path = TMP_DIR "/s3d1",
+ .access = variant->allowed_s3d1,
+ },
+ {
+ .path = TMP_DIR "/s4d1",
+ .access = variant->allowed_s4d1,
+ },
+ {},
+ };
+ int ruleset_fd, s1d41_bind_fd, s1d42_bind_fd;
+
+ ruleset_fd = create_ruleset(_metadata, handled_access, rules);
+ ASSERT_LE(0, ruleset_fd);
+
+ /* Adds a rule for the covered directory. */
+ if (variant->allowed_s2d2) {
+ ASSERT_EQ(0, landlock_add_rule(
+ ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+ &(struct landlock_path_beneath_attr){
+ .parent_fd = self->s2d2_fd,
+ .allowed_access =
+ variant->allowed_s2d2,
+ },
+ 0));
+ }
+ EXPECT_EQ(0, close(self->s2d2_fd));
+
+ s1d41_bind_fd = open(TMP_DIR "/s2d1/s2d2/s1d31/s1d41",
+ O_DIRECTORY | O_PATH | O_CLOEXEC);
+ ASSERT_LE(0, s1d41_bind_fd);
+ s1d42_bind_fd = open(TMP_DIR "/s2d1/s2d2/s1d32/s1d42",
+ O_DIRECTORY | O_PATH | O_CLOEXEC);
+ ASSERT_LE(0, s1d42_bind_fd);
+
+ /* Checks the source and destination directories, then disconnects them. */
+ EXPECT_EQ(0, test_open_rel(s1d41_bind_fd, "..", O_DIRECTORY));
+ EXPECT_EQ(0, test_open_rel(s1d42_bind_fd, "..", O_DIRECTORY));
+ /* Renames to make it accessible through s3d1/s1d41 */
+ ASSERT_EQ(0, test_renameat(AT_FDCWD, TMP_DIR "/s1d1/s1d2/s1d31/s1d41",
+ AT_FDCWD, TMP_DIR "/s3d1/s1d41"));
+ /* Renames to make it accessible through s4d1/s1d42 */
+ ASSERT_EQ(0, test_renameat(AT_FDCWD, TMP_DIR "/s1d1/s1d2/s1d32/s1d42",
+ AT_FDCWD, TMP_DIR "/s4d1/s1d42"));
+ EXPECT_EQ(ENOENT, test_open_rel(s1d41_bind_fd, "..", O_DIRECTORY));
+ EXPECT_EQ(ENOENT, test_open_rel(s1d42_bind_fd, "..", O_DIRECTORY));
+
+ enforce_ruleset(_metadata, ruleset_fd);
+ EXPECT_EQ(0, close(ruleset_fd));
+
+ EXPECT_EQ(variant->expected_read_result,
+ test_open_rel(s1d41_bind_fd, "f1", O_RDONLY));
+
+ EXPECT_EQ(variant->expected_rename_result,
+ test_renameat(s1d41_bind_fd, "f1", s1d42_bind_fd, "f1"));
+ EXPECT_EQ(variant->expected_exchange_result,
+ test_exchangeat(s1d41_bind_fd, "f2", s1d42_bind_fd, "f3"));
+
+ EXPECT_EQ(variant->expected_same_dir_rename_result,
+ test_renameat(s1d42_bind_fd, "f4", s1d42_bind_fd, "f5"));
+}
+
+/*
+ * layout5_disconnected_branch before rename:
+ *
+ * tmp
+ * ├── s1d1
+ * │   └── s1d2 [source of the first bind mount]
+ * │   └── s1d3
+ * │   ├── s1d41
+ * │   │   ├── f1
+ * │   │   └── f2
+ * │   └── s1d42
+ * │   ├── f3
+ * │   └── f4
+ * ├── s2d1
+ * │   └── s2d2 [source of the second bind mount]
+ * │   └── s2d3
+ * │   └── s2d4 [first s1d2 bind mount]
+ * │   └── s1d3
+ * │   ├── s1d41
+ * │   │   ├── f1
+ * │   │   └── f2
+ * │   └── s1d42
+ * │   ├── f3
+ * │   └── f4
+ * ├── s3d1
+ * │   └── s3d2 [second s2d2 bind mount]
+ * │   └── s2d3
+ * │   └── s2d4 [first s1d2 bind mount]
+ * │   └── s1d3
+ * │   ├── s1d41
+ * │   │   ├── f1
+ * │   │   └── f2
+ * │   └── s1d42
+ * │   ├── f3
+ * │   └── f4
+ * └── s4d1
+ *
+ * After rename:
+ *
+ * tmp
+ * ├── s1d1
+ * │   └── s1d2 [source of the first bind mount]
+ * │   └── s1d3
+ * │   ├── s1d41
+ * │   │   ├── f1
+ * │   │   └── f2
+ * │   └── s1d42
+ * │   ├── f3
+ * │   └── f4
+ * ├── s2d1
+ * │   └── s2d2 [source of the second bind mount]
+ * ├── s3d1
+ * │   └── s3d2 [second s2d2 bind mount]
+ * └── s4d1
+ * └── s2d3 [renamed here]
+ * └── s2d4 [first s1d2 bind mount]
+ * └── s1d3
+ * ├── s1d41
+ * │   ├── f1
+ * │   └── f2
+ * └── s1d42
+ * ├── f3
+ * └── f4
+ *
+ * Decision path for access from the s3d1/s3d2/s2d3/s2d4/s1d3 file descriptor:
+ * 1. first bind mount: s1d3 -> s1d2
+ * 2. second bind mount: s2d3
+ * 3. tmp mount: s4d1 -> tmp [disconnected branch]
+ * 4. second bind mount: s2d2
+ * 5. tmp mount: s3d1 -> tmp
+ * 6. parent mounts: [...] -> /
+ *
+ * The s4d1 directory is evaluated even though it is not in the s2d2 mount.
+ */
+
+/* clang-format off */
+FIXTURE(layout5_disconnected_branch) {
+ int s2d4_fd, s3d2_fd;
+};
+/* clang-format on */
+
+FIXTURE_SETUP(layout5_disconnected_branch)
+{
+ prepare_layout(_metadata);
+
+ create_file(_metadata, TMP_DIR "/s1d1/s1d2/s1d3/s1d41/f1");
+ create_file(_metadata, TMP_DIR "/s1d1/s1d2/s1d3/s1d41/f2");
+ create_file(_metadata, TMP_DIR "/s1d1/s1d2/s1d3/s1d42/f3");
+ create_file(_metadata, TMP_DIR "/s1d1/s1d2/s1d3/s1d42/f4");
+ create_directory(_metadata, TMP_DIR "/s2d1/s2d2/s2d3/s2d4");
+ create_directory(_metadata, TMP_DIR "/s3d1/s3d2");
+ create_directory(_metadata, TMP_DIR "/s4d1");
+
+ self->s2d4_fd = open(TMP_DIR "/s2d1/s2d2/s2d3/s2d4",
+ O_DIRECTORY | O_PATH | O_CLOEXEC);
+ ASSERT_LE(0, self->s2d4_fd);
+
+ self->s3d2_fd =
+ open(TMP_DIR "/s3d1/s3d2", O_DIRECTORY | O_PATH | O_CLOEXEC);
+ ASSERT_LE(0, self->s3d2_fd);
+
+ set_cap(_metadata, CAP_SYS_ADMIN);
+ ASSERT_EQ(0, mount(TMP_DIR "/s1d1/s1d2", TMP_DIR "/s2d1/s2d2/s2d3/s2d4",
+ NULL, MS_BIND, NULL));
+ ASSERT_EQ(0, mount(TMP_DIR "/s2d1/s2d2", TMP_DIR "/s3d1/s3d2", NULL,
+ MS_BIND | MS_REC, NULL));
+ clear_cap(_metadata, CAP_SYS_ADMIN);
+}
+
+FIXTURE_TEARDOWN_PARENT(layout5_disconnected_branch)
+{
+ /* Bind mounts are handled by namespace lifetime. */
+
+ /* Removes files after renames. */
+ remove_path(TMP_DIR "/s1d1/s1d2/s1d3/s1d41/f1");
+ remove_path(TMP_DIR "/s1d1/s1d2/s1d3/s1d41/f2");
+ remove_path(TMP_DIR "/s1d1/s1d2/s1d3/s1d42/f1");
+ remove_path(TMP_DIR "/s1d1/s1d2/s1d3/s1d42/f3");
+ remove_path(TMP_DIR "/s1d1/s1d2/s1d3/s1d42/f4");
+ remove_path(TMP_DIR "/s1d1/s1d2/s1d3/s1d42/f5");
+
+ cleanup_layout(_metadata);
+}
+
+FIXTURE_VARIANT(layout5_disconnected_branch)
+{
+ /*
+ * Parent of all files. It should always be enforced when testing against
+ * files under the s1d41 or s1d42 disconnected directories.
+ */
+ const __u64 allowed_base;
+ /*
+ * Parent of the first bind mount source. It should always be ignored when
+ * testing against files under the s1d41 or s1d42 disconnected directories.
+ */
+ const __u64 allowed_s1d1;
+ const __u64 allowed_s1d2;
+ const __u64 allowed_s1d3;
+ const __u64 allowed_s2d1;
+ const __u64 allowed_s2d2;
+ const __u64 allowed_s2d3;
+ const __u64 allowed_s2d4;
+ const __u64 allowed_s3d1;
+ const __u64 allowed_s3d2;
+ const __u64 allowed_s4d1;
+
+ /* Expected result of the call to open([fd:s1d3]/s1d41/f1, O_RDONLY). */
+ const int expected_read_result;
+ /*
+ * Expected result of the call to renameat([fd:s1d3]/s1d41/f1,
+ * [fd:s1d3]/s1d42/f1).
+ */
+ const int expected_rename_result;
+ /*
+ * Expected result of the call to renameat([fd:s1d3]/s1d41/f2,
+ * [fd:s1d3]/s1d42/f3, RENAME_EXCHANGE).
+ */
+ const int expected_exchange_result;
+ /*
+ * Expected result of the call to renameat([fd:s1d3]/s1d42/f4,
+ * [fd:s1d3]/s1d42/f5).
+ */
+ const int expected_same_dir_rename_result;
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout5_disconnected_branch, s1d1_mount1_src_parent) {
+ /* clang-format on */
+ .allowed_s1d1 = LANDLOCK_ACCESS_FS_REFER |
+ LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_EXECUTE |
+ LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = EACCES,
+ .expected_same_dir_rename_result = EACCES,
+ .expected_rename_result = EACCES,
+ .expected_exchange_result = EACCES,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout5_disconnected_branch, s1d2_mount1_src_refer) {
+ /* clang-format on */
+ .allowed_s1d2 = LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_READ_FILE,
+ .expected_read_result = 0,
+ .expected_same_dir_rename_result = EACCES,
+ .expected_rename_result = EACCES,
+ .expected_exchange_result = EACCES,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout5_disconnected_branch, s1d2_mount1_src_create) {
+ /* clang-format on */
+ .allowed_s1d2 = LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = 0,
+ .expected_same_dir_rename_result = 0,
+ .expected_rename_result = EXDEV,
+ .expected_exchange_result = EXDEV,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout5_disconnected_branch, s1d2_mount1_src_rename) {
+ /* clang-format on */
+ .allowed_s1d2 = LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = EACCES,
+ .expected_same_dir_rename_result = 0,
+ .expected_rename_result = 0,
+ .expected_exchange_result = 0,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout5_disconnected_branch, s1d3_fd_refer) {
+ /* clang-format on */
+ .allowed_s1d3 = LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_READ_FILE,
+ .expected_read_result = 0,
+ .expected_same_dir_rename_result = EACCES,
+ .expected_rename_result = EACCES,
+ .expected_exchange_result = EACCES,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout5_disconnected_branch, s1d3_fd_create) {
+ /* clang-format on */
+ .allowed_s1d3 = LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = 0,
+ .expected_same_dir_rename_result = 0,
+ .expected_rename_result = EXDEV,
+ .expected_exchange_result = EXDEV,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout5_disconnected_branch, s1d3_fd_rename) {
+ /* clang-format on */
+ .allowed_s1d3 = LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = EACCES,
+ .expected_same_dir_rename_result = 0,
+ .expected_rename_result = 0,
+ .expected_exchange_result = 0,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout5_disconnected_branch, s1d3_fd_full) {
+ /* clang-format on */
+ .allowed_s1d3 = LANDLOCK_ACCESS_FS_REFER |
+ LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_EXECUTE |
+ LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = 0,
+ .expected_same_dir_rename_result = 0,
+ .expected_rename_result = 0,
+ .expected_exchange_result = 0,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout5_disconnected_branch, s2d1_mount2_src_parent) {
+ /* clang-format on */
+ .allowed_s2d1 = LANDLOCK_ACCESS_FS_REFER |
+ LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_EXECUTE |
+ LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = EACCES,
+ .expected_same_dir_rename_result = EACCES,
+ .expected_rename_result = EACCES,
+ .expected_exchange_result = EACCES,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout5_disconnected_branch, s2d2_mount2_src_refer) {
+ /* clang-format on */
+ .allowed_s2d2 = LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_READ_FILE,
+ .expected_read_result = 0,
+ .expected_same_dir_rename_result = EACCES,
+ .expected_rename_result = EACCES,
+ .expected_exchange_result = EACCES,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout5_disconnected_branch, s2d2_mount2_src_create) {
+ /* clang-format on */
+ .allowed_s2d2 = LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = 0,
+ .expected_same_dir_rename_result = 0,
+ .expected_rename_result = EXDEV,
+ .expected_exchange_result = EXDEV,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout5_disconnected_branch, s2d2_mount2_src_rename) {
+ /* clang-format on */
+ .allowed_s2d2 = LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = EACCES,
+ .expected_same_dir_rename_result = 0,
+ .expected_rename_result = 0,
+ .expected_exchange_result = 0,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout5_disconnected_branch, s2d3_mount1_dst_parent_refer) {
+ /* clang-format on */
+ .allowed_s2d3 = LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_READ_FILE,
+ .expected_read_result = 0,
+ .expected_same_dir_rename_result = EACCES,
+ .expected_rename_result = EACCES,
+ .expected_exchange_result = EACCES,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout5_disconnected_branch, s2d3_mount1_dst_parent_create) {
+ /* clang-format on */
+ .allowed_s2d3 = LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = 0,
+ .expected_same_dir_rename_result = 0,
+ .expected_rename_result = EXDEV,
+ .expected_exchange_result = EXDEV,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout5_disconnected_branch, s2d3_mount1_dst_parent_rename) {
+ /* clang-format on */
+ .allowed_s2d3 = LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = EACCES,
+ .expected_same_dir_rename_result = 0,
+ .expected_rename_result = 0,
+ .expected_exchange_result = 0,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout5_disconnected_branch, s2d4_mount1_dst) {
+ /* clang-format on */
+ .allowed_s2d4 = LANDLOCK_ACCESS_FS_REFER |
+ LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_EXECUTE |
+ LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = EACCES,
+ .expected_same_dir_rename_result = EACCES,
+ .expected_rename_result = EACCES,
+ .expected_exchange_result = EACCES,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout5_disconnected_branch, s3d1_mount2_dst_parent_refer) {
+ /* clang-format on */
+ .allowed_s3d1 = LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_READ_FILE,
+ .expected_read_result = 0,
+ .expected_same_dir_rename_result = EACCES,
+ .expected_rename_result = EACCES,
+ .expected_exchange_result = EACCES,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout5_disconnected_branch, s3d1_mount2_dst_parent_create) {
+ /* clang-format on */
+ .allowed_s3d1 = LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = 0,
+ .expected_same_dir_rename_result = 0,
+ .expected_rename_result = EXDEV,
+ .expected_exchange_result = EXDEV,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout5_disconnected_branch, s3d1_mount2_dst_parent_rename) {
+ /* clang-format on */
+ .allowed_s3d1 = LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = EACCES,
+ .expected_same_dir_rename_result = 0,
+ .expected_rename_result = 0,
+ .expected_exchange_result = 0,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout5_disconnected_branch, s3d2_mount1_dst) {
+ /* clang-format on */
+ .allowed_s3d2 = LANDLOCK_ACCESS_FS_REFER |
+ LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_EXECUTE |
+ LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = EACCES,
+ .expected_same_dir_rename_result = EACCES,
+ .expected_rename_result = EACCES,
+ .expected_exchange_result = EACCES,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout5_disconnected_branch, s4d1_rename_parent_refer) {
+ /* clang-format on */
+ .allowed_s4d1 = LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_READ_FILE,
+ .expected_read_result = 0,
+ .expected_same_dir_rename_result = EACCES,
+ .expected_rename_result = EACCES,
+ .expected_exchange_result = EACCES,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout5_disconnected_branch, s4d1_rename_parent_create) {
+ /* clang-format on */
+ .allowed_s4d1 = LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = 0,
+ .expected_same_dir_rename_result = 0,
+ .expected_rename_result = EXDEV,
+ .expected_exchange_result = EXDEV,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(layout5_disconnected_branch, s4d1_rename_parent_rename) {
+ /* clang-format on */
+ .allowed_s4d1 = LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_MAKE_REG,
+ .expected_read_result = EACCES,
+ .expected_same_dir_rename_result = 0,
+ .expected_rename_result = 0,
+ .expected_exchange_result = 0,
+};
+
+TEST_F_FORK(layout5_disconnected_branch, read_rename_exchange)
+{
+ const __u64 handled_access =
+ LANDLOCK_ACCESS_FS_REFER | LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_EXECUTE | LANDLOCK_ACCESS_FS_MAKE_REG;
+ const struct rule rules[] = {
+ {
+ .path = TMP_DIR "/s1d1",
+ .access = variant->allowed_s1d1,
+ },
+ {
+ .path = TMP_DIR "/s1d1/s1d2",
+ .access = variant->allowed_s1d2,
+ },
+ {
+ .path = TMP_DIR "/s1d1/s1d2/s1d3",
+ .access = variant->allowed_s1d3,
+ },
+ {
+ .path = TMP_DIR "/s2d1",
+ .access = variant->allowed_s2d1,
+ },
+ {
+ .path = TMP_DIR "/s2d1/s2d2",
+ .access = variant->allowed_s2d2,
+ },
+ {
+ .path = TMP_DIR "/s2d1/s2d2/s2d3",
+ .access = variant->allowed_s2d3,
+ },
+ /* s2d4_fd */
+ {
+ .path = TMP_DIR "/s3d1",
+ .access = variant->allowed_s3d1,
+ },
+ /* s3d2_fd */
+ {
+ .path = TMP_DIR "/s4d1",
+ .access = variant->allowed_s4d1,
+ },
+ {},
+ };
+ int ruleset_fd, s1d3_bind_fd;
+
+ ruleset_fd = create_ruleset(_metadata, handled_access, rules);
+ ASSERT_LE(0, ruleset_fd);
+
+ /* Adds rules for the covered directories. */
+ if (variant->allowed_s2d4) {
+ ASSERT_EQ(0, landlock_add_rule(
+ ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+ &(struct landlock_path_beneath_attr){
+ .parent_fd = self->s2d4_fd,
+ .allowed_access =
+ variant->allowed_s2d4,
+ },
+ 0));
+ }
+ EXPECT_EQ(0, close(self->s2d4_fd));
+
+ if (variant->allowed_s3d2) {
+ ASSERT_EQ(0, landlock_add_rule(
+ ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+ &(struct landlock_path_beneath_attr){
+ .parent_fd = self->s3d2_fd,
+ .allowed_access =
+ variant->allowed_s3d2,
+ },
+ 0));
+ }
+ EXPECT_EQ(0, close(self->s3d2_fd));
+
+ s1d3_bind_fd = open(TMP_DIR "/s3d1/s3d2/s2d3/s2d4/s1d3",
+ O_DIRECTORY | O_PATH | O_CLOEXEC);
+ ASSERT_LE(0, s1d3_bind_fd);
+
+ /* Disconnects and checks source and destination directories. */
+ EXPECT_EQ(0, test_open_rel(s1d3_bind_fd, "..", O_DIRECTORY));
+ EXPECT_EQ(0, test_open_rel(s1d3_bind_fd, "../..", O_DIRECTORY));
+ /* Renames to make it accessible through s3d1/s1d41 */
+ ASSERT_EQ(0, test_renameat(AT_FDCWD, TMP_DIR "/s2d1/s2d2/s2d3",
+ AT_FDCWD, TMP_DIR "/s4d1/s2d3"));
+ EXPECT_EQ(0, test_open_rel(s1d3_bind_fd, "..", O_DIRECTORY));
+ EXPECT_EQ(ENOENT, test_open_rel(s1d3_bind_fd, "../..", O_DIRECTORY));
+
+ enforce_ruleset(_metadata, ruleset_fd);
+ EXPECT_EQ(0, close(ruleset_fd));
+
+ EXPECT_EQ(variant->expected_read_result,
+ test_open_rel(s1d3_bind_fd, "s1d41/f1", O_RDONLY));
+
+ EXPECT_EQ(variant->expected_rename_result,
+ test_renameat(s1d3_bind_fd, "s1d41/f1", s1d3_bind_fd,
+ "s1d42/f1"));
+ EXPECT_EQ(variant->expected_exchange_result,
+ test_exchangeat(s1d3_bind_fd, "s1d41/f2", s1d3_bind_fd,
+ "s1d42/f3"));
+
+ EXPECT_EQ(variant->expected_same_dir_rename_result,
+ test_renameat(s1d3_bind_fd, "s1d42/f4", s1d3_bind_fd,
+ "s1d42/f5"));
+}
+
#define LOWER_BASE TMP_DIR "/lower"
#define LOWER_DATA LOWER_BASE "/data"
static const char lower_fl1[] = LOWER_DATA "/fl1";
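The disconnected-branch test above hinges on an open directory file descriptor remaining usable after an ancestor is renamed away; only walks through the severed bind-mount ancestry fail. A minimal standalone sketch of that underlying property, with made-up /tmp paths and no Landlock or bind mounts involved:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	int fd, child;

	mkdir("/tmp/a", 0700);
	mkdir("/tmp/a/b", 0700);

	fd = open("/tmp/a/b", O_DIRECTORY | O_PATH | O_CLOEXEC);
	if (fd < 0)
		return 1;

	/* Rename the ancestor out from under the open fd. */
	if (rename("/tmp/a", "/tmp/z"))
		return 1;

	/* The original path no longer resolves... */
	if (access("/tmp/a/b", F_OK))
		perror("access(/tmp/a/b)"); /* ENOENT */

	/* ...but fd-relative lookups keep working. */
	child = openat(fd, ".", O_DIRECTORY | O_CLOEXEC);
	if (child >= 0) {
		printf("fd still resolves after the rename\n");
		close(child);
	}

	close(fd);
	return 0;
}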
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 530390033929..f02cc8a2e4ae 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -199,6 +199,9 @@ clean: $(if $(TEST_GEN_MODS_DIR),clean_mods_dir)
# Build with _GNU_SOURCE by default
CFLAGS += -D_GNU_SOURCE=
+# Additional include paths needed by kselftest.h and local headers
+CFLAGS += -I$(top_srcdir)/tools/testing/selftests
+
# Enables to extend CFLAGS and LDFLAGS from command line, e.g.
# make USERCFLAGS=-Werror USERLDFLAGS=-static
CFLAGS += $(USERCFLAGS)
@@ -228,7 +231,10 @@ $(OUTPUT)/%:%.S
$(LINK.S) $^ $(LDLIBS) -o $@
endif
+# Extract the expected header directory
+khdr_output := $(patsubst %/usr/include,%,$(filter %/usr/include,$(KHDR_INCLUDES)))
+
headers:
- $(Q)$(MAKE) -C $(top_srcdir) headers
+ $(Q)$(MAKE) -f $(top_srcdir)/Makefile -C $(khdr_output) headers
.PHONY: run_tests all clean install emit_tests gen_mods_dir clean_mods_dir headers
diff --git a/tools/testing/selftests/livepatch/functions.sh b/tools/testing/selftests/livepatch/functions.sh
index 46991a029f7c..8ec0cb64ad94 100644
--- a/tools/testing/selftests/livepatch/functions.sh
+++ b/tools/testing/selftests/livepatch/functions.sh
@@ -10,7 +10,11 @@ SYSFS_KERNEL_DIR="/sys/kernel"
SYSFS_KLP_DIR="$SYSFS_KERNEL_DIR/livepatch"
SYSFS_DEBUG_DIR="$SYSFS_KERNEL_DIR/debug"
SYSFS_KPROBES_DIR="$SYSFS_DEBUG_DIR/kprobes"
-SYSFS_TRACING_DIR="$SYSFS_DEBUG_DIR/tracing"
+if [[ -e /sys/kernel/tracing/trace ]]; then
+ SYSFS_TRACING_DIR="$SYSFS_KERNEL_DIR/tracing"
+else
+ SYSFS_TRACING_DIR="$SYSFS_DEBUG_DIR/tracing"
+fi
# Kselftest framework requirement - SKIP code is 4
ksft_skip=4
diff --git a/tools/testing/selftests/liveupdate/.gitignore b/tools/testing/selftests/liveupdate/.gitignore
new file mode 100644
index 000000000000..661827083ab6
--- /dev/null
+++ b/tools/testing/selftests/liveupdate/.gitignore
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+*
+!/**/
+!*.c
+!*.h
+!*.sh
+!.gitignore
+!config
+!Makefile
diff --git a/tools/testing/selftests/liveupdate/Makefile b/tools/testing/selftests/liveupdate/Makefile
new file mode 100644
index 000000000000..080754787ede
--- /dev/null
+++ b/tools/testing/selftests/liveupdate/Makefile
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+LIB_C += luo_test_utils.c
+
+TEST_GEN_PROGS += liveupdate
+
+TEST_GEN_PROGS_EXTENDED += luo_kexec_simple
+TEST_GEN_PROGS_EXTENDED += luo_multi_session
+
+TEST_FILES += do_kexec.sh
+
+include ../lib.mk
+
+CFLAGS += $(KHDR_INCLUDES)
+CFLAGS += -Wall -O2 -Wno-unused-function
+CFLAGS += -MD
+
+LIB_O := $(patsubst %.c, $(OUTPUT)/%.o, $(LIB_C))
+TEST_O := $(patsubst %, %.o, $(TEST_GEN_PROGS))
+TEST_O += $(patsubst %, %.o, $(TEST_GEN_PROGS_EXTENDED))
+
+TEST_DEP_FILES := $(patsubst %.o, %.d, $(LIB_O))
+TEST_DEP_FILES += $(patsubst %.o, %.d, $(TEST_O))
+-include $(TEST_DEP_FILES)
+
+$(LIB_O): $(OUTPUT)/%.o: %.c
+ $(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
+
+$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(OUTPUT)/%: %.o $(LIB_O)
+ $(CC) $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) $(TARGET_ARCH) $< $(LIB_O) $(LDLIBS) -o $@
+
+EXTRA_CLEAN += $(LIB_O)
+EXTRA_CLEAN += $(TEST_O)
+EXTRA_CLEAN += $(TEST_DEP_FILES)
diff --git a/tools/testing/selftests/liveupdate/config b/tools/testing/selftests/liveupdate/config
new file mode 100644
index 000000000000..91d03f9a6a39
--- /dev/null
+++ b/tools/testing/selftests/liveupdate/config
@@ -0,0 +1,11 @@
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KEXEC_FILE=y
+CONFIG_KEXEC_HANDOVER=y
+CONFIG_KEXEC_HANDOVER_ENABLE_DEFAULT=y
+CONFIG_KEXEC_HANDOVER_DEBUGFS=y
+CONFIG_KEXEC_HANDOVER_DEBUG=y
+CONFIG_LIVEUPDATE=y
+CONFIG_LIVEUPDATE_TEST=y
+CONFIG_MEMFD_CREATE=y
+CONFIG_TMPFS=y
+CONFIG_SHMEM=y
diff --git a/tools/testing/selftests/liveupdate/do_kexec.sh b/tools/testing/selftests/liveupdate/do_kexec.sh
new file mode 100755
index 000000000000..3c7c6cafbef8
--- /dev/null
+++ b/tools/testing/selftests/liveupdate/do_kexec.sh
@@ -0,0 +1,16 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+set -e
+
+# Use $KERNEL and $INITRAMFS to pass a custom kernel and an optional initramfs.
+
+KERNEL="${KERNEL:-/boot/bzImage}"
+set -- -l -s --reuse-cmdline "$KERNEL"
+
+INITRAMFS="${INITRAMFS:-/boot/initramfs}"
+if [ -f "$INITRAMFS" ]; then
+ set -- "$@" --initrd="$INITRAMFS"
+fi
+
+kexec "$@"
+kexec -e
diff --git a/tools/testing/selftests/liveupdate/liveupdate.c b/tools/testing/selftests/liveupdate/liveupdate.c
new file mode 100644
index 000000000000..c2878e3d5ef9
--- /dev/null
+++ b/tools/testing/selftests/liveupdate/liveupdate.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (c) 2025, Google LLC.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ */
+
+/*
+ * Selftests for the Live Update Orchestrator.
+ * This test suite verifies the functionality and behavior of the
+ * /dev/liveupdate character device and its session management capabilities.
+ *
+ * Tests include:
+ * - Device access: basic open/close, and enforcement of exclusive access.
+ * - Session management: creation of unique sessions, and duplicate name detection.
+ * - Resource preservation: successfully preserving individual and multiple memfds,
+ * verifying contents remain accessible.
+ * - Complex multi-session scenarios involving mixed empty and populated files.
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+#include <linux/liveupdate.h>
+
+#include "../kselftest.h"
+#include "../kselftest_harness.h"
+
+#define LIVEUPDATE_DEV "/dev/liveupdate"
+
+FIXTURE(liveupdate_device) {
+ int fd1;
+ int fd2;
+};
+
+FIXTURE_SETUP(liveupdate_device)
+{
+ self->fd1 = -1;
+ self->fd2 = -1;
+}
+
+FIXTURE_TEARDOWN(liveupdate_device)
+{
+ if (self->fd1 >= 0)
+ close(self->fd1);
+ if (self->fd2 >= 0)
+ close(self->fd2);
+}
+
+/*
+ * Test Case: Basic Open and Close
+ *
+ * Verifies that the /dev/liveupdate device can be opened and subsequently
+ * closed without errors. Skips if the device does not exist.
+ */
+TEST_F(liveupdate_device, basic_open_close)
+{
+ self->fd1 = open(LIVEUPDATE_DEV, O_RDWR);
+
+ if (self->fd1 < 0 && errno == ENOENT)
+ SKIP(return, "%s does not exist.", LIVEUPDATE_DEV);
+
+ ASSERT_GE(self->fd1, 0);
+ ASSERT_EQ(close(self->fd1), 0);
+ self->fd1 = -1;
+}
+
+/*
+ * Test Case: Exclusive Open Enforcement
+ *
+ * Verifies that the /dev/liveupdate device can only be opened by one process
+ * at a time. It checks that a second attempt to open the device fails with
+ * the EBUSY error code.
+ */
+TEST_F(liveupdate_device, exclusive_open)
+{
+ self->fd1 = open(LIVEUPDATE_DEV, O_RDWR);
+
+ if (self->fd1 < 0 && errno == ENOENT)
+ SKIP(return, "%s does not exist.", LIVEUPDATE_DEV);
+
+ ASSERT_GE(self->fd1, 0);
+ self->fd2 = open(LIVEUPDATE_DEV, O_RDWR);
+ EXPECT_LT(self->fd2, 0);
+ EXPECT_EQ(errno, EBUSY);
+}
+
+/* Helper function to create a LUO session via ioctl. */
+static int create_session(int lu_fd, const char *name)
+{
+ struct liveupdate_ioctl_create_session args = {};
+
+ args.size = sizeof(args);
+ strncpy((char *)args.name, name, sizeof(args.name) - 1);
+
+ if (ioctl(lu_fd, LIVEUPDATE_IOCTL_CREATE_SESSION, &args))
+ return -errno;
+
+ return args.fd;
+}
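A hedged usage sketch for the helper above, building on the includes already in this file: the negative-errno return convention lets callers compare directly against error codes (the session name here is made up).

static void create_session_example(void)
{
	int luo_fd = open(LIVEUPDATE_DEV, O_RDWR);
	int session_fd;

	if (luo_fd < 0)
		return;

	session_fd = create_session(luo_fd, "example-session");
	if (session_fd == -EEXIST) {
		/* A session with this name already exists. */
	} else if (session_fd >= 0) {
		close(session_fd);
	}
	close(luo_fd);
}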
+
+/*
+ * Test Case: Create Duplicate Session
+ *
+ * Verifies that attempting to create two sessions with the same name fails
+ * on the second attempt with EEXIST.
+ */
+TEST_F(liveupdate_device, create_duplicate_session)
+{
+ int session_fd1, session_fd2;
+
+ self->fd1 = open(LIVEUPDATE_DEV, O_RDWR);
+ if (self->fd1 < 0 && errno == ENOENT)
+ SKIP(return, "%s does not exist", LIVEUPDATE_DEV);
+
+ ASSERT_GE(self->fd1, 0);
+
+ session_fd1 = create_session(self->fd1, "duplicate-session-test");
+ ASSERT_GE(session_fd1, 0);
+
+ session_fd2 = create_session(self->fd1, "duplicate-session-test");
+ EXPECT_LT(session_fd2, 0);
+ EXPECT_EQ(-session_fd2, EEXIST);
+
+ ASSERT_EQ(close(session_fd1), 0);
+}
+
+/*
+ * Test Case: Create Distinct Sessions
+ *
+ * Verifies that creating two sessions with different names succeeds.
+ */
+TEST_F(liveupdate_device, create_distinct_sessions)
+{
+ int session_fd1, session_fd2;
+
+ self->fd1 = open(LIVEUPDATE_DEV, O_RDWR);
+ if (self->fd1 < 0 && errno == ENOENT)
+ SKIP(return, "%s does not exist", LIVEUPDATE_DEV);
+
+ ASSERT_GE(self->fd1, 0);
+
+ session_fd1 = create_session(self->fd1, "distinct-session-1");
+ ASSERT_GE(session_fd1, 0);
+
+ session_fd2 = create_session(self->fd1, "distinct-session-2");
+ ASSERT_GE(session_fd2, 0);
+
+ ASSERT_EQ(close(session_fd1), 0);
+ ASSERT_EQ(close(session_fd2), 0);
+}
+
+static int preserve_fd(int session_fd, int fd_to_preserve, __u64 token)
+{
+ struct liveupdate_session_preserve_fd args = {};
+
+ args.size = sizeof(args);
+ args.fd = fd_to_preserve;
+ args.token = token;
+
+ if (ioctl(session_fd, LIVEUPDATE_SESSION_PRESERVE_FD, &args))
+ return -errno;
+
+ return 0;
+}
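For context, the retrieval direction of the same ABI: a sketch of the inverse helper, mirroring restore_and_verify_memfd() in luo_test_utils.c further below, where the token is the caller-chosen key under which the fd is looked up after kexec.

static int retrieve_fd(int session_fd, __u64 token)
{
	struct liveupdate_session_retrieve_fd args = {};

	args.size = sizeof(args);
	args.token = token;

	if (ioctl(session_fd, LIVEUPDATE_SESSION_RETRIEVE_FD, &args))
		return -errno;

	/* On success the kernel fills in args.fd with a fresh descriptor. */
	return args.fd;
}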
+
+/*
+ * Test Case: Preserve MemFD
+ *
+ * Verifies that a valid memfd can be successfully preserved in a session and
+ * that its contents remain intact after the preservation call.
+ */
+TEST_F(liveupdate_device, preserve_memfd)
+{
+ const char *test_str = "hello liveupdate";
+ char read_buf[64] = {};
+ int session_fd, mem_fd;
+
+ self->fd1 = open(LIVEUPDATE_DEV, O_RDWR);
+ if (self->fd1 < 0 && errno == ENOENT)
+ SKIP(return, "%s does not exist", LIVEUPDATE_DEV);
+ ASSERT_GE(self->fd1, 0);
+
+ session_fd = create_session(self->fd1, "preserve-memfd-test");
+ ASSERT_GE(session_fd, 0);
+
+ mem_fd = memfd_create("test-memfd", 0);
+ ASSERT_GE(mem_fd, 0);
+
+ ASSERT_EQ(write(mem_fd, test_str, strlen(test_str)), strlen(test_str));
+ ASSERT_EQ(preserve_fd(session_fd, mem_fd, 0x1234), 0);
+ ASSERT_EQ(close(session_fd), 0);
+
+ ASSERT_EQ(lseek(mem_fd, 0, SEEK_SET), 0);
+ ASSERT_EQ(read(mem_fd, read_buf, sizeof(read_buf)), strlen(test_str));
+ ASSERT_STREQ(read_buf, test_str);
+ ASSERT_EQ(close(mem_fd), 0);
+}
+
+/*
+ * Test Case: Preserve Multiple MemFDs
+ *
+ * Verifies that multiple memfds can be preserved in a single session,
+ * each with a unique token, and that their contents remain distinct and
+ * correct after preservation.
+ */
+TEST_F(liveupdate_device, preserve_multiple_memfds)
+{
+ const char *test_str1 = "data for memfd one";
+ const char *test_str2 = "data for memfd two";
+ char read_buf[64] = {};
+ int session_fd, mem_fd1, mem_fd2;
+
+ self->fd1 = open(LIVEUPDATE_DEV, O_RDWR);
+ if (self->fd1 < 0 && errno == ENOENT)
+ SKIP(return, "%s does not exist", LIVEUPDATE_DEV);
+ ASSERT_GE(self->fd1, 0);
+
+ session_fd = create_session(self->fd1, "preserve-multi-memfd-test");
+ ASSERT_GE(session_fd, 0);
+
+ mem_fd1 = memfd_create("test-memfd-1", 0);
+ ASSERT_GE(mem_fd1, 0);
+ mem_fd2 = memfd_create("test-memfd-2", 0);
+ ASSERT_GE(mem_fd2, 0);
+
+ ASSERT_EQ(write(mem_fd1, test_str1, strlen(test_str1)), strlen(test_str1));
+ ASSERT_EQ(write(mem_fd2, test_str2, strlen(test_str2)), strlen(test_str2));
+
+ ASSERT_EQ(preserve_fd(session_fd, mem_fd1, 0xAAAA), 0);
+ ASSERT_EQ(preserve_fd(session_fd, mem_fd2, 0xBBBB), 0);
+
+ memset(read_buf, 0, sizeof(read_buf));
+ ASSERT_EQ(lseek(mem_fd1, 0, SEEK_SET), 0);
+ ASSERT_EQ(read(mem_fd1, read_buf, sizeof(read_buf)), strlen(test_str1));
+ ASSERT_STREQ(read_buf, test_str1);
+
+ memset(read_buf, 0, sizeof(read_buf));
+ ASSERT_EQ(lseek(mem_fd2, 0, SEEK_SET), 0);
+ ASSERT_EQ(read(mem_fd2, read_buf, sizeof(read_buf)), strlen(test_str2));
+ ASSERT_STREQ(read_buf, test_str2);
+
+ ASSERT_EQ(close(mem_fd1), 0);
+ ASSERT_EQ(close(mem_fd2), 0);
+ ASSERT_EQ(close(session_fd), 0);
+}
+
+/*
+ * Test Case: Preserve Complex Scenario
+ *
+ * Verifies a more complex scenario with multiple sessions and a mix of empty
+ * and non-empty memfds distributed across them.
+ */
+TEST_F(liveupdate_device, preserve_complex_scenario)
+{
+ const char *data1 = "data for session 1";
+ const char *data2 = "data for session 2";
+ char read_buf[64] = {};
+ int session_fd1, session_fd2;
+ int mem_fd_data1, mem_fd_empty1, mem_fd_data2, mem_fd_empty2;
+
+ self->fd1 = open(LIVEUPDATE_DEV, O_RDWR);
+ if (self->fd1 < 0 && errno == ENOENT)
+ SKIP(return, "%s does not exist", LIVEUPDATE_DEV);
+ ASSERT_GE(self->fd1, 0);
+
+ session_fd1 = create_session(self->fd1, "complex-session-1");
+ ASSERT_GE(session_fd1, 0);
+ session_fd2 = create_session(self->fd1, "complex-session-2");
+ ASSERT_GE(session_fd2, 0);
+
+ mem_fd_data1 = memfd_create("data1", 0);
+ ASSERT_GE(mem_fd_data1, 0);
+ ASSERT_EQ(write(mem_fd_data1, data1, strlen(data1)), strlen(data1));
+
+ mem_fd_empty1 = memfd_create("empty1", 0);
+ ASSERT_GE(mem_fd_empty1, 0);
+
+ mem_fd_data2 = memfd_create("data2", 0);
+ ASSERT_GE(mem_fd_data2, 0);
+ ASSERT_EQ(write(mem_fd_data2, data2, strlen(data2)), strlen(data2));
+
+ mem_fd_empty2 = memfd_create("empty2", 0);
+ ASSERT_GE(mem_fd_empty2, 0);
+
+ ASSERT_EQ(preserve_fd(session_fd1, mem_fd_data1, 0x1111), 0);
+ ASSERT_EQ(preserve_fd(session_fd1, mem_fd_empty1, 0x2222), 0);
+ ASSERT_EQ(preserve_fd(session_fd2, mem_fd_data2, 0x3333), 0);
+ ASSERT_EQ(preserve_fd(session_fd2, mem_fd_empty2, 0x4444), 0);
+
+ ASSERT_EQ(lseek(mem_fd_data1, 0, SEEK_SET), 0);
+ ASSERT_EQ(read(mem_fd_data1, read_buf, sizeof(read_buf)), strlen(data1));
+ ASSERT_STREQ(read_buf, data1);
+
+ memset(read_buf, 0, sizeof(read_buf));
+ ASSERT_EQ(lseek(mem_fd_data2, 0, SEEK_SET), 0);
+ ASSERT_EQ(read(mem_fd_data2, read_buf, sizeof(read_buf)), strlen(data2));
+ ASSERT_STREQ(read_buf, data2);
+
+ ASSERT_EQ(lseek(mem_fd_empty1, 0, SEEK_SET), 0);
+ ASSERT_EQ(read(mem_fd_empty1, read_buf, sizeof(read_buf)), 0);
+
+ ASSERT_EQ(lseek(mem_fd_empty2, 0, SEEK_SET), 0);
+ ASSERT_EQ(read(mem_fd_empty2, read_buf, sizeof(read_buf)), 0);
+
+ ASSERT_EQ(close(mem_fd_data1), 0);
+ ASSERT_EQ(close(mem_fd_empty1), 0);
+ ASSERT_EQ(close(mem_fd_data2), 0);
+ ASSERT_EQ(close(mem_fd_empty2), 0);
+ ASSERT_EQ(close(session_fd1), 0);
+ ASSERT_EQ(close(session_fd2), 0);
+}
+
+/*
+ * Test Case: Preserve Unsupported File Descriptor
+ *
+ * Verifies that attempting to preserve a file descriptor that does not have
+ * a registered Live Update handler fails gracefully.
+ * Uses /dev/null as a representative of a file type (character device)
+ * that is not supported by the orchestrator.
+ */
+TEST_F(liveupdate_device, preserve_unsupported_fd)
+{
+ int session_fd, unsupported_fd;
+ int ret;
+
+ self->fd1 = open(LIVEUPDATE_DEV, O_RDWR);
+ if (self->fd1 < 0 && errno == ENOENT)
+ SKIP(return, "%s does not exist", LIVEUPDATE_DEV);
+ ASSERT_GE(self->fd1, 0);
+
+ session_fd = create_session(self->fd1, "unsupported-fd-test");
+ ASSERT_GE(session_fd, 0);
+
+ unsupported_fd = open("/dev/null", O_RDWR);
+ ASSERT_GE(unsupported_fd, 0);
+
+ ret = preserve_fd(session_fd, unsupported_fd, 0xDEAD);
+ EXPECT_EQ(ret, -ENOENT);
+
+ ASSERT_EQ(close(unsupported_fd), 0);
+ ASSERT_EQ(close(session_fd), 0);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/liveupdate/luo_kexec_simple.c b/tools/testing/selftests/liveupdate/luo_kexec_simple.c
new file mode 100644
index 000000000000..d7ac1f3dc4cb
--- /dev/null
+++ b/tools/testing/selftests/liveupdate/luo_kexec_simple.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright (c) 2025, Google LLC.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ *
+ * A simple selftest to validate the end-to-end lifecycle of a LUO session
+ * across a single kexec reboot.
+ */
+
+#include "luo_test_utils.h"
+
+#define TEST_SESSION_NAME "test-session"
+#define TEST_MEMFD_TOKEN 0x1A
+#define TEST_MEMFD_DATA "hello kexec world"
+
+/* Constants for the state-tracking mechanism, specific to this test file. */
+#define STATE_SESSION_NAME "kexec_simple_state"
+#define STATE_MEMFD_TOKEN 999
+
+/* Stage 1: Executed before the kexec reboot. */
+static void run_stage_1(int luo_fd)
+{
+ int session_fd;
+
+ ksft_print_msg("[STAGE 1] Starting pre-kexec setup...\n");
+
+ ksft_print_msg("[STAGE 1] Creating state file for next stage (2)...\n");
+ create_state_file(luo_fd, STATE_SESSION_NAME, STATE_MEMFD_TOKEN, 2);
+
+ ksft_print_msg("[STAGE 1] Creating session '%s' and preserving memfd...\n",
+ TEST_SESSION_NAME);
+ session_fd = luo_create_session(luo_fd, TEST_SESSION_NAME);
+ if (session_fd < 0)
+ fail_exit("luo_create_session for '%s'", TEST_SESSION_NAME);
+
+ if (create_and_preserve_memfd(session_fd, TEST_MEMFD_TOKEN,
+ TEST_MEMFD_DATA) < 0) {
+ fail_exit("create_and_preserve_memfd for token %#x",
+ TEST_MEMFD_TOKEN);
+ }
+
+ close(luo_fd);
+ daemonize_and_wait();
+}
+
+/* Stage 2: Executed after the kexec reboot. */
+static void run_stage_2(int luo_fd, int state_session_fd)
+{
+ int session_fd, mfd, stage;
+
+ ksft_print_msg("[STAGE 2] Starting post-kexec verification...\n");
+
+ restore_and_read_stage(state_session_fd, STATE_MEMFD_TOKEN, &stage);
+ if (stage != 2)
+ fail_exit("Expected stage 2, but state file contains %d", stage);
+
+ ksft_print_msg("[STAGE 2] Retrieving session '%s'...\n", TEST_SESSION_NAME);
+ session_fd = luo_retrieve_session(luo_fd, TEST_SESSION_NAME);
+ if (session_fd < 0)
+ fail_exit("luo_retrieve_session for '%s'", TEST_SESSION_NAME);
+
+ ksft_print_msg("[STAGE 2] Restoring and verifying memfd (token %#x)...\n",
+ TEST_MEMFD_TOKEN);
+ mfd = restore_and_verify_memfd(session_fd, TEST_MEMFD_TOKEN,
+ TEST_MEMFD_DATA);
+ if (mfd < 0)
+ fail_exit("restore_and_verify_memfd for token %#x", TEST_MEMFD_TOKEN);
+ close(mfd);
+
+ ksft_print_msg("[STAGE 2] Test data verified successfully.\n");
+ ksft_print_msg("[STAGE 2] Finalizing test session...\n");
+ if (luo_session_finish(session_fd) < 0)
+ fail_exit("luo_session_finish for test session");
+ close(session_fd);
+
+ ksft_print_msg("[STAGE 2] Finalizing state session...\n");
+ if (luo_session_finish(state_session_fd) < 0)
+ fail_exit("luo_session_finish for state session");
+ close(state_session_fd);
+
+ ksft_print_msg("\n--- SIMPLE KEXEC TEST PASSED ---\n");
+}
+
+int main(int argc, char *argv[])
+{
+ return luo_test(argc, argv, STATE_SESSION_NAME,
+ run_stage_1, run_stage_2);
+}
diff --git a/tools/testing/selftests/liveupdate/luo_multi_session.c b/tools/testing/selftests/liveupdate/luo_multi_session.c
new file mode 100644
index 000000000000..0ee2d795beef
--- /dev/null
+++ b/tools/testing/selftests/liveupdate/luo_multi_session.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright (c) 2025, Google LLC.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ *
+ * A selftest to validate the end-to-end lifecycle of multiple LUO sessions
+ * across a kexec reboot, including empty sessions and sessions with multiple
+ * files.
+ */
+
+#include "luo_test_utils.h"
+
+#define SESSION_EMPTY_1 "multi-test-empty-1"
+#define SESSION_EMPTY_2 "multi-test-empty-2"
+#define SESSION_FILES_1 "multi-test-files-1"
+#define SESSION_FILES_2 "multi-test-files-2"
+
+#define MFD1_TOKEN 0x1001
+#define MFD2_TOKEN 0x2002
+#define MFD3_TOKEN 0x3003
+
+#define MFD1_DATA "Data for session files 1"
+#define MFD2_DATA "First file for session files 2"
+#define MFD3_DATA "Second file for session files 2"
+
+#define STATE_SESSION_NAME "kexec_multi_state"
+#define STATE_MEMFD_TOKEN 998
+
+/* Stage 1: Executed before the kexec reboot. */
+static void run_stage_1(int luo_fd)
+{
+ int s_empty1_fd, s_empty2_fd, s_files1_fd, s_files2_fd;
+
+ ksft_print_msg("[STAGE 1] Starting pre-kexec setup for multi-session test...\n");
+
+ ksft_print_msg("[STAGE 1] Creating state file for next stage (2)...\n");
+ create_state_file(luo_fd, STATE_SESSION_NAME, STATE_MEMFD_TOKEN, 2);
+
+ ksft_print_msg("[STAGE 1] Creating empty sessions '%s' and '%s'...\n",
+ SESSION_EMPTY_1, SESSION_EMPTY_2);
+ s_empty1_fd = luo_create_session(luo_fd, SESSION_EMPTY_1);
+ if (s_empty1_fd < 0)
+ fail_exit("luo_create_session for '%s'", SESSION_EMPTY_1);
+
+ s_empty2_fd = luo_create_session(luo_fd, SESSION_EMPTY_2);
+ if (s_empty2_fd < 0)
+ fail_exit("luo_create_session for '%s'", SESSION_EMPTY_2);
+
+ ksft_print_msg("[STAGE 1] Creating session '%s' with one memfd...\n",
+ SESSION_FILES_1);
+
+ s_files1_fd = luo_create_session(luo_fd, SESSION_FILES_1);
+ if (s_files1_fd < 0)
+ fail_exit("luo_create_session for '%s'", SESSION_FILES_1);
+ if (create_and_preserve_memfd(s_files1_fd, MFD1_TOKEN, MFD1_DATA) < 0) {
+ fail_exit("create_and_preserve_memfd for token %#x",
+ MFD1_TOKEN);
+ }
+
+ ksft_print_msg("[STAGE 1] Creating session '%s' with two memfds...\n",
+ SESSION_FILES_2);
+
+ s_files2_fd = luo_create_session(luo_fd, SESSION_FILES_2);
+ if (s_files2_fd < 0)
+ fail_exit("luo_create_session for '%s'", SESSION_FILES_2);
+ if (create_and_preserve_memfd(s_files2_fd, MFD2_TOKEN, MFD2_DATA) < 0) {
+ fail_exit("create_and_preserve_memfd for token %#x",
+ MFD2_TOKEN);
+ }
+ if (create_and_preserve_memfd(s_files2_fd, MFD3_TOKEN, MFD3_DATA) < 0) {
+ fail_exit("create_and_preserve_memfd for token %#x",
+ MFD3_TOKEN);
+ }
+
+ close(luo_fd);
+ daemonize_and_wait();
+}
+
+/* Stage 2: Executed after the kexec reboot. */
+static void run_stage_2(int luo_fd, int state_session_fd)
+{
+ int s_empty1_fd, s_empty2_fd, s_files1_fd, s_files2_fd;
+ int mfd1, mfd2, mfd3, stage;
+
+ ksft_print_msg("[STAGE 2] Starting post-kexec verification...\n");
+
+ restore_and_read_stage(state_session_fd, STATE_MEMFD_TOKEN, &stage);
+ if (stage != 2) {
+ fail_exit("Expected stage 2, but state file contains %d",
+ stage);
+ }
+
+ ksft_print_msg("[STAGE 2] Retrieving all sessions...\n");
+ s_empty1_fd = luo_retrieve_session(luo_fd, SESSION_EMPTY_1);
+ if (s_empty1_fd < 0)
+ fail_exit("luo_retrieve_session for '%s'", SESSION_EMPTY_1);
+
+ s_empty2_fd = luo_retrieve_session(luo_fd, SESSION_EMPTY_2);
+ if (s_empty2_fd < 0)
+ fail_exit("luo_retrieve_session for '%s'", SESSION_EMPTY_2);
+
+ s_files1_fd = luo_retrieve_session(luo_fd, SESSION_FILES_1);
+ if (s_files1_fd < 0)
+ fail_exit("luo_retrieve_session for '%s'", SESSION_FILES_1);
+
+ s_files2_fd = luo_retrieve_session(luo_fd, SESSION_FILES_2);
+ if (s_files2_fd < 0)
+ fail_exit("luo_retrieve_session for '%s'", SESSION_FILES_2);
+
+ ksft_print_msg("[STAGE 2] Verifying contents of session '%s'...\n",
+ SESSION_FILES_1);
+ mfd1 = restore_and_verify_memfd(s_files1_fd, MFD1_TOKEN, MFD1_DATA);
+ if (mfd1 < 0)
+ fail_exit("restore_and_verify_memfd for token %#x", MFD1_TOKEN);
+ close(mfd1);
+
+ ksft_print_msg("[STAGE 2] Verifying contents of session '%s'...\n",
+ SESSION_FILES_2);
+
+ mfd2 = restore_and_verify_memfd(s_files2_fd, MFD2_TOKEN, MFD2_DATA);
+ if (mfd2 < 0)
+ fail_exit("restore_and_verify_memfd for token %#x", MFD2_TOKEN);
+ close(mfd2);
+
+ mfd3 = restore_and_verify_memfd(s_files2_fd, MFD3_TOKEN, MFD3_DATA);
+ if (mfd3 < 0)
+ fail_exit("restore_and_verify_memfd for token %#x", MFD3_TOKEN);
+ close(mfd3);
+
+ ksft_print_msg("[STAGE 2] Test data verified successfully.\n");
+
+ ksft_print_msg("[STAGE 2] Finalizing all test sessions...\n");
+ if (luo_session_finish(s_empty1_fd) < 0)
+ fail_exit("luo_session_finish for '%s'", SESSION_EMPTY_1);
+ close(s_empty1_fd);
+
+ if (luo_session_finish(s_empty2_fd) < 0)
+ fail_exit("luo_session_finish for '%s'", SESSION_EMPTY_2);
+ close(s_empty2_fd);
+
+ if (luo_session_finish(s_files1_fd) < 0)
+ fail_exit("luo_session_finish for '%s'", SESSION_FILES_1);
+ close(s_files1_fd);
+
+ if (luo_session_finish(s_files2_fd) < 0)
+ fail_exit("luo_session_finish for '%s'", SESSION_FILES_2);
+ close(s_files2_fd);
+
+ ksft_print_msg("[STAGE 2] Finalizing state session...\n");
+ if (luo_session_finish(state_session_fd) < 0)
+ fail_exit("luo_session_finish for state session");
+ close(state_session_fd);
+
+ ksft_print_msg("\n--- MULTI-SESSION KEXEC TEST PASSED ---\n");
+}
+
+int main(int argc, char *argv[])
+{
+ return luo_test(argc, argv, STATE_SESSION_NAME,
+ run_stage_1, run_stage_2);
+}
diff --git a/tools/testing/selftests/liveupdate/luo_test_utils.c b/tools/testing/selftests/liveupdate/luo_test_utils.c
new file mode 100644
index 000000000000..3c8721c505df
--- /dev/null
+++ b/tools/testing/selftests/liveupdate/luo_test_utils.c
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright (c) 2025, Google LLC.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ */
+
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <getopt.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <sys/syscall.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <stdarg.h>
+
+#include "luo_test_utils.h"
+
+int luo_open_device(void)
+{
+ return open(LUO_DEVICE, O_RDWR);
+}
+
+int luo_create_session(int luo_fd, const char *name)
+{
+ struct liveupdate_ioctl_create_session arg = { .size = sizeof(arg) };
+
+ snprintf((char *)arg.name, LIVEUPDATE_SESSION_NAME_LENGTH, "%.*s",
+ LIVEUPDATE_SESSION_NAME_LENGTH - 1, name);
+
+ if (ioctl(luo_fd, LIVEUPDATE_IOCTL_CREATE_SESSION, &arg) < 0)
+ return -errno;
+
+ return arg.fd;
+}
+
+int luo_retrieve_session(int luo_fd, const char *name)
+{
+ struct liveupdate_ioctl_retrieve_session arg = { .size = sizeof(arg) };
+
+ snprintf((char *)arg.name, LIVEUPDATE_SESSION_NAME_LENGTH, "%.*s",
+ LIVEUPDATE_SESSION_NAME_LENGTH - 1, name);
+
+ if (ioctl(luo_fd, LIVEUPDATE_IOCTL_RETRIEVE_SESSION, &arg) < 0)
+ return -errno;
+
+ return arg.fd;
+}
+
+int create_and_preserve_memfd(int session_fd, int token, const char *data)
+{
+ struct liveupdate_session_preserve_fd arg = { .size = sizeof(arg) };
+ long page_size = sysconf(_SC_PAGE_SIZE);
+ void *map = MAP_FAILED;
+ int mfd = -1, ret = -1;
+
+ mfd = memfd_create("test_mfd", 0);
+ if (mfd < 0)
+ return -errno;
+
+ if (ftruncate(mfd, page_size) != 0)
+ goto out;
+
+ map = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, mfd, 0);
+ if (map == MAP_FAILED)
+ goto out;
+
+ snprintf(map, page_size, "%s", data);
+ munmap(map, page_size);
+
+ arg.fd = mfd;
+ arg.token = token;
+ if (ioctl(session_fd, LIVEUPDATE_SESSION_PRESERVE_FD, &arg) < 0)
+ goto out;
+
+ ret = 0;
+out:
+ if (ret != 0 && errno != 0)
+ ret = -errno;
+ if (mfd >= 0)
+ close(mfd);
+ return ret;
+}
+
+int restore_and_verify_memfd(int session_fd, int token,
+ const char *expected_data)
+{
+ struct liveupdate_session_retrieve_fd arg = { .size = sizeof(arg) };
+ long page_size = sysconf(_SC_PAGE_SIZE);
+ void *map = MAP_FAILED;
+ int mfd = -1, ret = -1;
+
+ arg.token = token;
+ if (ioctl(session_fd, LIVEUPDATE_SESSION_RETRIEVE_FD, &arg) < 0)
+ return -errno;
+ mfd = arg.fd;
+
+ map = mmap(NULL, page_size, PROT_READ, MAP_SHARED, mfd, 0);
+ if (map == MAP_FAILED)
+ goto out;
+
+ if (expected_data && strcmp(expected_data, map) != 0) {
+ ksft_print_msg("Data mismatch! Expected '%s', Got '%s'\n",
+ expected_data, (char *)map);
+ ret = -EINVAL;
+ goto out_munmap;
+ }
+
+ ret = mfd;
+out_munmap:
+ munmap(map, page_size);
+out:
+ if (ret < 0 && errno != 0)
+ ret = -errno;
+ if (ret < 0 && mfd >= 0)
+ close(mfd);
+ return ret;
+}
+
+int luo_session_finish(int session_fd)
+{
+ struct liveupdate_session_finish arg = { .size = sizeof(arg) };
+
+ if (ioctl(session_fd, LIVEUPDATE_SESSION_FINISH, &arg) < 0)
+ return -errno;
+
+ return 0;
+}
+
+void create_state_file(int luo_fd, const char *session_name, int token,
+ int next_stage)
+{
+ char buf[32];
+ int state_session_fd;
+
+ state_session_fd = luo_create_session(luo_fd, session_name);
+ if (state_session_fd < 0)
+ fail_exit("luo_create_session for state tracking");
+
+ snprintf(buf, sizeof(buf), "%d", next_stage);
+ if (create_and_preserve_memfd(state_session_fd, token, buf) < 0)
+ fail_exit("create_and_preserve_memfd for state tracking");
+
+ /*
+ * Do not close the session FD: closing it would unpreserve the files
+ * kept alive by this state session.
+ */
+}
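A hedged sketch of the fd-lifetime rule the comment above encodes, assuming the stage-1/stage-2 split used by the kexec tests in this series (the token value is made up):

static void state_lifetime_example(int luo_fd)
{
	int sfd = luo_create_session(luo_fd, "example-state");

	if (sfd < 0)
		return;

	if (create_and_preserve_memfd(sfd, 1, "2") < 0) {
		/* On failure, closing the session simply unpreserves it. */
		close(sfd);
		return;
	}

	/* On success, keep sfd open across kexec (the forked child holds it)... */
	daemonize_and_wait();
	/* ...stage 2 later retrieves the session and calls luo_session_finish(). */
}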
+
+void restore_and_read_stage(int state_session_fd, int token, int *stage)
+{
+ char buf[32] = {0};
+ int mfd;
+
+ mfd = restore_and_verify_memfd(state_session_fd, token, NULL);
+ if (mfd < 0)
+ fail_exit("failed to restore state memfd");
+
+ if (read(mfd, buf, sizeof(buf) - 1) < 0)
+ fail_exit("failed to read state mfd");
+
+ *stage = atoi(buf);
+
+ close(mfd);
+}
+
+void daemonize_and_wait(void)
+{
+ pid_t pid;
+
+ ksft_print_msg("[STAGE 1] Forking persistent child to hold sessions...\n");
+
+ pid = fork();
+ if (pid < 0)
+ fail_exit("fork failed");
+
+ if (pid > 0) {
+ ksft_print_msg("[STAGE 1] Child PID: %d. Resources are pinned.\n", pid);
+ ksft_print_msg("[STAGE 1] You may now perform kexec reboot.\n");
+ exit(EXIT_SUCCESS);
+ }
+
+ /* Detach from terminal so closing the window doesn't kill us */
+ if (setsid() < 0)
+ fail_exit("setsid failed");
+
+ close(STDIN_FILENO);
+ close(STDOUT_FILENO);
+ close(STDERR_FILENO);
+
+ /* Change directory to / so we don't keep any filesystem busy. */
+ if (chdir("/") < 0)
+ exit(EXIT_FAILURE);
+
+ while (1)
+ sleep(60);
+}
+
+static int parse_stage_args(int argc, char *argv[])
+{
+ static struct option long_options[] = {
+ {"stage", required_argument, 0, 's'},
+ {0, 0, 0, 0}
+ };
+ int option_index = 0;
+ int stage = 1;
+ int opt;
+
+ optind = 1;
+ while ((opt = getopt_long(argc, argv, "s:", long_options, &option_index)) != -1) {
+ switch (opt) {
+ case 's':
+ stage = atoi(optarg);
+ if (stage != 1 && stage != 2)
+ fail_exit("Invalid stage argument");
+ break;
+ default:
+ fail_exit("Unknown argument");
+ }
+ }
+ return stage;
+}
+
+int luo_test(int argc, char *argv[],
+ const char *state_session_name,
+ luo_test_stage1_fn stage1,
+ luo_test_stage2_fn stage2)
+{
+ int target_stage = parse_stage_args(argc, argv);
+ int luo_fd = luo_open_device();
+ int state_session_fd;
+ int detected_stage;
+
+ if (luo_fd < 0) {
+ ksft_exit_skip("Failed to open %s. Is the luo module loaded?\n",
+ LUO_DEVICE);
+ }
+
+ state_session_fd = luo_retrieve_session(luo_fd, state_session_name);
+ if (state_session_fd == -ENOENT)
+ detected_stage = 1;
+ else if (state_session_fd >= 0)
+ detected_stage = 2;
+ else
+ fail_exit("Failed to check for state session");
+
+ if (target_stage != detected_stage) {
+ ksft_exit_fail_msg("Stage mismatch: requested --stage %d, but system is in stage %d.\n"
+ "(State session %s: %s)\n",
+ target_stage, detected_stage, state_session_name,
+ (detected_stage == 2) ? "EXISTS" : "MISSING");
+ }
+
+ if (target_stage == 1)
+ stage1(luo_fd);
+ else
+ stage2(luo_fd, state_session_fd);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/liveupdate/luo_test_utils.h b/tools/testing/selftests/liveupdate/luo_test_utils.h
new file mode 100644
index 000000000000..90099bf49577
--- /dev/null
+++ b/tools/testing/selftests/liveupdate/luo_test_utils.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2025, Google LLC.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ *
+ * Utility functions for LUO kselftests.
+ */
+
+#ifndef LUO_TEST_UTILS_H
+#define LUO_TEST_UTILS_H
+
+#include <errno.h>
+#include <string.h>
+#include <linux/liveupdate.h>
+#include "../kselftest.h"
+
+#define LUO_DEVICE "/dev/liveupdate"
+
+#define fail_exit(fmt, ...) \
+ ksft_exit_fail_msg("[%s:%d] " fmt " (errno: %s)\n", \
+ __func__, __LINE__, ##__VA_ARGS__, strerror(errno))
+
+int luo_open_device(void);
+int luo_create_session(int luo_fd, const char *name);
+int luo_retrieve_session(int luo_fd, const char *name);
+int luo_session_finish(int session_fd);
+
+int create_and_preserve_memfd(int session_fd, int token, const char *data);
+int restore_and_verify_memfd(int session_fd, int token, const char *expected_data);
+
+void create_state_file(int luo_fd, const char *session_name, int token,
+ int next_stage);
+void restore_and_read_stage(int state_session_fd, int token, int *stage);
+
+void daemonize_and_wait(void);
+
+typedef void (*luo_test_stage1_fn)(int luo_fd);
+typedef void (*luo_test_stage2_fn)(int luo_fd, int state_session_fd);
+
+int luo_test(int argc, char *argv[], const char *state_session_name,
+ luo_test_stage1_fn stage1, luo_test_stage2_fn stage2);
+
+#endif /* LUO_TEST_UTILS_H */
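Taken together, the header above suggests a common shape for new LUO kexec tests. A hypothetical skeleton (the names and token are made up), mirroring luo_kexec_simple.c:

#include "luo_test_utils.h"

#define MY_STATE_SESSION "kexec_example_state"
#define MY_STATE_TOKEN 997

static void stage1(int luo_fd)
{
	create_state_file(luo_fd, MY_STATE_SESSION, MY_STATE_TOKEN, 2);
	/* ... create sessions and preserve fds here ... */
	close(luo_fd);
	daemonize_and_wait();
}

static void stage2(int luo_fd, int state_session_fd)
{
	int stage;

	restore_and_read_stage(state_session_fd, MY_STATE_TOKEN, &stage);
	if (stage != 2)
		fail_exit("expected stage 2, got %d", stage);

	/* ... retrieve sessions and verify preserved fds here ... */

	if (luo_session_finish(state_session_fd) < 0)
		fail_exit("luo_session_finish for state session");
	close(state_session_fd);
}

int main(int argc, char *argv[])
{
	return luo_test(argc, argv, MY_STATE_SESSION, stage1, stage2);
}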
diff --git a/tools/testing/selftests/lkdtm/config b/tools/testing/selftests/lkdtm/config
index 7afe05e8c4d7..bd09fdaf53e0 100644
--- a/tools/testing/selftests/lkdtm/config
+++ b/tools/testing/selftests/lkdtm/config
@@ -2,7 +2,7 @@ CONFIG_LKDTM=y
CONFIG_DEBUG_LIST=y
CONFIG_SLAB_FREELIST_HARDENED=y
CONFIG_FORTIFY_SOURCE=y
-CONFIG_GCC_PLUGIN_STACKLEAK=y
+CONFIG_KSTACK_ERASE=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT=y
CONFIG_INIT_ON_FREE_DEFAULT_ON=y
diff --git a/tools/testing/selftests/lsm/lsm_get_self_attr_test.c b/tools/testing/selftests/lsm/lsm_get_self_attr_test.c
index df215e4aa63f..60caf8528f81 100644
--- a/tools/testing/selftests/lsm/lsm_get_self_attr_test.c
+++ b/tools/testing/selftests/lsm/lsm_get_self_attr_test.c
@@ -13,7 +13,7 @@
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#include "common.h"
static struct lsm_ctx *next_ctx(struct lsm_ctx *ctxp)
diff --git a/tools/testing/selftests/lsm/lsm_list_modules_test.c b/tools/testing/selftests/lsm/lsm_list_modules_test.c
index 1cc8a977c711..54d59044ace1 100644
--- a/tools/testing/selftests/lsm/lsm_list_modules_test.c
+++ b/tools/testing/selftests/lsm/lsm_list_modules_test.c
@@ -12,7 +12,7 @@
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#include "common.h"
TEST(size_null_lsm_list_modules)
diff --git a/tools/testing/selftests/lsm/lsm_set_self_attr_test.c b/tools/testing/selftests/lsm/lsm_set_self_attr_test.c
index 732e89fe99c0..dcb6f8aa772e 100644
--- a/tools/testing/selftests/lsm/lsm_set_self_attr_test.c
+++ b/tools/testing/selftests/lsm/lsm_set_self_attr_test.c
@@ -12,7 +12,7 @@
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#include "common.h"
TEST(ctx_null_lsm_set_self_attr)
diff --git a/tools/testing/selftests/media_tests/media_device_open.c b/tools/testing/selftests/media_tests/media_device_open.c
index 93183a37b133..4396bf2273a4 100644
--- a/tools/testing/selftests/media_tests/media_device_open.c
+++ b/tools/testing/selftests/media_tests/media_device_open.c
@@ -34,7 +34,7 @@
#include <sys/stat.h>
#include <linux/media.h>
-#include "../kselftest.h"
+#include "kselftest.h"
int main(int argc, char **argv)
{
diff --git a/tools/testing/selftests/media_tests/media_device_test.c b/tools/testing/selftests/media_tests/media_device_test.c
index 4b9953359e40..6e4a8090a0eb 100644
--- a/tools/testing/selftests/media_tests/media_device_test.c
+++ b/tools/testing/selftests/media_tests/media_device_test.c
@@ -39,7 +39,7 @@
#include <time.h>
#include <linux/media.h>
-#include "../kselftest.h"
+#include "kselftest.h"
int main(int argc, char **argv)
{
diff --git a/tools/testing/selftests/membarrier/membarrier_test_impl.h b/tools/testing/selftests/membarrier/membarrier_test_impl.h
index af89855adb7b..f6d7c44b2288 100644
--- a/tools/testing/selftests/membarrier/membarrier_test_impl.h
+++ b/tools/testing/selftests/membarrier/membarrier_test_impl.h
@@ -7,7 +7,7 @@
#include <string.h>
#include <pthread.h>
-#include "../kselftest.h"
+#include "kselftest.h"
static int registrations;
diff --git a/tools/testing/selftests/mincore/mincore_selftest.c b/tools/testing/selftests/mincore/mincore_selftest.c
index 17ed3e9917ca..cdd022c1c497 100644
--- a/tools/testing/selftests/mincore/mincore_selftest.c
+++ b/tools/testing/selftests/mincore/mincore_selftest.c
@@ -15,8 +15,8 @@
#include <string.h>
#include <fcntl.h>
-#include "../kselftest.h"
-#include "../kselftest_harness.h"
+#include "kselftest.h"
+#include "kselftest_harness.h"
/* Default test file size: 4MB */
#define MB (1UL << 20)
diff --git a/tools/testing/selftests/mm/.gitignore b/tools/testing/selftests/mm/.gitignore
index 824266982aa3..c2a8586e51a1 100644
--- a/tools/testing/selftests/mm/.gitignore
+++ b/tools/testing/selftests/mm/.gitignore
@@ -21,6 +21,7 @@ on-fault-limit
transhuge-stress
pagemap_ioctl
pfnmap
+process_madv
*.tmp*
protection_keys
protection_keys_32
@@ -38,9 +39,6 @@ map_fixed_noreplace
write_to_hugetlbfs
hmm-tests
memfd_secret
-hugetlb_dio
-pkey_sighandler_tests_32
-pkey_sighandler_tests_64
soft-dirty
split_huge_page_test
ksm_tests
@@ -60,3 +58,5 @@ pkey_sighandler_tests_32
pkey_sighandler_tests_64
guard-regions
merge
+prctl_thp_disable
+rmap
diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile
index ae6f994d3add..eaf9312097f7 100644
--- a/tools/testing/selftests/mm/Makefile
+++ b/tools/testing/selftests/mm/Makefile
@@ -34,6 +34,7 @@ endif
MAKEFLAGS += --no-builtin-rules
CFLAGS = -Wall -O2 -I $(top_srcdir) $(EXTRA_CFLAGS) $(KHDR_INCLUDES) $(TOOLS_INCLUDES)
+CFLAGS += -Wunreachable-code
LDLIBS = -lrt -lpthread -lm
# Some distributions (such as Ubuntu) configure GCC so that _FORTIFY_SOURCE is
@@ -85,6 +86,8 @@ TEST_GEN_FILES += mseal_test
TEST_GEN_FILES += on-fault-limit
TEST_GEN_FILES += pagemap_ioctl
TEST_GEN_FILES += pfnmap
+TEST_GEN_FILES += process_madv
+TEST_GEN_FILES += prctl_thp_disable
TEST_GEN_FILES += thuge-gen
TEST_GEN_FILES += transhuge-stress
TEST_GEN_FILES += uffd-stress
@@ -100,6 +103,7 @@ TEST_GEN_FILES += hugetlb_dio
TEST_GEN_FILES += droppable
TEST_GEN_FILES += guard-regions
TEST_GEN_FILES += merge
+TEST_GEN_FILES += rmap
ifneq ($(ARCH),arm64)
TEST_GEN_FILES += soft-dirty
@@ -227,6 +231,8 @@ $(OUTPUT)/ksm_tests: LDLIBS += -lnuma
$(OUTPUT)/migration: LDLIBS += -lnuma
+$(OUTPUT)/rmap: LDLIBS += -lnuma
+
local_config.mk local_config.h: check_config.sh
/bin/sh ./check_config.sh $(CC)
diff --git a/tools/testing/selftests/mm/compaction_test.c b/tools/testing/selftests/mm/compaction_test.c
index 9bc4591c7b16..30209c40b697 100644
--- a/tools/testing/selftests/mm/compaction_test.c
+++ b/tools/testing/selftests/mm/compaction_test.c
@@ -16,7 +16,7 @@
#include <unistd.h>
#include <string.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#define MAP_SIZE_MB 100
#define MAP_SIZE (MAP_SIZE_MB * 1024 * 1024)
diff --git a/tools/testing/selftests/mm/config b/tools/testing/selftests/mm/config
index a28baa536332..deba93379c80 100644
--- a/tools/testing/selftests/mm/config
+++ b/tools/testing/selftests/mm/config
@@ -8,3 +8,6 @@ CONFIG_GUP_TEST=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ANON_VMA_NAME=y
+CONFIG_FTRACE=y
+CONFIG_PROFILING=y
+CONFIG_UPROBES=y
diff --git a/tools/testing/selftests/mm/cow.c b/tools/testing/selftests/mm/cow.c
index dbbcc5eb3dce..accfd198dbda 100644
--- a/tools/testing/selftests/mm/cow.c
+++ b/tools/testing/selftests/mm/cow.c
@@ -27,7 +27,7 @@
#endif /* LOCAL_CONFIG_HAVE_LIBURING */
#include "../../../../mm/gup_test.h"
-#include "../kselftest.h"
+#include "kselftest.h"
#include "vm_util.h"
#include "thp_settings.h"
@@ -41,11 +41,6 @@ static size_t hugetlbsizes[10];
static int gup_fd;
static bool has_huge_zeropage;
-static int sz2ord(size_t size)
-{
- return __builtin_ctzll(size / pagesize);
-}
-
static int detect_thp_sizes(size_t sizes[], int max)
{
int count = 0;
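The local sz2ord() disappears here while its call sites gain a pagesize argument, so a shared copy presumably lands in a common header. A sketch consistent with the removed body and the new two-argument call sites:

static inline int sz2ord(size_t size, size_t pagesize)
{
	return __builtin_ctzll(size / pagesize);
}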
@@ -57,7 +52,7 @@ static int detect_thp_sizes(size_t sizes[], int max)
if (!pmdsize)
return 0;
- orders = 1UL << sz2ord(pmdsize);
+ orders = 1UL << sz2ord(pmdsize, pagesize);
orders |= thp_supported_orders();
for (i = 0; orders && count < max; i++) {
@@ -72,31 +67,6 @@ static int detect_thp_sizes(size_t sizes[], int max)
return count;
}
-static void detect_huge_zeropage(void)
-{
- int fd = open("/sys/kernel/mm/transparent_hugepage/use_zero_page",
- O_RDONLY);
- size_t enabled = 0;
- char buf[15];
- int ret;
-
- if (fd < 0)
- return;
-
- ret = pread(fd, buf, sizeof(buf), 0);
- if (ret > 0 && ret < sizeof(buf)) {
- buf[ret] = 0;
-
- enabled = strtoul(buf, NULL, 10);
- if (enabled == 1) {
- has_huge_zeropage = true;
- ksft_print_msg("[INFO] huge zeropage is enabled\n");
- }
- }
-
- close(fd);
-}
-
static bool range_is_swapped(void *addr, size_t size)
{
for (; size; addr += pagesize, size -= pagesize)
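Similarly, detect_huge_zeropage() is removed here and later called as a bool-returning helper (see the main() hunk below), so it presumably moves to shared code. A sketch assuming it keeps the removed sysfs probe:

static bool detect_huge_zeropage(void)
{
	int fd = open("/sys/kernel/mm/transparent_hugepage/use_zero_page",
		      O_RDONLY);
	char buf[15];
	bool enabled = false;
	int ret;

	if (fd < 0)
		return false;

	ret = pread(fd, buf, sizeof(buf) - 1, 0);
	if (ret > 0) {
		buf[ret] = 0;
		enabled = strtoul(buf, NULL, 10) == 1;
	}

	close(fd);
	return enabled;
}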
@@ -113,11 +83,11 @@ struct comm_pipes {
static int setup_comm_pipes(struct comm_pipes *comm_pipes)
{
if (pipe(comm_pipes->child_ready) < 0) {
- ksft_perror("pipe()");
+ ksft_perror("pipe() failed");
return -errno;
}
if (pipe(comm_pipes->parent_ready) < 0) {
- ksft_perror("pipe()");
+ ksft_perror("pipe() failed");
close(comm_pipes->child_ready[0]);
close(comm_pipes->child_ready[1]);
return -errno;
@@ -268,8 +238,10 @@ static void do_test_cow_in_parent(char *mem, size_t size, bool do_mprotect,
* fail because (a) harder to fix and (b) nobody really cares.
* Flag them as expected failure for now.
*/
+ ksft_print_msg("Leak from parent into child\n");
log_test_result(KSFT_XFAIL);
} else {
+ ksft_print_msg("Leak from parent into child\n");
log_test_result(KSFT_FAIL);
}
close_comm_pipes:
@@ -332,7 +304,7 @@ static void do_test_vmsplice_in_parent(char *mem, size_t size,
if (before_fork) {
transferred = vmsplice(fds[1], &iov, 1, 0);
if (transferred <= 0) {
- ksft_print_msg("vmsplice() failed\n");
+ ksft_perror("vmsplice() failed\n");
log_test_result(KSFT_FAIL);
goto close_pipe;
}
@@ -397,8 +369,10 @@ static void do_test_vmsplice_in_parent(char *mem, size_t size,
* fail because (a) harder to fix and (b) nobody really cares.
* Flag them as expected failure for now.
*/
+ ksft_print_msg("Leak from child into parent\n");
log_test_result(KSFT_XFAIL);
} else {
+ ksft_print_msg("Leak from child into parent\n");
log_test_result(KSFT_FAIL);
}
close_pipe:
@@ -562,7 +536,7 @@ static void do_test_iouring(char *mem, size_t size, bool use_fork)
while (total < size) {
cur = pread(fd, tmp + total, size - total, total);
if (cur < 0) {
- ksft_print_msg("pread() failed\n");
+ ksft_perror("pread() failed\n");
log_test_result(KSFT_FAIL);
goto quit_child;
}
@@ -570,10 +544,12 @@ static void do_test_iouring(char *mem, size_t size, bool use_fork)
}
/* Finally, check if we read what we expected. */
- if (!memcmp(mem, tmp, size))
+ if (!memcmp(mem, tmp, size)) {
log_test_result(KSFT_PASS);
- else
+ } else {
+ ksft_print_msg("Longtom R/W pin is not reliable\n");
log_test_result(KSFT_FAIL);
+ }
quit_child:
if (use_fork) {
@@ -628,7 +604,7 @@ static void do_test_ro_pin(char *mem, size_t size, enum ro_pin_test test,
tmp = malloc(size);
if (!tmp) {
- ksft_print_msg("malloc() failed\n");
+ ksft_perror("malloc() failed\n");
log_test_result(KSFT_FAIL);
return;
}
@@ -725,10 +701,12 @@ static void do_test_ro_pin(char *mem, size_t size, enum ro_pin_test test,
ksft_perror("PIN_LONGTERM_TEST_READ failed");
log_test_result(KSFT_FAIL);
} else {
- if (!memcmp(mem, tmp, size))
+ if (!memcmp(mem, tmp, size)) {
log_test_result(KSFT_PASS);
- else
+ } else {
+ ksft_print_msg("Longterm R/O pin is not reliable\n");
log_test_result(KSFT_FAIL);
+ }
}
ret = ioctl(gup_fd, PIN_LONGTERM_TEST_STOP);
@@ -1233,8 +1211,8 @@ static void run_anon_test_case(struct test_case const *test_case)
size_t size = thpsizes[i];
struct thp_settings settings = *thp_current_settings();
- settings.hugepages[sz2ord(pmdsize)].enabled = THP_NEVER;
- settings.hugepages[sz2ord(size)].enabled = THP_ALWAYS;
+ settings.hugepages[sz2ord(pmdsize, pagesize)].enabled = THP_NEVER;
+ settings.hugepages[sz2ord(size, pagesize)].enabled = THP_ALWAYS;
thp_push_settings(&settings);
if (size == pmdsize) {
@@ -1417,10 +1395,12 @@ static void do_test_anon_thp_collapse(char *mem, size_t size,
else
ret = -EINVAL;
- if (!ret)
+ if (!ret) {
log_test_result(KSFT_PASS);
- else
+ } else {
+ ksft_print_msg("Leak from parent into child\n");
log_test_result(KSFT_FAIL);
+ }
close_comm_pipes:
close_comm_pipes(&comm_pipes);
}
@@ -1528,10 +1508,12 @@ static void test_cow(char *mem, const char *smem, size_t size)
memset(mem, 0xff, size);
/* See if we still read the old values via the other mapping. */
- if (!memcmp(smem, old, size))
+ if (!memcmp(smem, old, size)) {
log_test_result(KSFT_PASS);
- else
+ } else {
+ ksft_print_msg("Other mapping modified\n");
log_test_result(KSFT_FAIL);
+ }
free(old);
}
@@ -1547,7 +1529,7 @@ static void test_ro_fast_pin(char *mem, const char *smem, size_t size)
static void run_with_zeropage(non_anon_test_fn fn, const char *desc)
{
- char *mem, *smem, tmp;
+ char *mem, *smem;
log_test_start("%s ... with shared zeropage", desc);
@@ -1567,8 +1549,8 @@ static void run_with_zeropage(non_anon_test_fn fn, const char *desc)
}
/* Read from the page to populate the shared zeropage. */
- tmp = *mem + *smem;
- asm volatile("" : "+r" (tmp));
+ FORCE_READ(*mem);
+ FORCE_READ(*smem);
fn(mem, smem, pagesize);
munmap:
@@ -1579,7 +1561,7 @@ munmap:
static void run_with_huge_zeropage(non_anon_test_fn fn, const char *desc)
{
- char *mem, *smem, *mmap_mem, *mmap_smem, tmp;
+ char *mem, *smem, *mmap_mem, *mmap_smem;
size_t mmap_size;
int ret;
@@ -1613,13 +1595,13 @@ static void run_with_huge_zeropage(non_anon_test_fn fn, const char *desc)
smem = (char *)(((uintptr_t)mmap_smem + pmdsize) & ~(pmdsize - 1));
ret = madvise(mem, pmdsize, MADV_HUGEPAGE);
- if (ret != 0) {
+ if (ret) {
ksft_perror("madvise()");
log_test_result(KSFT_FAIL);
goto munmap;
}
- ret |= madvise(smem, pmdsize, MADV_HUGEPAGE);
- if (ret != 0) {
+ ret = madvise(smem, pmdsize, MADV_HUGEPAGE);
+ if (ret) {
ksft_perror("madvise()");
log_test_result(KSFT_FAIL);
goto munmap;
@@ -1630,8 +1612,8 @@ static void run_with_huge_zeropage(non_anon_test_fn fn, const char *desc)
* the first sub-page and test if we get another sub-page populated
* automatically.
*/
- tmp = *mem + *smem;
- asm volatile("" : "+r" (tmp));
+ FORCE_READ(*mem);
+ FORCE_READ(*smem);
if (!pagemap_is_populated(pagemap_fd, mem + pagesize) ||
!pagemap_is_populated(pagemap_fd, smem + pagesize)) {
ksft_test_result_skip("Did not get THPs populated\n");
@@ -1647,7 +1629,7 @@ munmap:
static void run_with_memfd(non_anon_test_fn fn, const char *desc)
{
- char *mem, *smem, tmp;
+ char *mem, *smem;
int fd;
log_test_start("%s ... with memfd", desc);
@@ -1681,8 +1663,8 @@ static void run_with_memfd(non_anon_test_fn fn, const char *desc)
}
/* Fault the page in. */
- tmp = *mem + *smem;
- asm volatile("" : "+r" (tmp));
+ FORCE_READ(*mem);
+ FORCE_READ(*smem);
fn(mem, smem, pagesize);
munmap:
@@ -1695,7 +1677,7 @@ close:
static void run_with_tmpfile(non_anon_test_fn fn, const char *desc)
{
- char *mem, *smem, tmp;
+ char *mem, *smem;
FILE *file;
int fd;
@@ -1737,8 +1719,8 @@ static void run_with_tmpfile(non_anon_test_fn fn, const char *desc)
}
/* Fault the page in. */
- tmp = *mem + *smem;
- asm volatile("" : "+r" (tmp));
+ FORCE_READ(*mem);
+ FORCE_READ(*smem);
fn(mem, smem, pagesize);
munmap:
@@ -1753,7 +1735,7 @@ static void run_with_memfd_hugetlb(non_anon_test_fn fn, const char *desc,
size_t hugetlbsize)
{
int flags = MFD_HUGETLB;
- char *mem, *smem, tmp;
+ char *mem, *smem;
int fd;
log_test_start("%s ... with memfd hugetlb (%zu kB)", desc,
@@ -1791,8 +1773,8 @@ static void run_with_memfd_hugetlb(non_anon_test_fn fn, const char *desc,
}
/* Fault the page in. */
- tmp = *mem + *smem;
- asm volatile("" : "+r" (tmp));
+ FORCE_READ(*mem);
+ FORCE_READ(*smem);
fn(mem, smem, hugetlbsize);
munmap:
@@ -1881,7 +1863,7 @@ int main(int argc, char **argv)
if (pmdsize) {
/* Only if THP is supported. */
thp_read_settings(&default_settings);
- default_settings.hugepages[sz2ord(pmdsize)].enabled = THP_INHERIT;
+ default_settings.hugepages[sz2ord(pmdsize, pagesize)].enabled = THP_INHERIT;
thp_save_settings();
thp_push_settings(&default_settings);
@@ -1891,7 +1873,7 @@ int main(int argc, char **argv)
}
nr_hugetlbsizes = detect_hugetlb_page_sizes(hugetlbsizes,
ARRAY_SIZE(hugetlbsizes));
- detect_huge_zeropage();
+ has_huge_zeropage = detect_huge_zeropage();
ksft_set_plan(ARRAY_SIZE(anon_test_cases) * tests_per_anon_test_case() +
ARRAY_SIZE(anon_thp_test_cases) * tests_per_anon_thp_test_case() +
diff --git a/tools/testing/selftests/mm/droppable.c b/tools/testing/selftests/mm/droppable.c
index f3d9ecf96890..44940f75c461 100644
--- a/tools/testing/selftests/mm/droppable.c
+++ b/tools/testing/selftests/mm/droppable.c
@@ -13,7 +13,7 @@
#include <sys/mman.h>
#include <linux/mman.h>
-#include "../kselftest.h"
+#include "kselftest.h"
int main(int argc, char *argv[])
{
diff --git a/tools/testing/selftests/mm/guard-regions.c b/tools/testing/selftests/mm/guard-regions.c
index 93af3d3760f9..dbd21d66d383 100644
--- a/tools/testing/selftests/mm/guard-regions.c
+++ b/tools/testing/selftests/mm/guard-regions.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#define _GNU_SOURCE
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#include <asm-generic/mman.h> /* Force the import of the tools version. */
#include <assert.h>
#include <errno.h>
@@ -36,13 +36,6 @@ static volatile sig_atomic_t signal_jump_set;
static sigjmp_buf signal_jmp_buf;
/*
- * Ignore the checkpatch warning, we must read from x but don't want to do
- * anything with it in order to trigger a read page fault. We therefore must use
- * volatile to stop the compiler from optimising this away.
- */
-#define FORCE_READ(x) (*(volatile typeof(x) *)x)
-
-/*
* How is the test backing the mapping being tested?
*/
enum backing_type {
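The pointer-based FORCE_READ definition is dropped here in favor of a shared one that this diff does not show. Judging by the updated call sites such as FORCE_READ(*ptr) here and FORCE_READ(*mem) in cow.c above, the shared macro presumably takes an lvalue, along the lines of:

/* assumption: shared lvalue-based definition, e.g. in vm_util.h */
#define FORCE_READ(x) (*(volatile typeof(x) *)&(x))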
@@ -101,6 +94,7 @@ static void *mmap_(FIXTURE_DATA(guard_regions) * self,
case ANON_BACKED:
flags |= MAP_PRIVATE | MAP_ANON;
fd = -1;
+ offset = 0;
break;
case SHMEM_BACKED:
case LOCAL_FILE_BACKED:
@@ -152,7 +146,7 @@ static bool try_access_buf(char *ptr, bool write)
if (write)
*ptr = 'x';
else
- FORCE_READ(ptr);
+ FORCE_READ(*ptr);
}
signal_jump_set = false;
@@ -267,6 +261,54 @@ static bool is_buf_eq(char *buf, size_t size, char chr)
return true;
}
+/*
+ * Some file systems have issues with merging due to changing merge-sensitive
+ * parameters in the .mmap callback, and prior to .mmap_prepare being
+ * implemented everywhere, this now results in an unexpected failure to
+ * merge (e.g., overlayfs).
+ *
+ * Perform a simple test to see if the local file system suffers from this, if
+ * it does then we can skip test logic that assumes local file system merging is
+ * sane.
+ */
+static bool local_fs_has_sane_mmap(FIXTURE_DATA(guard_regions) * self,
+ const FIXTURE_VARIANT(guard_regions) * variant)
+{
+ const unsigned long page_size = self->page_size;
+ char *ptr, *ptr2;
+ struct procmap_fd procmap;
+
+ if (variant->backing != LOCAL_FILE_BACKED)
+ return true;
+
+ /* Map 10 pages. */
+ ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ | PROT_WRITE, 0, 0);
+ if (ptr == MAP_FAILED)
+ return false;
+ /* Unmap the middle. */
+ munmap(&ptr[5 * page_size], page_size);
+
+ /* Map again. */
+ ptr2 = mmap_(self, variant, &ptr[5 * page_size], page_size, PROT_READ | PROT_WRITE,
+ MAP_FIXED, 5 * page_size);
+
+ if (ptr2 == MAP_FAILED)
+ return false;
+
+ /* Now make sure they all merged. */
+ if (open_self_procmap(&procmap) != 0)
+ return false;
+ if (!find_vma_procmap(&procmap, ptr))
+ return false;
+ if (procmap.query.vma_start != (unsigned long)ptr)
+ return false;
+ if (procmap.query.vma_end != (unsigned long)ptr + 10 * page_size)
+ return false;
+ close_procmap(&procmap);
+
+ return true;
+}
+
FIXTURE_SETUP(guard_regions)
{
self->page_size = (unsigned long)sysconf(_SC_PAGESIZE);
@@ -582,7 +624,7 @@ TEST_F(guard_regions, process_madvise)
/* OK we don't have permission to do this, skip. */
if (count == -1 && errno == EPERM)
- ksft_exit_skip("No process_madvise() permissions, try running as root.\n");
+ SKIP(return, "No process_madvise() permissions, try running as root.\n");
/* Returns the number of bytes advised. */
ASSERT_EQ(count, 6 * page_size);
@@ -2145,4 +2187,140 @@ TEST_F(guard_regions, pagemap_scan)
ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}
+TEST_F(guard_regions, collapse)
+{
+ const unsigned long page_size = self->page_size;
+ const unsigned long size = 2 * HPAGE_SIZE;
+ const unsigned long num_pages = size / page_size;
+ char *ptr;
+ int i;
+
+ /* The file must be the correct size for non-anon tests. */
+ if (variant->backing != ANON_BACKED)
+ ASSERT_EQ(ftruncate(self->fd, size), 0);
+
+ /*
+ * We must close and re-open local-file backed as read-only for
+ * CONFIG_READ_ONLY_THP_FOR_FS to work.
+ */
+ if (variant->backing == LOCAL_FILE_BACKED) {
+ ASSERT_EQ(close(self->fd), 0);
+
+ self->fd = open(self->path, O_RDONLY);
+ ASSERT_GE(self->fd, 0);
+ }
+
+ ptr = mmap_(self, variant, NULL, size, PROT_READ, 0, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+
+ /* Prevent being faulted-in as huge. */
+ ASSERT_EQ(madvise(ptr, size, MADV_NOHUGEPAGE), 0);
+ /* Fault in. */
+ ASSERT_EQ(madvise(ptr, size, MADV_POPULATE_READ), 0);
+
+ /* Install guard regions in every other page. */
+ for (i = 0; i < num_pages; i += 2) {
+ char *ptr_page = &ptr[i * page_size];
+
+ ASSERT_EQ(madvise(ptr_page, page_size, MADV_GUARD_INSTALL), 0);
+ /* Accesses should now fail. */
+ ASSERT_FALSE(try_read_buf(ptr_page));
+ }
+
+ /* Allow huge page throughout region. */
+ ASSERT_EQ(madvise(ptr, size, MADV_HUGEPAGE), 0);
+
+ /*
+ * Now collapse the entire region. This should fail in all cases.
+ *
+ * The madvise() call will also fail if CONFIG_READ_ONLY_THP_FOR_FS is
+ * not set for the local file case, but we can't differentiate whether
+ * this occurred or if the collapse was rightly rejected.
+ */
+ EXPECT_NE(madvise(ptr, size, MADV_COLLAPSE), 0);
+
+ /*
+ * If we introduce a bug that causes the collapse to succeed, gather
+ * data on whether guard regions are at least preserved. The test will
+ * fail at this point in any case.
+ */
+ for (i = 0; i < num_pages; i += 2) {
+ char *ptr_page = &ptr[i * page_size];
+
+ /* Accesses should still fail. */
+ ASSERT_FALSE(try_read_buf(ptr_page));
+ }
+}
+
+TEST_F(guard_regions, smaps)
+{
+ const unsigned long page_size = self->page_size;
+ struct procmap_fd procmap;
+ char *ptr, *ptr2;
+ int i;
+
+ /* Map a region. */
+ ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ | PROT_WRITE, 0, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+
+ /* We shouldn't yet see a guard flag. */
+ ASSERT_FALSE(check_vmflag_guard(ptr));
+
+ /* Install a single guard region. */
+ ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
+
+ /* Now we should see a guard flag. */
+ ASSERT_TRUE(check_vmflag_guard(ptr));
+
+ /*
+ * Removing the guard region should not change things because we simply
+ * cannot accurately track whether a given VMA has had all of its guard
+ * regions removed.
+ */
+ ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_REMOVE), 0);
+ ASSERT_TRUE(check_vmflag_guard(ptr));
+
+ /* Install guard regions throughout. */
+ for (i = 0; i < 10; i++) {
+ ASSERT_EQ(madvise(&ptr[i * page_size], page_size, MADV_GUARD_INSTALL), 0);
+ /* We should always see the guard region flag. */
+ ASSERT_TRUE(check_vmflag_guard(ptr));
+ }
+
+ /* Split into two VMAs. */
+ ASSERT_EQ(munmap(&ptr[4 * page_size], page_size), 0);
+
+ /* Both VMAs should have the guard flag set. */
+ ASSERT_TRUE(check_vmflag_guard(ptr));
+ ASSERT_TRUE(check_vmflag_guard(&ptr[5 * page_size]));
+
+ /*
+ * If the local file system is unable to merge VMAs due to having
+ * unusual characteristics, there is no point in asserting merge
+ * behaviour.
+ */
+ if (!local_fs_has_sane_mmap(self, variant)) {
+ TH_LOG("local filesystem does not support sane merging skipping merge test");
+ return;
+ }
+
+ /* Map a fresh VMA between the two split VMAs. */
+ ptr2 = mmap_(self, variant, &ptr[4 * page_size], page_size,
+ PROT_READ | PROT_WRITE, MAP_FIXED, 4 * page_size);
+ ASSERT_NE(ptr2, MAP_FAILED);
+
+ /*
+ * Check the procmap to ensure that this VMA merged with the adjacent
+ * two. The guard region flag is 'sticky' so should not preclude
+ * merging.
+ */
+ ASSERT_EQ(open_self_procmap(&procmap), 0);
+ ASSERT_TRUE(find_vma_procmap(&procmap, ptr));
+ ASSERT_EQ(procmap.query.vma_start, (unsigned long)ptr);
+ ASSERT_EQ(procmap.query.vma_end, (unsigned long)ptr + 10 * page_size);
+ ASSERT_EQ(close_procmap(&procmap), 0);
+ /* And, of course, this VMA should have the guard flag set. */
+ ASSERT_TRUE(check_vmflag_guard(ptr));
+}
+
TEST_HARNESS_MAIN
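local_fs_has_sane_mmap() and the new smaps test lean on open_self_procmap()/find_vma_procmap(), which presumably wrap the PROCMAP_QUERY ioctl on /proc/self/maps (available since Linux 6.11); struct procmap_fd is a selftest-local wrapper around that fd. A hedged sketch of the underlying query, where maps_fd is an O_RDONLY fd on /proc/self/maps:

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>           /* struct procmap_query, PROCMAP_QUERY */

    /* Sketch: ask the kernel which VMA covers addr; not the real helper. */
    static int query_vma(int maps_fd, unsigned long addr,
                         struct procmap_query *q)
    {
            memset(q, 0, sizeof(*q));
            q->size = sizeof(*q);   /* ABI versioning */
            q->query_addr = addr;
            return ioctl(maps_fd, PROCMAP_QUERY, q);
    }

On success, q->vma_start and q->vma_end describe the covering VMA, which is exactly what the merge assertions above compare against.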
diff --git a/tools/testing/selftests/mm/gup_longterm.c b/tools/testing/selftests/mm/gup_longterm.c
index 8a97ac5176a4..6279893a0adc 100644
--- a/tools/testing/selftests/mm/gup_longterm.c
+++ b/tools/testing/selftests/mm/gup_longterm.c
@@ -27,7 +27,7 @@
#endif /* LOCAL_CONFIG_HAVE_LIBURING */
#include "../../../../mm/gup_test.h"
-#include "../kselftest.h"
+#include "kselftest.h"
#include "vm_util.h"
static size_t pagesize;
@@ -114,7 +114,15 @@ static void do_test(int fd, size_t size, enum test_type type, bool shared)
}
if (fallocate(fd, 0, 0, size)) {
- if (size == pagesize) {
+ /*
+ * Some filesystems (e.g., NFSv3) don't support
+ * fallocate(); report this as a skip rather than a
+ * test failure.
+ */
+ if (errno == EOPNOTSUPP) {
+ ksft_print_msg("fallocate() not supported by filesystem\n");
+ result = KSFT_SKIP;
+ } else if (size == pagesize) {
ksft_print_msg("fallocate() failed (%s)\n", strerror(errno));
result = KSFT_FAIL;
} else {
@@ -298,8 +306,11 @@ static void run_with_memfd(test_fn fn, const char *desc)
log_test_start("%s ... with memfd", desc);
fd = memfd_create("test", 0);
- if (fd < 0)
+ if (fd < 0) {
ksft_print_msg("memfd_create() failed (%s)\n", strerror(errno));
+ log_test_result(KSFT_SKIP);
+ return;
+ }
fn(fd, pagesize);
close(fd);
@@ -366,6 +377,8 @@ static void run_with_memfd_hugetlb(test_fn fn, const char *desc,
fd = memfd_create("test", flags);
if (fd < 0) {
ksft_print_msg("memfd_create() failed (%s)\n", strerror(errno));
+ log_test_result(KSFT_SKIP);
+ return;
}
fn(fd, hugetlbsize);
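Both memfd paths now degrade to a skip when the fd cannot be created, since that normally reflects a missing kernel feature or an unconfigured hugetlb pool rather than a bug in the code under test. For reference, a hugetlb memfd of an explicit page size is requested as in this sketch (the test itself derives the size flags at runtime):

    #define _GNU_SOURCE
    #include <sys/mman.h>
    #include <linux/memfd.h>        /* MFD_HUGETLB, MFD_HUGE_2MB */

    /* Failure here usually means no 2 MiB hugetlb pool is configured. */
    static int open_hugetlb_memfd(void)
    {
            return memfd_create("test", MFD_HUGETLB | MFD_HUGE_2MB);
    }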
diff --git a/tools/testing/selftests/mm/gup_test.c b/tools/testing/selftests/mm/gup_test.c
index bdeaac67ff9a..fb8f9ae49efa 100644
--- a/tools/testing/selftests/mm/gup_test.c
+++ b/tools/testing/selftests/mm/gup_test.c
@@ -12,14 +12,13 @@
#include <pthread.h>
#include <assert.h>
#include <mm/gup_test.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "vm_util.h"
#define MB (1UL << 20)
-/* Just the flags we need, copied from mm.h: */
+/* Just the flags we need, copied from the kernel internals. */
#define FOLL_WRITE 0x01 /* check pte is writable */
-#define FOLL_TOUCH 0x02 /* mark page accessed */
#define GUP_TEST_FILE "/sys/kernel/debug/gup_test"
@@ -93,7 +92,7 @@ int main(int argc, char **argv)
{
struct gup_test gup = { 0 };
int filed, i, opt, nr_pages = 1, thp = -1, write = 1, nthreads = 1, ret;
- int flags = MAP_PRIVATE, touch = 0;
+ int flags = MAP_PRIVATE;
char *file = "/dev/zero";
pthread_t *tid;
char *p;
@@ -139,6 +138,8 @@ int main(int argc, char **argv)
break;
case 'n':
nr_pages = atoi(optarg);
+ if (nr_pages < 0)
+ nr_pages = size / psize();
break;
case 't':
thp = 1;
@@ -168,10 +169,6 @@ int main(int argc, char **argv)
case 'H':
flags |= (MAP_HUGETLB | MAP_ANONYMOUS);
break;
- case 'z':
- /* fault pages in gup, do not fault in userland */
- touch = 1;
- break;
default:
ksft_exit_fail_msg("Wrong argument\n");
}
@@ -242,18 +239,9 @@ int main(int argc, char **argv)
else if (thp == 0)
madvise(p, size, MADV_NOHUGEPAGE);
- /*
- * FOLL_TOUCH, in gup_test, is used as an either/or case: either
- * fault pages in from the kernel via FOLL_TOUCH, or fault them
- * in here, from user space. This allows comparison of performance
- * between those two cases.
- */
- if (touch) {
- gup.gup_flags |= FOLL_TOUCH;
- } else {
- for (; (unsigned long)p < gup.addr + size; p += psize())
- p[0] = 0;
- }
+ /* Fault them in here, from user space. */
+ for (; (unsigned long)p < gup.addr + size; p += psize())
+ p[0] = 0;
tid = malloc(sizeof(pthread_t) * nthreads);
assert(tid);
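The new `-n` handling gives a negative count a meaning: size the GUP request to the whole buffer. The arithmetic is simply nr_pages = size / psize(); for example, a 2 MiB buffer on a 4 KiB-page system yields 2097152 / 4096 = 512 pages.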
diff --git a/tools/testing/selftests/mm/hmm-tests.c b/tools/testing/selftests/mm/hmm-tests.c
index 141bf63cbe05..e8328c89d855 100644
--- a/tools/testing/selftests/mm/hmm-tests.c
+++ b/tools/testing/selftests/mm/hmm-tests.c
@@ -10,7 +10,7 @@
* bugs.
*/
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#include <errno.h>
#include <fcntl.h>
@@ -25,6 +25,7 @@
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
+#include <sys/time.h>
/*
@@ -50,6 +51,8 @@ enum {
HMM_COHERENCE_DEVICE_TWO,
};
+#define ONEKB (1 << 10)
+#define ONEMEG (1 << 20)
#define TWOMEG (1 << 21)
#define HMM_BUFFER_SIZE (1024 << 12)
#define HMM_PATH_MAX 64
@@ -207,8 +210,10 @@ static void hmm_buffer_free(struct hmm_buffer *buffer)
if (buffer == NULL)
return;
- if (buffer->ptr)
+ if (buffer->ptr) {
munmap(buffer->ptr, buffer->size);
+ buffer->ptr = NULL;
+ }
free(buffer->mirror);
free(buffer);
}
@@ -525,6 +530,8 @@ TEST_F(hmm, anon_write_prot)
/*
* Check that a device writing an anonymous private mapping
* will copy-on-write if a child process inherits the mapping.
+ *
+ * Also verifies that device memory can be read by the child after fork().
*/
TEST_F(hmm, anon_write_child)
{
@@ -532,72 +539,101 @@ TEST_F(hmm, anon_write_child)
unsigned long npages;
unsigned long size;
unsigned long i;
+ void *old_ptr;
+ void *map;
int *ptr;
pid_t pid;
int child_fd;
- int ret;
-
- npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
- ASSERT_NE(npages, 0);
- size = npages << self->page_shift;
-
- buffer = malloc(sizeof(*buffer));
- ASSERT_NE(buffer, NULL);
-
- buffer->fd = -1;
- buffer->size = size;
- buffer->mirror = malloc(size);
- ASSERT_NE(buffer->mirror, NULL);
-
- buffer->ptr = mmap(NULL, size,
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS,
- buffer->fd, 0);
- ASSERT_NE(buffer->ptr, MAP_FAILED);
-
- /* Initialize buffer->ptr so we can tell if it is written. */
- for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
- ptr[i] = i;
-
- /* Initialize data that the device will write to buffer->ptr. */
- for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
- ptr[i] = -i;
+ int ret, use_thp, migrate;
+
+ for (migrate = 0; migrate < 2; ++migrate) {
+ for (use_thp = 0; use_thp < 2; ++use_thp) {
+ npages = ALIGN(use_thp ? TWOMEG : HMM_BUFFER_SIZE,
+ self->page_size) >> self->page_shift;
+ ASSERT_NE(npages, 0);
+ size = npages << self->page_shift;
+
+ buffer = malloc(sizeof(*buffer));
+ ASSERT_NE(buffer, NULL);
+
+ buffer->fd = -1;
+ buffer->size = size * 2;
+ buffer->mirror = malloc(size);
+ ASSERT_NE(buffer->mirror, NULL);
+
+ buffer->ptr = mmap(NULL, size * 2,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS,
+ buffer->fd, 0);
+ ASSERT_NE(buffer->ptr, MAP_FAILED);
+
+ old_ptr = buffer->ptr;
+ if (use_thp) {
+ map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
+ ret = madvise(map, size, MADV_HUGEPAGE);
+ ASSERT_EQ(ret, 0);
+ buffer->ptr = map;
+ }
+
+ /* Initialize buffer->ptr so we can tell if it is written. */
+ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+ ptr[i] = i;
+
+ /* Initialize data that the device will write to buffer->ptr. */
+ for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
+ ptr[i] = -i;
+
+ if (migrate) {
+ ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(buffer->cpages, npages);
+
+ }
+
+ pid = fork();
+ if (pid == -1)
+ ASSERT_EQ(pid, 0);
+ if (pid != 0) {
+ waitpid(pid, &ret, 0);
+ ASSERT_EQ(WIFEXITED(ret), 1);
+
+ /* Check that the parent's buffer did not change. */
+ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+ ASSERT_EQ(ptr[i], i);
+
+ buffer->ptr = old_ptr;
+ hmm_buffer_free(buffer);
+ continue;
+ }
+
+ /* Check that we see the parent's values. */
+ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+ ASSERT_EQ(ptr[i], i);
+ if (!migrate) {
+ for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
+ ASSERT_EQ(ptr[i], -i);
+ }
+
+ /* The child process needs its own mirror to its own mm. */
+ child_fd = hmm_open(0);
+ ASSERT_GE(child_fd, 0);
+
+ /* Simulate a device writing system memory. */
+ ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(buffer->cpages, npages);
+ ASSERT_EQ(buffer->faults, 1);
- pid = fork();
- if (pid == -1)
- ASSERT_EQ(pid, 0);
- if (pid != 0) {
- waitpid(pid, &ret, 0);
- ASSERT_EQ(WIFEXITED(ret), 1);
+ /* Check what the device wrote. */
+ if (!migrate) {
+ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+ ASSERT_EQ(ptr[i], -i);
+ }
- /* Check that the parent's buffer did not change. */
- for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
- ASSERT_EQ(ptr[i], i);
- return;
+ close(child_fd);
+ exit(0);
+ }
}
-
- /* Check that we see the parent's values. */
- for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
- ASSERT_EQ(ptr[i], i);
- for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
- ASSERT_EQ(ptr[i], -i);
-
- /* The child process needs its own mirror to its own mm. */
- child_fd = hmm_open(0);
- ASSERT_GE(child_fd, 0);
-
- /* Simulate a device writing system memory. */
- ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
- ASSERT_EQ(ret, 0);
- ASSERT_EQ(buffer->cpages, npages);
- ASSERT_EQ(buffer->faults, 1);
-
- /* Check what the device wrote. */
- for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
- ASSERT_EQ(ptr[i], -i);
-
- close(child_fd);
- exit(0);
}
/*
@@ -2027,11 +2063,10 @@ TEST_F(hmm, hmm_cow_in_device)
if (pid == -1)
ASSERT_EQ(pid, 0);
if (!pid) {
- /* Child process waitd for SIGTERM from the parent. */
+ /* Child process waits for SIGTERM from the parent. */
while (1) {
}
- perror("Should not reach this\n");
- exit(0);
+ /* Should not reach this */
}
/* Parent process writes to COW pages(s) and gets a
* new copy in system. In case of device private pages,
@@ -2056,4 +2091,765 @@ TEST_F(hmm, hmm_cow_in_device)
hmm_buffer_free(buffer);
}
+
+/*
+ * Migrate private anonymous huge empty page.
+ */
+TEST_F(hmm, migrate_anon_huge_empty)
+{
+ struct hmm_buffer *buffer;
+ unsigned long npages;
+ unsigned long size;
+ unsigned long i;
+ void *old_ptr;
+ void *map;
+ int *ptr;
+ int ret;
+
+ size = TWOMEG;
+
+ buffer = malloc(sizeof(*buffer));
+ ASSERT_NE(buffer, NULL);
+
+ buffer->fd = -1;
+ buffer->size = 2 * size;
+ buffer->mirror = malloc(size);
+ ASSERT_NE(buffer->mirror, NULL);
+ memset(buffer->mirror, 0xFF, size);
+
+ buffer->ptr = mmap(NULL, 2 * size,
+ PROT_READ,
+ MAP_PRIVATE | MAP_ANONYMOUS,
+ buffer->fd, 0);
+ ASSERT_NE(buffer->ptr, MAP_FAILED);
+
+ npages = size >> self->page_shift;
+ map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
+ ret = madvise(map, size, MADV_HUGEPAGE);
+ ASSERT_EQ(ret, 0);
+ old_ptr = buffer->ptr;
+ buffer->ptr = map;
+
+ /* Migrate memory to device. */
+ ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(buffer->cpages, npages);
+
+ /* Check what the device read. */
+ for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
+ ASSERT_EQ(ptr[i], 0);
+
+ buffer->ptr = old_ptr;
+ hmm_buffer_free(buffer);
+}
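All of the migrate_anon_huge_* tests use the same trick to obtain a naturally aligned 2 MiB window, which the kernel requires before it will back the range with a PMD-sized THP: map twice the size, then round the pointer up. A self-contained sketch of the idiom (ALIGN here is the usual power-of-two round-up, matching the tests' usage):

    #include <stdint.h>
    #include <sys/mman.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

    /* Map 2 * size so an aligned window of 'size' is guaranteed inside. */
    static void *map_aligned_thp(size_t size)   /* size = 2 MiB */
    {
            void *ptr = mmap(NULL, 2 * size, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (ptr == MAP_FAILED)
                    return NULL;
            return (void *)ALIGN((uintptr_t)ptr, size);
    }

The tests keep the original, unaligned pointer in old_ptr so the final munmap() releases the whole reservation.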
+
+/*
+ * Migrate private anonymous huge zero page.
+ */
+TEST_F(hmm, migrate_anon_huge_zero)
+{
+ struct hmm_buffer *buffer;
+ unsigned long npages;
+ unsigned long size;
+ unsigned long i;
+ void *old_ptr;
+ void *map;
+ int *ptr;
+ int ret;
+ int val;
+
+ size = TWOMEG;
+
+ buffer = malloc(sizeof(*buffer));
+ ASSERT_NE(buffer, NULL);
+
+ buffer->fd = -1;
+ buffer->size = 2 * size;
+ buffer->mirror = malloc(size);
+ ASSERT_NE(buffer->mirror, NULL);
+ memset(buffer->mirror, 0xFF, size);
+
+ buffer->ptr = mmap(NULL, 2 * size,
+ PROT_READ,
+ MAP_PRIVATE | MAP_ANONYMOUS,
+ buffer->fd, 0);
+ ASSERT_NE(buffer->ptr, MAP_FAILED);
+
+ npages = size >> self->page_shift;
+ map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
+ ret = madvise(map, size, MADV_HUGEPAGE);
+ ASSERT_EQ(ret, 0);
+ old_ptr = buffer->ptr;
+ buffer->ptr = map;
+
+ /* Initialize a read-only zero huge page. */
+ val = *(int *)buffer->ptr;
+ ASSERT_EQ(val, 0);
+
+ /* Migrate memory to device. */
+ ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(buffer->cpages, npages);
+
+ /* Check what the device read. */
+ for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
+ ASSERT_EQ(ptr[i], 0);
+
+ /* Fault pages back to system memory and check them. */
+ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) {
+ ASSERT_EQ(ptr[i], 0);
+ /* If the assertion fails once, it will probably fail 500,000 times. */
+ if (ptr[i] != 0)
+ break;
+ }
+
+ buffer->ptr = old_ptr;
+ hmm_buffer_free(buffer);
+}
+
+/*
+ * Migrate private anonymous huge page and free.
+ */
+TEST_F(hmm, migrate_anon_huge_free)
+{
+ struct hmm_buffer *buffer;
+ unsigned long npages;
+ unsigned long size;
+ unsigned long i;
+ void *old_ptr;
+ void *map;
+ int *ptr;
+ int ret;
+
+ size = TWOMEG;
+
+ buffer = malloc(sizeof(*buffer));
+ ASSERT_NE(buffer, NULL);
+
+ buffer->fd = -1;
+ buffer->size = 2 * size;
+ buffer->mirror = malloc(size);
+ ASSERT_NE(buffer->mirror, NULL);
+ memset(buffer->mirror, 0xFF, size);
+
+ buffer->ptr = mmap(NULL, 2 * size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS,
+ buffer->fd, 0);
+ ASSERT_NE(buffer->ptr, MAP_FAILED);
+
+ npages = size >> self->page_shift;
+ map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
+ ret = madvise(map, size, MADV_HUGEPAGE);
+ ASSERT_EQ(ret, 0);
+ old_ptr = buffer->ptr;
+ buffer->ptr = map;
+
+ /* Initialize buffer in system memory. */
+ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+ ptr[i] = i;
+
+ /* Migrate memory to device. */
+ ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(buffer->cpages, npages);
+
+ /* Check what the device read. */
+ for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
+ ASSERT_EQ(ptr[i], i);
+
+ /* Try freeing it. */
+ ret = madvise(map, size, MADV_FREE);
+ ASSERT_EQ(ret, 0);
+
+ buffer->ptr = old_ptr;
+ hmm_buffer_free(buffer);
+}
+
+/*
+ * Migrate private anonymous huge page and fault back to sysmem.
+ */
+TEST_F(hmm, migrate_anon_huge_fault)
+{
+ struct hmm_buffer *buffer;
+ unsigned long npages;
+ unsigned long size;
+ unsigned long i;
+ void *old_ptr;
+ void *map;
+ int *ptr;
+ int ret;
+
+ size = TWOMEG;
+
+ buffer = malloc(sizeof(*buffer));
+ ASSERT_NE(buffer, NULL);
+
+ buffer->fd = -1;
+ buffer->size = 2 * size;
+ buffer->mirror = malloc(size);
+ ASSERT_NE(buffer->mirror, NULL);
+ memset(buffer->mirror, 0xFF, size);
+
+ buffer->ptr = mmap(NULL, 2 * size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS,
+ buffer->fd, 0);
+ ASSERT_NE(buffer->ptr, MAP_FAILED);
+
+ npages = size >> self->page_shift;
+ map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
+ ret = madvise(map, size, MADV_HUGEPAGE);
+ ASSERT_EQ(ret, 0);
+ old_ptr = buffer->ptr;
+ buffer->ptr = map;
+
+ /* Initialize buffer in system memory. */
+ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+ ptr[i] = i;
+
+ /* Migrate memory to device. */
+ ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(buffer->cpages, npages);
+
+ /* Check what the device read. */
+ for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
+ ASSERT_EQ(ptr[i], i);
+
+ /* Fault pages back to system memory and check them. */
+ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+ ASSERT_EQ(ptr[i], i);
+
+ buffer->ptr = old_ptr;
+ hmm_buffer_free(buffer);
+}
+
+/*
+ * Migrate memory and fault back to sysmem after partially unmapping.
+ */
+TEST_F(hmm, migrate_partial_unmap_fault)
+{
+ struct hmm_buffer *buffer;
+ unsigned long npages;
+ unsigned long size = TWOMEG;
+ unsigned long i;
+ void *old_ptr;
+ void *map;
+ int *ptr;
+ int ret, j, use_thp;
+ int offsets[] = { 0, 512 * ONEKB, ONEMEG };
+
+ for (use_thp = 0; use_thp < 2; ++use_thp) {
+ for (j = 0; j < ARRAY_SIZE(offsets); ++j) {
+ buffer = malloc(sizeof(*buffer));
+ ASSERT_NE(buffer, NULL);
+
+ buffer->fd = -1;
+ buffer->size = 2 * size;
+ buffer->mirror = malloc(size);
+ ASSERT_NE(buffer->mirror, NULL);
+ memset(buffer->mirror, 0xFF, size);
+
+ buffer->ptr = mmap(NULL, 2 * size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS,
+ buffer->fd, 0);
+ ASSERT_NE(buffer->ptr, MAP_FAILED);
+
+ npages = size >> self->page_shift;
+ map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
+ if (use_thp)
+ ret = madvise(map, size, MADV_HUGEPAGE);
+ else
+ ret = madvise(map, size, MADV_NOHUGEPAGE);
+ ASSERT_EQ(ret, 0);
+ old_ptr = buffer->ptr;
+ buffer->ptr = map;
+
+ /* Initialize buffer in system memory. */
+ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+ ptr[i] = i;
+
+ /* Migrate memory to device. */
+ ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(buffer->cpages, npages);
+
+ /* Check what the device read. */
+ for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
+ ASSERT_EQ(ptr[i], i);
+
+ munmap(buffer->ptr + offsets[j], ONEMEG);
+
+ /* Fault pages back to system memory and check them. */
+ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+ if (i * sizeof(int) < offsets[j] ||
+ i * sizeof(int) >= offsets[j] + ONEMEG)
+ ASSERT_EQ(ptr[i], i);
+
+ buffer->ptr = old_ptr;
+ hmm_buffer_free(buffer);
+ }
+ }
+}
+
+TEST_F(hmm, migrate_remap_fault)
+{
+ struct hmm_buffer *buffer;
+ unsigned long npages;
+ unsigned long size = TWOMEG;
+ unsigned long i;
+ void *old_ptr, *new_ptr = NULL;
+ void *map;
+ int *ptr;
+ int ret, j, use_thp, dont_unmap, before;
+ int offsets[] = { 0, 512 * ONEKB, ONEMEG };
+
+ for (before = 0; before < 2; ++before) {
+ for (dont_unmap = 0; dont_unmap < 2; ++dont_unmap) {
+ for (use_thp = 0; use_thp < 2; ++use_thp) {
+ for (j = 0; j < ARRAY_SIZE(offsets); ++j) {
+ int flags = MREMAP_MAYMOVE | MREMAP_FIXED;
+
+ if (dont_unmap)
+ flags |= MREMAP_DONTUNMAP;
+
+ buffer = malloc(sizeof(*buffer));
+ ASSERT_NE(buffer, NULL);
+
+ buffer->fd = -1;
+ buffer->size = 8 * size;
+ buffer->mirror = malloc(size);
+ ASSERT_NE(buffer->mirror, NULL);
+ memset(buffer->mirror, 0xFF, size);
+
+ buffer->ptr = mmap(NULL, buffer->size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS,
+ buffer->fd, 0);
+ ASSERT_NE(buffer->ptr, MAP_FAILED);
+
+ npages = size >> self->page_shift;
+ map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
+ if (use_thp)
+ ret = madvise(map, size, MADV_HUGEPAGE);
+ else
+ ret = madvise(map, size, MADV_NOHUGEPAGE);
+ ASSERT_EQ(ret, 0);
+ old_ptr = buffer->ptr;
+ munmap(map + size, size * 2);
+ buffer->ptr = map;
+
+ /* Initialize buffer in system memory. */
+ for (i = 0, ptr = buffer->ptr;
+ i < size / sizeof(*ptr); ++i)
+ ptr[i] = i;
+
+ if (before) {
+ new_ptr = mremap((void *)map, size, size, flags,
+ map + size + offsets[j]);
+ ASSERT_NE(new_ptr, MAP_FAILED);
+ buffer->ptr = new_ptr;
+ }
+
+ /* Migrate memory to device. */
+ ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(buffer->cpages, npages);
+
+ /* Check what the device read. */
+ for (i = 0, ptr = buffer->mirror;
+ i < size / sizeof(*ptr); ++i)
+ ASSERT_EQ(ptr[i], i);
+
+ if (!before) {
+ new_ptr = mremap((void *)map, size, size, flags,
+ map + size + offsets[j]);
+ ASSERT_NE(new_ptr, MAP_FAILED);
+ buffer->ptr = new_ptr;
+ }
+
+ /* Fault pages back to system memory and check them. */
+ for (i = 0, ptr = buffer->ptr;
+ i < size / sizeof(*ptr); ++i)
+ ASSERT_EQ(ptr[i], i);
+
+ munmap(new_ptr, size);
+ buffer->ptr = old_ptr;
+ hmm_buffer_free(buffer);
+ }
+ }
+ }
+ }
+}
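For readers unfamiliar with the flag combinations exercised here: MREMAP_MAYMOVE | MREMAP_FIXED moves the mapping to the requested target address, and adding MREMAP_DONTUNMAP (Linux 5.7+, private anonymous mappings only) additionally leaves the old range mapped but emptied, so later touches there fault in fresh pages. The before/after loop dimension then exercises migration across a remap in both orders. A minimal sketch:

    #define _GNU_SOURCE
    #include <sys/mman.h>

    /* Sketch: relocate a mapping while keeping the old range mapped. */
    static void *move_mapping(void *old, size_t size, void *target)
    {
            return mremap(old, size, size,
                          MREMAP_MAYMOVE | MREMAP_FIXED | MREMAP_DONTUNMAP,
                          target);
    }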
+
+/*
+ * Migrate private anonymous huge page with allocation errors.
+ */
+TEST_F(hmm, migrate_anon_huge_err)
+{
+ struct hmm_buffer *buffer;
+ unsigned long npages;
+ unsigned long size;
+ unsigned long i;
+ void *old_ptr;
+ void *map;
+ int *ptr;
+ int ret;
+
+ size = TWOMEG;
+
+ buffer = malloc(sizeof(*buffer));
+ ASSERT_NE(buffer, NULL);
+
+ buffer->fd = -1;
+ buffer->size = 2 * size;
+ buffer->mirror = malloc(2 * size);
+ ASSERT_NE(buffer->mirror, NULL);
+ memset(buffer->mirror, 0xFF, 2 * size);
+
+ old_ptr = mmap(NULL, 2 * size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, buffer->fd, 0);
+ ASSERT_NE(old_ptr, MAP_FAILED);
+
+ npages = size >> self->page_shift;
+ map = (void *)ALIGN((uintptr_t)old_ptr, size);
+ ret = madvise(map, size, MADV_HUGEPAGE);
+ ASSERT_EQ(ret, 0);
+ buffer->ptr = map;
+
+ /* Initialize buffer in system memory. */
+ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+ ptr[i] = i;
+
+ /* Migrate memory to device but force a THP allocation error. */
+ ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_FLAGS, buffer,
+ HMM_DMIRROR_FLAG_FAIL_ALLOC);
+ ASSERT_EQ(ret, 0);
+ ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(buffer->cpages, npages);
+
+ /* Check what the device read. */
+ for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) {
+ ASSERT_EQ(ptr[i], i);
+ if (ptr[i] != i)
+ break;
+ }
+
+ /* Try faulting back a single (PAGE_SIZE) page. */
+ ptr = buffer->ptr;
+ ASSERT_EQ(ptr[2048], 2048);
+
+ /* unmap and remap the region to reset things. */
+ ret = munmap(old_ptr, 2 * size);
+ ASSERT_EQ(ret, 0);
+ old_ptr = mmap(NULL, 2 * size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, buffer->fd, 0);
+ ASSERT_NE(old_ptr, MAP_FAILED);
+ map = (void *)ALIGN((uintptr_t)old_ptr, size);
+ ret = madvise(map, size, MADV_HUGEPAGE);
+ ASSERT_EQ(ret, 0);
+ buffer->ptr = map;
+
+ /* Initialize buffer in system memory. */
+ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+ ptr[i] = i;
+
+ /* Migrate THP to device. */
+ ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(buffer->cpages, npages);
+
+ /*
+ * Force an allocation error when faulting back a THP resident in the
+ * device.
+ */
+ ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_FLAGS, buffer,
+ HMM_DMIRROR_FLAG_FAIL_ALLOC);
+ ASSERT_EQ(ret, 0);
+
+ ret = hmm_migrate_dev_to_sys(self->fd, buffer, npages);
+ ASSERT_EQ(ret, 0);
+ ptr = buffer->ptr;
+ ASSERT_EQ(ptr[2048], 2048);
+
+ buffer->ptr = old_ptr;
+ hmm_buffer_free(buffer);
+}
+
+/*
+ * Migrate private anonymous huge zero page with allocation errors.
+ */
+TEST_F(hmm, migrate_anon_huge_zero_err)
+{
+ struct hmm_buffer *buffer;
+ unsigned long npages;
+ unsigned long size;
+ unsigned long i;
+ void *old_ptr;
+ void *map;
+ int *ptr;
+ int ret;
+
+ size = TWOMEG;
+
+ buffer = malloc(sizeof(*buffer));
+ ASSERT_NE(buffer, NULL);
+
+ buffer->fd = -1;
+ buffer->size = 2 * size;
+ buffer->mirror = malloc(2 * size);
+ ASSERT_NE(buffer->mirror, NULL);
+ memset(buffer->mirror, 0xFF, 2 * size);
+
+ old_ptr = mmap(NULL, 2 * size, PROT_READ,
+ MAP_PRIVATE | MAP_ANONYMOUS, buffer->fd, 0);
+ ASSERT_NE(old_ptr, MAP_FAILED);
+
+ npages = size >> self->page_shift;
+ map = (void *)ALIGN((uintptr_t)old_ptr, size);
+ ret = madvise(map, size, MADV_HUGEPAGE);
+ ASSERT_EQ(ret, 0);
+ buffer->ptr = map;
+
+ /* Migrate memory to device but force a THP allocation error. */
+ ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_FLAGS, buffer,
+ HMM_DMIRROR_FLAG_FAIL_ALLOC);
+ ASSERT_EQ(ret, 0);
+ ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(buffer->cpages, npages);
+
+ /* Check what the device read. */
+ for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
+ ASSERT_EQ(ptr[i], 0);
+
+ /* Try faulting back a single (PAGE_SIZE) page. */
+ ptr = buffer->ptr;
+ ASSERT_EQ(ptr[2048], 0);
+
+ /* unmap and remap the region to reset things. */
+ ret = munmap(old_ptr, 2 * size);
+ ASSERT_EQ(ret, 0);
+ old_ptr = mmap(NULL, 2 * size, PROT_READ,
+ MAP_PRIVATE | MAP_ANONYMOUS, buffer->fd, 0);
+ ASSERT_NE(old_ptr, MAP_FAILED);
+ map = (void *)ALIGN((uintptr_t)old_ptr, size);
+ ret = madvise(map, size, MADV_HUGEPAGE);
+ ASSERT_EQ(ret, 0);
+ buffer->ptr = map;
+
+ /* Initialize buffer in system memory (zero THP page). */
+ ret = ptr[0];
+ ASSERT_EQ(ret, 0);
+
+ /* Migrate memory to device but force a THP allocation error. */
+ ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_FLAGS, buffer,
+ HMM_DMIRROR_FLAG_FAIL_ALLOC);
+ ASSERT_EQ(ret, 0);
+ ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(buffer->cpages, npages);
+
+ /* Fault the device memory back and check it. */
+ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+ ASSERT_EQ(ptr[i], 0);
+
+ buffer->ptr = old_ptr;
+ hmm_buffer_free(buffer);
+}
+
+struct benchmark_results {
+ double sys_to_dev_time;
+ double dev_to_sys_time;
+ double throughput_s2d;
+ double throughput_d2s;
+};
+
+static double get_time_ms(void)
+{
+ struct timeval tv;
+
+ gettimeofday(&tv, NULL);
+ return (tv.tv_sec * 1000.0) + (tv.tv_usec / 1000.0);
+}
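gettimeofday() reports wall-clock time, which can step under NTP adjustment; for a benchmark, a monotonic clock is the safer choice. A drop-in variant, should that ever matter here:

    #include <time.h>

    /* Monotonic milliseconds; immune to wall-clock adjustments. */
    static double get_time_ms_mono(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return ts.tv_sec * 1000.0 + ts.tv_nsec / 1e6;
    }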
+
+static inline struct hmm_buffer *hmm_buffer_alloc(unsigned long size)
+{
+ struct hmm_buffer *buffer;
+
+ buffer = malloc(sizeof(*buffer));
+
+ buffer->fd = -1;
+ buffer->size = size;
+ buffer->mirror = malloc(size);
+ memset(buffer->mirror, 0xFF, size);
+ return buffer;
+}
+
+static void print_benchmark_results(const char *test_name, size_t buffer_size,
+ struct benchmark_results *thp,
+ struct benchmark_results *regular)
+{
+ double s2d_improvement = ((regular->sys_to_dev_time - thp->sys_to_dev_time) /
+ regular->sys_to_dev_time) * 100.0;
+ double d2s_improvement = ((regular->dev_to_sys_time - thp->dev_to_sys_time) /
+ regular->dev_to_sys_time) * 100.0;
+ double throughput_s2d_improvement = ((thp->throughput_s2d - regular->throughput_s2d) /
+ regular->throughput_s2d) * 100.0;
+ double throughput_d2s_improvement = ((thp->throughput_d2s - regular->throughput_d2s) /
+ regular->throughput_d2s) * 100.0;
+
+ printf("\n=== %s (%.1f MB) ===\n", test_name, buffer_size / (1024.0 * 1024.0));
+ printf(" | With THP | Without THP | Improvement\n");
+ printf("---------------------------------------------------------------------\n");
+ printf("Sys->Dev Migration | %.3f ms | %.3f ms | %.1f%%\n",
+ thp->sys_to_dev_time, regular->sys_to_dev_time, s2d_improvement);
+ printf("Dev->Sys Migration | %.3f ms | %.3f ms | %.1f%%\n",
+ thp->dev_to_sys_time, regular->dev_to_sys_time, d2s_improvement);
+ printf("S->D Throughput | %.2f GB/s | %.2f GB/s | %.1f%%\n",
+ thp->throughput_s2d, regular->throughput_s2d, throughput_s2d_improvement);
+ printf("D->S Throughput | %.2f GB/s | %.2f GB/s | %.1f%%\n",
+ thp->throughput_d2s, regular->throughput_d2s, throughput_d2s_improvement);
+}
+
+/*
+ * Run a single migration benchmark
+ * fd: file descriptor for hmm device
+ * use_thp: whether to use THP
+ * buffer_size: size of buffer to allocate
+ * iterations: number of iterations
+ * results: where to store results
+ */
+static inline int run_migration_benchmark(int fd, int use_thp, size_t buffer_size,
+ int iterations, struct benchmark_results *results)
+{
+ struct hmm_buffer *buffer;
+ unsigned long npages = buffer_size / sysconf(_SC_PAGESIZE);
+ double start, end;
+ double s2d_total = 0, d2s_total = 0;
+ int ret, i;
+ int *ptr;
+
+ buffer = hmm_buffer_alloc(buffer_size);
+
+ /* Map memory */
+ buffer->ptr = mmap(NULL, buffer_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+ if (!buffer->ptr)
+ return -1;
+
+ /* Apply THP hint if requested */
+ if (use_thp)
+ ret = madvise(buffer->ptr, buffer_size, MADV_HUGEPAGE);
+ else
+ ret = madvise(buffer->ptr, buffer_size, MADV_NOHUGEPAGE);
+
+ if (ret)
+ return ret;
+
+ /* Initialize memory to make sure pages are allocated */
+ ptr = (int *)buffer->ptr;
+ for (i = 0; i < buffer_size / sizeof(int); i++)
+ ptr[i] = i & 0xFF;
+
+ /* Warmup iteration */
+ ret = hmm_migrate_sys_to_dev(fd, buffer, npages);
+ if (ret)
+ return ret;
+
+ ret = hmm_migrate_dev_to_sys(fd, buffer, npages);
+ if (ret)
+ return ret;
+
+ /* Benchmark iterations */
+ for (i = 0; i < iterations; i++) {
+ /* System to device migration */
+ start = get_time_ms();
+
+ ret = hmm_migrate_sys_to_dev(fd, buffer, npages);
+ if (ret)
+ return ret;
+
+ end = get_time_ms();
+ s2d_total += (end - start);
+
+ /* Device to system migration */
+ start = get_time_ms();
+
+ ret = hmm_migrate_dev_to_sys(fd, buffer, npages);
+ if (ret)
+ return ret;
+
+ end = get_time_ms();
+ d2s_total += (end - start);
+ }
+
+ /* Calculate average times and throughput */
+ results->sys_to_dev_time = s2d_total / iterations;
+ results->dev_to_sys_time = d2s_total / iterations;
+ results->throughput_s2d = (buffer_size / (1024.0 * 1024.0 * 1024.0)) /
+ (results->sys_to_dev_time / 1000.0);
+ results->throughput_d2s = (buffer_size / (1024.0 * 1024.0 * 1024.0)) /
+ (results->dev_to_sys_time / 1000.0);
+
+ /* Cleanup */
+ hmm_buffer_free(buffer);
+ return 0;
+}
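The reported throughput is simply buffer_size in GiB divided by the mean per-direction latency in seconds. Worked example: a 2 MiB buffer averaging 0.4 ms per sys-to-dev migration gives (2/1024 GiB) / 0.0004 s ≈ 4.88 GB/s.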
+
+/*
+ * Benchmark THP migration with different buffer sizes
+ */
+TEST_F_TIMEOUT(hmm, benchmark_thp_migration, 120)
+{
+ struct benchmark_results thp_results, regular_results;
+ size_t thp_size = 2 * 1024 * 1024; /* 2MB - typical THP size */
+ int iterations = 5;
+
+ printf("\nHMM THP Migration Benchmark\n");
+ printf("---------------------------\n");
+ printf("System page size: %ld bytes\n", sysconf(_SC_PAGESIZE));
+
+ /* Test different buffer sizes */
+ size_t test_sizes[] = {
+ thp_size / 4, /* 512KB - smaller than THP */
+ thp_size / 2, /* 1MB - half THP */
+ thp_size, /* 2MB - single THP */
+ thp_size * 2, /* 4MB - two THPs */
+ thp_size * 4, /* 8MB - four THPs */
+ thp_size * 8, /* 16MB - eight THPs */
+ thp_size * 128, /* 256MB - 128 THPs */
+ };
+
+ static const char *const test_names[] = {
+ "Small Buffer (512KB)",
+ "Half THP Size (1MB)",
+ "Single THP Size (2MB)",
+ "Two THP Size (4MB)",
+ "Four THP Size (8MB)",
+ "Eight THP Size (16MB)",
+ "One twenty eight THP Size (256MB)"
+ };
+
+ int num_tests = ARRAY_SIZE(test_sizes);
+
+ /* Run all tests */
+ for (int i = 0; i < num_tests; i++) {
+ /* Test with THP */
+ ASSERT_EQ(run_migration_benchmark(self->fd, 1, test_sizes[i],
+ iterations, &thp_results), 0);
+
+ /* Test without THP */
+ ASSERT_EQ(run_migration_benchmark(self->fd, 0, test_sizes[i],
+ iterations, &regular_results), 0);
+
+ /* Print results */
+ print_benchmark_results(test_names[i], test_sizes[i],
+ &thp_results, &regular_results);
+ }
+}
TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/mm/hugepage-mmap.c b/tools/testing/selftests/mm/hugepage-mmap.c
index 3b1b532f1cbb..d543419de040 100644
--- a/tools/testing/selftests/mm/hugepage-mmap.c
+++ b/tools/testing/selftests/mm/hugepage-mmap.c
@@ -15,7 +15,7 @@
#include <unistd.h>
#include <sys/mman.h>
#include <fcntl.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#define LENGTH (256UL*1024*1024)
#define PROTECTION (PROT_READ | PROT_WRITE)
diff --git a/tools/testing/selftests/mm/hugepage-mremap.c b/tools/testing/selftests/mm/hugepage-mremap.c
index c463d1c09c9b..b8f7d92e5a35 100644
--- a/tools/testing/selftests/mm/hugepage-mremap.c
+++ b/tools/testing/selftests/mm/hugepage-mremap.c
@@ -24,7 +24,7 @@
#include <sys/ioctl.h>
#include <string.h>
#include <stdbool.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "vm_util.h"
#define DEFAULT_LENGTH_MB 10UL
@@ -65,10 +65,20 @@ static void register_region_with_uffd(char *addr, size_t len)
struct uffdio_api uffdio_api;
/* Create and enable userfaultfd object. */
-
uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
- if (uffd == -1)
- ksft_exit_fail_msg("userfaultfd: %s\n", strerror(errno));
+ if (uffd == -1) {
+ switch (errno) {
+ case EPERM:
+ ksft_exit_skip("Insufficient permissions, try running as root.\n");
+ break;
+ case ENOSYS:
+ ksft_exit_skip("userfaultfd is not supported/not enabled.\n");
+ break;
+ default:
+ ksft_exit_fail_msg("userfaultfd failed with %s\n", strerror(errno));
+ break;
+ }
+ }
uffdio_api.api = UFFD_API;
uffdio_api.features = 0;
diff --git a/tools/testing/selftests/mm/hugetlb-madvise.c b/tools/testing/selftests/mm/hugetlb-madvise.c
index e74107185324..05d9d2805ae4 100644
--- a/tools/testing/selftests/mm/hugetlb-madvise.c
+++ b/tools/testing/selftests/mm/hugetlb-madvise.c
@@ -19,7 +19,7 @@
#include <sys/mman.h>
#include <fcntl.h>
#include "vm_util.h"
-#include "../kselftest.h"
+#include "kselftest.h"
#define MIN_FREE_PAGES 20
#define NR_HUGE_PAGES 10 /* common number of pages to map/allocate */
@@ -47,14 +47,13 @@ void write_fault_pages(void *addr, unsigned long nr_pages)
void read_fault_pages(void *addr, unsigned long nr_pages)
{
- volatile unsigned long dummy = 0;
unsigned long i;
for (i = 0; i < nr_pages; i++) {
- dummy += *((unsigned long *)(addr + (i * huge_page_size)));
-
+ unsigned long *addr2 =
+ ((unsigned long *)(addr + (i * huge_page_size)));
/* Prevent the compiler from optimizing out the entire loop: */
- asm volatile("" : "+r" (dummy));
+ FORCE_READ(*addr2);
}
}
diff --git a/tools/testing/selftests/mm/hugetlb-read-hwpoison.c b/tools/testing/selftests/mm/hugetlb-read-hwpoison.c
index ba6cc6f9cabc..46230462ad48 100644
--- a/tools/testing/selftests/mm/hugetlb-read-hwpoison.c
+++ b/tools/testing/selftests/mm/hugetlb-read-hwpoison.c
@@ -11,7 +11,7 @@
#include <errno.h>
#include <stdbool.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#define PREFIX " ... "
#define ERROR_PREFIX " !!! "
diff --git a/tools/testing/selftests/mm/hugetlb-soft-offline.c b/tools/testing/selftests/mm/hugetlb-soft-offline.c
index f086f0e04756..a8bc02688085 100644
--- a/tools/testing/selftests/mm/hugetlb-soft-offline.c
+++ b/tools/testing/selftests/mm/hugetlb-soft-offline.c
@@ -24,7 +24,7 @@
#include <sys/statfs.h>
#include <sys/types.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#ifndef MADV_SOFT_OFFLINE
#define MADV_SOFT_OFFLINE 101
diff --git a/tools/testing/selftests/mm/hugetlb_dio.c b/tools/testing/selftests/mm/hugetlb_dio.c
index db63abe5ee5e..9ac62eb4c97d 100644
--- a/tools/testing/selftests/mm/hugetlb_dio.c
+++ b/tools/testing/selftests/mm/hugetlb_dio.c
@@ -18,7 +18,7 @@
#include <string.h>
#include <sys/mman.h>
#include "vm_util.h"
-#include "../kselftest.h"
+#include "kselftest.h"
void run_dio_using_hugetlb(unsigned int start_off, unsigned int end_off)
{
diff --git a/tools/testing/selftests/mm/hugetlb_fault_after_madv.c b/tools/testing/selftests/mm/hugetlb_fault_after_madv.c
index e2640529dbb2..b4b257775b74 100644
--- a/tools/testing/selftests/mm/hugetlb_fault_after_madv.c
+++ b/tools/testing/selftests/mm/hugetlb_fault_after_madv.c
@@ -9,7 +9,7 @@
#include <signal.h>
#include "vm_util.h"
-#include "../kselftest.h"
+#include "kselftest.h"
#define INLOOP_ITER 100
diff --git a/tools/testing/selftests/mm/hugetlb_madv_vs_map.c b/tools/testing/selftests/mm/hugetlb_madv_vs_map.c
index 8f122a0f0828..efd774b41389 100644
--- a/tools/testing/selftests/mm/hugetlb_madv_vs_map.c
+++ b/tools/testing/selftests/mm/hugetlb_madv_vs_map.c
@@ -25,7 +25,7 @@
#include <unistd.h>
#include "vm_util.h"
-#include "../kselftest.h"
+#include "kselftest.h"
#define INLOOP_ITER 100
diff --git a/tools/testing/selftests/mm/khugepaged.c b/tools/testing/selftests/mm/khugepaged.c
index 8a4d34cce36b..3fe7ef04ac62 100644
--- a/tools/testing/selftests/mm/khugepaged.c
+++ b/tools/testing/selftests/mm/khugepaged.c
@@ -394,7 +394,7 @@ static void *file_setup_area(int nr_hpages)
perror("open()");
exit(EXIT_FAILURE);
}
- p = mmap(BASE_ADDR, size, PROT_READ | PROT_EXEC,
+ p = mmap(BASE_ADDR, size, PROT_READ,
MAP_PRIVATE, finfo.fd, 0);
if (p == MAP_FAILED || p != BASE_ADDR) {
perror("mmap()");
@@ -561,8 +561,6 @@ static bool wait_for_scan(const char *msg, char *p, int nr_hpages,
usleep(TICK);
}
- madvise(p, nr_hpages * hpage_pmd_size, MADV_NOHUGEPAGE);
-
return timeout == -1;
}
@@ -1190,6 +1188,11 @@ int main(int argc, char **argv)
.read_ahead_kb = 0,
};
+ if (!thp_is_enabled()) {
+ printf("Transparent Hugepages not available\n");
+ return KSFT_SKIP;
+ }
+
parse_test_type(argc, argv);
setbuf(stdout, NULL);
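thp_is_enabled() lets the test skip instead of fail on kernels without THP. The probe presumably inspects the global THP policy; a hedged sketch of one way to do that (the real helper lives in the shared thp_settings code):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Sketch: THP is usable unless absent or globally set to "never". */
    static bool thp_enabled_sketch(void)
    {
            char buf[128];
            FILE *f = fopen("/sys/kernel/mm/transparent_hugepage/enabled", "r");
            bool ret;

            if (!f)
                    return false;   /* CONFIG_TRANSPARENT_HUGEPAGE=n */
            /* The active policy is bracketed: "always [madvise] never". */
            ret = fgets(buf, sizeof(buf), f) && !strstr(buf, "[never]");
            fclose(f);
            return ret;
    }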
diff --git a/tools/testing/selftests/mm/ksm_functional_tests.c b/tools/testing/selftests/mm/ksm_functional_tests.c
index b61803e36d1c..8d874c4754f3 100644
--- a/tools/testing/selftests/mm/ksm_functional_tests.c
+++ b/tools/testing/selftests/mm/ksm_functional_tests.c
@@ -21,7 +21,7 @@
#include <sys/wait.h>
#include <linux/userfaultfd.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "vm_util.h"
#define KiB 1024u
@@ -38,14 +38,13 @@ enum ksm_merge_mode {
};
static int mem_fd;
-static int ksm_fd;
-static int ksm_full_scans_fd;
-static int proc_self_ksm_stat_fd;
-static int proc_self_ksm_merging_pages_fd;
-static int ksm_use_zero_pages_fd;
+static int pages_to_scan_fd;
+static int sleep_millisecs_fd;
static int pagemap_fd;
static size_t pagesize;
+static void init_global_file_handles(void);
+
static bool range_maps_duplicates(char *addr, unsigned long size)
{
unsigned long offs_a, offs_b, pfn_a, pfn_b;
@@ -73,88 +72,6 @@ static bool range_maps_duplicates(char *addr, unsigned long size)
return false;
}
-static long get_my_ksm_zero_pages(void)
-{
- char buf[200];
- char *substr_ksm_zero;
- size_t value_pos;
- ssize_t read_size;
- unsigned long my_ksm_zero_pages;
-
- if (!proc_self_ksm_stat_fd)
- return 0;
-
- read_size = pread(proc_self_ksm_stat_fd, buf, sizeof(buf) - 1, 0);
- if (read_size < 0)
- return -errno;
-
- buf[read_size] = 0;
-
- substr_ksm_zero = strstr(buf, "ksm_zero_pages");
- if (!substr_ksm_zero)
- return 0;
-
- value_pos = strcspn(substr_ksm_zero, "0123456789");
- my_ksm_zero_pages = strtol(substr_ksm_zero + value_pos, NULL, 10);
-
- return my_ksm_zero_pages;
-}
-
-static long get_my_merging_pages(void)
-{
- char buf[10];
- ssize_t ret;
-
- if (proc_self_ksm_merging_pages_fd < 0)
- return proc_self_ksm_merging_pages_fd;
-
- ret = pread(proc_self_ksm_merging_pages_fd, buf, sizeof(buf) - 1, 0);
- if (ret <= 0)
- return -errno;
- buf[ret] = 0;
-
- return strtol(buf, NULL, 10);
-}
-
-static long ksm_get_full_scans(void)
-{
- char buf[10];
- ssize_t ret;
-
- ret = pread(ksm_full_scans_fd, buf, sizeof(buf) - 1, 0);
- if (ret <= 0)
- return -errno;
- buf[ret] = 0;
-
- return strtol(buf, NULL, 10);
-}
-
-static int ksm_merge(void)
-{
- long start_scans, end_scans;
-
- /* Wait for two full scans such that any possible merging happened. */
- start_scans = ksm_get_full_scans();
- if (start_scans < 0)
- return start_scans;
- if (write(ksm_fd, "1", 1) != 1)
- return -errno;
- do {
- end_scans = ksm_get_full_scans();
- if (end_scans < 0)
- return end_scans;
- } while (end_scans < start_scans + 2);
-
- return 0;
-}
-
-static int ksm_unmerge(void)
-{
- if (write(ksm_fd, "2", 1) != 1)
- return -errno;
- return 0;
-}
-
static char *__mmap_and_merge_range(char val, unsigned long size, int prot,
enum ksm_merge_mode mode)
{
@@ -163,12 +80,12 @@ static char *__mmap_and_merge_range(char val, unsigned long size, int prot,
int ret;
/* Stabilize accounting by disabling KSM completely. */
- if (ksm_unmerge()) {
+ if (ksm_stop() < 0) {
ksft_print_msg("Disabling (unmerging) KSM failed\n");
return err_map;
}
- if (get_my_merging_pages() > 0) {
+ if (ksm_get_self_merging_pages() > 0) {
ksft_print_msg("Still pages merged\n");
return err_map;
}
@@ -218,7 +135,7 @@ static char *__mmap_and_merge_range(char val, unsigned long size, int prot,
}
/* Run KSM to trigger merging and wait. */
- if (ksm_merge()) {
+ if (ksm_start() < 0) {
ksft_print_msg("Running KSM failed\n");
goto unmap;
}
@@ -227,7 +144,7 @@ static char *__mmap_and_merge_range(char val, unsigned long size, int prot,
* Check if anything was merged at all. Ignore the zero page that is
* accounted differently (depending on kernel support).
*/
- if (val && !get_my_merging_pages()) {
+ if (val && !ksm_get_self_merging_pages()) {
ksft_print_msg("No pages got merged\n");
goto unmap;
}
@@ -274,6 +191,7 @@ static void test_unmerge(void)
ksft_test_result(!range_maps_duplicates(map, size),
"Pages were unmerged\n");
unmap:
+ ksm_stop();
munmap(map, size);
}
@@ -286,15 +204,12 @@ static void test_unmerge_zero_pages(void)
ksft_print_msg("[RUN] %s\n", __func__);
- if (proc_self_ksm_stat_fd < 0) {
- ksft_test_result_skip("open(\"/proc/self/ksm_stat\") failed\n");
+ if (ksm_get_self_zero_pages() < 0) {
+ ksft_test_result_skip("accessing \"/proc/self/ksm_stat\" failed\n");
return;
}
- if (ksm_use_zero_pages_fd < 0) {
- ksft_test_result_skip("open \"/sys/kernel/mm/ksm/use_zero_pages\" failed\n");
- return;
- }
- if (write(ksm_use_zero_pages_fd, "1", 1) != 1) {
+
+ if (ksm_use_zero_pages() < 0) {
ksft_test_result_skip("write \"/sys/kernel/mm/ksm/use_zero_pages\" failed\n");
return;
}
@@ -306,7 +221,7 @@ static void test_unmerge_zero_pages(void)
/* Check if ksm_zero_pages is updated correctly after KSM merging */
pages_expected = size / pagesize;
- if (pages_expected != get_my_ksm_zero_pages()) {
+ if (pages_expected != ksm_get_self_zero_pages()) {
ksft_test_result_fail("'ksm_zero_pages' updated after merging\n");
goto unmap;
}
@@ -319,7 +234,7 @@ static void test_unmerge_zero_pages(void)
/* Check if ksm_zero_pages is updated correctly after unmerging */
pages_expected /= 2;
- if (pages_expected != get_my_ksm_zero_pages()) {
+ if (pages_expected != ksm_get_self_zero_pages()) {
ksft_test_result_fail("'ksm_zero_pages' updated after unmerging\n");
goto unmap;
}
@@ -329,7 +244,7 @@ static void test_unmerge_zero_pages(void)
*((unsigned int *)&map[offs]) = offs;
/* Now we should have no zeropages remaining. */
- if (get_my_ksm_zero_pages()) {
+ if (ksm_get_self_zero_pages()) {
ksft_test_result_fail("'ksm_zero_pages' updated after write fault\n");
goto unmap;
}
@@ -338,6 +253,7 @@ static void test_unmerge_zero_pages(void)
ksft_test_result(!range_maps_duplicates(map, size),
"KSM zero pages were unmerged\n");
unmap:
+ ksm_stop();
munmap(map, size);
}
@@ -366,6 +282,7 @@ static void test_unmerge_discarded(void)
ksft_test_result(!range_maps_duplicates(map, size),
"Pages were unmerged\n");
unmap:
+ ksm_stop();
munmap(map, size);
}
@@ -393,9 +310,13 @@ static void test_unmerge_uffd_wp(void)
/* See if UFFD-WP is around. */
uffdio_api.api = UFFD_API;
- uffdio_api.features = UFFD_FEATURE_PAGEFAULT_FLAG_WP;
+ uffdio_api.features = 0;
if (ioctl(uffd, UFFDIO_API, &uffdio_api) < 0) {
- ksft_test_result_fail("UFFDIO_API failed\n");
+ if (errno == EINVAL)
+ ksft_test_result_skip("The API version requested is not supported\n");
+ else
+ ksft_test_result_fail("UFFDIO_API failed: %s\n", strerror(errno));
+
goto close_uffd;
}
if (!(uffdio_api.features & UFFD_FEATURE_PAGEFAULT_FLAG_WP)) {
@@ -403,6 +324,26 @@ static void test_unmerge_uffd_wp(void)
goto close_uffd;
}
+ /*
+ * UFFDIO_API must only be called once to enable features.
+ * So we close the old userfaultfd and create a new one to
+ * actually enable UFFD_FEATURE_PAGEFAULT_FLAG_WP.
+ */
+ close(uffd);
+ uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
+ if (uffd < 0) {
+ ksft_test_result_fail("__NR_userfaultfd failed\n");
+ goto unmap;
+ }
+
+ /* Now, enable it ("two-step handshake") */
+ uffdio_api.api = UFFD_API;
+ uffdio_api.features = UFFD_FEATURE_PAGEFAULT_FLAG_WP;
+ if (ioctl(uffd, UFFDIO_API, &uffdio_api) < 0) {
+ ksft_test_result_fail("UFFDIO_API failed: %s\n", strerror(errno));
+ goto close_uffd;
+ }
+
/* Register UFFD-WP, no need for an actual handler. */
if (uffd_register(uffd, map, size, false, true, false)) {
ksft_test_result_fail("UFFDIO_REGISTER_MODE_WP failed\n");
@@ -428,6 +369,7 @@ static void test_unmerge_uffd_wp(void)
close_uffd:
close(uffd);
unmap:
+ ksm_stop();
munmap(map, size);
}
#endif
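The comment in the hunk above is the key detail: UFFDIO_API performs its feature handshake exactly once per file descriptor, so probing for a feature and then enabling it requires a fresh fd in between. A consolidated sketch of the protocol as the test now performs it:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/userfaultfd.h>

    /* Sketch: probe for UFFD-WP, then re-open and actually enable it. */
    static int open_uffd_with_wp(void)
    {
            struct uffdio_api api = { .api = UFFD_API, .features = 0 };
            int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

            if (uffd < 0)
                    return -1;
            if (ioctl(uffd, UFFDIO_API, &api) < 0 ||
                !(api.features & UFFD_FEATURE_PAGEFAULT_FLAG_WP)) {
                    close(uffd);            /* probe failed or no WP */
                    return -1;
            }
            close(uffd);                    /* handshake is once-per-fd */
            uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
            if (uffd < 0)
                    return -1;
            api.api = UFFD_API;
            api.features = UFFD_FEATURE_PAGEFAULT_FLAG_WP;
            if (ioctl(uffd, UFFDIO_API, &api) < 0) {
                    close(uffd);
                    return -1;
            }
            return uffd;
    }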
@@ -482,27 +424,30 @@ static int test_child_ksm(void)
/* Test if KSM is enabled for the process. */
if (prctl(PR_GET_MEMORY_MERGE, 0, 0, 0, 0) != 1)
- return -1;
+ return 1;
/* Test if merge could really happen. */
map = __mmap_and_merge_range(0xcf, size, PROT_READ | PROT_WRITE, KSM_MERGE_NONE);
if (map == MAP_MERGE_FAIL)
- return -2;
+ return 2;
else if (map == MAP_MERGE_SKIP)
- return -3;
+ return 3;
+ ksm_stop();
munmap(map, size);
return 0;
}
static void test_child_ksm_err(int status)
{
- if (status == -1)
+ if (status == 1)
ksft_test_result_fail("unexpected PR_GET_MEMORY_MERGE result in child\n");
- else if (status == -2)
+ else if (status == 2)
ksft_test_result_fail("Merge in child failed\n");
- else if (status == -3)
+ else if (status == 3)
ksft_test_result_skip("Merge in child skipped\n");
+ else if (status == 4)
+ ksft_test_result_fail("Binary not found\n");
}
/* Verify that prctl ksm flag is inherited. */
@@ -524,6 +469,7 @@ static void test_prctl_fork(void)
child_pid = fork();
if (!child_pid) {
+ init_global_file_handles();
exit(test_child_ksm());
} else if (child_pid < 0) {
ksft_test_result_fail("fork() failed\n");
@@ -549,6 +495,46 @@ static void test_prctl_fork(void)
ksft_test_result_pass("PR_SET_MEMORY_MERGE value is inherited\n");
}
+static int start_ksmd_and_set_frequency(char *pages_to_scan, char *sleep_ms)
+{
+ int ksm_fd;
+
+ ksm_fd = open("/sys/kernel/mm/ksm/run", O_RDWR);
+ if (ksm_fd < 0)
+ return -errno;
+
+ if (write(ksm_fd, "1", 1) != 1)
+ return -errno;
+
+ if (write(pages_to_scan_fd, pages_to_scan, strlen(pages_to_scan)) <= 0)
+ return -errno;
+
+ if (write(sleep_millisecs_fd, sleep_ms, strlen(sleep_ms)) <= 0)
+ return -errno;
+
+ return 0;
+}
+
+static int stop_ksmd_and_restore_frequency(void)
+{
+ int ksm_fd;
+
+ ksm_fd = open("/sys/kernel/mm/ksm/run", O_RDWR);
+ if (ksm_fd < 0)
+ return -errno;
+
+ if (write(ksm_fd, "2", 1) != 1)
+ return -errno;
+
+ if (write(pages_to_scan_fd, "100", 3) <= 0)
+ return -errno;
+
+ if (write(sleep_millisecs_fd, "20", 2) <= 0)
+ return -errno;
+
+ return 0;
+}
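For context, /sys/kernel/mm/ksm/run accepts 0 (stop ksmd), 1 (run ksmd), or 2 (stop ksmd and unmerge all merged pages), which is why the start helper writes "1" and the restore helper writes "2". The values written back, pages_to_scan=100 and sleep_millisecs=20, are the kernel's defaults.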
+
static void test_prctl_fork_exec(void)
{
int ret, status;
@@ -556,6 +542,9 @@ static void test_prctl_fork_exec(void)
ksft_print_msg("[RUN] %s\n", __func__);
+ if (start_ksmd_and_set_frequency("2000", "0"))
+ ksft_test_result_fail("set ksmd's scanning frequency failed\n");
+
ret = prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0);
if (ret < 0 && errno == EINVAL) {
ksft_test_result_skip("PR_SET_MEMORY_MERGE not supported\n");
@@ -571,10 +560,10 @@ static void test_prctl_fork_exec(void)
return;
} else if (child_pid == 0) {
char *prg_name = "./ksm_functional_tests";
- char *argv_for_program[] = { prg_name, FORK_EXEC_CHILD_PRG_NAME };
+ char *argv_for_program[] = { prg_name, FORK_EXEC_CHILD_PRG_NAME, NULL };
execv(prg_name, argv_for_program);
- return;
+ exit(4);
}
if (waitpid(child_pid, &status, 0) > 0) {
@@ -598,6 +587,11 @@ static void test_prctl_fork_exec(void)
return;
}
+ if (stop_ksmd_and_restore_frequency()) {
+ ksft_test_result_fail("restore ksmd frequency failed\n");
+ return;
+ }
+
ksft_test_result_pass("PR_SET_MEMORY_MERGE value is inherited\n");
}
@@ -620,6 +614,7 @@ static void test_prctl_unmerge(void)
ksft_test_result(!range_maps_duplicates(map, size),
"Pages were unmerged\n");
unmap:
+ ksm_stop();
munmap(map, size);
}
@@ -653,6 +648,47 @@ static void test_prot_none(void)
ksft_test_result(!range_maps_duplicates(map, size),
"Pages were unmerged\n");
unmap:
+ ksm_stop();
+ munmap(map, size);
+}
+
+static void test_fork_ksm_merging_page_count(void)
+{
+ const unsigned int size = 2 * MiB;
+ char *map;
+ pid_t child_pid;
+ int status;
+
+ ksft_print_msg("[RUN] %s\n", __func__);
+
+ map = mmap_and_merge_range(0xcf, size, PROT_READ | PROT_WRITE, KSM_MERGE_MADVISE);
+ if (map == MAP_FAILED)
+ return;
+
+ child_pid = fork();
+ if (!child_pid) {
+ init_global_file_handles();
+ exit(ksm_get_self_merging_pages());
+ } else if (child_pid < 0) {
+ ksft_test_result_fail("fork() failed\n");
+ goto unmap;
+ }
+
+ if (waitpid(child_pid, &status, 0) < 0) {
+ ksft_test_result_fail("waitpid() failed\n");
+ goto unmap;
+ }
+
+ status = WEXITSTATUS(status);
+ if (status) {
+ ksft_test_result_fail("ksm_merging_page in child: %d\n", status);
+ goto unmap;
+ }
+
+ ksft_test_result_pass("ksm_merging_pages is not inherited after fork\n");
+
+unmap:
+ ksm_stop();
munmap(map, size);
}
@@ -661,24 +697,27 @@ static void init_global_file_handles(void)
mem_fd = open("/proc/self/mem", O_RDWR);
if (mem_fd < 0)
ksft_exit_fail_msg("opening /proc/self/mem failed\n");
- ksm_fd = open("/sys/kernel/mm/ksm/run", O_RDWR);
- if (ksm_fd < 0)
- ksft_exit_skip("open(\"/sys/kernel/mm/ksm/run\") failed\n");
- ksm_full_scans_fd = open("/sys/kernel/mm/ksm/full_scans", O_RDONLY);
- if (ksm_full_scans_fd < 0)
- ksft_exit_skip("open(\"/sys/kernel/mm/ksm/full_scans\") failed\n");
+ if (ksm_stop() < 0)
+ ksft_exit_skip("accessing \"/sys/kernel/mm/ksm/run\") failed\n");
+ if (ksm_get_full_scans() < 0)
+ ksft_exit_skip("accessing \"/sys/kernel/mm/ksm/full_scans\") failed\n");
pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
if (pagemap_fd < 0)
ksft_exit_skip("open(\"/proc/self/pagemap\") failed\n");
- proc_self_ksm_stat_fd = open("/proc/self/ksm_stat", O_RDONLY);
- proc_self_ksm_merging_pages_fd = open("/proc/self/ksm_merging_pages",
- O_RDONLY);
- ksm_use_zero_pages_fd = open("/sys/kernel/mm/ksm/use_zero_pages", O_RDWR);
+ if (ksm_get_self_merging_pages() < 0)
+ ksft_exit_skip("accessing \"/proc/self/ksm_merging_pages\") failed\n");
+
+ pages_to_scan_fd = open("/sys/kernel/mm/ksm/pages_to_scan", O_RDWR);
+ if (pages_to_scan_fd < 0)
+ ksft_exit_fail_msg("opening /sys/kernel/mm/ksm/pages_to_scan failed\n");
+ sleep_millisecs_fd = open("/sys/kernel/mm/ksm/sleep_millisecs", O_RDWR);
+ if (sleep_millisecs_fd < 0)
+ ksft_exit_fail_msg("opening /sys/kernel/mm/ksm/sleep_millisecs failed\n");
}
int main(int argc, char **argv)
{
- unsigned int tests = 8;
+ unsigned int tests = 9;
int err;
if (argc > 1 && !strcmp(argv[1], FORK_EXEC_CHILD_PRG_NAME)) {
@@ -710,6 +749,7 @@ int main(int argc, char **argv)
test_prctl_fork();
test_prctl_fork_exec();
test_prctl_unmerge();
+ test_fork_ksm_merging_page_count();
err = ksft_get_fail_cnt();
if (err)
diff --git a/tools/testing/selftests/mm/ksm_tests.c b/tools/testing/selftests/mm/ksm_tests.c
index dcdd5bb20f3d..a0b48b839d54 100644
--- a/tools/testing/selftests/mm/ksm_tests.c
+++ b/tools/testing/selftests/mm/ksm_tests.c
@@ -12,9 +12,10 @@
#include <stdint.h>
#include <err.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include <include/vdso/time64.h>
#include "vm_util.h"
+#include "thp_settings.h"
#define KSM_SYSFS_PATH "/sys/kernel/mm/ksm/"
#define KSM_FP(s) (KSM_SYSFS_PATH s)
@@ -58,40 +59,12 @@ int debug;
static int ksm_write_sysfs(const char *file_path, unsigned long val)
{
- FILE *f = fopen(file_path, "w");
-
- if (!f) {
- fprintf(stderr, "f %s\n", file_path);
- perror("fopen");
- return 1;
- }
- if (fprintf(f, "%lu", val) < 0) {
- perror("fprintf");
- fclose(f);
- return 1;
- }
- fclose(f);
-
- return 0;
+ return write_sysfs(file_path, val);
}
static int ksm_read_sysfs(const char *file_path, unsigned long *val)
{
- FILE *f = fopen(file_path, "r");
-
- if (!f) {
- fprintf(stderr, "f %s\n", file_path);
- perror("fopen");
- return 1;
- }
- if (fscanf(f, "%lu", val) != 1) {
- perror("fscanf");
- fclose(f);
- return 1;
- }
- fclose(f);
-
- return 0;
+ return read_sysfs(file_path, val);
}
static void ksm_print_sysfs(void)
@@ -555,6 +528,11 @@ static int ksm_merge_hugepages_time(int merge_type, int mapping, int prot,
unsigned long scan_time_ns;
int pagemap_fd, n_normal_pages, n_huge_pages;
+ if (!thp_is_enabled()) {
+ printf("Transparent Hugepages not available\n");
+ return KSFT_SKIP;
+ }
+
map_size *= MB;
size_t len = map_size;
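
The thp_is_enabled() guard added above comes from the shared thp_settings helpers. Conceptually it reduces to parsing the active THP mode out of sysfs; a rough sketch, assuming the usual bracketed-mode format of the `enabled` file (the real helper may recognise more states):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Rough equivalent: THP is usable unless the global mode is "never". */
static bool thp_enabled_sketch(void)
{
	char buf[128] = {};
	FILE *f = fopen("/sys/kernel/mm/transparent_hugepage/enabled", "r");

	if (!f)
		return false;	/* THP not compiled in */
	if (!fgets(buf, sizeof(buf), f))
		buf[0] = '\0';
	fclose(f);
	/* File reads e.g. "always [madvise] never"; active mode is bracketed. */
	return !strstr(buf, "[never]");
}
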
diff --git a/tools/testing/selftests/mm/madv_populate.c b/tools/testing/selftests/mm/madv_populate.c
index b6fabd5c27ed..88050e0f829a 100644
--- a/tools/testing/selftests/mm/madv_populate.c
+++ b/tools/testing/selftests/mm/madv_populate.c
@@ -17,7 +17,7 @@
#include <linux/mman.h>
#include <sys/mman.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "vm_util.h"
/*
@@ -264,23 +264,6 @@ static void test_softdirty(void)
munmap(addr, SIZE);
}
-static int system_has_softdirty(void)
-{
- /*
- * There is no way to check if the kernel supports soft-dirty, other
- * than by writing to a page and seeing if the bit was set. But the
- * tests are intended to check that the bit gets set when it should, so
- * doing that check would turn a potentially legitimate fail into a
- * skip. Fortunately, we know for sure that arm64 does not support
- * soft-dirty. So for now, let's just use the arch as a corse guide.
- */
-#if defined(__aarch64__)
- return 0;
-#else
- return 1;
-#endif
-}
-
int main(int argc, char **argv)
{
int nr_tests = 16;
@@ -288,7 +271,7 @@ int main(int argc, char **argv)
pagesize = getpagesize();
- if (system_has_softdirty())
+ if (softdirty_supported())
nr_tests += 5;
ksft_print_header();
@@ -300,7 +283,7 @@ int main(int argc, char **argv)
test_holes();
test_populate_read();
test_populate_write();
- if (system_has_softdirty())
+ if (softdirty_supported())
test_softdirty();
err = ksft_get_fail_cnt();
diff --git a/tools/testing/selftests/mm/map_fixed_noreplace.c b/tools/testing/selftests/mm/map_fixed_noreplace.c
index 1e9980b8993c..11241edde7fe 100644
--- a/tools/testing/selftests/mm/map_fixed_noreplace.c
+++ b/tools/testing/selftests/mm/map_fixed_noreplace.c
@@ -12,7 +12,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
-#include "../kselftest.h"
+#include "kselftest.h"
static void dump_maps(void)
{
diff --git a/tools/testing/selftests/mm/map_hugetlb.c b/tools/testing/selftests/mm/map_hugetlb.c
index b47399feab53..aa409107611b 100644
--- a/tools/testing/selftests/mm/map_hugetlb.c
+++ b/tools/testing/selftests/mm/map_hugetlb.c
@@ -11,7 +11,7 @@
#include <sys/mman.h>
#include <fcntl.h>
#include "vm_util.h"
-#include "../kselftest.h"
+#include "kselftest.h"
#define LENGTH (256UL*1024*1024)
#define PROTECTION (PROT_READ | PROT_WRITE)
diff --git a/tools/testing/selftests/mm/map_populate.c b/tools/testing/selftests/mm/map_populate.c
index 9df2636c829b..712327f4e932 100644
--- a/tools/testing/selftests/mm/map_populate.c
+++ b/tools/testing/selftests/mm/map_populate.c
@@ -16,7 +16,7 @@
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "vm_util.h"
diff --git a/tools/testing/selftests/mm/mdwe_test.c b/tools/testing/selftests/mm/mdwe_test.c
index 200bedcdc32e..647779653da0 100644
--- a/tools/testing/selftests/mm/mdwe_test.c
+++ b/tools/testing/selftests/mm/mdwe_test.c
@@ -14,7 +14,7 @@
#include <sys/wait.h>
#include <unistd.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#ifndef __aarch64__
# define PROT_BTI 0
diff --git a/tools/testing/selftests/mm/memfd_secret.c b/tools/testing/selftests/mm/memfd_secret.c
index 9a0597310a76..aac4f795c327 100644
--- a/tools/testing/selftests/mm/memfd_secret.c
+++ b/tools/testing/selftests/mm/memfd_secret.c
@@ -22,7 +22,7 @@
#include <stdio.h>
#include <fcntl.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#define fail(fmt, ...) ksft_test_result_fail(fmt, ##__VA_ARGS__)
#define pass(fmt, ...) ksft_test_result_pass(fmt, ##__VA_ARGS__)
diff --git a/tools/testing/selftests/mm/merge.c b/tools/testing/selftests/mm/merge.c
index c76646cdf6e6..363c1033cc7d 100644
--- a/tools/testing/selftests/mm/merge.c
+++ b/tools/testing/selftests/mm/merge.c
@@ -1,13 +1,19 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#define _GNU_SOURCE
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
+#include <linux/prctl.h>
+#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>
+#include <sys/prctl.h>
+#include <sys/syscall.h>
#include <sys/wait.h>
+#include <linux/perf_event.h>
#include "vm_util.h"
+#include <linux/mman.h>
FIXTURE(merge)
{
@@ -20,7 +26,7 @@ FIXTURE_SETUP(merge)
{
self->page_size = psize();
/* Carve out PROT_NONE region to map over. */
- self->carveout = mmap(NULL, 12 * self->page_size, PROT_NONE,
+ self->carveout = mmap(NULL, 30 * self->page_size, PROT_NONE,
MAP_ANON | MAP_PRIVATE, -1, 0);
ASSERT_NE(self->carveout, MAP_FAILED);
/* Setup PROCMAP_QUERY interface. */
@@ -29,8 +35,13 @@ FIXTURE_SETUP(merge)
FIXTURE_TEARDOWN(merge)
{
- ASSERT_EQ(munmap(self->carveout, 12 * self->page_size), 0);
+ ASSERT_EQ(munmap(self->carveout, 30 * self->page_size), 0);
ASSERT_EQ(close_procmap(&self->procmap), 0);
+ /*
+ * Clear unconditionally, as some tests set this. It is no issue if this
+ * fails (KSM may be disabled for instance).
+ */
+ prctl(PR_SET_MEMORY_MERGE, 0, 0, 0, 0);
}
TEST_F(merge, mprotect_unfaulted_left)
@@ -452,4 +463,712 @@ TEST_F(merge, forked_source_vma)
ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr2 + 5 * page_size);
}
+TEST_F(merge, handle_uprobe_upon_merged_vma)
+{
+ const size_t attr_sz = sizeof(struct perf_event_attr);
+ unsigned int page_size = self->page_size;
+ const char *probe_file = "./foo";
+ char *carveout = self->carveout;
+ struct perf_event_attr attr;
+ unsigned long type;
+ void *ptr1, *ptr2;
+ int fd;
+
+ fd = open(probe_file, O_RDWR|O_CREAT, 0600);
+ ASSERT_GE(fd, 0);
+
+ ASSERT_EQ(ftruncate(fd, page_size), 0);
+ if (read_sysfs("/sys/bus/event_source/devices/uprobe/type", &type) != 0) {
+ SKIP(goto out, "Failed to read uprobe sysfs file, skipping");
+ }
+
+ memset(&attr, 0, attr_sz);
+ attr.size = attr_sz;
+ attr.type = type;
+ attr.config1 = (__u64)(long)probe_file;
+ attr.config2 = 0x0;
+
+ ASSERT_GE(syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0), 0);
+
+ ptr1 = mmap(&carveout[page_size], 10 * page_size, PROT_EXEC,
+ MAP_PRIVATE | MAP_FIXED, fd, 0);
+ ASSERT_NE(ptr1, MAP_FAILED);
+
+ ptr2 = mremap(ptr1, page_size, 2 * page_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED, ptr1 + 5 * page_size);
+ ASSERT_NE(ptr2, MAP_FAILED);
+
+ ASSERT_NE(mremap(ptr2, page_size, page_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED, ptr1), MAP_FAILED);
+
+out:
+ close(fd);
+ remove(probe_file);
+}
+
+TEST_F(merge, ksm_merge)
+{
+ unsigned int page_size = self->page_size;
+ char *carveout = self->carveout;
+ struct procmap_fd *procmap = &self->procmap;
+ char *ptr, *ptr2;
+ int err;
+
+ /*
+	 * Map two R/W areas immediately adjacent to one another; they should
+ * trivially merge:
+ *
+ * |-----------|-----------|
+ * | R/W | R/W |
+ * |-----------|-----------|
+ * ptr ptr2
+ */
+
+ ptr = mmap(&carveout[page_size], page_size, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+ ptr2 = mmap(&carveout[2 * page_size], page_size,
+ PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr2, MAP_FAILED);
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 2 * page_size);
+
+ /* Unmap the second half of this merged VMA. */
+ ASSERT_EQ(munmap(ptr2, page_size), 0);
+
+ /* OK, now enable global KSM merge. We clear this on test teardown. */
+ err = prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0);
+ if (err == -1) {
+ int errnum = errno;
+
+ /* Only non-failure case... */
+ ASSERT_EQ(errnum, EINVAL);
+ /* ...but indicates we should skip. */
+ SKIP(return, "KSM memory merging not supported, skipping.");
+ }
+
+ /*
+ * Now map a VMA adjacent to the existing that was just made
+ * VM_MERGEABLE, this should merge as well.
+ */
+ ptr2 = mmap(&carveout[2 * page_size], page_size,
+ PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr2, MAP_FAILED);
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 2 * page_size);
+
+	/* Now unmap this VMA altogether. */
+ ASSERT_EQ(munmap(ptr, 2 * page_size), 0);
+
+ /* Try the same operation as before, asserting this also merges fine. */
+ ptr = mmap(&carveout[page_size], page_size, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+ ptr2 = mmap(&carveout[2 * page_size], page_size,
+ PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr2, MAP_FAILED);
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 2 * page_size);
+}
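
The EINVAL-means-unsupported probe above is the standard pattern for PR_SET_MEMORY_MERGE. Condensed to just the probe, as a sketch (the fallback constant is from linux/prctl.h on kernels that ship it):

#include <errno.h>
#include <sys/prctl.h>

#ifndef PR_SET_MEMORY_MERGE
#define PR_SET_MEMORY_MERGE 67	/* linux/prctl.h */
#endif

/* Returns 1 if enabled, 0 if the kernel lacks KSM, -1 on other errors. */
static int try_enable_ksm(void)
{
	if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0) == -1)
		return errno == EINVAL ? 0 : -1;
	return 1;
}
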
+
+TEST_F(merge, mremap_unfaulted_to_faulted)
+{
+ unsigned int page_size = self->page_size;
+ char *carveout = self->carveout;
+ struct procmap_fd *procmap = &self->procmap;
+ char *ptr, *ptr2;
+
+ /*
+ * Map two distinct areas:
+ *
+ * |-----------| |-----------|
+ * | unfaulted | | unfaulted |
+ * |-----------| |-----------|
+ * ptr ptr2
+ */
+ ptr = mmap(&carveout[page_size], 5 * page_size, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+ ptr2 = mmap(&carveout[7 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr2, MAP_FAILED);
+
+ /* Offset ptr2 further away. */
+ ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED, ptr2 + page_size * 1000);
+ ASSERT_NE(ptr2, MAP_FAILED);
+
+ /*
+ * Fault in ptr:
+ * \
+ * |-----------| / |-----------|
+ * | faulted | \ | unfaulted |
+ * |-----------| / |-----------|
+ * ptr \ ptr2
+ */
+ ptr[0] = 'x';
+
+ /*
+ * Now move ptr2 adjacent to ptr:
+ *
+ * |-----------|-----------|
+ * | faulted | unfaulted |
+ * |-----------|-----------|
+ * ptr ptr2
+ *
+ * It should merge:
+ *
+ * |----------------------|
+ * | faulted |
+ * |----------------------|
+ * ptr
+ */
+ ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[5 * page_size]);
+ ASSERT_NE(ptr2, MAP_FAILED);
+
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 10 * page_size);
+}
+
+TEST_F(merge, mremap_unfaulted_behind_faulted)
+{
+ unsigned int page_size = self->page_size;
+ char *carveout = self->carveout;
+ struct procmap_fd *procmap = &self->procmap;
+ char *ptr, *ptr2;
+
+ /*
+ * Map two distinct areas:
+ *
+ * |-----------| |-----------|
+ * | unfaulted | | unfaulted |
+ * |-----------| |-----------|
+ * ptr ptr2
+ */
+ ptr = mmap(&carveout[6 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+ ptr2 = mmap(&carveout[14 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr2, MAP_FAILED);
+
+ /* Offset ptr2 further away. */
+ ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED, ptr2 + page_size * 1000);
+ ASSERT_NE(ptr2, MAP_FAILED);
+
+ /*
+ * Fault in ptr:
+ * \
+ * |-----------| / |-----------|
+ * | faulted | \ | unfaulted |
+ * |-----------| / |-----------|
+ * ptr \ ptr2
+ */
+ ptr[0] = 'x';
+
+ /*
+ * Now move ptr2 adjacent, but behind, ptr:
+ *
+ * |-----------|-----------|
+ * | unfaulted | faulted |
+ * |-----------|-----------|
+ * ptr2 ptr
+ *
+ * It should merge:
+ *
+ * |----------------------|
+ * | faulted |
+ * |----------------------|
+ * ptr2
+ */
+ ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED, &carveout[page_size]);
+ ASSERT_NE(ptr2, MAP_FAILED);
+
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr2));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr2);
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr2 + 10 * page_size);
+}
+
+TEST_F(merge, mremap_unfaulted_between_faulted)
+{
+ unsigned int page_size = self->page_size;
+ char *carveout = self->carveout;
+ struct procmap_fd *procmap = &self->procmap;
+ char *ptr, *ptr2, *ptr3;
+
+ /*
+ * Map three distinct areas:
+ *
+ * |-----------| |-----------| |-----------|
+ * | unfaulted | | unfaulted | | unfaulted |
+ * |-----------| |-----------| |-----------|
+ * ptr ptr2 ptr3
+ */
+ ptr = mmap(&carveout[page_size], 5 * page_size, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+ ptr2 = mmap(&carveout[7 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr2, MAP_FAILED);
+ ptr3 = mmap(&carveout[14 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr3, MAP_FAILED);
+
+ /* Offset ptr3 further away. */
+ ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED, ptr3 + page_size * 2000);
+ ASSERT_NE(ptr3, MAP_FAILED);
+
+ /* Offset ptr2 further away. */
+ ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED, ptr2 + page_size * 1000);
+ ASSERT_NE(ptr2, MAP_FAILED);
+
+ /*
+ * Fault in ptr, ptr3:
+ * \ \
+ * |-----------| / |-----------| / |-----------|
+ * | faulted | \ | unfaulted | \ | faulted |
+ * |-----------| / |-----------| / |-----------|
+ * ptr \ ptr2 \ ptr3
+ */
+ ptr[0] = 'x';
+ ptr3[0] = 'x';
+
+ /*
+ * Move ptr3 back into place, leaving a place for ptr2:
+ * \
+ * |-----------| |-----------| / |-----------|
+ * | faulted | | faulted | \ | unfaulted |
+ * |-----------| |-----------| / |-----------|
+ * ptr ptr3 \ ptr2
+ */
+ ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[10 * page_size]);
+ ASSERT_NE(ptr3, MAP_FAILED);
+
+ /*
+ * Finally, move ptr2 into place:
+ *
+ * |-----------|-----------|-----------|
+ * | faulted | unfaulted | faulted |
+ * |-----------|-----------|-----------|
+ * ptr ptr2 ptr3
+ *
+ * It should merge, but only ptr, ptr2:
+ *
+ * |-----------------------|-----------|
+ * | faulted | unfaulted |
+ * |-----------------------|-----------|
+ */
+ ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[5 * page_size]);
+ ASSERT_NE(ptr2, MAP_FAILED);
+
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 10 * page_size);
+
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr3));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr3);
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr3 + 5 * page_size);
+}
+
+TEST_F(merge, mremap_unfaulted_between_faulted_unfaulted)
+{
+ unsigned int page_size = self->page_size;
+ char *carveout = self->carveout;
+ struct procmap_fd *procmap = &self->procmap;
+ char *ptr, *ptr2, *ptr3;
+
+ /*
+ * Map three distinct areas:
+ *
+ * |-----------| |-----------| |-----------|
+ * | unfaulted | | unfaulted | | unfaulted |
+ * |-----------| |-----------| |-----------|
+ * ptr ptr2 ptr3
+ */
+ ptr = mmap(&carveout[page_size], 5 * page_size, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+ ptr2 = mmap(&carveout[7 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr2, MAP_FAILED);
+ ptr3 = mmap(&carveout[14 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr3, MAP_FAILED);
+
+ /* Offset ptr3 further away. */
+ ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED, ptr3 + page_size * 2000);
+ ASSERT_NE(ptr3, MAP_FAILED);
+
+ /* Offset ptr2 further away. */
+ ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED, ptr2 + page_size * 1000);
+ ASSERT_NE(ptr2, MAP_FAILED);
+
+ /*
+ * Fault in ptr:
+ * \ \
+ * |-----------| / |-----------| / |-----------|
+ * | faulted | \ | unfaulted | \ | unfaulted |
+ * |-----------| / |-----------| / |-----------|
+ * ptr \ ptr2 \ ptr3
+ */
+ ptr[0] = 'x';
+
+ /*
+ * Move ptr3 back into place, leaving a place for ptr2:
+ * \
+ * |-----------| |-----------| / |-----------|
+ * | faulted | | unfaulted | \ | unfaulted |
+ * |-----------| |-----------| / |-----------|
+ * ptr ptr3 \ ptr2
+ */
+ ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[10 * page_size]);
+ ASSERT_NE(ptr3, MAP_FAILED);
+
+ /*
+ * Finally, move ptr2 into place:
+ *
+ * |-----------|-----------|-----------|
+ * | faulted | unfaulted | unfaulted |
+ * |-----------|-----------|-----------|
+ * ptr ptr2 ptr3
+ *
+ * It should merge:
+ *
+ * |-----------------------------------|
+ * | faulted |
+ * |-----------------------------------|
+ */
+ ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[5 * page_size]);
+ ASSERT_NE(ptr2, MAP_FAILED);
+
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);
+}
+
+TEST_F(merge, mremap_unfaulted_between_correctly_placed_faulted)
+{
+ unsigned int page_size = self->page_size;
+ char *carveout = self->carveout;
+ struct procmap_fd *procmap = &self->procmap;
+ char *ptr, *ptr2;
+
+ /*
+ * Map one larger area:
+ *
+ * |-----------------------------------|
+ * | unfaulted |
+ * |-----------------------------------|
+ */
+ ptr = mmap(&carveout[page_size], 15 * page_size, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+
+ /*
+ * Fault in ptr:
+ *
+ * |-----------------------------------|
+ * | faulted |
+ * |-----------------------------------|
+ */
+ ptr[0] = 'x';
+
+ /*
+ * Unmap middle:
+ *
+ * |-----------| |-----------|
+ * | faulted | | faulted |
+ * |-----------| |-----------|
+ *
+ * Now the faulted areas are compatible with each other (anon_vma the
+ * same, vma->vm_pgoff equal to virtual page offset).
+ */
+ ASSERT_EQ(munmap(&ptr[5 * page_size], 5 * page_size), 0);
+
+ /*
+ * Map a new area, ptr2:
+ * \
+ * |-----------| |-----------| / |-----------|
+ * | faulted | | faulted | \ | unfaulted |
+ * |-----------| |-----------| / |-----------|
+ * ptr \ ptr2
+ */
+ ptr2 = mmap(&carveout[20 * page_size], 5 * page_size, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr2, MAP_FAILED);
+
+ /*
+ * Finally, move ptr2 into place:
+ *
+ * |-----------|-----------|-----------|
+ * | faulted | unfaulted | faulted |
+ * |-----------|-----------|-----------|
+ * ptr ptr2 ptr3
+ *
+ * It should merge:
+ *
+ * |-----------------------------------|
+ * | faulted |
+ * |-----------------------------------|
+ */
+ ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[5 * page_size]);
+ ASSERT_NE(ptr2, MAP_FAILED);
+
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);
+}
+
+TEST_F(merge, mremap_correct_placed_faulted)
+{
+ unsigned int page_size = self->page_size;
+ char *carveout = self->carveout;
+ struct procmap_fd *procmap = &self->procmap;
+ char *ptr, *ptr2, *ptr3;
+
+ /*
+ * Map one larger area:
+ *
+ * |-----------------------------------|
+ * | unfaulted |
+ * |-----------------------------------|
+ */
+ ptr = mmap(&carveout[page_size], 15 * page_size, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+
+ /*
+ * Fault in ptr:
+ *
+ * |-----------------------------------|
+ * | faulted |
+ * |-----------------------------------|
+ */
+ ptr[0] = 'x';
+
+ /*
+ * Offset the final and middle 5 pages further away:
+ * \ \
+ * |-----------| / |-----------| / |-----------|
+ * | faulted | \ | faulted | \ | faulted |
+ * |-----------| / |-----------| / |-----------|
+ * ptr \ ptr2 \ ptr3
+ */
+ ptr3 = &ptr[10 * page_size];
+ ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED, ptr3 + page_size * 2000);
+ ASSERT_NE(ptr3, MAP_FAILED);
+ ptr2 = &ptr[5 * page_size];
+ ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED, ptr2 + page_size * 1000);
+ ASSERT_NE(ptr2, MAP_FAILED);
+
+ /*
+ * Move ptr2 into its correct place:
+ * \
+ * |-----------|-----------| / |-----------|
+ * | faulted | faulted | \ | faulted |
+ * |-----------|-----------| / |-----------|
+ * ptr ptr2 \ ptr3
+ *
+ * It should merge:
+ * \
+ * |-----------------------| / |-----------|
+ * | faulted | \ | faulted |
+ * |-----------------------| / |-----------|
+ * ptr \ ptr3
+ */
+
+ ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[5 * page_size]);
+ ASSERT_NE(ptr2, MAP_FAILED);
+
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 10 * page_size);
+
+ /*
+ * Now move ptr out of place:
+ * \ \
+ * |-----------| / |-----------| / |-----------|
+ * | faulted | \ | faulted | \ | faulted |
+ * |-----------| / |-----------| / |-----------|
+ * ptr2 \ ptr \ ptr3
+ */
+ ptr = sys_mremap(ptr, 5 * page_size, 5 * page_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED, ptr + page_size * 1000);
+ ASSERT_NE(ptr, MAP_FAILED);
+
+ /*
+ * Now move ptr back into place:
+ * \
+ * |-----------|-----------| / |-----------|
+ * | faulted | faulted | \ | faulted |
+ * |-----------|-----------| / |-----------|
+ * ptr ptr2 \ ptr3
+ *
+ * It should merge:
+ * \
+ * |-----------------------| / |-----------|
+ * | faulted | \ | faulted |
+ * |-----------------------| / |-----------|
+ * ptr \ ptr3
+ */
+ ptr = sys_mremap(ptr, 5 * page_size, 5 * page_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED, &carveout[page_size]);
+ ASSERT_NE(ptr, MAP_FAILED);
+
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 10 * page_size);
+
+ /*
+ * Now move ptr out of place again:
+ * \ \
+ * |-----------| / |-----------| / |-----------|
+ * | faulted | \ | faulted | \ | faulted |
+ * |-----------| / |-----------| / |-----------|
+ * ptr2 \ ptr \ ptr3
+ */
+ ptr = sys_mremap(ptr, 5 * page_size, 5 * page_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED, ptr + page_size * 1000);
+ ASSERT_NE(ptr, MAP_FAILED);
+
+ /*
+ * Now move ptr3 back into place:
+ * \
+ * |-----------|-----------| / |-----------|
+ * | faulted | faulted | \ | faulted |
+ * |-----------|-----------| / |-----------|
+ * ptr2 ptr3 \ ptr
+ *
+ * It should merge:
+ * \
+ * |-----------------------| / |-----------|
+ * | faulted | \ | faulted |
+ * |-----------------------| / |-----------|
+ * ptr2 \ ptr
+ */
+ ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED, &ptr2[5 * page_size]);
+ ASSERT_NE(ptr3, MAP_FAILED);
+
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr2));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr2);
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr2 + 10 * page_size);
+
+ /*
+ * Now move ptr back into place:
+ *
+ * |-----------|-----------------------|
+ * | faulted | faulted |
+ * |-----------|-----------------------|
+ * ptr ptr2
+ *
+ * It should merge:
+ *
+ * |-----------------------------------|
+ * | faulted |
+ * |-----------------------------------|
+ * ptr
+ */
+ ptr = sys_mremap(ptr, 5 * page_size, 5 * page_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED, &carveout[page_size]);
+ ASSERT_NE(ptr, MAP_FAILED);
+
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);
+
+ /*
+ * Now move ptr2 out of the way:
+ * \
+ * |-----------| |-----------| / |-----------|
+ * | faulted | | faulted | \ | faulted |
+ * |-----------| |-----------| / |-----------|
+ * ptr ptr3 \ ptr2
+ */
+ ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED, ptr2 + page_size * 1000);
+ ASSERT_NE(ptr2, MAP_FAILED);
+
+ /*
+ * Now move it back:
+ *
+ * |-----------|-----------|-----------|
+ * | faulted | faulted | faulted |
+ * |-----------|-----------|-----------|
+ * ptr ptr2 ptr3
+ *
+ * It should merge:
+ *
+ * |-----------------------------------|
+ * | faulted |
+ * |-----------------------------------|
+ * ptr
+ */
+ ptr2 = sys_mremap(ptr2, 5 * page_size, 5 * page_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[5 * page_size]);
+ ASSERT_NE(ptr2, MAP_FAILED);
+
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);
+
+ /*
+ * Move ptr3 out of place:
+ * \
+ * |-----------------------| / |-----------|
+ * | faulted | \ | faulted |
+ * |-----------------------| / |-----------|
+ * ptr \ ptr3
+ */
+ ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED, ptr3 + page_size * 1000);
+ ASSERT_NE(ptr3, MAP_FAILED);
+
+ /*
+ * Now move it back:
+ *
+ * |-----------|-----------|-----------|
+ * | faulted | faulted | faulted |
+ * |-----------|-----------|-----------|
+ * ptr ptr2 ptr3
+ *
+ * It should merge:
+ *
+ * |-----------------------------------|
+ * | faulted |
+ * |-----------------------------------|
+ * ptr
+ */
+ ptr3 = sys_mremap(ptr3, 5 * page_size, 5 * page_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED, &ptr[10 * page_size]);
+ ASSERT_NE(ptr3, MAP_FAILED);
+
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);
+}
+
TEST_HARNESS_MAIN
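
find_vma_procmap(), used throughout merge.c, wraps the PROCMAP_QUERY ioctl on /proc/self/maps. A minimal covering-VMA lookup might look like the sketch below; this uses the simplest subset of the interface and assumes a kernel/header combination exposing struct procmap_query (the shared helper in vm_util.c is the authoritative version):

#include <fcntl.h>
#include <linux/fs.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Look up the VMA covering 'addr' without text-parsing /proc/self/maps. */
static int query_vma(unsigned long addr,
		     unsigned long *start, unsigned long *end)
{
	struct procmap_query q;
	int ret, fd = open("/proc/self/maps", O_RDONLY);

	if (fd < 0)
		return -1;
	memset(&q, 0, sizeof(q));
	q.size = sizeof(q);
	q.query_addr = addr;	/* default: VMA must cover this address */
	ret = ioctl(fd, PROCMAP_QUERY, &q);
	close(fd);
	if (ret)
		return -1;
	*start = q.vma_start;
	*end = q.vma_end;
	return 0;
}
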
diff --git a/tools/testing/selftests/mm/migration.c b/tools/testing/selftests/mm/migration.c
index 1e3a595fbf01..ee24b88c2b24 100644
--- a/tools/testing/selftests/mm/migration.c
+++ b/tools/testing/selftests/mm/migration.c
@@ -4,7 +4,9 @@
* paths in the kernel.
*/
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
+#include "thp_settings.h"
+
#include <strings.h>
#include <pthread.h>
#include <numa.h>
@@ -14,6 +16,7 @@
#include <sys/types.h>
#include <signal.h>
#include <time.h>
+#include "vm_util.h"
#define TWOMEG (2<<20)
#define RUNTIME (20)
@@ -101,15 +104,13 @@ int migrate(uint64_t *ptr, int n1, int n2)
void *access_mem(void *ptr)
{
- volatile uint64_t y = 0;
- volatile uint64_t *x = ptr;
-
while (1) {
pthread_testcancel();
- y += *x;
-
- /* Prevent the compiler from optimizing out the writes to y: */
- asm volatile("" : "+r" (y));
+ /* Force a read from the memory pointed to by ptr. This ensures
+ * the memory access actually happens and prevents the compiler
+ * from optimizing away this entire loop.
+ */
+ FORCE_READ(*(uint64_t *)ptr);
}
return NULL;
@@ -185,6 +186,9 @@ TEST_F_TIMEOUT(migration, private_anon_thp, 2*RUNTIME)
uint64_t *ptr;
int i;
+ if (!thp_is_enabled())
+ SKIP(return, "Transparent Hugepages not available");
+
if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
SKIP(return, "Not enough threads or NUMA nodes available");
@@ -214,6 +218,9 @@ TEST_F_TIMEOUT(migration, shared_anon_thp, 2*RUNTIME)
uint64_t *ptr;
int i;
+ if (!thp_is_enabled())
+ SKIP(return, "Transparent Hugepages not available");
+
if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
SKIP(return, "Not enough threads or NUMA nodes available");
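
FORCE_READ() used in the access_mem() rewrite above lives in vm_util.h; the idea is that a load through a volatile-qualified lvalue cannot be elided, so no inline asm is needed. A sketch of the technique (the real macro may differ in detail):

/* The compiler must perform accesses through volatile-qualified lvalues. */
#define FORCE_READ_SKETCH(x) (*(volatile __typeof__(x) *)&(x))

static void touch_word(void *ptr)
{
	/* Read one word; the value is intentionally discarded. */
	FORCE_READ_SKETCH(*(unsigned long *)ptr);
}
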
diff --git a/tools/testing/selftests/mm/mkdirty.c b/tools/testing/selftests/mm/mkdirty.c
index 09feeb453646..68dd447a5454 100644
--- a/tools/testing/selftests/mm/mkdirty.c
+++ b/tools/testing/selftests/mm/mkdirty.c
@@ -22,7 +22,7 @@
#include <linux/userfaultfd.h>
#include <linux/mempolicy.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "vm_util.h"
static size_t pagesize;
diff --git a/tools/testing/selftests/mm/mlock-random-test.c b/tools/testing/selftests/mm/mlock-random-test.c
index b8d7e966f44c..9d349c151360 100644
--- a/tools/testing/selftests/mm/mlock-random-test.c
+++ b/tools/testing/selftests/mm/mlock-random-test.c
@@ -13,7 +13,7 @@
#include <sys/ipc.h>
#include <sys/shm.h>
#include <time.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "mlock2.h"
#define CHUNK_UNIT (128 * 1024)
diff --git a/tools/testing/selftests/mm/mlock2-tests.c b/tools/testing/selftests/mm/mlock2-tests.c
index 3e90ff37e336..b474f2b20def 100644
--- a/tools/testing/selftests/mm/mlock2-tests.c
+++ b/tools/testing/selftests/mm/mlock2-tests.c
@@ -7,7 +7,7 @@
#include <sys/time.h>
#include <sys/resource.h>
#include <stdbool.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "mlock2.h"
struct vm_boundaries {
diff --git a/tools/testing/selftests/mm/mrelease_test.c b/tools/testing/selftests/mm/mrelease_test.c
index 100370a7111d..64e8d00ae944 100644
--- a/tools/testing/selftests/mm/mrelease_test.c
+++ b/tools/testing/selftests/mm/mrelease_test.c
@@ -12,7 +12,7 @@
#include <unistd.h>
#include <asm-generic/unistd.h>
#include "vm_util.h"
-#include "../kselftest.h"
+#include "kselftest.h"
#define MB(x) (x << 20)
#define MAX_SIZE_MB 1024
diff --git a/tools/testing/selftests/mm/mremap_dontunmap.c b/tools/testing/selftests/mm/mremap_dontunmap.c
index 1d75084b9ca5..a4f75d836733 100644
--- a/tools/testing/selftests/mm/mremap_dontunmap.c
+++ b/tools/testing/selftests/mm/mremap_dontunmap.c
@@ -14,7 +14,7 @@
#include <string.h>
#include <unistd.h>
-#include "../kselftest.h"
+#include "kselftest.h"
unsigned long page_size;
char *page_buffer;
diff --git a/tools/testing/selftests/mm/mremap_test.c b/tools/testing/selftests/mm/mremap_test.c
index bb84476a177f..308576437228 100644
--- a/tools/testing/selftests/mm/mremap_test.c
+++ b/tools/testing/selftests/mm/mremap_test.c
@@ -5,14 +5,18 @@
#define _GNU_SOURCE
#include <errno.h>
+#include <fcntl.h>
+#include <linux/userfaultfd.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
+#include <sys/ioctl.h>
#include <sys/mman.h>
+#include <syscall.h>
#include <time.h>
#include <stdbool.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#define EXPECT_SUCCESS 0
#define EXPECT_FAILURE 1
@@ -168,6 +172,7 @@ static bool is_range_mapped(FILE *maps_fp, unsigned long start,
if (first_val <= start && second_val >= end) {
success = true;
+ fflush(maps_fp);
break;
}
}
@@ -175,6 +180,15 @@ static bool is_range_mapped(FILE *maps_fp, unsigned long start,
return success;
}
+/* Check if [ptr, ptr + size) mapped in /proc/self/maps. */
+static bool is_ptr_mapped(FILE *maps_fp, void *ptr, unsigned long size)
+{
+ unsigned long start = (unsigned long)ptr;
+ unsigned long end = start + size;
+
+ return is_range_mapped(maps_fp, start, end);
+}
+
/*
* Returns the start address of the mapping on success, else returns
* NULL on failure.
@@ -380,11 +394,607 @@ out:
ksft_test_result_fail("%s\n", test_name);
}
+static bool is_multiple_vma_range_ok(unsigned int pattern_seed,
+ char *ptr, unsigned long page_size)
+{
+ int i;
+
+ srand(pattern_seed);
+ for (i = 0; i <= 10; i += 2) {
+ int j;
+ char *buf = &ptr[i * page_size];
+ size_t size = i == 4 ? 2 * page_size : page_size;
+
+ for (j = 0; j < size; j++) {
+ char chr = rand();
+
+ if (chr != buf[j]) {
+ ksft_print_msg("page %d offset %d corrupted, expected %d got %d\n",
+ i, j, chr, buf[j]);
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
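
The writer/verifier pair built around this helper relies on replaying a seeded PRNG stream rather than keeping a copy of the expected data. Isolated, the trick is just:

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

static void fill_pattern(unsigned int seed, char *buf, size_t len)
{
	size_t i;

	srand(seed);
	for (i = 0; i < len; i++)
		buf[i] = rand();
}

static bool check_pattern(unsigned int seed, const char *buf, size_t len)
{
	size_t i;

	/* Replay the identical PRNG stream the writer used. */
	srand(seed);
	for (i = 0; i < len; i++)
		if (buf[i] != (char)rand())
			return false;
	return true;
}
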
+
+static void mremap_move_multiple_vmas(unsigned int pattern_seed,
+ unsigned long page_size,
+ bool dont_unmap)
+{
+ int mremap_flags = MREMAP_FIXED | MREMAP_MAYMOVE;
+ char *test_name = "mremap move multiple vmas";
+ const size_t size = 11 * page_size;
+ bool success = true;
+ char *ptr, *tgt_ptr;
+ int i;
+
+ if (dont_unmap)
+ mremap_flags |= MREMAP_DONTUNMAP;
+
+ ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (ptr == MAP_FAILED) {
+ perror("mmap");
+ success = false;
+ goto out;
+ }
+
+ tgt_ptr = mmap(NULL, 2 * size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (tgt_ptr == MAP_FAILED) {
+ perror("mmap");
+ success = false;
+ goto out;
+ }
+ if (munmap(tgt_ptr, 2 * size)) {
+ perror("munmap");
+ success = false;
+ goto out_unmap;
+ }
+
+ /*
+ * Unmap so we end up with:
+ *
+ * 0 2 4 5 6 8 10 offset in buffer
+ * |*| |*| |*****| |*| |*|
+ * |*| |*| |*****| |*| |*|
+ * 0 1 2 3 4 5 6 pattern offset
+ */
+ for (i = 1; i < 10; i += 2) {
+ if (i == 5)
+ continue;
+
+ if (munmap(&ptr[i * page_size], page_size)) {
+ perror("munmap");
+ success = false;
+ goto out_unmap;
+ }
+ }
+
+ srand(pattern_seed);
+
+ /* Set up random patterns. */
+ for (i = 0; i <= 10; i += 2) {
+ int j;
+ size_t size = i == 4 ? 2 * page_size : page_size;
+ char *buf = &ptr[i * page_size];
+
+ for (j = 0; j < size; j++)
+ buf[j] = rand();
+ }
+
+ /* First, just move the whole thing. */
+ if (mremap(ptr, size, size, mremap_flags, tgt_ptr) == MAP_FAILED) {
+ perror("mremap");
+ success = false;
+ goto out_unmap;
+ }
+ /* Check move was ok. */
+ if (!is_multiple_vma_range_ok(pattern_seed, tgt_ptr, page_size)) {
+ success = false;
+ goto out_unmap;
+ }
+
+ /* Move next to itself. */
+ if (mremap(tgt_ptr, size, size, mremap_flags,
+ &tgt_ptr[size]) == MAP_FAILED) {
+ perror("mremap");
+ success = false;
+ goto out_unmap;
+ }
+ /* Check that the move is ok. */
+ if (!is_multiple_vma_range_ok(pattern_seed, &tgt_ptr[size], page_size)) {
+ success = false;
+ goto out_unmap;
+ }
+
+ /* Map a range to overwrite. */
+ if (mmap(tgt_ptr, size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0) == MAP_FAILED) {
+ perror("mmap tgt");
+ success = false;
+ goto out_unmap;
+ }
+ /* Move and overwrite. */
+ if (mremap(&tgt_ptr[size], size, size,
+ mremap_flags, tgt_ptr) == MAP_FAILED) {
+ perror("mremap");
+ success = false;
+ goto out_unmap;
+ }
+ /* Check that the move is ok. */
+ if (!is_multiple_vma_range_ok(pattern_seed, tgt_ptr, page_size)) {
+ success = false;
+ goto out_unmap;
+ }
+
+out_unmap:
+ if (munmap(tgt_ptr, 2 * size))
+ perror("munmap tgt");
+ if (munmap(ptr, size))
+ perror("munmap src");
+
+out:
+ if (success)
+ ksft_test_result_pass("%s%s\n", test_name,
+				      dont_unmap ? " [dontunmap]" : "");
+ else
+ ksft_test_result_fail("%s%s\n", test_name,
+				      dont_unmap ? " [dontunmap]" : "");
+}
+
+static void mremap_shrink_multiple_vmas(unsigned long page_size,
+ bool inplace)
+{
+ char *test_name = "mremap shrink multiple vmas";
+ const size_t size = 10 * page_size;
+ bool success = true;
+ char *ptr, *tgt_ptr;
+ void *res;
+ int i;
+
+ ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (ptr == MAP_FAILED) {
+ perror("mmap");
+ success = false;
+ goto out;
+ }
+
+ tgt_ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (tgt_ptr == MAP_FAILED) {
+ perror("mmap");
+ success = false;
+ goto out;
+ }
+ if (munmap(tgt_ptr, size)) {
+ perror("munmap");
+ success = false;
+ goto out_unmap;
+ }
+
+ /*
+ * Unmap so we end up with:
+ *
+ * 0 2 4 6 8 10 offset in buffer
+ * |*| |*| |*| |*| |*| |*|
+ * |*| |*| |*| |*| |*| |*|
+ */
+ for (i = 1; i < 10; i += 2) {
+ if (munmap(&ptr[i * page_size], page_size)) {
+ perror("munmap");
+ success = false;
+ goto out_unmap;
+ }
+ }
+
+ /*
+ * Shrink in-place across multiple VMAs and gaps so we end up with:
+ *
+ * 0
+ * |*|
+ * |*|
+ */
+ if (inplace)
+ res = mremap(ptr, size, page_size, 0);
+ else
+ res = mremap(ptr, size, page_size, MREMAP_MAYMOVE | MREMAP_FIXED,
+ tgt_ptr);
+
+ if (res == MAP_FAILED) {
+ perror("mremap");
+ success = false;
+ goto out_unmap;
+ }
+
+out_unmap:
+ if (munmap(tgt_ptr, size))
+ perror("munmap tgt");
+ if (munmap(ptr, size))
+ perror("munmap src");
+out:
+ if (success)
+ ksft_test_result_pass("%s%s\n", test_name,
+ inplace ? " [inplace]" : "");
+ else
+ ksft_test_result_fail("%s%s\n", test_name,
+ inplace ? " [inplace]" : "");
+}
+
+static void mremap_move_multiple_vmas_split(unsigned int pattern_seed,
+ unsigned long page_size,
+ bool dont_unmap)
+{
+ char *test_name = "mremap move multiple vmas split";
+ int mremap_flags = MREMAP_FIXED | MREMAP_MAYMOVE;
+ const size_t size = 10 * page_size;
+ bool success = true;
+ char *ptr, *tgt_ptr;
+ int i;
+
+ if (dont_unmap)
+ mremap_flags |= MREMAP_DONTUNMAP;
+
+ ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (ptr == MAP_FAILED) {
+ perror("mmap");
+ success = false;
+ goto out;
+ }
+
+ tgt_ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (tgt_ptr == MAP_FAILED) {
+ perror("mmap");
+ success = false;
+ goto out;
+ }
+ if (munmap(tgt_ptr, size)) {
+ perror("munmap");
+ success = false;
+ goto out_unmap;
+ }
+
+ /*
+ * Unmap so we end up with:
+ *
+ * 0 1 2 3 4 5 6 7 8 9 10 offset in buffer
+ * |**********| |*******|
+ * |**********| |*******|
+ * 0 1 2 3 4 5 6 7 8 9 pattern offset
+ */
+ if (munmap(&ptr[5 * page_size], page_size)) {
+ perror("munmap");
+ success = false;
+ goto out_unmap;
+ }
+
+ /* Set up random patterns. */
+ srand(pattern_seed);
+ for (i = 0; i < 10; i++) {
+ int j;
+ char *buf = &ptr[i * page_size];
+
+ if (i == 5)
+ continue;
+
+ for (j = 0; j < page_size; j++)
+ buf[j] = rand();
+ }
+
+ /*
+ * Move the below:
+ *
+ * <------------->
+ * 0 1 2 3 4 5 6 7 8 9 10 offset in buffer
+ * |**********| |*******|
+ * |**********| |*******|
+ * 0 1 2 3 4 5 6 7 8 9 pattern offset
+ *
+ * Into:
+ *
+ * 0 1 2 3 4 5 6 7 offset in buffer
+ * |*****| |*****|
+ * |*****| |*****|
+ * 2 3 4 5 6 7 pattern offset
+ */
+ if (mremap(&ptr[2 * page_size], size - 3 * page_size, size - 3 * page_size,
+ mremap_flags, tgt_ptr) == MAP_FAILED) {
+ perror("mremap");
+ success = false;
+ goto out_unmap;
+ }
+
+ /* Offset into random pattern. */
+ srand(pattern_seed);
+ for (i = 0; i < 2 * page_size; i++)
+ rand();
+
+ /* Check pattern. */
+ for (i = 0; i < 7; i++) {
+ int j;
+ char *buf = &tgt_ptr[i * page_size];
+
+ if (i == 3)
+ continue;
+
+ for (j = 0; j < page_size; j++) {
+ char chr = rand();
+
+			if (chr != buf[j]) {
+				ksft_print_msg("page %d offset %d corrupted, expected %d got %d\n",
+					       i, j, chr, buf[j]);
+				success = false;
+				goto out_unmap;
+			}
+ }
+ }
+
+out_unmap:
+ if (munmap(tgt_ptr, size))
+ perror("munmap tgt");
+ if (munmap(ptr, size))
+ perror("munmap src");
+out:
+ if (success)
+ ksft_test_result_pass("%s%s\n", test_name,
+				      dont_unmap ? " [dontunmap]" : "");
+ else
+ ksft_test_result_fail("%s%s\n", test_name,
+				      dont_unmap ? " [dontunmap]" : "");
+}
+
+#ifdef __NR_userfaultfd
+static void mremap_move_multi_invalid_vmas(FILE *maps_fp,
+ unsigned long page_size)
+{
+ char *test_name = "mremap move multiple invalid vmas";
+ const size_t size = 10 * page_size;
+ bool success = true;
+ char *ptr, *tgt_ptr;
+ int uffd, err, i;
+ void *res;
+ struct uffdio_api api = {
+ .api = UFFD_API,
+ .features = UFFD_EVENT_PAGEFAULT,
+ };
+
+ uffd = syscall(__NR_userfaultfd, O_NONBLOCK);
+ if (uffd == -1) {
+ err = errno;
+ perror("userfaultfd");
+ if (err == EPERM) {
+			ksft_test_result_skip("%s - missing uffd\n", test_name);
+ return;
+ }
+ success = false;
+ goto out;
+ }
+ if (ioctl(uffd, UFFDIO_API, &api)) {
+ perror("ioctl UFFDIO_API");
+ success = false;
+ goto out_close_uffd;
+ }
+
+ ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (ptr == MAP_FAILED) {
+ perror("mmap");
+ success = false;
+ goto out_close_uffd;
+ }
+
+ tgt_ptr = mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (tgt_ptr == MAP_FAILED) {
+ perror("mmap");
+ success = false;
+ goto out_close_uffd;
+ }
+ if (munmap(tgt_ptr, size)) {
+ perror("munmap");
+ success = false;
+ goto out_unmap;
+ }
+
+ /*
+ * Unmap so we end up with:
+ *
+ * 0 2 4 6 8 10 offset in buffer
+ * |*| |*| |*| |*| |*|
+ * |*| |*| |*| |*| |*|
+ *
+ * Additionally, register each with UFFD.
+ */
+ for (i = 0; i < 10; i += 2) {
+ void *unmap_ptr = &ptr[(i + 1) * page_size];
+ unsigned long start = (unsigned long)&ptr[i * page_size];
+ struct uffdio_register reg = {
+ .range = {
+ .start = start,
+ .len = page_size,
+ },
+ .mode = UFFDIO_REGISTER_MODE_MISSING,
+ };
+
+ if (ioctl(uffd, UFFDIO_REGISTER, &reg) == -1) {
+ perror("ioctl UFFDIO_REGISTER");
+ success = false;
+ goto out_unmap;
+ }
+ if (munmap(unmap_ptr, page_size)) {
+ perror("munmap");
+ success = false;
+ goto out_unmap;
+ }
+ }
+
+ /*
+	 * Now try to move the entire range, which is invalid for multi-VMA move.
+ *
+ * This will fail, and no VMA should be moved, as we check this ahead of
+ * time.
+ */
+ res = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, tgt_ptr);
+ err = errno;
+ if (res != MAP_FAILED) {
+ fprintf(stderr, "mremap() succeeded for multi VMA uffd armed\n");
+ success = false;
+ goto out_unmap;
+ }
+ if (err != EFAULT) {
+ errno = err;
+ perror("mremap() unexpected error");
+ success = false;
+ goto out_unmap;
+ }
+ if (is_ptr_mapped(maps_fp, tgt_ptr, page_size)) {
+ fprintf(stderr,
+ "Invalid uffd-armed VMA at start of multi range moved\n");
+ success = false;
+ goto out_unmap;
+ }
+
+ /*
+	 * Now try to move a single VMA; this should succeed since it is not a
+	 * multi-VMA move.
+ */
+ res = mremap(ptr, page_size, page_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED, tgt_ptr);
+ if (res == MAP_FAILED) {
+ perror("mremap single invalid-multi VMA");
+ success = false;
+ goto out_unmap;
+ }
+
+ /*
+	 * Unmap the VMA, and map a non-uffd-registered (and therefore valid
+	 * for multi-VMA move) VMA at the start of the ptr range.
+ */
+ if (munmap(tgt_ptr, page_size)) {
+ perror("munmap");
+ success = false;
+ goto out_unmap;
+ }
+ res = mmap(ptr, page_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+ if (res == MAP_FAILED) {
+ perror("mmap");
+ success = false;
+ goto out_unmap;
+ }
+
+ /*
+ * Now try to move the entire range, we should succeed in moving the
+ * first VMA, but no others, and report a failure.
+ */
+ res = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, tgt_ptr);
+ err = errno;
+ if (res != MAP_FAILED) {
+ fprintf(stderr, "mremap() succeeded for multi VMA uffd armed\n");
+ success = false;
+ goto out_unmap;
+ }
+ if (err != EFAULT) {
+ errno = err;
+ perror("mremap() unexpected error");
+ success = false;
+ goto out_unmap;
+ }
+ if (!is_ptr_mapped(maps_fp, tgt_ptr, page_size)) {
+ fprintf(stderr, "Valid VMA not moved\n");
+ success = false;
+ goto out_unmap;
+ }
+
+ /*
+	 * Unmap the VMA, map a valid VMA at the start of the ptr range, and
+	 * replace all existing multi-move-invalid VMAs, except the last, with
+	 * valid multi-move VMAs.
+ */
+ if (munmap(tgt_ptr, page_size)) {
+ perror("munmap");
+ success = false;
+ goto out_unmap;
+ }
+ if (munmap(ptr, size - 2 * page_size)) {
+ perror("munmap");
+ success = false;
+ goto out_unmap;
+ }
+ for (i = 0; i < 8; i += 2) {
+ res = mmap(&ptr[i * page_size], page_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+ if (res == MAP_FAILED) {
+ perror("mmap");
+ success = false;
+ goto out_unmap;
+ }
+ }
+
+ /*
+ * Now try to move the entire range, we should succeed in moving all but
+ * the last VMA, and report a failure.
+ */
+ res = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, tgt_ptr);
+ err = errno;
+ if (res != MAP_FAILED) {
+ fprintf(stderr, "mremap() succeeded for multi VMA uffd armed\n");
+ success = false;
+ goto out_unmap;
+ }
+ if (err != EFAULT) {
+ errno = err;
+ perror("mremap() unexpected error");
+ success = false;
+ goto out_unmap;
+ }
+
+ for (i = 0; i < 10; i += 2) {
+ bool is_mapped = is_ptr_mapped(maps_fp,
+ &tgt_ptr[i * page_size], page_size);
+
+ if (i < 8 && !is_mapped) {
+ fprintf(stderr, "Valid VMA not moved at %d\n", i);
+ success = false;
+ goto out_unmap;
+ } else if (i == 8 && is_mapped) {
+ fprintf(stderr, "Invalid VMA moved at %d\n", i);
+ success = false;
+ goto out_unmap;
+ }
+ }
+
+out_unmap:
+ if (munmap(tgt_ptr, size))
+ perror("munmap tgt");
+ if (munmap(ptr, size))
+ perror("munmap src");
+out_close_uffd:
+ close(uffd);
+out:
+ if (success)
+ ksft_test_result_pass("%s\n", test_name);
+ else
+ ksft_test_result_fail("%s\n", test_name);
+}
+#else
+static void mremap_move_multi_invalid_vmas(FILE *maps_fp, unsigned long page_size)
+{
+ char *test_name = "mremap move multiple invalid vmas";
+
+	ksft_test_result_skip("%s - missing uffd\n", test_name);
+}
+#endif /* __NR_userfaultfd */
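
For reference, the uffd bring-up this test depends on reduces to two steps: create the descriptor and negotiate the API before any UFFDIO_REGISTER is attempted. A self-contained sketch:

#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int uffd_open_sketch(void)
{
	struct uffdio_api api = { .api = UFFD_API };
	int uffd = syscall(__NR_userfaultfd, O_NONBLOCK);

	if (uffd < 0)
		return -1;	/* often EPERM for unprivileged callers */
	if (ioctl(uffd, UFFDIO_API, &api)) {
		close(uffd);
		return -1;
	}
	return uffd;	/* ready for UFFDIO_REGISTER */
}
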
+
/* Returns the time taken for the remap on success else returns -1. */
static long long remap_region(struct config c, unsigned int threshold_mb,
char *rand_addr)
{
- void *addr, *src_addr, *dest_addr, *dest_preamble_addr = NULL;
+ void *addr, *tmp_addr, *src_addr, *dest_addr, *dest_preamble_addr = NULL;
unsigned long long t, d;
struct timespec t_start = {0, 0}, t_end = {0, 0};
long long start_ns, end_ns, align_mask, ret, offset;
@@ -422,7 +1032,8 @@ static long long remap_region(struct config c, unsigned int threshold_mb,
/* Don't destroy existing mappings unless expected to overlap */
while (!is_remap_region_valid(addr, c.region_size) && !c.overlapping) {
/* Check for unsigned overflow */
- if (addr + c.dest_alignment < addr) {
+ tmp_addr = addr + c.dest_alignment;
+ if (tmp_addr < addr) {
ksft_print_msg("Couldn't find a valid region to remap to\n");
ret = -1;
goto clean_up_src;
@@ -721,7 +1332,7 @@ int main(int argc, char **argv)
char *rand_addr;
size_t rand_size;
int num_expand_tests = 2;
- int num_misc_tests = 2;
+ int num_misc_tests = 9;
struct test test_cases[MAX_TEST] = {};
struct test perf_test_cases[MAX_PERF_TEST];
int page_size;
@@ -844,10 +1455,17 @@ int main(int argc, char **argv)
mremap_expand_merge(maps_fp, page_size);
mremap_expand_merge_offset(maps_fp, page_size);
- fclose(maps_fp);
-
mremap_move_within_range(pattern_seed, rand_addr);
mremap_move_1mb_from_start(pattern_seed, rand_addr);
+ mremap_shrink_multiple_vmas(page_size, /* inplace= */true);
+ mremap_shrink_multiple_vmas(page_size, /* inplace= */false);
+ mremap_move_multiple_vmas(pattern_seed, page_size, /* dontunmap= */ false);
+ mremap_move_multiple_vmas(pattern_seed, page_size, /* dontunmap= */ true);
+ mremap_move_multiple_vmas_split(pattern_seed, page_size, /* dontunmap= */ false);
+ mremap_move_multiple_vmas_split(pattern_seed, page_size, /* dontunmap= */ true);
+ mremap_move_multi_invalid_vmas(maps_fp, page_size);
+
+ fclose(maps_fp);
if (run_perf_tests) {
ksft_print_msg("\n%s\n",
diff --git a/tools/testing/selftests/mm/mseal_test.c b/tools/testing/selftests/mm/mseal_test.c
index 005f29c86484..faad4833366a 100644
--- a/tools/testing/selftests/mm/mseal_test.c
+++ b/tools/testing/selftests/mm/mseal_test.c
@@ -8,7 +8,7 @@
#include <sys/time.h>
#include <sys/resource.h>
#include <stdbool.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include <syscall.h>
#include <errno.h>
#include <stdio.h>
diff --git a/tools/testing/selftests/mm/on-fault-limit.c b/tools/testing/selftests/mm/on-fault-limit.c
index 431c1277d83a..fc4117453c84 100644
--- a/tools/testing/selftests/mm/on-fault-limit.c
+++ b/tools/testing/selftests/mm/on-fault-limit.c
@@ -5,7 +5,7 @@
#include <string.h>
#include <sys/time.h>
#include <sys/resource.h>
-#include "../kselftest.h"
+#include "kselftest.h"
static void test_limit(void)
{
diff --git a/tools/testing/selftests/mm/pagemap_ioctl.c b/tools/testing/selftests/mm/pagemap_ioctl.c
index b07acc86f4f0..2cb5441f29c7 100644
--- a/tools/testing/selftests/mm/pagemap_ioctl.c
+++ b/tools/testing/selftests/mm/pagemap_ioctl.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+
#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
@@ -7,7 +8,7 @@
#include <errno.h>
#include <malloc.h>
#include "vm_util.h"
-#include "../kselftest.h"
+#include "kselftest.h"
#include <linux/types.h>
#include <linux/memfd.h>
#include <linux/userfaultfd.h>
@@ -34,8 +35,8 @@
#define PAGEMAP "/proc/self/pagemap"
int pagemap_fd;
int uffd;
-unsigned long page_size;
-unsigned int hpage_size;
+size_t page_size;
+size_t hpage_size;
const char *progname;
#define LEN(region) ((region.end - region.start)/page_size)
@@ -208,7 +209,7 @@ int userfaultfd_tests(void)
wp_addr_range(mem, mem_size);
vec_size = mem_size/page_size;
- vec = malloc(sizeof(struct page_region) * vec_size);
+ vec = calloc(vec_size, sizeof(struct page_region));
written = pagemap_ioctl(mem, mem_size, vec, 1, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
vec_size - 2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
@@ -246,11 +247,11 @@ int sanity_tests_sd(void)
vec_size = num_pages/2;
mem_size = num_pages * page_size;
- vec = malloc(sizeof(struct page_region) * vec_size);
+ vec = calloc(vec_size, sizeof(struct page_region));
if (!vec)
ksft_exit_fail_msg("error nomem\n");
- vec2 = malloc(sizeof(struct page_region) * vec_size);
+ vec2 = calloc(vec_size, sizeof(struct page_region));
if (!vec2)
ksft_exit_fail_msg("error nomem\n");
@@ -435,7 +436,7 @@ int sanity_tests_sd(void)
mem_size = 1050 * page_size;
vec_size = mem_size/(page_size*2);
- vec = malloc(sizeof(struct page_region) * vec_size);
+ vec = calloc(vec_size, sizeof(struct page_region));
if (!vec)
ksft_exit_fail_msg("error nomem\n");
@@ -490,7 +491,7 @@ int sanity_tests_sd(void)
mem_size = 10000 * page_size;
vec_size = 50;
- vec = malloc(sizeof(struct page_region) * vec_size);
+ vec = calloc(vec_size, sizeof(struct page_region));
if (!vec)
ksft_exit_fail_msg("error nomem\n");
@@ -540,7 +541,7 @@ int sanity_tests_sd(void)
vec_size = 1000;
mem_size = vec_size * page_size;
- vec = malloc(sizeof(struct page_region) * vec_size);
+ vec = calloc(vec_size, sizeof(struct page_region));
if (!vec)
ksft_exit_fail_msg("error nomem\n");
@@ -694,8 +695,8 @@ int base_tests(char *prefix, char *mem, unsigned long long mem_size, int skip)
}
vec_size = mem_size/page_size;
- vec = malloc(sizeof(struct page_region) * vec_size);
- vec2 = malloc(sizeof(struct page_region) * vec_size);
+ vec = calloc(vec_size, sizeof(struct page_region));
+ vec2 = calloc(vec_size, sizeof(struct page_region));
/* 1. all new pages must be not be written (dirty) */
written = pagemap_ioctl(mem, mem_size, vec, 1, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
@@ -806,8 +807,8 @@ int hpage_unit_tests(void)
unsigned long long vec_size = map_size/page_size;
struct page_region *vec, *vec2;
- vec = malloc(sizeof(struct page_region) * vec_size);
- vec2 = malloc(sizeof(struct page_region) * vec_size);
+ vec = calloc(vec_size, sizeof(struct page_region));
+ vec2 = calloc(vec_size, sizeof(struct page_region));
if (!vec || !vec2)
ksft_exit_fail_msg("malloc failed\n");
@@ -996,7 +997,7 @@ int unmapped_region_tests(void)
void *start = (void *)0x10000000;
int written, len = 0x00040000;
long vec_size = len / page_size;
- struct page_region *vec = malloc(sizeof(struct page_region) * vec_size);
+ struct page_region *vec = calloc(vec_size, sizeof(struct page_region));
/* 1. Get written pages */
written = pagemap_ioctl(start, len, vec, vec_size, 0, 0,
@@ -1061,7 +1062,7 @@ int sanity_tests(void)
mem_size = 10 * page_size;
vec_size = mem_size / page_size;
- vec = malloc(sizeof(struct page_region) * vec_size);
+ vec = calloc(vec_size, sizeof(struct page_region));
mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mem == MAP_FAILED || vec == MAP_FAILED)
ksft_exit_fail_msg("error nomem\n");
@@ -1480,6 +1481,66 @@ static void transact_test(int page_size)
extra_thread_faults);
}
+void zeropfn_tests(void)
+{
+ unsigned long long mem_size;
+ struct page_region vec;
+ int i, ret;
+ char *mmap_mem, *mem;
+
+ /* Test with normal memory */
+ mem_size = 10 * page_size;
+ mem = mmap(NULL, mem_size, PROT_READ, MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (mem == MAP_FAILED)
+ ksft_exit_fail_msg("error nomem\n");
+
+ /* Touch each page to ensure it's mapped */
+ for (i = 0; i < mem_size; i += page_size)
+ (void)((volatile char *)mem)[i];
+
+ ret = pagemap_ioctl(mem, mem_size, &vec, 1, 0,
+ (mem_size / page_size), PAGE_IS_PFNZERO, 0, 0, PAGE_IS_PFNZERO);
+ if (ret < 0)
+ ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
+
+ ksft_test_result(ret == 1 && LEN(vec) == (mem_size / page_size),
+ "%s all pages must have PFNZERO set\n", __func__);
+
+ munmap(mem, mem_size);
+
+	/* Test with huge page if use_zero_page is set to 1 */
+	if (!detect_huge_zeropage()) {
+		ksft_test_result_skip("%s use_zero_page not supported or set to 0\n", __func__);
+ return;
+ }
+
+ mem_size = 2 * hpage_size;
+ mmap_mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (mmap_mem == MAP_FAILED)
+ ksft_exit_fail_msg("error nomem\n");
+
+ /* We need a THP-aligned memory area. */
+ mem = (char *)(((uintptr_t)mmap_mem + hpage_size) & ~(hpage_size - 1));
+
+ ret = madvise(mem, hpage_size, MADV_HUGEPAGE);
+ if (!ret) {
+ FORCE_READ(*mem);
+
+ ret = pagemap_ioctl(mem, hpage_size, &vec, 1, 0,
+ 0, PAGE_IS_PFNZERO, 0, 0, PAGE_IS_PFNZERO);
+ if (ret < 0)
+ ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
+
+ ksft_test_result(ret == 1 && LEN(vec) == (hpage_size / page_size),
+ "%s all huge pages must have PFNZERO set\n", __func__);
+ } else {
+ ksft_test_result_skip("%s huge page not supported\n", __func__);
+ }
+
+ munmap(mmap_mem, mem_size);
+}
+
int main(int __attribute__((unused)) argc, char *argv[])
{
int shmid, buf_size, fd, i, ret;
@@ -1494,7 +1555,7 @@ int main(int __attribute__((unused)) argc, char *argv[])
if (init_uffd())
ksft_exit_pass();
- ksft_set_plan(115);
+ ksft_set_plan(117);
page_size = getpagesize();
hpage_size = read_pmd_pagesize();
@@ -1669,6 +1730,9 @@ int main(int __attribute__((unused)) argc, char *argv[])
/* 16. Userfaultfd tests */
userfaultfd_tests();
+ /* 17. ZEROPFN tests */
+ zeropfn_tests();
+
close(pagemap_fd);
ksft_exit_pass();
}
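
detect_huge_zeropage() used above is a vm_util helper; the gist is checking the THP use_zero_page knob. A sketch under that assumption (the real helper may additionally verify kernel support):

#include <stdbool.h>
#include <stdio.h>

static bool huge_zeropage_enabled(void)
{
	unsigned long val = 0;
	FILE *f = fopen("/sys/kernel/mm/transparent_hugepage/use_zero_page", "r");

	if (!f)
		return false;	/* no THP support */
	if (fscanf(f, "%lu", &val) != 1)
		val = 0;
	fclose(f);
	return val == 1;
}
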
diff --git a/tools/testing/selftests/mm/pfnmap.c b/tools/testing/selftests/mm/pfnmap.c
index 866ac023baf5..f546dfb10cae 100644
--- a/tools/testing/selftests/mm/pfnmap.c
+++ b/tools/testing/selftests/mm/pfnmap.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Basic VM_PFNMAP tests relying on mmap() of '/dev/mem'
+ * Basic VM_PFNMAP tests relying on mmap() of a user-provided input file.
+ * Uses '/dev/mem' by default.
*
* Copyright 2025, Red Hat, Inc.
*
@@ -21,10 +22,11 @@
#include <sys/mman.h>
#include <sys/wait.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#include "vm_util.h"
static sigjmp_buf sigjmp_buf_env;
+static char *file = "/dev/mem";
static void signal_handler(int sig)
{
@@ -51,7 +53,7 @@ static int test_read_access(char *addr, size_t size, size_t pagesize)
return ret;
}
-static int find_ram_target(off_t *phys_addr,
+static int find_ram_target(off_t *offset,
unsigned long long pagesize)
{
unsigned long long start, end;
@@ -91,7 +93,7 @@ static int find_ram_target(off_t *phys_addr,
/* We need two pages. */
if (end > start + 2 * pagesize) {
fclose(file);
- *phys_addr = start;
+ *offset = start;
return 0;
}
}
@@ -100,7 +102,7 @@ static int find_ram_target(off_t *phys_addr,
FIXTURE(pfnmap)
{
- off_t phys_addr;
+ off_t offset;
size_t pagesize;
int dev_mem_fd;
char *addr1;
@@ -113,23 +115,31 @@ FIXTURE_SETUP(pfnmap)
{
self->pagesize = getpagesize();
- /* We'll require two physical pages throughout our tests ... */
- if (find_ram_target(&self->phys_addr, self->pagesize))
- SKIP(return, "Cannot find ram target in '/proc/iomem'\n");
+ if (strncmp(file, "/dev/mem", strlen("/dev/mem")) == 0) {
+ /* We'll require two physical pages throughout our tests ... */
+ if (find_ram_target(&self->offset, self->pagesize))
+ SKIP(return,
+ "Cannot find ram target in '/proc/iomem'\n");
+ } else {
+ self->offset = 0;
+ }
- self->dev_mem_fd = open("/dev/mem", O_RDONLY);
+ self->dev_mem_fd = open(file, O_RDONLY);
if (self->dev_mem_fd < 0)
- SKIP(return, "Cannot open '/dev/mem'\n");
+ SKIP(return, "Cannot open '%s'\n", file);
self->size1 = self->pagesize * 2;
self->addr1 = mmap(NULL, self->size1, PROT_READ, MAP_SHARED,
- self->dev_mem_fd, self->phys_addr);
+ self->dev_mem_fd, self->offset);
if (self->addr1 == MAP_FAILED)
- SKIP(return, "Cannot mmap '/dev/mem'\n");
+ SKIP(return, "Cannot mmap '%s'\n", file);
+
+ if (!check_vmflag_pfnmap(self->addr1))
+ SKIP(return, "Invalid file: '%s'. Not pfnmap'ed\n", file);
/* ... and want to be able to read from them. */
if (test_read_access(self->addr1, self->size1, self->pagesize))
- SKIP(return, "Cannot read-access mmap'ed '/dev/mem'\n");
+ SKIP(return, "Cannot read-access mmap'ed '%s'\n", file);
self->size2 = 0;
self->addr2 = MAP_FAILED;
@@ -182,7 +192,7 @@ TEST_F(pfnmap, munmap_split)
*/
self->size2 = self->pagesize;
self->addr2 = mmap(NULL, self->pagesize, PROT_READ, MAP_SHARED,
- self->dev_mem_fd, self->phys_addr);
+ self->dev_mem_fd, self->offset);
ASSERT_NE(self->addr2, MAP_FAILED);
}
@@ -246,4 +256,14 @@ TEST_F(pfnmap, fork)
ASSERT_EQ(ret, 0);
}
-TEST_HARNESS_MAIN
+int main(int argc, char **argv)
+{
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "--") == 0) {
+ if (i + 1 < argc && strlen(argv[i + 1]) > 0)
+ file = argv[i + 1];
+ return test_harness_run(i, argv);
+ }
+ }
+ return test_harness_run(argc, argv);
+}
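For reference, the new main() above splits argv at "--": arguments before it are passed to the kselftest harness unchanged, and the first non-empty word after it overrides the default '/dev/mem' target. A minimal stand-alone sketch of the same convention (hypothetical example, not part of this patch):

#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	const char *file = "/dev/mem";	/* default mmap target */
	int harness_argc = argc;	/* what the harness would see */

	for (int i = 1; i < argc; i++) {
		if (strcmp(argv[i], "--") == 0) {
			if (i + 1 < argc && argv[i + 1][0] != '\0')
				file = argv[i + 1];
			harness_argc = i;	/* hide "--" and what follows */
			break;
		}
	}
	printf("harness args: %d, target: %s\n", harness_argc, file);
	return 0;
}

So e.g. './pfnmap -- /dev/fb0' (with a hypothetical pfnmap'ed device) would run the harness with no extra arguments while mapping '/dev/fb0'.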
diff --git a/tools/testing/selftests/mm/pkey-helpers.h b/tools/testing/selftests/mm/pkey-helpers.h
index ea404f80e6cb..7c29f075e40b 100644
--- a/tools/testing/selftests/mm/pkey-helpers.h
+++ b/tools/testing/selftests/mm/pkey-helpers.h
@@ -16,7 +16,7 @@
#include <linux/mman.h>
#include <linux/types.h>
-#include "../kselftest.h"
+#include "kselftest.h"
/* Define some kernel-like types */
typedef __u8 u8;
@@ -84,9 +84,6 @@ extern void abort_hooks(void);
#ifndef noinline
# define noinline __attribute__((noinline))
#endif
-#ifndef __maybe_unused
-# define __maybe_unused __attribute__((__unused__))
-#endif
int sys_pkey_alloc(unsigned long flags, unsigned long init_val);
int sys_pkey_free(unsigned long pkey);
diff --git a/tools/testing/selftests/mm/pkey_sighandler_tests.c b/tools/testing/selftests/mm/pkey_sighandler_tests.c
index b5e076a564c9..302fef54049c 100644
--- a/tools/testing/selftests/mm/pkey_sighandler_tests.c
+++ b/tools/testing/selftests/mm/pkey_sighandler_tests.c
@@ -41,7 +41,7 @@ static siginfo_t siginfo = {0};
* syscall will attempt to access the PLT in order to call a library function
* which is protected by MPK 0 which we don't have access to.
*/
-static inline __always_inline
+static __always_inline
long syscall_raw(long n, long a1, long a2, long a3, long a4, long a5, long a6)
{
unsigned long ret;
diff --git a/tools/testing/selftests/mm/prctl_thp_disable.c b/tools/testing/selftests/mm/prctl_thp_disable.c
new file mode 100644
index 000000000000..ca27200596a4
--- /dev/null
+++ b/tools/testing/selftests/mm/prctl_thp_disable.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Basic tests for PR_GET/SET_THP_DISABLE prctl calls
+ *
+ * Author(s): Usama Arif <usamaarif642@gmail.com>
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <linux/mman.h>
+#include <sys/prctl.h>
+#include <sys/wait.h>
+
+#include "kselftest_harness.h"
+#include "thp_settings.h"
+#include "vm_util.h"
+
+#ifndef PR_THP_DISABLE_EXCEPT_ADVISED
+#define PR_THP_DISABLE_EXCEPT_ADVISED (1 << 1)
+#endif
+
+enum thp_collapse_type {
+ THP_COLLAPSE_NONE,
+ THP_COLLAPSE_MADV_NOHUGEPAGE,
+ THP_COLLAPSE_MADV_HUGEPAGE, /* MADV_HUGEPAGE before access */
+ THP_COLLAPSE_MADV_COLLAPSE, /* MADV_COLLAPSE after access */
+};
+
+/*
+ * Function to mmap a buffer, fault it in, madvise it appropriately (before
+ * page fault for MADV_HUGEPAGE, and after for MADV_COLLAPSE), and check if the
+ * mmap region is huge.
+ * Returns:
+ * 0 if test doesn't give hugepage
+ * 1 if test gives a hugepage
+ * -errno if mmap fails
+ */
+static int test_mmap_thp(enum thp_collapse_type madvise_buf, size_t pmdsize)
+{
+ char *mem, *mmap_mem;
+ size_t mmap_size;
+ int ret;
+
+ /* For alignment purposes, we need twice the THP size. */
+ mmap_size = 2 * pmdsize;
+ mmap_mem = (char *)mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (mmap_mem == MAP_FAILED)
+ return -errno;
+
+ /* We need a THP-aligned memory area. */
+ mem = (char *)(((uintptr_t)mmap_mem + pmdsize) & ~(pmdsize - 1));
+
+ if (madvise_buf == THP_COLLAPSE_MADV_HUGEPAGE)
+ madvise(mem, pmdsize, MADV_HUGEPAGE);
+ else if (madvise_buf == THP_COLLAPSE_MADV_NOHUGEPAGE)
+ madvise(mem, pmdsize, MADV_NOHUGEPAGE);
+
+ /* Ensure memory is allocated */
+ memset(mem, 1, pmdsize);
+
+ if (madvise_buf == THP_COLLAPSE_MADV_COLLAPSE)
+ madvise(mem, pmdsize, MADV_COLLAPSE);
+
+ /* HACK: make sure we have a separate VMA that we can check reliably. */
+ mprotect(mem, pmdsize, PROT_READ);
+
+ ret = check_huge_anon(mem, 1, pmdsize);
+ munmap(mmap_mem, mmap_size);
+ return ret;
+}
+
+static void prctl_thp_disable_completely_test(struct __test_metadata *const _metadata,
+ size_t pmdsize,
+ enum thp_enabled thp_policy)
+{
+ ASSERT_EQ(prctl(PR_GET_THP_DISABLE, NULL, NULL, NULL, NULL), 1);
+
+ /* tests after prctl overrides global policy */
+ ASSERT_EQ(test_mmap_thp(THP_COLLAPSE_NONE, pmdsize), 0);
+
+ ASSERT_EQ(test_mmap_thp(THP_COLLAPSE_MADV_NOHUGEPAGE, pmdsize), 0);
+
+ ASSERT_EQ(test_mmap_thp(THP_COLLAPSE_MADV_HUGEPAGE, pmdsize), 0);
+
+ ASSERT_EQ(test_mmap_thp(THP_COLLAPSE_MADV_COLLAPSE, pmdsize), 0);
+
+ /* Reset to global policy */
+ ASSERT_EQ(prctl(PR_SET_THP_DISABLE, 0, NULL, NULL, NULL), 0);
+
+ /* tests after prctl is cleared, and only global policy is effective */
+ ASSERT_EQ(test_mmap_thp(THP_COLLAPSE_NONE, pmdsize),
+ thp_policy == THP_ALWAYS ? 1 : 0);
+
+ ASSERT_EQ(test_mmap_thp(THP_COLLAPSE_MADV_NOHUGEPAGE, pmdsize), 0);
+
+ ASSERT_EQ(test_mmap_thp(THP_COLLAPSE_MADV_HUGEPAGE, pmdsize),
+ thp_policy == THP_NEVER ? 0 : 1);
+
+ ASSERT_EQ(test_mmap_thp(THP_COLLAPSE_MADV_COLLAPSE, pmdsize), 1);
+}
+
+FIXTURE(prctl_thp_disable_completely)
+{
+ struct thp_settings settings;
+ size_t pmdsize;
+};
+
+FIXTURE_VARIANT(prctl_thp_disable_completely)
+{
+ enum thp_enabled thp_policy;
+};
+
+FIXTURE_VARIANT_ADD(prctl_thp_disable_completely, never)
+{
+ .thp_policy = THP_NEVER,
+};
+
+FIXTURE_VARIANT_ADD(prctl_thp_disable_completely, madvise)
+{
+ .thp_policy = THP_MADVISE,
+};
+
+FIXTURE_VARIANT_ADD(prctl_thp_disable_completely, always)
+{
+ .thp_policy = THP_ALWAYS,
+};
+
+FIXTURE_SETUP(prctl_thp_disable_completely)
+{
+ if (!thp_available())
+ SKIP(return, "Transparent Hugepages not available\n");
+
+ self->pmdsize = read_pmd_pagesize();
+ if (!self->pmdsize)
+ SKIP(return, "Unable to read PMD size\n");
+
+ if (prctl(PR_SET_THP_DISABLE, 1, NULL, NULL, NULL))
+ SKIP(return, "Unable to disable THPs completely for the process\n");
+
+ thp_save_settings();
+ thp_read_settings(&self->settings);
+ self->settings.thp_enabled = variant->thp_policy;
+ self->settings.hugepages[sz2ord(self->pmdsize, getpagesize())].enabled = THP_INHERIT;
+ thp_write_settings(&self->settings);
+}
+
+FIXTURE_TEARDOWN(prctl_thp_disable_completely)
+{
+ thp_restore_settings();
+}
+
+TEST_F(prctl_thp_disable_completely, nofork)
+{
+ prctl_thp_disable_completely_test(_metadata, self->pmdsize, variant->thp_policy);
+}
+
+TEST_F(prctl_thp_disable_completely, fork)
+{
+ int ret = 0;
+ pid_t pid;
+
+ /* Make sure prctl changes are carried across fork */
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (!pid) {
+ prctl_thp_disable_completely_test(_metadata, self->pmdsize, variant->thp_policy);
+ return;
+ }
+
+ wait(&ret);
+ if (WIFEXITED(ret))
+ ret = WEXITSTATUS(ret);
+ else
+ ret = -EINVAL;
+ ASSERT_EQ(ret, 0);
+}
+
+static void prctl_thp_disable_except_madvise_test(struct __test_metadata *const _metadata,
+ size_t pmdsize,
+ enum thp_enabled thp_policy)
+{
+ ASSERT_EQ(prctl(PR_GET_THP_DISABLE, NULL, NULL, NULL, NULL), 3);
+
+ /* tests after prctl overrides global policy */
+ ASSERT_EQ(test_mmap_thp(THP_COLLAPSE_NONE, pmdsize), 0);
+
+ ASSERT_EQ(test_mmap_thp(THP_COLLAPSE_MADV_NOHUGEPAGE, pmdsize), 0);
+
+ ASSERT_EQ(test_mmap_thp(THP_COLLAPSE_MADV_HUGEPAGE, pmdsize),
+ thp_policy == THP_NEVER ? 0 : 1);
+
+ ASSERT_EQ(test_mmap_thp(THP_COLLAPSE_MADV_COLLAPSE, pmdsize), 1);
+
+ /* Reset to global policy */
+ ASSERT_EQ(prctl(PR_SET_THP_DISABLE, 0, NULL, NULL, NULL), 0);
+
+ /* tests after prctl is cleared, and only global policy is effective */
+ ASSERT_EQ(test_mmap_thp(THP_COLLAPSE_NONE, pmdsize),
+ thp_policy == THP_ALWAYS ? 1 : 0);
+
+ ASSERT_EQ(test_mmap_thp(THP_COLLAPSE_MADV_NOHUGEPAGE, pmdsize), 0);
+
+ ASSERT_EQ(test_mmap_thp(THP_COLLAPSE_MADV_HUGEPAGE, pmdsize),
+ thp_policy == THP_NEVER ? 0 : 1);
+
+ ASSERT_EQ(test_mmap_thp(THP_COLLAPSE_MADV_COLLAPSE, pmdsize), 1);
+}
+
+FIXTURE(prctl_thp_disable_except_madvise)
+{
+ struct thp_settings settings;
+ size_t pmdsize;
+};
+
+FIXTURE_VARIANT(prctl_thp_disable_except_madvise)
+{
+ enum thp_enabled thp_policy;
+};
+
+FIXTURE_VARIANT_ADD(prctl_thp_disable_except_madvise, never)
+{
+ .thp_policy = THP_NEVER,
+};
+
+FIXTURE_VARIANT_ADD(prctl_thp_disable_except_madvise, madvise)
+{
+ .thp_policy = THP_MADVISE,
+};
+
+FIXTURE_VARIANT_ADD(prctl_thp_disable_except_madvise, always)
+{
+ .thp_policy = THP_ALWAYS,
+};
+
+FIXTURE_SETUP(prctl_thp_disable_except_madvise)
+{
+ if (!thp_available())
+ SKIP(return, "Transparent Hugepages not available\n");
+
+ self->pmdsize = read_pmd_pagesize();
+ if (!self->pmdsize)
+ SKIP(return, "Unable to read PMD size\n");
+
+ if (prctl(PR_SET_THP_DISABLE, 1, PR_THP_DISABLE_EXCEPT_ADVISED, NULL, NULL))
+ SKIP(return, "Unable to set PR_THP_DISABLE_EXCEPT_ADVISED\n");
+
+ thp_save_settings();
+ thp_read_settings(&self->settings);
+ self->settings.thp_enabled = variant->thp_policy;
+ self->settings.hugepages[sz2ord(self->pmdsize, getpagesize())].enabled = THP_INHERIT;
+ thp_write_settings(&self->settings);
+}
+
+FIXTURE_TEARDOWN(prctl_thp_disable_except_madvise)
+{
+ thp_restore_settings();
+}
+
+TEST_F(prctl_thp_disable_except_madvise, nofork)
+{
+ prctl_thp_disable_except_madvise_test(_metadata, self->pmdsize, variant->thp_policy);
+}
+
+TEST_F(prctl_thp_disable_except_madvise, fork)
+{
+ int ret = 0;
+ pid_t pid;
+
+ /* Make sure prctl changes are carried across fork */
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (!pid) {
+ prctl_thp_disable_except_madvise_test(_metadata, self->pmdsize,
+ variant->thp_policy);
+ return;
+ }
+
+ wait(&ret);
+ if (WIFEXITED(ret))
+ ret = WEXITSTATUS(ret);
+ else
+ ret = -EINVAL;
+ ASSERT_EQ(ret, 0);
+}
+
+TEST_HARNESS_MAIN
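The fixtures above assume that prctl(PR_SET_THP_DISABLE, ...) accepts a flags word in arg3 and that PR_GET_THP_DISABLE reports the combined state back (1 = THPs disabled completely, 3 = disabled except where advised, matching the asserts above). A minimal sketch of that round trip, assuming a kernel that understands PR_THP_DISABLE_EXCEPT_ADVISED:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_THP_DISABLE_EXCEPT_ADVISED
#define PR_THP_DISABLE_EXCEPT_ADVISED (1 << 1)
#endif

int main(void)
{
	/* Disable THPs for this process except for MADV_HUGEPAGE regions. */
	if (prctl(PR_SET_THP_DISABLE, 1, PR_THP_DISABLE_EXCEPT_ADVISED, 0, 0))
		perror("PR_SET_THP_DISABLE");

	/* Expected to print 3 on kernels with EXCEPT_ADVISED support. */
	printf("PR_GET_THP_DISABLE: %d\n",
	       (int)prctl(PR_GET_THP_DISABLE, 0, 0, 0, 0));

	/* Return to the global THP policy. */
	prctl(PR_SET_THP_DISABLE, 0, 0, 0, 0);
	return 0;
}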
diff --git a/tools/testing/selftests/mm/process_madv.c b/tools/testing/selftests/mm/process_madv.c
new file mode 100644
index 000000000000..cd4610baf5d7
--- /dev/null
+++ b/tools/testing/selftests/mm/process_madv.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#define _GNU_SOURCE
+#include "kselftest_harness.h"
+#include <errno.h>
+#include <setjmp.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <linux/mman.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#include <sched.h>
+#include "vm_util.h"
+
+#include "../pidfd/pidfd.h"
+
+FIXTURE(process_madvise)
+{
+ unsigned long page_size;
+ pid_t child_pid;
+ int remote_pidfd;
+ int pidfd;
+};
+
+FIXTURE_SETUP(process_madvise)
+{
+ self->page_size = (unsigned long)sysconf(_SC_PAGESIZE);
+ self->pidfd = PIDFD_SELF;
+ self->remote_pidfd = -1;
+ self->child_pid = -1;
+};
+
+FIXTURE_TEARDOWN_PARENT(process_madvise)
+{
+ /* This teardown is guaranteed to run, even if tests SKIP or ASSERT */
+ if (self->child_pid > 0) {
+ kill(self->child_pid, SIGKILL);
+ waitpid(self->child_pid, NULL, 0);
+ }
+
+ if (self->remote_pidfd >= 0)
+ close(self->remote_pidfd);
+}
+
+static ssize_t sys_process_madvise(int pidfd, const struct iovec *iovec,
+ size_t vlen, int advice, unsigned int flags)
+{
+ return syscall(__NR_process_madvise, pidfd, iovec, vlen, advice, flags);
+}
+
+/*
+ * This test uses PIDFD_SELF to target the current process. The main
+ * goal is to verify the basic behavior of process_madvise() with
+ * a vector of non-contiguous memory ranges, not its cross-process
+ * capabilities.
+ */
+TEST_F(process_madvise, basic)
+{
+ const unsigned long pagesize = self->page_size;
+ const int madvise_pages = 4;
+ struct iovec vec[madvise_pages];
+ int pidfd = self->pidfd;
+ ssize_t ret;
+ char *map;
+
+ /*
+ * Create a single large mapping. We will pick pages from this
+ * mapping to advise on. This ensures we test non-contiguous iovecs.
+ */
+ map = mmap(NULL, pagesize * 10, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (map == MAP_FAILED)
+ SKIP(return, "mmap failed, not enough memory.\n");
+
+ /* Fill the entire region with a known pattern. */
+ memset(map, 'A', pagesize * 10);
+
+ /*
+ * Setup the iovec to point to 4 non-contiguous pages
+ * within the mapping.
+ */
+ vec[0].iov_base = &map[0 * pagesize];
+ vec[0].iov_len = pagesize;
+ vec[1].iov_base = &map[3 * pagesize];
+ vec[1].iov_len = pagesize;
+ vec[2].iov_base = &map[5 * pagesize];
+ vec[2].iov_len = pagesize;
+ vec[3].iov_base = &map[8 * pagesize];
+ vec[3].iov_len = pagesize;
+
+ ret = sys_process_madvise(pidfd, vec, madvise_pages, MADV_DONTNEED, 0);
+ if (ret == -1 && errno == EPERM)
+ SKIP(return,
+ "process_madvise() unsupported or permission denied, try running as root.\n");
+ else if (ret == -1 && errno == EINVAL)
+ SKIP(return,
+ "process_madvise() unsupported or parameter invalid, please check arguments.\n");
+
+ /* The call should succeed and report the total bytes processed. */
+ ASSERT_EQ(ret, madvise_pages * pagesize);
+
+ /* Check that advised pages are now zero. */
+ for (int i = 0; i < madvise_pages; i++) {
+ char *advised_page = (char *)vec[i].iov_base;
+
+ /* Content must be 0, not 'A'. */
+ ASSERT_EQ(*advised_page, '\0');
+ }
+
+ /* Check that an un-advised page in between is still 'A'. */
+ char *unadvised_page = &map[1 * pagesize];
+
+ for (int i = 0; i < pagesize; i++)
+ ASSERT_EQ(unadvised_page[i], 'A');
+
+ /* Cleanup. */
+ ASSERT_EQ(munmap(map, pagesize * 10), 0);
+}
+
+/*
+ * This test deterministically validates process_madvise() with MADV_COLLAPSE
+ * on a remote process; other advice values are difficult to verify reliably.
+ *
+ * The test only verifies the result of the remote process_madvise() call on
+ * the child's memory region, checking just the addresses and lengths. The
+ * correctness of MADV_COLLAPSE itself is covered by the khugepaged selftests.
+ */
+TEST_F(process_madvise, remote_collapse)
+{
+ const unsigned long pagesize = self->page_size;
+ long huge_page_size;
+ int pipe_info[2];
+ ssize_t ret;
+ struct iovec vec;
+
+ struct child_info {
+ pid_t pid;
+ void *map_addr;
+ } info;
+
+ huge_page_size = read_pmd_pagesize();
+ if (huge_page_size <= 0)
+ SKIP(return, "Could not determine a valid huge page size.\n");
+
+ ASSERT_EQ(pipe(pipe_info), 0);
+
+ self->child_pid = fork();
+ ASSERT_NE(self->child_pid, -1);
+
+ if (self->child_pid == 0) {
+ char *map;
+ size_t map_size = 2 * huge_page_size;
+
+ close(pipe_info[0]);
+
+ map = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ ASSERT_NE(map, MAP_FAILED);
+
+ /* Fault in as small pages */
+ for (size_t i = 0; i < map_size; i += pagesize)
+ map[i] = 'A';
+
+ /* Send info and pause */
+ info.pid = getpid();
+ info.map_addr = map;
+ ret = write(pipe_info[1], &info, sizeof(info));
+ ASSERT_EQ(ret, sizeof(info));
+ close(pipe_info[1]);
+
+ pause();
+ exit(0);
+ }
+
+ close(pipe_info[1]);
+
+ /* Receive child info */
+ ret = read(pipe_info[0], &info, sizeof(info));
+ if (ret <= 0) {
+ waitpid(self->child_pid, NULL, 0);
+ SKIP(return, "Failed to read child info from pipe.\n");
+ }
+ ASSERT_EQ(ret, sizeof(info));
+ close(pipe_info[0]);
+ self->child_pid = info.pid;
+
+ self->remote_pidfd = syscall(__NR_pidfd_open, self->child_pid, 0);
+ ASSERT_GE(self->remote_pidfd, 0);
+
+ vec.iov_base = info.map_addr;
+ vec.iov_len = huge_page_size;
+
+ ret = sys_process_madvise(self->remote_pidfd, &vec, 1, MADV_COLLAPSE,
+ 0);
+ if (ret == -1) {
+ if (errno == EINVAL)
+ SKIP(return, "PROCESS_MADV_ADVISE is not supported.\n");
+ else if (errno == EPERM)
+ SKIP(return,
+ "No process_madvise() permissions, try running as root.\n");
+ return;
+ }
+
+ ASSERT_EQ(ret, huge_page_size);
+}
+
+/*
+ * Test process_madvise() with a pidfd for a process that has already
+ * exited to ensure correct error handling.
+ */
+TEST_F(process_madvise, exited_process_pidfd)
+{
+ const unsigned long pagesize = self->page_size;
+ struct iovec vec;
+ char *map;
+ ssize_t ret;
+
+ map = mmap(NULL, pagesize, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1,
+ 0);
+ if (map == MAP_FAILED)
+ SKIP(return, "mmap failed, not enough memory.\n");
+
+ vec.iov_base = map;
+ vec.iov_len = pagesize;
+
+ /*
+ * Using a pidfd for a process that has already exited should fail
+ * with ESRCH.
+ */
+ self->child_pid = fork();
+ ASSERT_NE(self->child_pid, -1);
+
+ if (self->child_pid == 0)
+ exit(0);
+
+ self->remote_pidfd = syscall(__NR_pidfd_open, self->child_pid, 0);
+ ASSERT_GE(self->remote_pidfd, 0);
+
+ /* Wait for the child to ensure it has terminated. */
+ waitpid(self->child_pid, NULL, 0);
+
+ ret = sys_process_madvise(self->remote_pidfd, &vec, 1, MADV_DONTNEED,
+ 0);
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, ESRCH);
+}
+
+/*
+ * Test process_madvise() with bad pidfds to ensure correct error
+ * handling.
+ */
+TEST_F(process_madvise, bad_pidfd)
+{
+ const unsigned long pagesize = self->page_size;
+ struct iovec vec;
+ char *map;
+ ssize_t ret;
+
+ map = mmap(NULL, pagesize, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1,
+ 0);
+ if (map == MAP_FAILED)
+ SKIP(return, "mmap failed, not enough memory.\n");
+
+ vec.iov_base = map;
+ vec.iov_len = pagesize;
+
+ /* Using an invalid fd number (-1) should fail with EBADF. */
+ ret = sys_process_madvise(-1, &vec, 1, MADV_DONTNEED, 0);
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, EBADF);
+
+ /*
+ * Using a valid fd that is not a pidfd (e.g. stdin) should fail
+ * with EBADF.
+ */
+ ret = sys_process_madvise(STDIN_FILENO, &vec, 1, MADV_DONTNEED, 0);
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, EBADF);
+}
+
+/*
+ * Test that process_madvise() rejects vlen > UIO_MAXIOV.
+ * The kernel should return -EINVAL when the number of iovecs exceeds 1024.
+ */
+TEST_F(process_madvise, invalid_vlen)
+{
+ const unsigned long pagesize = self->page_size;
+ int pidfd = self->pidfd;
+ struct iovec vec;
+ char *map;
+ ssize_t ret;
+
+ map = mmap(NULL, pagesize, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1,
+ 0);
+ if (map == MAP_FAILED)
+ SKIP(return, "mmap failed, not enough memory.\n");
+
+ vec.iov_base = map;
+ vec.iov_len = pagesize;
+
+ ret = sys_process_madvise(pidfd, &vec, 1025, MADV_DONTNEED, 0);
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, EINVAL);
+
+ /* Cleanup. */
+ ASSERT_EQ(munmap(map, pagesize), 0);
+}
+
+/*
+ * Test process_madvise() with an invalid flag value. Currently, only a flag
+ * value of 0 is supported. This test is reserved for the future, e.g., if
+ * synchronous flags are added.
+ */
+TEST_F(process_madvise, flag)
+{
+ const unsigned long pagesize = self->page_size;
+ unsigned int invalid_flag;
+ int pidfd = self->pidfd;
+ struct iovec vec;
+ char *map;
+ ssize_t ret;
+
+ map = mmap(NULL, pagesize, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1,
+ 0);
+ if (map == MAP_FAILED)
+ SKIP(return, "mmap failed, not enough memory.\n");
+
+ vec.iov_base = map;
+ vec.iov_len = pagesize;
+
+ invalid_flag = 0x80000000;
+
+ ret = sys_process_madvise(pidfd, &vec, 1, MADV_DONTNEED, invalid_flag);
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, EINVAL);
+
+ /* Cleanup. */
+ ASSERT_EQ(munmap(map, pagesize), 0);
+}
+
+TEST_HARNESS_MAIN
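For reference, a minimal self-targeting sketch of the syscall exercised above; the raw syscall numbers are used since libc wrappers for pidfd_open()/process_madvise() may be absent (assumes SYS_pidfd_open and SYS_process_madvise are provided by the headers):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	size_t psz = (size_t)sysconf(_SC_PAGESIZE);
	char *map = mmap(NULL, psz, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	struct iovec vec = { .iov_base = map, .iov_len = psz };
	int pidfd;
	ssize_t ret;

	if (map == MAP_FAILED)
		return 1;
	memset(map, 'A', psz);

	pidfd = syscall(SYS_pidfd_open, getpid(), 0);
	/* On success, ret is the number of bytes advised, and the advised
	 * anonymous page subsequently reads back as zeroes. */
	ret = syscall(SYS_process_madvise, pidfd, &vec, 1, MADV_DONTNEED, 0);
	printf("ret=%zd first byte=%d\n", ret, map[0]);
	return 0;
}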
diff --git a/tools/testing/selftests/mm/protection_keys.c b/tools/testing/selftests/mm/protection_keys.c
index 23ebec367015..2085982dba69 100644
--- a/tools/testing/selftests/mm/protection_keys.c
+++ b/tools/testing/selftests/mm/protection_keys.c
@@ -557,13 +557,11 @@ int mprotect_pkey(void *ptr, size_t size, unsigned long orig_prot,
int nr_iterations = random() % 100;
int ret;
- while (0) {
+ while (nr_iterations-- >= 0) {
int rpkey = alloc_random_pkey();
ret = sys_mprotect_pkey(ptr, size, orig_prot, pkey);
dprintf1("sys_mprotect_pkey(%p, %zx, prot=0x%lx, pkey=%ld) ret: %d\n",
ptr, size, orig_prot, pkey, ret);
- if (nr_iterations-- < 0)
- break;
dprintf1("%s()::%d, ret: %d pkey_reg: 0x%016llx"
" shadow: 0x%016llx\n",
@@ -1304,7 +1302,7 @@ static void test_mprotect_with_pkey_0(int *ptr, u16 pkey)
static void test_ptrace_of_child(int *ptr, u16 pkey)
{
- __attribute__((__unused__)) int peek_result;
+ __always_unused int peek_result;
pid_t child_pid;
void *ignored = 0;
long ret;
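The loop rewrite in the first hunk above replaces the dead 'while (0)' and the in-body 'if (nr_iterations-- < 0) break;' with a single post-decrement condition, so the body executes nr_iterations + 1 times. A tiny sketch of that semantics:

#include <stdio.h>

int main(void)
{
	int n = 3, runs = 0;

	while (n-- >= 0)	/* tests 3, 2, 1, 0 -> body runs 4 times */
		runs++;
	printf("runs=%d\n", runs);	/* prints runs=4, i.e. initial n + 1 */
	return 0;
}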
diff --git a/tools/testing/selftests/mm/rmap.c b/tools/testing/selftests/mm/rmap.c
new file mode 100644
index 000000000000..53f2058b0ef2
--- /dev/null
+++ b/tools/testing/selftests/mm/rmap.c
@@ -0,0 +1,433 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RMAP functional tests
+ *
+ * Author(s): Wei Yang <richard.weiyang@gmail.com>
+ */
+
+#include "kselftest_harness.h"
+#include <strings.h>
+#include <pthread.h>
+#include <numa.h>
+#include <numaif.h>
+#include <sys/mman.h>
+#include <sys/prctl.h>
+#include <sys/types.h>
+#include <signal.h>
+#include <time.h>
+#include <sys/sem.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include "vm_util.h"
+
+#define TOTAL_LEVEL 5
+#define MAX_CHILDREN 3
+
+#define FAIL_ON_CHECK (1 << 0)
+#define FAIL_ON_WORK (1 << 1)
+
+struct sembuf sem_wait = {0, -1, 0};
+struct sembuf sem_signal = {0, 1, 0};
+
+enum backend_type {
+ ANON,
+ SHM,
+ NORM_FILE,
+};
+
+#define PREFIX "kst_rmap"
+#define MAX_FILENAME_LEN 256
+const char *suffixes[] = {
+ "",
+ "_shm",
+ "_file",
+};
+
+struct global_data;
+typedef int (*work_fn)(struct global_data *data);
+typedef int (*check_fn)(struct global_data *data);
+typedef void (*prepare_fn)(struct global_data *data);
+
+struct global_data {
+ int worker_level;
+
+ int semid;
+ int pipefd[2];
+
+ unsigned int mapsize;
+ unsigned int rand_seed;
+ char *region;
+
+ prepare_fn do_prepare;
+ work_fn do_work;
+ check_fn do_check;
+
+ enum backend_type backend;
+ char filename[MAX_FILENAME_LEN];
+
+ unsigned long *expected_pfn;
+};
+
+/*
+ * Create a process tree with TOTAL_LEVEL height and at most MAX_CHILDREN
+ * children for each.
+ *
+ * It randomly selects one process as the 'worker', which runs 'do_work'
+ * once all processes are created, while all other processes wait until
+ * the 'worker' finishes its work.
+ */
+void propagate_children(struct __test_metadata *_metadata, struct global_data *data)
+{
+ pid_t root_pid, pid;
+ unsigned int num_child;
+ int status;
+ int ret = 0;
+ int curr_child, worker_child;
+ int curr_level = 1;
+ bool is_worker = true;
+
+ root_pid = getpid();
+repeat:
+ num_child = rand_r(&data->rand_seed) % MAX_CHILDREN + 1;
+ worker_child = is_worker ? rand_r(&data->rand_seed) % num_child : -1;
+
+ for (curr_child = 0; curr_child < num_child; curr_child++) {
+ pid = fork();
+
+ if (pid < 0) {
+ perror("Error: fork\n");
+ } else if (pid == 0) {
+ curr_level++;
+
+ if (curr_child != worker_child)
+ is_worker = false;
+
+ if (curr_level == TOTAL_LEVEL)
+ break;
+
+ data->rand_seed += curr_child;
+ goto repeat;
+ }
+ }
+
+ if (data->do_prepare)
+ data->do_prepare(data);
+
+ close(data->pipefd[1]);
+
+ if (is_worker && curr_level == data->worker_level) {
+ /* This is the worker process; first wait for the last process to be created */
+ char buf;
+
+ while (read(data->pipefd[0], &buf, 1) > 0)
+ ;
+
+ if (data->do_work)
+ ret = data->do_work(data);
+
+ /* Kick others */
+ semctl(data->semid, 0, IPC_RMID);
+ } else {
+ /* Wait for the worker to finish */
+ semop(data->semid, &sem_wait, 1);
+ if (data->do_check)
+ ret = data->do_check(data);
+ }
+
+ /* Wait for all children to quit */
+ while (wait(&status) > 0) {
+ if (WIFEXITED(status))
+ ret |= WEXITSTATUS(status);
+ }
+
+ if (getpid() == root_pid) {
+ if (ret & FAIL_ON_WORK)
+ SKIP(return, "Failed in worker");
+
+ ASSERT_EQ(ret, 0);
+ } else {
+ exit(ret);
+ }
+}
+
+FIXTURE(migrate)
+{
+ struct global_data data;
+};
+
+FIXTURE_SETUP(migrate)
+{
+ struct global_data *data = &self->data;
+
+ if (numa_available() < 0)
+ SKIP(return, "NUMA not available");
+ if (numa_bitmask_weight(numa_all_nodes_ptr) <= 1)
+ SKIP(return, "Not enough NUMA nodes available");
+
+ data->mapsize = getpagesize();
+
+ data->expected_pfn = mmap(0, sizeof(unsigned long),
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ ASSERT_NE(data->expected_pfn, MAP_FAILED);
+
+ /* Prepare semaphore */
+ data->semid = semget(IPC_PRIVATE, 1, 0666 | IPC_CREAT);
+ ASSERT_NE(data->semid, -1);
+ ASSERT_NE(semctl(data->semid, 0, SETVAL, 0), -1);
+
+ /* Prepare pipe */
+ ASSERT_NE(pipe(data->pipefd), -1);
+
+ data->rand_seed = time(NULL);
+ srand(data->rand_seed);
+
+ data->worker_level = rand() % TOTAL_LEVEL + 1;
+
+ data->do_prepare = NULL;
+ data->do_work = NULL;
+ data->do_check = NULL;
+
+ data->backend = ANON;
+};
+
+FIXTURE_TEARDOWN(migrate)
+{
+ struct global_data *data = &self->data;
+
+ if (data->region != MAP_FAILED)
+ munmap(data->region, data->mapsize);
+ data->region = MAP_FAILED;
+ if (data->expected_pfn != MAP_FAILED)
+ munmap(data->expected_pfn, sizeof(unsigned long));
+ data->expected_pfn = MAP_FAILED;
+ semctl(data->semid, 0, IPC_RMID);
+ data->semid = -1;
+
+ close(data->pipefd[0]);
+
+ switch (data->backend) {
+ case ANON:
+ break;
+ case SHM:
+ shm_unlink(data->filename);
+ break;
+ case NORM_FILE:
+ unlink(data->filename);
+ break;
+ }
+}
+
+void access_region(struct global_data *data)
+{
+ /*
+ * Force read "region" to make sure page fault in.
+ */
+ FORCE_READ(*data->region);
+}
+
+int try_to_move_page(char *region)
+{
+ int ret;
+ int node;
+ int status = 0;
+ int failures = 0;
+
+ ret = move_pages(0, 1, (void **)&region, NULL, &status, MPOL_MF_MOVE_ALL);
+ if (ret != 0) {
+ perror("Failed to get original numa");
+ return FAIL_ON_WORK;
+ }
+
+ /* Pick up a different target node */
+ for (node = 0; node <= numa_max_node(); node++) {
+ if (numa_bitmask_isbitset(numa_all_nodes_ptr, node) && node != status)
+ break;
+ }
+
+ if (node > numa_max_node()) {
+ ksft_print_msg("Couldn't find available numa node for testing\n");
+ return FAIL_ON_WORK;
+ }
+
+ while (1) {
+ ret = move_pages(0, 1, (void **)&region, &node, &status, MPOL_MF_MOVE_ALL);
+
+ /* migrate successfully */
+ if (!ret)
+ break;
+
+ /* error happened */
+ if (ret < 0) {
+ ksft_perror("Failed to move pages");
+ return FAIL_ON_WORK;
+ }
+
+ /* migration is best effort; try again */
+ if (++failures >= 100)
+ return FAIL_ON_WORK;
+ }
+
+ return 0;
+}
+
+int move_region(struct global_data *data)
+{
+ int ret;
+ int pagemap_fd;
+
+ ret = try_to_move_page(data->region);
+ if (ret != 0)
+ return ret;
+
+ pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
+ if (pagemap_fd == -1)
+ return FAIL_ON_WORK;
+ *data->expected_pfn = pagemap_get_pfn(pagemap_fd, data->region);
+
+ return 0;
+}
+
+int has_same_pfn(struct global_data *data)
+{
+ unsigned long pfn;
+ int pagemap_fd;
+
+ if (data->region == MAP_FAILED)
+ return 0;
+
+ pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
+ if (pagemap_fd == -1)
+ return FAIL_ON_CHECK;
+
+ pfn = pagemap_get_pfn(pagemap_fd, data->region);
+ if (pfn != *data->expected_pfn)
+ return FAIL_ON_CHECK;
+
+ return 0;
+}
+
+TEST_F(migrate, anon)
+{
+ struct global_data *data = &self->data;
+
+ /* Map an area and fault in */
+ data->region = mmap(0, data->mapsize, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ ASSERT_NE(data->region, MAP_FAILED);
+ memset(data->region, 0xcf, data->mapsize);
+
+ data->do_prepare = access_region;
+ data->do_work = move_region;
+ data->do_check = has_same_pfn;
+
+ propagate_children(_metadata, data);
+}
+
+TEST_F(migrate, shm)
+{
+ int shm_fd;
+ struct global_data *data = &self->data;
+
+ snprintf(data->filename, MAX_FILENAME_LEN, "%s%s", PREFIX, suffixes[SHM]);
+ shm_fd = shm_open(data->filename, O_CREAT | O_RDWR, 0666);
+ ASSERT_NE(shm_fd, -1);
+ ftruncate(shm_fd, data->mapsize);
+ data->backend = SHM;
+
+ /* Map a shared area and fault in */
+ data->region = mmap(0, data->mapsize, PROT_READ | PROT_WRITE,
+ MAP_SHARED, shm_fd, 0);
+ ASSERT_NE(data->region, MAP_FAILED);
+ memset(data->region, 0xcf, data->mapsize);
+ close(shm_fd);
+
+ data->do_prepare = access_region;
+ data->do_work = move_region;
+ data->do_check = has_same_pfn;
+
+ propagate_children(_metadata, data);
+}
+
+TEST_F(migrate, file)
+{
+ int fd;
+ struct global_data *data = &self->data;
+
+ snprintf(data->filename, MAX_FILENAME_LEN, "%s%s", PREFIX, suffixes[NORM_FILE]);
+ fd = open(data->filename, O_CREAT | O_RDWR | O_EXCL, 0666);
+ ASSERT_NE(fd, -1);
+ ftruncate(fd, data->mapsize);
+ data->backend = NORM_FILE;
+
+ /* Map a shared area and fault in */
+ data->region = mmap(0, data->mapsize, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0);
+ ASSERT_NE(data->region, MAP_FAILED);
+ memset(data->region, 0xcf, data->mapsize);
+ close(fd);
+
+ data->do_prepare = access_region;
+ data->do_work = move_region;
+ data->do_check = has_same_pfn;
+
+ propagate_children(_metadata, data);
+}
+
+void prepare_local_region(struct global_data *data)
+{
+ /* Allocate a range and fill it with identical data so KSM can merge it */
+ data->region = mmap(NULL, data->mapsize, PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANON, -1, 0);
+ if (data->region == MAP_FAILED)
+ return;
+
+ memset(data->region, 0xcf, data->mapsize);
+}
+
+int merge_and_migrate(struct global_data *data)
+{
+ int pagemap_fd;
+ int ret = 0;
+
+ if (data->region == MAP_FAILED)
+ return FAIL_ON_WORK;
+
+ if (ksm_start() < 0)
+ return FAIL_ON_WORK;
+
+ ret = try_to_move_page(data->region);
+
+ pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
+ if (pagemap_fd == -1)
+ return FAIL_ON_WORK;
+ *data->expected_pfn = pagemap_get_pfn(pagemap_fd, data->region);
+
+ return ret;
+}
+
+TEST_F(migrate, ksm)
+{
+ int ret;
+ struct global_data *data = &self->data;
+
+ if (ksm_stop() < 0)
+ SKIP(return, "accessing \"/sys/kernel/mm/ksm/run\") failed");
+ if (ksm_get_full_scans() < 0)
+ SKIP(return, "accessing \"/sys/kernel/mm/ksm/full_scan\") failed");
+
+ ret = prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0);
+ if (ret < 0 && errno == EINVAL)
+ SKIP(return, "PR_SET_MEMORY_MERGE not supported");
+ else if (ret)
+ ksft_exit_fail_perror("PR_SET_MEMORY_MERGE=1 failed");
+
+ data->do_prepare = prepare_local_region;
+ data->do_work = merge_and_migrate;
+ data->do_check = has_same_pfn;
+
+ propagate_children(_metadata, data);
+}
+
+TEST_HARNESS_MAIN
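try_to_move_page() above first calls move_pages() with a NULL nodes array, which is the query form of the syscall: nothing is migrated, and status[] receives each page's current node, which the test then avoids when picking a migration target. A minimal sketch of that query (link with -lnuma):

#include <numaif.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t psz = (size_t)sysconf(_SC_PAGESIZE);
	void *page = mmap(NULL, psz, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	int status = -1;

	if (page == MAP_FAILED)
		return 1;
	memset(page, 1, psz);	/* fault the page in first */

	/* nodes == NULL: query only; the current node lands in status. */
	if (move_pages(0, 1, &page, NULL, &status, MPOL_MF_MOVE_ALL) == 0)
		printf("page is on node %d\n", status);
	return 0;
}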
diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh
index dddd1dd8af14..d9173f2312b7 100755
--- a/tools/testing/selftests/mm/run_vmtests.sh
+++ b/tools/testing/selftests/mm/run_vmtests.sh
@@ -65,6 +65,8 @@ separated by spaces:
test pagemap_scan IOCTL
- pfnmap
tests for VM_PFNMAP handling
+- process_madv
+ tests for process_madvise()
- cow
test copy-on-write semantics
- thp
@@ -83,6 +85,8 @@ separated by spaces:
test handling of page fragment allocation and freeing
- vma_merge
test VMA merge cases behave as expected
+- rmap
+ test rmap behaves as expected
example: ./run_vmtests.sh -t "hmm mmap ksm"
EOF
@@ -134,7 +138,7 @@ run_gup_matrix() {
# -n: How many pages to fetch together? 512 is special
# because it's default thp size (or 2M on x86), 123 to
# just test partial gup when hit a huge in whatever form
- for num in "-n 1" "-n 512" "-n 123"; do
+ for num in "-n 1" "-n 512" "-n 123" "-n -1"; do
CATEGORY="gup_test" run_test ./gup_test \
$huge $test_cmd $write $share $num
done
@@ -170,13 +174,13 @@ fi
# set proper nr_hugepages
if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
- nr_hugepgs=$(cat /proc/sys/vm/nr_hugepages)
+ orig_nr_hugepgs=$(cat /proc/sys/vm/nr_hugepages)
needpgs=$((needmem_KB / hpgsize_KB))
tries=2
while [ "$tries" -gt 0 ] && [ "$freepgs" -lt "$needpgs" ]; do
lackpgs=$((needpgs - freepgs))
echo 3 > /proc/sys/vm/drop_caches
- if ! echo $((lackpgs + nr_hugepgs)) > /proc/sys/vm/nr_hugepages; then
+ if ! echo $((lackpgs + orig_nr_hugepgs)) > /proc/sys/vm/nr_hugepages; then
echo "Please run this test as root"
exit $ksft_skip
fi
@@ -187,6 +191,7 @@ if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
done < /proc/meminfo
tries=$((tries - 1))
done
+ nr_hugepgs=$(cat /proc/sys/vm/nr_hugepages)
if [ "$freepgs" -lt "$needpgs" ]; then
printf "Not enough huge pages available (%d < %d)\n" \
"$freepgs" "$needpgs"
@@ -309,9 +314,11 @@ if $RUN_ALL; then
run_gup_matrix
else
# get_user_pages_fast() benchmark
- CATEGORY="gup_test" run_test ./gup_test -u
+ CATEGORY="gup_test" run_test ./gup_test -u -n 1
+ CATEGORY="gup_test" run_test ./gup_test -u -n -1
# pin_user_pages_fast() benchmark
- CATEGORY="gup_test" run_test ./gup_test -a
+ CATEGORY="gup_test" run_test ./gup_test -a -n 1
+ CATEGORY="gup_test" run_test ./gup_test -a -n -1
fi
# Dump pages 0, 19, and 4096, using pin_user_pages:
CATEGORY="gup_test" run_test ./gup_test -ct -F 0x1 0 19 0x1000
@@ -320,11 +327,15 @@ CATEGORY="gup_test" run_test ./gup_longterm
CATEGORY="userfaultfd" run_test ./uffd-unit-tests
uffd_stress_bin=./uffd-stress
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} anon 20 16
-# Hugetlb tests require source and destination huge pages. Pass in half
-# the size of the free pages we have, which is used for *each*.
+# Hugetlb tests require source and destination huge pages. Pass in almost half
+# the size of the free pages we have, which is used for *each*. An adjustment
+# of (nr_parallel - 1) is done (see nr_parallel in uffd-stress.c) to have some
+# extra hugepages - this is done to prevent the test from failing by racily
+# reserving more hugepages than strictly required.
# uffd-stress expects a region expressed in MiB, so we adjust
# half_ufd_size_MB accordingly.
-half_ufd_size_MB=$(((freepgs * hpgsize_KB) / 1024 / 2))
+adjustment=$(( (31 < (nr_cpus - 1)) ? 31 : (nr_cpus - 1) ))
+half_ufd_size_MB=$((((freepgs - adjustment) * hpgsize_KB) / 1024 / 2))
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb "$half_ufd_size_MB" 32
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb-private "$half_ufd_size_MB" 32
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} shmem 20 16
@@ -425,11 +436,16 @@ CATEGORY="madv_guard" run_test ./guard-regions
# MADV_POPULATE_READ and MADV_POPULATE_WRITE tests
CATEGORY="madv_populate" run_test ./madv_populate
+# PROCESS_MADV test
+CATEGORY="process_madv" run_test ./process_madv
+
CATEGORY="vma_merge" run_test ./merge
if [ -x ./memfd_secret ]
then
-(echo 0 > /proc/sys/kernel/yama/ptrace_scope 2>&1) | tap_prefix
+if [ -f /proc/sys/kernel/yama/ptrace_scope ]; then
+ (echo 0 > /proc/sys/kernel/yama/ptrace_scope 2>&1) | tap_prefix
+fi
CATEGORY="memfd_secret" run_test ./memfd_secret
fi
@@ -483,6 +499,10 @@ CATEGORY="thp" run_test ./khugepaged
CATEGORY="thp" run_test ./khugepaged -s 2
+CATEGORY="thp" run_test ./khugepaged all:shmem
+
+CATEGORY="thp" run_test ./khugepaged -s 4 all:shmem
+
CATEGORY="thp" run_test ./transhuge-stress -d 20
# Try to create XFS if not provided
@@ -521,6 +541,12 @@ CATEGORY="page_frag" run_test ./test_page_frag.sh aligned
CATEGORY="page_frag" run_test ./test_page_frag.sh nonaligned
+CATEGORY="rmap" run_test ./rmap
+
+if [ "${HAVE_HUGEPAGES}" = 1 ]; then
+ echo "$orig_nr_hugepgs" > /proc/sys/vm/nr_hugepages
+fi
+
echo "SUMMARY: PASS=${count_pass} SKIP=${count_skip} FAIL=${count_fail}" | tap_prefix
echo "1..${count_total}" | tap_output
diff --git a/tools/testing/selftests/mm/settings b/tools/testing/selftests/mm/settings
index a953c96aa16e..e2206265f67c 100644
--- a/tools/testing/selftests/mm/settings
+++ b/tools/testing/selftests/mm/settings
@@ -1 +1 @@
-timeout=180
+timeout=900
diff --git a/tools/testing/selftests/mm/soft-dirty.c b/tools/testing/selftests/mm/soft-dirty.c
index 8e1462ce0532..59c0dbe99a9b 100644
--- a/tools/testing/selftests/mm/soft-dirty.c
+++ b/tools/testing/selftests/mm/soft-dirty.c
@@ -6,8 +6,10 @@
#include <stdint.h>
#include <malloc.h>
#include <sys/mman.h>
-#include "../kselftest.h"
+
+#include "kselftest.h"
#include "vm_util.h"
+#include "thp_settings.h"
#define PAGEMAP_FILE_PATH "/proc/self/pagemap"
#define TEST_ITERATIONS 10000
@@ -78,8 +80,13 @@ static void test_hugepage(int pagemap_fd, int pagesize)
{
char *map;
int i, ret;
- size_t hpage_len = read_pmd_pagesize();
+ if (!thp_is_enabled()) {
+ ksft_test_result_skip("Transparent Hugepages not available\n");
+ return;
+ }
+
+ size_t hpage_len = read_pmd_pagesize();
if (!hpage_len)
ksft_exit_fail_msg("Reading PMD pagesize failed");
@@ -177,6 +184,130 @@ static void test_mprotect(int pagemap_fd, int pagesize, bool anon)
close(test_fd);
}
+static void test_merge(int pagemap_fd, int pagesize)
+{
+ char *reserved, *map, *map2;
+
+ /*
+ * Reserve space for tests:
+ *
+ * ---padding to ---
+ * | avoid adj. |
+ * v merge v
+ * |---|---|---|---|---|
+ * | | 1 | 2 | 3 | |
+ * |---|---|---|---|---|
+ */
+ reserved = mmap(NULL, 5 * pagesize, PROT_NONE,
+ MAP_ANON | MAP_PRIVATE, -1, 0);
+ if (reserved == MAP_FAILED)
+ ksft_exit_fail_msg("mmap failed\n");
+ munmap(reserved, 4 * pagesize);
+
+ /*
+ * Establish initial VMA:
+ *
+ * S/D
+ * |---|---|---|---|---|
+ * | | 1 | | | |
+ * |---|---|---|---|---|
+ */
+ map = mmap(&reserved[pagesize], pagesize, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ if (map == MAP_FAILED)
+ ksft_exit_fail_msg("mmap failed\n");
+
+ /* This will clear VM_SOFTDIRTY too. */
+ clear_softdirty();
+
+ /*
+ * Now place a new mapping which will be marked VM_SOFTDIRTY. Away from
+ * map:
+ *
+ * - S/D
+ * |---|---|---|---|---|
+ * | | 1 | | 2 | |
+ * |---|---|---|---|---|
+ */
+ map2 = mmap(&reserved[3 * pagesize], pagesize, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ if (map2 == MAP_FAILED)
+ ksft_exit_fail_msg("mmap failed\n");
+
+ /*
+ * Now remap it immediately adjacent to map, if the merge correctly
+ * propagates VM_SOFTDIRTY, we should then observe the VMA as a whole
+ * being marked soft-dirty:
+ *
+ * merge
+ * S/D
+ * |---|-------|---|---|
+ * | | 1 | | |
+ * |---|-------|---|---|
+ */
+ map2 = mremap(map2, pagesize, pagesize, MREMAP_FIXED | MREMAP_MAYMOVE,
+ &reserved[2 * pagesize]);
+ if (map2 == MAP_FAILED)
+ ksft_exit_fail_msg("mremap failed\n");
+ ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 1,
+ "Test %s-anon soft-dirty after remap merge 1st pg\n",
+ __func__);
+ ksft_test_result(pagemap_is_softdirty(pagemap_fd, map2) == 1,
+ "Test %s-anon soft-dirty after remap merge 2nd pg\n",
+ __func__);
+
+ munmap(map, 2 * pagesize);
+
+ /*
+ * Now establish another VMA:
+ *
+ * S/D
+ * |---|---|---|---|---|
+ * | | 1 | | | |
+ * |---|---|---|---|---|
+ */
+ map = mmap(&reserved[pagesize], pagesize, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ if (map == MAP_FAILED)
+ ksft_exit_fail_msg("mmap failed\n");
+
+ /* Clear VM_SOFTDIRTY... */
+ clear_softdirty();
+ /* ...and establish incompatible adjacent VMA:
+ *
+ * - S/D
+ * |---|---|---|---|---|
+ * | | 1 | 2 | | |
+ * |---|---|---|---|---|
+ */
+ map2 = mmap(&reserved[2 * pagesize], pagesize,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ if (map2 == MAP_FAILED)
+ ksft_exit_fail_msg("mmap failed\n");
+
+ /*
+ * Now mprotect() VMA 1 so it's compatible with 2 and therefore merges:
+ *
+ * merge
+ * S/D
+ * |---|-------|---|---|
+ * | | 1 | | |
+ * |---|-------|---|---|
+ */
+ if (mprotect(map, pagesize, PROT_READ | PROT_WRITE | PROT_EXEC))
+ ksft_exit_fail_msg("mprotect failed\n");
+
+ ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 1,
+ "Test %s-anon soft-dirty after mprotect merge 1st pg\n",
+ __func__);
+ ksft_test_result(pagemap_is_softdirty(pagemap_fd, map2) == 1,
+ "Test %s-anon soft-dirty after mprotect merge 2nd pg\n",
+ __func__);
+
+ munmap(map, 2 * pagesize);
+}
+
static void test_mprotect_anon(int pagemap_fd, int pagesize)
{
test_mprotect(pagemap_fd, pagesize, true);
@@ -193,8 +324,11 @@ int main(int argc, char **argv)
int pagesize;
ksft_print_header();
- ksft_set_plan(15);
+ if (!softdirty_supported())
+ ksft_exit_skip("soft-dirty is not support\n");
+
+ ksft_set_plan(19);
pagemap_fd = open(PAGEMAP_FILE_PATH, O_RDONLY);
if (pagemap_fd < 0)
ksft_exit_fail_msg("Failed to open %s\n", PAGEMAP_FILE_PATH);
@@ -206,6 +340,7 @@ int main(int argc, char **argv)
test_hugepage(pagemap_fd, pagesize);
test_mprotect_anon(pagemap_fd, pagesize);
test_mprotect_file(pagemap_fd, pagesize);
+ test_merge(pagemap_fd, pagesize);
close(pagemap_fd);
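The pagemap_is_softdirty() checks above reduce to reading the 64-bit /proc/self/pagemap entry for a virtual address and testing bit 55, the soft-dirty bit. A minimal sketch, assuming the documented pagemap layout:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int softdirty(int fd, void *vaddr, size_t pagesize)
{
	uint64_t entry;
	off_t off = (off_t)((uintptr_t)vaddr / pagesize) * sizeof(entry);

	if (pread(fd, &entry, sizeof(entry), off) != sizeof(entry))
		return -1;
	return !!(entry & (1ULL << 55));	/* bit 55: soft-dirty */
}

int main(void)
{
	int fd = open("/proc/self/pagemap", O_RDONLY);
	int sd = softdirty(fd, &fd, getpagesize());

	printf("stack page soft-dirty: %d\n", sd);
	close(fd);
	return 0;
}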
diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c
index aa7400ed0e99..40799f3f0213 100644
--- a/tools/testing/selftests/mm/split_huge_page_test.c
+++ b/tools/testing/selftests/mm/split_huge_page_test.c
@@ -20,41 +20,241 @@
#include <stdbool.h>
#include <time.h>
#include "vm_util.h"
-#include "../kselftest.h"
+#include "kselftest.h"
uint64_t pagesize;
unsigned int pageshift;
uint64_t pmd_pagesize;
+unsigned int pmd_order;
+int *expected_orders;
#define SPLIT_DEBUGFS "/sys/kernel/debug/split_huge_pages"
#define SMAP_PATH "/proc/self/smaps"
#define INPUT_MAX 80
#define PID_FMT "%d,0x%lx,0x%lx,%d"
+#define PID_FMT_OFFSET "%d,0x%lx,0x%lx,%d,%d"
#define PATH_FMT "%s,0x%lx,0x%lx,%d"
-#define PFN_MASK ((1UL<<55)-1)
-#define KPF_THP (1UL<<22)
+const char *pagemap_proc = "/proc/self/pagemap";
+const char *kpageflags_proc = "/proc/kpageflags";
+int pagemap_fd;
+int kpageflags_fd;
-int is_backed_by_thp(char *vaddr, int pagemap_file, int kpageflags_file)
+static bool is_backed_by_folio(char *vaddr, int order, int pagemap_fd,
+ int kpageflags_fd)
{
- uint64_t paddr;
- uint64_t page_flags;
+ const uint64_t folio_head_flags = KPF_THP | KPF_COMPOUND_HEAD;
+ const uint64_t folio_tail_flags = KPF_THP | KPF_COMPOUND_TAIL;
+ const unsigned long nr_pages = 1UL << order;
+ unsigned long pfn_head;
+ uint64_t pfn_flags;
+ unsigned long pfn;
+ unsigned long i;
+
+ pfn = pagemap_get_pfn(pagemap_fd, vaddr);
+
+ /* non present page */
+ if (pfn == -1UL)
+ return false;
+
+ if (pageflags_get(pfn, kpageflags_fd, &pfn_flags))
+ goto fail;
+
+ /* check for order-0 pages */
+ if (!order) {
+ if (pfn_flags & (folio_head_flags | folio_tail_flags))
+ return false;
+ return true;
+ }
+
+ /* non THP folio */
+ if (!(pfn_flags & KPF_THP))
+ return false;
+
+ pfn_head = pfn & ~(nr_pages - 1);
- if (pagemap_file) {
- pread(pagemap_file, &paddr, sizeof(paddr),
- ((long)vaddr >> pageshift) * sizeof(paddr));
+ if (pageflags_get(pfn_head, kpageflags_fd, &pfn_flags))
+ goto fail;
- if (kpageflags_file) {
- pread(kpageflags_file, &page_flags, sizeof(page_flags),
- (paddr & PFN_MASK) * sizeof(page_flags));
+ /* head PFN has no compound_head flag set */
+ if ((pfn_flags & folio_head_flags) != folio_head_flags)
+ return false;
+
+ /* check all tail PFN flags */
+ for (i = 1; i < nr_pages; i++) {
+ if (pageflags_get(pfn_head + i, kpageflags_fd, &pfn_flags))
+ goto fail;
+ if ((pfn_flags & folio_tail_flags) != folio_tail_flags)
+ return false;
+ }
+
+ /*
+ * check the PFN after this folio, but if its flags cannot be obtained,
+ * assume this folio has the expected order
+ */
+ if (pageflags_get(pfn_head + nr_pages, kpageflags_fd, &pfn_flags))
+ return true;
+
+ /* If we find another tail page, then the folio is larger. */
+ return (pfn_flags & folio_tail_flags) != folio_tail_flags;
+fail:
+ ksft_exit_fail_msg("Failed to get folio info\n");
+ return false;
+}
+
+static int vaddr_pageflags_get(char *vaddr, int pagemap_fd, int kpageflags_fd,
+ uint64_t *flags)
+{
+ unsigned long pfn;
+
+ pfn = pagemap_get_pfn(pagemap_fd, vaddr);
+
+ /* non-present PFN */
+ if (pfn == -1UL)
+ return 1;
+
+ if (pageflags_get(pfn, kpageflags_fd, flags))
+ return -1;
+
+ return 0;
+}
+
+/*
+ * gather_after_split_folio_orders - scan through [vaddr_start, len) and record
+ * folio orders
+ *
+ * @vaddr_start: start vaddr
+ * @len: range length
+ * @pagemap_fd: file descriptor to /proc/<pid>/pagemap
+ * @kpageflags_fd: file descriptor to /proc/kpageflags
+ * @orders: output folio order array
+ * @nr_orders: folio order array size
+ *
+ * gather_after_split_folio_orders() scans through [vaddr_start, len), checks
+ * all folios within the range, and records their orders. All order-0 pages
+ * are recorded. Non-present vaddrs are skipped.
+ *
+ * NOTE: the function is used to check folio orders after a split is performed,
+ * so it assumes [vaddr_start, len) fully maps to after-split folios within that
+ * range.
+ *
+ * Return: 0 - no error, -1 - unhandled cases
+ */
+static int gather_after_split_folio_orders(char *vaddr_start, size_t len,
+ int pagemap_fd, int kpageflags_fd, int orders[], int nr_orders)
+{
+ uint64_t page_flags = 0;
+ int cur_order = -1;
+ char *vaddr;
+
+ if (pagemap_fd == -1 || kpageflags_fd == -1)
+ return -1;
+ if (!orders)
+ return -1;
+ if (nr_orders <= 0)
+ return -1;
+
+ for (vaddr = vaddr_start; vaddr < vaddr_start + len;) {
+ char *next_folio_vaddr;
+ int status;
+
+ status = vaddr_pageflags_get(vaddr, pagemap_fd, kpageflags_fd,
+ &page_flags);
+ if (status < 0)
+ return -1;
+
+ /* skip non present vaddr */
+ if (status == 1) {
+ vaddr += psize();
+ continue;
+ }
+
+ /* all order-0 pages, with possible false positives (non-folio pages) */
+ if (!(page_flags & (KPF_COMPOUND_HEAD | KPF_COMPOUND_TAIL))) {
+ orders[0]++;
+ vaddr += psize();
+ continue;
+ }
- return !!(page_flags & KPF_THP);
+ /* skip non-THP compound pages */
+ if (!(page_flags & KPF_THP)) {
+ vaddr += psize();
+ continue;
}
+
+ /* vaddr points to part of a THP at this point */
+ if (page_flags & KPF_COMPOUND_HEAD)
+ cur_order = 1;
+ else {
+ vaddr += psize();
+ continue;
+ }
+
+ next_folio_vaddr = vaddr + (1UL << (cur_order + pshift()));
+
+ if (next_folio_vaddr >= vaddr_start + len)
+ break;
+
+ while ((status = vaddr_pageflags_get(next_folio_vaddr,
+ pagemap_fd, kpageflags_fd,
+ &page_flags)) >= 0) {
+ /*
+ * non present vaddr, next compound head page, or
+ * order-0 page
+ */
+ if (status == 1 ||
+ (page_flags & KPF_COMPOUND_HEAD) ||
+ !(page_flags & (KPF_COMPOUND_HEAD | KPF_COMPOUND_TAIL))) {
+ if (cur_order < nr_orders) {
+ orders[cur_order]++;
+ cur_order = -1;
+ vaddr = next_folio_vaddr;
+ }
+ break;
+ }
+
+ cur_order++;
+ next_folio_vaddr = vaddr + (1UL << (cur_order + pshift()));
+ }
+
+ if (status < 0)
+ return status;
}
+ if (cur_order > 0 && cur_order < nr_orders)
+ orders[cur_order]++;
return 0;
}
+static int check_after_split_folio_orders(char *vaddr_start, size_t len,
+ int pagemap_fd, int kpageflags_fd, int orders[], int nr_orders)
+{
+ int *vaddr_orders;
+ int status;
+ int i;
+
+ vaddr_orders = (int *)malloc(sizeof(int) * nr_orders);
+
+ if (!vaddr_orders)
+ ksft_exit_fail_msg("Cannot allocate memory for vaddr_orders");
+
+ memset(vaddr_orders, 0, sizeof(int) * nr_orders);
+ status = gather_after_split_folio_orders(vaddr_start, len, pagemap_fd,
+ kpageflags_fd, vaddr_orders, nr_orders);
+ if (status)
+ ksft_exit_fail_msg("gather folio info failed\n");
+
+ for (i = 0; i < nr_orders; i++)
+ if (vaddr_orders[i] != orders[i]) {
+ ksft_print_msg("order %d: expected: %d got %d\n", i,
+ orders[i], vaddr_orders[i]);
+ status = -1;
+ }
+
+ free(vaddr_orders);
+ return status;
+}
+
static void write_file(const char *path, const char *buf, size_t buflen)
{
int fd;
@@ -110,7 +310,7 @@ static void verify_rss_anon_split_huge_page_all_zeroes(char *one_page, int nr_hp
unsigned long rss_anon_before, rss_anon_after;
size_t i;
- if (!check_huge_anon(one_page, 4, pmd_pagesize))
+ if (!check_huge_anon(one_page, nr_hpages, pmd_pagesize))
ksft_exit_fail_msg("No THP is allocated\n");
rss_anon_before = rss_anon();
@@ -134,7 +334,7 @@ static void verify_rss_anon_split_huge_page_all_zeroes(char *one_page, int nr_hp
rss_anon_before, rss_anon_after);
}
-void split_pmd_zero_pages(void)
+static void split_pmd_zero_pages(void)
{
char *one_page;
int nr_hpages = 4;
@@ -146,7 +346,7 @@ void split_pmd_zero_pages(void)
free(one_page);
}
-void split_pmd_thp_to_order(int order)
+static void split_pmd_thp_to_order(int order)
{
char *one_page;
size_t len = 4 * pmd_pagesize;
@@ -172,6 +372,13 @@ void split_pmd_thp_to_order(int order)
if (one_page[i] != (char)i)
ksft_exit_fail_msg("%ld byte corrupted\n", i);
+ memset(expected_orders, 0, sizeof(int) * (pmd_order + 1));
+ expected_orders[order] = 4 << (pmd_order - order);
+
+ if (check_after_split_folio_orders(one_page, len, pagemap_fd,
+ kpageflags_fd, expected_orders,
+ (pmd_order + 1)))
+ ksft_exit_fail_msg("Unexpected THP split\n");
if (!check_huge_anon(one_page, 0, pmd_pagesize))
ksft_exit_fail_msg("Still AnonHugePages not split\n");
@@ -180,90 +387,97 @@ void split_pmd_thp_to_order(int order)
free(one_page);
}
-void split_pte_mapped_thp(void)
+static void split_pte_mapped_thp(void)
{
- char *one_page, *pte_mapped, *pte_mapped2;
- size_t len = 4 * pmd_pagesize;
- uint64_t thp_size;
+ const size_t nr_thps = 4;
+ const size_t thp_area_size = nr_thps * pmd_pagesize;
+ const size_t page_area_size = nr_thps * pagesize;
+ char *thp_area, *tmp, *page_area = MAP_FAILED;
size_t i;
- const char *pagemap_template = "/proc/%d/pagemap";
- const char *kpageflags_proc = "/proc/kpageflags";
- char pagemap_proc[255];
- int pagemap_fd;
- int kpageflags_fd;
- if (snprintf(pagemap_proc, 255, pagemap_template, getpid()) < 0)
- ksft_exit_fail_msg("get pagemap proc error: %s\n", strerror(errno));
+ thp_area = mmap((void *)(1UL << 30), thp_area_size, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ if (thp_area == MAP_FAILED) {
+ ksft_test_result_fail("Fail to allocate memory: %s\n", strerror(errno));
+ return;
+ }
- pagemap_fd = open(pagemap_proc, O_RDONLY);
- if (pagemap_fd == -1)
- ksft_exit_fail_msg("read pagemap: %s\n", strerror(errno));
+ madvise(thp_area, thp_area_size, MADV_HUGEPAGE);
- kpageflags_fd = open(kpageflags_proc, O_RDONLY);
- if (kpageflags_fd == -1)
- ksft_exit_fail_msg("read kpageflags: %s\n", strerror(errno));
+ for (i = 0; i < thp_area_size; i++)
+ thp_area[i] = (char)i;
- one_page = mmap((void *)(1UL << 30), len, PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
- if (one_page == MAP_FAILED)
- ksft_exit_fail_msg("Fail to allocate memory: %s\n", strerror(errno));
+ if (!check_huge_anon(thp_area, nr_thps, pmd_pagesize)) {
+ ksft_test_result_skip("Not all THPs allocated\n");
+ goto out;
+ }
- madvise(one_page, len, MADV_HUGEPAGE);
+ /*
+ * To challenge the splitting code, we will mremap a single page of each
+ * THP (page[i] of thp[i]) in the thp_area into page_area. This will
+ * replace the PMD mappings in the thp_area by PTE mappings first,
+ * but leaving the THP unsplit, to then create a page-sized hole in
+ * the thp_area.
+ * We will then manually trigger splitting of all THPs through the
+ * single mremap'ed pages of each THP in the page_area.
+ */
+ page_area = mmap(NULL, page_area_size, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ if (page_area == MAP_FAILED) {
+ ksft_test_result_fail("Fail to allocate memory: %s\n", strerror(errno));
+ goto out;
+ }
- for (i = 0; i < len; i++)
- one_page[i] = (char)i;
+ for (i = 0; i < nr_thps; i++) {
+ tmp = mremap(thp_area + pmd_pagesize * i + pagesize * i,
+ pagesize, pagesize, MREMAP_MAYMOVE|MREMAP_FIXED,
+ page_area + pagesize * i);
+ if (tmp != MAP_FAILED)
+ continue;
+ ksft_test_result_fail("mremap failed: %s\n", strerror(errno));
+ goto out;
+ }
- if (!check_huge_anon(one_page, 4, pmd_pagesize))
- ksft_exit_fail_msg("No THP is allocated\n");
+ /*
+ * Verify that our THPs were not split yet. Note that
+ * check_huge_anon() cannot be used as it checks for PMD mappings.
+ */
+ for (i = 0; i < nr_thps; i++) {
+ if (is_backed_by_folio(page_area + i * pagesize, pmd_order,
+ pagemap_fd, kpageflags_fd))
+ continue;
+ ksft_test_result_fail("THP %zu missing after mremap\n", i);
+ goto out;
+ }
- /* remap the first pagesize of first THP */
- pte_mapped = mremap(one_page, pagesize, pagesize, MREMAP_MAYMOVE);
-
- /* remap the Nth pagesize of Nth THP */
- for (i = 1; i < 4; i++) {
- pte_mapped2 = mremap(one_page + pmd_pagesize * i + pagesize * i,
- pagesize, pagesize,
- MREMAP_MAYMOVE|MREMAP_FIXED,
- pte_mapped + pagesize * i);
- if (pte_mapped2 == MAP_FAILED)
- ksft_exit_fail_msg("mremap failed: %s\n", strerror(errno));
- }
-
- /* smap does not show THPs after mremap, use kpageflags instead */
- thp_size = 0;
- for (i = 0; i < pagesize * 4; i++)
- if (i % pagesize == 0 &&
- is_backed_by_thp(&pte_mapped[i], pagemap_fd, kpageflags_fd))
- thp_size++;
-
- if (thp_size != 4)
- ksft_exit_fail_msg("Some THPs are missing during mremap\n");
-
- /* split all remapped THPs */
- write_debugfs(PID_FMT, getpid(), (uint64_t)pte_mapped,
- (uint64_t)pte_mapped + pagesize * 4, 0);
-
- /* smap does not show THPs after mremap, use kpageflags instead */
- thp_size = 0;
- for (i = 0; i < pagesize * 4; i++) {
- if (pte_mapped[i] != (char)i)
- ksft_exit_fail_msg("%ld byte corrupted\n", i);
+ /* Split all THPs through the remapped pages. */
+ write_debugfs(PID_FMT, getpid(), (uint64_t)page_area,
+ (uint64_t)page_area + page_area_size, 0);
- if (i % pagesize == 0 &&
- is_backed_by_thp(&pte_mapped[i], pagemap_fd, kpageflags_fd))
- thp_size++;
+ /* Corruption during mremap or split? */
+ for (i = 0; i < page_area_size; i++) {
+ if (page_area[i] == (char)i)
+ continue;
+ ksft_test_result_fail("%zu byte corrupted\n", i);
+ goto out;
}
- if (thp_size)
- ksft_exit_fail_msg("Still %ld THPs not split\n", thp_size);
+ /* Split failed? */
+ for (i = 0; i < nr_thps; i++) {
+ if (is_backed_by_folio(page_area + i * pagesize, 0,
+ pagemap_fd, kpageflags_fd))
+ continue;
+ ksft_test_result_fail("THP %zu not split\n", i);
+ }
ksft_test_result_pass("Split PTE-mapped huge pages successful\n");
- munmap(one_page, len);
- close(pagemap_fd);
- close(kpageflags_fd);
+out:
+ munmap(thp_area, thp_area_size);
+ if (page_area != MAP_FAILED)
+ munmap(page_area, page_area_size);
}
-void split_file_backed_thp(int order)
+static void split_file_backed_thp(int order)
{
int status;
int fd;
@@ -296,7 +510,7 @@ void split_file_backed_thp(int order)
status = snprintf(testfile, INPUT_MAX, "%s/thp_file", tmpfs_loc);
if (status >= INPUT_MAX) {
- ksft_exit_fail_msg("Fail to create file-backed THP split testing file\n");
+ ksft_print_msg("Fail to create file-backed THP split testing file\n");
goto cleanup;
}
@@ -365,7 +579,7 @@ out:
ksft_exit_fail_msg("Error occurred\n");
}
-bool prepare_thp_fs(const char *xfs_path, char *thp_fs_template,
+static bool prepare_thp_fs(const char *xfs_path, char *thp_fs_template,
const char **thp_fs_loc)
{
if (xfs_path) {
@@ -381,7 +595,7 @@ bool prepare_thp_fs(const char *xfs_path, char *thp_fs_template,
return true;
}
-void cleanup_thp_fs(const char *thp_fs_loc, bool created_tmp)
+static void cleanup_thp_fs(const char *thp_fs_loc, bool created_tmp)
{
int status;
@@ -394,11 +608,10 @@ void cleanup_thp_fs(const char *thp_fs_loc, bool created_tmp)
strerror(errno));
}
-int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size, int *fd,
- char **addr)
+static int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size,
+ int *fd, char **addr)
{
size_t i;
- int dummy = 0;
unsigned char buf[1024];
srand(time(NULL));
@@ -439,9 +652,11 @@ int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size, int *fd,
}
madvise(*addr, fd_size, MADV_HUGEPAGE);
- for (size_t i = 0; i < fd_size; i++)
- dummy += *(*addr + i);
- asm volatile("" : "+r" (dummy));
+ for (size_t i = 0; i < fd_size; i++) {
+ char *addr2 = *addr + i;
+
+ FORCE_READ(*addr2);
+ }
if (!check_huge_file(*addr, fd_size / pmd_pagesize, pmd_pagesize)) {
ksft_print_msg("No large pagecache folio generated, please provide a filesystem supporting large folio\n");
@@ -460,10 +675,11 @@ err_out_unlink:
return -1;
}
-void split_thp_in_pagecache_to_order_at(size_t fd_size, const char *fs_loc,
- int order, int offset)
+static void split_thp_in_pagecache_to_order_at(size_t fd_size,
+ const char *fs_loc, int order, int offset)
{
int fd;
+ char *split_addr;
char *addr;
size_t i;
char testfile[INPUT_MAX];
@@ -477,14 +693,33 @@ void split_thp_in_pagecache_to_order_at(size_t fd_size, const char *fs_loc,
err = create_pagecache_thp_and_fd(testfile, fd_size, &fd, &addr);
if (err)
return;
+
err = 0;
- if (offset == -1)
- write_debugfs(PID_FMT, getpid(), (uint64_t)addr,
- (uint64_t)addr + fd_size, order);
- else
- write_debugfs(PID_FMT, getpid(), (uint64_t)addr,
- (uint64_t)addr + fd_size, order, offset);
+ memset(expected_orders, 0, sizeof(int) * (pmd_order + 1));
+ /*
+ * Use a [split_addr, split_addr + pagesize) range to split THPs, since
+ * the debugfs function always splits a range in pagesize steps, and
+ * providing the full [addr, addr + fd_size) range can trigger multiple
+ * splits, complicating after-split result checking.
+ */
+ if (offset == -1) {
+ for (split_addr = addr; split_addr < addr + fd_size; split_addr += pmd_pagesize)
+ write_debugfs(PID_FMT, getpid(), (uint64_t)split_addr,
+ (uint64_t)split_addr + pagesize, order);
+
+ expected_orders[order] = fd_size / (pagesize << order);
+ } else {
+ int times = fd_size / pmd_pagesize;
+
+ for (split_addr = addr; split_addr < addr + fd_size; split_addr += pmd_pagesize)
+ write_debugfs(PID_FMT_OFFSET, getpid(), (uint64_t)split_addr,
+ (uint64_t)split_addr + pagesize, order, offset);
+
+ for (i = order + 1; i < pmd_order; i++)
+ expected_orders[i] = times;
+ expected_orders[order] = 2 * times;
+ }
for (i = 0; i < fd_size; i++)
if (*(addr + i) != (char)i) {
@@ -493,6 +728,14 @@ void split_thp_in_pagecache_to_order_at(size_t fd_size, const char *fs_loc,
goto out;
}
+ if (check_after_split_folio_orders(addr, fd_size, pagemap_fd,
+ kpageflags_fd, expected_orders,
+ (pmd_order + 1))) {
+ ksft_print_msg("Unexpected THP split\n");
+ err = 1;
+ goto out;
+ }
+
if (!check_huge_file(addr, 0, pmd_pagesize)) {
ksft_print_msg("Still FilePmdMapped not split\n");
err = EXIT_FAILURE;
@@ -523,6 +766,8 @@ int main(int argc, char **argv)
const char *fs_loc;
bool created_tmp;
int offset;
+ unsigned int nr_pages;
+ unsigned int tests;
ksft_print_header();
@@ -534,38 +779,58 @@ int main(int argc, char **argv)
if (argc > 1)
optional_xfs_path = argv[1];
- ksft_set_plan(1+8+1+9+9+8*4+2);
-
pagesize = getpagesize();
pageshift = ffs(pagesize) - 1;
pmd_pagesize = read_pmd_pagesize();
if (!pmd_pagesize)
ksft_exit_fail_msg("Reading PMD pagesize failed\n");
+ nr_pages = pmd_pagesize / pagesize;
+ pmd_order = sz2ord(pmd_pagesize, pagesize);
+
+ expected_orders = (int *)malloc(sizeof(int) * (pmd_order + 1));
+ if (!expected_orders)
+ ksft_exit_fail_msg("Fail to allocate memory: %s\n", strerror(errno));
+
+ tests = 2 + (pmd_order - 1) + (2 * pmd_order) + (pmd_order - 1) * 4 + 2;
+ ksft_set_plan(tests);
+
+ pagemap_fd = open(pagemap_proc, O_RDONLY);
+ if (pagemap_fd == -1)
+ ksft_exit_fail_msg("read pagemap: %s\n", strerror(errno));
+
+ kpageflags_fd = open(kpageflags_proc, O_RDONLY);
+ if (kpageflags_fd == -1)
+ ksft_exit_fail_msg("read kpageflags: %s\n", strerror(errno));
+
fd_size = 2 * pmd_pagesize;
split_pmd_zero_pages();
- for (i = 0; i < 9; i++)
+ for (i = 0; i < pmd_order; i++)
if (i != 1)
split_pmd_thp_to_order(i);
split_pte_mapped_thp();
- for (i = 0; i < 9; i++)
+ for (i = 0; i < pmd_order; i++)
split_file_backed_thp(i);
created_tmp = prepare_thp_fs(optional_xfs_path, fs_loc_template,
&fs_loc);
- for (i = 8; i >= 0; i--)
+ for (i = pmd_order - 1; i >= 0; i--)
split_thp_in_pagecache_to_order_at(fd_size, fs_loc, i, -1);
- for (i = 0; i < 9; i++)
+ for (i = 0; i < pmd_order; i++)
for (offset = 0;
- offset < pmd_pagesize / pagesize;
- offset += MAX(pmd_pagesize / pagesize / 4, 1 << i))
+ offset < nr_pages;
+ offset += MAX(nr_pages / 4, 1 << i))
split_thp_in_pagecache_to_order_at(fd_size, fs_loc, i, offset);
cleanup_thp_fs(fs_loc, created_tmp);
+ close(pagemap_fd);
+ close(kpageflags_fd);
+ free(expected_orders);
+
ksft_finished();
return 0;
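
The plan count above is now derived from pmd_order instead of the old hard-coded constants, so the test adapts to base pages other than 4KiB. sz2ord() is presumed to compute the log2 ratio of the two sizes; a sketch of that assumption:

/* Assumed semantics of sz2ord() (sketch, not the vm_util source):
 * e.g. 2MiB / 4KiB = 512, so the PMD order is 9. */
static inline int sz2ord(size_t size, size_t pagesize)
{
        return __builtin_ctzl(size / pagesize);
}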
diff --git a/tools/testing/selftests/mm/test_vmalloc.sh b/tools/testing/selftests/mm/test_vmalloc.sh
index d73b846736f1..d39096723fca 100755
--- a/tools/testing/selftests/mm/test_vmalloc.sh
+++ b/tools/testing/selftests/mm/test_vmalloc.sh
@@ -47,14 +47,14 @@ check_test_requirements()
fi
}
-run_perfformance_check()
+run_performance_check()
{
echo "Run performance tests to evaluate how fast vmalloc allocation is."
echo "It runs all test cases on one single CPU with sequential order."
modprobe $DRIVER $PERF_PARAM > /dev/null 2>&1
echo "Done."
- echo "Ccheck the kernel message buffer to see the summary."
+ echo "Check the kernel message buffer to see the summary."
}
run_stability_check()
@@ -160,7 +160,7 @@ function run_test()
usage
else
if [[ "$1" = "performance" ]]; then
- run_perfformance_check
+ run_performance_check
elif [[ "$1" = "stress" ]]; then
run_stability_check
elif [[ "$1" = "smoke" ]]; then
diff --git a/tools/testing/selftests/mm/thp_settings.c b/tools/testing/selftests/mm/thp_settings.c
index ad872af1c81a..574bd0f8ae48 100644
--- a/tools/testing/selftests/mm/thp_settings.c
+++ b/tools/testing/selftests/mm/thp_settings.c
@@ -381,3 +381,21 @@ unsigned long thp_shmem_supported_orders(void)
{
return __thp_supported_orders(true);
}
+
+bool thp_available(void)
+{
+ if (access(THP_SYSFS, F_OK) != 0)
+ return false;
+ return true;
+}
+
+bool thp_is_enabled(void)
+{
+ if (!thp_available())
+ return false;
+
+ int mode = thp_read_string("enabled", thp_enabled_strings);
+
+ /* THP is considered enabled if it's either "always" or "madvise" */
+ return mode == 1 || mode == 3;
+}
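
The mode indices 1 and 3 above only make sense against the ordering of thp_enabled_strings earlier in this file; a hypothetical ordering consistent with the comment (verify against the actual array before relying on it):

static const char * const thp_enabled_strings[] = {
        "never", "always", "inherit", "madvise", NULL
};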
diff --git a/tools/testing/selftests/mm/thp_settings.h b/tools/testing/selftests/mm/thp_settings.h
index fc131d23d593..76eeb712e5f1 100644
--- a/tools/testing/selftests/mm/thp_settings.h
+++ b/tools/testing/selftests/mm/thp_settings.h
@@ -84,4 +84,7 @@ void thp_set_read_ahead_path(char *path);
unsigned long thp_supported_orders(void);
unsigned long thp_shmem_supported_orders(void);
+bool thp_available(void);
+bool thp_is_enabled(void);
+
#endif /* __THP_SETTINGS_H__ */
diff --git a/tools/testing/selftests/mm/thuge-gen.c b/tools/testing/selftests/mm/thuge-gen.c
index a41bc1234b37..77813d34dcc2 100644
--- a/tools/testing/selftests/mm/thuge-gen.c
+++ b/tools/testing/selftests/mm/thuge-gen.c
@@ -27,7 +27,7 @@
#include <stdarg.h>
#include <string.h>
#include "vm_util.h"
-#include "../kselftest.h"
+#include "kselftest.h"
#if !defined(MAP_HUGETLB)
#define MAP_HUGETLB 0x40000
@@ -77,40 +77,20 @@ void show(unsigned long ps)
system(buf);
}
-unsigned long read_sysfs(int warn, char *fmt, ...)
+unsigned long read_free(unsigned long ps)
{
- char *line = NULL;
- size_t linelen = 0;
- char buf[100];
- FILE *f;
- va_list ap;
unsigned long val = 0;
+ char buf[100];
- va_start(ap, fmt);
- vsnprintf(buf, sizeof buf, fmt, ap);
- va_end(ap);
+ snprintf(buf, sizeof(buf),
+ "/sys/kernel/mm/hugepages/hugepages-%lukB/free_hugepages",
+ ps >> 10);
+ if (read_sysfs(buf, &val) && ps != getpagesize())
+ ksft_print_msg("missing %s\n", buf);
- f = fopen(buf, "r");
- if (!f) {
- if (warn)
- ksft_print_msg("missing %s\n", buf);
- return 0;
- }
- if (getline(&line, &linelen, f) > 0) {
- sscanf(line, "%lu", &val);
- }
- fclose(f);
- free(line);
return val;
}
-unsigned long read_free(unsigned long ps)
-{
- return read_sysfs(ps != getpagesize(),
- "/sys/kernel/mm/hugepages/hugepages-%lukB/free_hugepages",
- ps >> 10);
-}
-
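
The rewritten read_free() leans on a generic read_sysfs() helper from vm_util instead of carrying its own varargs file parser. The contract assumed by the call sites here, sketched as a prototype (not the actual vm_util implementation):

/* Returns 0 and stores the parsed value on success, nonzero if the
 * file is missing or unreadable. */
int read_sysfs(const char *file_path, unsigned long *val);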
void test_mmap(unsigned long size, unsigned flags)
{
char *map;
@@ -173,6 +153,7 @@ void test_shmget(unsigned long size, unsigned flags)
void find_pagesizes(void)
{
unsigned long largest = getpagesize();
+ unsigned long shmmax_val = 0;
int i;
glob_t g;
@@ -195,13 +176,17 @@ void find_pagesizes(void)
}
globfree(&g);
- if (read_sysfs(0, "/proc/sys/kernel/shmmax") < NUM_PAGES * largest)
- ksft_exit_fail_msg("Please do echo %lu > /proc/sys/kernel/shmmax",
- largest * NUM_PAGES);
+ read_sysfs("/proc/sys/kernel/shmmax", &shmmax_val);
+ if (shmmax_val < NUM_PAGES * largest) {
+ ksft_print_msg("WARNING: shmmax is too small to run this test.\n");
+ ksft_print_msg("Please run the following command to increase shmmax:\n");
+ ksft_print_msg("echo %lu > /proc/sys/kernel/shmmax\n", largest * NUM_PAGES);
+ ksft_exit_skip("Test skipped due to insufficient shmmax value.\n");
+ }
#if defined(__x86_64__)
if (largest != 1U<<30) {
- ksft_exit_fail_msg("No GB pages available on x86-64\n"
+ ksft_exit_skip("No GB pages available on x86-64\n"
"Please boot with hugepagesz=1G hugepages=%d\n", NUM_PAGES);
}
#endif
diff --git a/tools/testing/selftests/mm/transhuge-stress.c b/tools/testing/selftests/mm/transhuge-stress.c
index 68201192e37c..bcad47c09518 100644
--- a/tools/testing/selftests/mm/transhuge-stress.c
+++ b/tools/testing/selftests/mm/transhuge-stress.c
@@ -16,7 +16,7 @@
#include <string.h>
#include <sys/mman.h>
#include "vm_util.h"
-#include "../kselftest.h"
+#include "kselftest.h"
int backing_fd = -1;
int mmap_flags = MAP_ANONYMOUS | MAP_NORESERVE | MAP_PRIVATE;
diff --git a/tools/testing/selftests/mm/uffd-common.c b/tools/testing/selftests/mm/uffd-common.c
index a37088a23ffe..edd02328f77b 100644
--- a/tools/testing/selftests/mm/uffd-common.c
+++ b/tools/testing/selftests/mm/uffd-common.c
@@ -7,18 +7,28 @@
#include "uffd-common.h"
-#define BASE_PMD_ADDR ((void *)(1UL << 30))
-
-volatile bool test_uffdio_copy_eexist = true;
-unsigned long nr_parallel, nr_pages, nr_pages_per_cpu, page_size;
-char *area_src, *area_src_alias, *area_dst, *area_dst_alias, *area_remap;
-int uffd = -1, uffd_flags, finished, *pipefd, test_type;
-bool map_shared;
-bool test_uffdio_wp = true;
-unsigned long long *count_verify;
uffd_test_ops_t *uffd_test_ops;
uffd_test_case_ops_t *uffd_test_case_ops;
-atomic_bool ready_for_fork;
+
+
+/* pthread_mutex_t starts at page offset 0 */
+pthread_mutex_t *area_mutex(char *area, unsigned long nr, uffd_global_test_opts_t *gopts)
+{
+ return (pthread_mutex_t *) (area + nr * gopts->page_size);
+}
+
+/*
+ * count is placed in the page after pthread_mutex_t, naturally aligned
+ * to avoid unaligned-access faults on non-x86 archs.
+ */
+volatile unsigned long long *area_count(char *area, unsigned long nr,
+ uffd_global_test_opts_t *gopts)
+{
+ return (volatile unsigned long long *)
+ ((unsigned long)(area + nr * gopts->page_size +
+ sizeof(pthread_mutex_t) + sizeof(unsigned long long) - 1) &
+ ~(unsigned long)(sizeof(unsigned long long) - 1));
+}
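
area_count() above uses the classic power-of-two align-up idiom. Shown standalone, with sizeof(unsigned long long) == 8 as the alignment:

/* Round x up to the next multiple of a power-of-two a:
 * (x + a - 1) & ~(a - 1). With a = 8: 41 -> 48, 48 -> 48. */
static inline unsigned long align_up(unsigned long x, unsigned long a)
{
        return (x + a - 1) & ~(a - 1);
}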
static int uffd_mem_fd_create(off_t mem_size, bool hugetlb)
{
@@ -40,15 +50,15 @@ static int uffd_mem_fd_create(off_t mem_size, bool hugetlb)
return mem_fd;
}
-static void anon_release_pages(char *rel_area)
+static void anon_release_pages(uffd_global_test_opts_t *gopts, char *rel_area)
{
- if (madvise(rel_area, nr_pages * page_size, MADV_DONTNEED))
+ if (madvise(rel_area, gopts->nr_pages * gopts->page_size, MADV_DONTNEED))
err("madvise(MADV_DONTNEED) failed");
}
-static int anon_allocate_area(void **alloc_area, bool is_src)
+static int anon_allocate_area(uffd_global_test_opts_t *gopts, void **alloc_area, bool is_src)
{
- *alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
+ *alloc_area = mmap(NULL, gopts->nr_pages * gopts->page_size, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (*alloc_area == MAP_FAILED) {
*alloc_area = NULL;
@@ -57,31 +67,32 @@ static int anon_allocate_area(void **alloc_area, bool is_src)
return 0;
}
-static void noop_alias_mapping(__u64 *start, size_t len, unsigned long offset)
+static void noop_alias_mapping(uffd_global_test_opts_t *gopts, __u64 *start,
+ size_t len, unsigned long offset)
{
}
-static void hugetlb_release_pages(char *rel_area)
+static void hugetlb_release_pages(uffd_global_test_opts_t *gopts, char *rel_area)
{
- if (!map_shared) {
- if (madvise(rel_area, nr_pages * page_size, MADV_DONTNEED))
+ if (!gopts->map_shared) {
+ if (madvise(rel_area, gopts->nr_pages * gopts->page_size, MADV_DONTNEED))
err("madvise(MADV_DONTNEED) failed");
} else {
- if (madvise(rel_area, nr_pages * page_size, MADV_REMOVE))
+ if (madvise(rel_area, gopts->nr_pages * gopts->page_size, MADV_REMOVE))
err("madvise(MADV_REMOVE) failed");
}
}
-static int hugetlb_allocate_area(void **alloc_area, bool is_src)
+static int hugetlb_allocate_area(uffd_global_test_opts_t *gopts, void **alloc_area, bool is_src)
{
- off_t size = nr_pages * page_size;
+ off_t size = gopts->nr_pages * gopts->page_size;
off_t offset = is_src ? 0 : size;
void *area_alias = NULL;
char **alloc_area_alias;
int mem_fd = uffd_mem_fd_create(size * 2, true);
*alloc_area = mmap(NULL, size, PROT_READ | PROT_WRITE,
- (map_shared ? MAP_SHARED : MAP_PRIVATE) |
+ (gopts->map_shared ? MAP_SHARED : MAP_PRIVATE) |
(is_src ? 0 : MAP_NORESERVE),
mem_fd, offset);
if (*alloc_area == MAP_FAILED) {
@@ -89,7 +100,7 @@ static int hugetlb_allocate_area(void **alloc_area, bool is_src)
return -errno;
}
- if (map_shared) {
+ if (gopts->map_shared) {
area_alias = mmap(NULL, size, PROT_READ | PROT_WRITE,
MAP_SHARED, mem_fd, offset);
if (area_alias == MAP_FAILED)
@@ -97,9 +108,9 @@ static int hugetlb_allocate_area(void **alloc_area, bool is_src)
}
if (is_src) {
- alloc_area_alias = &area_src_alias;
+ alloc_area_alias = &gopts->area_src_alias;
} else {
- alloc_area_alias = &area_dst_alias;
+ alloc_area_alias = &gopts->area_dst_alias;
}
if (area_alias)
*alloc_area_alias = area_alias;
@@ -108,73 +119,82 @@ static int hugetlb_allocate_area(void **alloc_area, bool is_src)
return 0;
}
-static void hugetlb_alias_mapping(__u64 *start, size_t len, unsigned long offset)
+static void hugetlb_alias_mapping(uffd_global_test_opts_t *gopts, __u64 *start,
+ size_t len, unsigned long offset)
{
- if (!map_shared)
+ if (!gopts->map_shared)
return;
- *start = (unsigned long) area_dst_alias + offset;
+ *start = (unsigned long) gopts->area_dst_alias + offset;
}
-static void shmem_release_pages(char *rel_area)
+static void shmem_release_pages(uffd_global_test_opts_t *gopts, char *rel_area)
{
- if (madvise(rel_area, nr_pages * page_size, MADV_REMOVE))
+ if (madvise(rel_area, gopts->nr_pages * gopts->page_size, MADV_REMOVE))
err("madvise(MADV_REMOVE) failed");
}
-static int shmem_allocate_area(void **alloc_area, bool is_src)
+static int shmem_allocate_area(uffd_global_test_opts_t *gopts, void **alloc_area, bool is_src)
{
void *area_alias = NULL;
- size_t bytes = nr_pages * page_size, hpage_size = read_pmd_pagesize();
+ size_t bytes = gopts->nr_pages * gopts->page_size, hpage_size = read_pmd_pagesize();
unsigned long offset = is_src ? 0 : bytes;
char *p = NULL, *p_alias = NULL;
int mem_fd = uffd_mem_fd_create(bytes * 2, false);
+ size_t region_size = bytes * 2 + hpage_size;
- /* TODO: clean this up. Use a static addr is ugly */
- p = BASE_PMD_ADDR;
- if (!is_src)
- /* src map + alias + interleaved hpages */
- p += 2 * (bytes + hpage_size);
+ void *reserve = mmap(NULL, region_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS,
+ -1, 0);
+ if (reserve == MAP_FAILED) {
+ close(mem_fd);
+ return -errno;
+ }
+
+ p = reserve;
p_alias = p;
p_alias += bytes;
p_alias += hpage_size; /* Prevent src/dst VMA merge */
- *alloc_area = mmap(p, bytes, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *alloc_area = mmap(p, bytes, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_SHARED,
mem_fd, offset);
if (*alloc_area == MAP_FAILED) {
*alloc_area = NULL;
+ munmap(reserve, region_size);
+ close(mem_fd);
return -errno;
}
if (*alloc_area != p)
err("mmap of memfd failed at %p", p);
- area_alias = mmap(p_alias, bytes, PROT_READ | PROT_WRITE, MAP_SHARED,
+ area_alias = mmap(p_alias, bytes, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_SHARED,
mem_fd, offset);
if (area_alias == MAP_FAILED) {
- munmap(*alloc_area, bytes);
*alloc_area = NULL;
+ munmap(reserve, region_size);
+ close(mem_fd);
return -errno;
}
if (area_alias != p_alias)
err("mmap of anonymous memory failed at %p", p_alias);
if (is_src)
- area_src_alias = area_alias;
+ gopts->area_src_alias = area_alias;
else
- area_dst_alias = area_alias;
+ gopts->area_dst_alias = area_alias;
close(mem_fd);
return 0;
}
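
The rewrite above replaces the hard-coded BASE_PMD_ADDR with a reserve-then-carve pattern: one PROT_NONE mapping claims a kernel-chosen address range, and MAP_FIXED placements inside it cannot collide with unrelated mappings. A self-contained sketch of the pattern:

#include <sys/types.h>
#include <sys/mman.h>

/* Reserve a region, then carve a real mapping out of it. */
static void *carve(size_t reserve_len, size_t map_len, int fd, off_t off)
{
        void *base = mmap(NULL, reserve_len, PROT_NONE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (base == MAP_FAILED)
                return NULL;
        /* MAP_FIXED inside our own reservation is safe: nothing else
         * can have been mapped there in the meantime. */
        return mmap(base, map_len, PROT_READ | PROT_WRITE,
                    MAP_FIXED | MAP_SHARED, fd, off);
}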
-static void shmem_alias_mapping(__u64 *start, size_t len, unsigned long offset)
+static void shmem_alias_mapping(uffd_global_test_opts_t *gopts, __u64 *start,
+ size_t len, unsigned long offset)
{
- *start = (unsigned long)area_dst_alias + offset;
+ *start = (unsigned long)gopts->area_dst_alias + offset;
}
-static void shmem_check_pmd_mapping(void *p, int expect_nr_hpages)
+static void shmem_check_pmd_mapping(uffd_global_test_opts_t *gopts, void *p, int expect_nr_hpages)
{
- if (!check_huge_shmem(area_dst_alias, expect_nr_hpages,
+ if (!check_huge_shmem(gopts->area_dst_alias, expect_nr_hpages,
read_pmd_pagesize()))
err("Did not find expected %d number of hugepages",
expect_nr_hpages);
@@ -234,18 +254,18 @@ void uffd_stats_report(struct uffd_args *args, int n_cpus)
printf("\n");
}
-int userfaultfd_open(uint64_t *features)
+int userfaultfd_open(uffd_global_test_opts_t *gopts, uint64_t *features)
{
struct uffdio_api uffdio_api;
- uffd = uffd_open(UFFD_FLAGS);
- if (uffd < 0)
+ gopts->uffd = uffd_open(UFFD_FLAGS);
+ if (gopts->uffd < 0)
return -1;
- uffd_flags = fcntl(uffd, F_GETFD, NULL);
+ gopts->uffd_flags = fcntl(gopts->uffd, F_GETFD, NULL);
uffdio_api.api = UFFD_API;
uffdio_api.features = *features;
- if (ioctl(uffd, UFFDIO_API, &uffdio_api))
+ if (ioctl(gopts->uffd, UFFDIO_API, &uffdio_api))
/* Probably lack of CAP_PTRACE? */
return -1;
if (uffdio_api.api != UFFD_API)
@@ -255,59 +275,63 @@ int userfaultfd_open(uint64_t *features)
return 0;
}
-static inline void munmap_area(void **area)
+static inline void munmap_area(uffd_global_test_opts_t *gopts, void **area)
{
if (*area)
- if (munmap(*area, nr_pages * page_size))
+ if (munmap(*area, gopts->nr_pages * gopts->page_size))
err("munmap");
*area = NULL;
}
-void uffd_test_ctx_clear(void)
+void uffd_test_ctx_clear(uffd_global_test_opts_t *gopts)
{
size_t i;
- if (pipefd) {
- for (i = 0; i < nr_parallel * 2; ++i) {
- if (close(pipefd[i]))
+ if (gopts->pipefd) {
+ for (i = 0; i < gopts->nr_parallel * 2; ++i) {
+ if (close(gopts->pipefd[i]))
err("close pipefd");
}
- free(pipefd);
- pipefd = NULL;
+ free(gopts->pipefd);
+ gopts->pipefd = NULL;
}
- if (count_verify) {
- free(count_verify);
- count_verify = NULL;
+ if (gopts->count_verify) {
+ free(gopts->count_verify);
+ gopts->count_verify = NULL;
}
- if (uffd != -1) {
- if (close(uffd))
+ if (gopts->uffd != -1) {
+ if (close(gopts->uffd))
err("close uffd");
- uffd = -1;
+ gopts->uffd = -1;
}
- munmap_area((void **)&area_src);
- munmap_area((void **)&area_src_alias);
- munmap_area((void **)&area_dst);
- munmap_area((void **)&area_dst_alias);
- munmap_area((void **)&area_remap);
+ munmap_area(gopts, (void **)&gopts->area_src);
+ munmap_area(gopts, (void **)&gopts->area_src_alias);
+ munmap_area(gopts, (void **)&gopts->area_dst);
+ munmap_area(gopts, (void **)&gopts->area_dst_alias);
+ munmap_area(gopts, (void **)&gopts->area_remap);
}
-int uffd_test_ctx_init(uint64_t features, const char **errmsg)
+int uffd_test_ctx_init(uffd_global_test_opts_t *gopts, uint64_t features, const char **errmsg)
{
unsigned long nr, cpu;
int ret;
+ gopts->area_src_alias = NULL;
+ gopts->area_dst_alias = NULL;
+ gopts->area_remap = NULL;
+
if (uffd_test_case_ops && uffd_test_case_ops->pre_alloc) {
- ret = uffd_test_case_ops->pre_alloc(errmsg);
+ ret = uffd_test_case_ops->pre_alloc(gopts, errmsg);
if (ret)
return ret;
}
- ret = uffd_test_ops->allocate_area((void **)&area_src, true);
- ret |= uffd_test_ops->allocate_area((void **)&area_dst, false);
+ ret = uffd_test_ops->allocate_area(gopts, (void **) &gopts->area_src, true);
+ ret |= uffd_test_ops->allocate_area(gopts, (void **) &gopts->area_dst, false);
if (ret) {
if (errmsg)
*errmsg = "memory allocation failed";
@@ -315,26 +339,26 @@ int uffd_test_ctx_init(uint64_t features, const char **errmsg)
}
if (uffd_test_case_ops && uffd_test_case_ops->post_alloc) {
- ret = uffd_test_case_ops->post_alloc(errmsg);
+ ret = uffd_test_case_ops->post_alloc(gopts, errmsg);
if (ret)
return ret;
}
- ret = userfaultfd_open(&features);
+ ret = userfaultfd_open(gopts, &features);
if (ret) {
if (errmsg)
*errmsg = "possible lack of privilege";
return ret;
}
- count_verify = malloc(nr_pages * sizeof(unsigned long long));
- if (!count_verify)
+ gopts->count_verify = malloc(gopts->nr_pages * sizeof(unsigned long long));
+ if (!gopts->count_verify)
err("count_verify");
- for (nr = 0; nr < nr_pages; nr++) {
- *area_mutex(area_src, nr) =
+ for (nr = 0; nr < gopts->nr_pages; nr++) {
+ *area_mutex(gopts->area_src, nr, gopts) =
(pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
- count_verify[nr] = *area_count(area_src, nr) = 1;
+ gopts->count_verify[nr] = *area_count(gopts->area_src, nr, gopts) = 1;
/*
* In the transition between 255 to 256, powerpc will
* read out of order in my_bcmp and see both bytes as
@@ -342,7 +366,7 @@ int uffd_test_ctx_init(uint64_t features, const char **errmsg)
* after the count, to avoid my_bcmp to trigger false
* positives.
*/
- *(area_count(area_src, nr) + 1) = 1;
+ *(area_count(gopts->area_src, nr, gopts) + 1) = 1;
}
/*
@@ -363,13 +387,13 @@ int uffd_test_ctx_init(uint64_t features, const char **errmsg)
* proactively split the thp and drop any accidentally initialized
* pages within area_dst.
*/
- uffd_test_ops->release_pages(area_dst);
+ uffd_test_ops->release_pages(gopts, gopts->area_dst);
- pipefd = malloc(sizeof(int) * nr_parallel * 2);
- if (!pipefd)
+ gopts->pipefd = malloc(sizeof(int) * gopts->nr_parallel * 2);
+ if (!gopts->pipefd)
err("pipefd");
- for (cpu = 0; cpu < nr_parallel; cpu++)
- if (pipe2(&pipefd[cpu * 2], O_CLOEXEC | O_NONBLOCK))
+ for (cpu = 0; cpu < gopts->nr_parallel; cpu++)
+ if (pipe2(&gopts->pipefd[cpu * 2], O_CLOEXEC | O_NONBLOCK))
err("pipe");
return 0;
@@ -416,9 +440,9 @@ static void continue_range(int ufd, __u64 start, __u64 len, bool wp)
ret, (int64_t) req.mapped);
}
-int uffd_read_msg(int ufd, struct uffd_msg *msg)
+int uffd_read_msg(uffd_global_test_opts_t *gopts, struct uffd_msg *msg)
{
- int ret = read(uffd, msg, sizeof(*msg));
+ int ret = read(gopts->uffd, msg, sizeof(*msg));
if (ret != sizeof(*msg)) {
if (ret < 0) {
@@ -433,7 +457,8 @@ int uffd_read_msg(int ufd, struct uffd_msg *msg)
return 0;
}
-void uffd_handle_page_fault(struct uffd_msg *msg, struct uffd_args *args)
+void uffd_handle_page_fault(uffd_global_test_opts_t *gopts, struct uffd_msg *msg,
+ struct uffd_args *args)
{
unsigned long offset;
@@ -442,7 +467,7 @@ void uffd_handle_page_fault(struct uffd_msg *msg, struct uffd_args *args)
if (msg->arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WP) {
/* Write protect page faults */
- wp_range(uffd, msg->arg.pagefault.address, page_size, false);
+ wp_range(gopts->uffd, msg->arg.pagefault.address, gopts->page_size, false);
args->wp_faults++;
} else if (msg->arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_MINOR) {
uint8_t *area;
@@ -460,12 +485,12 @@ void uffd_handle_page_fault(struct uffd_msg *msg, struct uffd_args *args)
* (UFFD-registered).
*/
- area = (uint8_t *)(area_dst +
- ((char *)msg->arg.pagefault.address -
- area_dst_alias));
- for (b = 0; b < page_size; ++b)
+ area = (uint8_t *)(gopts->area_dst +
+ ((char *)msg->arg.pagefault.address -
+ gopts->area_dst_alias));
+ for (b = 0; b < gopts->page_size; ++b)
area[b] = ~area[b];
- continue_range(uffd, msg->arg.pagefault.address, page_size,
+ continue_range(gopts->uffd, msg->arg.pagefault.address, gopts->page_size,
args->apply_wp);
args->minor_faults++;
} else {
@@ -493,10 +518,10 @@ void uffd_handle_page_fault(struct uffd_msg *msg, struct uffd_args *args)
if (msg->arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE)
err("unexpected write fault");
- offset = (char *)(unsigned long)msg->arg.pagefault.address - area_dst;
- offset &= ~(page_size-1);
+ offset = (char *)(unsigned long)msg->arg.pagefault.address - gopts->area_dst;
+ offset &= ~(gopts->page_size-1);
- if (copy_page(uffd, offset, args->apply_wp))
+ if (copy_page(gopts, offset, args->apply_wp))
args->missing_faults++;
}
}
@@ -504,6 +529,7 @@ void uffd_handle_page_fault(struct uffd_msg *msg, struct uffd_args *args)
void *uffd_poll_thread(void *arg)
{
struct uffd_args *args = (struct uffd_args *)arg;
+ uffd_global_test_opts_t *gopts = args->gopts;
unsigned long cpu = args->cpu;
struct pollfd pollfd[2];
struct uffd_msg msg;
@@ -514,12 +540,12 @@ void *uffd_poll_thread(void *arg)
if (!args->handle_fault)
args->handle_fault = uffd_handle_page_fault;
- pollfd[0].fd = uffd;
+ pollfd[0].fd = gopts->uffd;
pollfd[0].events = POLLIN;
- pollfd[1].fd = pipefd[cpu*2];
+ pollfd[1].fd = gopts->pipefd[cpu*2];
pollfd[1].events = POLLIN;
- ready_for_fork = true;
+ gopts->ready_for_fork = true;
for (;;) {
ret = poll(pollfd, 2, -1);
@@ -537,30 +563,30 @@ void *uffd_poll_thread(void *arg)
}
if (!(pollfd[0].revents & POLLIN))
err("pollfd[0].revents %d", pollfd[0].revents);
- if (uffd_read_msg(uffd, &msg))
+ if (uffd_read_msg(gopts, &msg))
continue;
switch (msg.event) {
default:
err("unexpected msg event %u\n", msg.event);
break;
case UFFD_EVENT_PAGEFAULT:
- args->handle_fault(&msg, args);
+ args->handle_fault(gopts, &msg, args);
break;
case UFFD_EVENT_FORK:
- close(uffd);
- uffd = msg.arg.fork.ufd;
- pollfd[0].fd = uffd;
+ close(gopts->uffd);
+ gopts->uffd = msg.arg.fork.ufd;
+ pollfd[0].fd = gopts->uffd;
break;
case UFFD_EVENT_REMOVE:
uffd_reg.range.start = msg.arg.remove.start;
uffd_reg.range.len = msg.arg.remove.end -
msg.arg.remove.start;
- if (ioctl(uffd, UFFDIO_UNREGISTER, &uffd_reg.range))
+ if (ioctl(gopts->uffd, UFFDIO_UNREGISTER, &uffd_reg.range))
err("remove failure");
break;
case UFFD_EVENT_REMAP:
- area_remap = area_dst; /* save for later unmap */
- area_dst = (char *)(unsigned long)msg.arg.remap.to;
+ gopts->area_remap = gopts->area_dst; /* save for later unmap */
+ gopts->area_dst = (char *)(unsigned long)msg.arg.remap.to;
break;
}
}
@@ -568,17 +594,18 @@ void *uffd_poll_thread(void *arg)
return NULL;
}
-static void retry_copy_page(int ufd, struct uffdio_copy *uffdio_copy,
+static void retry_copy_page(uffd_global_test_opts_t *gopts, struct uffdio_copy *uffdio_copy,
unsigned long offset)
{
- uffd_test_ops->alias_mapping(&uffdio_copy->dst,
+ uffd_test_ops->alias_mapping(gopts,
+ &uffdio_copy->dst,
uffdio_copy->len,
offset);
- if (ioctl(ufd, UFFDIO_COPY, uffdio_copy)) {
+ if (ioctl(gopts->uffd, UFFDIO_COPY, uffdio_copy)) {
/* real retval in ufdio_copy.copy */
if (uffdio_copy->copy != -EEXIST)
err("UFFDIO_COPY retry error: %"PRId64,
- (int64_t)uffdio_copy->copy);
+ (int64_t)uffdio_copy->copy);
} else {
err("UFFDIO_COPY retry unexpected: %"PRId64,
(int64_t)uffdio_copy->copy);
@@ -597,60 +624,60 @@ static void wake_range(int ufd, unsigned long addr, unsigned long len)
addr), exit(1);
}
-int __copy_page(int ufd, unsigned long offset, bool retry, bool wp)
+int __copy_page(uffd_global_test_opts_t *gopts, unsigned long offset, bool retry, bool wp)
{
struct uffdio_copy uffdio_copy;
- if (offset >= nr_pages * page_size)
+ if (offset >= gopts->nr_pages * gopts->page_size)
err("unexpected offset %lu\n", offset);
- uffdio_copy.dst = (unsigned long) area_dst + offset;
- uffdio_copy.src = (unsigned long) area_src + offset;
- uffdio_copy.len = page_size;
+ uffdio_copy.dst = (unsigned long) gopts->area_dst + offset;
+ uffdio_copy.src = (unsigned long) gopts->area_src + offset;
+ uffdio_copy.len = gopts->page_size;
if (wp)
uffdio_copy.mode = UFFDIO_COPY_MODE_WP;
else
uffdio_copy.mode = 0;
uffdio_copy.copy = 0;
- if (ioctl(ufd, UFFDIO_COPY, &uffdio_copy)) {
+ if (ioctl(gopts->uffd, UFFDIO_COPY, &uffdio_copy)) {
/* real retval in ufdio_copy.copy */
if (uffdio_copy.copy != -EEXIST)
err("UFFDIO_COPY error: %"PRId64,
(int64_t)uffdio_copy.copy);
- wake_range(ufd, uffdio_copy.dst, page_size);
- } else if (uffdio_copy.copy != page_size) {
+ wake_range(gopts->uffd, uffdio_copy.dst, gopts->page_size);
+ } else if (uffdio_copy.copy != gopts->page_size) {
err("UFFDIO_COPY error: %"PRId64, (int64_t)uffdio_copy.copy);
} else {
- if (test_uffdio_copy_eexist && retry) {
- test_uffdio_copy_eexist = false;
- retry_copy_page(ufd, &uffdio_copy, offset);
+ if (gopts->test_uffdio_copy_eexist && retry) {
+ gopts->test_uffdio_copy_eexist = false;
+ retry_copy_page(gopts, &uffdio_copy, offset);
}
return 1;
}
return 0;
}
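
Note the convention preserved through this conversion: UFFDIO_COPY reports its real status in uffdio_copy.copy, and -EEXIST is not a failure but a sign that another thread already resolved the page, in which case only a wake of any waiters is needed.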
-int copy_page(int ufd, unsigned long offset, bool wp)
+int copy_page(uffd_global_test_opts_t *gopts, unsigned long offset, bool wp)
{
- return __copy_page(ufd, offset, false, wp);
+ return __copy_page(gopts, offset, false, wp);
}
-int move_page(int ufd, unsigned long offset, unsigned long len)
+int move_page(uffd_global_test_opts_t *gopts, unsigned long offset, unsigned long len)
{
struct uffdio_move uffdio_move;
- if (offset + len > nr_pages * page_size)
+ if (offset + len > gopts->nr_pages * gopts->page_size)
err("unexpected offset %lu and length %lu\n", offset, len);
- uffdio_move.dst = (unsigned long) area_dst + offset;
- uffdio_move.src = (unsigned long) area_src + offset;
+ uffdio_move.dst = (unsigned long) gopts->area_dst + offset;
+ uffdio_move.src = (unsigned long) gopts->area_src + offset;
uffdio_move.len = len;
uffdio_move.mode = UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES;
uffdio_move.move = 0;
- if (ioctl(ufd, UFFDIO_MOVE, &uffdio_move)) {
+ if (ioctl(gopts->uffd, UFFDIO_MOVE, &uffdio_move)) {
/* real retval in uffdio_move.move */
if (uffdio_move.move != -EEXIST)
err("UFFDIO_MOVE error: %"PRId64,
(int64_t)uffdio_move.move);
- wake_range(ufd, uffdio_move.dst, len);
+ wake_range(gopts->uffd, uffdio_move.dst, len);
} else if (uffdio_move.move != len) {
err("UFFDIO_MOVE error: %"PRId64, (int64_t)uffdio_move.move);
} else
diff --git a/tools/testing/selftests/mm/uffd-common.h b/tools/testing/selftests/mm/uffd-common.h
index 7700cbfa3975..844a85ab31eb 100644
--- a/tools/testing/selftests/mm/uffd-common.h
+++ b/tools/testing/selftests/mm/uffd-common.h
@@ -35,7 +35,7 @@
#include <sys/random.h>
#include <stdatomic.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "vm_util.h"
#define UFFD_FLAGS (O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY)
@@ -56,20 +56,17 @@
#define err(fmt, ...) errexit(1, fmt, ##__VA_ARGS__)
-/* pthread_mutex_t starts at page offset 0 */
-#define area_mutex(___area, ___nr) \
- ((pthread_mutex_t *) ((___area) + (___nr)*page_size))
-/*
- * count is placed in the page after pthread_mutex_t naturally aligned
- * to avoid non alignment faults on non-x86 archs.
- */
-#define area_count(___area, ___nr) \
- ((volatile unsigned long long *) ((unsigned long) \
- ((___area) + (___nr)*page_size + \
- sizeof(pthread_mutex_t) + \
- sizeof(unsigned long long) - 1) & \
- ~(unsigned long)(sizeof(unsigned long long) \
- - 1)))
+struct uffd_global_test_opts {
+ unsigned long nr_parallel, nr_pages, nr_pages_per_cpu, page_size;
+ char *area_src, *area_src_alias, *area_dst, *area_dst_alias, *area_remap;
+ int uffd, uffd_flags, finished, *pipefd, test_type;
+ bool map_shared;
+ bool test_uffdio_wp;
+ unsigned long long *count_verify;
+ volatile bool test_uffdio_copy_eexist;
+ atomic_bool ready_for_fork;
+};
+typedef struct uffd_global_test_opts uffd_global_test_opts_t;
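
With the former globals folded into struct uffd_global_test_opts, each test owns its context explicitly. A hypothetical setup sketch (the field values are illustrative, not prescribed by the patch):

uffd_global_test_opts_t gopts = {
        .uffd = -1,
        .page_size = 4096,      /* typically getpagesize() */
        .nr_parallel = 1,
        .nr_pages = 32,
        .test_uffdio_wp = true,
};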
/* Userfaultfd test statistics */
struct uffd_args {
@@ -79,50 +76,55 @@ struct uffd_args {
unsigned long missing_faults;
unsigned long wp_faults;
unsigned long minor_faults;
+ struct uffd_global_test_opts *gopts;
/* A custom fault handler; defaults to uffd_handle_page_fault. */
- void (*handle_fault)(struct uffd_msg *msg, struct uffd_args *args);
+ void (*handle_fault)(struct uffd_global_test_opts *gopts,
+ struct uffd_msg *msg,
+ struct uffd_args *args);
};
struct uffd_test_ops {
- int (*allocate_area)(void **alloc_area, bool is_src);
- void (*release_pages)(char *rel_area);
- void (*alias_mapping)(__u64 *start, size_t len, unsigned long offset);
- void (*check_pmd_mapping)(void *p, int expect_nr_hpages);
+ int (*allocate_area)(uffd_global_test_opts_t *gopts, void **alloc_area, bool is_src);
+ void (*release_pages)(uffd_global_test_opts_t *gopts, char *rel_area);
+ void (*alias_mapping)(uffd_global_test_opts_t *gopts,
+ __u64 *start,
+ size_t len,
+ unsigned long offset);
+ void (*check_pmd_mapping)(uffd_global_test_opts_t *gopts, void *p, int expect_nr_hpages);
};
typedef struct uffd_test_ops uffd_test_ops_t;
struct uffd_test_case_ops {
- int (*pre_alloc)(const char **errmsg);
- int (*post_alloc)(const char **errmsg);
+ int (*pre_alloc)(uffd_global_test_opts_t *gopts, const char **errmsg);
+ int (*post_alloc)(uffd_global_test_opts_t *gopts, const char **errmsg);
};
typedef struct uffd_test_case_ops uffd_test_case_ops_t;
-extern unsigned long nr_parallel, nr_pages, nr_pages_per_cpu, page_size;
-extern char *area_src, *area_src_alias, *area_dst, *area_dst_alias, *area_remap;
-extern int uffd, uffd_flags, finished, *pipefd, test_type;
-extern bool map_shared;
-extern bool test_uffdio_wp;
-extern unsigned long long *count_verify;
-extern volatile bool test_uffdio_copy_eexist;
-extern atomic_bool ready_for_fork;
-
+extern uffd_global_test_opts_t *uffd_gtest_opts;
extern uffd_test_ops_t anon_uffd_test_ops;
extern uffd_test_ops_t shmem_uffd_test_ops;
extern uffd_test_ops_t hugetlb_uffd_test_ops;
extern uffd_test_ops_t *uffd_test_ops;
extern uffd_test_case_ops_t *uffd_test_case_ops;
+pthread_mutex_t *area_mutex(char *area, unsigned long nr, uffd_global_test_opts_t *gopts);
+volatile unsigned long long *area_count(char *area,
+ unsigned long nr,
+ uffd_global_test_opts_t *gopts);
+
void uffd_stats_report(struct uffd_args *args, int n_cpus);
-int uffd_test_ctx_init(uint64_t features, const char **errmsg);
-void uffd_test_ctx_clear(void);
-int userfaultfd_open(uint64_t *features);
-int uffd_read_msg(int ufd, struct uffd_msg *msg);
+int uffd_test_ctx_init(uffd_global_test_opts_t *gopts, uint64_t features, const char **errmsg);
+void uffd_test_ctx_clear(uffd_global_test_opts_t *gopts);
+int userfaultfd_open(uffd_global_test_opts_t *gopts, uint64_t *features);
+int uffd_read_msg(uffd_global_test_opts_t *gopts, struct uffd_msg *msg);
void wp_range(int ufd, __u64 start, __u64 len, bool wp);
-void uffd_handle_page_fault(struct uffd_msg *msg, struct uffd_args *args);
-int __copy_page(int ufd, unsigned long offset, bool retry, bool wp);
-int copy_page(int ufd, unsigned long offset, bool wp);
-int move_page(int ufd, unsigned long offset, unsigned long len);
+void uffd_handle_page_fault(uffd_global_test_opts_t *gopts,
+ struct uffd_msg *msg,
+ struct uffd_args *args);
+int __copy_page(uffd_global_test_opts_t *gopts, unsigned long offset, bool retry, bool wp);
+int copy_page(uffd_global_test_opts_t *gopts, unsigned long offset, bool wp);
+int move_page(uffd_global_test_opts_t *gopts, unsigned long offset, unsigned long len);
void *uffd_poll_thread(void *arg);
int uffd_open_dev(unsigned int flags);
diff --git a/tools/testing/selftests/mm/uffd-stress.c b/tools/testing/selftests/mm/uffd-stress.c
index 40af7f67c407..700fbaa18d44 100644
--- a/tools/testing/selftests/mm/uffd-stress.c
+++ b/tools/testing/selftests/mm/uffd-stress.c
@@ -44,6 +44,12 @@ uint64_t features;
#define BOUNCE_VERIFY (1<<2)
#define BOUNCE_POLL (1<<3)
static int bounces;
+/*
+ * Defined globally for this particular test, as the SIGALRM handler
+ * depends on test_uffdio_*_eexist.
+ * XXX: define gopts in main() once we figure out a way to deal with
+ * test_uffdio_*_eexist.
+ */
+static uffd_global_test_opts_t *gopts;
/* exercise the test_uffdio_*_eexist every ALARM_INTERVAL_SECS */
#define ALARM_INTERVAL_SECS 10
@@ -51,7 +57,7 @@ static char *zeropage;
pthread_attr_t attr;
#define swap(a, b) \
- do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
+ do { __auto_type __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
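
Switching the temporary from typeof(a) to __auto_type makes the macro argument expand once instead of twice (typeof() re-expands `a` purely to name its type), shrinking preprocessed output with no behavioral change. Usage is unchanged:

int x = 1, y = 2;
swap(x, y);     /* x == 2, y == 1, exactly as before */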
const char *examples =
"# Run anonymous memory test on 100MiB region with 99999 bounces:\n"
@@ -76,54 +82,58 @@ static void usage(void)
exit(1);
}
-static void uffd_stats_reset(struct uffd_args *args, unsigned long n_cpus)
+static void uffd_stats_reset(uffd_global_test_opts_t *gopts, struct uffd_args *args,
+ unsigned long n_cpus)
{
int i;
for (i = 0; i < n_cpus; i++) {
args[i].cpu = i;
- args[i].apply_wp = test_uffdio_wp;
+ args[i].apply_wp = gopts->test_uffdio_wp;
args[i].missing_faults = 0;
args[i].wp_faults = 0;
args[i].minor_faults = 0;
+ args[i].gopts = gopts;
}
}
static void *locking_thread(void *arg)
{
- unsigned long cpu = (unsigned long) arg;
+ struct uffd_args *args = (struct uffd_args *) arg;
+ uffd_global_test_opts_t *gopts = args->gopts;
+ unsigned long cpu = (unsigned long) args->cpu;
unsigned long page_nr;
unsigned long long count;
if (!(bounces & BOUNCE_RANDOM)) {
page_nr = -bounces;
if (!(bounces & BOUNCE_RACINGFAULTS))
- page_nr += cpu * nr_pages_per_cpu;
+ page_nr += cpu * gopts->nr_pages_per_cpu;
}
- while (!finished) {
+ while (!gopts->finished) {
if (bounces & BOUNCE_RANDOM) {
if (getrandom(&page_nr, sizeof(page_nr), 0) != sizeof(page_nr))
err("getrandom failed");
} else
page_nr += 1;
- page_nr %= nr_pages;
- pthread_mutex_lock(area_mutex(area_dst, page_nr));
- count = *area_count(area_dst, page_nr);
- if (count != count_verify[page_nr])
+ page_nr %= gopts->nr_pages;
+ pthread_mutex_lock(area_mutex(gopts->area_dst, page_nr, gopts));
+ count = *area_count(gopts->area_dst, page_nr, gopts);
+ if (count != gopts->count_verify[page_nr])
err("page_nr %lu memory corruption %llu %llu",
- page_nr, count, count_verify[page_nr]);
+ page_nr, count, gopts->count_verify[page_nr]);
count++;
- *area_count(area_dst, page_nr) = count_verify[page_nr] = count;
- pthread_mutex_unlock(area_mutex(area_dst, page_nr));
+ *area_count(gopts->area_dst, page_nr, gopts) = gopts->count_verify[page_nr] = count;
+ pthread_mutex_unlock(area_mutex(gopts->area_dst, page_nr, gopts));
}
return NULL;
}
-static int copy_page_retry(int ufd, unsigned long offset)
+static int copy_page_retry(uffd_global_test_opts_t *gopts, unsigned long offset)
{
- return __copy_page(ufd, offset, true, test_uffdio_wp);
+ return __copy_page(gopts, offset, true, gopts->test_uffdio_wp);
}
pthread_mutex_t uffd_read_mutex = PTHREAD_MUTEX_INITIALIZER;
@@ -131,15 +141,16 @@ pthread_mutex_t uffd_read_mutex = PTHREAD_MUTEX_INITIALIZER;
static void *uffd_read_thread(void *arg)
{
struct uffd_args *args = (struct uffd_args *)arg;
+ uffd_global_test_opts_t *gopts = args->gopts;
struct uffd_msg msg;
pthread_mutex_unlock(&uffd_read_mutex);
/* from here cancellation is ok */
for (;;) {
- if (uffd_read_msg(uffd, &msg))
+ if (uffd_read_msg(gopts, &msg))
continue;
- uffd_handle_page_fault(&msg, args);
+ uffd_handle_page_fault(gopts, &msg, args);
}
return NULL;
@@ -147,32 +158,34 @@ static void *uffd_read_thread(void *arg)
static void *background_thread(void *arg)
{
- unsigned long cpu = (unsigned long) arg;
+ struct uffd_args *args = (struct uffd_args *) arg;
+ uffd_global_test_opts_t *gopts = args->gopts;
+ unsigned long cpu = (unsigned long) args->cpu;
unsigned long page_nr, start_nr, mid_nr, end_nr;
- start_nr = cpu * nr_pages_per_cpu;
- end_nr = (cpu+1) * nr_pages_per_cpu;
+ start_nr = cpu * gopts->nr_pages_per_cpu;
+ end_nr = (cpu+1) * gopts->nr_pages_per_cpu;
mid_nr = (start_nr + end_nr) / 2;
/* Copy the first half of the pages */
for (page_nr = start_nr; page_nr < mid_nr; page_nr++)
- copy_page_retry(uffd, page_nr * page_size);
+ copy_page_retry(gopts, page_nr * gopts->page_size);
/*
* If we need to test uffd-wp, set it up now. Then we'll have
* at least the first half of the pages mapped already which
* can be write-protected for testing
*/
- if (test_uffdio_wp)
- wp_range(uffd, (unsigned long)area_dst + start_nr * page_size,
- nr_pages_per_cpu * page_size, true);
+ if (gopts->test_uffdio_wp)
+ wp_range(gopts->uffd, (unsigned long)gopts->area_dst + start_nr * gopts->page_size,
+ gopts->nr_pages_per_cpu * gopts->page_size, true);
/*
* Continue the 2nd half of the page copying, handling write
* protection faults if any
*/
for (page_nr = mid_nr; page_nr < end_nr; page_nr++)
- copy_page_retry(uffd, page_nr * page_size);
+ copy_page_retry(gopts, page_nr * gopts->page_size);
return NULL;
}
@@ -180,17 +193,21 @@ static void *background_thread(void *arg)
static int stress(struct uffd_args *args)
{
unsigned long cpu;
- pthread_t locking_threads[nr_parallel];
- pthread_t uffd_threads[nr_parallel];
- pthread_t background_threads[nr_parallel];
+ uffd_global_test_opts_t *gopts = args->gopts;
+ pthread_t locking_threads[gopts->nr_parallel];
+ pthread_t uffd_threads[gopts->nr_parallel];
+ pthread_t background_threads[gopts->nr_parallel];
- finished = 0;
- for (cpu = 0; cpu < nr_parallel; cpu++) {
+ gopts->finished = 0;
+ for (cpu = 0; cpu < gopts->nr_parallel; cpu++) {
if (pthread_create(&locking_threads[cpu], &attr,
- locking_thread, (void *)cpu))
+ locking_thread, (void *)&args[cpu]))
return 1;
if (bounces & BOUNCE_POLL) {
- if (pthread_create(&uffd_threads[cpu], &attr, uffd_poll_thread, &args[cpu]))
+ if (pthread_create(&uffd_threads[cpu],
+ &attr,
+ uffd_poll_thread,
+ (void *) &args[cpu]))
err("uffd_poll_thread create");
} else {
if (pthread_create(&uffd_threads[cpu], &attr,
@@ -200,10 +217,10 @@ static int stress(struct uffd_args *args)
pthread_mutex_lock(&uffd_read_mutex);
}
if (pthread_create(&background_threads[cpu], &attr,
- background_thread, (void *)cpu))
+ background_thread, (void *)&args[cpu]))
return 1;
}
- for (cpu = 0; cpu < nr_parallel; cpu++)
+ for (cpu = 0; cpu < gopts->nr_parallel; cpu++)
if (pthread_join(background_threads[cpu], NULL))
return 1;
@@ -216,17 +233,17 @@ static int stress(struct uffd_args *args)
* UFFDIO_COPY without writing zero pages into area_dst
* because the background threads already completed).
*/
- uffd_test_ops->release_pages(area_src);
+ uffd_test_ops->release_pages(gopts, gopts->area_src);
- finished = 1;
- for (cpu = 0; cpu < nr_parallel; cpu++)
+ gopts->finished = 1;
+ for (cpu = 0; cpu < gopts->nr_parallel; cpu++)
if (pthread_join(locking_threads[cpu], NULL))
return 1;
- for (cpu = 0; cpu < nr_parallel; cpu++) {
- char c;
+ for (cpu = 0; cpu < gopts->nr_parallel; cpu++) {
+ char c = '\0';
if (bounces & BOUNCE_POLL) {
- if (write(pipefd[cpu*2+1], &c, 1) != 1)
+ if (write(gopts->pipefd[cpu*2+1], &c, 1) != 1)
err("pipefd write error");
if (pthread_join(uffd_threads[cpu],
(void *)&args[cpu]))
@@ -242,26 +259,26 @@ static int stress(struct uffd_args *args)
return 0;
}
-static int userfaultfd_stress(void)
+static int userfaultfd_stress(uffd_global_test_opts_t *gopts)
{
void *area;
unsigned long nr;
- struct uffd_args args[nr_parallel];
- uint64_t mem_size = nr_pages * page_size;
+ struct uffd_args args[gopts->nr_parallel];
+ uint64_t mem_size = gopts->nr_pages * gopts->page_size;
int flags = 0;
- memset(args, 0, sizeof(struct uffd_args) * nr_parallel);
+ memset(args, 0, sizeof(struct uffd_args) * gopts->nr_parallel);
- if (features & UFFD_FEATURE_WP_UNPOPULATED && test_type == TEST_ANON)
+ if (features & UFFD_FEATURE_WP_UNPOPULATED && gopts->test_type == TEST_ANON)
flags = UFFD_FEATURE_WP_UNPOPULATED;
- if (uffd_test_ctx_init(flags, NULL))
+ if (uffd_test_ctx_init(gopts, flags, NULL))
err("context init failed");
- if (posix_memalign(&area, page_size, page_size))
+ if (posix_memalign(&area, gopts->page_size, gopts->page_size))
err("out of memory");
zeropage = area;
- bzero(zeropage, page_size);
+ bzero(zeropage, gopts->page_size);
pthread_mutex_lock(&uffd_read_mutex);
@@ -284,18 +301,18 @@ static int userfaultfd_stress(void)
fflush(stdout);
if (bounces & BOUNCE_POLL)
- fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
+ fcntl(gopts->uffd, F_SETFL, gopts->uffd_flags | O_NONBLOCK);
else
- fcntl(uffd, F_SETFL, uffd_flags & ~O_NONBLOCK);
+ fcntl(gopts->uffd, F_SETFL, gopts->uffd_flags & ~O_NONBLOCK);
/* register */
- if (uffd_register(uffd, area_dst, mem_size,
- true, test_uffdio_wp, false))
+ if (uffd_register(gopts->uffd, gopts->area_dst, mem_size,
+ true, gopts->test_uffdio_wp, false))
err("register failure");
- if (area_dst_alias) {
- if (uffd_register(uffd, area_dst_alias, mem_size,
- true, test_uffdio_wp, false))
+ if (gopts->area_dst_alias) {
+ if (uffd_register(gopts->uffd, gopts->area_dst_alias, mem_size,
+ true, gopts->test_uffdio_wp, false))
err("register failure alias");
}
@@ -323,87 +340,88 @@ static int userfaultfd_stress(void)
* MADV_DONTNEED only after the UFFDIO_REGISTER, so it's
* required to MADV_DONTNEED here.
*/
- uffd_test_ops->release_pages(area_dst);
+ uffd_test_ops->release_pages(gopts, gopts->area_dst);
- uffd_stats_reset(args, nr_parallel);
+ uffd_stats_reset(gopts, args, gopts->nr_parallel);
/* bounce pass */
if (stress(args)) {
- uffd_test_ctx_clear();
+ uffd_test_ctx_clear(gopts);
return 1;
}
/* Clear all the write protections if there is any */
- if (test_uffdio_wp)
- wp_range(uffd, (unsigned long)area_dst,
- nr_pages * page_size, false);
+ if (gopts->test_uffdio_wp)
+ wp_range(gopts->uffd, (unsigned long)gopts->area_dst,
+ gopts->nr_pages * gopts->page_size, false);
/* unregister */
- if (uffd_unregister(uffd, area_dst, mem_size))
+ if (uffd_unregister(gopts->uffd, gopts->area_dst, mem_size))
err("unregister failure");
- if (area_dst_alias) {
- if (uffd_unregister(uffd, area_dst_alias, mem_size))
+ if (gopts->area_dst_alias) {
+ if (uffd_unregister(gopts->uffd, gopts->area_dst_alias, mem_size))
err("unregister failure alias");
}
/* verification */
if (bounces & BOUNCE_VERIFY)
- for (nr = 0; nr < nr_pages; nr++)
- if (*area_count(area_dst, nr) != count_verify[nr])
+ for (nr = 0; nr < gopts->nr_pages; nr++)
+ if (*area_count(gopts->area_dst, nr, gopts) !=
+ gopts->count_verify[nr])
err("error area_count %llu %llu %lu\n",
- *area_count(area_src, nr),
- count_verify[nr], nr);
+ *area_count(gopts->area_src, nr, gopts),
+ gopts->count_verify[nr], nr);
/* prepare next bounce */
- swap(area_src, area_dst);
+ swap(gopts->area_src, gopts->area_dst);
- swap(area_src_alias, area_dst_alias);
+ swap(gopts->area_src_alias, gopts->area_dst_alias);
- uffd_stats_report(args, nr_parallel);
+ uffd_stats_report(args, gopts->nr_parallel);
}
- uffd_test_ctx_clear();
+ uffd_test_ctx_clear(gopts);
return 0;
}
-static void set_test_type(const char *type)
+static void set_test_type(uffd_global_test_opts_t *gopts, const char *type)
{
if (!strcmp(type, "anon")) {
- test_type = TEST_ANON;
+ gopts->test_type = TEST_ANON;
uffd_test_ops = &anon_uffd_test_ops;
} else if (!strcmp(type, "hugetlb")) {
- test_type = TEST_HUGETLB;
+ gopts->test_type = TEST_HUGETLB;
uffd_test_ops = &hugetlb_uffd_test_ops;
- map_shared = true;
+ gopts->map_shared = true;
} else if (!strcmp(type, "hugetlb-private")) {
- test_type = TEST_HUGETLB;
+ gopts->test_type = TEST_HUGETLB;
uffd_test_ops = &hugetlb_uffd_test_ops;
} else if (!strcmp(type, "shmem")) {
- map_shared = true;
- test_type = TEST_SHMEM;
+ gopts->map_shared = true;
+ gopts->test_type = TEST_SHMEM;
uffd_test_ops = &shmem_uffd_test_ops;
} else if (!strcmp(type, "shmem-private")) {
- test_type = TEST_SHMEM;
+ gopts->test_type = TEST_SHMEM;
uffd_test_ops = &shmem_uffd_test_ops;
}
}
-static void parse_test_type_arg(const char *raw_type)
+static void parse_test_type_arg(uffd_global_test_opts_t *gopts, const char *raw_type)
{
- set_test_type(raw_type);
+ set_test_type(gopts, raw_type);
- if (!test_type)
+ if (!gopts->test_type)
err("failed to parse test type argument: '%s'", raw_type);
- if (test_type == TEST_HUGETLB)
- page_size = default_huge_page_size();
+ if (gopts->test_type == TEST_HUGETLB)
+ gopts->page_size = default_huge_page_size();
else
- page_size = sysconf(_SC_PAGE_SIZE);
+ gopts->page_size = sysconf(_SC_PAGE_SIZE);
- if (!page_size)
+ if (!gopts->page_size)
err("Unable to determine page size");
- if ((unsigned long) area_count(NULL, 0) + sizeof(unsigned long long) * 2
- > page_size)
+ if ((unsigned long) area_count(NULL, 0, gopts) + sizeof(unsigned long long) * 2
+ > gopts->page_size)
err("Impossible to run this test");
/*
@@ -415,21 +433,21 @@ static void parse_test_type_arg(const char *raw_type)
if (uffd_get_features(&features) && errno == ENOENT)
ksft_exit_skip("failed to get available features (%d)\n", errno);
- test_uffdio_wp = test_uffdio_wp &&
+ gopts->test_uffdio_wp = gopts->test_uffdio_wp &&
(features & UFFD_FEATURE_PAGEFAULT_FLAG_WP);
- if (test_type != TEST_ANON && !(features & UFFD_FEATURE_WP_HUGETLBFS_SHMEM))
- test_uffdio_wp = false;
+ if (gopts->test_type != TEST_ANON && !(features & UFFD_FEATURE_WP_HUGETLBFS_SHMEM))
+ gopts->test_uffdio_wp = false;
- close(uffd);
- uffd = -1;
+ close(gopts->uffd);
+ gopts->uffd = -1;
}
static void sigalrm(int sig)
{
if (sig != SIGALRM)
abort();
- test_uffdio_copy_eexist = true;
+ gopts->test_uffdio_copy_eexist = true;
alarm(ALARM_INTERVAL_SECS);
}
@@ -438,6 +456,8 @@ int main(int argc, char **argv)
unsigned long nr_cpus;
size_t bytes;
+ gopts = (uffd_global_test_opts_t *) malloc(sizeof(uffd_global_test_opts_t));
+
if (argc < 4)
usage();
@@ -445,29 +465,34 @@ int main(int argc, char **argv)
err("failed to arm SIGALRM");
alarm(ALARM_INTERVAL_SECS);
- parse_test_type_arg(argv[1]);
+ parse_test_type_arg(gopts, argv[1]);
bytes = atol(argv[2]) * 1024 * 1024;
- if (test_type == TEST_HUGETLB &&
- get_free_hugepages() < bytes / page_size) {
- printf("skip: Skipping userfaultfd... not enough hugepages\n");
- return KSFT_SKIP;
- }
-
nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
if (nr_cpus > 32) {
/* Don't let calculation below go to zero. */
ksft_print_msg("_SC_NPROCESSORS_ONLN (%lu) too large, capping nr_threads to 32\n",
nr_cpus);
- nr_parallel = 32;
+ gopts->nr_parallel = 32;
} else {
- nr_parallel = nr_cpus;
+ gopts->nr_parallel = nr_cpus;
+ }
+
+ /*
+ * src and dst each require bytes / page_size hugepages. Require
+ * nr_parallel - 1 hugepages on top of that to account for racy
+ * extra hugepage reservations.
+ */
+ if (gopts->test_type == TEST_HUGETLB &&
+ get_free_hugepages() < 2 * (bytes / gopts->page_size) + gopts->nr_parallel - 1) {
+ printf("skip: Skipping userfaultfd... not enough hugepages\n");
+ return KSFT_SKIP;
}
- nr_pages_per_cpu = bytes / page_size / nr_parallel;
- if (!nr_pages_per_cpu) {
+ gopts->nr_pages_per_cpu = bytes / gopts->page_size / gopts->nr_parallel;
+ if (!gopts->nr_pages_per_cpu) {
_err("pages_per_cpu = 0, cannot test (%lu / %lu / %lu)",
- bytes, page_size, nr_parallel);
+ bytes, gopts->page_size, gopts->nr_parallel);
usage();
}
@@ -476,11 +501,11 @@ int main(int argc, char **argv)
_err("invalid bounces");
usage();
}
- nr_pages = nr_pages_per_cpu * nr_parallel;
+ gopts->nr_pages = gopts->nr_pages_per_cpu * gopts->nr_parallel;
printf("nr_pages: %lu, nr_pages_per_cpu: %lu\n",
- nr_pages, nr_pages_per_cpu);
- return userfaultfd_stress();
+ gopts->nr_pages, gopts->nr_pages_per_cpu);
+ return userfaultfd_stress(gopts);
}
#else /* __NR_userfaultfd */
diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c
index c73fd5d455c8..f4807242c5b2 100644
--- a/tools/testing/selftests/mm/uffd-unit-tests.c
+++ b/tools/testing/selftests/mm/uffd-unit-tests.c
@@ -76,7 +76,7 @@ struct uffd_test_args {
typedef struct uffd_test_args uffd_test_args_t;
/* Returns: UFFD_TEST_* */
-typedef void (*uffd_test_fn)(uffd_test_args_t *);
+typedef void (*uffd_test_fn)(uffd_global_test_opts_t *, uffd_test_args_t *);
typedef struct {
const char *name;
@@ -181,33 +181,6 @@ out:
return 1;
}
-/*
- * This function initializes the global variables. TODO: remove global
- * vars and then remove this.
- */
-static int
-uffd_setup_environment(uffd_test_args_t *args, uffd_test_case_t *test,
- mem_type_t *mem_type, const char **errmsg)
-{
- map_shared = mem_type->shared;
- uffd_test_ops = mem_type->mem_ops;
- uffd_test_case_ops = test->test_case_ops;
-
- if (mem_type->mem_flag & (MEM_HUGETLB_PRIVATE | MEM_HUGETLB))
- page_size = default_huge_page_size();
- else
- page_size = psize();
-
- /* Ensure we have at least 2 pages */
- nr_pages = MAX(UFFD_TEST_MEM_SIZE, page_size * 2) / page_size;
- /* TODO: remove this global var.. it's so ugly */
- nr_parallel = 1;
-
- /* Initialize test arguments */
- args->mem_type = mem_type;
-
- return uffd_test_ctx_init(test->uffd_feature_required, errmsg);
-}
static bool uffd_feature_supported(uffd_test_case_t *test)
{
@@ -237,7 +210,8 @@ static int pagemap_open(void)
} while (0)
typedef struct {
- int parent_uffd, child_uffd;
+ uffd_global_test_opts_t *gopts;
+ int child_uffd;
} fork_event_args;
static void *fork_event_consumer(void *data)
@@ -245,10 +219,10 @@ static void *fork_event_consumer(void *data)
fork_event_args *args = data;
struct uffd_msg msg = { 0 };
- ready_for_fork = true;
+ args->gopts->ready_for_fork = true;
/* Read until a full msg received */
- while (uffd_read_msg(args->parent_uffd, &msg));
+ while (uffd_read_msg(args->gopts, &msg));
if (msg.event != UFFD_EVENT_FORK)
err("wrong message: %u\n", msg.event);
@@ -304,9 +278,9 @@ static void unpin_pages(pin_args *args)
args->pinned = false;
}
-static int pagemap_test_fork(int uffd, bool with_event, bool test_pin)
+static int pagemap_test_fork(uffd_global_test_opts_t *gopts, bool with_event, bool test_pin)
{
- fork_event_args args = { .parent_uffd = uffd, .child_uffd = -1 };
+ fork_event_args args = { .gopts = gopts, .child_uffd = -1 };
pthread_t thread;
pid_t child;
uint64_t value;
@@ -314,10 +288,10 @@ static int pagemap_test_fork(int uffd, bool with_event, bool test_pin)
/* Prepare a thread to resolve EVENT_FORK */
if (with_event) {
- ready_for_fork = false;
+ gopts->ready_for_fork = false;
if (pthread_create(&thread, NULL, fork_event_consumer, &args))
err("pthread_create()");
- while (!ready_for_fork)
+ while (!gopts->ready_for_fork)
; /* Wait for the poll_thread to start executing before forking */
}
@@ -328,14 +302,14 @@ static int pagemap_test_fork(int uffd, bool with_event, bool test_pin)
fd = pagemap_open();
- if (test_pin && pin_pages(&args, area_dst, page_size))
+ if (test_pin && pin_pages(&args, gopts->area_dst, gopts->page_size))
/*
* Normally when reach here we have pinned in
* previous tests, so shouldn't fail anymore
*/
err("pin page failed in child");
- value = pagemap_get_entry(fd, area_dst);
+ value = pagemap_get_entry(fd, gopts->area_dst);
/*
* After fork(), we should handle uffd-wp bit differently:
*
@@ -361,70 +335,70 @@ static int pagemap_test_fork(int uffd, bool with_event, bool test_pin)
return result;
}
-static void uffd_wp_unpopulated_test(uffd_test_args_t *args)
+static void uffd_wp_unpopulated_test(uffd_global_test_opts_t *gopts, uffd_test_args_t *args)
{
uint64_t value;
int pagemap_fd;
- if (uffd_register(uffd, area_dst, nr_pages * page_size,
+ if (uffd_register(gopts->uffd, gopts->area_dst, gopts->nr_pages * gopts->page_size,
false, true, false))
err("register failed");
pagemap_fd = pagemap_open();
/* Test applying pte marker to anon unpopulated */
- wp_range(uffd, (uint64_t)area_dst, page_size, true);
- value = pagemap_get_entry(pagemap_fd, area_dst);
+ wp_range(gopts->uffd, (uint64_t)gopts->area_dst, gopts->page_size, true);
+ value = pagemap_get_entry(pagemap_fd, gopts->area_dst);
pagemap_check_wp(value, true);
/* Test unprotect on anon pte marker */
- wp_range(uffd, (uint64_t)area_dst, page_size, false);
- value = pagemap_get_entry(pagemap_fd, area_dst);
+ wp_range(gopts->uffd, (uint64_t)gopts->area_dst, gopts->page_size, false);
+ value = pagemap_get_entry(pagemap_fd, gopts->area_dst);
pagemap_check_wp(value, false);
/* Test zap on anon marker */
- wp_range(uffd, (uint64_t)area_dst, page_size, true);
- if (madvise(area_dst, page_size, MADV_DONTNEED))
+ wp_range(gopts->uffd, (uint64_t)gopts->area_dst, gopts->page_size, true);
+ if (madvise(gopts->area_dst, gopts->page_size, MADV_DONTNEED))
err("madvise(MADV_DONTNEED) failed");
- value = pagemap_get_entry(pagemap_fd, area_dst);
+ value = pagemap_get_entry(pagemap_fd, gopts->area_dst);
pagemap_check_wp(value, false);
/* Test fault in after marker removed */
- *area_dst = 1;
- value = pagemap_get_entry(pagemap_fd, area_dst);
+ *gopts->area_dst = 1;
+ value = pagemap_get_entry(pagemap_fd, gopts->area_dst);
pagemap_check_wp(value, false);
/* Drop it to make pte none again */
- if (madvise(area_dst, page_size, MADV_DONTNEED))
+ if (madvise(gopts->area_dst, gopts->page_size, MADV_DONTNEED))
err("madvise(MADV_DONTNEED) failed");
/* Test read-zero-page upon pte marker */
- wp_range(uffd, (uint64_t)area_dst, page_size, true);
- *(volatile char *)area_dst;
+ wp_range(gopts->uffd, (uint64_t)gopts->area_dst, gopts->page_size, true);
+ *(volatile char *)gopts->area_dst;
/* Drop it to make pte none again */
- if (madvise(area_dst, page_size, MADV_DONTNEED))
+ if (madvise(gopts->area_dst, gopts->page_size, MADV_DONTNEED))
err("madvise(MADV_DONTNEED) failed");
uffd_test_pass();
}
-static void uffd_wp_fork_test_common(uffd_test_args_t *args,
+static void uffd_wp_fork_test_common(uffd_global_test_opts_t *gopts, uffd_test_args_t *args,
bool with_event)
{
int pagemap_fd;
uint64_t value;
- if (uffd_register(uffd, area_dst, nr_pages * page_size,
+ if (uffd_register(gopts->uffd, gopts->area_dst, gopts->nr_pages * gopts->page_size,
false, true, false))
err("register failed");
pagemap_fd = pagemap_open();
/* Touch the page */
- *area_dst = 1;
- wp_range(uffd, (uint64_t)area_dst, page_size, true);
- value = pagemap_get_entry(pagemap_fd, area_dst);
+ *gopts->area_dst = 1;
+ wp_range(gopts->uffd, (uint64_t)gopts->area_dst, gopts->page_size, true);
+ value = pagemap_get_entry(pagemap_fd, gopts->area_dst);
pagemap_check_wp(value, true);
- if (pagemap_test_fork(uffd, with_event, false)) {
+ if (pagemap_test_fork(gopts, with_event, false)) {
uffd_test_fail("Detected %s uffd-wp bit in child in present pte",
with_event ? "missing" : "stall");
goto out;
@@ -442,79 +416,80 @@ static void uffd_wp_fork_test_common(uffd_test_args_t *args,
* to expose pte markers.
*/
if (args->mem_type->shared) {
- if (madvise(area_dst, page_size, MADV_DONTNEED))
+ if (madvise(gopts->area_dst, gopts->page_size, MADV_DONTNEED))
err("MADV_DONTNEED");
} else {
/*
* NOTE: ignore retval because private-hugetlb doesn't yet
* support swapping, so it could fail.
*/
- madvise(area_dst, page_size, MADV_PAGEOUT);
+ madvise(gopts->area_dst, gopts->page_size, MADV_PAGEOUT);
}
/* Uffd-wp should persist even swapped out */
- value = pagemap_get_entry(pagemap_fd, area_dst);
+ value = pagemap_get_entry(pagemap_fd, gopts->area_dst);
pagemap_check_wp(value, true);
- if (pagemap_test_fork(uffd, with_event, false)) {
+ if (pagemap_test_fork(gopts, with_event, false)) {
uffd_test_fail("Detected %s uffd-wp bit in child in zapped pte",
with_event ? "missing" : "stall");
goto out;
}
/* Unprotect; this tests swap pte modifications */
- wp_range(uffd, (uint64_t)area_dst, page_size, false);
- value = pagemap_get_entry(pagemap_fd, area_dst);
+ wp_range(gopts->uffd, (uint64_t)gopts->area_dst, gopts->page_size, false);
+ value = pagemap_get_entry(pagemap_fd, gopts->area_dst);
pagemap_check_wp(value, false);
/* Fault in the page from disk */
- *area_dst = 2;
- value = pagemap_get_entry(pagemap_fd, area_dst);
+ *gopts->area_dst = 2;
+ value = pagemap_get_entry(pagemap_fd, gopts->area_dst);
pagemap_check_wp(value, false);
uffd_test_pass();
out:
- if (uffd_unregister(uffd, area_dst, nr_pages * page_size))
+ if (uffd_unregister(gopts->uffd, gopts->area_dst, gopts->nr_pages * gopts->page_size))
err("unregister failed");
close(pagemap_fd);
}
-static void uffd_wp_fork_test(uffd_test_args_t *args)
+static void uffd_wp_fork_test(uffd_global_test_opts_t *gopts, uffd_test_args_t *args)
{
- uffd_wp_fork_test_common(args, false);
+ uffd_wp_fork_test_common(gopts, args, false);
}
-static void uffd_wp_fork_with_event_test(uffd_test_args_t *args)
+static void uffd_wp_fork_with_event_test(uffd_global_test_opts_t *gopts, uffd_test_args_t *args)
{
- uffd_wp_fork_test_common(args, true);
+ uffd_wp_fork_test_common(gopts, args, true);
}
-static void uffd_wp_fork_pin_test_common(uffd_test_args_t *args,
+static void uffd_wp_fork_pin_test_common(uffd_global_test_opts_t *gopts,
+ uffd_test_args_t *args,
bool with_event)
{
int pagemap_fd;
pin_args pin_args = {};
- if (uffd_register(uffd, area_dst, page_size, false, true, false))
+ if (uffd_register(gopts->uffd, gopts->area_dst, gopts->page_size, false, true, false))
err("register failed");
pagemap_fd = pagemap_open();
/* Touch the page */
- *area_dst = 1;
- wp_range(uffd, (uint64_t)area_dst, page_size, true);
+ *gopts->area_dst = 1;
+ wp_range(gopts->uffd, (uint64_t)gopts->area_dst, gopts->page_size, true);
/*
* 1. First pin, then fork(). This tests fork() special path when
* doing early CoW if the page is private.
*/
- if (pin_pages(&pin_args, area_dst, page_size)) {
+ if (pin_pages(&pin_args, gopts->area_dst, gopts->page_size)) {
uffd_test_skip("Possibly CONFIG_GUP_TEST missing "
"or unprivileged");
close(pagemap_fd);
- uffd_unregister(uffd, area_dst, page_size);
+ uffd_unregister(gopts->uffd, gopts->area_dst, gopts->page_size);
return;
}
- if (pagemap_test_fork(uffd, with_event, false)) {
+ if (pagemap_test_fork(gopts, with_event, false)) {
uffd_test_fail("Detected %s uffd-wp bit in early CoW of fork()",
with_event ? "missing" : "stall");
unpin_pages(&pin_args);
@@ -527,49 +502,50 @@ static void uffd_wp_fork_pin_test_common(uffd_test_args_t *args,
* 2. First fork(), then pin (in the child, where test_pin==true).
* This tests COR, aka, page unsharing on private memories.
*/
- if (pagemap_test_fork(uffd, with_event, true)) {
+ if (pagemap_test_fork(gopts, with_event, true)) {
uffd_test_fail("Detected %s uffd-wp bit when RO pin",
with_event ? "missing" : "stall");
goto out;
}
uffd_test_pass();
out:
- if (uffd_unregister(uffd, area_dst, page_size))
+ if (uffd_unregister(gopts->uffd, gopts->area_dst, gopts->page_size))
err("register failed");
close(pagemap_fd);
}
-static void uffd_wp_fork_pin_test(uffd_test_args_t *args)
+static void uffd_wp_fork_pin_test(uffd_global_test_opts_t *gopts, uffd_test_args_t *args)
{
- uffd_wp_fork_pin_test_common(args, false);
+ uffd_wp_fork_pin_test_common(gopts, args, false);
}
-static void uffd_wp_fork_pin_with_event_test(uffd_test_args_t *args)
+static void uffd_wp_fork_pin_with_event_test(uffd_global_test_opts_t *gopts, uffd_test_args_t *args)
{
- uffd_wp_fork_pin_test_common(args, true);
+ uffd_wp_fork_pin_test_common(gopts, args, true);
}
-static void check_memory_contents(char *p)
+static void check_memory_contents(uffd_global_test_opts_t *gopts, char *p)
{
unsigned long i, j;
uint8_t expected_byte;
- for (i = 0; i < nr_pages; ++i) {
+ for (i = 0; i < gopts->nr_pages; ++i) {
expected_byte = ~((uint8_t)(i % ((uint8_t)-1)));
- for (j = 0; j < page_size; j++) {
- uint8_t v = *(uint8_t *)(p + (i * page_size) + j);
+ for (j = 0; j < gopts->page_size; j++) {
+ uint8_t v = *(uint8_t *)(p + (i * gopts->page_size) + j);
if (v != expected_byte)
err("unexpected page contents");
}
}
}
-static void uffd_minor_test_common(bool test_collapse, bool test_wp)
+static void uffd_minor_test_common(uffd_global_test_opts_t *gopts, bool test_collapse, bool test_wp)
{
unsigned long p;
pthread_t uffd_mon;
- char c;
+ char c = '\0';
struct uffd_args args = { 0 };
+ args.gopts = gopts;
/*
* NOTE: MADV_COLLAPSE is not yet compatible with WP, so testing
@@ -577,7 +553,7 @@ static void uffd_minor_test_common(bool test_collapse, bool test_wp)
*/
assert(!(test_collapse && test_wp));
- if (uffd_register(uffd, area_dst_alias, nr_pages * page_size,
+ if (uffd_register(gopts->uffd, gopts->area_dst_alias, gopts->nr_pages * gopts->page_size,
/* NOTE! MADV_COLLAPSE may not work with uffd-wp */
false, test_wp, true))
err("register failure");
@@ -586,9 +562,9 @@ static void uffd_minor_test_common(bool test_collapse, bool test_wp)
* After registering with UFFD, populate the non-UFFD-registered side of
* the shared mapping. This should *not* trigger any UFFD minor faults.
*/
- for (p = 0; p < nr_pages; ++p)
- memset(area_dst + (p * page_size), p % ((uint8_t)-1),
- page_size);
+ for (p = 0; p < gopts->nr_pages; ++p)
+ memset(gopts->area_dst + (p * gopts->page_size), p % ((uint8_t)-1),
+ gopts->page_size);
args.apply_wp = test_wp;
if (pthread_create(&uffd_mon, NULL, uffd_poll_thread, &args))
@@ -600,50 +576,51 @@ static void uffd_minor_test_common(bool test_collapse, bool test_wp)
* fault. uffd_poll_thread will resolve the fault by bit-flipping the
* page's contents, and then issuing a CONTINUE ioctl.
*/
- check_memory_contents(area_dst_alias);
+ check_memory_contents(gopts, gopts->area_dst_alias);
- if (write(pipefd[1], &c, sizeof(c)) != sizeof(c))
+ if (write(gopts->pipefd[1], &c, sizeof(c)) != sizeof(c))
err("pipe write");
if (pthread_join(uffd_mon, NULL))
err("join() failed");
if (test_collapse) {
- if (madvise(area_dst_alias, nr_pages * page_size,
+ if (madvise(gopts->area_dst_alias, gopts->nr_pages * gopts->page_size,
MADV_COLLAPSE)) {
/* It's fine to fail for this one... */
uffd_test_skip("MADV_COLLAPSE failed");
return;
}
- uffd_test_ops->check_pmd_mapping(area_dst,
- nr_pages * page_size /
+ uffd_test_ops->check_pmd_mapping(gopts,
+ gopts->area_dst,
+ gopts->nr_pages * gopts->page_size /
read_pmd_pagesize());
/*
* This won't cause uffd-fault - it purely just makes sure there
* was no corruption.
*/
- check_memory_contents(area_dst_alias);
+ check_memory_contents(gopts, gopts->area_dst_alias);
}
- if (args.missing_faults != 0 || args.minor_faults != nr_pages)
+ if (args.missing_faults != 0 || args.minor_faults != gopts->nr_pages)
uffd_test_fail("stats check error");
else
uffd_test_pass();
}
-void uffd_minor_test(uffd_test_args_t *args)
+void uffd_minor_test(uffd_global_test_opts_t *gopts, uffd_test_args_t *args)
{
- uffd_minor_test_common(false, false);
+ uffd_minor_test_common(gopts, false, false);
}
-void uffd_minor_wp_test(uffd_test_args_t *args)
+void uffd_minor_wp_test(uffd_global_test_opts_t *gopts, uffd_test_args_t *args)
{
- uffd_minor_test_common(false, true);
+ uffd_minor_test_common(gopts, false, true);
}
-void uffd_minor_collapse_test(uffd_test_args_t *args)
+void uffd_minor_collapse_test(uffd_global_test_opts_t *gopts, uffd_test_args_t *args)
{
- uffd_minor_test_common(true, false);
+ uffd_minor_test_common(gopts, true, false);
}
static sigjmp_buf jbuf, *sigbuf;
@@ -678,7 +655,7 @@ static void sighndl(int sig, siginfo_t *siginfo, void *ptr)
* This also tests UFFD_FEATURE_EVENT_FORK event along with the signal
* feature. Using monitor thread, verify no userfault events are generated.
*/
-static int faulting_process(int signal_test, bool wp)
+static int faulting_process(uffd_global_test_opts_t *gopts, int signal_test, bool wp)
{
unsigned long nr, i;
unsigned long long count;
@@ -687,7 +664,7 @@ static int faulting_process(int signal_test, bool wp)
struct sigaction act;
volatile unsigned long signalled = 0;
- split_nr_pages = (nr_pages + 1) / 2;
+ split_nr_pages = (gopts->nr_pages + 1) / 2;
if (signal_test) {
sigbuf = &jbuf;
@@ -701,7 +678,7 @@ static int faulting_process(int signal_test, bool wp)
for (nr = 0; nr < split_nr_pages; nr++) {
volatile int steps = 1;
- unsigned long offset = nr * page_size;
+ unsigned long offset = nr * gopts->page_size;
if (signal_test) {
if (sigsetjmp(*sigbuf, 1) != 0) {
@@ -713,15 +690,15 @@ static int faulting_process(int signal_test, bool wp)
if (steps == 1) {
/* This is a MISSING request */
steps++;
- if (copy_page(uffd, offset, wp))
+ if (copy_page(gopts, offset, wp))
signalled++;
} else {
/* This is a WP request */
assert(steps == 2);
- wp_range(uffd,
- (__u64)area_dst +
+ wp_range(gopts->uffd,
+ (__u64)gopts->area_dst +
offset,
- page_size, false);
+ gopts->page_size, false);
}
} else {
signalled++;
@@ -730,77 +707,80 @@ static int faulting_process(int signal_test, bool wp)
}
}
- count = *area_count(area_dst, nr);
- if (count != count_verify[nr])
+ count = *area_count(gopts->area_dst, nr, gopts);
+ if (count != gopts->count_verify[nr])
err("nr %lu memory corruption %llu %llu\n",
- nr, count, count_verify[nr]);
+ nr, count, gopts->count_verify[nr]);
/*
* Trigger write protection if there is by writing
* the same value back.
*/
- *area_count(area_dst, nr) = count;
+ *area_count(gopts->area_dst, nr, gopts) = count;
}
if (signal_test)
return signalled != split_nr_pages;
- area_dst = mremap(area_dst, nr_pages * page_size, nr_pages * page_size,
- MREMAP_MAYMOVE | MREMAP_FIXED, area_src);
- if (area_dst == MAP_FAILED)
+ gopts->area_dst = mremap(gopts->area_dst, gopts->nr_pages * gopts->page_size,
+ gopts->nr_pages * gopts->page_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED,
+ gopts->area_src);
+ if (gopts->area_dst == MAP_FAILED)
err("mremap");
/* Reset area_src since we just clobbered it */
- area_src = NULL;
+ gopts->area_src = NULL;
- for (; nr < nr_pages; nr++) {
- count = *area_count(area_dst, nr);
- if (count != count_verify[nr]) {
+ for (; nr < gopts->nr_pages; nr++) {
+ count = *area_count(gopts->area_dst, nr, gopts);
+ if (count != gopts->count_verify[nr]) {
err("nr %lu memory corruption %llu %llu\n",
- nr, count, count_verify[nr]);
+ nr, count, gopts->count_verify[nr]);
}
/*
* Trigger write protection if there is by writing
* the same value back.
*/
- *area_count(area_dst, nr) = count;
+ *area_count(gopts->area_dst, nr, gopts) = count;
}
- uffd_test_ops->release_pages(area_dst);
+ uffd_test_ops->release_pages(gopts, gopts->area_dst);
- for (nr = 0; nr < nr_pages; nr++)
- for (i = 0; i < page_size; i++)
- if (*(area_dst + nr * page_size + i) != 0)
+ for (nr = 0; nr < gopts->nr_pages; nr++)
+ for (i = 0; i < gopts->page_size; i++)
+ if (*(gopts->area_dst + nr * gopts->page_size + i) != 0)
err("page %lu offset %lu is not zero", nr, i);
return 0;
}
-static void uffd_sigbus_test_common(bool wp)
+static void uffd_sigbus_test_common(uffd_global_test_opts_t *gopts, bool wp)
{
unsigned long userfaults;
pthread_t uffd_mon;
pid_t pid;
int err;
- char c;
+ char c = '\0';
struct uffd_args args = { 0 };
+ args.gopts = gopts;
- ready_for_fork = false;
+ gopts->ready_for_fork = false;
- fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
+ fcntl(gopts->uffd, F_SETFL, gopts->uffd_flags | O_NONBLOCK);
- if (uffd_register(uffd, area_dst, nr_pages * page_size,
+ if (uffd_register(gopts->uffd, gopts->area_dst, gopts->nr_pages * gopts->page_size,
true, wp, false))
err("register failure");
- if (faulting_process(1, wp))
+ if (faulting_process(gopts, 1, wp))
err("faulting process failed");
- uffd_test_ops->release_pages(area_dst);
+ uffd_test_ops->release_pages(gopts, gopts->area_dst);
args.apply_wp = wp;
if (pthread_create(&uffd_mon, NULL, uffd_poll_thread, &args))
err("uffd_poll_thread create");
- while (!ready_for_fork)
+ while (!gopts->ready_for_fork)
; /* Wait for the poll_thread to start executing before forking */
pid = fork();
@@ -808,12 +788,12 @@ static void uffd_sigbus_test_common(bool wp)
err("fork");
if (!pid)
- exit(faulting_process(2, wp));
+ exit(faulting_process(gopts, 2, wp));
waitpid(pid, &err, 0);
if (err)
err("faulting process failed");
- if (write(pipefd[1], &c, sizeof(c)) != sizeof(c))
+ if (write(gopts->pipefd[1], &c, sizeof(c)) != sizeof(c))
err("pipe write");
if (pthread_join(uffd_mon, (void **)&userfaults))
err("pthread_join()");
@@ -824,28 +804,29 @@ static void uffd_sigbus_test_common(bool wp)
uffd_test_pass();
}
-static void uffd_sigbus_test(uffd_test_args_t *args)
+static void uffd_sigbus_test(uffd_global_test_opts_t *gopts, uffd_test_args_t *args)
{
- uffd_sigbus_test_common(false);
+ uffd_sigbus_test_common(gopts, false);
}
-static void uffd_sigbus_wp_test(uffd_test_args_t *args)
+static void uffd_sigbus_wp_test(uffd_global_test_opts_t *gopts, uffd_test_args_t *args)
{
- uffd_sigbus_test_common(true);
+ uffd_sigbus_test_common(gopts, true);
}
-static void uffd_events_test_common(bool wp)
+static void uffd_events_test_common(uffd_global_test_opts_t *gopts, bool wp)
{
pthread_t uffd_mon;
pid_t pid;
int err;
- char c;
+ char c = '\0';
struct uffd_args args = { 0 };
+ args.gopts = gopts;
- ready_for_fork = false;
+ gopts->ready_for_fork = false;
- fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
- if (uffd_register(uffd, area_dst, nr_pages * page_size,
+ fcntl(gopts->uffd, F_SETFL, gopts->uffd_flags | O_NONBLOCK);
+ if (uffd_register(gopts->uffd, gopts->area_dst, gopts->nr_pages * gopts->page_size,
true, wp, false))
err("register failure");
@@ -853,7 +834,7 @@ static void uffd_events_test_common(bool wp)
if (pthread_create(&uffd_mon, NULL, uffd_poll_thread, &args))
err("uffd_poll_thread create");
- while (!ready_for_fork)
+ while (!gopts->ready_for_fork)
; /* Wait for the poll_thread to start executing before forking */
pid = fork();
@@ -861,39 +842,39 @@ static void uffd_events_test_common(bool wp)
err("fork");
if (!pid)
- exit(faulting_process(0, wp));
+ exit(faulting_process(gopts, 0, wp));
waitpid(pid, &err, 0);
if (err)
err("faulting process failed");
- if (write(pipefd[1], &c, sizeof(c)) != sizeof(c))
+ if (write(gopts->pipefd[1], &c, sizeof(c)) != sizeof(c))
err("pipe write");
if (pthread_join(uffd_mon, NULL))
err("pthread_join()");
- if (args.missing_faults != nr_pages)
+ if (args.missing_faults != gopts->nr_pages)
uffd_test_fail("Fault counts wrong");
else
uffd_test_pass();
}
-static void uffd_events_test(uffd_test_args_t *args)
+static void uffd_events_test(uffd_global_test_opts_t *gopts, uffd_test_args_t *args)
{
- uffd_events_test_common(false);
+ uffd_events_test_common(gopts, false);
}
-static void uffd_events_wp_test(uffd_test_args_t *args)
+static void uffd_events_wp_test(uffd_global_test_opts_t *gopts, uffd_test_args_t *args)
{
- uffd_events_test_common(true);
+ uffd_events_test_common(gopts, true);
}
-static void retry_uffdio_zeropage(int ufd,
+static void retry_uffdio_zeropage(uffd_global_test_opts_t *gopts,
struct uffdio_zeropage *uffdio_zeropage)
{
- uffd_test_ops->alias_mapping(&uffdio_zeropage->range.start,
+ uffd_test_ops->alias_mapping(gopts, &uffdio_zeropage->range.start,
uffdio_zeropage->range.len,
0);
- if (ioctl(ufd, UFFDIO_ZEROPAGE, uffdio_zeropage)) {
+ if (ioctl(gopts->uffd, UFFDIO_ZEROPAGE, uffdio_zeropage)) {
if (uffdio_zeropage->zeropage != -EEXIST)
err("UFFDIO_ZEROPAGE error: %"PRId64,
(int64_t)uffdio_zeropage->zeropage);
@@ -903,16 +884,16 @@ static void retry_uffdio_zeropage(int ufd,
}
}
-static bool do_uffdio_zeropage(int ufd, bool has_zeropage)
+static bool do_uffdio_zeropage(uffd_global_test_opts_t *gopts, bool has_zeropage)
{
struct uffdio_zeropage uffdio_zeropage = { 0 };
int ret;
__s64 res;
- uffdio_zeropage.range.start = (unsigned long) area_dst;
- uffdio_zeropage.range.len = page_size;
+ uffdio_zeropage.range.start = (unsigned long) gopts->area_dst;
+ uffdio_zeropage.range.len = gopts->page_size;
uffdio_zeropage.mode = 0;
- ret = ioctl(ufd, UFFDIO_ZEROPAGE, &uffdio_zeropage);
+ ret = ioctl(gopts->uffd, UFFDIO_ZEROPAGE, &uffdio_zeropage);
res = uffdio_zeropage.zeropage;
if (ret) {
/* real retval in ufdio_zeropage.zeropage */
@@ -921,10 +902,10 @@ static bool do_uffdio_zeropage(int ufd, bool has_zeropage)
else if (res != -EINVAL)
err("UFFDIO_ZEROPAGE not -EINVAL");
} else if (has_zeropage) {
- if (res != page_size)
+ if (res != gopts->page_size)
err("UFFDIO_ZEROPAGE unexpected size");
else
- retry_uffdio_zeropage(ufd, &uffdio_zeropage);
+ retry_uffdio_zeropage(gopts, &uffdio_zeropage);
return true;
} else
err("UFFDIO_ZEROPAGE succeeded");
@@ -950,25 +931,29 @@ uffd_register_detect_zeropage(int uffd, void *addr, uint64_t len)
}
/* exercise UFFDIO_ZEROPAGE */
-static void uffd_zeropage_test(uffd_test_args_t *args)
+static void uffd_zeropage_test(uffd_global_test_opts_t *gopts, uffd_test_args_t *args)
{
bool has_zeropage;
int i;
- has_zeropage = uffd_register_detect_zeropage(uffd, area_dst, page_size);
- if (area_dst_alias)
+ has_zeropage = uffd_register_detect_zeropage(gopts->uffd,
+ gopts->area_dst,
+ gopts->page_size);
+ if (gopts->area_dst_alias)
/* Ignore the retval; we already have it */
- uffd_register_detect_zeropage(uffd, area_dst_alias, page_size);
+ uffd_register_detect_zeropage(gopts->uffd, gopts->area_dst_alias, gopts->page_size);
- if (do_uffdio_zeropage(uffd, has_zeropage))
- for (i = 0; i < page_size; i++)
- if (area_dst[i] != 0)
+ if (do_uffdio_zeropage(gopts, has_zeropage))
+ for (i = 0; i < gopts->page_size; i++)
+ if (gopts->area_dst[i] != 0)
err("data non-zero at offset %d\n", i);
- if (uffd_unregister(uffd, area_dst, page_size))
+ if (uffd_unregister(gopts->uffd, gopts->area_dst, gopts->page_size))
err("unregister");
- if (area_dst_alias && uffd_unregister(uffd, area_dst_alias, page_size))
+ if (gopts->area_dst_alias && uffd_unregister(gopts->uffd,
+ gopts->area_dst_alias,
+ gopts->page_size))
err("unregister");
uffd_test_pass();
@@ -987,26 +972,27 @@ static void uffd_register_poison(int uffd, void *addr, uint64_t len)
err("registered area doesn't support COPY and POISON ioctls");
}
-static void do_uffdio_poison(int uffd, unsigned long offset)
+static void do_uffdio_poison(uffd_global_test_opts_t *gopts, unsigned long offset)
{
struct uffdio_poison uffdio_poison = { 0 };
int ret;
__s64 res;
- uffdio_poison.range.start = (unsigned long) area_dst + offset;
- uffdio_poison.range.len = page_size;
+ uffdio_poison.range.start = (unsigned long) gopts->area_dst + offset;
+ uffdio_poison.range.len = gopts->page_size;
uffdio_poison.mode = 0;
- ret = ioctl(uffd, UFFDIO_POISON, &uffdio_poison);
+ ret = ioctl(gopts->uffd, UFFDIO_POISON, &uffdio_poison);
res = uffdio_poison.updated;
if (ret)
err("UFFDIO_POISON error: %"PRId64, (int64_t)res);
- else if (res != page_size)
+ else if (res != gopts->page_size)
err("UFFDIO_POISON unexpected size: %"PRId64, (int64_t)res);
}
-static void uffd_poison_handle_fault(
- struct uffd_msg *msg, struct uffd_args *args)
+static void uffd_poison_handle_fault(uffd_global_test_opts_t *gopts,
+ struct uffd_msg *msg,
+ struct uffd_args *args)
{
unsigned long offset;
@@ -1017,29 +1003,39 @@ static void uffd_poison_handle_fault(
(UFFD_PAGEFAULT_FLAG_WP | UFFD_PAGEFAULT_FLAG_MINOR))
err("unexpected fault type %llu", msg->arg.pagefault.flags);
- offset = (char *)(unsigned long)msg->arg.pagefault.address - area_dst;
- offset &= ~(page_size-1);
+ offset = (char *)(unsigned long)msg->arg.pagefault.address - gopts->area_dst;
+ offset &= ~(gopts->page_size-1);
/* Odd pages -> copy zeroed page; even pages -> poison. */
- if (offset & page_size)
- copy_page(uffd, offset, false);
+ if (offset & gopts->page_size)
+ copy_page(gopts, offset, false);
else
- do_uffdio_poison(uffd, offset);
+ do_uffdio_poison(gopts, offset);
}
-static void uffd_poison_test(uffd_test_args_t *targs)
+/* Cover both odd and even pages while keeping duplication to a minimum */
+#define UFFD_POISON_TEST_NPAGES 4
+
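+/*
+ * A sketch of the resulting fault pattern (derived from the handler
+ * above, where offset = nr * page_size, so offset & page_size is set
+ * only for odd page indices):
+ *   page 0 -> UFFDIO_POISON, page 1 -> UFFDIO_COPY,
+ *   page 2 -> UFFDIO_POISON, page 3 -> UFFDIO_COPY
+ * hence poison_pages / 2 == 2 SIGBUS hits are expected below.
+ */
+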
+static void uffd_poison_test(uffd_global_test_opts_t *gopts, uffd_test_args_t *targs)
{
pthread_t uffd_mon;
char c;
struct uffd_args args = { 0 };
struct sigaction act = { 0 };
unsigned long nr_sigbus = 0;
- unsigned long nr;
+ unsigned long nr, poison_pages = UFFD_POISON_TEST_NPAGES;
+
+ if (gopts->nr_pages < poison_pages) {
+ uffd_test_skip("Too less pages for POISON test");
+ return;
+ }
- fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
+ args.gopts = gopts;
- uffd_register_poison(uffd, area_dst, nr_pages * page_size);
- memset(area_src, 0, nr_pages * page_size);
+ fcntl(gopts->uffd, F_SETFL, gopts->uffd_flags | O_NONBLOCK);
+
+ uffd_register_poison(gopts->uffd, gopts->area_dst, poison_pages * gopts->page_size);
+ memset(gopts->area_src, 0, poison_pages * gopts->page_size);
args.handle_fault = uffd_poison_handle_fault;
if (pthread_create(&uffd_mon, NULL, uffd_poll_thread, &args))
@@ -1051,9 +1047,9 @@ static void uffd_poison_test(uffd_test_args_t *targs)
if (sigaction(SIGBUS, &act, 0))
err("sigaction");
- for (nr = 0; nr < nr_pages; ++nr) {
- unsigned long offset = nr * page_size;
- const char *bytes = (const char *) area_dst + offset;
+ for (nr = 0; nr < poison_pages; ++nr) {
+ unsigned long offset = nr * gopts->page_size;
+ const char *bytes = (const char *) gopts->area_dst + offset;
const char *i;
if (sigsetjmp(*sigbuf, 1)) {
@@ -1066,27 +1062,29 @@ static void uffd_poison_test(uffd_test_args_t *targs)
continue;
}
- for (i = bytes; i < bytes + page_size; ++i) {
+ for (i = bytes; i < bytes + gopts->page_size; ++i) {
if (*i)
err("nonzero byte in area_dst (%p) at %p: %u",
- area_dst, i, *i);
+ gopts->area_dst, i, *i);
}
}
- if (write(pipefd[1], &c, sizeof(c)) != sizeof(c))
+ if (write(gopts->pipefd[1], &c, sizeof(c)) != sizeof(c))
err("pipe write");
if (pthread_join(uffd_mon, NULL))
err("pthread_join()");
- if (nr_sigbus != nr_pages / 2)
+ if (nr_sigbus != poison_pages / 2)
err("expected to receive %lu SIGBUS, actually received %lu",
- nr_pages / 2, nr_sigbus);
+ poison_pages / 2, nr_sigbus);
uffd_test_pass();
}
static void
-uffd_move_handle_fault_common(struct uffd_msg *msg, struct uffd_args *args,
+uffd_move_handle_fault_common(uffd_global_test_opts_t *gopts,
+ struct uffd_msg *msg,
+ struct uffd_args *args,
unsigned long len)
{
unsigned long offset;
@@ -1098,32 +1096,36 @@ uffd_move_handle_fault_common(struct uffd_msg *msg, struct uffd_args *args,
(UFFD_PAGEFAULT_FLAG_WP | UFFD_PAGEFAULT_FLAG_MINOR | UFFD_PAGEFAULT_FLAG_WRITE))
err("unexpected fault type %llu", msg->arg.pagefault.flags);
- offset = (char *)(unsigned long)msg->arg.pagefault.address - area_dst;
+ offset = (char *)(unsigned long)msg->arg.pagefault.address - gopts->area_dst;
offset &= ~(len-1);
- if (move_page(uffd, offset, len))
+ if (move_page(gopts, offset, len))
args->missing_faults++;
}
-static void uffd_move_handle_fault(struct uffd_msg *msg,
+static void uffd_move_handle_fault(uffd_global_test_opts_t *gopts, struct uffd_msg *msg,
struct uffd_args *args)
{
- uffd_move_handle_fault_common(msg, args, page_size);
+ uffd_move_handle_fault_common(gopts, msg, args, gopts->page_size);
}
-static void uffd_move_pmd_handle_fault(struct uffd_msg *msg,
+static void uffd_move_pmd_handle_fault(uffd_global_test_opts_t *gopts, struct uffd_msg *msg,
struct uffd_args *args)
{
- uffd_move_handle_fault_common(msg, args, read_pmd_pagesize());
+ uffd_move_handle_fault_common(gopts, msg, args, read_pmd_pagesize());
}
static void
-uffd_move_test_common(uffd_test_args_t *targs, unsigned long chunk_size,
- void (*handle_fault)(struct uffd_msg *msg, struct uffd_args *args))
+uffd_move_test_common(uffd_global_test_opts_t *gopts,
+ uffd_test_args_t *targs,
+ unsigned long chunk_size,
+ void (*handle_fault)(struct uffd_global_test_opts *gopts,
+ struct uffd_msg *msg, struct uffd_args *args)
+)
{
unsigned long nr;
pthread_t uffd_mon;
- char c;
+ char c = '\0';
unsigned long long count;
struct uffd_args args = { 0 };
char *orig_area_src = NULL, *orig_area_dst = NULL;
@@ -1131,11 +1133,13 @@ uffd_move_test_common(uffd_test_args_t *targs, unsigned long chunk_size,
unsigned long src_offs = 0;
unsigned long dst_offs = 0;
+ args.gopts = gopts;
+
/* Prevent source pages from being mapped more than once */
- if (madvise(area_src, nr_pages * page_size, MADV_DONTFORK))
+ if (madvise(gopts->area_src, gopts->nr_pages * gopts->page_size, MADV_DONTFORK))
err("madvise(MADV_DONTFORK) failure");
- if (uffd_register(uffd, area_dst, nr_pages * page_size,
+ if (uffd_register(gopts->uffd, gopts->area_dst, gopts->nr_pages * gopts->page_size,
true, false, false))
err("register failure");
@@ -1143,22 +1147,22 @@ uffd_move_test_common(uffd_test_args_t *targs, unsigned long chunk_size,
if (pthread_create(&uffd_mon, NULL, uffd_poll_thread, &args))
err("uffd_poll_thread create");
- step_size = chunk_size / page_size;
- step_count = nr_pages / step_size;
+ step_size = chunk_size / gopts->page_size;
+ step_count = gopts->nr_pages / step_size;
- if (chunk_size > page_size) {
- char *aligned_src = ALIGN_UP(area_src, chunk_size);
- char *aligned_dst = ALIGN_UP(area_dst, chunk_size);
+ if (chunk_size > gopts->page_size) {
+ char *aligned_src = ALIGN_UP(gopts->area_src, chunk_size);
+ char *aligned_dst = ALIGN_UP(gopts->area_dst, chunk_size);
- if (aligned_src != area_src || aligned_dst != area_dst) {
- src_offs = (aligned_src - area_src) / page_size;
- dst_offs = (aligned_dst - area_dst) / page_size;
+ if (aligned_src != gopts->area_src || aligned_dst != gopts->area_dst) {
+ src_offs = (aligned_src - gopts->area_src) / gopts->page_size;
+ dst_offs = (aligned_dst - gopts->area_dst) / gopts->page_size;
step_count--;
}
- orig_area_src = area_src;
- orig_area_dst = area_dst;
- area_src = aligned_src;
- area_dst = aligned_dst;
+ orig_area_src = gopts->area_src;
+ orig_area_dst = gopts->area_dst;
+ gopts->area_src = aligned_src;
+ gopts->area_dst = aligned_dst;
}
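/*
* e.g. with 4 KiB base pages and 2 MiB PMD-sized chunks, step_size is
* 512 pages per move; aligning src/dst upwards may drop one partial
* chunk, hence the step_count-- above.
*/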
/*
@@ -1172,34 +1176,34 @@ uffd_move_test_common(uffd_test_args_t *targs, unsigned long chunk_size,
/* Check area_src content */
for (i = 0; i < step_size; i++) {
- count = *area_count(area_src, nr + i);
- if (count != count_verify[src_offs + nr + i])
+ count = *area_count(gopts->area_src, nr + i, gopts);
+ if (count != gopts->count_verify[src_offs + nr + i])
err("nr %lu source memory invalid %llu %llu\n",
- nr + i, count, count_verify[src_offs + nr + i]);
+ nr + i, count, gopts->count_verify[src_offs + nr + i]);
}
/* Faulting into area_dst should move the page or the huge page */
for (i = 0; i < step_size; i++) {
- count = *area_count(area_dst, nr + i);
- if (count != count_verify[dst_offs + nr + i])
+ count = *area_count(gopts->area_dst, nr + i, gopts);
+ if (count != gopts->count_verify[dst_offs + nr + i])
err("nr %lu memory corruption %llu %llu\n",
- nr, count, count_verify[dst_offs + nr + i]);
+ nr, count, gopts->count_verify[dst_offs + nr + i]);
}
/* Re-check area_src content which should be empty */
for (i = 0; i < step_size; i++) {
- count = *area_count(area_src, nr + i);
+ count = *area_count(gopts->area_src, nr + i, gopts);
if (count != 0)
err("nr %lu move failed %llu %llu\n",
- nr, count, count_verify[src_offs + nr + i]);
+ nr, count, gopts->count_verify[src_offs + nr + i]);
}
}
- if (chunk_size > page_size) {
- area_src = orig_area_src;
- area_dst = orig_area_dst;
+ if (chunk_size > gopts->page_size) {
+ gopts->area_src = orig_area_src;
+ gopts->area_dst = orig_area_dst;
}
- if (write(pipefd[1], &c, sizeof(c)) != sizeof(c))
+ if (write(gopts->pipefd[1], &c, sizeof(c)) != sizeof(c))
err("pipe write");
if (pthread_join(uffd_mon, NULL))
err("join() failed");
@@ -1210,24 +1214,24 @@ uffd_move_test_common(uffd_test_args_t *targs, unsigned long chunk_size,
uffd_test_pass();
}
-static void uffd_move_test(uffd_test_args_t *targs)
+static void uffd_move_test(uffd_global_test_opts_t *gopts, uffd_test_args_t *targs)
{
- uffd_move_test_common(targs, page_size, uffd_move_handle_fault);
+ uffd_move_test_common(gopts, targs, gopts->page_size, uffd_move_handle_fault);
}
-static void uffd_move_pmd_test(uffd_test_args_t *targs)
+static void uffd_move_pmd_test(uffd_global_test_opts_t *gopts, uffd_test_args_t *targs)
{
- if (madvise(area_dst, nr_pages * page_size, MADV_HUGEPAGE))
+ if (madvise(gopts->area_dst, gopts->nr_pages * gopts->page_size, MADV_HUGEPAGE))
err("madvise(MADV_HUGEPAGE) failure");
- uffd_move_test_common(targs, read_pmd_pagesize(),
+ uffd_move_test_common(gopts, targs, read_pmd_pagesize(),
uffd_move_pmd_handle_fault);
}
-static void uffd_move_pmd_split_test(uffd_test_args_t *targs)
+static void uffd_move_pmd_split_test(uffd_global_test_opts_t *gopts, uffd_test_args_t *targs)
{
- if (madvise(area_dst, nr_pages * page_size, MADV_NOHUGEPAGE))
+ if (madvise(gopts->area_dst, gopts->nr_pages * gopts->page_size, MADV_NOHUGEPAGE))
err("madvise(MADV_NOHUGEPAGE) failure");
- uffd_move_test_common(targs, read_pmd_pagesize(),
+ uffd_move_test_common(gopts, targs, read_pmd_pagesize(),
uffd_move_pmd_handle_fault);
}
@@ -1287,6 +1291,11 @@ typedef enum {
THR_STATE_UNINTERRUPTIBLE,
} thread_state;
+typedef struct {
+ uffd_global_test_opts_t *gopts;
+ volatile pid_t *pid;
+} mmap_changing_thread_args;
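+/* pthread_create() forwards a single void *, so both values are bundled. */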
+
static void sleep_short(void)
{
usleep(1000);
@@ -1329,7 +1338,9 @@ static void thread_state_until(pid_t tid, thread_state state)
static void *uffd_mmap_changing_thread(void *opaque)
{
- volatile pid_t *pid = opaque;
+ mmap_changing_thread_args *args = opaque;
+ uffd_global_test_opts_t *gopts = args->gopts;
+ volatile pid_t *pid = args->pid;
int ret;
/* Unfortunately, it's only fetch-able from the thread itself.. */
@@ -1337,21 +1348,21 @@ static void *uffd_mmap_changing_thread(void *opaque)
*pid = syscall(SYS_gettid);
/* Inject an event, this will hang solid until the event read */
- ret = madvise(area_dst, page_size, MADV_REMOVE);
+ ret = madvise(gopts->area_dst, gopts->page_size, MADV_REMOVE);
if (ret)
err("madvise(MADV_REMOVE) failed");
return NULL;
}
-static void uffd_consume_message(int fd)
+static void uffd_consume_message(uffd_global_test_opts_t *gopts)
{
struct uffd_msg msg = { 0 };
- while (uffd_read_msg(fd, &msg));
+ while (uffd_read_msg(gopts, &msg));
}
-static void uffd_mmap_changing_test(uffd_test_args_t *targs)
+static void uffd_mmap_changing_test(uffd_global_test_opts_t *gopts, uffd_test_args_t *targs)
{
/*
* This stores the real PID (which can be different from how tid is
@@ -1360,13 +1371,14 @@ static void uffd_mmap_changing_test(uffd_test_args_t *targs)
pid_t pid = 0;
pthread_t tid;
int ret;
+ mmap_changing_thread_args args = { gopts, &pid };
- if (uffd_register(uffd, area_dst, nr_pages * page_size,
+ if (uffd_register(gopts->uffd, gopts->area_dst, gopts->nr_pages * gopts->page_size,
true, false, false))
err("uffd_register() failed");
/* Create a thread to generate the racy event */
- ret = pthread_create(&tid, NULL, uffd_mmap_changing_thread, &pid);
+ ret = pthread_create(&tid, NULL, uffd_mmap_changing_thread, &args);
if (ret)
err("pthread_create() failed");
@@ -1380,26 +1392,26 @@ static void uffd_mmap_changing_test(uffd_test_args_t *targs)
/* Wait until the thread hangs at REMOVE event */
thread_state_until(pid, THR_STATE_UNINTERRUPTIBLE);
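/*
* While the MADV_REMOVE event sits unread, mmap_changing stays set, so
* each helper below presumably expects its ioctl to fail with -EAGAIN.
*/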
- if (!uffdio_mmap_changing_test_copy(uffd))
+ if (!uffdio_mmap_changing_test_copy(gopts->uffd))
return;
- if (!uffdio_mmap_changing_test_zeropage(uffd))
+ if (!uffdio_mmap_changing_test_zeropage(gopts->uffd))
return;
- if (!uffdio_mmap_changing_test_move(uffd))
+ if (!uffdio_mmap_changing_test_move(gopts->uffd))
return;
- if (!uffdio_mmap_changing_test_poison(uffd))
+ if (!uffdio_mmap_changing_test_poison(gopts->uffd))
return;
- if (!uffdio_mmap_changing_test_continue(uffd))
+ if (!uffdio_mmap_changing_test_continue(gopts->uffd))
return;
/*
* All succeeded above! Recycle everything. Start by reading the
* event so as to kick the thread roll again..
*/
- uffd_consume_message(uffd);
+ uffd_consume_message(gopts);
ret = pthread_join(tid, NULL);
assert(ret == 0);
@@ -1407,10 +1419,10 @@ static void uffd_mmap_changing_test(uffd_test_args_t *targs)
uffd_test_pass();
}
-static int prevent_hugepages(const char **errmsg)
+static int prevent_hugepages(uffd_global_test_opts_t *gopts, const char **errmsg)
{
/* This should be done before source area is populated */
- if (madvise(area_src, nr_pages * page_size, MADV_NOHUGEPAGE)) {
+ if (madvise(gopts->area_src, gopts->nr_pages * gopts->page_size, MADV_NOHUGEPAGE)) {
/* Ignore only if CONFIG_TRANSPARENT_HUGEPAGE=n */
if (errno != EINVAL) {
if (errmsg)
@@ -1421,10 +1433,10 @@ static int prevent_hugepages(const char **errmsg)
return 0;
}
-static int request_hugepages(const char **errmsg)
+static int request_hugepages(uffd_global_test_opts_t *gopts, const char **errmsg)
{
/* This should be done before source area is populated */
- if (madvise(area_src, nr_pages * page_size, MADV_HUGEPAGE)) {
+ if (madvise(gopts->area_src, gopts->nr_pages * gopts->page_size, MADV_HUGEPAGE)) {
if (errmsg) {
*errmsg = (errno == EINVAL) ?
"CONFIG_TRANSPARENT_HUGEPAGE is not set" :
@@ -1448,13 +1460,17 @@ struct uffd_test_case_ops uffd_move_test_pmd_case_ops = {
* Note that _UFFDIO_ZEROPAGE is tested separately in the zeropage test.
*/
static void
-do_register_ioctls_test(uffd_test_args_t *args, bool miss, bool wp, bool minor)
+do_register_ioctls_test(uffd_global_test_opts_t *gopts,
+ uffd_test_args_t *args,
+ bool miss,
+ bool wp,
+ bool minor)
{
uint64_t ioctls = 0, expected = BIT_ULL(_UFFDIO_WAKE);
mem_type_t *mem_type = args->mem_type;
int ret;
- ret = uffd_register_with_ioctls(uffd, area_dst, page_size,
+ ret = uffd_register_with_ioctls(gopts->uffd, gopts->area_dst, gopts->page_size,
miss, wp, minor, &ioctls);
/*
@@ -1485,18 +1501,18 @@ do_register_ioctls_test(uffd_test_args_t *args, bool miss, bool wp, bool minor)
"(miss=%d, wp=%d, minor=%d): expected=0x%"PRIx64", "
"returned=0x%"PRIx64, miss, wp, minor, expected, ioctls);
- if (uffd_unregister(uffd, area_dst, page_size))
+ if (uffd_unregister(gopts->uffd, gopts->area_dst, gopts->page_size))
err("unregister");
}
-static void uffd_register_ioctls_test(uffd_test_args_t *args)
+static void uffd_register_ioctls_test(uffd_global_test_opts_t *gopts, uffd_test_args_t *args)
{
int miss, wp, minor;
for (miss = 0; miss <= 1; miss++)
for (wp = 0; wp <= 1; wp++)
for (minor = 0; minor <= 1; minor++)
- do_register_ioctls_test(args, miss, wp, minor);
+ do_register_ioctls_test(gopts, args, miss, wp, minor);
uffd_test_pass();
}
@@ -1734,27 +1750,47 @@ int main(int argc, char *argv[])
}
for (j = 0; j < n_mems; j++) {
mem_type = &mem_types[j];
+
+ /* Initialize global test options */
+ uffd_global_test_opts_t gopts = { 0 };
+
+ gopts.map_shared = mem_type->shared;
+ uffd_test_ops = mem_type->mem_ops;
+ uffd_test_case_ops = test->test_case_ops;
+
+ if (mem_type->mem_flag & (MEM_HUGETLB_PRIVATE | MEM_HUGETLB)) {
+ gopts.page_size = default_huge_page_size();
+ if (gopts.page_size == 0) {
+ uffd_test_skip("huge page size is 0, feature missing?");
+ continue;
+ }
+ } else {
+ gopts.page_size = psize();
+ }
+
+ /* Ensure we have at least 2 pages */
+ gopts.nr_pages = MAX(UFFD_TEST_MEM_SIZE, gopts.page_size * 2)
+ / gopts.page_size;
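+ /*
+ * e.g. if the configured huge page size exceeds half of
+ * UFFD_TEST_MEM_SIZE (an assumption about that constant), the
+ * MAX() clamp above still guarantees nr_pages == 2.
+ */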
+
+ gopts.nr_parallel = 1;
+
+ /* Initialize test arguments */
+ args.mem_type = mem_type;
+
if (!(test->mem_targets & mem_type->mem_flag))
continue;
uffd_test_start("%s on %s", test->name, mem_type->name);
- if ((mem_type->mem_flag == MEM_HUGETLB ||
- mem_type->mem_flag == MEM_HUGETLB_PRIVATE) &&
- (default_huge_page_size() == 0)) {
- uffd_test_skip("huge page size is 0, feature missing?");
- continue;
- }
if (!uffd_feature_supported(test)) {
uffd_test_skip("feature missing");
continue;
}
- if (uffd_setup_environment(&args, test, mem_type,
- &errmsg)) {
+ if (uffd_test_ctx_init(&gopts, test->uffd_feature_required, &errmsg)) {
uffd_test_skip(errmsg);
continue;
}
- test->uffd_fn(&args);
- uffd_test_ctx_clear();
+ test->uffd_fn(&gopts, &args);
+ uffd_test_ctx_clear(&gopts);
}
}
diff --git a/tools/testing/selftests/mm/uffd-wp-mremap.c b/tools/testing/selftests/mm/uffd-wp-mremap.c
index c2ba7d46c7b4..17186d4a4147 100644
--- a/tools/testing/selftests/mm/uffd-wp-mremap.c
+++ b/tools/testing/selftests/mm/uffd-wp-mremap.c
@@ -7,7 +7,7 @@
#include <assert.h>
#include <linux/mman.h>
#include <sys/mman.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "thp_settings.h"
#include "uffd-common.h"
@@ -19,11 +19,6 @@ static size_t thpsizes[20];
static int nr_hugetlbsizes;
static size_t hugetlbsizes[10];
-static int sz2ord(size_t size)
-{
- return __builtin_ctzll(size / pagesize);
-}
-
static int detect_thp_sizes(size_t sizes[], int max)
{
int count = 0;
@@ -87,9 +82,9 @@ static void *alloc_one_folio(size_t size, bool private, bool hugetlb)
struct thp_settings settings = *thp_current_settings();
if (private)
- settings.hugepages[sz2ord(size)].enabled = THP_ALWAYS;
+ settings.hugepages[sz2ord(size, pagesize)].enabled = THP_ALWAYS;
else
- settings.shmem_hugepages[sz2ord(size)].enabled = SHMEM_ALWAYS;
+ settings.shmem_hugepages[sz2ord(size, pagesize)].enabled = SHMEM_ALWAYS;
thp_push_settings(&settings);
@@ -157,7 +152,8 @@ static bool range_is_swapped(void *addr, size_t size)
return true;
}
-static void test_one_folio(size_t size, bool private, bool swapout, bool hugetlb)
+static void test_one_folio(uffd_global_test_opts_t *gopts, size_t size, bool private,
+ bool swapout, bool hugetlb)
{
struct uffdio_writeprotect wp_prms;
uint64_t features = 0;
@@ -181,21 +177,21 @@ static void test_one_folio(size_t size, bool private, bool swapout, bool hugetlb
}
/* Register range for uffd-wp. */
- if (userfaultfd_open(&features)) {
+ if (userfaultfd_open(gopts, &features)) {
if (errno == ENOENT)
ksft_test_result_skip("userfaultfd not available\n");
else
ksft_test_result_fail("userfaultfd_open() failed\n");
goto out;
}
- if (uffd_register(uffd, mem, size, false, true, false)) {
+ if (uffd_register(gopts->uffd, mem, size, false, true, false)) {
ksft_test_result_fail("uffd_register() failed\n");
goto out;
}
wp_prms.mode = UFFDIO_WRITEPROTECT_MODE_WP;
wp_prms.range.start = (uintptr_t)mem;
wp_prms.range.len = size;
- if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp_prms)) {
+ if (ioctl(gopts->uffd, UFFDIO_WRITEPROTECT, &wp_prms)) {
ksft_test_result_fail("ioctl(UFFDIO_WRITEPROTECT) failed\n");
goto out;
}
@@ -242,9 +238,9 @@ static void test_one_folio(size_t size, bool private, bool swapout, bool hugetlb
out:
if (mem)
munmap(mem, size);
- if (uffd >= 0) {
- close(uffd);
- uffd = -1;
+ if (gopts->uffd >= 0) {
+ close(gopts->uffd);
+ gopts->uffd = -1;
}
}
@@ -336,6 +332,7 @@ static const struct testcase testcases[] = {
int main(int argc, char **argv)
{
+ uffd_global_test_opts_t gopts = { 0 };
struct thp_settings settings;
int i, j, plan = 0;
@@ -367,8 +364,8 @@ int main(int argc, char **argv)
const struct testcase *tc = &testcases[i];
for (j = 0; j < *tc->nr_sizes; j++)
- test_one_folio(tc->sizes[j], tc->private, tc->swapout,
- tc->hugetlb);
+ test_one_folio(&gopts, tc->sizes[j], tc->private,
+ tc->swapout, tc->hugetlb);
}
/* If THP is supported, restore original THP settings. */
diff --git a/tools/testing/selftests/mm/va_high_addr_switch.c b/tools/testing/selftests/mm/va_high_addr_switch.c
index 896b3f73fc53..02f290a69132 100644
--- a/tools/testing/selftests/mm/va_high_addr_switch.c
+++ b/tools/testing/selftests/mm/va_high_addr_switch.c
@@ -10,7 +10,7 @@
#include <string.h>
#include "vm_util.h"
-#include "../kselftest.h"
+#include "kselftest.h"
/*
* The hint addr value is used to allocate addresses
@@ -230,10 +230,10 @@ void testcases_init(void)
.msg = "mmap(-1, MAP_HUGETLB) again",
},
{
- .addr = (void *)(addr_switch_hint - pagesize),
+ .addr = (void *)(addr_switch_hint - hugepagesize),
.size = 2 * hugepagesize,
.flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
- .msg = "mmap(addr_switch_hint - pagesize, 2*hugepagesize, MAP_HUGETLB)",
+ .msg = "mmap(addr_switch_hint - hugepagesize, 2*hugepagesize, MAP_HUGETLB)",
.low_addr_required = 1,
.keep_mapped = 1,
},
diff --git a/tools/testing/selftests/mm/va_high_addr_switch.sh b/tools/testing/selftests/mm/va_high_addr_switch.sh
index 325de53966b6..a7d4b02b21dd 100755
--- a/tools/testing/selftests/mm/va_high_addr_switch.sh
+++ b/tools/testing/selftests/mm/va_high_addr_switch.sh
@@ -9,6 +9,7 @@
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
+orig_nr_hugepages=0
skip()
{
@@ -76,5 +77,41 @@ check_test_requirements()
esac
}
+save_nr_hugepages()
+{
+ orig_nr_hugepages=$(cat /proc/sys/vm/nr_hugepages)
+}
+
+restore_nr_hugepages()
+{
+ echo "$orig_nr_hugepages" > /proc/sys/vm/nr_hugepages
+}
+
+setup_nr_hugepages()
+{
+ local needpgs=$1
+ local freepgs=0
+ while read -r name size unit; do
+ if [ "$name" = "HugePages_Free:" ]; then
+ freepgs="$size"
+ break
+ fi
+ done < /proc/meminfo
+ if [ "$freepgs" -ge "$needpgs" ]; then
+ return
+ fi
+ local hpgs=$((orig_nr_hugepages + needpgs))
+ echo $hpgs > /proc/sys/vm/nr_hugepages
+
+ local nr_hugepgs=$(cat /proc/sys/vm/nr_hugepages)
+ if [ "$nr_hugepgs" != "$hpgs" ]; then
+ restore_nr_hugepages
+ skip "$0: no enough hugepages for testing"
+ fi
+}
+
check_test_requirements
+save_nr_hugepages
+# 4 keep_mapped pages, and one for tmp usage
+setup_nr_hugepages 5
./va_high_addr_switch --run-hugetlb
+restore_nr_hugepages
diff --git a/tools/testing/selftests/mm/virtual_address_range.c b/tools/testing/selftests/mm/virtual_address_range.c
index b380e102b22f..4f0923825ed7 100644
--- a/tools/testing/selftests/mm/virtual_address_range.c
+++ b/tools/testing/selftests/mm/virtual_address_range.c
@@ -16,7 +16,7 @@
#include <fcntl.h>
#include "vm_util.h"
-#include "../kselftest.h"
+#include "kselftest.h"
/*
* Maximum address range mapped with a single mmap()
@@ -44,12 +44,18 @@
* On Arm64 the address space is 256TB and support for
* high mappings up to 4PB virtual address space has
* been added.
+ *
+ * On PowerPC64, the address space up to 128TB can be
+ * mapped without a hint. Addresses beyond 128TB, up to
+ * 4PB, can be mapped with a hint.
+ *
*/
#define NR_CHUNKS_128TB ((128 * SZ_1TB) / MAP_CHUNK_SIZE) /* Number of chunks for 128TB */
#define NR_CHUNKS_256TB (NR_CHUNKS_128TB * 2UL)
#define NR_CHUNKS_384TB (NR_CHUNKS_128TB * 3UL)
#define NR_CHUNKS_3840TB (NR_CHUNKS_128TB * 30UL)
+#define NR_CHUNKS_3968TB (NR_CHUNKS_128TB * 31UL)
#define ADDR_MARK_128TB (1UL << 47) /* First address beyond 128TB */
#define ADDR_MARK_256TB (1UL << 48) /* First address beyond 256TB */
@@ -59,6 +65,11 @@
#define HIGH_ADDR_SHIFT 49
#define NR_CHUNKS_LOW NR_CHUNKS_256TB
#define NR_CHUNKS_HIGH NR_CHUNKS_3840TB
+#elif defined(__PPC64__)
+#define HIGH_ADDR_MARK ADDR_MARK_128TB
+#define HIGH_ADDR_SHIFT 48
+#define NR_CHUNKS_LOW NR_CHUNKS_128TB
+#define NR_CHUNKS_HIGH NR_CHUNKS_3968TB
#else
#define HIGH_ADDR_MARK ADDR_MARK_128TB
#define HIGH_ADDR_SHIFT 48
@@ -77,8 +88,11 @@ static void validate_addr(char *ptr, int high_addr)
{
unsigned long addr = (unsigned long) ptr;
- if (high_addr && addr < HIGH_ADDR_MARK)
- ksft_exit_fail_msg("Bad address %lx\n", addr);
+ if (high_addr) {
+ if (addr < HIGH_ADDR_MARK)
+ ksft_exit_fail_msg("Bad address %lx\n", addr);
+ return;
+ }
if (addr > HIGH_ADDR_MARK)
ksft_exit_fail_msg("Bad address %lx\n", addr);
@@ -224,7 +238,7 @@ int main(int argc, char *argv[])
if (hptr[i] == MAP_FAILED)
break;
- mark_range(ptr[i], MAP_CHUNK_SIZE);
+ mark_range(hptr[i], MAP_CHUNK_SIZE);
validate_addr(hptr[i], 1);
}
hchunks = i;
diff --git a/tools/testing/selftests/mm/vm_util.c b/tools/testing/selftests/mm/vm_util.c
index 61d7bf1f8c62..d954bf91afd5 100644
--- a/tools/testing/selftests/mm/vm_util.c
+++ b/tools/testing/selftests/mm/vm_util.c
@@ -9,7 +9,7 @@
#include <linux/fs.h>
#include <sys/syscall.h>
#include <unistd.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "vm_util.h"
#define PMD_SIZE_FILE_PATH "/sys/kernel/mm/transparent_hugepage/hpage_pmd_size"
@@ -338,6 +338,19 @@ int detect_hugetlb_page_sizes(size_t sizes[], int max)
return count;
}
+int pageflags_get(unsigned long pfn, int kpageflags_fd, uint64_t *flags)
+{
+ size_t count;
+
+ count = pread(kpageflags_fd, flags, sizeof(*flags),
+ pfn * sizeof(*flags));
+
+ if (count != sizeof(*flags))
+ return -1;
+
+ return 0;
+}
+
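+/*
+ * Usage sketch -- pfn and kpageflags_fd are assumptions (e.g. a pfn
+ * resolved via /proc/self/pagemap and an open /proc/kpageflags fd):
+ *
+ *	uint64_t flags;
+ *
+ *	if (!pageflags_get(pfn, kpageflags_fd, &flags))
+ *		is_thp = flags & KPF_THP;
+ */
+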
/* If `ioctls' non-NULL, the allowed ioctls will be returned into the var */
int uffd_register_with_ioctls(int uffd, void *addr, uint64_t len,
bool miss, bool wp, bool minor, uint64_t *ioctls)
@@ -402,7 +415,7 @@ unsigned long get_free_hugepages(void)
return fhp;
}
-bool check_vmflag_io(void *addr)
+static bool check_vmflag(void *addr, const char *flag)
{
char buffer[MAX_LINE_LENGTH];
const char *flags;
@@ -419,13 +432,45 @@ bool check_vmflag_io(void *addr)
if (!flaglen)
return false;
- if (flaglen == strlen("io") && !memcmp(flags, "io", flaglen))
+ if (flaglen == strlen(flag) && !memcmp(flags, flag, flaglen))
return true;
flags += flaglen;
}
}
+bool check_vmflag_io(void *addr)
+{
+ return check_vmflag(addr, "io");
+}
+
+bool check_vmflag_pfnmap(void *addr)
+{
+ return check_vmflag(addr, "pf");
+}
+
+bool check_vmflag_guard(void *addr)
+{
+ return check_vmflag(addr, "gu");
+}
+
+bool softdirty_supported(void)
+{
+ char *addr;
+ bool supported = false;
+ const size_t pagesize = getpagesize();
+
+ /* New mappings are expected to be marked with VM_SOFTDIRTY (sd). */
+ addr = mmap(0, pagesize, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
+ if (addr == MAP_FAILED)
+ ksft_exit_fail_msg("mmap failed\n");
+
+ supported = check_vmflag(addr, "sd");
+ munmap(addr, pagesize);
+ return supported;
+}
+
/*
* Open an fd at /proc/$pid/maps and configure procmap_out ready for
* PROCMAP_QUERY query. Returns 0 on success, or an error code otherwise.
@@ -486,3 +531,195 @@ int close_procmap(struct procmap_fd *procmap)
{
return close(procmap->fd);
}
+
+int write_sysfs(const char *file_path, unsigned long val)
+{
+ FILE *f = fopen(file_path, "w");
+
+ if (!f) {
+ fprintf(stderr, "f %s\n", file_path);
+ perror("fopen");
+ return 1;
+ }
+ if (fprintf(f, "%lu", val) < 0) {
+ perror("fprintf");
+ fclose(f);
+ return 1;
+ }
+ fclose(f);
+
+ return 0;
+}
+
+int read_sysfs(const char *file_path, unsigned long *val)
+{
+ FILE *f = fopen(file_path, "r");
+
+ if (!f) {
+ fprintf(stderr, "f %s\n", file_path);
+ perror("fopen");
+ return 1;
+ }
+ if (fscanf(f, "%lu", val) != 1) {
+ perror("fscanf");
+ fclose(f);
+ return 1;
+ }
+ fclose(f);
+
+ return 0;
+}
+
+void *sys_mremap(void *old_address, unsigned long old_size,
+ unsigned long new_size, int flags, void *new_address)
+{
+ return (void *)syscall(__NR_mremap, (unsigned long)old_address,
+ old_size, new_size, flags,
+ (unsigned long)new_address);
+}
+
+bool detect_huge_zeropage(void)
+{
+ int fd = open("/sys/kernel/mm/transparent_hugepage/use_zero_page",
+ O_RDONLY);
+ bool enabled = 0;
+ char buf[15];
+ int ret;
+
+ if (fd < 0)
+ return 0;
+
+ ret = pread(fd, buf, sizeof(buf), 0);
+ if (ret > 0 && ret < sizeof(buf)) {
+ buf[ret] = 0;
+
+ if (strtoul(buf, NULL, 10) == 1)
+ enabled = 1;
+ }
+
+ close(fd);
+ return enabled;
+}
+
+long ksm_get_self_zero_pages(void)
+{
+ int proc_self_ksm_stat_fd;
+ char buf[200];
+ char *substr_ksm_zero;
+ size_t value_pos;
+ ssize_t read_size;
+
+ proc_self_ksm_stat_fd = open("/proc/self/ksm_stat", O_RDONLY);
+ if (proc_self_ksm_stat_fd < 0)
+ return -errno;
+
+ read_size = pread(proc_self_ksm_stat_fd, buf, sizeof(buf) - 1, 0);
+ close(proc_self_ksm_stat_fd);
+ if (read_size < 0)
+ return -errno;
+
+ buf[read_size] = 0;
+
+ substr_ksm_zero = strstr(buf, "ksm_zero_pages");
+ if (!substr_ksm_zero)
+ return 0;
+
+ value_pos = strcspn(substr_ksm_zero, "0123456789");
+ return strtol(substr_ksm_zero + value_pos, NULL, 10);
+}
+
+long ksm_get_self_merging_pages(void)
+{
+ int proc_self_ksm_merging_pages_fd;
+ char buf[10];
+ ssize_t ret;
+
+ proc_self_ksm_merging_pages_fd = open("/proc/self/ksm_merging_pages",
+ O_RDONLY);
+ if (proc_self_ksm_merging_pages_fd < 0)
+ return -errno;
+
+ ret = pread(proc_self_ksm_merging_pages_fd, buf, sizeof(buf) - 1, 0);
+ close(proc_self_ksm_merging_pages_fd);
+ if (ret <= 0)
+ return -errno;
+ buf[ret] = 0;
+
+ return strtol(buf, NULL, 10);
+}
+
+long ksm_get_full_scans(void)
+{
+ int ksm_full_scans_fd;
+ char buf[10];
+ ssize_t ret;
+
+ ksm_full_scans_fd = open("/sys/kernel/mm/ksm/full_scans", O_RDONLY);
+ if (ksm_full_scans_fd < 0)
+ return -errno;
+
+ ret = pread(ksm_full_scans_fd, buf, sizeof(buf) - 1, 0);
+ close(ksm_full_scans_fd);
+ if (ret <= 0)
+ return -errno;
+ buf[ret] = 0;
+
+ return strtol(buf, NULL, 10);
+}
+
+int ksm_use_zero_pages(void)
+{
+ int ksm_use_zero_pages_fd;
+ ssize_t ret;
+
+ ksm_use_zero_pages_fd = open("/sys/kernel/mm/ksm/use_zero_pages", O_RDWR);
+ if (ksm_use_zero_pages_fd < 0)
+ return -errno;
+
+ ret = write(ksm_use_zero_pages_fd, "1", 1);
+ close(ksm_use_zero_pages_fd);
+ return ret == 1 ? 0 : -errno;
+}
+
+int ksm_start(void)
+{
+ int ksm_fd;
+ ssize_t ret;
+ long start_scans, end_scans;
+
+ ksm_fd = open("/sys/kernel/mm/ksm/run", O_RDWR);
+ if (ksm_fd < 0)
+ return -errno;
+
+ /* Wait for two full scans such that any possible merging happened. */
+ start_scans = ksm_get_full_scans();
+ if (start_scans < 0) {
+ close(ksm_fd);
+ return start_scans;
+ }
+ ret = write(ksm_fd, "1", 1);
+ close(ksm_fd);
+ if (ret != 1)
+ return -errno;
+ do {
+ end_scans = ksm_get_full_scans();
+ if (end_scans < 0)
+ return end_scans;
+ } while (end_scans < start_scans + 2);
+
+ return 0;
+}
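+
+/*
+ * Typical pairing in a test (a sketch; area and size are assumptions):
+ *
+ *	madvise(area, size, MADV_MERGEABLE);
+ *	if (!ksm_start())
+ *		merged = ksm_get_self_merging_pages();
+ *	ksm_stop();
+ *
+ * ksm_start() only returns 0 after two full scans have completed, so
+ * the counters are stable by the time they are sampled.
+ */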
+
+int ksm_stop(void)
+{
+ int ksm_fd;
+ ssize_t ret;
+
+ ksm_fd = open("/sys/kernel/mm/ksm/run", O_RDWR);
+ if (ksm_fd < 0)
+ return -errno;
+
+ ret = write(ksm_fd, "2", 1);
+ close(ksm_fd);
+ return ret == 1 ? 0 : -errno;
+}
diff --git a/tools/testing/selftests/mm/vm_util.h b/tools/testing/selftests/mm/vm_util.h
index adb5d294a220..6ad32b1830f1 100644
--- a/tools/testing/selftests/mm/vm_util.h
+++ b/tools/testing/selftests/mm/vm_util.h
@@ -6,7 +6,7 @@
#include <stdarg.h>
#include <strings.h> /* ffsl() */
#include <unistd.h> /* _SC_PAGESIZE */
-#include "../kselftest.h"
+#include "kselftest.h"
#include <linux/fs.h>
#define BIT_ULL(nr) (1ULL << (nr))
@@ -18,6 +18,16 @@
#define PM_SWAP BIT_ULL(62)
#define PM_PRESENT BIT_ULL(63)
+#define KPF_COMPOUND_HEAD BIT_ULL(15)
+#define KPF_COMPOUND_TAIL BIT_ULL(16)
+#define KPF_THP BIT_ULL(22)
+/*
+ * Ignore the checkpatch warning: we must read from x purely to trigger
+ * a read page fault, without doing anything with the value. The
+ * volatile qualifier stops the compiler from optimising the access away.
+ */
+#define FORCE_READ(x) (*(const volatile typeof(x) *)&(x))
+
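+/*
+ * Minimal usage sketch (addr is an assumption -- any mapped byte):
+ *
+ *	char *addr = area;
+ *	FORCE_READ(addr[0]);
+ *
+ * The read faults the page in while the value itself is discarded.
+ */
+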
extern unsigned int __page_size;
extern unsigned int __page_shift;
@@ -44,6 +54,8 @@ static inline unsigned int pshift(void)
return __page_shift;
}
+bool detect_huge_zeropage(void);
+
/*
* Plan 9 FS has bugs (at least on QEMU) where certain operations fail with
* ENOENT on unlinked files. See
@@ -76,6 +88,7 @@ bool check_huge_shmem(void *addr, int nr_hpages, uint64_t hpage_size);
int64_t allocate_transhuge(void *ptr, int pagemap_fd);
unsigned long default_huge_page_size(void);
int detect_hugetlb_page_sizes(size_t sizes[], int max);
+int pageflags_get(unsigned long pfn, int kpageflags_fd, uint64_t *flags);
int uffd_register(int uffd, void *addr, uint64_t len,
bool miss, bool wp, bool minor);
@@ -84,10 +97,15 @@ int uffd_register_with_ioctls(int uffd, void *addr, uint64_t len,
bool miss, bool wp, bool minor, uint64_t *ioctls);
unsigned long get_free_hugepages(void);
bool check_vmflag_io(void *addr);
+bool check_vmflag_pfnmap(void *addr);
+bool check_vmflag_guard(void *addr);
int open_procmap(pid_t pid, struct procmap_fd *procmap_out);
int query_procmap(struct procmap_fd *procmap);
bool find_vma_procmap(struct procmap_fd *procmap, void *address);
int close_procmap(struct procmap_fd *procmap);
+int write_sysfs(const char *file_path, unsigned long val);
+int read_sysfs(const char *file_path, unsigned long *val);
+bool softdirty_supported(void);
static inline int open_self_procmap(struct procmap_fd *procmap_out)
{
@@ -115,6 +133,21 @@ static inline void log_test_result(int result)
ksft_test_result_report(result, "%s\n", test_name);
}
+static inline int sz2ord(size_t size, size_t pagesize)
+{
+ return __builtin_ctzll(size / pagesize);
+}
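+
+/*
+ * Worked example: sz2ord(2ul << 20, 4096) == 9, since a 2 MiB folio
+ * spans 512 == 1 << 9 base pages.
+ */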
+
+void *sys_mremap(void *old_address, unsigned long old_size,
+ unsigned long new_size, int flags, void *new_address);
+
+long ksm_get_self_zero_pages(void);
+long ksm_get_self_merging_pages(void);
+long ksm_get_full_scans(void);
+int ksm_use_zero_pages(void);
+int ksm_start(void);
+int ksm_stop(void);
+
/*
* On ppc64 this will only work with radix 2M hugepage size
*/
diff --git a/tools/testing/selftests/mount_setattr/mount_setattr_test.c b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
index 8b378c91debf..7aec3ae82a44 100644
--- a/tools/testing/selftests/mount_setattr/mount_setattr_test.c
+++ b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
@@ -21,7 +21,7 @@
#include <linux/mount.h>
#include "../filesystems/wrappers.h"
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#ifndef CLONE_NEWNS
#define CLONE_NEWNS 0x00020000
@@ -107,6 +107,26 @@
#endif
#endif
+#ifndef __NR_open_tree_attr
+ #if defined __alpha__
+ #define __NR_open_tree_attr 577
+ #elif defined _MIPS_SIM
+ #if _MIPS_SIM == _MIPS_SIM_ABI32 /* o32 */
+ #define __NR_open_tree_attr (467 + 4000)
+ #endif
+ #if _MIPS_SIM == _MIPS_SIM_NABI32 /* n32 */
+ #define __NR_open_tree_attr (467 + 6000)
+ #endif
+ #if _MIPS_SIM == _MIPS_SIM_ABI64 /* n64 */
+ #define __NR_open_tree_attr (467 + 5000)
+ #endif
+ #elif defined __ia64__
+ #define __NR_open_tree_attr (467 + 1024)
+ #else
+ #define __NR_open_tree_attr 467
+ #endif
+#endif
+
#ifndef MOUNT_ATTR_IDMAP
#define MOUNT_ATTR_IDMAP 0x00100000
#endif
@@ -121,6 +141,12 @@ static inline int sys_mount_setattr(int dfd, const char *path, unsigned int flag
return syscall(__NR_mount_setattr, dfd, path, flags, attr, size);
}
+static inline int sys_open_tree_attr(int dfd, const char *path, unsigned int flags,
+ struct mount_attr *attr, size_t size)
+{
+ return syscall(__NR_open_tree_attr, dfd, path, flags, attr, size);
+}
+
static ssize_t write_nointr(int fd, const void *buf, size_t count)
{
ssize_t ret;
@@ -1222,6 +1248,12 @@ TEST_F(mount_setattr_idmapped, attached_mount_inside_current_mount_namespace)
attr.userns_fd = get_userns_fd(0, 10000, 10000);
ASSERT_GE(attr.userns_fd, 0);
ASSERT_NE(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
+ /*
+ * Make sure that open_tree_attr() without OPEN_TREE_CLONE is not a way
+ * to bypass this mount_setattr() restriction.
+ */
+ ASSERT_LT(sys_open_tree_attr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
+
ASSERT_EQ(close(attr.userns_fd), 0);
ASSERT_EQ(close(open_tree_fd), 0);
}
@@ -1255,6 +1287,12 @@ TEST_F(mount_setattr_idmapped, attached_mount_outside_current_mount_namespace)
ASSERT_GE(attr.userns_fd, 0);
ASSERT_NE(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr,
sizeof(attr)), 0);
+ /*
+ * Make sure that open_tree_attr() without OPEN_TREE_CLONE is not a way
+ * to bypass this mount_setattr() restriction.
+ */
+ ASSERT_LT(sys_open_tree_attr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
+
ASSERT_EQ(close(attr.userns_fd), 0);
ASSERT_EQ(close(open_tree_fd), 0);
}
@@ -1321,6 +1359,19 @@ TEST_F(mount_setattr_idmapped, detached_mount_outside_current_mount_namespace)
ASSERT_EQ(close(open_tree_fd), 0);
}
+static bool expected_uid_gid(int dfd, const char *path, int flags,
+ uid_t expected_uid, gid_t expected_gid)
+{
+ int ret;
+ struct stat st;
+
+ ret = fstatat(dfd, path, &st, flags);
+ if (ret < 0)
+ return false;
+
+ return st.st_uid == expected_uid && st.st_gid == expected_gid;
+}
+
/**
* Validate that currently changing the idmapping of an idmapped mount fails.
*/
@@ -1331,6 +1382,8 @@ TEST_F(mount_setattr_idmapped, change_idmapping)
.attr_set = MOUNT_ATTR_IDMAP,
};
+ ASSERT_TRUE(expected_uid_gid(-EBADF, "/mnt/D", 0, 0, 0));
+
if (!mount_setattr_supported())
SKIP(return, "mount_setattr syscall not supported");
@@ -1348,27 +1401,25 @@ TEST_F(mount_setattr_idmapped, change_idmapping)
AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
ASSERT_EQ(close(attr.userns_fd), 0);
+ EXPECT_FALSE(expected_uid_gid(open_tree_fd, ".", 0, 0, 0));
+ EXPECT_TRUE(expected_uid_gid(open_tree_fd, ".", 0, 10000, 10000));
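+ /*
+ * The 0 -> 10000 idmapping applied above shifts ownership, so
+ * root-owned files must now surface as uid/gid 10000.
+ */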
+
/* Change idmapping on a detached mount that is already idmapped. */
attr.userns_fd = get_userns_fd(0, 20000, 10000);
ASSERT_GE(attr.userns_fd, 0);
ASSERT_NE(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
+ /*
+ * Make sure that open_tree_attr() without OPEN_TREE_CLONE is not a way
+ * to bypass this mount_setattr() restriction.
+ */
+ EXPECT_LT(sys_open_tree_attr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
+ EXPECT_FALSE(expected_uid_gid(open_tree_fd, ".", 0, 20000, 20000));
+ EXPECT_TRUE(expected_uid_gid(open_tree_fd, ".", 0, 10000, 10000));
+
ASSERT_EQ(close(attr.userns_fd), 0);
ASSERT_EQ(close(open_tree_fd), 0);
}
-static bool expected_uid_gid(int dfd, const char *path, int flags,
- uid_t expected_uid, gid_t expected_gid)
-{
- int ret;
- struct stat st;
-
- ret = fstatat(dfd, path, &st, flags);
- if (ret < 0)
- return false;
-
- return st.st_uid == expected_uid && st.st_gid == expected_gid;
-}
-
TEST_F(mount_setattr_idmapped, idmap_mount_tree_invalid)
{
int open_tree_fd = -EBADF;
@@ -2079,24 +2130,9 @@ TEST_F(mount_setattr, detached_tree_propagation)
* means that the device information will be different for any
* statx() that was taken from /mnt/A before the mount compared
* to one after the mount.
- *
- * Since we already now that the device information between the
- * stx1 and stx2 samples are identical we also now that stx2 and
- * stx3 device information will necessarily differ.
*/
ASSERT_NE(stx1.stx_dev_minor, stx3.stx_dev_minor);
-
- /*
- * If mount propagation worked correctly then the tmpfs mount
- * that was created after the mount namespace was unshared will
- * have propagated onto /mnt/A in the detached mount tree.
- *
- * Verify that the device information for stx3 and stx4 are
- * identical. It is already established that stx3 is different
- * from both stx1 and stx2 sampled before the tmpfs mount was
- * done so if stx3 and stx4 are identical the proof is done.
- */
- ASSERT_EQ(stx3.stx_dev_minor, stx4.stx_dev_minor);
+ ASSERT_EQ(stx1.stx_dev_minor, stx4.stx_dev_minor);
EXPECT_EQ(close(fd_tree), 0);
}
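
Note on the helper used in the hunks above: sys_open_tree_attr() is a local selftest wrapper, not a libc call. A minimal sketch of such a wrapper, assuming uapi headers that provide __NR_open_tree_attr and struct mount_attr in <linux/mount.h>, might look like:

/*
 * Minimal sketch of the sys_open_tree_attr() wrapper the tests above
 * assume; names and guard are illustrative, not the selftest's code.
 */
#define _GNU_SOURCE
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/mount.h>

#ifdef __NR_open_tree_attr
static inline int sys_open_tree_attr(int dfd, const char *path,
				     unsigned int flags,
				     struct mount_attr *attr, size_t size)
{
	/* On success returns a new fd for the tree; -1 with errno on error. */
	return syscall(__NR_open_tree_attr, dfd, path, flags, attr, size);
}
#endif

Without OPEN_TREE_CLONE the fd refers to the mount itself rather than a copy, which is exactly why the tests check that it cannot be used to sidestep the mount_setattr() restrictions.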
diff --git a/tools/testing/selftests/move_mount_set_group/move_mount_set_group_test.c b/tools/testing/selftests/move_mount_set_group/move_mount_set_group_test.c
index bcf51d785a37..12434415ec36 100644
--- a/tools/testing/selftests/move_mount_set_group/move_mount_set_group_test.c
+++ b/tools/testing/selftests/move_mount_set_group/move_mount_set_group_test.c
@@ -15,7 +15,7 @@
#include <stdarg.h>
#include <sys/syscall.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#ifndef CLONE_NEWNS
#define CLONE_NEWNS 0x00020000
diff --git a/tools/testing/selftests/mqueue/mq_open_tests.c b/tools/testing/selftests/mqueue/mq_open_tests.c
index 9403ac01ba11..b16029c40c0f 100644
--- a/tools/testing/selftests/mqueue/mq_open_tests.c
+++ b/tools/testing/selftests/mqueue/mq_open_tests.c
@@ -33,7 +33,7 @@
#include <mqueue.h>
#include <error.h>
-#include "../kselftest.h"
+#include "kselftest.h"
static char *usage =
"Usage:\n"
diff --git a/tools/testing/selftests/mqueue/mq_perf_tests.c b/tools/testing/selftests/mqueue/mq_perf_tests.c
index fb898850867c..303c46eebd94 100644
--- a/tools/testing/selftests/mqueue/mq_perf_tests.c
+++ b/tools/testing/selftests/mqueue/mq_perf_tests.c
@@ -40,7 +40,7 @@
#include <popt.h>
#include <error.h>
-#include "../kselftest.h"
+#include "kselftest.h"
static char *usage =
"Usage:\n"
diff --git a/tools/testing/selftests/mseal_system_mappings/sysmap_is_sealed.c b/tools/testing/selftests/mseal_system_mappings/sysmap_is_sealed.c
index 0d2af30c3bf5..cb0ca6ed7ebe 100644
--- a/tools/testing/selftests/mseal_system_mappings/sysmap_is_sealed.c
+++ b/tools/testing/selftests/mseal_system_mappings/sysmap_is_sealed.c
@@ -11,8 +11,8 @@
#include <string.h>
#include <stdbool.h>
-#include "../kselftest.h"
-#include "../kselftest_harness.h"
+#include "kselftest.h"
+#include "kselftest_harness.h"
#define VMFLAGS "VmFlags:"
#define MSEAL_FLAGS "sl"
diff --git a/tools/testing/selftests/namespaces/.gitignore b/tools/testing/selftests/namespaces/.gitignore
new file mode 100644
index 000000000000..0989e80da457
--- /dev/null
+++ b/tools/testing/selftests/namespaces/.gitignore
@@ -0,0 +1,12 @@
+nsid_test
+file_handle_test
+init_ino_test
+ns_active_ref_test
+listns_test
+listns_permissions_test
+listns_efault_test
+siocgskns_test
+cred_change_test
+stress_test
+listns_pagination_bug
+regression_pidfd_setns_test
diff --git a/tools/testing/selftests/namespaces/Makefile b/tools/testing/selftests/namespaces/Makefile
new file mode 100644
index 000000000000..fbb821652c17
--- /dev/null
+++ b/tools/testing/selftests/namespaces/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0-only
+CFLAGS += -Wall -O0 -g $(KHDR_INCLUDES) $(TOOLS_INCLUDES)
+LDLIBS += -lcap
+
+TEST_GEN_PROGS := nsid_test \
+ file_handle_test \
+ init_ino_test \
+ ns_active_ref_test \
+ listns_test \
+ listns_permissions_test \
+ listns_efault_test \
+ siocgskns_test \
+ cred_change_test \
+ stress_test \
+ listns_pagination_bug \
+ regression_pidfd_setns_test
+
+include ../lib.mk
+
+$(OUTPUT)/ns_active_ref_test: ../filesystems/utils.c
+$(OUTPUT)/listns_test: ../filesystems/utils.c
+$(OUTPUT)/listns_permissions_test: ../filesystems/utils.c
+$(OUTPUT)/listns_efault_test: ../filesystems/utils.c
+$(OUTPUT)/siocgskns_test: ../filesystems/utils.c
+$(OUTPUT)/cred_change_test: ../filesystems/utils.c
+$(OUTPUT)/stress_test: ../filesystems/utils.c
+$(OUTPUT)/listns_pagination_bug: ../filesystems/utils.c
+$(OUTPUT)/regression_pidfd_setns_test: ../filesystems/utils.c
+
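
All of these binaries link ../filesystems/utils.c, which provides shared helpers such as get_userns_fd(nsid, hostid, range) used by the tests below. The following is an illustrative sketch only, not the actual helper: it shows the usual recipe (deny setgroups, write identity maps, return the userns fd), with caveats noted in the comments.

/*
 * Illustrative sketch -- NOT the helper from ../filesystems/utils.c.
 * The real helper does this from a forked child so the caller's own
 * namespace is untouched, and mapping a range wider than one ID
 * generally requires privilege in the parent user namespace.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_file(const char *path, const char *buf)
{
	int fd = open(path, O_WRONLY);
	ssize_t ret;

	if (fd < 0)
		return -1;
	ret = write(fd, buf, strlen(buf));
	close(fd);
	return ret < 0 ? -1 : 0;
}

/* Map [nsid, nsid + range) in a fresh user namespace to [hostid, ...). */
static int get_userns_fd_sketch(unsigned long nsid, unsigned long hostid,
				unsigned long range)
{
	char map[64];

	if (unshare(CLONE_NEWUSER) < 0)
		return -1;

	snprintf(map, sizeof(map), "%lu %lu %lu", nsid, hostid, range);
	if (write_file("/proc/self/setgroups", "deny") < 0 ||
	    write_file("/proc/self/uid_map", map) < 0 ||
	    write_file("/proc/self/gid_map", map) < 0)
		return -1;

	return open("/proc/self/ns/user", O_RDONLY | O_CLOEXEC);
}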
diff --git a/tools/testing/selftests/namespaces/config b/tools/testing/selftests/namespaces/config
new file mode 100644
index 000000000000..d09836260262
--- /dev/null
+++ b/tools/testing/selftests/namespaces/config
@@ -0,0 +1,7 @@
+CONFIG_UTS_NS=y
+CONFIG_TIME_NS=y
+CONFIG_IPC_NS=y
+CONFIG_USER_NS=y
+CONFIG_PID_NS=y
+CONFIG_NET_NS=y
+CONFIG_CGROUPS=y
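
The fragment lists the namespace types the suite exercises; at runtime the tests skip rather than fail when a type is absent. A small sketch of that probe pattern (the function name is illustrative, not from the suite):

/*
 * Illustrative probe: a namespace type compiled out or absent on an
 * older kernel shows up as a failed open of /proc/self/ns/<name>.
 */
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool ns_type_available(const char *name)
{
	char path[64];
	int fd;

	snprintf(path, sizeof(path), "/proc/self/ns/%s", name);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return false; /* e.g. ENOENT when the type is unavailable */
	close(fd);
	return true;
}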
diff --git a/tools/testing/selftests/namespaces/cred_change_test.c b/tools/testing/selftests/namespaces/cred_change_test.c
new file mode 100644
index 000000000000..7b4f5ad3f725
--- /dev/null
+++ b/tools/testing/selftests/namespaces/cred_change_test.c
@@ -0,0 +1,814 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/capability.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <linux/nsfs.h>
+#include "../kselftest_harness.h"
+#include "../filesystems/utils.h"
+#include "wrappers.h"
+
+/*
+ * Test credential changes and their impact on namespace active references.
+ */
+
+/*
+ * Test that setuid() in a user namespace properly swaps active references.
+ * Create a user namespace with multiple UIDs mapped, then setuid() between them.
+ * Verify that the user namespace remains active throughout.
+ */
+TEST(setuid_preserves_active_refs)
+{
+ pid_t pid;
+ int status;
+ __u64 userns_id;
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = CLONE_NEWUSER,
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
+ __u64 ns_ids[256];
+ ssize_t ret;
+ int i;
+ bool found = false;
+ int pipefd[2];
+
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child process */
+ int fd, userns_fd;
+ __u64 child_userns_id;
+ uid_t orig_uid = getuid();
+ int setuid_count;
+
+ close(pipefd[0]);
+
+ /* Create new user namespace with multiple UIDs mapped (0-9) */
+ userns_fd = get_userns_fd(0, orig_uid, 10);
+ if (userns_fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ if (setns(userns_fd, CLONE_NEWUSER) < 0) {
+ close(userns_fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(userns_fd);
+
+ /* Get user namespace ID */
+ fd = open("/proc/self/ns/user", O_RDONLY);
+ if (fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ if (ioctl(fd, NS_GET_ID, &child_userns_id) < 0) {
+ close(fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(fd);
+
+ /* Send namespace ID to parent */
+ write(pipefd[1], &child_userns_id, sizeof(child_userns_id));
+
+ /*
+ * Perform multiple setuid() calls.
+ * Each setuid() triggers commit_creds() which should properly
+ * swap active references via switch_cred_namespaces().
+ */
+ for (setuid_count = 0; setuid_count < 50; setuid_count++) {
+ uid_t target_uid = (setuid_count % 10);
+ if (setuid(target_uid) < 0) {
+ if (errno != EPERM) {
+ close(pipefd[1]);
+ exit(1);
+ }
+ }
+ }
+
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Parent process */
+ close(pipefd[1]);
+
+ if (read(pipefd[0], &userns_id, sizeof(userns_id)) != sizeof(userns_id)) {
+ close(pipefd[0]);
+ kill(pid, SIGKILL);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to get namespace ID from child");
+ }
+ close(pipefd[0]);
+
+ TH_LOG("Child user namespace ID: %llu", (unsigned long long)userns_id);
+
+ /* Verify namespace is active while child is running */
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+ if (ret < 0) {
+ kill(pid, SIGKILL);
+ waitpid(pid, NULL, 0);
+ if (errno == ENOSYS)
+ SKIP(return, "listns() not supported");
+ ASSERT_GE(ret, 0);
+ }
+
+ for (i = 0; i < ret; i++) {
+ if (ns_ids[i] == userns_id) {
+ found = true;
+ break;
+ }
+ }
+ ASSERT_TRUE(found);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ /* Verify namespace becomes inactive after child exits */
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+ ASSERT_GE(ret, 0);
+
+ found = false;
+ for (i = 0; i < ret; i++) {
+ if (ns_ids[i] == userns_id) {
+ found = true;
+ break;
+ }
+ }
+
+ ASSERT_FALSE(found);
+ TH_LOG("setuid() correctly preserved active references (no leak)");
+}
+
+/*
+ * Test that setgid() in a user namespace properly handles active references.
+ */
+TEST(setgid_preserves_active_refs)
+{
+ pid_t pid;
+ int status;
+ __u64 userns_id;
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = CLONE_NEWUSER,
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
+ __u64 ns_ids[256];
+ ssize_t ret;
+ int i;
+ bool found = false;
+ int pipefd[2];
+
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child process */
+ int fd, userns_fd;
+ __u64 child_userns_id;
+ uid_t orig_uid = getuid();
+ int setgid_count;
+
+ close(pipefd[0]);
+
+ /* Create new user namespace with multiple GIDs mapped */
+ userns_fd = get_userns_fd(0, orig_uid, 10);
+ if (userns_fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ if (setns(userns_fd, CLONE_NEWUSER) < 0) {
+ close(userns_fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(userns_fd);
+
+ /* Get user namespace ID */
+ fd = open("/proc/self/ns/user", O_RDONLY);
+ if (fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ if (ioctl(fd, NS_GET_ID, &child_userns_id) < 0) {
+ close(fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(fd);
+
+ write(pipefd[1], &child_userns_id, sizeof(child_userns_id));
+
+ /* Perform multiple setgid() calls */
+ for (setgid_count = 0; setgid_count < 50; setgid_count++) {
+ gid_t target_gid = (setgid_count % 10);
+ if (setgid(target_gid) < 0) {
+ if (errno != EPERM) {
+ close(pipefd[1]);
+ exit(1);
+ }
+ }
+ }
+
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Parent process */
+ close(pipefd[1]);
+
+ if (read(pipefd[0], &userns_id, sizeof(userns_id)) != sizeof(userns_id)) {
+ close(pipefd[0]);
+ kill(pid, SIGKILL);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to get namespace ID from child");
+ }
+ close(pipefd[0]);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ /* Verify namespace becomes inactive */
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+ if (ret < 0) {
+ if (errno == ENOSYS)
+ SKIP(return, "listns() not supported");
+ ASSERT_GE(ret, 0);
+ }
+
+ for (i = 0; i < ret; i++) {
+ if (ns_ids[i] == userns_id) {
+ found = true;
+ break;
+ }
+ }
+
+ ASSERT_FALSE(found);
+ TH_LOG("setgid() correctly preserved active references (no leak)");
+}
+
+/*
+ * Test setresuid(), which changes the real, effective, and saved UIDs.
+ * This should properly swap active references via commit_creds().
+ */
+TEST(setresuid_preserves_active_refs)
+{
+ pid_t pid;
+ int status;
+ __u64 userns_id;
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = CLONE_NEWUSER,
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
+ __u64 ns_ids[256];
+ ssize_t ret;
+ int i;
+ bool found = false;
+ int pipefd[2];
+
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child process */
+ int fd, userns_fd;
+ __u64 child_userns_id;
+ uid_t orig_uid = getuid();
+ int setres_count;
+
+ close(pipefd[0]);
+
+ /* Create new user namespace */
+ userns_fd = get_userns_fd(0, orig_uid, 10);
+ if (userns_fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ if (setns(userns_fd, CLONE_NEWUSER) < 0) {
+ close(userns_fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(userns_fd);
+
+ /* Get user namespace ID */
+ fd = open("/proc/self/ns/user", O_RDONLY);
+ if (fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ if (ioctl(fd, NS_GET_ID, &child_userns_id) < 0) {
+ close(fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(fd);
+
+ write(pipefd[1], &child_userns_id, sizeof(child_userns_id));
+
+ /* Perform multiple setresuid() calls */
+ for (setres_count = 0; setres_count < 30; setres_count++) {
+ uid_t uid1 = (setres_count % 5);
+ uid_t uid2 = ((setres_count + 1) % 5);
+ uid_t uid3 = ((setres_count + 2) % 5);
+
+ if (setresuid(uid1, uid2, uid3) < 0) {
+ if (errno != EPERM) {
+ close(pipefd[1]);
+ exit(1);
+ }
+ }
+ }
+
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Parent process */
+ close(pipefd[1]);
+
+ if (read(pipefd[0], &userns_id, sizeof(userns_id)) != sizeof(userns_id)) {
+ close(pipefd[0]);
+ kill(pid, SIGKILL);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to get namespace ID from child");
+ }
+ close(pipefd[0]);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ /* Verify namespace becomes inactive */
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+ if (ret < 0) {
+ if (errno == ENOSYS)
+ SKIP(return, "listns() not supported");
+ ASSERT_GE(ret, 0);
+ }
+
+ for (i = 0; i < ret; i++) {
+ if (ns_ids[i] == userns_id) {
+ found = true;
+ break;
+ }
+ }
+
+ ASSERT_FALSE(found);
+ TH_LOG("setresuid() correctly preserved active references (no leak)");
+}
+
+/*
+ * Test credential changes across multiple user namespaces.
+ * Create nested user namespaces and verify active reference tracking.
+ */
+TEST(cred_change_nested_userns)
+{
+ pid_t pid;
+ int status;
+ __u64 parent_userns_id, child_userns_id;
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = CLONE_NEWUSER,
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
+ __u64 ns_ids[256];
+ ssize_t ret;
+ int i;
+ bool found_parent = false, found_child = false;
+ int pipefd[2];
+
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child process */
+ int fd, userns_fd;
+ __u64 parent_id, child_id;
+ uid_t orig_uid = getuid();
+
+ close(pipefd[0]);
+
+ /* Create first user namespace */
+ userns_fd = get_userns_fd(0, orig_uid, 1);
+ if (userns_fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ if (setns(userns_fd, CLONE_NEWUSER) < 0) {
+ close(userns_fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(userns_fd);
+
+ /* Get first namespace ID */
+ fd = open("/proc/self/ns/user", O_RDONLY);
+ if (fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ if (ioctl(fd, NS_GET_ID, &parent_id) < 0) {
+ close(fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(fd);
+
+ /* Create nested user namespace */
+ userns_fd = get_userns_fd(0, 0, 1);
+ if (userns_fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ if (setns(userns_fd, CLONE_NEWUSER) < 0) {
+ close(userns_fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(userns_fd);
+
+ /* Get nested namespace ID */
+ fd = open("/proc/self/ns/user", O_RDONLY);
+ if (fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ if (ioctl(fd, NS_GET_ID, &child_id) < 0) {
+ close(fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(fd);
+
+ /* Send both IDs to parent */
+ write(pipefd[1], &parent_id, sizeof(parent_id));
+ write(pipefd[1], &child_id, sizeof(child_id));
+
+ /* Perform some credential changes in nested namespace */
+ setuid(0);
+ setgid(0);
+
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Parent process */
+ close(pipefd[1]);
+
+ /* Read both namespace IDs */
+ if (read(pipefd[0], &parent_userns_id, sizeof(parent_userns_id)) != sizeof(parent_userns_id)) {
+ close(pipefd[0]);
+ kill(pid, SIGKILL);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to get parent namespace ID");
+ }
+
+ if (read(pipefd[0], &child_userns_id, sizeof(child_userns_id)) != sizeof(child_userns_id)) {
+ close(pipefd[0]);
+ kill(pid, SIGKILL);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to get child namespace ID");
+ }
+ close(pipefd[0]);
+
+ TH_LOG("Parent userns: %llu, Child userns: %llu",
+ (unsigned long long)parent_userns_id,
+ (unsigned long long)child_userns_id);
+
+ /* Verify both namespaces are active */
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+ if (ret < 0) {
+ kill(pid, SIGKILL);
+ waitpid(pid, NULL, 0);
+ if (errno == ENOSYS)
+ SKIP(return, "listns() not supported");
+ ASSERT_GE(ret, 0);
+ }
+
+ for (i = 0; i < ret; i++) {
+ if (ns_ids[i] == parent_userns_id)
+ found_parent = true;
+ if (ns_ids[i] == child_userns_id)
+ found_child = true;
+ }
+
+ ASSERT_TRUE(found_parent);
+ ASSERT_TRUE(found_child);
+
+ /* Wait for child */
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ /* Verify both namespaces become inactive */
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+ ASSERT_GE(ret, 0);
+
+ found_parent = false;
+ found_child = false;
+ for (i = 0; i < ret; i++) {
+ if (ns_ids[i] == parent_userns_id)
+ found_parent = true;
+ if (ns_ids[i] == child_userns_id)
+ found_child = true;
+ }
+
+ ASSERT_FALSE(found_parent);
+ ASSERT_FALSE(found_child);
+ TH_LOG("Nested user namespace credential changes preserved active refs (no leak)");
+}
+
+/*
+ * Test that rapid credential changes don't cause refcount imbalances.
+ * This stress-tests the switch_cred_namespaces() logic.
+ */
+TEST(rapid_cred_changes_no_leak)
+{
+ pid_t pid;
+ int status;
+ __u64 userns_id;
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = CLONE_NEWUSER,
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
+ __u64 ns_ids[256];
+ ssize_t ret;
+ int i;
+ bool found = false;
+ int pipefd[2];
+
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child process */
+ int fd, userns_fd;
+ __u64 child_userns_id;
+ uid_t orig_uid = getuid();
+ int change_count;
+
+ close(pipefd[0]);
+
+ /* Create new user namespace with wider range of UIDs/GIDs */
+ userns_fd = get_userns_fd(0, orig_uid, 100);
+ if (userns_fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ if (setns(userns_fd, CLONE_NEWUSER) < 0) {
+ close(userns_fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(userns_fd);
+
+ /* Get user namespace ID */
+ fd = open("/proc/self/ns/user", O_RDONLY);
+ if (fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ if (ioctl(fd, NS_GET_ID, &child_userns_id) < 0) {
+ close(fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(fd);
+
+ write(pipefd[1], &child_userns_id, sizeof(child_userns_id));
+
+ /*
+ * Perform many rapid credential changes.
+ * Mix setuid, setgid, setreuid, setregid, setresuid, setresgid.
+ */
+ for (change_count = 0; change_count < 200; change_count++) {
+ switch (change_count % 6) {
+ case 0:
+ setuid(change_count % 50);
+ break;
+ case 1:
+ setgid(change_count % 50);
+ break;
+ case 2:
+ setreuid(change_count % 50, (change_count + 1) % 50);
+ break;
+ case 3:
+ setregid(change_count % 50, (change_count + 1) % 50);
+ break;
+ case 4:
+ setresuid(change_count % 50, (change_count + 1) % 50, (change_count + 2) % 50);
+ break;
+ case 5:
+ setresgid(change_count % 50, (change_count + 1) % 50, (change_count + 2) % 50);
+ break;
+ }
+ }
+
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Parent process */
+ close(pipefd[1]);
+
+ if (read(pipefd[0], &userns_id, sizeof(userns_id)) != sizeof(userns_id)) {
+ close(pipefd[0]);
+ kill(pid, SIGKILL);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to get namespace ID from child");
+ }
+ close(pipefd[0]);
+
+ TH_LOG("Testing with user namespace ID: %llu", (unsigned long long)userns_id);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ /* Verify namespace becomes inactive (no leaked active refs) */
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+ if (ret < 0) {
+ if (errno == ENOSYS)
+ SKIP(return, "listns() not supported");
+ ASSERT_GE(ret, 0);
+ }
+
+ for (i = 0; i < ret; i++) {
+ if (ns_ids[i] == userns_id) {
+ found = true;
+ break;
+ }
+ }
+
+ ASSERT_FALSE(found);
+ TH_LOG("200 rapid credential changes completed with no active ref leak");
+}
+
+/*
+ * Test setfsuid()/setfsgid(), which change the filesystem UID and GID.
+ * These also trigger credential changes but may have different code paths.
+ */
+TEST(setfsuid_preserves_active_refs)
+{
+ pid_t pid;
+ int status;
+ __u64 userns_id;
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = CLONE_NEWUSER,
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
+ __u64 ns_ids[256];
+ ssize_t ret;
+ int i;
+ bool found = false;
+ int pipefd[2];
+
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child process */
+ int fd, userns_fd;
+ __u64 child_userns_id;
+ uid_t orig_uid = getuid();
+ int change_count;
+
+ close(pipefd[0]);
+
+ /* Create new user namespace */
+ userns_fd = get_userns_fd(0, orig_uid, 10);
+ if (userns_fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ if (setns(userns_fd, CLONE_NEWUSER) < 0) {
+ close(userns_fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(userns_fd);
+
+ /* Get user namespace ID */
+ fd = open("/proc/self/ns/user", O_RDONLY);
+ if (fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ if (ioctl(fd, NS_GET_ID, &child_userns_id) < 0) {
+ close(fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(fd);
+
+ write(pipefd[1], &child_userns_id, sizeof(child_userns_id));
+
+ /* Perform multiple setfsuid/setfsgid calls */
+ for (change_count = 0; change_count < 50; change_count++) {
+ setfsuid(change_count % 10);
+ setfsgid(change_count % 10);
+ }
+
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Parent process */
+ close(pipefd[1]);
+
+ if (read(pipefd[0], &userns_id, sizeof(userns_id)) != sizeof(userns_id)) {
+ close(pipefd[0]);
+ kill(pid, SIGKILL);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to get namespace ID from child");
+ }
+ close(pipefd[0]);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ /* Verify namespace becomes inactive */
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+ if (ret < 0) {
+ if (errno == ENOSYS)
+ SKIP(return, "listns() not supported");
+ ASSERT_GE(ret, 0);
+ }
+
+ for (i = 0; i < ret; i++) {
+ if (ns_ids[i] == userns_id) {
+ found = true;
+ break;
+ }
+ }
+
+ ASSERT_FALSE(found);
+ TH_LOG("setfsuid/setfsgid correctly preserved active references (no leak)");
+}
+
+TEST_HARNESS_MAIN
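
The listns() plumbing above comes from the local wrappers.h; the syscall number is whatever that header defines for the kernel under test and is not assumed here. Reconstructed purely from how the tests use it, the request structure and wrapper would look roughly like this (field layout inferred from the designated initializers, so treat it as an assumption rather than the final uapi):

/*
 * Illustrative reconstruction of the listns() interface as used above.
 */
#define _GNU_SOURCE
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/types.h>

struct ns_id_req {
	__u32 size;	  /* sizeof(struct ns_id_req), for extensibility */
	__u32 spare;	  /* must be zero */
	__u64 ns_id;	  /* resume cursor: last ID seen, 0 to start */
	__u32 ns_type;	  /* CLONE_NEW* filter, 0 for all types */
	__u32 spare2;	  /* must be zero */
	__u64 user_ns_id; /* limit to namespaces owned by this userns, 0 for any */
};

#ifdef __NR_listns
/* Returns the number of IDs written to ns_ids, or -1 with errno set. */
static inline ssize_t sys_listns(const struct ns_id_req *req, __u64 *ns_ids,
				 size_t nr_ns_ids, unsigned int flags)
{
	return syscall(__NR_listns, req, ns_ids, nr_ns_ids, flags);
}
#endif

This also explains the ENOSYS checks sprinkled through the tests: on a kernel without listns() the wrapper fails with ENOSYS and the test skips instead of failing.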
diff --git a/tools/testing/selftests/namespaces/file_handle_test.c b/tools/testing/selftests/namespaces/file_handle_test.c
new file mode 100644
index 000000000000..064b41ad96b2
--- /dev/null
+++ b/tools/testing/selftests/namespaces/file_handle_test.c
@@ -0,0 +1,1429 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <grp.h>
+#include <limits.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <linux/unistd.h>
+#include "kselftest_harness.h"
+
+#ifndef FD_NSFS_ROOT
+#define FD_NSFS_ROOT -10003 /* Root of the nsfs filesystem */
+#endif
+
+TEST(nsfs_net_handle)
+{
+ struct file_handle *handle;
+ int mount_id;
+ int ret;
+ int fd;
+ int ns_fd;
+ struct stat st1, st2;
+
+ /* Drop to unprivileged uid/gid */
+ ASSERT_EQ(setresgid(65534, 65534, 65534), 0); /* nogroup */
+ ASSERT_EQ(setresuid(65534, 65534, 65534), 0); /* nobody */
+
+ handle = malloc(sizeof(*handle) + MAX_HANDLE_SZ);
+ ASSERT_NE(handle, NULL);
+
+ /* Open a namespace file descriptor */
+ ns_fd = open("/proc/self/ns/net", O_RDONLY);
+ ASSERT_GE(ns_fd, 0);
+
+ /* Get handle for the namespace */
+ handle->handle_bytes = MAX_HANDLE_SZ;
+ ret = name_to_handle_at(ns_fd, "", handle, &mount_id, AT_EMPTY_PATH);
+ if (ret < 0 && errno == EOPNOTSUPP) {
+ SKIP(free(handle); close(ns_fd);
+ return, "nsfs doesn't support file handles");
+ }
+ ASSERT_EQ(ret, 0);
+ ASSERT_GT(handle->handle_bytes, 0);
+
+ /* Try to open using FD_NSFS_ROOT as unprivileged user */
+ fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ if (fd < 0 && (errno == EINVAL || errno == EOPNOTSUPP)) {
+ SKIP(free(handle); close(ns_fd);
+ return,
+ "open_by_handle_at with FD_NSFS_ROOT not supported");
+ }
+ if (fd < 0 && errno == EPERM) {
+ SKIP(free(handle); close(ns_fd);
+ return,
+ "Permission denied for unprivileged user (expected)");
+ }
+ ASSERT_GE(fd, 0);
+
+ /* Verify we opened the correct namespace */
+ ASSERT_EQ(fstat(ns_fd, &st1), 0);
+ ASSERT_EQ(fstat(fd, &st2), 0);
+ ASSERT_EQ(st1.st_ino, st2.st_ino);
+ ASSERT_EQ(st1.st_dev, st2.st_dev);
+
+ close(fd);
+ close(ns_fd);
+ free(handle);
+}
+
+TEST(nsfs_uts_handle)
+{
+ struct file_handle *handle;
+ int mount_id;
+ int ret;
+ int fd;
+ int ns_fd;
+ struct stat st1, st2;
+
+ /* Drop to unprivileged uid/gid */
+ ASSERT_EQ(setresgid(65534, 65534, 65534), 0); /* nogroup */
+ ASSERT_EQ(setresuid(65534, 65534, 65534), 0); /* nobody */
+
+ handle = malloc(sizeof(*handle) + MAX_HANDLE_SZ);
+ ASSERT_NE(handle, NULL);
+
+ /* Open UTS namespace file descriptor */
+ ns_fd = open("/proc/self/ns/uts", O_RDONLY);
+ ASSERT_GE(ns_fd, 0);
+
+ /* Get handle for the namespace */
+ handle->handle_bytes = MAX_HANDLE_SZ;
+ ret = name_to_handle_at(ns_fd, "", handle, &mount_id, AT_EMPTY_PATH);
+ if (ret < 0 && errno == EOPNOTSUPP) {
+ SKIP(free(handle); close(ns_fd);
+ return, "nsfs doesn't support file handles");
+ }
+ ASSERT_EQ(ret, 0);
+ ASSERT_GT(handle->handle_bytes, 0);
+
+ /* Try to open using FD_NSFS_ROOT */
+ fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ if (fd < 0 && (errno == EINVAL || errno == EOPNOTSUPP)) {
+ SKIP(free(handle); close(ns_fd);
+ return,
+ "open_by_handle_at with FD_NSFS_ROOT not supported");
+ }
+ ASSERT_GE(fd, 0);
+
+ /* Verify we opened the correct namespace */
+ ASSERT_EQ(fstat(ns_fd, &st1), 0);
+ ASSERT_EQ(fstat(fd, &st2), 0);
+ ASSERT_EQ(st1.st_ino, st2.st_ino);
+ ASSERT_EQ(st1.st_dev, st2.st_dev);
+
+ close(fd);
+ close(ns_fd);
+ free(handle);
+}
+
+TEST(nsfs_ipc_handle)
+{
+ struct file_handle *handle;
+ int mount_id;
+ int ret;
+ int fd;
+ int ns_fd;
+ struct stat st1, st2;
+
+ /* Drop to unprivileged uid/gid */
+ ASSERT_EQ(setresgid(65534, 65534, 65534), 0); /* nogroup */
+ ASSERT_EQ(setresuid(65534, 65534, 65534), 0); /* nobody */
+
+ handle = malloc(sizeof(*handle) + MAX_HANDLE_SZ);
+ ASSERT_NE(handle, NULL);
+
+ /* Open IPC namespace file descriptor */
+ ns_fd = open("/proc/self/ns/ipc", O_RDONLY);
+ ASSERT_GE(ns_fd, 0);
+
+ /* Get handle for the namespace */
+ handle->handle_bytes = MAX_HANDLE_SZ;
+ ret = name_to_handle_at(ns_fd, "", handle, &mount_id, AT_EMPTY_PATH);
+ if (ret < 0 && errno == EOPNOTSUPP) {
+ SKIP(free(handle); close(ns_fd);
+ return, "nsfs doesn't support file handles");
+ }
+ ASSERT_EQ(ret, 0);
+ ASSERT_GT(handle->handle_bytes, 0);
+
+ /* Try to open using FD_NSFS_ROOT */
+ fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ if (fd < 0 && (errno == EINVAL || errno == EOPNOTSUPP)) {
+ SKIP(free(handle); close(ns_fd);
+ return,
+ "open_by_handle_at with FD_NSFS_ROOT not supported");
+ }
+ ASSERT_GE(fd, 0);
+
+ /* Verify we opened the correct namespace */
+ ASSERT_EQ(fstat(ns_fd, &st1), 0);
+ ASSERT_EQ(fstat(fd, &st2), 0);
+ ASSERT_EQ(st1.st_ino, st2.st_ino);
+ ASSERT_EQ(st1.st_dev, st2.st_dev);
+
+ close(fd);
+ close(ns_fd);
+ free(handle);
+}
+
+TEST(nsfs_pid_handle)
+{
+ struct file_handle *handle;
+ int mount_id;
+ int ret;
+ int fd;
+ int ns_fd;
+ struct stat st1, st2;
+
+ /* Drop to unprivileged uid/gid */
+ ASSERT_EQ(setresgid(65534, 65534, 65534), 0); /* nogroup */
+ ASSERT_EQ(setresuid(65534, 65534, 65534), 0); /* nobody */
+
+ handle = malloc(sizeof(*handle) + MAX_HANDLE_SZ);
+ ASSERT_NE(handle, NULL);
+
+ /* Open PID namespace file descriptor */
+ ns_fd = open("/proc/self/ns/pid", O_RDONLY);
+ ASSERT_GE(ns_fd, 0);
+
+ /* Get handle for the namespace */
+ handle->handle_bytes = MAX_HANDLE_SZ;
+ ret = name_to_handle_at(ns_fd, "", handle, &mount_id, AT_EMPTY_PATH);
+ if (ret < 0 && errno == EOPNOTSUPP) {
+ SKIP(free(handle); close(ns_fd);
+ return, "nsfs doesn't support file handles");
+ }
+ ASSERT_EQ(ret, 0);
+ ASSERT_GT(handle->handle_bytes, 0);
+
+ /* Try to open using FD_NSFS_ROOT */
+ fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ if (fd < 0 && (errno == EINVAL || errno == EOPNOTSUPP)) {
+ SKIP(free(handle); close(ns_fd);
+ return,
+ "open_by_handle_at with FD_NSFS_ROOT not supported");
+ }
+ ASSERT_GE(fd, 0);
+
+ /* Verify we opened the correct namespace */
+ ASSERT_EQ(fstat(ns_fd, &st1), 0);
+ ASSERT_EQ(fstat(fd, &st2), 0);
+ ASSERT_EQ(st1.st_ino, st2.st_ino);
+ ASSERT_EQ(st1.st_dev, st2.st_dev);
+
+ close(fd);
+ close(ns_fd);
+ free(handle);
+}
+
+TEST(nsfs_mnt_handle)
+{
+ struct file_handle *handle;
+ int mount_id;
+ int ret;
+ int fd;
+ int ns_fd;
+ struct stat st1, st2;
+
+ /* Drop to unprivileged uid/gid */
+ ASSERT_EQ(setresgid(65534, 65534, 65534), 0); /* nogroup */
+ ASSERT_EQ(setresuid(65534, 65534, 65534), 0); /* nobody */
+
+ handle = malloc(sizeof(*handle) + MAX_HANDLE_SZ);
+ ASSERT_NE(handle, NULL);
+
+ /* Open mount namespace file descriptor */
+ ns_fd = open("/proc/self/ns/mnt", O_RDONLY);
+ ASSERT_GE(ns_fd, 0);
+
+ /* Get handle for the namespace */
+ handle->handle_bytes = MAX_HANDLE_SZ;
+ ret = name_to_handle_at(ns_fd, "", handle, &mount_id, AT_EMPTY_PATH);
+ if (ret < 0 && errno == EOPNOTSUPP) {
+ SKIP(free(handle); close(ns_fd);
+ return, "nsfs doesn't support file handles");
+ }
+ ASSERT_EQ(ret, 0);
+ ASSERT_GT(handle->handle_bytes, 0);
+
+ /* Try to open using FD_NSFS_ROOT */
+ fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ if (fd < 0 && (errno == EINVAL || errno == EOPNOTSUPP)) {
+ SKIP(free(handle); close(ns_fd);
+ return,
+ "open_by_handle_at with FD_NSFS_ROOT not supported");
+ }
+ ASSERT_GE(fd, 0);
+
+ /* Verify we opened the correct namespace */
+ ASSERT_EQ(fstat(ns_fd, &st1), 0);
+ ASSERT_EQ(fstat(fd, &st2), 0);
+ ASSERT_EQ(st1.st_ino, st2.st_ino);
+ ASSERT_EQ(st1.st_dev, st2.st_dev);
+
+ close(fd);
+ close(ns_fd);
+ free(handle);
+}
+
+TEST(nsfs_user_handle)
+{
+ struct file_handle *handle;
+ int mount_id;
+ int ret;
+ int fd;
+ int ns_fd;
+ struct stat st1, st2;
+
+ /* Drop to unprivileged uid/gid */
+ ASSERT_EQ(setresgid(65534, 65534, 65534), 0); /* nogroup */
+ ASSERT_EQ(setresuid(65534, 65534, 65534), 0); /* nobody */
+
+ handle = malloc(sizeof(*handle) + MAX_HANDLE_SZ);
+ ASSERT_NE(handle, NULL);
+
+ /* Open user namespace file descriptor */
+ ns_fd = open("/proc/self/ns/user", O_RDONLY);
+ ASSERT_GE(ns_fd, 0);
+
+ /* Get handle for the namespace */
+ handle->handle_bytes = MAX_HANDLE_SZ;
+ ret = name_to_handle_at(ns_fd, "", handle, &mount_id, AT_EMPTY_PATH);
+ if (ret < 0 && errno == EOPNOTSUPP) {
+ SKIP(free(handle); close(ns_fd);
+ return, "nsfs doesn't support file handles");
+ }
+ ASSERT_EQ(ret, 0);
+ ASSERT_GT(handle->handle_bytes, 0);
+
+ /* Try to open using FD_NSFS_ROOT */
+ fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ if (fd < 0 && (errno == EINVAL || errno == EOPNOTSUPP)) {
+ SKIP(free(handle); close(ns_fd);
+ return,
+ "open_by_handle_at with FD_NSFS_ROOT not supported");
+ }
+ ASSERT_GE(fd, 0);
+
+ /* Verify we opened the correct namespace */
+ ASSERT_EQ(fstat(ns_fd, &st1), 0);
+ ASSERT_EQ(fstat(fd, &st2), 0);
+ ASSERT_EQ(st1.st_ino, st2.st_ino);
+ ASSERT_EQ(st1.st_dev, st2.st_dev);
+
+ close(fd);
+ close(ns_fd);
+ free(handle);
+}
+
+TEST(nsfs_cgroup_handle)
+{
+ struct file_handle *handle;
+ int mount_id;
+ int ret;
+ int fd;
+ int ns_fd;
+ struct stat st1, st2;
+
+ /* Drop to unprivileged uid/gid */
+ ASSERT_EQ(setresgid(65534, 65534, 65534), 0); /* nogroup */
+ ASSERT_EQ(setresuid(65534, 65534, 65534), 0); /* nobody */
+
+ handle = malloc(sizeof(*handle) + MAX_HANDLE_SZ);
+ ASSERT_NE(handle, NULL);
+
+ /* Open cgroup namespace file descriptor */
+ ns_fd = open("/proc/self/ns/cgroup", O_RDONLY);
+ if (ns_fd < 0) {
+ SKIP(free(handle); return, "cgroup namespace not available");
+ }
+
+ /* Get handle for the namespace */
+ handle->handle_bytes = MAX_HANDLE_SZ;
+ ret = name_to_handle_at(ns_fd, "", handle, &mount_id, AT_EMPTY_PATH);
+ if (ret < 0 && errno == EOPNOTSUPP) {
+ SKIP(free(handle); close(ns_fd);
+ return, "nsfs doesn't support file handles");
+ }
+ ASSERT_EQ(ret, 0);
+ ASSERT_GT(handle->handle_bytes, 0);
+
+ /* Try to open using FD_NSFS_ROOT */
+ fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ if (fd < 0 && (errno == EINVAL || errno == EOPNOTSUPP)) {
+ SKIP(free(handle); close(ns_fd);
+ return,
+ "open_by_handle_at with FD_NSFS_ROOT not supported");
+ }
+ ASSERT_GE(fd, 0);
+
+ /* Verify we opened the correct namespace */
+ ASSERT_EQ(fstat(ns_fd, &st1), 0);
+ ASSERT_EQ(fstat(fd, &st2), 0);
+ ASSERT_EQ(st1.st_ino, st2.st_ino);
+ ASSERT_EQ(st1.st_dev, st2.st_dev);
+
+ close(fd);
+ close(ns_fd);
+ free(handle);
+}
+
+TEST(nsfs_time_handle)
+{
+ struct file_handle *handle;
+ int mount_id;
+ int ret;
+ int fd;
+ int ns_fd;
+ struct stat st1, st2;
+
+ /* Drop to unprivileged uid/gid */
+ ASSERT_EQ(setresgid(65534, 65534, 65534), 0); /* nogroup */
+ ASSERT_EQ(setresuid(65534, 65534, 65534), 0); /* nobody */
+
+ handle = malloc(sizeof(*handle) + MAX_HANDLE_SZ);
+ ASSERT_NE(handle, NULL);
+
+ /* Open time namespace file descriptor */
+ ns_fd = open("/proc/self/ns/time", O_RDONLY);
+ if (ns_fd < 0) {
+ SKIP(free(handle); return, "time namespace not available");
+ }
+
+ /* Get handle for the namespace */
+ handle->handle_bytes = MAX_HANDLE_SZ;
+ ret = name_to_handle_at(ns_fd, "", handle, &mount_id, AT_EMPTY_PATH);
+ if (ret < 0 && errno == EOPNOTSUPP) {
+ SKIP(free(handle); close(ns_fd);
+ return, "nsfs doesn't support file handles");
+ }
+ ASSERT_EQ(ret, 0);
+ ASSERT_GT(handle->handle_bytes, 0);
+
+ /* Try to open using FD_NSFS_ROOT */
+ fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ if (fd < 0 && (errno == EINVAL || errno == EOPNOTSUPP)) {
+ SKIP(free(handle); close(ns_fd);
+ return,
+ "open_by_handle_at with FD_NSFS_ROOT not supported");
+ }
+ ASSERT_GE(fd, 0);
+
+ /* Verify we opened the correct namespace */
+ ASSERT_EQ(fstat(ns_fd, &st1), 0);
+ ASSERT_EQ(fstat(fd, &st2), 0);
+ ASSERT_EQ(st1.st_ino, st2.st_ino);
+ ASSERT_EQ(st1.st_dev, st2.st_dev);
+
+ close(fd);
+ close(ns_fd);
+ free(handle);
+}
+
+TEST(nsfs_user_net_namespace_isolation)
+{
+ struct file_handle *handle;
+ int mount_id;
+ int ret;
+ int fd;
+ int ns_fd;
+ pid_t pid;
+ int status;
+ int pipefd[2];
+ char result;
+
+ handle = malloc(sizeof(*handle) + MAX_HANDLE_SZ);
+ ASSERT_NE(handle, NULL);
+
+ /* Create pipe for communication */
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ /* Get handle for current network namespace */
+ ns_fd = open("/proc/self/ns/net", O_RDONLY);
+ ASSERT_GE(ns_fd, 0);
+
+ handle->handle_bytes = MAX_HANDLE_SZ;
+ ret = name_to_handle_at(ns_fd, "", handle, &mount_id, AT_EMPTY_PATH);
+ if (ret < 0 && errno == EOPNOTSUPP) {
+ SKIP(free(handle); close(ns_fd); close(pipefd[0]);
+ close(pipefd[1]);
+ return, "nsfs doesn't support file handles");
+ }
+ ASSERT_EQ(ret, 0);
+ close(ns_fd);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child process */
+ close(pipefd[0]);
+
+ /* First create new user namespace to drop privileges */
+ ret = unshare(CLONE_NEWUSER);
+ if (ret < 0) {
+ write(pipefd[1], "U",
+ 1); /* Unable to create user namespace */
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Write uid/gid mappings to maintain some capabilities */
+ int uid_map_fd = open("/proc/self/uid_map", O_WRONLY);
+ int gid_map_fd = open("/proc/self/gid_map", O_WRONLY);
+ int setgroups_fd = open("/proc/self/setgroups", O_WRONLY);
+
+ if (uid_map_fd < 0 || gid_map_fd < 0 || setgroups_fd < 0) {
+ write(pipefd[1], "M", 1); /* Unable to set mappings */
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Disable setgroups to allow gid mapping */
+ write(setgroups_fd, "deny", 4);
+ close(setgroups_fd);
+
+ /* Map current uid/gid to root in the new namespace */
+ char mapping[64];
+ snprintf(mapping, sizeof(mapping), "0 %d 1", getuid());
+ write(uid_map_fd, mapping, strlen(mapping));
+ close(uid_map_fd);
+
+ snprintf(mapping, sizeof(mapping), "0 %d 1", getgid());
+ write(gid_map_fd, mapping, strlen(mapping));
+ close(gid_map_fd);
+
+ /* Now create new network namespace */
+ ret = unshare(CLONE_NEWNET);
+ if (ret < 0) {
+ write(pipefd[1], "N",
+ 1); /* Unable to create network namespace */
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Try to open parent's network namespace handle from new user+net namespace */
+ fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+
+ if (fd >= 0) {
+ /* Should NOT succeed - we're in a different user namespace */
+ write(pipefd[1], "S", 1); /* Unexpected success */
+ close(fd);
+ } else if (errno == ESTALE) {
+ /* Expected: Stale file handle */
+ write(pipefd[1], "P", 1);
+ } else {
+ /* Other error */
+ write(pipefd[1], "F", 1);
+ }
+
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Parent process */
+ close(pipefd[1]);
+ ASSERT_EQ(read(pipefd[0], &result, 1), 1);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ if (result == 'U') {
+ SKIP(free(handle); close(pipefd[0]);
+ return, "Cannot create new user namespace");
+ }
+ if (result == 'M') {
+ SKIP(free(handle); close(pipefd[0]);
+ return, "Cannot set uid/gid mappings");
+ }
+ if (result == 'N') {
+ SKIP(free(handle); close(pipefd[0]);
+ return, "Cannot create new network namespace");
+ }
+
+	/* Should fail with ESTALE since we're in a different user namespace */
+ ASSERT_EQ(result, 'P');
+
+ close(pipefd[0]);
+ free(handle);
+}
+
+TEST(nsfs_user_uts_namespace_isolation)
+{
+ struct file_handle *handle;
+ int mount_id;
+ int ret;
+ int fd;
+ int ns_fd;
+ pid_t pid;
+ int status;
+ int pipefd[2];
+ char result;
+
+ handle = malloc(sizeof(*handle) + MAX_HANDLE_SZ);
+ ASSERT_NE(handle, NULL);
+
+ /* Create pipe for communication */
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ /* Get handle for current UTS namespace */
+ ns_fd = open("/proc/self/ns/uts", O_RDONLY);
+ ASSERT_GE(ns_fd, 0);
+
+ handle->handle_bytes = MAX_HANDLE_SZ;
+ ret = name_to_handle_at(ns_fd, "", handle, &mount_id, AT_EMPTY_PATH);
+ if (ret < 0 && errno == EOPNOTSUPP) {
+ SKIP(free(handle); close(ns_fd); close(pipefd[0]);
+ close(pipefd[1]);
+ return, "nsfs doesn't support file handles");
+ }
+ ASSERT_EQ(ret, 0);
+ close(ns_fd);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child process */
+ close(pipefd[0]);
+
+ /* First create new user namespace to drop privileges */
+ ret = unshare(CLONE_NEWUSER);
+ if (ret < 0) {
+ write(pipefd[1], "U",
+ 1); /* Unable to create user namespace */
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Write uid/gid mappings to maintain some capabilities */
+ int uid_map_fd = open("/proc/self/uid_map", O_WRONLY);
+ int gid_map_fd = open("/proc/self/gid_map", O_WRONLY);
+ int setgroups_fd = open("/proc/self/setgroups", O_WRONLY);
+
+ if (uid_map_fd < 0 || gid_map_fd < 0 || setgroups_fd < 0) {
+ write(pipefd[1], "M", 1); /* Unable to set mappings */
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Disable setgroups to allow gid mapping */
+ write(setgroups_fd, "deny", 4);
+ close(setgroups_fd);
+
+ /* Map current uid/gid to root in the new namespace */
+ char mapping[64];
+ snprintf(mapping, sizeof(mapping), "0 %d 1", getuid());
+ write(uid_map_fd, mapping, strlen(mapping));
+ close(uid_map_fd);
+
+ snprintf(mapping, sizeof(mapping), "0 %d 1", getgid());
+ write(gid_map_fd, mapping, strlen(mapping));
+ close(gid_map_fd);
+
+ /* Now create new UTS namespace */
+ ret = unshare(CLONE_NEWUTS);
+ if (ret < 0) {
+ write(pipefd[1], "N",
+ 1); /* Unable to create UTS namespace */
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Try to open parent's UTS namespace handle from new user+uts namespace */
+ fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+
+ if (fd >= 0) {
+ /* Should NOT succeed - we're in a different user namespace */
+ write(pipefd[1], "S", 1); /* Unexpected success */
+ close(fd);
+ } else if (errno == ESTALE) {
+ /* Expected: Stale file handle */
+ write(pipefd[1], "P", 1);
+ } else {
+ /* Other error */
+ write(pipefd[1], "F", 1);
+ }
+
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Parent process */
+ close(pipefd[1]);
+ ASSERT_EQ(read(pipefd[0], &result, 1), 1);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ if (result == 'U') {
+ SKIP(free(handle); close(pipefd[0]);
+ return, "Cannot create new user namespace");
+ }
+ if (result == 'M') {
+ SKIP(free(handle); close(pipefd[0]);
+ return, "Cannot set uid/gid mappings");
+ }
+ if (result == 'N') {
+ SKIP(free(handle); close(pipefd[0]);
+ return, "Cannot create new UTS namespace");
+ }
+
+ /* Should fail with ESTALE since we're in a different user namespace */
+ ASSERT_EQ(result, 'P');
+
+ close(pipefd[0]);
+ free(handle);
+}
+
+TEST(nsfs_user_ipc_namespace_isolation)
+{
+ struct file_handle *handle;
+ int mount_id;
+ int ret;
+ int fd;
+ int ns_fd;
+ pid_t pid;
+ int status;
+ int pipefd[2];
+ char result;
+
+ handle = malloc(sizeof(*handle) + MAX_HANDLE_SZ);
+ ASSERT_NE(handle, NULL);
+
+ /* Create pipe for communication */
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ /* Get handle for current IPC namespace */
+ ns_fd = open("/proc/self/ns/ipc", O_RDONLY);
+ ASSERT_GE(ns_fd, 0);
+
+ handle->handle_bytes = MAX_HANDLE_SZ;
+ ret = name_to_handle_at(ns_fd, "", handle, &mount_id, AT_EMPTY_PATH);
+ if (ret < 0 && errno == EOPNOTSUPP) {
+ SKIP(free(handle); close(ns_fd); close(pipefd[0]);
+ close(pipefd[1]);
+ return, "nsfs doesn't support file handles");
+ }
+ ASSERT_EQ(ret, 0);
+ close(ns_fd);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child process */
+ close(pipefd[0]);
+
+ /* First create new user namespace to drop privileges */
+ ret = unshare(CLONE_NEWUSER);
+ if (ret < 0) {
+ write(pipefd[1], "U",
+ 1); /* Unable to create user namespace */
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Write uid/gid mappings to maintain some capabilities */
+ int uid_map_fd = open("/proc/self/uid_map", O_WRONLY);
+ int gid_map_fd = open("/proc/self/gid_map", O_WRONLY);
+ int setgroups_fd = open("/proc/self/setgroups", O_WRONLY);
+
+ if (uid_map_fd < 0 || gid_map_fd < 0 || setgroups_fd < 0) {
+ write(pipefd[1], "M", 1); /* Unable to set mappings */
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Disable setgroups to allow gid mapping */
+ write(setgroups_fd, "deny", 4);
+ close(setgroups_fd);
+
+ /* Map current uid/gid to root in the new namespace */
+ char mapping[64];
+ snprintf(mapping, sizeof(mapping), "0 %d 1", getuid());
+ write(uid_map_fd, mapping, strlen(mapping));
+ close(uid_map_fd);
+
+ snprintf(mapping, sizeof(mapping), "0 %d 1", getgid());
+ write(gid_map_fd, mapping, strlen(mapping));
+ close(gid_map_fd);
+
+ /* Now create new IPC namespace */
+ ret = unshare(CLONE_NEWIPC);
+ if (ret < 0) {
+ write(pipefd[1], "N",
+ 1); /* Unable to create IPC namespace */
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Try to open parent's IPC namespace handle from new user+ipc namespace */
+ fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+
+ if (fd >= 0) {
+ /* Should NOT succeed - we're in a different user namespace */
+ write(pipefd[1], "S", 1); /* Unexpected success */
+ close(fd);
+ } else if (errno == ESTALE) {
+ /* Expected: Stale file handle */
+ write(pipefd[1], "P", 1);
+ } else {
+ /* Other error */
+ write(pipefd[1], "F", 1);
+ }
+
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Parent process */
+ close(pipefd[1]);
+ ASSERT_EQ(read(pipefd[0], &result, 1), 1);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ if (result == 'U') {
+ SKIP(free(handle); close(pipefd[0]);
+ return, "Cannot create new user namespace");
+ }
+ if (result == 'M') {
+ SKIP(free(handle); close(pipefd[0]);
+ return, "Cannot set uid/gid mappings");
+ }
+ if (result == 'N') {
+ SKIP(free(handle); close(pipefd[0]);
+ return, "Cannot create new IPC namespace");
+ }
+
+ /* Should fail with ESTALE since we're in a different user namespace */
+ ASSERT_EQ(result, 'P');
+
+ close(pipefd[0]);
+ free(handle);
+}
+
+TEST(nsfs_user_mnt_namespace_isolation)
+{
+ struct file_handle *handle;
+ int mount_id;
+ int ret;
+ int fd;
+ int ns_fd;
+ pid_t pid;
+ int status;
+ int pipefd[2];
+ char result;
+
+ handle = malloc(sizeof(*handle) + MAX_HANDLE_SZ);
+ ASSERT_NE(handle, NULL);
+
+ /* Create pipe for communication */
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ /* Get handle for current mount namespace */
+ ns_fd = open("/proc/self/ns/mnt", O_RDONLY);
+ ASSERT_GE(ns_fd, 0);
+
+ handle->handle_bytes = MAX_HANDLE_SZ;
+ ret = name_to_handle_at(ns_fd, "", handle, &mount_id, AT_EMPTY_PATH);
+ if (ret < 0 && errno == EOPNOTSUPP) {
+ SKIP(free(handle); close(ns_fd); close(pipefd[0]);
+ close(pipefd[1]);
+ return, "nsfs doesn't support file handles");
+ }
+ ASSERT_EQ(ret, 0);
+ close(ns_fd);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child process */
+ close(pipefd[0]);
+
+ /* First create new user namespace to drop privileges */
+ ret = unshare(CLONE_NEWUSER);
+ if (ret < 0) {
+ write(pipefd[1], "U",
+ 1); /* Unable to create user namespace */
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Write uid/gid mappings to maintain some capabilities */
+ int uid_map_fd = open("/proc/self/uid_map", O_WRONLY);
+ int gid_map_fd = open("/proc/self/gid_map", O_WRONLY);
+ int setgroups_fd = open("/proc/self/setgroups", O_WRONLY);
+
+ if (uid_map_fd < 0 || gid_map_fd < 0 || setgroups_fd < 0) {
+ write(pipefd[1], "M", 1); /* Unable to set mappings */
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Disable setgroups to allow gid mapping */
+ write(setgroups_fd, "deny", 4);
+ close(setgroups_fd);
+
+ /* Map current uid/gid to root in the new namespace */
+ char mapping[64];
+ snprintf(mapping, sizeof(mapping), "0 %d 1", getuid());
+ write(uid_map_fd, mapping, strlen(mapping));
+ close(uid_map_fd);
+
+ snprintf(mapping, sizeof(mapping), "0 %d 1", getgid());
+ write(gid_map_fd, mapping, strlen(mapping));
+ close(gid_map_fd);
+
+ /* Now create new mount namespace */
+ ret = unshare(CLONE_NEWNS);
+ if (ret < 0) {
+ write(pipefd[1], "N",
+ 1); /* Unable to create mount namespace */
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Try to open parent's mount namespace handle from new user+mnt namespace */
+ fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+
+ if (fd >= 0) {
+ /* Should NOT succeed - we're in a different user namespace */
+ write(pipefd[1], "S", 1); /* Unexpected success */
+ close(fd);
+ } else if (errno == ESTALE) {
+ /* Expected: Stale file handle */
+ write(pipefd[1], "P", 1);
+ } else {
+ /* Other error */
+ write(pipefd[1], "F", 1);
+ }
+
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Parent process */
+ close(pipefd[1]);
+ ASSERT_EQ(read(pipefd[0], &result, 1), 1);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ if (result == 'U') {
+ SKIP(free(handle); close(pipefd[0]);
+ return, "Cannot create new user namespace");
+ }
+ if (result == 'M') {
+ SKIP(free(handle); close(pipefd[0]);
+ return, "Cannot set uid/gid mappings");
+ }
+ if (result == 'N') {
+ SKIP(free(handle); close(pipefd[0]);
+ return, "Cannot create new mount namespace");
+ }
+
+ /* Should fail with ESTALE since we're in a different user namespace */
+ ASSERT_EQ(result, 'P');
+
+ close(pipefd[0]);
+ free(handle);
+}
+
+TEST(nsfs_user_cgroup_namespace_isolation)
+{
+ struct file_handle *handle;
+ int mount_id;
+ int ret;
+ int fd;
+ int ns_fd;
+ pid_t pid;
+ int status;
+ int pipefd[2];
+ char result;
+
+ handle = malloc(sizeof(*handle) + MAX_HANDLE_SZ);
+ ASSERT_NE(handle, NULL);
+
+ /* Create pipe for communication */
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ /* Get handle for current cgroup namespace */
+ ns_fd = open("/proc/self/ns/cgroup", O_RDONLY);
+ if (ns_fd < 0) {
+ SKIP(free(handle); close(pipefd[0]); close(pipefd[1]);
+ return, "cgroup namespace not available");
+ }
+
+ handle->handle_bytes = MAX_HANDLE_SZ;
+ ret = name_to_handle_at(ns_fd, "", handle, &mount_id, AT_EMPTY_PATH);
+ if (ret < 0 && errno == EOPNOTSUPP) {
+ SKIP(free(handle); close(ns_fd); close(pipefd[0]);
+ close(pipefd[1]);
+ return, "nsfs doesn't support file handles");
+ }
+ ASSERT_EQ(ret, 0);
+ close(ns_fd);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child process */
+ close(pipefd[0]);
+
+ /* First create new user namespace to drop privileges */
+ ret = unshare(CLONE_NEWUSER);
+ if (ret < 0) {
+ write(pipefd[1], "U",
+ 1); /* Unable to create user namespace */
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Write uid/gid mappings to maintain some capabilities */
+ int uid_map_fd = open("/proc/self/uid_map", O_WRONLY);
+ int gid_map_fd = open("/proc/self/gid_map", O_WRONLY);
+ int setgroups_fd = open("/proc/self/setgroups", O_WRONLY);
+
+ if (uid_map_fd < 0 || gid_map_fd < 0 || setgroups_fd < 0) {
+ write(pipefd[1], "M", 1); /* Unable to set mappings */
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Disable setgroups to allow gid mapping */
+ write(setgroups_fd, "deny", 4);
+ close(setgroups_fd);
+
+ /* Map current uid/gid to root in the new namespace */
+ char mapping[64];
+ snprintf(mapping, sizeof(mapping), "0 %d 1", getuid());
+ write(uid_map_fd, mapping, strlen(mapping));
+ close(uid_map_fd);
+
+ snprintf(mapping, sizeof(mapping), "0 %d 1", getgid());
+ write(gid_map_fd, mapping, strlen(mapping));
+ close(gid_map_fd);
+
+ /* Now create new cgroup namespace */
+ ret = unshare(CLONE_NEWCGROUP);
+ if (ret < 0) {
+ write(pipefd[1], "N",
+ 1); /* Unable to create cgroup namespace */
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Try to open parent's cgroup namespace handle from new user+cgroup namespace */
+ fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+
+ if (fd >= 0) {
+ /* Should NOT succeed - we're in a different user namespace */
+ write(pipefd[1], "S", 1); /* Unexpected success */
+ close(fd);
+ } else if (errno == ESTALE) {
+ /* Expected: Stale file handle */
+ write(pipefd[1], "P", 1);
+ } else {
+ /* Other error */
+ write(pipefd[1], "F", 1);
+ }
+
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Parent process */
+ close(pipefd[1]);
+ ASSERT_EQ(read(pipefd[0], &result, 1), 1);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ if (result == 'U') {
+ SKIP(free(handle); close(pipefd[0]);
+ return, "Cannot create new user namespace");
+ }
+ if (result == 'M') {
+ SKIP(free(handle); close(pipefd[0]);
+ return, "Cannot set uid/gid mappings");
+ }
+ if (result == 'N') {
+ SKIP(free(handle); close(pipefd[0]);
+ return, "Cannot create new cgroup namespace");
+ }
+
+ /* Should fail with ESTALE since we're in a different user namespace */
+ ASSERT_EQ(result, 'P');
+
+ close(pipefd[0]);
+ free(handle);
+}
+
+TEST(nsfs_user_pid_namespace_isolation)
+{
+ struct file_handle *handle;
+ int mount_id;
+ int ret;
+ int fd;
+ int ns_fd;
+ pid_t pid;
+ int status;
+ int pipefd[2];
+ char result;
+
+ handle = malloc(sizeof(*handle) + MAX_HANDLE_SZ);
+ ASSERT_NE(handle, NULL);
+
+ /* Create pipe for communication */
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ /* Get handle for current PID namespace */
+ ns_fd = open("/proc/self/ns/pid", O_RDONLY);
+ ASSERT_GE(ns_fd, 0);
+
+ handle->handle_bytes = MAX_HANDLE_SZ;
+ ret = name_to_handle_at(ns_fd, "", handle, &mount_id, AT_EMPTY_PATH);
+ if (ret < 0 && errno == EOPNOTSUPP) {
+ SKIP(free(handle); close(ns_fd); close(pipefd[0]);
+ close(pipefd[1]);
+ return, "nsfs doesn't support file handles");
+ }
+ ASSERT_EQ(ret, 0);
+ close(ns_fd);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child process */
+ close(pipefd[0]);
+
+ /* First create new user namespace to drop privileges */
+ ret = unshare(CLONE_NEWUSER);
+ if (ret < 0) {
+ write(pipefd[1], "U",
+ 1); /* Unable to create user namespace */
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Write uid/gid mappings to maintain some capabilities */
+ int uid_map_fd = open("/proc/self/uid_map", O_WRONLY);
+ int gid_map_fd = open("/proc/self/gid_map", O_WRONLY);
+ int setgroups_fd = open("/proc/self/setgroups", O_WRONLY);
+
+ if (uid_map_fd < 0 || gid_map_fd < 0 || setgroups_fd < 0) {
+ write(pipefd[1], "M", 1); /* Unable to set mappings */
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Disable setgroups to allow gid mapping */
+ write(setgroups_fd, "deny", 4);
+ close(setgroups_fd);
+
+ /* Map current uid/gid to root in the new namespace */
+ char mapping[64];
+ snprintf(mapping, sizeof(mapping), "0 %d 1", getuid());
+ write(uid_map_fd, mapping, strlen(mapping));
+ close(uid_map_fd);
+
+ snprintf(mapping, sizeof(mapping), "0 %d 1", getgid());
+ write(gid_map_fd, mapping, strlen(mapping));
+ close(gid_map_fd);
+
+ /* Now create new PID namespace - requires fork to take effect */
+ ret = unshare(CLONE_NEWPID);
+ if (ret < 0) {
+ write(pipefd[1], "N",
+ 1); /* Unable to create PID namespace */
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Fork again for PID namespace to take effect */
+ pid_t child_pid = fork();
+ if (child_pid < 0) {
+ write(pipefd[1], "N",
+ 1); /* Unable to fork in PID namespace */
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ if (child_pid == 0) {
+ /* Grandchild in new PID namespace */
+ /* Try to open parent's PID namespace handle from new user+pid namespace */
+ fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+
+ if (fd >= 0) {
+ /* Should NOT succeed - we're in a different user namespace */
+ write(pipefd[1], "S",
+ 1); /* Unexpected success */
+ close(fd);
+ } else if (errno == ESTALE) {
+ /* Expected: Stale file handle */
+ write(pipefd[1], "P", 1);
+ } else {
+ /* Other error */
+ write(pipefd[1], "F", 1);
+ }
+
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Wait for grandchild */
+ waitpid(child_pid, NULL, 0);
+ exit(0);
+ }
+
+ /* Parent process */
+ close(pipefd[1]);
+ ASSERT_EQ(read(pipefd[0], &result, 1), 1);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ if (result == 'U') {
+ SKIP(free(handle); close(pipefd[0]);
+ return, "Cannot create new user namespace");
+ }
+ if (result == 'M') {
+ SKIP(free(handle); close(pipefd[0]);
+ return, "Cannot set uid/gid mappings");
+ }
+ if (result == 'N') {
+ SKIP(free(handle); close(pipefd[0]);
+ return, "Cannot create new PID namespace");
+ }
+
+ /* Should fail with ESTALE since we're in a different user namespace */
+ ASSERT_EQ(result, 'P');
+
+ close(pipefd[0]);
+ free(handle);
+}
+
+TEST(nsfs_user_time_namespace_isolation)
+{
+ struct file_handle *handle;
+ int mount_id;
+ int ret;
+ int fd;
+ int ns_fd;
+ pid_t pid;
+ int status;
+ int pipefd[2];
+ char result;
+
+ handle = malloc(sizeof(*handle) + MAX_HANDLE_SZ);
+ ASSERT_NE(handle, NULL);
+
+ /* Create pipe for communication */
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ /* Get handle for current time namespace */
+ ns_fd = open("/proc/self/ns/time", O_RDONLY);
+ if (ns_fd < 0) {
+ SKIP(free(handle); close(pipefd[0]); close(pipefd[1]);
+ return, "time namespace not available");
+ }
+
+ handle->handle_bytes = MAX_HANDLE_SZ;
+ ret = name_to_handle_at(ns_fd, "", handle, &mount_id, AT_EMPTY_PATH);
+ if (ret < 0 && errno == EOPNOTSUPP) {
+ SKIP(free(handle); close(ns_fd); close(pipefd[0]);
+ close(pipefd[1]);
+ return, "nsfs doesn't support file handles");
+ }
+ ASSERT_EQ(ret, 0);
+ close(ns_fd);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child process */
+ close(pipefd[0]);
+
+ /* First create new user namespace to drop privileges */
+ ret = unshare(CLONE_NEWUSER);
+ if (ret < 0) {
+ write(pipefd[1], "U",
+ 1); /* Unable to create user namespace */
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Write uid/gid mappings to maintain some capabilities */
+ int uid_map_fd = open("/proc/self/uid_map", O_WRONLY);
+ int gid_map_fd = open("/proc/self/gid_map", O_WRONLY);
+ int setgroups_fd = open("/proc/self/setgroups", O_WRONLY);
+
+ if (uid_map_fd < 0 || gid_map_fd < 0 || setgroups_fd < 0) {
+ write(pipefd[1], "M", 1); /* Unable to set mappings */
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Disable setgroups to allow gid mapping */
+ write(setgroups_fd, "deny", 4);
+ close(setgroups_fd);
+
+ /* Map current uid/gid to root in the new namespace */
+ char mapping[64];
+ snprintf(mapping, sizeof(mapping), "0 %d 1", getuid());
+ write(uid_map_fd, mapping, strlen(mapping));
+ close(uid_map_fd);
+
+ snprintf(mapping, sizeof(mapping), "0 %d 1", getgid());
+ write(gid_map_fd, mapping, strlen(mapping));
+ close(gid_map_fd);
+
+ /* Now create new time namespace - requires fork to take effect */
+ ret = unshare(CLONE_NEWTIME);
+ if (ret < 0) {
+ write(pipefd[1], "N",
+ 1); /* Unable to create time namespace */
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Fork again for time namespace to take effect */
+ pid_t child_pid = fork();
+ if (child_pid < 0) {
+ write(pipefd[1], "N",
+ 1); /* Unable to fork in time namespace */
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ if (child_pid == 0) {
+ /* Grandchild in new time namespace */
+ /* Try to open parent's time namespace handle from new user+time namespace */
+ fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+
+ if (fd >= 0) {
+ /* Should NOT succeed - we're in a different user namespace */
+ write(pipefd[1], "S",
+ 1); /* Unexpected success */
+ close(fd);
+ } else if (errno == ESTALE) {
+ /* Expected: Stale file handle */
+ write(pipefd[1], "P", 1);
+ } else {
+ /* Other error */
+ write(pipefd[1], "F", 1);
+ }
+
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Wait for grandchild */
+ waitpid(child_pid, NULL, 0);
+ exit(0);
+ }
+
+ /* Parent process */
+ close(pipefd[1]);
+ ASSERT_EQ(read(pipefd[0], &result, 1), 1);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ if (result == 'U') {
+ SKIP(free(handle); close(pipefd[0]);
+ return, "Cannot create new user namespace");
+ }
+ if (result == 'M') {
+ SKIP(free(handle); close(pipefd[0]);
+ return, "Cannot set uid/gid mappings");
+ }
+ if (result == 'N') {
+ SKIP(free(handle); close(pipefd[0]);
+ return, "Cannot create new time namespace");
+ }
+
+ /* Should fail with ESTALE since we're in a different user namespace */
+ ASSERT_EQ(result, 'P');
+
+ close(pipefd[0]);
+ free(handle);
+}
+
+TEST(nsfs_open_flags)
+{
+ struct file_handle *handle;
+ int mount_id;
+ int ret;
+ int fd;
+ int ns_fd;
+
+ handle = malloc(sizeof(*handle) + MAX_HANDLE_SZ);
+ ASSERT_NE(handle, NULL);
+
+ /* Open a namespace file descriptor */
+ ns_fd = open("/proc/self/ns/net", O_RDONLY);
+ ASSERT_GE(ns_fd, 0);
+
+ /* Get handle for the namespace */
+ handle->handle_bytes = MAX_HANDLE_SZ;
+ ret = name_to_handle_at(ns_fd, "", handle, &mount_id, AT_EMPTY_PATH);
+ if (ret < 0 && errno == EOPNOTSUPP) {
+ SKIP(free(handle); close(ns_fd);
+ return, "nsfs doesn't support file handles");
+ }
+ ASSERT_EQ(ret, 0);
+ ASSERT_GT(handle->handle_bytes, 0);
+
+ /* Test invalid flags that should fail */
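+ /*
+ * Write access (O_WRONLY, O_RDWR, O_TRUNC) is denied with EPERM,
+ * O_DIRECT and O_TMPFILE are rejected as invalid for nsfs (EINVAL),
+ * and O_DIRECTORY fails with ENOTDIR since a namespace inode is not
+ * a directory.
+ */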
+ fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_WRONLY);
+ ASSERT_LT(fd, 0);
+ ASSERT_EQ(errno, EPERM);
+
+ fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDWR);
+ ASSERT_LT(fd, 0);
+ ASSERT_EQ(errno, EPERM);
+
+ fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_TRUNC);
+ ASSERT_LT(fd, 0);
+ ASSERT_EQ(errno, EPERM);
+
+ fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_DIRECT);
+ ASSERT_LT(fd, 0);
+ ASSERT_EQ(errno, EINVAL);
+
+ fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_TMPFILE);
+ ASSERT_LT(fd, 0);
+ ASSERT_EQ(errno, EINVAL);
+
+ fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_DIRECTORY);
+ ASSERT_LT(fd, 0);
+ ASSERT_EQ(errno, ENOTDIR);
+
+ close(ns_fd);
+ free(handle);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/namespaces/init_ino_test.c b/tools/testing/selftests/namespaces/init_ino_test.c
new file mode 100644
index 000000000000..e4394a2fa0a9
--- /dev/null
+++ b/tools/testing/selftests/namespaces/init_ino_test.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2025 Christian Brauner <brauner@kernel.org>
+
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <linux/nsfs.h>
+
+#include "kselftest_harness.h"
+
+struct ns_info {
+ const char *name;
+ const char *proc_path;
+ unsigned int expected_ino;
+};
+
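+/*
+ * The *_NS_INIT_INO constants come from <linux/nsfs.h> and name the
+ * fixed, well-known inode numbers assigned to the initial namespaces.
+ */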
+static struct ns_info namespaces[] = {
+ { "ipc", "/proc/1/ns/ipc", IPC_NS_INIT_INO },
+ { "uts", "/proc/1/ns/uts", UTS_NS_INIT_INO },
+ { "user", "/proc/1/ns/user", USER_NS_INIT_INO },
+ { "pid", "/proc/1/ns/pid", PID_NS_INIT_INO },
+ { "cgroup", "/proc/1/ns/cgroup", CGROUP_NS_INIT_INO },
+ { "time", "/proc/1/ns/time", TIME_NS_INIT_INO },
+ { "net", "/proc/1/ns/net", NET_NS_INIT_INO },
+ { "mnt", "/proc/1/ns/mnt", MNT_NS_INIT_INO },
+};
+
+TEST(init_namespace_inodes)
+{
+ struct stat st;
+
+ for (size_t i = 0; i < sizeof(namespaces) / sizeof(namespaces[0]); i++) {
+ int ret = stat(namespaces[i].proc_path, &st);
+
+ /* Some namespaces might not be available (e.g., time namespace on older kernels) */
+ if (ret < 0) {
+ if (errno == ENOENT) {
+ ksft_test_result_skip("%s namespace not available\n",
+ namespaces[i].name);
+ continue;
+ }
+ ASSERT_GE(ret, 0)
+ TH_LOG("Failed to stat %s: %s",
+ namespaces[i].proc_path, strerror(errno));
+ }
+
+ ASSERT_EQ(st.st_ino, namespaces[i].expected_ino)
+ TH_LOG("Namespace %s has inode 0x%lx, expected 0x%x",
+ namespaces[i].name, st.st_ino, namespaces[i].expected_ino);
+
+ ksft_print_msg("Namespace %s: inode 0x%lx matches expected 0x%x\n",
+ namespaces[i].name, st.st_ino, namespaces[i].expected_ino);
+ }
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/namespaces/listns_efault_test.c b/tools/testing/selftests/namespaces/listns_efault_test.c
new file mode 100644
index 000000000000..c7ed4023d7a8
--- /dev/null
+++ b/tools/testing/selftests/namespaces/listns_efault_test.c
@@ -0,0 +1,530 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <linux/nsfs.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/mount.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include "../kselftest_harness.h"
+#include "../filesystems/utils.h"
+#include "../pidfd/pidfd.h"
+#include "wrappers.h"
+
+/*
+ * Test listns() error handling with invalid buffer addresses.
+ *
+ * When the buffer becomes invalid partway through (e.g., crossing a page
+ * boundary into unmapped memory), listns() must fail cleanly rather than
+ * corrupt state or deadlock.
+ *
+ * This test also creates mount namespaces that get destroyed during
+ * iteration, testing that namespace cleanup happens outside the RCU
+ * read lock.
+ */
+TEST(listns_partial_fault_with_ns_cleanup)
+{
+ void *map;
+ __u64 *ns_ids;
+ ssize_t ret;
+ long page_size;
+ pid_t pid, iter_pid;
+ int pidfds[5];
+ int sv[5][2];
+ int iter_pidfd;
+ int i, status;
+ char c;
+
+ page_size = sysconf(_SC_PAGESIZE);
+ ASSERT_GT(page_size, 0);
+
+ /*
+ * Map two pages:
+ * - First page: readable and writable
+ * - Second page: will be unmapped to trigger EFAULT
+ */
+ map = mmap(NULL, page_size * 2, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ ASSERT_NE(map, MAP_FAILED);
+
+ /* Unmap the second page */
+ ret = munmap((char *)map + page_size, page_size);
+ ASSERT_EQ(ret, 0);
+
+ /*
+ * Position the buffer pointer so there's room for exactly one u64
+ * before the page boundary. The second u64 would fall into the
+ * unmapped page.
+ */
+ ns_ids = ((__u64 *)((char *)map + page_size)) - 1;
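+ /*
+ * Resulting layout:
+ *
+ *   ... mapped page [ ns_ids[0] ] | [ ns_ids[1] ] unmapped page ...
+ *                                 ^ page boundary
+ */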
+
+ /*
+ * Create a separate process to run listns() in a loop concurrently
+ * with namespace creation and destruction.
+ */
+ iter_pid = create_child(&iter_pidfd, 0);
+ ASSERT_NE(iter_pid, -1);
+
+ if (iter_pid == 0) {
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = 0, /* All types */
+ .spare2 = 0,
+ .user_ns_id = 0, /* Global listing */
+ };
+ int iter_ret;
+
+ /*
+ * Loop calling listns() until killed.
+ * The kernel should:
+ * 1. Successfully write the first namespace ID (within valid page)
+ * 2. Fail with EFAULT when trying to write the second ID (unmapped page)
+ * 3. Handle concurrent namespace destruction without deadlock
+ */
+ while (1) {
+ iter_ret = sys_listns(&req, ns_ids, 2, 0);
+
+ if (iter_ret == -1 && errno == ENOSYS)
+ _exit(PIDFD_SKIP);
+ }
+ }
+
+ /* Small delay to let iterator start looping */
+ usleep(50000);
+
+ /*
+ * Create several child processes, each in its own mount namespace.
+ * These will be destroyed while the iterator is running listns().
+ */
+ for (i = 0; i < 5; i++) {
+ /* Create socketpair for synchronization */
+ ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, sv[i]), 0);
+
+ pid = create_child(&pidfds[i], CLONE_NEWNS);
+ ASSERT_NE(pid, -1);
+
+ if (pid == 0) {
+ close(sv[i][0]); /* Close parent end */
+
+ if (mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0))
+ _exit(1);
+
+ /* Child: create a couple of tmpfs mounts */
+ if (mkdir("/tmp/test_mnt1", 0755) == -1 && errno != EEXIST)
+ _exit(1);
+ if (mkdir("/tmp/test_mnt2", 0755) == -1 && errno != EEXIST)
+ _exit(1);
+
+ if (mount("tmpfs", "/tmp/test_mnt1", "tmpfs", 0, NULL) == -1)
+ _exit(1);
+ if (mount("tmpfs", "/tmp/test_mnt2", "tmpfs", 0, NULL) == -1)
+ _exit(1);
+
+ /* Signal parent that setup is complete */
+ if (write_nointr(sv[i][1], "R", 1) != 1)
+ _exit(1);
+
+ /* Wait for parent to signal us to exit */
+ if (read_nointr(sv[i][1], &c, 1) != 1)
+ _exit(1);
+
+ close(sv[i][1]);
+ _exit(0);
+ }
+
+ close(sv[i][1]); /* Close child end */
+ }
+
+ /* Wait for all children to finish setup */
+ for (i = 0; i < 5; i++) {
+ ret = read_nointr(sv[i][0], &c, 1);
+ ASSERT_EQ(ret, 1);
+ ASSERT_EQ(c, 'R');
+ }
+
+ /*
+ * Signal children to exit. This will destroy their mount namespaces
+ * while listns() is iterating the namespace tree.
+ * This tests that cleanup happens outside the RCU read lock.
+ */
+ for (i = 0; i < 5; i++)
+ write_nointr(sv[i][0], "X", 1);
+
+ /* Wait for all mount namespace children to exit and cleanup */
+ for (i = 0; i < 5; i++) {
+ waitpid(-1, NULL, 0);
+ close(sv[i][0]);
+ close(pidfds[i]);
+ }
+
+ /* Kill iterator and wait for it */
+ sys_pidfd_send_signal(iter_pidfd, SIGKILL, NULL, 0);
+ ret = waitpid(iter_pid, &status, 0);
+ ASSERT_EQ(ret, iter_pid);
+ close(iter_pidfd);
+
+ /* Should have been killed */
+ ASSERT_TRUE(WIFSIGNALED(status));
+ ASSERT_EQ(WTERMSIG(status), SIGKILL);
+
+ /* Clean up */
+ munmap(map, page_size);
+}
+
+/*
+ * Test listns() error handling when the entire buffer is invalid.
+ * This is a sanity check that basic invalid pointer detection works.
+ */
+TEST(listns_complete_fault)
+{
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = 0,
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
+ __u64 *ns_ids;
+ ssize_t ret;
+
+ /* Use a clearly invalid pointer */
+ ns_ids = (__u64 *)0xdeadbeef;
+
+ ret = sys_listns(&req, ns_ids, 10, 0);
+
+ if (ret == -1 && errno == ENOSYS)
+ SKIP(return, "listns() not supported");
+
+ /* Should fail with EFAULT */
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, EFAULT);
+}
+
+/*
+ * Test listns() error handling when the buffer is NULL.
+ */
+TEST(listns_null_buffer)
+{
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = 0,
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
+ ssize_t ret;
+
+ /* NULL buffer with non-zero count should fail */
+ ret = sys_listns(&req, NULL, 10, 0);
+
+ if (ret == -1 && errno == ENOSYS)
+ SKIP(return, "listns() not supported");
+
+ /* Should fail with EFAULT */
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, EFAULT);
+}
+
+/*
+ * Test listns() with a buffer that becomes invalid mid-iteration
+ * (after several successful writes), combined with mount namespace
+ * destruction to test RCU cleanup logic.
+ */
+TEST(listns_late_fault_with_ns_cleanup)
+{
+ void *map;
+ __u64 *ns_ids;
+ ssize_t ret;
+ long page_size;
+ pid_t pid, iter_pid;
+ int pidfds[10];
+ int sv[10][2];
+ int iter_pidfd;
+ int i, status;
+ char c;
+
+ page_size = sysconf(_SC_PAGESIZE);
+ ASSERT_GT(page_size, 0);
+
+ /* Map two pages */
+ map = mmap(NULL, page_size * 2, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ ASSERT_NE(map, MAP_FAILED);
+
+ /* Unmap the second page */
+ ret = munmap((char *)map + page_size, page_size);
+ ASSERT_EQ(ret, 0);
+
+ /*
+ * Position buffer so we can write several u64s successfully
+ * before hitting the page boundary.
+ */
+ ns_ids = ((__u64 *)((char *)map + page_size)) - 5;
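+ /* ns_ids[0..4] land in the mapped page; ns_ids[5] would fault. */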
+
+ /*
+ * Create a separate process to run listns() concurrently.
+ */
+ iter_pid = create_child(&iter_pidfd, 0);
+ ASSERT_NE(iter_pid, -1);
+
+ if (iter_pid == 0) {
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = 0,
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
+ int iter_ret;
+
+ /*
+ * Loop calling listns() until killed.
+ * Request 10 namespace IDs while namespaces are being destroyed.
+ * This tests:
+ * 1. EFAULT handling when buffer becomes invalid
+ * 2. Namespace cleanup outside RCU read lock during iteration
+ */
+ while (1) {
+ iter_ret = sys_listns(&req, ns_ids, 10, 0);
+
+ if (iter_ret == -1 && errno == ENOSYS)
+ _exit(PIDFD_SKIP);
+ }
+ }
+
+ /* Small delay to let iterator start looping */
+ usleep(50000);
+
+ /*
+ * Create more children with mount namespaces to increase the
+ * likelihood that namespace cleanup happens during iteration.
+ */
+ for (i = 0; i < 10; i++) {
+ /* Create socketpair for synchronization */
+ ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, sv[i]), 0);
+
+ pid = create_child(&pidfds[i], CLONE_NEWNS);
+ ASSERT_NE(pid, -1);
+
+ if (pid == 0) {
+ close(sv[i][0]); /* Close parent end */
+
+ if (mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0))
+ _exit(1);
+
+ /* Child: create tmpfs mounts */
+ if (mkdir("/tmp/test_mnt1", 0755) == -1 && errno != EEXIST)
+ _exit(1);
+ if (mkdir("/tmp/test_mnt2", 0755) == -1 && errno != EEXIST)
+ _exit(1);
+
+ if (mount("tmpfs", "/tmp/test_mnt1", "tmpfs", 0, NULL) == -1)
+ _exit(1);
+ if (mount("tmpfs", "/tmp/test_mnt2", "tmpfs", 0, NULL) == -1)
+ _exit(1);
+
+ /* Signal parent that setup is complete */
+ if (write_nointr(sv[i][1], "R", 1) != 1)
+ _exit(1);
+
+ /* Wait for parent to signal us to exit */
+ if (read_nointr(sv[i][1], &c, 1) != 1)
+ _exit(1);
+
+ close(sv[i][1]);
+ _exit(0);
+ }
+
+ close(sv[i][1]); /* Close child end */
+ }
+
+ /* Wait for all children to finish setup */
+ for (i = 0; i < 10; i++) {
+ ret = read_nointr(sv[i][0], &c, 1);
+ ASSERT_EQ(ret, 1);
+ ASSERT_EQ(c, 'R');
+ }
+
+ /* Kill half the children */
+ for (i = 0; i < 5; i++)
+ write_nointr(sv[i][0], "X", 1);
+
+ /* Small delay to let some exit */
+ usleep(10000);
+
+ /* Kill remaining children */
+ for (i = 5; i < 10; i++)
+ write_nointr(sv[i][0], "X", 1);
+
+ /* Wait for all children and cleanup */
+ for (i = 0; i < 10; i++) {
+ waitpid(-1, NULL, 0);
+ close(sv[i][0]);
+ close(pidfds[i]);
+ }
+
+ /* Kill iterator and wait for it */
+ sys_pidfd_send_signal(iter_pidfd, SIGKILL, NULL, 0);
+ ret = waitpid(iter_pid, &status, 0);
+ ASSERT_EQ(ret, iter_pid);
+ close(iter_pidfd);
+
+ /* Should have been killed */
+ ASSERT_TRUE(WIFSIGNALED(status));
+ ASSERT_EQ(WTERMSIG(status), SIGKILL);
+
+ /* Clean up */
+ munmap(map, page_size);
+}
+
+/*
+ * Test specifically focused on mount namespace cleanup during EFAULT.
+ * Filter for mount namespaces only.
+ */
+TEST(listns_mnt_ns_cleanup_on_fault)
+{
+ void *map;
+ __u64 *ns_ids;
+ ssize_t ret;
+ long page_size;
+ pid_t pid, iter_pid;
+ int pidfds[8];
+ int sv[8][2];
+ int iter_pidfd;
+ int i, status;
+ char c;
+
+ page_size = sysconf(_SC_PAGESIZE);
+ ASSERT_GT(page_size, 0);
+
+ /* Set up partial fault buffer */
+ map = mmap(NULL, page_size * 2, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ ASSERT_NE(map, MAP_FAILED);
+
+ ret = munmap((char *)map + page_size, page_size);
+ ASSERT_EQ(ret, 0);
+
+ /* Position for 3 successful writes, then fault */
+ ns_ids = ((__u64 *)((char *)map + page_size)) - 3;
+
+ /*
+ * Create a separate process to run listns() concurrently.
+ */
+ iter_pid = create_child(&iter_pidfd, 0);
+ ASSERT_NE(iter_pid, -1);
+
+ if (iter_pid == 0) {
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = CLONE_NEWNS, /* Only mount namespaces */
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
+ int iter_ret;
+
+ /*
+ * Loop calling listns() until killed.
+ * Call listns() to race with namespace destruction.
+ */
+ while (1) {
+ iter_ret = sys_listns(&req, ns_ids, 10, 0);
+
+ if (iter_ret == -1 && errno == ENOSYS)
+ _exit(PIDFD_SKIP);
+ }
+ }
+
+ /* Small delay to let iterator start looping */
+ usleep(50000);
+
+ /* Create children with mount namespaces */
+ for (i = 0; i < 8; i++) {
+ /* Create socketpair for synchronization */
+ ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, sv[i]), 0);
+
+ pid = create_child(&pidfds[i], CLONE_NEWNS);
+ ASSERT_NE(pid, -1);
+
+ if (pid == 0) {
+ close(sv[i][0]); /* Close parent end */
+
+ if (mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0))
+ _exit(1);
+
+ /* Do some mount operations to make cleanup more interesting */
+ if (mkdir("/tmp/test_mnt1", 0755) == -1 && errno != EEXIST)
+ _exit(1);
+ if (mkdir("/tmp/test_mnt2", 0755) == -1 && errno != EEXIST)
+ _exit(1);
+
+ if (mount("tmpfs", "/tmp/test_mnt1", "tmpfs", 0, NULL) == -1)
+ _exit(1);
+ if (mount("tmpfs", "/tmp/test_mnt2", "tmpfs", 0, NULL) == -1)
+ _exit(1);
+
+ /* Signal parent that setup is complete */
+ if (write_nointr(sv[i][1], "R", 1) != 1)
+ _exit(1);
+
+ /* Wait for parent to signal us to exit */
+ if (read_nointr(sv[i][1], &c, 1) != 1)
+ _exit(1);
+
+ close(sv[i][1]);
+ _exit(0);
+ }
+
+ close(sv[i][1]); /* Close child end */
+ }
+
+ /* Wait for all children to finish setup */
+ for (i = 0; i < 8; i++) {
+ ret = read_nointr(sv[i][0], &c, 1);
+ ASSERT_EQ(ret, 1);
+ ASSERT_EQ(c, 'R');
+ }
+
+ /* Kill children to trigger namespace destruction during iteration */
+ for (i = 0; i < 8; i++)
+ write_nointr(sv[i][0], "X", 1);
+
+ /* Wait for children and cleanup */
+ for (i = 0; i < 8; i++) {
+ waitpid(-1, NULL, 0);
+ close(sv[i][0]);
+ close(pidfds[i]);
+ }
+
+ /* Kill iterator and wait for it */
+ sys_pidfd_send_signal(iter_pidfd, SIGKILL, NULL, 0);
+ ret = waitpid(iter_pid, &status, 0);
+ ASSERT_EQ(ret, iter_pid);
+ close(iter_pidfd);
+
+ /* Should have been killed */
+ ASSERT_TRUE(WIFSIGNALED(status));
+ ASSERT_EQ(WTERMSIG(status), SIGKILL);
+
+ munmap(map, page_size);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/namespaces/listns_pagination_bug.c b/tools/testing/selftests/namespaces/listns_pagination_bug.c
new file mode 100644
index 000000000000..da7d33f96397
--- /dev/null
+++ b/tools/testing/selftests/namespaces/listns_pagination_bug.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <errno.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/socket.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include "../kselftest_harness.h"
+#include "../filesystems/utils.h"
+#include "wrappers.h"
+
+/*
+ * Minimal test case to reproduce KASAN out-of-bounds in listns pagination.
+ *
+ * The bug occurs when:
+ * 1. Filtering by a specific namespace type (e.g., CLONE_NEWUSER)
+ * 2. Using pagination (req.ns_id != 0)
+ * 3. The lookup_ns_id_at() call in do_listns() passes ns_type=0 instead of
+ * the filtered type, causing it to search the unified tree and potentially
+ * return a namespace of the wrong type.
+ */
+TEST(pagination_with_type_filter)
+{
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = CLONE_NEWUSER, /* Filter by user namespace */
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
+ pid_t pids[10];
+ int num_children = 10;
+ int i;
+ int sv[2];
+ __u64 first_batch[3];
+ ssize_t ret;
+
+ ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, sv), 0);
+
+ /* Create children with user namespaces */
+ for (i = 0; i < num_children; i++) {
+ pids[i] = fork();
+ ASSERT_GE(pids[i], 0);
+
+ if (pids[i] == 0) {
+ char c = 'R'; /* don't write an indeterminate byte */
+ close(sv[0]);
+
+ if (setup_userns() < 0) {
+ close(sv[1]);
+ exit(1);
+ }
+
+ /* Signal parent we're ready */
+ if (write(sv[1], &c, 1) != 1) {
+ close(sv[1]);
+ exit(1);
+ }
+
+ /* Wait for parent signal to exit */
+ if (read(sv[1], &c, 1) != 1) {
+ close(sv[1]);
+ exit(1);
+ }
+
+ close(sv[1]);
+ exit(0);
+ }
+ }
+
+ close(sv[1]);
+
+ /* Wait for all children to signal ready */
+ for (i = 0; i < num_children; i++) {
+ char c;
+ if (read(sv[0], &c, 1) != 1) {
+ close(sv[0]);
+ for (int j = 0; j < num_children; j++)
+ kill(pids[j], SIGKILL);
+ for (int j = 0; j < num_children; j++)
+ waitpid(pids[j], NULL, 0);
+ ASSERT_TRUE(false);
+ }
+ }
+
+ /* First batch - this should work */
+ ret = sys_listns(&req, first_batch, 3, 0);
+ if (ret < 0) {
+ if (errno == ENOSYS) {
+ close(sv[0]);
+ for (i = 0; i < num_children; i++)
+ kill(pids[i], SIGKILL);
+ for (i = 0; i < num_children; i++)
+ waitpid(pids[i], NULL, 0);
+ SKIP(return, "listns() not supported");
+ }
+ ASSERT_GE(ret, 0);
+ }
+
+ TH_LOG("First batch returned %zd entries", ret);
+
+ if (ret == 3) {
+ __u64 second_batch[3];
+
+ /* Second batch - pagination triggers the bug */
+ req.ns_id = first_batch[2]; /* Continue from last ID */
+ ret = sys_listns(&req, second_batch, 3, 0);
+
+ TH_LOG("Second batch returned %zd entries", ret);
+ ASSERT_GE(ret, 0);
+ }
+
+ /* Signal all children to exit */
+ for (i = 0; i < num_children; i++) {
+ char c = 'X';
+ if (write(sv[0], &c, 1) != 1) {
+ close(sv[0]);
+ for (int j = i; j < num_children; j++)
+ kill(pids[j], SIGKILL);
+ for (int j = 0; j < num_children; j++)
+ waitpid(pids[j], NULL, 0);
+ ASSERT_TRUE(false);
+ }
+ }
+
+ close(sv[0]);
+
+ /* Cleanup */
+ for (i = 0; i < num_children; i++) {
+ int status;
+ waitpid(pids[i], &status, 0);
+ }
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/namespaces/listns_permissions_test.c b/tools/testing/selftests/namespaces/listns_permissions_test.c
new file mode 100644
index 000000000000..82d818751a5f
--- /dev/null
+++ b/tools/testing/selftests/namespaces/listns_permissions_test.c
@@ -0,0 +1,759 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <linux/nsfs.h>
+#include <sys/capability.h>
+#include <sys/ioctl.h>
+#include <sys/prctl.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include "../kselftest_harness.h"
+#include "../filesystems/utils.h"
+#include "wrappers.h"
+
+/*
+ * Test that unprivileged users can only see namespaces they're currently in.
+ * Create a namespace, drop privileges, verify we can only see our own namespaces.
+ */
+TEST(listns_unprivileged_current_only)
+{
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = CLONE_NEWNET,
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
+ __u64 ns_ids[100];
+ ssize_t ret;
+ int pipefd[2];
+ pid_t pid;
+ int status;
+ bool found_ours;
+ int unexpected_count;
+
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ int fd;
+ __u64 our_netns_id;
+ bool found_ours;
+ int unexpected_count;
+
+ close(pipefd[0]);
+
+ /* Create user namespace to be unprivileged */
+ if (setup_userns() < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ /* Create a network namespace */
+ if (unshare(CLONE_NEWNET) < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ /* Get our network namespace ID */
+ fd = open("/proc/self/ns/net", O_RDONLY);
+ if (fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ if (ioctl(fd, NS_GET_ID, &our_netns_id) < 0) {
+ close(fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(fd);
+
+ /* Now we're unprivileged - list all network namespaces */
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+ if (ret < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ /* We should only see our own network namespace */
+ found_ours = false;
+ unexpected_count = 0;
+
+ for (ssize_t i = 0; i < ret; i++) {
+ if (ns_ids[i] == our_netns_id) {
+ found_ours = true;
+ } else {
+ /* This is either init_net (which we can see) or unexpected */
+ unexpected_count++;
+ }
+ }
+
+ /* Send results to parent */
+ write(pipefd[1], &found_ours, sizeof(found_ours));
+ write(pipefd[1], &unexpected_count, sizeof(unexpected_count));
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Parent */
+ close(pipefd[1]);
+
+ found_ours = false;
+ unexpected_count = 0;
+ read(pipefd[0], &found_ours, sizeof(found_ours));
+ read(pipefd[0], &unexpected_count, sizeof(unexpected_count));
+ close(pipefd[0]);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ /* Child should have seen its own namespace */
+ ASSERT_TRUE(found_ours);
+
+ TH_LOG("Unprivileged child saw its own namespace, plus %d others (likely init_net)",
+ unexpected_count);
+}
+
+/*
+ * Test that users with CAP_SYS_ADMIN in a user namespace can see
+ * all namespaces owned by that user namespace.
+ */
+TEST(listns_cap_sys_admin_in_userns)
+{
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = 0, /* All types */
+ .spare2 = 0,
+ .user_ns_id = 0, /* Will be set to our created user namespace */
+ };
+ __u64 ns_ids[100];
+ int pipefd[2];
+ pid_t pid;
+ int status;
+ bool success;
+ ssize_t count;
+
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ int fd;
+ __u64 userns_id;
+ ssize_t ret;
+ int min_expected;
+ bool success;
+
+ close(pipefd[0]);
+
+ /* Create user namespace - we'll have CAP_SYS_ADMIN in it */
+ if (setup_userns() < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ /* Get the user namespace ID */
+ fd = open("/proc/self/ns/user", O_RDONLY);
+ if (fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ if (ioctl(fd, NS_GET_ID, &userns_id) < 0) {
+ close(fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(fd);
+
+ /* Create several namespaces owned by this user namespace */
+ unshare(CLONE_NEWNET);
+ unshare(CLONE_NEWUTS);
+ unshare(CLONE_NEWIPC);
+
+ /* List namespaces owned by our user namespace */
+ req.user_ns_id = userns_id;
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+ if (ret < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ /*
+ * We have CAP_SYS_ADMIN in this user namespace,
+ * so we should see all namespaces owned by it.
+ * That includes: net, uts, ipc, and the user namespace itself.
+ */
+ min_expected = 4;
+ success = (ret >= min_expected);
+
+ write(pipefd[1], &success, sizeof(success));
+ write(pipefd[1], &ret, sizeof(ret));
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Parent */
+ close(pipefd[1]);
+
+ success = false;
+ count = 0;
+ read(pipefd[0], &success, sizeof(success));
+ read(pipefd[0], &count, sizeof(count));
+ close(pipefd[0]);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ ASSERT_TRUE(success);
+ TH_LOG("User with CAP_SYS_ADMIN saw %zd namespaces owned by their user namespace",
+ count);
+}
+
+/*
+ * Test that users cannot see namespaces from unrelated user namespaces.
+ * Create two sibling user namespaces, verify they can't see each other's
+ * owned namespaces.
+ */
+TEST(listns_cannot_see_sibling_userns_namespaces)
+{
+ int pipefd[2];
+ pid_t pid1, pid2;
+ int status;
+ __u64 netns_a_id;
+ int pipefd2[2];
+ bool found_sibling_netns;
+
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ /* Fork first child - creates user namespace A */
+ pid1 = fork();
+ ASSERT_GE(pid1, 0);
+
+ if (pid1 == 0) {
+ int fd;
+ __u64 netns_a_id;
+ char buf;
+
+ close(pipefd[0]);
+
+ /* Create user namespace A */
+ if (setup_userns() < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ /* Create network namespace owned by user namespace A */
+ if (unshare(CLONE_NEWNET) < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ /* Get network namespace ID */
+ fd = open("/proc/self/ns/net", O_RDONLY);
+ if (fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ if (ioctl(fd, NS_GET_ID, &netns_a_id) < 0) {
+ close(fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(fd);
+
+ /* Send namespace ID to parent */
+ write(pipefd[1], &netns_a_id, sizeof(netns_a_id));
+
+ /* Keep alive for sibling to check */
+ read(pipefd[1], &buf, 1);
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Parent reads namespace A ID */
+ close(pipefd[1]);
+ netns_a_id = 0;
+ read(pipefd[0], &netns_a_id, sizeof(netns_a_id));
+
+ TH_LOG("User namespace A created network namespace with ID %llu",
+ (unsigned long long)netns_a_id);
+
+ /* Fork second child - creates user namespace B */
+ ASSERT_EQ(pipe(pipefd2), 0);
+
+ pid2 = fork();
+ ASSERT_GE(pid2, 0);
+
+ if (pid2 == 0) {
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = CLONE_NEWNET,
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
+ __u64 ns_ids[100];
+ ssize_t ret;
+ bool found_sibling_netns;
+
+ close(pipefd[0]);
+ close(pipefd2[0]);
+
+ /* Create user namespace B (sibling to A) */
+ if (setup_userns() < 0) {
+ close(pipefd2[1]);
+ exit(1);
+ }
+
+ /* Try to list all network namespaces */
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+
+ found_sibling_netns = false;
+ if (ret > 0) {
+ for (ssize_t i = 0; i < ret; i++) {
+ if (ns_ids[i] == netns_a_id) {
+ found_sibling_netns = true;
+ break;
+ }
+ }
+ }
+
+ /* We should NOT see the sibling's network namespace */
+ write(pipefd2[1], &found_sibling_netns, sizeof(found_sibling_netns));
+ close(pipefd2[1]);
+ exit(0);
+ }
+
+ /* Parent reads result from second child */
+ close(pipefd2[1]);
+ found_sibling_netns = false;
+ read(pipefd2[0], &found_sibling_netns, sizeof(found_sibling_netns));
+ close(pipefd2[0]);
+
+ /* Signal first child to exit */
+ close(pipefd[0]);
+
+ /* Wait for both children */
+ waitpid(pid2, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+
+ waitpid(pid1, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+
+ /* Second child should NOT have seen first child's namespace */
+ ASSERT_FALSE(found_sibling_netns);
+ TH_LOG("User namespace B correctly could not see sibling namespace A's network namespace");
+}
+
+/*
+ * Test permission checking with LISTNS_CURRENT_USER.
+ * Verify that listing with LISTNS_CURRENT_USER respects permissions.
+ */
+TEST(listns_current_user_permissions)
+{
+ int pipefd[2];
+ pid_t pid;
+ int status;
+ bool success;
+ ssize_t count;
+
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = 0,
+ .spare2 = 0,
+ .user_ns_id = LISTNS_CURRENT_USER,
+ };
+ __u64 ns_ids[100];
+ ssize_t ret;
+ bool success;
+
+ close(pipefd[0]);
+
+ /* Create user namespace */
+ if (setup_userns() < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ /* Create some namespaces owned by this user namespace */
+ if (unshare(CLONE_NEWNET) < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ if (unshare(CLONE_NEWUTS) < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ /* List with LISTNS_CURRENT_USER - should see our owned namespaces */
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+
+ success = (ret >= 3); /* At least user, net, uts */
+ write(pipefd[1], &success, sizeof(success));
+ write(pipefd[1], &ret, sizeof(ret));
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Parent */
+ close(pipefd[1]);
+
+ success = false;
+ count = 0;
+ read(pipefd[0], &success, sizeof(success));
+ read(pipefd[0], &count, sizeof(count));
+ close(pipefd[0]);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ ASSERT_TRUE(success);
+ TH_LOG("LISTNS_CURRENT_USER returned %zd namespaces", count);
+}
+
+/*
+ * Test that CAP_SYS_ADMIN in parent user namespace allows seeing
+ * child user namespace's owned namespaces.
+ */
+TEST(listns_parent_userns_cap_sys_admin)
+{
+ int pipefd[2];
+ pid_t pid;
+ int status;
+ bool found_child_userns;
+ ssize_t count;
+
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ int fd;
+ __u64 parent_userns_id;
+ __u64 child_userns_id;
+ struct ns_id_req req;
+ __u64 ns_ids[100];
+ ssize_t ret;
+ bool found_child_userns;
+
+ close(pipefd[0]);
+
+ /* Create parent user namespace - we have CAP_SYS_ADMIN in it */
+ if (setup_userns() < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ /* Get parent user namespace ID */
+ fd = open("/proc/self/ns/user", O_RDONLY);
+ if (fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ if (ioctl(fd, NS_GET_ID, &parent_userns_id) < 0) {
+ close(fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(fd);
+
+ /* Create child user namespace */
+ if (setup_userns() < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ /* Get child user namespace ID */
+ fd = open("/proc/self/ns/user", O_RDONLY);
+ if (fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ if (ioctl(fd, NS_GET_ID, &child_userns_id) < 0) {
+ close(fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(fd);
+
+ /* Create namespaces owned by child user namespace */
+ if (unshare(CLONE_NEWNET) < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ /* List namespaces owned by parent user namespace */
+ req.size = sizeof(req);
+ req.spare = 0;
+ req.ns_id = 0;
+ req.ns_type = 0;
+ req.spare2 = 0;
+ req.user_ns_id = parent_userns_id;
+
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+
+ /* Should see child user namespace in the list */
+ found_child_userns = false;
+ if (ret > 0) {
+ for (ssize_t i = 0; i < ret; i++) {
+ if (ns_ids[i] == child_userns_id) {
+ found_child_userns = true;
+ break;
+ }
+ }
+ }
+
+ write(pipefd[1], &found_child_userns, sizeof(found_child_userns));
+ write(pipefd[1], &ret, sizeof(ret));
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Parent */
+ close(pipefd[1]);
+
+ found_child_userns = false;
+ count = 0;
+ read(pipefd[0], &found_child_userns, sizeof(found_child_userns));
+ read(pipefd[0], &count, sizeof(count));
+ close(pipefd[0]);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ ASSERT_TRUE(found_child_userns);
+ TH_LOG("Process with CAP_SYS_ADMIN in parent user namespace saw child user namespace (total: %zd)",
+ count);
+}
+
+/*
+ * Test that we can see user namespaces we have CAP_SYS_ADMIN inside of.
+ * This is different from seeing namespaces owned by a user namespace.
+ */
+TEST(listns_cap_sys_admin_inside_userns)
+{
+ int pipefd[2];
+ pid_t pid;
+ int status;
+ bool found_ours;
+
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ int fd;
+ __u64 our_userns_id;
+ struct ns_id_req req;
+ __u64 ns_ids[100];
+ ssize_t ret;
+ bool found_ours;
+
+ close(pipefd[0]);
+
+ /* Create user namespace - we have CAP_SYS_ADMIN inside it */
+ if (setup_userns() < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ /* Get our user namespace ID */
+ fd = open("/proc/self/ns/user", O_RDONLY);
+ if (fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ if (ioctl(fd, NS_GET_ID, &our_userns_id) < 0) {
+ close(fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(fd);
+
+ /* List all user namespaces globally */
+ req.size = sizeof(req);
+ req.spare = 0;
+ req.ns_id = 0;
+ req.ns_type = CLONE_NEWUSER;
+ req.spare2 = 0;
+ req.user_ns_id = 0;
+
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+
+ /* We should be able to see our own user namespace */
+ found_ours = false;
+ if (ret > 0) {
+ for (ssize_t i = 0; i < ret; i++) {
+ if (ns_ids[i] == our_userns_id) {
+ found_ours = true;
+ break;
+ }
+ }
+ }
+
+ write(pipefd[1], &found_ours, sizeof(found_ours));
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Parent */
+ close(pipefd[1]);
+
+ found_ours = false;
+ read(pipefd[0], &found_ours, sizeof(found_ours));
+ close(pipefd[0]);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ ASSERT_TRUE(found_ours);
+ TH_LOG("Process can see user namespace it has CAP_SYS_ADMIN inside of");
+}
+
+/*
+ * Test that dropping CAP_SYS_ADMIN restricts what we can see.
+ */
+TEST(listns_drop_cap_sys_admin)
+{
+ cap_t caps;
+ cap_value_t cap_list[1] = { CAP_SYS_ADMIN };
+
+ /* This test needs to start with CAP_SYS_ADMIN */
+ caps = cap_get_proc();
+ if (!caps) {
+ SKIP(return, "Cannot get capabilities");
+ }
+
+ cap_flag_value_t cap_val;
+ if (cap_get_flag(caps, CAP_SYS_ADMIN, CAP_EFFECTIVE, &cap_val) < 0) {
+ cap_free(caps);
+ SKIP(return, "Cannot check CAP_SYS_ADMIN");
+ }
+
+ if (cap_val != CAP_SET) {
+ cap_free(caps);
+ SKIP(return, "Test needs CAP_SYS_ADMIN to start");
+ }
+ cap_free(caps);
+
+ int pipefd[2];
+ pid_t pid;
+ int status;
+ bool correct;
+ ssize_t count_before, count_after;
+
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = CLONE_NEWNET,
+ .spare2 = 0,
+ .user_ns_id = LISTNS_CURRENT_USER,
+ };
+ __u64 ns_ids_before[100];
+ ssize_t count_before;
+ __u64 ns_ids_after[100];
+ ssize_t count_after;
+ bool correct;
+
+ close(pipefd[0]);
+
+ /* Create user namespace */
+ if (setup_userns() < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ /* Count namespaces with CAP_SYS_ADMIN */
+ count_before = sys_listns(&req, ns_ids_before, ARRAY_SIZE(ns_ids_before), 0);
+
+ /* Drop CAP_SYS_ADMIN */
+ caps = cap_get_proc();
+ if (caps) {
+ cap_set_flag(caps, CAP_EFFECTIVE, 1, cap_list, CAP_CLEAR);
+ cap_set_flag(caps, CAP_PERMITTED, 1, cap_list, CAP_CLEAR);
+ cap_set_proc(caps);
+ cap_free(caps);
+ }
+
+ /* Ensure we can't regain the capability */
+ prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+
+ /* Count namespaces without CAP_SYS_ADMIN */
+ count_after = sys_listns(&req, ns_ids_after, ARRAY_SIZE(ns_ids_after), 0);
+
+ /* Without CAP_SYS_ADMIN, we should see same or fewer namespaces */
+ correct = (count_after <= count_before);
+
+ write(pipefd[1], &correct, sizeof(correct));
+ write(pipefd[1], &count_before, sizeof(count_before));
+ write(pipefd[1], &count_after, sizeof(count_after));
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Parent */
+ close(pipefd[1]);
+
+ correct = false;
+ count_before = 0;
+ count_after = 0;
+ read(pipefd[0], &correct, sizeof(correct));
+ read(pipefd[0], &count_before, sizeof(count_before));
+ read(pipefd[0], &count_after, sizeof(count_after));
+ close(pipefd[0]);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ ASSERT_TRUE(correct);
+ TH_LOG("With CAP_SYS_ADMIN: %zd namespaces, without: %zd namespaces",
+ count_before, count_after);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/namespaces/listns_test.c b/tools/testing/selftests/namespaces/listns_test.c
new file mode 100644
index 000000000000..8a95789d6a87
--- /dev/null
+++ b/tools/testing/selftests/namespaces/listns_test.c
@@ -0,0 +1,679 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <linux/nsfs.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include "../kselftest_harness.h"
+#include "../filesystems/utils.h"
+#include "wrappers.h"
+
+/*
+ * Test basic listns() functionality with the unified namespace tree.
+ * List all active namespaces globally.
+ */
+TEST(listns_basic_unified)
+{
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = 0, /* All types */
+ .spare2 = 0,
+ .user_ns_id = 0, /* Global listing */
+ };
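+ /*
+ * ns_id doubles as the pagination cursor (0 starts from the beginning)
+ * and user_ns_id selects an owning user namespace (0 lists globally).
+ */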
+ __u64 ns_ids[100];
+ ssize_t ret;
+
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+ if (ret < 0) {
+ if (errno == ENOSYS)
+ SKIP(return, "listns() not supported");
+ TH_LOG("listns failed: %s (errno=%d)", strerror(errno), errno);
+ ASSERT_TRUE(false);
+ }
+
+ /* Should find at least the initial namespaces */
+ ASSERT_GT(ret, 0);
+ TH_LOG("Found %zd active namespaces", ret);
+
+ /* Verify all returned IDs are non-zero */
+ for (ssize_t i = 0; i < ret; i++) {
+ ASSERT_NE(ns_ids[i], 0);
+ TH_LOG(" [%zd] ns_id: %llu", i, (unsigned long long)ns_ids[i]);
+ }
+}
+
+/*
+ * Test listns() with type filtering.
+ * List only network namespaces.
+ */
+TEST(listns_filter_by_type)
+{
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = CLONE_NEWNET, /* Only network namespaces */
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
+ __u64 ns_ids[100];
+ ssize_t ret;
+
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+ if (ret < 0) {
+ if (errno == ENOSYS)
+ SKIP(return, "listns() not supported");
+ TH_LOG("listns failed: %s (errno=%d)", strerror(errno), errno);
+ ASSERT_TRUE(false);
+ }
+ ASSERT_GE(ret, 0);
+
+ /* Should find at least init_net */
+ ASSERT_GT(ret, 0);
+ TH_LOG("Found %zd active network namespaces", ret);
+
+ /* Verify we can open each namespace and it's actually a network namespace */
+ for (ssize_t i = 0; i < ret && i < 5; i++) {
+ struct nsfs_file_handle nsfh = {
+ .ns_id = ns_ids[i],
+ .ns_type = CLONE_NEWNET,
+ .ns_inum = 0,
+ };
+ struct file_handle *fh;
+ int fd;
+
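+ /*
+ * Hand-roll an nsfs file handle: encode the namespace ID and type so
+ * open_by_handle_at() can resolve the namespace without a /proc path.
+ */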
+ fh = (struct file_handle *)malloc(sizeof(*fh) + sizeof(nsfh));
+ ASSERT_NE(fh, NULL);
+ fh->handle_bytes = sizeof(nsfh);
+ fh->handle_type = 0;
+ memcpy(fh->f_handle, &nsfh, sizeof(nsfh));
+
+ fd = open_by_handle_at(-10003 /* FD_NSFS_ROOT */, fh, O_RDONLY);
+ free(fh);
+
+ if (fd >= 0) {
+ int ns_type;
+ /* Verify it's a network namespace via ioctl */
+ ns_type = ioctl(fd, NS_GET_NSTYPE);
+ if (ns_type >= 0) {
+ ASSERT_EQ(ns_type, CLONE_NEWNET);
+ }
+ close(fd);
+ }
+ }
+}
+
+/*
+ * Test listns() pagination.
+ * List namespaces in batches.
+ */
+TEST(listns_pagination)
+{
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = 0,
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
+ __u64 batch1[2], batch2[2];
+ ssize_t ret1, ret2;
+
+ /* Get first batch */
+ ret1 = sys_listns(&req, batch1, ARRAY_SIZE(batch1), 0);
+ if (ret1 < 0) {
+ if (errno == ENOSYS)
+ SKIP(return, "listns() not supported");
+ TH_LOG("listns failed: %s (errno=%d)", strerror(errno), errno);
+ ASSERT_TRUE(false);
+ }
+ ASSERT_GE(ret1, 0);
+
+ if (ret1 == 0)
+ SKIP(return, "No namespaces found");
+
+ TH_LOG("First batch: %zd namespaces", ret1);
+
+ /* Get second batch using last ID from first batch */
+ if (ret1 == ARRAY_SIZE(batch1)) {
+ req.ns_id = batch1[ret1 - 1];
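+ /* ns_id is an exclusive cursor: only strictly greater IDs follow. */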
+ ret2 = sys_listns(&req, batch2, ARRAY_SIZE(batch2), 0);
+ ASSERT_GE(ret2, 0);
+
+ TH_LOG("Second batch: %zd namespaces (after ns_id=%llu)",
+ ret2, (unsigned long long)req.ns_id);
+
+ /* If we got more results, verify IDs are monotonically increasing */
+ if (ret2 > 0) {
+ ASSERT_GT(batch2[0], batch1[ret1 - 1]);
+ TH_LOG("Pagination working: %llu > %llu",
+ (unsigned long long)batch2[0],
+ (unsigned long long)batch1[ret1 - 1]);
+ }
+ } else {
+ TH_LOG("All namespaces fit in first batch");
+ }
+}
+
+/*
+ * Test listns() with LISTNS_CURRENT_USER.
+ * List namespaces owned by current user namespace.
+ */
+TEST(listns_current_user)
+{
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = 0,
+ .spare2 = 0,
+ .user_ns_id = LISTNS_CURRENT_USER,
+ };
+ __u64 ns_ids[100];
+ ssize_t ret;
+
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+ if (ret < 0) {
+ if (errno == ENOSYS)
+ SKIP(return, "listns() not supported");
+ TH_LOG("listns failed: %s (errno=%d)", strerror(errno), errno);
+ ASSERT_TRUE(false);
+ }
+ ASSERT_GE(ret, 0);
+
+ /* Should find at least the initial namespaces if we're in init_user_ns */
+ TH_LOG("Found %zd namespaces owned by current user namespace", ret);
+
+ for (ssize_t i = 0; i < ret; i++)
+ TH_LOG(" [%zd] ns_id: %llu", i, (unsigned long long)ns_ids[i]);
+}
+
+/*
+ * Test that listns() only returns active namespaces.
+ * Create a namespace, let it become inactive, verify it's not listed.
+ */
+TEST(listns_only_active)
+{
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = CLONE_NEWNET,
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
+ __u64 ns_ids_before[100], ns_ids_after[100];
+ ssize_t ret_before, ret_after;
+ int pipefd[2];
+ pid_t pid;
+ __u64 new_ns_id = 0;
+ int status;
+
+ /* Get initial list */
+ ret_before = sys_listns(&req, ns_ids_before, ARRAY_SIZE(ns_ids_before), 0);
+ if (ret_before < 0) {
+ if (errno == ENOSYS)
+ SKIP(return, "listns() not supported");
+ TH_LOG("listns failed: %s (errno=%d)", strerror(errno), errno);
+ ASSERT_TRUE(false);
+ }
+ ASSERT_GE(ret_before, 0);
+
+ TH_LOG("Before: %zd active network namespaces", ret_before);
+
+ /* Create a new namespace in a child process and get its ID */
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ int fd;
+ __u64 ns_id;
+
+ close(pipefd[0]);
+
+ /* Create new network namespace */
+ if (unshare(CLONE_NEWNET) < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ /* Get its ID */
+ fd = open("/proc/self/ns/net", O_RDONLY);
+ if (fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ if (ioctl(fd, NS_GET_ID, &ns_id) < 0) {
+ close(fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(fd);
+
+ /* Send ID to parent */
+ write(pipefd[1], &ns_id, sizeof(ns_id));
+ close(pipefd[1]);
+
+ /* Keep namespace active briefly */
+ usleep(100000);
+ exit(0);
+ }
+
+ /* Parent reads the new namespace ID */
+ {
+ int bytes;
+
+ close(pipefd[1]);
+ bytes = read(pipefd[0], &new_ns_id, sizeof(new_ns_id));
+ close(pipefd[0]);
+
+ if (bytes == sizeof(new_ns_id)) {
+ __u64 ns_ids_during[100];
+ int ret_during;
+
+ TH_LOG("Child created namespace with ID %llu", (unsigned long long)new_ns_id);
+
+ /* List namespaces while child is still alive - should see new one */
+ ret_during = sys_listns(&req, ns_ids_during, ARRAY_SIZE(ns_ids_during), 0);
+ ASSERT_GE(ret_during, 0);
+ TH_LOG("During: %d active network namespaces", ret_during);
+
+ /* Should have more namespaces than before */
+ ASSERT_GE(ret_during, ret_before);
+ }
+ }
+
+ /* Wait for child to exit */
+ waitpid(pid, &status, 0);
+
+ /* Give time for namespace to become inactive */
+ usleep(100000);
+
+ /* List namespaces after child exits - should not see new one */
+ ret_after = sys_listns(&req, ns_ids_after, ARRAY_SIZE(ns_ids_after), 0);
+ ASSERT_GE(ret_after, 0);
+ TH_LOG("After: %zd active network namespaces", ret_after);
+
+ /* Verify the new namespace ID is not in the after list */
+ if (new_ns_id != 0) {
+ bool found = false;
+
+ for (ssize_t i = 0; i < ret_after; i++) {
+ if (ns_ids_after[i] == new_ns_id) {
+ found = true;
+ break;
+ }
+ }
+ ASSERT_FALSE(found);
+ }
+}
+
+/*
+ * Test listns() with specific user namespace ID.
+ * Create a user namespace and list namespaces it owns.
+ */
+TEST(listns_specific_userns)
+{
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = 0,
+ .spare2 = 0,
+ .user_ns_id = 0, /* Will be filled with created userns ID */
+ };
+ __u64 ns_ids[100];
+ int sv[2];
+ pid_t pid;
+ int status;
+ __u64 user_ns_id = 0;
+ int bytes;
+ ssize_t ret;
+
+ ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, sv), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ int fd;
+ __u64 ns_id;
+ char buf;
+
+ close(sv[0]);
+
+ /* Create new user namespace */
+ if (setup_userns() < 0) {
+ close(sv[1]);
+ exit(1);
+ }
+
+ /* Get user namespace ID */
+ fd = open("/proc/self/ns/user", O_RDONLY);
+ if (fd < 0) {
+ close(sv[1]);
+ exit(1);
+ }
+
+ if (ioctl(fd, NS_GET_ID, &ns_id) < 0) {
+ close(fd);
+ close(sv[1]);
+ exit(1);
+ }
+ close(fd);
+
+ /* Send ID to parent */
+ if (write(sv[1], &ns_id, sizeof(ns_id)) != sizeof(ns_id)) {
+ close(sv[1]);
+ exit(1);
+ }
+
+ /* Create some namespaces owned by this user namespace */
+ unshare(CLONE_NEWNET);
+ unshare(CLONE_NEWUTS);
+
+ /* Wait for parent signal */
+ if (read(sv[1], &buf, 1) != 1) {
+ close(sv[1]);
+ exit(1);
+ }
+ close(sv[1]);
+ exit(0);
+ }
+
+ /* Parent */
+ close(sv[1]);
+ bytes = read(sv[0], &user_ns_id, sizeof(user_ns_id));
+
+ if (bytes != sizeof(user_ns_id)) {
+ close(sv[0]);
+ kill(pid, SIGKILL);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to get user namespace ID from child");
+ }
+
+ TH_LOG("Child created user namespace with ID %llu", (unsigned long long)user_ns_id);
+
+ /* List namespaces owned by this user namespace */
+ req.user_ns_id = user_ns_id;
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+
+ if (ret < 0) {
+ TH_LOG("listns failed: %s (errno=%d)", strerror(errno), errno);
+ close(sv[0]);
+ kill(pid, SIGKILL);
+ waitpid(pid, NULL, 0);
+ if (errno == ENOSYS) {
+ SKIP(return, "listns() not supported");
+ }
+ ASSERT_GE(ret, 0);
+ }
+
+ TH_LOG("Found %zd namespaces owned by user namespace %llu", ret,
+ (unsigned long long)user_ns_id);
+
+ /* Should find at least the network and UTS namespaces we created */
+ if (ret > 0) {
+ for (ssize_t i = 0; i < ret && i < 10; i++)
+ TH_LOG(" [%zd] ns_id: %llu", i, (unsigned long long)ns_ids[i]);
+ }
+
+ /* Signal child to exit */
+ if (write(sv[0], "X", 1) != 1) {
+ close(sv[0]);
+ kill(pid, SIGKILL);
+ waitpid(pid, NULL, 0);
+ ASSERT_TRUE(false);
+ }
+ close(sv[0]);
+ waitpid(pid, &status, 0);
+}
+
+/*
+ * Test listns() with multiple namespace types filter.
+ */
+TEST(listns_multiple_types)
+{
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = CLONE_NEWNET | CLONE_NEWUTS, /* Network and UTS */
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
+ __u64 ns_ids[100];
+ ssize_t ret;
+
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+ if (ret < 0) {
+ if (errno == ENOSYS)
+ SKIP(return, "listns() not supported");
+ TH_LOG("listns failed: %s (errno=%d)", strerror(errno), errno);
+ ASSERT_TRUE(false);
+ }
+ ASSERT_GE(ret, 0);
+
+ TH_LOG("Found %zd active network/UTS namespaces", ret);
+
+ for (ssize_t i = 0; i < ret; i++)
+ TH_LOG(" [%zd] ns_id: %llu", i, (unsigned long long)ns_ids[i]);
+}
+
+/*
+ * Test that hierarchical active reference propagation keeps parent
+ * user namespaces visible in listns().
+ */
+TEST(listns_hierarchical_visibility)
+{
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = CLONE_NEWUSER,
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
+ __u64 parent_ns_id = 0, child_ns_id = 0;
+ int sv[2];
+ pid_t pid;
+ int status;
+ int bytes;
+ __u64 ns_ids[100];
+ ssize_t ret;
+ bool found_parent, found_child;
+
+ ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, sv), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ int fd;
+ char buf;
+
+ close(sv[0]);
+
+ /* Create parent user namespace */
+ if (setup_userns() < 0) {
+ close(sv[1]);
+ exit(1);
+ }
+
+ fd = open("/proc/self/ns/user", O_RDONLY);
+ if (fd < 0) {
+ close(sv[1]);
+ exit(1);
+ }
+
+ if (ioctl(fd, NS_GET_ID, &parent_ns_id) < 0) {
+ close(fd);
+ close(sv[1]);
+ exit(1);
+ }
+ close(fd);
+
+ /* Create child user namespace */
+ if (setup_userns() < 0) {
+ close(sv[1]);
+ exit(1);
+ }
+
+ fd = open("/proc/self/ns/user", O_RDONLY);
+ if (fd < 0) {
+ close(sv[1]);
+ exit(1);
+ }
+
+ if (ioctl(fd, NS_GET_ID, &child_ns_id) < 0) {
+ close(fd);
+ close(sv[1]);
+ exit(1);
+ }
+ close(fd);
+
+ /* Send both IDs to parent */
+ if (write(sv[1], &parent_ns_id, sizeof(parent_ns_id)) != sizeof(parent_ns_id)) {
+ close(sv[1]);
+ exit(1);
+ }
+ if (write(sv[1], &child_ns_id, sizeof(child_ns_id)) != sizeof(child_ns_id)) {
+ close(sv[1]);
+ exit(1);
+ }
+
+ /* Wait for parent signal */
+ if (read(sv[1], &buf, 1) != 1) {
+ close(sv[1]);
+ exit(1);
+ }
+ close(sv[1]);
+ exit(0);
+ }
+
+ /* Parent */
+ close(sv[1]);
+
+ /* Read both namespace IDs */
+ bytes = read(sv[0], &parent_ns_id, sizeof(parent_ns_id));
+ bytes += read(sv[0], &child_ns_id, sizeof(child_ns_id));
+
+ if (bytes != (int)(2 * sizeof(__u64))) {
+ close(sv[0]);
+ kill(pid, SIGKILL);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to get namespace IDs from child");
+ }
+
+ TH_LOG("Parent user namespace ID: %llu", (unsigned long long)parent_ns_id);
+ TH_LOG("Child user namespace ID: %llu", (unsigned long long)child_ns_id);
+
+ /* List all user namespaces */
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+
+ if (ret < 0 && errno == ENOSYS) {
+ close(sv[0]);
+ kill(pid, SIGKILL);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "listns() not supported");
+ }
+
+ ASSERT_GE(ret, 0);
+ TH_LOG("Found %zd active user namespaces", ret);
+
+ /* Both parent and child should be visible (active due to child process) */
+ found_parent = false;
+ found_child = false;
+ for (ssize_t i = 0; i < ret; i++) {
+ if (ns_ids[i] == parent_ns_id)
+ found_parent = true;
+ if (ns_ids[i] == child_ns_id)
+ found_child = true;
+ }
+
+ TH_LOG("Parent namespace %s, child namespace %s",
+ found_parent ? "found" : "NOT FOUND",
+ found_child ? "found" : "NOT FOUND");
+
+ ASSERT_TRUE(found_child);
+ /* With hierarchical propagation, parent should also be active */
+ ASSERT_TRUE(found_parent);
+
+ /* Signal child to exit */
+ if (write(sv[0], "X", 1) != 1) {
+ close(sv[0]);
+ kill(pid, SIGKILL);
+ waitpid(pid, NULL, 0);
+ ASSERT_TRUE(false);
+ }
+ close(sv[0]);
+ waitpid(pid, &status, 0);
+}
+
+/*
+ * Test error cases for listns().
+ */
+TEST(listns_error_cases)
+{
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = 0,
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
+ __u64 ns_ids[10];
+ int ret;
+
+ /* Test with invalid flags */
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0xFFFF);
+ if (ret < 0 && errno == ENOSYS)
+ SKIP(return, "listns() not supported");
+ ASSERT_LT(ret, 0);
+ ASSERT_EQ(errno, EINVAL);
+
+ /* Test with NULL ns_ids array */
+ ret = sys_listns(&req, NULL, 10, 0);
+ ASSERT_LT(ret, 0);
+
+ /* Test with invalid spare field */
+ req.spare = 1;
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+ ASSERT_LT(ret, 0);
+ ASSERT_EQ(errno, EINVAL);
+ req.spare = 0;
+
+ /* Test with huge nr_ns_ids */
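+ /* An absurdly large count must be rejected; only the failure itself is checked, not the errno. */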
+ ret = sys_listns(&req, ns_ids, 2000000, 0);
+ ASSERT_LT(ret, 0);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/namespaces/ns_active_ref_test.c b/tools/testing/selftests/namespaces/ns_active_ref_test.c
new file mode 100644
index 000000000000..093268f0efaa
--- /dev/null
+++ b/tools/testing/selftests/namespaces/ns_active_ref_test.c
@@ -0,0 +1,2672 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <linux/nsfs.h>
+#include <sys/mount.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#include <pthread.h>
+#include "../kselftest_harness.h"
+#include "../filesystems/utils.h"
+#include "wrappers.h"
+
+#ifndef FD_NSFS_ROOT
+#define FD_NSFS_ROOT -10003 /* Root of the nsfs filesystem */
+#endif
+
+#ifndef FILEID_NSFS
+#define FILEID_NSFS 0xf1
+#endif
+
+/*
+ * Test that initial namespaces can be reopened via file handle.
+ * Initial namespaces should have active ref count of 1 from boot.
+ */
+TEST(init_ns_always_active)
+{
+ struct file_handle *handle;
+ int mount_id;
+ int ret;
+ int fd1, fd2;
+ struct stat st1, st2;
+
+ handle = malloc(sizeof(*handle) + MAX_HANDLE_SZ);
+ ASSERT_NE(handle, NULL);
+
+ /* Open initial network namespace */
+ fd1 = open("/proc/1/ns/net", O_RDONLY);
+ ASSERT_GE(fd1, 0);
+
+ /* Get file handle for initial namespace */
+ handle->handle_bytes = MAX_HANDLE_SZ;
+ ret = name_to_handle_at(fd1, "", handle, &mount_id, AT_EMPTY_PATH);
+ if (ret < 0 && errno == EOPNOTSUPP) {
+ SKIP(free(handle); close(fd1);
+ return, "nsfs doesn't support file handles");
+ }
+ ASSERT_EQ(ret, 0);
+
+ /* Close the namespace fd */
+ close(fd1);
+
+ /* Try to reopen via file handle - should succeed since init ns is always active */
+ fd2 = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ if (fd2 < 0 && (errno == EINVAL || errno == EOPNOTSUPP)) {
+ SKIP(free(handle);
+ return, "open_by_handle_at with FD_NSFS_ROOT not supported");
+ }
+ ASSERT_GE(fd2, 0);
+
+ /* Verify we opened the same namespace */
+ fd1 = open("/proc/1/ns/net", O_RDONLY);
+ ASSERT_GE(fd1, 0);
+ ASSERT_EQ(fstat(fd1, &st1), 0);
+ ASSERT_EQ(fstat(fd2, &st2), 0);
+ ASSERT_EQ(st1.st_ino, st2.st_ino);
+
+ close(fd1);
+ close(fd2);
+ free(handle);
+}
+
+/*
+ * Test namespace lifecycle: create a namespace in a child process,
+ * get a file handle while it's active, then try to reopen after
+ * the process exits (namespace becomes inactive).
+ */
+TEST(ns_inactive_after_exit)
+{
+ struct file_handle *handle;
+ int mount_id;
+ int ret;
+ int fd;
+ int pipefd[2];
+ pid_t pid;
+ int status;
+ char buf[sizeof(*handle) + MAX_HANDLE_SZ];
+
+ /* Create pipe for passing file handle from child */
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child process */
+ close(pipefd[0]);
+
+ /* Create new network namespace */
+ ret = unshare(CLONE_NEWNET);
+ if (ret < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ /* Open our new namespace */
+ fd = open("/proc/self/ns/net", O_RDONLY);
+ if (fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ /* Get file handle for the namespace */
+ handle = (struct file_handle *)buf;
+ handle->handle_bytes = MAX_HANDLE_SZ;
+ ret = name_to_handle_at(fd, "", handle, &mount_id, AT_EMPTY_PATH);
+ close(fd);
+
+ if (ret < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ /* Send handle to parent */
+ write(pipefd[1], buf, sizeof(*handle) + handle->handle_bytes);
+ close(pipefd[1]);
+
+ /* Exit - namespace should become inactive */
+ exit(0);
+ }
+
+ /* Parent process */
+ close(pipefd[1]);
+
+ /* Read file handle from child */
+ ret = read(pipefd[0], buf, sizeof(buf));
+ close(pipefd[0]);
+
+ /* Wait for child to exit */
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ ASSERT_GT(ret, 0);
+ handle = (struct file_handle *)buf;
+
+ /* Try to reopen the namespace - should fail now that it's inactive */
+ fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ ASSERT_LT(fd, 0);
+ /* Should fail with ENOENT (namespace inactive) or ESTALE */
+ ASSERT_TRUE(errno == ENOENT || errno == ESTALE);
+}
+
+/*
+ * Test that a namespace remains active while a process is using it,
+ * even after the creating process exits.
+ */
+TEST(ns_active_with_multiple_processes)
+{
+ struct file_handle *handle;
+ int mount_id;
+ int ret;
+ int fd;
+ int pipefd[2];
+ int syncpipe[2];
+ pid_t pid1, pid2;
+ int status;
+ char buf[sizeof(*handle) + MAX_HANDLE_SZ];
+ char sync_byte;
+
+ /* Create pipes for communication */
+ ASSERT_EQ(pipe(pipefd), 0);
+ ASSERT_EQ(pipe(syncpipe), 0);
+
+ pid1 = fork();
+ ASSERT_GE(pid1, 0);
+
+ if (pid1 == 0) {
+ /* First child - creates namespace */
+ close(pipefd[0]);
+ close(syncpipe[1]);
+
+ /* Create new network namespace */
+ ret = unshare(CLONE_NEWNET);
+ if (ret < 0) {
+ close(pipefd[1]);
+ close(syncpipe[0]);
+ exit(1);
+ }
+
+ /* Open and get handle */
+ fd = open("/proc/self/ns/net", O_RDONLY);
+ if (fd < 0) {
+ close(pipefd[1]);
+ close(syncpipe[0]);
+ exit(1);
+ }
+
+ handle = (struct file_handle *)buf;
+ handle->handle_bytes = MAX_HANDLE_SZ;
+ ret = name_to_handle_at(fd, "", handle, &mount_id, AT_EMPTY_PATH);
+ close(fd);
+
+ if (ret < 0) {
+ close(pipefd[1]);
+ close(syncpipe[0]);
+ exit(1);
+ }
+
+ /* Send handle to parent */
+ write(pipefd[1], buf, sizeof(*handle) + handle->handle_bytes);
+ close(pipefd[1]);
+
+ /* Wait for signal before exiting */
+ read(syncpipe[0], &sync_byte, 1);
+ close(syncpipe[0]);
+ exit(0);
+ }
+
+ /* Parent reads handle */
+ close(pipefd[1]);
+ ret = read(pipefd[0], buf, sizeof(buf));
+ close(pipefd[0]);
+ ASSERT_GT(ret, 0);
+
+ handle = (struct file_handle *)buf;
+
+ /* Create second child that will keep namespace active */
+ pid2 = fork();
+ ASSERT_GE(pid2, 0);
+
+ if (pid2 == 0) {
+ /* Second child - reopens the namespace */
+ close(syncpipe[0]);
+ close(syncpipe[1]);
+
+ /* Open the namespace via handle */
+ fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ if (fd < 0) {
+ exit(1);
+ }
+
+ /* Join the namespace */
+ ret = setns(fd, CLONE_NEWNET);
+ close(fd);
+ if (ret < 0) {
+ exit(1);
+ }
+
+ /* Sleep to keep namespace active */
+ sleep(1);
+ exit(0);
+ }
+
+ /* Give the second child time to open and join the namespace (best-effort sync) */
+ usleep(100000); /* 100ms */
+
+ /* Signal first child to exit */
+ close(syncpipe[0]);
+ sync_byte = 'X';
+ write(syncpipe[1], &sync_byte, 1);
+ close(syncpipe[1]);
+
+ /* Wait for first child */
+ waitpid(pid1, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+
+ /* Namespace should still be active because second child is using it */
+ fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ ASSERT_GE(fd, 0);
+ close(fd);
+
+ /* Wait for second child */
+ waitpid(pid2, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+}
+
+/*
+ * Test user namespace active ref tracking via credential lifecycle
+ */
+TEST(userns_active_ref_lifecycle)
+{
+ struct file_handle *handle;
+ int mount_id;
+ int ret;
+ int fd;
+ int pipefd[2];
+ pid_t pid;
+ int status;
+ char buf[sizeof(*handle) + MAX_HANDLE_SZ];
+
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child process */
+ close(pipefd[0]);
+
+ /* Create new user namespace */
+ ret = unshare(CLONE_NEWUSER);
+ if (ret < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ /* Set up uid/gid mappings */
+ int uid_map_fd = open("/proc/self/uid_map", O_WRONLY);
+ int gid_map_fd = open("/proc/self/gid_map", O_WRONLY);
+ int setgroups_fd = open("/proc/self/setgroups", O_WRONLY);
+
+ if (uid_map_fd >= 0 && gid_map_fd >= 0 && setgroups_fd >= 0) {
+ write(setgroups_fd, "deny", 4);
+ close(setgroups_fd);
+
+ char mapping[64];
+ snprintf(mapping, sizeof(mapping), "0 %d 1", getuid());
+ write(uid_map_fd, mapping, strlen(mapping));
+ close(uid_map_fd);
+
+ snprintf(mapping, sizeof(mapping), "0 %d 1", getgid());
+ write(gid_map_fd, mapping, strlen(mapping));
+ close(gid_map_fd);
+ } else {
+ /* Close whichever map fds did open so they do not leak */
+ if (uid_map_fd >= 0)
+ close(uid_map_fd);
+ if (gid_map_fd >= 0)
+ close(gid_map_fd);
+ if (setgroups_fd >= 0)
+ close(setgroups_fd);
+ }
+
+ /* Get file handle */
+ fd = open("/proc/self/ns/user", O_RDONLY);
+ if (fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ handle = (struct file_handle *)buf;
+ handle->handle_bytes = MAX_HANDLE_SZ;
+ ret = name_to_handle_at(fd, "", handle, &mount_id, AT_EMPTY_PATH);
+ close(fd);
+
+ if (ret < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ /* Send handle to parent */
+ write(pipefd[1], buf, sizeof(*handle) + handle->handle_bytes);
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Parent */
+ close(pipefd[1]);
+ ret = read(pipefd[0], buf, sizeof(buf));
+ close(pipefd[0]);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ ASSERT_GT(ret, 0);
+ handle = (struct file_handle *)buf;
+
+ /* Namespace should be inactive after all tasks exit */
+ fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ ASSERT_LT(fd, 0);
+ ASSERT_TRUE(errno == ENOENT || errno == ESTALE);
+}
+
+/*
+ * Test PID namespace active ref tracking
+ */
+TEST(pidns_active_ref_lifecycle)
+{
+ struct file_handle *handle;
+ int mount_id;
+ int ret;
+ int fd;
+ int pipefd[2];
+ pid_t pid;
+ int status;
+ char buf[sizeof(*handle) + MAX_HANDLE_SZ];
+
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child process */
+ close(pipefd[0]);
+
+ /* Create new PID namespace */
+ ret = unshare(CLONE_NEWPID);
+ if (ret < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ /* Fork to actually enter the PID namespace */
+ pid_t child = fork();
+ if (child < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ if (child == 0) {
+ /* Grandchild - in new PID namespace */
+ fd = open("/proc/self/ns/pid", O_RDONLY);
+ if (fd < 0) {
+ exit(1);
+ }
+
+ handle = (struct file_handle *)buf;
+ handle->handle_bytes = MAX_HANDLE_SZ;
+ ret = name_to_handle_at(fd, "", handle, &mount_id, AT_EMPTY_PATH);
+ close(fd);
+
+ if (ret < 0) {
+ exit(1);
+ }
+
+ /* Send handle to grandparent */
+ write(pipefd[1], buf, sizeof(*handle) + handle->handle_bytes);
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Wait for grandchild */
+ waitpid(child, NULL, 0);
+ exit(0);
+ }
+
+ /* Parent */
+ close(pipefd[1]);
+ ret = read(pipefd[0], buf, sizeof(buf));
+ close(pipefd[0]);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ ASSERT_GT(ret, 0);
+ handle = (struct file_handle *)buf;
+
+ /* Namespace should be inactive after all processes exit */
+ fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ ASSERT_LT(fd, 0);
+ ASSERT_TRUE(errno == ENOENT || errno == ESTALE);
+}
+
+/*
+ * Test that an open file descriptor keeps a namespace active.
+ * Even after the creating process exits, the namespace should remain
+ * active as long as an fd is held open.
+ */
+TEST(ns_fd_keeps_active)
+{
+ struct file_handle *handle;
+ int mount_id;
+ int ret;
+ int nsfd;
+ int pipe_child_ready[2];
+ int pipe_parent_ready[2];
+ pid_t pid;
+ int status;
+ char buf[sizeof(*handle) + MAX_HANDLE_SZ];
+ char sync_byte;
+ char proc_path[64];
+
+ ASSERT_EQ(pipe(pipe_child_ready), 0);
+ ASSERT_EQ(pipe(pipe_parent_ready), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child process */
+ close(pipe_child_ready[0]);
+ close(pipe_parent_ready[1]);
+
+ TH_LOG("Child: creating new network namespace");
+
+ /* Create new network namespace */
+ ret = unshare(CLONE_NEWNET);
+ if (ret < 0) {
+ TH_LOG("Child: unshare(CLONE_NEWNET) failed: %s", strerror(errno));
+ close(pipe_child_ready[1]);
+ close(pipe_parent_ready[0]);
+ exit(1);
+ }
+
+ TH_LOG("Child: network namespace created successfully");
+
+ /* Get file handle for the namespace */
+ nsfd = open("/proc/self/ns/net", O_RDONLY);
+ if (nsfd < 0) {
+ TH_LOG("Child: failed to open /proc/self/ns/net: %s", strerror(errno));
+ close(pipe_child_ready[1]);
+ close(pipe_parent_ready[0]);
+ exit(1);
+ }
+
+ TH_LOG("Child: opened namespace fd %d", nsfd);
+
+ handle = (struct file_handle *)buf;
+ handle->handle_bytes = MAX_HANDLE_SZ;
+ ret = name_to_handle_at(nsfd, "", handle, &mount_id, AT_EMPTY_PATH);
+ close(nsfd);
+
+ if (ret < 0) {
+ TH_LOG("Child: name_to_handle_at failed: %s", strerror(errno));
+ close(pipe_child_ready[1]);
+ close(pipe_parent_ready[0]);
+ exit(1);
+ }
+
+ TH_LOG("Child: got file handle (bytes=%u)", handle->handle_bytes);
+
+ /* Send file handle to parent */
+ ret = write(pipe_child_ready[1], buf, sizeof(*handle) + handle->handle_bytes);
+ TH_LOG("Child: sent %d bytes of file handle to parent", ret);
+ close(pipe_child_ready[1]);
+
+ /* Wait for parent to open the fd */
+ TH_LOG("Child: waiting for parent to open fd");
+ ret = read(pipe_parent_ready[0], &sync_byte, 1);
+ close(pipe_parent_ready[0]);
+
+ TH_LOG("Child: parent signaled (read %d bytes), exiting now", ret);
+ /* Exit - namespace should stay active because parent holds fd */
+ exit(0);
+ }
+
+ /* Parent process */
+ close(pipe_child_ready[1]);
+ close(pipe_parent_ready[0]);
+
+ TH_LOG("Parent: reading file handle from child");
+
+ /* Read file handle from child */
+ ret = read(pipe_child_ready[0], buf, sizeof(buf));
+ close(pipe_child_ready[0]);
+ ASSERT_GT(ret, 0);
+ handle = (struct file_handle *)buf;
+
+ TH_LOG("Parent: received %d bytes, handle size=%u", ret, handle->handle_bytes);
+
+ /* Open the child's namespace while it's still alive */
+ snprintf(proc_path, sizeof(proc_path), "/proc/%d/ns/net", pid);
+ TH_LOG("Parent: opening child's namespace at %s", proc_path);
+ nsfd = open(proc_path, O_RDONLY);
+ if (nsfd < 0) {
+ TH_LOG("Parent: failed to open %s: %s", proc_path, strerror(errno));
+ close(pipe_parent_ready[1]);
+ kill(pid, SIGKILL);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to open child's namespace");
+ }
+
+ TH_LOG("Parent: opened child's namespace, got fd %d", nsfd);
+
+ /* Signal child that we have the fd */
+ sync_byte = 'G';
+ write(pipe_parent_ready[1], &sync_byte, 1);
+ close(pipe_parent_ready[1]);
+ TH_LOG("Parent: signaled child that we have the fd");
+
+ /* Wait for child to exit */
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ TH_LOG("Child exited, parent holds fd %d to namespace", nsfd);
+
+ /*
+ * Namespace should still be ACTIVE because we hold an fd.
+ * We should be able to reopen it via file handle.
+ */
+ TH_LOG("Attempting to reopen namespace via file handle (should succeed - fd held)");
+ int fd2 = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ ASSERT_GE(fd2, 0);
+
+ TH_LOG("Successfully reopened namespace via file handle, got fd %d", fd2);
+
+ /* Verify it's the same namespace */
+ struct stat st1, st2;
+ ASSERT_EQ(fstat(nsfd, &st1), 0);
+ ASSERT_EQ(fstat(fd2, &st2), 0);
+ TH_LOG("Namespace inodes: nsfd=%lu, fd2=%lu", st1.st_ino, st2.st_ino);
+ ASSERT_EQ(st1.st_ino, st2.st_ino);
+ close(fd2);
+
+ /* Now close the fd - namespace should become inactive */
+ TH_LOG("Closing fd %d - namespace should become inactive", nsfd);
+ close(nsfd);
+
+ /* Now reopening should fail - namespace is inactive */
+ TH_LOG("Attempting to reopen namespace via file handle (should fail - inactive)");
+ fd2 = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ ASSERT_LT(fd2, 0);
+ /* Should fail with ENOENT (inactive) or ESTALE (gone) */
+ TH_LOG("Reopen failed as expected: %s (errno=%d)", strerror(errno), errno);
+ ASSERT_TRUE(errno == ENOENT || errno == ESTALE);
+}
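+
+/*
+ * The remaining tests build nsfs file handles by hand from a namespace
+ * ID. A minimal sketch of that pattern, assuming the nsfs_file_handle
+ * layout used throughout this file (the handle is matched by ns_id when
+ * ns_type and ns_inum are left zero):
+ */
+static inline int open_ns_by_id(__u64 ns_id)
+{
+ char buf[sizeof(struct file_handle) + MAX_HANDLE_SZ];
+ struct file_handle *handle = (struct file_handle *)buf;
+ struct nsfs_file_handle *fh = (struct nsfs_file_handle *)handle->f_handle;
+
+ handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ handle->handle_type = FILEID_NSFS;
+ fh->ns_id = ns_id;
+ fh->ns_type = 0; /* zero: match any type (assumed semantics) */
+ fh->ns_inum = 0; /* zero: ignored when ns_id is set (assumed semantics) */
+
+ return open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+}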
+
+/*
+ * Test hierarchical active reference propagation.
+ * When a child namespace is active, its owning user namespace should also
+ * be active automatically due to hierarchical active reference propagation.
+ * This ensures parents are always reachable when children are active.
+ */
+TEST(ns_parent_always_reachable)
+{
+ struct file_handle *parent_handle, *child_handle;
+ int ret;
+ int child_nsfd;
+ int pipefd[2];
+ pid_t pid;
+ int status;
+ __u64 parent_id, child_id;
+ char parent_buf[sizeof(*parent_handle) + MAX_HANDLE_SZ];
+ char child_buf[sizeof(*child_handle) + MAX_HANDLE_SZ];
+
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child process */
+ close(pipefd[0]);
+
+ TH_LOG("Child: creating parent user namespace and setting up mappings");
+
+ /* Create parent user namespace with mappings */
+ ret = setup_userns();
+ if (ret < 0) {
+ TH_LOG("Child: setup_userns() for parent failed: %s", strerror(errno));
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ TH_LOG("Child: parent user namespace created, now uid=%d gid=%d", getuid(), getgid());
+
+ /* Get namespace ID for parent user namespace */
+ int parent_fd = open("/proc/self/ns/user", O_RDONLY);
+ if (parent_fd < 0) {
+ TH_LOG("Child: failed to open parent /proc/self/ns/user: %s", strerror(errno));
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ TH_LOG("Child: opened parent userns fd %d", parent_fd);
+
+ if (ioctl(parent_fd, NS_GET_ID, &parent_id) < 0) {
+ TH_LOG("Child: NS_GET_ID for parent failed: %s", strerror(errno));
+ close(parent_fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(parent_fd);
+
+ TH_LOG("Child: got parent namespace ID %llu", (unsigned long long)parent_id);
+
+ /* Create child user namespace within parent */
+ TH_LOG("Child: creating nested child user namespace");
+ ret = setup_userns();
+ if (ret < 0) {
+ TH_LOG("Child: setup_userns() for child failed: %s", strerror(errno));
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ TH_LOG("Child: nested child user namespace created, uid=%d gid=%d", getuid(), getgid());
+
+ /* Get namespace ID for child user namespace */
+ int child_fd = open("/proc/self/ns/user", O_RDONLY);
+ if (child_fd < 0) {
+ TH_LOG("Child: failed to open child /proc/self/ns/user: %s", strerror(errno));
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ TH_LOG("Child: opened child userns fd %d", child_fd);
+
+ if (ioctl(child_fd, NS_GET_ID, &child_id) < 0) {
+ TH_LOG("Child: NS_GET_ID for child failed: %s", strerror(errno));
+ close(child_fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(child_fd);
+
+ TH_LOG("Child: got child namespace ID %llu", (unsigned long long)child_id);
+
+ /* Send both namespace IDs to parent */
+ TH_LOG("Child: sending both namespace IDs to parent");
+ write(pipefd[1], &parent_id, sizeof(parent_id));
+ write(pipefd[1], &child_id, sizeof(child_id));
+ close(pipefd[1]);
+
+ TH_LOG("Child: exiting - parent userns should become inactive");
+ /* Exit - parent user namespace should become inactive */
+ exit(0);
+ }
+
+ /* Parent process */
+ close(pipefd[1]);
+
+ TH_LOG("Parent: reading both namespace IDs from child");
+
+ /* Read both namespace IDs - fixed size, no parsing needed */
+ ret = read(pipefd[0], &parent_id, sizeof(parent_id));
+ if (ret != sizeof(parent_id)) {
+ close(pipefd[0]);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to read parent namespace ID from child");
+ }
+
+ ret = read(pipefd[0], &child_id, sizeof(child_id));
+ close(pipefd[0]);
+ if (ret != sizeof(child_id)) {
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to read child namespace ID from child");
+ }
+
+ TH_LOG("Parent: received parent_id=%llu, child_id=%llu",
+ (unsigned long long)parent_id, (unsigned long long)child_id);
+
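+ /*
+ * ns_type and ns_inum are deliberately left zero below; the handle is
+ * assumed to be matched by ns_id alone (see the open_ns_by_id() sketch
+ * above).
+ */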
+ /* Construct file handles from namespace IDs */
+ parent_handle = (struct file_handle *)parent_buf;
+ parent_handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ parent_handle->handle_type = FILEID_NSFS;
+ struct nsfs_file_handle *parent_fh = (struct nsfs_file_handle *)parent_handle->f_handle;
+ parent_fh->ns_id = parent_id;
+ parent_fh->ns_type = 0;
+ parent_fh->ns_inum = 0;
+
+ child_handle = (struct file_handle *)child_buf;
+ child_handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ child_handle->handle_type = FILEID_NSFS;
+ struct nsfs_file_handle *child_fh = (struct nsfs_file_handle *)child_handle->f_handle;
+ child_fh->ns_id = child_id;
+ child_fh->ns_type = 0;
+ child_fh->ns_inum = 0;
+
+ TH_LOG("Parent: opening child namespace BEFORE child exits");
+
+ /* Open child namespace while child is still alive to keep it active */
+ child_nsfd = open_by_handle_at(FD_NSFS_ROOT, child_handle, O_RDONLY);
+ if (child_nsfd < 0) {
+ TH_LOG("Failed to open child namespace: %s (errno=%d)", strerror(errno), errno);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to open child namespace");
+ }
+
+ TH_LOG("Opened child namespace fd %d", child_nsfd);
+
+ /* Now wait for child to exit */
+ TH_LOG("Parent: waiting for child to exit");
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ TH_LOG("Child process exited, parent holds fd to child namespace");
+
+ /*
+ * With hierarchical active reference propagation:
+ * Since the child namespace is active (parent process holds fd),
+ * the parent user namespace should ALSO be active automatically.
+ * This is because when we took an active reference on the child,
+ * it propagated up to the owning user namespace.
+ */
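+ /*
+ * Assumed invariant being exercised: a namespace stays active while
+ * direct_refs(ns) + active_children(ns) > 0, and each active child
+ * holds one active reference on its owning user namespace, recursively
+ * up the ownership chain.
+ */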
+ TH_LOG("Attempting to reopen parent namespace (should SUCCEED - hierarchical propagation)");
+ int parent_fd = open_by_handle_at(FD_NSFS_ROOT, parent_handle, O_RDONLY);
+ ASSERT_GE(parent_fd, 0);
+
+ TH_LOG("SUCCESS: Parent namespace is active (fd=%d) due to active child", parent_fd);
+
+ /* Verify we can also get parent via NS_GET_USERNS */
+ TH_LOG("Verifying NS_GET_USERNS also works");
+ int parent_fd2 = ioctl(child_nsfd, NS_GET_USERNS);
+ if (parent_fd2 < 0) {
+ close(parent_fd);
+ close(child_nsfd);
+ TH_LOG("NS_GET_USERNS failed: %s (errno=%d)", strerror(errno), errno);
+ SKIP(return, "NS_GET_USERNS not supported or failed");
+ }
+
+ TH_LOG("NS_GET_USERNS succeeded, got parent fd %d", parent_fd2);
+
+ /* Verify both methods give us the same namespace */
+ struct stat st1, st2;
+ ASSERT_EQ(fstat(parent_fd, &st1), 0);
+ ASSERT_EQ(fstat(parent_fd2, &st2), 0);
+ TH_LOG("Parent namespace inodes: parent_fd=%lu, parent_fd2=%lu", st1.st_ino, st2.st_ino);
+ ASSERT_EQ(st1.st_ino, st2.st_ino);
+
+ /*
+ * Close child fd - parent should remain active because we still
+ * hold direct references to it (parent_fd and parent_fd2).
+ */
+ TH_LOG("Closing child fd - parent should remain active (direct refs held)");
+ close(child_nsfd);
+
+ /* Parent should still be openable */
+ TH_LOG("Verifying parent still active via file handle");
+ int parent_fd3 = open_by_handle_at(FD_NSFS_ROOT, parent_handle, O_RDONLY);
+ ASSERT_GE(parent_fd3, 0);
+ close(parent_fd3);
+
+ TH_LOG("Closing all fds to parent namespace");
+ close(parent_fd);
+ close(parent_fd2);
+
+ /* Both should now be inactive */
+ TH_LOG("Attempting to reopen parent (should fail - inactive, no refs)");
+ parent_fd = open_by_handle_at(FD_NSFS_ROOT, parent_handle, O_RDONLY);
+ ASSERT_LT(parent_fd, 0);
+ TH_LOG("Parent inactive as expected: %s (errno=%d)", strerror(errno), errno);
+ ASSERT_TRUE(errno == ENOENT || errno == ESTALE);
+}
+
+/*
+ * Test that bind mounts keep namespaces in the tree even when inactive
+ */
+TEST(ns_bind_mount_keeps_in_tree)
+{
+ struct file_handle *handle;
+ int mount_id;
+ int ret;
+ int fd;
+ int pipefd[2];
+ pid_t pid;
+ int status;
+ char buf[sizeof(*handle) + MAX_HANDLE_SZ];
+ char tmpfile[] = "/tmp/ns-test-XXXXXX";
+ int tmpfd;
+
+ /* Create temporary file for bind mount */
+ tmpfd = mkstemp(tmpfile);
+ if (tmpfd < 0) {
+ SKIP(return, "Cannot create temporary file");
+ }
+ close(tmpfd);
+
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child process */
+ close(pipefd[0]);
+
+ /* Unshare mount namespace and make mounts private to avoid propagation */
+ ret = unshare(CLONE_NEWNS);
+ if (ret < 0) {
+ close(pipefd[1]);
+ unlink(tmpfile);
+ exit(1);
+ }
+ ret = mount(NULL, "/", NULL, MS_PRIVATE | MS_REC, NULL);
+ if (ret < 0) {
+ close(pipefd[1]);
+ unlink(tmpfile);
+ exit(1);
+ }
+
+ /* Create new network namespace */
+ ret = unshare(CLONE_NEWNET);
+ if (ret < 0) {
+ close(pipefd[1]);
+ unlink(tmpfile);
+ exit(1);
+ }
+
+ /* Bind mount the namespace */
+ ret = mount("/proc/self/ns/net", tmpfile, NULL, MS_BIND, NULL);
+ if (ret < 0) {
+ close(pipefd[1]);
+ unlink(tmpfile);
+ exit(1);
+ }
+
+ /* Get file handle */
+ fd = open("/proc/self/ns/net", O_RDONLY);
+ if (fd < 0) {
+ umount(tmpfile);
+ close(pipefd[1]);
+ unlink(tmpfile);
+ exit(1);
+ }
+
+ handle = (struct file_handle *)buf;
+ handle->handle_bytes = MAX_HANDLE_SZ;
+ ret = name_to_handle_at(fd, "", handle, &mount_id, AT_EMPTY_PATH);
+ close(fd);
+
+ if (ret < 0) {
+ umount(tmpfile);
+ close(pipefd[1]);
+ unlink(tmpfile);
+ exit(1);
+ }
+
+ /* Send handle to parent */
+ write(pipefd[1], buf, sizeof(*handle) + handle->handle_bytes);
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ /* Parent */
+ close(pipefd[1]);
+ ret = read(pipefd[0], buf, sizeof(buf));
+ close(pipefd[0]);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ ASSERT_GT(ret, 0);
+ handle = (struct file_handle *)buf;
+
+ /*
+ * The namespace is inactive, so reopening must fail. If the bind mount
+ * still pinned it in the tree we would expect ENOENT (inactive); but
+ * since the mount lived in the child's private mount namespace, which
+ * died with the child, ESTALE (no longer in the tree) is also
+ * acceptable.
+ */
+ fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ ASSERT_LT(fd, 0);
+ if (errno != ENOENT && errno != ESTALE) {
+ TH_LOG("Unexpected error: %d", errno);
+ }
+
+ /* Cleanup: the umount targets our own mount namespace and may fail harmlessly */
+ umount(tmpfile);
+ unlink(tmpfile);
+}
+
+/*
+ * Test multi-level hierarchy (3+ levels deep).
+ * Grandparent → Parent → Child
+ * When child is active, both parent AND grandparent should be active.
+ */
+TEST(ns_multilevel_hierarchy)
+{
+ struct file_handle *gp_handle, *p_handle, *c_handle;
+ int ret, pipefd[2];
+ pid_t pid;
+ int status;
+ __u64 gp_id, p_id, c_id;
+ char gp_buf[sizeof(*gp_handle) + MAX_HANDLE_SZ];
+ char p_buf[sizeof(*p_handle) + MAX_HANDLE_SZ];
+ char c_buf[sizeof(*c_handle) + MAX_HANDLE_SZ];
+
+ ASSERT_EQ(pipe(pipefd), 0);
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ close(pipefd[0]);
+
+ /* Create grandparent user namespace */
+ if (setup_userns() < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ int gp_fd = open("/proc/self/ns/user", O_RDONLY);
+ if (gp_fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+ if (ioctl(gp_fd, NS_GET_ID, &gp_id) < 0) {
+ close(gp_fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(gp_fd);
+
+ /* Create parent user namespace */
+ if (setup_userns() < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ int p_fd = open("/proc/self/ns/user", O_RDONLY);
+ if (p_fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+ if (ioctl(p_fd, NS_GET_ID, &p_id) < 0) {
+ close(p_fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(p_fd);
+
+ /* Create child user namespace */
+ if (setup_userns() < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ int c_fd = open("/proc/self/ns/user", O_RDONLY);
+ if (c_fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+ if (ioctl(c_fd, NS_GET_ID, &c_id) < 0) {
+ close(c_fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(c_fd);
+
+ /* Send all three namespace IDs */
+ write(pipefd[1], &gp_id, sizeof(gp_id));
+ write(pipefd[1], &p_id, sizeof(p_id));
+ write(pipefd[1], &c_id, sizeof(c_id));
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ close(pipefd[1]);
+
+ /* Read all three namespace IDs - fixed size, no parsing needed */
+ ret = read(pipefd[0], &gp_id, sizeof(gp_id));
+ if (ret != sizeof(gp_id)) {
+ close(pipefd[0]);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to read grandparent namespace ID from child");
+ }
+
+ ret = read(pipefd[0], &p_id, sizeof(p_id));
+ if (ret != sizeof(p_id)) {
+ close(pipefd[0]);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to read parent namespace ID from child");
+ }
+
+ ret = read(pipefd[0], &c_id, sizeof(c_id));
+ close(pipefd[0]);
+ if (ret != sizeof(c_id)) {
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to read child namespace ID from child");
+ }
+
+ /* Construct file handles from namespace IDs */
+ gp_handle = (struct file_handle *)gp_buf;
+ gp_handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ gp_handle->handle_type = FILEID_NSFS;
+ struct nsfs_file_handle *gp_fh = (struct nsfs_file_handle *)gp_handle->f_handle;
+ gp_fh->ns_id = gp_id;
+ gp_fh->ns_type = 0;
+ gp_fh->ns_inum = 0;
+
+ p_handle = (struct file_handle *)p_buf;
+ p_handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ p_handle->handle_type = FILEID_NSFS;
+ struct nsfs_file_handle *p_fh = (struct nsfs_file_handle *)p_handle->f_handle;
+ p_fh->ns_id = p_id;
+ p_fh->ns_type = 0;
+ p_fh->ns_inum = 0;
+
+ c_handle = (struct file_handle *)c_buf;
+ c_handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ c_handle->handle_type = FILEID_NSFS;
+ struct nsfs_file_handle *c_fh = (struct nsfs_file_handle *)c_handle->f_handle;
+ c_fh->ns_id = c_id;
+ c_fh->ns_type = 0;
+ c_fh->ns_inum = 0;
+
+ /* Open child before process exits */
+ int c_fd = open_by_handle_at(FD_NSFS_ROOT, c_handle, O_RDONLY);
+ if (c_fd < 0) {
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to open child namespace");
+ }
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ /*
+ * With 3-level hierarchy and child active:
+ * - Child is active (we hold fd)
+ * - Parent should be active (propagated from child)
+ * - Grandparent should be active (propagated from parent)
+ */
+ TH_LOG("Testing parent active when child is active");
+ int p_fd = open_by_handle_at(FD_NSFS_ROOT, p_handle, O_RDONLY);
+ ASSERT_GE(p_fd, 0);
+
+ TH_LOG("Testing grandparent active when child is active");
+ int gp_fd = open_by_handle_at(FD_NSFS_ROOT, gp_handle, O_RDONLY);
+ ASSERT_GE(gp_fd, 0);
+
+ close(c_fd);
+ close(p_fd);
+ close(gp_fd);
+}
+
+/*
+ * Test multiple children sharing same parent.
+ * Parent should stay active as long as ANY child is active.
+ */
+TEST(ns_multiple_children_same_parent)
+{
+ struct file_handle *p_handle, *c1_handle, *c2_handle;
+ int ret, pipefd[2];
+ pid_t pid;
+ int status;
+ __u64 p_id, c1_id, c2_id;
+ char p_buf[sizeof(*p_handle) + MAX_HANDLE_SZ];
+ char c1_buf[sizeof(*c1_handle) + MAX_HANDLE_SZ];
+ char c2_buf[sizeof(*c2_handle) + MAX_HANDLE_SZ];
+
+ ASSERT_EQ(pipe(pipefd), 0);
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ close(pipefd[0]);
+
+ /* Create parent user namespace */
+ if (setup_userns() < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ int p_fd = open("/proc/self/ns/user", O_RDONLY);
+ if (p_fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+ if (ioctl(p_fd, NS_GET_ID, &p_id) < 0) {
+ close(p_fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(p_fd);
+
+ /* Create first child user namespace */
+ if (setup_userns() < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ int c1_fd = open("/proc/self/ns/user", O_RDONLY);
+ if (c1_fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+ if (ioctl(c1_fd, NS_GET_ID, &c1_id) < 0) {
+ close(c1_fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(c1_fd);
+
+ /*
+ * Ideally we would return to the parent user namespace and create a
+ * true second child there, but that is not easily done. Instead,
+ * create a network namespace here; it is owned by the nested child
+ * user namespace, so it keeps the parent active transitively through
+ * the ownership chain, which is what this test exercises.
+ */
+ if (unshare(CLONE_NEWNET) < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ int c2_fd = open("/proc/self/ns/net", O_RDONLY);
+ if (c2_fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+ if (ioctl(c2_fd, NS_GET_ID, &c2_id) < 0) {
+ close(c2_fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(c2_fd);
+
+ /* Send all namespace IDs */
+ write(pipefd[1], &p_id, sizeof(p_id));
+ write(pipefd[1], &c1_id, sizeof(c1_id));
+ write(pipefd[1], &c2_id, sizeof(c2_id));
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ close(pipefd[1]);
+
+ /* Read all three namespace IDs - fixed size, no parsing needed */
+ ret = read(pipefd[0], &p_id, sizeof(p_id));
+ if (ret != sizeof(p_id)) {
+ close(pipefd[0]);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to read parent namespace ID");
+ }
+
+ ret = read(pipefd[0], &c1_id, sizeof(c1_id));
+ if (ret != sizeof(c1_id)) {
+ close(pipefd[0]);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to read first child namespace ID");
+ }
+
+ ret = read(pipefd[0], &c2_id, sizeof(c2_id));
+ close(pipefd[0]);
+ if (ret != sizeof(c2_id)) {
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to read second child namespace ID");
+ }
+
+ /* Construct file handles from namespace IDs */
+ p_handle = (struct file_handle *)p_buf;
+ p_handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ p_handle->handle_type = FILEID_NSFS;
+ struct nsfs_file_handle *p_fh = (struct nsfs_file_handle *)p_handle->f_handle;
+ p_fh->ns_id = p_id;
+ p_fh->ns_type = 0;
+ p_fh->ns_inum = 0;
+
+ c1_handle = (struct file_handle *)c1_buf;
+ c1_handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ c1_handle->handle_type = FILEID_NSFS;
+ struct nsfs_file_handle *c1_fh = (struct nsfs_file_handle *)c1_handle->f_handle;
+ c1_fh->ns_id = c1_id;
+ c1_fh->ns_type = 0;
+ c1_fh->ns_inum = 0;
+
+ c2_handle = (struct file_handle *)c2_buf;
+ c2_handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ c2_handle->handle_type = FILEID_NSFS;
+ struct nsfs_file_handle *c2_fh = (struct nsfs_file_handle *)c2_handle->f_handle;
+ c2_fh->ns_id = c2_id;
+ c2_fh->ns_type = 0;
+ c2_fh->ns_inum = 0;
+
+ /* Open both children before process exits */
+ int c1_fd = open_by_handle_at(FD_NSFS_ROOT, c1_handle, O_RDONLY);
+ int c2_fd = open_by_handle_at(FD_NSFS_ROOT, c2_handle, O_RDONLY);
+
+ if (c1_fd < 0 || c2_fd < 0) {
+ if (c1_fd >= 0) close(c1_fd);
+ if (c2_fd >= 0) close(c2_fd);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to open child namespaces");
+ }
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ /* Parent should be active (both children active) */
+ TH_LOG("Both children active - parent should be active");
+ int p_fd = open_by_handle_at(FD_NSFS_ROOT, p_handle, O_RDONLY);
+ ASSERT_GE(p_fd, 0);
+ close(p_fd);
+
+ /* Close first child - parent should STILL be active */
+ TH_LOG("Closing first child - parent should still be active");
+ close(c1_fd);
+ p_fd = open_by_handle_at(FD_NSFS_ROOT, p_handle, O_RDONLY);
+ ASSERT_GE(p_fd, 0);
+ close(p_fd);
+
+ /* Close second child - NOW parent should become inactive */
+ TH_LOG("Closing second child - parent should become inactive");
+ close(c2_fd);
+ p_fd = open_by_handle_at(FD_NSFS_ROOT, p_handle, O_RDONLY);
+ ASSERT_LT(p_fd, 0);
+}
+
+/*
+ * Test that different namespace types with same owner all contribute
+ * active references to the owning user namespace.
+ */
+TEST(ns_different_types_same_owner)
+{
+ struct file_handle *u_handle, *n_handle, *ut_handle;
+ int ret, pipefd[2];
+ pid_t pid;
+ int status;
+ __u64 u_id, n_id, ut_id;
+ char u_buf[sizeof(*u_handle) + MAX_HANDLE_SZ];
+ char n_buf[sizeof(*n_handle) + MAX_HANDLE_SZ];
+ char ut_buf[sizeof(*ut_handle) + MAX_HANDLE_SZ];
+
+ ASSERT_EQ(pipe(pipefd), 0);
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ close(pipefd[0]);
+
+ /* Create user namespace */
+ if (setup_userns() < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ int u_fd = open("/proc/self/ns/user", O_RDONLY);
+ if (u_fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+ if (ioctl(u_fd, NS_GET_ID, &u_id) < 0) {
+ close(u_fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(u_fd);
+
+ /* Create network namespace (owned by user namespace) */
+ if (unshare(CLONE_NEWNET) < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ int n_fd = open("/proc/self/ns/net", O_RDONLY);
+ if (n_fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+ if (ioctl(n_fd, NS_GET_ID, &n_id) < 0) {
+ close(n_fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(n_fd);
+
+ /* Create UTS namespace (also owned by user namespace) */
+ if (unshare(CLONE_NEWUTS) < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ int ut_fd = open("/proc/self/ns/uts", O_RDONLY);
+ if (ut_fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+ if (ioctl(ut_fd, NS_GET_ID, &ut_id) < 0) {
+ close(ut_fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(ut_fd);
+
+ /* Send all namespace IDs */
+ write(pipefd[1], &u_id, sizeof(u_id));
+ write(pipefd[1], &n_id, sizeof(n_id));
+ write(pipefd[1], &ut_id, sizeof(ut_id));
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ close(pipefd[1]);
+
+ /* Read all three namespace IDs - fixed size, no parsing needed */
+ ret = read(pipefd[0], &u_id, sizeof(u_id));
+ if (ret != sizeof(u_id)) {
+ close(pipefd[0]);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to read user namespace ID");
+ }
+
+ ret = read(pipefd[0], &n_id, sizeof(n_id));
+ if (ret != sizeof(n_id)) {
+ close(pipefd[0]);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to read network namespace ID");
+ }
+
+ ret = read(pipefd[0], &ut_id, sizeof(ut_id));
+ close(pipefd[0]);
+ if (ret != sizeof(ut_id)) {
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to read UTS namespace ID");
+ }
+
+ /* Construct file handles from namespace IDs */
+ u_handle = (struct file_handle *)u_buf;
+ u_handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ u_handle->handle_type = FILEID_NSFS;
+ struct nsfs_file_handle *u_fh = (struct nsfs_file_handle *)u_handle->f_handle;
+ u_fh->ns_id = u_id;
+ u_fh->ns_type = 0;
+ u_fh->ns_inum = 0;
+
+ n_handle = (struct file_handle *)n_buf;
+ n_handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ n_handle->handle_type = FILEID_NSFS;
+ struct nsfs_file_handle *n_fh = (struct nsfs_file_handle *)n_handle->f_handle;
+ n_fh->ns_id = n_id;
+ n_fh->ns_type = 0;
+ n_fh->ns_inum = 0;
+
+ ut_handle = (struct file_handle *)ut_buf;
+ ut_handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ ut_handle->handle_type = FILEID_NSFS;
+ struct nsfs_file_handle *ut_fh = (struct nsfs_file_handle *)ut_handle->f_handle;
+ ut_fh->ns_id = ut_id;
+ ut_fh->ns_type = 0;
+ ut_fh->ns_inum = 0;
+
+ /* Open both non-user namespaces before process exits */
+ int n_fd = open_by_handle_at(FD_NSFS_ROOT, n_handle, O_RDONLY);
+ int ut_fd = open_by_handle_at(FD_NSFS_ROOT, ut_handle, O_RDONLY);
+
+ if (n_fd < 0 || ut_fd < 0) {
+ if (n_fd >= 0) close(n_fd);
+ if (ut_fd >= 0) close(ut_fd);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to open namespaces");
+ }
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ /*
+ * Both network and UTS namespaces are active.
+ * User namespace should be active (gets 2 active refs).
+ */
+ TH_LOG("Both net and uts active - user namespace should be active");
+ int u_fd = open_by_handle_at(FD_NSFS_ROOT, u_handle, O_RDONLY);
+ ASSERT_GE(u_fd, 0);
+ close(u_fd);
+
+ /* Close network namespace - user namespace should STILL be active */
+ TH_LOG("Closing network ns - user ns should still be active (uts still active)");
+ close(n_fd);
+ u_fd = open_by_handle_at(FD_NSFS_ROOT, u_handle, O_RDONLY);
+ ASSERT_GE(u_fd, 0);
+ close(u_fd);
+
+ /* Close UTS namespace - user namespace should become inactive */
+ TH_LOG("Closing uts ns - user ns should become inactive");
+ close(ut_fd);
+ u_fd = open_by_handle_at(FD_NSFS_ROOT, u_handle, O_RDONLY);
+ ASSERT_LT(u_fd, 0);
+}
+
+/*
+ * Test hierarchical propagation with deep namespace hierarchy.
+ * Create: init_user_ns -> user_A -> user_B -> net_ns
+ * When net_ns is active, both user_A and user_B should be active.
+ * This verifies the conditional recursion in __ns_ref_active_put() works.
+ */
+TEST(ns_deep_hierarchy_propagation)
+{
+ struct file_handle *ua_handle, *ub_handle, *net_handle;
+ int ret, pipefd[2];
+ pid_t pid;
+ int status;
+ __u64 ua_id, ub_id, net_id;
+ char ua_buf[sizeof(*ua_handle) + MAX_HANDLE_SZ];
+ char ub_buf[sizeof(*ub_handle) + MAX_HANDLE_SZ];
+ char net_buf[sizeof(*net_handle) + MAX_HANDLE_SZ];
+
+ ASSERT_EQ(pipe(pipefd), 0);
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ close(pipefd[0]);
+
+ /* Create user_A -> user_B -> net hierarchy */
+ if (setup_userns() < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ int ua_fd = open("/proc/self/ns/user", O_RDONLY);
+ if (ua_fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+ if (ioctl(ua_fd, NS_GET_ID, &ua_id) < 0) {
+ close(ua_fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(ua_fd);
+
+ if (setup_userns() < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ int ub_fd = open("/proc/self/ns/user", O_RDONLY);
+ if (ub_fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+ if (ioctl(ub_fd, NS_GET_ID, &ub_id) < 0) {
+ close(ub_fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(ub_fd);
+
+ if (unshare(CLONE_NEWNET) < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ int net_fd = open("/proc/self/ns/net", O_RDONLY);
+ if (net_fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+ if (ioctl(net_fd, NS_GET_ID, &net_id) < 0) {
+ close(net_fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(net_fd);
+
+ /* Send all three namespace IDs */
+ write(pipefd[1], &ua_id, sizeof(ua_id));
+ write(pipefd[1], &ub_id, sizeof(ub_id));
+ write(pipefd[1], &net_id, sizeof(net_id));
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ close(pipefd[1]);
+
+ /* Read all three namespace IDs - fixed size, no parsing needed */
+ ret = read(pipefd[0], &ua_id, sizeof(ua_id));
+ if (ret != sizeof(ua_id)) {
+ close(pipefd[0]);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to read user_A namespace ID");
+ }
+
+ ret = read(pipefd[0], &ub_id, sizeof(ub_id));
+ if (ret != sizeof(ub_id)) {
+ close(pipefd[0]);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to read user_B namespace ID");
+ }
+
+ ret = read(pipefd[0], &net_id, sizeof(net_id));
+ close(pipefd[0]);
+ if (ret != sizeof(net_id)) {
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to read network namespace ID");
+ }
+
+ /* Construct file handles from namespace IDs */
+ ua_handle = (struct file_handle *)ua_buf;
+ ua_handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ ua_handle->handle_type = FILEID_NSFS;
+ struct nsfs_file_handle *ua_fh = (struct nsfs_file_handle *)ua_handle->f_handle;
+ ua_fh->ns_id = ua_id;
+ ua_fh->ns_type = 0;
+ ua_fh->ns_inum = 0;
+
+ ub_handle = (struct file_handle *)ub_buf;
+ ub_handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ ub_handle->handle_type = FILEID_NSFS;
+ struct nsfs_file_handle *ub_fh = (struct nsfs_file_handle *)ub_handle->f_handle;
+ ub_fh->ns_id = ub_id;
+ ub_fh->ns_type = 0;
+ ub_fh->ns_inum = 0;
+
+ net_handle = (struct file_handle *)net_buf;
+ net_handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ net_handle->handle_type = FILEID_NSFS;
+ struct nsfs_file_handle *net_fh = (struct nsfs_file_handle *)net_handle->f_handle;
+ net_fh->ns_id = net_id;
+ net_fh->ns_type = 0;
+ net_fh->ns_inum = 0;
+
+ /* Open net_ns before child exits to keep it active */
+ int net_fd = open_by_handle_at(FD_NSFS_ROOT, net_handle, O_RDONLY);
+ if (net_fd < 0) {
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to open network namespace");
+ }
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ /* With net_ns active, both user_A and user_B should be active */
+ TH_LOG("Testing user_B active (net_ns active causes propagation)");
+ int ub_fd = open_by_handle_at(FD_NSFS_ROOT, ub_handle, O_RDONLY);
+ ASSERT_GE(ub_fd, 0);
+
+ TH_LOG("Testing user_A active (propagated through user_B)");
+ int ua_fd = open_by_handle_at(FD_NSFS_ROOT, ua_handle, O_RDONLY);
+ ASSERT_GE(ua_fd, 0);
+
+ /* Close net_ns - user_B should stay active (we hold direct ref) */
+ TH_LOG("Closing net_ns, user_B should remain active (direct ref held)");
+ close(net_fd);
+ int ub_fd2 = open_by_handle_at(FD_NSFS_ROOT, ub_handle, O_RDONLY);
+ ASSERT_GE(ub_fd2, 0);
+ close(ub_fd2);
+
+ /* Close user_B - user_A should stay active (we hold direct ref) */
+ TH_LOG("Closing user_B, user_A should remain active (direct ref held)");
+ close(ub_fd);
+ int ua_fd2 = open_by_handle_at(FD_NSFS_ROOT, ua_handle, O_RDONLY);
+ ASSERT_GE(ua_fd2, 0);
+ close(ua_fd2);
+
+ /* Close user_A - everything should become inactive */
+ TH_LOG("Closing user_A, all should become inactive");
+ close(ua_fd);
+
+ /* All should now be inactive */
+ ua_fd = open_by_handle_at(FD_NSFS_ROOT, ua_handle, O_RDONLY);
+ ASSERT_LT(ua_fd, 0);
+}
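+
+/*
+ * Summary of the deep-hierarchy test above ("open" means
+ * open_by_handle_at() succeeds for that namespace):
+ *
+ * fds held net user_B user_A
+ * net, B, A open open open
+ * B, A - open open
+ * A - - open
+ * none - - -
+ */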
+
+/*
+ * Test that parent stays active as long as ANY child is active.
+ * Create parent user namespace with two child net namespaces.
+ * Parent should remain active until BOTH children are inactive.
+ */
+TEST(ns_parent_multiple_children_refcount)
+{
+ struct file_handle *parent_handle, *net1_handle, *net2_handle;
+ int ret, pipefd[2], syncpipe[2];
+ pid_t pid;
+ int status;
+ __u64 p_id, n1_id, n2_id;
+ char p_buf[sizeof(*parent_handle) + MAX_HANDLE_SZ];
+ char n1_buf[sizeof(*net1_handle) + MAX_HANDLE_SZ];
+ char n2_buf[sizeof(*net2_handle) + MAX_HANDLE_SZ];
+ char sync_byte;
+
+ ASSERT_EQ(pipe(pipefd), 0);
+ ASSERT_EQ(pipe(syncpipe), 0);
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ close(pipefd[0]);
+ close(syncpipe[1]);
+
+ /* Create parent user namespace */
+ if (setup_userns() < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ int p_fd = open("/proc/self/ns/user", O_RDONLY);
+ if (p_fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+ if (ioctl(p_fd, NS_GET_ID, &p_id) < 0) {
+ close(p_fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(p_fd);
+
+ /* Create first network namespace */
+ if (unshare(CLONE_NEWNET) < 0) {
+ close(pipefd[1]);
+ close(syncpipe[0]);
+ exit(1);
+ }
+
+ int n1_fd = open("/proc/self/ns/net", O_RDONLY);
+ if (n1_fd < 0) {
+ close(pipefd[1]);
+ close(syncpipe[0]);
+ exit(1);
+ }
+ if (ioctl(n1_fd, NS_GET_ID, &n1_id) < 0) {
+ close(n1_fd);
+ close(pipefd[1]);
+ close(syncpipe[0]);
+ exit(1);
+ }
+ /* Keep n1_fd open so first namespace stays active */
+
+ /* Create second network namespace */
+ if (unshare(CLONE_NEWNET) < 0) {
+ close(n1_fd);
+ close(pipefd[1]);
+ close(syncpipe[0]);
+ exit(1);
+ }
+
+ int n2_fd = open("/proc/self/ns/net", O_RDONLY);
+ if (n2_fd < 0) {
+ close(n1_fd);
+ close(pipefd[1]);
+ close(syncpipe[0]);
+ exit(1);
+ }
+ if (ioctl(n2_fd, NS_GET_ID, &n2_id) < 0) {
+ close(n1_fd);
+ close(n2_fd);
+ close(pipefd[1]);
+ close(syncpipe[0]);
+ exit(1);
+ }
+ /* Keep both n1_fd and n2_fd open */
+
+ /* Send all namespace IDs */
+ write(pipefd[1], &p_id, sizeof(p_id));
+ write(pipefd[1], &n1_id, sizeof(n1_id));
+ write(pipefd[1], &n2_id, sizeof(n2_id));
+ close(pipefd[1]);
+
+ /* Wait for parent to signal before exiting */
+ read(syncpipe[0], &sync_byte, 1);
+ close(syncpipe[0]);
+ exit(0);
+ }
+
+ close(pipefd[1]);
+ close(syncpipe[0]);
+
+ /* Read all three namespace IDs - fixed size, no parsing needed */
+ ret = read(pipefd[0], &p_id, sizeof(p_id));
+ if (ret != sizeof(p_id)) {
+ close(pipefd[0]);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to read parent namespace ID");
+ }
+
+ ret = read(pipefd[0], &n1_id, sizeof(n1_id));
+ if (ret != sizeof(n1_id)) {
+ close(pipefd[0]);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to read first network namespace ID");
+ }
+
+ ret = read(pipefd[0], &n2_id, sizeof(n2_id));
+ close(pipefd[0]);
+ if (ret != sizeof(n2_id)) {
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to read second network namespace ID");
+ }
+
+ /* Construct file handles from namespace IDs */
+ parent_handle = (struct file_handle *)p_buf;
+ parent_handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ parent_handle->handle_type = FILEID_NSFS;
+ struct nsfs_file_handle *p_fh = (struct nsfs_file_handle *)parent_handle->f_handle;
+ p_fh->ns_id = p_id;
+ p_fh->ns_type = 0;
+ p_fh->ns_inum = 0;
+
+ net1_handle = (struct file_handle *)n1_buf;
+ net1_handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ net1_handle->handle_type = FILEID_NSFS;
+ struct nsfs_file_handle *n1_fh = (struct nsfs_file_handle *)net1_handle->f_handle;
+ n1_fh->ns_id = n1_id;
+ n1_fh->ns_type = 0;
+ n1_fh->ns_inum = 0;
+
+ net2_handle = (struct file_handle *)n2_buf;
+ net2_handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ net2_handle->handle_type = FILEID_NSFS;
+ struct nsfs_file_handle *n2_fh = (struct nsfs_file_handle *)net2_handle->f_handle;
+ n2_fh->ns_id = n2_id;
+ n2_fh->ns_type = 0;
+ n2_fh->ns_inum = 0;
+
+ /* Open both net namespaces while child is still alive */
+ int n1_fd = open_by_handle_at(FD_NSFS_ROOT, net1_handle, O_RDONLY);
+ int n2_fd = open_by_handle_at(FD_NSFS_ROOT, net2_handle, O_RDONLY);
+ if (n1_fd < 0 || n2_fd < 0) {
+ if (n1_fd >= 0) close(n1_fd);
+ if (n2_fd >= 0) close(n2_fd);
+ sync_byte = 'G';
+ write(syncpipe[1], &sync_byte, 1);
+ close(syncpipe[1]);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to open net namespaces");
+ }
+
+ /* Signal child that we have opened the namespaces */
+ sync_byte = 'G';
+ write(syncpipe[1], &sync_byte, 1);
+ close(syncpipe[1]);
+
+ /* Wait for child to exit */
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ /* Parent should be active (has 2 active children) */
+ TH_LOG("Both net namespaces active - parent should be active");
+ int p_fd = open_by_handle_at(FD_NSFS_ROOT, parent_handle, O_RDONLY);
+ ASSERT_GE(p_fd, 0);
+ close(p_fd);
+
+ /* Close first net namespace - parent should STILL be active */
+ TH_LOG("Closing first net ns - parent should still be active");
+ close(n1_fd);
+ p_fd = open_by_handle_at(FD_NSFS_ROOT, parent_handle, O_RDONLY);
+ ASSERT_GE(p_fd, 0);
+ close(p_fd);
+
+ /* Close second net namespace - parent should become inactive */
+ TH_LOG("Closing second net ns - parent should become inactive");
+ close(n2_fd);
+ p_fd = open_by_handle_at(FD_NSFS_ROOT, parent_handle, O_RDONLY);
+ ASSERT_LT(p_fd, 0);
+}
+
+/*
+ * Test that user namespace as a child also propagates correctly.
+ * Create user_A -> user_B, verify when user_B is active that user_A
+ * is also active. This is different from non-user namespace children.
+ */
+TEST(ns_userns_child_propagation)
+{
+ struct file_handle *ua_handle, *ub_handle;
+ int ret, pipefd[2];
+ pid_t pid;
+ int status;
+ __u64 ua_id, ub_id;
+ char ua_buf[sizeof(*ua_handle) + MAX_HANDLE_SZ];
+ char ub_buf[sizeof(*ub_handle) + MAX_HANDLE_SZ];
+
+ ASSERT_EQ(pipe(pipefd), 0);
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ close(pipefd[0]);
+
+ /* Create user_A */
+ if (setup_userns() < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ int ua_fd = open("/proc/self/ns/user", O_RDONLY);
+ if (ua_fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+ if (ioctl(ua_fd, NS_GET_ID, &ua_id) < 0) {
+ close(ua_fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(ua_fd);
+
+ /* Create user_B (child of user_A) */
+ if (setup_userns() < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ int ub_fd = open("/proc/self/ns/user", O_RDONLY);
+ if (ub_fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+ if (ioctl(ub_fd, NS_GET_ID, &ub_id) < 0) {
+ close(ub_fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(ub_fd);
+
+ /* Send both namespace IDs */
+ write(pipefd[1], &ua_id, sizeof(ua_id));
+ write(pipefd[1], &ub_id, sizeof(ub_id));
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ close(pipefd[1]);
+
+ /* Read both namespace IDs - fixed size, no parsing needed */
+ ret = read(pipefd[0], &ua_id, sizeof(ua_id));
+ if (ret != sizeof(ua_id)) {
+ close(pipefd[0]);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to read user_A namespace ID");
+ }
+
+ ret = read(pipefd[0], &ub_id, sizeof(ub_id));
+ close(pipefd[0]);
+ if (ret != sizeof(ub_id)) {
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to read user_B namespace ID");
+ }
+
+ /* Construct file handles from namespace IDs */
+ ua_handle = (struct file_handle *)ua_buf;
+ ua_handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ ua_handle->handle_type = FILEID_NSFS;
+ struct nsfs_file_handle *ua_fh = (struct nsfs_file_handle *)ua_handle->f_handle;
+ ua_fh->ns_id = ua_id;
+ ua_fh->ns_type = 0;
+ ua_fh->ns_inum = 0;
+
+ ub_handle = (struct file_handle *)ub_buf;
+ ub_handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ ub_handle->handle_type = FILEID_NSFS;
+ struct nsfs_file_handle *ub_fh = (struct nsfs_file_handle *)ub_handle->f_handle;
+ ub_fh->ns_id = ub_id;
+ ub_fh->ns_type = 0;
+ ub_fh->ns_inum = 0;
+
+ /* Open user_B before child exits */
+ int ub_fd = open_by_handle_at(FD_NSFS_ROOT, ub_handle, O_RDONLY);
+ if (ub_fd < 0) {
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to open user_B");
+ }
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ /* With user_B active, user_A should also be active */
+ TH_LOG("Testing user_A active when child user_B is active");
+ int ua_fd = open_by_handle_at(FD_NSFS_ROOT, ua_handle, O_RDONLY);
+ ASSERT_GE(ua_fd, 0);
+
+ /* Close user_B */
+ TH_LOG("Closing user_B");
+ close(ub_fd);
+
+ /* user_A should remain active (we hold direct ref) */
+ int ua_fd2 = open_by_handle_at(FD_NSFS_ROOT, ua_handle, O_RDONLY);
+ ASSERT_GE(ua_fd2, 0);
+ close(ua_fd2);
+
+ /* Close user_A - should become inactive */
+ TH_LOG("Closing user_A - should become inactive");
+ close(ua_fd);
+
+ ua_fd = open_by_handle_at(FD_NSFS_ROOT, ua_handle, O_RDONLY);
+ ASSERT_LT(ua_fd, 0);
+}
+
+/*
+ * Test different namespace types (net, uts, ipc) all contributing
+ * active references to the same owning user namespace.
+ */
+TEST(ns_mixed_types_same_owner)
+{
+ struct file_handle *user_handle, *net_handle, *uts_handle;
+ int ret, pipefd[2];
+ pid_t pid;
+ int status;
+ __u64 u_id, n_id, ut_id;
+ char u_buf[sizeof(*user_handle) + MAX_HANDLE_SZ];
+ char n_buf[sizeof(*net_handle) + MAX_HANDLE_SZ];
+ char ut_buf[sizeof(*uts_handle) + MAX_HANDLE_SZ];
+
+ ASSERT_EQ(pipe(pipefd), 0);
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ close(pipefd[0]);
+
+ if (setup_userns() < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ int u_fd = open("/proc/self/ns/user", O_RDONLY);
+ if (u_fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+ if (ioctl(u_fd, NS_GET_ID, &u_id) < 0) {
+ close(u_fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(u_fd);
+
+ if (unshare(CLONE_NEWNET) < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ int n_fd = open("/proc/self/ns/net", O_RDONLY);
+ if (n_fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+ if (ioctl(n_fd, NS_GET_ID, &n_id) < 0) {
+ close(n_fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(n_fd);
+
+ if (unshare(CLONE_NEWUTS) < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+
+ int ut_fd = open("/proc/self/ns/uts", O_RDONLY);
+ if (ut_fd < 0) {
+ close(pipefd[1]);
+ exit(1);
+ }
+ if (ioctl(ut_fd, NS_GET_ID, &ut_id) < 0) {
+ close(ut_fd);
+ close(pipefd[1]);
+ exit(1);
+ }
+ close(ut_fd);
+
+ /* Send all namespace IDs */
+ write(pipefd[1], &u_id, sizeof(u_id));
+ write(pipefd[1], &n_id, sizeof(n_id));
+ write(pipefd[1], &ut_id, sizeof(ut_id));
+ close(pipefd[1]);
+ exit(0);
+ }
+
+ close(pipefd[1]);
+
+ /* Read all three namespace IDs - fixed size, no parsing needed */
+ ret = read(pipefd[0], &u_id, sizeof(u_id));
+ if (ret != sizeof(u_id)) {
+ close(pipefd[0]);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to read user namespace ID");
+ }
+
+ ret = read(pipefd[0], &n_id, sizeof(n_id));
+ if (ret != sizeof(n_id)) {
+ close(pipefd[0]);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to read network namespace ID");
+ }
+
+ ret = read(pipefd[0], &ut_id, sizeof(ut_id));
+ close(pipefd[0]);
+ if (ret != sizeof(ut_id)) {
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to read UTS namespace ID");
+ }
+
+ /* Construct file handles from namespace IDs */
+ user_handle = (struct file_handle *)u_buf;
+ user_handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ user_handle->handle_type = FILEID_NSFS;
+ struct nsfs_file_handle *u_fh = (struct nsfs_file_handle *)user_handle->f_handle;
+ u_fh->ns_id = u_id;
+ u_fh->ns_type = 0;
+ u_fh->ns_inum = 0;
+
+ net_handle = (struct file_handle *)n_buf;
+ net_handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ net_handle->handle_type = FILEID_NSFS;
+ struct nsfs_file_handle *n_fh = (struct nsfs_file_handle *)net_handle->f_handle;
+ n_fh->ns_id = n_id;
+ n_fh->ns_type = 0;
+ n_fh->ns_inum = 0;
+
+ uts_handle = (struct file_handle *)ut_buf;
+ uts_handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ uts_handle->handle_type = FILEID_NSFS;
+ struct nsfs_file_handle *ut_fh = (struct nsfs_file_handle *)uts_handle->f_handle;
+ ut_fh->ns_id = ut_id;
+ ut_fh->ns_type = 0;
+ ut_fh->ns_inum = 0;
+
+ /* Open both non-user namespaces */
+ int n_fd = open_by_handle_at(FD_NSFS_ROOT, net_handle, O_RDONLY);
+ int ut_fd = open_by_handle_at(FD_NSFS_ROOT, uts_handle, O_RDONLY);
+ if (n_fd < 0 || ut_fd < 0) {
+ if (n_fd >= 0) close(n_fd);
+ if (ut_fd >= 0) close(ut_fd);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to open namespaces");
+ }
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ /* User namespace should be active (2 active children) */
+ TH_LOG("Both net and uts active - user ns should be active");
+ int u_fd = open_by_handle_at(FD_NSFS_ROOT, user_handle, O_RDONLY);
+ ASSERT_GE(u_fd, 0);
+ close(u_fd);
+
+ /* Close net - user ns should STILL be active (uts still active) */
+ TH_LOG("Closing net - user ns should still be active");
+ close(n_fd);
+ u_fd = open_by_handle_at(FD_NSFS_ROOT, user_handle, O_RDONLY);
+ ASSERT_GE(u_fd, 0);
+ close(u_fd);
+
+ /* Close uts - user ns should become inactive */
+ TH_LOG("Closing uts - user ns should become inactive");
+ close(ut_fd);
+ u_fd = open_by_handle_at(FD_NSFS_ROOT, user_handle, O_RDONLY);
+ ASSERT_LT(u_fd, 0);
+}
+
+/* Thread test helpers and structures */
+struct thread_ns_info {
+ __u64 ns_id;
+ int pipefd;
+ int syncfd_read;
+ int syncfd_write; /* currently unused */
+ int exit_code;
+};
+
+static void *thread_create_namespace(void *arg)
+{
+ struct thread_ns_info *info = (struct thread_ns_info *)arg;
+ int ret;
+
+ /* Create new network namespace */
+ ret = unshare(CLONE_NEWNET);
+ if (ret < 0) {
+ info->exit_code = 1;
+ return NULL;
+ }
+
+ /* Get namespace ID; use /proc/thread-self since only this thread unshared */
+ int fd = open("/proc/thread-self/ns/net", O_RDONLY);
+ if (fd < 0) {
+ info->exit_code = 2;
+ return NULL;
+ }
+
+ ret = ioctl(fd, NS_GET_ID, &info->ns_id);
+ close(fd);
+ if (ret < 0) {
+ info->exit_code = 3;
+ return NULL;
+ }
+
+ /* Send namespace ID to main thread */
+ if (write(info->pipefd, &info->ns_id, sizeof(info->ns_id)) != sizeof(info->ns_id)) {
+ info->exit_code = 4;
+ return NULL;
+ }
+
+ /* Wait for signal to exit */
+ char sync_byte;
+ if (read(info->syncfd_read, &sync_byte, 1) != 1) {
+ info->exit_code = 5;
+ return NULL;
+ }
+
+ info->exit_code = 0;
+ return NULL;
+}
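+
+/*
+ * Note: unshare(CLONE_NEWNET) from a thread affects only that thread's
+ * nsproxy, which is why the helper above reads /proc/thread-self rather
+ * than /proc/self. (unshare(CLONE_NEWUSER), by contrast, fails outright
+ * in a multithreaded process.)
+ */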
+
+/*
+ * Test that namespace becomes inactive after thread exits.
+ * This verifies active reference counting works with threads, not just processes.
+ */
+TEST(thread_ns_inactive_after_exit)
+{
+ pthread_t thread;
+ struct thread_ns_info info;
+ struct file_handle *handle;
+ int pipefd[2];
+ int syncpipe[2];
+ int ret;
+ char sync_byte;
+ char buf[sizeof(*handle) + MAX_HANDLE_SZ];
+
+ ASSERT_EQ(pipe(pipefd), 0);
+ ASSERT_EQ(pipe(syncpipe), 0);
+
+ info.pipefd = pipefd[1];
+ info.syncfd_read = syncpipe[0];
+ info.syncfd_write = -1;
+ info.exit_code = -1;
+
+ /* Create thread that will create a namespace */
+ ret = pthread_create(&thread, NULL, thread_create_namespace, &info);
+ ASSERT_EQ(ret, 0);
+
+ /* Read namespace ID from thread */
+ __u64 ns_id;
+ ret = read(pipefd[0], &ns_id, sizeof(ns_id));
+ if (ret != sizeof(ns_id)) {
+ sync_byte = 'X';
+ write(syncpipe[1], &sync_byte, 1);
+ pthread_join(thread, NULL);
+ close(pipefd[0]);
+ close(pipefd[1]);
+ close(syncpipe[0]);
+ close(syncpipe[1]);
+ SKIP(return, "Failed to read namespace ID from thread");
+ }
+
+ TH_LOG("Thread created namespace with ID %llu", (unsigned long long)ns_id);
+
+ /* Construct file handle */
+ handle = (struct file_handle *)buf;
+ handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ handle->handle_type = FILEID_NSFS;
+ struct nsfs_file_handle *fh = (struct nsfs_file_handle *)handle->f_handle;
+ fh->ns_id = ns_id;
+ fh->ns_type = 0;
+ fh->ns_inum = 0;
+
+ /* Namespace should be active while thread is alive */
+ TH_LOG("Attempting to open namespace while thread is alive (should succeed)");
+ int nsfd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ ASSERT_GE(nsfd, 0);
+ close(nsfd);
+
+ /* Signal thread to exit */
+ TH_LOG("Signaling thread to exit");
+ sync_byte = 'X';
+ ASSERT_EQ(write(syncpipe[1], &sync_byte, 1), 1);
+ close(syncpipe[1]);
+
+ /* Wait for thread to exit */
+ ASSERT_EQ(pthread_join(thread, NULL), 0);
+ close(pipefd[0]);
+ close(pipefd[1]);
+ close(syncpipe[0]);
+
+ if (info.exit_code != 0)
+ SKIP(return, "Thread failed to create namespace");
+
+ TH_LOG("Thread exited, namespace should be inactive");
+
+ /* Namespace should now be inactive */
+ nsfd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ ASSERT_LT(nsfd, 0);
+ /* Should fail with ENOENT (inactive) or ESTALE (gone) */
+ TH_LOG("Namespace inactive as expected: %s (errno=%d)", strerror(errno), errno);
+ ASSERT_TRUE(errno == ENOENT || errno == ESTALE);
+}
+
+/*
+ * Test that a namespace remains active while a thread holds an fd to it.
+ * Even after the thread exits, the namespace should remain active as long as
+ * another thread holds a file descriptor to it.
+ */
+TEST(thread_ns_fd_keeps_active)
+{
+ pthread_t thread;
+ struct thread_ns_info info;
+ struct file_handle *handle;
+ int pipefd[2];
+ int syncpipe[2];
+ int ret;
+ char sync_byte;
+ char buf[sizeof(*handle) + MAX_HANDLE_SZ];
+
+ ASSERT_EQ(pipe(pipefd), 0);
+ ASSERT_EQ(pipe(syncpipe), 0);
+
+ info.pipefd = pipefd[1];
+ info.syncfd_read = syncpipe[0];
+ info.syncfd_write = -1;
+ info.exit_code = -1;
+
+ /* Create thread that will create a namespace */
+ ret = pthread_create(&thread, NULL, thread_create_namespace, &info);
+ ASSERT_EQ(ret, 0);
+
+ /* Read namespace ID from thread */
+ __u64 ns_id;
+ ret = read(pipefd[0], &ns_id, sizeof(ns_id));
+ if (ret != sizeof(ns_id)) {
+ sync_byte = 'X';
+ write(syncpipe[1], &sync_byte, 1);
+ pthread_join(thread, NULL);
+ close(pipefd[0]);
+ close(pipefd[1]);
+ close(syncpipe[0]);
+ close(syncpipe[1]);
+ SKIP(return, "Failed to read namespace ID from thread");
+ }
+
+ TH_LOG("Thread created namespace with ID %llu", (unsigned long long)ns_id);
+
+ /* Construct file handle */
+ handle = (struct file_handle *)buf;
+ handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ handle->handle_type = FILEID_NSFS;
+ struct nsfs_file_handle *fh = (struct nsfs_file_handle *)handle->f_handle;
+ fh->ns_id = ns_id;
+ fh->ns_type = 0;
+ fh->ns_inum = 0;
+
+ /* Open namespace while thread is alive */
+ TH_LOG("Opening namespace while thread is alive");
+ int nsfd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ ASSERT_GE(nsfd, 0);
+
+ /* Signal thread to exit */
+ TH_LOG("Signaling thread to exit");
+ sync_byte = 'X';
+ write(syncpipe[1], &sync_byte, 1);
+ close(syncpipe[1]);
+
+ /* Wait for thread to exit */
+ pthread_join(thread, NULL);
+ close(pipefd[0]);
+ close(pipefd[1]);
+ close(syncpipe[0]);
+
+ if (info.exit_code != 0) {
+ close(nsfd);
+ SKIP(return, "Thread failed to create namespace");
+ }
+
+ TH_LOG("Thread exited, but main thread holds fd - namespace should remain active");
+
+ /* Namespace should still be active because we hold an fd */
+ int nsfd2 = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ ASSERT_GE(nsfd2, 0);
+
+ /* Verify it's the same namespace */
+ struct stat st1, st2;
+ ASSERT_EQ(fstat(nsfd, &st1), 0);
+ ASSERT_EQ(fstat(nsfd2, &st2), 0);
+ ASSERT_EQ(st1.st_ino, st2.st_ino);
+ close(nsfd2);
+
+ TH_LOG("Closing fd - namespace should become inactive");
+ close(nsfd);
+
+ /* Now namespace should be inactive */
+ nsfd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ ASSERT_LT(nsfd, 0);
+ /* Should fail with ENOENT (inactive) or ESTALE (gone) */
+ TH_LOG("Namespace inactive as expected: %s (errno=%d)", strerror(errno), errno);
+ ASSERT_TRUE(errno == ENOENT || errno == ESTALE);
+}
+
+/* Structure for thread data in subprocess */
+struct thread_sleep_data {
+ int syncfd_read;
+};
+
+static void *thread_sleep_and_wait(void *arg)
+{
+ struct thread_sleep_data *data = (struct thread_sleep_data *)arg;
+ char sync_byte;
+
+ /* Wait for signal to exit - read will unblock when pipe is closed */
+ (void)read(data->syncfd_read, &sync_byte, 1);
+ return NULL;
+}
+
+/*
+ * Test that namespaces become inactive after a subprocess with multiple threads exits.
+ * Create a subprocess that unshares user and network namespaces, then creates two
+ * threads that share those namespaces. Verify that after all threads and subprocess
+ * exit, the namespaces are no longer listed by listns() and cannot be opened by
+ * open_by_handle_at().
+ */
+TEST(thread_subprocess_ns_inactive_after_all_exit)
+{
+ int pipefd[2];
+ int sv[2];
+ pid_t pid;
+ int status;
+ __u64 user_id, net_id;
+ struct file_handle *user_handle, *net_handle;
+ char user_buf[sizeof(*user_handle) + MAX_HANDLE_SZ];
+ char net_buf[sizeof(*net_handle) + MAX_HANDLE_SZ];
+ char sync_byte;
+ int ret;
+
+ ASSERT_EQ(pipe(pipefd), 0);
+ ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, sv), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child process */
+ close(pipefd[0]);
+ close(sv[0]);
+
+ /* Create user namespace with mappings */
+ if (setup_userns() < 0) {
+ fprintf(stderr, "Child: setup_userns() failed: %s\n", strerror(errno));
+ close(pipefd[1]);
+ close(sv[1]);
+ exit(1);
+ }
+ fprintf(stderr, "Child: setup_userns() succeeded\n");
+
+ /* Get user namespace ID */
+ int user_fd = open("/proc/self/ns/user", O_RDONLY);
+ if (user_fd < 0) {
+ fprintf(stderr, "Child: open(/proc/self/ns/user) failed: %s\n", strerror(errno));
+ close(pipefd[1]);
+ close(sv[1]);
+ exit(1);
+ }
+
+ if (ioctl(user_fd, NS_GET_ID, &user_id) < 0) {
+ fprintf(stderr, "Child: ioctl(NS_GET_ID) for user ns failed: %s\n", strerror(errno));
+ close(user_fd);
+ close(pipefd[1]);
+ close(sv[1]);
+ exit(1);
+ }
+ close(user_fd);
+ fprintf(stderr, "Child: user ns ID = %llu\n", (unsigned long long)user_id);
+
+ /* Unshare network namespace */
+ if (unshare(CLONE_NEWNET) < 0) {
+ fprintf(stderr, "Child: unshare(CLONE_NEWNET) failed: %s\n", strerror(errno));
+ close(pipefd[1]);
+ close(sv[1]);
+ exit(1);
+ }
+ fprintf(stderr, "Child: unshare(CLONE_NEWNET) succeeded\n");
+
+ /* Get network namespace ID */
+ int net_fd = open("/proc/self/ns/net", O_RDONLY);
+ if (net_fd < 0) {
+ fprintf(stderr, "Child: open(/proc/self/ns/net) failed: %s\n", strerror(errno));
+ close(pipefd[1]);
+ close(sv[1]);
+ exit(1);
+ }
+
+ if (ioctl(net_fd, NS_GET_ID, &net_id) < 0) {
+ fprintf(stderr, "Child: ioctl(NS_GET_ID) for net ns failed: %s\n", strerror(errno));
+ close(net_fd);
+ close(pipefd[1]);
+ close(sv[1]);
+ exit(1);
+ }
+ close(net_fd);
+ fprintf(stderr, "Child: net ns ID = %llu\n", (unsigned long long)net_id);
+
+ /* Send namespace IDs to parent */
+ if (write(pipefd[1], &user_id, sizeof(user_id)) != sizeof(user_id)) {
+ fprintf(stderr, "Child: write(user_id) failed: %s\n", strerror(errno));
+ exit(1);
+ }
+ if (write(pipefd[1], &net_id, sizeof(net_id)) != sizeof(net_id)) {
+ fprintf(stderr, "Child: write(net_id) failed: %s\n", strerror(errno));
+ exit(1);
+ }
+ close(pipefd[1]);
+ fprintf(stderr, "Child: sent namespace IDs to parent\n");
+
+ /* Create two threads that share the namespaces */
+ pthread_t thread1, thread2;
+ struct thread_sleep_data data;
+ data.syncfd_read = sv[1];
+
+ int ret_thread = pthread_create(&thread1, NULL, thread_sleep_and_wait, &data);
+ if (ret_thread != 0) {
+ fprintf(stderr, "Child: pthread_create(thread1) failed: %s\n", strerror(ret_thread));
+ close(sv[1]);
+ exit(1);
+ }
+ fprintf(stderr, "Child: created thread1\n");
+
+ ret_thread = pthread_create(&thread2, NULL, thread_sleep_and_wait, &data);
+ if (ret_thread != 0) {
+ fprintf(stderr, "Child: pthread_create(thread2) failed: %s\n", strerror(ret_thread));
+ close(sv[1]);
+ pthread_cancel(thread1);
+ exit(1);
+ }
+ fprintf(stderr, "Child: created thread2\n");
+
+ /* Wait for threads to complete - they will unblock when parent writes */
+ fprintf(stderr, "Child: waiting for threads to exit\n");
+ pthread_join(thread1, NULL);
+ fprintf(stderr, "Child: thread1 exited\n");
+ pthread_join(thread2, NULL);
+ fprintf(stderr, "Child: thread2 exited\n");
+
+ close(sv[1]);
+
+ /* Exit - namespaces should become inactive */
+ fprintf(stderr, "Child: all threads joined, exiting with success\n");
+ exit(0);
+ }
+
+ /* Parent process */
+ close(pipefd[1]);
+ close(sv[1]);
+
+ TH_LOG("Parent: waiting to read namespace IDs from child");
+
+ /* Read namespace IDs from child */
+ ret = read(pipefd[0], &user_id, sizeof(user_id));
+ if (ret != sizeof(user_id)) {
+ TH_LOG("Parent: failed to read user_id, ret=%d, errno=%s", ret, strerror(errno));
+ close(pipefd[0]);
+ sync_byte = 'X';
+ (void)write(sv[0], &sync_byte, 1);
+ close(sv[0]);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to read user namespace ID from child");
+ }
+
+ ret = read(pipefd[0], &net_id, sizeof(net_id));
+ close(pipefd[0]);
+ if (ret != sizeof(net_id)) {
+ TH_LOG("Parent: failed to read net_id, ret=%d, errno=%s", ret, strerror(errno));
+ sync_byte = 'X';
+ (void)write(sv[0], &sync_byte, 1);
+ close(sv[0]);
+ waitpid(pid, NULL, 0);
+ SKIP(return, "Failed to read network namespace ID from child");
+ }
+
+ TH_LOG("Child created user ns %llu and net ns %llu with 2 threads",
+ (unsigned long long)user_id, (unsigned long long)net_id);
+
+ /* Construct file handles */
+ user_handle = (struct file_handle *)user_buf;
+ user_handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ user_handle->handle_type = FILEID_NSFS;
+ struct nsfs_file_handle *user_fh = (struct nsfs_file_handle *)user_handle->f_handle;
+ user_fh->ns_id = user_id;
+ user_fh->ns_type = 0;
+ user_fh->ns_inum = 0;
+
+ net_handle = (struct file_handle *)net_buf;
+ net_handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ net_handle->handle_type = FILEID_NSFS;
+ struct nsfs_file_handle *net_fh = (struct nsfs_file_handle *)net_handle->f_handle;
+ net_fh->ns_id = net_id;
+ net_fh->ns_type = 0;
+ net_fh->ns_inum = 0;
+
+ /* Verify namespaces are active while subprocess and threads are alive */
+ TH_LOG("Verifying namespaces are active while subprocess with threads is running");
+ int user_fd = open_by_handle_at(FD_NSFS_ROOT, user_handle, O_RDONLY);
+ ASSERT_GE(user_fd, 0);
+
+ int net_fd = open_by_handle_at(FD_NSFS_ROOT, net_handle, O_RDONLY);
+ ASSERT_GE(net_fd, 0);
+
+ close(user_fd);
+ close(net_fd);
+
+ /* Also verify they appear in listns() */
+ TH_LOG("Verifying namespaces appear in listns() while active");
+ struct ns_id_req req = {
+ .size = sizeof(struct ns_id_req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = CLONE_NEWUSER,
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
+ __u64 ns_ids[256];
+ int nr_ids = sys_listns(&req, ns_ids, 256, 0);
+ if (nr_ids < 0) {
+ TH_LOG("listns() not available, skipping listns verification");
+ } else {
+ /* Check if user_id is in the list */
+ int found_user = 0;
+ for (int i = 0; i < nr_ids; i++) {
+ if (ns_ids[i] == user_id) {
+ found_user = 1;
+ break;
+ }
+ }
+ ASSERT_TRUE(found_user);
+ TH_LOG("User namespace found in listns() as expected");
+
+ /* Check network namespace */
+ req.ns_type = CLONE_NEWNET;
+ nr_ids = sys_listns(&req, ns_ids, 256, 0);
+ if (nr_ids >= 0) {
+ int found_net = 0;
+ for (int i = 0; i < nr_ids; i++) {
+ if (ns_ids[i] == net_id) {
+ found_net = 1;
+ break;
+ }
+ }
+ ASSERT_TRUE(found_net);
+ TH_LOG("Network namespace found in listns() as expected");
+ }
+ }
+
+ /* Signal threads to exit */
+ TH_LOG("Signaling threads to exit");
+ sync_byte = 'X';
+ /* Write two bytes - one for each thread */
+ ASSERT_EQ(write(sv[0], &sync_byte, 1), 1);
+ ASSERT_EQ(write(sv[0], &sync_byte, 1), 1);
+ close(sv[0]);
+
+ /* Wait for child process to exit */
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ if (WEXITSTATUS(status) != 0) {
+ TH_LOG("Child process failed with exit code %d", WEXITSTATUS(status));
+ SKIP(return, "Child process failed");
+ }
+
+ TH_LOG("Subprocess and all threads have exited successfully");
+
+ /* Verify namespaces are now inactive - open_by_handle_at should fail */
+ TH_LOG("Verifying namespaces are inactive after subprocess and threads exit");
+ user_fd = open_by_handle_at(FD_NSFS_ROOT, user_handle, O_RDONLY);
+ ASSERT_LT(user_fd, 0);
+ TH_LOG("User namespace inactive as expected: %s (errno=%d)",
+ strerror(errno), errno);
+ ASSERT_TRUE(errno == ENOENT || errno == ESTALE);
+
+ net_fd = open_by_handle_at(FD_NSFS_ROOT, net_handle, O_RDONLY);
+ ASSERT_LT(net_fd, 0);
+ TH_LOG("Network namespace inactive as expected: %s (errno=%d)",
+ strerror(errno), errno);
+ ASSERT_TRUE(errno == ENOENT || errno == ESTALE);
+
+ /* Verify namespaces do NOT appear in listns() */
+ TH_LOG("Verifying namespaces do NOT appear in listns() when inactive");
+ memset(&req, 0, sizeof(req));
+ req.size = sizeof(struct ns_id_req);
+ req.ns_type = CLONE_NEWUSER;
+ nr_ids = sys_listns(&req, ns_ids, 256, 0);
+ if (nr_ids >= 0) {
+ int found_user = 0;
+ for (int i = 0; i < nr_ids; i++) {
+ if (ns_ids[i] == user_id) {
+ found_user = 1;
+ break;
+ }
+ }
+ ASSERT_FALSE(found_user);
+ TH_LOG("User namespace correctly not listed in listns()");
+
+ /* Check network namespace */
+ req.ns_type = CLONE_NEWNET;
+ nr_ids = sys_listns(&req, ns_ids, 256, 0);
+ if (nr_ids >= 0) {
+ int found_net = 0;
+ for (int i = 0; i < nr_ids; i++) {
+ if (ns_ids[i] == net_id) {
+ found_net = 1;
+ break;
+ }
+ }
+ ASSERT_FALSE(found_net);
+ TH_LOG("Network namespace correctly not listed in listns()");
+ }
+ }
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/namespaces/nsid_test.c b/tools/testing/selftests/namespaces/nsid_test.c
new file mode 100644
index 000000000000..b4a14c6693a5
--- /dev/null
+++ b/tools/testing/selftests/namespaces/nsid_test.c
@@ -0,0 +1,981 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <libgen.h>
+#include <limits.h>
+#include <pthread.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/mount.h>
+#include <poll.h>
+#include <sys/epoll.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <linux/fs.h>
+#include <linux/limits.h>
+#include <linux/nsfs.h>
+#include "kselftest_harness.h"
+
+/* Fixture for tests that create child processes */
+FIXTURE(nsid) {
+ pid_t child_pid;
+};
+
+FIXTURE_SETUP(nsid) {
+ self->child_pid = 0;
+}
+
+FIXTURE_TEARDOWN(nsid) {
+ /* Clean up any child process that may still be running */
+ if (self->child_pid > 0) {
+ kill(self->child_pid, SIGKILL);
+ waitpid(self->child_pid, NULL, 0);
+ }
+}
+
+TEST(nsid_mntns_basic)
+{
+ __u64 mnt_ns_id = 0;
+ int fd_mntns;
+ int ret;
+
+ /* Open the current mount namespace */
+ fd_mntns = open("/proc/self/ns/mnt", O_RDONLY);
+ ASSERT_GE(fd_mntns, 0);
+
+ /* Get the mount namespace ID */
+ ret = ioctl(fd_mntns, NS_GET_MNTNS_ID, &mnt_ns_id);
+ ASSERT_EQ(ret, 0);
+ ASSERT_NE(mnt_ns_id, 0);
+
+ /* NS_GET_ID should return the same ID as NS_GET_MNTNS_ID */
+ __u64 mnt_ns_id2 = 0;
+ ret = ioctl(fd_mntns, NS_GET_ID, &mnt_ns_id2);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(mnt_ns_id, mnt_ns_id2);
+
+ close(fd_mntns);
+}
+
+TEST_F(nsid, mntns_separate)
+{
+ __u64 parent_mnt_ns_id = 0;
+ __u64 child_mnt_ns_id = 0;
+ int fd_parent_mntns, fd_child_mntns;
+ int ret;
+ pid_t pid;
+ int pipefd[2];
+
+ /* Get parent's mount namespace ID */
+ fd_parent_mntns = open("/proc/self/ns/mnt", O_RDONLY);
+ ASSERT_GE(fd_parent_mntns, 0);
+ ret = ioctl(fd_parent_mntns, NS_GET_ID, &parent_mnt_ns_id);
+ ASSERT_EQ(ret, 0);
+ ASSERT_NE(parent_mnt_ns_id, 0);
+
+ /* Create a pipe for synchronization */
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child process */
+ close(pipefd[0]);
+
+ /* Create new mount namespace */
+ ret = unshare(CLONE_NEWNS);
+ if (ret != 0) {
+ /* Skip test if we don't have permission */
+ if (errno == EPERM || errno == EACCES) {
+ write(pipefd[1], "S", 1); /* Signal skip */
+ _exit(0);
+ }
+ _exit(1);
+ }
+
+ /* Signal success */
+ write(pipefd[1], "Y", 1);
+ close(pipefd[1]);
+
+ /* Keep namespace alive */
+ pause();
+ _exit(0);
+ }
+
+ /* Track child for cleanup */
+ self->child_pid = pid;
+
+ /* Parent process */
+ close(pipefd[1]);
+
+ char buf;
+ ASSERT_EQ(read(pipefd[0], &buf, 1), 1);
+ close(pipefd[0]);
+
+ if (buf == 'S') {
+ /* Child couldn't create namespace, skip test */
+ close(fd_parent_mntns);
+ SKIP(return, "No permission to create mount namespace");
+ }
+
+ ASSERT_EQ(buf, 'Y');
+
+ /* Open child's mount namespace */
+ char path[256];
+ snprintf(path, sizeof(path), "/proc/%d/ns/mnt", pid);
+ fd_child_mntns = open(path, O_RDONLY);
+ ASSERT_GE(fd_child_mntns, 0);
+
+ /* Get child's mount namespace ID */
+ ret = ioctl(fd_child_mntns, NS_GET_ID, &child_mnt_ns_id);
+ ASSERT_EQ(ret, 0);
+ ASSERT_NE(child_mnt_ns_id, 0);
+
+ /* Parent and child should have different mount namespace IDs */
+ ASSERT_NE(parent_mnt_ns_id, child_mnt_ns_id);
+
+ close(fd_parent_mntns);
+ close(fd_child_mntns);
+}
+
+TEST(nsid_cgroupns_basic)
+{
+ __u64 cgroup_ns_id = 0;
+ int fd_cgroupns;
+ int ret;
+
+ /* Open the current cgroup namespace */
+ fd_cgroupns = open("/proc/self/ns/cgroup", O_RDONLY);
+ ASSERT_GE(fd_cgroupns, 0);
+
+ /* Get the cgroup namespace ID */
+ ret = ioctl(fd_cgroupns, NS_GET_ID, &cgroup_ns_id);
+ ASSERT_EQ(ret, 0);
+ ASSERT_NE(cgroup_ns_id, 0);
+
+ /* Verify we can get the same ID again */
+ __u64 cgroup_ns_id2 = 0;
+ ret = ioctl(fd_cgroupns, NS_GET_ID, &cgroup_ns_id2);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(cgroup_ns_id, cgroup_ns_id2);
+
+ close(fd_cgroupns);
+}
+
+TEST_F(nsid, cgroupns_separate)
+{
+ __u64 parent_cgroup_ns_id = 0;
+ __u64 child_cgroup_ns_id = 0;
+ int fd_parent_cgroupns, fd_child_cgroupns;
+ int ret;
+ pid_t pid;
+ int pipefd[2];
+
+ /* Get parent's cgroup namespace ID */
+ fd_parent_cgroupns = open("/proc/self/ns/cgroup", O_RDONLY);
+ ASSERT_GE(fd_parent_cgroupns, 0);
+ ret = ioctl(fd_parent_cgroupns, NS_GET_ID, &parent_cgroup_ns_id);
+ ASSERT_EQ(ret, 0);
+ ASSERT_NE(parent_cgroup_ns_id, 0);
+
+ /* Create a pipe for synchronization */
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child process */
+ close(pipefd[0]);
+
+ /* Create new cgroup namespace */
+ ret = unshare(CLONE_NEWCGROUP);
+ if (ret != 0) {
+ /* Skip test if we don't have permission */
+ if (errno == EPERM || errno == EACCES) {
+ write(pipefd[1], "S", 1); /* Signal skip */
+ _exit(0);
+ }
+ _exit(1);
+ }
+
+ /* Signal success */
+ write(pipefd[1], "Y", 1);
+ close(pipefd[1]);
+
+ /* Keep namespace alive */
+ pause();
+ _exit(0);
+ }
+
+ /* Track child for cleanup */
+ self->child_pid = pid;
+
+ /* Parent process */
+ close(pipefd[1]);
+
+ char buf;
+ ASSERT_EQ(read(pipefd[0], &buf, 1), 1);
+ close(pipefd[0]);
+
+ if (buf == 'S') {
+ /* Child couldn't create namespace, skip test */
+ close(fd_parent_cgroupns);
+ SKIP(return, "No permission to create cgroup namespace");
+ }
+
+ ASSERT_EQ(buf, 'Y');
+
+ /* Open child's cgroup namespace */
+ char path[256];
+ snprintf(path, sizeof(path), "/proc/%d/ns/cgroup", pid);
+ fd_child_cgroupns = open(path, O_RDONLY);
+ ASSERT_GE(fd_child_cgroupns, 0);
+
+ /* Get child's cgroup namespace ID */
+ ret = ioctl(fd_child_cgroupns, NS_GET_ID, &child_cgroup_ns_id);
+ ASSERT_EQ(ret, 0);
+ ASSERT_NE(child_cgroup_ns_id, 0);
+
+ /* Parent and child should have different cgroup namespace IDs */
+ ASSERT_NE(parent_cgroup_ns_id, child_cgroup_ns_id);
+
+ close(fd_parent_cgroupns);
+ close(fd_child_cgroupns);
+}
+
+TEST(nsid_ipcns_basic)
+{
+ __u64 ipc_ns_id = 0;
+ int fd_ipcns;
+ int ret;
+
+ /* Open the current IPC namespace */
+ fd_ipcns = open("/proc/self/ns/ipc", O_RDONLY);
+ ASSERT_GE(fd_ipcns, 0);
+
+ /* Get the IPC namespace ID */
+ ret = ioctl(fd_ipcns, NS_GET_ID, &ipc_ns_id);
+ ASSERT_EQ(ret, 0);
+ ASSERT_NE(ipc_ns_id, 0);
+
+ /* Verify we can get the same ID again */
+ __u64 ipc_ns_id2 = 0;
+ ret = ioctl(fd_ipcns, NS_GET_ID, &ipc_ns_id2);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(ipc_ns_id, ipc_ns_id2);
+
+ close(fd_ipcns);
+}
+
+TEST_F(nsid, ipcns_separate)
+{
+ __u64 parent_ipc_ns_id = 0;
+ __u64 child_ipc_ns_id = 0;
+ int fd_parent_ipcns, fd_child_ipcns;
+ int ret;
+ pid_t pid;
+ int pipefd[2];
+
+ /* Get parent's IPC namespace ID */
+ fd_parent_ipcns = open("/proc/self/ns/ipc", O_RDONLY);
+ ASSERT_GE(fd_parent_ipcns, 0);
+ ret = ioctl(fd_parent_ipcns, NS_GET_ID, &parent_ipc_ns_id);
+ ASSERT_EQ(ret, 0);
+ ASSERT_NE(parent_ipc_ns_id, 0);
+
+ /* Create a pipe for synchronization */
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child process */
+ close(pipefd[0]);
+
+ /* Create new IPC namespace */
+ ret = unshare(CLONE_NEWIPC);
+ if (ret != 0) {
+ /* Skip test if we don't have permission */
+ if (errno == EPERM || errno == EACCES) {
+ write(pipefd[1], "S", 1); /* Signal skip */
+ _exit(0);
+ }
+ _exit(1);
+ }
+
+ /* Signal success */
+ write(pipefd[1], "Y", 1);
+ close(pipefd[1]);
+
+ /* Keep namespace alive */
+ pause();
+ _exit(0);
+ }
+
+ /* Track child for cleanup */
+ self->child_pid = pid;
+
+ /* Parent process */
+ close(pipefd[1]);
+
+ char buf;
+ ASSERT_EQ(read(pipefd[0], &buf, 1), 1);
+ close(pipefd[0]);
+
+ if (buf == 'S') {
+ /* Child couldn't create namespace, skip test */
+ close(fd_parent_ipcns);
+ SKIP(return, "No permission to create IPC namespace");
+ }
+
+ ASSERT_EQ(buf, 'Y');
+
+ /* Open child's IPC namespace */
+ char path[256];
+ snprintf(path, sizeof(path), "/proc/%d/ns/ipc", pid);
+ fd_child_ipcns = open(path, O_RDONLY);
+ ASSERT_GE(fd_child_ipcns, 0);
+
+ /* Get child's IPC namespace ID */
+ ret = ioctl(fd_child_ipcns, NS_GET_ID, &child_ipc_ns_id);
+ ASSERT_EQ(ret, 0);
+ ASSERT_NE(child_ipc_ns_id, 0);
+
+ /* Parent and child should have different IPC namespace IDs */
+ ASSERT_NE(parent_ipc_ns_id, child_ipc_ns_id);
+
+ close(fd_parent_ipcns);
+ close(fd_child_ipcns);
+}
+
+TEST(nsid_utsns_basic)
+{
+ __u64 uts_ns_id = 0;
+ int fd_utsns;
+ int ret;
+
+ /* Open the current UTS namespace */
+ fd_utsns = open("/proc/self/ns/uts", O_RDONLY);
+ ASSERT_GE(fd_utsns, 0);
+
+ /* Get the UTS namespace ID */
+ ret = ioctl(fd_utsns, NS_GET_ID, &uts_ns_id);
+ ASSERT_EQ(ret, 0);
+ ASSERT_NE(uts_ns_id, 0);
+
+ /* Verify we can get the same ID again */
+ __u64 uts_ns_id2 = 0;
+ ret = ioctl(fd_utsns, NS_GET_ID, &uts_ns_id2);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(uts_ns_id, uts_ns_id2);
+
+ close(fd_utsns);
+}
+
+TEST_F(nsid, utsns_separate)
+{
+ __u64 parent_uts_ns_id = 0;
+ __u64 child_uts_ns_id = 0;
+ int fd_parent_utsns, fd_child_utsns;
+ int ret;
+ pid_t pid;
+ int pipefd[2];
+
+ /* Get parent's UTS namespace ID */
+ fd_parent_utsns = open("/proc/self/ns/uts", O_RDONLY);
+ ASSERT_GE(fd_parent_utsns, 0);
+ ret = ioctl(fd_parent_utsns, NS_GET_ID, &parent_uts_ns_id);
+ ASSERT_EQ(ret, 0);
+ ASSERT_NE(parent_uts_ns_id, 0);
+
+ /* Create a pipe for synchronization */
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child process */
+ close(pipefd[0]);
+
+ /* Create new UTS namespace */
+ ret = unshare(CLONE_NEWUTS);
+ if (ret != 0) {
+ /* Skip test if we don't have permission */
+ if (errno == EPERM || errno == EACCES) {
+ write(pipefd[1], "S", 1); /* Signal skip */
+ _exit(0);
+ }
+ _exit(1);
+ }
+
+ /* Signal success */
+ write(pipefd[1], "Y", 1);
+ close(pipefd[1]);
+
+ /* Keep namespace alive */
+ pause();
+ _exit(0);
+ }
+
+ /* Track child for cleanup */
+ self->child_pid = pid;
+
+ /* Parent process */
+ close(pipefd[1]);
+
+ char buf;
+ ASSERT_EQ(read(pipefd[0], &buf, 1), 1);
+ close(pipefd[0]);
+
+ if (buf == 'S') {
+ /* Child couldn't create namespace, skip test */
+ close(fd_parent_utsns);
+ SKIP(return, "No permission to create UTS namespace");
+ }
+
+ ASSERT_EQ(buf, 'Y');
+
+ /* Open child's UTS namespace */
+ char path[256];
+ snprintf(path, sizeof(path), "/proc/%d/ns/uts", pid);
+ fd_child_utsns = open(path, O_RDONLY);
+ ASSERT_GE(fd_child_utsns, 0);
+
+ /* Get child's UTS namespace ID */
+ ret = ioctl(fd_child_utsns, NS_GET_ID, &child_uts_ns_id);
+ ASSERT_EQ(ret, 0);
+ ASSERT_NE(child_uts_ns_id, 0);
+
+ /* Parent and child should have different UTS namespace IDs */
+ ASSERT_NE(parent_uts_ns_id, child_uts_ns_id);
+
+ close(fd_parent_utsns);
+ close(fd_child_utsns);
+}
+
+TEST(nsid_userns_basic)
+{
+ __u64 user_ns_id = 0;
+ int fd_userns;
+ int ret;
+
+ /* Open the current user namespace */
+ fd_userns = open("/proc/self/ns/user", O_RDONLY);
+ ASSERT_GE(fd_userns, 0);
+
+ /* Get the user namespace ID */
+ ret = ioctl(fd_userns, NS_GET_ID, &user_ns_id);
+ ASSERT_EQ(ret, 0);
+ ASSERT_NE(user_ns_id, 0);
+
+ /* Verify we can get the same ID again */
+ __u64 user_ns_id2 = 0;
+ ret = ioctl(fd_userns, NS_GET_ID, &user_ns_id2);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(user_ns_id, user_ns_id2);
+
+ close(fd_userns);
+}
+
+TEST_F(nsid, userns_separate)
+{
+ __u64 parent_user_ns_id = 0;
+ __u64 child_user_ns_id = 0;
+ int fd_parent_userns, fd_child_userns;
+ int ret;
+ pid_t pid;
+ int pipefd[2];
+
+ /* Get parent's user namespace ID */
+ fd_parent_userns = open("/proc/self/ns/user", O_RDONLY);
+ ASSERT_GE(fd_parent_userns, 0);
+ ret = ioctl(fd_parent_userns, NS_GET_ID, &parent_user_ns_id);
+ ASSERT_EQ(ret, 0);
+ ASSERT_NE(parent_user_ns_id, 0);
+
+ /* Create a pipe for synchronization */
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child process */
+ close(pipefd[0]);
+
+ /* Create new user namespace */
+ ret = unshare(CLONE_NEWUSER);
+ if (ret != 0) {
+ /* Skip test if we don't have permission */
+ if (errno == EPERM || errno == EACCES) {
+ write(pipefd[1], "S", 1); /* Signal skip */
+ _exit(0);
+ }
+ _exit(1);
+ }
+
+ /* Signal success */
+ write(pipefd[1], "Y", 1);
+ close(pipefd[1]);
+
+ /* Keep namespace alive */
+ pause();
+ _exit(0);
+ }
+
+ /* Track child for cleanup */
+ self->child_pid = pid;
+
+ /* Parent process */
+ close(pipefd[1]);
+
+ char buf;
+ ASSERT_EQ(read(pipefd[0], &buf, 1), 1);
+ close(pipefd[0]);
+
+ if (buf == 'S') {
+ /* Child couldn't create namespace, skip test */
+ close(fd_parent_userns);
+ SKIP(return, "No permission to create user namespace");
+ }
+
+ ASSERT_EQ(buf, 'Y');
+
+ /* Open child's user namespace */
+ char path[256];
+ snprintf(path, sizeof(path), "/proc/%d/ns/user", pid);
+ fd_child_userns = open(path, O_RDONLY);
+ ASSERT_GE(fd_child_userns, 0);
+
+ /* Get child's user namespace ID */
+ ret = ioctl(fd_child_userns, NS_GET_ID, &child_user_ns_id);
+ ASSERT_EQ(ret, 0);
+ ASSERT_NE(child_user_ns_id, 0);
+
+ /* Parent and child should have different user namespace IDs */
+ ASSERT_NE(parent_user_ns_id, child_user_ns_id);
+
+ close(fd_parent_userns);
+ close(fd_child_userns);
+}
+
+TEST(nsid_timens_basic)
+{
+ __u64 time_ns_id = 0;
+ int fd_timens;
+ int ret;
+
+ /* Open the current time namespace */
+ fd_timens = open("/proc/self/ns/time", O_RDONLY);
+ if (fd_timens < 0) {
+ SKIP(return, "Time namespaces not supported");
+ }
+
+ /* Get the time namespace ID */
+ ret = ioctl(fd_timens, NS_GET_ID, &time_ns_id);
+ ASSERT_EQ(ret, 0);
+ ASSERT_NE(time_ns_id, 0);
+
+ /* Verify we can get the same ID again */
+ __u64 time_ns_id2 = 0;
+ ret = ioctl(fd_timens, NS_GET_ID, &time_ns_id2);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(time_ns_id, time_ns_id2);
+
+ close(fd_timens);
+}
+
+TEST_F(nsid, timens_separate)
+{
+ __u64 parent_time_ns_id = 0;
+ __u64 child_time_ns_id = 0;
+ int fd_parent_timens, fd_child_timens;
+ int ret;
+ pid_t pid;
+ int pipefd[2];
+
+ /* Open the current time namespace */
+ fd_parent_timens = open("/proc/self/ns/time", O_RDONLY);
+ if (fd_parent_timens < 0) {
+ SKIP(return, "Time namespaces not supported");
+ }
+
+ /* Get parent's time namespace ID */
+ ret = ioctl(fd_parent_timens, NS_GET_ID, &parent_time_ns_id);
+ ASSERT_EQ(ret, 0);
+ ASSERT_NE(parent_time_ns_id, 0);
+
+ /* Create a pipe for synchronization */
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child process */
+ close(pipefd[0]);
+
+ /* Create new time namespace */
+ ret = unshare(CLONE_NEWTIME);
+ if (ret != 0) {
+ /* Skip test if we don't have permission */
+ if (errno == EPERM || errno == EACCES || errno == EINVAL) {
+ write(pipefd[1], "S", 1); /* Signal skip */
+ _exit(0);
+ }
+ _exit(1);
+ }
+
+ /*
+ * unshare(CLONE_NEWTIME) does not move the caller; fork a grandchild
+ * to actually enter the new namespace.
+ */
+ pid_t grandchild = fork();
+ if (grandchild == 0) {
+ /*
+ * Grandchild is in the new namespace. Do not write to the
+ * pipe from here: a second writer could interleave with the
+ * PID bytes the parent reads below.
+ */
+ close(pipefd[1]);
+ pause();
+ _exit(0);
+ } else if (grandchild > 0) {
+ /* Child writes grandchild PID and waits */
+ write(pipefd[1], "Y", 1);
+ write(pipefd[1], &grandchild, sizeof(grandchild));
+ close(pipefd[1]);
+ pause(); /* Keep the parent alive to maintain the grandchild */
+ _exit(0);
+ } else {
+ _exit(1);
+ }
+ }
+
+ /* Track child for cleanup */
+ self->child_pid = pid;
+
+ /* Parent process */
+ close(pipefd[1]);
+
+ char buf;
+ ASSERT_EQ(read(pipefd[0], &buf, 1), 1);
+
+ if (buf == 'S') {
+ /* Child couldn't create namespace, skip test */
+ close(fd_parent_timens);
+ close(pipefd[0]);
+ SKIP(return, "Cannot create time namespace");
+ }
+
+ ASSERT_EQ(buf, 'Y');
+
+ pid_t grandchild_pid;
+ ASSERT_EQ(read(pipefd[0], &grandchild_pid, sizeof(grandchild_pid)), sizeof(grandchild_pid));
+ close(pipefd[0]);
+
+ /* Open grandchild's time namespace */
+ char path[256];
+ snprintf(path, sizeof(path), "/proc/%d/ns/time", grandchild_pid);
+ fd_child_timens = open(path, O_RDONLY);
+ ASSERT_GE(fd_child_timens, 0);
+
+ /* Get child's time namespace ID */
+ ret = ioctl(fd_child_timens, NS_GET_ID, &child_time_ns_id);
+ ASSERT_EQ(ret, 0);
+ ASSERT_NE(child_time_ns_id, 0);
+
+ /* Parent and child should have different time namespace IDs */
+ ASSERT_NE(parent_time_ns_id, child_time_ns_id);
+
+ close(fd_parent_timens);
+ close(fd_child_timens);
+}
+
+TEST(nsid_pidns_basic)
+{
+ __u64 pid_ns_id = 0;
+ int fd_pidns;
+ int ret;
+
+ /* Open the current PID namespace */
+ fd_pidns = open("/proc/self/ns/pid", O_RDONLY);
+ ASSERT_GE(fd_pidns, 0);
+
+ /* Get the PID namespace ID */
+ ret = ioctl(fd_pidns, NS_GET_ID, &pid_ns_id);
+ ASSERT_EQ(ret, 0);
+ ASSERT_NE(pid_ns_id, 0);
+
+ /* Verify we can get the same ID again */
+ __u64 pid_ns_id2 = 0;
+ ret = ioctl(fd_pidns, NS_GET_ID, &pid_ns_id2);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(pid_ns_id, pid_ns_id2);
+
+ close(fd_pidns);
+}
+
+TEST_F(nsid, pidns_separate)
+{
+ __u64 parent_pid_ns_id = 0;
+ __u64 child_pid_ns_id = 0;
+ int fd_parent_pidns, fd_child_pidns;
+ int ret;
+ pid_t pid;
+ int pipefd[2];
+
+ /* Get parent's PID namespace ID */
+ fd_parent_pidns = open("/proc/self/ns/pid", O_RDONLY);
+ ASSERT_GE(fd_parent_pidns, 0);
+ ret = ioctl(fd_parent_pidns, NS_GET_ID, &parent_pid_ns_id);
+ ASSERT_EQ(ret, 0);
+ ASSERT_NE(parent_pid_ns_id, 0);
+
+ /* Create a pipe for synchronization */
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child process */
+ close(pipefd[0]);
+
+ /* Create new PID namespace */
+ ret = unshare(CLONE_NEWPID);
+ if (ret != 0) {
+ /* Skip test if we don't have permission */
+ if (errno == EPERM || errno == EACCES) {
+ write(pipefd[1], "S", 1); /* Signal skip */
+ _exit(0);
+ }
+ _exit(1);
+ }
+
+ /*
+ * unshare(CLONE_NEWPID) does not move the caller; fork a grandchild
+ * to actually enter the new namespace.
+ */
+ pid_t grandchild = fork();
+ if (grandchild == 0) {
+ /*
+ * Grandchild is in the new namespace. Do not write to the
+ * pipe from here: a second writer could interleave with the
+ * PID bytes the parent reads below.
+ */
+ close(pipefd[1]);
+ pause();
+ _exit(0);
+ } else if (grandchild > 0) {
+ /* Child writes grandchild PID and waits */
+ write(pipefd[1], "Y", 1);
+ write(pipefd[1], &grandchild, sizeof(grandchild));
+ close(pipefd[1]);
+ pause(); /* Keep the parent alive to maintain the grandchild */
+ _exit(0);
+ } else {
+ _exit(1);
+ }
+ }
+
+ /* Track child for cleanup */
+ self->child_pid = pid;
+
+ /* Parent process */
+ close(pipefd[1]);
+
+ char buf;
+ ASSERT_EQ(read(pipefd[0], &buf, 1), 1);
+
+ if (buf == 'S') {
+ /* Child couldn't create namespace, skip test */
+ close(fd_parent_pidns);
+ close(pipefd[0]);
+ SKIP(return, "No permission to create PID namespace");
+ }
+
+ ASSERT_EQ(buf, 'Y');
+
+ pid_t grandchild_pid;
+ ASSERT_EQ(read(pipefd[0], &grandchild_pid, sizeof(grandchild_pid)), sizeof(grandchild_pid));
+ close(pipefd[0]);
+
+ /* Open grandchild's PID namespace */
+ char path[256];
+ snprintf(path, sizeof(path), "/proc/%d/ns/pid", grandchild_pid);
+ fd_child_pidns = open(path, O_RDONLY);
+ ASSERT_GE(fd_child_pidns, 0);
+
+ /* Get child's PID namespace ID */
+ ret = ioctl(fd_child_pidns, NS_GET_ID, &child_pid_ns_id);
+ ASSERT_EQ(ret, 0);
+ ASSERT_NE(child_pid_ns_id, 0);
+
+ /* Parent and child should have different PID namespace IDs */
+ ASSERT_NE(parent_pid_ns_id, child_pid_ns_id);
+
+ close(fd_parent_pidns);
+ close(fd_child_pidns);
+}
+
+TEST(nsid_netns_basic)
+{
+ __u64 net_ns_id = 0;
+ __u64 netns_cookie = 0;
+ int fd_netns;
+ int sock;
+ socklen_t optlen;
+ int ret;
+
+ /* Open the current network namespace */
+ fd_netns = open("/proc/self/ns/net", O_RDONLY);
+ ASSERT_GE(fd_netns, 0);
+
+ /* Get the network namespace ID via ioctl */
+ ret = ioctl(fd_netns, NS_GET_ID, &net_ns_id);
+ ASSERT_EQ(ret, 0);
+ ASSERT_NE(net_ns_id, 0);
+
+ /* Create a socket to get the SO_NETNS_COOKIE */
+ sock = socket(AF_UNIX, SOCK_STREAM, 0);
+ ASSERT_GE(sock, 0);
+
+ /* Get the network namespace cookie via socket option */
+ optlen = sizeof(netns_cookie);
+ ret = getsockopt(sock, SOL_SOCKET, SO_NETNS_COOKIE, &netns_cookie, &optlen);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(optlen, sizeof(netns_cookie));
+
+ /* The namespace ID and cookie should be identical */
+ ASSERT_EQ(net_ns_id, netns_cookie);
+
+ /* Verify we can get the same ID again */
+ __u64 net_ns_id2 = 0;
+ ret = ioctl(fd_netns, NS_GET_ID, &net_ns_id2);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(net_ns_id, net_ns_id2);
+
+ close(sock);
+ close(fd_netns);
+}
+
+TEST_F(nsid, netns_separate)
+{
+ __u64 parent_net_ns_id = 0;
+ __u64 parent_netns_cookie = 0;
+ __u64 child_net_ns_id = 0;
+ __u64 child_netns_cookie = 0;
+ int fd_parent_netns, fd_child_netns;
+ int parent_sock, child_sock;
+ socklen_t optlen;
+ int ret;
+ pid_t pid;
+ int pipefd[2];
+
+ /* Get parent's network namespace ID */
+ fd_parent_netns = open("/proc/self/ns/net", O_RDONLY);
+ ASSERT_GE(fd_parent_netns, 0);
+ ret = ioctl(fd_parent_netns, NS_GET_ID, &parent_net_ns_id);
+ ASSERT_EQ(ret, 0);
+ ASSERT_NE(parent_net_ns_id, 0);
+
+ /* Get parent's network namespace cookie */
+ parent_sock = socket(AF_UNIX, SOCK_STREAM, 0);
+ ASSERT_GE(parent_sock, 0);
+ optlen = sizeof(parent_netns_cookie);
+ ret = getsockopt(parent_sock, SOL_SOCKET, SO_NETNS_COOKIE, &parent_netns_cookie, &optlen);
+ ASSERT_EQ(ret, 0);
+
+ /* Verify parent's ID and cookie match */
+ ASSERT_EQ(parent_net_ns_id, parent_netns_cookie);
+
+ /* Create a pipe for synchronization */
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child process */
+ close(pipefd[0]);
+
+ /* Create new network namespace */
+ ret = unshare(CLONE_NEWNET);
+ if (ret != 0) {
+ /* Skip test if we don't have permission */
+ if (errno == EPERM || errno == EACCES) {
+ write(pipefd[1], "S", 1); /* Signal skip */
+ _exit(0);
+ }
+ _exit(1);
+ }
+
+ /* Signal success */
+ write(pipefd[1], "Y", 1);
+ close(pipefd[1]);
+
+ /* Keep namespace alive */
+ pause();
+ _exit(0);
+ }
+
+ /* Track child for cleanup */
+ self->child_pid = pid;
+
+ /* Parent process */
+ close(pipefd[1]);
+
+ char buf;
+ ASSERT_EQ(read(pipefd[0], &buf, 1), 1);
+ close(pipefd[0]);
+
+ if (buf == 'S') {
+ /* Child couldn't create namespace, skip test */
+ close(fd_parent_netns);
+ close(parent_sock);
+ SKIP(return, "No permission to create network namespace");
+ }
+
+ ASSERT_EQ(buf, 'Y');
+
+ /* Open child's network namespace */
+ char path[256];
+ snprintf(path, sizeof(path), "/proc/%d/ns/net", pid);
+ fd_child_netns = open(path, O_RDONLY);
+ ASSERT_GE(fd_child_netns, 0);
+
+ /* Get child's network namespace ID */
+ ret = ioctl(fd_child_netns, NS_GET_ID, &child_net_ns_id);
+ ASSERT_EQ(ret, 0);
+ ASSERT_NE(child_net_ns_id, 0);
+
+ /* Create socket in child's namespace to get cookie */
+ ret = setns(fd_child_netns, CLONE_NEWNET);
+ if (ret == 0) {
+ child_sock = socket(AF_UNIX, SOCK_STREAM, 0);
+ ASSERT_GE(child_sock, 0);
+
+ optlen = sizeof(child_netns_cookie);
+ ret = getsockopt(child_sock, SOL_SOCKET, SO_NETNS_COOKIE, &child_netns_cookie, &optlen);
+ ASSERT_EQ(ret, 0);
+
+ /* Verify child's ID and cookie match */
+ ASSERT_EQ(child_net_ns_id, child_netns_cookie);
+
+ close(child_sock);
+
+ /* Return to parent namespace */
+ setns(fd_parent_netns, CLONE_NEWNET);
+ }
+
+ /* Parent and child should have different network namespace IDs */
+ ASSERT_NE(parent_net_ns_id, child_net_ns_id);
+ if (child_netns_cookie != 0) {
+ ASSERT_NE(parent_netns_cookie, child_netns_cookie);
+ }
+
+ close(fd_parent_netns);
+ close(fd_child_netns);
+ close(parent_sock);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/namespaces/regression_pidfd_setns_test.c b/tools/testing/selftests/namespaces/regression_pidfd_setns_test.c
new file mode 100644
index 000000000000..753fd29dffd8
--- /dev/null
+++ b/tools/testing/selftests/namespaces/regression_pidfd_setns_test.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <errno.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <unistd.h>
+#include "../pidfd/pidfd.h"
+#include "../kselftest_harness.h"
+
+/*
+ * Regression tests for the setns(pidfd) active reference counting bug.
+ *
+ * These tests are based on the reproducers that triggered the race condition
+ * fixed by commit 1c465d0518dc ("ns: handle setns(pidfd, ...) cleanly").
+ *
+ * The bug: When using setns() with a pidfd, if the target task exits between
+ * prepare_nsset() and commit_nsset(), the namespaces would become inactive.
+ * Then ns_ref_active_get() would increment from 0 without properly resurrecting
+ * the owner chain, causing active reference count underflows.
+ */
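+
+/*
+ * Rough timeline of the race, pieced together from the description above
+ * (a sketch, not kernel code):
+ *
+ *   parent: setns(pidfd, ...) -> prepare_nsset() takes namespace references
+ *   child:  exits; its namespaces become inactive
+ *   parent: commit_nsset() -> ns_ref_active_get() increments 0 -> 1 without
+ *           resurrecting the owner chain
+ *   parent: exits -> the active reference count underflows
+ */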
+
+/*
+ * Simple pidfd setns test using create_child()+unshare().
+ *
+ * Without the fix, this would trigger active refcount warnings when the
+ * parent exits after doing setns(pidfd) on a child that has already exited.
+ */
+TEST(simple_pidfd_setns)
+{
+ pid_t child_pid;
+ int pidfd = -1;
+ int ret;
+ int sv[2];
+ char c;
+
+ /* Ignore SIGCHLD for autoreap */
+ ASSERT_NE(signal(SIGCHLD, SIG_IGN), SIG_ERR);
+
+ ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, sv), 0);
+
+ /* Create a child process without namespaces initially */
+ child_pid = create_child(&pidfd, 0);
+ ASSERT_GE(child_pid, 0);
+
+ if (child_pid == 0) {
+ close(sv[0]);
+
+ if (unshare(CLONE_NEWUTS | CLONE_NEWIPC | CLONE_NEWNET | CLONE_NEWUSER) < 0) {
+ close(sv[1]);
+ _exit(1);
+ }
+
+ /* Signal parent that namespaces are ready */
+ if (write_nointr(sv[1], "1", 1) < 0) {
+ close(sv[1]);
+ _exit(1);
+ }
+
+ close(sv[1]);
+ _exit(0);
+ }
+ ASSERT_GE(pidfd, 0);
+ EXPECT_EQ(close(sv[1]), 0);
+
+ ret = read_nointr(sv[0], &c, 1);
+ ASSERT_EQ(ret, 1);
+ EXPECT_EQ(close(sv[0]), 0);
+
+ /* Attach to the child's namespaces via pidfd */
+ ret = setns(pidfd, CLONE_NEWUTS | CLONE_NEWIPC);
+ TH_LOG("setns() returned %d", ret);
+ close(pidfd);
+}
+
+/*
+ * Simple pidfd setns test using create_child().
+ *
+ * This variation uses create_child() with namespace flags directly.
+ * Namespaces are created immediately at clone time.
+ */
+TEST(simple_pidfd_setns_clone)
+{
+ pid_t child_pid;
+ int pidfd = -1;
+ int ret;
+
+ /* Ignore SIGCHLD for autoreap */
+ ASSERT_NE(signal(SIGCHLD, SIG_IGN), SIG_ERR);
+
+ /* Create a child process with new namespaces using create_child() */
+ child_pid = create_child(&pidfd, CLONE_NEWUSER | CLONE_NEWUTS | CLONE_NEWIPC | CLONE_NEWNET);
+ ASSERT_GE(child_pid, 0);
+
+ if (child_pid == 0) {
+ /* Child: sleep for a while so parent can setns to us */
+ sleep(2);
+ _exit(0);
+ }
+
+ /* Parent: pidfd was already created by create_child() */
+ ASSERT_GE(pidfd, 0);
+
+ /* Attach to the child's namespaces via pidfd */
+ ret = setns(pidfd, CLONE_NEWUTS | CLONE_NEWIPC);
+ close(pidfd);
+ TH_LOG("setns() returned %d", ret);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/namespaces/siocgskns_test.c b/tools/testing/selftests/namespaces/siocgskns_test.c
new file mode 100644
index 000000000000..ba689a22d82f
--- /dev/null
+++ b/tools/testing/selftests/namespaces/siocgskns_test.c
@@ -0,0 +1,1824 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <linux/if.h>
+#include <linux/sockios.h>
+#include <linux/nsfs.h>
+#include <arpa/inet.h>
+#include "../kselftest_harness.h"
+#include "../filesystems/utils.h"
+#include "wrappers.h"
+
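+/* Fallbacks for toolchains whose UAPI headers predate these definitions. */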
+#ifndef SIOCGSKNS
+#define SIOCGSKNS 0x894C
+#endif
+
+#ifndef FD_NSFS_ROOT
+#define FD_NSFS_ROOT -10003
+#endif
+
+#ifndef FILEID_NSFS
+#define FILEID_NSFS 0xf1
+#endif
+
+/*
+ * Test basic SIOCGSKNS functionality.
+ * Create a socket and verify SIOCGSKNS returns the correct network namespace.
+ */
+TEST(siocgskns_basic)
+{
+ int sock_fd, netns_fd, current_netns_fd;
+ struct stat st1, st2;
+
+ /* Create a TCP socket */
+ sock_fd = socket(AF_INET, SOCK_STREAM, 0);
+ ASSERT_GE(sock_fd, 0);
+
+ /* Use SIOCGSKNS to get network namespace */
+ netns_fd = ioctl(sock_fd, SIOCGSKNS);
+ if (netns_fd < 0) {
+ close(sock_fd);
+ if (errno == ENOTTY || errno == EINVAL)
+ SKIP(return, "SIOCGSKNS not supported");
+ ASSERT_GE(netns_fd, 0);
+ }
+
+ /* Get current network namespace */
+ current_netns_fd = open("/proc/self/ns/net", O_RDONLY);
+ ASSERT_GE(current_netns_fd, 0);
+
+ /* Verify they match */
+ ASSERT_EQ(fstat(netns_fd, &st1), 0);
+ ASSERT_EQ(fstat(current_netns_fd, &st2), 0);
+ ASSERT_EQ(st1.st_ino, st2.st_ino);
+
+ close(sock_fd);
+ close(netns_fd);
+ close(current_netns_fd);
+}
+
+/*
+ * Test that socket file descriptors keep network namespaces active.
+ * Create a network namespace, create a socket in it, then exit the namespace.
+ * The namespace should remain active while the socket FD is held.
+ */
+TEST(siocgskns_keeps_netns_active)
+{
+ int sock_fd, netns_fd, test_fd;
+ int ipc_sockets[2];
+ pid_t pid;
+ int status;
+ struct stat st;
+
+ EXPECT_EQ(socketpair(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child: create new netns and socket */
+ close(ipc_sockets[0]);
+
+ if (unshare(CLONE_NEWNET) < 0) {
+ TH_LOG("unshare(CLONE_NEWNET) failed: %s", strerror(errno));
+ close(ipc_sockets[1]);
+ exit(1);
+ }
+
+ /* Create a socket in the new network namespace */
+ sock_fd = socket(AF_INET, SOCK_DGRAM, 0);
+ if (sock_fd < 0) {
+ TH_LOG("socket() failed: %s", strerror(errno));
+ close(ipc_sockets[1]);
+ exit(1);
+ }
+
+ /* Send socket FD to parent via SCM_RIGHTS */
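+ /* The fd duplicated into the parent pins the socket, and with it this child's netns, after the child exits. */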
+ struct msghdr msg = {0};
+ struct iovec iov = {0};
+ char buf[1] = {'X'};
+ char cmsg_buf[CMSG_SPACE(sizeof(int))];
+
+ iov.iov_base = buf;
+ iov.iov_len = 1;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = cmsg_buf;
+ msg.msg_controllen = sizeof(cmsg_buf);
+
+ struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_RIGHTS;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(int));
+ memcpy(CMSG_DATA(cmsg), &sock_fd, sizeof(int));
+
+ if (sendmsg(ipc_sockets[1], &msg, 0) < 0) {
+ close(sock_fd);
+ close(ipc_sockets[1]);
+ exit(1);
+ }
+
+ close(sock_fd);
+ close(ipc_sockets[1]);
+ exit(0);
+ }
+
+ /* Parent: receive socket FD */
+ close(ipc_sockets[1]);
+
+ struct msghdr msg = {0};
+ struct iovec iov = {0};
+ char buf[1];
+ char cmsg_buf[CMSG_SPACE(sizeof(int))];
+
+ iov.iov_base = buf;
+ iov.iov_len = 1;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = cmsg_buf;
+ msg.msg_controllen = sizeof(cmsg_buf);
+
+ ssize_t n = recvmsg(ipc_sockets[0], &msg, 0);
+ close(ipc_sockets[0]);
+ ASSERT_EQ(n, 1);
+
+ struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
+ ASSERT_NE(cmsg, NULL);
+ ASSERT_EQ(cmsg->cmsg_type, SCM_RIGHTS);
+
+ memcpy(&sock_fd, CMSG_DATA(cmsg), sizeof(int));
+
+ /* Wait for child to exit */
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ /* Get network namespace from socket */
+ netns_fd = ioctl(sock_fd, SIOCGSKNS);
+ if (netns_fd < 0) {
+ close(sock_fd);
+ if (errno == ENOTTY || errno == EINVAL)
+ SKIP(return, "SIOCGSKNS not supported");
+ ASSERT_GE(netns_fd, 0);
+ }
+
+ ASSERT_EQ(fstat(netns_fd, &st), 0);
+
+ /*
+ * Namespace should still be active because socket FD keeps it alive.
+ * Try to access it via /proc/self/fd/<fd>.
+ */
+ char path[64];
+ snprintf(path, sizeof(path), "/proc/self/fd/%d", netns_fd);
+ test_fd = open(path, O_RDONLY);
+ ASSERT_GE(test_fd, 0);
+ close(test_fd);
+ close(netns_fd);
+
+ /* Close socket - namespace should become inactive */
+ close(sock_fd);
+
+ /* SIOCGSKNS on the now-closed fd must fail (EBADF) */
+ ASSERT_LT(ioctl(sock_fd, SIOCGSKNS), 0);
+}
+
+/*
+ * Test SIOCGSKNS with different socket types (TCP, UDP, RAW).
+ */
+TEST(siocgskns_socket_types)
+{
+ int sock_tcp, sock_udp, sock_raw;
+ int netns_tcp, netns_udp, netns_raw;
+ struct stat st_tcp, st_udp, st_raw;
+
+ /* TCP socket */
+ sock_tcp = socket(AF_INET, SOCK_STREAM, 0);
+ ASSERT_GE(sock_tcp, 0);
+
+ /* UDP socket */
+ sock_udp = socket(AF_INET, SOCK_DGRAM, 0);
+ ASSERT_GE(sock_udp, 0);
+
+ /* RAW socket (may require privileges) */
+ sock_raw = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
+ if (sock_raw < 0 && (errno == EPERM || errno == EACCES)) {
+ sock_raw = -1; /* Skip raw socket test */
+ }
+
+ /* Test SIOCGSKNS on TCP */
+ netns_tcp = ioctl(sock_tcp, SIOCGSKNS);
+ if (netns_tcp < 0) {
+ close(sock_tcp);
+ close(sock_udp);
+ if (sock_raw >= 0) close(sock_raw);
+ if (errno == ENOTTY || errno == EINVAL)
+ SKIP(return, "SIOCGSKNS not supported");
+ ASSERT_GE(netns_tcp, 0);
+ }
+
+ /* Test SIOCGSKNS on UDP */
+ netns_udp = ioctl(sock_udp, SIOCGSKNS);
+ ASSERT_GE(netns_udp, 0);
+
+ /* Test SIOCGSKNS on RAW (if available) */
+ if (sock_raw >= 0) {
+ netns_raw = ioctl(sock_raw, SIOCGSKNS);
+ ASSERT_GE(netns_raw, 0);
+ }
+
+ /* Verify all return the same network namespace */
+ ASSERT_EQ(fstat(netns_tcp, &st_tcp), 0);
+ ASSERT_EQ(fstat(netns_udp, &st_udp), 0);
+ ASSERT_EQ(st_tcp.st_ino, st_udp.st_ino);
+
+ if (sock_raw >= 0) {
+ ASSERT_EQ(fstat(netns_raw, &st_raw), 0);
+ ASSERT_EQ(st_tcp.st_ino, st_raw.st_ino);
+ close(netns_raw);
+ close(sock_raw);
+ }
+
+ close(netns_tcp);
+ close(netns_udp);
+ close(sock_tcp);
+ close(sock_udp);
+}
+
+/*
+ * Test SIOCGSKNS across setns.
+ * Create a socket in netns A, switch to netns B, verify SIOCGSKNS still
+ * returns netns A.
+ */
+TEST(siocgskns_across_setns)
+{
+ int sock_fd, netns_a_fd, netns_b_fd, result_fd;
+ struct stat st_a;
+
+ /* Get current netns (A) */
+ netns_a_fd = open("/proc/self/ns/net", O_RDONLY);
+ ASSERT_GE(netns_a_fd, 0);
+ ASSERT_EQ(fstat(netns_a_fd, &st_a), 0);
+
+ /* Create socket in netns A */
+ sock_fd = socket(AF_INET, SOCK_STREAM, 0);
+ ASSERT_GE(sock_fd, 0);
+
+ /* Create new netns (B) */
+ ASSERT_EQ(unshare(CLONE_NEWNET), 0);
+
+ netns_b_fd = open("/proc/self/ns/net", O_RDONLY);
+ ASSERT_GE(netns_b_fd, 0);
+
+ /* Get netns from socket created in A */
+ result_fd = ioctl(sock_fd, SIOCGSKNS);
+ if (result_fd < 0) {
+ close(sock_fd);
+ setns(netns_a_fd, CLONE_NEWNET);
+ close(netns_a_fd);
+ close(netns_b_fd);
+ if (errno == ENOTTY || errno == EINVAL)
+ SKIP(return, "SIOCGSKNS not supported");
+ ASSERT_GE(result_fd, 0);
+ }
+
+ /* Verify it still points to netns A */
+ struct stat st_result_stat;
+ ASSERT_EQ(fstat(result_fd, &st_result_stat), 0);
+ ASSERT_EQ(st_a.st_ino, st_result_stat.st_ino);
+
+ close(result_fd);
+ close(sock_fd);
+ close(netns_b_fd);
+
+ /* Restore original netns */
+ ASSERT_EQ(setns(netns_a_fd, CLONE_NEWNET), 0);
+ close(netns_a_fd);
+}
+
+/*
+ * Test SIOCGSKNS fails on non-socket file descriptors.
+ */
+TEST(siocgskns_non_socket)
+{
+ int fd;
+ int pipefd[2];
+
+ /* Test on regular file */
+ fd = open("/dev/null", O_RDONLY);
+ ASSERT_GE(fd, 0);
+
+ ASSERT_LT(ioctl(fd, SIOCGSKNS), 0);
+ ASSERT_TRUE(errno == ENOTTY || errno == EINVAL);
+ close(fd);
+
+ /* Test on pipe */
+ ASSERT_EQ(pipe(pipefd), 0);
+
+ ASSERT_LT(ioctl(pipefd[0], SIOCGSKNS), 0);
+ ASSERT_TRUE(errno == ENOTTY || errno == EINVAL);
+
+ close(pipefd[0]);
+ close(pipefd[1]);
+}
+
+/*
+ * Test multiple sockets keep the same network namespace active.
+ * Create multiple sockets, verify closing some doesn't affect others.
+ */
+TEST(siocgskns_multiple_sockets)
+{
+ int socks[5];
+ int netns_fds[5];
+ int i;
+ struct stat st;
+ ino_t netns_ino;
+
+ /* Create new network namespace */
+ ASSERT_EQ(unshare(CLONE_NEWNET), 0);
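+ /* The harness forks each test into its own process, so switching this process into a fresh netns does not leak into other tests. */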
+
+ /* Create multiple sockets */
+ for (i = 0; i < 5; i++) {
+ socks[i] = socket(AF_INET, SOCK_STREAM, 0);
+ ASSERT_GE(socks[i], 0);
+ }
+
+ /* Get netns from all sockets */
+ for (i = 0; i < 5; i++) {
+ netns_fds[i] = ioctl(socks[i], SIOCGSKNS);
+ if (netns_fds[i] < 0) {
+ int j;
+ for (j = 0; j <= i; j++) {
+ close(socks[j]);
+ if (j < i && netns_fds[j] >= 0)
+ close(netns_fds[j]);
+ }
+ if (errno == ENOTTY || errno == EINVAL)
+ SKIP(return, "SIOCGSKNS not supported");
+ ASSERT_GE(netns_fds[i], 0);
+ }
+ }
+
+ /* Verify all point to same netns */
+ ASSERT_EQ(fstat(netns_fds[0], &st), 0);
+ netns_ino = st.st_ino;
+
+ for (i = 1; i < 5; i++) {
+ ASSERT_EQ(fstat(netns_fds[i], &st), 0);
+ ASSERT_EQ(st.st_ino, netns_ino);
+ }
+
+ /* Close some sockets */
+ for (i = 0; i < 3; i++) {
+ close(socks[i]);
+ }
+
+ /* Remaining netns FDs should still be valid */
+ for (i = 3; i < 5; i++) {
+ char path[64];
+ snprintf(path, sizeof(path), "/proc/self/fd/%d", netns_fds[i]);
+ int test_fd = open(path, O_RDONLY);
+ ASSERT_GE(test_fd, 0);
+ close(test_fd);
+ }
+
+ /* Cleanup */
+ for (i = 0; i < 5; i++) {
+ if (i >= 3)
+ close(socks[i]);
+ close(netns_fds[i]);
+ }
+}
+
+/*
+ * Test socket keeps netns active after creating process exits.
+ * Verify that as long as the socket FD exists, the namespace remains active.
+ */
+TEST(siocgskns_netns_lifecycle)
+{
+ int sock_fd, netns_fd;
+ int ipc_sockets[2];
+ int syncpipe[2];
+ pid_t pid;
+ int status;
+ char sync_byte;
+ struct stat st;
+ ino_t netns_ino;
+
+ EXPECT_EQ(socketpair(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets), 0);
+
+ ASSERT_EQ(pipe(syncpipe), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child */
+ close(ipc_sockets[0]);
+ close(syncpipe[1]);
+
+ if (unshare(CLONE_NEWNET) < 0) {
+ close(ipc_sockets[1]);
+ close(syncpipe[0]);
+ exit(1);
+ }
+
+ sock_fd = socket(AF_INET, SOCK_STREAM, 0);
+ if (sock_fd < 0) {
+ close(ipc_sockets[1]);
+ close(syncpipe[0]);
+ exit(1);
+ }
+
+ /* Send socket to parent */
+ struct msghdr msg = {0};
+ struct iovec iov = {0};
+ char buf[1] = {'X'};
+ char cmsg_buf[CMSG_SPACE(sizeof(int))];
+
+ iov.iov_base = buf;
+ iov.iov_len = 1;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = cmsg_buf;
+ msg.msg_controllen = sizeof(cmsg_buf);
+
+ struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_RIGHTS;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(int));
+ memcpy(CMSG_DATA(cmsg), &sock_fd, sizeof(int));
+
+ if (sendmsg(ipc_sockets[1], &msg, 0) < 0) {
+ close(sock_fd);
+ close(ipc_sockets[1]);
+ close(syncpipe[0]);
+ exit(1);
+ }
+
+ close(sock_fd);
+ close(ipc_sockets[1]);
+
+ /* Wait for parent signal */
+ read(syncpipe[0], &sync_byte, 1);
+ close(syncpipe[0]);
+ exit(0);
+ }
+
+ /* Parent */
+ close(ipc_sockets[1]);
+ close(syncpipe[0]);
+
+ /* Receive socket FD */
+ struct msghdr msg = {0};
+ struct iovec iov = {0};
+ char buf[1];
+ char cmsg_buf[CMSG_SPACE(sizeof(int))];
+
+ iov.iov_base = buf;
+ iov.iov_len = 1;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = cmsg_buf;
+ msg.msg_controllen = sizeof(cmsg_buf);
+
+ ssize_t n = recvmsg(ipc_sockets[0], &msg, 0);
+ close(ipc_sockets[0]);
+ ASSERT_EQ(n, 1);
+
+ struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
+ ASSERT_NE(cmsg, NULL);
+ memcpy(&sock_fd, CMSG_DATA(cmsg), sizeof(int));
+
+ /* Get netns from socket while child is alive */
+ netns_fd = ioctl(sock_fd, SIOCGSKNS);
+ if (netns_fd < 0) {
+ sync_byte = 'G';
+ write(syncpipe[1], &sync_byte, 1);
+ close(syncpipe[1]);
+ close(sock_fd);
+ waitpid(pid, NULL, 0);
+ if (errno == ENOTTY || errno == EINVAL)
+ SKIP(return, "SIOCGSKNS not supported");
+ ASSERT_GE(netns_fd, 0);
+ }
+ ASSERT_EQ(fstat(netns_fd, &st), 0);
+ netns_ino = st.st_ino;
+
+ /* Signal child to exit */
+ sync_byte = 'G';
+ write(syncpipe[1], &sync_byte, 1);
+ close(syncpipe[1]);
+
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+
+ /*
+ * Socket FD should still keep namespace active even after
+ * the creating process exited.
+ */
+ int test_fd = ioctl(sock_fd, SIOCGSKNS);
+ ASSERT_GE(test_fd, 0);
+
+ struct stat st_test;
+ ASSERT_EQ(fstat(test_fd, &st_test), 0);
+ ASSERT_EQ(st_test.st_ino, netns_ino);
+
+ close(test_fd);
+ close(netns_fd);
+
+ /* Close socket - namespace should become inactive */
+ close(sock_fd);
+}
+
+/*
+ * Test IPv6 sockets also work with SIOCGSKNS.
+ */
+TEST(siocgskns_ipv6)
+{
+ int sock_fd, netns_fd, current_netns_fd;
+ struct stat st1, st2;
+
+ /* Create an IPv6 TCP socket */
+ sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
+ ASSERT_GE(sock_fd, 0);
+
+ /* Use SIOCGSKNS */
+ netns_fd = ioctl(sock_fd, SIOCGSKNS);
+ if (netns_fd < 0) {
+ close(sock_fd);
+ if (errno == ENOTTY || errno == EINVAL)
+ SKIP(return, "SIOCGSKNS not supported");
+ ASSERT_GE(netns_fd, 0);
+ }
+
+ /* Verify it matches current namespace */
+ current_netns_fd = open("/proc/self/ns/net", O_RDONLY);
+ ASSERT_GE(current_netns_fd, 0);
+
+ ASSERT_EQ(fstat(netns_fd, &st1), 0);
+ ASSERT_EQ(fstat(current_netns_fd, &st2), 0);
+ ASSERT_EQ(st1.st_ino, st2.st_ino);
+
+ close(sock_fd);
+ close(netns_fd);
+ close(current_netns_fd);
+}
+
+/*
+ * Test that socket-kept netns appears in listns() output.
+ * Verify that a network namespace kept alive by a socket FD appears in
+ * listns() output even after the creating process exits, and that it
+ * disappears when the socket is closed.
+ */
+TEST(siocgskns_listns_visibility)
+{
+ int sock_fd, netns_fd, owner_fd;
+ int ipc_sockets[2];
+ pid_t pid;
+ int status;
+ __u64 netns_id, owner_id;
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = CLONE_NEWNET,
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
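+ /* user_ns_id == 0 means no owner filtering; it is set to the owner's ID further down to exercise filtered listing. */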
+ __u64 ns_ids[256];
+ int ret, i;
+ bool found_netns = false;
+
+ EXPECT_EQ(socketpair(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child: create new netns and socket */
+ close(ipc_sockets[0]);
+
+ if (unshare(CLONE_NEWNET) < 0) {
+ close(ipc_sockets[1]);
+ exit(1);
+ }
+
+ sock_fd = socket(AF_INET, SOCK_DGRAM, 0);
+ if (sock_fd < 0) {
+ close(ipc_sockets[1]);
+ exit(1);
+ }
+
+ /* Send socket FD to parent via SCM_RIGHTS */
+ struct msghdr msg = {0};
+ struct iovec iov = {0};
+ char buf[1] = {'X'};
+ char cmsg_buf[CMSG_SPACE(sizeof(int))];
+
+ iov.iov_base = buf;
+ iov.iov_len = 1;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = cmsg_buf;
+ msg.msg_controllen = sizeof(cmsg_buf);
+
+ struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_RIGHTS;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(int));
+ memcpy(CMSG_DATA(cmsg), &sock_fd, sizeof(int));
+
+ if (sendmsg(ipc_sockets[1], &msg, 0) < 0) {
+ close(sock_fd);
+ close(ipc_sockets[1]);
+ exit(1);
+ }
+
+ close(sock_fd);
+ close(ipc_sockets[1]);
+ exit(0);
+ }
+
+ /* Parent: receive socket FD */
+ close(ipc_sockets[1]);
+
+ struct msghdr msg = {0};
+ struct iovec iov = {0};
+ char buf[1];
+ char cmsg_buf[CMSG_SPACE(sizeof(int))];
+
+ iov.iov_base = buf;
+ iov.iov_len = 1;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = cmsg_buf;
+ msg.msg_controllen = sizeof(cmsg_buf);
+
+ ssize_t n = recvmsg(ipc_sockets[0], &msg, 0);
+ close(ipc_sockets[0]);
+ ASSERT_EQ(n, 1);
+
+ struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
+ ASSERT_NE(cmsg, NULL);
+ memcpy(&sock_fd, CMSG_DATA(cmsg), sizeof(int));
+
+ /* Wait for child to exit */
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ /* Get network namespace from socket */
+ netns_fd = ioctl(sock_fd, SIOCGSKNS);
+ if (netns_fd < 0) {
+ close(sock_fd);
+ if (errno == ENOTTY || errno == EINVAL)
+ SKIP(return, "SIOCGSKNS not supported");
+ ASSERT_GE(netns_fd, 0);
+ }
+
+ /* Get namespace ID */
+ ret = ioctl(netns_fd, NS_GET_ID, &netns_id);
+ if (ret < 0) {
+ close(sock_fd);
+ close(netns_fd);
+ if (errno == ENOTTY || errno == EINVAL)
+ SKIP(return, "NS_GET_ID not supported");
+ ASSERT_EQ(ret, 0);
+ }
+
+ /* Get owner user namespace */
+ owner_fd = ioctl(netns_fd, NS_GET_USERNS);
+ if (owner_fd < 0) {
+ close(sock_fd);
+ close(netns_fd);
+ if (errno == ENOTTY || errno == EINVAL)
+ SKIP(return, "NS_GET_USERNS not supported");
+ ASSERT_GE(owner_fd, 0);
+ }
+
+ /* Get owner namespace ID */
+ ret = ioctl(owner_fd, NS_GET_ID, &owner_id);
+ if (ret < 0) {
+ close(owner_fd);
+ close(sock_fd);
+ close(netns_fd);
+ ASSERT_EQ(ret, 0);
+ }
+ close(owner_fd);
+
+ /* Namespace should appear in listns() output */
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+ if (ret < 0) {
+ close(sock_fd);
+ close(netns_fd);
+ if (errno == ENOSYS)
+ SKIP(return, "listns() not supported");
+ TH_LOG("listns failed: %s", strerror(errno));
+ ASSERT_GE(ret, 0);
+ }
+
+ /* Search for our network namespace in the list */
+ for (i = 0; i < ret; i++) {
+ if (ns_ids[i] == netns_id) {
+ found_netns = true;
+ break;
+ }
+ }
+
+ ASSERT_TRUE(found_netns);
+ TH_LOG("Found netns %llu in listns() output (kept alive by socket)", netns_id);
+
+ /* Now verify with owner filtering */
+ req.user_ns_id = owner_id;
+ found_netns = false;
+
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+ ASSERT_GE(ret, 0);
+
+ for (i = 0; i < ret; i++) {
+ if (ns_ids[i] == netns_id) {
+ found_netns = true;
+ break;
+ }
+ }
+
+ ASSERT_TRUE(found_netns);
+ TH_LOG("Found netns %llu owned by userns %llu", netns_id, owner_id);
+
+ /* Close socket - namespace should become inactive and disappear from listns() */
+ close(sock_fd);
+ close(netns_fd);
+
+ /* Verify it's no longer in listns() output */
+ req.user_ns_id = 0;
+ found_netns = false;
+
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+ ASSERT_GE(ret, 0);
+
+ for (i = 0; i < ret; i++) {
+ if (ns_ids[i] == netns_id) {
+ found_netns = true;
+ break;
+ }
+ }
+
+ ASSERT_FALSE(found_netns);
+ TH_LOG("Netns %llu correctly disappeared from listns() after socket closed", netns_id);
+}
+
+/*
+ * Test that socket-kept netns can be reopened via file handle.
+ * Verify that a network namespace kept alive by a socket FD can be
+ * reopened using file handles even after the creating process exits.
+ */
+TEST(siocgskns_file_handle)
+{
+ int sock_fd, netns_fd, reopened_fd;
+ int ipc_sockets[2];
+ pid_t pid;
+ int status;
+ struct stat st1, st2;
+ ino_t netns_ino;
+ __u64 netns_id;
+ struct file_handle *handle;
+ struct nsfs_file_handle *nsfs_fh;
+ int ret;
+
+ /* Allocate file_handle structure for nsfs */
+ handle = malloc(sizeof(struct file_handle) + sizeof(struct nsfs_file_handle));
+ ASSERT_NE(handle, NULL);
+ handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ handle->handle_type = FILEID_NSFS;
+
+ EXPECT_EQ(socketpair(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child: create new netns and socket */
+ close(ipc_sockets[0]);
+
+ if (unshare(CLONE_NEWNET) < 0) {
+ close(ipc_sockets[1]);
+ exit(1);
+ }
+
+ sock_fd = socket(AF_INET, SOCK_DGRAM, 0);
+ if (sock_fd < 0) {
+ close(ipc_sockets[1]);
+ exit(1);
+ }
+
+ /* Send socket FD to parent via SCM_RIGHTS */
+ struct msghdr msg = {0};
+ struct iovec iov = {0};
+ char buf[1] = {'X'};
+ char cmsg_buf[CMSG_SPACE(sizeof(int))];
+
+ iov.iov_base = buf;
+ iov.iov_len = 1;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = cmsg_buf;
+ msg.msg_controllen = sizeof(cmsg_buf);
+
+ struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_RIGHTS;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(int));
+ memcpy(CMSG_DATA(cmsg), &sock_fd, sizeof(int));
+
+ if (sendmsg(ipc_sockets[1], &msg, 0) < 0) {
+ close(sock_fd);
+ close(ipc_sockets[1]);
+ exit(1);
+ }
+
+ close(sock_fd);
+ close(ipc_sockets[1]);
+ exit(0);
+ }
+
+ /* Parent: receive socket FD */
+ close(ipc_sockets[1]);
+
+ struct msghdr msg = {0};
+ struct iovec iov = {0};
+ char buf[1];
+ char cmsg_buf[CMSG_SPACE(sizeof(int))];
+
+ iov.iov_base = buf;
+ iov.iov_len = 1;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = cmsg_buf;
+ msg.msg_controllen = sizeof(cmsg_buf);
+
+ ssize_t n = recvmsg(ipc_sockets[0], &msg, 0);
+ close(ipc_sockets[0]);
+ ASSERT_EQ(n, 1);
+
+ struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
+ ASSERT_NE(cmsg, NULL);
+ memcpy(&sock_fd, CMSG_DATA(cmsg), sizeof(int));
+
+ /* Wait for child to exit */
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ /* Get network namespace from socket */
+ netns_fd = ioctl(sock_fd, SIOCGSKNS);
+ if (netns_fd < 0) {
+ free(handle);
+ close(sock_fd);
+ if (errno == ENOTTY || errno == EINVAL)
+ SKIP(return, "SIOCGSKNS not supported");
+ ASSERT_GE(netns_fd, 0);
+ }
+
+ ASSERT_EQ(fstat(netns_fd, &st1), 0);
+ netns_ino = st1.st_ino;
+
+ /* Get namespace ID */
+ ret = ioctl(netns_fd, NS_GET_ID, &netns_id);
+ if (ret < 0) {
+ free(handle);
+ close(sock_fd);
+ close(netns_fd);
+ if (errno == ENOTTY || errno == EINVAL)
+ SKIP(return, "NS_GET_ID not supported");
+ ASSERT_EQ(ret, 0);
+ }
+
+ /* Construct file handle from namespace ID */
+ nsfs_fh = (struct nsfs_file_handle *)handle->f_handle;
+ nsfs_fh->ns_id = netns_id;
+ nsfs_fh->ns_type = 0; /* Type field not needed for reopening */
+ nsfs_fh->ns_inum = 0; /* Inum field not needed for reopening */
+
+ TH_LOG("Constructed file handle for netns %lu (id=%llu)", netns_ino, netns_id);
+
+ /* Reopen namespace using file handle (while the netns FD keeps it active) */
+ reopened_fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ if (reopened_fd < 0) {
+ free(handle);
+ close(sock_fd);
+ if (errno == EOPNOTSUPP || errno == ENOSYS || errno == EBADF)
+ SKIP(return, "open_by_handle_at with FD_NSFS_ROOT not supported");
+ TH_LOG("open_by_handle_at failed: %s", strerror(errno));
+ ASSERT_GE(reopened_fd, 0);
+ }
+
+ /* Verify it's the same namespace */
+ ASSERT_EQ(fstat(reopened_fd, &st2), 0);
+ ASSERT_EQ(st1.st_ino, st2.st_ino);
+ ASSERT_EQ(st1.st_dev, st2.st_dev);
+
+ TH_LOG("Successfully reopened netns %lu via file handle", netns_ino);
+
+ close(reopened_fd);
+
+ /* Close the netns FD */
+ close(netns_fd);
+
+ /* Try to reopen via file handle - should fail since namespace is now inactive */
+ reopened_fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ ASSERT_LT(reopened_fd, 0);
+ TH_LOG("Correctly failed to reopen inactive netns: %s", strerror(errno));
+
+ /* Call SIOCGSKNS again to resurrect the namespace from the socket */
+ netns_fd = ioctl(sock_fd, SIOCGSKNS);
+ if (netns_fd < 0) {
+ free(handle);
+ close(sock_fd);
+ if (errno == ENOTTY || errno == EINVAL)
+ SKIP(return, "SIOCGSKNS not supported");
+ ASSERT_GE(netns_fd, 0);
+ }
+
+ /* Reopen via file handle again, now that the namespace has been resurrected */
+ reopened_fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ if (reopened_fd < 0) {
+ free(handle);
+ close(sock_fd);
+ if (errno == EOPNOTSUPP || errno == ENOSYS || errno == EBADF)
+ SKIP(return, "open_by_handle_at with FD_NSFS_ROOT not supported");
+ TH_LOG("open_by_handle_at failed: %s", strerror(errno));
+ ASSERT_GE(reopened_fd, 0);
+ }
+
+ /* Verify it's the same namespace */
+ ASSERT_EQ(fstat(reopened_fd, &st2), 0);
+ ASSERT_EQ(st1.st_ino, st2.st_ino);
+ ASSERT_EQ(st1.st_dev, st2.st_dev);
+
+ TH_LOG("Successfully reopened netns %lu via file handle", netns_ino);
+
+ /* Close socket - namespace should become inactive */
+ close(sock_fd);
+ free(handle);
+}
+
+/*
+ * Test combined listns() and file handle operations with socket-kept netns.
+ * Create a netns, keep it alive with a socket, verify it appears in listns(),
+ * then reopen it via file handle obtained from listns() entry.
+ */
+TEST(siocgskns_listns_and_file_handle)
+{
+ int sock_fd, netns_fd, userns_fd, reopened_fd;
+ int ipc_sockets[2];
+ pid_t pid;
+ int status;
+ struct stat st;
+ ino_t netns_ino;
+ __u64 netns_id, userns_id;
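+ /* ns_type below is a bitmask filter: one request lists netns and userns IDs together */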
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = CLONE_NEWNET | CLONE_NEWUSER,
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
+ __u64 ns_ids[256];
+ int ret, i;
+ bool found_netns = false, found_userns = false;
+ struct file_handle *handle;
+ struct nsfs_file_handle *nsfs_fh;
+
+ /* Allocate file_handle structure for nsfs */
+ handle = malloc(sizeof(struct file_handle) + sizeof(struct nsfs_file_handle));
+ ASSERT_NE(handle, NULL);
+ handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ handle->handle_type = FILEID_NSFS;
+
+ EXPECT_EQ(socketpair(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child: create new userns and netns with socket */
+ close(ipc_sockets[0]);
+
+ if (setup_userns() < 0) {
+ close(ipc_sockets[1]);
+ exit(1);
+ }
+
+ if (unshare(CLONE_NEWNET) < 0) {
+ close(ipc_sockets[1]);
+ exit(1);
+ }
+
+ sock_fd = socket(AF_INET, SOCK_DGRAM, 0);
+ if (sock_fd < 0) {
+ close(ipc_sockets[1]);
+ exit(1);
+ }
+
+ /* Send socket FD to parent via SCM_RIGHTS */
+ struct msghdr msg = {0};
+ struct iovec iov = {0};
+ char buf[1] = {'X'};
+ char cmsg_buf[CMSG_SPACE(sizeof(int))];
+
+ iov.iov_base = buf;
+ iov.iov_len = 1;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = cmsg_buf;
+ msg.msg_controllen = sizeof(cmsg_buf);
+
+ struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_RIGHTS;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(int));
+ memcpy(CMSG_DATA(cmsg), &sock_fd, sizeof(int));
+
+ if (sendmsg(ipc_sockets[1], &msg, 0) < 0) {
+ close(sock_fd);
+ close(ipc_sockets[1]);
+ exit(1);
+ }
+
+ close(sock_fd);
+ close(ipc_sockets[1]);
+ exit(0);
+ }
+
+ /* Parent: receive socket FD */
+ close(ipc_sockets[1]);
+
+ struct msghdr msg = {0};
+ struct iovec iov = {0};
+ char buf[1];
+ char cmsg_buf[CMSG_SPACE(sizeof(int))];
+
+ iov.iov_base = buf;
+ iov.iov_len = 1;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = cmsg_buf;
+ msg.msg_controllen = sizeof(cmsg_buf);
+
+ ssize_t n = recvmsg(ipc_sockets[0], &msg, 0);
+ close(ipc_sockets[0]);
+ ASSERT_EQ(n, 1);
+
+ struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
+ ASSERT_NE(cmsg, NULL);
+ memcpy(&sock_fd, CMSG_DATA(cmsg), sizeof(int));
+
+ /* Wait for child to exit */
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ /* Get network namespace from socket */
+ netns_fd = ioctl(sock_fd, SIOCGSKNS);
+ if (netns_fd < 0) {
+ free(handle);
+ close(sock_fd);
+ if (errno == ENOTTY || errno == EINVAL)
+ SKIP(return, "SIOCGSKNS not supported");
+ ASSERT_GE(netns_fd, 0);
+ }
+
+ ASSERT_EQ(fstat(netns_fd, &st), 0);
+ netns_ino = st.st_ino;
+
+ /* Get namespace ID */
+ ret = ioctl(netns_fd, NS_GET_ID, &netns_id);
+ if (ret < 0) {
+ free(handle);
+ close(sock_fd);
+ close(netns_fd);
+ if (errno == ENOTTY || errno == EINVAL)
+ SKIP(return, "NS_GET_ID not supported");
+ ASSERT_EQ(ret, 0);
+ }
+
+ /* Get owner user namespace */
+ userns_fd = ioctl(netns_fd, NS_GET_USERNS);
+ if (userns_fd < 0) {
+ free(handle);
+ close(sock_fd);
+ close(netns_fd);
+ if (errno == ENOTTY || errno == EINVAL)
+ SKIP(return, "NS_GET_USERNS not supported");
+ ASSERT_GE(userns_fd, 0);
+ }
+
+ /* Get owner namespace ID */
+ ret = ioctl(userns_fd, NS_GET_ID, &userns_id);
+ if (ret < 0) {
+ close(userns_fd);
+ free(handle);
+ close(sock_fd);
+ close(netns_fd);
+ ASSERT_EQ(ret, 0);
+ }
+ close(userns_fd);
+
+ TH_LOG("Testing netns %lu (id=%llu) owned by userns id=%llu", netns_ino, netns_id, userns_id);
+
+ /* Verify namespace appears in listns() */
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+ if (ret < 0) {
+ free(handle);
+ close(sock_fd);
+ close(netns_fd);
+ if (errno == ENOSYS)
+ SKIP(return, "listns() not supported");
+ TH_LOG("listns failed: %s", strerror(errno));
+ ASSERT_GE(ret, 0);
+ }
+
+ found_netns = false;
+ found_userns = false;
+ for (i = 0; i < ret; i++) {
+ if (ns_ids[i] == netns_id)
+ found_netns = true;
+ if (ns_ids[i] == userns_id)
+ found_userns = true;
+ }
+ ASSERT_TRUE(found_netns);
+ ASSERT_TRUE(found_userns);
+ TH_LOG("Found netns %llu in listns() output", netns_id);
+
+ /* Construct file handle from namespace ID */
+ nsfs_fh = (struct nsfs_file_handle *)handle->f_handle;
+ nsfs_fh->ns_id = netns_id;
+ nsfs_fh->ns_type = 0;
+ nsfs_fh->ns_inum = 0;
+
+ reopened_fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ if (reopened_fd < 0) {
+ free(handle);
+ close(sock_fd);
+ if (errno == EOPNOTSUPP || errno == ENOSYS || errno == EBADF)
+ SKIP(return, "open_by_handle_at with FD_NSFS_ROOT not supported");
+ TH_LOG("open_by_handle_at failed: %s", strerror(errno));
+ ASSERT_GE(reopened_fd, 0);
+ }
+
+ struct stat reopened_st;
+ ASSERT_EQ(fstat(reopened_fd, &reopened_st), 0);
+ ASSERT_EQ(reopened_st.st_ino, netns_ino);
+
+ TH_LOG("Successfully reopened netns %lu via file handle (socket-kept)", netns_ino);
+
+ close(reopened_fd);
+ close(netns_fd);
+
+ /* Try to reopen via file handle - should fail since namespace is now inactive */
+ reopened_fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ ASSERT_LT(reopened_fd, 0);
+ TH_LOG("Correctly failed to reopen inactive netns: %s", strerror(errno));
+
+ /* Resurrect the namespace by calling SIOCGSKNS again */
+ netns_fd = ioctl(sock_fd, SIOCGSKNS);
+ if (netns_fd < 0) {
+ free(handle);
+ close(sock_fd);
+ if (errno == ENOTTY || errno == EINVAL)
+ SKIP(return, "SIOCGSKNS not supported");
+ ASSERT_GE(netns_fd, 0);
+ }
+
+ /* Verify namespace appears in listns() */
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+ if (ret < 0) {
+ free(handle);
+ close(sock_fd);
+ close(netns_fd);
+ if (errno == ENOSYS)
+ SKIP(return, "listns() not supported");
+ TH_LOG("listns failed: %s", strerror(errno));
+ ASSERT_GE(ret, 0);
+ }
+
+ found_netns = false;
+ found_userns = false;
+ for (i = 0; i < ret; i++) {
+ if (ns_ids[i] == netns_id)
+ found_netns = true;
+ if (ns_ids[i] == userns_id)
+ found_userns = true;
+ }
+ ASSERT_TRUE(found_netns);
+ ASSERT_TRUE(found_userns);
+ TH_LOG("Found netns %llu in listns() output", netns_id);
+
+ close(netns_fd);
+
+ /* Verify the namespace no longer appears in listns() */
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+ if (ret < 0) {
+ free(handle);
+ close(sock_fd);
+ if (errno == ENOSYS)
+ SKIP(return, "listns() not supported");
+ TH_LOG("listns failed: %s", strerror(errno));
+ ASSERT_GE(ret, 0);
+ }
+
+ found_netns = false;
+ found_userns = false;
+ for (i = 0; i < ret; i++) {
+ if (ns_ids[i] == netns_id)
+ found_netns = true;
+ if (ns_ids[i] == userns_id)
+ found_userns = true;
+ }
+ ASSERT_FALSE(found_netns);
+ ASSERT_FALSE(found_userns);
+ TH_LOG("Netns %llu correctly disappeared from listns() after socket closed", netns_id);
+
+ close(sock_fd);
+ free(handle);
+}
+
+/*
+ * Test multi-level namespace resurrection across three user namespace levels.
+ *
+ * This test creates a complex namespace hierarchy with three levels of user
+ * namespaces and a network namespace at the deepest level. It verifies that
+ * the resurrection semantics work correctly when SIOCGSKNS is called on a
+ * socket from an inactive namespace tree, and that listns() and
+ * open_by_handle_at() correctly respect visibility rules.
+ *
+ * Hierarchy after child processes exit (all with 0 active refcount):
+ *
+ *   net_L3A (0)       <- Level 3 network namespace
+ *       |
+ *       v  (owned by)
+ *   userns_L3 (0)     <- Level 3 user namespace
+ *       |
+ *       v  (owned by)
+ *   userns_L2 (0)     <- Level 2 user namespace
+ *       |
+ *       v  (owned by)
+ *   userns_L1 (0)     <- Level 1 user namespace
+ *       |
+ *       v  (owned by)
+ *   init_user_ns
+ *
+ * The test verifies:
+ * 1. SIOCGSKNS on a socket from inactive net_L3A resurrects the entire chain
+ * 2. After resurrection, all namespaces are visible in listns()
+ * 3. Resurrected namespaces can be reopened via file handles
+ * 4. Closing the netns FD cascades down: the entire ownership chain
+ * (userns_L3 -> userns_L2 -> userns_L1) becomes inactive again
+ * 5. Inactive namespaces disappear from listns() and cannot be reopened
+ * 6. Calling SIOCGSKNS again on the same socket resurrects the tree again
+ * 7. After second resurrection, namespaces are visible and can be reopened
+ */
+TEST(siocgskns_multilevel_resurrection)
+{
+ int ipc_sockets[2];
+ pid_t pid_l1, pid_l2, pid_l3;
+ int status;
+
+ /* Socket FD received from the children; the netns FD is derived from it */
+ int sock_L3A_fd = -1;
+ int netns_L3A_fd = -1;
+ __u64 netns_L3A_id;
+ __u64 userns_L1_id, userns_L2_id, userns_L3_id;
+
+ /* For listns() and file handle testing */
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = CLONE_NEWNET | CLONE_NEWUSER,
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
+ __u64 ns_ids[256];
+ int ret, i;
+ struct file_handle *handle;
+ struct nsfs_file_handle *nsfs_fh;
+ int reopened_fd;
+
+ /* Allocate file handle for testing */
+ handle = malloc(sizeof(struct file_handle) + sizeof(struct nsfs_file_handle));
+ ASSERT_NE(handle, NULL);
+ handle->handle_bytes = sizeof(struct nsfs_file_handle);
+ handle->handle_type = FILEID_NSFS;
+
+ EXPECT_EQ(socketpair(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets), 0);
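+
+ /*
+ * The L3 socket is relayed L3 -> L2 -> L1 -> parent via SCM_RIGHTS, so
+ * the parent holds the only reference once all three children exit.
+ */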
+
+ /*
+ * Fork level 1 child that creates userns_L1
+ */
+ pid_l1 = fork();
+ ASSERT_GE(pid_l1, 0);
+
+ if (pid_l1 == 0) {
+ /* Level 1 child */
+ int ipc_L2[2];
+ close(ipc_sockets[0]);
+
+ /* Create userns_L1 */
+ if (setup_userns() < 0) {
+ close(ipc_sockets[1]);
+ exit(1);
+ }
+
+ /* Create socketpair for communicating with L2 child */
+ if (socketpair(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_L2) < 0) {
+ close(ipc_sockets[1]);
+ exit(1);
+ }
+
+ /*
+ * Fork level 2 child that creates userns_L2
+ */
+ pid_l2 = fork();
+ if (pid_l2 < 0) {
+ close(ipc_sockets[1]);
+ close(ipc_L2[0]);
+ close(ipc_L2[1]);
+ exit(1);
+ }
+
+ if (pid_l2 == 0) {
+ /* Level 2 child */
+ int ipc_L3[2];
+ close(ipc_L2[0]);
+
+ /* Create userns_L2 (nested inside userns_L1) */
+ if (setup_userns() < 0) {
+ close(ipc_L2[1]);
+ exit(1);
+ }
+
+ /* Create socketpair for communicating with L3 child */
+ if (socketpair(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_L3) < 0) {
+ close(ipc_L2[1]);
+ exit(1);
+ }
+
+ /*
+ * Fork level 3 child that creates userns_L3 and network namespaces
+ */
+ pid_l3 = fork();
+ if (pid_l3 < 0) {
+ close(ipc_L2[1]);
+ close(ipc_L3[0]);
+ close(ipc_L3[1]);
+ exit(1);
+ }
+
+ if (pid_l3 == 0) {
+ /* Level 3 child - the deepest level */
+ int sock_fd;
+ close(ipc_L3[0]);
+
+ /* Create userns_L3 (nested inside userns_L2) */
+ if (setup_userns() < 0) {
+ close(ipc_L3[1]);
+ exit(1);
+ }
+
+ /* Create network namespace at level 3 */
+ if (unshare(CLONE_NEWNET) < 0) {
+ close(ipc_L3[1]);
+ exit(1);
+ }
+
+ /* Create socket in net_L3A */
+ sock_fd = socket(AF_INET, SOCK_DGRAM, 0);
+ if (sock_fd < 0) {
+ close(ipc_L3[1]);
+ exit(1);
+ }
+
+ /* Send socket FD to L2 parent */
+ struct msghdr msg = {0};
+ struct iovec iov = {0};
+ char buf[1] = {'X'};
+ char cmsg_buf[CMSG_SPACE(sizeof(int))];
+
+ iov.iov_base = buf;
+ iov.iov_len = 1;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = cmsg_buf;
+ msg.msg_controllen = sizeof(cmsg_buf);
+
+ struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_RIGHTS;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(int));
+ memcpy(CMSG_DATA(cmsg), &sock_fd, sizeof(int));
+
+ if (sendmsg(ipc_L3[1], &msg, 0) < 0) {
+ close(sock_fd);
+ close(ipc_L3[1]);
+ exit(1);
+ }
+
+ close(sock_fd);
+ close(ipc_L3[1]);
+ exit(0);
+ }
+
+ /* Level 2 child - receive from L3 and forward to L1 */
+ close(ipc_L3[1]);
+
+ struct msghdr msg = {0};
+ struct iovec iov = {0};
+ char buf[1];
+ char cmsg_buf[CMSG_SPACE(sizeof(int))];
+ int received_fd;
+
+ iov.iov_base = buf;
+ iov.iov_len = 1;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = cmsg_buf;
+ msg.msg_controllen = sizeof(cmsg_buf);
+
+ ssize_t n = recvmsg(ipc_L3[0], &msg, 0);
+ close(ipc_L3[0]);
+
+ if (n != 1) {
+ close(ipc_L2[1]);
+ waitpid(pid_l3, NULL, 0);
+ exit(1);
+ }
+
+ struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
+ if (!cmsg) {
+ close(ipc_L2[1]);
+ waitpid(pid_l3, NULL, 0);
+ exit(1);
+ }
+ memcpy(&received_fd, CMSG_DATA(cmsg), sizeof(int));
+
+ /* Wait for L3 child */
+ waitpid(pid_l3, NULL, 0);
+
+ /* Forward the socket FD to L1 parent */
+ memset(&msg, 0, sizeof(msg));
+ buf[0] = 'Y';
+ iov.iov_base = buf;
+ iov.iov_len = 1;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = cmsg_buf;
+ msg.msg_controllen = sizeof(cmsg_buf);
+
+ cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_RIGHTS;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(int));
+ memcpy(CMSG_DATA(cmsg), &received_fd, sizeof(int));
+
+ if (sendmsg(ipc_L2[1], &msg, 0) < 0) {
+ close(received_fd);
+ close(ipc_L2[1]);
+ exit(1);
+ }
+
+ close(received_fd);
+ close(ipc_L2[1]);
+ exit(0);
+ }
+
+ /* Level 1 child - receive from L2 and forward to parent */
+ close(ipc_L2[1]);
+
+ struct msghdr msg = {0};
+ struct iovec iov = {0};
+ char buf[1];
+ char cmsg_buf[CMSG_SPACE(sizeof(int))];
+ int received_fd;
+
+ iov.iov_base = buf;
+ iov.iov_len = 1;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = cmsg_buf;
+ msg.msg_controllen = sizeof(cmsg_buf);
+
+ ssize_t n = recvmsg(ipc_L2[0], &msg, 0);
+ close(ipc_L2[0]);
+
+ if (n != 1) {
+ close(ipc_sockets[1]);
+ waitpid(pid_l2, NULL, 0);
+ exit(1);
+ }
+
+ struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
+ if (!cmsg) {
+ close(ipc_sockets[1]);
+ waitpid(pid_l2, NULL, 0);
+ exit(1);
+ }
+ memcpy(&received_fd, CMSG_DATA(cmsg), sizeof(int));
+
+ /* Wait for L2 child */
+ waitpid(pid_l2, NULL, 0);
+
+ /* Forward the socket FD to parent */
+ memset(&msg, 0, sizeof(msg));
+ buf[0] = 'Z';
+ iov.iov_base = buf;
+ iov.iov_len = 1;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = cmsg_buf;
+ msg.msg_controllen = sizeof(cmsg_buf);
+
+ cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_RIGHTS;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(int));
+ memcpy(CMSG_DATA(cmsg), &received_fd, sizeof(int));
+
+ if (sendmsg(ipc_sockets[1], &msg, 0) < 0) {
+ close(received_fd);
+ close(ipc_sockets[1]);
+ exit(1);
+ }
+
+ close(received_fd);
+ close(ipc_sockets[1]);
+ exit(0);
+ }
+
+ /* Parent - receive the socket from the deepest level */
+ close(ipc_sockets[1]);
+
+ struct msghdr msg = {0};
+ struct iovec iov = {0};
+ char buf[1];
+ char cmsg_buf[CMSG_SPACE(sizeof(int))];
+
+ iov.iov_base = buf;
+ iov.iov_len = 1;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = cmsg_buf;
+ msg.msg_controllen = sizeof(cmsg_buf);
+
+ ssize_t n = recvmsg(ipc_sockets[0], &msg, 0);
+ close(ipc_sockets[0]);
+
+ if (n != 1) {
+ free(handle);
+ waitpid(pid_l1, NULL, 0);
+ SKIP(return, "Failed to receive socket from child");
+ }
+
+ struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
+ if (!cmsg) {
+ free(handle);
+ waitpid(pid_l1, NULL, 0);
+ SKIP(return, "Failed to receive socket from child");
+ }
+ memcpy(&sock_L3A_fd, CMSG_DATA(cmsg), sizeof(int));
+
+ /* Wait for L1 child */
+ waitpid(pid_l1, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+
+ /*
+ * At this point, all child processes have exited. The socket itself
+ * doesn't keep the namespace active - we need to call SIOCGSKNS which
+ * will resurrect the entire namespace tree by taking active references.
+ */
+
+ /* Get network namespace from socket - this resurrects the tree */
+ netns_L3A_fd = ioctl(sock_L3A_fd, SIOCGSKNS);
+ if (netns_L3A_fd < 0) {
+ free(handle);
+ close(sock_L3A_fd);
+ if (errno == ENOTTY || errno == EINVAL)
+ SKIP(return, "SIOCGSKNS not supported");
+ ASSERT_GE(netns_L3A_fd, 0);
+ }
+
+ /* Get namespace ID for net_L3A */
+ ret = ioctl(netns_L3A_fd, NS_GET_ID, &netns_L3A_id);
+ if (ret < 0) {
+ free(handle);
+ close(sock_L3A_fd);
+ close(netns_L3A_fd);
+ if (errno == ENOTTY || errno == EINVAL)
+ SKIP(return, "NS_GET_ID not supported");
+ ASSERT_EQ(ret, 0);
+ }
+
+ /* Get owner user namespace chain: userns_L3 -> userns_L2 -> userns_L1 */
+ int userns_L3_fd = ioctl(netns_L3A_fd, NS_GET_USERNS);
+ if (userns_L3_fd < 0) {
+ free(handle);
+ close(sock_L3A_fd);
+ close(netns_L3A_fd);
+ if (errno == ENOTTY || errno == EINVAL)
+ SKIP(return, "NS_GET_USERNS not supported");
+ ASSERT_GE(userns_L3_fd, 0);
+ }
+
+ ret = ioctl(userns_L3_fd, NS_GET_ID, &userns_L3_id);
+ ASSERT_EQ(ret, 0);
+
+ int userns_L2_fd = ioctl(userns_L3_fd, NS_GET_USERNS);
+ ASSERT_GE(userns_L2_fd, 0);
+ ret = ioctl(userns_L2_fd, NS_GET_ID, &userns_L2_id);
+ ASSERT_EQ(ret, 0);
+
+ int userns_L1_fd = ioctl(userns_L2_fd, NS_GET_USERNS);
+ ASSERT_GE(userns_L1_fd, 0);
+ ret = ioctl(userns_L1_fd, NS_GET_ID, &userns_L1_id);
+ ASSERT_EQ(ret, 0);
+
+ close(userns_L1_fd);
+ close(userns_L2_fd);
+ close(userns_L3_fd);
+
+ TH_LOG("Multi-level hierarchy: net_L3A (id=%llu) -> userns_L3 (id=%llu) -> userns_L2 (id=%llu) -> userns_L1 (id=%llu)",
+ netns_L3A_id, userns_L3_id, userns_L2_id, userns_L1_id);
+
+ /*
+ * Test 1: Verify net_L3A is visible in listns() after resurrection.
+ * The entire ownership chain should be resurrected and visible.
+ */
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+ if (ret < 0) {
+ free(handle);
+ close(sock_L3A_fd);
+ close(netns_L3A_fd);
+ if (errno == ENOSYS)
+ SKIP(return, "listns() not supported");
+ ASSERT_GE(ret, 0);
+ }
+
+ bool found_netns_L3A = false;
+ bool found_userns_L1 = false;
+ bool found_userns_L2 = false;
+ bool found_userns_L3 = false;
+
+ for (i = 0; i < ret; i++) {
+ if (ns_ids[i] == netns_L3A_id)
+ found_netns_L3A = true;
+ if (ns_ids[i] == userns_L1_id)
+ found_userns_L1 = true;
+ if (ns_ids[i] == userns_L2_id)
+ found_userns_L2 = true;
+ if (ns_ids[i] == userns_L3_id)
+ found_userns_L3 = true;
+ }
+
+ ASSERT_TRUE(found_netns_L3A);
+ ASSERT_TRUE(found_userns_L1);
+ ASSERT_TRUE(found_userns_L2);
+ ASSERT_TRUE(found_userns_L3);
+ TH_LOG("Resurrection verified: all namespaces in hierarchy visible in listns()");
+
+ /*
+ * Test 2: Verify net_L3A can be reopened via file handle.
+ */
+ nsfs_fh = (struct nsfs_file_handle *)handle->f_handle;
+ nsfs_fh->ns_id = netns_L3A_id;
+ nsfs_fh->ns_type = 0;
+ nsfs_fh->ns_inum = 0;
+
+ reopened_fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ if (reopened_fd < 0) {
+ free(handle);
+ close(sock_L3A_fd);
+ close(netns_L3A_fd);
+ if (errno == EOPNOTSUPP || errno == ENOSYS || errno == EBADF)
+ SKIP(return, "open_by_handle_at with FD_NSFS_ROOT not supported");
+ TH_LOG("open_by_handle_at failed: %s", strerror(errno));
+ ASSERT_GE(reopened_fd, 0);
+ }
+
+ close(reopened_fd);
+ TH_LOG("File handle test passed: net_L3A can be reopened");
+
+ /*
+ * Test 3: Verify that when we close the netns FD (dropping the last
+ * active reference), the entire tree becomes inactive and disappears
+ * from listns(). The cascade goes: net_L3A drops -> userns_L3 drops ->
+ * userns_L2 drops -> userns_L1 drops.
+ */
+ close(netns_L3A_fd);
+
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+ ASSERT_GE(ret, 0);
+
+ found_netns_L3A = false;
+ found_userns_L1 = false;
+ found_userns_L2 = false;
+ found_userns_L3 = false;
+
+ for (i = 0; i < ret; i++) {
+ if (ns_ids[i] == netns_L3A_id)
+ found_netns_L3A = true;
+ if (ns_ids[i] == userns_L1_id)
+ found_userns_L1 = true;
+ if (ns_ids[i] == userns_L2_id)
+ found_userns_L2 = true;
+ if (ns_ids[i] == userns_L3_id)
+ found_userns_L3 = true;
+ }
+
+ ASSERT_FALSE(found_netns_L3A);
+ ASSERT_FALSE(found_userns_L1);
+ ASSERT_FALSE(found_userns_L2);
+ ASSERT_FALSE(found_userns_L3);
+ TH_LOG("Cascade test passed: all namespaces disappeared after netns FD closed");
+
+ /*
+ * Test 4: Verify file handle no longer works for inactive namespace.
+ */
+ reopened_fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ if (reopened_fd >= 0) {
+ close(reopened_fd);
+ free(handle);
+ ASSERT_TRUE(false); /* Should have failed */
+ }
+ TH_LOG("Inactive namespace correctly cannot be reopened via file handle");
+
+ /*
+ * Test 5: Verify that calling SIOCGSKNS again resurrects the tree again.
+ * The socket is still valid, so we can call SIOCGSKNS on it to resurrect
+ * the namespace tree once more.
+ */
+ netns_L3A_fd = ioctl(sock_L3A_fd, SIOCGSKNS);
+ ASSERT_GE(netns_L3A_fd, 0);
+
+ TH_LOG("Called SIOCGSKNS again to resurrect the namespace tree");
+
+ /* Verify the namespace tree is resurrected and visible in listns() */
+ ret = sys_listns(&req, ns_ids, ARRAY_SIZE(ns_ids), 0);
+ ASSERT_GE(ret, 0);
+
+ found_netns_L3A = false;
+ found_userns_L1 = false;
+ found_userns_L2 = false;
+ found_userns_L3 = false;
+
+ for (i = 0; i < ret; i++) {
+ if (ns_ids[i] == netns_L3A_id)
+ found_netns_L3A = true;
+ if (ns_ids[i] == userns_L1_id)
+ found_userns_L1 = true;
+ if (ns_ids[i] == userns_L2_id)
+ found_userns_L2 = true;
+ if (ns_ids[i] == userns_L3_id)
+ found_userns_L3 = true;
+ }
+
+ ASSERT_TRUE(found_netns_L3A);
+ ASSERT_TRUE(found_userns_L1);
+ ASSERT_TRUE(found_userns_L2);
+ ASSERT_TRUE(found_userns_L3);
+ TH_LOG("Second resurrection verified: all namespaces in hierarchy visible in listns() again");
+
+ /* Verify we can reopen via file handle again */
+ reopened_fd = open_by_handle_at(FD_NSFS_ROOT, handle, O_RDONLY);
+ if (reopened_fd < 0) {
+ free(handle);
+ close(sock_L3A_fd);
+ close(netns_L3A_fd);
+ TH_LOG("open_by_handle_at failed after second resurrection: %s", strerror(errno));
+ ASSERT_GE(reopened_fd, 0);
+ }
+
+ close(reopened_fd);
+ TH_LOG("File handle test passed: net_L3A can be reopened after second resurrection");
+
+ /* Final cleanup */
+ close(sock_L3A_fd);
+ close(netns_L3A_fd);
+ free(handle);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/namespaces/stress_test.c b/tools/testing/selftests/namespaces/stress_test.c
new file mode 100644
index 000000000000..dd7df7d6cb27
--- /dev/null
+++ b/tools/testing/selftests/namespaces/stress_test.c
@@ -0,0 +1,626 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <linux/nsfs.h>
+#include "../kselftest_harness.h"
+#include "../filesystems/utils.h"
+#include "wrappers.h"
+
+/*
+ * Stress tests for namespace active reference counting.
+ *
+ * These tests validate that the active reference counting system can handle
+ * high load scenarios including rapid namespace creation/destruction, large
+ * numbers of concurrent namespaces, and various edge cases under stress.
+ */
+
+/*
+ * Test rapid creation and destruction of user namespaces.
+ * Create and destroy namespaces in quick succession to stress the
+ * active reference tracking and ensure no leaks occur.
+ */
+TEST(rapid_namespace_creation_destruction)
+{
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = CLONE_NEWUSER,
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
+ __u64 ns_ids_before[256], ns_ids_after[256];
+ ssize_t ret_before, ret_after;
+ int i;
+
+ /* Get baseline count of active user namespaces */
+ ret_before = sys_listns(&req, ns_ids_before, ARRAY_SIZE(ns_ids_before), 0);
+ if (ret_before < 0) {
+ if (errno == ENOSYS)
+ SKIP(return, "listns() not supported");
+ ASSERT_GE(ret_before, 0);
+ }
+
+ TH_LOG("Baseline: %zd active user namespaces", ret_before);
+
+ /* Rapidly create and destroy 100 user namespaces */
+ for (i = 0; i < 100; i++) {
+ pid_t pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child: create user namespace and immediately exit */
+ if (setup_userns() < 0)
+ exit(1);
+ exit(0);
+ }
+
+ /* Parent: wait for child */
+ int status;
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+ }
+
+ /* Verify we're back to baseline (no leaked namespaces) */
+ ret_after = sys_listns(&req, ns_ids_after, ARRAY_SIZE(ns_ids_after), 0);
+ ASSERT_GE(ret_after, 0);
+
+ TH_LOG("After 100 rapid create/destroy cycles: %zd active user namespaces", ret_after);
+ ASSERT_EQ(ret_before, ret_after);
+}
+
+/*
+ * Test creating many concurrent namespaces.
+ * Verify that listns() correctly tracks all of them and that they all
+ * become inactive after processes exit.
+ */
+TEST(many_concurrent_namespaces)
+{
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = CLONE_NEWUSER,
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
+ __u64 ns_ids_before[512], ns_ids_during[512], ns_ids_after[512];
+ ssize_t ret_before, ret_during, ret_after;
+ pid_t pids[50];
+ int num_children = 50;
+ int i;
+ int sv[2];
+
+ /* Get baseline */
+ ret_before = sys_listns(&req, ns_ids_before, ARRAY_SIZE(ns_ids_before), 0);
+ if (ret_before < 0) {
+ if (errno == ENOSYS)
+ SKIP(return, "listns() not supported");
+ ASSERT_GE(ret_before, 0);
+ }
+
+ TH_LOG("Baseline: %zd active user namespaces", ret_before);
+
+ ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, sv), 0);
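+ /* Handshake: each child writes a ready byte, then blocks until the parent writes back */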
+
+ /* Create many children, each with their own user namespace */
+ for (i = 0; i < num_children; i++) {
+ pids[i] = fork();
+ ASSERT_GE(pids[i], 0);
+
+ if (pids[i] == 0) {
+ /* Child: create user namespace and wait for parent signal */
+ char c = 'R';
+
+ close(sv[0]);
+
+ if (setup_userns() < 0) {
+ close(sv[1]);
+ exit(1);
+ }
+
+ /* Signal parent we're ready */
+ if (write(sv[1], &c, 1) != 1) {
+ close(sv[1]);
+ exit(1);
+ }
+
+ /* Wait for parent signal to exit */
+ if (read(sv[1], &c, 1) != 1) {
+ close(sv[1]);
+ exit(1);
+ }
+
+ close(sv[1]);
+ exit(0);
+ }
+ }
+
+ close(sv[1]);
+
+ /* Wait for all children to signal ready */
+ for (i = 0; i < num_children; i++) {
+ char c;
+ if (read(sv[0], &c, 1) != 1) {
+ /* If we fail to read, kill all children and exit */
+ close(sv[0]);
+ for (int j = 0; j < num_children; j++)
+ kill(pids[j], SIGKILL);
+ for (int j = 0; j < num_children; j++)
+ waitpid(pids[j], NULL, 0);
+ ASSERT_TRUE(false);
+ }
+ }
+
+ /* List namespaces while all children are running */
+ ret_during = sys_listns(&req, ns_ids_during, ARRAY_SIZE(ns_ids_during), 0);
+ ASSERT_GE(ret_during, 0);
+
+ TH_LOG("With %d children running: %zd active user namespaces", num_children, ret_during);
+
+ /* Should have at least num_children more namespaces than baseline */
+ ASSERT_GE(ret_during, ret_before + num_children);
+
+ /* Signal all children to exit */
+ for (i = 0; i < num_children; i++) {
+ char c = 'X';
+ if (write(sv[0], &c, 1) != 1) {
+ /* If we fail to write, kill remaining children */
+ close(sv[0]);
+ for (int j = i; j < num_children; j++)
+ kill(pids[j], SIGKILL);
+ for (int j = 0; j < num_children; j++)
+ waitpid(pids[j], NULL, 0);
+ ASSERT_TRUE(false);
+ }
+ }
+
+ close(sv[0]);
+
+ /* Wait for all children */
+ for (i = 0; i < num_children; i++) {
+ int status;
+ waitpid(pids[i], &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ }
+
+ /* Verify we're back to baseline */
+ ret_after = sys_listns(&req, ns_ids_after, ARRAY_SIZE(ns_ids_after), 0);
+ ASSERT_GE(ret_after, 0);
+
+ TH_LOG("After all children exit: %zd active user namespaces", ret_after);
+ ASSERT_EQ(ret_before, ret_after);
+}
+
+/*
+ * Test rapid namespace creation with different namespace types.
+ * Create multiple types of namespaces rapidly to stress the tracking system.
+ */
+TEST(rapid_mixed_namespace_creation)
+{
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = 0, /* All types */
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
+ __u64 ns_ids_before[512], ns_ids_after[512];
+ ssize_t ret_before, ret_after;
+ int i;
+
+ /* Get baseline count */
+ ret_before = sys_listns(&req, ns_ids_before, ARRAY_SIZE(ns_ids_before), 0);
+ if (ret_before < 0) {
+ if (errno == ENOSYS)
+ SKIP(return, "listns() not supported");
+ ASSERT_GE(ret_before, 0);
+ }
+
+ TH_LOG("Baseline: %zd active namespaces (all types)", ret_before);
+
+ /* Rapidly create and destroy namespaces with multiple types */
+ for (i = 0; i < 50; i++) {
+ pid_t pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ /* Child: create multiple namespace types */
+ if (setup_userns() < 0)
+ exit(1);
+
+ /* Create additional namespace types */
+ if (unshare(CLONE_NEWNET) < 0)
+ exit(1);
+ if (unshare(CLONE_NEWUTS) < 0)
+ exit(1);
+ if (unshare(CLONE_NEWIPC) < 0)
+ exit(1);
+
+ exit(0);
+ }
+
+ /* Parent: wait for child */
+ int status;
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ }
+
+ /* Verify we're back to baseline */
+ ret_after = sys_listns(&req, ns_ids_after, ARRAY_SIZE(ns_ids_after), 0);
+ ASSERT_GE(ret_after, 0);
+
+ TH_LOG("After 50 rapid mixed namespace cycles: %zd active namespaces", ret_after);
+ ASSERT_EQ(ret_before, ret_after);
+}
+
+/*
+ * Test nested namespace creation under stress.
+ * Create deeply nested namespace hierarchies and verify proper cleanup.
+ */
+TEST(nested_namespace_stress)
+{
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = CLONE_NEWUSER,
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
+ __u64 ns_ids_before[512], ns_ids_after[512];
+ ssize_t ret_before, ret_after;
+ int i;
+
+ /* Get baseline */
+ ret_before = sys_listns(&req, ns_ids_before, ARRAY_SIZE(ns_ids_before), 0);
+ if (ret_before < 0) {
+ if (errno == ENOSYS)
+ SKIP(return, "listns() not supported");
+ ASSERT_GE(ret_before, 0);
+ }
+
+ TH_LOG("Baseline: %zd active user namespaces", ret_before);
+
+ /* Create 20 processes, each with nested user namespaces */
+ for (i = 0; i < 20; i++) {
+ pid_t pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ int userns_fd;
+ uid_t orig_uid = getuid();
+ int depth;
+
+ /* Create nested user namespaces (up to 5 levels) */
+ for (depth = 0; depth < 5; depth++) {
+ userns_fd = get_userns_fd(0, (depth == 0) ? orig_uid : 0, 1);
+ if (userns_fd < 0)
+ exit(1);
+
+ if (setns(userns_fd, CLONE_NEWUSER) < 0) {
+ close(userns_fd);
+ exit(1);
+ }
+ close(userns_fd);
+ }
+
+ exit(0);
+ }
+
+ /* Parent: wait for child */
+ int status;
+ waitpid(pid, &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ }
+
+ /* Verify we're back to baseline */
+ ret_after = sys_listns(&req, ns_ids_after, ARRAY_SIZE(ns_ids_after), 0);
+ ASSERT_GE(ret_after, 0);
+
+ TH_LOG("After 20 nested namespace hierarchies: %zd active user namespaces", ret_after);
+ ASSERT_EQ(ret_before, ret_after);
+}
+
+/*
+ * Test listns() pagination under stress.
+ * Create many namespaces and verify pagination works correctly.
+ */
+TEST(listns_pagination_stress)
+{
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = CLONE_NEWUSER,
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
+ pid_t pids[30];
+ int num_children = 30;
+ int i;
+ int sv[2];
+ __u64 all_ns_ids[512];
+ int total_found = 0;
+
+ ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, sv), 0);
+
+ /* Create many children with user namespaces */
+ for (i = 0; i < num_children; i++) {
+ pids[i] = fork();
+ ASSERT_GE(pids[i], 0);
+
+ if (pids[i] == 0) {
+ char c = 'R';
+ close(sv[0]);
+
+ if (setup_userns() < 0) {
+ close(sv[1]);
+ exit(1);
+ }
+
+ /* Signal parent we're ready */
+ if (write(sv[1], &c, 1) != 1) {
+ close(sv[1]);
+ exit(1);
+ }
+
+ /* Wait for parent signal to exit */
+ if (read(sv[1], &c, 1) != 1) {
+ close(sv[1]);
+ exit(1);
+ }
+
+ close(sv[1]);
+ exit(0);
+ }
+ }
+
+ close(sv[1]);
+
+ /* Wait for all children to signal ready */
+ for (i = 0; i < num_children; i++) {
+ char c;
+ if (read(sv[0], &c, 1) != 1) {
+ /* If we fail to read, kill all children and exit */
+ close(sv[0]);
+ for (int j = 0; j < num_children; j++)
+ kill(pids[j], SIGKILL);
+ for (int j = 0; j < num_children; j++)
+ waitpid(pids[j], NULL, 0);
+ ASSERT_TRUE(false);
+ }
+ }
+
+ /* Paginate through all namespaces using small batch sizes */
+ req.ns_id = 0;
+ while (1) {
+ __u64 batch[5]; /* Small batch size to force pagination */
+ ssize_t ret;
+
+ ret = sys_listns(&req, batch, ARRAY_SIZE(batch), 0);
+ if (ret < 0) {
+ if (errno == ENOSYS) {
+ close(sv[0]);
+ for (i = 0; i < num_children; i++)
+ kill(pids[i], SIGKILL);
+ for (i = 0; i < num_children; i++)
+ waitpid(pids[i], NULL, 0);
+ SKIP(return, "listns() not supported");
+ }
+ ASSERT_GE(ret, 0);
+ }
+
+ if (ret == 0)
+ break;
+
+ /* Store results */
+ for (i = 0; i < ret && total_found < 512; i++) {
+ all_ns_ids[total_found++] = batch[i];
+ }
+
+ /* Advance the cursor so the next call resumes after the last ID returned */
+ if (ret == ARRAY_SIZE(batch))
+ req.ns_id = batch[ret - 1];
+ else
+ break;
+ }
+
+ TH_LOG("Paginated through %d user namespaces", total_found);
+
+ /* Verify no duplicates in pagination */
+ for (i = 0; i < total_found; i++) {
+ for (int j = i + 1; j < total_found; j++) {
+ if (all_ns_ids[i] == all_ns_ids[j]) {
+ TH_LOG("Found duplicate ns_id: %llu at positions %d and %d",
+ (unsigned long long)all_ns_ids[i], i, j);
+ ASSERT_TRUE(false);
+ }
+ }
+ }
+
+ /* Signal all children to exit */
+ for (i = 0; i < num_children; i++) {
+ char c = 'X';
+ if (write(sv[0], &c, 1) != 1) {
+ close(sv[0]);
+ for (int j = i; j < num_children; j++)
+ kill(pids[j], SIGKILL);
+ for (int j = 0; j < num_children; j++)
+ waitpid(pids[j], NULL, 0);
+ ASSERT_TRUE(false);
+ }
+ }
+
+ close(sv[0]);
+
+ /* Wait for all children */
+ for (i = 0; i < num_children; i++) {
+ int status;
+ waitpid(pids[i], &status, 0);
+ }
+}
+
+/*
+ * Test concurrent namespace operations.
+ * Multiple processes creating, querying, and destroying namespaces concurrently.
+ */
+TEST(concurrent_namespace_operations)
+{
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = 0,
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
+ __u64 ns_ids_before[512], ns_ids_after[512];
+ ssize_t ret_before, ret_after;
+ pid_t pids[20];
+ int num_workers = 20;
+ int i;
+
+ /* Get baseline */
+ ret_before = sys_listns(&req, ns_ids_before, ARRAY_SIZE(ns_ids_before), 0);
+ if (ret_before < 0) {
+ if (errno == ENOSYS)
+ SKIP(return, "listns() not supported");
+ ASSERT_GE(ret_before, 0);
+ }
+
+ TH_LOG("Baseline: %zd active namespaces", ret_before);
+
+ /* Create worker processes that do concurrent operations */
+ for (i = 0; i < num_workers; i++) {
+ pids[i] = fork();
+ ASSERT_GE(pids[i], 0);
+
+ if (pids[i] == 0) {
+ /* Each worker: create namespaces, list them, repeat */
+ int iterations;
+
+ for (iterations = 0; iterations < 10; iterations++) {
+ int userns_fd;
+ __u64 temp_ns_ids[100];
+ ssize_t ret;
+
+ /* Create a user namespace */
+ userns_fd = get_userns_fd(0, getuid(), 1);
+ if (userns_fd < 0)
+ continue;
+
+ /* List namespaces */
+ ret = sys_listns(&req, temp_ns_ids, ARRAY_SIZE(temp_ns_ids), 0);
+ (void)ret;
+
+ close(userns_fd);
+
+ /* Small delay */
+ usleep(1000);
+ }
+
+ exit(0);
+ }
+ }
+
+ /* Wait for all workers */
+ for (i = 0; i < num_workers; i++) {
+ int status;
+ waitpid(pids[i], &status, 0);
+ ASSERT_TRUE(WIFEXITED(status));
+ ASSERT_EQ(WEXITSTATUS(status), 0);
+ }
+
+ /* Verify we're back to baseline */
+ ret_after = sys_listns(&req, ns_ids_after, ARRAY_SIZE(ns_ids_after), 0);
+ ASSERT_GE(ret_after, 0);
+
+ TH_LOG("After concurrent operations: %zd active namespaces", ret_after);
+ ASSERT_EQ(ret_before, ret_after);
+}
+
+/*
+ * Test namespace churn - continuous creation and destruction.
+ * Simulates high-churn scenarios like container orchestration.
+ */
+TEST(namespace_churn)
+{
+ struct ns_id_req req = {
+ .size = sizeof(req),
+ .spare = 0,
+ .ns_id = 0,
+ .ns_type = CLONE_NEWUSER | CLONE_NEWNET | CLONE_NEWUTS,
+ .spare2 = 0,
+ .user_ns_id = 0,
+ };
+ __u64 ns_ids_before[512], ns_ids_after[512];
+ ssize_t ret_before, ret_after;
+ int cycle;
+
+ /* Get baseline */
+ ret_before = sys_listns(&req, ns_ids_before, ARRAY_SIZE(ns_ids_before), 0);
+ if (ret_before < 0) {
+ if (errno == ENOSYS)
+ SKIP(return, "listns() not supported");
+ ASSERT_GE(ret_before, 0);
+ }
+
+ TH_LOG("Baseline: %zd active namespaces", ret_before);
+
+ /* Simulate churn: batches of namespaces created and destroyed */
+ for (cycle = 0; cycle < 10; cycle++) {
+ pid_t batch_pids[10];
+ int i;
+
+ /* Create batch */
+ for (i = 0; i < 10; i++) {
+ batch_pids[i] = fork();
+ ASSERT_GE(batch_pids[i], 0);
+
+ if (batch_pids[i] == 0) {
+ /* Create multiple namespace types */
+ if (setup_userns() < 0)
+ exit(1);
+ if (unshare(CLONE_NEWNET) < 0)
+ exit(1);
+ if (unshare(CLONE_NEWUTS) < 0)
+ exit(1);
+
+ /* Keep namespaces alive briefly */
+ usleep(10000);
+ exit(0);
+ }
+ }
+
+ /* Wait for batch to complete */
+ for (i = 0; i < 10; i++) {
+ int status;
+ waitpid(batch_pids[i], &status, 0);
+ }
+ }
+
+ /* Verify we're back to baseline */
+ ret_after = sys_listns(&req, ns_ids_after, ARRAY_SIZE(ns_ids_after), 0);
+ ASSERT_GE(ret_after, 0);
+
+ TH_LOG("After 10 churn cycles (100 namespace sets): %zd active namespaces", ret_after);
+ ASSERT_EQ(ret_before, ret_after);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/namespaces/wrappers.h b/tools/testing/selftests/namespaces/wrappers.h
new file mode 100644
index 000000000000..9741a64a5b1d
--- /dev/null
+++ b/tools/testing/selftests/namespaces/wrappers.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __SELFTESTS_NAMESPACES_WRAPPERS_H__
+#define __SELFTESTS_NAMESPACES_WRAPPERS_H__
+
+#include <linux/nsfs.h>
+#include <linux/types.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
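+/* Fallback syscall numbers for libcs whose headers predate listns() */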
+#ifndef __NR_listns
+ #if defined __alpha__
+ #define __NR_listns 580
+ #elif defined _MIPS_SIM
+ #if _MIPS_SIM == _MIPS_SIM_ABI32 /* o32 */
+ #define __NR_listns 4470
+ #endif
+ #if _MIPS_SIM == _MIPS_SIM_NABI32 /* n32 */
+ #define __NR_listns 6470
+ #endif
+ #if _MIPS_SIM == _MIPS_SIM_ABI64 /* n64 */
+ #define __NR_listns 5470
+ #endif
+ #else
+ #define __NR_listns 470
+ #endif
+#endif
+
+static inline int sys_listns(const struct ns_id_req *req, __u64 *ns_ids,
+ size_t nr_ns_ids, unsigned int flags)
+{
+ return syscall(__NR_listns, req, ns_ids, nr_ns_ids, flags);
+}
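+
+/*
+ * Minimal usage sketch (buffer size here is arbitrary; a negative return
+ * with errno == ENOSYS means the running kernel lacks listns()):
+ *
+ *	struct ns_id_req req = {
+ *		.size    = sizeof(req),
+ *		.ns_type = CLONE_NEWUSER,
+ *	};
+ *	__u64 ids[64];
+ *	ssize_t n = sys_listns(&req, ids, 64, 0);
+ */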
+
+#endif /* __SELFTESTS_NAMESPACES_WRAPPERS_H__ */
diff --git a/tools/testing/selftests/nci/nci_dev.c b/tools/testing/selftests/nci/nci_dev.c
index 6dec59d64083..312f84ee0444 100644
--- a/tools/testing/selftests/nci/nci_dev.c
+++ b/tools/testing/selftests/nci/nci_dev.c
@@ -16,7 +16,7 @@
#include <sys/socket.h>
#include <linux/nfc.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#define GENLMSG_DATA(glh) ((void *)(NLMSG_DATA(glh) + GENL_HDRLEN))
#define GENLMSG_PAYLOAD(glh) (NLMSG_PAYLOAD(glh, 0) - GENL_HDRLEN)
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 532bb732bc6d..6930fe926c58 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -4,10 +4,8 @@ bind_timewait
bind_wildcard
busy_poller
cmsg_sender
-diag_uid
epoll_busy_poll
fin_ack_lat
-gro
hwtstamp_config
io_uring_zerocopy_tx
ioam6_parser
@@ -16,8 +14,8 @@ ip_local_port_range
ipsec
ipv6_flowlabel
ipv6_flowlabel_mgr
+ipv6_fragmentation
log.txt
-msg_oob
msg_zerocopy
netlink-dumps
nettest
@@ -34,8 +32,6 @@ reuseport_bpf_numa
reuseport_dualstack
rxtimestamp
sctp_hello
-scm_pidfd
-scm_rights
sk_bind_sendto_listen
sk_connect_zero_addr
sk_so_peek_off
@@ -50,9 +46,10 @@ tap
tcp_fastopen_backup_key
tcp_inq
tcp_mmap
+tcp_port_share
+tfo
timestamping
tls
-toeplitz
tools
tun
txring_overwrite
@@ -60,4 +57,3 @@ txtimestamp
udpgso
udpgso_bench_rx
udpgso_bench_tx
-unix_connect
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index ea84b88bcb30..b66ba04f19d9 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -1,121 +1,200 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for net selftests
-CFLAGS += -Wall -Wl,--no-as-needed -O2 -g
+CFLAGS += -Wall -Wl,--no-as-needed -O2 -g
CFLAGS += -I../../../../usr/include/ $(KHDR_INCLUDES)
# Additional include paths needed by kselftest.h
CFLAGS += -I../
-TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh \
- rtnetlink.sh xfrm_policy.sh
-TEST_PROGS += fib_tests.sh fib-onlink-tests.sh pmtu.sh udpgso.sh ip_defrag.sh
-TEST_PROGS += udpgso_bench.sh fib_rule_tests.sh msg_zerocopy.sh psock_snd.sh
-TEST_PROGS += udpgro_bench.sh udpgro.sh test_vxlan_under_vrf.sh reuseport_addr_any.sh
-TEST_PROGS += test_vxlan_fdb_changelink.sh so_txtime.sh ipv6_flowlabel.sh
-TEST_PROGS += tcp_fastopen_backup_key.sh fcnal-test.sh l2tp.sh traceroute.sh
-TEST_PROGS += fin_ack_lat.sh fib_nexthop_multiprefix.sh fib_nexthops.sh fib_nexthop_nongw.sh
-TEST_PROGS += altnames.sh icmp.sh icmp_redirect.sh ip6_gre_headroom.sh
-TEST_PROGS += route_localnet.sh
-TEST_PROGS += reuseaddr_ports_exhausted.sh
-TEST_PROGS += txtimestamp.sh
-TEST_PROGS += vrf-xfrm-tests.sh
-TEST_PROGS += rxtimestamp.sh
-TEST_PROGS += drop_monitor_tests.sh
-TEST_PROGS += vrf_route_leaking.sh
-TEST_PROGS += bareudp.sh
-TEST_PROGS += amt.sh
-TEST_PROGS += unicast_extensions.sh
-TEST_PROGS += udpgro_fwd.sh
-TEST_PROGS += udpgro_frglist.sh
-TEST_PROGS += veth.sh
-TEST_PROGS += ioam6.sh
-TEST_PROGS += gro.sh
-TEST_PROGS += gre_gso.sh
-TEST_PROGS += gre_ipv6_lladdr.sh
-TEST_PROGS += cmsg_so_mark.sh
-TEST_PROGS += cmsg_so_priority.sh
-TEST_PROGS += test_so_rcv.sh
-TEST_PROGS += cmsg_time.sh cmsg_ip.sh
-TEST_PROGS += netns-name.sh
-TEST_PROGS += link_netns.py
-TEST_PROGS += nl_netdev.py
-TEST_PROGS += rtnetlink.py
-TEST_PROGS += srv6_end_dt46_l3vpn_test.sh
-TEST_PROGS += srv6_end_dt4_l3vpn_test.sh
-TEST_PROGS += srv6_end_dt6_l3vpn_test.sh
-TEST_PROGS += srv6_hencap_red_l3vpn_test.sh
-TEST_PROGS += srv6_hl2encap_red_l2vpn_test.sh
-TEST_PROGS += srv6_end_next_csid_l3vpn_test.sh
-TEST_PROGS += srv6_end_x_next_csid_l3vpn_test.sh
-TEST_PROGS += srv6_end_flavors_test.sh
-TEST_PROGS += srv6_end_dx4_netfilter_test.sh
-TEST_PROGS += srv6_end_dx6_netfilter_test.sh
-TEST_PROGS += vrf_strict_mode_test.sh
-TEST_PROGS += arp_ndisc_evict_nocarrier.sh
-TEST_PROGS += ndisc_unsolicited_na_test.sh
-TEST_PROGS += arp_ndisc_untracked_subnets.sh
-TEST_PROGS += stress_reuseport_listen.sh
-TEST_PROGS += l2_tos_ttl_inherit.sh
-TEST_PROGS += bind_bhash.sh
-TEST_PROGS += ip_local_port_range.sh
-TEST_PROGS += rps_default_mask.sh
-TEST_PROGS += big_tcp.sh
-TEST_PROGS += netns-sysctl.sh
-TEST_PROGS_EXTENDED := toeplitz_client.sh toeplitz.sh xfrm_policy_add_speed.sh
-TEST_GEN_FILES = socket nettest
-TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any
-TEST_GEN_FILES += tcp_mmap tcp_inq psock_snd txring_overwrite
-TEST_GEN_FILES += udpgso udpgso_bench_tx udpgso_bench_rx ip_defrag
-TEST_GEN_FILES += so_txtime ipv6_flowlabel ipv6_flowlabel_mgr so_netns_cookie
-TEST_GEN_FILES += tcp_fastopen_backup_key
-TEST_GEN_FILES += fin_ack_lat
-TEST_GEN_FILES += reuseaddr_ports_exhausted
-TEST_GEN_FILES += hwtstamp_config rxtimestamp timestamping txtimestamp
-TEST_GEN_FILES += ipsec
-TEST_GEN_FILES += ioam6_parser
-TEST_GEN_FILES += gro
-TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
-TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls tun tap epoll_busy_poll
-TEST_GEN_FILES += toeplitz
-TEST_GEN_FILES += cmsg_sender
-TEST_GEN_FILES += stress_reuseport_listen
-TEST_GEN_FILES += so_rcv_listener
-TEST_PROGS += test_vxlan_vnifiltering.sh
-TEST_GEN_FILES += io_uring_zerocopy_tx
-TEST_PROGS += io_uring_zerocopy_tx.sh
-TEST_GEN_FILES += bind_bhash
-TEST_GEN_PROGS += sk_bind_sendto_listen
-TEST_GEN_PROGS += sk_connect_zero_addr
-TEST_GEN_PROGS += sk_so_peek_off
-TEST_PROGS += test_ingress_egress_chaining.sh
-TEST_GEN_PROGS += so_incoming_cpu
-TEST_PROGS += sctp_vrf.sh
-TEST_GEN_FILES += sctp_hello
-TEST_GEN_FILES += ip_local_port_range
-TEST_GEN_PROGS += bind_wildcard
-TEST_GEN_PROGS += bind_timewait
-TEST_PROGS += test_vxlan_mdb.sh
-TEST_PROGS += test_bridge_neigh_suppress.sh
-TEST_PROGS += test_vxlan_nolocalbypass.sh
-TEST_PROGS += test_bridge_backup_port.sh
-TEST_PROGS += fdb_flush.sh fdb_notify.sh
-TEST_PROGS += fq_band_pktlimit.sh
-TEST_PROGS += vlan_hw_filter.sh
-TEST_PROGS += vlan_bridge_binding.sh
-TEST_PROGS += bpf_offload.py
-TEST_PROGS += ipv6_route_update_soft_lockup.sh
-TEST_PROGS += busy_poll_test.sh
-TEST_GEN_PROGS += proc_net_pktgen
-TEST_PROGS += lwt_dst_cache_ref_loop.sh
-TEST_PROGS += skf_net_off.sh
-TEST_GEN_FILES += skf_net_off
+TEST_PROGS := \
+ altnames.sh \
+ amt.sh \
+ arp_ndisc_evict_nocarrier.sh \
+ arp_ndisc_untracked_subnets.sh \
+ bareudp.sh \
+ big_tcp.sh \
+ bind_bhash.sh \
+ bpf_offload.py \
+ broadcast_ether_dst.sh \
+ broadcast_pmtu.sh \
+ busy_poll_test.sh \
+ cmsg_ip.sh \
+ cmsg_so_mark.sh \
+ cmsg_so_priority.sh \
+ cmsg_time.sh \
+ drop_monitor_tests.sh \
+ fcnal-ipv4.sh \
+ fcnal-ipv6.sh \
+ fcnal-other.sh \
+ fdb_flush.sh \
+ fdb_notify.sh \
+ fib-onlink-tests.sh \
+ fib_nexthop_multiprefix.sh \
+ fib_nexthop_nongw.sh \
+ fib_nexthops.sh \
+ fib_rule_tests.sh \
+ fib_tests.sh \
+ fin_ack_lat.sh \
+ fq_band_pktlimit.sh \
+ gre_gso.sh \
+ gre_ipv6_lladdr.sh \
+ icmp.sh \
+ icmp_redirect.sh \
+ io_uring_zerocopy_tx.sh \
+ ioam6.sh \
+ ip6_gre_headroom.sh \
+ ip_defrag.sh \
+ ip_local_port_range.sh \
+ ipv6_flowlabel.sh \
+ ipv6_force_forwarding.sh \
+ ipv6_route_update_soft_lockup.sh \
+ l2_tos_ttl_inherit.sh \
+ l2tp.sh \
+ link_netns.py \
+ lwt_dst_cache_ref_loop.sh \
+ msg_zerocopy.sh \
+ nat6to4.sh \
+ ndisc_unsolicited_na_test.sh \
+ netdev-l2addr.sh \
+ netdevice.sh \
+ netns-name.sh \
+ netns-sysctl.sh \
+ nl_netdev.py \
+ pmtu.sh \
+ psock_snd.sh \
+ reuseaddr_ports_exhausted.sh \
+ reuseport_addr_any.sh \
+ route_hint.sh \
+ route_localnet.sh \
+ rps_default_mask.sh \
+ rtnetlink.py \
+ rtnetlink.sh \
+ rtnetlink_notification.sh \
+ run_afpackettests \
+ run_netsocktests \
+ rxtimestamp.sh \
+ sctp_vrf.sh \
+ skf_net_off.sh \
+ so_txtime.sh \
+ srv6_end_dt46_l3vpn_test.sh \
+ srv6_end_dt4_l3vpn_test.sh \
+ srv6_end_dt6_l3vpn_test.sh \
+ srv6_end_dx4_netfilter_test.sh \
+ srv6_end_dx6_netfilter_test.sh \
+ srv6_end_flavors_test.sh \
+ srv6_end_next_csid_l3vpn_test.sh \
+ srv6_end_x_next_csid_l3vpn_test.sh \
+ srv6_hencap_red_l3vpn_test.sh \
+ srv6_hl2encap_red_l2vpn_test.sh \
+ stress_reuseport_listen.sh \
+ tcp_fastopen_backup_key.sh \
+ test_bpf.sh \
+ test_bridge_backup_port.sh \
+ test_bridge_neigh_suppress.sh \
+ test_ingress_egress_chaining.sh \
+ test_neigh.sh \
+ test_so_rcv.sh \
+ test_vxlan_fdb_changelink.sh \
+ test_vxlan_mdb.sh \
+ test_vxlan_nh.sh \
+ test_vxlan_nolocalbypass.sh \
+ test_vxlan_under_vrf.sh \
+ test_vxlan_vnifiltering.sh \
+ tfo_passive.sh \
+ traceroute.sh \
+ txtimestamp.sh \
+ udpgro.sh \
+ udpgro_bench.sh \
+ udpgro_frglist.sh \
+ udpgro_fwd.sh \
+ udpgso.sh \
+ udpgso_bench.sh \
+ unicast_extensions.sh \
+ veth.sh \
+ vlan_bridge_binding.sh \
+ vlan_hw_filter.sh \
+ vrf-xfrm-tests.sh \
+ vrf_route_leaking.sh \
+ vrf_strict_mode_test.sh \
+ xfrm_policy.sh \
+# end of TEST_PROGS
+
+TEST_PROGS_EXTENDED := \
+ xfrm_policy_add_speed.sh \
+# end of TEST_PROGS_EXTENDED
+
+TEST_GEN_FILES := \
+ bind_bhash \
+ cmsg_sender \
+ fin_ack_lat \
+ hwtstamp_config \
+ io_uring_zerocopy_tx \
+ ioam6_parser \
+ ip_defrag \
+ ip_local_port_range \
+ ipsec \
+ ipv6_flowlabel \
+ ipv6_flowlabel_mgr \
+ msg_zerocopy \
+ nettest \
+ psock_fanout \
+ psock_snd \
+ psock_tpacket \
+ reuseaddr_ports_exhausted \
+ reuseport_addr_any \
+ rxtimestamp \
+ sctp_hello \
+ skf_net_off \
+ so_netns_cookie \
+ so_rcv_listener \
+ so_txtime \
+ socket \
+ stress_reuseport_listen \
+ tcp_fastopen_backup_key \
+ tcp_inq \
+ tcp_mmap \
+ tfo \
+ timestamping \
+ txring_overwrite \
+ txtimestamp \
+ udpgso \
+ udpgso_bench_rx \
+ udpgso_bench_tx \
+# end of TEST_GEN_FILES
+
+TEST_GEN_PROGS := \
+ bind_timewait \
+ bind_wildcard \
+ epoll_busy_poll \
+ ipv6_fragmentation \
+ proc_net_pktgen \
+ reuseaddr_conflict \
+ reuseport_bpf \
+ reuseport_bpf_cpu \
+ reuseport_bpf_numa \
+ reuseport_dualstack \
+ sk_bind_sendto_listen \
+ sk_connect_zero_addr \
+ sk_so_peek_off \
+ so_incoming_cpu \
+ tap \
+ tcp_port_share \
+ tls \
+ tun \
+# end of TEST_GEN_PROGS
+
+TEST_FILES := \
+ fcnal-test.sh \
+ in_netns.sh \
+ lib.sh \
+ settings \
+# end of TEST_FILES
# YNL files, must be before "include ..lib.mk"
-YNL_GEN_FILES := busy_poller netlink-dumps
+YNL_GEN_FILES := busy_poller
+YNL_GEN_PROGS := netlink-dumps
TEST_GEN_FILES += $(YNL_GEN_FILES)
-
-TEST_FILES := settings
-TEST_FILES += in_netns.sh lib.sh setup_loopback.sh setup_veth.sh
+TEST_GEN_PROGS += $(YNL_GEN_PROGS)
TEST_GEN_FILES += $(patsubst %.c,%.o,$(wildcard *.bpf.c))
diff --git a/tools/testing/selftests/net/af_unix/.gitignore b/tools/testing/selftests/net/af_unix/.gitignore
new file mode 100644
index 000000000000..240b26740c9e
--- /dev/null
+++ b/tools/testing/selftests/net/af_unix/.gitignore
@@ -0,0 +1,8 @@
+diag_uid
+msg_oob
+scm_inq
+scm_pidfd
+scm_rights
+so_peek_off
+unix_connect
+unix_connreset
diff --git a/tools/testing/selftests/net/af_unix/Makefile b/tools/testing/selftests/net/af_unix/Makefile
index 50584479540b..3cd677b72072 100644
--- a/tools/testing/selftests/net/af_unix/Makefile
+++ b/tools/testing/selftests/net/af_unix/Makefile
@@ -1,4 +1,14 @@
-CFLAGS += $(KHDR_INCLUDES)
-TEST_GEN_PROGS := diag_uid msg_oob scm_pidfd scm_rights unix_connect
+CFLAGS += $(KHDR_INCLUDES) -Wall -Wflex-array-member-not-at-end
+
+TEST_GEN_PROGS := \
+ diag_uid \
+ msg_oob \
+ scm_inq \
+ scm_pidfd \
+ scm_rights \
+ so_peek_off \
+ unix_connect \
+ unix_connreset \
+# end of TEST_GEN_PROGS
include ../../lib.mk
diff --git a/tools/testing/selftests/net/af_unix/config b/tools/testing/selftests/net/af_unix/config
index 37368567768c..b5429c15a53c 100644
--- a/tools/testing/selftests/net/af_unix/config
+++ b/tools/testing/selftests/net/af_unix/config
@@ -1,3 +1,3 @@
-CONFIG_UNIX=y
CONFIG_AF_UNIX_OOB=y
+CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m
diff --git a/tools/testing/selftests/net/af_unix/diag_uid.c b/tools/testing/selftests/net/af_unix/diag_uid.c
index 79a3dd75590e..da7d50cedee6 100644
--- a/tools/testing/selftests/net/af_unix/diag_uid.c
+++ b/tools/testing/selftests/net/af_unix/diag_uid.c
@@ -14,7 +14,7 @@
#include <sys/types.h>
#include <sys/un.h>
-#include "../../kselftest_harness.h"
+#include "kselftest_harness.h"
FIXTURE(diag_uid)
{
diff --git a/tools/testing/selftests/net/af_unix/msg_oob.c b/tools/testing/selftests/net/af_unix/msg_oob.c
index 3ed3882a93b8..1b499d56656c 100644
--- a/tools/testing/selftests/net/af_unix/msg_oob.c
+++ b/tools/testing/selftests/net/af_unix/msg_oob.c
@@ -11,7 +11,7 @@
#include <sys/signalfd.h>
#include <sys/socket.h>
-#include "../../kselftest_harness.h"
+#include "kselftest_harness.h"
#define BUF_SZ 32
@@ -210,7 +210,7 @@ static void __sendpair(struct __test_metadata *_metadata,
static void __recvpair(struct __test_metadata *_metadata,
FIXTURE_DATA(msg_oob) *self,
const char *expected_buf, int expected_len,
- int buf_len, int flags)
+ int buf_len, int flags, bool is_sender)
{
int i, ret[2], recv_errno[2], expected_errno = 0;
char recv_buf[2][BUF_SZ] = {};
@@ -221,7 +221,9 @@ static void __recvpair(struct __test_metadata *_metadata,
errno = 0;
for (i = 0; i < 2; i++) {
- ret[i] = recv(self->fd[i * 2 + 1], recv_buf[i], buf_len, flags);
+ int index = is_sender ? i * 2 : i * 2 + 1;
+
+ ret[i] = recv(self->fd[index], recv_buf[i], buf_len, flags);
recv_errno[i] = errno;
}
@@ -308,6 +310,20 @@ static void __siocatmarkpair(struct __test_metadata *_metadata,
ASSERT_EQ(answ[0], answ[1]);
}
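+/*
+ * Close one end of each socket pair, then check whether the other end
+ * observes ECONNRESET or plain EOF.
+ */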
+static void __resetpair(struct __test_metadata *_metadata,
+ FIXTURE_DATA(msg_oob) *self,
+ const FIXTURE_VARIANT(msg_oob) *variant,
+ bool reset)
+{
+ int i;
+
+ for (i = 0; i < 2; i++)
+ close(self->fd[i * 2 + 1]);
+
+ __recvpair(_metadata, self, "", reset ? -ECONNRESET : 0, 1,
+ variant->peek ? MSG_PEEK : 0, true);
+}
+
#define sendpair(buf, len, flags) \
__sendpair(_metadata, self, buf, len, flags)
@@ -316,9 +332,10 @@ static void __siocatmarkpair(struct __test_metadata *_metadata,
if (variant->peek) \
__recvpair(_metadata, self, \
expected_buf, expected_len, \
- buf_len, (flags) | MSG_PEEK); \
+ buf_len, (flags) | MSG_PEEK, false); \
__recvpair(_metadata, self, \
- expected_buf, expected_len, buf_len, flags); \
+ expected_buf, expected_len, \
+ buf_len, flags, false); \
} while (0)
#define epollpair(oob_remaining) \
@@ -330,6 +347,9 @@ static void __siocatmarkpair(struct __test_metadata *_metadata,
#define setinlinepair() \
__setinlinepair(_metadata, self)
+#define resetpair(reset) \
+ __resetpair(_metadata, self, variant, reset)
+
#define tcp_incompliant \
for (self->tcp_compliant = false; \
self->tcp_compliant == false; \
@@ -344,6 +364,21 @@ TEST_F(msg_oob, non_oob)
recvpair("", -EINVAL, 1, MSG_OOB);
epollpair(false);
siocatmarkpair(false);
+
+ resetpair(true);
+}
+
+TEST_F(msg_oob, non_oob_no_reset)
+{
+ sendpair("x", 1, 0);
+ epollpair(false);
+ siocatmarkpair(false);
+
+ recvpair("x", 1, 1, 0);
+ epollpair(false);
+ siocatmarkpair(false);
+
+ resetpair(false);
}
TEST_F(msg_oob, oob)
@@ -355,6 +390,19 @@ TEST_F(msg_oob, oob)
recvpair("x", 1, 1, MSG_OOB);
epollpair(false);
siocatmarkpair(true);
+
+ tcp_incompliant {
+ resetpair(false); /* TCP sets -ECONNRESET for ex-OOB. */
+ }
+}
+
+TEST_F(msg_oob, oob_reset)
+{
+ sendpair("x", 1, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(true);
+
+ resetpair(true);
}
TEST_F(msg_oob, oob_drop)
@@ -370,6 +418,8 @@ TEST_F(msg_oob, oob_drop)
recvpair("", -EINVAL, 1, MSG_OOB);
epollpair(false);
siocatmarkpair(false);
+
+ resetpair(false);
}
TEST_F(msg_oob, oob_ahead)
@@ -385,6 +435,10 @@ TEST_F(msg_oob, oob_ahead)
recvpair("hell", 4, 4, 0);
epollpair(false);
siocatmarkpair(true);
+
+ tcp_incompliant {
+ resetpair(false); /* TCP sets -ECONNRESET for ex-OOB. */
+ }
}
TEST_F(msg_oob, oob_break)
@@ -403,6 +457,8 @@ TEST_F(msg_oob, oob_break)
recvpair("", -EAGAIN, 1, 0);
siocatmarkpair(false);
+
+ resetpair(false);
}
TEST_F(msg_oob, oob_ahead_break)
@@ -426,6 +482,8 @@ TEST_F(msg_oob, oob_ahead_break)
recvpair("world", 5, 5, 0);
epollpair(false);
siocatmarkpair(false);
+
+ resetpair(false);
}
TEST_F(msg_oob, oob_break_drop)
@@ -449,6 +507,8 @@ TEST_F(msg_oob, oob_break_drop)
recvpair("", -EINVAL, 1, MSG_OOB);
epollpair(false);
siocatmarkpair(false);
+
+ resetpair(false);
}
TEST_F(msg_oob, ex_oob_break)
@@ -476,6 +536,8 @@ TEST_F(msg_oob, ex_oob_break)
recvpair("ld", 2, 2, 0);
epollpair(false);
siocatmarkpair(false);
+
+ resetpair(false);
}
TEST_F(msg_oob, ex_oob_drop)
@@ -498,6 +560,8 @@ TEST_F(msg_oob, ex_oob_drop)
epollpair(false);
siocatmarkpair(true);
}
+
+ resetpair(false);
}
TEST_F(msg_oob, ex_oob_drop_2)
@@ -523,6 +587,8 @@ TEST_F(msg_oob, ex_oob_drop_2)
epollpair(false);
siocatmarkpair(true);
}
+
+ resetpair(false);
}
TEST_F(msg_oob, ex_oob_oob)
@@ -546,6 +612,54 @@ TEST_F(msg_oob, ex_oob_oob)
recvpair("", -EINVAL, 1, MSG_OOB);
epollpair(false);
siocatmarkpair(false);
+
+ resetpair(false);
+}
+
+TEST_F(msg_oob, ex_oob_ex_oob)
+{
+ sendpair("x", 1, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("x", 1, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(true);
+
+ sendpair("y", 1, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("y", 1, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(true);
+
+ tcp_incompliant {
+ resetpair(false); /* TCP sets -ECONNRESET for ex-OOB. */
+ }
+}
+
+TEST_F(msg_oob, ex_oob_ex_oob_oob)
+{
+ sendpair("x", 1, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("x", 1, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(true);
+
+ sendpair("y", 1, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(true);
+
+ recvpair("y", 1, 1, MSG_OOB);
+ epollpair(false);
+ siocatmarkpair(true);
+
+ sendpair("z", 1, MSG_OOB);
+ epollpair(true);
+ siocatmarkpair(true);
}
TEST_F(msg_oob, ex_oob_ahead_break)
@@ -576,6 +690,10 @@ TEST_F(msg_oob, ex_oob_ahead_break)
recvpair("d", 1, 1, MSG_OOB);
epollpair(false);
siocatmarkpair(true);
+
+ tcp_incompliant {
+ resetpair(false); /* TCP sets -ECONNRESET for ex-OOB. */
+ }
}
TEST_F(msg_oob, ex_oob_siocatmark)
@@ -595,6 +713,8 @@ TEST_F(msg_oob, ex_oob_siocatmark)
recvpair("hell", 4, 4, 0); /* Intentionally stop at ex-OOB. */
epollpair(true);
siocatmarkpair(false);
+
+ resetpair(true);
}
TEST_F(msg_oob, inline_oob)
@@ -612,6 +732,8 @@ TEST_F(msg_oob, inline_oob)
recvpair("x", 1, 1, 0);
epollpair(false);
siocatmarkpair(false);
+
+ resetpair(false);
}
TEST_F(msg_oob, inline_oob_break)
@@ -633,6 +755,8 @@ TEST_F(msg_oob, inline_oob_break)
recvpair("o", 1, 1, 0);
epollpair(false);
siocatmarkpair(false);
+
+ resetpair(false);
}
TEST_F(msg_oob, inline_oob_ahead_break)
@@ -661,6 +785,8 @@ TEST_F(msg_oob, inline_oob_ahead_break)
epollpair(false);
siocatmarkpair(false);
+
+ resetpair(false);
}
TEST_F(msg_oob, inline_ex_oob_break)
@@ -686,6 +812,8 @@ TEST_F(msg_oob, inline_ex_oob_break)
recvpair("rld", 3, 3, 0);
epollpair(false);
siocatmarkpair(false);
+
+ resetpair(false);
}
TEST_F(msg_oob, inline_ex_oob_no_drop)
@@ -707,6 +835,8 @@ TEST_F(msg_oob, inline_ex_oob_no_drop)
recvpair("y", 1, 1, 0);
epollpair(false);
siocatmarkpair(false);
+
+ resetpair(false);
}
TEST_F(msg_oob, inline_ex_oob_drop)
@@ -731,6 +861,8 @@ TEST_F(msg_oob, inline_ex_oob_drop)
epollpair(false);
siocatmarkpair(false);
}
+
+ resetpair(false);
}
TEST_F(msg_oob, inline_ex_oob_siocatmark)
@@ -752,6 +884,8 @@ TEST_F(msg_oob, inline_ex_oob_siocatmark)
recvpair("hell", 4, 4, 0); /* Intentionally stop at ex-OOB. */
epollpair(true);
siocatmarkpair(false);
+
+ resetpair(true);
}
TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/net/af_unix/scm_inq.c b/tools/testing/selftests/net/af_unix/scm_inq.c
new file mode 100644
index 000000000000..3a86be9bda17
--- /dev/null
+++ b/tools/testing/selftests/net/af_unix/scm_inq.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2025 Google LLC */
+
+#include <linux/sockios.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+
+#include "kselftest_harness.h"
+
+#define NR_CHUNKS 100
+#define MSG_LEN 256
+
+FIXTURE(scm_inq)
+{
+ int fd[2];
+};
+
+FIXTURE_VARIANT(scm_inq)
+{
+ int type;
+};
+
+FIXTURE_VARIANT_ADD(scm_inq, stream)
+{
+ .type = SOCK_STREAM,
+};
+
+FIXTURE_VARIANT_ADD(scm_inq, dgram)
+{
+ .type = SOCK_DGRAM,
+};
+
+FIXTURE_VARIANT_ADD(scm_inq, seqpacket)
+{
+ .type = SOCK_SEQPACKET,
+};
+
+FIXTURE_SETUP(scm_inq)
+{
+ int err;
+
+ err = socketpair(AF_UNIX, variant->type | SOCK_NONBLOCK, 0, self->fd);
+ ASSERT_EQ(0, err);
+}
+
+FIXTURE_TEARDOWN(scm_inq)
+{
+ close(self->fd[0]);
+ close(self->fd[1]);
+}
+
+static void send_chunks(struct __test_metadata *_metadata,
+ FIXTURE_DATA(scm_inq) *self)
+{
+ char buf[MSG_LEN] = {};
+ int i, ret;
+
+ for (i = 0; i < NR_CHUNKS; i++) {
+ ret = send(self->fd[0], buf, sizeof(buf), 0);
+ ASSERT_EQ(sizeof(buf), ret);
+ }
+}
+
+static void recv_chunks(struct __test_metadata *_metadata,
+ FIXTURE_DATA(scm_inq) *self)
+{
+ char cmsg_buf[CMSG_SPACE(sizeof(int))];
+ struct msghdr msg = {};
+ struct iovec iov = {};
+ struct cmsghdr *cmsg;
+ char buf[MSG_LEN];
+ int i, ret;
+ int inq;
+
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = cmsg_buf;
+ msg.msg_controllen = sizeof(cmsg_buf);
+
+ iov.iov_base = buf;
+ iov.iov_len = sizeof(buf);
+
+ for (i = 0; i < NR_CHUNKS; i++) {
+ memset(buf, 0, sizeof(buf));
+ memset(cmsg_buf, 0, sizeof(cmsg_buf));
+
+ ret = recvmsg(self->fd[1], &msg, 0);
+ ASSERT_EQ(MSG_LEN, ret);
+
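+ /* With SO_INQ enabled, every recvmsg() carries an SCM_INQ cmsg whose
+ * payload is the number of bytes left in the receive queue, which
+ * must match what SIOCINQ reports.
+ */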
+ cmsg = CMSG_FIRSTHDR(&msg);
+ ASSERT_NE(NULL, cmsg);
+ ASSERT_EQ(CMSG_LEN(sizeof(int)), cmsg->cmsg_len);
+ ASSERT_EQ(SOL_SOCKET, cmsg->cmsg_level);
+ ASSERT_EQ(SCM_INQ, cmsg->cmsg_type);
+
+ ret = ioctl(self->fd[1], SIOCINQ, &inq);
+ ASSERT_EQ(0, ret);
+ ASSERT_EQ(*(int *)CMSG_DATA(cmsg), inq);
+ }
+}
+
+TEST_F(scm_inq, basic)
+{
+ int err, inq;
+
+ err = setsockopt(self->fd[1], SOL_SOCKET, SO_INQ, &(int){1}, sizeof(int));
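+ /* SO_INQ is only supported for SOCK_STREAM; the other types must
+ * fail with ENOPROTOOPT.
+ */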
+ if (variant->type != SOCK_STREAM) {
+ ASSERT_EQ(-ENOPROTOOPT, -errno);
+ return;
+ }
+
+ ASSERT_EQ(0, err);
+
+ err = ioctl(self->fd[1], SIOCINQ, &inq);
+ ASSERT_EQ(0, err);
+ ASSERT_EQ(0, inq);
+
+ send_chunks(_metadata, self);
+ recv_chunks(_metadata, self);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/net/af_unix/scm_pidfd.c b/tools/testing/selftests/net/af_unix/scm_pidfd.c
index 7e534594167e..2c18b92a2603 100644
--- a/tools/testing/selftests/net/af_unix/scm_pidfd.c
+++ b/tools/testing/selftests/net/af_unix/scm_pidfd.c
@@ -15,7 +15,8 @@
#include <sys/types.h>
#include <sys/wait.h>
-#include "../../kselftest_harness.h"
+#include "../../pidfd/pidfd.h"
+#include "kselftest_harness.h"
#define clean_errno() (errno == 0 ? "None" : strerror(errno))
#define log_err(MSG, ...) \
@@ -26,6 +27,8 @@
#define SCM_PIDFD 0x04
#endif
+#define CHILD_EXIT_CODE_OK 123
+
static void child_die()
{
exit(1);
@@ -126,16 +129,64 @@ out:
return result;
}
+struct cmsg_data {
+ struct ucred *ucred;
+ int *pidfd;
+};
+
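+/* Scan the control messages for both SCM_PIDFD and SCM_CREDENTIALS and
+ * record pointers to their payloads; missing either one is an error.
+ */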
+static int parse_cmsg(struct msghdr *msg, struct cmsg_data *res)
+{
+ struct cmsghdr *cmsg;
+
+ if (msg->msg_flags & (MSG_TRUNC | MSG_CTRUNC)) {
+ log_err("recvmsg: truncated");
+ return 1;
+ }
+
+ for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL;
+ cmsg = CMSG_NXTHDR(msg, cmsg)) {
+ if (cmsg->cmsg_level == SOL_SOCKET &&
+ cmsg->cmsg_type == SCM_PIDFD) {
+ if (cmsg->cmsg_len < sizeof(*res->pidfd)) {
+ log_err("CMSG parse: SCM_PIDFD wrong len");
+ return 1;
+ }
+
+ res->pidfd = (void *)CMSG_DATA(cmsg);
+ }
+
+ if (cmsg->cmsg_level == SOL_SOCKET &&
+ cmsg->cmsg_type == SCM_CREDENTIALS) {
+ if (cmsg->cmsg_len < sizeof(*res->ucred)) {
+ log_err("CMSG parse: SCM_CREDENTIALS wrong len");
+ return 1;
+ }
+
+ res->ucred = (void *)CMSG_DATA(cmsg);
+ }
+ }
+
+ if (!res->pidfd) {
+ log_err("CMSG parse: SCM_PIDFD not found");
+ return 1;
+ }
+
+ if (!res->ucred) {
+ log_err("CMSG parse: SCM_CREDENTIALS not found");
+ return 1;
+ }
+
+ return 0;
+}
+
static int cmsg_check(int fd)
{
struct msghdr msg = { 0 };
- struct cmsghdr *cmsg;
+ struct cmsg_data res;
struct iovec iov;
- struct ucred *ucred = NULL;
int data = 0;
char control[CMSG_SPACE(sizeof(struct ucred)) +
CMSG_SPACE(sizeof(int))] = { 0 };
- int *pidfd = NULL;
pid_t parent_pid;
int err;
@@ -158,53 +209,98 @@ static int cmsg_check(int fd)
return 1;
}
- for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
- cmsg = CMSG_NXTHDR(&msg, cmsg)) {
- if (cmsg->cmsg_level == SOL_SOCKET &&
- cmsg->cmsg_type == SCM_PIDFD) {
- if (cmsg->cmsg_len < sizeof(*pidfd)) {
- log_err("CMSG parse: SCM_PIDFD wrong len");
- return 1;
- }
+ /* send(pfd, "x", sizeof(char), 0) */
+ if (data != 'x') {
+ log_err("recvmsg: data corruption");
+ return 1;
+ }
- pidfd = (void *)CMSG_DATA(cmsg);
- }
+ if (parse_cmsg(&msg, &res)) {
+ log_err("CMSG parse: parse_cmsg() failed");
+ return 1;
+ }
- if (cmsg->cmsg_level == SOL_SOCKET &&
- cmsg->cmsg_type == SCM_CREDENTIALS) {
- if (cmsg->cmsg_len < sizeof(*ucred)) {
- log_err("CMSG parse: SCM_CREDENTIALS wrong len");
- return 1;
- }
+ /* pidfd from SCM_PIDFD should point to the parent process PID */
+ parent_pid =
+ get_pid_from_fdinfo_file(*res.pidfd, "Pid:", sizeof("Pid:") - 1);
+ if (parent_pid != getppid()) {
+ log_err("wrong SCM_PIDFD %d != %d", parent_pid, getppid());
+ close(*res.pidfd);
+ return 1;
+ }
- ucred = (void *)CMSG_DATA(cmsg);
- }
+ close(*res.pidfd);
+ return 0;
+}
+
+static int cmsg_check_dead(int fd, int expected_pid)
+{
+ int err;
+ struct msghdr msg = { 0 };
+ struct cmsg_data res;
+ struct iovec iov;
+ int data = 0;
+ char control[CMSG_SPACE(sizeof(struct ucred)) +
+ CMSG_SPACE(sizeof(int))] = { 0 };
+ struct pidfd_info info = {
+ .mask = PIDFD_INFO_EXIT,
+ };
+
+ iov.iov_base = &data;
+ iov.iov_len = sizeof(data);
+
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = control;
+ msg.msg_controllen = sizeof(control);
+
+ err = recvmsg(fd, &msg, 0);
+ if (err < 0) {
+ log_err("recvmsg");
+ return 1;
}
- /* send(pfd, "x", sizeof(char), 0) */
- if (data != 'x') {
+ if (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC)) {
+ log_err("recvmsg: truncated");
+ return 1;
+ }
+
+ /* send(cfd, "y", sizeof(char), 0) */
+ if (data != 'y') {
log_err("recvmsg: data corruption");
return 1;
}
- if (!pidfd) {
- log_err("CMSG parse: SCM_PIDFD not found");
+ if (parse_cmsg(&msg, &res)) {
+ log_err("CMSG parse: parse_cmsg() failed");
return 1;
}
- if (!ucred) {
- log_err("CMSG parse: SCM_CREDENTIALS not found");
+ /*
+ * pidfd from SCM_PIDFD should point to the client_pid.
+ * Let's read exit information and check if it's what
+ * we expect to see.
+ */
+ if (ioctl(*res.pidfd, PIDFD_GET_INFO, &info)) {
+ log_err("%s: ioctl(PIDFD_GET_INFO) failed", __func__);
+ close(*res.pidfd);
return 1;
}
- /* pidfd from SCM_PIDFD should point to the parent process PID */
- parent_pid =
- get_pid_from_fdinfo_file(*pidfd, "Pid:", sizeof("Pid:") - 1);
- if (parent_pid != getppid()) {
- log_err("wrong SCM_PIDFD %d != %d", parent_pid, getppid());
+ if (!(info.mask & PIDFD_INFO_EXIT)) {
+ log_err("%s: No exit information from ioctl(PIDFD_GET_INFO)", __func__);
+ close(*res.pidfd);
+ return 1;
+ }
+
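+ /* exit_code uses the wait(2) status encoding, so decode it with
+ * WIFEXITED()/WEXITSTATUS().
+ */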
+ err = WIFEXITED(info.exit_code) ? WEXITSTATUS(info.exit_code) : 1;
+ if (err != CHILD_EXIT_CODE_OK) {
+ log_err("%s: wrong exit_code %d != %d", __func__, err, CHILD_EXIT_CODE_OK);
+ close(*res.pidfd);
return 1;
}
+ close(*res.pidfd);
return 0;
}
@@ -291,6 +387,24 @@ static void fill_sockaddr(struct sock_addr *addr, bool abstract)
memcpy(sun_path_buf, addr->sock_name, strlen(addr->sock_name));
}
+static int sk_enable_cred_pass(int sk)
+{
+ int on = 1;
+
+ if (setsockopt(sk, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on))) {
+ log_err("Failed to set SO_PASSCRED");
+ return 1;
+ }
+
+ if (setsockopt(sk, SOL_SOCKET, SO_PASSPIDFD, &on, sizeof(on))) {
+ log_err("Failed to set SO_PASSPIDFD");
+ return 1;
+ }
+
+ return 0;
+}
+
static void client(FIXTURE_DATA(scm_pidfd) *self,
const FIXTURE_VARIANT(scm_pidfd) *variant)
{
@@ -299,7 +413,6 @@ static void client(FIXTURE_DATA(scm_pidfd) *self,
struct ucred peer_cred;
int peer_pidfd;
pid_t peer_pid;
- int on = 0;
cfd = socket(AF_UNIX, variant->type, 0);
if (cfd < 0) {
@@ -322,14 +435,8 @@ static void client(FIXTURE_DATA(scm_pidfd) *self,
child_die();
}
- on = 1;
- if (setsockopt(cfd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on))) {
- log_err("Failed to set SO_PASSCRED");
- child_die();
- }
-
- if (setsockopt(cfd, SOL_SOCKET, SO_PASSPIDFD, &on, sizeof(on))) {
- log_err("Failed to set SO_PASSPIDFD");
+ if (sk_enable_cred_pass(cfd)) {
+ log_err("sk_enable_cred_pass() failed");
child_die();
}
@@ -340,6 +447,12 @@ static void client(FIXTURE_DATA(scm_pidfd) *self,
child_die();
}
+ /* send something to the parent so it can receive SCM_PIDFD too and validate it */
+ if (send(cfd, "y", sizeof(char), 0) == -1) {
+ log_err("Failed to send(cfd, \"y\", sizeof(char), 0)");
+ child_die();
+ }
+
/* skip further for SOCK_DGRAM as it's not applicable */
if (variant->type == SOCK_DGRAM)
return;
@@ -398,7 +511,13 @@ TEST_F(scm_pidfd, test)
close(self->server);
close(self->startup_pipe[0]);
client(self, variant);
- exit(0);
+
+ /*
+ * It's a bit unusual, but in case of success we return non-zero
+ * exit code (CHILD_EXIT_CODE_OK) and then we expect to read it
+ * from ioctl(PIDFD_GET_INFO) in cmsg_check_dead().
+ */
+ exit(CHILD_EXIT_CODE_OK);
}
close(self->startup_pipe[1]);
@@ -421,9 +540,17 @@ TEST_F(scm_pidfd, test)
ASSERT_NE(-1, err);
}
- close(pfd);
waitpid(self->client_pid, &child_status, 0);
- ASSERT_EQ(0, WIFEXITED(child_status) ? WEXITSTATUS(child_status) : 1);
+ /* see comment before exit(CHILD_EXIT_CODE_OK) */
+ ASSERT_EQ(CHILD_EXIT_CODE_OK, WIFEXITED(child_status) ? WEXITSTATUS(child_status) : 1);
+
+ err = sk_enable_cred_pass(pfd);
+ ASSERT_EQ(0, err);
+
+ err = cmsg_check_dead(pfd, self->client_pid);
+ ASSERT_EQ(0, err);
+
+ close(pfd);
}
TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/net/af_unix/scm_rights.c b/tools/testing/selftests/net/af_unix/scm_rights.c
index 8b015f16c03d..d82a79c21c17 100644
--- a/tools/testing/selftests/net/af_unix/scm_rights.c
+++ b/tools/testing/selftests/net/af_unix/scm_rights.c
@@ -10,7 +10,7 @@
#include <sys/socket.h>
#include <sys/un.h>
-#include "../../kselftest_harness.h"
+#include "kselftest_harness.h"
FIXTURE(scm_rights)
{
@@ -271,20 +271,11 @@ void __send_fd(struct __test_metadata *_metadata,
{
#define MSG "x"
#define MSGLEN 1
- struct {
- struct cmsghdr cmsghdr;
- int fd[2];
- } cmsg = {
- .cmsghdr = {
- .cmsg_len = CMSG_LEN(sizeof(cmsg.fd)),
- .cmsg_level = SOL_SOCKET,
- .cmsg_type = SCM_RIGHTS,
- },
- .fd = {
- self->fd[inflight * 2],
- self->fd[inflight * 2],
- },
+ int fds[2] = {
+ self->fd[inflight * 2],
+ self->fd[inflight * 2],
};
+ char cmsg_buf[CMSG_SPACE(sizeof(fds))];
struct iovec iov = {
.iov_base = MSG,
.iov_len = MSGLEN,
@@ -294,11 +285,18 @@ void __send_fd(struct __test_metadata *_metadata,
.msg_namelen = 0,
.msg_iov = &iov,
.msg_iovlen = 1,
- .msg_control = &cmsg,
- .msg_controllen = CMSG_SPACE(sizeof(cmsg.fd)),
+ .msg_control = cmsg_buf,
+ .msg_controllen = sizeof(cmsg_buf),
};
+ struct cmsghdr *cmsg;
int ret;
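+ /* Build the SCM_RIGHTS header in place with the CMSG_*() helpers
+ * instead of a hand-rolled struct with a trailing fd array.
+ */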
+ cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_RIGHTS;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(fds));
+ memcpy(CMSG_DATA(cmsg), fds, sizeof(fds));
+
ret = sendmsg(self->fd[receiver * 2 + 1], &msg, variant->flags);
if (variant->disabled) {
diff --git a/tools/testing/selftests/net/af_unix/so_peek_off.c b/tools/testing/selftests/net/af_unix/so_peek_off.c
new file mode 100644
index 000000000000..86e7b0fb522d
--- /dev/null
+++ b/tools/testing/selftests/net/af_unix/so_peek_off.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2025 Google LLC */
+
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <sys/socket.h>
+
+#include "../../kselftest_harness.h"
+
+FIXTURE(so_peek_off)
+{
+ int fd[2]; /* 0: sender, 1: receiver */
+};
+
+FIXTURE_VARIANT(so_peek_off)
+{
+ int type;
+};
+
+FIXTURE_VARIANT_ADD(so_peek_off, stream)
+{
+ .type = SOCK_STREAM,
+};
+
+FIXTURE_VARIANT_ADD(so_peek_off, dgram)
+{
+ .type = SOCK_DGRAM,
+};
+
+FIXTURE_VARIANT_ADD(so_peek_off, seqpacket)
+{
+ .type = SOCK_SEQPACKET,
+};
+
+FIXTURE_SETUP(so_peek_off)
+{
+ struct timeval timeout = {
+ .tv_sec = 5,
+ .tv_usec = 0,
+ };
+ int ret;
+
+ ret = socketpair(AF_UNIX, variant->type, 0, self->fd);
+ ASSERT_EQ(0, ret);
+
+ ret = setsockopt(self->fd[1], SOL_SOCKET, SO_RCVTIMEO_NEW,
+ &timeout, sizeof(timeout));
+ ASSERT_EQ(0, ret);
+
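+ /* Enabling SO_PEEK_OFF (starting at offset 0) makes each MSG_PEEK
+ * advance the peek offset, so consecutive peeks return consecutive
+ * bytes instead of re-reading from the head of the queue.
+ */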
+ ret = setsockopt(self->fd[1], SOL_SOCKET, SO_PEEK_OFF,
+ &(int){0}, sizeof(int));
+ ASSERT_EQ(0, ret);
+}
+
+FIXTURE_TEARDOWN(so_peek_off)
+{
+ close_range(self->fd[0], self->fd[1], 0);
+}
+
+#define sendeq(fd, str, flags) \
+ do { \
+ int bytes, len = strlen(str); \
+ \
+ bytes = send(fd, str, len, flags); \
+ ASSERT_EQ(len, bytes); \
+ } while (0)
+
+#define recveq(fd, str, buflen, flags) \
+ do { \
+ char buf[(buflen) + 1] = {}; \
+ int bytes; \
+ \
+ bytes = recv(fd, buf, buflen, flags); \
+ ASSERT_NE(-1, bytes); \
+ ASSERT_STREQ(str, buf); \
+ } while (0)
+
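+/* async { ... } runs the block in a forked child: the child executes the
+ * loop body once and exits at the increment expression, while the parent
+ * (nonzero pid) skips the body. A failed fork() bails out of the test.
+ */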
+#define async \
+ for (pid_t pid = (pid = fork(), \
+ pid < 0 ? \
+ __TH_LOG("Failed to start async {}"), \
+ _metadata->exit_code = KSFT_FAIL, \
+ __bail(1, _metadata), \
+ 0xdead : \
+ pid); \
+ !pid; exit(0))
+
+TEST_F(so_peek_off, single_chunk)
+{
+ sendeq(self->fd[0], "aaaabbbb", 0);
+
+ recveq(self->fd[1], "aaaa", 4, MSG_PEEK);
+ recveq(self->fd[1], "bbbb", 100, MSG_PEEK);
+}
+
+TEST_F(so_peek_off, two_chunks)
+{
+ sendeq(self->fd[0], "aaaa", 0);
+ sendeq(self->fd[0], "bbbb", 0);
+
+ recveq(self->fd[1], "aaaa", 4, MSG_PEEK);
+ recveq(self->fd[1], "bbbb", 100, MSG_PEEK);
+}
+
+TEST_F(so_peek_off, two_chunks_blocking)
+{
+ async {
+ usleep(1000);
+ sendeq(self->fd[0], "aaaa", 0);
+ }
+
+ recveq(self->fd[1], "aaaa", 4, MSG_PEEK);
+
+ async {
+ usleep(1000);
+ sendeq(self->fd[0], "bbbb", 0);
+ }
+
+ /* goto again; -> goto redo; in unix_stream_read_generic(). */
+ recveq(self->fd[1], "bbbb", 100, MSG_PEEK);
+}
+
+TEST_F(so_peek_off, two_chunks_overlap)
+{
+ sendeq(self->fd[0], "aaaa", 0);
+ recveq(self->fd[1], "aa", 2, MSG_PEEK);
+
+ sendeq(self->fd[0], "bbbb", 0);
+
+ if (variant->type == SOCK_STREAM) {
+ /* SOCK_STREAM tries to fill the buffer. */
+ recveq(self->fd[1], "aabb", 4, MSG_PEEK);
+ recveq(self->fd[1], "bb", 100, MSG_PEEK);
+ } else {
+ /* SOCK_DGRAM and SOCK_SEQPACKET return at the skb boundary. */
+ recveq(self->fd[1], "aa", 100, MSG_PEEK);
+ recveq(self->fd[1], "bbbb", 100, MSG_PEEK);
+ }
+}
+
+TEST_F(so_peek_off, two_chunks_overlap_blocking)
+{
+ async {
+ usleep(1000);
+ sendeq(self->fd[0], "aaaa", 0);
+ }
+
+ recveq(self->fd[1], "aa", 2, MSG_PEEK);
+
+ async {
+ usleep(1000);
+ sendeq(self->fd[0], "bbbb", 0);
+ }
+
+ /* Even SOCK_STREAM does not wait if at least one byte is read. */
+ recveq(self->fd[1], "aa", 100, MSG_PEEK);
+
+ recveq(self->fd[1], "bbbb", 100, MSG_PEEK);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/net/af_unix/unix_connect.c b/tools/testing/selftests/net/af_unix/unix_connect.c
index d799fd8f5c7c..870ca96fa8ea 100644
--- a/tools/testing/selftests/net/af_unix/unix_connect.c
+++ b/tools/testing/selftests/net/af_unix/unix_connect.c
@@ -10,7 +10,7 @@
#include <sys/socket.h>
#include <sys/un.h>
-#include "../../kselftest_harness.h"
+#include "kselftest_harness.h"
FIXTURE(unix_connect)
{
diff --git a/tools/testing/selftests/net/af_unix/unix_connreset.c b/tools/testing/selftests/net/af_unix/unix_connreset.c
new file mode 100644
index 000000000000..08c1de8f5a98
--- /dev/null
+++ b/tools/testing/selftests/net/af_unix/unix_connreset.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Selftest for AF_UNIX socket close and ECONNRESET behaviour.
+ *
+ * This test verifies:
+ * 1. SOCK_STREAM returns EOF when the peer closes normally.
+ * 2. SOCK_STREAM returns ECONNRESET if peer closes with unread data.
+ * 3. SOCK_SEQPACKET returns EOF when the peer closes normally.
+ * 4. SOCK_SEQPACKET returns ECONNRESET if the peer closes with unread data.
+ * 5. SOCK_DGRAM does not return ECONNRESET when the peer closes.
+ *
+ * These tests document the intended Linux behaviour.
+ */
+
+#define _GNU_SOURCE
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <errno.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include "../../kselftest_harness.h"
+
+#define SOCK_PATH "/tmp/af_unix_connreset.sock"
+
+static void remove_socket_file(void)
+{
+ unlink(SOCK_PATH);
+}
+
+FIXTURE(unix_sock)
+{
+ int server;
+ int client;
+ int child;
+};
+
+FIXTURE_VARIANT(unix_sock)
+{
+ int socket_type;
+ const char *name;
+};
+
+FIXTURE_VARIANT_ADD(unix_sock, stream) {
+ .socket_type = SOCK_STREAM,
+ .name = "SOCK_STREAM",
+};
+
+FIXTURE_VARIANT_ADD(unix_sock, dgram) {
+ .socket_type = SOCK_DGRAM,
+ .name = "SOCK_DGRAM",
+};
+
+FIXTURE_VARIANT_ADD(unix_sock, seqpacket) {
+ .socket_type = SOCK_SEQPACKET,
+ .name = "SOCK_SEQPACKET",
+};
+
+FIXTURE_SETUP(unix_sock)
+{
+ struct sockaddr_un addr = {};
+ int err;
+
+ addr.sun_family = AF_UNIX;
+ strcpy(addr.sun_path, SOCK_PATH);
+ remove_socket_file();
+
+ self->server = socket(AF_UNIX, variant->socket_type, 0);
+ ASSERT_LT(-1, self->server);
+
+ err = bind(self->server, (struct sockaddr *)&addr, sizeof(addr));
+ ASSERT_EQ(0, err);
+
+ if (variant->socket_type == SOCK_STREAM ||
+ variant->socket_type == SOCK_SEQPACKET) {
+ err = listen(self->server, 1);
+ ASSERT_EQ(0, err);
+ }
+
+ self->client = socket(AF_UNIX, variant->socket_type | SOCK_NONBLOCK, 0);
+ ASSERT_LT(-1, self->client);
+
+ err = connect(self->client, (struct sockaddr *)&addr, sizeof(addr));
+ ASSERT_EQ(0, err);
+}
+
+FIXTURE_TEARDOWN(unix_sock)
+{
+ if (variant->socket_type == SOCK_STREAM ||
+ variant->socket_type == SOCK_SEQPACKET)
+ close(self->child);
+
+ close(self->client);
+ close(self->server);
+ remove_socket_file();
+}
+
+/* Test 1: peer closes normally */
+TEST_F(unix_sock, eof)
+{
+ char buf[16] = {};
+ ssize_t n;
+
+ if (variant->socket_type == SOCK_STREAM ||
+ variant->socket_type == SOCK_SEQPACKET) {
+ self->child = accept(self->server, NULL, NULL);
+ ASSERT_LT(-1, self->child);
+
+ close(self->child);
+ } else {
+ close(self->server);
+ }
+
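+ /* SOCK_DGRAM has no connection to tear down, so the non-blocking
+ * recv() simply reports EAGAIN.
+ */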
+ n = recv(self->client, buf, sizeof(buf), 0);
+
+ if (variant->socket_type == SOCK_STREAM ||
+ variant->socket_type == SOCK_SEQPACKET) {
+ ASSERT_EQ(0, n);
+ } else {
+ ASSERT_EQ(-1, n);
+ ASSERT_EQ(EAGAIN, errno);
+ }
+}
+
+/* Test 2: peer closes with unread data */
+TEST_F(unix_sock, reset_unread_behavior)
+{
+ char buf[16] = {};
+ ssize_t n;
+
+ /* Send data that will remain unread */
+ send(self->client, "hello", 5, 0);
+
+ if (variant->socket_type == SOCK_DGRAM) {
+ /* No real connection, just close the server */
+ close(self->server);
+ } else {
+ self->child = accept(self->server, NULL, NULL);
+ ASSERT_LT(-1, self->child);
+
+ /* Peer closes before client reads */
+ close(self->child);
+ }
+
+ n = recv(self->client, buf, sizeof(buf), 0);
+ ASSERT_EQ(-1, n);
+
+ if (variant->socket_type == SOCK_STREAM ||
+ variant->socket_type == SOCK_SEQPACKET) {
+ ASSERT_EQ(ECONNRESET, errno);
+ } else {
+ ASSERT_EQ(EAGAIN, errno);
+ }
+}
+
+/* Test 3: closing unaccepted (embryo) server socket should reset client. */
+TEST_F(unix_sock, reset_closed_embryo)
+{
+ char buf[16] = {};
+ ssize_t n;
+
+ if (variant->socket_type == SOCK_DGRAM) {
+ snprintf(_metadata->results->reason,
+ sizeof(_metadata->results->reason),
+ "Test only applies to SOCK_STREAM and SOCK_SEQPACKET");
+ exit(KSFT_XFAIL);
+ }
+
+ /* Close server without accept()ing */
+ close(self->server);
+
+ n = recv(self->client, buf, sizeof(buf), 0);
+
+ ASSERT_EQ(-1, n);
+ ASSERT_EQ(ECONNRESET, errno);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/net/arp_ndisc_evict_nocarrier.sh b/tools/testing/selftests/net/arp_ndisc_evict_nocarrier.sh
index 92eb880c52f2..00758f00efbf 100755
--- a/tools/testing/selftests/net/arp_ndisc_evict_nocarrier.sh
+++ b/tools/testing/selftests/net/arp_ndisc_evict_nocarrier.sh
@@ -75,7 +75,7 @@ setup_v4() {
ip neigh get $V4_ADDR1 dev veth0 >/dev/null 2>&1
if [ $? -ne 0 ]; then
cleanup_v4
- echo "failed"
+ echo "failed; is the system using MACAddressPolicy=persistent ?"
exit 1
fi
diff --git a/tools/testing/selftests/net/bareudp.sh b/tools/testing/selftests/net/bareudp.sh
index 4046131e7888..d9e5b967f815 100755
--- a/tools/testing/selftests/net/bareudp.sh
+++ b/tools/testing/selftests/net/bareudp.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
# Test various bareudp tunnel configurations.
diff --git a/tools/testing/selftests/net/bench/Makefile b/tools/testing/selftests/net/bench/Makefile
new file mode 100644
index 000000000000..2546c45e42f7
--- /dev/null
+++ b/tools/testing/selftests/net/bench/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+TEST_GEN_MODS_DIR := page_pool
+
+TEST_PROGS += test_bench_page_pool.sh
+
+include ../../lib.mk
diff --git a/tools/testing/selftests/net/bench/page_pool/Makefile b/tools/testing/selftests/net/bench/page_pool/Makefile
new file mode 100644
index 000000000000..0549a16ba275
--- /dev/null
+++ b/tools/testing/selftests/net/bench/page_pool/Makefile
@@ -0,0 +1,17 @@
+BENCH_PAGE_POOL_SIMPLE_TEST_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST)))))
+KDIR ?= /lib/modules/$(shell uname -r)/build
+
+ifeq ($(V),1)
+Q =
+else
+Q = @
+endif
+
+obj-m += bench_page_pool.o
+bench_page_pool-y += bench_page_pool_simple.o time_bench.o
+
+all:
+ +$(Q)make -C $(KDIR) M=$(BENCH_PAGE_POOL_SIMPLE_TEST_DIR) modules
+
+clean:
+ +$(Q)make -C $(KDIR) M=$(BENCH_PAGE_POOL_SIMPLE_TEST_DIR) clean
diff --git a/tools/testing/selftests/net/bench/page_pool/bench_page_pool_simple.c b/tools/testing/selftests/net/bench/page_pool/bench_page_pool_simple.c
new file mode 100644
index 000000000000..cb6468adbda4
--- /dev/null
+++ b/tools/testing/selftests/net/bench/page_pool/bench_page_pool_simple.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Benchmark module for page_pool.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/interrupt.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <net/page_pool/helpers.h>
+
+#include "time_bench.h"
+
+static int verbose = 1;
+#define MY_POOL_SIZE 1024
+
+/* Makes tests selectable. Useful for perf-record to analyze a single test.
+ * Hint: Bash shells support writing binary numbers like: $((2#101010))
+ *
+ * # modprobe bench_page_pool_simple run_flags=$((2#100))
+ */
+static unsigned long run_flags = 0xFFFFFFFF;
+module_param(run_flags, ulong, 0);
+MODULE_PARM_DESC(run_flags, "Limit which bench tests run");
+
+/* Count the bit number from the enum */
+enum benchmark_bit {
+ bit_run_bench_baseline,
+ bit_run_bench_no_softirq01,
+ bit_run_bench_no_softirq02,
+ bit_run_bench_no_softirq03,
+};
+
+#define bit(b) (1 << (b))
+#define enabled(b) ((run_flags & (bit(b))))
+
+/* Note: time_bench is limited to U32_MAX loops */
+static unsigned long loops = 10000000;
+module_param(loops, ulong, 0);
+MODULE_PARM_DESC(loops, "Number of loops the bench will run");
+
+/* Timing at the nanosec level, we need to know the overhead
+ * introduced by the for loop itself
+ */
+static int time_bench_for_loop(struct time_bench_record *rec, void *data)
+{
+ uint64_t loops_cnt = 0;
+ int i;
+
+ time_bench_start(rec);
+ /** Loop to measure **/
+ for (i = 0; i < rec->loops; i++) {
+ loops_cnt++;
+ barrier(); /* keep the compiler from optimizing this loop away */
+ }
+ time_bench_stop(rec, loops_cnt);
+ return loops_cnt;
+}
+
+static int time_bench_atomic_inc(struct time_bench_record *rec, void *data)
+{
+ uint64_t loops_cnt = 0;
+ atomic_t cnt;
+ int i;
+
+ atomic_set(&cnt, 0);
+
+ time_bench_start(rec);
+ /** Loop to measure **/
+ for (i = 0; i < rec->loops; i++) {
+ atomic_inc(&cnt);
+ barrier(); /* keep the compiler from optimizing this loop away */
+ }
+ loops_cnt = atomic_read(&cnt);
+ time_bench_stop(rec, loops_cnt);
+ return loops_cnt;
+}
+
+/* The ptr_ring in page_pool uses a spinlock. We need to know the minimum
+ * overhead of taking+releasing a spinlock, to know the cycles that can be saved
+ * by e.g. amortizing this via bulking.
+ */
+static int time_bench_lock(struct time_bench_record *rec, void *data)
+{
+ uint64_t loops_cnt = 0;
+ spinlock_t lock;
+ int i;
+
+ spin_lock_init(&lock);
+
+ time_bench_start(rec);
+ /** Loop to measure **/
+ for (i = 0; i < rec->loops; i++) {
+ spin_lock(&lock);
+ loops_cnt++;
+ barrier(); /* keep the compiler from optimizing this loop away */
+ spin_unlock(&lock);
+ }
+ time_bench_stop(rec, loops_cnt);
+ return loops_cnt;
+}
+
+/* Helper for filling some pages into the ptr_ring */
+static void pp_fill_ptr_ring(struct page_pool *pp, int elems)
+{
+ /* GFP_ATOMIC is needed when running under softirq */
+ gfp_t gfp_mask = GFP_ATOMIC;
+ struct page **array;
+ int i;
+
+ array = kcalloc(elems, sizeof(struct page *), gfp_mask);
+
+ for (i = 0; i < elems; i++)
+ array[i] = page_pool_alloc_pages(pp, gfp_mask);
+ for (i = 0; i < elems; i++)
+ page_pool_put_page(pp, array[i], -1, false);
+
+ kfree(array);
+}
+
+enum test_type { type_fast_path, type_ptr_ring, type_page_allocator };
+
+/* Depends on the compiler optimizing this function */
+static int time_bench_page_pool(struct time_bench_record *rec, void *data,
+ enum test_type type, const char *func)
+{
+ uint64_t loops_cnt = 0;
+ gfp_t gfp_mask = GFP_ATOMIC; /* GFP_ATOMIC is not really needed */
+ int i, err;
+
+ struct page_pool *pp;
+ struct page *page;
+
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = 0,
+ .pool_size = MY_POOL_SIZE,
+ .nid = NUMA_NO_NODE,
+ .dev = NULL, /* Only use for DMA mapping */
+ .dma_dir = DMA_BIDIRECTIONAL,
+ };
+
+ pp = page_pool_create(&pp_params);
+ if (IS_ERR(pp)) {
+ err = PTR_ERR(pp);
+ pr_warn("%s: Error(%d) creating page_pool\n", func, err);
+ goto out;
+ }
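+ /* Prime the pool's ptr_ring so the loop below can recycle pages
+ * instead of always falling back to the page allocator.
+ */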
+ pp_fill_ptr_ring(pp, 64);
+
+ if (in_serving_softirq())
+ pr_warn("%s(): in_serving_softirq fast-path\n", func);
+ else
+ pr_warn("%s(): Cannot use page_pool fast-path\n", func);
+
+ time_bench_start(rec);
+ /** Loop to measure **/
+ for (i = 0; i < rec->loops; i++) {
+ /* Common fast-path alloc that depends on in_serving_softirq() */
+ page = page_pool_alloc_pages(pp, gfp_mask);
+ if (!page)
+ break;
+ loops_cnt++;
+ barrier(); /* keep the compiler from optimizing this loop away */
+
+ /* The benchmark's purpose is to test different return paths.
+ * The compiler should optimize the other function calls out.
+ */
+ if (type == type_fast_path) {
+ /* Fast-path recycling e.g. XDP_DROP use-case */
+ page_pool_recycle_direct(pp, page);
+
+ } else if (type == type_ptr_ring) {
+ /* Normal return path */
+ page_pool_put_page(pp, page, -1, false);
+
+ } else if (type == type_page_allocator) {
+ /* Test the case where pages are not recycled, but instead
+ * returned to the system page allocator.
+ */
+ get_page(page); /* cause no-recycling */
+ page_pool_put_page(pp, page, -1, false);
+ put_page(page);
+ } else {
+ BUILD_BUG();
+ }
+ }
+ time_bench_stop(rec, loops_cnt);
+out:
+ page_pool_destroy(pp);
+ return loops_cnt;
+}
+
+static int time_bench_page_pool01_fast_path(struct time_bench_record *rec,
+ void *data)
+{
+ return time_bench_page_pool(rec, data, type_fast_path, __func__);
+}
+
+static int time_bench_page_pool02_ptr_ring(struct time_bench_record *rec,
+ void *data)
+{
+ return time_bench_page_pool(rec, data, type_ptr_ring, __func__);
+}
+
+static int time_bench_page_pool03_slow(struct time_bench_record *rec,
+ void *data)
+{
+ return time_bench_page_pool(rec, data, type_page_allocator, __func__);
+}
+
+static int run_benchmark_tests(void)
+{
+ uint32_t nr_loops = loops;
+
+ /* Baseline tests */
+ if (enabled(bit_run_bench_baseline)) {
+ time_bench_loop(nr_loops * 10, 0, "for_loop", NULL,
+ time_bench_for_loop);
+ time_bench_loop(nr_loops * 10, 0, "atomic_inc", NULL,
+ time_bench_atomic_inc);
+ time_bench_loop(nr_loops, 0, "lock", NULL, time_bench_lock);
+ }
+
+ /* These tests cannot activate the correct code path, as they run outside softirq context */
+ if (enabled(bit_run_bench_no_softirq01))
+ time_bench_loop(nr_loops, 0, "no-softirq-page_pool01", NULL,
+ time_bench_page_pool01_fast_path);
+ if (enabled(bit_run_bench_no_softirq02))
+ time_bench_loop(nr_loops, 0, "no-softirq-page_pool02", NULL,
+ time_bench_page_pool02_ptr_ring);
+ if (enabled(bit_run_bench_no_softirq03))
+ time_bench_loop(nr_loops, 0, "no-softirq-page_pool03", NULL,
+ time_bench_page_pool03_slow);
+
+ return 0;
+}
+
+static int __init bench_page_pool_simple_module_init(void)
+{
+ if (verbose)
+ pr_info("Loaded\n");
+
+ if (loops > U32_MAX) {
+ pr_err("Module param loops(%lu) exceeded U32_MAX(%u)\n", loops,
+ U32_MAX);
+ return -ECHRNG;
+ }
+
+ run_benchmark_tests();
+
+ return 0;
+}
+module_init(bench_page_pool_simple_module_init);
+
+static void __exit bench_page_pool_simple_module_exit(void)
+{
+ if (verbose)
+ pr_info("Unloaded\n");
+}
+module_exit(bench_page_pool_simple_module_exit);
+
+MODULE_DESCRIPTION("Benchmark of page_pool simple cases");
+MODULE_AUTHOR("Jesper Dangaard Brouer <netoptimizer@brouer.com>");
+MODULE_LICENSE("GPL");
diff --git a/tools/testing/selftests/net/bench/page_pool/time_bench.c b/tools/testing/selftests/net/bench/page_pool/time_bench.c
new file mode 100644
index 000000000000..073bb36ec5f2
--- /dev/null
+++ b/tools/testing/selftests/net/bench/page_pool/time_bench.c
@@ -0,0 +1,394 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Benchmarking code execution time inside the kernel
+ *
+ * Copyright (C) 2014, Red Hat, Inc., Jesper Dangaard Brouer
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/time.h>
+
+#include <linux/perf_event.h> /* perf_event_create_kernel_counter() */
+
+/* For concurrency testing */
+#include <linux/completion.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+
+#include "time_bench.h"
+
+static int verbose = 1;
+
+/** TSC (Time-Stamp Counter) based **
+ * See: linux/time_bench.h
+ * tsc_start_clock() and tsc_stop_clock()
+ */
+
+/** Wall-clock based **
+ */
+
+/** PMU (Performance Monitor Unit) based **
+ */
+#define PERF_FORMAT \
+ (PERF_FORMAT_GROUP | PERF_FORMAT_ID | PERF_FORMAT_TOTAL_TIME_ENABLED | \
+ PERF_FORMAT_TOTAL_TIME_RUNNING)
+
+struct raw_perf_event {
+ uint64_t config; /* event */
+ uint64_t config1; /* umask */
+ struct perf_event *save;
+ char *desc;
+};
+
+/* If HT is enabled, a maximum of 4 events (5 if one is instructions
+ * retired) can be specified; if HT is disabled, a maximum of 8 (9 if
+ * one is instructions retired) can be specified.
+ *
+ * From Table 19-1. Architectural Performance Events
+ * Architectures Software Developer’s Manual Volume 3: System Programming
+ * Guide
+ */
+struct raw_perf_event perf_events[] = {
+ { 0x3c, 0x00, NULL, "Unhalted CPU Cycles" },
+ { 0xc0, 0x00, NULL, "Instruction Retired" }
+};
+
+#define NUM_EVTS (ARRAY_SIZE(perf_events))
+
+/* WARNING: PMU config is currently broken!
+ */
+bool time_bench_PMU_config(bool enable)
+{
+ int i;
+ struct perf_event_attr perf_conf;
+ struct perf_event *perf_event;
+ int cpu;
+
+ preempt_disable();
+ cpu = smp_processor_id();
+ pr_info("DEBUG: cpu:%d\n", cpu);
+ preempt_enable();
+
+ memset(&perf_conf, 0, sizeof(struct perf_event_attr));
+ perf_conf.type = PERF_TYPE_RAW;
+ perf_conf.size = sizeof(struct perf_event_attr);
+ perf_conf.read_format = PERF_FORMAT;
+ perf_conf.pinned = 1;
+ perf_conf.exclude_user = 1; /* No userspace events */
+ perf_conf.exclude_kernel = 0; /* Only kernel events */
+
+ for (i = 0; i < NUM_EVTS; i++) {
+ perf_conf.disabled = enable;
+ //perf_conf.disabled = (i == 0) ? 1 : 0;
+ perf_conf.config = perf_events[i].config;
+ perf_conf.config1 = perf_events[i].config1;
+ if (verbose)
+ pr_info("%s() enable PMU counter: %s\n",
+ __func__, perf_events[i].desc);
+ perf_event = perf_event_create_kernel_counter(&perf_conf, cpu,
+ NULL /* task */,
+ NULL /* overflow_handler*/,
+ NULL /* context */);
+ if (perf_event) {
+ perf_events[i].save = perf_event;
+ pr_info("%s():DEBUG perf_event success\n", __func__);
+
+ perf_event_enable(perf_event);
+ } else {
+ pr_info("%s():DEBUG perf_event is NULL\n", __func__);
+ }
+ }
+
+ return true;
+}
+
+/** Generic functions **
+ */
+
+/* Calculate stats, store results in record */
+bool time_bench_calc_stats(struct time_bench_record *rec)
+{
+#define NANOSEC_PER_SEC 1000000000 /* 10^9 */
+ uint64_t ns_per_call_tmp_rem = 0;
+ uint32_t ns_per_call_remainder = 0;
+ uint64_t pmc_ipc_tmp_rem = 0;
+ uint32_t pmc_ipc_remainder = 0;
+ uint32_t pmc_ipc_div = 0;
+ uint32_t invoked_cnt_precision = 0;
+ uint32_t invoked_cnt = 0; /* 32-bit due to div_u64_rem() */
+
+ if (rec->flags & TIME_BENCH_LOOP) {
+ if (rec->invoked_cnt < 1000) {
+ pr_err("ERR: need more(>1000) loops(%llu) for timing\n",
+ rec->invoked_cnt);
+ return false;
+ }
+ if (rec->invoked_cnt > ((1ULL << 32) - 1)) {
+ /* div_u64_rem() only supports a 32-bit divisor */
+ pr_err("ERR: Invoke cnt(%llu) too big overflow 32bit\n",
+ rec->invoked_cnt);
+ return false;
+ }
+ invoked_cnt = (uint32_t)rec->invoked_cnt;
+ }
+
+ /* TSC (Time-Stamp Counter) records */
+ if (rec->flags & TIME_BENCH_TSC) {
+ rec->tsc_interval = rec->tsc_stop - rec->tsc_start;
+ if (rec->tsc_interval == 0) {
+ pr_err("ABORT: timing took ZERO TSC time\n");
+ return false;
+ }
+ /* Calculate stats */
+ if (rec->flags & TIME_BENCH_LOOP)
+ rec->tsc_cycles = rec->tsc_interval / invoked_cnt;
+ else
+ rec->tsc_cycles = rec->tsc_interval;
+ }
+
+ /* Wall-clock time calc */
+ if (rec->flags & TIME_BENCH_WALLCLOCK) {
+ rec->time_start = rec->ts_start.tv_nsec +
+ (NANOSEC_PER_SEC * rec->ts_start.tv_sec);
+ rec->time_stop = rec->ts_stop.tv_nsec +
+ (NANOSEC_PER_SEC * rec->ts_stop.tv_sec);
+ rec->time_interval = rec->time_stop - rec->time_start;
+ if (rec->time_interval == 0) {
+ pr_err("ABORT: timing took ZERO wallclock time\n");
+ return false;
+ }
+ /* Calculate stats */
+ /*** Division in the kernel is tricky ***/
+ /* Orig: time_sec = (time_interval / NANOSEC_PER_SEC); */
+ /* remainder only correct because NANOSEC_PER_SEC is 10^9 */
+ rec->time_sec = div_u64_rem(rec->time_interval, NANOSEC_PER_SEC,
+ &rec->time_sec_remainder);
+ //TODO: use existing struct timespec records instead of div?
+
+ if (rec->flags & TIME_BENCH_LOOP) {
+ /*** Division in the kernel is tricky ***/
+ /* Orig: ns = ((double)time_interval / invoked_cnt); */
+ /* First get quotient */
+ rec->ns_per_call_quotient =
+ div_u64_rem(rec->time_interval, invoked_cnt,
+ &ns_per_call_remainder);
+ /* Now get decimals to .xxx precision (without correct rounding) */
+ ns_per_call_tmp_rem = ns_per_call_remainder;
+ invoked_cnt_precision = invoked_cnt / 1000;
+ if (invoked_cnt_precision > 0) {
+ rec->ns_per_call_decimal =
+ div_u64_rem(ns_per_call_tmp_rem,
+ invoked_cnt_precision,
+ &ns_per_call_remainder);
+ }
+ }
+ }
+
+ /* Performance Monitor Unit (PMU) counters */
+ if (rec->flags & TIME_BENCH_PMU) {
+ //FIXME: Overflow handling???
+ rec->pmc_inst = rec->pmc_inst_stop - rec->pmc_inst_start;
+ rec->pmc_clk = rec->pmc_clk_stop - rec->pmc_clk_start;
+
+ /* Calc Instruction Per Cycle (IPC) */
+ /* First get quotient */
+ rec->pmc_ipc_quotient = div_u64_rem(rec->pmc_inst, rec->pmc_clk,
+ &pmc_ipc_remainder);
+ /* Now get decimals to .xxx precision (without correct rounding) */
+ pmc_ipc_tmp_rem = pmc_ipc_remainder;
+ pmc_ipc_div = rec->pmc_clk / 1000;
+ if (pmc_ipc_div > 0) {
+ rec->pmc_ipc_decimal = div_u64_rem(pmc_ipc_tmp_rem,
+ pmc_ipc_div,
+ &pmc_ipc_remainder);
+ }
+ }
+
+ return true;
+}
+
+/* Generic function for invoking a loop function and calculating
+ * execution time stats. The function being called/timed is assumed
+ * to perform a tight loop, and update the timing record struct.
+ */
+bool time_bench_loop(uint32_t loops, int step, char *txt, void *data,
+ int (*func)(struct time_bench_record *record, void *data))
+{
+ struct time_bench_record rec;
+
+ /* Setup record */
+ memset(&rec, 0, sizeof(rec)); /* zero func might not update all */
+ rec.version_abi = 1;
+ rec.loops = loops;
+ rec.step = step;
+ rec.flags = (TIME_BENCH_LOOP | TIME_BENCH_TSC | TIME_BENCH_WALLCLOCK);
+
+ /*** Loop function being timed ***/
+ if (!func(&rec, data)) {
+ pr_err("ABORT: function being timed failed\n");
+ return false;
+ }
+
+ if (rec.invoked_cnt < loops)
+ pr_warn("WARNING: Invoke count(%llu) smaller than loops(%d)\n",
+ rec.invoked_cnt, loops);
+
+ /* Calculate stats */
+ time_bench_calc_stats(&rec);
+
+ pr_info("Type:%s Per elem: %llu cycles(tsc) %llu.%03llu ns (step:%d) - (measurement period time:%llu.%09u sec time_interval:%llu) - (invoke count:%llu tsc_interval:%llu)\n",
+ txt, rec.tsc_cycles, rec.ns_per_call_quotient,
+ rec.ns_per_call_decimal, rec.step, rec.time_sec,
+ rec.time_sec_remainder, rec.time_interval, rec.invoked_cnt,
+ rec.tsc_interval);
+ if (rec.flags & TIME_BENCH_PMU)
+ pr_info("Type:%s PMU inst/clock%llu/%llu = %llu.%03llu IPC (inst per cycle)\n",
+ txt, rec.pmc_inst, rec.pmc_clk, rec.pmc_ipc_quotient,
+ rec.pmc_ipc_decimal);
+ return true;
+}
+
+/* Function getting invoked by kthread */
+static int invoke_test_on_cpu_func(void *private)
+{
+ struct time_bench_cpu *cpu = private;
+ struct time_bench_sync *sync = cpu->sync;
+ cpumask_t newmask = CPU_MASK_NONE;
+ void *data = cpu->data;
+
+ /* Restrict CPU */
+ cpumask_set_cpu(cpu->rec.cpu, &newmask);
+ set_cpus_allowed_ptr(current, &newmask);
+
+ /* Synchronize start of concurrency test */
+ atomic_inc(&sync->nr_tests_running);
+ wait_for_completion(&sync->start_event);
+
+ /* Start benchmark function */
+ if (!cpu->bench_func(&cpu->rec, data)) {
+ pr_err("ERROR: function being timed failed on CPU:%d(%d)\n",
+ cpu->rec.cpu, smp_processor_id());
+ } else {
+ if (verbose)
+ pr_info("SUCCESS: ran on CPU:%d(%d)\n", cpu->rec.cpu,
+ smp_processor_id());
+ }
+ cpu->did_bench_run = true;
+
+ /* End test */
+ atomic_dec(&sync->nr_tests_running);
+ /* Wait for kthread_stop() telling us to stop */
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+ return 0;
+}
+
+void time_bench_print_stats_cpumask(const char *desc,
+ struct time_bench_cpu *cpu_tasks,
+ const struct cpumask *mask)
+{
+ uint64_t average = 0;
+ int cpu;
+ int step = 0;
+ struct sum {
+ uint64_t tsc_cycles;
+ int records;
+ } sum = { 0 };
+
+ /* Get stats */
+ for_each_cpu(cpu, mask) {
+ struct time_bench_cpu *c = &cpu_tasks[cpu];
+ struct time_bench_record *rec = &c->rec;
+
+ /* Calculate stats */
+ time_bench_calc_stats(rec);
+
+ pr_info("Type:%s CPU(%d) %llu cycles(tsc) %llu.%03llu ns (step:%d) - (measurement period time:%llu.%09u sec time_interval:%llu) - (invoke count:%llu tsc_interval:%llu)\n",
+ desc, cpu, rec->tsc_cycles, rec->ns_per_call_quotient,
+ rec->ns_per_call_decimal, rec->step, rec->time_sec,
+ rec->time_sec_remainder, rec->time_interval,
+ rec->invoked_cnt, rec->tsc_interval);
+
+ /* Collect average */
+ sum.records++;
+ sum.tsc_cycles += rec->tsc_cycles;
+ step = rec->step;
+ }
+
+ if (sum.records) /* avoid div-by-zero */
+ average = sum.tsc_cycles / sum.records;
+ pr_info("Sum Type:%s Average: %llu cycles(tsc) CPUs:%d step:%d\n", desc,
+ average, sum.records, step);
+}
+
+void time_bench_run_concurrent(uint32_t loops, int step, void *data,
+ const struct cpumask *mask, /* Support masking out some CPUs */
+ struct time_bench_sync *sync,
+ struct time_bench_cpu *cpu_tasks,
+ int (*func)(struct time_bench_record *record, void *data))
+{
+ int cpu, running = 0;
+
+ if (verbose) // DEBUG
+ pr_warn("%s() Started on CPU:%d\n", __func__,
+ smp_processor_id());
+
+ /* Reset sync conditions */
+ atomic_set(&sync->nr_tests_running, 0);
+ init_completion(&sync->start_event);
+
+ /* Spawn off jobs on all CPUs */
+ for_each_cpu(cpu, mask) {
+ struct time_bench_cpu *c = &cpu_tasks[cpu];
+
+ running++;
+ c->sync = sync; /* Send sync variable along */
+ c->data = data; /* Send opaque along */
+
+ /* Init benchmark record */
+ memset(&c->rec, 0, sizeof(struct time_bench_record));
+ c->rec.version_abi = 1;
+ c->rec.loops = loops;
+ c->rec.step = step;
+ c->rec.flags = (TIME_BENCH_LOOP | TIME_BENCH_TSC |
+ TIME_BENCH_WALLCLOCK);
+ c->rec.cpu = cpu;
+ c->bench_func = func;
+ c->task = kthread_run(invoke_test_on_cpu_func, c,
+ "time_bench%d", cpu);
+ if (IS_ERR(c->task)) {
+ pr_err("%s(): Failed to start test func\n", __func__);
+ return; /* Argh, what about cleanup?! */
+ }
+ }
+
+ /* Wait until all processes are running */
+ while (atomic_read(&sync->nr_tests_running) < running) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(10);
+ }
+ /* Kick off all CPU concurrently on completion event */
+ complete_all(&sync->start_event);
+
+ /* Wait for CPUs to finish */
+ while (atomic_read(&sync->nr_tests_running)) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(10);
+ }
+
+ /* Stop the kthreads */
+ for_each_cpu(cpu, mask) {
+ struct time_bench_cpu *c = &cpu_tasks[cpu];
+
+ kthread_stop(c->task);
+ }
+
+ if (verbose) // DEBUG - happens often, finish on another CPU
+ pr_warn("%s() Finished on CPU:%d\n", __func__,
+ smp_processor_id());
+}
diff --git a/tools/testing/selftests/net/bench/page_pool/time_bench.h b/tools/testing/selftests/net/bench/page_pool/time_bench.h
new file mode 100644
index 000000000000..e113fcf341dc
--- /dev/null
+++ b/tools/testing/selftests/net/bench/page_pool/time_bench.h
@@ -0,0 +1,238 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Benchmarking code execution time inside the kernel
+ *
+ * Copyright (C) 2014, Red Hat, Inc., Jesper Dangaard Brouer
+ * for licensing details see kernel-base/COPYING
+ */
+#ifndef _LINUX_TIME_BENCH_H
+#define _LINUX_TIME_BENCH_H
+
+/* Main structure used for recording a benchmark run */
+struct time_bench_record {
+ uint32_t version_abi;
+ uint32_t loops; /* Requested loop invocations */
+ uint32_t step; /* option for e.g. bulk invocations */
+
+ uint32_t flags; /* Measurements types enabled */
+#define TIME_BENCH_LOOP BIT(0)
+#define TIME_BENCH_TSC BIT(1)
+#define TIME_BENCH_WALLCLOCK BIT(2)
+#define TIME_BENCH_PMU BIT(3)
+
+ uint32_t cpu; /* Used when embedded in time_bench_cpu */
+
+ /* Records */
+ uint64_t invoked_cnt; /* Returned actual invocations */
+ uint64_t tsc_start;
+ uint64_t tsc_stop;
+ struct timespec64 ts_start;
+ struct timespec64 ts_stop;
+ /* PMU counters for instruction and cycles
+ * instructions counter including pipelined instructions
+ */
+ uint64_t pmc_inst_start;
+ uint64_t pmc_inst_stop;
+ /* CPU unhalted clock counter */
+ uint64_t pmc_clk_start;
+ uint64_t pmc_clk_stop;
+
+ /* Result records */
+ uint64_t tsc_interval;
+ uint64_t time_start, time_stop, time_interval; /* in nanosec */
+ uint64_t pmc_inst, pmc_clk;
+
+ /* Derived result records */
+ uint64_t tsc_cycles; // +decimal?
+ uint64_t ns_per_call_quotient, ns_per_call_decimal;
+ uint64_t time_sec;
+ uint32_t time_sec_remainder;
+ uint64_t pmc_ipc_quotient, pmc_ipc_decimal; /* inst per cycle */
+};
+
+/* For synchronizing parallel CPUs to run concurrently */
+struct time_bench_sync {
+ atomic_t nr_tests_running;
+ struct completion start_event;
+};
+
+/* Keep track of CPUs executing our bench function.
+ *
+ * Embed a time_bench_record for storing info per cpu
+ */
+struct time_bench_cpu {
+ struct time_bench_record rec;
+ struct time_bench_sync *sync; /* back ptr */
+ struct task_struct *task;
+ /* "data" opaque could have been placed in time_bench_sync,
+ * but to avoid any false sharing, place it per CPU
+ */
+ void *data;
+ /* Support masking out some CPUs; mark whether it ran */
+ bool did_bench_run;
+ /* int cpu; // note CPU stored in time_bench_record */
+ int (*bench_func)(struct time_bench_record *record, void *data);
+};
+
+/*
+ * Below TSC assembler code is not compatible with other archs, and
+ * can also fail on guests if cpu-flags are not correct.
+ *
+ * The way TSC reading is used, many iterations, does not require as
+ * high accuracy as described below (in Intel Doc #324264).
+ *
+ * Considering changing to use get_cycles() (#include <asm/timex.h>).
+ */
+
+/** TSC (Time-Stamp Counter) based **
+ * Recommend reading, to understand details of reading TSC accurately:
+ * Intel Doc #324264, "How to Benchmark Code Execution Times on Intel"
+ *
+ * Consider getting exclusive ownership of CPU by using:
+ * unsigned long flags;
+ * preempt_disable();
+ * raw_local_irq_save(flags);
+ * _your_code_
+ * raw_local_irq_restore(flags);
+ * preempt_enable();
+ *
+ * Clobbered registers: "%rax", "%rbx", "%rcx", "%rdx"
+ * RDTSC only change "%rax" and "%rdx" but
+ * CPUID clears the high 32-bits of all (rax/rbx/rcx/rdx)
+ */
+static __always_inline uint64_t tsc_start_clock(void)
+{
+ /* See: Intel Doc #324264 */
+ unsigned int hi, lo;
+
+ asm volatile("CPUID\n\t"
+ "RDTSC\n\t"
+ "mov %%edx, %0\n\t"
+ "mov %%eax, %1\n\t"
+ : "=r"(hi), "=r"(lo)::"%rax", "%rbx", "%rcx", "%rdx");
+ //FIXME: on 32bit use clobbered %eax + %edx
+ return ((uint64_t)lo) | (((uint64_t)hi) << 32);
+}
+
+static __always_inline uint64_t tsc_stop_clock(void)
+{
+ /* See: Intel Doc #324264 */
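+ /* RDTSCP waits for all earlier instructions to retire before reading
+ * the TSC; the trailing CPUID keeps later instructions from being
+ * reordered above the read.
+ */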
+ unsigned int hi, lo;
+
+ asm volatile("RDTSCP\n\t"
+ "mov %%edx, %0\n\t"
+ "mov %%eax, %1\n\t"
+ "CPUID\n\t"
+ : "=r"(hi), "=r"(lo)::"%rax", "%rbx", "%rcx", "%rdx");
+ return ((uint64_t)lo) | (((uint64_t)hi) << 32);
+}
+
+/** Wall-clock based **
+ *
+ * use: getnstimeofday()
+ * getnstimeofday(&rec->ts_start);
+ * getnstimeofday(&rec->ts_stop);
+ *
+ * API changed see: Documentation/core-api/timekeeping.rst
+ * https://www.kernel.org/doc/html/latest/core-api/timekeeping.html#c.getnstimeofday
+ *
+ * We should instead use: ktime_get_real_ts64() is a direct
+ * replacement, but consider using monotonic time (ktime_get_ts64())
+ * and/or a ktime_t based interface (ktime_get()/ktime_get_real()).
+ */
+
+/** PMU (Performance Monitor Unit) based **
+ *
+ * Needed for calculating: Instructions Per Cycle (IPC)
+ * - The IPC number tell how efficient the CPU pipelining were
+ */
+//lookup: perf_event_create_kernel_counter()
+
+bool time_bench_PMU_config(bool enable);
+
+/* Raw reading via rdpmc() using fixed counters
+ *
+ * From: https://github.com/andikleen/simple-pmu
+ */
+enum {
+ FIXED_SELECT = (1U << 30), /* == 0x40000000 */
+ FIXED_INST_RETIRED_ANY = 0,
+ FIXED_CPU_CLK_UNHALTED_CORE = 1,
+ FIXED_CPU_CLK_UNHALTED_REF = 2,
+};
+
+static __always_inline unsigned long long p_rdpmc(unsigned int in)
+{
+ unsigned int d, a;
+
+ asm volatile("rdpmc" : "=d"(d), "=a"(a) : "c"(in) : "memory");
+ return ((unsigned long long)d << 32) | a;
+}
+
+/* These PMU counters need to be enabled, but I don't have the
+ * configuration code implemented. My current hack is running:
+ * sudo perf stat -e cycles:k -e instructions:k insmod lib/ring_queue_test.ko
+ */
+/* Reading all pipelined instruction */
+static __always_inline unsigned long long pmc_inst(void)
+{
+ return p_rdpmc(FIXED_SELECT | FIXED_INST_RETIRED_ANY);
+}
+
+/* Reading CPU clock cycles */
+static __always_inline unsigned long long pmc_clk(void)
+{
+ return p_rdpmc(FIXED_SELECT | FIXED_CPU_CLK_UNHALTED_CORE);
+}
+
+/* Raw reading via MSR rdmsr() is likely wrong
+ * FIXME: How can I know which raw MSR registers are configured for what? */
+ */
+#define MSR_IA32_PCM0 0x400000C1 /* PERFCTR0 */
+#define MSR_IA32_PCM1 0x400000C2 /* PERFCTR1 */
+#define MSR_IA32_PCM2 0x400000C3
+static inline uint64_t msr_inst(unsigned long long *msr_result)
+{
+ return rdmsrq_safe(MSR_IA32_PCM0, msr_result);
+}
+
+/** Generic functions **
+ */
+bool time_bench_loop(uint32_t loops, int step, char *txt, void *data,
+ int (*func)(struct time_bench_record *rec, void *data));
+bool time_bench_calc_stats(struct time_bench_record *rec);
+
+void time_bench_run_concurrent(uint32_t loops, int step, void *data,
+ const struct cpumask *mask, /* Support masking out some CPUs */
+ struct time_bench_sync *sync, struct time_bench_cpu *cpu_tasks,
+ int (*func)(struct time_bench_record *record, void *data));
+void time_bench_print_stats_cpumask(const char *desc,
+ struct time_bench_cpu *cpu_tasks,
+ const struct cpumask *mask);
+
+//FIXME: use rec->flags to select measurement, should be MACRO
+static __always_inline void time_bench_start(struct time_bench_record *rec)
+{
+ //getnstimeofday(&rec->ts_start);
+ ktime_get_real_ts64(&rec->ts_start);
+ if (rec->flags & TIME_BENCH_PMU) {
+ rec->pmc_inst_start = pmc_inst();
+ rec->pmc_clk_start = pmc_clk();
+ }
+ rec->tsc_start = tsc_start_clock();
+}
+
+static __always_inline void time_bench_stop(struct time_bench_record *rec,
+ uint64_t invoked_cnt)
+{
+ rec->tsc_stop = tsc_stop_clock();
+ if (rec->flags & TIME_BENCH_PMU) {
+ rec->pmc_inst_stop = pmc_inst();
+ rec->pmc_clk_stop = pmc_clk();
+ }
+ //getnstimeofday(&rec->ts_stop);
+ ktime_get_real_ts64(&rec->ts_stop);
+ rec->invoked_cnt = invoked_cnt;
+}
+
+#endif /* _LINUX_TIME_BENCH_H */
diff --git a/tools/testing/selftests/net/bench/test_bench_page_pool.sh b/tools/testing/selftests/net/bench/test_bench_page_pool.sh
new file mode 100755
index 000000000000..7b8b18cfedce
--- /dev/null
+++ b/tools/testing/selftests/net/bench/test_bench_page_pool.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+
+set -e
+
+DRIVER="./page_pool/bench_page_pool.ko"
+result=""
+
+function run_test()
+{
+ rmmod "bench_page_pool.ko" || true
+ insmod $DRIVER > /dev/null 2>&1
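+ # The module prints its measurements to the kernel log; this assumes
+ # the last lines of dmesg belong to this run.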
+ result=$(dmesg | tail -10)
+ echo "$result"
+
+ echo
+ echo "Fast path results:"
+ echo "${result}" | grep -o -E "no-softirq-page_pool01 Per elem: ([0-9]+) cycles\(tsc\) ([0-9]+\.[0-9]+) ns"
+
+ echo
+ echo "ptr_ring results:"
+ echo "${result}" | grep -o -E "no-softirq-page_pool02 Per elem: ([0-9]+) cycles\(tsc\) ([0-9]+\.[0-9]+) ns"
+
+ echo
+ echo "slow path results:"
+ echo "${result}" | grep -o -E "no-softirq-page_pool03 Per elem: ([0-9]+) cycles\(tsc\) ([0-9]+\.[0-9]+) ns"
+}
+
+run_test
+
+exit 0
diff --git a/tools/testing/selftests/net/bind_bhash.c b/tools/testing/selftests/net/bind_bhash.c
index 57ff67a3751e..da04b0b19b73 100644
--- a/tools/testing/selftests/net/bind_bhash.c
+++ b/tools/testing/selftests/net/bind_bhash.c
@@ -75,7 +75,7 @@ static void *setup(void *arg)
int *array = (int *)arg;
for (i = 0; i < MAX_CONNECTIONS; i++) {
- sock_fd = bind_socket(SO_REUSEADDR | SO_REUSEPORT, setup_addr);
+ sock_fd = bind_socket(SO_REUSEPORT, setup_addr);
if (sock_fd < 0) {
ret = sock_fd;
pthread_exit(&ret);
@@ -103,7 +103,7 @@ int main(int argc, const char *argv[])
setup_addr = use_v6 ? setup_addr_v6 : setup_addr_v4;
- listener_fd = bind_socket(SO_REUSEADDR | SO_REUSEPORT, setup_addr);
+ listener_fd = bind_socket(SO_REUSEPORT, setup_addr);
if (listen(listener_fd, 100) < 0) {
perror("listen failed");
return -1;
diff --git a/tools/testing/selftests/net/bind_timewait.c b/tools/testing/selftests/net/bind_timewait.c
index cb9fdf51ea59..40126f9b901e 100644
--- a/tools/testing/selftests/net/bind_timewait.c
+++ b/tools/testing/selftests/net/bind_timewait.c
@@ -4,7 +4,7 @@
#include <sys/socket.h>
#include <netinet/in.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
FIXTURE(bind_timewait)
{
diff --git a/tools/testing/selftests/net/bind_wildcard.c b/tools/testing/selftests/net/bind_wildcard.c
index b7b54d646b93..7d11548b2c61 100644
--- a/tools/testing/selftests/net/bind_wildcard.c
+++ b/tools/testing/selftests/net/bind_wildcard.c
@@ -4,7 +4,7 @@
#include <sys/socket.h>
#include <netinet/in.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
static const __u32 in4addr_any = INADDR_ANY;
static const __u32 in4addr_loopback = INADDR_LOOPBACK;
diff --git a/tools/testing/selftests/net/bpf_offload.py b/tools/testing/selftests/net/bpf_offload.py
index b2c271b79240..c856d266c8f3 100755
--- a/tools/testing/selftests/net/bpf_offload.py
+++ b/tools/testing/selftests/net/bpf_offload.py
@@ -184,8 +184,8 @@ def bpftool_prog_list(expected=None, ns="", exclude_orphaned=True):
progs = [ p for p in progs if not p['orphaned'] ]
if expected is not None:
if len(progs) != expected:
- fail(True, "%d BPF programs loaded, expected %d" %
- (len(progs), expected))
+ fail(True, "%d BPF programs loaded, expected %d\nLoaded Progs:\n%s" %
+ (len(progs), expected, pp.pformat(progs)))
return progs
def bpftool_map_list(expected=None, ns=""):
diff --git a/tools/testing/selftests/net/broadcast_ether_dst.sh b/tools/testing/selftests/net/broadcast_ether_dst.sh
new file mode 100755
index 000000000000..334a7eca8a80
--- /dev/null
+++ b/tools/testing/selftests/net/broadcast_ether_dst.sh
@@ -0,0 +1,83 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Author: Brett A C Sheffield <bacs@librecast.net>
+# Author: Oscar Maes <oscmaes92@gmail.com>
+#
+# Ensure the destination ethernet address is correctly set for
+# broadcast packets
+
+source lib.sh
+
+CLIENT_IP4="192.168.0.1"
+GW_IP4="192.168.0.2"
+
+setup() {
+ setup_ns CLIENT_NS SERVER_NS
+
+ ip -net "${SERVER_NS}" link add link1 type veth \
+ peer name link0 netns "${CLIENT_NS}"
+
+ ip -net "${CLIENT_NS}" link set link0 up
+ ip -net "${CLIENT_NS}" addr add "${CLIENT_IP4}"/24 dev link0
+
+ ip -net "${SERVER_NS}" link set link1 up
+
+ ip -net "${CLIENT_NS}" route add default via "${GW_IP4}"
+ ip netns exec "${CLIENT_NS}" arp -s "${GW_IP4}" 00:11:22:33:44:55
+}
+
+cleanup() {
+ rm -f "${CAPFILE}" "${OUTPUT}"
+ ip -net "${SERVER_NS}" link del link1
+ cleanup_ns "${CLIENT_NS}" "${SERVER_NS}"
+}
+
+test_broadcast_ether_dst() {
+ local rc=0
+ CAPFILE=$(mktemp -u cap.XXXXXXXXXX)
+ OUTPUT=$(mktemp -u out.XXXXXXXXXX)
+
+ echo "Testing ethernet broadcast destination"
+
+ # start tcpdump listening for icmp
+ # tcpdump will exit after receiving a single packet
+ # timeout will kill tcpdump if it is still running after 2s
+ timeout 2s ip netns exec "${CLIENT_NS}" \
+ tcpdump -i link0 -c 1 -w "${CAPFILE}" icmp &> "${OUTPUT}" &
+ pid=$!
+ slowwait 1 grep -qs "listening" "${OUTPUT}"
+
+ # send broadcast ping
+ ip netns exec "${CLIENT_NS}" \
+ ping -W0.01 -c1 -b 255.255.255.255 &> /dev/null
+
+ # wait for tcpdump to exit after receiving the packet
+ wait "${pid}"
+
+ # compare ethernet destination field to ff:ff:ff:ff:ff:ff
+ ether_dst=$(tcpdump -r "${CAPFILE}" -tnne 2>/dev/null | \
+ awk '{sub(/,/,"",$3); print $3}')
+ if [[ "${ether_dst}" == "ff:ff:ff:ff:ff:ff" ]]; then
+ echo "[ OK ]"
+ rc="${ksft_pass}"
+ else
+ echo "[FAIL] expected dst ether addr to be ff:ff:ff:ff:ff:ff," \
+ "got ${ether_dst}"
+ rc="${ksft_fail}"
+ fi
+
+ return "${rc}"
+}
+
+if [ ! -x "$(command -v tcpdump)" ]; then
+ echo "SKIP: Could not run test without tcpdump tool"
+ exit "${ksft_skip}"
+fi
+
+trap cleanup EXIT
+
+setup
+test_broadcast_ether_dst
+
+exit $?
diff --git a/tools/testing/selftests/net/broadcast_pmtu.sh b/tools/testing/selftests/net/broadcast_pmtu.sh
new file mode 100755
index 000000000000..726eb5d25839
--- /dev/null
+++ b/tools/testing/selftests/net/broadcast_pmtu.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Ensures broadcast route MTU is respected
+
+CLIENT_NS=$(mktemp -u client-XXXXXXXX)
+CLIENT_IP4="192.168.0.1/24"
+CLIENT_BROADCAST_ADDRESS="192.168.0.255"
+
+SERVER_NS=$(mktemp -u server-XXXXXXXX)
+SERVER_IP4="192.168.0.2/24"
+
+setup() {
+ ip netns add "${CLIENT_NS}"
+ ip netns add "${SERVER_NS}"
+
+ ip -net "${SERVER_NS}" link add link1 type veth peer name link0 netns "${CLIENT_NS}"
+
+ ip -net "${CLIENT_NS}" link set link0 up
+ ip -net "${CLIENT_NS}" link set link0 mtu 9000
+ ip -net "${CLIENT_NS}" addr add "${CLIENT_IP4}" dev link0
+
+ ip -net "${SERVER_NS}" link set link1 up
+ ip -net "${SERVER_NS}" link set link1 mtu 1500
+ ip -net "${SERVER_NS}" addr add "${SERVER_IP4}" dev link1
+
+ read -r -a CLIENT_BROADCAST_ENTRY <<< "$(ip -net "${CLIENT_NS}" route show table local type broadcast)"
+ ip -net "${CLIENT_NS}" route del "${CLIENT_BROADCAST_ENTRY[@]}"
+ ip -net "${CLIENT_NS}" route add "${CLIENT_BROADCAST_ENTRY[@]}" mtu 1500
+
+ ip net exec "${SERVER_NS}" sysctl -wq net.ipv4.icmp_echo_ignore_broadcasts=0
+}
+
+cleanup() {
+ ip -net "${SERVER_NS}" link del link1
+ ip netns del "${CLIENT_NS}"
+ ip netns del "${SERVER_NS}"
+}
+
+trap cleanup EXIT
+
+setup &&
+ echo "Testing for broadcast route MTU" &&
+ ip net exec "${CLIENT_NS}" ping -f -M want -q -c 1 -s 8000 -w 1 -b "${CLIENT_BROADCAST_ADDRESS}" > /dev/null 2>&1
+
+exit $?
+
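Aside, not part of the patch: ping's "-M want" maps to the IP_MTU_DISCOVER socket option with IP_PMTUDISC_WANT, which asks the kernel to fragment locally according to the route MTU (here, the broadcast route's MTU set in setup()). A sketch of the equivalent setsockopt call:

	#include <netinet/in.h>
	#include <sys/socket.h>

	/* Sketch: request route-MTU-aware local fragmentation on a socket. */
	static int set_pmtudisc_want(int fd)
	{
		int val = IP_PMTUDISC_WANT;

		return setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER,
				  &val, sizeof(val));
	}

With the lowered broadcast-route MTU in place, the 8000-byte ping above succeeds only if that MTU is honored.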
diff --git a/tools/testing/selftests/net/busy_poll_test.sh b/tools/testing/selftests/net/busy_poll_test.sh
index 7d2d40812074..5ec1c85c1623 100755
--- a/tools/testing/selftests/net/busy_poll_test.sh
+++ b/tools/testing/selftests/net/busy_poll_test.sh
@@ -27,6 +27,8 @@ NAPI_DEFER_HARD_IRQS=100
GRO_FLUSH_TIMEOUT=50000
SUSPEND_TIMEOUT=20000000
+NAPI_THREADED_MODE_BUSY_POLL=2
+
setup_ns()
{
set -e
@@ -62,6 +64,9 @@ cleanup_ns()
test_busypoll()
{
suspend_value=${1:-0}
+ napi_threaded_value=${2:-0}
+ prefer_busy_poll_value=${3:-$PREFER_BUSY_POLL}
+
tmp_file=$(mktemp)
out_file=$(mktemp)
@@ -73,10 +78,11 @@ test_busypoll()
-b${SERVER_IP} \
-m${MAX_EVENTS} \
-u${BUSY_POLL_USECS} \
- -P${PREFER_BUSY_POLL} \
+ -P${prefer_busy_poll_value} \
-g${BUSY_POLL_BUDGET} \
-i${NSIM_SV_IFIDX} \
-s${suspend_value} \
+ -t${napi_threaded_value} \
-o${out_file}&
wait_local_port_listen nssv ${SERVER_PORT} tcp
@@ -109,6 +115,15 @@ test_busypoll_with_suspend()
return $?
}
+test_busypoll_with_napi_threaded()
+{
+ # Enable only NAPI threaded busy-poll mode; set suspend timeout and
+ # prefer busy poll to 0.
+ test_busypoll 0 ${NAPI_THREADED_MODE_BUSY_POLL} 0
+
+ return $?
+}
+
###
### Code start
###
@@ -154,6 +169,13 @@ if [ $? -ne 0 ]; then
exit 1
fi
+test_busypoll_with_napi_threaded
+if [ $? -ne 0 ]; then
+ echo "test_busypoll_with_napi_threaded failed"
+ cleanup_ns
+ exit 1
+fi
+
echo "$NSIM_SV_FD:$NSIM_SV_IFIDX" > $NSIM_DEV_SYS_UNLINK
echo $NSIM_CL_ID > $NSIM_DEV_SYS_DEL
diff --git a/tools/testing/selftests/net/busy_poller.c b/tools/testing/selftests/net/busy_poller.c
index 04c7ff577bb8..3a81f9c94795 100644
--- a/tools/testing/selftests/net/busy_poller.c
+++ b/tools/testing/selftests/net/busy_poller.c
@@ -65,15 +65,16 @@ static uint32_t cfg_busy_poll_usecs;
static uint16_t cfg_busy_poll_budget;
static uint8_t cfg_prefer_busy_poll;
-/* IRQ params */
+/* NAPI params */
static uint32_t cfg_defer_hard_irqs;
static uint64_t cfg_gro_flush_timeout;
static uint64_t cfg_irq_suspend_timeout;
+static enum netdev_napi_threaded cfg_napi_threaded_poll = NETDEV_NAPI_THREADED_DISABLED;
static void usage(const char *filepath)
{
error(1, 0,
- "Usage: %s -p<port> -b<addr> -m<max_events> -u<busy_poll_usecs> -P<prefer_busy_poll> -g<busy_poll_budget> -o<outfile> -d<defer_hard_irqs> -r<gro_flush_timeout> -s<irq_suspend_timeout> -i<ifindex>",
+ "Usage: %s -p<port> -b<addr> -m<max_events> -u<busy_poll_usecs> -P<prefer_busy_poll> -g<busy_poll_budget> -o<outfile> -d<defer_hard_irqs> -r<gro_flush_timeout> -s<irq_suspend_timeout> -t<napi_threaded_poll> -i<ifindex>",
filepath);
}
@@ -86,7 +87,7 @@ static void parse_opts(int argc, char **argv)
if (argc <= 1)
usage(argv[0]);
- while ((c = getopt(argc, argv, "p:m:b:u:P:g:o:d:r:s:i:")) != -1) {
+ while ((c = getopt(argc, argv, "p:m:b:u:P:g:o:d:r:s:i:t:")) != -1) {
/* most options take integer values, except o and b, so reduce
* code duplication a bit for the common case by calling
* strtoull here and leave bounds checking and casting per
@@ -168,6 +169,12 @@ static void parse_opts(int argc, char **argv)
cfg_ifindex = (int)tmp;
break;
+ case 't':
+ if (tmp > 2)
+ error(1, ERANGE, "napi threaded poll value must be 0-2");
+
+ cfg_napi_threaded_poll = (enum netdev_napi_threaded)tmp;
+ break;
}
}
@@ -247,6 +254,9 @@ static void setup_queue(void)
netdev_napi_set_req_set_irq_suspend_timeout(set_req,
cfg_irq_suspend_timeout);
+ if (cfg_napi_threaded_poll)
+ netdev_napi_set_req_set_threaded(set_req, cfg_napi_threaded_poll);
+
if (netdev_napi_set(ys, set_req))
error(1, 0, "can't set NAPI params: %s\n", yerr.msg);
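
Aside, not part of the patch: the -t case above range-checks the parsed value before casting it to the enum. The same guard, sketched standalone with hypothetical names:

	#include <errno.h>
	#include <error.h>
	#include <stdlib.h>

	enum example_mode {
		MODE_DISABLED = 0,
		MODE_ENABLED = 1,
		MODE_BUSY_POLL = 2,
	};

	/* Parse a mode argument, rejecting anything outside 0-2. */
	static enum example_mode parse_mode(const char *arg)
	{
		unsigned long long tmp = strtoull(arg, NULL, 0);

		if (tmp > MODE_BUSY_POLL)
			error(1, ERANGE, "mode must be 0-2");
		return (enum example_mode)tmp;
	}

Checking against the enum's highest member rather than a bare 2 keeps the bound in one place if more modes are ever added.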
diff --git a/tools/testing/selftests/net/can/config b/tools/testing/selftests/net/can/config
new file mode 100644
index 000000000000..188f79796670
--- /dev/null
+++ b/tools/testing/selftests/net/can/config
@@ -0,0 +1,3 @@
+CONFIG_CAN=m
+CONFIG_CAN_DEV=m
+CONFIG_CAN_VCAN=m
diff --git a/tools/testing/selftests/net/can/test_raw_filter.c b/tools/testing/selftests/net/can/test_raw_filter.c
index 4101c36390fd..bb8ae8854273 100644
--- a/tools/testing/selftests/net/can/test_raw_filter.c
+++ b/tools/testing/selftests/net/can/test_raw_filter.c
@@ -19,7 +19,7 @@
#include <linux/can.h>
#include <linux/can/raw.h>
-#include "../../kselftest_harness.h"
+#include "kselftest_harness.h"
#define ID 0x123
diff --git a/tools/testing/selftests/net/cmsg_sender.c b/tools/testing/selftests/net/cmsg_sender.c
index a825e628aee7..67a72b1a2f3d 100644
--- a/tools/testing/selftests/net/cmsg_sender.c
+++ b/tools/testing/selftests/net/cmsg_sender.c
@@ -16,7 +16,7 @@
#include <linux/udp.h>
#include <sys/socket.h>
-#include "../kselftest.h"
+#include "kselftest.h"
enum {
ERN_SUCCESS = 0,
@@ -491,7 +491,8 @@ int main(int argc, char *argv[])
if (err) {
fprintf(stderr, "Can't resolve address [%s]:%s\n",
opt.host, opt.service);
- return ERN_SOCK_CREATE;
+ err = ERN_SOCK_CREATE;
+ goto err_free_buff;
}
if (ai->ai_family == AF_INET6 && opt.sock.proto == IPPROTO_ICMP)
@@ -500,8 +501,8 @@ int main(int argc, char *argv[])
fd = socket(ai->ai_family, opt.sock.type, opt.sock.proto);
if (fd < 0) {
fprintf(stderr, "Can't open socket: %s\n", strerror(errno));
- freeaddrinfo(ai);
- return ERN_RESOLVE;
+ err = ERN_RESOLVE;
+ goto err_free_info;
}
if (opt.sock.proto == IPPROTO_ICMP) {
@@ -574,6 +575,9 @@ int main(int argc, char *argv[])
err_out:
close(fd);
+err_free_info:
freeaddrinfo(ai);
+err_free_buff:
+ free(buf);
return err;
}
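
Aside, not part of the patch: the cmsg_sender changes above convert early returns into a label chain so each failure path frees exactly what has been allocated so far. The idiom, sketched standalone with hypothetical names:

	#include <stdlib.h>
	#include <string.h>

	/* Sketch of the goto-unwind idiom: labels are ordered so that a
	 * jump releases only the resources already acquired. */
	static int example(void)
	{
		char *buf, *info;
		int err = 0;

		buf = malloc(64);
		if (!buf)
			return -1;

		info = strdup("resolved");
		if (!info) {
			err = -1;
			goto err_free_buf;
		}

		/* ... work that may fail sets err and jumps below ... */

		free(info);
	err_free_buf:
		free(buf);
		return err;
	}

The success path falls through the same labels, so the cleanup is written only once.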
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index 3cfef5153823..1e1f253118f5 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -1,118 +1,130 @@
-CONFIG_USER_NS=y
-CONFIG_NET_NS=y
+CONFIG_AMT=m
+CONFIG_BAREUDP=m
CONFIG_BONDING=m
CONFIG_BPF_SYSCALL=y
-CONFIG_TEST_BPF=m
-CONFIG_NUMA=y
-CONFIG_RPS=y
-CONFIG_SYSFS=y
-CONFIG_PROC_SYSCTL=y
-CONFIG_NET_VRF=y
-CONFIG_NET_L3_MASTER_DEV=y
-CONFIG_IPV6=y
-CONFIG_IPV6_MULTIPLE_TABLES=y
-CONFIG_VETH=y
-CONFIG_NET_IPVTI=y
-CONFIG_IPV6_VTI=y
-CONFIG_DUMMY=y
-CONFIG_BRIDGE_VLAN_FILTERING=y
CONFIG_BRIDGE=y
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_CAN=m
+CONFIG_CAN_DEV=m
+CONFIG_CAN_VXCAN=m
+CONFIG_CRYPTO_ARIA=y
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SM4_GENERIC=y
CONFIG_DEBUG_INFO_BTF=y
CONFIG_DEBUG_INFO_BTF_MODULES=n
-CONFIG_VLAN_8021Q=y
+CONFIG_DUMMY=y
CONFIG_GENEVE=m
CONFIG_IFB=y
CONFIG_INET_DIAG=y
CONFIG_INET_ESP=y
CONFIG_INET_ESP_OFFLOAD=y
-CONFIG_NET_FOU=y
-CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_NETFILTER=y
-CONFIG_NETFILTER_ADVANCED=y
-CONFIG_NF_CONNTRACK=m
-CONFIG_IPV6_MROUTE=y
-CONFIG_IPV6_SIT=y
-CONFIG_NF_NAT=m
+CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_IPTABLES=m
-CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP6_NF_IPTABLES_LEGACY=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_IPTABLES_LEGACY=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_NAT=m
CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_SCTP=m
+CONFIG_IPV6=y
CONFIG_IPV6_GRE=m
+CONFIG_IPV6_ILA=m
+CONFIG_IPV6_IOAM6_LWTUNNEL=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_RPL_LWTUNNEL=y
CONFIG_IPV6_SEG6_LWTUNNEL=y
+CONFIG_IPV6_SIT=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPVLAN=m
+CONFIG_KALLSYMS=y
+CONFIG_L2TP=m
CONFIG_L2TP_ETH=m
CONFIG_L2TP_IP=m
-CONFIG_L2TP=m
CONFIG_L2TP_V3=y
CONFIG_MACSEC=m
CONFIG_MACVLAN=y
CONFIG_MACVTAP=y
CONFIG_MPLS=y
+CONFIG_MPLS_IPTUNNEL=m
+CONFIG_MPLS_ROUTING=m
CONFIG_MPTCP=y
-CONFIG_NF_TABLES=m
-CONFIG_NF_TABLES_IPV6=y
-CONFIG_NF_TABLES_IPV4=y
-CONFIG_NFT_NAT=m
-CONFIG_NETFILTER_XT_MATCH_LENGTH=m
CONFIG_NET_ACT_CSUM=m
CONFIG_NET_ACT_CT=m
CONFIG_NET_ACT_GACT=m
+CONFIG_NET_ACT_MIRRED=m
CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_TUNNEL_KEY=m
CONFIG_NET_CLS_BASIC=m
CONFIG_NET_CLS_BPF=m
+CONFIG_NET_CLS_FLOWER=m
CONFIG_NET_CLS_MATCHALL=m
CONFIG_NET_CLS_U32=m
-CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NETDEVSIM=m
+CONFIG_NET_DROP_MONITOR=m
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_ADVANCED=y
+CONFIG_NETFILTER_XTABLES_LEGACY=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_NAT=m
+CONFIG_NETFILTER_XT_TARGET_HL=m
+CONFIG_NET_FOU=y
+CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPIP=y
+CONFIG_NET_IPVTI=y
+CONFIG_NETKIT=y
+CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_NET_NS=y
+CONFIG_NET_PKTGEN=m
+CONFIG_NET_SCH_ETF=m
+CONFIG_NET_SCH_FQ=m
CONFIG_NET_SCH_FQ_CODEL=m
CONFIG_NET_SCH_HTB=m
-CONFIG_NET_SCH_FQ=m
-CONFIG_NET_SCH_ETF=m
+CONFIG_NET_SCH_INGRESS=m
CONFIG_NET_SCH_NETEM=y
CONFIG_NET_SCH_PRIO=m
-CONFIG_NFT_COMPAT=m
+CONFIG_NET_VRF=y
+CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_OVS=y
CONFIG_NF_FLOW_TABLE=m
+CONFIG_NF_NAT=m
+CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_IPV4=y
+CONFIG_NF_TABLES_IPV6=y
+CONFIG_NFT_COMPAT=m
+CONFIG_NFT_NAT=m
+CONFIG_NUMA=y
CONFIG_OPENVSWITCH=m
CONFIG_OPENVSWITCH_GENEVE=m
CONFIG_OPENVSWITCH_GRE=m
CONFIG_OPENVSWITCH_VXLAN=m
+CONFIG_PROC_SYSCTL=y
CONFIG_PSAMPLE=m
+CONFIG_RPS=y
+CONFIG_SYSFS=y
CONFIG_TCP_MD5SIG=y
CONFIG_TEST_BLACKHOLE_DEV=m
-CONFIG_KALLSYMS=y
+CONFIG_TEST_BPF=m
CONFIG_TLS=m
CONFIG_TRACEPOINTS=y
-CONFIG_NET_DROP_MONITOR=m
-CONFIG_NETDEVSIM=m
-CONFIG_MPLS_ROUTING=m
-CONFIG_MPLS_IPTUNNEL=m
-CONFIG_NET_SCH_INGRESS=m
-CONFIG_NET_CLS_FLOWER=m
-CONFIG_NET_ACT_TUNNEL_KEY=m
-CONFIG_NET_ACT_MIRRED=m
-CONFIG_BAREUDP=m
-CONFIG_IPV6_IOAM6_LWTUNNEL=y
-CONFIG_CRYPTO_SM4_GENERIC=y
-CONFIG_AMT=m
CONFIG_TUN=y
+CONFIG_USER_NS=y
+CONFIG_VETH=y
+CONFIG_VLAN_8021Q=y
CONFIG_VXLAN=m
-CONFIG_IP_SCTP=m
-CONFIG_NETFILTER_XT_MATCH_POLICY=m
-CONFIG_CRYPTO_ARIA=y
CONFIG_XFRM_INTERFACE=m
CONFIG_XFRM_USER=m
-CONFIG_IP_NF_MATCH_RPFILTER=m
-CONFIG_IP6_NF_MATCH_RPFILTER=m
-CONFIG_IPVLAN=m
-CONFIG_CAN=m
-CONFIG_CAN_DEV=m
-CONFIG_CAN_VXCAN=m
-CONFIG_NETKIT=y
-CONFIG_NET_PKTGEN=m
-CONFIG_IPV6_ILA=m
-CONFIG_IPV6_RPL_LWTUNNEL=y
diff --git a/tools/testing/selftests/net/epoll_busy_poll.c b/tools/testing/selftests/net/epoll_busy_poll.c
index 16e457c2f877..adf8dd0b5e0b 100644
--- a/tools/testing/selftests/net/epoll_busy_poll.c
+++ b/tools/testing/selftests/net/epoll_busy_poll.c
@@ -23,7 +23,7 @@
#include <sys/ioctl.h>
#include <sys/socket.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
/* if the headers haven't been updated, we need to define some things */
#if !defined(EPOLL_IOC_TYPE)
diff --git a/tools/testing/selftests/net/fcnal-ipv4.sh b/tools/testing/selftests/net/fcnal-ipv4.sh
new file mode 100755
index 000000000000..82f9c867c3e8
--- /dev/null
+++ b/tools/testing/selftests/net/fcnal-ipv4.sh
@@ -0,0 +1,2 @@
+#!/bin/sh
+./fcnal-test.sh -t ipv4
diff --git a/tools/testing/selftests/net/fcnal-ipv6.sh b/tools/testing/selftests/net/fcnal-ipv6.sh
new file mode 100755
index 000000000000..ab1fc7aa3caf
--- /dev/null
+++ b/tools/testing/selftests/net/fcnal-ipv6.sh
@@ -0,0 +1,2 @@
+#!/bin/sh
+./fcnal-test.sh -t ipv6
diff --git a/tools/testing/selftests/net/fcnal-other.sh b/tools/testing/selftests/net/fcnal-other.sh
new file mode 100755
index 000000000000..a840cf80b32e
--- /dev/null
+++ b/tools/testing/selftests/net/fcnal-other.sh
@@ -0,0 +1,2 @@
+#!/bin/sh
+./fcnal-test.sh -t other
diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh
index 4fcc38907e48..844a580ae74e 100755
--- a/tools/testing/selftests/net/fcnal-test.sh
+++ b/tools/testing/selftests/net/fcnal-test.sh
@@ -189,7 +189,7 @@ show_hint()
kill_procs()
{
killall nettest ping ping6 >/dev/null 2>&1
- sleep 1
+ slowwait 2 sh -c 'test -z "$(pgrep '"'^(nettest|ping|ping6)$'"')"'
}
set_ping_group()
@@ -424,6 +424,8 @@ create_ns()
ip netns exec ${ns} sysctl -qw net.ipv6.conf.all.keep_addr_on_down=1
ip netns exec ${ns} sysctl -qw net.ipv6.conf.all.forwarding=1
ip netns exec ${ns} sysctl -qw net.ipv6.conf.default.forwarding=1
+ ip netns exec ${ns} sysctl -qw net.ipv6.conf.default.accept_dad=0
+ ip netns exec ${ns} sysctl -qw net.ipv6.conf.all.accept_dad=0
}
# create veth pair to connect namespaces and apply addresses.
@@ -875,7 +877,7 @@ ipv4_tcp_md5_novrf()
# basic use case
log_start
run_cmd nettest -s -M ${MD5_PW} -m ${NSB_IP} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
log_test $? 0 "MD5: Single address config"
@@ -883,7 +885,7 @@ ipv4_tcp_md5_novrf()
log_start
show_hint "Should timeout due to MD5 mismatch"
run_cmd nettest -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
log_test $? 2 "MD5: Server no config, client uses password"
@@ -891,7 +893,7 @@ ipv4_tcp_md5_novrf()
log_start
show_hint "Should timeout since client uses wrong password"
run_cmd nettest -s -M ${MD5_PW} -m ${NSB_IP} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_WRONG_PW}
log_test $? 2 "MD5: Client uses wrong password"
@@ -899,7 +901,7 @@ ipv4_tcp_md5_novrf()
log_start
show_hint "Should timeout due to MD5 mismatch"
run_cmd nettest -s -M ${MD5_PW} -m ${NSB_LO_IP} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
log_test $? 2 "MD5: Client address does not match address configured with password"
@@ -910,7 +912,7 @@ ipv4_tcp_md5_novrf()
# client in prefix
log_start
run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
log_test $? 0 "MD5: Prefix config"
@@ -918,7 +920,7 @@ ipv4_tcp_md5_novrf()
log_start
show_hint "Should timeout since client uses wrong password"
run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_WRONG_PW}
log_test $? 2 "MD5: Prefix config, client uses wrong password"
@@ -926,7 +928,7 @@ ipv4_tcp_md5_novrf()
log_start
show_hint "Should timeout due to MD5 mismatch"
run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -c ${NSB_LO_IP} -r ${NSA_IP} -X ${MD5_PW}
log_test $? 2 "MD5: Prefix config, client address not in configured prefix"
}
@@ -943,7 +945,7 @@ ipv4_tcp_md5()
# basic use case
log_start
run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NSB_IP} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
log_test $? 0 "MD5: VRF: Single address config"
@@ -951,7 +953,7 @@ ipv4_tcp_md5()
log_start
show_hint "Should timeout since server does not have MD5 auth"
run_cmd nettest -s -I ${VRF} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
log_test $? 2 "MD5: VRF: Server no config, client uses password"
@@ -959,7 +961,7 @@ ipv4_tcp_md5()
log_start
show_hint "Should timeout since client uses wrong password"
run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NSB_IP} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_WRONG_PW}
log_test $? 2 "MD5: VRF: Client uses wrong password"
@@ -967,7 +969,7 @@ ipv4_tcp_md5()
log_start
show_hint "Should timeout since server config differs from client"
run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NSB_LO_IP} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
log_test $? 2 "MD5: VRF: Client address does not match address configured with password"
@@ -978,7 +980,7 @@ ipv4_tcp_md5()
# client in prefix
log_start
run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
log_test $? 0 "MD5: VRF: Prefix config"
@@ -986,7 +988,7 @@ ipv4_tcp_md5()
log_start
show_hint "Should timeout since client uses wrong password"
run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_WRONG_PW}
log_test $? 2 "MD5: VRF: Prefix config, client uses wrong password"
@@ -994,7 +996,7 @@ ipv4_tcp_md5()
log_start
show_hint "Should timeout since client address is outside of prefix"
run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -c ${NSB_LO_IP} -r ${NSA_IP} -X ${MD5_PW}
log_test $? 2 "MD5: VRF: Prefix config, client address not in configured prefix"
@@ -1005,14 +1007,14 @@ ipv4_tcp_md5()
log_start
run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NSB_IP} &
run_cmd nettest -s -M ${MD5_WRONG_PW} -m ${NSB_IP} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
log_test $? 0 "MD5: VRF: Single address config in default VRF and VRF, conn in VRF"
log_start
run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NSB_IP} &
run_cmd nettest -s -M ${MD5_WRONG_PW} -m ${NSB_IP} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsc nettest -r ${NSA_IP} -X ${MD5_WRONG_PW}
log_test $? 0 "MD5: VRF: Single address config in default VRF and VRF, conn in default VRF"
@@ -1020,7 +1022,7 @@ ipv4_tcp_md5()
show_hint "Should timeout since client in default VRF uses VRF password"
run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NSB_IP} &
run_cmd nettest -s -M ${MD5_WRONG_PW} -m ${NSB_IP} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsc nettest -r ${NSA_IP} -X ${MD5_PW}
log_test $? 2 "MD5: VRF: Single address config in default VRF and VRF, conn in default VRF with VRF pw"
@@ -1028,21 +1030,21 @@ ipv4_tcp_md5()
show_hint "Should timeout since client in VRF uses default VRF password"
run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NSB_IP} &
run_cmd nettest -s -M ${MD5_WRONG_PW} -m ${NSB_IP} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_WRONG_PW}
log_test $? 2 "MD5: VRF: Single address config in default VRF and VRF, conn in VRF with default VRF pw"
log_start
run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET} &
run_cmd nettest -s -M ${MD5_WRONG_PW} -m ${NS_NET} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
log_test $? 0 "MD5: VRF: Prefix config in default VRF and VRF, conn in VRF"
log_start
run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET} &
run_cmd nettest -s -M ${MD5_WRONG_PW} -m ${NS_NET} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsc nettest -r ${NSA_IP} -X ${MD5_WRONG_PW}
log_test $? 0 "MD5: VRF: Prefix config in default VRF and VRF, conn in default VRF"
@@ -1050,7 +1052,7 @@ ipv4_tcp_md5()
show_hint "Should timeout since client in default VRF uses VRF password"
run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET} &
run_cmd nettest -s -M ${MD5_WRONG_PW} -m ${NS_NET} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsc nettest -r ${NSA_IP} -X ${MD5_PW}
log_test $? 2 "MD5: VRF: Prefix config in default VRF and VRF, conn in default VRF with VRF pw"
@@ -1058,7 +1060,7 @@ ipv4_tcp_md5()
show_hint "Should timeout since client in VRF uses default VRF password"
run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET} &
run_cmd nettest -s -M ${MD5_WRONG_PW} -m ${NS_NET} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_WRONG_PW}
log_test $? 2 "MD5: VRF: Prefix config in default VRF and VRF, conn in VRF with default VRF pw"
@@ -1082,14 +1084,14 @@ test_ipv4_md5_vrf__vrf_server__no_bind_ifindex()
log_start
show_hint "Simulates applications using VRF without TCP_MD5SIG_FLAG_IFINDEX"
run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET} --no-bind-key-ifindex &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
log_test $? 0 "MD5: VRF: VRF-bound server, unbound key accepts connection"
log_start
show_hint "Binding both the socket and the key is not required but it works"
run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET} --force-bind-key-ifindex &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
log_test $? 0 "MD5: VRF: VRF-bound server, bound key accepts connection"
}
@@ -1103,25 +1105,25 @@ test_ipv4_md5_vrf__global_server__bind_ifindex0()
log_start
run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} --force-bind-key-ifindex &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
log_test $? 2 "MD5: VRF: Global server, Key bound to ifindex=0 rejects VRF connection"
log_start
run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} --force-bind-key-ifindex &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsc nettest -r ${NSA_IP} -X ${MD5_PW}
log_test $? 0 "MD5: VRF: Global server, key bound to ifindex=0 accepts non-VRF connection"
log_start
run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} --no-bind-key-ifindex &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
log_test $? 0 "MD5: VRF: Global server, key not bound to ifindex accepts VRF connection"
log_start
run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} --no-bind-key-ifindex &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsc nettest -r ${NSA_IP} -X ${MD5_PW}
log_test $? 0 "MD5: VRF: Global server, key not bound to ifindex accepts non-VRF connection"
@@ -1193,7 +1195,7 @@ ipv4_tcp_novrf()
do
log_start
run_cmd nettest -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -r ${a}
log_test_addr ${a} $? 0 "Global server"
done
@@ -1201,7 +1203,7 @@ ipv4_tcp_novrf()
a=${NSA_IP}
log_start
run_cmd nettest -s -I ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -r ${a}
log_test_addr ${a} $? 0 "Device server"
@@ -1221,13 +1223,13 @@ ipv4_tcp_novrf()
do
log_start
run_cmd_nsb nettest -s &
- sleep 1
+ wait_local_port_listen ${NSB} 12345 tcp
run_cmd nettest -r ${a} -0 ${NSA_IP}
log_test_addr ${a} $? 0 "Client"
log_start
run_cmd_nsb nettest -s &
- sleep 1
+ wait_local_port_listen ${NSB} 12345 tcp
run_cmd nettest -r ${a} -d ${NSA_DEV}
log_test_addr ${a} $? 0 "Client, device bind"
@@ -1249,7 +1251,7 @@ ipv4_tcp_novrf()
do
log_start
run_cmd nettest -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest -r ${a} -0 ${a} -1 ${a}
log_test_addr ${a} $? 0 "Global server, local connection"
done
@@ -1257,7 +1259,7 @@ ipv4_tcp_novrf()
a=${NSA_IP}
log_start
run_cmd nettest -s -I ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest -r ${a} -0 ${a}
log_test_addr ${a} $? 0 "Device server, unbound client, local connection"
@@ -1266,7 +1268,7 @@ ipv4_tcp_novrf()
log_start
show_hint "Should fail 'Connection refused' since addresses on loopback are out of device scope"
run_cmd nettest -s -I ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest -r ${a}
log_test_addr ${a} $? 1 "Device server, unbound client, local connection"
done
@@ -1274,7 +1276,7 @@ ipv4_tcp_novrf()
a=${NSA_IP}
log_start
run_cmd nettest -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest -r ${a} -0 ${a} -d ${NSA_DEV}
log_test_addr ${a} $? 0 "Global server, device client, local connection"
@@ -1283,7 +1285,7 @@ ipv4_tcp_novrf()
log_start
show_hint "Should fail 'No route to host' since addresses on loopback are out of device scope"
run_cmd nettest -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest -r ${a} -d ${NSA_DEV}
log_test_addr ${a} $? 1 "Global server, device client, local connection"
done
@@ -1291,7 +1293,7 @@ ipv4_tcp_novrf()
a=${NSA_IP}
log_start
run_cmd nettest -s -I ${NSA_DEV} -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest -d ${NSA_DEV} -r ${a} -0 ${a}
log_test_addr ${a} $? 0 "Device server, device client, local connection"
@@ -1323,19 +1325,19 @@ ipv4_tcp_vrf()
log_start
show_hint "Should fail 'Connection refused' since global server with VRF is disabled"
run_cmd nettest -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -r ${a}
log_test_addr ${a} $? 1 "Global server"
log_start
run_cmd nettest -s -I ${VRF} -3 ${VRF} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -r ${a}
log_test_addr ${a} $? 0 "VRF server"
log_start
run_cmd nettest -s -I ${NSA_DEV} -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -r ${a}
log_test_addr ${a} $? 0 "Device server"
@@ -1352,7 +1354,7 @@ ipv4_tcp_vrf()
log_start
show_hint "Should fail 'Connection refused' since global server with VRF is disabled"
run_cmd nettest -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest -r ${a} -d ${NSA_DEV}
log_test_addr ${a} $? 1 "Global server, local connection"
@@ -1374,14 +1376,14 @@ ipv4_tcp_vrf()
log_start
show_hint "client socket should be bound to VRF"
run_cmd nettest -s -3 ${VRF} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -r ${a}
log_test_addr ${a} $? 0 "Global server"
log_start
show_hint "client socket should be bound to VRF"
run_cmd nettest -s -I ${VRF} -3 ${VRF} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -r ${a}
log_test_addr ${a} $? 0 "VRF server"
@@ -1396,7 +1398,7 @@ ipv4_tcp_vrf()
log_start
show_hint "client socket should be bound to device"
run_cmd nettest -s -I ${NSA_DEV} -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -r ${a}
log_test_addr ${a} $? 0 "Device server"
@@ -1406,7 +1408,7 @@ ipv4_tcp_vrf()
log_start
show_hint "Should fail 'Connection refused' since client is not bound to VRF"
run_cmd nettest -s -I ${VRF} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest -r ${a}
log_test_addr ${a} $? 1 "Global server, local connection"
done
@@ -1418,13 +1420,13 @@ ipv4_tcp_vrf()
do
log_start
run_cmd_nsb nettest -s &
- sleep 1
+ wait_local_port_listen ${NSB} 12345 tcp
run_cmd nettest -r ${a} -d ${VRF}
log_test_addr ${a} $? 0 "Client, VRF bind"
log_start
run_cmd_nsb nettest -s &
- sleep 1
+ wait_local_port_listen ${NSB} 12345 tcp
run_cmd nettest -r ${a} -d ${NSA_DEV}
log_test_addr ${a} $? 0 "Client, device bind"
@@ -1443,7 +1445,7 @@ ipv4_tcp_vrf()
do
log_start
run_cmd nettest -s -I ${VRF} -3 ${VRF} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest -r ${a} -d ${VRF} -0 ${a}
log_test_addr ${a} $? 0 "VRF server, VRF client, local connection"
done
@@ -1451,26 +1453,26 @@ ipv4_tcp_vrf()
a=${NSA_IP}
log_start
run_cmd nettest -s -I ${VRF} -3 ${VRF} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest -r ${a} -d ${NSA_DEV} -0 ${a}
log_test_addr ${a} $? 0 "VRF server, device client, local connection"
log_start
show_hint "Should fail 'No route to host' since client is out of VRF scope"
run_cmd nettest -s -I ${VRF} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest -r ${a}
log_test_addr ${a} $? 1 "VRF server, unbound client, local connection"
log_start
run_cmd nettest -s -I ${NSA_DEV} -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest -r ${a} -d ${VRF} -0 ${a}
log_test_addr ${a} $? 0 "Device server, VRF client, local connection"
log_start
run_cmd nettest -s -I ${NSA_DEV} -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest -r ${a} -d ${NSA_DEV} -0 ${a}
log_test_addr ${a} $? 0 "Device server, device client, local connection"
}
@@ -1509,7 +1511,7 @@ ipv4_udp_novrf()
do
log_start
run_cmd nettest -D -s -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd_nsb nettest -D -r ${a}
log_test_addr ${a} $? 0 "Global server"
@@ -1522,7 +1524,7 @@ ipv4_udp_novrf()
a=${NSA_IP}
log_start
run_cmd nettest -D -I ${NSA_DEV} -s -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd_nsb nettest -D -r ${a}
log_test_addr ${a} $? 0 "Device server"
@@ -1533,31 +1535,31 @@ ipv4_udp_novrf()
do
log_start
run_cmd_nsb nettest -D -s &
- sleep 1
+ wait_local_port_listen ${NSB} 12345 udp
run_cmd nettest -D -r ${a} -0 ${NSA_IP}
log_test_addr ${a} $? 0 "Client"
log_start
run_cmd_nsb nettest -D -s &
- sleep 1
+ wait_local_port_listen ${NSB} 12345 udp
run_cmd nettest -D -r ${a} -d ${NSA_DEV} -0 ${NSA_IP}
log_test_addr ${a} $? 0 "Client, device bind"
log_start
run_cmd_nsb nettest -D -s &
- sleep 1
+ wait_local_port_listen ${NSB} 12345 udp
run_cmd nettest -D -r ${a} -d ${NSA_DEV} -C -0 ${NSA_IP}
log_test_addr ${a} $? 0 "Client, device send via cmsg"
log_start
run_cmd_nsb nettest -D -s &
- sleep 1
+ wait_local_port_listen ${NSB} 12345 udp
run_cmd nettest -D -r ${a} -d ${NSA_DEV} -S -0 ${NSA_IP}
log_test_addr ${a} $? 0 "Client, device bind via IP_UNICAST_IF"
log_start
run_cmd_nsb nettest -D -s &
- sleep 1
+ wait_local_port_listen ${NSB} 12345 udp
run_cmd nettest -D -r ${a} -d ${NSA_DEV} -S -0 ${NSA_IP} -U
log_test_addr ${a} $? 0 "Client, device bind via IP_UNICAST_IF, with connect()"
@@ -1580,7 +1582,7 @@ ipv4_udp_novrf()
do
log_start
run_cmd nettest -D -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -D -r ${a} -0 ${a} -1 ${a}
log_test_addr ${a} $? 0 "Global server, local connection"
done
@@ -1588,7 +1590,7 @@ ipv4_udp_novrf()
a=${NSA_IP}
log_start
run_cmd nettest -s -D -I ${NSA_DEV} -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -D -r ${a}
log_test_addr ${a} $? 0 "Device server, unbound client, local connection"
@@ -1597,7 +1599,7 @@ ipv4_udp_novrf()
log_start
show_hint "Should fail 'Connection refused' since address is out of device scope"
run_cmd nettest -s -D -I ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -D -r ${a}
log_test_addr ${a} $? 1 "Device server, unbound client, local connection"
done
@@ -1605,25 +1607,25 @@ ipv4_udp_novrf()
a=${NSA_IP}
log_start
run_cmd nettest -s -D &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -D -d ${NSA_DEV} -r ${a}
log_test_addr ${a} $? 0 "Global server, device client, local connection"
log_start
run_cmd nettest -s -D &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -D -d ${NSA_DEV} -C -r ${a}
log_test_addr ${a} $? 0 "Global server, device send via cmsg, local connection"
log_start
run_cmd nettest -s -D &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -D -d ${NSA_DEV} -S -r ${a}
log_test_addr ${a} $? 0 "Global server, device client via IP_UNICAST_IF, local connection"
log_start
run_cmd nettest -s -D &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -D -d ${NSA_DEV} -S -r ${a} -U
log_test_addr ${a} $? 0 "Global server, device client via IP_UNICAST_IF, local connection, with connect()"
@@ -1636,28 +1638,28 @@ ipv4_udp_novrf()
log_start
show_hint "Should fail since addresses on loopback are out of device scope"
run_cmd nettest -D -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -D -r ${a} -d ${NSA_DEV}
log_test_addr ${a} $? 2 "Global server, device client, local connection"
log_start
show_hint "Should fail since addresses on loopback are out of device scope"
run_cmd nettest -D -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -D -r ${a} -d ${NSA_DEV} -C
log_test_addr ${a} $? 1 "Global server, device send via cmsg, local connection"
log_start
show_hint "Should fail since addresses on loopback are out of device scope"
run_cmd nettest -D -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -D -r ${a} -d ${NSA_DEV} -S
log_test_addr ${a} $? 1 "Global server, device client via IP_UNICAST_IF, local connection"
log_start
show_hint "Should fail since addresses on loopback are out of device scope"
run_cmd nettest -D -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -D -r ${a} -d ${NSA_DEV} -S -U
log_test_addr ${a} $? 1 "Global server, device client via IP_UNICAST_IF, local connection, with connect()"
@@ -1667,7 +1669,7 @@ ipv4_udp_novrf()
a=${NSA_IP}
log_start
run_cmd nettest -D -s -I ${NSA_DEV} -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -D -d ${NSA_DEV} -r ${a} -0 ${a}
log_test_addr ${a} $? 0 "Device server, device client, local conn"
@@ -1709,19 +1711,19 @@ ipv4_udp_vrf()
log_start
show_hint "Fails because ingress is in a VRF and global server is disabled"
run_cmd nettest -D -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd_nsb nettest -D -r ${a}
log_test_addr ${a} $? 1 "Global server"
log_start
run_cmd nettest -D -I ${VRF} -s -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd_nsb nettest -D -r ${a}
log_test_addr ${a} $? 0 "VRF server"
log_start
run_cmd nettest -D -I ${NSA_DEV} -s -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd_nsb nettest -D -r ${a}
log_test_addr ${a} $? 0 "Enslaved device server"
@@ -1733,7 +1735,7 @@ ipv4_udp_vrf()
log_start
show_hint "Should fail 'Connection refused' since global server is out of scope"
run_cmd nettest -D -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -D -d ${VRF} -r ${a}
log_test_addr ${a} $? 1 "Global server, VRF client, local connection"
done
@@ -1741,26 +1743,26 @@ ipv4_udp_vrf()
a=${NSA_IP}
log_start
run_cmd nettest -s -D -I ${VRF} -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -D -d ${VRF} -r ${a}
log_test_addr ${a} $? 0 "VRF server, VRF client, local conn"
log_start
run_cmd nettest -s -D -I ${VRF} -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -D -d ${NSA_DEV} -r ${a}
log_test_addr ${a} $? 0 "VRF server, enslaved device client, local connection"
a=${NSA_IP}
log_start
run_cmd nettest -s -D -I ${NSA_DEV} -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -D -d ${VRF} -r ${a}
log_test_addr ${a} $? 0 "Enslaved device server, VRF client, local conn"
log_start
run_cmd nettest -s -D -I ${NSA_DEV} -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -D -d ${NSA_DEV} -r ${a}
log_test_addr ${a} $? 0 "Enslaved device server, device client, local conn"
@@ -1775,19 +1777,19 @@ ipv4_udp_vrf()
do
log_start
run_cmd nettest -D -s -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd_nsb nettest -D -r ${a}
log_test_addr ${a} $? 0 "Global server"
log_start
run_cmd nettest -D -I ${VRF} -s -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd_nsb nettest -D -r ${a}
log_test_addr ${a} $? 0 "VRF server"
log_start
run_cmd nettest -D -I ${NSA_DEV} -s -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd_nsb nettest -D -r ${a}
log_test_addr ${a} $? 0 "Enslaved device server"
@@ -1802,13 +1804,13 @@ ipv4_udp_vrf()
#
log_start
run_cmd_nsb nettest -D -s &
- sleep 1
+ wait_local_port_listen ${NSB} 12345 udp
run_cmd nettest -d ${VRF} -D -r ${NSB_IP} -1 ${NSA_IP}
log_test $? 0 "VRF client"
log_start
run_cmd_nsb nettest -D -s &
- sleep 1
+ wait_local_port_listen ${NSB} 12345 udp
run_cmd nettest -d ${NSA_DEV} -D -r ${NSB_IP} -1 ${NSA_IP}
log_test $? 0 "Enslaved device client"
@@ -1829,31 +1831,31 @@ ipv4_udp_vrf()
a=${NSA_IP}
log_start
run_cmd nettest -D -s -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -D -d ${VRF} -r ${a}
log_test_addr ${a} $? 0 "Global server, VRF client, local conn"
log_start
run_cmd nettest -s -D -I ${VRF} -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -D -d ${VRF} -r ${a}
log_test_addr ${a} $? 0 "VRF server, VRF client, local conn"
log_start
run_cmd nettest -s -D -I ${VRF} -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -D -d ${NSA_DEV} -r ${a}
log_test_addr ${a} $? 0 "VRF server, device client, local conn"
log_start
run_cmd nettest -s -D -I ${NSA_DEV} -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -D -d ${VRF} -r ${a}
log_test_addr ${a} $? 0 "Enslaved device server, VRF client, local conn"
log_start
run_cmd nettest -s -D -I ${NSA_DEV} -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -D -d ${NSA_DEV} -r ${a}
log_test_addr ${a} $? 0 "Enslaved device server, device client, local conn"
@@ -1861,7 +1863,7 @@ ipv4_udp_vrf()
do
log_start
run_cmd nettest -D -s -3 ${VRF} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -D -d ${VRF} -r ${a}
log_test_addr ${a} $? 0 "Global server, VRF client, local conn"
done
@@ -1870,7 +1872,7 @@ ipv4_udp_vrf()
do
log_start
run_cmd nettest -s -D -I ${VRF} -3 ${VRF} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -D -d ${VRF} -r ${a}
log_test_addr ${a} $? 0 "VRF server, VRF client, local conn"
done
@@ -2093,7 +2095,7 @@ ipv4_rt()
do
log_start
run_cmd nettest ${varg} -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest ${varg} -r ${a} &
sleep 3
run_cmd ip link del ${VRF}
@@ -2107,7 +2109,7 @@ ipv4_rt()
do
log_start
run_cmd nettest ${varg} -s -I ${VRF} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest ${varg} -r ${a} &
sleep 3
run_cmd ip link del ${VRF}
@@ -2120,7 +2122,7 @@ ipv4_rt()
a=${NSA_IP}
log_start
run_cmd nettest ${varg} -s -I ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest ${varg} -r ${a} &
sleep 3
run_cmd ip link del ${VRF}
@@ -2134,7 +2136,7 @@ ipv4_rt()
#
log_start
run_cmd_nsb nettest ${varg} -s &
- sleep 1
+ wait_local_port_listen ${NSB} 12345 tcp
run_cmd nettest ${varg} -d ${VRF} -r ${NSB_IP} &
sleep 3
run_cmd ip link del ${VRF}
@@ -2145,7 +2147,7 @@ ipv4_rt()
log_start
run_cmd_nsb nettest ${varg} -s &
- sleep 1
+ wait_local_port_listen ${NSB} 12345 tcp
run_cmd nettest ${varg} -d ${NSA_DEV} -r ${NSB_IP} &
sleep 3
run_cmd ip link del ${VRF}
@@ -2161,7 +2163,7 @@ ipv4_rt()
do
log_start
run_cmd nettest ${varg} -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest ${varg} -d ${VRF} -r ${a} &
sleep 3
run_cmd ip link del ${VRF}
@@ -2175,7 +2177,7 @@ ipv4_rt()
do
log_start
run_cmd nettest ${varg} -I ${VRF} -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest ${varg} -d ${VRF} -r ${a} &
sleep 3
run_cmd ip link del ${VRF}
@@ -2189,7 +2191,7 @@ ipv4_rt()
log_start
run_cmd nettest ${varg} -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest ${varg} -d ${NSA_DEV} -r ${a} &
sleep 3
run_cmd ip link del ${VRF}
@@ -2200,7 +2202,7 @@ ipv4_rt()
log_start
run_cmd nettest ${varg} -I ${VRF} -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest ${varg} -d ${NSA_DEV} -r ${a} &
sleep 3
run_cmd ip link del ${VRF}
@@ -2211,7 +2213,7 @@ ipv4_rt()
log_start
run_cmd nettest ${varg} -I ${NSA_DEV} -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest ${varg} -d ${NSA_DEV} -r ${a} &
sleep 3
run_cmd ip link del ${VRF}
@@ -2561,7 +2563,7 @@ ipv6_tcp_md5_novrf()
# basic use case
log_start
run_cmd nettest -6 -s -M ${MD5_PW} -m ${NSB_IP6} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_PW}
log_test $? 0 "MD5: Single address config"
@@ -2569,7 +2571,7 @@ ipv6_tcp_md5_novrf()
log_start
show_hint "Should timeout due to MD5 mismatch"
run_cmd nettest -6 -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_PW}
log_test $? 2 "MD5: Server no config, client uses password"
@@ -2577,7 +2579,7 @@ ipv6_tcp_md5_novrf()
log_start
show_hint "Should timeout since client uses wrong password"
run_cmd nettest -6 -s -M ${MD5_PW} -m ${NSB_IP6} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_WRONG_PW}
log_test $? 2 "MD5: Client uses wrong password"
@@ -2585,7 +2587,7 @@ ipv6_tcp_md5_novrf()
log_start
show_hint "Should timeout due to MD5 mismatch"
run_cmd nettest -6 -s -M ${MD5_PW} -m ${NSB_LO_IP6} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_PW}
log_test $? 2 "MD5: Client address does not match address configured with password"
@@ -2596,7 +2598,7 @@ ipv6_tcp_md5_novrf()
# client in prefix
log_start
run_cmd nettest -6 -s -M ${MD5_PW} -m ${NS_NET6} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_PW}
log_test $? 0 "MD5: Prefix config"
@@ -2604,7 +2606,7 @@ ipv6_tcp_md5_novrf()
log_start
show_hint "Should timeout since client uses wrong password"
run_cmd nettest -6 -s -M ${MD5_PW} -m ${NS_NET6} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_WRONG_PW}
log_test $? 2 "MD5: Prefix config, client uses wrong password"
@@ -2612,7 +2614,7 @@ ipv6_tcp_md5_novrf()
log_start
show_hint "Should timeout due to MD5 mismatch"
run_cmd nettest -6 -s -M ${MD5_PW} -m ${NS_NET6} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 -c ${NSB_LO_IP6} -r ${NSA_IP6} -X ${MD5_PW}
log_test $? 2 "MD5: Prefix config, client address not in configured prefix"
}
@@ -2629,7 +2631,7 @@ ipv6_tcp_md5()
# basic use case
log_start
run_cmd nettest -6 -s -I ${VRF} -M ${MD5_PW} -m ${NSB_IP6} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_PW}
log_test $? 0 "MD5: VRF: Single address config"
@@ -2637,7 +2639,7 @@ ipv6_tcp_md5()
log_start
show_hint "Should timeout since server does not have MD5 auth"
run_cmd nettest -6 -s -I ${VRF} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_PW}
log_test $? 2 "MD5: VRF: Server no config, client uses password"
@@ -2645,7 +2647,7 @@ ipv6_tcp_md5()
log_start
show_hint "Should timeout since client uses wrong password"
run_cmd nettest -6 -s -I ${VRF} -M ${MD5_PW} -m ${NSB_IP6} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_WRONG_PW}
log_test $? 2 "MD5: VRF: Client uses wrong password"
@@ -2653,7 +2655,7 @@ ipv6_tcp_md5()
log_start
show_hint "Should timeout since server config differs from client"
run_cmd nettest -6 -s -I ${VRF} -M ${MD5_PW} -m ${NSB_LO_IP6} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_PW}
log_test $? 2 "MD5: VRF: Client address does not match address configured with password"
@@ -2664,7 +2666,7 @@ ipv6_tcp_md5()
# client in prefix
log_start
run_cmd nettest -6 -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET6} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_PW}
log_test $? 0 "MD5: VRF: Prefix config"
@@ -2672,7 +2674,7 @@ ipv6_tcp_md5()
log_start
show_hint "Should timeout since client uses wrong password"
run_cmd nettest -6 -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET6} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_WRONG_PW}
log_test $? 2 "MD5: VRF: Prefix config, client uses wrong password"
@@ -2680,7 +2682,7 @@ ipv6_tcp_md5()
log_start
show_hint "Should timeout since client address is outside of prefix"
run_cmd nettest -6 -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET6} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 -c ${NSB_LO_IP6} -r ${NSA_IP6} -X ${MD5_PW}
log_test $? 2 "MD5: VRF: Prefix config, client address not in configured prefix"
@@ -2691,14 +2693,14 @@ ipv6_tcp_md5()
log_start
run_cmd nettest -6 -s -I ${VRF} -M ${MD5_PW} -m ${NSB_IP6} &
run_cmd nettest -6 -s -M ${MD5_WRONG_PW} -m ${NSB_IP6} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_PW}
log_test $? 0 "MD5: VRF: Single address config in default VRF and VRF, conn in VRF"
log_start
run_cmd nettest -6 -s -I ${VRF} -M ${MD5_PW} -m ${NSB_IP6} &
run_cmd nettest -6 -s -M ${MD5_WRONG_PW} -m ${NSB_IP6} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsc nettest -6 -r ${NSA_IP6} -X ${MD5_WRONG_PW}
log_test $? 0 "MD5: VRF: Single address config in default VRF and VRF, conn in default VRF"
@@ -2706,7 +2708,7 @@ ipv6_tcp_md5()
show_hint "Should timeout since client in default VRF uses VRF password"
run_cmd nettest -6 -s -I ${VRF} -M ${MD5_PW} -m ${NSB_IP6} &
run_cmd nettest -6 -s -M ${MD5_WRONG_PW} -m ${NSB_IP6} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsc nettest -6 -r ${NSA_IP6} -X ${MD5_PW}
log_test $? 2 "MD5: VRF: Single address config in default VRF and VRF, conn in default VRF with VRF pw"
@@ -2714,21 +2716,21 @@ ipv6_tcp_md5()
show_hint "Should timeout since client in VRF uses default VRF password"
run_cmd nettest -6 -s -I ${VRF} -M ${MD5_PW} -m ${NSB_IP6} &
run_cmd nettest -6 -s -M ${MD5_WRONG_PW} -m ${NSB_IP6} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_WRONG_PW}
log_test $? 2 "MD5: VRF: Single address config in default VRF and VRF, conn in VRF with default VRF pw"
log_start
run_cmd nettest -6 -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET6} &
run_cmd nettest -6 -s -M ${MD5_WRONG_PW} -m ${NS_NET6} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_PW}
log_test $? 0 "MD5: VRF: Prefix config in default VRF and VRF, conn in VRF"
log_start
run_cmd nettest -6 -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET6} &
run_cmd nettest -6 -s -M ${MD5_WRONG_PW} -m ${NS_NET6} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsc nettest -6 -r ${NSA_IP6} -X ${MD5_WRONG_PW}
log_test $? 0 "MD5: VRF: Prefix config in default VRF and VRF, conn in default VRF"
@@ -2736,7 +2738,7 @@ ipv6_tcp_md5()
show_hint "Should timeout since client in default VRF uses VRF password"
run_cmd nettest -6 -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET6} &
run_cmd nettest -6 -s -M ${MD5_WRONG_PW} -m ${NS_NET6} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsc nettest -6 -r ${NSA_IP6} -X ${MD5_PW}
log_test $? 2 "MD5: VRF: Prefix config in default VRF and VRF, conn in default VRF with VRF pw"
@@ -2744,7 +2746,7 @@ ipv6_tcp_md5()
show_hint "Should timeout since client in VRF uses default VRF password"
run_cmd nettest -6 -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET6} &
run_cmd nettest -6 -s -M ${MD5_WRONG_PW} -m ${NS_NET6} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 -r ${NSA_IP6} -X ${MD5_WRONG_PW}
log_test $? 2 "MD5: VRF: Prefix config in default VRF and VRF, conn in VRF with default VRF pw"
@@ -2772,7 +2774,7 @@ ipv6_tcp_novrf()
do
log_start
run_cmd nettest -6 -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 -r ${a}
log_test_addr ${a} $? 0 "Global server"
done
@@ -2793,7 +2795,7 @@ ipv6_tcp_novrf()
do
log_start
run_cmd_nsb nettest -6 -s &
- sleep 1
+ wait_local_port_listen ${NSB} 12345 tcp
run_cmd nettest -6 -r ${a}
log_test_addr ${a} $? 0 "Client"
done
@@ -2802,7 +2804,7 @@ ipv6_tcp_novrf()
do
log_start
run_cmd_nsb nettest -6 -s &
- sleep 1
+ wait_local_port_listen ${NSB} 12345 tcp
run_cmd nettest -6 -r ${a} -d ${NSA_DEV}
log_test_addr ${a} $? 0 "Client, device bind"
done
@@ -2822,7 +2824,7 @@ ipv6_tcp_novrf()
do
log_start
run_cmd nettest -6 -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest -6 -r ${a}
log_test_addr ${a} $? 0 "Global server, local connection"
done
@@ -2830,7 +2832,7 @@ ipv6_tcp_novrf()
a=${NSA_IP6}
log_start
run_cmd nettest -6 -s -I ${NSA_DEV} -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest -6 -r ${a} -0 ${a}
log_test_addr ${a} $? 0 "Device server, unbound client, local connection"
@@ -2839,7 +2841,7 @@ ipv6_tcp_novrf()
log_start
show_hint "Should fail 'Connection refused' since addresses on loopback are out of device scope"
run_cmd nettest -6 -s -I ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest -6 -r ${a}
log_test_addr ${a} $? 1 "Device server, unbound client, local connection"
done
@@ -2847,7 +2849,7 @@ ipv6_tcp_novrf()
a=${NSA_IP6}
log_start
run_cmd nettest -6 -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest -6 -r ${a} -d ${NSA_DEV} -0 ${a}
log_test_addr ${a} $? 0 "Global server, device client, local connection"
@@ -2856,7 +2858,7 @@ ipv6_tcp_novrf()
log_start
show_hint "Should fail 'Connection refused' since addresses on loopback are out of device scope"
run_cmd nettest -6 -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest -6 -r ${a} -d ${NSA_DEV}
log_test_addr ${a} $? 1 "Global server, device client, local connection"
done
@@ -2865,7 +2867,7 @@ ipv6_tcp_novrf()
do
log_start
run_cmd nettest -6 -s -I ${NSA_DEV} -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest -6 -d ${NSA_DEV} -r ${a}
log_test_addr ${a} $? 0 "Device server, device client, local conn"
done
@@ -2898,7 +2900,7 @@ ipv6_tcp_vrf()
log_start
show_hint "Should fail 'Connection refused' since global server with VRF is disabled"
run_cmd nettest -6 -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 -r ${a}
log_test_addr ${a} $? 1 "Global server"
done
@@ -2907,7 +2909,7 @@ ipv6_tcp_vrf()
do
log_start
run_cmd nettest -6 -s -I ${VRF} -3 ${VRF} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 -r ${a}
log_test_addr ${a} $? 0 "VRF server"
done
@@ -2916,7 +2918,7 @@ ipv6_tcp_vrf()
a=${NSA_LINKIP6}%${NSB_DEV}
log_start
run_cmd nettest -6 -s -I ${VRF} -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 -r ${a}
log_test_addr ${a} $? 0 "VRF server"
@@ -2924,7 +2926,7 @@ ipv6_tcp_vrf()
do
log_start
run_cmd nettest -6 -s -I ${NSA_DEV} -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 -r ${a}
log_test_addr ${a} $? 0 "Device server"
done
@@ -2943,7 +2945,7 @@ ipv6_tcp_vrf()
log_start
show_hint "Should fail 'Connection refused' since global server with VRF is disabled"
run_cmd nettest -6 -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest -6 -r ${a} -d ${NSA_DEV}
log_test_addr ${a} $? 1 "Global server, local connection"
@@ -2964,7 +2966,7 @@ ipv6_tcp_vrf()
do
log_start
run_cmd nettest -6 -s -3 ${VRF} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 -r ${a}
log_test_addr ${a} $? 0 "Global server"
done
@@ -2973,7 +2975,7 @@ ipv6_tcp_vrf()
do
log_start
run_cmd nettest -6 -s -I ${VRF} -3 ${VRF} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 -r ${a}
log_test_addr ${a} $? 0 "VRF server"
done
@@ -2982,13 +2984,13 @@ ipv6_tcp_vrf()
a=${NSA_LINKIP6}%${NSB_DEV}
log_start
run_cmd nettest -6 -s -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 -r ${a}
log_test_addr ${a} $? 0 "Global server"
log_start
run_cmd nettest -6 -s -I ${VRF} -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 -r ${a}
log_test_addr ${a} $? 0 "VRF server"
@@ -2996,7 +2998,7 @@ ipv6_tcp_vrf()
do
log_start
run_cmd nettest -6 -s -I ${NSA_DEV} -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 -r ${a}
log_test_addr ${a} $? 0 "Device server"
done
@@ -3016,7 +3018,7 @@ ipv6_tcp_vrf()
log_start
show_hint "Fails 'Connection refused' since client is not in VRF"
run_cmd nettest -6 -s -I ${VRF} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest -6 -r ${a}
log_test_addr ${a} $? 1 "Global server, local connection"
done
@@ -3029,7 +3031,7 @@ ipv6_tcp_vrf()
do
log_start
run_cmd_nsb nettest -6 -s &
- sleep 1
+ wait_local_port_listen ${NSB} 12345 tcp
run_cmd nettest -6 -r ${a} -d ${VRF}
log_test_addr ${a} $? 0 "Client, VRF bind"
done
@@ -3038,7 +3040,7 @@ ipv6_tcp_vrf()
log_start
show_hint "Fails since VRF device does not allow linklocal addresses"
run_cmd_nsb nettest -6 -s &
- sleep 1
+ wait_local_port_listen ${NSB} 12345 tcp
run_cmd nettest -6 -r ${a} -d ${VRF}
log_test_addr ${a} $? 1 "Client, VRF bind"
@@ -3046,7 +3048,7 @@ ipv6_tcp_vrf()
do
log_start
run_cmd_nsb nettest -6 -s &
- sleep 1
+ wait_local_port_listen ${NSB} 12345 tcp
run_cmd nettest -6 -r ${a} -d ${NSA_DEV}
log_test_addr ${a} $? 0 "Client, device bind"
done
@@ -3071,7 +3073,7 @@ ipv6_tcp_vrf()
do
log_start
run_cmd nettest -6 -s -I ${VRF} -3 ${VRF} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest -6 -r ${a} -d ${VRF} -0 ${a}
log_test_addr ${a} $? 0 "VRF server, VRF client, local connection"
done
@@ -3079,7 +3081,7 @@ ipv6_tcp_vrf()
a=${NSA_IP6}
log_start
run_cmd nettest -6 -s -I ${VRF} -3 ${VRF} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest -6 -r ${a} -d ${NSA_DEV} -0 ${a}
log_test_addr ${a} $? 0 "VRF server, device client, local connection"
@@ -3087,13 +3089,13 @@ ipv6_tcp_vrf()
log_start
show_hint "Should fail since unbound client is out of VRF scope"
run_cmd nettest -6 -s -I ${VRF} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest -6 -r ${a}
log_test_addr ${a} $? 1 "VRF server, unbound client, local connection"
log_start
run_cmd nettest -6 -s -I ${NSA_DEV} -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest -6 -r ${a} -d ${VRF} -0 ${a}
log_test_addr ${a} $? 0 "Device server, VRF client, local connection"
@@ -3101,7 +3103,7 @@ ipv6_tcp_vrf()
do
log_start
run_cmd nettest -6 -s -I ${NSA_DEV} -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest -6 -r ${a} -d ${NSA_DEV} -0 ${a}
log_test_addr ${a} $? 0 "Device server, device client, local connection"
done
@@ -3141,13 +3143,13 @@ ipv6_udp_novrf()
do
log_start
run_cmd nettest -6 -D -s -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd_nsb nettest -6 -D -r ${a}
log_test_addr ${a} $? 0 "Global server"
log_start
run_cmd nettest -6 -D -I ${NSA_DEV} -s -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd_nsb nettest -6 -D -r ${a}
log_test_addr ${a} $? 0 "Device server"
done
@@ -3155,7 +3157,7 @@ ipv6_udp_novrf()
a=${NSA_LO_IP6}
log_start
run_cmd nettest -6 -D -s -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd_nsb nettest -6 -D -r ${a}
log_test_addr ${a} $? 0 "Global server"
@@ -3165,7 +3167,7 @@ ipv6_udp_novrf()
#log_start
#show_hint "Should fail since loopback address is out of scope"
#run_cmd nettest -6 -D -I ${NSA_DEV} -s -3 ${NSA_DEV} &
- #sleep 1
+	#wait_local_port_listen ${NSA} 12345 udp
#run_cmd_nsb nettest -6 -D -r ${a}
#log_test_addr ${a} $? 1 "Device server"
@@ -3185,25 +3187,25 @@ ipv6_udp_novrf()
do
log_start
run_cmd_nsb nettest -6 -D -s &
- sleep 1
+ wait_local_port_listen ${NSB} 12345 udp
run_cmd nettest -6 -D -r ${a} -0 ${NSA_IP6}
log_test_addr ${a} $? 0 "Client"
log_start
run_cmd_nsb nettest -6 -D -s &
- sleep 1
+ wait_local_port_listen ${NSB} 12345 udp
run_cmd nettest -6 -D -r ${a} -d ${NSA_DEV} -0 ${NSA_IP6}
log_test_addr ${a} $? 0 "Client, device bind"
log_start
run_cmd_nsb nettest -6 -D -s &
- sleep 1
+ wait_local_port_listen ${NSB} 12345 udp
run_cmd nettest -6 -D -r ${a} -d ${NSA_DEV} -C -0 ${NSA_IP6}
log_test_addr ${a} $? 0 "Client, device send via cmsg"
log_start
run_cmd_nsb nettest -6 -D -s &
- sleep 1
+ wait_local_port_listen ${NSB} 12345 udp
run_cmd nettest -6 -D -r ${a} -d ${NSA_DEV} -S -0 ${NSA_IP6}
log_test_addr ${a} $? 0 "Client, device bind via IPV6_UNICAST_IF"
@@ -3225,7 +3227,7 @@ ipv6_udp_novrf()
do
log_start
run_cmd nettest -6 -D -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -6 -D -r ${a} -0 ${a} -1 ${a}
log_test_addr ${a} $? 0 "Global server, local connection"
done
@@ -3233,7 +3235,7 @@ ipv6_udp_novrf()
a=${NSA_IP6}
log_start
run_cmd nettest -6 -s -D -I ${NSA_DEV} -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -6 -D -r ${a}
log_test_addr ${a} $? 0 "Device server, unbound client, local connection"
@@ -3242,7 +3244,7 @@ ipv6_udp_novrf()
log_start
show_hint "Should fail 'Connection refused' since address is out of device scope"
run_cmd nettest -6 -s -D -I ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -6 -D -r ${a}
log_test_addr ${a} $? 1 "Device server, local connection"
done
@@ -3250,19 +3252,19 @@ ipv6_udp_novrf()
a=${NSA_IP6}
log_start
run_cmd nettest -6 -s -D &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -6 -D -d ${NSA_DEV} -r ${a}
log_test_addr ${a} $? 0 "Global server, device client, local connection"
log_start
run_cmd nettest -6 -s -D &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -6 -D -d ${NSA_DEV} -C -r ${a}
log_test_addr ${a} $? 0 "Global server, device send via cmsg, local connection"
log_start
run_cmd nettest -6 -s -D &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -6 -D -d ${NSA_DEV} -S -r ${a}
log_test_addr ${a} $? 0 "Global server, device client via IPV6_UNICAST_IF, local connection"
@@ -3271,28 +3273,28 @@ ipv6_udp_novrf()
log_start
show_hint "Should fail 'No route to host' since addresses on loopback are out of device scope"
run_cmd nettest -6 -D -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -6 -D -r ${a} -d ${NSA_DEV}
log_test_addr ${a} $? 1 "Global server, device client, local connection"
log_start
show_hint "Should fail 'No route to host' since addresses on loopback are out of device scope"
run_cmd nettest -6 -D -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -6 -D -r ${a} -d ${NSA_DEV} -C
log_test_addr ${a} $? 1 "Global server, device send via cmsg, local connection"
log_start
show_hint "Should fail 'No route to host' since addresses on loopback are out of device scope"
run_cmd nettest -6 -D -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -6 -D -r ${a} -d ${NSA_DEV} -S
log_test_addr ${a} $? 1 "Global server, device client via IP_UNICAST_IF, local connection"
log_start
show_hint "Should fail 'No route to host' since addresses on loopback are out of device scope"
run_cmd nettest -6 -D -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -6 -D -r ${a} -d ${NSA_DEV} -S -U
log_test_addr ${a} $? 1 "Global server, device client via IP_UNICAST_IF, local connection, with connect()"
done
@@ -3300,7 +3302,7 @@ ipv6_udp_novrf()
a=${NSA_IP6}
log_start
run_cmd nettest -6 -D -s -I ${NSA_DEV} -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -6 -D -d ${NSA_DEV} -r ${a} -0 ${a}
log_test_addr ${a} $? 0 "Device server, device client, local conn"
@@ -3314,7 +3316,7 @@ ipv6_udp_novrf()
run_cmd_nsb ip -6 ro add ${NSA_IP6}/128 dev ${NSB_DEV}
log_start
run_cmd nettest -6 -s -D &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd_nsb nettest -6 -D -r ${NSA_IP6}
log_test $? 0 "UDP in - LLA to GUA"
@@ -3338,7 +3340,7 @@ ipv6_udp_vrf()
log_start
show_hint "Should fail 'Connection refused' since global server is disabled"
run_cmd nettest -6 -D -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd_nsb nettest -6 -D -r ${a}
log_test_addr ${a} $? 1 "Global server"
done
@@ -3347,7 +3349,7 @@ ipv6_udp_vrf()
do
log_start
run_cmd nettest -6 -D -I ${VRF} -s -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd_nsb nettest -6 -D -r ${a}
log_test_addr ${a} $? 0 "VRF server"
done
@@ -3356,7 +3358,7 @@ ipv6_udp_vrf()
do
log_start
run_cmd nettest -6 -D -I ${NSA_DEV} -s -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd_nsb nettest -6 -D -r ${a}
log_test_addr ${a} $? 0 "Enslaved device server"
done
@@ -3378,7 +3380,7 @@ ipv6_udp_vrf()
log_start
show_hint "Should fail 'Connection refused' since global server is disabled"
run_cmd nettest -6 -D -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -6 -D -d ${VRF} -r ${a}
log_test_addr ${a} $? 1 "Global server, VRF client, local conn"
done
@@ -3387,7 +3389,7 @@ ipv6_udp_vrf()
do
log_start
run_cmd nettest -6 -D -I ${VRF} -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -6 -D -d ${VRF} -r ${a}
log_test_addr ${a} $? 0 "VRF server, VRF client, local conn"
done
@@ -3396,25 +3398,25 @@ ipv6_udp_vrf()
log_start
show_hint "Should fail 'Connection refused' since global server is disabled"
run_cmd nettest -6 -D -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -6 -D -d ${NSA_DEV} -r ${a}
log_test_addr ${a} $? 1 "Global server, device client, local conn"
log_start
run_cmd nettest -6 -D -I ${VRF} -s -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -6 -D -d ${NSA_DEV} -r ${a}
log_test_addr ${a} $? 0 "VRF server, device client, local conn"
log_start
run_cmd nettest -6 -D -I ${NSA_DEV} -s -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -6 -D -d ${VRF} -r ${a}
log_test_addr ${a} $? 0 "Enslaved device server, VRF client, local conn"
log_start
run_cmd nettest -6 -D -I ${NSA_DEV} -s -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -6 -D -d ${NSA_DEV} -r ${a}
log_test_addr ${a} $? 0 "Enslaved device server, device client, local conn"
@@ -3429,7 +3431,7 @@ ipv6_udp_vrf()
do
log_start
run_cmd nettest -6 -D -s -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd_nsb nettest -6 -D -r ${a}
log_test_addr ${a} $? 0 "Global server"
done
@@ -3438,7 +3440,7 @@ ipv6_udp_vrf()
do
log_start
run_cmd nettest -6 -D -I ${VRF} -s -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd_nsb nettest -6 -D -r ${a}
log_test_addr ${a} $? 0 "VRF server"
done
@@ -3447,7 +3449,7 @@ ipv6_udp_vrf()
do
log_start
run_cmd nettest -6 -D -I ${NSA_DEV} -s -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd_nsb nettest -6 -D -r ${a}
log_test_addr ${a} $? 0 "Enslaved device server"
done
@@ -3465,7 +3467,7 @@ ipv6_udp_vrf()
#
log_start
run_cmd_nsb nettest -6 -D -s &
- sleep 1
+ wait_local_port_listen ${NSB} 12345 udp
run_cmd nettest -6 -D -d ${VRF} -r ${NSB_IP6}
log_test $? 0 "VRF client"
@@ -3476,7 +3478,7 @@ ipv6_udp_vrf()
log_start
run_cmd_nsb nettest -6 -D -s &
- sleep 1
+ wait_local_port_listen ${NSB} 12345 udp
run_cmd nettest -6 -D -d ${NSA_DEV} -r ${NSB_IP6}
log_test $? 0 "Enslaved device client"
@@ -3491,13 +3493,13 @@ ipv6_udp_vrf()
a=${NSA_IP6}
log_start
run_cmd nettest -6 -D -s -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -6 -D -d ${VRF} -r ${a}
log_test_addr ${a} $? 0 "Global server, VRF client, local conn"
#log_start
run_cmd nettest -6 -D -I ${VRF} -s -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -6 -D -d ${VRF} -r ${a}
log_test_addr ${a} $? 0 "VRF server, VRF client, local conn"
@@ -3505,13 +3507,13 @@ ipv6_udp_vrf()
a=${VRF_IP6}
log_start
run_cmd nettest -6 -D -s -3 ${VRF} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -6 -D -d ${VRF} -r ${a}
log_test_addr ${a} $? 0 "Global server, VRF client, local conn"
log_start
run_cmd nettest -6 -D -I ${VRF} -s -3 ${VRF} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -6 -D -d ${VRF} -r ${a}
log_test_addr ${a} $? 0 "VRF server, VRF client, local conn"
@@ -3527,25 +3529,25 @@ ipv6_udp_vrf()
a=${NSA_IP6}
log_start
run_cmd nettest -6 -D -s -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -6 -D -d ${NSA_DEV} -r ${a}
log_test_addr ${a} $? 0 "Global server, device client, local conn"
log_start
run_cmd nettest -6 -D -I ${VRF} -s -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -6 -D -d ${NSA_DEV} -r ${a}
log_test_addr ${a} $? 0 "VRF server, device client, local conn"
log_start
run_cmd nettest -6 -D -I ${NSA_DEV} -s -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -6 -D -d ${VRF} -r ${a}
log_test_addr ${a} $? 0 "Device server, VRF client, local conn"
log_start
run_cmd nettest -6 -D -I ${NSA_DEV} -s -3 ${NSA_DEV} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -6 -D -d ${NSA_DEV} -r ${a}
log_test_addr ${a} $? 0 "Device server, device client, local conn"
@@ -3557,7 +3559,7 @@ ipv6_udp_vrf()
# link local addresses
log_start
run_cmd nettest -6 -D -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd_nsb nettest -6 -D -d ${NSB_DEV} -r ${NSA_LINKIP6}
log_test $? 0 "Global server, linklocal IP"
@@ -3568,7 +3570,7 @@ ipv6_udp_vrf()
log_start
run_cmd_nsb nettest -6 -D -s &
- sleep 1
+ wait_local_port_listen ${NSB} 12345 udp
run_cmd nettest -6 -D -d ${NSA_DEV} -r ${NSB_LINKIP6}
log_test $? 0 "Enslaved device client, linklocal IP"
@@ -3579,7 +3581,7 @@ ipv6_udp_vrf()
log_start
run_cmd nettest -6 -D -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd nettest -6 -D -d ${NSA_DEV} -r ${NSA_LINKIP6}
log_test $? 0 "Enslaved device client, local conn - linklocal IP"
@@ -3592,7 +3594,7 @@ ipv6_udp_vrf()
run_cmd_nsb ip -6 ro add ${NSA_IP6}/128 dev ${NSB_DEV}
log_start
run_cmd nettest -6 -s -D &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 udp
run_cmd_nsb nettest -6 -D -r ${NSA_IP6}
log_test $? 0 "UDP in - LLA to GUA"
@@ -3771,7 +3773,7 @@ ipv6_rt()
do
log_start
run_cmd nettest ${varg} -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest ${varg} -r ${a} &
sleep 3
run_cmd ip link del ${VRF}
@@ -3785,7 +3787,7 @@ ipv6_rt()
do
log_start
run_cmd nettest ${varg} -I ${VRF} -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest ${varg} -r ${a} &
sleep 3
run_cmd ip link del ${VRF}
@@ -3799,7 +3801,7 @@ ipv6_rt()
do
log_start
run_cmd nettest ${varg} -I ${NSA_DEV} -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest ${varg} -r ${a} &
sleep 3
run_cmd ip link del ${VRF}
@@ -3814,7 +3816,7 @@ ipv6_rt()
#
log_start
run_cmd_nsb nettest ${varg} -s &
- sleep 1
+ wait_local_port_listen ${NSB} 12345 tcp
run_cmd nettest ${varg} -d ${VRF} -r ${NSB_IP6} &
sleep 3
run_cmd ip link del ${VRF}
@@ -3825,7 +3827,7 @@ ipv6_rt()
log_start
run_cmd_nsb nettest ${varg} -s &
- sleep 1
+ wait_local_port_listen ${NSB} 12345 tcp
run_cmd nettest ${varg} -d ${NSA_DEV} -r ${NSB_IP6} &
sleep 3
run_cmd ip link del ${VRF}
@@ -3842,7 +3844,7 @@ ipv6_rt()
do
log_start
run_cmd nettest ${varg} -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest ${varg} -d ${VRF} -r ${a} &
sleep 3
run_cmd ip link del ${VRF}
@@ -3856,7 +3858,7 @@ ipv6_rt()
do
log_start
run_cmd nettest ${varg} -I ${VRF} -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest ${varg} -d ${VRF} -r ${a} &
sleep 3
run_cmd ip link del ${VRF}
@@ -3869,7 +3871,7 @@ ipv6_rt()
a=${NSA_IP6}
log_start
run_cmd nettest ${varg} -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest ${varg} -d ${NSA_DEV} -r ${a} &
sleep 3
run_cmd ip link del ${VRF}
@@ -3880,7 +3882,7 @@ ipv6_rt()
log_start
run_cmd nettest ${varg} -I ${VRF} -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest ${varg} -d ${NSA_DEV} -r ${a} &
sleep 3
run_cmd ip link del ${VRF}
@@ -3891,7 +3893,7 @@ ipv6_rt()
log_start
run_cmd nettest ${varg} -I ${NSA_DEV} -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd nettest ${varg} -d ${NSA_DEV} -r ${a} &
sleep 3
run_cmd ip link del ${VRF}
@@ -3950,7 +3952,7 @@ netfilter_tcp_reset()
do
log_start
run_cmd nettest -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -r ${a}
log_test_addr ${a} $? 1 "Global server, reject with TCP-reset on Rx"
done
@@ -3968,7 +3970,7 @@ netfilter_icmp()
do
log_start
run_cmd nettest ${arg} -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest ${arg} -r ${a}
log_test_addr ${a} $? 1 "Global ${stype} server, Rx reject icmp-port-unreach"
done
@@ -4007,7 +4009,7 @@ netfilter_tcp6_reset()
do
log_start
run_cmd nettest -6 -s &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 -r ${a}
log_test_addr ${a} $? 1 "Global server, reject with TCP-reset on Rx"
done
@@ -4025,7 +4027,7 @@ netfilter_icmp6()
do
log_start
run_cmd nettest -6 -s ${arg} &
- sleep 1
+ wait_local_port_listen ${NSA} 12345 tcp
run_cmd_nsb nettest -6 ${arg} -r ${a}
log_test_addr ${a} $? 1 "Global ${stype} server, Rx reject icmp-port-unreach"
done
@@ -4221,12 +4223,12 @@ use_case_snat_on_vrf()
run_cmd ip6tables -t nat -A POSTROUTING -p tcp -m tcp --dport ${port} -j SNAT --to-source ${NSA_LO_IP6} -o ${VRF}
run_cmd_nsb nettest -s -l ${NSB_IP} -p ${port} &
- sleep 1
+ wait_local_port_listen ${NSB} ${port} tcp
run_cmd nettest -d ${VRF} -r ${NSB_IP} -p ${port}
log_test $? 0 "IPv4 TCP connection over VRF with SNAT"
run_cmd_nsb nettest -6 -s -l ${NSB_IP6} -p ${port} &
- sleep 1
+ wait_local_port_listen ${NSB} ${port} tcp
run_cmd nettest -6 -d ${VRF} -r ${NSB_IP6} -p ${port}
log_test $? 0 "IPv6 TCP connection over VRF with SNAT"
@@ -4272,6 +4274,7 @@ EOF
TESTS_IPV4="ipv4_ping ipv4_tcp ipv4_udp ipv4_bind ipv4_runtime ipv4_netfilter"
TESTS_IPV6="ipv6_ping ipv6_tcp ipv6_udp ipv6_bind ipv6_runtime ipv6_netfilter"
TESTS_OTHER="use_cases"
+# note: each TESTS_ group needs a dedicated runner, e.g. fcnal-ipv4.sh
PAUSE_ON_FAIL=no
PAUSE=no
@@ -4302,6 +4305,8 @@ elif [ "$TESTS" = "ipv4" ]; then
TESTS="$TESTS_IPV4"
elif [ "$TESTS" = "ipv6" ]; then
TESTS="$TESTS_IPV6"
+elif [ "$TESTS" = "other" ]; then
+ TESTS="$TESTS_OTHER"
fi
check_gen_prog "nettest"
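The conversions above rely on wait_local_port_listen from the shared net
selftest helpers, which polls the namespace's /proc/net tables instead of
sleeping a fixed second. A minimal sketch of such a poller, assuming the
(namespace, port, protocol) argument order used by the call sites:

	# Sketch: wait until a socket in $1 listens on port $2 over proto $3.
	wait_local_port_listen()
	{
		local listener_ns="${1}"
		local port="${2}"
		local protocol="${3}"
		local pattern
		local i

		# /proc/net/tcp* and /proc/net/udp* print ports as 4 hex digits.
		pattern=":$(printf "%04X" "${port}") "

		# For TCP, also require socket state 0A (TCP_LISTEN).
		[ "${protocol}" = "tcp" ] && pattern="${pattern}0A"

		for i in $(seq 10); do
			ip netns exec "${listener_ns}" \
				awk '{print $2" "$4}' /proc/net/"${protocol}"* | \
				grep -q "${pattern}" && return 0
			sleep 0.1
		done
		return 1
	}

Compared to sleep 1 this returns as soon as the server is up and keeps
working on slow or heavily loaded machines.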
diff --git a/tools/testing/selftests/net/fdb_notify.sh b/tools/testing/selftests/net/fdb_notify.sh
index c159230c9b62..0b8a2465dd04 100755
--- a/tools/testing/selftests/net/fdb_notify.sh
+++ b/tools/testing/selftests/net/fdb_notify.sh
@@ -40,16 +40,16 @@ do_test_dup()
test_dup_bridge()
{
- ip_link_add br up type bridge vlan_filtering 1
+ adf_ip_link_add br up type bridge vlan_filtering 1
do_test_dup add "bridge" dev br self
do_test_dup del "bridge" dev br self
}
test_dup_vxlan_self()
{
- ip_link_add br up type bridge vlan_filtering 1
- ip_link_add vx up type vxlan id 2000 dstport 4789
- ip_link_set_master vx br
+ adf_ip_link_add br up type bridge vlan_filtering 1
+ adf_ip_link_add vx up type vxlan id 2000 dstport 4789
+ adf_ip_link_set_master vx br
do_test_dup add "vxlan" dev vx self dst 192.0.2.1
do_test_dup del "vxlan" dev vx self dst 192.0.2.1
@@ -57,9 +57,9 @@ test_dup_vxlan_self()
test_dup_vxlan_master()
{
- ip_link_add br up type bridge vlan_filtering 1
- ip_link_add vx up type vxlan id 2000 dstport 4789
- ip_link_set_master vx br
+ adf_ip_link_add br up type bridge vlan_filtering 1
+ adf_ip_link_add vx up type vxlan id 2000 dstport 4789
+ adf_ip_link_set_master vx br
do_test_dup add "vxlan master" dev vx master
do_test_dup del "vxlan master" dev vx master
@@ -67,8 +67,8 @@ test_dup_vxlan_master()
test_dup_macvlan_self()
{
- ip_link_add dd up type dummy
- ip_link_add mv up link dd type macvlan mode passthru
+ adf_ip_link_add dd up type dummy
+ adf_ip_link_add mv up link dd type macvlan mode passthru
do_test_dup add "macvlan self" dev mv self
do_test_dup del "macvlan self" dev mv self
@@ -76,10 +76,10 @@ test_dup_macvlan_self()
test_dup_macvlan_master()
{
- ip_link_add br up type bridge vlan_filtering 1
- ip_link_add dd up type dummy
- ip_link_add mv up link dd type macvlan mode passthru
- ip_link_set_master mv br
+ adf_ip_link_add br up type bridge vlan_filtering 1
+ adf_ip_link_add dd up type dummy
+ adf_ip_link_add mv up link dd type macvlan mode passthru
+ adf_ip_link_set_master mv br
do_test_dup add "macvlan master" dev mv self
do_test_dup del "macvlan master" dev mv self
diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh
index b39f748c2572..2b0a90581e2f 100755
--- a/tools/testing/selftests/net/fib_nexthops.sh
+++ b/tools/testing/selftests/net/fib_nexthops.sh
@@ -467,8 +467,8 @@ ipv6_fdb_grp_fcnal()
log_test $? 0 "Get Fdb nexthop group by id"
# fdb nexthop group can only contain fdb nexthops
- run_cmd "$IP nexthop add id 63 via 2001:db8:91::4"
- run_cmd "$IP nexthop add id 64 via 2001:db8:91::5"
+ run_cmd "$IP nexthop add id 63 via 2001:db8:91::4 dev veth1"
+ run_cmd "$IP nexthop add id 64 via 2001:db8:91::5 dev veth1"
run_cmd "$IP nexthop add id 103 group 63/64 fdb"
log_test $? 2 "Fdb Nexthop group with non-fdb nexthops"
@@ -494,6 +494,26 @@ ipv6_fdb_grp_fcnal()
run_cmd "$IP nexthop add id 69 encap mpls 101 via 2001:db8:91::8 dev veth1 fdb"
log_test $? 2 "Fdb Nexthop with encap"
+ # Replace FDB nexthop to non-FDB and vice versa
+ run_cmd "$IP nexthop add id 70 via 2001:db8:91::2 fdb"
+ run_cmd "$IP nexthop replace id 70 via 2001:db8:91::2 dev veth1"
+ log_test $? 0 "Replace FDB nexthop to non-FDB nexthop"
+ run_cmd "$IP nexthop replace id 70 via 2001:db8:91::2 fdb"
+ log_test $? 0 "Replace non-FDB nexthop to FDB nexthop"
+
+ # Replace FDB nexthop address while in a group
+ run_cmd "$IP nexthop add id 71 group 70 fdb"
+ run_cmd "$IP nexthop replace id 70 via 2001:db8:91::3 fdb"
+ log_test $? 0 "Replace FDB nexthop address while in a group"
+
+ # Cannot replace FDB nexthop to non-FDB and vice versa while in a group
+ run_cmd "$IP nexthop replace id 70 via 2001:db8:91::2 dev veth1"
+ log_test $? 2 "Replace FDB nexthop to non-FDB nexthop while in a group"
+ run_cmd "$IP nexthop add id 72 via 2001:db8:91::2 dev veth1"
+ run_cmd "$IP nexthop add id 73 group 72"
+ run_cmd "$IP nexthop replace id 72 via 2001:db8:91::2 fdb"
+ log_test $? 2 "Replace non-FDB nexthop to FDB nexthop while in a group"
+
run_cmd "$IP link add name vx10 type vxlan id 1010 local 2001:db8:91::9 remote 2001:db8:91::10 dstport 4789 nolearning noudpcsum tos inherit ttl 100"
run_cmd "$BRIDGE fdb add 02:02:00:00:00:13 dev vx10 nhid 102 self"
log_test $? 0 "Fdb mac add with nexthop group"
@@ -547,15 +567,15 @@ ipv4_fdb_grp_fcnal()
log_test $? 0 "Get Fdb nexthop group by id"
# fdb nexthop group can only contain fdb nexthops
- run_cmd "$IP nexthop add id 14 via 172.16.1.2"
- run_cmd "$IP nexthop add id 15 via 172.16.1.3"
+ run_cmd "$IP nexthop add id 14 via 172.16.1.2 dev veth1"
+ run_cmd "$IP nexthop add id 15 via 172.16.1.3 dev veth1"
run_cmd "$IP nexthop add id 103 group 14/15 fdb"
log_test $? 2 "Fdb Nexthop group with non-fdb nexthops"
# Non fdb nexthop group can not contain fdb nexthops
run_cmd "$IP nexthop add id 16 via 172.16.1.2 fdb"
run_cmd "$IP nexthop add id 17 via 172.16.1.3 fdb"
- run_cmd "$IP nexthop add id 104 group 14/15"
+ run_cmd "$IP nexthop add id 104 group 16/17"
log_test $? 2 "Non-Fdb Nexthop group with fdb nexthops"
# fdb nexthop cannot have blackhole
@@ -574,6 +594,26 @@ ipv4_fdb_grp_fcnal()
run_cmd "$IP nexthop add id 17 encap mpls 101 via 172.16.1.2 dev veth1 fdb"
log_test $? 2 "Fdb Nexthop with encap"
+ # Replace FDB nexthop to non-FDB and vice versa
+ run_cmd "$IP nexthop add id 18 via 172.16.1.2 fdb"
+ run_cmd "$IP nexthop replace id 18 via 172.16.1.2 dev veth1"
+ log_test $? 0 "Replace FDB nexthop to non-FDB nexthop"
+ run_cmd "$IP nexthop replace id 18 via 172.16.1.2 fdb"
+ log_test $? 0 "Replace non-FDB nexthop to FDB nexthop"
+
+ # Replace FDB nexthop address while in a group
+ run_cmd "$IP nexthop add id 19 group 18 fdb"
+ run_cmd "$IP nexthop replace id 18 via 172.16.1.3 fdb"
+ log_test $? 0 "Replace FDB nexthop address while in a group"
+
+ # Cannot replace FDB nexthop to non-FDB and vice versa while in a group
+ run_cmd "$IP nexthop replace id 18 via 172.16.1.2 dev veth1"
+ log_test $? 2 "Replace FDB nexthop to non-FDB nexthop while in a group"
+ run_cmd "$IP nexthop add id 20 via 172.16.1.2 dev veth1"
+ run_cmd "$IP nexthop add id 21 group 20"
+ run_cmd "$IP nexthop replace id 20 via 172.16.1.2 fdb"
+ log_test $? 2 "Replace non-FDB nexthop to FDB nexthop while in a group"
+
run_cmd "$IP link add name vx10 type vxlan id 1010 local 10.0.0.1 remote 10.0.0.2 dstport 4789 nolearning noudpcsum tos inherit ttl 100"
run_cmd "$BRIDGE fdb add 02:02:00:00:00:13 dev vx10 nhid 102 self"
log_test $? 0 "Fdb mac add with nexthop group"
@@ -582,7 +622,7 @@ ipv4_fdb_grp_fcnal()
run_cmd "$BRIDGE fdb add 02:02:00:00:00:14 dev vx10 nhid 12 self"
log_test $? 255 "Fdb mac add with nexthop"
- run_cmd "$IP ro add 172.16.0.0/22 nhid 15"
+ run_cmd "$IP ro add 172.16.0.0/22 nhid 16"
log_test $? 2 "Route add with fdb nexthop"
run_cmd "$IP ro add 172.16.0.0/22 nhid 103"
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index a94b73a53f72..a88f797c549a 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -11,7 +11,8 @@ TESTS="unregister down carrier nexthop suppress ipv6_notify ipv4_notify \
ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric ipv6_route_metrics \
ipv4_route_metrics ipv4_route_v6_gw rp_filter ipv4_del_addr \
ipv6_del_addr ipv4_mangle ipv6_mangle ipv4_bcast_neigh fib6_gc_test \
- ipv4_mpath_list ipv6_mpath_list ipv4_mpath_balance ipv6_mpath_balance"
+ ipv4_mpath_list ipv6_mpath_list ipv4_mpath_balance ipv6_mpath_balance \
+ fib6_ra_to_static"
VERBOSE=0
PAUSE_ON_FAIL=no
@@ -1476,6 +1477,68 @@ ipv6_route_metrics_test()
route_cleanup
}
+fib6_ra_to_static()
+{
+ setup
+
+ echo
+ echo "Fib6 route promotion from RA-learned to static test"
+ set -e
+
+	# ra6 (from ipv6toolkit) is required for the test.
+ if [ ! -x "$(command -v ra6)" ]; then
+ echo "SKIP: ra6 not found."
+ set +e
+ cleanup &> /dev/null
+ return
+ fi
+
+	# Create a pair of veth devices to send an RA message from one
+ # device to another.
+ $IP link add veth1 type veth peer name veth2
+ $IP link set dev veth1 up
+ $IP link set dev veth2 up
+ $IP -6 address add 2001:10::1/64 dev veth1 nodad
+ $IP -6 address add 2001:10::2/64 dev veth2 nodad
+
+ # Make veth1 ready to receive RA messages.
+ $NS_EXEC sysctl -wq net.ipv6.conf.veth1.accept_ra=2
+
+	# Send an RA message with a prefix from veth2.
+ $NS_EXEC ra6 -i veth2 -d 2001:10::1 -P 2001:12::/64\#LA\#120\#60
+
+ # Wait for the RA message.
+ sleep 1
+
+ # systemd may mess up the test. Make sure that
+ # systemd-networkd.service and systemd-networkd.socket are stopped.
+ check_rt_num_clean 2 $($IP -6 route list|grep expires|wc -l) || return
+
+ # Configure static address on the same prefix
+ $IP -6 address add 2001:12::dead/64 dev veth1 nodad
+
+ # On-link route won't expire anymore, default route still owned by RA
+ check_rt_num 1 $($IP -6 route list |grep expires|wc -l)
+
+ # Send a second RA message with a prefix from veth2.
+ $NS_EXEC ra6 -i veth2 -d 2001:10::1 -P 2001:12::/64\#LA\#120\#60
+ sleep 1
+
+	# The 'expires' tag is not back, on-link route is still static
+ check_rt_num 1 $($IP -6 route list |grep expires|wc -l)
+
+ $IP -6 address del 2001:12::dead/64 dev veth1 nodad
+
+	# The 'expires' tag is back, on-link route is now owned by RA again
+ check_rt_num 2 $($IP -6 route list |grep expires|wc -l)
+
+ log_test $ret 0 "ipv6 promote RA route to static"
+
+ set +e
+
+ cleanup &> /dev/null
+}
+
# add route for a prefix, flushing any existing routes first
# expected to be the first step of a test
add_route()
@@ -2798,6 +2861,7 @@ do
ipv6_mpath_list) ipv6_mpath_list_test;;
ipv4_mpath_balance) ipv4_mpath_balance_test;;
ipv6_mpath_balance) ipv6_mpath_balance_test;;
+ fib6_ra_to_static) fib6_ra_to_static;;
help) echo "Test names: $TESTS"; exit 0;;
esac
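In the ra6 invocation above, -P packs prefix#flags#valid#preferred, so
2001:12::/64#LA#120#60 advertises an on-link (L), autonomous (A) prefix
with 120s valid and 60s preferred lifetimes, which is why the learned
routes carry "expires". The promotion can be reproduced by hand on the
same veth pair:

	ra6 -i veth2 -d 2001:10::1 -P 2001:12::/64#LA#120#60
	ip -6 route list | grep expires   # on-link + default route, both expiring
	ip -6 addr add 2001:12::dead/64 dev veth1 nodad
	ip -6 route list | grep expires   # only the RA default route expires now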
diff --git a/tools/testing/selftests/net/forwarding/Makefile b/tools/testing/selftests/net/forwarding/Makefile
index 00bde7b6f39e..ff4a00d91a26 100644
--- a/tools/testing/selftests/net/forwarding/Makefile
+++ b/tools/testing/selftests/net/forwarding/Makefile
@@ -1,6 +1,9 @@
# SPDX-License-Identifier: GPL-2.0+ OR MIT
-TEST_PROGS = bridge_fdb_learning_limit.sh \
+TEST_PROGS := \
+ bridge_activity_notify.sh \
+ bridge_fdb_learning_limit.sh \
+ bridge_fdb_local_vlan_0.sh \
bridge_igmp.sh \
bridge_locked_port.sh \
bridge_mdb.sh \
@@ -18,64 +21,64 @@ TEST_PROGS = bridge_fdb_learning_limit.sh \
gre_custom_multipath_hash.sh \
gre_inner_v4_multipath.sh \
gre_inner_v6_multipath.sh \
- gre_multipath_nh_res.sh \
- gre_multipath_nh.sh \
gre_multipath.sh \
+ gre_multipath_nh.sh \
+ gre_multipath_nh_res.sh \
ip6_forward_instats_vrf.sh \
ip6gre_custom_multipath_hash.sh \
+ ip6gre_flat.sh \
ip6gre_flat_key.sh \
ip6gre_flat_keys.sh \
- ip6gre_flat.sh \
+ ip6gre_hier.sh \
ip6gre_hier_key.sh \
ip6gre_hier_keys.sh \
- ip6gre_hier.sh \
ip6gre_inner_v4_multipath.sh \
ip6gre_inner_v6_multipath.sh \
+ ipip_flat_gre.sh \
ipip_flat_gre_key.sh \
ipip_flat_gre_keys.sh \
- ipip_flat_gre.sh \
+ ipip_hier_gre.sh \
ipip_hier_gre_key.sh \
ipip_hier_gre_keys.sh \
- ipip_hier_gre.sh \
lib_sh_test.sh \
local_termination.sh \
min_max_mtu.sh \
+ mirror_gre.sh \
mirror_gre_bound.sh \
mirror_gre_bridge_1d.sh \
mirror_gre_bridge_1d_vlan.sh \
- mirror_gre_bridge_1q_lag.sh \
mirror_gre_bridge_1q.sh \
+ mirror_gre_bridge_1q_lag.sh \
mirror_gre_changes.sh \
mirror_gre_flower.sh \
mirror_gre_lag_lacp.sh \
mirror_gre_neigh.sh \
mirror_gre_nh.sh \
- mirror_gre.sh \
- mirror_gre_vlan_bridge_1q.sh \
mirror_gre_vlan.sh \
+ mirror_gre_vlan_bridge_1q.sh \
mirror_vlan.sh \
no_forwarding.sh \
pedit_dsfield.sh \
pedit_ip.sh \
pedit_l4port.sh \
- q_in_vni_ipv6.sh \
q_in_vni.sh \
+ q_in_vni_ipv6.sh \
+ router.sh \
router_bridge.sh \
router_bridge_1d.sh \
router_bridge_1d_lag.sh \
router_bridge_lag.sh \
+ router_bridge_pvid_vlan_upper.sh \
router_bridge_vlan.sh \
router_bridge_vlan_upper.sh \
- router_bridge_pvid_vlan_upper.sh \
router_bridge_vlan_upper_pvid.sh \
router_broadcast.sh \
- router_mpath_nh_res.sh \
router_mpath_nh.sh \
+ router_mpath_nh_res.sh \
router_mpath_seed.sh \
router_multicast.sh \
router_multipath.sh \
router_nh.sh \
- router.sh \
router_vid_1.sh \
sch_ets.sh \
sch_red.sh \
@@ -85,31 +88,34 @@ TEST_PROGS = bridge_fdb_learning_limit.sh \
skbedit_priority.sh \
tc_actions.sh \
tc_chains.sh \
- tc_flower_router.sh \
tc_flower.sh \
- tc_flower_l2_miss.sh \
tc_flower_cfm.sh \
+ tc_flower_l2_miss.sh \
tc_flower_port_range.sh \
+ tc_flower_router.sh \
tc_mpls_l2vpn.sh \
tc_police.sh \
tc_shblocks.sh \
tc_tunnel_key.sh \
tc_vlan_modify.sh \
- vxlan_asymmetric_ipv6.sh \
vxlan_asymmetric.sh \
+ vxlan_asymmetric_ipv6.sh \
+ vxlan_bridge_1d.sh \
vxlan_bridge_1d_ipv6.sh \
- vxlan_bridge_1d_port_8472_ipv6.sh \
vxlan_bridge_1d_port_8472.sh \
- vxlan_bridge_1d.sh \
+ vxlan_bridge_1d_port_8472_ipv6.sh \
+ vxlan_bridge_1q.sh \
vxlan_bridge_1q_ipv6.sh \
- vxlan_bridge_1q_port_8472_ipv6.sh \
+ vxlan_bridge_1q_mc_ul.sh \
vxlan_bridge_1q_port_8472.sh \
- vxlan_bridge_1q.sh \
+ vxlan_bridge_1q_port_8472_ipv6.sh \
vxlan_reserved.sh \
+ vxlan_symmetric.sh \
vxlan_symmetric_ipv6.sh \
- vxlan_symmetric.sh
+# end of TEST_PROGS
-TEST_FILES := devlink_lib.sh \
+TEST_FILES := \
+ devlink_lib.sh \
fib_offload_lib.sh \
forwarding.config.sample \
ip6gre_lib.sh \
@@ -124,10 +130,12 @@ TEST_FILES := devlink_lib.sh \
sch_ets_tests.sh \
sch_tbf_core.sh \
sch_tbf_etsprio.sh \
- tc_common.sh
+ tc_common.sh \
+# end of TEST_FILES
TEST_INCLUDES := \
+ $(wildcard ../lib/sh/*.sh) \
../lib.sh \
- $(wildcard ../lib/sh/*.sh)
+# end of TEST_INCLUDES
include ../../lib.mk
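The "# end of TEST_PROGS" sentinel is what lets every entry above keep a
trailing backslash: appending a test becomes a one-line diff and the list
stays trivially sortable. The pattern, sketched in Make:

	TEST_PROGS := \
		first_test.sh \
		second_test.sh \
	# end of TEST_PROGS

GNU Make joins the continued lines and then strips the comment to the end
of the logical line, so no entry has to be special-cased without its
backslash.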
diff --git a/tools/testing/selftests/net/forwarding/README b/tools/testing/selftests/net/forwarding/README
index 7b41cff993ad..392a5a91ed37 100644
--- a/tools/testing/selftests/net/forwarding/README
+++ b/tools/testing/selftests/net/forwarding/README
@@ -57,6 +57,21 @@ o Code shall be checked using ShellCheck [1] prior to submission.
1. https://www.shellcheck.net/
+Cleanups
+--------
+
+o lib.sh brings in defer.sh (by way of ../lib.sh) by default. Consider
+ making use of the defer primitive to schedule automatic cleanups. This
+ makes it harder to forget to remove a temporary netdevice, kill a running
+ process or perform other cleanup when the test script is interrupted.
+
+o When adding a helper that dirties the environment, but schedules all
+ necessary cleanups through defer, consider prefixing it adf_ for
+ consistency with lib.sh and ../lib.sh helpers. This serves as an
+ immediately visible bit of documentation about the helper API.
+
+o Definitely do the above for any new code in lib.sh, if practical.
+
Customization
=============
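Concretely, the adf_ convention pairs a setup command with a deferred
teardown inside one helper. A hypothetical example (the dummy-device
helper below is illustrative, not from lib.sh), assuming the defer
primitive from ../lib.sh:

	# Hypothetical adf_-style helper: create a dummy netdevice and
	# schedule its removal for when the current defer scope unwinds.
	adf_dummy_create()
	{
		local dev=$1; shift

		ip link add name "$dev" up type dummy
		defer ip link del dev "$dev"
	}

Interrupting the test mid-run still deletes the device, which is the point
of the convention.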
diff --git a/tools/testing/selftests/net/forwarding/bridge_activity_notify.sh b/tools/testing/selftests/net/forwarding/bridge_activity_notify.sh
new file mode 100755
index 000000000000..522a5b1b046c
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/bridge_activity_notify.sh
@@ -0,0 +1,170 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# +-----------------------+ +------------------------+
+# | H1 (vrf) | | H2 (vrf) |
+# | 192.0.2.1/28 | | 192.0.2.2/28 |
+# | + $h1 | | + $h2 |
+# +----|------------------+ +----|-------------------+
+# | |
+# +----|--------------------------------------------------|-------------------+
+# | SW | | |
+# | +--|--------------------------------------------------|-----------------+ |
+# | | + $swp1 BR1 (802.1d) + $swp2 | |
+# | | | |
+# | +-----------------------------------------------------------------------+ |
+# +---------------------------------------------------------------------------+
+
+ALL_TESTS="
+ new_inactive_test
+ existing_active_test
+ norefresh_test
+"
+
+NUM_NETIFS=4
+source lib.sh
+
+h1_create()
+{
+ adf_simple_if_init "$h1" 192.0.2.1/28
+}
+
+h2_create()
+{
+ adf_simple_if_init "$h2" 192.0.2.2/28
+}
+
+switch_create()
+{
+ adf_ip_link_add br1 type bridge vlan_filtering 0 mcast_snooping 0 \
+ ageing_time "$LOW_AGEING_TIME"
+ adf_ip_link_set_up br1
+
+ adf_ip_link_set_master "$swp1" br1
+ adf_ip_link_set_up "$swp1"
+
+ adf_ip_link_set_master "$swp2" br1
+ adf_ip_link_set_up "$swp2"
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ adf_vrf_prepare
+
+ h1_create
+ h2_create
+ switch_create
+}
+
+fdb_active_wait()
+{
+ local mac=$1; shift
+
+ bridge -d fdb get "$mac" br br1 | grep -q -v "inactive"
+}
+
+fdb_inactive_wait()
+{
+ local mac=$1; shift
+
+ bridge -d fdb get "$mac" br br1 | grep -q "inactive"
+}
+
+new_inactive_test()
+{
+ local mac="00:11:22:33:44:55"
+
+ # Add a new FDB entry as static and inactive and check that it
+ # becomes active upon traffic.
+ RET=0
+
+ bridge fdb add "$mac" dev "$swp1" master static activity_notify inactive
+ bridge -d fdb get "$mac" br br1 | grep -q "inactive"
+ check_err $? "FDB entry not present as \"inactive\" when should"
+
+ $MZ "$h1" -c 1 -p 64 -a "$mac" -b bcast -t ip -q
+
+ busywait "$BUSYWAIT_TIMEOUT" fdb_active_wait "$mac"
+ check_err $? "FDB entry present as \"inactive\" when should not"
+
+ log_test "Transition from inactive to active"
+
+ bridge fdb del "$mac" dev "$swp1" master
+}
+
+existing_active_test()
+{
+ local mac="00:11:22:33:44:55"
+ local ageing_time
+
+ # Enable activity notifications on an existing dynamic FDB entry and
+ # check that it becomes inactive after the ageing time passed.
+ RET=0
+
+ bridge fdb add "$mac" dev "$swp1" master dynamic
+ bridge fdb replace "$mac" dev "$swp1" master static activity_notify norefresh
+
+ bridge -d fdb get "$mac" br br1 | grep -q "activity_notify"
+ check_err $? "FDB entry not present as \"activity_notify\" when should"
+
+ bridge -d fdb get "$mac" br br1 | grep -q "inactive"
+ check_fail $? "FDB entry present as \"inactive\" when should not"
+
+ ageing_time=$(bridge_ageing_time_get br1)
+ slowwait $((ageing_time * 2)) fdb_inactive_wait "$mac"
+ check_err $? "FDB entry not present as \"inactive\" when should"
+
+ log_test "Transition from active to inactive"
+
+ bridge fdb del "$mac" dev "$swp1" master
+}
+
+norefresh_test()
+{
+ local mac="00:11:22:33:44:55"
+ local updated_time
+
+ # Check that the "updated" time is reset when replacing an FDB entry
+ # without the "norefresh" keyword and that it is not reset when
+ # replacing with the "norefresh" keyword.
+ RET=0
+
+ bridge fdb add "$mac" dev "$swp1" master static
+ sleep 1
+
+ bridge fdb replace "$mac" dev "$swp1" master static activity_notify
+ updated_time=$(bridge -d -s -j fdb get "$mac" br br1 | jq '.[]["updated"]')
+ if [[ $updated_time -ne 0 ]]; then
+ check_err 1 "\"updated\" time was not reset when should"
+ fi
+
+ sleep 1
+ bridge fdb replace "$mac" dev "$swp1" master static norefresh
+ updated_time=$(bridge -d -s -j fdb get "$mac" br br1 | jq '.[]["updated"]')
+ if [[ $updated_time -eq 0 ]]; then
+ check_err 1 "\"updated\" time was reset when should not"
+ fi
+
+ log_test "Resetting of \"updated\" time"
+
+ bridge fdb del "$mac" dev "$swp1" master
+}
+
+if ! bridge fdb help 2>&1 | grep -q "activity_notify"; then
+ echo "SKIP: iproute2 too old, missing bridge FDB activity notification control"
+ exit "$ksft_skip"
+fi
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit "$EXIT_STATUS"
diff --git a/tools/testing/selftests/net/forwarding/bridge_fdb_local_vlan_0.sh b/tools/testing/selftests/net/forwarding/bridge_fdb_local_vlan_0.sh
new file mode 100755
index 000000000000..694de8ba97e4
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/bridge_fdb_local_vlan_0.sh
@@ -0,0 +1,387 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# +-----------------------+ +-----------------------+ +-----------------------+
+# | H1 (vrf) | | H2 (vrf) | | H3 (vrf) |
+# | + $h1 | | + $h2 | | + $h3 |
+# | | 192.0.2.1/28 | | | 192.0.2.2/28 | | | 192.0.2.18/28 |
+# | | 2001:db8:1::1/64 | | | 2001:db8:1::2/64 | | | 2001:db8:2::2/64 |
+# | | | | | | | | |
+# +----|------------------+ +----|------------------+ +----|------------------+
+# | | |
+# +----|-------------------------|-------------------------|------------------+
+# | +--|-------------------------|------------------+ | |
+# | | + $swp1 + $swp2 | + $swp3 |
+# | | | 192.0.2.17/28 |
+# | | BR1 (802.1q) | 2001:db8:2::1/64 |
+# | | 192.0.2.3/28 | |
+# | | 2001:db8:1::3/64 | |
+# | +-----------------------------------------------+ SW |
+# +---------------------------------------------------------------------------+
+#
+#shellcheck disable=SC2317 # SC doesn't see our uses of functions.
+#shellcheck disable=SC2034 # ... and global variables
+
+ALL_TESTS="
+ test_d_no_sharing
+ test_d_sharing
+ test_q_no_sharing
+ test_q_sharing
+ test_addr_set
+"
+
+NUM_NETIFS=6
+source lib.sh
+
+pMAC=00:11:22:33:44:55
+bMAC=00:11:22:33:44:66
+mMAC=00:11:22:33:44:77
+xMAC=00:11:22:33:44:88
+
+host_create()
+{
+ local h=$1; shift
+ local ipv4=$1; shift
+ local ipv6=$1; shift
+
+ adf_simple_if_init "$h" "$ipv4" "$ipv6"
+ adf_ip_route_add vrf "v$h" 192.0.2.16/28 nexthop via 192.0.2.3
+ adf_ip_route_add vrf "v$h" 2001:db8:2::/64 nexthop via 2001:db8:1::3
+}
+
+h3_create()
+{
+ adf_simple_if_init "$h3" 192.0.2.18/28 2001:db8:2::2/64
+ adf_ip_route_add vrf "v$h3" 192.0.2.0/28 nexthop via 192.0.2.17
+ adf_ip_route_add vrf "v$h3" 2001:db8:1::/64 nexthop via 2001:db8:2::1
+
+ tc qdisc add dev "$h3" clsact
+ defer tc qdisc del dev "$h3" clsact
+
+ tc filter add dev "$h3" ingress proto ip pref 104 \
+ flower skip_hw ip_proto udp dst_port 4096 \
+ action pass
+ defer tc filter del dev "$h3" ingress proto ip pref 104
+
+ tc qdisc add dev "$h2" clsact
+ defer tc qdisc del dev "$h2" clsact
+
+ tc filter add dev "$h2" ingress proto ip pref 104 \
+ flower skip_hw ip_proto udp dst_port 4096 \
+ action pass
+ defer tc filter del dev "$h2" ingress proto ip pref 104
+}
+
+switch_create()
+{
+ adf_ip_link_set_up "$swp1"
+
+ adf_ip_link_set_up "$swp2"
+
+ adf_ip_addr_add "$swp3" 192.0.2.17/28
+ adf_ip_addr_add "$swp3" 2001:db8:2::1/64
+ adf_ip_link_set_up "$swp3"
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ swp3=${NETIFS[p5]}
+ h3=${NETIFS[p6]}
+
+ adf_vrf_prepare
+ adf_forwarding_enable
+
+ host_create "$h1" 192.0.2.1/28 2001:db8:1::1/64
+ host_create "$h2" 192.0.2.2/28 2001:db8:1::2/64
+ h3_create
+
+ switch_create
+}
+
+adf_bridge_configure()
+{
+ local dev
+
+ adf_ip_addr_add br 192.0.2.3/28
+ adf_ip_addr_add br 2001:db8:1::3/64
+
+ adf_bridge_vlan_add dev br vid 1 pvid untagged self
+ adf_bridge_vlan_add dev br vid 2 self
+ adf_bridge_vlan_add dev br vid 3 self
+
+ for dev in "$swp1" "$swp2"; do
+ adf_ip_link_set_master "$dev" br
+ adf_bridge_vlan_add dev "$dev" vid 1 pvid untagged
+ adf_bridge_vlan_add dev "$dev" vid 2
+ adf_bridge_vlan_add dev "$dev" vid 3
+ done
+}
+
+adf_bridge_create()
+{
+ local mac
+
+ adf_ip_link_add br up type bridge vlan_default_pvid 0 "$@"
+ mac=$(mac_get br)
+ adf_bridge_configure
+ adf_ip_link_set_addr br "$mac"
+}
+
+check_fdb_local_vlan_0_support()
+{
+ if adf_ip_link_add XXbr up type bridge vlan_filtering 1 \
+ fdb_local_vlan_0 1 &>/dev/null; then
+ return 0
+ fi
+
+ log_test_skip "FDB sharing" \
+ "iproute 2 or the kernel do not support fdb_local_vlan_0"
+}
+
+check_mac_presence()
+{
+ local should_fail=$1; shift
+ local dev=$1; shift
+ local vlan=$1; shift
+ local mac
+
+ mac=$(mac_get "$dev")
+
+ if ((vlan == 0)); then
+ vlan=null
+ fi
+
+ bridge -j fdb show dev "$dev" |
+ jq -e --arg mac "$mac" --argjson vlan "$vlan" \
+ '.[] | select(.mac == $mac) | select(.vlan == $vlan)' > /dev/null
+ check_err_fail "$should_fail" $? "FDB dev $dev vid $vlan addr $mac exists"
+}
+
+do_sharing_test()
+{
+ local should_fail=$1; shift
+ local what=$1; shift
+ local dev
+
+ RET=0
+
+ for dev in "$swp1" "$swp2" br; do
+ check_mac_presence 0 "$dev" 0
+ check_mac_presence "$should_fail" "$dev" 1
+ check_mac_presence "$should_fail" "$dev" 2
+ check_mac_presence "$should_fail" "$dev" 3
+ done
+
+ log_test "$what"
+}
+
+do_end_to_end_test()
+{
+ local mac=$1; shift
+ local what=$1; shift
+ local probe_dev=${1-$h3}; shift
+ local expect=${1-10}; shift
+
+ local t0
+ local t1
+ local dd
+
+ RET=0
+
+ # In mausezahn, use $dev MAC as the destination MAC. In the MAC sharing
+ # context, that will cause an FDB miss on VLAN 1 and prompt a second
+ # lookup in VLAN 0.
+
+ t0=$(tc_rule_stats_get "$probe_dev" 104 ingress)
+
+ $MZ "$h1" -c 10 -p 64 -a own -b "$mac" \
+ -A 192.0.2.1 -B 192.0.2.18 -t udp "dp=4096,sp=2048" -q
+ sleep 1
+
+ t1=$(tc_rule_stats_get "$probe_dev" 104 ingress)
+ dd=$((t1 - t0))
+
+ ((dd == expect))
+ check_err $? "Expected $expect packets on $probe_dev got $dd"
+
+ log_test "$what"
+}
+
+do_tests()
+{
+ local should_fail=$1; shift
+ local what=$1; shift
+ local swp1_mac
+ local br_mac
+
+ swp1_mac=$(mac_get "$swp1")
+ br_mac=$(mac_get br)
+
+ do_sharing_test "$should_fail" "$what"
+ do_end_to_end_test "$swp1_mac" "$what: end to end, $swp1 MAC"
+ do_end_to_end_test "$br_mac" "$what: end to end, br MAC"
+}
+
+bridge_standard()
+{
+ local vlan_filtering=$1; shift
+
+ if ((vlan_filtering)); then
+ echo 802.1q
+ else
+ echo 802.1d
+ fi
+}
+
+nonexistent_fdb_test()
+{
+ local vlan_filtering=$1; shift
+ local standard
+
+ standard=$(bridge_standard "$vlan_filtering")
+
+ # We expect flooding, so $h2 should get the traffic.
+ do_end_to_end_test "$xMAC" "$standard: Nonexistent FDB" "$h2"
+}
+
+misleading_fdb_test()
+{
+ local vlan_filtering=$1; shift
+ local standard
+
+ standard=$(bridge_standard "$vlan_filtering")
+
+ defer_scope_push
+ # Add an FDB entry on VLAN 0. The lookup on VLAN-aware bridge
+ # shouldn't pick this up even with fdb_local_vlan_0 enabled, so
+ # the traffic should be flooded. This all holds on
+ # vlan_filtering bridge, on non-vlan_filtering one the FDB entry
+ # is expected to be found as usual, no flooding takes place.
+ #
+ # Adding only on VLAN 0 is a bit tricky, because bridge is
+ # trying to be nice and interprets the request as if the FDB
+ # should be added on each VLAN.
+
+ bridge fdb add "$mMAC" dev "$swp1" master
+ bridge fdb del "$mMAC" dev "$swp1" vlan 1 master
+ bridge fdb del "$mMAC" dev "$swp1" vlan 2 master
+ bridge fdb del "$mMAC" dev "$swp1" vlan 3 master
+
+ local expect=$((vlan_filtering ? 10 : 0))
+ do_end_to_end_test "$mMAC" \
+ "$standard: Lookup of non-local MAC on VLAN 0" \
+ "$h2" "$expect"
+ defer_scope_pop
+}
+
+change_mac()
+{
+ local dev=$1; shift
+ local mac=$1; shift
+ local cur_mac
+
+ cur_mac=$(mac_get "$dev")
+
+ log_info "Change $dev MAC $cur_mac -> $mac"
+ adf_ip_link_set_addr "$dev" "$mac"
+ defer log_info "Change $dev MAC back"
+}
+
+do_test_no_sharing()
+{
+ local vlan_filtering=$1; shift
+ local standard
+
+ standard=$(bridge_standard "$vlan_filtering")
+
+ adf_bridge_create vlan_filtering "$vlan_filtering"
+ setup_wait
+
+ do_tests 0 "$standard, no FDB sharing"
+
+ change_mac "$swp1" "$pMAC"
+ change_mac br "$bMAC"
+
+ do_tests 0 "$standard, no FDB sharing after MAC change"
+
+ in_defer_scope check_fdb_local_vlan_0_support || return
+
+ log_info "Set fdb_local_vlan_0=1"
+ ip link set dev br type bridge fdb_local_vlan_0 1
+
+ do_tests 1 "$standard, fdb sharing after toggle"
+}
+
+do_test_sharing()
+{
+ local vlan_filtering=$1; shift
+ local standard
+
+ standard=$(bridge_standard "$vlan_filtering")
+
+ in_defer_scope check_fdb_local_vlan_0_support || return
+
+ adf_bridge_create vlan_filtering "$vlan_filtering" fdb_local_vlan_0 1
+ setup_wait
+
+ do_tests 1 "$standard, FDB sharing"
+
+ nonexistent_fdb_test "$vlan_filtering"
+ misleading_fdb_test "$vlan_filtering"
+
+ change_mac "$swp1" "$pMAC"
+ change_mac br "$bMAC"
+
+ do_tests 1 "$standard, FDB sharing after MAC change"
+
+ log_info "Set fdb_local_vlan_0=0"
+ ip link set dev br type bridge fdb_local_vlan_0 0
+
+ do_tests 0 "$standard, No FDB sharing after toggle"
+}
+
+test_d_no_sharing()
+{
+ do_test_no_sharing 0
+}
+
+test_d_sharing()
+{
+ do_test_sharing 0
+}
+
+test_q_no_sharing()
+{
+ do_test_no_sharing 1
+}
+
+test_q_sharing()
+{
+ do_test_sharing 1
+}
+
+adf_addr_set_bridge_create()
+{
+ adf_ip_link_add br up type bridge vlan_filtering 0
+ adf_ip_link_set_addr br "$(mac_get br)"
+ adf_bridge_configure
+}
+
+test_addr_set()
+{
+ adf_addr_set_bridge_create
+ setup_wait
+
+ do_end_to_end_test "$(mac_get br)" "NET_ADDR_SET: end to end, br MAC"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+tests_run
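check_mac_presence above shows the idiom for asserting on iproute2 JSON
output: jq -e sets its exit status by whether the filter produced a
result, and VLAN 0 entries come out with a null vlan field, hence the
vlan=null substitution. Standalone, the same query reads:

	# Is $mac present in $dev's FDB on $vlan (pass null for VLAN 0)?
	bridge -j fdb show dev "$dev" |
		jq -e --arg mac "$mac" --argjson vlan "$vlan" \
			'.[] | select(.mac == $mac) | select(.vlan == $vlan)' \
			>/dev/null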
diff --git a/tools/testing/selftests/net/forwarding/bridge_mdb.sh b/tools/testing/selftests/net/forwarding/bridge_mdb.sh
index 8c1597ebc2d3..e86d77946585 100755
--- a/tools/testing/selftests/net/forwarding/bridge_mdb.sh
+++ b/tools/testing/selftests/net/forwarding/bridge_mdb.sh
@@ -28,6 +28,7 @@ ALL_TESTS="
cfg_test
fwd_test
ctrl_test
+ disable_test
"
NUM_NETIFS=4
@@ -64,7 +65,10 @@ h2_destroy()
switch_create()
{
- ip link add name br0 type bridge vlan_filtering 1 vlan_default_pvid 0 \
+ local vlan_filtering=$1; shift
+
+ ip link add name br0 type bridge \
+ vlan_filtering "$vlan_filtering" vlan_default_pvid 0 \
mcast_snooping 1 mcast_igmp_version 3 mcast_mld_version 2
bridge vlan add vid 10 dev br0 self
bridge vlan add vid 20 dev br0 self
@@ -118,7 +122,7 @@ setup_prepare()
h1_create
h2_create
- switch_create
+ switch_create 1
}
cleanup()
@@ -1357,6 +1361,98 @@ ctrl_test()
ctrl_mldv2_is_in_test
}
+check_group()
+{
+ local group=$1; shift
+ local vid=$1; shift
+ local should_fail=$1; shift
+ local when=$1; shift
+ local -a vidkws
+
+ if ((vid)); then
+ vidkws=(vid "$vid")
+ fi
+
+ bridge mdb get dev br0 grp "$group" "${vidkws[@]}" 2>/dev/null |
+ grep -q "port $swp1"
+ check_err_fail "$should_fail" $? "$group seen $when snooping disable:"
+}
+
+__disable_test()
+{
+ local vid=$1; shift
+ local what=$1; shift
+ local -a vidkws
+
+ if ((vid)); then
+ vidkws=(vid "$vid")
+ fi
+
+ RET=0
+
+ bridge mdb add dev br0 port "$swp1" grp ff0e::1 permanent \
+ "${vidkws[@]}" filter_mode include source_list 2001:db8:1::1
+ bridge mdb add dev br0 port "$swp1" grp ff0e::2 permanent \
+ "${vidkws[@]}" filter_mode exclude
+
+ bridge mdb add dev br0 port "$swp1" grp ff0e::3 \
+ "${vidkws[@]}" filter_mode include source_list 2001:db8:1::2
+ bridge mdb add dev br0 port "$swp1" grp ff0e::4 \
+ "${vidkws[@]}" filter_mode exclude
+
+ bridge mdb add dev br0 port "$swp1" grp 239.1.1.1 permanent \
+ "${vidkws[@]}" filter_mode include source_list 192.0.2.1
+ bridge mdb add dev br0 port "$swp1" grp 239.1.1.2 permanent \
+ "${vidkws[@]}" filter_mode exclude
+
+ bridge mdb add dev br0 port "$swp1" grp 239.1.1.3 \
+ "${vidkws[@]}" filter_mode include source_list 192.0.2.2
+ bridge mdb add dev br0 port "$swp1" grp 239.1.1.4 \
+ "${vidkws[@]}" filter_mode exclude
+
+ check_group ff0e::1 "$vid" 0 "before"
+ check_group ff0e::2 "$vid" 0 "before"
+ check_group ff0e::3 "$vid" 0 "before"
+ check_group ff0e::4 "$vid" 0 "before"
+
+ check_group 239.1.1.1 "$vid" 0 "before"
+ check_group 239.1.1.2 "$vid" 0 "before"
+ check_group 239.1.1.3 "$vid" 0 "before"
+ check_group 239.1.1.4 "$vid" 0 "before"
+
+ ip link set dev br0 type bridge mcast_snooping 0
+
+ check_group ff0e::1 "$vid" 0 "after"
+ check_group ff0e::2 "$vid" 0 "after"
+ check_group ff0e::3 "$vid" 1 "after"
+ check_group ff0e::4 "$vid" 1 "after"
+
+ check_group 239.1.1.1 "$vid" 0 "after"
+ check_group 239.1.1.2 "$vid" 0 "after"
+ check_group 239.1.1.3 "$vid" 1 "after"
+ check_group 239.1.1.4 "$vid" 1 "after"
+
+ log_test "$what: Flush after disable"
+
+ ip link set dev br0 type bridge mcast_snooping 1
+ sleep 10
+}
+
+disable_test()
+{
+ __disable_test 10 802.1q
+
+ switch_destroy
+ switch_create 0
+ setup_wait
+
+ __disable_test 0 802.1d
+
+ switch_destroy
+ switch_create 1
+ setup_wait
+}
+
if ! bridge mdb help 2>&1 | grep -q "flush"; then
echo "SKIP: iproute2 too old, missing bridge mdb flush support"
exit $ksft_skip
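The vidkws array in __disable_test is the usual trick for optional command
keywords: an empty array expands to nothing under "${vidkws[@]}", so one
bridge mdb invocation covers both the 802.1q (vid 10) and 802.1d (no vid)
passes. In isolation:

	local -a vidkws

	if ((vid)); then
		vidkws=(vid "$vid")
	fi

	# vid=10 runs "... permanent vid 10 filter_mode exclude";
	# vid=0 leaves the vid keyword out entirely.
	bridge mdb add dev br0 port "$swp1" grp ff0e::2 permanent \
		"${vidkws[@]}" filter_mode exclude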
diff --git a/tools/testing/selftests/net/forwarding/config b/tools/testing/selftests/net/forwarding/config
index 18fd69d8d937..ce64518aaa11 100644
--- a/tools/testing/selftests/net/forwarding/config
+++ b/tools/testing/selftests/net/forwarding/config
@@ -1,24 +1,23 @@
+CONFIG_BPF_SYSCALL=y
CONFIG_BRIDGE=m
-CONFIG_VLAN_8021Q=m
-CONFIG_BRIDGE_VLAN_FILTERING=y
CONFIG_BRIDGE_IGMP_SNOOPING=y
-CONFIG_NET_L3_MASTER_DEV=y
-CONFIG_IPV6_MULTIPLE_TABLES=y
-CONFIG_NET_VRF=m
-CONFIG_BPF_SYSCALL=y
+CONFIG_BRIDGE_VLAN_FILTERING=y
CONFIG_CGROUP_BPF=y
CONFIG_DUMMY=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
CONFIG_IPV6=y
CONFIG_IPV6_GRE=m
CONFIG_IPV6_MROUTE=y
CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_PIMSM_V2=y
-CONFIG_IP_MROUTE=y
-CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
CONFIG_MACVLAN=m
+CONFIG_NAMESPACES=y
CONFIG_NET_ACT_CT=m
+CONFIG_NET_ACT_GACT=m
CONFIG_NET_ACT_MIRRED=m
CONFIG_NET_ACT_MPLS=m
CONFIG_NET_ACT_PEDIT=m
@@ -27,29 +26,30 @@ CONFIG_NET_ACT_SAMPLE=m
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_TUNNEL_KEY=m
CONFIG_NET_ACT_VLAN=m
+CONFIG_NET_CLS_BASIC=m
CONFIG_NET_CLS_FLOWER=m
CONFIG_NET_CLS_MATCHALL=m
-CONFIG_NET_CLS_BASIC=m
CONFIG_NET_EMATCH=y
CONFIG_NET_EMATCH_META=m
+CONFIG_NETFILTER=y
CONFIG_NET_IPGRE=m
CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPIP=m
+CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_NET_NS=y
CONFIG_NET_SCH_ETS=m
CONFIG_NET_SCH_INGRESS=m
-CONFIG_NET_ACT_GACT=m
CONFIG_NET_SCH_PRIO=m
CONFIG_NET_SCH_RED=m
CONFIG_NET_SCH_TBF=m
CONFIG_NET_TC_SKB_EXT=y
CONFIG_NET_TEAM=y
CONFIG_NET_TEAM_MODE_LOADBALANCE=y
-CONFIG_NETFILTER=y
+CONFIG_NET_VRF=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_FLOW_TABLE=m
CONFIG_NF_TABLES=m
CONFIG_VETH=m
-CONFIG_NAMESPACES=y
-CONFIG_NET_NS=y
+CONFIG_VLAN_8021Q=m
CONFIG_VXLAN=m
CONFIG_XFRM_USER=m
diff --git a/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh b/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh
index 7d531f7091e6..5dbfab0e23e3 100755
--- a/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh
+++ b/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh
@@ -226,7 +226,7 @@ send_flowlabel()
# Generate 16384 echo requests, each with a random flow label.
ip vrf exec v$h1 sh -c \
"for _ in {1..16384}; do \
- $PING6 2001:db8:4::2 -F 0 -c 1 -q >/dev/null 2>&1; \
+ $PING6 -F 0 -c 1 -q 2001:db8:4::2 >/dev/null 2>&1; \
done"
}
diff --git a/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh b/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh
index dda11a4a9450..b4f17a5bbc61 100755
--- a/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh
+++ b/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh
@@ -321,7 +321,7 @@ send_flowlabel()
# Generate 16384 echo requests, each with a random flow label.
ip vrf exec v$h1 sh -c \
"for _ in {1..16384}; do \
- $PING6 2001:db8:2::2 -F 0 -c 1 -q >/dev/null 2>&1; \
+ $PING6 -F 0 -c 1 -q 2001:db8:2::2 >/dev/null 2>&1; \
done"
}
diff --git a/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh b/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh
index 49fa94b53a1c..25036e38043c 100755
--- a/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh
+++ b/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh
@@ -95,7 +95,7 @@ ipv6_in_too_big_err()
# Send too big packets
ip vrf exec $vrf_name \
- $PING6 -s 1300 2001:1:2::2 -c 1 -w $PING_TIMEOUT &> /dev/null
+ $PING6 -s 1300 -c 1 -w $PING_TIMEOUT 2001:1:2::2 &> /dev/null
local t1=$(ipv6_stats_get $rtr1 Ip6InTooBigErrors)
test "$((t1 - t0))" -ne 0
@@ -131,7 +131,7 @@ ipv6_in_addr_err()
# Disable forwarding temporary while sending the packet
sysctl -qw net.ipv6.conf.all.forwarding=0
ip vrf exec $vrf_name \
- $PING6 2001:1:2::2 -c 1 -w $PING_TIMEOUT &> /dev/null
+ $PING6 -c 1 -w $PING_TIMEOUT 2001:1:2::2 &> /dev/null
sysctl -qw net.ipv6.conf.all.forwarding=1
local t1=$(ipv6_stats_get $rtr1 Ip6InAddrErrors)
@@ -150,7 +150,7 @@ ipv6_in_discard()
# Add a policy to discard
ip xfrm policy add dst 2001:1:2::2/128 dir fwd action block
ip vrf exec $vrf_name \
- $PING6 2001:1:2::2 -c 1 -w $PING_TIMEOUT &> /dev/null
+ $PING6 -c 1 -w $PING_TIMEOUT 2001:1:2::2 &> /dev/null
ip xfrm policy del dst 2001:1:2::2/128 dir fwd
local t1=$(ipv6_stats_get $rtr1 Ip6InDiscards)
diff --git a/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh b/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh
index e28b4a079e52..b24acfa52a3a 100755
--- a/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh
+++ b/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh
@@ -323,7 +323,7 @@ send_flowlabel()
# Generate 16384 echo requests, each with a random flow label.
ip vrf exec v$h1 sh -c \
"for _ in {1..16384}; do \
- $PING6 2001:db8:2::2 -F 0 -c 1 -q >/dev/null 2>&1; \
+ $PING6 -F 0 -c 1 -q 2001:db8:2::2 >/dev/null 2>&1; \
done"
}
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index 508f3c700d71..a9034f0bb58b 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -37,6 +37,7 @@ declare -A NETIFS=(
: "${TEAMD:=teamd}"
: "${MCD:=smcrouted}"
: "${MC_CLI:=smcroutectl}"
+: "${MCD_TABLE_NAME:=selftests}"
# Constants for netdevice bring-up:
# Default time in seconds to wait for an interface to come up before giving up
@@ -141,6 +142,20 @@ check_tc_version()
fi
}
+check_tc_erspan_support()
+{
+ local dev=$1; shift
+
+ tc filter add dev $dev ingress pref 1 handle 1 flower \
+ erspan_opts 1:0:0:0 &> /dev/null
+ if [[ $? -ne 0 ]]; then
+ echo "SKIP: iproute2 too old; tc is missing erspan support"
+ return $ksft_skip
+ fi
+ tc filter del dev $dev ingress pref 1 handle 1 flower \
+ erspan_opts 1:0:0:0 &> /dev/null
+}
+
# Old versions of tc don't understand "mpls_uc"
check_tc_mpls_support()
{
@@ -525,9 +540,9 @@ setup_wait_dev_with_timeout()
return 1
}
-setup_wait()
+setup_wait_n()
{
- local num_netifs=${1:-$NUM_NETIFS}
+ local num_netifs=$1; shift
local i
for ((i = 1; i <= num_netifs; ++i)); do
@@ -538,6 +553,11 @@ setup_wait()
sleep $WAIT_TIME
}
+setup_wait()
+{
+ setup_wait_n "$NUM_NETIFS"
+}
+
wait_for_dev()
{
local dev=$1; shift
@@ -551,30 +571,6 @@ wait_for_dev()
fi
}
-cmd_jq()
-{
- local cmd=$1
- local jq_exp=$2
- local jq_opts=$3
- local ret
- local output
-
- output="$($cmd)"
- # it the command fails, return error right away
- ret=$?
- if [[ $ret -ne 0 ]]; then
- return $ret
- fi
- output=$(echo $output | jq -r $jq_opts "$jq_exp")
- ret=$?
- if [[ $ret -ne 0 ]]; then
- return $ret
- fi
- echo $output
- # return success only in case of non-empty output
- [ ! -z "$output" ]
-}
-
pre_cleanup()
{
if [ "${PAUSE_ON_CLEANUP}" = "yes" ]; then
@@ -603,6 +599,12 @@ vrf_cleanup()
ip -4 rule del pref 32765
}
+adf_vrf_prepare()
+{
+ vrf_prepare
+ defer vrf_cleanup
+}
+
__last_tb_id=0
declare -A __TB_IDS
@@ -715,6 +717,12 @@ simple_if_fini()
vrf_destroy $vrf_name
}
+adf_simple_if_init()
+{
+ simple_if_init "$@"
+ defer simple_if_fini "$@"
+}
+
tunnel_create()
{
local name=$1; shift
@@ -1015,6 +1023,12 @@ forwarding_restore()
sysctl_restore net.ipv4.conf.all.forwarding
}
+adf_forwarding_enable()
+{
+ forwarding_enable
+ defer forwarding_restore
+}
+
declare -A MTU_ORIG
mtu_set()
{
@@ -1271,8 +1285,8 @@ ping_do()
vrf_name=$(master_name_get $if_name)
ip vrf exec $vrf_name \
- $PING $args $dip -c $PING_COUNT -i 0.1 \
- -w $PING_TIMEOUT &> /dev/null
+ $PING $args -c $PING_COUNT -i 0.1 \
+ -w $PING_TIMEOUT $dip &> /dev/null
}
ping_test()
@@ -1302,8 +1316,8 @@ ping6_do()
vrf_name=$(master_name_get $if_name)
ip vrf exec $vrf_name \
- $PING6 $args $dip -c $PING_COUNT -i 0.1 \
- -w $PING_TIMEOUT &> /dev/null
+ $PING6 $args -c $PING_COUNT -i 0.1 \
+ -w $PING_TIMEOUT $dip &> /dev/null
}
ping6_test()
@@ -1757,6 +1771,51 @@ mc_send()
msend -g $groups -I $if_name -c 1 > /dev/null 2>&1
}
+adf_mcd_start()
+{
+ local ifs=("$@")
+
+ local table_name="$MCD_TABLE_NAME"
+ local smcroutedir
+ local pid
+ local if
+ local i
+
+ check_command "$MCD" || return 1
+ check_command "$MC_CLI" || return 1
+
+ smcroutedir=$(mktemp -d)
+ defer rm -rf "$smcroutedir"
+
+ for ((i = 1; i <= NUM_NETIFS; ++i)); do
+ echo "phyint ${NETIFS[p$i]} enable" >> \
+ "$smcroutedir/$table_name.conf"
+ done
+
+ for if in "${ifs[@]}"; do
+ if ! ip_link_has_flag "$if" MULTICAST; then
+ ip link set dev "$if" multicast on
+ defer ip link set dev "$if" multicast off
+ fi
+
+ echo "phyint $if enable" >> \
+ "$smcroutedir/$table_name.conf"
+ done
+
+ "$MCD" -N -I "$table_name" -f "$smcroutedir/$table_name.conf" \
+ -P "$smcroutedir/$table_name.pid"
+ busywait "$BUSYWAIT_TIMEOUT" test -e "$smcroutedir/$table_name.pid"
+ pid=$(cat "$smcroutedir/$table_name.pid")
+ defer kill_process "$pid"
+}
+
+mc_cli()
+{
+ local table_name="$MCD_TABLE_NAME"
+
+ "$MC_CLI" -I "$table_name" "$@"
+}
+
start_ip_monitor()
{
local mtype=$1; shift
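The adf_* wrappers above all follow one pattern: perform a setup step and, only if it succeeds, schedule the inverse operation on the defer stack, so tests no longer need hand-written teardown. A minimal sketch of the pattern, using a hypothetical helper that is not part of lib.sh:

adf_dummy_create()
{
	local name=$1; shift

	# Register the cleanup only when the setup itself succeeded, so a
	# failed setup is never "undone".
	ip link add name "$name" type dummy && \
		defer ip link del dev "$name"
}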
diff --git a/tools/testing/selftests/net/forwarding/lib_sh_test.sh b/tools/testing/selftests/net/forwarding/lib_sh_test.sh
index ff2accccaf4d..b4eda6c6199e 100755
--- a/tools/testing/selftests/net/forwarding/lib_sh_test.sh
+++ b/tools/testing/selftests/net/forwarding/lib_sh_test.sh
@@ -30,6 +30,11 @@ tfail()
do_test "tfail" false
}
+tfail2()
+{
+ do_test "tfail2" false
+}
+
txfail()
{
FAIL_TO_XFAIL=yes do_test "txfail" false
@@ -132,6 +137,8 @@ test_ret()
ret_subtest $ksft_fail "tfail" txfail tfail
ret_subtest $ksft_xfail "txfail" txfail txfail
+
+ ret_subtest $ksft_fail "tfail2" tfail2 tfail
}
exit_status_tests_run()
diff --git a/tools/testing/selftests/net/forwarding/local_termination.sh b/tools/testing/selftests/net/forwarding/local_termination.sh
index ecd34f364125..892895659c7e 100755
--- a/tools/testing/selftests/net/forwarding/local_termination.sh
+++ b/tools/testing/selftests/net/forwarding/local_termination.sh
@@ -176,6 +176,8 @@ run_test()
local rcv_dmac=$(mac_get $rcv_if_name)
local should_receive
+ setup_wait
+
tcpdump_start $rcv_if_name
mc_route_prepare $send_if_name
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh
index a20d22d1df36..8d4ae6c952a1 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh
@@ -238,7 +238,7 @@ test_lag_slave()
ip neigh flush dev br1
setup_wait_dev $up_dev
setup_wait_dev $host_dev
- $ARPING -I br1 192.0.2.130 -qfc 1
+ $ARPING -I br1 -qfc 1 192.0.2.130
sleep 2
mirror_test vrf-h1 192.0.2.1 192.0.2.18 $host_dev 1 ">= 10"
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
index 1b902cc579f6..a21c771908b3 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
@@ -196,7 +196,7 @@ test_span_gre_forbidden_egress()
bridge vlan add dev $swp3 vid 555
# Re-prime FDB
- $ARPING -I br1.555 192.0.2.130 -fqc 1
+ $ARPING -I br1.555 -fqc 1 192.0.2.130
sleep 1
quick_test_span_gre_dir $tundev
@@ -290,7 +290,7 @@ test_span_gre_fdb_roaming()
bridge fdb del dev $swp2 $h3mac vlan 555 master 2>/dev/null
# Re-prime FDB
- $ARPING -I br1.555 192.0.2.130 -fqc 1
+ $ARPING -I br1.555 -fqc 1 192.0.2.130
sleep 1
quick_test_span_gre_dir $tundev
diff --git a/tools/testing/selftests/net/forwarding/router.sh b/tools/testing/selftests/net/forwarding/router.sh
index b98ea9449b8b..dfb6646cb97b 100755
--- a/tools/testing/selftests/net/forwarding/router.sh
+++ b/tools/testing/selftests/net/forwarding/router.sh
@@ -18,6 +18,8 @@
# | 2001:db8:1::1/64 2001:db8:2::1/64 |
# | |
# +-----------------------------------------------------------------+
+#
+#shellcheck disable=SC2034 # SC doesn't see our uses of global variables
ALL_TESTS="
ping_ipv4
@@ -27,6 +29,7 @@ ALL_TESTS="
ipv4_sip_equal_dip
ipv6_sip_equal_dip
ipv4_dip_link_local
+ ipv4_sip_link_local
"
NUM_NETIFS=4
@@ -330,6 +333,32 @@ ipv4_dip_link_local()
tc filter del dev $rp2 egress protocol ip pref 1 handle 101 flower
}
+ipv4_sip_link_local()
+{
+ local sip=169.254.1.1
+
+ RET=0
+
+ # Disable rpfilter to prevent packets from being dropped because of it.
+ sysctl_set net.ipv4.conf.all.rp_filter 0
+ sysctl_set net.ipv4.conf."$rp1".rp_filter 0
+
+ tc filter add dev "$rp2" egress protocol ip pref 1 handle 101 \
+ flower src_ip "$sip" action pass
+
+ $MZ "$h1" -t udp "sp=54321,dp=12345" -c 5 -d 1msec -b "$rp1mac" \
+ -A "$sip" -B 198.51.100.2 -q
+
+ tc_check_packets "dev $rp2 egress" 101 5
+ check_err $? "Packets were dropped"
+
+ log_test "IPv4 source IP is link-local"
+
+ tc filter del dev "$rp2" egress protocol ip pref 1 handle 101 flower
+ sysctl_restore net.ipv4.conf."$rp1".rp_filter
+ sysctl_restore net.ipv4.conf.all.rp_filter
+}
+
trap cleanup EXIT
setup_prepare
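The new ipv4_sip_link_local test has to relax reverse-path filtering first: a 169.254.0.0/16 source fails the reverse route lookup, so with rp_filter enabled the router would drop the packets before the egress tc counter ever sees them. The knobs involved, sketched with eth0 standing in for $rp1 (the kernel applies the stricter of the "all" and per-interface values):

sysctl -w net.ipv4.conf.all.rp_filter=0
sysctl -w net.ipv4.conf.eth0.rp_filter=0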
diff --git a/tools/testing/selftests/net/forwarding/router_multicast.sh b/tools/testing/selftests/net/forwarding/router_multicast.sh
index 5a58b1ec8aef..83e52abdbc2e 100755
--- a/tools/testing/selftests/net/forwarding/router_multicast.sh
+++ b/tools/testing/selftests/net/forwarding/router_multicast.sh
@@ -33,10 +33,6 @@ NUM_NETIFS=6
source lib.sh
source tc_common.sh
-require_command $MCD
-require_command $MC_CLI
-table_name=selftests
-
h1_create()
{
simple_if_init $h1 198.51.100.2/28 2001:db8:1::2/64
@@ -149,25 +145,6 @@ router_destroy()
ip link set dev $rp1 down
}
-start_mcd()
-{
- SMCROUTEDIR="$(mktemp -d)"
-
- for ((i = 1; i <= $NUM_NETIFS; ++i)); do
- echo "phyint ${NETIFS[p$i]} enable" >> \
- $SMCROUTEDIR/$table_name.conf
- done
-
- $MCD -N -I $table_name -f $SMCROUTEDIR/$table_name.conf \
- -P $SMCROUTEDIR/$table_name.pid
-}
-
-kill_mcd()
-{
- pkill $MCD
- rm -rf $SMCROUTEDIR
-}
-
setup_prepare()
{
h1=${NETIFS[p1]}
@@ -179,7 +156,7 @@ setup_prepare()
rp3=${NETIFS[p5]}
h3=${NETIFS[p6]}
- start_mcd
+ adf_mcd_start || exit "$EXIT_STATUS"
vrf_prepare
@@ -206,7 +183,7 @@ cleanup()
vrf_cleanup
- kill_mcd
+ defer_scopes_cleanup
}
create_mcast_sg()
@@ -214,9 +191,9 @@ create_mcast_sg()
local if_name=$1; shift
local s_addr=$1; shift
local mcast=$1; shift
- local dest_ifs=${@}
+ local dest_ifs=("${@}")
- $MC_CLI -I $table_name add $if_name $s_addr $mcast $dest_ifs
+ mc_cli add "$if_name" "$s_addr" "$mcast" "${dest_ifs[@]}"
}
delete_mcast_sg()
@@ -224,9 +201,9 @@ delete_mcast_sg()
local if_name=$1; shift
local s_addr=$1; shift
local mcast=$1; shift
- local dest_ifs=${@}
+ local dest_ifs=("${@}")
- $MC_CLI -I $table_name remove $if_name $s_addr $mcast $dest_ifs
+ mc_cli remove "$if_name" "$s_addr" "$mcast" "${dest_ifs[@]}"
}
mcast_v4()
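The change from the scalar local dest_ifs=${@} to the array local dest_ifs=("${@}") keeps each destination interface as a separate word instead of flattening the list into one string that is later re-split on IFS. A quick illustration of the difference:

set -- swp2 swp3
dest_ifs=("$@")
printf '<%s>\n' "${dest_ifs[@]}"	# two words: <swp2> <swp3>
dest_ifs=${*}
printf '<%s>\n' $dest_ifs		# unquoted scalar: re-split, glob-expanded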
diff --git a/tools/testing/selftests/net/forwarding/sch_ets.sh b/tools/testing/selftests/net/forwarding/sch_ets.sh
index 1f6f53e284b5..6269d5e23487 100755
--- a/tools/testing/selftests/net/forwarding/sch_ets.sh
+++ b/tools/testing/selftests/net/forwarding/sch_ets.sh
@@ -11,6 +11,7 @@ ALL_TESTS="
ets_test_strict
ets_test_mixed
ets_test_dwrr
+ ets_test_plug
classifier_mode
ets_test_strict
ets_test_mixed
diff --git a/tools/testing/selftests/net/forwarding/sch_ets_core.sh b/tools/testing/selftests/net/forwarding/sch_ets_core.sh
index 8f9922c695b0..0453210271dc 100644
--- a/tools/testing/selftests/net/forwarding/sch_ets_core.sh
+++ b/tools/testing/selftests/net/forwarding/sch_ets_core.sh
@@ -165,8 +165,7 @@ h1_create()
{
local i;
- simple_if_init $h1
- defer simple_if_fini $h1
+ adf_simple_if_init $h1
mtu_set $h1 9900
defer mtu_restore $h1
@@ -182,8 +181,7 @@ h2_create()
{
local i
- simple_if_init $h2
- defer simple_if_fini $h2
+ adf_simple_if_init $h2
mtu_set $h2 9900
defer mtu_restore $h2
@@ -251,8 +249,7 @@ setup_prepare()
put=$swp2
hut=$h2
- vrf_prepare
- defer vrf_cleanup
+ adf_vrf_prepare
h1_create
h2_create
diff --git a/tools/testing/selftests/net/forwarding/sch_ets_tests.sh b/tools/testing/selftests/net/forwarding/sch_ets_tests.sh
index 08240d3e3c87..79d837a2868a 100644
--- a/tools/testing/selftests/net/forwarding/sch_ets_tests.sh
+++ b/tools/testing/selftests/net/forwarding/sch_ets_tests.sh
@@ -224,3 +224,11 @@ ets_test_dwrr()
ets_set_dwrr_two_bands
xfail_on_slow ets_dwrr_test_01
}
+
+ets_test_plug()
+{
+ ets_change_qdisc $put 2 "3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3" "1514 1514"
+ tc qdisc add dev $put handle 20: parent 10:4 plug
+ start_traffic_pktsize 100 $h1.10 192.0.2.1 192.0.2.2 00:c1:a0:c1:a0:00 "-c 1"
+ ets_qdisc_setup $put 2
+}
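sch_plug holds every packet it enqueues until explicitly released, so ets_test_plug effectively re-runs the ETS configuration (ets_qdisc_setup) while packets are still queued in a child that never hands any back. The manual equivalent of the new steps, reusing $put from sch_ets_core.sh:

tc qdisc add dev "$put" handle 20: parent 10:4 plug
# packets classified into band 4 now accumulate inside the plug qdisc;
# reconfiguring ETS on top then runs against this queued-up state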
diff --git a/tools/testing/selftests/net/forwarding/sch_red.sh b/tools/testing/selftests/net/forwarding/sch_red.sh
index af166662b78a..f2a3d9254642 100755
--- a/tools/testing/selftests/net/forwarding/sch_red.sh
+++ b/tools/testing/selftests/net/forwarding/sch_red.sh
@@ -52,8 +52,7 @@ PKTSZ=1400
h1_create()
{
- simple_if_init $h1 192.0.2.1/28
- defer simple_if_fini $h1 192.0.2.1/28
+ adf_simple_if_init $h1 192.0.2.1/28
mtu_set $h1 10000
defer mtu_restore $h1
@@ -65,8 +64,7 @@ h1_create()
h2_create()
{
- simple_if_init $h2 192.0.2.2/28
- defer simple_if_fini $h2 192.0.2.2/28
+ adf_simple_if_init $h2 192.0.2.2/28
mtu_set $h2 10000
defer mtu_restore $h2
@@ -74,8 +72,7 @@ h2_create()
h3_create()
{
- simple_if_init $h3 192.0.2.3/28
- defer simple_if_fini $h3 192.0.2.3/28
+ adf_simple_if_init $h3 192.0.2.3/28
mtu_set $h3 10000
defer mtu_restore $h3
@@ -125,8 +122,7 @@ setup_prepare()
h3_mac=$(mac_get $h3)
- vrf_prepare
- defer vrf_cleanup
+ adf_vrf_prepare
h1_create
h2_create
diff --git a/tools/testing/selftests/net/forwarding/sch_tbf_core.sh b/tools/testing/selftests/net/forwarding/sch_tbf_core.sh
index ec309a5086bc..070c17faa9e4 100644
--- a/tools/testing/selftests/net/forwarding/sch_tbf_core.sh
+++ b/tools/testing/selftests/net/forwarding/sch_tbf_core.sh
@@ -59,8 +59,7 @@ host_create()
local dev=$1; shift
local host=$1; shift
- simple_if_init $dev
- defer simple_if_fini $dev
+ adf_simple_if_init $dev
mtu_set $dev 10000
defer mtu_restore $dev
@@ -149,8 +148,7 @@ setup_prepare()
h2_mac=$(mac_get $h2)
- vrf_prepare
- defer vrf_cleanup
+ adf_vrf_prepare
h1_create
h2_create
diff --git a/tools/testing/selftests/net/forwarding/tc_flower.sh b/tools/testing/selftests/net/forwarding/tc_flower.sh
index b1daad19b01e..b58909a93112 100755
--- a/tools/testing/selftests/net/forwarding/tc_flower.sh
+++ b/tools/testing/selftests/net/forwarding/tc_flower.sh
@@ -6,7 +6,7 @@ ALL_TESTS="match_dst_mac_test match_src_mac_test match_dst_ip_test \
match_ip_tos_test match_indev_test match_ip_ttl_test
match_mpls_label_test \
match_mpls_tc_test match_mpls_bos_test match_mpls_ttl_test \
- match_mpls_lse_test"
+ match_mpls_lse_test match_erspan_opts_test"
NUM_NETIFS=2
source tc_common.sh
source lib.sh
@@ -676,6 +676,56 @@ match_mpls_lse_test()
log_test "mpls lse match ($tcflags)"
}
+match_erspan_opts_test()
+{
+ RET=0
+
+ check_tc_erspan_support $h2 || return 0
+
+ # h1 erspan setup
+ tunnel_create erspan1 erspan 192.0.2.1 192.0.2.2 dev $h1 seq key 1001 \
+ tos C ttl 64 erspan_ver 1 erspan 6789 # ERSPAN Type II
+ tunnel_create erspan2 erspan 192.0.2.1 192.0.2.2 dev $h1 seq key 1002 \
+ tos C ttl 64 erspan_ver 2 erspan_dir egress erspan_hwid 63 # ERSPAN Type III
+ ip link set dev erspan1 master v$h1
+ ip link set dev erspan2 master v$h1
+ # h2 erspan setup
+ ip link add ep-ex type erspan ttl 64 external # To collect tunnel info
+ ip link set ep-ex up
+ ip link set dev ep-ex master v$h2
+ tc qdisc add dev ep-ex clsact
+
+ # ERSPAN Type II [decap direction]
+ tc filter add dev ep-ex ingress protocol ip handle 101 flower \
+ $tcflags enc_src_ip 192.0.2.1 enc_dst_ip 192.0.2.2 \
+ enc_key_id 1001 erspan_opts 1:6789:0:0 \
+ action drop
+ # ERSPAN Type III [decap direction]
+ tc filter add dev ep-ex ingress protocol ip handle 102 flower \
+ $tcflags enc_src_ip 192.0.2.1 enc_dst_ip 192.0.2.2 \
+ enc_key_id 1002 erspan_opts 2:0:1:63 action drop
+
+ ep1mac=$(mac_get erspan1)
+ $MZ erspan1 -c 1 -p 64 -a $ep1mac -b $h2mac -t ip -q
+ tc_check_packets "dev ep-ex ingress" 101 1
+ check_err $? "ERSPAN Type II"
+
+ ep2mac=$(mac_get erspan2)
+ $MZ erspan2 -c 1 -p 64 -a $ep2mac -b $h2mac -t ip -q
+ tc_check_packets "dev ep-ex ingress" 102 1
+ check_err $? "ERSPAN Type III"
+
+ # h2 erspan cleanup
+ tc qdisc del dev ep-ex clsact
+ tunnel_destroy ep-ex
+ # h1 erspan cleanup
+ tunnel_destroy erspan2 # ERSPAN Type III
+ tunnel_destroy erspan1 # ERSPAN Type II
+
+ log_test "erspan_opts match ($tcflags)"
+}
+
setup_prepare()
{
h1=${NETIFS[p1]}
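erspan_opts packs four colon-separated fields, VERSION:INDEX:DIR:HWID. For ERSPAN Type II (version 1) only the session index is meaningful, hence the 1:6789:0:0 match above; for Type III (version 2) the direction and hardware ID matter instead, hence 2:0:1:63 (egress, hwid 63). Reduced to a standalone sketch, the decap-side match from the test looks like:

ip link add ep-ex type erspan ttl 64 external	# collect-metadata device
tc qdisc add dev ep-ex clsact
tc filter add dev ep-ex ingress protocol ip flower \
	enc_src_ip 192.0.2.1 enc_dst_ip 192.0.2.2 enc_key_id 1002 \
	erspan_opts 2:0:1:63 action drop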
diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1q_mc_ul.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1q_mc_ul.sh
new file mode 100755
index 000000000000..6a570d256e07
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1q_mc_ul.sh
@@ -0,0 +1,766 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# +-----------------------------------------+
+# | + $h1.10 + $h1.20 |
+# | | 192.0.2.1/28 | 2001:db8:1::1/64 |
+# | \________ ________/ |
+# | \ / |
+# | + $h1 H1 (vrf) |
+# +-----------|-----------------------------+
+# |
+# +-----------|----------------------------------------------------------------+
+# | +---------|--------------------------------------+ SWITCH (main vrf) |
+# | | + $swp1 BR1 (802.1q) | |
+# | | vid 10 20 | |
+# | | | |
+# | | + vx10 (vxlan) + vx20 (vxlan) | + lo10 (dummy) |
+# | | local 192.0.2.100 local 2001:db8:4::1 | 192.0.2.100/28 |
+# | | group 233.252.0.1 group ff0e::1:2:3 | 2001:db8:4::1/64 |
+# | | id 1000 id 2000 | |
+# | | vid 10 pvid untagged vid 20 pvid untagged | |
+# | +------------------------------------------------+ |
+# | |
+# | + $swp2 $swp3 + |
+# | | 192.0.2.33/28 192.0.2.65/28 | |
+# | | 2001:db8:2::1/64 2001:db8:3::1/64 | |
+# | | | |
+# +---|--------------------------------------------------------------------|---+
+# | |
+# +---|--------------------------------+ +--------------------------------|---+
+# | | H2 (vrf) | | H3 (vrf) | |
+# | +-|----------------------------+ | | +-----------------------------|-+ |
+# | | + $h2 BR2 (802.1d) | | | | BR3 (802.1d) $h3 + | |
+# | | | | | | | |
+# | | + v1$h2 (veth) | | | | v1$h3 (veth) + | |
+# | +-|----------------------------+ | | +-----------------------------|-+ |
+# | | | | | |
+# +---|--------------------------------+ +--------------------------------|---+
+# | |
+# +---|--------------------------------+ +--------------------------------|---+
+# | + v2$h2 (veth) NS2 (netns) | | NS3 (netns) v2$h3 (veth) + |
+# | 192.0.2.34/28 | | 192.0.2.66/28 |
+# | 2001:db8:2::2/64 | | 2001:db8:3::2/64 |
+# | | | |
+# | +--------------------------------+ | | +--------------------------------+ |
+# | | BR1 (802.1q) | | | | BR1 (802.1q) | |
+# | | + vx10 (vxlan) | | | | + vx10 (vxlan) | |
+# | | local 192.0.2.34 | | | | local 192.0.2.66 | |
+# | | group 233.252.0.1 dev v2$h2 | | | | group 233.252.0.1 dev v2$h3 | |
+# | | id 1000 dstport $VXPORT | | | | id 1000 dstport $VXPORT | |
+# | | vid 10 pvid untagged | | | | vid 10 pvid untagged | |
+# | | | | | | | |
+# | | + vx20 (vxlan) | | | | + vx20 (vxlan) | |
+# | | local 2001:db8:2::2 | | | | local 2001:db8:3::2 | |
+# | | group ff0e::1:2:3 dev v2$h2 | | | | group ff0e::1:2:3 dev v2$h3 | |
+# | | id 2000 dstport $VXPORT | | | | id 2000 dstport $VXPORT | |
+# | | vid 20 pvid untagged | | | | vid 20 pvid untagged | |
+# | | | | | | | |
+# | | + w1 (veth) | | | | + w1 (veth) | |
+# | | | vid 10 20 | | | | | vid 10 20 | |
+# | +--|-----------------------------+ | | +--|-----------------------------+ |
+# | | | | | |
+# | +--|-----------------------------+ | | +--|-----------------------------+ |
+# | | + w2 (veth) VW2 (vrf) | | | | + w2 (veth) VW2 (vrf) | |
+# | | |\ | | | | |\ | |
+# | | | + w2.10 | | | | | + w2.10 | |
+# | | | 192.0.2.3/28 | | | | | 192.0.2.4/28 | |
+# | | | | | | | | | |
+# | | + w2.20 | | | | + w2.20 | |
+# | | 2001:db8:1::3/64 | | | | 2001:db8:1::4/64 | |
+# | +--------------------------------+ | | +--------------------------------+ |
+# +------------------------------------+ +------------------------------------+
+#
+#shellcheck disable=SC2317 # SC doesn't see our uses of functions.
+
+: "${VXPORT:=4789}"
+export VXPORT
+
+: "${GROUP4:=233.252.0.1}"
+export GROUP4
+
+: "${GROUP6:=ff0e::1:2:3}"
+export GROUP6
+
+: "${IPMR:=lo10}"
+
+ALL_TESTS="
+ ipv4_nomcroute
+ ipv4_mcroute
+ ipv4_mcroute_changelink
+ ipv4_mcroute_starg
+ ipv4_mcroute_noroute
+ ipv4_mcroute_fdb
+ ipv4_mcroute_fdb_oif0
+ ipv4_mcroute_fdb_oif0_sep
+
+ ipv6_nomcroute
+ ipv6_mcroute
+ ipv6_mcroute_changelink
+ ipv6_mcroute_starg
+ ipv6_mcroute_noroute
+ ipv6_mcroute_fdb
+ ipv6_mcroute_fdb_oif0
+
+ ipv4_nomcroute_rx
+ ipv4_mcroute_rx
+ ipv4_mcroute_starg_rx
+ ipv4_mcroute_fdb_oif0_sep_rx
+ ipv4_mcroute_fdb_sep_rx
+
+ ipv6_nomcroute_rx
+ ipv6_mcroute_rx
+ ipv6_mcroute_starg_rx
+ ipv6_mcroute_fdb_sep_rx
+"
+
+NUM_NETIFS=6
+source lib.sh
+
+h1_create()
+{
+ adf_simple_if_init "$h1"
+
+ adf_ip_link_add "$h1.10" master "v$h1" link "$h1" type vlan id 10
+ adf_ip_link_set_up "$h1.10"
+ adf_ip_addr_add "$h1.10" 192.0.2.1/28
+
+ adf_ip_link_add "$h1.20" master "v$h1" link "$h1" type vlan id 20
+ adf_ip_link_set_up "$h1.20"
+ adf_ip_addr_add "$h1.20" 2001:db8:1::1/64
+}
+
+install_capture()
+{
+ local dev=$1; shift
+
+ tc qdisc add dev "$dev" clsact
+ defer tc qdisc del dev "$dev" clsact
+
+ tc filter add dev "$dev" ingress proto ip pref 104 \
+ flower skip_hw ip_proto udp dst_port "$VXPORT" \
+ action pass
+ defer tc filter del dev "$dev" ingress proto ip pref 104
+
+ tc filter add dev "$dev" ingress proto ipv6 pref 106 \
+ flower skip_hw ip_proto udp dst_port "$VXPORT" \
+ action pass
+ defer tc filter del dev "$dev" ingress proto ipv6 pref 106
+}
+
+h2_create()
+{
+ # $h2
+ adf_ip_link_set_up "$h2"
+
+ # H2
+ vrf_create "v$h2"
+ defer vrf_destroy "v$h2"
+
+ adf_ip_link_set_up "v$h2"
+
+ # br2
+ adf_ip_link_add br2 type bridge vlan_filtering 0 mcast_snooping 0
+ adf_ip_link_set_master br2 "v$h2"
+ adf_ip_link_set_up br2
+
+ # $h2
+ adf_ip_link_set_master "$h2" br2
+ install_capture "$h2"
+
+ # v1$h2
+ adf_ip_link_set_up "v1$h2"
+ adf_ip_link_set_master "v1$h2" br2
+}
+
+h3_create()
+{
+ # $h3
+ adf_ip_link_set_up "$h3"
+
+ # H3
+ vrf_create "v$h3"
+ defer vrf_destroy "v$h3"
+
+ adf_ip_link_set_up "v$h3"
+
+ # br3
+ adf_ip_link_add br3 type bridge vlan_filtering 0 mcast_snooping 0
+ adf_ip_link_set_master br3 "v$h3"
+ adf_ip_link_set_up br3
+
+ # $h3
+ adf_ip_link_set_master "$h3" br3
+ install_capture "$h3"
+
+ # v1$h3
+ adf_ip_link_set_up "v1$h3"
+ adf_ip_link_set_master "v1$h3" br3
+}
+
+switch_create()
+{
+ local swp1_mac
+
+ # br1
+ swp1_mac=$(mac_get "$swp1")
+ adf_ip_link_add br1 type bridge vlan_filtering 1 \
+ vlan_default_pvid 0 mcast_snooping 0
+ adf_ip_link_set_addr br1 "$swp1_mac"
+ adf_ip_link_set_up br1
+
+ # A dummy device, so that the IPv6 OIF=0 test deterministically installs
+ # a suitable MC route on $IPMR. Also used for the IPv6 RX!=TX ping test.
+ adf_ip_link_add "X$IPMR" up type dummy
+
+ # IPMR
+ adf_ip_link_add "$IPMR" up type dummy
+ adf_ip_addr_add "$IPMR" 192.0.2.100/28
+ adf_ip_addr_add "$IPMR" 2001:db8:4::1/64
+
+ # $swp1
+ adf_ip_link_set_up "$swp1"
+ adf_ip_link_set_master "$swp1" br1
+ adf_bridge_vlan_add vid 10 dev "$swp1"
+ adf_bridge_vlan_add vid 20 dev "$swp1"
+
+ # $swp2
+ adf_ip_link_set_up "$swp2"
+ adf_ip_addr_add "$swp2" 192.0.2.33/28
+ adf_ip_addr_add "$swp2" 2001:db8:2::1/64
+
+ # $swp3
+ adf_ip_link_set_up "$swp3"
+ adf_ip_addr_add "$swp3" 192.0.2.65/28
+ adf_ip_addr_add "$swp3" 2001:db8:3::1/64
+}
+
+vx_create()
+{
+ local name=$1; shift
+ local vid=$1; shift
+
+ adf_ip_link_add "$name" up type vxlan dstport "$VXPORT" \
+ nolearning noudpcsum tos inherit ttl 16 \
+ "$@"
+ adf_ip_link_set_master "$name" br1
+ adf_bridge_vlan_add vid "$vid" dev "$name" pvid untagged
+}
+export -f vx_create
+
+vx_wait()
+{
+ # Wait for all the ARP, IGMP etc. noise to settle down so that the
+ # tunnel is clear for measurements.
+ sleep 10
+}
+
+vx10_create()
+{
+ vx_create vx10 10 id 1000 "$@"
+}
+export -f vx10_create
+
+vx20_create()
+{
+ vx_create vx20 20 id 2000 "$@"
+}
+export -f vx20_create
+
+vx10_create_wait()
+{
+ vx10_create "$@"
+ vx_wait
+}
+
+vx20_create_wait()
+{
+ vx20_create "$@"
+ vx_wait
+}
+
+ns_init_common()
+{
+ local ns=$1; shift
+ local if_in=$1; shift
+ local ipv4_in=$1; shift
+ local ipv6_in=$1; shift
+ local ipv4_host=$1; shift
+ local ipv6_host=$1; shift
+
+ # v2$h2 / v2$h3
+ adf_ip_link_set_up "$if_in"
+ adf_ip_addr_add "$if_in" "$ipv4_in"
+ adf_ip_addr_add "$if_in" "$ipv6_in"
+
+ # br1
+ adf_ip_link_add br1 type bridge vlan_filtering 1 \
+ vlan_default_pvid 0 mcast_snooping 0
+ adf_ip_link_set_up br1
+
+ # vx10, vx20
+ vx10_create local "${ipv4_in%/*}" group "$GROUP4" dev "$if_in"
+ vx20_create local "${ipv6_in%/*}" group "$GROUP6" dev "$if_in"
+
+ # w1
+ adf_ip_link_add w1 type veth peer name w2
+ adf_ip_link_set_master w1 br1
+ adf_ip_link_set_up w1
+ adf_bridge_vlan_add vid 10 dev w1
+ adf_bridge_vlan_add vid 20 dev w1
+
+ # w2
+ adf_simple_if_init w2
+
+ # w2.10
+ adf_ip_link_add w2.10 master vw2 link w2 type vlan id 10
+ adf_ip_link_set_up w2.10
+ adf_ip_addr_add w2.10 "$ipv4_host"
+
+ # w2.20
+ adf_ip_link_add w2.20 master vw2 link w2 type vlan id 20
+ adf_ip_link_set_up w2.20
+ adf_ip_addr_add w2.20 "$ipv6_host"
+}
+export -f ns_init_common
+
+ns2_create()
+{
+ # NS2
+ ip netns add ns2
+ defer ip netns del ns2
+
+ # v2$h2
+ ip link set dev "v2$h2" netns ns2
+ defer ip -n ns2 link set dev "v2$h2" netns 1
+
+ in_ns ns2 \
+ ns_init_common ns2 "v2$h2" \
+ 192.0.2.34/28 2001:db8:2::2/64 \
+ 192.0.2.3/28 2001:db8:1::3/64
+}
+
+ns3_create()
+{
+ # NS3
+ ip netns add ns3
+ defer ip netns del ns3
+
+ # v2$h3
+ ip link set dev "v2$h3" netns ns3
+ defer ip -n ns3 link set dev "v2$h3" netns 1
+
+ ip -n ns3 link set dev "v2$h3" up
+
+ in_ns ns3 \
+ ns_init_common ns3 "v2$h3" \
+ 192.0.2.66/28 2001:db8:3::2/64 \
+ 192.0.2.4/28 2001:db8:1::4/64
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ swp3=${NETIFS[p5]}
+ h3=${NETIFS[p6]}
+
+ adf_vrf_prepare
+ adf_forwarding_enable
+
+ adf_ip_link_add "v1$h2" type veth peer name "v2$h2"
+ adf_ip_link_add "v1$h3" type veth peer name "v2$h3"
+
+ h1_create
+ h2_create
+ h3_create
+ switch_create
+ ns2_create
+ ns3_create
+}
+
+adf_install_broken_sg()
+{
+ adf_mcd_start "$IPMR" || exit "$EXIT_STATUS"
+
+ mc_cli add "$swp2" 192.0.2.100 "$GROUP4" "$swp1" "$swp3"
+ defer mc_cli remove "$swp2" 192.0.2.100 "$GROUP4" "$swp1" "$swp3"
+
+ mc_cli add "$swp2" 2001:db8:4::1 "$GROUP6" "$swp1" "$swp3"
+ defer mc_cli remove "$swp2" 2001:db8:4::1 "$GROUP6" "$swp1" "$swp3"
+}
+
+adf_install_rx()
+{
+ mc_cli add "$swp2" 0.0.0.0 "$GROUP4" "$IPMR"
+ defer mc_cli remove "$swp2" 0.0.0.0 "$GROUP4" lo10
+
+ mc_cli add "$swp3" 0.0.0.0 "$GROUP4" "$IPMR"
+ defer mc_cli remove "$swp3" 0.0.0.0 "$GROUP4" lo10
+
+ mc_cli add "$swp2" :: "$GROUP6" "$IPMR"
+ defer mc_cli remove "$swp2" :: "$GROUP6" lo10
+
+ mc_cli add "$swp3" :: "$GROUP6" "$IPMR"
+ defer mc_cli remove "$swp3" :: "$GROUP6" lo10
+}
+
+adf_install_sg()
+{
+ adf_mcd_start "$IPMR" || exit "$EXIT_STATUS"
+
+ mc_cli add "$IPMR" 192.0.2.100 "$GROUP4" "$swp2" "$swp3"
+ defer mc_cli remove "$IPMR" 192.0.2.33 "$GROUP4" "$swp2" "$swp3"
+
+ mc_cli add "$IPMR" 2001:db8:4::1 "$GROUP6" "$swp2" "$swp3"
+ defer mc_cli remove "$IPMR" 2001:db8:4::1 "$GROUP6" "$swp2" "$swp3"
+
+ adf_install_rx
+}
+
+adf_install_sg_sep()
+{
+ adf_mcd_start lo || exit "$EXIT_STATUS"
+
+ mc_cli add lo 192.0.2.120 "$GROUP4" "$swp2" "$swp3"
+ defer mc_cli remove lo 192.0.2.120 "$GROUP4" "$swp2" "$swp3"
+
+ mc_cli add lo 2001:db8:5::1 "$GROUP6" "$swp2" "$swp3"
+ defer mc_cli remove lo 2001:db8:5::1 "$GROUP6" "$swp2" "$swp3"
+}
+
+adf_install_sg_sep_rx()
+{
+ local lo=$1; shift
+
+ adf_mcd_start "$IPMR" "$lo" || exit "$EXIT_STATUS"
+
+ mc_cli add "$lo" 192.0.2.120 "$GROUP4" "$swp2" "$swp3"
+ defer mc_cli remove "$lo" 192.0.2.120 "$GROUP4" "$swp2" "$swp3"
+
+ mc_cli add "$lo" 2001:db8:5::1 "$GROUP6" "$swp2" "$swp3"
+ defer mc_cli remove "$lo" 2001:db8:5::1 "$GROUP6" "$swp2" "$swp3"
+
+ adf_install_rx
+}
+
+adf_install_starg()
+{
+ adf_mcd_start "$IPMR" || exit "$EXIT_STATUS"
+
+ mc_cli add "$IPMR" 0.0.0.0 "$GROUP4" "$swp2" "$swp3"
+ defer mc_cli remove "$IPMR" 0.0.0.0 "$GROUP4" "$swp2" "$swp3"
+
+ mc_cli add "$IPMR" :: "$GROUP6" "$swp2" "$swp3"
+ defer mc_cli remove "$IPMR" :: "$GROUP6" "$swp2" "$swp3"
+
+ adf_install_rx
+}
+
+do_packets_v4()
+{
+ local mac
+
+ mac=$(mac_get "$h2")
+ "$MZ" "$h1" -Q 10 -c 10 -d 100msec -p 64 -a own -b "$mac" \
+ -A 192.0.2.1 -B 192.0.2.2 -t udp sp=1234,dp=2345 -q
+}
+
+do_packets_v6()
+{
+ local mac
+
+ mac=$(mac_get "$h2")
+ "$MZ" -6 "$h1" -Q 20 -c 10 -d 100msec -p 64 -a own -b "$mac" \
+ -A 2001:db8:1::1 -B 2001:db8:1::2 -t udp sp=1234,dp=2345 -q
+}
+
+do_test()
+{
+ local ipv=$1; shift
+ local expect_h2=$1; shift
+ local expect_h3=$1; shift
+ local what=$1; shift
+
+ local pref=$((100 + ipv))
+ local t0_h2
+ local t0_h3
+ local t1_h2
+ local t1_h3
+ local d_h2
+ local d_h3
+
+ RET=0
+
+ t0_h2=$(tc_rule_stats_get "$h2" "$pref" ingress)
+ t0_h3=$(tc_rule_stats_get "$h3" "$pref" ingress)
+
+ "do_packets_v$ipv"
+ sleep 1
+
+ t1_h2=$(tc_rule_stats_get "$h2" "$pref" ingress)
+ t1_h3=$(tc_rule_stats_get "$h3" "$pref" ingress)
+
+ d_h2=$((t1_h2 - t0_h2))
+ d_h3=$((t1_h3 - t0_h3))
+
+ ((d_h2 == expect_h2))
+ check_err $? "Expected $expect_h2 packets on H2, got $d_h2"
+
+ ((d_h3 == expect_h3))
+ check_err $? "Expected $expect_h3 packets on H3, got $d_h3"
+
+ log_test "VXLAN MC flood $what"
+}
+
+ipv4_do_test_rx()
+{
+ local h3_should_fail=$1; shift
+ local what=$1; shift
+
+ RET=0
+
+ ping_do "$h1.10" 192.0.2.3
+ check_err $? "H2 should respond"
+
+ ping_do "$h1.10" 192.0.2.4
+ check_err_fail "$h3_should_fail" $? "H3 responds"
+
+ log_test "VXLAN MC flood $what"
+}
+
+ipv6_do_test_rx()
+{
+ local h3_should_fail=$1; shift
+ local what=$1; shift
+
+ RET=0
+
+ ping6_do "$h1.20" 2001:db8:1::3
+ check_err $? "H2 should respond"
+
+ ping6_do "$h1.20" 2001:db8:1::4
+ check_err_fail "$h3_should_fail" $? "H3 responds"
+
+ log_test "VXLAN MC flood $what"
+}
+
+ipv4_nomcroute()
+{
+ # Install a misleading (S,G) rule to attempt to trick the system into
+ # pushing the packets elsewhere.
+ adf_install_broken_sg
+ vx10_create_wait local 192.0.2.100 group "$GROUP4" dev "$swp2"
+ do_test 4 10 0 "IPv4 nomcroute"
+}
+
+ipv6_nomcroute()
+{
+ # Like for IPv4, install a misleading (S,G).
+ adf_install_broken_sg
+ vx20_create_wait local 2001:db8:4::1 group "$GROUP6" dev "$swp2"
+ do_test 6 10 0 "IPv6 nomcroute"
+}
+
+ipv4_nomcroute_rx()
+{
+ vx10_create local 192.0.2.100 group "$GROUP4" dev "$swp2"
+ ipv4_do_test_rx 1 "IPv4 nomcroute ping"
+}
+
+ipv6_nomcroute_rx()
+{
+ vx20_create local 2001:db8:4::1 group "$GROUP6" dev "$swp2"
+ ipv6_do_test_rx 1 "IPv6 nomcroute ping"
+}
+
+ipv4_mcroute()
+{
+ adf_install_sg
+ vx10_create_wait local 192.0.2.100 group "$GROUP4" dev "$IPMR" mcroute
+ do_test 4 10 10 "IPv4 mcroute"
+}
+
+ipv6_mcroute()
+{
+ adf_install_sg
+ vx20_create_wait local 2001:db8:4::1 group "$GROUP6" dev "$IPMR" mcroute
+ do_test 6 10 10 "IPv6 mcroute"
+}
+
+ipv4_mcroute_rx()
+{
+ adf_install_sg
+ vx10_create_wait local 192.0.2.100 group "$GROUP4" dev "$IPMR" mcroute
+ ipv4_do_test_rx 0 "IPv4 mcroute ping"
+}
+
+ipv6_mcroute_rx()
+{
+ adf_install_sg
+ vx20_create_wait local 2001:db8:4::1 group "$GROUP6" dev "$IPMR" mcroute
+ ipv6_do_test_rx 0 "IPv6 mcroute ping"
+}
+
+ipv4_mcroute_changelink()
+{
+ adf_install_sg
+ vx10_create_wait local 192.0.2.100 group "$GROUP4" dev "$IPMR"
+ ip link set dev vx10 type vxlan mcroute
+ sleep 1
+ do_test 4 10 10 "IPv4 mcroute changelink"
+}
+
+ipv6_mcroute_changelink()
+{
+ adf_install_sg
+ vx20_create_wait local 2001:db8:4::1 group "$GROUP6" dev "$IPMR" mcroute
+ ip link set dev vx20 type vxlan mcroute
+ sleep 1
+ do_test 6 10 10 "IPv6 mcroute changelink"
+}
+
+ipv4_mcroute_starg()
+{
+ adf_install_starg
+ vx10_create_wait local 192.0.2.100 group "$GROUP4" dev "$IPMR" mcroute
+ do_test 4 10 10 "IPv4 mcroute (*,G)"
+}
+
+ipv6_mcroute_starg()
+{
+ adf_install_starg
+ vx20_create_wait local 2001:db8:4::1 group "$GROUP6" dev "$IPMR" mcroute
+ do_test 6 10 10 "IPv6 mcroute (*,G)"
+}
+
+ipv4_mcroute_starg_rx()
+{
+ adf_install_starg
+ vx10_create_wait local 192.0.2.100 group "$GROUP4" dev "$IPMR" mcroute
+ ipv4_do_test_rx 0 "IPv4 mcroute (*,G) ping"
+}
+
+ipv6_mcroute_starg_rx()
+{
+ adf_install_starg
+ vx20_create_wait local 2001:db8:4::1 group "$GROUP6" dev "$IPMR" mcroute
+ ipv6_do_test_rx 0 "IPv6 mcroute (*,G) ping"
+}
+
+ipv4_mcroute_noroute()
+{
+ vx10_create_wait local 192.0.2.100 group "$GROUP4" dev "$IPMR" mcroute
+ do_test 4 0 0 "IPv4 mcroute, no route"
+}
+
+ipv6_mcroute_noroute()
+{
+ vx20_create_wait local 2001:db8:4::1 group "$GROUP6" dev "$IPMR" mcroute
+ do_test 6 0 0 "IPv6 mcroute, no route"
+}
+
+ipv4_mcroute_fdb()
+{
+ adf_install_sg
+ vx10_create_wait local 192.0.2.100 dev "$IPMR" mcroute
+ bridge fdb add dev vx10 \
+ 00:00:00:00:00:00 self static dst "$GROUP4" via "$IPMR"
+ do_test 4 10 10 "IPv4 mcroute FDB"
+}
+
+ipv6_mcroute_fdb()
+{
+ adf_install_sg
+ vx20_create_wait local 2001:db8:4::1 dev "$IPMR" mcroute
+ bridge -6 fdb add dev vx20 \
+ 00:00:00:00:00:00 self static dst "$GROUP6" via "$IPMR"
+ do_test 6 10 10 "IPv6 mcroute FDB"
+}
+
+# Use FDB to configure VXLAN in a way where oif=0 for purposes of FIB lookup.
+ipv4_mcroute_fdb_oif0()
+{
+ adf_install_sg
+ vx10_create_wait local 192.0.2.100 group "$GROUP4" dev "$IPMR" mcroute
+ bridge fdb del dev vx10 00:00:00:00:00:00
+ bridge fdb add dev vx10 00:00:00:00:00:00 self static dst "$GROUP4"
+ do_test 4 10 10 "IPv4 mcroute oif=0"
+}
+
+ipv6_mcroute_fdb_oif0()
+{
+ # The IPv6 tunnel lookup does not fall back to selection by source
+ # address. Instead it just does a FIB match, and that would find one of
+ # the several ff00::/8 multicast routes -- each device has one. In order
+ # to reliably force the $IPMR device, add a /128 route for the
+ # destination group address.
+ ip -6 route add table local multicast "$GROUP6/128" dev "$IPMR"
+ defer ip -6 route del table local multicast "$GROUP6/128" dev "$IPMR"
+
+ adf_install_sg
+ vx20_create_wait local 2001:db8:4::1 group "$GROUP6" dev "$IPMR" mcroute
+ bridge -6 fdb del dev vx20 00:00:00:00:00:00
+ bridge -6 fdb add dev vx20 00:00:00:00:00:00 self static dst "$GROUP6"
+ do_test 6 10 10 "IPv6 mcroute oif=0"
+}
+
+# As in the oif=0 test above, have the FIB lookup resolve to loopback instead of IPMR.
+# This doesn't work with IPv6 -- a MC route on lo would be marked as RTF_REJECT.
+ipv4_mcroute_fdb_oif0_sep()
+{
+ adf_install_sg_sep
+
+ adf_ip_addr_add lo 192.0.2.120/28
+ vx10_create_wait local 192.0.2.120 group "$GROUP4" dev "$IPMR" mcroute
+ bridge fdb del dev vx10 00:00:00:00:00:00
+ bridge fdb add dev vx10 00:00:00:00:00:00 self static dst "$GROUP4"
+ do_test 4 10 10 "IPv4 mcroute TX!=RX oif=0"
+}
+
+ipv4_mcroute_fdb_oif0_sep_rx()
+{
+ adf_install_sg_sep_rx lo
+
+ adf_ip_addr_add lo 192.0.2.120/28
+ vx10_create_wait local 192.0.2.120 group "$GROUP4" dev "$IPMR" mcroute
+ bridge fdb del dev vx10 00:00:00:00:00:00
+ bridge fdb add dev vx10 00:00:00:00:00:00 self static dst "$GROUP4"
+ ipv4_do_test_rx 0 "IPv4 mcroute TX!=RX oif=0 ping"
+}
+
+ipv4_mcroute_fdb_sep_rx()
+{
+ adf_install_sg_sep_rx lo
+
+ adf_ip_addr_add lo 192.0.2.120/28
+ vx10_create_wait local 192.0.2.120 group "$GROUP4" dev "$IPMR" mcroute
+ bridge fdb del dev vx10 00:00:00:00:00:00
+ bridge fdb add \
+ dev vx10 00:00:00:00:00:00 self static dst "$GROUP4" via lo
+ ipv4_do_test_rx 0 "IPv4 mcroute TX!=RX ping"
+}
+
+ipv6_mcroute_fdb_sep_rx()
+{
+ adf_install_sg_sep_rx "X$IPMR"
+
+ adf_ip_addr_add "X$IPMR" 2001:db8:5::1/64
+ vx20_create_wait local 2001:db8:5::1 group "$GROUP6" dev "$IPMR" mcroute
+ bridge -6 fdb del dev vx20 00:00:00:00:00:00
+ bridge -6 fdb add dev vx20 00:00:00:00:00:00 \
+ self static dst "$GROUP6" via "X$IPMR"
+ ipv6_do_test_rx 0 "IPv6 mcroute TX!=RX ping"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit "$EXIT_STATUS"
diff --git a/tools/testing/selftests/net/forwarding/vxlan_reserved.sh b/tools/testing/selftests/net/forwarding/vxlan_reserved.sh
index 46c31794b91b..709845123727 100755
--- a/tools/testing/selftests/net/forwarding/vxlan_reserved.sh
+++ b/tools/testing/selftests/net/forwarding/vxlan_reserved.sh
@@ -47,8 +47,7 @@ source lib.sh
h1_create()
{
- simple_if_init $h1 192.0.2.1/28
- defer simple_if_fini $h1 192.0.2.1/28
+ adf_simple_if_init $h1 192.0.2.1/28
tc qdisc add dev $h1 clsact
defer tc qdisc del dev $h1 clsact
@@ -60,24 +59,23 @@ h1_create()
switch_create()
{
- ip_link_add br1 type bridge vlan_filtering 0 mcast_snooping 0
+ adf_ip_link_add br1 type bridge vlan_filtering 0 mcast_snooping 0
# Make sure the bridge uses the MAC address of the local port and not
# that of the VxLAN's device.
- ip_link_set_addr br1 $(mac_get $swp1)
- ip_link_set_up br1
+ adf_ip_link_set_addr br1 $(mac_get $swp1)
+ adf_ip_link_set_up br1
- ip_link_set_up $rp1
- ip_addr_add $rp1 192.0.2.17/28
- ip_route_add 192.0.2.32/28 nexthop via 192.0.2.18
+ adf_ip_link_set_up $rp1
+ adf_ip_addr_add $rp1 192.0.2.17/28
+ adf_ip_route_add 192.0.2.32/28 nexthop via 192.0.2.18
- ip_link_set_master $swp1 br1
- ip_link_set_up $swp1
+ adf_ip_link_set_master $swp1 br1
+ adf_ip_link_set_up $swp1
}
vrp2_create()
{
- simple_if_init $rp2 192.0.2.18/28
- defer simple_if_fini $rp2 192.0.2.18/28
+ adf_simple_if_init $rp2 192.0.2.18/28
}
setup_prepare()
@@ -88,11 +86,8 @@ setup_prepare()
rp1=${NETIFS[p3]}
rp2=${NETIFS[p4]}
- vrf_prepare
- defer vrf_cleanup
-
- forwarding_enable
- defer forwarding_restore
+ adf_vrf_prepare
+ adf_forwarding_enable
h1_create
switch_create
@@ -200,10 +195,10 @@ vxlan_ping_do()
vxlan_device_add()
{
- ip_link_add vx1 up type vxlan id 1000 \
+ adf_ip_link_add vx1 up type vxlan id 1000 \
local 192.0.2.17 dstport "$VXPORT" \
nolearning noudpcsum tos inherit ttl 100 "$@"
- ip_link_set_master vx1 br1
+ adf_ip_link_set_master vx1 br1
}
vxlan_all_reserved_bits()
diff --git a/tools/testing/selftests/net/gre_ipv6_lladdr.sh b/tools/testing/selftests/net/gre_ipv6_lladdr.sh
index 5b34f6e1f831..48eb999a3120 100755
--- a/tools/testing/selftests/net/gre_ipv6_lladdr.sh
+++ b/tools/testing/selftests/net/gre_ipv6_lladdr.sh
@@ -24,7 +24,10 @@ setup_basenet()
ip -netns "${NS0}" address add dev lo 2001:db8::10/64 nodad
}
-# Check if network device has an IPv6 link-local address assigned.
+# Check the IPv6 configuration of a network device.
+#
+# We currently check the generation of the link-local IPv6 address and the
+# creation of the ff00::/8 multicast route.
#
# Parameters:
#
@@ -35,7 +38,7 @@ setup_basenet()
# a link-local address)
# * $4: The user visible name for the scenario being tested
#
-check_ipv6_ll_addr()
+check_ipv6_device_config()
{
local DEV="$1"
local EXTRA_MATCH="$2"
@@ -45,7 +48,11 @@ check_ipv6_ll_addr()
RET=0
set +e
ip -netns "${NS0}" -6 address show dev "${DEV}" scope link | grep "fe80::" | grep -q "${EXTRA_MATCH}"
- check_err_fail "${XRET}" $? ""
+ check_err_fail "${XRET}" $? "IPv6 link-local address generation"
+
+ ip -netns "${NS0}" -6 route show table local type multicast ff00::/8 proto kernel | grep -q "${DEV}"
+ check_err_fail 0 $? "IPv6 multicast route creation"
+
log_test "${MSG}"
set -e
}
@@ -102,20 +109,20 @@ test_gre_device()
;;
esac
- # Check that IPv6 link-local address is generated when device goes up
+ # Check the IPv6 device configuration when it goes up
ip netns exec "${NS0}" sysctl -qw net.ipv6.conf.gretest.addr_gen_mode="${ADDR_GEN_MODE}"
ip -netns "${NS0}" link set dev gretest up
- check_ipv6_ll_addr gretest "${MATCH_REGEXP}" "${XRET}" "config: ${MSG}"
+ check_ipv6_device_config gretest "${MATCH_REGEXP}" "${XRET}" "config: ${MSG}"
# Now disable link-local address generation
ip -netns "${NS0}" link set dev gretest down
ip netns exec "${NS0}" sysctl -qw net.ipv6.conf.gretest.addr_gen_mode=1
ip -netns "${NS0}" link set dev gretest up
- # Check that link-local address generation works when re-enabled while
- # the device is already up
+ # Check the IPv6 device configuration when link-local address
+ # generation is re-enabled while the device is already up
ip netns exec "${NS0}" sysctl -qw net.ipv6.conf.gretest.addr_gen_mode="${ADDR_GEN_MODE}"
- check_ipv6_ll_addr gretest "${MATCH_REGEXP}" "${XRET}" "update: ${MSG}"
+ check_ipv6_device_config gretest "${MATCH_REGEXP}" "${XRET}" "update: ${MSG}"
ip -netns "${NS0}" link del dev gretest
}
@@ -126,7 +133,7 @@ test_gre4()
local MODE
for GRE_TYPE in "gre" "gretap"; do
- printf "\n####\nTesting IPv6 link-local address generation on ${GRE_TYPE} devices\n####\n\n"
+ printf "\n####\nTesting IPv6 configuration of ${GRE_TYPE} devices\n####\n\n"
for MODE in "eui64" "none" "stable-privacy" "random"; do
test_gre_device "${GRE_TYPE}" 192.0.2.10 192.0.2.11 "${MODE}"
@@ -142,7 +149,7 @@ test_gre6()
local MODE
for GRE_TYPE in "ip6gre" "ip6gretap"; do
- printf "\n####\nTesting IPv6 link-local address generation on ${GRE_TYPE} devices\n####\n\n"
+ printf "\n####\nTesting IPv6 configuration of ${GRE_TYPE} devices\n####\n\n"
for MODE in "eui64" "none" "stable-privacy" "random"; do
test_gre_device "${GRE_TYPE}" 2001:db8::10 2001:db8::11 "${MODE}"
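Besides the fe80:: address itself, the test now verifies that bringing the device up also installs the kernel's per-device ff00::/8 route in the local table, which IPv6 multicast transmission on that device relies on. To inspect this by hand:

ip -6 route show table local type multicast ff00::/8 proto kernel
# expect one "ff00::/8 dev <DEV> ..." entry per IPv6-capable device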
diff --git a/tools/testing/selftests/net/gro.sh b/tools/testing/selftests/net/gro.sh
deleted file mode 100755
index 9e3f186bc2a1..000000000000
--- a/tools/testing/selftests/net/gro.sh
+++ /dev/null
@@ -1,105 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-
-readonly SERVER_MAC="aa:00:00:00:00:02"
-readonly CLIENT_MAC="aa:00:00:00:00:01"
-readonly TESTS=("data" "ack" "flags" "tcp" "ip" "large")
-readonly PROTOS=("ipv4" "ipv6")
-dev=""
-test="all"
-proto="ipv4"
-
-run_test() {
- local server_pid=0
- local exit_code=0
- local protocol=$1
- local test=$2
- local ARGS=( "--${protocol}" "--dmac" "${SERVER_MAC}" \
- "--smac" "${CLIENT_MAC}" "--test" "${test}" "--verbose" )
-
- setup_ns
- # Each test is run 6 times to deflake, because given the receive timing,
- # not all packets that should coalesce will be considered in the same flow
- # on every try.
- for tries in {1..6}; do
- # Actual test starts here
- ip netns exec $server_ns ./gro "${ARGS[@]}" "--rx" "--iface" "server" \
- 1>>log.txt &
- server_pid=$!
- sleep 0.5 # to allow for socket init
- ip netns exec $client_ns ./gro "${ARGS[@]}" "--iface" "client" \
- 1>>log.txt
- wait "${server_pid}"
- exit_code=$?
- if [[ ${test} == "large" && -n "${KSFT_MACHINE_SLOW}" && \
- ${exit_code} -ne 0 ]]; then
- echo "Ignoring errors due to slow environment" 1>&2
- exit_code=0
- fi
- if [[ "${exit_code}" -eq 0 ]]; then
- break;
- fi
- done
- cleanup_ns
- echo ${exit_code}
-}
-
-run_all_tests() {
- local failed_tests=()
- for proto in "${PROTOS[@]}"; do
- for test in "${TESTS[@]}"; do
- echo "running test ${proto} ${test}" >&2
- exit_code=$(run_test $proto $test)
- if [[ "${exit_code}" -ne 0 ]]; then
- failed_tests+=("${proto}_${test}")
- fi;
- done;
- done
- if [[ ${#failed_tests[@]} -ne 0 ]]; then
- echo "failed tests: ${failed_tests[*]}. \
- Please see log.txt for more logs"
- exit 1
- else
- echo "All Tests Succeeded!"
- fi;
-}
-
-usage() {
- echo "Usage: $0 \
- [-i <DEV>] \
- [-t data|ack|flags|tcp|ip|large] \
- [-p <ipv4|ipv6>]" 1>&2;
- exit 1;
-}
-
-while getopts "i:t:p:" opt; do
- case "${opt}" in
- i)
- dev="${OPTARG}"
- ;;
- t)
- test="${OPTARG}"
- ;;
- p)
- proto="${OPTARG}"
- ;;
- *)
- usage
- ;;
- esac
-done
-
-if [ -n "$dev" ]; then
- source setup_loopback.sh
-else
- source setup_veth.sh
-fi
-
-setup
-trap cleanup EXIT
-if [[ "${test}" == "all" ]]; then
- run_all_tests
-else
- exit_code=$(run_test "${proto}" "${test}")
- exit $exit_code
-fi;
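The bash driver is removed outright; GRO coverage now runs through the Python test under the ksft_setup_loopback.sh wrapper added further down in this series. Per that script's own header, the equivalent invocation becomes:

export NETIF=eth0
./net/lib/ksft_setup_loopback.sh ./drivers/net/gro.py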
diff --git a/tools/testing/selftests/net/hsr/Makefile b/tools/testing/selftests/net/hsr/Makefile
index 884cd2cc0681..4b6afc0fe9f8 100644
--- a/tools/testing/selftests/net/hsr/Makefile
+++ b/tools/testing/selftests/net/hsr/Makefile
@@ -2,7 +2,11 @@
top_srcdir = ../../../../..
-TEST_PROGS := hsr_ping.sh hsr_redbox.sh
+TEST_PROGS := \
+ hsr_ping.sh \
+ hsr_redbox.sh \
+# end of TEST_PROGS
+
TEST_FILES += hsr_common.sh
include ../../lib.mk
diff --git a/tools/testing/selftests/net/hsr/config b/tools/testing/selftests/net/hsr/config
index 555a868743f0..205cc4d3d64b 100644
--- a/tools/testing/selftests/net/hsr/config
+++ b/tools/testing/selftests/net/hsr/config
@@ -1,6 +1,6 @@
+CONFIG_BRIDGE=y
+CONFIG_HSR=y
CONFIG_IPV6=y
CONFIG_NET_SCH_NETEM=m
-CONFIG_HSR=y
CONFIG_VETH=y
-CONFIG_BRIDGE=y
CONFIG_VLAN_8021Q=m
diff --git a/tools/testing/selftests/net/io_uring_zerocopy_tx.c b/tools/testing/selftests/net/io_uring_zerocopy_tx.c
index 76e604e4810e..7bfeeb133705 100644
--- a/tools/testing/selftests/net/io_uring_zerocopy_tx.c
+++ b/tools/testing/selftests/net/io_uring_zerocopy_tx.c
@@ -106,14 +106,14 @@ static void do_tx(int domain, int type, int protocol)
ret = io_uring_queue_init(512, &ring, 0);
if (ret)
- error(1, ret, "io_uring: queue init");
+ error(1, -ret, "io_uring: queue init");
iov.iov_base = payload;
iov.iov_len = cfg_payload_len;
ret = io_uring_register_buffers(&ring, &iov, 1);
if (ret)
- error(1, ret, "io_uring: buffer registration");
+ error(1, -ret, "io_uring: buffer registration");
tstop = gettimeofday_ms() + cfg_runtime_ms;
do {
@@ -149,24 +149,24 @@ static void do_tx(int domain, int type, int protocol)
ret = io_uring_submit(&ring);
if (ret != cfg_nr_reqs)
- error(1, ret, "submit");
+ error(1, -ret, "submit");
if (cfg_cork)
do_setsockopt(fd, IPPROTO_UDP, UDP_CORK, 0);
for (i = 0; i < cfg_nr_reqs; i++) {
ret = io_uring_wait_cqe(&ring, &cqe);
if (ret)
- error(1, ret, "wait cqe");
+ error(1, -ret, "wait cqe");
if (cqe->user_data != NONZC_TAG &&
cqe->user_data != ZC_TAG)
- error(1, -EINVAL, "invalid cqe->user_data");
+ error(1, EINVAL, "invalid cqe->user_data");
if (cqe->flags & IORING_CQE_F_NOTIF) {
if (cqe->flags & IORING_CQE_F_MORE)
- error(1, -EINVAL, "invalid notif flags");
+ error(1, EINVAL, "invalid notif flags");
if (compl_cqes <= 0)
- error(1, -EINVAL, "notification mismatch");
+ error(1, EINVAL, "notification mismatch");
compl_cqes--;
i--;
io_uring_cqe_seen(&ring);
@@ -174,14 +174,14 @@ static void do_tx(int domain, int type, int protocol)
}
if (cqe->flags & IORING_CQE_F_MORE) {
if (cqe->user_data != ZC_TAG)
- error(1, cqe->res, "unexpected F_MORE");
+ error(1, -cqe->res, "unexpected F_MORE");
compl_cqes++;
}
if (cqe->res >= 0) {
packets++;
bytes += cqe->res;
} else if (cqe->res != -EAGAIN) {
- error(1, cqe->res, "send failed");
+ error(1, -cqe->res, "send failed");
}
io_uring_cqe_seen(&ring);
}
@@ -190,11 +190,11 @@ static void do_tx(int domain, int type, int protocol)
while (compl_cqes) {
ret = io_uring_wait_cqe(&ring, &cqe);
if (ret)
- error(1, ret, "wait cqe");
+ error(1, -ret, "wait cqe");
if (cqe->flags & IORING_CQE_F_MORE)
- error(1, -EINVAL, "invalid notif flags");
+ error(1, EINVAL, "invalid notif flags");
if (!(cqe->flags & IORING_CQE_F_NOTIF))
- error(1, -EINVAL, "missing notif flag");
+ error(1, EINVAL, "missing notif flag");
io_uring_cqe_seen(&ring);
compl_cqes--;
diff --git a/tools/testing/selftests/net/ip_local_port_range.c b/tools/testing/selftests/net/ip_local_port_range.c
index 29451d2244b7..e6834a6cfc8f 100644
--- a/tools/testing/selftests/net/ip_local_port_range.c
+++ b/tools/testing/selftests/net/ip_local_port_range.c
@@ -10,7 +10,7 @@
#include <fcntl.h>
#include <netinet/ip.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#ifndef IP_LOCAL_PORT_RANGE
#define IP_LOCAL_PORT_RANGE 51
diff --git a/tools/testing/selftests/net/ipsec.c b/tools/testing/selftests/net/ipsec.c
index 9b44a091802c..0ccf484b1d9d 100644
--- a/tools/testing/selftests/net/ipsec.c
+++ b/tools/testing/selftests/net/ipsec.c
@@ -34,7 +34,7 @@
#include <time.h>
#include <unistd.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#define printk(fmt, ...) \
ksft_print_msg("%d[%u] " fmt "\n", getpid(), __LINE__, ##__VA_ARGS__)
diff --git a/tools/testing/selftests/net/ipv6_force_forwarding.sh b/tools/testing/selftests/net/ipv6_force_forwarding.sh
new file mode 100755
index 000000000000..bf0243366caa
--- /dev/null
+++ b/tools/testing/selftests/net/ipv6_force_forwarding.sh
@@ -0,0 +1,105 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test IPv6 force_forwarding interface property
+#
+# This test verifies that the force_forwarding property works correctly:
+# - When global forwarding is disabled, packets are not forwarded normally
+# - When force_forwarding is enabled on an interface, packets are forwarded
+# regardless of the global forwarding setting
+
+source lib.sh
+
+cleanup() {
+ cleanup_ns $ns1 $ns2 $ns3
+}
+
+trap cleanup EXIT
+
+setup_test() {
+ # Create three namespaces: sender, router, receiver
+ setup_ns ns1 ns2 ns3
+
+ # Create veth pairs: ns1 <-> ns2 <-> ns3
+ ip link add name veth12 type veth peer name veth21
+ ip link add name veth23 type veth peer name veth32
+
+ # Move interfaces to namespaces
+ ip link set veth12 netns $ns1
+ ip link set veth21 netns $ns2
+ ip link set veth23 netns $ns2
+ ip link set veth32 netns $ns3
+
+ # Configure interfaces
+ ip -n $ns1 addr add 2001:db8:1::1/64 dev veth12 nodad
+ ip -n $ns2 addr add 2001:db8:1::2/64 dev veth21 nodad
+ ip -n $ns2 addr add 2001:db8:2::1/64 dev veth23 nodad
+ ip -n $ns3 addr add 2001:db8:2::2/64 dev veth32 nodad
+
+ # Bring up interfaces
+ ip -n $ns1 link set veth12 up
+ ip -n $ns2 link set veth21 up
+ ip -n $ns2 link set veth23 up
+ ip -n $ns3 link set veth32 up
+
+ # Add routes
+ ip -n $ns1 route add 2001:db8:2::/64 via 2001:db8:1::2
+ ip -n $ns3 route add 2001:db8:1::/64 via 2001:db8:2::1
+
+ # Disable global forwarding
+ ip netns exec $ns2 sysctl -qw net.ipv6.conf.all.forwarding=0
+}
+
+test_force_forwarding() {
+ local ret=0
+
+ echo "TEST: force_forwarding functionality"
+
+ # Check if force_forwarding sysctl exists
+ if ! ip netns exec $ns2 test -f /proc/sys/net/ipv6/conf/veth21/force_forwarding; then
+ echo "SKIP: force_forwarding not available"
+ return $ksft_skip
+ fi
+
+ # Test 1: Without force_forwarding, ping should fail
+ ip netns exec $ns2 sysctl -qw net.ipv6.conf.veth21.force_forwarding=0
+ ip netns exec $ns2 sysctl -qw net.ipv6.conf.veth23.force_forwarding=0
+
+ if ip netns exec $ns1 ping -6 -c 1 -W 2 2001:db8:2::2 &>/dev/null; then
+ echo "FAIL: ping succeeded when forwarding disabled"
+ ret=1
+ else
+ echo "PASS: forwarding disabled correctly"
+ fi
+
+ # Test 2: With force_forwarding enabled, ping should succeed
+ ip netns exec $ns2 sysctl -qw net.ipv6.conf.veth21.force_forwarding=1
+ ip netns exec $ns2 sysctl -qw net.ipv6.conf.veth23.force_forwarding=1
+
+ if ip netns exec $ns1 ping -6 -c 1 -W 2 2001:db8:2::2 &>/dev/null; then
+ echo "PASS: force_forwarding enabled forwarding"
+ else
+ echo "FAIL: ping failed with force_forwarding enabled"
+ ret=1
+ fi
+
+ return $ret
+}
+
+echo "IPv6 force_forwarding test"
+echo "=========================="
+
+setup_test
+test_force_forwarding
+ret=$?
+
+if [ $ret -eq 0 ]; then
+ echo "OK"
+ exit 0
+elif [ $ret -eq $ksft_skip ]; then
+ echo "SKIP"
+ exit $ksft_skip
+else
+ echo "FAIL"
+ exit 1
+fi
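force_forwarding acts as a per-interface override: with the global net.ipv6.conf.all.forwarding left at 0, enabling it on both router-side interfaces is enough for $ns2 to forward between them. The decisive knobs from the test, in isolation:

sysctl -w net.ipv6.conf.all.forwarding=0
sysctl -w net.ipv6.conf.veth21.force_forwarding=1
sysctl -w net.ipv6.conf.veth23.force_forwarding=1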
diff --git a/tools/testing/selftests/net/ipv6_fragmentation.c b/tools/testing/selftests/net/ipv6_fragmentation.c
new file mode 100644
index 000000000000..672c9fe086a7
--- /dev/null
+++ b/tools/testing/selftests/net/ipv6_fragmentation.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Brett A C Sheffield <bacs@librecast.net>
+ *
+ * Kernel selftest for the IPv6 fragmentation regression which affected stable
+ * kernels:
+ *
+ * https://lore.kernel.org/stable/aElivdUXqd1OqgMY@karahi.gladserv.com
+ *
+ * Commit: a18dfa9925b9 ("ipv6: save dontfrag in cork") was backported to stable
+ * without some prerequisite commits.
+ *
+ * This caused a regression when sending IPv6 UDP packets by preventing
+ * fragmentation and instead returning -1 (EMSGSIZE).
+ *
+ * This selftest demonstrates the issue by sending an IPv6 UDP packet to
+ * localhost (::1) on the loopback interface from the autoconfigured link-local
+ * address.
+ *
+ * sendmsg(2) returns bytes sent correctly on a working kernel, and returns -1
+ * (EMSGSIZE) when the regression is present.
+ *
+ * The regression was not present in the mainline kernel, but add this test to
+ * catch similar breakage in the future.
+ */
+
+#define _GNU_SOURCE
+
+#include <error.h>
+#include <net/if.h>
+#include <netinet/in.h>
+#include <sched.h>
+#include <stdio.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <unistd.h>
+#include "kselftest.h"
+
+#define MTU 1500
+#define LARGER_THAN_MTU 8192
+
+static void setup(void)
+{
+ struct ifreq ifr = {
+ .ifr_name = "lo"
+ };
+ int ctl;
+
+ /* we need to set MTU, so do this in a namespace to play nicely */
+ if (unshare(CLONE_NEWNET) == -1)
+ error(KSFT_FAIL, errno, "unshare");
+
+ ctl = socket(AF_LOCAL, SOCK_STREAM, 0);
+ if (ctl == -1)
+ error(KSFT_FAIL, errno, "socket");
+
+ /* ensure MTU is smaller than what we plan to send */
+ ifr.ifr_mtu = MTU;
+ if (ioctl(ctl, SIOCSIFMTU, &ifr) == -1)
+ error(KSFT_FAIL, errno, "ioctl: set MTU");
+
+ /* bring up interface */
+ if (ioctl(ctl, SIOCGIFFLAGS, &ifr) == -1)
+ error(KSFT_FAIL, errno, "ioctl SIOCGIFFLAGS");
+ ifr.ifr_flags = ifr.ifr_flags | IFF_UP;
+ if (ioctl(ctl, SIOCSIFFLAGS, &ifr) == -1)
+ error(KSFT_FAIL, errno, "ioctl: bring interface up");
+
+ if (close(ctl) == -1)
+ error(KSFT_FAIL, errno, "close");
+}
+
+int main(void)
+{
+ struct in6_addr addr = {
+ .s6_addr[15] = 0x01, /* ::1 */
+ };
+ struct sockaddr_in6 sa = {
+ .sin6_family = AF_INET6,
+ .sin6_addr = addr,
+ .sin6_port = htons(9) /* port 9/udp (DISCARD) */
+ };
+ static char buf[LARGER_THAN_MTU] = {0};
+ struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
+ struct msghdr msg = {
+ .msg_iov = &iov,
+ .msg_iovlen = 1,
+ .msg_name = (struct sockaddr *)&sa,
+ .msg_namelen = sizeof(sa),
+ };
+ ssize_t rc;
+ int s;
+
+ printf("Testing IPv6 fragmentation\n");
+ setup();
+ s = socket(AF_INET6, SOCK_DGRAM, 0);
+send_again:
+ rc = sendmsg(s, &msg, 0);
+ if (rc == -1) {
+ /* if interface wasn't ready, try again */
+ if (errno == EADDRNOTAVAIL) {
+ usleep(1000);
+ goto send_again;
+ }
+ error(KSFT_FAIL, errno, "sendmsg");
+ } else if (rc != LARGER_THAN_MTU) {
+ error(KSFT_FAIL, errno, "sendmsg returned %zi, expected %i",
+ rc, LARGER_THAN_MTU);
+ }
+ printf("[PASS] sendmsg() returned %zi\n", rc);
+ if (close(s) == -1)
+ error(KSFT_FAIL, errno, "close");
+ return KSFT_PASS;
+}
diff --git a/tools/testing/selftests/net/lib.sh b/tools/testing/selftests/net/lib.sh
index 006fdadcc4b9..f448bafb3f20 100644
--- a/tools/testing/selftests/net/lib.sh
+++ b/tools/testing/selftests/net/lib.sh
@@ -43,7 +43,7 @@ __ksft_status_merge()
weights[$i]=$((weight++))
done
- if [[ ${weights[$a]} > ${weights[$b]} ]]; then
+ if [[ ${weights[$a]} -ge ${weights[$b]} ]]; then
echo "$a"
return 0
else
@@ -240,6 +240,29 @@ create_netdevsim() {
echo nsim$id
}
+create_netdevsim_port() {
+ local nsim_id="$1"
+ local ns="$2"
+ local port_id="$3"
+ local perm_addr="$4"
+ local orig_dev
+ local new_dev
+ local nsim_path
+
+ nsim_path="/sys/bus/netdevsim/devices/netdevsim$nsim_id"
+
+ echo "$port_id $perm_addr" | ip netns exec "$ns" tee "$nsim_path"/new_port > /dev/null || return 1
+
+ orig_dev=$(ip netns exec "$ns" find "$nsim_path"/net/ -maxdepth 1 -name 'e*' | tail -n 1)
+ orig_dev=$(basename "$orig_dev")
+ new_dev="nsim${nsim_id}p$port_id"
+
+ ip -netns "$ns" link set dev "$orig_dev" name "$new_dev"
+ ip -netns "$ns" link set dev "$new_dev" up
+
+ echo "$new_dev"
+}
+
# Remove netdevsim with given id.
cleanup_netdevsim() {
local id="$1"
@@ -312,7 +335,7 @@ log_test_result()
local test_name=$1; shift
local opt_str=$1; shift
local result=$1; shift
- local retmsg=$1; shift
+ local retmsg=$1
printf "TEST: %-60s [%s]\n" "$test_name $opt_str" "$result"
if [[ $retmsg ]]; then
@@ -520,80 +543,86 @@ require_command()
fi
}
-ip_link_add()
+adf_ip_link_add()
{
local name=$1; shift
- ip link add name "$name" "$@"
- defer ip link del dev "$name"
+ ip link add name "$name" "$@" && \
+ defer ip link del dev "$name"
}
-ip_link_set_master()
+adf_ip_link_set_master()
{
local member=$1; shift
local master=$1; shift
- ip link set dev "$member" master "$master"
- defer ip link set dev "$member" nomaster
+ ip link set dev "$member" master "$master" && \
+ defer ip link set dev "$member" nomaster
}
-ip_link_set_addr()
+adf_ip_link_set_addr()
{
local name=$1; shift
local addr=$1; shift
local old_addr=$(mac_get "$name")
- ip link set dev "$name" address "$addr"
- defer ip link set dev "$name" address "$old_addr"
+ ip link set dev "$name" address "$addr" && \
+ defer ip link set dev "$name" address "$old_addr"
}
-ip_link_is_up()
+ip_link_has_flag()
{
local name=$1; shift
+ local flag=$1; shift
local state=$(ip -j link show "$name" |
- jq -r '(.[].flags[] | select(. == "UP")) // "DOWN"')
- [[ $state == "UP" ]]
+ jq --arg flag "$flag" 'any(.[].flags.[]; . == $flag)')
+ [[ $state == true ]]
}
-ip_link_set_up()
+ip_link_is_up()
+{
+ ip_link_has_flag "$1" UP
+}
+
+adf_ip_link_set_up()
{
local name=$1; shift
if ! ip_link_is_up "$name"; then
- ip link set dev "$name" up
- defer ip link set dev "$name" down
+ ip link set dev "$name" up && \
+ defer ip link set dev "$name" down
fi
}
-ip_link_set_down()
+adf_ip_link_set_down()
{
local name=$1; shift
if ip_link_is_up "$name"; then
- ip link set dev "$name" down
- defer ip link set dev "$name" up
+ ip link set dev "$name" down && \
+ defer ip link set dev "$name" up
fi
}
-ip_addr_add()
+adf_ip_addr_add()
{
local name=$1; shift
- ip addr add dev "$name" "$@"
- defer ip addr del dev "$name" "$@"
+ ip addr add dev "$name" "$@" && \
+ defer ip addr del dev "$name" "$@"
}
-ip_route_add()
+adf_ip_route_add()
{
- ip route add "$@"
- defer ip route del "$@"
+ ip route add "$@" && \
+ defer ip route del "$@"
}
-bridge_vlan_add()
+adf_bridge_vlan_add()
{
- bridge vlan add "$@"
- defer bridge vlan del "$@"
+ bridge vlan add "$@" && \
+ defer bridge vlan del "$@"
}
wait_local_port_listen()
@@ -616,3 +645,27 @@ wait_local_port_listen()
sleep 0.1
done
}
+
+cmd_jq()
+{
+ local cmd=$1
+ local jq_exp=$2
+ local jq_opts=$3
+ local ret
+ local output
+
+ output="$($cmd)"
+ # if the command fails, return the error right away
+ ret=$?
+ if [[ $ret -ne 0 ]]; then
+ return $ret
+ fi
+ output=$(echo $output | jq -r $jq_opts "$jq_exp")
+ ret=$?
+ if [[ $ret -ne 0 ]]; then
+ return $ret
+ fi
+ echo $output
+ # return success only in case of non-empty output
+ [ ! -z "$output" ]
+}
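cmd_jq, now shared via net/lib.sh rather than the forwarding library, runs a command, filters its stdout through jq, and succeeds only if the command, the jq expression, and a non-empty result all hold. A hypothetical usage sketch (interface name assumed):

mtu=$(cmd_jq "ip -j link show dev eth0" '.[0].mtu') || echo "query failed"
echo "MTU is $mtu"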
diff --git a/tools/testing/selftests/net/lib/Makefile b/tools/testing/selftests/net/lib/Makefile
index 88c4bc461459..5339f56329e1 100644
--- a/tools/testing/selftests/net/lib/Makefile
+++ b/tools/testing/selftests/net/lib/Makefile
@@ -5,12 +5,17 @@ CFLAGS += -I../../../../../usr/include/ $(KHDR_INCLUDES)
# Additional include paths needed by kselftest.h
CFLAGS += -I../../
-TEST_FILES := ../../../../../Documentation/netlink/specs
-TEST_FILES += ../../../../net/ynl
+TEST_FILES := \
+ ../../../../net/ynl \
+ ../../../../../Documentation/netlink/specs \
+ ksft_setup_loopback.sh \
+# end of TEST_FILES
-TEST_GEN_FILES += csum
-TEST_GEN_FILES += $(patsubst %.c,%.o,$(wildcard *.bpf.c))
-TEST_GEN_FILES += xdp_helper
+TEST_GEN_FILES := \
+ $(patsubst %.c,%.o,$(wildcard *.bpf.c)) \
+ csum \
+ xdp_helper \
+# end of TEST_GEN_FILES
TEST_INCLUDES := $(wildcard py/*.py sh/*.sh)
diff --git a/tools/testing/selftests/net/lib/ksft_setup_loopback.sh b/tools/testing/selftests/net/lib/ksft_setup_loopback.sh
new file mode 100755
index 000000000000..3defbb1919c5
--- /dev/null
+++ b/tools/testing/selftests/net/lib/ksft_setup_loopback.sh
@@ -0,0 +1,111 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Setup script for running ksft tests over a real interface in loopback mode.
+# This script replaces the historical setup_loopback.sh. It puts
+# a (presumably) real hardware interface into loopback mode, creates macvlan
+# interfaces on top and places them in a network namespace for isolation.
+#
+# NETIF env variable must be exported to indicate the real target device.
+# Note that the test will override NETIF with one of the macvlans; the
+# actual ksft test will only see the macvlans.
+#
+# Example use:
+# export NETIF=eth0
+# ./net/lib/ksft_setup_loopback.sh ./drivers/net/gro.py
+
+if [ -z "$NETIF" ]; then
+ echo "Error: NETIF variable not set"
+ exit 1
+fi
+if ! [ -d "/sys/class/net/$NETIF" ]; then
+ echo "Error: Can't find $NETIF, invalid netdevice"
+ exit 1
+fi
+
+# Save original settings for cleanup
+readonly FLUSH_PATH="/sys/class/net/${NETIF}/gro_flush_timeout"
+readonly IRQ_PATH="/sys/class/net/${NETIF}/napi_defer_hard_irqs"
+FLUSH_TIMEOUT="$(< "${FLUSH_PATH}")"
+readonly FLUSH_TIMEOUT
+HARD_IRQS="$(< "${IRQ_PATH}")"
+readonly HARD_IRQS
+
+SERVER_NS=$(mktemp -u server-XXXXXXXX)
+readonly SERVER_NS
+CLIENT_NS=$(mktemp -u client-XXXXXXXX)
+readonly CLIENT_NS
+readonly SERVER_MAC="aa:00:00:00:00:02"
+readonly CLIENT_MAC="aa:00:00:00:00:01"
+
+# ksft expects addresses to communicate with remote
+export LOCAL_V6=2001:db8:1::1
+export REMOTE_V6=2001:db8:1::2
+
+cleanup() {
+ local exit_code=$?
+
+ echo "Cleaning up..."
+
+ # Remove macvlan interfaces and namespaces
+ ip -netns "${SERVER_NS}" link del dev server 2>/dev/null || true
+ ip netns del "${SERVER_NS}" 2>/dev/null || true
+ ip -netns "${CLIENT_NS}" link del dev client 2>/dev/null || true
+ ip netns del "${CLIENT_NS}" 2>/dev/null || true
+
+ # Disable loopback
+ ethtool -K "${NETIF}" loopback off 2>/dev/null || true
+ sleep 1
+
+ echo "${FLUSH_TIMEOUT}" >"${FLUSH_PATH}"
+ echo "${HARD_IRQS}" >"${IRQ_PATH}"
+
+ exit $exit_code
+}
+
+trap cleanup EXIT INT TERM
+
+# Enable loopback mode
+echo "Enabling loopback on ${NETIF}..."
+ethtool -K "${NETIF}" loopback on || {
+ echo "Failed to enable loopback mode"
+ exit 1
+}
+# The interface may need time to get carrier back, but selftests
+# will wait for carrier, so no need to wait / sleep here.
+
+# Use a timer on the host to trigger the network stack, and defer device
+# interrupts so the test does not depend on NIC interrupts. This reduces
+# flakiness caused by unexpected interrupts.
+echo 100000 >"${FLUSH_PATH}"
+echo 50 >"${IRQ_PATH}"
+
+# Create server namespace with macvlan
+ip netns add "${SERVER_NS}"
+ip link add link "${NETIF}" dev server address "${SERVER_MAC}" type macvlan
+ip link set dev server netns "${SERVER_NS}"
+ip -netns "${SERVER_NS}" link set dev server up
+ip -netns "${SERVER_NS}" addr add $LOCAL_V6/64 dev server
+ip -netns "${SERVER_NS}" link set dev lo up
+
+# Create client namespace with macvlan
+ip netns add "${CLIENT_NS}"
+ip link add link "${NETIF}" dev client address "${CLIENT_MAC}" type macvlan
+ip link set dev client netns "${CLIENT_NS}"
+ip -netns "${CLIENT_NS}" link set dev client up
+ip -netns "${CLIENT_NS}" addr add $REMOTE_V6/64 dev client
+ip -netns "${CLIENT_NS}" link set dev lo up
+
+echo "Setup complete!"
+echo " Device: ${NETIF}"
+echo " Server NS: ${SERVER_NS}"
+echo " Client NS: ${CLIENT_NS}"
+echo ""
+
+# Setup environment variables for tests
+export NETIF=server
+export REMOTE_TYPE=netns
+export REMOTE_ARGS="${CLIENT_NS}"
+
+# Run the command
+ip netns exec "${SERVER_NS}" "$@"
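For illustration, a quick sanity check of the resulting topology (hypothetical
device name; not part of the patch):

    export NETIF=eth0
    ./net/lib/ksft_setup_loopback.sh ip addr show dev server
    # the command runs inside the server namespace; "server" is the
    # macvlan that the wrapped ksft test will see as NETIF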
diff --git a/tools/testing/selftests/net/lib/py/__init__.py b/tools/testing/selftests/net/lib/py/__init__.py
index 8697bd27dc30..40f9ce307dd1 100644
--- a/tools/testing/selftests/net/lib/py/__init__.py
+++ b/tools/testing/selftests/net/lib/py/__init__.py
@@ -1,9 +1,33 @@
# SPDX-License-Identifier: GPL-2.0
+"""
+Python selftest helpers for netdev.
+"""
+
from .consts import KSRC
-from .ksft import *
+from .ksft import KsftFailEx, KsftSkipEx, KsftXfailEx, ksft_pr, ksft_eq, \
+ ksft_ne, ksft_true, ksft_not_none, ksft_in, ksft_not_in, ksft_is, \
+ ksft_ge, ksft_gt, ksft_lt, ksft_raises, ksft_busy_wait, \
+ ktap_result, ksft_disruptive, ksft_setup, ksft_run, ksft_exit, \
+ ksft_variants, KsftNamedVariant
from .netns import NetNS, NetNSEnter
-from .nsim import *
-from .utils import *
+from .nsim import NetdevSim, NetdevSimDev
+from .utils import CmdExitFailure, fd_read_timeout, cmd, bkg, defer, \
+ bpftool, ip, ethtool, bpftrace, rand_port, wait_port_listen, wait_file
from .ynl import NlError, YnlFamily, EthtoolFamily, NetdevFamily, RtnlFamily, RtnlAddrFamily
-from .ynl import NetshaperFamily
+from .ynl import NetshaperFamily, DevlinkFamily, PSPFamily
+
+__all__ = ["KSRC",
+ "KsftFailEx", "KsftSkipEx", "KsftXfailEx", "ksft_pr", "ksft_eq",
+ "ksft_ne", "ksft_true", "ksft_not_none", "ksft_in", "ksft_not_in",
+ "ksft_is", "ksft_ge", "ksft_gt", "ksft_lt", "ksft_raises",
+ "ksft_busy_wait", "ktap_result", "ksft_disruptive", "ksft_setup",
+ "ksft_run", "ksft_exit", "ksft_variants", "KsftNamedVariant",
+ "NetNS", "NetNSEnter",
+ "CmdExitFailure", "fd_read_timeout", "cmd", "bkg", "defer",
+ "bpftool", "ip", "ethtool", "bpftrace", "rand_port",
+ "wait_port_listen", "wait_file",
+ "NetdevSim", "NetdevSimDev",
+ "NetshaperFamily", "DevlinkFamily", "PSPFamily", "NlError",
+ "YnlFamily", "EthtoolFamily", "NetdevFamily", "RtnlFamily",
+ "RtnlAddrFamily"]
diff --git a/tools/testing/selftests/net/lib/py/ksft.py b/tools/testing/selftests/net/lib/py/ksft.py
index 61287c203b6e..531e7fa1b3ea 100644
--- a/tools/testing/selftests/net/lib/py/ksft.py
+++ b/tools/testing/selftests/net/lib/py/ksft.py
@@ -1,12 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
-import builtins
import functools
import inspect
import signal
import sys
import time
import traceback
+from collections import namedtuple
from .consts import KSFT_MAIN_NAME
from .utils import global_defer_queue
@@ -32,6 +32,7 @@ class KsftTerminate(KeyboardInterrupt):
def ksft_pr(*objs, **kwargs):
+ kwargs["flush"] = True
print("#", *objs, **kwargs)
@@ -71,6 +72,11 @@ def ksft_true(a, comment=""):
_fail("Check failed", a, "does not eval to True", comment)
+def ksft_not_none(a, comment=""):
+ if a is None:
+ _fail("Check failed", a, "is None", comment)
+
+
def ksft_in(a, b, comment=""):
if a not in b:
_fail("Check failed", a, "not in", b, comment)
@@ -91,6 +97,11 @@ def ksft_ge(a, b, comment=""):
_fail("Check failed", a, "<", b, comment)
+def ksft_gt(a, b, comment=""):
+ if a <= b:
+ _fail("Check failed", a, "<=", b, comment)
+
+
def ksft_lt(a, b, comment=""):
if a >= b:
_fail("Check failed", a, ">=", b, comment)
@@ -125,7 +136,7 @@ def ksft_busy_wait(cond, sleep=0.005, deadline=1, comment=""):
time.sleep(sleep)
-def ktap_result(ok, cnt=1, case="", comment=""):
+def ktap_result(ok, cnt=1, case_name="", comment=""):
global KSFT_RESULT_ALL
KSFT_RESULT_ALL = KSFT_RESULT_ALL and ok
@@ -135,11 +146,11 @@ def ktap_result(ok, cnt=1, case="", comment=""):
res += "ok "
res += str(cnt) + " "
res += KSFT_MAIN_NAME
- if case:
- res += "." + str(case.__name__)
+ if case_name:
+ res += "." + case_name
if comment:
res += " # " + comment
- print(res)
+ print(res, flush=True)
def ksft_flush_defer():
@@ -152,7 +163,7 @@ def ksft_flush_defer():
entry = global_defer_queue.pop()
try:
entry.exec_only()
- except:
+ except Exception:
ksft_pr(f"Exception while handling defer / cleanup (callback {i} of {qlen_start})!")
tb = traceback.format_exc()
for line in tb.strip().split('\n'):
@@ -160,6 +171,10 @@ def ksft_flush_defer():
KSFT_RESULT = False
+KsftCaseFunction = namedtuple("KsftCaseFunction",
+ ['name', 'original_func', 'variants'])
+
+
def ksft_disruptive(func):
"""
Decorator that marks the test as disruptive (e.g. the test
@@ -170,11 +185,47 @@ def ksft_disruptive(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if not KSFT_DISRUPTIVE:
- raise KsftSkipEx(f"marked as disruptive")
+ raise KsftSkipEx("marked as disruptive")
return func(*args, **kwargs)
return wrapper
+class KsftNamedVariant:
+ """ Named string name + argument list tuple for @ksft_variants """
+
+ def __init__(self, name, *params):
+ self.params = params
+ self.name = name or "_".join([str(x) for x in self.params])
+
+
+def ksft_variants(params):
+ """
+ Decorator defining the sets of inputs for a test.
+ The parameters will be included in the name of the resulting sub-case.
+    Parameters can be a single object, a tuple, or a KsftNamedVariant.
+ The argument can be a list or a generator.
+
+ Example:
+
+ @ksft_variants([
+ (1, "a"),
+ (2, "b"),
+ KsftNamedVariant("three", 3, "c"),
+ ])
+ def my_case(cfg, a, b):
+ pass # ...
+
+ ksft_run(cases=[my_case], args=(cfg, ))
+
+ Will generate cases:
+ my_case.1_a
+ my_case.2_b
+ my_case.three
+ """
+
+ return lambda func: KsftCaseFunction(func.__name__, func, params)
+
+
def ksft_setup(env):
"""
Setup test framework global state from the environment.
@@ -188,7 +239,7 @@ def ksft_setup(env):
return False
try:
return bool(int(value))
- except:
+ except Exception:
raise Exception(f"failed to parse {name}")
if "DISRUPTIVE" in env:
@@ -209,9 +260,13 @@ def _ksft_intr(signum, frame):
ksft_pr(f"Ignoring SIGTERM (cnt: {term_cnt}), already exiting...")
-def ksft_run(cases=None, globs=None, case_pfx=None, args=()):
+def _ksft_generate_test_cases(cases, globs, case_pfx, args):
+ """Generate a flat list of (func, args, name) tuples"""
+
cases = cases or []
+ test_cases = []
+ # If using the globs method find all relevant functions
if globs and case_pfx:
for key, value in globs.items():
if not callable(value):
@@ -221,26 +276,47 @@ def ksft_run(cases=None, globs=None, case_pfx=None, args=()):
cases.append(value)
break
+ for func in cases:
+ if isinstance(func, KsftCaseFunction):
+ # Parametrized test - create case for each param
+ for param in func.variants:
+ if not isinstance(param, KsftNamedVariant):
+ if not isinstance(param, tuple):
+ param = (param, )
+ param = KsftNamedVariant(None, *param)
+
+ test_cases.append((func.original_func,
+ (*args, *param.params),
+ func.name + "." + param.name))
+ else:
+ test_cases.append((func, args, func.__name__))
+
+ return test_cases
+
+
+def ksft_run(cases=None, globs=None, case_pfx=None, args=()):
+ test_cases = _ksft_generate_test_cases(cases, globs, case_pfx, args)
+
global term_cnt
term_cnt = 0
prev_sigterm = signal.signal(signal.SIGTERM, _ksft_intr)
totals = {"pass": 0, "fail": 0, "skip": 0, "xfail": 0}
- print("TAP version 13")
- print("1.." + str(len(cases)))
+ print("TAP version 13", flush=True)
+ print("1.." + str(len(test_cases)), flush=True)
global KSFT_RESULT
cnt = 0
stop = False
- for case in cases:
+ for func, args, name in test_cases:
KSFT_RESULT = True
cnt += 1
comment = ""
cnt_key = ""
try:
- case(*args)
+ func(*args)
except KsftSkipEx as e:
comment = "SKIP " + str(e)
cnt_key = 'skip'
@@ -257,12 +333,26 @@ def ksft_run(cases=None, globs=None, case_pfx=None, args=()):
KSFT_RESULT = False
cnt_key = 'fail'
- ksft_flush_defer()
+ try:
+ ksft_flush_defer()
+ except BaseException as e:
+ tb = traceback.format_exc()
+ for line in tb.strip().split('\n'):
+ ksft_pr("Exception|", line)
+ if isinstance(e, KeyboardInterrupt):
+ ksft_pr()
+ ksft_pr("WARN: defer() interrupted, cleanup may be incomplete.")
+ ksft_pr(" Attempting to finish cleanup before exiting.")
+ ksft_pr(" Interrupt again to exit immediately.")
+ ksft_pr()
+ stop = True
+ # Flush was interrupted, try to finish the job best we can
+ ksft_flush_defer()
if not cnt_key:
cnt_key = 'pass' if KSFT_RESULT else 'fail'
- ktap_result(KSFT_RESULT, cnt, case, comment=comment)
+ ktap_result(KSFT_RESULT, cnt, name, comment=comment)
totals[cnt_key] += 1
if stop:
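For illustration, with the @ksft_variants example from the docstring above,
_ksft_generate_test_cases() would flatten the cases to roughly (hypothetical
cfg object; not part of the patch):

    [(my_case, (cfg, 1, "a"), "my_case.1_a"),
     (my_case, (cfg, 2, "b"), "my_case.2_b"),
     (my_case, (cfg, 3, "c"), "my_case.three")]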
diff --git a/tools/testing/selftests/net/lib/py/nsim.py b/tools/testing/selftests/net/lib/py/nsim.py
index 1a8cbe9acc48..7c640ed64c0b 100644
--- a/tools/testing/selftests/net/lib/py/nsim.py
+++ b/tools/testing/selftests/net/lib/py/nsim.py
@@ -27,7 +27,7 @@ class NetdevSim:
self.port_index = port_index
self.ns = ns
self.dfs_dir = "%s/ports/%u/" % (nsimdev.dfs_dir, port_index)
- ret = ip("-j link show dev %s" % ifname, ns=ns)
+ ret = ip("-d -j link show dev %s" % ifname, ns=ns)
self.dev = json.loads(ret.stdout)[0]
self.ifindex = self.dev["ifindex"]
diff --git a/tools/testing/selftests/net/lib/py/utils.py b/tools/testing/selftests/net/lib/py/utils.py
index 34470d65d871..106ee1f2df86 100644
--- a/tools/testing/selftests/net/lib/py/utils.py
+++ b/tools/testing/selftests/net/lib/py/utils.py
@@ -1,9 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
-import errno
import json as _json
import os
-import random
import re
import select
import socket
@@ -21,18 +19,20 @@ def fd_read_timeout(fd, timeout):
rlist, _, _ = select.select([fd], [], [], timeout)
if rlist:
return os.read(fd, 1024)
- else:
- raise TimeoutError("Timeout waiting for fd read")
+ raise TimeoutError("Timeout waiting for fd read")
class cmd:
"""
Execute a command on local or remote host.
+    @shell defaults to None; in that case the class will try to split @comm
+    into a list if it's a string with spaces, and run it without a shell.
+
Use bkg() instead to run a command in the background.
"""
- def __init__(self, comm, shell=True, fail=True, ns=None, background=False,
- host=None, timeout=5, ksft_wait=None):
+ def __init__(self, comm, shell=None, fail=True, ns=None, background=False,
+ host=None, timeout=5, ksft_ready=None, ksft_wait=None):
if ns:
comm = f'ip netns exec {ns} ' + comm
@@ -45,24 +45,32 @@ class cmd:
if host:
self.proc = host.cmd(comm)
else:
+            # If the user doesn't explicitly request a shell, try to avoid it.
+ if shell is None and isinstance(comm, str) and ' ' in comm:
+ comm = comm.split()
+
# ksft_wait lets us wait for the background process to fully start,
# we pass an FD to the child process, and wait for it to write back.
# Similarly term_fd tells child it's time to exit.
- pass_fds = ()
+ pass_fds = []
env = os.environ.copy()
if ksft_wait is not None:
- rfd, ready_fd = os.pipe()
wait_fd, self.ksft_term_fd = os.pipe()
- pass_fds = (ready_fd, wait_fd, )
- env["KSFT_READY_FD"] = str(ready_fd)
+ pass_fds.append(wait_fd)
env["KSFT_WAIT_FD"] = str(wait_fd)
+ ksft_ready = True # ksft_wait implies ready
+ if ksft_ready is not None:
+ rfd, ready_fd = os.pipe()
+ pass_fds.append(ready_fd)
+ env["KSFT_READY_FD"] = str(ready_fd)
self.proc = subprocess.Popen(comm, shell=shell, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, pass_fds=pass_fds,
env=env)
if ksft_wait is not None:
- os.close(ready_fd)
os.close(wait_fd)
+ if ksft_ready is not None:
+ os.close(ready_fd)
msg = fd_read_timeout(rfd, ksft_wait)
os.close(rfd)
if not msg:
@@ -111,12 +119,13 @@ class bkg(cmd):
with bkg("my_binary", ksft_wait=5):
"""
- def __init__(self, comm, shell=True, fail=None, ns=None, host=None,
- exit_wait=False, ksft_wait=None):
+ def __init__(self, comm, shell=None, fail=None, ns=None, host=None,
+ exit_wait=False, ksft_ready=None, ksft_wait=None):
super().__init__(comm, background=True,
shell=shell, fail=fail, ns=ns, host=host,
- ksft_wait=ksft_wait)
+ ksft_ready=ksft_ready, ksft_wait=ksft_wait)
self.terminate = not exit_wait and not ksft_wait
+ self._exit_wait = exit_wait
self.check_fail = fail
if shell and self.terminate:
@@ -127,7 +136,9 @@ class bkg(cmd):
return self
def __exit__(self, ex_type, ex_value, ex_tb):
- return self.process(terminate=self.terminate, fail=self.check_fail)
+ # Force termination on exception
+ terminate = self.terminate or (self._exit_wait and ex_type)
+ return self.process(terminate=terminate, fail=self.check_fail)
global_defer_queue = []
@@ -135,8 +146,6 @@ global_defer_queue = []
class defer:
def __init__(self, func, *args, **kwargs):
- global global_defer_queue
-
if not callable(func):
raise Exception("defer created with un-callable object, did you call the function instead of passing its name?")
@@ -175,6 +184,10 @@ def tool(name, args, json=None, ns=None, host=None):
return cmd_obj
+def bpftool(args, json=None, ns=None, host=None):
+ return tool('bpftool', args, json=json, ns=ns, host=host)
+
+
def ip(args, json=None, ns=None, host=None):
if ns:
args = f'-netns {ns} ' + args
@@ -185,11 +198,46 @@ def ethtool(args, json=None, ns=None, host=None):
return tool('ethtool', args, json=json, ns=ns, host=host)
-def rand_port(type=socket.SOCK_STREAM):
+def bpftrace(expr, json=None, ns=None, host=None, timeout=None):
+ """
+ Run bpftrace and return map data (if json=True).
+    The output of bpftrace is inconvenient, so the helper converts it
+    to a dict indexed by map name, with the leading '@' stripped, e.g.:
+    {
+        "": { ... },
+        "map2": { ... },
+    }
+ """
+ cmd_arr = ['bpftrace']
+ # Throw in --quiet if json, otherwise the output has two objects
+ if json:
+ cmd_arr += ['-f', 'json', '-q']
+ if timeout:
+ expr += ' interval:s:' + str(timeout) + ' { exit(); }'
+ cmd_arr += ['-e', expr]
+ cmd_obj = cmd(cmd_arr, ns=ns, host=host, shell=False)
+ if json:
+ # bpftrace prints objects as lines
+ ret = {}
+ for l in cmd_obj.stdout.split('\n'):
+ if not l.strip():
+ continue
+ one = _json.loads(l)
+ if one.get('type') != 'map':
+ continue
+ for k, v in one["data"].items():
+ if k.startswith('@'):
+ k = k.lstrip('@')
+ ret[k] = v
+ return ret
+ return cmd_obj
+
+
+def rand_port(stype=socket.SOCK_STREAM):
"""
Get a random unprivileged port.
"""
- with socket.socket(socket.AF_INET6, type) as s:
+ with socket.socket(socket.AF_INET6, stype) as s:
s.bind(("", 0))
return s.getsockname()[1]
@@ -210,3 +258,21 @@ def wait_port_listen(port, proto="tcp", ns=None, host=None, sleep=0.005, deadlin
if time.monotonic() > end:
raise Exception("Waiting for port listen timed out")
time.sleep(sleep)
+
+
+def wait_file(fname, test_fn, sleep=0.005, deadline=5, encoding='utf-8'):
+ """
+ Wait for file contents on the local system to satisfy a condition.
+ test_fn() should take one argument (file contents) and return whether
+ condition is met.
+ """
+ end = time.monotonic() + deadline
+
+ with open(fname, "r", encoding=encoding) as fp:
+ while True:
+ if test_fn(fp.read()):
+ break
+ fp.seek(0)
+ if time.monotonic() > end:
+ raise TimeoutError("Wait for file contents failed", fname)
+ time.sleep(sleep)
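For illustration, a minimal usage sketch of the new bpftrace() and wait_file()
helpers (hypothetical probe, map name and file path; not part of the patch):

    maps = bpftrace("kprobe:tcp_sendmsg { @sent[comm] = count(); }",
                    json=True, timeout=2)
    ksft_pr(maps["sent"])     # dict keys have the leading '@' stripped

    # wait until the (hypothetical) carrier file reads "1"
    wait_file("/sys/class/net/eth0/carrier", lambda x: x.strip() == "1")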
diff --git a/tools/testing/selftests/net/lib/py/ynl.py b/tools/testing/selftests/net/lib/py/ynl.py
index 6329ae805abf..32c223e93b2c 100644
--- a/tools/testing/selftests/net/lib/py/ynl.py
+++ b/tools/testing/selftests/net/lib/py/ynl.py
@@ -56,3 +56,13 @@ class NetshaperFamily(YnlFamily):
def __init__(self, recv_size=0):
super().__init__((SPEC_PATH / Path('net_shaper.yaml')).as_posix(),
schema='', recv_size=recv_size)
+
+class DevlinkFamily(YnlFamily):
+ def __init__(self, recv_size=0):
+ super().__init__((SPEC_PATH / Path('devlink.yaml')).as_posix(),
+ schema='', recv_size=recv_size)
+
+class PSPFamily(YnlFamily):
+ def __init__(self, recv_size=0):
+ super().__init__((SPEC_PATH / Path('psp.yaml')).as_posix(),
+ schema='', recv_size=recv_size)
diff --git a/tools/testing/selftests/net/lib/sh/defer.sh b/tools/testing/selftests/net/lib/sh/defer.sh
index 082f5d38321b..47ab78c4d465 100644
--- a/tools/testing/selftests/net/lib/sh/defer.sh
+++ b/tools/testing/selftests/net/lib/sh/defer.sh
@@ -1,6 +1,10 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+# Whether to pause and allow debugging when an executed deferred command has a
+# non-zero exit code.
+: "${DEFER_PAUSE_ON_FAIL:=no}"
+
# map[(scope_id,track,cleanup_id) -> cleanup_command]
# track={d=default | p=priority}
declare -A __DEFER__JOBS
@@ -38,8 +42,20 @@ __defer__run()
local track=$1; shift
local defer_ix=$1; shift
local defer_key=$(__defer__defer_key $track $defer_ix)
+ local ret
+
+ eval ${__DEFER__JOBS[$defer_key]}
+ ret=$?
+
+ if [[ "$DEFER_PAUSE_ON_FAIL" == yes && "$ret" -ne 0 ]]; then
+ echo "Deferred command (track $track index $defer_ix):"
+ echo " ${__DEFER__JOBS[$defer_key]}"
+ echo "... ended with an exit status of $ret"
+ echo "Hit enter to continue, 'q' to quit"
+ read a
+ [[ "$a" == q ]] && exit 1
+ fi
- ${__DEFER__JOBS[$defer_key]}
unset __DEFER__JOBS[$defer_key]
}
@@ -49,7 +65,7 @@ __defer__schedule()
local ndefers=$(__defer__ndefers $track)
local ndefers_key=$(__defer__ndefer_key $track)
local defer_key=$(__defer__defer_key $track $ndefers)
- local defer="$@"
+ local defer="${@@Q}"
__DEFER__JOBS[$defer_key]="$defer"
__DEFER__NJOBS[$ndefers_key]=$((ndefers + 1))
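For illustration, how the new knob would be used when debugging a failing
deferred cleanup (hypothetical test script name; not part of the patch):

    DEFER_PAUSE_ON_FAIL=yes ./router_bridge_vlan.sh
    # when a deferred command exits non-zero, the runner prints the
    # command and its exit status, then waits for enter ('q' aborts)
    # before continuing with the remaining cleanups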
diff --git a/tools/testing/selftests/net/lib/xdp_native.bpf.c b/tools/testing/selftests/net/lib/xdp_native.bpf.c
new file mode 100644
index 000000000000..64f05229ab24
--- /dev/null
+++ b/tools/testing/selftests/net/lib/xdp_native.bpf.c
@@ -0,0 +1,680 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <linux/in.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/udp.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_helpers.h>
+
+#define MAX_ADJST_OFFSET 256
+#define MAX_PAYLOAD_LEN 5000
+#define MAX_HDR_LEN 64
+
+extern int bpf_xdp_pull_data(struct xdp_md *xdp, __u32 len) __ksym __weak;
+
+enum {
+ XDP_MODE = 0,
+ XDP_PORT = 1,
+ XDP_ADJST_OFFSET = 2,
+ XDP_ADJST_TAG = 3,
+} xdp_map_setup_keys;
+
+enum {
+ XDP_MODE_PASS = 0,
+ XDP_MODE_DROP = 1,
+ XDP_MODE_TX = 2,
+ XDP_MODE_TAIL_ADJST = 3,
+ XDP_MODE_HEAD_ADJST = 4,
+} xdp_map_modes;
+
+enum {
+ STATS_RX = 0,
+ STATS_PASS = 1,
+ STATS_DROP = 2,
+ STATS_TX = 3,
+ STATS_ABORT = 4,
+} xdp_stats;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 5);
+ __type(key, __u32);
+ __type(value, __s32);
+} map_xdp_setup SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 5);
+ __type(key, __u32);
+ __type(value, __u64);
+} map_xdp_stats SEC(".maps");
+
+static __u32 min(__u32 a, __u32 b)
+{
+ return a < b ? a : b;
+}
+
+static void record_stats(struct xdp_md *ctx, __u32 stat_type)
+{
+ __u64 *count;
+
+ count = bpf_map_lookup_elem(&map_xdp_stats, &stat_type);
+
+ if (count)
+ __sync_fetch_and_add(count, 1);
+}
+
+static struct udphdr *filter_udphdr(struct xdp_md *ctx, __u16 port)
+{
+ struct udphdr *udph = NULL;
+ void *data, *data_end;
+ struct ethhdr *eth;
+ int err;
+
+ err = bpf_xdp_pull_data(ctx, sizeof(*eth));
+ if (err)
+ return NULL;
+
+ data_end = (void *)(long)ctx->data_end;
+ data = eth = (void *)(long)ctx->data;
+
+ if (data + sizeof(*eth) > data_end)
+ return NULL;
+
+ if (eth->h_proto == bpf_htons(ETH_P_IP)) {
+ struct iphdr *iph;
+
+ err = bpf_xdp_pull_data(ctx, sizeof(*eth) + sizeof(*iph) +
+ sizeof(*udph));
+ if (err)
+ return NULL;
+
+ data_end = (void *)(long)ctx->data_end;
+ data = (void *)(long)ctx->data;
+
+ iph = data + sizeof(*eth);
+
+ if (iph + 1 > (struct iphdr *)data_end ||
+ iph->protocol != IPPROTO_UDP)
+ return NULL;
+
+ udph = data + sizeof(*iph) + sizeof(*eth);
+ } else if (eth->h_proto == bpf_htons(ETH_P_IPV6)) {
+ struct ipv6hdr *ipv6h;
+
+ err = bpf_xdp_pull_data(ctx, sizeof(*eth) + sizeof(*ipv6h) +
+ sizeof(*udph));
+ if (err)
+ return NULL;
+
+ data_end = (void *)(long)ctx->data_end;
+ data = (void *)(long)ctx->data;
+
+ ipv6h = data + sizeof(*eth);
+
+ if (ipv6h + 1 > (struct ipv6hdr *)data_end ||
+ ipv6h->nexthdr != IPPROTO_UDP)
+ return NULL;
+
+ udph = data + sizeof(*ipv6h) + sizeof(*eth);
+ } else {
+ return NULL;
+ }
+
+ if (udph + 1 > (struct udphdr *)data_end)
+ return NULL;
+
+ if (udph->dest != bpf_htons(port))
+ return NULL;
+
+ record_stats(ctx, STATS_RX);
+
+ return udph;
+}
+
+static int xdp_mode_pass(struct xdp_md *ctx, __u16 port)
+{
+ struct udphdr *udph = NULL;
+
+ udph = filter_udphdr(ctx, port);
+ if (!udph)
+ return XDP_PASS;
+
+ record_stats(ctx, STATS_PASS);
+
+ return XDP_PASS;
+}
+
+static int xdp_mode_drop_handler(struct xdp_md *ctx, __u16 port)
+{
+ struct udphdr *udph = NULL;
+
+ udph = filter_udphdr(ctx, port);
+ if (!udph)
+ return XDP_PASS;
+
+ record_stats(ctx, STATS_DROP);
+
+ return XDP_DROP;
+}
+
+static void swap_machdr(void *data)
+{
+ struct ethhdr *eth = data;
+ __u8 tmp_mac[ETH_ALEN];
+
+ __builtin_memcpy(tmp_mac, eth->h_source, ETH_ALEN);
+ __builtin_memcpy(eth->h_source, eth->h_dest, ETH_ALEN);
+ __builtin_memcpy(eth->h_dest, tmp_mac, ETH_ALEN);
+}
+
+static int xdp_mode_tx_handler(struct xdp_md *ctx, __u16 port)
+{
+ struct udphdr *udph = NULL;
+ void *data, *data_end;
+ struct ethhdr *eth;
+ int err;
+
+ err = bpf_xdp_pull_data(ctx, sizeof(*eth));
+ if (err)
+ return XDP_PASS;
+
+ data_end = (void *)(long)ctx->data_end;
+ data = eth = (void *)(long)ctx->data;
+
+ if (data + sizeof(*eth) > data_end)
+ return XDP_PASS;
+
+ if (eth->h_proto == bpf_htons(ETH_P_IP)) {
+ struct iphdr *iph;
+ __be32 tmp_ip;
+
+ err = bpf_xdp_pull_data(ctx, sizeof(*eth) + sizeof(*iph) +
+ sizeof(*udph));
+ if (err)
+ return XDP_PASS;
+
+ data_end = (void *)(long)ctx->data_end;
+ data = (void *)(long)ctx->data;
+
+ iph = data + sizeof(*eth);
+
+ if (iph + 1 > (struct iphdr *)data_end ||
+ iph->protocol != IPPROTO_UDP)
+ return XDP_PASS;
+
+ udph = data + sizeof(*iph) + sizeof(*eth);
+
+ if (udph + 1 > (struct udphdr *)data_end)
+ return XDP_PASS;
+ if (udph->dest != bpf_htons(port))
+ return XDP_PASS;
+
+ record_stats(ctx, STATS_RX);
+ eth = data;
+ swap_machdr((void *)eth);
+
+ tmp_ip = iph->saddr;
+ iph->saddr = iph->daddr;
+ iph->daddr = tmp_ip;
+
+ record_stats(ctx, STATS_TX);
+
+ return XDP_TX;
+
+ } else if (eth->h_proto == bpf_htons(ETH_P_IPV6)) {
+ struct in6_addr tmp_ipv6;
+ struct ipv6hdr *ipv6h;
+
+ err = bpf_xdp_pull_data(ctx, sizeof(*eth) + sizeof(*ipv6h) +
+ sizeof(*udph));
+ if (err)
+ return XDP_PASS;
+
+ data_end = (void *)(long)ctx->data_end;
+ data = (void *)(long)ctx->data;
+
+ ipv6h = data + sizeof(*eth);
+
+ if (ipv6h + 1 > (struct ipv6hdr *)data_end ||
+ ipv6h->nexthdr != IPPROTO_UDP)
+ return XDP_PASS;
+
+ udph = data + sizeof(*ipv6h) + sizeof(*eth);
+
+ if (udph + 1 > (struct udphdr *)data_end)
+ return XDP_PASS;
+ if (udph->dest != bpf_htons(port))
+ return XDP_PASS;
+
+ record_stats(ctx, STATS_RX);
+ eth = data;
+ swap_machdr((void *)eth);
+
+ __builtin_memcpy(&tmp_ipv6, &ipv6h->saddr, sizeof(tmp_ipv6));
+ __builtin_memcpy(&ipv6h->saddr, &ipv6h->daddr,
+ sizeof(tmp_ipv6));
+ __builtin_memcpy(&ipv6h->daddr, &tmp_ipv6, sizeof(tmp_ipv6));
+
+ record_stats(ctx, STATS_TX);
+
+ return XDP_TX;
+ }
+
+ return XDP_PASS;
+}
+
+static void *update_pkt(struct xdp_md *ctx, __s16 offset, __u32 *udp_csum)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ struct udphdr *udph = NULL;
+ struct ethhdr *eth = data;
+ __u32 len, len_new;
+
+ if (data + sizeof(*eth) > data_end)
+ return NULL;
+
+ if (eth->h_proto == bpf_htons(ETH_P_IP)) {
+ struct iphdr *iph = data + sizeof(*eth);
+ __u16 total_len;
+
+ if (iph + 1 > (struct iphdr *)data_end)
+ return NULL;
+
+ iph->tot_len = bpf_htons(bpf_ntohs(iph->tot_len) + offset);
+
+ udph = (void *)eth + sizeof(*iph) + sizeof(*eth);
+ if (!udph || udph + 1 > (struct udphdr *)data_end)
+ return NULL;
+
+ len_new = bpf_htons(bpf_ntohs(udph->len) + offset);
+ } else if (eth->h_proto == bpf_htons(ETH_P_IPV6)) {
+ struct ipv6hdr *ipv6h = data + sizeof(*eth);
+ __u16 payload_len;
+
+ if (ipv6h + 1 > (struct ipv6hdr *)data_end)
+ return NULL;
+
+ udph = (void *)eth + sizeof(*ipv6h) + sizeof(*eth);
+ if (!udph || udph + 1 > (struct udphdr *)data_end)
+ return NULL;
+
+ *udp_csum = ~((__u32)udph->check);
+
+ len = ipv6h->payload_len;
+ len_new = bpf_htons(bpf_ntohs(len) + offset);
+ ipv6h->payload_len = len_new;
+
+ *udp_csum = bpf_csum_diff(&len, sizeof(len), &len_new,
+ sizeof(len_new), *udp_csum);
+
+ len = udph->len;
+ len_new = bpf_htons(bpf_ntohs(udph->len) + offset);
+ *udp_csum = bpf_csum_diff(&len, sizeof(len), &len_new,
+ sizeof(len_new), *udp_csum);
+ } else {
+ return NULL;
+ }
+
+ udph->len = len_new;
+
+ return udph;
+}
+
+static __u16 csum_fold_helper(__u32 csum)
+{
+ return ~((csum & 0xffff) + (csum >> 16)) ? : 0xffff;
+}
+
+static int xdp_adjst_tail_shrnk_data(struct xdp_md *ctx, __u16 offset,
+ unsigned long hdr_len)
+{
+ char tmp_buff[MAX_ADJST_OFFSET];
+ __u32 buff_pos, udp_csum = 0;
+ struct udphdr *udph = NULL;
+ __u32 buff_len;
+
+ udph = update_pkt(ctx, 0 - offset, &udp_csum);
+ if (!udph)
+ return -1;
+
+ buff_len = bpf_xdp_get_buff_len(ctx);
+
+ offset = (offset & 0x1ff) >= MAX_ADJST_OFFSET ? MAX_ADJST_OFFSET :
+ offset & 0xff;
+ if (offset == 0)
+ return -1;
+
+ /* Make sure we have enough data to avoid eating the header */
+ if (buff_len - offset < hdr_len)
+ return -1;
+
+ buff_pos = buff_len - offset;
+ if (bpf_xdp_load_bytes(ctx, buff_pos, tmp_buff, offset) < 0)
+ return -1;
+
+ udp_csum = bpf_csum_diff((__be32 *)tmp_buff, offset, 0, 0, udp_csum);
+ udph->check = (__u16)csum_fold_helper(udp_csum);
+
+ if (bpf_xdp_adjust_tail(ctx, 0 - offset) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int xdp_adjst_tail_grow_data(struct xdp_md *ctx, __u16 offset)
+{
+ char tmp_buff[MAX_ADJST_OFFSET];
+ __u32 buff_pos, udp_csum = 0;
+ __u32 buff_len, hdr_len, key;
+ struct udphdr *udph;
+ __s32 *val;
+ __u8 tag;
+
+	/* Proceed to update the packet headers before attempting to adjust
+	 * the tail. Once the tail is adjusted we lose access to the offset
+	 * amount of data at the end of the packet, which is crucial to update
+	 * the checksum.
+	 * Since any failure beyond this would abort the packet, we should
+	 * not worry about passing a packet up the stack with wrong headers.
+	 */
+ udph = update_pkt(ctx, offset, &udp_csum);
+ if (!udph)
+ return -1;
+
+ key = XDP_ADJST_TAG;
+ val = bpf_map_lookup_elem(&map_xdp_setup, &key);
+ if (!val)
+ return -1;
+
+ tag = (__u8)(*val);
+
+ for (int i = 0; i < MAX_ADJST_OFFSET; i++)
+ __builtin_memcpy(&tmp_buff[i], &tag, 1);
+
+ offset = (offset & 0x1ff) >= MAX_ADJST_OFFSET ? MAX_ADJST_OFFSET :
+ offset & 0xff;
+ if (offset == 0)
+ return -1;
+
+ udp_csum = bpf_csum_diff(0, 0, (__be32 *)tmp_buff, offset, udp_csum);
+ udph->check = (__u16)csum_fold_helper(udp_csum);
+
+ buff_len = bpf_xdp_get_buff_len(ctx);
+
+ if (bpf_xdp_adjust_tail(ctx, offset) < 0) {
+ bpf_printk("Failed to adjust tail\n");
+ return -1;
+ }
+
+ if (bpf_xdp_store_bytes(ctx, buff_len, tmp_buff, offset) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int xdp_adjst_tail(struct xdp_md *ctx, __u16 port)
+{
+ struct udphdr *udph = NULL;
+ __s32 *adjust_offset, *val;
+ unsigned long hdr_len;
+ void *offset_ptr;
+ __u32 key;
+ __u8 tag;
+ int ret;
+
+ udph = filter_udphdr(ctx, port);
+ if (!udph)
+ return XDP_PASS;
+
+ hdr_len = (void *)udph - (void *)(long)ctx->data +
+ sizeof(struct udphdr);
+ key = XDP_ADJST_OFFSET;
+ adjust_offset = bpf_map_lookup_elem(&map_xdp_setup, &key);
+ if (!adjust_offset)
+ return XDP_PASS;
+
+ if (*adjust_offset < 0)
+ ret = xdp_adjst_tail_shrnk_data(ctx,
+ (__u16)(0 - *adjust_offset),
+ hdr_len);
+ else
+ ret = xdp_adjst_tail_grow_data(ctx, (__u16)(*adjust_offset));
+ if (ret)
+ goto abort_pkt;
+
+ record_stats(ctx, STATS_PASS);
+ return XDP_PASS;
+
+abort_pkt:
+ record_stats(ctx, STATS_ABORT);
+ return XDP_ABORTED;
+}
+
+static int xdp_adjst_head_shrnk_data(struct xdp_md *ctx, __u64 hdr_len,
+ __u32 offset)
+{
+ char tmp_buff[MAX_ADJST_OFFSET];
+ struct udphdr *udph;
+ void *offset_ptr;
+ __u32 udp_csum = 0;
+
+ /* Update the length information in the IP and UDP headers before
+ * adjusting the headroom. This simplifies accessing the relevant
+ * fields in the IP and UDP headers for fragmented packets. Any
+ * failure beyond this point will result in the packet being aborted,
+ * so we don't need to worry about incorrect length information for
+ * passed packets.
+ */
+ udph = update_pkt(ctx, (__s16)(0 - offset), &udp_csum);
+ if (!udph)
+ return -1;
+
+ offset = (offset & 0x1ff) >= MAX_ADJST_OFFSET ? MAX_ADJST_OFFSET :
+ offset & 0xff;
+ if (offset == 0)
+ return -1;
+
+ if (bpf_xdp_load_bytes(ctx, hdr_len, tmp_buff, offset) < 0)
+ return -1;
+
+ udp_csum = bpf_csum_diff((__be32 *)tmp_buff, offset, 0, 0, udp_csum);
+
+ udph->check = (__u16)csum_fold_helper(udp_csum);
+
+ if (bpf_xdp_load_bytes(ctx, 0, tmp_buff, MAX_ADJST_OFFSET) < 0)
+ return -1;
+
+ if (bpf_xdp_adjust_head(ctx, offset) < 0)
+ return -1;
+
+ if (offset > MAX_ADJST_OFFSET)
+ return -1;
+
+ if (hdr_len > MAX_ADJST_OFFSET || hdr_len == 0)
+ return -1;
+
+	/* Added here to silence a clang warning about a negative value */
+ hdr_len = hdr_len & 0xff;
+
+ if (hdr_len == 0)
+ return -1;
+
+ if (bpf_xdp_store_bytes(ctx, 0, tmp_buff, hdr_len) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int xdp_adjst_head_grow_data(struct xdp_md *ctx, __u64 hdr_len,
+ __u32 offset)
+{
+ char hdr_buff[MAX_HDR_LEN];
+ char data_buff[MAX_ADJST_OFFSET];
+ void *offset_ptr;
+ __s32 *val;
+ __u32 key;
+ __u8 tag;
+ __u32 udp_csum = 0;
+ struct udphdr *udph;
+
+ udph = update_pkt(ctx, (__s16)(offset), &udp_csum);
+ if (!udph)
+ return -1;
+
+ key = XDP_ADJST_TAG;
+ val = bpf_map_lookup_elem(&map_xdp_setup, &key);
+ if (!val)
+ return -1;
+
+ tag = (__u8)(*val);
+ for (int i = 0; i < MAX_ADJST_OFFSET; i++)
+ __builtin_memcpy(&data_buff[i], &tag, 1);
+
+ offset = (offset & 0x1ff) >= MAX_ADJST_OFFSET ? MAX_ADJST_OFFSET :
+ offset & 0xff;
+ if (offset == 0)
+ return -1;
+
+ udp_csum = bpf_csum_diff(0, 0, (__be32 *)data_buff, offset, udp_csum);
+ udph->check = (__u16)csum_fold_helper(udp_csum);
+
+ if (hdr_len > MAX_ADJST_OFFSET || hdr_len == 0)
+ return -1;
+
+	/* Added here to silence a clang warning about a negative value */
+ hdr_len = hdr_len & 0xff;
+
+ if (hdr_len == 0)
+ return -1;
+
+ if (bpf_xdp_load_bytes(ctx, 0, hdr_buff, hdr_len) < 0)
+ return -1;
+
+ if (offset > MAX_ADJST_OFFSET)
+ return -1;
+
+ if (bpf_xdp_adjust_head(ctx, 0 - offset) < 0)
+ return -1;
+
+ if (bpf_xdp_store_bytes(ctx, 0, hdr_buff, hdr_len) < 0)
+ return -1;
+
+ if (bpf_xdp_store_bytes(ctx, hdr_len, data_buff, offset) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int xdp_head_adjst(struct xdp_md *ctx, __u16 port)
+{
+ struct udphdr *udph_ptr = NULL;
+ __u32 key, size, hdr_len;
+ __s32 *val;
+ int res;
+
+ /* Filter packets based on UDP port */
+ udph_ptr = filter_udphdr(ctx, port);
+ if (!udph_ptr)
+ return XDP_PASS;
+
+ hdr_len = (void *)udph_ptr - (void *)(long)ctx->data +
+ sizeof(struct udphdr);
+
+ key = XDP_ADJST_OFFSET;
+ val = bpf_map_lookup_elem(&map_xdp_setup, &key);
+ if (!val)
+ return XDP_PASS;
+
+ switch (*val) {
+ case -16:
+ case 16:
+ size = 16;
+ break;
+ case -32:
+ case 32:
+ size = 32;
+ break;
+ case -64:
+ case 64:
+ size = 64;
+ break;
+ case -128:
+ case 128:
+ size = 128;
+ break;
+ case -256:
+ case 256:
+ size = 256;
+ break;
+ default:
+ bpf_printk("Invalid adjustment offset: %d\n", *val);
+ goto abort;
+ }
+
+ if (*val < 0)
+ res = xdp_adjst_head_grow_data(ctx, hdr_len, size);
+ else
+ res = xdp_adjst_head_shrnk_data(ctx, hdr_len, size);
+
+ if (res)
+ goto abort;
+
+ record_stats(ctx, STATS_PASS);
+ return XDP_PASS;
+
+abort:
+ record_stats(ctx, STATS_ABORT);
+ return XDP_ABORTED;
+}
+
+static int xdp_prog_common(struct xdp_md *ctx)
+{
+ __u32 key, *port;
+ __s32 *mode;
+
+ key = XDP_MODE;
+ mode = bpf_map_lookup_elem(&map_xdp_setup, &key);
+ if (!mode)
+ return XDP_PASS;
+
+ key = XDP_PORT;
+ port = bpf_map_lookup_elem(&map_xdp_setup, &key);
+ if (!port)
+ return XDP_PASS;
+
+ switch (*mode) {
+ case XDP_MODE_PASS:
+ return xdp_mode_pass(ctx, (__u16)(*port));
+ case XDP_MODE_DROP:
+ return xdp_mode_drop_handler(ctx, (__u16)(*port));
+ case XDP_MODE_TX:
+ return xdp_mode_tx_handler(ctx, (__u16)(*port));
+ case XDP_MODE_TAIL_ADJST:
+ return xdp_adjst_tail(ctx, (__u16)(*port));
+ case XDP_MODE_HEAD_ADJST:
+ return xdp_head_adjst(ctx, (__u16)(*port));
+ }
+
+	/* Default action is to simply pass */
+ return XDP_PASS;
+}
+
+SEC("xdp")
+int xdp_prog(struct xdp_md *ctx)
+{
+ return xdp_prog_common(ctx);
+}
+
+SEC("xdp.frags")
+int xdp_prog_frags(struct xdp_md *ctx)
+{
+ return xdp_prog_common(ctx);
+}
+
+char _license[] SEC("license") = "GPL";
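For illustration, a sketch of how the program might be driven from the shell
(hypothetical interface; byte values assume a little-endian host; not part of
the patch):

    ip link set dev eth0 xdpgeneric obj xdp_native.bpf.o sec xdp
    # key XDP_MODE(0) <- XDP_MODE_DROP(1)
    bpftool map update name map_xdp_setup key 0 0 0 0 value 1 0 0 0
    # key XDP_PORT(1) <- 9999 (0x270f, little-endian)
    bpftool map update name map_xdp_setup key 1 0 0 0 value 15 39 0 0
    # observe the counters
    bpftool map dump name map_xdp_stats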
diff --git a/tools/testing/selftests/net/mptcp/Makefile b/tools/testing/selftests/net/mptcp/Makefile
index e47788bfa671..15d144a25d82 100644
--- a/tools/testing/selftests/net/mptcp/Makefile
+++ b/tools/testing/selftests/net/mptcp/Makefile
@@ -4,12 +4,31 @@ top_srcdir = ../../../../..
CFLAGS += -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include $(KHDR_INCLUDES)
-TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh diag.sh \
- simult_flows.sh mptcp_sockopt.sh userspace_pm.sh
-
-TEST_GEN_FILES = mptcp_connect pm_nl_ctl mptcp_sockopt mptcp_inq mptcp_diag
-
-TEST_FILES := mptcp_lib.sh settings
+TEST_PROGS := \
+ diag.sh \
+ mptcp_connect.sh \
+ mptcp_connect_checksum.sh \
+ mptcp_connect_mmap.sh \
+ mptcp_connect_sendfile.sh \
+ mptcp_join.sh \
+ mptcp_sockopt.sh \
+ pm_netlink.sh \
+ simult_flows.sh \
+ userspace_pm.sh \
+# end of TEST_PROGS
+
+TEST_GEN_FILES := \
+ mptcp_connect \
+ mptcp_diag \
+ mptcp_inq \
+ mptcp_sockopt \
+ pm_nl_ctl \
+# end of TEST_GEN_FILES
+
+TEST_FILES := \
+ mptcp_lib.sh \
+ settings \
+# end of TEST_FILES
TEST_INCLUDES := ../lib.sh $(wildcard ../lib/sh/*.sh)
diff --git a/tools/testing/selftests/net/mptcp/config b/tools/testing/selftests/net/mptcp/config
index 4f80014cae49..59051ee2a986 100644
--- a/tools/testing/selftests/net/mptcp/config
+++ b/tools/testing/selftests/net/mptcp/config
@@ -1,34 +1,36 @@
-CONFIG_KALLSYMS=y
-CONFIG_MPTCP=y
-CONFIG_IPV6=y
-CONFIG_MPTCP_IPV6=y
CONFIG_INET_DIAG=m
CONFIG_INET_MPTCP_DIAG=m
-CONFIG_VETH=y
-CONFIG_NET_SCH_NETEM=m
-CONFIG_SYN_COOKIES=y
-CONFIG_NETFILTER=y
-CONFIG_NETFILTER_ADVANCED=y
-CONFIG_NETFILTER_NETLINK=m
-CONFIG_NF_TABLES=m
-CONFIG_NFT_COMPAT=m
-CONFIG_NETFILTER_XTABLES=m
-CONFIG_NETFILTER_XT_MATCH_BPF=m
-CONFIG_NETFILTER_XT_MATCH_LENGTH=m
-CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
-CONFIG_NETFILTER_XT_TARGET_MARK=m
-CONFIG_NF_TABLES_INET=y
-CONFIG_NFT_TPROXY=m
-CONFIG_NFT_SOCKET=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IPV6=y
CONFIG_IPV6_MULTIPLE_TABLES=y
-CONFIG_IP6_NF_FILTER=m
+CONFIG_KALLSYMS=y
+CONFIG_MPTCP=y
+CONFIG_MPTCP_IPV6=y
CONFIG_NET_ACT_CSUM=m
CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_CLS_ACT=y
CONFIG_NET_CLS_FW=m
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_ADVANCED=y
+CONFIG_NETFILTER_NETLINK=m
+CONFIG_NETFILTER_XTABLES=m
+CONFIG_NETFILTER_XTABLES_LEGACY=y
+CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_INET=y
+CONFIG_NFT_COMPAT=m
+CONFIG_NFT_SOCKET=m
+CONFIG_NFT_TPROXY=m
+CONFIG_SYN_COOKIES=y
+CONFIG_VETH=y
diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
index 7a3cb4c09e45..d847ff1737c3 100755
--- a/tools/testing/selftests/net/mptcp/diag.sh
+++ b/tools/testing/selftests/net/mptcp/diag.sh
@@ -28,7 +28,7 @@ flush_pids()
}
# This function is used in the cleanup trap
-#shellcheck disable=SC2317
+#shellcheck disable=SC2317,SC2329
cleanup()
{
ip netns pids "${ns}" | xargs --no-run-if-empty kill -SIGKILL &>/dev/null
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
index ac1349c4b9e5..404a77bf366a 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
@@ -183,9 +183,10 @@ static void xgetaddrinfo(const char *node, const char *service,
struct addrinfo *hints,
struct addrinfo **res)
{
-again:
- int err = getaddrinfo(node, service, hints, res);
+ int err;
+again:
+ err = getaddrinfo(node, service, hints, res);
if (err) {
const char *errstr;
@@ -709,8 +710,14 @@ static int copyfd_io_poll(int infd, int peerfd, int outfd,
bw = do_rnd_write(peerfd, winfo->buf + winfo->off, winfo->len);
if (bw < 0) {
- if (cfg_rcv_trunc)
- return 0;
+ /* expected reset, continue to read */
+ if (cfg_rcv_trunc &&
+ (errno == ECONNRESET ||
+ errno == EPIPE)) {
+ fds.events &= ~POLLOUT;
+ continue;
+ }
+
perror("write");
return 111;
}
@@ -736,8 +743,10 @@ static int copyfd_io_poll(int infd, int peerfd, int outfd,
}
if (fds.revents & (POLLERR | POLLNVAL)) {
- if (cfg_rcv_trunc)
- return 0;
+ if (cfg_rcv_trunc) {
+ fds.events &= ~(POLLERR | POLLNVAL);
+ continue;
+ }
fprintf(stderr, "Unexpected revents: "
"POLLERR/POLLNVAL(%x)\n", fds.revents);
return 5;
@@ -1063,6 +1072,8 @@ static void check_getpeername_connect(int fd)
socklen_t salen = sizeof(ss);
char a[INET6_ADDRSTRLEN];
char b[INET6_ADDRSTRLEN];
+ const char *iface;
+ size_t len;
if (getpeername(fd, (struct sockaddr *)&ss, &salen) < 0) {
perror("getpeername");
@@ -1072,7 +1083,13 @@ static void check_getpeername_connect(int fd)
xgetnameinfo((struct sockaddr *)&ss, salen,
a, sizeof(a), b, sizeof(b));
- if (strcmp(cfg_host, a) || strcmp(cfg_port, b))
+ iface = strchr(cfg_host, '%');
+ if (iface)
+ len = iface - cfg_host;
+ else
+ len = strlen(cfg_host) + 1;
+
+ if (strncmp(cfg_host, a, len) || strcmp(cfg_port, b))
fprintf(stderr, "%s: %s vs %s, %s vs %s\n", __func__,
cfg_host, a, cfg_port, b);
}
@@ -1092,6 +1109,7 @@ int main_loop_s(int listensock)
struct pollfd polls;
socklen_t salen;
int remotesock;
+ int err = 0;
int fd = 0;
again:
@@ -1124,7 +1142,7 @@ again:
SOCK_TEST_TCPULP(remotesock, 0);
memset(&winfo, 0, sizeof(winfo));
- copyfd_io(fd, remotesock, 1, true, &winfo);
+ err = copyfd_io(fd, remotesock, 1, true, &winfo);
} else {
perror("accept");
return 1;
@@ -1133,10 +1151,10 @@ again:
if (cfg_input)
close(fd);
- if (--cfg_repeat > 0)
+ if (!err && --cfg_repeat > 0)
goto again;
- return 0;
+ return err;
}
static void init_rng(void)
@@ -1246,7 +1264,7 @@ void xdisconnect(int fd)
else
xerror("bad family");
- strcpy(cmd, "ss -M | grep -q ");
+ strcpy(cmd, "ss -Mnt | grep -q ");
cmdlen = strlen(cmd);
if (!inet_ntop(addr.ss_family, raw_addr, &cmd[cmdlen],
sizeof(cmd) - cmdlen))
@@ -1256,7 +1274,7 @@ void xdisconnect(int fd)
/*
* wait until the pending data is completely flushed and all
- * the MPTCP sockets reached the closed status.
+ * the sockets reached the closed status.
* disconnect will bypass/ignore/drop any pending data.
*/
for (i = 0; ; i += msec_sleep) {
@@ -1431,7 +1449,7 @@ static void parse_opts(int argc, char **argv)
*/
if (cfg_truncate < 0) {
cfg_rcv_trunc = true;
- signal(SIGPIPE, handle_signal);
+ signal(SIGPIPE, SIG_IGN);
}
break;
case 'j':
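For illustration, how the new peer-address check behaves (hypothetical
addresses; not part of the patch):

    /* cfg_host = "fe80::1%ns1eth0": len = 7, so strncmp() compares only
     * "fe80::1" and a scope-less peer address still matches.
     * cfg_host = "10.0.1.1": no '%', so len = strlen() + 1 includes the
     * terminating NUL and strncmp() degenerates to a full strcmp().
     */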
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
index 5e3c56253274..a6447f7a31fe 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
@@ -134,7 +134,7 @@ ns4=""
TEST_GROUP=""
# This function is used in the cleanup trap
-#shellcheck disable=SC2317
+#shellcheck disable=SC2317,SC2329
cleanup()
{
rm -f "$cin_disconnect"
@@ -211,6 +211,11 @@ if $checksum; then
done
fi
+if $capture; then
+ rndh="${ns1:4}"
+ mptcp_lib_pr_info "Packet capture files will have this prefix: ${rndh}-"
+fi
+
set_ethtool_flags() {
local ns="$1"
local dev="$2"
@@ -361,7 +366,6 @@ do_transfer()
if $capture; then
local capuser
- local rndh="${connector_ns:4}"
if [ -z $SUDO_USER ] ; then
capuser=""
else
@@ -371,81 +375,75 @@ do_transfer()
local capfile="${rndh}-${connector_ns:0:3}-${listener_ns:0:3}-${cl_proto}-${srv_proto}-${connect_addr}-${port}"
local capopt="-i any -s 65535 -B 32768 ${capuser}"
- ip netns exec ${listener_ns} tcpdump ${capopt} -w "${capfile}-listener.pcap" >> "${capout}" 2>&1 &
+ ip netns exec ${listener_ns} tcpdump ${capopt} \
+ -w "${capfile}-listener.pcap" >> "${capout}" 2>&1 &
local cappid_listener=$!
- ip netns exec ${connector_ns} tcpdump ${capopt} -w "${capfile}-connector.pcap" >> "${capout}" 2>&1 &
- local cappid_connector=$!
+ if [ ${listener_ns} != ${connector_ns} ]; then
+ ip netns exec ${connector_ns} tcpdump ${capopt} \
+ -w "${capfile}-connector.pcap" >> "${capout}" 2>&1 &
+ local cappid_connector=$!
+ fi
sleep 1
fi
- NSTAT_HISTORY=/tmp/${listener_ns}.nstat ip netns exec ${listener_ns} \
- nstat -n
+ mptcp_lib_nstat_init "${listener_ns}"
if [ ${listener_ns} != ${connector_ns} ]; then
- NSTAT_HISTORY=/tmp/${connector_ns}.nstat ip netns exec ${connector_ns} \
- nstat -n
- fi
-
- local stat_synrx_last_l
- local stat_ackrx_last_l
- local stat_cookietx_last
- local stat_cookierx_last
- local stat_csum_err_s
- local stat_csum_err_c
- local stat_tcpfb_last_l
- stat_synrx_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
- stat_ackrx_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
- stat_cookietx_last=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent")
- stat_cookierx_last=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv")
- stat_csum_err_s=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtDataCsumErr")
- stat_csum_err_c=$(mptcp_lib_get_counter "${connector_ns}" "MPTcpExtDataCsumErr")
- stat_tcpfb_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableFallbackACK")
-
- timeout ${timeout_test} \
- ip netns exec ${listener_ns} \
- ./mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
- $extra_args $local_addr < "$sin" > "$sout" &
+ mptcp_lib_nstat_init "${connector_ns}"
+ fi
+
+ ip netns exec ${listener_ns} \
+ ./mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
+ $extra_args $local_addr < "$sin" > "$sout" &
local spid=$!
mptcp_lib_wait_local_port_listen "${listener_ns}" "${port}"
local start
start=$(date +%s%3N)
- timeout ${timeout_test} \
- ip netns exec ${connector_ns} \
- ./mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
- $extra_args $connect_addr < "$cin" > "$cout" &
+ ip netns exec ${connector_ns} \
+ ./mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
+ $extra_args $connect_addr < "$cin" > "$cout" &
local cpid=$!
+ mptcp_lib_wait_timeout "${timeout_test}" "${listener_ns}" \
+ "${connector_ns}" "${port}" "${cpid}" "${spid}" &
+ local timeout_pid=$!
+
wait $cpid
local retc=$?
wait $spid
local rets=$?
+ if kill -0 $timeout_pid; then
+ # Finished before the timeout: kill the background job
+ mptcp_lib_kill_group_wait $timeout_pid
+ timeout_pid=0
+ fi
+
local stop
stop=$(date +%s%3N)
if $capture; then
sleep 1
kill ${cappid_listener}
- kill ${cappid_connector}
+ if [ ${listener_ns} != ${connector_ns} ]; then
+ kill ${cappid_connector}
+ fi
fi
- NSTAT_HISTORY=/tmp/${listener_ns}.nstat ip netns exec ${listener_ns} \
- nstat | grep Tcp > /tmp/${listener_ns}.out
+ mptcp_lib_nstat_get "${listener_ns}"
if [ ${listener_ns} != ${connector_ns} ]; then
- NSTAT_HISTORY=/tmp/${connector_ns}.nstat ip netns exec ${connector_ns} \
- nstat | grep Tcp > /tmp/${connector_ns}.out
+ mptcp_lib_nstat_get "${connector_ns}"
fi
local duration
duration=$((stop-start))
printf "(duration %05sms) " "${duration}"
- if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then
+ if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ] || [ ${timeout_pid} -ne 0 ]; then
mptcp_lib_pr_fail "client exit code $retc, server $rets"
- mptcp_lib_pr_err_stats "${listener_ns}" "${connector_ns}" "${port}" \
- "/tmp/${listener_ns}.out" "/tmp/${connector_ns}.out"
+ mptcp_lib_pr_err_stats "${listener_ns}" "${connector_ns}" "${port}"
echo
cat "$capout"
@@ -459,38 +457,38 @@ do_transfer()
rets=$?
local extra=""
- local stat_synrx_now_l
- local stat_ackrx_now_l
- local stat_cookietx_now
- local stat_cookierx_now
- local stat_ooo_now
- local stat_tcpfb_now_l
- stat_synrx_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
- stat_ackrx_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
- stat_cookietx_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent")
- stat_cookierx_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv")
- stat_ooo_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtTCPOFOQueue")
- stat_tcpfb_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableFallbackACK")
-
- expect_synrx=$((stat_synrx_last_l))
- expect_ackrx=$((stat_ackrx_last_l))
+ local stat_synrx
+ local stat_ackrx
+ local stat_cookietx
+ local stat_cookierx
+ local stat_ooo
+ local stat_tcpfb
+ stat_synrx=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
+ stat_ackrx=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
+ stat_cookietx=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent")
+ stat_cookierx=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv")
+ stat_ooo=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtTCPOFOQueue")
+ stat_tcpfb=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableFallbackACK")
+
+ expect_synrx=0
+ expect_ackrx=0
cookies=$(ip netns exec ${listener_ns} sysctl net.ipv4.tcp_syncookies)
cookies=${cookies##*=}
if [ ${cl_proto} = "MPTCP" ] && [ ${srv_proto} = "MPTCP" ]; then
- expect_synrx=$((stat_synrx_last_l+connect_per_transfer))
- expect_ackrx=$((stat_ackrx_last_l+connect_per_transfer))
+ expect_synrx=${connect_per_transfer}
+ expect_ackrx=${connect_per_transfer}
fi
- if [ ${stat_synrx_now_l} -lt ${expect_synrx} ]; then
- mptcp_lib_pr_fail "lower MPC SYN rx (${stat_synrx_now_l})" \
+ if [ ${stat_synrx} -lt ${expect_synrx} ]; then
+ mptcp_lib_pr_fail "lower MPC SYN rx (${stat_synrx})" \
"than expected (${expect_synrx})"
retc=1
fi
- if [ ${stat_ackrx_now_l} -lt ${expect_ackrx} ] && [ ${stat_ooo_now} -eq 0 ]; then
- if [ ${stat_ooo_now} -eq 0 ]; then
- mptcp_lib_pr_fail "lower MPC ACK rx (${stat_ackrx_now_l})" \
+ if [ ${stat_ackrx} -lt ${expect_ackrx} ]; then
+ if [ ${stat_ooo} -eq 0 ]; then
+ mptcp_lib_pr_fail "lower MPC ACK rx (${stat_ackrx})" \
"than expected (${expect_ackrx})"
rets=1
else
@@ -504,47 +502,45 @@ do_transfer()
csum_err_s=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtDataCsumErr")
csum_err_c=$(mptcp_lib_get_counter "${connector_ns}" "MPTcpExtDataCsumErr")
- local csum_err_s_nr=$((csum_err_s - stat_csum_err_s))
- if [ $csum_err_s_nr -gt 0 ]; then
- mptcp_lib_pr_fail "server got ${csum_err_s_nr} data checksum error[s]"
+ if [ $csum_err_s -gt 0 ]; then
+ mptcp_lib_pr_fail "server got ${csum_err_s} data checksum error[s]"
rets=1
fi
- local csum_err_c_nr=$((csum_err_c - stat_csum_err_c))
- if [ $csum_err_c_nr -gt 0 ]; then
- mptcp_lib_pr_fail "client got ${csum_err_c_nr} data checksum error[s]"
+ if [ $csum_err_c -gt 0 ]; then
+ mptcp_lib_pr_fail "client got ${csum_err_c} data checksum error[s]"
retc=1
fi
fi
- if [ ${stat_ooo_now} -eq 0 ] && [ ${stat_tcpfb_last_l} -ne ${stat_tcpfb_now_l} ]; then
+ if [ ${stat_ooo} -eq 0 ] && [ ${stat_tcpfb} -gt 0 ]; then
mptcp_lib_pr_fail "unexpected fallback to TCP"
rets=1
fi
if [ $cookies -eq 2 ];then
- if [ $stat_cookietx_last -ge $stat_cookietx_now ] ;then
+ if [ $stat_cookietx -eq 0 ] ;then
extra+=" WARN: CookieSent: did not advance"
fi
- if [ $stat_cookierx_last -ge $stat_cookierx_now ] ;then
+ if [ $stat_cookierx -eq 0 ] ;then
extra+=" WARN: CookieRecv: did not advance"
fi
else
- if [ $stat_cookietx_last -ne $stat_cookietx_now ] ;then
+ if [ $stat_cookietx -gt 0 ] ;then
extra+=" WARN: CookieSent: changed"
fi
- if [ $stat_cookierx_last -ne $stat_cookierx_now ] ;then
+ if [ $stat_cookierx -gt 0 ] ;then
extra+=" WARN: CookieRecv: changed"
fi
fi
- if [ ${stat_synrx_now_l} -gt ${expect_synrx} ]; then
+ if [ ${stat_synrx} -gt ${expect_synrx} ]; then
extra+=" WARN: SYNRX: expect ${expect_synrx},"
- extra+=" got ${stat_synrx_now_l} (probably retransmissions)"
+ extra+=" got ${stat_synrx} (probably retransmissions)"
fi
- if [ ${stat_ackrx_now_l} -gt ${expect_ackrx} ]; then
+ if [ ${stat_ackrx} -gt ${expect_ackrx} ]; then
extra+=" WARN: ACKRX: expect ${expect_ackrx},"
- extra+=" got ${stat_ackrx_now_l} (probably retransmissions)"
+ extra+=" got ${stat_ackrx} (probably retransmissions)"
fi
if [ $retc -eq 0 ] && [ $rets -eq 0 ]; then
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect_checksum.sh b/tools/testing/selftests/net/mptcp/mptcp_connect_checksum.sh
new file mode 100755
index 000000000000..ce93ec2f107f
--- /dev/null
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect_checksum.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+MPTCP_LIB_KSFT_TEST="$(basename "${0}" .sh)" \
+ "$(dirname "${0}")/mptcp_connect.sh" -C "${@}"
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect_mmap.sh b/tools/testing/selftests/net/mptcp/mptcp_connect_mmap.sh
new file mode 100755
index 000000000000..5dd30f9394af
--- /dev/null
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect_mmap.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+MPTCP_LIB_KSFT_TEST="$(basename "${0}" .sh)" \
+ "$(dirname "${0}")/mptcp_connect.sh" -m mmap "${@}"
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect_sendfile.sh b/tools/testing/selftests/net/mptcp/mptcp_connect_sendfile.sh
new file mode 100755
index 000000000000..1d16fb1cc9bb
--- /dev/null
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect_sendfile.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+MPTCP_LIB_KSFT_TEST="$(basename "${0}" .sh)" \
+ "$(dirname "${0}")/mptcp_connect.sh" -m sendfile "${@}"
diff --git a/tools/testing/selftests/net/mptcp/mptcp_inq.c b/tools/testing/selftests/net/mptcp/mptcp_inq.c
index 3cf1e2a612ce..8e8f6441ad8b 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_inq.c
+++ b/tools/testing/selftests/net/mptcp/mptcp_inq.c
@@ -75,9 +75,10 @@ static void xgetaddrinfo(const char *node, const char *service,
struct addrinfo *hints,
struct addrinfo **res)
{
-again:
- int err = getaddrinfo(node, service, hints, res);
+ int err;
+again:
+ err = getaddrinfo(node, service, hints, res);
if (err) {
const char *errstr;
@@ -501,6 +502,7 @@ static int server(int unixfd)
process_one_client(r, unixfd);
+ close(fd);
return 0;
}
@@ -579,8 +581,12 @@ int main(int argc, char *argv[])
die_perror("pipe");
s = xfork();
- if (s == 0)
- return server(unixfds[1]);
+ if (s == 0) {
+ close(unixfds[0]);
+ ret = server(unixfds[1]);
+ close(unixfds[1]);
+ return ret;
+ }
close(unixfds[1]);
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index b8af65373b3a..b2e6e548f796 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -8,7 +8,7 @@
# ShellCheck incorrectly believes that most of the code here is unreachable
# because it's invoked by variable name, see how the "tests" array is used
-#shellcheck disable=SC2317
+#shellcheck disable=SC2317,SC2329
. "$(dirname "${0}")/mptcp_lib.sh"
@@ -62,6 +62,7 @@ unset sflags
unset fastclose
unset fullmesh
unset speed
+unset bind_addr
unset join_syn_rej
unset join_csum_ns1
unset join_csum_ns2
@@ -74,6 +75,17 @@ unset join_create_err
unset join_bind_err
unset join_connect_err
+unset fb_ns1
+unset fb_ns2
+unset fb_infinite_map_tx
+unset fb_dss_corruption
+unset fb_simult_conn
+unset fb_mpc_passive
+unset fb_mpc_active
+unset fb_mpc_data
+unset fb_md5_sig
+unset fb_dss
+
# generated using "nfbpf_compile '(ip && (ip[54] & 0xf0) == 0x30) ||
# (ip6 && (ip6[74] & 0xf0) == 0x30)'"
CBPF_MPTCP_SUBOPTION_ADD_ADDR="14,
@@ -347,6 +359,7 @@ reset_with_add_addr_timeout()
tables="${ip6tables}"
fi
+	# set a maximum, to avoid an overly long timeout with exponential backoff
ip netns exec $ns1 sysctl -q net.mptcp.add_addr_timeout=1
if ! ip netns exec $ns2 $tables -A OUTPUT -p tcp \
@@ -633,6 +646,27 @@ wait_mpj()
done
}
+wait_ll_ready()
+{
+ local ns="${1}"
+
+ local i
+ for i in $(seq 50); do
+ ip -n "${ns}" -6 addr show scope link | grep "inet6 fe80" |
+ grep -qw "tentative" || break
+ sleep 0.1
+ done
+}
+
+get_ll_addr()
+{
+ local ns="${1}"
+ local iface="${2}"
+
+ ip -n "${ns}" -6 addr show dev "${iface}" scope link |
+ grep "inet6 fe80" | sed 's#.*\(fe80::.*\)/.*#\1#'
+}
+
kill_events_pids()
{
mptcp_lib_kill_wait $evts_ns1_pid
@@ -939,6 +973,9 @@ do_transfer()
local FAILING_LINKS=${FAILING_LINKS:-""}
local fastclose=${fastclose:-""}
local speed=${speed:-"fast"}
+ local bind_addr=${bind_addr:-"::"}
+ local listener_in="${sin}"
+ local connector_in="${cin}"
port=$(get_port)
:> "$cout"
@@ -946,10 +983,8 @@ do_transfer()
cond_start_capture ${listener_ns}
- NSTAT_HISTORY=/tmp/${listener_ns}.nstat ip netns exec ${listener_ns} \
- nstat -n
- NSTAT_HISTORY=/tmp/${connector_ns}.nstat ip netns exec ${connector_ns} \
- nstat -n
+ mptcp_lib_nstat_init "${listener_ns}"
+ mptcp_lib_nstat_init "${connector_ns}"
local extra_args
if [ $speed = "fast" ]; then
@@ -987,42 +1022,40 @@ do_transfer()
extra_srv_args="$extra_args $extra_srv_args"
if [ "$test_linkfail" -gt 1 ];then
- timeout ${timeout_test} \
- ip netns exec ${listener_ns} \
- ./mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
- $extra_srv_args "::" < "$sinfail" > "$sout" &
- else
- timeout ${timeout_test} \
- ip netns exec ${listener_ns} \
- ./mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
- $extra_srv_args "::" < "$sin" > "$sout" &
+ listener_in="${sinfail}"
fi
+ ip netns exec ${listener_ns} \
+ ./mptcp_connect -t ${timeout_poll} -l -p ${port} -s ${srv_proto} \
+ ${extra_srv_args} "${bind_addr}" < "${listener_in}" > "${sout}" &
local spid=$!
mptcp_lib_wait_local_port_listen "${listener_ns}" "${port}"
extra_cl_args="$extra_args $extra_cl_args"
if [ "$test_linkfail" -eq 0 ];then
- timeout ${timeout_test} \
- ip netns exec ${connector_ns} \
- ./mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
- $extra_cl_args $connect_addr < "$cin" > "$cout" &
+ ip netns exec ${connector_ns} \
+ ./mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
+ $extra_cl_args $connect_addr < "$cin" > "$cout" &
elif [ "$test_linkfail" -eq 1 ] || [ "$test_linkfail" -eq 2 ];then
+ connector_in="${cinsent}"
( cat "$cinfail" ; sleep 2; link_failure $listener_ns ; cat "$cinfail" ) | \
tee "$cinsent" | \
- timeout ${timeout_test} \
ip netns exec ${connector_ns} \
./mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
$extra_cl_args $connect_addr > "$cout" &
else
+ connector_in="${cinsent}"
tee "$cinsent" < "$cinfail" | \
- timeout ${timeout_test} \
- ip netns exec ${connector_ns} \
- ./mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
- $extra_cl_args $connect_addr > "$cout" &
+ ip netns exec ${connector_ns} \
+ ./mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
+ $extra_cl_args $connect_addr > "$cout" &
fi
local cpid=$!
+ mptcp_lib_wait_timeout "${timeout_test}" "${listener_ns}" \
+ "${connector_ns}" "${port}" "${cpid}" "${spid}" &
+ local timeout_pid=$!
+
pm_nl_set_endpoint $listener_ns $connector_ns $connect_addr
check_cestab $listener_ns $connector_ns
@@ -1031,31 +1064,26 @@ do_transfer()
wait $spid
local rets=$?
+ if kill -0 $timeout_pid; then
+ # Finished before the timeout: kill the background job
+ mptcp_lib_kill_group_wait $timeout_pid
+ timeout_pid=0
+ fi
+
cond_stop_capture
- NSTAT_HISTORY=/tmp/${listener_ns}.nstat ip netns exec ${listener_ns} \
- nstat | grep Tcp > /tmp/${listener_ns}.out
- NSTAT_HISTORY=/tmp/${connector_ns}.nstat ip netns exec ${connector_ns} \
- nstat | grep Tcp > /tmp/${connector_ns}.out
+ mptcp_lib_nstat_get "${listener_ns}"
+ mptcp_lib_nstat_get "${connector_ns}"
- if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then
+ if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ] || [ ${timeout_pid} -ne 0 ]; then
fail_test "client exit code $retc, server $rets"
- mptcp_lib_pr_err_stats "${listener_ns}" "${connector_ns}" "${port}" \
- "/tmp/${listener_ns}.out" "/tmp/${connector_ns}.out"
+ mptcp_lib_pr_err_stats "${listener_ns}" "${connector_ns}" "${port}"
return 1
fi
- if [ "$test_linkfail" -gt 1 ];then
- check_transfer $sinfail $cout "file received by client" $trunc_size
- else
- check_transfer $sin $cout "file received by client" $trunc_size
- fi
+ check_transfer $listener_in $cout "file received by client" $trunc_size
retc=$?
- if [ "$test_linkfail" -eq 0 ];then
- check_transfer $cin $sout "file received by server" $trunc_size
- else
- check_transfer $cinsent $sout "file received by server" $trunc_size
- fi
+ check_transfer $connector_in $sout "file received by server" $trunc_size
rets=$?
[ $retc -eq 0 ] && [ $rets -eq 0 ]
@@ -1124,12 +1152,20 @@ run_tests()
do_transfer ${listener_ns} ${connector_ns} MPTCP MPTCP ${connect_addr}
}
+_dump_stats()
+{
+ local ns="${1}"
+ local side="${2}"
+
+ mptcp_lib_print_err "${side} ns stats (${ns})"
+ mptcp_lib_pr_nstat "${ns}"
+ echo
+}
+
dump_stats()
{
- echo Server ns stats
- ip netns exec $ns1 nstat -as | grep Tcp
- echo Client ns stats
- ip netns exec $ns2 nstat -as | grep Tcp
+ _dump_stats "${ns1}" "Server"
+ _dump_stats "${ns2}" "Client"
}
chk_csum_nr()
@@ -1399,6 +1435,115 @@ chk_join_tx_nr()
print_results "join Tx" ${rc}
}
+chk_fallback_nr()
+{
+ local infinite_map_tx=${fb_infinite_map_tx:-0}
+ local dss_corruption=${fb_dss_corruption:-0}
+ local simult_conn=${fb_simult_conn:-0}
+ local mpc_passive=${fb_mpc_passive:-0}
+ local mpc_active=${fb_mpc_active:-0}
+ local mpc_data=${fb_mpc_data:-0}
+ local md5_sig=${fb_md5_sig:-0}
+ local dss=${fb_dss:-0}
+ local rc=${KSFT_PASS}
+ local ns=$1
+ local count
+
+ count=$(mptcp_lib_get_counter ${!ns} "MPTcpExtInfiniteMapTx")
+ if [ -z "$count" ]; then
+ rc=${KSFT_SKIP}
+ elif [ "$count" != "$infinite_map_tx" ]; then
+ rc=${KSFT_FAIL}
+ print_check "$ns infinite map tx fallback"
+ fail_test "got $count infinite map tx fallback[s] in $ns expected $infinite_map_tx"
+ fi
+
+ count=$(mptcp_lib_get_counter ${!ns} "MPTcpExtDSSCorruptionFallback")
+ if [ -z "$count" ]; then
+ rc=${KSFT_SKIP}
+ elif [ "$count" != "$dss_corruption" ]; then
+ rc=${KSFT_FAIL}
+ print_check "$ns dss corruption fallback"
+ fail_test "got $count dss corruption fallback[s] in $ns expected $dss_corruption"
+ fi
+
+ count=$(mptcp_lib_get_counter ${!ns} "MPTcpExtSimultConnectFallback")
+ if [ -z "$count" ]; then
+ rc=${KSFT_SKIP}
+ elif [ "$count" != "$simult_conn" ]; then
+ rc=${KSFT_FAIL}
+ print_check "$ns simult conn fallback"
+ fail_test "got $count simult conn fallback[s] in $ns expected $simult_conn"
+ fi
+
+ count=$(mptcp_lib_get_counter ${!ns} "MPTcpExtMPCapableFallbackACK")
+ if [ -z "$count" ]; then
+ rc=${KSFT_SKIP}
+ elif [ "$count" != "$mpc_passive" ]; then
+ rc=${KSFT_FAIL}
+ print_check "$ns mpc passive fallback"
+ fail_test "got $count mpc passive fallback[s] in $ns expected $mpc_passive"
+ fi
+
+ count=$(mptcp_lib_get_counter ${!ns} "MPTcpExtMPCapableFallbackSYNACK")
+ if [ -z "$count" ]; then
+ rc=${KSFT_SKIP}
+ elif [ "$count" != "$mpc_active" ]; then
+ rc=${KSFT_FAIL}
+ print_check "$ns mpc active fallback"
+ fail_test "got $count mpc active fallback[s] in $ns expected $mpc_active"
+ fi
+
+ count=$(mptcp_lib_get_counter ${!ns} "MPTcpExtMPCapableDataFallback")
+ if [ -z "$count" ]; then
+ rc=${KSFT_SKIP}
+ elif [ "$count" != "$mpc_data" ]; then
+ rc=${KSFT_FAIL}
+ print_check "$ns mpc data fallback"
+ fail_test "got $count mpc data fallback[s] in $ns expected $mpc_data"
+ fi
+
+ count=$(mptcp_lib_get_counter ${!ns} "MPTcpExtMD5SigFallback")
+ if [ -z "$count" ]; then
+ rc=${KSFT_SKIP}
+ elif [ "$count" != "$md5_sig" ]; then
+ rc=${KSFT_FAIL}
+ print_check "$ns MD5 Sig fallback"
+ fail_test "got $count MD5 Sig fallback[s] in $ns expected $md5_sig"
+ fi
+
+ count=$(mptcp_lib_get_counter ${!ns} "MPTcpExtDssFallback")
+ if [ -z "$count" ]; then
+ rc=${KSFT_SKIP}
+ elif [ "$count" != "$dss" ]; then
+ rc=${KSFT_FAIL}
+ print_check "$ns dss fallback"
+ fail_test "got $count dss fallback[s] in $ns expected $dss"
+ fi
+
+ return $rc
+}
+
+chk_fallback_nr_all()
+{
+ local netns=("ns1" "ns2")
+ local fb_ns=("fb_ns1" "fb_ns2")
+ local rc=${KSFT_PASS}
+
+ for i in 0 1; do
+ if [ -n "${!fb_ns[i]}" ]; then
+ eval "${!fb_ns[i]}" \
+ chk_fallback_nr ${netns[i]} || rc=${?}
+ else
+ chk_fallback_nr ${netns[i]} || rc=${?}
+ fi
+ done
+
+ if [ "${rc}" != "${KSFT_PASS}" ]; then
+ print_results "fallback" ${rc}
+ fi
+}
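All expected fallback counters default to 0, so tests only set the ones they expect to move; per-namespace overrides go through the fb_ns1/fb_ns2 variables, as in the fail_tests change further down:

    fb_ns1="fb_dss=1" fb_ns2="fb_infinite_map_tx=1" \
        chk_join_nr 0 0 0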
+
chk_join_nr()
{
local syn_nr=$1
@@ -1484,6 +1629,8 @@ chk_join_nr()
join_syn_tx="${join_syn_tx:-${syn_nr}}" \
chk_join_tx_nr
+ chk_fallback_nr_all
+
if $validate_checksum; then
chk_csum_nr $csum_ns1 $csum_ns2
chk_fail_nr $fail_nr $fail_nr
@@ -1547,7 +1694,6 @@ chk_add_nr()
local tx=""
local rx=""
local count
- local timeout
if [[ $ns_invert = "invert" ]]; then
ns_tx=$ns2
@@ -1556,15 +1702,13 @@ chk_add_nr()
rx=" server"
fi
- timeout=$(ip netns exec ${ns_tx} sysctl -n net.mptcp.add_addr_timeout)
-
print_check "add addr rx${rx}"
count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtAddAddr")
if [ -z "$count" ]; then
print_skip
- # if the test configured a short timeout tolerate greater then expected
- # add addrs options, due to retransmissions
- elif [ "$count" != "$add_nr" ] && { [ "$timeout" -gt 1 ] || [ "$count" -lt "$add_nr" ]; }; then
+ # Tolerate more ADD_ADDR than expected (if any), due to retransmissions
+ elif [ "$count" != "$add_nr" ] &&
+ { [ "$add_nr" -eq 0 ] || [ "$count" -lt "$add_nr" ]; }; then
fail_test "got $count ADD_ADDR[s] expected $add_nr"
else
print_ok
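Spelling out the new tolerance rule (count is the observed MIB value):

    count=3 add_nr=2  ->  pass: extra ADD_ADDR from retransmissions is tolerated
    count=1 add_nr=2  ->  fail: fewer announcements than expected
    count=1 add_nr=0  ->  fail: when none are expected, any ADD_ADDR is an error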
@@ -1652,18 +1796,15 @@ chk_add_tx_nr()
{
local add_tx_nr=$1
local echo_tx_nr=$2
- local timeout
local count
- timeout=$(ip netns exec $ns1 sysctl -n net.mptcp.add_addr_timeout)
-
print_check "add addr tx"
count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtAddAddrTx")
if [ -z "$count" ]; then
print_skip
- # if the test configured a short timeout tolerate greater then expected
- # add addrs options, due to retransmissions
- elif [ "$count" != "$add_tx_nr" ] && { [ "$timeout" -gt 1 ] || [ "$count" -lt "$add_tx_nr" ]; }; then
+ # Tolerate more ADD_ADDR than expected (if any), due to retransmissions
+ elif [ "$count" != "$add_tx_nr" ] &&
+ { [ "$add_tx_nr" -eq 0 ] || [ "$count" -lt "$add_tx_nr" ]; }; then
fail_test "got $count ADD_ADDR[s] TX, expected $add_tx_nr"
else
print_ok
@@ -2151,7 +2292,8 @@ signal_address_tests()
pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
pm_nl_add_endpoint $ns1 10.0.4.1 flags signal
pm_nl_set_limits $ns2 3 3
- run_tests $ns1 $ns2 10.0.1.1
+ speed=slow \
+ run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 3 3 3
chk_add_nr 3 3
fi
@@ -2163,7 +2305,8 @@ signal_address_tests()
pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
pm_nl_add_endpoint $ns1 10.0.14.1 flags signal
pm_nl_set_limits $ns2 3 3
- run_tests $ns1 $ns2 10.0.1.1
+ speed=slow \
+ run_tests $ns1 $ns2 10.0.1.1
join_syn_tx=3 \
chk_join_nr 1 1 1
chk_add_nr 3 3
@@ -2201,6 +2344,74 @@ signal_address_tests()
fi
}
+laminar_endp_tests()
+{
+ # no laminar endpoints: routing rules are used
+ if reset_with_tcp_filter "without a laminar endpoint" ns1 10.0.2.2 REJECT &&
+ continue_if mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then
+ pm_nl_set_limits $ns1 0 2
+ pm_nl_set_limits $ns2 2 2
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+ run_tests $ns1 $ns2 10.0.1.1
+ join_syn_tx=1 \
+ chk_join_nr 0 0 0
+ chk_add_nr 1 1
+ fi
+
+ # laminar endpoints: this endpoint is used
+ if reset_with_tcp_filter "with a laminar endpoint" ns1 10.0.2.2 REJECT &&
+ continue_if mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then
+ pm_nl_set_limits $ns1 0 2
+ pm_nl_set_limits $ns2 2 2
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+ pm_nl_add_endpoint $ns2 10.0.3.2 flags laminar
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 1 1 1
+ chk_add_nr 1 1
+ fi
+
+ # laminar endpoints: these endpoints are used
+ if reset_with_tcp_filter "with multiple laminar endpoints" ns1 10.0.2.2 REJECT &&
+ continue_if mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then
+ pm_nl_set_limits $ns1 0 2
+ pm_nl_set_limits $ns2 2 2
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+ pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
+ pm_nl_add_endpoint $ns2 dead:beef:3::2 flags laminar
+ pm_nl_add_endpoint $ns2 10.0.3.2 flags laminar
+ pm_nl_add_endpoint $ns2 10.0.4.2 flags laminar
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 2 2 2
+ chk_add_nr 2 2
+ fi
+
+ # laminar endpoints: only one endpoint is used
+ if reset_with_tcp_filter "single laminar endpoint" ns1 10.0.2.2 REJECT &&
+ continue_if mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then
+ pm_nl_set_limits $ns1 0 2
+ pm_nl_set_limits $ns2 2 2
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+ pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
+ pm_nl_add_endpoint $ns2 10.0.3.2 flags laminar
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 1 1 1
+ chk_add_nr 2 2
+ fi
+
+ # laminar endpoints: subflow and laminar flags
+ if reset_with_tcp_filter "sublow + laminar endpoints" ns1 10.0.2.2 REJECT &&
+ continue_if mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then
+ pm_nl_set_limits $ns1 0 4
+ pm_nl_set_limits $ns2 2 4
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+ pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,laminar
+ pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow,laminar
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 1 1 1
+ chk_add_nr 1 1
+ fi
+}
+
link_failure_tests()
{
# accept and use add_addr with additional subflows and link loss
@@ -2345,7 +2556,7 @@ remove_tests()
if reset "remove single subflow"; then
pm_nl_set_limits $ns1 0 1
pm_nl_set_limits $ns2 0 1
- pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+ pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow,backup
addr_nr_ns2=-1 speed=slow \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 1 1 1
@@ -2358,8 +2569,8 @@ remove_tests()
if reset "remove multiple subflows"; then
pm_nl_set_limits $ns1 0 2
pm_nl_set_limits $ns2 0 2
- pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow
- pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+ pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow,backup
+ pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow,backup
addr_nr_ns2=-2 speed=slow \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 2 2 2
@@ -2370,7 +2581,7 @@ remove_tests()
# single address, remove
if reset "remove single address"; then
pm_nl_set_limits $ns1 0 1
- pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal,backup
pm_nl_set_limits $ns2 1 1
addr_nr_ns1=-1 speed=slow \
run_tests $ns1 $ns2 10.0.1.1
@@ -2383,9 +2594,9 @@ remove_tests()
# subflow and signal, remove
if reset "remove subflow and signal"; then
pm_nl_set_limits $ns1 0 2
- pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal,backup
pm_nl_set_limits $ns2 1 2
- pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+ pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow,backup
addr_nr_ns1=-1 addr_nr_ns2=-1 speed=slow \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 2 2 2
@@ -2397,10 +2608,10 @@ remove_tests()
# subflows and signal, remove
if reset "remove subflows and signal"; then
pm_nl_set_limits $ns1 0 3
- pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal,backup
pm_nl_set_limits $ns2 1 3
- pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
- pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow
+ pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow,backup
+ pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow,backup
addr_nr_ns1=-1 addr_nr_ns2=-2 speed=10 \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 3 3 3
@@ -2412,9 +2623,9 @@ remove_tests()
# addresses remove
if reset "remove addresses"; then
pm_nl_set_limits $ns1 3 3
- pm_nl_add_endpoint $ns1 10.0.2.1 flags signal id 250
- pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
- pm_nl_add_endpoint $ns1 10.0.4.1 flags signal
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal,backup id 250
+ pm_nl_add_endpoint $ns1 10.0.3.1 flags signal,backup
+ pm_nl_add_endpoint $ns1 10.0.4.1 flags signal,backup
pm_nl_set_limits $ns2 3 3
addr_nr_ns1=-3 speed=10 \
run_tests $ns1 $ns2 10.0.1.1
@@ -2427,10 +2638,10 @@ remove_tests()
# invalid addresses remove
if reset "remove invalid addresses"; then
pm_nl_set_limits $ns1 3 3
- pm_nl_add_endpoint $ns1 10.0.12.1 flags signal
+ pm_nl_add_endpoint $ns1 10.0.12.1 flags signal,backup
# broadcast IP: no packet for this address will be received on ns1
- pm_nl_add_endpoint $ns1 224.0.0.1 flags signal
- pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
+ pm_nl_add_endpoint $ns1 224.0.0.1 flags signal,backup
+ pm_nl_add_endpoint $ns1 10.0.3.1 flags signal,backup
pm_nl_set_limits $ns2 2 2
addr_nr_ns1=-3 speed=10 \
run_tests $ns1 $ns2 10.0.1.1
@@ -2444,10 +2655,10 @@ remove_tests()
# subflows and signal, flush
if reset "flush subflows and signal"; then
pm_nl_set_limits $ns1 0 3
- pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal,backup
pm_nl_set_limits $ns2 1 3
- pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
- pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow
+ pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow,backup
+ pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow,backup
addr_nr_ns1=-8 addr_nr_ns2=-8 speed=slow \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 3 3 3
@@ -2460,9 +2671,9 @@ remove_tests()
if reset "flush subflows"; then
pm_nl_set_limits $ns1 3 3
pm_nl_set_limits $ns2 3 3
- pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow id 150
- pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
- pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow
+ pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow,backup id 150
+ pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow,backup
+ pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow,backup
addr_nr_ns1=-8 addr_nr_ns2=-8 speed=slow \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 3 3 3
@@ -2479,9 +2690,9 @@ remove_tests()
# addresses flush
if reset "flush addresses"; then
pm_nl_set_limits $ns1 3 3
- pm_nl_add_endpoint $ns1 10.0.2.1 flags signal id 250
- pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
- pm_nl_add_endpoint $ns1 10.0.4.1 flags signal
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal,backup id 250
+ pm_nl_add_endpoint $ns1 10.0.3.1 flags signal,backup
+ pm_nl_add_endpoint $ns1 10.0.4.1 flags signal,backup
pm_nl_set_limits $ns2 3 3
addr_nr_ns1=-8 addr_nr_ns2=-8 speed=slow \
run_tests $ns1 $ns2 10.0.1.1
@@ -2494,9 +2705,9 @@ remove_tests()
# invalid addresses flush
if reset "flush invalid addresses"; then
pm_nl_set_limits $ns1 3 3
- pm_nl_add_endpoint $ns1 10.0.12.1 flags signal
- pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
- pm_nl_add_endpoint $ns1 10.0.14.1 flags signal
+ pm_nl_add_endpoint $ns1 10.0.12.1 flags signal,backup
+ pm_nl_add_endpoint $ns1 10.0.3.1 flags signal,backup
+ pm_nl_add_endpoint $ns1 10.0.14.1 flags signal,backup
pm_nl_set_limits $ns2 3 3
addr_nr_ns1=-8 speed=slow \
run_tests $ns1 $ns2 10.0.1.1
@@ -2765,7 +2976,11 @@ mixed_tests()
pm_nl_add_endpoint $ns1 10.0.1.1 flags signal
speed=slow \
run_tests $ns1 $ns2 dead:beef:2::1
- chk_join_nr 1 1 1
+ if mptcp_lib_kallsyms_has "mptcp_pm_get_endp_fullmesh_max$"; then
+ chk_join_nr 0 0 0
+ else
+ chk_join_nr 1 1 1
+ fi
fi
# fullmesh still tries to create all the possible subflows with
@@ -3046,6 +3261,133 @@ add_addr_ports_tests()
fi
}
+bind_tests()
+{
+ # bind to one address should not allow extra subflows to other addresses
+ if reset "bind main address v4, no join v4"; then
+ pm_nl_set_limits $ns1 0 2
+ pm_nl_set_limits $ns2 2 2
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+ bind_addr="10.0.1.1" \
+ run_tests $ns1 $ns2 10.0.1.1
+ join_syn_tx=1 \
+ chk_join_nr 0 0 0
+ chk_add_nr 1 1
+ fi
+
+ # bind to one address should not allow extra subflows to other addresses
+ if reset "bind main address v6, no join v6"; then
+ pm_nl_set_limits $ns1 0 2
+ pm_nl_set_limits $ns2 2 2
+ pm_nl_add_endpoint $ns1 dead:beef:2::1 flags signal
+ bind_addr="dead:beef:1::1" \
+ run_tests $ns1 $ns2 dead:beef:1::1
+ join_syn_tx=1 \
+ chk_join_nr 0 0 0
+ chk_add_nr 1 1
+ fi
+
+ # multiple binds to allow extra subflows to other addresses
+ if reset "multiple bind to allow joins v4"; then
+ local extra_bind
+
+ pm_nl_set_limits $ns1 0 2
+ pm_nl_set_limits $ns2 2 2
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+
+ # Launching another app listening on a different address
+ # Note: it could be a totally different app, e.g. nc, socat, ...
+ ip netns exec ${ns1} ./mptcp_connect -l -t -1 -p "$(get_port)" \
+ -s MPTCP 10.0.2.1 &
+ extra_bind=$!
+
+ bind_addr="10.0.1.1" \
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 1 1 1
+ chk_add_nr 1 1
+
+ kill ${extra_bind}
+ fi
+
+ # multiple binds to allow extra subflows to other addresses
+ if reset "multiple bind to allow joins v6"; then
+ local extra_bind
+
+ pm_nl_set_limits $ns1 0 2
+ pm_nl_set_limits $ns2 2 2
+ pm_nl_add_endpoint $ns1 dead:beef:2::1 flags signal
+
+ # Launching another app listening on a different address
+ # Note: it could be a totally different app, e.g. nc, socat, ...
+ ip netns exec ${ns1} ./mptcp_connect -l -t -1 -p "$(get_port)" \
+ -s MPTCP dead:beef:2::1 &
+ extra_bind=$!
+
+ bind_addr="dead:beef:1::1" \
+ run_tests $ns1 $ns2 dead:beef:1::1
+ chk_join_nr 1 1 1
+ chk_add_nr 1 1
+
+ kill ${extra_bind}
+ fi
+
+ # multiple binds to allow extra subflows to other addresses: v6 LL case
+ if reset "multiple bind to allow joins v6 link-local routing"; then
+ local extra_bind ns1ll1 ns1ll2
+
+ ns1ll1="$(get_ll_addr $ns1 ns1eth1)"
+ ns1ll2="$(get_ll_addr $ns1 ns1eth2)"
+
+ pm_nl_set_limits $ns1 0 2
+ pm_nl_set_limits $ns2 2 2
+ pm_nl_add_endpoint $ns1 "${ns1ll2}" flags signal
+
+ wait_ll_ready $ns1 # to be able to bind
+ wait_ll_ready $ns2 # also needed to bind on the client side
+ ip netns exec ${ns1} ./mptcp_connect -l -t -1 -p "$(get_port)" \
+ -s MPTCP "${ns1ll2}%ns1eth2" &
+ extra_bind=$!
+
+ bind_addr="${ns1ll1}%ns1eth1" \
+ run_tests $ns1 $ns2 "${ns1ll1}%ns2eth1"
+ # it is not possible to connect to the announced LL addr without
+ # specifying the outgoing interface.
+ join_connect_err=1 \
+ chk_join_nr 0 0 0
+ chk_add_nr 1 1
+
+ kill ${extra_bind}
+ fi
+
+ # multiple binds to allow extra subflows to v6 LL addresses: laminar
+ if reset "multiple bind to allow joins v6 link-local laminar" &&
+ continue_if mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then
+ local extra_bind ns1ll1 ns1ll2 ns2ll2
+
+ ns1ll1="$(get_ll_addr $ns1 ns1eth1)"
+ ns1ll2="$(get_ll_addr $ns1 ns1eth2)"
+ ns2ll2="$(get_ll_addr $ns2 ns2eth2)"
+
+ pm_nl_set_limits $ns1 0 2
+ pm_nl_set_limits $ns2 2 2
+ pm_nl_add_endpoint $ns1 "${ns1ll2}" flags signal
+ pm_nl_add_endpoint $ns2 "${ns2ll2}" flags laminar dev ns2eth2
+
+ wait_ll_ready $ns1 # to be able to bind
+ wait_ll_ready $ns2 # also needed to bind on the client side
+ ip netns exec ${ns1} ./mptcp_connect -l -t -1 -p "$(get_port)" \
+ -s MPTCP "${ns1ll2}%ns1eth2" &
+ extra_bind=$!
+
+ bind_addr="${ns1ll1}%ns1eth1" \
+ run_tests $ns1 $ns2 "${ns1ll1}%ns2eth1"
+ chk_join_nr 1 1 1
+ chk_add_nr 1 1
+
+ kill ${extra_bind}
+ fi
+}
+
syncookies_tests()
{
# single subflow, syncookies
@@ -3187,6 +3529,17 @@ deny_join_id0_tests()
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 1 1 1
fi
+
+ # default limits, server deny join id 0 + signal
+ if reset_with_allow_join_id0 "default limits, server deny join id 0" 0 1; then
+ pm_nl_set_limits $ns1 0 2
+ pm_nl_set_limits $ns2 0 2
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+ pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+ pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 2 2 2
+ fi
}
fullmesh_tests()
@@ -3302,7 +3655,6 @@ fullmesh_tests()
fastclose_tests()
{
if reset_check_counter "fastclose test" "MPTcpExtMPFastcloseTx"; then
- MPTCP_LIB_SUBTEST_FLAKY=1
test_linkfail=1024 fastclose=client \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 0 0 0
@@ -3311,7 +3663,6 @@ fastclose_tests()
fi
if reset_check_counter "fastclose server test" "MPTcpExtMPFastcloseRx"; then
- MPTCP_LIB_SUBTEST_FLAKY=1
test_linkfail=1024 fastclose=server \
run_tests $ns1 $ns2 10.0.1.1
join_rst_nr=1 \
@@ -3337,6 +3688,7 @@ fail_tests()
join_csum_ns1=+1 join_csum_ns2=+0 \
join_fail_nr=1 join_rst_nr=0 join_infi_nr=1 \
join_corrupted_pkts="$(pedit_action_pkts)" \
+ fb_ns1="fb_dss=1" fb_ns2="fb_infinite_map_tx=1" \
chk_join_nr 0 0 0
chk_fail_nr 1 -1 invert
fi
@@ -3607,7 +3959,7 @@ userspace_tests()
continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
set_userspace_pm $ns1
pm_nl_set_limits $ns2 2 2
- { speed=5 \
+ { timeout_test=120 test_linkfail=128 speed=5 \
run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null
local tests_pid=$!
wait_mpj $ns1
@@ -3632,7 +3984,7 @@ userspace_tests()
chk_mptcp_info subflows 0 subflows 0
chk_subflows_total 1 1
kill_events_pids
- mptcp_lib_kill_wait $tests_pid
+ mptcp_lib_kill_group_wait $tests_pid
fi
# userspace pm create destroy subflow
@@ -3640,7 +3992,7 @@ userspace_tests()
continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
set_userspace_pm $ns2
pm_nl_set_limits $ns1 0 1
- { speed=5 \
+ { timeout_test=120 test_linkfail=128 speed=5 \
run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null
local tests_pid=$!
wait_mpj $ns2
@@ -3660,7 +4012,7 @@ userspace_tests()
chk_mptcp_info subflows 0 subflows 0
chk_subflows_total 1 1
kill_events_pids
- mptcp_lib_kill_wait $tests_pid
+ mptcp_lib_kill_group_wait $tests_pid
fi
# userspace pm create id 0 subflow
@@ -3668,7 +4020,7 @@ userspace_tests()
continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
set_userspace_pm $ns2
pm_nl_set_limits $ns1 0 1
- { speed=5 \
+ { timeout_test=120 test_linkfail=128 speed=5 \
run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null
local tests_pid=$!
wait_mpj $ns2
@@ -3681,7 +4033,7 @@ userspace_tests()
chk_mptcp_info subflows 1 subflows 1
chk_subflows_total 2 2
kill_events_pids
- mptcp_lib_kill_wait $tests_pid
+ mptcp_lib_kill_group_wait $tests_pid
fi
# userspace pm remove initial subflow
@@ -3689,7 +4041,7 @@ userspace_tests()
continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
set_userspace_pm $ns2
pm_nl_set_limits $ns1 0 1
- { speed=5 \
+ { timeout_test=120 test_linkfail=128 speed=5 \
run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null
local tests_pid=$!
wait_mpj $ns2
@@ -3705,7 +4057,7 @@ userspace_tests()
chk_mptcp_info subflows 1 subflows 1
chk_subflows_total 1 1
kill_events_pids
- mptcp_lib_kill_wait $tests_pid
+ mptcp_lib_kill_group_wait $tests_pid
fi
# userspace pm send RM_ADDR for ID 0
@@ -3713,7 +4065,7 @@ userspace_tests()
continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
set_userspace_pm $ns1
pm_nl_set_limits $ns2 1 1
- { speed=5 \
+ { timeout_test=120 test_linkfail=128 speed=5 \
run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null
local tests_pid=$!
wait_mpj $ns1
@@ -3731,7 +4083,7 @@ userspace_tests()
chk_mptcp_info subflows 1 subflows 1
chk_subflows_total 1 1
kill_events_pids
- mptcp_lib_kill_wait $tests_pid
+ mptcp_lib_kill_group_wait $tests_pid
fi
}
@@ -3740,11 +4092,11 @@ endpoint_tests()
# subflow_rebuild_header is needed to support the implicit flag
# userspace pm type prevents add_addr
if reset "implicit EP" &&
- mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+ continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
pm_nl_set_limits $ns1 2 2
pm_nl_set_limits $ns2 2 2
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
- { speed=slow \
+ { timeout_test=120 test_linkfail=128 speed=slow \
run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null
local tests_pid=$!
@@ -3761,17 +4113,17 @@ endpoint_tests()
pm_nl_add_endpoint $ns2 10.0.2.2 flags signal
pm_nl_check_endpoint "modif is allowed" \
$ns2 10.0.2.2 id 1 flags signal
- mptcp_lib_kill_wait $tests_pid
+ mptcp_lib_kill_group_wait $tests_pid
fi
if reset_with_tcp_filter "delete and re-add" ns2 10.0.3.2 REJECT OUTPUT &&
- mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+ continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
start_events
pm_nl_set_limits $ns1 0 3
pm_nl_set_limits $ns2 0 3
pm_nl_add_endpoint $ns2 10.0.1.2 id 1 dev ns2eth1 flags subflow
pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
- { test_linkfail=4 speed=5 \
+ { timeout_test=120 test_linkfail=128 speed=5 \
run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null
local tests_pid=$!
@@ -3816,7 +4168,7 @@ endpoint_tests()
chk_mptcp_info subflows 3 subflows 3
done
- mptcp_lib_kill_wait $tests_pid
+ mptcp_lib_kill_group_wait $tests_pid
kill_events_pids
chk_evt_nr ns1 MPTCP_LIB_EVENT_LISTENER_CREATED 1
@@ -3841,14 +4193,15 @@ endpoint_tests()
# remove and re-add
if reset_with_events "delete re-add signal" &&
- mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+ continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+ ip netns exec $ns1 sysctl -q net.mptcp.add_addr_timeout=0
pm_nl_set_limits $ns1 0 3
pm_nl_set_limits $ns2 3 3
pm_nl_add_endpoint $ns1 10.0.2.1 id 1 flags signal
# broadcast IP: no packet for this address will be received on ns1
pm_nl_add_endpoint $ns1 224.0.0.1 id 2 flags signal
pm_nl_add_endpoint $ns1 10.0.1.1 id 42 flags signal
- { test_linkfail=4 speed=5 \
+ { timeout_test=120 test_linkfail=128 speed=5 \
run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null
local tests_pid=$!
@@ -3857,39 +4210,46 @@ endpoint_tests()
$ns1 10.0.2.1 id 1 flags signal
chk_subflow_nr "before delete" 2
chk_mptcp_info subflows 1 subflows 1
+ chk_mptcp_info add_addr_signal 2 add_addr_accepted 1
pm_nl_del_endpoint $ns1 1 10.0.2.1
pm_nl_del_endpoint $ns1 2 224.0.0.1
sleep 0.5
chk_subflow_nr "after delete" 1
chk_mptcp_info subflows 0 subflows 0
+ chk_mptcp_info add_addr_signal 0 add_addr_accepted 0
pm_nl_add_endpoint $ns1 10.0.2.1 id 1 flags signal
pm_nl_add_endpoint $ns1 10.0.3.1 id 2 flags signal
wait_mpj $ns2
chk_subflow_nr "after re-add" 3
chk_mptcp_info subflows 2 subflows 2
+ chk_mptcp_info add_addr_signal 2 add_addr_accepted 2
pm_nl_del_endpoint $ns1 42 10.0.1.1
sleep 0.5
chk_subflow_nr "after delete ID 0" 2
chk_mptcp_info subflows 2 subflows 2
+ chk_mptcp_info add_addr_signal 2 add_addr_accepted 2
pm_nl_add_endpoint $ns1 10.0.1.1 id 99 flags signal
wait_mpj $ns2
chk_subflow_nr "after re-add ID 0" 3
chk_mptcp_info subflows 3 subflows 3
+ chk_mptcp_info add_addr_signal 3 add_addr_accepted 2
pm_nl_del_endpoint $ns1 99 10.0.1.1
sleep 0.5
chk_subflow_nr "after re-delete ID 0" 2
chk_mptcp_info subflows 2 subflows 2
+ chk_mptcp_info add_addr_signal 2 add_addr_accepted 2
pm_nl_add_endpoint $ns1 10.0.1.1 id 88 flags signal
wait_mpj $ns2
chk_subflow_nr "after re-re-add ID 0" 3
chk_mptcp_info subflows 3 subflows 3
- mptcp_lib_kill_wait $tests_pid
+ chk_mptcp_info add_addr_signal 3 add_addr_accepted 2
+ mptcp_lib_kill_group_wait $tests_pid
kill_events_pids
chk_evt_nr ns1 MPTCP_LIB_EVENT_LISTENER_CREATED 1
@@ -3915,13 +4275,13 @@ endpoint_tests()
# flush and re-add
if reset_with_tcp_filter "flush re-add" ns2 10.0.3.2 REJECT OUTPUT &&
- mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+ continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
pm_nl_set_limits $ns1 0 2
pm_nl_set_limits $ns2 1 2
# broadcast IP: no packet for this address will be received on ns1
pm_nl_add_endpoint $ns1 224.0.0.1 id 2 flags signal
pm_nl_add_endpoint $ns2 10.0.3.2 id 3 flags subflow
- { test_linkfail=4 speed=20 \
+ { timeout_test=120 test_linkfail=128 speed=20 \
run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null
local tests_pid=$!
@@ -3937,7 +4297,7 @@ endpoint_tests()
wait_mpj $ns2
pm_nl_add_endpoint $ns1 10.0.3.1 id 2 flags signal
wait_mpj $ns2
- mptcp_lib_kill_wait $tests_pid
+ mptcp_lib_kill_group_wait $tests_pid
join_syn_tx=3 join_connect_err=1 \
chk_join_nr 2 2 2
@@ -3977,6 +4337,7 @@ all_tests_sorted=(
f@subflows_tests
e@subflows_error_tests
s@signal_address_tests
+ L@laminar_endp_tests
l@link_failure_tests
t@add_addr_timeout_tests
r@remove_tests
@@ -3986,6 +4347,7 @@ all_tests_sorted=(
M@mixed_tests
b@backup_tests
p@add_addr_ports_tests
+ B@bind_tests
k@syncookies_tests
S@checksum_tests
d@deny_join_id0_tests
diff --git a/tools/testing/selftests/net/mptcp/mptcp_lib.sh b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
index 09cd24b2ae46..5fea7e7df628 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_lib.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
@@ -106,23 +106,32 @@ mptcp_lib_pr_info() {
mptcp_lib_print_info "INFO: ${*}"
}
-# $1-2: listener/connector ns ; $3 port ; $4-5 listener/connector stat file
+mptcp_lib_pr_nstat() {
+ local ns="${1}"
+ local hist="/tmp/${ns}.out"
+
+ if [ -f "${hist}" ]; then
+ awk '$2 != 0 { print " "$0 }' "${hist}"
+ else
+ ip netns exec "${ns}" nstat -as | grep Tcp
+ fi
+}
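For illustration, the awk filter keeps only the non-zero counters from the history file and indents them (the file content here is made up):

    printf 'TcpPassiveOpens 0\nMPTcpExtMPCapableSYNRX 1\n' > /tmp/demo.out
    awk '$2 != 0 { print " "$0 }' /tmp/demo.out   # -> " MPTcpExtMPCapableSYNRX 1"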
+
+# $1-2: listener/connector ns ; $3 port
mptcp_lib_pr_err_stats() {
local lns="${1}"
local cns="${2}"
local port="${3}"
- local lstat="${4}"
- local cstat="${5}"
echo -en "${MPTCP_LIB_COLOR_RED}"
{
printf "\nnetns %s (listener) socket stat for %d:\n" "${lns}" "${port}"
ip netns exec "${lns}" ss -Menitam -o "sport = :${port}"
- cat "${lstat}"
+ mptcp_lib_pr_nstat "${lns}"
printf "\nnetns %s (connector) socket stat for %d:\n" "${cns}" "${port}"
ip netns exec "${cns}" ss -Menitam -o "dport = :${port}"
- [ "${lstat}" != "${cstat}" ] && cat "${cstat}"
+ [ "${lns}" != "${cns}" ] && mptcp_lib_pr_nstat "${cns}"
} 1>&2
echo -en "${MPTCP_LIB_COLOR_RESET}"
}
@@ -341,6 +350,19 @@ mptcp_lib_evts_get_info() {
mptcp_lib_get_info_value "${1}" "^type:${3:-1},"
}
+mptcp_lib_wait_timeout() {
+ local timeout_test="${1}"
+ local listener_ns="${2}"
+ local connector_ns="${3}"
+ local port="${4}"
+ shift 4 # rest are PIDs
+
+ sleep "${timeout_test}"
+ mptcp_lib_print_err "timeout"
+ mptcp_lib_pr_err_stats "${listener_ns}" "${connector_ns}" "${port}"
+ kill "${@}" 2>/dev/null
+}
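A usage sketch matching the do_transfer() callers (PIDs and namespace names are placeholders): start the watchdog in the background, then reap it if the transfer finished in time. The group kill matters because the watchdog's sleep runs as a separate child process.

    mptcp_lib_wait_timeout 30 "${listener_ns}" "${connector_ns}" \
        "${port}" "${cpid}" "${spid}" &
    timeout_pid=$!
    wait "${cpid}"; retc=$?
    wait "${spid}"; rets=$?
    if kill -0 "${timeout_pid}" 2>/dev/null; then
        mptcp_lib_kill_group_wait "${timeout_pid}"   # no timeout occurred
        timeout_pid=0
    fi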
+
# $1: PID
mptcp_lib_kill_wait() {
[ "${1}" -eq 0 ] && return 0
@@ -350,19 +372,62 @@ mptcp_lib_kill_wait() {
wait "${1}" 2>/dev/null
}
+# $1: PID
+mptcp_lib_pid_list_children() {
+ local curr="${1}"
+ # evoke 'ps' only once
+ local pids="${2:-"$(ps o pid,ppid)"}"
+
+ echo "${curr}"
+
+ local pid
+ for pid in $(echo "${pids}" | awk "\$2 == ${curr} { print \$1 }"); do
+ mptcp_lib_pid_list_children "${pid}" "${pids}"
+ done
+}
+
+# $1: PID
+mptcp_lib_kill_group_wait() {
+ # Some users might not have procps-ng: cannot use "kill -- -PID"
+ mptcp_lib_pid_list_children "${1}" | xargs -r kill &>/dev/null
+ wait "${1}" 2>/dev/null
+}
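A quick illustration of the PID-tree walk, assuming mptcp_lib.sh is sourced (the table mimics 'ps o pid,ppid' output with made-up PIDs):

    pids="$(printf '  PID  PPID\n  100    1\n  101  100\n  102  101\n')"
    mptcp_lib_pid_list_children 100 "${pids}"   # -> 100, 101, 102, one per line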
+
# $1: IP address
mptcp_lib_is_v6() {
[ -z "${1##*:*}" ]
}
+mptcp_lib_nstat_init() {
+ local ns="${1}"
+
+ rm -f "/tmp/${ns}."{nstat,out}
+ NSTAT_HISTORY="/tmp/${ns}.nstat" ip netns exec "${ns}" nstat -n
+}
+
+mptcp_lib_nstat_get() {
+ local ns="${1}"
+
+ # filter out non-*TCP stats, and the rate (last column)
+ NSTAT_HISTORY="/tmp/${ns}.nstat" ip netns exec "${ns}" nstat -sz |
+ grep -o ".*Tcp\S\+\s\+[0-9]\+" > "/tmp/${ns}.out"
+}
+
# $1: ns, $2: MIB counter
+# Get the counter from the history (mptcp_lib_nstat_{init,get}()) if available.
+# If not, get the counter from nstat ignoring any history.
mptcp_lib_get_counter() {
local ns="${1}"
local counter="${2}"
+ local hist="/tmp/${ns}.out"
local count
- count=$(ip netns exec "${ns}" nstat -asz "${counter}" |
- awk 'NR==1 {next} {print $2}')
+ if [[ -s "${hist}" && "${counter}" == *"Tcp"* ]]; then
+ count=$(awk "/^${counter} / {print \$2; exit}" "${hist}")
+ else
+ count=$(ip netns exec "${ns}" nstat -asz "${counter}" |
+ awk 'NR==1 {next} {print $2}')
+ fi
if [ -z "${count}" ]; then
mptcp_lib_fail_if_expected_feature "${counter} counter"
return 1
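The typical pairing across this series (namespace name is a placeholder): reset the history before the transfer, snapshot it afterwards, then read counters from the snapshot instead of invoking nstat again:

    mptcp_lib_nstat_init "ns1"    # new history in /tmp/ns1.nstat, stale .out removed
    # ... run the transfer ...
    mptcp_lib_nstat_get "ns1"     # TCP/MPTCP deltas saved to /tmp/ns1.out
    mptcp_lib_get_counter "ns1" "MPTcpExtMPCapableSYNRX"   # served from the history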
@@ -384,7 +449,7 @@ mptcp_lib_make_file() {
mptcp_lib_print_file_err() {
ls -l "${1}" 1>&2
echo "Trailing bytes are: "
- tail -c 27 "${1}"
+ tail -c 32 "${1}" | od -x | head -n2
}
# $1: input file ; $2: output file ; $3: what kind of file
diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.c b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
index 9934a68df237..286164f7246e 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
+++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
@@ -162,9 +162,10 @@ static void xgetaddrinfo(const char *node, const char *service,
struct addrinfo *hints,
struct addrinfo **res)
{
-again:
- int err = getaddrinfo(node, service, hints, res);
+ int err;
+again:
+ err = getaddrinfo(node, service, hints, res);
if (err) {
const char *errstr;
@@ -666,22 +667,26 @@ static void process_one_client(int fd, int pipefd)
do_getsockopts(&s, fd, ret, ret2);
if (s.mptcpi_rcv_delta != (uint64_t)ret + 1)
- xerror("mptcpi_rcv_delta %" PRIu64 ", expect %" PRIu64, s.mptcpi_rcv_delta, ret + 1, s.mptcpi_rcv_delta - ret);
+ xerror("mptcpi_rcv_delta %" PRIu64 ", expect %" PRIu64 ", diff %" PRId64,
+ s.mptcpi_rcv_delta, ret + 1, s.mptcpi_rcv_delta - (ret + 1));
/* be nice when running on top of older kernel */
if (s.pkt_stats_avail) {
if (s.last_sample.mptcpi_bytes_sent != ret2)
- xerror("mptcpi_bytes_sent %" PRIu64 ", expect %" PRIu64,
+ xerror("mptcpi_bytes_sent %" PRIu64 ", expect %" PRIu64
+ ", diff %" PRId64,
s.last_sample.mptcpi_bytes_sent, ret2,
s.last_sample.mptcpi_bytes_sent - ret2);
if (s.last_sample.mptcpi_bytes_received != ret)
- xerror("mptcpi_bytes_received %" PRIu64 ", expect %" PRIu64,
+ xerror("mptcpi_bytes_received %" PRIu64 ", expect %" PRIu64
+ ", diff %" PRId64,
s.last_sample.mptcpi_bytes_received, ret,
s.last_sample.mptcpi_bytes_received - ret);
if (s.last_sample.mptcpi_bytes_acked != ret)
- xerror("mptcpi_bytes_acked %" PRIu64 ", expect %" PRIu64,
- s.last_sample.mptcpi_bytes_acked, ret2,
- s.last_sample.mptcpi_bytes_acked - ret2);
+ xerror("mptcpi_bytes_acked %" PRIu64 ", expect %" PRIu64
+ ", diff %" PRId64,
+ s.last_sample.mptcpi_bytes_acked, ret,
+ s.last_sample.mptcpi_bytes_acked - ret);
}
close(fd);
@@ -721,6 +726,7 @@ static int server(int pipefd)
process_one_client(r, pipefd);
+ close(fd);
return 0;
}
@@ -846,8 +852,12 @@ int main(int argc, char *argv[])
die_perror("pipe");
s = xfork();
- if (s == 0)
- return server(pipefds[1]);
+ if (s == 0) {
+ close(pipefds[0]);
+ ret = server(pipefds[1]);
+ close(pipefds[1]);
+ return ret;
+ }
close(pipefds[1]);
diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
index 418a903c3a4d..ab8bce06b262 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
@@ -95,7 +95,7 @@ init()
}
# This function is used in the cleanup trap
-#shellcheck disable=SC2317
+#shellcheck disable=SC2317,SC2329
cleanup()
{
mptcp_lib_ns_exit "${ns1}" "${ns2}" "${ns_sbox}"
@@ -169,41 +169,44 @@ do_transfer()
cmsg+=",TCPINQ"
fi
- NSTAT_HISTORY=/tmp/${listener_ns}.nstat ip netns exec ${listener_ns} \
- nstat -n
- NSTAT_HISTORY=/tmp/${connector_ns}.nstat ip netns exec ${connector_ns} \
- nstat -n
+ mptcp_lib_nstat_init "${listener_ns}"
+ mptcp_lib_nstat_init "${connector_ns}"
- timeout ${timeout_test} \
- ip netns exec ${listener_ns} \
- $mptcp_connect -t ${timeout_poll} -l -M 1 -p $port -s ${srv_proto} -c "${cmsg}" \
- ${local_addr} < "$sin" > "$sout" &
+ ip netns exec ${listener_ns} \
+ $mptcp_connect -t ${timeout_poll} -l -M 1 -p $port -s ${srv_proto} -c "${cmsg}" \
+ ${local_addr} < "$sin" > "$sout" &
local spid=$!
- sleep 1
+ mptcp_lib_wait_local_port_listen "${listener_ns}" "${port}"
- timeout ${timeout_test} \
- ip netns exec ${connector_ns} \
- $mptcp_connect -t ${timeout_poll} -M 2 -p $port -s ${cl_proto} -c "${cmsg}" \
- $connect_addr < "$cin" > "$cout" &
+ ip netns exec ${connector_ns} \
+ $mptcp_connect -t ${timeout_poll} -M 2 -p $port -s ${cl_proto} -c "${cmsg}" \
+ $connect_addr < "$cin" > "$cout" &
local cpid=$!
+ mptcp_lib_wait_timeout "${timeout_test}" "${listener_ns}" \
+ "${connector_ns}" "${port}" "${cpid}" "${spid}" &
+ local timeout_pid=$!
+
wait $cpid
local retc=$?
wait $spid
local rets=$?
- NSTAT_HISTORY=/tmp/${listener_ns}.nstat ip netns exec ${listener_ns} \
- nstat | grep Tcp > /tmp/${listener_ns}.out
- NSTAT_HISTORY=/tmp/${connector_ns}.nstat ip netns exec ${connector_ns} \
- nstat | grep Tcp > /tmp/${connector_ns}.out
+ if kill -0 $timeout_pid; then
+ # Finished before the timeout: kill the background job
+ mptcp_lib_kill_group_wait $timeout_pid
+ timeout_pid=0
+ fi
+
+ mptcp_lib_nstat_get "${listener_ns}"
+ mptcp_lib_nstat_get "${connector_ns}"
print_title "Transfer ${ip:2}"
- if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then
+ if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ] || [ ${timeout_pid} -ne 0 ]; then
mptcp_lib_pr_fail "client exit code $retc, server $rets"
- mptcp_lib_pr_err_stats "${listener_ns}" "${connector_ns}" "${port}" \
- "/tmp/${listener_ns}.out" "/tmp/${connector_ns}.out"
+ mptcp_lib_pr_err_stats "${listener_ns}" "${connector_ns}" "${port}"
mptcp_lib_result_fail "transfer ${ip}"
diff --git a/tools/testing/selftests/net/mptcp/pm_netlink.sh b/tools/testing/selftests/net/mptcp/pm_netlink.sh
index 2e6648a2b2c0..ec6a87588191 100755
--- a/tools/testing/selftests/net/mptcp/pm_netlink.sh
+++ b/tools/testing/selftests/net/mptcp/pm_netlink.sh
@@ -32,7 +32,7 @@ ns1=""
err=$(mktemp)
# This function is used in the cleanup trap
-#shellcheck disable=SC2317
+#shellcheck disable=SC2317,SC2329
cleanup()
{
rm -f "${err}"
@@ -70,8 +70,9 @@ format_endpoints() {
mptcp_lib_pm_nl_format_endpoints "${@}"
}
+# This function is invoked indirectly
+#shellcheck disable=SC2317,SC2329
get_endpoint() {
- # shellcheck disable=SC2317 # invoked indirectly
mptcp_lib_pm_nl_get_endpoint "${ns1}" "${@}"
}
@@ -198,6 +199,7 @@ set_limits 1 9 2>/dev/null
check "get_limits" "${default_limits}" "subflows above hard limit"
set_limits 8 8
+flush_endpoint ## to make sure it doesn't affect the limits
check "get_limits" "$(format_limits 8 8)" "set limits"
flush_endpoint
diff --git a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
index 994a556f46c1..65b374232ff5 100644
--- a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
+++ b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
@@ -2,6 +2,7 @@
#include <errno.h>
#include <error.h>
+#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -113,6 +114,8 @@ static int capture_events(int fd, int event_group)
error(1, errno, "could not join the " MPTCP_PM_EV_GRP_NAME " mcast group");
do {
+ bool server_side = false;
+
FD_ZERO(&rfds);
FD_SET(fd, &rfds);
res_len = NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
@@ -187,11 +190,22 @@ static int capture_events(int fd, int event_group)
else if (attrs->rta_type == MPTCP_ATTR_ERROR)
fprintf(stderr, ",error:%u", *(__u8 *)RTA_DATA(attrs));
else if (attrs->rta_type == MPTCP_ATTR_SERVER_SIDE)
- fprintf(stderr, ",server_side:%u", *(__u8 *)RTA_DATA(attrs));
+ server_side = !!*(__u8 *)RTA_DATA(attrs);
+ else if (attrs->rta_type == MPTCP_ATTR_FLAGS) {
+ __u16 flags = *(__u16 *)RTA_DATA(attrs);
+
+ /* only print when present, easier */
+ if (flags & MPTCP_PM_EV_FLAG_DENY_JOIN_ID0)
+ fprintf(stderr, ",deny_join_id0:1");
+ if (flags & MPTCP_PM_EV_FLAG_SERVER_SIDE)
+ server_side = true;
+ }
attrs = RTA_NEXT(attrs, msg_len);
}
}
+ if (server_side)
+ fprintf(stderr, ",server_side:1");
fprintf(stderr, "\n");
} while (1);
@@ -816,6 +830,8 @@ int add_addr(int fd, int pm_family, int argc, char *argv[])
flags |= MPTCP_PM_ADDR_FLAG_SUBFLOW;
else if (!strcmp(tok, "signal"))
flags |= MPTCP_PM_ADDR_FLAG_SIGNAL;
+ else if (!strcmp(tok, "laminar"))
+ flags |= MPTCP_PM_ADDR_FLAG_LAMINAR;
else if (!strcmp(tok, "backup"))
flags |= MPTCP_PM_ADDR_FLAG_BACKUP;
else if (!strcmp(tok, "fullmesh"))
@@ -1004,6 +1020,13 @@ static void print_addr(struct rtattr *attrs, int len)
printf(",");
}
+ if (flags & MPTCP_PM_ADDR_FLAG_LAMINAR) {
+ printf("laminar");
+ flags &= ~MPTCP_PM_ADDR_FLAG_LAMINAR;
+ if (flags)
+ printf(",");
+ }
+
if (flags & MPTCP_PM_ADDR_FLAG_BACKUP) {
printf("backup");
flags &= ~MPTCP_PM_ADDR_FLAG_BACKUP;
diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
index 2329c2f8519b..806aaa7d2d61 100755
--- a/tools/testing/selftests/net/mptcp/simult_flows.sh
+++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
@@ -35,7 +35,7 @@ usage() {
}
# This function is used in the cleanup trap
-#shellcheck disable=SC2317
+#shellcheck disable=SC2317,SC2329
cleanup()
{
rm -f "$cout" "$sout"
@@ -155,48 +155,53 @@ do_transfer()
sleep 1
fi
- NSTAT_HISTORY=/tmp/${ns3}.nstat ip netns exec ${ns3} \
- nstat -n
- NSTAT_HISTORY=/tmp/${ns1}.nstat ip netns exec ${ns1} \
- nstat -n
+ mptcp_lib_nstat_init "${ns3}"
+ mptcp_lib_nstat_init "${ns1}"
- timeout ${timeout_test} \
- ip netns exec ${ns3} \
- ./mptcp_connect -jt ${timeout_poll} -l -p $port -T $max_time \
- 0.0.0.0 < "$sin" > "$sout" &
+ ip netns exec ${ns3} \
+ ./mptcp_connect -jt ${timeout_poll} -l -p $port -T $max_time \
+ 0.0.0.0 < "$sin" > "$sout" &
local spid=$!
mptcp_lib_wait_local_port_listen "${ns3}" "${port}"
- timeout ${timeout_test} \
- ip netns exec ${ns1} \
- ./mptcp_connect -jt ${timeout_poll} -p $port -T $max_time \
- 10.0.3.3 < "$cin" > "$cout" &
+ ip netns exec ${ns1} \
+ ./mptcp_connect -jt ${timeout_poll} -p $port -T $max_time \
+ 10.0.3.3 < "$cin" > "$cout" &
local cpid=$!
+ mptcp_lib_wait_timeout "${timeout_test}" "${ns3}" "${ns1}" "${port}" \
+ "${cpid}" "${spid}" &
+ local timeout_pid=$!
+
wait $cpid
local retc=$?
wait $spid
local rets=$?
+ if kill -0 $timeout_pid; then
+ # Finished before the timeout: kill the background job
+ mptcp_lib_kill_group_wait $timeout_pid
+ timeout_pid=0
+ fi
+
if $capture; then
sleep 1
kill ${cappid_listener}
kill ${cappid_connector}
fi
- NSTAT_HISTORY=/tmp/${ns3}.nstat ip netns exec ${ns3} \
- nstat | grep Tcp > /tmp/${ns3}.out
- NSTAT_HISTORY=/tmp/${ns1}.nstat ip netns exec ${ns1} \
- nstat | grep Tcp > /tmp/${ns1}.out
+ mptcp_lib_nstat_get "${ns3}"
+ mptcp_lib_nstat_get "${ns1}"
cmp $sin $cout > /dev/null 2>&1
local cmps=$?
cmp $cin $sout > /dev/null 2>&1
local cmpc=$?
- if [ $retc -eq 0 ] && [ $rets -eq 0 ] && \
- [ $cmpc -eq 0 ] && [ $cmps -eq 0 ]; then
+ if [ $retc -eq 0 ] && [ $rets -eq 0 ] &&
+ [ $cmpc -eq 0 ] && [ $cmps -eq 0 ] &&
+ [ $timeout_pid -eq 0 ]; then
printf "%-16s" " max $max_time "
mptcp_lib_pr_ok
cat "$capout"
@@ -204,8 +209,7 @@ do_transfer()
fi
mptcp_lib_pr_fail "client exit code $retc, server $rets"
- mptcp_lib_pr_err_stats "${ns3}" "${ns1}" "${port}" \
- "/tmp/${ns3}.out" "/tmp/${ns1}.out"
+ mptcp_lib_pr_err_stats "${ns3}" "${ns1}" "${port}"
ls -l $sin $cout
ls -l $cin $sout
diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
index 333064b0b5ac..e9ae1806ab07 100755
--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
+++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
@@ -94,7 +94,7 @@ test_fail()
}
# This function is used in the cleanup trap
-#shellcheck disable=SC2317
+#shellcheck disable=SC2317,SC2329
cleanup()
{
print_title "Cleanup"
@@ -201,6 +201,9 @@ make_connection()
is_v6="v4"
fi
+ # set this on the client side only: will not affect the rest
+ ip netns exec "$ns2" sysctl -q net.mptcp.allow_join_initial_addr_port=0
+
:>"$client_evts"
:>"$server_evts"
@@ -208,7 +211,8 @@ make_connection()
ip netns exec "$ns1" \
./mptcp_connect -s MPTCP -w 300 -p $app_port -l $listen_addr > /dev/null 2>&1 &
local server_pid=$!
- sleep 0.5
+
+ mptcp_lib_wait_local_port_listen "${ns1}" "${port}"
# Run the client, transfer $file and stay connected to the server
# to conduct tests
@@ -223,23 +227,28 @@ make_connection()
local client_token
local client_port
local client_serverside
+ local client_nojoin
local server_token
local server_serverside
+ local server_nojoin
client_token=$(mptcp_lib_evts_get_info token "$client_evts")
client_port=$(mptcp_lib_evts_get_info sport "$client_evts")
client_serverside=$(mptcp_lib_evts_get_info server_side "$client_evts")
+ client_nojoin=$(mptcp_lib_evts_get_info deny_join_id0 "$client_evts")
server_token=$(mptcp_lib_evts_get_info token "$server_evts")
server_serverside=$(mptcp_lib_evts_get_info server_side "$server_evts")
+ server_nojoin=$(mptcp_lib_evts_get_info deny_join_id0 "$server_evts")
print_test "Established IP${is_v6} MPTCP Connection ns2 => ns1"
- if [ "$client_token" != "" ] && [ "$server_token" != "" ] && [ "$client_serverside" = 0 ] &&
- [ "$server_serverside" = 1 ]
+ if [ "${client_token}" != "" ] && [ "${server_token}" != "" ] &&
+ [ "${client_serverside:-0}" = 0 ] && [ "${server_serverside:-0}" = 1 ] &&
+ [ "${client_nojoin:-0}" = 0 ] && [ "${server_nojoin:-0}" = 1 ]
then
test_pass
print_title "Connection info: ${client_addr}:${client_port} -> ${connect_addr}:${app_port}"
else
- test_fail "Expected tokens (c:${client_token} - s:${server_token}) and server (c:${client_serverside} - s:${server_serverside})"
+ test_fail "Expected tokens (c:${client_token} - s:${server_token}), server (c:${client_serverside} - s:${server_serverside}), nojoin (c:${client_nojoin} - s:${server_nojoin})"
mptcp_lib_result_print_all_tap
exit ${KSFT_FAIL}
fi
diff --git a/tools/testing/selftests/net/msg_zerocopy.c b/tools/testing/selftests/net/msg_zerocopy.c
index 7ea5fb28c93d..1d5d3c4e7e87 100644
--- a/tools/testing/selftests/net/msg_zerocopy.c
+++ b/tools/testing/selftests/net/msg_zerocopy.c
@@ -77,6 +77,7 @@
static int cfg_cork;
static bool cfg_cork_mixed;
static int cfg_cpu = -1; /* default: pin to last cpu */
+static int cfg_expect_zerocopy = -1;
static int cfg_family = PF_UNSPEC;
static int cfg_ifindex = 1;
static int cfg_payload_len;
@@ -92,9 +93,9 @@ static socklen_t cfg_alen;
static struct sockaddr_storage cfg_dst_addr;
static struct sockaddr_storage cfg_src_addr;
+static int exitcode;
static char payload[IP_MAXPACKET];
static long packets, bytes, completions, expected_completions;
-static int zerocopied = -1;
static uint32_t next_completion;
static uint32_t sends_since_notify;
@@ -444,11 +445,13 @@ static bool do_recv_completion(int fd, int domain)
next_completion = hi + 1;
zerocopy = !(serr->ee_code & SO_EE_CODE_ZEROCOPY_COPIED);
- if (zerocopied == -1)
- zerocopied = zerocopy;
- else if (zerocopied != zerocopy) {
- fprintf(stderr, "serr: inconsistent\n");
- zerocopied = zerocopy;
+ if (cfg_expect_zerocopy != -1 &&
+ cfg_expect_zerocopy != zerocopy) {
+ fprintf(stderr, "serr: ee_code: %u != expected %u\n",
+ zerocopy, cfg_expect_zerocopy);
+ exitcode = 1;
+ /* suppress repeated messages */
+ cfg_expect_zerocopy = zerocopy;
}
if (cfg_verbose >= 2)
@@ -571,7 +574,7 @@ static void do_tx(int domain, int type, int protocol)
fprintf(stderr, "tx=%lu (%lu MB) txc=%lu zc=%c\n",
packets, bytes >> 20, completions,
- zerocopied == 1 ? 'y' : 'n');
+ cfg_zerocopy && cfg_expect_zerocopy == 1 ? 'y' : 'n');
}
static int do_setup_rx(int domain, int type, int protocol)
@@ -715,7 +718,7 @@ static void parse_opts(int argc, char **argv)
cfg_payload_len = max_payload_len;
- while ((c = getopt(argc, argv, "46c:C:D:i:l:mp:rs:S:t:vz")) != -1) {
+ while ((c = getopt(argc, argv, "46c:C:D:i:l:mp:rs:S:t:vzZ:")) != -1) {
switch (c) {
case '4':
if (cfg_family != PF_UNSPEC)
@@ -770,6 +773,9 @@ static void parse_opts(int argc, char **argv)
case 'z':
cfg_zerocopy = true;
break;
+ case 'Z':
+ cfg_expect_zerocopy = !!atoi(optarg);
+ break;
}
}
@@ -817,5 +823,5 @@ int main(int argc, char **argv)
else
error(1, 0, "unknown cfg_test %s", cfg_test);
- return 0;
+ return exitcode;
}
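The -Z argument turns the previously informational zerocopy status into a pass/fail check; a sketch of manual runs matching the updated msg_zerocopy.sh (namespaces, device and addresses are the script's own):

    ip netns exec "${NS2}" ./msg_zerocopy -4 -i veth0 -t 2 \
        -S 192.168.1.1 -D 192.168.1.2 -r udp &        # receiver
    ip netns exec "${NS1}" ./msg_zerocopy -4 -i veth0 -t 1 \
        -S 192.168.1.1 -D 192.168.1.2 -z -Z 0 udp     # local rx => expect copies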
diff --git a/tools/testing/selftests/net/msg_zerocopy.sh b/tools/testing/selftests/net/msg_zerocopy.sh
index 89c22f5320e0..28178a38a4e7 100755
--- a/tools/testing/selftests/net/msg_zerocopy.sh
+++ b/tools/testing/selftests/net/msg_zerocopy.sh
@@ -6,6 +6,7 @@
set -e
readonly DEV="veth0"
+readonly DUMMY_DEV="dummy0"
readonly DEV_MTU=65535
readonly BIN="./msg_zerocopy"
@@ -14,21 +15,25 @@ readonly NSPREFIX="ns-${RAND}"
readonly NS1="${NSPREFIX}1"
readonly NS2="${NSPREFIX}2"
-readonly SADDR4='192.168.1.1'
-readonly DADDR4='192.168.1.2'
-readonly SADDR6='fd::1'
-readonly DADDR6='fd::2'
+readonly LPREFIX4='192.168.1'
+readonly RPREFIX4='192.168.2'
+readonly LPREFIX6='fd'
+readonly RPREFIX6='fc'
+
readonly path_sysctl_mem="net.core.optmem_max"
# No arguments: automated test
if [[ "$#" -eq "0" ]]; then
- $0 4 tcp -t 1
- $0 6 tcp -t 1
- $0 4 udp -t 1
- $0 6 udp -t 1
- echo "OK. All tests passed"
- exit 0
+ ret=0
+
+ $0 4 tcp -t 1 || ret=1
+ $0 6 tcp -t 1 || ret=1
+ $0 4 udp -t 1 || ret=1
+ $0 6 udp -t 1 || ret=1
+
+ [[ "$ret" == "0" ]] && echo "OK. All tests passed"
+ exit $ret
fi
# Argument parsing
@@ -45,11 +50,18 @@ readonly EXTRA_ARGS="$@"
# Argument parsing: configure addresses
if [[ "${IP}" == "4" ]]; then
- readonly SADDR="${SADDR4}"
- readonly DADDR="${DADDR4}"
+ readonly SADDR="${LPREFIX4}.1"
+ readonly DADDR="${LPREFIX4}.2"
+ readonly DUMMY_ADDR="${RPREFIX4}.1"
+ readonly DADDR_TXONLY="${RPREFIX4}.2"
+ readonly MASK="24"
elif [[ "${IP}" == "6" ]]; then
- readonly SADDR="${SADDR6}"
- readonly DADDR="${DADDR6}"
+ readonly SADDR="${LPREFIX6}::1"
+ readonly DADDR="${LPREFIX6}::2"
+ readonly DUMMY_ADDR="${RPREFIX6}::1"
+ readonly DADDR_TXONLY="${RPREFIX6}::2"
+ readonly MASK="64"
+ readonly NODAD="nodad"
else
echo "Invalid IP version ${IP}"
exit 1
@@ -89,33 +101,61 @@ ip netns exec "${NS2}" sysctl -w -q "${path_sysctl_mem}=1000000"
ip link add "${DEV}" mtu "${DEV_MTU}" netns "${NS1}" type veth \
peer name "${DEV}" mtu "${DEV_MTU}" netns "${NS2}"
+ip link add "${DUMMY_DEV}" mtu "${DEV_MTU}" netns "${NS2}" type dummy
+
# Bring the devices up
ip -netns "${NS1}" link set "${DEV}" up
ip -netns "${NS2}" link set "${DEV}" up
+ip -netns "${NS2}" link set "${DUMMY_DEV}" up
# Set fixed MAC addresses on the devices
ip -netns "${NS1}" link set dev "${DEV}" address 02:02:02:02:02:02
ip -netns "${NS2}" link set dev "${DEV}" address 06:06:06:06:06:06
# Add fixed IP addresses to the devices
-ip -netns "${NS1}" addr add 192.168.1.1/24 dev "${DEV}"
-ip -netns "${NS2}" addr add 192.168.1.2/24 dev "${DEV}"
-ip -netns "${NS1}" addr add fd::1/64 dev "${DEV}" nodad
-ip -netns "${NS2}" addr add fd::2/64 dev "${DEV}" nodad
+ip -netns "${NS1}" addr add "${SADDR}/${MASK}" dev "${DEV}" ${NODAD}
+ip -netns "${NS2}" addr add "${DADDR}/${MASK}" dev "${DEV}" ${NODAD}
+ip -netns "${NS2}" addr add "${DUMMY_ADDR}/${MASK}" dev "${DUMMY_DEV}" ${NODAD}
+
+ip -netns "${NS1}" route add default via "${DADDR}" dev "${DEV}"
+ip -netns "${NS2}" route add default via "${DADDR_TXONLY}" dev "${DUMMY_DEV}"
+
+ip netns exec "${NS2}" sysctl -wq net.ipv4.ip_forward=1
+ip netns exec "${NS2}" sysctl -wq net.ipv6.conf.all.forwarding=1
# Optionally disable sg or csum offload to test edge cases
# ip netns exec "${NS1}" ethtool -K "${DEV}" sg off
+ret=0
+
do_test() {
local readonly ARGS="$1"
- echo "ipv${IP} ${TXMODE} ${ARGS}"
- ip netns exec "${NS2}" "${BIN}" "-${IP}" -i "${DEV}" -t 2 -C 2 -S "${SADDR}" -D "${DADDR}" ${ARGS} -r "${RXMODE}" &
+ # tx-rx test
+ # packets queued to a local socket are copied,
+ # sender notification has SO_EE_CODE_ZEROCOPY_COPIED.
+
+ echo -e "\nipv${IP} ${TXMODE} ${ARGS} tx-rx\n"
+ ip netns exec "${NS2}" "${BIN}" "-${IP}" -i "${DEV}" -t 2 -C 2 \
+ -S "${SADDR}" -D "${DADDR}" ${ARGS} -r "${RXMODE}" &
sleep 0.2
- ip netns exec "${NS1}" "${BIN}" "-${IP}" -i "${DEV}" -t 1 -C 3 -S "${SADDR}" -D "${DADDR}" ${ARGS} "${TXMODE}"
+ ip netns exec "${NS1}" "${BIN}" "-${IP}" -i "${DEV}" -t 1 -C 3 \
+ -S "${SADDR}" -D "${DADDR}" ${ARGS} "${TXMODE}" -Z 0 || ret=1
wait
+
+ # the next test is unconnected tx to dummy0; it cannot be exercised with tcp
+ [[ "${TXMODE}" == "tcp" ]] && return
+
+ # tx-only test: send out dummy0
+ # packets leaving the host are not copied,
+ # sender notification does not have SO_EE_CODE_ZEROCOPY_COPIED.
+
+ echo -e "\nipv${IP} ${TXMODE} ${ARGS} tx-only\n"
+ ip netns exec "${NS1}" "${BIN}" "-${IP}" -i "${DEV}" -t 1 -C 3 \
+ -S "${SADDR}" -D "${DADDR_TXONLY}" ${ARGS} "${TXMODE}" -Z 1 || ret=1
}
do_test "${EXTRA_ARGS}"
do_test "-z ${EXTRA_ARGS}"
-echo ok
+
+[[ "$ret" == "0" ]] && echo "OK"
diff --git a/tools/testing/selftests/net/nat6to4.sh b/tools/testing/selftests/net/nat6to4.sh
new file mode 100755
index 000000000000..0ee859b622a4
--- /dev/null
+++ b/tools/testing/selftests/net/nat6to4.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+NS="ns-peer-$(mktemp -u XXXXXX)"
+
+ip netns add "${NS}"
+ip -netns "${NS}" link set lo up
+ip -netns "${NS}" route add default via 127.0.0.2 dev lo
+
+tc -n "${NS}" qdisc add dev lo ingress
+tc -n "${NS}" filter add dev lo ingress prio 4 protocol ip \
+ bpf object-file nat6to4.bpf.o section schedcls/egress4/snat4 direct-action
+
+ip netns exec "${NS}" \
+ bash -c 'echo 012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789abc | socat - UDP4-DATAGRAM:224.1.0.1:6666,ip-multicast-loop=1'
diff --git a/tools/testing/selftests/net/netdev-l2addr.sh b/tools/testing/selftests/net/netdev-l2addr.sh
new file mode 100755
index 000000000000..18509da293e5
--- /dev/null
+++ b/tools/testing/selftests/net/netdev-l2addr.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source lib.sh
+set -o pipefail
+
+NSIM_ADDR=2025
+TEST_ADDR="d0:be:d0:be:d0:00"
+
+RET_CODE=0
+
+cleanup() {
+ cleanup_netdevsim "$NSIM_ADDR"
+ cleanup_ns "$NS"
+}
+
+trap cleanup EXIT
+
+fail() {
+ echo "ERROR: ${1:-unexpected return code} (ret: $_)" >&2
+ RET_CODE=1
+}
+
+get_addr()
+{
+ local type="$1"
+ local dev="$2"
+ local ns="$3"
+
+ ip -j -n "$ns" link show dev "$dev" | jq -er ".[0].$type"
+}
+
+setup_ns NS
+
+nsim=$(create_netdevsim $NSIM_ADDR "$NS")
+
+get_addr address "$nsim" "$NS" >/dev/null || fail "Couldn't get ether addr"
+get_addr broadcast "$nsim" "$NS" >/dev/null || fail "Couldn't get brd addr"
+get_addr permaddr "$nsim" "$NS" >/dev/null && fail "Found perm_addr without setting it"
+
+ip -n "$NS" link set dev "$nsim" address "$TEST_ADDR"
+ip -n "$NS" link set dev "$nsim" brd "$TEST_ADDR"
+
+[[ "$(get_addr address "$nsim" "$NS")" == "$TEST_ADDR" ]] || fail "Couldn't set ether addr"
+[[ "$(get_addr broadcast "$nsim" "$NS")" == "$TEST_ADDR" ]] || fail "Couldn't set brd addr"
+
+if create_netdevsim_port "$NSIM_ADDR" "$NS" 2 "FF:FF:FF:FF:FF:FF" 2>/dev/null; then
+ fail "Created netdevsim with broadcast permaddr"
+fi
+
+nsim_port=$(create_netdevsim_port "$NSIM_ADDR" "$NS" 2 "$TEST_ADDR")
+
+get_addr address "$nsim_port" "$NS" >/dev/null || fail "Couldn't get ether addr"
+get_addr broadcast "$nsim_port" "$NS" >/dev/null || fail "Couldn't get brd addr"
+[[ "$(get_addr permaddr "$nsim_port" "$NS")" == "$TEST_ADDR" ]] || fail "Couldn't get permaddr"
+
+cleanup_netdevsim "$NSIM_ADDR" "$NS"
+
+exit $RET_CODE
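The jq -e exit status is what makes get_addr() usable in the ||/&& checks above: a missing attribute yields null, which -e maps to a non-zero exit. For instance, outside netdevsim:

    ip -j link show dev lo | jq -er '.[0].address'    # prints 00:00:00:00:00:00
    ip -j link show dev lo | jq -er '.[0].permaddr'   # null -> exit status 1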
diff --git a/tools/testing/selftests/net/netfilter/.gitignore b/tools/testing/selftests/net/netfilter/.gitignore
index 64c4f8d9aa6c..5d2be9a00627 100644
--- a/tools/testing/selftests/net/netfilter/.gitignore
+++ b/tools/testing/selftests/net/netfilter/.gitignore
@@ -5,3 +5,4 @@ conntrack_dump_flush
conntrack_reverse_clash
sctp_collision
nf_queue
+udpclash
diff --git a/tools/testing/selftests/net/netfilter/Makefile b/tools/testing/selftests/net/netfilter/Makefile
index e9b2f553588d..ee2d1a5254f8 100644
--- a/tools/testing/selftests/net/netfilter/Makefile
+++ b/tools/testing/selftests/net/netfilter/Makefile
@@ -6,44 +6,52 @@ HOSTPKG_CONFIG := pkg-config
MNL_CFLAGS := $(shell $(HOSTPKG_CONFIG) --cflags libmnl 2>/dev/null)
MNL_LDLIBS := $(shell $(HOSTPKG_CONFIG) --libs libmnl 2>/dev/null || echo -lmnl)
-TEST_PROGS := br_netfilter.sh bridge_brouter.sh
-TEST_PROGS += br_netfilter_queue.sh
-TEST_PROGS += conntrack_dump_flush.sh
-TEST_PROGS += conntrack_icmp_related.sh
-TEST_PROGS += conntrack_ipip_mtu.sh
-TEST_PROGS += conntrack_tcp_unreplied.sh
-TEST_PROGS += conntrack_resize.sh
-TEST_PROGS += conntrack_sctp_collision.sh
-TEST_PROGS += conntrack_vrf.sh
-TEST_PROGS += conntrack_reverse_clash.sh
-TEST_PROGS += ipvs.sh
-TEST_PROGS += nf_conntrack_packetdrill.sh
-TEST_PROGS += nf_nat_edemux.sh
-TEST_PROGS += nft_audit.sh
-TEST_PROGS += nft_concat_range.sh
-TEST_PROGS += nft_conntrack_helper.sh
-TEST_PROGS += nft_fib.sh
-TEST_PROGS += nft_flowtable.sh
-TEST_PROGS += nft_interface_stress.sh
-TEST_PROGS += nft_meta.sh
-TEST_PROGS += nft_nat.sh
-TEST_PROGS += nft_nat_zones.sh
-TEST_PROGS += nft_queue.sh
-TEST_PROGS += nft_synproxy.sh
-TEST_PROGS += nft_tproxy_tcp.sh
-TEST_PROGS += nft_tproxy_udp.sh
-TEST_PROGS += nft_zones_many.sh
-TEST_PROGS += rpath.sh
-TEST_PROGS += vxlan_mtu_frag.sh
-TEST_PROGS += xt_string.sh
+TEST_PROGS := \
+ br_netfilter.sh \
+ br_netfilter_queue.sh \
+ bridge_brouter.sh \
+ conntrack_clash.sh \
+ conntrack_dump_flush.sh \
+ conntrack_icmp_related.sh \
+ conntrack_ipip_mtu.sh \
+ conntrack_resize.sh \
+ conntrack_reverse_clash.sh \
+ conntrack_sctp_collision.sh \
+ conntrack_tcp_unreplied.sh \
+ conntrack_vrf.sh \
+ ipvs.sh \
+ nf_conntrack_packetdrill.sh \
+ nf_nat_edemux.sh \
+ nft_audit.sh \
+ nft_concat_range.sh \
+ nft_conntrack_helper.sh \
+ nft_fib.sh \
+ nft_flowtable.sh \
+ nft_interface_stress.sh \
+ nft_meta.sh \
+ nft_nat.sh \
+ nft_nat_zones.sh \
+ nft_queue.sh \
+ nft_synproxy.sh \
+ nft_tproxy_tcp.sh \
+ nft_tproxy_udp.sh \
+ nft_zones_many.sh \
+ rpath.sh \
+ vxlan_mtu_frag.sh \
+ xt_string.sh \
+# end of TEST_PROGS
TEST_PROGS_EXTENDED = nft_concat_range_perf.sh
-TEST_GEN_FILES = audit_logread
-TEST_GEN_FILES += connect_close nf_queue
-TEST_GEN_FILES += conntrack_dump_flush
-TEST_GEN_FILES += conntrack_reverse_clash
-TEST_GEN_FILES += sctp_collision
+TEST_GEN_FILES = \
+ audit_logread \
+ connect_close \
+ conntrack_dump_flush \
+ conntrack_reverse_clash \
+ nf_queue \
+ sctp_collision \
+ udpclash \
+# end of TEST_GEN_FILES
include ../../lib.mk
@@ -52,10 +60,14 @@ $(OUTPUT)/nf_queue: LDLIBS += $(MNL_LDLIBS)
$(OUTPUT)/conntrack_dump_flush: CFLAGS += $(MNL_CFLAGS)
$(OUTPUT)/conntrack_dump_flush: LDLIBS += $(MNL_LDLIBS)
+$(OUTPUT)/udpclash: LDLIBS += -lpthread
-TEST_FILES := lib.sh
-TEST_FILES += packetdrill
+TEST_FILES := \
+ lib.sh \
+ packetdrill \
+# end of TEST_FILES
TEST_INCLUDES := \
+ $(wildcard ../lib/sh/*.sh) \
../lib.sh \
- $(wildcard ../lib/sh/*.sh)
+# end of TEST_INCLUDES
diff --git a/tools/testing/selftests/net/netfilter/config b/tools/testing/selftests/net/netfilter/config
index 363646f4fefe..12ce61fa15a8 100644
--- a/tools/testing/selftests/net/netfilter/config
+++ b/tools/testing/selftests/net/netfilter/config
@@ -7,65 +7,74 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_T_FILTER=m
CONFIG_BRIDGE_NETFILTER=m
CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_NF_EBTABLES_LEGACY=m
CONFIG_BRIDGE_VLAN_FILTERING=y
CONFIG_CGROUP_BPF=y
+CONFIG_CRYPTO_SHA1=m
CONFIG_DUMMY=m
+CONFIG_INET_DIAG=m
CONFIG_INET_ESP=m
-CONFIG_IP_NF_MATCH_RPFILTER=m
-CONFIG_IP6_NF_MATCH_RPFILTER=m
-CONFIG_IP_NF_IPTABLES=m
+CONFIG_INET_SCTP_DIAG=m
+CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_IPTABLES_LEGACY=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
+CONFIG_IP6_NF_RAW=m
CONFIG_IP_NF_FILTER=m
-CONFIG_IP6_NF_FILTER=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_IPTABLES_LEGACY=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP_NF_NAT=m
CONFIG_IP_NF_RAW=m
-CONFIG_IP6_NF_RAW=m
CONFIG_IP_SCTP=m
+CONFIG_IPV6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IP_VS=m
CONFIG_IP_VS_PROTO_TCP=y
CONFIG_IP_VS_RR=m
-CONFIG_IPV6=y
-CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_MACVLAN=m
CONFIG_NAMESPACES=y
CONFIG_NET_CLS_U32=m
-CONFIG_NET_L3_MASTER_DEV=y
-CONFIG_NET_NS=y
-CONFIG_NET_SCH_NETEM=m
-CONFIG_NET_SCH_HTB=m
-CONFIG_NET_IPIP=m
-CONFIG_NET_VRF=y
CONFIG_NETFILTER=y
CONFIG_NETFILTER_ADVANCED=y
CONFIG_NETFILTER_NETLINK=m
CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NETFILTER_SYNPROXY=m
CONFIG_NETFILTER_XTABLES=m
-CONFIG_NETFILTER_XT_NAT=m
+CONFIG_NETFILTER_XTABLES_LEGACY=y
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_NAT=m
CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
+CONFIG_NET_IPIP=m
+CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_NET_NS=y
+CONFIG_NET_PKTGEN=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_VRF=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_CONNTRACK_PROCFS=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_MARK=y
+CONFIG_NF_CONNTRACK_PROCFS=y
CONFIG_NF_CONNTRACK_ZONES=y
CONFIG_NF_CT_NETLINK=m
CONFIG_NF_CT_PROTO_SCTP=y
CONFIG_NF_FLOW_TABLE=m
+CONFIG_NF_FLOW_TABLE_INET=m
CONFIG_NF_LOG_IPV4=m
CONFIG_NF_LOG_IPV6=m
CONFIG_NF_NAT=m
-CONFIG_NF_NAT_REDIRECT=y
CONFIG_NF_NAT_MASQUERADE=y
+CONFIG_NF_NAT_REDIRECT=y
CONFIG_NF_TABLES=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_IPV4=y
CONFIG_NF_TABLES_IPV6=y
CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NF_FLOW_TABLE_INET=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_CT=m
@@ -84,12 +93,9 @@ CONFIG_NFT_QUOTA=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_SYNPROXY=m
CONFIG_NFT_TPROXY=m
+CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VLAN_8021Q=m
CONFIG_VXLAN=m
-CONFIG_XFRM_USER=m
CONFIG_XFRM_STATISTICS=y
-CONFIG_NET_PKTGEN=m
-CONFIG_TUN=m
-CONFIG_INET_DIAG=m
-CONFIG_SCTP_DIAG=m
+CONFIG_XFRM_USER=m
diff --git a/tools/testing/selftests/net/netfilter/conntrack_clash.sh b/tools/testing/selftests/net/netfilter/conntrack_clash.sh
new file mode 100755
index 000000000000..7fc6c5dbd551
--- /dev/null
+++ b/tools/testing/selftests/net/netfilter/conntrack_clash.sh
@@ -0,0 +1,174 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source lib.sh
+
+clash_resolution_active=0
+dport=22111
+ret=0
+
+cleanup()
+{
+ # netns cleanup also zaps any remaining socat echo server.
+ cleanup_all_ns
+}
+
+checktool "nft --version" "run test without nft"
+checktool "conntrack --version" "run test without conntrack"
+checktool "socat -h" "run test without socat"
+
+trap cleanup EXIT
+
+setup_ns nsclient1 nsclient2 nsrouter
+
+ip netns exec "$nsrouter" nft -f -<<EOF
+table ip t {
+ chain lb {
+ meta l4proto udp dnat to numgen random mod 3 map { 0 : 10.0.2.1 . 9000, 1 : 10.0.2.1 . 9001, 2 : 10.0.2.1 . 9002 }
+ }
+
+ chain prerouting {
+ type nat hook prerouting priority dstnat
+
+ udp dport $dport counter jump lb
+ }
+
+ chain output {
+ type nat hook output priority dstnat
+
+ udp dport $dport counter jump lb
+ }
+}
+EOF
+
+load_simple_ruleset()
+{
+ip netns exec "$1" nft -f -<<EOF
+table ip t {
+ chain forward {
+ type filter hook forward priority 0
+
+ ct state new counter
+ }
+}
+EOF
+}
+
+spawn_servers()
+{
+ local ns="$1"
+ local ports="9000 9001 9002"
+
+ for port in $ports; do
+ ip netns exec "$ns" socat UDP-RECVFROM:$port,fork PIPE 2>/dev/null &
+ done
+
+ for port in $ports; do
+ wait_local_port_listen "$ns" $port udp
+ done
+}
+
+add_addr()
+{
+ local ns="$1"
+ local dev="$2"
+ local i="$3"
+ local j="$4"
+
+ ip -net "$ns" link set "$dev" up
+ ip -net "$ns" addr add "10.0.$i.$j/24" dev "$dev"
+}
+
+ping_test()
+{
+ local ns="$1"
+ local daddr="$2"
+
+ if ! ip netns exec "$ns" ping -q -c 1 $daddr > /dev/null;then
+ echo "FAIL: ping from $ns to $daddr"
+ exit 1
+ fi
+}
+
+run_one_clash_test()
+{
+ local ns="$1"
+ local ctns="$2"
+ local daddr="$3"
+ local dport="$4"
+ local entries
+ local cre
+
+ if ! ip netns exec "$ns" timeout 30 ./udpclash $daddr $dport;then
+ echo "INFO: did not receive expected number of replies for $daddr:$dport"
+ ip netns exec "$ctns" conntrack -S
+ # don't fail: check if clash resolution triggered after all.
+ fi
+
+ entries=$(ip netns exec "$ctns" conntrack -S | wc -l)
+ cre=$(ip netns exec "$ctns" conntrack -S | grep "clash_resolve=0" | wc -l)
+
+ if [ "$cre" -ne "$entries" ];then
+ clash_resolution_active=1
+ return 0
+ fi
+
+ # not a failure: clash resolution logic did not trigger.
+	# With the right timing, transmissions completed sequentially
+	# and no parallel insertion occurred.
+ return $ksft_skip
+}
+
+run_clash_test()
+{
+ local ns="$1"
+ local ctns="$2"
+ local daddr="$3"
+ local dport="$4"
+ local softerr=0
+
+ for i in $(seq 1 10);do
+ run_one_clash_test "$ns" "$ctns" "$daddr" "$dport"
+ local rv=$?
+ if [ $rv -eq 0 ];then
+ echo "PASS: clash resolution test for $daddr:$dport on attempt $i"
+ return 0
+ elif [ $rv -eq $ksft_skip ]; then
+ softerr=1
+ fi
+ done
+
+ [ $softerr -eq 1 ] && echo "SKIP: clash resolution for $daddr:$dport did not trigger"
+}
+
+ip link add veth0 netns "$nsclient1" type veth peer name veth0 netns "$nsrouter"
+ip link add veth0 netns "$nsclient2" type veth peer name veth1 netns "$nsrouter"
+add_addr "$nsclient1" veth0 1 1
+add_addr "$nsclient2" veth0 2 1
+add_addr "$nsrouter" veth0 1 99
+add_addr "$nsrouter" veth1 2 99
+
+ip -net "$nsclient1" route add default via 10.0.1.99
+ip -net "$nsclient2" route add default via 10.0.2.99
+ip netns exec "$nsrouter" sysctl -q net.ipv4.ip_forward=1
+
+ping_test "$nsclient1" 10.0.1.99
+ping_test "$nsclient1" 10.0.2.1
+ping_test "$nsclient2" 10.0.1.1
+
+spawn_servers "$nsclient2"
+
+# exercise clash resolution with nat:
+# nsrouter is supposed to dnat to 10.0.2.1:900{0,1,2}.
+run_clash_test "$nsclient1" "$nsrouter" 10.0.1.99 "$dport"
+
+# exercise clash resolution without nat.
+load_simple_ruleset "$nsclient2"
+run_clash_test "$nsclient2" "$nsclient2" 127.0.0.1 9001
+
+if [ $clash_resolution_active -eq 0 ];then
+ [ "$ret" -eq 0 ] && ret=$ksft_skip
+ echo "SKIP: Clash resolution did not trigger"
+fi
+
+exit $ret
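The pass criterion above is driven by conntrack's per-CPU statistics rather than by the reply count. A standalone sketch of that check, assuming the conntrack tool; clash_seen is a hypothetical helper, not part of the patch:

    # Succeed if any CPU reports a non-zero clash_resolve counter.
    clash_seen()
    {
        local ns="$1"
        # conntrack -S prints one "cpu=N ... clash_resolve=N ..." line per CPU.
        ip netns exec "$ns" conntrack -S | grep -q "clash_resolve=[1-9]"
    }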
diff --git a/tools/testing/selftests/net/netfilter/conntrack_dump_flush.c b/tools/testing/selftests/net/netfilter/conntrack_dump_flush.c
index 5f827e10717d..5cecb8a1bc94 100644
--- a/tools/testing/selftests/net/netfilter/conntrack_dump_flush.c
+++ b/tools/testing/selftests/net/netfilter/conntrack_dump_flush.c
@@ -10,7 +10,7 @@
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
-#include "../../kselftest_harness.h"
+#include "kselftest_harness.h"
#define TEST_ZONE_ID 123
#define NF_CT_DEFAULT_ZONE_ID 0
diff --git a/tools/testing/selftests/net/netfilter/conntrack_resize.sh b/tools/testing/selftests/net/netfilter/conntrack_resize.sh
index 9e033e80219e..615fe3c6f405 100755
--- a/tools/testing/selftests/net/netfilter/conntrack_resize.sh
+++ b/tools/testing/selftests/net/netfilter/conntrack_resize.sh
@@ -12,6 +12,9 @@ tmpfile=""
tmpfile_proc=""
tmpfile_uniq=""
ret=0
+have_socat=0
+
+socat -h > /dev/null && have_socat=1
insert_count=2000
[ "$KSFT_MACHINE_SLOW" = "yes" ] && insert_count=400
@@ -123,7 +126,7 @@ ctflush() {
done
}
-ctflood()
+ct_pingflood()
{
local ns="$1"
local duration="$2"
@@ -152,6 +155,44 @@ ctflood()
wait
}
+ct_udpflood()
+{
+ local ns="$1"
+ local duration="$2"
+ local now=$(date +%s)
+ local end=$((now + duration))
+
+ [ $have_socat -ne "1" ] && return
+
+ while [ $now -lt $end ]; do
+ip netns exec "$ns" bash<<"EOF"
+ for i in $(seq 1 100);do
+ dport=$(((RANDOM%65536)+1))
+
+ echo bar | socat -u STDIN UDP:"127.0.0.1:$dport" &
+ done > /dev/null 2>&1
+ wait
+EOF
+ now=$(date +%s)
+ done
+}
+
+ct_udpclash()
+{
+ local ns="$1"
+ local duration="$2"
+ local now=$(date +%s)
+ local end=$((now + duration))
+
+ [ -x udpclash ] || return
+
+ while [ $now -lt $end ]; do
+ ip netns exec "$ns" timeout 30 ./udpclash 127.0.0.1 $((RANDOM%65536)) > /dev/null 2>&1
+
+ now=$(date +%s)
+ done
+}
+
# dump to /dev/null. We don't want dumps to cause infinite loops
# or use-after-free even when conntrack table is altered while dumps
# are in progress.
@@ -169,6 +210,48 @@ ct_nulldump()
wait
}
+ct_nulldump_loop()
+{
+ local ns="$1"
+ local duration="$2"
+ local now=$(date +%s)
+ local end=$((now + duration))
+
+ while [ $now -lt $end ]; do
+ ct_nulldump "$ns"
+ sleep $((RANDOM%2))
+ now=$(date +%s)
+ done
+}
+
+change_timeouts()
+{
+ local ns="$1"
+ local r1=$((RANDOM%2))
+ local r2=$((RANDOM%2))
+
+ [ "$r1" -eq 1 ] && ip netns exec "$ns" sysctl -q net.netfilter.nf_conntrack_icmp_timeout=$((RANDOM%5))
+ [ "$r2" -eq 1 ] && ip netns exec "$ns" sysctl -q net.netfilter.nf_conntrack_udp_timeout=$((RANDOM%5))
+}
+
+ct_change_timeouts_loop()
+{
+ local ns="$1"
+ local duration="$2"
+ local now=$(date +%s)
+ local end=$((now + duration))
+
+ while [ $now -lt $end ]; do
+ change_timeouts "$ns"
+ sleep $((RANDOM%2))
+ now=$(date +%s)
+ done
+
+ # restore defaults
+ ip netns exec "$ns" sysctl -q net.netfilter.nf_conntrack_icmp_timeout=30
+ ip netns exec "$ns" sysctl -q net.netfilter.nf_conntrack_udp_timeout=30
+}
+
check_taint()
{
local tainted_then="$1"
@@ -194,14 +277,19 @@ check_taint()
insert_flood()
{
local n="$1"
+ local timeout="$2"
local r=0
r=$((RANDOM%$insert_count))
- ctflood "$n" "$timeout" "floodresize" &
+ ct_pingflood "$n" "$timeout" "floodresize" &
+ ct_udpflood "$n" "$timeout" &
+ ct_udpclash "$n" "$timeout" &
+
insert_ctnetlink "$n" "$r" &
ctflush "$n" "$timeout" &
- ct_nulldump "$n" &
+ ct_nulldump_loop "$n" "$timeout" &
+ ct_change_timeouts_loop "$n" "$timeout" &
wait
}
@@ -215,7 +303,7 @@ test_floodresize_all()
read tainted_then < /proc/sys/kernel/tainted
for n in "$nsclient1" "$nsclient2";do
- insert_flood "$n" &
+ insert_flood "$n" "$timeout" &
done
# resize table constantly while flood/insert/dump/flushs
@@ -306,7 +394,7 @@ test_dump_all()
ip netns exec "$nsclient1" sysctl -q net.netfilter.nf_conntrack_icmp_timeout=3600
- ctflood "$nsclient1" $timeout "dumpall" &
+ ct_pingflood "$nsclient1" $timeout "dumpall" &
insert_ctnetlink "$nsclient2" $insert_count
wait
@@ -368,7 +456,7 @@ test_conntrack_disable()
ct_flush_once "$nsclient1"
ct_flush_once "$nsclient2"
- ctflood "$nsclient1" "$timeout" "conntrack disable"
+ ct_pingflood "$nsclient1" "$timeout" "conntrack disable"
ip netns exec "$nsclient2" ping -q -c 1 127.0.0.1 >/dev/null 2>&1
# Disabled, should not have picked up any connection.
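All of the new *_loop helpers above share one timing idiom: re-read date +%s after every pass and stop once the deadline has been reached. A generic sketch of that idiom (stress_for is a hypothetical name, not part of the patch):

    # Run "$@" repeatedly for roughly $1 seconds.
    stress_for()
    {
        local end=$(( $(date +%s) + $1 ))
        shift
        while [ "$(date +%s)" -lt "$end" ]; do
            "$@"
        done
    }
    # typical use: stress_for 10 worker_a & stress_for 10 worker_b & wait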
diff --git a/tools/testing/selftests/net/netfilter/ipvs.sh b/tools/testing/selftests/net/netfilter/ipvs.sh
index 6af2ea3ad6b8..9c9d5b38ab71 100755
--- a/tools/testing/selftests/net/netfilter/ipvs.sh
+++ b/tools/testing/selftests/net/netfilter/ipvs.sh
@@ -151,7 +151,7 @@ test_nat() {
test_tun() {
ip netns exec "${ns0}" ip route add "${vip_v4}" via "${gip_v4}" dev br0
- ip netns exec "${ns1}" modprobe -q ipip
+ modprobe -q ipip
ip netns exec "${ns1}" ip link set tunl0 up
ip netns exec "${ns1}" sysctl -qw net.ipv4.ip_forward=0
ip netns exec "${ns1}" sysctl -qw net.ipv4.conf.all.send_redirects=0
@@ -160,10 +160,10 @@ test_tun() {
ip netns exec "${ns1}" ipvsadm -a -i -t "${vip_v4}:${port}" -r ${rip_v4}:${port}
ip netns exec "${ns1}" ip addr add ${vip_v4}/32 dev lo:1
- ip netns exec "${ns2}" modprobe -q ipip
ip netns exec "${ns2}" ip link set tunl0 up
ip netns exec "${ns2}" sysctl -qw net.ipv4.conf.all.arp_ignore=1
ip netns exec "${ns2}" sysctl -qw net.ipv4.conf.all.arp_announce=2
+ ip netns exec "${ns2}" sysctl -qw net.ipv4.conf.tunl0.rp_filter=0
ip netns exec "${ns2}" ip addr add "${vip_v4}/32" dev lo:1
test_service
diff --git a/tools/testing/selftests/net/netfilter/nf_nat_edemux.sh b/tools/testing/selftests/net/netfilter/nf_nat_edemux.sh
index 1014551dd769..6731fe1eaf2e 100755
--- a/tools/testing/selftests/net/netfilter/nf_nat_edemux.sh
+++ b/tools/testing/selftests/net/netfilter/nf_nat_edemux.sh
@@ -17,9 +17,31 @@ cleanup()
checktool "socat -h" "run test without socat"
checktool "iptables --version" "run test without iptables"
+checktool "conntrack --version" "run test without conntrack"
trap cleanup EXIT
+connect_done()
+{
+ local ns="$1"
+ local port="$2"
+
+ ip netns exec "$ns" ss -nt -o state established "dport = :$port" | grep -q "$port"
+}
+
+check_ctstate()
+{
+ local ns="$1"
+ local dp="$2"
+
+ if ! ip netns exec "$ns" conntrack --get -s 192.168.1.2 -d 192.168.1.1 -p tcp \
+ --sport 10000 --dport "$dp" --state ESTABLISHED > /dev/null 2>&1;then
+ echo "FAIL: Did not find expected state for dport $2"
+ ip netns exec "$ns" bash -c 'conntrack -L; conntrack -S; ss -nt'
+ ret=1
+ fi
+}
+
setup_ns ns1 ns2
# Connect the namespaces using a veth pair
@@ -44,15 +66,18 @@ socatpid=$!
ip netns exec "$ns2" sysctl -q net.ipv4.ip_local_port_range="10000 10000"
# add a virtual IP using DNAT
-ip netns exec "$ns2" iptables -t nat -A OUTPUT -d 10.96.0.1/32 -p tcp --dport 443 -j DNAT --to-destination 192.168.1.1:5201
+ip netns exec "$ns2" iptables -t nat -A OUTPUT -d 10.96.0.1/32 -p tcp --dport 443 -j DNAT --to-destination 192.168.1.1:5201 || exit 1
# ... and route it to the other namespace
ip netns exec "$ns2" ip route add 10.96.0.1 via 192.168.1.1
-# add a persistent connection from the other namespace
-ip netns exec "$ns2" socat -t 10 - TCP:192.168.1.1:5201 > /dev/null &
+# listener should be up by now, wait if it isn't yet.
+wait_local_port_listen "$ns1" 5201 tcp
-sleep 1
+# add a persistent connection from the other namespace
+sleep 10 | ip netns exec "$ns2" socat -t 10 - TCP:192.168.1.1:5201 > /dev/null &
+cpid0=$!
+busywait "$BUSYWAIT_TIMEOUT" connect_done "$ns2" "5201"
# ip daddr:dport will be rewritten to 192.168.1.1 5201
# NAT must reallocate source port 10000 because
@@ -71,26 +96,25 @@ fi
ip netns exec "$ns1" iptables -t nat -A PREROUTING -p tcp --dport 5202 -j REDIRECT --to-ports 5201
ip netns exec "$ns1" iptables -t nat -A PREROUTING -p tcp --dport 5203 -j REDIRECT --to-ports 5201
-sleep 5 | ip netns exec "$ns2" socat -t 5 -u STDIN TCP:192.168.1.1:5202,connect-timeout=5 >/dev/null &
+sleep 5 | ip netns exec "$ns2" socat -T 5 -u STDIN TCP:192.168.1.1:5202,connect-timeout=5 >/dev/null &
+cpid1=$!
-# if connect succeeds, client closes instantly due to EOF on stdin.
-# if connect hangs, it will time out after 5s.
-echo | ip netns exec "$ns2" socat -t 3 -u STDIN TCP:192.168.1.1:5203,connect-timeout=5 >/dev/null &
+sleep 5 | ip netns exec "$ns2" socat -T 5 -u STDIN TCP:192.168.1.1:5203,connect-timeout=5 >/dev/null &
cpid2=$!
-time_then=$(date +%s)
-wait $cpid2
-rv=$?
-time_now=$(date +%s)
+busywait "$BUSYWAIT_TIMEOUT" connect_done "$ns2" 5202
+busywait "$BUSYWAIT_TIMEOUT" connect_done "$ns2" 5203
-# Check how much time has elapsed, expectation is for
-# 'cpid2' to connect and then exit (and no connect delay).
-delta=$((time_now - time_then))
+check_ctstate "$ns1" 5202
+check_ctstate "$ns1" 5203
-if [ $delta -lt 2 ] && [ $rv -eq 0 ]; then
+kill $socatpid $cpid0 $cpid1 $cpid2
+socatpid=0
+
+if [ $ret -eq 0 ]; then
echo "PASS: could connect to service via redirected ports"
else
- echo "FAIL: socat cannot connect to service via redirect ($delta seconds elapsed, returned $rv)"
+ echo "FAIL: socat cannot connect to service via redirect"
ret=1
fi
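The connect_done/busywait pairing introduced here replaces fixed sleeps with polling on kernel socket state. An equivalent self-contained poll loop, assuming iproute2's ss; wait_established is a hypothetical helper:

    # Poll until netns $1 has an ESTABLISHED TCP socket towards port $2.
    wait_established()
    {
        local ns="$1" port="$2" tries=50

        while [ "$tries" -gt 0 ]; do
            if ip netns exec "$ns" ss -nt -o state established \
                    "dport = :$port" | grep -q ":$port"; then
                return 0
            fi
            tries=$((tries - 1))
            sleep 0.1
        done
        return 1
    }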
diff --git a/tools/testing/selftests/net/netfilter/nft_concat_range.sh b/tools/testing/selftests/net/netfilter/nft_concat_range.sh
index efea93cf23d4..ad97c6227f35 100755
--- a/tools/testing/selftests/net/netfilter/nft_concat_range.sh
+++ b/tools/testing/selftests/net/netfilter/nft_concat_range.sh
@@ -29,7 +29,7 @@ TYPES="net_port port_net net6_port port_proto net6_port_mac net6_port_mac_proto
net6_port_net6_port net_port_mac_proto_net"
# Reported bugs, also described by TYPE_ variables below
-BUGS="flush_remove_add reload net_port_proto_match avx2_mismatch"
+BUGS="flush_remove_add reload net_port_proto_match avx2_mismatch doublecreate"
# List of possible paths to pktgen script from kernel tree for performance tests
PKTGEN_SCRIPT_PATHS="
@@ -378,7 +378,7 @@ display net,port,proto
type_spec ipv4_addr . inet_service . inet_proto
chain_spec ip daddr . udp dport . meta l4proto
dst addr4 port proto
-src
+src
start 1
count 9
src_delta 9
@@ -408,6 +408,18 @@ perf_duration 0
"
+TYPE_doublecreate="
+display cannot create same element twice
+type_spec ipv4_addr . ipv4_addr
+chain_spec ip saddr . ip daddr
+dst addr4
+proto icmp
+
+race_repeat 0
+
+perf_duration 0
+"
+
# Set template for all tests, types and rules are filled in depending on test
set_template='
flush ruleset
@@ -419,6 +431,7 @@ table inet filter {
set test {
type ${type_spec}
+ counter
flags interval,timeout
}
@@ -1158,9 +1171,18 @@ del() {
fi
}
-# Return packet count from 'test' counter in 'inet filter' table
+# Return packet count for elem $1 from 'test' counter in 'inet filter' table
count_packets() {
found=0
+ for token in $(nft reset element inet filter test "${1}" ); do
+ [ ${found} -eq 1 ] && echo "${token}" && return
+ [ "${token}" = "packets" ] && found=1
+ done
+}
+
+# Return packet count from 'test' counter in 'inet filter' table
+count_packets_nomatch() {
+ found=0
for token in $(nft list counter inet filter test); do
[ ${found} -eq 1 ] && echo "${token}" && return
[ "${token}" = "packets" ] && found=1
@@ -1206,6 +1228,10 @@ perf() {
# Set MAC addresses, send single packet, check that it matches, reset counter
send_match() {
+ local elem="$1"
+
+ shift
+
ip link set veth_a address "$(format_mac "${1}")"
ip -n B link set veth_b address "$(format_mac "${2}")"
@@ -1216,7 +1242,7 @@ send_match() {
eval src_"$f"=\$\(format_\$f "${2}"\)
done
eval send_\$proto
- if [ "$(count_packets)" != "1" ]; then
+ if [ "$(count_packets "$elem")" != "1" ]; then
err "${proto} packet to:"
err " $(for f in ${dst}; do
eval format_\$f "${1}"; printf ' '; done)"
@@ -1242,7 +1268,7 @@ send_nomatch() {
eval src_"$f"=\$\(format_\$f "${2}"\)
done
eval send_\$proto
- if [ "$(count_packets)" != "0" ]; then
+ if [ "$(count_packets_nomatch)" != "0" ]; then
err "${proto} packet to:"
err " $(for f in ${dst}; do
eval format_\$f "${1}"; printf ' '; done)"
@@ -1255,13 +1281,54 @@ send_nomatch() {
fi
}
+maybe_send_nomatch() {
+ local elem="$1"
+ local what="$4"
+
+ [ $((RANDOM%20)) -gt 0 ] && return
+
+ dst_addr4="$2"
+ dst_port="$3"
+ send_udp
+
+ if [ "$(count_packets_nomatch)" != "0" ]; then
+ err "Packet to $dst_addr4:$dst_port did match $what"
+ err "$(nft -a list ruleset)"
+ return 1
+ fi
+}
+
+maybe_send_match() {
+ local elem="$1"
+ local what="$4"
+
+ [ $((RANDOM%20)) -gt 0 ] && return
+
+ dst_addr4="$2"
+ dst_port="$3"
+ send_udp
+
+ if [ "$(count_packets "{ $elem }")" != "1" ]; then
+ err "Packet to $dst_addr4:$dst_port did not match $what"
+ err "$(nft -a list ruleset)"
+ return 1
+ fi
+ nft reset counter inet filter test >/dev/null
+ nft reset element inet filter test "{ $elem }" >/dev/null
+}
+
# Correctness test template:
# - add ranged element, check that packets match it
# - check that packets outside range don't match it
# - remove some elements, check that packets don't match anymore
test_correctness_main() {
range_size=1
+
+ send_nomatch $((end + 1)) $((end + 1 + src_delta)) || return 1
+
for i in $(seq "${start}" $((start + count))); do
+ local elem=""
+
end=$((start + range_size))
# Avoid negative or zero-sized port ranges
@@ -1272,15 +1339,16 @@ test_correctness_main() {
srcstart=$((start + src_delta))
srcend=$((end + src_delta))
- add "$(format)" || return 1
+ elem="$(format)"
+ add "$elem" || return 1
for j in $(seq "$start" $((range_size / 2 + 1)) ${end}); do
- send_match "${j}" $((j + src_delta)) || return 1
+ send_match "$elem" "${j}" $((j + src_delta)) || return 1
done
send_nomatch $((end + 1)) $((end + 1 + src_delta)) || return 1
# Delete elements now and then
if [ $((i % 3)) -eq 0 ]; then
- del "$(format)" || return 1
+ del "$elem" || return 1
for j in $(seq "$start" \
$((range_size / 2 + 1)) ${end}); do
send_nomatch "${j}" $((j + src_delta)) \
@@ -1572,14 +1640,17 @@ test_timeout() {
range_size=1
for i in $(seq "$start" $((start + count))); do
+ local elem=""
+
end=$((start + range_size))
srcstart=$((start + src_delta))
srcend=$((end + src_delta))
- add "$(format)" || return 1
+ elem="$(format)"
+ add "$elem" || return 1
for j in $(seq "$start" $((range_size / 2 + 1)) ${end}); do
- send_match "${j}" $((j + src_delta)) || return 1
+ send_match "$elem" "${j}" $((j + src_delta)) || return 1
done
range_size=$((range_size + 1))
@@ -1737,7 +1808,7 @@ test_bug_reload() {
srcend=$((end + src_delta))
for j in $(seq "$start" $((range_size / 2 + 1)) ${end}); do
- send_match "${j}" $((j + src_delta)) || return 1
+ send_match "$(format)" "${j}" $((j + src_delta)) || return 1
done
range_size=$((range_size + 1))
@@ -1756,22 +1827,34 @@ test_bug_net_port_proto_match() {
range_size=1
for i in $(seq 1 10); do
for j in $(seq 1 20) ; do
- elem=$(printf "10.%d.%d.0/24 . %d1-%d0 . 6-17 " ${i} ${j} ${i} "$((i+1))")
+ local dport=$j
+
+ elem=$(printf "10.%d.%d.0/24 . %d-%d0 . 6-17 " ${i} ${j} ${dport} "$((dport+1))")
+
+			# testing every address is too slow; check a random sample only
+ maybe_send_nomatch "$elem" $(printf "10.%d.%d.1" $i $j) $(printf "%d1" $((dport+1))) "before add" || return 1
nft "add element inet filter test { $elem }" || return 1
+
+ maybe_send_match "$elem" $(printf "10.%d.%d.1" $i $j) $(printf "%d" $dport) "after add" || return 1
+
nft "get element inet filter test { $elem }" | grep -q "$elem"
if [ $? -ne 0 ];then
local got=$(nft "get element inet filter test { $elem }")
err "post-add: should have returned $elem but got $got"
return 1
fi
+
+ maybe_send_nomatch "$elem" $(printf "10.%d.%d.1" $i $j) $(printf "%d1" $((dport+1))) "out-of-range" || return 1
done
done
# recheck after set was filled
for i in $(seq 1 10); do
for j in $(seq 1 20) ; do
- elem=$(printf "10.%d.%d.0/24 . %d1-%d0 . 6-17 " ${i} ${j} ${i} "$((i+1))")
+ local dport=$j
+
+ elem=$(printf "10.%d.%d.0/24 . %d-%d0 . 6-17 " ${i} ${j} ${dport} "$((dport+1))")
nft "get element inet filter test { $elem }" | grep -q "$elem"
if [ $? -ne 0 ];then
@@ -1779,6 +1862,9 @@ test_bug_net_port_proto_match() {
err "post-fill: should have returned $elem but got $got"
return 1
fi
+
+ maybe_send_match "$elem" $(printf "10.%d.%d.1" $i $j) $(printf "%d" $dport) "recheck" || return 1
+ maybe_send_nomatch "$elem" $(printf "10.%d.%d.1" $i $j) $(printf "%d1" $((dport+1))) "recheck out-of-range" || return 1
done
done
@@ -1786,9 +1872,10 @@ test_bug_net_port_proto_match() {
for i in $(seq 1 10); do
for j in $(seq 1 20) ; do
local rnd=$((RANDOM%10))
+ local dport=$j
local got=""
- elem=$(printf "10.%d.%d.0/24 . %d1-%d0 . 6-17 " ${i} ${j} ${i} "$((i+1))")
+ elem=$(printf "10.%d.%d.0/24 . %d-%d0 . 6-17 " ${i} ${j} ${dport} "$((dport+1))")
if [ $rnd -gt 0 ];then
continue
fi
@@ -1799,6 +1886,8 @@ test_bug_net_port_proto_match() {
err "post-delete: query for $elem returned $got instead of error."
return 1
fi
+
+ maybe_send_nomatch "$elem" $(printf "10.%d.%d.1" $i $j) $(printf "%d" $dport) "match after deletion" || return 1
done
done
@@ -1817,12 +1906,54 @@ test_bug_avx2_mismatch()
dst_addr6="$a2"
send_icmp6
- if [ "$(count_packets)" -gt "0" ]; then
+ if [ "$(count_packets "{ icmpv6 . $a1 }")" -gt "0" ]; then
err "False match for $a2"
return 1
fi
}
+test_bug_doublecreate()
+{
+ local elements="1.2.3.4 . 1.2.4.1, 1.2.4.1 . 1.2.3.4"
+ local ret=1
+ local i
+
+ setup veth send_"${proto}" set || return ${ksft_skip}
+
+ add "{ $elements }" || return 1
+ # expected to work: 'add' on existing should be no-op.
+ add "{ $elements }" || return 1
+
+ # 'create' should return an error.
+ if nft create element inet filter test "{ $elements }" 2>/dev/null; then
+ err "Could create an existing element"
+ return 1
+ fi
+nft -f - <<EOF 2>/dev/null
+flush set inet filter test
+create element inet filter test { $elements }
+create element inet filter test { $elements }
+EOF
+ ret=$?
+ if [ $ret -eq 0 ]; then
+ err "Could create element twice in one transaction"
+ err "$(nft -a list ruleset)"
+ return 1
+ fi
+
+nft -f - <<EOF 2>/dev/null
+flush set inet filter test
+create element inet filter test { $elements }
+EOF
+ ret=$?
+ if [ $ret -ne 0 ]; then
+ err "Could not flush and re-create element in one transaction"
+ return 1
+ fi
+
+ return 0
+}
+
test_reported_issues() {
eval test_bug_"${subtest}"
}
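The per-element counters used above depend on the set being declared with the counter flag; nft reset element then prints the element together with its counter and zeroes it in one operation. A standalone sketch (elem_packets is a hypothetical name):

    # Print (and reset) the packet count of one element in set inet/filter/test.
    elem_packets()
    {
        local found=0 token

        for token in $(nft reset element inet filter test "$1"); do
            [ "$found" -eq 1 ] && { echo "$token"; return 0; }
            [ "$token" = "packets" ] && found=1
        done
    }
    # usage: elem_packets "{ 10.0.0.1 . 53 }"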
diff --git a/tools/testing/selftests/net/netfilter/nft_fib.sh b/tools/testing/selftests/net/netfilter/nft_fib.sh
index 9929a9ffef65..04544905c216 100755
--- a/tools/testing/selftests/net/netfilter/nft_fib.sh
+++ b/tools/testing/selftests/net/netfilter/nft_fib.sh
@@ -256,12 +256,12 @@ test_ping_unreachable() {
local daddr4=$1
local daddr6=$2
- if ip netns exec "$ns1" ping -c 1 -w 1 -q "$daddr4" > /dev/null; then
+ if ip netns exec "$ns1" ping -c 1 -W 0.1 -q "$daddr4" > /dev/null; then
echo "FAIL: ${ns1} could reach $daddr4" 1>&2
return 1
fi
- if ip netns exec "$ns1" ping -c 1 -w 1 -q "$daddr6" > /dev/null; then
+ if ip netns exec "$ns1" ping -c 1 -W 0.1 -q "$daddr6" > /dev/null; then
echo "FAIL: ${ns1} could reach $daddr6" 1>&2
return 1
fi
@@ -437,14 +437,17 @@ check_type()
local addr="$3"
local type="$4"
local count="$5"
+ local lret=0
[ -z "$count" ] && count=1
if ! ip netns exec "$nsrouter" nft get element inet t "$setname" { "$iifname" . "$addr" . "$type" } |grep -q "counter packets $count";then
- echo "FAIL: did not find $iifname . $addr . $type in $setname"
+ echo "FAIL: did not find $iifname . $addr . $type in $setname with $count packets"
ip netns exec "$nsrouter" nft list set inet t "$setname"
ret=1
- return 1
+		# do not fail right away; delete the entry if it exists so the later
+		# test that checks for unwanted keys isn't confused by this *expected* key.
+ lret=1
fi
# delete the entry, this allows to check if anything unexpected appeared
@@ -456,7 +459,7 @@ check_type()
return 1
fi
- return 0
+ return $lret
}
check_local()
diff --git a/tools/testing/selftests/net/netfilter/nft_flowtable.sh b/tools/testing/selftests/net/netfilter/nft_flowtable.sh
index a4ee5496f2a1..a68bc882fa4e 100755
--- a/tools/testing/selftests/net/netfilter/nft_flowtable.sh
+++ b/tools/testing/selftests/net/netfilter/nft_flowtable.sh
@@ -20,6 +20,7 @@ ret=0
SOCAT_TIMEOUT=60
nsin=""
+nsin_small=""
ns1out=""
ns2out=""
@@ -36,7 +37,7 @@ cleanup() {
cleanup_all_ns
- rm -f "$nsin" "$ns1out" "$ns2out"
+ rm -f "$nsin" "$nsin_small" "$ns1out" "$ns2out"
[ "$log_netns" -eq 0 ] && sysctl -q net.netfilter.nf_log_all_netns="$log_netns"
}
@@ -72,6 +73,7 @@ lmtu=1500
rmtu=2000
filesize=$((2 * 1024 * 1024))
+filesize_small=$((filesize / 16))
usage(){
echo "nft_flowtable.sh [OPTIONS]"
@@ -89,7 +91,10 @@ do
o) omtu=$OPTARG;;
l) lmtu=$OPTARG;;
r) rmtu=$OPTARG;;
- s) filesize=$OPTARG;;
+ s)
+ filesize=$OPTARG
+ filesize_small=$((OPTARG / 16))
+ ;;
*) usage;;
esac
done
@@ -122,6 +127,8 @@ ip -net "$nsr1" addr add fee1:2::1/64 dev veth1 nodad
ip -net "$nsr2" addr add 192.168.10.2/24 dev veth0
ip -net "$nsr2" addr add fee1:2::2/64 dev veth0 nodad
+ip netns exec "$nsr1" sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
+ip netns exec "$nsr2" sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
for i in 0 1; do
ip netns exec "$nsr1" sysctl net.ipv4.conf.veth$i.forwarding=1 > /dev/null
ip netns exec "$nsr2" sysctl net.ipv4.conf.veth$i.forwarding=1 > /dev/null
@@ -148,7 +155,9 @@ ip -net "$ns1" route add default via dead:1::1
ip -net "$ns2" route add default via dead:2::1
ip -net "$nsr1" route add default via 192.168.10.2
+ip -6 -net "$nsr1" route add default via fee1:2::2
ip -net "$nsr2" route add default via 192.168.10.1
+ip -6 -net "$nsr2" route add default via fee1:2::1
ip netns exec "$nsr1" nft -f - <<EOF
table inet filter {
@@ -215,6 +224,7 @@ if ! ip netns exec "$ns2" ping -c 1 -q 10.0.1.99 > /dev/null; then
fi
nsin=$(mktemp)
+nsin_small=$(mktemp)
ns1out=$(mktemp)
ns2out=$(mktemp)
@@ -265,6 +275,7 @@ check_counters()
check_dscp()
{
local what=$1
+ local pmtud="$2"
local ok=1
local counter
@@ -277,37 +288,39 @@ check_dscp()
local pc4z=${counter%*bytes*}
local pc4z=${pc4z#*packets}
+	local failmsg="FAIL: pmtu $pmtud: $what counters do not match, expected"
+
case "$what" in
"dscp_none")
if [ "$pc4" -gt 0 ] || [ "$pc4z" -eq 0 ]; then
- echo "FAIL: dscp counters do not match, expected dscp3 == 0, dscp0 > 0, but got $pc4,$pc4z" 1>&2
+ echo "$failmsg dscp3 == 0, dscp0 > 0, but got $pc4,$pc4z" 1>&2
ret=1
ok=0
fi
;;
"dscp_fwd")
if [ "$pc4" -eq 0 ] || [ "$pc4z" -eq 0 ]; then
- echo "FAIL: dscp counters do not match, expected dscp3 and dscp0 > 0 but got $pc4,$pc4z" 1>&2
+ echo "$failmsg dscp3 and dscp0 > 0 but got $pc4,$pc4z" 1>&2
ret=1
ok=0
fi
;;
"dscp_ingress")
if [ "$pc4" -eq 0 ] || [ "$pc4z" -gt 0 ]; then
- echo "FAIL: dscp counters do not match, expected dscp3 > 0, dscp0 == 0 but got $pc4,$pc4z" 1>&2
+ echo "$failmsg dscp3 > 0, dscp0 == 0 but got $pc4,$pc4z" 1>&2
ret=1
ok=0
fi
;;
"dscp_egress")
if [ "$pc4" -eq 0 ] || [ "$pc4z" -gt 0 ]; then
- echo "FAIL: dscp counters do not match, expected dscp3 > 0, dscp0 == 0 but got $pc4,$pc4z" 1>&2
+ echo "$failmsg dscp3 > 0, dscp0 == 0 but got $pc4,$pc4z" 1>&2
ret=1
ok=0
fi
;;
*)
- echo "FAIL: Unknown DSCP check" 1>&2
+ echo "$failmsg: Unknown DSCP check" 1>&2
ret=1
ok=0
esac
@@ -319,9 +332,9 @@ check_dscp()
check_transfer()
{
- in=$1
- out=$2
- what=$3
+ local in=$1
+ local out=$2
+ local what=$3
if ! cmp "$in" "$out" > /dev/null 2>&1; then
echo "FAIL: file mismatch for $what" 1>&2
@@ -342,25 +355,42 @@ test_tcp_forwarding_ip()
{
local nsa=$1
local nsb=$2
- local dstip=$3
- local dstport=$4
+ local pmtu=$3
+ local proto=$4
+ local dstip=$5
+ local dstport=$6
local lret=0
+ local socatc
+ local socatl
+ local infile="$nsin"
+
+ if [ $pmtu -eq 0 ]; then
+ infile="$nsin_small"
+ fi
- timeout "$SOCAT_TIMEOUT" ip netns exec "$nsb" socat -4 TCP-LISTEN:12345,reuseaddr STDIO < "$nsin" > "$ns2out" &
+ timeout "$SOCAT_TIMEOUT" ip netns exec "$nsb" socat -${proto} \
+ TCP"${proto}"-LISTEN:12345,reuseaddr STDIO < "$infile" > "$ns2out" &
lpid=$!
busywait 1000 listener_ready
- timeout "$SOCAT_TIMEOUT" ip netns exec "$nsa" socat -4 TCP:"$dstip":"$dstport" STDIO < "$nsin" > "$ns1out"
+ timeout "$SOCAT_TIMEOUT" ip netns exec "$nsa" socat -${proto} \
+ TCP"${proto}":"$dstip":"$dstport" STDIO < "$infile" > "$ns1out"
+ socatc=$?
wait $lpid
+ socatl=$?
+
+ if [ $socatl -ne 0 ] || [ $socatc -ne 0 ];then
+		lret=1
+		ret=1
+ fi
- if ! check_transfer "$nsin" "$ns2out" "ns1 -> ns2"; then
+ if ! check_transfer "$infile" "$ns2out" "ns1 -> ns2"; then
lret=1
ret=1
fi
- if ! check_transfer "$nsin" "$ns1out" "ns1 <- ns2"; then
+ if ! check_transfer "$infile" "$ns1out" "ns1 <- ns2"; then
lret=1
ret=1
fi
@@ -370,14 +400,22 @@ test_tcp_forwarding_ip()
test_tcp_forwarding()
{
- test_tcp_forwarding_ip "$1" "$2" 10.0.2.99 12345
+ local pmtu="$3"
+ local proto="$4"
+ local dstip="$5"
+ local dstport="$6"
+
+ test_tcp_forwarding_ip "$1" "$2" "$pmtu" "$proto" "$dstip" "$dstport"
return $?
}
test_tcp_forwarding_set_dscp()
{
- check_dscp "dscp_none"
+ local pmtu="$3"
+ local proto="$4"
+ local dstip="$5"
+ local dstport="$6"
ip netns exec "$nsr1" nft -f - <<EOF
table netdev dscpmangle {
@@ -388,8 +426,8 @@ table netdev dscpmangle {
}
EOF
if [ $? -eq 0 ]; then
- test_tcp_forwarding_ip "$1" "$2" 10.0.2.99 12345
- check_dscp "dscp_ingress"
+ test_tcp_forwarding_ip "$1" "$2" "$pmtu" "$proto" "$dstip" "$dstport"
+ check_dscp "dscp_ingress" "$pmtu"
ip netns exec "$nsr1" nft delete table netdev dscpmangle
else
@@ -405,10 +443,10 @@ table netdev dscpmangle {
}
EOF
if [ $? -eq 0 ]; then
- test_tcp_forwarding_ip "$1" "$2" 10.0.2.99 12345
- check_dscp "dscp_egress"
+ test_tcp_forwarding_ip "$1" "$2" "$pmtu" "$proto" "$dstip" "$dstport"
+ check_dscp "dscp_egress" "$pmtu"
- ip netns exec "$nsr1" nft flush table netdev dscpmangle
+ ip netns exec "$nsr1" nft delete table netdev dscpmangle
else
echo "SKIP: Could not load netdev:egress for veth1"
fi
@@ -416,48 +454,53 @@ fi
# partial. If flowtable really works, then both dscp-is-0 and dscp-is-cs3
# counters should have seen packets (before and after ft offload kicks in).
ip netns exec "$nsr1" nft -a insert rule inet filter forward ip dscp set cs3
- test_tcp_forwarding_ip "$1" "$2" 10.0.2.99 12345
- check_dscp "dscp_fwd"
+ test_tcp_forwarding_ip "$1" "$2" "$pmtu" "$proto" "$dstip" "$dstport"
+ check_dscp "dscp_fwd" "$pmtu"
}
test_tcp_forwarding_nat()
{
+ local nsa="$1"
+ local nsb="$2"
+ local pmtu="$3"
+ local what="$4"
local lret
- local pmtu
- test_tcp_forwarding_ip "$1" "$2" 10.0.2.99 12345
- lret=$?
+ [ "$pmtu" -eq 0 ] && what="$what (pmtu disabled)"
- pmtu=$3
- what=$4
+ test_tcp_forwarding_ip "$nsa" "$nsb" "$pmtu" 4 10.0.2.99 12345
+ lret=$?
if [ "$lret" -eq 0 ] ; then
if [ "$pmtu" -eq 1 ] ;then
- check_counters "flow offload for ns1/ns2 with masquerade and pmtu discovery $what"
+ check_counters "flow offload for ns1/ns2 with masquerade $what"
else
echo "PASS: flow offload for ns1/ns2 with masquerade $what"
fi
- test_tcp_forwarding_ip "$1" "$2" 10.6.6.6 1666
+ test_tcp_forwarding_ip "$1" "$2" "$pmtu" 4 10.6.6.6 1666
lret=$?
if [ "$pmtu" -eq 1 ] ;then
- check_counters "flow offload for ns1/ns2 with dnat and pmtu discovery $what"
+ check_counters "flow offload for ns1/ns2 with dnat $what"
elif [ "$lret" -eq 0 ] ; then
echo "PASS: flow offload for ns1/ns2 with dnat $what"
fi
+ else
+ echo "FAIL: flow offload for ns1/ns2 with dnat $what"
fi
return $lret
}
make_file "$nsin" "$filesize"
+make_file "$nsin_small" "$filesize_small"
# First test:
# No PMTU discovery, nsr1 is expected to fragment packets from ns1 to ns2 as needed.
# Due to MTU mismatch in both directions, all packets (except small packets like pure
# acks) have to be handled by normal forwarding path. Therefore, packet counters
# are not checked.
-if test_tcp_forwarding "$ns1" "$ns2"; then
+if test_tcp_forwarding "$ns1" "$ns2" 0 4 10.0.2.99 12345; then
echo "PASS: flow offloaded for ns1/ns2"
else
echo "FAIL: flow offload for ns1/ns2:" 1>&2
@@ -465,6 +508,14 @@ else
ret=1
fi
+if test_tcp_forwarding "$ns1" "$ns2" 0 6 "[dead:2::99]" 12345; then
+ echo "PASS: IPv6 flow offloaded for ns1/ns2"
+else
+ echo "FAIL: IPv6 flow offload for ns1/ns2:" 1>&2
+ ip netns exec "$nsr1" nft list ruleset
+ ret=1
+fi
+
# delete default route, i.e. ns2 won't be able to reach ns1 and
# will depend on ns1 being masqueraded in nsr1.
# expect ns1 has nsr1 address.
@@ -489,8 +540,9 @@ table ip nat {
}
EOF
-if ! test_tcp_forwarding_set_dscp "$ns1" "$ns2" 0 ""; then
- echo "FAIL: flow offload for ns1/ns2 with dscp update" 1>&2
+check_dscp "dscp_none" "0"
+if ! test_tcp_forwarding_set_dscp "$ns1" "$ns2" 0 4 10.0.2.99 12345; then
+ echo "FAIL: flow offload for ns1/ns2 with dscp update and no pmtu discovery" 1>&2
exit 0
fi
@@ -513,12 +565,87 @@ ip netns exec "$ns2" sysctl net.ipv4.ip_no_pmtu_disc=0 > /dev/null
# For earlier tests (large mtus), packets cannot be handled via flowtable
# (except pure acks and other small packets).
ip netns exec "$nsr1" nft reset counters table inet filter >/dev/null
+ip netns exec "$ns2" nft reset counters table inet filter >/dev/null
+
+if ! test_tcp_forwarding_set_dscp "$ns1" "$ns2" 1 4 10.0.2.99 12345; then
+ echo "FAIL: flow offload for ns1/ns2 with dscp update and pmtu discovery" 1>&2
+ exit 0
+fi
+
+ip netns exec "$nsr1" nft reset counters table inet filter >/dev/null
if ! test_tcp_forwarding_nat "$ns1" "$ns2" 1 ""; then
echo "FAIL: flow offload for ns1/ns2 with NAT and pmtu discovery" 1>&2
ip netns exec "$nsr1" nft list ruleset
fi
+# IPIP tunnel test:
+# Add IPIP tunnel interfaces and check flowtable acceleration.
+test_ipip() {
+if ! ip -net "$nsr1" link add name tun0 type ipip \
+ local 192.168.10.1 remote 192.168.10.2 >/dev/null;then
+ echo "SKIP: could not add ipip tunnel"
+ [ "$ret" -eq 0 ] && ret=$ksft_skip
+ return
+fi
+ip -net "$nsr1" link set tun0 up
+ip -net "$nsr1" addr add 192.168.100.1/24 dev tun0
+ip netns exec "$nsr1" sysctl net.ipv4.conf.tun0.forwarding=1 > /dev/null
+
+ip -net "$nsr2" link add name tun0 type ipip local 192.168.10.2 remote 192.168.10.1
+ip -net "$nsr2" link set tun0 up
+ip -net "$nsr2" addr add 192.168.100.2/24 dev tun0
+ip netns exec "$nsr2" sysctl net.ipv4.conf.tun0.forwarding=1 > /dev/null
+
+ip -net "$nsr1" route change default via 192.168.100.2
+ip -net "$nsr2" route change default via 192.168.100.1
+ip -net "$ns2" route add default via 10.0.2.1
+
+ip netns exec "$nsr1" nft -a insert rule inet filter forward 'meta oif tun0 accept'
+ip netns exec "$nsr1" nft -a insert rule inet filter forward \
+ 'meta oif "veth0" tcp sport 12345 ct mark set 1 flow add @f1 counter name routed_repl accept'
+
+if ! test_tcp_forwarding_nat "$ns1" "$ns2" 1 "IPIP tunnel"; then
+ echo "FAIL: flow offload for ns1/ns2 with IPIP tunnel" 1>&2
+ ip netns exec "$nsr1" nft list ruleset
+ ret=1
+fi
+
+# Create vlan tagged devices for IPIP traffic.
+ip -net "$nsr1" link add link veth1 name veth1.10 type vlan id 10
+ip -net "$nsr1" link set veth1.10 up
+ip -net "$nsr1" addr add 192.168.20.1/24 dev veth1.10
+ip netns exec "$nsr1" sysctl net.ipv4.conf.veth1/10.forwarding=1 > /dev/null
+ip netns exec "$nsr1" nft -a insert rule inet filter forward 'meta oif veth1.10 accept'
+ip -net "$nsr1" link add name tun1 type ipip local 192.168.20.1 remote 192.168.20.2
+ip -net "$nsr1" link set tun1 up
+ip -net "$nsr1" addr add 192.168.200.1/24 dev tun1
+ip -net "$nsr1" route change default via 192.168.200.2
+ip netns exec "$nsr1" sysctl net.ipv4.conf.tun1.forwarding=1 > /dev/null
+ip netns exec "$nsr1" nft -a insert rule inet filter forward 'meta oif tun1 accept'
+
+ip -net "$nsr2" link add link veth0 name veth0.10 type vlan id 10
+ip -net "$nsr2" link set veth0.10 up
+ip -net "$nsr2" addr add 192.168.20.2/24 dev veth0.10
+ip netns exec "$nsr2" sysctl net.ipv4.conf.veth0/10.forwarding=1 > /dev/null
+ip -net "$nsr2" link add name tun1 type ipip local 192.168.20.2 remote 192.168.20.1
+ip -net "$nsr2" link set tun1 up
+ip -net "$nsr2" addr add 192.168.200.2/24 dev tun1
+ip -net "$nsr2" route change default via 192.168.200.1
+ip netns exec "$nsr2" sysctl net.ipv4.conf.tun1.forwarding=1 > /dev/null
+
+if ! test_tcp_forwarding_nat "$ns1" "$ns2" 1 "IPIP tunnel over vlan"; then
+ echo "FAIL: flow offload for ns1/ns2 with IPIP tunnel over vlan" 1>&2
+ ip netns exec "$nsr1" nft list ruleset
+ ret=1
+fi
+
+# Restore the previous configuration
+ip -net "$nsr1" route change default via 192.168.10.2
+ip -net "$nsr2" route change default via 192.168.10.1
+ip -net "$ns2" route del default via 10.0.2.1
+}
+
# Another test:
# Add bridge interface br0 to Router1, with NAT enabled.
test_bridge() {
@@ -604,6 +731,8 @@ ip -net "$nsr1" addr add dead:1::1/64 dev veth0 nodad
ip -net "$nsr1" link set up dev veth0
}
+test_ipip
+
test_bridge
KEY_SHA="0x"$(ps -af | sha1sum | cut -d " " -f 1)
@@ -644,7 +773,7 @@ ip -net "$ns2" route del 192.168.10.1 via 10.0.2.1
ip -net "$ns2" route add default via 10.0.2.1
ip -net "$ns2" route add default via dead:2::1
-if test_tcp_forwarding "$ns1" "$ns2"; then
+if test_tcp_forwarding "$ns1" "$ns2" 1 4 10.0.2.99 12345; then
check_counters "ipsec tunnel mode for ns1/ns2"
else
echo "FAIL: ipsec tunnel mode for ns1/ns2"
@@ -652,6 +781,14 @@ else
ip netns exec "$nsr1" cat /proc/net/xfrm_stat 1>&2
fi
+if test_tcp_forwarding "$ns1" "$ns2" 1 6 "[dead:2::99]" 12345; then
+ check_counters "IPv6 ipsec tunnel mode for ns1/ns2"
+else
+ echo "FAIL: IPv6 ipsec tunnel mode for ns1/ns2"
+ ip netns exec "$nsr1" nft list ruleset 1>&2
+ ip netns exec "$nsr1" cat /proc/net/xfrm_stat 1>&2
+fi
+
if [ "$1" = "" ]; then
low=1280
mtu=$((65536 - low))
@@ -668,7 +805,7 @@ if [ "$1" = "" ]; then
fi
echo "re-run with random mtus and file size: -o $o -l $l -r $r -s $filesize"
- $0 -o "$o" -l "$l" -r "$r" -s "$filesize"
+ $0 -o "$o" -l "$l" -r "$r" -s "$filesize" || ret=1
fi
exit $ret
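When the counter checks above disagree, it helps to confirm whether flows were handed to the flowtable at all: offloaded conntrack entries carry the [OFFLOAD] status flag in conntrack listings. A hedged one-liner, assuming the conntrack tool is installed:

    # Count conntrack entries currently owned by a flowtable in $nsr1.
    ip netns exec "$nsr1" conntrack -L 2>/dev/null | grep -c '\[OFFLOAD\]'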
diff --git a/tools/testing/selftests/net/netfilter/nft_interface_stress.sh b/tools/testing/selftests/net/netfilter/nft_interface_stress.sh
index 5ff7be9daeee..c0fffaa6dbd9 100755
--- a/tools/testing/selftests/net/netfilter/nft_interface_stress.sh
+++ b/tools/testing/selftests/net/netfilter/nft_interface_stress.sh
@@ -10,6 +10,8 @@ source lib.sh
checktool "nft --version" "run test without nft tool"
checktool "iperf3 --version" "run test without iperf3 tool"
+read kernel_tainted < /proc/sys/kernel/tainted
+
# how many seconds to torture the kernel?
# default to 80% of max run time but don't exceed 48s
TEST_RUNTIME=$((${kselftest_timeout:-60} * 8 / 10))
@@ -135,7 +137,8 @@ else
wait
fi
-[[ $(</proc/sys/kernel/tainted) -eq 0 ]] || {
+
+[[ $kernel_tainted -eq 0 && $(</proc/sys/kernel/tainted) -ne 0 ]] && {
echo "FAIL: Kernel is tainted!"
exit $ksft_fail
}
diff --git a/tools/testing/selftests/net/netfilter/nft_nat.sh b/tools/testing/selftests/net/netfilter/nft_nat.sh
index 9e39de26455f..b3ec2d0a3f56 100755
--- a/tools/testing/selftests/net/netfilter/nft_nat.sh
+++ b/tools/testing/selftests/net/netfilter/nft_nat.sh
@@ -569,7 +569,7 @@ test_redirect6()
ip netns exec "$ns0" sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
if ! ip netns exec "$ns2" ping -q -c 1 dead:1::99 > /dev/null;then
- echo "ERROR: cannnot ping $ns1 from $ns2 via ipv6"
+ echo "ERROR: cannot ping $ns1 from $ns2 via ipv6"
lret=1
fi
@@ -859,13 +859,31 @@ EOF
# from router:service bypass connection tracking.
test_port_shadow_notrack "$family"
- # test nat based mitigation: fowarded packets coming from service port
+ # test nat based mitigation: forwarded packets coming from service port
# are masqueraded with random highport.
test_port_shadow_pat "$family"
ip netns exec "$ns0" nft delete table $family nat
}
+file_cmp()
+{
+ local infile="$1"
+ local outfile="$2"
+
+ if ! cmp "$infile" "$outfile";then
+ echo -n "Infile "
+ ls -l "$infile"
+ echo -n "Outfile "
+ ls -l "$outfile"
+ echo "ERROR: in and output file mismatch when checking $msg" 1>&1
+ ret=1
+ return 1
+ fi
+
+ return 0
+}
+
test_stateless_nat_ip()
{
local lret=0
@@ -966,11 +984,7 @@ EOF
wait
- if ! cmp "$INFILE" "$OUTFILE";then
- ls -l "$INFILE" "$OUTFILE"
- echo "ERROR: in and output file mismatch when checking udp with stateless nat" 1>&2
- lret=1
- fi
+ file_cmp "$INFILE" "$OUTFILE" "udp with stateless nat" || lret=1
:> "$OUTFILE"
@@ -991,6 +1005,62 @@ EOF
return $lret
}
+test_dnat_clash()
+{
+ local lret=0
+
+ if ! socat -h > /dev/null 2>&1;then
+ echo "SKIP: Could not run dnat clash test without socat tool"
+ [ $ret -eq 0 ] && ret=$ksft_skip
+ return $ksft_skip
+ fi
+
+ip netns exec "$ns0" nft -f /dev/stdin <<EOF
+flush ruleset
+table ip dnat-test {
+ chain prerouting {
+ type nat hook prerouting priority dstnat; policy accept;
+ ip daddr 10.0.2.1 udp dport 1234 counter dnat to 10.0.1.1:1234
+ }
+}
+EOF
+ if [ $? -ne 0 ]; then
+ echo "SKIP: Could not add dnat rules"
+ [ $ret -eq 0 ] && ret=$ksft_skip
+ return $ksft_skip
+ fi
+
+ local udpdaddr="10.0.2.1"
+ for i in 1 2;do
+ echo "PING $udpdaddr" > "$INFILE"
+ echo "PONG 10.0.1.1 step $i" | ip netns exec "$ns0" timeout 3 socat STDIO UDP4-LISTEN:1234,bind=10.0.1.1 > "$OUTFILE" 2>/dev/null &
+ local lpid=$!
+
+ busywait $BUSYWAIT_TIMEOUT listener_ready "$ns0" 1234 "-u"
+
+ result=$(ip netns exec "$ns1" timeout 3 socat STDIO UDP4-SENDTO:"$udpdaddr:1234,sourceport=4321" < "$INFILE")
+ udpdaddr="10.0.1.1"
+
+ if [ "$result" != "PONG 10.0.1.1 step $i" ] ; then
+ echo "ERROR: failed to test udp $ns1 to $ns2 with dnat rule step $i, result: \"$result\"" 1>&2
+ lret=1
+ ret=1
+ fi
+
+ wait
+
+ file_cmp "$INFILE" "$OUTFILE" "udp dnat step $i" || lret=1
+
+ :> "$OUTFILE"
+ done
+
+ test $lret -eq 0 && echo "PASS: IP dnat clash $ns1:$ns2"
+
+ ip netns exec "$ns0" nft flush ruleset
+
+ return $lret
+}
+
# ip netns exec "$ns0" ping -c 1 -q 10.0.$i.99
for i in "$ns0" "$ns1" "$ns2" ;do
ip netns exec "$i" nft -f /dev/stdin <<EOF
@@ -1147,6 +1217,7 @@ $test_inet_nat && test_redirect6 inet
test_port_shadowing
test_stateless_nat_ip
+test_dnat_clash
if [ $ret -ne 0 ];then
echo -n "FAIL: "
diff --git a/tools/testing/selftests/net/netfilter/sctp_collision.c b/tools/testing/selftests/net/netfilter/sctp_collision.c
index 21bb1cfd8a85..b282d1785c9b 100644
--- a/tools/testing/selftests/net/netfilter/sctp_collision.c
+++ b/tools/testing/selftests/net/netfilter/sctp_collision.c
@@ -9,9 +9,10 @@
int main(int argc, char *argv[])
{
struct sockaddr_in saddr = {}, daddr = {};
- int sd, ret, len = sizeof(daddr);
+ socklen_t len = sizeof(daddr);
struct timeval tv = {25, 0};
char buf[] = "hello";
+ int sd, ret;
if (argc != 6 || (strcmp(argv[1], "server") && strcmp(argv[1], "client"))) {
printf("%s <server|client> <LOCAL_IP> <LOCAL_PORT> <REMOTE_IP> <REMOTE_PORT>\n",
diff --git a/tools/testing/selftests/net/netfilter/udpclash.c b/tools/testing/selftests/net/netfilter/udpclash.c
new file mode 100644
index 000000000000..79de163d61ab
--- /dev/null
+++ b/tools/testing/selftests/net/netfilter/udpclash.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Usage: ./udpclash <IP> <PORT>
+ *
+ * Emit THREAD_COUNT UDP packets that all share the same source and
+ * destination address/port tuple.
+ *
+ * This mimics DNS resolver libraries that emit A and AAAA requests
+ * in parallel.
+ *
+ * This exercises conntrack clash resolution logic added and later
+ * refined in
+ *
+ * 71d8c47fc653 ("netfilter: conntrack: introduce clash resolution on insertion race")
+ * ed07d9a021df ("netfilter: nf_conntrack: resolve clash for matching conntracks")
+ * 6a757c07e51f ("netfilter: conntrack: allow insertion of clashing entries")
+ */
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <arpa/inet.h>
+#include <sys/socket.h>
+#include <pthread.h>
+
+#define THREAD_COUNT 128
+
+struct thread_args {
+ const struct sockaddr_in *si_remote;
+ int sockfd;
+};
+
+static volatile int wait = 1;
+
+static void *thread_main(void *varg)
+{
+ const struct sockaddr_in *si_remote;
+ const struct thread_args *args = varg;
+ static const char msg[] = "foo";
+
+ si_remote = args->si_remote;
+
+ while (wait == 1)
+ ;
+
+ if (sendto(args->sockfd, msg, strlen(msg), MSG_NOSIGNAL,
+ (struct sockaddr *)si_remote, sizeof(*si_remote)) < 0)
+ exit(111);
+
+ return varg;
+}
+
+static int run_test(int fd, const struct sockaddr_in *si_remote)
+{
+ struct thread_args thread_args = {
+ .si_remote = si_remote,
+ .sockfd = fd,
+ };
+ pthread_t *tid = calloc(THREAD_COUNT, sizeof(pthread_t));
+ unsigned int repl_count = 0, timeout = 0;
+ int i;
+
+ if (!tid) {
+ perror("calloc");
+ return 1;
+ }
+
+ for (i = 0; i < THREAD_COUNT; i++) {
+ int err = pthread_create(&tid[i], NULL, &thread_main, &thread_args);
+
+ if (err != 0) {
+ perror("pthread_create");
+ exit(1);
+ }
+ }
+
+ wait = 0;
+
+ for (i = 0; i < THREAD_COUNT; i++)
+ pthread_join(tid[i], NULL);
+
+ while (repl_count < THREAD_COUNT) {
+ struct sockaddr_in si_repl;
+ socklen_t si_repl_len = sizeof(si_repl);
+ char repl[512];
+ ssize_t ret;
+
+ ret = recvfrom(fd, repl, sizeof(repl), MSG_NOSIGNAL,
+ (struct sockaddr *) &si_repl, &si_repl_len);
+ if (ret < 0) {
+ if (timeout++ > 5000) {
+ fputs("timed out while waiting for reply from thread\n", stderr);
+ break;
+ }
+
+			/* give reply time to pass through the stack */
+ usleep(1000);
+ continue;
+ }
+
+ if (si_repl_len != sizeof(*si_remote)) {
+ fprintf(stderr, "warning: reply has unexpected repl_len %d vs %d\n",
+ (int)si_repl_len, (int)sizeof(si_repl));
+ } else if (si_remote->sin_addr.s_addr != si_repl.sin_addr.s_addr ||
+ si_remote->sin_port != si_repl.sin_port) {
+ char a[64], b[64];
+
+ inet_ntop(AF_INET, &si_remote->sin_addr, a, sizeof(a));
+ inet_ntop(AF_INET, &si_repl.sin_addr, b, sizeof(b));
+
+ fprintf(stderr, "reply from wrong source: want %s:%d got %s:%d\n",
+ a, ntohs(si_remote->sin_port), b, ntohs(si_repl.sin_port));
+ }
+
+ repl_count++;
+ }
+
+ printf("got %d of %d replies\n", repl_count, THREAD_COUNT);
+
+ free(tid);
+
+ return repl_count == THREAD_COUNT ? 0 : 1;
+}
+
+int main(int argc, char *argv[])
+{
+ struct sockaddr_in si_local = {
+ .sin_family = AF_INET,
+ };
+ struct sockaddr_in si_remote = {
+ .sin_family = AF_INET,
+ };
+ int fd, ret;
+
+ if (argc < 3) {
+ fputs("Usage: send_udp <daddr> <dport>\n", stderr);
+ return 1;
+ }
+
+ si_remote.sin_port = htons(atoi(argv[2]));
+ si_remote.sin_addr.s_addr = inet_addr(argv[1]);
+
+ fd = socket(AF_INET, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, IPPROTO_UDP);
+ if (fd < 0) {
+ perror("socket");
+ return 1;
+ }
+
+ if (bind(fd, (struct sockaddr *)&si_local, sizeof(si_local)) < 0) {
+ perror("bind");
+ return 1;
+ }
+
+ ret = run_test(fd, &si_remote);
+
+ close(fd);
+
+ return ret;
+}
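Outside the harness, the effect of this helper can be observed by sampling the per-CPU clash_resolve counters around a run. A sketch, assuming a UDP listener on the target port and the conntrack tool; the awk summation is one possible way to aggregate the per-CPU lines:

    before=$(conntrack -S | awk -F'clash_resolve=' 'NF>1 {s+=$2} END {print s+0}')
    ./udpclash 127.0.0.1 9000
    after=$(conntrack -S | awk -F'clash_resolve=' 'NF>1 {s+=$2} END {print s+0}')
    echo "clash resolutions during run: $((after - before))"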
diff --git a/tools/testing/selftests/net/netlink-dumps.c b/tools/testing/selftests/net/netlink-dumps.c
index 07423f256f96..51129c564d0a 100644
--- a/tools/testing/selftests/net/netlink-dumps.c
+++ b/tools/testing/selftests/net/netlink-dumps.c
@@ -18,7 +18,7 @@
#include <linux/mqueue.h>
#include <linux/rtnetlink.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#include <ynl.h>
@@ -31,9 +31,18 @@ struct ext_ack {
const char *str;
};
-/* 0: no done, 1: done found, 2: extack found, -1: error */
-static int nl_get_extack(char *buf, size_t n, struct ext_ack *ea)
+enum get_ea_ret {
+ ERROR = -1,
+ NO_CTRL = 0,
+ FOUND_DONE,
+ FOUND_ERR,
+ FOUND_EXTACK,
+};
+
+static enum get_ea_ret
+nl_get_extack(char *buf, size_t n, struct ext_ack *ea)
{
+ enum get_ea_ret ret = NO_CTRL;
const struct nlmsghdr *nlh;
const struct nlattr *attr;
ssize_t rem;
@@ -41,15 +50,19 @@ static int nl_get_extack(char *buf, size_t n, struct ext_ack *ea)
for (rem = n; rem > 0; NLMSG_NEXT(nlh, rem)) {
nlh = (struct nlmsghdr *)&buf[n - rem];
if (!NLMSG_OK(nlh, rem))
- return -1;
+ return ERROR;
- if (nlh->nlmsg_type != NLMSG_DONE)
+ if (nlh->nlmsg_type == NLMSG_ERROR)
+ ret = FOUND_ERR;
+ else if (nlh->nlmsg_type == NLMSG_DONE)
+ ret = FOUND_DONE;
+ else
continue;
ea->err = -*(int *)NLMSG_DATA(nlh);
if (!(nlh->nlmsg_flags & NLM_F_ACK_TLVS))
- return 1;
+ return ret;
ynl_attr_for_each(attr, nlh, sizeof(int)) {
switch (ynl_attr_type(attr)) {
@@ -68,10 +81,10 @@ static int nl_get_extack(char *buf, size_t n, struct ext_ack *ea)
}
}
- return 2;
+ return FOUND_EXTACK;
}
- return 0;
+ return ret;
}
static const struct {
@@ -99,9 +112,9 @@ static const struct {
TEST(dump_extack)
{
int netlink_sock;
+ int i, cnt, ret;
char buf[8192];
int one = 1;
- int i, cnt;
ssize_t n;
netlink_sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
@@ -118,7 +131,7 @@ TEST(dump_extack)
ASSERT_EQ(n, 0);
/* Dump so many times we fill up the buffer */
- cnt = 64;
+ cnt = 80;
for (i = 0; i < cnt; i++) {
n = send(netlink_sock, &dump_neigh_bad,
sizeof(dump_neigh_bad), 0);
@@ -130,6 +143,7 @@ TEST(dump_extack)
EXPECT_EQ(n, -1);
EXPECT_EQ(errno, ENOBUFS);
+ ret = NO_CTRL;
for (i = 0; i < cnt; i++) {
struct ext_ack ea = {};
@@ -140,10 +154,20 @@ TEST(dump_extack)
}
ASSERT_GE(n, (ssize_t)sizeof(struct nlmsghdr));
- EXPECT_EQ(nl_get_extack(buf, n, &ea), 2);
+ ret = nl_get_extack(buf, n, &ea);
+ /* Once we fill the buffer we'll see one ENOBUFS followed
+ * by a number of EBUSYs. Then the last recv() will finally
+ * trigger and complete the dump.
+ */
+ if (ret == FOUND_ERR && (ea.err == ENOBUFS || ea.err == EBUSY))
+ continue;
+ EXPECT_EQ(ret, FOUND_EXTACK);
+ EXPECT_EQ(ea.err, EINVAL);
EXPECT_EQ(ea.attr_offs,
sizeof(struct nlmsghdr) + sizeof(struct ndmsg));
}
+ /* Make sure last message was a full DONE+extack */
+ EXPECT_EQ(ret, FOUND_EXTACK);
}
static const struct {
diff --git a/tools/testing/selftests/net/nettest.c b/tools/testing/selftests/net/nettest.c
index cd8a58097448..1f5227f3d64d 100644
--- a/tools/testing/selftests/net/nettest.c
+++ b/tools/testing/selftests/net/nettest.c
@@ -385,7 +385,7 @@ static int get_bind_to_device(int sd, char *name, size_t len)
name[0] = '\0';
rc = getsockopt(sd, SOL_SOCKET, SO_BINDTODEVICE, name, &optlen);
if (rc < 0)
- log_err_errno("setsockopt(SO_BINDTODEVICE)");
+ log_err_errno("getsockopt(SO_BINDTODEVICE)");
return rc;
}
@@ -535,7 +535,7 @@ static int set_freebind(int sd, int version)
break;
case AF_INET6:
if (setsockopt(sd, SOL_IPV6, IPV6_FREEBIND, &one, sizeof(one))) {
- log_err_errno("setsockopt(IPV6_FREEBIND");
+ log_err_errno("setsockopt(IPV6_FREEBIND)");
rc = -1;
}
break;
@@ -812,7 +812,7 @@ static int convert_addr(struct sock_args *args, const char *_str,
sep++;
if (str_to_uint(sep, 1, pfx_len_max,
&args->prefix_len) != 0) {
- fprintf(stderr, "Invalid port\n");
+ fprintf(stderr, "Invalid prefix length\n");
return 1;
}
} else {
@@ -1272,7 +1272,7 @@ static int msg_loop(int client, int sd, void *addr, socklen_t alen,
}
}
- nfds = interactive ? MAX(fileno(stdin), sd) + 1 : sd + 1;
+ nfds = interactive ? MAX(fileno(stdin), sd) + 1 : sd + 1;
while (1) {
FD_ZERO(&rfds);
FD_SET(sd, &rfds);
@@ -1492,7 +1492,7 @@ static int lsock_init(struct sock_args *args)
sd = socket(args->version, args->type, args->protocol);
if (sd < 0) {
log_err_errno("Error opening socket");
- return -1;
+ return -1;
}
if (set_reuseaddr(sd) != 0)
@@ -1912,7 +1912,7 @@ static int ipc_parent(int cpid, int fd, struct sock_args *args)
* waiting to be told when to continue
*/
if (read(fd, &buf, sizeof(buf)) <= 0) {
- log_err_errno("Failed to read IPC status from status");
+ log_err_errno("Failed to read IPC status from pipe");
return 1;
}
if (!buf) {
diff --git a/tools/testing/selftests/net/nl_netdev.py b/tools/testing/selftests/net/nl_netdev.py
index beaee5e4e2aa..5c66421ab8aa 100755
--- a/tools/testing/selftests/net/nl_netdev.py
+++ b/tools/testing/selftests/net/nl_netdev.py
@@ -2,8 +2,9 @@
# SPDX-License-Identifier: GPL-2.0
import time
+from os import system
from lib.py import ksft_run, ksft_exit, ksft_pr
-from lib.py import ksft_eq, ksft_ge, ksft_busy_wait
+from lib.py import ksft_eq, ksft_ge, ksft_ne, ksft_busy_wait
from lib.py import NetdevFamily, NetdevSimDev, ip
@@ -34,6 +35,128 @@ def napi_list_check(nf) -> None:
ksft_eq(len(napis), 100,
comment=f"queue count after reset queue {q} mode {i}")
+def napi_set_threaded(nf) -> None:
+ """
+ Test that verifies various cases of napi threaded
+ set and unset at napi and device level.
+ """
+ with NetdevSimDev(queue_count=2) as nsimdev:
+ nsim = nsimdev.nsims[0]
+
+ ip(f"link set dev {nsim.ifname} up")
+
+ napis = nf.napi_get({'ifindex': nsim.ifindex}, dump=True)
+ ksft_eq(len(napis), 2)
+
+ napi0_id = napis[0]['id']
+ napi1_id = napis[1]['id']
+
+ # set napi threaded and verify
+ nf.napi_set({'id': napi0_id, 'threaded': "enabled"})
+ napi0 = nf.napi_get({'id': napi0_id})
+ ksft_eq(napi0['threaded'], "enabled")
+ ksft_ne(napi0.get('pid'), None)
+
+ # check it is not set for napi1
+ napi1 = nf.napi_get({'id': napi1_id})
+ ksft_eq(napi1['threaded'], "disabled")
+ ksft_eq(napi1.get('pid'), None)
+
+ ip(f"link set dev {nsim.ifname} down")
+ ip(f"link set dev {nsim.ifname} up")
+
+ # verify if napi threaded is still set
+ napi0 = nf.napi_get({'id': napi0_id})
+ ksft_eq(napi0['threaded'], "enabled")
+ ksft_ne(napi0.get('pid'), None)
+
+ # check it is still not set for napi1
+ napi1 = nf.napi_get({'id': napi1_id})
+ ksft_eq(napi1['threaded'], "disabled")
+ ksft_eq(napi1.get('pid'), None)
+
+ # unset napi threaded and verify
+ nf.napi_set({'id': napi0_id, 'threaded': "disabled"})
+ napi0 = nf.napi_get({'id': napi0_id})
+ ksft_eq(napi0['threaded'], "disabled")
+ ksft_eq(napi0.get('pid'), None)
+
+ # set threaded at device level
+ system(f"echo 1 > /sys/class/net/{nsim.ifname}/threaded")
+
+ # check napi threaded is set for both napis
+ napi0 = nf.napi_get({'id': napi0_id})
+ ksft_eq(napi0['threaded'], "enabled")
+ ksft_ne(napi0.get('pid'), None)
+ napi1 = nf.napi_get({'id': napi1_id})
+ ksft_eq(napi1['threaded'], "enabled")
+ ksft_ne(napi1.get('pid'), None)
+
+ # unset threaded at device level
+ system(f"echo 0 > /sys/class/net/{nsim.ifname}/threaded")
+
+ # check napi threaded is unset for both napis
+ napi0 = nf.napi_get({'id': napi0_id})
+ ksft_eq(napi0['threaded'], "disabled")
+ ksft_eq(napi0.get('pid'), None)
+ napi1 = nf.napi_get({'id': napi1_id})
+ ksft_eq(napi1['threaded'], "disabled")
+ ksft_eq(napi1.get('pid'), None)
+
+ # set napi threaded for napi0
+ nf.napi_set({'id': napi0_id, 'threaded': 1})
+ napi0 = nf.napi_get({'id': napi0_id})
+ ksft_eq(napi0['threaded'], "enabled")
+ ksft_ne(napi0.get('pid'), None)
+
+ # unset threaded at device level
+ system(f"echo 0 > /sys/class/net/{nsim.ifname}/threaded")
+
+ # check napi threaded is unset for both napis
+ napi0 = nf.napi_get({'id': napi0_id})
+ ksft_eq(napi0['threaded'], "disabled")
+ ksft_eq(napi0.get('pid'), None)
+ napi1 = nf.napi_get({'id': napi1_id})
+ ksft_eq(napi1['threaded'], "disabled")
+ ksft_eq(napi1.get('pid'), None)
+
+def dev_set_threaded(nf) -> None:
+ """
+ Test that verifies various cases of napi threaded
+ set and unset at device level using sysfs.
+ """
+ with NetdevSimDev(queue_count=2) as nsimdev:
+ nsim = nsimdev.nsims[0]
+
+ ip(f"link set dev {nsim.ifname} up")
+
+ napis = nf.napi_get({'ifindex': nsim.ifindex}, dump=True)
+ ksft_eq(len(napis), 2)
+
+ napi0_id = napis[0]['id']
+ napi1_id = napis[1]['id']
+
+ # set threaded
+ system(f"echo 1 > /sys/class/net/{nsim.ifname}/threaded")
+
+ # check napi threaded is set for both napis
+ napi0 = nf.napi_get({'id': napi0_id})
+ ksft_eq(napi0['threaded'], "enabled")
+ ksft_ne(napi0.get('pid'), None)
+ napi1 = nf.napi_get({'id': napi1_id})
+ ksft_eq(napi1['threaded'], "enabled")
+ ksft_ne(napi1.get('pid'), None)
+
+ # unset threaded
+ system(f"echo 0 > /sys/class/net/{nsim.ifname}/threaded")
+
+ # check napi threaded is unset for both napis
+ napi0 = nf.napi_get({'id': napi0_id})
+ ksft_eq(napi0['threaded'], "disabled")
+ ksft_eq(napi0.get('pid'), None)
+ napi1 = nf.napi_get({'id': napi1_id})
+ ksft_eq(napi1['threaded'], "disabled")
+ ksft_eq(napi1.get('pid'), None)
def nsim_rxq_reset_down(nf) -> None:
"""
@@ -122,7 +245,7 @@ def page_pool_check(nf) -> None:
def main() -> None:
nf = NetdevFamily()
ksft_run([empty_check, lo_check, page_pool_check, napi_list_check,
- nsim_rxq_reset_down],
+ dev_set_threaded, napi_set_threaded, nsim_rxq_reset_down],
args=(nf, ))
ksft_exit()
diff --git a/tools/testing/selftests/net/openvswitch/openvswitch.sh b/tools/testing/selftests/net/openvswitch/openvswitch.sh
index 3c8d3455d8e7..b327d3061ed5 100755
--- a/tools/testing/selftests/net/openvswitch/openvswitch.sh
+++ b/tools/testing/selftests/net/openvswitch/openvswitch.sh
@@ -25,6 +25,7 @@ tests="
nat_related_v4 ip4-nat-related: ICMP related matches work with SNAT
netlink_checks ovsnl: validate netlink attrs and settings
upcall_interfaces ovs: test the upcall interfaces
+ tunnel_metadata ovs: test extraction of tunnel metadata
drop_reason drop: test drop reasons are emitted
psample psample: Sampling packets with psample"
@@ -113,13 +114,13 @@ ovs_add_dp () {
}
ovs_add_if () {
- info "Adding IF to DP: br:$2 if:$3"
- if [ "$4" != "-u" ]; then
- ovs_sbx "$1" python3 $ovs_base/ovs-dpctl.py add-if "$2" "$3" \
- || return 1
+ info "Adding IF to DP: br:$3 if:$4 ($2)"
+ if [ "$5" != "-u" ]; then
+ ovs_sbx "$1" python3 $ovs_base/ovs-dpctl.py add-if \
+ -t "$2" "$3" "$4" || return 1
else
python3 $ovs_base/ovs-dpctl.py add-if \
- -u "$2" "$3" >$ovs_dir/$3.out 2>$ovs_dir/$3.err &
+ -u -t "$2" "$3" "$4" >$ovs_dir/$4.out 2>$ovs_dir/$4.err &
pid=$!
on_exit "ovs_sbx $1 kill -TERM $pid 2>/dev/null"
fi
@@ -166,9 +167,9 @@ ovs_add_netns_and_veths () {
fi
if [ "$7" != "-u" ]; then
- ovs_add_if "$1" "$2" "$4" || return 1
+ ovs_add_if "$1" "netdev" "$2" "$4" || return 1
else
- ovs_add_if "$1" "$2" "$4" -u || return 1
+ ovs_add_if "$1" "netdev" "$2" "$4" -u || return 1
fi
if [ $TRACING -eq 1 ]; then
@@ -756,6 +757,79 @@ test_upcall_interfaces() {
return 0
}
+ovs_add_kernel_tunnel() {
+ local sbxname=$1; shift
+ local ns=$1; shift
+ local tnl_type=$1; shift
+ local name=$1; shift
+ local addr=$1; shift
+
+ info "setting up kernel ${tnl_type} tunnel ${name}"
+ ovs_sbx "${sbxname}" ip -netns ${ns} link add dev ${name} type ${tnl_type} $* || return 1
+ on_exit "ovs_sbx ${sbxname} ip -netns ${ns} link del ${name} >/dev/null 2>&1"
+ ovs_sbx "${sbxname}" ip -netns ${ns} addr add dev ${name} ${addr} || return 1
+ ovs_sbx "${sbxname}" ip -netns ${ns} link set dev ${name} mtu 1450 up || return 1
+}
+
+test_tunnel_metadata() {
+ which arping >/dev/null 2>&1 || return $ksft_skip
+
+ sbxname="test_tunnel_metadata"
+ sbx_add "${sbxname}" || return 1
+
+ info "setting up new DP"
+ ovs_add_dp "${sbxname}" tdp0 -V 2:1 || return 1
+
+ ovs_add_netns_and_veths "${sbxname}" tdp0 tns left0 l0 \
+ 172.31.110.1/24 || return 1
+
+ info "removing veth interface from openvswitch and setting IP"
+ ovs_del_if "${sbxname}" tdp0 left0 || return 1
+ ovs_sbx "${sbxname}" ip addr add 172.31.110.2/24 dev left0 || return 1
+ ovs_sbx "${sbxname}" ip link set left0 up || return 1
+
+ info "setting up tunnel port in openvswitch"
+ ovs_add_if "${sbxname}" "vxlan" tdp0 ovs-vxlan0 -u || return 1
+ on_exit "ovs_sbx ${sbxname} ip link del ovs-vxlan0"
+ ovs_wait ip link show ovs-vxlan0 &>/dev/null || return 1
+ ovs_sbx "${sbxname}" ip link set ovs-vxlan0 up || return 1
+
+ configs=$(echo '
+ 1 172.31.221.1/24 1155332 32 set udpcsum flags\(df\|csum\)
+ 2 172.31.222.1/24 1234567 45 set noudpcsum flags\(df\)
+ 3 172.31.223.1/24 1020304 23 unset udpcsum flags\(csum\)
+ 4 172.31.224.1/24 1357986 15 unset noudpcsum' | sed '/^$/d')
+
+ while read -r i addr id ttl df csum flags; do
+ ovs_add_kernel_tunnel "${sbxname}" tns vxlan vxlan${i} ${addr} \
+ remote 172.31.110.2 id ${id} dstport 4789 \
+ ttl ${ttl} df ${df} ${csum} || return 1
+ done <<< "${configs}"
+
+ ovs_wait grep -q 'listening on upcall packet handler' \
+ ${ovs_dir}/ovs-vxlan0.out || return 1
+
+ info "sending arping"
+ for i in 1 2 3 4; do
+ ovs_sbx "${sbxname}" ip netns exec tns \
+ arping -I vxlan${i} 172.31.22${i}.2 -c 1 \
+ >${ovs_dir}/arping.stdout 2>${ovs_dir}/arping.stderr
+ done
+
+ info "checking that received decapsulated packets carry correct metadata"
+ while read -r i addr id ttl df csum flags; do
+ arp_hdr="arp\\(sip=172.31.22${i}.1,tip=172.31.22${i}.2,op=1,sha="
+ addrs="src=172.31.110.1,dst=172.31.110.2"
+ ports="tp_src=[0-9]*,tp_dst=4789"
+ tnl_md="tunnel\\(tun_id=${id},${addrs},ttl=${ttl},${ports},${flags}\\)"
+
+ ovs_sbx "${sbxname}" grep -qE "MISS upcall.*${tnl_md}.*${arp_hdr}" \
+ ${ovs_dir}/ovs-vxlan0.out || return 1
+ done <<< "${configs}"
+
+ return 0
+}
+
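The metadata check above is a single grep over the upcall log; the regex it builds is easier to see expanded. A Python sketch of the same match, using an illustrative log line rather than output captured from a real run:

    import re

    # Sketch: match a decapsulated-upcall line the way the grep above does.
    # The sample line below is illustrative, not from a real run.
    def tunnel_md_regex(tun_id, src, dst, ttl, flags):
        addrs = f"src={src},dst={dst}"
        ports = r"tp_src=[0-9]*,tp_dst=4789"
        return re.compile(
            rf"MISS upcall.*tunnel\(tun_id={tun_id},{addrs},ttl={ttl},{ports},{flags}\)")

    line = ("MISS upcall ... tunnel(tun_id=1155332,src=172.31.110.1,"
            "dst=172.31.110.2,ttl=32,tp_src=51000,tp_dst=4789,flags(df|csum)) ...")
    assert tunnel_md_regex(1155332, "172.31.110.1", "172.31.110.2",
                           32, r"flags\(df\|csum\)").search(line)
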
run_test() {
(
tname="$1"
diff --git a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
index 8a0396bfaf99..b521e0dea506 100644
--- a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
+++ b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
@@ -1877,7 +1877,7 @@ class OvsPacket(GenericNetlinkSocket):
elif msg["cmd"] == OvsPacket.OVS_PACKET_CMD_EXECUTE:
up.execute(msg)
else:
- print("Unkonwn cmd: %d" % msg["cmd"])
+ print("Unknown cmd: %d" % msg["cmd"])
except NetlinkError as ne:
raise ne
diff --git a/tools/testing/selftests/net/ovpn/Makefile b/tools/testing/selftests/net/ovpn/Makefile
index e0926d76b4c8..dbe0388c8512 100644
--- a/tools/testing/selftests/net/ovpn/Makefile
+++ b/tools/testing/selftests/net/ovpn/Makefile
@@ -19,13 +19,15 @@ LDLIBS += $(VAR_LDLIBS)
TEST_FILES = common.sh
-TEST_PROGS = test.sh \
- test-large-mtu.sh \
+TEST_PROGS := \
test-chachapoly.sh \
- test-tcp.sh \
- test-float.sh \
+ test-close-socket-tcp.sh \
test-close-socket.sh \
- test-close-socket-tcp.sh
+ test-float.sh \
+ test-large-mtu.sh \
+ test-tcp.sh \
+ test.sh \
+# end of TEST_PROGS
TEST_GEN_FILES := ovpn-cli
diff --git a/tools/testing/selftests/net/ovpn/config b/tools/testing/selftests/net/ovpn/config
index 71946ba9fa17..42699740936d 100644
--- a/tools/testing/selftests/net/ovpn/config
+++ b/tools/testing/selftests/net/ovpn/config
@@ -1,10 +1,10 @@
-CONFIG_NET=y
-CONFIG_INET=y
-CONFIG_STREAM_PARSER=y
-CONFIG_NET_UDP_TUNNEL=y
-CONFIG_DST_CACHE=y
CONFIG_CRYPTO=y
CONFIG_CRYPTO_AES=y
-CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_CHACHA20POLY1305=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_DST_CACHE=y
+CONFIG_INET=y
+CONFIG_NET=y
+CONFIG_NET_UDP_TUNNEL=y
CONFIG_OVPN=m
+CONFIG_STREAM_PARSER=y
diff --git a/tools/testing/selftests/net/ovpn/ovpn-cli.c b/tools/testing/selftests/net/ovpn/ovpn-cli.c
index de9c26f98b2e..0f3babf19fd0 100644
--- a/tools/testing/selftests/net/ovpn/ovpn-cli.c
+++ b/tools/testing/selftests/net/ovpn/ovpn-cli.c
@@ -32,9 +32,10 @@
#include <sys/socket.h>
+#include "kselftest.h"
+
/* defines to make checkpatch happy */
#define strscpy strncpy
-#define __always_unused __attribute__((__unused__))
/* libnl < 3.5.0 does not set the NLA_F_NESTED on its own, therefore we
* have to explicitly do it to prevent the kernel from failing upon
@@ -1586,6 +1587,7 @@ static int ovpn_listen_mcast(void)
sock = nl_socket_alloc();
if (!sock) {
fprintf(stderr, "cannot allocate netlink socket\n");
+ ret = -ENOMEM;
goto err_free;
}
@@ -2105,6 +2107,7 @@ static int ovpn_run_cmd(struct ovpn_ctx *ovpn)
ret = ovpn_listen_mcast();
break;
case CMD_INVALID:
+ ret = -EINVAL;
break;
}
@@ -2166,6 +2169,7 @@ static int ovpn_parse_cmd_args(struct ovpn_ctx *ovpn, int argc, char *argv[])
ovpn->peers_file = argv[4];
+ ovpn->sa_family = AF_INET;
if (argc > 5 && !strcmp(argv[5], "ipv6"))
ovpn->sa_family = AF_INET6;
break;
diff --git a/tools/testing/selftests/net/ovpn/test-large-mtu.sh b/tools/testing/selftests/net/ovpn/test-large-mtu.sh
new file mode 100755
index 000000000000..ce2a2cb64f72
--- /dev/null
+++ b/tools/testing/selftests/net/ovpn/test-large-mtu.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2025 OpenVPN, Inc.
+#
+# Author: Antonio Quartulli <antonio@openvpn.net>
+
+MTU="1500"
+
+source test.sh
diff --git a/tools/testing/selftests/net/packetdrill/Makefile b/tools/testing/selftests/net/packetdrill/Makefile
index 31cfb666ba8b..ff54641493e9 100644
--- a/tools/testing/selftests/net/packetdrill/Makefile
+++ b/tools/testing/selftests/net/packetdrill/Makefile
@@ -1,9 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
-TEST_INCLUDES := ksft_runner.sh \
- defaults.sh \
- set_sysctls.py \
- ../../kselftest/ktap_helpers.sh
+TEST_INCLUDES := \
+ defaults.sh \
+ ksft_runner.sh \
+ set_sysctls.py \
+ ../../kselftest/ktap_helpers.sh \
+# end of TEST_INCLUDES
TEST_PROGS := $(wildcard *.pkt)
diff --git a/tools/testing/selftests/net/packetdrill/config b/tools/testing/selftests/net/packetdrill/config
index 0237ed98f3c0..c4a19a785521 100644
--- a/tools/testing/selftests/net/packetdrill/config
+++ b/tools/testing/selftests/net/packetdrill/config
@@ -1,6 +1,6 @@
-CONFIG_IPV6=y
-CONFIG_HZ_1000=y
CONFIG_HZ=1000
+CONFIG_HZ_1000=y
+CONFIG_IPV6=y
CONFIG_NET_NS=y
CONFIG_NET_SCH_FIFO=y
CONFIG_NET_SCH_FQ=y
diff --git a/tools/testing/selftests/net/packetdrill/defaults.sh b/tools/testing/selftests/net/packetdrill/defaults.sh
index 1095a7b22f44..37edd3dc3b07 100755
--- a/tools/testing/selftests/net/packetdrill/defaults.sh
+++ b/tools/testing/selftests/net/packetdrill/defaults.sh
@@ -51,7 +51,8 @@ sysctl -q net.ipv4.tcp_pacing_ss_ratio=200
sysctl -q net.ipv4.tcp_pacing_ca_ratio=120
sysctl -q net.ipv4.tcp_notsent_lowat=4294967295 > /dev/null 2>&1
-sysctl -q net.ipv4.tcp_fastopen=0x70403
+sysctl -q net.ipv4.tcp_fastopen=0x3
+# Use TFO_COOKIE in ksft_runner.sh for this key.
sysctl -q net.ipv4.tcp_fastopen_key=a1a1a1a1-b2b2b2b2-c3c3c3c3-d4d4d4d4
sysctl -q net.ipv4.tcp_syncookies=1
diff --git a/tools/testing/selftests/net/packetdrill/ksft_runner.sh b/tools/testing/selftests/net/packetdrill/ksft_runner.sh
index ef8b25a606d8..b34e5cf0112e 100755
--- a/tools/testing/selftests/net/packetdrill/ksft_runner.sh
+++ b/tools/testing/selftests/net/packetdrill/ksft_runner.sh
@@ -3,21 +3,26 @@
source "$(dirname $(realpath $0))/../../kselftest/ktap_helpers.sh"
-readonly ipv4_args=('--ip_version=ipv4 '
- '--local_ip=192.168.0.1 '
- '--gateway_ip=192.168.0.1 '
- '--netmask_ip=255.255.0.0 '
- '--remote_ip=192.0.2.1 '
- '-D CMSG_LEVEL_IP=SOL_IP '
- '-D CMSG_TYPE_RECVERR=IP_RECVERR ')
-
-readonly ipv6_args=('--ip_version=ipv6 '
- '--mtu=1520 '
- '--local_ip=fd3d:0a0b:17d6::1 '
- '--gateway_ip=fd3d:0a0b:17d6:8888::1 '
- '--remote_ip=fd3d:fa7b:d17d::1 '
- '-D CMSG_LEVEL_IP=SOL_IPV6 '
- '-D CMSG_TYPE_RECVERR=IPV6_RECVERR ')
+declare -A ip_args=(
+ [ipv4]="--ip_version=ipv4
+ --local_ip=192.168.0.1
+ --gateway_ip=192.168.0.1
+ --netmask_ip=255.255.0.0
+ --remote_ip=192.0.2.1
+ -D TFO_COOKIE=3021b9d889017eeb
+ -D TFO_COOKIE_ZERO=b7c12350a90dc8f5
+ -D CMSG_LEVEL_IP=SOL_IP
+ -D CMSG_TYPE_RECVERR=IP_RECVERR"
+ [ipv6]="--ip_version=ipv6
+ --mtu=1520
+ --local_ip=fd3d:0a0b:17d6::1
+ --gateway_ip=fd3d:0a0b:17d6:8888::1
+ --remote_ip=fd3d:fa7b:d17d::1
+ -D TFO_COOKIE=c1d1e9742a47a9bc
+ -D TFO_COOKIE_ZERO=82af1a8f9a205c34
+ -D CMSG_LEVEL_IP=SOL_IPV6
+ -D CMSG_TYPE_RECVERR=IPV6_RECVERR"
+)
if [ $# -ne 1 ]; then
ktap_exit_fail_msg "usage: $0 <script>"
@@ -35,28 +40,23 @@ failfunc=ktap_test_fail
if [[ -n "${KSFT_MACHINE_SLOW}" ]]; then
optargs+=('--tolerance_usecs=14000')
+ failfunc=ktap_test_xfail
+fi
- # xfail tests that are known flaky with dbg config, not fixable.
- # still run them for coverage (and expect 100% pass without dbg).
- declare -ar xfail_list=(
- "tcp_eor_no-coalesce-retrans.pkt"
- "tcp_fast_recovery_prr-ss.*.pkt"
- "tcp_slow_start_slow-start-after-win-update.pkt"
- "tcp_timestamping.*.pkt"
- "tcp_user_timeout_user-timeout-probe.pkt"
- "tcp_zerocopy_epoll_.*.pkt"
- "tcp_tcp_info_tcp-info-.*-limited.pkt"
- )
- readonly xfail_regex="^($(printf '%s|' "${xfail_list[@]}"))$"
- [[ "$script" =~ ${xfail_regex} ]] && failfunc=ktap_test_xfail
+ip_versions=$(grep -E '^--ip_version=' $script | cut -d '=' -f 2)
+if [[ -z $ip_versions ]]; then
+ ip_versions="ipv4 ipv6"
+elif [[ ! "$ip_versions" =~ ^ipv[46]$ ]]; then
+ ktap_exit_fail_msg "Too many or unsupported --ip_version: $ip_versions"
+ exit "$KSFT_FAIL"
fi
ktap_print_header
-ktap_set_plan 2
+ktap_set_plan $(echo $ip_versions | wc -w)
-unshare -n packetdrill ${ipv4_args[@]} ${optargs[@]} $script > /dev/null \
- && ktap_test_pass "ipv4" || $failfunc "ipv4"
-unshare -n packetdrill ${ipv6_args[@]} ${optargs[@]} $script > /dev/null \
- && ktap_test_pass "ipv6" || $failfunc "ipv6"
+for ip_version in $ip_versions; do
+ unshare -n packetdrill ${ip_args[$ip_version]} ${optargs[@]} $script > /dev/null \
+ && ktap_test_pass $ip_version || $failfunc $ip_version
+done
ktap_finished
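
The rewritten runner keys everything off an optional --ip_version directive embedded in the .pkt script itself: absent, the test runs under both address families; present exactly once, only that family runs; anything else aborts. A sketch of the same selection logic in Python (the file handling is hypothetical):

    import re, sys

    # Sketch of the runner's version-selection rule.
    def select_ip_versions(script_path: str) -> list[str]:
        with open(script_path) as f:
            pinned = [m.group(1)
                      for m in (re.match(r"--ip_version=(\S+)", line)
                                for line in f) if m]
        if not pinned:
            return ["ipv4", "ipv6"]          # run under both families
        if pinned == ["ipv4"] or pinned == ["ipv6"]:
            return pinned                    # script pins one family
        sys.exit(f"Too many or unsupported --ip_version: {pinned}")
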
diff --git a/tools/testing/selftests/net/packetdrill/tcp_blocking_blocking-read.pkt b/tools/testing/selftests/net/packetdrill/tcp_blocking_blocking-read.pkt
index 914eabab367a..657e42ca65b5 100644
--- a/tools/testing/selftests/net/packetdrill/tcp_blocking_blocking-read.pkt
+++ b/tools/testing/selftests/net/packetdrill/tcp_blocking_blocking-read.pkt
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
// Test for blocking read.
+
--tolerance_usecs=10000
+--mss=1000
`./defaults.sh`
diff --git a/tools/testing/selftests/net/packetdrill/tcp_close_no_rst.pkt b/tools/testing/selftests/net/packetdrill/tcp_close_no_rst.pkt
new file mode 100644
index 000000000000..eef01d5f1118
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_close_no_rst.pkt
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+
+--mss=1000
+
+`./defaults.sh`
+
+// Initialize connection
+ 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +0 < S 0:0(0) win 32792 <mss 1000,sackOK,nop,nop>
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK>
+ +.1 < . 1:1(0) ack 1 win 32792
+
+
+ +0 accept(3, ..., ...) = 4
+ +0 < . 1:1001(1000) ack 1 win 32792
+ +0 > . 1:1(0) ack 1001
+ +0 read(4, ..., 1000) = 1000
+
+// resend the payload + a FIN
+ +0 < F. 1:1001(1000) ack 1 win 32792
+// Why do we have a delay and no DSACK?
+ +0~+.04 > . 1:1(0) ack 1002
+
+ +0 close(4) = 0
+
+// According to RFC 2525, section 2.17
+// we should _not_ send an RST here, because there was no data to consume.
+ +0 > F. 1:1(0) ack 1002
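
The rule being checked comes from RFC 2525, section 2.17: close() only turns into a RST when received data is left unconsumed; here every byte was read, so the stack sends a normal FIN. A sketch of the same behavior over loopback (the FIN itself would only be visible in a packet capture; userspace just sees an orderly EOF):

    import socket

    # Sketch: close() after consuming all inbound data sends FIN, not RST.
    srv = socket.socket()
    srv.bind(("127.0.0.1", 0))
    srv.listen(1)
    cli = socket.create_connection(srv.getsockname())
    conn, _ = srv.accept()
    cli.sendall(b"x" * 1000)
    buf = b""
    while len(buf) < 1000:                  # consume every byte
        buf += conn.recv(1000 - len(buf))
    conn.close()                            # no unread data -> FIN, not RST
    assert cli.recv(1) == b""               # peer sees an orderly EOF
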
diff --git a/tools/testing/selftests/net/packetdrill/tcp_dsack_mult.pkt b/tools/testing/selftests/net/packetdrill/tcp_dsack_mult.pkt
new file mode 100644
index 000000000000..c790d0af635e
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_dsack_mult.pkt
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+// Test various DSACK (RFC 2883) behaviors.
+
+--mss=1000
+
+`./defaults.sh`
+
+
+ 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +0 < S 0:0(0) win 32792 <mss 1000,sackOK,nop,nop,nop,wscale 7>
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK,nop,wscale 8>
+ +.1 < . 1:1(0) ack 1 win 1024
+ +0 accept(3, ..., ...) = 4
+
+// First SACK range.
+ +0 < P. 1001:2001(1000) ack 1 win 1024
+ +0 > . 1:1(0) ack 1 <nop, nop, sack 1001:2001>
+
+// Check SACK coalescing (contiguous sequence).
+ +0 < P. 2001:3001(1000) ack 1 win 1024
+ +0 > . 1:1(0) ack 1 <nop,nop,sack 1001:3001>
+
+// Check we have two SACK ranges for non-contiguous sequences.
+ +0 < P. 4001:5001(1000) ack 1 win 1024
+ +0 > . 1:1(0) ack 1 <nop,nop,sack 4001:5001 1001:3001>
+
+// Three ranges.
+ +0 < P. 7001:8001(1000) ack 1 win 1024
+ +0 > . 1:1(0) ack 1 <nop,nop,sack 7001:8001 4001:5001 1001:3001>
+
+// DSACK (1001:3001) + SACK (6001:7001)
+ +0 < P. 1:6001(6000) ack 1 win 1024
+ +0 > . 1:1(0) ack 6001 <nop,nop,sack 1001:3001 7001:8001>
+
+// DSACK (7001:8001)
+ +0 < P. 6001:8001(2000) ack 1 win 1024
+ +0 > . 1:1(0) ack 8001 <nop,nop,sack 7001:8001>
+
+// DSACK for an older segment.
+ +0 < P. 1:1001(1000) ack 1 win 1024
+ +0 > . 1:1(0) ack 8001 <nop,nop,sack 1:1001>
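
The first half of the script exercises the receiver's SACK-block bookkeeping: a new out-of-order block is merged into any range it touches, otherwise it is prepended as the most recent block (RFC 2018 ordering), which yields exactly the reply ordering above. A small sketch of that invariant:

    # Sketch of the SACK-range bookkeeping the test exercises: a new
    # out-of-order block is coalesced with an adjacent range, otherwise
    # it is prepended as the most recent block.
    def add_sack_block(blocks, start, end):
        for i, (s, e) in enumerate(blocks):
            if start <= e and end >= s:          # touching or overlapping
                merged = (min(s, start), max(e, end))
                return [merged] + blocks[:i] + blocks[i + 1:]
        return [(start, end)] + blocks

    blocks = add_sack_block([], 1001, 2001)       # [(1001, 2001)]
    blocks = add_sack_block(blocks, 2001, 3001)   # coalesced: [(1001, 3001)]
    blocks = add_sack_block(blocks, 4001, 5001)   # new range goes first
    blocks = add_sack_block(blocks, 7001, 8001)
    assert blocks == [(7001, 8001), (4001, 5001), (1001, 3001)]
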
diff --git a/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-cookie-not-reqd.pkt b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-cookie-not-reqd.pkt
new file mode 100644
index 000000000000..32aff9bc4052
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-cookie-not-reqd.pkt
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Basic TFO server test
+//
+// Test TFO_SERVER_COOKIE_NOT_REQD flag on receiving
+// SYN with data but without Fast Open cookie option.
+
+`./defaults.sh
+ ./set_sysctls.py /proc/sys/net/ipv4/tcp_fastopen=0x202`
+
+ 0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+ +0 setsockopt(3, SOL_TCP, TCP_FASTOPEN, [1], 4) = 0
+
+// Since TFO_SERVER_COOKIE_NOT_REQD is set, a TFO socket will be created
+// with the data accepted.
+ +0 < S 0:1000(1000) win 32792 <mss 1460,sackOK,nop,nop>
+ +0 > S. 0:0(0) ack 1001 <mss 1460,nop,nop,sackOK>
+ +0 accept(3, ..., ...) = 4
+ +0 %{ assert (tcpi_options & TCPI_OPT_SYN_DATA) != 0, tcpi_options }%
+ +0 read(4, ..., 1024) = 1000
+
+// Data After SYN will be accepted too.
+ +0 < . 1001:2001(1000) ack 1 win 5840
+ +0 > . 1:1(0) ack 2001
+
+// Should change the implementation later to set the SYN flag as well.
+ +0 read(4, ..., 1024) = 1000
+ +0 write(4, ..., 1000) = 1000
+ +0 > P. 1:1001(1000) ack 2001
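
These server tests steer behavior through the net.ipv4.tcp_fastopen bitmask (0x202 here, 0x402 in the next test). A sketch decoding the bits, using the values documented in Documentation/networking/ip-sysctl.rst:

    # Sketch: decode the net.ipv4.tcp_fastopen bitmask used by these tests.
    TFO_FLAGS = {
        0x1:   "client enable",
        0x2:   "server enable",
        0x4:   "client sends data in SYN without a cookie",
        0x200: "TFO_SERVER_COOKIE_NOT_REQD (accept SYN data without cookie)",
        0x400: "TFO_SERVER_WO_SOCKOPT1 (server TFO without TCP_FASTOPEN sockopt)",
    }

    def decode_tcp_fastopen(value: int) -> list[str]:
        return [name for bit, name in TFO_FLAGS.items() if value & bit]

    assert decode_tcp_fastopen(0x202) == [
        "server enable",
        "TFO_SERVER_COOKIE_NOT_REQD (accept SYN data without cookie)",
    ]
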
diff --git a/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-no-setsockopt.pkt b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-no-setsockopt.pkt
new file mode 100644
index 000000000000..649997a58099
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-no-setsockopt.pkt
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Basic TFO server test
+//
+// Test TFO_SERVER_WO_SOCKOPT1 without setsockopt(TCP_FASTOPEN)
+
+`./defaults.sh
+ ./set_sysctls.py /proc/sys/net/ipv4/tcp_fastopen=0x402`
+
+ 0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +0 < S 0:10(10) win 32792 <mss 1460,sackOK,nop,nop,FO TFO_COOKIE,nop,nop>
+ +0 > S. 0:0(0) ack 11 <mss 1460,nop,nop,sackOK>
+
+ +0 accept(3, ..., ...) = 4
+ +0 %{ assert (tcpi_options & TCPI_OPT_SYN_DATA) != 0, tcpi_options }%
+
+ +0 read(4, ..., 512) = 10
diff --git a/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-non-tfo-listener.pkt b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-non-tfo-listener.pkt
new file mode 100644
index 000000000000..4a00e0d994f2
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-non-tfo-listener.pkt
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Basic TFO server test
+//
+// Server w/o TCP_FASTOPEN socket option
+
+`./defaults.sh`
+
+ 0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +0 < S 0:10(10) win 32792 <mss 1460,sackOK,FO TFO_COOKIE>
+
+// Data is ignored since TCP_FASTOPEN is not set on the listener
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK>
+
+ +0 accept(3, ..., ...) = -1 EAGAIN (Resource temporarily unavailable)
+
+// The above accept() keeps failing until the ACK below completes the 3WHS.
+ +0 < . 1:31(30) ack 1 win 5840
+ +0 accept(3, ..., ...) = 4
+
+ +0 %{ assert (tcpi_options & TCPI_OPT_SYN_DATA) == 0, tcpi_options }%
+ +0 read(4, ..., 512) = 30
diff --git a/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-pure-syn-data.pkt b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-pure-syn-data.pkt
new file mode 100644
index 000000000000..345ed26ff7f8
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-pure-syn-data.pkt
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Basic TFO server test
+//
+// Test that a TFO-enabled server does not put any TFO option in its SYN-ACK
+// when receiving pure SYN-data (no cookie). It should respond with a plain SYN-ACK.
+
+`./defaults.sh`
+
+ 0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+ +0 setsockopt(3, SOL_TCP, TCP_FASTOPEN, [1], 4) = 0
+
+ +0 < S 999000:999040(40) win 32792 <mss 1460,sackOK,TS val 100 ecr 100,nop,wscale 6>
+ +0 > S. 1234:1234(0) ack 999001 <mss 1460,sackOK,TS val 100 ecr 100,nop,wscale 8>
+ +0 < . 1:1(0) ack 1 win 100
+ +0 accept(3, ..., ...) = 4
+ +0 %{ assert (tcpi_options & TCPI_OPT_SYN_DATA) == 0, tcpi_options }%
+ +0 close(3) = 0
+
+// Test ECN-setup SYN with ECN disabled because this has happened in reality
+ +0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+ +0 setsockopt(3, SOL_TCP, TCP_FASTOPEN, [1], 4) = 0
+
+ +0 < SEW 999000:999040(40) win 32792 <mss 1460,sackOK,TS val 100 ecr 100,nop,wscale 6>
+ +0 > S. 1234:1234(0) ack 999001 <mss 1460,sackOK,TS val 100 ecr 100,nop,wscale 8>
+ +0 < . 1:1(0) ack 1 win 100
+ +0 accept(3, ..., ...) = 4
+ +0 %{ assert (tcpi_options & TCPI_OPT_SYN_DATA) == 0, tcpi_options }%
+ +0 close(3) = 0
+
+// Test ECN-setup SYN w/ ECN enabled
+ +0 `sysctl -q net.ipv4.tcp_ecn=2`
+ +0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+ +0 setsockopt(3, SOL_TCP, TCP_FASTOPEN, [1], 4) = 0
+
+ +0 < SEW 999000:999040(40) win 32792 <mss 1460,sackOK,TS val 100 ecr 100,nop,wscale 6>
+ +0 > SE. 1234:1234(0) ack 999001 <mss 1460,sackOK,TS val 100 ecr 100,nop,wscale 8>
+ +0 < . 1:1(0) ack 1 win 100
+ +0 accept(3, ..., ...) = 4
+ +0 %{ assert (tcpi_options & TCPI_OPT_SYN_DATA) == 0, tcpi_options }%
+ +0 close(3) = 0
diff --git a/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-rw.pkt b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-rw.pkt
new file mode 100644
index 000000000000..98e6f84497cd
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-rw.pkt
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Basic TFO server test
+//
+// Test TFO server with SYN that has TFO cookie and data.
+
+`./defaults.sh`
+
+ 0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+ +0 setsockopt(3, SOL_TCP, TCP_FASTOPEN, [1], 4) = 0
+
+ +0 < S 0:10(10) win 32792 <mss 1460,sackOK,nop,nop,FO TFO_COOKIE,nop,nop>
+ +0 > S. 0:0(0) ack 11 <mss 1460,nop,nop,sackOK>
+
+ +0 accept(3, ..., ...) = 4
+ +0 %{ assert (tcpi_options & TCPI_OPT_SYN_DATA) != 0, tcpi_options }%
+
+ +0 read(4, ..., 512) = 10
+ +0 write(4, ..., 100) = 100
+ +0 > P. 1:101(100) ack 11
diff --git a/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-zero-payload.pkt b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-zero-payload.pkt
new file mode 100644
index 000000000000..95b1047ffdd5
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_basic-zero-payload.pkt
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Basic TFO server test
+//
+// Test zero-payload packet w/ valid TFO cookie - a TFO socket will
+// still be created and accepted but read() will not return until a
+// later pkt with 10 bytes arrives.
+
+`./defaults.sh`
+
+ 0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+ +0 setsockopt(3, SOL_TCP, TCP_FASTOPEN, [1], 4) = 0
+
+ +0 < S 0:0(0) win 32792 <mss 1460,sackOK,nop,nop,FO TFO_COOKIE,nop,nop>
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK>
+ +0 accept(3, ..., ...) = 4
+ +0 %{ assert (tcpi_options & TCPI_OPT_SYN_DATA) == 0, tcpi_options }%
+
+// A TFO socket is created and is writable.
+ +0 write(4, ..., 100) = 100
+ +0 > P. 1:101(100) ack 1
+ +0...0.300 read(4, ..., 512) = 10
+ +.3 < P. 1:11(10) ack 1 win 5840
diff --git a/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_client-ack-dropped-then-recovery-ms-timestamps.pkt b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_client-ack-dropped-then-recovery-ms-timestamps.pkt
new file mode 100644
index 000000000000..f75efd51ed0c
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_client-ack-dropped-then-recovery-ms-timestamps.pkt
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// A reproducer case for a TFO SYNACK RTO undo bug in:
+// 794200d66273 ("tcp: undo cwnd on Fast Open spurious SYNACK retransmit")
+// The sequence that tickles this bug is:
+// - Fast Open server receives TFO SYN with data, sends SYNACK
+// - (client receives SYNACK and sends ACK, but ACK is lost)
+// - server app sends some data packets
+// - (N of the first data packets are lost)
+// - server receives client ACK that has a TS ECR matching first SYNACK,
+// and also SACKs suggesting the first N data packets were lost
+// - server performs undo of SYNACK RTO, then immediately enters recovery
+// - buggy behavior in 794200d66273 then performed an undo that caused
+// the connection to be in a bad state, in CA_Open with retrans_out != 0
+
+// Check that outbound TS Val ticks are as we would expect with 1000 usec per
+// timestamp tick:
+--tcp_ts_tick_usecs=1000
+
+`./defaults.sh`
+
+// Initialize connection
+ 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 setsockopt(3, SOL_TCP, TCP_FASTOPEN, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +0 < S 0:1000(1000) win 65535 <mss 1012,sackOK,TS val 1000 ecr 0,wscale 7,nop,nop,nop,FO TFO_COOKIE>
+ +0 > S. 0:0(0) ack 1001 <mss 1460,sackOK,TS val 2000 ecr 1000,nop,wscale 8>
+ +0 accept(3, ..., ...) = 4
+
+// Application writes more data
+ +.010 write(4, ..., 10000) = 10000
+ +0 > P. 1:5001(5000) ack 1001 <nop,nop,TS val 2010 ecr 1000>
+ +0 > P. 5001:10001(5000) ack 1001 <nop,nop,TS val 2010 ecr 1000>
+ +0 %{ assert tcpi_snd_cwnd == 10, tcpi_snd_cwnd }%
+
+ +0 < . 1001:1001(0) ack 1 win 257 <TS val 1010 ecr 2000,sack 2001:5001>
+ +0 > P. 1:2001(2000) ack 1001 <nop,nop,TS val 2010 ecr 1010>
+ +0 %{ assert tcpi_ca_state == TCP_CA_Recovery, tcpi_ca_state }%
+ +0 %{ assert tcpi_snd_cwnd == 7, tcpi_snd_cwnd }%
+
+ +0 < . 1001:1001(0) ack 1 win 257 <TS val 1011 ecr 2000,sack 2001:6001>
+ +0 %{ assert tcpi_ca_state == TCP_CA_Recovery, tcpi_ca_state }%
+ +0 %{ assert tcpi_snd_cwnd == 7, tcpi_snd_cwnd }%
diff --git a/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_experimental_option.pkt b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_experimental_option.pkt
new file mode 100644
index 000000000000..c3cb0e8bdcf8
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_experimental_option.pkt
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Test the Experimental Option
+//
+// A SYN w/ FOEXP but w/o a cookie must generate a SYN+ACK w/ FOEXP
+// carrying a valid cookie, and that cookie must be the same as the
+// one generated by the IANA FO option
+
+`./defaults.sh`
+
+// Request a TFO cookie by Experimental Option
+// This must generate the same TFO_COOKIE
+ 0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+ +0 setsockopt(3, SOL_TCP, TCP_FASTOPEN, [1], 4) = 0
+
+ +0 < S 0:10(10) win 32792 <mss 1460,sackOK,nop,nop,FOEXP>
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK,FOEXP TFO_COOKIE>
+
+ +0 close(3) = 0
+
+// Test if FOEXP with a valid cookie creates a TFO socket
+ 0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+ +0 setsockopt(3, SOL_TCP, TCP_FASTOPEN, [1], 4) = 0
+
+ +0 < S 0:10(10) win 32792 <mss 1460,sackOK,nop,nop,FOEXP TFO_COOKIE>
+ +0 > S. 0:0(0) ack 11 <mss 1460,nop,nop,sackOK>
+
+ +0 accept(3, ..., ...) = 4
+ +0 %{ assert (tcpi_options & TCPI_OPT_SYN_DATA) != 0, tcpi_options }%
+
+ +0 read(4, ..., 512) = 10
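
The two encodings this test compares differ only in framing: the IANA Fast Open option uses its own kind (34, RFC 7413), while the experimental form rides on the shared experimental kind 254 with the 0xF989 magic. A sketch of the two layouts, with a placeholder cookie:

    # Sketch of the two wire encodings compared above. Cookie bytes are
    # illustrative placeholders, not a real kernel-generated cookie.
    def fo_option(cookie: bytes) -> bytes:
        return bytes([34, 2 + len(cookie)]) + cookie

    def foexp_option(cookie: bytes) -> bytes:
        return bytes([254, 4 + len(cookie)]) + b"\xf9\x89" + cookie

    cookie = bytes(range(8))                  # placeholder 8-byte cookie
    assert fo_option(cookie)[1] == 10         # kind + len + 8 cookie bytes
    assert foexp_option(cookie)[1] == 12      # 2 extra bytes for the magic
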
diff --git a/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_fin-close-socket.pkt b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_fin-close-socket.pkt
new file mode 100644
index 000000000000..dc09f8d9a381
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_fin-close-socket.pkt
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Send a FIN pkt with the ACK bit to a TFO socket.
+// The socket will go to TCP_CLOSE_WAIT state and data can be
+// read until the socket is closed, at which time a FIN will be sent.
+
+`./defaults.sh`
+
+ 0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+ +0 setsockopt(3, SOL_TCP, TCP_FASTOPEN, [1], 4) = 0
+
+ +0 < S 0:10(10) win 32792 <mss 1460,sackOK,nop,nop,FO TFO_COOKIE,nop,nop>
+ +0 > S. 0:0(0) ack 11 <mss 1460,nop,nop,sackOK>
+
+// FIN is acked and the socket goes to TCP_CLOSE_WAIT state
+// in tcp_fin() called from tcp_data_queue().
+ +0 < F. 11:11(0) ack 1 win 32792
+ +0 > . 1:1(0) ack 12
+
+ +0 accept(3, ..., ...) = 4
+ +0 %{ assert (tcpi_options & TCPI_OPT_SYN_DATA) != 0, tcpi_options }%
+ +0 %{ assert tcpi_state == TCP_CLOSE_WAIT, tcpi_state }%
+
+ +0 read(4, ..., 512) = 10
+ +0 close(4) = 0
+ +0 > F. 1:1(0) ack 12
+ * > F. 1:1(0) ack 12
diff --git a/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_icmp-before-accept.pkt b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_icmp-before-accept.pkt
new file mode 100644
index 000000000000..d5543672e2bd
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_icmp-before-accept.pkt
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Send an ICMP host_unreachable pkt to a pending SYN_RECV req.
+//
+// If it's a TFO req, the ICMP error will cause it to switch
+// to TCP_CLOSE state, but it remains in the accept queue.
+
+--ip_version=ipv4
+
+`./defaults.sh`
+
+ 0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+ +0 setsockopt(3, SOL_TCP, TCP_FASTOPEN, [1], 4) = 0
+
+ +0 < S 0:10(10) win 32792 <mss 1460,sackOK,nop,nop,FO TFO_COOKIE,nop,nop>
+ +0 > S. 0:0(0) ack 11 <mss 1460,nop,nop,sackOK>
+
+// Out-of-window icmp is ignored but accounted.
+ +0 `nstat > /dev/null`
+ +0 < icmp unreachable [5000:6000(1000)]
+ +0 `nstat | grep TcpExtOutOfWindowIcmps > /dev/null`
+
+// Valid ICMP unreach.
+ +0 < icmp unreachable host_unreachable [0:10(10)]
+
+// Unlike the non-TFO case, the req is still there to be accepted.
+ +0 accept(3, ..., ...) = 4
+ +0 %{ assert (tcpi_options & TCPI_OPT_SYN_DATA) != 0, tcpi_options }%
+
+// tcp_done_with_error() in tcp_v4_err() sets sk->sk_state
+// to TCP_CLOSE
+ +0 %{ assert tcpi_state == TCP_CLOSE, tcpi_state }%
+
+// The 1st read will succeed and return the data in SYN
+ +0 read(4, ..., 512) = 10
+
+// The 2nd read will fail.
+ +0 read(4, ..., 512) = -1 EHOSTUNREACH (No route to host)
+
+// But it is no longer writable because it's in TCP_CLOSE state.
+ +0 write(4, ..., 100) = -1 EPIPE (Broken pipe)
+
+// inbound pkt will trigger RST because the socket has been moved
+// off the TCP hash tables.
+ +0 < . 1:1(0) ack 1 win 32792
+ +0 > R 1:1(0)
diff --git a/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_reset-after-accept.pkt b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_reset-after-accept.pkt
new file mode 100644
index 000000000000..040d5547ed80
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_reset-after-accept.pkt
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Send a RST to a TFO socket after it has been accepted.
+//
+// The first read() will return all the data, which is consistent
+// with the non-TFO case. The second read() will return -1.
+
+`./defaults.sh`
+
+ 0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+ +0 setsockopt(3, SOL_TCP, TCP_FASTOPEN, [1], 4) = 0
+
+ +0 < S 0:10(10) win 32792 <mss 1460,sackOK,nop,nop,FO TFO_COOKIE,nop,nop>
+ +0 > S. 0:0(0) ack 11 <mss 1460,nop,nop,sackOK>
+
+ +0 accept(3, ..., ...) = 4
+ +0 %{ assert (tcpi_options & TCPI_OPT_SYN_DATA) != 0, tcpi_options }%
+ +0 %{ assert tcpi_state == TCP_SYN_RECV, tcpi_state }%
+
+// 1st read will return the data from SYN.
+// tcp_reset() sets sk->sk_err to ECONNRESET for SYN_RECV.
+ +0 < R. 11:11(0) win 32792
+ +0 %{ assert tcpi_state == TCP_CLOSE, tcpi_state }%
+
+// This one w/o ACK bit will cause the same effect.
+// +0 < R 11:11(0) win 32792
+// See Step 2 in tcp_validate_incoming().
+
+// found_ok_skb in tcp_recvmsg_locked()
+ +0 read(4, ..., 512) = 10
+
+// !copied && sk->sk_err -> sock_error(sk)
+ +0 read(4, ..., 512) = -1 ECONNRESET (Connection reset by peer)
+ +0 close(4) = 0
diff --git a/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_reset-before-accept.pkt b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_reset-before-accept.pkt
new file mode 100644
index 000000000000..7f9de6c66cbd
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_reset-before-accept.pkt
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Send a RST to a TFO socket before it is accepted.
+//
+// The socket won't go away and after it's accepted the data
+// in the SYN pkt can still be read. But that's about all that
+// the acceptor can do with the socket.
+
+`./defaults.sh`
+
+ 0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+ +0 setsockopt(3, SOL_TCP, TCP_FASTOPEN, [1], 4) = 0
+
+ +0 < S 0:10(10) win 32792 <mss 1460,sackOK,nop,nop,nop,wscale 7,FO TFO_COOKIE,nop,nop>
+ +0 > S. 0:0(0) ack 11 <mss 1460,nop,nop,sackOK,nop,wscale 8>
+
+// 1st read will return the data from SYN.
+ +0 < R. 11:11(0) win 257
+
+// This one w/o ACK bit will cause the same effect.
+// +0 < R 11:11(0) win 257
+
+ +0 accept(3, ..., ...) = 4
+ +0 %{ assert (tcpi_options & TCPI_OPT_SYN_DATA) != 0, tcpi_options }%
+ +0 %{ assert tcpi_state == TCP_CLOSE, tcpi_state }%
+
+ +0 read(4, ..., 512) = 10
+ +0 read(4, ..., 512) = -1 ECONNRESET (Connection reset by peer)
+ +0 close(4) = 0
diff --git a/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_reset-close-with-unread-data.pkt b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_reset-close-with-unread-data.pkt
new file mode 100644
index 000000000000..548a87701b5d
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_reset-close-with-unread-data.pkt
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Send a RST to a TFO socket after it is accepted.
+//
+// The socket will change to TCP_CLOSE state with pending data, so
+// write() will fail. Pending data can still be read, and close()
+// won't trigger a RST even if the data is not read.
+//
+// 565b7b2d2e63 ("tcp: do not send reset to already closed sockets")
+// https://lore.kernel.org/netdev/4C1A2502.1030502@openvz.org/
+
+`./defaults.sh`
+
+ 0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+ +0 setsockopt(3, SOL_TCP, TCP_FASTOPEN, [1], 4) = 0
+
+ +0 < S 0:10(10) win 32792 <mss 1460,sackOK,nop,nop, FO TFO_COOKIE,nop,nop>
+ +0 > S. 0:0(0) ack 11 <mss 1460,nop,nop,sackOK>
+
+ +0 accept(3, ..., ...) = 4
+ +0 %{ assert (tcpi_options & TCPI_OPT_SYN_DATA) != 0, tcpi_options }%
+ +0 %{ assert tcpi_state == TCP_SYN_RECV, tcpi_state }%
+
+// tcp_done() sets sk->sk_state to TCP_CLOSE and clears tp->fastopen_rsk
+ +0 < R. 11:11(0) win 32792
+ +0 %{ assert tcpi_state == TCP_CLOSE, tcpi_state }%
+
+ +0 write(4, ..., 100) = -1 ECONNRESET (Connection reset by peer)
+ +0 close(4) = 0
diff --git a/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_reset-non-tfo-socket.pkt b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_reset-non-tfo-socket.pkt
new file mode 100644
index 000000000000..20090bf77655
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_reset-non-tfo-socket.pkt
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Send a RST to a fully established socket with pending data before
+// it is accepted.
+//
+// The socket with pending data won't go away and can still be accepted
+// with data read. But it will be in TCP_CLOSE state.
+
+`./defaults.sh`
+
+ 0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+ +0 setsockopt(3, SOL_TCP, TCP_FASTOPEN, [1], 4) = 0
+
+// Invalid cookie, so accept() fails.
+ +0 < S 0:10(10) win 32792 <mss 1460,sackOK,nop,nop,FO aaaaaaaaaaaaaaaa,nop,nop>
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK, FO TFO_COOKIE,nop,nop>
+
+ +0 accept(3, ..., ...) = -1 EAGAIN (Resource temporarily unavailable)
+
+// Complete 3WHS and send data and RST
+ +0 < . 1:1(0) ack 1 win 32792
+ +0 < . 1:11(10) ack 1 win 32792
+ +0 < R. 11:11(0) win 32792
+
+// A valid reset won't make the fully-established socket go away.
+// It's just that the acceptor will get a dead, unusable socket
+// in TCP_CLOSE state.
+ +0 accept(3, ..., ...) = 4
+ +0 %{ assert (tcpi_options & TCPI_OPT_SYN_DATA) == 0, tcpi_options }%
+ +0 %{ assert tcpi_state == TCP_CLOSE, tcpi_state }%
+
+ +0 write(4, ..., 100) = -1 ECONNRESET (Connection reset by peer)
+ +0 read(4, ..., 512) = 10
+ +0 read(4, ..., 512) = 0
diff --git a/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_sockopt-fastopen-key.pkt b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_sockopt-fastopen-key.pkt
new file mode 100644
index 000000000000..9f52d7de3436
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_sockopt-fastopen-key.pkt
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Test the server cookie is generated by aes64 encoding of remote and local
+// IP addresses with a master key specified via sockopt TCP_FASTOPEN_KEY
+//
+`./defaults.sh
+ ./set_sysctls.py /proc/sys/net/ipv4/tcp_fastopen_key=00000000-00000000-00000000-00000000`
+
+ 0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+
+// Set a key of a1a1a1a1-b2b2b2b2-c3c3c3c3-d4d4d4d4 (big endian).
+// This would produce a cookie of TFO_COOKIE like many other
+// tests (which the same key but set via sysctl).
+ +0 setsockopt(3, SOL_TCP, TCP_FASTOPEN_KEY,
+ "\xa1\xa1\xa1\xa1\xb2\xb2\xb2\xb2\xc3\xc3\xc3\xc3\xd4\xd4\xd4\xd4", 16) = 0
+
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+ +0 setsockopt(3, SOL_TCP, TCP_FASTOPEN, [1], 4) = 0
+
+// Request a valid cookie TFO_COOKIE
+ +0 < S 1428932:1428942(10) win 10000 <mss 1012,nop,nop,FO,sackOK,TS val 1 ecr 0,nop,wscale 7>
+ +0 > S. 0:0(0) ack 1428933 <mss 1460,sackOK,TS val 10000 ecr 1,nop,wscale 8,FO TFO_COOKIE,nop,nop>
+ +0 < . 1:1(0) ack 1 win 257 <nop,nop,TS val 2 ecr 10000>
+ +0 accept(3, ..., ...) = 4
+ +0 %{ assert (tcpi_options & TCPI_OPT_SYN_DATA) == 0, tcpi_options }%
+
+ +0 close(4) = 0
+ +0 > F. 1:1(0) ack 1 <nop,nop,TS val 10001 ecr 2>
+ +0 < F. 1:1(0) ack 2 win 257 <nop,nop,TS val 3 ecr 10001>
+ +0 > . 2:2(0) ack 2 <nop,nop,TS val 10002 ecr 3>
+
+ +0 close(3) = 0
+
+// Restart the listener
+ +0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+ +0 setsockopt(3, SOL_TCP, TCP_FASTOPEN, [1], 4) = 0
+
+// Test that setting the key in the listen state produces an identical cookie
+ +0 setsockopt(3, SOL_TCP, TCP_FASTOPEN_KEY,
+ "\xa1\xa1\xa1\xa1\xb2\xb2\xb2\xb2\xc3\xc3\xc3\xc3\xd4\xd4\xd4\xd4", 16) = 0
+
+ +0 < S 6814000:6815000(1000) win 10000 <mss 1012,nop,nop,FO TFO_COOKIE,sackOK,TS val 10 ecr 0,nop,wscale 7>
+ +0 > S. 0:0(0) ack 6815001 <mss 1460,sackOK,TS val 10000 ecr 10,nop,wscale 8>
+ +0 accept(3, ..., ...) = 4
+ +0 %{ assert (tcpi_options & TCPI_OPT_SYN_DATA) != 0, tcpi_options }%
+ +0 < . 1001:1001(0) ack 1 win 257 <nop,nop,TS val 12 ecr 10000>
+ +0 read(4, ..., 8192) = 1000
+
+ +0 close(4) = 0
+ +0 > F. 1:1(0) ack 1001 <nop,nop,TS val 10101 ecr 12>
+ +0 < F. 1001:1001(0) ack 2 win 257 <nop,nop,TS val 112 ecr 10101>
+ +0 > . 2:2(0) ack 1002 <nop,nop,TS val 10102 ecr 112>
+
+ +0 close(3) = 0
+
+// Restart the listener
+ +0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+ +0 setsockopt(3, SOL_TCP, TCP_FASTOPEN, [1], 4) = 0
+
+// Test invalid key length (must be 16 bytes)
+ +0 setsockopt(3, SOL_TCP, TCP_FASTOPEN_KEY, "", 0) = -1 (Invalid Argument)
+ +0 setsockopt(3, SOL_TCP, TCP_FASTOPEN_KEY, "", 3) = -1 (Invalid Argument)
+
+// Previous cookie won't be accepted b/c this listener uses the global key (0-0-0-0)
+ +0 < S 6814000:6815000(1000) win 10000 <mss 1012,nop,nop,FO TFO_COOKIE,sackOK,TS val 10 ecr 0,nop,wscale 7>
+ +0 > S. 0:0(0) ack 6814001 <mss 1460,sackOK,TS val 10000 ecr 10,nop,wscale 8,FO TFO_COOKIE_ZERO,nop,nop>
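
The per-listener key is installed with setsockopt(TCP_FASTOPEN_KEY), which Python does not expose by name; a minimal sketch, assuming the Linux uapi value 33 for the option:

    import socket

    # Sketch of the per-listener key setup performed above. TCP_FASTOPEN_KEY
    # (33) is the Linux uapi constant, defined here because Python's socket
    # module does not export it.
    TCP_FASTOPEN_KEY = 33
    key = bytes.fromhex("a1a1a1a1" "b2b2b2b2" "c3c3c3c3" "d4d4d4d4")

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.IPPROTO_TCP, TCP_FASTOPEN_KEY, key)   # 16-byte primary key
    s.bind(("0.0.0.0", 0))
    s.listen(1)
    s.setsockopt(socket.IPPROTO_TCP, socket.TCP_FASTOPEN, 1)  # enable TFO queue
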
diff --git a/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_trigger-rst-listener-closed.pkt b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_trigger-rst-listener-closed.pkt
new file mode 100644
index 000000000000..e82e06da44c9
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_trigger-rst-listener-closed.pkt
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Close a listener socket with pending TFO child.
+// This will trigger RST pkt to go out.
+
+`./defaults.sh`
+
+ 0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+ +0 setsockopt(3, SOL_TCP, TCP_FASTOPEN, [1], 4) = 0
+
+ +0 < S 0:10(10) win 32792 <mss 1460,sackOK,nop,nop,FO TFO_COOKIE,nop,nop>
+ +0 > S. 0:0(0) ack 11 <mss 1460,nop,nop,sackOK>
+
+// RST pkt is generated for each not-yet-accepted TFO child.
+// inet_csk_listen_stop() -> inet_child_forget() -> tcp_disconnect()
+// -> tcp_need_reset() is true for SYN_RECV
+ +0 close(3) = 0
+ +0 > R. 1:1(0) ack 11
diff --git a/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_trigger-rst-reconnect.pkt b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_trigger-rst-reconnect.pkt
new file mode 100644
index 000000000000..2a148bb14cbf
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_trigger-rst-reconnect.pkt
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+`./defaults.sh
+ ./set_sysctls.py /proc/sys/net/ipv4/tcp_timestamps=0`
+
+ 0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+ +0 setsockopt(3, SOL_TCP, TCP_FASTOPEN, [1], 4) = 0
+
+ +0 < S 0:10(10) win 32792 <mss 1460,nop,nop,sackOK,nop,nop,FO TFO_COOKIE>
+ +0 > S. 0:0(0) ack 11 win 65535 <mss 1460,nop,nop,sackOK>
+
+// sk->sk_state is TCP_SYN_RECV
+ +0 accept(3, ..., ...) = 4
+ +0 %{ assert tcpi_state == TCP_SYN_RECV, tcpi_state }%
+
+// tcp_disconnect() sets sk->sk_state to TCP_CLOSE
+ +0 connect(4, AF_UNSPEC, ...) = 0
+ +0 > R. 1:1(0) ack 11 win 65535
+ +0 %{ assert tcpi_state == TCP_CLOSE, tcpi_state }%
+
+// connect() sets sk->sk_state to TCP_SYN_SENT
+ +0 fcntl(4, F_SETFL, O_RDWR|O_NONBLOCK) = 0
+ +0 connect(4, ..., ...) = -1 EINPROGRESS (Operation is now in progress)
+ +0 > S 0:0(0) win 65535 <mss 1460,nop,nop,sackOK,nop,wscale 8>
+ +0 %{ assert tcpi_state == TCP_SYN_SENT, tcpi_state }%
+
+// tp->fastopen_rsk must be NULL
+ +1 > S 0:0(0) win 65535 <mss 1460,nop,nop,sackOK,nop,wscale 8>
diff --git a/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_trigger-rst-unread-data-closed.pkt b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_trigger-rst-unread-data-closed.pkt
new file mode 100644
index 000000000000..09fb63f78a0e
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_trigger-rst-unread-data-closed.pkt
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Close a TFO socket with unread data.
+// This will trigger a RST pkt.
+
+`./defaults.sh`
+
+ 0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+ +0 setsockopt(3, SOL_TCP, TCP_FASTOPEN, [1], 4) = 0
+
+ +0 < S 0:10(10) win 32792 <mss 1460,sackOK,nop,nop,FO TFO_COOKIE,nop,nop>
+ +0 > S. 0:0(0) ack 11 <mss 1460,nop,nop,sackOK>
+
+ +0 accept(3, ..., ...) = 4
+ +0 %{ assert (tcpi_options & TCPI_OPT_SYN_DATA) != 0, tcpi_options }%
+ +0 %{ assert tcpi_state == TCP_SYN_RECV, tcpi_state }%
+
+// data_was_unread == true in __tcp_close()
+ +0 close(4) = 0
+ +0 > R. 1:1(0) ack 11
diff --git a/tools/testing/selftests/net/packetdrill/tcp_inq_client.pkt b/tools/testing/selftests/net/packetdrill/tcp_inq_client.pkt
index df49c67645ac..e13f0eee9795 100644
--- a/tools/testing/selftests/net/packetdrill/tcp_inq_client.pkt
+++ b/tools/testing/selftests/net/packetdrill/tcp_inq_client.pkt
@@ -1,5 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
// Test TCP_INQ and TCP_CM_INQ on the client side.
+
+--mss=1000
+
`./defaults.sh
`
diff --git a/tools/testing/selftests/net/packetdrill/tcp_inq_server.pkt b/tools/testing/selftests/net/packetdrill/tcp_inq_server.pkt
index 04a5e2590c62..14dd5f813d50 100644
--- a/tools/testing/selftests/net/packetdrill/tcp_inq_server.pkt
+++ b/tools/testing/selftests/net/packetdrill/tcp_inq_server.pkt
@@ -1,5 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
// Test TCP_INQ and TCP_CM_INQ on the server side.
+
+--mss=1000
+
`./defaults.sh
`
diff --git a/tools/testing/selftests/net/packetdrill/tcp_ooo-before-and-after-accept.pkt b/tools/testing/selftests/net/packetdrill/tcp_ooo-before-and-after-accept.pkt
new file mode 100644
index 000000000000..09aabc775e80
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_ooo-before-and-after-accept.pkt
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+
+--mss=1000
+
+`./defaults.sh
+sysctl -q net.ipv4.tcp_rmem="4096 131072 $((32*1024*1024))"`
+
+// Test that a not-yet-accepted socket does not change
+// its initial sk_rcvbuf (tcp_rmem[1]) when receiving ooo packets.
+
+ +0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +0 < S 0:0(0) win 65535 <mss 1000,nop,nop,sackOK,nop,wscale 7>
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK,nop,wscale 10>
+ +.1 < . 1:1(0) ack 1 win 257
+ +0 < . 2001:41001(39000) ack 1 win 257
+ +0 > . 1:1(0) ack 1 <nop,nop,sack 2001:41001>
+ +0 < . 41001:101001(60000) ack 1 win 257
+ +0 > . 1:1(0) ack 1 <nop,nop,sack 2001:101001>
+ +0 < . 1:1001(1000) ack 1 win 257
+ +0 > . 1:1(0) ack 1001 <nop,nop,sack 2001:101001>
+ +0 < . 1001:2001(1000) ack 1 win 257
+ +0 > . 1:1(0) ack 101001
+
+ +0 accept(3, ..., ...) = 4
+
+ +0 %{ assert SK_MEMINFO_RCVBUF == 131072, SK_MEMINFO_RCVBUF }%
+
+ +0 close(4) = 0
+ +0 close(3) = 0
+
+// Test that ooo packets for accepted sockets do increase sk_rcvbuf
+ +0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +0 < S 0:0(0) win 65535 <mss 1000,nop,nop,sackOK,nop,wscale 7>
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK,nop,wscale 10>
+ +.1 < . 1:1(0) ack 1 win 257
+
+ +0 accept(3, ..., ...) = 4
+
+ +0 < . 2001:41001(39000) ack 1 win 257
+ +0 > . 1:1(0) ack 1 <nop,nop,sack 2001:41001>
+ +0 < . 41001:101001(60000) ack 1 win 257
+ +0 > . 1:1(0) ack 1 <nop,nop,sack 2001:101001>
+
+ +0 %{ assert SK_MEMINFO_RCVBUF > 131072, SK_MEMINFO_RCVBUF }%
+
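What the SK_MEMINFO_RCVBUF assertions encode: receive-buffer autotuning may only grow sk_rcvbuf once the connection has been accepted; before that it stays at tcp_rmem[1] (131072 above). The userspace-visible half, sketched over loopback:

    import socket

    # Sketch: a freshly accepted, idle socket still has the default receive
    # buffer; autotuning has had no traffic on the accepted socket to act on.
    srv = socket.socket()
    srv.bind(("127.0.0.1", 0))
    srv.listen(1)
    cli = socket.create_connection(srv.getsockname())
    conn, _ = srv.accept()
    rcvbuf = conn.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)
    print("initial rcvbuf:", rcvbuf)        # tcp_rmem[1] on an idle socket
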
diff --git a/tools/testing/selftests/net/packetdrill/tcp_ooo_rcv_mss.pkt b/tools/testing/selftests/net/packetdrill/tcp_ooo_rcv_mss.pkt
new file mode 100644
index 000000000000..7e6bc5fb0c8d
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_ooo_rcv_mss.pkt
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+
+--mss=1000
+
+`./defaults.sh
+sysctl -q net.ipv4.tcp_rmem="4096 131072 $((32*1024*1024))"`
+
+ +0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +0 < S 0:0(0) win 65535 <mss 1000,nop,nop,sackOK,nop,wscale 7>
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK,nop,wscale 10>
+ +.1 < . 1:1(0) ack 1 win 257
+
+ +0 accept(3, ..., ...) = 4
+
+ +0 < . 2001:11001(9000) ack 1 win 257
+ +0 > . 1:1(0) ack 1 win 81 <nop,nop,sack 2001:11001>
+
+// check that ooo packet properly updates tcpi_rcv_mss
+ +0 %{ assert tcpi_rcv_mss == 1000, tcpi_rcv_mss }%
+
+ +0 < . 11001:21001(10000) ack 1 win 257
+ +0 > . 1:1(0) ack 1 win 81 <nop,nop,sack 2001:21001>
+
diff --git a/tools/testing/selftests/net/packetdrill/tcp_rcv_big_endseq.pkt b/tools/testing/selftests/net/packetdrill/tcp_rcv_big_endseq.pkt
new file mode 100644
index 000000000000..3848b419e68c
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_rcv_big_endseq.pkt
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+
+--mss=1000
+
+`./defaults.sh`
+
+ 0 `nstat -n`
+
+// Establish a connection.
+ +0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 setsockopt(3, SOL_SOCKET, SO_RCVBUF, [10000], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +0 < S 0:0(0) win 32792 <mss 1000,nop,wscale 7>
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,wscale 0>
+ +.1 < . 1:1(0) ack 1 win 257
+
+ +0 accept(3, ..., ...) = 4
+
+ +0 < P. 1:4001(4000) ack 1 win 257
+ +0 > . 1:1(0) ack 4001 win 5000
+
+// packet in sequence: SKB_DROP_REASON_TCP_INVALID_END_SEQUENCE / LINUX_MIB_BEYOND_WINDOW
+ +0 < P. 4001:54001(50000) ack 1 win 257
+ +0 > . 1:1(0) ack 4001 win 5000
+
+// ooo packet: SKB_DROP_REASON_TCP_INVALID_END_SEQUENCE / LINUX_MIB_BEYOND_WINDOW
+ +1 < P. 5001:55001(50000) ack 1 win 257
+ +0 > . 1:1(0) ack 4001 win 5000
+
+// SKB_DROP_REASON_TCP_INVALID_SEQUENCE / LINUX_MIB_BEYOND_WINDOW
+ +0 < P. 70001:80001(10000) ack 1 win 257
+ +0 > . 1:1(0) ack 4001 win 5000
+
+ +0 read(4, ..., 100000) = 4000
+
+// If queue is empty, accept a packet even if its end_seq is above wup + rcv_wnd
+ +0 < P. 4001:54001(50000) ack 1 win 257
+ +0 > . 1:1(0) ack 54001 win 0
+
+// Check LINUX_MIB_BEYOND_WINDOW has been incremented 3 times.
++0 `nstat | grep TcpExtBeyondWindow | grep -q " 3 "`
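
The final nstat check counts three LINUX_MIB_BEYOND_WINDOW increments (the two oversized packets and the out-of-window one). A sketch reading the same counter straight from /proc/net/netstat, where nstat's TcpExtBeyondWindow appears as BeyondWindow under the TcpExt header:

    # Sketch: read a TcpExt counter directly from /proc/net/netstat.
    # Returns 0 if the running kernel does not have the counter.
    def read_tcpext(name: str) -> int:
        with open("/proc/net/netstat") as f:
            lines = f.read().splitlines()
        for hdr, val in zip(lines[::2], lines[1::2]):   # header/value pairs
            if hdr.startswith("TcpExt:"):
                fields = dict(zip(hdr.split()[1:], map(int, val.split()[1:])))
                return fields.get(name, 0)
        return 0

    print("BeyondWindow:", read_tcpext("BeyondWindow"))
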
diff --git a/tools/testing/selftests/net/packetdrill/tcp_rcv_toobig.pkt b/tools/testing/selftests/net/packetdrill/tcp_rcv_toobig.pkt
new file mode 100644
index 000000000000..f575c0ff89da
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_rcv_toobig.pkt
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+
+--mss=1000
+
+`./defaults.sh`
+
+ 0 `nstat -n`
+
+// Establish a connection.
+ +0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 setsockopt(3, SOL_SOCKET, SO_RCVBUF, [20000], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +0 < S 0:0(0) win 32792 <mss 1000,nop,wscale 7>
+ +0 > S. 0:0(0) ack 1 win 18980 <mss 1460,nop,wscale 0>
+ +.1 < . 1:1(0) ack 1 win 257
+
+ +0 accept(3, ..., ...) = 4
+
+ +0 < P. 1:20001(20000) ack 1 win 257
+ +.04 > . 1:1(0) ack 20001 win 18000
+
+ +0 setsockopt(4, SOL_SOCKET, SO_RCVBUF, [12000], 4) = 0
+ +0 < P. 20001:80001(60000) ack 1 win 257
+ +0 > . 1:1(0) ack 20001 win 18000
+
+ +0 read(4, ..., 20000) = 20000
+// A too big packet is accepted if the receive queue is empty
+ +0 < P. 20001:80001(60000) ack 1 win 257
+ +0 > . 1:1(0) ack 80001 win 0
+
diff --git a/tools/testing/selftests/net/packetdrill/tcp_rto_synack_rto_max.pkt b/tools/testing/selftests/net/packetdrill/tcp_rto_synack_rto_max.pkt
new file mode 100644
index 000000000000..47550df124ce
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_rto_synack_rto_max.pkt
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Test SYN+ACK RTX with 1s RTO.
+//
+`./defaults.sh
+ ./set_sysctls.py /proc/sys/net/ipv4/tcp_rto_max_ms=1000`
+
+//
+// Test 1: TFO SYN+ACK
+//
+ 0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+ +0 setsockopt(3, SOL_TCP, TCP_FASTOPEN, [1], 4) = 0
+
+ +0 < S 0:10(10) win 1000 <mss 1460,sackOK,nop,nop,FO TFO_COOKIE,nop,nop>
+ +0 > S. 0:0(0) ack 11 <mss 1460,nop,nop,sackOK>
+
+// RTO must be capped to 1s
+ +1 > S. 0:0(0) ack 11 <mss 1460,nop,nop,sackOK>
+ +1 > S. 0:0(0) ack 11 <mss 1460,nop,nop,sackOK>
+ +1 > S. 0:0(0) ack 11 <mss 1460,nop,nop,sackOK>
+
+ +0 < . 11:11(0) ack 1 win 1000 <mss 1460,nop,nop,sackOK>
+ +0 accept(3, ..., ...) = 4
+ +0 %{ assert (tcpi_options & TCPI_OPT_SYN_DATA) != 0, tcpi_options }%
+
+ +0 close(4) = 0
+ +0 close(3) = 0
+
+
+//
+// Test 2: non-TFO SYN+ACK
+//
+ +0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +0 < S 0:0(0) win 1000 <mss 1460,sackOK,nop,nop>
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK>
+
+// RTO must be capped to 1s
+ +1 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK>
+ +1 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK>
+ +1 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK>
+
+ +0 < . 1:1(0) ack 1 win 1000 <mss 1460,nop,nop,sackOK>
+ +0 accept(3, ..., ...) = 4
+ +0 %{ assert (tcpi_options & TCPI_OPT_SYN_DATA) == 0, tcpi_options }%
+
+ +0 close(4) = 0
+ +0 close(3) = 0
diff --git a/tools/testing/selftests/net/packetdrill/tcp_syscall_bad_arg_sendmsg-empty-iov.pkt b/tools/testing/selftests/net/packetdrill/tcp_syscall_bad_arg_sendmsg-empty-iov.pkt
index b2b2cdf27e20..454441e7ecff 100644
--- a/tools/testing/selftests/net/packetdrill/tcp_syscall_bad_arg_sendmsg-empty-iov.pkt
+++ b/tools/testing/selftests/net/packetdrill/tcp_syscall_bad_arg_sendmsg-empty-iov.pkt
@@ -1,6 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
// Test that we correctly skip zero-length IOVs.
+
+--send_omit_free // do not reuse send buffers with zerocopy
+
`./defaults.sh`
+
0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+0 setsockopt(3, SOL_SOCKET, SO_ZEROCOPY, [1], 4) = 0
+0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
diff --git a/tools/testing/selftests/net/packetdrill/tcp_user_timeout_user-timeout-probe.pkt b/tools/testing/selftests/net/packetdrill/tcp_user_timeout_user-timeout-probe.pkt
index 183051ba0cae..6882b8240a8a 100644
--- a/tools/testing/selftests/net/packetdrill/tcp_user_timeout_user-timeout-probe.pkt
+++ b/tools/testing/selftests/net/packetdrill/tcp_user_timeout_user-timeout-probe.pkt
@@ -23,14 +23,16 @@
// install a qdisc dropping all packets
+0 `tc qdisc delete dev tun0 root 2>/dev/null ; tc qdisc add dev tun0 root pfifo limit 0`
+
+0 write(4, ..., 24) = 24
// When qdisc is congested we retry every 500ms
// (TCP_RESOURCE_PROBE_INTERVAL) and therefore
// we retry 6 times before hitting 3s timeout.
// First verify that the connection is alive:
-+3.250 write(4, ..., 24) = 24
++3 write(4, ..., 24) = 24
+
// Now verify that shortly after that the socket is dead:
- +.100 write(4, ..., 24) = -1 ETIMEDOUT (Connection timed out)
++1 write(4, ..., 24) = -1 ETIMEDOUT (Connection timed out)
+0 %{ assert tcpi_probes == 6, tcpi_probes; \
assert tcpi_backoff == 0, tcpi_backoff }%
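The adjusted timings encode the probe budget spelled out in the comment: six probes at the 500 ms TCP_RESOURCE_PROBE_INTERVAL consume the whole 3 s user timeout, so the write at +3 s still succeeds while the one issued a second later must fail with ETIMEDOUT. As trivial shell arithmetic:

    echo $(( 6 * 500 ))   # 3000 ms, i.e. the 3 s user timeout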
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_basic.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_basic.pkt
index a82c8899d36b..0a0700afdaa3 100644
--- a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_basic.pkt
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_basic.pkt
@@ -4,6 +4,8 @@
// send a packet with MSG_ZEROCOPY and receive the notification ID
// repeat and verify IDs are consecutive
+--send_omit_free // do not reuse send buffers with zerocopy
+
`./defaults.sh`
0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_batch.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_batch.pkt
index c01915e7f4a1..df91675d2991 100644
--- a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_batch.pkt
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_batch.pkt
@@ -3,6 +3,8 @@
//
// send multiple packets, then read one range of all notifications.
+--send_omit_free // do not reuse send buffers with zerocopy
+
`./defaults.sh`
0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_client.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_client.pkt
index 6509882932e9..2963cfcb14df 100644
--- a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_client.pkt
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_client.pkt
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
// Minimal client-side zerocopy test
+--send_omit_free // do not reuse send buffers with zerocopy
+
`./defaults.sh`
0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 4
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_closed.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_closed.pkt
index 2cd78755cb2a..ea0c2fa73c2d 100644
--- a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_closed.pkt
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_closed.pkt
@@ -7,6 +7,8 @@
// First send on a closed socket and wait for (absent) notification.
// Then connect and send and verify that notification nr. is zero.
+--send_omit_free // do not reuse send buffers with zerocopy
+
`./defaults.sh`
0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 4
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_edge.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_edge.pkt
index 7671c20e01cf..4df978a9b82e 100644
--- a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_edge.pkt
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_edge.pkt
@@ -7,6 +7,9 @@
// fire two sends with MSG_ZEROCOPY and receive the acks. confirm that EPOLLERR
// is correctly fired only once, when EPOLLET is set. send another packet with
// MSG_ZEROCOPY. confirm that EPOLLERR is correctly fired again only once.
+
+--send_omit_free // do not reuse send buffers with zerocopy
+
`./defaults.sh`
0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_exclusive.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_exclusive.pkt
index fadc480fdb7f..36b6edc4858c 100644
--- a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_exclusive.pkt
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_exclusive.pkt
@@ -8,6 +8,9 @@
// fire two sends with MSG_ZEROCOPY and receive the acks. confirm that EPOLLERR
// is correctly fired only once, when EPOLLET is set. send another packet with
// MSG_ZEROCOPY. confirm that EPOLLERR is correctly fired again only once.
+
+--send_omit_free // do not reuse send buffers with zerocopy
+
`./defaults.sh`
0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_oneshot.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_oneshot.pkt
index 5bfa0d1d2f4a..1bea6f3b4558 100644
--- a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_oneshot.pkt
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_oneshot.pkt
@@ -8,6 +8,9 @@
// is correctly fired only once, when EPOLLONESHOT is set. send another packet
// with MSG_ZEROCOPY. confirm that EPOLLERR is not fired. Rearm the FD and
// confirm that EPOLLERR is correctly set.
+
+--send_omit_free // do not reuse send buffers with zerocopy
+
`./defaults.sh`
0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-client.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-client.pkt
index 4a73bbf46961..e27c21ff5d18 100644
--- a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-client.pkt
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-client.pkt
@@ -8,6 +8,8 @@
// one will have no data in the initial send. On return 0 the
// zerocopy notification counter is not incremented. Verify this too.
+--send_omit_free // do not reuse send buffers with zerocopy
+
`./defaults.sh`
// Send a FastOpen request, no cookie yet so no data in SYN
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-server.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-server.pkt
index 36086c5877ce..b1fa77c77dfa 100644
--- a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-server.pkt
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-server.pkt
@@ -4,6 +4,8 @@
// send data with MSG_FASTOPEN | MSG_ZEROCOPY and verify that the
// kernel returns the notification ID.
+--send_omit_free // do not reuse send buffers with zerocopy
+
`./defaults.sh
./set_sysctls.py /proc/sys/net/ipv4/tcp_fastopen=0x207`
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_maxfrags.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_maxfrags.pkt
index 672f817faca0..2f5317d0a9fa 100644
--- a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_maxfrags.pkt
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_maxfrags.pkt
@@ -7,6 +7,8 @@
// because each iovec element becomes a frag
// 3) the PSH bit is set on an skb when it runs out of fragments
+--send_omit_free // do not reuse send buffers with zerocopy
+
`./defaults.sh`
0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_small.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_small.pkt
index a9a1ac0aea4f..9d5272c6b207 100644
--- a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_small.pkt
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_small.pkt
@@ -4,6 +4,8 @@
// verify that SO_EE_CODE_ZEROCOPY_COPIED is set on zerocopy
// packets of all sizes, including the smallest payload, 1B.
+--send_omit_free // do not reuse send buffers with zerocopy
+
`./defaults.sh`
0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
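Every zerocopy script in the directory now carries --send_omit_free so packetdrill does not hand a send buffer back for reuse while a MSG_ZEROCOPY completion is still pending. A quick consistency check, run from the packetdrill directory (grep -L lists any file still missing the flag):

    grep -L -- '--send_omit_free' tcp_zerocopy_*.pkt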
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
index 88e914c4eef9..a3323c21f001 100755
--- a/tools/testing/selftests/net/pmtu.sh
+++ b/tools/testing/selftests/net/pmtu.sh
@@ -1089,10 +1089,11 @@ cleanup() {
cleanup_all_ns
- ip link del veth_A-C 2>/dev/null
- ip link del veth_A-R1 2>/dev/null
- cleanup_del_ovs_internal
- cleanup_del_ovs_vswitchd
+ [ -e "/sys/class/net/veth_A-C" ] && ip link del veth_A-C
+ [ -e "/sys/class/net/veth_A-R1" ] && ip link del veth_A-R1
+ [ -e "/sys/class/net/ovs_br0" ] && cleanup_del_ovs_internal
+ [ -e "/sys/class/net/ovs_br0" ] && cleanup_del_ovs_vswitchd
+
rm -f "$tmpoutfile"
}
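The reworked cleanup() only tears down interfaces that actually exist, so a partially set up run no longer emits errors from ip link del. The guard generalizes to a small helper; maybe_del is illustrative and not part of the test:

    maybe_del() {
        [ -e "/sys/class/net/$1" ] && ip link del "$1"
    }
    maybe_del veth_A-C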
diff --git a/tools/testing/selftests/net/proc_net_pktgen.c b/tools/testing/selftests/net/proc_net_pktgen.c
index 69444fb29577..fab3b5c2e25d 100644
--- a/tools/testing/selftests/net/proc_net_pktgen.c
+++ b/tools/testing/selftests/net/proc_net_pktgen.c
@@ -10,7 +10,7 @@
#include <stdlib.h>
#include <unistd.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
static const char ctrl_cmd_stop[] = "stop";
static const char ctrl_cmd_start[] = "start";
diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c
index 84c524357075..ab8d8b7e6cb0 100644
--- a/tools/testing/selftests/net/psock_fanout.c
+++ b/tools/testing/selftests/net/psock_fanout.c
@@ -54,7 +54,7 @@
#include <unistd.h>
#include "psock_lib.h"
-#include "../kselftest.h"
+#include "kselftest.h"
#define RING_NUM_FRAMES 20
diff --git a/tools/testing/selftests/net/psock_lib.h b/tools/testing/selftests/net/psock_lib.h
index 6e4fef560873..067265b0a554 100644
--- a/tools/testing/selftests/net/psock_lib.h
+++ b/tools/testing/selftests/net/psock_lib.h
@@ -22,10 +22,6 @@
#define PORT_BASE 8000
-#ifndef __maybe_unused
-# define __maybe_unused __attribute__ ((__unused__))
-#endif
-
static __maybe_unused void pair_udp_setfilter(int fd)
{
/* the filter below checks for all of the following conditions that
diff --git a/tools/testing/selftests/net/psock_tpacket.c b/tools/testing/selftests/net/psock_tpacket.c
index 221270cee3ea..7caf3135448d 100644
--- a/tools/testing/selftests/net/psock_tpacket.c
+++ b/tools/testing/selftests/net/psock_tpacket.c
@@ -22,6 +22,7 @@
* - TPACKET_V3: RX_RING
*/
+#undef NDEBUG
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
@@ -33,7 +34,6 @@
#include <ctype.h>
#include <fcntl.h>
#include <unistd.h>
-#include <bits/wordsize.h>
#include <net/ethernet.h>
#include <netinet/ip.h>
#include <arpa/inet.h>
@@ -46,7 +46,7 @@
#include "psock_lib.h"
-#include "../kselftest.h"
+#include "kselftest.h"
#ifndef bug_on
# define bug_on(cond) assert(!(cond))
@@ -785,7 +785,7 @@ static int test_kernel_bit_width(void)
static int test_user_bit_width(void)
{
- return __WORDSIZE;
+ return sizeof(long) * 8;
}
static const char *tpacket_str[] = {
diff --git a/tools/testing/selftests/net/rds/Makefile b/tools/testing/selftests/net/rds/Makefile
index 612a7219990e..762845cc973c 100644
--- a/tools/testing/selftests/net/rds/Makefile
+++ b/tools/testing/selftests/net/rds/Makefile
@@ -5,8 +5,14 @@ all:
TEST_PROGS := run.sh
-TEST_FILES := include.sh test.py
+TEST_FILES := \
+ include.sh \
+ test.py \
+# end of TEST_FILES
-EXTRA_CLEAN := /tmp/rds_logs include.sh
+EXTRA_CLEAN := \
+ include.sh \
+ /tmp/rds_logs \
+# end of EXTRA_CLEAN
include ../../lib.mk
diff --git a/tools/testing/selftests/net/reuseaddr_ports_exhausted.c b/tools/testing/selftests/net/reuseaddr_ports_exhausted.c
index 7b9bf8a7bbe1..5aad27a0d13a 100644
--- a/tools/testing/selftests/net/reuseaddr_ports_exhausted.c
+++ b/tools/testing/selftests/net/reuseaddr_ports_exhausted.c
@@ -22,7 +22,7 @@
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
struct reuse_opts {
int reuseaddr[2];
diff --git a/tools/testing/selftests/net/reuseport_bpf.c b/tools/testing/selftests/net/reuseport_bpf.c
index 65aea27d761c..b6634d6da3d6 100644
--- a/tools/testing/selftests/net/reuseport_bpf.c
+++ b/tools/testing/selftests/net/reuseport_bpf.c
@@ -24,7 +24,7 @@
#include <sys/resource.h>
#include <unistd.h>
-#include "../kselftest.h"
+#include "kselftest.h"
struct test_params {
int recv_family;
diff --git a/tools/testing/selftests/net/reuseport_bpf_numa.c b/tools/testing/selftests/net/reuseport_bpf_numa.c
index c9ba36aa688e..2ffd957ffb15 100644
--- a/tools/testing/selftests/net/reuseport_bpf_numa.c
+++ b/tools/testing/selftests/net/reuseport_bpf_numa.c
@@ -23,7 +23,7 @@
#include <unistd.h>
#include <numa.h>
-#include "../kselftest.h"
+#include "kselftest.h"
static const int PORT = 8888;
diff --git a/tools/testing/selftests/net/route_hint.sh b/tools/testing/selftests/net/route_hint.sh
new file mode 100755
index 000000000000..2db01ece0cc1
--- /dev/null
+++ b/tools/testing/selftests/net/route_hint.sh
@@ -0,0 +1,79 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# This test ensures that directed broadcast routes use the dst hint mechanism.
+
+source lib.sh
+
+CLIENT_IP4="192.168.0.1"
+SERVER_IP4="192.168.0.2"
+BROADCAST_ADDRESS="192.168.0.255"
+
+setup() {
+ setup_ns CLIENT_NS SERVER_NS
+
+ ip -net "${SERVER_NS}" link add link1 type veth peer name link0 netns "${CLIENT_NS}"
+
+ ip -net "${CLIENT_NS}" link set link0 up
+ ip -net "${CLIENT_NS}" addr add "${CLIENT_IP4}/24" dev link0
+
+ ip -net "${SERVER_NS}" link set link1 up
+ ip -net "${SERVER_NS}" addr add "${SERVER_IP4}/24" dev link1
+
+ ip netns exec "${CLIENT_NS}" ethtool -K link0 tcp-segmentation-offload off
+ ip netns exec "${SERVER_NS}" sh -c "echo 500000000 > /sys/class/net/link1/gro_flush_timeout"
+ ip netns exec "${SERVER_NS}" sh -c "echo 1 > /sys/class/net/link1/napi_defer_hard_irqs"
+ ip netns exec "${SERVER_NS}" ethtool -K link1 generic-receive-offload on
+}
+
+cleanup() {
+ ip -net "${SERVER_NS}" link del link1
+ cleanup_ns "${CLIENT_NS}" "${SERVER_NS}"
+}
+
+directed_bcast_hint_test()
+{
+ local rc=0
+
+ echo "Testing for directed broadcast route hint"
+
+ orig_in_brd=$(ip netns exec "${SERVER_NS}" lnstat -j -i1 -c1 | jq '.in_brd')
+ ip netns exec "${CLIENT_NS}" mausezahn link0 -a own -b bcast -A "${CLIENT_IP4}" \
+ -B "${BROADCAST_ADDRESS}" -c1 -t tcp "sp=1-100,dp=1234,s=1,a=0" -p 5 -q
+ sleep 1
+ new_in_brd=$(ip netns exec "${SERVER_NS}" lnstat -j -i1 -c1 | jq '.in_brd')
+
+ res=$(echo "${new_in_brd} - ${orig_in_brd}" | bc)
+
+ if [ "${res}" -lt 100 ]; then
+ echo "[ OK ]"
+ rc="${ksft_pass}"
+ else
+ echo "[FAIL] expected in_brd to be under 100, got ${res}"
+ rc="${ksft_fail}"
+ fi
+
+ return "${rc}"
+}
+
+if [ ! -x "$(command -v mausezahn)" ]; then
+ echo "SKIP: Could not run test without mausezahn tool"
+ exit "${ksft_skip}"
+fi
+
+if [ ! -x "$(command -v jq)" ]; then
+ echo "SKIP: Could not run test without jq tool"
+ exit "${ksft_skip}"
+fi
+
+if [ ! -x "$(command -v bc)" ]; then
+ echo "SKIP: Could not run test without bc tool"
+ exit "${ksft_skip}"
+fi
+
+trap cleanup EXIT
+
+setup
+
+directed_bcast_hint_test
+exit $?
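The pass criterion rests on sampling the in_brd counter before and after the burst. The same read can be done interactively, assuming lnstat and jq are installed (the script skips when either is missing):

    ip netns exec "${SERVER_NS}" lnstat -j -i1 -c1 | jq '.in_brd'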
diff --git a/tools/testing/selftests/net/rps_default_mask.sh b/tools/testing/selftests/net/rps_default_mask.sh
index 4287a8529890..b200019b3c80 100755
--- a/tools/testing/selftests/net/rps_default_mask.sh
+++ b/tools/testing/selftests/net/rps_default_mask.sh
@@ -54,16 +54,16 @@ cleanup
echo 1 > /proc/sys/net/core/rps_default_mask
setup
-chk_rps "changing rps_default_mask dont affect existing devices" "" lo $INITIAL_RPS_DEFAULT_MASK
+chk_rps "changing rps_default_mask doesn't affect existing devices" "" lo $INITIAL_RPS_DEFAULT_MASK
echo 3 > /proc/sys/net/core/rps_default_mask
-chk_rps "changing rps_default_mask dont affect existing netns" $NETNS lo 0
+chk_rps "changing rps_default_mask doesn't affect existing netns" $NETNS lo 0
ip link add name $VETH type veth peer netns $NETNS name $VETH
ip link set dev $VETH up
ip -n $NETNS link set dev $VETH up
-chk_rps "changing rps_default_mask affect newly created devices" "" $VETH 3
-chk_rps "changing rps_default_mask don't affect newly child netns[II]" $NETNS $VETH 0
+chk_rps "changing rps_default_mask affects newly created devices" "" $VETH 3
+chk_rps "changing rps_default_mask doesn't affect newly child netns[II]" $NETNS $VETH 0
ip link del dev $VETH
ip netns del $NETNS
@@ -72,8 +72,8 @@ chk_rps "rps_default_mask is 0 by default in child netns" "$NETNS" lo 0
ip netns exec $NETNS sysctl -qw net.core.rps_default_mask=1
ip link add name $VETH type veth peer netns $NETNS name $VETH
-chk_rps "changing rps_default_mask in child ns don't affect the main one" "" lo $INITIAL_RPS_DEFAULT_MASK
+chk_rps "changing rps_default_mask in child ns doesn't affect the main one" "" lo $INITIAL_RPS_DEFAULT_MASK
chk_rps "changing rps_default_mask in child ns affects new childns devices" $NETNS $VETH 1
-chk_rps "changing rps_default_mask in child ns don't affect existing devices" $NETNS lo 0
+chk_rps "changing rps_default_mask in child ns doesn't affect existing devices" $NETNS lo 0
exit $ret
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index 2e8243a65b50..248c2b91fe42 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -8,6 +8,7 @@ ALL_TESTS="
kci_test_polrouting
kci_test_route_get
kci_test_addrlft
+ kci_test_addrlft_route_cleanup
kci_test_promote_secondaries
kci_test_tc
kci_test_gre
@@ -21,6 +22,7 @@ ALL_TESTS="
kci_test_vrf
kci_test_encap
kci_test_macsec
+ kci_test_macsec_vlan
kci_test_ipsec
kci_test_ipsec_offload
kci_test_fdb_get
@@ -30,6 +32,7 @@ ALL_TESTS="
kci_test_address_proto
kci_test_enslave_bonding
kci_test_mngtmpaddr
+ kci_test_operstate
"
devdummy="test-dummy0"
@@ -291,6 +294,17 @@ kci_test_route_get()
end_test "PASS: route get"
}
+check_addr_not_exist()
+{
+ dev=$1
+ addr=$2
+ if ip addr show dev $dev | grep -q $addr; then
+ return 1
+ else
+ return 0
+ fi
+}
+
kci_test_addrlft()
{
for i in $(seq 10 100) ;do
@@ -298,9 +312,10 @@ kci_test_addrlft()
run_cmd ip addr add 10.23.11.$i/32 dev "$devdummy" preferred_lft $lft valid_lft $((lft+1))
done
- sleep 5
- run_cmd_grep_fail "10.23.11." ip addr show dev "$devdummy"
- if [ $? -eq 0 ]; then
+ slowwait 5 check_addr_not_exist "$devdummy" "10.23.11."
+ if [ $? -eq 1 ]; then
+		# dump the remaining addresses to help troubleshoot the failure
+ run_cmd ip addr show dev "$devdummy"
check_err 1
end_test "FAIL: preferred_lft addresses remaining"
return
@@ -309,8 +324,32 @@ kci_test_addrlft()
end_test "PASS: preferred_lft addresses have expired"
}
+kci_test_addrlft_route_cleanup()
+{
+ local ret=0
+ local test_addr="2001:db8:99::1/64"
+ local test_prefix="2001:db8:99::/64"
+
+ run_cmd ip -6 addr add $test_addr dev "$devdummy" valid_lft 300 preferred_lft 300
+ run_cmd_grep "$test_prefix proto kernel" ip -6 route show dev "$devdummy"
+ run_cmd ip -6 addr del $test_addr dev "$devdummy"
+ run_cmd_grep_fail "$test_prefix" ip -6 route show dev "$devdummy"
+
+ if [ $ret -ne 0 ]; then
+ end_test "FAIL: route not cleaned up when address with valid_lft deleted"
+ return 1
+ fi
+
+ end_test "PASS: route cleaned up when address with valid_lft deleted"
+}
+
kci_test_promote_secondaries()
{
+ run_cmd ifconfig "$devdummy"
+ if [ $ret -ne 0 ]; then
+ end_test "SKIP: ifconfig not installed"
+ return $ksft_skip
+ fi
promote=$(sysctl -n net.ipv4.conf.$devdummy.promote_secondaries)
sysctl -q net.ipv4.conf.$devdummy.promote_secondaries=1
@@ -507,7 +546,7 @@ kci_test_encap_fou()
run_cmd_fail ip -netns "$testns" fou del port 9999
run_cmd ip -netns "$testns" fou del port 7777
if [ $ret -ne 0 ]; then
- end_test "FAIL: fou"s
+ end_test "FAIL: fou"
return 1
fi
@@ -561,6 +600,41 @@ kci_test_macsec()
end_test "PASS: macsec"
}
+# Test __dev_set_rx_mode call from dev_uc_add under addr_list_lock spinlock.
+# Make sure __dev_set_promiscuity does not grab the (sleeping) netdev
+# instance lock.
+# https://lore.kernel.org/netdev/2aff4342b0f5b1539c02ffd8df4c7e58dd9746e7.camel@nvidia.com/
+kci_test_macsec_vlan()
+{
+ msname="test_macsec1"
+ vlanname="test_vlan1"
+ local ret=0
+ run_cmd_grep "^Usage: ip macsec" ip macsec help
+ if [ $? -ne 0 ]; then
+ end_test "SKIP: macsec: iproute2 too old"
+ return $ksft_skip
+ fi
+ run_cmd ip link add link "$devdummy" "$msname" type macsec port 42 encrypt on
+ if [ $ret -ne 0 ];then
+ end_test "FAIL: can't add macsec interface, skipping test"
+ return 1
+ fi
+
+ run_cmd ip link set dev "$msname" up
+ ip link add link "$msname" name "$vlanname" type vlan id 1
+ ip link set dev "$vlanname" address 00:11:22:33:44:88
+ ip link set dev "$vlanname" up
+ run_cmd ip link del dev "$vlanname"
+ run_cmd ip link del dev "$msname"
+
+ if [ $ret -ne 0 ];then
+ end_test "FAIL: macsec_vlan"
+ return 1
+ fi
+
+ end_test "PASS: macsec_vlan"
+}
+
#-------------------------------------------------------------------
# Example commands
# ip x s add proto esp src 14.0.0.52 dst 14.0.0.70 \
@@ -673,6 +747,11 @@ kci_test_ipsec_offload()
sysfsf=$sysfsd/ipsec
sysfsnet=/sys/bus/netdevsim/devices/netdevsim0/net/
probed=false
+ esp4_offload_probed_default=false
+
+ if lsmod | grep -q esp4_offload; then
+ esp4_offload_probed_default=true
+ fi
if ! mount | grep -q debugfs; then
mount -t debugfs none /sys/kernel/debug/ &> /dev/null
@@ -766,6 +845,7 @@ EOF
fi
# clean up any leftovers
+ ! "$esp4_offload_probed_default" && lsmod | grep -q esp4_offload && rmmod esp4_offload
echo 0 > /sys/bus/netdevsim/del_device
$probed && rmmod netdevsim
@@ -1148,6 +1228,12 @@ do_test_address_proto()
local ret=0
local err
+ run_cmd_grep 'proto' ip address help
+ if [ $? -ne 0 ];then
+ end_test "SKIP: addr proto ${what}: iproute2 too old"
+ return $ksft_skip
+ fi
+
ip address add dev "$devdummy" "$addr3"
check_err $?
proto=$(address_get_proto "$addr3")
@@ -1334,6 +1420,39 @@ kci_test_mngtmpaddr()
return $ret
}
+kci_test_operstate()
+{
+ local ret=0
+
+ # Check that it is possible to set operational state during device
+ # creation and that it is preserved when the administrative state of
+ # the device is toggled.
+ run_cmd ip link add name vx0 up state up type vxlan id 10010 dstport 4789
+ run_cmd_grep "state UP" ip link show dev vx0
+ run_cmd ip link set dev vx0 down
+ run_cmd_grep "state DOWN" ip link show dev vx0
+ run_cmd ip link set dev vx0 up
+ run_cmd_grep "state UP" ip link show dev vx0
+
+ run_cmd ip link del dev vx0
+
+ # Check that it is possible to set the operational state of the device
+ # after creation.
+ run_cmd ip link add name vx0 up type vxlan id 10010 dstport 4789
+ run_cmd_grep "state UNKNOWN" ip link show dev vx0
+ run_cmd ip link set dev vx0 state up
+ run_cmd_grep "state UP" ip link show dev vx0
+
+ run_cmd ip link del dev vx0
+
+ if [ "$ret" -ne 0 ]; then
+ end_test "FAIL: operstate"
+ return 1
+ fi
+
+ end_test "PASS: operstate"
+}
+
kci_test_rtnl()
{
local current_test
@@ -1367,6 +1486,8 @@ usage: ${0##*/} OPTS
EOF
}
+require_command jq
+
#check for needed privileges
if [ "$(id -u)" -ne 0 ];then
end_test "SKIP: Need root privileges"
diff --git a/tools/testing/selftests/net/rtnetlink_notification.sh b/tools/testing/selftests/net/rtnetlink_notification.sh
new file mode 100755
index 000000000000..3f9780232bd6
--- /dev/null
+++ b/tools/testing/selftests/net/rtnetlink_notification.sh
@@ -0,0 +1,112 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# This test checks rtnetlink notification callpaths, aiming for as much
+# coverage as possible.
+#
+# set -e
+
+ALL_TESTS="
+ kci_test_mcast_addr_notification
+ kci_test_anycast_addr_notification
+"
+
+source lib.sh
+test_dev="test-dummy1"
+
+kci_test_mcast_addr_notification()
+{
+ RET=0
+ local tmpfile
+ local monitor_pid
+ local match_result
+
+ tmpfile=$(mktemp)
+ defer rm "$tmpfile"
+
+ ip monitor maddr > $tmpfile &
+ monitor_pid=$!
+ defer kill_process "$monitor_pid"
+
+ sleep 1
+
+ if [ ! -e "/proc/$monitor_pid" ]; then
+ RET=$ksft_skip
+ log_test "mcast addr notification: iproute2 too old"
+ return $RET
+ fi
+
+ ip link add name "$test_dev" type dummy
+ check_err $? "failed to add dummy interface"
+ ip link set "$test_dev" up
+ check_err $? "failed to set dummy interface up"
+ ip link del dev "$test_dev"
+ check_err $? "Failed to delete dummy interface"
+ sleep 1
+
+	# There should be 4 matching lines, as follows.
+ # 13: test-dummy1    inet6 mcast ff02::1 scope global 
+ # 13: test-dummy1    inet mcast 224.0.0.1 scope global 
+ # Deleted 13: test-dummy1    inet mcast 224.0.0.1 scope global 
+ # Deleted 13: test-dummy1    inet6 mcast ff02::1 scope global 
+ match_result=$(grep -cE "$test_dev.*(224.0.0.1|ff02::1)" "$tmpfile")
+ if [ "$match_result" -ne 4 ]; then
+ RET=$ksft_fail
+ fi
+ log_test "mcast addr notification: Expected 4 matches, got $match_result"
+ return $RET
+}
+
+kci_test_anycast_addr_notification()
+{
+ RET=0
+ local tmpfile
+ local monitor_pid
+ local match_result
+
+ tmpfile=$(mktemp)
+ defer rm "$tmpfile"
+
+ ip monitor acaddress > "$tmpfile" &
+ monitor_pid=$!
+ defer kill_process "$monitor_pid"
+ sleep 1
+
+ if [ ! -e "/proc/$monitor_pid" ]; then
+ RET=$ksft_skip
+ log_test "anycast addr notification: iproute2 too old"
+ return "$RET"
+ fi
+
+ ip link add name "$test_dev" type dummy
+ check_err $? "failed to add dummy interface"
+ ip link set "$test_dev" up
+ check_err $? "failed to set dummy interface up"
+ sysctl -qw net.ipv6.conf."$test_dev".forwarding=1
+ ip link del dev "$test_dev"
+ check_err $? "Failed to delete dummy interface"
+ sleep 1
+
+	# There should be 2 matching lines, as follows.
+	# 9: test-dummy1 inet6 any fe80:: scope global
+	# Deleted 9: test-dummy1 inet6 any fe80:: scope global
+ match_result=$(grep -cE "$test_dev.*(fe80::)" "$tmpfile")
+ if [ "$match_result" -ne 2 ]; then
+ RET=$ksft_fail
+ fi
+ log_test "anycast addr notification: Expected 2 matches, got $match_result"
+ return "$RET"
+}
+
+#check for needed privileges
+if [ "$(id -u)" -ne 0 ];then
+ RET=$ksft_skip
+ log_test "need root privileges"
+ exit $RET
+fi
+
+require_command ip
+
+tests_run
+
+exit $EXIT_STATUS
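Both tests share the same shape: start ip monitor into a temp file, poke the stack, then count matching lines. Stripped of the harness plumbing (defer, check_err), the multicast variant reduces to roughly:

    tmpfile=$(mktemp)
    ip monitor maddr > "$tmpfile" &
    monitor_pid=$!
    ip link add name test-dummy1 type dummy
    ip link set test-dummy1 up
    ip link del dev test-dummy1
    sleep 1
    kill "$monitor_pid"
    grep -cE 'test-dummy1.*(224.0.0.1|ff02::1)' "$tmpfile"   # expect 4
    rm "$tmpfile"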
diff --git a/tools/testing/selftests/net/rxtimestamp.c b/tools/testing/selftests/net/rxtimestamp.c
index 16ac4df55fdb..b81ed0352d6c 100644
--- a/tools/testing/selftests/net/rxtimestamp.c
+++ b/tools/testing/selftests/net/rxtimestamp.c
@@ -18,7 +18,7 @@
#include <linux/net_tstamp.h>
#include <linux/errqueue.h>
-#include "../kselftest.h"
+#include "kselftest.h"
struct options {
int so_timestamp;
diff --git a/tools/testing/selftests/net/sctp_hello.c b/tools/testing/selftests/net/sctp_hello.c
index f02f1f95d227..a04dac0b8027 100644
--- a/tools/testing/selftests/net/sctp_hello.c
+++ b/tools/testing/selftests/net/sctp_hello.c
@@ -29,7 +29,6 @@ static void set_addr(struct sockaddr_storage *ss, char *ip, char *port, int *len
static int do_client(int argc, char *argv[])
{
struct sockaddr_storage ss;
- char buf[] = "hello";
int csk, ret, len;
if (argc < 5) {
@@ -56,16 +55,10 @@ static int do_client(int argc, char *argv[])
set_addr(&ss, argv[3], argv[4], &len);
ret = connect(csk, (struct sockaddr *)&ss, len);
- if (ret < 0) {
- printf("failed to connect to peer\n");
+ if (ret < 0)
return -1;
- }
- ret = send(csk, buf, strlen(buf) + 1, 0);
- if (ret < 0) {
- printf("failed to send msg %d\n", ret);
- return -1;
- }
+ recv(csk, NULL, 0, 0);
close(csk);
return 0;
@@ -75,7 +68,6 @@ int main(int argc, char *argv[])
{
struct sockaddr_storage ss;
int lsk, csk, ret, len;
- char buf[20];
if (argc < 2 || (strcmp(argv[1], "server") && strcmp(argv[1], "client"))) {
printf("%s server|client ...\n", argv[0]);
@@ -125,11 +117,6 @@ int main(int argc, char *argv[])
return -1;
}
- ret = recv(csk, buf, sizeof(buf), 0);
- if (ret <= 0) {
- printf("failed to recv msg %d\n", ret);
- return -1;
- }
close(csk);
close(lsk);
diff --git a/tools/testing/selftests/net/sctp_vrf.sh b/tools/testing/selftests/net/sctp_vrf.sh
index c854034b6aa1..667b211aa8a1 100755
--- a/tools/testing/selftests/net/sctp_vrf.sh
+++ b/tools/testing/selftests/net/sctp_vrf.sh
@@ -20,9 +20,9 @@ setup() {
modprobe sctp_diag
setup_ns CLIENT_NS1 CLIENT_NS2 SERVER_NS
- ip net exec $CLIENT_NS1 sysctl -w net.ipv6.conf.default.accept_dad=0 2>&1 >/dev/null
- ip net exec $CLIENT_NS2 sysctl -w net.ipv6.conf.default.accept_dad=0 2>&1 >/dev/null
- ip net exec $SERVER_NS sysctl -w net.ipv6.conf.default.accept_dad=0 2>&1 >/dev/null
+ ip net exec $CLIENT_NS1 sysctl -wq net.ipv6.conf.default.accept_dad=0
+ ip net exec $CLIENT_NS2 sysctl -wq net.ipv6.conf.default.accept_dad=0
+ ip net exec $SERVER_NS sysctl -wq net.ipv6.conf.default.accept_dad=0
ip -n $SERVER_NS link add veth1 type veth peer name veth1 netns $CLIENT_NS1
ip -n $SERVER_NS link add veth2 type veth peer name veth1 netns $CLIENT_NS2
@@ -62,17 +62,40 @@ setup() {
}
cleanup() {
- ip netns exec $SERVER_NS pkill sctp_hello 2>&1 >/dev/null
+ wait_client $CLIENT_NS1
+ wait_client $CLIENT_NS2
+ stop_server
cleanup_ns $CLIENT_NS1 $CLIENT_NS2 $SERVER_NS
}
-wait_server() {
+start_server() {
local IFACE=$1
local CNT=0
- until ip netns exec $SERVER_NS ss -lS src $SERVER_IP:$SERVER_PORT | \
- grep LISTEN | grep "$IFACE" 2>&1 >/dev/null; do
- [ $((CNT++)) = "20" ] && { RET=3; return $RET; }
+ ip netns exec $SERVER_NS ./sctp_hello server $AF $SERVER_IP $SERVER_PORT $IFACE &
+ disown
+ until ip netns exec $SERVER_NS ss -SlH | grep -q "$IFACE"; do
+ [ $((CNT++)) -eq 30 ] && { RET=3; return $RET; }
+ sleep 0.1
+ done
+}
+
+stop_server() {
+ local CNT=0
+
+ ip netns exec $SERVER_NS pkill sctp_hello
+ while ip netns exec $SERVER_NS ss -SaH | grep -q .; do
+ [ $((CNT++)) -eq 30 ] && break
+ sleep 0.1
+ done
+}
+
+wait_client() {
+ local CLIENT_NS=$1
+ local CNT=0
+
+ while ip netns exec $CLIENT_NS ss -SaH | grep -q .; do
+ [ $((CNT++)) -eq 30 ] && break
sleep 0.1
done
}
@@ -81,14 +104,12 @@ do_test() {
local CLIENT_NS=$1
local IFACE=$2
- ip netns exec $SERVER_NS pkill sctp_hello 2>&1 >/dev/null
- ip netns exec $SERVER_NS ./sctp_hello server $AF $SERVER_IP \
- $SERVER_PORT $IFACE 2>&1 >/dev/null &
- disown
- wait_server $IFACE || return $RET
+ start_server $IFACE || return $RET
timeout 3 ip netns exec $CLIENT_NS ./sctp_hello client $AF \
- $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT 2>&1 >/dev/null
+ $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT
RET=$?
+ wait_client $CLIENT_NS
+ stop_server
return $RET
}
@@ -96,25 +117,21 @@ do_testx() {
local IFACE1=$1
local IFACE2=$2
- ip netns exec $SERVER_NS pkill sctp_hello 2>&1 >/dev/null
- ip netns exec $SERVER_NS ./sctp_hello server $AF $SERVER_IP \
- $SERVER_PORT $IFACE1 2>&1 >/dev/null &
- disown
- wait_server $IFACE1 || return $RET
- ip netns exec $SERVER_NS ./sctp_hello server $AF $SERVER_IP \
- $SERVER_PORT $IFACE2 2>&1 >/dev/null &
- disown
- wait_server $IFACE2 || return $RET
+ start_server $IFACE1 || return $RET
+ start_server $IFACE2 || return $RET
timeout 3 ip netns exec $CLIENT_NS1 ./sctp_hello client $AF \
- $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT 2>&1 >/dev/null && \
+ $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT && \
timeout 3 ip netns exec $CLIENT_NS2 ./sctp_hello client $AF \
- $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT 2>&1 >/dev/null
+ $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT
RET=$?
+ wait_client $CLIENT_NS1
+ wait_client $CLIENT_NS2
+ stop_server
return $RET
}
testup() {
- ip netns exec $SERVER_NS sysctl -w net.sctp.l3mdev_accept=1 2>&1 >/dev/null
+ ip netns exec $SERVER_NS sysctl -wq net.sctp.l3mdev_accept=1
echo -n "TEST 01: nobind, connect from client 1, l3mdev_accept=1, Y "
do_test $CLIENT_NS1 || { echo "[FAIL]"; return $RET; }
echo "[PASS]"
@@ -123,7 +140,7 @@ testup() {
do_test $CLIENT_NS2 && { echo "[FAIL]"; return $RET; }
echo "[PASS]"
- ip netns exec $SERVER_NS sysctl -w net.sctp.l3mdev_accept=0 2>&1 >/dev/null
+ ip netns exec $SERVER_NS sysctl -wq net.sctp.l3mdev_accept=0
echo -n "TEST 03: nobind, connect from client 1, l3mdev_accept=0, N "
do_test $CLIENT_NS1 && { echo "[FAIL]"; return $RET; }
echo "[PASS]"
@@ -160,7 +177,7 @@ testup() {
do_testx vrf-1 vrf-2 || { echo "[FAIL]"; return $RET; }
echo "[PASS]"
- echo -n "TEST 12: bind vrf-2 & 1 in server, connect from client 1 & 2, N "
+ echo -n "TEST 12: bind vrf-2 & 1 in server, connect from client 1 & 2, Y "
do_testx vrf-2 vrf-1 || { echo "[FAIL]"; return $RET; }
echo "[PASS]"
}
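The start_server/stop_server/wait_client helpers all converge on the same idea: poll ss until the SCTP socket table reflects the desired state rather than sleeping a fixed interval. The flags used are -S (SCTP), -l (listening), -a (all states), -H (suppress header), e.g.:

    until ip netns exec "$SERVER_NS" ss -SlH | grep -q "$IFACE"; do
        sleep 0.1
    done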
diff --git a/tools/testing/selftests/net/setup_loopback.sh b/tools/testing/selftests/net/setup_loopback.sh
deleted file mode 100644
index 2070b57849de..000000000000
--- a/tools/testing/selftests/net/setup_loopback.sh
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-
-readonly FLUSH_PATH="/sys/class/net/${dev}/gro_flush_timeout"
-readonly IRQ_PATH="/sys/class/net/${dev}/napi_defer_hard_irqs"
-readonly FLUSH_TIMEOUT="$(< ${FLUSH_PATH})"
-readonly HARD_IRQS="$(< ${IRQ_PATH})"
-readonly server_ns=$(mktemp -u server-XXXXXXXX)
-readonly client_ns=$(mktemp -u client-XXXXXXXX)
-
-netdev_check_for_carrier() {
- local -r dev="$1"
-
- for i in {1..5}; do
- carrier="$(cat /sys/class/net/${dev}/carrier)"
- if [[ "${carrier}" -ne 1 ]] ; then
- echo "carrier not ready yet..." >&2
- sleep 1
- else
- echo "carrier ready" >&2
- break
- fi
- done
- echo "${carrier}"
-}
-
-# Assumes that there is no existing ipvlan device on the physical device
-setup_loopback_environment() {
- local dev="$1"
-
- # Fail hard if cannot turn on loopback mode for current NIC
- ethtool -K "${dev}" loopback on || exit 1
- sleep 1
-
- # Check for the carrier
- carrier=$(netdev_check_for_carrier ${dev})
- if [[ "${carrier}" -ne 1 ]] ; then
- echo "setup_loopback_environment failed"
- exit 1
- fi
-}
-
-setup_macvlan_ns(){
- local -r link_dev="$1"
- local -r ns_name="$2"
- local -r ns_dev="$3"
- local -r ns_mac="$4"
- local -r addr="$5"
-
- ip link add link "${link_dev}" dev "${ns_dev}" \
- address "${ns_mac}" type macvlan
- exit_code=$?
- if [[ "${exit_code}" -ne 0 ]]; then
- echo "setup_macvlan_ns failed"
- exit $exit_code
- fi
-
- [[ -e /var/run/netns/"${ns_name}" ]] || ip netns add "${ns_name}"
- ip link set dev "${ns_dev}" netns "${ns_name}"
- ip -netns "${ns_name}" link set dev "${ns_dev}" up
- if [[ -n "${addr}" ]]; then
- ip -netns "${ns_name}" addr add dev "${ns_dev}" "${addr}"
- fi
-
- sleep 1
-}
-
-cleanup_macvlan_ns(){
- while (( $# >= 2 )); do
- ns_name="$1"
- ns_dev="$2"
- ip -netns "${ns_name}" link del dev "${ns_dev}"
- ip netns del "${ns_name}"
- shift 2
- done
-}
-
-cleanup_loopback(){
- local -r dev="$1"
-
- ethtool -K "${dev}" loopback off
- sleep 1
-
- # Check for the carrier
- carrier=$(netdev_check_for_carrier ${dev})
- if [[ "${carrier}" -ne 1 ]] ; then
- echo "setup_loopback_environment failed"
- exit 1
- fi
-}
-
-setup_interrupt() {
- # Use timer on host to trigger the network stack
- # Also disable device interrupt to not depend on NIC interrupt
- # Reduce test flakiness caused by unexpected interrupts
- echo 100000 >"${FLUSH_PATH}"
- echo 50 >"${IRQ_PATH}"
-}
-
-setup_ns() {
- # Set up server_ns namespace and client_ns namespace
- setup_macvlan_ns "${dev}" ${server_ns} server "${SERVER_MAC}"
- setup_macvlan_ns "${dev}" ${client_ns} client "${CLIENT_MAC}"
-}
-
-cleanup_ns() {
- cleanup_macvlan_ns ${server_ns} server ${client_ns} client
-}
-
-setup() {
- setup_loopback_environment "${dev}"
- setup_interrupt
-}
-
-cleanup() {
- cleanup_loopback "${dev}"
-
- echo "${FLUSH_TIMEOUT}" >"${FLUSH_PATH}"
- echo "${HARD_IRQS}" >"${IRQ_PATH}"
-}
diff --git a/tools/testing/selftests/net/setup_veth.sh b/tools/testing/selftests/net/setup_veth.sh
deleted file mode 100644
index 152bf4c65747..000000000000
--- a/tools/testing/selftests/net/setup_veth.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-
-readonly server_ns=$(mktemp -u server-XXXXXXXX)
-readonly client_ns=$(mktemp -u client-XXXXXXXX)
-
-setup_veth_ns() {
- local -r link_dev="$1"
- local -r ns_name="$2"
- local -r ns_dev="$3"
- local -r ns_mac="$4"
-
- [[ -e /var/run/netns/"${ns_name}" ]] || ip netns add "${ns_name}"
- echo 200000 > "/sys/class/net/${ns_dev}/gro_flush_timeout"
- echo 1 > "/sys/class/net/${ns_dev}/napi_defer_hard_irqs"
- ip link set dev "${ns_dev}" netns "${ns_name}" mtu 65535
- ip -netns "${ns_name}" link set dev "${ns_dev}" up
-
- ip netns exec "${ns_name}" ethtool -K "${ns_dev}" gro on tso off
-}
-
-setup_ns() {
- # Set up server_ns namespace and client_ns namespace
- ip link add name server type veth peer name client
-
- setup_veth_ns "${dev}" ${server_ns} server "${SERVER_MAC}"
- setup_veth_ns "${dev}" ${client_ns} client "${CLIENT_MAC}"
-}
-
-cleanup_ns() {
- local ns_name
-
- for ns_name in ${client_ns} ${server_ns}; do
- [[ -e /var/run/netns/"${ns_name}" ]] && ip netns del "${ns_name}"
- done
-}
-
-setup() {
- # no global init setup step needed
- :
-}
-
-cleanup() {
- cleanup_ns
-}
diff --git a/tools/testing/selftests/net/sk_so_peek_off.c b/tools/testing/selftests/net/sk_so_peek_off.c
index d87dd8d8d491..2a3f5c604f52 100644
--- a/tools/testing/selftests/net/sk_so_peek_off.c
+++ b/tools/testing/selftests/net/sk_so_peek_off.c
@@ -8,7 +8,7 @@
#include <sys/types.h>
#include <netinet/in.h>
#include <arpa/inet.h>
-#include "../kselftest.h"
+#include "kselftest.h"
static char *afstr(int af, int proto)
{
diff --git a/tools/testing/selftests/net/so_incoming_cpu.c b/tools/testing/selftests/net/so_incoming_cpu.c
index e9fa14e10732..4740701f1a9a 100644
--- a/tools/testing/selftests/net/so_incoming_cpu.c
+++ b/tools/testing/selftests/net/so_incoming_cpu.c
@@ -9,7 +9,7 @@
#include <sys/socket.h>
#include <sys/sysinfo.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
FIXTURE(so_incoming_cpu)
{
diff --git a/tools/testing/selftests/net/so_txtime.c b/tools/testing/selftests/net/so_txtime.c
index 8457b7ccbc09..b76df1efc2ef 100644
--- a/tools/testing/selftests/net/so_txtime.c
+++ b/tools/testing/selftests/net/so_txtime.c
@@ -174,7 +174,7 @@ static int do_recv_errqueue_timeout(int fdt)
msg.msg_controllen = sizeof(control);
while (1) {
- const char *reason;
+ const char *reason = NULL;
ret = recvmsg(fdt, &msg, MSG_ERRQUEUE);
if (ret == -1 && errno == EAGAIN)
diff --git a/tools/testing/selftests/net/socket.c b/tools/testing/selftests/net/socket.c
index db1aeb8c5d1e..9e270548dad8 100644
--- a/tools/testing/selftests/net/socket.c
+++ b/tools/testing/selftests/net/socket.c
@@ -7,7 +7,7 @@
#include <sys/socket.h>
#include <netinet/in.h>
-#include "../kselftest.h"
+#include "kselftest.h"
struct socket_testcase {
int domain;
@@ -39,6 +39,7 @@ static int run_tests(void)
{
char err_string1[ERR_STRING_SZ];
char err_string2[ERR_STRING_SZ];
+ const char *msg1, *msg2;
int i, err;
err = 0;
@@ -56,13 +57,13 @@ static int run_tests(void)
errno == -s->expect)
continue;
- strerror_r(-s->expect, err_string1, ERR_STRING_SZ);
- strerror_r(errno, err_string2, ERR_STRING_SZ);
+ msg1 = strerror_r(-s->expect, err_string1, ERR_STRING_SZ);
+ msg2 = strerror_r(errno, err_string2, ERR_STRING_SZ);
fprintf(stderr, "socket(%d, %d, %d) expected "
"err (%s) got (%s)\n",
s->domain, s->type, s->protocol,
- err_string1, err_string2);
+ msg1, msg2);
err = -1;
break;
@@ -70,12 +71,12 @@ static int run_tests(void)
close(fd);
if (s->expect < 0) {
- strerror_r(errno, err_string1, ERR_STRING_SZ);
+ msg1 = strerror_r(errno, err_string1, ERR_STRING_SZ);
fprintf(stderr, "socket(%d, %d, %d) expected "
"success got err (%s)\n",
s->domain, s->type, s->protocol,
- err_string1);
+ msg1);
err = -1;
break;
diff --git a/tools/testing/selftests/net/srv6_end_next_csid_l3vpn_test.sh b/tools/testing/selftests/net/srv6_end_next_csid_l3vpn_test.sh
index ba730655a7bf..4bc135e5c22c 100755
--- a/tools/testing/selftests/net/srv6_end_next_csid_l3vpn_test.sh
+++ b/tools/testing/selftests/net/srv6_end_next_csid_l3vpn_test.sh
@@ -594,7 +594,7 @@ setup_rt_local_sids()
dev "${DUMMY_DEVNAME}"
# all SIDs for VPNs start with a common locator. Routes and SRv6
- # Endpoint behavior instaces are grouped together in the 'localsid'
+ # Endpoint behavior instances are grouped together in the 'localsid'
# table.
ip -netns "${nsname}" -6 rule \
add to "${VPN_LOCATOR_SERVICE}::/16" \
diff --git a/tools/testing/selftests/net/srv6_end_x_next_csid_l3vpn_test.sh b/tools/testing/selftests/net/srv6_end_x_next_csid_l3vpn_test.sh
index 4b86040c58c6..34b781a2ae74 100755
--- a/tools/testing/selftests/net/srv6_end_x_next_csid_l3vpn_test.sh
+++ b/tools/testing/selftests/net/srv6_end_x_next_csid_l3vpn_test.sh
@@ -72,6 +72,9 @@
# Every fcf0:0:x:y::/64 network interconnects the SRv6 routers rt-x with rt-y in
# the selftest network.
#
+# In addition, every router interface connecting rt-x to rt-y is assigned an
+# IPv6 link-local address fe80::x:y/64.
+#
# Local SID/C-SID table
# =====================
#
@@ -521,6 +524,9 @@ setup_rt_networking()
ip -netns "${nsname}" addr \
add "${net_prefix}::${rt}/64" dev "${devname}" nodad
+ ip -netns "${nsname}" addr \
+ add "fe80::${rt}:${neigh}/64" dev "${devname}" nodad
+
ip -netns "${nsname}" link set "${devname}" up
done
@@ -609,6 +615,27 @@ set_end_x_nextcsid()
nflen "${LCNODEFUNC_BLEN}" dev "${DUMMY_DEVNAME}"
}
+set_end_x_ll_nextcsid()
+{
+ local rt="$1"
+ local adj="$2"
+
+ eval nsname=\${$(get_rtname "${rt}")}
+ lcnode_func_prefix="$(build_lcnode_func_prefix "${rt}")"
+ nh6_ll_addr="fe80::${adj}:${rt}"
+ oifname="veth-rt-${rt}-${adj}"
+
+	# enable the NEXT-C-SID SRv6 End.X behavior via an IPv6 link-local nexthop
+ # address (note that "dev" is the dummy dum0 device chosen for the sake
+ # of simplicity).
+ ip -netns "${nsname}" -6 route \
+ replace "${lcnode_func_prefix}" \
+ table "${LOCALSID_TABLE_ID}" \
+ encap seg6local action End.X nh6 "${nh6_ll_addr}" \
+ oif "${oifname}" flavors next-csid lblen "${LCBLOCK_BLEN}" \
+ nflen "${LCNODEFUNC_BLEN}" dev "${DUMMY_DEVNAME}"
+}
+
set_underlay_sids_reachability()
{
local rt="$1"
@@ -654,7 +681,7 @@ setup_rt_local_sids()
set_underlay_sids_reachability "${rt}" "${rt_neighs}"
# all SIDs for VPNs start with a common locator. Routes and SRv6
- # Endpoint behavior instaces are grouped together in the 'localsid'
+ # Endpoint behavior instances are grouped together in the 'localsid'
# table.
ip -netns "${nsname}" -6 rule \
add to "${VPN_LOCATOR_SERVICE}::/16" \
@@ -1016,6 +1043,27 @@ host_vpn_tests()
check_and_log_hs_ipv4_connectivity 1 2
check_and_log_hs_ipv4_connectivity 2 1
+
+ # Setup the adjacencies in the SRv6 aware routers using IPv6 link-local
+ # addresses.
+ # - rt-3 SRv6 End.X adjacency with rt-4
+ # - rt-4 SRv6 End.X adjacency with rt-1
+ set_end_x_ll_nextcsid 3 4
+ set_end_x_ll_nextcsid 4 1
+
+ log_section "SRv6 VPN connectivity test hosts (h1 <-> h2, IPv6), link-local"
+
+ check_and_log_hs_ipv6_connectivity 1 2
+ check_and_log_hs_ipv6_connectivity 2 1
+
+ log_section "SRv6 VPN connectivity test hosts (h1 <-> h2, IPv4), link-local"
+
+ check_and_log_hs_ipv4_connectivity 1 2
+ check_and_log_hs_ipv4_connectivity 2 1
+
+ # Restore the previous adjacencies.
+ set_end_x_nextcsid 3 4
+ set_end_x_nextcsid 4 1
}
__nextcsid_end_x_behavior_test()
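set_end_x_ll_nextcsid mirrors set_end_x_nextcsid but resolves the nexthop through the fe80::x:y link-local address assigned above, which is why the route needs an explicit oif. After switching adjacencies, the installed behavior can be inspected in the router's localsid table:

    ip -netns "${nsname}" -6 route show table "${LOCALSID_TABLE_ID}"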
diff --git a/tools/testing/selftests/net/srv6_hencap_red_l3vpn_test.sh b/tools/testing/selftests/net/srv6_hencap_red_l3vpn_test.sh
index 3efce1718c5f..6a68c7eff1dc 100755
--- a/tools/testing/selftests/net/srv6_hencap_red_l3vpn_test.sh
+++ b/tools/testing/selftests/net/srv6_hencap_red_l3vpn_test.sh
@@ -395,7 +395,7 @@ setup_rt_local_sids()
dev "${VRF_DEVNAME}"
# all SIDs for VPNs start with a common locator. Routes and SRv6
- # Endpoint behavior instaces are grouped together in the 'localsid'
+ # Endpoint behavior instances are grouped together in the 'localsid'
# table.
ip -netns "${nsname}" -6 rule \
add to "${VPN_LOCATOR_SERVICE}::/16" \
diff --git a/tools/testing/selftests/net/srv6_hl2encap_red_l2vpn_test.sh b/tools/testing/selftests/net/srv6_hl2encap_red_l2vpn_test.sh
index cabc70538ffe..0979b5316fdf 100755
--- a/tools/testing/selftests/net/srv6_hl2encap_red_l2vpn_test.sh
+++ b/tools/testing/selftests/net/srv6_hl2encap_red_l2vpn_test.sh
@@ -343,7 +343,7 @@ setup_rt_local_sids()
encap seg6local action End dev "${DUMMY_DEVNAME}"
# all SIDs for VPNs start with a common locator. Routes and SRv6
- # Endpoint behaviors instaces are grouped together in the 'localsid'
+ # Endpoint behaviors instances are grouped together in the 'localsid'
# table.
ip -netns "${nsname}" -6 rule add \
to "${VPN_LOCATOR_SERVICE}::/16" \
diff --git a/tools/testing/selftests/net/tap.c b/tools/testing/selftests/net/tap.c
index 247c3b3ac1c9..9ec1c9b50e77 100644
--- a/tools/testing/selftests/net/tap.c
+++ b/tools/testing/selftests/net/tap.c
@@ -17,7 +17,7 @@
#include <linux/virtio_net.h>
#include <netinet/ip.h>
#include <netinet/udp.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
static const char param_dev_tap_name[] = "xmacvtap0";
static const char param_dev_dummy_name[] = "xdummy0";
diff --git a/tools/testing/selftests/net/tcp_ao/config b/tools/testing/selftests/net/tcp_ao/config
index 3605e38711cb..971cb6fa2d63 100644
--- a/tools/testing/selftests/net/tcp_ao/config
+++ b/tools/testing/selftests/net/tcp_ao/config
@@ -1,8 +1,8 @@
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_RMD160=y
CONFIG_CRYPTO_SHA1=y
-CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_NET_VRF=y
CONFIG_TCP_AO=y
diff --git a/tools/testing/selftests/net/tcp_ao/lib/setup.c b/tools/testing/selftests/net/tcp_ao/lib/setup.c
index a27cc03c9fbd..49aec2922a31 100644
--- a/tools/testing/selftests/net/tcp_ao/lib/setup.c
+++ b/tools/testing/selftests/net/tcp_ao/lib/setup.c
@@ -9,7 +9,7 @@
* Can't be included in the header: it defines static variables which
* will be unique to every object. Let's include it only once here.
*/
-#include "../../../kselftest.h"
+#include "kselftest.h"
/* Prevent overriding of one thread's output by another */
static pthread_mutex_t ksft_print_lock = PTHREAD_MUTEX_INITIALIZER;
diff --git a/tools/testing/selftests/net/tcp_ao/seq-ext.c b/tools/testing/selftests/net/tcp_ao/seq-ext.c
index f00245263b20..6478da6a71c3 100644
--- a/tools/testing/selftests/net/tcp_ao/seq-ext.c
+++ b/tools/testing/selftests/net/tcp_ao/seq-ext.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Check that after SEQ number wrap-around:
* 1. SEQ-extension has upper bytes set
- * 2. TCP conneciton is alive and no TCPAOBad segments
+ * 2. TCP connection is alive and no TCPAOBad segments
* In order to test (2), the test doesn't just adjust seq number for a queue
* on a connected socket, but migrates it to another sk+port number, so
* that there won't be any delayed packets that will fail to verify
diff --git a/tools/testing/selftests/net/tcp_fastopen_backup_key.c b/tools/testing/selftests/net/tcp_fastopen_backup_key.c
index c1cb0c75156a..4b3f9b5e50fe 100644
--- a/tools/testing/selftests/net/tcp_fastopen_backup_key.c
+++ b/tools/testing/selftests/net/tcp_fastopen_backup_key.c
@@ -26,7 +26,7 @@
#include <fcntl.h>
#include <time.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#ifndef TCP_FASTOPEN_KEY
#define TCP_FASTOPEN_KEY 33
diff --git a/tools/testing/selftests/net/tcp_port_share.c b/tools/testing/selftests/net/tcp_port_share.c
new file mode 100644
index 000000000000..6146b62610df
--- /dev/null
+++ b/tools/testing/selftests/net/tcp_port_share.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+// Copyright (c) 2025 Cloudflare, Inc.
+
+/* Tests for TCP port sharing (bind bucket reuse). */
+
+#include <arpa/inet.h>
+#include <net/if.h>
+#include <sys/ioctl.h>
+#include <fcntl.h>
+#include <sched.h>
+#include <stdlib.h>
+
+#include "kselftest_harness.h"
+
+#define DST_PORT 30000
+#define SRC_PORT 40000
+
+struct sockaddr_inet {
+ union {
+ struct sockaddr_storage ss;
+ struct sockaddr_in6 v6;
+ struct sockaddr_in v4;
+ struct sockaddr sa;
+ };
+ socklen_t len;
+ char str[INET6_ADDRSTRLEN + __builtin_strlen("[]:65535") + 1];
+};
+
+const int one = 1;
+
+static int disconnect(int fd)
+{
+ return connect(fd, &(struct sockaddr){ AF_UNSPEC }, sizeof(struct sockaddr));
+}
+
+static int getsockname_port(int fd)
+{
+ struct sockaddr_inet addr = {};
+ int err;
+
+ addr.len = sizeof(addr);
+ err = getsockname(fd, &addr.sa, &addr.len);
+ if (err)
+ return -1;
+
+ switch (addr.sa.sa_family) {
+ case AF_INET:
+ return ntohs(addr.v4.sin_port);
+ case AF_INET6:
+ return ntohs(addr.v6.sin6_port);
+ default:
+ errno = EAFNOSUPPORT;
+ return -1;
+ }
+}
+
+static void make_inet_addr(int af, const char *ip, __u16 port,
+ struct sockaddr_inet *addr)
+{
+ const char *fmt = "";
+
+ memset(addr, 0, sizeof(*addr));
+
+ switch (af) {
+ case AF_INET:
+ addr->len = sizeof(addr->v4);
+ addr->v4.sin_family = af;
+ addr->v4.sin_port = htons(port);
+ inet_pton(af, ip, &addr->v4.sin_addr);
+ fmt = "%s:%hu";
+ break;
+ case AF_INET6:
+ addr->len = sizeof(addr->v6);
+ addr->v6.sin6_family = af;
+ addr->v6.sin6_port = htons(port);
+ inet_pton(af, ip, &addr->v6.sin6_addr);
+ fmt = "[%s]:%hu";
+ break;
+ }
+
+ snprintf(addr->str, sizeof(addr->str), fmt, ip, port);
+}
+
+FIXTURE(tcp_port_share) {};
+
+FIXTURE_VARIANT(tcp_port_share) {
+ int domain;
+ /* IP to listen on and connect to */
+ const char *dst_ip;
+ /* Primary IP to connect from */
+ const char *src1_ip;
+ /* Secondary IP to connect from */
+ const char *src2_ip;
+ /* IP to bind to in order to block the source port */
+ const char *bind_ip;
+};
+
+FIXTURE_VARIANT_ADD(tcp_port_share, ipv4) {
+ .domain = AF_INET,
+ .dst_ip = "127.0.0.1",
+ .src1_ip = "127.1.1.1",
+ .src2_ip = "127.2.2.2",
+ .bind_ip = "127.3.3.3",
+};
+
+FIXTURE_VARIANT_ADD(tcp_port_share, ipv6) {
+ .domain = AF_INET6,
+ .dst_ip = "::1",
+ .src1_ip = "2001:db8::1",
+ .src2_ip = "2001:db8::2",
+ .bind_ip = "2001:db8::3",
+};
+
+FIXTURE_SETUP(tcp_port_share)
+{
+ int sc;
+
+ ASSERT_EQ(unshare(CLONE_NEWNET), 0);
+ ASSERT_EQ(system("ip link set dev lo up"), 0);
+ ASSERT_EQ(system("ip addr add dev lo 2001:db8::1/32 nodad"), 0);
+ ASSERT_EQ(system("ip addr add dev lo 2001:db8::2/32 nodad"), 0);
+ ASSERT_EQ(system("ip addr add dev lo 2001:db8::3/32 nodad"), 0);
+
+ sc = open("/proc/sys/net/ipv4/ip_local_port_range", O_WRONLY);
+ ASSERT_GE(sc, 0);
+ ASSERT_GT(dprintf(sc, "%hu %hu\n", SRC_PORT, SRC_PORT), 0);
+ ASSERT_EQ(close(sc), 0);
+}
+
+FIXTURE_TEARDOWN(tcp_port_share) {}
+
+/* Verify that an ephemeral port becomes available again after the socket
+ * bound to it and blocking it from reuse is closed.
+ */
+TEST_F(tcp_port_share, can_reuse_port_after_bind_and_close)
+{
+ const typeof(variant) v = variant;
+ struct sockaddr_inet addr;
+ int c1, c2, ln, pb;
+
+ /* Listen on <dst_ip>:<DST_PORT> */
+ ln = socket(v->domain, SOCK_STREAM, 0);
+ ASSERT_GE(ln, 0) TH_LOG("socket(): %m");
+ ASSERT_EQ(setsockopt(ln, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)), 0);
+
+ make_inet_addr(v->domain, v->dst_ip, DST_PORT, &addr);
+ ASSERT_EQ(bind(ln, &addr.sa, addr.len), 0) TH_LOG("bind(%s): %m", addr.str);
+ ASSERT_EQ(listen(ln, 2), 0);
+
+ /* Connect from <src1_ip>:<SRC_PORT> */
+ c1 = socket(v->domain, SOCK_STREAM, 0);
+ ASSERT_GE(c1, 0) TH_LOG("socket(): %m");
+ ASSERT_EQ(setsockopt(c1, SOL_IP, IP_BIND_ADDRESS_NO_PORT, &one, sizeof(one)), 0);
+
+ make_inet_addr(v->domain, v->src1_ip, 0, &addr);
+ ASSERT_EQ(bind(c1, &addr.sa, addr.len), 0) TH_LOG("bind(%s): %m", addr.str);
+
+ make_inet_addr(v->domain, v->dst_ip, DST_PORT, &addr);
+ ASSERT_EQ(connect(c1, &addr.sa, addr.len), 0) TH_LOG("connect(%s): %m", addr.str);
+ ASSERT_EQ(getsockname_port(c1), SRC_PORT);
+
+ /* Bind to <bind_ip>:<SRC_PORT>. Block the port from reuse. */
+ pb = socket(v->domain, SOCK_STREAM, 0);
+ ASSERT_GE(pb, 0) TH_LOG("socket(): %m");
+ ASSERT_EQ(setsockopt(pb, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)), 0);
+
+ make_inet_addr(v->domain, v->bind_ip, SRC_PORT, &addr);
+ ASSERT_EQ(bind(pb, &addr.sa, addr.len), 0) TH_LOG("bind(%s): %m", addr.str);
+
+ /* Try to connect from <src2_ip>:<SRC_PORT>. Expect failure. */
+ c2 = socket(v->domain, SOCK_STREAM, 0);
+ ASSERT_GE(c2, 0) TH_LOG("socket");
+ ASSERT_EQ(setsockopt(c2, SOL_IP, IP_BIND_ADDRESS_NO_PORT, &one, sizeof(one)), 0);
+
+ make_inet_addr(v->domain, v->src2_ip, 0, &addr);
+ ASSERT_EQ(bind(c2, &addr.sa, addr.len), 0) TH_LOG("bind(%s): %m", addr.str);
+
+ make_inet_addr(v->domain, v->dst_ip, DST_PORT, &addr);
+ ASSERT_EQ(connect(c2, &addr.sa, addr.len), -1) TH_LOG("connect(%s)", addr.str);
+ ASSERT_EQ(errno, EADDRNOTAVAIL) TH_LOG("%m");
+
+ /* Unbind from <bind_ip>:<SRC_PORT>. Unblock the port for reuse. */
+ ASSERT_EQ(close(pb), 0);
+
+ /* Connect again from <src2_ip>:<SRC_PORT> */
+ EXPECT_EQ(connect(c2, &addr.sa, addr.len), 0) TH_LOG("connect(%s): %m", addr.str);
+ EXPECT_EQ(getsockname_port(c2), SRC_PORT);
+
+ ASSERT_EQ(close(c2), 0);
+ ASSERT_EQ(close(c1), 0);
+ ASSERT_EQ(close(ln), 0);
+}
+
+/* Verify that a socket auto-bound during connect() blocks port reuse after
+ * disconnect (connect(AF_UNSPEC)) followed by an explicit port bind().
+ */
+TEST_F(tcp_port_share, port_block_after_disconnect)
+{
+ const typeof(variant) v = variant;
+ struct sockaddr_inet addr;
+ int c1, c2, ln, pb;
+
+ /* Listen on <dst_ip>:<DST_PORT> */
+ ln = socket(v->domain, SOCK_STREAM, 0);
+ ASSERT_GE(ln, 0) TH_LOG("socket(): %m");
+ ASSERT_EQ(setsockopt(ln, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)), 0);
+
+ make_inet_addr(v->domain, v->dst_ip, DST_PORT, &addr);
+ ASSERT_EQ(bind(ln, &addr.sa, addr.len), 0) TH_LOG("bind(%s): %m", addr.str);
+ ASSERT_EQ(listen(ln, 2), 0);
+
+ /* Connect from <src1_ip>:<SRC_PORT> */
+ c1 = socket(v->domain, SOCK_STREAM, 0);
+ ASSERT_GE(c1, 0) TH_LOG("socket(): %m");
+ ASSERT_EQ(setsockopt(c1, SOL_IP, IP_BIND_ADDRESS_NO_PORT, &one, sizeof(one)), 0);
+
+ make_inet_addr(v->domain, v->src1_ip, 0, &addr);
+ ASSERT_EQ(bind(c1, &addr.sa, addr.len), 0) TH_LOG("bind(%s): %m", addr.str);
+
+ make_inet_addr(v->domain, v->dst_ip, DST_PORT, &addr);
+ ASSERT_EQ(connect(c1, &addr.sa, addr.len), 0) TH_LOG("connect(%s): %m", addr.str);
+ ASSERT_EQ(getsockname_port(c1), SRC_PORT);
+
+ /* Disconnect the socket and bind it to <bind_ip>:<SRC_PORT> to block the port */
+ ASSERT_EQ(disconnect(c1), 0) TH_LOG("disconnect: %m");
+ ASSERT_EQ(setsockopt(c1, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)), 0);
+
+ make_inet_addr(v->domain, v->bind_ip, SRC_PORT, &addr);
+ ASSERT_EQ(bind(c1, &addr.sa, addr.len), 0) TH_LOG("bind(%s): %m", addr.str);
+
+ /* Trigger port-addr bucket state update with another bind() and close() */
+ pb = socket(v->domain, SOCK_STREAM, 0);
+ ASSERT_GE(pb, 0) TH_LOG("socket(): %m");
+ ASSERT_EQ(setsockopt(pb, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)), 0);
+
+ make_inet_addr(v->domain, v->bind_ip, SRC_PORT, &addr);
+ ASSERT_EQ(bind(pb, &addr.sa, addr.len), 0) TH_LOG("bind(%s): %m", addr.str);
+
+ ASSERT_EQ(close(pb), 0);
+
+ /* Connect from <src2_ip>:<SRC_PORT>. Expect failure. */
+ c2 = socket(v->domain, SOCK_STREAM, 0);
+ ASSERT_GE(c2, 0) TH_LOG("socket: %m");
+ ASSERT_EQ(setsockopt(c2, SOL_IP, IP_BIND_ADDRESS_NO_PORT, &one, sizeof(one)), 0);
+
+ make_inet_addr(v->domain, v->src2_ip, 0, &addr);
+ ASSERT_EQ(bind(c2, &addr.sa, addr.len), 0) TH_LOG("bind(%s): %m", addr.str);
+
+ make_inet_addr(v->domain, v->dst_ip, DST_PORT, &addr);
+ EXPECT_EQ(connect(c2, &addr.sa, addr.len), -1) TH_LOG("connect(%s)", addr.str);
+ EXPECT_EQ(errno, EADDRNOTAVAIL) TH_LOG("%m");
+
+ ASSERT_EQ(close(c2), 0);
+ ASSERT_EQ(close(c1), 0);
+ ASSERT_EQ(close(ln), 0);
+}
+
+TEST_HARNESS_MAIN
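The fixture makes autobind deterministic by collapsing the ephemeral range to the single SRC_PORT, so every connect() without an explicit source port lands on 40000. The equivalent shell setup, inside a throwaway namespace (the netns name is illustrative):

    ip netns exec throwaway sysctl -wq net.ipv4.ip_local_port_range="40000 40000"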
diff --git a/tools/testing/selftests/net/test_bridge_backup_port.sh b/tools/testing/selftests/net/test_bridge_backup_port.sh
index 1b3f89e2b86e..2a7224fe74f2 100755
--- a/tools/testing/selftests/net/test_bridge_backup_port.sh
+++ b/tools/testing/selftests/net/test_bridge_backup_port.sh
@@ -315,6 +315,29 @@ backup_port()
tc_check_packets $sw1 "dev vx0 egress" 101 1
log_test $? 0 "No forwarding out of vx0"
+ # Check that packets are forwarded out of vx0 when swp1 is
+ # administratively down and out of swp1 when it is administratively up
+ # again.
+ run_cmd "ip -n $sw1 link set dev swp1 down"
+ busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 disabled
+ log_test $? 0 "swp1 administratively down"
+
+ run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
+ tc_check_packets $sw1 "dev swp1 egress" 101 3
+ log_test $? 0 "No forwarding out of swp1"
+ tc_check_packets $sw1 "dev vx0 egress" 101 2
+ log_test $? 0 "Forwarding out of vx0"
+
+ run_cmd "ip -n $sw1 link set dev swp1 up"
+ busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 forwarding
+ log_test $? 0 "swp1 administratively up"
+
+ run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
+ tc_check_packets $sw1 "dev swp1 egress" 101 4
+ log_test $? 0 "Forwarding out of swp1"
+ tc_check_packets $sw1 "dev vx0 egress" 101 2
+ log_test $? 0 "No forwarding out of vx0"
+
# Remove vx0 as the backup port of swp1 and check that packets are no
# longer forwarded out of vx0 when swp1 does not have a carrier.
run_cmd "bridge -n $sw1 link set dev swp1 nobackup_port"
@@ -322,9 +345,9 @@ backup_port()
log_test $? 1 "vx0 not configured as backup port of swp1"
run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
- tc_check_packets $sw1 "dev swp1 egress" 101 4
+ tc_check_packets $sw1 "dev swp1 egress" 101 5
log_test $? 0 "Forwarding out of swp1"
- tc_check_packets $sw1 "dev vx0 egress" 101 1
+ tc_check_packets $sw1 "dev vx0 egress" 101 2
log_test $? 0 "No forwarding out of vx0"
run_cmd "ip -n $sw1 link set dev swp1 carrier off"
@@ -332,9 +355,9 @@ backup_port()
log_test $? 0 "swp1 carrier off"
run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
- tc_check_packets $sw1 "dev swp1 egress" 101 4
+ tc_check_packets $sw1 "dev swp1 egress" 101 5
log_test $? 0 "No forwarding out of swp1"
- tc_check_packets $sw1 "dev vx0 egress" 101 1
+ tc_check_packets $sw1 "dev vx0 egress" 101 2
log_test $? 0 "No forwarding out of vx0"
}
diff --git a/tools/testing/selftests/net/test_neigh.sh b/tools/testing/selftests/net/test_neigh.sh
new file mode 100755
index 000000000000..7c594bf6ead0
--- /dev/null
+++ b/tools/testing/selftests/net/test_neigh.sh
@@ -0,0 +1,366 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source lib.sh
+TESTS="
+ extern_valid_ipv4
+ extern_valid_ipv6
+"
+VERBOSE=0
+
+################################################################################
+# Utilities
+
+run_cmd()
+{
+ local cmd="$1"
+ local out
+ local stderr="2>/dev/null"
+
+ if [ "$VERBOSE" = "1" ]; then
+ echo "COMMAND: $cmd"
+ stderr=
+ fi
+
+ out=$(eval "$cmd" "$stderr")
+ rc=$?
+ if [ "$VERBOSE" -eq 1 ] && [ -n "$out" ]; then
+ echo " $out"
+ fi
+
+ return $rc
+}
+
+################################################################################
+# Setup
+
+setup()
+{
+ set -e
+
+ setup_ns ns1 ns2
+
+ ip -n "$ns1" link add veth0 type veth peer name veth1 netns "$ns2"
+ ip -n "$ns1" link set dev veth0 up
+ ip -n "$ns2" link set dev veth1 up
+
+ ip -n "$ns1" address add 192.0.2.1/24 dev veth0
+ ip -n "$ns1" address add 2001:db8:1::1/64 dev veth0 nodad
+ ip -n "$ns2" address add 192.0.2.2/24 dev veth1
+ ip -n "$ns2" address add 2001:db8:1::2/64 dev veth1 nodad
+
+ ip netns exec "$ns1" sysctl -qw net.ipv6.conf.all.keep_addr_on_down=1
+ ip netns exec "$ns2" sysctl -qw net.ipv6.conf.all.keep_addr_on_down=1
+
+ sleep 5
+
+ set +e
+}
+
+exit_cleanup_all()
+{
+ cleanup_all_ns
+ exit "${EXIT_STATUS}"
+}
+
+################################################################################
+# Tests
+
+extern_valid_common()
+{
+ local af_str=$1; shift
+ local ip_addr=$1; shift
+ local tbl_name=$1; shift
+ local subnet=$1; shift
+ local mac
+
+ mac=$(ip -n "$ns2" -j link show dev veth1 | jq -r '.[]["address"]')
+
+ RET=0
+
+ # Check that simple addition works.
+ run_cmd "ip -n $ns1 neigh add $ip_addr lladdr $mac nud stale dev veth0 extern_valid"
+ run_cmd "ip -n $ns1 neigh get $ip_addr dev veth0 | grep \"extern_valid\""
+ check_err $? "No \"extern_valid\" flag after addition"
+
+ log_test "$af_str \"extern_valid\" flag: Add entry"
+
+ RET=0
+
+ # Check that an entry cannot be added with "extern_valid" flag and an
+ # invalid state.
+ run_cmd "ip -n $ns1 neigh flush dev veth0"
+ run_cmd "ip -n $ns1 neigh add $ip_addr nud none dev veth0 extern_valid"
+ check_fail $? "Managed to add an entry with \"extern_valid\" flag and an invalid state"
+
+ log_test "$af_str \"extern_valid\" flag: Add with an invalid state"
+
+ RET=0
+
+ # Check that entry cannot be added with both "extern_valid" flag and
+ # "use" / "managed" flag.
+ run_cmd "ip -n $ns1 neigh flush dev veth0"
+ run_cmd "ip -n $ns1 neigh add $ip_addr lladdr $mac nud stale dev veth0 extern_valid use"
+ check_fail $? "Managed to add an entry with \"extern_valid\" flag and \"use\" flag"
+
+ log_test "$af_str \"extern_valid\" flag: Add with \"use\" flag"
+
+ RET=0
+
+ # Check that "extern_valid" flag can be toggled using replace.
+ run_cmd "ip -n $ns1 neigh flush dev veth0"
+ run_cmd "ip -n $ns1 neigh add $ip_addr lladdr $mac nud stale dev veth0"
+ run_cmd "ip -n $ns1 neigh replace $ip_addr lladdr $mac nud stale dev veth0 extern_valid"
+ run_cmd "ip -n $ns1 neigh get $ip_addr dev veth0 | grep \"extern_valid\""
+ check_err $? "Did not manage to set \"extern_valid\" flag with replace"
+ run_cmd "ip -n $ns1 neigh replace $ip_addr lladdr $mac nud stale dev veth0"
+ run_cmd "ip -n $ns1 neigh get $ip_addr dev veth0 | grep \"extern_valid\""
+ check_fail $? "Did not manage to clear \"extern_valid\" flag with replace"
+
+ log_test "$af_str \"extern_valid\" flag: Replace entry"
+
+ RET=0
+
+ # Check that an existing "extern_valid" entry can be marked as
+ # "managed".
+ run_cmd "ip -n $ns1 neigh flush dev veth0"
+ run_cmd "ip -n $ns1 neigh add $ip_addr lladdr $mac nud stale dev veth0 extern_valid"
+ run_cmd "ip -n $ns1 neigh replace $ip_addr lladdr $mac nud stale dev veth0 extern_valid managed"
+ check_err $? "Did not manage to add \"managed\" flag to an existing \"extern_valid\" entry"
+
+ log_test "$af_str \"extern_valid\" flag: Replace entry with \"managed\" flag"
+
+ RET=0
+
+ # Check that entry cannot be replaced with "extern_valid" flag and an
+ # invalid state.
+ run_cmd "ip -n $ns1 neigh flush dev veth0"
+ run_cmd "ip -n $ns1 neigh add $ip_addr lladdr $mac nud stale dev veth0 extern_valid"
+ run_cmd "ip -n $ns1 neigh replace $ip_addr nud none dev veth0 extern_valid"
+ check_fail $? "Managed to replace an entry with \"extern_valid\" flag and an invalid state"
+
+ log_test "$af_str \"extern_valid\" flag: Replace with an invalid state"
+
+ RET=0
+
+ # Check that an "extern_valid" entry is flushed when the interface is
+ # put administratively down.
+ run_cmd "ip -n $ns1 neigh flush dev veth0"
+ run_cmd "ip -n $ns1 neigh add $ip_addr lladdr $mac nud stale dev veth0 extern_valid"
+ run_cmd "ip -n $ns1 link set dev veth0 down"
+ run_cmd "ip -n $ns1 link set dev veth0 up"
+ run_cmd "ip -n $ns1 neigh get $ip_addr dev veth0"
+ check_fail $? "\"extern_valid\" entry not flushed upon interface down"
+
+ log_test "$af_str \"extern_valid\" flag: Interface down"
+
+ RET=0
+
+ # Check that an "extern_valid" entry is not flushed when the interface
+ # loses its carrier.
+ run_cmd "ip -n $ns1 neigh flush dev veth0"
+ run_cmd "ip -n $ns1 neigh add $ip_addr lladdr $mac nud stale dev veth0 extern_valid"
+ run_cmd "ip -n $ns2 link set dev veth1 down"
+ run_cmd "ip -n $ns2 link set dev veth1 up"
+ run_cmd "sleep 2"
+ run_cmd "ip -n $ns1 neigh get $ip_addr dev veth0"
+ check_err $? "\"extern_valid\" entry flushed upon carrier down"
+
+ log_test "$af_str \"extern_valid\" flag: Carrier down"
+
+ RET=0
+
+ # Check that when entry transitions to "reachable" state it maintains
+ # the "extern_valid" flag. Wait "delay_probe" seconds for ARP request /
+ # NS to be sent.
+ local delay_probe
+
+ delay_probe=$(ip -n "$ns1" -j ntable show dev veth0 name "$tbl_name" | jq '.[]["delay_probe"]')
+ run_cmd "ip -n $ns1 neigh flush dev veth0"
+ run_cmd "ip -n $ns1 neigh add $ip_addr lladdr $mac nud stale dev veth0 extern_valid"
+ run_cmd "ip -n $ns1 neigh replace $ip_addr lladdr $mac nud stale dev veth0 extern_valid use"
+ run_cmd "sleep $((delay_probe / 1000 + 2))"
+ run_cmd "ip -n $ns1 neigh get $ip_addr dev veth0 | grep \"REACHABLE\""
+ check_err $? "Entry did not transition to \"reachable\" state"
+ run_cmd "ip -n $ns1 neigh get $ip_addr dev veth0 | grep \"extern_valid\""
+ check_err $? "Entry did not maintain \"extern_valid\" flag after transition to \"reachable\" state"
+
+ log_test "$af_str \"extern_valid\" flag: Transition to \"reachable\" state"
+
+ RET=0
+
+ # Drop all packets, trigger resolution and check that entry goes back
+ # to "stale" state instead of "failed".
+ local mcast_reprobes
+ local retrans_time
+ local ucast_probes
+ local app_probes
+ local probes
+ local delay
+
+ run_cmd "ip -n $ns1 neigh flush dev veth0"
+ run_cmd "tc -n $ns2 qdisc add dev veth1 clsact"
+ run_cmd "tc -n $ns2 filter add dev veth1 ingress proto all matchall action drop"
+ run_cmd "ip -n $ns1 neigh add $ip_addr lladdr $mac nud stale dev veth0 extern_valid"
+ run_cmd "ip -n $ns1 neigh replace $ip_addr lladdr $mac nud stale dev veth0 extern_valid use"
+ retrans_time=$(ip -n "$ns1" -j ntable show dev veth0 name "$tbl_name" | jq '.[]["retrans"]')
+ ucast_probes=$(ip -n "$ns1" -j ntable show dev veth0 name "$tbl_name" | jq '.[]["ucast_probes"]')
+ app_probes=$(ip -n "$ns1" -j ntable show dev veth0 name "$tbl_name" | jq '.[]["app_probes"]')
+ mcast_reprobes=$(ip -n "$ns1" -j ntable show dev veth0 name "$tbl_name" | jq '.[]["mcast_reprobes"]')
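+	# Upper bound on resolution time: the "delay_probe" wait plus one
+	# "retrans_time" interval for each unicast, application and multicast
+	# probe.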
+ delay=$((delay_probe + (ucast_probes + app_probes + mcast_reprobes) * retrans_time))
+ run_cmd "sleep $((delay / 1000 + 2))"
+ run_cmd "ip -n $ns1 neigh get $ip_addr dev veth0 | grep \"STALE\""
+ check_err $? "Entry did not return to \"stale\" state"
+ run_cmd "ip -n $ns1 neigh get $ip_addr dev veth0 | grep \"extern_valid\""
+ check_err $? "Entry did not maintain \"extern_valid\" flag after returning to \"stale\" state"
+ probes=$(ip -n "$ns1" -j -s neigh get "$ip_addr" dev veth0 | jq '.[]["probes"]')
+ if [[ $probes -eq 0 ]]; then
+ check_err 1 "No probes were sent"
+ fi
+
+ log_test "$af_str \"extern_valid\" flag: Transition back to \"stale\" state"
+
+ run_cmd "tc -n $ns2 qdisc del dev veth1 clsact"
+
+ RET=0
+
+ # Forced garbage collection runs whenever the number of entries is
+ # larger than "thresh3" and deletes stale entries that have not been
+ # updated in the last 5 seconds.
+ #
+ # Check that an "extern_valid" entry survives a forced garbage
+ # collection. Add an entry, wait 5 seconds and add more entries than
+ # "thresh3" so that forced garbage collection will run.
+ #
+ # Note that the garbage collection thresholds are global resources and
+ # that changes in the initial namespace affect all the namespaces.
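+	#
+	# With "thresh3" lowered to 10 below, adding 20 entries is guaranteed
+	# to push the table over the threshold and trigger a forced run.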
+ local forced_gc_runs_t0
+ local forced_gc_runs_t1
+ local orig_thresh1
+ local orig_thresh2
+ local orig_thresh3
+
+ run_cmd "ip -n $ns1 neigh flush dev veth0"
+ orig_thresh1=$(ip -j ntable show name "$tbl_name" | jq '.[] | select(has("thresh1")) | .["thresh1"]')
+ orig_thresh2=$(ip -j ntable show name "$tbl_name" | jq '.[] | select(has("thresh2")) | .["thresh2"]')
+ orig_thresh3=$(ip -j ntable show name "$tbl_name" | jq '.[] | select(has("thresh3")) | .["thresh3"]')
+ run_cmd "ip ntable change name $tbl_name thresh3 10 thresh2 9 thresh1 8"
+ run_cmd "ip -n $ns1 neigh add $ip_addr lladdr $mac nud stale dev veth0 extern_valid"
+ run_cmd "ip -n $ns1 neigh add ${subnet}3 lladdr $mac nud stale dev veth0"
+ run_cmd "sleep 5"
+ forced_gc_runs_t0=$(ip -j -s ntable show name "$tbl_name" | jq '.[] | select(has("forced_gc_runs")) | .["forced_gc_runs"]')
+ for i in {1..20}; do
+ run_cmd "ip -n $ns1 neigh add ${subnet}$((i + 4)) nud none dev veth0"
+ done
+ forced_gc_runs_t1=$(ip -j -s ntable show name "$tbl_name" | jq '.[] | select(has("forced_gc_runs")) | .["forced_gc_runs"]')
+ if [[ $forced_gc_runs_t1 -eq $forced_gc_runs_t0 ]]; then
+ check_err 1 "Forced garbage collection did not run"
+ fi
+ run_cmd "ip -n $ns1 neigh get $ip_addr dev veth0 | grep \"extern_valid\""
+ check_err $? "Entry with \"extern_valid\" flag did not survive forced garbage collection"
+ run_cmd "ip -n $ns1 neigh get ${subnet}3 dev veth0"
+ check_fail $? "Entry without \"extern_valid\" flag survived forced garbage collection"
+
+ log_test "$af_str \"extern_valid\" flag: Forced garbage collection"
+
+ run_cmd "ip ntable change name $tbl_name thresh3 $orig_thresh3 thresh2 $orig_thresh2 thresh1 $orig_thresh1"
+
+ RET=0
+
+ # Periodic garbage collection runs every "base_reachable"/2 seconds and
+ # if the number of entries is larger than "thresh1", then it deletes
+ # stale entries that have not been used in the last "gc_stale" seconds.
+ #
+ # Check that an "extern_valid" entry survives a periodic garbage
+ # collection. Add an "extern_valid" entry, add more than "thresh1"
+ # regular entries, wait "base_reachable" (longer than "gc_stale")
+ # seconds and check that the "extern_valid" entry was not deleted.
+ #
+ # Note that the garbage collection thresholds and "base_reachable" are
+ # global resources and that changes in the initial namespace affect all
+ # the namespaces.
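+	#
+	# With "base_reachable" set to 10000 ms below, periodic garbage
+	# collection should run roughly every 5 seconds, so the 10 second
+	# sleep covers at least one pass.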
+ local periodic_gc_runs_t0
+ local periodic_gc_runs_t1
+ local orig_base_reachable
+ local orig_gc_stale
+
+ run_cmd "ip -n $ns1 neigh flush dev veth0"
+ orig_thresh1=$(ip -j ntable show name "$tbl_name" | jq '.[] | select(has("thresh1")) | .["thresh1"]')
+ orig_base_reachable=$(ip -j ntable show name "$tbl_name" | jq '.[] | select(has("thresh1")) | .["base_reachable"]')
+ run_cmd "ip ntable change name $tbl_name thresh1 10 base_reachable 10000"
+ orig_gc_stale=$(ip -n "$ns1" -j ntable show name "$tbl_name" dev veth0 | jq '.[]["gc_stale"]')
+ run_cmd "ip -n $ns1 ntable change name $tbl_name dev veth0 gc_stale 1000"
+ run_cmd "ip -n $ns1 neigh add $ip_addr lladdr $mac nud stale dev veth0 extern_valid"
+ run_cmd "ip -n $ns1 neigh add ${subnet}3 lladdr $mac nud stale dev veth0"
+ # Wait orig_base_reachable/2 for the new interval to take effect.
+ run_cmd "sleep $(((orig_base_reachable / 1000) / 2 + 2))"
+ for i in {1..20}; do
+ run_cmd "ip -n $ns1 neigh add ${subnet}$((i + 4)) nud none dev veth0"
+ done
+ periodic_gc_runs_t0=$(ip -j -s ntable show name "$tbl_name" | jq '.[] | select(has("periodic_gc_runs")) | .["periodic_gc_runs"]')
+ run_cmd "sleep 10"
+ periodic_gc_runs_t1=$(ip -j -s ntable show name "$tbl_name" | jq '.[] | select(has("periodic_gc_runs")) | .["periodic_gc_runs"]')
+ [[ $periodic_gc_runs_t1 -ne $periodic_gc_runs_t0 ]]
+ check_err $? "Periodic garbage collection did not run"
+ run_cmd "ip -n $ns1 neigh get $ip_addr dev veth0 | grep \"extern_valid\""
+ check_err $? "Entry with \"extern_valid\" flag did not survive periodic garbage collection"
+ run_cmd "ip -n $ns1 neigh get ${subnet}3 dev veth0"
+ check_fail $? "Entry without \"extern_valid\" flag survived periodic garbage collection"
+
+ log_test "$af_str \"extern_valid\" flag: Periodic garbage collection"
+
+ run_cmd "ip -n $ns1 ntable change name $tbl_name dev veth0 gc_stale $orig_gc_stale"
+ run_cmd "ip ntable change name $tbl_name thresh1 $orig_thresh1 base_reachable $orig_base_reachable"
+}
+
+extern_valid_ipv4()
+{
+ extern_valid_common "IPv4" 192.0.2.2 "arp_cache" 192.0.2.
+}
+
+extern_valid_ipv6()
+{
+ extern_valid_common "IPv6" 2001:db8:1::2 "ndisc_cache" 2001:db8:1::
+}
+
+################################################################################
+# Usage
+
+usage()
+{
+ cat <<EOF
+usage: ${0##*/} OPTS
+
+ -t <test> Test(s) to run (default: all)
+ (options: $TESTS)
+ -p Pause on fail
+ -v Verbose mode (show commands and output)
+EOF
+}
+
+################################################################################
+# Main
+
+while getopts ":t:pvh" opt; do
+ case $opt in
+ t) TESTS=$OPTARG;;
+ p) PAUSE_ON_FAIL=yes;;
+ v) VERBOSE=$((VERBOSE + 1));;
+ h) usage; exit 0;;
+ *) usage; exit 1;;
+ esac
+done
+
+require_command jq
+
+if ! ip neigh help 2>&1 | grep -q "extern_valid"; then
+ echo "SKIP: iproute2 ip too old, missing \"extern_valid\" support"
+ exit "$ksft_skip"
+fi
+
+trap exit_cleanup_all EXIT
+
+for t in $TESTS
+do
+ setup; $t; cleanup_all_ns;
+done
diff --git a/tools/testing/selftests/net/test_vxlan_fdb_changelink.sh b/tools/testing/selftests/net/test_vxlan_fdb_changelink.sh
index 062f957950af..8b414d0edada 100755
--- a/tools/testing/selftests/net/test_vxlan_fdb_changelink.sh
+++ b/tools/testing/selftests/net/test_vxlan_fdb_changelink.sh
@@ -21,7 +21,7 @@ test_set_remote()
{
RET=0
- ip_link_add vx up type vxlan id 2000 dstport 4789
+ adf_ip_link_add vx up type vxlan id 2000 dstport 4789
bridge fdb ap dev vx 00:00:00:00:00:00 dst 192.0.2.20 self permanent
bridge fdb ap dev vx 00:00:00:00:00:00 dst 192.0.2.30 self permanent
check_remotes "fdb append"
@@ -74,12 +74,12 @@ test_change_mc_remote()
{
check_command netstat || return
- ip_link_add v1 up type veth peer name v2
- ip_link_set_up v2
+ adf_ip_link_add v1 up type veth peer name v2
+ adf_ip_link_set_up v2
RET=0
- ip_link_add vx up type vxlan dstport 4789 \
+ adf_ip_link_add vx up type vxlan dstport 4789 \
local 192.0.2.1 $(fmt_remote 224.1.1.1) dev v1 vni 1000
check_membership "group=224.1.1.1 fail=0" \
diff --git a/tools/testing/selftests/net/test_vxlan_nh.sh b/tools/testing/selftests/net/test_vxlan_nh.sh
new file mode 100755
index 000000000000..20f3369f776b
--- /dev/null
+++ b/tools/testing/selftests/net/test_vxlan_nh.sh
@@ -0,0 +1,223 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source lib.sh
+TESTS="
+ basic_tx_ipv4
+ basic_tx_ipv6
+ learning
+ proxy_ipv4
+ proxy_ipv6
+"
+VERBOSE=0
+
+################################################################################
+# Utilities
+
+run_cmd()
+{
+ local cmd="$1"
+ local out
+ local stderr="2>/dev/null"
+
+ if [ "$VERBOSE" = "1" ]; then
+ echo "COMMAND: $cmd"
+ stderr=
+ fi
+
+ out=$(eval "$cmd" "$stderr")
+ rc=$?
+ if [ "$VERBOSE" -eq 1 ] && [ -n "$out" ]; then
+ echo " $out"
+ fi
+
+ return $rc
+}
+
+################################################################################
+# Cleanup
+
+exit_cleanup_all()
+{
+ cleanup_all_ns
+ exit "${EXIT_STATUS}"
+}
+
+################################################################################
+# Tests
+
+nh_stats_get()
+{
+ ip -n "$ns1" -s -j nexthop show id 10 | jq ".[][\"group_stats\"][][\"packets\"]"
+}
+
+tc_stats_get()
+{
+ tc_rule_handle_stats_get "dev dummy1 egress" 101 ".packets" "-n $ns1"
+}
+
+basic_tx_common()
+{
+ local af_str=$1; shift
+ local proto=$1; shift
+ local local_addr=$1; shift
+ local plen=$1; shift
+ local remote_addr=$1; shift
+
+ RET=0
+
+ # Test basic Tx functionality. Check that stats are incremented on
+ # both the FDB nexthop group and the egress device.
+
+ run_cmd "ip -n $ns1 link add name dummy1 up type dummy"
+ run_cmd "ip -n $ns1 route add $remote_addr/$plen dev dummy1"
+ run_cmd "tc -n $ns1 qdisc add dev dummy1 clsact"
+ run_cmd "tc -n $ns1 filter add dev dummy1 egress proto $proto pref 1 handle 101 flower ip_proto udp dst_ip $remote_addr dst_port 4789 action pass"
+
+ run_cmd "ip -n $ns1 address add $local_addr/$plen dev lo"
+
+ run_cmd "ip -n $ns1 nexthop add id 1 via $remote_addr fdb"
+ run_cmd "ip -n $ns1 nexthop add id 10 group 1 fdb"
+
+ run_cmd "ip -n $ns1 link add name vx0 up type vxlan id 10010 local $local_addr dstport 4789"
+ run_cmd "bridge -n $ns1 fdb add 00:11:22:33:44:55 dev vx0 self static nhid 10"
+
+ run_cmd "ip netns exec $ns1 mausezahn vx0 -a own -b 00:11:22:33:44:55 -c 1 -q"
+
+ busywait "$BUSYWAIT_TIMEOUT" until_counter_is "== 1" nh_stats_get > /dev/null
+ check_err $? "FDB nexthop group stats did not increase"
+
+ busywait "$BUSYWAIT_TIMEOUT" until_counter_is "== 1" tc_stats_get > /dev/null
+ check_err $? "tc filter stats did not increase"
+
+ log_test "VXLAN FDB nexthop: $af_str basic Tx"
+}
+
+basic_tx_ipv4()
+{
+ basic_tx_common "IPv4" ipv4 192.0.2.1 32 192.0.2.2
+}
+
+basic_tx_ipv6()
+{
+ basic_tx_common "IPv6" ipv6 2001:db8:1::1 128 2001:db8:1::2
+}
+
+learning()
+{
+ RET=0
+
+ # When learning is enabled on the VXLAN device, an incoming packet
+ # might try to refresh an FDB entry that points to an FDB nexthop group
+ # instead of an ordinary remote destination. Check that the kernel does
+ # not crash in this situation.
+
+ run_cmd "ip -n $ns1 address add 192.0.2.1/32 dev lo"
+ run_cmd "ip -n $ns1 address add 192.0.2.2/32 dev lo"
+
+ run_cmd "ip -n $ns1 nexthop add id 1 via 192.0.2.3 fdb"
+ run_cmd "ip -n $ns1 nexthop add id 10 group 1 fdb"
+
+ run_cmd "ip -n $ns1 link add name vx0 up type vxlan id 10010 local 192.0.2.1 dstport 12345 localbypass"
+ run_cmd "ip -n $ns1 link add name vx1 up type vxlan id 10020 local 192.0.2.2 dstport 54321 learning"
+
+ run_cmd "bridge -n $ns1 fdb add 00:11:22:33:44:55 dev vx0 self static dst 192.0.2.2 port 54321 vni 10020"
+ run_cmd "bridge -n $ns1 fdb add 00:aa:bb:cc:dd:ee dev vx1 self static nhid 10"
+
+ run_cmd "ip netns exec $ns1 mausezahn vx0 -a 00:aa:bb:cc:dd:ee -b 00:11:22:33:44:55 -c 1 -q"
+
+ log_test "VXLAN FDB nexthop: learning"
+}
+
+proxy_common()
+{
+ local af_str=$1; shift
+ local local_addr=$1; shift
+ local plen=$1; shift
+ local remote_addr=$1; shift
+ local neigh_addr=$1; shift
+ local ping_cmd=$1; shift
+
+ RET=0
+
+ # When the "proxy" option is enabled on the VXLAN device, the device
+ # will suppress ARP requests and IPv6 Neighbor Solicitation messages if
+	# it is able to reply on behalf of the remote host. That is, if a
+	# matching and valid neighbor entry whose MAC address is not behind the
+	# "any" remote (0.0.0.0 / ::) is configured on the VXLAN device. The
+ # FDB entry for the neighbor's MAC address might point to an FDB
+ # nexthop group instead of an ordinary remote destination. Check that
+ # the kernel does not crash in this situation.
+
+ run_cmd "ip -n $ns1 address add $local_addr/$plen dev lo"
+
+ run_cmd "ip -n $ns1 nexthop add id 1 via $remote_addr fdb"
+ run_cmd "ip -n $ns1 nexthop add id 10 group 1 fdb"
+
+ run_cmd "ip -n $ns1 link add name vx0 up type vxlan id 10010 local $local_addr dstport 4789 proxy"
+
+ run_cmd "ip -n $ns1 neigh add $neigh_addr lladdr 00:11:22:33:44:55 nud perm dev vx0"
+
+ run_cmd "bridge -n $ns1 fdb add 00:11:22:33:44:55 dev vx0 self static nhid 10"
+
+ run_cmd "ip netns exec $ns1 $ping_cmd"
+
+ log_test "VXLAN FDB nexthop: $af_str proxy"
+}
+
+proxy_ipv4()
+{
+ proxy_common "IPv4" 192.0.2.1 32 192.0.2.2 192.0.2.3 \
+ "arping -b -c 1 -s 192.0.2.1 -I vx0 192.0.2.3"
+}
+
+proxy_ipv6()
+{
+ proxy_common "IPv6" 2001:db8:1::1 128 2001:db8:1::2 2001:db8:1::3 \
+ "ndisc6 -r 1 -s 2001:db8:1::1 -w 1 2001:db8:1::3 vx0"
+}
+
+################################################################################
+# Usage
+
+usage()
+{
+ cat <<EOF
+usage: ${0##*/} OPTS
+
+ -t <test> Test(s) to run (default: all)
+ (options: $TESTS)
+ -p Pause on fail
+ -v Verbose mode (show commands and output)
+EOF
+}
+
+################################################################################
+# Main
+
+while getopts ":t:pvh" opt; do
+ case $opt in
+ t) TESTS=$OPTARG;;
+ p) PAUSE_ON_FAIL=yes;;
+ v) VERBOSE=$((VERBOSE + 1));;
+ h) usage; exit 0;;
+ *) usage; exit 1;;
+ esac
+done
+
+require_command mausezahn
+require_command arping
+require_command ndisc6
+require_command jq
+
+if ! ip nexthop help 2>&1 | grep -q "stats"; then
+ echo "SKIP: iproute2 ip too old, missing nexthop stats support"
+ exit "$ksft_skip"
+fi
+
+trap exit_cleanup_all EXIT
+
+for t in $TESTS
+do
+ setup_ns ns1; $t; cleanup_all_ns;
+done
diff --git a/tools/testing/selftests/net/test_vxlan_vnifiltering.sh b/tools/testing/selftests/net/test_vxlan_vnifiltering.sh
index 6127a78ee988..8deacc565afa 100755
--- a/tools/testing/selftests/net/test_vxlan_vnifiltering.sh
+++ b/tools/testing/selftests/net/test_vxlan_vnifiltering.sh
@@ -146,18 +146,17 @@ run_cmd()
}
check_hv_connectivity() {
- ip netns exec $hv_1 ping -c 1 -W 1 $1 &>/dev/null
- sleep 1
- ip netns exec $hv_1 ping -c 1 -W 1 $2 &>/dev/null
+ slowwait 5 ip netns exec $hv_1 ping -c 1 -W 1 $1 &>/dev/null
+ slowwait 5 ip netns exec $hv_1 ping -c 1 -W 1 $2 &>/dev/null
return $?
}
check_vm_connectivity() {
- run_cmd "ip netns exec $vm_11 ping -c 1 -W 1 10.0.10.12"
+ slowwait 5 run_cmd "ip netns exec $vm_11 ping -c 1 -W 1 10.0.10.12"
log_test $? 0 "VM connectivity over $1 (ipv4 default rdst)"
- run_cmd "ip netns exec $vm_21 ping -c 1 -W 1 10.0.10.22"
+ slowwait 5 run_cmd "ip netns exec $vm_21 ping -c 1 -W 1 10.0.10.22"
log_test $? 0 "VM connectivity over $1 (ipv6 default rdst)"
}
diff --git a/tools/testing/selftests/net/tfo.c b/tools/testing/selftests/net/tfo.c
new file mode 100644
index 000000000000..eb3cac5e583c
--- /dev/null
+++ b/tools/testing/selftests/net/tfo.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <error.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <arpa/inet.h>
+#include <sys/socket.h>
+#include <netinet/tcp.h>
+#include <errno.h>
+
+static int cfg_server;
+static int cfg_client;
+static int cfg_port = 8000;
+static struct sockaddr_in6 cfg_addr;
+static char *cfg_outfile;
+
+static int parse_address(const char *str, int port, struct sockaddr_in6 *sin6)
+{
+ int ret;
+
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_port = htons(port);
+
+ ret = inet_pton(sin6->sin6_family, str, &sin6->sin6_addr);
+ if (ret != 1) {
+ /* fallback to plain IPv4 */
+ ret = inet_pton(AF_INET, str, &sin6->sin6_addr.s6_addr32[3]);
+ if (ret != 1)
+ return -1;
+
+ /* add ::ffff prefix */
+ sin6->sin6_addr.s6_addr32[0] = 0;
+ sin6->sin6_addr.s6_addr32[1] = 0;
+ sin6->sin6_addr.s6_addr16[4] = 0;
+ sin6->sin6_addr.s6_addr16[5] = 0xffff;
+ }
+
+ return 0;
+}
+
+static void run_server(void)
+{
+ unsigned long qlen = 32;
+ int fd, opt, connfd;
+ socklen_t len;
+ char buf[64];
+ FILE *outfile;
+
+ outfile = fopen(cfg_outfile, "w");
+ if (!outfile)
+ error(1, errno, "fopen() outfile");
+
+ fd = socket(AF_INET6, SOCK_STREAM, 0);
+ if (fd == -1)
+ error(1, errno, "socket()");
+
+ opt = 1;
+ if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)) < 0)
+ error(1, errno, "setsockopt(SO_REUSEADDR)");
+
+ if (setsockopt(fd, SOL_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen)) < 0)
+ error(1, errno, "setsockopt(TCP_FASTOPEN)");
+
+ if (bind(fd, (struct sockaddr *)&cfg_addr, sizeof(cfg_addr)) < 0)
+ error(1, errno, "bind()");
+
+ if (listen(fd, 5) < 0)
+ error(1, errno, "listen()");
+
+ len = sizeof(cfg_addr);
+ connfd = accept(fd, (struct sockaddr *)&cfg_addr, &len);
+ if (connfd < 0)
+ error(1, errno, "accept()");
+
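+	/*
+	 * SO_INCOMING_NAPI_ID reports the NAPI instance that handled the
+	 * incoming connection; 0 means no NAPI ID was recorded.
+	 */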
+ len = sizeof(opt);
+ if (getsockopt(connfd, SOL_SOCKET, SO_INCOMING_NAPI_ID, &opt, &len) < 0)
+ error(1, errno, "getsockopt(SO_INCOMING_NAPI_ID)");
+
+ read(connfd, buf, 64);
+ fprintf(outfile, "%d\n", opt);
+
+ fclose(outfile);
+ close(connfd);
+ close(fd);
+}
+
+static void run_client(void)
+{
+ int fd;
+ char *msg = "Hello, world!";
+
+ fd = socket(AF_INET6, SOCK_STREAM, 0);
+ if (fd == -1)
+ error(1, errno, "socket()");
+
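+	/* MSG_FASTOPEN performs an implicit connect() and, when TFO is
+	 * available, carries the payload in the SYN.
+	 */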
+ sendto(fd, msg, strlen(msg), MSG_FASTOPEN, (struct sockaddr *)&cfg_addr, sizeof(cfg_addr));
+
+ close(fd);
+}
+
+static void usage(const char *filepath)
+{
+ error(1, 0, "Usage: %s (-s|-c) -h<server_ip> -p<port> -o<outfile> ", filepath);
+}
+
+static void parse_opts(int argc, char **argv)
+{
+ struct sockaddr_in6 *addr6 = (void *) &cfg_addr;
+ char *addr = NULL;
+ int ret;
+ int c;
+
+ if (argc <= 1)
+ usage(argv[0]);
+
+ while ((c = getopt(argc, argv, "sch:p:o:")) != -1) {
+ switch (c) {
+ case 's':
+ if (cfg_client)
+ error(1, 0, "Pass one of -s or -c");
+ cfg_server = 1;
+ break;
+ case 'c':
+ if (cfg_server)
+ error(1, 0, "Pass one of -s or -c");
+ cfg_client = 1;
+ break;
+ case 'h':
+ addr = optarg;
+ break;
+ case 'p':
+ cfg_port = strtoul(optarg, NULL, 0);
+ break;
+ case 'o':
+ cfg_outfile = strdup(optarg);
+ if (!cfg_outfile)
+ error(1, 0, "outfile invalid");
+ break;
+ }
+ }
+
+ if (cfg_server && addr)
+ error(1, 0, "Server cannot have -h specified");
+
+ memset(addr6, 0, sizeof(*addr6));
+ addr6->sin6_family = AF_INET6;
+ addr6->sin6_port = htons(cfg_port);
+ addr6->sin6_addr = in6addr_any;
+ if (addr) {
+ ret = parse_address(addr, cfg_port, addr6);
+ if (ret)
+ error(1, 0, "Client address parse error: %s", addr);
+ }
+}
+
+int main(int argc, char **argv)
+{
+ parse_opts(argc, argv);
+
+ if (cfg_server)
+ run_server();
+ else if (cfg_client)
+ run_client();
+
+ return 0;
+}
diff --git a/tools/testing/selftests/net/tfo_passive.sh b/tools/testing/selftests/net/tfo_passive.sh
new file mode 100755
index 000000000000..a4550511830a
--- /dev/null
+++ b/tools/testing/selftests/net/tfo_passive.sh
@@ -0,0 +1,112 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+source lib.sh
+
+NSIM_SV_ID=$((256 + RANDOM % 256))
+NSIM_SV_SYS=/sys/bus/netdevsim/devices/netdevsim$NSIM_SV_ID
+NSIM_CL_ID=$((512 + RANDOM % 256))
+NSIM_CL_SYS=/sys/bus/netdevsim/devices/netdevsim$NSIM_CL_ID
+
+NSIM_DEV_SYS_NEW=/sys/bus/netdevsim/new_device
+NSIM_DEV_SYS_DEL=/sys/bus/netdevsim/del_device
+NSIM_DEV_SYS_LINK=/sys/bus/netdevsim/link_device
+NSIM_DEV_SYS_UNLINK=/sys/bus/netdevsim/unlink_device
+
+SERVER_IP=192.168.1.1
+CLIENT_IP=192.168.1.2
+SERVER_PORT=48675
+
+setup_ns()
+{
+ set -e
+ ip netns add nssv
+ ip netns add nscl
+
+ NSIM_SV_NAME=$(find $NSIM_SV_SYS/net -maxdepth 1 -type d ! \
+ -path $NSIM_SV_SYS/net -exec basename {} \;)
+ NSIM_CL_NAME=$(find $NSIM_CL_SYS/net -maxdepth 1 -type d ! \
+ -path $NSIM_CL_SYS/net -exec basename {} \;)
+
+ ip link set $NSIM_SV_NAME netns nssv
+ ip link set $NSIM_CL_NAME netns nscl
+
+ ip netns exec nssv ip addr add "${SERVER_IP}/24" dev $NSIM_SV_NAME
+ ip netns exec nscl ip addr add "${CLIENT_IP}/24" dev $NSIM_CL_NAME
+
+ ip netns exec nssv ip link set dev $NSIM_SV_NAME up
+ ip netns exec nscl ip link set dev $NSIM_CL_NAME up
+
+ # Enable passive TFO
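+	# 519 == 0x207: 0x1 (client) | 0x2 (server) | 0x4 (client sends data
+	# in SYN without a cookie) | 0x200 (server accepts data-in-SYN without
+	# a cookie option).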
+ ip netns exec nssv sysctl -w net.ipv4.tcp_fastopen=519 > /dev/null
+
+ set +e
+}
+
+cleanup_ns()
+{
+ ip netns del nscl
+ ip netns del nssv
+}
+
+###
+### Code start
+###
+
+modprobe netdevsim
+
+# linking
+
+echo $NSIM_SV_ID > $NSIM_DEV_SYS_NEW
+echo $NSIM_CL_ID > $NSIM_DEV_SYS_NEW
+udevadm settle
+
+setup_ns
+
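+# exec {var}<file lets bash pick a free file descriptor and record its number
+# in var; the descriptors on the netns files are what get passed to
+# netdevsim's link_device interface below.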
+NSIM_SV_FD=$((256 + RANDOM % 256))
+exec {NSIM_SV_FD}</var/run/netns/nssv
+NSIM_SV_IFIDX=$(ip netns exec nssv cat /sys/class/net/$NSIM_SV_NAME/ifindex)
+
+NSIM_CL_FD=$((256 + RANDOM % 256))
+exec {NSIM_CL_FD}</var/run/netns/nscl
+NSIM_CL_IFIDX=$(ip netns exec nscl cat /sys/class/net/$NSIM_CL_NAME/ifindex)
+
+echo "$NSIM_SV_FD:$NSIM_SV_IFIDX $NSIM_CL_FD:$NSIM_CL_IFIDX" > \
+ $NSIM_DEV_SYS_LINK
+
+if [ $? -ne 0 ]; then
+	echo "linking the netdevsim devices should succeed"
+ cleanup_ns
+ exit 1
+fi
+
+out_file=$(mktemp)
+
+timeout -k 1s 30s ip netns exec nssv ./tfo \
+ -s \
+ -p ${SERVER_PORT} \
+ -o ${out_file}&
+
+wait_local_port_listen nssv ${SERVER_PORT} tcp
+
+ip netns exec nscl ./tfo -c -h ${SERVER_IP} -p ${SERVER_PORT}
+
+wait
+
+res=$(cat $out_file)
+rm $out_file
+
+if [ "$res" = "0" ]; then
+ echo "got invalid NAPI ID from passive TFO socket"
+ cleanup_ns
+ exit 1
+fi
+
+echo "$NSIM_SV_FD:$NSIM_SV_IFIDX" > $NSIM_DEV_SYS_UNLINK
+
+echo $NSIM_CL_ID > $NSIM_DEV_SYS_DEL
+
+cleanup_ns
+
+modprobe -r netdevsim
+
+exit 0
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index 5ded3b3a7538..a3ef4b57eb5f 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -21,7 +21,7 @@
#include <sys/socket.h>
#include <sys/stat.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#define TLS_PAYLOAD_MAX_LEN 16384
#define SOL_TLS 282
@@ -181,13 +181,12 @@ static int tls_send_cmsg(int fd, unsigned char record_type,
return sendmsg(fd, &msg, flags);
}
-static int tls_recv_cmsg(struct __test_metadata *_metadata,
- int fd, unsigned char record_type,
- void *data, size_t len, int flags)
+static int __tls_recv_cmsg(struct __test_metadata *_metadata,
+ int fd, unsigned char *ctype,
+ void *data, size_t len, int flags)
{
char cbuf[CMSG_SPACE(sizeof(char))];
struct cmsghdr *cmsg;
- unsigned char ctype;
struct msghdr msg;
struct iovec vec;
int n;
@@ -206,7 +205,20 @@ static int tls_recv_cmsg(struct __test_metadata *_metadata,
EXPECT_NE(cmsg, NULL);
EXPECT_EQ(cmsg->cmsg_level, SOL_TLS);
EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE);
- ctype = *((unsigned char *)CMSG_DATA(cmsg));
+ if (ctype)
+ *ctype = *((unsigned char *)CMSG_DATA(cmsg));
+
+ return n;
+}
+
+static int tls_recv_cmsg(struct __test_metadata *_metadata,
+ int fd, unsigned char record_type,
+ void *data, size_t len, int flags)
+{
+ unsigned char ctype;
+ int n;
+
+ n = __tls_recv_cmsg(_metadata, fd, &ctype, data, len, flags);
EXPECT_EQ(ctype, record_type);
return n;
@@ -427,6 +439,8 @@ TEST_F(tls, sendfile)
EXPECT_GE(filefd, 0);
fstat(filefd, &st);
EXPECT_GE(sendfile(self->fd, filefd, 0, st.st_size), 0);
+
+ close(filefd);
}
TEST_F(tls, send_then_sendfile)
@@ -448,6 +462,9 @@ TEST_F(tls, send_then_sendfile)
EXPECT_GE(sendfile(self->fd, filefd, 0, st.st_size), 0);
EXPECT_EQ(recv(self->cfd, buf, st.st_size, MSG_WAITALL), st.st_size);
+
+ free(buf);
+ close(filefd);
}
static void chunked_sendfile(struct __test_metadata *_metadata,
@@ -547,6 +564,40 @@ TEST_F(tls, msg_more)
EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
}
+TEST_F(tls, cmsg_msg_more)
+{
+ char *test_str = "test_read";
+ char record_type = 100;
+ int send_len = 10;
+
+ /* we don't allow MSG_MORE with non-DATA records */
+ EXPECT_EQ(tls_send_cmsg(self->fd, record_type, test_str, send_len,
+ MSG_MORE), -1);
+ EXPECT_EQ(errno, EINVAL);
+}
+
+TEST_F(tls, msg_more_then_cmsg)
+{
+ char *test_str = "test_read";
+ char record_type = 100;
+ int send_len = 10;
+ char buf[10 * 2];
+ int ret;
+
+ EXPECT_EQ(send(self->fd, test_str, send_len, MSG_MORE), send_len);
+ EXPECT_EQ(recv(self->cfd, buf, send_len, MSG_DONTWAIT), -1);
+
+ ret = tls_send_cmsg(self->fd, record_type, test_str, send_len, 0);
+ EXPECT_EQ(ret, send_len);
+
+ /* initial DATA record didn't get merged with the non-DATA record */
+ EXPECT_EQ(recv(self->cfd, buf, send_len * 2, 0), send_len);
+
+ EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type,
+ buf, sizeof(buf), MSG_WAITALL),
+ send_len);
+}
+
TEST_F(tls, msg_more_unsent)
{
char const *test_str = "test_read";
@@ -895,6 +946,37 @@ TEST_F(tls, peek_and_splice)
EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
}
+#define MAX_FRAGS 48
+TEST_F(tls, splice_short)
+{
+ struct iovec sendchar_iov;
+ char read_buf[0x10000];
+ char sendbuf[0x100];
+ char sendchar = 'S';
+ int pipefds[2];
+ int i;
+
+ sendchar_iov.iov_base = &sendchar;
+ sendchar_iov.iov_len = 1;
+
+ memset(sendbuf, 's', sizeof(sendbuf));
+
+ ASSERT_GE(pipe2(pipefds, O_NONBLOCK), 0);
+ ASSERT_GE(fcntl(pipefds[0], F_SETPIPE_SZ, (MAX_FRAGS + 1) * 0x1000), 0);
+
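+	/* Each 1-byte vmsplice lands in its own pipe buffer, so splicing the
+	 * pipe into the TLS socket builds a record from MAX_FRAGS separate
+	 * fragments.
+	 */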
+ for (i = 0; i < MAX_FRAGS; i++)
+ ASSERT_GE(vmsplice(pipefds[1], &sendchar_iov, 1, 0), 0);
+
+ ASSERT_EQ(write(pipefds[1], sendbuf, sizeof(sendbuf)), sizeof(sendbuf));
+
+ EXPECT_EQ(splice(pipefds[0], NULL, self->fd, NULL, MAX_FRAGS + 0x1000, 0),
+ MAX_FRAGS + sizeof(sendbuf));
+ EXPECT_EQ(recv(self->cfd, read_buf, sizeof(read_buf), 0), MAX_FRAGS + sizeof(sendbuf));
+ EXPECT_EQ(recv(self->cfd, read_buf, sizeof(read_buf), MSG_DONTWAIT), -1);
+ EXPECT_EQ(errno, EAGAIN);
+}
+#undef MAX_FRAGS
+
TEST_F(tls, recvmsg_single)
{
char const *test_str = "test_recvmsg_single";
@@ -2164,6 +2246,284 @@ TEST_F(tls, rekey_poll_delay)
}
}
+struct raw_rec {
+ unsigned int plain_len;
+ unsigned char plain_data[100];
+ unsigned int cipher_len;
+ unsigned char cipher_data[128];
+};
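+
+/* For these AES-CCM-128 records, cipher_len = 5 (record header) + 8 (explicit
+ * nonce) + plain_len + 16 (auth tag): 40 bytes for an 11-byte plaintext and
+ * 29 bytes for an empty one.
+ */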
+
+/* TLS 1.2, AES_CCM, data, seqno:0, plaintext: 'Hello world' */
+static const struct raw_rec id0_data_l11 = {
+ .plain_len = 11,
+ .plain_data = {
+ 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f,
+ 0x72, 0x6c, 0x64,
+ },
+ .cipher_len = 40,
+ .cipher_data = {
+ 0x17, 0x03, 0x03, 0x00, 0x23, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0xa2, 0x33,
+ 0xde, 0x8d, 0x94, 0xf0, 0x29, 0x6c, 0xb1, 0xaf,
+ 0x6a, 0x75, 0xb2, 0x93, 0xad, 0x45, 0xd5, 0xfd,
+ 0x03, 0x51, 0x57, 0x8f, 0xf9, 0xcc, 0x3b, 0x42,
+ },
+};
+
+/* TLS 1.2, AES_CCM, ctrl, seqno:0, plaintext: '' */
+static const struct raw_rec id0_ctrl_l0 = {
+ .plain_len = 0,
+ .plain_data = {
+ },
+ .cipher_len = 29,
+ .cipher_data = {
+ 0x16, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x38, 0x7b,
+ 0xa6, 0x1c, 0xdd, 0xa7, 0x19, 0x33, 0xab, 0xae,
+ 0x88, 0xe1, 0xd2, 0x08, 0x4f,
+ },
+};
+
+/* TLS 1.2, AES_CCM, data, seqno:0, plaintext: '' */
+static const struct raw_rec id0_data_l0 = {
+ .plain_len = 0,
+ .plain_data = {
+ },
+ .cipher_len = 29,
+ .cipher_data = {
+ 0x17, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc5, 0x37, 0x90,
+ 0x70, 0x45, 0x89, 0xfb, 0x5c, 0xc7, 0x89, 0x03,
+ 0x68, 0x80, 0xd3, 0xd8, 0xcc,
+ },
+};
+
+/* TLS 1.2, AES_CCM, data, seqno:1, plaintext: 'Hello world' */
+static const struct raw_rec id1_data_l11 = {
+ .plain_len = 11,
+ .plain_data = {
+ 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f,
+ 0x72, 0x6c, 0x64,
+ },
+ .cipher_len = 40,
+ .cipher_data = {
+ 0x17, 0x03, 0x03, 0x00, 0x23, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x3a, 0x1a, 0x9c,
+ 0xd0, 0xa8, 0x9a, 0xd6, 0x69, 0xd6, 0x1a, 0xe3,
+ 0xb5, 0x1f, 0x0d, 0x2c, 0xe2, 0x97, 0x46, 0xff,
+ 0x2b, 0xcc, 0x5a, 0xc4, 0xa3, 0xb9, 0xef, 0xba,
+ },
+};
+
+/* TLS 1.2, AES_CCM, ctrl, seqno:1, plaintext: '' */
+static const struct raw_rec id1_ctrl_l0 = {
+ .plain_len = 0,
+ .plain_data = {
+ },
+ .cipher_len = 29,
+ .cipher_data = {
+ 0x16, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x3e, 0xf0, 0xfe,
+ 0xee, 0xd9, 0xe2, 0x5d, 0xc7, 0x11, 0x4c, 0xe6,
+ 0xb4, 0x7e, 0xef, 0x40, 0x2b,
+ },
+};
+
+/* TLS 1.2, AES_CCM, data, seqno:1, plaintext: '' */
+static const struct raw_rec id1_data_l0 = {
+ .plain_len = 0,
+ .plain_data = {
+ },
+ .cipher_len = 29,
+ .cipher_data = {
+ 0x17, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xce, 0xfc, 0x86,
+ 0xc8, 0xf0, 0x55, 0xf9, 0x47, 0x3f, 0x74, 0xdc,
+ 0xc9, 0xbf, 0xfe, 0x5b, 0xb1,
+ },
+};
+
+/* TLS 1.2, AES_CCM, ctrl, seqno:2, plaintext: 'Hello world' */
+static const struct raw_rec id2_ctrl_l11 = {
+ .plain_len = 11,
+ .plain_data = {
+ 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f,
+ 0x72, 0x6c, 0x64,
+ },
+ .cipher_len = 40,
+ .cipher_data = {
+ 0x16, 0x03, 0x03, 0x00, 0x23, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0xe5, 0x3d, 0x19,
+ 0x3d, 0xca, 0xb8, 0x16, 0xb6, 0xff, 0x79, 0x87,
+ 0x2a, 0x04, 0x11, 0x3d, 0xf8, 0x64, 0x5f, 0x36,
+ 0x8b, 0xa8, 0xee, 0x4c, 0x6d, 0x62, 0xa5, 0x00,
+ },
+};
+
+/* TLS 1.2, AES_CCM, data, seqno:2, plaintext: 'Hello world' */
+static const struct raw_rec id2_data_l11 = {
+ .plain_len = 11,
+ .plain_data = {
+ 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f,
+ 0x72, 0x6c, 0x64,
+ },
+ .cipher_len = 40,
+ .cipher_data = {
+ 0x17, 0x03, 0x03, 0x00, 0x23, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0xe5, 0x3d, 0x19,
+ 0x3d, 0xca, 0xb8, 0x16, 0xb6, 0xff, 0x79, 0x87,
+ 0x8e, 0xa1, 0xd0, 0xcd, 0x33, 0xb5, 0x86, 0x2b,
+ 0x17, 0xf1, 0x52, 0x2a, 0x55, 0x62, 0x65, 0x11,
+ },
+};
+
+/* TLS 1.2, AES_CCM, ctrl, seqno:2, plaintext: '' */
+static const struct raw_rec id2_ctrl_l0 = {
+ .plain_len = 0,
+ .plain_data = {
+ },
+ .cipher_len = 29,
+ .cipher_data = {
+ 0x16, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0xdc, 0x5c, 0x0e,
+ 0x41, 0xdd, 0xba, 0xd3, 0xcc, 0xcf, 0x6d, 0xd9,
+ 0x06, 0xdb, 0x79, 0xe5, 0x5d,
+ },
+};
+
+/* TLS 1.2, AES_CCM, data, seqno:2, plaintext: '' */
+static const struct raw_rec id2_data_l0 = {
+ .plain_len = 0,
+ .plain_data = {
+ },
+ .cipher_len = 29,
+ .cipher_data = {
+ 0x17, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0xc3, 0xca, 0x26,
+ 0x22, 0xe4, 0x25, 0xfb, 0x5f, 0x6d, 0xbf, 0x83,
+ 0x30, 0x48, 0x69, 0x1a, 0x47,
+ },
+};
+
+FIXTURE(zero_len)
+{
+ int fd, cfd;
+ bool notls;
+};
+
+FIXTURE_VARIANT(zero_len)
+{
+ const struct raw_rec *recs[4];
+ ssize_t recv_ret[4];
+};
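+
+/* recv_ret lists the expected return value of each consecutive MSG_DONTWAIT
+ * recv() call, terminated by -EAGAIN once all queued records have been
+ * consumed.
+ */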
+
+FIXTURE_VARIANT_ADD(zero_len, data_data_data)
+{
+ .recs = { &id0_data_l11, &id1_data_l11, &id2_data_l11, },
+ .recv_ret = { 33, -EAGAIN, },
+};
+
+FIXTURE_VARIANT_ADD(zero_len, data_0ctrl_data)
+{
+ .recs = { &id0_data_l11, &id1_ctrl_l0, &id2_data_l11, },
+ .recv_ret = { 11, 0, 11, -EAGAIN, },
+};
+
+FIXTURE_VARIANT_ADD(zero_len, 0data_0data_0data)
+{
+ .recs = { &id0_data_l0, &id1_data_l0, &id2_data_l0, },
+ .recv_ret = { -EAGAIN, },
+};
+
+FIXTURE_VARIANT_ADD(zero_len, 0data_0data_ctrl)
+{
+ .recs = { &id0_data_l0, &id1_data_l0, &id2_ctrl_l11, },
+ .recv_ret = { 0, 11, -EAGAIN, },
+};
+
+FIXTURE_VARIANT_ADD(zero_len, 0data_0data_0ctrl)
+{
+ .recs = { &id0_data_l0, &id1_data_l0, &id2_ctrl_l0, },
+ .recv_ret = { 0, 0, -EAGAIN, },
+};
+
+FIXTURE_VARIANT_ADD(zero_len, 0ctrl_0ctrl_0ctrl)
+{
+ .recs = { &id0_ctrl_l0, &id1_ctrl_l0, &id2_ctrl_l0, },
+ .recv_ret = { 0, 0, 0, -EAGAIN, },
+};
+
+FIXTURE_VARIANT_ADD(zero_len, 0data_0data_data)
+{
+ .recs = { &id0_data_l0, &id1_data_l0, &id2_data_l11, },
+ .recv_ret = { 11, -EAGAIN, },
+};
+
+FIXTURE_VARIANT_ADD(zero_len, data_0data_0data)
+{
+ .recs = { &id0_data_l11, &id1_data_l0, &id2_data_l0, },
+ .recv_ret = { 11, -EAGAIN, },
+};
+
+FIXTURE_SETUP(zero_len)
+{
+ struct tls_crypto_info_keys tls12;
+ int ret;
+
+ tls_crypto_info_init(TLS_1_2_VERSION, TLS_CIPHER_AES_CCM_128,
+ &tls12, 0);
+
+ ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls);
+ if (self->notls)
+ return;
+
+ /* Don't install keys on fd, we'll send raw records */
+ ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, tls12.len);
+ ASSERT_EQ(ret, 0);
+}
+
+FIXTURE_TEARDOWN(zero_len)
+{
+ close(self->fd);
+ close(self->cfd);
+}
+
+TEST_F(zero_len, test)
+{
+ const struct raw_rec *const *rec;
+ unsigned char buf[128];
+ int rec_off;
+ int i;
+
+ for (i = 0; i < 4 && variant->recs[i]; i++)
+ EXPECT_EQ(send(self->fd, variant->recs[i]->cipher_data,
+ variant->recs[i]->cipher_len, 0),
+ variant->recs[i]->cipher_len);
+
+ rec = &variant->recs[0];
+ rec_off = 0;
+ for (i = 0; i < 4; i++) {
+ int j, ret;
+
+ ret = variant->recv_ret[i] >= 0 ? variant->recv_ret[i] : -1;
+ EXPECT_EQ(__tls_recv_cmsg(_metadata, self->cfd, NULL,
+ buf, sizeof(buf), MSG_DONTWAIT), ret);
+ if (ret == -1)
+ EXPECT_EQ(errno, -variant->recv_ret[i]);
+ if (variant->recv_ret[i] == -EAGAIN)
+ break;
+
+ for (j = 0; j < ret; j++) {
+ while (rec_off == (*rec)->plain_len) {
+ rec++;
+ rec_off = 0;
+ }
+ EXPECT_EQ(buf[j], (*rec)->plain_data[rec_off]);
+ rec_off++;
+ }
+ }
+}
+
FIXTURE(tls_err)
{
int fd, cfd;
@@ -2480,6 +2840,163 @@ TEST_F(tls_err, poll_partial_rec_async)
}
}
+/* Use OOB+large send to trigger copy mode due to memory pressure.
+ * OOB causes a short read.
+ */
+TEST_F(tls_err, oob_pressure)
+{
+ char buf[1<<16];
+ int i;
+
+ memrnd(buf, sizeof(buf));
+
+ EXPECT_EQ(send(self->fd2, buf, 5, MSG_OOB), 5);
+ EXPECT_EQ(send(self->fd2, buf, sizeof(buf), 0), sizeof(buf));
+ for (i = 0; i < 64; i++)
+ EXPECT_EQ(send(self->fd2, buf, 5, MSG_OOB), 5);
+}
+
+/*
+ * Parse a stream of TLS records and ensure that each record respects
+ * the specified @max_payload_len.
+ */
+static size_t parse_tls_records(struct __test_metadata *_metadata,
+ const __u8 *rx_buf, int rx_len, int overhead,
+ __u16 max_payload_len)
+{
+ const __u8 *rec = rx_buf;
+ size_t total_plaintext_rx = 0;
+ const __u8 rec_header_len = 5;
+
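+	/*
+	 * TLS record wire format: 1-byte content type (23 == application
+	 * data), 2-byte legacy version (0x0303 for TLS 1.2) and a 2-byte
+	 * big-endian payload length, followed by the payload.
+	 */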
+ while (rec < rx_buf + rx_len) {
+ __u16 record_payload_len;
+ __u16 plaintext_len;
+
+ /* Sanity check that it's a TLS header for application data */
+ ASSERT_EQ(rec[0], 23);
+ ASSERT_EQ(rec[1], 0x3);
+ ASSERT_EQ(rec[2], 0x3);
+
+ memcpy(&record_payload_len, rec + 3, 2);
+ record_payload_len = ntohs(record_payload_len);
+ ASSERT_GE(record_payload_len, overhead);
+
+ plaintext_len = record_payload_len - overhead;
+ total_plaintext_rx += plaintext_len;
+
+ /* Plaintext must not exceed the specified limit */
+ ASSERT_LE(plaintext_len, max_payload_len);
+ rec += rec_header_len + record_payload_len;
+ }
+
+ return total_plaintext_rx;
+}
+
+TEST(tls_12_tx_max_payload_len)
+{
+ struct tls_crypto_info_keys tls12;
+ int cfd, ret, fd, overhead;
+ size_t total_plaintext_rx = 0;
+ __u8 tx[1024], rx[2000];
+ __u16 limit = 128;
+ __u16 opt = 0;
+ unsigned int optlen = sizeof(opt);
+ bool notls;
+
+ tls_crypto_info_init(TLS_1_2_VERSION, TLS_CIPHER_AES_CCM_128,
+ &tls12, 0);
+
+ ulp_sock_pair(_metadata, &fd, &cfd, &notls);
+
+ if (notls)
+ exit(KSFT_SKIP);
+
+ /* Don't install keys on fd, we'll parse raw records */
+ ret = setsockopt(cfd, SOL_TLS, TLS_TX, &tls12, tls12.len);
+ ASSERT_EQ(ret, 0);
+
+ ret = setsockopt(cfd, SOL_TLS, TLS_TX_MAX_PAYLOAD_LEN, &limit,
+ sizeof(limit));
+ ASSERT_EQ(ret, 0);
+
+ ret = getsockopt(cfd, SOL_TLS, TLS_TX_MAX_PAYLOAD_LEN, &opt, &optlen);
+ EXPECT_EQ(ret, 0);
+ EXPECT_EQ(limit, opt);
+ EXPECT_EQ(optlen, sizeof(limit));
+
+ memset(tx, 0, sizeof(tx));
+ ASSERT_EQ(send(cfd, tx, sizeof(tx), 0), sizeof(tx));
+ close(cfd);
+
+ ret = recv(fd, rx, sizeof(rx), 0);
+
+ /*
+ * 16B tag + 8B IV -- record header (5B) is not counted but we'll
+ * need it to walk the record stream
+ */
+ overhead = 16 + 8;
+ total_plaintext_rx = parse_tls_records(_metadata, rx, ret, overhead,
+ limit);
+
+ ASSERT_EQ(total_plaintext_rx, sizeof(tx));
+ close(fd);
+}
+
+TEST(tls_12_tx_max_payload_len_open_rec)
+{
+ struct tls_crypto_info_keys tls12;
+ int cfd, ret, fd, overhead;
+ size_t total_plaintext_rx = 0;
+ __u8 tx[1024], rx[2000];
+ __u16 tx_partial = 256;
+ __u16 og_limit = 512, limit = 128;
+ bool notls;
+
+ tls_crypto_info_init(TLS_1_2_VERSION, TLS_CIPHER_AES_CCM_128,
+ &tls12, 0);
+
+ ulp_sock_pair(_metadata, &fd, &cfd, &notls);
+
+ if (notls)
+ exit(KSFT_SKIP);
+
+ /* Don't install keys on fd, we'll parse raw records */
+ ret = setsockopt(cfd, SOL_TLS, TLS_TX, &tls12, tls12.len);
+ ASSERT_EQ(ret, 0);
+
+ ret = setsockopt(cfd, SOL_TLS, TLS_TX_MAX_PAYLOAD_LEN, &og_limit,
+ sizeof(og_limit));
+ ASSERT_EQ(ret, 0);
+
+ memset(tx, 0, sizeof(tx));
+ ASSERT_EQ(send(cfd, tx, tx_partial, MSG_MORE), tx_partial);
+
+ /*
+ * Changing the payload limit with a pending open record should
+ * not be allowed.
+ */
+ ret = setsockopt(cfd, SOL_TLS, TLS_TX_MAX_PAYLOAD_LEN, &limit,
+ sizeof(limit));
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, EBUSY);
+
+ ASSERT_EQ(send(cfd, tx + tx_partial, sizeof(tx) - tx_partial, MSG_EOR),
+ sizeof(tx) - tx_partial);
+ close(cfd);
+
+ ret = recv(fd, rx, sizeof(rx), 0);
+
+ /*
+ * 16B tag + 8B IV -- record header (5B) is not counted but we'll
+ * need it to walk the record stream
+ */
+ overhead = 16 + 8;
+ total_plaintext_rx = parse_tls_records(_metadata, rx, ret, overhead,
+ og_limit);
+ ASSERT_EQ(total_plaintext_rx, sizeof(tx));
+ close(fd);
+}
+
TEST(non_established) {
struct tls12_crypto_info_aes_gcm_256 tls12;
struct sockaddr_in addr;
@@ -2708,6 +3225,67 @@ TEST(prequeue) {
close(cfd);
}
+TEST(data_steal) {
+ struct tls_crypto_info_keys tls;
+ char buf[20000], buf2[20000];
+ struct sockaddr_in addr;
+ int sfd, cfd, ret, fd;
+ int pid, status;
+ socklen_t len;
+
+ len = sizeof(addr);
+ memrnd(buf, sizeof(buf));
+
+ tls_crypto_info_init(TLS_1_2_VERSION, TLS_CIPHER_AES_GCM_256, &tls, 0);
+
+ addr.sin_family = AF_INET;
+ addr.sin_addr.s_addr = htonl(INADDR_ANY);
+ addr.sin_port = 0;
+
+ fd = socket(AF_INET, SOCK_STREAM, 0);
+ sfd = socket(AF_INET, SOCK_STREAM, 0);
+
+	ASSERT_EQ(bind(sfd, (struct sockaddr *)&addr, sizeof(addr)), 0);
+	ASSERT_EQ(listen(sfd, 10), 0);
+	ASSERT_EQ(getsockname(sfd, (struct sockaddr *)&addr, &len), 0);
+	ASSERT_EQ(connect(fd, (struct sockaddr *)&addr, sizeof(addr)), 0);
+	ASSERT_GE(cfd = accept(sfd, (struct sockaddr *)&addr, &len), 0);
+ close(sfd);
+
+ ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
+ if (ret) {
+ ASSERT_EQ(errno, ENOENT);
+ SKIP(return, "no TLS support");
+ }
+ ASSERT_EQ(setsockopt(cfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")), 0);
+
+ /* Spawn a child and get it into the read wait path of the underlying
+ * TCP socket.
+ */
+ pid = fork();
+ ASSERT_GE(pid, 0);
+ if (!pid) {
+ EXPECT_EQ(recv(cfd, buf, sizeof(buf) / 2, MSG_WAITALL),
+ sizeof(buf) / 2);
+ exit(!__test_passed(_metadata));
+ }
+
+ usleep(10000);
+ ASSERT_EQ(setsockopt(fd, SOL_TLS, TLS_TX, &tls, tls.len), 0);
+ ASSERT_EQ(setsockopt(cfd, SOL_TLS, TLS_RX, &tls, tls.len), 0);
+
+ EXPECT_EQ(send(fd, buf, sizeof(buf), 0), sizeof(buf));
+ EXPECT_EQ(wait(&status), pid);
+ EXPECT_EQ(status, 0);
+ EXPECT_EQ(recv(cfd, buf2, sizeof(buf2), MSG_DONTWAIT), -1);
+ /* Don't check errno, the error will be different depending
+ * on what random bytes TLS interpreted as the record length.
+ */
+
+ close(fd);
+ close(cfd);
+}
+
static void __attribute__((constructor)) fips_check(void) {
int res;
FILE *f;
diff --git a/tools/testing/selftests/net/toeplitz.sh b/tools/testing/selftests/net/toeplitz.sh
deleted file mode 100755
index 8ff172f7bb1b..000000000000
--- a/tools/testing/selftests/net/toeplitz.sh
+++ /dev/null
@@ -1,199 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-#
-# extended toeplitz test: test rxhash plus, optionally, either (1) rss mapping
-# from rxhash to rx queue ('-rss') or (2) rps mapping from rxhash to cpu
-# ('-rps <rps_map>')
-#
-# irq-pattern-prefix can be derived from /sys/kernel/irq/*/action,
-# which is a driver-specific encoding.
-#
-# invoke as ./toeplitz.sh (-i <iface>) -u|-t -4|-6 \
-# [(-rss -irq_prefix <irq-pattern-prefix>)|(-rps <rps_map>)]
-
-source setup_loopback.sh
-readonly SERVER_IP4="192.168.1.200/24"
-readonly SERVER_IP6="fda8::1/64"
-readonly SERVER_MAC="aa:00:00:00:00:02"
-
-readonly CLIENT_IP4="192.168.1.100/24"
-readonly CLIENT_IP6="fda8::2/64"
-readonly CLIENT_MAC="aa:00:00:00:00:01"
-
-PORT=8000
-KEY="$(</proc/sys/net/core/netdev_rss_key)"
-TEST_RSS=false
-RPS_MAP=""
-PROTO_FLAG=""
-IP_FLAG=""
-DEV="eth0"
-
-# Return the number of rxqs among which RSS is configured to spread packets.
-# This is determined by reading the RSS indirection table using ethtool.
-get_rss_cfg_num_rxqs() {
- echo $(ethtool -x "${DEV}" |
- grep -E [[:space:]]+[0-9]+:[[:space:]]+ |
- cut -d: -f2- |
- awk '{$1=$1};1' |
- tr ' ' '\n' |
- sort -u |
- wc -l)
-}
-
-# Return a list of the receive irq handler cpus.
-# The list is ordered by the irqs, so first rxq-0 cpu, then rxq-1 cpu, etc.
-# Reads /sys/kernel/irq/ in order, so algorithm depends on
-# irq_{rxq-0} < irq_{rxq-1}, etc.
-get_rx_irq_cpus() {
- CPUS=""
- # sort so that irq 2 is read before irq 10
- SORTED_IRQS=$(for i in /sys/kernel/irq/*; do echo $i; done | sort -V)
- # Consider only as many queues as RSS actually uses. We assume that
- # if RSS_CFG_NUM_RXQS=N, then RSS uses rxqs 0-(N-1).
- RSS_CFG_NUM_RXQS=$(get_rss_cfg_num_rxqs)
- RXQ_COUNT=0
-
- for i in ${SORTED_IRQS}
- do
- [[ "${RXQ_COUNT}" -lt "${RSS_CFG_NUM_RXQS}" ]] || break
- # lookup relevant IRQs by action name
- [[ -e "$i/actions" ]] || continue
- cat "$i/actions" | grep -q "${IRQ_PATTERN}" || continue
- irqname=$(<"$i/actions")
-
- # does the IRQ get called
- irqcount=$(cat "$i/per_cpu_count" | tr -d '0,')
- [[ -n "${irqcount}" ]] || continue
-
- # lookup CPU
- irq=$(basename "$i")
- cpu=$(cat "/proc/irq/$irq/smp_affinity_list")
-
- if [[ -z "${CPUS}" ]]; then
- CPUS="${cpu}"
- else
- CPUS="${CPUS},${cpu}"
- fi
- RXQ_COUNT=$((RXQ_COUNT+1))
- done
-
- echo "${CPUS}"
-}
-
-get_disable_rfs_cmd() {
- echo "echo 0 > /proc/sys/net/core/rps_sock_flow_entries;"
-}
-
-get_set_rps_bitmaps_cmd() {
- CMD=""
- for i in /sys/class/net/${DEV}/queues/rx-*/rps_cpus
- do
- CMD="${CMD} echo $1 > ${i};"
- done
-
- echo "${CMD}"
-}
-
-get_disable_rps_cmd() {
- echo "$(get_set_rps_bitmaps_cmd 0)"
-}
-
-die() {
- echo "$1"
- exit 1
-}
-
-check_nic_rxhash_enabled() {
- local -r pattern="receive-hashing:\ on"
-
- ethtool -k "${DEV}" | grep -q "${pattern}" || die "rxhash must be enabled"
-}
-
-parse_opts() {
- local prog=$0
- shift 1
-
- while [[ "$1" =~ "-" ]]; do
- if [[ "$1" = "-irq_prefix" ]]; then
- shift
- IRQ_PATTERN="^$1-[0-9]*$"
- elif [[ "$1" = "-u" || "$1" = "-t" ]]; then
- PROTO_FLAG="$1"
- elif [[ "$1" = "-4" ]]; then
- IP_FLAG="$1"
- SERVER_IP="${SERVER_IP4}"
- CLIENT_IP="${CLIENT_IP4}"
- elif [[ "$1" = "-6" ]]; then
- IP_FLAG="$1"
- SERVER_IP="${SERVER_IP6}"
- CLIENT_IP="${CLIENT_IP6}"
- elif [[ "$1" = "-rss" ]]; then
- TEST_RSS=true
- elif [[ "$1" = "-rps" ]]; then
- shift
- RPS_MAP="$1"
- elif [[ "$1" = "-i" ]]; then
- shift
- DEV="$1"
- else
- die "Usage: ${prog} (-i <iface>) -u|-t -4|-6 \
- [(-rss -irq_prefix <irq-pattern-prefix>)|(-rps <rps_map>)]"
- fi
- shift
- done
-}
-
-setup() {
- setup_loopback_environment "${DEV}"
-
- # Set up server_ns namespace and client_ns namespace
- setup_macvlan_ns "${DEV}" $server_ns server \
- "${SERVER_MAC}" "${SERVER_IP}"
- setup_macvlan_ns "${DEV}" $client_ns client \
- "${CLIENT_MAC}" "${CLIENT_IP}"
-}
-
-cleanup() {
- cleanup_macvlan_ns $server_ns server $client_ns client
- cleanup_loopback "${DEV}"
-}
-
-parse_opts $0 $@
-
-setup
-trap cleanup EXIT
-
-check_nic_rxhash_enabled
-
-# Actual test starts here
-if [[ "${TEST_RSS}" = true ]]; then
- # RPS/RFS must be disabled because they move packets between cpus,
- # which breaks the PACKET_FANOUT_CPU identification of RSS decisions.
- eval "$(get_disable_rfs_cmd) $(get_disable_rps_cmd)" \
- ip netns exec $server_ns ./toeplitz "${IP_FLAG}" "${PROTO_FLAG}" \
- -d "${PORT}" -i "${DEV}" -k "${KEY}" -T 1000 \
- -C "$(get_rx_irq_cpus)" -s -v &
-elif [[ ! -z "${RPS_MAP}" ]]; then
- eval "$(get_disable_rfs_cmd) $(get_set_rps_bitmaps_cmd ${RPS_MAP})" \
- ip netns exec $server_ns ./toeplitz "${IP_FLAG}" "${PROTO_FLAG}" \
- -d "${PORT}" -i "${DEV}" -k "${KEY}" -T 1000 \
- -r "0x${RPS_MAP}" -s -v &
-else
- ip netns exec $server_ns ./toeplitz "${IP_FLAG}" "${PROTO_FLAG}" \
- -d "${PORT}" -i "${DEV}" -k "${KEY}" -T 1000 -s -v &
-fi
-
-server_pid=$!
-
-ip netns exec $client_ns ./toeplitz_client.sh "${PROTO_FLAG}" \
- "${IP_FLAG}" "${SERVER_IP%%/*}" "${PORT}" &
-
-client_pid=$!
-
-wait "${server_pid}"
-exit_code=$?
-kill -9 "${client_pid}"
-if [[ "${exit_code}" -eq 0 ]]; then
- echo "Test Succeeded!"
-fi
-exit "${exit_code}"
diff --git a/tools/testing/selftests/net/toeplitz_client.sh b/tools/testing/selftests/net/toeplitz_client.sh
deleted file mode 100755
index 2fef34f4aba1..000000000000
--- a/tools/testing/selftests/net/toeplitz_client.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-#
-# A simple program for generating traffic for the toeplitz test.
-#
-# This program sends packets periodically for, conservatively, 20 seconds. The
-# intent is for the calling program to kill this program once it is no longer
-# needed, rather than waiting for the 20 second expiration.
-
-send_traffic() {
- expiration=$((SECONDS+20))
- while [[ "${SECONDS}" -lt "${expiration}" ]]
- do
- if [[ "${PROTO}" == "-u" ]]; then
- echo "msg $i" | nc "${IPVER}" -u -w 0 "${ADDR}" "${PORT}"
- else
- echo "msg $i" | nc "${IPVER}" -w 0 "${ADDR}" "${PORT}"
- fi
- sleep 0.001
- done
-}
-
-PROTO=$1
-IPVER=$2
-ADDR=$3
-PORT=$4
-
-send_traffic
diff --git a/tools/testing/selftests/net/traceroute.sh b/tools/testing/selftests/net/traceroute.sh
index 282f14760940..a7c6ab8a0347 100755
--- a/tools/testing/selftests/net/traceroute.sh
+++ b/tools/testing/selftests/net/traceroute.sh
@@ -10,28 +10,6 @@ PAUSE_ON_FAIL=no
################################################################################
#
-log_test()
-{
- local rc=$1
- local expected=$2
- local msg="$3"
-
- if [ ${rc} -eq ${expected} ]; then
- printf "TEST: %-60s [ OK ]\n" "${msg}"
- nsuccess=$((nsuccess+1))
- else
- ret=1
- nfail=$((nfail+1))
- printf "TEST: %-60s [FAIL]\n" "${msg}"
- if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
- echo
- echo "hit enter to continue, 'q' to quit"
- read a
- [ "$a" = "q" ] && exit 1
- fi
- fi
-}
-
run_cmd()
{
local ns
@@ -58,6 +36,35 @@ run_cmd()
return $rc
}
+__check_traceroute_version()
+{
+ local cmd=$1; shift
+ local req_ver=$1; shift
+ local ver
+
+ req_ver=$(echo "$req_ver" | sed 's/\.//g')
+	ver=$($cmd -V 2>&1 | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+' | sed 's/\.//g')
+ if [[ $ver -lt $req_ver ]]; then
+ return 1
+ else
+ return 0
+ fi
+}
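+
+# Note: versions are compared by concatenating their digit groups, which
+# assumes both version strings have the same number of components.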
+
+check_traceroute6_version()
+{
+ local req_ver=$1; shift
+
+ __check_traceroute_version traceroute6 "$req_ver"
+}
+
+check_traceroute_version()
+{
+ local req_ver=$1; shift
+
+ __check_traceroute_version traceroute "$req_ver"
+}
+
################################################################################
# create namespaces and interconnects
@@ -81,6 +88,8 @@ create_ns()
ip netns exec ${ns} ip -6 ro add unreachable default metric 8192
ip netns exec ${ns} sysctl -qw net.ipv4.ip_forward=1
+ ip netns exec ${ns} sysctl -qw net.ipv4.icmp_ratelimit=0
+ ip netns exec ${ns} sysctl -qw net.ipv6.icmp.ratelimit=0
ip netns exec ${ns} sysctl -qw net.ipv6.conf.all.keep_addr_on_down=1
ip netns exec ${ns} sysctl -qw net.ipv6.conf.all.forwarding=1
ip netns exec ${ns} sysctl -qw net.ipv6.conf.default.forwarding=1
@@ -203,34 +212,275 @@ setup_traceroute6()
run_traceroute6()
{
- if [ ! -x "$(command -v traceroute6)" ]; then
- echo "SKIP: Could not run IPV6 test without traceroute6"
- return
- fi
-
setup_traceroute6
+ RET=0
+
# traceroute6 host-2 from host-1 (expects 2000:102::2)
run_cmd $h1 "traceroute6 2000:103::4 | grep -q 2000:102::2"
- log_test $? 0 "IPV6 traceroute"
+ check_err $? "traceroute6 did not return 2000:102::2"
+ log_test "IPv6 traceroute"
cleanup_traceroute6
}
################################################################################
+# traceroute6 with VRF test
+#
+# Verify that in this scenario
+#
+# ------------------------ N2
+# | |
+# ------ ------ N3 ----
+# | R1 | | R2 |------|H2|
+# ------ ------ ----
+# | |
+# ------------------------ N1
+# |
+# ----
+# |H1|
+# ----
+#
+# Where H1's default route goes through R1 and R1's default route goes through
+# R2 over N2, traceroute6 from H1 to H2 reports R2's address on N2 and not N1.
+# The interfaces connecting R2 to the different subnets are members of a VRF
+# and the intention is to check that traceroute6 does not report the VRF's
+# address.
+#
+# Addresses are assigned as follows:
+#
+# N1: 2000:101::/64
+# N2: 2000:102::/64
+# N3: 2000:103::/64
+#
+# R1's host part of address: 1
+# R2's host part of address: 2
+# H1's host part of address: 3
+# H2's host part of address: 4
+#
+# For example:
+# the IPv6 address of R1's interface on N2 is 2000:102::1/64
+
+cleanup_traceroute6_vrf()
+{
+ cleanup_all_ns
+}
+
+setup_traceroute6_vrf()
+{
+ # Start clean
+ cleanup_traceroute6_vrf
+
+ setup_ns h1 h2 r1 r2
+ create_ns "$h1"
+ create_ns "$h2"
+ create_ns "$r1"
+ create_ns "$r2"
+
+ ip -n "$r2" link add name vrf100 up type vrf table 100
+ ip -n "$r2" addr add 2001:db8:100::1/64 dev vrf100
+
+ # Setup N3
+ connect_ns "$r2" eth3 - 2000:103::2/64 "$h2" eth3 - 2000:103::4/64
+
+ ip -n "$r2" link set dev eth3 master vrf100
+
+ ip -n "$h2" route add default via 2000:103::2
+
+ # Setup N2
+ connect_ns "$r1" eth2 - 2000:102::1/64 "$r2" eth2 - 2000:102::2/64
+
+ ip -n "$r1" route add default via 2000:102::2
+
+ ip -n "$r2" link set dev eth2 master vrf100
+
+ # Setup N1. host-1 and router-2 connect to a bridge in router-1.
+ ip -n "$r1" link add name br100 up type bridge
+ ip -n "$r1" addr add 2000:101::1/64 dev br100
+
+ connect_ns "$h1" eth0 - 2000:101::3/64 "$r1" eth0 - -
+
+ ip -n "$h1" route add default via 2000:101::1
+
+ ip -n "$r1" link set dev eth0 master br100
+
+ connect_ns "$r2" eth1 - 2000:101::2/64 "$r1" eth1 - -
+
+ ip -n "$r2" link set dev eth1 master vrf100
+
+ ip -n "$r1" link set dev eth1 master br100
+
+ # Prime the network
+ ip netns exec "$h1" ping6 -c5 2000:103::4 >/dev/null 2>&1
+}
+
+run_traceroute6_vrf()
+{
+ setup_traceroute6_vrf
+
+ RET=0
+
+ # traceroute6 host-2 from host-1 (expects 2000:102::2)
+ run_cmd "$h1" "traceroute6 2000:103::4 | grep 2000:102::2"
+ check_err $? "traceroute6 did not return 2000:102::2"
+ log_test "IPv6 traceroute with VRF"
+
+ cleanup_traceroute6_vrf
+}
+
+################################################################################
+# traceroute6 with ICMP extensions test
+#
+# Verify that in this scenario
+#
+# ---- ---- ----
+# |H1|--------------------------|R1|--------------------------|H2|
+# ---- N1 ---- N2 ----
+#
+# ICMP extensions are correctly reported. The loopback interfaces on all the
+# nodes are assigned global addresses and the interfaces connecting the nodes
+# are assigned IPv6 link-local addresses.
+
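
As a manual sketch of the mechanism under test (the sysctl name, the 0x01 bit, and the -e flag are all taken from this patch; the address matches the topology set up below):

# On the router: set bit 0 of the mask to append incoming interface
# info (RFC 5837) to generated ICMPv6 errors.
sysctl -w net.ipv6.icmp.errors_extension_mask=0x01
# On H1: -e makes traceroute6 print ICMP extensions; a hop that appends
# the extension shows a token such as <INC:2,"eth1",mtu=1500>.
traceroute6 -e 2001:db8:1::3
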
+cleanup_traceroute6_ext()
+{
+ cleanup_all_ns
+}
+
+setup_traceroute6_ext()
+{
+ # Start clean
+ cleanup_traceroute6_ext
+
+ setup_ns h1 r1 h2
+ create_ns "$h1"
+ create_ns "$r1"
+ create_ns "$h2"
+
+ # Setup N1
+ connect_ns "$h1" eth1 - fe80::1/64 "$r1" eth1 - fe80::2/64
+ # Setup N2
+ connect_ns "$r1" eth2 - fe80::3/64 "$h2" eth2 - fe80::4/64
+
+ # Setup H1
+ ip -n "$h1" address add 2001:db8:1::1/128 dev lo
+ ip -n "$h1" route add ::/0 nexthop via fe80::2 dev eth1
+
+ # Setup R1
+ ip -n "$r1" address add 2001:db8:1::2/128 dev lo
+ ip -n "$r1" route add 2001:db8:1::1/128 nexthop via fe80::1 dev eth1
+ ip -n "$r1" route add 2001:db8:1::3/128 nexthop via fe80::4 dev eth2
+
+ # Setup H2
+ ip -n "$h2" address add 2001:db8:1::3/128 dev lo
+ ip -n "$h2" route add ::/0 nexthop via fe80::3 dev eth2
+
+ # Prime the network
+ ip netns exec "$h1" ping6 -c5 2001:db8:1::3 >/dev/null 2>&1
+}
+
+traceroute6_ext_iio_iif_test()
+{
+ local r1_ifindex h2_ifindex
+ local pkt_len=$1; shift
+
+ # Test that incoming interface info is not appended by default.
+ run_cmd "$h1" "traceroute6 -e 2001:db8:1::3 $pkt_len | grep INC"
+	check_fail $? "Incoming interface info appended by default when it should not be"
+
+ # Test that the extension is appended when enabled.
+ run_cmd "$r1" "bash -c \"echo 0x01 > /proc/sys/net/ipv6/icmp/errors_extension_mask\""
+ check_err $? "Failed to enable incoming interface info extension on R1"
+
+ run_cmd "$h1" "traceroute6 -e 2001:db8:1::3 $pkt_len | grep INC"
+ check_err $? "Incoming interface info not appended after enable"
+
+ # Test that the extension is not appended when disabled.
+ run_cmd "$r1" "bash -c \"echo 0x00 > /proc/sys/net/ipv6/icmp/errors_extension_mask\""
+ check_err $? "Failed to disable incoming interface info extension on R1"
+
+ run_cmd "$h1" "traceroute6 -e 2001:db8:1::3 $pkt_len | grep INC"
+ check_fail $? "Incoming interface info appended after disable"
+
+ # Test that the extension is sent correctly from both R1 and H2.
+ run_cmd "$r1" "sysctl -w net.ipv6.icmp.errors_extension_mask=0x01"
+ r1_ifindex=$(ip -n "$r1" -j link show dev eth1 | jq '.[]["ifindex"]')
+ run_cmd "$h1" "traceroute6 -e 2001:db8:1::3 $pkt_len | grep '<INC:$r1_ifindex,\"eth1\",mtu=1500>'"
+ check_err $? "Wrong incoming interface info reported from R1"
+
+ run_cmd "$h2" "sysctl -w net.ipv6.icmp.errors_extension_mask=0x01"
+ h2_ifindex=$(ip -n "$h2" -j link show dev eth2 | jq '.[]["ifindex"]')
+ run_cmd "$h1" "traceroute6 -e 2001:db8:1::3 $pkt_len | grep '<INC:$h2_ifindex,\"eth2\",mtu=1500>'"
+ check_err $? "Wrong incoming interface info reported from H2"
+
+ # Add a global address on the incoming interface of R1 and check that
+ # it is reported.
+ run_cmd "$r1" "ip address add 2001:db8:100::1/64 dev eth1 nodad"
+ run_cmd "$h1" "traceroute6 -e 2001:db8:1::3 $pkt_len | grep '<INC:$r1_ifindex,2001:db8:100::1,\"eth1\",mtu=1500>'"
+ check_err $? "Wrong incoming interface info reported from R1 after address addition"
+ run_cmd "$r1" "ip address del 2001:db8:100::1/64 dev eth1"
+
+ # Change name and MTU and make sure the result is still correct.
+ run_cmd "$r1" "ip link set dev eth1 name eth1tag mtu 1501"
+ run_cmd "$h1" "traceroute6 -e 2001:db8:1::3 $pkt_len | grep '<INC:$r1_ifindex,\"eth1tag\",mtu=1501>'"
+ check_err $? "Wrong incoming interface info reported from R1 after name and MTU change"
+ run_cmd "$r1" "ip link set dev eth1tag name eth1 mtu 1500"
+
+ run_cmd "$r1" "sysctl -w net.ipv6.icmp.errors_extension_mask=0x00"
+ run_cmd "$h2" "sysctl -w net.ipv6.icmp.errors_extension_mask=0x00"
+}
+
+run_traceroute6_ext()
+{
+ # Need at least version 2.1.5 for RFC 5837 support.
+ if ! check_traceroute6_version 2.1.5; then
+ log_test_skip "traceroute6 too old, missing ICMP extensions support"
+ return
+ fi
+
+ setup_traceroute6_ext
+
+ RET=0
+
+ ## General ICMP extensions tests
+
+ # Test that ICMP extensions are disabled by default.
+ run_cmd "$h1" "sysctl net.ipv6.icmp.errors_extension_mask | grep \"= 0$\""
+ check_err $? "ICMP extensions are not disabled by default"
+
+ # Test that unsupported values are rejected. Do not use "sysctl" as
+ # older versions do not return an error code upon failure.
+ run_cmd "$h1" "bash -c \"echo 0x80 > /proc/sys/net/ipv6/icmp/errors_extension_mask\""
+ check_fail $? "Unsupported sysctl value was not rejected"
+
+ ## Extension-specific tests
+
+ # Incoming interface info test. Test with various packet sizes,
+ # including the default one.
+ traceroute6_ext_iio_iif_test
+ traceroute6_ext_iio_iif_test 127
+ traceroute6_ext_iio_iif_test 128
+ traceroute6_ext_iio_iif_test 129
+
+ log_test "IPv6 traceroute with ICMP extensions"
+
+ cleanup_traceroute6_ext
+}
+
+################################################################################
# traceroute test
#
-# Verify that traceroute from H1 to H2 shows 1.0.1.1 in this scenario
+# Verify that traceroute from H1 to H2 shows 1.0.3.1 and 1.0.1.1 when
+# traceroute uses 1.0.3.3 and 1.0.1.3 as the source IP, respectively.
#
-# 1.0.3.1/24
+# 1.0.3.3/24 1.0.3.1/24
# ---- 1.0.1.3/24 1.0.1.1/24 ---- 1.0.2.1/24 1.0.2.4/24 ----
# |H1|--------------------------|R1|--------------------------|H2|
# ---- N1 ---- N2 ----
#
-# where net.ipv4.icmp_errors_use_inbound_ifaddr is set on R1 and
-# 1.0.3.1/24 and 1.0.1.1/24 are respectively R1's primary and secondary
-# address on N1.
-#
+# where net.ipv4.icmp_errors_use_inbound_ifaddr is set on R1 and 1.0.3.1/24 and
+# 1.0.1.1/24 are R1's primary addresses on N1. The kernel is expected to prefer
+# a source address that is on the same subnet as the destination IP of the ICMP
+# error message.
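
In other words, with both addresses present on R1's inbound interface, the two invocations below are expected to report different first hops (this mirrors the checks in run_traceroute() further down):

traceroute -s 1.0.1.3 1.0.2.4   # R1 expected to reply as 1.0.1.1
traceroute -s 1.0.3.3 1.0.2.4   # R1 expected to reply as 1.0.3.1
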
cleanup_traceroute()
{
@@ -250,6 +500,7 @@ setup_traceroute()
connect_ns $h1 eth0 1.0.1.3/24 - \
$router eth1 1.0.3.1/24 -
+ ip -n "$h1" addr add 1.0.3.3/24 dev eth0
ip netns exec $h1 ip route add default via 1.0.1.1
ip netns exec $router ip addr add 1.0.1.1/24 dev eth1
@@ -268,18 +519,232 @@ setup_traceroute()
run_traceroute()
{
- if [ ! -x "$(command -v traceroute)" ]; then
- echo "SKIP: Could not run IPV4 test without traceroute"
+ setup_traceroute
+
+ RET=0
+
+ # traceroute host-2 from host-1. Expect a source IP that is on the same
+	# subnet as the destination IP of the ICMP error message.
+ run_cmd "$h1" "traceroute -s 1.0.1.3 1.0.2.4 | grep -q 1.0.1.1"
+ check_err $? "traceroute did not return 1.0.1.1"
+ run_cmd "$h1" "traceroute -s 1.0.3.3 1.0.2.4 | grep -q 1.0.3.1"
+ check_err $? "traceroute did not return 1.0.3.1"
+ log_test "IPv4 traceroute"
+
+ cleanup_traceroute
+}
+
+################################################################################
+# traceroute with VRF test
+#
+# Verify that traceroute from H1 to H2 shows 1.0.3.1 and 1.0.1.1 when
+# traceroute uses 1.0.3.3 and 1.0.1.3 as the source IP, respectively. The
+# intention is to check that the kernel does not choose an IP assigned to the
+# VRF device, but rather an address from the VRF port (eth1) that received the
+# packet that generates the ICMP error message.
+#
+# 1.0.4.1/24 (vrf100)
+# 1.0.3.3/24 1.0.3.1/24
+# ---- 1.0.1.3/24 1.0.1.1/24 ---- 1.0.2.1/24 1.0.2.4/24 ----
+# |H1|--------------------------|R1|--------------------------|H2|
+# ---- N1 ---- N2 ----
+
+cleanup_traceroute_vrf()
+{
+ cleanup_all_ns
+}
+
+setup_traceroute_vrf()
+{
+ # Start clean
+ cleanup_traceroute_vrf
+
+ setup_ns h1 h2 router
+ create_ns "$h1"
+ create_ns "$h2"
+ create_ns "$router"
+
+ ip -n "$router" link add name vrf100 up type vrf table 100
+ ip -n "$router" addr add 1.0.4.1/24 dev vrf100
+
+ connect_ns "$h1" eth0 1.0.1.3/24 - \
+ "$router" eth1 1.0.1.1/24 -
+
+ ip -n "$h1" addr add 1.0.3.3/24 dev eth0
+ ip -n "$h1" route add default via 1.0.1.1
+
+ ip -n "$router" link set dev eth1 master vrf100
+ ip -n "$router" addr add 1.0.3.1/24 dev eth1
+ ip netns exec "$router" sysctl -qw \
+ net.ipv4.icmp_errors_use_inbound_ifaddr=1
+
+ connect_ns "$h2" eth0 1.0.2.4/24 - \
+ "$router" eth2 1.0.2.1/24 -
+
+ ip -n "$h2" route add default via 1.0.2.1
+
+ ip -n "$router" link set dev eth2 master vrf100
+
+ # Prime the network
+ ip netns exec "$h1" ping -c5 1.0.2.4 >/dev/null 2>&1
+}
+
+run_traceroute_vrf()
+{
+ setup_traceroute_vrf
+
+ RET=0
+
+ # traceroute host-2 from host-1. Expect a source IP that is on the same
+	# subnet as the destination IP of the ICMP error message.
+ run_cmd "$h1" "traceroute -s 1.0.1.3 1.0.2.4 | grep 1.0.1.1"
+ check_err $? "traceroute did not return 1.0.1.1"
+ run_cmd "$h1" "traceroute -s 1.0.3.3 1.0.2.4 | grep 1.0.3.1"
+ check_err $? "traceroute did not return 1.0.3.1"
+ log_test "IPv4 traceroute with VRF"
+
+ cleanup_traceroute_vrf
+}
+
+################################################################################
+# traceroute with ICMP extensions test
+#
+# Verify that in this scenario
+#
+# ---- ---- ----
+# |H1|--------------------------|R1|--------------------------|H2|
+# ---- N1 ---- N2 ----
+#
+# ICMP extensions are correctly reported. The loopback interfaces on all the
+# nodes are assigned global addresses and the interfaces connecting the nodes
+# are assigned IPv6 link-local addresses.
+
+cleanup_traceroute_ext()
+{
+ cleanup_all_ns
+}
+
+setup_traceroute_ext()
+{
+ # Start clean
+ cleanup_traceroute_ext
+
+ setup_ns h1 r1 h2
+ create_ns "$h1"
+ create_ns "$r1"
+ create_ns "$h2"
+
+ # Setup N1
+ connect_ns "$h1" eth1 - fe80::1/64 "$r1" eth1 - fe80::2/64
+ # Setup N2
+ connect_ns "$r1" eth2 - fe80::3/64 "$h2" eth2 - fe80::4/64
+
+ # Setup H1
+ ip -n "$h1" address add 192.0.2.1/32 dev lo
+ ip -n "$h1" route add 0.0.0.0/0 nexthop via inet6 fe80::2 dev eth1
+
+ # Setup R1
+ ip -n "$r1" address add 192.0.2.2/32 dev lo
+ ip -n "$r1" route add 192.0.2.1/32 nexthop via inet6 fe80::1 dev eth1
+ ip -n "$r1" route add 192.0.2.3/32 nexthop via inet6 fe80::4 dev eth2
+
+ # Setup H2
+ ip -n "$h2" address add 192.0.2.3/32 dev lo
+ ip -n "$h2" route add 0.0.0.0/0 nexthop via inet6 fe80::3 dev eth2
+
+ # Prime the network
+ ip netns exec "$h1" ping -c5 192.0.2.3 >/dev/null 2>&1
+}
+
+traceroute_ext_iio_iif_test()
+{
+ local r1_ifindex h2_ifindex
+ local pkt_len=$1; shift
+
+ # Test that incoming interface info is not appended by default.
+ run_cmd "$h1" "traceroute -e 192.0.2.3 $pkt_len | grep INC"
+	check_fail $? "Incoming interface info appended by default when it should not be"
+
+ # Test that the extension is appended when enabled.
+ run_cmd "$r1" "bash -c \"echo 0x01 > /proc/sys/net/ipv4/icmp_errors_extension_mask\""
+ check_err $? "Failed to enable incoming interface info extension on R1"
+
+ run_cmd "$h1" "traceroute -e 192.0.2.3 $pkt_len | grep INC"
+ check_err $? "Incoming interface info not appended after enable"
+
+ # Test that the extension is not appended when disabled.
+ run_cmd "$r1" "bash -c \"echo 0x00 > /proc/sys/net/ipv4/icmp_errors_extension_mask\""
+ check_err $? "Failed to disable incoming interface info extension on R1"
+
+ run_cmd "$h1" "traceroute -e 192.0.2.3 $pkt_len | grep INC"
+ check_fail $? "Incoming interface info appended after disable"
+
+ # Test that the extension is sent correctly from both R1 and H2.
+ run_cmd "$r1" "sysctl -w net.ipv4.icmp_errors_extension_mask=0x01"
+ r1_ifindex=$(ip -n "$r1" -j link show dev eth1 | jq '.[]["ifindex"]')
+ run_cmd "$h1" "traceroute -e 192.0.2.3 $pkt_len | grep '<INC:$r1_ifindex,\"eth1\",mtu=1500>'"
+ check_err $? "Wrong incoming interface info reported from R1"
+
+ run_cmd "$h2" "sysctl -w net.ipv4.icmp_errors_extension_mask=0x01"
+ h2_ifindex=$(ip -n "$h2" -j link show dev eth2 | jq '.[]["ifindex"]')
+ run_cmd "$h1" "traceroute -e 192.0.2.3 $pkt_len | grep '<INC:$h2_ifindex,\"eth2\",mtu=1500>'"
+ check_err $? "Wrong incoming interface info reported from H2"
+
+ # Add a global address on the incoming interface of R1 and check that
+ # it is reported.
+ run_cmd "$r1" "ip address add 198.51.100.1/24 dev eth1"
+ run_cmd "$h1" "traceroute -e 192.0.2.3 $pkt_len | grep '<INC:$r1_ifindex,198.51.100.1,\"eth1\",mtu=1500>'"
+ check_err $? "Wrong incoming interface info reported from R1 after address addition"
+ run_cmd "$r1" "ip address del 198.51.100.1/24 dev eth1"
+
+ # Change name and MTU and make sure the result is still correct.
+ # Re-add the route towards H1 since it was deleted when we removed the
+ # last IPv4 address from eth1 on R1.
+ run_cmd "$r1" "ip route add 192.0.2.1/32 nexthop via inet6 fe80::1 dev eth1"
+ run_cmd "$r1" "ip link set dev eth1 name eth1tag mtu 1501"
+ run_cmd "$h1" "traceroute -e 192.0.2.3 $pkt_len | grep '<INC:$r1_ifindex,\"eth1tag\",mtu=1501>'"
+ check_err $? "Wrong incoming interface info reported from R1 after name and MTU change"
+ run_cmd "$r1" "ip link set dev eth1tag name eth1 mtu 1500"
+
+ run_cmd "$r1" "sysctl -w net.ipv4.icmp_errors_extension_mask=0x00"
+ run_cmd "$h2" "sysctl -w net.ipv4.icmp_errors_extension_mask=0x00"
+}
+
+run_traceroute_ext()
+{
+ # Need at least version 2.1.5 for RFC 5837 support.
+ if ! check_traceroute_version 2.1.5; then
+ log_test_skip "traceroute too old, missing ICMP extensions support"
return
fi
- setup_traceroute
+ setup_traceroute_ext
- # traceroute host-2 from host-1 (expects 1.0.1.1). Takes a while.
- run_cmd $h1 "traceroute 1.0.2.4 | grep -q 1.0.1.1"
- log_test $? 0 "IPV4 traceroute"
+ RET=0
- cleanup_traceroute
+ ## General ICMP extensions tests
+
+ # Test that ICMP extensions are disabled by default.
+ run_cmd "$h1" "sysctl net.ipv4.icmp_errors_extension_mask | grep \"= 0$\""
+ check_err $? "ICMP extensions are not disabled by default"
+
+ # Test that unsupported values are rejected. Do not use "sysctl" as
+ # older versions do not return an error code upon failure.
+ run_cmd "$h1" "bash -c \"echo 0x80 > /proc/sys/net/ipv4/icmp_errors_extension_mask\""
+ check_fail $? "Unsupported sysctl value was not rejected"
+
+ ## Extension-specific tests
+
+ # Incoming interface info test. Test with various packet sizes,
+ # including the default one.
+ traceroute_ext_iio_iif_test
+ traceroute_ext_iio_iif_test 127
+ traceroute_ext_iio_iif_test 128
+ traceroute_ext_iio_iif_test 129
+
+ log_test "IPv4 traceroute with ICMP extensions"
+
+ cleanup_traceroute_ext
}
################################################################################
@@ -288,15 +753,16 @@ run_traceroute()
run_tests()
{
run_traceroute6
+ run_traceroute6_vrf
+ run_traceroute6_ext
run_traceroute
+ run_traceroute_vrf
+ run_traceroute_ext
}
################################################################################
# main
-declare -i nfail=0
-declare -i nsuccess=0
-
while getopts :pv o
do
case $o in
@@ -306,7 +772,10 @@ do
esac
done
+require_command traceroute6
+require_command traceroute
+require_command jq
+
run_tests
-printf "\nTests passed: %3d\n" ${nsuccess}
-printf "Tests failed: %3d\n" ${nfail}
+exit "${EXIT_STATUS}"
diff --git a/tools/testing/selftests/net/tun.c b/tools/testing/selftests/net/tun.c
index fa83918b62d1..0efc67b0357a 100644
--- a/tools/testing/selftests/net/tun.c
+++ b/tools/testing/selftests/net/tun.c
@@ -15,7 +15,7 @@
#include <sys/ioctl.h>
#include <sys/socket.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
static int tun_attach(int fd, char *dev)
{
diff --git a/tools/testing/selftests/net/txtimestamp.c b/tools/testing/selftests/net/txtimestamp.c
index dae91eb97d69..bcc14688661d 100644
--- a/tools/testing/selftests/net/txtimestamp.c
+++ b/tools/testing/selftests/net/txtimestamp.c
@@ -217,7 +217,7 @@ static void print_timestamp_usr(void)
static void print_timestamp(struct scm_timestamping *tss, int tstype,
int tskey, int payload_len)
{
- const char *tsname;
+ const char *tsname = NULL;
validate_key(tskey, tstype);
diff --git a/tools/testing/selftests/net/udpgro.sh b/tools/testing/selftests/net/udpgro.sh
index 1dc337c709f8..b17e032a6d75 100755
--- a/tools/testing/selftests/net/udpgro.sh
+++ b/tools/testing/selftests/net/udpgro.sh
@@ -48,7 +48,7 @@ run_one() {
cfg_veth
- ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} &
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 100 ${rx_args} &
local PID1=$!
wait_local_port_listen ${PEER_NS} 8000 udp
@@ -95,7 +95,7 @@ run_one_nat() {
# will land on the 'plain' one
ip netns exec "${PEER_NS}" ./udpgso_bench_rx -G ${family} -b ${addr1} -n 0 &
local PID1=$!
- ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${family} -b ${addr2%/*} ${rx_args} &
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 100 ${family} -b ${addr2%/*} ${rx_args} &
local PID2=$!
wait_local_port_listen "${PEER_NS}" 8000 udp
@@ -117,9 +117,9 @@ run_one_2sock() {
cfg_veth
- ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} -p 12345 &
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 100 ${rx_args} -p 12345 &
local PID1=$!
- ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 2000 -R 10 ${rx_args} &
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 2000 -R 100 ${rx_args} &
local PID2=$!
wait_local_port_listen "${PEER_NS}" 12345 udp
diff --git a/tools/testing/selftests/net/udpgso_bench_tx.c b/tools/testing/selftests/net/udpgso_bench_tx.c
index 477392715a9a..86d80cce55b4 100644
--- a/tools/testing/selftests/net/udpgso_bench_tx.c
+++ b/tools/testing/selftests/net/udpgso_bench_tx.c
@@ -25,7 +25,7 @@
#include <sys/types.h>
#include <unistd.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#ifndef ETH_MAX_MTU
#define ETH_MAX_MTU 0xFFFFU
diff --git a/tools/testing/selftests/net/vlan_bridge_binding.sh b/tools/testing/selftests/net/vlan_bridge_binding.sh
index e7cb8c678bde..e8c02c64e03a 100755
--- a/tools/testing/selftests/net/vlan_bridge_binding.sh
+++ b/tools/testing/selftests/net/vlan_bridge_binding.sh
@@ -18,29 +18,29 @@ setup_prepare()
{
local port
- ip_link_add br up type bridge vlan_filtering 1
+ adf_ip_link_add br up type bridge vlan_filtering 1
for port in d1 d2 d3; do
- ip_link_add $port type veth peer name r$port
- ip_link_set_up $port
- ip_link_set_up r$port
- ip_link_set_master $port br
+ adf_ip_link_add $port type veth peer name r$port
+ adf_ip_link_set_up $port
+ adf_ip_link_set_up r$port
+ adf_ip_link_set_master $port br
done
- bridge_vlan_add vid 11 dev br self
- bridge_vlan_add vid 11 dev d1 master
+ adf_bridge_vlan_add vid 11 dev br self
+ adf_bridge_vlan_add vid 11 dev d1 master
- bridge_vlan_add vid 12 dev br self
- bridge_vlan_add vid 12 dev d2 master
+ adf_bridge_vlan_add vid 12 dev br self
+ adf_bridge_vlan_add vid 12 dev d2 master
- bridge_vlan_add vid 13 dev br self
- bridge_vlan_add vid 13 dev d1 master
- bridge_vlan_add vid 13 dev d2 master
+ adf_bridge_vlan_add vid 13 dev br self
+ adf_bridge_vlan_add vid 13 dev d1 master
+ adf_bridge_vlan_add vid 13 dev d2 master
- bridge_vlan_add vid 14 dev br self
- bridge_vlan_add vid 14 dev d1 master
- bridge_vlan_add vid 14 dev d2 master
- bridge_vlan_add vid 14 dev d3 master
+ adf_bridge_vlan_add vid 14 dev br self
+ adf_bridge_vlan_add vid 14 dev d1 master
+ adf_bridge_vlan_add vid 14 dev d2 master
+ adf_bridge_vlan_add vid 14 dev d3 master
}
operstate_is()
@@ -74,7 +74,7 @@ add_one_vlan()
local link=$1; shift
local id=$1; shift
- ip_link_add $link.$id link $link type vlan id $id "$@"
+ adf_ip_link_add $link.$id link $link type vlan id $id "$@"
}
add_vlans()
@@ -98,7 +98,7 @@ down_netdevs()
local dev
for dev in "$@"; do
- ip_link_set_down $dev
+ adf_ip_link_set_down $dev
done
}
@@ -207,13 +207,13 @@ test_binding_toggle_off()
do_test_binding_off : "on->off"
}
-dfr_set_binding_on()
+adf_set_binding_on()
{
set_vlans type vlan bridge_binding on
defer set_vlans type vlan bridge_binding off
}
-dfr_set_binding_off()
+adf_set_binding_off()
{
set_vlans type vlan bridge_binding off
defer set_vlans type vlan bridge_binding on
@@ -223,14 +223,14 @@ test_binding_toggle_on_when_lower_down()
{
add_vlans bridge_binding off
set_vlans up
- do_test_binding_on dfr_set_binding_on "off->on when lower down"
+ do_test_binding_on adf_set_binding_on "off->on when lower down"
}
test_binding_toggle_off_when_lower_down()
{
add_vlans bridge_binding on
set_vlans up
- do_test_binding_off dfr_set_binding_off "on->off when lower down"
+ do_test_binding_off adf_set_binding_off "on->off when lower down"
}
test_binding_toggle_on_when_upper_down()
@@ -249,6 +249,8 @@ test_binding_toggle_off_when_upper_down()
do_test_binding_off : "on->off when upper down"
}
+require_command jq
+
trap defer_scopes_cleanup EXIT
setup_prepare
tests_run
diff --git a/tools/testing/selftests/net/vlan_hw_filter.sh b/tools/testing/selftests/net/vlan_hw_filter.sh
index 7bc804ffaf7c..e195d5cab6f7 100755
--- a/tools/testing/selftests/net/vlan_hw_filter.sh
+++ b/tools/testing/selftests/net/vlan_hw_filter.sh
@@ -3,27 +3,101 @@
readonly NETNS="ns-$(mktemp -u XXXXXX)"
+ALL_TESTS="
+ test_vlan_filter_check
+ test_vlan0_del_crash_01
+ test_vlan0_del_crash_02
+ test_vlan0_del_crash_03
+ test_vid0_memleak
+"
+
ret=0
+setup() {
+ ip netns add ${NETNS}
+}
+
cleanup() {
- ip netns del $NETNS
+ ip netns del $NETNS 2>/dev/null
}
trap cleanup EXIT
fail() {
- echo "ERROR: ${1:-unexpected return code} (ret: $_)" >&2
- ret=1
+ echo "ERROR: ${1:-unexpected return code} (ret: $_)" >&2
+ ret=1
+}
+
+tests_run()
+{
+ local current_test
+ for current_test in ${TESTS:-$ALL_TESTS}; do
+ $current_test
+ done
+}
+
+test_vlan_filter_check() {
+ setup
+ ip netns exec ${NETNS} ip link add bond0 type bond mode 0
+ ip netns exec ${NETNS} ip link add bond_slave_1 type veth peer veth2
+ ip netns exec ${NETNS} ip link set bond_slave_1 master bond0
+ ip netns exec ${NETNS} ethtool -K bond0 rx-vlan-filter off
+ ip netns exec ${NETNS} ip link add link bond_slave_1 name bond_slave_1.0 type vlan id 0
+ ip netns exec ${NETNS} ip link add link bond0 name bond0.0 type vlan id 0
+ ip netns exec ${NETNS} ip link set bond_slave_1 nomaster
+ ip netns exec ${NETNS} ip link del veth2 || fail "Please check vlan HW filter function"
+ cleanup
}
-ip netns add ${NETNS}
-ip netns exec ${NETNS} ip link add bond0 type bond mode 0
-ip netns exec ${NETNS} ip link add bond_slave_1 type veth peer veth2
-ip netns exec ${NETNS} ip link set bond_slave_1 master bond0
-ip netns exec ${NETNS} ethtool -K bond0 rx-vlan-filter off
-ip netns exec ${NETNS} ip link add link bond_slave_1 name bond_slave_1.0 type vlan id 0
-ip netns exec ${NETNS} ip link add link bond0 name bond0.0 type vlan id 0
-ip netns exec ${NETNS} ip link set bond_slave_1 nomaster
-ip netns exec ${NETNS} ip link del veth2 || fail "Please check vlan HW filter function"
+# Enable the vlan_filter feature of real_dev at runtime while vlan0 exists
+test_vlan0_del_crash_01() {
+ setup
+ ip netns exec ${NETNS} ip link add bond0 type bond mode 0
+ ip netns exec ${NETNS} ip link add link bond0 name vlan0 type vlan id 0 protocol 802.1q
+ ip netns exec ${NETNS} ethtool -K bond0 rx-vlan-filter off
+ ip netns exec ${NETNS} ip link set dev bond0 up
+ ip netns exec ${NETNS} ethtool -K bond0 rx-vlan-filter on
+ ip netns exec ${NETNS} ip link set dev bond0 down
+ ip netns exec ${NETNS} ip link set dev bond0 up
+ ip netns exec ${NETNS} ip link del vlan0 || fail "Please check vlan HW filter function"
+ cleanup
+}
+
+# Enable the vlan_filter feature, then add vlan0 on real_dev at runtime
+test_vlan0_del_crash_02() {
+ setup
+ ip netns exec ${NETNS} ip link add bond0 type bond mode 0
+ ip netns exec ${NETNS} ethtool -K bond0 rx-vlan-filter off
+ ip netns exec ${NETNS} ip link set dev bond0 up
+ ip netns exec ${NETNS} ethtool -K bond0 rx-vlan-filter on
+ ip netns exec ${NETNS} ip link add link bond0 name vlan0 type vlan id 0 protocol 802.1q
+ ip netns exec ${NETNS} ip link set dev bond0 down
+ ip netns exec ${NETNS} ip link set dev bond0 up
+ ip netns exec ${NETNS} ip link del vlan0 || fail "Please check vlan HW filter function"
+ cleanup
+}
+
+# Enable the vlan_filter feature of real_dev at runtime and
+# exercise the kernel bug in vlan unregistration
+test_vlan0_del_crash_03() {
+ setup
+ ip netns exec ${NETNS} ip link add bond0 type bond mode 0
+ ip netns exec ${NETNS} ip link add link bond0 name vlan0 type vlan id 0 protocol 802.1q
+ ip netns exec ${NETNS} ethtool -K bond0 rx-vlan-filter off
+ ip netns exec ${NETNS} ip link set dev bond0 up
+ ip netns exec ${NETNS} ethtool -K bond0 rx-vlan-filter on
+ ip netns exec ${NETNS} ip link set dev bond0 down
+ ip netns exec ${NETNS} ip link del vlan0 || fail "Please check vlan HW filter function"
+ cleanup
+}
+
+test_vid0_memleak() {
+ setup
+ ip netns exec ${NETNS} ip link add bond0 up type bond mode 0
+ ip netns exec ${NETNS} ethtool -K bond0 rx-vlan-filter off
+ ip netns exec ${NETNS} ip link del dev bond0 || fail "Please check vlan HW filter function"
+ cleanup
+}
+tests_run
exit $ret
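
Since tests_run iterates over ${TESTS:-$ALL_TESTS}, a single case can now be run in isolation, e.g.:

# Run only one of the vlan0 crash reproducers:
TESTS="test_vlan0_del_crash_02" ./vlan_hw_filter.sh
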
diff --git a/tools/testing/selftests/net/vrf_route_leaking.sh b/tools/testing/selftests/net/vrf_route_leaking.sh
index e9c2f71da207..ce34cb2e6e0b 100755
--- a/tools/testing/selftests/net/vrf_route_leaking.sh
+++ b/tools/testing/selftests/net/vrf_route_leaking.sh
@@ -275,7 +275,7 @@ setup_sym()
# Wait for ip config to settle
- sleep 2
+ slowwait 5 ip netns exec $h1 "${ping6}" -c1 -w1 ${H2_N2_IP6} >/dev/null 2>&1
}
setup_asym()
@@ -370,7 +370,7 @@ setup_asym()
ip -netns $r2 -6 addr add dev eth1 ${R2_N2_IP6}/64 nodad
# Wait for ip config to settle
- sleep 2
+ slowwait 5 ip netns exec $h1 "${ping6}" -c1 -w1 ${H2_N2_IP6} >/dev/null 2>&1
}
check_connectivity()
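
slowwait comes from the shared net selftests library; roughly, it retries the given command until it succeeds or the timeout (first argument, in seconds) elapses. A simplified sketch of that behavior, not the actual lib.sh implementation:

# Retry "$@" until success or until timeout_sec seconds have passed.
slowwait()
{
	local timeout_sec=$1; shift
	local start=$SECONDS

	while ! "$@"; do
		if [ $((SECONDS - start)) -gt "$timeout_sec" ]; then
			return 1
		fi
		sleep 0.1
	done
}
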
diff --git a/tools/testing/selftests/net/ynl.mk b/tools/testing/selftests/net/ynl.mk
index e907c2751956..793a2fc33d9f 100644
--- a/tools/testing/selftests/net/ynl.mk
+++ b/tools/testing/selftests/net/ynl.mk
@@ -5,10 +5,11 @@
# Inputs:
#
# YNL_GENS: families we need in the selftests
-# YNL_PROGS: TEST_PROGS which need YNL (TODO, none exist, yet)
+# YNL_GEN_PROGS: TEST_GEN_PROGS which need YNL
# YNL_GEN_FILES: TEST_GEN_FILES which need YNL
-YNL_OUTPUTS := $(patsubst %,$(OUTPUT)/%,$(YNL_GEN_FILES))
+YNL_OUTPUTS := $(patsubst %,$(OUTPUT)/%,$(YNL_GEN_FILES)) \
+ $(patsubst %,$(OUTPUT)/%,$(YNL_GEN_PROGS))
YNL_SPECS := \
$(patsubst %,$(top_srcdir)/Documentation/netlink/specs/%.yaml,$(YNL_GENS))
diff --git a/tools/testing/selftests/nolibc/Makefile b/tools/testing/selftests/nolibc/Makefile
index 94176ffe4646..40f5c2908dda 100644
--- a/tools/testing/selftests/nolibc/Makefile
+++ b/tools/testing/selftests/nolibc/Makefile
@@ -1,341 +1,26 @@
# SPDX-License-Identifier: GPL-2.0
-# Makefile for nolibc tests
-# we're in ".../tools/testing/selftests/nolibc"
-ifeq ($(srctree),)
-srctree := $(patsubst %/tools/testing/selftests/,%,$(dir $(CURDIR)))
-endif
-
-include $(srctree)/tools/scripts/utilities.mak
-# We need this for the "__cc-option" macro.
-include $(srctree)/scripts/Makefile.compiler
-
-ifneq ($(O),)
-ifneq ($(call is-absolute,$(O)),y)
-$(error Only absolute O= parameters are supported)
-endif
-objtree := $(O)
-else
-objtree ?= $(srctree)
-endif
-
-ifeq ($(ARCH),)
-include $(srctree)/scripts/subarch.include
-ARCH = $(SUBARCH)
-endif
-
-cc-option = $(call __cc-option, $(CC),$(CLANG_CROSS_FLAGS),$(1),$(2))
-
-# XARCH extends the kernel's ARCH with a few variants of the same
-# architecture that only differ by the configuration, the toolchain
-# and the Qemu program used. It is copied as-is into ARCH except for
-# a few specific values which are mapped like this:
-#
-# XARCH | ARCH | config
-# -------------|-----------|-------------------------
-# ppc | powerpc | 32 bits
-# ppc64 | powerpc | 64 bits big endian
-# ppc64le | powerpc | 64 bits little endian
-#
-# It is recommended to only use XARCH, though it does not harm if
-# ARCH is already set. For simplicity, ARCH is sufficient for all
-# architectures where both are equal.
-
-# configure default variants for target kernel supported architectures
-XARCH_powerpc = ppc
-XARCH_mips = mips32le
-XARCH_riscv = riscv64
-XARCH = $(or $(XARCH_$(ARCH)),$(ARCH))
-
-# map from user input variants to their kernel supported architectures
-ARCH_armthumb = arm
-ARCH_ppc = powerpc
-ARCH_ppc64 = powerpc
-ARCH_ppc64le = powerpc
-ARCH_mips32le = mips
-ARCH_mips32be = mips
-ARCH_riscv32 = riscv
-ARCH_riscv64 = riscv
-ARCH_s390x = s390
-ARCH_sparc32 = sparc
-ARCH_sparc64 = sparc
-ARCH := $(or $(ARCH_$(XARCH)),$(XARCH))
-# kernel image names by architecture
-IMAGE_i386 = arch/x86/boot/bzImage
-IMAGE_x86_64 = arch/x86/boot/bzImage
-IMAGE_x86 = arch/x86/boot/bzImage
-IMAGE_arm64 = arch/arm64/boot/Image
-IMAGE_arm = arch/arm/boot/zImage
-IMAGE_armthumb = arch/arm/boot/zImage
-IMAGE_mips32le = vmlinuz
-IMAGE_mips32be = vmlinuz
-IMAGE_ppc = vmlinux
-IMAGE_ppc64 = vmlinux
-IMAGE_ppc64le = arch/powerpc/boot/zImage
-IMAGE_riscv = arch/riscv/boot/Image
-IMAGE_riscv32 = arch/riscv/boot/Image
-IMAGE_riscv64 = arch/riscv/boot/Image
-IMAGE_s390x = arch/s390/boot/bzImage
-IMAGE_s390 = arch/s390/boot/bzImage
-IMAGE_loongarch = arch/loongarch/boot/vmlinuz.efi
-IMAGE_sparc32 = arch/sparc/boot/image
-IMAGE_sparc64 = arch/sparc/boot/image
-IMAGE_m68k = vmlinux
-IMAGE = $(objtree)/$(IMAGE_$(XARCH))
-IMAGE_NAME = $(notdir $(IMAGE))
+TEST_GEN_PROGS := nolibc-test
-# default kernel configurations that appear to be usable
-DEFCONFIG_i386 = defconfig
-DEFCONFIG_x86_64 = defconfig
-DEFCONFIG_x86 = defconfig
-DEFCONFIG_arm64 = defconfig
-DEFCONFIG_arm = multi_v7_defconfig
-DEFCONFIG_armthumb = multi_v7_defconfig
-DEFCONFIG_mips32le = malta_defconfig
-DEFCONFIG_mips32be = malta_defconfig generic/eb.config
-DEFCONFIG_ppc = pmac32_defconfig
-DEFCONFIG_ppc64 = powernv_be_defconfig
-DEFCONFIG_ppc64le = powernv_defconfig
-DEFCONFIG_riscv = defconfig
-DEFCONFIG_riscv32 = rv32_defconfig
-DEFCONFIG_riscv64 = defconfig
-DEFCONFIG_s390x = defconfig
-DEFCONFIG_s390 = defconfig compat.config
-DEFCONFIG_loongarch = defconfig
-DEFCONFIG_sparc32 = sparc32_defconfig
-DEFCONFIG_sparc64 = sparc64_defconfig
-DEFCONFIG_m68k = virt_defconfig
-DEFCONFIG = $(DEFCONFIG_$(XARCH))
+include ../lib.mk
+include $(top_srcdir)/scripts/Makefile.compiler
-EXTRACONFIG_m68k = -e CONFIG_BLK_DEV_INITRD
-EXTRACONFIG = $(EXTRACONFIG_$(XARCH))
-EXTRACONFIG_arm = -e CONFIG_NAMESPACES
-EXTRACONFIG_armthumb = -e CONFIG_NAMESPACES
-
-# optional tests to run (default = all)
-TEST =
-
-# QEMU_ARCH: arch names used by qemu
-QEMU_ARCH_i386 = i386
-QEMU_ARCH_x86_64 = x86_64
-QEMU_ARCH_x86 = x86_64
-QEMU_ARCH_arm64 = aarch64
-QEMU_ARCH_arm = arm
-QEMU_ARCH_armthumb = arm
-QEMU_ARCH_mips32le = mipsel # works with malta_defconfig
-QEMU_ARCH_mips32be = mips
-QEMU_ARCH_ppc = ppc
-QEMU_ARCH_ppc64 = ppc64
-QEMU_ARCH_ppc64le = ppc64
-QEMU_ARCH_riscv = riscv64
-QEMU_ARCH_riscv32 = riscv32
-QEMU_ARCH_riscv64 = riscv64
-QEMU_ARCH_s390x = s390x
-QEMU_ARCH_s390 = s390x
-QEMU_ARCH_loongarch = loongarch64
-QEMU_ARCH_sparc32 = sparc
-QEMU_ARCH_sparc64 = sparc64
-QEMU_ARCH_m68k = m68k
-QEMU_ARCH = $(QEMU_ARCH_$(XARCH))
-
-QEMU_ARCH_USER_ppc64le = ppc64le
-QEMU_ARCH_USER = $(or $(QEMU_ARCH_USER_$(XARCH)),$(QEMU_ARCH_$(XARCH)))
-
-QEMU_BIOS_DIR = /usr/share/edk2/
-QEMU_BIOS_loongarch = $(QEMU_BIOS_DIR)/loongarch64/OVMF_CODE.fd
-
-ifneq ($(QEMU_BIOS_$(XARCH)),)
-QEMU_ARGS_BIOS = -bios $(QEMU_BIOS_$(XARCH))
-endif
-
-# QEMU_ARGS : some arch-specific args to pass to qemu
-QEMU_ARGS_i386 = -M pc -append "console=ttyS0,9600 i8042.noaux panic=-1 $(TEST:%=NOLIBC_TEST=%)"
-QEMU_ARGS_x86_64 = -M pc -append "console=ttyS0,9600 i8042.noaux panic=-1 $(TEST:%=NOLIBC_TEST=%)"
-QEMU_ARGS_x86 = -M pc -append "console=ttyS0,9600 i8042.noaux panic=-1 $(TEST:%=NOLIBC_TEST=%)"
-QEMU_ARGS_arm64 = -M virt -cpu cortex-a53 -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
-QEMU_ARGS_arm = -M virt -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
-QEMU_ARGS_armthumb = -M virt -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
-QEMU_ARGS_mips32le = -M malta -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
-QEMU_ARGS_mips32be = -M malta -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
-QEMU_ARGS_ppc = -M g3beige -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
-QEMU_ARGS_ppc64 = -M powernv -append "console=hvc0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
-QEMU_ARGS_ppc64le = -M powernv -append "console=hvc0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
-QEMU_ARGS_riscv = -M virt -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
-QEMU_ARGS_riscv32 = -M virt -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
-QEMU_ARGS_riscv64 = -M virt -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
-QEMU_ARGS_s390x = -M s390-ccw-virtio -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
-QEMU_ARGS_s390 = -M s390-ccw-virtio -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
-QEMU_ARGS_loongarch = -M virt -append "console=ttyS0,115200 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
-QEMU_ARGS_sparc32 = -M SS-5 -m 256M -append "console=ttyS0,115200 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
-QEMU_ARGS_sparc64 = -M sun4u -append "console=ttyS0,115200 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
-QEMU_ARGS_m68k = -M virt -append "console=ttyGF0,115200 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
-QEMU_ARGS = -m 1G $(QEMU_ARGS_$(XARCH)) $(QEMU_ARGS_BIOS) $(QEMU_ARGS_EXTRA)
-
-# OUTPUT is only set when run from the main makefile, otherwise
-# it defaults to this nolibc directory.
-OUTPUT ?= $(CURDIR)/
-
-ifeq ($(V),1)
-Q=
-else
-Q=@
-endif
+cc-option = $(call __cc-option, $(CC),,$(1),$(2))
-CFLAGS_i386 = $(call cc-option,-m32)
-CFLAGS_arm = -marm
-CFLAGS_armthumb = -mthumb -march=armv6t2
-CFLAGS_ppc = -m32 -mbig-endian -mno-vsx $(call cc-option,-mmultiple)
-CFLAGS_ppc64 = -m64 -mbig-endian -mno-vsx $(call cc-option,-mmultiple)
-CFLAGS_ppc64le = -m64 -mlittle-endian -mno-vsx $(call cc-option,-mabi=elfv2)
-CFLAGS_s390x = -m64
-CFLAGS_s390 = -m31
-CFLAGS_mips32le = -EL -mabi=32 -fPIC
-CFLAGS_mips32be = -EB -mabi=32
-CFLAGS_sparc32 = $(call cc-option,-m32)
-ifeq ($(origin XARCH),command line)
-CFLAGS_XARCH = $(CFLAGS_$(XARCH))
-endif
-CFLAGS_STACKPROTECTOR ?= $(call cc-option,-mstack-protector-guard=global $(call cc-option,-fstack-protector-all))
-CFLAGS_SANITIZER ?= $(call cc-option,-fsanitize=undefined -fsanitize-trap=all)
-CFLAGS ?= -Os -fno-ident -fno-asynchronous-unwind-tables -std=c89 -W -Wall -Wextra \
- $(call cc-option,-fno-stack-protector) $(call cc-option,-Wmissing-prototypes) \
- $(CFLAGS_XARCH) $(CFLAGS_STACKPROTECTOR) $(CFLAGS_SANITIZER) $(CFLAGS_EXTRA)
-LDFLAGS :=
+include Makefile.include
-LIBGCC := -lgcc
+CFLAGS = -nostdlib -nostdinc -static \
+ -isystem $(top_srcdir)/tools/include/nolibc -isystem $(top_srcdir)/usr/include \
+ $(CFLAGS_NOLIBC_TEST)
-ifneq ($(LLVM),)
-# Not needed for clang
-LIBGCC :=
+ifeq ($(LLVM),)
+LDLIBS := -lgcc
endif
-# Modify CFLAGS based on LLVM=
-include $(srctree)/tools/scripts/Makefile.include
-
-# GCC uses "s390", clang "systemz"
-CLANG_CROSS_FLAGS := $(subst --target=s390-linux,--target=systemz-linux,$(CLANG_CROSS_FLAGS))
-
-REPORT ?= awk '/\[OK\][\r]*$$/{p++} /\[FAIL\][\r]*$$/{if (!f) printf("\n"); f++; print;} /\[SKIPPED\][\r]*$$/{s++} \
- END{ printf("\n%3d test(s): %3d passed, %3d skipped, %3d failed => status: ", p+s+f, p, s, f); \
- if (f || !p) printf("failure\n"); else if (s) printf("warning\n"); else printf("success\n");; \
- printf("\nSee all results in %s\n", ARGV[1]); }'
+$(OUTPUT)/nolibc-test: nolibc-test.c nolibc-test-linkage.c | headers
help:
- @echo "Supported targets under selftests/nolibc:"
- @echo " all call the \"run\" target below"
- @echo " help this help"
- @echo " sysroot create the nolibc sysroot here (uses \$$ARCH)"
- @echo " nolibc-test build the executable (uses \$$CC and \$$CROSS_COMPILE)"
- @echo " libc-test build an executable using the compiler's default libc instead"
- @echo " run-user runs the executable under QEMU (uses \$$XARCH, \$$TEST)"
- @echo " initramfs.cpio prepare the initramfs archive with nolibc-test"
- @echo " initramfs prepare the initramfs tree with nolibc-test"
- @echo " defconfig create a fresh new default config (uses \$$XARCH)"
- @echo " kernel (re)build the kernel (uses \$$XARCH)"
- @echo " kernel-standalone (re)build the kernel with the initramfs (uses \$$XARCH)"
- @echo " run runs the kernel in QEMU after building it (uses \$$XARCH, \$$TEST)"
- @echo " rerun runs a previously prebuilt kernel in QEMU (uses \$$XARCH, \$$TEST)"
- @echo " clean clean the sysroot, initramfs, build and output files"
- @echo ""
- @echo "The output file is \"run.out\". Test ranges may be passed using \$$TEST."
- @echo ""
- @echo "Currently using the following variables:"
- @echo " ARCH = $(ARCH)"
- @echo " XARCH = $(XARCH)"
- @echo " CROSS_COMPILE = $(CROSS_COMPILE)"
- @echo " CC = $(CC)"
- @echo " OUTPUT = $(OUTPUT)"
- @echo " TEST = $(TEST)"
- @echo " QEMU_ARCH = $(if $(QEMU_ARCH),$(QEMU_ARCH),UNKNOWN_ARCH) [determined from \$$XARCH]"
- @echo " IMAGE_NAME = $(if $(IMAGE_NAME),$(IMAGE_NAME),UNKNOWN_ARCH) [determined from \$$XARCH]"
- @echo ""
-
-all: run
-
-sysroot: sysroot/$(ARCH)/include
-
-sysroot/$(ARCH)/include:
- $(Q)rm -rf sysroot/$(ARCH) sysroot/sysroot
- $(QUIET_MKDIR)mkdir -p sysroot
- $(Q)$(MAKE) -C $(srctree) outputmakefile
- $(Q)$(MAKE) -C $(srctree)/tools/include/nolibc ARCH=$(ARCH) OUTPUT=$(CURDIR)/sysroot/ headers_standalone headers_check
- $(Q)mv sysroot/sysroot sysroot/$(ARCH)
-
-ifneq ($(NOLIBC_SYSROOT),0)
-nolibc-test: nolibc-test.c nolibc-test-linkage.c sysroot/$(ARCH)/include
- $(QUIET_CC)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ \
- -nostdlib -nostdinc -static -Isysroot/$(ARCH)/include nolibc-test.c nolibc-test-linkage.c $(LIBGCC)
-else
-nolibc-test: nolibc-test.c nolibc-test-linkage.c
- $(QUIET_CC)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ \
- -nostdlib -static -include $(srctree)/tools/include/nolibc/nolibc.h nolibc-test.c nolibc-test-linkage.c $(LIBGCC)
-endif
-
-libc-test: nolibc-test.c nolibc-test-linkage.c
- $(QUIET_CC)$(HOSTCC) -o $@ nolibc-test.c nolibc-test-linkage.c
-
-# local libc-test
-run-libc-test: libc-test
- $(Q)./libc-test > "$(CURDIR)/run.out" || :
- $(Q)$(REPORT) $(CURDIR)/run.out
-
-# local nolibc-test
-run-nolibc-test: nolibc-test
- $(Q)./nolibc-test > "$(CURDIR)/run.out" || :
- $(Q)$(REPORT) $(CURDIR)/run.out
-
-# qemu user-land test
-run-user: nolibc-test
- $(Q)qemu-$(QEMU_ARCH_USER) ./nolibc-test > "$(CURDIR)/run.out" || :
- $(Q)$(REPORT) $(CURDIR)/run.out
-
-initramfs.cpio: kernel nolibc-test
- $(QUIET_GEN)echo 'file /init nolibc-test 755 0 0' | $(objtree)/usr/gen_init_cpio - > initramfs.cpio
-
-initramfs: nolibc-test
- $(QUIET_MKDIR)mkdir -p initramfs
- $(call QUIET_INSTALL, initramfs/init)
- $(Q)cp nolibc-test initramfs/init
-
-defconfig:
- $(Q)$(MAKE) -C $(srctree) ARCH=$(ARCH) CC=$(CC) CROSS_COMPILE=$(CROSS_COMPILE) $(DEFCONFIG)
- $(Q)if [ -n "$(EXTRACONFIG)" ]; then \
- $(srctree)/scripts/config --file $(objtree)/.config $(EXTRACONFIG); \
- $(MAKE) -C $(srctree) ARCH=$(ARCH) CC=$(CC) CROSS_COMPILE=$(CROSS_COMPILE) olddefconfig < /dev/null; \
- fi
-
-kernel: | defconfig
- $(Q)$(MAKE) -C $(srctree) ARCH=$(ARCH) CC=$(CC) CROSS_COMPILE=$(CROSS_COMPILE) $(IMAGE_NAME) < /dev/null
-
-kernel-standalone: initramfs | defconfig
- $(Q)$(MAKE) -C $(srctree) ARCH=$(ARCH) CC=$(CC) CROSS_COMPILE=$(CROSS_COMPILE) $(IMAGE_NAME) CONFIG_INITRAMFS_SOURCE=$(CURDIR)/initramfs < /dev/null
-
-# run the tests after building the kernel
-run: kernel initramfs.cpio
- $(Q)qemu-system-$(QEMU_ARCH) -display none -no-reboot -kernel "$(IMAGE)" -initrd initramfs.cpio -serial stdio $(QEMU_ARGS) > "$(CURDIR)/run.out"
- $(Q)$(REPORT) $(CURDIR)/run.out
-
-# re-run the tests from an existing kernel
-rerun:
- $(Q)qemu-system-$(QEMU_ARCH) -display none -no-reboot -kernel "$(IMAGE)" -initrd initramfs.cpio -serial stdio $(QEMU_ARGS) > "$(CURDIR)/run.out"
- $(Q)$(REPORT) $(CURDIR)/run.out
-
-# report with existing test log
-report:
- $(Q)$(REPORT) $(CURDIR)/run.out
-
-clean:
- $(call QUIET_CLEAN, sysroot)
- $(Q)rm -rf sysroot
- $(call QUIET_CLEAN, nolibc-test)
- $(Q)rm -f nolibc-test
- $(call QUIET_CLEAN, libc-test)
- $(Q)rm -f libc-test
- $(call QUIET_CLEAN, initramfs.cpio)
- $(Q)rm -rf initramfs.cpio
- $(call QUIET_CLEAN, initramfs)
- $(Q)rm -rf initramfs
- $(call QUIET_CLEAN, run.out)
- $(Q)rm -rf run.out
+ @echo "For the custom nolibc testsuite use '$(MAKE) -f Makefile.nolibc'; available targets:"
+ @$(MAKE) -f Makefile.nolibc help
-.PHONY: sysroot/$(ARCH)/include
+.PHONY: help
diff --git a/tools/testing/selftests/nolibc/Makefile.include b/tools/testing/selftests/nolibc/Makefile.include
new file mode 100644
index 000000000000..66287fafbbe0
--- /dev/null
+++ b/tools/testing/selftests/nolibc/Makefile.include
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+__CFLAGS_STACKPROTECTOR = $(call cc-option,-fstack-protector-all) $(call cc-option,-mstack-protector-guard=global)
+_CFLAGS_STACKPROTECTOR ?= $(call try-run, \
+ echo 'void foo(void) {}' | $(CC) -x c - -o - -S $(CLANG_CROSS_FLAGS) $(__CFLAGS_STACKPROTECTOR) | grep -q __stack_chk_guard, \
+ $(__CFLAGS_STACKPROTECTOR))
+_CFLAGS_SANITIZER ?= $(call cc-option,-fsanitize=undefined -fsanitize-trap=all)
+CFLAGS_NOLIBC_TEST ?= -Os -fno-ident -fno-asynchronous-unwind-tables -std=c89 -W -Wall -Wextra \
+ $(call cc-option,-fno-stack-protector) $(call cc-option,-Wmissing-prototypes) \
+ $(_CFLAGS_STACKPROTECTOR) $(_CFLAGS_SANITIZER)
diff --git a/tools/testing/selftests/nolibc/Makefile.nolibc b/tools/testing/selftests/nolibc/Makefile.nolibc
new file mode 100644
index 000000000000..f9d43cbdc894
--- /dev/null
+++ b/tools/testing/selftests/nolibc/Makefile.nolibc
@@ -0,0 +1,382 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for nolibc tests
+# we're in ".../tools/testing/selftests/nolibc"
+ifeq ($(srctree),)
+srctree := $(patsubst %/tools/testing/selftests/,%,$(dir $(CURDIR)))
+endif
+
+include $(srctree)/tools/scripts/utilities.mak
+# We need this for the "__cc-option" macro.
+include $(srctree)/scripts/Makefile.compiler
+
+ifneq ($(O),)
+ifneq ($(call is-absolute,$(O)),y)
+$(error Only absolute O= parameters are supported)
+endif
+objtree := $(O)
+else
+objtree ?= $(srctree)
+endif
+
+ifeq ($(ARCH),)
+include $(srctree)/scripts/subarch.include
+ARCH = $(SUBARCH)
+endif
+
+cc-option = $(call __cc-option, $(CC),$(CLANG_CROSS_FLAGS),$(1),$(2))
+
+# XARCH extends the kernel's ARCH with a few variants of the same
+# architecture that only differ by the configuration, the toolchain
+# and the Qemu program used. It is copied as-is into ARCH except for
+# a few specific values which are mapped like this:
+#
+# XARCH | ARCH | config
+# -------------|-----------|-------------------------
+# ppc | powerpc | 32 bits
+# ppc64 | powerpc | 64 bits big endian
+# ppc64le | powerpc | 64 bits little endian
+#
+# It is recommended to only use XARCH, though it does no harm if
+# ARCH is already set. For simplicity, ARCH is sufficient for all
+# architectures where both are equal.
+
+# configure default variants for target kernel supported architectures
+XARCH_powerpc = ppc
+XARCH_mips = mips32le
+XARCH_riscv = riscv64
+XARCH = $(or $(XARCH_$(ARCH)),$(ARCH))
+
+# map from user input variants to their kernel supported architectures
+ARCH_x32 = x86
+ARCH_armthumb = arm
+ARCH_ppc = powerpc
+ARCH_ppc64 = powerpc
+ARCH_ppc64le = powerpc
+ARCH_mips32le = mips
+ARCH_mips32be = mips
+ARCH_mipsn32le = mips
+ARCH_mipsn32be = mips
+ARCH_mips64le = mips
+ARCH_mips64be = mips
+ARCH_riscv32 = riscv
+ARCH_riscv64 = riscv
+ARCH_s390x = s390
+ARCH_sparc32 = sparc
+ARCH_sparc64 = sparc
+ARCH_sh4 = sh
+ARCH := $(or $(ARCH_$(XARCH)),$(XARCH))
+
+# kernel image names by architecture
+IMAGE_i386 = arch/x86/boot/bzImage
+IMAGE_x86_64 = arch/x86/boot/bzImage
+IMAGE_x32 = arch/x86/boot/bzImage
+IMAGE_x86 = arch/x86/boot/bzImage
+IMAGE_arm64 = arch/arm64/boot/Image
+IMAGE_arm = arch/arm/boot/zImage
+IMAGE_armthumb = arch/arm/boot/zImage
+IMAGE_mips32le = vmlinuz
+IMAGE_mips32be = vmlinuz
+IMAGE_mipsn32le = vmlinuz
+IMAGE_mipsn32be = vmlinuz
+IMAGE_mips64le = vmlinuz
+IMAGE_mips64be = vmlinuz
+IMAGE_ppc = vmlinux
+IMAGE_ppc64 = vmlinux
+IMAGE_ppc64le = arch/powerpc/boot/zImage
+IMAGE_riscv = arch/riscv/boot/Image
+IMAGE_riscv32 = arch/riscv/boot/Image
+IMAGE_riscv64 = arch/riscv/boot/Image
+IMAGE_s390x = arch/s390/boot/bzImage
+IMAGE_loongarch = arch/loongarch/boot/vmlinuz.efi
+IMAGE_sparc32 = arch/sparc/boot/image
+IMAGE_sparc64 = arch/sparc/boot/image
+IMAGE_m68k = vmlinux
+IMAGE_sh4 = arch/sh/boot/zImage
+IMAGE = $(objtree)/$(IMAGE_$(XARCH))
+IMAGE_NAME = $(notdir $(IMAGE))
+
+# default kernel configurations that appear to be usable
+DEFCONFIG_i386 = defconfig
+DEFCONFIG_x86_64 = defconfig
+DEFCONFIG_x32 = defconfig
+DEFCONFIG_x86 = defconfig
+DEFCONFIG_arm64 = defconfig
+DEFCONFIG_arm = multi_v7_defconfig
+DEFCONFIG_armthumb = multi_v7_defconfig
+DEFCONFIG_mips32le = malta_defconfig
+DEFCONFIG_mips32be = malta_defconfig generic/eb.config
+DEFCONFIG_mipsn32le = malta_defconfig generic/64r2.config
+DEFCONFIG_mipsn32be = malta_defconfig generic/64r6.config generic/eb.config
+DEFCONFIG_mips64le = malta_defconfig generic/64r6.config
+DEFCONFIG_mips64be = malta_defconfig generic/64r2.config generic/eb.config
+DEFCONFIG_ppc = pmac32_defconfig
+DEFCONFIG_ppc64 = powernv_be_defconfig
+DEFCONFIG_ppc64le = powernv_defconfig
+DEFCONFIG_riscv = defconfig
+DEFCONFIG_riscv32 = rv32_defconfig
+DEFCONFIG_riscv64 = defconfig
+DEFCONFIG_s390x = defconfig
+DEFCONFIG_loongarch = defconfig
+DEFCONFIG_sparc32 = sparc32_defconfig
+DEFCONFIG_sparc64 = sparc64_defconfig
+DEFCONFIG_m68k = virt_defconfig
+DEFCONFIG_sh4 = rts7751r2dplus_defconfig
+DEFCONFIG = $(DEFCONFIG_$(XARCH))
+
+EXTRACONFIG_x32 = -e CONFIG_X86_X32_ABI
+EXTRACONFIG_arm = -e CONFIG_NAMESPACES
+EXTRACONFIG_armthumb = -e CONFIG_NAMESPACES
+EXTRACONFIG_m68k = -e CONFIG_BLK_DEV_INITRD
+EXTRACONFIG_sh4 = -e CONFIG_BLK_DEV_INITRD -e CONFIG_CMDLINE_FROM_BOOTLOADER
+EXTRACONFIG = $(EXTRACONFIG_$(XARCH))
+
+# optional tests to run (default = all)
+TEST =
+
+# QEMU_ARCH: arch names used by qemu
+QEMU_ARCH_i386 = i386
+QEMU_ARCH_x86_64 = x86_64
+QEMU_ARCH_x32 = x86_64
+QEMU_ARCH_x86 = x86_64
+QEMU_ARCH_arm64 = aarch64
+QEMU_ARCH_arm = arm
+QEMU_ARCH_armthumb = arm
+QEMU_ARCH_mips32le = mipsel # works with malta_defconfig
+QEMU_ARCH_mips32be = mips
+QEMU_ARCH_mipsn32le = mips64el
+QEMU_ARCH_mipsn32be = mips64
+QEMU_ARCH_mips64le = mips64el
+QEMU_ARCH_mips64be = mips64
+QEMU_ARCH_ppc = ppc
+QEMU_ARCH_ppc64 = ppc64
+QEMU_ARCH_ppc64le = ppc64
+QEMU_ARCH_riscv = riscv64
+QEMU_ARCH_riscv32 = riscv32
+QEMU_ARCH_riscv64 = riscv64
+QEMU_ARCH_s390x = s390x
+QEMU_ARCH_loongarch = loongarch64
+QEMU_ARCH_sparc32 = sparc
+QEMU_ARCH_sparc64 = sparc64
+QEMU_ARCH_m68k = m68k
+QEMU_ARCH_sh4 = sh4
+QEMU_ARCH = $(QEMU_ARCH_$(XARCH))
+
+QEMU_ARCH_USER_ppc64le = ppc64le
+QEMU_ARCH_USER_mipsn32le = mipsn32el
+QEMU_ARCH_USER_mipsn32be = mipsn32
+QEMU_ARCH_USER = $(or $(QEMU_ARCH_USER_$(XARCH)),$(QEMU_ARCH_$(XARCH)))
+
+QEMU_BIOS_DIR = /usr/share/edk2/
+QEMU_BIOS_loongarch = $(QEMU_BIOS_DIR)/loongarch64/OVMF_CODE.fd
+
+ifneq ($(QEMU_BIOS_$(XARCH)),)
+QEMU_ARGS_BIOS = -bios $(QEMU_BIOS_$(XARCH))
+endif
+
+# QEMU_ARGS : some arch-specific args to pass to qemu
+QEMU_ARGS_i386 = -M pc -append "console=ttyS0,9600 i8042.noaux panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_x86_64 = -M pc -append "console=ttyS0,9600 i8042.noaux panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_x32 = -M pc -append "console=ttyS0,9600 i8042.noaux panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_x86 = -M pc -append "console=ttyS0,9600 i8042.noaux panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_arm64 = -M virt -cpu cortex-a53 -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_arm = -M virt -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_armthumb = -M virt -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_mips32le = -M malta -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_mips32be = -M malta -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_mipsn32le = -M malta -cpu 5KEc -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_mipsn32be = -M malta -cpu I6400 -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_mips64le = -M malta -cpu I6400 -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_mips64be = -M malta -cpu 5KEc -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_ppc = -M g3beige -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_ppc64 = -M powernv -append "console=hvc0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_ppc64le = -M powernv -append "console=hvc0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_riscv = -M virt -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_riscv32 = -M virt -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_riscv64 = -M virt -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_s390x = -M s390-ccw-virtio -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_loongarch = -M virt -append "console=ttyS0,115200 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_sparc32 = -M SS-5 -m 256M -append "console=ttyS0,115200 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_sparc64 = -M sun4u -append "console=ttyS0,115200 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_m68k = -M virt -append "console=ttyGF0,115200 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_sh4 = -M r2d -serial file:/dev/stdout -append "console=ttySC1,115200 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS = -m 1G $(QEMU_ARGS_$(XARCH)) $(QEMU_ARGS_BIOS) $(QEMU_ARGS_EXTRA)
+
+# OUTPUT is only set when run from the main makefile, otherwise
+# it defaults to this nolibc directory.
+OUTPUT ?= $(CURDIR)/
+
+ifeq ($(V),1)
+Q=
+else
+Q=@
+endif
+
+CFLAGS_i386 = $(call cc-option,-m32)
+CFLAGS_x32 = -mx32
+CFLAGS_arm = -marm
+CFLAGS_armthumb = -mthumb -march=armv6t2
+CFLAGS_ppc = -m32 -mbig-endian -mno-vsx $(call cc-option,-mmultiple)
+CFLAGS_ppc64 = -m64 -mbig-endian -mno-vsx $(call cc-option,-mmultiple)
+CFLAGS_ppc64le = -m64 -mlittle-endian -mno-vsx $(call cc-option,-mabi=elfv2)
+CFLAGS_s390x = -m64
+CFLAGS_mips32le = -EL -mabi=32 -fPIC
+CFLAGS_mips32be = -EB -mabi=32
+CFLAGS_mipsn32le = -EL -mabi=n32 -fPIC -march=mips64r2
+CFLAGS_mipsn32be = -EB -mabi=n32 -march=mips64r6
+CFLAGS_mips64le = -EL -mabi=64 -march=mips64r6
+CFLAGS_mips64be = -EB -mabi=64 -march=mips64r2
+CFLAGS_loongarch = $(if $(LLVM),-fuse-ld=lld)
+CFLAGS_sparc32 = $(call cc-option,-m32)
+CFLAGS_sh4 = -ml -m4
+ifeq ($(origin XARCH),command line)
+CFLAGS_XARCH = $(CFLAGS_$(XARCH))
+endif
+
+include Makefile.include
+
+CFLAGS ?= $(CFLAGS_NOLIBC_TEST) $(CFLAGS_XARCH) $(CFLAGS_EXTRA)
+LDFLAGS :=
+
+LIBGCC := -lgcc
+
+ifeq ($(ARCH),x86)
+# Not needed on x86, probably not present for x32
+LIBGCC :=
+endif
+
+ifneq ($(LLVM),)
+# Not needed for clang
+LIBGCC :=
+endif
+
+# Modify CFLAGS based on LLVM=
+include $(srctree)/tools/scripts/Makefile.include
+
+REPORT ?= awk '/\[OK\][\r]*$$/{p++} /\[FAIL\][\r]*$$/{if (!f) printf("\n"); f++; print;} /\[SKIPPED\][\r]*$$/{s++} \
+ /^Total number of errors:/{done++} \
+ END{ printf("\n%3d test(s): %3d passed, %3d skipped, %3d failed => status: ", p+s+f, p, s, f); \
+ if (f || !p || !done) printf("failure\n"); else if (s) printf("warning\n"); else printf("success\n");; \
+ printf("\nSee all results in %s\n", ARGV[1]); }'
+
+# Execute the toplevel kernel Makefile
+KBUILD_MAKE = $(MAKE) -C $(srctree) ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) LLVM=
+
+help:
+ @echo "Supported targets under selftests/nolibc:"
+ @echo " all call the \"run\" target below"
+ @echo " help this help"
+ @echo " sysroot create the nolibc sysroot here (uses \$$ARCH)"
+ @echo " nolibc-test build the executable (uses \$$CC or \$$CROSS_COMPILE)"
+ @echo " libc-test build an executable using the compiler's default libc instead"
+ @echo " run-user runs the executable under QEMU (uses \$$XARCH, \$$TEST)"
+ @echo " initramfs.cpio prepare the initramfs archive with nolibc-test"
+ @echo " initramfs prepare the initramfs tree with nolibc-test"
+ @echo " defconfig create a fresh new default config (uses \$$XARCH)"
+ @echo " kernel (re)build the kernel (uses \$$XARCH, \$$CROSS_COMPILE)"
+ @echo " kernel-standalone (re)build the kernel with the initramfs (uses \$$XARCH, \$$CROSS_COMPILE)"
+ @echo " run runs the kernel in QEMU after building it (uses \$$XARCH, \$$TEST)"
+ @echo " rerun runs a previously prebuilt kernel in QEMU (uses \$$XARCH, \$$TEST)"
+ @echo " clean clean the sysroot, initramfs, build and output files"
+ @echo ""
+ @echo "The output file is \"run.out\". Test ranges may be passed using \$$TEST."
+ @echo ""
+ @echo "Currently using the following variables:"
+ @echo " ARCH = $(ARCH)"
+ @echo " XARCH = $(XARCH)"
+ @echo " CROSS_COMPILE = $(CROSS_COMPILE)"
+ @echo " CC = $(CC)"
+ @echo " OUTPUT = $(OUTPUT)"
+ @echo " TEST = $(TEST)"
+ @echo " QEMU_ARCH = $(if $(QEMU_ARCH),$(QEMU_ARCH),UNKNOWN_ARCH) [determined from \$$XARCH]"
+ @echo " IMAGE_NAME = $(if $(IMAGE_NAME),$(IMAGE_NAME),UNKNOWN_ARCH) [determined from \$$XARCH]"
+ @echo ""
+
+all: run
+
+sysroot: sysroot/$(ARCH)/include
+
+sysroot/$(ARCH)/include:
+ $(Q)rm -rf sysroot/$(ARCH) sysroot/sysroot
+ $(QUIET_MKDIR)mkdir -p sysroot
+ $(Q)$(MAKE) -C $(srctree) outputmakefile
+ $(Q)$(MAKE) -C $(srctree)/tools/include/nolibc ARCH=$(ARCH) OUTPUT=$(CURDIR)/sysroot/ headers_standalone headers_check
+ $(Q)mv sysroot/sysroot sysroot/$(ARCH)
+
+ifneq ($(NOLIBC_SYSROOT),0)
+nolibc-test: nolibc-test.c nolibc-test-linkage.c sysroot/$(ARCH)/include
+ $(QUIET_CC)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ \
+ -nostdlib -nostdinc -static -Isysroot/$(ARCH)/include nolibc-test.c nolibc-test-linkage.c $(LIBGCC)
+else
+nolibc-test: nolibc-test.c nolibc-test-linkage.c
+ $(QUIET_CC)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ \
+ -nostdlib -static -include $(srctree)/tools/include/nolibc/nolibc.h nolibc-test.c nolibc-test-linkage.c $(LIBGCC)
+endif
+
+libc-test: nolibc-test.c nolibc-test-linkage.c
+ $(QUIET_CC)$(HOSTCC) -o $@ nolibc-test.c nolibc-test-linkage.c
+
+# local libc-test
+run-libc-test: libc-test
+ $(Q)./libc-test > "$(CURDIR)/run.out" || :
+ $(Q)$(REPORT) $(CURDIR)/run.out
+
+# local nolibc-test
+run-nolibc-test: nolibc-test
+ $(Q)./nolibc-test > "$(CURDIR)/run.out" || :
+ $(Q)$(REPORT) $(CURDIR)/run.out
+
+# qemu user-land test
+run-user: nolibc-test
+ $(Q)qemu-$(QEMU_ARCH_USER) ./nolibc-test > "$(CURDIR)/run.out" || :
+ $(Q)$(REPORT) $(CURDIR)/run.out
+
+initramfs.cpio: kernel nolibc-test
+ $(QUIET_GEN)echo 'file /init nolibc-test 755 0 0' | $(objtree)/usr/gen_init_cpio - > initramfs.cpio
+
+initramfs: nolibc-test
+ $(QUIET_MKDIR)mkdir -p initramfs
+ $(call QUIET_INSTALL, initramfs/init)
+ $(Q)cp nolibc-test initramfs/init
+
+defconfig:
+ $(Q)$(KBUILD_MAKE) $(DEFCONFIG)
+ $(Q)if [ -n "$(EXTRACONFIG)" ]; then \
+ $(srctree)/scripts/config --file $(objtree)/.config $(EXTRACONFIG); \
+ $(KBUILD_MAKE) olddefconfig < /dev/null; \
+ fi
+
+kernel:
+ $(Q)$(KBUILD_MAKE) $(IMAGE_NAME) < /dev/null
+
+kernel-standalone: initramfs
+ $(Q)$(KBUILD_MAKE) $(IMAGE_NAME) CONFIG_INITRAMFS_SOURCE=$(CURDIR)/initramfs < /dev/null
+
+# run the tests after building the kernel
+run: kernel initramfs.cpio
+ $(Q)qemu-system-$(QEMU_ARCH) -display none -no-reboot -kernel "$(IMAGE)" -initrd initramfs.cpio -serial file:/dev/stdout $(QEMU_ARGS) > "$(CURDIR)/run.out"
+ $(Q)$(REPORT) $(CURDIR)/run.out
+
+# re-run the tests from an existing kernel
+rerun:
+ $(Q)qemu-system-$(QEMU_ARCH) -display none -no-reboot -kernel "$(IMAGE)" -initrd initramfs.cpio -serial file:/dev/stdout $(QEMU_ARGS) > "$(CURDIR)/run.out"
+ $(Q)$(REPORT) $(CURDIR)/run.out
+
+# report with existing test log
+report:
+ $(Q)$(REPORT) $(CURDIR)/run.out
+
+clean:
+ $(call QUIET_CLEAN, sysroot)
+ $(Q)rm -rf sysroot
+ $(call QUIET_CLEAN, nolibc-test)
+ $(Q)rm -f nolibc-test
+ $(call QUIET_CLEAN, libc-test)
+ $(Q)rm -f libc-test
+ $(call QUIET_CLEAN, initramfs.cpio)
+ $(Q)rm -rf initramfs.cpio
+ $(call QUIET_CLEAN, initramfs)
+ $(Q)rm -rf initramfs
+ $(call QUIET_CLEAN, run.out)
+ $(Q)rm -rf run.out
+
+.PHONY: sysroot/$(ARCH)/include
diff --git a/tools/testing/selftests/nolibc/nolibc-test.c b/tools/testing/selftests/nolibc/nolibc-test.c
index dbe13000fb1a..3c5a226dad3a 100644
--- a/tools/testing/selftests/nolibc/nolibc-test.c
+++ b/tools/testing/selftests/nolibc/nolibc-test.c
@@ -25,6 +25,7 @@
#include <sys/sysmacros.h>
#include <sys/time.h>
#include <sys/timerfd.h>
+#include <sys/uio.h>
#include <sys/utsname.h>
#include <sys/wait.h>
#include <dirent.h>
@@ -196,8 +197,8 @@ int expect_zr(int expr, int llen)
}
-#define EXPECT_NZ(cond, expr, val) \
- do { if (!(cond)) result(llen, SKIPPED); else ret += expect_nz(expr, llen); } while (0)
+#define EXPECT_NZ(cond, expr) \
+ do { if (!(cond)) result(llen, SKIPPED); else ret += expect_nz(expr, llen); } while (0)
static __attribute__((unused))
int expect_nz(int expr, int llen)
@@ -686,7 +687,6 @@ int expect_strtox(int llen, void *func, const char *input, int base, intmax_t ex
#define CASE_TEST(name) \
case __LINE__: llen += printf("%d %s", test, #name);
-/* constructors validate that they are executed in definition order */
__attribute__((constructor))
static void constructor1(void)
{
@@ -877,7 +877,12 @@ int test_file_stream(void)
return 0;
}
-int test_fork(void)
+enum fork_type {
+ FORK_STANDARD,
+ FORK_VFORK,
+};
+
+int test_fork(enum fork_type type)
{
int status;
pid_t pid;
@@ -886,14 +891,23 @@ int test_fork(void)
fflush(stdout);
fflush(stderr);
- pid = fork();
+ switch (type) {
+ case FORK_STANDARD:
+ pid = fork();
+ break;
+ case FORK_VFORK:
+ pid = vfork();
+ break;
+ default:
+ return 1;
+ }
switch (pid) {
case -1:
return 1;
case 0:
- exit(123);
+ _exit(123);
default:
pid = waitpid(pid, &status, 0);
@@ -1269,6 +1283,10 @@ int run_syscall(int min, int max)
int proc;
int test;
int tmp;
+ struct iovec iov_one = {
+ .iov_base = &tmp,
+ .iov_len = 1,
+ };
int ret = 0;
void *p1, *p2;
int has_gettid = 1;
@@ -1320,6 +1338,7 @@ int run_syscall(int min, int max)
CASE_TEST(chroot_root); EXPECT_SYSZR(euid0, chroot("/")); break;
CASE_TEST(chroot_blah); EXPECT_SYSER(1, chroot("/proc/self/blah"), -1, ENOENT); break;
CASE_TEST(chroot_exe); EXPECT_SYSER(1, chroot(argv0), -1, ENOTDIR); break;
+ CASE_TEST(clock_nanosleep); ts.tv_nsec = -1; EXPECT_EQ(1, EINVAL, clock_nanosleep(CLOCK_REALTIME, 0, &ts, NULL)); break;
CASE_TEST(close_m1); EXPECT_SYSER(1, close(-1), -1, EBADF); break;
CASE_TEST(close_dup); EXPECT_SYSZR(1, close(dup(0))); break;
CASE_TEST(dup_0); tmp = dup(0); EXPECT_SYSNE(1, tmp, -1); close(tmp); break;
@@ -1329,8 +1348,10 @@ int run_syscall(int min, int max)
CASE_TEST(dup3_0); tmp = dup3(0, 100, 0); EXPECT_SYSNE(1, tmp, -1); close(tmp); break;
CASE_TEST(dup3_m1); tmp = dup3(-1, 100, 0); EXPECT_SYSER(1, tmp, -1, EBADF); if (tmp != -1) close(tmp); break;
CASE_TEST(execve_root); EXPECT_SYSER(1, execve("/", (char*[]){ [0] = "/", [1] = NULL }, NULL), -1, EACCES); break;
+ CASE_TEST(fchdir_stdin); EXPECT_SYSER(1, fchdir(STDIN_FILENO), -1, ENOTDIR); break;
+ CASE_TEST(fchdir_badfd); EXPECT_SYSER(1, fchdir(-1), -1, EBADF); break;
CASE_TEST(file_stream); EXPECT_SYSZR(1, test_file_stream()); break;
- CASE_TEST(fork); EXPECT_SYSZR(1, test_fork()); break;
+ CASE_TEST(fork); EXPECT_SYSZR(1, test_fork(FORK_STANDARD)); break;
CASE_TEST(getdents64_root); EXPECT_SYSNE(1, test_getdents64("/"), -1); break;
CASE_TEST(getdents64_null); EXPECT_SYSER(1, test_getdents64("/dev/null"), -1, ENOTDIR); break;
CASE_TEST(directories); EXPECT_SYSZR(proc, test_dirent()); break;
@@ -1349,6 +1370,7 @@ int run_syscall(int min, int max)
CASE_TEST(mmap_bad); EXPECT_PTRER(1, mmap(NULL, 0, PROT_READ, MAP_PRIVATE, 0, 0), MAP_FAILED, EINVAL); break;
CASE_TEST(munmap_bad); EXPECT_SYSER(1, munmap(NULL, 0), -1, EINVAL); break;
CASE_TEST(mmap_munmap_good); EXPECT_SYSZR(1, test_mmap_munmap()); break;
+ CASE_TEST(nanosleep); ts.tv_nsec = -1; EXPECT_SYSER(1, nanosleep(&ts, NULL), -1, EINVAL); break;
CASE_TEST(open_tty); EXPECT_SYSNE(1, tmp = open("/dev/null", O_RDONLY), -1); if (tmp != -1) close(tmp); break;
CASE_TEST(open_blah); EXPECT_SYSER(1, tmp = open("/proc/self/blah", O_RDONLY), -1, ENOENT); if (tmp != -1) close(tmp); break;
CASE_TEST(openat_dir); EXPECT_SYSZR(1, test_openat()); break;
@@ -1374,11 +1396,16 @@ int run_syscall(int min, int max)
CASE_TEST(uname_fault); EXPECT_SYSER(1, uname(NULL), -1, EFAULT); break;
CASE_TEST(unlink_root); EXPECT_SYSER(1, unlink("/"), -1, EISDIR); break;
CASE_TEST(unlink_blah); EXPECT_SYSER(1, unlink("/proc/self/blah"), -1, ENOENT); break;
+ CASE_TEST(vfork); EXPECT_SYSZR(1, test_fork(FORK_VFORK)); break;
CASE_TEST(wait_child); EXPECT_SYSER(1, wait(&tmp), -1, ECHILD); break;
CASE_TEST(waitpid_min); EXPECT_SYSER(1, waitpid(INT_MIN, &tmp, WNOHANG), -1, ESRCH); break;
CASE_TEST(waitpid_child); EXPECT_SYSER(1, waitpid(getpid(), &tmp, WNOHANG), -1, ECHILD); break;
CASE_TEST(write_badf); EXPECT_SYSER(1, write(-1, &tmp, 1), -1, EBADF); break;
CASE_TEST(write_zero); EXPECT_SYSZR(1, write(1, &tmp, 0)); break;
+ CASE_TEST(readv_badf); EXPECT_SYSER(1, readv(-1, &iov_one, 1), -1, EBADF); break;
+ CASE_TEST(readv_zero); EXPECT_SYSZR(1, readv(1, NULL, 0)); break;
+ CASE_TEST(writev_badf); EXPECT_SYSER(1, writev(-1, &iov_one, 1), -1, EBADF); break;
+ CASE_TEST(writev_zero); EXPECT_SYSZR(1, writev(1, NULL, 0)); break;
CASE_TEST(syscall_noargs); EXPECT_SYSEQ(1, syscall(__NR_getpid), getpid()); break;
CASE_TEST(syscall_args); EXPECT_SYSER(1, syscall(__NR_statx, 0, NULL, 0, 0, NULL), -1, EFAULT); break;
CASE_TEST(namespace); EXPECT_SYSZR(euid0 && proc, test_namespace()); break;
@@ -1413,7 +1440,7 @@ int run_stdlib(int min, int max)
 * Add some more chars after the \0, to test that functions which overwrite
 * the buffer set the \0 at exactly the right position.
*/
- char buf[10] = "test123456";
+ char buf[11] = "test123456";
buf[4] = '\0';
@@ -1524,6 +1551,8 @@ int run_stdlib(int min, int max)
CASE_TEST(abs); EXPECT_EQ(1, abs(-10), 10); break;
CASE_TEST(abs_noop); EXPECT_EQ(1, abs(10), 10); break;
CASE_TEST(difftime); EXPECT_ZR(1, test_difftime()); break;
+ CASE_TEST(memchr_foobar6_o); EXPECT_STREQ(1, memchr("foobar", 'o', 6), "oobar"); break;
+ CASE_TEST(memchr_foobar3_b); EXPECT_STRZR(1, memchr("foobar", 'b', 3)); break;
case __LINE__:
return ret; /* must be last */
@@ -1646,6 +1675,28 @@ int test_strerror(void)
return 0;
}
+static int test_printf_error(void)
+{
+ int fd, ret, saved_errno;
+
+ fd = open("/dev/full", O_RDWR);
+ if (fd == -1)
+ return 1;
+
+ errno = 0;
+ ret = dprintf(fd, "foo");
+ saved_errno = errno;
+ close(fd);
+
+ if (ret != -1)
+ return 2;
+
+ if (saved_errno != ENOSPC)
+ return 3;
+
+ return 0;
+}
+
static int run_printf(int min, int max)
{
int test;
@@ -1675,6 +1726,7 @@ static int run_printf(int min, int max)
CASE_TEST(width_trunc); EXPECT_VFPRINTF(25, " ", "%25d", 1); break;
CASE_TEST(scanf); EXPECT_ZR(1, test_scanf()); break;
CASE_TEST(strerror); EXPECT_ZR(1, test_strerror()); break;
+ CASE_TEST(printf_error); EXPECT_ZR(1, test_printf_error()); break;
case __LINE__:
return ret; /* must be last */
/* note: do not set any defaults so as to permit holes above */
@@ -1762,12 +1814,14 @@ int prepare(void)
if (stat("/dev/.", &stat_buf) == 0 || mkdir("/dev", 0755) == 0) {
if (stat("/dev/console", &stat_buf) != 0 ||
stat("/dev/null", &stat_buf) != 0 ||
- stat("/dev/zero", &stat_buf) != 0) {
+ stat("/dev/zero", &stat_buf) != 0 ||
+ stat("/dev/full", &stat_buf) != 0) {
/* try devtmpfs first, otherwise fall back to manual creation */
if (mount("/dev", "/dev", "devtmpfs", 0, 0) != 0) {
mknod("/dev/console", 0600 | S_IFCHR, makedev(5, 1));
mknod("/dev/null", 0666 | S_IFCHR, makedev(1, 3));
mknod("/dev/zero", 0666 | S_IFCHR, makedev(1, 5));
+ mknod("/dev/full", 0666 | S_IFCHR, makedev(1, 7));
}
}
}
diff --git a/tools/testing/selftests/nolibc/run-tests.sh b/tools/testing/selftests/nolibc/run-tests.sh
index 8277599e6441..3917cfb8fdc4 100755
--- a/tools/testing/selftests/nolibc/run-tests.sh
+++ b/tools/testing/selftests/nolibc/run-tests.sh
@@ -18,15 +18,16 @@ test_mode=system
werror=1
llvm=
all_archs=(
- i386 x86_64
+ i386 x86_64 x32
arm64 arm armthumb
- mips32le mips32be
+ mips32le mips32be mipsn32le mipsn32be mips64le mips64be
ppc ppc64 ppc64le
riscv32 riscv64
- s390x s390
+ s390x
loongarch
sparc32 sparc64
m68k
+ sh4
)
archs="${all_archs[@]}"
@@ -114,6 +115,7 @@ crosstool_arch() {
mips*) echo mips;;
s390*) echo s390;;
sparc*) echo sparc64;;
+ x32*) echo x86_64;;
*) echo "$1";;
esac
}
@@ -167,9 +169,9 @@ test_arch() {
cross_compile=$(realpath "${download_location}gcc-${crosstool_version}-nolibc/${ct_arch}-${ct_abi}/bin/${ct_arch}-${ct_abi}-")
build_dir="${build_location}/${arch}"
if [ "$werror" -ne 0 ]; then
- CFLAGS_EXTRA="$CFLAGS_EXTRA -Werror"
+ CFLAGS_EXTRA="$CFLAGS_EXTRA -Werror -Wl,--fatal-warnings"
fi
- MAKE=(make -j"${nproc}" XARCH="${arch}" CROSS_COMPILE="${cross_compile}" LLVM="${llvm}" O="${build_dir}")
+ MAKE=(make -f Makefile.nolibc -j"${nproc}" XARCH="${arch}" CROSS_COMPILE="${cross_compile}" LLVM="${llvm}" O="${build_dir}")
case "$test_mode" in
'system')
@@ -183,11 +185,11 @@ test_arch() {
exit 1
esac
printf '%-15s' "$arch:"
- if [ "$arch" = "s390" ] && ([ "$llvm" = "1" ] || [ "$test_mode" = "user" ]); then
+ if [ "$arch" = "m68k" -o "$arch" = "sh4" ] && [ "$llvm" = "1" ]; then
echo "Unsupported configuration"
return
fi
- if [ "$arch" = "m68k" ] && [ "$llvm" = "1" ]; then
+ if [ "$arch" = "x32" ] && [ "$test_mode" = "user" ]; then
echo "Unsupported configuration"
return
fi
diff --git a/tools/testing/selftests/openat2/helpers.h b/tools/testing/selftests/openat2/helpers.h
index 7056340b9339..510e60602511 100644
--- a/tools/testing/selftests/openat2/helpers.h
+++ b/tools/testing/selftests/openat2/helpers.h
@@ -12,7 +12,7 @@
#include <stdbool.h>
#include <errno.h>
#include <linux/types.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#define ARRAY_LEN(X) (sizeof (X) / sizeof (*(X)))
#define BUILD_BUG_ON(e) ((void)(sizeof(struct { int:(-!!(e)); })))
diff --git a/tools/testing/selftests/openat2/openat2_test.c b/tools/testing/selftests/openat2/openat2_test.c
index 5790ab446527..0e161ef9e9e4 100644
--- a/tools/testing/selftests/openat2/openat2_test.c
+++ b/tools/testing/selftests/openat2/openat2_test.c
@@ -15,7 +15,7 @@
#include <stdbool.h>
#include <string.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "helpers.h"
/*
diff --git a/tools/testing/selftests/openat2/rename_attack_test.c b/tools/testing/selftests/openat2/rename_attack_test.c
index 0a770728b436..aa5699e45729 100644
--- a/tools/testing/selftests/openat2/rename_attack_test.c
+++ b/tools/testing/selftests/openat2/rename_attack_test.c
@@ -22,7 +22,7 @@
#include <limits.h>
#include <unistd.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "helpers.h"
/* Construct a test directory with the following structure:
diff --git a/tools/testing/selftests/openat2/resolve_test.c b/tools/testing/selftests/openat2/resolve_test.c
index bbafad440893..a76ef15ceb90 100644
--- a/tools/testing/selftests/openat2/resolve_test.c
+++ b/tools/testing/selftests/openat2/resolve_test.c
@@ -14,7 +14,7 @@
#include <stdbool.h>
#include <string.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "helpers.h"
/*
diff --git a/tools/testing/selftests/pci_endpoint/pci_endpoint_test.c b/tools/testing/selftests/pci_endpoint/pci_endpoint_test.c
index ac26481d29d9..23aac6f97061 100644
--- a/tools/testing/selftests/pci_endpoint/pci_endpoint_test.c
+++ b/tools/testing/selftests/pci_endpoint/pci_endpoint_test.c
@@ -20,7 +20,7 @@
#include "../../../../include/uapi/linux/pcitest.h"
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#define pci_ep_ioctl(cmd, arg) \
({ \
@@ -121,6 +121,8 @@ TEST_F(pci_ep_basic, MSI_TEST)
for (i = 1; i <= 32; i++) {
pci_ep_ioctl(PCITEST_MSI, i);
+ if (ret == -EINVAL)
+ SKIP(return, "MSI%d is disabled", i);
EXPECT_FALSE(ret) TH_LOG("Test failed for MSI%d", i);
}
}
@@ -137,6 +139,8 @@ TEST_F(pci_ep_basic, MSIX_TEST)
for (i = 1; i <= 2048; i++) {
pci_ep_ioctl(PCITEST_MSIX, i);
+ if (ret == -EINVAL)
+ SKIP(return, "MSI-X%d is disabled", i);
EXPECT_FALSE(ret) TH_LOG("Test failed for MSI-X%d", i);
}
}
@@ -229,4 +233,32 @@ TEST_F(pci_ep_data_transfer, COPY_TEST)
test_size[i]);
}
}
+
+FIXTURE(pcie_ep_doorbell)
+{
+ int fd;
+};
+
+FIXTURE_SETUP(pcie_ep_doorbell)
+{
+ self->fd = open(test_device, O_RDWR);
+
+ ASSERT_NE(-1, self->fd) TH_LOG("Can't open PCI Endpoint Test device");
+};
+
+FIXTURE_TEARDOWN(pcie_ep_doorbell)
+{
+ close(self->fd);
+};
+
+TEST_F(pcie_ep_doorbell, DOORBELL_TEST)
+{
+ int ret;
+
+ pci_ep_ioctl(PCITEST_SET_IRQTYPE, PCITEST_IRQ_TYPE_AUTO);
+ ASSERT_EQ(0, ret) TH_LOG("Can't set AUTO IRQ type");
+
+ pci_ep_ioctl(PCITEST_DOORBELL, 0);
+ EXPECT_FALSE(ret) TH_LOG("Test failed for Doorbell\n");
+}
TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/perf_events/.gitignore b/tools/testing/selftests/perf_events/.gitignore
index ee93dc4969b8..4931b3b6bbd3 100644
--- a/tools/testing/selftests/perf_events/.gitignore
+++ b/tools/testing/selftests/perf_events/.gitignore
@@ -2,3 +2,4 @@
sigtrap_threads
remove_on_exec
watermark_signal
+mmap
diff --git a/tools/testing/selftests/perf_events/Makefile b/tools/testing/selftests/perf_events/Makefile
index 70e3ff211278..2e5d85770dfe 100644
--- a/tools/testing/selftests/perf_events/Makefile
+++ b/tools/testing/selftests/perf_events/Makefile
@@ -2,5 +2,5 @@
CFLAGS += -Wl,-no-as-needed -Wall $(KHDR_INCLUDES)
LDFLAGS += -lpthread
-TEST_GEN_PROGS := sigtrap_threads remove_on_exec watermark_signal
+TEST_GEN_PROGS := sigtrap_threads remove_on_exec watermark_signal mmap
include ../lib.mk
diff --git a/tools/testing/selftests/perf_events/mmap.c b/tools/testing/selftests/perf_events/mmap.c
new file mode 100644
index 000000000000..d1fa8ec58987
--- /dev/null
+++ b/tools/testing/selftests/perf_events/mmap.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define _GNU_SOURCE
+
+#include <dirent.h>
+#include <sched.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+
+#include <linux/perf_event.h>
+
+#include "kselftest_harness.h"
+
+#define RB_SIZE 0x3000
+#define AUX_SIZE 0x10000
+#define AUX_OFFS 0x4000
+
+#define HOLE_SIZE 0x1000
+
+/* Reserve space for the rb and aux mappings, plus room for shrink-beyond-vma testing. */
+#define REGION_SIZE (2 * RB_SIZE + 2 * AUX_SIZE)
+#define REGION_AUX_OFFS (2 * RB_SIZE)
+
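+// Layout of the reserved region (offsets from its base; derived from the
+// constants above):
+//   [0, RB_SIZE)                  ring-buffer mapping
+//   [REGION_AUX_OFFS, +AUX_SIZE)  AUX mapping
+// The remainder stays PROT_NONE so shrink/extend tests can poke past the VMAs.
+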
+#define MAP_BASE 1
+#define MAP_AUX 2
+
+#define EVENT_SRC_DIR "/sys/bus/event_source/devices"
+
+FIXTURE(perf_mmap)
+{
+ int fd;
+ void *ptr;
+ void *region;
+};
+
+FIXTURE_VARIANT(perf_mmap)
+{
+ bool aux;
+ unsigned long ptr_size;
+};
+
+FIXTURE_VARIANT_ADD(perf_mmap, rb)
+{
+ .aux = false,
+ .ptr_size = RB_SIZE,
+};
+
+FIXTURE_VARIANT_ADD(perf_mmap, aux)
+{
+ .aux = true,
+ .ptr_size = AUX_SIZE,
+};
+
+static bool read_event_type(struct dirent *dent, __u32 *type)
+{
+ char typefn[512];
+ FILE *fp;
+ int res;
+
+ snprintf(typefn, sizeof(typefn), "%s/%s/type", EVENT_SRC_DIR, dent->d_name);
+ fp = fopen(typefn, "r");
+ if (!fp)
+ return false;
+
+ res = fscanf(fp, "%u", type);
+ fclose(fp);
+ return res > 0;
+}
+
+FIXTURE_SETUP(perf_mmap)
+{
+ struct perf_event_attr attr = {
+ .size = sizeof(attr),
+ .disabled = 1,
+ .exclude_kernel = 1,
+ .exclude_hv = 1,
+ };
+ struct perf_event_attr attr_ok = {};
+ unsigned int eacces = 0, map = 0;
+ struct perf_event_mmap_page *rb;
+ struct dirent *dent;
+ void *aux, *region;
+ DIR *dir;
+
+ self->ptr = NULL;
+
+ dir = opendir(EVENT_SRC_DIR);
+ if (!dir)
+ SKIP(return, "perf not available.");
+
+ region = mmap(NULL, REGION_SIZE, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
+ ASSERT_NE(region, MAP_FAILED);
+ self->region = region;
+
+ // Try to find a suitable event on this system
+ while ((dent = readdir(dir))) {
+ int fd;
+
+ if (!read_event_type(dent, &attr.type))
+ continue;
+
+ fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
+ if (fd < 0) {
+ if (errno == EACCES)
+ eacces++;
+ continue;
+ }
+
+ // Check whether the event supports mmap()
+ rb = mmap(region, RB_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, 0);
+ if (rb == MAP_FAILED) {
+ close(fd);
+ continue;
+ }
+
+ if (!map) {
+ // Save the event in case no AUX-capable event is found
+ attr_ok = attr;
+ map = MAP_BASE;
+ }
+
+ if (!variant->aux)
+ continue;
+
+ rb->aux_offset = AUX_OFFS;
+ rb->aux_size = AUX_SIZE;
+
+ // Check whether it supports an AUX buffer
+ aux = mmap(region + REGION_AUX_OFFS, AUX_SIZE, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_FIXED, fd, AUX_OFFS);
+ if (aux == MAP_FAILED) {
+ munmap(rb, RB_SIZE);
+ close(fd);
+ continue;
+ }
+
+ attr_ok = attr;
+ map = MAP_AUX;
+ munmap(aux, AUX_SIZE);
+ munmap(rb, RB_SIZE);
+ close(fd);
+ break;
+ }
+ closedir(dir);
+
+ if (!map) {
+ if (!eacces)
+ SKIP(return, "No mappable perf event found.");
+ else
+ SKIP(return, "No permissions for perf_event_open()");
+ }
+
+ self->fd = syscall(SYS_perf_event_open, &attr_ok, 0, -1, -1, 0);
+ ASSERT_NE(self->fd, -1);
+
+ rb = mmap(region, RB_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, self->fd, 0);
+ ASSERT_NE(rb, MAP_FAILED);
+
+ if (!variant->aux) {
+ self->ptr = rb;
+ return;
+ }
+
+ if (map != MAP_AUX)
+ SKIP(return, "No AUX event found.");
+
+ rb->aux_offset = AUX_OFFS;
+ rb->aux_size = AUX_SIZE;
+ aux = mmap(region + REGION_AUX_OFFS, AUX_SIZE, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_FIXED, self->fd, AUX_OFFS);
+ ASSERT_NE(aux, MAP_FAILED);
+ self->ptr = aux;
+}
+
+FIXTURE_TEARDOWN(perf_mmap)
+{
+ ASSERT_EQ(munmap(self->region, REGION_SIZE), 0);
+ if (self->fd != -1)
+ ASSERT_EQ(close(self->fd), 0);
+}
+
+TEST_F(perf_mmap, remap)
+{
+ void *tmp, *ptr = self->ptr;
+ unsigned long size = variant->ptr_size;
+
+ // Test the invalid remaps
+ ASSERT_EQ(mremap(ptr, size, HOLE_SIZE, MREMAP_MAYMOVE), MAP_FAILED);
+ ASSERT_EQ(mremap(ptr + HOLE_SIZE, size, HOLE_SIZE, MREMAP_MAYMOVE), MAP_FAILED);
+ ASSERT_EQ(mremap(ptr + size - HOLE_SIZE, HOLE_SIZE, size, MREMAP_MAYMOVE), MAP_FAILED);
+ // Shrink the end of the mapping such that we only unmap past the end of the VMA,
+ // which should succeed and poke a hole into the PROT_NONE region
+ ASSERT_NE(mremap(ptr + size - HOLE_SIZE, size, HOLE_SIZE, MREMAP_MAYMOVE), MAP_FAILED);
+
+ // Remap the whole buffer to a new address
+ tmp = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ ASSERT_NE(tmp, MAP_FAILED);
+
+ // Try moving the part starting one hole into the VMA (would split it); this should fail
+ ASSERT_EQ(mremap(ptr + HOLE_SIZE, size - HOLE_SIZE, size - HOLE_SIZE,
+ MREMAP_MAYMOVE | MREMAP_FIXED, tmp), MAP_FAILED);
+ // Remapping the whole buffer should succeed
+ ptr = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, tmp);
+ ASSERT_EQ(ptr, tmp);
+ ASSERT_EQ(munmap(tmp, size), 0);
+}
+
+TEST_F(perf_mmap, unmap)
+{
+ unsigned long size = variant->ptr_size;
+
+ // Try to poke holes into the mappings
+ ASSERT_NE(munmap(self->ptr, HOLE_SIZE), 0);
+ ASSERT_NE(munmap(self->ptr + HOLE_SIZE, HOLE_SIZE), 0);
+ ASSERT_NE(munmap(self->ptr + size - HOLE_SIZE, HOLE_SIZE), 0);
+}
+
+TEST_F(perf_mmap, map)
+{
+ unsigned long size = variant->ptr_size;
+
+ // Try to poke holes into the mappings by mapping anonymous memory over it
+ ASSERT_EQ(mmap(self->ptr, HOLE_SIZE, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0), MAP_FAILED);
+ ASSERT_EQ(mmap(self->ptr + HOLE_SIZE, HOLE_SIZE, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0), MAP_FAILED);
+ ASSERT_EQ(mmap(self->ptr + size - HOLE_SIZE, HOLE_SIZE, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0), MAP_FAILED);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/perf_events/remove_on_exec.c b/tools/testing/selftests/perf_events/remove_on_exec.c
index 5814611a1dc7..89e7b06835df 100644
--- a/tools/testing/selftests/perf_events/remove_on_exec.c
+++ b/tools/testing/selftests/perf_events/remove_on_exec.c
@@ -30,7 +30,7 @@
#include <sys/syscall.h>
#include <unistd.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
static volatile int signal_count;
diff --git a/tools/testing/selftests/perf_events/sigtrap_threads.c b/tools/testing/selftests/perf_events/sigtrap_threads.c
index d1d8483ac628..b5cf8355345d 100644
--- a/tools/testing/selftests/perf_events/sigtrap_threads.c
+++ b/tools/testing/selftests/perf_events/sigtrap_threads.c
@@ -31,7 +31,7 @@
#include <sys/syscall.h>
#include <unistd.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#define NUM_THREADS 5
diff --git a/tools/testing/selftests/perf_events/watermark_signal.c b/tools/testing/selftests/perf_events/watermark_signal.c
index e03fe1b9bba2..0f64b9b17081 100644
--- a/tools/testing/selftests/perf_events/watermark_signal.c
+++ b/tools/testing/selftests/perf_events/watermark_signal.c
@@ -15,9 +15,7 @@
#include <sys/wait.h>
#include <unistd.h>
-#include "../kselftest_harness.h"
-
-#define __maybe_unused __attribute__((__unused__))
+#include "kselftest_harness.h"
static int sigio_count;
diff --git a/tools/testing/selftests/pid_namespace/pid_max.c b/tools/testing/selftests/pid_namespace/pid_max.c
index 96f274f0582b..c9519e7385b6 100644
--- a/tools/testing/selftests/pid_namespace/pid_max.c
+++ b/tools/testing/selftests/pid_namespace/pid_max.c
@@ -13,7 +13,7 @@
#include <sys/mount.h>
#include <sys/wait.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#include "../pidfd/pidfd.h"
#define __STACK_SIZE (8 * 1024 * 1024)
diff --git a/tools/testing/selftests/pid_namespace/regression_enomem.c b/tools/testing/selftests/pid_namespace/regression_enomem.c
index 7d84097ad45c..059e7ec5b4fd 100644
--- a/tools/testing/selftests/pid_namespace/regression_enomem.c
+++ b/tools/testing/selftests/pid_namespace/regression_enomem.c
@@ -11,7 +11,7 @@
#include <syscall.h>
#include <sys/wait.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#include "../pidfd/pidfd.h"
/*
diff --git a/tools/testing/selftests/pidfd/.gitignore b/tools/testing/selftests/pidfd/.gitignore
index 0406a065deb4..144e7ff65d6a 100644
--- a/tools/testing/selftests/pidfd/.gitignore
+++ b/tools/testing/selftests/pidfd/.gitignore
@@ -10,3 +10,5 @@ pidfd_file_handle_test
pidfd_bind_mount
pidfd_info_test
pidfd_exec_helper
+pidfd_xattr_test
+pidfd_setattr_test
diff --git a/tools/testing/selftests/pidfd/Makefile b/tools/testing/selftests/pidfd/Makefile
index fcbefc0d77f6..764a8f9ecefa 100644
--- a/tools/testing/selftests/pidfd/Makefile
+++ b/tools/testing/selftests/pidfd/Makefile
@@ -1,9 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
-CFLAGS += -g $(KHDR_INCLUDES) -pthread -Wall
+CFLAGS += -g $(KHDR_INCLUDES) $(TOOLS_INCLUDES) -pthread -Wall
TEST_GEN_PROGS := pidfd_test pidfd_fdinfo_test pidfd_open_test \
pidfd_poll_test pidfd_wait pidfd_getfd_test pidfd_setns_test \
- pidfd_file_handle_test pidfd_bind_mount pidfd_info_test
+ pidfd_file_handle_test pidfd_bind_mount pidfd_info_test \
+ pidfd_xattr_test pidfd_setattr_test
TEST_GEN_PROGS_EXTENDED := pidfd_exec_helper
diff --git a/tools/testing/selftests/pidfd/config b/tools/testing/selftests/pidfd/config
index 6133524710f7..cf7cc0ce0248 100644
--- a/tools/testing/selftests/pidfd/config
+++ b/tools/testing/selftests/pidfd/config
@@ -4,6 +4,5 @@ CONFIG_USER_NS=y
CONFIG_PID_NS=y
CONFIG_NET_NS=y
CONFIG_TIME_NS=y
-CONFIG_GENERIC_VDSO_TIME_NS=y
CONFIG_CGROUPS=y
CONFIG_CHECKPOINT_RESTORE=y
diff --git a/tools/testing/selftests/pidfd/pidfd.h b/tools/testing/selftests/pidfd/pidfd.h
index efd74063126e..9085c1a3c005 100644
--- a/tools/testing/selftests/pidfd/pidfd.h
+++ b/tools/testing/selftests/pidfd/pidfd.h
@@ -16,9 +16,22 @@
#include <sys/types.h>
#include <sys/wait.h>
-#include "../kselftest.h"
+/*
+ * Remove the userspace definitions of the following preprocessor symbols
+ * to avoid duplicate-definition warnings from the subsequent in-kernel
+ * definitions.
+ */
+#undef SCHED_NORMAL
+#undef SCHED_FLAG_KEEP_ALL
+#undef SCHED_FLAG_UTIL_CLAMP
+
+#include "kselftest.h"
#include "../clone3/clone3_selftests.h"
+#ifndef FD_PIDFS_ROOT
+#define FD_PIDFS_ROOT -10002
+#endif
+
#ifndef P_PIDFD
#define P_PIDFD 3
#endif
@@ -56,7 +69,7 @@
#endif
#ifndef PIDFD_SELF_THREAD_GROUP
-#define PIDFD_SELF_THREAD_GROUP -20000 /* Current thread group leader. */
+#define PIDFD_SELF_THREAD_GROUP -10001 /* Current thread group leader. */
#endif
#ifndef PIDFD_SELF
@@ -135,6 +148,14 @@
#define PIDFD_INFO_COREDUMP (1UL << 4)
#endif
+#ifndef PIDFD_INFO_SUPPORTED_MASK
+#define PIDFD_INFO_SUPPORTED_MASK (1UL << 5)
+#endif
+
+#ifndef PIDFD_INFO_COREDUMP_SIGNAL
+#define PIDFD_INFO_COREDUMP_SIGNAL (1UL << 6)
+#endif
+
#ifndef PIDFD_COREDUMPED
#define PIDFD_COREDUMPED (1U << 0) /* Did crash and... */
#endif
@@ -170,8 +191,11 @@ struct pidfd_info {
__u32 fsuid;
__u32 fsgid;
__s32 exit_code;
- __u32 coredump_mask;
- __u32 __spare1;
+ struct {
+ __u32 coredump_mask;
+ __u32 coredump_signal;
+ };
+ __u64 supported_mask;
};
/*
diff --git a/tools/testing/selftests/pidfd/pidfd_bind_mount.c b/tools/testing/selftests/pidfd/pidfd_bind_mount.c
index c094aeb1c620..1fdf49939524 100644
--- a/tools/testing/selftests/pidfd/pidfd_bind_mount.c
+++ b/tools/testing/selftests/pidfd/pidfd_bind_mount.c
@@ -14,7 +14,7 @@
#include <unistd.h>
#include "pidfd.h"
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#include "../filesystems/wrappers.h"
FIXTURE(pidfd_bind_mount) {
diff --git a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
index f718aac75068..9935e9471c77 100644
--- a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
+++ b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
@@ -16,7 +16,7 @@
#include <sys/mount.h>
#include "pidfd.h"
-#include "../kselftest.h"
+#include "kselftest.h"
struct error {
int code;
diff --git a/tools/testing/selftests/pidfd/pidfd_file_handle_test.c b/tools/testing/selftests/pidfd/pidfd_file_handle_test.c
index 439b9c6c0457..68918734dcf3 100644
--- a/tools/testing/selftests/pidfd/pidfd_file_handle_test.c
+++ b/tools/testing/selftests/pidfd/pidfd_file_handle_test.c
@@ -20,7 +20,7 @@
#include <sys/stat.h>
#include "pidfd.h"
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
FIXTURE(file_handle)
{
@@ -500,4 +500,64 @@ TEST_F(file_handle, valid_name_to_handle_at_flags)
ASSERT_EQ(close(pidfd), 0);
}
+/*
+ * Test that a file handle can be decoded without having to pass a pidfd.
+ */
+TEST_F(file_handle, decode_purely_based_on_file_handle)
+{
+ int mnt_id;
+ struct file_handle *fh;
+ int pidfd = -EBADF;
+ struct stat st1, st2;
+
+ fh = malloc(sizeof(struct file_handle) + MAX_HANDLE_SZ);
+ ASSERT_NE(fh, NULL);
+ memset(fh, 0, sizeof(struct file_handle) + MAX_HANDLE_SZ);
+ fh->handle_bytes = MAX_HANDLE_SZ;
+
+ ASSERT_EQ(name_to_handle_at(self->child_pidfd1, "", fh, &mnt_id, AT_EMPTY_PATH), 0);
+
+ ASSERT_EQ(fstat(self->child_pidfd1, &st1), 0);
+
+ pidfd = open_by_handle_at(FD_PIDFS_ROOT, fh, 0);
+ ASSERT_GE(pidfd, 0);
+
+ ASSERT_EQ(fstat(pidfd, &st2), 0);
+ ASSERT_TRUE(st1.st_dev == st2.st_dev && st1.st_ino == st2.st_ino);
+
+ ASSERT_EQ(close(pidfd), 0);
+
+ pidfd = open_by_handle_at(FD_PIDFS_ROOT, fh, O_CLOEXEC);
+ ASSERT_GE(pidfd, 0);
+
+ ASSERT_EQ(fstat(pidfd, &st2), 0);
+ ASSERT_TRUE(st1.st_dev == st2.st_dev && st1.st_ino == st2.st_ino);
+
+ ASSERT_EQ(close(pidfd), 0);
+
+ pidfd = open_by_handle_at(FD_PIDFS_ROOT, fh, O_NONBLOCK);
+ ASSERT_GE(pidfd, 0);
+
+ ASSERT_EQ(fstat(pidfd, &st2), 0);
+ ASSERT_TRUE(st1.st_dev == st2.st_dev && st1.st_ino == st2.st_ino);
+
+ ASSERT_EQ(close(pidfd), 0);
+
+ pidfd = open_by_handle_at(self->pidfd, fh, 0);
+ ASSERT_GE(pidfd, 0);
+
+ ASSERT_EQ(fstat(pidfd, &st2), 0);
+ ASSERT_TRUE(st1.st_dev == st2.st_dev && st1.st_ino == st2.st_ino);
+
+ ASSERT_EQ(close(pidfd), 0);
+
+ pidfd = open_by_handle_at(-EBADF, fh, 0);
+ ASSERT_LT(pidfd, 0);
+
+ pidfd = open_by_handle_at(AT_FDCWD, fh, 0);
+ ASSERT_LT(pidfd, 0);
+
+ free(fh);
+}
+
TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/pidfd/pidfd_getfd_test.c b/tools/testing/selftests/pidfd/pidfd_getfd_test.c
index cd51d547b751..ea45b37001b0 100644
--- a/tools/testing/selftests/pidfd/pidfd_getfd_test.c
+++ b/tools/testing/selftests/pidfd/pidfd_getfd_test.c
@@ -19,7 +19,7 @@
#include <linux/kcmp.h>
#include "pidfd.h"
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
/*
* UNKNOWN_FD is an fd number that should never exist in the child, as it is
diff --git a/tools/testing/selftests/pidfd/pidfd_info_test.c b/tools/testing/selftests/pidfd/pidfd_info_test.c
index a0eb6e81eaa2..6571e04acd88 100644
--- a/tools/testing/selftests/pidfd/pidfd_info_test.c
+++ b/tools/testing/selftests/pidfd/pidfd_info_test.c
@@ -21,7 +21,7 @@
#include <sys/stat.h>
#include "pidfd.h"
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
FIXTURE(pidfd_info)
{
@@ -690,4 +690,77 @@ TEST_F(pidfd_info, thread_group_exec_thread)
EXPECT_EQ(close(pidfd_thread), 0);
}
+/*
+ * Test: PIDFD_INFO_SUPPORTED_MASK field
+ *
+ * Verify that when PIDFD_INFO_SUPPORTED_MASK is requested, the kernel
+ * returns the supported_mask field indicating which flags the kernel supports.
+ */
+TEST(supported_mask_field)
+{
+ struct pidfd_info info = {
+ .mask = PIDFD_INFO_SUPPORTED_MASK,
+ };
+ int pidfd;
+ pid_t pid;
+
+ pid = create_child(&pidfd, 0);
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0)
+ pause();
+
+ /* Request supported_mask field */
+ ASSERT_EQ(ioctl(pidfd, PIDFD_GET_INFO, &info), 0);
+
+ /* Verify PIDFD_INFO_SUPPORTED_MASK is set in the reply */
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_SUPPORTED_MASK));
+
+ /* Verify supported_mask contains expected flags */
+ ASSERT_TRUE(!!(info.supported_mask & PIDFD_INFO_PID));
+ ASSERT_TRUE(!!(info.supported_mask & PIDFD_INFO_CREDS));
+ ASSERT_TRUE(!!(info.supported_mask & PIDFD_INFO_CGROUPID));
+ ASSERT_TRUE(!!(info.supported_mask & PIDFD_INFO_EXIT));
+ ASSERT_TRUE(!!(info.supported_mask & PIDFD_INFO_COREDUMP));
+ ASSERT_TRUE(!!(info.supported_mask & PIDFD_INFO_SUPPORTED_MASK));
+ ASSERT_TRUE(!!(info.supported_mask & PIDFD_INFO_COREDUMP_SIGNAL));
+
+ /* Clean up */
+ sys_pidfd_send_signal(pidfd, SIGKILL, NULL, 0);
+ sys_waitid(P_PIDFD, pidfd, NULL, WEXITED);
+ close(pidfd);
+}
+
+/*
+ * Test: PIDFD_INFO_SUPPORTED_MASK always available
+ *
+ * Verify that supported_mask is returned even when other fields are requested.
+ */
+TEST(supported_mask_with_other_fields)
+{
+ struct pidfd_info info = {
+ .mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_SUPPORTED_MASK,
+ };
+ int pidfd;
+ pid_t pid;
+
+ pid = create_child(&pidfd, 0);
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0)
+ pause();
+
+ ASSERT_EQ(ioctl(pidfd, PIDFD_GET_INFO, &info), 0);
+
+ /* Both fields should be present */
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_CGROUPID));
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_SUPPORTED_MASK));
+ ASSERT_NE(info.supported_mask, 0);
+
+ /* Clean up */
+ sys_pidfd_send_signal(pidfd, SIGKILL, NULL, 0);
+ sys_waitid(P_PIDFD, pidfd, NULL, WEXITED);
+ close(pidfd);
+}
+
TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/pidfd/pidfd_open_test.c b/tools/testing/selftests/pidfd/pidfd_open_test.c
index cd3de40e4977..318e6f09c8e0 100644
--- a/tools/testing/selftests/pidfd/pidfd_open_test.c
+++ b/tools/testing/selftests/pidfd/pidfd_open_test.c
@@ -20,7 +20,7 @@
#include <unistd.h>
#include "pidfd.h"
-#include "../kselftest.h"
+#include "kselftest.h"
static int safe_int(const char *numstr, int *converted)
{
diff --git a/tools/testing/selftests/pidfd/pidfd_poll_test.c b/tools/testing/selftests/pidfd/pidfd_poll_test.c
index 55d74a50358f..232304f818c7 100644
--- a/tools/testing/selftests/pidfd/pidfd_poll_test.c
+++ b/tools/testing/selftests/pidfd/pidfd_poll_test.c
@@ -14,7 +14,7 @@
#include <unistd.h>
#include "pidfd.h"
-#include "../kselftest.h"
+#include "kselftest.h"
static bool timeout;
diff --git a/tools/testing/selftests/pidfd/pidfd_setattr_test.c b/tools/testing/selftests/pidfd/pidfd_setattr_test.c
new file mode 100644
index 000000000000..e8562a2992f3
--- /dev/null
+++ b/tools/testing/selftests/pidfd/pidfd_setattr_test.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <linux/types.h>
+#include <poll.h>
+#include <pthread.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <syscall.h>
+#include <sys/prctl.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <linux/kcmp.h>
+#include <sys/stat.h>
+#include <sys/xattr.h>
+
+#include "pidfd.h"
+#include "kselftest_harness.h"
+
+FIXTURE(pidfs_setattr)
+{
+ pid_t child_pid;
+ int child_pidfd;
+};
+
+FIXTURE_SETUP(pidfs_setattr)
+{
+ self->child_pid = create_child(&self->child_pidfd, CLONE_NEWUSER | CLONE_NEWPID);
+ EXPECT_GE(self->child_pid, 0);
+
+ if (self->child_pid == 0)
+ _exit(EXIT_SUCCESS);
+}
+
+FIXTURE_TEARDOWN(pidfs_setattr)
+{
+ sys_waitid(P_PID, self->child_pid, NULL, WEXITED);
+ EXPECT_EQ(close(self->child_pidfd), 0);
+}
+
+TEST_F(pidfs_setattr, no_chown)
+{
+ ASSERT_LT(fchown(self->child_pidfd, 1234, 5678), 0);
+ ASSERT_EQ(errno, EOPNOTSUPP);
+}
+
+TEST_F(pidfs_setattr, no_chmod)
+{
+ ASSERT_LT(fchmod(self->child_pidfd, 0777), 0);
+ ASSERT_EQ(errno, EOPNOTSUPP);
+}
+
+TEST_F(pidfs_setattr, no_exec)
+{
+ char *const argv[] = { NULL };
+ char *const envp[] = { NULL };
+
+ ASSERT_LT(execveat(self->child_pidfd, "", argv, envp, AT_EMPTY_PATH), 0);
+ ASSERT_EQ(errno, EACCES);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/pidfd/pidfd_setns_test.c b/tools/testing/selftests/pidfd/pidfd_setns_test.c
index e6a079b3d5e2..107edecff224 100644
--- a/tools/testing/selftests/pidfd/pidfd_setns_test.c
+++ b/tools/testing/selftests/pidfd/pidfd_setns_test.c
@@ -18,7 +18,7 @@
#include <sys/stat.h>
#include "pidfd.h"
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
enum {
PIDFD_NS_USER,
diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c
index fcd85cad9f18..932cbd8caa77 100644
--- a/tools/testing/selftests/pidfd/pidfd_test.c
+++ b/tools/testing/selftests/pidfd/pidfd_test.c
@@ -20,7 +20,7 @@
#include <unistd.h>
#include "pidfd.h"
-#include "../kselftest.h"
+#include "kselftest.h"
#define str(s) _str(s)
#define _str(s) #s
diff --git a/tools/testing/selftests/pidfd/pidfd_wait.c b/tools/testing/selftests/pidfd/pidfd_wait.c
index 1e2d49751cde..4bf702d62c1c 100644
--- a/tools/testing/selftests/pidfd/pidfd_wait.c
+++ b/tools/testing/selftests/pidfd/pidfd_wait.c
@@ -17,7 +17,7 @@
#include <unistd.h>
#include "pidfd.h"
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#define ptr_to_u64(ptr) ((__u64)((uintptr_t)(ptr)))
diff --git a/tools/testing/selftests/pidfd/pidfd_xattr_test.c b/tools/testing/selftests/pidfd/pidfd_xattr_test.c
new file mode 100644
index 000000000000..fd57511af7e4
--- /dev/null
+++ b/tools/testing/selftests/pidfd/pidfd_xattr_test.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <linux/types.h>
+#include <poll.h>
+#include <pthread.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <syscall.h>
+#include <sys/prctl.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <linux/kcmp.h>
+#include <sys/stat.h>
+#include <sys/xattr.h>
+
+#include "pidfd.h"
+#include "kselftest_harness.h"
+
+FIXTURE(pidfs_xattr)
+{
+ pid_t child_pid;
+ int child_pidfd;
+};
+
+FIXTURE_SETUP(pidfs_xattr)
+{
+ self->child_pid = create_child(&self->child_pidfd, CLONE_NEWUSER | CLONE_NEWPID);
+ EXPECT_GE(self->child_pid, 0);
+
+ if (self->child_pid == 0)
+ _exit(EXIT_SUCCESS);
+}
+
+FIXTURE_TEARDOWN(pidfs_xattr)
+{
+ sys_waitid(P_PID, self->child_pid, NULL, WEXITED);
+}
+
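+/*
+ * Note: xattrs in the trusted.* namespace require privilege (CAP_SYS_ADMIN),
+ * so these tests are expected to run as root.
+ */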
+TEST_F(pidfs_xattr, set_get_list_xattr_multiple)
+{
+ int ret, i;
+ char xattr_name[32];
+ char xattr_value[32];
+ char buf[32];
+ const int num_xattrs = 10;
+ char list[PATH_MAX] = {};
+
+ for (i = 0; i < num_xattrs; i++) {
+ snprintf(xattr_name, sizeof(xattr_name), "trusted.testattr%d", i);
+ snprintf(xattr_value, sizeof(xattr_value), "testvalue%d", i);
+ ret = fsetxattr(self->child_pidfd, xattr_name, xattr_value, strlen(xattr_value), 0);
+ ASSERT_EQ(ret, 0);
+ }
+
+ for (i = 0; i < num_xattrs; i++) {
+ snprintf(xattr_name, sizeof(xattr_name), "trusted.testattr%d", i);
+ snprintf(xattr_value, sizeof(xattr_value), "testvalue%d", i);
+ memset(buf, 0, sizeof(buf));
+ ret = fgetxattr(self->child_pidfd, xattr_name, buf, sizeof(buf));
+ ASSERT_EQ(ret, strlen(xattr_value));
+ ASSERT_EQ(strcmp(buf, xattr_value), 0);
+ }
+
+ ret = flistxattr(self->child_pidfd, list, sizeof(list));
+ ASSERT_GT(ret, 0);
+ for (i = 0; i < num_xattrs; i++) {
+ snprintf(xattr_name, sizeof(xattr_name), "trusted.testattr%d", i);
+ bool found = false;
+ for (char *it = list; it < list + ret; it += strlen(it) + 1) {
+ if (strcmp(it, xattr_name))
+ continue;
+ found = true;
+ break;
+ }
+ ASSERT_TRUE(found);
+ }
+
+ for (i = 0; i < num_xattrs; i++) {
+ snprintf(xattr_name, sizeof(xattr_name), "trusted.testattr%d", i);
+ ret = fremovexattr(self->child_pidfd, xattr_name);
+ ASSERT_EQ(ret, 0);
+
+ ret = fgetxattr(self->child_pidfd, xattr_name, buf, sizeof(buf));
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, ENODATA);
+ }
+}
+
+TEST_F(pidfs_xattr, set_get_list_xattr_persistent)
+{
+ int ret;
+ char buf[32];
+ char list[PATH_MAX] = {};
+
+ ret = fsetxattr(self->child_pidfd, "trusted.persistent", "persistent value", strlen("persistent value"), 0);
+ ASSERT_EQ(ret, 0);
+
+ memset(buf, 0, sizeof(buf));
+ ret = fgetxattr(self->child_pidfd, "trusted.persistent", buf, sizeof(buf));
+ ASSERT_EQ(ret, strlen("persistent value"));
+ ASSERT_EQ(strcmp(buf, "persistent value"), 0);
+
+ ret = flistxattr(self->child_pidfd, list, sizeof(list));
+ ASSERT_GT(ret, 0);
+ ASSERT_EQ(strcmp(list, "trusted.persistent"), 0)
+
+ ASSERT_EQ(close(self->child_pidfd), 0);
+ self->child_pidfd = -EBADF;
+ sleep(2);
+
+ self->child_pidfd = sys_pidfd_open(self->child_pid, 0);
+ ASSERT_GE(self->child_pidfd, 0);
+
+ memset(buf, 0, sizeof(buf));
+ ret = fgetxattr(self->child_pidfd, "trusted.persistent", buf, sizeof(buf));
+ ASSERT_EQ(ret, strlen("persistent value"));
+ ASSERT_EQ(strcmp(buf, "persistent value"), 0);
+
+ ret = flistxattr(self->child_pidfd, list, sizeof(list));
+ ASSERT_GT(ret, 0);
+ ASSERT_EQ(strcmp(list, "trusted.persistent"), 0);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/powerpc/include/instructions.h b/tools/testing/selftests/powerpc/include/instructions.h
index 4efa6314bd96..864f0c9f1afc 100644
--- a/tools/testing/selftests/powerpc/include/instructions.h
+++ b/tools/testing/selftests/powerpc/include/instructions.h
@@ -67,7 +67,7 @@ static inline int paste_last(void *i)
#define PPC_INST_PASTE_LAST __PASTE(0, 0, 1, 1)
/* This defines the prefixed load/store instructions */
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
# define stringify_in_c(...) __VA_ARGS__
#else
# define __stringify_in_c(...) #__VA_ARGS__
diff --git a/tools/testing/selftests/prctl/set-anon-vma-name-test.c b/tools/testing/selftests/prctl/set-anon-vma-name-test.c
index 4275cb256dce..ac6721b184a6 100644
--- a/tools/testing/selftests/prctl/set-anon-vma-name-test.c
+++ b/tools/testing/selftests/prctl/set-anon-vma-name-test.c
@@ -10,7 +10,7 @@
#include <sys/mman.h>
#include <string.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#define AREA_SIZE 1024
diff --git a/tools/testing/selftests/prctl/set-process-name.c b/tools/testing/selftests/prctl/set-process-name.c
index 562f707ba771..3f7b146d36df 100644
--- a/tools/testing/selftests/prctl/set-process-name.c
+++ b/tools/testing/selftests/prctl/set-process-name.c
@@ -7,7 +7,7 @@
#include <sys/prctl.h>
#include <string.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#define CHANGE_NAME "changename"
#define EMPTY_NAME ""
diff --git a/tools/testing/selftests/proc/.gitignore b/tools/testing/selftests/proc/.gitignore
index 973968f45bba..9c9735570abf 100644
--- a/tools/testing/selftests/proc/.gitignore
+++ b/tools/testing/selftests/proc/.gitignore
@@ -5,7 +5,9 @@
/proc-2-is-kthread
/proc-fsconfig-hidepid
/proc-loadavg-001
+/proc-maps-race
/proc-multiple-procfs
+/proc-net-dev-lseek
/proc-empty-vm
/proc-pid-vm
/proc-self-map-files-001
@@ -17,6 +19,7 @@
/proc-tid0
/proc-uptime-001
/proc-uptime-002
+/proc-pidns
/read
/self
/setns-dcache
diff --git a/tools/testing/selftests/proc/Makefile b/tools/testing/selftests/proc/Makefile
index b12921b9794b..a7de2bb6d8be 100644
--- a/tools/testing/selftests/proc/Makefile
+++ b/tools/testing/selftests/proc/Makefile
@@ -9,6 +9,8 @@ TEST_GEN_PROGS += fd-002-posix-eq
TEST_GEN_PROGS += fd-003-kthread
TEST_GEN_PROGS += proc-2-is-kthread
TEST_GEN_PROGS += proc-loadavg-001
+TEST_GEN_PROGS += proc-maps-race
+TEST_GEN_PROGS += proc-net-dev-lseek
TEST_GEN_PROGS += proc-empty-vm
TEST_GEN_PROGS += proc-pid-vm
TEST_GEN_PROGS += proc-self-map-files-001
@@ -27,5 +29,6 @@ TEST_GEN_PROGS += setns-sysvipc
TEST_GEN_PROGS += thread-self
TEST_GEN_PROGS += proc-multiple-procfs
TEST_GEN_PROGS += proc-fsconfig-hidepid
+TEST_GEN_PROGS += proc-pidns
include ../lib.mk
diff --git a/tools/testing/selftests/proc/proc-maps-race.c b/tools/testing/selftests/proc/proc-maps-race.c
new file mode 100644
index 000000000000..a734553718da
--- /dev/null
+++ b/tools/testing/selftests/proc/proc-maps-race.c
@@ -0,0 +1,806 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022 Google LLC.
+ * Author: Suren Baghdasaryan <surenb@google.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * Fork a child that concurrently modifies the address space while the main
+ * process is reading /proc/$PID/maps and verifying the results. Address
+ * space modifications include VMA splitting and merging.
+ */
+#define _GNU_SOURCE
+#include "kselftest_harness.h"
+#include <errno.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <linux/fs.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+
+/* /proc/pid/maps parsing routines */
+struct page_content {
+ char *data;
+ ssize_t size;
+};
+
+#define LINE_MAX_SIZE 256
+
+struct line_content {
+ char text[LINE_MAX_SIZE];
+ unsigned long start_addr;
+ unsigned long end_addr;
+};
+
+enum test_state {
+ INIT,
+ CHILD_READY,
+ PARENT_READY,
+ SETUP_READY,
+ SETUP_MODIFY_MAPS,
+ SETUP_MAPS_MODIFIED,
+ SETUP_RESTORE_MAPS,
+ SETUP_MAPS_RESTORED,
+ TEST_READY,
+ TEST_DONE,
+};
+
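+/*
+ * Parent/child handshake for one setup round (see FIXTURE_SETUP and
+ * capture_mod_pattern): child signals CHILD_READY, parent answers
+ * PARENT_READY; then child signals SETUP_READY, parent requests
+ * SETUP_MODIFY_MAPS, child acks SETUP_MAPS_MODIFIED, parent requests
+ * SETUP_RESTORE_MAPS, child acks SETUP_MAPS_RESTORED; finally TEST_READY
+ * starts the child's modify/restore loop until TEST_DONE.
+ */
+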
+struct vma_modifier_info;
+
+FIXTURE(proc_maps_race)
+{
+ struct vma_modifier_info *mod_info;
+ struct page_content page1;
+ struct page_content page2;
+ struct line_content last_line;
+ struct line_content first_line;
+ unsigned long duration_sec;
+ int shared_mem_size;
+ int page_size;
+ int vma_count;
+ bool verbose;
+ int maps_fd;
+ pid_t pid;
+};
+
+typedef bool (*vma_modifier_op)(FIXTURE_DATA(proc_maps_race) *self);
+typedef bool (*vma_mod_result_check_op)(struct line_content *mod_last_line,
+ struct line_content *mod_first_line,
+ struct line_content *restored_last_line,
+ struct line_content *restored_first_line);
+
+struct vma_modifier_info {
+ int vma_count;
+ void *addr;
+ int prot;
+ void *next_addr;
+ vma_modifier_op vma_modify;
+ vma_modifier_op vma_restore;
+ vma_mod_result_check_op vma_mod_check;
+ pthread_mutex_t sync_lock;
+ pthread_cond_t sync_cond;
+ enum test_state curr_state;
+ bool exit;
+ void *child_mapped_addr[];
+};
+
+
+static bool read_two_pages(FIXTURE_DATA(proc_maps_race) *self)
+{
+ ssize_t bytes_read;
+
+ if (lseek(self->maps_fd, 0, SEEK_SET) < 0)
+ return false;
+
+ bytes_read = read(self->maps_fd, self->page1.data, self->page_size);
+ if (bytes_read <= 0)
+ return false;
+
+ self->page1.size = bytes_read;
+
+ bytes_read = read(self->maps_fd, self->page2.data, self->page_size);
+ if (bytes_read <= 0)
+ return false;
+
+ self->page2.size = bytes_read;
+
+ return true;
+}
+
+static void copy_first_line(struct page_content *page, char *first_line)
+{
+ char *pos = strchr(page->data, '\n');
+
+ strncpy(first_line, page->data, pos - page->data);
+ first_line[pos - page->data] = '\0';
+}
+
+static void copy_last_line(struct page_content *page, char *last_line)
+{
+ /* Get the last line in the first page */
+ const char *end = page->data + page->size - 1;
+ /* skip last newline */
+ const char *pos = end - 1;
+
+ /* search previous newline */
+ while (pos[-1] != '\n')
+ pos--;
+ strncpy(last_line, pos, end - pos);
+ last_line[end - pos] = '\0';
+}
+
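+/*
+ * A /proc/pid/maps line starts with the address range, which is all this
+ * parser relies on, e.g.:
+ *   7f0123456000-7f0123459000 rw-p 00000000 00:00 0
+ */
+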
+/* Read the last line of the first page and the first line of the second page */
+static bool read_boundary_lines(FIXTURE_DATA(proc_maps_race) *self,
+ struct line_content *last_line,
+ struct line_content *first_line)
+{
+ if (!read_two_pages(self))
+ return false;
+
+ copy_last_line(&self->page1, last_line->text);
+ copy_first_line(&self->page2, first_line->text);
+
+ return sscanf(last_line->text, "%lx-%lx", &last_line->start_addr,
+ &last_line->end_addr) == 2 &&
+ sscanf(first_line->text, "%lx-%lx", &first_line->start_addr,
+ &first_line->end_addr) == 2;
+}
+
+/* Thread synchronization routines */
+static void wait_for_state(struct vma_modifier_info *mod_info, enum test_state state)
+{
+ pthread_mutex_lock(&mod_info->sync_lock);
+ while (mod_info->curr_state != state)
+ pthread_cond_wait(&mod_info->sync_cond, &mod_info->sync_lock);
+ pthread_mutex_unlock(&mod_info->sync_lock);
+}
+
+static void signal_state(struct vma_modifier_info *mod_info, enum test_state state)
+{
+ pthread_mutex_lock(&mod_info->sync_lock);
+ mod_info->curr_state = state;
+ pthread_cond_signal(&mod_info->sync_cond);
+ pthread_mutex_unlock(&mod_info->sync_lock);
+}
+
+static void stop_vma_modifier(struct vma_modifier_info *mod_info)
+{
+ wait_for_state(mod_info, SETUP_READY);
+ mod_info->exit = true;
+ signal_state(mod_info, SETUP_MODIFY_MAPS);
+}
+
+static void print_first_lines(char *text, int nr)
+{
+ const char *end = text;
+
+ while (nr && (end = strchr(end, '\n')) != NULL) {
+ nr--;
+ end++;
+ }
+
+ if (end) {
+ int offs = end - text;
+
+ text[offs] = '\0';
+ printf("%s", text);
+ text[offs] = '\n';
+ printf("\n");
+ } else {
+ printf("%s", text);
+ }
+}
+
+static void print_last_lines(char *text, int nr)
+{
+ const char *start = text + strlen(text);
+
+ nr++; /* to ignore the last newline */
+ while (nr) {
+ while (start > text && *start != '\n')
+ start--;
+ nr--;
+ start--;
+ }
+ printf("%s", start);
+}
+
+static void print_boundaries(const char *title, FIXTURE_DATA(proc_maps_race) *self)
+{
+ if (!self->verbose)
+ return;
+
+ printf("%s", title);
+ /* Print 3 boundary lines from each page */
+ print_last_lines(self->page1.data, 3);
+ printf("-----------------page boundary-----------------\n");
+ print_first_lines(self->page2.data, 3);
+}
+
+static bool print_boundaries_on(bool condition, const char *title,
+ FIXTURE_DATA(proc_maps_race) *self)
+{
+ if (self->verbose && condition)
+ print_boundaries(title, self);
+
+ return condition;
+}
+
+static void report_test_start(const char *name, bool verbose)
+{
+ if (verbose)
+ printf("==== %s ====\n", name);
+}
+
+static struct timespec print_ts;
+
+static void start_test_loop(struct timespec *ts, bool verbose)
+{
+ if (verbose)
+ print_ts.tv_sec = ts->tv_sec;
+}
+
+static void end_test_iteration(struct timespec *ts, bool verbose)
+{
+ if (!verbose)
+ return;
+
+ /* Update every second */
+ if (print_ts.tv_sec == ts->tv_sec)
+ return;
+
+ printf(".");
+ fflush(stdout);
+ print_ts.tv_sec = ts->tv_sec;
+}
+
+static void end_test_loop(bool verbose)
+{
+ if (verbose)
+ printf("\n");
+}
+
+static bool capture_mod_pattern(FIXTURE_DATA(proc_maps_race) *self,
+ struct line_content *mod_last_line,
+ struct line_content *mod_first_line,
+ struct line_content *restored_last_line,
+ struct line_content *restored_first_line)
+{
+ print_boundaries("Before modification", self);
+
+ signal_state(self->mod_info, SETUP_MODIFY_MAPS);
+ wait_for_state(self->mod_info, SETUP_MAPS_MODIFIED);
+
+ /* Copy last line of the first page and first line of the last page */
+ if (!read_boundary_lines(self, mod_last_line, mod_first_line))
+ return false;
+
+ print_boundaries("After modification", self);
+
+ signal_state(self->mod_info, SETUP_RESTORE_MAPS);
+ wait_for_state(self->mod_info, SETUP_MAPS_RESTORED);
+
+ /* Copy last line of the first page and first line of the last page */
+ if (!read_boundary_lines(self, restored_last_line, restored_first_line))
+ return false;
+
+ print_boundaries("After restore", self);
+
+ if (!self->mod_info->vma_mod_check(mod_last_line, mod_first_line,
+ restored_last_line, restored_first_line))
+ return false;
+
+ /*
+ * The content of these lines after modify+restore should be the same
+ * as the original.
+ */
+ return strcmp(restored_last_line->text, self->last_line.text) == 0 &&
+ strcmp(restored_first_line->text, self->first_line.text) == 0;
+}
+
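+/*
+ * Look up the VMA covering @addr with the PROCMAP_QUERY ioctl (from
+ * linux/fs.h) issued on an open /proc/pid/maps file descriptor.
+ */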
+static bool query_addr_at(int maps_fd, void *addr,
+ unsigned long *vma_start, unsigned long *vma_end)
+{
+ struct procmap_query q;
+
+ memset(&q, 0, sizeof(q));
+ q.size = sizeof(q);
+ /* Find the VMA at the split address */
+ q.query_addr = (unsigned long long)addr;
+ q.query_flags = 0;
+ if (ioctl(maps_fd, PROCMAP_QUERY, &q))
+ return false;
+
+ *vma_start = q.vma_start;
+ *vma_end = q.vma_end;
+
+ return true;
+}
+
+static inline bool split_vma(FIXTURE_DATA(proc_maps_race) *self)
+{
+ return mmap(self->mod_info->addr, self->page_size, self->mod_info->prot | PROT_EXEC,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) != MAP_FAILED;
+}
+
+static inline bool merge_vma(FIXTURE_DATA(proc_maps_race) *self)
+{
+ return mmap(self->mod_info->addr, self->page_size, self->mod_info->prot,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) != MAP_FAILED;
+}
+
+static inline bool check_split_result(struct line_content *mod_last_line,
+ struct line_content *mod_first_line,
+ struct line_content *restored_last_line,
+ struct line_content *restored_first_line)
+{
+ /* Make sure vmas at the boundaries are changing */
+ return strcmp(mod_last_line->text, restored_last_line->text) != 0 &&
+ strcmp(mod_first_line->text, restored_first_line->text) != 0;
+}
+
+static inline bool shrink_vma(FIXTURE_DATA(proc_maps_race) *self)
+{
+ return mremap(self->mod_info->addr, self->page_size * 3,
+ self->page_size, 0) != MAP_FAILED;
+}
+
+static inline bool expand_vma(FIXTURE_DATA(proc_maps_race) *self)
+{
+ return mremap(self->mod_info->addr, self->page_size,
+ self->page_size * 3, 0) != MAP_FAILED;
+}
+
+static inline bool check_shrink_result(struct line_content *mod_last_line,
+ struct line_content *mod_first_line,
+ struct line_content *restored_last_line,
+ struct line_content *restored_first_line)
+{
+ /* Make sure only the last vma of the first page is changing */
+ return strcmp(mod_last_line->text, restored_last_line->text) != 0 &&
+ strcmp(mod_first_line->text, restored_first_line->text) == 0;
+}
+
+static inline bool remap_vma(FIXTURE_DATA(proc_maps_race) *self)
+{
+ /*
+ * Remap the last page of the next vma into the middle of the vma.
+ * This splits the current vma and the first and middle parts (the
+ * parts at lower addresses) become the last vma observed in the
+ * first page and the first vma observed in the last page.
+ */
+ return mremap(self->mod_info->next_addr + self->page_size * 2, self->page_size,
+ self->page_size, MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP,
+ self->mod_info->addr + self->page_size) != MAP_FAILED;
+}
+
+static inline bool patch_vma(FIXTURE_DATA(proc_maps_race) *self)
+{
+ return mprotect(self->mod_info->addr + self->page_size, self->page_size,
+ self->mod_info->prot) == 0;
+}
+
+static inline bool check_remap_result(struct line_content *mod_last_line,
+ struct line_content *mod_first_line,
+ struct line_content *restored_last_line,
+ struct line_content *restored_first_line)
+{
+ /* Make sure vmas at the boundaries are changing */
+ return strcmp(mod_last_line->text, restored_last_line->text) != 0 &&
+ strcmp(mod_first_line->text, restored_first_line->text) != 0;
+}
+
+FIXTURE_SETUP(proc_maps_race)
+{
+ const char *verbose = getenv("VERBOSE");
+ const char *duration = getenv("DURATION");
+ struct vma_modifier_info *mod_info;
+ pthread_mutexattr_t mutex_attr;
+ pthread_condattr_t cond_attr;
+ unsigned long duration_sec;
+ char fname[32];
+
+ self->page_size = (unsigned long)sysconf(_SC_PAGESIZE);
+ self->verbose = verbose && !strncmp(verbose, "1", 1);
+ duration_sec = duration ? atol(duration) : 0;
+ self->duration_sec = duration_sec ? duration_sec : 5UL;
+
+ /*
+ * Have to map enough vmas for /proc/pid/maps to contain more than one
+ * page worth of vmas. Assume at least 32 bytes per line in maps output
+ */
+ self->vma_count = self->page_size / 32 + 1;
+ self->shared_mem_size = sizeof(struct vma_modifier_info) + self->vma_count * sizeof(void *);
+
+ /* map shared memory for communication with the child process */
+ self->mod_info = (struct vma_modifier_info *)mmap(NULL, self->shared_mem_size,
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ ASSERT_NE(self->mod_info, MAP_FAILED);
+ mod_info = self->mod_info;
+
+ /* Initialize shared members */
+ pthread_mutexattr_init(&mutex_attr);
+ pthread_mutexattr_setpshared(&mutex_attr, PTHREAD_PROCESS_SHARED);
+ ASSERT_EQ(pthread_mutex_init(&mod_info->sync_lock, &mutex_attr), 0);
+ pthread_condattr_init(&cond_attr);
+ pthread_condattr_setpshared(&cond_attr, PTHREAD_PROCESS_SHARED);
+ ASSERT_EQ(pthread_cond_init(&mod_info->sync_cond, &cond_attr), 0);
+ mod_info->vma_count = self->vma_count;
+ mod_info->curr_state = INIT;
+ mod_info->exit = false;
+
+ self->pid = fork();
+ if (!self->pid) {
+ /* Child process modifying the address space */
+ int prot = PROT_READ | PROT_WRITE;
+ int i;
+
+ for (i = 0; i < mod_info->vma_count; i++) {
+ mod_info->child_mapped_addr[i] = mmap(NULL, self->page_size * 3, prot,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ ASSERT_NE(mod_info->child_mapped_addr[i], MAP_FAILED);
+ /* change protection in adjacent maps to prevent merging */
+ prot ^= PROT_WRITE;
+ }
+ signal_state(mod_info, CHILD_READY);
+ wait_for_state(mod_info, PARENT_READY);
+ while (true) {
+ signal_state(mod_info, SETUP_READY);
+ wait_for_state(mod_info, SETUP_MODIFY_MAPS);
+ if (mod_info->exit)
+ break;
+
+ ASSERT_TRUE(mod_info->vma_modify(self));
+ signal_state(mod_info, SETUP_MAPS_MODIFIED);
+ wait_for_state(mod_info, SETUP_RESTORE_MAPS);
+ ASSERT_TRUE(mod_info->vma_restore(self));
+ signal_state(mod_info, SETUP_MAPS_RESTORED);
+
+ wait_for_state(mod_info, TEST_READY);
+ while (mod_info->curr_state != TEST_DONE) {
+ ASSERT_TRUE(mod_info->vma_modify(self));
+ ASSERT_TRUE(mod_info->vma_restore(self));
+ }
+ }
+ for (i = 0; i < mod_info->vma_count; i++)
+ munmap(mod_info->child_mapped_addr[i], self->page_size * 3);
+
+ exit(0);
+ }
+
+ sprintf(fname, "/proc/%d/maps", self->pid);
+ self->maps_fd = open(fname, O_RDONLY);
+ ASSERT_NE(self->maps_fd, -1);
+
+ /* Wait for the child to map the VMAs */
+ wait_for_state(mod_info, CHILD_READY);
+
+ /* Read first two pages */
+ self->page1.data = malloc(self->page_size);
+ ASSERT_NE(self->page1.data, NULL);
+ self->page2.data = malloc(self->page_size);
+ ASSERT_NE(self->page2.data, NULL);
+
+ ASSERT_TRUE(read_boundary_lines(self, &self->last_line, &self->first_line));
+
+ /*
+ * Find the addresses corresponding to the last line in the first page
+ * and the first line in the last page.
+ */
+ mod_info->addr = NULL;
+ mod_info->next_addr = NULL;
+ for (int i = 0; i < mod_info->vma_count; i++) {
+ if (mod_info->child_mapped_addr[i] == (void *)self->last_line.start_addr) {
+ mod_info->addr = mod_info->child_mapped_addr[i];
+ mod_info->prot = PROT_READ;
+ /* Even VMAs have write permission */
+ if ((i % 2) == 0)
+ mod_info->prot |= PROT_WRITE;
+ } else if (mod_info->child_mapped_addr[i] == (void *)self->first_line.start_addr) {
+ mod_info->next_addr = mod_info->child_mapped_addr[i];
+ }
+
+ if (mod_info->addr && mod_info->next_addr)
+ break;
+ }
+ ASSERT_TRUE(mod_info->addr && mod_info->next_addr);
+
+ signal_state(mod_info, PARENT_READY);
+}
+
+FIXTURE_TEARDOWN(proc_maps_race)
+{
+ int status;
+
+ stop_vma_modifier(self->mod_info);
+
+ free(self->page2.data);
+ free(self->page1.data);
+
+	for (int i = 0; i < self->vma_count; i++)
+		munmap(self->mod_info->child_mapped_addr[i], self->page_size * 3);
+ close(self->maps_fd);
+ waitpid(self->pid, &status, 0);
+ munmap(self->mod_info, self->shared_mem_size);
+}
+
+TEST_F(proc_maps_race, test_maps_tearing_from_split)
+{
+ struct vma_modifier_info *mod_info = self->mod_info;
+
+ struct line_content split_last_line;
+ struct line_content split_first_line;
+ struct line_content restored_last_line;
+ struct line_content restored_first_line;
+
+ wait_for_state(mod_info, SETUP_READY);
+
+ /* re-read the file to avoid using stale data from previous test */
+ ASSERT_TRUE(read_boundary_lines(self, &self->last_line, &self->first_line));
+
+ mod_info->vma_modify = split_vma;
+ mod_info->vma_restore = merge_vma;
+ mod_info->vma_mod_check = check_split_result;
+
+ report_test_start("Tearing from split", self->verbose);
+ ASSERT_TRUE(capture_mod_pattern(self, &split_last_line, &split_first_line,
+ &restored_last_line, &restored_first_line));
+
+ /* Now start concurrent modifications for self->duration_sec */
+ signal_state(mod_info, TEST_READY);
+
+ struct line_content new_last_line;
+ struct line_content new_first_line;
+ struct timespec start_ts, end_ts;
+
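+	/*
+	 * CLOCK_MONOTONIC_COARSE trades resolution for cheap reads, which is
+	 * fine here: the loop only compares whole seconds to duration_sec.
+	 */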
+ clock_gettime(CLOCK_MONOTONIC_COARSE, &start_ts);
+ start_test_loop(&start_ts, self->verbose);
+ do {
+ bool last_line_changed;
+ bool first_line_changed;
+ unsigned long vma_start;
+ unsigned long vma_end;
+
+ ASSERT_TRUE(read_boundary_lines(self, &new_last_line, &new_first_line));
+
+		/* Check whether we read the vmas after the split */
+ if (!strcmp(new_last_line.text, split_last_line.text)) {
+ /*
+			 * The vmas should be consistent with the split results,
+			 * however if the vma was concurrently restored after a
+			 * split, it can be reported twice (first the original
+			 * split one, then the same vma but extended after the
+			 * merge) because we found it as the next vma again.
+			 * In that case the new first line will be the same as
+			 * the last restored line.
+ */
+ ASSERT_FALSE(print_boundaries_on(
+ strcmp(new_first_line.text, split_first_line.text) &&
+ strcmp(new_first_line.text, restored_last_line.text),
+ "Split result invalid", self));
+ } else {
+ /* The vmas should be consistent with merge results */
+ ASSERT_FALSE(print_boundaries_on(
+ strcmp(new_last_line.text, restored_last_line.text),
+ "Merge result invalid", self));
+ ASSERT_FALSE(print_boundaries_on(
+ strcmp(new_first_line.text, restored_first_line.text),
+ "Merge result invalid", self));
+ }
+ /*
+ * First and last lines should change in unison. If the last
+ * line changed then the first line should change as well and
+ * vice versa.
+ */
+ last_line_changed = strcmp(new_last_line.text, self->last_line.text) != 0;
+ first_line_changed = strcmp(new_first_line.text, self->first_line.text) != 0;
+ ASSERT_EQ(last_line_changed, first_line_changed);
+
+		/* Check if PROCMAP_QUERY ioctl() finds the right VMA */
+ ASSERT_TRUE(query_addr_at(self->maps_fd, mod_info->addr + self->page_size,
+ &vma_start, &vma_end));
+ /*
+		 * The vma at the split address can either be the same as the
+		 * original one (if read before the split) or the same as the
+ * first line in the second page (if read after the split).
+ */
+ ASSERT_TRUE((vma_start == self->last_line.start_addr &&
+ vma_end == self->last_line.end_addr) ||
+ (vma_start == split_first_line.start_addr &&
+ vma_end == split_first_line.end_addr));
+
+ clock_gettime(CLOCK_MONOTONIC_COARSE, &end_ts);
+ end_test_iteration(&end_ts, self->verbose);
+ } while (end_ts.tv_sec - start_ts.tv_sec < self->duration_sec);
+ end_test_loop(self->verbose);
+
+	/* Signal the modifier process to stop and wait until it exits */
+ signal_state(mod_info, TEST_DONE);
+}
+
+TEST_F(proc_maps_race, test_maps_tearing_from_resize)
+{
+ struct vma_modifier_info *mod_info = self->mod_info;
+
+ struct line_content shrunk_last_line;
+ struct line_content shrunk_first_line;
+ struct line_content restored_last_line;
+ struct line_content restored_first_line;
+
+ wait_for_state(mod_info, SETUP_READY);
+
+ /* re-read the file to avoid using stale data from previous test */
+ ASSERT_TRUE(read_boundary_lines(self, &self->last_line, &self->first_line));
+
+ mod_info->vma_modify = shrink_vma;
+ mod_info->vma_restore = expand_vma;
+ mod_info->vma_mod_check = check_shrink_result;
+
+ report_test_start("Tearing from resize", self->verbose);
+ ASSERT_TRUE(capture_mod_pattern(self, &shrunk_last_line, &shrunk_first_line,
+ &restored_last_line, &restored_first_line));
+
+ /* Now start concurrent modifications for self->duration_sec */
+ signal_state(mod_info, TEST_READY);
+
+ struct line_content new_last_line;
+ struct line_content new_first_line;
+ struct timespec start_ts, end_ts;
+
+ clock_gettime(CLOCK_MONOTONIC_COARSE, &start_ts);
+ start_test_loop(&start_ts, self->verbose);
+ do {
+ unsigned long vma_start;
+ unsigned long vma_end;
+
+ ASSERT_TRUE(read_boundary_lines(self, &new_last_line, &new_first_line));
+
+		/* Check whether we read the vmas after the shrink */
+ if (!strcmp(new_last_line.text, shrunk_last_line.text)) {
+ /*
+			 * The vmas should be consistent with the shrink results,
+			 * however if the vma was concurrently restored, it can
+			 * be reported twice (first as the shrunk one, then as
+			 * the restored one) because we found it as the next vma
+			 * again. In that case the new first line will be the
+			 * same as the last restored line.
+ */
+ ASSERT_FALSE(print_boundaries_on(
+ strcmp(new_first_line.text, shrunk_first_line.text) &&
+ strcmp(new_first_line.text, restored_last_line.text),
+ "Shrink result invalid", self));
+ } else {
+			/* The vmas should be consistent with the original/restored state */
+ ASSERT_FALSE(print_boundaries_on(
+ strcmp(new_last_line.text, restored_last_line.text),
+ "Expand result invalid", self));
+ ASSERT_FALSE(print_boundaries_on(
+ strcmp(new_first_line.text, restored_first_line.text),
+ "Expand result invalid", self));
+ }
+
+		/* Check if PROCMAP_QUERY ioctl() finds the right VMA */
+ ASSERT_TRUE(query_addr_at(self->maps_fd, mod_info->addr, &vma_start, &vma_end));
+ /*
+ * The vma should stay at the same address and have either the
+ * original size of 3 pages or 1 page if read after shrinking.
+ */
+ ASSERT_TRUE(vma_start == self->last_line.start_addr &&
+ (vma_end - vma_start == self->page_size * 3 ||
+ vma_end - vma_start == self->page_size));
+
+ clock_gettime(CLOCK_MONOTONIC_COARSE, &end_ts);
+ end_test_iteration(&end_ts, self->verbose);
+ } while (end_ts.tv_sec - start_ts.tv_sec < self->duration_sec);
+ end_test_loop(self->verbose);
+
+	/* Signal the modifier process to stop and wait until it exits */
+ signal_state(mod_info, TEST_DONE);
+}
+
+TEST_F(proc_maps_race, test_maps_tearing_from_remap)
+{
+ struct vma_modifier_info *mod_info = self->mod_info;
+
+ struct line_content remapped_last_line;
+ struct line_content remapped_first_line;
+ struct line_content restored_last_line;
+ struct line_content restored_first_line;
+
+ wait_for_state(mod_info, SETUP_READY);
+
+ /* re-read the file to avoid using stale data from previous test */
+ ASSERT_TRUE(read_boundary_lines(self, &self->last_line, &self->first_line));
+
+ mod_info->vma_modify = remap_vma;
+ mod_info->vma_restore = patch_vma;
+ mod_info->vma_mod_check = check_remap_result;
+
+ report_test_start("Tearing from remap", self->verbose);
+ ASSERT_TRUE(capture_mod_pattern(self, &remapped_last_line, &remapped_first_line,
+ &restored_last_line, &restored_first_line));
+
+ /* Now start concurrent modifications for self->duration_sec */
+ signal_state(mod_info, TEST_READY);
+
+ struct line_content new_last_line;
+ struct line_content new_first_line;
+ struct timespec start_ts, end_ts;
+
+ clock_gettime(CLOCK_MONOTONIC_COARSE, &start_ts);
+ start_test_loop(&start_ts, self->verbose);
+ do {
+ unsigned long vma_start;
+ unsigned long vma_end;
+
+ ASSERT_TRUE(read_boundary_lines(self, &new_last_line, &new_first_line));
+
+		/* Check whether we read the vmas after the remap */
+ if (!strcmp(new_last_line.text, remapped_last_line.text)) {
+ /*
+			 * The vmas should be consistent with the remap results,
+			 * however if the vma was concurrently restored, it can
+			 * be reported twice (first as the remapped one, then as
+			 * the restored one) because we found it as the next vma
+			 * again. In that case the new first line will be the
+			 * same as the last restored line.
+ */
+ ASSERT_FALSE(print_boundaries_on(
+ strcmp(new_first_line.text, remapped_first_line.text) &&
+ strcmp(new_first_line.text, restored_last_line.text),
+ "Remap result invalid", self));
+ } else {
+			/* The vmas should be consistent with the original/restored state */
+ ASSERT_FALSE(print_boundaries_on(
+ strcmp(new_last_line.text, restored_last_line.text),
+ "Remap restore result invalid", self));
+ ASSERT_FALSE(print_boundaries_on(
+ strcmp(new_first_line.text, restored_first_line.text),
+ "Remap restore result invalid", self));
+ }
+
+		/* Check if PROCMAP_QUERY ioctl() finds the right VMA */
+ ASSERT_TRUE(query_addr_at(self->maps_fd, mod_info->addr + self->page_size,
+ &vma_start, &vma_end));
+ /*
+ * The vma should either stay at the same address and have the
+ * original size of 3 pages or we should find the remapped vma
+ * at the remap destination address with size of 1 page.
+ */
+ ASSERT_TRUE((vma_start == self->last_line.start_addr &&
+ vma_end - vma_start == self->page_size * 3) ||
+ (vma_start == self->last_line.start_addr + self->page_size &&
+ vma_end - vma_start == self->page_size));
+
+ clock_gettime(CLOCK_MONOTONIC_COARSE, &end_ts);
+ end_test_iteration(&end_ts, self->verbose);
+ } while (end_ts.tv_sec - start_ts.tv_sec < self->duration_sec);
+ end_test_loop(self->verbose);
+
+	/* Signal the modifier process to stop and wait until it exits */
+ signal_state(mod_info, TEST_DONE);
+}
+
+TEST_HARNESS_MAIN
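
The query_addr_at() helper used above wraps the PROCMAP_QUERY ioctl() on the
maps file descriptor. A minimal sketch of such a lookup, assuming the uapi
definitions from <linux/fs.h>; query_vma() is a hypothetical name, not the
helper this test defines:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>	/* struct procmap_query, PROCMAP_QUERY */

	/* Find the VMA covering addr, or the next one after it. */
	static int query_vma(int maps_fd, void *addr,
			     unsigned long *vma_start, unsigned long *vma_end)
	{
		struct procmap_query q;

		memset(&q, 0, sizeof(q));
		q.size = sizeof(q);	/* the kernel uses this for ABI versioning */
		q.query_addr = (unsigned long long)addr;
		q.query_flags = PROCMAP_QUERY_COVERING_OR_NEXT_VMA;

		if (ioctl(maps_fd, PROCMAP_QUERY, &q))
			return -1;	/* errno describes the failure */

		*vma_start = q.vma_start;
		*vma_end = q.vma_end;
		return 0;
	}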
diff --git a/tools/testing/selftests/proc/proc-net-dev-lseek.c b/tools/testing/selftests/proc/proc-net-dev-lseek.c
new file mode 100644
index 000000000000..742a3e804451
--- /dev/null
+++ b/tools/testing/selftests/proc/proc-net-dev-lseek.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2025 Alexey Dobriyan <adobriyan@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#undef _GNU_SOURCE
+#define _GNU_SOURCE
+#undef NDEBUG
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#include <sched.h>
+/*
+ * Test that lseek("/proc/net/dev/", 0, SEEK_SET)
+ * a) works,
+ * b) does what you think it does.
+ */
+int main(void)
+{
+ /* /proc/net/dev output is deterministic in fresh netns only. */
+ if (unshare(CLONE_NEWNET) == -1) {
+ if (errno == ENOSYS || errno == EPERM) {
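+			/* 4 is the kselftest skip exit code (KSFT_SKIP). */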
+ return 4;
+ }
+ return 1;
+ }
+
+ const int fd = open("/proc/net/dev", O_RDONLY);
+ assert(fd >= 0);
+
+ char buf1[4096];
+ const ssize_t rv1 = read(fd, buf1, sizeof(buf1));
+ /*
+ * Not "<=", this file can't be empty:
+ * there is header, "lo" interface with some zeroes.
+ */
+ assert(0 < rv1);
+ assert(rv1 <= sizeof(buf1));
+
+ /* Believe it or not, this line broke one day. */
+ assert(lseek(fd, 0, SEEK_SET) == 0);
+
+ char buf2[4096];
+ const ssize_t rv2 = read(fd, buf2, sizeof(buf2));
+ /* Not "<=", see above. */
+ assert(0 < rv2);
+ assert(rv2 <= sizeof(buf2));
+
+ /* Test that lseek rewinds to the beginning of the file. */
+ assert(rv1 == rv2);
+ assert(memcmp(buf1, buf2, rv1) == 0);
+
+	/* Contents of the file are not validated: this test is about lseek(). */
+
+ return 0;
+}
diff --git a/tools/testing/selftests/proc/proc-pid-vm.c b/tools/testing/selftests/proc/proc-pid-vm.c
index d04685771952..4e6a3e53f975 100644
--- a/tools/testing/selftests/proc/proc-pid-vm.c
+++ b/tools/testing/selftests/proc/proc-pid-vm.c
@@ -47,7 +47,11 @@
#include <sys/resource.h>
#include <linux/fs.h>
-#include "../kselftest.h"
+#ifndef __maybe_unused
+#define __maybe_unused __attribute__((__unused__))
+#endif
+
+#include "kselftest.h"
static inline long sys_execveat(int dirfd, const char *pathname, char **argv, char **envp, int flags)
{
@@ -218,12 +222,12 @@ static int make_exe(const uint8_t *payload, size_t len)
* 2: vsyscall VMA is r-xp vsyscall=emulate
*/
static volatile int g_vsyscall;
-static const char *str_vsyscall;
+static const char *str_vsyscall __maybe_unused;
-static const char str_vsyscall_0[] = "";
-static const char str_vsyscall_1[] =
+static const char str_vsyscall_0[] __maybe_unused = "";
+static const char str_vsyscall_1[] __maybe_unused =
"ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0 [vsyscall]\n";
-static const char str_vsyscall_2[] =
+static const char str_vsyscall_2[] __maybe_unused =
"ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall]\n";
#ifdef __x86_64__
diff --git a/tools/testing/selftests/proc/proc-pidns.c b/tools/testing/selftests/proc/proc-pidns.c
new file mode 100644
index 000000000000..25b9a2933c45
--- /dev/null
+++ b/tools/testing/selftests/proc/proc-pidns.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Author: Aleksa Sarai <cyphar@cyphar.com>
+ * Copyright (C) 2025 SUSE LLC.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <sched.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <sys/prctl.h>
+
+#include "kselftest_harness.h"
+
+#define ASSERT_ERRNO(expected, _t, seen) \
+ __EXPECT(expected, #expected, \
+ ({__typeof__(seen) _tmp_seen = (seen); \
+ _tmp_seen >= 0 ? _tmp_seen : -errno; }), #seen, _t, 1)
+
+#define ASSERT_ERRNO_EQ(expected, seen) \
+ ASSERT_ERRNO(expected, ==, seen)
+
+#define ASSERT_SUCCESS(seen) \
+ ASSERT_ERRNO(0, <=, seen)
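+/*
+ * E.g. ASSERT_ERRNO_EQ(-ENOENT, open(path, O_RDONLY)) passes only when the
+ * call fails with ENOENT, and ASSERT_SUCCESS(close(fd)) requires a
+ * non-negative return: a negative return value is replaced by -errno before
+ * the comparison.
+ */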
+
+static int touch(char *path)
+{
+ int fd = open(path, O_WRONLY|O_CREAT|O_CLOEXEC, 0644);
+ if (fd < 0)
+ return -1;
+ return close(fd);
+}
+
+FIXTURE(ns)
+{
+ int host_mntns, host_pidns;
+ int dummy_pidns;
+};
+
+FIXTURE_SETUP(ns)
+{
+ /* Stash the old mntns. */
+ self->host_mntns = open("/proc/self/ns/mnt", O_RDONLY|O_CLOEXEC);
+ ASSERT_SUCCESS(self->host_mntns);
+
+ /* Create a new mount namespace and make it private. */
+ ASSERT_SUCCESS(unshare(CLONE_NEWNS));
+ ASSERT_SUCCESS(mount(NULL, "/", NULL, MS_PRIVATE|MS_REC, NULL));
+
+ /*
+ * Create a proper tmpfs that we can use and will disappear once we
+ * leave this mntns.
+ */
+ ASSERT_SUCCESS(mount("tmpfs", "/tmp", "tmpfs", 0, NULL));
+
+ /*
+ * Create a pidns we can use for later tests. We need to fork off a
+ * child so that we get a usable nsfd that we can bind-mount and open.
+ */
+ ASSERT_SUCCESS(mkdir("/tmp/dummy", 0755));
+ ASSERT_SUCCESS(touch("/tmp/dummy/pidns"));
+ ASSERT_SUCCESS(mkdir("/tmp/dummy/proc", 0755));
+
+ self->host_pidns = open("/proc/self/ns/pid", O_RDONLY|O_CLOEXEC);
+ ASSERT_SUCCESS(self->host_pidns);
+ ASSERT_SUCCESS(unshare(CLONE_NEWPID));
+
+ pid_t pid = fork();
+ ASSERT_SUCCESS(pid);
+ if (!pid) {
+ prctl(PR_SET_PDEATHSIG, SIGKILL);
+ ASSERT_SUCCESS(mount("/proc/self/ns/pid", "/tmp/dummy/pidns", NULL, MS_BIND, NULL));
+ ASSERT_SUCCESS(mount("proc", "/tmp/dummy/proc", "proc", 0, NULL));
+ exit(0);
+ }
+
+ int wstatus;
+ ASSERT_EQ(waitpid(pid, &wstatus, 0), pid);
+ ASSERT_TRUE(WIFEXITED(wstatus));
+ ASSERT_EQ(WEXITSTATUS(wstatus), 0);
+
+ ASSERT_SUCCESS(setns(self->host_pidns, CLONE_NEWPID));
+
+ self->dummy_pidns = open("/tmp/dummy/pidns", O_RDONLY|O_CLOEXEC);
+ ASSERT_SUCCESS(self->dummy_pidns);
+}
+
+FIXTURE_TEARDOWN(ns)
+{
+ ASSERT_SUCCESS(setns(self->host_mntns, CLONE_NEWNS));
+ ASSERT_SUCCESS(close(self->host_mntns));
+
+ ASSERT_SUCCESS(close(self->host_pidns));
+ ASSERT_SUCCESS(close(self->dummy_pidns));
+}
+
+TEST_F(ns, pidns_mount_string_path)
+{
+ ASSERT_SUCCESS(mkdir("/tmp/proc-host", 0755));
+ ASSERT_SUCCESS(mount("proc", "/tmp/proc-host", "proc", 0, "pidns=/proc/self/ns/pid"));
+ ASSERT_SUCCESS(access("/tmp/proc-host/self/", X_OK));
+
+ ASSERT_SUCCESS(mkdir("/tmp/proc-dummy", 0755));
+ ASSERT_SUCCESS(mount("proc", "/tmp/proc-dummy", "proc", 0, "pidns=/tmp/dummy/pidns"));
+ ASSERT_ERRNO_EQ(-ENOENT, access("/tmp/proc-dummy/1/", X_OK));
+ ASSERT_ERRNO_EQ(-ENOENT, access("/tmp/proc-dummy/self/", X_OK));
+}
+
+TEST_F(ns, pidns_fsconfig_string_path)
+{
+ int fsfd = fsopen("proc", FSOPEN_CLOEXEC);
+ ASSERT_SUCCESS(fsfd);
+
+ ASSERT_SUCCESS(fsconfig(fsfd, FSCONFIG_SET_STRING, "pidns", "/tmp/dummy/pidns", 0));
+ ASSERT_SUCCESS(fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0));
+
+ int mountfd = fsmount(fsfd, FSMOUNT_CLOEXEC, 0);
+ ASSERT_SUCCESS(mountfd);
+
+ ASSERT_ERRNO_EQ(-ENOENT, faccessat(mountfd, "1/", X_OK, 0));
+ ASSERT_ERRNO_EQ(-ENOENT, faccessat(mountfd, "self/", X_OK, 0));
+
+ ASSERT_SUCCESS(close(fsfd));
+ ASSERT_SUCCESS(close(mountfd));
+}
+
+TEST_F(ns, pidns_fsconfig_fd)
+{
+ int fsfd = fsopen("proc", FSOPEN_CLOEXEC);
+ ASSERT_SUCCESS(fsfd);
+
+ ASSERT_SUCCESS(fsconfig(fsfd, FSCONFIG_SET_FD, "pidns", NULL, self->dummy_pidns));
+ ASSERT_SUCCESS(fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0));
+
+ int mountfd = fsmount(fsfd, FSMOUNT_CLOEXEC, 0);
+ ASSERT_SUCCESS(mountfd);
+
+ ASSERT_ERRNO_EQ(-ENOENT, faccessat(mountfd, "1/", X_OK, 0));
+ ASSERT_ERRNO_EQ(-ENOENT, faccessat(mountfd, "self/", X_OK, 0));
+
+ ASSERT_SUCCESS(close(fsfd));
+ ASSERT_SUCCESS(close(mountfd));
+}
+
+TEST_F(ns, pidns_reconfigure_remount)
+{
+ ASSERT_SUCCESS(mkdir("/tmp/proc", 0755));
+ ASSERT_SUCCESS(mount("proc", "/tmp/proc", "proc", 0, ""));
+
+ ASSERT_SUCCESS(access("/tmp/proc/1/", X_OK));
+ ASSERT_SUCCESS(access("/tmp/proc/self/", X_OK));
+
+ ASSERT_ERRNO_EQ(-EBUSY, mount(NULL, "/tmp/proc", NULL, MS_REMOUNT, "pidns=/tmp/dummy/pidns"));
+
+ ASSERT_SUCCESS(access("/tmp/proc/1/", X_OK));
+ ASSERT_SUCCESS(access("/tmp/proc/self/", X_OK));
+}
+
+TEST_F(ns, pidns_reconfigure_fsconfig_string_path)
+{
+ int fsfd = fsopen("proc", FSOPEN_CLOEXEC);
+ ASSERT_SUCCESS(fsfd);
+
+ ASSERT_SUCCESS(fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0));
+
+ int mountfd = fsmount(fsfd, FSMOUNT_CLOEXEC, 0);
+ ASSERT_SUCCESS(mountfd);
+
+ ASSERT_SUCCESS(faccessat(mountfd, "1/", X_OK, 0));
+ ASSERT_SUCCESS(faccessat(mountfd, "self/", X_OK, 0));
+
+ ASSERT_ERRNO_EQ(-EBUSY, fsconfig(fsfd, FSCONFIG_SET_STRING, "pidns", "/tmp/dummy/pidns", 0));
+ ASSERT_SUCCESS(fsconfig(fsfd, FSCONFIG_CMD_RECONFIGURE, NULL, NULL, 0)); /* noop */
+
+ ASSERT_SUCCESS(faccessat(mountfd, "1/", X_OK, 0));
+ ASSERT_SUCCESS(faccessat(mountfd, "self/", X_OK, 0));
+
+ ASSERT_SUCCESS(close(fsfd));
+ ASSERT_SUCCESS(close(mountfd));
+}
+
+TEST_F(ns, pidns_reconfigure_fsconfig_fd)
+{
+ int fsfd = fsopen("proc", FSOPEN_CLOEXEC);
+ ASSERT_SUCCESS(fsfd);
+
+ ASSERT_SUCCESS(fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0));
+
+ int mountfd = fsmount(fsfd, FSMOUNT_CLOEXEC, 0);
+ ASSERT_SUCCESS(mountfd);
+
+ ASSERT_SUCCESS(faccessat(mountfd, "1/", X_OK, 0));
+ ASSERT_SUCCESS(faccessat(mountfd, "self/", X_OK, 0));
+
+ ASSERT_ERRNO_EQ(-EBUSY, fsconfig(fsfd, FSCONFIG_SET_FD, "pidns", NULL, self->dummy_pidns));
+ ASSERT_SUCCESS(fsconfig(fsfd, FSCONFIG_CMD_RECONFIGURE, NULL, NULL, 0)); /* noop */
+
+ ASSERT_SUCCESS(faccessat(mountfd, "1/", X_OK, 0));
+ ASSERT_SUCCESS(faccessat(mountfd, "self/", X_OK, 0));
+
+ ASSERT_SUCCESS(close(fsfd));
+ ASSERT_SUCCESS(close(mountfd));
+}
+
+TEST_HARNESS_MAIN
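
fsopen(), fsconfig(), and fsmount() only gained libc wrappers in recent glibc
releases, so the harness presumably supplies them elsewhere. A minimal sketch
of raw-syscall wrappers under that assumption; the sys_ prefix is
hypothetical:

	#include <sys/syscall.h>
	#include <unistd.h>

	static int sys_fsopen(const char *fsname, unsigned int flags)
	{
		return syscall(SYS_fsopen, fsname, flags);
	}

	static int sys_fsconfig(int fd, unsigned int cmd, const char *key,
				const void *value, int aux)
	{
		return syscall(SYS_fsconfig, fd, cmd, key, value, aux);
	}

	static int sys_fsmount(int fd, unsigned int flags, unsigned int attr_flags)
	{
		return syscall(SYS_fsmount, fd, flags, attr_flags);
	}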
diff --git a/tools/testing/selftests/ptp/testptp.c b/tools/testing/selftests/ptp/testptp.c
index edc08a4433fd..ed1e2886ba3c 100644
--- a/tools/testing/selftests/ptp/testptp.c
+++ b/tools/testing/selftests/ptp/testptp.c
@@ -120,6 +120,7 @@ static void usage(char *progname)
" -c query the ptp clock's capabilities\n"
" -d name device to open\n"
" -e val read 'val' external time stamp events\n"
+ " -E val enable rising (1), falling (2), or both (3) edges\n"
" -f val adjust the ptp clock frequency by 'val' ppb\n"
" -F chan Enable single channel mask and keep device open for debugfs verification.\n"
" -g get the ptp clock time\n"
@@ -178,6 +179,7 @@ int main(int argc, char *argv[])
int adjphase = 0;
int capabilities = 0;
int extts = 0;
+ int edge = 0;
int flagtest = 0;
int gettime = 0;
int index = 0;
@@ -202,7 +204,7 @@ int main(int argc, char *argv[])
progname = strrchr(argv[0], '/');
progname = progname ? 1+progname : argv[0];
- while (EOF != (c = getopt(argc, argv, "cd:e:f:F:ghH:i:k:lL:n:o:p:P:rsSt:T:w:x:Xy:z"))) {
+ while (EOF != (c = getopt(argc, argv, "cd:e:E:f:F:ghH:i:k:lL:n:o:p:P:rsSt:T:w:x:Xy:z"))) {
switch (c) {
case 'c':
capabilities = 1;
@@ -213,6 +215,11 @@ int main(int argc, char *argv[])
case 'e':
extts = atoi(optarg);
break;
+ case 'E':
+ edge = atoi(optarg);
+ edge = (edge & 1 ? PTP_RISING_EDGE : 0) |
+ (edge & 2 ? PTP_FALLING_EDGE : 0);
+ break;
case 'f':
adjfreq = atoi(optarg);
break;
@@ -444,7 +451,7 @@ int main(int argc, char *argv[])
if (!readonly) {
memset(&extts_request, 0, sizeof(extts_request));
extts_request.index = index;
- extts_request.flags = PTP_ENABLE_FEATURE;
+ extts_request.flags = PTP_ENABLE_FEATURE | edge;
if (ioctl(fd, PTP_EXTTS_REQUEST, &extts_request)) {
perror("PTP_EXTTS_REQUEST");
extts = 0;
diff --git a/tools/testing/selftests/ptrace/.gitignore b/tools/testing/selftests/ptrace/.gitignore
index b7dde152e75a..f6be8efd57ea 100644
--- a/tools/testing/selftests/ptrace/.gitignore
+++ b/tools/testing/selftests/ptrace/.gitignore
@@ -3,3 +3,4 @@ get_syscall_info
get_set_sud
peeksiginfo
vmaccess
+set_syscall_info
diff --git a/tools/testing/selftests/ptrace/get_set_sud.c b/tools/testing/selftests/ptrace/get_set_sud.c
index 5297b10d25c3..2e619c7599bb 100644
--- a/tools/testing/selftests/ptrace/get_set_sud.c
+++ b/tools/testing/selftests/ptrace/get_set_sud.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#include <stdio.h>
#include <string.h>
#include <errno.h>
diff --git a/tools/testing/selftests/ptrace/get_syscall_info.c b/tools/testing/selftests/ptrace/get_syscall_info.c
index 5bcd1c7b5be6..3f5c3a9fdaba 100644
--- a/tools/testing/selftests/ptrace/get_syscall_info.c
+++ b/tools/testing/selftests/ptrace/get_syscall_info.c
@@ -7,7 +7,7 @@
* matches userspace expectations.
*/
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#include <err.h>
#include <signal.h>
#include <asm/unistd.h>
diff --git a/tools/testing/selftests/ptrace/peeksiginfo.c b/tools/testing/selftests/ptrace/peeksiginfo.c
index a6884f66dc01..2f345d11e4b8 100644
--- a/tools/testing/selftests/ptrace/peeksiginfo.c
+++ b/tools/testing/selftests/ptrace/peeksiginfo.c
@@ -199,7 +199,7 @@ int main(int argc, char *argv[])
/*
* Dump signal from the process-wide queue.
- * The number of signals is not multible to the buffer size
+	 * The number of signals is not a multiple of the buffer size
*/
if (check_direct_path(child, 1, 3))
goto out;
diff --git a/tools/testing/selftests/ptrace/set_syscall_info.c b/tools/testing/selftests/ptrace/set_syscall_info.c
index 4198248ef874..1cc411a41cd6 100644
--- a/tools/testing/selftests/ptrace/set_syscall_info.c
+++ b/tools/testing/selftests/ptrace/set_syscall_info.c
@@ -7,7 +7,7 @@
* matches userspace expectations.
*/
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#include <err.h>
#include <fcntl.h>
#include <signal.h>
diff --git a/tools/testing/selftests/ptrace/vmaccess.c b/tools/testing/selftests/ptrace/vmaccess.c
index 4db327b44586..3801b5831527 100644
--- a/tools/testing/selftests/ptrace/vmaccess.c
+++ b/tools/testing/selftests/ptrace/vmaccess.c
@@ -7,7 +7,7 @@
* when de_thread is blocked with ->cred_guard_mutex held.
*/
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#include <stdio.h>
#include <fcntl.h>
#include <pthread.h>
diff --git a/tools/testing/selftests/rcutorture/bin/jitter.sh b/tools/testing/selftests/rcutorture/bin/jitter.sh
index fd1ffaa5a135..3c1e5d3f8805 100755
--- a/tools/testing/selftests/rcutorture/bin/jitter.sh
+++ b/tools/testing/selftests/rcutorture/bin/jitter.sh
@@ -39,6 +39,22 @@ do
fi
done
+# Uses global variables startsecs, startns, endsecs, endns, and limit.
+# Exit code is success for time not yet elapsed and failure otherwise.
+function timecheck {
+ local done=`awk -v limit=$limit \
+ -v startsecs=$startsecs \
+ -v startns=$startns \
+ -v endsecs=$endsecs \
+ -v endns=$endns < /dev/null '
+ BEGIN {
+ delta = (endsecs - startsecs) * 1000 * 1000;
+ delta += int((endns - startns) / 1000);
+ print delta >= limit;
+ }'`
+ return $done
+}
+
while :
do
# Check for done.
@@ -85,15 +101,20 @@ do
n=$(($n+1))
sleep .$sleeptime
- # Spin a random duration
+ # Spin a random duration, but with rather coarse granularity.
limit=`awk -v me=$me -v n=$n -v spinmax=$spinmax 'BEGIN {
srand(n + me + systime());
printf("%06d", int(rand() * spinmax));
}' < /dev/null`
n=$(($n+1))
- for i in {1..$limit}
+ startsecs=`date +%s`
+ startns=`date +%N`
+	endsecs=$startsecs
+	endns=$startns
+ while timecheck
do
- echo > /dev/null
+ endsecs=`date +%s`
+ endns=`date +%N`
done
done
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-again.sh b/tools/testing/selftests/rcutorture/bin/kvm-again.sh
index 88ca4e368489..b5239b52cb5d 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-again.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-again.sh
@@ -31,7 +31,7 @@ fi
if ! cp "$oldrun/scenarios" $T/scenarios.oldrun
then
# Later on, can reconstitute this from console.log files.
- echo Prior run batches file does not exist: $oldrun/batches
+ echo Prior run scenarios file does not exist: $oldrun/scenarios
exit 1
fi
@@ -68,7 +68,7 @@ usage () {
echo " --datestamp string"
echo " --dryrun"
echo " --duration minutes | <seconds>s | <hours>h | <days>d"
- echo " --link hard|soft|copy"
+ echo " --link hard|soft|copy|inplace|inplace-force"
echo " --remote"
echo " --rundir /new/res/path"
echo "Command line: $scriptname $args"
@@ -121,7 +121,7 @@ do
shift
;;
--link)
- checkarg --link "hard|soft|copy" "$#" "$2" 'hard\|soft\|copy' '^--'
+ checkarg --link "hard|soft|copy|inplace|inplace-force" "$#" "$2" 'hard\|soft\|copy\|inplace\|inplace-force' '^--'
case "$2" in
copy)
arg_link="cp -R"
@@ -132,6 +132,14 @@ do
soft)
arg_link="cp -Rs"
;;
+ inplace)
+ arg_link="inplace"
+ rundir="$oldrun"
+ ;;
+ inplace-force)
+ arg_link="inplace-force"
+ rundir="$oldrun"
+ ;;
esac
shift
;;
@@ -172,21 +180,37 @@ fi
echo ---- Re-run results directory: $rundir
-# Copy old run directory tree over and adjust.
-mkdir -p "`dirname "$rundir"`"
-if ! $arg_link "$oldrun" "$rundir"
-then
- echo "Cannot copy from $oldrun to $rundir."
- usage
-fi
-rm -f "$rundir"/*/{console.log,console.log.diags,qemu_pid,qemu-pid,qemu-retval,Warnings,kvm-test-1-run.sh.out,kvm-test-1-run-qemu.sh.out,vmlinux} "$rundir"/log
-touch "$rundir/log"
-echo $scriptname $args | tee -a "$rundir/log"
-echo $oldrun > "$rundir/re-run"
-if ! test -d "$rundir/../../bin"
+if test "$oldrun" != "$rundir"
then
- $arg_link "$oldrun/../../bin" "$rundir/../.."
+ # Copy old run directory tree over and adjust.
+ mkdir -p "`dirname "$rundir"`"
+ if ! $arg_link "$oldrun" "$rundir"
+ then
+ echo "Cannot copy from $oldrun to $rundir."
+ usage
+ fi
+ rm -f "$rundir"/*/{console.log,console.log.diags,qemu_pid,qemu-pid,qemu-retval,Warnings,kvm-test-1-run.sh.out,kvm-test-1-run-qemu.sh.out,vmlinux} "$rundir"/log
+ touch "$rundir/log"
+ echo $scriptname $args | tee -a "$rundir/log"
+ echo $oldrun > "$rundir/re-run"
+ if ! test -d "$rundir/../../bin"
+ then
+ $arg_link "$oldrun/../../bin" "$rundir/../.."
+ fi
+else
+ # Check for a run having already happened.
+ find "$rundir" -name console.log -print > $T/oldrun-console.log
+ if test -s $T/oldrun-console.log
+ then
+ echo Run already took place in $rundir
+ if test "$arg_link" = inplace
+ then
+ usage
+ fi
+ fi
fi
+
+# Find runs to be done based on their qemu-cmd files.
for i in $rundir/*/qemu-cmd
do
cp "$i" $T
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-build.sh b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
index 11f8d232b0ee..3edfd064ef81 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-build.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
@@ -44,7 +44,7 @@ fi
ncpus="`getconf _NPROCESSORS_ONLN`"
make -j$((2 * ncpus)) $TORTURE_KMAKE_ARG > $resdir/Make.out 2>&1
retval=$?
-if test $retval -ne 0 || grep "rcu[^/]*": < $resdir/Make.out | grep -E -q "Stop|Error|error:|warning:" || grep -E -q "Stop|Error|error:" < $resdir/Make.out
+if test $retval -ne 0 || grep "rcu[^/]*": < $resdir/Make.out | grep -E -q "Stop|ERROR|Error|error:|warning:" || grep -E -q "Stop|ERROR|Error|error:" < $resdir/Make.out
then
echo Kernel build error
grep -E "Stop|Error|error:|warning:" < $resdir/Make.out
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-series.sh b/tools/testing/selftests/rcutorture/bin/kvm-series.sh
new file mode 100755
index 000000000000..2ff905a1853b
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/kvm-series.sh
@@ -0,0 +1,116 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Usage: kvm-series.sh config-list commit-id-list [ kvm.sh parameters ]
+#
+# Takes a quoted list of unadorned configs ("TREE01 SRCU-P" but not
+# "CFLIST" or "3*TRACE01") and a git range identifying the commits to test,
+# then runs each commit through the specified list of configs using kvm.sh.
+# The runs are grouped into a -series/config/commit directory tree.
+# Each run defaults to a duration of one minute.
+#
+# Run in top-level Linux source directory. Please note that this is in
+# no way a replacement for "git bisect"!!!
+#
+# This script is intended to replace kvm-check-branches.sh by providing
+# ease of use and faster execution.
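+#
+# Example (hypothetical invocation):
+#	kvm-series.sh "TREE01 SRCU-P" HEAD~3.. --kcsan
+# runs each of the last three commits through TREE01 and SRCU-P for one
+# minute apiece, with KCSAN enabled.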
+
+T="`mktemp -d ${TMPDIR-/tmp}/kvm-series.sh.XXXXXX`"
+trap 'rm -rf $T' 0
+
+scriptname=$0
+args="$*"
+
+config_list="${1}"
+if test -z "${config_list}"
+then
+ echo "$0: Need a quoted list of --config arguments for first argument."
+ exit 1
+fi
+if test -z "${config_list}" || echo "${config_list}" | grep -q '\*'
+then
+ echo "$0: Repetition ('*') not allowed in config list."
+ exit 1
+fi
+
+commit_list="${2}"
+if test -z "${commit_list}"
+then
+ echo "$0: Need a list of commits (e.g., HEAD^^^..) for second argument."
+ exit 2
+fi
+git log --pretty=format:"%h" "${commit_list}" > $T/commits
+ret=$?
+if test "${ret}" -ne 0
+then
+ echo "$0: Invalid commit list ('${commit_list}')."
+ exit 2
+fi
+sha1_list=`cat $T/commits`
+
+shift
+shift
+
+RCUTORTURE="`pwd`/tools/testing/selftests/rcutorture"; export RCUTORTURE
+PATH=${RCUTORTURE}/bin:$PATH; export PATH
+. functions.sh
+
+ret=0
+nfail=0
+nsuccess=0
+faillist=
+successlist=
+cursha1="`git rev-parse --abbrev-ref HEAD`"
+ds="`date +%Y.%m.%d-%H.%M.%S`-series"
+startdate="`date`"
+starttime="`get_starttime`"
+
+echo " --- " $scriptname $args | tee -a $T/log
+echo " --- Results directory: " $ds | tee -a $T/log
+
+for config in ${config_list}
+do
+ sha_n=0
+ for sha in ${sha1_list}
+ do
+ sha1=${sha_n}.${sha} # Enable "sort -k1nr" to list commits in order.
+ echo Starting ${config}/${sha1} at `date` | tee -a $T/log
+ git checkout "${sha}"
+ time tools/testing/selftests/rcutorture/bin/kvm.sh --configs "$config" --datestamp "$ds/${config}/${sha1}" --duration 1 "$@"
+ curret=$?
+ if test "${curret}" -ne 0
+ then
+ nfail=$((nfail+1))
+ faillist="$faillist ${config}/${sha1}(${curret})"
+ else
+ nsuccess=$((nsuccess+1))
+ successlist="$successlist ${config}/${sha1}"
+ # Successful run, so remove large files.
+ rm -f ${RCUTORTURE}/$ds/${config}/${sha1}/{vmlinux,bzImage,System.map,Module.symvers}
+ fi
+ if test "${ret}" -eq 0
+ then
+ ret=${curret}
+ fi
+ sha_n=$((sha_n+1))
+ done
+done
+git checkout "${cursha1}"
+
+echo ${nsuccess} SUCCESSES: | tee -a $T/log
+echo ${successlist} | fmt | tee -a $T/log
+echo | tee -a $T/log
+echo ${nfail} FAILURES: | tee -a $T/log
+echo ${faillist} | fmt | tee -a $T/log
+if test -n "${faillist}"
+then
+ echo | tee -a $T/log
+ echo Failures across commits: | tee -a $T/log
+ echo ${faillist} | tr ' ' '\012' | sed -e 's,^[^/]*/,,' -e 's/([0-9]*)//' |
+ sort | uniq -c | sort -k2n | tee -a $T/log
+fi
+echo Started at $startdate, ended at `date`, duration `get_starttime_duration $starttime`. | tee -a $T/log
+echo Summary: Successes: ${nsuccess} Failures: ${nfail} | tee -a $T/log
+cp $T/log tools/testing/selftests/rcutorture/res/${ds}
+
+exit "${ret}"
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index 42e5e8597a1a..fff15821c44c 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -199,7 +199,7 @@ do
fi
;;
--kconfig|--kconfigs)
- checkarg --kconfig "(Kconfig options)" $# "$2" '^\(#CHECK#\)\?CONFIG_[A-Z0-9_]\+=\([ynm]\|[0-9]\+\|"[^"]*"\)\( \(#CHECK#\)\?CONFIG_[A-Z0-9_]\+=\([ynm]\|[0-9]\+\|"[^"]*"\)\)*$' '^error$'
+ checkarg --kconfig "(Kconfig options)" $# "$2" '^\(#CHECK#\)\?CONFIG_[A-Z0-9_]\+=\([ynm]\|-\?[0-9]\+\|"[^"]*"\)\( \+\(#CHECK#\)\?CONFIG_[A-Z0-9_]\+=\([ynm]\|-\?[0-9]\+\|"[^"]*"\)\)* *$' '^error$'
TORTURE_KCONFIG_ARG="`echo "$TORTURE_KCONFIG_ARG $2" | sed -e 's/^ *//' -e 's/ *$//'`"
shift
;;
@@ -442,18 +442,7 @@ echo $scriptname $args
touch $resdir/$ds/log
echo $scriptname $args >> $resdir/$ds/log
echo ${TORTURE_SUITE} > $resdir/$ds/torture_suite
-echo Build directory: `pwd` > $resdir/$ds/testid.txt
-if test -d .git
-then
- echo Current commit: `git rev-parse HEAD` >> $resdir/$ds/testid.txt
- echo >> $resdir/$ds/testid.txt
- echo ' ---' Output of "'"git status"'": >> $resdir/$ds/testid.txt
- git status >> $resdir/$ds/testid.txt
- echo >> $resdir/$ds/testid.txt
- echo >> $resdir/$ds/testid.txt
- echo ' ---' Output of "'"git diff HEAD"'": >> $resdir/$ds/testid.txt
- git diff HEAD >> $resdir/$ds/testid.txt
-fi
+mktestid.sh $resdir/$ds
___EOF___
kvm-assign-cpus.sh /sys/devices/system/node > $T/cpuarray.awk
kvm-get-cpus-script.sh $T/cpuarray.awk $T/dumpbatches.awk
diff --git a/tools/testing/selftests/rcutorture/bin/mktestid.sh b/tools/testing/selftests/rcutorture/bin/mktestid.sh
new file mode 100755
index 000000000000..16f9907a4dae
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/mktestid.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Create a testid.txt file in the specified directory.
+#
+# Usage: mktestid.sh dirpath
+#
+# Copyright (C) Meta Platforms, Inc. 2025
+#
+# Author: Paul E. McKenney <paulmck@kernel.org>
+
+resdir="$1"
+if test -z "${resdir}" || ! test -d "${resdir}" || ! test -w "${resdir}"
+then
+	echo Path '"'${resdir}'"' is not a writable directory, no ${resdir}/testid.txt.
+ exit 1
+fi
+echo Build directory: `pwd` > ${resdir}/testid.txt
+if test -d .git
+then
+ echo Current commit: `git rev-parse HEAD` >> ${resdir}/testid.txt
+ echo >> ${resdir}/testid.txt
+ echo ' ---' Output of "'"git status"'": >> ${resdir}/testid.txt
+ git status >> ${resdir}/testid.txt
+ echo >> ${resdir}/testid.txt
+ echo >> ${resdir}/testid.txt
+ echo ' ---' Output of "'"git diff HEAD"'": >> ${resdir}/testid.txt
+ git diff HEAD >> ${resdir}/testid.txt
+fi
diff --git a/tools/testing/selftests/rcutorture/bin/torture.sh b/tools/testing/selftests/rcutorture/bin/torture.sh
index e03fdaca89b3..a33ba109ef0b 100755
--- a/tools/testing/selftests/rcutorture/bin/torture.sh
+++ b/tools/testing/selftests/rcutorture/bin/torture.sh
@@ -30,6 +30,15 @@ then
VERBOSE_BATCH_CPUS=0
fi
+# Machine architecture? ("uname -p" is said to be less portable.)
+thisarch="`uname -m`"
+if test "${thisarch}" = aarch64
+then
+ ifnotaarch64=no
+else
+ ifnotaarch64=yes
+fi
+
# Configurations/scenarios.
configs_rcutorture=
configs_locktorture=
@@ -55,9 +64,9 @@ do_normal=yes
explicit_normal=no
do_kasan=yes
do_kcsan=no
-do_clocksourcewd=yes
+do_clocksourcewd="${ifnotaarch64}"
do_rt=yes
-do_rcutasksflavors=yes
+do_rcutasksflavors="${ifnotaarch64}" # FIXME: Back to "yes" when SMP=n auto-avoided
do_srcu_lockdep=yes
do_rcu_rust=no
@@ -85,6 +94,7 @@ usage () {
echo " --do-kvfree / --do-no-kvfree / --no-kvfree"
echo " --do-locktorture / --do-no-locktorture / --no-locktorture"
echo " --do-none"
+ echo " --do-normal / --do-no-normal / --no-normal"
echo " --do-rcuscale / --do-no-rcuscale / --no-rcuscale"
echo " --do-rcutasksflavors / --do-no-rcutasksflavors / --no-rcutasksflavors"
echo " --do-rcutorture / --do-no-rcutorture / --no-rcutorture"
@@ -124,7 +134,7 @@ do
;;
--do-all|--doall)
do_allmodconfig=yes
- do_rcutasksflavor=yes
+ do_rcutasksflavors="${ifnotaarch64}" # FIXME: Back to "yes" when SMP=n auto-avoided
do_rcutorture=yes
do_locktorture=yes
do_scftorture=yes
@@ -136,7 +146,7 @@ do
explicit_normal=no
do_kasan=yes
do_kcsan=yes
- do_clocksourcewd=yes
+ do_clocksourcewd="${ifnotaarch64}"
do_srcu_lockdep=yes
;;
--do-allmodconfig|--do-no-allmodconfig|--no-allmodconfig)
@@ -274,7 +284,7 @@ then
configs_rcutorture=CFLIST
fi
duration_rcutorture=$((duration_base*duration_rcutorture_frac/10))
-if test "$duration_rcutorture" -eq 0
+if test "$duration_rcutorture" -eq 0 && test "$do_rcutorture" = "yes"
then
echo " --- Zero time for rcutorture, disabling" | tee -a $T/log
do_rcutorture=no
@@ -286,7 +296,7 @@ then
configs_locktorture=CFLIST
fi
duration_locktorture=$((duration_base*duration_locktorture_frac/10))
-if test "$duration_locktorture" -eq 0
+if test "$duration_locktorture" -eq 0 && test "$do_locktorture" = "yes"
then
echo " --- Zero time for locktorture, disabling" | tee -a $T/log
do_locktorture=no
@@ -298,12 +308,19 @@ then
configs_scftorture=CFLIST
fi
duration_scftorture=$((duration_base*duration_scftorture_frac/10))
-if test "$duration_scftorture" -eq 0
+if test "$duration_scftorture" -eq 0 && test "$do_scftorture" = "yes"
then
echo " --- Zero time for scftorture, disabling" | tee -a $T/log
do_scftorture=no
fi
+# CONFIG_EXPERT=y is currently required for arm64 KCSAN runs.
+kcsan_expert=
+if test "${thisarch}" = aarch64
+then
+ kcsan_expert="CONFIG_EXPERT=y"
+fi
+
touch $T/failures
touch $T/successes
@@ -362,13 +379,19 @@ function torture_set {
then
curflavor=$flavor
torture_one "$@"
- mv $T/last-resdir $T/last-resdir-nodebug || :
+ if test -e $T/last-resdir
+ then
+ mv $T/last-resdir $T/last-resdir-nodebug || :
+ fi
fi
if test "$do_kasan" = "yes"
then
curflavor=${flavor}-kasan
torture_one "$@" --kasan
- mv $T/last-resdir $T/last-resdir-kasan || :
+ if test -e $T/last-resdir
+ then
+ mv $T/last-resdir $T/last-resdir-kasan || :
+ fi
fi
if test "$do_kcsan" = "yes"
then
@@ -378,8 +401,16 @@ function torture_set {
kcsan_kmake_tag="--kmake-args"
cur_kcsan_kmake_args="$kcsan_kmake_args"
fi
- torture_one "$@" --kconfig "CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_PROVE_LOCKING=y" $kcsan_kmake_tag $cur_kcsan_kmake_args --kcsan
- mv $T/last-resdir $T/last-resdir-kcsan || :
+ chk_rdr_state=
+ if test "${flavor}" = rcutorture
+ then
+ chk_rdr_state="CONFIG_RCU_TORTURE_TEST_CHK_RDR_STATE=y"
+ fi
+ torture_one "$@" --kconfig "CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_PROVE_LOCKING=y ${kcsan_expert} ${chk_rdr_state}" $kcsan_kmake_tag $cur_kcsan_kmake_args --kcsan
+ if test -e $T/last-resdir
+ then
+ mv $T/last-resdir $T/last-resdir-kcsan || :
+ fi
fi
}
@@ -389,6 +420,7 @@ then
echo " --- allmodconfig:" Start `date` | tee -a $T/log
amcdir="tools/testing/selftests/rcutorture/res/$ds/allmodconfig"
mkdir -p "$amcdir"
+ mktestid.sh "$amcdir"
echo " --- make clean" | tee $amcdir/log > "$amcdir/Make.out" 2>&1
make -j$MAKE_ALLOTED_CPUS clean >> "$amcdir/Make.out" 2>&1
retcode=$?
@@ -407,6 +439,10 @@ then
make -j$MAKE_ALLOTED_CPUS >> "$amcdir/Make.out" 2>&1
retcode="$?"
echo $retcode > "$amcdir/Make.exitcode"
+ if grep -E -q "Stop|ERROR|Error|error:|warning:" < "$amcdir/Make.out"
+ then
+ retcode=99
+ fi
buildphase='"make"'
fi
if test "$retcode" -eq 0
@@ -495,6 +531,7 @@ then
echo " --- do-rcu-rust:" Start `date` | tee -a $T/log
rrdir="tools/testing/selftests/rcutorture/res/$ds/results-rcu-rust"
mkdir -p "$rrdir"
+ mktestid.sh "$rrdir"
echo " --- make LLVM=1 rustavailable " | tee -a $rrdir/log > $rrdir/rustavailable.out
make LLVM=1 rustavailable > $T/rustavailable.out 2>&1
retcode=$?
@@ -681,7 +718,14 @@ nfailures=0
echo FAILURES: | tee -a $T/log
if test -s "$T/failures"
then
- awk < "$T/failures" -v sq="'" '{ print "echo " sq $0 sq; print "sed -e " sq "1,/^ --- .* Test summary:$/d" sq " " $2 "/log | grep Summary: | sed -e " sq "s/^[^S]*/ /" sq; }' | sh | tee -a $T/log | tee "$T/failuresum"
+ awk < "$T/failures" -v sq="'" '
+ {
+ print "echo " sq $0 sq;
+ if ($2 != "")
+ print "sed -e " sq "1,/^ --- .* Test summary:$/d" sq " " $2 "/log | grep Summary: | sed -e " sq "s/^[^S]*/ /" sq;
+ else
+ print "echo " sq " " sq "Run failed to produce results directory.";
+ }' | sh | tee -a $T/log | tee "$T/failuresum"
nfailures="`wc -l "$T/failures" | awk '{ print $1 }'`"
grep "^ Summary: " "$T/failuresum" |
grep -v '^ Summary: Bugs: [0-9]* (all bugs kcsan)$' > "$T/nonkcsan"
@@ -691,15 +735,18 @@ then
fi
ret=2
fi
-if test "$do_kcsan" = "yes"
+if test "$do_kcsan" = "yes" && test -e tools/testing/selftests/rcutorture/res/$ds
then
TORTURE_KCONFIG_KCSAN_ARG=1 tools/testing/selftests/rcutorture/bin/kcsan-collapse.sh tools/testing/selftests/rcutorture/res/$ds > tools/testing/selftests/rcutorture/res/$ds/kcsan.sum
fi
echo Started at $startdate, ended at `date`, duration `get_starttime_duration $starttime`. | tee -a $T/log
echo Summary: Successes: $nsuccesses Failures: $nfailures. | tee -a $T/log
-tdir="`cat $T/successes $T/failures | head -1 | awk '{ print $NF }' | sed -e 's,/[^/]\+/*$,,'`"
-find "$tdir" -name 'ConfigFragment.diags' -print > $T/configerrors
-find "$tdir" -name 'Make.out.diags' -print > $T/builderrors
+tdir="`cat $T/successes $T/failures | awk 'NF > 1 { print $NF }' | head -1 | sed -e 's,/[^/]\+/*$,,'`"
+if test -n "$tdir"
+then
+ find "$tdir" -name 'ConfigFragment.diags' -print > $T/configerrors
+ find "$tdir" -name 'Make.out.diags' -print > $T/builderrors
+fi
if test -s "$T/configerrors"
then
echo " Scenarios with .config errors: `wc -l "$T/configerrors" | awk '{ print $1 }'`"
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/BUSTED b/tools/testing/selftests/rcutorture/configs/rcu/BUSTED
index 48d8a245c7fa..7d75f4b94943 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/BUSTED
+++ b/tools/testing/selftests/rcutorture/configs/rcu/BUSTED
@@ -5,3 +5,6 @@ CONFIG_HOTPLUG_CPU=y
CONFIG_PREEMPT_NONE=n
CONFIG_PREEMPT_VOLUNTARY=n
CONFIG_PREEMPT=y
+CONFIG_RCU_TORTURE_TEST_CHK_RDR_STATE=y
+CONFIG_RCU_TORTURE_TEST_LOG_CPU=y
+CONFIG_RCU_TORTURE_TEST_LOG_GP=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/CFLIST b/tools/testing/selftests/rcutorture/configs/rcu/CFLIST
index 45f572570a8c..98b6175e5aa0 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/CFLIST
+++ b/tools/testing/selftests/rcutorture/configs/rcu/CFLIST
@@ -5,7 +5,6 @@ TREE04
TREE05
TREE07
TREE09
-SRCU-L
SRCU-N
SRCU-P
SRCU-T
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-L b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-L
deleted file mode 100644
index 3b4fa8dbef8a..000000000000
--- a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-L
+++ /dev/null
@@ -1,10 +0,0 @@
-CONFIG_RCU_TRACE=n
-CONFIG_SMP=y
-CONFIG_NR_CPUS=6
-CONFIG_HOTPLUG_CPU=y
-CONFIG_PREEMPT_NONE=y
-CONFIG_PREEMPT_VOLUNTARY=n
-CONFIG_PREEMPT=n
-#CHECK#CONFIG_RCU_EXPERT=n
-CONFIG_KPROBES=n
-CONFIG_FTRACE=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-L.boot b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-L.boot
deleted file mode 100644
index 0207b3138c5b..000000000000
--- a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-L.boot
+++ /dev/null
@@ -1,3 +0,0 @@
-rcutorture.torture_type=srcu
-rcutorture.reader_flavor=0x4
-rcutorture.fwd_progress=3
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE04 b/tools/testing/selftests/rcutorture/configs/rcu/TREE04
index dc4985064b3a..67caf4276bb0 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE04
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE04
@@ -16,3 +16,4 @@ CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
CONFIG_RCU_EXPERT=y
CONFIG_RCU_EQS_DEBUG=y
CONFIG_RCU_LAZY=y
+CONFIG_RCU_DYNTICKS_TORTURE=y
diff --git a/tools/testing/selftests/resctrl/resctrl.h b/tools/testing/selftests/resctrl/resctrl.h
index cd3adfc14969..3c51bdac2dfa 100644
--- a/tools/testing/selftests/resctrl/resctrl.h
+++ b/tools/testing/selftests/resctrl/resctrl.h
@@ -23,7 +23,7 @@
#include <asm/unistd.h>
#include <linux/perf_event.h>
#include <linux/compiler.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#define MB (1024 * 1024)
#define RESCTRL_PATH "/sys/fs/resctrl"
diff --git a/tools/testing/selftests/ring-buffer/map_test.c b/tools/testing/selftests/ring-buffer/map_test.c
index a58f520f2f41..f24677737066 100644
--- a/tools/testing/selftests/ring-buffer/map_test.c
+++ b/tools/testing/selftests/ring-buffer/map_test.c
@@ -17,7 +17,7 @@
#include <sys/ioctl.h>
#include "../user_events/user_events_selftests.h" /* share tracefs setup */
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#define TRACEFS_ROOT "/sys/kernel/tracing"
diff --git a/tools/testing/selftests/riscv/README b/tools/testing/selftests/riscv/README
new file mode 100644
index 000000000000..443da395da68
--- /dev/null
+++ b/tools/testing/selftests/riscv/README
@@ -0,0 +1,24 @@
+KSelfTest RISC-V
+================
+
+- These tests are RISC-V specific, so they are skipped entirely (neither
+  built nor run) when the environment variable ARCH is anything other
+  than 'riscv'.
+
+- Given the above, RISC-V KSFT tests can be run within the
+ KSelfTest framework using standard Linux top-level-makefile targets:
+
+ $ make TARGETS=riscv kselftest-clean
+ $ make TARGETS=riscv kselftest
+
+ or
+
+ $ make -C tools/testing/selftests TARGETS=riscv \
+ INSTALL_PATH=<your-installation-path> install
+
+ or, alternatively, only specific riscv/ subtargets can be picked:
+
+ $ make -C tools/testing/selftests TARGETS=riscv RISCV_SUBTARGETS="mm vector" \
+ INSTALL_PATH=<your-installation-path> install
+
+ Further details on building and running KSFT can be found in:
+ Documentation/dev-tools/kselftest.rst
diff --git a/tools/testing/selftests/riscv/abi/pointer_masking.c b/tools/testing/selftests/riscv/abi/pointer_masking.c
index 059d2e87eb1f..2d540af7b558 100644
--- a/tools/testing/selftests/riscv/abi/pointer_masking.c
+++ b/tools/testing/selftests/riscv/abi/pointer_masking.c
@@ -9,7 +9,7 @@
#include <sys/wait.h>
#include <unistd.h>
-#include "../../kselftest.h"
+#include "kselftest.h"
#ifndef PR_PMLEN_SHIFT
#define PR_PMLEN_SHIFT 24
diff --git a/tools/testing/selftests/riscv/hwprobe/cbo.c b/tools/testing/selftests/riscv/hwprobe/cbo.c
index 5e96ef785d0d..f254b2edd6ce 100644
--- a/tools/testing/selftests/riscv/hwprobe/cbo.c
+++ b/tools/testing/selftests/riscv/hwprobe/cbo.c
@@ -15,24 +15,31 @@
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <asm/ucontext.h>
+#include <getopt.h>
#include "hwprobe.h"
-#include "../../kselftest.h"
+#include "kselftest.h"
#define MK_CBO(fn) le32_bswap((uint32_t)(fn) << 20 | 10 << 15 | 2 << 12 | 0 << 7 | 15)
+#define MK_PREFETCH(fn) \
+ le32_bswap(0 << 25 | (uint32_t)(fn) << 20 | 10 << 15 | 6 << 12 | 0 << 7 | 19)
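+/*
+ * Zicbop prefetch.{i,r,w} are encoded in the ORI space: opcode 19 (OP-IMM),
+ * funct3 6, rd=x0, with rs1=a0 here and imm[4:0] selecting the prefetch
+ * type (0 = prefetch.i, 1 = prefetch.r, 3 = prefetch.w).
+ */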
static char mem[4096] __aligned(4096) = { [0 ... 4095] = 0xa5 };
-static bool illegal_insn;
+static bool got_fault;
-static void sigill_handler(int sig, siginfo_t *info, void *context)
+static void fault_handler(int sig, siginfo_t *info, void *context)
{
unsigned long *regs = (unsigned long *)&((ucontext_t *)context)->uc_mcontext;
uint32_t insn = *(uint32_t *)regs[0];
- assert(insn == MK_CBO(regs[11]));
+ if (sig == SIGILL)
+ assert(insn == MK_CBO(regs[11]));
- illegal_insn = true;
+ if (sig == SIGSEGV || sig == SIGBUS)
+ assert(insn == MK_PREFETCH(regs[11]));
+
+ got_fault = true;
regs[0] += 4;
}
@@ -45,39 +52,51 @@ static void sigill_handler(int sig, siginfo_t *info, void *context)
: : "r" (base), "i" (fn), "i" (MK_CBO(fn)) : "a0", "a1", "memory"); \
})
+#define prefetch_insn(base, fn) \
+({ \
+ asm volatile( \
+ "mv a0, %0\n" \
+ "li a1, %1\n" \
+ ".4byte %2\n" \
+ : : "r" (base), "i" (fn), "i" (MK_PREFETCH(fn)) : "a0", "a1"); \
+})
+
static void cbo_inval(char *base) { cbo_insn(base, 0); }
static void cbo_clean(char *base) { cbo_insn(base, 1); }
static void cbo_flush(char *base) { cbo_insn(base, 2); }
static void cbo_zero(char *base) { cbo_insn(base, 4); }
+static void prefetch_i(char *base) { prefetch_insn(base, 0); }
+static void prefetch_r(char *base) { prefetch_insn(base, 1); }
+static void prefetch_w(char *base) { prefetch_insn(base, 3); }
static void test_no_cbo_inval(void *arg)
{
ksft_print_msg("Testing cbo.inval instruction remain privileged\n");
- illegal_insn = false;
+ got_fault = false;
cbo_inval(&mem[0]);
- ksft_test_result(illegal_insn, "No cbo.inval\n");
+ ksft_test_result(got_fault, "No cbo.inval\n");
}
static void test_no_zicbom(void *arg)
{
ksft_print_msg("Testing Zicbom instructions remain privileged\n");
- illegal_insn = false;
+ got_fault = false;
cbo_clean(&mem[0]);
- ksft_test_result(illegal_insn, "No cbo.clean\n");
+ ksft_test_result(got_fault, "No cbo.clean\n");
- illegal_insn = false;
+ got_fault = false;
cbo_flush(&mem[0]);
- ksft_test_result(illegal_insn, "No cbo.flush\n");
+ ksft_test_result(got_fault, "No cbo.flush\n");
}
static void test_no_zicboz(void *arg)
{
ksft_print_msg("No Zicboz, testing cbo.zero remains privileged\n");
- illegal_insn = false;
+ got_fault = false;
cbo_zero(&mem[0]);
- ksft_test_result(illegal_insn, "No cbo.zero\n");
+ ksft_test_result(got_fault, "No cbo.zero\n");
}
static bool is_power_of_2(__u64 n)
@@ -85,6 +104,51 @@ static bool is_power_of_2(__u64 n)
return n != 0 && (n & (n - 1)) == 0;
}
+static void test_zicbop(void *arg)
+{
+ struct riscv_hwprobe pair = {
+ .key = RISCV_HWPROBE_KEY_ZICBOP_BLOCK_SIZE,
+ };
+ struct sigaction act = {
+ .sa_sigaction = &fault_handler,
+ .sa_flags = SA_SIGINFO
+ };
+ struct sigaction dfl = {
+ .sa_handler = SIG_DFL
+ };
+ cpu_set_t *cpus = (cpu_set_t *)arg;
+ __u64 block_size;
+ long rc;
+
+ rc = sigaction(SIGSEGV, &act, NULL);
+ assert(rc == 0);
+ rc = sigaction(SIGBUS, &act, NULL);
+ assert(rc == 0);
+
+ rc = riscv_hwprobe(&pair, 1, sizeof(cpu_set_t), (unsigned long *)cpus, 0);
+ block_size = pair.value;
+ ksft_test_result(rc == 0 && pair.key == RISCV_HWPROBE_KEY_ZICBOP_BLOCK_SIZE &&
+ is_power_of_2(block_size), "Zicbop block size\n");
+ ksft_print_msg("Zicbop block size: %llu\n", block_size);
+
+ got_fault = false;
+ prefetch_i(&mem[0]);
+ prefetch_r(&mem[0]);
+ prefetch_w(&mem[0]);
+ ksft_test_result(!got_fault, "Zicbop prefetch.* on valid address\n");
+
+ got_fault = false;
+ prefetch_i(NULL);
+ prefetch_r(NULL);
+ prefetch_w(NULL);
+ ksft_test_result(!got_fault, "Zicbop prefetch.* on NULL\n");
+
+ rc = sigaction(SIGBUS, &dfl, NULL);
+ assert(rc == 0);
+ rc = sigaction(SIGSEGV, &dfl, NULL);
+ assert(rc == 0);
+}
+
static void test_zicbom(void *arg)
{
struct riscv_hwprobe pair = {
@@ -100,13 +164,13 @@ static void test_zicbom(void *arg)
is_power_of_2(block_size), "Zicbom block size\n");
ksft_print_msg("Zicbom block size: %llu\n", block_size);
- illegal_insn = false;
+ got_fault = false;
cbo_clean(&mem[block_size]);
- ksft_test_result(!illegal_insn, "cbo.clean\n");
+ ksft_test_result(!got_fault, "cbo.clean\n");
- illegal_insn = false;
+ got_fault = false;
cbo_flush(&mem[block_size]);
- ksft_test_result(!illegal_insn, "cbo.flush\n");
+ ksft_test_result(!got_fault, "cbo.flush\n");
}
static void test_zicboz(void *arg)
@@ -125,11 +189,11 @@ static void test_zicboz(void *arg)
is_power_of_2(block_size), "Zicboz block size\n");
ksft_print_msg("Zicboz block size: %llu\n", block_size);
- illegal_insn = false;
+ got_fault = false;
cbo_zero(&mem[block_size]);
- ksft_test_result(!illegal_insn, "cbo.zero\n");
+ ksft_test_result(!got_fault, "cbo.zero\n");
- if (illegal_insn || !is_power_of_2(block_size)) {
+ if (got_fault || !is_power_of_2(block_size)) {
ksft_test_result_skip("cbo.zero check\n");
return;
}
@@ -177,7 +241,19 @@ static void check_no_zicbo_cpus(cpu_set_t *cpus, __u64 cbo)
rc = riscv_hwprobe(&pair, 1, sizeof(cpu_set_t), (unsigned long *)&one_cpu, 0);
assert(rc == 0 && pair.key == RISCV_HWPROBE_KEY_IMA_EXT_0);
- cbostr = cbo == RISCV_HWPROBE_EXT_ZICBOZ ? "Zicboz" : "Zicbom";
+ switch (cbo) {
+ case RISCV_HWPROBE_EXT_ZICBOZ:
+ cbostr = "Zicboz";
+ break;
+ case RISCV_HWPROBE_EXT_ZICBOM:
+ cbostr = "Zicbom";
+ break;
+ case RISCV_HWPROBE_EXT_ZICBOP:
+ cbostr = "Zicbop";
+ break;
+ default:
+ ksft_exit_fail_msg("Internal error: invalid cbo %llu\n", cbo);
+ }
if (pair.value & cbo)
ksft_exit_fail_msg("%s is only present on a subset of harts.\n"
@@ -194,6 +270,7 @@ enum {
TEST_ZICBOM,
TEST_NO_ZICBOM,
TEST_NO_CBO_INVAL,
+ TEST_ZICBOP,
};
static struct test_info {
@@ -206,26 +283,51 @@ static struct test_info {
[TEST_ZICBOM] = { .nr_tests = 3, test_zicbom },
[TEST_NO_ZICBOM] = { .nr_tests = 2, test_no_zicbom },
[TEST_NO_CBO_INVAL] = { .nr_tests = 1, test_no_cbo_inval },
+ [TEST_ZICBOP] = { .nr_tests = 3, test_zicbop },
+};
+
+static const struct option long_opts[] = {
+ {"zicbom-raises-sigill", no_argument, 0, 'm'},
+ {"zicboz-raises-sigill", no_argument, 0, 'z'},
+ {0, 0, 0, 0}
};
int main(int argc, char **argv)
{
struct sigaction act = {
- .sa_sigaction = &sigill_handler,
+ .sa_sigaction = &fault_handler,
.sa_flags = SA_SIGINFO,
};
struct riscv_hwprobe pair;
unsigned int plan = 0;
cpu_set_t cpus;
long rc;
- int i;
-
- if (argc > 1 && !strcmp(argv[1], "--sigill")) {
- rc = sigaction(SIGILL, &act, NULL);
- assert(rc == 0);
- tests[TEST_NO_ZICBOZ].enabled = true;
- tests[TEST_NO_ZICBOM].enabled = true;
- tests[TEST_NO_CBO_INVAL].enabled = true;
+ int i, opt, long_index;
+
+ long_index = 0;
+
+ while ((opt = getopt_long(argc, argv, "mz", long_opts, &long_index)) != -1) {
+ switch (opt) {
+ case 'm':
+ tests[TEST_NO_ZICBOM].enabled = true;
+ tests[TEST_NO_CBO_INVAL].enabled = true;
+ rc = sigaction(SIGILL, &act, NULL);
+ assert(rc == 0);
+ break;
+ case 'z':
+ tests[TEST_NO_ZICBOZ].enabled = true;
+ tests[TEST_NO_CBO_INVAL].enabled = true;
+ rc = sigaction(SIGILL, &act, NULL);
+ assert(rc == 0);
+ break;
+ case '?':
+ fprintf(stderr,
+ "Usage: %s [--zicbom-raises-sigill|-m] [--zicboz-raises-sigill|-z]\n",
+ argv[0]);
+ exit(1);
+ default:
+ break;
+ }
}
rc = sched_getaffinity(0, sizeof(cpu_set_t), &cpus);
@@ -253,6 +355,11 @@ int main(int argc, char **argv)
check_no_zicbo_cpus(&cpus, RISCV_HWPROBE_EXT_ZICBOM);
}
+ if (pair.value & RISCV_HWPROBE_EXT_ZICBOP)
+ tests[TEST_ZICBOP].enabled = true;
+ else
+ check_no_zicbo_cpus(&cpus, RISCV_HWPROBE_EXT_ZICBOP);
+
for (i = 0; i < ARRAY_SIZE(tests); ++i)
plan += tests[i].enabled ? tests[i].nr_tests : 0;
diff --git a/tools/testing/selftests/riscv/hwprobe/hwprobe.c b/tools/testing/selftests/riscv/hwprobe/hwprobe.c
index fd73c87804f3..54c435af9923 100644
--- a/tools/testing/selftests/riscv/hwprobe/hwprobe.c
+++ b/tools/testing/selftests/riscv/hwprobe/hwprobe.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
#include "hwprobe.h"
-#include "../../kselftest.h"
+#include "kselftest.h"
int main(int argc, char **argv)
{
diff --git a/tools/testing/selftests/riscv/hwprobe/which-cpus.c b/tools/testing/selftests/riscv/hwprobe/which-cpus.c
index 82c121412dfc..3ab53067e8dd 100644
--- a/tools/testing/selftests/riscv/hwprobe/which-cpus.c
+++ b/tools/testing/selftests/riscv/hwprobe/which-cpus.c
@@ -14,7 +14,7 @@
#include <assert.h>
#include "hwprobe.h"
-#include "../../kselftest.h"
+#include "kselftest.h"
static void help(void)
{
diff --git a/tools/testing/selftests/riscv/mm/mmap_bottomup.c b/tools/testing/selftests/riscv/mm/mmap_bottomup.c
index f9ccae50349b..461a65c9be00 100644
--- a/tools/testing/selftests/riscv/mm/mmap_bottomup.c
+++ b/tools/testing/selftests/riscv/mm/mmap_bottomup.c
@@ -2,7 +2,7 @@
#include <sys/mman.h>
#include <mmap_test.h>
-#include "../../kselftest_harness.h"
+#include "kselftest_harness.h"
TEST(infinite_rlimit)
{
diff --git a/tools/testing/selftests/riscv/mm/mmap_default.c b/tools/testing/selftests/riscv/mm/mmap_default.c
index 3f53b6ecc326..58db7d172af2 100644
--- a/tools/testing/selftests/riscv/mm/mmap_default.c
+++ b/tools/testing/selftests/riscv/mm/mmap_default.c
@@ -2,7 +2,7 @@
#include <sys/mman.h>
#include <mmap_test.h>
-#include "../../kselftest_harness.h"
+#include "kselftest_harness.h"
TEST(default_rlimit)
{
diff --git a/tools/testing/selftests/riscv/mm/mmap_test.h b/tools/testing/selftests/riscv/mm/mmap_test.h
index 75918d15919f..266a6becdeba 100644
--- a/tools/testing/selftests/riscv/mm/mmap_test.h
+++ b/tools/testing/selftests/riscv/mm/mmap_test.h
@@ -5,7 +5,7 @@
#include <sys/resource.h>
#include <stddef.h>
#include <strings.h>
-#include "../../kselftest_harness.h"
+#include "kselftest_harness.h"
#define TOP_DOWN 0
#define BOTTOM_UP 1
diff --git a/tools/testing/selftests/riscv/sigreturn/sigreturn.c b/tools/testing/selftests/riscv/sigreturn/sigreturn.c
index ed351a1cb917..e10873d95fed 100644
--- a/tools/testing/selftests/riscv/sigreturn/sigreturn.c
+++ b/tools/testing/selftests/riscv/sigreturn/sigreturn.c
@@ -4,7 +4,7 @@
#include <stdlib.h>
#include <ucontext.h>
#include <linux/ptrace.h>
-#include "../../kselftest_harness.h"
+#include "kselftest_harness.h"
#define RISCV_V_MAGIC 0x53465457
#define DEFAULT_VALUE 2
diff --git a/tools/testing/selftests/riscv/vector/Makefile b/tools/testing/selftests/riscv/vector/Makefile
index 6f7497f4e7b3..2c2a33fc083e 100644
--- a/tools/testing/selftests/riscv/vector/Makefile
+++ b/tools/testing/selftests/riscv/vector/Makefile
@@ -2,7 +2,7 @@
# Copyright (C) 2021 ARM Limited
# Originally tools/testing/arm64/abi/Makefile
-TEST_GEN_PROGS := v_initval vstate_prctl
+TEST_GEN_PROGS := v_initval vstate_prctl vstate_ptrace
TEST_GEN_PROGS_EXTENDED := vstate_exec_nolibc v_exec_initval_nolibc
include ../../lib.mk
@@ -26,3 +26,6 @@ $(OUTPUT)/v_initval: v_initval.c $(OUTPUT)/sys_hwprobe.o $(OUTPUT)/v_helpers.o
$(OUTPUT)/v_exec_initval_nolibc: v_exec_initval_nolibc.c
$(CC) -nostdlib -static -include ../../../../include/nolibc/nolibc.h \
-Wall $(CFLAGS) $(LDFLAGS) $^ -o $@ -lgcc
+
+$(OUTPUT)/vstate_ptrace: vstate_ptrace.c $(OUTPUT)/sys_hwprobe.o $(OUTPUT)/v_helpers.o
+ $(CC) -static -o$@ $(CFLAGS) $(LDFLAGS) $^
diff --git a/tools/testing/selftests/riscv/vector/v_initval.c b/tools/testing/selftests/riscv/vector/v_initval.c
index be9e1d18ad29..5fd2382e15a2 100644
--- a/tools/testing/selftests/riscv/vector/v_initval.c
+++ b/tools/testing/selftests/riscv/vector/v_initval.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
-#include "../../kselftest_harness.h"
+#include "kselftest_harness.h"
#include "v_helpers.h"
#define NEXT_PROGRAM "./v_exec_initval_nolibc"
diff --git a/tools/testing/selftests/riscv/vector/vstate_prctl.c b/tools/testing/selftests/riscv/vector/vstate_prctl.c
index 62fbb17a0556..d607af3900c1 100644
--- a/tools/testing/selftests/riscv/vector/vstate_prctl.c
+++ b/tools/testing/selftests/riscv/vector/vstate_prctl.c
@@ -6,7 +6,7 @@
#include <sys/types.h>
#include <stdlib.h>
-#include "../../kselftest_harness.h"
+#include "kselftest_harness.h"
#include "v_helpers.h"
#define NEXT_PROGRAM "./vstate_exec_nolibc"
diff --git a/tools/testing/selftests/riscv/vector/vstate_ptrace.c b/tools/testing/selftests/riscv/vector/vstate_ptrace.c
new file mode 100644
index 000000000000..1479abc0c9cb
--- /dev/null
+++ b/tools/testing/selftests/riscv/vector/vstate_ptrace.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <stdio.h>
+#include <stdlib.h>
+#include <asm/ptrace.h>
+#include <linux/elf.h>
+#include <sys/ptrace.h>
+#include <sys/uio.h>
+#include <sys/wait.h>
+#include "../../kselftest.h"
+#include "v_helpers.h"
+
+int parent_set_val, child_set_val;
+
+static long do_ptrace(enum __ptrace_request op, pid_t pid, long type, size_t size, void *data)
+{
+ struct iovec v_iovec = {
+ .iov_len = size,
+ .iov_base = data
+ };
+
+ return ptrace(op, pid, type, &v_iovec);
+}
+
+static int do_child(void)
+{
+ int out;
+
+ if (ptrace(PTRACE_TRACEME, -1, NULL, NULL)) {
+ ksft_perror("PTRACE_TRACEME failed\n");
+ return EXIT_FAILURE;
+ }
+
+ asm volatile (".option push\n\t"
+ ".option arch, +v\n\t"
+ ".option norvc\n\t"
+ "vsetivli x0, 1, e32, m1, ta, ma\n\t"
+ "vmv.s.x v31, %[in]\n\t"
+ "ebreak\n\t"
+ "vmv.x.s %[out], v31\n\t"
+ ".option pop\n\t"
+ : [out] "=r" (out)
+ : [in] "r" (child_set_val));
+
+ if (out != parent_set_val)
+ return EXIT_FAILURE;
+
+ return EXIT_SUCCESS;
+}
+
+static void do_parent(pid_t child)
+{
+ int status;
+ void *data = NULL;
+
+	/* Wait for the child to stop at the breakpoint or exit */
+ while (waitpid(child, &status, 0)) {
+ if (WIFEXITED(status)) {
+ ksft_test_result(WEXITSTATUS(status) == 0, "SETREGSET vector\n");
+ goto out;
+ } else if (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGTRAP)) {
+ size_t size;
+			void *v31;
+ struct __riscv_v_regset_state *v_regset_hdr;
+ struct user_regs_struct *gpreg;
+
+ size = sizeof(*v_regset_hdr);
+ data = malloc(size);
+ if (!data)
+ goto out;
+ v_regset_hdr = (struct __riscv_v_regset_state *)data;
+
+ if (do_ptrace(PTRACE_GETREGSET, child, NT_RISCV_VECTOR, size, data))
+ goto out;
+
+ ksft_print_msg("vlenb %ld\n", v_regset_hdr->vlenb);
+ data = realloc(data, size + v_regset_hdr->vlenb * 32);
+ if (!data)
+ goto out;
+ v_regset_hdr = (struct __riscv_v_regset_state *)data;
+ v31 = (void *)(data + size + v_regset_hdr->vlenb * 31);
+ size += v_regset_hdr->vlenb * 32;
+
+ if (do_ptrace(PTRACE_GETREGSET, child, NT_RISCV_VECTOR, size, data))
+ goto out;
+
+ ksft_test_result(*(int *)v31 == child_set_val, "GETREGSET vector\n");
+
+ *(int *)v31 = parent_set_val;
+ if (do_ptrace(PTRACE_SETREGSET, child, NT_RISCV_VECTOR, size, data))
+ goto out;
+
+ /* move the pc forward */
+ size = sizeof(*gpreg);
+		data = realloc(data, size);
+		if (!data)
+			goto out;
+		gpreg = (struct user_regs_struct *)data;
+
+ if (do_ptrace(PTRACE_GETREGSET, child, NT_PRSTATUS, size, data))
+ goto out;
+
+ gpreg->pc += 4;
+ if (do_ptrace(PTRACE_SETREGSET, child, NT_PRSTATUS, size, data))
+ goto out;
+ }
+
+ ptrace(PTRACE_CONT, child, NULL, NULL);
+ }
+
+out:
+ free(data);
+}
+
+int main(void)
+{
+ pid_t child;
+
+ ksft_set_plan(2);
+ if (!is_vector_supported() && !is_xtheadvector_supported())
+ ksft_exit_skip("Vector not supported\n");
+
+ srandom(getpid());
+ parent_set_val = rand();
+ child_set_val = rand();
+
+ child = fork();
+ if (child < 0)
+ ksft_exit_fail_msg("Fork failed %d\n", child);
+
+ if (!child)
+ return do_child();
+
+ do_parent(child);
+
+ ksft_finished();
+}
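
The do_ptrace() helper above is the generic regset access pattern. The property the test depends on is that the kernel truncates iov_len to the number of bytes it actually copied, which is what allows reading just the vector header first to learn vlenb before re-reading the full register set. A standalone sketch, assuming only the standard ptrace API:

	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/uio.h>

	/* Read a regset; on return iov.iov_len holds the byte count the
	 * kernel actually filled in. */
	static long read_regset(pid_t pid, long type, void *buf, size_t len)
	{
		struct iovec iov = { .iov_base = buf, .iov_len = len };

		return ptrace(PTRACE_GETREGSET, pid, type, &iov);
	}
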
diff --git a/tools/testing/selftests/rseq/basic_percpu_ops_test.c b/tools/testing/selftests/rseq/basic_percpu_ops_test.c
index 2348d2c20d0a..1193612bf327 100644
--- a/tools/testing/selftests/rseq/basic_percpu_ops_test.c
+++ b/tools/testing/selftests/rseq/basic_percpu_ops_test.c
@@ -9,7 +9,7 @@
#include <string.h>
#include <stddef.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "rseq.h"
#ifdef BUILDOPT_RSEQ_PERCPU_MM_CID
diff --git a/tools/testing/selftests/rseq/rseq-riscv.h b/tools/testing/selftests/rseq/rseq-riscv.h
index 67d544aaa9a3..06c840e81c8b 100644
--- a/tools/testing/selftests/rseq/rseq-riscv.h
+++ b/tools/testing/selftests/rseq/rseq-riscv.h
@@ -8,6 +8,7 @@
* exception when executed in all modes.
*/
#include <endian.h>
+#include <asm/fence.h>
#if defined(__BYTE_ORDER) ? (__BYTE_ORDER == __LITTLE_ENDIAN) : defined(__LITTLE_ENDIAN)
#define RSEQ_SIG 0xf1401073 /* csrr mhartid, x0 */
@@ -24,8 +25,6 @@
#define REG_L __REG_SEL("ld ", "lw ")
#define REG_S __REG_SEL("sd ", "sw ")
-#define RISCV_FENCE(p, s) \
- __asm__ __volatile__ ("fence " #p "," #s : : : "memory")
#define rseq_smp_mb() RISCV_FENCE(rw, rw)
#define rseq_smp_rmb() RISCV_FENCE(r, r)
#define rseq_smp_wmb() RISCV_FENCE(w, w)
diff --git a/tools/testing/selftests/rseq/rseq-s390.h b/tools/testing/selftests/rseq/rseq-s390.h
index 33baaa9f9997..e7b858cd3736 100644
--- a/tools/testing/selftests/rseq/rseq-s390.h
+++ b/tools/testing/selftests/rseq/rseq-s390.h
@@ -28,8 +28,6 @@ do { \
RSEQ_WRITE_ONCE(*(p), v); \
} while (0)
-#ifdef __s390x__
-
#define LONG_L "lg"
#define LONG_S "stg"
#define LONG_LT_R "ltgr"
@@ -63,43 +61,6 @@ do { \
".quad " __rseq_str(start_ip) ", " __rseq_str(exit_ip) "\n\t" \
".popsection\n\t"
-#elif __s390__
-
-#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \
- start_ip, post_commit_offset, abort_ip) \
- ".pushsection __rseq_cs, \"aw\"\n\t" \
- ".balign 32\n\t" \
- __rseq_str(label) ":\n\t" \
- ".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
- ".long 0x0, " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) "\n\t" \
- ".popsection\n\t" \
- ".pushsection __rseq_cs_ptr_array, \"aw\"\n\t" \
- ".long 0x0, " __rseq_str(label) "b\n\t" \
- ".popsection\n\t"
-
-/*
- * Exit points of a rseq critical section consist of all instructions outside
- * of the critical section where a critical section can either branch to or
- * reach through the normal course of its execution. The abort IP and the
- * post-commit IP are already part of the __rseq_cs section and should not be
- * explicitly defined as additional exit points. Knowing all exit points is
- * useful to assist debuggers stepping over the critical section.
- */
-#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip) \
- ".pushsection __rseq_exit_point_array, \"aw\"\n\t" \
- ".long 0x0, " __rseq_str(start_ip) ", 0x0, " __rseq_str(exit_ip) "\n\t" \
- ".popsection\n\t"
-
-#define LONG_L "l"
-#define LONG_S "st"
-#define LONG_LT_R "ltr"
-#define LONG_CMP "c"
-#define LONG_CMP_R "cr"
-#define LONG_ADDI "ahi"
-#define LONG_ADD_R "ar"
-
-#endif
-
#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \
__RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip, \
(post_commit_ip - start_ip), abort_ip)
diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c
index 663a9cef1952..a736727b83c1 100644
--- a/tools/testing/selftests/rseq/rseq.c
+++ b/tools/testing/selftests/rseq/rseq.c
@@ -33,16 +33,16 @@
#include <linux/compiler.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "rseq.h"
/*
* Define weak versions to play nice with binaries that are statically linked
* against a libc that doesn't support registering its own rseq.
*/
-__weak ptrdiff_t __rseq_offset;
-__weak unsigned int __rseq_size;
-__weak unsigned int __rseq_flags;
+extern __weak ptrdiff_t __rseq_offset;
+extern __weak unsigned int __rseq_size;
+extern __weak unsigned int __rseq_flags;
static const ptrdiff_t *libc_rseq_offset_p = &__rseq_offset;
static const unsigned int *libc_rseq_size_p = &__rseq_size;
@@ -209,7 +209,7 @@ void rseq_init(void)
* libc not having registered a restartable sequence. Try to find the
* symbols if that's the case.
*/
- if (!*libc_rseq_size_p) {
+ if (!libc_rseq_size_p || !*libc_rseq_size_p) {
libc_rseq_offset_p = dlsym(RTLD_NEXT, "__rseq_offset");
libc_rseq_size_p = dlsym(RTLD_NEXT, "__rseq_size");
libc_rseq_flags_p = dlsym(RTLD_NEXT, "__rseq_flags");
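
The switch to extern __weak above leans on a linker property worth spelling out: the address of a weak symbol that remains undefined at link time is NULL, so the pointer itself must be tested before it is dereferenced, which is exactly what the added !libc_rseq_size_p check does. A minimal sketch of the pattern:

	/* Sketch: &__rseq_size is NULL when the weak extern is undefined,
	 * so guard the pointer before reading through it. */
	extern __weak unsigned int __rseq_size;

	static unsigned int rseq_size_or_zero(void)
	{
		const unsigned int *p = &__rseq_size;

		return p ? *p : 0;
	}
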
diff --git a/tools/testing/selftests/rtc/rtctest.c b/tools/testing/selftests/rtc/rtctest.c
index be175c0e6ae3..8047d9879039 100644
--- a/tools/testing/selftests/rtc/rtctest.c
+++ b/tools/testing/selftests/rtc/rtctest.c
@@ -16,7 +16,7 @@
#include <time.h>
#include <unistd.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#define NUM_UIE 3
#define ALARM_DELTA 3
diff --git a/tools/testing/selftests/run_kselftest.sh b/tools/testing/selftests/run_kselftest.sh
index 0443beacf362..d4be97498b32 100755
--- a/tools/testing/selftests/run_kselftest.sh
+++ b/tools/testing/selftests/run_kselftest.sh
@@ -33,6 +33,7 @@ Usage: $0 [OPTIONS]
-c | --collection COLLECTION Run all tests from COLLECTION
-l | --list List the available collection:test entries
-d | --dry-run Don't actually run any tests
+ -f | --no-error-on-fail Don't exit with an error just because tests failed
-n | --netns Run each test in namespace
-h | --help Show this usage info
-o | --override-timeout Number of seconds after which we timeout
@@ -44,6 +45,7 @@ COLLECTIONS=""
TESTS=""
dryrun=""
kselftest_override_timeout=""
+ERROR_ON_FAIL=true
while true; do
case "$1" in
-s | --summary)
@@ -65,6 +67,9 @@ while true; do
-d | --dry-run)
dryrun="echo"
shift ;;
+ -f | --no-error-on-fail)
+ ERROR_ON_FAIL=false
+ shift ;;
-n | --netns)
RUN_IN_NETNS=1
shift ;;
@@ -105,9 +110,18 @@ if [ -n "$TESTS" ]; then
available="$(echo "$valid" | sed -e 's/ /\n/g')"
fi
+kselftest_failures_file="$(mktemp --tmpdir kselftest-failures-XXXXXX)"
+export kselftest_failures_file
+
collections=$(echo "$available" | cut -d: -f1 | sort | uniq)
for collection in $collections ; do
[ -w /dev/kmsg ] && echo "kselftest: Running tests in $collection" >> /dev/kmsg
tests=$(echo "$available" | grep "^$collection:" | cut -d: -f2)
($dryrun cd "$collection" && $dryrun run_many $tests)
done
+
+failures="$(cat "$kselftest_failures_file")"
+rm "$kselftest_failures_file"
+if "$ERROR_ON_FAIL" && [ "$failures" ]; then
+ exit 1
+fi
diff --git a/tools/testing/selftests/sched_ext/Makefile b/tools/testing/selftests/sched_ext/Makefile
index 9d9d6b4c38b0..5fe45f9c5f8f 100644
--- a/tools/testing/selftests/sched_ext/Makefile
+++ b/tools/testing/selftests/sched_ext/Makefile
@@ -174,6 +174,7 @@ auto-test-targets := \
minimal \
numa \
allowed_cpus \
+ peek_dsq \
prog_run \
reload_loop \
select_cpu_dfl \
diff --git a/tools/testing/selftests/sched_ext/exit.c b/tools/testing/selftests/sched_ext/exit.c
index 9451782689de..ee25824b1cbe 100644
--- a/tools/testing/selftests/sched_ext/exit.c
+++ b/tools/testing/selftests/sched_ext/exit.c
@@ -22,6 +22,14 @@ static enum scx_test_status run(void *ctx)
struct bpf_link *link;
char buf[16];
+ /*
+ * On single-CPU systems, ops.select_cpu() is never
+ * invoked, so skip this test to avoid getting stuck
+ * indefinitely.
+ */
+ if (tc == EXIT_SELECT_CPU && libbpf_num_possible_cpus() == 1)
+ continue;
+
skel = exit__open();
SCX_ENUM_INIT(skel);
skel->rodata->exit_point = tc;
diff --git a/tools/testing/selftests/sched_ext/hotplug.c b/tools/testing/selftests/sched_ext/hotplug.c
index 1c9ceb661c43..0cfbb111a2d0 100644
--- a/tools/testing/selftests/sched_ext/hotplug.c
+++ b/tools/testing/selftests/sched_ext/hotplug.c
@@ -6,7 +6,6 @@
#include <bpf/bpf.h>
#include <sched.h>
#include <scx/common.h>
-#include <sched.h>
#include <sys/wait.h>
#include <unistd.h>
diff --git a/tools/testing/selftests/sched_ext/maximal.bpf.c b/tools/testing/selftests/sched_ext/maximal.bpf.c
index 430f5e13bf55..01cf4f3da4e0 100644
--- a/tools/testing/selftests/sched_ext/maximal.bpf.c
+++ b/tools/testing/selftests/sched_ext/maximal.bpf.c
@@ -123,6 +123,10 @@ void BPF_STRUCT_OPS(maximal_cgroup_cancel_move, struct task_struct *p,
void BPF_STRUCT_OPS(maximal_cgroup_set_weight, struct cgroup *cgrp, u32 weight)
{}
+void BPF_STRUCT_OPS(maximal_cgroup_set_bandwidth, struct cgroup *cgrp,
+ u64 period_us, u64 quota_us, u64 burst_us)
+{}
+
s32 BPF_STRUCT_OPS_SLEEPABLE(maximal_init)
{
return scx_bpf_create_dsq(DSQ_ID, -1);
@@ -160,6 +164,7 @@ struct sched_ext_ops maximal_ops = {
.cgroup_move = (void *) maximal_cgroup_move,
.cgroup_cancel_move = (void *) maximal_cgroup_cancel_move,
.cgroup_set_weight = (void *) maximal_cgroup_set_weight,
+ .cgroup_set_bandwidth = (void *) maximal_cgroup_set_bandwidth,
.init = (void *) maximal_init,
.exit = (void *) maximal_exit,
.name = "maximal",
diff --git a/tools/testing/selftests/sched_ext/peek_dsq.bpf.c b/tools/testing/selftests/sched_ext/peek_dsq.bpf.c
new file mode 100644
index 000000000000..a3faf5bb49d6
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/peek_dsq.bpf.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * A BPF program for testing DSQ operations and peek in particular.
+ *
+ * Copyright (c) 2025 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2025 Ryan Newton <ryan.newton@alum.mit.edu>
+ */
+
+#include <scx/common.bpf.h>
+#include <scx/compat.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+UEI_DEFINE(uei); /* Error handling */
+
+#define MAX_SAMPLES 100
+#define MAX_CPUS 512
+#define DSQ_POOL_SIZE 8
+int max_samples = MAX_SAMPLES;
+int max_cpus = MAX_CPUS;
+int dsq_pool_size = DSQ_POOL_SIZE;
+
+/* Global variables to store test results */
+long dsq_peek_result1 = -1;
+long dsq_inserted_pid = -1;
+int insert_test_cpu = -1; /* Set to the cpu that performs the test */
+long dsq_peek_result2 = -1;
+long dsq_peek_result2_pid = -1;
+long dsq_peek_result2_expected = -1;
+int test_dsq_id = 1234; /* Use a simple ID, as in the create_dsq example */
+int real_dsq_id = 1235; /* DSQ for normal operation */
+int enqueue_count = -1;
+int dispatch_count = -1;
+bool debug_ksym_exists;
+
+/* DSQ pool for stress testing */
+int dsq_pool_base_id = 2000;
+int phase1_complete = -1;
+long total_peek_attempts = -1;
+long successful_peeks = -1;
+
+/* BPF map for sharing peek results with userspace */
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, MAX_SAMPLES);
+ __type(key, u32);
+ __type(value, long);
+} peek_results SEC(".maps");
+
+static int get_random_dsq_id(void)
+{
+ u64 time = bpf_ktime_get_ns();
+
+ return dsq_pool_base_id + (time % DSQ_POOL_SIZE);
+}
+
+static void record_peek_result(long pid)
+{
+ u32 slot_key;
+ long *slot_pid_ptr;
+ int ix;
+
+ if (pid <= 0)
+ return;
+
+ /* Find an empty slot or one with the same PID */
+ bpf_for(ix, 0, 10) {
+ slot_key = (pid + ix) % MAX_SAMPLES;
+ slot_pid_ptr = bpf_map_lookup_elem(&peek_results, &slot_key);
+ if (!slot_pid_ptr)
+ continue;
+
+ if (*slot_pid_ptr == -1 || *slot_pid_ptr == pid) {
+ *slot_pid_ptr = pid;
+ break;
+ }
+ }
+}
+
+/* Scan all DSQs in the pool and try to move a task to local */
+static int scan_dsq_pool(void)
+{
+ struct task_struct *task;
+ int moved = 0;
+ int i;
+
+ bpf_for(i, 0, DSQ_POOL_SIZE) {
+ int dsq_id = dsq_pool_base_id + i;
+
+ total_peek_attempts++;
+
+ task = __COMPAT_scx_bpf_dsq_peek(dsq_id);
+ if (task) {
+ successful_peeks++;
+ record_peek_result(task->pid);
+
+ /* Try to move this task to local */
+			if (!moved && scx_bpf_dsq_move_to_local(dsq_id)) {
+ moved = 1;
+ break;
+ }
+ }
+ }
+ return moved;
+}
+
+/* Struct_ops scheduler for testing DSQ peek operations */
+void BPF_STRUCT_OPS(peek_dsq_enqueue, struct task_struct *p, u64 enq_flags)
+{
+ struct task_struct *peek_result;
+ int last_insert_test_cpu, cpu;
+
+ enqueue_count++;
+ cpu = bpf_get_smp_processor_id();
+ last_insert_test_cpu = __sync_val_compare_and_swap(&insert_test_cpu, -1, cpu);
+
+ /* Phase 1: Simple insert-then-peek test (only on first task) */
+ if (last_insert_test_cpu == -1) {
+ bpf_printk("peek_dsq_enqueue beginning phase 1 peek test on cpu %d", cpu);
+
+ /* Test 1: Peek empty DSQ - should return NULL */
+ peek_result = __COMPAT_scx_bpf_dsq_peek(test_dsq_id);
+ dsq_peek_result1 = (long)peek_result; /* Should be 0 (NULL) */
+
+ /* Test 2: Insert task into test DSQ for testing in dispatch callback */
+ dsq_inserted_pid = p->pid;
+ scx_bpf_dsq_insert(p, test_dsq_id, 0, enq_flags);
+ dsq_peek_result2_expected = (long)p; /* Expected the task we just inserted */
+ } else if (!phase1_complete) {
+ /* Still in phase 1, use real DSQ */
+ scx_bpf_dsq_insert(p, real_dsq_id, 0, enq_flags);
+ } else {
+ /* Phase 2: Random DSQ insertion for stress testing */
+ int random_dsq_id = get_random_dsq_id();
+
+ scx_bpf_dsq_insert(p, random_dsq_id, 0, enq_flags);
+ }
+}
+
+void BPF_STRUCT_OPS(peek_dsq_dispatch, s32 cpu, struct task_struct *prev)
+{
+ dispatch_count++;
+
+ /* Phase 1: Complete the simple peek test if we inserted a task but
+ * haven't tested peek yet
+ */
+ if (insert_test_cpu == cpu && dsq_peek_result2 == -1) {
+ struct task_struct *peek_result;
+
+ bpf_printk("peek_dsq_dispatch completing phase 1 peek test on cpu %d", cpu);
+
+ /* Test 3: Peek DSQ after insert - should return the task we inserted */
+ peek_result = __COMPAT_scx_bpf_dsq_peek(test_dsq_id);
+ /* Store the PID of the peeked task for comparison */
+ dsq_peek_result2 = (long)peek_result;
+ dsq_peek_result2_pid = peek_result ? peek_result->pid : -1;
+
+ /* Now consume the task since we've peeked at it */
+ scx_bpf_dsq_move_to_local(test_dsq_id);
+
+ /* Mark phase 1 as complete */
+ phase1_complete = 1;
+ bpf_printk("Phase 1 complete, starting phase 2 stress testing");
+ } else if (!phase1_complete) {
+ /* Still in phase 1, use real DSQ */
+ scx_bpf_dsq_move_to_local(real_dsq_id);
+ } else {
+ /* Phase 2: Scan all DSQs in the pool and try to move a task */
+ if (!scan_dsq_pool()) {
+ /* No tasks found in DSQ pool, fall back to real DSQ */
+ scx_bpf_dsq_move_to_local(real_dsq_id);
+ }
+ }
+}
+
+s32 BPF_STRUCT_OPS_SLEEPABLE(peek_dsq_init)
+{
+ s32 err;
+ int i;
+
+ /* Always set debug values so we can see which version we're using */
+ debug_ksym_exists = bpf_ksym_exists(scx_bpf_dsq_peek) ? 1 : 0;
+
+ /* Initialize state first */
+ insert_test_cpu = -1;
+ enqueue_count = 0;
+ dispatch_count = 0;
+ phase1_complete = 0;
+ total_peek_attempts = 0;
+ successful_peeks = 0;
+
+ /* Create the test and real DSQs */
+ err = scx_bpf_create_dsq(test_dsq_id, -1);
+ if (err) {
+ scx_bpf_error("Failed to create DSQ %d: %d", test_dsq_id, err);
+ return err;
+ }
+ err = scx_bpf_create_dsq(real_dsq_id, -1);
+ if (err) {
+		scx_bpf_error("Failed to create DSQ %d: %d", real_dsq_id, err);
+ return err;
+ }
+
+ /* Create the DSQ pool for stress testing */
+ bpf_for(i, 0, DSQ_POOL_SIZE) {
+ int dsq_id = dsq_pool_base_id + i;
+
+ err = scx_bpf_create_dsq(dsq_id, -1);
+ if (err) {
+ scx_bpf_error("Failed to create DSQ pool entry %d: %d", dsq_id, err);
+ return err;
+ }
+ }
+
+ /* Initialize the peek results map */
+ bpf_for(i, 0, MAX_SAMPLES) {
+ u32 key = i;
+ long pid = -1;
+
+ bpf_map_update_elem(&peek_results, &key, &pid, BPF_ANY);
+ }
+
+ return 0;
+}
+
+void BPF_STRUCT_OPS(peek_dsq_exit, struct scx_exit_info *ei)
+{
+ int i;
+
+ /* Destroy the primary DSQs */
+ scx_bpf_destroy_dsq(test_dsq_id);
+ scx_bpf_destroy_dsq(real_dsq_id);
+
+ /* Destroy the DSQ pool */
+ bpf_for(i, 0, DSQ_POOL_SIZE) {
+ int dsq_id = dsq_pool_base_id + i;
+
+ scx_bpf_destroy_dsq(dsq_id);
+ }
+
+ UEI_RECORD(uei, ei);
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops peek_dsq_ops = {
+ .enqueue = (void *)peek_dsq_enqueue,
+ .dispatch = (void *)peek_dsq_dispatch,
+ .init = (void *)peek_dsq_init,
+ .exit = (void *)peek_dsq_exit,
+ .name = "peek_dsq",
+};
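
The peek-then-consume pairing this scheduler exercises reduces to the sketch below. Peek is only a hint: a concurrent consumer can empty the DSQ between the two calls, so the move must tolerate losing the race (and __COMPAT_scx_bpf_dsq_peek degrades gracefully on kernels that lack the kfunc):

	struct task_struct *p = __COMPAT_scx_bpf_dsq_peek(dsq_id);

	/* scx_bpf_dsq_move_to_local() returns false if the DSQ is already
	 * empty by the time we try to consume from it. */
	if (p && scx_bpf_dsq_move_to_local(dsq_id)) {
		/* the head task of dsq_id now runs from the local DSQ */
	}
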
diff --git a/tools/testing/selftests/sched_ext/peek_dsq.c b/tools/testing/selftests/sched_ext/peek_dsq.c
new file mode 100644
index 000000000000..a717384a3224
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/peek_dsq.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test for DSQ operations including create, destroy, and peek operations.
+ *
+ * Copyright (c) 2025 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2025 Ryan Newton <ryan.newton@alum.mit.edu>
+ */
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <string.h>
+#include <sched.h>
+#include "peek_dsq.bpf.skel.h"
+#include "scx_test.h"
+
+#define NUM_WORKERS 4
+
+static volatile bool workload_running = true;
+static pthread_t workload_threads[NUM_WORKERS];
+
+/**
+ * Background workload thread that sleeps and wakes rapidly to exercise
+ * the scheduler's enqueue operations and ensure DSQ operations get tested.
+ */
+static void *workload_thread_fn(void *arg)
+{
+ while (workload_running) {
+ /* Sleep for a very short time to trigger scheduler activity */
+ usleep(1000); /* 1ms sleep */
+ /* Yield to ensure we go through the scheduler */
+ sched_yield();
+ }
+ return NULL;
+}
+
+static enum scx_test_status setup(void **ctx)
+{
+ struct peek_dsq *skel;
+
+ skel = peek_dsq__open();
+ SCX_FAIL_IF(!skel, "Failed to open");
+ SCX_ENUM_INIT(skel);
+ SCX_FAIL_IF(peek_dsq__load(skel), "Failed to load skel");
+
+ *ctx = skel;
+
+ return SCX_TEST_PASS;
+}
+
+static int print_observed_pids(struct bpf_map *map, int max_samples, const char *dsq_name)
+{
+ long count = 0;
+
+ printf("Observed %s DSQ peek pids:\n", dsq_name);
+ for (int i = 0; i < max_samples; i++) {
+ long pid;
+ int err;
+
+ err = bpf_map_lookup_elem(bpf_map__fd(map), &i, &pid);
+ if (err == 0) {
+ if (pid == 0) {
+ printf(" Sample %d: NULL peek\n", i);
+ } else if (pid > 0) {
+ printf(" Sample %d: pid %ld\n", i, pid);
+ count++;
+ }
+ } else {
+ printf(" Sample %d: error reading pid (err=%d)\n", i, err);
+ }
+ }
+ printf("Observed ~%ld pids in the %s DSQ(s)\n", count, dsq_name);
+ return count;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+ struct peek_dsq *skel = ctx;
+ bool failed = false;
+ int seconds = 3;
+ int err;
+
+ /* Enable the scheduler to test DSQ operations */
+ printf("Enabling scheduler to test DSQ insert operations...\n");
+
+ struct bpf_link *link =
+ bpf_map__attach_struct_ops(skel->maps.peek_dsq_ops);
+
+ if (!link) {
+ SCX_ERR("Failed to attach struct_ops");
+ return SCX_TEST_FAIL;
+ }
+
+ printf("Starting %d background workload threads...\n", NUM_WORKERS);
+ workload_running = true;
+ for (int i = 0; i < NUM_WORKERS; i++) {
+ err = pthread_create(&workload_threads[i], NULL, workload_thread_fn, NULL);
+ if (err) {
+ SCX_ERR("Failed to create workload thread %d: %s", i, strerror(err));
+ /* Stop already created threads */
+ workload_running = false;
+ for (int j = 0; j < i; j++)
+ pthread_join(workload_threads[j], NULL);
+ bpf_link__destroy(link);
+ return SCX_TEST_FAIL;
+ }
+ }
+
+ printf("Waiting for enqueue events.\n");
+ sleep(seconds);
+ while (skel->data->enqueue_count <= 0) {
+ printf(".");
+ fflush(stdout);
+ sleep(1);
+ seconds++;
+ if (seconds >= 30) {
+ printf("\n\u2717 Timeout waiting for enqueue events\n");
+ /* Stop workload threads and cleanup */
+ workload_running = false;
+ for (int i = 0; i < NUM_WORKERS; i++)
+ pthread_join(workload_threads[i], NULL);
+ bpf_link__destroy(link);
+ return SCX_TEST_FAIL;
+ }
+ }
+
+ workload_running = false;
+ for (int i = 0; i < NUM_WORKERS; i++) {
+ err = pthread_join(workload_threads[i], NULL);
+ if (err) {
+ SCX_ERR("Failed to join workload thread %d: %s", i, strerror(err));
+ bpf_link__destroy(link);
+ return SCX_TEST_FAIL;
+ }
+ }
+ printf("Background workload threads stopped.\n");
+
+ SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_NONE));
+
+ /* Detach the scheduler */
+ bpf_link__destroy(link);
+
+ printf("Enqueue/dispatch count over %d seconds: %d / %d\n", seconds,
+ skel->data->enqueue_count, skel->data->dispatch_count);
+ printf("Debug: ksym_exists=%d\n",
+ skel->bss->debug_ksym_exists);
+
+ /* Check DSQ insert result */
+ printf("DSQ insert test done on cpu: %d\n", skel->data->insert_test_cpu);
+	if (skel->data->insert_test_cpu != -1) {
+		printf("\u2713 DSQ insert succeeded!\n");
+	} else {
+		printf("\u2717 DSQ insert failed or not attempted\n");
+		failed = true;
+	}
+
+ /* Check DSQ peek results */
+	printf(" DSQ peek result 1 (before insert): %ld\n",
+ skel->data->dsq_peek_result1);
+	if (skel->data->dsq_peek_result1 == 0) {
+		printf("\u2713 DSQ peek verification success: peek returned NULL!\n");
+	} else {
+		printf("\u2717 DSQ peek verification failed\n");
+		failed = true;
+	}
+
+ printf(" DSQ peek result 2 (after insert): %ld\n",
+ skel->data->dsq_peek_result2);
+ printf(" DSQ peek result 2, expected: %ld\n",
+ skel->data->dsq_peek_result2_expected);
+	if (skel->data->dsq_peek_result2 ==
+	    skel->data->dsq_peek_result2_expected) {
+		printf("\u2713 DSQ peek verification success: peek returned the inserted task!\n");
+	} else {
+		printf("\u2717 DSQ peek verification failed\n");
+		failed = true;
+	}
+
+ printf(" Inserted test task -> pid: %ld\n", skel->data->dsq_inserted_pid);
+ printf(" DSQ peek result 2 -> pid: %ld\n", skel->data->dsq_peek_result2_pid);
+
+ int pid_count;
+
+ pid_count = print_observed_pids(skel->maps.peek_results,
+ skel->data->max_samples, "DSQ pool");
+ printf("Total non-null peek observations: %ld out of %ld\n",
+ skel->data->successful_peeks, skel->data->total_peek_attempts);
+
+ if (skel->bss->debug_ksym_exists && pid_count == 0) {
+ printf("\u2717 DSQ pool test failed: no successful peeks in native mode\n");
+ failed = true;
+ }
+ if (skel->bss->debug_ksym_exists && pid_count > 0)
+ printf("\u2713 DSQ pool test success: observed successful peeks in native mode\n");
+
+ if (failed)
+ return SCX_TEST_FAIL;
+ else
+ return SCX_TEST_PASS;
+}
+
+static void cleanup(void *ctx)
+{
+ struct peek_dsq *skel = ctx;
+
+ if (workload_running) {
+ workload_running = false;
+ for (int i = 0; i < NUM_WORKERS; i++)
+ pthread_join(workload_threads[i], NULL);
+ }
+
+ peek_dsq__destroy(skel);
+}
+
+struct scx_test peek_dsq = {
+ .name = "peek_dsq",
+ .description =
+		"Test DSQ create/destroy operations and peek functionality",
+ .setup = setup,
+ .run = run,
+ .cleanup = cleanup,
+};
+REGISTER_SCX_TEST(&peek_dsq)
diff --git a/tools/testing/selftests/seccomp/seccomp_benchmark.c b/tools/testing/selftests/seccomp/seccomp_benchmark.c
index 5822e25e0217..ea4068cdefd6 100644
--- a/tools/testing/selftests/seccomp/seccomp_benchmark.c
+++ b/tools/testing/selftests/seccomp/seccomp_benchmark.c
@@ -20,7 +20,7 @@
#include <sys/syscall.h>
#include <sys/types.h>
-#include "../kselftest.h"
+#include "kselftest.h"
unsigned long long timing(clockid_t clk_id, unsigned long long samples)
{
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 61acbd45ffaa..32e2d4df397b 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -24,6 +24,7 @@
#include <linux/filter.h>
#include <sys/prctl.h>
#include <sys/ptrace.h>
+#include <sys/time.h>
#include <sys/user.h>
#include <linux/prctl.h>
#include <linux/ptrace.h>
@@ -53,7 +54,7 @@
#include <sys/syscall.h>
#include <poll.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#include "../clone3/clone3_selftests.h"
/* Attempt to de-conflict with the selftests tree. */
@@ -73,6 +74,14 @@
#define noinline __attribute__((noinline))
#endif
+#ifndef __nocf_check
+#define __nocf_check __attribute__((nocf_check))
+#endif
+
+#ifndef __naked
+#define __naked __attribute__((__naked__))
+#endif
+
#ifndef PR_SET_NO_NEW_PRIVS
#define PR_SET_NO_NEW_PRIVS 38
#define PR_GET_NO_NEW_PRIVS 39
@@ -3547,6 +3556,10 @@ static void signal_handler(int signal)
perror("write from signal");
}
+static void signal_handler_nop(int signal)
+{
+}
+
TEST(user_notification_signal)
{
pid_t pid;
@@ -4819,6 +4832,132 @@ TEST(user_notification_wait_killable_fatal)
EXPECT_EQ(SIGTERM, WTERMSIG(status));
}
+/* Ensure signals after the reply do not interrupt */
+TEST(user_notification_wait_killable_after_reply)
+{
+ int i, max_iter = 100000;
+ int listener, status;
+ int pipe_fds[2];
+ pid_t pid;
+ long ret;
+
+ ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+ ASSERT_EQ(0, ret)
+ {
+ TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
+ }
+
+ listener = user_notif_syscall(
+ __NR_dup, SECCOMP_FILTER_FLAG_NEW_LISTENER |
+ SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV);
+ ASSERT_GE(listener, 0);
+
+ /*
+ * Used to count invocations. One token is transferred from the child
+ * to the parent per syscall invocation, the parent tries to take
+ * one token per successful RECV. If the syscall is restarted after
+ * RECV the parent will try to get two tokens while the child only
+ * provided one.
+ */
+ ASSERT_EQ(pipe(pipe_fds), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ struct sigaction new_action = {
+ .sa_handler = signal_handler_nop,
+ .sa_flags = SA_RESTART,
+ };
+ struct itimerval timer = {
+ .it_value = { .tv_usec = 1000 },
+ .it_interval = { .tv_usec = 1000 },
+ };
+ char c = 'a';
+
+ close(pipe_fds[0]);
+
+ /* Setup the sigaction with SA_RESTART */
+ if (sigaction(SIGALRM, &new_action, NULL)) {
+ perror("sigaction");
+ exit(1);
+ }
+
+ /*
+ * Kill with SIGALRM repeatedly, to try to hit the race when
+ * handling the syscall.
+ */
+ if (setitimer(ITIMER_REAL, &timer, NULL) < 0)
+ perror("setitimer");
+
+ for (i = 0; i < max_iter; ++i) {
+ int fd;
+
+ /* Send one token per iteration to catch repeats. */
+ if (write(pipe_fds[1], &c, sizeof(c)) != 1) {
+ perror("write");
+ exit(1);
+ }
+
+ fd = syscall(__NR_dup, 0);
+ if (fd < 0) {
+ perror("dup");
+ exit(1);
+ }
+ close(fd);
+ }
+
+ exit(0);
+ }
+
+ close(pipe_fds[1]);
+
+ for (i = 0; i < max_iter; ++i) {
+ struct seccomp_notif req = {};
+ struct seccomp_notif_addfd addfd = {};
+ struct pollfd pfd = {
+ .fd = pipe_fds[0],
+ .events = POLLIN,
+ };
+ char c;
+
+ /*
+ * Try to receive one token. If it failed, one child syscall
+ * was restarted after RECV and needed to be handled twice.
+ */
+ ASSERT_EQ(poll(&pfd, 1, 1000), 1)
+ kill(pid, SIGKILL);
+
+ ASSERT_EQ(read(pipe_fds[0], &c, sizeof(c)), 1)
+ kill(pid, SIGKILL);
+
+ /*
+ * Get the notification, reply to it as fast as possible to test
+		 * whether the child wrongly skips going into the uninterruptible
+		 * (TASK_KILLABLE) wait state.
+ */
+ do
+ ret = ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req);
+ while (ret < 0 && errno == ENOENT); /* Accept interruptions before RECV */
+ ASSERT_EQ(ret, 0)
+ kill(pid, SIGKILL);
+
+ addfd.id = req.id;
+ addfd.flags = SECCOMP_ADDFD_FLAG_SEND;
+ addfd.srcfd = 0;
+ ASSERT_GE(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), 0)
+ kill(pid, SIGKILL);
+ }
+
+ /*
+ * Wait for the process to exit, and make sure the process terminated
+	 * with a zero exit code.
+ */
+ EXPECT_EQ(waitpid(pid, &status, 0), pid);
+ EXPECT_EQ(true, WIFEXITED(status));
+ EXPECT_EQ(0, WEXITSTATUS(status));
+}
+
struct tsync_vs_thread_leader_args {
pthread_t leader;
};
@@ -4896,7 +5035,36 @@ TEST(tsync_vs_dead_thread_leader)
EXPECT_EQ(0, status);
}
-noinline int probed(void)
+#ifdef __x86_64__
+
+/*
+ * We need a naked probed_uprobe function. Use __nocf_check to skip a
+ * possible endbr64 instruction and ignore -Wattributes, otherwise the
+ * compilation might fail.
+ */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wattributes"
+
+__naked __nocf_check noinline int probed_uprobe(void)
+{
+ /*
+ * Optimized uprobe is possible only on top of nop5 instruction.
+ */
+ asm volatile (" \n"
+ ".byte 0x0f, 0x1f, 0x44, 0x00, 0x00 \n"
+ "ret \n"
+ );
+}
+#pragma GCC diagnostic pop
+
+#else
+noinline int probed_uprobe(void)
+{
+ return 1;
+}
+#endif
+
+noinline int probed_uretprobe(void)
{
return 1;
}
@@ -4949,35 +5117,46 @@ static ssize_t get_uprobe_offset(const void *addr)
return found ? (uintptr_t)addr - start + base : -1;
}
-FIXTURE(URETPROBE) {
+FIXTURE(UPROBE) {
int fd;
};
-FIXTURE_VARIANT(URETPROBE) {
+FIXTURE_VARIANT(UPROBE) {
/*
- * All of the URETPROBE behaviors can be tested with either
- * uretprobe attached or not
+ * All of the U(RET)PROBE behaviors can be tested with either
+ * u(ret)probe attached or not
*/
bool attach;
+ /*
+ * Test both uprobe and uretprobe.
+ */
+ bool uretprobe;
+};
+
+FIXTURE_VARIANT_ADD(UPROBE, not_attached) {
+ .attach = false,
+ .uretprobe = false,
};
-FIXTURE_VARIANT_ADD(URETPROBE, attached) {
+FIXTURE_VARIANT_ADD(UPROBE, uprobe_attached) {
.attach = true,
+ .uretprobe = false,
};
-FIXTURE_VARIANT_ADD(URETPROBE, not_attached) {
- .attach = false,
+FIXTURE_VARIANT_ADD(UPROBE, uretprobe_attached) {
+ .attach = true,
+ .uretprobe = true,
};
-FIXTURE_SETUP(URETPROBE)
+FIXTURE_SETUP(UPROBE)
{
const size_t attr_sz = sizeof(struct perf_event_attr);
struct perf_event_attr attr;
ssize_t offset;
int type, bit;
-#ifndef __NR_uretprobe
- SKIP(return, "__NR_uretprobe syscall not defined");
+#if !defined(__NR_uprobe) || !defined(__NR_uretprobe)
+	SKIP(return, "__NR_uprobe or __NR_uretprobe syscalls not defined");
#endif
if (!variant->attach)
@@ -4987,12 +5166,17 @@ FIXTURE_SETUP(URETPROBE)
type = determine_uprobe_perf_type();
ASSERT_GE(type, 0);
- bit = determine_uprobe_retprobe_bit();
- ASSERT_GE(bit, 0);
- offset = get_uprobe_offset(probed);
+
+ if (variant->uretprobe) {
+ bit = determine_uprobe_retprobe_bit();
+ ASSERT_GE(bit, 0);
+ }
+
+ offset = get_uprobe_offset(variant->uretprobe ? probed_uretprobe : probed_uprobe);
ASSERT_GE(offset, 0);
- attr.config |= 1 << bit;
+ if (variant->uretprobe)
+ attr.config |= 1 << bit;
attr.size = attr_sz;
attr.type = type;
attr.config1 = ptr_to_u64("/proc/self/exe");
@@ -5003,7 +5187,7 @@ FIXTURE_SETUP(URETPROBE)
PERF_FLAG_FD_CLOEXEC);
}
-FIXTURE_TEARDOWN(URETPROBE)
+FIXTURE_TEARDOWN(UPROBE)
{
/* we could call close(self->fd), but we'd need extra filter for
* that and since we are calling _exit right away..
@@ -5017,11 +5201,17 @@ static int run_probed_with_filter(struct sock_fprog *prog)
return -1;
}
- probed();
+ /*
+ * Uprobe is optimized after first hit, so let's hit twice.
+ */
+ probed_uprobe();
+ probed_uprobe();
+
+ probed_uretprobe();
return 0;
}
-TEST_F(URETPROBE, uretprobe_default_allow)
+TEST_F(UPROBE, uprobe_default_allow)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
@@ -5034,7 +5224,7 @@ TEST_F(URETPROBE, uretprobe_default_allow)
ASSERT_EQ(0, run_probed_with_filter(&prog));
}
-TEST_F(URETPROBE, uretprobe_default_block)
+TEST_F(UPROBE, uprobe_default_block)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
@@ -5051,11 +5241,14 @@ TEST_F(URETPROBE, uretprobe_default_block)
ASSERT_EQ(0, run_probed_with_filter(&prog));
}
-TEST_F(URETPROBE, uretprobe_block_uretprobe_syscall)
+TEST_F(UPROBE, uprobe_block_syscall)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
+#ifdef __NR_uprobe
+ BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_uprobe, 1, 2),
+#endif
#ifdef __NR_uretprobe
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_uretprobe, 0, 1),
#endif
@@ -5070,11 +5263,14 @@ TEST_F(URETPROBE, uretprobe_block_uretprobe_syscall)
ASSERT_EQ(0, run_probed_with_filter(&prog));
}
-TEST_F(URETPROBE, uretprobe_default_block_with_uretprobe_syscall)
+TEST_F(UPROBE, uprobe_default_block_with_syscall)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
+#ifdef __NR_uprobe
+ BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_uprobe, 3, 0),
+#endif
#ifdef __NR_uretprobe
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_uretprobe, 2, 0),
#endif
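
The token pipe in user_notification_wait_killable_after_reply enforces a simple invariant that generalizes to other restart-detection tests: one token per child syscall attempt, one token per supervisor-handled notification. Distilled below; handle_one_notification() is a hypothetical stand-in for the RECV plus ADDFD reply sequence in the real test:

	/* Child, per iteration: exactly one token per syscall attempt. */
	write(pipe_fds[1], &c, 1);
	close(syscall(__NR_dup, 0));

	/* Supervisor, per iteration: exactly one token per notification.
	 * If a syscall is wrongly restarted after the reply, the extra
	 * attempt arrives without a token and the poll() timeout in the
	 * real test converts the imbalance into a failure. */
	read(pipe_fds[0], &c, 1);
	handle_one_notification(listener);	/* hypothetical helper */
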
diff --git a/tools/testing/selftests/sgx/main.c b/tools/testing/selftests/sgx/main.c
index 9820b3809c69..13b84e54ce38 100644
--- a/tools/testing/selftests/sgx/main.c
+++ b/tools/testing/selftests/sgx/main.c
@@ -18,7 +18,7 @@
#include <sys/types.h>
#include <sys/auxv.h>
#include "defines.h"
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#include "main.h"
static const uint64_t MAGIC = 0x1122334455667788ULL;
diff --git a/tools/testing/selftests/signal/mangle_uc_sigmask.c b/tools/testing/selftests/signal/mangle_uc_sigmask.c
index b79ab92178a8..11dbc14bbc8e 100644
--- a/tools/testing/selftests/signal/mangle_uc_sigmask.c
+++ b/tools/testing/selftests/signal/mangle_uc_sigmask.c
@@ -39,7 +39,7 @@
#include <signal.h>
#include <ucontext.h>
-#include "../kselftest.h"
+#include "kselftest.h"
void handler_verify_ucontext(int signo, siginfo_t *info, void *uc)
{
diff --git a/tools/testing/selftests/signal/sas.c b/tools/testing/selftests/signal/sas.c
index 07227fab1cc9..306b996ab365 100644
--- a/tools/testing/selftests/signal/sas.c
+++ b/tools/testing/selftests/signal/sas.c
@@ -19,7 +19,7 @@
#include <errno.h>
#include <sys/auxv.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "current_stack_pointer.h"
#ifndef SS_AUTODISARM
diff --git a/tools/testing/selftests/sparc64/drivers/adi-test.c b/tools/testing/selftests/sparc64/drivers/adi-test.c
index 84e5d9fd20b0..b986714e7a52 100644
--- a/tools/testing/selftests/sparc64/drivers/adi-test.c
+++ b/tools/testing/selftests/sparc64/drivers/adi-test.c
@@ -16,7 +16,7 @@
#include <sys/stat.h>
#include <unistd.h>
-#include "../../kselftest.h"
+#include "kselftest.h"
#define DEBUG_LEVEL_1_BIT (0x0001)
#define DEBUG_LEVEL_2_BIT (0x0002)
diff --git a/tools/testing/selftests/sync/sync_test.c b/tools/testing/selftests/sync/sync_test.c
index 93db5aa246a3..2b44e5d88b63 100644
--- a/tools/testing/selftests/sync/sync_test.c
+++ b/tools/testing/selftests/sync/sync_test.c
@@ -34,7 +34,7 @@
#include <errno.h>
#include <string.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#include "synctest.h"
static int run_test(int (*test)(void), char *name)
diff --git a/tools/testing/selftests/syscall_user_dispatch/sud_test.c b/tools/testing/selftests/syscall_user_dispatch/sud_test.c
index d975a6767329..b855c6000287 100644
--- a/tools/testing/selftests/syscall_user_dispatch/sud_test.c
+++ b/tools/testing/selftests/syscall_user_dispatch/sud_test.c
@@ -10,18 +10,24 @@
#include <sys/sysinfo.h>
#include <sys/syscall.h>
#include <signal.h>
+#include <stdbool.h>
+#include <stdlib.h>
#include <asm/unistd.h>
-#include "../kselftest_harness.h"
+#include "kselftest_harness.h"
#ifndef PR_SET_SYSCALL_USER_DISPATCH
# define PR_SET_SYSCALL_USER_DISPATCH 59
# define PR_SYS_DISPATCH_OFF 0
-# define PR_SYS_DISPATCH_ON 1
# define SYSCALL_DISPATCH_FILTER_ALLOW 0
# define SYSCALL_DISPATCH_FILTER_BLOCK 1
#endif
+#ifndef PR_SYS_DISPATCH_EXCLUSIVE_ON
+# define PR_SYS_DISPATCH_EXCLUSIVE_ON 1
+# define PR_SYS_DISPATCH_INCLUSIVE_ON 2
+#endif
+
#ifndef SYS_USER_DISPATCH
# define SYS_USER_DISPATCH 2
#endif
@@ -65,7 +71,7 @@ TEST_SIGNAL(dispatch_trigger_sigsys, SIGSYS)
ret = sysinfo(&info);
ASSERT_EQ(0, ret);
- ret = prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON, 0, 0, &sel);
+ ret = prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_EXCLUSIVE_ON, 0, 0, &sel);
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support CONFIG_SYSCALL_USER_DISPATCH");
}
@@ -79,6 +85,21 @@ TEST_SIGNAL(dispatch_trigger_sigsys, SIGSYS)
}
}
+static void prctl_valid(struct __test_metadata *_metadata,
+ unsigned long op, unsigned long off,
+ unsigned long size, void *sel)
+{
+ EXPECT_EQ(0, prctl(PR_SET_SYSCALL_USER_DISPATCH, op, off, size, sel));
+}
+
+static void prctl_invalid(struct __test_metadata *_metadata,
+ unsigned long op, unsigned long off,
+ unsigned long size, void *sel, int err)
+{
+ EXPECT_EQ(-1, prctl(PR_SET_SYSCALL_USER_DISPATCH, op, off, size, sel));
+ EXPECT_EQ(err, errno);
+}
+
TEST(bad_prctl_param)
{
char sel = SYSCALL_DISPATCH_FILTER_ALLOW;
@@ -86,57 +107,54 @@ TEST(bad_prctl_param)
/* Invalid op */
op = -1;
- prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0, 0, &sel);
- ASSERT_EQ(EINVAL, errno);
+ prctl_invalid(_metadata, op, 0, 0, &sel, EINVAL);
/* PR_SYS_DISPATCH_OFF */
op = PR_SYS_DISPATCH_OFF;
/* offset != 0 */
- prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x1, 0x0, 0);
- EXPECT_EQ(EINVAL, errno);
+ prctl_invalid(_metadata, op, 0x1, 0x0, 0, EINVAL);
/* len != 0 */
- prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x0, 0xff, 0);
- EXPECT_EQ(EINVAL, errno);
+ prctl_invalid(_metadata, op, 0x0, 0xff, 0, EINVAL);
/* sel != NULL */
- prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x0, 0x0, &sel);
- EXPECT_EQ(EINVAL, errno);
+ prctl_invalid(_metadata, op, 0x0, 0x0, &sel, EINVAL);
/* Valid parameter */
- errno = 0;
- prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x0, 0x0, 0x0);
- EXPECT_EQ(0, errno);
+ prctl_valid(_metadata, op, 0x0, 0x0, 0x0);
- /* PR_SYS_DISPATCH_ON */
- op = PR_SYS_DISPATCH_ON;
+ /* PR_SYS_DISPATCH_EXCLUSIVE_ON */
+ op = PR_SYS_DISPATCH_EXCLUSIVE_ON;
/* Dispatcher region is bad (offset > 0 && len == 0) */
- prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x1, 0x0, &sel);
- EXPECT_EQ(EINVAL, errno);
- prctl(PR_SET_SYSCALL_USER_DISPATCH, op, -1L, 0x0, &sel);
- EXPECT_EQ(EINVAL, errno);
+ prctl_invalid(_metadata, op, 0x1, 0x0, &sel, EINVAL);
+ prctl_invalid(_metadata, op, -1L, 0x0, &sel, EINVAL);
/* Invalid selector */
- prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x0, 0x1, (void *) -1);
- ASSERT_EQ(EFAULT, errno);
+ prctl_invalid(_metadata, op, 0x0, 0x1, (void *) -1, EFAULT);
/*
* Dispatcher range overflows unsigned long
*/
- prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON, 1, -1L, &sel);
- ASSERT_EQ(EINVAL, errno) {
- TH_LOG("Should reject bad syscall range");
- }
+ prctl_invalid(_metadata, PR_SYS_DISPATCH_EXCLUSIVE_ON, 1, -1L, &sel, EINVAL);
/*
	 * Allowed range overflows unsigned long
*/
- prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON, -1L, 0x1, &sel);
- ASSERT_EQ(EINVAL, errno) {
- TH_LOG("Should reject bad syscall range");
- }
+ prctl_invalid(_metadata, PR_SYS_DISPATCH_EXCLUSIVE_ON, -1L, 0x1, &sel, EINVAL);
+
+ /* 0 len should fail for PR_SYS_DISPATCH_INCLUSIVE_ON */
+ prctl_invalid(_metadata, PR_SYS_DISPATCH_INCLUSIVE_ON, 1, 0, 0, EINVAL);
+
+ /* Range wrap-around should fail */
+ prctl_invalid(_metadata, PR_SYS_DISPATCH_INCLUSIVE_ON, -1L, 2, 0, EINVAL);
+
+ /* Normal range shouldn't fail */
+ prctl_valid(_metadata, PR_SYS_DISPATCH_INCLUSIVE_ON, 2, 3, 0);
+
+ /* Invalid selector */
+ prctl_invalid(_metadata, PR_SYS_DISPATCH_INCLUSIVE_ON, 2, 3, (void *) -1, EFAULT);
}
/*
@@ -147,11 +165,13 @@ char glob_sel;
int nr_syscalls_emulated;
int si_code;
int si_errno;
+unsigned long syscall_addr;
static void handle_sigsys(int sig, siginfo_t *info, void *ucontext)
{
si_code = info->si_code;
si_errno = info->si_errno;
+ syscall_addr = (unsigned long)info->si_call_addr;
if (info->si_syscall == MAGIC_SYSCALL_1)
nr_syscalls_emulated++;
@@ -174,31 +194,34 @@ static void handle_sigsys(int sig, siginfo_t *info, void *ucontext)
#endif
}
-TEST(dispatch_and_return)
+int setup_sigsys_handler(void)
{
- long ret;
struct sigaction act;
sigset_t mask;
- glob_sel = 0;
- nr_syscalls_emulated = 0;
- si_code = 0;
- si_errno = 0;
-
memset(&act, 0, sizeof(act));
sigemptyset(&mask);
-
act.sa_sigaction = handle_sigsys;
act.sa_flags = SA_SIGINFO;
act.sa_mask = mask;
+ return sigaction(SIGSYS, &act, NULL);
+}
- ret = sigaction(SIGSYS, &act, NULL);
- ASSERT_EQ(0, ret);
+TEST(dispatch_and_return)
+{
+ long ret;
+
+ glob_sel = 0;
+ nr_syscalls_emulated = 0;
+ si_code = 0;
+ si_errno = 0;
+
+ ASSERT_EQ(0, setup_sigsys_handler());
/* Make sure selector is good prior to prctl. */
SYSCALL_DISPATCH_OFF(glob_sel);
- ret = prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON, 0, 0, &glob_sel);
+ ret = prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_EXCLUSIVE_ON, 0, 0, &glob_sel);
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support CONFIG_SYSCALL_USER_DISPATCH");
}
@@ -254,7 +277,7 @@ TEST_SIGNAL(bad_selector, SIGSYS)
/* Make sure selector is good prior to prctl. */
SYSCALL_DISPATCH_OFF(glob_sel);
- ret = prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON, 0, 0, &glob_sel);
+ ret = prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_EXCLUSIVE_ON, 0, 0, &glob_sel);
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support CONFIG_SYSCALL_USER_DISPATCH");
}
@@ -278,7 +301,7 @@ TEST(disable_dispatch)
struct sysinfo info;
char sel = 0;
- ret = prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON, 0, 0, &sel);
+ ret = prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_EXCLUSIVE_ON, 0, 0, &sel);
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support CONFIG_SYSCALL_USER_DISPATCH");
}
@@ -310,7 +333,7 @@ TEST(direct_dispatch_range)
* Instead of calculating libc addresses; allow the entire
* memory map and lock the selector.
*/
- ret = prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON, 0, -1L, &sel);
+ ret = prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_EXCLUSIVE_ON, 0, -1L, &sel);
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support CONFIG_SYSCALL_USER_DISPATCH");
}
@@ -323,4 +346,35 @@ TEST(direct_dispatch_range)
}
}
+static void test_range(struct __test_metadata *_metadata,
+ unsigned long op, unsigned long off,
+ unsigned long size, bool dispatch)
+{
+ nr_syscalls_emulated = 0;
+ SYSCALL_DISPATCH_OFF(glob_sel);
+ EXPECT_EQ(0, prctl(PR_SET_SYSCALL_USER_DISPATCH, op, off, size, &glob_sel));
+ SYSCALL_DISPATCH_ON(glob_sel);
+ if (dispatch) {
+ EXPECT_EQ(syscall(MAGIC_SYSCALL_1), MAGIC_SYSCALL_1);
+ EXPECT_EQ(nr_syscalls_emulated, 1);
+ } else {
+ EXPECT_EQ(syscall(MAGIC_SYSCALL_1), -1);
+ EXPECT_EQ(nr_syscalls_emulated, 0);
+ }
+}
+
+TEST(dispatch_range)
+{
+ ASSERT_EQ(0, setup_sigsys_handler());
+	test_range(_metadata, PR_SYS_DISPATCH_EXCLUSIVE_ON, 0, 0, true);
+	test_range(_metadata, PR_SYS_DISPATCH_EXCLUSIVE_ON, syscall_addr, 1, false);
+	test_range(_metadata, PR_SYS_DISPATCH_EXCLUSIVE_ON, syscall_addr - 100, 200, false);
+	test_range(_metadata, PR_SYS_DISPATCH_EXCLUSIVE_ON, syscall_addr + 1, 100, true);
+	test_range(_metadata, PR_SYS_DISPATCH_EXCLUSIVE_ON, syscall_addr - 100, 100, true);
+	test_range(_metadata, PR_SYS_DISPATCH_INCLUSIVE_ON, syscall_addr, 1, true);
+	test_range(_metadata, PR_SYS_DISPATCH_INCLUSIVE_ON, syscall_addr - 1, 1, false);
+	test_range(_metadata, PR_SYS_DISPATCH_INCLUSIVE_ON, syscall_addr + 1, 1, false);
+ SYSCALL_DISPATCH_OFF(glob_sel);
+}
+
TEST_HARNESS_MAIN
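
Read back from the test_range() expectations above, the two dispatch ops are complements of each other. The sketch below spells this out; the region bounds are hypothetical:

	char sel = SYSCALL_DISPATCH_FILTER_ALLOW;

	/* EXCLUSIVE_ON: dispatch every syscall except those issued from
	 * [off, off + len), typically a libc or trampoline region. */
	prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_EXCLUSIVE_ON,
	      trampoline_start, trampoline_len, &sel);

	/* INCLUSIVE_ON: dispatch only syscalls issued from [off, off + len). */
	prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_INCLUSIVE_ON,
	      instrumented_start, instrumented_len, &sel);
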
diff --git a/tools/testing/selftests/sysctl/sysctl.sh b/tools/testing/selftests/sysctl/sysctl.sh
index a10350c8a46e..b2d8bd9026a7 100755
--- a/tools/testing/selftests/sysctl/sysctl.sh
+++ b/tools/testing/selftests/sysctl/sysctl.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# SPDX-License-Identifier: GPL-2.0-or-later OR copyleft-next-0.3.1
# Copyright (C) 2017 Luis R. Rodriguez <mcgrof@kernel.org>
diff --git a/tools/testing/selftests/tc-testing/config b/tools/testing/selftests/tc-testing/config
index db176fe7d0c3..c20aa16b1d63 100644
--- a/tools/testing/selftests/tc-testing/config
+++ b/tools/testing/selftests/tc-testing/config
@@ -21,6 +21,7 @@ CONFIG_NF_NAT=m
CONFIG_NETFILTER_XT_TARGET_LOG=m
CONFIG_NET_SCHED=y
+CONFIG_IP_SET=m
#
# Queueing/Scheduling
@@ -30,6 +31,7 @@ CONFIG_NET_SCH_CBS=m
CONFIG_NET_SCH_CHOKE=m
CONFIG_NET_SCH_CODEL=m
CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_DUALPI2=m
CONFIG_NET_SCH_ETF=m
CONFIG_NET_SCH_FQ=m
CONFIG_NET_SCH_FQ_CODEL=m
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
index 5596f4df0e9f..b2cc6ea74450 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
@@ -879,7 +879,7 @@
"cmdUnderTest": "$TC actions add action police pkts_rate 1000 pkts_burst 200 index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions ls action police",
- "matchPattern": "action order [0-9]*: police 0x1 rate 0bit burst 0b mtu 4096Mb pkts_rate 1000 pkts_burst 200",
+ "matchPattern": "action order [0-9]*: police 0x1 rate 0bit burst 0b mtu (4Gb|4096Mb) pkts_rate 1000 pkts_burst 200",
"matchCount": "1",
"teardown": [
"$TC actions flush action police"
diff --git a/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json b/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json
index 9aa44d8176d9..47de27fd4f90 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json
@@ -128,6 +128,32 @@
]
},
{
+ "id": "5456",
+ "name": "Test htb_dequeue_tree with deactivation and row emptying",
+ "category": [
+ "qdisc",
+ "htb"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$IP link set dev $DUMMY up || true",
+ "$IP addr add 10.10.11.10/24 dev $DUMMY || true",
+ "$TC qdisc add dev $DUMMY root handle 1: htb default 1",
+ "$TC class add dev $DUMMY parent 1: classid 1:1 htb rate 64bit ",
+ "$TC qdisc add dev $DUMMY parent 1:1 handle 2: netem",
+ "$TC qdisc add dev $DUMMY parent 2:1 handle 3: blackhole"
+ ],
+ "cmdUnderTest": "ping -c1 -W0.01 -I $DUMMY 10.10.11.11",
+ "expExitCode": "1",
+ "verifyCmd": "$TC -j qdisc show dev $DUMMY",
+ "matchJSON": [],
+ "teardown": [
+ "$TC qdisc del dev $DUMMY root"
+ ]
+ },
+ {
"id": "c024",
"name": "Test TBF with SKBPRIO - catch qlen corner cases",
"category": [
@@ -160,6 +186,204 @@
]
},
{
+ "id": "34c0",
+ "name": "Test TBF with HHF Backlog Accounting in gso_skb case against underflow",
+ "category": [
+ "qdisc",
+ "tbf",
+ "hhf"
+ ],
+ "plugins": {
+ "requires": [
+ "nsPlugin"
+ ]
+ },
+ "setup": [
+ "$IP link set dev $DUMMY up || true",
+ "$IP addr add 10.10.11.10/24 dev $DUMMY || true",
+ "$TC qdisc add dev $DUMMY root handle 1: tbf rate 8bit burst 100b latency 100ms",
+ "$TC qdisc replace dev $DUMMY handle 2: parent 1:1 hhf limit 1000",
+ [
+ "ping -I $DUMMY -c2 10.10.11.11",
+ 1
+ ],
+ "$TC qdisc change dev $DUMMY handle 2: parent 1:1 hhf limit 1"
+ ],
+ "cmdUnderTest": "$TC qdisc del dev $DUMMY handle 2: parent 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s qdisc show dev $DUMMY",
+ "matchPattern": "backlog 0b 0p",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "fd68",
+ "name": "Test TBF with CODEL Backlog Accounting in gso_skb case against underflow",
+ "category": [
+ "qdisc",
+ "tbf",
+ "codel"
+ ],
+ "plugins": {
+ "requires": [
+ "nsPlugin"
+ ]
+ },
+ "setup": [
+ "$IP link set dev $DUMMY up || true",
+ "$IP addr add 10.10.11.10/24 dev $DUMMY || true",
+ "$TC qdisc add dev $DUMMY root handle 1: tbf rate 8bit burst 100b latency 100ms",
+ "$TC qdisc replace dev $DUMMY handle 2: parent 1:1 codel limit 1000",
+ [
+ "ping -I $DUMMY -c2 10.10.11.11",
+ 1
+ ],
+ "$TC qdisc change dev $DUMMY handle 2: parent 1:1 codel limit 1"
+ ],
+ "cmdUnderTest": "$TC qdisc del dev $DUMMY handle 2: parent 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s qdisc show dev $DUMMY",
+ "matchPattern": "backlog 0b 0p",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "514e",
+ "name": "Test TBF with PIE Backlog Accounting in gso_skb case against underflow",
+ "category": [
+ "qdisc",
+ "tbf",
+ "pie"
+ ],
+ "plugins": {
+ "requires": [
+ "nsPlugin"
+ ]
+ },
+ "setup": [
+ "$IP link set dev $DUMMY up || true",
+ "$IP addr add 10.10.11.10/24 dev $DUMMY || true",
+ "$TC qdisc add dev $DUMMY root handle 1: tbf rate 8bit burst 100b latency 100ms",
+ "$TC qdisc replace dev $DUMMY handle 2: parent 1:1 pie limit 1000",
+ [
+ "ping -I $DUMMY -c2 10.10.11.11",
+ 1
+ ],
+ "$TC qdisc change dev $DUMMY handle 2: parent 1:1 pie limit 1"
+ ],
+ "cmdUnderTest": "$TC qdisc del dev $DUMMY handle 2: parent 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s qdisc show dev $DUMMY",
+ "matchPattern": "backlog 0b 0p",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "6c97",
+ "name": "Test TBF with FQ Backlog Accounting in gso_skb case against underflow",
+ "category": [
+ "qdisc",
+ "tbf",
+ "fq"
+ ],
+ "plugins": {
+ "requires": [
+ "nsPlugin"
+ ]
+ },
+ "setup": [
+ "$IP link set dev $DUMMY up || true",
+ "$IP addr add 10.10.11.10/24 dev $DUMMY || true",
+ "$TC qdisc add dev $DUMMY root handle 1: tbf rate 8bit burst 100b latency 100ms",
+ "$TC qdisc replace dev $DUMMY handle 2: parent 1:1 fq limit 1000",
+ [
+ "ping -I $DUMMY -c2 10.10.11.11",
+ 1
+ ],
+ "$TC qdisc change dev $DUMMY handle 2: parent 1:1 fq limit 1"
+ ],
+ "cmdUnderTest": "$TC qdisc del dev $DUMMY handle 2: parent 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s qdisc show dev $DUMMY",
+ "matchPattern": "backlog 0b 0p",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "5d0b",
+ "name": "Test TBF with FQ_CODEL Backlog Accounting in gso_skb case against underflow",
+ "category": [
+ "qdisc",
+ "tbf",
+ "fq_codel"
+ ],
+ "plugins": {
+ "requires": [
+ "nsPlugin"
+ ]
+ },
+ "setup": [
+ "$IP link set dev $DUMMY up || true",
+ "$IP addr add 10.10.11.10/24 dev $DUMMY || true",
+ "$TC qdisc add dev $DUMMY root handle 1: tbf rate 8bit burst 100b latency 100ms",
+ "$TC qdisc replace dev $DUMMY handle 2: parent 1:1 fq_codel limit 1000",
+ [
+ "ping -I $DUMMY -c2 10.10.11.11",
+ 1
+ ],
+ "$TC qdisc change dev $DUMMY handle 2: parent 1:1 fq_codel limit 1"
+ ],
+ "cmdUnderTest": "$TC qdisc del dev $DUMMY handle 2: parent 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s qdisc show dev $DUMMY",
+ "matchPattern": "backlog 0b 0p",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "21c3",
+ "name": "Test TBF with FQ_PIE Backlog Accounting in gso_skb case against underflow",
+ "category": [
+ "qdisc",
+ "tbf",
+ "fq_pie"
+ ],
+ "plugins": {
+ "requires": [
+ "nsPlugin"
+ ]
+ },
+ "setup": [
+ "$IP link set dev $DUMMY up || true",
+ "$IP addr add 10.10.11.10/24 dev $DUMMY || true",
+ "$TC qdisc add dev $DUMMY root handle 1: tbf rate 8bit burst 100b latency 100ms",
+ "$TC qdisc replace dev $DUMMY handle 2: parent 1:1 fq_pie limit 1000",
+ [
+ "ping -I $DUMMY -c2 10.10.11.11",
+ 1
+ ],
+ "$TC qdisc change dev $DUMMY handle 2: parent 1:1 fq_pie limit 1"
+ ],
+ "cmdUnderTest": "$TC qdisc del dev $DUMMY handle 2: parent 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s qdisc show dev $DUMMY",
+ "matchPattern": "backlog 0b 0p",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
"id": "a4bb",
"name": "Test FQ_CODEL with HTB parent - force packet drop with empty queue",
"category": [
@@ -478,7 +702,6 @@
"$TC qdisc add dev $DUMMY parent 1:1 handle 2:0 netem duplicate 100%",
"$TC filter add dev $DUMMY parent 1:0 protocol ip prio 1 u32 match ip dst 10.10.10.1/32 flowid 1:1",
"$TC class add dev $DUMMY parent 1:0 classid 1:2 hfsc ls m2 10Mbit",
- "$TC qdisc add dev $DUMMY parent 1:2 handle 3:0 netem duplicate 100%",
"$TC filter add dev $DUMMY parent 1:0 protocol ip prio 2 u32 match ip dst 10.10.10.2/32 flowid 1:2",
"ping -c 1 10.10.10.1 -I$DUMMY > /dev/null || true",
"$TC filter del dev $DUMMY parent 1:0 protocol ip prio 1",
@@ -491,8 +714,8 @@
{
"kind": "hfsc",
"handle": "1:",
- "bytes": 392,
- "packets": 4
+ "bytes": 294,
+ "packets": 3
}
],
"matchCount": "1",
@@ -635,5 +858,180 @@
"$TC qdisc del dev $DUMMY handle 1:0 root",
"$IP addr del 10.10.10.10/24 dev $DUMMY || true"
]
+ },
+ {
+ "id": "d74b",
+ "name": "Test use-after-free with DRR/NETEM/BLACKHOLE chain",
+ "category": [
+ "qdisc",
+ "hfsc",
+ "drr",
+ "netem",
+ "blackhole"
+ ],
+ "plugins": {
+ "requires": [
+ "nsPlugin",
+ "scapyPlugin"
+ ]
+ },
+ "setup": [
+ "$IP link set dev $DUMMY up || true",
+ "$IP addr add 10.10.11.10/24 dev $DUMMY || true",
+ "$TC qdisc add dev $DUMMY root handle 1: drr",
+ "$TC filter add dev $DUMMY parent 1: basic classid 1:1",
+ "$TC class add dev $DUMMY parent 1: classid 1:1 drr",
+ "$TC qdisc add dev $DUMMY parent 1:1 handle 2: hfsc def 1",
+ "$TC class add dev $DUMMY parent 2: classid 2:1 hfsc rt m1 8 d 1 m2 0",
+ "$TC qdisc add dev $DUMMY parent 2:1 handle 3: netem",
+ "$TC qdisc add dev $DUMMY parent 3:1 handle 4: blackhole",
+ "ping -c1 -W0.01 -I $DUMMY 10.10.11.11 || true",
+ "$TC class del dev $DUMMY classid 1:1"
+ ],
+ "cmdUnderTest": "ping -c1 -W0.01 -I $DUMMY 10.10.11.11",
+ "expExitCode": "1",
+ "verifyCmd": "$TC -j class ls dev $DUMMY classid 1:1",
+ "matchJSON": [],
+ "teardown": [
+ "$TC qdisc del dev $DUMMY root handle 1: drr"
+ ]
+ },
+ {
+ "id": "be28",
+ "name": "Try to add fq_codel qdisc as a child of an hhf qdisc",
+ "category": [
+ "qdisc",
+ "fq_codel",
+ "hhf"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DUMMY root handle a: hhf"
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY parent a: handle b: fq_codel",
+ "expExitCode": "2",
+ "verifyCmd": "$TC -j qdisc ls dev $DUMMY handle b:",
+ "matchJSON": [],
+ "teardown": [
+ "$TC qdisc del dev $DUMMY root"
+ ]
+ },
+ {
+ "id": "fcb5",
+ "name": "Try to add pie qdisc as a child of a drr qdisc",
+ "category": [
+ "qdisc",
+ "pie",
+ "drr"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DUMMY root handle a: drr"
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY parent a: handle b: pie",
+ "expExitCode": "2",
+ "verifyCmd": "$TC -j qdisc ls dev $DUMMY handle b:",
+ "matchJSON": [],
+ "teardown": [
+ "$TC qdisc del dev $DUMMY root"
+ ]
+ },
+ {
+ "id": "7801",
+ "name": "Try to add fq qdisc as a child of an inexistent hfsc class",
+ "category": [
+ "qdisc",
+ "sfq",
+ "hfsc"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DUMMY root handle a: hfsc"
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY parent a:fff2 sfq limit 4",
+ "expExitCode": "2",
+ "verifyCmd": "$TC -j qdisc ls dev $DUMMY handle b:",
+ "matchJSON": [],
+ "teardown": [
+ "$TC qdisc del dev $DUMMY root"
+ ]
+ },
+ {
+ "id": "4989",
+ "name": "Try to add an fq child to an ingress qdisc",
+ "category": [
+ "qdisc",
+ "ingress"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DUMMY handle ffff:0 ingress"
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY parent ffff:0 handle ffe0:0 fq",
+ "expExitCode": "2",
+ "verifyCmd": "$TC -j qdisc ls dev $DUMMY handle ffe0:",
+ "matchJSON": [],
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY ingress"
+ ]
+ },
+ {
+ "id": "c2b0",
+ "name": "Try to add an fq child to a clsact qdisc",
+ "category": [
+ "qdisc",
+ "ingress"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DUMMY handle ffff:0 clsact"
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY parent ffff:0 handle ffe0:0 fq",
+ "expExitCode": "2",
+ "verifyCmd": "$TC -j qdisc ls dev $DUMMY handle ffe0:",
+ "matchJSON": [],
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY clsact"
+ ]
+ },
+ {
+ "id": "4366",
+ "name": "CAKE with QFQ Parent - CAKE enqueue with packets dropping",
+ "category": [
+ "qdisc",
+ "cake",
+ "netem"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup":[
+ "$TC qdisc add dev $DUMMY handle 1: root qfq",
+ "$TC class add dev $DUMMY parent 1: classid 1:1 qfq maxpkt 1024",
+ "$TC qdisc add dev $DUMMY parent 1:1 handle 2: cake memlimit 9",
+ "$TC filter add dev $DUMMY protocol ip parent 1: prio 1 u32 match ip protocol 1 0xff flowid 1:1",
+ "ping -I$DUMMY -f -c1 -s64 -W1 10.10.10.1 || true",
+ "$TC qdisc replace dev $DUMMY parent 1:1 handle 3: netem delay 0ms"
+ ],
+ "cmdUnderTest": "ping -I$DUMMY -f -c1 -s64 -W1 10.10.10.1 || true",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s qdisc show dev $DUMMY",
+ "matchPattern": "qdisc qfq 1:",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
}
]
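
The three TBF backlog-accounting tests above (6c97, 5d0b, 21c3) exercise one scenario: the very slow 8bit TBF rate is intended to leave a dequeued packet parked in the gso_skb slot, the child's limit is then shrunk to 1 and the child deleted, and the parent's backlog counters must come out as "backlog 0b 0p" rather than underflowing. A minimal hand-run sketch of the fq variant, reusing tdc's $TC/$IP/$DUMMY environment variables (substitute tc(8), ip(8) and a dummy interface of your own when running outside the harness):

  $IP link set dev $DUMMY up || true
  $IP addr add 10.10.11.10/24 dev $DUMMY || true
  $TC qdisc add dev $DUMMY root handle 1: tbf rate 8bit burst 100b latency 100ms
  $TC qdisc replace dev $DUMMY handle 2: parent 1:1 fq limit 1000
  ping -I $DUMMY -c2 10.10.11.11 || true   # enqueue; the slow TBF should hold one skb as gso_skb
  $TC qdisc change dev $DUMMY handle 2: parent 1:1 fq limit 1
  $TC qdisc del dev $DUMMY handle 2: parent 1:1
  $TC -s qdisc show dev $DUMMY             # must show "backlog 0b 0p", not an underflowed count
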
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/dualpi2.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/dualpi2.json
new file mode 100644
index 000000000000..cd1f2ee8f354
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/dualpi2.json
@@ -0,0 +1,254 @@
+[
+ {
+ "id": "a4c7",
+ "name": "Create DualPI2 with default setting",
+ "category": [
+ "qdisc",
+ "dualpi2"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dualpi2",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc dualpi2 1: root refcnt [0-9]+ limit 10000p.* step_thresh 1ms min_qlen_step 0p coupling_factor 2 drop_on_overload drop_dequeue classic_protection 10% l4s_ect split_gso",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "1ea4",
+ "name": "Create DualPI2 with memlimit",
+ "category": [
+ "qdisc",
+ "dualpi2"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dualpi2 memlimit 20000000",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc dualpi2 1: root refcnt [0-9]+ limit 10000p.* memlimit 20000000B",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "2130",
+ "name": "Create DualPI2 with typical_rtt and max_rtt",
+ "category": [
+ "qdisc",
+ "dualpi2"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dualpi2 typical_rtt 20ms max_rtt 200ms",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc dualpi2 1: root refcnt [0-9]+ limit 10000p.* target 20ms tupdate 20ms alpha 0.042969 beta 1.496094",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "90c1",
+ "name": "Create DualPI2 with max_rtt",
+ "category": [
+ "qdisc",
+ "dualpi2"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dualpi2 max_rtt 300ms",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc dualpi2 1: root refcnt [0-9]+ limit 10000p.* target 50ms tupdate 50ms alpha 0.050781 beta 0.996094",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "7b3c",
+ "name": "Create DualPI2 with any_ect option",
+ "category": [
+ "qdisc",
+ "dualpi2"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dualpi2 any_ect",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc dualpi2 1: root refcnt [0-9]+ limit 10000p .* any_ect",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "49a3",
+ "name": "Create DualPI2 with overflow option",
+ "category": [
+ "qdisc",
+ "dualpi2"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dualpi2 overflow",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc dualpi2 1: root refcnt [0-9]+ limit 10000p.* overflow",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "d0a1",
+ "name": "Create DualPI2 with drop_enqueue option",
+ "category": [
+ "qdisc",
+ "dualpi2"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dualpi2 drop_enqueue",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc dualpi2 1: root refcnt [0-9]+ limit 10000p .* drop_enqueue",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "f051",
+ "name": "Create DualPI2 with no_split_gso option",
+ "category": [
+ "qdisc",
+ "dualpi2"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dualpi2 no_split_gso",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc dualpi2 1: root refcnt [0-9]+ limit 10000p .* no_split_gso",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "456b",
+ "name": "Create DualPI2 with packet step_thresh",
+ "category": [
+ "qdisc",
+ "dualpi2"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dualpi2 step_thresh 3p",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc dualpi2 1: root refcnt [0-9]+ limit 10000p .* step_thresh 3p",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "610c",
+ "name": "Create DualPI2 with packet min_qlen_step",
+ "category": [
+ "qdisc",
+ "dualpi2"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dualpi2 min_qlen_step 1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc dualpi2 1: root refcnt [0-9]+ limit 10000p .* min_qlen_step 1p",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "b4fa",
+ "name": "Create DualPI2 with packet coupling_factor",
+ "category": [
+ "qdisc",
+ "dualpi2"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dualpi2 coupling_factor 1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc dualpi2 1: root refcnt [0-9]+ limit 10000p .* coupling_factor 1",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "37f1",
+ "name": "Create DualPI2 with packet classic_protection",
+ "category": [
+ "qdisc",
+ "dualpi2"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dualpi2 classic_protection 0",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc dualpi2 1: root refcnt [0-9]+ limit 10000p .* classic_protection 0%",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ }
+]
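
Every dualpi2 test above follows the same add-and-dump pattern: create the qdisc with a single option set, then grep the qdisc dump for the value echoed back. One case run by hand, again with tdc's $TC/$DUMMY variables (a sketch; any option exercised above can be substituted):

  $TC qdisc add dev $DUMMY handle 1: root dualpi2 step_thresh 3p
  $TC qdisc show dev $DUMMY     # expect "qdisc dualpi2 1: root ... step_thresh 3p" in the dump
  $TC qdisc del dev $DUMMY handle 1: root
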
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/netem.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/netem.json
index 3c4444961488..718d2df2aafa 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/netem.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/netem.json
@@ -336,5 +336,86 @@
"teardown": [
"$TC qdisc del dev $DUMMY handle 1: root"
]
+ },
+ {
+ "id": "d34d",
+ "name": "NETEM test qdisc duplication restriction in qdisc tree in netem_change root",
+ "category": ["qdisc", "netem"],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DUMMY root handle 1: netem limit 1",
+ "$TC qdisc add dev $DUMMY parent 1: handle 2: netem limit 1"
+ ],
+ "cmdUnderTest": "$TC qdisc change dev $DUMMY handle 1: netem duplicate 50%",
+ "expExitCode": "2",
+ "verifyCmd": "$TC -s qdisc show dev $DUMMY",
+ "matchPattern": "qdisc netem",
+ "matchCount": "2",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1:0 root"
+ ]
+ },
+ {
+ "id": "b33f",
+ "name": "NETEM test qdisc duplication restriction in qdisc tree in netem_change non-root",
+ "category": ["qdisc", "netem"],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DUMMY root handle 1: netem limit 1",
+ "$TC qdisc add dev $DUMMY parent 1: handle 2: netem limit 1"
+ ],
+ "cmdUnderTest": "$TC qdisc change dev $DUMMY handle 2: netem duplicate 50%",
+ "expExitCode": "2",
+ "verifyCmd": "$TC -s qdisc show dev $DUMMY",
+ "matchPattern": "qdisc netem",
+ "matchCount": "2",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1:0 root"
+ ]
+ },
+ {
+ "id": "cafe",
+ "name": "NETEM test qdisc duplication restriction in qdisc tree",
+ "category": ["qdisc", "netem"],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DUMMY root handle 1: netem limit 1 duplicate 100%"
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY parent 1: handle 2: netem duplicate 100%",
+ "expExitCode": "2",
+ "verifyCmd": "$TC -s qdisc show dev $DUMMY",
+ "matchPattern": "qdisc netem",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1:0 root"
+ ]
+ },
+ {
+ "id": "1337",
+ "name": "NETEM test qdisc duplication restriction in qdisc tree across branches",
+ "category": ["qdisc", "netem"],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DUMMY parent root handle 1:0 hfsc",
+ "$TC class add dev $DUMMY parent 1:0 classid 1:1 hfsc rt m2 10Mbit",
+ "$TC qdisc add dev $DUMMY parent 1:1 handle 2:0 netem",
+ "$TC class add dev $DUMMY parent 1:0 classid 1:2 hfsc rt m2 10Mbit"
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY parent 1:2 handle 3:0 netem duplicate 100%",
+ "expExitCode": "2",
+ "verifyCmd": "$TC -s qdisc show dev $DUMMY",
+ "matchPattern": "qdisc netem",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1:0 root"
+ ]
}
]
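
The four netem tests above (d34d, b33f, cafe, 1337) pin down one rule from four angles: a netem using "duplicate" must not share a qdisc tree with a second netem, whether the option is set via netem_change on the root or a non-root qdisc, added as a child of a duplicating netem, or added on a sibling branch. In each case the rejected operation exits with code 2 and the dump still shows the original number of netem qdiscs. The root-change case by hand (a sketch using tdc's variables):

  $TC qdisc add dev $DUMMY root handle 1: netem limit 1
  $TC qdisc add dev $DUMMY parent 1: handle 2: netem limit 1
  $TC qdisc change dev $DUMMY handle 1: netem duplicate 50%   # rejected (exit 2): tree already holds another netem
  $TC -s qdisc show dev $DUMMY                                # still exactly two netem qdiscs
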
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfq.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfq.json